Merge remote-tracking branch 'origin/tmp-3e5de27' into msm-4.8

* origin/tmp-3e5de27:
  Linux 4.9-rc8
  mm, vmscan: add cond_resched() into shrink_node_memcg()
  mm: workingset: fix NULL ptr in count_shadow_nodes
  kbuild: fix building bzImage with CONFIG_TRIM_UNUSED_KSYMS enabled
  net: avoid signed overflows for SO_{SND|RCV}BUFFORCE
  geneve: avoid use-after-free of skb->data
  tipc: check minimum bearer MTU
  net: renesas: ravb: unintialized return value
  sh_eth: remove unchecked interrupts for RZ/A1
  net: bcmgenet: Utilize correct struct device for all DMA operations
  Fix up a couple of field names in the CREDITS file
  NET: usb: qmi_wwan: add support for Telit LE922A PID 0x1040
  cdc_ether: Fix handling connection notification
  ip6_offload: check segs for NULL in ipv6_gso_segment.
  RDS: TCP: unregister_netdevice_notifier() in error path of rds_tcp_init_net
  Revert: "ip6_tunnel: Update skb->protocol to ETH_P_IPV6 in ip6_tnl_xmit()"
  ipv6: Set skb->protocol properly for local output
  ipv4: Set skb->protocol properly for local output
  packet: fix race condition in packet_set_ring
  net: ethernet: altera: TSE: do not use tx queue lock in tx completion handler
  net: ethernet: altera: TSE: Remove unneeded dma sync for tx buffers
  default exported asm symbols to zero
  arm64: dts: juno: fix cluster sleep state entry latency on all SoC versions
  net: ethernet: stmmac: fix of-node and fixed-link-phydev leaks
  net: ethernet: stmmac: platform: fix outdated function header
  net: ethernet: stmmac: dwmac-meson8b: fix probe error path
  net: ethernet: stmmac: dwmac-generic: fix probe error path
  net: ethernet: stmmac: dwmac-rk: fix probe error path
  net: ethernet: stmmac: dwmac-sti: fix probe error path
  net: ethernet: stmmac: dwmac-socfpga: fix use-after-free on probe errors
  net/rtnetlink: fix attribute name in nlmsg_size() comments
  ixgbe/ixgbevf: Don't use lco_csum to compute IPv4 checksum
  igb/igbvf: Don't use lco_csum to compute IPv4 checksum
  net: asix: Fix AX88772_suspend() USB vendor commands failure issues
  kbuild: make sure autoksyms.h exists early
  KVM: use after free in kvm_ioctl_create_device()
  can: peak: Add support for PCAN-USB X6 USB interface
  can: peak: Fix bittiming fields size in bits
  mm: fix false-positive WARN_ON() in truncate/invalidate for hugetlb
  kasan: support use-after-scope detection
  kasan: update kasan_global for gcc 7
  lib/debugobjects: export for use in modules
  zram: fix unbalanced idr management at hot removal
  thp: fix corner case of munlock() of PTE-mapped THPs
  mm, thp: propagation of conditional compilation in khugepaged.c
  arm64: dts: juno: Correct PCI IO window
  macvtap: handle ubuf refcount correctly when meet errors
  tun: handle ubuf refcount correctly when meet errors
  net: ethernet: ti: cpsw: fix ASSERT_RTNL() warning during resume
  bpf: fix states equal logic for varlen access
  netfilter: arp_tables: fix invoking 32bit "iptable -P INPUT ACCEPT" failed in 64bit kernel
  l2tp: fix address test in __l2tp_ip6_bind_lookup()
  l2tp: fix lookup for sockets not bound to a device in l2tp_ip
  l2tp: fix racy socket lookup in l2tp_ip and l2tp_ip6 bind()
  l2tp: hold socket before dropping lock in l2tp_ip{, 6}_recv()
  l2tp: lock socket before checking flags in connect()
  cxgb4: Add PCI device ID for new adapter
  isofs: add KERN_CONT to printing of ER records
  net: fec: cache statistics while device is down
  vxlan: fix a potential issue when create a new vxlan fdb entry.
  Input: change KEY_DATA from 0x275 to 0x277
  openvswitch: Fix skb leak in IPv6 reassembly.
  esp6: Fix integrity verification when ESN are used
  esp4: Fix integrity verification when ESN are used
  drm/i915: drop the struct_mutex when wedged or trying to reset
  drm/i915: Don't touch NULL sg on i915_gem_object_get_pages_gtt() error
  xfrm_user: fix return value from xfrm_user_rcv_msg
  drm: Don't call drm_for_each_crtc with a non-KMS driver
  net: dsa: slave: fix fixed-link phydev leaks
  net: ethernet: ti: davinci_emac: fix fixed-link phydev and of-node leaks
  net: ethernet: dwc_eth_qos: fix fixed-link phydev leaks
  net: ethernet: renesas: ravb: fix fixed-link phydev leaks
  net: ethernet: mediatek: fix fixed-link phydev leaks
  net: ethernet: marvell: mvneta: fix fixed-link phydev leaks
  net: ethernet: ucc_geth: fix fixed-link phydev leaks
  net: ethernet: gianfar: fix fixed-link phydev leaks
  net: ethernet: fs_enet: fix fixed-link phydev leaks
  net: ethernet: fec: fix fixed-link phydev leaks
  net: ethernet: bcmgenet: fix fixed-link phydev leaks
  net: ethernet: bcmsysport: fix fixed-link phydev leaks
  net: ethernet: aurora: nb8800: fix fixed-link phydev leaks
  net: ethernet: altera: fix fixed-link phydev leaks
  of_mdio: add helper to deregister fixed-link PHYs
  net: dsa: slave: fix of-node leak and phy priority
  GSO: Reload iph after pskb_may_pull
  sched: cls_flower: remove from hashtable only in case skip sw flag is not set
  net/dccp: fix use-after-free in dccp_invalid_packet
  net: macb: ensure ordering write to re-enable RX smoothly
  net: macb: fix the RX queue reset in macb_rx()
  netlink: Call cb->done from a worker thread
  net/sched: pedit: make sure that offset is valid
  Re-enable CONFIG_MODVERSIONS in a slightly weaker form
  netfilter: ipv6: nf_defrag: drop mangled skb on ream error
  Revert "i2c: octeon: thunderx: Limit register access retries"
  ARC: mm: PAE40: Fix crash at munmap
  mremap: move_ptes: check pte dirty after its removal
  pwm: Fix device reference leak
  drm/radeon: fix check for port PM availability
  drm/amdgpu: fix check for port PM availability
  ovl: fix d_real() for stacked fs
  CIFS: iterate over posix acl xattr entry correctly in ACL_to_cifs_posix()
  Call echo service immediately after socket reconnect
  CIFS: Fix BUG() in calc_seckey()
  drm/amd/powerplay: initialize the soft_regs offset in struct smu7_hwmgr
  bpf/samples: Fix PT_REGS_IP on s390x and use it
  net: dsa: fix unbalanced dsa_switch_tree reference counting
  net: handle no dst on skb in icmp6_send
  dbri: Fix compiler warning
  qlogicpti: Fix compiler warnings
  net/mlx4: Fix uninitialized fields in rule when adding promiscuous mode to device managed flow steering
  Revert "net/mlx4_en: Avoid unregister_netdev at shutdown flow"
  net/sched: Export tc_tunnel_key so its UAPI accessible
  amd-xgbe: Fix unused suspend handlers build warning
  ARC: mm: IOC: Don't enable IOC by default
  ARC: Don't use "+l" inline asm constraint
  tcp: Set DEFAULT_TCP_CONG to bbr if DEFAULT_BBR is set
  net: phy: realtek: fix enabling of the TX-delay for RTL8211F
  Documentation: devicetree: clarify usage of the RGMII phy-modes
  net, sched: respect rcu grace period on cls destruction
  tipc: fix link statistics counter errors
  driver: macvtap: Unregister netdev rx_handler if macvtap_newlink fails
  net: qcom/emac: fix of_node and phydev leaks
  net: fsl/fman: fix fixed-link-phydev reference leak
  net: fsl/fman: fix phydev reference leak
  net: bcmgenet: fix phydev reference leak
  net: dsa: fix fixed-link-phy device leaks
  irda: fix overly long udelay()
  driver: ipvlan: Fix one possible memleak in ipvlan_link_new
  drm: hdlcd: Fix cleanup order
  netfilter: nat: fix crash when conntrack entry is re-used
  netfilter: nft_range: add the missing NULL pointer check
  netfilter: nf_tables: fix inconsistent element expiration calculation
  netfilter: nat: switch to new rhlist interface
  netfilter: nat: fix cmp return value
  netfilter: nft_hash: validate maximum value of u32 netlink hash attribute
  KVM: arm/arm64: vgic: Don't notify EOI for non-SPIs
  netfilter: fix nf_conntrack_helper documentation
  netfilter: Update nf_send_reset6 to consider L3 domain
  netfilter: Update ip_route_me_harder to consider L3 domain
  clk: bcm: Fix unmet Kconfig dependencies for CLK_BCM_63XX
  PCI: Set Read Completion Boundary to 128 iff Root Port supports it (_HPX)
  PCI: Export pcie_find_root_port
  clk: sunxi-ng: enable so-said LDOs for A33 SoC's pll-mipi clock
  ARM: dts: STiH407-family: fix i2c nodes
  flowcache: Increase threshold for refusing new allocations
  scsi: be2iscsi: allocate enough memory in beiscsi_boot_get_sinfo()
  scsi: mpt3sas: Unblock device after controller reset
  scsi: hpsa: use bus '3' for legacy HBA devices
  ARM: gr8: Rename the DTSI and relevant DTS
  clk: sunxi-ng: sun6i-a31: Enable PLL-MIPI LDOs when ungating it
  ahci: always fall back to single-MSI mode
  xfrm: unbreak xfrm_sk_policy_lookup
  scsi: libfc: fix seconds_since_last_reset miscalculation
  mwifiex: printk() overflow with 32-byte SSIDs
  Input: psmouse - disable automatic probing of BYD touchpads
  PCI: designware-plat: Update author email
  PCI: designware: Change maintainer to Joao Pinto
  MAINTAINERS: Add devicetree binding to PCI i.MX6 entry
  MAINTAINERS: Update Richard Zhu's email address
  libata-scsi: Fixup ata_gen_passthru_sense()
  mvsas: fix error return code in mvs_task_prep()
  pwm: meson: Add missing spin_lock_init()
  vti6: flush x-netns xfrm cache when vti interface is removed

Change-Id: I055dd31e2827d41c82aa2c23ec9772ec536e0c0b
Signed-off-by: Kyle Yan <kyan@codeaurora.org>
diff --git a/Documentation/ABI/testing/sysfs-class-dual-role-usb b/Documentation/ABI/testing/sysfs-class-dual-role-usb
new file mode 100644
index 0000000..a900fd7
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-dual-role-usb
@@ -0,0 +1,71 @@
+What:		/sys/class/dual_role_usb/.../
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan<badhri@google.com>
+Description:
+		Provide a generic interface to monitor and change
+		the state of dual-role USB ports. The name here
+		refers to the name specified in the
+		dual_role_phy_desc that is passed when registering
+		the dual_role_phy_instance through
+		devm_dual_role_instance_register.
+
+What:           /sys/class/dual_role_usb/.../supported_modes
+Date:           June 2015
+Contact:        Badhri Jagan Sridharan<badhri@google.com>
+Description:
+		This is a static node; once initialized, it is
+		not expected to change during runtime. "dfp"
+		refers to "downstream facing port", i.e. the port
+		can only act as a host. "ufp" refers to "upstream
+		facing port", i.e. the port can only act as a device.
+		"dfp ufp" refers to a "dual role port", i.e. the port
+		can either be a host port or a device port.
+
+What:		/sys/class/dual_role_usb/.../mode
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan<badhri@google.com>
+Description:
+		The mode node reports the current mode in which the
+		port is operating: "dfp" for host ports, "ufp" for
+		device ports, and "none" when no cable is connected.
+
+		On devices where the USB mode is software-controllable,
+		userspace can change the mode by writing "dfp" or "ufp".
+		On devices where the USB mode is fixed in hardware,
+		this attribute is read-only.
+
+What:		/sys/class/dual_role_usb/.../power_role
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan<badhri@google.com>
+Description:
+		The power_role node reports whether the port is
+		"sink"ing or "source"ing power, or "none" if the
+		port is not connected.
+
+		On devices implementing USB Power Delivery,
+		userspace can control the power role by writing "sink" or
+		"source". On devices without USB-PD, this attribute is
+		read-only.
+
+What:		/sys/class/dual_role_usb/.../data_role
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan<badhri@google.com>
+Description:
+		The data_role node reports whether the port is
+		acting as "host" or "device" for the USB data
+		connection, or "none" if there is no active data link.
+
+		On devices implementing USB Power Delivery, userspace
+		can control the data role by writing "host" or "device".
+		On devices without USB-PD, this attribute is read-only.
+
+What:		/sys/class/dual_role_usb/.../powers_vconn
+Date:		June 2015
+Contact:	Badhri Jagan Sridharan<badhri@google.com>
+Description:
+		The powers_vconn node reports whether the port
+		is supplying power to the VCONN pin.
+
+		On devices with software control of VCONN,
+		userspace can disable the power supply to VCONN by writing "n",
+		or enable the power supply by writing "y".
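+
+		As a purely illustrative example (the "otg_default"
+		port name below is a placeholder; the real name comes
+		from the dual_role_phy_desc used at registration), a
+		user-space program could request host mode on a
+		software-controllable port like this:
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+int main(void)
+{
+	const char *path = "/sys/class/dual_role_usb/otg_default/mode";
+	int fd = open(path, O_WRONLY);
+
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+	/* Request host mode; the write fails on ports whose mode is fixed
+	 * in hardware, since the attribute is then read-only.
+	 */
+	if (write(fd, "dfp", strlen("dfp")) < 0)
+		perror("write");
+	close(fd);
+	return 0;
+}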
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644
index 0000000..acb19b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
@@ -0,0 +1,16 @@
+What:		/sys/kernel/wakeup_reasons/last_resume_reason
+Date:		February 2014
+Contact:	Ruchi Kandoi <kandoiruchi@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_resume_reason file
+		is used to report wakeup reasons after the system exits
+		suspend.
+
+What:		/sys/kernel/wakeup_reasons/last_suspend_time
+Date:		March 2015
+Contact:	jinqian <jinqian@google.com>
+Description:
+		The /sys/kernel/wakeup_reasons/last_suspend_time file
+		is used to report the time spent in the last suspend
+		cycle. It contains two numbers (in seconds) separated
+		by a space. The first number is the time spent in the
+		suspend and resume processes. The second number is the
+		time spent in the sleep state.
\ No newline at end of file
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt
index 98bf7ac..9fa070c 100644
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -143,3 +143,36 @@
 where allocation failures are not a problem, and shouldn't bother the logs.
 
 NOTE: At the moment DMA_ATTR_NO_WARN is only implemented on PowerPC.
+
+DMA_ATTR_STRONGLY_ORDERED
+-------------------------
+
+DMA_ATTR_STRONGLY_ORDERED allocates memory with a very restrictive type
+of mapping (no unaligned accesses, no re-ordering, no write merging, no
+buffering, no pre-fetching). This has severe performance penalties and
+should not be used for general purpose DMA allocations. It should only
+be used if one of the restrictions on strongly ordered memory is required.
+
+DMA_ATTR_NO_DELAYED_UNMAP
+-------------------------
+
+DMA_ATTR_NO_DELAYED_UNMAP is used only by the MSM-specific lazy mapping
+feature. By default, the lazy mapping code holds an additional reference
+so that when the buffer is freed, the mapping is not destroyed and can be
+re-used. When this attribute is specified, the lazy mapping code will NOT
+hold that additional reference, and the mapping will be released as soon
+as the buffer is freed.
+
+DMA_ATTR_EXEC_MAPPING
+---------------------
+
+By default, DMA mappings are non-executable. Some use cases might require
+an executable mapping. This attribute can be used to indicate to the DMA
+subsystem that it should create an executable mapping for the buffer.
+
+DMA_ATTR_IOMMU_USE_UPSTREAM_HINT
+--------------------------------
+
+DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an SMMU will override any bus
+attributes (e.g. cacheability) provided by the client device. Some hardware
+may be designed to use the original attributes instead.
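+
+The attributes above are passed to the DMA API through the attrs argument of
+functions such as dma_alloc_attrs(). As a minimal, hypothetical sketch (the
+device, size and error handling are placeholders), a strongly ordered buffer
+could be requested like this:
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/gfp.h>
+
+/* Sketch only: DMA_ATTR_STRONGLY_ORDERED is an MSM-specific attribute, so
+ * this builds only on trees that define it.
+ */
+static void *alloc_strongly_ordered(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle)
+{
+	unsigned long attrs = DMA_ATTR_STRONGLY_ORDERED;
+
+	return dma_alloc_attrs(dev, size, dma_handle, GFP_KERNEL, attrs);
+}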
diff --git a/Documentation/android.txt b/Documentation/android.txt
new file mode 100644
index 0000000..d53aba4
--- /dev/null
+++ b/Documentation/android.txt
@@ -0,0 +1,119 @@
+				=============
+				A N D R O I D
+				=============
+
+Copyright (C) 2009 Google, Inc.
+Written by Mike Chan <mike@android.com>
+
+CONTENTS:
+---------
+
+1. Android
+  1.1 Required enabled config options
+  1.2 Required disabled config options
+  1.3 Recommended enabled config options
+2. Contact
+
+
+1. Android
+==========
+
+Android (www.android.com) is an open source operating system for mobile devices.
+This document describes configurations needed to run the Android framework on
+top of the Linux kernel.
+
+To see a working defconfig, look at msm_defconfig or goldfish_defconfig,
+which can be found at http://android.git.kernel.org in kernel/common.git
+and kernel/msm.git.
+
+
+1.1 Required enabled config options
+-----------------------------------
+After building a standard defconfig, ensure that these options are enabled in
+your .config or defconfig if they are not already. This list is based on the
+msm_defconfig. You should keep the rest of the default options enabled in the
+defconfig unless you know what you are doing.
+
+ANDROID_PARANOID_NETWORK
+ASHMEM
+CONFIG_FB_MODE_HELPERS
+CONFIG_FONT_8x16
+CONFIG_FONT_8x8
+CONFIG_YAFFS_SHORT_NAMES_IN_RAM
+DAB
+EARLYSUSPEND
+FB
+FB_CFB_COPYAREA
+FB_CFB_FILLRECT
+FB_CFB_IMAGEBLIT
+FB_DEFERRED_IO
+FB_TILEBLITTING
+HIGH_RES_TIMERS
+INOTIFY
+INOTIFY_USER
+INPUT_EVDEV
+INPUT_GPIO
+INPUT_MISC
+LEDS_CLASS
+LEDS_GPIO
+LOCK_KERNEL
+LOGGER
+LOW_MEMORY_KILLER
+MISC_DEVICES
+NEW_LEDS
+NO_HZ
+POWER_SUPPLY
+PREEMPT
+RAMFS
+RTC_CLASS
+RTC_LIB
+TMPFS
+UID_STAT
+UID16
+USB_FUNCTION
+USB_FUNCTION_ADB
+USER_WAKELOCK
+VIDEO_OUTPUT_CONTROL
+WAKELOCK
+YAFFS_AUTO_YAFFS2
+YAFFS_FS
+YAFFS_YAFFS1
+YAFFS_YAFFS2
+
+
+1.2 Required disabled config options
+------------------------------------
+CONFIG_YAFFS_DISABLE_LAZY_LOAD
+DNOTIFY
+
+
+1.3 Recommended enabled config options
+--------------------------------------
+ANDROID_PMEM
+PSTORE_CONSOLE
+PSTORE_RAM
+SCHEDSTATS
+DEBUG_PREEMPT
+DEBUG_MUTEXES
+DEBUG_SPINLOCK_SLEEP
+DEBUG_INFO
+FRAME_POINTER
+CPU_FREQ
+CPU_FREQ_TABLE
+CPU_FREQ_DEFAULT_GOV_ONDEMAND
+CPU_FREQ_GOV_ONDEMAND
+CRC_CCITT
+EMBEDDED
+INPUT_TOUCHSCREEN
+I2C
+I2C_BOARDINFO
+LOG_BUF_SHIFT=17
+SERIAL_CORE
+SERIAL_CORE_CONSOLE
+
+
+2. Contact
+==========
+website: http://android.git.kernel.org
+
+mailing-lists: android-kernel@googlegroups.com
diff --git a/Documentation/arm/msm/glink.txt b/Documentation/arm/msm/glink.txt
new file mode 100644
index 0000000..814c118
--- /dev/null
+++ b/Documentation/arm/msm/glink.txt
@@ -0,0 +1,1239 @@
+Introduction
+============
+
+G-Link, short for Generic Link, is a generic link-layer transport that supports
+a plug-in framework for physical transports. This allows it to adapt to
+different physical transports such as shared memory, UARTs, buses, and DMA.
+
+It is designed to enable zero copy, is power aware, and supports version and
+feature negotiation to enable phased upgrades. The design is symmetrical with
+the same high-level design and the same client API across all subsystems
+regardless of OS.
+
+Hardware description
+====================
+
+The hardware is managed by the transport plug-in. In the initial
+driver version, this is done by the SMEM Native Transport which requires
+shared memory and an interrupt in each direction between the local and remote
+subsystems. This transport is a replacement for SMD.
+
+Software description
+====================
+
+G-Link consists of:
+ * Client API
+ * Core that implements the high-level protocol
+ * Transport plug-ins that handle translation of the high-level protocol to wire
+   format
+
+The diagram below shows the organization of G-Link. Clients use G-Link to
+interface with the Transport Plug-in, which interfaces directly with the
+physical transport. The G-Link component is shown in further detail in the
+"Design" section.
+
+        +-----------+     +---------+      +-----------+
+        |  G-Link   |---->|Transport|----->| Physical  |
+        |           |<----| Plug-in |<-----| Transport |
+        +-----------+     +---------+      +-----------+
+
+Design
+======
+
+G-Link is conceptually broken down into a command and data queue. The command
+queue is used to pass commands for synchronizing buffer status, channel state,
+and for the equivalent of packet headers. The data queue is reserved for raw
+client data.
+
+For every packet in the data queue, there is at least one command
+that acts as the packet header that goes through the command queue. This
+separation is necessary to support the zero-copy use case for data packets and
+for DMA-type transports that need to have the transfer size known ahead of time.
+
+For copy-based transports, the command and data transports can be merged
+together in the transport plug-in resulting in traditional packet headers like
+we have today in other common protocols such as IP.
+
+As shown in the diagram below, the client itself communicates directly with the
+G-Link core, and uses the Transport Interface and the G-Link Core interface to
+create the transport plug-in.
+
+                                             +-----------+
+                     +------+    +---------+ |           |
+                     |      |<---|Transport|-|           |
+        +-------+    |      |    |   IF    | |           |
+        |Clients|<-->|      |    +---------+ | Transport |
+        +-------+    |G-Link|                |  Plug-in  |
+                     | Core |                |           |
+                     |      | +-------+      |           |
+                     |      |-|G-Link |----->|           |
+                     |      | |Core IF|      |           |
+                     +------+ +-------+      |           |
+                                             +-----------+
+
+A channel is a discrete data pipe used by a client to communicate with a
+remote system. All channels are multiplexed onto one or more physical
+transports.
+
+A receive intent is queued by a client to indicate that it is ready to receive
+a packet up to the specified size. The intent is sent to the remote side to
+let clients know that they can transmit and expect the client to be able to
+process it.
+
+Intents provide back-pressure to transmitting clients and remove the
+requirement for flow control. In addition, the intent size is used for
+copy-based transports to either reserve buffer space or allocate buffers to
+receive data to prevent blocking the underlying transport. Multiple intents
+can be queued.
+
+Transport Plug-in
+-----------------
+The transport plug-in is responsible for marshaling data and commands between
+the G-Link core and the physical transport. If the remote side is not running
+G-Link, then the transport plug-in will be more complex and will handle
+translating G-Link core commands and data into the proper protocol to
+interact with the remote system.
+
+Client API
+----------
+The API into the G-Link core is asynchronous by default, but may have blocking
+variants. If necessary, a thin client layer can be built on top of the
+asynchronous API to create a synchronous API for clients. All client
+notifications are serialized to ensure in-order notification of events.
+
+The G-Link client API includes the following functions for the following
+operations. Details of each function are described later in this section.
+ * glink_open() - Open a G-Link channel
+ * glink_close() - Close a G-Link channel
+ * glink_tx() - Transmit data
+ * glink_txv() - Transmit a buffer chain
+ * glink_queue_rx_intent() - Queue a receive intent
+ * glink_rx_done() - Signal consumption of the receive buffer
+ * glink_sigs_set() - Set a 32-bit signal field for client-specific signaling.
+                      Standard TIOCM bits are reserved in the upper bits of the
+                      field as a convenience for users, but they are 100
+                      percent pass-through.
+ * glink_sigs_local_get() - Get the local 32-bit control signal field
+ * glink_sigs_remote_get() - Get the remote 32-bit control signal field
+
+The TIOCM bitmasks are defined as follows:
+   #define SMD_CTR_SIG_SHFT 31
+   #define SMD_CTS_SIG_SHFT 30
+   #define SMD_CD_SIG_SHFT 29
+   #define SMD_RI_SIG_SHFT 28
+
+When a channel is opened by a client, the client provides an open
+configuration to the G-Link core as a parameter to the glink_open() function.
+This open configuration contains the following information:
+ * The name of the transport (optional)
+ * The name of the edge (remote processor)
+ * The name of the channel
+
+The open configuration also contains function pointers for the following
+operations, which are defined by the client:
+ * notify_rx() - Notify the client that data has been received
+ * notify_rxv() - Notify the client that vector data has been received
+ * notify_tx_done() - Notify the client that data has been transmitted
+ * notify_state() - Notify the client that the channel's state has changed
+ * notify_rx_intent_req() - Notify the client whether their request for a
+                            receive intent was granted
+ * notify_rx_sigs() - Notify the client that there has been a change in the
+                      state of the remote side's control signals
+ * notify_rx_abort() - During channel close, return structures associated
+                       with receive intents to the client
+ * notify_tx_abort() - During channel close, return structures associated
+                       with TX packets to the client
+ * notify_rx_tracer_pkt() - Notify the client that a tracer packet has been
+                            received
+
+Buffer ownership is transferred between the local G-Link and the remote G-Link
+using message passing. A typical transmit sequence is as follows:
+
+ 1. The remote side queues an RX intent. When the remote client queues the
+    intent by calling glink_queue_rx_intent(), the ownership of the buffer
+    transfers to the remote G-Link core, which notifies the local G-Link core
+    of the receive intent. The remote G-Link core is now ready to receive data
+    from the local client. In the zero-copy case, the remote G-Link core does
+    not need to allocate a buffer.
+
+        +------+                    +------+                 +------+
+        |Local |                    |Remote|                 |Remote|
+        |G-Link|                    |G-Link|                 |Client|
+        +------+                    +------+                 +------+
+           |                           |                        |
+           |                           | glink_queue_rx_intent()|
+           |                          +-+<---------------------+-+
+           |                          |-|                      |-|
+           |                          |-|(allocate/reserve     |-|
+           |                          |-| buffer)              |-|
+           |                          |-|-----+                |-|
+           |                          |-|     |                |-|
+           |                          |-|<----+                |-|
+           |                          +-+                      +-+
+           |   send_intent()           |                        |
+           |<--------------------------+                        |
+           |                           |                        |
+           |   (signal)                |                        |
+          +-+<-------------------------|                        |
+          |-|                          |                        |
+          |-|                          |                        |
+          |-|                          |                        |
+          +-+                          |                        |
+           |                           |                        |
+        +------+                    +------+                 +------+
+        |Local |                    |Remote|                 |Remote|
+        |G-Link|                    |G-Link|                 |Client|
+        +------+                    +------+                 +------+
+
+ 2. The local client can allocate a buffer, fill it, and send it to the remote
+    side. If multiple receive intents are available, then a first-fit
+    algorithm is used to select the receive intent.
+
+        +------+               +------+      +------+        +------+
+        |Local |               |Local |      |Remote|        |Remote|
+        |Client|               |G-Link|      |G-Link|        |Client|
+        +------+               +------+      +------+        +------+
+           |                      |             |               |
+           | (Allocate tx buffer) |             |               |
+          +-+--------+            |             |               |
+          |-|        |            |             |               |
+          |-|<-------+            |             |               |
+          |-|                     |             |               |
+          |-| (Copy data into     |             |               |
+          |-|  tx buffer)         |             |               |
+          |-|--------+            |             |               |
+          |-|        |            |             |               |
+          |-|<-------+            |             |               |
+          |-|                     |             |               |
+          |-| glink_tx()          |             |               |
+          |-|------------------->+-+ tx()       |               |
+          |-|                    |-|---------->+-+ notify_rx()  |
+          |-|                    |-|           |-|------------>+-+
+          +-+                    |-| (signal)  |-|             |-|
+           |                     |-|---------->|-|             |-|
+           |                     +-+           |-|             |-|
+           |                      |            +-+             |-|
+           |                      |             |              +-+
+           |                      |             |               |
+        +------+               +------+      +------+        +------+
+        |Local |               |Local |      |Remote|        |Remote|
+        |Client|               |G-Link|      |G-Link|        |Client|
+        +------+               +------+      +------+        +------+
+
+ 3. The transmit buffer ownership is returned to the local client after the
+    remote client has finished with it. At this point, the local client can
+    destroy/reuse the buffer.
+
+        +------+           +------+      +------+            +------+
+        |Local |           |Local |      |Remote|            |Remote|
+        |Client|           |G-Link|      |G-Link|            |Client|
+        +------+           +------+      +------+            +------+
+           |                  |             |                   |
+           |                  |             |   glink_rx_done() |
+           |                  |            +-+<----------------+-+
+           |                  |            |-|                 |-|
+           |                  |            |-| (copy-based     |-|
+           |                  |            |-|  transport:     |-|
+           |                  |            |-|  destroy/reuse  |-|
+           |                  |            |-|  buffer)        |-|
+           |                  |            |-|----------+      +-+
+           |                  |            |-|          |       |
+           |                  |            |-|          |       |
+           |                  |            |-|<---------+       |
+           |                  |   tx_done()|-|                  |
+           |                 +-+<----------|-|                  |
+           |                 |-|           |-|                  |
+           |                 |-| (signal)  |-|                  |
+           | notify_tx_done()|-|<----------|-|                  |
+          +-+<---------------|-|           |-|                  |
+          |-|                |-|           +-+                  |
+          |-|                |-|            |                   |
+          |-|                +-+            |                   |
+          +-+                 |             |                   |
+           |                  |             |                   |
+        +------+           +------+      +------+            +------+
+        |Local |           |Local |      |Remote|            |Remote|
+        |Client|           |G-Link|      |G-Link|            |Client|
+        +------+           +------+      +------+            +------+
+
+Transport Interface
+-------------------
+The transport interface is used for function calls from the G-Link core to a
+G-Link transport. Modules which implement this interface are G-Link
+transports. All function calls include the pointer to the transport instance
+and the data fields that should be encoded into a command packet to be sent to
+the remote processor. These functions act on the transport itself - they
+translate the commands into actions for each different transport. This interface
+contains APIs for transport negotiation, channel state, channel data, and
+power. Requests that change state always have an ACK to synchronize
+the state between the local and remote subsystems.
+
+The transport interface is implemented as follows:
+
+struct glink_transport_if {
+        /* Negotiation */
+        void (*tx_cmd_version)(struct glink_transport_if *if_ptr,
+                        uint32_t version,
+                        uint32_t features);
+        void (*tx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t version,
+                        uint32_t features);
+        void (*set_version)(struct glink_transport_if *if_ptr, uint32_t version,
+                        uint32_t features);
+
+        /* channel state */
+        int (*tx_cmd_ch_open)(struct glink_transport_if *if_ptr, uint32_t lcid,
+                        const char *name);
+        int (*tx_cmd_ch_close)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid);
+        void (*tx_cmd_ch_remote_open_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid);
+        void (*tx_cmd_ch_remote_close_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid);
+        int (*ssr)(struct glink_transport_if *if_ptr);
+
+        /* channel data */
+        int (*allocate_rx_intent)(size_t size,
+                                  struct glink_core_rx_intent *intent);
+        int (*deallocate_rx_intent)(struct glink_core_rx_intent *intent);
+
+        int (*tx_cmd_local_rx_intent)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid, size_t size, uint32_t liid);
+        void (*tx_cmd_local_rx_done)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid, uint32_t liid);
+        int (*tx)(struct glink_transport_if *if_ptr, uint32_t lcid,
+                        struct glink_core_tx_pkt *pctx);
+        int (*tx_cmd_rx_intent_req)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid, size_t size);
+        int (*tx_cmd_remote_rx_intent_req_ack)(
+                        struct glink_transport_if *if_ptr,
+                        uint32_t lcid, bool granted);
+        int (*tx_cmd_set_sigs)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid, uint32_t sigs);
+
+        /* Optional.  If NULL at xprt registration, dummies will be used */
+        int (*poll)(struct glink_transport_if *if_ptr, uint32_t lcid);
+        int (*mask_rx_irq)(struct glink_transport_if *if_ptr, uint32_t lcid,
+                        bool mask, void *pstruct);
+        int (*wait_link_down)(struct glink_transport_if *if_ptr);
+        int (*tx_cmd_tracer_pkt)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid, struct glink_core_tx_pkt *pctx);
+
+        /* private pointer for core */
+        struct glink_core_xprt_ctx *glink_core_priv;
+
+        /* core pointer (set during transport registration) */
+        struct glink_core_if *glink_core_if_ptr;
+};
+
+G-Link Core Interface
+---------------------
+The G-Link Core Interface is used by the transport to call back into G-Link
+core for messages or events received from the transport. This interface has
+APIs for transport negotiation, power, channel state, and channel data.
+Like the transport interface, requests that change state always have an ACK
+to synchronize the state between the local and remote subsystems.
+
+The G-Link Core Interface is implemented as follows:
+
+struct glink_core_if {
+        /* Negotiation */
+        void (*link_up)(struct glink_transport_if *if_ptr);
+        void (*rx_cmd_version)(struct glink_transport_if *if_ptr,
+                        uint32_t version,
+                        uint32_t features);
+        void (*rx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t version,
+                        uint32_t features);
+
+        /* channel management */
+        void (*rx_cmd_ch_remote_open)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid, const char *name);
+        void (*rx_cmd_ch_open_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid);
+        void (*rx_cmd_ch_remote_close)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid);
+        void (*rx_cmd_ch_close_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid);
+        void (*ch_state_local_trans)(struct glink_transport_if *if_ptr,
+                        uint32_t lcid,
+                        enum local_channel_state_e new_state);
+
+        /* channel data */
+        struct glink_core_rx_intent *(*rx_get_pkt_ctx)(
+                        struct glink_transport_if *if_ptr,
+                        uint32_t rcid, uint32_t liid);
+        void (*rx_put_pkt_ctx)(struct glink_transport_if *if_ptr, uint32_t rcid,
+                        struct glink_core_rx_intent *intent_ptr, bool complete);
+        void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid, uint32_t riid, size_t size);
+        void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
+                        uint32_t riid);
+        void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid, size_t size);
+        void (*rx_cmd_rx_intent_req_ack)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid, bool granted);
+        void (*rx_cmd_remote_sigs)(struct glink_transport_if *if_ptr,
+                        uint32_t rcid, uint32_t sigs);
+
+        /* channel scheduling */
+        void (*tx_resume)(struct glink_transport_if *if_ptr);
+};
+
+Power Management
+================
+
+Power management has yet to be implemented. See the "To do" section for more
+information.
+
+SMP/multi-core
+==============
+
+Locking and synchronization will be done using mutexes or spinlocks where
+appropriate.
+
+Security
+========
+
+No known security issues.
+
+Performance
+===========
+
+No known performance issues.
+
+Client Interface
+================
+
+Open
+----
+void *glink_open(const struct glink_open_config *cfg)
+
+Opens a logical channel. When this function is called, a notification is sent
+to the remote processor. Once the remote processor responds with an open
+command, the channel will be opened locally. At this point, the channel is
+considered fully open and ready for data operations, and the client is
+notified with a GLINK_CONNECTED notification.
+
+When a channel is opened by calling glink_open(), a structure of configuration
+information (struct glink_open_config) is passed to it. This includes the name
+of the transport, the name of the edge, and the name of the channel, along with
+pointers to notification functions:
+ * notify_rx() - Notify the client that data has been received
+ * notify_tx_done() - Notify the client that data has been transmitted
+ * notify_state() - Notify the client that the channel's state has
+                    changed
+ * notify_rx_intent_req() - Notify the client whether their request for a
+                            receive intent was granted
+ * notify_rxv() - Receive notification for vector buffers
+ * notify_rx_sigs() - Notification callback for change in state of remote
+                      side's control signals.
+ * notify_rx_abort() - During channel close, return structures associated with
+                       receive intents to the client.
+ * notify_tx_abort() - During channel close, return structures associated with
+                       TX packets to the client.
+ * notify_rx_tracer_pkt() - Receive notification for tracer packet
+
+This structure is copied internally during the glink_open() call. The full
+definition of the structure is below:
+
+struct glink_open_config {
+        unsigned options;
+
+        const char *transport;
+        const char *edge;
+        const char *name;
+
+        struct glink_ch_ctx (*notify_rx)(void *handle, const void *priv,
+                        const void *pkt_priv, const void *ptr, size_t size);
+        struct glink_ch_ctx (*notify_tx_done)(void *handle, const void *priv,
+                        const void *pkt_priv, const void *ptr);
+        struct glink_ch_ctx (*notify_state)(void *handle, const void *priv,
+                        unsigned event);
+        bool (*notify_rx_intent_req)(void *handle, const void *priv,
+                        size_t req_size);
+        struct glink_ch_ctx (*notify_rxv)(void *handle, const void *priv,
+                        const void *pkt_priv, void *iovec, size_t size,
+                        void *(*vbuf_provider)(void *iovec, size_t offset,
+                                              size_t *size),
+                        void *(*pbuf_provider)(void *iovec, size_t offset,
+                                              size_t *size));
+        struct glink_ch_ctx (*notify_rx_sigs)(void *handle, const void *priv,
+                        uint32_t old_sigs, uint32_t new_sigs);
+        struct glink_ch_ctx (*notify_rx_abort)(void *handle, const void *priv,
+                        const void *pkt_priv);
+        struct glink_ch_ctx (*notify_tx_abort)(void *handle, const void *priv,
+                        const void *pkt_priv);
+        struct glink_ch_ctx (*notify_rx_tracer_pkt)(void *handle,
+                        const void *priv, const void *pkt_priv, const void *ptr,
+                        size_t size);
+};
+
+The following are the possible event notification values. The GLINK_CONNECTED
+notification is sent using the notify_state() callback once the channel has
+been fully opened. See the Close section for the closing state details.
+enum {
+        GLINK_CONNECTED,
+        GLINK_LOCAL_DISCONNECTED,
+        GLINK_REMOTE_DISCONNECTED,
+};
+
+glink_open() returns the following standard error codes:
+ * ERR_PTR(-EINVAL) - The glink_open_config structure which was passed to
+                      glink_open() is invalid.
+ * ERR_PTR(-ENODEV) - No transport is available.
+ * ERR_PTR(-EBUSY)  - The requested channel is not ready to be re-opened.
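+
+As a minimal, hypothetical example (the transport, edge and channel names
+below are placeholders, and the notification callbacks are elided for
+brevity), a client open sequence might look like the following sketch:
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <soc/qcom/glink.h>	/* assumed location of the client API header */
+
+static void *example_open(void)
+{
+	struct glink_open_config cfg = {
+		.transport = "smem",	/* optional; placeholder name */
+		.edge = "mpss",		/* placeholder edge name */
+		.name = "loopback",	/* placeholder channel name */
+		/* .notify_rx, .notify_tx_done, .notify_state, ... go here */
+	};
+	void *handle = glink_open(&cfg);
+
+	if (IS_ERR(handle))
+		pr_err("glink_open failed: %ld\n", PTR_ERR(handle));
+	return handle;
+}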
+
+Close
+-----
+int glink_close(void *handle)
+
+Closes the logical channel. Once the close request has been processed by the
+remote processor, the GLINK_LOCAL_DISCONNECTED notification is sent to the
+client. If the remote processor closes the channel first, then the
+GLINK_REMOTE_DISCONNECTED notification is sent to the client. After the
+GLINK_LOCAL_DISCONNECTED notification is sent, no additional activity will occur
+on the channel, regardless of whether the GLINK_REMOTE_DISCONNECTED notification
+was sent or not. At this point, it's safe for the callbacks and/or their
+resources to be destroyed. If the client wishes to re-establish communication
+on the channel, then the client will need to re-open the channel.
+
+glink_close() returns the following standard error codes:
+ * -EINVAL - The channel to be closed is NULL.
+ * -EBUSY  - The channel to be closed is already closing.
+
+If the channel to be closed is already closed, 0 is returned.
+
+Transmit Data
+-------------
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+        bool req_intent)
+
+Arguments:
+handle:     The handle returned by glink_open()
+pkt_priv:   Opaque data value that will be returned to client with the
+            notify_tx_done() notification
+data:       Pointer to the data being sent
+size:       Size of the data being sent
+req_intent: Boolean indicating whether or not to request an intent from the
+            remote channel
+
+Transmit data packet for a matching RX Intent.
+
+If a client would like to transmit a packet, but a suitable RX Intent has not
+been queued, the client can request that glink_tx() block and request a
+receive intent from the remote system. The remote system can still deny the
+request, at which point glink_tx() will return -EAGAIN to the client. The call
+sequence for this exchange is:
+
+ 1. The client wants to transmit a packet, and sets the req_intent flag to true
+    in the call to glink_tx() in order to request an intent if one is not
+    already available.
+ 2. The local G-Link core sends a receive intent request to the remote G-Link
+    core.
+ 3. The remote G-Link core calls its client's notify_rx_intent_req() function
+    and that client returns a boolean indicating whether the intent will be
+    granted or not.
+ 4. If the remote client grants the intent request, glink_tx() receives the
+    intent and returns success.
+ 5. If the remote client rejects the intent request, glink_tx() returns an
+    error.
+
+int glink_txv(void *handle, void *pkt_priv, const void *iovec, size_t size,
+        glink_buffer_provider_fn vbuf_provider,
+        glink_buffer_provider_fn pbuf_provider,
+        bool req_intent)
+
+Arguments:
+handle:        The handle returned by glink_open()
+pkt_priv:      Opaque data value that will be returned to client with the
+               notify_tx_done() notification
+iovec:         Pointer to the vector
+size:          Size of the data being sent
+vbuf_provider: Client-provided helper function to iterate the vector in
+               virtual address space
+pbuf_provider: Client-provided helper function to iterate the vector in
+               physical address space
+req_intent:    Boolean indicating whether or not to request an intent from the
+               remote channel
+
+glink_txv() provides a transmit function that accommodates clients using vector
+buffer implementations (DSM, SKB, etc.), and allows transports to operate on
+virtual or physical address mappings when necessary. This is done through the
+vbuf_provider() and pbuf_provider() functions, which are defined by the client
+and return a pointer to the contiguous vector data after iteration. After
+assembling the data from the vector, the call sequence is the same as
+glink_tx().
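+
+The following sketch (assuming an already-open channel handle) transmits a
+buffer with req_intent set and simply reports the case where the remote side
+denies the intent request; it is illustrative only:
+
+#include <linux/kernel.h>
+#include <soc/qcom/glink.h>	/* assumed header location */
+
+/* pkt_priv is echoed back in notify_tx_done(); here it is just the data
+ * pointer itself.
+ */
+static int example_send(void *handle, void *data, size_t size)
+{
+	int ret = glink_tx(handle, data, data, size, true);
+
+	if (ret == -EAGAIN)
+		pr_debug("remote side denied the rx intent request\n");
+	return ret;
+}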
+
+Queue Receive Intent
+--------------------
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
+
+Queues a receive intent. A receive intent indicates that the client is ready to
+receive a packet up to the specified size. The transport interface will either
+reserve space for this packet or allocate a buffer to receive this packet such
+that the packet can be received without stalling the physical transport.
+
+The pkt_priv parameter is an opaque value provided by the client that is
+returned in the notify_rx() callback.
+
+Signal Consumption of Receive Buffer
+------------------------------------
+int glink_rx_done(void *handle, const void *ptr)
+
+This function is called by the client to signal that they are done with the
+receive buffer. The remote client is notified that it now owns the buffer again.
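+
+A minimal, hypothetical receive-side sketch combining glink_queue_rx_intent()
+and glink_rx_done() is shown below; the intent size is an arbitrary
+placeholder and the notify_rx() callback itself is omitted:
+
+#include <linux/sizes.h>
+#include <soc/qcom/glink.h>	/* assumed header location */
+
+/* Make the channel ready to receive one packet of up to 1 KiB. */
+static int example_arm_rx(void *handle)
+{
+	return glink_queue_rx_intent(handle, NULL /* pkt_priv */, SZ_1K);
+}
+
+/* Called from the client's notify_rx() handler once the data at "ptr"
+ * has been consumed; ownership returns to the remote side.
+ */
+static void example_rx_consumed(void *handle, const void *ptr)
+{
+	glink_rx_done(handle, ptr);
+}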
+
+Set Control Signal Field
+------------------------
+glink_sigs_set(void *handle, uint32_t *sig_value)
+
+This function is called by the client to set a 32-bit control signal field.
+Depending on the transport, it may take appropriate actions on the set bit-mask,
+or transmit the entire 32-bit value to the remote host.
+
+Get Local Control Signal Field
+------------------------------
+glink_sigs_local_get(void *handle, uint32_t *sig_value)
+
+This function is called by the client to retrieve the cached signals sent
+using glink_sigs_set().
+
+Get Remote Control Signal Field
+-------------------------------
+glink_sigs_remote_get(void *handle, uint32_t *sig_value)
+
+This function is called by the client to retrieve the cached remote signals
+that were passed to notify_rx_sigs().
+
+Register a Transport
+--------------------
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+                                  struct glink_core_transport_cfg *cfg)
+
+Register a new transport with the G-Link Core. The if_ptr parameter is the
+interface to the transport, and the cfg parameter is the configuration, which
+contains the following information:
+ * The name of the transport
+ * The edge of the transport, i.e. remote processor name
+ * An array of the transport versions supported
+ * The maximum number of entries in the versions array
+ * The maximum number of channel identifiers supported
+ * The maximum number of intent identifiers supported
+
+The implementation of this transport configuration structure is below.
+
+struct glink_core_transport_cfg {
+        const char *name;
+        const char *edge;
+        const struct glink_core_version *versions;
+        size_t versions_count;
+        uint32_t max_cid;
+        uint32_t max_iid;
+};
+
+The initial state of a transport after it is registered is GLINK_XPRT_DOWN.
+The possible states of a transport are as follows:
+enum transport_state_e {
+        GLINK_XPRT_DOWN,
+        GLINK_XPRT_NEGOTIATING,
+        GLINK_XPRT_OPENED,
+        GLINK_XPRT_FAILED,
+};
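+
+As an illustrative sketch only (the header location, the limits, and the
+my_versions table are assumptions made for the example, not part of the
+documented API), a transport plug-in might register itself as follows:
+
+#include <soc/qcom/glink_xprt_if.h>	/* assumed header for the xprt interface */
+
+static int example_register_xprt(struct glink_transport_if *my_if,
+				 const struct glink_core_version *my_versions,
+				 size_t n_versions)
+{
+	struct glink_core_transport_cfg cfg = {
+		.name = "smem",			/* placeholder transport name */
+		.edge = "mpss",			/* placeholder edge name */
+		.versions = my_versions,	/* version/feature negotiation table */
+		.versions_count = n_versions,
+		.max_cid = 65536,		/* placeholder limits */
+		.max_iid = 65536,
+	};
+
+	return glink_core_register_transport(my_if, &cfg);
+}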
+
+Unregister a Transport
+----------------------
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr)
+
+Unregister/destroy an existing transport. The transport's state is changed to
+GLINK_XPRT_DOWN, and the following values are reset:
+ * the next local channel ID
+ * The local version index
+ * The remote version index
+ * The version negotiation completion flag
+ * The list of channels on the transport (list is deleted)
+
+Driver parameters
+=================
+
+The G-Link core and G-Link Loopback Server modules both have a module parameter
+called "debug_mask". The possible values are detailed in the "Config options"
+section.
+
+Config options
+==============
+
+G-Link supports several logging configurations. The following options are
+available for the core and loopback client. They can be bitwise OR'ed together
+to have multiple options at once.
+
+QCOM_GLINK_INFO - The default option. Turn on only INFO messages
+QCOM_GLINK_DEBUG - Turn on debug log messages - much more verbose logging to
+                   aid in debugging.
+QCOM_GLINK_PERF - Performance logging. This removes all other logging except
+                  for logging messages that are created through a special
+                  set of macros. These logs can be post-processed for
+                  performance metrics.
+
+The values of these options are as follows:
+enum {
+        QCOM_GLINK_INFO = 1U << 0,
+        QCOM_GLINK_DEBUG = 1U << 1,
+        QCOM_GLINK_PERF = 1U << 2,
+};
+
+Dependencies
+============
+
+IPC Logging is a dependency of the G-Link core. The Transport Plug-ins will have
+their own dependencies. The SMEM Native Transport depends on SMEM and the
+interrupt subsystem.
+
+DebugFS
+=======
+
+Several DebugFS nodes are exported under the glink directory for testing and
+debugging. The directory structure below allows listing information by
+subsystem, channel, and transport.
+
+glink Directory
+---------------
+`-- debugfs
+    `-- glink
+        |-- channel
+        |   |-- channels <- lists all of the channels in the system, their
+        |   |               state, the transport they are assigned to, etc.
+        |   |-- SUBSYSTEM_NAME <- directory (such as "mpss")
+        |   |   `-- CHANNEL_NAME <- one directory per channel
+        |   |       |-- intents <- list of all open intents, their size, and
+        |   |       |              their ID
+        |   |       `-- stats <- statistics for the channel (contents TBD)
+        `-- xprt
+            |-- xprts <- lists all of the transports in the system and basic
+            |            transport-specific state
+            |-- XPRT_NAME <-- directory (such as "smem")
+                `-- XPRT_INFO <-- transport specific files
+
+User space utilities
+====================
+
+A user space driver is provided which can export a character device for a single
+channel based upon Device Tree configuration. The full DT schema is detailed in
+Documentation/devicetree/bindings/arm/msm/glinkpkt.txt. The user space driver
+implements the standard file operations of open, release, read, write, poll, and
+mmap. An ioctl is defined to queue RX intents.
+
+The file operations map to the G-Link Client API as follows:
+open()    -> glink_open() (Open a channel. Exported channels configured
+                           through DT)
+write()   -> glink_tx() (Transmit data)
+read()    -> Receive data and send RX done notification (glink_rx_done())
+release() -> glink_close() (Close a channel)
+ioctl()   -> glink_queue_rx_intent()
+
+Other operations are:
+poll()    -> Poll waiting for the channel to become available
+mmap()    -> Prevent having to do a copy between kernel space and user space
+             for clients that need that performance.
+
+A typical transmit and receive call flow is as follows:
+1. G-Link user space driver opens the channel using open(), which returns a
+   file descriptor for the channel
+2. An ioctl is sent to queue an RX intent on the channel
+3. Data is transmitted on the channel using write()
+4. The data is received using read(). read() also sends an RX done
+   notification.
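+
+As a purely illustrative user-space sketch of this flow (the device node name
+below is a placeholder, and the RX-intent ioctl request code is defined by the
+glinkpkt driver and is not reproduced here):
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char rx_buf[128];
+	ssize_t n;
+	int fd = open("/dev/glink_pkt_loopback", O_RDWR);	/* step 1 */
+
+	if (fd < 0) {
+		perror("open");
+		return 1;
+	}
+
+	/* Step 2 would queue an RX intent here using the driver's ioctl. */
+
+	if (write(fd, "ping", 4) < 0)			/* step 3: transmit */
+		perror("write");
+
+	n = read(fd, rx_buf, sizeof(rx_buf));		/* step 4: receive + RX done */
+	if (n > 0)
+		printf("received %zd bytes\n", n);
+
+	close(fd);
+	return 0;
+}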
+
+Version/Feature Negotiation
+===========================
+
+To enable upgrading transports, G-Link supports a version number and feature
+flags for each transport. The combination of the version number and feature
+flags enable:
+        1. G-Link software updates to be rolled out to each processor
+           separately.
+        2. Individual features to be enabled or disabled on an edge-by-edge
+           basis.
+
+Endpoints negotiate both the version and the feature flags when the transport
+is opened; they cannot be changed after negotiation has been completed.
+
+The version number represents any change in G-Link or the transport that
+breaks compatibility between processors. Examples would be a change in the
+shared data structures or changes to fundamental behavior in either the
+transport or in G-Link Core. Each full implementation of G-Link must support a
+minimum of the current version, the previous version, and the base negotiation
+version called v0. For resource-constrained devices, this can be relaxed to
+only support the latest version of the protocol and the v0 version.
+
+The feature flags represent any changes in G-Link that are optional and
+backwards-compatible. Feature flags can be version-specific, but to limit code
+maintenance and documentation overhead, feature flags should not be re-used
+unless the limit of 32 feature flags has been reached.
+
+Negotiation Algorithm
+---------------------
+After a transport is registered with G-Link core, it should be configured with
+the v0 transport settings. Once communication can be done without losing
+messages, the link_up() call in the G-Link core should be made to start the
+negotiation process. Both the local and remote sides will follow the same
+negotiation state machines.
+
+Since both sides follow the same sequence and both sides start once the link is
+up, it is possible that both sides may start the negotiation sequence at the
+same time resulting in a perceived race condition. However, both negotiation
+sequences are independent and the transport is not considered opened until both
+negotiation sequences are complete, so this is not a true race condition and
+both sides will converge to the same version and feature set even if they
+start with different versions and feature sets. Since this sequence is not
+performance-critical, the extra complexity in the negotiation algorithm to
+short-circuit the process is not deemed necessary.
+
+Local-Initiated Negotiation Sequence
+------------------------------------
+The following local negotiation sequence is initiated and followed by each
+side. The processor that is running this algorithm will be matched by the
+remote processor following the Remote-Initiated Negotiation Sequence.
+
+ 1. Set current version and feature variables to the maximum supported version
+    and feature set
+ 2. Send Version Command (glink_transport_if::tx_cmd_version()) with local
+    version and feature set
+ 3. If version is 0, then negotiation has failed and the transport should be
+    marked as having failed negotiation and the negotiation sequence
+    terminated.
+ 4. When Version ACK is received (glink_core_if::rx_cmd_version_ack()):
+     a. Compare ACK version to the sent version and:
+         i. If equal, we are done with version negotiation
+        ii. Else set current version to the lower of:
+             1. Remote version number
+             2. Highest supported local version
+     b. Compare ACK features to the sent features and:
+         i. If equal, we are done with the negotiation sequence
+        ii. Else, call glink_core_version::negotiate_features() for the
+            current version to set the features to either the bitwise AND of
+            the ACK features and the locally supported features or a lesser
+            feature set for cases where only certain combinations of features
+            are valid.
+     c. Go back to step 2 to send the updated version/feature set
+
+Remote-Initiated Negotiation Sequence
+-------------------------------------
+The following remote negotiation sequence is followed by each side based upon
+receiving a Version Command.
+
+ 1. Receive Version Command (glink_core_if::rx_cmd_version())
+ 2. Compare received version with the locally supported version and:
+     a. If equal, set ACK version to the received version
+     b. Else, set ACK version to the lower of:
+         i. Remote version number
+        ii. Highest supported local version
+       iii. Version 0 if no supported version less than or equal to
+            the remote version number can be found.
+ 3. Compare received features with the locally supported features and:
+     a. If equal, set ACK features to the received features
+     b. Else, call glink_core_version::negotiate_features() for the current
+        version to set the features to either the bitwise AND of the ACK
+        features and the locally supported features or a lesser feature set
+        for cases where only certain combinations of features are valid.
+ 4. Send the Version ACK Command (glink_transport_if::tx_cmd_version_ack()).
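+
+A minimal sketch of the version and feature selection performed in steps 2 and
+3 above, assuming a local_versions[] table sorted in ascending order and the
+default bitwise-AND feature policy (a real negotiate_features() implementation
+may apply further restrictions on the resulting feature set):
+
+static uint32_t pick_ack_version(uint32_t remote_version,
+				 const uint32_t *local_versions,
+				 size_t num_versions)
+{
+	size_t i;
+
+	/* Highest locally supported version <= remote version, else v0 */
+	for (i = num_versions; i > 0; i--)
+		if (local_versions[i - 1] <= remote_version)
+			return local_versions[i - 1];
+	return 0;
+}
+
+static uint32_t pick_ack_features(uint32_t remote_features,
+				  uint32_t local_features)
+{
+	/* Default policy: keep only the features both sides support */
+	return remote_features & local_features;
+}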
+
+Packets
+=======
+
+Packets are scheduled in a round-robin fashion from all active channels. Large
+packets can be fragmented by the transport to ensure fairness between channels
+and to keep per-channel latency bounded.
+
+Channel Migration
+=================
+
+The G-Link core has the capability of selecting the best transport available on
+an edge-by-edge basis. The transport is selected based upon a pre-defined
+transport priority and from optional transport selection information passed in
+by the client in the glink_open_config structure.
+
+Subsystem Restart (SSR)
+=======================
+
+In order to properly clean up channel state and recover buffer ownership
+consistently across different physical transports, G-Link requires an
+additional SSR notification system on top of the existing SSR framework. The
+notification system is a star topology with the application processor as the
+master. When a subsystem is restarted, all other subsystems are notified by the
+application processor and must respond after cleaning up all affected channels
+before the SSR event is allowed to continue.
+
+The solution has four components:
+ 1. Target-specific configuration for each subsystem, which consists of a list
+    of which subsystems should be notified in the event of SSR, specified in
+    Device Tree
+ 2. SSR module that uses the G-Link Client API to isolate SSR functionality,
+    and handles calls to the SSR Framework, Device Tree parsing, etc.
+ 3. SSR notification messages between the application processor and
+    other subsystems, which will be exchanged using G-Link.
+ 4. SSR API:
+     a. glink_ssr(const char *subsystem_name) - G-Link Client API
+     b. ssr(struct glink_transport_if *if_ptr) - Transport Interface
+
+1. Target-specific configuration using Device Tree
+--------------------------------------------------
+The target-specific configuration provides the G-Link SSR module with a list
+of subsystems that should be notified in the event of SSR. This is necessary
+to simplify handling of cases where there are multiple SoCs in one device -
+there is no need to notify a subsystem on a second SoC of a restart in the
+first SoC. The configuration also provides a mapping of the subsystem's name
+in the SSR framework to its name as a G-Link edge, and allows the specification
+of a transport for each notification. The figures below provide an example:
+
++----+      +------+        +-------------------+
+|SSR |----->|G-Link|------->|Subsystem A        |
+|Name|      |Name  |        |Subsystem B: xprt x|
++----+      +------+        |Subsystem C        |
+                            +-------------------+
+
++-------+      +------+        +--------------+
+|"modem"|----->|"mpss"|------->|"wcnss"       |
++-------+      +------+        |"lpass": "smd"|
+                               |"dsps"        |
+                               +--------------+
+
+The above configuration tells the G-Link SSR module to notify all subsystems
+on G-Link edges "wcnss", "lpass", and "dsps" that the subsystem on edge "mpss"
+has restarted, and to send the notifications to the "lpass" edge on the "smd"
+transport.
+
+2. G-Link SSR Module (glink_ssr)
+--------------------------------
+This module is a G-Link client which handles notifications from the SSR
+framework on the application processor and triggers local clean-up in response
+to these notifications by calling glink_ssr(const char *subsystem). This
+module also sends notifications to any subsystems that need to be notified of
+the SSR event, and ensures that they respond within the standard timeout (500
+ms). If the subsystem fails to respond, it is restarted.
+
+3. G-Link SSR Messages
+----------------------
+When an SSR event occurs, the application processor notifies all necessary
+subsystems by sending a "do_cleanup" message. After the subsystem performs the
+necessary clean-up, it sends back a "cleanup_done" message. If the
+"cleanup_done" message for a given subsystem is not received within the
+standard timeout (500 ms), the subsystem is restarted.
+
+SSR "do_cleanup" Message
+------------------------
++-----------------+-----------------------+------------------------------+
+| Element         | Type                  | Description                  |
++=================+=======================+==============================+
+| version         | uint32_t              | G-Link SSR Protocol Version  |
++-----------------+-----------------------+------------------------------+
+| command         | uint32_t              | do_cleanup message (0)       |
++-----------------+-----------------------+------------------------------+
+| sequence_number | uint32_t              | Sequence number              |
++-----------------+-----------------------+------------------------------+
+| name_len        | uint32_t              | Name length of the subsystem |
+|                 |                       | being restarted              |
++-----------------+-----------------------+------------------------------+
+| name            | char[GLINK_NAME_SIZE] | NULL-terminated name of the  |
+|                 |                       | subsystem being restarted    |
+|                 |                       | (GLINK_NAME_SIZE == 32)      |
++-----------------+-----------------------+------------------------------+
+
+SSR "cleanup_done" Message
+--------------------------
++-----------------+-----------------------+------------------------------+
+| Element         | Type                  | Description                  |
++=================+=======================+==============================+
+| version         | uint32_t              | G-Link SSR Protocol Version  |
++-----------------+-----------------------+------------------------------+
+| response        | uint32_t              | cleanup_done message (1)     |
++-----------------+-----------------------+------------------------------+
+| sequence_number | uint32_t              | Sequence number              |
++-----------------+-----------------------+------------------------------+
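+
+The two messages map naturally onto packed C structures; the sketch below is
+derived from the tables above (the struct and field names are illustrative,
+only GLINK_NAME_SIZE == 32 is stated by the protocol):
+
+#define GLINK_NAME_SIZE 32
+
+struct do_cleanup_msg {
+	uint32_t version;		/* G-Link SSR protocol version */
+	uint32_t command;		/* do_cleanup message (0) */
+	uint32_t seq_num;		/* sequence number */
+	uint32_t name_len;		/* length of the subsystem name */
+	char name[GLINK_NAME_SIZE];	/* NULL-terminated subsystem name */
+};
+
+struct cleanup_done_msg {
+	uint32_t version;		/* G-Link SSR protocol version */
+	uint32_t response;		/* cleanup_done message (1) */
+	uint32_t seq_num;		/* sequence number */
+};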
+
+G-Link SSR Protocol Sequence Diagram
+------------------------------------
+                   +------+       +------+
++---------+        |G-Link|       |G-Link|                         +----------+
+|SSR      |        |SSR   |       |Client|       +---------+       |Remote    |
+|Framework|        |Module|       |API   |       |Transport|       |Processors|
++---------+        +------+       +------+       +---------+       +----------+
+     |                |              |               |                  |
+     | SSR            |              |               |                  |
+     | Notification   |              |               |                  |
+     +--------------->|              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                | do_cleanup   |               |                  |
+     |                +------------------------------------------------>|
+     |                |              |               |                  |
+     |                | glink_ssr(subsystem)         |                  |
+     |                +------------->|               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              | ssr(if_ptr)   |                  |
+     |                |              +-------------->|                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |     cleanup_done |
+     |                |<------------------------------------------------+
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |  ssr(subsystem)|              |               |                  |
+     |<---------------+              |               |                  |
+     |                |              |               |                  |
+     |    +-----------+---------+    |               |                  |
+     |    |If no cleanup_done   |    |               |                  |
+     |    |response is received,|    |               |                  |
+     |    |restart the subsystem|    |               |                  |
+     |    +-----------+---------+    |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
+     |                |              |               |                  |
++---------+        +------+       +------+       +---------+       +----------+
+|SSR      |        |G-Link|       |G-Link|       |Transport|       |Remote    |
+|Framework|        |SSR   |       |Client|       +---------+       |Processors|
++---------+        |Module|       |API   |                         +----------+
+                   +------+       +------+
+
+4. SSR API
+----------
+glink_ssr(const char *subsystem_name)
+-------------------------------------
+Called by the G-Link SSR Module, this function calls into the transport using
+ssr(struct glink_transport_if *if_ptr) to allow the transport to perform any
+necessary clean-up, and simulates receiving a remote close from the restarting
+subsystem for all channels on the affected edge.
+
+ssr(struct glink_transport_if *if_ptr)
+--------------------------------------
+The ssr() function is part of the Transport Interface, and as mentioned above is
+used to perform any necessary clean-up on the transport.
+
+Tracer Packet Framework
+=======================
+
+A tracer packet is a special type of packet that can be used to trace the timing
+of events. This helps profile the latency experienced by a packet, and provides
+granular information regarding the individual latencies that make up the overall
+latency. The information obtained using the tracer packet can be used to
+configure the Power Management Quality of Service (PM QoS) in the system in
+order to achieve a client's desired packet latency. The tracer packet moves
+through the system along with other normal traffic without any impact on the
+normal traffic.
+
+When a transport is registered with the local G-Link core, it performs a
+transport-specific version and feature negotiation with the remote G-Link core.
+Based on this negotiation, the transport reports its capability of supporting
+tracer packets to the local G-Link core.
+
+Once the transport has successfully completed the negotiation, the clients open
+the G-Link channel over the concerned transport. After the channel is open, the
+clients can exchange tracer packets over G-Link, in a way similar to normal
+traffic.
+
+When a tracer packet is exchanged over a G-Link channel, the G-Link core and the
+transport log events related to packet exchange and their time.
+
+1. Tracer Packet Format
+-----------------------
+The tracer packet contains a header and a payload. The header contains
+identification and configuration information associated with a tracer packet.
+The payload contains a series of event logs. The below diagram shows the layout
+of the tracer packet:
+
+Tracer Packet Header Layout
+---------------------------
+ 31                           16 15 14 13  12   11   4 3       0
++-------------------------------+-----+---+----+------+---------+
+|      Packet Length (words)    | CCL | Q | ID | Res. | Version |
++-------------------------------+-------------------------------+
+|       Client Event Cfg.       |     Packet Offset (words)     |
+|           Bit Mask            |                               |
++-------------------------------+-------------------------------+
+|                  G-Link Event Config Bit Mask                 |
++---------------------------------------------------------------+
+|                   Base Timestamp (MS 32 bits)                 |
++---------------------------------------------------------------+
+|                   Base Timestamp (LS 32 bits)                 |
++---------------------------------------------------------------+
+|                         Client Cookie                         |
++---------------------------------------------------------------+
+
+Tracer Packet Payload Layout
+----------------------------
+ 31                           16 15                            0
++-------------------------------+-------------------------------+
+|            Reserved           |          Event 1 ID           |
++-------------------------------+-------------------------------+
+|                   Event 1 Timestamp (LS 32 bits)              |
++---------------------------------------------------------------+
+                                .
+                                .
+                                .
++-------------------------------+-------------------------------+
+|            Reserved           |          Event N ID           |
++-------------------------------+-------------------------------+
+|                   Event N Timestamp (LS 32 bits)              |
++---------------------------------------------------------------+
+
+Tracer Packet Header Fields
+---------------------------
+Version - This field contains the tracer packet version. The current version
+          of tracer packet supported by G-Link is 1. If a version of the tracer
+          packet is not supported by G-Link or its transport abstraction layer,
+          the tracer packet is still exchanged, but no events are logged.
+
+Reserved - The reserved bit fields are set to 0 and can be used for future
+           extension of tracer packet functionality.
+
+ID - The ID bit field indicates the presence or absence of the Source Processor
+     ID, Destination Processor ID and Transport ID fields in the tracer
+     packet. Currently this field is set to 0 and the concerned IDs are not
+     defined.
+
+CoreSight ("Q" in the diagram above) - This bit field is used to indicate the
+                                       location of the log events. If this bit
+                                       field is set, the log events are logged
+                                       into CoreSight, otherwise the log events
+                                       are logged into the packet itself. If the
+                                       log events are logged into the packet,
+                                       then the number of events logged into the
+                                       packet depends on the size of the packet.
+
+CCL - The tracer packet framework allows clients to differentiate multiple
+      tracer packets through a client-specified cookie. The Client Cookie Length
+      (CCL) bit field indicates the length of that cookie in units of words.
+
+Packet Length - These 16 bits indicate the length of the tracer packet in units
+                of words.
+
+Packet Offset - This field is used when events are logged into the packet. This
+                16-bit field indicates the offset into the packet, in units of
+                words, to log an event. Once an event is logged, this field is
+                updated with the appropriate offset to log future events.
+
+Client Configuration Bit Mask - This bit-mask is used to enable/disable the
+                                G-Link client-specific events. The procedure to
+                                enable/disable client events is dependent upon
+                                the client's implementation and is not included
+                                in this document.
+
+G-Link Configuration Bit Mask - This bit-mask is used to enable/disable the
+                                G-Link-specific events. When a bit is set, the
+                                concerned event logging is enabled.
+
+Base Timestamp - The base timestamp contains the starting time of the tracer
+                 packet exchange. The timestamp logged along with the event is
+                 used as an offset from this base timestamp. This optimization
+                 helps in reducing the log size of an event.
+
+Client Cookie - The tracer packet framework allows clients to differentiate
+                multiple tracer packets through a client-specified cookie.
+
+Tracer Packet Payload Fields
+----------------------------
+Event ID - The Event ID field uniquely identifies the G-Link and client-specific
+           tracer packet events. This field is present only when the events are
+           logged into the packet. The G-Link and client event IDs are assigned
+           a unique range. Refer to the table below for more information
+           regarding the event ID definition.
+
+Reserved - The reserved field is set to 0 and can be used for future extension
+           of tracer packet functionality.
+
+Event Timestamp - The Event Timestamp field contains the time at which the event
+                  is logged. This field is used as an offset from the Base
+                  Timestamp field in the header to calculate the actual event
+                  timestamp. This field is present only when the events are
+                  logged into the packet.
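+
+Putting the header and payload layouts together, a readable but purely
+illustrative C view of the packet is sketched below; bitfield ordering is
+compiler dependent, so a real implementation would use explicit shift/mask
+operations instead:
+
+struct tracer_pkt_hdr {
+	uint32_t version:4;		/* tracer packet version (currently 1) */
+	uint32_t reserved:8;		/* set to 0 */
+	uint32_t id:1;			/* processor/transport IDs present */
+	uint32_t coresight:1;		/* "Q": log events into CoreSight */
+	uint32_t ccl:2;			/* client cookie length in words */
+	uint32_t pkt_len:16;		/* packet length in words */
+	uint16_t pkt_offset;		/* offset of the next event log, in words */
+	uint16_t client_event_cfg;	/* client event config bit mask */
+	uint32_t glink_event_cfg;	/* G-Link event config bit mask */
+	uint32_t base_ts_msb;		/* base timestamp, most significant 32 bits */
+	uint32_t base_ts_lsb;		/* base timestamp, least significant 32 bits */
+	uint32_t client_cookie[];	/* CCL words of client cookie */
+};
+
+struct tracer_pkt_event {
+	uint16_t event_id;		/* see the event ID tables below */
+	uint16_t reserved;		/* set to 0 */
+	uint32_t event_ts_lsb;		/* offset from the base timestamp */
+};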
+
+2. Tracer Packet Events
+-----------------------
+Each event has a uniquely defined ID. Since G-Link clients can use the tracer
+packet framework, G-Link events and G-Link client events are defined in mutually
+exclusive ranges. Since client events are client-context specific, the event
+IDs can be reused among the clients. The ranges are detailed in the table below:
++--------------------------+-----------------------+
+| Event Type               | Range                 |
++==========================+=======================+
+| G-Link                   | 1-255                 |
++--------------------------+-----------------------+
+| Client                   | 256 and above         |
++--------------------------+-----------------------+
+
+The G-Link specific events and their associated IDs are defined in the below
+table:
++--------------------------+-----------------------+
+| G-Link Event             | ID                    |
++==========================+=======================+
+| GLINK_CORE_TX            | 1                     |
++--------------------------+-----------------------+
+| GLINK_QUEUE_TO_SCHEDULER | 2                     |
++--------------------------+-----------------------+
+| GLINK_SCHEDULER_TX       | 3                     |
++--------------------------+-----------------------+
+| GLINK_XPRT_TX            | 4                     |
++--------------------------+-----------------------+
+| GLINK_XPRT_RX            | 5                     |
++--------------------------+-----------------------+
+| GLINK_CORE_RX            | 6                     |
++--------------------------+-----------------------+
+
+3. Tracer Packet API
+--------------------
+tracer_pkt_init(void *data, size_t data_len, uint16_t client_event_cfg,
+            uint32_t glink_event_cfg, void *pkt_priv, size_t pkt_priv_len)
+--------------------------------------------------------------------------
+Initialize a buffer with the tracer packet header. The tracer packet header
+includes the data passed in the parameters.
+
+tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+            uint32_t glink_event_cfg)
+---------------------------------------------------------------
+Initialize a buffer with the event configuration mask passed in the parameters.
+
+tracer_pkt_log_event(void *data, uint32_t event_id)
+---------------------------------------------------
+Log an event specific to the tracer packet. The event is logged either into
+the tracer packet itself or a different tracing mechanism as configured.
+
+tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+----------------------------------------------------------
+Calculate the length of the buffer required to hold the hex dump of the tracer
+packet.
+
+tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len)
+---------------------------------------------------------------------------
+Dump the contents of the tracer packet into a buffer in a specific hexadecimal
+format. The hex dump buffer can then be dumped through debugfs.
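+
+A minimal usage sketch of the API above, as it might look in a hypothetical
+G-Link client; the buffer size, event masks, cookie value and the client event
+ID 256 are illustrative only:
+
+static void tracer_pkt_example(void)
+{
+	char pkt[256];
+	uint32_t cookie = 0xC0FFEE;
+
+	/* Build the header: client event mask 0x3, G-Link event mask 0x3F */
+	tracer_pkt_init(pkt, sizeof(pkt), 0x3, 0x3F, &cookie, sizeof(cookie));
+
+	/* Log a client-side event before handing the packet to G-Link;
+	 * client event IDs start at 256 (see the event tables above). */
+	tracer_pkt_log_event(pkt, 256);
+
+	/*
+	 * The packet is then transmitted over the channel like any other
+	 * payload; the G-Link core and the transport log their own events
+	 * (GLINK_CORE_TX, GLINK_XPRT_TX, ...) as it moves through the system.
+	 */
+}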
+
+Known issues
+============
+
+No known issues.
+
+To do
+=====
+
+Power Management
+----------------
+An internal power voting API will be defined to bring the transport out of power
+collapse for SMUX and BAM DMUX-type systems. In addition, power for
+request/response type systems can be optimized to prevent powering down
+unnecessarily after sending a request only to power up immediately to process
+the response.
+
+Round-Robin Scheduling
+----------------------
+Add deficit round-robin schedule to ensure fairness between channels that have
+a large number of small packets and channels that are sending the maximum
+MTU-sized packets.
+
+Transport Filter Internal API
+-----------------------------
+An internal transport filter API will be defined. This can be plugged into a
+filter chain at the transport level to easily add data coding, encryption,
+integrity hashes, etc.
diff --git a/Documentation/arm/msm/glink_pkt.txt b/Documentation/arm/msm/glink_pkt.txt
new file mode 100644
index 0000000..c6c7740
--- /dev/null
+++ b/Documentation/arm/msm/glink_pkt.txt
@@ -0,0 +1,196 @@
+Introduction
+============
+
+Glink packet drivers are companion adaptation drivers which use the kernel APIs
+to expose the Glink core logical channels as character devices to
+user-space clients.
+
+The Glink core APIs are detailed in Documentation/arm/msm/glink.txt.
+
+Software description
+====================
+
+Glink packet drivers expose the Glink core APIs to user-space clients through
+standard file operations such as open, read, write, ioctl, poll and release.
+The standard Linux permissions are used for the device nodes, and SELinux
+performs further security checks.
+
+
+    Device node [0..n]
+           |
+           |
+  -------------------
+ |  VFS Framework    |
+  -------------------
+    |             |
+    |             |
+  -------     -------
+ | CDEV  |   | CDEV  |
+ | Dev 0 |...| Dev n |
+ ----------------------
+|  Glink packet driver |
+ ----------------------
+           |
+           |
+    -----------------
+   |                 |
+   |   G-Link core   |
+   |                 |
+    -----------------
+           |
+           |
+    To Remote System
+
+
+The file operations map to the G-link client API as follows:
+
+Open():
+----------
+The Open system call is mapped to glink_open(), which opens a channel. The
+expected channel configuration has to be done through DT files. The full DT
+schema is detailed in Documentation/devicetree/bindings/arm/msm/glinkpkt.txt.
+
+Open on the glink packet character device is a blocking call which blocks until
+the channel is fully opened by both the local and the remote processor.
+Clients can configure the blocking time through a configurable parameter
+defined per device.
+
+The timeout value is specified in seconds with a default timeout of 1 second.
+A negative value indicates an infinite wait.
+
+Example:
+# get open timeout value
+	cat /sys/class/glinkpkt/device_name/open_timeout
+# set to 20 seconds value
+	echo 20 > /sys/class/glinkpkt/device_name/open_timeout
+
+If the channel is not opened by the remote processor, or any other problem
+prevents the channel from becoming ready, the open call times out and
+-ETIMEDOUT is returned to the client. On success, open returns a valid file
+descriptor to the client; on failure, it returns a standard Linux error code.
+
+The same device can be opened by multiple clients, but passing the same file
+descriptor from multiple threads may lead to unexpected results.
+
+Write():
+----------
+The Write system call is mapped to glink_tx() which transmits the data over the
+glink channel.
+
+Read():
+----------
+The Read system call consumes any pending data on the channel. Glink signals
+incoming data through the glink_notify_rx() callback; the glink packet driver
+queues the data internally and delivers it to the client through the read
+system call. Once the read is complete, the glink packet driver calls the
+glink_rx_done() API to notify the Glink core that the receive operation has
+completed.
+
+                       +
+       User-Space      |   Kernel-Space
+                       |
+                       |
++---------+            |                +----------+         +------------+
+| Local   |            |                | GlinkPKT |         |            |
+| Client  |            |                | Driver   |         | Glink core |
+|         |            |                |          |         |            |
++---------+            |                +----------+         +------------+
+                       |
+    +                  |                     +                     +
+    |                  |                     |                     |
+    |    open()        |  glink_pkt_open()   |    glink_open()     |
+    | +--------------> | +-----------------> | +-----------------> |
+    |                  |                     |                     |
+    |   File Handle[fd]|    Valid Fd         |    Handle           |
+    | <--------------+ | <-----------------+ | <-----------------+ |
+    |                  |                     |                     |
+    |     Ioctl()      |                     |                     |
+    |  QUEUE_RX_INTENT |  glink_pkt_ioctl()  | glink_queue_rx_intent()
+    | +--------------> | +-----------------> | +-----------------> |
+    |                  |                     |                     |
+    |                  |                     |                     |
+    | <----------------------------------------------------------+ |
+    |                  |                     |                     |
+    |     Read()       |  glink_pkt_read()   |                     |
+    | +--------------> | +-----------------> | +---+               |
+    |                  |                     |     |               |
+    |                  |                 Wait for data             |
+    |                  |                     | <---+               |
+    |                  |                     |  glink_notify_rx()  |
+    |                  |                     | <-----------------+ |
+    |                  |   Wake-up read()    |                     |
+    |                  |   copy_to_user()    |                     |
+    |   read() return  | <-----------------+ |                     |
+    | <--------------+ |                     |                     |
+    +                  |                     +                     +
+                       |
+                       |
+                       +
+
+Clients can also poll on the device node with the POLLIN mask to get notified
+of any incoming data. Clients have to call the GLINK_PKT_IOCTL_QUEUE_RX_INTENT
+ioctl in advance to queue an RX buffer with the Glink core, as sketched below.
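+
+The following user-space sketch illustrates that flow. The device node name is
+illustrative, and the ioctl argument convention (a pointer to the desired
+intent size) is an assumption; the real GLINK_PKT_IOCTL_QUEUE_RX_INTENT
+definition comes from the G-Link packet driver's UAPI header, not shown here:
+
+#include <fcntl.h>
+#include <poll.h>
+#include <stdint.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+
+static int glink_pkt_rx_example(void)
+{
+	int fd = open("/dev/glink_pkt_example", O_RDWR); /* name is illustrative */
+	struct pollfd pfd;
+	uint32_t intent_size = 1024;
+	char buf[1024];
+
+	if (fd < 0)
+		return -1;
+
+	/* Queue an RX buffer with the G-Link core before waiting for data */
+	ioctl(fd, GLINK_PKT_IOCTL_QUEUE_RX_INTENT, &intent_size);
+
+	pfd.fd = fd;
+	pfd.events = POLLIN | POLLPRI;
+	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
+		read(fd, buf, sizeof(buf));
+
+	close(fd);
+	return 0;
+}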
+
+Release():
+----------
+The Release system call is mapped to glink_close() to close a channel and free
+the resources.
+
+Poll():
+----------
+The Poll system call provides wait operations: waiting for incoming data with
+the POLLIN mask, and waiting for TIOCM signal notifications with the POLLPRI
+mask. In the SSR case the poll call returns with the POLLHUP mask, and the
+client has to close and re-open the port.
+
+* POLLPRI - TIOCM bits changed
+* POLLIN - RX data available
+* POLLHUP - link is down due to either remote side closing or an SSR
+
+Ioctl():
+----------
+Multiple ioctls are supported to get and set the TIOCM signal status and to
+queue an RX intent with the Glink core. The supported ioctls are TIOCMSET,
+TIOCMGET, TIOCMBIS, TIOCMBIC and GLINK_PKT_IOCTL_QUEUE_RX_INTENT.
+
+The GLINK_PKT_IOCTL_QUEUE_RX_INTENT ioctl is mapped to the
+glink_queue_rx_intent() API, which queues an RX intent with the Glink core.
+
+Signals:
+==========
+The Glink protocol provides a 32-bit control signal field that is passed
+through for client-specific signaling. User-space clients of the Glink packet
+driver can use the signal field as follows:
+
+* 31:28 - Reserved for SMD RS-232 signals
+* 27:16 - Pass through for client usage
+* 15:0  - TIOCM bits
+
+SSR operation:
+==============
+On a remote subsystem restart, all open channels on that edge are closed and
+local clients have to close and re-open the channel to restart the
+communication. All blocking calls such as open, read and write return
+-ENETRESET, and the poll call returns with the POLLHUP mask.
+
+Files:
+==========
+Documentation/devicetree/bindings/arm/msm/glinkpkt.txt
+drivers/soc/qcom/msm_glink_pkt.c
+
+Wakelock:
+==========
+By default, GLINK PKT will acquire a wakelock for 2 seconds. To optimize this
+behavior, use the poll() function:
+	1. Client calls poll() which blocks until data is available to read
+	2. Data comes in, GLINK PKT grabs a wakelock and poll() is unblocked
+	3. Client grabs wakelock to prevent system from suspending
+	4. Client calls GLINK PKT read() to read the data
+	5. GLINK PKT releases its wakelock
+	6. Client processes the data
+	7. Client releases the wakelock
+
+Logging:
+==========
+	cat /d/ipc_logging/glink_pkt/log_cont
+
diff --git a/Documentation/arm/msm/msm_ipc_logging.txt b/Documentation/arm/msm/msm_ipc_logging.txt
new file mode 100644
index 0000000..9d42200
--- /dev/null
+++ b/Documentation/arm/msm/msm_ipc_logging.txt
@@ -0,0 +1,361 @@
+Introduction
+============
+
+This module will be used to log the events by any module/driver which
+enables Inter Processor Communication (IPC). Some of the IPC drivers such
+as Message Routers, Multiplexers etc. which act as a passive pipe need
+some mechanism to log their events. Since all such IPC drivers handle a
+large amount of traffic/events, using kernel logs renders kernel logs
+unusable by other drivers and also degrades the performance of IPC
+drivers. This new module will help in logging such high frequency IPC
+driver events while keeping the standard kernel logging mechanism
+intact.
+
+Hardware description
+====================
+
+This module does not drive any hardware resource and will only use the
+kernel memory-space to log the events.
+
+Software description
+====================
+
+Design Goals
+------------
+This module is designed to
+	* support logging for drivers handling large amount of
+	  traffic/events
+	* define & differentiate events/logs from different drivers
+	* support both id-based and stream-based logging
+	* support extracting the logs from both live target & memory dump
+
+IPC Log Context
+----------------
+
+This module will support logging by multiple drivers. To differentiate
+between the multiple drivers that are using this logging mechanism, each
+driver will be assigned a unique context by this module. Associated with
+each context is the logging space, dynamically allocated from the kernel
+memory-space, specific to that context so that the events logged using that
+context will not interfere with other contexts.
+
+Event Logging
+--------------
+
+Every event will be logged as a <Type: Size: Value> combination. Type
+field identifies the type of the event that is logged. Size field represents
+the size of the log information. Value field represents the actual
+information being logged. This approach will support both id-based logging
+and stream-based logging. This approach will also support logging sub-events
+of an event. This module will provide helper routines to encode/decode the
+logs to/from this format.
+
+Encode Context
+---------------
+
+Encode context is a temporary storage space that will be used by the client
+drivers to log the events in <Type: Size: Value> format. The client drivers
+will perform an encode start operation to initialize the encode context
+data structure. Then the client drivers will log their events into the
+encode context. Upon completion of event logging, the client drivers will
+perform an encode end operation to finalize the encode context data
+structure to be logged. Then this updated encode context data structure
+will be written into the client driver's IPC Log Context. The maximum
+event log size will be defined as 256 bytes.
+
+Log Space
+----------
+
+Each context (Figure 1) has an associated log space, which is dynamically
+allocated from the kernel memory-space. The log space is organized as a list of
+1 or more kernel memory pages. Each page (Figure 2) contains header information
+which is used to differentiate the log kernel page from the other kernel pages.
+
+
+      0 ---------------------------------
+        |     magic_no = 0x25874452     |
+        ---------------------------------
+        |    nmagic_no = 0x52784425     |
+        ---------------------------------
+        |            version            |
+        ---------------------------------
+        |          user_version         |
+        ---------------------------------
+        |            log_id             |
+        ---------------------------------
+        |          header_size          |
+        ---------------------------------
+        |                               |
+        |                               |
+        |       name [20 chars]         |
+        |                               |
+        |                               |
+        ---------------------------------
+        |    run-time data structures   |
+        ---------------------------------
+         Figure 1 - Log Context Structure
+
+
+        31                             0
+      0 ---------------------------------
+        |     magic_no = 0x52784425     |
+        ---------------------------------
+        |    nmagic_no = 0xAD87BBDA     |
+        ---------------------------------
+        |1|         page_num            |
+        ---------------------------------
+        |  read_offset  | write_offset  |
+        ---------------------------------
+        |            log_id             |
+        ---------------------------------
+        |     start_time low word       |
+        |     start_time high word      |
+        ---------------------------------
+        |       end_time low word       |
+        |       end_time high word      |
+        ---------------------------------
+        |         context offset        |
+        ---------------------------------
+        |    run-time data structures   |
+        .            . . .              .
+        ---------------------------------
+        |                               |
+        |           Log Data            |
+        .              .                .
+        .              .                .
+        |                               |
+        --------------------------------- PAGE_SIZE - 1
+            Figure 2 - Log Page Structure
+
+In addition to extracting logs at runtime through DebugFS, IPC Logging has been
+designed to allow extraction of logs from a memory dump.  The magic numbers,
+timestamps, and context offset are all added to support the memory-dump
+extraction use case.
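+
+As a rough C sketch of the page header (field names and widths are taken from
+Figure 2; the exact in-memory packing and the run-time bookkeeping fields are
+implementation details and are omitted here):
+
+struct ipc_log_page_header {
+	uint32_t magic;		/* 0x52784425 */
+	uint32_t nmagic;	/* 0xAD87BBDA (~magic) */
+	uint32_t page_num;	/* most significant bit set to mark a log page */
+	uint16_t read_offset;	/* next read position within the page */
+	uint16_t write_offset;	/* next write position within the page */
+	uint32_t log_id;	/* identifies the owning log context */
+	uint32_t start_time_lo;	/* start_time, low word */
+	uint32_t start_time_hi;	/* start_time, high word */
+	uint32_t end_time_lo;	/* end_time, low word */
+	uint32_t end_time_hi;	/* end_time, high word */
+	uint32_t context_offset;	/* offset back to the owning log context */
+	/* run-time data structures, then log data up to PAGE_SIZE - 1 */
+};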
+
+Design
+======
+
+Alternate solutions discussed include using the kernel and SMEM logs, which
+are limited in size, so heavy IPC logging renders them unusable by other
+drivers. Kernel logging to the serial console also slows the drivers down
+considerably and can sometimes lead to an apps processor watchdog bite.
+
+Power Management
+================
+
+Not-Applicable
+
+SMP/multi-core
+==============
+
+This module uses spinlocks & mutexes to handle multi-core safety.
+
+Security
+========
+
+Not-Applicable
+
+Performance
+===========
+
+This logging mechanism, based on experimental data, is not expected to
+cause a significant performance degradation. In the worst case, it can
+cause a 1-2 percent degradation in the throughput of the IPC drivers.
+
+Interface
+=========
+
+Exported Data Structures
+------------------------
+struct encode_context {
+	struct tsv_header hdr;
+	char buff[MAX_MSG_SIZE];
+	int offset;
+};
+
+struct decode_context {
+	int output_format;
+	char *buff;
+	int size;
+};
+
+Kernel-Space Interface APIs
+----------------------------
+/*
+ * ipc_log_context_create: Create an IPC log context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @mod_name     : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns reference to context on success, NULL on failure
+ */
+void * ipc_log_context_create(int max_num_pages,
+			      const char *mod_name,
+			      uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type:  Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ *
+ * Returns 0 on success, -ve error code on failure
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt:   Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ *
+ * Returns 0 on success, -ve error code on failure
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n:     Integer to write
+ *
+ * Returns 0 on success, -ve error code on failure
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @data:  Location of data
+ * @data_size: Size of data to be written
+ *
+ * Returns 0 on success, -ve error code on failure
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			  void *data, int data_size);
+
+/*
+ * ipc_log_write: Write the encoded message into the log space
+ *
+ * @ctxt: IPC log context where the message has to be logged into
+ * @ectxt: Temporary storage containing the encoded message
+ */
+void ipc_log_write(unsigned long ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @dlctxt: IPC Log Context created using ipc_log_context_create()
+ * @fmt:    Data specified using format specifiers
+ */
+int ipc_log_string(unsigned long dlctxt, const char *fmt, ...);
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_int32_read(struct encode_context *ectxt,
+		    struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array/string
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ *                           unpack the subevents of a main event
+ *
+ * @ctxt: IPC log context to which the deserialization function has
+ *        to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ *        which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, -ve value on FAILURE
+ */
+int add_deserialization_func(unsigned long ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *));
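+
+A minimal usage sketch of the interfaces above from a hypothetical client
+driver ("my_drv", the page count, the event type and the values are
+illustrative; the casts follow the prototypes as listed above):
+
+static void *my_drv_log;
+
+static int my_drv_init(void)
+{
+	struct encode_context ectxt;
+
+	my_drv_log = ipc_log_context_create(2, "my_drv", 0);
+	if (!my_drv_log)
+		return -ENOMEM;
+
+	/* Stream-based logging of a formatted string */
+	ipc_log_string((unsigned long)my_drv_log, "my_drv: probe irq=%d", 42);
+
+	/* Id-based logging of a <Type: Size: Value> event */
+	msg_encode_start(&ectxt, 1 /* root event type */);
+	tsv_timestamp_write(&ectxt);
+	tsv_int32_write(&ectxt, 42);
+	msg_encode_end(&ectxt);
+	ipc_log_write((unsigned long)my_drv_log, &ectxt);
+
+	return 0;
+}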
+
+Driver parameters
+=================
+
+Not-Applicable
+
+Config options
+==============
+
+Not-Applicable
+
+Dependencies
+============
+
+This module will partially depend on CONFIG_DEBUG_FS in order to dump the
+logs through debugfs. If CONFIG_DEBUG_FS is disabled, the above-mentioned
+helper functions will perform no operation and return an appropriate error
+code if the return value is non-void. Under such circumstances the logs can
+only be extracted through a memory dump.
+
+User space utilities
+====================
+
+DEBUGFS
+
+Other
+=====
+
+Not-Applicable
+
+Known issues
+============
+
+None
+
+To do
+=====
+
+None
diff --git a/Documentation/arm/msm/msm_ipc_router.txt b/Documentation/arm/msm/msm_ipc_router.txt
new file mode 100644
index 0000000..f8626ec
--- /dev/null
+++ b/Documentation/arm/msm/msm_ipc_router.txt
@@ -0,0 +1,404 @@
+Introduction
+============
+
+Inter Process Communication (IPC) Message Router
+
+IPC Router provides a connectionless message routing service between
+multiple processes in an MSM setup. The communicating processes can
+run either on the same processor or on a different processor within
+the MSM setup. The IPC Router has been designed to
+	1) Route messages of any type
+	2) Support a broader network of processors
+The IPC Router follows the same protocol as the existing RPC Router
+in the kernel while communicating with its peer IPC Routers.
+
+Hardware description
+====================
+
+The IPC Router doesn't implement any specific hardware driver.
+The IPC Router uses the existing hardware drivers to transport messages
+across to the peer router. IPC Router contains an XPRT interface layer to
+handle the different types of transports/links. This interface layer
+abstracts the underlying transport complexities from the router and
+provides a packet/message interface with the transports.
+
+Software description
+====================
+
+The IPC Router is designed to support a client-server model. The
+clients and servers communicate with one another by exchanging data
+units known as messages. A message is a byte string from 1 to 64K bytes
+long. The IPC Router provides a connectionless message routing service
+between the clients and servers i.e. any client/server can communicate
+with any other client/server in the network of processors.
+
+Network Topology Overview:
+--------------------------
+
+The system is organized as a network of nodes. Each processor in the
+network is the most fundamental element called node. The complete
+network is hierarchically structured i.e. the network is divided into
+tiers and each tier is fully-meshed. The following figure shows an
+example network topology.
+
+
+	---N1---	---N4---
+	|      |	|      |
+	|      |	|      |
+        N2----N3-------N5-----N6
+	       |	|
+	       |	|
+	       ---N7----
+	       |	|
+	       |	|
+	       N8------N9
+
+Each node in the complete network is identified using a unique node id
+(Nx in the example network). In the example network, nodes N1, N2 & N3
+are fully-meshed to form a tier 1 network. Similarly nodes N4, N5 & N6
+form another tier 1 network and nodes N7, N8 & N9 form third tier 1
+network. These three tier 1 networks are fully-meshed to form a tier 2
+network.
+
+Each transport/link in the network is identified using a unique name/id
+called XPRT id. This XPRT id is used by the nodes to identify the
+link to be used while sending a message to a specific destination.
+In addition, each transport/link in the network is assigned a link id.
+This link id is used to identify the tier to which the link belongs.
+This link marking is used to avoid routing loops while forwarding
+broadcast messages. The incoming messages are only forwarded onto an
+egress link which has a link id different from that of an ingress link.
+
+IPC Router Addressing Overview:
+-------------------------------
+
+Each client/server in the network is identified using a unique
+<Node_id:Port_id> combination. Node_id identifies the processor on which
+a client/server is running. Port_id is a unique id within a node. This
+Port_id is assigned by the IPC Router in that node when a client/server
+comes up. The Node_id & Port_id are 32 bits each.
+
+Port_id 0xFFFFFFFE is reserved for Router &
+0xFFFFFFFF is reserved for broadcast messages.
+
+Each server in the network can also be addressed using a service name.
+The service name is of the form <service(32 bits):instance(32 bits)>.
+When a server comes up, it binds itself with a service name. This name
+information along with the <Node_id:Port_id> is broadcast onto the
+entire network.
+
+Control Path:
+-------------
+
+IPC Router uses the control messages to communicate and propagate the
+system wide events to the other routers in the system. Some of the
+events include:
+	1) Node Status
+	2) Server Status
+	3) Client Status
+	4) Flow Control Request/Confirmation
+
+Message Header:
+---------------
+
+IPC Router prepends a header to every message that it exchanges with
+other IPC Routers. The receiving IPC Routers use the header to identify
+the source and destination of the message, the size and type of the
+message, and to handle any flow control requests. The IPC Router
+header format is as follows:
+
+	0                                              31
+	-------------------------------------------------
+	|                     Version                   |
+	-------------------------------------------------
+	|                  Message Type                 |
+	-------------------------------------------------
+	|                 Source Node ID                |
+	-------------------------------------------------
+	|                 Source Port ID                |
+	-------------------------------------------------
+	|                   Confirm RX                  |
+	-------------------------------------------------
+	|                 Payload Length                |
+	-------------------------------------------------
+	|               Destination Node ID             |
+	-------------------------------------------------
+	|               Destination Port ID             |
+	-------------------------------------------------
+
+Message Header v2(Optimized):
+-----------------------------
+
+The following optimization has been done to the IPC Router header to
+make it concise, align with the system requirement and enable future
+expansion:
+
+	0               8               16              24             31
+	-----------------------------------------------------------------
+	|    Version    | Message Type  |         Control Flag          |
+	-----------------------------------------------------------------
+	|                         Payload Length                        |
+	-----------------------------------------------------------------
+	|        Source Node ID         |        Source Port ID         |
+	-----------------------------------------------------------------
+	|     Destination Node ID       |     Destination Port ID       |
+	-----------------------------------------------------------------
+
+Control Flag:
+
+	0                                       14          15
+	------------------------------------------------------------------
+	|               Reserved                | Opt. Hdr. | Confirm RX |
+	------------------------------------------------------------------
+
+IPC Router identifies and processes the header depending on the version
+field. The Confirm RX field in message header v1 becomes part of the
+control flag. All the other fields are reduced in size to align with the
+system requirement.
+
+Optional Header:
+An optional header bit field is introduced in the control flag to handle
+any unforeseen future requirement that this header cannot handle. When
+that bit is set, an optional header follows the current header. The
+optional header format is as follows:
+
+	0               8               16                             31
+	-----------------------------------------------------------------
+	| Length(words) |      Type     |           Control Flag        |
+	-----------------------------------------------------------------
+	|                                                               |
+	|                     Optional Header Contents                  |
+	|                                                               |
+	-----------------------------------------------------------------
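+
+A packed C view of the v2 header and the optional header, derived from the
+layouts above (the struct and field names are illustrative):
+
+struct ipc_rtr_header_v2 {
+	uint8_t  version;	/* header version (2) */
+	uint8_t  type;		/* message type */
+	uint16_t control_flag;	/* carries the Confirm RX and Opt. Hdr. flags */
+	uint32_t size;		/* payload length */
+	uint16_t src_node_id;
+	uint16_t src_port_id;
+	uint16_t dst_node_id;
+	uint16_t dst_port_id;
+} __attribute__((packed));
+
+struct ipc_rtr_opt_header {
+	uint8_t  len;		/* length of the optional header in words */
+	uint8_t  type;		/* optional header type */
+	uint16_t control_flag;
+	uint32_t contents[];	/* 'len' words of optional header data */
+} __attribute__((packed));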
+
+Design
+======
+
+The IPC Router is organized into 2 layers:
+	1) Router Core layer
+	2) Router - XPRT Interface layer
+
+
+This organization allows the router to abstract the XPRT's complexities
+from that of the core router functionalities. The Router Core layer
+performs the following core functions:
+	1) Message Routing
+	2) Distributed name service
+	3) Flow control between ports
+The Router core layer contains the following important data structures
+to perform the core functions in their respective order:
+	1) Routing Table
+	2) Table of Active Servers
+	3) Table of Active Remote ports
+All these data structures get updated based on the events passed through
+the control path.
+
+
+The Router - XPRT Interface layer hides the underlying transport
+complexities and provides an abstracted packet interface to the
+Router Core layer. The Router - XPRT Interface layer registers itself
+with the Router Core layer upon complete initialization of the XPRT.
+The Router - XPRT Interface layer upon registration exports the
+following functionalities to the Router Core:
+	1) Read from the XPRT
+	2) # of bytes of data available to read
+	3) Write to the XPRT
+	4) Space available to write to the XPRT
+	5) Close the XPRT
+
+
+The user behavioral model of the IPC Router should be
+	1) Create a port
+	2) If server, register a name to the port
+	3) If remote port not known, lookup through the name
+	4) Send messages over the port to the remote port
+	5) Receive messages along with the source info from the port
+	6) Repeat steps 3, 4 & 5 as required
+	7) If server, unregister the name from the port
+	8) Close the port
+
+Power Management
+================
+
+IPC Message Router uses wakelocks to ensure that the system does not go
+into suspend mode while there are pending messages to be handled. Once all
+the messages are handled, IPC Message Router releases the wakelocks to
+allow the system to go into suspend mode and comply with the system power
+management requirement.
+
+SMP/multi-core
+==============
+
+The IPC Router uses mutexes & spinlocks to protect the shared data
+structures to be SMP/multi-core safe.
+
+Security
+========
+
+None
+
+Performance
+===========
+
+None
+
+Interface
+=========
+
+Kernel-space APIs:
+------------------
+
+/*
+ * msm_ipc_router_create_port - Create an IPC Router port
+ *
+ * @msm_ipc_port_notify: notification function which will notify events
+ *			 like READ_DATA_AVAIL, WRITE_DONE etc.
+ * @priv: caller private context pointer, passed to the notify callback.
+ *
+ * @return: a valid port pointer on success, NULL on failure
+ *
+ */
+struct msm_ipc_port * msm_ipc_router_create_port(
+	void (*msm_ipc_port_notify)(unsigned event, void *data,
+				    void *addr, void *priv),
+	void *priv)
+
+
+/*
+ * msm_ipc_router_close_port - Close down the port
+ *
+ * @port: Port to be closed
+ *
+ * @return: 0 on success, -ve value on failure
+ *
+ */
+int msm_ipc_router_close_port(struct msm_ipc_port *port)
+
+
+/*
+ * msm_ipc_router_send_to - Send data to a remote port
+ *
+ * @from_port: Source port of the message
+ * @data: Data to be sent
+ * @to_addr: Destination port name or address
+ *
+ * @return: number of bytes sent on success, -ve value on failure
+ *
+ */
+int msm_ipc_router_send_to(struct msm_ipc_port *from_port,
+			   struct sk_buff_head *data,
+			   struct msm_ipc_addr *to_addr)
+
+
+/*
+ * msm_ipc_router_recv_from - Receive data over a port
+ *
+ * @port: Port from which the data has to be read
+ * @data: Pointer to the data
+ * @src_addr: If valid, filled with the source address of the data
+ * @timeout: Time to wait for the data, if already not present
+ *
+ * @return: number of bytes received on success, -ve value on failure
+ *
+ */
+int msm_ipc_router_recv_from(struct msm_ipc_port *port,
+			     struct sk_buff_head **data,
+			     struct msm_ipc_addr *src_addr,
+			     unsigned long timeout)
+
+/*
+ * msm_ipc_router_register_server - Bind a local port with a service
+ *				    name
+ *
+ * @server_port: Port to be bound with a service name
+ * @name: Name to bind with the port
+ *
+ * @return: 0 on success, -ve value on failure
+ *
+ */
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+				   struct msm_ipc_addr *name)
+
+
+/*
+ * msm_ipc_router_unregister_server - Unbind the local port from its
+ *				      service name
+ *
+ * @server_port: Port to be unbound from its service name
+ *
+ * @return: 0 on success, -ve value on failure
+ *
+ */
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port)
+
+
+/*
+ * msm_ipc_router_lookup_server - Lookup port address for the port name
+ *
+ * @name: Name to be looked up for
+ *
+ * @return: Port address corresponding to the service name on success,
+ *	    NULL on failure
+ *
+ */
+struct msm_ipc_addr * msm_ipc_router_lookup_server(
+				struct msm_ipc_addr *name)
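+
+A sketch of a kernel client combining the calls above to look up a service and
+send a message to it. The notification callback body, the service/instance
+values and the msm_ipc_addr field names (addrtype, addr.port_name.service,
+addr.port_name.instance, MSM_IPC_ADDR_NAME) are assumptions for illustration:
+
+static void my_port_notify(unsigned event, void *data, void *addr, void *priv)
+{
+	/* e.g. wake up a reader on READ_DATA_AVAIL */
+}
+
+static int my_send_to_service(struct sk_buff_head *pkt)
+{
+	struct msm_ipc_port *port;
+	struct msm_ipc_addr *svc_addr;
+	struct msm_ipc_addr name = {
+		.addrtype = MSM_IPC_ADDR_NAME,	/* name-based addressing */
+		.addr.port_name = { .service = 0x42, .instance = 1 },
+	};
+	int ret;
+
+	port = msm_ipc_router_create_port(my_port_notify, NULL);
+	if (!port)
+		return -ENOMEM;
+
+	svc_addr = msm_ipc_router_lookup_server(&name);
+	if (!svc_addr) {
+		msm_ipc_router_close_port(port);
+		return -ENODEV;
+	}
+
+	ret = msm_ipc_router_send_to(port, pkt, svc_addr);
+	msm_ipc_router_close_port(port);
+	return ret < 0 ? ret : 0;
+}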
+
+
+User-space APIs:
+----------------
+
+User-space applications/utilities can use the socket APIs to interface
+with the IPC Router. IPC Router, in order to support the socket APIs,
+will register a new socket Address/Protocol Family with the kernel
+Socket layer. The identity of the new Address/Protocol Family will be
+defined using the macro AF_MSM_IPC/PF_MSM_IPC (hardcoded to 38) in
+include/linux/socket.h file. Since IPC Router supports only message
+oriented transfer, only SOCK_DGRAM type of sockets will be supported
+by the IPC Router.
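+
+A minimal user-space sketch; only the address family value (38) and the
+SOCK_DGRAM requirement come from the interface described above, the rest is
+standard socket boilerplate (the router-specific sockaddr needed for bind()
+and sendto() is not shown):
+
+#include <stdio.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#ifndef AF_MSM_IPC
+#define AF_MSM_IPC 38
+#endif
+
+int main(void)
+{
+	int fd = socket(AF_MSM_IPC, SOCK_DGRAM, 0);
+
+	if (fd < 0) {
+		perror("socket(AF_MSM_IPC)");
+		return 1;
+	}
+	close(fd);
+	return 0;
+}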
+
+Driver parameters
+=================
+
+debug_mask - This module parameter is used to enable/disable Router
+log messages in the kernel logs. This parameter can take any value
+from 0 to 255.
+
+Dependencies
+============
+
+Drivers in this project:
+-----------------------
+
+The following drivers are present in this project, listed in the
+bottom-up order of the stack.
+
+1a) Router - SMD XPRT Interface driver. This driver is used to interface
+the Router with the SMD transport.
+1b) Router - HSIC XPRT Interface driver. This driver is used to interface
+the Router with the HSIC_IPC Bridge transport for off-chip communication.
+2) Core Router driver. This driver performs the core routing functionality.
+3) Socket - Router Interface driver. This driver enables the socket
+interface to be used with the IPC Router.
+
+In the write/send direction, these drivers interact by invoking the
+exported APIs from the underlying drivers. In the read/receive
+direction, these drivers interact by passing messages/events.
+
+Drivers Needed:
+---------------
+
+	1) SMD
+	2) Kernel Socket Layer
+	3) Platform Driver
+	4) HSIC IPC Bridge Driver
+
+To do
+=====
+Improvements:
+-------------
+
+The IPC Router is designed to route any message, as long as the system
+follows the network architecture and addressing scheme. The implementation
+in progress, however, routes only QMI messages. With a few additional
+enhancements, it could route existing RPC messages too.
diff --git a/Documentation/arm/msm/msm_qmi.txt b/Documentation/arm/msm/msm_qmi.txt
new file mode 100644
index 0000000..590b9ab
--- /dev/null
+++ b/Documentation/arm/msm/msm_qmi.txt
@@ -0,0 +1,520 @@
+Introduction
+============
+
+Qualcomm Technologies, Inc. MSM Interface (QMI) is a messaging format used
+to communicate between software components in the modem and other
+peripheral subsystems. This document proposes an architecture to introduce
+QMI messaging into the kernel: a QMI encode/decode library to enable QMI
+message marshaling, and an interface library to enable sending and
+receiving QMI messages through the MSM IPC Router.
+
+Hardware description
+====================
+
+QMI is a messaging format used to interface with the components in modem
+and other subsystems. QMI does not drive or manage any hardware resources.
+
+Software description
+====================
+QMI communication is based on a client-server model, where clients and
+servers exchange messages in QMI wire format. A module can act as a client
+of any number of QMI services and a QMI service can serve any number of
+clients.
+
+QMI communication is of request/response type or an unsolicited event type.
+QMI client driver sends a request to a QMI service and receives a response.
+QMI client driver registers with the QMI service to receive indications
+regarding a system event and the QMI service sends the indications to the
+client when the event occurs in the system.
+
+The wire format of QMI message is as follows:
+
+   ----------------------------------------------------
+   |  QMI Header  |  TLV 0  |  TLV 1  | ... |  TLV N  |
+   ----------------------------------------------------
+
+QMI Header:
+-----------
+   --------------------------------------------------------
+   | Flags | Transaction ID | Message ID | Message Length |
+   --------------------------------------------------------
+
+The flags field is used to indicate the kind of QMI message - request,
+response or indication. The transaction ID is a client specific field
+to uniquely match the QMI request and the response. The message ID is
+also a client specific field to indicate the kind of information present
+in the QMI payload. The message length field holds the size information
+of the QMI payload.
+
+Flags:
+------
+  * 0 - QMI Request
+  * 2 - QMI Response
+  * 4 - QMI Indication
+
+TLV:
+----
+QMI payload is represented using a series of Type, Length and Value fields.
+Each piece of information being passed is encoded as a type, length and value
+combination. The type element identifies the type of information being
+encoded. The length element specifies the length of the information/values
+being encoded. The information can be of a primitive type or a structure
+or an array.
+
+    -------------------------------------------
+    | Type | Length | Value 0 | ... | Value N |
+    -------------------------------------------
+
+QMI Message Marshaling and Transport:
+-------------------------------------
+The QMI encode/decode library is designed to encode kernel C data
+structures into the QMI wire format and to decode QMI messages into kernel
+C data structure format. This library provides a single interface to
+transform any data structure into a QMI message and vice-versa.
+
+QMI interface library is designed to send and receive QMI messages over
+IPC Router.
+
+
+                 ----------------------------------
+                 |         Kernel Drivers         |
+                 ----------------------------------
+                        |                  |
+                        |                  |
+                -----------------   -----------------
+                | QMI Interface |___|  QMI Enc/Dec  |
+                |    Library    |   |    Library    |
+                -----------------   -----------------
+                        |
+                        |
+               -------------------
+               |   IPC Message   |
+               |      Router     |
+               -------------------
+                        |
+                        |
+                     -------
+                     | SMD |
+                     -------
+
+Design
+======
+
+The design goals of this proposed QMI messaging mechanism are:
+    * To enable QMI messaging from within the kernel
+    * To provide a common library to marshal QMI messages
+    * To provide a common interface library to send/receive QMI messages
+    * To support kernel QMI clients which have latency constraints
+
+The reasons behind this design decision are:
+    * To provide a simple QMI marshaling interface to the kernel users
+    * To hide the complexities of QMI message transports
+    * To minimize code redundancy
+
+In order to provide a single encode/decode API, the library expects
+kernel drivers to pass:
+    * starting address of the data structure to be encoded/decoded
+    * starting address of the QMI message buffer
+    * a table containing information regarding the data structure to
+      be encoded/decoded
+
+The design is based on the idea that any complex data structure is a
+collection of primary data elements. Hence the information about any
+data structure can be constructed as an array of information about its
+primary data elements. The following structure is defined to describe
+information about a primary data element.
+
+/**
+ * elem_info - Data structure to specify information about an element
+ *             in a data structure. An array of this data structure
+ *             can be used to specify info about a complex data
+ *             structure to be encoded/decoded.
+ * @data_type: Data type of this element
+ * @elem_len: Array length of this element, if an array
+ * @elem_size: Size of a single instance of this data type
+ * @is_array: Array type of this element
+ * @tlv_type: QMI message specific type to identify which element
+ *            is present in an incoming message
+ * @offset: To identify the address of the first instance of this
+ *          element in the data structure
+ * @ei_array: Array to provide information about the nested structure
+ *            within a data structure to be encoded/decoded.
+ */
+struct elem_info {
+    enum elem_type data_type;
+    uint32_t elem_len;
+    uint32_t elem_size;
+    enum array_type is_array;
+    uint8_t tlv_type;
+    uint32_t offset;
+    struct elem_info *ei_array;
+};
+
+Alternate designs that were discussed include manual encoding/decoding of
+QMI messages. From RPC experience, this approach has mostly been error
+prone, which in turn led to increased development and debugging effort.
+Another approach was a data-structure specific marshaling API -- i.e. every
+data structure to be encoded/decoded gets a unique auto-generated
+marshaling API. This approach comes at the cost of code redundancy and was
+therefore rejected.
+
+Power Management
+================
+
+N/A
+
+SMP/multi-core
+==============
+
+The QMI encode/decode library does not access any global or shared data
+structures. Hence it does not require any locking mechanisms to ensure
+multi-core safety.
+
+The QMI interface library uses mutexes while accessing shared resources.
+
+Security
+========
+
+N/A
+
+Performance
+===========
+
+This design proposal is intended to support kernel QMI clients that have
+latency constraints. Hence the number and size of QMI messages are expected
+to be kept small, in order to consistently achieve a latency of less than
+1 ms.
+
+Interface
+=========
+
+Kernel-APIs:
+------------
+
+Encode/Decode Library APIs:
+---------------------------
+
+/**
+ * elem_type - Enum to identify the data type of elements in a data
+ *             structure.
+ */
+enum elem_type {
+    QMI_OPT_FLAG = 1,
+    QMI_DATA_LEN,
+    QMI_UNSIGNED_1_BYTE,
+    QMI_UNSIGNED_2_BYTE,
+    QMI_UNSIGNED_4_BYTE,
+    QMI_UNSIGNED_8_BYTE,
+    QMI_SIGNED_2_BYTE_ENUM,
+    QMI_SIGNED_4_BYTE_ENUM,
+    QMI_STRUCT,
+    QMI_END_OF_TYPE_INFO,
+};
+
+/**
+ * array_type - Enum to identify if an element in a data structure is
+ *              an array. If so, then is it a static length array or a
+ *              variable length array.
+ */
+enum array_type {
+    NO_ARRAY = 0,
+    STATIC_ARRAY = 1,
+    VAR_LEN_ARRAY = 2,
+};
+
+/**
+ * msg_desc - Describes the main/outer structure to be
+ *            encoded/decoded.
+ * @msg_id: Message ID to identify the kind of QMI message.
+ * @max_msg_len: Maximum possible length of the QMI message.
+ * @ei_array: Array to provide information about a data structure.
+ */
+struct msg_desc {
+    uint16_t msg_id;
+    int max_msg_len;
+    struct elem_info *ei_array;
+};
+
+/**
+ * qmi_kernel_encode() - Encode to QMI message wire format
+ * @desc: Structure describing the data structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @out_buf_len: Length of the buffer to hold the QMI message.
+ * @in_c_struct: C Structure to be encoded.
+ *
+ * @return: size of encoded message on success,
+ *          -ve value on failure.
+ */
+int qmi_kernel_encode(struct msg_desc *desc,
+                      void *out_buf, uint32_t out_buf_len,
+                      void *in_c_struct);
+
+/**
+ * qmi_kernel_decode() - Decode to C Structure format
+ * @desc: Structure describing the data structure format.
+ * @out_c_struct: Buffer to hold the decoded C structure.
+ * @in_buf: Buffer containing the QMI message to be decoded.
+ * @in_buf_len: Length of the incoming QMI message.
+ *
+ * @return: 0 on success, -ve value on failure.
+ */
+int qmi_kernel_decode(struct msg_desc *desc, void *out_c_struct,
+                      void *in_buf, uint32_t in_buf_len);
+
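+As an illustration, the sketch below describes a hypothetical one-field
+request structure with an elem_info array and encodes it with
+qmi_kernel_encode(). The TLV type, message ID and maximum message length are
+placeholders, not values from any real QMI service, and the required header
+includes are omitted.
+
+/* Hypothetical request carrying a single 1-byte field. */
+struct test_req {
+    uint8_t enable;
+};
+
+static struct elem_info test_req_ei[] = {
+    {
+        .data_type = QMI_UNSIGNED_1_BYTE,
+        .elem_len  = 1,
+        .elem_size = sizeof(uint8_t),
+        .is_array  = NO_ARRAY,
+        .tlv_type  = 0x01,                  /* placeholder TLV type */
+        .offset    = offsetof(struct test_req, enable),
+        .ei_array  = NULL,
+    },
+    {
+        .data_type = QMI_END_OF_TYPE_INFO,
+    },
+};
+
+static struct msg_desc test_req_desc = {
+    .msg_id      = 0x0001,                  /* placeholder message ID */
+    .max_msg_len = 7,                       /* assumed worst-case wire size */
+    .ei_array    = test_req_ei,
+};
+
+static int encode_test_req(void *buf, uint32_t buf_len)
+{
+    struct test_req req = { .enable = 1 };
+
+    /* Returns the encoded length on success, -ve value on failure. */
+    return qmi_kernel_encode(&test_req_desc, buf, buf_len, &req);
+}
+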
+Interface Library APIs:
+-----------------------
+
+/**
+ * qmi_svc_event_notifier_register() - Register a notifier block to receive
+ *                                     events regarding a QMI service
+ * @service_id: Service ID to identify the QMI service.
+ * @instance_id: Instance ID to identify the instance of the QMI service.
+ * @nb: Notifier block used to receive the event.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_event_notifier_register(uint32_t service_id,
+                                    uint32_t instance_id,
+                                    struct notifier_block *nb);
+
+/**
+ * qmi_handle_create() - Create a QMI handle
+ * @notify: Callback to notify events on the handle created.
+ * @notify_priv: Private info to be passed along with the notification.
+ *
+ * @return: Valid QMI handle on success, NULL on error.
+ */
+struct qmi_handle *qmi_handle_create(
+    void (*notify)(struct qmi_handle *handle,
+                   enum qmi_event_type event, void *notify_priv),
+    void *notify_priv);
+
+/**
+ * qmi_connect_to_service() - Connect the QMI handle with a QMI service
+ * @handle: QMI handle to be connected with the QMI service.
+ * @service_id: Service id to identify the QMI service.
+ * @instance_id: Instance id to identify the instance of the QMI service.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_connect_to_service(struct qmi_handle *handle,
+                           uint32_t service_id, uint32_t instance_id);
+
+/**
+ * qmi_register_ind_cb() - Register the indication callback function
+ * @handle: QMI handle with which the function is registered.
+ * @ind_cb: Callback function to be registered.
+ * @ind_cb_priv: Private data to be passed with the indication callback.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_register_ind_cb(struct qmi_handle *handle,
+    void (*ind_cb)(struct qmi_handle *handle,
+                   unsigned int msg_id, void *msg,
+                   unsigned int msg_len, void *ind_cb_priv),
+    void *ind_cb_priv);
+
+/**
+ * qmi_send_req_wait() - Send a synchronous QMI request
+ * @handle: QMI handle through which the QMI request is sent.
+ * @req_desc: Structure describing the request data structure.
+ * @req: Buffer containing the request data structure.
+ * @req_len: Length of the request data structure.
+ * @resp_desc: Structure describing the response data structure.
+ * @resp: Buffer to hold the response data structure.
+ * @resp_len: Length of the response data structure.
+ * @timeout_ms: Timeout before a response is received.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_req_wait(struct qmi_handle *handle,
+                      struct msg_desc *req_desc,
+                      void *req, unsigned int req_len,
+                      struct msg_desc *resp_desc,
+                      void *resp, unsigned int resp_len,
+                      unsigned long timeout_ms);
+
+/**
+ * qmi_send_req_nowait() - Send an asynchronous QMI request
+ * @handle: QMI handle through which the QMI request is sent.
+ * @req_desc: Structure describing the request data structure.
+ * @req: Buffer containing the request data structure.
+ * @req_len: Length of the request data structure.
+ * @resp_desc: Structure describing the response data structure.
+ * @resp: Buffer to hold the response data structure.
+ * @resp_len: Length of the response data structure.
+ * @resp_cb: Callback function to be invoked when the response arrives.
+ * @resp_cb_data: Private information to be passed along with the callback.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_req_nowait(struct qmi_handle *handle,
+                        struct msg_desc *req_desc,
+                        void *req, unsigned int req_len,
+                        struct msg_desc *resp_desc,
+                        void *resp, unsigned int resp_len,
+                        void (*resp_cb)(struct qmi_handle *handle,
+                                        unsigned int msg_id, void *msg,
+                                        void *resp_cb_data),
+                        void *resp_cb_data);
+
+/**
+ * qmi_recv_msg() - Receive the QMI message
+ * @handle: Handle for which the QMI message has to be received.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_recv_msg(struct qmi_handle *handle);
+
+/**
+ * qmi_handle_destroy() - Destroy the QMI handle
+ * @handle: QMI handle to be destroyed.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_handle_destroy(struct qmi_handle *handle);
+
+/**
+ * qmi_svc_event_notifier_unregister() - Unregister service event notifier block
+ * @service_id: Service ID to identify the QMI service.
+ * @instance_id: Instance ID to identify the instance of the QMI service.
+ * @nb: Notifier block registered to receive the events.
+ *
+ * @return: 0 if successfully unregistered, < 0 on error.
+ */
+int qmi_svc_event_notifier_unregister(uint32_t service_id,
+                                      uint32_t instance_id,
+                                      struct notifier_block *nb);
+
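+A minimal sketch of the client-side flow using the APIs above is shown
+below. The service/instance IDs, the request/response structures and their
+descriptors (analogous to the encode/decode sketch earlier) are hypothetical
+placeholders, and the notification handler body is only indicative.
+
+static void test_clnt_notify(struct qmi_handle *handle,
+                             enum qmi_event_type event, void *notify_priv)
+{
+    /* e.g. schedule work that reads incoming messages via qmi_recv_msg() */
+}
+
+static int test_qmi_client(void)
+{
+    struct qmi_handle *handle;
+    struct test_req req = { .enable = 1 };
+    struct test_resp resp;
+    int rc;
+
+    handle = qmi_handle_create(test_clnt_notify, NULL);
+    if (!handle)
+        return -ENOMEM;
+
+    /* TEST_SERVICE_ID and TEST_INSTANCE_ID are placeholders. */
+    rc = qmi_connect_to_service(handle, TEST_SERVICE_ID, TEST_INSTANCE_ID);
+    if (rc < 0)
+        goto out;
+
+    /* Block for up to 1000 ms waiting for the response. */
+    rc = qmi_send_req_wait(handle, &test_req_desc, &req, sizeof(req),
+                           &test_resp_desc, &resp, sizeof(resp), 1000);
+out:
+    qmi_handle_destroy(handle);
+    return rc;
+}
+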
+/**
+ * qmi_svc_ops_options - Operations and options to be specified when
+ *                       a service registers.
+ * @version: Version field to identify the ops_options structure.
+ * @service_id: Service ID of the service being registered.
+ * @instance_id: Instance ID of the service being registered.
+ * @connect_cb: Callback when a new client connects with the service.
+ * @disconnect_cb: Callback when the client exits the connection.
+ * @req_desc_cb: Callback to get request structure and its descriptor
+ *               for a message id.
+ * @req_cb: Callback to process the request.
+ */
+struct qmi_svc_ops_options {
+	unsigned version;
+	uint32_t service_id;
+	uint32_t instance_id;
+	int (*connect_cb)(struct qmi_handle *handle,
+			  struct qmi_svc_clnt *clnt);
+	int (*disconnect_cb)(struct qmi_handle *handle,
+			     struct qmi_svc_clnt *clnt);
+	struct msg_desc *(*req_desc_cb)(unsigned int msg_id,
+					void **req,
+					unsigned int req_len);
+	int (*req_cb)(struct qmi_handle *handle,
+		      struct qmi_svc_clnt *clnt,
+		      void *req_handle,
+		      unsigned int msg_id,
+		      void *req);
+};
+
+/**
+ * qmi_svc_register() - Register a QMI service with a QMI handle
+ * @handle: QMI handle on which the service has to be registered.
+ * @ops_options: Service specific operations and options.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_register(struct qmi_handle *handle,
+		     void *ops_options);
+
+/**
+ * qmi_send_resp() - Send response to a request
+ * @handle: QMI handle from which the response is sent.
+ * @clnt: Client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp(struct qmi_handle *handle,
+		  struct qmi_svc_clnt *clnt,
+		  void *req_handle,
+		  struct msg_desc *resp_desc,
+		  void *resp,
+		  unsigned int resp_len);
+
+/**
+ * qmi_send_ind() - Send unsolicited event/indication to a client
+ * @handle: QMI handle from which the indication is sent.
+ * @clnt: Client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind(struct qmi_handle *handle,
+		 struct qmi_svc_clnt *clnt,
+		 struct msg_desc *ind_desc,
+		 void *ind,
+		 unsigned int ind_len);
+
+/**
+ * qmi_svc_unregister() - Unregister the service from a QMI handle
+ * @handle: QMI handle from which the service has to be unregistered.
+ *
+ * return: 0 on success, < 0 on error.
+ */
+int qmi_svc_unregister(struct qmi_handle *handle);
+
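+The sketch below shows how a kernel service might fill qmi_svc_ops_options
+and register itself with a QMI handle. The callback bodies, the version
+value and the service/instance IDs are placeholders, and request handling
+and response sending are only indicated in comments.
+
+static int test_svc_connect_cb(struct qmi_handle *handle,
+			       struct qmi_svc_clnt *clnt)
+{
+	return 0;	/* accept every client in this sketch */
+}
+
+static int test_svc_disconnect_cb(struct qmi_handle *handle,
+				  struct qmi_svc_clnt *clnt)
+{
+	return 0;
+}
+
+static struct msg_desc *test_svc_req_desc_cb(unsigned int msg_id,
+					     void **req,
+					     unsigned int req_len)
+{
+	/* Return the request descriptor and buffer for msg_id (placeholder). */
+	return NULL;
+}
+
+static int test_svc_req_cb(struct qmi_handle *handle,
+			   struct qmi_svc_clnt *clnt,
+			   void *req_handle, unsigned int msg_id, void *req)
+{
+	/* Handle the request; a response can be sent with qmi_send_resp(). */
+	return 0;
+}
+
+static struct qmi_svc_ops_options test_svc_ops = {
+	.version	= 1,			/* assumed ops_options version */
+	.service_id	= TEST_SERVICE_ID,	/* placeholder */
+	.instance_id	= TEST_INSTANCE_ID,	/* placeholder */
+	.connect_cb	= test_svc_connect_cb,
+	.disconnect_cb	= test_svc_disconnect_cb,
+	.req_desc_cb	= test_svc_req_desc_cb,
+	.req_cb		= test_svc_req_cb,
+};
+
+/* Registration: rc = qmi_svc_register(svc_handle, &test_svc_ops); */
+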
+User-space APIs:
+----------------
+This proposal is meant only for kernel QMI clients/services, and hence no
+user-space interface is defined.
+
+Driver parameters
+=================
+
+N/A
+
+Config options
+==============
+
+The QMI encode/decode library will be enabled by default in the kernel.
+It can be disabled using CONFIG_QMI_ENCDEC kernel config option.
+
+The QMI Interface library will be disabled by default in the kernel,
+since it depends on other components which are disabled by default.
+It can be enabled using CONFIG_MSM_QMI_INTERFACE kernel config option.
+
+Dependencies
+============
+
+The QMI encode/decode library is a stand-alone module and is not
+dependent on any other kernel modules.
+
+The QMI Interface library depends on QMI Encode/Decode library and
+IPC Message Router.
+
+User space utilities
+====================
+
+N/A
+
+Other
+=====
+
+N/A
+
+Known issues
+============
+
+N/A
+
+To do
+=====
+
+Look into the possibility of making the QMI Interface Library transport
+agnostic. This may involve having kernel drivers register, with the QMI
+Interface Library, the transport to be used for transporting QMI messages.
diff --git a/Documentation/arm/msm/msm_smem.txt b/Documentation/arm/msm/msm_smem.txt
new file mode 100644
index 0000000..e42973c
--- /dev/null
+++ b/Documentation/arm/msm/msm_smem.txt
@@ -0,0 +1,595 @@
+Introduction
+============
+The Shared Memory (SMEM) protocol allows multiple processors in Qualcomm
+Technologies, Inc. MSM System on Chips to communicate at a low level using a
+segment of shared system memory that is accessible by any of the processors.
+This is accomplished by an operating-system independent mechanism that allows a
+client on any processor to dynamically allocate a block of memory from shared
+system memory which is then visible and accessible to clients on other
+processors for the purpose of storing and exchanging data.
+
+
+                                                            +-------------+
+                                   SMEM                     | Processor 1 |
+  +---------+                 +------------+                +-------------+
+  |         |+--------------->|   Item 1   |<----------------+  ^
+  | Linux   |                 |            |                    |
+  |         |<---------------+|            |+-------------------+
+  +---------+                 +------------+
+       ^   +                  |   Item 2   |                +-------------+
+       |   |                  |            |<--------------+| Processor 2 |
+       |   |                  |            |+-------------->|             |
+       |   |                  |            |                +-------------+
+       |   |                  |            |
+       |   |                  |            |                +-------------+
+       |   |                  +------------+                | Processor 3 |
+       |   |                        .                       +-------------+
+       |   |                        .                         +  ^ .
+       |   |                  +------------+                  |  | .
+       |   +----------------->|   Item N   |<-----------------+  | .
+       +---------------------+|            |+--------------------+ .
+                              |            |                +-------------+
+                              |            |+-------------->|             |
+                              |            |                | Processor N |
+                              |            |<--------------+|             |
+                              +------------+                +-------------+
+
+The SMEM driver supports all known versions of the SMEM protocol.
+
+Hardware description
+====================
+The SMEM protocol requires a contiguous segment of system memory that is
+accessible by both the local processor and one or more remote processors.
+Each processor supporting the SMEM protocol must configure its MMU and other
+applicable hardware such that accesses to shared memory are non-cacheable.
+
+Optionally, additional segments of system memory may be provided to act as
+auxiliary memory areas for the SMEM protocol.  Such segments may provide
+performance benefits to certain processors by optimizing access latency.  Such
+auxiliary memory areas must be a slave to the single main SMEM area.
+
+While the SMEM protocol has provisions for software-based remote spinlocks to
+manage synchronization between processors, this functionality may be
+substituted with dedicated hardware.  Such hardware is expected to be managed
+by another driver providing a standardized API.
+
+Software description
+====================
+At its core, the SMEM protocol is a heap memory management system.  The core
+functionality consists of allocating segments of memory, and lookup operations
+to find the address of existing segments of memory.  There is no provision to
+free memory that is allocated.
+
+Allocated segments of memory are called SMEM items.  Each SMEM item has a unique
+32-bit identifier which maps each specific SMEM item to a slot in the table of
+contents that lives at the start of the SMEM region.
+
+A SMEM client that wishes to allocate a SMEM item will provide the item
+identifier and a desired size in bytes.  Assuming there is enough free space in
+the SMEM region to accommodate the request, the amount of desired bytes will be
+carved out, and the base address and size for the item will be stored in the
+table of contents.  The base address will be returned as a pointer to the
+client, so that the client may use the SMEM item as if it were normal memory
+allocated through "malloc".
+
+A SMEM client that wishes to find an already allocated SMEM item will provide
+the item identifier and the size in bytes that the client expects for the item.
+A lookup in the table of contents for the specified item identifier will be
+performed.  Assuming a matching SMEM item is found, the size of the item that
+is stored in the table of contents will be compared to the size specified by the
+client.  This sanity check of the expected vs actual size is done to ensure that
+all users of a particular SMEM item agree on the size of the data to be
+exchanged under the assumption that if the users do not agree on the item size,
+then they will not be able to successfully communicate as one or more sides may
+view a corruption of the data stored in the SMEM item.  Assuming the sizes
+match, the virtual address corresponding to the base address stored in the table
+of contents for the item will be returned to the client.
+
+  +------+     Request       +-----------+                Memory
+  |Client|+----------------->|SMEM Driver|          +---------------+
+  +------+ Item X of size Y  +-----------+          |               |
+     ^                             +                |               |
+     |                             | Lookup/Alloc   +---------------+  Find X
+     |                             +--------------->|     TOC[X]    |+--------+
+     |                                              +---------------+         |
+     |                                              |               |         |
+     |                                              |               |         |
+     |                                              |               |         |
+     |                                              |               |         |
+     |                                              |               |         |
+     |                                              |               |         |
+     |         Return pointer for client            +---------------+         |
+     +---------------------------------------------+|     Item X    |<--------+
+                                                    +---------------+
+                                                    |               |
+                                                    |               |
+                                                    |               |
+                                                    +---------------+
+
+The SMEM driver depends on the kernel memory management subsystem for managing
+the system memory that SMEM uses.  The main SMEM memory region is statically
+mapped at boot, and the virtual address for the base of the region is stored
+in MSM_SHARED_RAM_BASE.  Auxiliary memory regions are ioremap'd at driver init.
+All SMEM regions are mapped as non-cacheable.
+
+Although the SMEM driver is aware of auxiliary memory regions, and capable of
+understanding SMEM items that exist in auxiliary memory regions, the SMEM
+driver does not allocate from the auxiliary memory regions.  A detailed
+description of the purpose and use of auxiliary memory regions is outside the
+scope of this document.
+
+Design
+======
+The SMEM protocol requires that the system bootloader initialize (zero out) and
+bootstrap the main SMEM region before any processor in the system has booted to
+avoid an initialization race condition.
+
+SMEM regions are configured as non-cacheable memory.  While this results in a
+small performance hit, it significantly reduces the complexity for the SMEM
+driver and clients in terms of cache management and memory barriers.  Clients
+are generally able to treat their SMEM items like regular local memory, which
+eases the requirements to write correct code.
+
+The unsigned data type is assumed to be an unsigned 32-bit integer value.
+
+The root structure at the base of the main SMEM region is:
+
+#define SMD_HEAP_SIZE 512
+
+struct smem_shared {
+        struct smem_proc_comm proc_comm[4];
+        unsigned version[32];
+        struct smem_heap_info heap_info;
+        struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
+};
+
+This structure and its fields are initialized by the bootloader.
+
+The proc_comm field is reserved as the first part of the SMEM region to maintain
+compatibility with legacy systems, but is otherwise deprecated.  While the
+proc comm driver is beyond the scope of this document, the remaining structure
+definition to fully define smem_shared is:
+
+struct smem_proc_comm {
+        unsigned command;
+        unsigned status;
+        unsigned data1;
+        unsigned data2;
+};
+
+The version field of the smem_shared struct is an array of version entries
+specifying the SMEM protocol version of every supporting processor active in the
+system.  Each unsigned value in the array corresponds to one entry.  This
+provides a mechanism for ensuring protocol version compatibility between
+processors.  While the full table of assigned and reserved entries in the array
+is beyond the scope of this document, index 8 (smem_shared.version[8]) is
+reserved for any future use by Linux.  The bootloader always initializes its
+entry (index 7, or smem_shared.version[7]) to the SMEM protocol version
+supported by the bootloader.  Checking the value of the bootloader's entry can
+be used as a sanity check to determine if the SMEM region was successfully
+initialized.
+
+The heap_info field of smem_shared contains basic information of the SMEM heap.
+The bootloader fills in values corresponding to the main SMEM region when it
+initializes the heap.  It is defined as:
+
+struct smem_heap_info {
+        unsigned initialized;
+        unsigned free_offset;
+        unsigned heap_remaining;
+        unsigned reserved;
+};
+
+The initialized field is set to 1 by the bootloader when it initializes the
+heap.  The free_offset field contains the offset from the base of the SMEM
+region for the first free byte in the heap.  When a new SMEM item is allocated,
+free_offset is incremented by the size of the allocated item.  SMEM item sizes
+are 8-byte aligned.  The heap_remaining field contains the number of free bytes
+remaining in the heap.  When a new SMEM item is allocated, heap_remaining is
+decremented by the size of the item.  The reserved field is defined to be 0.
+
+The heap_toc field of smem_shared is the heap table of contents.  It is an array
+containing a slot for every defined SMEM item.  SMEM item identifiers index into
+this array.  The structure's definition is:
+
+struct smem_heap_entry {
+        unsigned allocated;
+        unsigned offset;
+        unsigned size;
+        unsigned reserved; /* bits 1:0 reserved, bits 31:2 aux smem base addr */
+};
+
+If an SMEM item is allocated, the allocated field is 1.  The offset field is
+either the offset from the main SMEM region base where this SMEM item exists, or
+the offset from the auxiliary SMEM region base specified in the reserved field.
+The size field contains the size of the SMEM item in bytes.  The size is defined
+to be 8-byte aligned.  The reserved field is 0 if the SMEM item is located in
+the main SMEM region, or bits 31(MSB) to 2 specify the physical address of the
+auxiliary SMEM region where the SMEM item resides.  If reserved is used as a
+physical address, then the address must be 4-byte aligned per ARM architectural
+requirements.
+
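+As an illustration of the bookkeeping described above (not the actual driver
+code), an allocation in the main SMEM region would update these structures
+roughly as follows; remote-spinlock handling, memory barriers and full
+validation are deliberately omitted.
+
+/* Sketch only; the real driver adds locking, barriers and validation. */
+static void *toy_smem_alloc(struct smem_shared *shared, char *smem_base,
+                            unsigned id, unsigned size)
+{
+        struct smem_heap_entry *e = &shared->heap_toc[id];
+        unsigned aligned = (size + 7) & ~7U;    /* item sizes are 8-byte aligned */
+
+        if (e->allocated)
+                return smem_base + e->offset;   /* exists; real code checks size */
+
+        if (shared->heap_info.heap_remaining < aligned)
+                return NULL;
+
+        e->offset = shared->heap_info.free_offset;
+        e->size = aligned;
+        e->reserved = 0;                        /* item lives in the main region */
+        e->allocated = 1;
+
+        shared->heap_info.free_offset += aligned;
+        shared->heap_info.heap_remaining -= aligned;
+
+        return smem_base + e->offset;
+}
+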
+The bootloader allocates and initializes the following SMEM items:
+
+Name                            ID      Size (bytes)
+----------------------------------------------------
+SMEM_PROC_COMM                  0       64
+SMEM_HEAP_INFO                  1       16
+SMEM_ALLOCATION_TABLE           2       8192
+SMEM_VERSION_INFO               3       128
+SMEM_HW_RESET_DETECT            4       8
+SMEM_AARM_WARM_BOOT             5       4
+SMEM_DIAG_ERR_MESSAGE           6       200
+SMEM_SPINLOCK_ARRAY             7       32
+SMEM_MEMORY_BARRIER_LOCATION    8       4
+
+All other SMEM items are dynamically allocated by processors in the system.
+
+Although the SMEM protocol requires the bootloader to initialize the SMEM region
+before any processor in the system is active, systems in early development
+do not always have a fully functional bootloader.  To determine if the
+bootloader initialized the main SMEM region properly, the SMEM driver will check
+the expected values of smem_shared.heap_info.initialized,
+smem_shared.heap_info.reserved, and the bootloader entry of the
+SMEM_VERSION_INFO SMEM item.  If this check fails, the SMEM driver will print
+an error message to the kernel log, and enter a disabled state.
+
+Security Feature
+----------------
+The SMEM protocol supports an optional security feature that segments the main
+SMEM region into multiple partitions.  Each partition becomes a unique item
+namespace.  Access to partitions is restricted to a maximum of two processors
+and enforced by Memory Protection Units (MPUs).  The exceptions to this are the
+Partition Table of Contents partition, which is read-only accessible by all
+processors, and the Legacy/Default partition, which is freely accessible by all
+processors.
+
+  +-------------------------+ SMEM Base address
+  |Legacy/Default           |
+  |SMEM Partition           |
+  +-------------------------+
+  |SMEM Partition 0         |
+  |Processor 1 - Processor 2|
+  +-------------------------+
+  |SMEM Partition 1         |
+  |Processor 1 - Processor 3|
+  +-------------------------+
+  |SMEM Partition 2         |
+  |Processor 4 - Processor 5|
+  +-------------------------+
+               .
+               .
+               .
+  +-------------------------+
+  |SMEM Partition N         |
+  |Processor N - Processor M|
+  +-------------------------+ SMEM Base address + SMEM size - 4k
+  |Table of Contents        |
+  |                         |
+  +-------------------------+ SMEM Base address + SMEM size
+
+SMEM items which are point-to-point in nature and accessed by two or fewer
+processors may be allocated from a partition that is restricted to those
+processors.  SMEM items which are non-sensitive, accessed by 3 or more
+processors, and/or do not correspond to a secured partition are allocated from
+the Legacy/Default partition.
+
+During the firmware boot process, the Table of Contents is initialized with a
+description of all the secured partitions.  Each secured partition is also
+initialized.  The required MPU settings to protect the Table of Contents and the
+secured partitions are also established.  The Table of Contents is located 4k
+bytes prior to the end of the main SMEM region so that it is in a known position
+for all processors to find and do local configuration.
+
+The Table of Contents is defined as:
+
+struct smem_toc {
+	/*
+	 * Identifier is a constant for use in debugging and identifying this
+	 * struct in a binary capture. Set to 0x434f5424 ("$TOC").
+	 */
+	uint32_t identifier;
+
+	/* Version number */
+	uint32_t version;
+
+	/* Number of entries in the table */
+	uint32_t num_entries;
+
+	uint32_t reserved[5];
+
+	/* Zero or more entries follow */
+	struct smem_toc_entry entry[];
+};
+
+Each entry in the Table of Contents is defined as:
+
+struct smem_toc_entry {
+	/* Offset in bytes from SMEM base of the region */
+	uint32_t offset;
+
+	/* Size in bytes of the region */
+	uint32_t size;
+
+	/* Flags for this region */
+	uint32_t flags;
+
+	/*
+	 * IDs for the 2 subsystems which have access to this partition.
+	 * Order does not matter.
+	 * For the entry which describes the TOC itself, these are both set to
+	 * SMEM_INVALID_HOST.
+	 * Use uint16_t, rather than enum type, to ensure size.
+	 */
+	uint16_t host0;
+	uint16_t host1;
+
+	/*
+	 * Lowest common multiple of cacheline sizes for both endpoints. For
+	 * example, if host0 has cacheline size of 32 and host1 has cacheline
+	 * size of 64, this value is set to 64.
+	 */
+	uint32_t size_cacheline;
+
+	uint32_t reserved[3];
+
+	/*
+	 * Sizes of sub ranges that are part of the region, but are excluded
+	 * from the SMEM heap. These are allocated from the end of the region
+	 * starting with sizes[0]. Set to 0 when not used.
+	 */
+	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
+};
+
+While the Legacy/Default partition maintains the structure and format of the
+main SMEM region with the security feature disabled, the secured partitions have
+a different format and structure:
+
+  +--------------+  +--------------------------+ Partition Base Address
+  |              |  | Partition Header         |
+  |              |  |                          |   +
+  | Uncached Page|  +--------------------------+   |
+  |              |  | Item A Header            |   |
+  |              |  +--------------------------+   |
+  |              |  | Item A Data              |   |
+  +--------------+  |                          |   |
+  |              |  |                          |   |
+  |              |  +--------------------------+   |
+  | Uncached Page|  | Item B Header            |   |Direction of heap growth
+  |              |  +--------------------------+   |
+  |              |  | Item B Data              |   |
+  |              |  |                          |   |
+  +--------------+  |                          |   |
+  |              |  +--------------------------+   |
+  |              |  |                          |   |
+  | Uncached Page|  |       Unused Heap        |   |
+  |              |  |         space to         |   v
+  |              |  |       page boundary      |
+  |              |  |                          |
+  +--------------+  +--------------------------+<----------+ End of heap
+                  . . .                      . . .
+                            Free Space
+                          Can be used for
+                            either heap.
+                  . . .                      . . .
+  +--------------+  +--------------------------+<----------+ End of heap
+  |              |  |                          |
+  |              |  |      Unused Heap         |
+  |  Cached Page |  |        space to          |   ^
+  |              |  |      page boundary       |   |
+  |              |  +--------------------------+   |
+  |              |  | Item Y Data              |   |
+  +--------------+  |                          |   |
+  |              |  +--------------------------+   |
+  |              |  | Item Y Header            |   |
+  |  Cached Page |  +--------------------------+   |
+  |              |  | Item Y Header Padding    |   |Direction of heap growth
+  |              |  +--------------------------+   |
+  |              |  | Item Z Data              |   |
+  +--------------+  |                          |   |
+  |              |  |                          |   |
+  |              |  |                          |   |
+  |  Cached Page |  +--------------------------+   |
+  |              |  | Item Z Header            |   |
+  |              |  +--------------------------+   +  Padding is here to ensure
+  |              |  | Item Z Header Padding    |      that the data buffer start
+  +--------------+  +--------------------------+      and end addresses are
+  |              |                                    aligned to cachelines for
+  |              |        Exclusion Range             both endpoints.
+  | Uncached Page|. . .   Free Space         . . .
+  |              |  +--------------------------+
+  |              |  | Exclusion Ranges 0..N    |
+  |              |  |                          |
+  +--------------+  +--------------------------+ Partition Base Address + size
+
+The design of the secured partitions has two advantages over the Legacy/Default
+Partition:
+	1. Using a linked list instead of a static array to track allocated SMEM
+		items maximizes space utilization
+	2. Creating two heaps allows one to be cacheline aligned, thus providing
+		an option for a higher level of performance to clients (requires
+		client to specify they want their SMEM item allocated in the
+		cached area)
+
+The partition header struct is defined as:
+
+struct smem_partition_header {
+	/* Identifier magic number - 0x54525024 ("$PRT") */
+	uint32_t identifier;
+
+	/*
+	 * IDs for the 2 subsystems which have access to this partition.
+	 * Order does not matter.
+	 * Use uint16_t, rather than enum type, to ensure size.
+	 */
+	uint16_t host0;
+	uint16_t host1;
+
+	/* Partition size, in bytes, not including the exclusion ranges */
+	uint32_t size;
+
+	/* Offset of the byte following the last allocation in uncached heap */
+	uint32_t offset_free_uncached;
+
+	/* Offset of the byte following the last allocation in cached heap */
+	uint32_t offset_free_cached;
+
+	uint32_t reserved[3];
+};
+
+The allocated SMEM item header struct is defined as:
+
+struct smem_partition_allocation_header {
+	/* 0xa5a5 canary value to detect overrun problems */
+	uint16_t canary;
+
+	/* SMEM item ID. Use uint16_t here, rather than enum, to ensure size. */
+	uint16_t smem_id;
+
+	/* Size of the allocated item, includes any necessary padding. */
+	uint32_t size;
+
+	/* Size of the data padding for cacheline alignment, if applicable */
+	uint16_t data_padding;
+
+	/* Size of the header padding for cacheline alignment, if applicable */
+	uint16_t header_padding;
+
+	uint32_t reserved[1];
+};
+
+SMP/multi-core
+==============
+The SMEM driver expects a remote spinlock driver to provide inter-processor
+synchronization primitives which not only provide locking between multiple cores
+but locking between multiple processors to protect the state of structures
+stored in SMEM regions during allocation and lookup.  Once a pointer to a SMEM
+item is returned to a client, that client is expected to provide all the
+necessary locking and other synchronization as required.
+
+The remote spinlocks may make use of the SMEM_SPINLOCK_ARRAY SMEM item (typical
+of legacy systems).
+
+SMEM regions are non-cacheable to maintain a consistent state of the data
+throughout all operations.  This simplifies cache management and memory barrier
+requirements to a few key points in the SMEM item allocation process, and allows
+clients to treat SMEM items like local memory once allocated.
+
+Security
+========
+SMEM by default provides no security of SMEM items.  If a SMEM item is intended
+to only be used between clients on processors A and B, malicious clients on
+processor C are free to sniff or inject data into the SMEM item.
+
+An optional security feature may be enabled that makes use of Memory Protection
+Units (MPUs) to limit access of special segments of the main SMEM region.
+Access to these partitions is limited to two processors, so only point-to-point
+traffic (such as SMD or SMP2P) is able to be protected.  Auxiliary SMEM regions
+are not protected under this feature.  Support for this feature is activated by
+a Device Tree property.
+
+Performance
+===========
+Some client use cases such as SMD may benefit from caching, but that places an
+additional burden of cache maintenance and protocol design onto the clients.
+
+Interface
+=========
+Kernel-space APIs:
+
+/**
+ * smem_alloc() - Find an existing item, otherwise allocate it with security
+ *                support
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size of the SMEM item
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it couldn't be found/allocated, or
+ *           -EPROBE_DEFER if the driver is not ready
+ */
+void *smem_alloc(unsigned id, unsigned size_in, unsigned to_proc,
+                   unsigned flags);
+
+/**
+ * smem_get_entry() - Get existing item with security support
+ *
+ * @id: ID of SMEM item
+ * @size: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *           if the driver isn't ready
+ */
+void *smem_get_entry(unsigned id, unsigned *size, unsigned to_proc,
+                                unsigned flags);
+
+/**
+ * smem_get_entry_no_rlock() - Get existing item without using remote spinlock.
+ *
+ * @id: ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *           if the driver isn't ready
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recovery cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned id, unsigned *size_out, unsigned to_proc,
+                                unsigned flags);
+
+/**
+ * smem_find() - Find existing item with security support
+ *
+ * @id: ID of SMEM item
+ * @size_in: Size of the SMEM item
+ * @to_proc: SMEM host that shares the item with apps
+ * @flags: Item attribute flags
+ * @returns: Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *           if the driver is not ready
+ */
+void *smem_find(unsigned id, unsigned size_in, unsigned to_proc,
+                unsigned flags);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine. This function will not return a version of EPROBE_DEFER
+ * if the driver is not ready since the caller should obtain @smem_address from
+ * one of the other public APIs and get EPROBE_DEFER at that time, if
+ * applicable.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
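+A minimal caller sketch for the APIs above is shown below. The item ID,
+remote host, size and flags are placeholders, and it is assumed (per the
+comments above) that the not-ready case is reported as an ERR_PTR-style
+-EPROBE_DEFER value.
+
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+
+static int test_smem_client(void)
+{
+        unsigned size = 64;     /* placeholder item size */
+        void *item;
+
+        /* MY_SMEM_ID, MY_REMOTE_HOST and the flags value are placeholders. */
+        item = smem_alloc(MY_SMEM_ID, size, MY_REMOTE_HOST, 0);
+        if (IS_ERR(item))
+                return PTR_ERR(item);   /* e.g. -EPROBE_DEFER */
+        if (!item)
+                return -ENOMEM;         /* could not be found or allocated */
+
+        /* The item may now be used like normal local memory. */
+        memset(item, 0, size);
+        return 0;
+}
+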
+Driver parameters
+=================
+Module parameters:
+debug_mask - 0 for off (default), 1 for on.
+	Enables or disables printing debug messages to the kernel log.
+
+Config options
+==============
+Configuration of SMEM regions is done via Device Tree per the format in
+Documentation/devicetree/bindings/arm/msm/smem.txt.
+
+Dependencies
+============
+Drivers needed:
+	Remote spinlocks
+
+Depends on the system bootloader to initialize the main SMEM region.
+
+Known issues
+============
+None.
+
+To do
+=====
+Convert use of the unsigned data type to a well defined type such as uint32_t
+for better portability.
diff --git a/Documentation/arm/msm/msm_smp2p.txt b/Documentation/arm/msm/msm_smp2p.txt
new file mode 100644
index 0000000..3e4173a
--- /dev/null
+++ b/Documentation/arm/msm/msm_smp2p.txt
@@ -0,0 +1,478 @@
+Introduction
+============
+The Shared Memory Point to Point (SMP2P) protocol facilitates communication of
+a single 32-bit value between two processors.  Each value has a single writer
+(the local side) and a single reader (the remote side).  Values are uniquely
+identified in the system by the directed edge (local processor ID to remote
+processor ID) and a string identifier.
+
+Version and feature negotiation has been included in the design to allow for
+phased upgrades of all processors.
+
+Software Architecture Description
+=================================
+The data and interrupt coupling between processors is shown in Fig. 1.  Each
+processor is responsible for creating the outgoing SMEM items and each item is
+writable by the local processor and readable by the remote processor.  By using
+two separate SMEM items that are single-reader and single-writer, SMP2P does
+not require any remote locking mechanisms.
+
+The client API uses the Linux GPIO and interrupt framework to expose a virtual
+GPIO and a virtual interrupt controller for each entry.
+
+                                      =================
+                                      |               |
+                     -----write------>|SMEM item A->B |-----read------
+                    |                 |               |               |
+                    |                 =================               |
+                    |                                                 |
+                    |                                                 v
+  GPIO API =>  ------------    ======= Interrupt line ======>     ------------
+               Processor A                                        Processor B
+ Interrupt <=  ------------    <====== Interrupt line =======     ------------
+    API             ^                                                 |
+                    |                                                 |
+                    |                                                 |
+                    |                 =================               |
+                    |                 |               |               |
+                     ------read-------|SMEM item A<-B |<-----write----
+                                      |               |
+                                      =================
+
+                                    Fig 1
+
+
+Design
+======
+Each SMEM item contains a header that is used to identify and manage the edge
+along with an array of actual entries.  The overall structure is captured in
+Fig 2 and the details of the header and entries are covered later in this
+section.  The memory format of all shared structures is little-endian.
+
+      -----------------------------------------------
+     |               SMEM item A->B                  |
+     |                                               |
+     |   -----------------------------------------   |
+     |  |31      24|       16|         8|        0|  |
+     |  |----------|---------|----------|---------|  |
+     |  |       Identifier Constant(Magic Number) |  |
+     |  |----------|---------|----------|---------|  |
+     |  | Feature Flags                 |Version  |  |
+     |  |                               |Number   |  |
+     |  |----------|---------|----------|---------|  |
+     |  | Remote Proc ID     |Local Proc ID       |  |
+     |  |----------|---------|----------|---------|  |
+     |  | Entries Valid      | Entries Total      |  |
+     |  |-----------------------------------------|  |
+     |                                               |
+     |                                               |
+     |   -----------------------------------------   |
+     |   |            Entry 0                    |   |
+     |   |  ----------------------------------   |   |
+     |   |  |       Identifier String         |  |   |
+     |   |  |---------------------------------|  |   |
+     |   |  |       Data                      |  |   |
+     |   |  |---------------------------------|  |   |
+     |   |---------------------------------------|   |
+     |   -----------------------------------------   |
+     |   |            Entry 1                    |   |
+     |   |  ----------------------------------   |   |
+     |   |  |       Identifier String         |  |   |
+     |   |  |---------------------------------|  |   |
+     |   |  |       Data                      |  |   |
+     |   |  |---------------------------------|  |   |
+     |   |---------------------------------------|   |
+     |                      -                        |
+     |                      -                        |
+     |                      -                        |
+     |   -----------------------------------------   |
+     |   |            Entry N                    |   |
+     |   |  ----------------------------------   |   |
+     |   |  |       Identifier String         |  |   |
+     |   |  |---------------------------------|  |   |
+     |   |  |       Data                      |  |   |
+     |   |  |---------------------------------|  |   |
+     |   |---------------------------------------|   |
+      -----------------------------------------------
+
+                            Fig 2
+
+
+The header of each SMEM item contains metadata that describes the processors
+using the edge, the version information, and the entry count.  The constant
+identifier is used as a magic number to enable extraction of the items from a
+memory dump.  The size of each entry depends upon the version, but the number
+of total entries (and hence the size of each SMEM item) is configurable with a
+suggested value of 16.
+
+The Entries Valid field indicates how many of the Entries Total are currently
+in use and valid.
+
+   ---------------------------------------------------------------------------
+  |Field        Size       Description                Valid Values            |
+   ---------------------------------------------------------------------------
+  | Identifier  4 Bytes    Value used to identify                             |
+  | Constant               structure in memory.     Must be set to $SMP       |
+  |                        Useful for debugging.    (0x504D5324)              |
+   ---------------------------------------------------------------------------
+  | Local       2 Bytes    Writing processor ID.    Refer Processor ID Table 3|
+  | Processor                                                                 |
+  | ID                                                                        |
+   ---------------------------------------------------------------------------
+  | Remote      2 Bytes    Reading processor ID.    Refer Processor ID Table 3|
+  | Processor                                                                 |
+  | ID                                                                        |
+   ---------------------------------------------------------------------------
+  | Version      1 Bytes   Refer to Version                                   |
+  | Number                 Feature Negotiation      Must be set to 1.         |
+  |                        section.                                           |
+   ---------------------------------------------------------------------------
+  | Feature      3 Bytes   Refer to Version                                   |
+  | flags                  and Feature Negotiation                            |
+  |                        section for details.                               |
+  |   bit 0                SSR_ACK Feature          Supported when set to 1   |
+  |   bits 1:31            Reserved                 Must be set to 0.         |
+   ---------------------------------------------------------------------------
+  | Entries      2 Bytes   Total number of          Must be 0 or greater.     |
+  | Total                  entries.                                           |
+   ---------------------------------------------------------------------------
+  | Entries      2 Bytes   Number of valid          Must be between 0         |
+  | Valid                  entries.                 and Entries Total.        |
+   ---------------------------------------------------------------------------
+  | Flags        4 Bytes                                                      |
+  |   bit 0                RESTART_DONE             Toggle for every restart  |
+  |   bit 1                RESTART_ACK              Toggle to ACK remote      |
+  |                                                 RESTART_DONE              |
+  |   bits 2:31            Reserved                 Must be set to 0.         |
+   ---------------------------------------------------------------------------
+                           Table 1 - SMEM Item Header
+
+The content of each SMEM entry is described in Table 2 and consists of a
+string identifier and a 32-bit data value.  The string identifier must be
+unique for each SMEM item.  The data value is opaque to SMP2P giving the client
+complete flexibility as to its usage.
+
+   ----------------------- --------------------- -----------------------------
+  | Field      | Size     | Description         |      Valid Values           |
+   ------------|----------|---------------------|-----------------------------
+  |            |          |                     |                             |
+  | Identifier | 16 Bytes | Null Terminated     |     NON-NULL for            |
+  | String     |          | ASCII string.       |     valid entries.          |
+  |            |          |                     |                             |
+   ------------|----------|---------------------|-----------------------------
+  | Data       |  4 Bytes | Data                |     Any (client defined)    |
+   ------------ ---------- --------------------- -----------------------------
+                              Table 2 - Entry Format
+
+
+The processor IDs in the system are fixed, and new processor IDs will be
+added to the end of the list (Table 3).
+
+              -------------------------------------------------
+             | Processor Name             |     ID value       |
+              -------------------------------------------------
+             | Application processor      |        0           |
+              -------------------------------------------------
+             | Modem processor            |        1           |
+              -------------------------------------------------
+             | Audio processor            |        2           |
+              -------------------------------------------------
+             | Sensor processor           |        3           |
+              -------------------------------------------------
+             | Wireless processor         |        4           |
+              -------------------------------------------------
+             | CDSP processor             |        5           |
+              -------------------------------------------------
+             | Power processor            |        6           |
+              -------------------------------------------------
+             | TrustZone processor        |        7           |
+              -------------------------------------------------
+             | NUM PROCESSORS             |        8           |
+              -------------------------------------------------
+                            Table 3 - Processor IDs
+
+SMEM Item
+---------
+The local processor that initiates outbound traffic is responsible for
+creating the SMEM item.  After creating the item, the local and remote
+processors negotiate the version and feature flags for the item to ensure
+compatibility.
+
+Table 4 lists the SMEM item base identifiers.  To get the SMEM item ID for a
+particular edge, the remote processor ID (Table 3) is added to the base item ID
+for the local processor (Table 4).  For example, the Apps ==> Modem (id 1) SMEM
+Item ID will be 427 + 1 = 428.
+
+          ---------------------------------------------------
+         | Description                    | SMEM ID value    |
+          ---------------------------------------------------
+         | CDSP SMEM Item base            |       94         |
+          ---------------------------------------------------
+         | Apps SMP2P SMEM Item base      |       427        |
+          ---------------------------------------------------
+         | Modem SMP2P SMEM Item base     |       435        |
+          ---------------------------------------------------
+         | Audio SMP2P SMEM Item base     |       443        |
+          ---------------------------------------------------
+         | Sensors SMP2P SMEM Item base   |       481        |
+          ---------------------------------------------------
+         | Wireless SMP2P SMEM Item base  |       451        |
+          ---------------------------------------------------
+         | Power SMP2P SMEM Item base     |       459        |
+          ---------------------------------------------------
+         | TrustZone SMP2P SMEM Item base |       489        |
+          ---------------------------------------------------
+                      Table 4 - SMEM Items Base IDs
+
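+As a minimal illustration of the computation above (the macro and helper
+names are hypothetical, not part of SMP2P):
+
+    #define APPS_SMP2P_SMEM_ITEM_BASE  427  /* Apps base from Table 4 */
+
+    /* SMEM item ID for the outbound Apps ==> <remote> edge. */
+    static unsigned int smp2p_item_id(unsigned int remote_pid)
+    {
+            /* remote_pid is the remote processor ID from Table 3. */
+            return APPS_SMP2P_SMEM_ITEM_BASE + remote_pid;
+    }
+
+    /* Example: Apps ==> Modem (id 1) yields 427 + 1 = 428. */
+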
+
+Version and Feature Negotiation
+-------------------------------
+To enable upgrading without breaking the system and to enable graceful feature
+fall-back support, SMP2P supports a version number and feature flags.  The
+combination of the version number and feature flags enables:
+ 1) SMP2P software updates to be rolled out to each processor separately.
+ 2) Individual features to be enabled or disabled per connection or edge.
+
+The version number represents any change in SMP2P that breaks compatibility
+between processors.  Examples would be a change in the shared data structures
+or changes to fundamental behavior.  Each implementation of SMP2P must be able
+to support a minimum of the current version and the previous version.
+
+The feature flags represent any changes in SMP2P that are optional and
+backwards compatible.  Endpoints negotiate the supported flags when the
+SMEM items are created, and the flags cannot be changed after negotiation
+has completed.
+
+
+Negotiation Algorithm
+----------------------
+While creating the SMEM item, the following algorithm shall be used.
+
+    if remote endpoint's SMEM Item exists
+        Read remote version number and flags
+        Local version number must be the lower of
+            - remote version number
+            - highest supported local version number
+        Flags value is bitwise AND of
+            - remote feature flags
+            - locally supported flags
+        Create SMEM item and populate negotiated number and flags
+        Interrupt remote processor
+        if version and flags match, negotiation is complete, else wait
+        for remote interrupt below.
+    Else
+        Create SMEM item and populate it with highest supported version and any
+        requested feature flag.
+        Interrupt remote processor.
+        Wait for Interrupt below.
+
+Upon receiving an interrupt from the remote processor while negotiation is not
+complete, check the version number and feature flags:
+    if equal, negotiation is complete.
+    if remote number is less than local number, and remote number is
+    supported:
+        Set local version number to remote version number
+        Bitwise AND local flags with remote flags
+        Interrupt remote processor
+        Negotiation is complete
+    if remote number is not supported, then negotiation has failed
+        Set version number to 0xFF and report failure in kernel log.
+    if remote number is more than local number:
+        Wait for remote endpoint to process our interrupt and negotiate down.
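+
+A minimal sketch of the creation-time half of this algorithm follows; the
+structure layout, constants and helper names (smp2p_read_remote_item() and
+friends) are illustrative only and not the actual implementation:
+
+    struct smp2p_smem_item {
+            u32 magic;                      /* $SMP, see Table 1 */
+            u8  version;
+            u32 features;
+            /* ... remaining header fields and the entry array ... */
+    };
+
+    static void smp2p_negotiate_out(struct smp2p_edge *edge)
+    {
+            struct smp2p_smem_item *remote = smp2p_read_remote_item(edge);
+            struct smp2p_smem_item *local = smp2p_alloc_local_item(edge);
+
+            if (remote) {
+                    /* Negotiate down to what both sides support. */
+                    local->version = remote->version < SMP2P_VERSION ?
+                                     remote->version : SMP2P_VERSION;
+                    local->features = remote->features &
+                                      SMP2P_SUPPORTED_FEATURES;
+            } else {
+                    /* Advertise the highest supported version/features. */
+                    local->version = SMP2P_VERSION;
+                    local->features = SMP2P_SUPPORTED_FEATURES;
+            }
+
+            smp2p_kick_remote(edge);        /* interrupt remote processor */
+            /*
+             * Negotiation completes in the interrupt handler once the
+             * remote item reports matching version and feature flags.
+             */
+    }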
+
+
+Creating an SMEM Entry
+----------------------
+Each new SMEM entry used in data transfer must be created at the end of the
+entry array in the SMEM item and cannot be deleted until the system is
+rebooted.  The following sequence shall be followed (see the sketch below):
+    1) Compare Entries Valid and Entries Total to verify if there is room in the
+       entry array for this request (if not, return error code to client).
+    2) Populate the Identifier of new entry and do a write memory barrier.
+    3) Update Entries Valid and Entries Total and do a write memory barrier.
+    4) Interrupt remote endpoint.
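+
+A hedged sketch of that sequence (field, constant and helper names are
+illustrative only):
+
+    static int smp2p_create_entry(struct smp2p_smem_item *item,
+                                  const char *name)
+    {
+            u16 idx = item->entries_valid;
+
+            /* 1) verify there is room in the entry array */
+            if (idx >= item->entries_total)
+                    return -ENOMEM;
+
+            /* 2) publish the identifier first */
+            strlcpy(item->entries[idx].name, name, SMP2P_NAME_LEN);
+            wmb();
+
+            /* 3) then publish the updated count (Entries Total is bumped
+             *    here as well if the array grew) */
+            item->entries_valid++;
+            wmb();
+
+            /* 4) interrupt the remote endpoint */
+            smp2p_kick_remote(item->edge);
+            return 0;
+    }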
+
+
+Entry Write
+-----------
+An entry write is achieved by the following sequence of operations:
+    1) Update data field in the entry and do a write memory barrier.
+    2) Interrupt remote endpoint.
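+
+A correspondingly small sketch of a write, with the same illustrative names:
+
+    static void smp2p_entry_write(struct smp2p_entry *entry, u32 value)
+    {
+            /* 1) update the data field and make it visible first */
+            entry->data = value;
+            wmb();
+
+            /* 2) then interrupt the remote endpoint */
+            smp2p_kick_remote(entry->edge);
+    }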
+
+
+Entry Read / Receiving Interrupts
+---------------------------------
+An interrupt will be received from the remote system for one or more of the
+following events:
+    1) Initialization
+    2) Entry change
+    3) New Entry
+
+Once the SMEM item initialization is complete, each interrupt should
+trigger SMP2P to:
+    1) Compare valid entry data value to cached value and notify client if it
+       has changed.
+    2) Compare Entries Valid to cached value.  If changed, initialize new entries.
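+
+A hedged sketch of that per-interrupt scan (the cached state and helper names
+are illustrative only):
+
+    static void smp2p_handle_irq(struct smp2p_edge *edge)
+    {
+            struct smp2p_smem_item *in = edge->remote_item;
+            u16 i;
+
+            /* skip processing until the remote item is initialized */
+            if (!in || in->magic != SMP2P_MAGIC)
+                    return;
+
+            /* 1) notify clients whose entry data changed */
+            for (i = 0; i < edge->cached_valid; i++) {
+                    u32 val = in->entries[i].data;
+
+                    if (val != edge->cached_data[i]) {
+                            edge->cached_data[i] = val;
+                            smp2p_notify_client(edge, i, val);
+                    }
+            }
+
+            /* 2) pick up any entries created since the last interrupt */
+            if (in->entries_valid != edge->cached_valid)
+                    smp2p_init_new_entries(edge, in->entries_valid);
+    }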
+
+Security
+========
+Since the implementation resides in the kernel and does not expose interfaces
+to userspace, no security issues are anticipated.  The usage of separate SMEM
+items allows for future security enhancements in SMEM.
+
+Performance
+===========
+No performance issues are anticipated, as the signaling rate is expected to be
+low and signaling is performed in interrupt context, which minimizes latency.
+
+Interfaces
+==========
+SMP2P is only supported in the kernel and interfaces with clients through the
+GPIO and interrupt subsystems.
+
+To map an entry to the client, the client must add two nodes to the Device
+Tree:
+    1) A node that matches "qcom,smp2pgpio" to create the entry
+    2) A node that matches the client driver to provide the GPIO pin mapping
+
+The details of the device tree entries for the GPIO interface are contained in
+the file Documentation/devicetree/bindings/gpio/gpio-smp2p.txt.
+
+    /* SMP2P Test Driver for inbound entry. */
+    smp2pgpio_smp2p_7_in: qcom,smp2pgpio-smp2p-7-in {
+        compatible = "qcom,smp2pgpio";
+        qcom,entry-name = "smp2p";
+        qcom,remote-pid = <7>;
+        qcom,is-inbound;
+        gpio-controller;
+        #gpio-cells = <2>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+    };
+
+    /* SMP2P Test Client for inbound entry. */
+    qcom,smp2pgpio_test_smp2p_7_in {
+        compatible = "qcom,smp2pgpio_test_smp2p_7_in";
+        gpios = <&smp2pgpio_smp2p_7_in 0 0>,
+            <&smp2pgpio_smp2p_7_in 1 0>,
+            . . .
+            <&smp2pgpio_smp2p_7_in 31 0>;
+    };
+
+    /* SMP2P Test Driver for outbound entries */
+    smp2pgpio_smp2p_7_out: qcom,smp2pgpio-smp2p-7-out {
+        compatible = "qcom,smp2pgpio";
+        qcom,entry-name = "smp2p";
+        qcom,remote-pid = <7>;
+        gpio-controller;
+        #gpio-cells = <2>;
+        interrupt-controller;
+        #interrupt-cells = <2>;
+    };
+
+    /* SMP2P Test Client for outbound entry. */
+    qcom,smp2pgpio_test_smp2p_7_out {
+        compatible = "qcom,smp2pgpio_test_smp2p_7_out";
+        gpios = <&smp2pgpio_smp2p_7_out 0 0>,
+            <&smp2pgpio_smp2p_7_out 1 0>,
+            . . .
+            <&smp2pgpio_smp2p_7_out 31 0>;
+    };
+
+The client can use a match entry for "qcom,smp2pgpio_test_smp2p_7_in" to
+retrieve the Device Tree configuration node.  Once that node has been
+retrieved, the client can call of_get_gpio() to get the virtual GPIO pin and
+also use gpio_to_irq() to map the GPIO pin to a virtual interrupt.  After that
+point, the standard GPIO and Interrupt APIs can be used to manipulate the SMP2P
+entries and receive notifications of changes.  Examples of typical function
+calls are shown below:
+    of_get_gpio()
+    gpio_get_value()
+    gpio_set_value()
+    gpio_to_irq()
+    request_irq()
+    free_irq()
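+
+For orientation only, a hedged sketch of such a client probe; the driver,
+device and handler names are illustrative and error handling is trimmed:
+
+    static irqreturn_t smp2p_demo_isr(int irq, void *data)
+    {
+            unsigned int gpio = (unsigned long)data;
+
+            pr_info("smp2p entry bit 0 is now %d\n", gpio_get_value(gpio));
+            return IRQ_HANDLED;
+    }
+
+    static int smp2p_demo_probe(struct platform_device *pdev)
+    {
+            struct device_node *np = pdev->dev.of_node;
+            int gpio, irq;
+
+            gpio = of_get_gpio(np, 0);      /* first pin in "gpios" */
+            if (gpio < 0)
+                    return gpio;
+
+            irq = gpio_to_irq(gpio);        /* map pin to virtual IRQ */
+            if (irq < 0)
+                    return irq;
+
+            return request_irq(irq, smp2p_demo_isr,
+                               IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+                               "smp2p-demo",
+                               (void *)(unsigned long)gpio);
+    }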
+
+Please reference the unit test code for example usage.
+
+Subsystem Restart
+=================
+SMP2P is unaffected by SubSystem Restart (SSR) on the high-level OS side and is
+actually used as an underlying communication mechanism for SSR.  On the
+peripheral system that is being restarted, SMP2P will zero out all existing
+state entries upon reboot as part of the SMP2P initialization process.  If
+the SSR_ACK feature is enabled, it then waits for an acknowledgment as
+outlined in the following subsections.
+
+SSR_ACK Feature - Reboot Use Case (Non-HLOS Only)
+-------------------------------------------------
+If a remote system boots up after an SSR and sees that the remote and local
+version numbers and feature flags are equal, then it zeros out entry values.  If
+the SSR_ACK feature is enabled, it will wait for an acknowledgment from the other
+processor that it has seen the zero entry before completing the negotiation
+sequence.
+
+    if remote and local version numbers and feature flags are equal
+        Zero out all entry values
+        if SSR_ACK feature is enabled
+            Set local RESTART_DONE flag to inverse of the remote RESTART_ACK
+            Send interrupt to remote system
+            Wait for interrupt and for remote RESTART_ACK to be equal to local
+            RESTART_DONE
+    Continue with normal negotiation sequence
+
+Interrupt Use Case
+------------------
+For every interrupt triggered by a remote change, SMP2P will notify the client
+of a change in state.  In addition, if the SSR_ACK feature is enabled, the SSR
+handshaking will also be handled.
+
+    if SSR_ACK feature is enabled
+        if remote RESTART_DONE != local RESTART_ACK
+            Notify client of entry change (will be * -> 0 transition)
+            Toggle local RESTART_ACK
+            Send interrupt to remote system
+        else
+            Notify client of entry change as usual
+    else
+        Notify client of entry change as usual
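+
+A hedged sketch of the SSR_ACK branch of that interrupt handling (flag masks
+and helpers are illustrative only):
+
+    static void smp2p_handle_ssr_ack(struct smp2p_edge *edge)
+    {
+            u32 remote_done = edge->remote_item->flags & SMP2P_FLAG_RESTART_DONE;
+            u32 local_ack = edge->local_item->flags & SMP2P_FLAG_RESTART_ACK;
+
+            if (!!remote_done != !!local_ack) {
+                    /*
+                     * The restarted side zeroed its entries: notify clients
+                     * of the * -> 0 transitions, then toggle RESTART_ACK and
+                     * interrupt the remote system to acknowledge it.
+                     */
+                    smp2p_notify_clients(edge);
+                    edge->local_item->flags ^= SMP2P_FLAG_RESTART_ACK;
+                    smp2p_kick_remote(edge);
+            }
+    }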
+
+Debug
+=====
+The state values and names for all entries visible to the Apps are exported
+through debugfs nodes for general debug purposes.
+
+Debugfs entries for triggering unit-tests are also exported.
+
+Internal logging will be performed using the IPC Logging module to enable
+post-mortem analysis.
+
+Testing
+=======
+On-target unit testing will be done to verify internal functionality and the
+GPIO/IRQ APIs.
+
+Driver parameters
+=================
+One module parameter will be provided to change the verbosity of internal logging.
+
+Config options
+==============
+Configuration of interrupts will be done using Device Tree per the format in
+Documentation/devicetree/bindings/arm/msm/smp2p.txt.  By default, the testing
+component will be enabled since it does not affect performance and has a
+minimal impact on kernel size.  However, customers can disable the testing
+component for size optimization.
+
+    CONFIG_MSM_SMP2P - enables SMP2P
+    CONFIG_MSM_SMP2P_TEST - enables unit test support
+
+Dependencies
+============
+Requires SMEM for creating the SMEM items.
+
+User Space utilities
+====================
+No userspace utilities are planned.
+
+Known issues
+============
+None.
diff --git a/Documentation/arm/msm/system_health_monitor.txt b/Documentation/arm/msm/system_health_monitor.txt
new file mode 100644
index 0000000..4e2e4c6
--- /dev/null
+++ b/Documentation/arm/msm/system_health_monitor.txt
@@ -0,0 +1,213 @@
+Introduction
+============
+
+System Health Monitor (SHM) passively monitors the health of the
+peripherals connected to the application processor. Software components
+in the application processor that experience communication failure can
+request the SHM to perform a system-wide health check. If any failures
+are detected during the health-check, then a subsystem restart will be
+triggered for the failed subsystem.
+
+Hardware description
+====================
+
+SHM is solely a software component and it interfaces with peripherals
+through QMI communication. SHM does not control any hardware blocks and
+it uses subsystem_restart to restart any peripheral.
+
+Software description
+====================
+
+SHM hosts a QMI service in the kernel that is connected to the Health
+Monitor Agents (HMA) hosted in the peripherals. HMAs in the peripherals
+are initialized along with other critical services in the peripherals and
+hence the connections between SHM and the HMAs are established during the early
+stages of the peripheral boot-up procedure. Software components within the
+application processor, either user-space or kernel-space, identify any
+communication failure with the peripheral by a lack of response and report
+that failure to SHM. SHM checks the health of the entire system through
+HMAs that are connected to it. If all the HMAs respond in time, then the
+failure reported by the software component is ignored. If any HMAs do not
+respond in time, then SHM will restart the concerned peripheral. Figure 1
+shows a high level design diagram and Figure 2 shows a flow diagram of the
+design.
+
+Figure 1 - System Health Monitor Overview:
+
+    +------------------------------------+         +----------------------+
+    |   Application Processor            |         |    Peripheral 1      |
+    |                  +--------------+  |         |  +----------------+  |
+    |                  | Applications |  |         |  | Health Monitor |  |
+    |                  +------+-------+  |   +------->| Agent 1        |  |
+    |   User-space            |          |   |     |  +----------------+  |
+    +-------------------------|----------+   |     +----------------------+
+    |   Kernel-space          v          |  QMI         .
+    | +---------+      +---------------+ |   |          .
+    | | Kernel  |----->| System Health |<----+          .
+    | | Drivers |      |    Monitor    | |   |
+    | +---------+      +---------------+ |  QMI    +----------------------+
+    |                                    |   |     |    Peripheral N      |
+    |                                    |   |     |  +----------------+  |
+    |                                    |   |     |  | Health Monitor |  |
+    |                                    |   +------->| Agent N        |  |
+    |                                    |         |  +----------------+  |
+    +------------------------------------+         +----------------------+
+
+
+Figure 2 - System Health Monitor Message Flow with 2 peripherals:
+
+  +-----------+          +-------+         +-------+         +-------+
+  |Application|          |  SHM  |         | HMA 1 |         | HMA 2 |
+  +-----+-----+          +-------+         +---+---+         +---+---+
+        |                    |                 |                 |
+        |                    |                 |                 |
+        |   check_system     |                 |                 |
+        |------------------->|                 |                 |
+        |   _health()        |    Report_      |                 |
+        |                    |---------------->|                 |
+        |                    |  health_req(1)  |                 |
+        |                    |                 |                 |
+        |                    |    Report_      |                 |
+        |                    |---------------------------------->|
+        |                   +-+ health_req(2)  |                 |
+        |                   |T|                |                 |
+        |                   |i|                |                 |
+        |                   |m|                |                 |
+        |                   |e|   Report_      |                 |
+        |                   |o|<---------------|                 |
+        |                   |u| health_resp(1) |                 |
+        |                   |t|                |                 |
+        |                   +-+                |                 |
+        |                    |   subsystem_    |                 |
+        |                    |---------------------------------->|
+        |                    |   restart(2)    |                 |
+        +                    +                 +                 +
+
+HMAs can be extended to monitor the health of individual software services
+executing in their concerned peripherals. HMAs can restore the services
+that are not responding to a responsive state.
+
+Design
+======
+
+The design goals of SHM are to:
+  * Restore the unresponsive peripheral to a responsive state.
+  * Restore the unresponsive software services in a peripheral to a
+    responsive state.
+  * Perform power-efficient monitoring of the system health.
+
+An alternative design that was considered is to send keepalive messages in
+the IPC protocols at the transport layer. This approach requires rolling out
+the protocol update to all the peripherals together and hence has
+considerable coupling unless a suitable feature negotiation algorithm is
+implemented. It also requires every IPC protocol at the transport layer to
+be updated, which duplicates effort. In addition, there are multiple
+link-layer protocols, and adding keep-alive at the link layer does not solve
+the client-layer issues that SHM solves. Restoring a peripheral or a remote
+software service through an IPC protocol has not been an industry standard
+practice; industry standard IPC protocols only terminate the connection on a
+communication failure and rely upon other mechanisms to restore the system
+to full operation.
+
+Power Management
+================
+
+This driver ensures that the health monitor messages are sent only upon
+request and hence does not wake up application processor or any peripheral
+unnecessarily.
+
+SMP/multi-core
+==============
+
+This driver uses standard kernel mutexes and wait queues to achieve any
+required synchronization.
+
+Security
+========
+
+A Denial of Service (DoS) attack by an application that keeps requesting
+health checks at a high rate can be throttled by the SHM to minimize the
+impact of the misbehaving application.
+
+Interface
+=========
+
+Kernel-space APIs:
+------------------
+/**
+ * kern_check_system_health() - Check the system health
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function is used by the kernel drivers to initiate the
+ * system health check. This function in turn triggers SHM to send a
+ * QMI message to all the HMAs connected to it.
+ */
+int kern_check_system_health(void);
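+
+A hedged example of how a kernel client might use this after an IPC timeout
+(the work handler name is illustrative):
+
+    static void demo_ipc_timeout_work(struct work_struct *work)
+    {
+            int ret = kern_check_system_health();
+
+            if (ret)
+                    pr_err("SHM health check could not be started: %d\n", ret);
+    }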
+
+User-space Interface:
+---------------------
+This driver provides a device interface (/dev/system_health_monitor) to
+user-space. A wrapper API library will be provided to user-space
+applications in order to initiate the system health check. The API in turn
+will interface with the driver through the device node provided by the
+driver.
+
+/**
+ * check_system_health() - Check the system health
+ *
+ * @return: 0 on success, -1 on failure.
+ *
+ * This function is used by the user-space applications to initiate the
+ * system health check. This function in turn triggers SHM to send a QMI
+ * message to all the HMAs connected to it.
+ */
+int check_system_health(void);
+
+The above-mentioned interface function works by opening the device node
+provided by SHM, performing an ioctl operation on it, and then closing it.
+The concerned ioctl command (CHECK_SYS_HEALTH_IOCTL) does not take any
+argument. This function initiates the health check; the response and any
+timeout are handled asynchronously.
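+
+A minimal user-space sketch of that open/ioctl/close pattern, assuming a
+header that exports CHECK_SYS_HEALTH_IOCTL:
+
+    #include <fcntl.h>
+    #include <sys/ioctl.h>
+    #include <unistd.h>
+
+    int check_system_health(void)
+    {
+            int fd, ret;
+
+            fd = open("/dev/system_health_monitor", O_RDWR);
+            if (fd < 0)
+                    return -1;
+
+            /* CHECK_SYS_HEALTH_IOCTL takes no argument. */
+            ret = ioctl(fd, CHECK_SYS_HEALTH_IOCTL);
+            close(fd);
+
+            return ret < 0 ? -1 : 0;
+    }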
+
+Driver parameters
+=================
+
+The time duration for which the SHM has to wait before a response
+arrives from HMAs can be configured using a module parameter. This
+parameter will be used only for debugging purposes. The default SHM health
+check timeout is 2s, which can be overridden by the timeout provided by the
+HMA during connection establishment.
+
+Config options
+==============
+
+This driver is enabled through kernel config option
+CONFIG_SYSTEM_HEALTH_MONITOR.
+
+Dependencies
+============
+
+This driver depends on the following kernel modules for its complete
+functionality:
+  * Kernel QMI interface
+  * Subsystem Restart support
+
+User space utilities
+====================
+
+Any user-space or kernel-space modules that experience communication
+failure with peripherals will interface with this driver. Some of the
+modules include:
+  * RIL
+  * Location Manager
+  * Data Services
+
+Other
+=====
+
+SHM provides a debug interface to enumerate some information regarding the
+recent health checks. The debug information includes, but is not limited to:
+* application name that triggered the health check.
+* time of the health check.
+* status of the health check.
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index e55103a..a542b9f 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -30,3 +30,9 @@
 	- Switching I/O schedulers at runtime
 writeback_cache_control.txt
 	- Control of volatile write back caches
+mmc-max-speed.txt
+	- eMMC layer speed simulation, related to /sys/block/mmcblk*/
+          attributes:
+            max_read_speed
+            max_write_speed
+            cache_size
diff --git a/Documentation/block/mmc-max-speed.txt b/Documentation/block/mmc-max-speed.txt
new file mode 100644
index 0000000..3f052b9
--- /dev/null
+++ b/Documentation/block/mmc-max-speed.txt
@@ -0,0 +1,38 @@
+eMMC Block layer simulation speed controls in /sys/block/mmcblk*/
+=================================================================
+
+Turned on with CONFIG_MMC_SIMULATE_MAX_SPEED, which enables MMC device speed
+limiting. Used to test and simulate the behavior of the system when
+confronted with a slow MMC.
+
+Enables the max_read_speed, max_write_speed and cache_size attributes, along
+with module default parameters, to control the maximum read or write speed
+in KB/second.
+
+NB: There is room for improving the algorithm for aspects tied directly to
+eMMC specific behavior, for instance wear leveling and stalls from an
+exhausted erase pool. We would expect that if there were a need to provide
+similar speed simulation controls to other types of block devices, aspects of
+their behavior would be modelled separately (e.g. head seek times, heat
+assist, shingling and rotational latency).
+
+/sys/block/mmcblk0/max_read_speed:
+
+Number of KB/second reads allowed to the block device. Used to test and
+simulate the behavior of the system when confronted with a slow reading MMC.
+Set to 0 or "off" to place no speed limit.
+
+/sys/block/mmcblk0/max_write_speed:
+
+Number of KB/second writes allowed to the block device. Used to test and
+simulate the behavior of the system when confronted with a slow writing MMC.
+Set to 0 or "off" to place no speed limit.
+
+/sys/block/mmcblk0/cache_size:
+
+Number of MB of high speed memory or high speed SLC cache expected on the
+eMMC device being simulated. Used to help simulate the write-back behavior
+more accurately. The assumption is the cache has no delay, but draws down
+in the background to the MLC/TLC primary store at the max_write_speed rate.
+Any write speed delays will show up when the cache is full, or when an I/O
+request to flush is issued.
diff --git a/Documentation/cgroup-v1/cgroups.txt b/Documentation/cgroup-v1/cgroups.txt
index 308e5ff..295f026 100644
--- a/Documentation/cgroup-v1/cgroups.txt
+++ b/Documentation/cgroup-v1/cgroups.txt
@@ -578,6 +578,15 @@
 be called for a newly-created cgroup if an error occurs after this
 subsystem's create() method has been called for the new cgroup).
 
+int allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+(cgroup_mutex held by caller)
+
+Called prior to moving a task into a cgroup; if the subsystem
+returns an error, this will abort the attach operation.  Used
+to extend the permission checks - if all subsystems in a cgroup
+return 0, the attach will be allowed to proceed, even if the
+default permission check (root or same user) fails.
+
 int can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 (cgroup_mutex held by caller)
 
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index c15aa75..ac8a37e 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -28,6 +28,7 @@
 2.3  Userspace
 2.4  Ondemand
 2.5  Conservative
+2.6  Interactive
 
 3.   The Governor Interface in the CPUfreq Core
 
@@ -218,6 +219,90 @@
 speed. Load for frequency increase is still evaluated every
 sampling rate.
 
+2.6 Interactive
+---------------
+
+The CPUfreq governor "interactive" is designed for latency-sensitive,
+interactive workloads. This governor sets the CPU speed depending on
+usage, similar to "ondemand" and "conservative" governors, but with a
+different set of configurable behaviors.
+
+The tuneable values for this governor are:
+
+target_loads: CPU load values used to adjust speed to influence the
+current CPU load toward that value.  In general, the lower the target
+load, the more often the governor will raise CPU speeds to bring load
+below the target.  The format is a single target load, optionally
+followed by pairs of CPU speeds and CPU loads to target at or above
+those speeds.  Colons can be used between the speeds and associated
+target loads for readability.  For example:
+
+   85 1000000:90 1700000:99
+
+targets CPU load 85% below speed 1GHz, 90% at or above 1GHz and below
+1.7GHz, and 99% at or above 1.7GHz.  If speeds are specified, they must
+appear in ascending order.  Higher target load values are typically
+specified for higher speeds, that is, target load values also usually
+appear in ascending order.  The default is target load 90% for all
+speeds.
+
+min_sample_time: The minimum amount of time to spend at the current
+frequency before ramping down. Default is 80000 uS.
+
+hispeed_freq: An intermediate "hi speed" at which to initially ramp
+when CPU load hits the value specified in go_hispeed_load.  If load
+stays high for the amount of time specified in above_hispeed_delay,
+then speed may be bumped higher.  Default is the maximum speed
+allowed by the policy at governor initialization time.
+
+go_hispeed_load: The CPU load at which to ramp to hispeed_freq.
+Default is 99%.
+
+above_hispeed_delay: When speed is at or above hispeed_freq, wait for
+this long before raising speed in response to continued high load.
+The format is a single delay value, optionally followed by pairs of
+CPU speeds and the delay to use at or above those speeds.  Colons can
+be used between the speeds and associated delays for readability.  For
+example:
+
+   80000 1300000:200000 1500000:40000
+
+uses delay 80000 uS until CPU speed 1.3 GHz, at which speed delay
+200000 uS is used until speed 1.5 GHz, at which speed (and above)
+delay 40000 uS is used.  If speeds are specified, they must appear in
+ascending order.  Default is 20000 uS.
+
+timer_rate: Sample rate for reevaluating CPU load when the CPU is not
+idle.  A deferrable timer is used, such that the CPU will not be woken
+from idle to service this timer until something else needs to run.
+(The maximum time to allow deferring this timer when not running at
+minimum speed is configurable via timer_slack.)  Default is 20000 uS.
+
+timer_slack: Maximum additional time to defer handling the governor
+sampling timer beyond timer_rate when running at speeds above the
+minimum.  For platforms that consume additional power at idle when
+CPUs are running at speeds greater than minimum, this places an upper
+bound on how long the timer will be deferred prior to re-evaluating
+load and dropping speed.  For example, if timer_rate is 20000uS and
+timer_slack is 10000uS then timers will be deferred for up to 30msec
+when not at lowest speed.  A value of -1 means defer timers
+indefinitely at all speeds.  Default is 80000 uS.
+
+boost: If non-zero, immediately boost speed of all CPUs to at least
+hispeed_freq until zero is written to this attribute.  If zero, allow
+CPU speeds to drop below hispeed_freq according to load as usual.
+Default is zero.
+
+boostpulse: On each write, immediately boost speed of all CPUs to
+hispeed_freq for at least the period of time specified by
+boostpulse_duration, after which speeds are allowed to drop below
+hispeed_freq according to load as usual.
+
+boostpulse_duration: Length of time to hold CPU speed at hispeed_freq
+on a write to boostpulse, before allowing speed to drop according to
+load as usual.  Default is 80000 uS.
+
+
 3. The Governor Interface in the CPUfreq Core
 =============================================
 
diff --git a/Documentation/device-mapper/boot.txt b/Documentation/device-mapper/boot.txt
new file mode 100644
index 0000000..adcaad5
--- /dev/null
+++ b/Documentation/device-mapper/boot.txt
@@ -0,0 +1,42 @@
+Boot time creation of mapped devices
+====================================
+
+It is possible to configure a device mapper device to act as the root
+device for your system in two ways.
+
+The first is to build an initial ramdisk which boots to a minimal
+userspace that configures the device and then uses pivot_root(8) to switch
+into it.
+
+For simple device mapper configurations, it is possible to boot directly
+using the following kernel command line:
+
+dm="<name> <uuid> <ro>,table line 1,...,table line n"
+
+name = the name to associate with the device
+	after boot, udev, if used, will use that name to label
+	the device node.
+uuid = may be 'none' or the UUID desired for the device.
+ro = may be "ro" or "rw".  If "ro", the device and device table will be
+	marked read-only.
+
+Each table line may be as normal when using the dmsetup tool except for
+two variations:
+1. Any use of commas will be interpreted as a newline
+2. Quotation marks cannot be escaped and cannot be used without
+   terminating the dm= argument.
+
+Unless renamed by udev, the device node created will be dm-0, as the
+first device-mapper minor number is used during early creation.
+
+Example
+=======
+
+- Booting to a linear array made up of user-mode linux block devices:
+
+  dm="lroot none 0, 0 4096 linear 98:16 0, 4096 4096 linear 98:32 0" \
+  root=/dev/dm-0
+
+This will boot to a rw dm-linear target of 8192 sectors split across two
+block devices identified by their major:minor numbers.  After boot, udev
+will rename this target to /dev/mapper/lroot (depending on the rules).
+No uuid was assigned.
diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
index fcbae6a..2e5c1ee 100644
--- a/Documentation/devicetree/bindings/arm/coresight.txt
+++ b/Documentation/devicetree/bindings/arm/coresight.txt
@@ -56,6 +56,8 @@
 	  layout using the generic DT graph presentation found in
 	  "bindings/graph.txt".
 
+	* coresight-name: unique descriptive name of the component.
+
 * Additional required properties for System Trace Macrocells (STM):
 	* reg: along with the physical base address and length of the register
 	  set as described above, another entry is required to describe the
@@ -70,9 +72,21 @@
 	* compatible: Currently supported value is (note the absence of the
 	  AMBA markee):
 		- "arm,coresight-replicator"
+		- "qcom,coresight-csr"
+		- "arm,coresight-cti"
+		- "qcom,coresight-tpda"
+		- "qcom,coresight-tpdm"
+		- "qcom,coresight-remote-etm"
+		- "qcom,coresight-hwevent"
+		- "qcom,coresight-dummy"
 
 	* port or ports: same as above.
 
+	* coresight-name: unique descriptive name of the component.
+
+* Optional properties for all components:
+	* reg-names: names corresponding to each reg property value.
+
 * Optional properties for ETM/PTMs:
 
 	* arm,cp14: must be present if the system accesses ETM/PTM management
@@ -86,6 +100,48 @@
 	* arm,buffer-size: size of contiguous buffer space for TMC ETR
 	 (embedded trace router)
 
+	* arm,default-sink: represents the default compile time CoreSight sink
+
+	* coresight-ctis: represents flush and reset CTIs for TMC buffer
+
+	* qcom,force-reg-dump: enables TMC reg dump support
+
+	* arm,sg-enable : indicates whether scatter gather feature is enabled
+	  by default for TMC ETR configuration.
+
+* Required property for TPDAs:
+
+	* qcom,tpda-atid: must be present. Specifies the ATID for TPDA.
+
+* Optional properties for TPDAs:
+
+	* qcom,bc-elem-size: specifies the BC element size supported by each
+	  monitor connected to the aggregator on each port. Should be specified
+          in pairs (port, bc element size).
+
+	* qcom,tc-elem-size: specifies the TC element size supported by each
+	  monitor connected to the aggregator on each port. Should be specified
+	  in pairs (port, tc element size).
+
+	* qcom,dsb-elem-size: specifies the DSB element size supported by each
+	  monitor connected to the aggregator on each port. Should be specified
+	  in pairs (port, dsb element size).
+
+	* qcom,cmb-elem-size: specifies the CMB element size supported by each
+	  monitor connected to the aggregator on each port. Should be specified
+	  in pairs (port, cmb element size).
+
+* Optional properties for TPDM:
+
+	* qcom,clk-enable: specifies whether additional clock bit needs to be
+	  set for M4M TPDM.
+
+	* qcom,msr-fix-req: boolean, indicating if MSRs need to be programmed
+	  after enabling the subunit.
+
+* Required property for Remote ETMs:
+
+	* qcom,inst-id: must be present. QMI instance id for remote ETMs.
 
 Example:
 
@@ -202,6 +258,42 @@
 		};
 	};
 
+	tpda_mss: tpda@7043000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7043000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-mss";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_mss_out_funnel_in1: endpoint {
+					remote-endpoint =
+						<&funnel_in1_in_tpda_mss>;
+				};
+			};
+			port@1 {
+				reg = <0>;
+				tpda_mss_in_tpdm_mss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_mss_out_tpda_mss>;
+				};
+			};
+		};
+	};
+
 3. Sources
 	ptm@2201c000 {
 		compatible = "arm,coresight-etm3x", "arm,primecell";
@@ -247,5 +339,36 @@
 		};
 	};
 
+	tpdm_mss: tpdm@7042000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7042000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-mss";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port{
+			tpdm_mss_out_tpda_mss: endpoint {
+				remote-endpoint = <&tpda_mss_in_tpdm_mss>;
+			};
+		};
+	};
+
+4. CTIs
+	cti0: cti@6010000 {
+		compatible = "arm,coresight-cti";
+		reg = <0x6010000 0x1000>;
+		reg-names = "cti-base";
+
+		coresight-name = "coresight-cti0";
+
+		clocks = <&clock_gcc clk_qdss_clk>,
+			 <&clock_gcc clk_qdss_a_clk>;
+		clock-names = "core_clk", "core_a_clk";
+	};
+
 [1]. There is currently two version of STM: STM32 and STM500.  Both
 have the same HW interface and as such don't need an explicit binding name.
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_config_qos.txt b/Documentation/devicetree/bindings/arm/msm/glink_config_qos.txt
new file mode 100644
index 0000000..d9c8a00
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_config_qos.txt
@@ -0,0 +1,22 @@
+Qualcomm Technologies, Inc. G-link QoS Configuration
+
+Required properties:
+-compatible : should be "qcom,glink-qos-config"
+-qcom,flow-info : A table of MTU transmission time specific for a power state.
+		The total number of entries in the table should be equal to the
+		number of supported flows.
+-qcom,mtu-size : The MTU size for which the qos elements are configured.
+-qcom,tput-stats-cycle: The number of allowable cycles for a packet to be
+			transmitted without its priority being re-evaluated.
+
+Example:
+
+	qcom,glink-qos-config-adsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_mailbox_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_mailbox_xprt.txt
new file mode 100644
index 0000000..acf98c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_mailbox_xprt.txt
@@ -0,0 +1,35 @@
+Qualcomm Technologies, Inc. G-link Mailbox Transport
+
+Required properties:
+-compatible : should be "qcom,glink-mailbox-xprt"
+-reg : the mailbox register to store the location of the fifo
+	the mailbox register to store the size of the fifo
+	the irq register base address for triggering interrupts
+	the register to enable sending interrupts
+	the register to reset the rx irq line
+-reg-names : "mbox-loc-addr" - string to identify the mailbox location reg
+		"mbox-loc-size" - string to identify the mailbox size reg
+		"irq-reg-base" - string to identify the irq register region
+		"irq-rx-reset" - string to identify the rx irq reset register
+-qcom,irq-mask : the bitmask to trigger an interrupt
+-interrupts : the receiving interrupt line
+-label : the name of the subsystem this link connects to
+-qcom,tx-ring-size: size of the transmit ring buffer in bytes
+-qcom,rx-ring-size: size of the receive ring buffer in bytes
+
+Example:
+
+	qcom,glink-mailbox-xprt-spss@1d05008 {
+		compatible = "qcom,glink-mailbox-xprt";
+		reg = <0x1d05008 0x8>,
+			<0x1d05010 0x4>,
+			<0x1d06004 0x4>,
+			<0x1d06020 0x4>;
+		reg-names = "mbox-loc-addr", "mbox-loc-size", "irq-reg-base",
+			"irq-rx-reset";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <0 25 4>;
+		label = "spss";
+		qcom,tx-ring-size = <0x400>;
+		qcom,rx-ring-size = <0x400>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_rpm_native_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_rpm_native_xprt.txt
new file mode 100644
index 0000000..2da6f2a
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_rpm_native_xprt.txt
@@ -0,0 +1,21 @@
+Qualcomm Technologies, Inc. G-link RPM Native Transport
+
+Required properties:
+-compatible : should be "qcom,glink-rpm-native-xprt"
+-reg : the location and size of RPM message RAM
+	the irq register base address for triggering interrupts
+-reg-names : "msgram" - string to identify the RPM message RAM region
+		"irq-reg-base" - string to identify the irq register region
+-qcom,irq-mask : the bitmask to trigger an interrupt
+-interrupts : the receiving interrupt line
+
+Example:
+
+	qcom,glink-rpm-native-xprt@68000 {
+		compatible = "qcom,glink-rpm-native-xprt";
+		reg = <0x68000 0x8000>,
+			<0xfa006008 0x4>;
+		reg-names = "msgram", "irq-reg-base";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <0 25 1>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
new file mode 100644
index 0000000..f68c8e4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_smem_native_xprt.txt
@@ -0,0 +1,54 @@
+Qualcomm Technologies, Inc. G-link SMEM Native Transport
+
+Required properties:
+-compatible : should be "qcom,glink-smem-native-xprt"
+-reg : the location and size of shared memory
+	the irq register base address for triggering interrupts
+-reg-names : "smem" - string to identify the shared memory region
+		"irq-reg-base" - string to identify the irq register region
+-qcom,irq-mask : the bitmask to trigger an interrupt
+-interrupts : the receiving interrupt line
+-label : the name of the subsystem this link connects to
+
+Optional properties:
+-qcom,qos-config: Reference to the qos configuration elements. It depends on
+		qcom,ramp-time.
+-qcom,ramp-time: Worst case time in microseconds to transition to this power
+		state. Power states are numbered by array index position.
+
+Example:
+
+	qcom,glink-smem-native-xprt-modem@fa00000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0xfa00000 0x200000>,
+			<0xfa006008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <0 25 1>;
+		label = "mpss";
+	};
+
+	qcom,glink-smem-native-xprt-adsp@fa00000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0xfa00000 0x200000>,
+			<0xfa006008 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <0 25 1>;
+		label = "lpass";
+		qcom,qos-config = <&glink_qos_adsp>;
+		qcom,ramp-time = <0x10>,
+				     <0x20>,
+				     <0x30>,
+				     <0x40>;
+	};
+
+	glink_qos_adsp: qcom,glink-qos-config-adsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt b/Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt
new file mode 100644
index 0000000..0a78eb6
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_spi_xprt.txt
@@ -0,0 +1,44 @@
+Qualcomm Technologies, Inc. G-link SPI Transport
+
+Required properties:
+-compatible : should be "qcom,glink-spi-xprt".
+-label : the name of the subsystem this link connects to.
+
+Optional properties:
+-qcom,remote-fifo-config: Reference to the FIFO configuration in the remote
+			processor.
+-qcom,qos-config: Reference to the qos configuration elements. It depends on
+		qcom,ramp-time.
+-qcom,ramp-time: Worst case time in microseconds to transition to this power
+		state. Power states are numbered by array index position.
+
+Example:
+
+	glink_spi_xprt_wdsp: qcom,glink-spi-xprt-wdsp {
+		compatible = "qcom,glink-spi-xprt";
+		label = "wdsp";
+		qcom,remote-fifo-config = <&glink_fifo_wdsp>;
+		qcom,qos-config = <&glink_qos_wdsp>;
+		qcom,ramp-time = <0x10>,
+				     <0x20>,
+				     <0x30>,
+				     <0x40>;
+	};
+
+	glink_fifo_wdsp: qcom,glink-fifo-config-wdsp {
+		compatible = "qcom,glink-fifo-config";
+		qcom,out-read-idx-reg = <0x12000>;
+		qcom,out-write-idx-reg = <0x12004>;
+		qcom,in-read-idx-reg = <0x1200C>;
+		qcom,in-write-idx-reg = <0x12010>;
+	};
+
+	glink_qos_wdsp: qcom,glink-qos-config-wdsp {
+		compatible = "qcom,glink-qos-config";
+		qcom,flow-info = <0x80 0x0>,
+				 <0x70 0x1>,
+				 <0x60 0x2>,
+				 <0x50 0x3>;
+		qcom,mtu-size = <0x800>;
+		qcom,tput-stats-cycle = <0xa>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/glink_ssr.txt b/Documentation/devicetree/bindings/arm/msm/glink_ssr.txt
new file mode 100644
index 0000000..5a0ac7c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glink_ssr.txt
@@ -0,0 +1,36 @@
+Qualcomm Technologies, Inc. G-Link SSR
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,glink_ssr"
+-label : The name of this subsystem.
+-qcom,edge : The name of the edge to this subsystem.
+-qcom,notify-edges : Reference to other subsystems to notify when this
+	subsystem goes down.
+
+Optional properties:
+-qcom,xprt : The name of the transport on which to notify this subsystem.
+
+Example:
+
+	glink_mpss: qcom,glink-ssr-modem {
+		compatible = "qcom,glink_ssr";
+		label = "modem";
+		qcom,edge = "mpss";
+		qcom,notify-edges = <&glink_adsp>, <&glink_wcnss>;
+		qcom,xprt = "smem";
+	};
+
+	glink_adsp: qcom,glink-ssr-adsp {
+		compatible = "qcom,glink_ssr";
+		label = "adsp";
+		qcom,edge = "adsp";
+		qcom,notify-edges = <&glink_mpss>, <&glink_wcnss>;
+	};
+
+	glink_wcnss: qcom,glink-ssr-wcnss {
+		compatible = "qcom,glink_ssr";
+		label = "wcnss";
+		qcom,edge = "wcnss";
+		qcom,notify-edges = <&glink_mpss>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/glinkpkt.txt b/Documentation/devicetree/bindings/arm/msm/glinkpkt.txt
new file mode 100644
index 0000000..b5c660c8
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/glinkpkt.txt
@@ -0,0 +1,40 @@
+Qualcomm Technologies, Inc. G-Link Packet Driver (glinkpkt)
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,glinkpkt"
+
+[Second level nodes]
+qcom,glinkpkt-channel-names
+Required properties:
+-qcom,glinkpkt-transport : the glinkpkt transport layer
+-qcom,glinkpkt-edge : the remote subsystem name
+-qcom,glinkpkt-ch-name : the glink channel name
+-qcom,glinkpkt-dev-name : the glinkpkt device name
+
+Example:
+
+         qcom,glink_pkt {
+                 compatible = "qcom,glinkpkt";
+
+                 qcom,glinkpkt-at-mdm0 {
+                         qcom,glinkpkt-transport = "smd_trans";
+                         qcom,glinkpkt-edge = "mpss";
+                         qcom,glinkpkt-ch-name = "DS";
+                         qcom,glinkpkt-dev-name = "at_mdm0";
+                 };
+
+                 qcom,glinkpkt-loopback-cntl {
+                         qcom,glinkpkt-transport = "lloop";
+                         qcom,glinkpkt-edge = "local";
+                         qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+                         qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+                 };
+
+                 qcom,glinkpkt-loopback-data {
+                         qcom,glinkpkt-transport = "lloop";
+                         qcom,glinkpkt-edge = "local";
+                         qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+                         qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+                 };
+         };
diff --git a/Documentation/devicetree/bindings/arm/msm/imem.txt b/Documentation/devicetree/bindings/arm/msm/imem.txt
new file mode 100644
index 0000000..d1f8ce1
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/imem.txt
@@ -0,0 +1,107 @@
+Qualcomm IMEM
+
+IMEM is fast on-chip memory used for various debug features and DMA
+transactions.
+
+Required properties
+
+-compatible: "qcom,msm-imem"
+-reg: start address and size of imem memory
+
+If any children nodes exist the following properties are required:
+-#address-cells: should be 1
+-#size-cells: should be 1
+-ranges: A triplet that includes the child address, parent address, &
+	 length.  The child address is assumed to be 0.
+
+Child nodes:
+------------
+
+Peripheral Image Loader (pil):
+------------------------------
+Required properties:
+-compatible: "qcom,msm-imem-pil"
+-reg: start address and size of PIL region in imem
+
+Bootloader Stats:
+-----------------
+Required properties:
+-compatible: "qcom,msm-imem-boot_stats"
+-reg: start address and size of boot_stats region in imem
+
+Cache error reporting:
+-----------------
+Required properties:
+-compatible: "qcom,msm-imem-cache_erp"
+-reg: start address and size of cache_erp region in imem
+
+Memory Dump:
+------------
+Required properties:
+-compatible: "qcom,msm-imem-mem_dump_table"
+-reg: start address and size of mem_dump_table region in imem
+
+Restart Reason:
+---------------
+Required properties:
+-compatible: "qcom,msm-imem-restart_reason"
+-reg: start address and size of restart_reason region in imem
+
+Download Mode:
+--------------
+Required properties:
+-compatible: "qcom,msm-imem-download_mode"
+-reg: start address and size of download_mode region in imem
+
+Emergency Download Mode:
+------------------------
+Required properties:
+-compatible: "qcom,msm-imem-emergency_download_mode"
+-reg: start address and size of emergency_download_mode region in imem
+
+USB Diag Cookies:
+-----------------
+Memory region used to store USB PID and serial numbers to be used by
+bootloader in download mode.
+
+Required properties:
+-compatible: "qcom,msm-imem-diag-dload"
+-reg: start address and size of USB Diag download mode region in imem
+
+Example:
+
+	qcom,msm-imem {
+		compatible = "qcom,msm-imem";
+		reg = <0xdeadbeef 0x1000>; /* < start_address size > */
+		ranges = <0x0 0xdeadbeef 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		download_mode@0 {
+			compatible = "qcom,msm-imem-download_mode";
+			reg = <0x0 8>;
+		};
+
+		restart_reason@65c {
+			compatible = "qcom,msm-imem-restart_reason";
+			reg = <0x65c 4>;
+		};
+
+		imem_cache_erp: cache_erp@6a4 {
+			compatible = "qcom,msm-imem-cache_erp";
+			reg = <0x6a4 4>;
+		};
+
+		boot_stats@6b0 {
+			compatible = "qcom,msm-imem-boot_stats";
+			reg = <0x6b0 32>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+
+		emergency_download_mode@fe0 {
+			compatible = "qcom,msm-imem-emergency_download_mode";
+			reg = <0xfe0 12>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
new file mode 100644
index 0000000..625b9d0
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/ipc-spinlock.txt
@@ -0,0 +1,27 @@
+Qualcomm Technologies, Inc. Interprocessor Communication Spinlock
+
+--Dedicated Hardware Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-sfpb"
+- reg : the location and size of the spinlock hardware
+- qcom,num-locks : the number of locks supported
+
+Example:
+
+	qcom,ipc-spinlock@fd484000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0xfd484000 0x1000>;
+		qcom,num-locks = <32>;
+	};
+
+--LDREX Implementation--
+Required properties:
+- compatible : should be "qcom,ipc-spinlock-ldrex"
+- reg : the location and size of the shared lock memory
+
+Example:
+
+	qcom,ipc-spinlock@fa00000 {
+		compatible = "qcom,ipc-spinlock-ldrex";
+		reg = <0xfa00000 0x200000>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
new file mode 100644
index 0000000..6ddc725
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/mdm-modem.txt
@@ -0,0 +1,148 @@
+Attached MDM Modem Devices
+
+External modems are devices that are attached to the msm and controlled by gpios.
+There is also a data channel between the msm and the external modem that sometimes needs
+to be reset.
+
+Required Properties:
+- compatible:	The bus devices need to be compatible with
+		"qcom,mdm2-modem", "qcom,ext-mdm9x25", "qcom,ext-mdm9x35", "qcom,ext-mdm9x45",
+		"qcom,ext-mdm9x55".
+
+Required named gpio properties:
+- qcom,mdm2ap-errfatal-gpio: gpio for the external modem to indicate to the apps processor
+		of an error fatal condition on the modem.
+- qcom,ap2mdm-errfatal-gpio: gpio for the apps processor to indicate to the external modem
+		of an error fatal condition on the apps processor.
+- qcom,mdm2ap-status-gpio: gpio to indicate to the apps processor when there is a watchdog
+		bite on the external modem.
+- qcom,ap2mdm-status-gpio: gpio for the apps processor to indicate to the modem that an apps
+		processor watchdog bite has occurred.
+- qcom,ap2mdm-soft-reset-gpio: gpio for the apps processor to use to soft-reset the external
+		modem. If the flags parameter has a value of 0x1 then the gpio is active LOW.
+
+Required Interrupts:
+- "err_fatal_irq": Interrupt generated on the apps processor when the error fatal gpio is pulled
+		high by the external modem.
+- "status_irq": Interrupt generated on the apps processor when the mdm2ap-status gpio falls low
+		on the external modem. This usually indicates a watchdog bite on the modem.
+- "plbrdy_irq": Interrupt generated on the apps processor when the mdm2ap-pblrdy gpio is pulled
+		either high or low by the external modem. This is an indication that the modem
+		has rebooted.
+- "mdm2ap_vddmin_irq": Interrupt generated on the apps processor when the external modem goes
+		into vddmin power state.
+
+Optional named gpio properties:
+- qcom,mdm2ap-pblrdy-gpio: gpio used by some external modems to indicate when the modem has
+		booted into the PBL bootloader.
+- qcom,ap2mdm-wakeup-gpio: gpio used by the apps processor to wake the external modem
+		out of a low power state.
+- qcom,ap2mdm-chnl-rdy-gpio: gpio used by the apps processor to inform the external modem
+		that data link is ready.
+- qcom,mdm2ap-wakeup-gpio: gpio from the external modem to the apps processor to wake it
+		out of a low power state.
+- qcom,ap2mdm-vddmin-gpio: gpio to indicate to the external modem when the apps processor
+		is about to enter vddmin power state.
+- qcom,mdm2ap-vddmin-gpio: gpio used by the external modem to inform the apps processor
+		when it is about to enter vddmin power state.
+- qcom,ap2mdm-kpdpwr-gpio: gpio used to simulate a power button press on the external
+		modem. Some modems use this as part of their initial power-up sequence.
+		If the "flags" parameter has a value of 0x1 then it is active LOW.
+- qcom,ap2mdm-pmic-pwr-en-gpio: Some modems need this gpio for the apps processor to enable
+		the pmic on the external modem.
+- qcom,use-usb-port-gpio: some modems use this gpio to switch a port connection from uart to usb.
+		This is used during firmware upgrade of some modems.
+- qcom,mdm-link-detect-gpio: some modems may support two interfaces. This gpio
+		indicates whether only one or both links can be used.
+
+Optional driver parameters:
+- qcom,ramdump-delay-ms: time in milliseconds to wait before starting to collect ramdumps.
+		This interval is the time to wait after an error on the external modem is
+		signaled to the apps processor before starting to collect ramdumps. Its
+		value depends on the type of external modem (e.g. MDM vs QSC), and how
+		error fatal handling is done on the modem.
+		The default value is 2 seconds (2000 milliseconds) as specified by the
+		mdm9x15 software developer. Consultation with the developer of the modem
+		software is required to determine this value for that modem.
+- qcom,ps-hold-delay-ms: minimum delay in milliseconds between consecutive PS_HOLD toggles.
+		SGLTE targets that use a QSC1215 modem require a minimum delay between consecutive
+		toggling of the PS_HOLD pmic input. For one target it is 500 milliseconds but it
+		may vary depending on the target and how the external modem is connected. The value
+		is specified by the hardware designers.
+- qcom,early-power-on: boolean flag to indicate whether to power on the modem when the device is probed.
+- qcom,sfr-query: boolean flag to indicate whether to query the modem for a reset reason.
+- qcom,no-powerdown-after-ramdumps: boolean flag to indicate whether to power down the modem after ramdumps.
+- qcom,no-a2m-errfatal-on-ssr: boolean to tell driver not to raise ap2mdm errfatal during SSR.
+- qcom,no-reset-on-first-powerup: boolean to tell driver not to reset the modem when first
+		powering up the modem.
+- qcom,ramdump-timeout-ms: ramdump timeout interval in milliseconds.
+		This interval is the time to wait for collection of the external modem's ramdump
+		to complete. Its value depends on the speed of the data connection between the
+		external modem and the apps processor on the platform. If the connection is a
+		UART port then this delay needs to be longer in order to avoid premature timeout
+		of the ramdump collection.
+		The default value is 2 minutes (120000 milliseconds) which is based on the
+		measured time it takes over a UART connection. It is reduced when the data
+		connection is an HSIC port. The value is usually tuned empirically for a
+		particular target.
+- qcom,image-upgrade-supported: boolean flag to indicate if software upgrade is supported.
+- qcom,support-shutdown: boolean flag to indicate if graceful shutdown is supported.
+- qcom,vddmin-drive-strength: drive strength in milliamps of the ap2mdm-vddmin gpio.
+		The ap2mdm_vddmin gpio is controlled by the RPM processor. It is pulled low
+		to indicate to the external modem that the apps processor has entered vddmin
+		state, and high to indicate the reverse. Its parameters are passed to the RPM
+		software from the HLOS because the RPM software has no way of saving this type
+		of configuration when an external modem is attached.
+		The value of the drive strength is specified by the hardware designers. A value
+		of 8 milliamps is typical.
+		This property is ignored if the property "qcom,ap2mdm-vddmin-gpio" is
+		not set.
+- qcom,vddmin-modes: a string indicating the "modes" requested for the ap2mdm-vddmin gpio.
+		This value is passed to RPM and is used by the RPM module to determine the
+		gpio mux function. The only currently supported mode string is "normal",
+		which corresponds to the value 0x03 that is passed to RPM.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,mdm-dual-link: Boolean indicating whether both links can be used for
+		communication.
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL service.
+- qcom,sysmon-id: platform device id that sysmon is probed with for the subsystem.
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+			   on behalf of the subsystem driver.
+
+Example:
+	mdm0: qcom,mdm0 {
+		compatible = "qcom,mdm2-modem";
+		cell-index = <0>;
+		#address-cells = <0>;
+		interrupt-parent = <&mdm0>;
+		interrupts = <0 1 2 3>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0xffffffff>;
+		interrupt-map =
+			<0 &msmgpio 82 0x3
+			1 &msmgpio 46 0x3
+			2 &msmgpio 80 0x3
+			3 &msmgpio 27 0x3>;
+		interrupt-names =
+			"err_fatal_irq",
+			"status_irq",
+			"plbrdy_irq",
+			"mdm2ap_vddmin_irq";
+
+		qcom,mdm2ap-errfatal-gpio = <&msmgpio 82 0x00>;
+		qcom,ap2mdm-errfatal-gpio = <&msmgpio 106 0x00>;
+		qcom,mdm2ap-status-gpio   = <&msmgpio 46 0x00>;
+		qcom,ap2mdm-status-gpio   = <&msmgpio 105 0x00>;
+		qcom,ap2mdm-soft-reset-gpio = <&msmgpio 24 0x00>;
+		qcom,mdm2ap-pblrdy-gpio = <&msmgpio 80 0x00>;
+		qcom,ap2mdm-wakeup-gpio = <&msmgpio 104 0x00>;
+		qcom,ap2mdm-vddmin-gpio = <&msmgpio 108 0x00>;
+		qcom,mdm2ap-vddmin-gpio = <&msmgpio 27 0x00>;
+
+		qcom,ramdump-delay-ms = <2000>;
+		qcom,ramdump-timeout-ms = <120000>;
+		qcom,vddmin-modes = "normal";
+		qcom,vddmin-drive-strength = <8>;
+		qcom,ssctl-instance-id = <10>;
+		qcom,sysmon-id = <20>;
+	};
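+
+Note that the interrupt-map entries in the example above point at the same
+msmgpio lines as the corresponding mdm2ap gpio properties:
+
+	0 -> msmgpio 82  ("err_fatal_irq",     qcom,mdm2ap-errfatal-gpio)
+	1 -> msmgpio 46  ("status_irq",        qcom,mdm2ap-status-gpio)
+	2 -> msmgpio 80  ("plbrdy_irq",        qcom,mdm2ap-pblrdy-gpio)
+	3 -> msmgpio 27  ("mdm2ap_vddmin_irq", qcom,mdm2ap-vddmin-gpio)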
diff --git a/Documentation/devicetree/bindings/arm/msm/mpm_counter.txt b/Documentation/devicetree/bindings/arm/msm/mpm_counter.txt
new file mode 100644
index 0000000..ab0d3a0
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/mpm_counter.txt
@@ -0,0 +1,18 @@
+* MSM MPM sleep counter (mpm-v2)
+
+The MPM provides a timetick that starts when the device is powered up and
+is not reset by any of the boot loaders or the HLOS. The MPM timetick counter
+driver provides an api to get this value.
+
+The required nodes for the MPM timetick counter driver are:
+
+- compatible: "qcom,mpm2-sleep-counter"
+- reg: Specifies the physical address of the timetick count register.
+- clock-frequency: the physical counter frequency.
+
+Example:
+	qcom,mpm2-sleep-counter@4a3000 {
+		compatible = "qcom,mpm2-sleep-counter";
+		reg = <0x4a3000 0x1000>;
+		clock-frequency = <32768>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm.txt b/Documentation/devicetree/bindings/arm/msm/msm.txt
new file mode 100644
index 0000000..bb6924f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm.txt
@@ -0,0 +1,296 @@
+* Qualcomm Technologies, Inc. MSM
+
+MSM uses a combination of DTS and DTSI files to describe the hardware on various
+SoCs and boards. Typically, a SoC-specific DTSI file describes the devices
+present on a given SoC, and a board-specific DTSI file describes the devices
+external to the SoC, although some targets may follow a more simplified
+approach. Additionally, the SoC-specific DTSI files may further consist of a
+base chip-specific file and a version-specific DTSI file, to facilitate reuse
+of device definitions among multiple revisions of the same SoC.
+
+Required properties:
+- compatible: Every device present on the MSM SoC shall have a 'qcom,' prefix
+  in its compatible string
+
+Example:
+restart@fc4ab000 {
+	compatible = "qcom,pshold";
+	reg = <0xfc4ab000 0x4>;
+};
+
+
+* Compatible strings:
+
+SoCs:
+
+- APQ8016
+  compatible = "qcom,apq8016"
+
+- APQ8026
+  compatible = "qcom,apq8026"
+
+- APQ8074
+  compatible = "qcom,apq8074"
+
+- APQ8084
+  compatible = "qcom,apq8084"
+
+- APQ8094
+  compatible = "qcom,apq8094"
+
+- APQ8096
+  compatible = "qcom,apq8096"
+
+- APQ8037
+  compatible = "qcom,apq8037"
+
+- APQ8017
+  compatible = "qcom,apq8017"
+
+- APQ8053
+  compatible = "qcom,apq8053"
+
+- MDM9630
+  compatible = "qcom,mdm9630"
+
+- MSM8226
+  compatible = "qcom,msm8226"
+
+- MSM8610
+  compatible = "qcom,msm8610"
+
+- MSM8909
+  compatible = "qcom,msm8909"
+
+- MSM8916
+  compatible = "qcom,msm8916"
+
+- MSM8917
+  compatible = "qcom,msm8917"
+
+- MSM8936
+  compatible = "qcom,msm8936"
+
+- MSM8960
+  compatible = "qcom,msm8960"
+
+- MSM8992
+  compatible = "qcom,msm8992"
+
+- MSM8994
+  compatible = "qcom,msm8994"
+
+- MSM8996
+  compatible = "qcom,msm8996"
+
+- MSMCOBALT
+  compatible = "qcom,msmcobalt"
+
+- MSMSKUNK
+  compatible = "qcom,msmskunk"
+
+- MSM8952
+  compatible = "qcom,msm8952"
+
+- APQ8052
+  compatible = "qcom,apq8052"
+
+- MSM8953
+  compatible = "qcom,msm8953"
+
+- MSM8937
+  compatible = "qcom,msm8937"
+
+- MDM9640
+  compatible = "qcom,mdm9640"
+
+- MDMCALIFORNIUM
+  compatible = "qcom,mdmcalifornium"
+
+- VPIPA
+  compatible = "qcom,msmvpipa"
+
+- MDM9607
+  compatible = "qcom,mdm9607"
+
+- APQ8009
+  compatible = "qcom,apq8009"
+
+Generic board variants:
+
+- CDP device:
+  compatible = "qcom,cdp"
+
+- MTP device:
+  compatible = "qcom,mtp"
+
+- FLUID device:
+  compatible = "qcom,fluid"
+
+- LIQUID device:
+  compatible = "qcom,liquid"
+
+- Dragonboard device:
+  compatible = "qcom,dragonboard"
+
+- SBC device:
+  compatible = "qcom,sbc"
+
+- SURF device:
+  compatible = "qcom,surf"
+
+- QRD device:
+  compatible = "qcom,qrd"
+
+- ADP device:
+  compatible = "qcom,adp"
+
+- Simulator device:
+  compatible = "qcom,sim"
+
+- RUMI device:
+  compatible = "qcom,rumi"
+
+
+
+Boards (SoC type + board variant):
+
+compatible = "qcom,apq8016"
+compatible = "qcom,apq8026-cdp"
+compatible = "qcom,apq8026-mtp"
+compatible = "qcom,apq8026-xpm"
+compatible = "qcom,apq8074-cdp"
+compatible = "qcom,apq8074-dragonboard"
+compatible = "qcom,apq8074-liquid"
+compatible = "qcom,apq8084-cdp"
+compatible = "qcom,apq8084-liquid"
+compatible = "qcom,apq8084-mtp"
+compatible = "qcom,apq8084-sbc"
+compatible = "qcom,apq8094-cdp"
+compatible = "qcom,apq8094-fluid"
+compatible = "qcom,apq8094-liquid"
+compatible = "qcom,apq8094-mtp"
+compatible = "qcom,apq8094-dragonboard"
+compatible = "qcom,apq8096-cdp"
+compatible = "qcom,apq8096-mtp"
+compatible = "qcom,apq8096-dragonboard"
+compatible = "qcom,apq8096-sbc"
+compatible = "qcom,apq8096-liquid"
+compatible = "qcom,apq8037-cdp"
+compatible = "qcom,apq8037-mtp"
+compatible = "qcom,apq8017-cdp"
+compatible = "qcom,apq8017-mtp"
+compatible = "qcom,apq8053-cdp"
+compatible = "qcom,apq8053-mtp"
+compatible = "qcom,mdm9630-cdp"
+compatible = "qcom,mdm9630-mtp"
+compatible = "qcom,mdm9630-sim"
+compatible = "qcom,msm8226-cdp"
+compatible = "qcom,msm8226-fluid"
+compatible = "qcom,msm8226-mtp"
+compatible = "qcom,msm8226-qrd"
+compatible = "qcom,msm8226-sim"
+compatible = "qcom,msm8610-cdp"
+compatible = "qcom,msm8610-mtp"
+compatible = "qcom,msm8610-qrd"
+compatible = "qcom,msm8610-rumi"
+compatible = "qcom,msm8610-sim"
+compatible = "qcom,msm8660-surf"
+compatible = "qcom,msm8909-cdp"
+compatible = "qcom,msm8909-mtp"
+compatible = "qcom,msm8909-qrd"
+compatible = "qcom,msm8909-rumi"
+compatible = "qcom,msm8909-sim"
+compatible = "qcom,msm8916-cdp"
+compatible = "qcom,msm8916-mtp"
+compatible = "qcom,msm8916-qrd-skuh"
+compatible = "qcom,msm8916-qrd-skuhf"
+compatible = "qcom,msm8916-qrd-skui"
+compatible = "qcom,msm8916-qrd-skuic"
+compatible = "qcom,msm8916-qrd-skuid"
+compatible = "qcom,msm8916-qrd-skut1"
+compatible = "qcom,msm8916-rumi"
+compatible = "qcom,msm8916-sim"
+compatible = "qcom,msm8917-cdp"
+compatible = "qcom,msm8917-mtp"
+compatible = "qcom,msm8917-rumi"
+compatible = "qcom,msm8917-qrd"
+compatible = "qcom,msm8917-qrd-sku5"
+compatible = "qcom,msm8926-cdp"
+compatible = "qcom,msm8926-mtp"
+compatible = "qcom,msm8926-qrd"
+compatible = "qcom,msm8936-cdp"
+compatible = "qcom,msm8936-mtp"
+compatible = "qcom,msm8939-cdp"
+compatible = "qcom,msm8939-mtp"
+compatible = "qcom,msm8939-qrd-skuk"
+compatible = "qcom,msm8939-qrd-skul"
+compatible = "qcom,msm8939-rumi"
+compatible = "qcom,msm8939-sim"
+compatible = "qcom,msm8960-cdp"
+compatible = "qcom,msm8974-cdp"
+compatible = "qcom,msm8974-fluid"
+compatible = "qcom,msm8974-liquid"
+compatible = "qcom,msm8974-mtp"
+compatible = "qcom,msm8974-rumi"
+compatible = "qcom,msm8974-sim"
+compatible = "qcom,msm8992-cdp"
+compatible = "qcom,msm8992-mtp"
+compatible = "qcom,msm8992-rumi"
+compatible = "qcom,msm8992-sim"
+compatible = "qcom,msm8994-cdp"
+compatible = "qcom,msm8994-fluid"
+compatible = "qcom,msm8994-liquid"
+compatible = "qcom,msm8994-mtp"
+compatible = "qcom,msm8994-rumi"
+compatible = "qcom,msm8994-sim"
+compatible = "qcom,msm8996-rumi"
+compatible = "qcom,msm8996-sim"
+compatible = "qcom,msm8996-cdp"
+compatible = "qcom,msm8996-dtp"
+compatible = "qcom,msm8996-fluid"
+compatible = "qcom,msm8996-liquid"
+compatible = "qcom,msm8996-mtp"
+compatible = "qcom,msm8996-adp"
+compatible = "qcom,msmcobalt-sim"
+compatible = "qcom,msmcobalt-rumi"
+compatible = "qcom,msmcobalt-cdp"
+compatible = "qcom,msmskunk-sim"
+compatible = "qcom,msmskunk-rumi"
+compatible = "qcom,msmskunk-cdp"
+compatible = "qcom,msmskunk-mtp"
+compatible = "qcom,msm8952-rumi"
+compatible = "qcom,msm8952-sim"
+compatible = "qcom,msm8952-qrd"
+compatible = "qcom,msm8952-qrd-skum"
+compatible = "qcom,msm8952-cdp"
+compatible = "qcom,msm8952-mtp"
+compatible = "qcom,apq8052-cdp"
+compatible = "qcom,apq8052-mtp"
+compatible = "qcom,msm8937-rumi"
+compatible = "qcom,msm8937-cdp"
+compatible = "qcom,msm8937-mtp"
+compatible = "qcom,msm8937-qrd"
+compatible = "qcom,msm8937-pmi8950-qrd-sku1"
+compatible = "qcom,msm8937-pmi8937-qrd-sku2"
+compatible = "qcom,msm8953-rumi"
+compatible = "qcom,msm8953-sim"
+compatible = "qcom,msm8953-cdp"
+compatible = "qcom,msm8953-mtp"
+compatible = "qcom,msm8953-qrd"
+compatible = "qcom,msm8953-qrd-sku3"
+compatible = "qcom,mdm9640-cdp"
+compatible = "qcom,mdm9640-mtp"
+compatible = "qcom,mdm9640-rumi"
+compatible = "qcom,mdm9640-sim"
+compatible = "qcom,msmvpipa-sim"
+compatible = "qcom,mdm9607-rumi"
+compatible = "qcom,mdm9607-cdp"
+compatible = "qcom,mdm9607-mtp"
+compatible = "qcom,mdmcalifornium-rumi"
+compatible = "qcom,mdmcalifornium-sim"
+compatible = "qcom,mdmcalifornium-cdp"
+compatible = "qcom,mdmcalifornium-mtp"
+compatible = "qcom,apq8009-cdp"
+compatible = "qcom,apq8009-mtp"
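+
+As a minimal sketch of how a board DTS typically combines these strings (the
+exact compatible list shown here is illustrative only, not taken from a
+shipping device tree):
+
+	/ {
+		model = "Qualcomm Technologies, Inc. MSM8996 MTP";
+		compatible = "qcom,msm8996-mtp", "qcom,msm8996";
+	};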
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
new file mode 100644
index 0000000..117907b
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus.txt
@@ -0,0 +1,258 @@
+MSM Bus Devices
+
+The bus devices (fabrics/NoCs) are the interconnects between various
+components on chipsets. These devices form the backbone of the chip
+topology. Entire topology of the chipset is built using the
+device-tree data of these bus devices.
+
+To add the bus devices, the following properties are required:
+
+compatible:		The bus devices need to be compatible with
+			msm-bus-fabric
+cell-id:		A 32 bit integer unique per bus per chipset. The IDs
+			for buses are in multiples of 1024.
+label:			Bus name
+qcom,fabclk-dual:	Dual set (active/sleep) bus clock name
+qcom,fabclk-active:	Active set bus clock name
+qcom,nfab:		An integer property which specifies the total number
+			of buses on the chipset.
+
+The following properties are optional as a bus might not support
+these features:
+
+qcom,ntieredslaves:	Number of tiered slaves on the bus.
+qcom,qos-freq:		QoS frequency (In KHz)
+qcom,hw-sel:		A string which decides whether QoS data
+			should be sent to RPM, set using BIMC or NoCs.
+			It can be set to "RPM", "NoC" or "BIMC".
+qcom,qos-baseoffset:	Base address offset of QoS registers from the bus device
+			base address.
+qcom,qos-delta:	 	Address delta between QoS registers of different masters.
+qcom,rpm-en:		A boolean flag indicating whether RPM transactions are
+			supported for nodes of the bus.
+qcom,ahb:		A boolean flag indicating whether the bus is ahb type.
+qcom,virt:		A boolean property indicating this is a virtual bus.
+reg:			Register space of the bus device. Not required in case
+			the bus is virtual.
+qcom,nr-lim-thresh:	The threshold below which to apply throttling of
+			non-real-time masters.
+qcom,eff-fact:		The DDR efficiency factor to be assumed. This only
+			comes into play for buses that connect to the DDR.
+
+
+The following properties are optional as collecting data via coresight might
+not be supported for every bus. The documentation for coresight properties
+can be found in:
+Documentation/devicetree/bindings/coresight/coresight.txt
+
+coresight-id		Unique integer identifier for the bus.
+coresight-name		Unique descriptive name of the bus.
+coresight-nr-inports	Number of input ports on the bus.
+coresight-outports	List of output port numbers on the bus.
+coresight-child-list	List of phandles pointing to the children of this
+			component.
+coresight-child-ports	List of input port numbers of the children.
+
+
+Any interconnect on the bus is represented as a child node.
+A child node can be of type: master, slave or a gateway.
+A gateway is an interconnect between buses and can be of both
+master and slave type.
+
+The following properties are available to characterize a child node.
+The properties can be chosen depending on the type of child node.
+
+cell-id:		For a master the ID is between 0 - 512
+			For a slave the ID is between 512 - 1024
+label:			Name of the master/slave/gateway
+qcom,masterp:		Hardware master port number(s)
+qcom,tier:		The tier to which a master/slave belongs.
+			Note that tiering might not be supported on
+			all architectures.
+qcom,hw-sel:		A string which decides whether QoS data should be sent
+			to RPM, set using BIMC or NoCs.
+			It can be set to "RPM", "NoC" or "BIMC".
+qcom,mode:		Used for masters on NoC/BIMC. Indicates which of the
+			four modes (Fixed/Limiter/Bypass/Regulator) the master
+			belongs to.
+qcom,perm-mode:		Permissible mode switches. Indicates which of the four
+			modes are supported by the master node. Generally,
+			modes are set at boot-up and not switched at run-time.
+qcom,qport:		QoS port number. This can be different from the
+			master-port number.
+qcom,ws:		Window size (in Hz), used for NoC/BIMC masters to
+			calculate saturation values.
+qcom,mas-hw-id:		A unique hardware ID agreed upon by processors across
+			the system. This ID is assigned to every master. It can
+			be used to send master specific data from
+			Apps/Modem/LPASS to RPM.
+qcom,slv-hw-id:		A unique hardware ID agreed upon by processors across
+			the system. This ID is assigned to every slave. It can
+			be used to send slave specific data from
+			Apps/Modem/LPASS to RPM.
+qcom,slaveclk-dual:	Dual set (active/sleep) slave clock name
+qcom,slaveclk-active:	Active set slave clock name
+qcom,gateway:		Flag indicating whether a particular node is a gateway.
+qcom,slavep:		Hardware slave port number(s).
+qcom,buswidth:		Width of the interconnect between a node and the bus.
+			(In Bytes).
+qcom,prio-rd:		Read priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio-wr:		Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0:		Priority low signal for a NoC bus master
+			(Can be 0/1/2).
+qcom,prio1:		Priority high signal for a NoC bus master
+			(Can be 0/1/2)
+qcom,dual-conf:		Indicates whether a BIMC/NoC master can be configured
+			in multiple modes at run-time. (Boolean)
+qcom,mode-thresh:	Threshold mode for a BIMC/NoC master. Beyond a certain
+			threshold frequency, a threshold mode can be used.
+			(Can be Fixed/Limiter/Bypass/Regulator)
+qcom,bimc,bw:		Bandwidth limit for a BIMC master using dual modes.
+			This bandwidth is used to calculate Grant count and
+			other parameters used in Limiter and Regulator modes
+			for static BKE configuration. It is defined in KBytes/s.
+qcom,bimc,gp:		Grant Period for configuring a master in limiter
+			mode. This is an integer value in nano-seconds.
+qcom,bimc,thmp:		Medium threshold percentage for BIMC masters.
+			This percentage is used to calculate medium threshold
+			value for BIMC Masters in Limiter mode for static
+			configuration. This can be any integer value between
+			1 and 100.
+qcom,thresh:		Beyond this threshold frequency, the mode usage is
+			switched from mode specified by property qcom,mode
+			to the one specified by qcom,mode-thresh. These thresholds
+			should be set up in increasing order, so that the
+			requested IB is evaluated at each threshold level before
+			deciding to switch QoS modes and applying the
+			corresponding qcom,bimc,bw limiting bandwidth as needed.
+			This is specified in KBytes/s.
+qcom,rt-mas:		Indicates if a master node is a realtime master with
+			hard deadlines.
+qcom,nr-lim:		Indicates that this is non-real time master which can
+			be throttled in case of concurrent scenarios.
+qcom,floor-bw:		Represents the floor bandwidth below which this master
+			cannot be throttled. This floor bandwidth is specified in
+			KBytes/s.
+qcom,ff:		The fudge factor used by clients when voting for
+			bandwidth from the node.
+
+
+
+Example:
+
+
+	msm-mmss-noc@fc478000 {
+		compatible = "msm-bus-fabric";
+		reg = <0xfc478000 0x00004000>;
+		cell-id = <2048>;
+		label = "msm_mmss_noc";
+		qcom,fabclk-dual = "bus_clk";
+		qcom,fabclk-active = "bus_a_clk";
+		qcom,ntieredslaves = <0>;
+		qcom,qos-freq = <4800>;
+		qcom,hw-sel = "NoC";
+		qcom,rpm-en;
+		qcom,nfab = <6>;
+
+		mas-gfx3d {
+			cell-id = <26>;
+			label = "mas-gfx3d";
+			qcom,masterp = <2 3>;
+			qcom,tier = <2>;
+			qcom,hw-sel = "NoC";
+			qcom,perm-mode = "Bypass";
+			qcom,mode = "Bypass";
+			qcom,ws = <10000>;
+			qcom,qport = <2 3>;
+			qcom,mas-hw-id = <6>;
+		};
+
+		mas-jpeg {
+			cell-id = <62>;
+			label = "mas-jpeg";
+			qcom,masterp = <4>;
+			qcom,tier = <2>;
+			qcom,hw-sel = "NoC";
+			qcom,perm-mode = "Bypass";
+			qcom,mode = "Bypass";
+			qcom,qport = <0>;
+			qcom,ws = <10000>;
+			qcom,mas-hw-id = <7>;
+		};
+	};
+
+	msm-bimc@0xfc380000 {
+		compatible = "msm-bus-fabric";
+		reg = <0xfc380000 0x0006A000>;
+		cell-id = <0>;
+		label = "msm_bimc";
+		qcom,fabclk-dual = "mem_clk";
+		qcom,fabclk-active = "mem_a_clk";
+		qcom,ntieredslaves = <0>;
+		qcom,qos-freq = <19200>;
+		qcom,hw-sel = "BIMC";
+		qcom,rpm-en;
+
+		coresight-id = <55>;
+		coresight-name = "coresight-bimc";
+		coresight-nr-inports = <0>;
+		coresight-outports = <0>;
+		coresight-child-list = <&funnel_in1>;
+		coresight-child-ports = <3>;
+
+		mas-ampss-m0 {
+			cell-id = <1>;
+			label = "mas-ampss-m0";
+			qcom,masterp = <0>;
+			qcom,tier = <2>;
+			qcom,hw-sel = "BIMC";
+			qcom,mode = "Limiter";
+			qcom,qport = <0>;
+			qcom,ws = <10000>;
+			qcom,mas-hw-id = <0>;
+			qcom,prio-rd = <0>;
+			qcom,prio-wr = <0>;
+			qcom,mode-thresh = "Fixed";
+			qcom,thresh = <2000000>;
+			qcom,dual-conf;
+			qcom,bimc,bw = <300000>;
+			qcom,bimc,gp = <5>;
+			qcom,bimc,thmp = <50>;
+		};
+	};
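+
+To illustrate the threshold handling in the mas-ampss-m0 node above: the master
+starts in "Limiter" mode with a static BKE bandwidth of 300000 KBytes/s
+(qcom,bimc,bw); once the requested IB crosses qcom,thresh = 2000000 KBytes/s
+(roughly 2 GBytes/s), the QoS mode switches to the "Fixed" mode named by
+qcom,mode-thresh.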
+
+
+
+
+The bus scaling driver also provides the ability to configure
+bus performance parameters across the entire chip-set.
+Various clients use MSM scaling APIs to request bandwidth
+between multiple master-slave pairs. The bus driver then finds
+the optimal path between the master and the slave, and aggregates
+the bandwidth and clock requests for all master-slave pairs on
+that path, and programs hardware accordingly.
+
+The device-tree data required for bus-scaling can be embedded within
+the clients' device nodes. The clients can register with the bus driver
+using the following properties:
+
+- qcom,msm-bus,name:		String representing the client-name
+- qcom,msm-bus,num-cases:	Total number of usecases
+- qcom,msm-bus,active-only:	Boolean context flag for requests in active or
+				dual (active & sleep) context
+- qcom,msm-bus,num-paths:	Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:	Arrays of unsigned integers representing:
+				master-id, slave-id, arbitrated bandwidth
+				in KBps, instantaneous bandwidth in KBps
+
+Example:
+
+	qcom,msm-bus,name = "client-name";
+	qcom,msm-bus,num-cases = <3>;
+	qcom,msm-bus,active-only;
+	qcom,msm-bus,num-paths = <2>;
+	qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <26 512 0 0>,
+			<22 512 320000 3200000>, <26 512 3200000 3200000>,
+			<22 512 160000 1600000>, <26 512 1600000 1600000>;
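+
+In this example the vector list carries qcom,msm-bus,num-cases x
+qcom,msm-bus,num-paths = 3 x 2 = 6 four-integer tuples, i.e. one
+<master-id slave-id ab ib> entry per path, repeated for each usecase.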
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
new file mode 100644
index 0000000..96e42c5
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_adhoc.txt
@@ -0,0 +1,231 @@
+MSM Bus Devices for adhoc bus topologies
+
+Buses are the interconnects between various devices. The devices are
+connected in different topologies. The bus scaling driver accepts
+bandwidth requests from clients and ensures that the bandwidth requests
+can be met between the source and destination for that client.
+In order to accept and honor bandwidth requests the bus scaling driver
+needs to know about the bus topology.
+This device tree binding represents the bus devices in the SOC, their
+connections to other bus devices and the resources associated with each
+node. The bus scaling driver uses this device tree to setup the bus
+topology in order to apply client bandwidth requests.
+
+The mandatory properties for bus driver are:
+
+compatible:		"qcom,msm-bus-device"
+
+The register space associated with a bus device is represented with
+the following optional properties:
+reg:			Register space for a bus device.
+reg-names:		Name of the register space for the bus device.
+
+The child nodes represent the devices on the bus.
+
+The following properties are mandatory for a child node
+
+cell-id:		The unique device id of the child node.
+			For a master the ID is between 0 - 512
+			For a slave the ID is between 512 - 1024
+			For internal nodes the range is > 10000
+			The range of ids for the different types of child
+			devices are chosen for convenience, the only
+			requirement is that the IDs be unique among the
+			child devices.
+label:			Unique name of the device.
+
+The following are optional properties for child nodes:
+
+
+qcom,fab-dev:		Optional boolean parameter that states if the device
+			is a fabric device or not.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,bypass-qos-prg:	Optional debug parameter to avoid programming the QoS
+			HW registers for a given fabric device.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,base-name:		Parameter that specifies the physical base address for
+			accessing registers associated with the child device.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,base-offset:	Parameter that gives the offset from the base address to access
+			the QoS registers.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,qos-off:		Parameter that represents the delta between QoS register address
+			space for different devices.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+qcom,agg-scheme:	Parameter that represents the aggregation scheme to be used for the
+			node. This parameter defaults to LEGACY scheme. The valid options
+			are LEGACY/SCHEME_1.
+qcom,util-fact:		Parameter that represents the DDR utilization factor to be used in
+			LEGACY scheme. It is represented as actual util-factor * 100.
+qcom,vrail-comp:	Parameter that represents the voltage rail compensation to push
+			the bus to the next level if needed in LEGACY and SCHEME 1 aggregation
+			schemes. It is represented as actual vrail-comp * 100.
+qcom,util-levels:	Array of tuples that represent a bandwidth threshold and util factor
+			to be used up to the given threshold.
+qcom,bus-type:		Parameter that represents the bus type such as BIMC or NOC.
+			Typically these optional properties are used for
+			devices that represent fabric devices.
+bus-gdsc-supply:	Optional fabric device parameter that is a reference to the dual
+			context GDSC supply that is needed before clock operations.
+bus-a-gdsc-supply:	Optional fabric device parameter that is a reference to an active
+			only context GDSC supply that is needed before clock operations.
+bus-qos-gdsc-supply:	Optional node or fabric device parameter that is a reference to a GDSC
+			supply that is needed before use of the clock needed to program
+			QoS registers.
+node-gdsc-supply:	Optional node device parameter that is a reference to a GDSC supply
+			that is needed before node-clock operations.
+qcom,enable-only-clk:	Optional property that indicates the clock doesn't support
+			the clk_set_rate API and should only be enabled/disabled.
+qcom,setrate-only-clk:	Optional property that indicates that the bus driver should only
+			set a rate on a clock handle and not call the enable/disable
+			clock API.
+clock-names:		Optional property that represents the clock name associated
+			with the device "bus_clk", "bus_a_clk";
+clocks:			Property pair that represents the clock controller and the clock
+			id. This in combination with the clock-name is used to obtain
+			the handle for the clock associated with this device.
+qcom,virt-dev:		Parameter used for devices that represent virtual devices. Virtual
+			devices aren't real devices on the SOC but are used to aggregate
+			resources in some special cases.
+qcom,qport:		The offset index into the master's QoS register space.
+qcom,num-ports:		The number of ports that the device has.
+qcom,ap-owned:		Property that states if the device is "owned" by the Apps processor.
+			If true then the AP will program the QoS registers for the device
+			else it is done by RPM.
+qcom,connections:	An array of phandles that represent the devices this device is connected to.
+qcom,bus-dev:		Phandle that represents the fabric device that this child node belongs to.
+qcom,qos-mode:		QoS mode to be programmed for this device, only applicable for AP owned resource.
+qcom,prio-rd:		Read priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio-wr:		Write priority for a BIMC bus master (Can be 0/1/2)
+qcom,prio0:		Priority low signal for a NoC bus master
+			(Can be 0/1/2).
+qcom,prio1:		Priority high signal for a NoC bus master
+			(Can be 0/1/2).
+qcom,reg-prio1:		Regulator mode priority high signal for a NoC bus master
+			if the master port is in regulator QoS mode.
+			(Can be 0/1/2).
+qcom,reg-prio0:		Regulator mode priority low signal for a NoC bus master
+			if the master port is in regulator QoS mode.
+			(Can be 0/1/2).
+qcom,bw_buffer:		Optional parameter in KBytes used to specify a buffer value that should be added to
+			the voted bandwidth value to figure out the limiting bandwidth for a master port.
+qcom,buswidth:		The buswidth at the device, default is 8 bytes.
+qcom,mas-rpm-id:	For non-AP owned device this is the RPM id for devices that are bus masters.
+			This is the id that is used when sending a message to RPM for this device.
+qcom,slv-rpm-id:	For non-AP owned device this is the RPM id for devices that are bus slaves.
+			This is the id that is used when sending a message to RPM for this device.
+qcom,blacklist:         An array of phandles that represent devices that this device
+			cannot connect to either directly or via any number of
+			intermediate nodes.
+qcom,agg-ports:		The number of aggregation ports on the bus.
+
+The following properties are optional, as collecting data via coresight might
+not be supported for every bus, and are present on child nodes that represent
+NOC devices. The documentation
+for coresight properties can be found in:
+Documentation/devicetree/bindings/coresight/coresight.txt
+
+coresight-id		Unique integer identifier for the bus.
+coresight-name		Unique descriptive name of the bus.
+coresight-nr-inports	Number of input ports on the bus.
+coresight-outports	List of output port numbers on the bus.
+coresight-child-list	List of phandles pointing to the children of this
+			component.
+coresight-child-ports	List of input port numbers of the children.
+
+The following sub-nodes are optional parameters:
+
+qcom,node-qos-clks:	Optional node listing all the clocks and regulators required for programming of
+			QoS registers. Usually these are associated with fabric nodes.
+	clock-names:	An array of clock names for QoS programming,
+	clocks:		An array of clock phandles corresponding to the clock names listed above.
+	clock-name-gdsc:
+			An optional property listing the regulator associated with a given clock name.
+
+Example:
+
+&ad_hoc_bus {
+        compatible = "msm-bus-device";
+        reg = <0x580000 0x62000>;
+        reg-names = "snoc-base";
+
+        fab_snoc: fab-snoc {
+                cell-id = <1024>;
+                label = "fab-snoc";
+                qcom,fab-dev;
+                qcom,bypass-qos-prg;
+		qcom,agg-scheme = <SCHEME_1>;
+		qcom,util-levels = <450000 133>,
+			<750000 154>;
+                qcom,base-name = "snoc-base";
+                qcom,base-offset = <0x7000>;
+                qcom,qos-off = <0x1000>;
+                qcom,bus-type = <1>;
+                clock-names = "bus_clk", "bus_a_clk";
+                clocks = <&clock_rpm  clk_snoc_msmbus_clk>,
+                      <&clock_rpm  clk_snoc_msmbus_a_clk>;
+		qcom,node-qos-clks {
+			clock-names = "q0-clk", "q1-clk";
+			clocks = <&clock_gcc clk_q0_clk>,
+				<&clock_gcc clk_q1_clk>;
+			q0-clk-supply = <&gdsc_q0_clk>;
+		};
+        };
+
+        mm_int_bimc: mm-int-bimc {
+                cell-id = <10003>;
+                label = "mm-int-bimc";
+		qcom,util-fact = <154>;
+		qcom,vrail-comp = <100>;
+                qcom,ap-owned;
+                qcom,connections = <&snoc_bimc_1_mas>;
+                qcom,bus-dev = <&fab_snoc>;
+                qcom,buswidth = <16>;
+        };
+
+        snoc_int_0: snoc-int-0 {
+                cell-id = <10004>;
+                label = "snoc-int-0";
+                qcom,connections = <&slv_qdss_stm &slv_imem &snoc_pnoc_mas>;
+                qcom,bus-dev = <&fab_snoc>;
+                qcom,mas-rpm-id = <99>;
+                qcom,slv-rpm-id = <130>;
+                qcom,buswidth = <8>;
+        };
+};
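+
+In the fab-snoc node above, SCHEME_1 aggregation reads the qcom,util-levels
+tuples as <threshold util-factor> pairs: a util factor of 133 (1.33, factors
+being encoded as value * 100 as with qcom,util-fact) is used up to
+450000 KBytes/s and 154 (1.54) up to 750000 KBytes/s.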
+
+
+The bus scaling driver also provides the ability to configure
+bus performance parameters across the entire chip-set.
+Various clients use MSM scaling APIs to request bandwidth
+between multiple master-slave pairs. The bus driver then finds
+the optimal path between the master and the slave, and aggregates
+the bandwidth and clock requests for all master-slave pairs on
+that path, and programs hardware accordingly.
+
+The device-tree data required for bus-scaling can be embedded within
+the clients' device nodes. The clients can register with the bus driver
+using the following properties:
+
+- qcom,msm-bus,name:		String representing the client-name
+- qcom,msm-bus,num-cases:	Total number of usecases
+- qcom,msm-bus,active-only:	Boolean context flag for requests in active or
+				dual (active & sleep) context
+- qcom,msm-bus,num-paths:	Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:	Arrays of unsigned integers representing:
+				master-id, slave-id, arbitrated bandwidth
+				in KBps, instantaneous bandwidth in KBps
+
+Example:
+
+	qcom,msm-bus,name = "client-name";
+	qcom,msm-bus,num-cases = <3>;
+	qcom,msm-bus,active-only;
+	qcom,msm-bus,num-paths = <2>;
+	qcom,msm-bus,vectors-KBps =
+			<22 512 0 0>, <26 512 0 0>,
+			<22 512 320000 3200000>, <26 512 3200000 3200000>,
+			<22 512 160000 1600000>, <26 512 1600000 1600000>;
+
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt b/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt
new file mode 100644
index 0000000..b68284c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_bus_rules.txt
@@ -0,0 +1,62 @@
+MSM Bus static bandwidth rules for adhoc bus topologies
+
+Buses are the interconnects between various devices. The devices are
+connected in different topologies. The static bandwidth rules allow
+setting up SOC specific rules to monitor certain bandwidth requests
+at different bus nodes. When the conditions of the rule are met
+the bus driver will be given a list of actions to be taken on specific
+bus master ports (throttle on/off, what bandwidth to throttle to, etc.).
+
+The mandatory properties for bus driver are:
+
+compatible:	"qcom,msm-bus-static-bw-rules"
+
+The static_rules node can have numerous rules for the different bandwidth voting
+conditions to be monitored. The mandatory properties for the rules are
+
+- qcom,src-nodes:		An array of phandles denoting the source nodes
+				whose bandwidth votes need to be monitored.
+- qcom,src-field:		This field represents the voted field of the
+				source node to be monitored. Possible values
+				are FLD_IB/FLD_AB/FLD_CLK
+- qcom,src-op:			The operand to be used when evaluating a node's
+				bandwidth vote with a threshold. Possible values
+				are OP_LE/OP_LT/OP_GT/OP_GE.
+- qcom,thresh:			The threshold in Kbytes/s to be used in vote
+				evaluation.
+- qcom,mode:			The QoS mode to be applied when this rule's
+				criteria are satisfied. Possible values are
+				THROTTLE_ON/THROTTLE_OFF
+- qcom,dest-node:		An array of phandles representing the nodes to
+				which the QoS mode is to be applied.
+
+The optional properties for the rule node are:
+- qcom,dest-bw:			The destination bandwidth value in Kbytes/s to
+				be used toward the QoS mode for the destination
+				node.
+
+Example:
+	static-rules {
+		compatible = "qcom,msm-bus-static-bw-rules";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		rule@0 {
+			qcom,src-nodes = <&mas_apss>;
+			qcom,src-field = <FLD_IB>;
+			qcom,src-op = <OP_LE>;
+			qcom,thresh = <1599078>;
+			qcom,mode = <THROTTLE_ON>;
+			qcom,dest-node = <&mas_apss>;
+			qcom,dest-bw = <1599078>;
+		};
+
+		rule@1 {
+			qcom,src-nodes = <&mas_apss>;
+			qcom,src-field = <FLD_IB>;
+			qcom,src-op = <OP_GT>;
+			qcom,thresh = <1599078>;
+			qcom,mode = <THROTTLE_OFF>;
+			qcom,dest-node = <&mas_apss>;
+		};
+	};
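+
+Taken together, the two rules above form a simple hysteresis pair for the
+mas_apss port: while its IB vote stays at or below 1599078 KBytes/s the port
+is throttled to 1599078 KBytes/s, and once the vote rises above that threshold
+throttling is released.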
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_gladiator_hang_detect.txt b/Documentation/devicetree/bindings/arm/msm/msm_gladiator_hang_detect.txt
new file mode 100644
index 0000000..d35bb84
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_gladiator_hang_detect.txt
@@ -0,0 +1,40 @@
+Gladiator Hang Detection provides sysfs entries for configuring thresholds
+and enabling detection on the ACE_port, IO_port, M1_port, M2_port,
+and PCIO_port.
+
+If the gladiator is hung for the threshold time (value * 5ns) and no
+heartbeat event arrives from a gladiator port at the gladiator hang
+monitor, a gladiator hang interrupt is generated to reset the SOC and
+collect the context of all cores.
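+
+As a worked example (threshold value illustrative only, not a recommended
+setting), a threshold of 400000000 gives a hang window of
+400000000 * 5 ns = 2 s without a heartbeat before the reset interrupt fires.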
+
+Gladiator hang detection can be enabled on different ports.
+
+Writing 1 into the ace_enabled sysfs entry enables gladiator hang
+detection on the ACE port.
+Writing 1 into the io_enabled sysfs entry enables gladiator hang
+detection on the IO port.
+Writing 1 into the m1_enabled sysfs entry enables gladiator hang
+detection on the M1 port.
+Writing 1 into the m2_enabled sysfs entry enables gladiator hang
+detection on the M2 port.
+Writing 1 into the pcio_enabled sysfs entry enables gladiator hang
+detection on the PCIO port.
+
+Required properties:
+- compatible : "qcom,gladiator-hang-detect"
+- qcom,threshold-arr:
+		Array of APCS_COMMON_GLADIATOR_HANG_THRESHOLD_n register
+		addresses
+- qcom,config-reg:
+		APCS_COMMON_GLADIATOR_HANG_CONFIG register address
+
+Optional properties:
+
+Example:
+	For msmcobalt:
+		qcom,ghd {
+				compatible = "qcom,gladiator-hang-detect";
+				qcom,threshold-arr = <0x179d141c 0x179d1420
+					0x179d1424 0x179d1428 0x179d1420 0x179d1430>;
+				qcom,config-reg = <0x179d1434>;
+		};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt b/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt
new file mode 100644
index 0000000..8aa6879
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_hang_detect.txt
@@ -0,0 +1,55 @@
+* QTI MSM Core Hang Detection
+
+Core Hang Detection provides three sysfs entries for configuring the
+threshold, the PMU event mux select, and enabling hang detection.
+
+If a core is hung for the threshold time (value X 10ns) and no
+heartbeat event arrives from the PMU at the core hang monitor, a core
+hang interrupt is generated to reset the SOC via the secure watchdog
+and collect the context of all cores.
+
+The PMU event mux select can be programmed to one of the supported
+events, for example:
+1) Load instructions executed,
+2) Store instructions executed,
+3) Instructions architecturally executed, etc.
+
+Writing 1 into the enable sysfs entry enables core hang detection;
+if the selected PMU mux event does not occur within 10ns, the core hang
+counter is incremented. Once the counter reaches the programmed threshold
+value, a core hang interrupt is generated to reset the SOC.
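+
+As a worked example (threshold value illustrative only), a threshold of
+500000000 gives 500000000 * 10 ns = 5 s without the selected PMU event before
+the core hang interrupt is raised.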
+
+
+The device tree parameters for the core hang detection are:
+
+Required properties:
+
+- compatible : "qcom,core-hang-detect"
+- label: unique name used to create the sysfs entry
+- qcom,threshold-arr :
+	Array of APCS_ALIAS*_CORE_HANG_THRESHOLD register address
+	for each core.
+- qcom,config-arr :
+	Array of APCS_ALIAS*_CORE_HANG_CONFIG register address
+	for each core.
+
+Optional properties:
+
+Example:
+  For msm8937:
+	qcom,chd {
+		compatible = "qcom,core-hang-detect";
+		qcom,threshold-arr = <0xB088094 0xB098094 0xB0A8094
+			0xB0B8094 0xB188094 0xB198094 0xB1A8094 0xB1B8094>;
+		qcom,config-arr = <0xB08809C 0xB09809C 0xB0A809C
+			0xB0B809C 0xB18809C 0xB19809C 0xB1A809C 0xB1B809C>;
+	};
+
+  For msmtitanium:
+	qcom,chd {
+		compatible = "qcom,core-hang-detect";
+		qcom,threshold-arr = <0xB1880B0 0xB1980B0 0xB1A80B0
+			0xB1B80B0 0xB0880B0 0xB0980B0 0xB0A80B0 0xB0B80B0>;
+		qcom,config-arr = <0xB1880B8 0xB1980B8 0xB1A80B8
+			0xB1B80B8 0xB0880B8 0xB0980B8 0xB0A80B8 0xB0B80B8>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ion.txt b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
new file mode 100644
index 0000000..6527675
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ion.txt
@@ -0,0 +1,72 @@
+ION Memory Manager (ION)
+
+ION is a memory manager that allows for sharing of buffers between different
+processes and between user space and kernel space. ION manages different
+memory spaces by separating the memory spaces into "heaps". Depending on the
+type of heap ION must reserve memory using the msm specific memory reservation
+bindings (see Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties for Ion
+
+- compatible: "qcom,msm-ion"
+
+
+All child nodes of a qcom,msm-ion node are interpreted as Ion heap
+configurations.
+
+Required properties for Ion heaps
+
+- reg: The ID of the ION heap.
+- qcom,ion-heap-type: The heap type to use for this heap. Should be one of
+  the following:
+    - "SYSTEM"
+    - "SYSTEM_CONTIG"
+    - "CARVEOUT"
+    - "CHUNK"
+    - "CP"
+    - "DMA"
+    - "SECURE_DMA"
+
+Optional properties for Ion heaps
+
+- compatible: "qcom,msm-ion-reserve". This is required if memory is to be reserved
+  as specified by qcom,memory-reservation-size below.
+- qcom,heap-align: Alignment of start of the memory in the heap.
+- qcom,heap-adjacent: ID of heap this heap needs to be adjacent to.
+- qcom,memory-reservation-size: size of reserved memory for the ION heap.
+- qcom,memory-reservation-type: type of memory to be reserved
+(see memory-reserve.txt for information about memory reservations)
+- qcom,default-prefetch-size: Base value to be used for prefetching
+  optimizations. Ignored if the heap does not support prefetching.
+  Will be set to a reasonable default value (e.g. the maximum heap size)
+  if this option is not set.
+
+Example:
+	qcom,ion {
+                 compatible = "qcom,msm-ion";
+                 #address-cells = <1>;
+                 #size-cells = <0>;
+
+                 qcom,ion-heap@25 {
+                         reg = <25>;
+                         qcom,ion-heap-type = "SYSTEM";
+                 };
+
+                 qcom,ion-heap@8 { /* CP_MM HEAP */
+                         compatible = "qcom,msm-ion-reserve";
+                         reg = <8>;
+                         qcom,heap-align = <0x1000>;
+                         linux,contiguous-region = <&secure_mem>;
+                         qcom,ion-heap-type = "SECURE_DMA";
+                 };
+
+                 qcom,ion-heap@29 { /* FIRMWARE HEAP */
+                         compatible = "qcom,msm-ion-reserve";
+                         reg = <29>;
+                         qcom,heap-align = <0x20000>;
+                         qcom,heap-adjacent = <8>;
+                         qcom,memory-reservation-type = "EBI1"; /* reserve EBI memory */
+                         qcom,memory-reservation-size = <0xA00000>;
+                         qcom,ion-heap-type = "CARVEOUT";
+                 };
+	};
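+
+In the example above, heap 25 is a plain SYSTEM heap that needs no reservation;
+heap 8 is a SECURE_DMA heap backed by the secure_mem contiguous region and
+aligned to 0x1000; and heap 29 is a CARVEOUT heap that reserves 0xA00000 bytes
+of EBI1 memory, aligned to 0x20000 and placed adjacent to heap 8.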
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router.txt
new file mode 100644
index 0000000..256905c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router.txt
@@ -0,0 +1,16 @@
+Qualcomm Technologies, Inc. IPC Router
+
+Required properties:
+-compatible:	should be "qcom,ipc_router"
+-qcom,node-id:	unique ID to identify the node in network
+
+Optional properties:
+-qcom,default-peripheral: String property to indicate the default peripheral
+			  to communicate
+
+Example:
+	qcom,ipc_router {
+		compatible = "qcom,ipc_router";
+		qcom,node-id = <1>;
+		qcom,default-peripheral = "modem";
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt
new file mode 100644
index 0000000..9e1d230
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_glink_xprt.txt
@@ -0,0 +1,42 @@
+Qualcomm Technologies, Inc. IPC Router G-Link Transport
+
+Required properties:
+-compatible:		should be "qcom,ipc_router_glink_xprt"
+-qcom,ch-name:		the G-Link channel name used by the G-Link transport
+-qcom,xprt-remote:	string that defines the edge of the transport
+-qcom,glink-xprt:	string that describes the underlying physical transport
+-qcom,xprt-linkid:	unique integer to identify the tier to which the link
+			belongs in the network; used to avoid routing loops
+			while forwarding broadcast messages
+-qcom,xprt-version:	unique version ID used by G-Link transport header
+
+Optional properties:
+-qcom,fragmented-data:	Boolean property to indicate that G-Link transport
+			supports fragmented data
+-qcom,pil-label: 	string that defines remote subsystem name understood
+			by pil. Absence of this property indicates that
+			subsystem loading through pil voting is disabled for
+			that subsystem.
+
+Example:
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "mpss";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+		qcom,pil-label = "modem";
+	};
+
+	qcom,ipc_router_q6_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "lpass";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+		qcom,pil-label = "adsp";
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_hsic_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_hsic_xprt.txt
new file mode 100644
index 0000000..71d0c0d
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_hsic_xprt.txt
@@ -0,0 +1,19 @@
+Qualcomm Technologies, Inc. IPC Router HSIC Transport
+
+Required properties:
+-compatible:		should be "qcom,ipc_router_hsic_xprt"
+-qcom,ch-name:		the HSIC channel name used by the HSIC transport
+-qcom,xprt-remote:	string that defines the edge of the transport (PIL Name)
+-qcom,xprt-linkid:	unique integer to identify the tier to which the link
+			belongs in the network; used to avoid routing loops
+			while forwarding broadcast messages
+-qcom,xprt-version:	unique version ID used by HSIC transport header
+
+Example:
+	qcom,ipc_router_external_modem_xprt {
+		compatible = "qcom,ipc_router_hsic_xprt";
+		qcom,ch-name = "ipc_bridge";
+		qcom,xprt-remote = "external-modem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <3>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt
new file mode 100644
index 0000000..de5ab2c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_mhi_xprt.txt
@@ -0,0 +1,21 @@
+Qualcomm Technologies, Inc. IPC Router MHI Transport
+
+Required properties:
+-compatible:		should be "qcom,ipc_router_mhi_xprt"
+-qcom,out-chan-id:	MHI Channel ID for the transmit path
+-qcom,in-chan-id:	MHI Channel ID for the receive path
+-qcom,xprt-remote:	string that defines the edge of the transport (PIL Name)
+-qcom,xprt-linkid:	unique integer to identify the tier to which the link
+			belongs in the network; used to avoid routing loops
+			while forwarding broadcast messages
+-qcom,xprt-version:	unique version ID used by MHI transport header
+
+Example:
+	qcom,ipc_router_external_modem_xprt2 {
+		compatible = "qcom,ipc_router_mhi_xprt";
+		qcom,out-chan-id = <34>;
+		qcom,in-chan-id = <35>;
+		qcom,xprt-remote = "external-modem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <3>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_smd_xprt.txt b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_smd_xprt.txt
new file mode 100644
index 0000000..1d74447
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_ipc_router_smd_xprt.txt
@@ -0,0 +1,34 @@
+Qualcomm Technologies, Inc. IPC Router SMD Transport
+
+Required properties:
+-compatible:		should be "qcom,ipc_router_smd_xprt"
+-qcom,ch-name:		the SMD channel name used by the SMD transport
+-qcom,xprt-remote:	string that defines the edge of the transport (PIL Name)
+-qcom,xprt-linkid:	unique integer to identify the tier to which the link
+			belongs in the network; used to avoid routing loops
+			while forwarding broadcast messages
+-qcom,xprt-version:	unique version ID used by SMD transport header
+
+Optional properties:
+-qcom,fragmented-data:	Indicate the SMD transport supports fragmented data
+-qcom,disable-pil-loading: Disable PIL Loading of the remote subsystem
+
+Example:
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_smd_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "modem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+		qcom,disable-pil-loading;
+	};
+
+	qcom,ipc_router_q6_xprt {
+		compatible = "qcom,ipc_router_smd_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "adsp";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
new file mode 100644
index 0000000..ae61ebf
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_rtb.txt
@@ -0,0 +1,22 @@
+Register Trace Buffer (RTB)
+
+The RTB is used to log discrete events in the system in an uncached buffer that
+can be post-processed from RAM dumps. The RTB must reserve memory using
+the msm specific memory reservation bindings (see
+Documentation/devicetree/bindings/arm/msm/memory-reserve.txt).
+
+Required properties
+
+- compatible: "qcom,msm-rtb"
+- qcom,rtb-size: size of the RTB buffer in bytes
+
+Optional properties:
+
+- linux,contiguous-region: phandle reference to a CMA region
+
+Example:
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,rtb-size = <0x100000>;
+	};
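+
+If the buffer is to come from a CMA region, the optional phandle can be added
+(the rtb_mem region name below is illustrative only):
+
+	qcom,msm-rtb {
+		compatible = "qcom,msm-rtb";
+		qcom,rtb-size = <0x100000>;
+		linux,contiguous-region = <&rtb_mem>;
+	};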
diff --git a/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
new file mode 100644
index 0000000..53ad68e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/msm_watchdog.txt
@@ -0,0 +1,48 @@
+* Qualcomm MSM Watchdog
+
+Watchdog timer is configured with a bark and a bite time.
+If the watchdog is not "pet" at regular intervals, the system
+is assumed to have become non-responsive and needs to be reset.
+A warning in the form of a bark timeout leads to a bark interrupt
+and a kernel panic. If the watchdog timer is still not reset,
+a bite timeout occurs, which is an interrupt in the secure mode,
+which leads to a reset of the SOC via the secure watchdog. The
+driver needs the pet time and the bark timeout to be programmed
+into the watchdog, as well as the bark and bite IRQs.
+
+The device tree parameters for the watchdog are:
+
+Required properties:
+
+- compatible : "qcom,msm-watchdog"
+- reg : offset and length of the register set for the watchdog block.
+- reg-names : names corresponding to each reg property value.
+        "wdt-base" - physical base address of watchdog timer registers
+        "wdt-absent-base" - physical base address of watchdog absent register
+- interrupts : should contain bark and bite irq numbers
+- qcom,pet-time : Non-zero time interval, in ms, at which the watchdog should be pet.
+- qcom,bark-time : Non-zero timeout value, in ms, for a watchdog bark.
+- qcom,userspace-watchdog :
+        (boolean) Allow enabling the userspace-watchdog feature. This feature
+        requires userspace to pet the watchdog every qcom,pet-time interval
+        in addition to the existing kernel-level checks.
+        This feature is supported through device sysfs files.
+
+Optional properties:
+
+- qcom,ipi-ping : (boolean) send keep alive ping to other cpus if present
+- qcom,wakeup-enable : (boolean) enable non secure watchdog to freeze / unfreeze
+                        automatically across suspend / resume path.
+
+Example:
+
+        qcom,wdt@f9017000 {
+                compatible = "qcom,msm-watchdog";
+                reg = <0xf9017000 0x1000>;
+                reg-names = "wdt-base";
+                interrupts = <0 3 0>, <0 4 0>;
+                qcom,bark-time = <11000>;
+                qcom,pet-time = <10000>;
+                qcom,ipi-ping;
+                qcom,wakeup-enable;
+        };
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
new file mode 100644
index 0000000..a4672e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,llcc.txt
@@ -0,0 +1,118 @@
+== Introduction==
+
+The LLCC (Last Level Cache Controller) driver is implemented as a simple-mfd.
+It is split into four drivers based on the hw blocks that provide
+different functionality.
+
+1. LLCC Core Driver:
+The LLCC core driver takes care of the LLCC initialization required to enable the
+LLCC, ECC (Error Correction Code) and AMON (Activity Monitor) drivers.
+
+2. LLCC driver:
+Programs the SCT (system configuration table). The SCT programming divides the
+system cache into slices. Each slice is assigned an ID, a.k.a. SCID (Sub-cache ID).
+
+HW modules that are designated to use the system cache are known as clients.
+Each client must also be represented as a node in the device tree just like
+any other hw module.
+
+One client can have multiple SCIDs assigned, meaning each client could get
+multiple slices in the cache. A client can use the slices for various pre-defined
+usecases. Each client defines a set of names for these usecases in its
+device tree binding.
+
+A client makes a request to the LLCC device to get the cache-slice properties for
+each of its usecases. The client gets information such as the cache slice ID and
+the size of the cache slice.
+
+3. LLCC ECC Driver:
+Reports single and double bit errors in the data and tag ram of LLCC.
+
+4. LLCC AMON Driver:
+Keeps track of the data progress within the internal channels of LLCC.
+
+== llcc device ==
+
+Required Properties:
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be "qcom,llcc-core"
+
+- reg:
+	Usage: required
+	Value Type: <prop-encoded-array>
+	Definition: must be addresses and sizes of the LLCC registers
+
+- reg-names:
+	Usage: required
+	Value Type: <stringlist>
+	Definition: Address names. Must be "llcc"
+
+- #cache-cells:
+	Usage: required
+	Value Type: <u32>
+	Definition: Number of cache cells, must be 1
+
+- max-slices:
+	usage: required
+	Value Type: <u32>
+	Definition: Number of cache slices supported by hardware
+
+Optional Properties:
+- status:
+	Usage: optional
+	Value type: <string>
+	Definition: Property to enable or disable the driver
+
+== llcc amon device ==
+
+Optional Properties:
+-qcom,fg-cnt : The value of the fine-grained counter of the activity monitor
+		block.
+
+Example:
+
+	qcom,llcc@01300000 {
+		compatible = "llcc-core", "syscon", "simple-mfd";
+		reg = <0x1300000 0x50000>;
+		reg-names = "llcc_base";
+		status = "disabled";
+
+		llcc: qcom,msmskunk-llcc {
+			compatible = "qcom,msmskunk-llcc";
+			#cache-cells = <1>;
+			max-slices = <32>;
+		};
+
+		qcom,llcc-erp {
+			compatible = "qcom,llcc-erp";
+		};
+
+		qcom,llcc-amon {
+			compatible = "qcom,llcc-amon";
+			qcom,fg-cnt = <0x7>;
+		};
+	};
+
+== Client ==
+
+Required properties:
+- cache-slice-names:
+	Usage: required
+	Value type: <stringlist>
+	Definition: A set of names that identify the usecase names of a client that uses
+		    cache slice. These strings are used to look up the cache slice
+		    entries by name.
+
+- cache-slices:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: Each tuple has a phandle to the llcc device as the first argument;
+		    the second argument is the usecase id of the client.
+For Example:
+
+	venus {
+		cache-slice-names = "vidsc0", "vidsc1";
+		cache-slices = <&llcc 2>, <&llcc 3>;
+	};
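+
+In this example the "vidsc0" usecase resolves to <&llcc 2> and "vidsc1" to
+<&llcc 3>: each cache-slices entry carries one cell (the usecase id) after the
+llcc phandle, matching #cache-cells = <1> in the llcc node.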
diff --git a/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
new file mode 100644
index 0000000..adfa94f
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
@@ -0,0 +1,13 @@
+Qualcomm Technologies, Inc. SMSM Point-to-Point (SMP2P) Sleepstate driver
+
+Required properties:
+-compatible : should be one of the following:
+- "qcom,smp2pgpio_sleepstate_3_out" - for sensor processor on remote pid 3
+- "qcom,smp2pgpio-sleepstate-out" - for other cases
+-gpios : the relevant gpio pins of the entry.
+
+Example:
+	qcom,smp2pgpio-sleepstate-3-out {
+		compatible = "qcom,smp2pgpio_sleepstate_3_out";
+		gpios = <&smp2pgpio_sleepstate_3_out 0 0>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/smem.txt b/Documentation/devicetree/bindings/arm/msm/smem.txt
new file mode 100644
index 0000000..2f92c0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/smem.txt
@@ -0,0 +1,122 @@
+MSM Shared Memory
+
+[Root level node]
+Required properties:
+-compatible : should be "qcom,smem"
+-reg : the location and size of smem (optional)
+	the irq register base address (required)
+	the location and size of auxiliary smem areas (optional)
+	the smem target info either from IMEM or register (optional)
+-reg-names : "smem" - optional string to identify the shared memory region
+	     "irq-reg-base" - string to identify the irq register region
+	     "aux-mem1", "aux-mem2", "aux-mem3", ... - optional strings to
+			identify any auxiliary shared memory regions
+	     "smem_targ_info_imem" - optional string to identify
+				the smem target info from IMEM memory
+	     "smem_targ_info_reg" - optional string to identify
+				the smem target info from registers
+	     one of the optional register names smem_targ_info_imem,
+				smem_targ_info_reg, or smem is required.
+
+Optional properties:
+-qcom,mpu-enabled : boolean value indicating that Memory Protection Unit based
+	security is enabled on the "smem" shared memory region
+
+[Second level nodes]
+
+qcom,smd
+Required properties:
+-compatible : should be "qcom,smd"
+-qcom,smd-edge : the smd edge
+-qcom,smd-irq-offset : the offset into the irq register base memory for sending
+	interrupts
+-qcom,smd-irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+-label : the name of the remote subsystem for this edge
+
+Optional properties:
+-qcom,irq-no-suspend: configure the incoming irq line as active during suspend
+-qcom,not-loadable : indicates this processor cannot be loaded by PIL
+
+qcom,smsm
+Required properties:
+-compatible : should be "qcom,smsm"
+-qcom,smsm-edge : the smsm edge
+-qcom,smsm-irq-offset : the offset into the irq register base memory for sending
+	interrupts
+-qcom,smsm-irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+
+
+Example:
+
+	qcom,smem@fa00000 {
+		compatible = "qcom,smem";
+		reg = <0xfa00000 0x200000>,
+			<0xfa006000 0x1000>,
+			<0xfc428000 0x4000>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1";
+
+		qcom,smd-modem {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <0>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1000>;
+			interrupts = <0 25 1>;
+			label = "modem";
+		};
+
+		qcom,smsm-modem {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <0>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x2000>;
+			interrupts = <0 26 1>;
+		};
+
+		qcom,smd-adsp {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <1>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x100>;
+			interrupts = <0 156 1>;
+			label = "adsp";
+		};
+
+		qcom,smsm-adsp {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <1>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x200>;
+			interrupts = <0 157 1>;
+		};
+
+		qcom,smd-wcnss {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <6>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x20000>;
+			interrupts = <0 142 1>;
+			label = "wcnss";
+		};
+
+		qcom,smsm-wcnss {
+			compatible = "qcom,smsm";
+			qcom,smsm-edge = <6>;
+			qcom,smsm-irq-offset = <0x8>;
+			qcom,smsm-irq-bitmask = <0x80000>;
+			interrupts = <0 144 1>;
+		};
+
+		qcom,smd-rpm {
+			compatible = "qcom,smd";
+			qcom,smd-edge = <15>;
+			qcom,smd-irq-offset = <0x8>;
+			qcom,smd-irq-bitmask = <0x1>;
+			interrupts = <0 168 1>;
+			label = "rpm";
+			qcom,irq-no-suspend;
+			qcom,not-loadable;
+		};
+	};
+
diff --git a/Documentation/devicetree/bindings/arm/msm/smp2p.txt b/Documentation/devicetree/bindings/arm/msm/smp2p.txt
new file mode 100644
index 0000000..e211064
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/smp2p.txt
@@ -0,0 +1,18 @@
+Qualcomm Technologies, Inc. SMSM Point-to-Point (SMP2P)
+
+Required properties:
+-compatible : should be "qcom,smp2p"
+-reg : the location of the irq register base memory
+-qcom,remote-pid : the SMP2P remote processor ID (see smp2p_private_api.h)
+-qcom,irq-bitmask : the sending irq bitmask
+-interrupts : the receiving interrupt line
+
+Example:
+
+	qcom,smp2p-modem {
+		compatible = "qcom,smp2p";
+		reg = <0xf9011008 0x4>;
+		qcom,remote-pid = <1>;
+		qcom,irq-bitmask = <0x4000>;
+		interrupts = <0 27 1>;
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/system_health_monitor.txt b/Documentation/devicetree/bindings/arm/msm/system_health_monitor.txt
new file mode 100644
index 0000000..bcc8e00
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/system_health_monitor.txt
@@ -0,0 +1,35 @@
+System Health Monitor
+
+[Root level node]
+Required properties:
+-compatible: should be "qcom,system-health-monitor"
+
+[Second level nodes]
+Information about subsystems that are monitored by System Health Monitor.
+Subsystems include modem, adsp, wcnss, and external MDM (esoc).
+Required properties:
+-qcom,subsys-name: Name as identified by a subsystem
+-qcom,ssrestart-string:	String used by subsystem restart to identify
+			the subsystem
+
+Example:
+	qcom,system-health-monitor {
+		compatible = "qcom,system-health-monitor";
+
+		qcom,modem {
+			qcom,subsys-name = "msm_mpss";
+			qcom,ssrestart-string = "modem";
+		};
+		qcom,adsp {
+			qcom,subsys-name = "msm_adsp";
+			qcom,ssrestart-string = "adsp";
+		};
+		qcom,wcnss {
+			qcom,subsys-name = "msm_wcnss";
+			qcom,ssrestart-string = "wcnss";
+		};
+		qcom,esoc {
+			qcom,subsys-name = "mdm";
+			qcom,ssrestart-string = "esoc";
+		};
+	};
diff --git a/Documentation/devicetree/bindings/arm/msm/system_pm.txt b/Documentation/devicetree/bindings/arm/msm/system_pm.txt
new file mode 100644
index 0000000..9628d9e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/system_pm.txt
@@ -0,0 +1,29 @@
+SYSTEM PM
+
+System PM device is a virtual device that handles all CPU subsystem low power
+mode activities. When entering core shutdown, resource states that were
+requested from the processor may be relinquished and set to idle, then
+restored when the cores are brought out of sleep.
+
+PROPERTIES
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be "qcom,system-pm".
+
+- mboxes:
+	Usage: optional
+	Value type: <phandle>
+	Definition: phandle to the TCS mailbox controller for the CPU subsystem.
+	This property is generally set only for SoCs that use RPMH communication
+	through a mailbox controller.
+
+EXAMPLE
+
+	system_pm {
+		compatible = "qcom,system-pm";
+		mboxes = <&apps_rsc 0>;
+	};
+
+
diff --git a/Documentation/devicetree/bindings/cache/msm_gladiator_erp_v2.txt b/Documentation/devicetree/bindings/cache/msm_gladiator_erp_v2.txt
new file mode 100644
index 0000000..3c1c5c0
--- /dev/null
+++ b/Documentation/devicetree/bindings/cache/msm_gladiator_erp_v2.txt
@@ -0,0 +1,20 @@
+* MSM Gladiator error reporting driver
+
+Required properties:
+- compatible: Should be "qcom,msm-gladiator-v2"
+- reg: I/O address of the Gladiator H/W block
+- reg-names: Should be "gladiator_base"
+- interrupts: Should contain the gladiator error interrupt number
+- clock-names: Should be "atb_clk"
+- clocks: Handles to clocks specified in "clock-names" property.
+
+Example:
+
+qcom,msm-gladiator-v2@b1c0000 {
+	compatible = "qcom,msm-gladiator-v2";
+	reg = <0xb1c0000 0xe000>;
+	reg-names = "gladiator_base";
+	interrupts = <0 34 0>;
+	clock-names = "atb_clk";
+	clocks = <&clock_gcc clk_qdss_clk>;
+};
diff --git a/Documentation/devicetree/bindings/clock/qcom,dummycc.txt b/Documentation/devicetree/bindings/clock/qcom,dummycc.txt
new file mode 100644
index 0000000..3521ec9
--- /dev/null
+++ b/Documentation/devicetree/bindings/clock/qcom,dummycc.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies Dummy Clock controller
+
+Qualcomm Technologies Dummy Clock controller devices provide a dummy clock
+for driver development during pre-silicon stage. The driver will always
+return a dummy clock that has no effect on hardware.
+
+Required properties:
+- compatible:		Must be "qcom,dummycc"
+- #clock-cells:		Must be <1>. This will allow the common clock device
+			tree framework to recognize _this_ device node as a
+			clock provider.
+
+Optional properties:
+- clock-output-names:	Name of the clock or the clock type.
+- #reset-cells:		Must be <1>. This will allow the common reset device
+			tree framework to recognize _this_ device node as a
+			reset controller provider.
+
+Example:
+	clock_gcc: qcom,gcc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "gcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/cpuss_dump/cpuss_dump.txt b/Documentation/devicetree/bindings/cpuss_dump/cpuss_dump.txt
new file mode 100644
index 0000000..e19632a
--- /dev/null
+++ b/Documentation/devicetree/bindings/cpuss_dump/cpuss_dump.txt
@@ -0,0 +1,27 @@
+CPU Subsystem Dump Driver
+
+The CPU Subsystem dump driver is used to dump various hardware entities, such
+as the instruction and data TLBs or the unified TLBs, to an allocated buffer.
+This allows the data to be analysed in case of corruption.
+
+Required Properties for the cpuss_dump node:
+-compatible = "qcom,cpuss-dump";
+
+All child nodes of cpuss_dump node are interpreted as the various hardware
+entities which need to be dumped.
+
+Required properties of the dump nodes
+
+- qcom,dump-node: phandle to the actual cpuss hardware entity present
+		  in the cpu map
+- qcom,dump-id: The ID within the data dump table where this entry needs to
+		be added.
+
+Example:
+	msm_cpuss_dump {
+		compatible = "qcom,cpuss-dump";
+		qcom,itlb_dump100 {
+			qcom,dump-node = <&L1_itlb_100>;
+			qcom,dump-id = <34>;
+		};
+	};
diff --git a/Documentation/devicetree/bindings/edac/kryo3xx-edac.txt b/Documentation/devicetree/bindings/edac/kryo3xx-edac.txt
new file mode 100644
index 0000000..e2819d9
--- /dev/null
+++ b/Documentation/devicetree/bindings/edac/kryo3xx-edac.txt
@@ -0,0 +1,33 @@
+* Kryo 3xx EDAC node
+
+Kryo3xx EDAC node is defined to describe on-chip error detection and correction
+for the Kryo3xx core.
+
+Kryo3xx will report all SBE and DBE found in L1/L2/L3/SCU caches in two registers:
+	ERRXSTATUS - Error Record Primary Status Register
+	ERRXMISC0 - Error Record Miscellaneous Register
+
+The current implementation of the Kryo 3xx ECC mechanism does not have
+interrupts set up, so it requires polling of the registers to check whether an
+error has been reported.
+
+The following section describes the DT node binding for kryo_3xx_cpu_erp.
+
+Required properties:
+- compatible		: Shall be "arm,arm64-kryo3xx-cpu-erp".
+- interrupts		: Interrupt-specifier for L1/L2, L3/SCU error IRQ(s)
+- interrupt-names	: Descriptive names of the interrupts
+
+Example:
+
+	kryo3xx-erp {
+		compatible = "arm,arm64-kryo3xx-cpu-erp";
+		interrupts = <1 6 4>,
+			<1 7 4>,
+			<0 34 4>,
+			<0 35 4>;
+
+		interrupt-names = "l1-l2-faultirq",
+				"l1-l2-errirq",
+				"l3-scu-errirq",
+				"l3-scu-faultirq";
+	};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-smp2p.txt b/Documentation/devicetree/bindings/gpio/gpio-smp2p.txt
new file mode 100644
index 0000000..f9b227e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-smp2p.txt
@@ -0,0 +1,95 @@
+Qualcomm Technologies, Inc. SMSM Point-to-Point (SMP2P) GPIO Driver
+
+Used to map an SMP2P entry and remote processor ID to a virtual GPIO controller
+and virtual interrupt controller.
+
+Required properties:
+-compatible : should be "qcom,smp2pgpio";
+-qcom,entry-name : name of the SMP2P entry
+-qcom,remote-pid : the SMP2P remote processor ID (see smp2p_private_api.h)
+-gpio-controller : specifies that this is a GPIO controller
+-#gpio-cells : number of GPIO cells (should always be <2>)
+-interrupt-controller : specifies that this is an interrupt controller
+-#interrupt-cells : number of interrupt cells (should always be <2>)
+
+Optional properties:
+-qcom,is-inbound : specifies that this is an inbound entry (default is outbound)
+
+Comments:
+All device tree entries must be unique.  Therefore, to prevent naming collisions
+between clients, it is recommended that the DT nodes be named using the
+format:
+	smp2pgpio_<ENTRY_NAME>_<REMOTE PID>_<in|out>
+
+Unit test devices ("smp2p" entries):
+-compatible : should be one of
+		"qcom,smp2pgpio_test_smp2p_1_out"
+		"qcom,smp2pgpio_test_smp2p_1_in"
+		"qcom,smp2pgpio_test_smp2p_2_out"
+		"qcom,smp2pgpio_test_smp2p_2_in"
+		"qcom,smp2pgpio_test_smp2p_3_out"
+		"qcom,smp2pgpio_test_smp2p_3_in"
+		"qcom,smp2pgpio_test_smp2p_4_out"
+		"qcom,smp2pgpio_test_smp2p_4_in"
+		"qcom,smp2pgpio_test_smp2p_5_out"
+		"qcom,smp2pgpio_test_smp2p_5_in"
+		"qcom,smp2pgpio_test_smp2p_7_out"
+		"qcom,smp2pgpio_test_smp2p_7_in"
+		"qcom,smp2pgpio_test_smp2p_15_out"
+		"qcom,smp2pgpio_test_smp2p_15_in"
+-gpios : the relevant gpio pins of the entry
+
+Example:
+	/* Maps inbound "smp2p" entry on remote PID 7 to GPIO controller. */
+	smp2pgpio_smp2p_7_in: qcom,smp2pgpio-smp2p-7-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <7>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/*
+	 * Maps inbound "smp2p" entry on remote PID 7 to client driver
+	 * "qcom,smp2pgpio_test_smp2p_7_in".
+	 *
+	 * Note:  If all 32-pins are used by this client, then you
+	 *        can just list pin 0 here as a shortcut.
+	 */
+	qcom,smp2pgpio_test_smp2p_7_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_7_in";
+		gpios = <&smp2pgpio_smp2p_7_in 0 0>, /* pin 0 */
+			<&smp2pgpio_smp2p_7_in 1 0>,
+			. . .
+			<&smp2pgpio_smp2p_7_in 31 0>;    /* pin 31 */
+	};
+
+
+	/* Maps outbound "smp2p" entry on remote PID 7 to GPIO controller. */
+	smp2pgpio_smp2p_7_out: qcom,smp2pgpio-smp2p-7-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <7>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/*
+	 * Maps outbound "smp2p" entry on remote PID 7 to client driver
+	 * "qcom,smp2pgpio_test_smp2p_7_out".
+	 *
+	 * Note:  If all 32-pins are used by this client, then you
+	 *        can just list pin 0 here as a shortcut.
+	 */
+	qcom,smp2pgpio_test_smp2p_7_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_7_out";
+		gpios = <&smp2pgpio_smp2p_7_out 0 0>, /* pin 0 */
+			<&smp2pgpio_smp2p_7_out 1 0>,
+			. . .
+			<&smp2pgpio_smp2p_7_out 31 0>;    /* pin 31 */
+	};
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu.txt b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
index e862d148..8016936 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu.txt
@@ -17,6 +17,8 @@
                         "arm,mmu-401"
                         "arm,mmu-500"
                         "cavium,smmu-v2"
+                        "qcom,smmu-v2"
+                        "qcom,qsmmu-v500"
 
                   depending on the particular implementation and/or the
                   version of the architecture implemented.
@@ -35,16 +37,12 @@
                   interrupt per context bank. In the case of a single,
                   combined interrupt, it must be listed multiple times.
 
-- #iommu-cells  : See Documentation/devicetree/bindings/iommu/iommu.txt
-                  for details. With a value of 1, each "iommus" entry
-                  represents a distinct stream ID emitted by that device
-                  into the relevant SMMU.
-
-                  SMMUs with stream matching support and complex masters
-                  may use a value of 2, where the second cell represents
-                  an SMR mask to combine with the ID in the first cell.
-                  Care must be taken to ensure the set of matched IDs
-                  does not result in conflicts.
+- mmu-masters   : A list of phandles to device nodes representing bus
+                  masters for which the SMMU can provide a translation
+                  and their corresponding StreamIDs (see example below).
+                  Each device node linked from this list must have a
+                  "#stream-id-cells" property, indicating the number of
+                  StreamIDs associated with it.
 
 ** System MMU optional properties:
 
@@ -60,20 +58,55 @@
                   aliases of secure registers have to be used during
                   SMMU configuration.
 
-** Deprecated properties:
+- attach-impl-defs : global registers to program at device attach
+                  time. This should be a list of 2-tuples of the format:
+                  <offset reg_value>.
 
-- mmu-masters (deprecated in favour of the generic "iommus" binding) :
-                  A list of phandles to device nodes representing bus
-                  masters for which the SMMU can provide a translation
-                  and their corresponding Stream IDs. Each device node
-                  linked from this list must have a "#stream-id-cells"
-                  property, indicating the number of Stream ID
-                  arguments associated with its phandle.
+- qcom,fatal-asf : Enable BUG_ON for address size faults.  Some hardware
+                  requires special fixups to recover from address size
+                  faults.  Rather than applying the fixups, just BUG, since
+                  address size faults are due to a fundamental programming
+                  error from which we do not care about recovering anyway.
 
-** Examples:
+- qcom,skip-init : Disable resetting configuration for all context banks
+                  during device reset.  This is useful for targets where
+                  some context banks are dedicated to other execution
+                  environments outside of Linux and those other EEs are
+                  programming their own stream match tables, SCTLR, etc.
+                  Without setting this option we will trample on their
+                  configuration.
 
-        /* SMMU with stream matching or stream indexing */
-        smmu1: iommu {
+- qcom,dynamic  : Allow dynamic domains to be attached. This is only
+		  useful if the upstream hardware is capable of switching
+		  between multiple domains within a single context bank.
+
+- clocks        : List of clocks to be used during SMMU register access. See
+                  Documentation/devicetree/bindings/clock/clock-bindings.txt
+                  for information about the format. For each clock specified
+                  here, there must be a corresponding entry in clock-names
+                  (see below).
+
+- clock-names   : List of clock names corresponding to the clocks specified in
+                  the "clocks" property (above). See
+                  Documentation/devicetree/bindings/clock/clock-bindings.txt
+                  for more info.
+
+- (%s)-supply   : Phandle of the regulator that should be powered on during
+                  SMMU register access. (%s) is a string from the
+		  qcom,regulator-names property.
+
+- qcom,regulator-names :
+		  List of strings to use with the (%s)-supply property.
+
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+		: Refer to devicetree/bindings/arm/msm/msm_bus.txt
+
+Example:
+
+        smmu {
                 compatible = "arm,smmu-v1";
                 reg = <0xba5e0000 0x10000>;
                 #global-interrupts = <2>;
@@ -83,29 +116,46 @@
                              <0 35 4>,
                              <0 36 4>,
                              <0 37 4>;
-                #iommu-cells = <1>;
-        };
 
-        /* device with two stream IDs, 0 and 7 */
-        master1 {
-                iommus = <&smmu1 0>,
-                         <&smmu1 7>;
+                /*
+                 * Two DMA controllers, the first with two StreamIDs (0xd01d
+                 * and 0xd01e) and the second with only one (0xd11c).
+                 */
+                mmu-masters = <&dma0 0xd01d 0xd01e>,
+                              <&dma1 0xd11c>;
+
+                attach-impl-defs = <0x124 0x3>,
+                    <0x128 0xa5>,
+                    <0x12c 0x1>;
         };
 
 
-        /* SMMU with stream matching */
-        smmu2: iommu {
-                ...
-                #iommu-cells = <2>;
-        };
+* Qualcomm MMU-500 TBU Device
 
-        /* device with stream IDs 0 and 7 */
-        master2 {
-                iommus = <&smmu2 0 0>,
-                         <&smmu2 7 0>;
-        };
+The qcom,qsmmu-v500 device implements a number of register regions containing
+debug functionality. Each register region maps to a separate TBU of the
+ARM MMU-500 implementation.
 
-        /* device with stream IDs 1, 17, 33 and 49 */
-        master3 {
-                iommus = <&smmu2 1 0x30>;
-        };
+** TBU required properties:
+
+- compatible    : Should be one of:
+                        "qcom,qsmmuv500-tbu"
+
+- reg           : Base address and size.
+
+- reg-names	: "base" and "status-reg" are expected.
+		"base" is the main TBU register region.
+		"status-reg" indicates whether the hw can process a new request.
+
+
+Example:
+smmu {
+	compatible = "qcom,qsmmu-v500";
+	tbu@0x1000 {
+		compatible = "qcom,qsmmuv500-tbu";
+		reg = <0x1000 0x1000>,
+			<0x2000 0x8>;
+		reg-names = "base",
+			"status-reg";
+	};
+};
diff --git a/Documentation/devicetree/bindings/iommu/iommu-debug.txt b/Documentation/devicetree/bindings/iommu/iommu-debug.txt
new file mode 100644
index 0000000..1d79f18
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/iommu-debug.txt
@@ -0,0 +1,27 @@
+This document describes the device tree binding for IOMMU test devices.
+
+The iommu-debug framework can optionally make use of some platform devices
+for improved standalone testing and other features.
+
+- compatible: iommu-debug-test
+
+
+Required properties
+===================
+
+- iommus: The IOMMU for the test device (see iommu.txt)
+
+
+Example
+=======
+
+	iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 42 shouldn't be used by anyone on the cpp_fd_smmu.  We just
+		 * need _something_ here to get this node recognized by the
+		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyways, so using a dummy value is ok.
+		 */
+		iommus = <&cpp_fd_smmu 42>;
+	};
diff --git a/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt b/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
new file mode 100644
index 0000000..4ef34bf
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/qcom-tcs.txt
@@ -0,0 +1,160 @@
+TCS Mailbox:
+------------
+
+Trigger Command Set (TCS) is the mailbox mechanism for communicating with
+the hardened resource accelerators. Requests to the resources are written
+to the mailbox registers as (addr, val) pairs and then triggered. Messages
+in the mailbox are then sent in sequence over an internal bus.
+
+The implementation of the TCS mailbox follows the mailbox controller
+architecture [1]. The logical block (DRV) is a part of the h/w entity
+(Resource State Coordinator, a.k.a. RSC) that handles sleep and active/wake
+resource request related functionality, including the mailbox. Multiple such
+DRVs can exist in a SoC and can be written to from Linux. The structure of
+each DRV follows the same template, with a few variations that are captured
+by the properties here.
+
+Each DRV could have 'm' TCS instances. Each TCS could have 'n' slots. Each
+slot has a header (u32), address (u32), data (u32), status (u32) and a
+read-response (u32). A TCS when triggered will send all the enabled commands
+of the 'n' commands in that slot in sequence.
+
+A TCS may be triggered from Linux or triggered by the F/W after all the CPUs
+have powered off to facilitate idle power saving. A TCS can be classified as -
+
+	SLEEP,  /* Triggered by F/W and not by Linux */
+	WAKE,   /* Triggered by F/W, may be used by Linux */
+	ACTIVE, /* Triggered by Linux */
+	CONTROL /* Triggered by F/W */
+
+Requests can be made for the state of a resource when the subsystem is active
+or idle. When all subsystems such as Modem, GPU and CPU are idle, the resource
+state will be an aggregate of the sleep votes from each of those subsystems.
+Drivers may request a sleep value for their shared resources in addition to
+the active mode requests.
+
+Control requests are instance specific requests that may or may not reach an
+accelerator. Only one platform device in Linux can request a control channel
+on a DRV.
+
+CONTROLLER:
+----------
+
+PROPERTIES:
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: Should be "qcom,tcs-drv".
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: the first element specifies the base address of the DRV,
+	            the second element specifies the size of the region.
+
+- #mbox-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: the number of mail-box cells. Must be 1.
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-interrupt>
+	Definition: the interrupt that fires when a message completion/response
+	           is received for this DRV from the accelerators.
+
+- qcom,drv-id:
+	Usage: required
+	Value type: <u32>
+	Definition: the id of the DRV in the RSC block.
+
+- qcom,tcs-config:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: the tuple defining the configuration of TCS.
+	            Must have 2 cells which describe each TCS type:
+	            <type number_of_tcs>
+	- Cell #1 (TCS Type): Only the following TCS types can be specified -
+		SLEEP_TCS
+		WAKE_TCS
+		ACTIVE_TCS
+		CONTROL_TCS
+	- Cell #2 (Number of TCS): <u32>
+
+EXAMPLE 1:
+
+For a TCS whose RSC base address is 0x179C0000 and which is at DRV 2, with the
+register offsets for DRV2 starting at 0xD00, the register calculations are as
+follows -
+First tuple: 0x179C0000 + 0x10000 * 2 = 0x179E0000
+Second tuple: 0x179E0000 + 0xD00  = 0x179E0D00
+
+	apps_rsc: mailbox@179e000 {
+		compatible = "qcom,tcs-drv";
+		reg = <0x179E0000 0x10000>, <0x179E0D00 0x3000>;
+		interrupts = <0 5 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <2>;
+		qcom,tcs-config = <SLEEP_TCS   3>,
+				  <WAKE_TCS    3>,
+				  <ACTIVE_TCS  2>,
+				  <CONTROL_TCS 1>;
+	};
+
+EXAMPLE 2:
+
+For a TCS whose RSC base address is 0xAF20000 and which is at DRV 0, with the
+register offsets for DRV0 starting at 0x1C00, the register calculations are as
+follows -
+First tuple: 0xAF20000
+Second tuple: 0xAF20000 + 0x1C00
+
+	disp_rsc: mailbox@af20000 {
+			status = "disabled";
+			compatible = "qcom,tcs-drv";
+			reg = <0xAF20000 0x10000>, <0xAF21C00 0x3000>;
+			interrupts = <0 129 0>;
+			#mbox-cells = <1>;
+			qcom,drv-id = <0>;
+			qcom,tcs-config = <SLEEP_TCS 1>,
+					<WAKE_TCS    1>,
+					<ACTIVE_TCS  0>,
+					<CONTROL_TCS 1>;
+		};
+
+
+CLIENT:
+-------
+
+A device needing to communicate with the accelerators should specify the
+common mailbox client properties described in [1]. mbox-names can optionally
+be used to provide a string name for the driver to look up the channel by name.
+
+- mboxes:
+	Usage: required, if the device wants to communicate with the mailbox
+	Value type: <prop-encoded-array>
+	Definition: The tuple has a handle to the mailbox instance used by the
+	            client as the first argument; the second argument must be 0.
+	            There is one tuple per MBOX controller addressed.
+
+EXAMPLE:
+
+	leaky_device@0 {
+		<...>;
+		mbox-names = <"leaky-channel">;
+		mboxes = <&tcs_box 0>;
+	};
+
+	leaky_device@1 {
+		<...>;
+		mbox-names = <"apps", "display">;
+		mboxes = <&tcs_box 0>, <&dsp_box 0>;
+	};
+
+	power_ctrl@0 {
+		<...>;
+		mbox-names = <"rpmh">;
+		mboxes = <&tcs_box 0>;
+	};
+
+[1]. Documentation/mailbox.txt
diff --git a/Documentation/devicetree/bindings/misc/memory-state-time.txt b/Documentation/devicetree/bindings/misc/memory-state-time.txt
new file mode 100644
index 0000000..c99a506
--- /dev/null
+++ b/Documentation/devicetree/bindings/misc/memory-state-time.txt
@@ -0,0 +1,8 @@
+Memory bandwidth and frequency state tracking
+
+Required properties:
+- compatible : should be:
+       "memory-state-time"
+- freq-tbl: Should contain entries with each frequency in Hz.
+- bw-buckets: Should contain upper-bound limits for each bandwidth bucket in Mbps.
+       Must match the framework power_profile.xml for the device.
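+
+Example (the frequency and bandwidth values below are placeholders only; use
+the platform's real DDR frequencies and the bandwidth buckets from its
+power_profile.xml):
+
+	memory_state_time {
+		compatible = "memory-state-time";
+		/* placeholder DDR frequencies, in Hz */
+		freq-tbl = <200000000 400000000 800000000>;
+		/* placeholder bandwidth bucket upper bounds, in Mbps */
+		bw-buckets = <1000 2000 4000>;
+	};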
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
new file mode 100644
index 0000000..ae1a2ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -0,0 +1,128 @@
+Qualcomm Technologies Inc MSS QDSP6v5 Peripheral Image Loader
+
+pil-qdsp6v5-mss is a peripheral image loader (PIL) driver. It is used for
+loading QDSP6v5 (Hexagon) firmware images for modem subsystems into memory and
+preparing the subsystem's processor to execute code. It's also responsible for
+shutting down the processor when it's not needed.
+
+Required properties:
+- compatible:	      Must be "qcom,pil-q6v5-mss" or "qcom,pil-q6v55-mss" or
+			"qcom,pil-q6v56-mss".
+- reg:		      Pairs of physical base addresses and region sizes of
+		      memory mapped registers.
+- reg-names:	      Names of the bases for the above registers. "qdsp6_base",
+		      "rmb_base", "restart_reg" or "restart_reg_sec"(optional
+		      for secure mode) are expected.
+		      If "halt_base" is in the same 4K page as these registers
+		      then it should be defined; otherwise "halt_q6",
+		      "halt_modem" and "halt_nc" are required.
+- interrupts:         The modem watchdog interrupt
+- vdd_cx-supply:      Reference to the regulator that supplies the vdd_cx domain.
+- vdd_cx-voltage:     Voltage corner/level(max) for cx rail.
+- vdd_mx-supply:      Reference to the regulator that supplies the memory rail.
+- vdd_mx-uV:          Voltage setting for the mx rail.
+- qcom,firmware-name: Base name of the firmware image. Ex. "mdsp"
+
+Optional properties:
+- vdd_mss-supply:     Reference to the regulator that supplies the processor.
+		      This may be a shared regulator that is already voted
+		      on in the PIL proxy voting code (and also managed by the
+		      modem on its own), hence we mark it as optional.
+- vdd_pll-supply:     Reference to the regulator that supplies the PLL's rail.
+- qcom,vdd_pll:       Voltage to be set for the PLL's rail.
+- reg-names:          "cxrail_bhs_reg" - control register for modem power
+		      domain.
+- clocks:	      Array of <clock_controller_phandle clock_reference> listing
+		      all the clocks that are accessed by this subsystem.
+- qcom,proxy-clock-names:  Names of the clocks that need to be turned on/off during
+			   proxy voting/unvoting.
+- qcom,active-clock-names: Names of the clocks that need to be turned on for the
+			   subsystem to run. Turned off when the subsystem is shutdown.
+- clock-names:		   Names of all the clocks that are accessed by the subsystem.
+- qcom,is-not-loadable: Boolean- Present if the image does not need to
+			be loaded.
+- qcom,pil-self-auth: Boolean- True if authentication is required.
+- qcom,mem-protect-id: Virtual ID used by PIL to call into TZ/HYP to protect/unprotect
+			subsystem related memory.
+- qcom,gpio-err-fatal: GPIO used by the modem to indicate error fatal to the apps.
+- qcom,gpio-err-ready: GPIO used by the modem to indicate error ready to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the modem to trigger proxy unvoting in
+  the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the modem to shutdown.
+- qcom,gpio-stop-ack: GPIO used by the modem to ack force stop or a graceful stop
+		      to the apps.
+- qcom,gpio-ramdump-disable: GPIO used by the modem to inform the apps that ramdump
+			     collection should be disabled.
+- qcom,gpio-shutdown-ack: GPIO used by the modem to indicate that it has done the
+			  necessary cleanup and that the apps can move forward with
+			  the shutdown sequence.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,mba-image-is-not-elf:	Boolean- Present if MBA image doesn't use the ELF
+				format.
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL
+			  service.
+- qcom,sysmon-id:	platform device id that sysmon is probed with for the subsystem.
+- qcom,override-acc: Boolean- Present if we need to override the default ACC settings
+- qcom,ahb-clk-vote: Boolean- Present if we need to remove the vote for the mss_cfg_ahb
+		     clock after the modem boots up
+- qcom,pnoc-clk-vote: Boolean- Present if the modem needs the PNOC bus to be
+		      clocked before it boots up
+- qcom,qdsp6v56-1-3: Boolean- Present if the qdsp version is v56 1.3
+- qcom,qdsp6v56-1-5: Boolean- Present if the qdsp version is v56 1.5
+- qcom,edge:		GLINK logical name of the remote subsystem
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+                           on behalf of the subsystem driver.
+- qcom,pil-mss-memsetup: Boolean - True if TZ need to be informed of modem start address and size.
+- qcom,pas-id:	      pas_id of the subsystem.
+- qcom,qdsp6v56-1-8: Boolean- Present if the qdsp version is v56 1.8
+- qcom,qdsp6v56-1-8-inrush-current: Boolean- Present if the qdsp version is V56 1.8 and has in-rush
+				    current issue.
+- qcom,qdsp6v61-1-1: Boolean- Present if the qdsp version is v61 1.1
+- qcom,qdsp6v62-1-2: Boolean- Present if the qdsp version is v62 1.2
+- qcom,qdsp6v62-1-5: Boolean- Present if the qdsp version is v62 1.5
+- qcom,qdsp6v65-1-0: Boolean- Present if the qdsp version is v65 1.0
+- qcom,mx-spike-wa: Boolean- Present if we need to assert QDSP6 I/O clamp, memory
+		    wordline clamp, and compiler memory clamp during MSS restart.
+- qcom,qdsp6v56-1-10: Boolean- Present if the qdsp version is v56 1.10
+- qcom,override-acc-1: Override the default ACC settings with this value if present.
+
+Example:
+	qcom,mss@fc880000 {
+		compatible = "qcom,pil-q6v5-mss";
+		reg = <0xfc880000 0x100>,
+		      <0xfd485000 0x400>,
+		      <0xfc820000 0x020>,
+		      <0xfc401680 0x004>;
+		reg-names = "qdsp6_base", "halt_base", "rmb_base",
+			    "restart_reg";
+		interrupts = <0 24 1>;
+		vdd_mss-supply = <&pm8841_s3>;
+		vdd_cx-supply = <&pm8841_s2>;
+		vdd_cx-voltage = <7>;
+		vdd_mx-supply = <&pm8841_s1>;
+		vdd_mx-uV = <105000>;
+
+		clocks = <&clock_rpm clk_xo_pil_mss_clk>,
+			 <&clock_gcc clk_gcc_mss_cfg_ahb_clk>,
+			 <&clock_gcc clk_gcc_mss_q6_bimc_axi_clk>,
+			 <&clock_gcc clk_gcc_boot_rom_ahb_clk>;
+		clock-names = "xo", "iface_clk", "bus_clk", "mem_clk";
+		qcom,proxy-clock-names = "xo";
+		qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk";
+
+		qcom,is-not-loadable;
+		qcom,firmware-name = "mba";
+		qcom,pil-self-auth;
+		qcom,mba-image-is-not-elf;
+		qcom,override-acc;
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+		qcom,ssctl-instance-id = <12>;
+		qcom,sysmon-id = <0>;
+	};
diff --git a/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
new file mode 100644
index 0000000..d7edafc
--- /dev/null
+++ b/Documentation/devicetree/bindings/pil/subsys-pil-tz.txt
@@ -0,0 +1,135 @@
+* Generic Subsystem Peripheral Image Loader
+
+subsys-pil-tz is a generic peripheral image loader (PIL) driver. It is
+used for loading the firmware images of the subsystems into memory and
+preparing the subsystem's processor to execute code. It's also responsible
+for shutting down the processor when it's not needed.
+
+Required properties:
+- compatible:	      Must be "qcom,pil-tz-generic"
+- qcom,firmware-name: Base name of the firmware image.
+
+Optional properties:
+- reg:		      Pairs of physical base addresses and region sizes of
+		      memory mapped registers.
+- reg-names:	      Names of the bases for the above registers. Not required for
+		      PIL usage. Ex. "wrapper_base", "vbif_base".
+- interrupts:	      Subsystem to Apps watchdog bite interrupt.
+- vdd_'reg'-supply: Reference to the regulator that supplies the corresponding
+		    'reg' domain.
+- qcom,proxy-reg-names: Names of the regulators that need to be turned on/off
+			during proxy voting/unvoting.
+- qcom,active-reg-names: Names of the regulators that need to be turned on for the
+			subsystem to run. Turned off when the subsystem is shutdown.
+- qcom,vdd_'reg'-uV-uA:    Voltage and current values for the 'reg' regulator.
+- qcom,proxy-clock-names:  Names of the clocks that need to be turned on/off during
+			   proxy voting/unvoting.
+- qcom,active-clock-names: Names of the clocks that need to be turned on for the
+			   subsystem to run. Turned off when the subsystem is shutdown.
+- clock-names:	      Names of all the clocks that are accessed by the subsystem.
+- qcom,<clock-name>-freq: Frequency to be set for that clock in Hz. If the property
+			  isn't added for a clock, then the default clock frequency
+			  would be set to 19200000 Hz.
+- qcom,msm-bus,name:  Name of the bus client for the subsystem.
+- qcom,msm-bus,num-cases: Number of use-cases.
+- qcom,msm-bus,num-paths: Number of paths.
+- qcom,msm-bus,active-only: If not set, uses the dual context by default.
+- qcom,msm-bus,vectors-KBps: Vector array of master id, slave id, arbitrated
+			     bandwidth and instantaneous bandwidth.
+- qcom,pas-id:	      pas_id of the subsystem.
+- qcom,proxy-timeout-ms: Proxy vote timeout value for the subsystem.
+- qcom,smem-id:	      ID of the SMEM item for the subsystem.
+- qcom,is-not-loadable: Boolean. Present if the subsystem's firmware image does not
+			need to be loaded.
+- qcom,pil-no-auth: Boolean. Present if the subsystem is not authenticated and brought
+		    out of reset by using the PIL ops.
+- qcom,mem-protect-id: Virtual ID used by PIL to call into TZ/HYP to protect/unprotect
+			subsystem related memory.
+- qcom,gpio-err-fatal: GPIO used by the subsystem to indicate error fatal to the apps.
+- qcom,gpio-err-ready: GPIO used by the subsystem to indicate error ready to the apps.
+- qcom,gpio-proxy-unvote: GPIO used by the subsystem to trigger proxy unvoting in
+			  the apps.
+- qcom,gpio-force-stop: GPIO used by the apps to force the subsystem to shutdown.
+- qcom,gpio-stop-ack: GPIO used by the subsystem to ack force stop or a graceful stop
+		      to the apps.
+- qcom,restart-group: List of subsystems that will need to restart together.
+- qcom,keep-proxy-regs-on: Boolean. Present if during proxy unvoting, PIL needs to leave
+			the regulators enabled after removing the voltage/current votes.
+- qcom,edge:		GLINK logical name of the remote subsystem
+- qcom,ssctl-instance-id: Instance id used by the subsystem to connect with the SSCTL
+			  service.
+- qcom,sysmon-id:	platform device id that sysmon is probed with for the subsystem.
+- qcom,pil-force-shutdown: Boolean. If set, the SSR framework will not trigger graceful shutdown
+                           on behalf of the subsystem driver.
+- qcom,pil-generic-irq-handler: generic interrupt handler used for communication with subsystem
+				based on bit values in scsr registers.
+- qcom,spss-scsr-bits: array of bit positions into the scsr registers used in generic handler.
+- qcom,complete-ramdump: Boolean. If set, complete ramdump i.e. region between start address of
+			first segment to end address of last segment will be collected without
+			leaving any hole in between.
+
+Example:
+	qcom,venus@fdce0000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xfdce0000 0x4000>,
+		      <0xfdc80000 0x400>;
+
+		vdd-supply = <&gdsc_venus>;
+		qcom,proxy-reg-names = "vdd";
+		clock-names = "core_clk", "iface_clk", "bus_clk", "mem_clk",
+				"scm_core_clk", "scm_iface_clk", "scm_bus_clk",
+				"scm_core_clk_src";
+		qcom,proxy-clock-names = "core_clk", "iface_clk", "bus_clk",
+					"mem_clk", "scm_core_clk",
+					"scm_iface_clk", "scm_bus_clk",
+					"scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <50000000>;
+
+		qcom,msm-bus,name = "pil-venus";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,active-only = <0>;
+		qcom,msm-bus,vectors-KBps =
+				<63 512 0 0>,
+				<63 512 0 304000>;
+
+		qcom,pas-id = <9>;
+		qcom,proxy-timeout-ms = <2000>;
+		qcom,firmware-name = "venus";
+	};
+
+	qcom,lpass@fe200000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0xfe200000 0x00100>,
+		      <0xfd485100 0x00010>,
+		      <0xfc4016c0 0x00004>;
+
+		interrupts = <0 162 1>;
+
+		vdd_cx-supply = <&pm8841_s2_corner>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <7 100000>;
+		clock-names = "bus_clk", "xo", "scm_core_clk", "scm_iface_clk",
+				"scm_bus_clk", "scm_core_clk_src";
+		qcom,active-clock-names = "bus_clk";
+		qcom,proxy-clock-names = "xo", "scm_core_clk", "scm_iface_clk",
+					"scm_bus_clk", "scm_core_clk_src";
+		qcom,scm_core_clk_src-freq = <50000000>;
+
+		qcom,smem-id = <423>;
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,firmware-name = "adsp";
+		qcom,edge = "lpass";
+
+		/* GPIO inputs from lpass */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+		/* GPIO output to lpass */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+		qcom,ssctl-instance-id = <14>;
+		qcom,sysmon-id = <1>;
+	};
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl b/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl
new file mode 100644
index 0000000..10bbe56
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msmskunk-pinctrl
@@ -0,0 +1,186 @@
+Qualcomm Technologies, Inc. MSMSKUNK TLMM block
+
+This binding describes the Top Level Mode Multiplexer block found in the
+MSMSKUNK platform.
+
+- compatible:
+	Usage: required
+	Value type: <string>
+	Definition: must be "qcom,msmskunk-pinctrl"
+
+- reg:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: the base address and size of the TLMM register space.
+
+- interrupts:
+	Usage: required
+	Value type: <prop-encoded-array>
+	Definition: should specify the TLMM summary IRQ.
+
+- interrupt-controller:
+	Usage: required
+	Value type: <none>
+	Definition: identifies this node as an interrupt controller
+
+- #interrupt-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 2. Specifying the pin number and flags, as defined
+		    in <dt-bindings/interrupt-controller/irq.h>
+
+- gpio-controller:
+	Usage: required
+	Value type: <none>
+	Definition: identifies this node as a gpio controller
+
+- #gpio-cells:
+	Usage: required
+	Value type: <u32>
+	Definition: must be 2. Specifying the pin number and flags, as defined
+		    in <dt-bindings/gpio/gpio.h>
+
+Please refer to ../gpio/gpio.txt and ../interrupt-controller/interrupts.txt for
+a general description of GPIO and interrupt bindings.
+
+Please refer to pinctrl-bindings.txt in this directory for details of the
+common pinctrl bindings used by client devices, including the meaning of the
+phrase "pin configuration node".
+
+The pin configuration nodes act as a container for an arbitrary number of
+subnodes. Each of these subnodes represents some desired configuration for a
+pin, a group, or a list of pins or groups. This configuration can include the
+mux function to select on those pin(s)/group(s), and various pin configuration
+parameters, such as pull-up, drive strength, etc.
+
+
+PIN CONFIGURATION NODES:
+
+The name of each subnode is not important; all subnodes should be enumerated
+and processed purely based on their content.
+
+Each subnode only affects those parameters that are explicitly listed. In
+other words, a subnode that lists a mux function but no pin configuration
+parameters implies no information about any pin configuration parameters.
+Similarly, a pin subnode that describes a pullup parameter implies no
+information about e.g. the mux function.
+
+
+The following generic properties as defined in pinctrl-bindings.txt are valid
+to specify in a pin configuration subnode:
+
+- pins:
+	Usage: required
+	Value type: <string-array>
+	Definition: List of gpio pins affected by the properties specified in
+		    this subnode.
+
+		    Valid pins are:
+		      gpio0-gpio149
+		        Supports mux, bias and drive-strength
+
+		      sdc1_clk, sdc1_cmd, sdc1_data, sdc2_clk, sdc2_cmd,
+		      sdc2_data, sdc1_rclk
+		        Supports bias and drive-strength
+
+- function:
+	Usage: required
+	Value type: <string>
+	Definition: Specify the alternative function to be configured for the
+		    specified pins. Functions are only valid for gpio pins.
+		    Valid values are:
+
+		    blsp_uart1, blsp_spi1, blsp_i2c1, blsp_uim1, atest_tsens,
+		    bimc_dte1, dac_calib0, blsp_spi8, blsp_uart8, blsp_uim8,
+		    qdss_cti_trig_out_b, bimc_dte0, dac_calib1, qdss_cti_trig_in_b,
+		    dac_calib2, atest_tsens2, atest_usb1, blsp_spi10, blsp_uart10,
+		    blsp_uim10, atest_bbrx1, atest_usb13, atest_bbrx0, atest_usb12,
+		    mdp_vsync, edp_lcd, blsp_i2c10, atest_gpsadc1, atest_usb11,
+		    atest_gpsadc0, edp_hot, atest_usb10, m_voc, dac_gpio, atest_char,
+		    cam_mclk, pll_bypassnl, qdss_stm7, blsp_i2c8, qdss_tracedata_b,
+		    pll_reset, qdss_stm6, qdss_stm5, qdss_stm4, atest_usb2, cci_i2c,
+		    qdss_stm3, dac_calib3, atest_usb23, atest_char3, dac_calib4,
+		    qdss_stm2, atest_usb22, atest_char2, qdss_stm1, dac_calib5,
+		    atest_usb21, atest_char1, dbg_out, qdss_stm0, dac_calib6,
+		    atest_usb20, atest_char0, dac_calib10, qdss_stm10,
+		    qdss_cti_trig_in_a, cci_timer4, blsp_spi6, blsp_uart6, blsp_uim6,
+		    blsp2_spi, qdss_stm9, qdss_cti_trig_out_a, dac_calib11,
+		    qdss_stm8, cci_timer0, qdss_stm13, dac_calib7, cci_timer1,
+		    qdss_stm12, dac_calib8, cci_timer2, blsp1_spi, qdss_stm11,
+		    dac_calib9, cci_timer3, cci_async, dac_calib12, blsp_i2c6,
+		    qdss_tracectl_a, dac_calib13, qdss_traceclk_a, dac_calib14,
+		    dac_calib15, hdmi_rcv, dac_calib16, hdmi_cec, pwr_modem,
+		    dac_calib17, hdmi_ddc, pwr_nav, dac_calib18, pwr_crypto,
+		    dac_calib19, hdmi_hot, dac_calib20, dac_calib21, pci_e0,
+		    dac_calib22, dac_calib23, dac_calib24, tsif1_sync, dac_calib25,
+		    sd_write, tsif1_error, blsp_spi2, blsp_uart2, blsp_uim2,
+		    qdss_cti, blsp_i2c2, blsp_spi3, blsp_uart3, blsp_uim3, blsp_i2c3,
+		    uim3, blsp_spi9, blsp_uart9, blsp_uim9, blsp10_spi, blsp_i2c9,
+		    blsp_spi7, blsp_uart7, blsp_uim7, qdss_tracedata_a, blsp_i2c7,
+		    qua_mi2s, gcc_gp1_clk_a, ssc_irq, uim4, blsp_spi11, blsp_uart11,
+		    blsp_uim11, gcc_gp2_clk_a, gcc_gp3_clk_a, blsp_i2c11, cri_trng0,
+		    cri_trng1, cri_trng, qdss_stm18, pri_mi2s, qdss_stm17, blsp_spi4,
+		    blsp_uart4, blsp_uim4, qdss_stm16, qdss_stm15, blsp_i2c4,
+		    qdss_stm14, dac_calib26, spkr_i2s, audio_ref, lpass_slimbus,
+		    isense_dbg, tsense_pwm1, tsense_pwm2, btfm_slimbus, ter_mi2s,
+		    qdss_stm22, qdss_stm21, qdss_stm20, qdss_stm19, gcc_gp1_clk_b,
+		    sec_mi2s, blsp_spi5, blsp_uart5, blsp_uim5, gcc_gp2_clk_b,
+		    gcc_gp3_clk_b, blsp_i2c5, blsp_spi12, blsp_uart12, blsp_uim12,
+		    qdss_stm25, qdss_stm31, blsp_i2c12, qdss_stm30, qdss_stm29,
+		    tsif1_clk, qdss_stm28, tsif1_en, tsif1_data, sdc4_cmd, qdss_stm27,
+		    qdss_traceclk_b, tsif2_error, sdc43, vfr_1, qdss_stm26, tsif2_clk,
+		    sdc4_clk, qdss_stm24, tsif2_en, sdc42, qdss_stm23, qdss_tracectl_b,
+		    sd_card, tsif2_data, sdc41, tsif2_sync, sdc40, mdp_vsync_p_b,
+		    ldo_en, mdp_vsync_s_b, ldo_update, blsp11_uart_tx_b, blsp11_uart_rx_b,
+		    blsp11_i2c_sda_b, prng_rosc, blsp11_i2c_scl_b, uim2, uim1, uim_batt,
+		    pci_e2, pa_indicator, adsp_ext, ddr_bist, qdss_tracedata_11,
+		    qdss_tracedata_12, modem_tsync, nav_dr, nav_pps, pci_e1, gsm_tx,
+		    qspi_cs, ssbi2, ssbi1, mss_lte, qspi_clk, qspi0, qspi1, qspi2, qspi3,
+		    gpio
+
+- bias-disable:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as no pull.
+
+- bias-pull-down:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as pull down.
+
+- bias-pull-up:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins should be configured as pull up.
+
+- output-high:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are configured in output mode, driven
+		    high.
+		    Not valid for sdc pins.
+
+- output-low:
+	Usage: optional
+	Value type: <none>
+	Definition: The specified pins are configured in output mode, driven
+		    low.
+		    Not valid for sdc pins.
+
+- drive-strength:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects the drive strength for the specified pins, in mA.
+		    Valid values are: 2, 4, 6, 8, 10, 12, 14 and 16
+
+Example:
+
+	tlmm: pinctrl@03400000 {
+		compatible = "qcom,msmskunk-pinctrl";
+		reg = <0x03400000 0xc00000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
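+
+A hypothetical pin configuration subnode, placed as a child of the tlmm node
+above, could look like the following sketch; the gpio numbers and the
+"blsp_uart1" function are illustrative only and must match the actual board
+design:
+
+	blsp_uart1_active: blsp_uart1_active {
+		pins = "gpio0", "gpio1";
+		function = "blsp_uart1";
+		drive-strength = <2>;
+		bias-disable;
+	};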
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
index 8d893a8..521c783 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-gpio.txt
@@ -91,14 +91,18 @@
 	Value type: <string>
 	Definition: Specify the alternative function to be configured for the
 		    specified pins.  Valid values are:
-		    "normal",
-		    "paired",
-		    "func1",
-		    "func2",
-		    "dtest1",
-		    "dtest2",
-		    "dtest3",
-		    "dtest4"
+			"normal",
+			"paired",
+			"func1",
+			"func2",
+			"dtest1",
+			"dtest2",
+			"dtest3",
+			"dtest4",
+		    And the following values are supported by LV/MV GPIO subtypes:
+			"func3",
+			"func4",
+			"analog"
 
 - bias-disable:
 	Usage: optional
@@ -183,6 +187,29 @@
 	Value type: <none>
 	Definition: The specified pins are configured in open-source mode.
 
+- qcom,atest:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects ATEST rail to route to GPIO when it's configured
+		    in analog-pass-through mode by specifying "analog" function.
+		    Valid values are 0-3 corresponding to PMIC_GPIO_AOUT_ATESTx
+		    defined in <dt-bindings/pinctrl/qcom,pmic-gpio.h>.
+
+- qcom,dtest-buffer:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects DTEST rail to route to GPIO when it's configured
+		    as a digital input.
+		    For LV/MV GPIO subtypes, the valid values are 0-3
+		    corresponding to PMIC_GPIO_DIN_DTESTx defined in
+		    <dt-bindings/pinctrl/qcom,pmic-gpio.h>. Only one
+		    DTEST rail can be selected at a time.
+		    For 4CH/8CH GPIO subtypes, supported values are 1-15.
+		    4 DTEST rails are supported in total and more than 1 DTEST
+		    rail can be selected simultaneously. Each bit of the
+		    4 LSBs represents one DTEST rail; for example, [3:0] = 0101
+		    means that both DTEST1 and DTEST3 are selected.
+
 Example:
 
 	pm8921_gpio: gpio@150 {
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
index 2ab95bc..2616424 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,pmic-mpp.txt
@@ -144,7 +144,7 @@
 - qcom,dtest:
 	Usage: optional
 	Value type: <u32>
-	Definition: Selects which dtest rail to be routed in the various functions.
+	Definition: Selects which dtest rail to be routed for digital output.
 		    Valid values are 1-4
 
 - qcom,amux-route:
@@ -158,6 +158,16 @@
 	Value type: <none>
 	Definition: Indicates that the pin should be operating in paired mode.
 
+- qcom,dtest-buffer:
+	Usage: optional
+	Value type: <u32>
+	Definition: Selects which dtest rail to be routed for digital input.
+		    It's also valid when the pin is configured as digital
+		    input and output.
+		    4 dtest rails are supported in total and more than one rail
+		    can be selected simultaneously. Each bit of the 4 LSBs
+		    represents one dtest rail; for example, [3:0] = 0101 means
+		    both dtest1 and dtest3 are selected. Valid values are 1-15.
+
 Example:
 
 	mpps@a000 {
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
new file mode 100644
index 0000000..f3e6ca9
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -0,0 +1,212 @@
+Qualcomm Technologies, Inc. Internet Packet Accelerator
+
+Internet Packet Accelerator (IPA) is a programmable protocol
+processor HW block. It is designed to support generic HW processing
+of UL/DL IP packets for various use cases independent of radio technology.
+
+Required properties:
+
+IPA node:
+
+- compatible : "qcom,ipa"
+- reg: Specifies the base physical addresses and the sizes of the IPA
+       registers.
+- reg-names: "ipa-base" - string to identify the IPA CORE base registers.
+	     "bam-base" - string to identify the IPA BAM base registers.
+	     "a2-bam-base" - string to identify the A2 BAM base registers.
+- interrupts: Specifies the interrupt associated with IPA.
+- interrupt-names: "ipa-irq" - string to identify the IPA core interrupt.
+                   "bam-irq" - string to identify the IPA BAM interrupt.
+                   "a2-bam-irq" - string to identify the A2 BAM interrupt.
+- qcom,ipa-hw-ver: Specifies the IPA hardware version.
+
+Optional:
+
+- qcom,wan-rx-ring-size: size of WAN rx ring, default is 32
+- qcom,arm-smmu: SMMU is present and ARM SMMU driver is used
+- qcom,msm-smmu: SMMU is present and QSMMU driver is used
+- qcom,smmu-s1-bypass: Boolean context flag to set SMMU to S1 bypass
+- qcom,smmu-fast-map: Boolean context flag to set SMMU to fastpath mode
+- ipa_smmu_ap: AP general purpose SMMU device
+	compatible "qcom,ipa-smmu-ap-cb"
+- ipa_smmu_wlan: WDI SMMU device
+	compatible "qcom,ipa-smmu-wlan-cb"
+- ipa_smmu_uc: uc SMMU device
+	compatible "qcom,ipa-smmu-uc-cb"
+- qcom,use-a2-service: determine if A2 service will be used
+- qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used
+- qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used
+- qcom,ee: the EE assigned to the (non-secure) APPS processor from the IPA-BAM
+point of view. This is a number
+- qcom,ipa-hw-mode: IPA hardware mode - Normal, Virtual memory allocation,
+memory allocation over a PCIe bridge
+- qcom,msm-bus,name:            String representing the client-name
+- qcom,msm-bus,num-cases:       Total number of usecases
+- qcom,msm-bus,active-only:     Boolean context flag for requests in active or
+                                dual (active & sleep) context
+- qcom,msm-bus,num-paths:       Total number of master-slave pairs
+- qcom,msm-bus,vectors-KBps:    Arrays of unsigned integers representing:
+                                master-id, slave-id, arbitrated bandwidth
+                                in KBps, instantaneous bandwidth in KBps
+- qcom,ipa-bam-remote-mode:     Boolean context flag to determine if ipa bam
+                                is in remote mode.
+- qcom,modem-cfg-emb-pipe-flt:  Boolean context flag to determine if modem
+                                configures embedded pipe filtering rules
+- qcom,skip-uc-pipe-reset:      Boolean context flag to indicate whether
+                                a pipe reset via the IPA uC is required
+- qcom,ipa-wdi2:		Boolean context flag to indicate whether
+				using wdi-2.0 or not
+- qcom,use-dma-zone:            Boolean context flag to indicate whether memory
+                                allocations controlled by IPA driver that do not
+				specify a struct device * should use GFP_DMA to
+				workaround IPA HW limitations
+- qcom,use-gsi:                 Boolean context flag to indicate if the
+                                transport protocol is GSI
+- qcom,use-rg10-limitation-mitigation:	Boolean context flag to activate
+					the mitigation to register group 10
+					AP access limitation
+- qcom,do-not-use-ch-gsi-20:	Boolean context flag to activate
+				software workaround for IPA limitation
+				to not use GSI physical channel 20
+- qcom,tethered-flow-control:   Boolean context flag to indicate whether
+                                apps based flow control is needed for tethered
+                                call.
+- qcom,rx-polling-sleep-ms:	Receive Polling Timeout in millisecond,
+				default is 1 millisecond.
+- qcom,ipa-polling-iteration:	IPA Polling Iteration Count, default is 40.
+- qcom,ipa-tz-unlock-reg:       Register start addresses and ranges which
+                                need to be unlocked by TZ.
+
+IPA pipe sub nodes (A2 static pipes configurations):
+
+-label: two labels are supported, a2-to-ipa and ipa-to-a2, which
+supply the static configuration for the A2-IPA connection.
+-qcom,src-bam-physical-address: The physical address of the source BAM
+-qcom,ipa-bam-mem-type: The memory type:
+                       0(Pipe memory), 1(Private memory), 2(System memory)
+-qcom,src-bam-pipe-index: Source pipe index
+-qcom,dst-bam-physical-address: The physical address of the
+                                destination BAM
+-qcom,dst-bam-pipe-index: Destination pipe index
+-qcom,data-fifo-offset: Data fifo base offset
+-qcom,data-fifo-size:  Data fifo size (bytes)
+-qcom,descriptor-fifo-offset: Descriptor fifo base offset
+-qcom,descriptor-fifo-size: Descriptor fifo size (bytes)
+
+Optional properties:
+-qcom,ipa-pipe-mem: Specifies the base physical address and the
+                    size of the IPA pipe memory region.
+                    Pipe memory is a feature which may be supported by the
+                    target (HW platform). The driver supports using pipe
+                    memory instead of system memory. If this property does
+                    not appear in the IPA DTS entry, the driver will use
+                    system memory.
+- clocks: This property shall provide a list of entries, each of which
+    contains a phandle to the clock controller device and a macro that is
+    the clock's name in hardware. This should be "clock_rpm" as the clock
+    controller phandle and "clk_ipa_clk" as the macro for "iface_clk".
+- clock-names: This property shall contain the clock input names used by the
+    driver, in the same order as the clocks property. This should be "iface_clk".
+
+IPA SMMU sub nodes
+
+-compatible: "qcom,ipa-smmu-ap-cb" - represents the AP context bank.
+
+-compatible: "qcom,ipa-smmu-wlan-cb" - represents IPA WLAN context bank.
+
+-compatible: "qcom,ipa-smmu-uc-cb" - represents IPA uC context bank (for uC
+					offload scenarios).
+- iommus : the phandle and stream IDs for the SMMU used by this root
+
+- qcom,iova-mapping: specifies the start address and size of iova space.
+
+IPA SMP2P sub nodes
+
+-compatible: "qcom,smp2pgpio-map-ipa-1-out" - represents the out gpio from
+					      ipa driver to modem.
+
+-compatible: "qcom,smp2pgpio-map-ipa-1-in" - represents the in gpio to
+					     ipa driver from modem.
+
+-gpios: Binding to the gpio defined in XXX-smp2p.dtsi
+
+
+Example:
+
+qcom,ipa@fd4c0000 {
+	compatible = "qcom,ipa";
+	reg = <0xfd4c0000 0x26000>,
+	      <0xfd4c4000 0x14818>,
+	      <0xfc834000 0x7000>;
+	reg-names = "ipa-base", "bam-base", "a2-bam-base";
+	interrupts = <0 252 0>,
+	             <0 253 0>,
+	             <0 29 1>;
+	interrupt-names = "ipa-irq", "bam-irq", "a2-bam-irq";
+	qcom,ipa-hw-ver = <1>;
+	clocks = <&clock_rpm clk_ipa_clk>;
+	clock-names = "iface_clk";
+
+        qcom,msm-bus,name = "ipa";
+        qcom,msm-bus,num-cases = <3>;
+        qcom,msm-bus,num-paths = <2>;
+        qcom,msm-bus,vectors-KBps =
+        <90 512 0 0>, <90 585 0 0>,         /* No vote */
+        <90 512 100000 800000>, <90 585 100000 800000>,    /* SVS */
+        <90 512 100000 1200000>, <90 585 100000 1200000>;    /* PERF */
+        qcom,bus-vector-names = "MIN", "SVS", "PERF";
+
+	qcom,pipe1 {
+		label = "a2-to-ipa";
+		qcom,src-bam-physical-address = <0xfc834000>;
+		qcom,ipa-bam-mem-type = <0>;
+		qcom,src-bam-pipe-index = <1>;
+		qcom,dst-bam-physical-address = <0xfd4c0000>;
+		qcom,dst-bam-pipe-index = <6>;
+		qcom,data-fifo-offset = <0x1000>;
+		qcom,data-fifo-size = <0xd00>;
+		qcom,descriptor-fifo-offset = <0x1d00>;
+		qcom,descriptor-fifo-size = <0x300>;
+	};
+
+	qcom,pipe2 {
+		label = "ipa-to-a2";
+		qcom,src-bam-physical-address = <0xfd4c0000>;
+		qcom,ipa-bam-mem-type = <0>;
+		qcom,src-bam-pipe-index = <7>;
+		qcom,dst-bam-physical-address = <0xfc834000>;
+		qcom,dst-bam-pipe-index = <0>;
+		qcom,data-fifo-offset = <0x00>;
+		qcom,data-fifo-size = <0xd00>;
+		qcom,descriptor-fifo-offset = <0xd00>;
+		qcom,descriptor-fifo-size = <0x300>;
+	};
+
+	/* smp2p gpio information */
+	qcom,smp2pgpio_map_ipa_1_out {
+		compatible = "qcom,smp2pgpio-map-ipa-1-out";
+		gpios = <&smp2pgpio_ipa_1_out 0 0>;
+	};
+
+	qcom,smp2pgpio_map_ipa_1_in {
+		compatible = "qcom,smp2pgpio-map-ipa-1-in";
+		gpios = <&smp2pgpio_ipa_1_in 0 0>;
+	};
+
+	ipa_smmu_ap: ipa_smmu_ap {
+		compatible = "qcom,ipa-smmu-ap-cb";
+		iommus = <&anoc2_smmu 0x30>;
+		qcom,iova-mapping = <0x10000000 0x40000000>;
+	};
+
+	ipa_smmu_wlan: ipa_smmu_wlan {
+		compatible = "qcom,ipa-smmu-wlan-cb";
+		iommus = <&anoc2_smmu 0x31>;
+	};
+
+	ipa_smmu_uc: ipa_smmu_uc {
+		compatible = "qcom,ipa-smmu-uc-cb";
+		iommus = <&anoc2_smmu 0x32>;
+		qcom,iova-mapping = <0x40000000 0x20000000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/platform/msm/msm_gsi.txt b/Documentation/devicetree/bindings/platform/msm/msm_gsi.txt
new file mode 100644
index 0000000..7b29724
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/msm_gsi.txt
@@ -0,0 +1,15 @@
+* Qualcomm Technologies, Inc. GSI driver module
+
+GSI is a HW accelerator that supports Generic SW Interfaces (GSI) which are
+peripheral specific (IPA in this case).
+GSI translates SW transfer elements (TRE) into TLV transactions which are
+then processed by the peripheral.
+This Driver configures and communicates with GSI HW.
+
+Required properties:
+- compatible:		Must be "qcom,msm_gsi"
+
+Example:
+	qcom,msm-gsi {
+		compatible = "qcom,msm_gsi";
+	};
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
new file mode 100644
index 0000000..c7024e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -0,0 +1,18 @@
+* Qualcomm Technologies, Inc. RmNet IPA driver module
+
+This module enables embedded data calls using IPA HW.
+
+Required properties:
+- compatible:		Must be "qcom,rmnet-ipa"
+
+Optional:
+- qcom,rmnet-ipa-ssr: determine if modem SSR is supported
+- qcom,ipa-loaduC: indicate that ipa uC should be loaded
+- qcom,ipa-advertise-sg-support: determine how to respond to a query
+regarding scatter-gather capability
+
+Example:
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa";
+	};
+
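+A node that also sets the optional flags might look like the following
+sketch (assuming all three optional entries are simple boolean properties,
+as their descriptions above suggest):
+
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+		qcom,ipa-advertise-sg-support;
+	};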
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
new file mode 100644
index 0000000..3f55312
--- /dev/null
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -0,0 +1,18 @@
+* Qualcomm Technologies, Inc. RmNet IPA driver module
+
+This module enables embedded data calls using IPA v3 HW.
+
+Required properties:
+- compatible:		Must be "qcom,rmnet-ipa3"
+
+Optional:
+- qcom,rmnet-ipa-ssr: determine if modem SSR is supported
+- qcom,ipa-loaduC: indicate that ipa uC should be loaded
+- qcom,ipa-advertise-sg-support: determine how to respond to a query
+regarding scatter-gather capability
+
+Example:
+	qcom,rmnet-ipa3 {
+		compatible = "qcom,rmnet-ipa3";
+	};
+
diff --git a/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
new file mode 100644
index 0000000..5d0499c
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/gdsc-regulator.txt
@@ -0,0 +1,70 @@
+QTI Global Distributed Switch Controller (GDSC) Regulator Driver
+
+The GDSC driver, implemented under the regulator framework, is responsible for
+safely collapsing and restoring power to peripheral cores on chipsets like
+msm8996 for power savings.
+
+Required properties:
+ - compatible:      Must be "qcom,gdsc"
+ - regulator-name:  A string used as a descriptive name for regulator outputs
+ - reg:             The address of the GDSCR register
+
+Optional properties:
+ - parent-supply:   phandle to the parent supply/regulator node
+ - clock-names:     List of string names for core clocks
+ - qcom,retain-mem:  Presence denotes a hardware requirement to leave the
+		     forced core memory retention signals in the core's clock
+		     branch control registers asserted.
+ - qcom,retain-periph: Presence denotes a hardware requirement to leave the
+		     forced periph memory retention signal in the core's clock
+		     branch control registers asserted.
+ - qcom,skip-logic-collapse: Presence denotes a requirement to leave power to
+                             the core's logic enabled.
+ - qcom,support-hw-trigger: Presence denotes a hardware feature to switch
+			    on/off this regulator based on internal HW signals
+			    to save more power.
+ - qcom,enable-root-clk: Presence denotes that the clocks in the "clocks"
+			property are required to be enabled before gdsc is
+			turned on and disabled before turning off gdsc. This
+			will be used in subsystems where reset is synchronous
+			and root clk is active without sw being aware of its
+			state. The clock-name which denotes the root clock
+			should be named as "core_root_clk".
+ - qcom,force-enable-root-clk: If set, denotes that the root clock should be
+			force enabled before turning on the GDSC and then be
+			immediately force disabled. Likewise for GDSC disable.
+			This is used in cases where the core root clock needs
+			to be force-enabled prior to turning on the core. The
+			clock-name which denotes the root clock should be
+			"core_root_clk".
+ - qcom,clk-dis-wait-val: Input value for CLK_DIS_WAIT that controls the state
+			 transition delay after halting the clock in the collapsible core.
+ - reg-names:		Names of the bases for the above "reg" registers.
+			Ex. "base", "domain-addr", "sw-reset", "hw-ctrl-addr".
+ - qcom,no-status-check-on-disable: Do not poll the status bit when GDSC
+			is disabled.
+ - qcom,disallow-clear: Presence denotes the periph & core memory will not be
+			cleared, unless the required subsystem does not invoke
+			the api which will allow clearing the bits.
+ - qcom,gds-timeout:	Maximum time (in usecs) that might be taken by a GDSC
+			to enable.
+ - qcom,reset-aon-logic: If present, the GPU DEMET cells need to be reset while
+			 enabling the GX GDSC.
+ - resets: reset specifier pair consisting of phandle for the reset controller
+			and reset lines used by this controller. These can be
+			supplied only if we support qcom,skip-logic-collapse.
+ - reset-names: reset signal name strings sorted in the same order as the resets
+			property. These can be supplied only if we support
+			qcom,skip-logic-collapse.
+ - qcom,poll-cfg-gdscr:	Poll the CFG register of the GDSC to determine if the
+			GDSC is enabled/disabled. This flag should not be set
+			in conjunction with "hw-ctrl-addr".
+
+Example:
+	gdsc_oxili_gx: qcom,gdsc@fd8c4024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_oxili_gx";
+		parent-supply = <&pm8841_s4>;
+		reg = <0xfd8c4024 0x4>;
+		clock-names = "core_clk";
+	};
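+
+A GDSC that additionally retains memory and supports hardware control could be
+described as in the sketch below (the node name, unit address and register
+value are illustrative only):
+
+	gdsc_venus: qcom,gdsc@fd8c1024 {
+		compatible = "qcom,gdsc";
+		regulator-name = "gdsc_venus";
+		reg = <0xfd8c1024 0x4>;
+		clock-names = "core_clk";
+		qcom,retain-mem;
+		qcom,retain-periph;
+		qcom,support-hw-trigger;
+	};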
diff --git a/Documentation/devicetree/bindings/regulator/stub-regulator.txt b/Documentation/devicetree/bindings/regulator/stub-regulator.txt
new file mode 100644
index 0000000..1057e17
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/stub-regulator.txt
@@ -0,0 +1,48 @@
+Stub Voltage Regulators
+
+stub-regulators are place-holder regulator devices which do not impact any
+hardware state.  They provide a means for consumer devices to utilize all
+regulator features for testing purposes.
+
+Required properties:
+- compatible:      Must be "qcom,stub-regulator".
+- regulator-name:  A string used as a descriptive name for regulator outputs.
+
+Optional properties:
+- parent-supply:     phandle to the parent supply/regulator node if one exists.
+- qcom,hpm-min-load: Load current in uA which corresponds to the minimum load
+			which requires the regulator to be in high power mode.
+- qcom,system-load:  Load in uA present on regulator that is not captured by any
+			consumer request.
+
+All properties specified within the core regulator framework can also be used.
+These bindings can be found in regulator.txt.
+
+Example:
+
+/ {
+	pm8026_s3: regulator-s3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "8026_s3";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1300000>;
+		regulator-max-microvolt = <1300000>;
+	};
+
+	pm8026_l1: regulator-l1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "8026_l1";
+		parent-supply = <&pm8026_s3>;
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1225000>;
+		regulator-max-microvolt = <1225000>;
+	};
+
+	pm8026_l20: regulator-l20 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "8026_l20";
+		qcom,hpm-min-load = <5000>;
+		regulator-min-microvolt = <3075000>;
+		regulator-max-microvolt = <3075000>;
+	};
+};
diff --git a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
index 3da0ebd..5718153 100644
--- a/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
+++ b/Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
@@ -51,12 +51,24 @@
           used as a shared pool of DMA buffers for a set of devices. It can
           be used by an operating system to instanciate the necessary pool
           management subsystem if necessary.
+	- removed-dma-pool: This indicates a region of memory which is meant to
+	  be carved out and not exposed to the kernel.
         - vendor specific string in the form <vendor>,[<device>-]<usage>
 no-map (optional) - empty property
     - Indicates the operating system must not create a virtual mapping
       of the region as part of its standard mapping of system memory,
       nor permit speculative access to it under any circumstances other
       than under the control of the device driver using the region.
+no-map-fixup (optional) - empty property
+    - Indicates the operating system must reserve the memory region and keep
+      a virtual mapping. Upon the first allocation, the actually allocated
+      region is removed from any virtual mapping and behaves like "no-map",
+      while the remaining memory is returned to the system for normal use.
+      This is useful when the required region size is not known in advance:
+      a maximum size is reserved and then shrunk once the first allocation is
+      done. This property is intended for specific use cases; if unsure,
+      please don't use it. It cannot be used together with the "no-map"
+      property.
 reusable (optional) - empty property
     - The operating system can use the memory in this region with the
       limitation that the device driver(s) owning the region need to be
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
new file mode 100644
index 0000000..800508a
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,msm-eud.txt
@@ -0,0 +1,32 @@
+* Qualcomm Technologies Inc Embedded USB Debugger (EUD)
+
+The EUD (Embedded USB Debugger) is a mini-USB hub implemented
+on chip to support the USB-based debug and trace capabilities.
+
+Required properties:
+
+ - compatible:  Should be "qcom,msm-eud"
+ - interrupt-names:  Should be "eud_irq"
+ - interrupts:  Interrupt number
+ - reg: Should be address and size of EUD register space
+ - reg-names: Should be "eud_base"
+
+The driver notifies clients via extcon for VBUS spoof attach/detach
+and charger enable/disable events. Clients registered for these
+notifications should have their extcon property set to <&eud>.
+
+An example for EUD device node:
+
+	eud: qcom,msm-eud@88e0000 {
+		compatible = "qcom,msm-eud";
+		interrupt-names = "eud_irq";
+		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
+		reg = <0x88e0000 0x4000>;
+		reg-names = "eud_base";
+	};
+
+An example for EUD extcon client:
+
+	usb3 {
+		extcon = <&eud>;
+	};
diff --git a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
index 070baf4..30f2f6c 100644
--- a/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
+++ b/Documentation/devicetree/bindings/ufs/ufs-qcom.txt
@@ -7,8 +7,11 @@
 contain a phandle reference to UFS PHY node.
 
 Required properties:
-- compatible        : compatible list, contains "qcom,ufs-phy-qmp-20nm"
-		      or "qcom,ufs-phy-qmp-14nm" according to the relevant phy in use.
+- compatible        : compatible list, contains one of the following:
+		      "qcom,ufs-phy-qmp-14nm"
+		      "qcom,ufs-phy-qmp-v3"
+		      "qcom,ufs-phy-qrbtc-msmskunk"
+		      according to the relevant phy in use.
 - reg               : should contain PHY register address space (mandatory),
 - reg-names         : indicates various resources passed to driver (via reg proptery) by name.
                       Required "reg-names" is "phy_mem".
@@ -27,11 +30,13 @@
 - vddp-ref-clk-supply   : phandle to UFS device ref_clk pad power supply
 - vddp-ref-clk-max-microamp : specifies max. load that can be drawn from this supply
 - vddp-ref-clk-always-on : specifies if this supply needs to be kept always on
+- qcom,disable-lpm : disable various LPM mechanisms in UFS for platform compatibility
+  (limit link to PWM Gear-1, 1-lane slow mode; disable hibernate, and avoid suspend/resume)
 
 Example:
 
 	ufsphy1: ufsphy@0xfc597000 {
-		compatible = "qcom,ufs-phy-qmp-20nm";
+		compatible = "qcom,ufs-phy-qmp-14nm";
 		reg = <0xfc597000 0x800>;
 		reg-names = "phy_mem";
 		#phy-cells = <0>;
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index a99ed55..1650d3e 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -4,13 +4,15 @@
 Each UFS controller instance should have its own node.
 
 Required properties:
-- compatible		: must contain "jedec,ufs-1.1" or "jedec,ufs-2.0", may
-			  also list one or more of the following:
+- compatible		: must contain "jedec,ufs-1.1", may also list one or more
+					  of the following:
 					  "qcom,msm8994-ufshc"
 					  "qcom,msm8996-ufshc"
 					  "qcom,ufshc"
 - interrupts        : <interrupt mapping for UFS host controller IRQ>
 - reg               : <registers mapping>
+		      first entry should contain UFS host controller register address space (mandatory),
+                      second entry is the device ref. clock control register map (optional).
 
 Optional properties:
 - phys                  : phandle to UFS PHY node
@@ -38,9 +40,25 @@
 			  defined or a value in the array is "0" then it is assumed
 			  that the frequency is set by the parent clock or a
 			  fixed rate clock source.
--lanes-per-direction	: number of lanes available per direction - either 1 or 2.
-			  Note that it is assume same number of lanes is used both
-			  directions at once. If not specified, default is 2 lanes per direction.
+- rpm-level		: UFS Runtime power management level. Following PM levels are supported:
+			  0 - Both UFS device and Link in active state (Highest power consumption)
+			  1 - UFS device in active state but Link in Hibern8 state
+			  2 - UFS device in Sleep state but Link in active state
+			  3 - UFS device in Sleep state and Link in hibern8 state (default PM level)
+			  4 - UFS device in Power-down state and Link in Hibern8 state
+			  5 - UFS device in Power-down state and Link in OFF state (Lowest power consumption)
+- spm-level		: UFS System power management level. Allowed PM levels are the same as for rpm-level.
+- ufs-qcom-crypto	: phandle to UFS-QCOM ICE (Inline Cryptographic Engine) node
+- lanes-per-direction:	number of lanes available per direction - either 1 or 2.
+			Note that it is assumed the same number of lanes is used in both directions at once.
+			If not specified, default is 2 lanes per direction.
+- limit-tx-hs-gear	: Specify the max. limit on the TX HS gear.
+			  Valid range: 1-3. 1 => HS-G1, 2 => HS-G2, 3 => HS-G3
+- limit-rx-hs-gear	: Specify the max. limit on the RX HS gear. Refer "limit-tx-hs-gear" for expected values.
+- limit-tx-pwm-gear	: Specify the max. limit on the TX PWM gear
+			  Valid range: 1-4. 1 => PWM-G1, 2 => PWM-G2, 3 => PWM-G3, 4 => PWM-G4
+- limit-rx-pwm-gear	: Specify the max. limit on the RX PWM gear. Refer "limit-tx-pwm-gear" for expected values.
+- scsi-cmd-timeout	: Specify the command timeout (in seconds) for scsi commands
 
 Note: If above properties are not defined it can be assumed that the supply
 regulators or clocks are always on.
@@ -48,9 +66,10 @@
 Example:
 	ufshc@0xfc598000 {
 		compatible = "jedec,ufs-1.1";
-		reg = <0xfc598000 0x800>;
+		reg = <0xfc598000 0x800>, <0xfd512074 0x4>;
 		interrupts = <0 28 0>;
 
+		ufs-qcom-crypto = <&ufs_ice>;
 		vdd-hba-supply = <&xxx_reg0>;
 		vdd-hba-fixed-regulator;
 		vcc-supply = <&xxx_reg1>;
@@ -66,4 +85,105 @@
 		freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
 		phys = <&ufsphy1>;
 		phy-names = "ufsphy";
+		rpm-level = <3>;
+		spm-level = <5>;
+	};
+
+==== MSM UFS platform driver properties =====
+* For UFS host controller in MSM platform following clocks are required -
+    Controller clock source -
+        "core_clk_src", max-clock-frequency-hz = 200MHz
+
+    Controller System clock branch:
+        "core_clk" - Controller core clock
+
+    AHB/AXI interface clocks:
+        "iface_clk" - AHB interface clock
+        "bus_clk" - AXI bus master clock
+
+    PHY to controller symbol synchronization clocks:
+        "rx_lane0_sync_clk" - RX Lane 0
+        "rx_lane1_sync_clk" - RX Lane 1
+        "tx_lane0_sync_clk" - TX Lane 0
+        "tx_lane1_sync_clk" - TX Lane 1
+
+    Optional reference clock input to UFS device
+        "ref_clk", max-clock-frequency-hz = 19.2MHz
+
+* Following bus parameters are required -
+- qcom,msm-bus,name
+- qcom,msm-bus,num-cases
+- qcom,msm-bus,num-paths
+- qcom,msm-bus,vectors-KBps
+For the above four properties please refer to
+Documentation/devicetree/bindings/arm/msm/msm_bus.txt
+Note: The instantaneous bandwidth (IB) value in the vectors-KBps field should
+      be zero, as the UFS data transfer path doesn't have latency requirements;
+      voting for aggregated bandwidth (AB) should take care of providing the
+      requested throughput.
+
+- qcom,bus-vector-names: specifies string IDs for the corresponding
+bus vectors in the same order as qcom,msm-bus,vectors-KBps property.
+
+* The following parameters are optional, but required in order for PM QoS to be
+enabled and functional in the driver:
+- qcom,pm-qos-cpu-groups:		arrays of unsigned integers representing the cpu groups.
+					The number of values in the array defines the number of cpu-groups.
+					Each value is a bit-mask defining the cpus that take part in that cpu group.
+					i.e. if bit N is set, then cpuN is part of the cpu group. A cpu group
+					typically corresponds to a cpu cluster.
+					A PM QoS request object is maintained for each cpu-group.
+- qcom,pm-qos-cpu-group-latency-us:	array of values used for PM QoS voting, one for each cpu-group defined.
+					The number of values must match the number of cpu-groups defined in the
+					qcom,pm-qos-cpu-groups property.
+- qcom,pm-qos-default-cpu:		PM QoS voting is based on the cpu associated with each IO request by the block layer.
+					This defines the default cpu used for PM QoS voting in case a specific cpu value is not available.
+
+Example:
+	ufshc@0xfc598000 {
+		...
+
+		qcom,msm-bus,name = "ufs1";
+		qcom,msm-bus,num-cases = <22>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+				<95 512 0 0>, <1 650 0 0>,         /* No vote */
+
+				<95 512 922 0>, <1 650 1000 0>,   /* PWM G1 */
+				<95 512 1844 0>, <1 650 1000 0>, /* PWM G2 */
+				<95 512 3688 0>, <1 650 1000 0>, /* PWM G3 */
+				<95 512 7376 0>, <1 650 1000 0>,  /* PWM G4 */
+				<95 512 1844 0>, <1 650 1000 0>, /* PWM G1 L2 */
+				<95 512 3688 0>, <1 650 1000 0>, /* PWM G2 L2 */
+				<95 512 7376 0>, <1 650 1000 0>,  /* PWM G3 L2 */
+				<95 512 14752 0>, <1 650 1000 0>,  /* PWM G4 L2 */
+
+				<95 512 127796 0>, <1 650 1000 0>,  /* HS G1 RA */
+				<95 512 255591 0>, <1 650 1000 0>, /* HS G2 RA */
+				<95 512 511181 0>, <1 650 1000 0>, /* HS G3 RA */
+				<95 512 255591 0>, <1 650 1000 0>, /* HS G1 RA L2 */
+				<95 512 511181 0>, <1 650 1000 0>, /* HS G2 RA L2 */
+				<95 512 1022362 0>, <1 650 1000 0>, /* HS G3 RA L2 */
+
+				<95 512 149422 0>, <1 650 1000 0>,  /* HS G1 RB */
+				<95 512 298189 0>, <1 650 1000 0>, /* HS G2 RB */
+				<95 512 596378 0>, <1 650 1000 0>, /* HS G3 RB */
+				<95 512 298189 0>, <1 650 1000 0>, /* HS G1 RB L2 */
+				<95 512 596378 0>, <1 650 1000 0>, /* HS G2 RB L2 */
+				<95 512 1192756 0>, <1 650 1000 0>, /* HS G3 RB L2 */
+
+				<95 512 4096000 0>, <1 650 1000 0>; /* Max. bandwidth */
+
+		qcom,bus-vector-names = "MIN",
+					"PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
+					"PWM_G1_L2", "PWM_G2_L2", "PWM_G3_L2", "PWM_G4_L2",
+					"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
+					"HS_RA_G1_L2", "HS_RA_G2_L2", "HS_RA_G3_L2",
+					"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
+					"HS_RB_G1_L2", "HS_RB_G2_L2", "HS_RB_G3_L2",
+					"MAX";
+
+		qcom,pm-qos-cpu-groups = <0x03 0x0C>; /* group0: cpu0, cpu1, group1: cpu2, cpu3 */
+		qcom,pm-qos-cpu-group-latency-us = <200 300>; /* group0: 200us, group1: 300us */
+		qcom,pm-qos-default-cpu = <0>;
 	};
diff --git a/Documentation/devicetree/bindings/usb/msm-dbm.txt b/Documentation/devicetree/bindings/usb/msm-dbm.txt
new file mode 100644
index 0000000..6ded526
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/msm-dbm.txt
@@ -0,0 +1,15 @@
+MSM DBM (Device Bus Manager)
+
+Required properties :
+- compatible : must be one of "qcom,usb-dbm-1p4", or "qcom,usb-dbm-1p5"
+- reg : offset and length of the register set in the memory map.
+
+Optional properties :
+- qcom,reset-ep-after-lpm-resume: If present, the DBM requires an endpoint
+	reset after going to LPM.
+
+Example MSM DBM (Device Bus Manager) device node :
+	dbm_1p4: dbm@f92f8000 {
+		compatible = "qcom,usb-dbm-1p4";
+		reg = <0xf92f8000 0x1000>;
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
new file mode 100644
index 0000000..ad4adf0
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -0,0 +1,144 @@
+MSM USB PHY transceivers
+
+SSUSB-QMP PHY
+
+Required properties:
+ - compatible: Should be "qcom,usb-ssphy-qmp", "qcom,usb-ssphy-qmp-v1" or
+   "qcom,usb-ssphy-qmp-v2"
+ - reg: Address and length of the register set for the device
+   Required regs are:
+   "qmp_phy_base" : QMP PHY Base register set.
+ - "vls_clamp_reg" : top-level CSR register to be written to enable phy vls
+   clamp which allows phy to detect autonomous mode.
+ - <supply-name>-supply: phandle to the regulator device tree node
+   Required "supply-name" examples are:
+	"vdd" : vdd supply for SSPHY digital circuit operation
+	"core" : high-voltage analog supply for SSPHY
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. Required clocks are "aux_clk" and "pipe_clk".
+ - qcom,vdd-voltage-level: This property must be a list of three integer
+   values (no, min, max) where each value represents either a voltage in
+   microvolts or a value corresponding to voltage corner
+ - qcom,qmp-phy-init-seq: QMP PHY initialization sequence with reg offset, its
+   value, and delay after register write. It is not a mandatory property for
+   emulation platforms.
+ - qcom,qmp-phy-reg-offset: Provides important phy register offsets in an order
+   defined in the phy driver. Provide below mentioned register offsets in order:
+   USB3_PHY_PCS_STATUS,
+   USB3_PHY_AUTONOMOUS_MODE_CTRL,
+   USB3_PHY_LFPS_RXTERM_IRQ_CLEAR,
+   USB3_PHY_POWER_DOWN_CONTROL,
+   USB3_PHY_SW_RESET,
+   USB3_PHY_START
+- resets: reset specifier pair consists of phandle for the reset controller
+  and reset lines used by this controller.
+- reset-names: reset signal name strings sorted in the same order as the resets
+  property.
+
+Optional properties:
+ - reg: Additional register set of address and length to control QMP PHY are:
+   "tcsr_usb3_dp_phymode" : top-level CSR register to be written to select
+   super speed usb qmp phy.
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. "cfg_ahb_clk" is an optional clock.
+ - qcom,vbus-valid-override: If present, indicates VBUS pin is not connected to
+   the USB PHY and the controller must rely on external VBUS notification in
+   order to manually relay the notification to the SSPHY.
+ - qcom,emulation: Indicates that the driver is running on an emulation platform.
+ - qcom,core-voltage-level: This property must be a list of three integer
+   values (no, min, max) where each value represents either a voltage in
+   microvolts or a value corresponding to voltage corner.
+
+Example:
+	ssphy0: ssphy@f9b38000 {
+		compatible = "qcom,usb-ssphy-qmp";
+		reg = <0xf9b38000 0x16c>,
+			<0x01947244 0x4>;
+		reg-names = "qmp_phy_base",
+			"vls_clamp_reg";
+		vdd-supply = <&pmd9635_l4>;
+		vdda18-supply = <&pmd9635_l8>;
+		qcom,vdd-voltage-level = <0 900000 1050000>;
+		qcom,vbus-valid-override;
+
+		clocks = <&clock_gcc clk_gcc_usb3_phy_aux_clk>,
+			 <&clock_gcc clk_gcc_usb3_phy_pipe_clk>,
+			 <&clock_gcc clk_gcc_usb_phy_cfg_ahb2phy_clk>,
+			 <&clock_gcc clk_ln_bb_clk1>,
+			 <&clock_gcc clk_gcc_usb3_clkref_clk>;
+
+		clock-names = "aux_clk", "pipe_clk", "cfg_ahb_clk",
+			      "ref_clk_src", "ref_clk";
+
+		resets = <&clock_gcc GCC_USB3_PHY_BCR>,
+			<&clock_gcc GCC_USB3PHY_PHY_BCR>;
+		reset-names = "phy_reset",
+				"phy_phy_reset";
+
+	};
+
+QUSB2 High-Speed PHY
+
+Required properties:
+ - compatible: Should be "qcom,qusb2phy" or "qcom,qusb2phy-v2"
+ - reg: Address and length of the QUSB2 PHY register set
+ - reg-names: Should be "qusb_phy_base".
+ - <supply-name>-supply: phandle to the regulator device tree node
+   Required supplies are:
+	"vdd" : vdd supply for digital circuit operation
+	"vdda18" : 1.8v high-voltage analog supply
+	"vdda33" : 3.3v high-voltage analog supply
+ - qcom,vdd-voltage-level: This property must be a list of three integer
+   values (no, min, max) where each value represents either a voltage in
+   microvolts or a value corresponding to voltage corner
+ - phy_type: Should be one of "ulpi" or "utmi". ChipIdea core uses "ulpi" mode.
+ - resets: reset specifier pair consists of phandle for the reset controller
+   and reset lines used by this controller.
+ - reset-names: reset signal name strings sorted in the same order as the resets
+   property.
+
+Optional properties:
+ - reg-names: Additional registers corresponding with the following:
+   "tune2_efuse_addr": EFUSE based register address to read the TUNE2
+   parameter via the QSCRATCH interface.
+   "emu_phy_base" : phy base address used for programming emulation target phy.
+   "ref_clk_addr" : ref_clk bcr address used for on/off ref_clk before reset.
+ - clocks: a list of phandles to the PHY clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. "cfg_ahb_clk", "ref_clk_src" and "ref_clk" are optional clocks.
+ - qcom,qusb-phy-init-seq: QUSB PHY initialization sequence with value,reg pair.
+ - qcom,qusb-phy-host-init-seq: QUSB PHY initialization sequence for host mode
+   with value,reg pair.
+ - qcom,emu-init-seq : emulation initialization sequence with value,reg pair.
+ - qcom,phy-pll-reset-seq : emulation PLL reset sequence with value,reg pair.
+ - qcom,emu-dcm-reset-seq : emulation DCM reset sequence with value,reg pair.
+ - qcom,tune2-efuse-bit-pos: TUNE2 parameter related start bit position with EFUSE register
+ - qcom,tune2-efuse-num-bits: Number of bits based value to use for TUNE2 high nibble
+ - qcom,emulation: Indicates that the driver is running on an emulation platform.
+ - qcom,hold-reset: Indicates that the QUSB PHY should be held in reset state.
+ - qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided.
+ - qcom,major-rev: Provides the major revision number to differentiate the power up sequence. Default is 2.0.
+
+Example:
+	qusb_phy: qusb@f9b39000 {
+		compatible = "qcom,qusb2phy";
+		reg = <0x00079000 0x7000>;
+		reg-names = "qusb_phy_base";
+		vdd-supply = <&pm8994_s2_corner>;
+		vdda18-supply = <&pm8994_l6>;
+		vdda33-supply = <&pm8994_l24>;
+		qcom,vdd-voltage-level = <1 5 7>;
+		qcom,tune2-efuse-bit-pos = <21>;
+		qcom,tune2-efuse-num-bits = <3>;
+
+		clocks = <&clock_rpm clk_ln_bb_clk>,
+			 <&clock_gcc clk_gcc_rx2_usb1_clkref_clk>,
+			 <&clock_gcc clk_gcc_usb_phy_cfg_ahb2phy_clk>;
+		clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk";
+		resets = <&clock_gcc GCC_QUSB2PHY_PRIM_BCR>;
+		reset-names = "phy_reset";
+	};
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
new file mode 100644
index 0000000..2b583f3
--- /dev/null
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -0,0 +1,115 @@
+MSM SuperSpeed USB3.0 SoC controller
+
+Required properties :
+- compatible : should be "qcom,dwc-usb3-msm"
+ - reg: Address and length of the register set for the device
+   Required regs are:
+	"core_base" : usb controller register set
+- interrupts: IRQ lines used by this controller
+- interrupt-names : Interrupt resource entries are :
+	"hs_phy_irq" : Interrupt from HS PHY for asynchronous events in LPM.
+	"pwr_event_irq" : Interrupt to controller for asynchronous events in LPM.
+	Used for SS-USB power events.
+ - clocks: a list of phandles to the controller clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. Required clocks are "xo", "iface_clk", "core_clk", "sleep_clk"
+   and "utmi_clk".
+- resets: reset specifier pair consists of phandle for the reset provider
+  and reset lines used by this controller.
+- reset-names: reset signal name strings sorted in the same order as the resets
+  property.
+
+Optional properties :
+- reg: Additional registers
+     "tcsr_base" : top-level CSR register to be written during power-on reset
+     to initialize the internal MUX that controls whether the USB3 controller
+     is used with the primary port.
+     "ahb2phy_base" : top-level register to configure read/write wait cycle with
+     both QMP and QUSB PHY registers.
+- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
+  below optional properties:
+    - qcom,msm_bus,name
+    - qcom,msm_bus,num_cases
+    - qcom,msm_bus,num_paths
+    - qcom,msm_bus,vectors
+- interrupt-names : Optional interrupt resource entries are:
+    "pmic_id_irq" : Interrupt from PMIC for external ID pin notification.
+    "ss_phy_irq"  : Interrupt from super speed phy for wake up notification.
+ - clocks: a list of phandles to the controller clocks. Use as per
+   Documentation/devicetree/bindings/clock/clock-bindings.txt
+ - clock-names: Names of the clocks in 1-1 correspondence with the "clocks"
+   property. Optional clocks are "bus_aggr_clk" and "cfg_ahb_clk".
+- qcom,charging-disabled: If present then battery charging using USB
+  is disabled.
+- vbus_dwc3-supply: phandle to the 5V VBUS supply regulator used for host mode.
+- USB3_GDSC-supply : phandle to the globally distributed switch controller
+  regulator node to the USB controller.
+- qcom,dwc-usb3-msm-tx-fifo-size: If present, represents RAM size available for
+		TX fifo allocation in bytes
+- qcom,usb-dbm : phandle for the DBM device
+- qcom,lpm-to-suspend-delay-ms: Indicates timeout (in milliseconds) to release wakeup source
+  after USB is kept into LPM.
+- qcom,ext-hub-reset-gpio: This corresponds to gpio which is used for HUB reset.
+- qcom,disable-dev-mode-pm: If present, it disables PM runtime functionality for device mode.
+- qcom,disable-host-mode-pm: If present, it disables XHCI PM runtime functionality when USB
+  host mode is used.
+- extcon: phandles to external connector devices. First phandle should point to
+	  external connector, which provide "USB" cable events, the second
+	  should point to external connector device, which provide "USB-HOST"
+	  cable events. A single phandle may be specified if a single connector
+	  device provides both "USB" and "USB-HOST" events.
+
+Sub nodes:
+- Sub node for the "DWC3 - USB3 controller".
+  This sub node is a required property of the device node. The properties of
+  this subnode are specified in dwc3.txt.
+
+Example MSM USB3.0 controller device node :
+	usb@f9200000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0xf9200000 0xfc000>,
+		      <0xfd4ab000 0x4>,
+		      <0xf9b3e000 0x3ff>;
+		reg-names = "core_base",
+			"tcsr_base",
+			"ahb2phy_base";
+		interrupts = <0 133 0>;
+		interrupt-names = "hs_phy_irq";
+		vbus_dwc3-supply = <&pm8941_mvs1>;
+		USB3_GDSC-supply = <&gdsc_usb30>;
+		qcom,dwc-usb3-msm-dbm-eps = <4>;
+		qcom,dwc_usb3-adc_tm = <&pm8941_adc_tm>;
+		qcom,dwc-usb3-msm-tx-fifo-size = <29696>;
+		qcom,usb-dbm = <&dbm_1p4>;
+		qcom,lpm-to-suspend-delay-ms = <2>;
+
+		qcom,msm_bus,name = "usb3";
+		qcom,msm_bus,num_cases = <2>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,msm_bus,vectors =
+				<61 512 0 0>,
+				<61 512 240000000 960000000>;
+
+		clocks = <&clock_gcc clk_gcc_usb30_master_clk>,
+			<&clock_gcc clk_gcc_cfg_noc_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_aggre1_usb3_axi_clk>,
+			<&clock_gcc clk_gcc_usb30_mock_utmi_clk>,
+			<&clock_gcc clk_gcc_usb30_sleep_clk>,
+			<&clock_gcc clk_gcc_usb_phy_cfg_ahb2phy_clk>,
+			<&clock_gcc clk_cxo_dwc3_clk>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+				"utmi_clk", "sleep_clk", "cfg_ahb_clk", "xo";
+
+		resets = <&clock_gcc GCC_USB_30_BCR>;
+		reset-names = "core_reset";
+
+		dwc3@f9200000 {
+			compatible = "synopsys,dwc3";
+			reg = <0xf9200000 0xfc000>;
+			interrupts = <0 131 0>, <0 179 0>;
+			interrupt-names = "irq", "otg_irq";
+			tx-fifo-resize;
+		};
+	};
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 74329fd..6e027ae 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -392,6 +392,8 @@
  [stack]                  = the stack of the main process
  [vdso]                   = the "virtual dynamic shared object",
                             the kernel system call handler
+ [anon:<name>]            = an anonymous mapping that has been
+                            named by userspace
 
  or if empty, the mapping is anonymous.
 
@@ -419,6 +421,7 @@
 MMUPageSize:           4 kB
 Locked:                0 kB
 VmFlags: rd ex mr mw me dw
+Name:           name from userspace
 
 the first of these lines shows the same information as is displayed for the
 mapping in /proc/PID/maps.  The remaining lines show the size of the mapping
@@ -486,6 +489,9 @@
 be present in all further kernel releases. Things get changed, the flags may
 be vanished or the reverse -- new added.
 
+The "Name" field will only be present on a mapping that has been named by
+userspace, and will show the name passed in by userspace.
+
 This file is only present if the CONFIG_MMU kernel configuration option is
 enabled.
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 37babf9..be49573 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -87,6 +87,7 @@
 	BLACKFIN Blackfin architecture is enabled.
 	CLK	Common clock infrastructure is enabled.
 	CMA	Contiguous Memory Area support is enabled.
+	DM	Device mapper support is enabled.
 	DRM	Direct Rendering Management support is enabled.
 	DYNAMIC_DEBUG Build in debug messages and enable them at runtime
 	EDD	BIOS Enhanced Disk Drive Services (EDD) is enabled
@@ -615,6 +616,10 @@
 			embedded devices based on command line input.
 			See Documentation/block/cmdline-partition.txt
 
+	boot_cpus=	[SMP]
+			Rather than attempting to online all possible CPUs at
+			boot time, only online the specified set of CPUs.
+
 	boot_delay=	Milliseconds to delay each printk during boot.
 			Values larger than 10 seconds (10000) are changed to
 			no delay (0).
@@ -1025,6 +1030,11 @@
 
 	dis_ucode_ldr	[X86] Disable the microcode loader.
 
+	dm=		[DM] Allows early creation of a device-mapper device.
+			See Documentation/device-mapper/boot.txt.
+
+	dmasound=	[HW,OSS] Sound subsystem buffers
+
 	dma_debug=off	If the kernel is compiled with DMA_API_DEBUG support,
 			this option disables the debugging code at boot.
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3db8c67..e206560 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -603,6 +603,26 @@
 	Note that that additional client or server features are only
 	effective if the basic support (0x1 and 0x2) are enabled respectively.
 
+tcp_fwmark_accept - BOOLEAN
+	If set, incoming connections to listening sockets that do not have a
+	socket mark will set the mark of the accepting socket to the fwmark of
+	the incoming SYN packet. This will cause all packets on that connection
+	(starting from the first SYNACK) to be sent with that fwmark. The
+	listening socket's mark is unchanged. Listening sockets that already
+	have a fwmark set via setsockopt(SOL_SOCKET, SO_MARK, ...) are
+	unaffected.
+	Default: 0
+
 tcp_syn_retries - INTEGER
 	Number of times initial SYNs for an active TCP connection attempt
 	will be retransmitted. Should not be higher than 127. Default value
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index 129f7c0..73bfa16 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -43,6 +43,17 @@
 Clients of pm_qos need to save the returned handle for future use in other
 pm_qos API functions.
 
+The handle is a pm_qos_request object. By default the request object sets the
+request type to PM_QOS_REQ_ALL_CORES, in which case, the PM QoS request
+applies to all cores. However, the driver can also specify the request type to
+be one of:
+        PM_QOS_REQ_ALL_CORES,
+        PM_QOS_REQ_AFFINE_CORES,
+        PM_QOS_REQ_AFFINE_IRQ
+
+Specify the cpumask when type is set to PM_QOS_REQ_AFFINE_CORES and specify
+the IRQ number with PM_QOS_REQ_AFFINE_IRQ.
+
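+For example, a driver that wants its latency constraint applied only to a
+particular core could fill in the request object before adding it. This is a
+minimal sketch; the 'type' and 'cpus_affine' field names are assumptions based
+on the request types listed above, and cpu 2 plus the 100us value are
+illustrative only:
+
+	/* Keep the request object allocated for as long as the vote is active. */
+	static struct pm_qos_request req;
+
+	req.type = PM_QOS_REQ_AFFINE_CORES;            /* assumed field name */
+	cpumask_copy(&req.cpus_affine, cpumask_of(2)); /* assumed field name */
+	pm_qos_add_request(&req, PM_QOS_CPU_DMA_LATENCY, 100);
+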
 void pm_qos_update_request(handle, new_target_value):
 Will update the list element pointed to by the handle with the new target value
 and recompute the new aggregated target, calling the notification tree if the
@@ -56,6 +67,13 @@
 int pm_qos_request(param_class):
 Returns the aggregated value for a given PM QoS class.
 
+int pm_qos_request_for_cpu(param_class, cpu):
+Returns the aggregated value for a given PM QoS class for the specified cpu.
+
+int pm_qos_request_for_cpumask(param_class, cpumask):
+Returns the aggregated value for a given PM QoS class for the specified
+cpumask.
+
 int pm_qos_request_active(handle):
 Returns if the request is still active, i.e. it has not been removed from a
 PM QoS class constraints list.
diff --git a/Documentation/scheduler/sched-hmp.txt b/Documentation/scheduler/sched-hmp.txt
new file mode 100644
index 0000000..22449ae
--- /dev/null
+++ b/Documentation/scheduler/sched-hmp.txt
@@ -0,0 +1,1432 @@
+CONTENTS
+
+1. Introduction
+   1.1 Heterogeneous Systems
+   1.2 CPU Frequency Guidance
+2. Window-Based Load Tracking Scheme
+   2.1 Synchronized Windows
+   2.2 struct ravg
+   2.3 Scaling Load Statistics
+   2.4 sched_window_stats_policy
+   2.5 Task Events
+   2.6 update_task_ravg()
+   2.7 update_history()
+   2.8 Per-task 'initial task load'
+3. CPU Capacity
+   3.1 Load scale factor
+   3.2 CPU Power
+4. CPU Power
+5. HMP Scheduler
+   5.1 Classification of Tasks and CPUs
+   5.2 select_best_cpu()
+   5.2.1 sched_boost
+   5.2.2 task_will_fit()
+   5.2.3 Tunables affecting select_best_cpu()
+   5.2.4 Wakeup Logic
+   5.3 Scheduler Tick
+   5.4 Load Balancer
+   5.5 Real Time Tasks
+   5.6 Task packing
+6. Frequency Guidance
+   6.1 Per-CPU Window-Based Stats
+   6.2 Per-task Window-Based Stats
+   6.3 Effect of various task events
+7. Tunables
+8. HMP Scheduler Trace Points
+   8.1 sched_enq_deq_task
+   8.2 sched_task_load
+   8.3 sched_cpu_load_*
+   8.4 sched_update_task_ravg
+   8.5 sched_update_history
+   8.6 sched_reset_all_windows_stats
+   8.7 sched_migration_update_sum
+   8.8 sched_get_busy
+   8.9 sched_freq_alert
+   8.10 sched_set_boost
+
+===============
+1. INTRODUCTION
+===============
+
+The scheduler extensions described in this document serve two goals:
+
+1) handle heterogeneous multi-processor (HMP) systems
+2) guide cpufreq governor on proactive changes to cpu frequency
+
+*** 1.1 Heterogeneous systems
+
+Heterogeneous systems have cpus that differ with regard to their performance and
+power characteristics. Some cpus could offer better peak performance than
+others, although at the cost of consuming more power. We shall refer to such
+cpus as "high performance" or "performance efficient" cpus. Other cpus that
+offer lesser peak performance are referred to as "power efficient".
+
+In this situation the scheduler is tasked with the responsibility of assigning
+tasks to run on the right cpus where their performance requirements can be met
+at the least expense of power.
+
+Achieving that goal is made complicated by the fact that the scheduler has
+little clue about performance requirements of tasks and how they may change by
+running on power or performance efficient cpus!  One simplifying assumption here
+could be that a task's desire for more performance is expressed by its cpu
+utilization. A task demanding high cpu utilization on a power-efficient cpu
+would likely improve in its performance by running on a performance-efficient
+cpu. This idea forms the basis for HMP-related scheduler extensions.
+
+Key inputs required by the HMP scheduler for its task placement decisions are:
+
+a) task load - this reflects cpu utilization or demand of tasks
+b) CPU capacity - this reflects peak performance offered by cpus
+c) CPU power - this reflects power or energy cost of cpus
+
+Once all 3 pieces of information are available, the HMP scheduler can place
+tasks on the lowest power cpus where their demand can be satisfied.
+
+*** 1.2 CPU Frequency guidance
+
+A somewhat separate but related goal of the scheduler extensions described here
+is to provide guidance to the cpufreq governor on the need to change cpu
+frequency. Most governors that control cpu frequency work on a reactive basis.
+CPU utilization is sampled at regular intervals, based on which the need to
+change frequency is determined. Higher utilization leads to a frequency increase
+and vice-versa.  There are several problems with this approach that scheduler
+can help resolve.
+
+a) latency
+
+	Reactive nature introduces latency for cpus to ramp up to desired speed
+	which can hurt application performance. This is inevitable as cpufreq
+	governors can only track cpu utilization as a whole and not tasks which
+	are driving that demand. Scheduler can however keep track of individual
+	task demand and can alert the governor on changing task activity. For
+	example, it can request a frequency raise when task activity is increasing
+	on a cpu because of a wakeup or migration, or request the frequency to be
+	lowered when task activity is decreasing because of sleep/exit or migration.
+
+b) part-picture
+
+	Most governors track utilization of each CPU independently. When a task
+	migrates from one cpu to another the task's execution time is split
+	across the two cpus. The governor can fail to see the full picture of
+	task demand in this case and thus the need for increasing frequency,
+	affecting the task's performance. Scheduler can keep track of task
+	migrations, fix up busy time upon migration and report per-cpu busy time
+	to the governor that reflects task demand accurately.
+
+The rest of this document explains key enhancements made to the scheduler to
+accomplish both of the aforementioned goals.
+
+====================================
+2. WINDOW-BASED LOAD TRACKING SCHEME
+====================================
+
+As mentioned in the introduction section, knowledge of the CPU demand exerted by
+a task is a prerequisite to knowing where to best place the task in an HMP
+system. The per-entity load tracking (PELT) scheme, present in Linux kernel
+since v3.7, has some perceived shortcomings when used to place tasks on HMP
+systems or provide recommendations on CPU frequency.
+
+Per-entity load tracking does not make a distinction between the ramp up
+vs ramp down time of task load. It also decays task load without exception when
+a task sleeps. As an example, a cpu bound task at its peak load (LOAD_AVG_MAX or
+47742) can see its load decay to 0 after a sleep of just 213ms! A cpu-bound task
+running on a performance-efficient cpu could thus get re-classified as not
+requiring such a cpu after a short sleep. In the case of mobile workloads, tasks
+could go to sleep due to a lack of user input. When they wakeup it is very
+likely their cpu utilization pattern repeats. Resetting their load across sleep
+and incurring latency to reclassify them as requiring a high performance cpu can
+hurt application performance.
+
+The window-based load tracking scheme described in this document avoids these
+drawbacks. It keeps track of N windows of execution for every task. Windows
+where a task had no activity are ignored and not recorded. N can be tuned at
+compile time (RAVG_HIST_SIZE defined in include/linux/sched.h) or at runtime
+(/proc/sys/kernel/sched_ravg_hist_size). The window size, W, is common for all
+tasks and currently defaults to 10ms ('sched_ravg_window' defined in
+kernel/sched/core.c). The window size can be tuned at boot time via the
+sched_ravg_window=W kernel argument. Alternatively, it can be tuned after boot
+via tunables provided by the interactive governor. More on this later.
+
+Based on the N samples available per-task, a per-task "demand" attribute is
+calculated which represents the cpu demand of that task. The demand attribute is
+used to classify tasks as to whether or not they need a performance-efficient
+CPU and also serves to provide inputs on frequency to the cpufreq governor. More
+on this later.  The 'sched_window_stats_policy' tunable (defined in
+kernel/sched/core.c) controls how the demand field for a task is derived from
+its N past samples.
+
+*** 2.1 Synchronized windows
+
+Windows of observation for task activity are synchronized across cpus. This
+greatly aids in the scheduler's frequency guidance feature. Scheduler currently
+relies on a synchronized clock (sched_clock()) for this feature to work. It may
+be possible to extend this feature to work on systems having an unsynchronized
+sched_clock().
+
+struct rq {
+
+	..
+
+	u64	window_start;
+
+	..
+};
+
+The 'window_start' attribute represents the time when current window began on a
+cpu.  It is updated when key task events such as wakeup or context-switch call
+update_task_ravg() to record task activity. The window_start value is expected
+to be the same for all cpus, although it could be behind on some cpus where it
+has not yet been updated because update_task_ravg() has not been recently
+called. For example, when a cpu is idle for a long time its window_start could
+be stale.  The window_start value for such cpus is rolled forward upon
+occurrence of a task event resulting in a call to update_task_ravg().
+
+*** 2.2 struct ravg
+
+The ravg struct contains information tracked per-task.
+
+struct ravg {
+	u64 mark_start;
+	u32 sum, demand;
+	u32 sum_history[RAVG_HIST_SIZE];
+	u32 curr_window, prev_window;
+};
+
+struct task_struct {
+
+	..
+
+	struct ravg ravg;
+
+	..
+};
+
+sum_history[] 	- stores cpu utilization samples from N previous windows
+		  where task had activity
+
+sum		- stores cpu utilization of the task in its most recently
+		  tracked window. Once the corresponding window terminates,
+		  'sum' will be pushed into the sum_history[] array and is then
+		  reset to 0. It is possible that the window corresponding to
+		  sum is not the current window being tracked on a cpu.  For
+		  example, a task could go to sleep in window X and wakeup in
+		  window Y (Y > X).  In this case, sum would correspond to the
+		  task's activity seen in window X.  When update_task_ravg() is
+		  called during the task's wakeup event it will be seen that
+		  window X has elapsed. The sum value will be pushed to
+		  'sum_history[]' array before being reset to 0.
+
+demand		- represents task's cpu demand and is derived from the
+		  elements in sum_history[]. The section on
+		  'sched_window_stats_policy' provides more details on how
+		  'demand' is derived from elements in sum_history[] array
+
+mark_start	- records timestamp of the beginning of the most recent task
+		  event. See section on 'Task events' for possible events that
+		  update 'mark_start'
+
+curr_window	- this is described in the section on 'Frequency guidance'
+
+prev_window	- this is described in the section on 'Frequency guidance'
+
+
+*** 2.3 Scaling load statistics
+
+Time required for a task to complete its work (and hence its load) depends on,
+among various other factors, cpu frequency and its efficiency. In a HMP system,
+some cpus are more performance efficient than others. Performance efficiency of
+a cpu can be described by its "instructions-per-cycle" (IPC) attribute. History
+of task execution could involve task having run at different frequencies and on
+cpus with different IPC attributes. To avoid ambiguity of how task load relates
+to the frequency and IPC of cpus on which a task has run, task load is captured
+in a scaled form, with scaling being done in reference to an "ideal" cpu that
+has best possible IPC and frequency. Such an "ideal" cpu, having the best
+possible frequency and IPC, may or may not exist in system.
+
+As an example, consider a HMP system, with two types of cpus, A53 and A57. A53
+has IPC count of 1024 and can run at maximum frequency of 1 GHz, while A57 has
+IPC count of 2048 and can run at maximum frequency of 2 GHz. Ideal cpu in this
+case is A57 running at 2 GHz.
+
+A unit of work that takes 100ms to finish on A53 running at 100MHz would get
+done in 10ms on A53 running at 1GHz, in 5 ms running on A57 at 1 GHz and 2.5ms
+on A57 running at 2 GHz.  Thus a load of 100ms can be expressed as 2.5ms in
+reference to ideal cpu of A57 running at 2 GHz.
+
+In order to understand how much load a task will consume on a given cpu, its
+scaled load needs to be multiplied by a factor (load scale factor). In above
+example, scaled load of 2.5ms needs to be multiplied by a factor of 4 in order
+to estimate the load of task on A53 running at 1 GHz.
+
+/proc/sched_debug provides IPC attribute and load scale factor for every cpu.
+
+In summary, task load information stored in a task's sum_history[] array is
+scaled for both frequency and efficiency. If a task runs for X ms, then the
+value stored in its 'sum' field is derived as:
+
+	X_s = X * (f_cur / max_possible_freq) *
+		  (efficiency / max_possible_efficiency)
+
+where:
+
+X   		   	= cpu utilization that needs to be accounted
+X_s 		   	= Scaled derivative of X
+f_cur		   	= current frequency of the cpu where the task was
+			  running
+max_possible_freq  	= maximum possible frequency (across all cpus)
+efficiency	   	= instructions per cycle (IPC) of cpu where task was
+			  running
+max_possible_efficiency = maximum IPC offered by any cpu in system
+
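+Using the A53/A57 example above: a task that runs for 100ms on the A53 at
+100MHz is recorded as
+
+	X_s = 100ms * (100MHz / 2000MHz) * (1024 / 2048) = 2.5ms
+
+which matches the 2.5ms quoted earlier in reference to the ideal cpu (A57
+running at 2 GHz).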
+
+*** 2.4 sched_window_stats_policy
+
+sched_window_stats_policy controls how the 'demand' attribute for a task is
+derived from elements in its 'sum_history[]' array.
+
+WINDOW_STATS_RECENT (0)
+	demand = recent
+
+WINDOW_STATS_MAX (1)
+	demand = max
+
+WINDOW_STATS_MAX_RECENT_AVG (2)
+	demand = maximum(average, recent)
+
+WINDOW_STATS_AVG (3)
+	demand = average
+
+where:
+	M 	= history size specified by
+		  /proc/sys/kernel/sched_ravg_hist_size
+	average = average of first M samples found in the sum_history[] array
+	max	= maximum value of first M samples found in the sum_history[]
+		  array
+	recent  = most recent sample (sum_history[0])
+	demand	= demand attribute found in 'struct ravg'
+
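+As an illustration, if the first M samples in sum_history[] are
+{8ms, 6ms, 10ms, 4ms, 2ms}, then recent = 8ms, max = 10ms and average = 6ms,
+so the policies above yield demand values of 8ms (WINDOW_STATS_RECENT),
+10ms (WINDOW_STATS_MAX), 8ms (WINDOW_STATS_MAX_RECENT_AVG) and
+6ms (WINDOW_STATS_AVG) respectively.
+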
+This policy can be changed at runtime via
+/proc/sys/kernel/sched_window_stats_policy. For example, the command
+below would select the WINDOW_STATS_MAX policy:
+
+echo 1 > /proc/sys/kernel/sched_window_stats_policy
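+
+A minimal sketch of how 'demand' could be derived from sum_history[] under
+these policies (hypothetical helper; the kernel's actual code differs in
+detail):
+
+	static u32 compute_demand(u32 *hist, int hist_size, int policy)
+	{
+		u32 maxval = 0, avg, recent = hist[0];
+		u64 sum = 0;
+		int i;
+
+		for (i = 0; i < hist_size; i++) {
+			sum += hist[i];
+			if (hist[i] > maxval)
+				maxval = hist[i];
+		}
+		avg = div64_u64(sum, hist_size);
+
+		switch (policy) {
+		case WINDOW_STATS_RECENT:
+			return recent;
+		case WINDOW_STATS_MAX:
+			return maxval;
+		case WINDOW_STATS_MAX_RECENT_AVG:
+			return max(avg, recent);
+		case WINDOW_STATS_AVG:
+		default:
+			return avg;
+		}
+	}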
+
+*** 2.5 Task events
+
+A number of events result in the window-based stats of a task being
+updated. These are:
+
+PICK_NEXT_TASK	- the task is about to start running on a cpu
+PUT_PREV_TASK	- the task stopped running on a cpu
+TASK_WAKE	- the task is waking from sleep
+TASK_MIGRATE	- the task is migrating from one cpu to another
+TASK_UPDATE	- this event is invoked on a currently running task to
+		  update the task's window-stats and also the cpu's
+		  window-stats such as 'window_start'
+IRQ_UPDATE	- event to record the busy time spent by an idle cpu
+		  processing interrupts
+
+*** 2.6 update_task_ravg()
+
+update_task_ravg() is called to mark the beginning of an event for a task or a
+cpu. It serves to accomplish these functions:
+
+a. Update a cpu's window_start value
+b. Update a task's window-stats (sum, sum_history[], demand and mark_start)
+
+In addition update_task_ravg() updates the busy time information for the given
+cpu, which is used for frequency guidance. This is described further in section
+6.
+
+*** 2.7 update_history()
+
+update_history() is called on a task to record its activity in an elapsed
+window. 'sum', which represents the task's cpu demand in the elapsed window, is
+pushed onto the sum_history[] array and its 'demand' attribute is updated based
+on the sched_window_stats_policy in effect.
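+
+As a rough sketch (field and macro names as used in this document; element
+types are assumptions):
+
+	/* Shift the history and push the completed window's 'sum'. */
+	memmove(&p->ravg.sum_history[1], &p->ravg.sum_history[0],
+		(RAVG_HIST_SIZE - 1) * sizeof(p->ravg.sum_history[0]));
+	p->ravg.sum_history[0] = p->ravg.sum;
+	p->ravg.sum = 0;
+	/* 'demand' is then recomputed per sched_window_stats_policy. */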
+
+*** 2.8 Initial task load attribute for a task (init_load_pct)
+
+In some cases, it may be desirable for children of a task to be assigned a
+"high" load so that they can start running on the best-capacity cluster. By
+default, newly created tasks are assigned a load defined by the tunable
+sched_init_task_load (Sec 7.4). Some specialized tasks may need a higher value
+than the global default for their child tasks. This will let child tasks run on
+cpus with the best capacity. This is accomplished by setting the 'initial task
+load' attribute (init_load_pct) for a task. A child task's starting load
+(ravg.demand and ravg.sum_history[]) is initialized from its parent's 'initial
+task load' attribute. Note that the child task's 'initial task load' attribute
+itself will be 0 by default (i.e. it is not inherited from the parent).
+
+A task's 'initial task load' attribute can be set in two ways:
+
+**** /proc interface
+
+/proc/[pid]/sched_init_task_load can be written to for setting a task's 'initial
+task load' attribute. A numeric value between 0 - 100 (in percent scale) is
+accepted for task's 'initial task load' attribute.
+
+Reading /proc/[pid]/sched_init_task_load returns the 'initial task load'
+attribute for the given task.
+
+**** kernel API
+
+Following kernel APIs are provided to set or retrieve a given task's 'initial
+task load' attribute:
+
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+int sched_get_init_task_load(struct task_struct *p);
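+
+For example, a kernel module preparing to fork worker threads might raise the
+attribute on the parent task first (sketch; 'parent' and the return convention
+are assumptions):
+
+	/* Sketch: have children of 'parent' start with an 80% initial load. */
+	if (sched_set_init_task_load(parent, 80) < 0)
+		pr_warn("failed to set initial task load\n");
+	pr_info("init_load_pct is now %d\n", sched_get_init_task_load(parent));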
+
+
+===============
+3. CPU CAPACITY
+===============
+
+CPU capacity reflects the peak performance offered by a cpu. It is defined both
+by the maximum frequency at which the cpu can run and by its efficiency
+attribute. The capacity of a cpu is defined in reference to the "least"
+performing cpu, such that the "least" performing cpu has a capacity of 1024.
+
+	capacity = 1024 * (fmax_cur / min_max_freq) *
+			  (efficiency / min_possible_efficiency)
+
+where:
+
+	fmax_cur    		= maximum frequency at which cpu is currently
+				  allowed to run
+	efficiency		= IPC of cpu
+	min_max_freq 		= max frequency at which "least" performing cpu
+				  can run
+	min_possible_efficiency	= IPC of "least" performing cpu
+
+'fmax_cur' reflects the fact that a cpu may be constrained at runtime to run at
+a maximum frequency less than what is supported. This may be a constraint
+placed by the user or by drivers, such as thermal, that intend to reduce the
+temperature of a cpu by restricting its maximum frequency.
+
+'max_possible_capacity' reflects the maximum capacity of a cpu based on the
+maximum frequency it supports.
+
+max_possible_capacity = 1024 * (fmax / min_max_freq) *
+			       (efficiency / min_possible_efficiency)
+
+where:
+	fmax 	= maximum frequency supported by a cpu
+
+/proc/sched_debug lists the capacity and max_possible_capacity information for
+every cpu.
+
+In the example HMP system quoted in Sec 2.3, "least" performing CPU is A53 and
+thus min_max_freq = 1GHz and min_possible_efficiency = 1024.
+
+Capacity of A57 = 1024 * (2GHz / 1GHz) * (2048 / 1024) = 4096
+Capacity of A53 = 1024 * (1GHz / 1GHz) * (1024 / 1024) = 1024
+
+Capacity of A57 when constrained to run at maximum frequency of 500MHz can be
+calculated as:
+
+Capacity of A57 = 1024 * (500MHz / 1GHz) * (2048 / 1024) = 1024
+
+*** 3.1 load_scale_factor
+
+'lsf' or load scale factor attribute of a cpu is used to estimate the load of a
+task on that cpu when running at its fmax_cur frequency. 'lsf' is defined in
+reference to the "best" performing cpu, such that its lsf is 1024. 'lsf' for a
+cpu is defined as:
+
+	lsf = 1024 * (max_possible_freq / fmax_cur) *
+		     (max_possible_efficiency / ipc)
+
+where:
+	fmax_cur    		= maximum frequency at which cpu is currently
+				  allowed to run
+	ipc			= IPC of cpu
+	max_possible_freq	= max frequency at which "best" performing cpu
+				  can run
+	max_possible_efficiency	= IPC of "best" performing cpu
+
+In the example HMP system quoted in Sec 2.3, "best" performing CPU is A57 and
+thus max_possible_freq = 2 GHz, max_possible_efficiency = 2048
+
+lsf of A57 = 1024 * (2GHz / 2GHz) * (2048 / 2048) = 1024
+lsf of A53 = 1024 * (2GHz / 1 GHz) * (2048 / 1024) = 4096
+
+lsf of A57 constrained to run at maximum frequency of 500MHz can be calculated
+as:
+
+lsf of A57 = 1024 * (2GHz / 500MHz) * (2048 / 2048) = 4096
+
+To estimate load of a task on a given cpu running at its fmax_cur:
+
+	load = scaled_load * lsf / 1024
+
+A task with scaled load of 20% would thus be estimated to consume 80% bandwidth
+of A53 running at 1GHz. The same task with scaled load of 20% would be estimated
+to consume 160% bandwidth on A53 constrained to run at maximum frequency of
+500MHz.
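+
+A minimal sketch of this estimation (hypothetical helper):
+
+	/* Estimate a task's bandwidth on a cpu from its scaled load and lsf. */
+	static u64 estimated_load(u64 scaled_load, unsigned int lsf)
+	{
+		return (scaled_load * lsf) >> 10;	/* i.e. * lsf / 1024 */
+	}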
+
+load_scale_factor, thus, is very useful to estimate load of a task on a given
+cpu and thus to decide whether it can fit in a cpu or not.
+
+*** 3.2 cpu_power
+
+A metric 'cpu_power' related to 'capacity' is also listed in /proc/sched_debug.
+'cpu_power' is ideally the same for all cpus (1024) when they are idle and
+running at the same frequency. The 'cpu_power' of a cpu can be scaled down from
+its ideal value to reflect the reduced frequency it is operating at and also to
+reflect the amount of cpu bandwidth consumed by real-time tasks executing on it.
+The 'cpu_power' metric is used by the scheduler to decide task load distribution
+among cpus. CPUs with low 'cpu_power' will be assigned less task load than cpus
+with higher 'cpu_power'.
+
+============
+4. CPU POWER
+============
+
+The HMP scheduler extensions currently depend on an architecture-specific driver
+to provide runtime information on cpu power. In the absence of an
+architecture-specific driver, the scheduler will resort to using the
+max_possible_capacity metric of a cpu as a measure of its power.
+
+================
+5. HMP SCHEDULER
+================
+
+For normal (SCHED_OTHER/fair class) tasks there are three paths in the
+scheduler which these HMP extensions affect. The task wakeup path, the
+load balancer, and the scheduler tick are each modified.
+
+Real-time and stop-class tasks are served by different code
+paths. These will be discussed separately.
+
+Prior to delving further into the algorithm and implementation however
+some definitions are required.
+
+*** 5.1 Classification of Tasks and CPUs
+
+With the extensions described thus far, the following information is
+available to the HMP scheduler:
+
+- per-task CPU demand information from either Per-Entity Load Tracking
+  (PELT) or the window-based algorithm described above
+
+- a power value for each frequency supported by each CPU via the API
+  described in section 4
+
+- current CPU frequency, maximum CPU frequency (may be throttled at
+  runtime due to thermal conditions), maximum possible CPU frequency supported
+  by hardware
+
+- data previously maintained within the scheduler such as the number
+  of currently runnable tasks on each CPU
+
+Combined with tunable parameters, this information can be used to classify
+both tasks and CPUs to aid in the placement of tasks.
+
+- big task
+
+  A big task is one that exerts a CPU demand too high for a particular
+  CPU to satisfy. The scheduler will attempt to find a CPU with more
+  capacity for such a task.
+
+  The definition of "big" is specific to a task *and* a CPU. A task
+  may be considered big on one CPU in the system and not big on
+  another if the first CPU has less capacity than the second.
+
+  What task demand is "too high" for a particular CPU? One obvious
+  answer would be a task demand which, as measured by PELT or
+  window-based load tracking, matches or exceeds the capacity of that
+  CPU. A task which runs on a CPU for a long time, for example, might
+  meet this criterion as it would report 100% demand of that CPU. It
+  may be desirable however to classify tasks which use less than 100%
+  of a particular CPU as big so that the task has some "headroom" to grow
+  without its CPU bandwidth getting capped and its performance requirements
+  going unmet. This task demand threshold is therefore a tunable parameter:
+
+  /proc/sys/kernel/sched_upmigrate
+
+  This value is a percentage. If a task consumes more than this much of a
+  particular CPU, that CPU will be considered too small for the task. The task
+  will thus be seen as a "big" task on the cpu and will be reflected in the
+  nr_big_tasks statistics maintained for that cpu. Note that certain tasks
+  (those whose nice value exceeds SCHED_UPMIGRATE_MIN_NICE or those that belong
+  to a cgroup
+  whose upmigrate_discourage flag is set) will never be classified as big tasks
+  despite their high demand.
+
+  As the load scale factor is calculated against current fmax, it gets boosted
+  when a lower capacity CPU is restricted to run at lower fmax. The task
+  demand is inflated in this scenario and the task upmigrates early to the
+  maximum capacity CPU. Hence this threshold is auto-adjusted by a factor
+  equal to max_possible_frequency/current_frequency of a lower capacity CPU.
+  This adjustment happens only when the lower capacity CPU frequency is
+  restricted. The same adjustment is applied to the downmigrate threshold
+  as well.
+
+  When the frequency restriction is relaxed, the previous values are restored.
+  sched_up_down_migrate_auto_update macro defined in kernel/sched/core.c
+  controls this auto-adjustment behavior and it is enabled by default.
+
+  If the adjusted upmigrate threshold exceeds the window size, it is clipped to
+  the window size. If the adjusted downmigrate threshold decreases the
+  difference between the upmigrate and downmigrate thresholds, it is clipped to
+  a value such that the difference between the modified and the original
+  thresholds remains the same.
+
+- spill threshold
+
+  Tasks will normally be placed on the lowest power-cost cluster where they can
+  fit. This could result in the power-efficient cluster becoming overcrowded
+  when there are too many low-demand tasks. The spill threshold provides a
+  spill-over criterion, wherein low-demand tasks are allowed to be placed on
+  idle or busy cpus in the high-performance cluster.
+
+  The scheduler will avoid placing a task on a cpu if doing so would result in
+  the cpu exceeding its spill threshold, which is defined by two tunables:
+
+  /proc/sys/kernel/sched_spill_nr_run (default: 10)
+  /proc/sys/kernel/sched_spill_load   (default: 100%)
+
+  A cpu is considered to be above its spill level if it already has 10 tasks or
+  if the sum of task load (scaled in reference to given cpu) and
+  rq->cumulative_runnable_avg exceeds 'sched_spill_load'.
+
+- power band
+
+  The scheduler may be faced with a tradeoff between power and performance when
+  placing a task. If the scheduler sees two CPUs which can accommodate a task:
+
+  CPU 1, power cost of 20, load of 10
+  CPU 2, power cost of 10, load of 15
+
+  It is not clear what the right choice of CPU is. The HMP scheduler
+  offers the sched_powerband_limit tunable to determine how this
+  situation should be handled. When the power delta between two CPUs
+  is less than sched_powerband_limit_pct, load will be prioritized as
+  the deciding factor as to which CPU is selected. If the power delta
+  between two CPUs exceeds that, the lower power CPU is considered to
+  be in a different "band" and it is selected, despite perhaps having
+  a higher current task load.
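+
+  A minimal sketch of the power-band comparison described above (hypothetical
+  helper; the exact form of the comparison is an assumption):
+
+	/* Are two power costs close enough to be in the same "band"? */
+	static bool same_power_band(unsigned int cost_a, unsigned int cost_b)
+	{
+		unsigned int hi = max(cost_a, cost_b);
+		unsigned int lo = min(cost_a, cost_b);
+
+		/* Treat the limit as a percentage of the higher cost. */
+		return (hi - lo) * 100 < sched_powerband_limit_pct * hi;
+	}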
+
+*** 5.2 select_best_cpu()
+
+CPU placement decisions for a task at its wakeup or creation time are the
+most important decisions made by the HMP scheduler. This section will describe
+the call flow and algorithm used in detail.
+
+The primary entry point for a task wakeup operation is try_to_wake_up(),
+located in kernel/sched/core.c. This function relies on select_task_rq() to
+determine the target CPU for the waking task. For fair-class (SCHED_OTHER)
+tasks, that request will be routed to select_task_rq_fair() in
+kernel/sched/fair.c. As part of these scheduler extensions a hook has been
+inserted into the top of that function. If HMP scheduling is enabled the normal
+scheduling behavior will be replaced by a call to select_best_cpu(). This
+function, select_best_cpu(), represents the heart of the HMP scheduling
+algorithm described in this document. Note that select_best_cpu() is also
+invoked for a task being created.
+
+The behavior of select_best_cpu() depends on several factors such as boost
+setting, choice of several tunables and on task demand.
+
+**** 5.2.1 Boost
+
+The task placement policy changes significantly when scheduler boost is in
+effect. When boost is in effect the scheduler ignores the power cost of
+placing tasks on CPUs. Instead it figures out the load on each CPU and then
+places the task on the least loaded CPU. If the load of two or more CPUs is the
+same (generally when CPUs are idle) the task prefers to go to the highest
+capacity CPU in the system.
+
+A further enhancement during boost is the scheduler's early detection feature.
+While boost is in effect the scheduler checks for the presence of tasks that
+have been runnable for over some period of time within the tick. For such
+tasks the scheduler informs the governor of imminent need for high frequency.
+If there exists a task on the runqueue at the tick that has been runnable
+for greater than SCHED_EARLY_DETECTION_DURATION amount of time, it notifies
+the governor with a fabricated load of the full window at the highest
+frequency. The fabricated load is maintained until the task is no longer
+runnable or until the next tick.
+
+Boost can be set via either /proc/sys/kernel/sched_boost or by invoking
+kernel API sched_set_boost().
+
+	int sched_set_boost(int enable);
+
+Once turned on, boost will remain in effect until it is explicitly turned off.
+To allow boost to be controlled by multiple external entities (applications or
+kernel modules) at the same time, the boost setting is reference counted. This
+means that two applications can turn on boost and the effect of boost is
+eliminated only after both applications have turned off boost. The
+boost_refcount variable represents this reference count.
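+
+For example, a kernel module could hold a boost reference around a
+latency-critical phase (sketch; the workload function is hypothetical and the
+"0 means success" return convention is an assumption):
+
+	if (!sched_set_boost(1)) {		/* assumes 0 on success */
+		run_latency_critical_work();	/* hypothetical workload */
+		sched_set_boost(0);		/* drop this caller's reference */
+	}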
+
+**** 5.2.2 task_will_fit()
+
+The overall goal of select_best_cpu() is to place a task on the least power
+cluster where it can "fit", i.e. where its cpu usage will be below the capacity
+offered by the cluster. The criteria for a task to be considered as fitting in
+a cluster are:
+
+ i) A low-priority task, whose nice value is greater than
+     SCHED_UPMIGRATE_MIN_NICE or whose cgroup has its
+     upmigrate_discourage flag set, is considered to be fitting in all clusters,
+     irrespective of their capacity and task's cpu demand.
+
+ ii) All tasks are considered to fit in highest capacity cluster.
+
+ iii) Task demand scaled in reference to the given cluster should be less than
+     a threshold. See the section on load_scale_factor to know more about how
+     task demand is scaled in reference to a given cpu (cluster). The threshold
+     used is normally sched_upmigrate. It's possible for a task's demand to
+     exceed the sched_upmigrate threshold in reference to a cluster when it is
+     upmigrated to a higher capacity cluster. To prevent it from coming back
+     immediately to the lower capacity cluster, the task is not considered to
+     "fit" on its earlier cluster until its demand has dropped below
+     sched_downmigrate in reference to that earlier cluster. sched_downmigrate
+     thus provides some hysteresis control.
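+
+A minimal sketch of the fit test implied by the criteria above (hypothetical
+helper; 'demand_pct' stands for the task demand scaled to the cluster and
+expressed as a percentage):
+
+	static bool task_fits_cluster(unsigned int demand_pct, bool low_prio,
+				      bool biggest_cluster, bool upmigrated)
+	{
+		unsigned int threshold = upmigrated ? sched_downmigrate :
+						      sched_upmigrate;
+
+		if (low_prio || biggest_cluster)
+			return true;
+		return demand_pct < threshold;
+	}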
+
+
+**** 5.2.3 Factors affecting select_best_cpu()
+
+The behavior of select_best_cpu() is further controlled by several tunables and
+by the synchronous nature of the wakeup.
+
+a. /proc/sys/kernel/sched_cpu_high_irqload
+	A cpu whose irq load is greater than this threshold will not be
+	considered eligible for placement. This threshold value is expressed in
+	nanoseconds, with the default threshold being 10000000 (10ms). See notes
+	on the sched_cpu_high_irqload tunable to understand how irq load on a
+	cpu is measured.
+
+b. Synchronous nature of wakeup
+	Synchronous wakeup is a hint to the scheduler that the task issuing the
+	wakeup (i.e. the task currently running on the cpu where the wakeup is
+	being processed by the scheduler) will "soon" relinquish the CPU. A
+	simple example is two tasks communicating with each other using a pipe.
+	When the reader task blocks waiting for data, it is woken by the writer
+	task after it has written data to the pipe. The writer task usually
+	blocks waiting for the reader task to consume data in the pipe (which
+	may not have any more room for writes).
+
+	Synchronous wakeup is accounted for by adjusting load of a cpu to not
+	include load of currently running task. As a result, a cpu that has only
+	one runnable task and which is currently processing synchronous wakeup
+	will be considered idle.
+
+c. PF_WAKE_UP_IDLE
+	Any task with this flag set will be woken up to an idle cpu (if one is
+	available) independent of sched_prefer_idle flag setting, its demand and
+	synchronous nature of wakeup. Similarly idle cpu is preferred during
+	wakeup for any task that does not have this flag set but is being woken
+	by a task with PF_WAKE_UP_IDLE flag set. For simplicity, we will use the
+	term "PF_WAKE_UP_IDLE wakeup" to signify wakeups involving a task with
+	PF_WAKE_UP_IDLE flag set.
+
+d. /proc/sys/kernel/sched_select_prev_cpu_us
+	This threshold controls whether task placement goes through the fast
+	path or not. If the task wakes up within this interval of its last
+	sleep, chances are high that it is better to place the task on its
+	previous CPU. This reduces task placement latency, cache misses and the
+	number of migrations. The default value of sched_select_prev_cpu_us is
+	2000 (2ms). This can be turned off by setting it to 0.
+
+**** 5.2.4 Wakeup Logic for Task "p"
+
+Wakeup task placement logic is as follows:
+
+1) Eliminate CPUs with high irq load based on sched_cpu_high_irqload tunable.
+
+2) Eliminate CPUs where either the task does not fit or where placement
+will result in exceeding the spill threshold tunables. CPUs eliminated at this
+stage will be considered as backup choices in case none of the CPUs get past
+this stage.
+
+3) Find out and return the least power CPU that satisfies all conditions above.
+
+4) If two or more CPUs are projected to have the same power, break ties in the
+following preference order:
+ a) The CPU is the task's previous CPU.
+ b) The CPU is in the same cluster as the task's previous CPU.
+ c) The CPU has the least load.
+
+The placement logic described above does not apply when PF_WAKE_UP_IDLE is set
+for either the waker task or the wakee task. Instead the scheduler chooses the
+most power efficient idle CPU.
+
+5) If no CPU is found after step 2, resort to backup CPU selection logic
+whereby the CPU with highest amount of spare capacity is selected.
+
+6) If none of the CPUs have any spare capacity, return the task's previous
+CPU.
+
+*** 5.3 Scheduler Tick
+
+Every CPU is interrupted periodically to let kernel update various statistics
+and possibly preempt the currently running task in favor of a waiting task. This
+periodicity, determined by the CONFIG_HZ value, is typically 10ms. There are
+various optimizations by which a CPU can however skip taking these interrupts
+(ticks). A cpu going idle for a considerable time is one such case.
+
+The HMP scheduler extensions bring in a change in the processing of the tick
+(scheduler_tick()) that can result in task migration. In case the currently
+running task on a cpu belongs to the fair_sched class, a check is made whether
+it needs to be migrated. Possible reasons for migrating the task could be:
+
+a) A big task is running on a power-efficient cpu and a high-performance cpu is
+available (idle) to service it
+
+b) A task is starving on a CPU with high irq load.
+
+c) A task with upmigration discouraged is running on a performance cluster.
+See notes on 'cpu.upmigrate_discourage'.
+
+In case the test for migration turns out positive (which is expected to be a
+rare event), a candidate cpu is identified for task migration. To avoid multiple
+task migrations to the same candidate cpu(s), identification of the candidate
+cpu is serialized via a global spinlock (migration_lock).
+
+*** 5.4 Load Balancer
+
+Load balance is a key functionality of the scheduler that strives to distribute
+tasks across available cpus in a "fair" manner. Most of the complexity
+associated with this feature involves balancing fair_sched class tasks. Changes
+made to the load balance code serve these goals:
+
+1. Restrict the flow of tasks from power-efficient cpus to high-performance
+   cpus. Provide a spill-over threshold, defined in terms of number of tasks
+   (sched_spill_nr_run) and cpu demand (sched_spill_load), beyond which tasks
+   can spill over from power-efficient cpus to high-performance cpus.
+
+2. Allow idle power-efficient cpus to pick up extra load from an over-loaded
+   high-performance cpu
+
+3. Allow an idle high-performance cpu to pick up big tasks from a
+   power-efficient cpu
+
+*** 5.5 Real Time Tasks
+
+The minimal changes introduced in the treatment of real-time tasks by the HMP
+scheduler aim at preferring to schedule real-time tasks on cpus with low load
+in a power efficient cluster.
+
+Prior to HMP scheduler, the fast-path cpu selection for placing a real-time task
+(at wakeup) is its previous cpu, provided the currently running task on its
+previous cpu is not a real-time task or a real-time task with lower priority.
+Failing this, cpu selection in slow-path involves building a list of candidate
+cpus where the waking real-time task will be of highest priority and thus can be
+run immediately. The first cpu from this candidate list is chosen for the waking
+real-time task. Much of the premise for this simple approach is the assumption
+that real-time tasks often execute for very short intervals and thus the focus
+is to place them on a cpu where they can be run immediately.
+
+The HMP scheduler brings in a change which avoids the fast-path and always
+resorts to the slow-path. Further, the cpu with the lowest load in a power
+efficient cluster from the candidate list of cpus is chosen as the cpu for
+placing the waking real-time task.
+
+- PF_WAKE_UP_IDLE
+
+An idle cpu is preferred for any waking task that has this flag set in its
+'task_struct.flags' field. Further, an idle cpu is preferred for any task woken
+by such tasks. The PF_WAKE_UP_IDLE flag of a task is inherited by its children.
+It can be modified for a task in two ways:
+
+	> kernel-space interface
+		set_wake_up_idle() needs to be called in the context of a task
+		to set or clear its PF_WAKE_UP_IDLE flag.
+
+	> user-space interface
+		/proc/[pid]/sched_wake_up_idle file needs to be written to for
+		setting or clearing PF_WAKE_UP_IDLE flag for a given task
+
+=====================
+6. FREQUENCY GUIDANCE
+=====================
+
+As mentioned in the introduction section the scheduler is in a unique
+position to assist with the determination of CPU frequency. Because
+the scheduler now maintains an estimate of per-task CPU demand, task
+activity can be tracked, aggregated and provided to the CPUfreq
+governor as a replacement for simple CPU busy time.
+
+Two of the most popular CPUfreq governors, interactive and ondemand,
+utilize a window-based approach for measuring CPU busy time. This
+works well with the window-based load tracking scheme previously
+described. The following APIs are provided to allow the CPUfreq
+governor to query busy time from the scheduler instead of using the
+basic CPU busy time value derived via get_cpu_idle_time_us() and
+get_cpu_iowait_time_us() APIs.
+
+  int sched_set_window(u64 window_start, unsigned int window_size)
+
+    This API is invoked by governor at initialization time or whenever
+    window size is changed. 'window_size' argument (in jiffy units)
+    indicates the size of window to be used. The first window of size
+    'window_size' is set to begin at jiffy 'window_start'
+
+    -EINVAL is returned if per-entity load tracking is in use rather
+    than window-based load tracking, otherwise a success value of 0
+    is returned.
+
+  int sched_get_busy(int cpu)
+
+    Returns the busy time for the given CPU in the most recent
+    complete window. The value returned is microseconds of busy
+    time at fmax of given CPU.
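+
+A sketch of how a governor might use these APIs for some cpu 'cpu' (the 20ms
+window size is an illustrative assumption):
+
+	int busy_us;
+
+	/* Switch to scheduler-maintained busy time; window size in jiffies. */
+	if (sched_set_window(jiffies, usecs_to_jiffies(20000)) == 0)
+		busy_us = sched_get_busy(cpu);	/* usecs of busy time at fmax */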
+
+The values returned by sched_get_busy() take a bit of explanation,
+both in what they mean and also how they are derived.
+
+*** 6.1 Per-CPU Window-Based Stats
+
+In addition to the per-task window-based demand, the HMP scheduler
+extensions also track the aggregate demand seen on each CPU. This is
+done using the same windows that the task demand is tracked with
+(which is in turn set by the governor when frequency guidance is in
+use). There are four quantities maintained for each CPU by the HMP scheduler:
+
+  curr_runnable_sum: aggregate demand from all tasks which executed during
+  the current (not yet completed) window
+
+  prev_runnable_sum: aggregate demand from all tasks which executed during
+  the most recent completed window
+
+  nt_curr_runnable_sum: aggregate demand from all 'new' tasks which executed
+  during the current (not yet completed) window
+
+  nt_prev_runnable_sum: aggregate demand from all 'new' tasks which executed
+  during the most recent completed window.
+
+When the scheduler is updating a task's window-based stats it also
+updates these values. Like per-task window-based demand these
+quantities are normalized against the max possible frequency and max
+efficiency (instructions per cycle) in the system. If an update occurs
+and a window rollover is observed, curr_runnable_sum is copied into
+prev_runnable_sum before being reset to 0. The sched_get_busy() API
+returns prev_runnable_sum, scaled to the efficiency and fmax of given
+CPU. The same applies to nt_curr_runnable_sum and nt_prev_runnable_sum.
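+
+A minimal sketch of the rollover (field names as used in this document; the
+real code also handles multiple elapsed windows):
+
+	if (window_rolled_over) {
+		rq->prev_runnable_sum = rq->curr_runnable_sum;
+		rq->nt_prev_runnable_sum = rq->nt_curr_runnable_sum;
+		rq->curr_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = 0;
+	}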
+
+A 'new' task is defined as a task whose number of active windows since fork is
+less than sysctl_sched_new_task_windows. An active window is defined as a window
+where a task was observed to be runnable.
+
+*** 6.2 Per-task window-based stats
+
+Corresponding to curr_runnable_sum and prev_runnable_sum, two counters are
+maintained per-task
+
+curr_window - represents cpu demand of task in its most recently tracked
+	      window
+prev_window - represents cpu demand of task in the window prior to the one
+	      being tracked by curr_window
+
+The above counters are reused for nt_curr_runnable_sum and
+nt_prev_runnable_sum.
+
+"cpu demand" of a task includes its execution time and can also include its
+wait time. 'SCHED_FREQ_ACCOUNT_WAIT_TIME' controls whether task's wait
+time is included in its 'curr_window' and 'prev_window' counters or not.
+
+Needless to say, curr_runnable_sum counter of a cpu is derived from curr_window
+counter of various tasks that ran on it in its most recent window.
+
+*** 6.3 Effect of various task events
+
+We now consider various events and how they affect above mentioned counters.
+
+PICK_NEXT_TASK
+	This represents the beginning of execution for a task. Provided the task
+	refers to a non-idle task and SCHED_FREQ_ACCOUNT_WAIT_TIME is set, the
+	portion of the task's wait time that corresponds to the current window
+	being tracked on a cpu is added to the task's curr_window counter. The
+	same quantum is also added to the cpu's curr_runnable_sum counter. The
+	remaining portion, which corresponds to the task's wait time in the
+	previous window, is added to the task's prev_window and the cpu's
+	prev_runnable_sum counters.
+
+PUT_PREV_TASK
+	This represents the end of execution of a time-slice for a task, where
+	the task could also refer to a cpu's idle task. In case the task is
+	non-idle, or in case of the task being idle with the cpu having a
+	non-zero rq->nr_iowait count and sched_io_is_busy = 1, the portion of
+	the task's execution time that corresponds to the current window being
+	tracked on a cpu is added to the task's curr_window counter and also to
+	the cpu's curr_runnable_sum counter. The portion of the task's execution
+	that corresponds to the previous window is added to the task's
+	prev_window and the cpu's prev_runnable_sum counters.
+
+TASK_UPDATE
+	This event is called on a cpu's currently running task and hence
+	behaves effectively as PUT_PREV_TASK. Task continues executing after
+	this event, until PUT_PREV_TASK event occurs on the task (during
+	context switch).
+
+TASK_WAKE
+	This event signifies a task waking from sleep. Since many windows
+	could have elapsed since the task went to sleep, its curr_window
+	and prev_window are updated to reflect task's demand in the most
+	recent and its previous window that is being tracked on a cpu.
+
+TASK_MIGRATE
+	This event signifies task migration across cpus. It is invoked on the
+	task prior to being moved. Thus at the time of this event, the task
+	can be considered to be in "waiting" state on src_cpu. In that way
+	this event reflects actions taken under PICK_NEXT_TASK (i.e its
+	wait time is added to task's curr/prev_window counters as well
+	as src_cpu's curr/prev_runnable_sum counters, provided
+	SCHED_FREQ_ACCOUNT_WAIT_TIME is non-zero). After that update,
+	src_cpu's curr_runnable_sum is reduced by task's curr_window value
+	and dst_cpu's curr_runnable_sum is increased by task's curr_window
+	value. Similarly, src_cpu's prev_runnable_sum is reduced by task's
+	prev_window value and dst_cpu's prev_runnable_sum is increased by
+	task's prev_window value.
+
+IRQ_UPDATE
+	This event signifies the end of execution of an interrupt handler. This
+	event results in an update of the cpu's busy time counters,
+	curr_runnable_sum and prev_runnable_sum, provided the cpu was idle.
+	When sched_io_is_busy = 0, only the interrupt handling time is added
+	to the cpu's curr_runnable_sum and prev_runnable_sum counters. When
+	sched_io_is_busy = 1, the event mirrors the actions taken under the
+	TASK_UPDATE event, i.e. the time since the last accounting of the idle
+	task's cpu usage is added to the cpu's curr_runnable_sum and
+	prev_runnable_sum counters.
+
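+As an illustration of the TASK_MIGRATE bookkeeping described above, a minimal
+sketch of the counter transfer (field names as used in this document, not
+verbatim kernel code):
+
+	src_rq->curr_runnable_sum -= p->ravg.curr_window;
+	src_rq->prev_runnable_sum -= p->ravg.prev_window;
+	dst_rq->curr_runnable_sum += p->ravg.curr_window;
+	dst_rq->prev_runnable_sum += p->ravg.prev_window;
+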
+===========
+7. TUNABLES
+===========
+
+*** 7.1 sched_spill_load
+
+Appears at: /proc/sys/kernel/sched_spill_load
+
+Default value: 100
+
+The CPU selection criterion for fair-sched class tasks is the lowest power cpu
+where they can fit. When the most power-efficient cpu where a task can fit is
+overloaded (aggregate demand of tasks currently queued on it exceeds
+sched_spill_load), a task can be placed on a higher-performance cpu, even though
+the task strictly doesn't need one.
+
+*** 7.2 sched_spill_nr_run
+
+Appears at: /proc/sys/kernel/sched_spill_nr_run
+
+Default value: 10
+
+The intent of this tunable is similar to sched_spill_load, except it applies to
+nr_running count of a cpu. A task can spill over to a higher-performance cpu
+when the most power-efficient cpu where it can normally fit has more tasks than
+sched_spill_nr_run.
+
+*** 7.3 sched_upmigrate
+
+Appears at: /proc/sys/kernel/sched_upmigrate
+
+Default value: 80
+
+This tunable is a percentage. If a task consumes more than this much
+of a CPU, the CPU is considered too small for the task and the
+scheduler will try to find a bigger CPU to place the task on.
+
+*** 7.4 sched_init_task_load
+
+Appears at: /proc/sys/kernel/sched_init_task_load
+
+Default value: 15
+
+This tunable is a percentage. When a task is first created it has no
+history, so the task load tracking mechanism cannot determine a
+historical load value to assign to it. This tunable specifies the
+initial load value for newly created tasks. Also see Sec 2.8 on per-task
+'initial task load' attribute.
+
+*** 7.5 sched_ravg_hist_size
+
+Appears at: /proc/sys/kernel/sched_ravg_hist_size
+
+Default value: 5
+
+This tunable controls the number of samples used from task's sum_history[]
+array for determination of its demand.
+
+*** 7.6 sched_window_stats_policy
+
+Appears at: /proc/sys/kernel/sched_window_stats_policy
+
+Default value: 2
+
+This tunable controls the policy in how window-based load tracking
+calculates an overall demand value based on the windows of CPU
+utilization it has collected for a task.
+
+Possible values for this tunable are:
+0: Just use the most recent window sample of task activity when calculating
+   task demand.
+1: Use the maximum value of first M samples found in task's cpu demand
+   history (sum_history[] array), where M = sysctl_sched_ravg_hist_size
+2: Use the maximum of (the most recent window sample, average of first M
+   samples), where M = sysctl_sched_ravg_hist_size
+3: Use average of first M samples, where M = sysctl_sched_ravg_hist_size
+
+*** 7.7 sched_ravg_window
+
+Appears at: kernel command line argument
+
+Default value: 10000000 (10ms, units of tunable are nanoseconds)
+
+This specifies the duration of each window in window-based load
+tracking. By default each window is 10ms long. This quantity must
+currently be set at boot time on the kernel command line (or the
+default value of 10ms can be used).
+
+*** 7.8 RAVG_HIST_SIZE
+
+Appears at: compile time only (see RAVG_HIST_SIZE in include/linux/sched.h)
+
+Default value: 5
+
+This macro specifies the number of windows the window-based load
+tracking mechanism maintains per task. If default values are used for
+both this and sched_ravg_window then a total of 50ms of task history
+would be maintained in 5 10ms windows.
+
+*** 7.9 sched_freq_inc_notify
+
+Appears at: /proc/sys/kernel/sched_freq_inc_notify
+
+Default value: 10 * 1024 * 1024 (10 GHz)
+
+When scheduler detects that cur_freq of a cluster is insufficient to meet
+demand, it sends notification to governor, provided (freq_required - cur_freq)
+exceeds sched_freq_inc_notify, where freq_required is the frequency calculated
+by scheduler to meet current task demand. Note that sched_freq_inc_notify is
+specified in kHz units.
+
+*** 7.10 sched_freq_dec_notify
+
+Appears at: /proc/sys/kernel/sched_freq_dec_notify
+
+Default value: 10 * 1024 * 1024 (10 GHz)
+
+When scheduler detects that cur_freq of a cluster is far greater than what is
+needed to serve current task demand, it will send notification to governor.
+More specifically, notification is sent when (cur_freq - freq_required)
+exceeds sched_freq_dec_notify, where freq_required is the frequency calculated
+by scheduler to meet current task demand. Note that sched_freq_dec_notify is
+specified in kHz units.
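+
+A combined sketch of the two notification conditions (the notification hook is
+hypothetical; units are kHz as noted above):
+
+	if (freq_required > cur_freq &&
+	    freq_required - cur_freq > sched_freq_inc_notify)
+		notify_governor(cpu, freq_required);	/* hypothetical hook */
+	else if (cur_freq > freq_required &&
+		 cur_freq - freq_required > sched_freq_dec_notify)
+		notify_governor(cpu, freq_required);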
+
+*** 7.11 sched_cpu_high_irqload
+
+Appears at: /proc/sys/kernel/sched_cpu_high_irqload
+
+Default value: 10000000 (10ms)
+
+The scheduler keeps a decaying average of the amount of irq and softirq activity
+seen on each CPU within a ten millisecond window. Note that this "irqload"
+(reported in the sched_cpu_load_* tracepoint) will be higher than the typical load
+in a single window since every time the window rolls over, the value is decayed
+by some fraction and then added to the irq/softirq time spent in the next
+window.
+
+When the irqload on a CPU exceeds the value of this tunable, the CPU is no
+longer eligible for placement. This will affect the task placement logic
+described above, causing the scheduler to try and steer tasks away from
+the CPU.
+
+*** 7.12 cpu.upmigrate_discourage
+
+Default value : 0
+
+This is a cgroup attribute supported by the cpu resource controller. It normally
+appears at [root_cpu]/[name1]/../[name2]/cpu.upmigrate_discourage. Here
+"root_cpu" is the mount point for cgroup (cpu resource control) filesystem
+and name1, name2 etc are names of cgroups that form a hierarchy.
+
+Setting this flag to 1 discourages upmigration for all tasks of a cgroup. High
+demand tasks of such a cgroup will never be classified as big tasks and hence
+not upmigrated. Any task of the cgroup is allowed to upmigrate only under an
+overcommitted scenario. See notes on sched_spill_nr_run and sched_spill_load
+for how the overcommitment threshold is defined.
+
+*** 7.13 sched_static_cpu_pwr_cost
+
+Default value: 0
+
+Appears at /sys/devices/system/cpu/cpu<x>/sched_static_cpu_pwr_cost
+
+This is the power cost associated with bringing an idle CPU out of low power
+mode. It ignores the actual C-state that a CPU may be in and assumes the
+worst case power cost of the highest C-state. It is a means of biasing task
+placement away from idle CPUs when necessary. It can be defined per CPU;
+however, a more appropriate usage is to define the same value for every CPU
+within a cluster and possibly have differing values between clusters as
+needed.
+
+
+*** 7.14 sched_static_cluster_pwr_cost
+
+Default value: 0
+
+Appears at /sys/devices/system/cpu/cpu<x>/sched_static_cluster_pwr_cost
+
+This is the power cost associated with bringing an idle cluster out of low
+power mode. It ignores the actual D-state that a cluster may be in and assumes
+the worst case power cost of the highest D-state. It is a means of biasing task
+placement away from idle clusters when necessary.
+
+*** 7.15 sched_restrict_cluster_spill
+
+Default value: 0
+
+Appears at /proc/sys/kernel/sched_restrict_cluster_spill
+
+This tunable can be used to restrict tasks spilling to the higher capacity
+(higher power) cluster. When this tunable is enabled,
+
+- Restrict the higher capacity cluster from pulling tasks from the lower
+capacity cluster in the load balance path. The restriction is lifted if all of
+the CPUs in the lower capacity cluster are above spill. The power cost is used
+to break ties if the capacities of the clusters are the same when applying this
+restriction.
+
+- The current CPU selection algorithm for RT tasks looks for the least loaded
+CPU across all clusters. When this tunable is enabled, the RT tasks are
+restricted to the lowest possible power cluster.
+
+
+*** 7.16 sched_downmigrate
+
+Appears at: /proc/sys/kernel/sched_downmigrate
+
+Default value: 60
+
+This tunable is a percentage. It exists to control hysteresis. Let's say a task
+migrated to a high-performance cpu when it crossed 80% demand on a
+power-efficient cpu. We don't let it come back to a power-efficient cpu until
+its demand *in reference to the power-efficient cpu* drops below 60%
+(sched_downmigrate).
+
+
+*** 7.17 sched_small_wakee_task_load
+
+Appears at: /proc/sys/kernel/sched_small_wakee_task_load
+
+Default value: 10
+
+This tunable is a percentage. It configures the maximum demand of a small wakee
+task. Sync wakee tasks which have demand less than sched_small_wakee_task_load
+are categorized as small wakee tasks. The scheduler places small wakee tasks on
+the waker's cluster.
+
+
+*** 7.18 sched_big_waker_task_load
+
+Appears at: /proc/sys/kernel/sched_big_waker_task_load
+
+Default value: 25
+
+This tunable is a percentage. It configures the minimum demand of a big sync
+waker task. The scheduler places small wakee tasks woken up by a big sync waker
+on the waker's cluster.
+
+=============================
+8. HMP SCHEDULER TRACE POINTS
+=============================
+
+*** 8.1 sched_enq_deq_task
+
+Logged when a task is either enqueued or dequeued on a CPU's run queue.
+
+  <idle>-0     [004] d.h4 12700.711665: sched_enq_deq_task: cpu=4 enqueue comm=powertop pid=13227 prio=120 nr_running=1 cpu_load=0 rt_nr_running=0 affine=ff demand=13364423
+
+- cpu: the CPU that the task is being enqueued on to or dequeued off of
+- enqueue/dequeue: whether this was an enqueue or dequeue event
+- comm: name of task
+- pid: PID of task
+- prio: priority of task
+- nr_running: number of runnable tasks on this CPU
+- cpu_load: current priority-weighted load on the CPU (note, this is *not*
+  the same as CPU utilization or a metric tracked by PELT/window-based tracking)
+- rt_nr_running: number of real-time processes running on this CPU
+- affine: CPU affinity mask in hex for this task (so ff is a task eligible to
+  run on CPUs 0-7)
+- demand: window-based task demand computed based on selected policy (recent,
+  max, or average) (ns)
+
+*** 8.2 sched_task_load
+
+Logged when selecting the best CPU to run the task (select_best_cpu()).
+
+sched_task_load: 4004 (adbd): demand=698425 boost=0 reason=0 sync=0 need_idle=0 best_cpu=0 latency=103177
+
+- demand: window-based task demand computed based on selected policy (recent,
+  max, or average) (ns)
+- boost: whether boost is in effect
+- reason: reason we are picking a new CPU:
+  0: no migration - selecting a CPU for a wakeup or new task wakeup
+  1: move to big CPU (migration)
+  2: move to little CPU (migration)
+  3: move to low irq load CPU (migration)
+- sync: whether the wakeup is synchronous in nature
+- need_idle: is an idle CPU required for this task based on PF_WAKE_UP_IDLE
+- best_cpu: The CPU selected by the select_best_cpu() function for placement
+- latency: The execution time of the function select_best_cpu()
+
+*** 8.3 sched_cpu_load_*
+
+Logged when selecting the best CPU to run a task (select_best_cpu() for fair
+class tasks, find_lowest_rq_hmp() for RT tasks) and load balancing
+(update_sg_lb_stats()).
+
+<idle>-0     [004] d.h3 12700.711541: sched_cpu_load_*: cpu 0 idle 1 nr_run 0 nr_big 0 lsf 1119 capacity 1024 cr_avg 0 irqload 3301121 fcur 729600 fmax 1459200 power_cost 5 cstate 2 temp 38
+
+- cpu: the CPU being described
+- idle: boolean indicating whether the CPU is idle
+- nr_run: number of tasks running on CPU
+- nr_big: number of BIG tasks running on CPU
+- lsf: load scale factor - multiply normalized load by this factor to determine
+  how much load task will exert on CPU
+- capacity: capacity of CPU (based on max possible frequency and efficiency)
+- cr_avg: cumulative runnable average, instantaneous sum of the demand (either
+  PELT or window-based) of all the runnable task on a CPU (ns)
+- irqload: decaying average of irq activity on CPU (ns)
+- fcur: current CPU frequency (kHz)
+- fmax: max CPU frequency (but not maximum _possible_ frequency) (kHz)
+- power_cost: cost of running this CPU at the current frequency
+- cstate: current cstate of CPU
+- temp: current temperature of the CPU
+
+The power_cost value above differs in how it is calculated depending on the
+callsite of this tracepoint. The select_best_cpu() call to this tracepoint
+finds the minimum frequency required to satisfy the existing load on the CPU
+as well as the task being placed, and returns the power cost of that frequency.
+The load balance and real time task placement paths use a fixed frequency
+(the highest frequency common to all CPUs for load balancing, the minimum
+frequency of the CPU for real time task placement).
+
+*** 8.4 sched_update_task_ravg
+
+Logged when window-based stats are updated for a task. The update may happen
+for a variety of reasons, see section 2.5, "Task Events."
+
+<idle>-0     [004] d.h4 12700.711513: sched_update_task_ravg: wc 12700711473496 ws 12700691772135 delta 19701361 event TASK_WAKE cpu 4 cur_freq 199200 cur_pid 0 task 13227 (powertop) ms 12640648272532 delta 60063200964 demand 13364423 sum 0 irqtime 0 cs 0 ps 495018 cur_window 0 prev_window 0
+
+- wc: wallclock, output of sched_clock(), monotonically increasing time since
+  boot (will roll over in 585 years) (ns)
+- ws: window start, time when the current window started (ns)
+- delta: time since the window started (wc - ws) (ns)
+- event: What event caused this trace event to occur (see section 2.5 for more
+  details)
+- cpu: which CPU the task is running on
+- cur_freq: CPU's current frequency in kHz
+- cur_pid: PID of the currently running task (current)
+- task: PID and name of task being updated
+- ms: mark start - timestamp of the beginning of a segment of task activity,
+  either sleeping or runnable/running (ns)
+- delta: time since last event within the window (wc - ms) (ns)
+- demand: task demand computed based on selected policy (recent, max, or
+  average) (ns)
+- sum: the task's run time during current window scaled by frequency and
+  efficiency (ns)
+- irqtime: length of interrupt activity (ns). A non-zero irqtime is seen
+  when an idle cpu handles interrupts, the time for which needs to be
+  accounted as cpu busy time
+- cs: curr_runnable_sum of cpu (ns). See section 6.1 for more details of this
+  counter.
+- ps: prev_runnable_sum of cpu (ns). See section 6.1 for more details of this
+  counter.
+- cur_window: cpu demand of task in its most recently tracked window (ns)
+- prev_window: cpu demand of task in the window prior to the one being tracked
+  by cur_window
+
+*** 8.5 sched_update_history
+
+Logged when update_task_ravg() is accounting task activity into one or
+more windows that have completed. This may occur more than once for a
+single call into update_task_ravg(). A task that ran for 24ms spanning
+four 10ms windows (the last 2ms of window 1, all of windows 2 and 3,
+and the first 2ms of window 4) would result in two calls into
+update_history() from update_task_ravg(). The first call would record activity
+in the completed window 1 and the second call would record activity for windows
+2 and 3 together (samples will be 2 in the second call).
+
+<idle>-0     [004] d.h4 12700.711489: sched_update_history: 13227 (powertop): runtime 13364423 samples 1 event TASK_WAKE demand 13364423 (hist: 13364423 9871252 2236009 6162476 10282078) cpu 4 nr_big 0
+
+- runtime: task cpu demand in recently completed window(s). This value is scaled
+  to max_possible_freq and max_possible_efficiency. This value is pushed into
+  task's demand history array. The number of windows to which runtime applies is
+  provided by samples field.
+- samples: Number of samples (windows), each having value of runtime, that is
+  recorded in task's demand history array.
+- event: What event caused this trace event to occur (see section 2.5 for more
+  details) - PUT_PREV_TASK, PICK_NEXT_TASK, TASK_WAKE, TASK_MIGRATE,
+  TASK_UPDATE
+- demand: task demand computed based on selected policy (recent, max, or
+  average) (ns)
+- hist: last 5 windows of history for the task with the most recent window
+  listed first
+- cpu: CPU the task is associated with
+- nr_big: number of big tasks on the CPU
+
+*** 8.6 sched_reset_all_windows_stats
+
+Logged when key parameters controlling window-based statistics collection are
+changed. This event signifies that all window-based statistics for tasks and
+cpus are being reset. Changes to below attributes result in such a reset:
+
+* sched_ravg_window (See Sec 2)
+* sched_window_stats_policy (See Sec 2.4)
+* sched_ravg_hist_size (See Sec 7.5)
+
+<task>-0     [004] d.h4 12700.711489: sched_reset_all_windows_stats: time_taken 1123 window_start 0 window_size 0 reason POLICY_CHANGE old_val 0 new_val 1
+
+- time_taken: time taken for the reset function to complete (ns)
+- window_start: Beginning of first window following change to window size (ns)
+- window_size: Size of window. Non-zero if window-size is changing (in ticks)
+- reason: Reason for reset of statistics.
+- old_val: Old value of variable, change of which is triggering reset
+- new_val: New value of variable, change of which is triggering reset
+
+*** 8.7 sched_migration_update_sum
+
+Logged when a task is migrating to another cpu.
+
+<task>-0 [000] d..8  5020.404137: sched_migration_update_sum: cpu 0: cs 471278 ps 902463 nt_cs 0 nt_ps 0 pid 2645
+
+- cpu: cpu, away from which or to which, task is migrating
+- cs: curr_runnable_sum of cpu (ns). See Sec 6.1 for more details of this
+  counter.
+- ps: prev_runnable_sum of cpu (ns). See Sec 6.1 for more details of this
+  counter.
+- nt_cs: nt_curr_runnable_sum  of cpu (ns). See Sec 6.1 for more details of
+  this counter.
+- nt_ps: nt_prev_runnable_sum of cpu (ns). See Sec 6.1 for more details of
+  this counter
+- pid: PID of migrating task
+
+*** 8.8 sched_get_busy
+
+Logged when scheduler is returning busy time statistics for a cpu.
+
+<...>-4331  [003] d.s3   313.700108: sched_get_busy: cpu 3 load 19076 new_task_load 0 early 0
+
+
+- cpu: cpu, for which busy time statistic (prev_runnable_sum) is being
+  returned (ns)
+- load: corresponds to prev_runnable_sum (ns), scaled to fmax of cpu
+- new_task_load: corresponds to nt_prev_runnable_sum (ns), scaled to fmax of cpu
+- early: A flag indicating whether the scheduler is passing regular load or early detection load
+  0 - regular load
+  1 - early detection load
+
+*** 8.9 sched_freq_alert
+
+Logged when scheduler is alerting cpufreq governor about need to change
+frequency
+
+<task>-0     [004] d.h4 12700.711489: sched_freq_alert: cpu 0 old_load=XXX new_load=YYY
+
+- cpu: cpu in cluster that has highest load (prev_runnable_sum)
+- old_load: cpu busy time last reported to governor. This is load scaled in
+  reference to max_possible_freq and max_possible_efficiency.
+- new_load: recent cpu busy time. This is load scaled in
+  reference to max_possible_freq and max_possible_efficiency.
+
+*** 8.10 sched_set_boost
+
+Logged when boost settings are being changed
+
+<task>-0     [004] d.h4 12700.711489: sched_set_boost: ref_count=1
+
+- ref_count: A non-zero value indicates boost is in effect
diff --git a/Documentation/sync.txt b/Documentation/sync.txt
new file mode 100644
index 0000000..a2d05e7
--- /dev/null
+++ b/Documentation/sync.txt
@@ -0,0 +1,75 @@
+Motivation:
+
+In complicated DMA pipelines such as graphics (multimedia, camera, gpu, display)
+a consumer of a buffer needs to know when the producer has finished producing
+it.  Likewise the producer needs to know when the consumer is finished with the
+buffer so it can reuse it.  A particular buffer may be consumed by multiple
+consumers which will retain the buffer for different amounts of time.  In
+addition, a consumer may consume multiple buffers atomically.
+The sync framework adds an API which allows synchronization between the
+producers and consumers in a generic way while also allowing platforms which
+have shared hardware synchronization primitives to exploit them.
+
+Goals:
+	* provide a generic API for expressing synchronization dependencies
+	* allow drivers to exploit hardware synchronization between hardware
+	  blocks
+	* provide a userspace API that allows a compositor to manage
+	  dependencies.
+	* provide rich telemetry data to allow debugging slowdowns and stalls of
+	   the graphics pipeline.
+
+Objects:
+	* sync_timeline
+	* sync_pt
+	* sync_fence
+
+sync_timeline:
+
+A sync_timeline is an abstract monotonically increasing counter. In general,
+each driver/hardware block context will have one of these.  They can be backed
+by the appropriate hardware or rely on the generic sw_sync implementation.
+Timelines are only ever created through their specific implementations
+(i.e. sw_sync.)
+
+sync_pt:
+
+A sync_pt is an abstract value which marks a point on a sync_timeline. Sync_pts
+have a single timeline parent.  They have 3 states: active, signaled, and error.
+They start in active state and transition, once, to either signaled (when the
+timeline counter advances beyond the sync_pt’s value) or error state.
+
+sync_fence:
+
+Sync_fences are the primary primitives used by drivers to coordinate
+synchronization of their buffers.  They are a collection of sync_pts which may
+or may not have the same timeline parent.  A sync_pt can only exist in one fence
+and the fence's list of sync_pts is immutable once created.  Fences can be
+waited on synchronously or asynchronously.  Two fences can also be merged to
+create a third fence containing a copy of the two fences’ sync_pts.  Fences are
+backed by file descriptors to allow userspace to coordinate the display pipeline
+dependencies.
+
+Use:
+
+A driver implementing sync support should have a work submission function which:
+     * takes a fence argument specifying when to begin work
+     * asynchronously queues that work to kick off when the fence is signaled
+     * returns a fence to indicate when its work will be done.
+     * signals the returned fence once the work is completed.
+
+Consider an imaginary display driver that has the following API:
+/*
+ * assumes buf is ready to be displayed.
+ * blocks until the buffer is on screen.
+ */
+    void display_buffer(struct dma_buf *buf);
+
+The new API will become:
+/*
+ * will display buf when fence is signaled.
+ * returns immediately with a fence that will signal when buf
+ * is no longer displayed.
+ */
+struct sync_fence* display_buffer(struct dma_buf *buf,
+                                 struct sync_fence *fence);
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index ffab8b5..52daff6 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -659,12 +659,14 @@
 perf_event_paranoid:
 
 Controls use of the performance events system by unprivileged
-users (without CAP_SYS_ADMIN).  The default value is 2.
+users (without CAP_SYS_ADMIN).  The default value is 3 if
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT is set, or 2 otherwise.
 
  -1: Allow use of (almost) all events by all users
 >=0: Disallow raw tracepoint access by users without CAP_IOC_LOCK
 >=1: Disallow CPU event access by users without CAP_SYS_ADMIN
 >=2: Disallow kernel profiling by users without CAP_SYS_ADMIN
+>=3: Disallow all event access by users without CAP_SYS_ADMIN
 
 ==============================================================
 
diff --git a/Documentation/trace/events-power.txt b/Documentation/trace/events-power.txt
index 21d514c..4d817d5 100644
--- a/Documentation/trace/events-power.txt
+++ b/Documentation/trace/events-power.txt
@@ -25,6 +25,7 @@
 
 cpu_idle		"state=%lu cpu_id=%lu"
 cpu_frequency		"state=%lu cpu_id=%lu"
+cpu_frequency_limits	"min=%lu max=%lu cpu_id=%lu"
 
 A suspend event is used to indicate the system going in and out of the
 suspend mode:
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 185c39f..e20aacb 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -2102,6 +2102,35 @@
  1)   1.449 us    |             }
 
 
+You can disable the hierarchical function call formatting and instead print a
+flat list of function entry and return events.  This uses the format described
+in the Output Formatting section and respects all the trace options that
+control that formatting.  Hierarchical formatting is the default.
+
+	hierarchical: echo nofuncgraph-flat > trace_options
+	flat: echo funcgraph-flat > trace_options
+
+  i.e.:
+
+  # tracer: function_graph
+  #
+  # entries-in-buffer/entries-written: 68355/68355   #P:2
+  #
+  #                              _-----=> irqs-off
+  #                             / _----=> need-resched
+  #                            | / _---=> hardirq/softirq
+  #                            || / _--=> preempt-depth
+  #                            ||| /     delay
+  #           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION
+  #              | |       |   ||||       |         |
+                sh-1806  [001] d...   198.843443: graph_ent: func=_raw_spin_lock
+                sh-1806  [001] d...   198.843445: graph_ent: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843447: graph_ret: func=__raw_spin_lock
+                sh-1806  [001] d..1   198.843449: graph_ret: func=_raw_spin_lock
+                sh-1806  [001] d..1   198.843451: graph_ent: func=_raw_spin_unlock_irqrestore
+                sh-1806  [001] d...   198.843453: graph_ret: func=_raw_spin_unlock_irqrestore
+
+
 You might find other useful features for this tracer in the
 following "dynamic ftrace" section such as tracing only specific
 functions or tasks.
diff --git a/Makefile b/Makefile
index 369099d..fb2f21b 100644
--- a/Makefile
+++ b/Makefile
@@ -346,7 +346,7 @@
 # Make variables (CC, etc...)
 AS		= $(CROSS_COMPILE)as
 LD		= $(CROSS_COMPILE)ld
-CC		= $(CROSS_COMPILE)gcc
+REAL_CC		= $(CROSS_COMPILE)gcc
 CPP		= $(CC) -E
 AR		= $(CROSS_COMPILE)ar
 NM		= $(CROSS_COMPILE)nm
@@ -361,6 +361,10 @@
 PYTHON		= python
 CHECK		= sparse
 
+# Use the wrapper for the compiler.  This wrapper scans for new
+# warnings and causes the build to stop upon encountering them
+CC		= $(srctree)/scripts/gcc-wrapper.py $(REAL_CC)
+
 CHECKFLAGS     := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
 		  -Wbitwise -Wno-return-void $(CF)
 NOSTDINC_FLAGS  =
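
The wrapper itself lives in scripts/gcc-wrapper.py and is not reproduced here. Purely to illustrate the idea the comment above describes (invoke the real compiler, watch its diagnostics, and fail the build on warnings), here is a rough C sketch; everything about it, including the warning-matching rule and argument handling, is an assumption and not the script's actual behaviour.

/* Rough, hypothetical sketch of the idea only: run the real compiler
 * (argv[1] onwards, as passed via REAL_CC), echo its diagnostics, and
 * return failure if any warning was seen.  Argument quoting and the
 * warning filter are heavily simplified. */
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
	char cmd[8192];
	char line[1024];
	size_t len = 0;
	int seen_warning = 0;
	FILE *p;
	int i;

	cmd[0] = '\0';
	for (i = 1; i < argc && len < sizeof(cmd) - 8; i++)	/* argv[1] is $(REAL_CC) */
		len += snprintf(cmd + len, sizeof(cmd) - 8 - len, "%s ", argv[i]);
	strcat(cmd, "2>&1");					/* fold stderr into the pipe */

	p = popen(cmd, "r");
	if (!p)
		return 1;
	while (fgets(line, sizeof(line), p)) {
		fputs(line, stderr);				/* pass diagnostics through */
		if (strstr(line, ": warning:"))
			seen_warning = 1;
	}
	if (pclose(p) != 0)
		return 1;					/* the compile itself failed */
	return seen_warning ? 1 : 0;				/* stop the build on warnings */
}
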
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b5d529f..00be82f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1836,6 +1836,15 @@
 	help
 	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM.
 
+config ARM_FLUSH_CONSOLE_ON_RESTART
+	bool "Force flush the console on restart"
+	help
+	  If the console is locked while the system is rebooted, the messages
+	  in the temporary logbuffer would not have propagated to all the
+	  console drivers. This option forces the console lock to be
+	  released if it failed to be acquired, which will cause all the
+	  pending messages to be flushed.
+
 endmenu
 
 menu "Boot options"
@@ -1864,6 +1873,21 @@
 	  This was deprecated in 2001 and announced to live on for 5 years.
 	  Some old boot loaders still use this way.
 
+config BUILD_ARM_APPENDED_DTB_IMAGE
+	bool "Build a concatenated zImage/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenated zImage and list of
+	  DTBs to be built by default (instead of a standalone zImage).
+	  The image will be built in arch/arm/boot/zImage-dtb.
+
+config BUILD_ARM_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM_APPENDED_DTB_IMAGE
+	help
+	  Space separated list of names of dtbs to append when
+	  building a concatenated zImage-dtb.
+
 # Compressed boot loader in ROM.  Yes, we really want to ask about
 # TEXT and BSS so we preserve their values in the config files.
 config ZBOOT_ROM_TEXT
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index d83f7c3..17dcd94 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1723,6 +1723,14 @@
 	  kernel low-level debugging functions. Add earlyprintk to your
 	  kernel parameters to enable this console.
 
+config EARLY_PRINTK_DIRECT
+	bool "Early printk direct"
+	depends on DEBUG_LL
+	help
+	  Say Y here if you want an early console using the kernel
+	  low-level debugging functions for cases where EARLY_PRINTK is
+	  not early enough.
+
 config ARM_KPROBES_TEST
 	tristate "Kprobes test module"
 	depends on KPROBES && MODULES
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 6be9ee1..b53a7b4 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -298,6 +298,8 @@
 # Default target when executing plain make
 ifeq ($(CONFIG_XIP_KERNEL),y)
 KBUILD_IMAGE := xipImage
+else ifeq ($(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE := zImage-dtb
 else
 KBUILD_IMAGE := zImage
 endif
@@ -349,6 +351,9 @@
 	$(Q)$(MAKE) $(build)=arch/arm/vdso $@
 endif
 
+zImage-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
+
 # We use MRPROPER_FILES and CLEAN_FILES now
 archclean:
 	$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm/boot/.gitignore b/arch/arm/boot/.gitignore
index 3c79f85..ad7a025 100644
--- a/arch/arm/boot/.gitignore
+++ b/arch/arm/boot/.gitignore
@@ -4,3 +4,4 @@
 bootpImage
 uImage
 *.dtb
+zImage-dtb
\ No newline at end of file
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 50f8d1b..da75630 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -16,6 +16,7 @@
 ifneq ($(MACHINE),)
 include $(MACHINE)/Makefile.boot
 endif
+include $(srctree)/arch/arm/boot/dts/Makefile
 
 # Note: the following conditions must always be true:
 #   ZRELADDR == virt_to_phys(PAGE_OFFSET + TEXT_OFFSET)
@@ -29,6 +30,14 @@
 
 targets := Image zImage xipImage bootpImage uImage
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+
 ifeq ($(CONFIG_XIP_KERNEL),y)
 
 $(obj)/xipImage: vmlinux FORCE
@@ -55,6 +64,10 @@
 $(obj)/zImage:	$(obj)/compressed/vmlinux FORCE
 	$(call if_changed,objcopy)
 
+$(obj)/zImage-dtb:	$(obj)/zImage $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+	@echo '  Kernel: $@ is ready'
+
 endif
 
 ifneq ($(LOADADDR),)
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fc6d541..51fc9fb 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -781,6 +781,8 @@
 		bic     r6, r6, #1 << 31        @ 32-bit translation system
 		bic     r6, r6, #(7 << 0) | (1 << 4)	@ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
+		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
+		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
 		mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index c558ba7..5af3ec1 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -959,5 +959,15 @@
 dtstree		:= $(srctree)/$(src)
 dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts))
 
-always		:= $(dtb-y)
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+
+targets += dtbs dtbs_install
+targets += $(DTB_LIST)
+
+always		:= $(DTB_LIST)
 clean-files	:= *.dtb
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 9353184..ce01364 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -17,3 +17,7 @@
 
 config SHARP_SCOOP
 	bool
+
+config FIQ_GLUE
+	bool
+	select FIQ
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 27f23b1..04aca89 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -4,6 +4,7 @@
 
 obj-y				+= firmware.o
 
+obj-$(CONFIG_FIQ_GLUE)		+= fiq_glue.o fiq_glue_setup.o
 obj-$(CONFIG_ICST)		+= icst.o
 obj-$(CONFIG_SA1111)		+= sa1111.o
 obj-$(CONFIG_DMABOUNCE)		+= dmabounce.o
diff --git a/arch/arm/common/fiq_glue.S b/arch/arm/common/fiq_glue.S
new file mode 100644
index 0000000..24b42ce
--- /dev/null
+++ b/arch/arm/common/fiq_glue.S
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+		.text
+
+		.global fiq_glue_end
+
+		/* fiq stack: r0-r15,cpsr,spsr of interrupted mode */
+
+ENTRY(fiq_glue)
+		/* store pc, cpsr from previous mode, reserve space for spsr */
+		mrs	r12, spsr
+		sub	lr, lr, #4
+		subs	r10, #1
+		bne	nested_fiq
+
+		str	r12, [sp, #-8]!
+		str	lr, [sp, #-4]!
+
+		/* store r8-r14 from previous mode */
+		sub	sp, sp, #(7 * 4)
+		stmia	sp, {r8-r14}^
+		nop
+
+		/* store r0-r7 from previous mode */
+		stmfd	sp!, {r0-r7}
+
+		/* setup func(data,regs) arguments */
+		mov	r0, r9
+		mov	r1, sp
+		mov	r3, r8
+
+		mov	r7, sp
+
+		/* Get sp and lr from non-user modes */
+		and	r4, r12, #MODE_MASK
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode
+
+		mov	r7, sp
+		orr	r4, r4, #(PSR_I_BIT | PSR_F_BIT)
+		msr	cpsr_c, r4
+		str	sp, [r7, #(4 * 13)]
+		str	lr, [r7, #(4 * 14)]
+		mrs	r5, spsr
+		str	r5, [r7, #(4 * 17)]
+
+		cmp	r4, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		/* use fiq stack if we reenter this mode */
+		subne	sp, r7, #(4 * 3)
+
+fiq_from_usr_mode:
+		msr	cpsr_c, #(SVC_MODE | PSR_I_BIT | PSR_F_BIT)
+		mov	r2, sp
+		sub	sp, r7, #12
+		stmfd	sp!, {r2, ip, lr}
+		/* call func(data,regs) */
+		blx	r3
+		ldmfd	sp, {r2, ip, lr}
+		mov	sp, r2
+
+		/* restore/discard saved state */
+		cmp	r4, #USR_MODE
+		beq	fiq_from_usr_mode_exit
+
+		msr	cpsr_c, r4
+		ldr	sp, [r7, #(4 * 13)]
+		ldr	lr, [r7, #(4 * 14)]
+		msr	spsr_cxsf, r5
+
+fiq_from_usr_mode_exit:
+		msr	cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+
+		ldmfd	sp!, {r0-r7}
+		ldr	lr, [sp, #(4 * 7)]
+		ldr	r12, [sp, #(4 * 8)]
+		add	sp, sp, #(10 * 4)
+exit_fiq:
+		msr	spsr_cxsf, r12
+		add	r10, #1
+		cmp	r11, #0
+		moveqs	pc, lr
+		bx	r11 /* jump to custom fiq return function */
+
+nested_fiq:
+		orr	r12, r12, #(PSR_F_BIT)
+		b	exit_fiq
+
+fiq_glue_end:
+
+ENTRY(fiq_glue_setup) /* func, data, sp, smc call number */
+		stmfd		sp!, {r4}
+		mrs		r4, cpsr
+		msr		cpsr_c, #(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)
+		movs		r8, r0
+		mov		r9, r1
+		mov		sp, r2
+		mov		r11, r3
+		moveq		r10, #0
+		movne		r10, #1
+		msr		cpsr_c, r4
+		ldmfd		sp!, {r4}
+		bx		lr
+
diff --git a/arch/arm/common/fiq_glue_setup.c b/arch/arm/common/fiq_glue_setup.c
new file mode 100644
index 0000000..8cb1b61
--- /dev/null
+++ b/arch/arm/common/fiq_glue_setup.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/fiq.h>
+#include <asm/fiq_glue.h>
+
+extern unsigned char fiq_glue, fiq_glue_end;
+extern void fiq_glue_setup(void *func, void *data, void *sp,
+			   fiq_return_handler_t fiq_return_handler);
+
+static struct fiq_handler fiq_debbuger_fiq_handler = {
+	.name = "fiq_glue",
+};
+DEFINE_PER_CPU(void *, fiq_stack);
+static struct fiq_glue_handler *current_handler;
+static fiq_return_handler_t fiq_return_handler;
+static DEFINE_MUTEX(fiq_glue_lock);
+
+static void fiq_glue_setup_helper(void *info)
+{
+	struct fiq_glue_handler *handler = info;
+	fiq_glue_setup(handler->fiq, handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP,
+		fiq_return_handler);
+}
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler)
+{
+	int ret;
+	int cpu;
+
+	if (!handler || !handler->fiq)
+		return -EINVAL;
+
+	mutex_lock(&fiq_glue_lock);
+	if (fiq_stack) {
+		ret = -EBUSY;
+		goto err_busy;
+	}
+
+	for_each_possible_cpu(cpu) {
+		void *stack;
+		stack = (void *)__get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
+		if (WARN_ON(!stack)) {
+			ret = -ENOMEM;
+			goto err_alloc_fiq_stack;
+		}
+		per_cpu(fiq_stack, cpu) = stack;
+	}
+
+	ret = claim_fiq(&fiq_debbuger_fiq_handler);
+	if (WARN_ON(ret))
+		goto err_claim_fiq;
+
+	current_handler = handler;
+	on_each_cpu(fiq_glue_setup_helper, handler, true);
+	set_fiq_handler(&fiq_glue, &fiq_glue_end - &fiq_glue);
+
+	mutex_unlock(&fiq_glue_lock);
+	return 0;
+
+err_claim_fiq:
+err_alloc_fiq_stack:
+	for_each_possible_cpu(cpu) {
+		__free_pages(per_cpu(fiq_stack, cpu), THREAD_SIZE_ORDER);
+		per_cpu(fiq_stack, cpu) = NULL;
+	}
+err_busy:
+	mutex_unlock(&fiq_glue_lock);
+	return ret;
+}
+
+static void fiq_glue_update_return_handler(void (*fiq_return)(void))
+{
+	fiq_return_handler = fiq_return;
+	if (current_handler)
+		on_each_cpu(fiq_glue_setup_helper, current_handler, true);
+}
+
+int fiq_glue_set_return_handler(void (*fiq_return)(void))
+{
+	int ret;
+
+	mutex_lock(&fiq_glue_lock);
+	if (fiq_return_handler) {
+		ret = -EBUSY;
+		goto err_busy;
+	}
+	fiq_glue_update_return_handler(fiq_return);
+	ret = 0;
+err_busy:
+	mutex_unlock(&fiq_glue_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(fiq_glue_set_return_handler);
+
+int fiq_glue_clear_return_handler(void (*fiq_return)(void))
+{
+	int ret;
+
+	mutex_lock(&fiq_glue_lock);
+	if (WARN_ON(fiq_return_handler != fiq_return)) {
+		ret = -EINVAL;
+		goto err_inval;
+	}
+	fiq_glue_update_return_handler(NULL);
+	ret = 0;
+err_inval:
+	mutex_unlock(&fiq_glue_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(fiq_glue_clear_return_handler);
+
+/**
+ * fiq_glue_resume - Restore fiqs after suspend or low power idle states
+ *
+ * This must be called before calling local_fiq_enable after returning from a
+ * power state where the fiq mode registers were lost. If a driver provided
+ * a resume hook when it registered the handler it will be called.
+ */
+
+void fiq_glue_resume(void)
+{
+	if (!current_handler)
+		return;
+	fiq_glue_setup(current_handler->fiq, current_handler,
+		__get_cpu_var(fiq_stack) + THREAD_START_SP,
+		fiq_return_handler);
+	if (current_handler->resume)
+		current_handler->resume(current_handler);
+}
+
diff --git a/arch/arm/include/asm/fiq_glue.h b/arch/arm/include/asm/fiq_glue.h
new file mode 100644
index 0000000..a9e244f9
--- /dev/null
+++ b/arch/arm/include/asm/fiq_glue.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_FIQ_GLUE_H
+#define __ASM_FIQ_GLUE_H
+
+struct fiq_glue_handler {
+	void (*fiq)(struct fiq_glue_handler *h, void *regs, void *svc_sp);
+	void (*resume)(struct fiq_glue_handler *h);
+};
+typedef void (*fiq_return_handler_t)(void);
+
+int fiq_glue_register_handler(struct fiq_glue_handler *handler);
+int fiq_glue_set_return_handler(fiq_return_handler_t fiq_return);
+int fiq_glue_clear_return_handler(fiq_return_handler_t fiq_return);
+
+#ifdef CONFIG_FIQ_GLUE
+void fiq_glue_resume(void);
+#else
+static inline void fiq_glue_resume(void) {}
+#endif
+
+#endif
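
To show how the interface above is meant to be used, here is a minimal hypothetical client; the handler bodies and names are made up, and since fiq_glue_register_handler() is not exported by this patch, such a client would be built-in rather than modular.

/* Hypothetical built-in client of the fiq_glue interface above; the
 * handler bodies and names are illustrative only. */
#include <linux/init.h>
#include <asm/fiq_glue.h>

static void example_fiq(struct fiq_glue_handler *h, void *regs, void *svc_sp)
{
	/* Runs in FIQ mode on the per-cpu stack that fiq_glue set up. */
}

static void example_resume(struct fiq_glue_handler *h)
{
	/* Re-arm the FIQ source after a power state that lost the FIQ mode
	 * registers; called from fiq_glue_resume(). */
}

static struct fiq_glue_handler example_handler = {
	.fiq	= example_fiq,
	.resume	= example_resume,
};

static int __init example_fiq_init(void)
{
	/* Claims the FIQ, allocates per-cpu stacks and installs fiq_glue. */
	return fiq_glue_register_handler(&example_handler);
}
device_initcall(example_fiq_init);
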
diff --git a/arch/arm/include/asm/system_misc.h b/arch/arm/include/asm/system_misc.h
index a3d61ad..062c484 100644
--- a/arch/arm/include/asm/system_misc.h
+++ b/arch/arm/include/asm/system_misc.h
@@ -21,6 +21,7 @@
 #define UDBG_BUS	(1 << 4)
 
 extern unsigned int user_debug;
+extern char* (*arch_read_hardware_id)(void);
 
 #endif /* !__ASSEMBLY__ */
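
The hook declared above is consumed by the /proc/cpuinfo change to arch/arm/kernel/setup.c further down in this patch; a hypothetical platform could override the reported "Hardware" line like this (the board name and init level are illustrative only).

/* Hypothetical platform code using the arch_read_hardware_id hook above. */
#include <linux/init.h>
#include <asm/system_misc.h>

static char example_hwid[] = "Example Board, SoC rev 1.0";

static char *example_hardware_id(void)
{
	return example_hwid;
}

static int __init example_hwid_init(void)
{
	arch_read_hardware_id = example_hardware_id;
	return 0;
}
arch_initcall(example_hwid_init);
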
 
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 6080082..a2a850b 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -10,7 +10,6 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#include <linux/compiler.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
@@ -41,19 +40,10 @@
  * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
  *            (http://gcc.gnu.org/PR8896) and incorrect structure
  *	      initialisation in fs/jffs2/erase.c
- * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
- *	      miscompiles find_get_entry(), and can result in EXT3 and EXT4
- *	      filesystem corruption (possibly other FS too).
  */
-#ifdef __GNUC__
 #if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
 #error Your compiler is too buggy; it is known to miscompile kernels.
-#error    Known good compilers: 3.3, 4.x
-#endif
-#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
-#error Your compiler is too buggy; it is known to miscompile kernels
-#error and result in filesystem corruption and oopses.
-#endif
+#error    Known good compilers: 3.3
 #endif
 
 int main(void)
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 9232cae..f3c6622 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -140,6 +140,8 @@
 
 static int kgdb_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+	if (user_mode(regs))
+		return -1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
 	return 0;
@@ -147,6 +149,8 @@
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 {
+	if (user_mode(regs))
+		return -1;
 	compiled_break = 1;
 	kgdb_handle_exception(1, SIGTRAP, 0, regs);
 
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index b942349..4f9e2b5 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1102,7 +1102,7 @@
 	}
 
 	/* Initialize & Reset PMNC: C and P bits */
-	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
+	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
 
 static int armv7_a8_map_event(struct perf_event *event)
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 91d2d5b..c6324b5 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -80,6 +80,7 @@
 
 void arch_cpu_idle_enter(void)
 {
+	idle_notifier_call_chain(IDLE_START);
 	ledtrig_cpu(CPU_LED_IDLE_START);
 #ifdef CONFIG_PL310_ERRATA_769419
 	wmb();
@@ -89,6 +90,78 @@
 void arch_cpu_idle_exit(void)
 {
 	ledtrig_cpu(CPU_LED_IDLE_END);
+	idle_notifier_call_chain(IDLE_END);
+}
+
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+	int	i, j;
+	int	nlines;
+	u32	*p;
+
+	/*
+	 * don't attempt to dump non-kernel addresses or
+	 * values that are probably just small negative numbers
+	 */
+	if (addr < PAGE_OFFSET || addr > -256UL)
+		return;
+
+	printk("\n%s: %#lx:\n", name, addr);
+
+	/*
+	 * round address down to a 32 bit boundary
+	 * and always dump a multiple of 32 bytes
+	 */
+	p = (u32 *)(addr & ~(sizeof(u32) - 1));
+	nbytes += (addr & (sizeof(u32) - 1));
+	nlines = (nbytes + 31) / 32;
+
+
+	for (i = 0; i < nlines; i++) {
+		/*
+		 * just display low 16 bits of address to keep
+		 * each line of the dump < 80 characters
+		 */
+		printk("%04lx ", (unsigned long)p & 0xffff);
+		for (j = 0; j < 8; j++) {
+			u32	data;
+			if (probe_kernel_address(p, data)) {
+				printk(" ********");
+			} else {
+				printk(" %08x", data);
+			}
+			++p;
+		}
+		printk("\n");
+	}
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+	mm_segment_t fs;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
+	show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
+	show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
+	show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
+	show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
+	show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
+	show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
+	show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
+	show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
+	show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
+	show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
+	show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
+	show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
+	show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
+	show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
+	show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
+	set_fs(fs);
 }
 
 void __show_regs(struct pt_regs *regs)
@@ -182,6 +255,8 @@
 		printk("Control: %08x%s\n", ctrl, buf);
 	}
 #endif
+
+	show_extra_register_data(regs, 128);
 }
 
 void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/kernel/psci_smp.c b/arch/arm/kernel/psci_smp.c
index cb3fcae..29286fb 100644
--- a/arch/arm/kernel/psci_smp.c
+++ b/arch/arm/kernel/psci_smp.c
@@ -98,12 +98,12 @@
 	for (i = 0; i < 10; i++) {
 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
 		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
-			pr_info("CPU%d killed.\n", cpu);
+			pr_debug("CPU%d killed.\n", cpu);
 			return 1;
 		}
 
 		msleep(10);
-		pr_info("Retrying again to check for CPU kill\n");
+		pr_debug("Retrying again to check for CPU kill\n");
 	}
 
 	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
diff --git a/arch/arm/kernel/reboot.c b/arch/arm/kernel/reboot.c
index 3fa867a..d704df8 100644
--- a/arch/arm/kernel/reboot.c
+++ b/arch/arm/kernel/reboot.c
@@ -6,6 +6,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/reboot.h>
@@ -122,6 +123,31 @@
 		pm_power_off();
 }
 
+#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
+void arm_machine_flush_console(void)
+{
+	printk("\n");
+	pr_emerg("Restarting %s\n", linux_banner);
+	if (console_trylock()) {
+		console_unlock();
+		return;
+	}
+
+	mdelay(50);
+
+	local_irq_disable();
+	if (!console_trylock())
+		pr_emerg("arm_restart: Console was locked! Busting\n");
+	else
+		pr_emerg("arm_restart: Console was locked!\n");
+	console_unlock();
+}
+#else
+void arm_machine_flush_console(void)
+{
+}
+#endif
+
 /*
  * Restart requires that the secondary CPUs stop performing any activity
  * while the primary CPU resets the system. Systems with a single CPU can
@@ -138,6 +164,10 @@
 	local_irq_disable();
 	smp_send_stop();
 
+	/* Flush the console to make sure all the relevant messages make it
+	 * out to the console drivers */
+	arm_machine_flush_console();
+
 	if (arm_pm_restart)
 		arm_pm_restart(reboot_mode, cmd);
 	else
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 34e3f3c..ccf79ca 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -113,6 +113,9 @@
 EXPORT_SYMBOL(elf_hwcap2);
 
 
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
+
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
 #endif
@@ -1264,7 +1267,10 @@
 		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
 	}
 
-	seq_printf(m, "Hardware\t: %s\n", machine_name);
+	if (!arch_read_hardware_id)
+		seq_printf(m, "Hardware\t: %s\n", machine_name);
+	else
+		seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
 	seq_printf(m, "Revision\t: %04x\n", system_rev);
 	seq_printf(m, "Serial\t\t: %s\n", system_serial);
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7dd14e8..b1a3599 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -388,12 +388,12 @@
 	if (smp_ops.smp_secondary_init)
 		smp_ops.smp_secondary_init(cpu);
 
+	smp_store_cpu_info(cpu);
+
 	notify_cpu_starting(cpu);
 
 	calibrate_delay();
 
-	smp_store_cpu_info(cpu);
-
 	/*
 	 * OK, now it's safe to let the boot CPU continue.  Wait for
 	 * the CPU migration code to notice that the CPU is online
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 2465995..11da0f5 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -270,6 +270,11 @@
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	sub	r2, r1, r0
+	cmp	r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT
+	bhi	v6_dma_flush_dcache_all
+#endif
 #ifdef CONFIG_DMA_CACHE_RWFO
 	ldrb	r2, [r0]		@ read for ownership
 	strb	r2, [r0]		@ write for ownership
@@ -292,6 +297,18 @@
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
 	ret	lr
 
+#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT
+v6_dma_flush_dcache_all:
+	mov	r0, #0
+#ifdef HARVARD_CACHE
+	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
+#else
+	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
+#endif
+	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mov	pc, lr
+#endif
+
 /*
  *	dma_map_area(start, size, dir)
  *	- start	- kernel virtual start address
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ab77100..4da3175 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <linux/sizes.h>
 #include <linux/cma.h>
+#include <linux/msm_dma_iommu_mapping.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -1276,8 +1277,8 @@
 					  int coherent_flag)
 {
 	struct page **pages;
-	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
 	int i = 0;
 	int order_idx = 0;
 
@@ -2186,7 +2187,7 @@
 	mapping->nr_bitmaps = 1;
 	mapping->extensions = extensions;
 	mapping->base = base;
-	mapping->bits = BITS_PER_BYTE * bitmap_size;
+	mapping->bits = bits;
 
 	spin_lock_init(&mapping->lock);
 
@@ -2298,6 +2299,9 @@
 		return;
 	}
 
+	if (msm_dma_unmap_all_for_dev(dev))
+		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
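
On the one-line mapping->bits change in the hunk above: assuming the bitmap is sized in whole longs a few lines earlier (bitmap_size = BITS_TO_LONGS(bits) * sizeof(long), as in mainline arm_iommu_create_mapping()), multiplying the byte size back up can overstate the usable bit count. A small worked example with 64-bit longs:

  bits                             = 100
  bitmap_size                      = BITS_TO_LONGS(100) * sizeof(long)
                                   = 2 * 8 = 16 bytes
  old: BITS_PER_BYTE * bitmap_size = 8 * 16 = 128 bits   (rounded up)
  new: bits                        = 100 bits            (exact)

So the old value could let the IOVA allocator hand out space past the end of the window the caller asked for, while storing bits directly keeps the advertised size exact.
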
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3a2e678..217ddb2 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -273,10 +273,10 @@
 		local_irq_enable();
 
 	/*
-	 * If we're in an interrupt or have no user
+	 * If we're in an interrupt, or have no irqs, or have no user
 	 * context, we must not take the fault..
 	 */
-	if (faulthandler_disabled() || !mm)
+	if (faulthandler_disabled() || irqs_disabled() || !mm)
 		goto no_context;
 
 	if (user_mode(regs))
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 969ef88..3aff389 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -166,6 +166,10 @@
 config NO_IOPORT_MAP
 	def_bool y if !PCI
 
+config ILLEGAL_POINTER_VALUE
+	hex
+	default 0xdead000000000000
+
 config STACKTRACE_SUPPORT
 	def_bool y
 
@@ -217,6 +221,32 @@
 config SMP
 	def_bool y
 
+config ARM64_DMA_USE_IOMMU
+	bool
+	select ARM_HAS_SG_CHAIN
+	select NEED_SG_DMA_LENGTH
+
+if ARM64_DMA_USE_IOMMU
+
+config ARM64_DMA_IOMMU_ALIGNMENT
+	int "Maximum PAGE_SIZE order of alignment for DMA IOMMU buffers"
+	range 4 9
+	default 8
+	help
+	  DMA mapping framework by default aligns all buffers to the smallest
+	  PAGE_SIZE order which is greater than or equal to the requested buffer
+	  size. This works well for buffers up to a few hundred kilobytes, but
+	  for larger buffers it is just a waste of address space. Drivers which
+	  have a relatively small addressing window (like 64 MiB) might run out
+	  of virtual space with just a few allocations.
+
+	  With this parameter you can specify the maximum PAGE_SIZE order for
+	  DMA IOMMU buffers. Larger buffers will be aligned only to this
+	  specified order. The order is expressed as a power of two multiplied
+	  by the PAGE_SIZE.
+
+endif
+
 config SWIOTLB
 	def_bool y
 
@@ -790,6 +820,14 @@
 	  If unsure, say Y
 endif
 
+config ARM64_SW_TTBR0_PAN
+	bool "Emulate Privileged Access Never using TTBR0_EL1 switching"
+	help
+	  Enabling this option prevents the kernel from accessing
+	  user-space memory directly by pointing TTBR0_EL1 to a reserved
+	  zeroed area and reserved ASID. The user access routines
+	  restore the valid TTBR0_EL1 temporarily.
+
 menu "ARMv8.1 architectural features"
 
 config ARM64_HW_AFDBM
@@ -949,6 +987,23 @@
 	  entering them here. As a minimum, you should specify the
 	  root device (e.g. root=/dev/nfs).
 
+choice
+	prompt "Kernel command line type" if CMDLINE != ""
+	default CMDLINE_FROM_BOOTLOADER
+
+config CMDLINE_FROM_BOOTLOADER
+	bool "Use bootloader kernel arguments if available"
+	help
+	  Uses the command-line options passed by the boot loader. If
+	  the boot loader doesn't provide any, the default kernel command
+	  string provided in CMDLINE will be used.
+
+config CMDLINE_EXTEND
+	bool "Extend bootloader kernel arguments"
+	help
+	  The command-line arguments provided by the boot loader will be
+	  appended to the default kernel command string.
+
 config CMDLINE_FORCE
 	bool "Always use the default kernel command string"
 	help
@@ -956,6 +1011,7 @@
 	  loader passes other arguments to the kernel.
 	  This is useful if you cannot or don't want to change the
 	  command-line options your boot loader passes to the kernel.
+endchoice
 
 config EFI_STUB
 	bool
@@ -988,6 +1044,21 @@
 	  However, even with this option, the resultant kernel should
 	  continue to boot on existing non-UEFI platforms.
 
+config BUILD_ARM64_APPENDED_DTB_IMAGE
+	bool "Build a concatenated Image.gz/dtb by default"
+	depends on OF
+	help
+	  Enabling this option will cause a concatenated Image.gz and list of
+	  DTBs to be built by default (instead of a standalone Image.gz).
+	  The image will be built in arch/arm64/boot/Image.gz-dtb.
+
+config BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES
+	string "Default dtb names"
+	depends on BUILD_ARM64_APPENDED_DTB_IMAGE
+	help
+	  Space separated list of names of dtbs to append when
+	  building a concatenated Image.gz-dtb.
+
 endmenu
 
 menu "Userspace binary formats"
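
To make the ARM64_DMA_IOMMU_ALIGNMENT help text above concrete, here is a small hypothetical calculation, assuming the allocator clamps a buffer's PAGE_SIZE order to the configured value (as the corresponding 32-bit ARM_DMA_IOMMU_ALIGNMENT option is used) and 4 KiB pages; it is an illustration, not kernel code.

/* Illustrative only: numeric effect of the alignment cap described in the
 * ARM64_DMA_IOMMU_ALIGNMENT help text, assuming the allocator clamps the
 * buffer's PAGE_SIZE order to the configured value (default 8) and 4 KiB
 * pages. */
#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* assumed page size */
	unsigned int cap = 8;			/* default of the option */
	unsigned long buf = 16UL << 20;		/* a 16 MiB buffer */
	unsigned int order = 0;

	while ((page_size << order) < buf)	/* smallest covering order */
		order++;
	if (order > cap)
		order = cap;			/* the clamp the option provides */

	printf("IOVA alignment: %lu KiB instead of %lu KiB\n",
	       (page_size << order) >> 10, buf >> 10);
	return 0;
}

With the default order of 8, a 16 MiB buffer would be aligned to 2^8 * 4 KiB = 1 MiB rather than to its own 16 MiB size, which is the address-space saving the help text describes.
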
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 101794f..2f36b15 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -114,9 +114,19 @@
 config ARCH_QCOM
 	bool "Qualcomm Platforms"
 	select PINCTRL
+	select SOC_BUS
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
+config ARCH_MSMSKUNK
+	bool "Enable Support for Qualcomm MSMSKUNK"
+	depends on ARCH_QCOM
+	select COMMON_CLK_QCOM
+	select QCOM_GDSC
+	help
+	  This enables support for the MSMSKUNK chipset. If you do not
+	  wish to build a kernel that runs on this chipset, say 'N' here.
+
 config ARCH_ROCKCHIP
 	bool "Rockchip Platforms"
 	select ARCH_HAS_RESET_CONTROLLER
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 3635b86..bba8d2c 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -40,6 +40,7 @@
 KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
 KBUILD_CFLAGS	+= $(call cc-option, -mpc-relative-literal-loads)
+KBUILD_CFLAGS	+= -fno-pic
 KBUILD_AFLAGS	+= $(lseinstr)
 
 ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
@@ -94,7 +95,12 @@
 core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
 
 # Default target when executing plain make
+ifeq ($(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE),y)
+KBUILD_IMAGE	:= Image.gz-dtb
+else
 KBUILD_IMAGE	:= Image.gz
+endif
+
 KBUILD_DTBS	:= dtbs
 
 all:	$(KBUILD_IMAGE) $(KBUILD_DTBS)
@@ -121,6 +127,9 @@
 dtbs_install:
 	$(Q)$(MAKE) $(dtbinst)=$(boot)/dts
 
+Image-dtb Image.gz-dtb: vmlinux scripts dtbs
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
 PHONY += vdso_install
 vdso_install:
 	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
diff --git a/arch/arm64/boot/.gitignore b/arch/arm64/boot/.gitignore
index 8dab0bb..34e3520 100644
--- a/arch/arm64/boot/.gitignore
+++ b/arch/arm64/boot/.gitignore
@@ -1,2 +1,4 @@
 Image
+Image-dtb
 Image.gz
+Image.gz-dtb
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index 1f012c5..718bd0b 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -18,12 +18,23 @@
 
 targets := Image Image.gz
 
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+DTB_OBJS := $(addprefix $(obj)/dts/,$(DTB_LIST))
+else
+DTB_OBJS := $(shell find $(obj)/dts/ -name \*.dtb)
+endif
+
 $(obj)/Image: vmlinux FORCE
 	$(call if_changed,objcopy)
 
 $(obj)/Image.bz2: $(obj)/Image FORCE
 	$(call if_changed,bzip2)
 
+$(obj)/Image-dtb: $(obj)/Image $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+
 $(obj)/Image.gz: $(obj)/Image FORCE
 	$(call if_changed,gzip)
 
@@ -36,6 +47,9 @@
 $(obj)/Image.lzo: $(obj)/Image FORCE
 	$(call if_changed,lzo)
 
+$(obj)/Image.gz-dtb: $(obj)/Image.gz $(DTB_OBJS) FORCE
+	$(call if_changed,cat)
+
 install:
 	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
 	$(obj)/Image System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 6684f97..7ad2cf0 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -28,3 +28,17 @@
 dtb-$(CONFIG_OF_ALL_DTBS) := $(patsubst $(dtstree)/%.dts,%.dtb, $(foreach d,$(dts-dirs), $(wildcard $(dtstree)/$(d)/*.dts)))
 
 always		:= $(dtb-y)
+
+targets += dtbs
+
+DTB_NAMES := $(subst $\",,$(CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE_NAMES))
+ifneq ($(DTB_NAMES),)
+DTB_LIST := $(addsuffix .dtb,$(DTB_NAMES))
+else
+DTB_LIST := $(dtb-y)
+endif
+targets += $(DTB_LIST)
+
+dtbs: $(addprefix $(obj)/, $(DTB_LIST))
+
+clean-files := dts/*.dtb *.dtb
diff --git a/arch/arm64/boot/dts/qcom/Makefile b/arch/arm64/boot/dts/qcom/Makefile
index 5dd05de..c5b5060 100644
--- a/arch/arm64/boot/dts/qcom/Makefile
+++ b/arch/arm64/boot/dts/qcom/Makefile
@@ -2,6 +2,9 @@
 dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
 dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
 
+dtb-$(CONFIG_ARCH_QCOM) += msmskunk-sim.dtb
+dtb-$(CONFIG_ARCH_QCOM) += msmskunk-rumi.dtb
+
 always		:= $(dtb-y)
 subdir-y	:= $(dts-dirs)
 clean-files	:= *.dtb
diff --git a/arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi b/arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi
new file mode 100644
index 0000000..dc60b54
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-arm-smmu-skunk.dtsi
@@ -0,0 +1,209 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	kgsl_smmu: arm,smmu-kgsl@5040000 {
+		status = "ok";
+		compatible = "qcom,smmu-v2";
+		reg = <0x5040000 0x10000>;
+		#iommu-cells = <1>;
+		qcom,dynamic;
+		#global-interrupts = <2>;
+		interrupts = <GIC_SPI 229 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 231 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 364 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 365 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 366 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 367 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 368 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 369 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 370 IRQ_TYPE_EDGE_RISING>,
+				<GIC_SPI 371 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	apps_smmu: apps-smmu@0x15000000 {
+		compatible = "qcom,qsmmu-v500";
+		reg = <0x15000000 0x80000>;
+		#iommu-cells = <1>;
+		qcom,skip-init;
+		#global-interrupts = <1>;
+		#size-cells = <1>;
+		#address-cells = <1>;
+		ranges;
+		interrupts =	<GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 315 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 316 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 317 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 318 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 319 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 322 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 323 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 324 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 325 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 326 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 327 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 328 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 329 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 330 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 331 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 332 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 333 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 334 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 335 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 336 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 337 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 338 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 343 IRQ_TYPE_LEVEL_HIGH>;
+
+		anoc_1_tbu: anoc_1_tbu@0x150c5000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150c5000 0x1000>,
+				<0x150c2200 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu1_gdsc>;
+		};
+
+		anoc_2_tbu: anoc_2_tbu@0x150c9000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150c9000 0x1000>,
+				<0x150c2208 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_tbu2_gdsc>;
+		};
+
+		mnoc_hf_0_tbu: mnoc_hf_0_tbu@0x150cd000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150cd000 0x1000>,
+				<0x150c2210 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc>;
+		};
+
+		mnoc_hf_1_tbu: mnoc_hf_1_tbu@0x150d1000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150d1000 0x1000>,
+				<0x150c2218 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc>;
+		};
+
+		mnoc_sf_0_tbu: mnoc_sf_0_tbu@0x150d5000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150d5000 0x1000>,
+				<0x150c2220 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc>;
+		};
+
+		compute_dsp_tbu: compute_dsp_tbu@0x150d9000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150d9000 0x1000>,
+				<0x150c2228 0x8>;
+			reg-names = "base", "status-reg";
+			/* No GDSC */
+		};
+
+		adsp_tbu: adsp_tbu@0x150dd000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150dd000 0x1000>,
+				<0x150c2230 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc>;
+		};
+
+		anoc_1_pcie_tbu: anoc_1_pcie_tbu@0x150e1000 {
+			status = "disabled";
+			compatible = "qcom,qsmmuv500-tbu";
+			reg = <0x150e1000 0x1000>,
+				<0x150c2238 0x8>;
+			reg-names = "base", "status-reg";
+			qcom,regulator-names = "vdd";
+			vdd-supply = <&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc>;
+		};
+	};
+
+	iommu_test_device {
+		compatible = "iommu-debug-test";
+		/*
+		 * 42 shouldn't be used by anyone on the mmss_smmu.  We just
+		 * need _something_ here to get this node recognized by the
+		 * SMMU driver. Our test uses ATOS, which doesn't use SIDs
+		 * anyways, so using a dummy value is ok.
+		 */
+		iommus = <&kgsl_smmu 42>;
+	};
+
+	iommu_test_device2 {
+		compatible = "iommu-debug-test";
+		/*
+		 * This SID belongs to PCIE. We can't use a fake SID for
+		 * the apps_smmu device.
+		 */
+		iommus = <&apps_smmu 0x1c03>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi b/arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi
new file mode 100644
index 0000000..64cb626
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msm-gdsc-skunk.dtsi
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	/* GDSCs in Global CC */
+	pcie_0_gdsc: qcom,gdsc@0x16b004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "pcie_0_gdsc";
+		reg = <0x16b004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	pcie_1_gdsc: qcom,gdsc@0x18d004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "pcie_1_gdsc";
+		reg = <0x18d004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	ufs_card_gdsc: qcom,gdsc@0x175004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "ufs_card_gdsc";
+		reg = <0x175004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	ufs_phy_gdsc: qcom,gdsc@0x177004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "ufs_phy_gdsc";
+		reg = <0x177004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	usb30_prim_gdsc: qcom,gdsc@0x10f004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "usb30_prim_gdsc";
+		reg = <0x10f004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	usb30_sec_gdsc: qcom,gdsc@0x110004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "usb30_sec_gdsc";
+		reg = <0x110004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc: qcom,gdsc@0x17d030 {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc";
+		reg = <0x17d030 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc: qcom,gdsc@0x17d03c {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc";
+		reg = <0x17d03c 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	hlos1_vote_aggre_noc_mmu_tbu1_gdsc: qcom,gdsc@0x17d034 {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_aggre_noc_mmu_tbu1_gdsc";
+		reg = <0x17d034 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	hlos1_vote_aggre_noc_mmu_tbu2_gdsc: qcom,gdsc@0x17d038 {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_aggre_noc_mmu_tbu2_gdsc";
+		reg = <0x17d038 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc: qcom,gdsc@0x17d040 {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc";
+		reg = <0x17d040 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc: qcom,gdsc@0x17d048 {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc";
+		reg = <0x17d048 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	hlos1_vote_mmnoc_mmu_tbu_sf_gdsc: qcom,gdsc@0x17d044 {
+		compatible = "qcom,gdsc";
+		regulator-name = "hlos1_vote_mmnoc_mmu_tbu_sf_gdsc";
+		reg = <0x17d044 0x4>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	/* GDSCs in Camera CC */
+	bps_gdsc: qcom,gdsc@0xad06004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "bps_gdsc";
+		reg = <0xad06004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	ife_0_gdsc: qcom,gdsc@0xad09004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "ife_0_gdsc";
+		reg = <0xad09004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	ife_1_gdsc: qcom,gdsc@0xad0a004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "ife_1_gdsc";
+		reg = <0xad0a004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	ipe_0_gdsc: qcom,gdsc@0xad07004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "ipe_0_gdsc";
+		reg = <0xad07004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	ipe_1_gdsc: qcom,gdsc@0xad08004 {
+		compatible = "qcom,gdsc";
+		regulator-name = "ipe_1_gdsc";
+		reg = <0xad08004 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	titan_top_gdsc: qcom,gdsc@0xad0b134 {
+		compatible = "qcom,gdsc";
+		regulator-name = "titan_top_gdsc";
+		reg = <0xad0b134 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	/* GDSCs in Display CC */
+	mdss_core_gdsc: qcom,gdsc@0xaf03000 {
+		compatible = "qcom,gdsc";
+		regulator-name = "mdss_core_gdsc";
+		reg = <0xaf03000 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	/* GDSCs in Graphics CC */
+	gpu_cx_hw_ctrl: syscon@0x5091540 {
+		compatible = "syscon";
+		reg = <0x5091540 0x4>;
+	};
+
+	gpu_cx_gdsc: qcom,gdsc@0x509106c {
+		compatible = "qcom,gdsc";
+		regulator-name = "gpu_cx_gdsc";
+		reg = <0x509106c 0x4>;
+		hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
+		qcom,no-status-check-on-disable;
+		qcom,gds-timeout = <500>;
+		status = "disabled";
+	};
+
+	gpu_gx_domain_addr: syscon@0x5091508 {
+		compatible = "syscon";
+		reg = <0x5091508 0x4>;
+	};
+
+	gpu_gx_sw_reset: syscon@0x5091008 {
+		compatible = "syscon";
+		reg = <0x5091008 0x4>;
+	};
+
+	gpu_gx_gdsc: qcom,gdsc@0x509100c {
+		compatible = "qcom,gdsc";
+		regulator-name = "gpu_gx_gdsc";
+		reg = <0x509100c 0x4>;
+		domain-addr = <&gpu_gx_domain_addr>;
+		sw-reset = <&gpu_gx_sw_reset>;
+		qcom,reset-aon-logic;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	/* GDSCs in Video CC */
+	vcodec0_gdsc: qcom,gdsc@0xab00874 {
+		compatible = "qcom,gdsc";
+		regulator-name = "vcodec0_gdsc";
+		reg = <0xab00874 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	vcodec1_gdsc: qcom,gdsc@0xab008b4 {
+		compatible = "qcom,gdsc";
+		regulator-name = "vcodec1_gdsc";
+		reg = <0xab008b4 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+
+	venus_gdsc: qcom,gdsc@0xab00814 {
+		compatible = "qcom,gdsc";
+		regulator-name = "venus_gdsc";
+		reg = <0xab00814 0x4>;
+		qcom,poll-cfg-gdscr;
+		status = "disabled";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi
new file mode 100644
index 0000000..0f7ac44
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-coresight.dtsi
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+	replicator_qdss: replicator@6046000 {
+		compatible = "arm,coresight-replicator";
+
+		coresight-name = "coresight-replicator";
+
+		ports{
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				replicator_out_tmc_etr: endpoint {
+					remote-endpoint=
+						<&tmc_etr_in_replicator>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				replicator_in_tmc_etf: endpoint {
+					slave-mode;
+					remote-endpoint=
+						<&tmc_etf_out_replicator>;
+				};
+			};
+		};
+	};
+
+	tmc_etr:tmc@6048000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6048000 0x1000>,
+		      <0x6064000 0x15000>;
+		reg-names = "tmc-base", "bam-base";
+
+		arm,buffer-size = <0x400000>;
+
+		coresight-name = "coresight-tmc-etr";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port {
+			tmc_etr_in_replicator: endpoint {
+				slave-mode;
+				remote-endpoint = <&replicator_out_tmc_etr>;
+			};
+		};
+	};
+
+	tmc_etf:tmc@6047000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b961>;
+
+		reg = <0x6047000 0x1000>;
+		reg-names = "tmc-base";
+
+		coresight-name = "coresight-tmc-etf";
+
+		arm,default-sink;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				tmc_etf_out_replicator: endpoint {
+					remote-endpoint =
+						<&replicator_in_tmc_etf>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				tmc_etf_in_funnel_merg: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_merg_out_tmc_etf>;
+				};
+			};
+		};
+
+	};
+
+	stm: stm@6002000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b962>;
+
+		reg = <0x6002000 0x1000>,
+		      <0x16280000 0x180000>;
+		reg-names = "stm-base", "stm-stimulus-base";
+
+		coresight-name = "coresight-stm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		port {
+			stm_out_funnel_in0: endpoint {
+				remote-endpoint = <&funnel_in0_in_stm>;
+			};
+		};
+
+	};
+
+	funnel_in0: funnel@0x6041000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6041000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-in0";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_in0_out_funnel_merg: endpoint {
+					remote-endpoint =
+						<&funnel_merg_in_funnel_in0>;
+				};
+			};
+
+			port@1 {
+				reg = <7>;
+				funnel_in0_in_stm: endpoint {
+					slave-mode;
+					remote-endpoint = <&stm_out_funnel_in0>;
+				};
+			};
+		};
+	};
+
+	funnel_merg:funnel@6045000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6045000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-merg";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_merg_out_tmc_etf: endpoint {
+					remote-endpoint =
+						<&tmc_etf_in_funnel_merg>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_merg_in_funnel_in0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_in0_out_funnel_merg>;
+				};
+			};
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi
new file mode 100644
index 0000000..2579819
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-ion.dtsi
@@ -0,0 +1,53 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	qcom,ion {
+		compatible = "qcom,msm-ion";
+		#address-cells = <1>;
+		#size-cells = <0>;
+
+		system_heap: qcom,ion-heap@25 {
+			reg = <25>;
+			qcom,ion-heap-type = "SYSTEM";
+		};
+
+		qcom,ion-heap@22 { /* ADSP HEAP */
+			reg = <22>;
+			memory-region = <&adsp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@27 { /* QSEECOM HEAP */
+			reg = <27>;
+			memory-region = <&qseecom_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@13 { /* SPSS HEAP */
+			reg = <13>;
+			memory-region = <&sp_mem>;
+			qcom,ion-heap-type = "DMA";
+		};
+
+		qcom,ion-heap@10 { /* SECURE DISPLAY HEAP */
+			reg = <10>;
+			memory-region = <&secure_display_memory>;
+			qcom,ion-heap-type = "HYP_CMA";
+		};
+
+		qcom,ion-heap@9 {
+			reg = <9>;
+			qcom,ion-heap-type = "SYSTEM_SECURE";
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi
new file mode 100644
index 0000000..84010bd
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-pinctrl.dtsi
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+	tlmm: pinctrl@03400000 {
+		compatible = "qcom,msmskunk-pinctrl";
+		reg = <0x03800000 0xc00000>;
+		interrupts = <0 208 0>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi
new file mode 100644
index 0000000..6851992
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-regulator.dtsi
@@ -0,0 +1,388 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+
+/* Stub regulators */
+/ {
+	pmcobalt_s1: regulator-pmcobalt-s1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s1";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <800000>;
+	};
+
+	pmcobalt_s2: regulator-pmcobalt-s2 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s2";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1100000>;
+		regulator-max-microvolt = <1100000>;
+	};
+
+	pmcobalt_s3: regulator-pmcobalt-s3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s3";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1352000>;
+		regulator-max-microvolt = <1352000>;
+	};
+
+	pmcobalt_s4: regulator-pmcobalt-s4 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s4";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmcobalt_s5: regulator-pmcobalt-s5 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s5";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <1904000>;
+		regulator-max-microvolt = <2040000>;
+	};
+
+	/* PMCOBALT S6 = VDD_MX supply */
+	pmcobalt_s6_level: regulator-pmcobalt-s6-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s6_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmcobalt_s6_level_ao: regulator-pmcobalt-s6-level-ao {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s6_level_ao";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmcobalt_s7: regulator-pmcobalt-s7 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s7";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <900000>;
+		regulator-max-microvolt = <1028000>;
+	};
+
+	/* PMCOBALT S9 + S8 = VDD_CX supply */
+	pmcobalt_s9_level: regulator-pmcobalt-s9-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s9_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmcobalt_s9_level_ao: regulator-pmcobalt-s9-level-ao {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_s9_level_ao";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmcobalt_l1: regulator-pmcobalt-l1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l1";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <880000>;
+		regulator-max-microvolt = <880000>;
+	};
+
+	pmcobalt_l2: regulator-pmcobalt-l2 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l2";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+	};
+
+	pmcobalt_l3: regulator-pmcobalt-l3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l3";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1000000>;
+		regulator-max-microvolt = <1000000>;
+	};
+
+	/* PMCOBALT L4 = VDD_SSC_MX supply */
+	pmcobalt_l4_level: regulator-pmcobalt-l4-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l4_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmcobalt_l5: regulator-pmcobalt-l5 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l5";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <800000>;
+		regulator-max-microvolt = <800000>;
+	};
+
+	pmcobalt_l6: regulator-pmcobalt-l6 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l6";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1856000>;
+		regulator-max-microvolt = <1856000>;
+	};
+
+	pmcobalt_l7: regulator-pmcobalt-l7 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l7";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmcobalt_l8: regulator-pmcobalt-l8 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l8";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+	};
+
+	pmcobalt_l9: regulator-pmcobalt-l9 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l9";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1808000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pmcobalt_l10: regulator-pmcobalt-l10 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l10";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1808000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pmcobalt_l11: regulator-pmcobalt-l11 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l11";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1000000>;
+		regulator-max-microvolt = <1000000>;
+	};
+
+	pmcobalt_l12: regulator-pmcobalt-l12 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l12";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmcobalt_l13: regulator-pmcobalt-l13 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l13";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1808000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pmcobalt_l14: regulator-pmcobalt-l14 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l14";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmcobalt_l15: regulator-pmcobalt-l15 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l15";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+	};
+
+	pmcobalt_l16: regulator-pmcobalt-l16 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l16";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2704000>;
+		regulator-max-microvolt = <2704000>;
+	};
+
+	pmcobalt_l17: regulator-pmcobalt-l17 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l17";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1304000>;
+		regulator-max-microvolt = <1304000>;
+	};
+
+	pmcobalt_l18: regulator-pmcobalt-l18 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l18";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2704000>;
+		regulator-max-microvolt = <2704000>;
+	};
+
+	pmcobalt_l19: regulator-pmcobalt-l19 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l19";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3008000>;
+		regulator-max-microvolt = <3008000>;
+	};
+
+	pmcobalt_l20: regulator-pmcobalt-l20 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l20";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2960000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pmcobalt_l21: regulator-pmcobalt-l21 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l21";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2960000>;
+		regulator-max-microvolt = <2960000>;
+	};
+
+	pmcobalt_l22: regulator-pmcobalt-l22 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l22";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <2864000>;
+		regulator-max-microvolt = <2864000>;
+	};
+
+	pmcobalt_l23: regulator-pmcobalt-l23 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l23";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3312000>;
+		regulator-max-microvolt = <3312000>;
+	};
+
+	pmcobalt_l24: regulator-pmcobalt-l24 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l24";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3088000>;
+		regulator-max-microvolt = <3088000>;
+	};
+
+	pmcobalt_l25: regulator-pmcobalt-l25 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l25";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3104000>;
+		regulator-max-microvolt = <3104000>;
+	};
+
+	pmcobalt_l26: regulator-pmcobalt-l26 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l26";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <1200000>;
+		regulator-max-microvolt = <1200000>;
+	};
+
+	/* PMCOBALT L27 = VDD_SSC_CX supply */
+	pmcobalt_l27_level: regulator-pmcobalt-l27-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l27_level";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pmcobalt_l28: regulator-pmcobalt-l28 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_l28";
+		qcom,hpm-min-load = <10000>;
+		regulator-min-microvolt = <3008000>;
+		regulator-max-microvolt = <3008000>;
+	};
+
+	pmcobalt_lvs1: regulator-pmcobalt-lvs1 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_lvs1";
+	};
+
+	pmcobalt_lvs2: regulator-pmcobalt-lvs2 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmcobalt_lvs2";
+	};
+
+	pmicobalt_bob: regulator-pmicobalt-bob {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pmicobalt_bob";
+		regulator-min-microvolt = <3312000>;
+		regulator-max-microvolt = <3600000>;
+	};
+
+	/* PM8005 S1 + S4 = 2 phase VDD_GFX supply */
+	pm8005_s1_level: regulator-pm8005-s1-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm8005_s1_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	/* PM8005 S2 = VDD_MODEM supply */
+	pm8005_s2_level: regulator-pm8005-s2-level {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm8005_s2_level";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <RPMH_REGULATOR_LEVEL_OFF>;
+		regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+	};
+
+	pm8005_s3: regulator-pm8005-s3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "pm8005_s3";
+		qcom,hpm-min-load = <100000>;
+		regulator-min-microvolt = <600000>;
+		regulator-max-microvolt = <600000>;
+	};
+
+	apc0_pwrcl_vreg: regulator-pwrcl {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "apc0_pwrcl_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <23>;
+	};
+
+	apc0_l3_vreg: regulator-l3 {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "apc0_l3_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <19>;
+	};
+
+	apc1_perfcl_vreg: regulator-perfcl {
+		compatible = "qcom,stub-regulator";
+		regulator-name = "apc1_perfcl_corner";
+		regulator-min-microvolt = <1>;
+		regulator-max-microvolt = <26>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts b/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts
new file mode 100644
index 0000000..7ccdf71
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-rumi.dts
@@ -0,0 +1,62 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/memreserve/ 0x90000000 0x00000100;
+
+#include "msmskunk.dtsi"
+#include "msmskunk-rumi.dtsi"
+#include "msmskunk-usb.dtsi"
+/ {
+	model = "Qualcomm Technologies, Inc. MSM SKUNK RUMI";
+	compatible = "qcom,msmskunk-rumi", "qcom,msmskunk", "qcom,rumi";
+	qcom,board-id = <15 0>;
+};
+
+&soc {
+	timer {
+		clock-frequency = <1000000>;
+	};
+
+	timer@0x17c90000 {
+		clock-frequency = <1000000>;
+	};
+};
+
+&usb3 {
+	/delete-property/ qcom,usb-dbm;
+	qcom,charging-disabled;
+	dwc3@a600000 {
+		maximum-speed = "high-speed";
+	};
+};
+
+&qusb_phy0 {
+	reg = <0x088e2000 0x4>,
+	      <0x0a720000 0x9500>;
+	reg-names = "qusb_phy_base",
+		"emu_phy_base";
+	qcom,emulation;
+	qcom,emu-init-seq = <0x19 0x1404
+			     0x20 0x1414
+			     0x79 0x1410
+			     0x00 0x1418
+			     0x99 0x1404
+			     0x04 0x1408
+			     0xd9 0x1404>;
+
+	qcom,emu-dcm-reset-seq = <0x5 0x14	/* 0x1 0x14 for E1.2 */
+				  0x100000 0x20
+				  0x0 0x20
+				  0x1a0 0x20>;	/* 0x220 0x20 for E1.2 */
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi
new file mode 100644
index 0000000..2558091
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-rumi.dtsi
@@ -0,0 +1,43 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+&ufsphy_mem {
+	compatible = "qcom,ufs-phy-qrbtc-msmskunk";
+
+	vdda-phy-supply = <&pmcobalt_l1>;
+	vdda-pll-supply = <&pmcobalt_l2>;
+	vddp-ref-clk-supply = <&pmcobalt_l26>;
+	vdda-phy-max-microamp = <44000>;
+	vdda-pll-max-microamp = <14600>;
+	vddp-ref-clk-max-microamp = <100>;
+	vddp-ref-clk-always-on;
+
+	status = "ok";
+};
+
+&ufs_mem {
+	limit-tx-hs-gear = <1>;
+	limit-rx-hs-gear = <1>;
+	scsi-cmd-timeout = <300000>;
+
+	vdd-hba-supply = <&ufs_phy_gdsc>;
+	vdd-hba-fixed-regulator;
+	vcc-supply = <&pmcobalt_l20>;
+	vccq2-supply = <&pmcobalt_s4>;
+	vcc-max-microamp = <600000>;
+	vccq2-max-microamp = <600000>;
+
+	qcom,disable-lpm;
+	rpm-level = <0>;
+	spm-level = <0>;
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sim.dts b/arch/arm64/boot/dts/qcom/msmskunk-sim.dts
new file mode 100644
index 0000000..eb95256
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-sim.dts
@@ -0,0 +1,24 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/dts-v1/;
+/memreserve/ 0x90000000 0x00000100;
+
+#include "msmskunk.dtsi"
+#include "msmskunk-sim.dtsi"
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM SKUNK SIM";
+	compatible = "qcom,msmskunk-sim", "qcom,msmskunk", "qcom,sim";
+	qcom,board-id = <16 0>;
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi
new file mode 100644
index 0000000..0f94d812
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-sim.dtsi
@@ -0,0 +1,11 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi
new file mode 100644
index 0000000..a75b6a7
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-smp2p.dtsi
@@ -0,0 +1,310 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+&soc {
+	qcom,smp2p-modem@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <1>;
+		qcom,irq-bitmask = <0x4000>;
+		interrupts = <GIC_SPI 451 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	qcom,smp2p-adsp@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <2>;
+		qcom,irq-bitmask = <0x400>;
+		interrupts = <GIC_SPI 158 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	qcom,smp2p-dsps@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <3>;
+		qcom,irq-bitmask = <0x4000000>;
+		interrupts = <GIC_SPI 172 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	qcom,smp2p-cdsp@1799000c {
+		compatible = "qcom,smp2p";
+		reg = <0x1799000c 0x4>;
+		qcom,remote-pid = <5>;
+		qcom,irq-bitmask = <0x40>;
+		interrupts = <GIC_SPI 576 IRQ_TYPE_EDGE_RISING>;
+	};
+
+	smp2pgpio_smp2p_15_in: qcom,smp2pgpio-smp2p-15-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_in";
+		gpios = <&smp2pgpio_smp2p_15_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_15_out: qcom,smp2pgpio-smp2p-15-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <15>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_15_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_15_out";
+		gpios = <&smp2pgpio_smp2p_15_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_in: qcom,smp2pgpio-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_in";
+		gpios = <&smp2pgpio_smp2p_1_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_1_out: qcom,smp2pgpio-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_1_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_1_out";
+		gpios = <&smp2pgpio_smp2p_1_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_in: qcom,smp2pgpio-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_in";
+		gpios = <&smp2pgpio_smp2p_2_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_2_out: qcom,smp2pgpio-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_2_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_2_out";
+		gpios = <&smp2pgpio_smp2p_2_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_3_in: qcom,smp2pgpio-smp2p-3-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <3>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_3_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_3_in";
+		gpios = <&smp2pgpio_smp2p_3_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_3_out: qcom,smp2pgpio-smp2p-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_3_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_3_out";
+		gpios = <&smp2pgpio_smp2p_3_out 0 0>;
+	};
+
+	smp2pgpio_smp2p_5_in: qcom,smp2pgpio-smp2p-5-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <5>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_5_in {
+		compatible = "qcom,smp2pgpio_test_smp2p_5_in";
+		gpios = <&smp2pgpio_smp2p_5_in 0 0>;
+	};
+
+	smp2pgpio_smp2p_5_out: qcom,smp2pgpio-smp2p-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "smp2p";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio_test_smp2p_5_out {
+		compatible = "qcom,smp2pgpio_test_smp2p_5_out";
+		gpios = <&smp2pgpio_smp2p_5_out 0 0>;
+	};
+
+	smp2pgpio_sleepstate_3_out: qcom,smp2pgpio-sleepstate-gpio-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "sleepstate";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	qcom,smp2pgpio-sleepstate-3-out {
+		compatible = "qcom,smp2pgpio_sleepstate_3_out";
+		gpios = <&smp2pgpio_sleepstate_3_out 0 0>;
+	};
+
+	/* ssr - inbound entry from mss */
+	smp2pgpio_ssr_smp2p_1_in: qcom,smp2pgpio-ssr-smp2p-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to mss */
+	smp2pgpio_ssr_smp2p_1_out: qcom,smp2pgpio-ssr-smp2p-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from lpass */
+	smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <2>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to lpass */
+	smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <2>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from ssc */
+	smp2pgpio_ssr_smp2p_3_in: qcom,smp2pgpio-ssr-smp2p-3-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <3>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to ssc */
+	smp2pgpio_ssr_smp2p_3_out: qcom,smp2pgpio-ssr-smp2p-3-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <3>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - inbound entry from cdsp */
+	smp2pgpio_ssr_smp2p_5_in: qcom,smp2pgpio-ssr-smp2p-5-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "slave-kernel";
+		qcom,remote-pid = <5>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ssr - outbound entry to cdsp */
+	smp2pgpio_ssr_smp2p_5_out: qcom,smp2pgpio-ssr-smp2p-5-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "master-kernel";
+		qcom,remote-pid = <5>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi b/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi
new file mode 100644
index 0000000..8bb398e
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk-usb.dtsi
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/clock/qcom,gcc-skunk.h>
+&soc {
+	usb3: ssusb@a600000 {
+		compatible = "qcom,dwc-usb3-msm";
+		reg = <0x0a600000 0xf8c00>,
+		      <0x0141e000 0x400>;
+		reg-names = "core_base", "ahb2phy_base";
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+
+		interrupts = <0 346 0>, <0 130 0>;
+		interrupt-names = "hs_phy_irq", "pwr_event_irq";
+
+		USB3_GDSC-supply = <&usb30_prim_gdsc>;
+		qcom,usb-dbm = <&dbm_1p5>;
+		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
+
+		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
+			 <&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
+			 <&clock_gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+			 <&clock_gcc GCC_USB30_PRIM_MOCK_UTMI_CLK>,
+			 <&clock_gcc GCC_USB30_PRIM_SLEEP_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>;
+
+		clock-names = "core_clk", "iface_clk", "bus_aggr_clk",
+			"utmi_clk", "sleep_clk", "cfg_ahb_clk", "xo";
+
+		resets = <&clock_gcc GCC_USB30_PRIM_BCR>;
+		reset-names = "core_reset";
+
+		dwc3@a600000 {
+			compatible = "snps,dwc3";
+			reg = <0x0a600000 0xcd00>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 133 0>;
+			usb-phy = <&qusb_phy0>, <&usb_nop_phy>;
+			tx-fifo-resize;
+			snps,nominal-elastic-buffer;
+			snps,hird_thresh = <0x10>;
+			snps,num-gsi-evt-buffs = <0x3>;
+		};
+	};
+
+	qusb_phy0: qusb@88e2000 {
+		compatible = "qcom,qusb2phy-v2";
+		reg = <0x088e2000 0x400>;
+		reg-names = "qusb_phy_base";
+
+		vdd-supply = <&pmcobalt_l1>;
+		vdda18-supply = <&pmcobalt_l12>;
+		vdda33-supply = <&pmcobalt_l24>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,qusb-phy-init-seq =
+				/* <value reg_offset> */
+					<0x13 0x04
+					0x7c 0x18c
+					0x80 0x2c
+					0x0a 0x184
+					0x00 0x240>;
+		phy_type = "utmi";
+		clocks = <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+			 <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>;
+		clock-names = "ref_clk_src", "ref_clk", "cfg_ahb_clk";
+
+		resets = <&clock_gcc GCC_USB_PHY_CFG_AHB2PHY_BCR>;
+		reset-names = "phy_reset";
+
+	};
+
+	dbm_1p5: dbm@a8f8000 {
+		compatible = "qcom,usb-dbm-1p5";
+		reg = <0xa8f8000 0x400>;
+		qcom,reset-ep-after-lpm-resume;
+	};
+
+	usb_nop_phy: usb_nop_phy {
+		compatible = "usb-nop-xceiv";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/msmskunk.dtsi b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
new file mode 100644
index 0000000..e50103d
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/msmskunk.dtsi
@@ -0,0 +1,1252 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "skeleton64.dtsi"
+#include <dt-bindings/clock/qcom,gcc-skunk.h>
+#include <dt-bindings/clock/qcom,camcc-skunk.h>
+#include <dt-bindings/clock/qcom,dispcc-skunk.h>
+#include <dt-bindings/clock/qcom,gpucc-skunk.h>
+#include <dt-bindings/clock/qcom,videocc-skunk.h>
+#include <dt-bindings/regulator/qcom,rpmh-regulator.h>
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
+
+/ {
+	model = "Qualcomm Technologies, Inc. MSM SKUNK";
+	compatible = "qcom,msmskunk";
+	qcom,msm-id = <321 0x0>;
+	interrupt-parent = <&intc>;
+
+	cpus {
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		CPU0: cpu@0 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x0>;
+			enable-method = "spin-table";
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_0>;
+			L2_0: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x20000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+
+				L3_0: l3-cache {
+				      compatible = "arm,arch-cache";
+				      cache-size = <0x200000>;
+				      cache-level = <3>;
+				};
+			};
+		};
+
+		CPU1: cpu@1 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x100>;
+			enable-method = "spin-table";
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_1>;
+			L2_1: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x20000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU2: cpu@2 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x200>;
+			enable-method = "spin-table";
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_2>;
+			L2_2: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x20000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU3: cpu@3 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x300>;
+			enable-method = "spin-table";
+			cache-size = <0x8000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_3>;
+			L2_3: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x20000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU4: cpu@100 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x400>;
+			enable-method = "spin-table";
+			cache-size = <0x20000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_4>;
+			L2_4: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x40000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU5: cpu@101 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x500>;
+			enable-method = "spin-table";
+			cache-size = <0x20000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_5>;
+			L2_5: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x40000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU6: cpu@102 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x600>;
+			enable-method = "spin-table";
+			cache-size = <0x20000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_6>;
+			L2_6: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x40000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		CPU7: cpu@103 {
+			device_type = "cpu";
+			compatible = "arm,armv8";
+			reg = <0x0 0x700>;
+			enable-method = "spin-table";
+			cache-size = <0x20000>;
+			cpu-release-addr = <0x0 0x90000000>;
+			next-level-cache = <&L2_7>;
+			L2_7: l2-cache {
+			      compatible = "arm,arch-cache";
+			      cache-size = <0x40000>;
+			      cache-level = <2>;
+			      next-level-cache = <&L3_0>;
+			};
+		};
+
+		cpu-map {
+			cluster0 {
+				core0 {
+					cpu = <&CPU0>;
+				};
+
+				core1 {
+					cpu = <&CPU1>;
+				};
+
+				core2 {
+					cpu = <&CPU2>;
+				};
+
+				core3 {
+					cpu = <&CPU3>;
+				};
+			};
+
+			cluster1 {
+				core0 {
+					cpu = <&CPU4>;
+				};
+
+				core1 {
+					cpu = <&CPU5>;
+				};
+
+				core2 {
+					cpu = <&CPU6>;
+				};
+
+				core3 {
+					cpu = <&CPU7>;
+				};
+			};
+		};
+	};
+
+	soc: soc { };
+
+	reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		removed_regions: removed_regions@85800000 {
+			no-map;
+			reg = <0 0x85800000 0 0x3700000>;
+		};
+
+		pil_camera_mem: camera_region@8ab00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8ab00000 0 0x500000>;
+		};
+
+		pil_modem_mem: modem_region@8b000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x8b000000 0 0x6e00000>;
+		};
+
+		pil_video_mem: pil_video_region@91e00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x91e00000 0 0x500000>;
+		};
+
+		pil_cdsp_mem: cdsp_regions@92300000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x92300000 0 0x800000>;
+		};
+
+		pil_adsp_mem: pil_adsp_region@92b00000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x92b00000 0 0x1a00000>;
+		};
+
+		pil_slpi_mem: pil_slpi_region@94500000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x94500000 0 0xf00000>;
+		};
+
+		pil_spss_mem: spss_region@95400000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x95400000 0 0x700000>;
+		};
+
+		adsp_mem: adsp_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+
+		qseecom_mem: qseecom_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x1400000>;
+		};
+
+		sp_mem: sp_region {  /* SPSS-HLOS ION shared mem */
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>; /* 32-bit */
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x800000>;
+		};
+
+		secure_display_memory: secure_display_region {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x5c00000>;
+		};
+
+		/* global autoconfigured region for contiguous allocations */
+		linux,cma {
+			compatible = "shared-dma-pool";
+			alloc-ranges = <0 0x00000000 0 0xffffffff>;
+			reusable;
+			alignment = <0 0x400000>;
+			size = <0 0x2000000>;
+			linux,cma-default;
+		};
+	};
+};
+
+#include "msm-gdsc-skunk.dtsi"
+
+&soc {
+	#address-cells = <1>;
+	#size-cells = <1>;
+	ranges = <0 0 0 0xffffffff>;
+	compatible = "simple-bus";
+
+	intc: interrupt-controller@17a00000 {
+		compatible = "arm,gic-v3";
+		#interrupt-cells = <3>;
+		interrupt-controller;
+		#redistributor-regions = <1>;
+		redistributor-stride = <0x0 0x20000>;
+		reg = <0x17a00000 0x10000>,     /* GICD */
+		      <0x17a60000 0x100000>;    /* GICR * 8 */
+		interrupts = <1 9 4>;
+	};
+
+	timer {
+		compatible = "arm,armv8-timer";
+		interrupts = <1 1 0xf08>,
+			     <1 2 0xf08>,
+			     <1 3 0xf08>,
+			     <1 0 0xf08>;
+		clock-frequency = <19200000>;
+	};
+
+	timer@0x17C90000 {
+		#address-cells = <1>;
+		#size-cells = <1>;
+		ranges;
+		compatible = "arm,armv7-timer-mem";
+		reg = <0x17C90000 0x1000>;
+		clock-frequency = <19200000>;
+
+		frame@0x17CA0000 {
+			frame-number = <0>;
+			interrupts = <0 8 0x4>,
+				     <0 7 0x4>;
+			reg = <0x17CA0000 0x1000>,
+			      <0x17CB0000 0x1000>;
+		};
+
+		frame@17cc0000 {
+			frame-number = <1>;
+			interrupts = <0 9 0x4>;
+			reg = <0x17cc0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17cd0000 {
+			frame-number = <2>;
+			interrupts = <0 10 0x4>;
+			reg = <0x17cd0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17ce0000 {
+			frame-number = <3>;
+			interrupts = <0 11 0x4>;
+			reg = <0x17ce0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17cf0000 {
+			frame-number = <4>;
+			interrupts = <0 12 0x4>;
+			reg = <0x17cf0000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17d00000 {
+			frame-number = <5>;
+			interrupts = <0 36 0x4>;
+			reg = <0x17d00000 0x1000>;
+			status = "disabled";
+		};
+
+		frame@17d10000 {
+			frame-number = <6>;
+			interrupts = <0 37 0x4>;
+			reg = <0x17d10000 0x1000>;
+			status = "disabled";
+		};
+	};
+
+	clock_gcc: qcom,gcc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "gcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_videocc: qcom,videocc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "videocc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_camcc: qcom,camcc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "camcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_dispcc: qcom,dispcc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "dispcc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	clock_gpucc: qcom,gpucc {
+		compatible = "qcom,dummycc";
+		clock-output-names = "gpucc_clocks";
+		#clock-cells = <1>;
+		#reset-cells = <1>;
+	};
+
+	ufsphy_mem: ufsphy@1d87000 {
+		reg = <0x1d87000 0xda8>; /* PHY regs */
+		reg-names = "phy_mem";
+		#phy-cells = <0>;
+
+		/* TODO: add "ref_clk_src" */
+		clock-names = "ref_clk",
+			"ref_aux_clk";
+		clocks = <&clock_gcc GCC_UFS_MEM_CLKREF_CLK>,
+			<&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
+
+		status = "disabled";
+	};
+
+	ufs_mem: ufshc@1d84000 {
+		compatible = "qcom,ufshc";
+		reg = <0x1d84000 0x2500>;
+		interrupts = <0 265 0>;
+		phys = <&ufsphy_mem>;
+		phy-names = "ufsphy";
+
+		lanes-per-direction = <2>;
+
+		/* TODO: add "ref_clk" */
+		clock-names =
+			"core_clk",
+			"bus_aggr_clk",
+			"iface_clk",
+			"core_clk_unipro",
+			"core_clk_ice",
+			"tx_lane0_sync_clk",
+			"rx_lane0_sync_clk",
+			"rx_lane1_sync_clk";
+		clocks =
+			<&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
+			<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
+			<&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+			<&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
+			<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
+			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
+			<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_1_CLK>;
+		freq-table-hz =
+			<50000000 200000000>,
+			<0 0>,
+			<0 0>,
+			<37500000 150000000>,
+			<75000000 300000000>,
+			<0 0>,
+			<0 0>,
+			<0 0>;
+
+		qcom,msm-bus,name = "ufs_mem";
+		qcom,msm-bus,num-cases = <22>;
+		qcom,msm-bus,num-paths = <2>;
+		qcom,msm-bus,vectors-KBps =
+		<95 512 0 0>, <1 650 0 0>,          /* No vote */
+		<95 512 922 0>, <1 650 1000 0>,     /* PWM G1 */
+		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G2 */
+		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G3 */
+		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G4 */
+		<95 512 1844 0>, <1 650 1000 0>,    /* PWM G1 L2 */
+		<95 512 3688 0>, <1 650 1000 0>,    /* PWM G2 L2 */
+		<95 512 7376 0>, <1 650 1000 0>,    /* PWM G3 L2 */
+		<95 512 14752 0>, <1 650 1000 0>,   /* PWM G4 L2 */
+		<95 512 127796 0>, <1 650 1000 0>,  /* HS G1 RA */
+		<95 512 255591 0>, <1 650 1000 0>,  /* HS G2 RA */
+		<95 512 511181 0>, <1 650 1000 0>,  /* HS G3 RA */
+		<95 512 255591 0>, <1 650 1000 0>,  /* HS G1 RA L2 */
+		<95 512 511181 0>, <1 650 1000 0>,  /* HS G2 RA L2 */
+		<95 512 1022362 0>, <1 650 1000 0>, /* HS G3 RA L2 */
+		<95 512 149422 0>, <1 650 1000 0>,  /* HS G1 RB */
+		<95 512 298189 0>, <1 650 1000 0>,  /* HS G2 RB */
+		<95 512 596378 0>, <1 650 1000 0>,  /* HS G3 RB */
+		<95 512 298189 0>, <1 650 1000 0>,  /* HS G1 RB L2 */
+		<95 512 596378 0>, <1 650 1000 0>,  /* HS G2 RB L2 */
+		<95 512 1192756 0>, <1 650 1000 0>, /* HS G3 RB L2 */
+		<95 512 4096000 0>, <1 650 1000 0>; /* Max. bandwidth */
+		qcom,bus-vector-names = "MIN",
+		"PWM_G1_L1", "PWM_G2_L1", "PWM_G3_L1", "PWM_G4_L1",
+		"PWM_G1_L2", "PWM_G2_L2", "PWM_G3_L2", "PWM_G4_L2",
+		"HS_RA_G1_L1", "HS_RA_G2_L1", "HS_RA_G3_L1",
+		"HS_RA_G1_L2", "HS_RA_G2_L2", "HS_RA_G3_L2",
+		"HS_RB_G1_L1", "HS_RB_G2_L1", "HS_RB_G3_L1",
+		"HS_RB_G1_L2", "HS_RB_G2_L2", "HS_RB_G3_L2",
+		"MAX";
+
+		status = "disabled";
+	};
+
+	pil_modem: qcom,mss@4080000 {
+		compatible = "qcom,pil-q6v55-mss";
+		reg = <0x4080000 0x100>,
+		      <0x1f63000 0x008>,
+		      <0x1f65000 0x008>,
+		      <0x1f64000 0x008>,
+		      <0x4180000 0x020>,
+		      <0x00179000 0x004>;
+		reg-names = "qdsp6_base", "halt_q6", "halt_modem",
+			    "halt_nc", "rmb_base", "restart_reg";
+
+		clocks = <&clock_gcc RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_MSS_CFG_AHB_CLK>,
+			 <&clock_gcc GCC_MSS_Q6_MEMNOC_AXI_CLK>,
+			 <&clock_gcc GCC_BOOT_ROM_AHB_CLK>,
+			 <&clock_gcc GCC_MSS_GPLL0_DIV_CLK_SRC>,
+			 <&clock_gcc GCC_MSS_SNOC_AXI_CLK>,
+			 <&clock_gcc GCC_MSS_MFAB_AXIS_CLK>;
+		clock-names = "xo", "iface_clk", "bus_clk",
+			      "mem_clk", "gpll0_mss_clk", "snoc_axi_clk",
+			      "mnoc_axi_clk";
+		qcom,proxy-clock-names = "xo";
+		qcom,active-clock-names = "iface_clk", "bus_clk", "mem_clk",
+					  "gpll0_mss_clk", "snoc_axi_clk",
+					  "mnoc_axi_clk";
+
+		interrupts = <0 266 1>;
+		vdd_cx-supply = <&pmcobalt_s9_level>;
+		vdd_cx-voltage = <RPMH_REGULATOR_LEVEL_MAX>;
+		vdd_mx-supply = <&pmcobalt_s6_level>;
+		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_MAX>;
+		qcom,firmware-name = "modem";
+		qcom,pil-self-auth;
+		qcom,sysmon-id = <0>;
+		qcom,ssctl-instance-id = <0x12>;
+		qcom,override-acc;
+		qcom,qdsp6v65-1-0;
+		status = "ok";
+		memory-region = <&pil_modem_mem>;
+		qcom,mem-protect-id = <0xF>;
+
+		/* GPIO inputs from mss */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_1_in 0 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_1_in 1 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_1_in 2 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_1_in 3 0>;
+		qcom,gpio-shutdown-ack = <&smp2pgpio_ssr_smp2p_1_in 7 0>;
+
+		/* GPIO output to mss */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
+	};
+
+	qcom,lpass@17300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x17300000 0x00100>;
+		interrupts = <0 162 1>;
+
+		vdd_cx-supply = <&pmcobalt_s9_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+
+		clocks = <&clock_gcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <1>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <1>;
+		status = "ok";
+		qcom,ssctl-instance-id = <0x14>;
+		qcom,firmware-name = "adsp";
+		memory-region = <&pil_adsp_mem>;
+
+		/* GPIO inputs from lpass */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+		/* GPIO output to lpass */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+	};
+
+	qcom,ssc@5c00000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x5c00000 0x4000>;
+		interrupts = <0 494 1>;
+
+		vdd_cx-supply = <&pmcobalt_l27_level>;
+		vdd_px-supply = <&pmcobalt_lvs2>;
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 0>;
+		qcom,proxy-reg-names = "vdd_cx", "vdd_px";
+		qcom,keep-proxy-regs-on;
+
+		clocks = <&clock_gcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <12>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <424>;
+		qcom,sysmon-id = <3>;
+		qcom,ssctl-instance-id = <0x16>;
+		qcom,firmware-name = "slpi";
+		status = "ok";
+		memory-region = <&pil_slpi_mem>;
+
+		/* GPIO inputs from ssc */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_3_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_3_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_3_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_3_in 3 0>;
+
+		/* GPIO output to ssc */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_3_out 0 0>;
+	};
+
+	eud: qcom,msm-eud@88e0000 {
+		compatible = "qcom,msm-eud";
+		interrupt-names = "eud_irq";
+		interrupts = <GIC_SPI 492 IRQ_TYPE_LEVEL_HIGH>;
+		reg = <0x88e0000 0x2000>;
+		reg-names = "eud_base";
+		status = "ok";
+	};
+
+	qcom,spss@1880000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x188101c 0x4>,
+		      <0x1881024 0x4>,
+		      <0x1881028 0x4>,
+		      <0x188103c 0x4>,
+		      <0x1882014 0x4>;
+		reg-names = "sp2soc_irq_status", "sp2soc_irq_clr",
+			    "sp2soc_irq_mask", "rmb_err", "rmb_err_spare2";
+		interrupts = <0 352 1>;
+
+		vdd_cx-supply = <&pmcobalt_s9_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+		vdd_mx-supply = <&pmcobalt_s6_level>;
+		vdd_mx-uV = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+
+		clocks = <&clock_gcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+		qcom,pil-generic-irq-handler;
+		status = "ok";
+
+		qcom,pas-id = <14>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,firmware-name = "spss";
+		memory-region = <&pil_spss_mem>;
+		qcom,spss-scsr-bits = <24 25>;
+	};
+
+	wdog: qcom,wdt@17980000{
+		compatible = "qcom,msm-watchdog";
+		reg = <0x17980000 0x1000>;
+		reg-names = "wdt-base";
+		interrupts = <0 3 0>, <0 4 0>;
+		qcom,bark-time = <11000>;
+		qcom,pet-time = <10000>;
+		qcom,ipi-ping;
+		qcom,wakeup-enable;
+	};
+
+	qcom,turing@8300000 {
+		compatible = "qcom,pil-tz-generic";
+		reg = <0x8300000 0x100000>;
+		interrupts = <0 578 1>;
+
+		vdd_cx-supply = <&pmcobalt_s9_level>;
+		qcom,proxy-reg-names = "vdd_cx";
+		qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_MAX 100000>;
+
+		clocks = <&clock_gcc RPMH_CXO_CLK>;
+		clock-names = "xo";
+		qcom,proxy-clock-names = "xo";
+
+		qcom,pas-id = <18>;
+		qcom,proxy-timeout-ms = <10000>;
+		qcom,smem-id = <423>;
+		qcom,sysmon-id = <7>;
+		qcom,ssctl-instance-id = <0x17>;
+		qcom,firmware-name = "cdsp";
+		memory-region = <&pil_cdsp_mem>;
+
+		/* GPIO inputs from turing */
+		qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_5_in 0 0>;
+		qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_5_in 2 0>;
+		qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_5_in 1 0>;
+		qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_5_in 3 0>;
+
+		/* GPIO output to turing */
+		qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_5_out 0 0>;
+		status = "ok";
+	};
+
+	qcom,msm-imem@146bf000 {
+		compatible = "qcom,msm-imem";
+		reg = <0x146bf000 0x1000>;
+		ranges = <0x0 0x146bf000 0x1000>;
+		#address-cells = <1>;
+		#size-cells = <1>;
+
+		mem_dump_table@10 {
+			compatible = "qcom,msm-imem-mem_dump_table";
+			reg = <0x10 8>;
+		};
+
+		pil@94c {
+			compatible = "qcom,msm-imem-pil";
+			reg = <0x94c 200>;
+		};
+	};
+
+	kryo3xx-erp {
+		compatible = "arm,arm64-kryo3xx-cpu-erp";
+		interrupts = <1 6 4>,
+			     <1 7 4>,
+			     <0 34 4>,
+			     <0 35 4>;
+
+		interrupt-names = "l1-l2-faultirq",
+				  "l1-l2-errirq",
+				  "l3-scu-errirq",
+				  "l3-scu-faultirq";
+	};
+
+	qcom,llcc@1300000 {
+		compatible = "qcom,llcc-core", "syscon", "simple-mfd";
+		reg = <0x1300000 0x50000>;
+		reg-names = "llcc_base";
+
+		llcc: qcom,msmskunk-llcc {
+			compatible = "qcom,msmskunk-llcc";
+			#cache-cells = <1>;
+			max-slices = <32>;
+		};
+
+		qcom,llcc-erp {
+			compatible = "qcom,llcc-erp";
+			interrupt-names = "ecc_irq";
+			interrupts = <GIC_SPI 582 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		qcom,llcc-amon {
+			compatible = "qcom,llcc-amon";
+		};
+	};
+
+	qcom,ipc-spinlock@1f40000 {
+		compatible = "qcom,ipc-spinlock-sfpb";
+		reg = <0x1f40000 0x8000>;
+		qcom,num-locks = <8>;
+	};
+
+	qcom,smem@86000000 {
+		compatible = "qcom,smem";
+		reg = <0x86000000 0x200000>,
+			<0x17911008 0x4>,
+			<0x778000 0x7000>,
+			<0x1fd4000 0x8>;
+		reg-names = "smem", "irq-reg-base", "aux-mem1",
+			"smem_targ_info_reg";
+		qcom,mpu-enabled;
+	};
+
+	qcom,glink-mailbox-xprt-spss@1885008 {
+		compatible = "qcom,glink-mailbox-xprt";
+		reg = <0x1885008 0x8>,
+			<0x1885010 0x4>,
+			<0x188501c 0x4>,
+			<0x1886008 0x4>;
+		reg-names = "mbox-loc-addr", "mbox-loc-size", "irq-reg-base",
+				"irq-rx-reset";
+		qcom,irq-mask = <0x1>;
+		interrupts = <0 348 4>;
+		label = "spss";
+		qcom,tx-ring-size = <0x400>;
+		qcom,rx-ring-size = <0x400>;
+	};
+
+	apps_rsc: mailbox@179e0000 {
+		compatible = "qcom,tcs-drv";
+		reg = <0x179e0000 0x100>, <0x179e0d00 0x3000>;
+		interrupts = <0 5 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <2>;
+		qcom,tcs-config = <SLEEP_TCS 3>,
+				<WAKE_TCS    3>,
+				<ACTIVE_TCS  2>,
+				<CONTROL_TCS 1>;
+	};
+
+	disp_rsc: mailbox@af20000 {
+		status = "disabled";
+		compatible = "qcom,tcs-drv";
+		reg = <0xaf20000 0x100>, <0xaf21c00 0x3000>;
+		interrupts = <0 129 0>;
+		#mbox-cells = <1>;
+		qcom,drv-id = <0>;
+		qcom,tcs-config = <SLEEP_TCS 1>,
+				<WAKE_TCS    1>,
+				<ACTIVE_TCS  0>,
+				<CONTROL_TCS 1>;
+	};
+
+	system_pm {
+		compatible = "qcom,system-pm";
+		mboxes = <&apps_rsc 0>;
+	};
+
+	qcom,glink-smem-native-xprt-modem@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x1000>;
+		interrupts = <GIC_SPI 449 IRQ_TYPE_EDGE_RISING>;
+		label = "mpss";
+	};
+
+	qcom,glink-smem-native-xprt-adsp@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x100>;
+		interrupts = <GIC_SPI 156 IRQ_TYPE_EDGE_RISING>;
+		label = "lpass";
+	};
+
+	qcom,glink-smem-native-xprt-dsps@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x1000000>;
+		interrupts = <GIC_SPI 170 IRQ_TYPE_EDGE_RISING>;
+		label = "dsps";
+	};
+
+	qcom,glink-smem-native-xprt-cdsp@86000000 {
+		compatible = "qcom,glink-smem-native-xprt";
+		reg = <0x86000000 0x200000>,
+			<0x1799000c 0x4>;
+		reg-names = "smem", "irq-reg-base";
+		qcom,irq-mask = <0x10>;
+		interrupts = <GIC_SPI 574 IRQ_TYPE_EDGE_RISING>;
+		label = "cdsp";
+	};
+
+	glink_mpss: qcom,glink-ssr-modem {
+		compatible = "qcom,glink_ssr";
+		label = "modem";
+		qcom,edge = "mpss";
+		qcom,notify-edges = <&glink_lpass>, <&glink_dsps>,
+				    <&glink_cdsp>, <&glink_spss>;
+		qcom,xprt = "smem";
+	};
+
+	glink_lpass: qcom,glink-ssr-adsp {
+		compatible = "qcom,glink_ssr";
+		label = "adsp";
+		qcom,edge = "lpass";
+		qcom,notify-edges = <&glink_mpss>, <&glink_dsps>, <&glink_cdsp>;
+		qcom,xprt = "smem";
+	};
+
+	glink_dsps: qcom,glink-ssr-dsps {
+		compatible = "qcom,glink_ssr";
+		label = "slpi";
+		qcom,edge = "dsps";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>,
+				    <&glink_cdsp>;
+		qcom,xprt = "smem";
+	};
+
+	glink_cdsp: qcom,glink-ssr-cdsp {
+		compatible = "qcom,glink_ssr";
+		label = "cdsp";
+		qcom,edge = "cdsp";
+		qcom,notify-edges = <&glink_mpss>, <&glink_lpass>,
+				    <&glink_dsps>;
+		qcom,xprt = "smem";
+	};
+
+	glink_spss: qcom,glink-ssr-spss {
+		compatible = "qcom,glink_ssr";
+		label = "spss";
+		qcom,edge = "spss";
+		qcom,notify-edges = <&glink_mpss>;
+		qcom,xprt = "mailbox";
+	};
+
+	qcom,ipc_router {
+		compatible = "qcom,ipc_router";
+		qcom,node-id = <1>;
+	};
+
+	qcom,ipc_router_modem_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "mpss";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_q6_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "lpass";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_dsps_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "dsps";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,ipc_router_cdsp_xprt {
+		compatible = "qcom,ipc_router_glink_xprt";
+		qcom,ch-name = "IPCRTR";
+		qcom,xprt-remote = "cdsp";
+		qcom,glink-xprt = "smem";
+		qcom,xprt-linkid = <1>;
+		qcom,xprt-version = <1>;
+		qcom,fragmented-data;
+	};
+
+	qcom,glink_pkt {
+		compatible = "qcom,glinkpkt";
+
+		qcom,glinkpkt-at-mdm0 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DS";
+			qcom,glinkpkt-dev-name = "at_mdm0";
+		};
+
+		qcom,glinkpkt-loopback_cntl {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "LOCAL_LOOPBACK_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback_ctrl";
+		};
+
+		qcom,glinkpkt-loopback_data {
+			qcom,glinkpkt-transport = "lloop";
+			qcom,glinkpkt-edge = "local";
+			qcom,glinkpkt-ch-name = "glink_pkt_lloop_CLNT";
+			qcom,glinkpkt-dev-name = "glink_pkt_loopback";
+		};
+
+		qcom,glinkpkt-apr-apps2 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "adsp";
+			qcom,glinkpkt-ch-name = "apr_apps2";
+			qcom,glinkpkt-dev-name = "apr_apps2";
+		};
+
+		qcom,glinkpkt-data40-cntl {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA40_CNTL";
+			qcom,glinkpkt-dev-name = "smdcntl8";
+		};
+
+		qcom,glinkpkt-data1 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA1";
+			qcom,glinkpkt-dev-name = "smd7";
+		};
+
+		qcom,glinkpkt-data4 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA4";
+			qcom,glinkpkt-dev-name = "smd8";
+		};
+
+		qcom,glinkpkt-data11 {
+			qcom,glinkpkt-transport = "smem";
+			qcom,glinkpkt-edge = "mpss";
+			qcom,glinkpkt-ch-name = "DATA11";
+			qcom,glinkpkt-dev-name = "smd11";
+		};
+	};
+
+	qcom,msm_gsi {
+		compatible = "qcom,msm_gsi";
+	};
+
+	qcom,rmnet-ipa {
+		compatible = "qcom,rmnet-ipa3";
+		qcom,rmnet-ipa-ssr;
+		qcom,ipa-loaduC;
+		qcom,ipa-advertise-sg-support;
+	};
+
+	ipa_hw: qcom,ipa@01e00000 {
+		compatible = "qcom,ipa";
+		reg = <0x1e00000 0x34000>,
+		      <0x1e04000 0x2c000>;
+		reg-names = "ipa-base", "gsi-base";
+		interrupts =
+			<0 311 0>,
+			<0 432 0>;
+		interrupt-names = "ipa-irq", "gsi-irq";
+		qcom,ipa-hw-ver = <13>; /* IPA core version = IPAv3.5.1 */
+		qcom,ipa-hw-mode = <1>;
+		qcom,ee = <0>;
+		qcom,use-gsi;
+		qcom,use-ipa-tethering-bridge;
+		qcom,modem-cfg-emb-pipe-flt;
+		qcom,ipa-wdi2;
+		qcom,use-64-bit-dma-mask;
+		clock-names = "core_clk";
+		clocks = <&clock_gcc 0xfa685cda>;
+		qcom,msm-bus,name = "ipa";
+		qcom,msm-bus,num-cases = <4>;
+		qcom,msm-bus,num-paths = <3>;
+		qcom,msm-bus,vectors-KBps =
+		/* No vote */
+			<90 512 0 0>,
+			<90 585 0 0>,
+			<1 676 0 0>,
+		/* SVS */
+			<90 512 80000 640000>,
+			<90 585 80000 640000>,
+			<1 676 80000 80000>,
+		/* NOMINAL */
+			<90 512 206000 960000>,
+			<90 585 206000 960000>,
+			<1 676 206000 160000>,
+		/* TURBO */
+			<90 512 206000 3600000>,
+			<90 585 206000 3600000>,
+			<1 676 206000 300000>;
+		qcom,bus-vector-names = "MIN", "SVS", "NOMINAL", "TURBO";
+
+		/* IPA RAM mmap */
+		qcom,ipa-ram-mmap = <
+			0x280	/* ofst_start; */
+			0x0	/* nat_ofst; */
+			0x0	/* nat_size; */
+			0x288	/* v4_flt_hash_ofst; */
+			0x78	/* v4_flt_hash_size; */
+			0x4000	/* v4_flt_hash_size_ddr; */
+			0x308	/* v4_flt_nhash_ofst; */
+			0x78	/* v4_flt_nhash_size; */
+			0x4000	/* v4_flt_nhash_size_ddr; */
+			0x388	/* v6_flt_hash_ofst; */
+			0x78	/* v6_flt_hash_size; */
+			0x4000	/* v6_flt_hash_size_ddr; */
+			0x408	/* v6_flt_nhash_ofst; */
+			0x78	/* v6_flt_nhash_size; */
+			0x4000	/* v6_flt_nhash_size_ddr; */
+			0xf	/* v4_rt_num_index; */
+			0x0	/* v4_modem_rt_index_lo; */
+			0x7	/* v4_modem_rt_index_hi; */
+			0x8	/* v4_apps_rt_index_lo; */
+			0xe	/* v4_apps_rt_index_hi; */
+			0x488	/* v4_rt_hash_ofst; */
+			0x78	/* v4_rt_hash_size; */
+			0x4000	/* v4_rt_hash_size_ddr; */
+			0x508	/* v4_rt_nhash_ofst; */
+			0x78	/* v4_rt_nhash_size; */
+			0x4000	/* v4_rt_nhash_size_ddr; */
+			0xf	/* v6_rt_num_index; */
+			0x0	/* v6_modem_rt_index_lo; */
+			0x7	/* v6_modem_rt_index_hi; */
+			0x8	/* v6_apps_rt_index_lo; */
+			0xe	/* v6_apps_rt_index_hi; */
+			0x588	/* v6_rt_hash_ofst; */
+			0x78	/* v6_rt_hash_size; */
+			0x4000	/* v6_rt_hash_size_ddr; */
+			0x608	/* v6_rt_nhash_ofst; */
+			0x78	/* v6_rt_nhash_size; */
+			0x4000	/* v6_rt_nhash_size_ddr; */
+			0x688	/* modem_hdr_ofst; */
+			0x140	/* modem_hdr_size; */
+			0x7c8	/* apps_hdr_ofst; */
+			0x0	/* apps_hdr_size; */
+			0x800	/* apps_hdr_size_ddr; */
+			0x7d0	/* modem_hdr_proc_ctx_ofst; */
+			0x200	/* modem_hdr_proc_ctx_size; */
+			0x9d0	/* apps_hdr_proc_ctx_ofst; */
+			0x200	/* apps_hdr_proc_ctx_size; */
+			0x0	/* apps_hdr_proc_ctx_size_ddr; */
+			0x0	/* modem_comp_decomp_ofst; diff */
+			0x0	/* modem_comp_decomp_size; diff */
+			0xbd8	/* modem_ofst; */
+			0x1424	/* modem_size; */
+			0x1ffc	/* apps_v4_flt_hash_ofst; */
+			0x0	/* apps_v4_flt_hash_size; */
+			0x1ffc	/* apps_v4_flt_nhash_ofst; */
+			0x0	/* apps_v4_flt_nhash_size; */
+			0x1ffc	/* apps_v6_flt_hash_ofst; */
+			0x0	/* apps_v6_flt_hash_size; */
+			0x1ffc	/* apps_v6_flt_nhash_ofst; */
+			0x0	/* apps_v6_flt_nhash_size; */
+			0x80	/* uc_info_ofst; */
+			0x200	/* uc_info_size; */
+			0x2000	/* end_ofst; */
+			0x1ffc	/* apps_v4_rt_hash_ofst; */
+			0x0	/* apps_v4_rt_hash_size; */
+			0x1ffc	/* apps_v4_rt_nhash_ofst; */
+			0x0	/* apps_v4_rt_nhash_size; */
+			0x1ffc	/* apps_v6_rt_hash_ofst; */
+			0x0	/* apps_v6_rt_hash_size; */
+			0x1ffc	/* apps_v6_rt_nhash_ofst; */
+			0x0	/* apps_v6_rt_nhash_size; */
+		>;
+	};
+};
+
+&pcie_0_gdsc {
+	status = "ok";
+};
+
+&pcie_1_gdsc {
+	status = "ok";
+};
+
+&ufs_card_gdsc {
+	status = "ok";
+};
+
+&ufs_phy_gdsc {
+	status = "ok";
+};
+
+&usb30_prim_gdsc {
+	status = "ok";
+};
+
+&usb30_sec_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_audio_tbu_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_pcie_tbu_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_tbu1_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_aggre_noc_mmu_tbu2_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_mmnoc_mmu_tbu_hf0_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_mmnoc_mmu_tbu_hf1_gdsc {
+	status = "ok";
+};
+
+&hlos1_vote_mmnoc_mmu_tbu_sf_gdsc {
+	status = "ok";
+};
+
+&bps_gdsc {
+	status = "ok";
+};
+
+&ife_0_gdsc {
+	status = "ok";
+};
+
+&ife_1_gdsc {
+	status = "ok";
+};
+
+&ipe_0_gdsc {
+	status = "ok";
+};
+
+&ipe_1_gdsc {
+	status = "ok";
+};
+
+&titan_top_gdsc {
+	status = "ok";
+};
+
+&mdss_core_gdsc {
+	status = "ok";
+};
+
+&gpu_cx_gdsc {
+	status = "ok";
+};
+
+&gpu_gx_gdsc {
+	parent-supply = <&pm8005_s1_level>;
+	status = "ok";
+};
+
+&vcodec0_gdsc {
+	status = "ok";
+};
+
+&vcodec1_gdsc {
+	status = "ok";
+};
+
+&venus_gdsc {
+	status = "ok";
+};
+
+#include "msmskunk-regulator.dtsi"
+#include "msmskunk-coresight.dtsi"
+#include "msm-arm-smmu-skunk.dtsi"
+#include "msmskunk-ion.dtsi"
+#include "msmskunk-smp2p.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/skeleton64.dtsi b/arch/arm64/boot/dts/qcom/skeleton64.dtsi
new file mode 100644
index 0000000..1f8ba28
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/skeleton64.dtsi
@@ -0,0 +1,15 @@
+/*
+ * Skeleton device tree in the 64-bit version; the bare minimum
+ * needed to boot; just include and add a compatible value.  The
+ * bootloader will typically populate the memory node.
+ */
+
+/ {
+	#address-cells = <2>;
+	#size-cells = <2>;
+	cpus { };
+	soc { };
+	chosen { };
+	aliases { };
+	memory { device_type = "memory"; reg = <0 0 0 0>; };
+};
diff --git a/arch/arm64/configs/msmskunk-perf_defconfig b/arch/arm64/configs/msmskunk-perf_defconfig
new file mode 100644
index 0000000..2a3c602
--- /dev/null
+++ b/arch/arm64/configs/msmskunk-perf_defconfig
@@ -0,0 +1,390 @@
+CONFIG_LOCALVERSION="-perf"
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSMSKUNK=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+# CONFIG_EFI is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_SKY2=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_USB_USBNET=y
+CONFIG_WIL6210=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_MSMSKUNK_LLCC=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_INFO=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_IPC_LOGGING=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/msmskunk_defconfig b/arch/arm64/configs/msmskunk_defconfig
new file mode 100644
index 0000000..a00fe99
--- /dev/null
+++ b/arch/arm64/configs/msmskunk_defconfig
@@ -0,0 +1,439 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODULE_FORCE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_FORCE=y
+CONFIG_MODULE_SIG_SHA512=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_MSMSKUNK=y
+CONFIG_PCI=y
+CONFIG_SCHED_MC=y
+CONFIG_NR_CPUS=8
+CONFIG_PREEMPT=y
+CONFIG_HZ_100=y
+CONFIG_CLEANCACHE=y
+CONFIG_CMA=y
+CONFIG_ZSMALLOC=y
+CONFIG_SECCOMP=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+# CONFIG_ARM64_VHE is not set
+CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_DNS_RESOLVER=y
+CONFIG_RMNET_DATA=y
+CONFIG_RMNET_DATA_DEBUG_PKT=y
+CONFIG_BT=y
+CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
+CONFIG_RFKILL=y
+CONFIG_IPC_ROUTER=y
+CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_DMA_CMA=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_CHR_DEV_SCH=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_QCOM=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_TUN=y
+CONFIG_PHYLIB=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_WIL6210=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM=y
+CONFIG_SERIAL_MSM_CONSOLE=y
+CONFIG_HVC_DCC=y
+CONFIG_HW_RANDOM=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_SPI=y
+CONFIG_SPI_QUP=y
+CONFIG_SPI_SPIDEV=y
+CONFIG_SPMI=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_POWER_RESET_XGENE=y
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_MFD_SPMI_PMIC=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_REGULATOR_STUB=y
+CONFIG_MEDIA_SUPPORT=y
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+CONFIG_VIDEO_ADV_DEBUG=y
+CONFIG_VIDEO_FIXED_MINOR_RANGES=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_FB=y
+CONFIG_FB_VIRTUAL=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_DWC3_MSM=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_ISP1760_HOST_ROLE=y
+CONFIG_NOP_USB_XCEIV=y
+CONFIG_USB_MSM_SSPHY_QMP=y
+CONFIG_MSM_QUSB_PHY=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_CONFIGFS=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_DIAG=y
+CONFIG_MMC=y
+CONFIG_MMC_BLOCK_MINORS=32
+CONFIG_MMC_TEST=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_MMC_SDHCI_MSM=y
+CONFIG_EDAC=y
+CONFIG_EDAC_MM_EDAC=y
+CONFIG_EDAC_QCOM_LLCC=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE=y
+CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMADEVICES=y
+CONFIG_UIO=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ION=y
+CONFIG_ION_MSM=y
+CONFIG_REMOTE_SPINLOCK_MSM=y
+CONFIG_IOMMU_IO_PGTABLE_FAST=y
+CONFIG_ARM_SMMU=y
+CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_TESTS=y
+CONFIG_QCOM_LLCC=y
+CONFIG_QCOM_MSMSKUNK_LLCC=y
+CONFIG_QCOM_SCM=y
+CONFIG_QCOM_EUD=y
+CONFIG_QCOM_WATCHDOG_V2=y
+CONFIG_QCOM_MEMORY_DUMP_V2=y
+CONFIG_QCOM_SECURE_BUFFER=y
+CONFIG_MSM_SMEM=y
+CONFIG_MSM_GLINK=y
+CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
+CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
+CONFIG_TRACER_PKT=y
+CONFIG_QTI_RPMH_API=y
+CONFIG_MSM_SMP2P=y
+CONFIG_MSM_SMP2P_TEST=y
+CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
+CONFIG_MSM_QMI_INTERFACE=y
+CONFIG_MSM_GLINK_PKT=y
+CONFIG_MSM_SUBSYSTEM_RESTART=y
+CONFIG_MSM_PIL=y
+CONFIG_MSM_PIL_SSR_GENERIC=y
+CONFIG_MSM_PIL_MSS_QDSP6V5=y
+CONFIG_EXTCON=y
+CONFIG_IIO=y
+CONFIG_PWM=y
+CONFIG_ARM_GIC_V3_ACL=y
+CONFIG_PHY_XGENE=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_EFIVAR_FS=y
+CONFIG_ECRYPT_FS=y
+CONFIG_ECRYPT_FS_MESSAGING=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+CONFIG_PAGE_OWNER=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=4000
+CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LIST=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_FAIL_PAGE_ALLOC=y
+CONFIG_FAULT_INJECTION_DEBUG_FS=y
+CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
+CONFIG_IPC_LOGGING=y
+CONFIG_QCOM_RTB=y
+CONFIG_QCOM_RTB_SEPARATE_CPUS=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_LKDTM=y
+CONFIG_MEMTEST=y
+CONFIG_ARM64_PTDUMP=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
+CONFIG_CORESIGHT_SOURCE_ETM4X=y
+CONFIG_CORESIGHT_QCOM_REPLICATOR=y
+CONFIG_CORESIGHT_STM=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SMACK=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_ANSI_CPRNG=y
+CONFIG_CRYPTO_DEV_QCE=y
+CONFIG_ARM64_CRYPTO=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
+CONFIG_XZ_DEC=y
+CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index f8ae6d6..5473263 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -146,6 +146,8 @@
 static inline void gic_write_pmr(u32 val)
 {
 	asm volatile("msr_s " __stringify(ICC_PMR_EL1) ", %0" : : "r" ((u64)val));
+	/* As per the architecture specification */
+	mb();
 }
 
 static inline void gic_write_ctlr(u32 val)
@@ -163,6 +165,8 @@
 static inline void gic_write_sgi1r(u64 val)
 {
 	asm volatile("msr_s " __stringify(ICC_SGI1R_EL1) ", %0" : : "r" (val));
+	/* As per the architecture specification */
+	mb();
 }
 
 static inline u32 gic_read_sre(void)
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 28bfe61..359d9d2 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -41,6 +41,15 @@
 	msr	daifclr, #2
 	.endm
 
+	.macro	save_and_disable_irq, flags
+	mrs	\flags, daif
+	msr	daifset, #2
+	.endm
+
+	.macro	restore_irq, flags
+	msr	daif, \flags
+	.endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -395,4 +404,24 @@
 	movk	\reg, :abs_g0_nc:\val
 	.endm
 
+/*
+ * Return the current thread_info.
+ */
+	.macro	get_thread_info, rd
+	mrs	\rd, sp_el0
+	.endm
+
+/*
+ * Errata workaround post TTBR0_EL1 update.
+ */
+	.macro	post_ttbr0_update_workaround
+#ifdef CONFIG_CAVIUM_ERRATUM_27456
+alternative_if ARM64_WORKAROUND_CAVIUM_27456
+	ic	iallu
+	dsb	nsh
+	isb
+alternative_else_nop_endif
+#endif
+	.endm
+
 #endif	/* __ASM_ASSEMBLER_H */
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 2e5fb97..012b95c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -87,6 +87,12 @@
 extern void __dma_map_area(const void *, size_t, int);
 extern void __dma_unmap_area(const void *, size_t, int);
 extern void __dma_flush_area(const void *, size_t);
+extern void __dma_inv_area(const void *, size_t);
+extern void __dma_clean_area(const void *, size_t);
+
+#define dmac_flush_range(start, end) __dma_flush_area(start, (void *)(end) - (void *)(start))
+#define dmac_inv_range(start, end) __dma_inv_area(start, (void *)(end) - (void *)(start))
+#define dmac_clean_range(start, end) __dma_clean_area(start, (void *)(end) - (void *)(start))
 
 /*
  * Copy user data from/to a page which is mapped into a different
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 0bc0b1de..b3423f5 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -29,6 +29,8 @@
 
 #include <linux/kernel.h>
 
+extern const char *machine_name;
+
 /* CPU feature register tracking */
 enum ftr_type {
 	FTR_EXACT,	/* Use a predefined safe value */
@@ -208,6 +210,12 @@
 	return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+		!cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 243ef25..ffa5af4 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -17,14 +17,24 @@
 #define __ASM_DEVICE_H
 
 struct dev_archdata {
-	struct dma_map_ops *dma_ops;
+	const struct dma_map_ops *dma_ops;
 #ifdef CONFIG_IOMMU_API
 	void *iommu;			/* private IOMMU data */
 #endif
 	bool dma_coherent;
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+	struct dma_iommu_mapping	*mapping;
+#endif
 };
 
 struct pdev_archdata {
+	u64 dma_mask;
 };
 
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+#define to_dma_iommu_mapping(dev) ((dev)->archdata.mapping)
+#else
+#define to_dma_iommu_mapping(dev) NULL
+#endif
+
 #endif
diff --git a/arch/arm64/include/asm/dma-iommu.h b/arch/arm64/include/asm/dma-iommu.h
new file mode 100644
index 0000000..ab0e5b2
--- /dev/null
+++ b/arch/arm64/include/asm/dma-iommu.h
@@ -0,0 +1,64 @@
+#ifndef ASMARM_DMA_IOMMU_H
+#define ASMARM_DMA_IOMMU_H
+
+#ifdef __KERNEL__
+
+#include <linux/err.h>
+#include <linux/mm_types.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-debug.h>
+#include <linux/kmemcheck.h>
+#include <linux/kref.h>
+#include <linux/dma-mapping-fast.h>
+
+struct dma_iommu_mapping {
+	/* iommu specific data */
+	struct iommu_domain	*domain;
+
+	void			*bitmap;
+	size_t			bits;
+	dma_addr_t		base;
+
+	spinlock_t		lock;
+	struct kref		kref;
+
+	struct dma_fast_smmu_mapping *fast;
+};
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
+
+int arm_iommu_attach_device(struct device *dev,
+					struct dma_iommu_mapping *mapping);
+void arm_iommu_detach_device(struct device *dev);
+
+#else  /* !CONFIG_ARM64_DMA_USE_IOMMU */
+
+static inline struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+	return NULL;
+}
+
+static inline void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+}
+
+static inline int arm_iommu_attach_device(struct device *dev,
+			struct dma_iommu_mapping *mapping)
+{
+	return -ENODEV;
+}
+
+static inline void arm_iommu_detach_device(struct device *dev)
+{
+}
+
+#endif	/* CONFIG_ARM64_DMA_USE_IOMMU */
+
+#endif /* __KERNEL__ */
+#endif
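
The header above only declares the mapping API; as a reading aid, here is a minimal, hypothetical usage sketch (not part of this patch) of how a driver probe path would be expected to create, attach and tear down a mapping. The helper name, device pointer, IOVA base and window size are all assumptions.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

/* Hypothetical probe helper: names and values are illustrative only. */
static int example_setup_iommu_mapping(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* Reserve an arbitrary 256MB IOVA window starting at 0x10000000. */
	mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_256M);
	if (IS_ERR_OR_NULL(mapping))
		return mapping ? PTR_ERR(mapping) : -ENODEV;

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	/* From here on, dma_map_*() on this device goes through the IOMMU. */
	return 0;
}
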
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index ccea82c..b89a7f3 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -27,7 +27,7 @@
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
 extern struct dma_map_ops dummy_dma_ops;
 
-static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
 	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
@@ -39,7 +39,7 @@
 	return &dummy_dma_ops;
 }
 
-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (xen_initial_domain())
 		return xen_dma_ops;
@@ -47,6 +47,12 @@
 		return __generic_dma_ops(dev);
 }
 
+static inline void set_dma_ops(struct device *dev,
+				const struct dma_map_ops *dma_ops)
+{
+	dev->archdata.dma_ops = dma_ops;
+}
+
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent);
 #define arch_setup_dma_ops	arch_setup_dma_ops
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index a9e54aa..3a405dc 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
+#include <asm/cpufeature.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
@@ -75,7 +76,30 @@
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
-	switch_mm(NULL, mm, NULL);
+	__switch_mm(mm);
+
+	if (system_uses_ttbr0_pan()) {
+		if (mm != current->active_mm) {
+			/*
+			 * Update the current thread's saved ttbr0 since it is
+			 * restored as part of a return from exception. Set
+			 * the hardware TTBR0_EL1 using cpu_switch_mm()
+			 * directly to enable potential errata workarounds.
+			 */
+			update_saved_ttbr0(current, mm);
+			cpu_switch_mm(mm->pgd, mm);
+		} else {
+			/*
+			 * Defer the switch to the current thread's TTBR0_EL1
+			 * until uaccess_enable(). Restore the current
+			 * thread's saved ttbr0 corresponding to its active_mm
+			 * (if different from init_mm).
+			 */
+			cpu_set_reserved_ttbr0();
+			if (current->active_mm != &init_mm)
+				update_saved_ttbr0(current, current->active_mm);
+		}
+	}
 }
 
 void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index f2585cd..71dfa3b 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -27,9 +27,9 @@
 #include <asm/sysreg.h>
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)		\
+do {									\
+	uaccess_enable();						\
 	asm volatile(							\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 "	prfm	pstl1strm, %2\n"					\
 "1:	ldxr	%w1, %2\n"						\
 	insn "\n"							\
@@ -44,11 +44,11 @@
 "	.popsection\n"							\
 	_ASM_EXTABLE(1b, 4b)						\
 	_ASM_EXTABLE(2b, 4b)						\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,		\
-		    CONFIG_ARM64_PAN)					\
 	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
 	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "memory")
+	: "memory");							\
+	uaccess_disable();						\
+} while (0)
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -118,8 +118,8 @@
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	uaccess_enable();
 	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 "	prfm	pstl1strm, %2\n"
 "1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
@@ -134,10 +134,10 @@
 "	.popsection\n"
 	_ASM_EXTABLE(1b, 4b)
 	_ASM_EXTABLE(2b, 4b)
-ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
 	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
 	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
 	: "memory");
+	uaccess_disable();
 
 	*uval = val;
 	return ret;
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 0bba427..5c0c57e 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -31,38 +31,35 @@
 #include <asm/early_ioremap.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <linux/msm_rtb.h>
 
 #include <xen/xen.h>
 
 /*
  * Generic IO read/write.  These perform native-endian accesses.
+ * Note that some architectures will want to re-define __raw_{read,write}w.
  */
-#define __raw_writeb __raw_writeb
-static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+static inline void __raw_writeb_no_log(u8 val, volatile void __iomem *addr)
 {
 	asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
 
-#define __raw_writew __raw_writew
-static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+static inline void __raw_writew_no_log(u16 val, volatile void __iomem *addr)
 {
 	asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
 
-#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static inline void __raw_writel_no_log(u32 val, volatile void __iomem *addr)
 {
 	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
 }
 
-#define __raw_writeq __raw_writeq
-static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+static inline void __raw_writeq_no_log(u64 val, volatile void __iomem *addr)
 {
 	asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
 }
 
-#define __raw_readb __raw_readb
-static inline u8 __raw_readb(const volatile void __iomem *addr)
+static inline u8 __raw_readb_no_log(const volatile void __iomem *addr)
 {
 	u8 val;
 	asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
@@ -72,8 +69,7 @@
 	return val;
 }
 
-#define __raw_readw __raw_readw
-static inline u16 __raw_readw(const volatile void __iomem *addr)
+static inline u16 __raw_readw_no_log(const volatile void __iomem *addr)
 {
 	u16 val;
 
@@ -84,8 +80,7 @@
 	return val;
 }
 
-#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static inline u32 __raw_readl_no_log(const volatile void __iomem *addr)
 {
 	u32 val;
 	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
@@ -95,8 +90,7 @@
 	return val;
 }
 
-#define __raw_readq __raw_readq
-static inline u64 __raw_readq(const volatile void __iomem *addr)
+static inline u64 __raw_readq_no_log(const volatile void __iomem *addr)
 {
 	u64 val;
 	asm volatile(ALTERNATIVE("ldr %0, [%1]",
@@ -106,6 +100,46 @@
 	return val;
 }
 
+/*
+ * There may be cases when clients don't want to or can't support the
+ * logging. The appropriate _no_log functions can be used, but clients should
+ * carefully consider why they can't support the logging.
+ */
+
+#define __raw_write_logged(v, a, _t) ({ \
+	int _ret; \
+	volatile void __iomem *_a = (a); \
+	void *_addr = (void __force *)(_a); \
+	_ret = uncached_logk(LOGK_WRITEL, _addr); \
+	ETB_WAYPOINT; \
+	__raw_write##_t##_no_log((v), _a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	})
+
+#define __raw_writeb(v, a)	__raw_write_logged((v), a, b)
+#define __raw_writew(v, a)	__raw_write_logged((v), a, w)
+#define __raw_writel(v, a)	__raw_write_logged((v), a, l)
+#define __raw_writeq(v, a)	__raw_write_logged((v), a, q)
+
+#define __raw_read_logged(a, _l, _t)    ({ \
+	_t __a; \
+	const volatile void __iomem *_a = (const volatile void __iomem *)(a); \
+	void *_addr = (void __force *)(_a); \
+	int _ret; \
+	_ret = uncached_logk(LOGK_READL, _addr); \
+	ETB_WAYPOINT; \
+	__a = __raw_read##_l##_no_log(_a); \
+	if (_ret) \
+		LOG_BARRIER; \
+	__a; \
+	})
+
+#define __raw_readb(a)		__raw_read_logged((a), b, u8)
+#define __raw_readw(a)		__raw_read_logged((a), w, u16)
+#define __raw_readl(a)		__raw_read_logged((a), l, u32)
+#define __raw_readq(a)		__raw_read_logged((a), q, u64)
+
 /* IO barriers */
 #define __iormb()		rmb()
 #define __iowmb()		wmb()
@@ -127,6 +161,16 @@
 #define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
 #define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
 
+#define readb_relaxed_no_log(c)	({ u8 __v = __raw_readb_no_log(c); __v; })
+#define readw_relaxed_no_log(c)	({ u16 __v = le16_to_cpu((__force __le16)__raw_readw_no_log(c)); __v; })
+#define readl_relaxed_no_log(c)	({ u32 __v = le32_to_cpu((__force __le32)__raw_readl_no_log(c)); __v; })
+#define readq_relaxed_no_log(c)	({ u64 __v = le64_to_cpu((__force __le64)__raw_readq_no_log(c)); __v; })
+
+#define writeb_relaxed_no_log(v, c)	((void)__raw_writeb_no_log((v), (c)))
+#define writew_relaxed_no_log(v, c)	((void)__raw_writew_no_log((__force u16)cpu_to_le16(v), (c)))
+#define writel_relaxed_no_log(v, c)	((void)__raw_writel_no_log((__force u32)cpu_to_le32(v), (c)))
+#define writeq_relaxed_no_log(v, c)	((void)__raw_writeq_no_log((__force u64)cpu_to_le64(v), (c)))
+
 /*
  * I/O memory access primitives. Reads are ordered relative to any
  * following Normal memory access. Writes are ordered relative to any prior
@@ -142,6 +186,16 @@
 #define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
 #define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
 
+#define readb_no_log(c)		({ u8  __v = readb_relaxed_no_log(c); __iormb(); __v; })
+#define readw_no_log(c)		({ u16 __v = readw_relaxed_no_log(c); __iormb(); __v; })
+#define readl_no_log(c)		({ u32 __v = readl_relaxed_no_log(c); __iormb(); __v; })
+#define readq_no_log(c)		({ u64 __v = readq_relaxed_no_log(c); __iormb(); __v; })
+
+#define writeb_no_log(v, c)		({ __iowmb(); writeb_relaxed_no_log((v), (c)); })
+#define writew_no_log(v, c)		({ __iowmb(); writew_relaxed_no_log((v), (c)); })
+#define writel_no_log(v, c)		({ __iowmb(); writel_relaxed_no_log((v), (c)); })
+#define writeq_no_log(v, c)		({ __iowmb(); writeq_relaxed_no_log((v), (c)); })
+
 /*
  *  I/O port access primitives.
  */
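
The _no_log accessors added above bypass the uncached_logk()/RTB trace. As an illustration only (not part of this patch), a driver might reserve them for a hot polling loop while keeping the logged variants for one-off accesses; the function name, register offsets and status bit below are placeholders.

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>

/* Hypothetical status poll: offsets 0x04/0x08 and BIT(0) are made up. */
static int example_wait_ready(void __iomem *base)
{
	int timeout = 1000;

	/* Hot path: skip RTB logging for the per-iteration status read. */
	while (!(readl_no_log(base + 0x04) & BIT(0))) {
		if (--timeout == 0)
			return -ETIMEDOUT;
		udelay(1);
	}

	/* Cold path: keep the logged write for post-mortem debugging. */
	writel(BIT(0), base + 0x08);
	return 0;
}
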
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b..7803343 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
@@ -54,6 +55,12 @@
 #define SWAPPER_DIR_SIZE	(SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE		(IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE	(PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE	(0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT	SECTION_SHIFT
diff --git a/arch/arm64/include/asm/kryo3xx-arm64-edac.h b/arch/arm64/include/asm/kryo3xx-arm64-edac.h
new file mode 100644
index 0000000..cc59dc0
--- /dev/null
+++ b/arch/arm64/include/asm/kryo3xx-arm64-edac.h
@@ -0,0 +1,22 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ASM_KRYO3xx_EDAC_H
+#define ASM_KRYO3xx_EDAC_H
+
+#if defined(CONFIG_EDAC_KRYO3XX_ARM64)
+void kryo3xx_poll_cache_errors(void *info);
+#else
+static inline void kryo3xx_poll_cache_errors(void *info) { }
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index a501853..52a0e43 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,6 +23,7 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -103,7 +104,7 @@
 	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
-	if (mm != &init_mm)
+	if (mm != &init_mm && !system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
 }
 
@@ -163,21 +164,27 @@
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned.  No registers are touched.  We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
+{
+	if (system_uses_ttbr0_pan()) {
+		BUG_ON(mm->pgd == swapper_pg_dir);
+		task_thread_info(tsk)->ttbr0 =
+			virt_to_phys(mm->pgd) | ASID(mm) << 48;
+	}
+}
+#else
+static inline void update_saved_ttbr0(struct task_struct *tsk,
+				      struct mm_struct *mm)
+{
+}
+#endif
+
+static inline void __switch_mm(struct mm_struct *next)
 {
 	unsigned int cpu = smp_processor_id();
 
-	if (prev == next)
-		return;
-
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
 	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -190,8 +197,24 @@
 	check_and_switch_context(next, cpu);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	if (prev != next)
+		__switch_mm(next);
+
+	/*
+	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
+	 * value may have not been initialised yet (activate_mm caller) or the
+	 * ASID has changed since the last run (following the context switch
+	 * of another thread of the same process).
+	 */
+	update_saved_ttbr0(tsk, next);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)	switch_mm(prev, next, current)
 
 void verify_cpu_asid_bits(void);
 
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index ada08b5..458773a 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,6 +21,8 @@
 
 #include <uapi/asm/ptrace.h>
 
+#define _PSR_PAN_BIT		22
+
 /* Current Exception Level values, as contained in CurrentEL */
 #define CurrentEL_EL1		(1 << 2)
 #define CurrentEL_EL2		(2 << 2)
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index cae331d..55082cc 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -127,7 +127,6 @@
 
 	asm volatile(ARM64_LSE_ATOMIC_INSN(
 	/* LL/SC */
-	"	prfm	pstl1strm, %2\n"
 	"1:	ldaxr	%w0, %2\n"
 	"	eor	%w1, %w0, %w0, ror #16\n"
 	"	cbnz	%w1, 2f\n"
diff --git a/arch/arm64/include/asm/system_misc.h b/arch/arm64/include/asm/system_misc.h
index bc81243..b64410c 100644
--- a/arch/arm64/include/asm/system_misc.h
+++ b/arch/arm64/include/asm/system_misc.h
@@ -44,6 +44,7 @@
 extern void __show_regs(struct pt_regs *);
 
 extern void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+extern char* (*arch_read_hardware_id)(void);
 
 #define show_unhandled_signals_ratelimited()				\
 ({									\
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index e9ea5a6..8b2703e 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -47,6 +47,9 @@
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	mm_segment_t		addr_limit;	/* address limit */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	u64			ttbr0;		/* saved TTBR0_EL1 */
+#endif
 	struct task_struct	*task;		/* main task structure */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 	int			cpu;		/* cpu */
@@ -122,6 +125,7 @@
 #define TIF_RESTORE_SIGMASK	20
 #define TIF_SINGLESTEP		21
 #define TIF_32BIT		22	/* 32bit process */
+#define TIF_MM_RELEASED		24
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 55d0adb..9e06272 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -18,6 +18,8 @@
 #ifndef __ASM_UACCESS_H
 #define __ASM_UACCESS_H
 
+#ifndef __ASSEMBLY__
+
 /*
  * User space memory access functions
  */
@@ -28,6 +30,7 @@
 
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 #include <asm/errno.h>
@@ -120,6 +123,85 @@
 	"	.popsection\n"
 
 /*
+ * User access enabling/disabling.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void uaccess_ttbr0_disable(void)
+{
+	unsigned long ttbr;
+
+	/* reserved_ttbr0 placed at the end of swapper_pg_dir */
+	ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+	write_sysreg(ttbr, ttbr0_el1);
+	isb();
+}
+
+static inline void uaccess_ttbr0_enable(void)
+{
+	unsigned long flags;
+
+	/*
+	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
+	 * variable and the MSR. A context switch could trigger an ASID
+	 * roll-over and an update of 'ttbr0'.
+	 */
+	local_irq_save(flags);
+	write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+	isb();
+	local_irq_restore(flags);
+}
+#else
+static inline void uaccess_ttbr0_disable(void)
+{
+}
+
+static inline void uaccess_ttbr0_enable(void)
+{
+}
+#endif
+
+#define __uaccess_disable(alt)						\
+do {									\
+	if (system_uses_ttbr0_pan())					\
+		uaccess_ttbr0_disable();				\
+	else								\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
+				CONFIG_ARM64_PAN));			\
+} while (0)
+
+#define __uaccess_enable(alt)						\
+do {									\
+	if (system_uses_ttbr0_pan())					\
+		uaccess_ttbr0_enable();					\
+	else								\
+		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
+				CONFIG_ARM64_PAN));			\
+} while (0)
+
+static inline void uaccess_disable(void)
+{
+	__uaccess_disable(ARM64_HAS_PAN);
+}
+
+static inline void uaccess_enable(void)
+{
+	__uaccess_enable(ARM64_HAS_PAN);
+}
+
+/*
+ * These functions are no-ops when UAO is present.
+ */
+static inline void uaccess_disable_not_uao(void)
+{
+	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+static inline void uaccess_enable_not_uao(void)
+{
+	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
+}
+
+/*
  * The "__xxx" versions of the user access functions do not verify the address
  * space - it must have been done previously with a separate "access_ok()"
  * call.
@@ -146,8 +228,7 @@
 do {									\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__get_user_asm("ldrb", "ldtrb", "%w", __gu_val, (ptr),  \
@@ -168,9 +249,8 @@
 	default:							\
 		BUILD_BUG();						\
 	}								\
+	uaccess_disable_not_uao();					\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
 } while (0)
 
 #define __get_user(x, ptr)						\
@@ -215,8 +295,7 @@
 do {									\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
+	uaccess_enable_not_uao();					\
 	switch (sizeof(*(ptr))) {					\
 	case 1:								\
 		__put_user_asm("strb", "sttrb", "%w", __pu_val, (ptr),	\
@@ -237,8 +316,7 @@
 	default:							\
 		BUILD_BUG();						\
 	}								\
-	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_ALT_PAN_NOT_UAO,\
-			CONFIG_ARM64_PAN));				\
+	uaccess_disable_not_uao();					\
 } while (0)
 
 #define __put_user(x, ptr)						\
@@ -331,4 +409,73 @@
 extern __must_check long strlen_user(const char __user *str);
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+#else	/* __ASSEMBLY__ */
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/kernel-pgtable.h>
+
+/*
+ * User access enabling/disabling macros.
+ */
+	.macro	uaccess_ttbr0_disable, tmp1
+	mrs	\tmp1, ttbr1_el1		// swapper_pg_dir
+	add	\tmp1, \tmp1, #SWAPPER_DIR_SIZE	// reserved_ttbr0 at the end of swapper_pg_dir
+	msr	ttbr0_el1, \tmp1		// set reserved TTBR0_EL1
+	isb
+	.endm
+
+	.macro	uaccess_ttbr0_enable, tmp1
+	get_thread_info \tmp1
+	ldr	\tmp1, [\tmp1, #TI_TTBR0]	// load saved TTBR0_EL1
+	msr	ttbr0_el1, \tmp1		// set the non-PAN TTBR0_EL1
+	isb
+	.endm
+
+/*
+ * These macros are no-ops when UAO is present.
+ */
+	.macro	uaccess_disable_not_uao, tmp1
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+alternative_if_not ARM64_HAS_PAN
+	uaccess_ttbr0_disable \tmp1
+alternative_else
+	nop
+	nop
+	nop
+	nop
+alternative_endif
+#endif
+alternative_if_not ARM64_ALT_PAN_NOT_UAO
+	nop
+alternative_else
+	SET_PSTATE_PAN(1)
+alternative_endif
+	.endm
+
+	.macro	uaccess_enable_not_uao, tmp1, tmp2
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+alternative_if_not ARM64_HAS_PAN
+	save_and_disable_irq \tmp2		// avoid preemption
+	uaccess_ttbr0_enable \tmp1
+	restore_irq \tmp2
+alternative_else
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+	nop
+alternative_endif
+#endif
+alternative_if_not ARM64_ALT_PAN_NOT_UAO
+	nop
+alternative_else
+	SET_PSTATE_PAN(0)
+alternative_endif
+	.endm
+
+#endif	/* __ASSEMBLY__ */
+
 #endif /* __ASM_UACCESS_H */
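
For readers unfamiliar with the helper pairs introduced above, the open-coded user accessors elsewhere in this patch (futex, SWP emulation) all follow the same bracketing pattern. Below is a minimal sketch, not taken from this patch: it assumes an unprivileged LDTR load and omits the usual exception-table fixup for brevity, so it is not fault-safe as written.

/* Illustrative only: bracket a raw user access with the new helpers. */
static inline int example_load_user_word(u32 *dst, const u32 __user *addr)
{
	u32 val;

	uaccess_enable();		/* clear PSTATE.PAN / install user TTBR0 */
	asm volatile("ldtr %w0, [%1]"	/* unprivileged load from user memory */
		     : "=r" (val)
		     : "r" (addr)
		     : "memory");
	uaccess_disable();		/* set PAN again / reserved TTBR0 */

	*dst = val;
	return 0;
}
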
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index b0988bb..fbdb8bb 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -285,10 +285,10 @@
 #define __SWP_LL_SC_LOOPS	4
 
 #define __user_swpX_asm(data, addr, res, temp, temp2, B)	\
+do {								\
+	uaccess_enable();					\
 	__asm__ __volatile__(					\
 	"	mov		%w3, %w7\n"			\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,	\
-		    CONFIG_ARM64_PAN)				\
 	"0:	ldxr"B"		%w2, [%4]\n"			\
 	"1:	stxr"B"		%w0, %w1, [%4]\n"		\
 	"	cbz		%w0, 2f\n"			\
@@ -306,12 +306,12 @@
 	"	.popsection"					\
 	_ASM_EXTABLE(0b, 4b)					\
 	_ASM_EXTABLE(1b, 4b)					\
-	ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,	\
-		CONFIG_ARM64_PAN)				\
 	: "=&r" (res), "+r" (data), "=&r" (temp), "=&r" (temp2)	\
 	: "r" (addr), "i" (-EAGAIN), "i" (-EFAULT),		\
 	  "i" (__SWP_LL_SC_LOOPS)				\
-	: "memory")
+	: "memory");						\
+	uaccess_disable();					\
+} while (0)
 
 #define __user_swp_asm(data, addr, res, temp, temp2) \
 	__user_swpX_asm(data, addr, res, temp, temp2, "")
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 4a2f0f0..e555321 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -39,6 +39,9 @@
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TI_TTBR0,		offsetof(struct thread_info, ttbr0));
+#endif
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
   BLANK();
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c02504e..debae34a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -47,6 +47,7 @@
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
 EXPORT_SYMBOL(cpu_hwcap_keys);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index b3d5b3e..537eb84 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -33,6 +33,10 @@
 #include <linux/sched.h>
 #include <linux/smp.h>
 #include <linux/delay.h>
+#include <linux/of_fdt.h>
+
+char* (*arch_read_hardware_id)(void);
+EXPORT_SYMBOL(arch_read_hardware_id);
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -158,6 +162,11 @@
 		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
 	}
 
+	if (!arch_read_hardware_id)
+		seq_printf(m, "Hardware\t: %s\n", machine_name);
+	else
+		seq_printf(m, "Hardware\t: %s\n", arch_read_hardware_id());
+
 	return 0;
 }
 
@@ -320,7 +329,8 @@
 	if (l1ip == ICACHE_POLICY_AIVIVT)
 		set_bit(ICACHEF_AIVIVT, &__icache_flags);
 
-	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
+	pr_debug("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip],
+			cpu);
 }
 
 static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 223d54a..16d1b34 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,7 +29,9 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
+#include <asm/ptrace.h>
 #include <asm/thread_info.h>
+#include <asm/uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -109,6 +111,34 @@
 	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
+	 * EL0, there is no need to check the state of TTBR0_EL1 since
+	 * accesses are always enabled.
+	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
+	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
+	 * user mappings.
+	 */
+alternative_if_not ARM64_HAS_PAN
+	nop
+alternative_else
+	b	1f				// skip TTBR0 PAN
+alternative_endif
+
+	.if	\el != 0
+	mrs	x21, ttbr0_el1
+	tst	x21, #0xffff << 48		// Check for the reserved ASID
+	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
+	b.eq	1f				// TTBR0 access already disabled
+	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
+	.endif
+
+	uaccess_ttbr0_disable x21
+1:
+#endif
+
 	stp	x22, x23, [sp, #S_PC]
 
 	/*
@@ -147,6 +177,42 @@
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
+	.endif
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
+	 * PAN bit checking.
+	 */
+alternative_if_not ARM64_HAS_PAN
+	nop
+alternative_else
+	b	2f				// skip TTBR0 PAN
+alternative_endif
+
+	.if	\el != 0
+	tbnz	x22, #_PSR_PAN_BIT, 1f		// Skip re-enabling TTBR0 access if previously disabled
+	.endif
+
+	uaccess_ttbr0_enable x0
+
+	.if	\el == 0
+	/*
+	 * Enable errata workarounds only if returning to user. The only
+	 * workaround currently required for TTBR0_EL1 changes are for the
+	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
+	 * corruption).
+	 */
+	post_ttbr0_update_workaround
+	.endif
+1:
+	.if	\el != 0
+	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
+	.endif
+2:
+#endif
+
+	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
 #ifdef CONFIG_ARM64_ERRATUM_845719
@@ -162,6 +228,7 @@
 alternative_else_nop_endif
 #endif
 	.endif
+
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
 	ldp	x0, x1, [sp, #16 * 0]
@@ -184,10 +251,6 @@
 	eret					// return to kernel
 	.endm
 
-	.macro	get_thread_info, rd
-	mrs	\rd, sp_el0
-	.endm
-
 	.macro	irq_stack_entry
 	mov	x19, sp			// preserve the original sp
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 332e331..c7d26bb 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -326,14 +326,14 @@
 	 * dirty cache lines being evicted.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	bl	__inval_cache_range
 
 	/*
 	 * Clear the idmap and swapper page tables.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
 	stp	xzr, xzr, [x0], #16
@@ -412,7 +412,7 @@
 	 * tables again to remove any speculatively loaded cache lines.
 	 */
 	adrp	x0, idmap_pg_dir
-	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE
+	adrp	x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 	dmb	sy
 	bl	__inval_cache_range
 
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 57ae9d9..6d47969 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -905,8 +905,8 @@
 	 * Initialize & Reset PMNC. Request overflow interrupt for
 	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
 	 */
-	armv8pmu_pmcr_write(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C |
-			    ARMV8_PMU_PMCR_LC);
+	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_P |
+			ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC);
 }
 
 static int armv8_pmuv3_map_event(struct perf_event *event)
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 01753cd..2a47416 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -166,6 +166,70 @@
 	while (1);
 }
 
+/*
+ * dump a block of kernel memory from around the given address
+ */
+static void show_data(unsigned long addr, int nbytes, const char *name)
+{
+	int	i, j;
+	int	nlines;
+	u32	*p;
+
+	/*
+	 * don't attempt to dump non-kernel addresses or
+	 * values that are probably just small negative numbers
+	 */
+	if (addr < PAGE_OFFSET || addr > -256UL)
+		return;
+
+	printk("\n%s: %#lx:\n", name, addr);
+
+	/*
+	 * round address down to a 32 bit boundary
+	 * and always dump a multiple of 32 bytes
+	 */
+	p = (u32 *)(addr & ~(sizeof(u32) - 1));
+	nbytes += (addr & (sizeof(u32) - 1));
+	nlines = (nbytes + 31) / 32;
+
+	for (i = 0; i < nlines; i++) {
+		/*
+		 * just display low 16 bits of address to keep
+		 * each line of the dump < 80 characters
+		 */
+		printk("%04lx ", (unsigned long)p & 0xffff);
+		for (j = 0; j < 8; j++) {
+			u32	data;
+			if (probe_kernel_address(p, data)) {
+				printk(" ********");
+			} else {
+				printk(" %08x", data);
+			}
+			++p;
+		}
+		printk("\n");
+	}
+}
+
+static void show_extra_register_data(struct pt_regs *regs, int nbytes)
+{
+	mm_segment_t fs;
+	unsigned int i;
+
+	fs = get_fs();
+	set_fs(KERNEL_DS);
+	show_data(regs->pc - nbytes, nbytes * 2, "PC");
+	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
+	show_data(regs->sp - nbytes, nbytes * 2, "SP");
+	for (i = 0; i < 30; i++) {
+		char name[4];
+		snprintf(name, sizeof(name), "X%u", i);
+		show_data(regs->regs[i] - nbytes, nbytes * 2, name);
+	}
+	set_fs(fs);
+}
+
 void __show_regs(struct pt_regs *regs)
 {
 	int i, top_reg;
@@ -201,6 +265,8 @@
 
 		pr_cont("\n");
 	}
+	if (!user_mode(regs))
+		show_extra_register_data(regs, 128);
 	printk("\n");
 }
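
The rounding done by show_data() above is easy to check in isolation: the address is aligned down to a 32-bit boundary, the skipped bytes are added back to the requested length, and the dump is emitted in 32-byte lines. A small stand-alone check of that arithmetic (the address is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned long addr = 0xffff000008123457UL;  /* deliberately unaligned */
        int nbytes = 128;
        unsigned long p;
        int nlines;

        /* same arithmetic as show_data() */
        p = addr & ~(sizeof(uint32_t) - 1);
        nbytes += addr & (sizeof(uint32_t) - 1);
        nlines = (nbytes + 31) / 32;

        printf("dump starts at %#lx, %d lines of 32 bytes\n", p, nlines);
        return 0;
    }
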
 
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 42816be..81762dd 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -96,12 +96,12 @@
 	for (i = 0; i < 10; i++) {
 		err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
 		if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
-			pr_info("CPU%d killed.\n", cpu);
+			pr_debug("CPU%d killed.\n", cpu);
 			return 0;
 		}
 
 		msleep(10);
-		pr_info("Retrying again to check for CPU kill\n");
+		pr_debug("Retrying again to check for CPU kill\n");
 	}
 
 	pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f534f49..75f0efe 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -42,6 +42,8 @@
 #include <linux/of_fdt.h>
 #include <linux/efi.h>
 #include <linux/psci.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
 
 #include <asm/acpi.h>
 #include <asm/fixmap.h>
@@ -65,6 +67,7 @@
 
 phys_addr_t __fdt_pointer __initdata;
 
+const char *machine_name;
 /*
  * Standard memory resources
  */
@@ -191,7 +194,11 @@
 			cpu_relax();
 	}
 
-	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
+	machine_name = of_flat_dt_get_machine_name();
+	if (machine_name) {
+		dump_stack_set_arch_desc("%s (DT)", machine_name);
+		pr_info("Machine: %s\n", machine_name);
+	}
 }
 
 static void __init request_standard_resources(void)
@@ -291,6 +298,15 @@
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Make sure init_thread_info.ttbr0 always generates translation
+	 * faults in case uaccess_enable() is inadvertently called by the init
+	 * thread.
+	 */
+	init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#endif
+
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
@@ -351,3 +367,9 @@
 	return 0;
 }
 __initcall(register_kernel_offset_dumper);
+
+void arch_setup_pdev_archdata(struct platform_device *pdev)
+{
+	pdev->archdata.dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.dma_mask = &pdev->archdata.dma_mask;
+}
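
arch_setup_pdev_archdata() above gives every platform device a default 32-bit DMA mask. DMA_BIT_MASK(n) is simply the n-bit all-ones value, with n == 64 special-cased to avoid an out-of-range shift; the quick check below mirrors the kernel's definition:

    #include <stdio.h>

    /* same shape as the kernel's DMA_BIT_MASK() */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    int main(void)
    {
        printf("DMA_BIT_MASK(32) = %#llx\n", DMA_BIT_MASK(32)); /* 0xffffffff  */
        printf("DMA_BIT_MASK(64) = %#llx\n", DMA_BIT_MASK(64)); /* all 64 bits */
        return 0;
    }
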
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 8507703..4dd2704 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -219,6 +219,8 @@
 
 	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
 
+	pr_debug("CPU%u: Booted secondary processor\n", cpu);
+
 	/*
 	 * TTBR0 is only used for the identity mapping at this stage. Make it
 	 * point to zero page to avoid speculatively fetching new entries.
@@ -341,7 +343,7 @@
 		pr_crit("CPU%u: cpu didn't die\n", cpu);
 		return;
 	}
-	pr_notice("CPU%u: shutdown\n", cpu);
+	pr_debug("CPU%u: shutdown\n", cpu);
 
 	/*
 	 * Now that the dying CPU is beyond the point of no return w.r.t.
@@ -811,7 +813,7 @@
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
-	set_cpu_online(cpu, false);
+	set_cpu_active(cpu, false);
 
 	local_irq_disable();
 
@@ -913,10 +915,10 @@
 
 	/* Wait up to one second for other CPUs to stop */
 	timeout = USEC_PER_SEC;
-	while (num_online_cpus() > 1 && timeout--)
+	while (num_active_cpus() > 1 && timeout--)
 		udelay(1);
 
-	if (num_online_cpus() > 1)
+	if (num_active_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs %*pbl\n",
 			   cpumask_pr_args(cpu_online_mask));
 }
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index c9986b3..a2e2118 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -248,8 +248,6 @@
 		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
 
 	if (!user_mode(regs)) {
-		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
-			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
 		dump_backtrace(regs, tsk);
 		dump_instr(KERN_EMERG, regs);
 	}
@@ -257,7 +255,55 @@
 	return ret;
 }
 
-static DEFINE_RAW_SPINLOCK(die_lock);
+static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static int die_owner = -1;
+static unsigned int die_nest_count;
+
+static unsigned long oops_begin(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	oops_enter();
+
+	/* racy, but better than risking deadlock. */
+	raw_local_irq_save(flags);
+	cpu = smp_processor_id();
+	if (!arch_spin_trylock(&die_lock)) {
+		if (cpu == die_owner)
+			/* nested oops. should stop eventually */;
+		else
+			arch_spin_lock(&die_lock);
+	}
+	die_nest_count++;
+	die_owner = cpu;
+	console_verbose();
+	bust_spinlocks(1);
+	return flags;
+}
+
+static void oops_end(unsigned long flags, struct pt_regs *regs, int notify)
+{
+	if (regs && kexec_should_crash(current))
+		crash_kexec(regs);
+
+	bust_spinlocks(0);
+	die_owner = -1;
+	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+	die_nest_count--;
+	if (!die_nest_count)
+		/* Nest count reaches zero, release the lock. */
+		arch_spin_unlock(&die_lock);
+	raw_local_irq_restore(flags);
+	oops_exit();
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception");
+	if (notify != NOTIFY_STOP)
+		do_exit(SIGSEGV);
+}
 
 /*
  * This function is protected against re-entrancy.
@@ -265,29 +311,18 @@
 void die(const char *str, struct pt_regs *regs, int err)
 {
 	struct thread_info *thread = current_thread_info();
+	enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
+	unsigned long flags = oops_begin();
 	int ret;
 
-	oops_enter();
+	if (!user_mode(regs))
+		bug_type = report_bug(regs->pc, regs);
+	if (bug_type != BUG_TRAP_TYPE_NONE)
+		str = "Oops - BUG";
 
-	raw_spin_lock_irq(&die_lock);
-	console_verbose();
-	bust_spinlocks(1);
 	ret = __die(str, err, thread, regs);
 
-	if (regs && kexec_should_crash(thread->task))
-		crash_kexec(regs);
-
-	bust_spinlocks(0);
-	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
-	raw_spin_unlock_irq(&die_lock);
-	oops_exit();
-
-	if (in_interrupt())
-		panic("Fatal exception in interrupt");
-	if (panic_on_oops)
-		panic("Fatal exception");
-	if (ret != NOTIFY_STOP)
-		do_exit(SIGSEGV);
+	oops_end(flags, regs, ret);
 }
 
 void arm64_notify_die(const char *str, struct pt_regs *regs,
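
The oops_begin()/oops_end() pair above exists so that an oops taken while already handling an oops on the same CPU does not deadlock on die_lock: the owner field lets the nested path continue without taking the lock again, and the nest count makes only the outermost exit release it. A user-space toy of the same pattern, with a pthread spinlock standing in for arch_spinlock_t and the thread id standing in for the CPU id (illustrative only, build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_spinlock_t die_lock;
    static pthread_t die_owner;
    static int die_owner_valid;
    static unsigned int die_nest_count;

    static void oops_begin(void)
    {
        if (pthread_spin_trylock(&die_lock) != 0) {
            if (die_owner_valid && pthread_equal(die_owner, pthread_self()))
                ; /* nested oops on this "CPU": keep going without the lock */
            else
                pthread_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = pthread_self();
        die_owner_valid = 1;
    }

    static void oops_end(void)
    {
        die_owner_valid = 0;
        if (--die_nest_count == 0)
            pthread_spin_unlock(&die_lock); /* only the outermost exit unlocks */
    }

    int main(void)
    {
        pthread_spin_init(&die_lock, PTHREAD_PROCESS_PRIVATE);
        oops_begin();
        oops_begin();   /* nested: does not deadlock */
        oops_end();
        oops_end();
        printf("nest count back to %u\n", die_nest_count);
        return 0;
    }
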
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 1105aab..b8deffa 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -216,6 +216,11 @@
 	swapper_pg_dir = .;
 	. += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	reserved_ttbr0 = .;
+	. += RESERVED_TTBR0_SIZE;
+#endif
+
 	_end = .;
 
 	STABS_DEBUG
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 5d1cad3..08b5f18 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -17,10 +17,10 @@
  */
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 	.text
 
@@ -33,8 +33,7 @@
  * Alignment fixed up by hardware.
  */
 ENTRY(__clear_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x2, x3
 	mov	x2, x1			// save the size for fixup return
 	subs	x1, x1, #8
 	b.mi	2f
@@ -54,8 +53,7 @@
 	b.mi	5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:	mov	x0, #0
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x2
 	ret
 ENDPROC(__clear_user)
 
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 4fd67ea..5f8f812 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -16,11 +16,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to a kernel buffer (alignment handled by the hardware)
@@ -67,12 +67,10 @@
 
 end	.req	x5
 ENTRY(__arch_copy_from_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3
 	mov	x0, #0				// Nothing to copy
 	ret
 ENDPROC(__arch_copy_from_user)
diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S
index f7292dd0..9b04ff3 100644
--- a/arch/arm64/lib/copy_in_user.S
+++ b/arch/arm64/lib/copy_in_user.S
@@ -18,11 +18,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy from user space to user space (alignment handled by the hardware)
@@ -68,12 +68,10 @@
 
 end	.req	x5
 ENTRY(__copy_in_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3
 	mov	x0, #0
 	ret
 ENDPROC(__copy_in_user)
diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S
index 7a7efe2..8077e4f 100644
--- a/arch/arm64/lib/copy_to_user.S
+++ b/arch/arm64/lib/copy_to_user.S
@@ -16,11 +16,11 @@
 
 #include <linux/linkage.h>
 
-#include <asm/alternative.h>
 #include <asm/assembler.h>
 #include <asm/cache.h>
 #include <asm/cpufeature.h>
 #include <asm/sysreg.h>
+#include <asm/uaccess.h>
 
 /*
  * Copy to user space from a kernel buffer (alignment handled by the hardware)
@@ -66,12 +66,10 @@
 
 end	.req	x5
 ENTRY(__arch_copy_to_user)
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_enable_not_uao x3, x4
 	add	end, x0, x2
 #include "copy_template.S"
-ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
-	    CONFIG_ARM64_PAN)
+	uaccess_disable_not_uao x3
 	mov	x0, #0
 	ret
 ENDPROC(__arch_copy_to_user)
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 58b5a90..e9a7b3a 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -109,7 +109,7 @@
  *	- start   - virtual start address of region
  *	- size    - size in question
  */
-__dma_inv_area:
+ENTRY(__dma_inv_area)
 	add	x1, x1, x0
 	/* FALLTHROUGH */
 
@@ -156,7 +156,7 @@
  *	- start   - virtual start address of region
  *	- size    - size in question
  */
-__dma_clean_area:
+ENTRY(__dma_clean_area)
 	dcache_by_line_op cvac, sy, x0, x1, x2, x3
 	ret
 ENDPIPROC(__clean_dcache_area_poc)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index efcf1f7..4c63cb1 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -221,7 +221,12 @@
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
-	cpu_switch_mm(mm->pgd, mm);
+	/*
+	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
+	 * emulating PAN.
+	 */
+	if (!system_uses_ttbr0_pan())
+		cpu_switch_mm(mm->pgd, mm);
 }
 
 static int asids_init(void)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 3f74d0d..e9c55ce 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -26,23 +26,47 @@
 #include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/mm.h>
+#include <linux/iommu.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
+#include <linux/io.h>
 
 #include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/dma-iommu.h>
+#include <linux/dma-mapping-fast.h>
+#include <linux/msm_dma_iommu_mapping.h>
 
 static int swiotlb __ro_after_init;
 
 static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
 				 bool coherent)
 {
-	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
+	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		return pgprot_noncached(prot);
+	else if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
 		return pgprot_writecombine(prot);
 	return prot;
 }
 
-static struct gen_pool *atomic_pool;
+static int __get_iommu_pgprot(unsigned long attrs, int prot,
+			      bool coherent)
+{
+	if (!(attrs & DMA_ATTR_EXEC_MAPPING))
+		prot |= IOMMU_NOEXEC;
+	if (attrs & DMA_ATTR_IOMMU_USE_UPSTREAM_HINT)
+		prot |= IOMMU_USE_UPSTREAM_HINT;
+	if (coherent)
+		prot |= IOMMU_CACHE;
 
+	return prot;
+}
+
+static struct gen_pool *atomic_pool;
+#define NO_KERNEL_MAPPING_DUMMY 0x2222
 #define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
 static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
@@ -90,6 +114,42 @@
 	return 1;
 }
 
+static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	struct page *page = virt_to_page(addr);
+	pgprot_t prot = *(pgprot_t *)data;
+
+	set_pte(pte, mk_pte(page, prot));
+	return 0;
+}
+
+static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
+			bool no_kernel_map)
+{
+	unsigned long start = (unsigned long) page_address(page);
+	unsigned long end = start + size;
+	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data);
+
+	if (no_kernel_map)
+		func = __dma_clear_pte;
+	else
+		func = __dma_update_pte;
+
+	apply_to_page_range(&init_mm, start, size, func, &prot);
+	/* ensure prot is applied before returning */
+	mb();
+	flush_tlb_kernel_range(start, end);
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  unsigned long attrs)
@@ -114,6 +174,16 @@
 		*dma_handle = phys_to_dma(dev, page_to_phys(page));
 		addr = page_address(page);
 		memset(addr, 0, size);
+
+		if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
+		    (attrs & DMA_ATTR_STRONGLY_ORDERED)) {
+			/*
+			 * flush the caches here because we can't later
+			 */
+			__dma_flush_area(addr, size);
+			__dma_remap(page, size, __pgprot(0), true);
+		}
+
 		return addr;
 	} else {
 		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
@@ -127,11 +197,16 @@
 	bool freed;
 	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
 
+	size = PAGE_ALIGN(size);
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return;
 	}
 
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) ||
+	    (attrs & DMA_ATTR_STRONGLY_ORDERED))
+		__dma_remap(phys_to_page(paddr), size, PAGE_KERNEL, false);
+
 	freed = dma_release_from_contiguous(dev,
 					phys_to_page(paddr),
 					size >> PAGE_SHIFT);
@@ -146,7 +221,6 @@
 	struct page *page;
 	void *ptr, *coherent_ptr;
 	bool coherent = is_device_dma_coherent(dev);
-	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
 
 	size = PAGE_ALIGN(size);
 
@@ -168,16 +242,23 @@
 	if (coherent)
 		return ptr;
 
-	/* remove any dirty cache lines on the kernel alias */
-	__dma_flush_area(ptr, size);
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) {
+		coherent_ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
+	} else {
+		pgprot_t prot;
 
-	/* create a coherent mapping */
-	page = virt_to_page(ptr);
-	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
-						   prot, NULL);
-	if (!coherent_ptr)
-		goto no_map;
+		if (!(attrs & DMA_ATTR_STRONGLY_ORDERED))
+			/* remove any dirty cache lines on the kernel alias */
+			__dma_flush_area(ptr, size);
 
+		/* create a coherent mapping */
+		page = virt_to_page(ptr);
+		prot = __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false);
+		coherent_ptr = dma_common_contiguous_remap(
+					page, size, VM_USERMAP, prot, NULL);
+		if (!coherent_ptr)
+			goto no_map;
+	}
 	return coherent_ptr;
 
 no_map:
@@ -198,7 +279,8 @@
 	if (!is_device_dma_coherent(dev)) {
 		if (__free_from_pool(vaddr, size))
 			return;
-		vunmap(vaddr);
+		if (!(attrs & DMA_ATTR_NO_KERNEL_MAPPING))
+			vunmap(vaddr);
 	}
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
@@ -352,6 +434,55 @@
 	return 1;
 }
 
+static void *arm64_dma_remap(struct device *dev, void *cpu_addr,
+			dma_addr_t handle, size_t size,
+			unsigned long attrs)
+{
+	struct page *page = phys_to_page(dma_to_phys(dev, handle));
+	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+	unsigned long offset = handle & ~PAGE_MASK;
+	struct vm_struct *area;
+	unsigned long addr;
+
+	size = PAGE_ALIGN(size + offset);
+
+	/*
+	 * DMA allocations can be mapped to user space, so let's set the
+	 * VM_USERMAP flag too.
+	 */
+	area = get_vm_area(size, VM_USERMAP);
+	if (!area)
+		return NULL;
+
+	addr = (unsigned long)area->addr;
+	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
+		vunmap((void *)addr);
+		return NULL;
+	}
+	return (void *)addr + offset;
+}
+
+static void arm64_dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size)
+{
+	struct vm_struct *area;
+
+	remapped_addr = (void *)((unsigned long)remapped_addr & PAGE_MASK);
+
+	area = find_vm_area(remapped_addr);
+	if (!area) {
+		WARN(1, "trying to free invalid coherent area: %p\n",
+			remapped_addr);
+		return;
+	}
+	vunmap(remapped_addr);
+	flush_tlb_kernel_range((unsigned long)remapped_addr,
+			(unsigned long)(remapped_addr + size));
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.alloc = __dma_alloc,
 	.free = __dma_free,
@@ -367,6 +498,8 @@
 	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
 	.dma_supported = __swiotlb_dma_supported,
 	.mapping_error = swiotlb_dma_mapping_error,
+	.remap = arm64_dma_remap,
+	.unremap = arm64_dma_unremap,
 };
 
 static int __init atomic_pool_init(void)
@@ -417,7 +550,7 @@
 	goto out;
 
 remove_mapping:
-	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP, false);
 destroy_genpool:
 	gen_pool_destroy(atomic_pool);
 	atomic_pool = NULL;
@@ -438,6 +571,7 @@
 			   dma_addr_t *dma_handle, gfp_t flags,
 			   unsigned long attrs)
 {
+	WARN(1, "dma alloc failure, device may be missing a call to arch_setup_dma_ops");
 	return NULL;
 }
 
@@ -638,7 +772,7 @@
 		if (WARN_ON(!area || !area->pages))
 			return;
 		iommu_dma_free(dev, area->pages, iosize, &handle);
-		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, false);
 	} else {
 		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
@@ -963,3 +1097,895 @@
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
 }
+EXPORT_SYMBOL(arch_setup_dma_ops);
+
+#ifdef CONFIG_ARM64_DMA_USE_IOMMU
+
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	__dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+	size_t size, enum dma_data_direction dir)
+{
+	__dma_unmap_area(page_address(page) + off, size, dir);
+
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+		return -EIO;
+
+	*dev->dma_mask = dma_mask;
+
+	return 0;
+}
+
+/* IOMMU */
+
+static void __dma_clear_buffer(struct page *page, size_t size,
+			       unsigned long attrs, bool is_coherent)
+{
+	/*
+	 * Ensure that the allocated pages are zeroed, and that any data
+	 * lurking in the kernel direct-mapped region is invalidated.
+	 */
+	void *ptr = page_address(page);
+
+	if (!(attrs & DMA_ATTR_SKIP_ZEROING))
+		memset(ptr, 0, size);
+	if (!is_coherent)
+		__dma_flush_area(ptr, size);
+}
+
+static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
+				      size_t size)
+{
+	unsigned int order = get_order(size);
+	unsigned int align = 0;
+	unsigned int count, start;
+	unsigned long flags;
+
+	if (order > CONFIG_ARM64_DMA_IOMMU_ALIGNMENT)
+		order = CONFIG_ARM64_DMA_IOMMU_ALIGNMENT;
+
+	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	align = (1 << order) - 1;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
+					   count, align);
+	if (start > mapping->bits) {
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		return DMA_ERROR_CODE;
+	}
+
+	bitmap_set(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	return mapping->base + (start << PAGE_SHIFT);
+}
+
+static inline void __free_iova(struct dma_iommu_mapping *mapping,
+			       dma_addr_t addr, size_t size)
+{
+	unsigned int start = (addr - mapping->base) >> PAGE_SHIFT;
+	unsigned int count = size >> PAGE_SHIFT;
+	unsigned long flags;
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	bitmap_clear(mapping->bitmap, start, count);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
+static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
+					  gfp_t gfp, unsigned long attrs)
+{
+	struct page **pages;
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
+	int i = 0;
+	bool is_coherent = is_device_dma_coherent(dev);
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		unsigned long order = get_order(size);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, count, order);
+		if (!page)
+			goto error;
+
+		__dma_clear_buffer(page, size, attrs, is_coherent);
+
+		for (i = 0; i < count; i++)
+			pages[i] = page + i;
+
+		return pages;
+	}
+
+	/*
+	 * The IOMMU can map any pages, so highmem pages can also be used here
+	 */
+	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+	while (count) {
+		int j, order = __fls(count);
+
+		pages[i] = alloc_pages(gfp, order);
+		while (!pages[i] && order)
+			pages[i] = alloc_pages(gfp, --order);
+		if (!pages[i])
+			goto error;
+
+		if (order) {
+			split_page(pages[i], order);
+			j = 1 << order;
+			while (--j)
+				pages[i + j] = pages[i] + j;
+		}
+
+		__dma_clear_buffer(pages[i], PAGE_SIZE << order, attrs,
+				   is_coherent);
+		i += 1 << order;
+		count -= 1 << order;
+	}
+
+	return pages;
+error:
+	while (i--)
+		if (pages[i])
+			__free_pages(pages[i], 0);
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return NULL;
+}
+
+static int __iommu_free_buffer(struct device *dev, struct page **pages,
+			       size_t size, unsigned long attrs)
+{
+	int count = size >> PAGE_SHIFT;
+	int array_size = count * sizeof(struct page *);
+	int i;
+
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		dma_release_from_contiguous(dev, pages[0], count);
+	} else {
+		for (i = 0; i < count; i++)
+			if (pages[i])
+				__free_pages(pages[i], 0);
+	}
+
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);
+	return 0;
+}
+
+/*
+ * Create a CPU mapping for the specified pages
+ */
+static void *
+__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
+		    const void *caller)
+{
+	return dma_common_pages_remap(pages, size, VM_USERMAP, prot, caller);
+}
+
+/*
+ * Create a mapping in the device IO address space for the specified pages
+ */
+static dma_addr_t __iommu_create_mapping(struct device *dev,
+					struct page **pages, size_t size,
+					unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	dma_addr_t dma_addr, iova;
+	int i, ret;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	dma_addr = __alloc_iova(mapping, size);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_device_dma_coherent(dev));
+
+	iova = dma_addr;
+	for (i = 0; i < count; ) {
+		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
+		phys_addr_t phys = page_to_phys(pages[i]);
+		unsigned int len, j;
+
+		for (j = i + 1; j < count; j++, next_pfn++)
+			if (page_to_pfn(pages[j]) != next_pfn)
+				break;
+
+		len = (j - i) << PAGE_SHIFT;
+		ret = iommu_map(mapping->domain, iova, phys, len, prot);
+		if (ret < 0)
+			goto fail;
+		iova += len;
+		i = j;
+	}
+	return dma_addr;
+fail:
+	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
+	__free_iova(mapping, dma_addr, size);
+	return DMA_ERROR_CODE;
+}
+
+static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova,
+				size_t size)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+
+	/*
+	 * add optional in-page offset from iova to size and align
+	 * result to page size
+	 */
+	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, size);
+	__free_iova(mapping, iova, size);
+	return 0;
+}
+
+static struct page **__atomic_get_pages(void *addr)
+{
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
+
+	return (struct page **)page;
+}
+
+static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
+{
+	struct vm_struct *area;
+
+	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+		return __atomic_get_pages(cpu_addr);
+
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+		return cpu_addr;
+
+	area = find_vm_area(cpu_addr);
+	if (area)
+		return area->pages;
+	return NULL;
+}
+
+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+				  dma_addr_t *handle, gfp_t gfp,
+				  unsigned long attrs)
+{
+	struct page *page;
+	struct page **pages;
+	size_t count = size >> PAGE_SHIFT;
+	size_t array_size = count * sizeof(struct page *);
+	int i;
+	void *addr;
+	bool coherent = is_device_dma_coherent(dev);
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+
+	if (!pages)
+		return NULL;
+
+	if (coherent) {
+		page = alloc_pages(gfp, get_order(size));
+		addr = page ? page_address(page) : NULL;
+	} else {
+		addr = __alloc_from_pool(size, &page, gfp);
+	}
+
+	if (!addr)
+		goto err_free;
+
+	for (i = 0; i < count ; i++)
+		pages[i] = page + i;
+
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_mapping;
+
+	kvfree(pages);
+	return addr;
+
+err_mapping:
+	if (coherent)
+		__free_pages(page, get_order(size));
+	else
+		__free_from_pool(addr, size);
+err_free:
+	kvfree(pages);
+	return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
+				dma_addr_t handle, size_t size)
+{
+	__iommu_remove_mapping(dev, handle, size);
+	__free_from_pool(cpu_addr, size);
+}
+
+static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+	    dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
+{
+	bool coherent = is_device_dma_coherent(dev);
+	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+	struct page **pages;
+	void *addr = NULL;
+
+	*handle = DMA_ERROR_CODE;
+	size = PAGE_ALIGN(size);
+
+	if (!gfpflags_allow_blocking(gfp))
+		return __iommu_alloc_atomic(dev, size, handle, gfp, attrs);
+
+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
+	gfp &= ~(__GFP_COMP);
+
+	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
+	if (!pages)
+		return NULL;
+
+	*handle = __iommu_create_mapping(dev, pages, size, attrs);
+	if (*handle == DMA_ERROR_CODE)
+		goto err_buffer;
+
+	if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+		return pages;
+
+	addr = __iommu_alloc_remap(pages, size, gfp, prot,
+				   __builtin_return_address(0));
+	if (!addr)
+		goto err_mapping;
+
+	return addr;
+
+err_mapping:
+	__iommu_remove_mapping(dev, *handle, size);
+err_buffer:
+	__iommu_free_buffer(dev, pages, size, attrs);
+	return NULL;
+}
+
+static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		    unsigned long attrs)
+{
+	unsigned long uaddr = vma->vm_start;
+	unsigned long usize = vma->vm_end - vma->vm_start;
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	bool coherent = is_device_dma_coherent(dev);
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     coherent);
+
+	if (!pages)
+		return -ENXIO;
+
+	do {
+		int ret = vm_insert_page(vma, uaddr, *pages++);
+
+		if (ret) {
+			pr_err("Remapping memory failed: %d\n", ret);
+			return ret;
+		}
+		uaddr += PAGE_SIZE;
+		usize -= PAGE_SIZE;
+	} while (usize > 0);
+
+	return 0;
+}
+
+/*
+ * free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
+ */
+void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+			  dma_addr_t handle, unsigned long attrs)
+{
+	struct page **pages;
+
+	size = PAGE_ALIGN(size);
+
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, cpu_addr, handle, size);
+		return;
+	}
+
+	pages = __iommu_get_pages(cpu_addr, attrs);
+	if (!pages) {
+		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+		return;
+	}
+
+	if (!(attrs & DMA_ATTR_NO_KERNEL_MAPPING))
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP, true);
+
+	__iommu_remove_mapping(dev, handle, size);
+	__iommu_free_buffer(dev, pages, size, attrs);
+}
+
+int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t dma_addr,
+				 size_t size, unsigned long attrs)
+{
+	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+
+	if (!pages)
+		return -ENXIO;
+
+	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
+					 GFP_KERNEL);
+}
+
+static int __dma_direction_to_prot(enum dma_data_direction dir)
+{
+	int prot;
+
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	return prot;
+}
+
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
+{
+	struct scatterlist *s;
+	int ret, i;
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = 0, current_offset = 0;
+	dma_addr_t iova;
+	int prot = __dma_direction_to_prot(dir);
+
+	for_each_sg(sg, s, nents, i)
+		total_length += s->length;
+
+	iova = __alloc_iova(mapping, total_length);
+	if (iova == DMA_ERROR_CODE) {
+		dev_err(dev, "Couldn't allocate iova for sg %p\n", sg);
+		return 0;
+	}
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_device_dma_coherent(dev));
+
+	ret = iommu_map_sg(mapping->domain, iova, sg, nents, prot);
+	if (ret != total_length) {
+		__free_iova(mapping, iova, total_length);
+		return 0;
+	}
+
+	for_each_sg(sg, s, nents, i) {
+		s->dma_address = iova + current_offset;
+		s->dma_length = total_length - current_offset;
+		current_offset += s->length;
+	}
+
+	return nents;
+}
+
+/**
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ *
+ * Unmap a set of streaming mode DMA translations.  Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	unsigned int total_length = sg_dma_len(sg);
+	dma_addr_t iova = sg_dma_address(sg);
+
+	total_length = PAGE_ALIGN((iova & ~PAGE_MASK) + total_length);
+	iova &= PAGE_MASK;
+
+	iommu_unmap(mapping->domain, iova, total_length);
+	__free_iova(mapping, iova, total_length);
+}
+
+/**
+ * arm_iommu_sync_sg_for_cpu
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	if (is_device_dma_coherent(dev))
+		return;
+
+	for_each_sg(sg, s, nents, i)
+		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
+
+}
+
+/**
+ * arm_iommu_sync_sg_for_device
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir)
+{
+	struct scatterlist *s;
+	int i;
+
+	if (is_device_dma_coherent(dev))
+		return;
+
+	for_each_sg(sg, s, nents, i)
+		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
+}
+
+
+/**
+ * arm_coherent_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * Coherent IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_coherent_iommu_map_page(struct device *dev,
+	     struct page *page, unsigned long offset, size_t size,
+	     enum dma_data_direction dir, unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t dma_addr;
+	int ret, prot, len = PAGE_ALIGN(size + offset);
+
+	dma_addr = __alloc_iova(mapping, len);
+	if (dma_addr == DMA_ERROR_CODE)
+		return dma_addr;
+
+	prot = __dma_direction_to_prot(dir);
+	prot = __get_iommu_pgprot(attrs, prot,
+				  is_device_dma_coherent(dev));
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
+			prot);
+	if (ret < 0)
+		goto fail;
+
+	return dma_addr + offset;
+fail:
+	__free_iova(mapping, dma_addr, len);
+	return DMA_ERROR_CODE;
+}
+
+/**
+ * arm_iommu_map_page
+ * @dev: valid struct device pointer
+ * @page: page that buffer resides in
+ * @offset: offset into page for start of buffer
+ * @size: size of buffer to map
+ * @dir: DMA transfer direction
+ *
+ * IOMMU aware version of arm_dma_map_page()
+ */
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
+	     unsigned long offset, size_t size, enum dma_data_direction dir,
+	     unsigned long attrs)
+{
+	if (!is_device_dma_coherent(dev) &&
+	      !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
+	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
+}
+
+/**
+ * arm_iommu_unmap_page
+ * @dev: valid struct device pointer
+ * @handle: DMA address of buffer
+ * @size: size of buffer (same as passed to dma_map_page)
+ * @dir: DMA transfer direction (same as passed to dma_map_page)
+ *
+ * IOMMU aware version of arm_dma_unmap_page()
+ */
+static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
+	int offset = handle & ~PAGE_MASK;
+	int len = PAGE_ALIGN(size + offset);
+
+	if (!iova)
+		return;
+
+	if (!(is_device_dma_coherent(dev) ||
+	      (attrs & DMA_ATTR_SKIP_CPU_SYNC)))
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+
+	iommu_unmap(mapping->domain, iova, len);
+	__free_iova(mapping, iova, len);
+}
+
+static void arm_iommu_sync_single_for_cpu(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+
+	if (!iova)
+		return;
+
+	if (!is_device_dma_coherent(dev))
+		__dma_page_dev_to_cpu(page, offset, size, dir);
+}
+
+static void arm_iommu_sync_single_for_device(struct device *dev,
+		dma_addr_t handle, size_t size, enum dma_data_direction dir)
+{
+	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
+	dma_addr_t iova = handle & PAGE_MASK;
+	struct page *page = phys_to_page(iommu_iova_to_phys(
+						mapping->domain, iova));
+	unsigned int offset = handle & ~PAGE_MASK;
+
+	if (!iova)
+		return;
+
+	if (!is_device_dma_coherent(dev))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+}
+
+static int arm_iommu_mapping_error(struct device *dev,
+				   dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
+
+const struct dma_map_ops iommu_ops = {
+	.alloc		= arm_iommu_alloc_attrs,
+	.free		= arm_iommu_free_attrs,
+	.mmap		= arm_iommu_mmap_attrs,
+	.get_sgtable	= arm_iommu_get_sgtable,
+
+	.map_page		= arm_iommu_map_page,
+	.unmap_page		= arm_iommu_unmap_page,
+	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
+	.sync_single_for_device	= arm_iommu_sync_single_for_device,
+
+	.map_sg			= arm_iommu_map_sg,
+	.unmap_sg		= arm_iommu_unmap_sg,
+	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
+	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
+
+	.set_dma_mask		= arm_dma_set_mask,
+	.mapping_error		= arm_iommu_mapping_error,
+};
+
+/**
+ * arm_iommu_create_mapping
+ * @bus: pointer to the bus holding the client device (for IOMMU calls)
+ * @base: start address of the valid IO address space
+ * @size: maximum size of the valid IO address space
+ *
+ * Creates a mapping structure which holds information about used/unused
+ * IO address ranges, which is required to perform memory allocation and
+ * mapping with IOMMU aware functions.
+ *
+ * The client device needs to be attached to the mapping with the
+ * arm_iommu_attach_device() function.
+ */
+struct dma_iommu_mapping *
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+{
+	unsigned int bits = size >> PAGE_SHIFT;
+	unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
+	struct dma_iommu_mapping *mapping;
+	int err = -ENOMEM;
+
+	if (!bitmap_size)
+		return ERR_PTR(-EINVAL);
+
+	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
+	if (!mapping)
+		goto err;
+
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!mapping->bitmap)
+		goto err2;
+
+	mapping->base = base;
+	mapping->bits = bits;
+	spin_lock_init(&mapping->lock);
+
+	mapping->domain = iommu_domain_alloc(bus);
+	if (!mapping->domain)
+		goto err3;
+
+	kref_init(&mapping->kref);
+	return mapping;
+err3:
+	kfree(mapping->bitmap);
+err2:
+	kfree(mapping);
+err:
+	return ERR_PTR(err);
+}
+EXPORT_SYMBOL(arm_iommu_create_mapping);
+
+static void release_iommu_mapping(struct kref *kref)
+{
+	struct dma_iommu_mapping *mapping =
+		container_of(kref, struct dma_iommu_mapping, kref);
+
+	iommu_domain_free(mapping->domain);
+	kfree(mapping->bitmap);
+	kfree(mapping);
+}
+
+void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
+{
+	if (mapping)
+		kref_put(&mapping->kref, release_iommu_mapping);
+}
+EXPORT_SYMBOL(arm_iommu_release_mapping);
+
+/**
+ * arm_iommu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	arm_iommu_create_mapping)
+ *
+ * Attaches the specified IO address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int arm_iommu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int err;
+	int s1_bypass = 0, is_fast = 0;
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	if (is_fast)
+		return fast_smmu_attach_device(dev, mapping);
+
+	err = iommu_attach_device(mapping->domain, dev);
+	if (err)
+		return err;
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+
+	kref_get(&mapping->kref);
+	dev->archdata.mapping = mapping;
+	if (!s1_bypass)
+		set_dma_ops(dev, &iommu_ops);
+
+	pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+	return 0;
+}
+EXPORT_SYMBOL(arm_iommu_attach_device);
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+	struct dma_iommu_mapping *mapping;
+	int is_fast, s1_bypass = 0;
+
+	mapping = to_dma_iommu_mapping(dev);
+	if (!mapping) {
+		dev_warn(dev, "Not attached\n");
+		return;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_FAST, &is_fast);
+	if (is_fast) {
+		fast_smmu_detach_device(dev, mapping);
+		return;
+	}
+
+	iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_S1_BYPASS,
+					&s1_bypass);
+
+	if (msm_dma_unmap_all_for_dev(dev))
+		dev_warn(dev, "IOMMU detach with outstanding mappings\n");
+
+	iommu_detach_device(mapping->domain, dev);
+	kref_put(&mapping->kref, release_iommu_mapping);
+	dev->archdata.mapping = NULL;
+	if (!s1_bypass)
+		set_dma_ops(dev, NULL);
+
+	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
+}
+EXPORT_SYMBOL(arm_iommu_detach_device);
+
+#endif
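
The __alloc_iova()/__free_iova() helpers above hand out IO virtual addresses from a per-mapping bitmap, one bit per page, looking for the first free run of the requested length (with the allocation order capped by CONFIG_ARM64_DMA_IOMMU_ALIGNMENT). The user-space model below sketches the same first-fit bitmap scheme with plain arrays instead of the kernel bitmap helpers; every name and constant here is invented for the sketch:

    #include <stdio.h>
    #include <string.h>

    #define IOVA_BASE  0x10000000UL
    #define PAGE_SHIFT 12
    #define NR_PAGES   64

    static unsigned char bitmap[NR_PAGES];  /* one byte per page, for simplicity */

    /* first-fit allocation of 'count' contiguous pages; returns an IOVA or 0 */
    static unsigned long alloc_iova(unsigned int count)
    {
        for (unsigned int start = 0; start + count <= NR_PAGES; start++) {
            unsigned int i;

            for (i = 0; i < count && !bitmap[start + i]; i++)
                ;
            if (i == count) {
                memset(bitmap + start, 1, count);
                return IOVA_BASE + ((unsigned long)start << PAGE_SHIFT);
            }
        }
        return 0;
    }

    static void free_iova(unsigned long iova, unsigned int count)
    {
        unsigned long start = (iova - IOVA_BASE) >> PAGE_SHIFT;

        memset(bitmap + start, 0, count);
    }

    int main(void)
    {
        unsigned long a = alloc_iova(4);
        unsigned long b = alloc_iova(2);

        printf("a = %#lx, b = %#lx\n", a, b);
        free_iova(a, 4);
        printf("after freeing a, 4 pages again at %#lx\n", alloc_iova(4));
        return 0;
    }
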
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0f87883..163c5f2 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,6 +40,7 @@
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/kryo3xx-arm64-edac.h>
 
 static const char *fault_name(unsigned int esr);
 
@@ -269,13 +270,19 @@
 	return fault;
 }
 
-static inline bool is_permission_fault(unsigned int esr)
+static inline bool is_permission_fault(unsigned int esr, struct pt_regs *regs)
 {
 	unsigned int ec       = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM) ||
-	       (ec == ESR_ELx_EC_IABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
+	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+		return false;
+
+	if (system_uses_ttbr0_pan())
+		return fsc_type == ESR_ELx_FSC_FAULT &&
+			(regs->pstate & PSR_PAN_BIT);
+	else
+		return fsc_type == ESR_ELx_FSC_PERM;
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -315,7 +322,7 @@
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	if (is_permission_fault(esr) && (addr < USER_DS)) {
+	if (addr < USER_DS && is_permission_fault(esr, regs)) {
 		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -478,6 +485,7 @@
  */
 static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
+	kryo3xx_poll_cache_errors(NULL);
 	return 1;
 }
 
@@ -507,10 +515,10 @@
 	{ do_bad,		SIGBUS,  0,		"unknown 17"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 18"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 19"			},
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
-	{ do_bad,		SIGBUS,  0,		"synchronous abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
+	{ do_bad,		SIGBUS,  0,		"synchronous external abort (translation table walk)" },
 	{ do_bad,		SIGBUS,  0,		"synchronous parity error"	},
 	{ do_bad,		SIGBUS,  0,		"unknown 25"			},
 	{ do_bad,		SIGBUS,  0,		"unknown 26"			},
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 212c4d1..af38d02 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -145,9 +145,11 @@
 #endif /* CONFIG_NUMA */
 
 #ifdef CONFIG_HAVE_ARCH_PFN_VALID
+#define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)
+
 int pfn_valid(unsigned long pfn)
 {
-	return memblock_is_map_memory(pfn << PAGE_SHIFT);
+	return (pfn & PFN_MASK) == pfn && memblock_is_map_memory(pfn << PAGE_SHIFT);
 }
 EXPORT_SYMBOL(pfn_valid);
 #endif
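
The PFN_MASK test added to pfn_valid() above rejects page frame numbers that would lose bits once shifted left by PAGE_SHIFT, since only 64 - PAGE_SHIFT bits of a physical address can come from the pfn. Assuming a 64-bit unsigned long (as on arm64) and 4K pages, the arithmetic looks like this:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_MASK ((1UL << (64 - PAGE_SHIFT)) - 1)

    int main(void)
    {
        unsigned long good = 0x80000UL;             /* fits within 52 bits */
        unsigned long bad  = 0x10000000000000UL;    /* bit 52 set: shifting by 12 would overflow */

        printf("mask     = %#lx\n", PFN_MASK);
        printf("good pfn : %s\n", (good & PFN_MASK) == good ? "accepted" : "rejected");
        printf("bad pfn  : %s\n", (bad  & PFN_MASK) == bad  ? "accepted" : "rejected");
        return 0;
    }
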
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 352c73b..c2adb0c 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -136,11 +136,7 @@
 	bfi	x0, x1, #48, #16		// set the ASID
 	msr	ttbr0_el1, x0			// set TTBR0
 	isb
-alternative_if ARM64_WORKAROUND_CAVIUM_27456
-	ic	iallu
-	dsb	nsh
-	isb
-alternative_else_nop_endif
+	post_ttbr0_update_workaround
 	ret
 ENDPROC(cpu_do_switch_mm)
 
diff --git a/arch/arm64/xen/hypercall.S b/arch/arm64/xen/hypercall.S
index 329c802..a23b2e8 100644
--- a/arch/arm64/xen/hypercall.S
+++ b/arch/arm64/xen/hypercall.S
@@ -49,6 +49,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/uaccess.h>
 #include <xen/interface/xen.h>
 
 
@@ -91,6 +92,24 @@
 	mov x2, x3
 	mov x3, x4
 	mov x4, x5
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Privcmd calls are issued by userspace. The kernel needs to
+	 * enable access to TTBR0_EL1 as the hypervisor would issue stage 1
+	 * translations to user memory via AT instructions. Since AT
+	 * instructions are not affected by the PAN bit (ARMv8.1), we only
+	 * need the explicit uaccess_enable/disable if the TTBR0 PAN emulation
+	 * is enabled (which implies that hardware UAO and PAN are disabled).
+	 */
+	uaccess_enable_not_uao x6, x7
+#endif
 	hvc XEN_IMM
+
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+	/*
+	 * Disable userspace access from the kernel once the hypercall has
+	 * completed.
+	 */
+	uaccess_disable_not_uao x6
+#endif
 	ret
 ENDPROC(privcmd_call);
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index c5d1785..02bab09 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,13 +1,6 @@
 #ifndef _ASM_X86_IDLE_H
 #define _ASM_X86_IDLE_H
 
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
 #ifdef CONFIG_X86_64
 void enter_idle(void);
 void exit_idle(void);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 0888a87..76629f4 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -67,19 +67,6 @@
 
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
-	atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
-	atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
 /*
@@ -255,14 +242,14 @@
 void enter_idle(void)
 {
 	this_cpu_write(is_idle, 1);
-	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+	idle_notifier_call_chain(IDLE_START);
 }
 
 static void __exit_idle(void)
 {
 	if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
 		return;
-	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+	idle_notifier_call_chain(IDLE_END);
 }
 
 /* Called from interrupts to signify idle end */
diff --git a/block/blk-core.c b/block/blk-core.c
index 14d7c07..df9e160 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -40,6 +40,8 @@
 #include "blk.h"
 #include "blk-mq.h"
 
+#include <linux/math64.h>
+
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
@@ -3547,3 +3549,85 @@
 
 	return 0;
 }
+
+/*
+ * Block IO latency support. We want this to be as cheap as possible, so it
+ * is done locklessly (and without atomics); an occasional off-by-a-few error
+ * in these counters is harmless, and we don't want to do anything that hurts
+ * performance.
+ * TODO: if necessary, make the histograms per-CPU and aggregate them when
+ * printing them out.
+ */
+void
+blk_zero_latency_hist(struct io_latency_state *s)
+{
+	memset(s->latency_y_axis_read, 0,
+	       sizeof(s->latency_y_axis_read));
+	memset(s->latency_y_axis_write, 0,
+	       sizeof(s->latency_y_axis_write));
+	s->latency_reads_elems = 0;
+	s->latency_writes_elems = 0;
+}
+EXPORT_SYMBOL(blk_zero_latency_hist);
+
+ssize_t
+blk_latency_hist_show(struct io_latency_state *s, char *buf)
+{
+	int i;
+	int bytes_written = 0;
+	u_int64_t num_elem, elem;
+	int pct;
+
+	num_elem = s->latency_reads_elems;
+	if (num_elem > 0) {
+		bytes_written += scnprintf(buf + bytes_written,
+			   PAGE_SIZE - bytes_written,
+			   "IO svc_time Read Latency Histogram (n = %llu):\n",
+			   num_elem);
+		for (i = 0;
+		     i < ARRAY_SIZE(latency_x_axis_us);
+		     i++) {
+			elem = s->latency_y_axis_read[i];
+			pct = div64_u64(elem * 100, num_elem);
+			bytes_written += scnprintf(buf + bytes_written,
+						   PAGE_SIZE - bytes_written,
+						   "\t< %5lluus%15llu%15d%%\n",
+						   latency_x_axis_us[i],
+						   elem, pct);
+		}
+		/* Last element in y-axis table is overflow */
+		elem = s->latency_y_axis_read[i];
+		pct = div64_u64(elem * 100, num_elem);
+		bytes_written += scnprintf(buf + bytes_written,
+					   PAGE_SIZE - bytes_written,
+					   "\t> %5dms%15llu%15d%%\n", 10,
+					   elem, pct);
+	}
+	num_elem = s->latency_writes_elems;
+	if (num_elem > 0) {
+		bytes_written += scnprintf(buf + bytes_written,
+			   PAGE_SIZE - bytes_written,
+			   "IO svc_time Write Latency Histogram (n = %llu):\n",
+			   num_elem);
+		for (i = 0;
+		     i < ARRAY_SIZE(latency_x_axis_us);
+		     i++) {
+			elem = s->latency_y_axis_write[i];
+			pct = div64_u64(elem * 100, num_elem);
+			bytes_written += scnprintf(buf + bytes_written,
+						   PAGE_SIZE - bytes_written,
+						   "\t< %5lluus%15llu%15d%%\n",
+						   latency_x_axis_us[i],
+						   elem, pct);
+		}
+		/* Last element in y-axis table is overflow */
+		elem = s->latency_y_axis_write[i];
+		pct = div64_u64(elem * 100, num_elem);
+		bytes_written += scnprintf(buf + bytes_written,
+					   PAGE_SIZE - bytes_written,
+					   "\t> %5dms%15llu%15d%%\n", 10,
+					   elem, pct);
+	}
+	return bytes_written;
+}
+EXPORT_SYMBOL(blk_latency_hist_show);
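
blk_latency_hist_show() above emits one row per boundary in latency_x_axis_us plus a final overflow row, with each percentage computed as bucket * 100 / total. The stand-alone sketch below reproduces just that formatting; the boundaries and counts are made up, and the real latency_x_axis_us table is defined elsewhere in this patch:

    #include <stdio.h>
    #include <inttypes.h>

    /* illustrative boundaries only */
    static const uint64_t latency_x_axis_us[] = { 100, 1000, 10000 };
    #define NBUCKETS (sizeof(latency_x_axis_us) / sizeof(latency_x_axis_us[0]))

    int main(void)
    {
        /* one extra slot at the end: the overflow bucket */
        uint64_t y[NBUCKETS + 1] = { 40, 30, 20, 10 };
        uint64_t total = 0;
        size_t i;

        for (i = 0; i <= NBUCKETS; i++)
            total += y[i];

        printf("IO svc_time Read Latency Histogram (n = %" PRIu64 "):\n", total);
        for (i = 0; i < NBUCKETS; i++)
            printf("\t< %5" PRIu64 "us%15" PRIu64 "%15d%%\n",
                   latency_x_axis_us[i], y[i], (int)(y[i] * 100 / total));
        /* last row: everything above the largest boundary */
        printf("\t> %5dms%15" PRIu64 "%15d%%\n", 10, y[i],
               (int)(y[i] * 100 / total));
        return 0;
    }
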
diff --git a/block/genhd.c b/block/genhd.c
index fcd6d4f..c6eb25d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1146,6 +1146,22 @@
 		blk_put_queue(disk->queue);
 	kfree(disk);
 }
+
+static int disk_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct gendisk *disk = dev_to_disk(dev);
+	struct disk_part_iter piter;
+	struct hd_struct *part;
+	int cnt = 0;
+
+	disk_part_iter_init(&piter, disk, 0);
+	while ((part = disk_part_iter_next(&piter)))
+		cnt++;
+	disk_part_iter_exit(&piter);
+	add_uevent_var(env, "NPARTS=%u", cnt);
+	return 0;
+}
+
 struct class block_class = {
 	.name		= "block",
 };
@@ -1165,6 +1181,7 @@
 	.groups		= disk_attr_groups,
 	.release	= disk_release,
 	.devnode	= block_devnode,
+	.uevent		= disk_uevent,
 };
 
 #ifdef CONFIG_PROC_FS
diff --git a/drivers/Kconfig b/drivers/Kconfig
index e1e2066..cc11302 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -112,6 +112,8 @@
 
 source "drivers/rtc/Kconfig"
 
+source "drivers/esoc/Kconfig"
+
 source "drivers/dma/Kconfig"
 
 source "drivers/dma-buf/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 194d20b..cf40194 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -172,4 +172,5 @@
 obj-$(CONFIG_STM)		+= hwtracing/stm/
 obj-$(CONFIG_ANDROID)		+= android/
 obj-$(CONFIG_NVMEM)		+= nvmem/
+obj-$(CONFIG_ESOC)              += esoc/
 obj-$(CONFIG_FPGA)		+= fpga/
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 2609ba2..b373fe9 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -9,7 +9,7 @@
 obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
 obj-y			+= power/
 obj-$(CONFIG_HAS_DMA)	+= dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o dma-removed.o
 obj-$(CONFIG_ISA_BUS_API)	+= isa.o
 obj-$(CONFIG_FW_LOADER)	+= firmware_class.o
 obj-$(CONFIG_NUMA)	+= node.o
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 8f8b68c..aa5e22c 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -311,7 +311,12 @@
 	void *ptr;
 	unsigned long pfn;
 
-	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+	pages = kmalloc(sizeof(struct page *) << get_order(size),
+			GFP_KERNEL | __GFP_NOWARN);
+
+	if (!pages)
+		pages = vmalloc(sizeof(struct page *) << get_order(size));
+
 	if (!pages)
 		return NULL;
 
@@ -320,7 +325,7 @@
 
 	ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
 
-	kfree(pages);
+	kvfree(pages);
 
 	return ptr;
 }
@@ -328,12 +333,14 @@
 /*
  * unmaps a range previously mapped by dma_common_*_remap
  */
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
+			   bool no_warn)
 {
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
 	if (!area || (area->flags & vm_flags) != vm_flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
+			cpu_addr);
 		return;
 	}
 
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
new file mode 100644
index 0000000..4281801
--- /dev/null
+++ b/drivers/base/dma-removed.c
@@ -0,0 +1,444 @@
+/*
+ *
+ *  Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *  Copyright (C) 2000-2004 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/io.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <linux/spinlock.h>
+#include <asm/dma-contiguous.h>
+#include <asm/tlbflush.h>
+
+struct removed_region {
+	phys_addr_t	base;
+	int		nr_pages;
+	unsigned long	*bitmap;
+	int		fixup;
+	struct mutex	lock;
+};
+
+#define NO_KERNEL_MAPPING_DUMMY	0x2222
+
+static int dma_init_removed_memory(phys_addr_t phys_addr, size_t size,
+				struct removed_region **mem)
+{
+	struct removed_region *dma_mem = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	dma_mem = kzalloc(sizeof(struct removed_region), GFP_KERNEL);
+	if (!dma_mem)
+		goto out;
+	dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dma_mem->bitmap)
+		goto free1_out;
+
+	dma_mem->base = phys_addr;
+	dma_mem->nr_pages = pages;
+	mutex_init(&dma_mem->lock);
+
+	*mem = dma_mem;
+
+	return 0;
+
+free1_out:
+	kfree(dma_mem);
+out:
+	return -ENOMEM;
+}
+
+static int dma_assign_removed_region(struct device *dev,
+					struct removed_region *mem)
+{
+	if (dev->removed_mem)
+		return -EBUSY;
+
+	dev->removed_mem = mem;
+	return 0;
+}
+
+static void adapt_iomem_resource(unsigned long base_pfn, unsigned long end_pfn)
+{
+	struct resource *res, *conflict;
+	resource_size_t cstart, cend;
+
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
+	if (!res)
+		return;
+
+	res->name  = "System RAM";
+	res->start = __pfn_to_phys(base_pfn);
+	res->end = __pfn_to_phys(end_pfn) - 1;
+	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+
+	conflict = request_resource_conflict(&iomem_resource, res);
+	if (!conflict) {
+		pr_err("Removed memory: no conflict resource found\n");
+		kfree(res);
+		goto done;
+	}
+
+	cstart = conflict->start;
+	cend = conflict->end;
+	if ((cstart == res->start) && (cend == res->end)) {
+		release_resource(conflict);
+	} else if ((res->start >= cstart) && (res->start <= cend)) {
+		if (res->start == cstart) {
+			adjust_resource(conflict, res->end + 1,
+					cend - res->end);
+		} else if (res->end == cend) {
+			adjust_resource(conflict, cstart,
+					res->start - cstart);
+		} else {
+			adjust_resource(conflict, cstart,
+					res->start - cstart);
+			res->start = res->end + 1;
+			res->end = cend;
+			request_resource(&iomem_resource, res);
+			goto done;
+		}
+	} else {
+		pr_err("Removed memory: incorrect resource conflict start=%llx end=%llx\n",
+				(unsigned long long) conflict->start,
+				(unsigned long long) conflict->end);
+	}
+
+	kfree(res);
+done:
+	return;
+}
+
+#ifdef CONFIG_FLATMEM
+static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+	struct page *start_pg, *end_pg;
+	unsigned long pg, pgend;
+
+	start_pfn = ALIGN(start_pfn, pageblock_nr_pages);
+	end_pfn = round_down(end_pfn, pageblock_nr_pages);
+	/*
+	 * Convert start_pfn/end_pfn to a struct page pointer.
+	 */
+	start_pg = pfn_to_page(start_pfn - 1) + 1;
+	end_pg = pfn_to_page(end_pfn - 1) + 1;
+
+	/*
+	 * Convert to physical addresses, and round start upwards and end
+	 * downwards.
+	 */
+	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
+	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+
+	/*
+	 * If there are free pages between these, free the section of the
+	 * memmap array.
+	 */
+	if (pg < pgend)
+		free_bootmem_late(pg, pgend - pg);
+}
+#else
+static void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
+{
+}
+#endif
+
+static int _clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+			    void *data)
+{
+	pte_clear(&init_mm, addr, pte);
+	return 0;
+}
+
+static void clear_mapping(unsigned long addr, unsigned long size)
+{
+	apply_to_page_range(&init_mm, addr, size, _clear_pte, NULL);
+	/* ensure ptes are updated */
+	mb();
+	flush_tlb_kernel_range(addr, addr + size);
+}
+
+static void removed_region_fixup(struct removed_region *dma_mem, int index)
+{
+	unsigned long fixup_size;
+	unsigned long base_pfn;
+	unsigned long flags;
+
+	if (index > dma_mem->nr_pages)
+		return;
+
+	/* carve-out */
+	flags = memblock_region_resize_late_begin();
+	memblock_free(dma_mem->base, dma_mem->nr_pages * PAGE_SIZE);
+	memblock_remove(dma_mem->base, index * PAGE_SIZE);
+	memblock_region_resize_late_end(flags);
+
+	/* clear page-mappings */
+	base_pfn = dma_mem->base >> PAGE_SHIFT;
+	if (!PageHighMem(pfn_to_page(base_pfn))) {
+		clear_mapping((unsigned long) phys_to_virt(dma_mem->base),
+				index * PAGE_SIZE);
+	}
+
+	/* free page objects */
+	free_memmap(base_pfn, base_pfn + index);
+
+	/* return remaining area to system */
+	fixup_size = (dma_mem->nr_pages - index) * PAGE_SIZE;
+	free_bootmem_late(dma_mem->base + index * PAGE_SIZE, fixup_size);
+
+	/*
+	 * release freed resource region so as to show up under iomem resource
+	 * list
+	 */
+	adapt_iomem_resource(base_pfn, base_pfn + index);
+
+	/* limit the fixup region */
+	dma_mem->nr_pages = index;
+}
+
+void *removed_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+		    gfp_t gfp, unsigned long attrs)
+{
+	bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
+	bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
+	int pageno;
+	unsigned long order;
+	void *addr = NULL;
+	struct removed_region *dma_mem = dev->removed_mem;
+	int nbits;
+	unsigned int align;
+
+	if (!gfpflags_allow_blocking(gfp))
+		return NULL;
+
+	size = PAGE_ALIGN(size);
+	nbits = size >> PAGE_SHIFT;
+	order = get_order(size);
+
+	if (order > get_order(SZ_1M))
+		order = get_order(SZ_1M);
+
+	align = (1 << order) - 1;
+
+	mutex_lock(&dma_mem->lock);
+	pageno = bitmap_find_next_zero_area(dma_mem->bitmap, dma_mem->nr_pages,
+						0, nbits, align);
+
+	if (pageno < dma_mem->nr_pages) {
+		phys_addr_t base = dma_mem->base + pageno * PAGE_SIZE;
+		*handle = base;
+
+		bitmap_set(dma_mem->bitmap, pageno, nbits);
+
+		if (dma_mem->fixup) {
+			removed_region_fixup(dma_mem, pageno + nbits);
+			dma_mem->fixup = 0;
+		}
+
+		if (no_kernel_mapping && skip_zeroing) {
+			addr = (void *)NO_KERNEL_MAPPING_DUMMY;
+			goto out;
+		}
+
+		addr = ioremap(base, size);
+		if (WARN_ON(!addr)) {
+			bitmap_clear(dma_mem->bitmap, pageno, nbits);
+		} else {
+			if (!skip_zeroing)
+				memset_io(addr, 0, size);
+			if (no_kernel_mapping) {
+				iounmap(addr);
+				addr = (void *)NO_KERNEL_MAPPING_DUMMY;
+			}
+			*handle = base;
+		}
+	}
+
+out:
+	mutex_unlock(&dma_mem->lock);
+	return addr;
+}
+
+int removed_mmap(struct device *dev, struct vm_area_struct *vma,
+		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		 unsigned long attrs)
+{
+	return -ENXIO;
+}
+
+void removed_free(struct device *dev, size_t size, void *cpu_addr,
+		  dma_addr_t handle, unsigned long attrs)
+{
+	bool no_kernel_mapping = attrs & DMA_ATTR_NO_KERNEL_MAPPING;
+	struct removed_region *dma_mem = dev->removed_mem;
+
+	if (!no_kernel_mapping)
+		iounmap(cpu_addr);
+	mutex_lock(&dma_mem->lock);
+	bitmap_clear(dma_mem->bitmap, (handle - dma_mem->base) >> PAGE_SHIFT,
+				size >> PAGE_SHIFT);
+	mutex_unlock(&dma_mem->lock);
+}
+
+static dma_addr_t removed_map_page(struct device *dev, struct page *page,
+			unsigned long offset, size_t size,
+			enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	return ~(dma_addr_t)0;
+}
+
+static void removed_unmap_page(struct device *dev, dma_addr_t dma_handle,
+		size_t size, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+
+static int removed_map_sg(struct device *dev, struct scatterlist *sg,
+			int nents, enum dma_data_direction dir,
+			unsigned long attrs)
+{
+	return 0;
+}
+
+static void removed_unmap_sg(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir,
+			unsigned long attrs)
+{
+}
+
+static void removed_sync_single_for_cpu(struct device *dev,
+			dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
+{
+}
+
+void removed_sync_single_for_device(struct device *dev,
+			dma_addr_t dma_handle, size_t size,
+			enum dma_data_direction dir)
+{
+}
+
+void removed_sync_sg_for_cpu(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir)
+{
+}
+
+void removed_sync_sg_for_device(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir)
+{
+}
+
+void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
+			size_t size, unsigned long attrs)
+{
+	return ioremap(handle, size);
+}
+
+void removed_unremap(struct device *dev, void *remapped_address, size_t size)
+{
+	iounmap(remapped_address);
+}
+
+const struct dma_map_ops removed_dma_ops = {
+	.alloc			= removed_alloc,
+	.free			= removed_free,
+	.mmap			= removed_mmap,
+	.map_page		= removed_map_page,
+	.unmap_page		= removed_unmap_page,
+	.map_sg			= removed_map_sg,
+	.unmap_sg		= removed_unmap_sg,
+	.sync_single_for_cpu	= removed_sync_single_for_cpu,
+	.sync_single_for_device	= removed_sync_single_for_device,
+	.sync_sg_for_cpu	= removed_sync_sg_for_cpu,
+	.sync_sg_for_device	= removed_sync_sg_for_device,
+	.remap			= removed_remap,
+	.unremap		= removed_unremap,
+};
+EXPORT_SYMBOL(removed_dma_ops);
+
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+	struct removed_region *mem = rmem->priv;
+
+	if (!mem && dma_init_removed_memory(rmem->base, rmem->size, &mem)) {
+		pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+			&rmem->base, (unsigned long)rmem->size / SZ_1M);
+		return -EINVAL;
+	}
+	mem->fixup = rmem->fixup;
+	set_dma_ops(dev, &removed_dma_ops);
+	rmem->priv = mem;
+	dma_assign_removed_region(dev, mem);
+	return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+					struct device *dev)
+{
+	dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops removed_mem_ops = {
+	.device_init    = rmem_dma_device_init,
+	.device_release = rmem_dma_device_release,
+};
+
+static int __init removed_dma_setup(struct reserved_mem *rmem)
+{
+	unsigned long node = rmem->fdt_node;
+	int nomap, fixup;
+
+	nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL;
+	fixup = of_get_flat_dt_prop(node, "no-map-fixup", NULL) != NULL;
+
+	if (nomap && fixup) {
+		pr_err("Removed memory: no-map & no-map-fixup can't co-exist\n");
+		return -EINVAL;
+	}
+
+	rmem->fixup = fixup;
+	if (rmem->fixup) {
+		/* Architecture-specific contiguous memory fixup, applied only
+		 * for no-map-fixup regions to split the kernel mappings
+		 */
+		dma_contiguous_early_fixup(rmem->base, rmem->size);
+	}
+
+	rmem->ops = &removed_mem_ops;
+	pr_info("Removed memory: created DMA memory pool at %pa, size %ld MiB\n",
+		&rmem->base, (unsigned long)rmem->size / SZ_1M);
+	return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma, "removed-dma-pool", removed_dma_setup);
+#endif
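
Editor's sketch of the allocator core in removed_alloc()/removed_free(): the carveout is tracked as one bit per page, bitmap_find_next_zero_area() does a first-fit search aligned to the allocation order (capped at 1 MiB), and bitmap_set()/bitmap_clear() mark the range busy and free again. Stripped of the no-map-fixup and kernel-mapping handling, the core looks roughly like this (carveout_alloc is a hypothetical name):

    /* Sketch only: first-fit page allocation over the removed_region bitmap. */
    static phys_addr_t carveout_alloc(struct removed_region *r, size_t size)
    {
            int nbits = PAGE_ALIGN(size) >> PAGE_SHIFT;
            int order = min(get_order(size), get_order(SZ_1M));
            unsigned long pageno;

            mutex_lock(&r->lock);
            pageno = bitmap_find_next_zero_area(r->bitmap, r->nr_pages, 0,
                                                nbits, (1 << order) - 1);
            if (pageno >= r->nr_pages) {            /* no free run big enough */
                    mutex_unlock(&r->lock);
                    return 0;
            }
            bitmap_set(r->bitmap, pageno, nbits);
            mutex_unlock(&r->lock);
            return r->base + (phys_addr_t)pageno * PAGE_SIZE;
    }

In the device tree such a pool is a reserved-memory child node with compatible "removed-dma-pool" plus either no-map or no-map-fixup (removed_dma_setup() rejects both together); rmem_dma_device_init() then installs removed_dma_ops on the claiming device.
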
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 2932a5b..4f99101 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -33,6 +33,7 @@
 #include <linux/cpufreq.h>
 #include <linux/cpuidle.h>
 #include <linux/timer.h>
+#include <linux/wakeup_reason.h>
 
 #include "../base.h"
 #include "power.h"
@@ -1353,6 +1354,7 @@
 	pm_callback_t callback = NULL;
 	char *info = NULL;
 	int error = 0;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
 	DECLARE_DPM_WATCHDOG_ON_STACK(wd);
 
 	TRACE_DEVICE(dev);
@@ -1373,6 +1375,9 @@
 		pm_wakeup_event(dev, 0);
 
 	if (pm_wakeup_pending()) {
+		pm_get_active_wakeup_sources(suspend_abort,
+			MAX_SUSPEND_ABORT_LEN);
+		log_suspend_abort_reason(suspend_abort);
 		async_error = -EBUSY;
 		goto Complete;
 	}
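
For context, pm_wakeup_pending() becomes true when some wakeup source reports activity while suspend is in flight, typically from a driver's interrupt handler; the new lines above then record which source it was instead of returning a bare -EBUSY. A hedged driver-side sketch (foo_wake_irq is hypothetical, pm_wakeup_event() is the stock API):

    #include <linux/interrupt.h>
    #include <linux/pm_wakeup.h>

    /* Hypothetical wakeup-capable IRQ: signalling the event here makes
     * pm_wakeup_pending() true, so the suspend path above aborts and the
     * source name ends up in the suspend-abort log. */
    static irqreturn_t foo_wake_irq(int irq, void *dev_id)
    {
            struct device *dev = dev_id;

            pm_wakeup_event(dev, 0);        /* 0: no extra wakeup timeout */
            return IRQ_HANDLED;
    }
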
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 7f3646e..b7ee756 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -147,7 +147,7 @@
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
 		ret = pm_qos_update_target(&qos->resume_latency,
-					   &req->data.pnode, action, value);
+					   &req->data.lat, action, value);
 		if (ret) {
 			value = pm_qos_read_value(&qos->resume_latency);
 			blocking_notifier_call_chain(&dev_pm_notifiers,
@@ -157,7 +157,7 @@
 		break;
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		ret = pm_qos_update_target(&qos->latency_tolerance,
-					   &req->data.pnode, action, value);
+					   &req->data.lat, action, value);
 		if (ret) {
 			value = pm_qos_read_value(&qos->latency_tolerance);
 			req->dev->power.set_latency_tolerance(req->dev, value);
@@ -258,7 +258,7 @@
 
 	/* Flush the constraints lists for the device. */
 	c = &qos->resume_latency;
-	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+	plist_for_each_entry_safe(req, tmp, &c->list, data.lat.node) {
 		/*
 		 * Update constraints list and call the notification
 		 * callbacks if needed
@@ -267,7 +267,7 @@
 		memset(req, 0, sizeof(*req));
 	}
 	c = &qos->latency_tolerance;
-	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+	plist_for_each_entry_safe(req, tmp, &c->list, data.lat.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
@@ -382,7 +382,7 @@
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
 	case DEV_PM_QOS_LATENCY_TOLERANCE:
-		curr_value = req->data.pnode.prio;
+		curr_value = req->data.lat.node.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
 		curr_value = req->data.flr.flags;
@@ -835,7 +835,7 @@
 	ret = IS_ERR_OR_NULL(dev->power.qos)
 		|| !dev->power.qos->latency_tolerance_req ?
 			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
-			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+			dev->power.qos->latency_tolerance_req->data.lat.node.prio;
 	mutex_unlock(&dev_pm_qos_mtx);
 	return ret;
 }
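
The data.pnode to data.lat.node rename in this hunk implies that this tree's struct dev_pm_qos_request no longer stores a bare plist_node for latency requests but wraps it in a small struct. The header is not part of this diff; the assumed shape, for orientation only, is roughly:

    /* Assumed layout in this tree; mainline 4.9 has "struct plist_node pnode"
     * directly in the union. */
    struct dev_pm_qos_request {
            enum dev_pm_qos_req_type type;
            union {
                    struct {
                            struct plist_node node;
                            /* tree-specific per-request latency data */
                    } lat;
                    struct dev_pm_qos_flags_request flr;
            } data;
            struct device *dev;
    };
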
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 62e4de2..16d307b 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -15,6 +15,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pm_wakeirq.h>
+#include <linux/types.h>
 #include <trace/events/power.h>
 
 #include "power.h"
@@ -802,6 +803,37 @@
 }
 EXPORT_SYMBOL_GPL(pm_wakeup_event);
 
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+	struct wakeup_source *ws, *last_active_ws = NULL;
+	int len = 0;
+	bool active = false;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+		if (ws->active && len < max) {
+			if (!active)
+				len += scnprintf(pending_wakeup_source, max,
+						"Pending Wakeup Sources: ");
+			len += scnprintf(pending_wakeup_source + len, max - len,
+				"%s ", ws->name);
+			active = true;
+		} else if (!active &&
+			   (!last_active_ws ||
+			    ktime_to_ns(ws->last_time) >
+			    ktime_to_ns(last_active_ws->last_time))) {
+			last_active_ws = ws;
+		}
+	}
+	if (!active && last_active_ws) {
+		scnprintf(pending_wakeup_source, max,
+				"Last active Wakeup Source: %s",
+				last_active_ws->name);
+	}
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
 void pm_print_active_wakeup_sources(void)
 {
 	struct wakeup_source *ws;
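
pm_get_active_wakeup_sources() relies on scnprintf() returning the number of characters actually stored (never more than the remaining space), so len can be accumulated and compared against max without running past the caller's buffer. A tiny standalone illustration of the accumulation pattern (plain C; note that userspace snprintf() returns the would-be length instead, so here the len bound in the loop is what keeps the next write in range):

    #include <stdio.h>

    int main(void)
    {
            const char *names[] = { "alarmtimer", "qcom_rx_wakelock", "ipc0" };
            char buf[32];
            int len = 0, i;

            for (i = 0; i < 3 && len < (int)sizeof(buf); i++)
                    len += snprintf(buf + len, sizeof(buf) - len, "%s ", names[i]);

            printf("Pending Wakeup Sources: %s\n", buf);
            return 0;
    }
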
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 8d98a32..96c34a9 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/suspend.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -75,6 +76,8 @@
 	return 0;
 
  err_out:
+	log_suspend_abort_reason("System core suspend callback %pF failed",
+		ops->suspend);
 	pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend);
 
 	list_for_each_entry_continue(ops, &syscore_ops_list, node)
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 00269de..5e40ca6 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -219,7 +219,7 @@
 			unsigned long flags)
 {
 	struct clk_hw *hw;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	struct clk_composite *composite;
 	struct clk_ops *clk_composite_ops;
 	int ret;
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 96386ff..3d6754e 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -434,7 +434,7 @@
 {
 	struct clk_divider *div;
 	struct clk_hw *hw;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	int ret;
 
 	if (clk_divider_flags & CLK_DIVIDER_HIWORD_MASK) {
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
index a5d402d..fda42ab 100644
--- a/drivers/clk/clk-fixed-factor.c
+++ b/drivers/clk/clk-fixed-factor.c
@@ -74,7 +74,7 @@
 		unsigned int mult, unsigned int div)
 {
 	struct clk_fixed_factor *fix;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	struct clk_hw *hw;
 	int ret;
 
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index b5c46b3..9933e1c 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -61,7 +61,7 @@
 {
 	struct clk_fixed_rate *fixed;
 	struct clk_hw *hw;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	int ret;
 
 	/* allocate fixed-rate clock */
diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
index aab9046..c0a25f5 100644
--- a/drivers/clk/clk-fractional-divider.c
+++ b/drivers/clk/clk-fractional-divider.c
@@ -122,7 +122,7 @@
 		u8 clk_divider_flags, spinlock_t *lock)
 {
 	struct clk_fractional_divider *fd;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	struct clk_hw *hw;
 	int ret;
 
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index 4e691e3..146e226 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -127,7 +127,7 @@
 {
 	struct clk_gate *gate;
 	struct clk_hw *hw;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	int ret;
 
 	if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index 16a3d57..b77a742 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -121,7 +121,7 @@
 {
 	struct clk_mux *mux;
 	struct clk_hw *hw;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	u8 width = 0;
 	int ret;
 
diff --git a/drivers/clk/clk-pwm.c b/drivers/clk/clk-pwm.c
index 8cb9d11..94c44f7 100644
--- a/drivers/clk/clk-pwm.c
+++ b/drivers/clk/clk-pwm.c
@@ -56,7 +56,7 @@
 static int clk_pwm_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
-	struct clk_init_data init;
+	struct clk_init_data init = {};
 	struct clk_pwm *clk_pwm;
 	struct pwm_device *pwm;
 	struct pwm_args pargs;
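
All of the clk-*.c hunks above make the same small change for the same reason: clk_register() now copies vdd_class, rate_max and num_rate_max out of hw->init (see the clk.c hunk below), so an on-stack clk_init_data that is not zero-initialized would feed stack garbage into those new fields for every basic clock. A hedged helper sketch showing the intent (register_simple_clk is hypothetical):

    static int register_simple_clk(struct device *dev, struct clk_hw *hw,
                                   const char *name, const struct clk_ops *ops)
    {
            /* Empty-brace init: fields this caller never sets (vdd_class,
             * rate_max, num_rate_max, ...) are seen as NULL/0 by
             * clk_register() instead of whatever was on the stack. */
            struct clk_init_data init = {};

            init.name = name;
            init.ops = ops;
            init.num_parents = 0;
            hw->init = &init;

            return PTR_ERR_OR_ZERO(clk_register(dev, hw));
    }
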
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 0fb39fe..98eef6fe 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
  * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -23,6 +24,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/clkdev.h>
+#include <linux/regulator/consumer.h>
 
 #include "clk.h"
 
@@ -39,6 +41,13 @@
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+struct clk_handoff_vdd {
+	struct list_head list;
+	struct clk_vdd_class *vdd_class;
+};
+
+static LIST_HEAD(clk_handoff_vdd_list);
+
 /***    private data structures    ***/
 
 struct clk_core {
@@ -73,6 +82,9 @@
 	struct hlist_node	debug_node;
 #endif
 	struct kref		ref;
+	struct clk_vdd_class	*vdd_class;
+	unsigned long		*rate_max;
+	int			num_rate_max;
 };
 
 #define CREATE_TRACE_POINTS
@@ -464,6 +476,220 @@
 }
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
 
+/*
+ *  Find the voltage level required for a given clock rate.
+ */
+static int clk_find_vdd_level(struct clk_core *clk, unsigned long rate)
+{
+	int level;
+
+	/*
+	 * For certain PLLs, due to the limitation in the bits allocated for
+	 * programming the fractional divider, the actual rate of the PLL will
+	 * be slightly higher than the requested rate (in the order of several
+	 * Hz). To accommodate this difference, convert the FMAX rate and the
+	 * clock frequency to KHz and use that for deriving the voltage level.
+	 */
+	for (level = 0; level < clk->num_rate_max; level++)
+		if (DIV_ROUND_CLOSEST(rate, 1000) <=
+				DIV_ROUND_CLOSEST(clk->rate_max[level], 1000))
+			break;
+
+	if (level == clk->num_rate_max) {
+		pr_err("Rate %lu for %s is greater than highest Fmax\n", rate,
+				clk->name);
+		return -EINVAL;
+	}
+
+	return level;
+}
+
+/*
+ * Update voltage level given the current votes.
+ */
+static int clk_update_vdd(struct clk_vdd_class *vdd_class)
+{
+	int level, rc = 0, i, ignore;
+	struct regulator **r = vdd_class->regulator;
+	int *uv = vdd_class->vdd_uv;
+	int n_reg = vdd_class->num_regulators;
+	int cur_lvl = vdd_class->cur_level;
+	int max_lvl = vdd_class->num_levels - 1;
+	int cur_base = cur_lvl * n_reg;
+	int new_base;
+
+	/* aggregate votes */
+	for (level = max_lvl; level > 0; level--)
+		if (vdd_class->level_votes[level])
+			break;
+
+	if (level == cur_lvl)
+		return 0;
+
+	max_lvl = max_lvl * n_reg;
+	new_base = level * n_reg;
+
+	for (i = 0; i < vdd_class->num_regulators; i++) {
+		pr_debug("Set Voltage level Min %d, Max %d\n", uv[new_base + i],
+				uv[max_lvl + i]);
+		rc = regulator_set_voltage(r[i], uv[new_base + i],
+				uv[max_lvl + i]);
+		if (rc)
+			goto set_voltage_fail;
+
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			rc = regulator_enable(r[i]);
+		else if (level == 0)
+			rc = regulator_disable(r[i]);
+		if (rc)
+			goto enable_disable_fail;
+	}
+
+	if (vdd_class->set_vdd && !vdd_class->num_regulators)
+		rc = vdd_class->set_vdd(vdd_class, level);
+
+	if (!rc)
+		vdd_class->cur_level = level;
+
+	return rc;
+
+enable_disable_fail:
+	regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+
+set_voltage_fail:
+	for (i--; i >= 0; i--) {
+		regulator_set_voltage(r[i], uv[cur_base + i], uv[max_lvl + i]);
+		if (cur_lvl == 0 || cur_lvl == vdd_class->num_levels)
+			regulator_disable(r[i]);
+		else if (level == 0)
+			ignore = regulator_enable(r[i]);
+	}
+
+	return rc;
+}
+
+/*
+ *  Vote for a voltage level.
+ */
+static int clk_vote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+
+	vdd_class->level_votes[level]++;
+
+	rc = clk_update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]--;
+
+	mutex_unlock(&vdd_class->lock);
+
+	return rc;
+}
+
+/*
+ * Remove vote for a voltage level.
+ */
+static int clk_unvote_vdd_level(struct clk_vdd_class *vdd_class, int level)
+{
+	int rc = 0;
+
+	if (level >= vdd_class->num_levels)
+		return -EINVAL;
+
+	mutex_lock(&vdd_class->lock);
+
+	if (WARN(!vdd_class->level_votes[level],
+				"Reference counts are incorrect for %s level %d\n",
+				vdd_class->class_name, level))
+		goto out;
+
+	vdd_class->level_votes[level]--;
+
+	rc = clk_update_vdd(vdd_class);
+	if (rc)
+		vdd_class->level_votes[level]++;
+
+out:
+	mutex_unlock(&vdd_class->lock);
+	return rc;
+}
+
+/*
+ * Vote for a voltage level corresponding to a clock's rate.
+ */
+static int clk_vote_rate_vdd(struct clk_core *core, unsigned long rate)
+{
+	int level;
+
+	if (!core->vdd_class)
+		return 0;
+
+	level = clk_find_vdd_level(core, rate);
+	if (level < 0)
+		return level;
+
+	return clk_vote_vdd_level(core->vdd_class, level);
+}
+
+/*
+ * Remove vote for a voltage level corresponding to a clock's rate.
+ */
+static void clk_unvote_rate_vdd(struct clk_core *core, unsigned long rate)
+{
+	int level;
+
+	if (!core->vdd_class)
+		return;
+
+	level = clk_find_vdd_level(core, rate);
+	if (level < 0)
+		return;
+
+	clk_unvote_vdd_level(core->vdd_class, level);
+}
+
+static bool clk_is_rate_level_valid(struct clk_core *core, unsigned long rate)
+{
+	int level;
+
+	if (!core->vdd_class)
+		return true;
+
+	level = clk_find_vdd_level(core, rate);
+
+	return level >= 0;
+}
+
+static int clk_vdd_class_init(struct clk_vdd_class *vdd)
+{
+	struct clk_handoff_vdd *v;
+
+	list_for_each_entry(v, &clk_handoff_vdd_list, list) {
+		if (v->vdd_class == vdd)
+			return 0;
+	}
+
+	pr_debug("voting for vdd_class %s\n", vdd->class_name);
+
+	if (clk_vote_vdd_level(vdd, vdd->num_levels - 1))
+		pr_err("failed to vote for %s\n", vdd->class_name);
+
+	v = kmalloc(sizeof(*v), GFP_KERNEL);
+	if (!v)
+		return -ENOMEM;
+
+	v->vdd_class = vdd;
+
+	list_add_tail(&v->list, &clk_handoff_vdd_list);
+
+	return 0;
+}
+
 /***        clk api        ***/
 
 static void clk_core_unprepare(struct clk_core *core)
@@ -490,6 +716,9 @@
 		core->ops->unprepare(core->hw);
 
 	trace_clk_unprepare_complete(core);
+
+	clk_unvote_rate_vdd(core, core->rate);
+
 	clk_core_unprepare(core->parent);
 }
 
@@ -536,12 +765,19 @@
 
 		trace_clk_prepare(core);
 
+		ret = clk_vote_rate_vdd(core, core->rate);
+		if (ret) {
+			clk_core_unprepare(core->parent);
+			return ret;
+		}
+
 		if (core->ops->prepare)
 			ret = core->ops->prepare(core->hw);
 
 		trace_clk_prepare_complete(core);
 
 		if (ret) {
+			clk_unvote_rate_vdd(core, core->rate);
 			clk_core_unprepare(core->parent);
 			return ret;
 		}
@@ -807,6 +1043,7 @@
 static int clk_disable_unused(void)
 {
 	struct clk_core *core;
+	struct clk_handoff_vdd *v, *v_temp;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
@@ -827,6 +1064,13 @@
 	hlist_for_each_entry(core, &clk_orphan_list, child_node)
 		clk_unprepare_unused_subtree(core);
 
+	list_for_each_entry_safe(v, v_temp, &clk_handoff_vdd_list, list) {
+		clk_unvote_vdd_level(v->vdd_class,
+				v->vdd_class->num_levels - 1);
+		list_del(&v->list);
+		kfree(v);
+	}
+
 	clk_prepare_unlock();
 
 	return 0;
@@ -1418,6 +1662,9 @@
 		top = clk_calc_new_rates(parent, best_parent_rate);
 
 out:
+	if (!clk_is_rate_level_valid(core, rate))
+		return NULL;
+
 	clk_calc_subtree(core, new_rate, parent, p_index);
 
 	return top;
@@ -1466,7 +1713,7 @@
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk_core *core)
+static int clk_change_rate(struct clk_core *core)
 {
 	struct clk_core *child;
 	struct hlist_node *tmp;
@@ -1475,6 +1722,7 @@
 	bool skip_set_rate = false;
 	struct clk_core *old_parent;
 	struct clk_core *parent = NULL;
+	int rc = 0;
 
 	old_rate = core->rate;
 
@@ -1517,11 +1765,26 @@
 
 	trace_clk_set_rate(core, core->new_rate);
 
-	if (!skip_set_rate && core->ops->set_rate)
-		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
+	/* Enforce vdd requirements for new frequency. */
+	if (core->prepare_count) {
+		rc = clk_vote_rate_vdd(core, core->new_rate);
+		if (rc)
+			goto out;
+	}
+
+	if (!skip_set_rate && core->ops->set_rate) {
+		rc = core->ops->set_rate(core->hw, core->new_rate,
+						best_parent_rate);
+		if (rc)
+			goto err_set_rate;
+	}
 
 	trace_clk_set_rate_complete(core, core->new_rate);
 
+	/* Release vdd requirements for old frequency. */
+	if (core->prepare_count)
+		clk_unvote_rate_vdd(core, old_rate);
+
 	core->rate = clk_recalc(core, best_parent_rate);
 
 	if (core->flags & CLK_SET_RATE_UNGATE) {
@@ -1550,12 +1813,24 @@
 		/* Skip children who will be reparented to another clock */
 		if (child->new_parent && child->new_parent != core)
 			continue;
-		clk_change_rate(child);
+		rc = clk_change_rate(child);
+		if (rc)
+			return rc;
 	}
 
 	/* handle the new child who might not be in core->children yet */
 	if (core->new_child)
-		clk_change_rate(core->new_child);
+		rc = clk_change_rate(core->new_child);
+
+	return rc;
+
+err_set_rate:
+	if (core->prepare_count)
+		clk_unvote_rate_vdd(core, core->new_rate);
+out:
+	trace_clk_set_rate_complete(core, core->new_rate);
+
+	return rc;
 }
 
 static int clk_core_set_rate_nolock(struct clk_core *core,
@@ -1563,6 +1838,7 @@
 {
 	struct clk_core *top, *fail_clk;
 	unsigned long rate = req_rate;
+	int ret = 0;
 
 	if (!core)
 		return 0;
@@ -1582,18 +1858,24 @@
 	/* notify that we are about to change rates */
 	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
 	if (fail_clk) {
-		pr_debug("%s: failed to set %s rate\n", __func__,
-				fail_clk->name);
+		pr_debug("%s: failed to set %s clock to run at %lu\n", __func__,
+				fail_clk->name, req_rate);
 		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
 		return -EBUSY;
 	}
 
 	/* change the rates */
-	clk_change_rate(top);
+	ret = clk_change_rate(top);
+	if (ret) {
+		pr_err("%s: failed to set %s clock to run at %lu\n", __func__,
+				top->name, req_rate);
+		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+		return ret;
+	}
 
 	core->req_rate = req_rate;
 
-	return 0;
+	return ret;
 }
 
 /**
@@ -1977,6 +2259,18 @@
 }
 EXPORT_SYMBOL_GPL(clk_is_match);
 
+int clk_set_flags(struct clk *clk, unsigned long flags)
+{
+	if (!clk)
+		return 0;
+
+	if (!clk->core->ops->set_flags)
+		return -EINVAL;
+
+	return clk->core->ops->set_flags(clk->core->hw, flags);
+}
+EXPORT_SYMBOL_GPL(clk_set_flags);
+
 /***        debugfs support        ***/
 
 #ifdef CONFIG_DEBUG_FS
@@ -2556,8 +2850,19 @@
 	core->num_parents = hw->init->num_parents;
 	core->min_rate = 0;
 	core->max_rate = ULONG_MAX;
+	core->vdd_class = hw->init->vdd_class;
+	core->rate_max = hw->init->rate_max;
+	core->num_rate_max = hw->init->num_rate_max;
 	hw->core = core;
 
+	if (core->vdd_class) {
+		ret = clk_vdd_class_init(core->vdd_class);
+		if (ret) {
+			pr_err("Failed to initialize vdd class\n");
+			goto fail_parent_names;
+		}
+	}
+
 	/* allocate local copy in case parent_names is __initdata */
 	core->parent_names = kcalloc(core->num_parents, sizeof(char *),
 					GFP_KERNEL);
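
To use the voltage-scaling support added above, a provider describes each scalable clock with a per-level Fmax table and a shared clk_vdd_class vote aggregator in clk_init_data; clk_prepare() and clk_set_rate() then vote the lowest level whose Fmax covers the rate. Because clk_find_vdd_level() compares in kHz, a PLL that lands slightly high (say 600000195 Hz against a 600 MHz corner) still resolves to the same level. A hedged sketch of the provider side (vdd_cx, my_clk_ops, the level names and the frequencies are illustrative, not taken from this patch):

    extern struct clk_vdd_class vdd_cx;     /* shared regulator vote aggregator */
    extern const struct clk_ops my_clk_ops; /* provider-specific ops */

    /* Fmax per voltage level; level 0 means "no requirement / clock off". */
    static unsigned long my_clk_rate_max[] = {
            [1] = 200000000,        /* LOW      */
            [2] = 400000000,        /* NOMINAL  */
            [3] = 600000000,        /* HIGH     */
    };

    static struct clk_init_data my_clk_init = {
            .name          = "my_clk",
            .ops           = &my_clk_ops,
            .vdd_class     = &vdd_cx,
            .rate_max      = my_clk_rate_max,
            .num_rate_max  = ARRAY_SIZE(my_clk_rate_max),
    };

Note that clk_vdd_class_init() also parks each vdd_class at its highest level when the first clock referencing it is registered, and those handoff votes are only dropped in clk_disable_unused(), so boot-time consumers keep their voltage until late init.
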
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 1fb1f54..282b998 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -10,7 +10,8 @@
 clk-qcom-y += clk-regmap-divider.o
 clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o
-clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
+clk-qcom-y += clk-dummy.o
+clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o gdsc-regulator.o
 
 # Keep alphabetically sorted by config
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index e6a03ea..6ee6a6b 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -55,6 +55,20 @@
  */
 #define ALPHA_REG_BITWIDTH	40
 #define ALPHA_BITWIDTH		32
+#define FABIA_BITWIDTH		16
+
+#define FABIA_USER_CTL_LO	0xc
+#define FABIA_USER_CTL_HI	0x10
+#define FABIA_CAL_L_VAL		0x8
+#define FABIA_FRAC_VAL		0x38
+#define FABIA_OPMODE		0x2c
+#define FABIA_PLL_STANDBY	0x0
+#define FABIA_PLL_RUN		0x1
+#define FABIA_PLL_OUT_MASK	0x7
+#define FABIA_PLL_RATE_MARGIN	500
+#define FABIA_PLL_ACK_LATCH	BIT(29)
+#define FABIA_PLL_UPDATE	BIT(22)
+#define FABIA_PLL_HW_UPDATE_LOGIC_BYPASS	BIT(23)
 
 #define to_clk_alpha_pll(_hw) container_of(to_clk_regmap(_hw), \
 					   struct clk_alpha_pll, clkr)
@@ -62,9 +76,10 @@
 #define to_clk_alpha_pll_postdiv(_hw) container_of(to_clk_regmap(_hw), \
 					   struct clk_alpha_pll_postdiv, clkr)
 
-static int wait_for_pll(struct clk_alpha_pll *pll)
+static int wait_for_pll(struct clk_alpha_pll *pll, u32 mask, bool inverse,
+			const char *action)
 {
-	u32 val, mask, off;
+	u32 val, off;
 	int count;
 	int ret;
 	const char *name = clk_hw_get_name(&pll->clkr.hw);
@@ -74,26 +89,122 @@
 	if (ret)
 		return ret;
 
-	if (val & PLL_VOTE_FSM_ENA)
-		mask = PLL_ACTIVE_FLAG;
-	else
-		mask = PLL_LOCK_DET;
-
-	/* Wait for pll to enable. */
 	for (count = 100; count > 0; count--) {
 		ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
 		if (ret)
 			return ret;
-		if ((val & mask) == mask)
+		if (inverse && !(val & mask))
+			return 0;
+		else if ((val & mask) == mask)
 			return 0;
 
 		udelay(1);
 	}
 
-	WARN(1, "%s didn't enable after voting for it!\n", name);
+	WARN(1, "%s failed to %s!\n", name, action);
 	return -ETIMEDOUT;
 }
 
+static int wait_for_pll_enable(struct clk_alpha_pll *pll, u32 mask)
+{
+	return wait_for_pll(pll, mask, 0, "enable");
+}
+
+static int wait_for_pll_disable(struct clk_alpha_pll *pll, u32 mask)
+{
+	return wait_for_pll(pll, mask, 1, "disable");
+}
+
+static int wait_for_pll_offline(struct clk_alpha_pll *pll, u32 mask)
+{
+	return wait_for_pll(pll, mask, 0, "offline");
+}
+
+/* alpha pll with hwfsm support */
+
+#define PLL_OFFLINE_REQ		BIT(7)
+#define PLL_FSM_ENA		BIT(20)
+#define PLL_OFFLINE_ACK		BIT(28)
+#define PLL_ACTIVE_FLAG		BIT(30)
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+			     const struct pll_config *config)
+{
+	u32 val, mask;
+
+	regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+			   config->config_ctl_val);
+
+	val = config->main_output_mask;
+	val |= config->aux_output_mask;
+	val |= config->aux2_output_mask;
+	val |= config->early_output_mask;
+	val |= config->post_div_val;
+
+	mask = config->main_output_mask;
+	mask |= config->aux_output_mask;
+	mask |= config->aux2_output_mask;
+	mask |= config->early_output_mask;
+	mask |= config->post_div_mask;
+
+	regmap_update_bits(regmap, pll->offset + PLL_USER_CTL, mask, val);
+}
+
+static int clk_alpha_pll_hwfsm_enable(struct clk_hw *hw)
+{
+	int ret;
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	u32 val, off;
+
+	off = pll->offset;
+	ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+	if (ret)
+		return ret;
+	/* Enable HW FSM mode, clear OFFLINE request */
+	val |= PLL_FSM_ENA;
+	val &= ~PLL_OFFLINE_REQ;
+	ret = regmap_write(pll->clkr.regmap, off + PLL_MODE, val);
+	if (ret)
+		return ret;
+
+	/* Make sure enable request goes through before waiting for update */
+	mb();
+
+	ret = wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void clk_alpha_pll_hwfsm_disable(struct clk_hw *hw)
+{
+	int ret;
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	u32 val, off;
+
+	off = pll->offset;
+	ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+	if (ret)
+		return;
+	/* Request PLL_OFFLINE and wait for ack */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+				 PLL_OFFLINE_REQ, PLL_OFFLINE_REQ);
+	if (ret)
+		return;
+	ret = wait_for_pll_offline(pll, PLL_OFFLINE_ACK);
+	if (ret)
+		return;
+
+	/* Disable hwfsm */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+				 PLL_FSM_ENA, 0);
+	if (ret)
+		return;
+	wait_for_pll_disable(pll, PLL_ACTIVE_FLAG);
+}
+
 static int clk_alpha_pll_enable(struct clk_hw *hw)
 {
 	int ret;
@@ -112,7 +223,7 @@
 		ret = clk_enable_regmap(hw);
 		if (ret)
 			return ret;
-		return wait_for_pll(pll);
+		return wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
 	}
 
 	/* Skip if already enabled */
@@ -136,7 +247,7 @@
 	if (ret)
 		return ret;
 
-	ret = wait_for_pll(pll);
+	ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
 	if (ret)
 		return ret;
 
@@ -177,16 +288,24 @@
 	regmap_update_bits(pll->clkr.regmap, off + PLL_MODE, mask, 0);
 }
 
-static unsigned long alpha_pll_calc_rate(u64 prate, u32 l, u32 a)
+static unsigned long alpha_pll_calc_rate(const struct clk_alpha_pll *pll,
+						u64 prate, u32 l, u32 a)
 {
-	return (prate * l) + ((prate * a) >> ALPHA_BITWIDTH);
+	int alpha_bw = ALPHA_BITWIDTH;
+
+	if (pll->type == FABIA_PLL)
+		alpha_bw = FABIA_BITWIDTH;
+
+	return (prate * l) + ((prate * a) >> alpha_bw);
 }
 
 static unsigned long
-alpha_pll_round_rate(unsigned long rate, unsigned long prate, u32 *l, u64 *a)
+alpha_pll_round_rate(const struct clk_alpha_pll *pll, unsigned long rate,
+				unsigned long prate, u32 *l, u64 *a)
 {
 	u64 remainder;
 	u64 quotient;
+	int alpha_bw = ALPHA_BITWIDTH;
 
 	quotient = rate;
 	remainder = do_div(quotient, prate);
@@ -197,15 +316,19 @@
 		return rate;
 	}
 
+	/* Fabia PLLs only have 16 bits to program the fractional divider */
+	if (pll->type == FABIA_PLL)
+		alpha_bw = FABIA_BITWIDTH;
+
 	/* Upper ALPHA_BITWIDTH bits of Alpha */
-	quotient = remainder << ALPHA_BITWIDTH;
+	quotient = remainder << alpha_bw;
 	remainder = do_div(quotient, prate);
 
 	if (remainder)
 		quotient++;
 
 	*a = quotient;
-	return alpha_pll_calc_rate(prate, *l, *a);
+	return alpha_pll_calc_rate(pll, prate, *l, *a);
 }
 
 static const struct pll_vco *
@@ -239,7 +362,7 @@
 		a >>= ALPHA_REG_BITWIDTH - ALPHA_BITWIDTH;
 	}
 
-	return alpha_pll_calc_rate(prate, l, a);
+	return alpha_pll_calc_rate(pll, prate, l, a);
 }
 
 static int clk_alpha_pll_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -250,7 +373,7 @@
 	u32 l, off = pll->offset;
 	u64 a;
 
-	rate = alpha_pll_round_rate(rate, prate, &l, &a);
+	rate = alpha_pll_round_rate(pll, rate, prate, &l, &a);
 	vco = alpha_pll_find_vco(pll, rate);
 	if (!vco) {
 		pr_err("alpha pll not in a valid vco range\n");
@@ -281,8 +404,8 @@
 	u64 a;
 	unsigned long min_freq, max_freq;
 
-	rate = alpha_pll_round_rate(rate, *prate, &l, &a);
-	if (alpha_pll_find_vco(pll, rate))
+	rate = alpha_pll_round_rate(pll, rate, *prate, &l, &a);
+	if (pll->type == FABIA_PLL || alpha_pll_find_vco(pll, rate))
 		return rate;
 
 	min_freq = pll->vco_table[0].min_freq;
@@ -291,6 +414,209 @@
 	return clamp(rate, min_freq, max_freq);
 }
 
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+				const struct pll_config *config)
+{
+	u32 val, mask;
+
+	if (config->l) {
+		regmap_write(regmap, pll->offset + PLL_L_VAL,
+						config->l);
+		regmap_write(regmap, pll->offset + FABIA_CAL_L_VAL,
+						config->l);
+	}
+
+	if (config->frac)
+		regmap_write(regmap, pll->offset + FABIA_FRAC_VAL,
+						config->frac);
+	if (config->config_ctl_val)
+		regmap_write(regmap, pll->offset + PLL_CONFIG_CTL,
+				config->config_ctl_val);
+
+	if (config->post_div_mask) {
+		mask = config->post_div_mask;
+		val = config->post_div_val;
+		regmap_update_bits(regmap, pll->offset + FABIA_USER_CTL_LO,
+					mask, val);
+	}
+
+	regmap_update_bits(regmap, pll->offset + PLL_MODE,
+				 FABIA_PLL_HW_UPDATE_LOGIC_BYPASS,
+				 FABIA_PLL_HW_UPDATE_LOGIC_BYPASS);
+
+	regmap_update_bits(regmap, pll->offset + PLL_MODE,
+			   PLL_RESET_N, PLL_RESET_N);
+
+	pll->inited = true;
+}
+
+static int clk_fabia_pll_enable(struct clk_hw *hw)
+{
+	int ret;
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	u32 val, off = pll->offset;
+
+	ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+	if (ret)
+		return ret;
+
+	/* If in FSM mode, just vote for it */
+	if (val & PLL_VOTE_FSM_ENA) {
+		ret = clk_enable_regmap(hw);
+		if (ret)
+			return ret;
+		return wait_for_pll_enable(pll, PLL_ACTIVE_FLAG);
+	}
+
+	if (unlikely(!pll->inited))
+		clk_fabia_pll_configure(pll, pll->clkr.regmap, pll->config);
+
+	/* Disable PLL output */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+					PLL_OUTCTRL, 0);
+	if (ret)
+		return ret;
+
+	/* Set operation mode to STANDBY */
+	regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY);
+
+	/* PLL should be in STANDBY mode before continuing */
+	mb();
+
+	/* Bring PLL out of reset */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+				 PLL_RESET_N, PLL_RESET_N);
+	if (ret)
+		return ret;
+
+	/* Set operation mode to RUN */
+	regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_RUN);
+
+	ret = wait_for_pll_enable(pll, PLL_LOCK_DET);
+	if (ret)
+		return ret;
+
+	/* Enable the main PLL output */
+	ret = regmap_update_bits(pll->clkr.regmap, off + FABIA_USER_CTL_LO,
+				 FABIA_PLL_OUT_MASK, FABIA_PLL_OUT_MASK);
+	if (ret)
+		return ret;
+
+	/* Enable PLL outputs */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+				 PLL_OUTCTRL, PLL_OUTCTRL);
+	if (ret)
+		return ret;
+
+	/* Ensure that the write above goes through before returning. */
+	mb();
+	return ret;
+}
+
+static void clk_fabia_pll_disable(struct clk_hw *hw)
+{
+	int ret;
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	u32 val, off = pll->offset;
+
+	ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &val);
+	if (ret)
+		return;
+
+	/* If in FSM mode, just unvote it */
+	if (val & PLL_VOTE_FSM_ENA) {
+		clk_disable_regmap(hw);
+		return;
+	}
+
+	/* Disable PLL outputs */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+							PLL_OUTCTRL, 0);
+	if (ret)
+		return;
+
+	/* Disable the main PLL output */
+	ret = regmap_update_bits(pll->clkr.regmap, off + FABIA_USER_CTL_LO,
+			FABIA_PLL_OUT_MASK, 0);
+	if (ret)
+		return;
+
+	/* Place the PLL mode in STANDBY */
+	regmap_write(pll->clkr.regmap, off + FABIA_OPMODE, FABIA_PLL_STANDBY);
+}
+
+static unsigned long
+clk_fabia_pll_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
+{
+	u32 l, frac;
+	u64 prate = parent_rate;
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	u32 off = pll->offset;
+
+	regmap_read(pll->clkr.regmap, off + PLL_L_VAL, &l);
+	regmap_read(pll->clkr.regmap, off + FABIA_FRAC_VAL, &frac);
+
+	return alpha_pll_calc_rate(pll, prate, l, frac);
+}
+
+static int clk_fabia_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+				  unsigned long prate)
+{
+	struct clk_alpha_pll *pll = to_clk_alpha_pll(hw);
+	unsigned long rrate;
+	u32 regval, l, off = pll->offset;
+	u64 a;
+	int ret;
+
+	ret = regmap_read(pll->clkr.regmap, off + PLL_MODE, &regval);
+	if (ret)
+		return ret;
+
+	rrate = alpha_pll_round_rate(pll, rate, prate, &l, &a);
+	/*
+	 * Due to limited number of bits for fractional rate programming, the
+	 * rounded up rate could be marginally higher than the requested rate.
+	 */
+	if (rrate > (rate + FABIA_PLL_RATE_MARGIN) || rrate < rate) {
+		pr_err("Call set rate on the PLL with rounded rates!\n");
+		return -EINVAL;
+	}
+
+	regmap_write(pll->clkr.regmap, off + PLL_L_VAL, l);
+	/*
+	 * pll_cal_l_val is set to pll_l_val on MOST targets. Set it
+	 * explicitly here for PLL out-of-reset calibration to work
+	 * without a glitch on ALL of them.
+	 */
+	regmap_write(pll->clkr.regmap, off + FABIA_CAL_L_VAL, l);
+	regmap_write(pll->clkr.regmap, off + FABIA_FRAC_VAL, a);
+
+	/* Latch the PLL input */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+			   FABIA_PLL_UPDATE, FABIA_PLL_UPDATE);
+	if (ret)
+		return ret;
+
+	/* Wait for 2 reference cycles before checking the ACK bit. */
+	udelay(1);
+	regmap_read(pll->clkr.regmap, off + PLL_MODE, &regval);
+	if (!(regval & FABIA_PLL_ACK_LATCH)) {
+		WARN(1, "PLL latch failed. Output may be unstable!\n");
+		return -EINVAL;
+	}
+
+	/* Return the latch input to 0 */
+	ret = regmap_update_bits(pll->clkr.regmap, off + PLL_MODE,
+			   FABIA_PLL_UPDATE, 0);
+	if (ret)
+		return ret;
+
+	/* Wait for PLL output to stabilize */
+	udelay(100);
+	return 0;
+}
+
 const struct clk_ops clk_alpha_pll_ops = {
 	.enable = clk_alpha_pll_enable,
 	.disable = clk_alpha_pll_disable,
@@ -300,6 +626,32 @@
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_ops);
 
+const struct clk_ops clk_alpha_pll_hwfsm_ops = {
+	.enable = clk_alpha_pll_hwfsm_enable,
+	.disable = clk_alpha_pll_hwfsm_disable,
+	.recalc_rate = clk_alpha_pll_recalc_rate,
+	.round_rate = clk_alpha_pll_round_rate,
+	.set_rate = clk_alpha_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_alpha_pll_hwfsm_ops);
+
+const struct clk_ops clk_fabia_pll_ops = {
+	.enable = clk_fabia_pll_enable,
+	.disable = clk_fabia_pll_disable,
+	.recalc_rate = clk_fabia_pll_recalc_rate,
+	.round_rate = clk_alpha_pll_round_rate,
+	.set_rate = clk_fabia_pll_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fabia_pll_ops);
+
+const struct clk_ops clk_fabia_fixed_pll_ops = {
+	.enable = clk_fabia_pll_enable,
+	.disable = clk_fabia_pll_disable,
+	.recalc_rate = clk_fabia_pll_recalc_rate,
+	.round_rate = clk_alpha_pll_round_rate,
+};
+EXPORT_SYMBOL_GPL(clk_fabia_fixed_pll_ops);
+
 static unsigned long
 clk_alpha_pll_postdiv_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 {
@@ -353,3 +705,73 @@
 	.set_rate = clk_alpha_pll_postdiv_set_rate,
 };
 EXPORT_SYMBOL_GPL(clk_alpha_pll_postdiv_ops);
+
+static unsigned long clk_generic_pll_postdiv_recalc_rate(struct clk_hw *hw,
+				unsigned long parent_rate)
+{
+	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+	u32 i, div = 1, val;
+
+	if (!pll->post_div_table) {
+		pr_err("Missing the post_div_table for the PLL\n");
+		return -EINVAL;
+	}
+
+	regmap_read(pll->clkr.regmap, pll->offset + FABIA_USER_CTL_LO, &val);
+
+	val >>= pll->post_div_shift;
+	val &= PLL_POST_DIV_MASK;
+
+	for (i = 0; i < pll->num_post_div; i++) {
+		if (pll->post_div_table[i].val == val) {
+			div = pll->post_div_table[i].div;
+			break;
+		}
+	}
+
+	return (parent_rate / div);
+}
+
+static long clk_generic_pll_postdiv_round_rate(struct clk_hw *hw,
+				unsigned long rate, unsigned long *prate)
+{
+	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+
+	if (!pll->post_div_table)
+		return -EINVAL;
+
+	return divider_round_rate(hw, rate, prate, pll->post_div_table,
+					pll->width, CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int clk_generic_pll_postdiv_set_rate(struct clk_hw *hw,
+				unsigned long rate, unsigned long parent_rate)
+{
+	struct clk_alpha_pll_postdiv *pll = to_clk_alpha_pll_postdiv(hw);
+	int i, val = 0, div;
+
+	if (!pll->post_div_table) {
+		pr_err("Missing the post_div_table for the PLL\n");
+		return -EINVAL;
+	}
+
+	div = DIV_ROUND_UP_ULL((u64)parent_rate, rate);
+	for (i = 0; i < pll->num_post_div; i++) {
+		if (pll->post_div_table[i].div == div) {
+			val = pll->post_div_table[i].val;
+			break;
+		}
+	}
+
+	return regmap_update_bits(pll->clkr.regmap,
+				pll->offset + FABIA_USER_CTL_LO,
+				PLL_POST_DIV_MASK << pll->post_div_shift,
+				val << pll->post_div_shift);
+}
+
+const struct clk_ops clk_generic_pll_postdiv_ops = {
+	.recalc_rate = clk_generic_pll_postdiv_recalc_rate,
+	.round_rate = clk_generic_pll_postdiv_round_rate,
+	.set_rate = clk_generic_pll_postdiv_set_rate,
+};
+EXPORT_SYMBOL_GPL(clk_generic_pll_postdiv_ops);
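
alpha_pll_round_rate() splits a target rate into an integer multiplier L and a fractional word a such that rate is approximately prate * L + ((prate * a) >> alpha_bw), with alpha_bw = 16 on Fabia. A small standalone program walking the arithmetic for a 19.2 MHz reference and a 1.6 GHz request (worked numbers only, not register recommendations):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            const uint64_t prate = 19200000, rate = 1600000000, bw = 16;
            uint64_t l = rate / prate;                      /* 83 */
            uint64_t rem = rate % prate;                    /* 6400000 */
            uint64_t a = (rem << bw) / prate;

            if ((rem << bw) % prate)
                    a++;                                    /* round up: 21846 */

            printf("L=%llu FRAC=0x%llx -> %llu Hz\n",
                   (unsigned long long)l, (unsigned long long)a,
                   (unsigned long long)(prate * l + ((prate * a) >> bw)));
            return 0;
    }

The rounded-up result here is 195 Hz above the request, which is exactly why clk_fabia_pll_set_rate() tolerates anything within FABIA_PLL_RATE_MARGIN (500 Hz) instead of demanding an exact match.
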
diff --git a/drivers/clk/qcom/clk-alpha-pll.h b/drivers/clk/qcom/clk-alpha-pll.h
index 90ce201..2656cd6 100644
--- a/drivers/clk/qcom/clk-alpha-pll.h
+++ b/drivers/clk/qcom/clk-alpha-pll.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -16,6 +16,7 @@
 
 #include <linux/clk-provider.h>
 #include "clk-regmap.h"
+#include "clk-pll.h"
 
 struct pll_vco {
 	unsigned long min_freq;
@@ -23,35 +24,65 @@
 	u32 val;
 };
 
+enum pll_type {
+	ALPHA_PLL,
+	FABIA_PLL,
+};
+
 /**
  * struct clk_alpha_pll - phase locked loop (PLL)
  * @offset: base address of registers
+ * @inited: flag that's set when the PLL is initialized
  * @vco_table: array of VCO settings
  * @clkr: regmap clock handle
+ * @type: type of the PLL (ALPHA_PLL or FABIA_PLL)
  */
 struct clk_alpha_pll {
 	u32 offset;
+	struct pll_config *config;
+	bool inited;
 
 	const struct pll_vco *vco_table;
 	size_t num_vco;
 
 	struct clk_regmap clkr;
+	u32 config_ctl_val;
+#define PLLOUT_MAIN	BIT(0)
+#define PLLOUT_AUX	BIT(1)
+#define PLLOUT_AUX2	BIT(2)
+#define PLLOUT_EARLY	BIT(3)
+	u32 pllout_flags;
+	enum pll_type type;
 };
 
 /**
  * struct clk_alpha_pll_postdiv - phase locked loop (PLL) post-divider
  * @offset: base address of registers
  * @width: width of post-divider
+ * @post_div_shift: shift to differentiate between odd and even post-divider
+ * @post_div_table: table with PLL odd and even post-divider settings
+ * @num_post_div: Number of PLL post-divider settings
  * @clkr: regmap clock handle
  */
 struct clk_alpha_pll_postdiv {
 	u32 offset;
 	u8 width;
-
+	int post_div_shift;
+	const struct clk_div_table *post_div_table;
+	size_t num_post_div;
 	struct clk_regmap clkr;
 };
 
 extern const struct clk_ops clk_alpha_pll_ops;
+extern const struct clk_ops clk_alpha_pll_hwfsm_ops;
 extern const struct clk_ops clk_alpha_pll_postdiv_ops;
+extern const struct clk_ops clk_fabia_pll_ops;
+extern const struct clk_ops clk_fabia_fixed_pll_ops;
+extern const struct clk_ops clk_generic_pll_postdiv_ops;
+
+void clk_alpha_pll_configure(struct clk_alpha_pll *pll, struct regmap *regmap,
+		const struct pll_config *config);
+void clk_fabia_pll_configure(struct clk_alpha_pll *pll,
+		struct regmap *regmap, const struct pll_config *config);
 
 #endif
diff --git a/drivers/clk/qcom/clk-branch.c b/drivers/clk/qcom/clk-branch.c
index 26f7af31..f9d3f86 100644
--- a/drivers/clk/qcom/clk-branch.c
+++ b/drivers/clk/qcom/clk-branch.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -18,6 +18,7 @@
 #include <linux/export.h>
 #include <linux/clk-provider.h>
 #include <linux/regmap.h>
+#include <linux/clk/qcom.h>
 
 #include "clk-branch.h"
 
@@ -122,15 +123,62 @@
 	return clk_branch_toggle(hw, true, clk_branch_check_halt);
 }
 
+static int clk_cbcr_set_flags(struct regmap *regmap, unsigned int reg,
+				unsigned long flags)
+{
+	u32 cbcr_val;
+
+	regmap_read(regmap, reg, &cbcr_val);
+
+	switch (flags) {
+	case CLKFLAG_PERIPH_OFF_SET:
+		cbcr_val |= BIT(12);
+		break;
+	case CLKFLAG_PERIPH_OFF_CLEAR:
+		cbcr_val &= ~BIT(12);
+		break;
+	case CLKFLAG_RETAIN_PERIPH:
+		cbcr_val |= BIT(13);
+		break;
+	case CLKFLAG_NORETAIN_PERIPH:
+		cbcr_val &= ~BIT(13);
+		break;
+	case CLKFLAG_RETAIN_MEM:
+		cbcr_val |= BIT(14);
+		break;
+	case CLKFLAG_NORETAIN_MEM:
+		cbcr_val &= ~BIT(14);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	regmap_write(regmap, reg, cbcr_val);
+
+	/* Make sure power is enabled/disabled before returning. */
+	mb();
+	udelay(1);
+
+	return 0;
+}
+
 static void clk_branch_disable(struct clk_hw *hw)
 {
 	clk_branch_toggle(hw, false, clk_branch_check_halt);
 }
 
+static int clk_branch_set_flags(struct clk_hw *hw, unsigned int flags)
+{
+	struct clk_branch *br = to_clk_branch(hw);
+
+	return clk_cbcr_set_flags(br->clkr.regmap, br->halt_reg, flags);
+}
+
 const struct clk_ops clk_branch_ops = {
 	.enable = clk_branch_enable,
 	.disable = clk_branch_disable,
 	.is_enabled = clk_is_enabled_regmap,
+	.set_flags = clk_branch_set_flags,
 };
 EXPORT_SYMBOL_GPL(clk_branch_ops);
 
@@ -148,9 +196,46 @@
 	.enable = clk_branch2_enable,
 	.disable = clk_branch2_disable,
 	.is_enabled = clk_is_enabled_regmap,
+	.set_flags = clk_branch_set_flags,
 };
 EXPORT_SYMBOL_GPL(clk_branch2_ops);
 
+static int clk_gate_toggle(struct clk_hw *hw, bool en)
+{
+	struct clk_gate2 *gt = to_clk_gate2(hw);
+	int ret = 0;
+
+	if (en) {
+		ret = clk_enable_regmap(hw);
+		if (ret)
+			return ret;
+	} else {
+		clk_disable_regmap(hw);
+	}
+
+	if (gt->udelay)
+		udelay(gt->udelay);
+
+	return ret;
+}
+
+static int clk_gate2_enable(struct clk_hw *hw)
+{
+	return clk_gate_toggle(hw, true);
+}
+
+static void clk_gate2_disable(struct clk_hw *hw)
+{
+	clk_gate_toggle(hw, false);
+}
+
+const struct clk_ops clk_gate2_ops = {
+	.enable = clk_gate2_enable,
+	.disable = clk_gate2_disable,
+	.is_enabled = clk_is_enabled_regmap,
+};
+EXPORT_SYMBOL_GPL(clk_gate2_ops);
+
 const struct clk_ops clk_branch_simple_ops = {
 	.enable = clk_enable_regmap,
 	.disable = clk_disable_regmap,
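
clk_cbcr_set_flags() maps the new CLKFLAG_* requests onto CBCR bits 12 (force periph off), 13 (retain periph) and 14 (retain mem), and clk_branch_set_flags()/clk_set_flags() expose that to consumers. A hedged consumer-side sketch (foo_retain_mem is hypothetical; the CLKFLAG_* constants come from the linux/clk/qcom.h header included above, and clk_set_flags() is assumed to be declared there or in linux/clk.h in this tree):

    #include <linux/clk.h>
    #include <linux/clk/qcom.h>

    /* Keep a block's memories retained while its branch clock is gated,
     * and drop that vote again when retention is no longer needed. */
    static int foo_retain_mem(struct clk *branch_clk, bool retain)
    {
            return clk_set_flags(branch_clk, retain ? CLKFLAG_RETAIN_MEM
                                                    : CLKFLAG_NORETAIN_MEM);
    }
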
diff --git a/drivers/clk/qcom/clk-branch.h b/drivers/clk/qcom/clk-branch.h
index 284df3f..76ae5cd 100644
--- a/drivers/clk/qcom/clk-branch.h
+++ b/drivers/clk/qcom/clk-branch.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -46,11 +46,27 @@
 	struct clk_regmap clkr;
 };
 
+/**
+ * struct clk_gate2 - gating clock with status bit and dynamic hardware gating
+ * @udelay: halt delay in microseconds on clock branch Enable/Disable
+ * @clkr: handle between common and hardware-specific interfaces
+ *
+ * Clock which can gate its output.
+ */
+struct clk_gate2 {
+	u32	udelay;
+	struct	clk_regmap clkr;
+};
+
 extern const struct clk_ops clk_branch_ops;
 extern const struct clk_ops clk_branch2_ops;
+extern const struct clk_ops clk_gate2_ops;
 extern const struct clk_ops clk_branch_simple_ops;
 
 #define to_clk_branch(_hw) \
 	container_of(to_clk_regmap(_hw), struct clk_branch, clkr)
 
+#define to_clk_gate2(_hw) \
+	container_of(to_clk_regmap(_hw), struct clk_gate2, clkr)
+
 #endif
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
new file mode 100644
index 0000000..e2465c4
--- /dev/null
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include "common.h"
+
+#define to_clk_dummy(_hw)	container_of(_hw, struct clk_dummy, hw)
+
+#define RESET_MAX	100
+
+static int dummy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	struct clk_dummy *dummy = to_clk_dummy(hw);
+
+	dummy->rrate = rate;
+
+	pr_debug("set rate: %lu\n", rate);
+
+	return 0;
+}
+
+static long dummy_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *parent_rate)
+{
+	return rate;
+}
+
+static unsigned long dummy_clk_recalc_rate(struct clk_hw *hw,
+		unsigned long parent_rate)
+{
+	struct clk_dummy *dummy = to_clk_dummy(hw);
+
+	pr_debug("clock rate: %lu\n", dummy->rrate);
+
+	return dummy->rrate;
+}
+
+static int dummy_clk_set_flags(struct clk_hw *hw, unsigned int flags)
+{
+	return 0;
+}
+
+struct clk_ops clk_dummy_ops = {
+	.set_rate = dummy_clk_set_rate,
+	.round_rate = dummy_clk_round_rate,
+	.recalc_rate = dummy_clk_recalc_rate,
+	.set_flags = dummy_clk_set_flags,
+};
+EXPORT_SYMBOL_GPL(clk_dummy_ops);
+
+static int dummy_reset_assert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+static int dummy_reset_deassert(struct reset_controller_dev *rcdev,
+				unsigned long id)
+{
+	pr_debug("%s\n", __func__);
+	return 0;
+}
+
+static struct reset_control_ops dummy_reset_ops = {
+	.assert         = dummy_reset_assert,
+	.deassert       = dummy_reset_deassert,
+};
+
+/**
+ * clk_register_dummy - register dummy clock with the
+ *				   clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @flags: framework-specific flags
+ * @node: device node
+ */
+static struct clk *clk_register_dummy(struct device *dev, const char *name,
+		unsigned long flags, struct device_node *node)
+{
+	struct clk_dummy *dummy;
+	struct clk *clk;
+	struct clk_init_data init = {};
+
+	/* allocate dummy clock */
+	dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
+	if (!dummy)
+		return ERR_PTR(-ENOMEM);
+
+	init.name = name;
+	init.ops = &clk_dummy_ops;
+	init.flags = flags | CLK_IS_BASIC;
+	init.num_parents = 0;
+	dummy->hw.init = &init;
+
+	/* register the clock */
+	clk = clk_register(dev, &dummy->hw);
+	if (IS_ERR(clk)) {
+		kfree(dummy);
+		return clk;
+	}
+
+	dummy->reset.of_node = node;
+	dummy->reset.ops = &dummy_reset_ops;
+	dummy->reset.nr_resets = RESET_MAX;
+
+	if (reset_controller_register(&dummy->reset))
+		pr_err("Failed to register reset controller for %s\n", name);
+	else
+		pr_info("Successfully registered dummy reset controller for %s\n",
+								name);
+
+	return clk;
+}
+
+/**
+ * of_dummy_clk_setup() - Setup function for simple fixed rate clock
+ */
+static void of_dummy_clk_setup(struct device_node *node)
+{
+	struct clk *clk;
+	const char *clk_name = "dummy_clk";
+
+	of_property_read_string(node, "clock-output-names", &clk_name);
+
+	clk = clk_register_dummy(NULL, clk_name, 0, node);
+	if (!IS_ERR(clk)) {
+		of_clk_add_provider(node, of_clk_src_simple_get, clk);
+	} else {
+		pr_err("Failed to register dummy clock controller for %s\n",
+								clk_name);
+		return;
+	}
+
+	pr_info("Successfully registered dummy clock controller for %s\n",
+								clk_name);
+}
+CLK_OF_DECLARE(dummy_clk, "qcom,dummycc", of_dummy_clk_setup);
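
The dummy controller lets consumers probe on early silicon or virtual platforms before a real clock controller exists: set_rate just records the value, recalc_rate echoes it back, and all resets are no-ops. Consumer code is unchanged; a hedged probe-time sketch (foo_probe and the "core" clock name are hypothetical):

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    static int foo_probe(struct platform_device *pdev)
    {
            /* Against "qcom,dummycc" this succeeds, and clk_get_rate() simply
             * returns whatever was last requested (dummy_clk_recalc_rate). */
            struct clk *core = devm_clk_get(&pdev->dev, "core");

            if (IS_ERR(core))
                    return PTR_ERR(core);

            clk_set_rate(core, 100000000);
            dev_info(&pdev->dev, "core clk at %lu Hz\n", clk_get_rate(core));
            return 0;
    }
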
diff --git a/drivers/clk/qcom/clk-pll.h b/drivers/clk/qcom/clk-pll.h
index ffd0c63..9682799 100644
--- a/drivers/clk/qcom/clk-pll.h
+++ b/drivers/clk/qcom/clk-pll.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -70,6 +70,7 @@
 	u16 l;
 	u32 m;
 	u32 n;
+	u32 frac;
 	u32 vco_val;
 	u32 vco_mask;
 	u32 pre_div_val;
@@ -79,6 +80,9 @@
 	u32 mn_ena_mask;
 	u32 main_output_mask;
 	u32 aux_output_mask;
+	u32 aux2_output_mask;
+	u32 early_output_mask;
+	u32 config_ctl_val;
 };
 
 void clk_pll_configure_sr(struct clk_pll *pll, struct regmap *regmap,
diff --git a/drivers/clk/qcom/clk-rcg.h b/drivers/clk/qcom/clk-rcg.h
index b904c33..acbe793 100644
--- a/drivers/clk/qcom/clk-rcg.h
+++ b/drivers/clk/qcom/clk-rcg.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -157,6 +157,7 @@
  * @parent_map: map from software's parent index to hardware's src_sel field
  * @freq_tbl: frequency table
  * @current_freq: last cached frequency when using branches with shared RCGs
+ * @enable_safe_config: When set, the RCG is parked at CXO when it's disabled
  * @clkr: regmap clock handle
  *
  */
@@ -167,6 +168,7 @@
 	const struct parent_map	*parent_map;
 	const struct freq_tbl	*freq_tbl;
 	unsigned long		current_freq;
+	bool			enable_safe_config;
 	struct clk_regmap	clkr;
 };
 
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a071bba..590cf45 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013, 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -20,6 +20,7 @@
 #include <linux/delay.h>
 #include <linux/regmap.h>
 #include <linux/math64.h>
+#include <linux/clk.h>
 
 #include <asm/div64.h>
 
@@ -42,11 +43,20 @@
 #define CFG_MODE_SHIFT		12
 #define CFG_MODE_MASK		(0x3 << CFG_MODE_SHIFT)
 #define CFG_MODE_DUAL_EDGE	(0x2 << CFG_MODE_SHIFT)
+#define CFG_HW_CLK_CTRL_MASK	BIT(20)
 
 #define M_REG			0x8
 #define N_REG			0xc
 #define D_REG			0x10
 
+static struct freq_tbl cxo_f = {
+	.freq = 19200000,
+	.src = 0,
+	.pre_div = 1,
+	.m = 0,
+	.n = 0,
+};
+
 static int clk_rcg2_is_enabled(struct clk_hw *hw)
 {
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -124,6 +134,35 @@
 	return update_config(rcg);
 }
 
+static int clk_rcg2_set_force_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	int ret = 0, count = 500;
+
+	ret = regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+					CMD_ROOT_EN, CMD_ROOT_EN);
+	if (ret)
+		return ret;
+
+	for (; count > 0; count--) {
+		if (clk_rcg2_is_enabled(hw))
+			return ret;
+		/* Delay for 1usec and retry polling the status bit */
+		udelay(1);
+	}
+
+	WARN(1, "%s: rcg didn't turn on.", clk_hw_get_name(hw));
+	return ret;
+}
+
+static void clk_rcg2_clear_force_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	regmap_update_bits(rcg->clkr.regmap, rcg->cmd_rcgr + CMD_REG,
+					CMD_ROOT_EN, 0);
+}
+
 /*
  * Calculate m/n:d rate
  *
@@ -155,6 +194,12 @@
 	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 	u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask;
 
+	if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
+		if (!rcg->current_freq)
+			rcg->current_freq = cxo_f.freq;
+		return rcg->current_freq;
+	}
+
 	regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg);
 
 	if (rcg->mnd_width) {
@@ -252,7 +297,7 @@
 	}
 
 	mask = BIT(rcg->hid_width) - 1;
-	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK;
+	mask |= CFG_SRC_SEL_MASK | CFG_MODE_MASK | CFG_HW_CLK_CTRL_MASK;
 	cfg = f->pre_div << CFG_SRC_DIV_SHIFT;
 	cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
 	if (rcg->mnd_width && f->n && (f->m != f->n))
@@ -274,6 +319,15 @@
 	if (!f)
 		return -EINVAL;
 
+	/*
+	 * Return if the RCG is currently disabled. This configuration update
+	 * will happen as part of the RCG enable sequence.
+	 */
+	if (rcg->enable_safe_config && !clk_hw_is_prepared(hw)) {
+		rcg->current_freq = rate;
+		return 0;
+	}
+
 	return clk_rcg2_configure(rcg, f);
 }
 
@@ -289,8 +343,65 @@
 	return __clk_rcg2_set_rate(hw, rate);
 }
 
+static int clk_rcg2_enable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+	unsigned long rate = clk_get_rate(hw->clk);
+	const struct freq_tbl *f;
+
+	if (!rcg->enable_safe_config)
+		return 0;
+
+	/*
+	 * Switch from CXO to the stashed mux selection. Force enable and
+	 * disable the RCG while configuring it to safeguard against any update
+	 * signal coming from the downstream clock. The current parent has
+	 * already been prepared and enabled at this point, and the CXO source
+	 * is always on while APPS is online. Therefore, the RCG can safely be
+	 * switched.
+	 */
+	f = qcom_find_freq(rcg->freq_tbl, rate);
+	if (!f)
+		return -EINVAL;
+
+	clk_rcg2_set_force_enable(hw);
+	clk_rcg2_configure(rcg, f);
+	clk_rcg2_clear_force_enable(hw);
+
+	return 0;
+}
+
+static void clk_rcg2_disable(struct clk_hw *hw)
+{
+	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
+
+	if (!rcg->enable_safe_config)
+		return;
+	/*
+	 * Park the RCG at a safe configuration - sourced off the CXO. This is
+	 * needed for two reasons. First, for RCGs sourcing PSCBCs, default HW
+	 * behavior turns the RCG on when its corresponding GDSC is enabled.
+	 * Second, the RCG might be left enabled without the overlying SW
+	 * knowing about it, due to hard-to-track cases of downstream clocks
+	 * being left enabled. In both cases, scaling the RCG will fail since
+	 * it is enabled but with its sources cut off.
+	 *
+	 * Save mux select and switch to CXO. Force enable and disable the RCG
+	 * while configuring it to safeguard against any update signal coming
+	 * from the downstream clock. The current parent is still prepared and
+	 * enabled at this point, and the CXO source is always on while APPS is
+	 * online. Therefore, the RCG can safely be switched.
+	 */
+	clk_rcg2_set_force_enable(hw);
+	clk_rcg2_configure(rcg, &cxo_f);
+	clk_rcg2_clear_force_enable(hw);
+}
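
For illustration, a sketch of how an RCG definition would opt in to the parking behavior above; the register offset, parent map, frequency table and names are hypothetical placeholders, and only enable_safe_config is the knob added by this change:

static struct clk_rcg2 example_spi_clk_src = {
	.cmd_rcgr = 0x0600,				/* hypothetical offset */
	.hid_width = 5,
	.parent_map = example_parent_map,		/* hypothetical map */
	.freq_tbl = example_spi_freq_tbl,		/* hypothetical table */
	.enable_safe_config = true,			/* park at cxo_f when disabled */
	.clkr.hw.init = &(struct clk_init_data){
		.name = "example_spi_clk_src",
		.parent_names = example_parent_names,	/* hypothetical names */
		.num_parents = 4,
		.ops = &clk_rcg2_ops,
	},
};

With this set, clk_disable() ends up in clk_rcg2_disable(), which parks the mux on cxo_f, and clk_enable() goes through clk_rcg2_enable() to reconfigure the RCG for the last requested rate before handing control back.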
+
 const struct clk_ops clk_rcg2_ops = {
 	.is_enabled = clk_rcg2_is_enabled,
+	.enable = clk_rcg2_enable,
+	.disable = clk_rcg2_disable,
 	.get_parent = clk_rcg2_get_parent,
 	.set_parent = clk_rcg2_set_parent,
 	.recalc_rate = clk_rcg2_recalc_rate,
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index ae9bdeb..eface18 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -13,6 +13,8 @@
 #ifndef __QCOM_CLK_COMMON_H__
 #define __QCOM_CLK_COMMON_H__
 
+#include <linux/reset-controller.h>
+
 struct platform_device;
 struct regmap_config;
 struct clk_regmap;
@@ -32,6 +34,12 @@
 	size_t num_gdscs;
 };
 
+struct clk_dummy {
+	struct clk_hw hw;
+	struct reset_controller_dev reset;
+	unsigned long rrate;
+};
+
 extern const struct freq_tbl *qcom_find_freq(const struct freq_tbl *f,
 					     unsigned long rate);
 extern int qcom_find_src_index(struct clk_hw *hw, const struct parent_map *map,
@@ -48,5 +56,5 @@
 				struct regmap *regmap);
 extern int qcom_cc_probe(struct platform_device *pdev,
 			 const struct qcom_cc_desc *desc);
-
+extern struct clk_ops clk_dummy_ops;
 #endif
diff --git a/drivers/clk/qcom/gdsc-regulator.c b/drivers/clk/qcom/gdsc-regulator.c
new file mode 100644
index 0000000..3bb7c04
--- /dev/null
+++ b/drivers/clk/qcom/gdsc-regulator.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/mfd/syscon.h>
+#include <linux/clk/qcom.h>
+
+/* GDSCR */
+#define PWR_ON_MASK		BIT(31)
+#define CLK_DIS_WAIT_MASK	(0xF << 12)
+#define CLK_DIS_WAIT_SHIFT	(12)
+#define SW_OVERRIDE_MASK	BIT(2)
+#define HW_CONTROL_MASK		BIT(1)
+#define SW_COLLAPSE_MASK	BIT(0)
+
+/* CFG_GDSCR */
+#define GDSC_POWER_UP_COMPLETE		BIT(16)
+#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
+
+/* Domain Address */
+#define GMEM_CLAMP_IO_MASK	BIT(0)
+#define GMEM_RESET_MASK         BIT(4)
+
+/* SW Reset */
+#define BCR_BLK_ARES_BIT	BIT(0)
+
+/* Register Offset */
+#define REG_OFFSET		0x0
+#define CFG_GDSCR_OFFSET	0x4
+
+/* Timeout Delay */
+#define TIMEOUT_US		100
+
+struct gdsc {
+	struct regulator_dev	*rdev;
+	struct regulator_desc	rdesc;
+	void __iomem		*gdscr;
+	struct regmap           *regmap;
+	struct regmap           *domain_addr;
+	struct regmap           *hw_ctrl;
+	struct regmap           *sw_reset;
+	struct clk		**clocks;
+	struct reset_control	**reset_clocks;
+	bool			toggle_mem;
+	bool			toggle_periph;
+	bool			toggle_logic;
+	bool			resets_asserted;
+	bool			root_en;
+	bool			force_root_en;
+	bool			no_status_check_on_disable;
+	bool			is_gdsc_enabled;
+	bool			allow_clear;
+	bool			reset_aon;
+	bool			poll_cfg_gdscr;
+	int			clock_count;
+	int			reset_count;
+	int			root_clk_idx;
+	u32			gds_timeout;
+};
+
+enum gdscr_status {
+	ENABLED,
+	DISABLED,
+};
+
+static DEFINE_MUTEX(gdsc_seq_lock);
+
+void gdsc_allow_clear_retention(struct regulator *regulator)
+{
+	struct gdsc *sc = regulator_get_drvdata(regulator);
+
+	if (sc)
+		sc->allow_clear = true;
+}
+
+static int poll_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+	struct regmap *regmap;
+	int count = sc->gds_timeout;
+	u32 val;
+
+	if (sc->hw_ctrl)
+		regmap = sc->hw_ctrl;
+	else
+		regmap = sc->regmap;
+
+	for (; count > 0; count--) {
+		regmap_read(regmap, REG_OFFSET, &val);
+		val &= PWR_ON_MASK;
+
+		switch (status) {
+		case ENABLED:
+			if (val)
+				return 0;
+			break;
+		case DISABLED:
+			if (!val)
+				return 0;
+			break;
+		}
+		/*
+		 * There is no guarantee about the delay needed for the enable
+		 * bit in the GDSCR to be set or reset after the GDSC state
+		 * changes. Hence, keep checking a reasonable number of times
+		 * until the bit reaches the expected state, with the least
+		 * possible delay between successive tries.
+		 */
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int poll_cfg_gdsc_status(struct gdsc *sc, enum gdscr_status status)
+{
+	struct regmap *regmap = sc->regmap;
+	int count = sc->gds_timeout;
+	u32 val;
+
+	for (; count > 0; count--) {
+		regmap_read(regmap, CFG_GDSCR_OFFSET, &val);
+
+		switch (status) {
+		case ENABLED:
+			if (val & GDSC_POWER_UP_COMPLETE)
+				return 0;
+			break;
+		case DISABLED:
+			if (val & GDSC_POWER_DOWN_COMPLETE)
+				return 0;
+			break;
+		}
+		udelay(1);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int gdsc_is_enabled(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	if (!sc->toggle_logic)
+		return !sc->resets_asserted;
+
+	regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+	if (regval & PWR_ON_MASK) {
+		/*
+		 * The GDSC might be turned on due to TZ/HYP vote on the
+		 * votable GDS registers. Check the SW_COLLAPSE_MASK to
+		 * determine if HLOS has voted for it.
+		 */
+		if (!(regval & SW_COLLAPSE_MASK))
+			return true;
+	}
+
+	return false;
+}
+
+static int gdsc_enable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval, cfg_regval, hw_ctrl_regval = 0x0;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->root_en || sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	regmap_read(sc->regmap, REG_OFFSET, &regval);
+	if (regval & HW_CONTROL_MASK) {
+		dev_warn(&rdev->dev, "Invalid enable while %s is under HW control\n",
+				sc->rdesc.name);
+		mutex_unlock(&gdsc_seq_lock);
+		return -EBUSY;
+	}
+
+	if (sc->toggle_logic) {
+		if (sc->sw_reset) {
+			regmap_read(sc->sw_reset, REG_OFFSET, &regval);
+			regval |= BCR_BLK_ARES_BIT;
+			regmap_write(sc->sw_reset, REG_OFFSET, regval);
+			/*
+			 * BLK_ARES should be kept asserted for 1us before
+			 * being de-asserted.
+			 */
+			wmb();
+			udelay(1);
+
+			regval &= ~BCR_BLK_ARES_BIT;
+			regmap_write(sc->sw_reset, REG_OFFSET, regval);
+			/* Make sure de-assert goes through before continuing */
+			wmb();
+		}
+
+		if (sc->domain_addr) {
+			if (sc->reset_aon) {
+				regmap_read(sc->domain_addr, REG_OFFSET,
+								&regval);
+				regval |= GMEM_RESET_MASK;
+				regmap_write(sc->domain_addr, REG_OFFSET,
+								regval);
+				/*
+				 * Keep reset asserted for at least 1us before
+				 * continuing.
+				 */
+				wmb();
+				udelay(1);
+
+				regval &= ~GMEM_RESET_MASK;
+				regmap_write(sc->domain_addr, REG_OFFSET,
+							regval);
+				/*
+				 * Make sure GMEM_RESET is de-asserted before
+				 * continuing.
+				 */
+				wmb();
+			}
+
+			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
+			regval &= ~GMEM_CLAMP_IO_MASK;
+			regmap_write(sc->domain_addr, REG_OFFSET, regval);
+
+			/*
+			 * Make sure CLAMP_IO is de-asserted before continuing.
+			 */
+			wmb();
+		}
+
+		regmap_read(sc->regmap, REG_OFFSET, &regval);
+		regval &= ~SW_COLLAPSE_MASK;
+		regmap_write(sc->regmap, REG_OFFSET, regval);
+
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		if (sc->poll_cfg_gdscr)
+			ret = poll_cfg_gdsc_status(sc, ENABLED);
+		else
+			ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+			if (sc->hw_ctrl) {
+				regmap_read(sc->hw_ctrl, REG_OFFSET,
+						&hw_ctrl_regval);
+				dev_warn(&rdev->dev, "%s state (after %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x. Re-polling.\n",
+					sc->rdesc.name, sc->gds_timeout,
+					regval, hw_ctrl_regval);
+
+				ret = poll_gdsc_status(sc, ENABLED);
+				if (ret) {
+					regmap_read(sc->regmap, REG_OFFSET,
+								&regval);
+					regmap_read(sc->hw_ctrl, REG_OFFSET,
+							&hw_ctrl_regval);
+					dev_err(&rdev->dev, "%s final state (after additional %d us timeout): 0x%x, GDS_HW_CTRL: 0x%x\n",
+						sc->rdesc.name, sc->gds_timeout,
+						regval, hw_ctrl_regval);
+
+					mutex_unlock(&gdsc_seq_lock);
+					return ret;
+				}
+			} else {
+				dev_err(&rdev->dev, "%s enable timed out: 0x%x\n",
+					sc->rdesc.name,
+					regval);
+				udelay(sc->gds_timeout);
+
+				if (sc->poll_cfg_gdscr) {
+					regmap_read(sc->regmap, REG_OFFSET,
+							&regval);
+					regmap_read(sc->regmap,
+						CFG_GDSCR_OFFSET, &cfg_regval);
+					dev_err(&rdev->dev, "%s final state: gdscr - 0x%x, cfg_gdscr - 0x%x (%d us after timeout)\n",
+						sc->rdesc.name, regval,
+						cfg_regval, sc->gds_timeout);
+				} else {
+					regmap_read(sc->regmap, REG_OFFSET,
+							&regval);
+					dev_err(&rdev->dev, "%s final state: 0x%x (%d us after timeout)\n",
+						sc->rdesc.name, regval,
+						sc->gds_timeout);
+				}
+				mutex_unlock(&gdsc_seq_lock);
+
+				return ret;
+			}
+		}
+	} else {
+		for (i = 0; i < sc->reset_count; i++)
+			reset_control_deassert(sc->reset_clocks[i]);
+		sc->resets_asserted = false;
+	}
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		if (sc->toggle_periph)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+	}
+
+	/*
+	 * If clocks to this power domain were already on, they will take an
+	 * additional 4 clock cycles to re-enable after the rail is enabled.
+	 * Delay to account for this. A delay is also needed to ensure clocks
+	 * are not enabled within 400ns of enabling power to the memories.
+	 */
+	udelay(1);
+
+	/* Delay to account for staggered memory powerup. */
+	udelay(1);
+
+	if (sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+
+	sc->is_gdsc_enabled = true;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static int gdsc_disable(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int i, ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	if (sc->force_root_en)
+		clk_prepare_enable(sc->clocks[sc->root_clk_idx]);
+
+	for (i = sc->clock_count - 1; i >= 0; i--) {
+		if (unlikely(i == sc->root_clk_idx))
+			continue;
+		if (sc->toggle_mem && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+		if (sc->toggle_periph && sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	/* Delay to account for staggered memory powerdown. */
+	udelay(1);
+
+	if (sc->toggle_logic) {
+		regmap_read(sc->regmap, REG_OFFSET, &regval);
+		regval |= SW_COLLAPSE_MASK;
+		regmap_write(sc->regmap, REG_OFFSET, regval);
+
+		/* Wait for 8 XO cycles before polling the status bit. */
+		mb();
+		udelay(1);
+
+		if (sc->no_status_check_on_disable) {
+			/*
+			 * Add a short delay here so that a gdsc_enable()
+			 * issued right after this disable does not leave the
+			 * GDSC in an inconsistent state.
+			 */
+			udelay(TIMEOUT_US);
+		} else {
+			if (sc->poll_cfg_gdscr)
+				ret = poll_cfg_gdsc_status(sc, DISABLED);
+			else
+				ret = poll_gdsc_status(sc, DISABLED);
+			if (ret)
+				dev_err(&rdev->dev, "%s disable timed out: 0x%x\n",
+					sc->rdesc.name, regval);
+		}
+
+		if (sc->domain_addr) {
+			regmap_read(sc->domain_addr, REG_OFFSET, &regval);
+			regval |= GMEM_CLAMP_IO_MASK;
+			regmap_write(sc->domain_addr, REG_OFFSET, regval);
+		}
+
+	} else {
+		for (i = sc->reset_count - 1; i >= 0; i--)
+			reset_control_assert(sc->reset_clocks[i]);
+		sc->resets_asserted = true;
+	}
+
+	/*
+	 * Check if gdsc_enable was called for this GDSC. If not, the root
+	 * clock will not have been enabled prior to this.
+	 */
+	if ((sc->is_gdsc_enabled && sc->root_en) || sc->force_root_en)
+		clk_disable_unprepare(sc->clocks[sc->root_clk_idx]);
+
+	sc->is_gdsc_enabled = false;
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
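
For illustration, a minimal consumer of such a GDSC through the regulator framework; the "vdd-gx" supply name and probe context are hypothetical. regulator_enable() ends up in gdsc_enable() (clear SW_COLLAPSE, poll PWR_ON or CFG_GDSCR) and regulator_disable() in gdsc_disable() (set SW_COLLAPSE, optionally poll for power down):

static int example_gdsc_user(struct device *dev)
{
	struct regulator *gx_gdsc;
	int ret;

	/* "vdd-gx" is a hypothetical <name>-supply wired to a qcom,gdsc node */
	gx_gdsc = devm_regulator_get(dev, "vdd-gx");
	if (IS_ERR(gx_gdsc))
		return PTR_ERR(gx_gdsc);

	ret = regulator_enable(gx_gdsc);	/* -> gdsc_enable() */
	if (ret)
		return ret;

	/* ... access hardware inside the power domain ... */

	return regulator_disable(gx_gdsc);	/* -> gdsc_disable() */
}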
+
+static unsigned int gdsc_get_mode(struct regulator_dev *rdev)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+
+	mutex_lock(&gdsc_seq_lock);
+	regmap_read(sc->regmap, REG_OFFSET, &regval);
+	mutex_unlock(&gdsc_seq_lock);
+
+	if (regval & HW_CONTROL_MASK)
+		return REGULATOR_MODE_FAST;
+
+	return REGULATOR_MODE_NORMAL;
+}
+
+static int gdsc_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+	struct gdsc *sc = rdev_get_drvdata(rdev);
+	uint32_t regval;
+	int ret = 0;
+
+	mutex_lock(&gdsc_seq_lock);
+
+	regmap_read(sc->regmap, REG_OFFSET, &regval);
+
+	switch (mode) {
+	case REGULATOR_MODE_FAST:
+		/* Turn on HW trigger mode */
+		regval |= HW_CONTROL_MASK;
+		regmap_write(sc->regmap, REG_OFFSET, regval);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that results in the GDSC going through a power down and
+		 * up cycle.  If the HW trigger signal is controlled by
+		 * firmware that also polls the same status bits as we do,
+		 * the FW might read an 'on' status before the GDSC can
+		 * finish the power cycle.  Wait 1us before returning to
+		 * ensure the FW can't immediately poll the status bit.
+		 */
+		mb();
+		udelay(1);
+		break;
+	case REGULATOR_MODE_NORMAL:
+		/* Turn off HW trigger mode */
+		regval &= ~HW_CONTROL_MASK;
+		regmap_write(sc->regmap, REG_OFFSET, regval);
+		/*
+		 * There may be a race with the internal HW trigger signal
+		 * that results in the GDSC going through a power down and
+		 * up cycle.  If we poll too early, the status bit will
+		 * indicate 'on' before the GDSC can finish the power cycle.
+		 * Account for this case by waiting 1us before polling.
+		 */
+		mb();
+		udelay(1);
+
+		if (sc->poll_cfg_gdscr)
+			ret = poll_cfg_gdsc_status(sc, ENABLED);
+		else
+			ret = poll_gdsc_status(sc, ENABLED);
+		if (ret)
+			dev_err(&rdev->dev, "%s set_mode timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	mutex_unlock(&gdsc_seq_lock);
+
+	return ret;
+}
+
+static struct regulator_ops gdsc_ops = {
+	.is_enabled = gdsc_is_enabled,
+	.enable = gdsc_enable,
+	.disable = gdsc_disable,
+	.set_mode = gdsc_set_mode,
+	.get_mode = gdsc_get_mode,
+};
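
For illustration, the HW-control handoff implemented by gdsc_set_mode() above, reusing the hypothetical handle from the previous sketch; this path is only reachable when qcom,support-hw-trigger has added REGULATOR_CHANGE_MODE to the constraints:

	/* Hand the GDSC to the HW/firmware state machine (sets HW_CONTROL_MASK) */
	regulator_set_mode(gx_gdsc, REGULATOR_MODE_FAST);

	/* Take it back under SW control; gdsc_set_mode() then re-polls for PWR_ON */
	regulator_set_mode(gx_gdsc, REGULATOR_MODE_NORMAL);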
+
+static const struct regmap_config gdsc_regmap_config = {
+	.reg_bits   = 32,
+	.reg_stride = 4,
+	.val_bits   = 32,
+	.fast_io    = true,
+};
+
+static int gdsc_probe(struct platform_device *pdev)
+{
+	static atomic_t gdsc_count = ATOMIC_INIT(-1);
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data;
+	struct resource *res;
+	struct gdsc *sc;
+	uint32_t regval, clk_dis_wait_val = 0;
+	bool retain_mem, retain_periph, support_hw_trigger;
+	int i, ret;
+	u32 timeout;
+
+	sc = devm_kzalloc(&pdev->dev, sizeof(struct gdsc), GFP_KERNEL);
+	if (sc == NULL)
+		return -ENOMEM;
+
+	init_data = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node,
+								&sc->rdesc);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	if (of_get_property(pdev->dev.of_node, "parent-supply", NULL))
+		init_data->supply_regulator = "parent";
+
+	ret = of_property_read_string(pdev->dev.of_node, "regulator-name",
+							&sc->rdesc.name);
+	if (ret)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "Failed to get resources\n");
+		return -EINVAL;
+	}
+
+	sc->gdscr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+	if (sc->gdscr == NULL)
+		return -ENOMEM;
+
+	sc->regmap = devm_regmap_init_mmio(&pdev->dev, sc->gdscr,
+							&gdsc_regmap_config);
+	if (!sc->regmap) {
+		dev_err(&pdev->dev, "Couldn't get regmap\n");
+		return -EINVAL;
+	}
+
+	if (of_find_property(pdev->dev.of_node, "domain-addr", NULL)) {
+		sc->domain_addr = syscon_regmap_lookup_by_phandle
+					(pdev->dev.of_node, "domain-addr");
+		if (IS_ERR(sc->domain_addr))
+			return -ENODEV;
+	}
+
+	if (of_find_property(pdev->dev.of_node, "sw-reset", NULL)) {
+		sc->sw_reset = syscon_regmap_lookup_by_phandle
+						(pdev->dev.of_node, "sw-reset");
+		if (IS_ERR(sc->sw_reset))
+			return -ENODEV;
+	}
+
+	if (of_find_property(pdev->dev.of_node, "hw-ctrl-addr", NULL)) {
+		sc->hw_ctrl = syscon_regmap_lookup_by_phandle(
+					pdev->dev.of_node, "hw-ctrl-addr");
+		if (IS_ERR(sc->hw_ctrl))
+			return -ENODEV;
+	}
+
+	sc->poll_cfg_gdscr = of_property_read_bool(pdev->dev.of_node,
+						"qcom,poll-cfg-gdscr");
+
+	sc->gds_timeout = TIMEOUT_US;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,gds-timeout",
+							&timeout);
+	if (!ret)
+		sc->gds_timeout = timeout;
+
+	sc->clock_count = of_property_count_strings(pdev->dev.of_node,
+					    "clock-names");
+	if (sc->clock_count == -EINVAL) {
+		sc->clock_count = 0;
+	} else if (sc->clock_count < 0) {
+		dev_err(&pdev->dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	sc->clocks = devm_kzalloc(&pdev->dev,
+			sizeof(struct clk *) * sc->clock_count, GFP_KERNEL);
+	if (!sc->clocks)
+		return -ENOMEM;
+
+	sc->root_clk_idx = -1;
+
+	sc->root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,enable-root-clk");
+
+	sc->force_root_en = of_property_read_bool(pdev->dev.of_node,
+						"qcom,force-enable-root-clk");
+
+	for (i = 0; i < sc->clock_count; i++) {
+		const char *clock_name;
+
+		of_property_read_string_index(pdev->dev.of_node, "clock-names",
+				i, &clock_name);
+
+		sc->clocks[i] = devm_clk_get(&pdev->dev, clock_name);
+		if (IS_ERR(sc->clocks[i])) {
+			int rc = PTR_ERR(sc->clocks[i]);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(&pdev->dev, "Failed to get %s\n",
+					clock_name);
+			return rc;
+		}
+
+		if (!strcmp(clock_name, "core_root_clk"))
+			sc->root_clk_idx = i;
+	}
+
+	if ((sc->root_en || sc->force_root_en) && (sc->root_clk_idx == -1)) {
+		dev_err(&pdev->dev, "Failed to get root clock name\n");
+		return -EINVAL;
+	}
+
+	sc->reset_aon = of_property_read_bool(pdev->dev.of_node,
+							"qcom,reset-aon-logic");
+
+	sc->rdesc.id = atomic_inc_return(&gdsc_count);
+	sc->rdesc.ops = &gdsc_ops;
+	sc->rdesc.type = REGULATOR_VOLTAGE;
+	sc->rdesc.owner = THIS_MODULE;
+	platform_set_drvdata(pdev, sc);
+
+	/*
+	 * Disable HW trigger: collapse/restore occur based on register writes.
+	 * Disable SW override: Use hardware state-machine for sequencing.
+	 */
+	regmap_read(sc->regmap, REG_OFFSET, &regval);
+	regval &= ~(HW_CONTROL_MASK | SW_OVERRIDE_MASK);
+
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,clk-dis-wait-val",
+				  &clk_dis_wait_val)) {
+		clk_dis_wait_val = clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;
+
+		/* Configure wait time between states. */
+		regval &= ~(CLK_DIS_WAIT_MASK);
+		regval |= clk_dis_wait_val;
+	}
+
+	regmap_write(sc->regmap, REG_OFFSET, regval);
+
+	sc->no_status_check_on_disable =
+			of_property_read_bool(pdev->dev.of_node,
+					"qcom,no-status-check-on-disable");
+	retain_mem = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-mem");
+	sc->toggle_mem = !retain_mem;
+	retain_periph = of_property_read_bool(pdev->dev.of_node,
+					    "qcom,retain-periph");
+	sc->toggle_periph = !retain_periph;
+	sc->toggle_logic = !of_property_read_bool(pdev->dev.of_node,
+						"qcom,skip-logic-collapse");
+	support_hw_trigger = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,support-hw-trigger");
+	if (support_hw_trigger) {
+		init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_MODE;
+		init_data->constraints.valid_modes_mask |=
+				REGULATOR_MODE_NORMAL | REGULATOR_MODE_FAST;
+	}
+
+	if (!sc->toggle_logic) {
+		sc->reset_count = of_property_count_strings(pdev->dev.of_node,
+					    "reset-names");
+		if (sc->reset_count == -EINVAL) {
+			sc->reset_count = 0;
+		} else if (sc->reset_count < 0) {
+			dev_err(&pdev->dev, "Failed to get reset clock names\n");
+			return -EINVAL;
+		}
+
+		sc->reset_clocks = devm_kzalloc(&pdev->dev,
+			sizeof(struct reset_control *) * sc->reset_count,
+							GFP_KERNEL);
+		if (!sc->reset_clocks)
+			return -ENOMEM;
+
+		for (i = 0; i < sc->reset_count; i++) {
+			const char *reset_name;
+
+			of_property_read_string_index(pdev->dev.of_node,
+					"reset-names", i, &reset_name);
+			sc->reset_clocks[i] = devm_reset_control_get(&pdev->dev,
+								reset_name);
+			if (IS_ERR(sc->reset_clocks[i])) {
+				int rc = PTR_ERR(sc->reset_clocks[i]);
+
+				if (rc != -EPROBE_DEFER)
+					dev_err(&pdev->dev, "Failed to get %s\n",
+							reset_name);
+				return rc;
+			}
+		}
+
+		regval &= ~SW_COLLAPSE_MASK;
+		regmap_write(sc->regmap, REG_OFFSET, regval);
+
+		if (sc->poll_cfg_gdscr)
+			ret = poll_cfg_gdsc_status(sc, ENABLED);
+		else
+			ret = poll_gdsc_status(sc, ENABLED);
+		if (ret) {
+			dev_err(&pdev->dev, "%s enable timed out: 0x%x\n",
+				sc->rdesc.name, regval);
+			return ret;
+		}
+	}
+
+	sc->allow_clear = of_property_read_bool(pdev->dev.of_node,
+							"qcom,disallow-clear");
+	sc->allow_clear = !sc->allow_clear;
+
+	for (i = 0; i < sc->clock_count; i++) {
+		if (retain_mem || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_MEM);
+
+		if (retain_periph || (regval & PWR_ON_MASK) || !sc->allow_clear)
+			clk_set_flags(sc->clocks[i], CLKFLAG_RETAIN_PERIPH);
+		else
+			clk_set_flags(sc->clocks[i], CLKFLAG_NORETAIN_PERIPH);
+	}
+
+	reg_config.dev = &pdev->dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = sc;
+	reg_config.of_node = pdev->dev.of_node;
+	reg_config.regmap = sc->regmap;
+
+	sc->rdev = regulator_register(&sc->rdesc, &reg_config);
+	if (IS_ERR(sc->rdev)) {
+		dev_err(&pdev->dev, "regulator_register(\"%s\") failed.\n",
+			sc->rdesc.name);
+		return PTR_ERR(sc->rdev);
+	}
+
+	return 0;
+}
+
+static int gdsc_remove(struct platform_device *pdev)
+{
+	struct gdsc *sc = platform_get_drvdata(pdev);
+
+	regulator_unregister(sc->rdev);
+
+	return 0;
+}
+
+static const struct of_device_id gdsc_match_table[] = {
+	{ .compatible = "qcom,gdsc" },
+	{}
+};
+
+static struct platform_driver gdsc_driver = {
+	.probe  = gdsc_probe,
+	.remove = gdsc_remove,
+	.driver = {
+		.name = "gdsc",
+		.of_match_table = gdsc_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init gdsc_init(void)
+{
+	return platform_driver_register(&gdsc_driver);
+}
+subsys_initcall(gdsc_init);
+
+static void __exit gdsc_exit(void)
+{
+	platform_driver_unregister(&gdsc_driver);
+}
+module_exit(gdsc_exit);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 73c487d..e5a2016 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -139,20 +139,20 @@
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTP_CTL);
+			writel_relaxed_no_log(val, timer->base + CNTP_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTP_TVAL);
+			writel_relaxed_no_log(val, timer->base + CNTP_TVAL);
 			break;
 		}
 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			writel_relaxed(val, timer->base + CNTV_CTL);
+			writel_relaxed_no_log(val, timer->base + CNTV_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			writel_relaxed(val, timer->base + CNTV_TVAL);
+			writel_relaxed_no_log(val, timer->base + CNTV_TVAL);
 			break;
 		}
 	} else {
@@ -170,20 +170,20 @@
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			val = readl_relaxed(timer->base + CNTP_CTL);
+			val = readl_relaxed_no_log(timer->base + CNTP_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTP_TVAL);
+			val = readl_relaxed_no_log(timer->base + CNTP_TVAL);
 			break;
 		}
 	} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
 		struct arch_timer *timer = to_arch_timer(clk);
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
-			val = readl_relaxed(timer->base + CNTV_CTL);
+			val = readl_relaxed_no_log(timer->base + CNTV_CTL);
 			break;
 		case ARCH_TIMER_REG_TVAL:
-			val = readl_relaxed(timer->base + CNTV_TVAL);
+			val = readl_relaxed_no_log(timer->base + CNTV_TVAL);
 			break;
 		}
 	} else {
@@ -508,7 +508,8 @@
 	if (!acpi_disabled ||
 	    of_property_read_u32(np, "clock-frequency", &arch_timer_rate)) {
 		if (cntbase)
-			arch_timer_rate = readl_relaxed(cntbase + CNTFRQ);
+			arch_timer_rate = readl_relaxed_no_log(cntbase
+								+ CNTFRQ);
 		else
 			arch_timer_rate = arch_timer_get_cntfrq();
 	}
@@ -545,9 +546,9 @@
 	u32 vct_lo, vct_hi, tmp_hi;
 
 	do {
-		vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
-		vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
-		tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
+		vct_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
+		vct_lo = readl_relaxed_no_log(arch_counter_base + CNTVCT_LO);
+		tmp_hi = readl_relaxed_no_log(arch_counter_base + CNTVCT_HI);
 	} while (vct_hi != tmp_hi);
 
 	return ((u64) vct_hi << 32) | vct_lo;
@@ -926,7 +927,7 @@
 		return -ENXIO;
 	}
 
-	cnttidr = readl_relaxed(cntctlbase + CNTTIDR);
+	cnttidr = readl_relaxed_no_log(cntctlbase + CNTTIDR);
 
 	/*
 	 * Try to find a virtual capable frame. Otherwise fall back to a
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index d8b164a..61be70e 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -112,6 +112,15 @@
 	  have a look at the help section of that governor. The fallback
 	  governor will be 'performance'.
 
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+	bool "interactive"
+	select CPU_FREQ_GOV_INTERACTIVE
+	help
+	  Use the CPUFreq governor 'interactive' as default. This allows
+	  you to get a full dynamic cpu frequency capable system by simply
+	  loading your cpufreq low-level hardware driver, using the
+	  'interactive' governor for latency-sensitive workloads.
+
 endchoice
 
 config CPU_FREQ_GOV_PERFORMANCE
@@ -169,6 +178,20 @@
 
 	  If in doubt, say N.
 
+config CPU_FREQ_GOV_INTERACTIVE
+	bool "'interactive' cpufreq policy governor"
+	help
+	  'interactive' - This driver adds a dynamic cpufreq policy governor
+	  designed for latency-sensitive workloads.
+
+	  This governor attempts to reduce the latency of clock
+	  increases so that the system is more responsive to
+	  interactive workloads.
+
+	  For details, take a look at linux/Documentation/cpu-freq.
+
+	  If in doubt, say N.
+
 config CPU_FREQ_GOV_CONSERVATIVE
 	tristate "'conservative' cpufreq governor"
 	depends on CPU_FREQ
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 0a9b6a09..f0c9905 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -10,6 +10,7 @@
 obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE)	+= cpufreq_userspace.o
 obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND)	+= cpufreq_ondemand.o
 obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE)	+= cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE)	+= cpufreq_interactive.o
 obj-$(CONFIG_CPU_FREQ_GOV_COMMON)		+= cpufreq_governor.o
 obj-$(CONFIG_CPU_FREQ_GOV_ATTR_SET)	+= cpufreq_governor_attr_set.o
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e6c1fb..c910111 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2194,6 +2194,7 @@
 
 	policy->min = new_policy->min;
 	policy->max = new_policy->max;
+	trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);
 
 	policy->cached_target_freq = UINT_MAX;
 
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
new file mode 100644
index 0000000..f3266a3
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -0,0 +1,1380 @@
+/*
+ * drivers/cpufreq/cpufreq_interactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/rwsem.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <linux/tick.h>
+#include <linux/time.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/cpufreq_interactive.h>
+
+struct cpufreq_interactive_cpuinfo {
+	struct timer_list cpu_timer;
+	struct timer_list cpu_slack_timer;
+	spinlock_t load_lock; /* protects the next 4 fields */
+	u64 time_in_idle;
+	u64 time_in_idle_timestamp;
+	u64 cputime_speedadj;
+	u64 cputime_speedadj_timestamp;
+	struct cpufreq_policy *policy;
+	struct cpufreq_frequency_table *freq_table;
+	spinlock_t target_freq_lock; /* protects target freq */
+	unsigned int target_freq;
+	unsigned int floor_freq;
+	u64 pol_floor_val_time; /* policy floor_validate_time */
+	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
+	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
+	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
+	struct rw_semaphore enable_sem;
+	int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
+
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
+static struct mutex gov_lock;
+
+/* Target load.  Lower values result in higher CPU speeds. */
+#define DEFAULT_TARGET_LOAD 90
+static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
+
+#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
+#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
+static unsigned int default_above_hispeed_delay[] = {
+	DEFAULT_ABOVE_HISPEED_DELAY };
+
+struct cpufreq_interactive_tunables {
+	int usage_count;
+	/* High speed to bump to from a low speed on a load burst (default max) */
+	unsigned int hispeed_freq;
+	/* Go to hi speed when CPU load at or above this value. */
+#define DEFAULT_GO_HISPEED_LOAD 99
+	unsigned long go_hispeed_load;
+	/* Target load. Lower values result in higher CPU speeds. */
+	spinlock_t target_loads_lock;
+	unsigned int *target_loads;
+	int ntarget_loads;
+	/*
+	 * The minimum amount of time to spend at a frequency before we can ramp
+	 * down.
+	 */
+#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
+	unsigned long min_sample_time;
+	/*
+	 * The sample rate of the timer used to increase frequency
+	 */
+	unsigned long timer_rate;
+	/*
+	 * Wait this long before raising speed above hispeed, by default a
+	 * single timer interval.
+	 */
+	spinlock_t above_hispeed_delay_lock;
+	unsigned int *above_hispeed_delay;
+	int nabove_hispeed_delay;
+	/* Non-zero means indefinite speed boost active */
+	int boost_val;
+	/* Duration of a boost pulse in usecs */
+	int boostpulse_duration_val;
+	/* End time of boost pulse in ktime converted to usecs */
+	u64 boostpulse_endtime;
+	bool boosted;
+	/*
+	 * Max additional time to wait in idle, beyond timer_rate, at speeds
+	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
+	 */
+#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
+	int timer_slack_val;
+	bool io_is_busy;
+};
+
+/* For cases where we have single governor instance for system */
+static struct cpufreq_interactive_tunables *common_tunables;
+
+static struct attribute_group *get_sysfs_attr(void);
+
+static void cpufreq_interactive_timer_resched(
+	struct cpufreq_interactive_cpuinfo *pcpu)
+{
+	struct cpufreq_interactive_tunables *tunables =
+		pcpu->policy->governor_data;
+	unsigned long expires;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pcpu->load_lock, flags);
+	pcpu->time_in_idle =
+		get_cpu_idle_time(smp_processor_id(),
+				  &pcpu->time_in_idle_timestamp,
+				  tunables->io_is_busy);
+	pcpu->cputime_speedadj = 0;
+	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
+	mod_timer(&pcpu->cpu_timer, expires);
+
+	if (tunables->timer_slack_val >= 0 &&
+	    pcpu->target_freq > pcpu->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		mod_timer(&pcpu->cpu_slack_timer, expires);
+	}
+
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
+
+/* The caller shall hold the enable_sem write semaphore to avoid any timer
+ * race. The cpu_timer and cpu_slack_timer must be deactivated when calling
+ * this function.
+ */
+static void cpufreq_interactive_timer_start(
+	struct cpufreq_interactive_tunables *tunables, int cpu)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	unsigned long expires = jiffies +
+		usecs_to_jiffies(tunables->timer_rate);
+	unsigned long flags;
+
+	pcpu->cpu_timer.expires = expires;
+	add_timer_on(&pcpu->cpu_timer, cpu);
+	if (tunables->timer_slack_val >= 0 &&
+	    pcpu->target_freq > pcpu->policy->min) {
+		expires += usecs_to_jiffies(tunables->timer_slack_val);
+		pcpu->cpu_slack_timer.expires = expires;
+		add_timer_on(&pcpu->cpu_slack_timer, cpu);
+	}
+
+	spin_lock_irqsave(&pcpu->load_lock, flags);
+	pcpu->time_in_idle =
+		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
+				  tunables->io_is_busy);
+	pcpu->cputime_speedadj = 0;
+	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
+}
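
For illustration, a caller honoring the locking rule documented above this function; the pcpu, tunables and cpu variables are assumed to come from the surrounding governor context:

	down_write(&pcpu->enable_sem);
	del_timer_sync(&pcpu->cpu_timer);
	del_timer_sync(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer_start(tunables, cpu);
	up_write(&pcpu->enable_sem);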
+
+static unsigned int freq_to_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
+			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
+		;
+
+	ret = tunables->above_hispeed_delay[i];
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static unsigned int freq_to_targetload(
+	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
+{
+	int i;
+	unsigned int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+	for (i = 0; i < tunables->ntarget_loads - 1 &&
+		    freq >= tunables->target_loads[i+1]; i += 2)
+		;
+
+	ret = tunables->target_loads[i];
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return ret;
+}
+
+/*
+ * If increasing frequencies never map to a lower target load then
+ * choose_freq() will find the minimum frequency that does not exceed its
+ * target load given the current load.
+ */
+static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
+		unsigned int loadadjfreq)
+{
+	unsigned int freq = pcpu->policy->cur;
+	unsigned int prevfreq, freqmin, freqmax;
+	unsigned int tl;
+	int index;
+
+	freqmin = 0;
+	freqmax = UINT_MAX;
+
+	do {
+		prevfreq = freq;
+		tl = freq_to_targetload(pcpu->policy->governor_data, freq);
+
+		/*
+		 * Find the lowest frequency where the computed load is less
+		 * than or equal to the target load.
+		 */
+
+		index = cpufreq_frequency_table_target(
+			    pcpu->policy, loadadjfreq / tl,
+			    CPUFREQ_RELATION_L);
+		freq = pcpu->freq_table[index].frequency;
+
+		if (freq > prevfreq) {
+			/* The previous frequency is too low. */
+			freqmin = prevfreq;
+
+			if (freq >= freqmax) {
+				/*
+				 * Find the highest frequency that is less
+				 * than freqmax.
+				 */
+				index = cpufreq_frequency_table_target(
+					    pcpu->policy,
+					    freqmax - 1, CPUFREQ_RELATION_H);
+				freq = pcpu->freq_table[index].frequency;
+
+				if (freq == freqmin) {
+					/*
+					 * The first frequency below freqmax
+					 * has already been found to be too
+					 * low.  freqmax is the lowest speed
+					 * we found that is fast enough.
+					 */
+					freq = freqmax;
+					break;
+				}
+			}
+		} else if (freq < prevfreq) {
+			/* The previous frequency is high enough. */
+			freqmax = prevfreq;
+
+			if (freq <= freqmin) {
+				/*
+				 * Find the lowest frequency that is higher
+				 * than freqmin.
+				 */
+				index = cpufreq_frequency_table_target(
+					    pcpu->policy,
+					    freqmin + 1, CPUFREQ_RELATION_L);
+				freq = pcpu->freq_table[index].frequency;
+
+				/*
+				 * If freqmax is the first frequency above
+				 * freqmin then we have already found that
+				 * this speed is fast enough.
+				 */
+				if (freq == freqmax)
+					break;
+			}
+		}
+
+		/* If same frequency chosen as previous then done. */
+	} while (freq != prevfreq);
+
+	return freq;
+}
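
A worked example of the search above, with hypothetical numbers: with policy->cur = 1,000,000 kHz and a measured load of 60%, loadadjfreq is about 60,000,000, and with a flat target load of 90 the first iteration asks for the lowest table frequency at or above 60,000,000 / 90 (about 666,667 kHz). If the table holds 300,000, 600,000, 800,000 and 1,000,000 kHz, CPUFREQ_RELATION_L selects 800,000; re-evaluating at 800,000 yields the same frequency, so the loop terminates and choose_freq() returns 800,000 kHz.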
+
+static u64 update_load(int cpu)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
+	struct cpufreq_interactive_tunables *tunables =
+		pcpu->policy->governor_data;
+	u64 now;
+	u64 now_idle;
+	unsigned int delta_idle;
+	unsigned int delta_time;
+	u64 active_time;
+
+	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
+	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
+	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
+
+	if (delta_time <= delta_idle)
+		active_time = 0;
+	else
+		active_time = delta_time - delta_idle;
+
+	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
+
+	pcpu->time_in_idle = now_idle;
+	pcpu->time_in_idle_timestamp = now;
+	return now;
+}
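
Worked numbers for the accounting above (hypothetical): over a 20,000 us sampling window with 5,000 us spent idle, active_time = 15,000 us, so 15,000 * 1,500,000 is added to cputime_speedadj at policy->cur = 1,500,000 kHz. When the sampling timer later divides by delta_time and scales by 100, the resulting loadadjfreq of 112,500,000 corresponds to a 75% load at the current frequency.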
+
+static void cpufreq_interactive_timer(unsigned long data)
+{
+	u64 now;
+	unsigned int delta_time;
+	u64 cputime_speedadj;
+	int cpu_load;
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, data);
+	struct cpufreq_interactive_tunables *tunables =
+		pcpu->policy->governor_data;
+	unsigned int new_freq;
+	unsigned int loadadjfreq;
+	unsigned int index;
+	unsigned long flags;
+	u64 max_fvtime;
+
+	if (!down_read_trylock(&pcpu->enable_sem))
+		return;
+	if (!pcpu->governor_enabled)
+		goto exit;
+
+	spin_lock_irqsave(&pcpu->load_lock, flags);
+	now = update_load(data);
+	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
+	cputime_speedadj = pcpu->cputime_speedadj;
+	spin_unlock_irqrestore(&pcpu->load_lock, flags);
+
+	if (WARN_ON_ONCE(!delta_time))
+		goto rearm;
+
+	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+	do_div(cputime_speedadj, delta_time);
+	loadadjfreq = (unsigned int)cputime_speedadj * 100;
+	cpu_load = loadadjfreq / pcpu->policy->cur;
+	tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+
+	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
+		if (pcpu->policy->cur < tunables->hispeed_freq) {
+			new_freq = tunables->hispeed_freq;
+		} else {
+			new_freq = choose_freq(pcpu, loadadjfreq);
+
+			if (new_freq < tunables->hispeed_freq)
+				new_freq = tunables->hispeed_freq;
+		}
+	} else {
+		new_freq = choose_freq(pcpu, loadadjfreq);
+		if (new_freq > tunables->hispeed_freq &&
+				pcpu->policy->cur < tunables->hispeed_freq)
+			new_freq = tunables->hispeed_freq;
+	}
+
+	if (pcpu->policy->cur >= tunables->hispeed_freq &&
+	    new_freq > pcpu->policy->cur &&
+	    now - pcpu->pol_hispeed_val_time <
+	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
+		trace_cpufreq_interactive_notyet(
+			data, cpu_load, pcpu->target_freq,
+			pcpu->policy->cur, new_freq);
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	pcpu->loc_hispeed_val_time = now;
+
+	index = cpufreq_frequency_table_target(pcpu->policy,
+					   new_freq, CPUFREQ_RELATION_L);
+	new_freq = pcpu->freq_table[index].frequency;
+
+	/*
+	 * Do not scale below floor_freq unless we have been at or above the
+	 * floor frequency for the minimum sample time since last validated.
+	 */
+	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
+	if (new_freq < pcpu->floor_freq &&
+	    pcpu->target_freq >= pcpu->policy->cur) {
+		if (now - max_fvtime < tunables->min_sample_time) {
+			trace_cpufreq_interactive_notyet(
+				data, cpu_load, pcpu->target_freq,
+				pcpu->policy->cur, new_freq);
+			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+			goto rearm;
+		}
+	}
+
+	/*
+	 * Update the timestamp for checking whether speed has been held at
+	 * or above the selected frequency for a minimum of min_sample_time,
+	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
+	 * allow the speed to drop as soon as the boostpulse duration expires
+	 * (or the indefinite boost is turned off).
+	 */
+
+	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
+		pcpu->floor_freq = new_freq;
+		if (pcpu->target_freq >= pcpu->policy->cur ||
+		    new_freq >= pcpu->policy->cur)
+			pcpu->loc_floor_val_time = now;
+	}
+
+	if (pcpu->target_freq == new_freq &&
+			pcpu->target_freq <= pcpu->policy->cur) {
+		trace_cpufreq_interactive_already(
+			data, cpu_load, pcpu->target_freq,
+			pcpu->policy->cur, new_freq);
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+		goto rearm;
+	}
+
+	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
+					 pcpu->policy->cur, new_freq);
+
+	pcpu->target_freq = new_freq;
+	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+	cpumask_set_cpu(data, &speedchange_cpumask);
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+	wake_up_process(speedchange_task);
+
+rearm:
+	if (!timer_pending(&pcpu->cpu_timer))
+		cpufreq_interactive_timer_resched(pcpu);
+
+exit:
+	up_read(&pcpu->enable_sem);
+	return;
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu =
+		&per_cpu(cpuinfo, smp_processor_id());
+
+	if (!down_read_trylock(&pcpu->enable_sem))
+		return;
+	if (!pcpu->governor_enabled) {
+		up_read(&pcpu->enable_sem);
+		return;
+	}
+
+	/* Arm the timer for 1-2 ticks later if not already. */
+	if (!timer_pending(&pcpu->cpu_timer)) {
+		cpufreq_interactive_timer_resched(pcpu);
+	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
+		del_timer(&pcpu->cpu_timer);
+		del_timer(&pcpu->cpu_slack_timer);
+		cpufreq_interactive_timer(smp_processor_id());
+	}
+
+	up_read(&pcpu->enable_sem);
+}
+
+static void cpufreq_interactive_get_policy_info(struct cpufreq_policy *policy,
+						unsigned int *pmax_freq,
+						u64 *phvt, u64 *pfvt)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	unsigned int max_freq = 0;
+	u64 hvt = ~0ULL, fvt = 0;
+	unsigned int i;
+
+	for_each_cpu(i, policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+
+		fvt = max(fvt, pcpu->loc_floor_val_time);
+		if (pcpu->target_freq > max_freq) {
+			max_freq = pcpu->target_freq;
+			hvt = pcpu->loc_hispeed_val_time;
+		} else if (pcpu->target_freq == max_freq) {
+			hvt = min(hvt, pcpu->loc_hispeed_val_time);
+		}
+	}
+
+	*pmax_freq = max_freq;
+	*phvt = hvt;
+	*pfvt = fvt;
+}
+
+static void cpufreq_interactive_adjust_cpu(unsigned int cpu,
+					   struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	u64 hvt, fvt;
+	unsigned int max_freq;
+	int i;
+
+	cpufreq_interactive_get_policy_info(policy, &max_freq, &hvt, &fvt);
+
+	for_each_cpu(i, policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, i);
+		pcpu->pol_floor_val_time = fvt;
+	}
+
+	if (max_freq != policy->cur) {
+		__cpufreq_driver_target(policy, max_freq, CPUFREQ_RELATION_H);
+		for_each_cpu(i, policy->cpus) {
+			pcpu = &per_cpu(cpuinfo, i);
+			pcpu->pol_hispeed_val_time = hvt;
+		}
+	}
+
+	trace_cpufreq_interactive_setspeed(cpu, max_freq, policy->cur);
+}
+
+static int cpufreq_interactive_speedchange_task(void *data)
+{
+	unsigned int cpu;
+	cpumask_t tmp_mask;
+	unsigned long flags;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+	while (1) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+
+		if (cpumask_empty(&speedchange_cpumask)) {
+			spin_unlock_irqrestore(&speedchange_cpumask_lock,
+					       flags);
+			schedule();
+
+			if (kthread_should_stop())
+				break;
+
+			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+		}
+
+		set_current_state(TASK_RUNNING);
+		tmp_mask = speedchange_cpumask;
+		cpumask_clear(&speedchange_cpumask);
+		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+
+		for_each_cpu(cpu, &tmp_mask) {
+			pcpu = &per_cpu(cpuinfo, cpu);
+
+			down_write(&pcpu->policy->rwsem);
+
+			if (likely(down_read_trylock(&pcpu->enable_sem))) {
+				if (likely(pcpu->governor_enabled))
+					cpufreq_interactive_adjust_cpu(cpu,
+							pcpu->policy);
+				up_read(&pcpu->enable_sem);
+			}
+
+			up_write(&pcpu->policy->rwsem);
+		}
+	}
+
+	return 0;
+}
+
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
+{
+	int i;
+	int anyboost = 0;
+	unsigned long flags[2];
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+	tunables->boosted = true;
+
+	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
+
+	for_each_online_cpu(i) {
+		pcpu = &per_cpu(cpuinfo, i);
+
+		if (!down_read_trylock(&pcpu->enable_sem))
+			continue;
+
+		if (!pcpu->governor_enabled) {
+			up_read(&pcpu->enable_sem);
+			continue;
+		}
+
+		if (tunables != pcpu->policy->governor_data) {
+			up_read(&pcpu->enable_sem);
+			continue;
+		}
+
+		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
+		if (pcpu->target_freq < tunables->hispeed_freq) {
+			pcpu->target_freq = tunables->hispeed_freq;
+			cpumask_set_cpu(i, &speedchange_cpumask);
+			pcpu->pol_hispeed_val_time =
+				ktime_to_us(ktime_get());
+			anyboost = 1;
+		}
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
+
+		up_read(&pcpu->enable_sem);
+	}
+
+	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
+
+	if (anyboost)
+		wake_up_process(speedchange_task);
+}
+
+static int cpufreq_interactive_notifier(
+	struct notifier_block *nb, unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = data;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	int cpu;
+	unsigned long flags;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		pcpu = &per_cpu(cpuinfo, freq->cpu);
+		if (!down_read_trylock(&pcpu->enable_sem))
+			return 0;
+		if (!pcpu->governor_enabled) {
+			up_read(&pcpu->enable_sem);
+			return 0;
+		}
+
+		for_each_cpu(cpu, pcpu->policy->cpus) {
+			struct cpufreq_interactive_cpuinfo *pjcpu =
+				&per_cpu(cpuinfo, cpu);
+			if (cpu != freq->cpu) {
+				if (!down_read_trylock(&pjcpu->enable_sem))
+					continue;
+				if (!pjcpu->governor_enabled) {
+					up_read(&pjcpu->enable_sem);
+					continue;
+				}
+			}
+			spin_lock_irqsave(&pjcpu->load_lock, flags);
+			update_load(cpu);
+			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
+			if (cpu != freq->cpu)
+				up_read(&pjcpu->enable_sem);
+		}
+
+		up_read(&pcpu->enable_sem);
+	}
+	return 0;
+}
+
+static struct notifier_block cpufreq_notifier_block = {
+	.notifier_call = cpufreq_interactive_notifier,
+};
+
+static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
+{
+	const char *cp;
+	int i;
+	int ntokens = 1;
+	unsigned int *tokenized_data;
+	int err = -EINVAL;
+
+	cp = buf;
+	while ((cp = strpbrk(cp + 1, " :")))
+		ntokens++;
+
+	if (!(ntokens & 0x1))
+		goto err;
+
+	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
+	if (!tokenized_data) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	cp = buf;
+	i = 0;
+	while (i < ntokens) {
+		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
+			goto err_kfree;
+
+		cp = strpbrk(cp, " :");
+		if (!cp)
+			break;
+		cp++;
+	}
+
+	if (i != ntokens)
+		goto err_kfree;
+
+	*num_tokens = ntokens;
+	return tokenized_data;
+
+err_kfree:
+	kfree(tokenized_data);
+err:
+	return ERR_PTR(err);
+}
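
As an example of the accepted format (values hypothetical), writing "85 1500000:90 1800000:70" to target_loads tokenizes to {85, 1500000, 90, 1800000, 70} (five tokens, satisfying the odd-count check), and freq_to_targetload() then returns 85 below 1,500,000 kHz, 90 from 1,500,000 kHz, and 70 from 1,800,000 kHz. above_hispeed_delay uses the same "value freq:value ..." layout with delays in usecs.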
+
+static ssize_t show_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	char *buf)
+{
+	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+
+	for (i = 0; i < tunables->ntarget_loads; i++)
+		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
+			       i & 0x1 ? ":" : " ");
+
+	sprintf(buf + ret - 1, "\n");
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return ret;
+}
+
+static ssize_t store_target_loads(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ntokens;
+	unsigned int *new_target_loads = NULL;
+	unsigned long flags;
+
+	new_target_loads = get_tokenized_data(buf, &ntokens);
+	if (IS_ERR(new_target_loads))
+		return PTR_RET(new_target_loads);
+
+	spin_lock_irqsave(&tunables->target_loads_lock, flags);
+	if (tunables->target_loads != default_target_loads)
+		kfree(tunables->target_loads);
+	tunables->target_loads = new_target_loads;
+	tunables->ntarget_loads = ntokens;
+	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
+	return count;
+}
+
+static ssize_t show_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables, char *buf)
+{
+	int i;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+
+	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
+		ret += sprintf(buf + ret, "%u%s",
+			       tunables->above_hispeed_delay[i],
+			       i & 0x1 ? ":" : " ");
+
+	sprintf(buf + ret - 1, "\n");
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return ret;
+}
+
+static ssize_t store_above_hispeed_delay(
+	struct cpufreq_interactive_tunables *tunables,
+	const char *buf, size_t count)
+{
+	int ntokens;
+	unsigned int *new_above_hispeed_delay = NULL;
+	unsigned long flags;
+
+	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
+	if (IS_ERR(new_above_hispeed_delay))
+		return PTR_RET(new_above_hispeed_delay);
+
+	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
+	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
+		kfree(tunables->above_hispeed_delay);
+	tunables->above_hispeed_delay = new_above_hispeed_delay;
+	tunables->nabove_hispeed_delay = ntokens;
+	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
+	return count;
+
+}
+
+static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->hispeed_freq = val;
+	return count;
+}
+
+static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
+}
+
+static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->go_hispeed_load = val;
+	return count;
+}
+
+static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->min_sample_time = val;
+	return count;
+}
+
+static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%lu\n", tunables->timer_rate);
+}
+
+static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val, val_round;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
+	if (val != val_round)
+		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
+			val_round);
+
+	tunables->timer_rate = val_round;
+	return count;
+}
+
+static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->timer_slack_val);
+}
+
+static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	long val;
+
+	ret = kstrtol(buf, 10, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->timer_slack_val = val;
+	return count;
+}
+
+static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
+			  char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->boost_val);
+}
+
+static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
+			   const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boost_val = val;
+
+	if (tunables->boost_val) {
+		trace_cpufreq_interactive_boost("on");
+		if (!tunables->boosted)
+			cpufreq_interactive_boost(tunables);
+	} else {
+		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
+		trace_cpufreq_interactive_unboost("off");
+	}
+
+	return count;
+}
+
+static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
+				const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
+		tunables->boostpulse_duration_val;
+	trace_cpufreq_interactive_boost("pulse");
+	if (!tunables->boosted)
+		cpufreq_interactive_boost(tunables);
+	return count;
+}
+
+static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, char *buf)
+{
+	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
+}
+
+static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
+		*tunables, const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	tunables->boostpulse_duration_val = val;
+	return count;
+}
+
+static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		char *buf)
+{
+	return sprintf(buf, "%u\n", tunables->io_is_busy);
+}
+
+static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
+		const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val;
+
+	ret = kstrtoul(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+	tunables->io_is_busy = val;
+	return count;
+}
+
+/*
+ * Create show/store routines
+ * - sys: One governor instance for complete SYSTEM
+ * - pol: One governor instance per struct cpufreq_policy
+ */
+#define show_gov_pol_sys(file_name)					\
+static ssize_t show_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
+{									\
+	return show_##file_name(common_tunables, buf);			\
+}									\
+									\
+static ssize_t show_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, char *buf)				\
+{									\
+	return show_##file_name(policy->governor_data, buf);		\
+}
+
+#define store_gov_pol_sys(file_name)					\
+static ssize_t store_##file_name##_gov_sys				\
+(struct kobject *kobj, struct attribute *attr, const char *buf,		\
+	size_t count)							\
+{									\
+	return store_##file_name(common_tunables, buf, count);		\
+}									\
+									\
+static ssize_t store_##file_name##_gov_pol				\
+(struct cpufreq_policy *policy, const char *buf, size_t count)		\
+{									\
+	return store_##file_name(policy->governor_data, buf, count);	\
+}
+
+#define show_store_gov_pol_sys(file_name)				\
+show_gov_pol_sys(file_name);						\
+store_gov_pol_sys(file_name)
+
+show_store_gov_pol_sys(target_loads);
+show_store_gov_pol_sys(above_hispeed_delay);
+show_store_gov_pol_sys(hispeed_freq);
+show_store_gov_pol_sys(go_hispeed_load);
+show_store_gov_pol_sys(min_sample_time);
+show_store_gov_pol_sys(timer_rate);
+show_store_gov_pol_sys(timer_slack);
+show_store_gov_pol_sys(boost);
+store_gov_pol_sys(boostpulse);
+show_store_gov_pol_sys(boostpulse_duration);
+show_store_gov_pol_sys(io_is_busy);
+
+#define gov_sys_attr_rw(_name)						\
+static struct global_attr _name##_gov_sys =				\
+__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
+
+#define gov_pol_attr_rw(_name)						\
+static struct freq_attr _name##_gov_pol =				\
+__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
+
+#define gov_sys_pol_attr_rw(_name)					\
+	gov_sys_attr_rw(_name);						\
+	gov_pol_attr_rw(_name)
+
+gov_sys_pol_attr_rw(target_loads);
+gov_sys_pol_attr_rw(above_hispeed_delay);
+gov_sys_pol_attr_rw(hispeed_freq);
+gov_sys_pol_attr_rw(go_hispeed_load);
+gov_sys_pol_attr_rw(min_sample_time);
+gov_sys_pol_attr_rw(timer_rate);
+gov_sys_pol_attr_rw(timer_slack);
+gov_sys_pol_attr_rw(boost);
+gov_sys_pol_attr_rw(boostpulse_duration);
+gov_sys_pol_attr_rw(io_is_busy);
+
+static struct global_attr boostpulse_gov_sys =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
+
+static struct freq_attr boostpulse_gov_pol =
+	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
+
+/* One Governor instance for entire system */
+static struct attribute *interactive_attributes_gov_sys[] = {
+	&target_loads_gov_sys.attr,
+	&above_hispeed_delay_gov_sys.attr,
+	&hispeed_freq_gov_sys.attr,
+	&go_hispeed_load_gov_sys.attr,
+	&min_sample_time_gov_sys.attr,
+	&timer_rate_gov_sys.attr,
+	&timer_slack_gov_sys.attr,
+	&boost_gov_sys.attr,
+	&boostpulse_gov_sys.attr,
+	&boostpulse_duration_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_sys = {
+	.attrs = interactive_attributes_gov_sys,
+	.name = "interactive",
+};
+
+/* Per policy governor instance */
+static struct attribute *interactive_attributes_gov_pol[] = {
+	&target_loads_gov_pol.attr,
+	&above_hispeed_delay_gov_pol.attr,
+	&hispeed_freq_gov_pol.attr,
+	&go_hispeed_load_gov_pol.attr,
+	&min_sample_time_gov_pol.attr,
+	&timer_rate_gov_pol.attr,
+	&timer_slack_gov_pol.attr,
+	&boost_gov_pol.attr,
+	&boostpulse_gov_pol.attr,
+	&boostpulse_duration_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	NULL,
+};
+
+static struct attribute_group interactive_attr_group_gov_pol = {
+	.attrs = interactive_attributes_gov_pol,
+	.name = "interactive",
+};
+
+static struct attribute_group *get_sysfs_attr(void)
+{
+	if (have_governor_per_policy())
+		return &interactive_attr_group_gov_pol;
+	else
+		return &interactive_attr_group_gov_sys;
+}
+
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+					     unsigned long val,
+					     void *data)
+{
+	if (val == IDLE_END)
+		cpufreq_interactive_idle_end();
+
+	return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+	.notifier_call = cpufreq_interactive_idle_notifier,
+};
+
+static int cpufreq_governor_interactive_init(struct cpufreq_policy *policy)
+{
+	int rc;
+	struct cpufreq_interactive_tunables *tunables;
+
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
+
+	if (have_governor_per_policy()) {
+		WARN_ON(tunables);
+	} else if (tunables) {
+		tunables->usage_count++;
+		policy->governor_data = tunables;
+		return 0;
+	}
+
+	tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
+	if (!tunables) {
+		pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	tunables->usage_count = 1;
+	tunables->above_hispeed_delay = default_above_hispeed_delay;
+	tunables->nabove_hispeed_delay =
+		ARRAY_SIZE(default_above_hispeed_delay);
+	tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
+	tunables->target_loads = default_target_loads;
+	tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
+	tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_rate = DEFAULT_TIMER_RATE;
+	tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
+	tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
+
+	spin_lock_init(&tunables->target_loads_lock);
+	spin_lock_init(&tunables->above_hispeed_delay_lock);
+
+	policy->governor_data = tunables;
+	if (!have_governor_per_policy()) {
+		common_tunables = tunables;
+	}
+
+	rc = sysfs_create_group(get_governor_parent_kobj(policy),
+			get_sysfs_attr());
+	if (rc) {
+		kfree(tunables);
+		policy->governor_data = NULL;
+		if (!have_governor_per_policy()) {
+			common_tunables = NULL;
+		}
+		return rc;
+	}
+
+	idle_notifier_register(&cpufreq_interactive_idle_nb);
+	cpufreq_register_notifier(&cpufreq_notifier_block,
+			CPUFREQ_TRANSITION_NOTIFIER);
+
+	return 0;
+}
+
+static void cpufreq_governor_interactive_exit(struct cpufreq_policy *policy)
+{
+	struct cpufreq_interactive_tunables *tunables;
+
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
+
+	WARN_ON(!tunables);
+
+	if (!--tunables->usage_count) {
+		cpufreq_unregister_notifier(&cpufreq_notifier_block,
+				CPUFREQ_TRANSITION_NOTIFIER);
+		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
+
+		sysfs_remove_group(get_governor_parent_kobj(policy),
+				get_sysfs_attr());
+
+		kfree(tunables);
+		common_tunables = NULL;
+	}
+
+	policy->governor_data = NULL;
+}
+
+static int cpufreq_governor_interactive_start(struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct cpufreq_frequency_table *freq_table;
+	struct cpufreq_interactive_tunables *tunables;
+
+	if (have_governor_per_policy())
+		tunables = policy->governor_data;
+	else
+		tunables = common_tunables;
+
+	WARN_ON(!tunables);
+
+	mutex_lock(&gov_lock);
+
+	freq_table = policy->freq_table;
+	if (!tunables->hispeed_freq)
+		tunables->hispeed_freq = policy->max;
+
+	for_each_cpu(j, policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, j);
+		pcpu->policy = policy;
+		pcpu->target_freq = policy->cur;
+		pcpu->freq_table = freq_table;
+		pcpu->floor_freq = pcpu->target_freq;
+		pcpu->pol_floor_val_time =
+			ktime_to_us(ktime_get());
+		pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
+		pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
+		pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
+		down_write(&pcpu->enable_sem);
+		del_timer_sync(&pcpu->cpu_timer);
+		del_timer_sync(&pcpu->cpu_slack_timer);
+		cpufreq_interactive_timer_start(tunables, j);
+		pcpu->governor_enabled = 1;
+		up_write(&pcpu->enable_sem);
+	}
+
+	mutex_unlock(&gov_lock);
+	return 0;
+}
+
+static void cpufreq_governor_interactive_stop(struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+
+	mutex_lock(&gov_lock);
+	for_each_cpu(j, policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, j);
+		down_write(&pcpu->enable_sem);
+		pcpu->governor_enabled = 0;
+		del_timer_sync(&pcpu->cpu_timer);
+		del_timer_sync(&pcpu->cpu_slack_timer);
+		up_write(&pcpu->enable_sem);
+	}
+
+	mutex_unlock(&gov_lock);
+}
+
+static void cpufreq_governor_interactive_limits(struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	unsigned long flags;
+
+	if (policy->max < policy->cur)
+		__cpufreq_driver_target(policy,
+				policy->max, CPUFREQ_RELATION_H);
+	else if (policy->min > policy->cur)
+		__cpufreq_driver_target(policy,
+				policy->min, CPUFREQ_RELATION_L);
+	for_each_cpu(j, policy->cpus) {
+		pcpu = &per_cpu(cpuinfo, j);
+
+		down_read(&pcpu->enable_sem);
+		if (pcpu->governor_enabled == 0) {
+			up_read(&pcpu->enable_sem);
+			continue;
+		}
+
+		spin_lock_irqsave(&pcpu->target_freq_lock, flags);
+		if (policy->max < pcpu->target_freq)
+			pcpu->target_freq = policy->max;
+		else if (policy->min > pcpu->target_freq)
+			pcpu->target_freq = policy->min;
+
+		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
+		up_read(&pcpu->enable_sem);
+	}
+}
+
+static struct cpufreq_governor cpufreq_gov_interactive = {
+	.name = "interactive",
+	.init = cpufreq_governor_interactive_init,
+	.exit = cpufreq_governor_interactive_exit,
+	.start = cpufreq_governor_interactive_start,
+	.stop = cpufreq_governor_interactive_stop,
+	.limits = cpufreq_governor_interactive_limits,
+	.max_transition_latency = 10000000,
+	.owner = THIS_MODULE,
+};
+
+static void cpufreq_interactive_nop_timer(unsigned long data)
+{
+}
+
+static int __init cpufreq_interactive_init(void)
+{
+	unsigned int i;
+	struct cpufreq_interactive_cpuinfo *pcpu;
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+	/* Initialize per-cpu timers */
+	for_each_possible_cpu(i) {
+		pcpu = &per_cpu(cpuinfo, i);
+		init_timer_pinned_deferrable(&pcpu->cpu_timer);
+		pcpu->cpu_timer.function = cpufreq_interactive_timer;
+		pcpu->cpu_timer.data = i;
+		init_timer(&pcpu->cpu_slack_timer);
+		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
+		spin_lock_init(&pcpu->load_lock);
+		spin_lock_init(&pcpu->target_freq_lock);
+		init_rwsem(&pcpu->enable_sem);
+	}
+
+	spin_lock_init(&speedchange_cpumask_lock);
+	mutex_init(&gov_lock);
+	speedchange_task =
+		kthread_create(cpufreq_interactive_speedchange_task, NULL,
+			       "cfinteractive");
+	if (IS_ERR(speedchange_task))
+		return PTR_ERR(speedchange_task);
+
+	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+	get_task_struct(speedchange_task);
+
+	/* NB: wake up so the thread does not look hung to the freezer */
+	wake_up_process(speedchange_task);
+
+	return cpufreq_register_governor(&cpufreq_gov_interactive);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
+struct cpufreq_governor *cpufreq_default_governor(void)
+{
+	return &cpufreq_gov_interactive;
+}
+fs_initcall(cpufreq_interactive_init);
+#else
+module_init(cpufreq_interactive_init);
+#endif
+
+static void __exit cpufreq_interactive_exit(void)
+{
+	cpufreq_unregister_governor(&cpufreq_gov_interactive);
+	kthread_stop(speedchange_task);
+	put_task_struct(speedchange_task);
+}
+
+module_exit(cpufreq_interactive_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
+	"latency-sensitive workloads");
+MODULE_LICENSE("GPL");
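
The space/colon token format accepted by get_tokenized_data() above (used for target_loads and above_hispeed_delay) is easiest to see from userspace. A minimal sketch, assuming the system-wide sysfs path rather than a per-policy one, with purely illustrative values:

#include <stdio.h>

int main(void)
{
	/* get_tokenized_data() requires an odd token count:
	 * "<load> [<freq>:<load> ...]" for target_loads.
	 */
	const char *path =
		"/sys/devices/system/cpu/cpufreq/interactive/target_loads";
	FILE *f = fopen(path, "w");

	if (!f)
		return 1;
	/* Target 85% load by default, but only 60% at or above 1.5 GHz. */
	fprintf(f, "85 1500000:60\n");
	fclose(f);
	return 0;
}
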
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 03d38c2..65bb6fd 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -178,7 +178,12 @@
 
 	/* for higher loadavg, we are more reluctant */
 
-	mult += 2 * get_loadavg(load);
+	/*
+	 * this doesn't work as intended - it is almost always 0, but can
+	 * sometimes, depending on workload, spike very high into the hundreds
+	 * even when the average cpu load is under 10%.
+	 */
+	/* mult += 2 * get_loadavg(); */
 
 	/* for IO wait tasks (per cpu!) we add 5x each */
 	mult += 10 * nr_iowaiters;
diff --git a/drivers/devfreq/governor_simpleondemand.c b/drivers/devfreq/governor_simpleondemand.c
index ae72ba5..2cbd87b 100644
--- a/drivers/devfreq/governor_simpleondemand.c
+++ b/drivers/devfreq/governor_simpleondemand.c
@@ -28,6 +28,7 @@
 	unsigned int dfso_downdifferential = DFSO_DOWNDIFFERENCTIAL;
 	struct devfreq_simple_ondemand_data *data = df->data;
 	unsigned long max = (df->max_freq) ? df->max_freq : UINT_MAX;
+	unsigned long min = (df->min_freq) ? df->min_freq : 0;
 
 	err = devfreq_update_stats(df);
 	if (err)
@@ -45,18 +46,31 @@
 	    dfso_upthreshold < dfso_downdifferential)
 		return -EINVAL;
 
-	/* Assume MAX if it is going to be divided by zero */
-	if (stat->total_time == 0) {
-		*freq = max;
-		return 0;
-	}
-
 	/* Prevent overflow */
 	if (stat->busy_time >= (1 << 24) || stat->total_time >= (1 << 24)) {
 		stat->busy_time >>= 7;
 		stat->total_time >>= 7;
 	}
 
+	if (data && data->simple_scaling) {
+		if (stat->busy_time * 100 >
+		    stat->total_time * dfso_upthreshold)
+			*freq = max;
+		else if (stat->busy_time * 100 <
+			 stat->total_time *
+			 (dfso_upthreshold - dfso_downdifferential))
+			*freq = min;
+		else
+			*freq = df->previous_freq;
+		return 0;
+	}
+
+	/* Assume MAX if it is going to be divided by zero */
+	if (stat->total_time == 0) {
+		*freq = max;
+		return 0;
+	}
+
 	/* Set MAX if it's busy enough */
 	if (stat->busy_time * 100 >
 	    stat->total_time * dfso_upthreshold) {
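
The simple_scaling branch added above reduces to a three-way comparison. A standalone sketch of that arithmetic, assuming the driver's usual defaults of a 90 upthreshold and 5 downdifferential, with invented load samples:

#include <stdio.h>

/* Mirrors the simple_scaling decision: max above the up-threshold, min below
 * (up-threshold - down-differential), otherwise hold the previous frequency.
 */
static const char *pick_freq(unsigned long busy, unsigned long total)
{
	unsigned long up = 90, down = 5;	/* assumed defaults */

	if (busy * 100 > total * up)
		return "max_freq";
	if (busy * 100 < total * (up - down))
		return "min_freq";
	return "previous_freq";
}

int main(void)
{
	/* 95% busy -> max, 50% busy -> min, 87% busy -> keep previous. */
	printf("%s %s %s\n", pick_freq(95, 100), pick_freq(50, 100),
	       pick_freq(87, 100));
	return 0;
}
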
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 4d51f9e..2453e07 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -304,8 +304,12 @@
 	spin_lock_irqsave(fence->lock, flags);
 
 	ret = !list_empty(&cb->node);
-	if (ret)
+	if (ret) {
 		list_del_init(&cb->node);
+		if (list_empty(&fence->cb_list))
+			if (fence->ops->disable_signaling)
+				fence->ops->disable_signaling(fence);
+	}
 
 	spin_unlock_irqrestore(fence->lock, flags);
 
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 62e8e6d..454d3b3 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -234,6 +234,13 @@
 	return true;
 }
 
+static void timeline_fence_disable_signaling(struct fence *fence)
+{
+	struct sync_pt *pt = container_of(fence, struct sync_pt, base);
+
+	list_del_init(&pt->active_list);
+}
+
 static void timeline_fence_value_str(struct fence *fence,
 				    char *str, int size)
 {
@@ -252,6 +259,7 @@
 	.get_driver_name = timeline_fence_get_driver_name,
 	.get_timeline_name = timeline_fence_get_timeline_name,
 	.enable_signaling = timeline_fence_enable_signaling,
+	.disable_signaling = timeline_fence_disable_signaling,
 	.signaled = timeline_fence_signaled,
 	.wait = fence_default_wait,
 	.release = timeline_fence_release,
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig
index 82d85cce..192080e 100644
--- a/drivers/edac/Kconfig
+++ b/drivers/edac/Kconfig
@@ -455,6 +455,47 @@
 	  Support for error detection and correction on the Synopsys DDR
 	  memory controller.
 
+config EDAC_KRYO3XX_ARM64
+	depends on EDAC_MM_EDAC && ARM64
+	tristate "ARM KRYO3XX Gold and Silver L1/L2/L3/SCU Caches"
+	help
+	   Support for error detection and correction on the
+	   Kryo3xx Gold and Silver CPUs. Reports errors caught by the Kryo3xx
+	   ECC mechanism.
+	   For debugging issues having to do with stability and overall system
+	   health, you should probably say 'Y' here.
+
+config EDAC_KRYO3XX_ARM64_POLL
+	depends on EDAC_KRYO3XX_ARM64
+	bool "Poll on kryo3xx ECC registers - kryo3xx"
+	help
+	   This option chooses whether or not you want to poll on the Kryo3xx
+	   ECC registers. When this is enabled, the polling rate can be set as
+	   a module parameter. By default, it will call the polling function
+	   every second.
+	   This option should only be used if the associated interrupt lines
+	   are not enabled.
+
+config EDAC_KRYO3XX_ARM64_PANIC_ON_CE
+	depends on EDAC_KRYO3XX_ARM64
+	bool "Panic on correctable errors - kryo3xx"
+	help
+	   Forcibly cause a kernel panic if a correctable error (CE) is
+	   detected, even though the error is (by definition) correctable and
+	   would otherwise result in no adverse system effects. This can reduce
+	   debugging times on hardware which may be operating at voltages or
+	   frequencies outside normal specification.
+	   For production builds, you should definitely say 'N' here.
+
+config EDAC_KRYO3XX_ARM64_PANIC_ON_UE
+	depends on EDAC_KRYO3XX_ARM64
+	bool "Panic on uncorrectable errors - kryo3xx"
+	help
+	   Forcibly cause a kernel panic if an uncorrectable error (UE) is
+	   detected. This can reduce debugging times on hardware which may be
+	   operating at voltages or frequencies outside normal specification.
+	   For production builds, you should probably say 'N' here.
+
 config EDAC_XGENE
 	tristate "APM X-Gene SoC"
 	depends on EDAC_MM_EDAC && (ARM64 || COMPILE_TEST)
@@ -462,4 +503,37 @@
 	  Support for error detection and correction on the
 	  APM X-Gene family of SOCs.
 
+config EDAC_QCOM_LLCC
+	depends on EDAC_MM_EDAC && QCOM_LLCC
+	tristate "QCOM LLCC Caches"
+	help
+	  Support for error detection and correction on the
+	  QCOM LLCC cache. Reports errors caught by the LLCC ECC
+	  mechanism.
+
+	  For debugging issues having to do with stability and overall system
+	  health, you should probably say 'Y' here.
+
+config EDAC_QCOM_LLCC_PANIC_ON_CE
+	depends on EDAC_QCOM_LLCC
+	bool "panic on correctable errors - qcom llcc"
+	help
+	  Forcibly cause a kernel panic if a correctable error (CE) is
+	  detected, even though the error is (by definition) correctable and
+	  would otherwise result in no adverse system effects. This can reduce
+	  debugging times on hardware which may be operating at voltages or
+	  frequencies outside normal specifications.
+
+	  For production builds, you should definitely say 'N' here.
+
+config EDAC_QCOM_LLCC_PANIC_ON_UE
+	depends on EDAC_QCOM_LLCC
+	bool "Panic on uncorrectable errors - qcom llcc"
+	help
+	  Forcibly cause a kernel panic if an uncorrectable error (UE) is
+	  detected. This can reduce debugging times on hardware which may be
+	  operating at voltages or frequencies outside normal specification.
+
+	  For production builds, you should probably say 'N' here.
+
 endif # EDAC
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index 88e472e..ab02673 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -75,5 +75,7 @@
 obj-$(CONFIG_EDAC_OCTEON_PCI)		+= octeon_edac-pci.o
 
 obj-$(CONFIG_EDAC_ALTERA)		+= altera_edac.o
+obj-$(CONFIG_EDAC_KRYO3XX_ARM64)	+= kryo3xx_arm64_edac.o
 obj-$(CONFIG_EDAC_SYNOPSYS)		+= synopsys_edac.o
 obj-$(CONFIG_EDAC_XGENE)		+= xgene_edac.o
+obj-$(CONFIG_EDAC_QCOM_LLCC)		+= qcom_llcc_edac.o
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 4861542..1fa7a36 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -223,9 +223,11 @@
 	/* Per instance controls for this edac_device */
 	int log_ue;		/* boolean for logging UEs */
 	int log_ce;		/* boolean for logging CEs */
+	int panic_on_ce;	/* boolean for panic'ing on a CE */
 	int panic_on_ue;	/* boolean for panic'ing on an UE */
 	unsigned poll_msec;	/* number of milliseconds to poll interval */
 	unsigned long delay;	/* number of jiffies for poll_msec */
+	bool defer_work;	/* Create a deferrable work for polling */
 
 	/* Additional top controller level attributes, but specified
 	 * by the low level driver.
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index a979003..814031d 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -412,7 +412,11 @@
 	edac_dev->poll_msec = msec;
 	edac_dev->delay = msecs_to_jiffies(msec);
 
-	INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
+	if (edac_dev->defer_work)
+		INIT_DEFERRABLE_WORK(&edac_dev->work,
+					edac_device_workq_function);
+	else
+		INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);
 
 	/* optimize here for the 1 second case, which will be normal value, to
 	 * fire ON the 1 second time event. This helps reduce all sorts of
@@ -602,6 +606,12 @@
 	return edac_dev->log_ue;
 }
 
+static inline int edac_device_get_panic_on_ce(struct edac_device_ctl_info
+					*edac_dev)
+{
+	return edac_dev->panic_on_ce;
+}
+
 static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
 					*edac_dev)
 {
@@ -651,6 +661,11 @@
 				"CE: %s instance: %s block: %s '%s'\n",
 				edac_dev->ctl_name, instance->name,
 				block ? block->name : "N/A", msg);
+
+	if (edac_device_get_panic_on_ce(edac_dev))
+		panic("EDAC %s: CE instance: %s block %s '%s'\n",
+			edac_dev->ctl_name, instance->name,
+			block ? block->name : "N/A", msg);
 }
 EXPORT_SYMBOL_GPL(edac_device_handle_ce);
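
The defer_work flag added above is consumed in edac_device_workq_setup(); a minimal sketch of how a polling driver is expected to opt in, with hypothetical example_ names (the kryo3xx and LLCC probes later in this patch are the real users):

#include "edac_core.h"

/* Hypothetical polling callback; a real driver reads its error registers. */
static void example_edac_check(struct edac_device_ctl_info *edev_ctl)
{
}

static int example_enable_polling(struct edac_device_ctl_info *edev_ctl)
{
	edev_ctl->edac_check = example_edac_check;
	edev_ctl->poll_msec = 1000;	/* poll once per second */
	edev_ctl->defer_work = true;	/* INIT_DEFERRABLE_WORK instead of
					 * INIT_DELAYED_WORK */
	return edac_device_add_device(edev_ctl);
}
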
 
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 93da1a4..b69eaf6 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -62,6 +62,13 @@
 	return count;
 }
 
+/* 'panic_on_ce' */
+static ssize_t edac_device_ctl_panic_on_ce_show(struct edac_device_ctl_info
+						*ctl_info, char *data)
+{
+	return snprintf(data, PAGE_SIZE, "%u\n", ctl_info->panic_on_ce);
+}
+
 /* 'panic_on_ue' */
 static ssize_t edac_device_ctl_panic_on_ue_show(struct edac_device_ctl_info
 						*ctl_info, char *data)
@@ -69,6 +76,21 @@
 	return sprintf(data, "%u\n", ctl_info->panic_on_ue);
 }
 
+static ssize_t edac_device_ctl_panic_on_ce_store(struct edac_device_ctl_info
+						 *ctl_info, const char *data,
+						 size_t count)
+{
+	unsigned long val;
+
+	/* if parameter is zero, turn off flag, if non-zero turn on flag */
+	if (kstrtoul(data, 0, &val) < 0)
+		return -EINVAL;
+
+	ctl_info->panic_on_ce = !!val;
+
+	return count;
+}
+
 static ssize_t edac_device_ctl_panic_on_ue_store(struct edac_device_ctl_info
 						 *ctl_info, const char *data,
 						 size_t count)
@@ -156,6 +178,9 @@
 	edac_device_ctl_log_ue_show, edac_device_ctl_log_ue_store);
 CTL_INFO_ATTR(log_ce, S_IRUGO | S_IWUSR,
 	edac_device_ctl_log_ce_show, edac_device_ctl_log_ce_store);
+CTL_INFO_ATTR(panic_on_ce, S_IRUGO | S_IWUSR,
+	edac_device_ctl_panic_on_ce_show,
+	edac_device_ctl_panic_on_ce_store);
 CTL_INFO_ATTR(panic_on_ue, S_IRUGO | S_IWUSR,
 	edac_device_ctl_panic_on_ue_show,
 	edac_device_ctl_panic_on_ue_store);
@@ -164,6 +189,7 @@
 
 /* Base Attributes of the EDAC_DEVICE ECC object */
 static struct ctl_info_attribute *device_ctrl_attr[] = {
+	&attr_ctl_info_panic_on_ce,
 	&attr_ctl_info_panic_on_ue,
 	&attr_ctl_info_log_ue,
 	&attr_ctl_info_log_ce,
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
new file mode 100644
index 0000000..7e2aadc
--- /dev/null
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -0,0 +1,447 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/edac.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
+
+#include <asm/cputype.h>
+
+#include "edac_core.h"
+
+#ifdef CONFIG_EDAC_KRYO3XX_ARM64_POLL
+static int poll_msec = 1000;
+module_param(poll_msec, int, 0444);
+#endif
+
+#ifdef CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_CE
+#define ARM64_ERP_PANIC_ON_CE 1
+#else
+#define ARM64_ERP_PANIC_ON_CE 0
+#endif
+
+#ifdef CONFIG_EDAC_KRYO3XX_ARM64_PANIC_ON_UE
+#define ARM64_ERP_PANIC_ON_UE 1
+#else
+#define ARM64_ERP_PANIC_ON_UE 0
+#endif
+
+#define L1 0x0
+#define L2 0x1
+#define L3 0x2
+
+#define EDAC_CPU	"kryo3xx_edac"
+
+#define KRYO3XX_ERRXSTATUS_VALID(a)	((a >> 30) & 0x1)
+#define KRYO3XX_ERRXSTATUS_UE(a)	((a >> 29) & 0x1)
+#define KRYO3XX_ERRXSTATUS_SERR(a)	(a & 0xFF)
+
+#define KRYO3XX_ERRXMISC_LVL(a)		((a >> 1) & 0x7)
+#define KRYO3XX_ERRXMISC_WAY(a)		((a >> 28) & 0xF)
+
+static inline void set_errxctlr_el1(void)
+{
+	u64 val = 0x10f;
+
+	asm volatile("msr s3_0_c5_c4_1, %0" : : "r" (val));
+}
+
+static inline void write_errselr_el1(u64 val)
+{
+	asm volatile("msr s3_0_c5_c3_1, %0" : : "r" (val));
+}
+
+static inline u64 read_errxstatus_el1(void)
+{
+	u64 val;
+
+	asm volatile("mrs %0, s3_0_c5_c4_2" : "=r" (val));
+	return val;
+}
+
+static inline u64 read_errxmisc_el1(void)
+{
+	u64 val;
+
+	asm volatile("mrs %0, s3_0_c5_c5_0" : "=r" (val));
+	return val;
+}
+
+static inline void clear_errxstatus_valid(u64 val)
+{
+	asm volatile("msr s3_0_c5_c4_2, %0" : : "r" (val));
+}
+
+struct errors_edac {
+	const char * const msg;
+	void (*func)(struct edac_device_ctl_info *edac_dev,
+			int inst_nr, int block_nr, const char *msg);
+};
+
+static const struct errors_edac errors[] = {
+	{"Kryo3xx L1 Correctable Error", edac_device_handle_ce },
+	{"Kryo3xx L1 Uncorrectable Error", edac_device_handle_ue },
+	{"Kryo3xx L2 Correctable Error", edac_device_handle_ce },
+	{"Kryo3xx L2 Uncorrectable Error", edac_device_handle_ue },
+	{"L3 Correctable Error", edac_device_handle_ce },
+	{"L3 Uncorrectable Error", edac_device_handle_ue },
+};
+
+#define KRYO3XX_L1_CE 0
+#define KRYO3XX_L1_UE 1
+#define KRYO3XX_L2_CE 2
+#define KRYO3XX_L2_UE 3
+#define KRYO3XX_L3_CE 4
+#define KRYO3XX_L3_UE 5
+
+#define DATA_BUF_ERR		0x2
+#define CACHE_DATA_ERR		0x6
+#define CACHE_TAG_DIRTY_ERR	0x7
+#define TLB_PARITY_ERR		0x8
+#define BUS_ERROR		0x18
+
+struct erp_drvdata {
+	struct edac_device_ctl_info *edev_ctl;
+	struct erp_drvdata __percpu **erp_cpu_drvdata;
+	int ppi;
+};
+
+static struct erp_drvdata *panic_handler_drvdata;
+
+static DEFINE_SPINLOCK(local_handler_lock);
+
+static void l1_l2_irq_enable(void *info)
+{
+	int irq = *(int *)info;
+
+	enable_percpu_irq(irq, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static int request_erp_irq(struct platform_device *pdev, const char *propname,
+			const char *desc, irq_handler_t handler,
+			void *ed, int percpu)
+{
+	int rc;
+	struct resource *r;
+	struct erp_drvdata *drv = ed;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, propname);
+
+	if (!r) {
+		pr_err("ARM64 CPU ERP: Could not find <%s> IRQ property. Proceeding anyway.\n",
+			propname);
+		goto out;
+	}
+
+	if (!percpu) {
+		rc = devm_request_threaded_irq(&pdev->dev, r->start, NULL,
+					       handler,
+					       IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+					       desc,
+					       ed);
+
+		if (rc) {
+			pr_err("ARM64 CPU ERP: Failed to request IRQ %d: %d (%s / %s). Proceeding anyway.\n",
+			       (int) r->start, rc, propname, desc);
+			goto out;
+		}
+
+	} else {
+		drv->erp_cpu_drvdata = alloc_percpu(struct erp_drvdata *);
+		if (!drv->erp_cpu_drvdata) {
+			pr_err("Failed to allocate percpu erp data\n");
+			goto out;
+		}
+
+		*raw_cpu_ptr(drv->erp_cpu_drvdata) = drv;
+		rc = request_percpu_irq(r->start, handler, desc,
+				drv->erp_cpu_drvdata);
+
+		if (rc) {
+			pr_err("ARM64 CPU ERP: Failed to request IRQ %d: %d (%s / %s). Proceeding anyway.\n",
+			       (int) r->start, rc, propname, desc);
+			goto out_free;
+		}
+
+		drv->ppi = r->start;
+		on_each_cpu(l1_l2_irq_enable, &(r->start), 1);
+	}
+
+	return 0;
+
+out_free:
+	free_percpu(drv->erp_cpu_drvdata);
+	drv->erp_cpu_drvdata = NULL;
+out:
+	return -EINVAL;
+}
+
+static void dump_err_reg(int errorcode, int level, u64 errxstatus, u64 errxmisc,
+	struct edac_device_ctl_info *edev_ctl)
+{
+	edac_printk(KERN_CRIT, EDAC_CPU, "ERRXSTATUS_EL1: %llx\n", errxstatus);
+	edac_printk(KERN_CRIT, EDAC_CPU, "ERRXMISC_EL1: %llx\n", errxmisc);
+	edac_printk(KERN_CRIT, EDAC_CPU, "Cache level: L%d\n", level + 1);
+
+	switch (KRYO3XX_ERRXSTATUS_SERR(errxstatus)) {
+	case DATA_BUF_ERR:
+		edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from internal data buffer\n");
+		break;
+
+	case CACHE_DATA_ERR:
+		edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache data RAM\n");
+		break;
+
+	case CACHE_TAG_DIRTY_ERR:
+		edac_printk(KERN_CRIT, EDAC_CPU, "ECC Error from cache tag or dirty RAM\n");
+		break;
+
+	case TLB_PARITY_ERR:
+		edac_printk(KERN_CRIT, EDAC_CPU, "Parity error on TLB RAM\n");
+		break;
+
+	case BUS_ERROR:
+		edac_printk(KERN_CRIT, EDAC_CPU, "Bus Error\n");
+		break;
+	}
+
+	if (level == L3)
+		edac_printk(KERN_CRIT, EDAC_CPU,
+			"Way: %d\n", (int) KRYO3XX_ERRXMISC_WAY(errxmisc));
+	else
+		edac_printk(KERN_CRIT, EDAC_CPU,
+			"Way: %d\n", (int) KRYO3XX_ERRXMISC_WAY(errxmisc) >> 2);
+	errors[errorcode].func(edev_ctl, smp_processor_id(),
+				level, errors[errorcode].msg);
+}
+
+static void kryo3xx_parse_l1_l2_cache_error(u64 errxstatus, u64 errxmisc,
+	struct edac_device_ctl_info *edev_ctl)
+{
+	switch (KRYO3XX_ERRXMISC_LVL(errxmisc)) {
+	case L1:
+		if (KRYO3XX_ERRXSTATUS_UE(errxstatus))
+			dump_err_reg(KRYO3XX_L1_UE, L1, errxstatus, errxmisc,
+				edev_ctl);
+		else
+			dump_err_reg(KRYO3XX_L1_CE, L1, errxstatus, errxmisc,
+				edev_ctl);
+		break;
+
+	case L2:
+		if (KRYO3XX_ERRXSTATUS_UE(errxstatus))
+			dump_err_reg(KRYO3XX_L2_UE, L2, errxstatus, errxmisc,
+				edev_ctl);
+		else
+			dump_err_reg(KRYO3XX_L2_CE, L2, errxstatus, errxmisc,
+				edev_ctl);
+		break;
+	}
+
+}
+
+static void kryo3xx_check_l1_l2_ecc(void *info)
+{
+	struct edac_device_ctl_info *edev_ctl = info;
+	u64 errxstatus = 0;
+	u64 errxmisc = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+	write_errselr_el1(0);
+	errxstatus = read_errxstatus_el1();
+	if (KRYO3XX_ERRXSTATUS_VALID(errxstatus)) {
+		errxmisc = read_errxmisc_el1();
+		edac_printk(KERN_CRIT, EDAC_CPU,
+		"Kryo3xx CPU%d detected a L1/L2 cache error\n",
+		smp_processor_id());
+
+		kryo3xx_parse_l1_l2_cache_error(errxstatus, errxmisc, edev_ctl);
+		clear_errxstatus_valid(errxstatus);
+	}
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+static void kryo3xx_check_l3_scu_error(struct edac_device_ctl_info *edev_ctl)
+{
+	u64 errxstatus = 0;
+	u64 errxmisc = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&local_handler_lock, flags);
+	write_errselr_el1(1);
+	errxstatus = read_errxstatus_el1();
+	errxmisc = read_errxmisc_el1();
+
+	if (KRYO3XX_ERRXSTATUS_VALID(errxstatus) &&
+		KRYO3XX_ERRXMISC_LVL(errxmisc) == L3) {
+		if (KRYO3XX_ERRXSTATUS_UE(errxstatus)) {
+			edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 uncorrectable error\n");
+			dump_err_reg(KRYO3XX_L3_UE, L3, errxstatus, errxmisc,
+				edev_ctl);
+		} else {
+			edac_printk(KERN_CRIT, EDAC_CPU, "Detected L3 correctable error\n");
+			dump_err_reg(KRYO3XX_L3_CE, L3, errxstatus, errxmisc,
+				edev_ctl);
+		}
+
+		clear_errxstatus_valid(errxstatus);
+	}
+	spin_unlock_irqrestore(&local_handler_lock, flags);
+}
+
+void kryo3xx_poll_cache_errors(struct edac_device_ctl_info *edev_ctl)
+{
+	int cpu;
+
+	if (edev_ctl == NULL)
+		edev_ctl = panic_handler_drvdata->edev_ctl;
+
+	kryo3xx_check_l3_scu_error(edev_ctl);
+	for_each_possible_cpu(cpu)
+		smp_call_function_single(cpu, kryo3xx_check_l1_l2_ecc,
+			edev_ctl, 0);
+}
+
+static irqreturn_t kryo3xx_l1_l2_handler(int irq, void *drvdata)
+{
+	struct erp_drvdata *drv = *(struct erp_drvdata **)(drvdata);
+
+	kryo3xx_check_l1_l2_ecc(drv->edev_ctl);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t kryo3xx_l3_scu_handler(int irq, void *drvdata)
+{
+	struct erp_drvdata *drv = drvdata;
+	struct edac_device_ctl_info *edev_ctl = drv->edev_ctl;
+
+	kryo3xx_check_l3_scu_error(edev_ctl);
+	return IRQ_HANDLED;
+}
+
+static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct erp_drvdata *drv;
+	int rc = 0;
+	int fail = 0;
+
+	set_errxctlr_el1();
+	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
+
+	if (!drv)
+		return -ENOMEM;
+
+	drv->edev_ctl = edac_device_alloc_ctl_info(0, "cpu",
+					num_possible_cpus(), "L", 3, 1, NULL, 0,
+					edac_device_alloc_index());
+
+	if (!drv->edev_ctl)
+		return -ENOMEM;
+
+	#ifdef CONFIG_EDAC_KRYO3XX_ARM64_POLL
+	drv->edev_ctl->edac_check = kryo3xx_poll_cache_errors;
+	drv->edev_ctl->poll_msec = poll_msec;
+	drv->edev_ctl->defer_work = 1;
+	#endif
+
+	drv->edev_ctl->dev = dev;
+	drv->edev_ctl->mod_name = dev_name(dev);
+	drv->edev_ctl->dev_name = dev_name(dev);
+	drv->edev_ctl->ctl_name = "cache";
+	drv->edev_ctl->panic_on_ce = ARM64_ERP_PANIC_ON_CE;
+	drv->edev_ctl->panic_on_ue = ARM64_ERP_PANIC_ON_UE;
+	platform_set_drvdata(pdev, drv);
+
+	rc = edac_device_add_device(drv->edev_ctl);
+	if (rc)
+		goto out_mem;
+
+	panic_handler_drvdata = drv;
+
+	if (request_erp_irq(pdev, "l1-l2-faultirq",
+			"KRYO3XX L1-L2 ECC FAULTIRQ",
+			kryo3xx_l1_l2_handler, drv, 1))
+		fail++;
+
+	if (request_erp_irq(pdev, "l3-scu-faultirq",
+			"KRYO3XX L3-SCU ECC FAULTIRQ",
+			kryo3xx_l3_scu_handler, drv, 0))
+		fail++;
+
+	if (fail == of_irq_count(dev->of_node)) {
+		pr_err("KRYO3XX ERP: Could not request any IRQs. Giving up.\n");
+		rc = -ENODEV;
+		goto out_dev;
+	}
+
+	return 0;
+
+out_dev:
+	edac_device_del_device(dev);
+out_mem:
+	edac_device_free_ctl_info(drv->edev_ctl);
+	return rc;
+}
+
+static int kryo3xx_cpu_erp_remove(struct platform_device *pdev)
+{
+	struct erp_drvdata *drv = dev_get_drvdata(&pdev->dev);
+	struct edac_device_ctl_info *edac_ctl = drv->edev_ctl;
+
+	if (drv->erp_cpu_drvdata != NULL) {
+		free_percpu_irq(drv->ppi, drv->erp_cpu_drvdata);
+		free_percpu(drv->erp_cpu_drvdata);
+	}
+
+	edac_device_del_device(edac_ctl->dev);
+	edac_device_free_ctl_info(edac_ctl);
+
+	return 0;
+}
+
+static const struct of_device_id kryo3xx_cpu_erp_match_table[] = {
+	{ .compatible = "arm,arm64-kryo3xx-cpu-erp" },
+	{ }
+};
+
+static struct platform_driver kryo3xx_cpu_erp_driver = {
+	.probe = kryo3xx_cpu_erp_probe,
+	.remove = kryo3xx_cpu_erp_remove,
+	.driver = {
+		.name = "kryo3xx_cpu_cache_erp",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(kryo3xx_cpu_erp_match_table),
+	},
+};
+
+static int __init kryo3xx_cpu_erp_init(void)
+{
+	return platform_driver_register(&kryo3xx_cpu_erp_driver);
+}
+module_init(kryo3xx_cpu_erp_init);
+
+static void __exit kryo3xx_cpu_erp_exit(void)
+{
+	platform_driver_unregister(&kryo3xx_cpu_erp_driver);
+}
+module_exit(kryo3xx_cpu_erp_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Kryo3xx EDAC driver");
diff --git a/drivers/edac/qcom_llcc_edac.c b/drivers/edac/qcom_llcc_edac.c
new file mode 100644
index 0000000..18b2da7
--- /dev/null
+++ b/drivers/edac/qcom_llcc_edac.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/edac.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+#include "edac_core.h"
+
+#ifdef CONFIG_EDAC_QCOM_LLCC_PANIC_ON_CE
+#define LLCC_ERP_PANIC_ON_CE 1
+#else
+#define LLCC_ERP_PANIC_ON_CE 0
+#endif
+
+#ifdef CONFIG_EDAC_QCOM_LLCC_PANIC_ON_UE
+#define LLCC_ERP_PANIC_ON_UE 1
+#else
+#define LLCC_ERP_PANIC_ON_UE 0
+#endif
+
+#define EDAC_LLCC	"qcom_llcc"
+
+#define TRP_SYN_REG_CNT	6
+
+#define DRP_SYN_REG_CNT	8
+
+/* single & Double Bit syndrome register offsets */
+#define TRP_ECC_SB_ERR_SYN0		0x0002304C
+#define TRP_ECC_DB_ERR_SYN0		0x00020370
+#define DRP_ECC_SB_ERR_SYN0		0x0004204C
+#define DRP_ECC_DB_ERR_SYN0		0x00042070
+
+/* Error register offsets */
+#define TRP_ECC_ERROR_STATUS1		0x00020348
+#define TRP_ECC_ERROR_STATUS0		0x00020344
+#define DRP_ECC_ERROR_STATUS1		0x00042048
+#define DRP_ECC_ERROR_STATUS0		0x00042044
+
+/* TRP, DRP interrupt register offsets */
+#define DRP_INTERRUPT_STATUS		0x00041000
+#define TRP_INTERRUPT_0_STATUS		0x00020480
+#define DRP_INTERRUPT_CLEAR		0x00041008
+#define DRP_ECC_ERROR_CNTR_CLEAR	0x00040004
+#define TRP_INTERRUPT_0_CLEAR		0x00020484
+#define TRP_ECC_ERROR_CNTR_CLEAR	0x00020440
+
+/* Mask and shift macros */
+#define ECC_DB_ERR_COUNT_MASK	0x0000001f
+#define ECC_DB_ERR_WAYS_MASK	0xffff0000
+#define ECC_DB_ERR_WAYS_SHIFT	16
+
+#define ECC_SB_ERR_COUNT_MASK	0x00ff0000
+#define ECC_SB_ERR_COUNT_SHIFT	16
+#define ECC_SB_ERR_WAYS_MASK	0x0000ffff
+
+#define SB_ECC_ERROR	0x1
+#define DB_ECC_ERROR	0x2
+
+#define DRP_TRP_INT_CLEAR	0x3
+#define DRP_TRP_CNT_CLEAR	0x3
+
+static int poll_msec = 5000;
+module_param(poll_msec, int, 0444);
+
+static int interrupt_mode;
+module_param(interrupt_mode, int, 0444);
+MODULE_PARM_DESC(interrupt_mode,
+		 "Controls whether to use interrupt or poll mode");
+
+enum {
+	LLCC_DRAM_CE = 0,
+	LLCC_DRAM_UE,
+	LLCC_TRAM_CE,
+	LLCC_TRAM_UE,
+};
+
+struct errors_edac {
+	const char *msg;
+	void (*func)(struct edac_device_ctl_info *edev_ctl,
+				int inst_nr, int block_nr, const char *msg);
+};
+
+struct erp_drvdata {
+	struct regmap *llcc_map;
+	int ecc_irq;
+};
+
+static const struct errors_edac errors[] = {
+	{"LLCC Data RAM correctable Error", edac_device_handle_ce},
+	{"LLCC Data RAM uncorrectable Error", edac_device_handle_ue},
+	{"LLCC Tag RAM correctable Error", edac_device_handle_ce},
+	{"LLCC Tag RAM uncorrectable Error", edac_device_handle_ue},
+};
+
+/* Clear the error interrupt and counter registers */
+static void qcom_llcc_clear_errors(int err_type, struct regmap *llcc_map)
+{
+	switch (err_type) {
+	case LLCC_DRAM_CE:
+	case LLCC_DRAM_UE:
+		/* Clear the interrupt */
+		regmap_write(llcc_map, DRP_INTERRUPT_CLEAR, DRP_TRP_INT_CLEAR);
+		/* Clear the counters */
+		regmap_write(llcc_map, DRP_ECC_ERROR_CNTR_CLEAR,
+			DRP_TRP_CNT_CLEAR);
+		break;
+	case LLCC_TRAM_CE:
+	case LLCC_TRAM_UE:
+		regmap_write(llcc_map, TRP_INTERRUPT_0_CLEAR,
+			     DRP_TRP_INT_CLEAR);
+		regmap_write(llcc_map, TRP_ECC_ERROR_CNTR_CLEAR,
+			     DRP_TRP_CNT_CLEAR);
+		break;
+	}
+}
+
+/* Dump syndrome registers for tag Ram Double bit errors */
+static void dump_trp_db_syn_reg(struct regmap *llcc_map)
+{
+	int i;
+	int db_err_cnt;
+	int db_err_ways;
+	u32 synd_reg;
+	u32 synd_val;
+
+	for (i = 0; i < TRP_SYN_REG_CNT; i++) {
+		synd_reg = TRP_ECC_DB_ERR_SYN0 + (i * 4);
+		regmap_read(llcc_map, synd_reg, &synd_val);
+		edac_printk(KERN_CRIT, EDAC_LLCC, "TRP_ECC_SYN%d: 0x%8x\n",
+			i, synd_val);
+	}
+
+	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS1, &db_err_cnt);
+	db_err_cnt = (db_err_cnt & ECC_DB_ERR_COUNT_MASK);
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error count: 0x%4x\n",
+		db_err_cnt);
+
+	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS0, &db_err_ways);
+	db_err_ways = (db_err_ways & ECC_DB_ERR_WAYS_MASK);
+	db_err_ways >>= ECC_DB_ERR_WAYS_SHIFT;
+
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error ways: 0x%4x\n",
+		db_err_ways);
+}
+
+/* Dump syndrome register for tag Ram Single Bit Errors */
+static void dump_trp_sb_syn_reg(struct regmap *llcc_map)
+{
+	int i;
+	int sb_err_cnt;
+	int sb_err_ways;
+	u32 synd_reg;
+	u32 synd_val;
+
+	for (i = 0; i < TRP_SYN_REG_CNT; i++) {
+		synd_reg = TRP_ECC_SB_ERR_SYN0 + (i * 4);
+		regmap_read(llcc_map, synd_reg, &synd_val);
+		edac_printk(KERN_CRIT, EDAC_LLCC, "TRP_ECC_SYN%d: 0x%8x\n",
+			i, synd_val);
+	}
+
+	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS1, &sb_err_cnt);
+	sb_err_cnt = (sb_err_cnt & ECC_SB_ERR_COUNT_MASK);
+	sb_err_cnt >>= ECC_SB_ERR_COUNT_SHIFT;
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error count: 0x%4x\n",
+		sb_err_cnt);
+
+	regmap_read(llcc_map, TRP_ECC_ERROR_STATUS0, &sb_err_ways);
+	sb_err_ways = sb_err_ways & ECC_SB_ERR_WAYS_MASK;
+
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error ways: 0x%4x\n",
+		sb_err_ways);
+}
+
+/* Dump syndrome registers for Data Ram Double bit errors */
+static void dump_drp_db_syn_reg(struct regmap *llcc_map)
+{
+	int i;
+	int db_err_cnt;
+	int db_err_ways;
+	u32 synd_reg;
+	u32 synd_val;
+
+	for (i = 0; i < DRP_SYN_REG_CNT; i++) {
+		synd_reg = DRP_ECC_DB_ERR_SYN0 + (i * 4);
+		regmap_read(llcc_map, synd_reg, &synd_val);
+		edac_printk(KERN_CRIT, EDAC_LLCC, "DRP_ECC_SYN%d: 0x%8x\n",
+			i, synd_val);
+	}
+
+	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS1, &db_err_cnt);
+	db_err_cnt = (db_err_cnt & ECC_DB_ERR_COUNT_MASK);
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error count: 0x%4x\n",
+		db_err_cnt);
+
+	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS0, &db_err_ways);
+	db_err_ways &= ECC_DB_ERR_WAYS_MASK;
+	db_err_ways >>= ECC_DB_ERR_WAYS_SHIFT;
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Double-Bit error ways: 0x%4x\n",
+		db_err_ways);
+}
+
+/* Dump Syndrome registers for Data Ram Single bit errors*/
+static void dump_drp_sb_syn_reg(struct regmap *llcc_map)
+{
+	int i;
+	int sb_err_cnt;
+	int sb_err_ways;
+	u32 synd_reg;
+	u32 synd_val;
+
+	for (i = 0; i < DRP_SYN_REG_CNT; i++) {
+		synd_reg = DRP_ECC_SB_ERR_SYN0 + (i * 4);
+		regmap_read(llcc_map, synd_reg, &synd_val);
+		edac_printk(KERN_CRIT, EDAC_LLCC, "DRP_ECC_SYN%d: 0x%8x\n",
+			i, synd_val);
+	}
+
+	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS1, &sb_err_cnt);
+	sb_err_cnt &= ECC_SB_ERR_COUNT_MASK;
+	sb_err_cnt >>= ECC_SB_ERR_COUNT_SHIFT;
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error count: 0x%4x\n",
+		sb_err_cnt);
+
+	regmap_read(llcc_map, DRP_ECC_ERROR_STATUS0, &sb_err_ways);
+	sb_err_ways = sb_err_ways & ECC_SB_ERR_WAYS_MASK;
+
+	edac_printk(KERN_CRIT, EDAC_LLCC, "Single-Bit error ways: 0x%4x\n",
+		sb_err_ways);
+}
+
+
+static void dump_syn_reg(struct edac_device_ctl_info *edev_ctl,
+			 int err_type, struct regmap *llcc_map)
+{
+	switch (err_type) {
+	case LLCC_DRAM_CE:
+		dump_drp_sb_syn_reg(llcc_map);
+		break;
+	case LLCC_DRAM_UE:
+		dump_drp_db_syn_reg(llcc_map);
+		break;
+	case LLCC_TRAM_CE:
+		dump_trp_sb_syn_reg(llcc_map);
+		break;
+	case LLCC_TRAM_UE:
+		dump_trp_db_syn_reg(llcc_map);
+		break;
+	}
+
+	qcom_llcc_clear_errors(err_type, llcc_map);
+
+	errors[err_type].func(edev_ctl, 0, 0, errors[err_type].msg);
+}
+
+static void qcom_llcc_check_cache_errors
+		(struct edac_device_ctl_info *edev_ctl)
+{
+	u32 drp_error;
+	u32 trp_error;
+	struct erp_drvdata *drv = edev_ctl->pvt_info;
+
+	/* Look for Data RAM errors */
+	regmap_read(drv->llcc_map, DRP_INTERRUPT_STATUS, &drp_error);
+
+	if (drp_error & SB_ECC_ERROR) {
+		edac_printk(KERN_CRIT, EDAC_LLCC,
+			"Single Bit Error detected in Data Ram\n");
+		dump_syn_reg(edev_ctl, LLCC_DRAM_CE, drv->llcc_map);
+	} else if (drp_error & DB_ECC_ERROR) {
+		edac_printk(KERN_CRIT, EDAC_LLCC,
+			"Double Bit Error detected in Data Ram\n");
+		dump_syn_reg(edev_ctl, LLCC_DRAM_UE, drv->llcc_map);
+	}
+
+	/* Look for Tag RAM errors */
+	regmap_read(drv->llcc_map, TRP_INTERRUPT_0_STATUS, &trp_error);
+	if (trp_error & SB_ECC_ERROR) {
+		edac_printk(KERN_CRIT, EDAC_LLCC,
+			"Single Bit Error detected in Tag Ram\n");
+		dump_syn_reg(edev_ctl, LLCC_TRAM_CE, drv->llcc_map);
+	} else if (trp_error & DB_ECC_ERROR) {
+		edac_printk(KERN_CRIT, EDAC_LLCC,
+			"Double Bit Error detected in Tag Ram\n");
+		dump_syn_reg(edev_ctl, LLCC_TRAM_UE, drv->llcc_map);
+	}
+}
+
+static void qcom_llcc_poll_cache_errors(struct edac_device_ctl_info *edev_ctl)
+{
+	qcom_llcc_check_cache_errors(edev_ctl);
+}
+
+static irqreturn_t llcc_ecc_irq_handler
+			(int irq, void *edev_ctl)
+{
+	qcom_llcc_check_cache_errors(edev_ctl);
+	return IRQ_HANDLED;
+}
+
+static int qcom_llcc_erp_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct erp_drvdata *drv;
+	struct edac_device_ctl_info *edev_ctl;
+	struct device *dev = &pdev->dev;
+
+	/* Allocate edac control info */
+	edev_ctl = edac_device_alloc_ctl_info(sizeof(*drv), "qcom-llcc", 1,
+			NULL, 1, 1, NULL, 0, edac_device_alloc_index());
+	if (!edev_ctl)
+		return -ENOMEM;
+
+	edev_ctl->dev = dev;
+	edev_ctl->mod_name = dev_name(dev);
+	edev_ctl->dev_name = dev_name(dev);
+	edev_ctl->ctl_name = "llcc";
+	edev_ctl->poll_msec = poll_msec;
+	edev_ctl->edac_check = qcom_llcc_poll_cache_errors;
+	edev_ctl->defer_work = 1;
+	edev_ctl->panic_on_ce = LLCC_ERP_PANIC_ON_CE;
+	edev_ctl->panic_on_ue = LLCC_ERP_PANIC_ON_UE;
+
+	drv = edev_ctl->pvt_info;
+
+	drv->llcc_map = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(drv->llcc_map)) {
+		dev_err(dev, "no regmap for syscon llcc parent\n");
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	if (interrupt_mode) {
+		drv->ecc_irq = platform_get_irq_byname(pdev, "ecc_irq");
+		if (drv->ecc_irq < 0) {
+			rc = -ENODEV;
+			goto out;
+		}
+
+		rc = devm_request_irq(dev, drv->ecc_irq, llcc_ecc_irq_handler,
+				IRQF_TRIGGER_RISING, "llcc_ecc", edev_ctl);
+		if (rc) {
+			dev_err(dev, "failed to request ecc irq\n");
+			goto out;
+		}
+	}
+
+	platform_set_drvdata(pdev, edev_ctl);
+
+	rc = edac_device_add_device(edev_ctl);
+out:
+	if (rc)
+		edac_device_free_ctl_info(edev_ctl);
+
+	return rc;
+}
+
+static int qcom_llcc_erp_remove(struct platform_device *pdev)
+{
+	struct edac_device_ctl_info *edev_ctl = dev_get_drvdata(&pdev->dev);
+
+	edac_device_del_device(edev_ctl->dev);
+	edac_device_free_ctl_info(edev_ctl);
+
+	return 0;
+}
+
+static const struct of_device_id qcom_llcc_erp_match_table[] = {
+	{ .compatible = "qcom,llcc-erp" },
+	{ },
+};
+
+static struct platform_driver qcom_llcc_erp_driver = {
+	.probe = qcom_llcc_erp_probe,
+	.remove = qcom_llcc_erp_remove,
+	.driver = {
+		.name = "qcom_llcc_erp",
+		.owner = THIS_MODULE,
+		.of_match_table = qcom_llcc_erp_match_table,
+	},
+};
+
+static int __init qcom_llcc_erp_init(void)
+{
+	return platform_driver_register(&qcom_llcc_erp_driver);
+}
+module_init(qcom_llcc_erp_init);
+
+static void __exit qcom_llcc_erp_exit(void)
+{
+	platform_driver_unregister(&qcom_llcc_erp_driver);
+}
+module_exit(qcom_llcc_erp_exit);
+
+MODULE_DESCRIPTION("QCOM LLCC Error Reporting");
+MODULE_LICENSE("GPL v2");
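
Likewise, a self-contained sketch of the single-bit ERROR_STATUS decode performed by the dump_*_sb_syn_reg() helpers above; the mask names mirror the ECC_SB_* macros and the register values are invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define SB_ERR_COUNT_MASK	0x00ff0000
#define SB_ERR_COUNT_SHIFT	16
#define SB_ERR_WAYS_MASK	0x0000ffff

int main(void)
{
	uint32_t status1 = 0x00030000;	/* three single-bit errors counted */
	uint32_t status0 = 0x00000005;	/* ways 0 and 2 flagged */

	/* Prints: sb count=3 ways=0x5 */
	printf("sb count=%u ways=0x%x\n",
	       (status1 & SB_ERR_COUNT_MASK) >> SB_ERR_COUNT_SHIFT,
	       status0 & SB_ERR_WAYS_MASK);
	return 0;
}
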
diff --git a/drivers/esoc/Kconfig b/drivers/esoc/Kconfig
new file mode 100644
index 0000000..0efca1e
--- /dev/null
+++ b/drivers/esoc/Kconfig
@@ -0,0 +1,64 @@
+#
+# External soc control infrastructure and drivers
+#
+menuconfig ESOC
+	bool "External SOCs Control"
+	help
+	  External SOCs can be powered on and monitored by user
+	  space or kernel drivers. Additionally, they can be controlled
+	  to respond to control commands. This framework provides an
+	  interface to track events related to the external slave socs.
+
+if ESOC
+
+config ESOC_DEV
+	bool "ESOC userspace interface"
+	help
+	  Say yes here to enable a userspace representation of the control
+	  link. Userspace can register a request engine or a command engine
+	  for the external soc. It can receive event notifications from the
+	  control link.
+
+config ESOC_CLIENT
+	bool "ESOC client interface"
+	depends on OF
+	help
+	  Say yes here to enable client interface for external socs.
+	  Clients can specify the external soc that they are interested in
+	  by using device tree phandles. Based on this, clients can register
+	  for notifications from a specific soc.
+
+config ESOC_DEBUG
+	bool "ESOC debug support"
+	help
+	  Say yes here to enable debugging support in the ESOC framework
+	  and individual esoc drivers.
+	  If you wish to debug the esoc driver and enable more logging, enable
+	  this option. The DEBUG macro will then be defined, which allows
+	  logging of additional esoc driver traces.
+
+config ESOC_MDM_4x
+	bool "Add support for external mdm9x25/mdm9x35/mdm9x45/mdm9x55"
+	help
+	  In some Qualcomm Technologies, Inc. boards, an external modem such as
+	  mdm9x25 or mdm9x35 is connected to a primary msm. The primary soc can
+	  control/monitor the modem via gpios. The data communication with such
+	  modems can occur over PCIE or HSIC.
+
+config ESOC_MDM_DRV
+	tristate "Command engine for 4x series external modems"
+	help
+	  Provides a command engine to control the behavior of an external modem
+	  such as mdm9x25/mdm9x35/mdm9x45/mdm9x55/QSC. Allows the primary soc to put the
+	  external modem in a specific mode. Also listens for events on the
+	  external modem.
+
+config ESOC_MDM_DBG_ENG
+	tristate "Debug engine for 4x series external modems"
+	depends on ESOC_MDM_DRV
+	help
+	  Provides a user interface to mask out certain commands sent
+	  by command engine to the external modem. Also allows masking
+	  of certain notifications being sent to the external modem.
+
+endif
diff --git a/drivers/esoc/Makefile b/drivers/esoc/Makefile
new file mode 100644
index 0000000..76137ea
--- /dev/null
+++ b/drivers/esoc/Makefile
@@ -0,0 +1,9 @@
+# generic  external soc control support
+
+ccflags-$(CONFIG_ESOC_DEBUG)	:= -DDEBUG
+obj-$(CONFIG_ESOC)	+= esoc_bus.o
+obj-$(CONFIG_ESOC_DEV)	+= esoc_dev.o
+obj-$(CONFIG_ESOC_CLIENT)	+= esoc_client.o
+obj-$(CONFIG_ESOC_MDM_4x)	+= esoc-mdm-pon.o esoc-mdm-4x.o
+obj-$(CONFIG_ESOC_MDM_DRV)	+= esoc-mdm-drv.o
+obj-$(CONFIG_ESOC_MDM_DBG_ENG)	+= esoc-mdm-dbg-eng.o
diff --git a/drivers/esoc/esoc-mdm-4x.c b/drivers/esoc/esoc-mdm-4x.c
new file mode 100644
index 0000000..b1834e2
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-4x.c
@@ -0,0 +1,1033 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+#include <linux/workqueue.h>
+#include <soc/qcom/sysmon.h>
+#include "esoc-mdm.h"
+
+enum gpio_update_config {
+	GPIO_UPDATE_BOOTING_CONFIG = 1,
+	GPIO_UPDATE_RUNNING_CONFIG,
+};
+
+enum irq_mask {
+	IRQ_ERRFATAL = 0x1,
+	IRQ_STATUS = 0x2,
+	IRQ_PBLRDY = 0x4,
+};
+
+
+static struct gpio_map {
+	const char *name;
+	int index;
+} gpio_map[] = {
+	{"qcom,mdm2ap-errfatal-gpio",   MDM2AP_ERRFATAL},
+	{"qcom,ap2mdm-errfatal-gpio",   AP2MDM_ERRFATAL},
+	{"qcom,mdm2ap-status-gpio",     MDM2AP_STATUS},
+	{"qcom,ap2mdm-status-gpio",     AP2MDM_STATUS},
+	{"qcom,mdm2ap-pblrdy-gpio",     MDM2AP_PBLRDY},
+	{"qcom,ap2mdm-wakeup-gpio",     AP2MDM_WAKEUP},
+	{"qcom,ap2mdm-chnlrdy-gpio",    AP2MDM_CHNLRDY},
+	{"qcom,mdm2ap-wakeup-gpio",     MDM2AP_WAKEUP},
+	{"qcom,ap2mdm-vddmin-gpio",     AP2MDM_VDDMIN},
+	{"qcom,mdm2ap-vddmin-gpio",     MDM2AP_VDDMIN},
+	{"qcom,ap2mdm-pmic-pwr-en-gpio", AP2MDM_PMIC_PWR_EN},
+	{"qcom,mdm-link-detect-gpio", MDM_LINK_DETECT},
+};
+
+/* Required gpios */
+static const int required_gpios[] = {
+	MDM2AP_ERRFATAL,
+	AP2MDM_ERRFATAL,
+	MDM2AP_STATUS,
+	AP2MDM_STATUS,
+};
+
+static void mdm_debug_gpio_show(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+
+	dev_dbg(dev, "%s: MDM2AP_ERRFATAL gpio = %d\n",
+			__func__, MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+	dev_dbg(dev, "%s: AP2MDM_ERRFATAL gpio = %d\n",
+			__func__, MDM_GPIO(mdm, AP2MDM_ERRFATAL));
+	dev_dbg(dev, "%s: MDM2AP_STATUS gpio = %d\n",
+			__func__, MDM_GPIO(mdm, MDM2AP_STATUS));
+	dev_dbg(dev, "%s: AP2MDM_STATUS gpio = %d\n",
+			__func__, MDM_GPIO(mdm, AP2MDM_STATUS));
+	dev_dbg(dev, "%s: AP2MDM_SOFT_RESET gpio = %d\n",
+			__func__, MDM_GPIO(mdm, AP2MDM_SOFT_RESET));
+	dev_dbg(dev, "%s: MDM2AP_WAKEUP gpio = %d\n",
+			__func__, MDM_GPIO(mdm, MDM2AP_WAKEUP));
+	dev_dbg(dev, "%s: AP2MDM_WAKEUP gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, AP2MDM_WAKEUP));
+	dev_dbg(dev, "%s: AP2MDM_PMIC_PWR_EN gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, AP2MDM_PMIC_PWR_EN));
+	dev_dbg(dev, "%s: MDM2AP_PBLRDY gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, MDM2AP_PBLRDY));
+	dev_dbg(dev, "%s: AP2MDM_VDDMIN gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, AP2MDM_VDDMIN));
+	dev_dbg(dev, "%s: MDM2AP_VDDMIN gpio = %d\n",
+			 __func__, MDM_GPIO(mdm, MDM2AP_VDDMIN));
+}
+
+static void mdm_enable_irqs(struct mdm_ctrl *mdm)
+{
+	if (!mdm)
+		return;
+	if (mdm->irq_mask & IRQ_ERRFATAL) {
+		enable_irq(mdm->errfatal_irq);
+		irq_set_irq_wake(mdm->errfatal_irq, 1);
+		mdm->irq_mask &= ~IRQ_ERRFATAL;
+	}
+	if (mdm->irq_mask & IRQ_STATUS) {
+		enable_irq(mdm->status_irq);
+		irq_set_irq_wake(mdm->status_irq, 1);
+		mdm->irq_mask &= ~IRQ_STATUS;
+	}
+	if (mdm->irq_mask & IRQ_PBLRDY) {
+		enable_irq(mdm->pblrdy_irq);
+		mdm->irq_mask &= ~IRQ_PBLRDY;
+	}
+}
+
+static void mdm_disable_irqs(struct mdm_ctrl *mdm)
+{
+	if (!mdm)
+		return;
+	if (!(mdm->irq_mask & IRQ_ERRFATAL)) {
+		irq_set_irq_wake(mdm->errfatal_irq, 0);
+		disable_irq_nosync(mdm->errfatal_irq);
+		mdm->irq_mask |= IRQ_ERRFATAL;
+	}
+	if (!(mdm->irq_mask & IRQ_STATUS)) {
+		irq_set_irq_wake(mdm->status_irq, 0);
+		disable_irq_nosync(mdm->status_irq);
+		mdm->irq_mask |= IRQ_STATUS;
+	}
+	if (!(mdm->irq_mask & IRQ_PBLRDY)) {
+		disable_irq_nosync(mdm->pblrdy_irq);
+		mdm->irq_mask |= IRQ_PBLRDY;
+	}
+}
+
+static void mdm_deconfigure_ipc(struct mdm_ctrl *mdm)
+{
+	int i;
+
+	for (i = 0; i < NUM_GPIOS; ++i) {
+		if (gpio_is_valid(MDM_GPIO(mdm, i)))
+			gpio_free(MDM_GPIO(mdm, i));
+	}
+	if (mdm->mdm_queue) {
+		destroy_workqueue(mdm->mdm_queue);
+		mdm->mdm_queue = NULL;
+	}
+}
+
+static void mdm_update_gpio_configs(struct mdm_ctrl *mdm,
+				enum gpio_update_config gpio_config)
+{
+	struct pinctrl_state *pins_state = NULL;
+	/* Some gpio configuration may need updating after modem bootup. */
+	switch (gpio_config) {
+	case GPIO_UPDATE_RUNNING_CONFIG:
+		pins_state = mdm->gpio_state_running;
+		break;
+	case GPIO_UPDATE_BOOTING_CONFIG:
+		pins_state = mdm->gpio_state_booting;
+		break;
+	default:
+		pins_state = NULL;
+		dev_err(mdm->dev, "%s: called with no config\n", __func__);
+		break;
+	}
+	if (pins_state != NULL) {
+		if (pinctrl_select_state(mdm->pinctrl, pins_state))
+			dev_err(mdm->dev, "switching gpio config failed\n");
+	}
+}
+
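+/*
+ * Pulse the CTI channel when debug mode is enabled; trig_cnt ensures the
+ * pulse fires only once per boot/debug cycle.
+ */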
+static void mdm_trigger_dbg(struct mdm_ctrl *mdm)
+{
+	int ret;
+
+	if (mdm->dbg_mode && !mdm->trig_cnt) {
+		ret = coresight_cti_pulse_trig(mdm->cti, MDM_CTI_CH);
+		mdm->trig_cnt++;
+		if (ret)
+			dev_err(mdm->dev, "unable to trigger cti pulse on\n");
+	}
+}
+
+static int mdm_cmd_exe(enum esoc_cmd cmd, struct esoc_clink *esoc)
+{
+	unsigned long end_time;
+	bool status_down = false;
+	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+	struct device *dev = mdm->dev;
+	int ret;
+	bool graceful_shutdown = false;
+
+	switch (cmd) {
+	case ESOC_PWR_ON:
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+		mdm_enable_irqs(mdm);
+		mdm->init = 1;
+		mdm_do_first_power_on(mdm);
+		break;
+	case ESOC_PWR_OFF:
+		mdm_disable_irqs(mdm);
+		mdm->debug = 0;
+		mdm->ready = false;
+		mdm->trig_cnt = 0;
+		graceful_shutdown = true;
+		ret = sysmon_send_shutdown(&esoc->subsys);
+		if (ret) {
+			dev_err(mdm->dev, "sysmon shutdown fail, ret = %d\n",
+									ret);
+			graceful_shutdown = false;
+			goto force_poff;
+		}
+		dev_dbg(mdm->dev, "Waiting for status gpio to go low\n");
+		status_down = false;
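+		/* Poll MDM2AP_STATUS for up to 10 seconds, every 100 ms. */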
+		end_time = jiffies + msecs_to_jiffies(10000);
+		while (time_before(jiffies, end_time)) {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS))
+									== 0) {
+				dev_dbg(dev, "Status went low\n");
+				status_down = true;
+				break;
+			}
+			msleep(100);
+		}
+		if (status_down)
+			dev_dbg(dev, "shutdown successful\n");
+		else
+			dev_err(mdm->dev, "graceful poff ipc fail\n");
+		break;
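+	/* Jump target for ESOC_PWR_OFF when a graceful shutdown fails. */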
+force_poff:
+	case ESOC_FORCE_PWR_OFF:
+		if (!graceful_shutdown) {
+			mdm_disable_irqs(mdm);
+			mdm->debug = 0;
+			mdm->ready = false;
+			mdm->trig_cnt = 0;
+
+			dev_err(mdm->dev, "Graceful shutdown fail, ret = %d\n",
+				esoc->subsys.sysmon_shutdown_ret);
+		}
+
+		/*
+		 * Force a shutdown of the mdm. This is required in order
+		 * to prevent the mdm from immediately powering back on
+		 * after the shutdown
+		 */
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+		esoc_clink_queue_request(ESOC_REQ_SHUTDOWN, esoc);
+		mdm_power_down(mdm);
+		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
+		break;
+	case ESOC_RESET:
+		mdm_toggle_soft_reset(mdm, false);
+		break;
+	case ESOC_PREPARE_DEBUG:
+		/*
+		 * Disable all IRQs except the request IRQ (pblrdy).
+		 * Force a reset of the mdm by signaling an APQ crash,
+		 * then wait until the mdm is ready for ramdumps.
+		 */
+		mdm->ready = false;
+		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
+		dev_dbg(mdm->dev, "set ap2mdm errfatal to force reset\n");
+		msleep(mdm->ramdump_delay_ms);
+		break;
+	case ESOC_EXE_DEBUG:
+		mdm->debug = 1;
+		mdm->trig_cnt = 0;
+		mdm_toggle_soft_reset(mdm, false);
+		/*
+		 * wait for ramdumps to be collected
+		 * then power down the mdm and switch gpios to booting
+		 * config
+		 */
+		wait_for_completion(&mdm->debug_done);
+		if (mdm->debug_fail) {
+			dev_err(mdm->dev, "unable to collect ramdumps\n");
+			mdm->debug = 0;
+			return -EIO;
+		}
+		dev_dbg(mdm->dev, "ramdump collection done\n");
+		mdm->debug = 0;
+		init_completion(&mdm->debug_done);
+		break;
+	case ESOC_EXIT_DEBUG:
+		/*
+		 * Deassert APQ to mdm err fatal
+		 * Power on the mdm
+		 */
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+		dev_dbg(mdm->dev, "exiting debug state after power on\n");
+		mdm->get_restart_reason = true;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static void mdm2ap_status_check(struct work_struct *work)
+{
+	struct mdm_ctrl *mdm =
+		container_of(work, struct mdm_ctrl,
+					 mdm2ap_status_check_work.work);
+	struct device *dev = mdm->dev;
+	struct esoc_clink *esoc = mdm->esoc;
+
+	if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0) {
+		dev_dbg(dev, "MDM2AP_STATUS did not go high\n");
+		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
+	}
+}
+
+static void mdm_status_fn(struct work_struct *work)
+{
+	struct mdm_ctrl *mdm =
+		container_of(work, struct mdm_ctrl, mdm_status_work);
+	struct device *dev = mdm->dev;
+	int value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
+
+	dev_dbg(dev, "%s: status:%d\n", __func__, value);
+	/* Update gpio configuration to "running" config. */
+	mdm_update_gpio_configs(mdm, GPIO_UPDATE_RUNNING_CONFIG);
+}
+
+static void mdm_get_restart_reason(struct work_struct *work)
+{
+	int ret, ntries = 0;
+	char sfr_buf[RD_BUF_SIZE];
+	struct mdm_ctrl *mdm =
+		container_of(work, struct mdm_ctrl, restart_reason_work);
+	struct device *dev = mdm->dev;
+
+	do {
+		ret = sysmon_get_reason(&mdm->esoc->subsys, sfr_buf,
+							sizeof(sfr_buf));
+		if (!ret) {
+			dev_err(dev, "mdm restart reason is %s\n", sfr_buf);
+			break;
+		}
+		msleep(SFR_RETRY_INTERVAL);
+	} while (++ntries < SFR_MAX_RETRIES);
+	if (ntries == SFR_MAX_RETRIES)
+		dev_dbg(dev, "%s: Error retrieving restart reason: %d\n",
+						__func__, ret);
+	mdm->get_restart_reason = false;
+}
+
+static void mdm_notify(enum esoc_notify notify, struct esoc_clink *esoc)
+{
+	bool status_down;
+	uint64_t timeout;
+	uint64_t now;
+	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+	struct device *dev = mdm->dev;
+
+	switch (notify) {
+	case ESOC_IMG_XFER_DONE:
+		if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) ==  0)
+			schedule_delayed_work(&mdm->mdm2ap_status_check_work,
+				msecs_to_jiffies(MDM2AP_STATUS_TIMEOUT_MS));
+		break;
+	case ESOC_BOOT_DONE:
+		esoc_clink_evt_notify(ESOC_RUN_STATE, esoc);
+		break;
+	case ESOC_IMG_XFER_RETRY:
+		mdm->init = 1;
+		mdm_toggle_soft_reset(mdm, false);
+		break;
+	case ESOC_IMG_XFER_FAIL:
+		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
+		break;
+	case ESOC_BOOT_FAIL:
+		esoc_clink_evt_notify(ESOC_INVALID_STATE, esoc);
+		break;
+	case ESOC_UPGRADE_AVAILABLE:
+		break;
+	case ESOC_DEBUG_DONE:
+		mdm->debug_fail = false;
+		mdm_update_gpio_configs(mdm, GPIO_UPDATE_BOOTING_CONFIG);
+		complete(&mdm->debug_done);
+		break;
+	case ESOC_DEBUG_FAIL:
+		mdm->debug_fail = true;
+		complete(&mdm->debug_done);
+		break;
+	case ESOC_PRIMARY_CRASH:
+		mdm_disable_irqs(mdm);
+		status_down = false;
+		dev_dbg(dev, "signal apq err fatal for graceful restart\n");
+		gpio_set_value(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 1);
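+		/*
+		 * The primary-crash notification is likely delivered from an
+		 * atomic (panic) context, so busy-wait on local_clock() for
+		 * up to MDM_MODEM_TIMEOUT ms instead of sleeping.
+		 */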
+		timeout = local_clock();
+		do_div(timeout, NSEC_PER_MSEC);
+		timeout += MDM_MODEM_TIMEOUT;
+		do {
+			if (gpio_get_value(MDM_GPIO(mdm,
+						MDM2AP_STATUS)) == 0) {
+				status_down = true;
+				break;
+			}
+			now = local_clock();
+			do_div(now, NSEC_PER_MSEC);
+		} while (!time_after64(now, timeout));
+
+		if (!status_down) {
+			dev_err(mdm->dev, "%s MDM2AP status did not go low\n",
+								__func__);
+			mdm_toggle_soft_reset(mdm, true);
+		}
+		break;
+	case ESOC_PRIMARY_REBOOT:
+		mdm_disable_irqs(mdm);
+		mdm->debug = 0;
+		mdm->ready = false;
+		mdm_cold_reset(mdm);
+		break;
+	}
+}
+
+static irqreturn_t mdm_errfatal(int irq, void *dev_id)
+{
+	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
+	struct esoc_clink *esoc;
+	struct device *dev;
+
+	if (!mdm)
+		goto no_mdm_irq;
+	dev = mdm->dev;
+	if (!mdm->ready)
+		goto mdm_pwroff_irq;
+	esoc = mdm->esoc;
+	dev_err(dev, "%s: mdm sent errfatal interrupt\n",
+					 __func__);
+	/* disable irq? */
+	esoc_clink_evt_notify(ESOC_ERR_FATAL, esoc);
+	return IRQ_HANDLED;
+mdm_pwroff_irq:
+	dev_info(dev, "errfatal irq when in pwroff\n");
+no_mdm_irq:
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mdm_status_change(int irq, void *dev_id)
+{
+	int value;
+	struct esoc_clink *esoc;
+	struct mdm_ctrl *mdm = (struct mdm_ctrl *)dev_id;
+	struct device *dev;
+
+	if (!mdm)
+		return IRQ_HANDLED;
+	dev = mdm->dev;
+	esoc = mdm->esoc;
+	value = gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS));
+	if (value == 0 && mdm->ready) {
+		dev_err(dev, "unexpected reset of external modem\n");
+		esoc_clink_evt_notify(ESOC_UNEXPECTED_RESET, esoc);
+	} else if (value == 1) {
+		cancel_delayed_work(&mdm->mdm2ap_status_check_work);
+		dev_dbg(dev, "status = 1: mdm is now ready\n");
+		mdm->ready = true;
+		mdm_trigger_dbg(mdm);
+		queue_work(mdm->mdm_queue, &mdm->mdm_status_work);
+		if (mdm->get_restart_reason)
+			queue_work(mdm->mdm_queue, &mdm->restart_reason_work);
+	}
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t mdm_pblrdy_change(int irq, void *dev_id)
+{
+	struct mdm_ctrl *mdm;
+	struct device *dev;
+	struct esoc_clink *esoc;
+
+	mdm = (struct mdm_ctrl *)dev_id;
+	if (!mdm)
+		return IRQ_HANDLED;
+	esoc = mdm->esoc;
+	dev = mdm->dev;
+	dev_dbg(dev, "pbl ready: %d\n",
+			gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY)));
+	if (mdm->init) {
+		mdm->init = 0;
+		mdm_trigger_dbg(mdm);
+		esoc_clink_queue_request(ESOC_REQ_IMG, esoc);
+		return IRQ_HANDLED;
+	}
+	if (mdm->debug)
+		esoc_clink_queue_request(ESOC_REQ_DEBUG, esoc);
+	return IRQ_HANDLED;
+}
+
+static int mdm_get_status(u32 *status, struct esoc_clink *esoc)
+{
+	struct mdm_ctrl *mdm = get_esoc_clink_data(esoc);
+
+	if (gpio_get_value(MDM_GPIO(mdm, MDM2AP_STATUS)) == 0)
+		*status = 0;
+	else
+		*status = 1;
+	return 0;
+}
+
+static void mdm_configure_debug(struct mdm_ctrl *mdm)
+{
+	void __iomem *addr;
+	unsigned int val;
+	int ret;
+	struct device_node *node = mdm->dev->of_node;
+
+	addr = of_iomap(node, 0);
+	if (!addr) {
+		dev_err(mdm->dev, "failed to get debug base address\n");
+		return;
+	}
+	mdm->dbg_addr = addr + MDM_DBG_OFFSET;
+	val = readl_relaxed(mdm->dbg_addr);
+	if (val == MDM_DBG_MODE) {
+		mdm->dbg_mode = true;
+		mdm->cti = coresight_cti_get(MDM_CTI_NAME);
+		if (IS_ERR(mdm->cti)) {
+			dev_err(mdm->dev, "unable to get cti handle\n");
+			goto cti_get_err;
+		}
+		ret = coresight_cti_map_trigout(mdm->cti, MDM_CTI_TRIG,
+								MDM_CTI_CH);
+		if (ret) {
+			dev_err(mdm->dev, "unable to map trig to channel\n");
+			goto cti_map_err;
+		}
+		mdm->trig_cnt = 0;
+	} else {
+		dev_dbg(mdm->dev, "Not in debug mode. debug mode = %u\n", val);
+		mdm->dbg_mode = false;
+	}
+	return;
+cti_map_err:
+	coresight_cti_put(mdm->cti);
+cti_get_err:
+	mdm->dbg_mode = false;
+}
+
+/* Fail if any of the required gpios is absent. */
+static int mdm_dt_parse_gpios(struct mdm_ctrl *mdm)
+{
+	int i, val, rc = 0;
+	struct device_node *node = mdm->dev->of_node;
+
+	for (i = 0; i < NUM_GPIOS; i++)
+		mdm->gpios[i] = INVALID_GPIO;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_map); i++) {
+		val = of_get_named_gpio(node, gpio_map[i].name, 0);
+		if (val >= 0)
+			MDM_GPIO(mdm, gpio_map[i].index) = val;
+	}
+	/* These two are special because they can be inverted. */
+	/* Verify that the required gpios have valid values */
+	for (i = 0; i < ARRAY_SIZE(required_gpios); i++) {
+		if (MDM_GPIO(mdm, required_gpios[i]) == INVALID_GPIO) {
+			rc = -ENXIO;
+			break;
+		}
+	}
+	mdm_debug_gpio_show(mdm);
+	return rc;
+}
+
+static int mdm_configure_ipc(struct mdm_ctrl *mdm, struct platform_device *pdev)
+{
+	int ret = -1;
+	int irq;
+	struct device *dev = mdm->dev;
+	struct device_node *node = pdev->dev.of_node;
+
+	ret = of_property_read_u32(node, "qcom,ramdump-timeout-ms",
+						&mdm->dump_timeout_ms);
+	if (ret)
+		mdm->dump_timeout_ms = DEF_RAMDUMP_TIMEOUT;
+	ret = of_property_read_u32(node, "qcom,ramdump-delay-ms",
+						&mdm->ramdump_delay_ms);
+	if (ret)
+		mdm->ramdump_delay_ms = DEF_RAMDUMP_DELAY;
+	/* Multiple gpio_request calls are allowed */
+	if (gpio_request(MDM_GPIO(mdm, AP2MDM_STATUS), "AP2MDM_STATUS"))
+		dev_err(dev, "Failed to configure AP2MDM_STATUS gpio\n");
+	/* Multiple gpio_request calls are allowed */
+	if (gpio_request(MDM_GPIO(mdm, AP2MDM_ERRFATAL), "AP2MDM_ERRFATAL"))
+		dev_err(dev, "%s Failed to configure AP2MDM_ERRFATAL gpio\n",
+			   __func__);
+	if (gpio_request(MDM_GPIO(mdm, MDM2AP_STATUS), "MDM2AP_STATUS")) {
+		dev_err(dev, "%s Failed to configure MDM2AP_STATUS gpio\n",
+			   __func__);
+		goto fatal_err;
+	}
+	if (gpio_request(MDM_GPIO(mdm, MDM2AP_ERRFATAL), "MDM2AP_ERRFATAL")) {
+		dev_err(dev, "%s Failed to configure MDM2AP_ERRFATAL gpio\n",
+			   __func__);
+		goto fatal_err;
+	}
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		if (gpio_request(MDM_GPIO(mdm, MDM2AP_PBLRDY),
+						"MDM2AP_PBLRDY")) {
+			dev_err(dev, "Cannot configure MDM2AP_PBLRDY gpio\n");
+			goto fatal_err;
+		}
+	}
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_WAKEUP))) {
+		if (gpio_request(MDM_GPIO(mdm, AP2MDM_WAKEUP),
+					"AP2MDM_WAKEUP")) {
+			dev_err(dev, "Cannot configure AP2MDM_WAKEUP gpio\n");
+			goto fatal_err;
+		}
+	}
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY))) {
+		if (gpio_request(MDM_GPIO(mdm, AP2MDM_CHNLRDY),
+						"AP2MDM_CHNLRDY")) {
+			dev_err(dev, "Cannot configure AP2MDM_CHNLRDY gpio\n");
+			goto fatal_err;
+		}
+	}
+
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 0);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_ERRFATAL), 0);
+
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_CHNLRDY)))
+		gpio_direction_output(MDM_GPIO(mdm, AP2MDM_CHNLRDY), 0);
+
+	gpio_direction_input(MDM_GPIO(mdm, MDM2AP_STATUS));
+	gpio_direction_input(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+
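+	/*
+	 * IRQ setup below is best-effort: on a failure we fall through to
+	 * the next label and continue probing instead of aborting.
+	 */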
+	/* ERR_FATAL irq. */
+	irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_ERRFATAL));
+	if (irq < 0) {
+		dev_err(dev, "bad MDM2AP_ERRFATAL IRQ resource\n");
+		goto errfatal_err;
+
+	}
+	ret = request_irq(irq, mdm_errfatal,
+			IRQF_TRIGGER_RISING, "mdm errfatal", mdm);
+
+	if (ret < 0) {
+		dev_err(dev, "%s: MDM2AP_ERRFATAL IRQ#%d request failed\n",
+					__func__, irq);
+		goto errfatal_err;
+	}
+	mdm->errfatal_irq = irq;
+
+errfatal_err:
+	/* status irq */
+	irq = gpio_to_irq(MDM_GPIO(mdm, MDM2AP_STATUS));
+	if (irq < 0) {
+		dev_err(dev, "%s: bad MDM2AP_STATUS IRQ resource, err = %d\n",
+				__func__, irq);
+		goto status_err;
+	}
+	ret = request_threaded_irq(irq, NULL, mdm_status_change,
+		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+		"mdm status", mdm);
+	if (ret < 0) {
+		dev_err(dev, "%s: MDM2AP_STATUS IRQ#%d request failed, err=%d",
+			 __func__, irq, ret);
+		goto status_err;
+	}
+	mdm->status_irq = irq;
+status_err:
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		irq = platform_get_irq_byname(pdev, "plbrdy_irq");
+		if (irq < 0) {
+			dev_err(dev, "%s: MDM2AP_PBLRDY IRQ request failed\n",
+				 __func__);
+			goto pblrdy_err;
+		}
+
+		ret = request_threaded_irq(irq, NULL, mdm_pblrdy_change,
+				IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				"mdm pbl ready", mdm);
+		if (ret < 0) {
+			dev_err(dev, "MDM2AP_PBL IRQ#%d request failed %d\n",
+								irq, ret);
+			goto pblrdy_err;
+		}
+		mdm->pblrdy_irq = irq;
+	}
+	mdm_disable_irqs(mdm);
+pblrdy_err:
+	return 0;
+fatal_err:
+	mdm_deconfigure_ipc(mdm);
+	return ret;
+
+}
+
+static int mdm_pinctrl_init(struct mdm_ctrl *mdm)
+{
+	int retval = 0;
+
+	mdm->pinctrl = devm_pinctrl_get(mdm->dev);
+	if (IS_ERR_OR_NULL(mdm->pinctrl)) {
+		retval = PTR_ERR(mdm->pinctrl);
+		goto err_state_suspend;
+	}
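+	/*
+	 * The "mdm_booting"/"mdm_running" states are optional; the
+	 * "mdm_active" and "mdm_suspend" states are required.
+	 */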
+	mdm->gpio_state_booting =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_booting");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_booting)) {
+		mdm->gpio_state_running = NULL;
+		mdm->gpio_state_booting = NULL;
+	} else {
+		mdm->gpio_state_running =
+			pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_running");
+		if (IS_ERR_OR_NULL(mdm->gpio_state_running)) {
+			mdm->gpio_state_booting = NULL;
+			mdm->gpio_state_running = NULL;
+		}
+	}
+	mdm->gpio_state_active =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_active");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_active)) {
+		retval = PTR_ERR(mdm->gpio_state_active);
+		goto err_state_active;
+	}
+	mdm->gpio_state_suspend =
+		pinctrl_lookup_state(mdm->pinctrl,
+				"mdm_suspend");
+	if (IS_ERR_OR_NULL(mdm->gpio_state_suspend)) {
+		retval = PTR_ERR(mdm->gpio_state_suspend);
+		goto err_state_suspend;
+	}
+	retval = pinctrl_select_state(mdm->pinctrl, mdm->gpio_state_active);
+	return retval;
+
+err_state_suspend:
+	mdm->gpio_state_active = NULL;
+err_state_active:
+	mdm->gpio_state_suspend = NULL;
+	mdm->gpio_state_booting = NULL;
+	mdm->gpio_state_running = NULL;
+	return retval;
+}
+
+static int mdm9x25_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	mdm_configure_debug(mdm);
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x25_LABEL;
+	esoc->link_name = MDM9x25_HSIC;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+static int mdm9x35_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	mdm_configure_debug(mdm);
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x35_LABEL;
+	mdm->dual_interface = of_property_read_bool(node,
+						"qcom,mdm-dual-link");
+	/* Check if link gpio is available */
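+	/* Link-detect high selects HSIC (or dual link); low selects PCIe. */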
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM_LINK_DETECT))) {
+		if (mdm->dual_interface) {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
+				esoc->link_name = MDM9x35_DUAL_LINK;
+			else
+				esoc->link_name = MDM9x35_PCIE;
+		} else {
+			if (gpio_get_value(MDM_GPIO(mdm, MDM_LINK_DETECT)))
+				esoc->link_name = MDM9x35_HSIC;
+			else
+				esoc->link_name = MDM9x35_PCIE;
+		}
+	} else if (mdm->dual_interface)
+		esoc->link_name = MDM9x35_DUAL_LINK;
+	else
+		esoc->link_name = MDM9x35_HSIC;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+static int mdm9x55_setup_hw(struct mdm_ctrl *mdm,
+					const struct mdm_ops *ops,
+					struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node;
+	struct esoc_clink *esoc;
+	const struct esoc_clink_ops *clink_ops = ops->clink_ops;
+	const struct mdm_pon_ops *pon_ops = ops->pon_ops;
+
+	mdm->dev = &pdev->dev;
+	mdm->pon_ops = pon_ops;
+	node = pdev->dev.of_node;
+	esoc = devm_kzalloc(mdm->dev, sizeof(*esoc), GFP_KERNEL);
+	if (!esoc) {
+		dev_err(mdm->dev, "cannot allocate esoc device\n");
+		return -ENOMEM;
+	}
+	mdm->mdm_queue = alloc_workqueue("mdm_queue", 0, 0);
+	if (!mdm->mdm_queue) {
+		dev_err(mdm->dev, "could not create mdm_queue\n");
+		return -ENOMEM;
+	}
+	mdm->irq_mask = 0;
+	mdm->ready = false;
+	ret = mdm_dt_parse_gpios(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "parsing gpio done\n");
+	ret = mdm_pon_dt_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon dt init done\n");
+	ret = mdm_pinctrl_init(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pinctrl init done\n");
+	ret = mdm_pon_setup(mdm);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "pon setup done\n");
+	ret = mdm_configure_ipc(mdm, pdev);
+	if (ret)
+		return ret;
+	dev_dbg(mdm->dev, "ipc configure done\n");
+	esoc->name = MDM9x55_LABEL;
+	mdm->dual_interface = of_property_read_bool(node,
+						"qcom,mdm-dual-link");
+	esoc->link_name = MDM9x55_PCIE;
+	esoc->clink_ops = clink_ops;
+	esoc->parent = mdm->dev;
+	esoc->owner = THIS_MODULE;
+	esoc->np = pdev->dev.of_node;
+	set_esoc_clink_data(esoc, mdm);
+	ret = esoc_clink_register(esoc);
+	if (ret) {
+		dev_err(mdm->dev, "esoc registration failed\n");
+		return ret;
+	}
+	dev_dbg(mdm->dev, "esoc registration done\n");
+	init_completion(&mdm->debug_done);
+	INIT_WORK(&mdm->mdm_status_work, mdm_status_fn);
+	INIT_WORK(&mdm->restart_reason_work, mdm_get_restart_reason);
+	INIT_DELAYED_WORK(&mdm->mdm2ap_status_check_work, mdm2ap_status_check);
+	mdm->get_restart_reason = false;
+	mdm->debug_fail = false;
+	mdm->esoc = esoc;
+	mdm->init = 0;
+	return 0;
+}
+
+static struct esoc_clink_ops mdm_cops = {
+	.cmd_exe = mdm_cmd_exe,
+	.get_status = mdm_get_status,
+	.notify = mdm_notify,
+};
+
+static struct mdm_ops mdm9x25_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x25_setup_hw,
+	.pon_ops = &mdm9x25_pon_ops,
+};
+
+static struct mdm_ops mdm9x35_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x35_setup_hw,
+	.pon_ops = &mdm9x35_pon_ops,
+};
+
+static struct mdm_ops mdm9x55_ops = {
+	.clink_ops = &mdm_cops,
+	.config_hw = mdm9x55_setup_hw,
+	.pon_ops = &mdm9x55_pon_ops,
+};
+
+static const struct of_device_id mdm_dt_match[] = {
+	{ .compatible = "qcom,ext-mdm9x25",
+		.data = &mdm9x25_ops, },
+	{ .compatible = "qcom,ext-mdm9x35",
+		.data = &mdm9x35_ops, },
+	{ .compatible = "qcom,ext-mdm9x55",
+		.data = &mdm9x55_ops, },
+	{},
+};
+MODULE_DEVICE_TABLE(of, mdm_dt_match);
+
+static int mdm_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *match;
+	const struct mdm_ops *mdm_ops;
+	struct device_node *node = pdev->dev.of_node;
+	struct mdm_ctrl *mdm;
+
+	match = of_match_node(mdm_dt_match, node);
+	if (!match)
+		return -ENODEV;
+	mdm_ops = match->data;
+	mdm = devm_kzalloc(&pdev->dev, sizeof(*mdm), GFP_KERNEL);
+	if (!mdm)
+		return -ENOMEM;
+	return mdm_ops->config_hw(mdm, mdm_ops, pdev);
+}
+
+static struct platform_driver mdm_driver = {
+	.probe		= mdm_probe,
+	.driver = {
+		.name	= "ext-mdm",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(mdm_dt_match),
+	},
+};
+
+static int __init mdm_register(void)
+{
+	return platform_driver_register(&mdm_driver);
+}
+module_init(mdm_register);
+
+static void __exit mdm_unregister(void)
+{
+	platform_driver_unregister(&mdm_driver);
+}
+module_exit(mdm_unregister);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-dbg-eng.c b/drivers/esoc/esoc-mdm-dbg-eng.c
new file mode 100644
index 0000000..a186ea8
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-dbg-eng.c
@@ -0,0 +1,204 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include "esoc.h"
+
+/*
+ * struct esoc_mask: Tracks whether a command/notifier is masked, and
+ * the trigger value at which the mask takes effect.
+ * @mask_trigger: trigger value for the mask.
+ * @mask: true if the command/notifier should be masked.
+ */
+struct esoc_mask {
+	atomic_t mask_trigger;
+	bool mask;
+};
+
+/*
+ * manual_to_esoc_cmd: Maps a user-provided command string to the
+ * corresponding esoc command.
+ * @cmd: ESOC command.
+ * @manual_cmd: user-specified command string.
+ */
+struct manual_to_esoc_cmd {
+	unsigned int cmd;
+	char manual_cmd[20];
+};
+
+/*
+ * manual_to_esoc_notify: Maps a user-provided notification string to the
+ * corresponding esoc notification for the primary SOC.
+ * @notify: ESOC notification.
+ * @manual_notify: user-specified notification string.
+ */
+struct manual_to_esoc_notify {
+	unsigned int notify;
+	char manual_notify[20];
+};
+
+static const struct manual_to_esoc_cmd cmd_map[] = {
+	{
+		.cmd = ESOC_PWR_ON,
+		.manual_cmd = "PON",
+	},
+	{
+		.cmd = ESOC_PREPARE_DEBUG,
+		.manual_cmd = "ENTER_DLOAD",
+	},
+	{	.cmd = ESOC_PWR_OFF,
+		.manual_cmd = "POFF",
+	},
+	{
+		.cmd = ESOC_FORCE_PWR_OFF,
+		.manual_cmd = "FORCE_POFF",
+	},
+};
+
+static struct esoc_mask cmd_mask[] = {
+	[ESOC_PWR_ON] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(1),
+	},
+	[ESOC_PREPARE_DEBUG] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+	[ESOC_PWR_OFF] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+	[ESOC_FORCE_PWR_OFF] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+};
+
+static const struct manual_to_esoc_notify notify_map[] = {
+	{
+		.notify = ESOC_PRIMARY_REBOOT,
+		.manual_notify = "REBOOT",
+	},
+	{
+		.notify = ESOC_PRIMARY_CRASH,
+		.manual_notify = "PANIC",
+	},
+};
+
+static struct esoc_mask notify_mask[] = {
+	[ESOC_PRIMARY_REBOOT] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+	[ESOC_PRIMARY_CRASH] = {
+		.mask = false,
+		.mask_trigger = ATOMIC_INIT(0),
+	},
+};
+
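+/*
+ * A mask_trigger of N lets N invocations of a masked command/notifier
+ * pass before it is stalled; atomic_add_negative() decrements the
+ * trigger on each check.
+ */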
+bool dbg_check_cmd_mask(unsigned int cmd)
+{
+	pr_debug("command to mask %d\n", cmd);
+	if (cmd_mask[cmd].mask)
+		return atomic_add_negative(-1, &cmd_mask[cmd].mask_trigger);
+	else
+		return false;
+}
+EXPORT_SYMBOL(dbg_check_cmd_mask);
+
+bool dbg_check_notify_mask(unsigned int notify)
+{
+	pr_debug("notifier to mask %d\n", notify);
+	if (notify_mask[notify].mask)
+		return atomic_add_negative(-1,
+					&notify_mask[notify].mask_trigger);
+	else
+		return false;
+}
+EXPORT_SYMBOL(dbg_check_notify_mask);
+/*
+ * Create driver attributes that let you mask
+ * specific commands.
+ */
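+/*
+ * Example (the exact sysfs path depends on how the esoc driver
+ * registers, typically /sys/bus/esoc/drivers/mdm-4x/command_mask):
+ *	echo -n PON > command_mask
+ * Note the store handlers use strcmp(), so the written string must not
+ * carry a trailing newline.
+ */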
+static ssize_t cmd_mask_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int cmd, i;
+
+	pr_debug("user input command %s", buf);
+	for (i = 0; i < ARRAY_SIZE(cmd_map); i++) {
+		if (!strcmp(cmd_map[i].manual_cmd, buf)) {
+			/*
+			 * Map manual command string to ESOC command
+			 * set mask for ESOC command
+			 */
+			cmd = cmd_map[i].cmd;
+			cmd_mask[cmd].mask = true;
+			pr_debug("Setting mask for manual command %s\n",
+								buf);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(cmd_map))
+		pr_err("invalid command specified\n");
+	return count;
+}
+static DRIVER_ATTR(command_mask, 00200, NULL, cmd_mask_store);
+
+static ssize_t notifier_mask_store(struct device_driver *drv, const char *buf,
+							size_t count)
+{
+	unsigned int notify, i;
+
+	pr_debug("user input notifier %s", buf);
+	for (i = 0; i < ARRAY_SIZE(notify_map); i++) {
+		if (!strcmp(buf, notify_map[i].manual_notify)) {
+			/*
+			 * Map manual notifier string to primary soc
+			 * notifier. Also set mask for the notifier.
+			 */
+			notify = notify_map[i].notify;
+			notify_mask[notify].mask = true;
+			pr_debug("Setting mask for manual notification %s\n",
+									buf);
+			break;
+		}
+	}
+	if (i >= ARRAY_SIZE(notify_map))
+		pr_err("invalid notifier specified\n");
+	return count;
+}
+static DRIVER_ATTR(notifier_mask, 00200, NULL, notifier_mask_store);
+
+int mdm_dbg_eng_init(struct esoc_drv *esoc_drv)
+{
+	int ret;
+	struct device_driver *drv = &esoc_drv->driver;
+
+	ret = driver_create_file(drv, &driver_attr_command_mask);
+	if (ret) {
+		pr_err("Unable to create command mask file\n");
+		goto cmd_mask_err;
+	}
+	ret = driver_create_file(drv, &driver_attr_notifier_mask);
+	if (ret) {
+		pr_err("Unable to create notify mask file\n");
+		goto notify_mask_err;
+	}
+	return 0;
+notify_mask_err:
+	driver_remove_file(drv, &driver_attr_command_mask);
+cmd_mask_err:
+	return ret;
+}
+EXPORT_SYMBOL(mdm_dbg_eng_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-drv.c b/drivers/esoc/esoc-mdm-drv.c
new file mode 100644
index 0000000..473a9c7
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-drv.c
@@ -0,0 +1,309 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/reboot.h>
+#include "esoc.h"
+#include "mdm-dbg.h"
+
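+/* External modem states tracked by the esoc SSR driver. */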
+enum {
+	 PWR_OFF = 0x1,
+	 PWR_ON,
+	 BOOT,
+	 RUN,
+	 CRASH,
+	 IN_DEBUG,
+	 SHUTDOWN,
+	 RESET,
+	 PEER_CRASH,
+};
+
+struct mdm_drv {
+	unsigned int mode;
+	struct esoc_eng cmd_eng;
+	struct completion boot_done;
+	struct completion req_eng_wait;
+	struct esoc_clink *esoc_clink;
+	bool boot_fail;
+	struct workqueue_struct *mdm_queue;
+	struct work_struct ssr_work;
+	struct notifier_block esoc_restart;
+};
+#define to_mdm_drv(d)	container_of(d, struct mdm_drv, cmd_eng)
+
+static int esoc_msm_restart_handler(struct notifier_block *nb,
+		unsigned long action, void *data)
+{
+	struct mdm_drv *mdm_drv = container_of(nb, struct mdm_drv,
+					esoc_restart);
+	struct esoc_clink *esoc_clink = mdm_drv->esoc_clink;
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (action == SYS_RESTART) {
+		if (mdm_dbg_stall_notify(ESOC_PRIMARY_REBOOT))
+			return NOTIFY_OK;
+		dev_dbg(&esoc_clink->dev, "Notifying esoc of cold reboot\n");
+		clink_ops->notify(ESOC_PRIMARY_REBOOT, esoc_clink);
+	}
+	return NOTIFY_OK;
+}
+
+static void mdm_handle_clink_evt(enum esoc_evt evt,
+					struct esoc_eng *eng)
+{
+	struct mdm_drv *mdm_drv = to_mdm_drv(eng);
+
+	switch (evt) {
+	case ESOC_INVALID_STATE:
+		mdm_drv->boot_fail = true;
+		complete(&mdm_drv->boot_done);
+		break;
+	case ESOC_RUN_STATE:
+		mdm_drv->boot_fail = false;
+		mdm_drv->mode = RUN;
+		complete(&mdm_drv->boot_done);
+		break;
+	case ESOC_UNEXPECTED_RESET:
+	case ESOC_ERR_FATAL:
+		if (mdm_drv->mode == CRASH)
+			return;
+		mdm_drv->mode = CRASH;
+		queue_work(mdm_drv->mdm_queue, &mdm_drv->ssr_work);
+		break;
+	case ESOC_REQ_ENG_ON:
+		complete(&mdm_drv->req_eng_wait);
+		break;
+	default:
+		break;
+	}
+}
+
+static void mdm_ssr_fn(struct work_struct *work)
+{
+	struct mdm_drv *mdm_drv = container_of(work, struct mdm_drv, ssr_work);
+
+	/*
+	 * If restarting esoc fails, the SSR framework triggers a kernel panic
+	 */
+	esoc_clink_request_ssr(mdm_drv->esoc_clink);
+}
+
+static void mdm_crash_shutdown(const struct subsys_desc *mdm_subsys)
+{
+	struct esoc_clink *esoc_clink =
+					container_of(mdm_subsys,
+							struct esoc_clink,
+								subsys);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (mdm_dbg_stall_notify(ESOC_PRIMARY_CRASH))
+		return;
+
+	clink_ops->notify(ESOC_PRIMARY_CRASH, esoc_clink);
+}
+
+static int mdm_subsys_shutdown(const struct subsys_desc *crashed_subsys,
+							bool force_stop)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+	 container_of(crashed_subsys, struct esoc_clink, subsys);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (mdm_drv->mode == CRASH || mdm_drv->mode == PEER_CRASH) {
+		if (mdm_dbg_stall_cmd(ESOC_PREPARE_DEBUG))
+			/*
+			 * The debug command is masked; return success
+			 * so we can move on to the next stage.
+			 */
+			return 0;
+		ret = clink_ops->cmd_exe(ESOC_PREPARE_DEBUG,
+							esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "failed to enter debug\n");
+			return ret;
+		}
+		mdm_drv->mode = IN_DEBUG;
+	} else if (!force_stop) {
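+		/*
+		 * A non-zero sysmon_shutdown_ret means the earlier graceful
+		 * shutdown failed; force a power off in that case, otherwise
+		 * request a normal power off.
+		 */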
+		if (esoc_clink->subsys.sysmon_shutdown_ret)
+			ret = clink_ops->cmd_exe(ESOC_FORCE_PWR_OFF,
+							esoc_clink);
+		else {
+			if (mdm_dbg_stall_cmd(ESOC_PWR_OFF))
+				/*
+				 * The power off command is masked; return
+				 * success and leave the command engine
+				 * state as is.
+				 */
+				return 0;
+			ret = clink_ops->cmd_exe(ESOC_PWR_OFF, esoc_clink);
+		}
+		if (ret) {
+			dev_err(&esoc_clink->dev, "failed to exe power off\n");
+			return ret;
+		}
+		mdm_drv->mode = PWR_OFF;
+	}
+	return 0;
+}
+
+static int mdm_subsys_powerup(const struct subsys_desc *crashed_subsys)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+				container_of(crashed_subsys, struct esoc_clink,
+								subsys);
+	struct mdm_drv *mdm_drv = esoc_get_drv_data(esoc_clink);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (!esoc_req_eng_enabled(esoc_clink)) {
+		dev_dbg(&esoc_clink->dev, "Wait for req eng registration\n");
+		wait_for_completion(&mdm_drv->req_eng_wait);
+	}
+	if (mdm_drv->mode == PWR_OFF) {
+		if (mdm_dbg_stall_cmd(ESOC_PWR_ON))
+			return -EBUSY;
+		ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "pwr on fail\n");
+			return ret;
+		}
+	} else if (mdm_drv->mode == IN_DEBUG) {
+		ret = clink_ops->cmd_exe(ESOC_EXIT_DEBUG, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "cannot exit debug mode\n");
+			return ret;
+		}
+		mdm_drv->mode = PWR_OFF;
+		ret = clink_ops->cmd_exe(ESOC_PWR_ON, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "pwr on fail\n");
+			return ret;
+		}
+	}
+	wait_for_completion(&mdm_drv->boot_done);
+	if (mdm_drv->boot_fail) {
+		dev_err(&esoc_clink->dev, "booting failed\n");
+		return -EIO;
+	}
+	return 0;
+}
+
+static int mdm_subsys_ramdumps(int want_dumps,
+				const struct subsys_desc *crashed_subsys)
+{
+	int ret;
+	struct esoc_clink *esoc_clink =
+				container_of(crashed_subsys, struct esoc_clink,
+								subsys);
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+
+	if (want_dumps) {
+		ret = clink_ops->cmd_exe(ESOC_EXE_DEBUG, esoc_clink);
+		if (ret) {
+			dev_err(&esoc_clink->dev, "debugging failed\n");
+			return ret;
+		}
+	}
+	return 0;
+}
+
+static int mdm_register_ssr(struct esoc_clink *esoc_clink)
+{
+	esoc_clink->subsys.shutdown = mdm_subsys_shutdown;
+	esoc_clink->subsys.ramdump = mdm_subsys_ramdumps;
+	esoc_clink->subsys.powerup = mdm_subsys_powerup;
+	esoc_clink->subsys.crash_shutdown = mdm_crash_shutdown;
+	return esoc_clink_register_ssr(esoc_clink);
+}
+
+int esoc_ssr_probe(struct esoc_clink *esoc_clink, struct esoc_drv *drv)
+{
+	int ret;
+	struct mdm_drv *mdm_drv;
+	struct esoc_eng *esoc_eng;
+
+	mdm_drv = devm_kzalloc(&esoc_clink->dev, sizeof(*mdm_drv), GFP_KERNEL);
+	if (!mdm_drv)
+		return -ENOMEM;
+	esoc_eng = &mdm_drv->cmd_eng;
+	esoc_eng->handle_clink_evt = mdm_handle_clink_evt;
+	ret = esoc_clink_register_cmd_eng(esoc_clink, esoc_eng);
+	if (ret) {
+		dev_err(&esoc_clink->dev, "failed to register cmd engine\n");
+		return ret;
+	}
+	ret = mdm_register_ssr(esoc_clink);
+	if (ret)
+		goto ssr_err;
+	mdm_drv->mdm_queue = alloc_workqueue("mdm_drv_queue", 0, 0);
+	if (!mdm_drv->mdm_queue) {
+		dev_err(&esoc_clink->dev, "could not create mdm_queue\n");
+		ret = -ENOMEM;
+		goto queue_err;
+	}
+	esoc_set_drv_data(esoc_clink, mdm_drv);
+	init_completion(&mdm_drv->boot_done);
+	init_completion(&mdm_drv->req_eng_wait);
+	INIT_WORK(&mdm_drv->ssr_work, mdm_ssr_fn);
+	mdm_drv->esoc_clink = esoc_clink;
+	mdm_drv->mode = PWR_OFF;
+	mdm_drv->boot_fail = false;
+	mdm_drv->esoc_restart.notifier_call = esoc_msm_restart_handler;
+	ret = register_reboot_notifier(&mdm_drv->esoc_restart);
+	if (ret)
+		dev_err(&esoc_clink->dev, "register for reboot failed\n");
+	ret = mdm_dbg_eng_init(drv);
+	if (ret) {
+		debug_init_done = false;
+		dev_err(&esoc_clink->dev, "dbg engine failure\n");
+	} else {
+		dev_dbg(&esoc_clink->dev, "dbg engine initialized\n");
+		debug_init_done = true;
+	}
+	return 0;
+queue_err:
+	esoc_clink_unregister_ssr(esoc_clink);
+ssr_err:
+	esoc_clink_unregister_cmd_eng(esoc_clink, esoc_eng);
+	return ret;
+}
+
+static struct esoc_compat compat_table[] = {
+	{	.name = "MDM9x25",
+		.data = NULL,
+	},
+	{
+		.name = "MDM9x35",
+		.data = NULL,
+	},
+	{
+		.name = "MDM9x55",
+		.data = NULL,
+	},
+};
+
+static struct esoc_drv esoc_ssr_drv = {
+	.owner = THIS_MODULE,
+	.probe = esoc_ssr_probe,
+	.compat_table = compat_table,
+	.compat_entries = ARRAY_SIZE(compat_table),
+	.driver = {
+		.name = "mdm-4x",
+	},
+};
+
+int __init esoc_ssr_init(void)
+{
+	return esoc_drv_register(&esoc_ssr_drv);
+}
+module_init(esoc_ssr_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc-mdm-pon.c b/drivers/esoc/esoc-mdm-pon.c
new file mode 100644
index 0000000..47d54db
--- /dev/null
+++ b/drivers/esoc/esoc-mdm-pon.c
@@ -0,0 +1,220 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "esoc-mdm.h"
+
+/* This function can be called from atomic context. */
+static int mdm4x_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int soft_reset_direction_assert = 0,
+	    soft_reset_direction_de_assert = 1;
+
+	if (mdm->soft_reset_inverted) {
+		soft_reset_direction_assert = 1;
+		soft_reset_direction_de_assert = 0;
+	}
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_assert);
+	/*
+	 * Allow PS hold assert to be detected
+	 */
+	if (!atomic)
+		usleep_range(8000, 9000);
+	else
+		mdelay(6);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_de_assert);
+	return 0;
+}
+
+/* This function can be called from atomic context. */
+static int mdm9x55_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	int soft_reset_direction_assert = 0,
+	    soft_reset_direction_de_assert = 1;
+
+	if (mdm->soft_reset_inverted) {
+		soft_reset_direction_assert = 1;
+		soft_reset_direction_de_assert = 0;
+	}
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_assert);
+	/*
+	 * Allow PS hold assert to be detected
+	 */
+	if (!atomic)
+		usleep_range(203000, 300000);
+	else
+		mdelay(203);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			soft_reset_direction_de_assert);
+	return 0;
+}
+
+static int mdm4x_do_first_power_on(struct mdm_ctrl *mdm)
+{
+	int i;
+	int pblrdy;
+	struct device *dev = mdm->dev;
+
+	dev_dbg(dev, "Powering on modem for the first time\n");
+	mdm_toggle_soft_reset(mdm, false);
+	/* Add a delay to allow PON sequence to complete*/
+	mdelay(50);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_STATUS), 1);
+	if (gpio_is_valid(MDM_GPIO(mdm, MDM2AP_PBLRDY))) {
+		for (i = 0; i < MDM_PBLRDY_CNT; i++) {
+			pblrdy = gpio_get_value(MDM_GPIO(mdm, MDM2AP_PBLRDY));
+			if (pblrdy)
+				break;
+			usleep_range(5000, 6000);
+		}
+		dev_dbg(dev, "pblrdy i:%d\n", i);
+		mdelay(200);
+	} else {
+		/*
+		 * No PBLRDY gpio associated with this modem.
+		 * Send request for image. Let userspace confirm
+		 * establishment of link to external modem.
+		 */
+		esoc_clink_queue_request(ESOC_REQ_IMG, mdm->esoc);
+	}
+	return 0;
+}
+
+static int mdm4x_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_dbg(dev, "Doing a hard reset\n");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+						soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 400ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(400);
+	return 0;
+}
+
+static int mdm9x55_power_down(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+	int soft_reset_direction = mdm->soft_reset_inverted ? 1 : 0;
+	/* Assert the soft reset line whether mdm2ap_status went low or not */
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					soft_reset_direction);
+	dev_dbg(dev, "Doing a hard reset\n");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+						soft_reset_direction);
+	/*
+	 * Currently, there is a debounce timer on the charm PMIC. It is
+	 * necessary to hold the PMIC RESET low for 406ms
+	 * for the reset to fully take place. Sleep here to ensure the
+	 * reset has occurred before the function exits.
+	 */
+	mdelay(406);
+	return 0;
+}
+
+static void mdm4x_cold_reset(struct mdm_ctrl *mdm)
+{
+	dev_dbg(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!!mdm->soft_reset_inverted);
+	mdelay(300);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!mdm->soft_reset_inverted);
+}
+
+static void mdm9x55_cold_reset(struct mdm_ctrl *mdm)
+{
+	dev_dbg(mdm->dev, "Triggering mdm cold reset");
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!!mdm->soft_reset_inverted);
+	mdelay(334);
+	gpio_direction_output(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+			!mdm->soft_reset_inverted);
+}
+
+static int mdm4x_pon_dt_init(struct mdm_ctrl *mdm)
+{
+	int val;
+	struct device_node *node = mdm->dev->of_node;
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
+
+	val = of_get_named_gpio_flags(node, "qcom,ap2mdm-soft-reset-gpio",
+						0, &flags);
+	if (val < 0)
+		return -EIO;
+	MDM_GPIO(mdm, AP2MDM_SOFT_RESET) = val;
+	if (flags & OF_GPIO_ACTIVE_LOW)
+		mdm->soft_reset_inverted = 1;
+	return 0;
+}
+
+static int mdm4x_pon_setup(struct mdm_ctrl *mdm)
+{
+	struct device *dev = mdm->dev;
+
+	if (gpio_is_valid(MDM_GPIO(mdm, AP2MDM_SOFT_RESET))) {
+		if (gpio_request(MDM_GPIO(mdm, AP2MDM_SOFT_RESET),
+					 "AP2MDM_SOFT_RESET")) {
+			dev_err(dev, "Cannot config AP2MDM_SOFT_RESET gpio\n");
+			return -EIO;
+		}
+	}
+	return 0;
+}
+
+struct mdm_pon_ops mdm9x25_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x35_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x45_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm4x_toggle_soft_reset,
+	.poff_force = mdm4x_power_down,
+	.cold_reset = mdm4x_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
+
+struct mdm_pon_ops mdm9x55_pon_ops = {
+	.pon = mdm4x_do_first_power_on,
+	.soft_reset = mdm9x55_toggle_soft_reset,
+	.poff_force = mdm9x55_power_down,
+	.cold_reset = mdm9x55_cold_reset,
+	.dt_init = mdm4x_pon_dt_init,
+	.setup = mdm4x_pon_setup,
+};
diff --git a/drivers/esoc/esoc-mdm.h b/drivers/esoc/esoc-mdm.h
new file mode 100644
index 0000000..fa3a576
--- /dev/null
+++ b/drivers/esoc/esoc-mdm.h
@@ -0,0 +1,156 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ESOC_MDM_H__
+#define __ESOC_MDM_H__
+
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include "esoc.h"
+
+#define MDM_PBLRDY_CNT			20
+#define INVALID_GPIO			(-1)
+#define MDM_GPIO(mdm, i)		(mdm->gpios[i])
+#define MDM9x25_LABEL			"MDM9x25"
+#define MDM9x25_HSIC			"HSIC"
+#define MDM9x35_LABEL			"MDM9x35"
+#define MDM9x35_PCIE			"PCIe"
+#define MDM9x35_DUAL_LINK		"HSIC+PCIe"
+#define MDM9x35_HSIC			"HSIC"
+#define MDM9x45_LABEL			"MDM9x45"
+#define MDM9x45_PCIE			"PCIe"
+#define MDM9x55_LABEL			"MDM9x55"
+#define MDM9x55_PCIE			"PCIe"
+#define MDM2AP_STATUS_TIMEOUT_MS	120000L
+#define MDM_MODEM_TIMEOUT		3000
+#define DEF_RAMDUMP_TIMEOUT		120000
+#define DEF_RAMDUMP_DELAY		2000
+#define RD_BUF_SIZE			100
+#define SFR_MAX_RETRIES			10
+#define SFR_RETRY_INTERVAL		1000
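+/*
+ * A magic value read at MDM_DBG_OFFSET enables debug mode;
+ * 0x53444247 is ASCII "SDBG".
+ */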
+#define MDM_DBG_OFFSET			0x934
+#define MDM_DBG_MODE			0x53444247
+#define MDM_CTI_NAME			"coresight-cti-rpm-cpu0"
+#define MDM_CTI_TRIG			0
+#define MDM_CTI_CH			0
+
+enum mdm_gpio {
+	AP2MDM_WAKEUP = 0,
+	AP2MDM_STATUS,
+	AP2MDM_SOFT_RESET,
+	AP2MDM_VDD_MIN,
+	AP2MDM_CHNLRDY,
+	AP2MDM_ERRFATAL,
+	AP2MDM_VDDMIN,
+	AP2MDM_PMIC_PWR_EN,
+	MDM2AP_WAKEUP,
+	MDM2AP_ERRFATAL,
+	MDM2AP_PBLRDY,
+	MDM2AP_STATUS,
+	MDM2AP_VDDMIN,
+	MDM_LINK_DETECT,
+	NUM_GPIOS,
+};
+
+struct mdm_pon_ops;
+
+struct mdm_ctrl {
+	unsigned int gpios[NUM_GPIOS];
+	spinlock_t status_lock;
+	struct workqueue_struct *mdm_queue;
+	struct delayed_work mdm2ap_status_check_work;
+	struct work_struct mdm_status_work;
+	struct work_struct restart_reason_work;
+	struct completion debug_done;
+	struct device *dev;
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *gpio_state_booting;
+	struct pinctrl_state *gpio_state_running;
+	struct pinctrl_state *gpio_state_active;
+	struct pinctrl_state *gpio_state_suspend;
+	int mdm2ap_status_valid_old_config;
+	int soft_reset_inverted;
+	int errfatal_irq;
+	int status_irq;
+	int pblrdy_irq;
+	int debug;
+	int init;
+	bool debug_fail;
+	unsigned int dump_timeout_ms;
+	unsigned int ramdump_delay_ms;
+	struct esoc_clink *esoc;
+	bool get_restart_reason;
+	unsigned long irq_mask;
+	bool ready;
+	bool dual_interface;
+	u32 status;
+	void __iomem *dbg_addr;
+	bool dbg_mode;
+	struct coresight_cti *cti;
+	int trig_cnt;
+	const struct mdm_pon_ops *pon_ops;
+};
+
+struct mdm_pon_ops {
+	int (*pon)(struct mdm_ctrl *mdm);
+	int (*soft_reset)(struct mdm_ctrl *mdm, bool atomic);
+	int (*poff_force)(struct mdm_ctrl *mdm);
+	int (*poff_cleanup)(struct mdm_ctrl *mdm);
+	void (*cold_reset)(struct mdm_ctrl *mdm);
+	int (*dt_init)(struct mdm_ctrl *mdm);
+	int (*setup)(struct mdm_ctrl *mdm);
+};
+
+struct mdm_ops {
+	struct esoc_clink_ops *clink_ops;
+	struct mdm_pon_ops *pon_ops;
+	int (*config_hw)(struct mdm_ctrl *mdm, const struct mdm_ops *ops,
+					struct platform_device *pdev);
+};
+
+static inline int mdm_toggle_soft_reset(struct mdm_ctrl *mdm, bool atomic)
+{
+	return mdm->pon_ops->soft_reset(mdm, atomic);
+}
+static inline int mdm_do_first_power_on(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->pon(mdm);
+}
+static inline int mdm_power_down(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->poff_force(mdm);
+}
+static inline void mdm_cold_reset(struct mdm_ctrl *mdm)
+{
+	mdm->pon_ops->cold_reset(mdm);
+}
+static inline int mdm_pon_dt_init(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->dt_init(mdm);
+}
+static inline int mdm_pon_setup(struct mdm_ctrl *mdm)
+{
+	return mdm->pon_ops->setup(mdm);
+}
+
+extern struct mdm_pon_ops mdm9x25_pon_ops;
+extern struct mdm_pon_ops mdm9x35_pon_ops;
+extern struct mdm_pon_ops mdm9x45_pon_ops;
+extern struct mdm_pon_ops mdm9x55_pon_ops;
+#endif
diff --git a/drivers/esoc/esoc.h b/drivers/esoc/esoc.h
new file mode 100644
index 0000000..0cec985
--- /dev/null
+++ b/drivers/esoc/esoc.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_H__
+#define __ESOC_H__
+
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/esoc_ctrl.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+
+#define ESOC_DEV_MAX		4
+#define ESOC_NAME_LEN		20
+#define ESOC_LINK_LEN		20
+
+struct esoc_clink;
+/**
+ * struct esoc_eng: Engine of the esoc control link
+ * @handle_clink_req: handle incoming esoc requests.
+ * @handle_clink_evt: handle esoc events.
+ * @esoc_clink: pointer to esoc control link.
+ */
+struct esoc_eng {
+	void (*handle_clink_req)(enum esoc_req req,
+						struct esoc_eng *eng);
+	void (*handle_clink_evt)(enum esoc_evt evt,
+						struct esoc_eng *eng);
+	struct esoc_clink *esoc_clink;
+};
+
+/**
+ * struct esoc_clink: Representation of external esoc device
+ * @name: Name of the external esoc.
+ * @link_name: name of the physical link.
+ * @parent: parent device.
+ * @dev: device for userspace interface.
+ * @id: id of the external device.
+ * @owner: owner of the device.
+ * @clink_ops: control operations for the control link
+ * @req_eng: handle for request engine.
+ * @cmd_eng: handle for command engine.
+ * @clink_data: private data of esoc control link.
+ * @compat_data: compat data of esoc driver.
+ * @subsys_desc: descriptor for subsystem restart
+ * @subsys_dev: ssr device handle.
+ * @np: device tree node for esoc_clink.
+ */
+struct esoc_clink {
+	const char *name;
+	const char *link_name;
+	struct device *parent;
+	struct device dev;
+	unsigned int id;
+	struct module *owner;
+	const struct esoc_clink_ops *clink_ops;
+	struct esoc_eng *req_eng;
+	struct esoc_eng *cmd_eng;
+	spinlock_t notify_lock;
+	void *clink_data;
+	void *compat_data;
+	struct subsys_desc subsys;
+	struct subsys_device *subsys_dev;
+	struct device_node *np;
+};
+
+/**
+ * struct esoc_clink_ops: Operations to control external soc
+ * @cmd_exe: Execute control command
+ * @get_status: Get current status, or response to previous command
+ * @notify: notify external soc of events
+ */
+struct esoc_clink_ops {
+	int (*cmd_exe)(enum esoc_cmd cmd, struct esoc_clink *dev);
+	int (*get_status)(u32 *status, struct esoc_clink *dev);
+	void (*notify)(enum esoc_notify notify, struct esoc_clink *dev);
+};
+
+/**
+ * struct esoc_compat: Compatibility of esoc drivers.
+ * @name: esoc link that driver is compatible with.
+ * @data: driver data associated with esoc clink.
+ */
+struct esoc_compat {
+	const char *name;
+	void *data;
+};
+
+/**
+ * struct esoc_drv: Driver for an esoc clink
+ * @driver: drivers for esoc.
+ * @owner: module owner of esoc driver.
+ * @compat_table: compatible table for driver.
+ * @compat_entries: number of entries in @compat_table.
+ * @probe: probe function for esoc driver.
+ */
+struct esoc_drv {
+	struct device_driver driver;
+	struct module *owner;
+	struct esoc_compat *compat_table;
+	unsigned int compat_entries;
+	int (*probe)(struct esoc_clink *esoc_clink,
+				struct esoc_drv *drv);
+};
+
+#define to_esoc_clink(d) container_of(d, struct esoc_clink, dev)
+#define to_esoc_drv(d) container_of(d, struct esoc_drv, driver)
+
+extern struct bus_type esoc_bus_type;
+
+/* Exported apis */
+void esoc_dev_exit(void);
+int esoc_dev_init(void);
+void esoc_clink_unregister(struct esoc_clink *esoc_dev);
+int esoc_clink_register(struct esoc_clink *esoc_dev);
+struct esoc_clink *get_esoc_clink(int id);
+struct esoc_clink *get_esoc_clink_by_node(struct device_node *node);
+void put_esoc_clink(struct esoc_clink *esoc_clink);
+void *get_esoc_clink_data(struct esoc_clink *esoc);
+void set_esoc_clink_data(struct esoc_clink *esoc, void *data);
+void esoc_clink_evt_notify(enum esoc_evt, struct esoc_clink *esoc_dev);
+void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_dev);
+void esoc_for_each_dev(void *data, int (*fn)(struct device *dev,
+								void *data));
+int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng);
+int esoc_drv_register(struct esoc_drv *driver);
+void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data);
+void *esoc_get_drv_data(struct esoc_clink *esoc_clink);
+/* ssr operations */
+int esoc_clink_register_ssr(struct esoc_clink *esoc_clink);
+int esoc_clink_request_ssr(struct esoc_clink *esoc_clink);
+void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink);
+/* client notification */
+#ifdef CONFIG_ESOC_CLIENT
+void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt);
+#else
+static inline void notify_esoc_clients(struct esoc_clink *esoc_clink,
+							unsigned long evt)
+{
+}
+#endif
+bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink);
+bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink);
+#endif
diff --git a/drivers/esoc/esoc_bus.c b/drivers/esoc/esoc_bus.c
new file mode 100644
index 0000000..4807e2b
--- /dev/null
+++ b/drivers/esoc/esoc_bus.c
@@ -0,0 +1,386 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/idr.h>
+#include <linux/slab.h>
+#include "esoc.h"
+
+static DEFINE_IDA(esoc_ida);
+
+/* SYSFS */
+static ssize_t
+esoc_name_show(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	return snprintf(buf, ESOC_NAME_LEN, "%s", to_esoc_clink(dev)->name);
+}
+
+static ssize_t
+esoc_link_show(struct device *dev, struct device_attribute *attr,
+							char *buf)
+{
+	return snprintf(buf, ESOC_LINK_LEN, "%s",
+				to_esoc_clink(dev)->link_name);
+}
+
+static struct device_attribute esoc_clink_attrs[] = {
+	__ATTR_RO(esoc_name),
+	__ATTR_RO(esoc_link),
+	__ATTR_NULL,
+};
+
+static int esoc_bus_match(struct device *dev, struct device_driver *drv)
+{
+	int i = 0, match = 1;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct esoc_drv *esoc_drv = to_esoc_drv(drv);
+	int entries = esoc_drv->compat_entries;
+	struct esoc_compat *table = esoc_drv->compat_table;
+
+	for (i = 0; i < entries; i++) {
+		if (strcasecmp(esoc_clink->name, table[i].name) == 0)
+			return match;
+	}
+	return 0;
+}
+
+static int esoc_bus_probe(struct device *dev)
+{
+	int ret;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct esoc_drv *esoc_drv = to_esoc_drv(dev->driver);
+
+	ret = esoc_drv->probe(esoc_clink, esoc_drv);
+	if (ret) {
+		pr_err("failed to probe %s dev\n", esoc_clink->name);
+		return ret;
+	}
+	return 0;
+}
+
+struct bus_type esoc_bus_type = {
+	.name = "esoc",
+	.match = esoc_bus_match,
+	.dev_attrs = esoc_clink_attrs,
+};
+EXPORT_SYMBOL(esoc_bus_type);
+
+struct device esoc_bus = {
+	.init_name = "esoc-bus"
+};
+EXPORT_SYMBOL(esoc_bus);
+
+/* bus accessor */
+static void esoc_clink_release(struct device *dev)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	ida_simple_remove(&esoc_ida, esoc_clink->id);
+	kfree(esoc_clink);
+}
+
+static int esoc_clink_match_id(struct device *dev, void *id)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	int *esoc_id = (int *)id;
+
+	if (esoc_clink->id == *esoc_id) {
+		if (!try_module_get(esoc_clink->owner))
+			return 0;
+		return 1;
+	}
+	return 0;
+}
+
+static int esoc_clink_match_node(struct device *dev, void *id)
+{
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+	struct device_node *node = id;
+
+	if (esoc_clink->np == node) {
+		if (!try_module_get(esoc_clink->owner))
+			return 0;
+		return 1;
+	}
+	return 0;
+}
+
+void esoc_for_each_dev(void *data, int (*fn)(struct device *dev, void *))
+{
+	bus_for_each_dev(&esoc_bus_type, NULL, data, fn);
+}
+EXPORT_SYMBOL(esoc_for_each_dev);
+
+struct esoc_clink *get_esoc_clink(int id)
+{
+	struct esoc_clink *esoc_clink;
+	struct device *dev;
+
+	dev = bus_find_device(&esoc_bus_type, NULL, &id, esoc_clink_match_id);
+	if (!dev)
+		return NULL;
+	esoc_clink = to_esoc_clink(dev);
+	return esoc_clink;
+}
+EXPORT_SYMBOL(get_esoc_clink);
+
+struct esoc_clink *get_esoc_clink_by_node(struct device_node *node)
+{
+	struct esoc_clink *esoc_clink;
+	struct device *dev;
+
+	dev = bus_find_device(&esoc_bus_type, NULL, node,
+						esoc_clink_match_node);
+	if (!dev)
+		return NULL;
+	esoc_clink = to_esoc_clink(dev);
+	return esoc_clink;
+}
+
+void put_esoc_clink(struct esoc_clink *esoc_clink)
+{
+	module_put(esoc_clink->owner);
+}
+EXPORT_SYMBOL(put_esoc_clink);
+
+bool esoc_req_eng_enabled(struct esoc_clink *esoc_clink)
+{
+	return !esoc_clink->req_eng ? false : true;
+}
+EXPORT_SYMBOL(esoc_req_eng_enabled);
+
+bool esoc_cmd_eng_enabled(struct esoc_clink *esoc_clink)
+{
+	return !esoc_clink->cmd_eng ? false : true;
+}
+EXPORT_SYMBOL(esoc_cmd_eng_enabled);
+
+/* ssr operations */
+int esoc_clink_register_ssr(struct esoc_clink *esoc_clink)
+{
+	int ret;
+	int len;
+	char *subsys_name;
+
+	len = strlen("esoc") + sizeof(esoc_clink->id);
+	subsys_name = kzalloc(len, GFP_KERNEL);
+	if (!subsys_name)
+		return -ENOMEM;
+	snprintf(subsys_name, len, "esoc%d", esoc_clink->id);
+	esoc_clink->subsys.name = subsys_name;
+	esoc_clink->dev.of_node = esoc_clink->np;
+	esoc_clink->subsys.dev = &esoc_clink->dev;
+	esoc_clink->subsys_dev = subsys_register(&esoc_clink->subsys);
+	if (IS_ERR(esoc_clink->subsys_dev)) {
+		dev_err(&esoc_clink->dev, "failed to register ssr node\n");
+		ret = PTR_ERR(esoc_clink->subsys_dev);
+		goto subsys_err;
+	}
+	return 0;
+subsys_err:
+	kfree(subsys_name);
+	return ret;
+}
+EXPORT_SYMBOL(esoc_clink_register_ssr);
+
+void esoc_clink_unregister_ssr(struct esoc_clink *esoc_clink)
+{
+	subsys_unregister(esoc_clink->subsys_dev);
+	kfree(esoc_clink->subsys.name);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_ssr);
+
+int esoc_clink_request_ssr(struct esoc_clink *esoc_clink)
+{
+	subsystem_restart_dev(esoc_clink->subsys_dev);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_request_ssr);
+
+/* bus operations */
+void esoc_clink_evt_notify(enum esoc_evt evt, struct esoc_clink *esoc_clink)
+{
+	unsigned long flags;
+
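+	/*
+	 * Fan the event out to registered clients and to the request/command
+	 * engines while holding notify_lock.
+	 */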
+	spin_lock_irqsave(&esoc_clink->notify_lock, flags);
+	notify_esoc_clients(esoc_clink, evt);
+	if (esoc_clink->req_eng && esoc_clink->req_eng->handle_clink_evt)
+		esoc_clink->req_eng->handle_clink_evt(evt, esoc_clink->req_eng);
+	if (esoc_clink->cmd_eng && esoc_clink->cmd_eng->handle_clink_evt)
+		esoc_clink->cmd_eng->handle_clink_evt(evt, esoc_clink->cmd_eng);
+	spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
+}
+EXPORT_SYMBOL(esoc_clink_evt_notify);
+
+void *get_esoc_clink_data(struct esoc_clink *esoc)
+{
+	return esoc->clink_data;
+}
+EXPORT_SYMBOL(get_esoc_clink_data);
+
+void set_esoc_clink_data(struct esoc_clink *esoc, void *data)
+{
+	esoc->clink_data = data;
+}
+EXPORT_SYMBOL(set_esoc_clink_data);
+
+void esoc_clink_queue_request(enum esoc_req req, struct esoc_clink *esoc_clink)
+{
+	unsigned long flags;
+	struct esoc_eng *req_eng;
+
+	spin_lock_irqsave(&esoc_clink->notify_lock, flags);
+	if (esoc_clink->req_eng != NULL) {
+		req_eng = esoc_clink->req_eng;
+		req_eng->handle_clink_req(req, req_eng);
+	}
+	spin_unlock_irqrestore(&esoc_clink->notify_lock, flags);
+}
+EXPORT_SYMBOL(esoc_clink_queue_request);
+
+void esoc_set_drv_data(struct esoc_clink *esoc_clink, void *data)
+{
+	dev_set_drvdata(&esoc_clink->dev, data);
+}
+EXPORT_SYMBOL(esoc_set_drv_data);
+
+void *esoc_get_drv_data(struct esoc_clink *esoc_clink)
+{
+	return dev_get_drvdata(&esoc_clink->dev);
+}
+EXPORT_SYMBOL(esoc_get_drv_data);
+
+/* bus registration functions */
+void esoc_clink_unregister(struct esoc_clink *esoc_clink)
+{
+	if (get_device(&esoc_clink->dev) != NULL) {
+		device_unregister(&esoc_clink->dev);
+		put_device(&esoc_clink->dev);
+	}
+}
+EXPORT_SYMBOL(esoc_clink_unregister);
+
+int esoc_clink_register(struct esoc_clink *esoc_clink)
+{
+	int id, err;
+	struct device *dev;
+
+	if (!esoc_clink->name || !esoc_clink->link_name ||
+					!esoc_clink->clink_ops) {
+		dev_err(esoc_clink->parent, "invalid esoc arguments\n");
+		return -EINVAL;
+	}
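+	/* The id doubles as the "esocN" device name and the char-dev minor. */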
+	id = ida_simple_get(&esoc_ida, 0, ESOC_DEV_MAX, GFP_KERNEL);
+	if (id < 0) {
+		pr_err("unable to allocate id for %s\n", esoc_clink->name);
+		return id;
+	}
+	esoc_clink->id = id;
+	dev = &esoc_clink->dev;
+	dev->bus = &esoc_bus_type;
+	dev->release = esoc_clink_release;
+	if (!esoc_clink->parent)
+		dev->parent = &esoc_bus;
+	else
+		dev->parent = esoc_clink->parent;
+	dev_set_name(dev, "esoc%d", id);
+	err = device_register(dev);
+	if (err) {
+		dev_err(esoc_clink->parent, "esoc device register failed\n");
+		goto exit_ida;
+	}
+	spin_lock_init(&esoc_clink->notify_lock);
+	return 0;
+exit_ida:
+	ida_simple_remove(&esoc_ida, id);
+	pr_err("unable to register %s, err = %d\n", esoc_clink->name, err);
+	return err;
+}
+EXPORT_SYMBOL(esoc_clink_register);
+
+int esoc_clink_register_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	if (esoc_clink->req_eng)
+		return -EBUSY;
+	if (!eng->handle_clink_req)
+		return -EINVAL;
+	esoc_clink->req_eng = eng;
+	eng->esoc_clink = esoc_clink;
+	esoc_clink_evt_notify(ESOC_REQ_ENG_ON, esoc_clink);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_req_eng);
+
+int esoc_clink_register_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	if (esoc_clink->cmd_eng)
+		return -EBUSY;
+	esoc_clink->cmd_eng = eng;
+	eng->esoc_clink = esoc_clink;
+	esoc_clink_evt_notify(ESOC_CMD_ENG_ON, esoc_clink);
+	return 0;
+}
+EXPORT_SYMBOL(esoc_clink_register_cmd_eng);
+
+void esoc_clink_unregister_req_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	esoc_clink->req_eng = NULL;
+	esoc_clink_evt_notify(ESOC_REQ_ENG_OFF, esoc_clink);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_req_eng);
+
+void esoc_clink_unregister_cmd_eng(struct esoc_clink *esoc_clink,
+						struct esoc_eng *eng)
+{
+	esoc_clink->cmd_eng = NULL;
+	esoc_clink_evt_notify(ESOC_CMD_ENG_OFF, esoc_clink);
+}
+EXPORT_SYMBOL(esoc_clink_unregister_cmd_eng);
+
+int esoc_drv_register(struct esoc_drv *driver)
+{
+	driver->driver.bus = &esoc_bus_type;
+	driver->driver.probe = esoc_bus_probe;
+
+	return driver_register(&driver->driver);
+}
+EXPORT_SYMBOL(esoc_drv_register);
+
+static int __init esoc_init(void)
+{
+	int ret;
+
+	ret = device_register(&esoc_bus);
+	if (ret) {
+		pr_err("esoc bus device register fail\n");
+		return ret;
+	}
+	ret = bus_register(&esoc_bus_type);
+	if (ret) {
+		pr_err("esoc bus register fail\n");
+		device_unregister(&esoc_bus);
+		return ret;
+	}
+	pr_debug("esoc bus registration done\n");
+	return 0;
+}
+
+subsys_initcall(esoc_init);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/esoc/esoc_client.c b/drivers/esoc/esoc_client.c
new file mode 100644
index 0000000..5b194e31
--- /dev/null
+++ b/drivers/esoc/esoc_client.c
@@ -0,0 +1,132 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/esoc_client.h>
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include "esoc.h"
+
+static DEFINE_SPINLOCK(notify_lock);
+static ATOMIC_NOTIFIER_HEAD(client_notify);
+
+static void devm_esoc_desc_release(struct device *dev, void *res)
+{
+	struct esoc_desc *esoc_desc = res;
+
+	kfree(esoc_desc->name);
+	kfree(esoc_desc->link);
+	put_esoc_clink(esoc_desc->priv);
+}
+
+static int devm_esoc_desc_match(struct device *dev, void *res, void *data)
+{
+	struct esoc_desc *esoc_desc = res;
+	return esoc_desc == data;
+}
+
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name)
+{
+	int ret, index;
+	const char *client_desc;
+	char *esoc_prop;
+	const __be32 *parp;
+	struct device_node *esoc_node;
+	struct device_node *np = dev->of_node;
+	struct esoc_clink *esoc_clink;
+	struct esoc_desc *desc;
+	char *esoc_name, *esoc_link;
+
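+	/*
+	 * Walk the client's "esoc-<n>" phandle properties and match the
+	 * corresponding "esoc-names" entry against the requested name to
+	 * locate the control link this client is bound to.
+	 */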
+	for (index = 0;; index++) {
+		esoc_prop = kasprintf(GFP_KERNEL, "esoc-%d", index);
+		parp = of_get_property(np, esoc_prop, NULL);
+		if (parp == NULL) {
+			dev_err(dev, "esoc device not present\n");
+			kfree(esoc_prop);
+			return NULL;
+		}
+		ret = of_property_read_string_index(np, "esoc-names", index,
+								&client_desc);
+		if (ret) {
+			dev_err(dev, "cannot find matching string\n");
+			kfree(esoc_prop);
+			return NULL;
+		}
+		if (strcmp(client_desc, name)) {
+			kfree(esoc_prop);
+			continue;
+		}
+		kfree(esoc_prop);
+		esoc_node = of_find_node_by_phandle(be32_to_cpup(parp));
+		esoc_clink = get_esoc_clink_by_node(esoc_node);
+		if (IS_ERR_OR_NULL(esoc_clink)) {
+			dev_err(dev, "matching esoc clink not present\n");
+			return ERR_PTR(-EPROBE_DEFER);
+		}
+		esoc_name = kasprintf(GFP_KERNEL, "esoc%d",
+							esoc_clink->id);
+		if (IS_ERR_OR_NULL(esoc_name)) {
+			dev_err(dev, "unable to allocate esoc name\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		esoc_link = kasprintf(GFP_KERNEL, "%s", esoc_clink->link_name);
+		if (IS_ERR_OR_NULL(esoc_link)) {
+			dev_err(dev, "unable to allocate esoc link name\n");
+			kfree(esoc_name);
+			return ERR_PTR(-ENOMEM);
+		}
+		desc = devres_alloc(devm_esoc_desc_release,
+						sizeof(*desc), GFP_KERNEL);
+		if (IS_ERR_OR_NULL(desc)) {
+			kfree(esoc_name);
+			kfree(esoc_link);
+			dev_err(dev, "unable to allocate esoc descriptor\n");
+			return ERR_PTR(-ENOMEM);
+		}
+		desc->name = esoc_name;
+		desc->link = esoc_link;
+		desc->priv = esoc_clink;
+		devres_add(dev, desc);
+		return desc;
+	}
+	return NULL;
+}
+EXPORT_SYMBOL(devm_register_esoc_client);
+
+void devm_unregister_esoc_client(struct device *dev,
+					struct esoc_desc *esoc_desc)
+{
+	int ret;
+
+	ret = devres_release(dev, devm_esoc_desc_release,
+				devm_esoc_desc_match, esoc_desc);
+	WARN_ON(ret);
+}
+EXPORT_SYMBOL(devm_unregister_esoc_client);
+
+int esoc_register_client_notifier(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&client_notify, nb);
+}
+EXPORT_SYMBOL(esoc_register_client_notifier);
+
+void notify_esoc_clients(struct esoc_clink *esoc_clink, unsigned long evt)
+{
+	unsigned int id;
+	unsigned long flags;
+
+	spin_lock_irqsave(&notify_lock, flags);
+	id = esoc_clink->id;
+	atomic_notifier_call_chain(&client_notify, evt, &id);
+	spin_unlock_irqrestore(&notify_lock, flags);
+}
+EXPORT_SYMBOL(notify_esoc_clients);
diff --git a/drivers/esoc/esoc_dev.c b/drivers/esoc/esoc_dev.c
new file mode 100644
index 0000000..17a30b8
--- /dev/null
+++ b/drivers/esoc/esoc_dev.c
@@ -0,0 +1,392 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kfifo.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/wait.h>
+#include "esoc.h"
+
+/**
+ * struct esoc_udev: Userspace char interface
+ * @dev: interface device.
+ * @req_fifo: fifo for clink requests.
+ * @req_wait: signal availability of request from clink
+ * @req_fifo_lock: serialize access to req fifo
+ * @evt_fifo: fifo for clink events
+ * @evt_wait: signal availability of clink event
+ * @evt_fifo_lock: serialize access to event fifo
+ * @list: entry in esoc dev list.
+ * @clink: reference to control link
+ */
+struct esoc_udev {
+	struct device *dev;
+	struct kfifo req_fifo;
+	wait_queue_head_t req_wait;
+	spinlock_t req_fifo_lock;
+	struct kfifo evt_fifo;
+	wait_queue_head_t evt_wait;
+	spinlock_t evt_fifo_lock;
+	struct list_head list;
+	struct esoc_clink *clink;
+};
+
+/**
+ * struct esoc_uhandle: Userspace handle of esoc
+ * @esoc_clink: esoc control link.
+ * @eng: esoc engine for commands/requests.
+ * @esoc_udev: user interface device.
+ * @req_eng_reg: indicates if engine is registered as request eng
+ * @cmd_eng_reg: indicates if engine is registered as cmd eng
+ */
+struct esoc_uhandle {
+	struct esoc_clink *esoc_clink;
+	struct esoc_eng eng;
+	struct esoc_udev *esoc_udev;
+	bool req_eng_reg;
+	bool cmd_eng_reg;
+};
+
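+/* Limits of the char interface: minor numbers, queued requests and events. */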
+#define ESOC_MAX_MINOR	256
+#define ESOC_MAX_REQ	8
+#define ESOC_MAX_EVT	4
+
+static LIST_HEAD(esoc_udev_list);
+static DEFINE_SPINLOCK(esoc_udev_list_lock);
+struct class *esoc_class;
+static int esoc_major;
+
+static struct esoc_udev *get_free_esoc_udev(struct esoc_clink *esoc_clink)
+{
+	struct esoc_udev *esoc_udev;
+	int err;
+
+	if (esoc_clink->id > ESOC_MAX_MINOR) {
+		pr_err("too many esoc devices\n");
+		return ERR_PTR(-ENODEV);
+	}
+	esoc_udev = kzalloc(sizeof(*esoc_udev), GFP_KERNEL);
+	if (!esoc_udev)
+		return ERR_PTR(-ENOMEM);
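+	/* Bounded fifos buffer requests/events until userspace drains them. */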
+	err = kfifo_alloc(&esoc_udev->req_fifo, (sizeof(u32)) * ESOC_MAX_REQ,
+								GFP_KERNEL);
+	if (err) {
+		pr_err("unable to allocate request fifo for %s\n",
+							esoc_clink->name);
+		goto req_fifo_fail;
+	}
+	err = kfifo_alloc(&esoc_udev->evt_fifo, (sizeof(u32)) * ESOC_MAX_EVT,
+								GFP_KERNEL);
+	if (err) {
+		pr_err("unable to allocate evt fifo for %s\n",
+							esoc_clink->name);
+		goto evt_fifo_fail;
+	}
+	init_waitqueue_head(&esoc_udev->req_wait);
+	init_waitqueue_head(&esoc_udev->evt_wait);
+	spin_lock_init(&esoc_udev->req_fifo_lock);
+	spin_lock_init(&esoc_udev->evt_fifo_lock);
+	esoc_udev->clink = esoc_clink;
+	spin_lock(&esoc_udev_list_lock);
+	list_add_tail(&esoc_udev->list, &esoc_udev_list);
+	spin_unlock(&esoc_udev_list_lock);
+	return esoc_udev;
+evt_fifo_fail:
+	kfifo_free(&esoc_udev->req_fifo);
+req_fifo_fail:
+	kfree(esoc_udev);
+	return ERR_PTR(-ENODEV);
+}
+
+static void return_esoc_udev(struct esoc_udev *esoc_udev)
+{
+	spin_lock(&esoc_udev_list_lock);
+	list_del(&esoc_udev->list);
+	spin_unlock(&esoc_udev_list_lock);
+	kfifo_free(&esoc_udev->req_fifo);
+	kfifo_free(&esoc_udev->evt_fifo);
+	kfree(esoc_udev);
+}
+
+static struct esoc_udev *esoc_udev_get_by_minor(unsigned int index)
+{
+	struct esoc_udev *esoc_udev;
+
+	spin_lock(&esoc_udev_list_lock);
+	list_for_each_entry(esoc_udev, &esoc_udev_list, list) {
+		if (esoc_udev->clink->id == index)
+			goto found;
+	}
+	esoc_udev = NULL;
+found:
+	spin_unlock(&esoc_udev_list_lock);
+	return esoc_udev;
+}
+
+void esoc_udev_handle_clink_req(enum esoc_req req, struct esoc_eng *eng)
+{
+	int err;
+	u32 clink_req;
+	struct esoc_clink *esoc_clink = eng->esoc_clink;
+	struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+
+	if (!esoc_udev)
+		return;
+	clink_req = (u32)req;
+	err = kfifo_in_spinlocked(&esoc_udev->req_fifo, &clink_req,
+						sizeof(clink_req),
+						&esoc_udev->req_fifo_lock);
+	if (err != sizeof(clink_req)) {
+		pr_err("unable to queue request for %s\n", esoc_clink->name);
+		return;
+	}
+	wake_up_interruptible(&esoc_udev->req_wait);
+}
+
+void esoc_udev_handle_clink_evt(enum esoc_evt evt, struct esoc_eng *eng)
+{
+	int err;
+	u32 clink_evt;
+	struct esoc_clink *esoc_clink = eng->esoc_clink;
+	struct esoc_udev *esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+
+	if (!esoc_udev)
+		return;
+	clink_evt = (u32)evt;
+	err = kfifo_in_spinlocked(&esoc_udev->evt_fifo, &clink_evt,
+						sizeof(clink_evt),
+						&esoc_udev->evt_fifo_lock);
+	if (err != sizeof(clink_evt)) {
+		pr_err("unable to queue event for %s\n", esoc_clink->name);
+		return;
+	}
+	wake_up_interruptible(&esoc_udev->evt_wait);
+}
+
+static long esoc_dev_ioctl(struct file *file, unsigned int cmd,
+						unsigned long arg)
+{
+	int err;
+	u32 esoc_cmd, status, req, evt;
+	struct esoc_uhandle *uhandle = file->private_data;
+	struct esoc_udev *esoc_udev = uhandle->esoc_udev;
+	struct esoc_clink *esoc_clink = uhandle->esoc_clink;
+	const struct esoc_clink_ops * const clink_ops = esoc_clink->clink_ops;
+	void __user *uarg = (void __user *)arg;
+
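+	/*
+	 * The register/wait ioctls manage the userspace engine and its fifos;
+	 * CMD_EXE, NOTIFY and GET_STATUS forward to the control link's
+	 * clink_ops.
+	 */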
+	switch (cmd) {
+	case ESOC_REG_REQ_ENG:
+		err = esoc_clink_register_req_eng(esoc_clink, &uhandle->eng);
+		if (err)
+			return err;
+		uhandle->req_eng_reg = true;
+		break;
+	case ESOC_REG_CMD_ENG:
+		err = esoc_clink_register_cmd_eng(esoc_clink, &uhandle->eng);
+		if (err)
+			return err;
+		uhandle->cmd_eng_reg = true;
+		break;
+	case ESOC_CMD_EXE:
+		if (esoc_clink->cmd_eng != &uhandle->eng)
+			return -EACCES;
+		get_user(esoc_cmd, (u32 __user *)arg);
+		return clink_ops->cmd_exe(esoc_cmd, esoc_clink);
+	case ESOC_WAIT_FOR_REQ:
+		if (esoc_clink->req_eng != &uhandle->eng)
+			return -EACCES;
+		err = wait_event_interruptible(esoc_udev->req_wait,
+					!kfifo_is_empty(&esoc_udev->req_fifo));
+		if (!err) {
+			err = kfifo_out_spinlocked(&esoc_udev->req_fifo, &req,
+								sizeof(req),
+						&esoc_udev->req_fifo_lock);
+			if (err != sizeof(req)) {
+				pr_err("read from clink %s req q failed\n",
+							esoc_clink->name);
+				return -EIO;
+			}
+			put_user(req, (u32 __user *)uarg);
+
+		}
+		return err;
+	case ESOC_NOTIFY:
+		get_user(esoc_cmd, (u32 __user *)arg);
+		clink_ops->notify(esoc_cmd, esoc_clink);
+		break;
+	case ESOC_GET_STATUS:
+		err = clink_ops->get_status(&status, esoc_clink);
+		if (err)
+			return err;
+		put_user(status, (u32 __user *)uarg);
+		break;
+	case ESOC_WAIT_FOR_CRASH:
+		err = wait_event_interruptible(esoc_udev->evt_wait,
+					!kfifo_is_empty(&esoc_udev->evt_fifo));
+		if (!err) {
+			err = kfifo_out_spinlocked(&esoc_udev->evt_fifo, &evt,
+								sizeof(evt),
+						&esoc_udev->evt_fifo_lock);
+			if (err != sizeof(evt)) {
+				pr_err("read from clink %s evt q failed\n",
+							esoc_clink->name);
+				return -EIO;
+			}
+			put_user(evt, (u32 __user *)uarg);
+		}
+		return err;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int esoc_dev_open(struct inode *inode, struct file *file)
+{
+	struct esoc_uhandle *uhandle;
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink;
+	struct esoc_eng *eng;
+	unsigned int minor = iminor(inode);
+
+	esoc_udev = esoc_udev_get_by_minor(minor);
+	if (!esoc_udev)
+		return -ENODEV;
+	esoc_clink = get_esoc_clink(esoc_udev->clink->id);
+	if (!esoc_clink)
+		return -ENODEV;
+
+	uhandle = kzalloc(sizeof(*uhandle), GFP_KERNEL);
+	if (!uhandle) {
+		put_esoc_clink(esoc_clink);
+		return -ENOMEM;
+	}
+	uhandle->esoc_udev = esoc_udev;
+	uhandle->esoc_clink = esoc_clink;
+	eng = &uhandle->eng;
+	eng->handle_clink_req = esoc_udev_handle_clink_req;
+	eng->handle_clink_evt = esoc_udev_handle_clink_evt;
+	file->private_data = uhandle;
+	return 0;
+}
+
+static int esoc_dev_release(struct inode *inode, struct file *file)
+{
+	struct esoc_clink *esoc_clink;
+	struct esoc_uhandle *uhandle = file->private_data;
+
+	esoc_clink = uhandle->esoc_clink;
+	if (uhandle->req_eng_reg)
+		esoc_clink_unregister_req_eng(esoc_clink, &uhandle->eng);
+	if (uhandle->cmd_eng_reg)
+		esoc_clink_unregister_cmd_eng(esoc_clink, &uhandle->eng);
+	uhandle->req_eng_reg = false;
+	uhandle->cmd_eng_reg = false;
+	put_esoc_clink(esoc_clink);
+	kfree(uhandle);
+	return 0;
+}
+
+static const struct file_operations esoc_dev_fops = {
+	.owner		= THIS_MODULE,
+	.open		= esoc_dev_open,
+	.unlocked_ioctl = esoc_dev_ioctl,
+	.release	= esoc_dev_release,
+};
+
+int esoc_clink_add_device(struct device *dev, void *dummy)
+{
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	esoc_udev = get_free_esoc_udev(esoc_clink);
+	if (IS_ERR(esoc_udev))
+		return PTR_ERR(esoc_udev);
+	esoc_udev->dev = device_create(esoc_class, &esoc_clink->dev,
+					MKDEV(esoc_major, esoc_clink->id),
+					esoc_clink, "esoc-%d", esoc_clink->id);
+	if (IS_ERR(esoc_udev->dev)) {
+		pr_err("failed to create user device\n");
+		goto dev_err;
+	}
+	return 0;
+dev_err:
+	return_esoc_udev(esoc_udev);
+	return -ENODEV;
+}
+
+int esoc_clink_del_device(struct device *dev, void *dummy)
+{
+	struct esoc_udev *esoc_udev;
+	struct esoc_clink *esoc_clink = to_esoc_clink(dev);
+
+	esoc_udev = esoc_udev_get_by_minor(esoc_clink->id);
+	if (!esoc_udev)
+		return 0;
+	device_destroy(esoc_class, MKDEV(esoc_major, esoc_clink->id));
+	return_esoc_udev(esoc_udev);
+	return 0;
+}
+
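+/*
+ * Bus notifier: create the userspace char device when a control link is
+ * added to the esoc bus and tear it down when the link is removed.
+ */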
+static int esoc_dev_notifier_call(struct notifier_block *nb,
+					unsigned long action,
+					void *data)
+{
+	struct device *dev = data;
+
+	switch (action) {
+	case BUS_NOTIFY_ADD_DEVICE:
+		return esoc_clink_add_device(dev, NULL);
+	case BUS_NOTIFY_DEL_DEVICE:
+		return esoc_clink_del_device(dev, NULL);
+	}
+	return 0;
+}
+
+static struct notifier_block esoc_dev_notifier = {
+	.notifier_call = esoc_dev_notifier_call,
+};
+
+int __init esoc_dev_init(void)
+{
+	int ret = 0;
+
+	esoc_class = class_create(THIS_MODULE, "esoc-dev");
+
+	if (IS_ERR(esoc_class)) {
+		pr_err("coudn't create class");
+		return PTR_ERR(esoc_class);
+	}
+	esoc_major = register_chrdev(0, "esoc", &esoc_dev_fops);
+	if (esoc_major < 0) {
+		pr_err("failed to allocate char dev\n");
+		ret = esoc_major;
+		goto class_unreg;
+	}
+	ret = bus_register_notifier(&esoc_bus_type, &esoc_dev_notifier);
+	if (ret)
+		goto chrdev_unreg;
+	esoc_for_each_dev(NULL, esoc_clink_add_device);
+	return ret;
+chrdev_unreg:
+	unregister_chrdev(esoc_major, "esoc");
+class_unreg:
+	class_destroy(esoc_class);
+	return ret;
+}
+
+void __exit esoc_dev_exit(void)
+{
+	bus_unregister_notifier(&esoc_bus_type, &esoc_dev_notifier);
+	class_destroy(esoc_class);
+	unregister_chrdev(esoc_major, "esoc");
+}
+
+MODULE_LICENSE("GPL v2");
+module_init(esoc_dev_init);
+module_exit(esoc_dev_exit);
diff --git a/drivers/esoc/mdm-dbg.h b/drivers/esoc/mdm-dbg.h
new file mode 100644
index 0000000..ae31339
--- /dev/null
+++ b/drivers/esoc/mdm-dbg.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+static bool debug_init_done;
+
+#ifndef CONFIG_ESOC_MDM_DBG_ENG
+
+static inline bool dbg_check_cmd_mask(unsigned int cmd)
+{
+	return false;
+}
+
+static inline bool dbg_check_notify_mask(unsigned int notify)
+{
+	return false;
+}
+
+static inline int mdm_dbg_eng_init(struct esoc_drv *drv)
+{
+	return 0;
+}
+
+#else
+extern bool dbg_check_cmd_mask(unsigned int cmd);
+extern bool dbg_check_notify_mask(unsigned int notify);
+extern int mdm_dbg_eng_init(struct esoc_drv *drv);
+#endif
+
+static inline bool mdm_dbg_stall_cmd(unsigned int cmd)
+{
+	if (debug_init_done)
+		return dbg_check_cmd_mask(cmd);
+	else
+		return false;
+}
+
+static inline bool mdm_dbg_stall_notify(unsigned int notify)
+{
+	if (debug_init_done)
+		return dbg_check_notify_mask(notify);
+	else
+		return false;
+}
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index d074c22..1b08983 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -141,3 +141,5 @@
 obj-$(CONFIG_GPIO_ZYNQ)		+= gpio-zynq.o
 obj-$(CONFIG_GPIO_ZX)		+= gpio-zx.o
 obj-$(CONFIG_GPIO_LOONGSON1)	+= gpio-loongson1.o
+obj-$(CONFIG_MSM_SMP2P)		+= gpio-msm-smp2p.o
+obj-$(CONFIG_MSM_SMP2P_TEST)	+= gpio-msm-smp2p-test.o
diff --git a/drivers/gpio/gpio-msm-smp2p-test.c b/drivers/gpio/gpio-msm-smp2p-test.c
new file mode 100644
index 0000000..1067c4a
--- /dev/null
+++ b/drivers/gpio/gpio-msm-smp2p-test.c
@@ -0,0 +1,763 @@
+/* drivers/gpio/gpio-msm-smp2p-test.c
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/gpio.h>
+#include <linux/debugfs.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/bitmap.h>
+#include "../soc/qcom/smp2p_private.h"
+#include "../soc/qcom/smp2p_test_common.h"
+
+/* Interrupt callback data */
+struct gpio_info {
+	int gpio_base_id;
+	int irq_base_id;
+
+	bool initialized;
+	struct completion cb_completion;
+	int cb_count;
+	DECLARE_BITMAP(triggered_irqs, SMP2P_BITS_PER_ENTRY);
+};
+
+/* GPIO Inbound/Outbound callback info */
+struct gpio_inout {
+	struct gpio_info in;
+	struct gpio_info out;
+};
+
+static struct gpio_inout gpio_info[SMP2P_NUM_PROCS];
+
+/**
+ * Init/reset the callback data.
+ *
+ * @info: Pointer to callback data
+ */
+static void cb_data_reset(struct gpio_info *info)
+{
+	int n;
+
+	if (!info)
+		return;
+
+	if (!info->initialized) {
+		init_completion(&info->cb_completion);
+		info->initialized = true;
+	}
+	info->cb_count = 0;
+
+	for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n)
+		clear_bit(n,  info->triggered_irqs);
+
+	reinit_completion(&info->cb_completion);
+}
+
+static int smp2p_gpio_test_probe(struct platform_device *pdev)
+{
+	int id;
+	int cnt;
+	struct device_node *node = pdev->dev.of_node;
+	struct gpio_info *gpio_info_ptr = NULL;
+
+	/*
+	 * NOTE:  This does a string-lookup of the GPIO pin name and doesn't
+	 * actually directly link to the SMP2P GPIO driver since all
+	 * GPIO/Interrupt access must be through standard
+	 * Linux GPIO / Interrupt APIs.
+	 */
+	if (strcmp("qcom,smp2pgpio_test_smp2p_1_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].in;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_1_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_MODEM_PROC].out;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_2_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].in;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_2_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_AUDIO_PROC].out;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_3_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].in;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_3_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_SENSOR_PROC].out;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_4_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].in;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_4_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_WIRELESS_PROC].out;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_5_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].in;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_5_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_CDSP_PROC].out;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_7_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].in;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_7_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_TZ_PROC].out;
+	} else if (strcmp("qcom,smp2pgpio_test_smp2p_15_in", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
+	} else if (
+		strcmp("qcom,smp2pgpio_test_smp2p_15_out", node->name) == 0) {
+		gpio_info_ptr = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
+	} else {
+		pr_err("%s: unable to match device type '%s'\n",
+				__func__, node->name);
+		return -ENODEV;
+	}
+
+	/* retrieve the GPIO and interrupt ID's */
+	cnt = of_gpio_count(node);
+	if (cnt && gpio_info_ptr) {
+		/*
+		 * Instead of looping through all 32-bits, we can just get the
+		 * first pin to get the base IDs.  This saves on the verbosity
+		 * of the device tree nodes as well.
+		 */
+		id = of_get_gpio(node, 0);
+		if (id == -EPROBE_DEFER)
+			return id;
+		gpio_info_ptr->gpio_base_id = id;
+		gpio_info_ptr->irq_base_id = gpio_to_irq(id);
+	}
+	return 0;
+}
+
+/*
+ * NOTE:  Instead of match table and device driver, you may be able to just
+ * call of_find_compatible_node() in your init function.
+ */
+static const struct of_device_id msm_smp2p_match_table[] = {
+	/* modem */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_1_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_1_in", },
+
+	/* audio (adsp) */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_2_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_2_in", },
+
+	/* sensor */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_3_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_3_in", },
+
+	/* wcnss */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_4_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_4_in", },
+
+	/* CDSP */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_5_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_5_in", },
+
+	/* TZ */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_7_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_7_in", },
+
+	/* mock loopback */
+	{.compatible = "qcom,smp2pgpio_test_smp2p_15_out", },
+	{.compatible = "qcom,smp2pgpio_test_smp2p_15_in", },
+	{},
+};
+
+static struct platform_driver smp2p_gpio_driver = {
+	.probe = smp2p_gpio_test_probe,
+	.driver = {
+		.name = "smp2pgpio_test",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_match_table,
+	},
+};
+
+/**
+ * smp2p_ut_local_gpio_out - Verify outbound functionality.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_ut_local_gpio_out(struct seq_file *s)
+{
+	int failed = 0;
+	struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].out;
+	int ret;
+	int id;
+	struct msm_smp2p_remote_mock *mock;
+
+	seq_printf(s, "Running %s\n", __func__);
+	do {
+		/* initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		mock = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(mock, !=, NULL);
+
+		mock->rx_interrupt_count = 0;
+		memset(&mock->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
+			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
+			0, 1);
+		strlcpy(mock->remote_item.entries[0].name, "smp2p",
+			SMP2P_MAX_ENTRY_NAME);
+		SMP2P_SET_ENT_VALID(
+			mock->remote_item.header.valid_total_ent, 1);
+		msm_smp2p_set_remote_mock_exists(true);
+		mock->tx_interrupt();
+
+		/* open GPIO entry */
+		smp2p_gpio_open_test_entry("smp2p",
+				SMP2P_REMOTE_MOCK_PROC, true);
+
+		/* verify set/get functions */
+		UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			int pin = cb_info->gpio_base_id + id;
+
+			mock->rx_interrupt_count = 0;
+			gpio_set_value(pin, 1);
+			UT_ASSERT_INT(1, ==, mock->rx_interrupt_count);
+			UT_ASSERT_INT(1, ==, gpio_get_value(pin));
+
+			gpio_set_value(pin, 0);
+			UT_ASSERT_INT(2, ==, mock->rx_interrupt_count);
+			UT_ASSERT_INT(0, ==, gpio_get_value(pin));
+		}
+		if (failed)
+			break;
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+
+	smp2p_gpio_open_test_entry("smp2p",
+			SMP2P_REMOTE_MOCK_PROC, false);
+}
+
+/**
+ * smp2p_gpio_irq - Interrupt handler for inbound entries.
+ *
+ * @irq:         Virtual IRQ being triggered
+ * @data:        Cookie data (struct gpio_info * in this case)
+ * @returns:     IRQ_HANDLED
+ */
+static irqreturn_t smp2p_gpio_irq(int irq, void *data)
+{
+	struct gpio_info *gpio_ptr = (struct gpio_info *)data;
+	int offset;
+
+	if (!gpio_ptr) {
+		pr_err("%s: gpio_ptr is NULL for irq %d\n", __func__, irq);
+		return IRQ_HANDLED;
+	}
+
+	offset = irq - gpio_ptr->irq_base_id;
+	if (offset >= 0 &&  offset < SMP2P_BITS_PER_ENTRY)
+		set_bit(offset, gpio_ptr->triggered_irqs);
+	else
+		pr_err("%s: invalid irq offset base %d; irq %d\n",
+			__func__, gpio_ptr->irq_base_id, irq);
+
+	++gpio_ptr->cb_count;
+	complete(&gpio_ptr->cb_completion);
+	return IRQ_HANDLED;
+}
+
+/**
+ * smp2p_ut_local_gpio_in - Verify inbound functionality.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_ut_local_gpio_in(struct seq_file *s)
+{
+	int failed = 0;
+	struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
+	int id;
+	int ret;
+	int virq;
+	struct msm_smp2p_remote_mock *mock;
+
+	seq_printf(s, "Running %s\n", __func__);
+
+	cb_data_reset(cb_info);
+	do {
+		/* initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		mock = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(mock, !=, NULL);
+
+		mock->rx_interrupt_count = 0;
+		memset(&mock->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
+			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
+			0, 1);
+		strlcpy(mock->remote_item.entries[0].name, "smp2p",
+			SMP2P_MAX_ENTRY_NAME);
+		SMP2P_SET_ENT_VALID(
+			mock->remote_item.header.valid_total_ent, 1);
+		msm_smp2p_set_remote_mock_exists(true);
+		mock->tx_interrupt();
+
+		smp2p_gpio_open_test_entry("smp2p",
+				SMP2P_REMOTE_MOCK_PROC, true);
+
+		/* verify set/get functions locally */
+		UT_ASSERT_INT(0, <, cb_info->gpio_base_id);
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			int pin;
+			int current_value;
+
+			/* verify pin value cannot be set */
+			pin = cb_info->gpio_base_id + id;
+			current_value = gpio_get_value(pin);
+
+			gpio_set_value(pin, 0);
+			UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
+			gpio_set_value(pin, 1);
+			UT_ASSERT_INT(current_value, ==, gpio_get_value(pin));
+
+			/* verify no interrupts */
+			UT_ASSERT_INT(0, ==, cb_info->cb_count);
+		}
+		if (failed)
+			break;
+
+		/* register for interrupts */
+		UT_ASSERT_INT(0, <, cb_info->irq_base_id);
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			virq = cb_info->irq_base_id + id;
+			UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
+			ret = request_irq(virq,
+					smp2p_gpio_irq,	IRQF_TRIGGER_RISING,
+					"smp2p_test", cb_info);
+			UT_ASSERT_INT(0, ==, ret);
+		}
+		if (failed)
+			break;
+
+		/* verify both rising and falling edge interrupts */
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			virq = cb_info->irq_base_id + id;
+			irq_set_irq_type(virq, IRQ_TYPE_EDGE_BOTH);
+			cb_data_reset(cb_info);
+
+			/* verify rising-edge interrupt */
+			mock->remote_item.entries[0].entry = 1 << id;
+			mock->tx_interrupt();
+			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
+			UT_ASSERT_INT(0, <,
+				test_bit(id, cb_info->triggered_irqs));
+
+			/* verify falling-edge interrupt */
+			mock->remote_item.entries[0].entry = 0;
+			mock->tx_interrupt();
+			UT_ASSERT_INT(cb_info->cb_count, ==, 2);
+			UT_ASSERT_INT(0, <,
+					test_bit(id, cb_info->triggered_irqs));
+		}
+		if (failed)
+			break;
+
+		/* verify rising-edge interrupts */
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			virq = cb_info->irq_base_id + id;
+			irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
+			cb_data_reset(cb_info);
+
+			/* verify only rising-edge interrupt is triggered */
+			mock->remote_item.entries[0].entry = 1 << id;
+			mock->tx_interrupt();
+			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
+			UT_ASSERT_INT(0, <,
+				test_bit(id, cb_info->triggered_irqs));
+
+			mock->remote_item.entries[0].entry = 0;
+			mock->tx_interrupt();
+			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
+			UT_ASSERT_INT(0, <,
+				test_bit(id, cb_info->triggered_irqs));
+		}
+		if (failed)
+			break;
+
+		/* verify falling-edge interrupts */
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			virq = cb_info->irq_base_id + id;
+			irq_set_irq_type(virq, IRQ_TYPE_EDGE_FALLING);
+			cb_data_reset(cb_info);
+
+			/* verify rising edge does not trigger interrupt */
+			mock->remote_item.entries[0].entry = 1 << id;
+			mock->tx_interrupt();
+			UT_ASSERT_INT(cb_info->cb_count, ==, 0);
+			UT_ASSERT_INT(0, ==,
+				test_bit(id, cb_info->triggered_irqs));
+
+			mock->remote_item.entries[0].entry = 0;
+			mock->tx_interrupt();
+			UT_ASSERT_INT(cb_info->cb_count, ==, 1);
+			UT_ASSERT_INT(0, <,
+				test_bit(id, cb_info->triggered_irqs));
+		}
+		if (failed)
+			break;
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+
+	/* unregister for interrupts */
+	if (cb_info->irq_base_id) {
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
+			free_irq(cb_info->irq_base_id + id, cb_info);
+	}
+
+	smp2p_gpio_open_test_entry("smp2p",
+			SMP2P_REMOTE_MOCK_PROC, false);
+}
+
+/**
+ * smp2p_ut_local_gpio_in_update_open - Verify combined open/update.
+ *
+ * @s:   pointer to output file
+ *
+ * If the remote side updates the SMP2P bits and raises its interrupt before
+ * negotiation is complete, the UPDATE event must be delayed until negotiation
+ * completes.  This should result in the OPEN and UPDATE events arriving right
+ * after each other, and the behavior should be transparent to clients of
+ * SMP2P GPIO.
+ */
+static void smp2p_ut_local_gpio_in_update_open(struct seq_file *s)
+{
+	int failed = 0;
+	struct gpio_info *cb_info = &gpio_info[SMP2P_REMOTE_MOCK_PROC].in;
+	int id;
+	int ret;
+	int virq;
+	struct msm_smp2p_remote_mock *mock;
+
+	seq_printf(s, "Running %s\n", __func__);
+
+	cb_data_reset(cb_info);
+	do {
+		/* initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		mock = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(mock, !=, NULL);
+
+		mock->rx_interrupt_count = 0;
+		memset(&mock->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		smp2p_init_header((struct smp2p_smem *)&mock->remote_item,
+			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
+			0, 1);
+		strlcpy(mock->remote_item.entries[0].name, "smp2p",
+			SMP2P_MAX_ENTRY_NAME);
+		SMP2P_SET_ENT_VALID(
+			mock->remote_item.header.valid_total_ent, 1);
+
+		/* register for interrupts */
+		smp2p_gpio_open_test_entry("smp2p",
+				SMP2P_REMOTE_MOCK_PROC, true);
+
+		UT_ASSERT_INT(0, <, cb_info->irq_base_id);
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			virq = cb_info->irq_base_id + id;
+			UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
+			ret = request_irq(virq,
+					smp2p_gpio_irq,
+					IRQF_TRIGGER_RISING |
+					IRQF_TRIGGER_FALLING,
+					"smp2p_test", cb_info);
+			UT_ASSERT_INT(0, ==, ret);
+		}
+		if (failed)
+			break;
+
+		/* update the state value and complete negotiation */
+		mock->remote_item.entries[0].entry = 0xDEADDEAD;
+		msm_smp2p_set_remote_mock_exists(true);
+		mock->tx_interrupt();
+
+		/* verify delayed state updates were processed */
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			virq = cb_info->irq_base_id + id;
+
+			UT_ASSERT_INT(cb_info->cb_count, >, 0);
+			if (0x1 & (0xDEADDEAD >> id)) {
+				/* rising edge should have been triggered */
+				if (!test_bit(id, cb_info->triggered_irqs)) {
+					seq_printf(s, "%s:%d bit %d clear, ",
+						__func__, __LINE__, id);
+					seq_puts(s, "expected set\n");
+					failed = 1;
+					break;
+				}
+			} else {
+				/* edge should not have been triggered */
+				if (test_bit(id, cb_info->triggered_irqs)) {
+					seq_printf(s, "%s:%d bit %d set, ",
+						__func__, __LINE__, id);
+					seq_puts(s, "expected clear\n");
+					failed = 1;
+					break;
+				}
+			}
+		}
+		if (failed)
+			break;
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+
+	/* unregister for interrupts */
+	if (cb_info->irq_base_id) {
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
+			free_irq(cb_info->irq_base_id + id, cb_info);
+	}
+
+	smp2p_gpio_open_test_entry("smp2p",
+			SMP2P_REMOTE_MOCK_PROC, false);
+}
+
+/**
+ * smp2p_gpio_write_bits - writes value to each GPIO pin specified in mask.
+ *
+ * @gpio: gpio test structure
+ * @mask: 1 = write gpio_value to this GPIO pin
+ * @gpio_value: value to write to GPIO pin
+ */
+static void smp2p_gpio_write_bits(struct gpio_info *gpio, uint32_t mask,
+	int gpio_value)
+{
+	int n;
+
+	for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
+		if (mask & 0x1)
+			gpio_set_value(gpio->gpio_base_id + n, gpio_value);
+		mask >>= 1;
+	}
+}
+
+static void smp2p_gpio_set_bits(struct gpio_info *gpio, uint32_t mask)
+{
+	smp2p_gpio_write_bits(gpio, mask, 1);
+}
+
+static void smp2p_gpio_clr_bits(struct gpio_info *gpio, uint32_t mask)
+{
+	smp2p_gpio_write_bits(gpio, mask, 0);
+}
+
+/**
+ * smp2p_gpio_get_value - reads entire 32-bits of GPIO
+ *
+ * @gpio: gpio structure
+ * @returns: 32 bit value of GPIO pins
+ */
+static uint32_t smp2p_gpio_get_value(struct gpio_info *gpio)
+{
+	int n;
+	uint32_t value = 0;
+
+	for (n = 0; n < SMP2P_BITS_PER_ENTRY; ++n) {
+		if (gpio_get_value(gpio->gpio_base_id + n))
+			value |= 1 << n;
+	}
+	return value;
+}
+
+/**
+ * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
+ *
+ * @s:   pointer to output file
+ * @remote_pid:  Remote processor to test
+ * @name:        Name of the test for reporting
+ *
+ * This test verifies inbound/outbound functionality for the remote processor.
+ */
+static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid,
+		const char *name)
+{
+	int failed = 0;
+	uint32_t request;
+	uint32_t response;
+	struct gpio_info *cb_in;
+	struct gpio_info *cb_out;
+	int id;
+	int ret;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+
+	cb_in = &gpio_info[remote_pid].in;
+	cb_out = &gpio_info[remote_pid].out;
+	cb_data_reset(cb_in);
+	cb_data_reset(cb_out);
+	do {
+		/* open test entries */
+		msm_smp2p_deinit_rmt_lpb_proc(remote_pid);
+		smp2p_gpio_open_test_entry("smp2p", remote_pid, true);
+
+		/* register for interrupts */
+		UT_ASSERT_INT(0, <, cb_in->gpio_base_id);
+		UT_ASSERT_INT(0, <, cb_in->irq_base_id);
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY && !failed; ++id) {
+			int virq = cb_in->irq_base_id + id;
+
+			UT_ASSERT_PTR(NULL, !=, irq_to_desc(virq));
+			ret = request_irq(virq,
+				smp2p_gpio_irq,
+				IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+				"smp2p_test", cb_in);
+			UT_ASSERT_INT(0, ==, ret);
+		}
+		if (failed)
+			break;
+
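+		/*
+		 * Loopback echo: drive the remote loopback command word on the
+		 * outbound entry and expect the same data echoed back on the
+		 * inbound entry with the command-type bit cleared.
+		 */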
+		/* write echo of data value 0 */
+		UT_ASSERT_INT(0, <, cb_out->gpio_base_id);
+		request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(request, 1);
+		SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
+		SMP2P_SET_RMT_DATA(request, 0x0);
+
+		smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
+		smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
+		smp2p_gpio_set_bits(cb_out, request);
+
+		UT_ASSERT_INT(cb_in->cb_count, ==, 0);
+		smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
+
+		/* verify response */
+		do {
+			/* wait for up to 32 changes */
+			if (wait_for_completion_timeout(
+					&cb_in->cb_completion, HZ / 2) == 0)
+				break;
+			reinit_completion(&cb_in->cb_completion);
+		} while (cb_in->cb_count < 32);
+		UT_ASSERT_INT(cb_in->cb_count, >, 0);
+		response = smp2p_gpio_get_value(cb_in);
+		SMP2P_SET_RMT_CMD_TYPE(request, 0);
+		UT_ASSERT_HEX(request, ==, response);
+
+		/* write echo of data value of all 1's */
+		request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(request, 1);
+		SMP2P_SET_RMT_CMD(request, SMP2P_LB_CMD_ECHO);
+		SMP2P_SET_RMT_DATA(request, ~0);
+
+		smp2p_gpio_set_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
+		cb_data_reset(cb_in);
+		smp2p_gpio_clr_bits(cb_out, ~SMP2P_RMT_IGNORE_MASK);
+		smp2p_gpio_set_bits(cb_out, request);
+
+		UT_ASSERT_INT(cb_in->cb_count, ==, 0);
+		smp2p_gpio_clr_bits(cb_out, SMP2P_RMT_IGNORE_MASK);
+
+		/* verify response including 24 interrupts */
+		do {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_in->cb_completion, HZ / 2),
+			   >, 0);
+			reinit_completion(&cb_in->cb_completion);
+		} while (cb_in->cb_count < 24);
+		response = smp2p_gpio_get_value(cb_in);
+		SMP2P_SET_RMT_CMD_TYPE(request, 0);
+		UT_ASSERT_HEX(request, ==, response);
+		UT_ASSERT_INT(24, ==, cb_in->cb_count);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", name);
+		seq_puts(s, "\tFailed\n");
+	}
+
+	/* unregister for interrupts */
+	if (cb_in->irq_base_id) {
+		for (id = 0; id < SMP2P_BITS_PER_ENTRY; ++id)
+			free_irq(cb_in->irq_base_id + id, cb_in);
+	}
+
+	smp2p_gpio_open_test_entry("smp2p",	remote_pid, false);
+	msm_smp2p_init_rmt_lpb_proc(remote_pid);
+}
+
+/**
+ * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
+ *
+ * @s:   pointer to output file
+ *
+ * This test verifies inbound and outbound functionality for all
+ * configured remote processors.
+ */
+static void smp2p_ut_remote_inout(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *int_cfg;
+	int pid;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+		if (!int_cfg[pid].is_configured)
+			continue;
+
+		smp2p_ut_remote_inout_core(s, pid, __func__);
+	}
+}
+
+static int __init smp2p_debugfs_init(void)
+{
+	/* register GPIO pins */
+	(void)platform_driver_register(&smp2p_gpio_driver);
+
+	/*
+	 * Add Unit Test entries.
+	 *
+	 * The idea with unit tests is that you can run all of them
+	 * from ADB shell by doing:
+	 *  adb shell
+	 *  cat ut*
+	 *
+	 * And if particular tests fail, you can then repeatedly run the
+	 * failing tests as you debug and resolve the failing test.
+	 */
+	smp2p_debug_create("ut_local_gpio_out", smp2p_ut_local_gpio_out);
+	smp2p_debug_create("ut_local_gpio_in", smp2p_ut_local_gpio_in);
+	smp2p_debug_create("ut_local_gpio_in_update_open",
+		smp2p_ut_local_gpio_in_update_open);
+	smp2p_debug_create("ut_remote_gpio_inout", smp2p_ut_remote_inout);
+	return 0;
+}
+late_initcall(smp2p_debugfs_init);
diff --git a/drivers/gpio/gpio-msm-smp2p.c b/drivers/gpio/gpio-msm-smp2p.c
new file mode 100644
index 0000000..8f132fd
--- /dev/null
+++ b/drivers/gpio/gpio-msm-smp2p.c
@@ -0,0 +1,836 @@
+/* drivers/gpio/gpio-msm-smp2p.c
+ *
+ * Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/ipc_logging.h>
+#include "../soc/qcom/smp2p_private_api.h"
+#include "../soc/qcom/smp2p_private.h"
+
+/* GPIO device - one per SMP2P entry. */
+struct smp2p_chip_dev {
+	struct list_head entry_list;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	int remote_pid;
+	bool is_inbound;
+	bool is_open;
+	bool in_shadow;
+	uint32_t shadow_value;
+	struct work_struct shadow_work;
+	spinlock_t shadow_lock;
+	struct notifier_block out_notifier;
+	struct notifier_block in_notifier;
+	struct msm_smp2p_out *out_handle;
+
+	struct gpio_chip gpio;
+	struct irq_domain *irq_domain;
+	int irq_base;
+
+	spinlock_t irq_lock;
+	DECLARE_BITMAP(irq_enabled, SMP2P_BITS_PER_ENTRY);
+	DECLARE_BITMAP(irq_rising_edge, SMP2P_BITS_PER_ENTRY);
+	DECLARE_BITMAP(irq_falling_edge, SMP2P_BITS_PER_ENTRY);
+};
+
+static struct platform_driver smp2p_gpio_driver;
+static struct lock_class_key smp2p_gpio_lock_class;
+static struct irq_chip smp2p_gpio_irq_chip;
+static DEFINE_SPINLOCK(smp2p_entry_lock_lha1);
+static LIST_HEAD(smp2p_entry_list);
+
+/* Used for mapping edge to name for logging. */
+static const char * const edge_names[] = {
+	"-",
+	"0->1",
+	"1->0",
+	"-",
+};
+
+/* Used for mapping edge to value for logging. */
+static const char * const edge_name_rising[] = {
+	"-",
+	"0->1",
+};
+
+/* Used for mapping edge to value for logging. */
+static const char * const edge_name_falling[] = {
+	"-",
+	"1->0",
+};
+
+static int smp2p_gpio_to_irq(struct gpio_chip *cp,
+	unsigned int offset);
+
+/**
+ * smp2p_get_value - Retrieves GPIO value.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @returns: >=0: value of GPIO Pin; < 0 for error
+ *
+ * Error codes:
+ *   -ENODEV - chip/entry invalid
+ *   -ENETDOWN - valid entry, but entry not yet created
+ */
+static int smp2p_get_value(struct gpio_chip *cp,
+	unsigned int offset)
+{
+	struct smp2p_chip_dev *chip;
+	int ret = 0;
+	uint32_t data;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (!chip->is_open)
+		return -ENETDOWN;
+
+	if (chip->is_inbound)
+		ret = msm_smp2p_in_read(chip->remote_pid, chip->name, &data);
+	else
+		ret = msm_smp2p_out_read(chip->out_handle, &data);
+
+	if (!ret)
+		ret = (data & (1 << offset)) ? 1 : 0;
+
+	return ret;
+}
+
+/**
+ * smp2p_set_value - Sets GPIO value.
+ *
+ * @cp:     GPIO chip pointer
+ * @offset: Pin offset
+ * @value:  New value
+ */
+static void smp2p_set_value(struct gpio_chip *cp, unsigned int offset,
+			    int value)
+{
+	struct smp2p_chip_dev *chip;
+	uint32_t data_set;
+	uint32_t data_clear;
+	bool send_irq;
+	int ret;
+	unsigned long flags;
+
+	if (!cp)
+		return;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+
+	if (chip->is_inbound) {
+		SMP2P_INFO("%s: '%s':%d virq %d invalid operation\n",
+			__func__, chip->name, chip->remote_pid,
+			chip->irq_base + offset);
+		return;
+	}
+
+	if (value & SMP2P_GPIO_NO_INT) {
+		value &= ~SMP2P_GPIO_NO_INT;
+		send_irq = false;
+	} else {
+		send_irq = true;
+	}
+
+	if (value) {
+		data_set = 1 << offset;
+		data_clear = 0;
+	} else {
+		data_set = 0;
+		data_clear = 1 << offset;
+	}
+
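+	/*
+	 * Writes issued before the entry is open are accumulated in
+	 * shadow_value and written out in a single modify call once the
+	 * entry opens.
+	 */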
+	spin_lock_irqsave(&chip->shadow_lock, flags);
+	if (!chip->is_open) {
+		chip->in_shadow = true;
+		chip->shadow_value &= ~data_clear;
+		chip->shadow_value |= data_set;
+		spin_unlock_irqrestore(&chip->shadow_lock, flags);
+		return;
+	}
+
+	if (chip->in_shadow) {
+		chip->in_shadow = false;
+		chip->shadow_value &= ~data_clear;
+		chip->shadow_value |= data_set;
+		ret = msm_smp2p_out_modify(chip->out_handle,
+				chip->shadow_value, 0x0, send_irq);
+		chip->shadow_value = 0x0;
+	} else {
+		ret = msm_smp2p_out_modify(chip->out_handle,
+				data_set, data_clear, send_irq);
+	}
+	spin_unlock_irqrestore(&chip->shadow_lock, flags);
+
+	if (ret)
+		SMP2P_GPIO("'%s':%d gpio %d set to %d failed (%d)\n",
+			chip->name, chip->remote_pid,
+			chip->gpio.base + offset, value, ret);
+	else
+		SMP2P_GPIO("'%s':%d gpio %d set to %d\n",
+			chip->name, chip->remote_pid,
+			chip->gpio.base + offset, value);
+}
+
+/**
+ * smp2p_direction_input - Sets GPIO direction to input.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @returns: 0 for success; < 0 for failure
+ */
+static int smp2p_direction_input(struct gpio_chip *cp, unsigned int offset)
+{
+	struct smp2p_chip_dev *chip;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (!chip->is_inbound)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * smp2p_direction_output - Sets GPIO direction to output.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @value:   Direction
+ * @returns: 0 for success; < 0 for failure
+ */
+static int smp2p_direction_output(struct gpio_chip *cp,
+	unsigned int offset, int value)
+{
+	struct smp2p_chip_dev *chip;
+
+	if (!cp)
+		return -ENODEV;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (chip->is_inbound)
+		return -EPERM;
+
+	return 0;
+}
+
+/**
+ * smp2p_gpio_to_irq - Convert GPIO pin to virtual IRQ pin.
+ *
+ * @cp:      GPIO chip pointer
+ * @offset:  Pin offset
+ * @returns: >0 for virtual irq value; < 0 for failure
+ */
+static int smp2p_gpio_to_irq(struct gpio_chip *cp, unsigned int offset)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = container_of(cp, struct smp2p_chip_dev, gpio);
+	if (!cp || chip->irq_base <= 0)
+		return -ENODEV;
+
+	return chip->irq_base + offset;
+}
+
+/**
+ * smp2p_gpio_irq_mask_helper - Mask/Unmask interrupt.
+ *
+ * @d:    IRQ data
+ * @mask: true to mask (disable), false to unmask (enable)
+ */
+static void smp2p_gpio_irq_mask_helper(struct irq_data *d, bool mask)
+{
+	struct smp2p_chip_dev *chip;
+	int offset;
+	unsigned long flags;
+
+	chip = (struct smp2p_chip_dev *)irq_get_chip_data(d->irq);
+	if (!chip || chip->irq_base <= 0)
+		return;
+
+	offset = d->irq - chip->irq_base;
+	spin_lock_irqsave(&chip->irq_lock, flags);
+	if (mask)
+		clear_bit(offset, chip->irq_enabled);
+	else
+		set_bit(offset, chip->irq_enabled);
+	spin_unlock_irqrestore(&chip->irq_lock, flags);
+}
+
+/**
+ * smp2p_gpio_irq_mask - Mask interrupt.
+ *
+ * @d: IRQ data
+ */
+static void smp2p_gpio_irq_mask(struct irq_data *d)
+{
+	smp2p_gpio_irq_mask_helper(d, true);
+}
+
+/**
+ * smp2p_gpio_irq_unmask - Unmask interrupt.
+ *
+ * @d: IRQ data
+ */
+static void smp2p_gpio_irq_unmask(struct irq_data *d)
+{
+	smp2p_gpio_irq_mask_helper(d, false);
+}
+
+/**
+ * smp2p_gpio_irq_set_type - Set interrupt edge type.
+ *
+ * @d:      IRQ data
+ * @type:   Edge type for interrupt
+ * @returns 0 for success; < 0 for failure
+ */
+static int smp2p_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct smp2p_chip_dev *chip;
+	int offset;
+	unsigned long flags;
+	int ret = 0;
+
+	chip = (struct smp2p_chip_dev *)irq_get_chip_data(d->irq);
+	if (!chip)
+		return -ENODEV;
+
+	if (chip->irq_base <= 0) {
+		SMP2P_ERR("%s: '%s':%d virqbase %d invalid\n",
+			__func__, chip->name, chip->remote_pid,
+			chip->irq_base);
+		return -ENODEV;
+	}
+
+	offset = d->irq - chip->irq_base;
+
+	spin_lock_irqsave(&chip->irq_lock, flags);
+	clear_bit(offset, chip->irq_rising_edge);
+	clear_bit(offset, chip->irq_falling_edge);
+	switch (type) {
+	case IRQ_TYPE_EDGE_RISING:
+		set_bit(offset, chip->irq_rising_edge);
+		break;
+
+	case IRQ_TYPE_EDGE_FALLING:
+		set_bit(offset, chip->irq_falling_edge);
+		break;
+
+	case IRQ_TYPE_NONE:
+	case IRQ_TYPE_DEFAULT:
+	case IRQ_TYPE_EDGE_BOTH:
+		set_bit(offset, chip->irq_rising_edge);
+		set_bit(offset, chip->irq_falling_edge);
+		break;
+
+	default:
+		SMP2P_ERR("%s: unsupported interrupt type 0x%x\n",
+				__func__, type);
+		ret = -EINVAL;
+		break;
+	}
+	spin_unlock_irqrestore(&chip->irq_lock, flags);
+	return ret;
+}
+
+/**
+ * smp2p_irq_map - Creates or updates binding of virtual IRQ
+ *
+ * @domain_ptr: Interrupt domain pointer
+ * @virq:       Virtual IRQ
+ * @hw:         Hardware IRQ (same as virq for nomap)
+ * @returns:    0 for success
+ */
+static int smp2p_irq_map(struct irq_domain *domain_ptr, unsigned int virq,
+	irq_hw_number_t hw)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = domain_ptr->host_data;
+	if (!chip) {
+		SMP2P_ERR("%s: invalid domain ptr\n", __func__);
+		return -ENODEV;
+	}
+
+	/* map chip structures to device */
+	irq_set_lockdep_class(virq, &smp2p_gpio_lock_class);
+	irq_set_chip_and_handler(virq, &smp2p_gpio_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, chip);
+
+	return 0;
+}
+
+static struct irq_chip smp2p_gpio_irq_chip = {
+	.name = "smp2p_gpio",
+	.irq_mask = smp2p_gpio_irq_mask,
+	.irq_unmask = smp2p_gpio_irq_unmask,
+	.irq_set_type = smp2p_gpio_irq_set_type,
+};
+
+/* No-map interrupt Domain */
+static const struct irq_domain_ops smp2p_irq_domain_ops = {
+	.map = smp2p_irq_map,
+};
+
+/**
+ * msm_summary_irq_handler - Handles inbound entry change notification.
+ *
+ * @chip:  GPIO chip pointer
+ * @entry: Change notification data
+ *
+ * Whenever an entry changes, this callback is triggered to determine
+ * which bits changed and if the corresponding interrupts need to be
+ * triggered.
+ */
+static void msm_summary_irq_handler(struct smp2p_chip_dev *chip,
+	struct msm_smp2p_update_notif *entry)
+{
+	int i;
+	uint32_t cur_val;
+	uint32_t prev_val;
+	uint32_t edge;
+	unsigned long flags;
+	bool trigger_interrupt;
+	bool irq_rising;
+	bool irq_falling;
+
+	cur_val = entry->current_value;
+	prev_val = entry->previous_value;
+
+	if (chip->irq_base <= 0)
+		return;
+
+	SMP2P_GPIO("'%s':%d GPIO Summary IRQ Change %08x->%08x\n",
+			chip->name, chip->remote_pid, prev_val, cur_val);
+
+	for (i = 0; i < SMP2P_BITS_PER_ENTRY; ++i) {
+		spin_lock_irqsave(&chip->irq_lock, flags);
+		trigger_interrupt = false;
+		edge = (prev_val & 0x1) << 1 | (cur_val & 0x1);
+		irq_rising = test_bit(i, chip->irq_rising_edge);
+		irq_falling = test_bit(i, chip->irq_falling_edge);
+
+		if (test_bit(i, chip->irq_enabled)) {
+			if (edge == 0x1 && irq_rising)
+				/* 0->1 transition */
+				trigger_interrupt = true;
+			else if (edge == 0x2 && irq_falling)
+				/* 1->0 transition */
+				trigger_interrupt = true;
+		} else {
+			SMP2P_GPIO(
+				"'%s':%d GPIO bit %d virq %d (%s,%s) - edge %s disabled\n",
+				chip->name, chip->remote_pid, i,
+				chip->irq_base + i,
+				edge_name_rising[irq_rising],
+				edge_name_falling[irq_falling],
+				edge_names[edge]);
+		}
+		spin_unlock_irqrestore(&chip->irq_lock, flags);
+
+		if (trigger_interrupt) {
+			SMP2P_INFO(
+				"'%s':%d GPIO bit %d virq %d (%s,%s) - edge %s triggering\n",
+				chip->name, chip->remote_pid, i,
+				chip->irq_base + i,
+				edge_name_rising[irq_rising],
+				edge_name_falling[irq_falling],
+				edge_names[edge]);
+			(void)generic_handle_irq(chip->irq_base + i);
+		}
+
+		cur_val >>= 1;
+		prev_val >>= 1;
+	}
+}
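For reference, the 2-bit edge value computed above places the previous bit in bit 1 and the current bit in bit 0:

	prev  cur   edge   action
	 0     0    0x0    no change (still low)
	 0     1    0x1    rising  - fires if irq_rising_edge is set
	 1     0    0x2    falling - fires if irq_falling_edge is set
	 1     1    0x3    no change (still high)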
+
+/**
+ * smp2p_add_irq_domain - Adds an interrupt domain based on the DT node.
+ *
+ * @chip: pointer to GPIO chip
+ * @node: pointer to Device Tree node
+ */
+static void smp2p_add_irq_domain(struct smp2p_chip_dev *chip,
+	struct device_node *node)
+{
+	int irq_base;
+
+	/* map GPIO pins to interrupts */
+	chip->irq_domain = irq_domain_add_linear(node, SMP2P_BITS_PER_ENTRY,
+			&smp2p_irq_domain_ops, chip);
+	if (!chip->irq_domain) {
+		SMP2P_ERR("%s: unable to create interrupt domain '%s':%d\n",
+				__func__, chip->name, chip->remote_pid);
+		goto domain_fail;
+	}
+
+	/* alloc a contiguous set of virt irqs from anywhere in the irq space */
+	irq_base = irq_alloc_descs_from(0, SMP2P_BITS_PER_ENTRY, of_node_to_nid(
+				irq_domain_get_of_node(chip->irq_domain)));
+	if (irq_base < 0) {
+		SMP2P_ERR("alloc virt irqs failed:%d name:%s pid%d\n", irq_base,
+						chip->name, chip->remote_pid);
+		goto irq_alloc_fail;
+	}
+
+	/* map the allocated irqs to gpios */
+	irq_domain_associate_many(chip->irq_domain, irq_base, 0,
+				  SMP2P_BITS_PER_ENTRY);
+
+	chip->irq_base = irq_base;
+	SMP2P_DBG("create mapping:%d name:%s pid:%d\n", chip->irq_base,
+						chip->name, chip->remote_pid);
+	return;
+
+irq_alloc_fail:
+	irq_domain_remove(chip->irq_domain);
+domain_fail:
+	return;
+}
+
+/**
+ * smp2p_gpio_out_notify - Notifier for outbound smp2p entries.
+ *
+ * @self:       Pointer to calling notifier block
+ * @event:      Event
+ * @data:       Event-specific data
+ * @returns:    0
+ */
+static int smp2p_gpio_out_notify(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = container_of(self, struct smp2p_chip_dev, out_notifier);
+
+	switch (event) {
+	case SMP2P_OPEN:
+		chip->is_open = 1;
+		SMP2P_GPIO("%s: Opened out '%s':%d in_shadow[%d]\n", __func__,
+				chip->name, chip->remote_pid, chip->in_shadow);
+		if (chip->in_shadow)
+			schedule_work(&chip->shadow_work);
+		break;
+	case SMP2P_ENTRY_UPDATE:
+		break;
+	default:
+		SMP2P_ERR("%s: Unknown event\n", __func__);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_gpio_in_notify - Notifier for inbound smp2p entries.
+ *
+ * @self:       Pointer to calling notifier block
+ * @event:      Event
+ * @data:       Event-specific data
+ * @returns:    0
+ */
+static int smp2p_gpio_in_notify(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct smp2p_chip_dev *chip;
+
+	chip = container_of(self, struct smp2p_chip_dev, in_notifier);
+
+	switch (event) {
+	case SMP2P_OPEN:
+		chip->is_open = 1;
+		SMP2P_GPIO("%s: Opened in '%s':%d\n", __func__,
+				chip->name, chip->remote_pid);
+		break;
+	case SMP2P_ENTRY_UPDATE:
+		msm_summary_irq_handler(chip, data);
+		break;
+	default:
+		SMP2P_ERR("%s: Unknown event\n", __func__);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_gpio_shadow_worker - Handles shadow updates of an entry.
+ *
+ * @work: Work Item scheduled to handle the shadow updates.
+ */
+static void smp2p_gpio_shadow_worker(struct work_struct *work)
+{
+	struct smp2p_chip_dev *chip;
+	int ret;
+	unsigned long flags;
+
+	chip = container_of(work, struct smp2p_chip_dev, shadow_work);
+	spin_lock_irqsave(&chip->shadow_lock, flags);
+	if (chip->in_shadow) {
+		ret = msm_smp2p_out_modify(chip->out_handle,
+					chip->shadow_value, 0x0, true);
+
+		if (ret)
+			SMP2P_GPIO("'%s':%d shadow val[0x%x] failed(%d)\n",
+					chip->name, chip->remote_pid,
+					chip->shadow_value, ret);
+		else
+			SMP2P_GPIO("'%s':%d shadow val[0x%x]\n",
+					chip->name, chip->remote_pid,
+					chip->shadow_value);
+		chip->shadow_value = 0;
+		chip->in_shadow = false;
+	}
+	spin_unlock_irqrestore(&chip->shadow_lock, flags);
+}
+
+/**
+ * smp2p_gpio_probe - Device tree probe function.
+ *
+ * @pdev:	 Pointer to device tree data.
+ * @returns: 0 on success; -ENODEV otherwise
+ *
+ * Called for each smp2pgpio entry in the device tree.
+ */
+static int smp2p_gpio_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	char *key;
+	struct smp2p_chip_dev *chip;
+	const char *name_tmp;
+	unsigned long flags;
+	bool is_test_entry = false;
+	int ret;
+
+	chip = kzalloc(sizeof(struct smp2p_chip_dev), GFP_KERNEL);
+	if (!chip) {
+		SMP2P_ERR("%s: out of memory\n", __func__);
+		ret = -ENOMEM;
+		goto fail;
+	}
+	spin_lock_init(&chip->irq_lock);
+	spin_lock_init(&chip->shadow_lock);
+	INIT_WORK(&chip->shadow_work, smp2p_gpio_shadow_worker);
+
+	/* parse device tree */
+	node = pdev->dev.of_node;
+	key = "qcom,entry-name";
+	ret = of_property_read_string(node, key, &name_tmp);
+	if (ret) {
+		SMP2P_ERR("%s: missing DT key '%s'\n", __func__, key);
+		goto fail;
+	}
+	strlcpy(chip->name, name_tmp, sizeof(chip->name));
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(node, key, &chip->remote_pid);
+	if (ret) {
+		SMP2P_ERR("%s: missing DT key '%s'\n", __func__, key);
+		goto fail;
+	}
+
+	key = "qcom,is-inbound";
+	chip->is_inbound = of_property_read_bool(node, key);
+
+	/* create virtual GPIO controller */
+	chip->gpio.label = chip->name;
+	chip->gpio.parent = &pdev->dev;
+	chip->gpio.owner = THIS_MODULE;
+	chip->gpio.direction_input = smp2p_direction_input;
+	chip->gpio.get = smp2p_get_value;
+	chip->gpio.direction_output = smp2p_direction_output;
+	chip->gpio.set = smp2p_set_value;
+	chip->gpio.to_irq = smp2p_gpio_to_irq;
+	chip->gpio.base = -1;	/* use dynamic GPIO pin allocation */
+	chip->gpio.ngpio = SMP2P_BITS_PER_ENTRY;
+	ret = gpiochip_add(&chip->gpio);
+	if (ret) {
+		SMP2P_ERR("%s: unable to register GPIO '%s' ret %d\n",
+				__func__, chip->name, ret);
+		goto fail;
+	}
+
+	/*
+	 * Test entries opened by GPIO Test conflict with loopback
+	 * support, so the test entries must be explicitly opened
+	 * in the unit test framework.
+	 */
+	if (strcmp("smp2p", chip->name) == 0)
+		is_test_entry = true;
+
+	if (!chip->is_inbound)	{
+		chip->out_notifier.notifier_call = smp2p_gpio_out_notify;
+		if (!is_test_entry) {
+			ret = msm_smp2p_out_open(chip->remote_pid, chip->name,
+					   &chip->out_notifier,
+					   &chip->out_handle);
+			if (ret < 0)
+				goto error;
+		}
+	} else {
+		chip->in_notifier.notifier_call = smp2p_gpio_in_notify;
+		if (!is_test_entry) {
+			ret = msm_smp2p_in_register(chip->remote_pid,
+					chip->name,
+					&chip->in_notifier);
+			if (ret < 0)
+				goto error;
+		}
+	}
+
+	spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+	list_add(&chip->entry_list, &smp2p_entry_list);
+	spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+
+	/*
+	 * Create interrupt domain - note that chip can't be removed from the
+	 * interrupt domain, so chip cannot be deleted after this point.
+	 */
+	if (chip->is_inbound)
+		smp2p_add_irq_domain(chip, node);
+	else
+		chip->irq_base = -1;
+
+	SMP2P_GPIO("%s: added %s%s entry '%s':%d gpio %d irq %d\n",
+			__func__,
+			is_test_entry ? "test " : "",
+			chip->is_inbound ? "in" : "out",
+			chip->name, chip->remote_pid,
+			chip->gpio.base, chip->irq_base);
+
+	return 0;
+error:
+	gpiochip_remove(&chip->gpio);
+
+fail:
+	kfree(chip);
+	return ret;
+}
+
+/**
+ * smp2p_gpio_open_close - Opens or closes entry.
+ *
+ * @entry:   Entry to open or close
+ * @do_open: true = open port; false = close
+ */
+static void smp2p_gpio_open_close(struct smp2p_chip_dev *entry,
+	bool do_open)
+{
+	int ret;
+
+	if (do_open) {
+		/* open entry */
+		if (entry->is_inbound)
+			ret = msm_smp2p_in_register(entry->remote_pid,
+					entry->name, &entry->in_notifier);
+		else
+			ret = msm_smp2p_out_open(entry->remote_pid,
+					entry->name, &entry->out_notifier,
+					&entry->out_handle);
+		SMP2P_GPIO("%s: opened %s '%s':%d ret %d\n",
+				__func__,
+				entry->is_inbound ? "in" : "out",
+				entry->name, entry->remote_pid,
+				ret);
+	} else {
+		/* close entry */
+		if (entry->is_inbound)
+			ret = msm_smp2p_in_unregister(entry->remote_pid,
+					entry->name, &entry->in_notifier);
+		else
+			ret = msm_smp2p_out_close(&entry->out_handle);
+		entry->is_open = false;
+		SMP2P_GPIO("%s: closed %s '%s':%d ret %d\n",
+				__func__,
+				entry->is_inbound ? "in" : "out",
+				entry->name, entry->remote_pid, ret);
+	}
+}
+
+/**
+ * smp2p_gpio_open_test_entry - Opens or closes test entries for unit testing.
+ *
+ * @name:       Name of the entry
+ * @remote_pid: Remote processor ID
+ * @do_open:    true = open port; false = close
+ */
+void smp2p_gpio_open_test_entry(const char *name, int remote_pid, bool do_open)
+{
+	struct smp2p_chip_dev *entry;
+	struct smp2p_chip_dev *start_entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+	if (list_empty(&smp2p_entry_list)) {
+		spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+		return;
+	}
+	start_entry = list_first_entry(&smp2p_entry_list,
+					struct smp2p_chip_dev,
+					entry_list);
+	entry = start_entry;
+	do {
+		if (!strcmp(entry->name, name)
+				&& entry->remote_pid == remote_pid) {
+			/* found entry to change */
+			spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+			smp2p_gpio_open_close(entry, do_open);
+			spin_lock_irqsave(&smp2p_entry_lock_lha1, flags);
+		}
+		list_rotate_left(&smp2p_entry_list);
+		entry = list_first_entry(&smp2p_entry_list,
+						struct smp2p_chip_dev,
+						entry_list);
+	} while (entry != start_entry);
+	spin_unlock_irqrestore(&smp2p_entry_lock_lha1, flags);
+}
+
+static const struct of_device_id msm_smp2p_match_table[] = {
+	{.compatible = "qcom,smp2pgpio", },
+	{},
+};
+
+static struct platform_driver smp2p_gpio_driver = {
+	.probe = smp2p_gpio_probe,
+	.driver = {
+		.name = "smp2pgpio",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_match_table,
+	},
+};
+
+static int smp2p_init(void)
+{
+	INIT_LIST_HEAD(&smp2p_entry_list);
+	return platform_driver_register(&smp2p_gpio_driver);
+}
+module_init(smp2p_init);
+
+static void __exit smp2p_exit(void)
+{
+	platform_driver_unregister(&smp2p_gpio_driver);
+}
+module_exit(smp2p_exit);
+
+MODULE_DESCRIPTION("SMP2P GPIO");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
new file mode 100644
index 0000000..114998f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
+#include <linux/errno.h>
+
+#include "dsi_catalog.h"
+
+/**
+ * dsi_catalog_14_init() - catalog init for dsi controller v1.4
+ */
+static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl)
+{
+	ctrl->ops.host_setup             = dsi_ctrl_hw_14_host_setup;
+	ctrl->ops.setup_lane_map         = dsi_ctrl_hw_14_setup_lane_map;
+	ctrl->ops.video_engine_en        = dsi_ctrl_hw_14_video_engine_en;
+	ctrl->ops.video_engine_setup     = dsi_ctrl_hw_14_video_engine_setup;
+	ctrl->ops.set_video_timing       = dsi_ctrl_hw_14_set_video_timing;
+	ctrl->ops.cmd_engine_setup       = dsi_ctrl_hw_14_cmd_engine_setup;
+	ctrl->ops.ctrl_en                = dsi_ctrl_hw_14_ctrl_en;
+	ctrl->ops.cmd_engine_en          = dsi_ctrl_hw_14_cmd_engine_en;
+	ctrl->ops.phy_sw_reset           = dsi_ctrl_hw_14_phy_sw_reset;
+	ctrl->ops.soft_reset             = dsi_ctrl_hw_14_soft_reset;
+	ctrl->ops.kickoff_command        = dsi_ctrl_hw_14_kickoff_command;
+	ctrl->ops.kickoff_fifo_command   = dsi_ctrl_hw_14_kickoff_fifo_command;
+	ctrl->ops.reset_cmd_fifo         = dsi_ctrl_hw_14_reset_cmd_fifo;
+	ctrl->ops.trigger_command_dma    = dsi_ctrl_hw_14_trigger_command_dma;
+	ctrl->ops.ulps_request           = dsi_ctrl_hw_14_ulps_request;
+	ctrl->ops.ulps_exit              = dsi_ctrl_hw_14_ulps_exit;
+	ctrl->ops.clear_ulps_request     = dsi_ctrl_hw_14_clear_ulps_request;
+	ctrl->ops.get_lanes_in_ulps      = dsi_ctrl_hw_14_get_lanes_in_ulps;
+	ctrl->ops.clamp_enable           = dsi_ctrl_hw_14_clamp_enable;
+	ctrl->ops.clamp_disable          = dsi_ctrl_hw_14_clamp_disable;
+	ctrl->ops.get_interrupt_status   = dsi_ctrl_hw_14_get_interrupt_status;
+	ctrl->ops.get_error_status       = dsi_ctrl_hw_14_get_error_status;
+	ctrl->ops.clear_error_status     = dsi_ctrl_hw_14_clear_error_status;
+	ctrl->ops.clear_interrupt_status =
+		dsi_ctrl_hw_14_clear_interrupt_status;
+	ctrl->ops.enable_status_interrupts =
+		dsi_ctrl_hw_14_enable_status_interrupts;
+	ctrl->ops.enable_error_interrupts =
+		dsi_ctrl_hw_14_enable_error_interrupts;
+	ctrl->ops.video_test_pattern_setup =
+		dsi_ctrl_hw_14_video_test_pattern_setup;
+	ctrl->ops.cmd_test_pattern_setup =
+		dsi_ctrl_hw_14_cmd_test_pattern_setup;
+	ctrl->ops.test_pattern_enable    = dsi_ctrl_hw_14_test_pattern_enable;
+	ctrl->ops.trigger_cmd_test_pattern =
+		dsi_ctrl_hw_14_trigger_cmd_test_pattern;
+}
+
+/**
+ * dsi_catalog_20_init() - catalog init for dsi controller v2.0
+ */
+static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl)
+{
+	set_bit(DSI_CTRL_CPHY, ctrl->feature_map);
+}
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl:        Pointer to DSI controller hw object.
+ * @version:     DSI controller version.
+ * @index:       DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index)
+{
+	int rc = 0;
+
+	if (version == DSI_CTRL_VERSION_UNKNOWN ||
+	    version >= DSI_CTRL_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	ctrl->index = index;
+	set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
+	set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
+	set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
+	set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
+
+	switch (version) {
+	case DSI_CTRL_VERSION_1_4:
+		dsi_catalog_14_init(ctrl);
+		break;
+	case DSI_CTRL_VERSION_2_0:
+		dsi_catalog_20_init(ctrl);
+		break;
+	default:
+		return -ENOTSUPP;
+	}
+
+	return rc;
+}
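A minimal usage sketch (assumed caller, not part of this patch): the controller driver fills in the register base and then asks the catalog for the revision-specific ops table.

static int example_ctrl_init(struct dsi_ctrl_hw *ctrl, void __iomem *base)
{
	ctrl->base = base;

	/* populates ctrl->ops and ctrl->feature_map for the v1.4 controller */
	return dsi_catalog_ctrl_setup(ctrl, DSI_CTRL_VERSION_1_4, 0);
}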
+
+/**
+ * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0
+ */
+static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
+{
+	phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+	phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+	phy->ops.enable = dsi_phy_hw_v4_0_enable;
+	phy->ops.disable = dsi_phy_hw_v4_0_disable;
+	phy->ops.calculate_timing_params =
+		dsi_phy_hw_v4_0_calculate_timing_params;
+}
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @phy:         Pointer to DSI PHY hw object.
+ * @version:     DSI PHY version.
+ * @index:       DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index)
+{
+	int rc = 0;
+
+	if (version == DSI_PHY_VERSION_UNKNOWN ||
+	    version >= DSI_PHY_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	phy->index = index;
+	set_bit(DSI_PHY_DPHY, phy->feature_map);
+
+	switch (version) {
+	case DSI_PHY_VERSION_4_0:
+		dsi_catalog_phy_4_0_init(phy);
+		break;
+	case DSI_PHY_VERSION_1_0:
+	case DSI_PHY_VERSION_2_0:
+	case DSI_PHY_VERSION_3_0:
+	default:
+		return -ENOTSUPP;
+	}
+
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
new file mode 100644
index 0000000..e4b33c2
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CATALOG_H_
+#define _DSI_CATALOG_H_
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl:        Pointer to DSI controller hw object.
+ * @version:     DSI controller version.
+ * @index:       DSI controller instance ID.
+ *
+ * This function sets up the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index);
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @phy:         Pointer to DSI PHY hw object.
+ * @version:     DSI PHY version.
+ * @index:       DSI PHY instance ID.
+ *
+ * This function sets up the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index);
+
+/* Definitions for 4.0 PHY hardware driver */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+				      struct dsi_phy_per_lane_cfgs *cfg);
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy);
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+					    struct dsi_mode_info *mode,
+					    struct dsi_host_common_cfg *cfg,
+					   struct dsi_phy_per_lane_cfgs
+					   *timing);
+
+/* Definitions for 1.4 controller hardware driver */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_host_common_cfg *config);
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *common_cfg,
+				       struct dsi_video_engine_cfg *cfg);
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+			 struct dsi_mode_info *mode);
+
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_host_common_cfg *common_cfg,
+				     struct dsi_cmd_engine_cfg *cfg);
+
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+		       struct dsi_lane_mapping *lane_map);
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+			struct dsi_ctrl_cmd_dma_info *cmd,
+			u32 flags);
+
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+			     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+			     u32 flags);
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+				 u32 lanes,
+				 bool enable_ulps);
+
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+				  u32 lanes,
+				  bool disable_ulps);
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
+					     u32 ints);
+
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors);
+
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+				 enum dsi_test_pattern type,
+				 u32 init_val);
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+			       enum dsi_test_pattern  type,
+			       u32 init_val,
+			       u32 stream_id);
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+				 u32 stream_id);
+#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
new file mode 100644
index 0000000..b5ddfbb
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_HW_H_
+#define _DSI_CTRL_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+
+#include "dsi_defs.h"
+
+/**
+ * Modifier flag for command transmission. If this flag is set, command
+ * information is programmed to hardware and transmission is not triggered.
+ * Caller should call the trigger_command_dma() to start the transmission. This
+ * flag is valid for kickoff_command() and kickoff_fifo_command() operations.
+ */
+#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER            0x1
+
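A hedged sketch of the deferred-trigger flow this flag enables (caller side, not part of this patch; the function name is illustrative): program the command DMA first, then trigger transmission separately, for example after the peer controller in a broadcast pair has also been programmed.

static void example_deferred_cmd(struct dsi_ctrl_hw *ctrl,
				 struct dsi_ctrl_cmd_dma_info *cmd)
{
	/* program address/size but do not start the transfer yet */
	ctrl->ops.kickoff_command(ctrl, cmd, DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER);

	/* ... program the peer controller here in broadcast mode ... */

	/* now start the transfer */
	ctrl->ops.trigger_command_dma(ctrl);
}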
+/**
+ * enum dsi_ctrl_version - version of the dsi host controller
+ * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_4:     DSI host v1.4 controller
+ * @DSI_CTRL_VERSION_2_0:     DSI host v2.0 controller
+ * @DSI_CTRL_VERSION_MAX:     max version
+ */
+enum dsi_ctrl_version {
+	DSI_CTRL_VERSION_UNKNOWN,
+	DSI_CTRL_VERSION_1_4,
+	DSI_CTRL_VERSION_2_0,
+	DSI_CTRL_VERSION_MAX
+};
+
+/**
+ * enum dsi_ctrl_hw_features - features supported by dsi host controller
+ * @DSI_CTRL_VIDEO_TPG:               Test pattern support for video mode.
+ * @DSI_CTRL_CMD_TPG:                 Test pattern support for command mode.
+ * @DSI_CTRL_VARIABLE_REFRESH_RATE:   variable panel timing
+ * @DSI_CTRL_DYNAMIC_REFRESH:         variable pixel clock rate
+ * @DSI_CTRL_NULL_PACKET_INSERTION:   NULL packet insertion
+ * @DSI_CTRL_DESKEW_CALIB:            Deskew calibration support
+ * @DSI_CTRL_DPHY:                    Controller support for DPHY
+ * @DSI_CTRL_CPHY:                    Controller support for CPHY
+ * @DSI_CTRL_MAX_FEATURES:            Maximum number of controller features.
+ */
+enum dsi_ctrl_hw_features {
+	DSI_CTRL_VIDEO_TPG,
+	DSI_CTRL_CMD_TPG,
+	DSI_CTRL_VARIABLE_REFRESH_RATE,
+	DSI_CTRL_DYNAMIC_REFRESH,
+	DSI_CTRL_NULL_PACKET_INSERTION,
+	DSI_CTRL_DESKEW_CALIB,
+	DSI_CTRL_DPHY,
+	DSI_CTRL_CPHY,
+	DSI_CTRL_MAX_FEATURES
+};
+
+/**
+ * enum dsi_test_pattern - test pattern type
+ * @DSI_TEST_PATTERN_FIXED:     Test pattern is fixed, based on init value.
+ * @DSI_TEST_PATTERN_INC:       Incremental test pattern, base on init value.
+ * @DSI_TEST_PATTERN_POLY:      Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_MAX:       Sentinel value; not a valid test pattern.
+ */
+enum dsi_test_pattern {
+	DSI_TEST_PATTERN_FIXED = 0,
+	DSI_TEST_PATTERN_INC,
+	DSI_TEST_PATTERN_POLY,
+	DSI_TEST_PATTERN_MAX
+};
+
+/**
+ * enum dsi_status_int_type - status interrupts generated by DSI controller
+ * @DSI_CMD_MODE_DMA_DONE:        Command mode DMA packets are sent out.
+ * @DSI_CMD_STREAM0_FRAME_DONE:   A frame of command mode stream0 is sent out.
+ * @DSI_CMD_STREAM1_FRAME_DONE:   A frame of command mode stream1 is sent out.
+ * @DSI_CMD_STREAM2_FRAME_DONE:   A frame of command mode stream2 is sent out.
+ * @DSI_VIDEO_MODE_FRAME_DONE:    A frame of video mode stream is sent out.
+ * @DSI_BTA_DONE:                 A BTA is completed.
+ * @DSI_CMD_FRAME_DONE:           A frame of selected command mode stream is
+ *                                sent out by MDP.
+ * @DSI_DYN_REFRESH_DONE:         The dynamic refresh operation has completed.
+ * @DSI_DESKEW_DONE:              The deskew calibration operation has completed.
+ * @DSI_DYN_BLANK_DMA_DONE:       The dynamic blanking DMA operation has
+ *                                completed.
+ */
+enum dsi_status_int_type {
+	DSI_CMD_MODE_DMA_DONE = BIT(0),
+	DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
+	DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
+	DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
+	DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
+	DSI_BTA_DONE = BIT(5),
+	DSI_CMD_FRAME_DONE = BIT(6),
+	DSI_DYN_REFRESH_DONE = BIT(7),
+	DSI_DESKEW_DONE = BIT(8),
+	DSI_DYN_BLANK_DMA_DONE = BIT(9)
+};
+
+/**
+ * enum dsi_error_int_type - error interrupts generated by DSI controller
+ * @DSI_RDBK_SINGLE_ECC_ERR:        Single bit ECC error in read packet.
+ * @DSI_RDBK_MULTI_ECC_ERR:         Multi bit ECC error in read packet.
+ * @DSI_RDBK_CRC_ERR:               CRC error in read packet.
+ * @DSI_RDBK_INCOMPLETE_PKT:        Incomplete read packet.
+ * @DSI_PERIPH_ERROR_PKT:           Error packet returned from peripheral.
+ * @DSI_LP_RX_TIMEOUT:              Low power reverse transmission timeout.
+ * @DSI_HS_TX_TIMEOUT:              High speed forward transmission timeout.
+ * @DSI_BTA_TIMEOUT:                BTA timeout.
+ * @DSI_PLL_UNLOCK:                 PLL has unlocked.
+ * @DSI_DLN0_ESC_ENTRY_ERR:         Incorrect LP Rx escape entry.
+ * @DSI_DLN0_ESC_SYNC_ERR:          LP Rx data is not byte aligned.
+ * @DSI_DLN0_LP_CONTROL_ERR:        Incorrect LP Rx state sequence.
+ * @DSI_PENDING_HS_TX_TIMEOUT:      Pending High-speed transfer timeout.
+ * @DSI_INTERLEAVE_OP_CONTENTION:   Interleave operation contention.
+ * @DSI_CMD_DMA_FIFO_UNDERFLOW:     Command mode DMA FIFO underflow.
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW:     Command MDP FIFO underflow (failed to
+ *                                  receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW:      High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW:     High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION:        PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION:        PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION:        PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION:        PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION:        PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION:        PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION:        PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION:        PHY level contention while lane 3 is high.
+ */
+enum dsi_error_int_type {
+	DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
+	DSI_RDBK_MULTI_ECC_ERR = BIT(1),
+	DSI_RDBK_CRC_ERR = BIT(2),
+	DSI_RDBK_INCOMPLETE_PKT = BIT(3),
+	DSI_PERIPH_ERROR_PKT = BIT(4),
+	DSI_LP_RX_TIMEOUT = BIT(5),
+	DSI_HS_TX_TIMEOUT = BIT(6),
+	DSI_BTA_TIMEOUT = BIT(7),
+	DSI_PLL_UNLOCK = BIT(8),
+	DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
+	DSI_DLN0_ESC_SYNC_ERR = BIT(10),
+	DSI_DLN0_LP_CONTROL_ERR = BIT(11),
+	DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
+	DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
+	DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
+	DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
+	DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
+	DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
+	DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
+	DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
+	DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
+	DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
+	DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
+	DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
+	DSI_DLN0_LP0_CONTENTION = BIT(24),
+	DSI_DLN1_LP0_CONTENTION = BIT(25),
+	DSI_DLN2_LP0_CONTENTION = BIT(26),
+	DSI_DLN3_LP0_CONTENTION = BIT(27),
+	DSI_DLN0_LP1_CONTENTION = BIT(28),
+	DSI_DLN1_LP1_CONTENTION = BIT(29),
+	DSI_DLN2_LP1_CONTENTION = BIT(30),
+	DSI_DLN3_LP1_CONTENTION = BIT(31),
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_info - command buffer information
+ * @offset:        IOMMU VA for command buffer address.
+ * @length:        Length of the command buffer.
+ * @en_broadcast:  Enable broadcast mode if set to true.
+ * @is_master:     Is master in broadcast mode.
+ * @use_lpm:       Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_info {
+	u32 offset;
+	u32 length;
+	bool en_broadcast;
+	bool is_master;
+	bool use_lpm;
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
+ * @command:        VA for command buffer.
+ * @size:           Size of the command buffer.
+ * @en_broadcast:   Enable broadcast mode if set to true.
+ * @is_master:      Is master in broadcast mode.
+ * @use_lpm:        Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_fifo_info {
+	u32 *command;
+	u32 size;
+	bool en_broadcast;
+	bool is_master;
+	bool use_lpm;
+};
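A short sketch of filling this descriptor (assumed caller; the payload pointer, size, and function name are illustrative):

static void example_fill_fifo_cmd(struct dsi_ctrl_cmd_dma_fifo_info *cmd,
				  u32 *payload, u32 payload_size)
{
	cmd->command = payload;		/* VA of the packed command payload */
	cmd->size = payload_size;	/* size of the command buffer */
	cmd->en_broadcast = false;
	cmd->is_master = false;
	cmd->use_lpm = true;		/* transmit in low power mode */
}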
+
+struct dsi_ctrl_hw;
+
+/**
+ * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
+ */
+struct dsi_ctrl_hw_ops {
+
+	/**
+	 * host_setup() - Setup DSI host configuration
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @config:        Configuration for DSI host controller
+	 */
+	void (*host_setup)(struct dsi_ctrl_hw *ctrl,
+			   struct dsi_host_common_cfg *config);
+
+	/**
+	 * video_engine_en() - enable DSI video engine
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @on:            Enable/disable video engine.
+	 */
+	void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * video_engine_setup() - Setup dsi host controller for video mode
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @common_cfg:    Common configuration parameters.
+	 * @cfg:           Video mode configuration.
+	 *
+	 * Set up DSI video engine with a specific configuration. Controller and
+	 * video engine are not enabled as part of this function.
+	 */
+	void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
+				   struct dsi_host_common_cfg *common_cfg,
+				   struct dsi_video_engine_cfg *cfg);
+
+	/**
+	 * set_video_timing() - set up the timing for video frame
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @mode:          Video mode information.
+	 *
+	 * Set up the video timing parameters for the DSI video mode operation.
+	 */
+	void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
+				 struct dsi_mode_info *mode);
+
+	/**
+	 * cmd_engine_setup() - setup dsi host controller for command mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @common_cfg:    Common configuration parameters.
+	 * @cfg:           Command mode configuration.
+	 *
+	 * Setup DSI CMD engine with a specific configuration. Controller and
+	 * command engine are not enabled as part of this function.
+	 */
+	void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
+				 struct dsi_host_common_cfg *common_cfg,
+				 struct dsi_cmd_engine_cfg *cfg);
+
+	/**
+	 * ctrl_en() - enable DSI controller engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @on:            turn on/off the DSI controller engine.
+	 */
+	void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * cmd_engine_en() - enable DSI controller command engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @on:            Turn on/off the DSI command engine.
+	 */
+	void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+	/**
+	 * phy_sw_reset() - perform a soft reset on the PHY.
+	 * @ctrl:        Pointer to the controller host hardware.
+	 */
+	void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * soft_reset() - perform a soft reset on DSI controller
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * The video, command and controller engines will be disabled before the
+	 * reset is triggered. These engines will not be enabled after the reset
+	 * is complete. Caller must re-enable the engines.
+	 *
+	 * If the reset is done while MDP timing engine is turned on, the video
+	 * engine should be re-enabled only during the vertical blanking time.
+	 */
+	void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * setup_lane_map() - setup mapping between logical and physical lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lane_map:      Structure defining the mapping between DSI logical
+	 *                 lanes and physical lanes.
+	 */
+	void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_lane_mapping *lane_map);
+
+	/**
+	 * kickoff_command() - transmits commands stored in memory
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @cmd:           Command information.
+	 * @flags:         Modifiers for command transmission.
+	 *
+	 * The controller hardware is programmed with address and size of the
+	 * command buffer. The transmission is kicked off if
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+	 * set, caller should make a separate call to trigger_command_dma() to
+	 * transmit the command.
+	 */
+	void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
+				struct dsi_ctrl_cmd_dma_info *cmd,
+				u32 flags);
+
+	/**
+	 * kickoff_fifo_command() - transmits a command using FIFO in dsi
+	 *                          hardware.
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @cmd:           Command information.
+	 * @flags:         Modifiers for command transmission.
+	 *
+	 * The controller hardware FIFO is programmed with command header and
+	 * payload. The transmission is kicked off if
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+	 * set, caller should make a separate call to trigger_command_dma() to
+	 * transmit the command.
+	 */
+	void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+				     u32 flags);
+
+	void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
+	/**
+	 * trigger_command_dma() - trigger transmission of command buffer.
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * This trigger can be only used if there was a prior call to
+	 * kickoff_command() or kickoff_fifo_command() with
+	 * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+	 */
+	void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * get_cmd_read_data() - get data read from the peripheral
+	 * @ctrl:           Pointer to the controller host hardware.
+	 * @rd_buf:         Buffer where data will be read into.
+	 * @total_read_len: Number of bytes to read.
+	 */
+	u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
+				 u8 *rd_buf,
+				 u32 total_read_len);
+
+	/**
+	 * ulps_request() - request ulps entry for specified lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+	 *                 to enter ULPS.
+	 *
+	 * Caller should check if lanes are in ULPS mode by calling
+	 * get_lanes_in_ulps() operation.
+	 */
+	void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * ulps_exit() - exit ULPS on specified lanes
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+	 *                 to exit ULPS.
+	 *
+	 * Caller should check if lanes are in active mode by calling
+	 * get_lanes_in_ulps() operation.
+	 */
+	void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * clear_ulps_request() - clear ulps request once all lanes are active
+	 * @ctrl:          Pointer to controller host hardware.
+	 * @lanes:         ORed list of lanes (enum dsi_data_lanes).
+	 *
+	 * ULPS request should be cleared after the lanes have exited ULPS.
+	 */
+	void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+	/**
+	 * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+	 * state. If 0 is returned, all the lanes are active.
+	 *
+	 * Return: List of lanes in ULPS state.
+	 */
+	u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @lanes:         ORed list of lanes which need to be clamped.
+	 * @enable_ulps:   TODO:??
+	 */
+	void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
+			     u32 lanes,
+			     bool enable_ulps);
+
+	/**
+	 * clamp_disable() - disable DSI clamps
+	 * @ctrl:         Pointer to the controller host hardware.
+	 * @lanes:        ORed list of lanes which need to have clamps released.
+	 * @disable_ulps: TODO:??
+	 */
+	void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
+			      u32 lanes,
+			      bool disable_ulps);
+
+	/**
+	 * get_interrupt_status() - returns the interrupt status
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+	 * are active. This list does not include any error interrupts. Caller
+	 * should call get_error_status for error interrupts.
+	 *
+	 * Return: List of active interrupts.
+	 */
+	u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clear_interrupt_status() - clears the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @ints:          List of interrupts to be cleared.
+	 */
+	void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+	/**
+	 * enable_status_interrupts() - enable the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @ints:          List of interrupts to be enabled.
+	 *
+	 * Enables the specified interrupts. This list will override the
+	 * previous interrupts enabled through this function. Caller has to
+	 * maintain the state of the interrupts enabled. To disable all
+	 * interrupts, set ints to 0.
+	 */
+	void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+	/**
+	 * get_error_status() - returns the error status
+	 * @ctrl:          Pointer to the controller host hardware.
+	 *
+	 * Returns the ORed list of errors(enum dsi_error_int_type) that are
+	 * active. This list does not include any status interrupts. Caller
+	 * should call get_interrupt_status for status interrupts.
+	 *
+	 * Return: List of active error interrupts.
+	 */
+	u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
+
+	/**
+	 * clear_error_status() - clears the specified errors
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @errors:          List of errors to be cleared.
+	 */
+	void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+	/**
+	 * enable_error_interrupts() - enable the specified interrupts
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @errors:        List of errors to be enabled.
+	 *
+	 * Enables the specified interrupts. This list will override the
+	 * previous interrupts enabled through this function. Caller has to
+	 * maintain the state of the interrupts enabled. To disable all
+	 * interrupts, set errors to 0.
+	 */
+	void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+	/**
+	 * video_test_pattern_setup() - setup test pattern engine for video mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @type:          Type of test pattern.
+	 * @init_val:      Initial value to use for generating test pattern.
+	 */
+	void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+					 enum dsi_test_pattern type,
+					 u32 init_val);
+
+	/**
+	 * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @type:          Type of test pattern.
+	 * @init_val:      Initial value to use for generating test pattern.
+	 * @stream_id:     Stream Id on which packets are generated.
+	 */
+	void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+				       enum dsi_test_pattern  type,
+				       u32 init_val,
+				       u32 stream_id);
+
+	/**
+	 * test_pattern_enable() - enable test pattern engine
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @enable:        Enable/Disable test pattern engine.
+	 */
+	void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+	/**
+	 * trigger_cmd_test_pattern() - trigger a command mode frame update with
+	 *                              test pattern
+	 * @ctrl:          Pointer to the controller host hardware.
+	 * @stream_id:     Stream on which frame update is sent.
+	 */
+	void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
+					 u32 stream_id);
+};
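An ISR-style sketch of how a caller might drive the status and error hooks above (assumed caller, not part of this patch; names are illustrative):

static void example_handle_irq(struct dsi_ctrl_hw *ctrl)
{
	u32 status = ctrl->ops.get_interrupt_status(ctrl);
	u64 errors = ctrl->ops.get_error_status(ctrl);

	if (status & DSI_CMD_MODE_DMA_DONE) {
		/* a command DMA transfer finished; complete waiters here */
	}

	ctrl->ops.clear_interrupt_status(ctrl, status);

	if (errors)
		ctrl->ops.clear_error_status(ctrl, errors);
}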
+
+/**
+ * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
+ * @base:                  VA for the DSI controller base address.
+ * @length:                Length of the DSI controller register map.
+ * @mmss_misc_base:        VA for the mmss_misc register base address.
+ * @mmss_misc_length:      Length of the mmss_misc register map.
+ * @index:                 Instance ID of the controller.
+ * @feature_map:           Features supported by the DSI controller.
+ * @ops:                   Function pointers to the operations supported by the
+ *                         controller.
+ * @supported_interrupts:  Status interrupts supported by the controller.
+ * @supported_errors:      Error interrupts supported by the controller.
+ */
+struct dsi_ctrl_hw {
+	void __iomem *base;
+	u32 length;
+	void __iomem *mmss_misc_base;
+	u32 mmss_misc_length;
+	u32 index;
+
+	/* features */
+	DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
+	struct dsi_ctrl_hw_ops ops;
+
+	/* capabilities */
+	u32 supported_interrupts;
+	u64 supported_errors;
+};
+
+#endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
new file mode 100644
index 0000000..8326024
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -0,0 +1,1321 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+#include <linux/delay.h>
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg_1_4.h"
+#include "dsi_hw.h"
+
+#define MMSS_MISC_CLAMP_REG_OFF           0x0014
+
+/* Unsupported formats default to RGB888 */
+static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+	0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+	0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+
+
+/**
+ * dsi_setup_trigger_controls() - setup dsi trigger configurations
+ * @ctrl:             Pointer to the controller host hardware.
+ * @cfg:              DSI host configuration that is common to both video and
+ *                    command modes.
+ */
+static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *cfg)
+{
+	u32 reg = 0;
+	const u8 trigger_map[DSI_TRIGGER_MAX] = {
+		0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
+
+	reg |= (cfg->te_mode == DSI_TE_ON_EXT_PIN) ? BIT(31) : 0;
+	reg |= (trigger_map[cfg->dma_cmd_trigger] & 0x7);
+	reg |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
+	DSI_W32(ctrl, DSI_TRIG_CTRL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration
+ * @ctrl:             Pointer to the controller host hardware.
+ * @cfg:              DSI host configuration that is common to both video and
+ *                    command modes.
+ */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_host_common_cfg *cfg)
+{
+	u32 reg_value = 0;
+
+	dsi_setup_trigger_controls(ctrl, cfg);
+
+	/* Setup clocking timing controls */
+	reg_value = ((cfg->t_clk_post & 0x3F) << 8);
+	reg_value |= (cfg->t_clk_pre & 0x3F);
+	DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value);
+
+	/* EOT packet control */
+	reg_value = cfg->append_tx_eot ? 1 : 0;
+	reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0);
+	DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value);
+
+	/* Turn on dsi clocks */
+	DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
+
+	/* Setup DSI control register */
+	reg_value = 0;
+	reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
+	reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
+	reg_value |= BIT(8); /* Clock lane */
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0);
+	reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0);
+
+	DSI_W32(ctrl, DSI_CTRL, reg_value);
+
+	/* Enable Timing double buffering */
+	DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+
+	pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
+}
+
+/**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl:        Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1);
+	udelay(1000);
+	DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
+	udelay(100);
+
+	pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
+}
+
+/**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disabled before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 reg_ctrl = 0;
+
+	/* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN */
+	reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
+	DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
+
+	/* Force enable PCLK, BYTECLK, AHBM_HCLK */
+	reg = DSI_R32(ctrl, DSI_CLK_CTRL);
+	reg |= 0x23F;
+	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+	/* Trigger soft reset */
+	DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+	udelay(1);
+	DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
+
+	/* Disable force clock on */
+	reg &= ~(BIT(20) | BIT(11));
+	DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+	/* Re-enable DSI controller */
+	DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
+	pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
+}
+
+/**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl:          Pointer to controller host hardware.
+ * @mode:          Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ */
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_mode_info *mode)
+{
+	u32 reg = 0;
+	u32 hs_start = 0;
+	u32 hs_end, active_h_start, active_h_end, h_total;
+	u32 vs_start = 0, vs_end = 0;
+	u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
+
+	hs_end = mode->h_sync_width;
+	active_h_start = mode->h_sync_width + mode->h_back_porch;
+	active_h_end = active_h_start + mode->h_active;
+	h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active +
+		   mode->h_front_porch) - 1;
+
+	vpos_end = mode->v_sync_width;
+	active_v_start = mode->v_sync_width + mode->v_back_porch;
+	active_v_end = active_v_start + mode->v_active;
+	v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
+		   mode->v_front_porch) - 1;
+
+	reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
+
+	reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
+
+	reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
+
+	reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
+
+	reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
+
+	reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
+
+	/* TODO: HS TIMER value? */
+	DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+	DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
+	DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
+	pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+}
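Worked example of the arithmetic above for a hypothetical 1920x1080 mode (hsync 44, hbp 148, hfp 88, vsync 5, vbp 36, vfp 4):

	active_h_start = 44 + 148                 = 192
	active_h_end   = 192 + 1920               = 2112
	h_total        = 44 + 148 + 1920 + 88 - 1 = 2199
	active_v_start = 5 + 36                   = 41
	active_v_end   = 41 + 1080                = 1121
	v_total        = 5 + 36 + 1080 + 4 - 1    = 1124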
+
+/**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl:          Pointer to controller host hardware.
+ * @common_cfg:    Common configuration parameters.
+ * @cfg:           Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+				       struct dsi_host_common_cfg *common_cfg,
+				       struct dsi_video_engine_cfg *cfg)
+{
+	u32 reg = 0;
+
+	reg |= (cfg->last_line_interleave_en ? BIT(31) : 0);
+	reg |= (cfg->pulse_mode_hsa_he ? BIT(28) : 0);
+	reg |= (cfg->hfp_lp11_en ? BIT(24) : 0);
+	reg |= (cfg->hbp_lp11_en ? BIT(20) : 0);
+	reg |= (cfg->hsa_lp11_en ? BIT(16) : 0);
+	reg |= (cfg->eof_bllp_lp11_en ? BIT(15) : 0);
+	reg |= (cfg->bllp_lp11_en ? BIT(12) : 0);
+	reg |= (cfg->traffic_mode & 0x3) << 8;
+	reg |= (cfg->vc_id & 0x3);
+	reg |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+	DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, reg);
+
+	reg = (common_cfg->swap_mode & 0x7) << 12;
+	reg |= (common_cfg->bit_swap_red ? BIT(0) : 0);
+	reg |= (common_cfg->bit_swap_green ? BIT(4) : 0);
+	reg |= (common_cfg->bit_swap_blue ? BIT(8) : 0);
+	DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @common_cfg:    Common configuration parameters.
+ * @cfg:           Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+				     struct dsi_host_common_cfg *common_cfg,
+				     struct dsi_cmd_engine_cfg *cfg)
+{
+	u32 reg = 0;
+
+	reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
+	reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
+	reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
+	reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
+	reg |= cmd_mode_format_map[common_cfg->dst_format];
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
+
+	reg = cfg->wr_mem_start & 0xFF;
+	reg |= (cfg->wr_mem_continue & 0xFF) << 8;
+	reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
+
+	pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
+}
+
+/**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl:          Pointer to controller host hardware.
+ * @on:            Enable/disable video engine.
+ */
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear VIDEO_MODE_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(1);
+	else
+		reg &= ~BIT(1);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @on:            turn on/off the DSI controller engine.
+ */
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear DSI_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(0);
+	else
+		reg &= ~BIT(0);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @on:            Turn on/off the DSI command engine.
+ */
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+	u32 reg = 0;
+
+	/* Set/Clear CMD_MODE_EN bit */
+	reg = DSI_R32(ctrl, DSI_CTRL);
+	if (on)
+		reg |= BIT(2);
+	else
+		reg &= ~BIT(2);
+
+	DSI_W32(ctrl, DSI_CTRL, reg);
+
+	pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lane_map:      Structure defining the mapping between DSI logical
+ *                 lanes and physical lanes.
+ */
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+			       struct dsi_lane_mapping *lane_map)
+{
+	u32 reg_value = 0;
+	u32 lane_number = ((lane_map->physical_lane0 * 1000)+
+			   (lane_map->physical_lane1 * 100) +
+			   (lane_map->physical_lane2 * 10) +
+			   (lane_map->physical_lane3));
+
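+	/*
+	 * Encode the four physical-to-logical lane assignments as decimal
+	 * digits (e.g. 0123, 3210) and translate the result into the
+	 * corresponding DSI_LANE_SWAP_CTRL value.
+	 */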
+	if (lane_number == 123)
+		reg_value = 0;
+	else if (lane_number == 3012)
+		reg_value = 1;
+	else if (lane_number == 2301)
+		reg_value = 2;
+	else if (lane_number == 1230)
+		reg_value = 3;
+	else if (lane_number == 321)
+		reg_value = 4;
+	else if (lane_number == 1032)
+		reg_value = 5;
+	else if (lane_number == 2103)
+		reg_value = 6;
+	else if (lane_number == 3210)
+		reg_value = 7;
+
+	DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
+
+	pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
+}
+
+/**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl:          Pointer to the controller host hardware.
+ * @cmd:           Command information.
+ * @flags:         Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+				    struct dsi_ctrl_cmd_dma_info *cmd,
+				    u32 flags)
+{
+	u32 reg = 0;
+
+	/* Set BROADCAST_EN and EMBEDDED_MODE */
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+	if (cmd->en_broadcast)
+		reg |= BIT(31);
+	else
+		reg &= ~BIT(31);
+
+	if (cmd->is_master)
+		reg |= BIT(30);
+	else
+		reg &= ~BIT(30);
+
+	if (cmd->use_lpm)
+		reg |= BIT(26);
+	else
+		reg &= ~BIT(26);
+
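+	/* EMBEDDED_MODE is always enabled for DMA command transfers */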
+	reg |= BIT(28);
+	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+	DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
+	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
+
+	/* wait for writes to complete before kick off */
+	wmb();
+
+	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+}
+
+/**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ *                          hardware.
+ * @ctrl:          Pointer to the controller host hardware.
+ * @cmd:           Command information.
+ * @flags:         Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+					 struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+					 u32 flags)
+{
+	u32 reg = 0, i = 0;
+	u32 *ptr = cmd->command;
+	/*
+	 * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
+	 * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
+	 */
+	reg = (BIT(1) | BIT(2) | (0x3 << 16));
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	/*
+	 * Program the FIFO with command buffer. Hardware requires an extra
+	 * DWORD (set to zero) if the length of command buffer is odd DWORDS.
+	 */
+	for (i = 0; i < cmd->size; i += 4) {
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
+		ptr++;
+	}
+
+	if ((cmd->size / 4) & 0x1)
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
+
+	/* Set BROADCAST_EN and EMBEDDED_MODE */
+	reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+	if (cmd->en_broadcast)
+		reg |= BIT(31);
+	else
+		reg &= ~BIT(31);
+
+	if (cmd->is_master)
+		reg |= BIT(30);
+	else
+		reg &= ~BIT(30);
+
+	if (cmd->use_lpm)
+		reg |= BIT(26);
+	else
+		reg &= ~BIT(26);
+
+	reg |= BIT(28);
+
+	DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+	DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFFFF));
+	/* Finish writes before command trigger */
+	wmb();
+
+	if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+		DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+
+	pr_debug("[DSI_%d]size=%d, trigger = %d\n",
+		 ctrl->index, cmd->size,
+		 (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
+}
+
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
+{
+	/* disable cmd dma tpg */
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
+
+	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
+	udelay(1);
+	DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
+}
+
+/**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
+{
+	DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+	pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
+}
+
+/**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl:           Pointer to the controller host hardware.
+ * @rd_buf:         Buffer where data will be read into.
+ * @read_offset:    Offset of this read chunk within the overall response;
+ *                  used to skip bytes repeated across consecutive reads.
+ * @total_read_len: Number of bytes to read.
+ *
+ * Return: Number of bytes read.
+ */
+u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+				     u8 *rd_buf,
+				     u32 read_offset,
+				     u32 total_read_len)
+{
+	u32 *lp, *temp, data;
+	int i, j = 0, cnt;
+	u32 read_cnt;
+	u32 rx_byte = 0;
+	u32 repeated_bytes = 0;
+	u8 reg[16];
+	u32 pkt_size = 0;
+	int buf_offset = read_offset;
+
+	lp = (u32 *)rd_buf;
+	temp = (u32 *)reg;
+	cnt = (rx_byte + 3) >> 2;
+
+	if (cnt > 4)
+		cnt = 4;
+
+	if (rx_byte == 4)
+		read_cnt = 4;
+	else
+		read_cnt = pkt_size + 6;
+
+	if (read_cnt > 16) {
+		int bytes_shifted;
+
+		bytes_shifted = read_cnt - 16;
+		repeated_bytes = buf_offset - bytes_shifted;
+	}
+
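+	/*
+	 * Read-back data registers hold the response in network byte order;
+	 * read them in reverse so reg[] holds the bytes in received order.
+	 */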
+	for (i = cnt - 1; i >= 0; i--) {
+		data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
+		*temp++ = ntohl(data);
+	}
+
+	for (i = repeated_bytes; i < 16; i++)
+		rd_buf[j++] = reg[i];
+
+	pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
+	return j;
+}
+
+/**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+ *                 to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	if (lanes & DSI_CLOCK_LANE)
+		reg = BIT(4);
+	if (lanes & DSI_DATA_LANE_0)
+		reg |= BIT(0);
+	if (lanes & DSI_DATA_LANE_1)
+		reg |= BIT(1);
+	if (lanes & DSI_DATA_LANE_2)
+		reg |= BIT(2);
+	if (lanes & DSI_DATA_LANE_3)
+		reg |= BIT(3);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
+		 lanes);
+}
+
+/**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes) which need
+ *                 to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	if (lanes & DSI_CLOCK_LANE)
+		reg |= BIT(12);
+	if (lanes & DSI_DATA_LANE_0)
+		reg |= BIT(8);
+	if (lanes & DSI_DATA_LANE_1)
+		reg |= BIT(9);
+	if (lanes & DSI_DATA_LANE_2)
+		reg |= BIT(10);
+	if (lanes & DSI_DATA_LANE_3)
+		reg |= BIT(11);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+	pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
+		 ctrl->index, lanes);
+}
+
+/**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl:          Pointer to controller host hardware.
+ * @lanes:         ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+	u32 reg = 0;
+
+	reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+	reg &= ~BIT(4); /* clock lane */
+	if (lanes & DSI_DATA_LANE_0)
+		reg &= ~BIT(0);
+	if (lanes & DSI_DATA_LANE_1)
+		reg &= ~BIT(1);
+	if (lanes & DSI_DATA_LANE_2)
+		reg &= ~BIT(2);
+	if (lanes & DSI_DATA_LANE_3)
+		reg &= ~BIT(3);
+
+	DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+	/*
+	 * HPG recommends separate writes for clearing ULPS_REQUEST and
+	 * ULPS_EXIT.
+	 */
+	DSI_W32(ctrl, DSI_LANE_CTRL, 0x0);
+
+	pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
+}
+
+/**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 lanes = 0;
+
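+	/* A cleared status bit means the corresponding lane is in ULPS */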
+	reg = DSI_R32(ctrl, DSI_LANE_STATUS);
+	if (!(reg & BIT(8)))
+		lanes |= DSI_DATA_LANE_0;
+	if (!(reg & BIT(9)))
+		lanes |= DSI_DATA_LANE_1;
+	if (!(reg & BIT(10)))
+		lanes |= DSI_DATA_LANE_2;
+	if (!(reg & BIT(11)))
+		lanes |= DSI_DATA_LANE_3;
+	if (!(reg & BIT(12)))
+		lanes |= DSI_CLOCK_LANE;
+
+	pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
+	return lanes;
+}
+
+/**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes which need to be clamped.
+ * @enable_ulps:   TODO:??
+ */
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+				 u32 lanes,
+				 bool enable_ulps)
+{
+	u32 clamp_reg = 0;
+	u32 bit_shift = 0;
+	u32 reg = 0;
+
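+	/* Clamp controls for DSI controller 1 are in the upper 16 bits */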
+	if (ctrl->index == 1)
+		bit_shift = 16;
+
+	if (lanes & DSI_CLOCK_LANE) {
+		clamp_reg |= BIT(9);
+		if (enable_ulps)
+			clamp_reg |= BIT(8);
+	}
+
+	if (lanes & DSI_DATA_LANE_0) {
+		clamp_reg |= BIT(7);
+		if (enable_ulps)
+			clamp_reg |= BIT(6);
+	}
+
+	if (lanes & DSI_DATA_LANE_1) {
+		clamp_reg |= BIT(5);
+		if (enable_ulps)
+			clamp_reg |= BIT(4);
+	}
+
+	if (lanes & DSI_DATA_LANE_2) {
+		clamp_reg |= BIT(3);
+		if (enable_ulps)
+			clamp_reg |= BIT(2);
+	}
+
+	if (lanes & DSI_DATA_LANE_3) {
+		clamp_reg |= BIT(1);
+		if (enable_ulps)
+			clamp_reg |= BIT(0);
+	}
+
+	clamp_reg |= BIT(15); /* Enable clamp */
+
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg |= (clamp_reg << bit_shift);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
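+	/* Enable PHY reset skip (cleared again in clamp_disable) */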
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg |= BIT(30);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
+		 lanes);
+}
+
+/**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl:          Pointer to the controller host hardware.
+ * @lanes:         ORed list of lanes which need to have clamps released.
+ * @disable_ulps:   TODO:??
+ */
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+				  u32 lanes,
+				  bool disable_ulps)
+{
+	u32 clamp_reg = 0;
+	u32 bit_shift = 0;
+	u32 reg = 0;
+
+	if (ctrl->index == 1)
+		bit_shift = 16;
+
+	if (lanes & DSI_CLOCK_LANE) {
+		clamp_reg |= BIT(9);
+		if (disable_ulps)
+			clamp_reg |= BIT(8);
+	}
+
+	if (lanes & DSI_DATA_LANE_0) {
+		clamp_reg |= BIT(7);
+		if (disable_ulps)
+			clamp_reg |= BIT(6);
+	}
+
+	if (lanes & DSI_DATA_LANE_1) {
+		clamp_reg |= BIT(5);
+		if (disable_ulps)
+			clamp_reg |= BIT(4);
+	}
+
+	if (lanes & DSI_DATA_LANE_2) {
+		clamp_reg |= BIT(3);
+		if (disable_ulps)
+			clamp_reg |= BIT(2);
+	}
+
+	if (lanes & DSI_DATA_LANE_3) {
+		clamp_reg |= BIT(1);
+		if (disable_ulps)
+			clamp_reg |= BIT(0);
+	}
+
+	clamp_reg |= BIT(15); /* Enable clamp */
+	clamp_reg <<= bit_shift;
+
+	/* Disable PHY reset skip */
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg &= ~BIT(30);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+	reg &= ~(clamp_reg);
+	DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+	pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
+}
+
+/**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 reg = 0;
+	u32 ints = 0;
+
+	reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
+	if (reg & BIT(0))
+		ints |= DSI_CMD_MODE_DMA_DONE;
+	if (reg & BIT(8))
+		ints |= DSI_CMD_FRAME_DONE;
+	if (reg & BIT(10))
+		ints |= DSI_CMD_STREAM0_FRAME_DONE;
+	if (reg & BIT(12))
+		ints |= DSI_CMD_STREAM1_FRAME_DONE;
+	if (reg & BIT(14))
+		ints |= DSI_CMD_STREAM2_FRAME_DONE;
+	if (reg & BIT(16))
+		ints |= DSI_VIDEO_MODE_FRAME_DONE;
+	if (reg & BIT(20))
+		ints |= DSI_BTA_DONE;
+	if (reg & BIT(28))
+		ints |= DSI_DYN_REFRESH_DONE;
+	if (reg & BIT(30))
+		ints |= DSI_DESKEW_DONE;
+
+	pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+	return ints;
+}
+
+/**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @ints:          List of interrupts to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+	u32 reg = 0;
+
+	if (ints & DSI_CMD_MODE_DMA_DONE)
+		reg |= BIT(0);
+	if (ints & DSI_CMD_FRAME_DONE)
+		reg |= BIT(8);
+	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+		reg |= BIT(10);
+	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+		reg |= BIT(12);
+	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+		reg |= BIT(14);
+	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+		reg |= BIT(16);
+	if (ints & DSI_BTA_DONE)
+		reg |= BIT(20);
+	if (ints & DSI_DYN_REFRESH_DONE)
+		reg |= BIT(28);
+	if (ints & DSI_DESKEW_DONE)
+		reg |= BIT(30);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+}
+
+/**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @ints:          List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+	u32 reg = 0;
+
+	/* Do not change value of DSI_ERROR_MASK bit */
+	reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
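+	/* Status interrupt enable bits sit one position above the status bits */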
+	if (ints & DSI_CMD_MODE_DMA_DONE)
+		reg |= BIT(1);
+	if (ints & DSI_CMD_FRAME_DONE)
+		reg |= BIT(9);
+	if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+		reg |= BIT(11);
+	if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+		reg |= BIT(13);
+	if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+		reg |= BIT(15);
+	if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+		reg |= BIT(17);
+	if (ints & DSI_BTA_DONE)
+		reg |= BIT(21);
+	if (ints & DSI_DYN_REFRESH_DONE)
+		reg |= BIT(29);
+	if (ints & DSI_DESKEW_DONE)
+		reg |= BIT(31);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+	pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
+		 ctrl->index, ints, reg);
+}
+
+/**
+ * get_error_status() - returns the error status
+ * @ctrl:          Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors(enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl)
+{
+	u32 dln0_phy_err;
+	u32 fifo_status;
+	u32 ack_error;
+	u32 timeout_errors;
+	u32 clk_error;
+	u32 dsi_status;
+	u64 errors = 0;
+
+	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
+	if (dln0_phy_err & BIT(0))
+		errors |= DSI_DLN0_ESC_ENTRY_ERR;
+	if (dln0_phy_err & BIT(4))
+		errors |= DSI_DLN0_ESC_SYNC_ERR;
+	if (dln0_phy_err & BIT(8))
+		errors |= DSI_DLN0_LP_CONTROL_ERR;
+	if (dln0_phy_err & BIT(12))
+		errors |= DSI_DLN0_LP0_CONTENTION;
+	if (dln0_phy_err & BIT(16))
+		errors |= DSI_DLN0_LP1_CONTENTION;
+
+	fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
+	if (fifo_status & BIT(7))
+		errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(10))
+		errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(18))
+		errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(19))
+		errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(22))
+		errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(23))
+		errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(26))
+		errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(27))
+		errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
+	if (fifo_status & BIT(30))
+		errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
+	if (fifo_status & BIT(31))
+		errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;
+
+	ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
+	if (ack_error & BIT(16))
+		errors |= DSI_RDBK_SINGLE_ECC_ERR;
+	if (ack_error & BIT(17))
+		errors |= DSI_RDBK_MULTI_ECC_ERR;
+	if (ack_error & BIT(20))
+		errors |= DSI_RDBK_CRC_ERR;
+	if (ack_error & BIT(23))
+		errors |= DSI_RDBK_INCOMPLETE_PKT;
+	if (ack_error & BIT(24))
+		errors |= DSI_PERIPH_ERROR_PKT;
+
+	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
+	if (timeout_errors & BIT(0))
+		errors |= DSI_HS_TX_TIMEOUT;
+	if (timeout_errors & BIT(4))
+		errors |= DSI_LP_RX_TIMEOUT;
+	if (timeout_errors & BIT(8))
+		errors |= DSI_BTA_TIMEOUT;
+
+	clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
+	if (clk_error & BIT(16))
+		errors |= DSI_PLL_UNLOCK;
+
+	dsi_status = DSI_R32(ctrl, DSI_STATUS);
+	if (dsi_status & BIT(31))
+		errors |= DSI_INTERLEAVE_OP_CONTENTION;
+
+	pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
+	return errors;
+}
+
+/**
+ * clear_error_status() - clears the specified errors
+ * @ctrl:          Pointer to the controller host hardware.
+ * @errors:          List of errors to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
+{
+	u32 dln0_phy_err = 0;
+	u32 fifo_status = 0;
+	u32 ack_error = 0;
+	u32 timeout_error = 0;
+	u32 clk_error = 0;
+	u32 dsi_status = 0;
+	u32 int_ctrl = 0;
+
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		ack_error |= BIT(16);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		ack_error |= BIT(17);
+	if (errors & DSI_RDBK_CRC_ERR)
+		ack_error |= BIT(20);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		ack_error |= BIT(23);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		ack_error |= BIT(24);
+
+	if (errors & DSI_LP_RX_TIMEOUT)
+		timeout_error |= BIT(4);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		timeout_error |= BIT(0);
+	if (errors & DSI_BTA_TIMEOUT)
+		timeout_error |= BIT(8);
+
+	if (errors & DSI_PLL_UNLOCK)
+		clk_error |= BIT(16);
+
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		dln0_phy_err |= BIT(12);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		dln0_phy_err |= BIT(16);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		dln0_phy_err |= BIT(0);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		dln0_phy_err |= BIT(4);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		dln0_phy_err |= BIT(8);
+
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		fifo_status |= BIT(10);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		fifo_status |= BIT(7);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(18);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(22);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(26);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		fifo_status |= BIT(30);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(19);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(23);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(27);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		fifo_status |= BIT(31);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		dsi_status |= BIT(31);
+
+	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
+	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
+	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
+	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
+	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
+	DSI_W32(ctrl, DSI_STATUS, dsi_status);
+
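+	/* Ack the error interrupt status bit in DSI_INT_CTRL */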
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	int_ctrl |= BIT(24);
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
+		 ctrl->index, errors, dln0_phy_err, fifo_status);
+	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
+		 ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
+}
+
+/**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl:          Pointer to the controller host hardware.
+ * @errors:        List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+					    u64 errors)
+{
+	u32 int_ctrl = 0;
+	u32 int_mask0 = 0x7FFF3BFF;
+
+	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
+	if (errors)
+		int_ctrl |= BIT(25);
+	else
+		int_ctrl &= ~BIT(25);
+
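+	/*
+	 * Start from the default mask value and clear the mask bit for each
+	 * requested error so that it can raise an interrupt.
+	 */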
+	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
+		int_mask0 &= ~BIT(0);
+	if (errors & DSI_RDBK_MULTI_ECC_ERR)
+		int_mask0 &= ~BIT(1);
+	if (errors & DSI_RDBK_CRC_ERR)
+		int_mask0 &= ~BIT(2);
+	if (errors & DSI_RDBK_INCOMPLETE_PKT)
+		int_mask0 &= ~BIT(3);
+	if (errors & DSI_PERIPH_ERROR_PKT)
+		int_mask0 &= ~BIT(4);
+
+	if (errors & DSI_LP_RX_TIMEOUT)
+		int_mask0 &= ~BIT(5);
+	if (errors & DSI_HS_TX_TIMEOUT)
+		int_mask0 &= ~BIT(6);
+	if (errors & DSI_BTA_TIMEOUT)
+		int_mask0 &= ~BIT(7);
+
+	if (errors & DSI_PLL_UNLOCK)
+		int_mask0 &= ~BIT(28);
+
+	if (errors & DSI_DLN0_LP0_CONTENTION)
+		int_mask0 &= ~BIT(24);
+	if (errors & DSI_DLN0_LP1_CONTENTION)
+		int_mask0 &= ~BIT(25);
+	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
+		int_mask0 &= ~BIT(21);
+	if (errors & DSI_DLN0_ESC_SYNC_ERR)
+		int_mask0 &= ~BIT(22);
+	if (errors & DSI_DLN0_LP_CONTROL_ERR)
+		int_mask0 &= ~BIT(23);
+
+	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(9);
+	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(11);
+	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(16);
+	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(17);
+	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(18);
+	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
+		int_mask0 &= ~BIT(19);
+	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(26);
+	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(27);
+	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(29);
+	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
+		int_mask0 &= ~BIT(30);
+
+	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
+		int_mask0 &= ~BIT(8);
+
+	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
+	DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);
+
+	pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
+		 ctrl->index, errors, int_mask0);
+}
+
+/**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @type:          Type of test pattern.
+ * @init_val:      Initial value to use for generating test pattern.
+ */
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					     enum dsi_test_pattern type,
+					     u32 init_val)
+{
+	u32 reg = 0;
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
+
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << 4);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << 4);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
+	DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_test_pattern_setup() - setup test pattern engine for cmd mode
+ * @ctrl:          Pointer to the controller host hardware.
+ * @type:          Type of test pattern.
+ * @init_val:      Initial value to use for generating test pattern.
+ * @stream_id:     Stream Id on which packets are generated.
+ */
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+					   enum dsi_test_pattern type,
+					   u32 init_val,
+					   u32 stream_id)
+{
+	u32 reg = 0;
+	u32 init_offset;
+	u32 poly_offset;
+	u32 pattern_sel_shift;
+
+	switch (stream_id) {
+	case 0:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
+		pattern_sel_shift = 8;
+		break;
+	case 1:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
+		pattern_sel_shift = 12;
+		break;
+	case 2:
+		init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
+		poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
+		pattern_sel_shift = 20;
+		break;
+	default:
+		return;
+	}
+
+	DSI_W32(ctrl, init_offset, init_val);
+
+	switch (type) {
+	case DSI_TEST_PATTERN_FIXED:
+		reg |= (0x2 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_INC:
+		reg |= (0x1 << pattern_sel_shift);
+		break;
+	case DSI_TEST_PATTERN_POLY:
+		DSI_W32(ctrl, poly_offset, 0xF0F0F);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+	pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl:          Pointer to the controller host hardware.
+ * @enable:        Enable/Disable test pattern engine.
+ */
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
+					bool enable)
+{
+	u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
+
+	if (enable)
+		reg |= BIT(0);
+	else
+		reg &= ~BIT(0);
+
+	DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+	pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
+}
+
+/**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ *                              test pattern
+ * @ctrl:          Pointer to the controller host hardware.
+ * @stream_id:     Stream on which frame update is sent.
+ */
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+					     u32 stream_id)
+{
+	switch (stream_id) {
+	case 0:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
+		break;
+	case 1:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
+		break;
+	case 2:
+		DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
+		break;
+	default:
+		break;
+	}
+
+	pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
new file mode 100644
index 0000000..028ad46
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_REG_H_
+#define _DSI_CTRL_REG_H_
+
+#define DSI_HW_VERSION                             (0x0000)
+#define DSI_CTRL                                   (0x0004)
+#define DSI_STATUS                                 (0x0008)
+#define DSI_FIFO_STATUS                            (0x000C)
+#define DSI_VIDEO_MODE_CTRL                        (0x0010)
+#define DSI_VIDEO_MODE_SYNC_DATATYPE               (0x0014)
+#define DSI_VIDEO_MODE_PIXEL_DATATYPE              (0x0018)
+#define DSI_VIDEO_MODE_BLANKING_DATATYPE           (0x001C)
+#define DSI_VIDEO_MODE_DATA_CTRL                   (0x0020)
+#define DSI_VIDEO_MODE_ACTIVE_H                    (0x0024)
+#define DSI_VIDEO_MODE_ACTIVE_V                    (0x0028)
+#define DSI_VIDEO_MODE_TOTAL                       (0x002C)
+#define DSI_VIDEO_MODE_HSYNC                       (0x0030)
+#define DSI_VIDEO_MODE_VSYNC                       (0x0034)
+#define DSI_VIDEO_MODE_VSYNC_VPOS                  (0x0038)
+#define DSI_COMMAND_MODE_DMA_CTRL                  (0x003C)
+#define DSI_COMMAND_MODE_MDP_CTRL                  (0x0040)
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL          (0x0044)
+#define DSI_DMA_CMD_OFFSET                         (0x0048)
+#define DSI_DMA_CMD_LENGTH                         (0x004C)
+#define DSI_DMA_FIFO_CTRL                          (0x0050)
+#define DSI_DMA_NULL_PACKET_DATA                   (0x0054)
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL          (0x0058)
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL         (0x005C)
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL          (0x0060)
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL         (0x0064)
+#define DSI_ACK_ERR_STATUS                         (0x0068)
+#define DSI_RDBK_DATA0                             (0x006C)
+#define DSI_RDBK_DATA1                             (0x0070)
+#define DSI_RDBK_DATA2                             (0x0074)
+#define DSI_RDBK_DATA3                             (0x0078)
+#define DSI_RDBK_DATATYPE0                         (0x007C)
+#define DSI_RDBK_DATATYPE1                         (0x0080)
+#define DSI_TRIG_CTRL                              (0x0084)
+#define DSI_EXT_MUX                                (0x0088)
+#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL           (0x008C)
+#define DSI_CMD_MODE_DMA_SW_TRIGGER                (0x0090)
+#define DSI_CMD_MODE_MDP_SW_TRIGGER                (0x0094)
+#define DSI_CMD_MODE_BTA_SW_TRIGGER                (0x0098)
+#define DSI_RESET_SW_TRIGGER                       (0x009C)
+#define DSI_MISR_CMD_CTRL                          (0x00A0)
+#define DSI_MISR_VIDEO_CTRL                        (0x00A4)
+#define DSI_LANE_STATUS                            (0x00A8)
+#define DSI_LANE_CTRL                              (0x00AC)
+#define DSI_LANE_SWAP_CTRL                         (0x00B0)
+#define DSI_DLN0_PHY_ERR                           (0x00B4)
+#define DSI_LP_TIMER_CTRL                          (0x00B8)
+#define DSI_HS_TIMER_CTRL                          (0x00BC)
+#define DSI_TIMEOUT_STATUS                         (0x00C0)
+#define DSI_CLKOUT_TIMING_CTRL                     (0x00C4)
+#define DSI_EOT_PACKET                             (0x00C8)
+#define DSI_EOT_PACKET_CTRL                        (0x00CC)
+#define DSI_GENERIC_ESC_TX_TRIGGER                 (0x00D0)
+#define DSI_CAM_BIST_CTRL                          (0x00D4)
+#define DSI_CAM_BIST_FRAME_SIZE                    (0x00D8)
+#define DSI_CAM_BIST_BLOCK_SIZE                    (0x00DC)
+#define DSI_CAM_BIST_FRAME_CONFIG                  (0x00E0)
+#define DSI_CAM_BIST_LSFR_CTRL                     (0x00E4)
+#define DSI_CAM_BIST_LSFR_INIT                     (0x00E8)
+#define DSI_CAM_BIST_START                         (0x00EC)
+#define DSI_CAM_BIST_STATUS                        (0x00F0)
+#define DSI_ERR_INT_MASK0                          (0x010C)
+#define DSI_INT_CTRL                               (0x0110)
+#define DSI_IOBIST_CTRL                            (0x0114)
+#define DSI_SOFT_RESET                             (0x0118)
+#define DSI_CLK_CTRL                               (0x011C)
+#define DSI_CLK_STATUS                             (0x0120)
+#define DSI_PHY_SW_RESET                           (0x012C)
+#define DSI_AXI2AHB_CTRL                           (0x0130)
+#define DSI_MISR_CMD_MDP0_32BIT                    (0x0134)
+#define DSI_MISR_CMD_MDP1_32BIT                    (0x0138)
+#define DSI_MISR_CMD_DMA_32BIT                     (0x013C)
+#define DSI_MISR_VIDEO_32BIT                       (0x0140)
+#define DSI_LANE_MISR_CTRL                         (0x0144)
+#define DSI_LANE0_MISR                             (0x0148)
+#define DSI_LANE1_MISR                             (0x014C)
+#define DSI_LANE2_MISR                             (0x0150)
+#define DSI_LANE3_MISR                             (0x0154)
+#define DSI_TEST_PATTERN_GEN_CTRL                  (0x015C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_POLY            (0x0160)
+#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL        (0x0164)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY  (0x0168)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0     (0x016C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY  (0x0170)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1     (0x0174)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY          (0x0178)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL      (0x017C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE          (0x0180)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER   (0x0184)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER   (0x0188)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2     (0x018C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY  (0x0190)
+#define DSI_COMMAND_MODE_MDP_IDLE_CTRL             (0x0194)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER   (0x0198)
+#define DSI_TPG_MAIN_CONTROL                       (0x019C)
+#define DSI_TPG_MAIN_CONTROL2                      (0x01A0)
+#define DSI_TPG_VIDEO_CONFIG                       (0x01A4)
+#define DSI_TPG_COMPONENT_LIMITS                   (0x01A8)
+#define DSI_TPG_RECTANGLE                          (0x01AC)
+#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES         (0x01B0)
+#define DSI_TPG_RGB_MAPPING                        (0x01B4)
+#define DSI_COMMAND_MODE_MDP_CTRL2                 (0x01B8)
+#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL          (0x01BC)
+#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL         (0x01C0)
+#define DSI_MISR_CMD_MDP2_8BIT                     (0x01C4)
+#define DSI_MISR_CMD_MDP2_32BIT                    (0x01C8)
+#define DSI_VBIF_CTRL                              (0x01CC)
+#define DSI_AES_CTRL                               (0x01D0)
+#define DSI_RDBK_DATA_CTRL                         (0x01D4)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2     (0x01D8)
+#define DSI_TPG_DMA_FIFO_STATUS                    (0x01DC)
+#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER             (0x01E0)
+#define DSI_DSI_TIMING_FLUSH                       (0x01E4)
+#define DSI_DSI_TIMING_DB_MODE                     (0x01E8)
+#define DSI_TPG_DMA_FIFO_RESET                     (0x01EC)
+#define DSI_SCRATCH_REGISTER_0                     (0x01F0)
+#define DSI_VERSION                                (0x01F4)
+#define DSI_SCRATCH_REGISTER_1                     (0x01F8)
+#define DSI_SCRATCH_REGISTER_2                     (0x01FC)
+#define DSI_DYNAMIC_REFRESH_CTRL                   (0x0200)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY             (0x0204)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2            (0x0208)
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY              (0x020C)
+#define DSI_DYNAMIC_REFRESH_STATUS                 (0x0210)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0              (0x0214)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1              (0x0218)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2              (0x021C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3              (0x0220)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4              (0x0224)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5              (0x0228)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6              (0x022C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7              (0x0230)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8              (0x0234)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9              (0x0238)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10             (0x023C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11             (0x0240)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12             (0x0244)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13             (0x0248)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14             (0x024C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15             (0x0250)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16             (0x0254)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17             (0x0258)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18             (0x025C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19             (0x0260)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20             (0x0264)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21             (0x0268)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22             (0x026C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23             (0x0270)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24             (0x0274)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25             (0x0278)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26             (0x027C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27             (0x0280)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28             (0x0284)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29             (0x0288)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30             (0x028C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31             (0x0290)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR         (0x0294)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2        (0x0298)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL            (0x02A0)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL2           (0x02A4)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL          (0x02A8)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2         (0x02AC)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL3         (0x02B0)
+#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL       (0x02B4)
+#define DSI_READ_BACK_DISABLE_STATUS               (0x02B8)
+#define DSI_DESKEW_CTRL                            (0x02BC)
+#define DSI_DESKEW_DELAY_CTRL                      (0x02C0)
+#define DSI_DESKEW_SW_TRIGGER                      (0x02C4)
+#define DSI_SECURE_DISPLAY_STATUS                  (0x02CC)
+#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR     (0x02D0)
+#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR       (0x02D4)
+
+
+#endif /* _DSI_CTRL_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
new file mode 100644
index 0000000..ded7ed3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DEFS_H_
+#define _DSI_DEFS_H_
+
+#include <linux/types.h>
+
+#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
+			((t)->h_sync_width) + ((t)->h_front_porch))
+
+#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
+			((t)->v_sync_width) + ((t)->v_front_porch))
+
+/**
+ * enum dsi_pixel_format - DSI pixel formats
+ * @DSI_PIXEL_FORMAT_RGB565:
+ * @DSI_PIXEL_FORMAT_RGB666:
+ * @DSI_PIXEL_FORMAT_RGB666_LOOSE:
+ * @DSI_PIXEL_FORMAT_RGB888:
+ * @DSI_PIXEL_FORMAT_RGB111:
+ * @DSI_PIXEL_FORMAT_RGB332:
+ * @DSI_PIXEL_FORMAT_RGB444:
+ * @DSI_PIXEL_FORMAT_MAX:
+ */
+enum dsi_pixel_format {
+	DSI_PIXEL_FORMAT_RGB565 = 0,
+	DSI_PIXEL_FORMAT_RGB666,
+	DSI_PIXEL_FORMAT_RGB666_LOOSE,
+	DSI_PIXEL_FORMAT_RGB888,
+	DSI_PIXEL_FORMAT_RGB111,
+	DSI_PIXEL_FORMAT_RGB332,
+	DSI_PIXEL_FORMAT_RGB444,
+	DSI_PIXEL_FORMAT_MAX
+};
+
+/**
+ * enum dsi_op_mode - dsi operation mode
+ * @DSI_OP_VIDEO_MODE: DSI video mode operation
+ * @DSI_OP_CMD_MODE:   DSI Command mode operation
+ * @DSI_OP_MODE_MAX:
+ */
+enum dsi_op_mode {
+	DSI_OP_VIDEO_MODE = 0,
+	DSI_OP_CMD_MODE,
+	DSI_OP_MODE_MAX
+};
+
+/**
+ * enum dsi_data_lanes - dsi physical lanes
+ * @DSI_DATA_LANE_0: Physical lane 0
+ * @DSI_DATA_LANE_1: Physical lane 1
+ * @DSI_DATA_LANE_2: Physical lane 2
+ * @DSI_DATA_LANE_3: Physical lane 3
+ * @DSI_CLOCK_LANE:  Physical clock lane
+ */
+enum dsi_data_lanes {
+	DSI_DATA_LANE_0 = BIT(0),
+	DSI_DATA_LANE_1 = BIT(1),
+	DSI_DATA_LANE_2 = BIT(2),
+	DSI_DATA_LANE_3 = BIT(3),
+	DSI_CLOCK_LANE  = BIT(4)
+};
+
+/**
+ * enum dsi_logical_lane - dsi logical lanes
+ * @DSI_LOGICAL_LANE_0:     Logical lane 0
+ * @DSI_LOGICAL_LANE_1:     Logical lane 1
+ * @DSI_LOGICAL_LANE_2:     Logical lane 2
+ * @DSI_LOGICAL_LANE_3:     Logical lane 3
+ * @DSI_LOGICAL_CLOCK_LANE: Clock lane
+ * @DSI_LANE_MAX:           Maximum lanes supported
+ */
+enum dsi_logical_lane {
+	DSI_LOGICAL_LANE_0 = 0,
+	DSI_LOGICAL_LANE_1,
+	DSI_LOGICAL_LANE_2,
+	DSI_LOGICAL_LANE_3,
+	DSI_LOGICAL_CLOCK_LANE,
+	DSI_LANE_MAX
+};
+
+/**
+ * enum dsi_trigger_type - dsi trigger type
+ * @DSI_TRIGGER_NONE:     No trigger.
+ * @DSI_TRIGGER_TE:       TE trigger.
+ * @DSI_TRIGGER_SEOF:     Start or End of frame.
+ * @DSI_TRIGGER_SW:       Software trigger.
+ * @DSI_TRIGGER_SW_SEOF:  Software trigger and start/end of frame.
+ * @DSI_TRIGGER_SW_TE:    Software and TE triggers.
+ * @DSI_TRIGGER_MAX:      Max trigger values.
+ */
+enum dsi_trigger_type {
+	DSI_TRIGGER_NONE = 0,
+	DSI_TRIGGER_TE,
+	DSI_TRIGGER_SEOF,
+	DSI_TRIGGER_SW,
+	DSI_TRIGGER_SW_SEOF,
+	DSI_TRIGGER_SW_TE,
+	DSI_TRIGGER_MAX
+};
+
+/**
+ * enum dsi_color_swap_mode - color swap mode
+ * @DSI_COLOR_SWAP_RGB:
+ * @DSI_COLOR_SWAP_RBG:
+ * @DSI_COLOR_SWAP_BGR:
+ * @DSI_COLOR_SWAP_BRG:
+ * @DSI_COLOR_SWAP_GRB:
+ * @DSI_COLOR_SWAP_GBR:
+ */
+enum dsi_color_swap_mode {
+	DSI_COLOR_SWAP_RGB = 0,
+	DSI_COLOR_SWAP_RBG,
+	DSI_COLOR_SWAP_BGR,
+	DSI_COLOR_SWAP_BRG,
+	DSI_COLOR_SWAP_GRB,
+	DSI_COLOR_SWAP_GBR
+};
+
+/**
+ * enum dsi_dfps_type - Dynamic FPS support type
+ * @DSI_DFPS_NONE:           Dynamic FPS is not supported.
+ * @DSI_DFPS_SUSPEND_RESUME:
+ * @DSI_DFPS_IMMEDIATE_CLK:
+ * @DSI_DFPS_IMMEDIATE_HFP:
+ * @DSI_DFPS_IMMEDIATE_VFP:
+ * @DSI_DFPS_MAX:
+ */
+enum dsi_dfps_type {
+	DSI_DFPS_NONE = 0,
+	DSI_DFPS_SUSPEND_RESUME,
+	DSI_DFPS_IMMEDIATE_CLK,
+	DSI_DFPS_IMMEDIATE_HFP,
+	DSI_DFPS_IMMEDIATE_VFP,
+	DSI_DFPS_MAX
+};
+
+/**
+ * enum dsi_phy_type - DSI phy types
+ * @DSI_PHY_TYPE_DPHY:
+ * @DSI_PHY_TYPE_CPHY:
+ */
+enum dsi_phy_type {
+	DSI_PHY_TYPE_DPHY,
+	DSI_PHY_TYPE_CPHY
+};
+
+/**
+ * enum dsi_te_mode - dsi te source
+ * @DSI_TE_ON_DATA_LINK:    TE read from DSI link
+ * @DSI_TE_ON_EXT_PIN:      TE signal on an external GPIO
+ */
+enum dsi_te_mode {
+	DSI_TE_ON_DATA_LINK = 0,
+	DSI_TE_ON_EXT_PIN,
+};
+
+/**
+ * enum dsi_video_traffic_mode - video mode pixel transmission type
+ * @DSI_VIDEO_TRAFFIC_SYNC_PULSES:       Non-burst mode with sync pulses.
+ * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
+ * @DSI_VIDEO_TRAFFIC_BURST_MODE:        Burst mode using sync start events.
+ */
+enum dsi_video_traffic_mode {
+	DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
+	DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
+	DSI_VIDEO_TRAFFIC_BURST_MODE,
+};
+
+/**
+ * struct dsi_mode_info - video mode information dsi frame
+ * @h_active:         Active width of one frame in pixels.
+ * @h_back_porch:     Horizontal back porch in pixels.
+ * @h_sync_width:     HSYNC width in pixels.
+ * @h_front_porch:    Horizontal front porch in pixels.
+ * @h_skew:
+ * @h_sync_polarity:  Polarity of HSYNC (false is active low).
+ * @v_active:         Active height of one frame in lines.
+ * @v_back_porch:     Vertical back porch in lines.
+ * @v_sync_width:     VSYNC width in lines.
+ * @v_front_porch:    Vertical front porch in lines.
+ * @v_sync_polarity:  Polarity of VSYNC (false is active low).
+ * @refresh_rate:     Refresh rate in Hz.
+ */
+struct dsi_mode_info {
+	u32 h_active;
+	u32 h_back_porch;
+	u32 h_sync_width;
+	u32 h_front_porch;
+	u32 h_skew;
+	bool h_sync_polarity;
+
+	u32 v_active;
+	u32 v_back_porch;
+	u32 v_sync_width;
+	u32 v_front_porch;
+	bool v_sync_polarity;
+
+	u32 refresh_rate;
+};
+
+/**
+ * struct dsi_lane_mapping - Mapping between DSI logical and physical lanes
+ * @physical_lane0:   Logical lane to which physical lane 0 is mapped.
+ * @physical_lane1:   Logical lane to which physical lane 1 is mapped.
+ * @physical_lane2:   Logical lane to which physical lane 2 is mapped.
+ * @physical_lane3:   Logical lane to which physical lane 3 is mapped.
+ */
+struct dsi_lane_mapping {
+	enum dsi_logical_lane physical_lane0;
+	enum dsi_logical_lane physical_lane1;
+	enum dsi_logical_lane physical_lane2;
+	enum dsi_logical_lane physical_lane3;
+};
+
+/**
+ * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
+ * @dst_format:          Destination pixel format.
+ * @data_lanes:          Physical data lanes to be enabled.
+ * @en_crc_check:        Enable CRC checks.
+ * @en_ecc_check:        Enable ECC checks.
+ * @te_mode:             Source for TE signalling.
+ * @mdp_cmd_trigger:     MDP frame update trigger for command mode.
+ * @dma_cmd_trigger:     Command DMA trigger.
+ * @cmd_trigger_stream:  Command mode stream to trigger.
+ * @swap_mode:           Color component swap mode.
+ * @bit_swap_red:        Is red color bit swapped.
+ * @bit_swap_green:      Is green color bit swapped.
+ * @bit_swap_blue:       Is blue color bit swapped.
+ * @t_clk_post:          Number of byte clock cycles that the transmitter shall
+ *                       continue sending after last data lane has transitioned
+ *                       to LP mode.
+ * @t_clk_pre:           Number of byte clock cycles that the high speed clock
+ *                       shall be driven prior to data lane transitions from LP
+ *                       to HS mode.
+ * @ignore_rx_eot:       Ignore Rx EOT packets if set to true.
+ * @append_tx_eot:       Append EOT packets for forward transmissions if set to
+ *                       true.
+ */
+struct dsi_host_common_cfg {
+	enum dsi_pixel_format dst_format;
+	enum dsi_data_lanes data_lanes;
+	bool en_crc_check;
+	bool en_ecc_check;
+	enum dsi_te_mode te_mode;
+	enum dsi_trigger_type mdp_cmd_trigger;
+	enum dsi_trigger_type dma_cmd_trigger;
+	u32 cmd_trigger_stream;
+	enum dsi_color_swap_mode swap_mode;
+	bool bit_swap_red;
+	bool bit_swap_green;
+	bool bit_swap_blue;
+	u32 t_clk_post;
+	u32 t_clk_pre;
+	bool ignore_rx_eot;
+	bool append_tx_eot;
+};
+
+/**
+ * struct dsi_video_engine_cfg - DSI video engine configuration
+ * @host_cfg:                  Pointer to host common configuration.
+ * @last_line_interleave_en:   Allow command mode op interleaved on last line of
+ *                             video stream.
+ * @pulse_mode_hsa_he:         Send HSA and HE following VS/VE packet if set to
+ *                             true.
+ * @hfp_lp11_en:               Enter low power stop mode (LP-11) during HFP.
+ * @hbp_lp11_en:               Enter low power stop mode (LP-11) during HBP.
+ * @hsa_lp11_en:               Enter low power stop mode (LP-11) during HSA.
+ * @eof_bllp_lp11_en:          Enter low power stop mode (LP-11) during BLLP of
+ *                             last line of a frame.
+ * @bllp_lp11_en:              Enter low power stop mode (LP-11) during BLLP.
+ * @traffic_mode:              Traffic mode for video stream.
+ * @vc_id:                     Virtual channel identifier.
+ */
+struct dsi_video_engine_cfg {
+	bool last_line_interleave_en;
+	bool pulse_mode_hsa_he;
+	bool hfp_lp11_en;
+	bool hbp_lp11_en;
+	bool hsa_lp11_en;
+	bool eof_bllp_lp11_en;
+	bool bllp_lp11_en;
+	enum dsi_video_traffic_mode traffic_mode;
+	u32 vc_id;
+};
+
+/**
+ * struct dsi_cmd_engine_cfg - DSI command engine configuration
+ * @max_cmd_packets_interleave:    Maximum number of command mode RGB packets to
+ *                                 send within one horizontal blanking period
+ *                                 of the video mode frame.
+ * @wr_mem_start:                  DCS command for write_memory_start.
+ * @wr_mem_continue:               DCS command for write_memory_continue.
+ * @insert_dcs_command:            Insert DCS command as first byte of payload
+ *                                 of the pixel data.
+ */
+struct dsi_cmd_engine_cfg {
+	u32 max_cmd_packets_interleave;
+	u32 wr_mem_start;
+	u32 wr_mem_continue;
+	bool insert_dcs_command;
+};
+
+/**
+ * struct dsi_host_config - DSI host configuration parameters.
+ * @panel_mode:            Operation mode for panel (video or cmd mode).
+ * @common_config:         Host configuration common to both Video and Cmd mode.
+ * @video_engine:          Video engine configuration if panel is in video mode.
+ * @cmd_engine:            Cmd engine configuration if panel is in cmd mode.
+ * @esc_clk_rate_hz:       Escape clock frequency in Hz.
+ * @bit_clk_rate_hz:       Bit clock frequency in Hz.
+ * @video_timing:          Video timing information of a frame.
+ * @lane_map:              Mapping between logical and physical lanes.
+ * @phy_type:              PHY type to be used.
+ */
+struct dsi_host_config {
+	enum dsi_op_mode panel_mode;
+	struct dsi_host_common_cfg common_config;
+	union {
+		struct dsi_video_engine_cfg video_engine;
+		struct dsi_cmd_engine_cfg cmd_engine;
+	} u;
+	u64 esc_clk_rate_hz;
+	u64 bit_clk_rate_hz;
+	struct dsi_mode_info video_timing;
+	struct dsi_lane_mapping lane_map;
+};
+
+/**
+ * struct dsi_display_mode - specifies mode for dsi display
+ * @timing:         Timing parameters for the panel.
+ * @pixel_clk_khz:  Pixel clock in kHz.
+ * @panel_mode:     Panel operation mode.
+ * @flags:          Additional flags.
+ */
+struct dsi_display_mode {
+	struct dsi_mode_info timing;
+	u32 pixel_clk_khz;
+	enum dsi_op_mode panel_mode;
+
+	u32 flags;
+};
+
+#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
new file mode 100644
index 0000000..01535c0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_HW_H_
+#define _DSI_HW_H_
+#include <linux/io.h>
+
+#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
+#define DSI_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->base + (off)); \
+	} while (0)
+
+#define DSI_MMSS_MISC_R32(dsi_hw, off) \
+	readl_relaxed((dsi_hw)->mmss_misc_base + (off))
+#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
+	} while (0)
+
+#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
+#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
+
+#endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
new file mode 100644
index 0000000..5edfd5e
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_HW_H_
+#define _DSI_PHY_HW_H_
+
+#include "dsi_defs.h"
+
+#define DSI_MAX_SETTINGS 8
+
+/**
+ * enum dsi_phy_version - DSI PHY version enumeration
+ * @DSI_PHY_VERSION_UNKNOWN:    Unknown version.
+ * @DSI_PHY_VERSION_1_0:        28nm-HPM.
+ * @DSI_PHY_VERSION_2_0:        28nm-LPM.
+ * @DSI_PHY_VERSION_3_0:        20nm.
+ * @DSI_PHY_VERSION_4_0:        14nm.
+ * @DSI_PHY_VERSION_MAX:
+ */
+enum dsi_phy_version {
+	DSI_PHY_VERSION_UNKNOWN,
+	DSI_PHY_VERSION_1_0, /* 28nm-HPM */
+	DSI_PHY_VERSION_2_0, /* 28nm-LPM */
+	DSI_PHY_VERSION_3_0, /* 20nm */
+	DSI_PHY_VERSION_4_0, /* 14nm */
+	DSI_PHY_VERSION_MAX
+};
+
+/**
+ * enum dsi_phy_hw_features - features supported by DSI PHY hardware
+ * @DSI_PHY_DPHY:        Supports DPHY
+ * @DSI_PHY_CPHY:        Supports CPHY
+ */
+enum dsi_phy_hw_features {
+	DSI_PHY_DPHY,
+	DSI_PHY_CPHY,
+	DSI_PHY_MAX_FEATURES
+};
+
+/**
+ * enum dsi_phy_pll_source - pll clock source for PHY.
+ * @DSI_PLL_SOURCE_STANDALONE:    Clock is sourced from native PLL and is not
+ *				  shared by other PHYs.
+ * @DSI_PLL_SOURCE_NATIVE:        Clock is sourced from native PLL and is
+ *				  shared by other PHYs.
+ * @DSI_PLL_SOURCE_NON_NATIVE:    Clock is sourced from other PHYs.
+ * @DSI_PLL_SOURCE_MAX:
+ */
+enum dsi_phy_pll_source {
+	DSI_PLL_SOURCE_STANDALONE = 0,
+	DSI_PLL_SOURCE_NATIVE,
+	DSI_PLL_SOURCE_NON_NATIVE,
+	DSI_PLL_SOURCE_MAX
+};
+
+/**
+ * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
+ * @lane:           A set of maximum 8 values for each lane.
+ * @count_per_lane: Number of values per each lane.
+ */
+struct dsi_phy_per_lane_cfgs {
+	u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
+	u32 count_per_lane;
+};
+
+/**
+ * struct dsi_phy_cfg - DSI PHY configuration
+ * @lanecfg:          Lane configuration settings.
+ * @strength:         Strength settings for lanes.
+ * @timing:           Timing parameters for lanes.
+ * @regulators:       Regulator settings for lanes.
+ * @pll_source:       PLL source.
+ */
+struct dsi_phy_cfg {
+	struct dsi_phy_per_lane_cfgs lanecfg;
+	struct dsi_phy_per_lane_cfgs strength;
+	struct dsi_phy_per_lane_cfgs timing;
+	struct dsi_phy_per_lane_cfgs regulators;
+	enum dsi_phy_pll_source pll_source;
+};
+
+struct dsi_phy_hw;
+
+/**
+ * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
+ * @regulator_enable:          Enable PHY regulators.
+ * @regulator_disable:         Disable PHY regulators.
+ * @enable:                    Enable PHY.
+ * @disable:                   Disable PHY.
+ * @calculate_timing_params:   Calculate PHY timing params from mode information
+ */
+struct dsi_phy_hw_ops {
+	/**
+	 * regulator_enable() - enable regulators for DSI PHY
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @reg_cfg:  Regulator configuration for all DSI lanes.
+	 */
+	void (*regulator_enable)(struct dsi_phy_hw *phy,
+				 struct dsi_phy_per_lane_cfgs *reg_cfg);
+
+	/**
+	 * regulator_disable() - disable regulators
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 */
+	void (*regulator_disable)(struct dsi_phy_hw *phy);
+
+	/**
+	 * enable() - Enable PHY hardware
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @cfg:      Per lane configurations for timing, strength and lane
+	 *	      configurations.
+	 */
+	void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+
+	/**
+	 * disable() - Disable PHY hardware
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 */
+	void (*disable)(struct dsi_phy_hw *phy);
+
+	/**
+	 * calculate_timing_params() - calculates timing parameters.
+	 * @phy:      Pointer to DSI PHY hardware object.
+	 * @mode:     Mode information for which timing has to be calculated.
+	 * @config:   DSI host configuration for this mode.
+	 * @timing:   Timing parameters for each lane which will be returned.
+	 */
+	int (*calculate_timing_params)(struct dsi_phy_hw *phy,
+				       struct dsi_mode_info *mode,
+				       struct dsi_host_common_cfg *config,
+				       struct dsi_phy_per_lane_cfgs *timing);
+};
+
+/**
+ * struct dsi_phy_hw - DSI phy hardware object specific to an instance
+ * @base:                  VA for the DSI PHY base address.
+ * @length:                Length of the DSI PHY register base map.
+ * @index:                 Instance ID of the controller.
+ * @version:               DSI PHY version.
+ * @feature_map:           Features supported by DSI PHY.
+ * @ops:                   Function pointer to PHY operations.
+ */
+struct dsi_phy_hw {
+	void __iomem *base;
+	u32 length;
+	u32 index;
+
+	enum dsi_phy_version version;
+
+	DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
+	struct dsi_phy_hw_ops ops;
+};
+
+#endif /* _DSI_PHY_HW_H_ */
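
The ops table above is the seam between the version-independent PHY layer and a specific hardware revision: an init/catalog step fills in struct dsi_phy_hw_ops for the detected dsi_phy_version, and callers only ever dispatch through phy->ops. A minimal sketch of that wiring, assuming the v4.0 functions defined in the next file are exposed through a header (the real hook-up lives in catalog code outside this hunk):

	#include <linux/bitops.h>
	#include "dsi_phy_hw.h"

	/* Hypothetical binding of the 14nm (v4.0) implementation. */
	static void dsi_phy_hw_bind_v4_0(struct dsi_phy_hw *phy)
	{
		phy->version = DSI_PHY_VERSION_4_0;
		set_bit(DSI_PHY_DPHY, phy->feature_map);

		phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
		phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
		phy->ops.enable = dsi_phy_hw_v4_0_enable;
		phy->ops.disable = dsi_phy_hw_v4_0_disable;
		phy->ops.calculate_timing_params =
				dsi_phy_hw_v4_0_calculate_timing_params;
	}

	/* Callers stay version-agnostic and go through the function pointers. */
	static void dsi_phy_power_on(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg)
	{
		phy->ops.regulator_enable(phy, &cfg->regulators);
		phy->ops.enable(phy, cfg);
	}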
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
new file mode 100644
index 0000000..512352d
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dsi-phy-hw:" fmt
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include "dsi_hw.h"
+#include "dsi_phy_hw.h"
+
+#define DSIPHY_CMN_REVISION_ID0                   0x0000
+#define DSIPHY_CMN_REVISION_ID1                   0x0004
+#define DSIPHY_CMN_REVISION_ID2                   0x0008
+#define DSIPHY_CMN_REVISION_ID3                   0x000C
+#define DSIPHY_CMN_CLK_CFG0                       0x0010
+#define DSIPHY_CMN_CLK_CFG1                       0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL                 0x0018
+#define DSIPHY_CMN_CTRL_0                         0x001C
+#define DSIPHY_CMN_CTRL_1                         0x0020
+#define DSIPHY_CMN_CAL_HW_TRIGGER                 0x0024
+#define DSIPHY_CMN_CAL_SW_CFG0                    0x0028
+#define DSIPHY_CMN_CAL_SW_CFG1                    0x002C
+#define DSIPHY_CMN_CAL_SW_CFG2                    0x0030
+#define DSIPHY_CMN_CAL_HW_CFG0                    0x0034
+#define DSIPHY_CMN_CAL_HW_CFG1                    0x0038
+#define DSIPHY_CMN_CAL_HW_CFG2                    0x003C
+#define DSIPHY_CMN_CAL_HW_CFG3                    0x0040
+#define DSIPHY_CMN_CAL_HW_CFG4                    0x0044
+#define DSIPHY_CMN_PLL_CNTRL                      0x0048
+#define DSIPHY_CMN_LDO_CNTRL                      0x004C
+
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS0          0x0064
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS1          0x0068
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_DLNX_CFG0(n)                     (0x100 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG1(n)                     (0x104 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG2(n)                     (0x108 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG3(n)                     (0x10C + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_DATAPATH(n)            (0x110 + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_STR(n)                 (0x114 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_4(n)            (0x118 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_5(n)            (0x11C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_6(n)            (0x120 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_7(n)            (0x124 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_8(n)            (0x128 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_9(n)            (0x12C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_10(n)           (0x130 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_11(n)           (0x134 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_0(n)          (0x138 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_1(n)          (0x13C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_POLY(n)                (0x140 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED0(n)               (0x144 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED1(n)               (0x148 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_HEAD(n)                (0x14C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SOT(n)                 (0x150 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL0(n)               (0x154 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL1(n)               (0x158 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL2(n)               (0x15C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL3(n)               (0x160 + ((n) * 0x80))
+#define DSIPHY_DLNX_VREG_CNTRL(n)               (0x164 + ((n) * 0x80))
+#define DSIPHY_DLNX_HSTX_STR_STATUS(n)          (0x168 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS0(n)             (0x16C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS1(n)             (0x170 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS2(n)             (0x174 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS3(n)             (0x178 + ((n) * 0x80))
+#define DSIPHY_DLNX_MISR_STATUS(n)              (0x17C + ((n) * 0x80))
+
+#define DSIPHY_PLL_CLKBUFLR_EN                  0x041C
+#define DSIPHY_PLL_PLL_BANDGAP                  0x0508
+
+/**
+ * struct timing_entry - Calculated values for each timing parameter.
+ * @mipi_min:        Minimum value allowed by the MIPI D-PHY spec (in ns).
+ * @mipi_max:        Maximum value allowed by the MIPI D-PHY spec (in ns).
+ * @rec_min:         Minimum recommended register value.
+ * @rec_max:         Maximum recommended register value.
+ * @rec:             Recommended (chosen) register value.
+ * @reg_value:       Value to be programmed in register.
+ */
+struct timing_entry {
+	s32 mipi_min;
+	s32 mipi_max;
+	s32 rec_min;
+	s32 rec_max;
+	s32 rec;
+	u8 reg_value;
+};
+
+/**
+ * struct phy_timing_desc - Timing parameters for DSI PHY.
+ */
+struct phy_timing_desc {
+	struct timing_entry clk_prepare;
+	struct timing_entry clk_zero;
+	struct timing_entry clk_trail;
+	struct timing_entry hs_prepare;
+	struct timing_entry hs_zero;
+	struct timing_entry hs_trail;
+	struct timing_entry hs_rqst;
+	struct timing_entry hs_rqst_clk;
+	struct timing_entry hs_exit;
+	struct timing_entry ta_go;
+	struct timing_entry ta_sure;
+	struct timing_entry ta_set;
+	struct timing_entry clk_post;
+	struct timing_entry clk_pre;
+};
+
+/**
+ * struct phy_clk_params - Clock parameters for PHY timing calculations.
+ */
+struct phy_clk_params {
+	u32 bitclk_mbps;
+	u32 escclk_numer;
+	u32 escclk_denom;
+	u32 tlpx_numer_ns;
+	u32 treot_ns;
+};
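
Every calc_* routine below fills a struct timing_entry the same way: derive the spec window (mipi_min/mipi_max), translate it into a recommended register window (rec_min/rec_max), pick a point a fixed percentage of the way into that window, and check that the result fits the 8-bit register field. The recurring final step looks like the stand-alone helper below; this is an illustration only, the driver open-codes it in each function:

	#include <linux/kernel.h>

	/* Pick a value 'frac' percent of the way from rec_min to rec_max. */
	static int pick_reg_value(struct timing_entry *t, u32 frac)
	{
		t->rec = DIV_ROUND_UP(((t->rec_max - t->rec_min) * frac) +
				      (t->rec_min * 100), 100);

		if (t->rec & 0xffffff00)	/* must fit in 8 bits */
			return -EINVAL;

		t->reg_value = t->rec;
		return 0;
	}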
+
+/**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @reg_cfg:  Regulator configuration for all DSI lanes.
+ */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+				      struct dsi_phy_per_lane_cfgs *reg_cfg)
+{
+	int i;
+
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
+		DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);
+
+	/* make sure all values are written to hardware */
+	wmb();
+
+	pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
+}
+
+/**
+ * regulator_disable() - disable regulators
+ * @phy:      Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy)
+{
+	pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
+}
+
+/**
+ * enable() - Enable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @cfg:      Per lane configurations for timing, strength and lane
+ *	      configurations.
+ */
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
+			    struct dsi_phy_cfg *cfg)
+{
+	int i;
+	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
+	u32 data;
+
+	DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+
+		DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
+		DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);
+
+		DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);
+
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
+		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);
+
+		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
+			cfg->strength.lane[i][0]);
+		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
+			cfg->strength.lane[i][1]);
+	}
+
+	/* make sure all values are written to hardware before enabling phy */
+	wmb();
+
+	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
+	udelay(100);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);
+
+	data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);
+
+	switch (cfg->pll_source) {
+	case DSI_PLL_SOURCE_STANDALONE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
+		data &= ~BIT(2);
+		break;
+	case DSI_PLL_SOURCE_NATIVE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
+		data &= ~BIT(2);
+		break;
+	case DSI_PLL_SOURCE_NON_NATIVE:
+		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
+		data |= BIT(2);
+		break;
+	default:
+		break;
+	}
+
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);
+
+	/* Enable bias current for pll1 during split display case */
+	if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
+		DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);
+
+	pr_debug("[DSI_%d] Phy enabled\n", phy->index);
+}
+
+/**
+ * disable() - Disable PHY hardware
+ * @phy:      Pointer to DSI PHY hardware object.
+ */
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy)
+{
+	DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
+	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
+	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
+	pr_debug("[DSI_%d] Phy disabled\n", phy->index);
+}
+
+static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
+	16, 18, 18, 24, 3, 8, 12 };
+
+/**
+ * calc_clk_prepare - calculates prepare timing params for clk lane.
+ */
+static int calc_clk_prepare(struct phy_clk_params *clk_params,
+			    struct phy_timing_desc *desc,
+			    s32 *actual_frac,
+			    s64 *actual_intermediate)
+{
+	u32 const min_prepare_frac = 50;
+	u64 const multiplier = BIT(20);
+
+	struct timing_entry *t = &desc->clk_prepare;
+	int rc = 0;
+	u64 dividend, temp, temp_multiple;
+	s32 frac = 0;
+	s64 intermediate;
+	s64 clk_prep_actual;
+
+	dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier);
+	temp  = roundup(div_s64(dividend, 100), multiplier);
+	temp += (t->rec_min * multiplier);
+	t->rec = div_s64(temp, multiplier);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for clk_prepare\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	/* calculate theoretical value */
+	temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
+			 * multiplier;
+	intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
+	clk_prep_actual = div_s64((intermediate + frac), multiplier);
+
+	pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
+	pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
+
+	*actual_frac = frac;
+	*actual_intermediate = intermediate;
+
+	return rc;
+}
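
The repeated multiplier = BIT(20) is a fixed-point scale: intermediate values are carried in Q20 (20 fractional bits) so that divisions by the bit clock keep sub-nanosecond precision, and only the final result is brought back to integer range with div_s64(). A minimal sketch of the convention, using a hypothetical helper name; each register tick corresponds to 8 UI, and one UI lasts tlpx_numer_ns / bitclk_mbps nanoseconds:

	#include <linux/bitops.h>
	#include <linux/math64.h>

	#define TIMING_FP_ONE	BIT(20)		/* Q20 fixed-point "1.0" */

	/* Hypothetical helper: convert register ticks into Q20 nanoseconds. */
	static s64 ticks_to_ns_q20(u32 ticks, u32 tlpx_numer_ns, u32 bitclk_mbps)
	{
		return div_s64((s64)8 * ticks * tlpx_numer_ns * TIMING_FP_ONE,
			       bitclk_mbps);
	}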
+
+/**
+ * calc_clk_zero - calculates zero timing params for clk lane.
+ */
+static int calc_clk_zero(struct phy_clk_params *clk_params,
+			 struct phy_timing_desc *desc,
+			 s32 actual_frac,
+			 s64 actual_intermediate)
+{
+	u32 const clk_zero_min_frac = 2;
+	u64 const multiplier = BIT(20);
+
+	int rc = 0;
+	struct timing_entry *t = &desc->clk_zero;
+	s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min;
+
+	mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
+	t->mipi_min = div_s64(mipi_min, multiplier);
+
+	rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = (rec_temp1 - (11 * multiplier));
+	rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+	rec_min = (div_s64(rec_temp3, multiplier) - 3);
+	t->rec_min = rec_min;
+	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+	t->rec = DIV_ROUND_UP(
+			(((t->rec_max - t->rec_min) * clk_zero_min_frac) +
+			 (t->rec_min * 100)),
+			100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for clk_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_clk_trail - calculates trail timing params for clk lane.
+ */
+static int calc_clk_trail(struct phy_clk_params *clk_params,
+			  struct phy_timing_desc *desc,
+			  s64 *teot_clk_lane)
+{
+	u64 const multiplier = BIT(20);
+	u32 const phy_timing_frac = 30;
+
+	int rc = 0;
+	struct timing_entry *t = &desc->clk_trail;
+	u64 temp_multiple;
+	s32 frac;
+	s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max;
+	s64 teot_clk_lane1;
+
+	temp_multiple = div_s64(
+			(12 * multiplier * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+
+	mipi_max_tr = ((105 * multiplier) +
+		       (temp_multiple + frac));
+	teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
+
+	mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
+	t->mipi_max = div_s64(mipi_max, multiplier);
+
+	temp_multiple = div_s64(
+			(t->mipi_min * multiplier * clk_params->bitclk_mbps),
+			clk_params->tlpx_numer_ns);
+
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp1 = temp_multiple + frac + (3 * multiplier);
+	rec_temp2 = div_s64(rec_temp1, 8);
+	rec_temp3 = roundup(rec_temp2, multiplier);
+
+	t->rec_min = div_s64(rec_temp3, multiplier);
+
+	/* recommended max */
+	rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 + (3 * multiplier);
+	rec_temp3 = rec_temp2 / 8;
+	t->rec_max = div_s64(rec_temp3, multiplier);
+
+	t->rec = DIV_ROUND_UP(
+		(((t->rec_max - t->rec_min) * phy_timing_frac) +
+		 (t->rec_min * 100)),
+		 100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for clk_trail\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	*teot_clk_lane = teot_clk_lane1;
+	pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+
+}
+
+/**
+ * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
+ */
+static int calc_hs_prepare(struct phy_clk_params *clk_params,
+			   struct phy_timing_desc *desc,
+			   u64 *temp_mul)
+{
+	u64 const multiplier = BIT(20);
+	u32 const min_prepare_frac = 50;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_prepare;
+	u64 temp_multiple, dividend, temp;
+	s32 frac;
+	s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
+	u32 low_clk_multiplier = 0;
+
+	if (clk_params->bitclk_mbps <= 120)
+		low_clk_multiplier = 2;
+	/* mipi min */
+	temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
+				clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	mipi_min = (40 * multiplier) + (temp_multiple + frac);
+	t->mipi_min = div_s64(mipi_min, multiplier);
+
+	/* mipi_max */
+	temp_multiple = div_s64(
+			(6 * multiplier * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	mipi_max = (85 * multiplier) + temp_multiple;
+	t->mipi_max = div_s64(mipi_max, multiplier);
+
+	/* recommended min */
+	temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
+				clk_params->tlpx_numer_ns);
+	temp_multiple -= (low_clk_multiplier * multiplier);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
+	t->rec_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended max */
+	temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
+				clk_params->tlpx_numer_ns);
+	temp_multiple -= (low_clk_multiplier * multiplier);
+	div_s64_rem(temp_multiple, multiplier, &frac);
+	rec_temp2 = rounddown((temp_multiple / 8), multiplier);
+	t->rec_max = div_s64(rec_temp2, multiplier);
+
+	/* register value */
+	dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac);
+	temp = roundup(div_u64(dividend, 100), multiplier);
+	t->rec = div_s64((temp + rec_temp1), multiplier);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for hs_prepare\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	temp_multiple = div_s64(
+			(8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
+			clk_params->bitclk_mbps);
+
+	*temp_mul = temp_multiple;
+	pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+	return rc;
+}
+
+/**
+ * calc_hs_zero - calculates zero timing params for data lanes in HS.
+ */
+static int calc_hs_zero(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc,
+			u64 temp_multiple)
+{
+	u32 const hs_zero_min_frac = 10;
+	u64 const multiplier = BIT(20);
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_zero;
+	s64 rec_temp1, rec_temp2, rec_temp3, mipi_min;
+	s64 rec_min;
+
+	mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
+			   clk_params->bitclk_mbps);
+	rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+	t->mipi_min = div_s64(rec_temp1, multiplier);
+
+	/* recommended min */
+	rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
+			    clk_params->tlpx_numer_ns);
+	rec_temp2 = rec_temp1 - (11 * multiplier);
+	rec_temp3 = roundup((rec_temp2 / 8), multiplier);
+	rec_min = rec_temp3 - (3 * multiplier);
+	t->rec_min =  div_s64(rec_min, multiplier);
+	t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+	t->rec = DIV_ROUND_UP(
+			(((t->rec_max - t->rec_min) * hs_zero_min_frac) +
+			 (t->rec_min * 100)),
+			100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for hs_zero\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_trail - calculates trail timing params for data lanes in HS.
+ */
+static int calc_hs_trail(struct phy_clk_params *clk_params,
+			 struct phy_timing_desc *desc,
+			 u64 teot_clk_lane)
+{
+	u32 const phy_timing_frac = 30;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_trail;
+	s64 rec_temp1;
+
+	t->mipi_min = 60 +
+			mult_frac(clk_params->tlpx_numer_ns, 4,
+				  clk_params->bitclk_mbps);
+
+	t->mipi_max = teot_clk_lane - clk_params->treot_ns;
+
+	t->rec_min = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) +
+		 (3 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
+		     (3 * clk_params->tlpx_numer_ns));
+	t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns));
+	rec_temp1 = DIV_ROUND_UP(
+			((t->rec_max - t->rec_min) * phy_timing_frac),
+			100);
+	t->rec = rec_temp1 + t->rec_min;
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for hs_trail\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
+ */
+static int calc_hs_rqst(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_rqst;
+
+	t->rec = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (8 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for hs_rqst, %d\n", t->rec);
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_exit - calculates exit timing params for data lanes in HS.
+ */
+static int calc_hs_exit(struct phy_clk_params *clk_params,
+			struct phy_timing_desc *desc)
+{
+	u32 const hs_exit_min_frac = 10;
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_exit;
+
+	t->rec_min = (DIV_ROUND_UP(
+			(t->mipi_min * clk_params->bitclk_mbps),
+			(8 * clk_params->tlpx_numer_ns)) - 1);
+
+	t->rec = DIV_ROUND_UP(
+		(((t->rec_max - t->rec_min) * hs_exit_min_frac) +
+		 (t->rec_min * 100)),
+		100);
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for hs_exit\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * calc_hs_rqst_clk - calculates rqst timing params for the clock lane.
+ */
+static int calc_hs_rqst_clk(struct phy_clk_params *clk_params,
+			    struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	struct timing_entry *t = &desc->hs_rqst_clk;
+
+	t->rec = DIV_ROUND_UP(
+		((t->mipi_min * clk_params->bitclk_mbps) -
+		 (8 * clk_params->tlpx_numer_ns)),
+		(8 * clk_params->tlpx_numer_ns));
+
+	if (t->rec & 0xffffff00) {
+		pr_err("Incorrect rec value for hs_rqst_clk\n");
+		rc = -EINVAL;
+	} else {
+		t->reg_value = t->rec;
+	}
+
+	pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+		 t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+		 t->reg_value);
+
+	return rc;
+}
+
+/**
+ * dsi_phy_calc_timing_params - calculates timing parameters for a given bit clock
+ */
+static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params,
+				      struct phy_timing_desc *desc)
+{
+	int rc = 0;
+	s32 actual_frac = 0;
+	s64 actual_intermediate = 0;
+	u64 temp_multiple;
+	s64 teot_clk_lane;
+
+	rc = calc_clk_prepare(clk_params, desc, &actual_frac,
+			      &actual_intermediate);
+	if (rc) {
+		pr_err("clk_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate);
+	if (rc) {
+		pr_err("clk_zero calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
+	if (rc) {
+		pr_err("clk_trail calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
+	if (rc) {
+		pr_err("hs_prepare calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_zero(clk_params, desc, temp_multiple);
+	if (rc) {
+		pr_err("hs_zero calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
+	if (rc) {
+		pr_err("hs_trail calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_rqst(clk_params, desc);
+	if (rc) {
+		pr_err("hs_rqst calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_exit(clk_params, desc);
+	if (rc) {
+		pr_err("hs_exit calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = calc_hs_rqst_clk(clk_params, desc);
+	if (rc) {
+		pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy:      Pointer to DSI PHY hardware object.
+ * @mode:     Mode information for which timing has to be calculated.
+ * @config:   DSI host configuration for this mode.
+ * @timing:   Timing parameters for each lane which will be returned.
+ */
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+					    struct dsi_mode_info *mode,
+					    struct dsi_host_common_cfg *host,
+					   struct dsi_phy_per_lane_cfgs *timing)
+{
+	/* constants */
+	u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
+	u32 const esc_clk_mmss_cc_prediv = 10;
+	u32 const tlpx_numer = 1000;
+	u32 const tr_eot = 20;
+	u32 const clk_prepare_spec_min = 38;
+	u32 const clk_prepare_spec_max = 95;
+	u32 const clk_trail_spec_min = 60;
+	u32 const hs_exit_spec_min = 100;
+	u32 const hs_exit_reco_max = 255;
+	u32 const hs_rqst_spec_min = 50;
+
+	/* local vars */
+	int rc = 0;
+	int i;
+	u32 h_total, v_total;
+	u64 inter_num;
+	u32 num_of_lanes = 0;
+	u32 bpp;
+	u64 x, y;
+	struct phy_timing_desc desc;
+	struct phy_clk_params clk_params = {0};
+
+	memset(&desc, 0x0, sizeof(desc));
+	h_total = DSI_H_TOTAL(mode);
+	v_total = DSI_V_TOTAL(mode);
+
+	bpp = bits_per_pixel[host->dst_format];
+
+	inter_num = bpp * mode->refresh_rate;
+
+	if (host->data_lanes & DSI_DATA_LANE_0)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_1)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_2)
+		num_of_lanes++;
+	if (host->data_lanes & DSI_DATA_LANE_3)
+		num_of_lanes++;
+
+
+	x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+	y = rounddown(x, 1);
+
+	clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+	clk_params.escclk_numer = esc_clk_mhz;
+	clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
+	clk_params.tlpx_numer_ns = tlpx_numer;
+	clk_params.treot_ns = tr_eot;
+
+
+	/* Setup default parameters */
+	desc.clk_prepare.mipi_min = clk_prepare_spec_min;
+	desc.clk_prepare.mipi_max = clk_prepare_spec_max;
+	desc.clk_trail.mipi_min = clk_trail_spec_min;
+	desc.hs_exit.mipi_min = hs_exit_spec_min;
+	desc.hs_exit.rec_max = hs_exit_reco_max;
+
+	desc.clk_prepare.rec_min = DIV_ROUND_UP(
+			(desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
+			(8 * clk_params.tlpx_numer_ns)
+			);
+
+	desc.clk_prepare.rec_max = rounddown(
+		mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
+			  1, (8 * clk_params.tlpx_numer_ns)),
+		1);
+
+	desc.hs_rqst.mipi_min = hs_rqst_spec_min;
+	desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+
+	pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
+	       clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
+	       clk_params.treot_ns);
+	rc = dsi_phy_calc_timing_params(&clk_params, &desc);
+	if (rc) {
+		pr_err("Timing calc failed, rc=%d\n", rc);
+		goto error;
+	}
+
+
+	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+		timing->lane[i][0] = desc.hs_exit.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][1] = desc.clk_zero.reg_value;
+		else
+			timing->lane[i][1] = desc.hs_zero.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][2] = desc.clk_prepare.reg_value;
+		else
+			timing->lane[i][2] = desc.hs_prepare.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][3] = desc.clk_trail.reg_value;
+		else
+			timing->lane[i][3] = desc.hs_trail.reg_value;
+
+		if (i == DSI_LOGICAL_CLOCK_LANE)
+			timing->lane[i][4] = desc.hs_rqst_clk.reg_value;
+		else
+			timing->lane[i][4] = desc.hs_rqst.reg_value;
+
+		timing->lane[i][5] = 0x3;
+		timing->lane[i][6] = 0x4;
+		timing->lane[i][7] = 0xA0;
+		pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
+						    timing->lane[i][1],
+						    timing->lane[i][2],
+						    timing->lane[i][3],
+						    timing->lane[i][4]);
+	}
+	timing->count_per_lane = 8;
+
+error:
+	return rc;
+}
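
For orientation, the bit clock that drives all of the calculations above is simply pixel rate times bits per pixel, divided across the active data lanes. With made-up but representative numbers (h_total = 1200, v_total = 1950, 60 fps refresh, RGB888 so bpp = 24, 4 data lanes) the derivation works out as follows:

	#include <linux/math64.h>

	/* Worked example of the bitclk_mbps derivation; all numbers are hypothetical. */
	static u32 example_bitclk_mbps(void)
	{
		u64 h_total = 1200, v_total = 1950, refresh = 60, bpp = 24;
		u32 lanes = 4;

		/* 1200 * 1950 * 60 * 24 / 4 = 842,400,000 bits per second per lane */
		u64 bitclk_hz = div_u64(h_total * v_total * refresh * bpp, lanes);

		return div_u64(bitclk_hz, 1000000);	/* ~842 Mbps */
	}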
diff --git a/drivers/hwspinlock/Kconfig b/drivers/hwspinlock/Kconfig
index 73a4016..d8782ea 100644
--- a/drivers/hwspinlock/Kconfig
+++ b/drivers/hwspinlock/Kconfig
@@ -53,4 +53,15 @@
 
 	  If unsure, say N.
 
+config REMOTE_SPINLOCK_MSM
+	bool "MSM Remote Spinlock Functionality"
+	depends on ARCH_QCOM
+	select HWSPINLOCK
+	help
+	  Say y here to support the MSM Remote Spinlock functionality, which
+	  provides a synchronisation mechanism for the various processors on the
+	  SoC.
+
+	  If unsure, say N.
+
 endmenu
diff --git a/drivers/hwspinlock/Makefile b/drivers/hwspinlock/Makefile
index 6b59cb5a..365a335 100644
--- a/drivers/hwspinlock/Makefile
+++ b/drivers/hwspinlock/Makefile
@@ -7,3 +7,4 @@
 obj-$(CONFIG_HWSPINLOCK_QCOM)		+= qcom_hwspinlock.o
 obj-$(CONFIG_HWSPINLOCK_SIRF)		+= sirf_hwspinlock.o
 obj-$(CONFIG_HSEM_U8500)		+= u8500_hsem.o
+obj-$(CONFIG_REMOTE_SPINLOCK_MSM)	+= msm_remote_spinlock.o
diff --git a/drivers/hwspinlock/msm_remote_spinlock.c b/drivers/hwspinlock/msm_remote_spinlock.c
new file mode 100644
index 0000000..834405b
--- /dev/null
+++ b/drivers/hwspinlock/msm_remote_spinlock.c
@@ -0,0 +1,602 @@
+/* Copyright (c) 2008-2009, 2011-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/msm_remote_spinlock.h>
+#include <linux/slab.h>
+
+#include <soc/qcom/smem.h>
+
+/**
+ * The local processor (APPS) is PID 0, but because 0 is reserved for an empty
+ * lock, the value PID + 1 is used as the APPS token when writing to the lock.
+ */
+#define SPINLOCK_TOKEN_APPS 1
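
Because 0 marks a free lock, every processor claims a lock by writing its PID plus one; the APPS value of 1 is just PID 0 + 1, and __raw_remote_gen_spin_release() below checks the same pid + 1 encoding when cleaning up after a crashed remote processor. As a one-line illustration (the helper name is hypothetical):

	/* Token written into the lock word by processor 'pid'; 0 means unlocked. */
	static inline u32 remote_spinlock_token(u32 pid)
	{
		return pid + 1;
	}
	/* remote_spinlock_token(0) == SPINLOCK_TOKEN_APPS */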
+
+static int is_hw_lock_type;
+static DEFINE_MUTEX(ops_init_lock);
+
+struct spinlock_ops {
+	void (*lock)(raw_remote_spinlock_t *lock);
+	void (*unlock)(raw_remote_spinlock_t *lock);
+	int (*trylock)(raw_remote_spinlock_t *lock);
+	int (*release)(raw_remote_spinlock_t *lock, uint32_t pid);
+	int (*owner)(raw_remote_spinlock_t *lock);
+	void (*lock_rlock_id)(raw_remote_spinlock_t *lock, uint32_t tid);
+	void (*unlock_rlock)(raw_remote_spinlock_t *lock);
+	int (*get_hw_spinlocks_element)(raw_remote_spinlock_t *lock);
+};
+
+static struct spinlock_ops current_ops;
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock);
+
+/* ldrex implementation ----------------------------------------------------- */
+static char *ldrex_compatible_string = "qcom,ipc-spinlock-ldrex";
+
+#ifdef CONFIG_ARM
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"1:     ldrex   %0, [%1]\n"
+"       teq     %0, #0\n"
+"       strexeq %0, %2, [%1]\n"
+"       teqeq   %0, #0\n"
+"       bne     1b"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
+	: "cc");
+
+	/*
+	 * Ensure the ordering of read/write operations to ensure the
+	 * proper ownership of the lock during the lock/unlock operations
+	 */
+	smp_mb();
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+	unsigned long tmp;
+
+	__asm__ __volatile__(
+"       ldrex   %0, [%1]\n"
+"       teq     %0, #0\n"
+"       strexeq %0, %2, [%1]\n"
+	: "=&r" (tmp)
+	: "r" (&lock->lock), "r" (SPINLOCK_TOKEN_APPS)
+	: "cc");
+
+	if (tmp == 0) {
+		/*
+		 * Ensure the ordering of read/write operations to ensure the
+		 * proper ownership of the lock during the lock/unlock
+		 * operations
+		 */
+		smp_mb();
+		return 1;
+	}
+	return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+	int lock_owner;
+
+	/*
+	 * Ensure the ordering of read/write operations to ensure the
+	 * proper ownership of the lock during the lock/unlock operations
+	 */
+	smp_mb();
+	lock_owner = readl_relaxed(&lock->lock);
+	if (lock_owner != SPINLOCK_TOKEN_APPS) {
+		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+				__func__, lock_owner);
+	}
+
+	__asm__ __volatile__(
+"       str     %1, [%0]\n"
+	:
+	: "r" (&lock->lock), "r" (0)
+	: "cc");
+}
+#else
+static void __raw_remote_ex_spin_lock(raw_remote_spinlock_t *lock)
+{
+}
+
+static int __raw_remote_ex_spin_trylock(raw_remote_spinlock_t *lock)
+{
+	return 0;
+}
+
+static void __raw_remote_ex_spin_unlock(raw_remote_spinlock_t *lock)
+{
+}
+#endif /* CONFIG_ARM */
+/* end ldrex implementation ------------------------------------------------- */
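
On non-ARM builds the ldrex variants above are stubbed out; on ARM the inline assembly implements an atomic compare-and-swap loop that moves the lock word from 0 (free) to the APPS token. Purely as a reference for the semantics, and not how the driver is built, the same acquire/try/release logic expressed with C11 atomics instead of ldrex/strex looks like this:

	#include <stdatomic.h>
	#include <stdint.h>

	#define TOKEN_APPS 1	/* mirrors SPINLOCK_TOKEN_APPS */

	static void example_spin_lock(_Atomic uint32_t *lock)
	{
		uint32_t expected;

		do {
			expected = 0;	/* only take the lock when it is free */
		} while (!atomic_compare_exchange_weak(lock, &expected, TOKEN_APPS));
	}

	static int example_spin_trylock(_Atomic uint32_t *lock)
	{
		uint32_t expected = 0;

		return atomic_compare_exchange_strong(lock, &expected, TOKEN_APPS);
	}

	static void example_spin_unlock(_Atomic uint32_t *lock)
	{
		atomic_store(lock, 0);	/* mark the lock free again */
	}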
+
+/* sfpb implementation ------------------------------------------------------ */
+static uint32_t lock_count;
+static phys_addr_t reg_base;
+static uint32_t reg_size;
+static uint32_t lock_offset; /* offset into the hardware block before lock 0 */
+static uint32_t lock_size;
+
+static void *hw_mutex_reg_base;
+static DEFINE_MUTEX(hw_map_init_lock);
+static int *hw_spinlocks;
+
+static char *sfpb_compatible_string = "qcom,ipc-spinlock-sfpb";
+
+static int init_hw_mutex(struct device_node *node)
+{
+	struct resource r;
+	int rc;
+
+	rc = of_address_to_resource(node, 0, &r);
+	if (rc)
+		BUG();
+
+	rc = of_property_read_u32(node, "qcom,num-locks", &lock_count);
+	if (rc)
+		BUG();
+
+	reg_base = r.start;
+	reg_size = (uint32_t)(resource_size(&r));
+	lock_offset = 0;
+	lock_size = reg_size / lock_count;
+
+	return 0;
+}
+
+static void find_and_init_hw_mutex(void)
+{
+	struct device_node *node;
+
+	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+	BUG_ON(node == NULL);
+	init_hw_mutex(node);
+	hw_mutex_reg_base = ioremap(reg_base, reg_size);
+	BUG_ON(hw_mutex_reg_base == NULL);
+	hw_spinlocks = kcalloc(lock_count, sizeof(int), GFP_KERNEL);
+	BUG_ON(hw_spinlocks == NULL);
+}
+
+static int remote_spinlock_init_address_hw(int id, _remote_spinlock_t *lock)
+{
+	/*
+	 * Optimistic locking.  Init only needs to be done once by the first
+	 * caller.  After that, serializing inits between different callers
+	 * is unnecessary.  The second check after the lock ensures init
+	 * wasn't previously completed by someone else before the lock could
+	 * be grabbed.
+	 */
+	if (!hw_mutex_reg_base) {
+		mutex_lock(&hw_map_init_lock);
+		if (!hw_mutex_reg_base)
+			find_and_init_hw_mutex();
+		mutex_unlock(&hw_map_init_lock);
+	}
+
+	if (id >= lock_count)
+		return -EINVAL;
+
+	*lock = hw_mutex_reg_base + lock_offset + id * lock_size;
+	return 0;
+}
+
+static unsigned int remote_spinlock_get_lock_id(raw_remote_spinlock_t *lock)
+{
+	unsigned int id;
+
+	BUG_ON((uintptr_t)lock < (uintptr_t)hw_mutex_reg_base);
+	BUG_ON(((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base) < lock_offset);
+
+	id = (unsigned int)((uintptr_t)lock - (uintptr_t)hw_mutex_reg_base -
+			lock_offset) / lock_size;
+	BUG_ON(id >= lock_count);
+	return id;
+}
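
remote_spinlock_get_lock_id() inverts the address computation from remote_spinlock_init_address_hw(): lock n lives at hw_mutex_reg_base + lock_offset + n * lock_size, so the id is the byte offset divided by the stride. With made-up geometry (a 0x1000-byte SFPB block carrying 8 locks, so lock_size = 0x200 and lock_offset = 0), the lock at base + 0x600 maps back to id 3:

	/* Illustration only; the block geometry here is hypothetical. */
	static unsigned int example_lock_id(void __iomem *lock, void __iomem *base)
	{
		const u32 lock_offset = 0;
		const u32 lock_size = 0x1000 / 8;	/* 0x200 bytes per lock */

		return (lock - base - lock_offset) / lock_size;
	}
	/* example_lock_id(base + 0x600, base) == 3 */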
+
+static void __raw_remote_sfpb_spin_lock(raw_remote_spinlock_t *lock)
+{
+	int owner;
+	unsigned int id = remote_spinlock_get_lock_id(lock);
+
+	/*
+	 * Wait for other local processor task to release spinlock if it
+	 * Wait for another local-processor task to release the spinlock if it
+	 * test cases since the local spinlock will prevent this when using the
+	 * public APIs.
+	 */
+	while (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
+		;
+
+	/* acquire remote spinlock */
+	do {
+		writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
+		/*
+		 * Ensure the ordering of read/write operations to ensure the
+		 * proper ownership of the lock during the lock/unlock
+		 * operations
+		 */
+		smp_mb();
+		owner = readl_relaxed(lock);
+		hw_spinlocks[id] = owner;
+	} while (owner != SPINLOCK_TOKEN_APPS);
+}
+
+static int __raw_remote_sfpb_spin_trylock(raw_remote_spinlock_t *lock)
+{
+	int owner;
+	unsigned int id = remote_spinlock_get_lock_id(lock);
+	/*
+	 * If the local processor owns the spinlock, return failure. This can
+	 * only happen in test cases since the local spinlock will prevent this
+	 * when using the public APIs.
+	 */
+	if (readl_relaxed(lock) == SPINLOCK_TOKEN_APPS)
+		return 0;
+
+	writel_relaxed(SPINLOCK_TOKEN_APPS, lock);
+	/*
+	 * Ensure the ordering of read/write operations to ensure the
+	 * proper ownership of the lock during the lock/unlock operations
+	 */
+	smp_mb();
+	owner = readl_relaxed(lock);
+	hw_spinlocks[id] = owner;
+	return owner == SPINLOCK_TOKEN_APPS;
+}
+
+static void __raw_remote_sfpb_spin_unlock(raw_remote_spinlock_t *lock)
+{
+	int lock_owner;
+
+	lock_owner = readl_relaxed(lock);
+	if (lock_owner != SPINLOCK_TOKEN_APPS) {
+		pr_err("%s: spinlock not owned by Apps (actual owner is %d)\n",
+				__func__, lock_owner);
+	}
+
+	writel_relaxed(0, lock);
+	/*
+	 * Ensure the ordering of read/write operations to ensure the
+	 * proper ownership of the lock during the lock/unlock operations
+	 */
+	smp_mb();
+}
+
+static void __raw_remote_sfpb_spin_lock_rlock_id(raw_remote_spinlock_t *lock,
+						 uint32_t tid)
+{
+	if (unlikely(!tid)) {
+		pr_err("%s: unsupported rlock tid=0\n", __func__);
+		BUG();
+	}
+
+	do {
+		writel_relaxed(tid, lock);
+		/*
+		 * Ensure the ordering of read/write operations to ensure the
+		 * proper ownership of the lock during the lock/unlock
+		 * operations
+		 */
+		smp_mb();
+	} while (readl_relaxed(lock) != tid);
+}
+
+static void __raw_remote_sfpb_spin_unlock_rlock(raw_remote_spinlock_t *lock)
+{
+	writel_relaxed(0, lock);
+	/*
+	 * Ensure the ordering of read/write operations to ensure the
+	 * proper ownership of the lock during the lock/unlock operations
+	 */
+	smp_mb();
+}
+
+static int __raw_remote_sfpb_get_hw_spinlocks_element(
+		raw_remote_spinlock_t *lock)
+{
+	return hw_spinlocks[remote_spinlock_get_lock_id(lock)];
+}
+
+/* end sfpb implementation -------------------------------------------------- */
+
+/* common spinlock API ------------------------------------------------------ */
+/**
+ * Release spinlock if it is owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * the spinlock has crashed and the spinlock must be released.
+ *
+ * @lock: lock structure
+ * @pid: processor ID of processor to release
+ */
+static int __raw_remote_gen_spin_release(raw_remote_spinlock_t *lock,
+		uint32_t pid)
+{
+	int ret = 1;
+
+	/*
+	 * Since 0 is reserved for an empty lock and the PIDs start at 0, the
+	 * value PID + 1 is written to the lock.
+	 */
+	if (readl_relaxed(&lock->lock) == (pid + 1)) {
+		writel_relaxed(0, &lock->lock);
+		/*
+		 * Ensure the ordering of read/write operations to ensure the
+		 * proper ownership of the lock during the lock/unlock
+		 * operations
+		 */
+		wmb();
+		ret = 0;
+	}
+	return ret;
+}
+
+/**
+ * Return owner of the spinlock.
+ *
+ * @lock: pointer to lock structure
+ * @returns: >= 0, PID of the owner; < 0 on error
+ *
+ * Used for testing.  PIDs are assumed to be 31 bits or less.
+ */
+static int __raw_remote_gen_spin_owner(raw_remote_spinlock_t *lock)
+{
+	int owner;
+
+	/*
+	 * Ensure the ordering of read/write operations to ensure the
+	 * proper ownership of the lock during the lock/unlock operations
+	 */
+	rmb();
+
+	owner = readl_relaxed(&lock->lock);
+	if (owner)
+		return owner - 1;
+	else
+		return -ENODEV;
+}
+
+
+static int dt_node_is_valid(const struct device_node *node)
+{
+	const char *status;
+	int statlen;
+
+	status = of_get_property(node, "status", &statlen);
+	if (status == NULL)
+		return 1;
+
+	if (statlen > 0) {
+		if (!strcmp(status, "okay") || !strcmp(status, "ok"))
+			return 1;
+	}
+
+	return 0;
+}
+
+static void initialize_ops(void)
+{
+	struct device_node *node;
+
+	/*
+	 * of_find_compatible_node() returns a valid pointer even if
+	 * the status property is "disabled", so the validity needs
+	 * to be checked
+	 */
+	node = of_find_compatible_node(NULL, NULL, sfpb_compatible_string);
+	if (node && dt_node_is_valid(node)) {
+		current_ops.lock = __raw_remote_sfpb_spin_lock;
+		current_ops.unlock = __raw_remote_sfpb_spin_unlock;
+		current_ops.trylock = __raw_remote_sfpb_spin_trylock;
+		current_ops.release = __raw_remote_gen_spin_release;
+		current_ops.owner = __raw_remote_gen_spin_owner;
+		current_ops.lock_rlock_id =
+				__raw_remote_sfpb_spin_lock_rlock_id;
+		current_ops.unlock_rlock = __raw_remote_sfpb_spin_unlock_rlock;
+		current_ops.get_hw_spinlocks_element =
+			__raw_remote_sfpb_get_hw_spinlocks_element;
+		is_hw_lock_type = 1;
+		return;
+	}
+
+	node = of_find_compatible_node(NULL, NULL, ldrex_compatible_string);
+	if (node && dt_node_is_valid(node)) {
+		current_ops.lock = __raw_remote_ex_spin_lock;
+		current_ops.unlock = __raw_remote_ex_spin_unlock;
+		current_ops.trylock = __raw_remote_ex_spin_trylock;
+		current_ops.release = __raw_remote_gen_spin_release;
+		current_ops.owner = __raw_remote_gen_spin_owner;
+		is_hw_lock_type = 0;
+		return;
+	}
+
+	current_ops.lock = __raw_remote_ex_spin_lock;
+	current_ops.unlock = __raw_remote_ex_spin_unlock;
+	current_ops.trylock = __raw_remote_ex_spin_trylock;
+	current_ops.release = __raw_remote_gen_spin_release;
+	current_ops.owner = __raw_remote_gen_spin_owner;
+	is_hw_lock_type = 0;
+	pr_warn("Falling back to LDREX remote spinlock implementation\n");
+}
+
+/**
+ * Release all spinlocks owned by @pid.
+ *
+ * This is only to be used for situations where the processor owning
+ * spinlocks has crashed and the spinlocks must be released.
+ *
+ * @pid - processor ID of processor to release
+ */
+static void remote_spin_release_all_locks(uint32_t pid, int count)
+{
+	int n;
+	_remote_spinlock_t lock;
+
+	if (pid >= REMOTE_SPINLOCK_NUM_PID) {
+		pr_err("%s: Unsupported PID %d\n", __func__, pid);
+		return;
+	}
+
+	for (n = 0; n < count; ++n) {
+		if (remote_spinlock_init_address(n, &lock) == 0)
+			_remote_spin_release(&lock, pid);
+	}
+}
+
+void _remote_spin_release_all(uint32_t pid)
+{
+	remote_spin_release_all_locks(pid, lock_count);
+}
+
+#define SMEM_SPINLOCK_COUNT 8
+#define SMEM_SPINLOCK_ARRAY_SIZE (SMEM_SPINLOCK_COUNT * sizeof(uint32_t))
+
+static int remote_spinlock_init_address_smem(int id, _remote_spinlock_t *lock)
+{
+	_remote_spinlock_t spinlock_start;
+
+	if (id >= SMEM_SPINLOCK_COUNT)
+		return -EINVAL;
+
+	spinlock_start = smem_find(SMEM_SPINLOCK_ARRAY,
+				    SMEM_SPINLOCK_ARRAY_SIZE,
+				    0,
+				    SMEM_ANY_HOST_FLAG);
+	if (spinlock_start == NULL)
+		return -ENXIO;
+
+	*lock = spinlock_start + id;
+
+	lock_count = SMEM_SPINLOCK_COUNT;
+
+	return 0;
+}
+
+static int remote_spinlock_init_address(int id, _remote_spinlock_t *lock)
+{
+	if (is_hw_lock_type)
+		return remote_spinlock_init_address_hw(id, lock);
+	else
+		return remote_spinlock_init_address_smem(id, lock);
+}
+
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+	BUG_ON(id == NULL);
+
+	/*
+	 * Optimistic locking.  Init only needs to be done once by the first
+	 * caller.  After that, serializing inits between different callers
+	 * is unnecessary.  The second check after the lock ensures init
+	 * wasn't previously completed by someone else before the lock could
+	 * be grabbed.
+	 */
+	if (!current_ops.lock) {
+		mutex_lock(&ops_init_lock);
+		if (!current_ops.lock)
+			initialize_ops();
+		mutex_unlock(&ops_init_lock);
+	}
+
+	if (id[0] == 'S' && id[1] == ':') {
+		/* Single-digit lock ID follows "S:" */
+		BUG_ON(id[3] != '\0');
+
+		return remote_spinlock_init_address((((uint8_t)id[2])-'0'),
+			lock);
+	} else {
+		return -EINVAL;
+	}
+}
+
+/*
+ * lock comes in as a pointer to a pointer to the lock location, so it must
+ * be dereferenced and cast to the right type for the actual lock
+ * implementation functions
+ */
+void _remote_spin_lock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.lock))
+		BUG();
+	current_ops.lock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_lock);
+
+void _remote_spin_unlock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.unlock))
+		BUG();
+	current_ops.unlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock);
+
+int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.trylock))
+		BUG();
+	return current_ops.trylock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_trylock);
+
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+	if (unlikely(!current_ops.release))
+		BUG();
+	return current_ops.release((raw_remote_spinlock_t *)(*lock), pid);
+}
+EXPORT_SYMBOL(_remote_spin_release);
+
+int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.owner))
+		BUG();
+	return current_ops.owner((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_owner);
+
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid)
+{
+	if (unlikely(!current_ops.lock_rlock_id))
+		BUG();
+	current_ops.lock_rlock_id((raw_remote_spinlock_t *)(*lock), tid);
+}
+EXPORT_SYMBOL(_remote_spin_lock_rlock_id);
+
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock)
+{
+	if (unlikely(!current_ops.unlock_rlock))
+		BUG();
+	current_ops.unlock_rlock((raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_unlock_rlock);
+
+int _remote_spin_get_hw_spinlocks_element(_remote_spinlock_t *lock)
+{
+	return current_ops.get_hw_spinlocks_element(
+			(raw_remote_spinlock_t *)(*lock));
+}
+EXPORT_SYMBOL(_remote_spin_get_hw_spinlocks_element);
+
+/* end common spinlock API -------------------------------------------------- */
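
Putting the pieces together, the low-level entry points above are used in the pattern below: resolve an "S:<n>" identifier once to obtain the lock address, then lock and unlock through the selected ops table. This is only a sketch of the calling convention visible in this file; the public remote_spin_lock() wrappers that also take a local spinlock are defined elsewhere:

	#include <linux/msm_remote_spinlock.h>

	static int example_use_remote_lock(void)
	{
		_remote_spinlock_t lock;
		int rc;

		/* "S:7" selects single-digit lock id 7 (see _remote_spin_lock_init) */
		rc = _remote_spin_lock_init("S:7", &lock);
		if (rc)
			return rc;

		_remote_spin_lock(&lock);
		/* ... touch the state shared with the remote processor ... */
		_remote_spin_unlock(&lock);

		return 0;
	}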
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index 130cb21..7f1ac30 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -24,6 +24,7 @@
 config CORESIGHT_LINK_AND_SINK_TMC
 	bool "Coresight generic TMC driver"
 	depends on CORESIGHT_LINKS_AND_SINKS
+	select CORESIGHT_CSR
 	help
 	  This enables support for the Trace Memory Controller driver.
 	  Depending on its configuration the device can act as a link (embedded
@@ -83,10 +84,69 @@
 	depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
 	select CORESIGHT_LINKS_AND_SINKS
 	select STM
+	select CORESIGHT_OST
 	help
 	  This driver provides support for hardware assisted software
 	  instrumentation based tracing. This is primarily used for
 	  logging useful software events or data coming from various entities
 	  in the system, possibly running different OSs
 
+config CORESIGHT_OST
+	bool "CoreSight OST framework"
+	depends on CORESIGHT_STM
+	help
+	  This enables support for OST packets in STM.
+
+config CORESIGHT_TPDA
+	bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
+	help
+	  This driver provides support for configuring the aggregator. This is
+	  primarily useful for pulling the data sets from one or more
+	  attached monitors and pushing the resultant data out. Multiple
+	  monitors are connected to different input ports of the TPDA.
+
+config CORESIGHT_TPDM
+	bool "CoreSight Trace, Profiling & Diagnostics Monitor driver"
+	help
+	  This driver provides support for configuring monitors. Monitors are
+	  primarily responsible for data set collection and support the
+	  ability to collect any permutation of data set types. Monitors are
+	  also responsible for interaction with system cross triggering.
+
+config CORESIGHT_TPDM_DEFAULT_ENABLE
+	bool "Turn on TPDM tracing by default"
+	depends on CORESIGHT_TPDM
+	help
+	  Turns on CoreSight TPDM tracing for different data set types by
+	  default. Otherwise, tracing is disabled by default but can be
+	  enabled via sysfs.
+
+	  If unsure, say 'N' here to avoid potential power and performance
+	  penalty.
+
+config CORESIGHT_CTI
+	bool "CoreSight Cross Trigger Interface driver"
+	help
+	  This driver provides support for the Cross Trigger Interface, which
+	  is used to input or output (i.e. pass) cross-trigger events from one
+	  hardware component to another. It can also be used to pass
+	  software-generated events.
+
+config CORESIGHT_CSR
+	bool "CoreSight Slave Register driver"
+	help
+	  This driver provides support for the CoreSight Slave Register block,
+	  which hosts miscellaneous configuration registers. These registers
+	  can be used to control various CoreSight configurations.
+
+config CORESIGHT_HWEVENT
+	bool "CoreSight Hardware Event driver"
+	depends on CORESIGHT_STM
+	select CORESIGHT_CSR
+	help
+	  This driver provides support for monitoring and tracing CoreSight
+	  hardware events across the STM interface. It configures the CoreSight
+	  hardware event mux control registers to select hardware events
+	  based on user input.
 endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index af480d9..7019968 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -16,3 +16,9 @@
 					coresight-etm4x-sysfs.o
 obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
 obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
+obj-$(CONFIG_CORESIGHT_OST) += coresight-ost.o
+obj-$(CONFIG_CORESIGHT_TPDA) += coresight-tpda.o
+obj-$(CONFIG_CORESIGHT_TPDM) += coresight-tpdm.o
+obj-$(CONFIG_CORESIGHT_CTI) += coresight-cti.o
+obj-$(CONFIG_CORESIGHT_CSR) += coresight-csr.o
+obj-$(CONFIG_CORESIGHT_HWEVENT) += coresight-hwevent.o
diff --git a/drivers/hwtracing/coresight/coresight-csr.c b/drivers/hwtracing/coresight/coresight-csr.c
new file mode 100644
index 0000000..1ec73a5
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-csr.c
@@ -0,0 +1,263 @@
+/* Copyright (c) 2012-2013, 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define csr_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define csr_readl(drvdata, off)		__raw_readl(drvdata->base + off)
+
+#define CSR_LOCK(drvdata)						\
+do {									\
+	mb(); /* ensure configuration take effect before we lock it */	\
+	csr_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define CSR_UNLOCK(drvdata)						\
+do {									\
+	csr_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb(); /* ensure unlock take effect before we configure */	\
+} while (0)
+
+#define CSR_SWDBGPWRCTRL	(0x000)
+#define CSR_SWDBGPWRACK		(0x004)
+#define CSR_SWSPADREG0		(0x008)
+#define CSR_SWSPADREG1		(0x00C)
+#define CSR_STMTRANSCTRL	(0x010)
+#define CSR_STMAWIDCTRL		(0x014)
+#define CSR_STMCHNOFST0		(0x018)
+#define CSR_STMCHNOFST1		(0x01C)
+#define CSR_STMEXTHWCTRL0	(0x020)
+#define CSR_STMEXTHWCTRL1	(0x024)
+#define CSR_STMEXTHWCTRL2	(0x028)
+#define CSR_STMEXTHWCTRL3	(0x02C)
+#define CSR_USBBAMCTRL		(0x030)
+#define CSR_USBFLSHCTRL		(0x034)
+#define CSR_TIMESTAMPCTRL	(0x038)
+#define CSR_AOTIMEVAL0		(0x03C)
+#define CSR_AOTIMEVAL1		(0x040)
+#define CSR_QDSSTIMEVAL0	(0x044)
+#define CSR_QDSSTIMEVAL1	(0x048)
+#define CSR_QDSSTIMELOAD0	(0x04C)
+#define CSR_QDSSTIMELOAD1	(0x050)
+#define CSR_DAPMSAVAL		(0x054)
+#define CSR_QDSSCLKVOTE		(0x058)
+#define CSR_QDSSCLKIPI		(0x05C)
+#define CSR_QDSSPWRREQIGNORE	(0x060)
+#define CSR_QDSSSPARE		(0x064)
+#define CSR_IPCAT		(0x068)
+#define CSR_BYTECNTVAL		(0x06C)
+
+#define BLKSIZE_256		0
+#define BLKSIZE_512		1
+#define BLKSIZE_1024		2
+#define BLKSIZE_2048		3
+
+struct csr_drvdata {
+	void __iomem		*base;
+	phys_addr_t		pbase;
+	struct device		*dev;
+	struct coresight_device	*csdev;
+	uint32_t		blksize;
+};
+
+static struct csr_drvdata *csrdrvdata;
+
+void msm_qdss_csr_enable_bam_to_usb(void)
+{
+	struct csr_drvdata *drvdata = csrdrvdata;
+	uint32_t usbbamctrl, usbflshctrl;
+
+	CSR_UNLOCK(drvdata);
+
+	usbbamctrl = csr_readl(drvdata, CSR_USBBAMCTRL);
+	usbbamctrl = (usbbamctrl & ~0x3) | drvdata->blksize;
+	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl = (usbflshctrl & ~0x3FFFC) | (0xFFFF << 2);
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+	usbflshctrl |= 0x2;
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	usbbamctrl |= 0x4;
+	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
+
+	CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(msm_qdss_csr_enable_bam_to_usb);
+
+void msm_qdss_csr_disable_bam_to_usb(void)
+{
+	struct csr_drvdata *drvdata = csrdrvdata;
+	uint32_t usbbamctrl;
+
+	CSR_UNLOCK(drvdata);
+
+	usbbamctrl = csr_readl(drvdata, CSR_USBBAMCTRL);
+	usbbamctrl &= (~0x4);
+	csr_writel(drvdata, usbbamctrl, CSR_USBBAMCTRL);
+
+	CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(msm_qdss_csr_disable_bam_to_usb);
+
+void msm_qdss_csr_disable_flush(void)
+{
+	struct csr_drvdata *drvdata = csrdrvdata;
+	uint32_t usbflshctrl;
+
+	CSR_UNLOCK(drvdata);
+
+	usbflshctrl = csr_readl(drvdata, CSR_USBFLSHCTRL);
+	usbflshctrl &= ~0x2;
+	csr_writel(drvdata, usbflshctrl, CSR_USBFLSHCTRL);
+
+	CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(msm_qdss_csr_disable_flush);
+
+int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val)
+{
+	struct csr_drvdata *drvdata = csrdrvdata;
+	int ret = 0;
+
+	CSR_UNLOCK(drvdata);
+
+	if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL0))
+		csr_writel(drvdata, val, CSR_STMEXTHWCTRL0);
+	else if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL1))
+		csr_writel(drvdata, val, CSR_STMEXTHWCTRL1);
+	else if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL2))
+		csr_writel(drvdata, val, CSR_STMEXTHWCTRL2);
+	else if (addr == (drvdata->pbase + CSR_STMEXTHWCTRL3))
+		csr_writel(drvdata, val, CSR_STMEXTHWCTRL3);
+	else
+		ret = -EINVAL;
+
+	CSR_LOCK(drvdata);
+
+	return ret;
+}
+EXPORT_SYMBOL(coresight_csr_hwctrl_set);
+
+void coresight_csr_set_byte_cntr(uint32_t count)
+{
+	struct csr_drvdata *drvdata = csrdrvdata;
+
+	CSR_UNLOCK(drvdata);
+
+	csr_writel(drvdata, count, CSR_BYTECNTVAL);
+
+	/* make sure byte count value is written */
+	mb();
+
+	CSR_LOCK(drvdata);
+}
+EXPORT_SYMBOL(coresight_csr_set_byte_cntr);
+
+static int csr_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct csr_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "csr-base");
+	if (!res)
+		return -ENODEV;
+	drvdata->pbase = res->start;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,blk-size",
+			&drvdata->blksize);
+	if (ret)
+		drvdata->blksize = BLKSIZE_256;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_NONE;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	/* Store the driver data pointer for use in exported functions */
+	csrdrvdata = drvdata;
+	dev_info(dev, "CSR initialized\n");
+	return 0;
+}
+
+static int csr_remove(struct platform_device *pdev)
+{
+	struct csr_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static const struct of_device_id csr_match[] = {
+	{.compatible = "qcom,coresight-csr"},
+	{}
+};
+
+static struct platform_driver csr_driver = {
+	.probe          = csr_probe,
+	.remove         = csr_remove,
+	.driver         = {
+		.name   = "coresight-csr",
+		.owner	= THIS_MODULE,
+		.of_match_table = csr_match,
+	},
+};
+
+static int __init csr_init(void)
+{
+	return platform_driver_register(&csr_driver);
+}
+module_init(csr_init);
+
+static void __exit csr_exit(void)
+{
+	platform_driver_unregister(&csr_driver);
+}
+module_exit(csr_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight CSR driver");
diff --git a/drivers/hwtracing/coresight/coresight-cti.c b/drivers/hwtracing/coresight/coresight-cti.c
new file mode 100644
index 0000000..a2ce81a
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-cti.c
@@ -0,0 +1,1566 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/cpu_pm.h>
+#include <linux/topology.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include <linux/coresight-cti.h>
+
+#include "coresight-priv.h"
+
+#define cti_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define cti_readl(drvdata, off)		__raw_readl(drvdata->base + off)
+
+#define CTI_LOCK(drvdata)						\
+do {									\
+	mb(); /* ensure configuration takes effect before we lock it */	\
+	cti_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define CTI_UNLOCK(drvdata)						\
+do {									\
+	cti_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb(); /* ensure unlock takes effect before we configure */	\
+} while (0)
+
+#define CTICONTROL		(0x000)
+#define CTIINTACK		(0x010)
+#define CTIAPPSET		(0x014)
+#define CTIAPPCLEAR		(0x018)
+#define CTIAPPPULSE		(0x01C)
+#define CTIINEN(n)		(0x020 + (n * 4))
+#define CTIOUTEN(n)		(0x0A0 + (n * 4))
+#define CTITRIGINSTATUS		(0x130)
+#define CTITRIGOUTSTATUS	(0x134)
+#define CTICHINSTATUS		(0x138)
+#define CTICHOUTSTATUS		(0x13C)
+#define CTIGATE			(0x140)
+#define ASICCTL			(0x144)
+#define ITCHINACK		(0xEDC)
+#define ITTRIGINACK		(0xEE0)
+#define ITCHOUT			(0xEE4)
+#define ITTRIGOUT		(0xEE8)
+#define ITCHOUTACK		(0xEEC)
+#define ITTRIGOUTACK		(0xEF0)
+#define ITCHIN			(0xEF4)
+#define ITTRIGIN		(0xEF8)
+
+#define CTI_MAX_TRIGGERS	(8)
+#define CTI_MAX_CHANNELS	(4)
+#define AFFINITY_LEVEL_L2	1
+
+#define to_cti_drvdata(c) container_of(c, struct cti_drvdata, cti)
+
+struct cti_state {
+	unsigned int cticontrol;
+	unsigned int ctiappset;
+	unsigned int ctigate;
+	unsigned int ctiinen[CTI_MAX_TRIGGERS];
+	unsigned int ctiouten[CTI_MAX_TRIGGERS];
+};
+
+struct cti_pctrl {
+	struct pinctrl			*pctrl;
+	int				trig;
+};
+
+struct cti_drvdata {
+	void __iomem			*base;
+	struct device			*dev;
+	struct coresight_device		*csdev;
+	struct clk			*clk;
+	spinlock_t			spinlock;
+	struct mutex			mutex;
+	struct coresight_cti		cti;
+	int				refcnt;
+	int				cpu;
+	bool				cti_save;
+	bool				cti_hwclk;
+	bool				l2_off;
+	struct cti_state		*state;
+	struct cti_pctrl		*gpio_trigin;
+	struct cti_pctrl		*gpio_trigout;
+};
+
+static struct notifier_block cti_cpu_pm_notifier;
+static int registered;
+
+static LIST_HEAD(cti_list);
+static DEFINE_MUTEX(cti_lock);
+#ifdef CONFIG_CORESIGHT_CTI_SAVE_DISABLE
+static int cti_save_disable = 1;
+#else
+static int cti_save_disable;
+#endif
+
+static int cti_verify_trigger_bound(int trig)
+{
+	if (trig < 0 || trig >= CTI_MAX_TRIGGERS)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cti_verify_channel_bound(int ch)
+{
+	if (ch < 0 || ch >= CTI_MAX_CHANNELS)
+		return -EINVAL;
+
+	return 0;
+}
+
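+/*
+ * A CTI whose context is saved across power collapse cannot be accessed
+ * while its cluster is powered down (l2_off is set); callers must either
+ * fail or fall back to the saved register state.
+ */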
+static int cti_cpu_verify_access(struct cti_drvdata *drvdata)
+{
+	if (drvdata->cti_save && drvdata->l2_off)
+		return -EPERM;
+
+	return 0;
+}
+
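+/*
+ * Snapshot the control, app-set, gate and per-trigger enable registers of
+ * every context-save-enabled CTI in the local cluster and mark them
+ * inaccessible via l2_off. Invoked from the CPU PM notifier on cluster
+ * power-collapse entry.
+ */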
+void coresight_cti_ctx_save(void)
+{
+	struct cti_drvdata *drvdata;
+	struct coresight_cti *cti;
+	int trig, cpuid, cpu;
+	unsigned long flag;
+
+	/*
+	 * Explicitly check and return to avoid latency associated with
+	 * traversing the linked list of all CTIs and checking for their
+	 * respective cti_save flag.
+	 */
+	if (cti_save_disable)
+		return;
+
+	cpu = raw_smp_processor_id();
+
+	list_for_each_entry(cti, &cti_list, link) {
+		drvdata = to_cti_drvdata(cti);
+		if (!drvdata->cti_save)
+			continue;
+
+		for_each_cpu(cpuid, topology_core_cpumask(cpu)) {
+			if (drvdata->cpu == cpuid)
+				goto out;
+		}
+		continue;
+out:
+		spin_lock_irqsave(&drvdata->spinlock, flag);
+		drvdata->l2_off = true;
+		drvdata->state->cticontrol = cti_readl(drvdata, CTICONTROL);
+		drvdata->state->ctiappset = cti_readl(drvdata, CTIAPPSET);
+		drvdata->state->ctigate = cti_readl(drvdata, CTIGATE);
+		for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+			drvdata->state->ctiinen[trig] =
+				cti_readl(drvdata, CTIINEN(trig));
+			drvdata->state->ctiouten[trig] =
+				cti_readl(drvdata, CTIOUTEN(trig));
+		}
+		spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	}
+}
+EXPORT_SYMBOL(coresight_cti_ctx_save);
+
+void coresight_cti_ctx_restore(void)
+{
+	struct cti_drvdata *drvdata;
+	struct coresight_cti *cti;
+	int trig, cpuid, cpu;
+	unsigned long flag;
+
+	/*
+	 * Explicitly check and return to avoid latency associated with
+	 * traversing the linked list of all CTIs and checking for their
+	 * respective cti_save flag.
+	 */
+	if (cti_save_disable)
+		return;
+
+	cpu = raw_smp_processor_id();
+
+	list_for_each_entry(cti, &cti_list, link) {
+		drvdata = to_cti_drvdata(cti);
+		if (!drvdata->cti_save)
+			continue;
+
+		for_each_cpu(cpuid, topology_core_cpumask(cpu)) {
+			if (drvdata->cpu == cpuid)
+				goto out;
+		}
+		continue;
+out:
+		spin_lock_irqsave(&drvdata->spinlock, flag);
+		CTI_UNLOCK(drvdata);
+		cti_writel(drvdata, drvdata->state->ctiappset, CTIAPPSET);
+		cti_writel(drvdata, drvdata->state->ctigate, CTIGATE);
+		for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+			cti_writel(drvdata, drvdata->state->ctiinen[trig],
+				   CTIINEN(trig));
+			cti_writel(drvdata, drvdata->state->ctiouten[trig],
+				   CTIOUTEN(trig));
+		}
+		cti_writel(drvdata, drvdata->state->cticontrol, CTICONTROL);
+		CTI_LOCK(drvdata);
+		drvdata->l2_off = false;
+		spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	}
+}
+EXPORT_SYMBOL(coresight_cti_ctx_restore);
+
+static void cti_enable(struct cti_drvdata *drvdata)
+{
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, 0x1, CTICONTROL);
+
+	CTI_LOCK(drvdata);
+}
+
+int cti_trigin_gpio_enable(struct cti_drvdata *drvdata)
+{
+	int ret;
+	struct pinctrl *pctrl;
+	struct pinctrl_state *pctrl_state;
+
+	if (drvdata->gpio_trigin->pctrl)
+		return 0;
+
+	pctrl = devm_pinctrl_get(drvdata->dev);
+	if (IS_ERR(pctrl)) {
+		dev_err(drvdata->dev, "pinctrl get failed\n");
+		return PTR_ERR(pctrl);
+	}
+
+	pctrl_state = pinctrl_lookup_state(pctrl, "cti-trigin-pctrl");
+	if (IS_ERR(pctrl_state)) {
+		dev_err(drvdata->dev,
+			"pinctrl get state failed\n");
+		ret = PTR_ERR(pctrl_state);
+		goto err;
+	}
+
+	ret = pinctrl_select_state(pctrl, pctrl_state);
+	if (ret) {
+		dev_err(drvdata->dev,
+			"pinctrl enable state failed\n");
+		goto err;
+	}
+
+	drvdata->gpio_trigin->pctrl = pctrl;
+	return 0;
+err:
+	devm_pinctrl_put(pctrl);
+	return ret;
+}
+
+int cti_trigout_gpio_enable(struct cti_drvdata *drvdata)
+{
+	int ret;
+	struct pinctrl *pctrl;
+	struct pinctrl_state *pctrl_state;
+
+	if (drvdata->gpio_trigout->pctrl)
+		return 0;
+
+	pctrl = devm_pinctrl_get(drvdata->dev);
+	if (IS_ERR(pctrl)) {
+		dev_err(drvdata->dev, "pinctrl get failed\n");
+		return PTR_ERR(pctrl);
+	}
+
+	pctrl_state = pinctrl_lookup_state(pctrl, "cti-trigout-pctrl");
+	if (IS_ERR(pctrl_state)) {
+		dev_err(drvdata->dev,
+			"pinctrl get state failed\n");
+		ret = PTR_ERR(pctrl_state);
+		goto err;
+	}
+
+	ret = pinctrl_select_state(pctrl, pctrl_state);
+	if (ret) {
+		dev_err(drvdata->dev,
+			"pinctrl enable state failed\n");
+		goto err;
+	}
+
+	drvdata->gpio_trigout->pctrl = pctrl;
+	return 0;
+err:
+	devm_pinctrl_put(pctrl);
+	return ret;
+}
+
+void cti_trigin_gpio_disable(struct cti_drvdata *drvdata)
+{
+	if (!drvdata->gpio_trigin->pctrl)
+		return;
+
+	devm_pinctrl_put(drvdata->gpio_trigin->pctrl);
+	drvdata->gpio_trigin->pctrl = NULL;
+}
+
+void cti_trigout_gpio_disable(struct cti_drvdata *drvdata)
+{
+	if (!drvdata->gpio_trigout->pctrl)
+		return;
+
+	devm_pinctrl_put(drvdata->gpio_trigout->pctrl);
+	drvdata->gpio_trigout->pctrl = NULL;
+}
+
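+/*
+ * Map trigger input @trig to channel @ch. The CTI block is enabled on the
+ * first mapping and refcnt counts the active trigger/channel mappings; an
+ * already-mapped pair leaves refcnt untouched.
+ */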
+static void __cti_map_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	if (drvdata->refcnt == 0)
+		cti_enable(drvdata);
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIINEN(trig));
+	if (ctien & (0x1 << ch))
+		goto out;
+	cti_writel(drvdata, (ctien | 0x1 << ch), CTIINEN(trig));
+
+	CTI_LOCK(drvdata);
+
+	drvdata->refcnt++;
+	return;
+out:
+	CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_trigger_bound(trig);
+	if (ret)
+		return ret;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->gpio_trigin->trig == trig) {
+		ret = cti_trigin_gpio_enable(drvdata);
+		if (ret)
+			goto err0;
+	}
+
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (drvdata->refcnt == 0) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err1;
+	}
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err2;
+
+	__cti_map_trigin(drvdata, trig, ch);
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	mutex_unlock(&drvdata->mutex);
+	return 0;
+err2:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	/*
+	 * We come here before refcnt is potentially modified in
+	 * __cti_map_trigin so it is safe to check it against 0 without
+	 * adjusting its value.
+	 */
+	if (drvdata->refcnt == 0)
+		clk_disable_unprepare(drvdata->clk);
+err1:
+	cti_trigin_gpio_disable(drvdata);
+err0:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigin);
+
+static void __cti_map_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	if (drvdata->refcnt == 0)
+		cti_enable(drvdata);
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIOUTEN(trig));
+	if (ctien & (0x1 << ch))
+		goto out;
+	cti_writel(drvdata, (ctien | 0x1 << ch), CTIOUTEN(trig));
+
+	CTI_LOCK(drvdata);
+
+	drvdata->refcnt++;
+	return;
+out:
+	CTI_LOCK(drvdata);
+}
+
+int coresight_cti_map_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_trigger_bound(trig);
+	if (ret)
+		return ret;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+
+	if (drvdata->gpio_trigout->trig == trig) {
+		ret = cti_trigout_gpio_enable(drvdata);
+		if (ret)
+			goto err0;
+	}
+
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (drvdata->refcnt == 0) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			goto err1;
+	}
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err2;
+
+	__cti_map_trigout(drvdata, trig, ch);
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	mutex_unlock(&drvdata->mutex);
+	return 0;
+err2:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	/*
+	 * We come here before refcnt is potentially incremented in
+	 * __cti_map_trigout so it is safe to check it against 0.
+	 */
+	if (drvdata->refcnt == 0)
+		clk_disable_unprepare(drvdata->clk);
+err1:
+	cti_trigout_gpio_disable(drvdata);
+err0:
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_map_trigout);
+
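+/*
+ * Disable the CTI block: clear any application-set channel triggers,
+ * restore the gate for all channels and clear the CTICONTROL enable bit.
+ */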
+static void cti_disable(struct cti_drvdata *drvdata)
+{
+	CTI_UNLOCK(drvdata);
+
+	/* Clear any pending triggers and ensure gate is enabled */
+	cti_writel(drvdata, BM(0, (CTI_MAX_CHANNELS - 1)), CTIAPPCLEAR);
+	cti_writel(drvdata, BM(0, (CTI_MAX_CHANNELS - 1)), CTIGATE);
+
+	cti_writel(drvdata, 0x0, CTICONTROL);
+
+	CTI_LOCK(drvdata);
+}
+
+static void __cti_unmap_trigin(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIINEN(trig));
+	if (!(ctien & (0x1 << ch)))
+		goto out;
+	cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIINEN(trig));
+
+	CTI_LOCK(drvdata);
+
+	drvdata->refcnt--;
+
+	if (drvdata->refcnt == 0)
+		cti_disable(drvdata);
+	return;
+out:
+	CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigin(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+	if (cti_verify_trigger_bound(trig))
+		return;
+	if (cti_verify_channel_bound(ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+	/*
+	 * Bail out so that clk_disable_unprepare() is not called when unmap
+	 * is invoked without a corresponding map call.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	__cti_unmap_trigin(drvdata, trig, ch);
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (drvdata->refcnt == 0)
+		clk_disable_unprepare(drvdata->clk);
+
+	if (drvdata->gpio_trigin->trig == trig)
+		cti_trigin_gpio_disable(drvdata);
+
+	mutex_unlock(&drvdata->mutex);
+	return;
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigin);
+
+static void __cti_unmap_trigout(struct cti_drvdata *drvdata, int trig, int ch)
+{
+	uint32_t ctien;
+
+	CTI_UNLOCK(drvdata);
+
+	ctien = cti_readl(drvdata, CTIOUTEN(trig));
+	if (!(ctien & (0x1 << ch)))
+		goto out;
+	cti_writel(drvdata, (ctien & ~(0x1 << ch)), CTIOUTEN(trig));
+
+	CTI_LOCK(drvdata);
+
+	drvdata->refcnt--;
+
+	if (drvdata->refcnt == 0)
+		cti_disable(drvdata);
+	return;
+out:
+	CTI_LOCK(drvdata);
+}
+
+void coresight_cti_unmap_trigout(struct coresight_cti *cti, int trig, int ch)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+	if (cti_verify_trigger_bound(trig))
+		return;
+	if (cti_verify_channel_bound(ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+	/*
+	 * Bail out so that clk_disable_unprepare() is not called when unmap
+	 * is invoked without a corresponding map call.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	__cti_unmap_trigout(drvdata, trig, ch);
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (drvdata->refcnt == 0)
+		clk_disable_unprepare(drvdata->clk);
+
+	if (drvdata->gpio_trigout->trig == trig)
+		cti_trigout_gpio_disable(drvdata);
+
+	mutex_unlock(&drvdata->mutex);
+	return;
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_unmap_trigout);
+
+static void __cti_reset(struct cti_drvdata *drvdata)
+{
+	int trig;
+
+	if (!drvdata->refcnt)
+		return;
+
+	CTI_UNLOCK(drvdata);
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		cti_writel(drvdata, 0, CTIINEN(trig));
+		cti_writel(drvdata, 0, CTIOUTEN(trig));
+	}
+
+	CTI_LOCK(drvdata);
+
+	cti_disable(drvdata);
+	drvdata->refcnt = 0;
+}
+
+void coresight_cti_reset(struct coresight_cti *cti)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+	int trig;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	mutex_lock(&drvdata->mutex);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+
+	__cti_reset(drvdata);
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		if (drvdata->gpio_trigin->trig == trig)
+			cti_trigin_gpio_disable(drvdata);
+		if (drvdata->gpio_trigout->trig == trig)
+			cti_trigout_gpio_disable(drvdata);
+	}
+
+	mutex_unlock(&drvdata->mutex);
+	return;
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	mutex_unlock(&drvdata->mutex);
+}
+EXPORT_SYMBOL(coresight_cti_reset);
+
+static int __cti_set_trig(struct cti_drvdata *drvdata, int ch)
+{
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (1 << ch), CTIAPPSET);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+int coresight_cti_set_trig(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_set_trig(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_set_trig);
+
+static void __cti_clear_trig(struct cti_drvdata *drvdata, int ch)
+{
+	if (!drvdata->refcnt)
+		return;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (1 << ch), CTIAPPCLEAR);
+
+	CTI_LOCK(drvdata);
+}
+
+void coresight_cti_clear_trig(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+	if (cti_verify_channel_bound(ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+
+	__cti_clear_trig(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+}
+EXPORT_SYMBOL(coresight_cti_clear_trig);
+
+static int __cti_pulse_trig(struct cti_drvdata *drvdata, int ch)
+{
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (1 << ch), CTIAPPPULSE);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_pulse_trig(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_pulse_trig);
+
+static int __cti_ack_trig(struct cti_drvdata *drvdata, int trig)
+{
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	cti_writel(drvdata, (0x1 << trig), CTIINTACK);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+int coresight_cti_ack_trig(struct coresight_cti *cti, int trig)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_trigger_bound(trig);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_ack_trig(drvdata, trig);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_ack_trig);
+
+static int __cti_enable_gate(struct cti_drvdata *drvdata, int ch)
+{
+	uint32_t ctigate;
+
+	if (!drvdata->refcnt)
+		return -EINVAL;
+
+	CTI_UNLOCK(drvdata);
+
+	ctigate = cti_readl(drvdata, CTIGATE);
+	cti_writel(drvdata, (ctigate & ~(1 << ch)), CTIGATE);
+
+	CTI_LOCK(drvdata);
+
+	return 0;
+}
+
+int coresight_cti_enable_gate(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	int ret;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return -EINVAL;
+	ret = cti_verify_channel_bound(ch);
+	if (ret)
+		return ret;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	ret = cti_cpu_verify_access(drvdata);
+	if (ret)
+		goto err;
+
+	ret = __cti_enable_gate(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+	return ret;
+}
+EXPORT_SYMBOL(coresight_cti_enable_gate);
+
+static void __cti_disable_gate(struct cti_drvdata *drvdata, int ch)
+{
+	uint32_t ctigate;
+
+	if (!drvdata->refcnt)
+		return;
+
+	CTI_UNLOCK(drvdata);
+
+	ctigate = cti_readl(drvdata, CTIGATE);
+	cti_writel(drvdata, (ctigate | (1 << ch)), CTIGATE);
+
+	CTI_LOCK(drvdata);
+}
+
+void coresight_cti_disable_gate(struct coresight_cti *cti, int ch)
+{
+	struct cti_drvdata *drvdata;
+	unsigned long flag;
+
+	if (IS_ERR_OR_NULL(cti))
+		return;
+	if (cti_verify_channel_bound(ch))
+		return;
+
+	drvdata = to_cti_drvdata(cti);
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (cti_cpu_verify_access(drvdata))
+		goto err;
+
+	__cti_disable_gate(drvdata, ch);
+err:
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+}
+EXPORT_SYMBOL(coresight_cti_disable_gate);
+
+struct coresight_cti *coresight_cti_get(const char *name)
+{
+	struct coresight_cti *cti;
+
+	mutex_lock(&cti_lock);
+	list_for_each_entry(cti, &cti_list, link) {
+		if (!strcmp(cti->name, name)) {
+			mutex_unlock(&cti_lock);
+			return cti;
+		}
+	}
+	mutex_unlock(&cti_lock);
+
+	return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL(coresight_cti_get);
+
+void coresight_cti_put(struct coresight_cti *cti)
+{
+}
+EXPORT_SYMBOL(coresight_cti_put);
+
+static ssize_t cti_show_trigin(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long trig, ch, flag;
+	uint32_t ctien;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		spin_lock_irqsave(&drvdata->spinlock, flag);
+		if (!cti_cpu_verify_access(drvdata))
+			ctien = cti_readl(drvdata, CTIINEN(trig));
+		else
+			ctien = drvdata->state->ctiinen[trig];
+		spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+		for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+			if (ctien & (1 << ch)) {
+				/* Ensure we do not write more than PAGE_SIZE
+				 * bytes of data including \n character and null
+				 * terminator
+				 */
+				size += scnprintf(&buf[size], PAGE_SIZE - size -
+						  1, " %#lx %#lx,", trig, ch);
+				if (size >= PAGE_SIZE - 2) {
+					dev_err(dev, "show buffer full\n");
+					goto err;
+				}
+
+			}
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_trigin, 0444, cti_show_trigin, NULL);
+
+static ssize_t cti_show_trigout(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long trig, ch, flag;
+	uint32_t ctien;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	for (trig = 0; trig < CTI_MAX_TRIGGERS; trig++) {
+		spin_lock_irqsave(&drvdata->spinlock, flag);
+		if (!cti_cpu_verify_access(drvdata))
+			ctien = cti_readl(drvdata, CTIOUTEN(trig));
+		else
+			ctien = drvdata->state->ctiouten[trig];
+		spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+		for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+			if (ctien & (1 << ch)) {
+				/* Ensure we do not write more than PAGE_SIZE
+				 * bytes of data including \n character and null
+				 * terminator
+				 */
+				size += scnprintf(&buf[size], PAGE_SIZE - size -
+						  1, " %#lx %#lx,", trig, ch);
+				if (size >= PAGE_SIZE - 2) {
+					dev_err(dev, "show buffer full\n");
+					goto err;
+				}
+
+			}
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_trigout, 0444, cti_show_trigout, NULL);
+
+static ssize_t cti_store_map_trigin(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+	int ret;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	ret = coresight_cti_map_trigin(&drvdata->cti, val1, val2);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(map_trigin, 0200, NULL, cti_store_map_trigin);
+
+static ssize_t cti_store_map_trigout(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+	int ret;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	ret = coresight_cti_map_trigout(&drvdata->cti, val1, val2);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(map_trigout, 0200, NULL, cti_store_map_trigout);
+
+static ssize_t cti_store_unmap_trigin(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	coresight_cti_unmap_trigin(&drvdata->cti, val1, val2);
+
+	return size;
+}
+static DEVICE_ATTR(unmap_trigin, 0200, NULL, cti_store_unmap_trigin);
+
+static ssize_t cti_store_unmap_trigout(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	coresight_cti_unmap_trigout(&drvdata->cti, val1, val2);
+
+	return size;
+}
+static DEVICE_ATTR(unmap_trigout, 0200, NULL, cti_store_unmap_trigout);
+
+static ssize_t cti_store_reset(struct device *dev,
+			       struct device_attribute *attr,
+			       const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	if (!val)
+		return -EINVAL;
+
+	coresight_cti_reset(&drvdata->cti);
+	return size;
+}
+static DEVICE_ATTR(reset, 0200, NULL, cti_store_reset);
+
+static ssize_t cti_show_trig(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long ch, flag;
+	uint32_t ctiset;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (!cti_cpu_verify_access(drvdata))
+		ctiset = cti_readl(drvdata, CTIAPPSET);
+	else
+		ctiset = drvdata->state->ctiappset;
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+		if (ctiset & (1 << ch)) {
+			/* Ensure we do not write more than PAGE_SIZE
+			 * bytes of data including \n character and null
+			 * terminator
+			 */
+			size += scnprintf(&buf[size], PAGE_SIZE - size -
+					  1, " %#lx,", ch);
+			if (size >= PAGE_SIZE - 2) {
+				dev_err(dev, "show buffer full\n");
+				goto err;
+			}
+
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_trig, 0444, cti_show_trig, NULL);
+
+static ssize_t cti_store_set_trig(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_set_trig(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(set_trig, 0200, NULL, cti_store_set_trig);
+
+static ssize_t cti_store_clear_trig(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	coresight_cti_clear_trig(&drvdata->cti, val);
+
+	return size;
+}
+static DEVICE_ATTR(clear_trig, 0200, NULL, cti_store_clear_trig);
+
+static ssize_t cti_store_pulse_trig(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_pulse_trig(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(pulse_trig, 0200, NULL, cti_store_pulse_trig);
+
+static ssize_t cti_store_ack_trig(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_ack_trig(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(ack_trig, 0200, NULL, cti_store_ack_trig);
+
+static ssize_t cti_show_gate(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long ch, flag;
+	uint32_t ctigate;
+	ssize_t size = 0;
+
+	mutex_lock(&drvdata->mutex);
+	/*
+	 * It is safe to check refcnt here: it is only ever modified while
+	 * holding both the mutex and the spinlock.
+	 */
+	if (!drvdata->refcnt)
+		goto err;
+
+	spin_lock_irqsave(&drvdata->spinlock, flag);
+	if (!cti_cpu_verify_access(drvdata))
+		ctigate = cti_readl(drvdata, CTIGATE);
+	else
+		ctigate = drvdata->state->ctigate;
+	spin_unlock_irqrestore(&drvdata->spinlock, flag);
+
+	for (ch = 0; ch < CTI_MAX_CHANNELS; ch++) {
+		if (ctigate & (1 << ch)) {
+			/* Ensure we do not write more than PAGE_SIZE
+			 * bytes of data including \n character and null
+			 * terminator
+			 */
+			size += scnprintf(&buf[size], PAGE_SIZE - size -
+					  1, " %#lx,", ch);
+			if (size >= PAGE_SIZE - 2) {
+				dev_err(dev, "show buffer full\n");
+				goto err;
+			}
+
+		}
+	}
+err:
+	size += scnprintf(&buf[size], 2, "\n");
+	mutex_unlock(&drvdata->mutex);
+	return size;
+}
+static DEVICE_ATTR(show_gate, 0444, cti_show_gate, NULL);
+
+static ssize_t cti_store_enable_gate(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int ret;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	ret = coresight_cti_enable_gate(&drvdata->cti, val);
+
+	if (ret)
+		return ret;
+	return size;
+}
+static DEVICE_ATTR(enable_gate, 0200, NULL, cti_store_enable_gate);
+
+static ssize_t cti_store_disable_gate(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct cti_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	coresight_cti_disable_gate(&drvdata->cti, val);
+
+	return size;
+}
+static DEVICE_ATTR(disable_gate, 0200, NULL, cti_store_disable_gate);
+
+static struct attribute *cti_attrs[] = {
+	&dev_attr_show_trigin.attr,
+	&dev_attr_show_trigout.attr,
+	&dev_attr_map_trigin.attr,
+	&dev_attr_map_trigout.attr,
+	&dev_attr_unmap_trigin.attr,
+	&dev_attr_unmap_trigout.attr,
+	&dev_attr_reset.attr,
+	&dev_attr_show_trig.attr,
+	&dev_attr_set_trig.attr,
+	&dev_attr_clear_trig.attr,
+	&dev_attr_pulse_trig.attr,
+	&dev_attr_ack_trig.attr,
+	&dev_attr_show_gate.attr,
+	&dev_attr_enable_gate.attr,
+	&dev_attr_disable_gate.attr,
+	NULL,
+};
+
+static struct attribute_group cti_attr_grp = {
+	.attrs = cti_attrs,
+};
+
+static const struct attribute_group *cti_attr_grps[] = {
+	&cti_attr_grp,
+	NULL,
+};
+
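+/*
+ * CPU PM notifier: save CTI context when a cluster (L2 affinity level) is
+ * about to power down and restore it when the cluster comes back up or
+ * the power-down attempt fails.
+ */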
+static int cti_cpu_pm_callback(struct notifier_block *self,
+			       unsigned long cmd, void *v)
+{
+	unsigned long aff_level = (unsigned long) v;
+
+	switch (cmd) {
+	case CPU_CLUSTER_PM_ENTER:
+		if (aff_level == AFFINITY_LEVEL_L2)
+			coresight_cti_ctx_save();
+		break;
+	case CPU_CLUSTER_PM_ENTER_FAILED:
+	case CPU_CLUSTER_PM_EXIT:
+		if (aff_level == AFFINITY_LEVEL_L2)
+			coresight_cti_ctx_restore();
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cti_cpu_pm_notifier = {
+	.notifier_call = cti_cpu_pm_callback,
+};
+
+static int cti_probe(struct platform_device *pdev)
+{
+	int ret;
+	int trig;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct cti_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+	struct device_node *cpu_node;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	/* Store the driver data pointer for use in exported functions */
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cti-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	spin_lock_init(&drvdata->spinlock);
+
+	mutex_init(&drvdata->mutex);
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	drvdata->gpio_trigin = devm_kzalloc(dev, sizeof(struct cti_pctrl),
+					    GFP_KERNEL);
+	if (!drvdata->gpio_trigin)
+		return -ENOMEM;
+
+	drvdata->gpio_trigin->trig = -1;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,cti-gpio-trigin", &trig);
+	if (!ret)
+		drvdata->gpio_trigin->trig = trig;
+	else if (ret != -EINVAL)
+		return ret;
+
+	drvdata->gpio_trigout = devm_kzalloc(dev, sizeof(struct cti_pctrl),
+					     GFP_KERNEL);
+	if (!drvdata->gpio_trigout)
+		return -ENOMEM;
+
+	drvdata->gpio_trigout->trig = -1;
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "qcom,cti-gpio-trigout", &trig);
+	if (!ret)
+		drvdata->gpio_trigout->trig = trig;
+	else if (ret != -EINVAL)
+		return ret;
+
+	drvdata->cpu = -1;
+	cpu_node = of_parse_phandle(pdev->dev.of_node, "cpu", 0);
+	if (cpu_node) {
+		drvdata->cpu = pdata ? pdata->cpu : -1;
+		if (drvdata->cpu == -1) {
+			dev_err(drvdata->dev, "CTI cpu node invalid\n");
+			return -EINVAL;
+		}
+	}
+
+	if (!cti_save_disable)
+		drvdata->cti_save = of_property_read_bool(pdev->dev.of_node,
+							  "qcom,cti-save");
+	if (drvdata->cti_save) {
+		drvdata->state = devm_kzalloc(dev, sizeof(struct cti_state),
+					      GFP_KERNEL);
+		if (!drvdata->state)
+			return -ENOMEM;
+
+		drvdata->cti_hwclk = of_property_read_bool(pdev->dev.of_node,
+							   "qcom,cti-hwclk");
+	}
+	if (drvdata->cti_save && !drvdata->cti_hwclk) {
+		ret = clk_prepare_enable(drvdata->clk);
+		if (ret)
+			return ret;
+	}
+
+	mutex_lock(&cti_lock);
+	drvdata->cti.name = ((struct coresight_platform_data *)
+			     (pdev->dev.platform_data))->name;
+	list_add_tail(&drvdata->cti.link, &cti_list);
+	mutex_unlock(&cti_lock);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	desc->type = CORESIGHT_DEV_TYPE_NONE;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = cti_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		ret = PTR_ERR(drvdata->csdev);
+		goto err;
+	}
+
+	if (drvdata->cti_save) {
+		if (!registered)
+			cpu_pm_register_notifier(&cti_cpu_pm_notifier);
+		registered++;
+	}
+
+	dev_dbg(dev, "CTI initialized\n");
+	return 0;
+err:
+	if (drvdata->cti_save && !drvdata->cti_hwclk)
+		clk_disable_unprepare(drvdata->clk);
+	return ret;
+}
+
+static int cti_remove(struct platform_device *pdev)
+{
+	struct cti_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	if (drvdata->cti_save) {
+		registered--;
+		if (!registered)
+			cpu_pm_unregister_notifier(&cti_cpu_pm_notifier);
+	}
+	coresight_unregister(drvdata->csdev);
+	if (drvdata->cti_save && !drvdata->cti_hwclk)
+		clk_disable_unprepare(drvdata->clk);
+	return 0;
+}
+
+static const struct of_device_id cti_match[] = {
+	{.compatible = "arm,coresight-cti"},
+	{}
+};
+
+static struct platform_driver cti_driver = {
+	.probe          = cti_probe,
+	.remove         = cti_remove,
+	.driver         = {
+		.name   = "coresight-cti",
+		.owner	= THIS_MODULE,
+		.of_match_table = cti_match,
+	},
+};
+
+static int __init cti_init(void)
+{
+	return platform_driver_register(&cti_driver);
+}
+module_init(cti_init);
+
+static void __exit cti_exit(void)
+{
+	platform_driver_unregister(&cti_driver);
+}
+module_exit(cti_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight CTI driver");
diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
index 3fe368b..f57ad2e 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016, The Linux Foundation. All rights reserved.
  *
  * Description: CoreSight Program Flow Trace driver
  *
@@ -45,7 +45,7 @@
  * remain consistent with existing use cases for now.
  */
 static int boot_enable;
-module_param_named(boot_enable, boot_enable, int, S_IRUGO);
+module_param_named(boot_enable, boot_enable, int, 0444);
 
 /* The number of ETM/PTM currently registered */
 static int etm_count;
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 4db8d6a..4fc5916 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,7 +40,7 @@
 #include "coresight-etm-perf.h"
 
 static int boot_enable;
-module_param_named(boot_enable, boot_enable, int, S_IRUGO);
+module_param_named(boot_enable, boot_enable, int, 0444);
 
 /* The number of ETMv4 currently registered */
 static int etm4_count;
diff --git a/drivers/hwtracing/coresight/coresight-hwevent.c b/drivers/hwtracing/coresight/coresight-hwevent.c
new file mode 100644
index 0000000..5857d30
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-hwevent.c
@@ -0,0 +1,322 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include "coresight-priv.h"
+
+struct hwevent_mux {
+	phys_addr_t				start;
+	phys_addr_t				end;
+};
+
+struct hwevent_drvdata {
+	struct device				*dev;
+	struct coresight_device			*csdev;
+	struct clk				*clk;
+	struct mutex				mutex;
+	int					nr_hclk;
+	struct clk				**hclk;
+	int					nr_hreg;
+	struct regulator			**hreg;
+	int					nr_hmux;
+	struct hwevent_mux			*hmux;
+};
+
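+/*
+ * Turn on everything needed to reach the hardware-event mux registers:
+ * the core clock plus any additional clocks and regulators named in the
+ * device tree, with full unwind if any of them fails.
+ */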
+static int hwevent_enable(struct hwevent_drvdata *drvdata)
+{
+	int ret, i, j;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < drvdata->nr_hreg; i++) {
+		ret = regulator_enable(drvdata->hreg[i]);
+		if (ret)
+			goto err0;
+	}
+
+	for (j = 0; j < drvdata->nr_hclk; j++) {
+		ret = clk_prepare_enable(drvdata->hclk[j]);
+		if (ret)
+			goto err1;
+	}
+	return 0;
+err1:
+	for (j--; j >= 0; j--)
+		clk_disable_unprepare(drvdata->hclk[j]);
+err0:
+	for (i--; i >= 0; i--)
+		regulator_disable(drvdata->hreg[i]);
+
+	clk_disable_unprepare(drvdata->clk);
+	return ret;
+}
+
+static void hwevent_disable(struct hwevent_drvdata *drvdata)
+{
+	int i;
+
+	clk_disable_unprepare(drvdata->clk);
+	for (i = 0; i < drvdata->nr_hclk; i++)
+		clk_disable_unprepare(drvdata->hclk[i]);
+	for (i = 0; i < drvdata->nr_hreg; i++)
+		regulator_disable(drvdata->hreg[i]);
+}
+
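+/*
+ * sysfs "setreg" handler: parse an "<addr> <val>" pair, map the hardware
+ * event mux region that contains @addr and write @val to it. Addresses
+ * outside every mux range are forwarded to coresight_csr_hwctrl_set().
+ */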
+static ssize_t hwevent_store_setreg(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t size)
+{
+	struct hwevent_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	void *hwereg;
+	unsigned long long addr;
+	unsigned long val;
+	int ret, i;
+
+	if (sscanf(buf, "%llx %lx", &addr, &val) != 2)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->mutex);
+	ret = hwevent_enable(drvdata);
+	if (ret) {
+		mutex_unlock(&drvdata->mutex);
+		return ret;
+	}
+
+	for (i = 0; i < drvdata->nr_hmux; i++) {
+		if ((addr >= drvdata->hmux[i].start) &&
+		    (addr < drvdata->hmux[i].end)) {
+			hwereg = devm_ioremap(dev,
+					      drvdata->hmux[i].start,
+					      drvdata->hmux[i].end -
+					      drvdata->hmux[i].start);
+			if (!hwereg) {
+				dev_err(dev, "unable to map address 0x%llx\n",
+					addr);
+				ret = -ENOMEM;
+				goto err;
+			}
+			writel_relaxed(val, hwereg + addr -
+				       drvdata->hmux[i].start);
+			/*
+			 * Ensure writes to hwevent control registers
+			 * are completed before unmapping the address
+			 */
+			mb();
+			devm_iounmap(dev, hwereg);
+			break;
+		}
+	}
+
+	if (i == drvdata->nr_hmux) {
+		ret = coresight_csr_hwctrl_set(addr, val);
+		if (ret) {
+			dev_err(dev, "invalid mux control register address\n");
+			ret = -EINVAL;
+			goto err;
+		}
+	}
+
+	hwevent_disable(drvdata);
+	mutex_unlock(&drvdata->mutex);
+	return size;
+err:
+	hwevent_disable(drvdata);
+	mutex_unlock(&drvdata->mutex);
+	return ret;
+}
+static DEVICE_ATTR(setreg, 0200, NULL, hwevent_store_setreg);
+
+static struct attribute *hwevent_attrs[] = {
+	&dev_attr_setreg.attr,
+	NULL,
+};
+
+static struct attribute_group hwevent_attr_grp = {
+	.attrs = hwevent_attrs,
+};
+
+static const struct attribute_group *hwevent_attr_grps[] = {
+	&hwevent_attr_grp,
+	NULL,
+};
+
+static int hwevent_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct hwevent_drvdata *drvdata;
+	struct coresight_desc *desc;
+	struct coresight_platform_data *pdata;
+	struct resource *res;
+	int ret, i;
+	const char *hmux_name, *hclk_name, *hreg_name;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	drvdata->nr_hmux = of_property_count_strings(pdev->dev.of_node,
+						     "reg-names");
+
+	if (!drvdata->nr_hmux)
+		return -ENODEV;
+
+	if (drvdata->nr_hmux > 0) {
+		drvdata->hmux = devm_kzalloc(dev, drvdata->nr_hmux *
+					     sizeof(*drvdata->hmux),
+					     GFP_KERNEL);
+		if (!drvdata->hmux)
+			return -ENOMEM;
+		for (i = 0; i < drvdata->nr_hmux; i++) {
+			ret = of_property_read_string_index(pdev->dev.of_node,
+							    "reg-names", i,
+							    &hmux_name);
+			if (ret)
+				return ret;
+			res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							   hmux_name);
+			if (!res)
+				return -ENODEV;
+			drvdata->hmux[i].start = res->start;
+			drvdata->hmux[i].end = res->end;
+		}
+	} else {
+		return drvdata->nr_hmux;
+	}
+
+	mutex_init(&drvdata->mutex);
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	drvdata->nr_hclk = of_property_count_strings(pdev->dev.of_node,
+						     "qcom,hwevent-clks");
+	drvdata->nr_hreg = of_property_count_strings(pdev->dev.of_node,
+						     "qcom,hwevent-regs");
+
+	if (drvdata->nr_hclk > 0) {
+		drvdata->hclk = devm_kzalloc(dev, drvdata->nr_hclk *
+					     sizeof(*drvdata->hclk),
+					     GFP_KERNEL);
+		if (!drvdata->hclk)
+			return -ENOMEM;
+
+		for (i = 0; i < drvdata->nr_hclk; i++) {
+			ret = of_property_read_string_index(pdev->dev.of_node,
+							    "qcom,hwevent-clks",
+							    i, &hclk_name);
+			if (ret)
+				return ret;
+
+			drvdata->hclk[i] = devm_clk_get(dev, hclk_name);
+			if (IS_ERR(drvdata->hclk[i]))
+				return PTR_ERR(drvdata->hclk[i]);
+		}
+	}
+	if (drvdata->nr_hreg > 0) {
+		drvdata->hreg = devm_kzalloc(dev, drvdata->nr_hreg *
+					     sizeof(*drvdata->hreg),
+					     GFP_KERNEL);
+		if (!drvdata->hreg)
+			return -ENOMEM;
+
+		for (i = 0; i < drvdata->nr_hreg; i++) {
+			ret = of_property_read_string_index(pdev->dev.of_node,
+							    "qcom,hwevent-regs",
+							    i, &hreg_name);
+			if (ret)
+				return ret;
+
+			drvdata->hreg[i] = devm_regulator_get(dev, hreg_name);
+			if (IS_ERR(drvdata->hreg[i]))
+				return PTR_ERR(drvdata->hreg[i]);
+		}
+	}
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+
+	desc->type = CORESIGHT_DEV_TYPE_NONE;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = hwevent_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_info(dev, "Hardware Event driver initialized\n");
+	return 0;
+}
+
+static int hwevent_remove(struct platform_device *pdev)
+{
+	struct hwevent_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static const struct of_device_id hwevent_match[] = {
+	{.compatible = "qcom,coresight-hwevent"},
+	{}
+};
+
+static struct platform_driver hwevent_driver = {
+	.probe		= hwevent_probe,
+	.remove		= hwevent_remove,
+	.driver		= {
+		.name	= "coresight-hwevent",
+		.owner	= THIS_MODULE,
+		.of_match_table	= hwevent_match,
+	},
+};
+
+static int __init hwevent_init(void)
+{
+	return platform_driver_register(&hwevent_driver);
+}
+module_init(hwevent_init);
+
+static void __exit hwevent_exit(void)
+{
+	platform_driver_unregister(&hwevent_driver);
+}
+module_exit(hwevent_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Hardware Event driver");
diff --git a/drivers/hwtracing/coresight/coresight-ost.c b/drivers/hwtracing/coresight/coresight-ost.c
new file mode 100644
index 0000000..e40751a
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-ost.c
@@ -0,0 +1,258 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/bitmap.h>
+#include "coresight-ost.h"
+
+#define STM_USERSPACE_HEADER_SIZE	(8)
+#define STM_USERSPACE_MAGIC1_VAL	(0xf0)
+#define STM_USERSPACE_MAGIC2_VAL	(0xf1)
+
+#define OST_TOKEN_STARTSIMPLE		(0x10)
+#define OST_VERSION_MIPI1		(16)
+
+#define STM_MAKE_VERSION(ma, mi)	((ma << 8) | mi)
+#define STM_HEADER_MAGIC		(0x5953)
+
+#define STM_FLAG_MARKED			BIT(4)
+
+#define STM_TRACE_BUF_SIZE		4096
+
+static struct stm_drvdata *stmdrvdata;
+
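+/*
+ * Claim a free STM channel, scanning the channel bitmap from @off. If no
+ * channel is free the returned index is >= drvdata->numsp.
+ */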
+static uint32_t stm_channel_alloc(uint32_t off)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+	uint32_t ch;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	do {
+		ch = find_next_zero_bit(drvdata->chs.bitmap,
+					drvdata->numsp, off);
+	} while ((ch < drvdata->numsp) &&
+		 test_and_set_bit(ch, drvdata->chs.bitmap));
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+	return ch;
+}
+
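+/*
+ * Send @count bytes of payload to the stimulus port address @addr,
+ * splitting the buffer into chunks of at most drvdata->write_bytes per
+ * stm_send() call.
+ */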
+static int stm_ost_send(void *addr, const void *data, uint32_t count)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+	const unsigned char *p = data;
+	size_t pos;
+	ssize_t sz;
+
+	for (pos = 0, p = data; count > pos; pos += sz, p += sz) {
+		sz = min_t(unsigned int, count - pos, drvdata->write_bytes);
+		stm_send(addr, p, sz, drvdata->write_bytes);
+	}
+
+	return count;
+}
+
+static void stm_channel_free(uint32_t ch)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+	unsigned long flags;
+
+	spin_lock_irqsave(&drvdata->spinlock, flags);
+	clear_bit(ch, drvdata->chs.bitmap);
+	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+}
+
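+/*
+ * Emit the 4-byte OST start header (token, version, entity id, protocol
+ * id) as a single marked, non-timestamped data packet on the channel.
+ */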
+static int stm_trace_ost_header(unsigned long ch_addr, uint32_t flags,
+				uint8_t entity_id, uint8_t proto_id)
+{
+	void *addr;
+	uint32_t header;
+	char *hdr;
+
+	hdr = (char *)&header;
+
+	hdr[0] = OST_TOKEN_STARTSIMPLE;
+	hdr[1] = OST_VERSION_MIPI1;
+	hdr[2] = entity_id;
+	hdr[3] = proto_id;
+
+	/* header is expected to be D32M type */
+	flags |= STM_FLAG_MARKED;
+	flags &= ~STM_FLAG_TIMESTAMPED;
+	addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, flags));
+
+	return stm_ost_send(addr, &header, sizeof(header));
+}
+
+static int stm_trace_data_header(void *addr)
+{
+	char hdr[16];
+	int len = 0;
+
+	*(uint16_t *)(hdr) = STM_MAKE_VERSION(0, 1);
+	*(uint16_t *)(hdr + 2) = STM_HEADER_MAGIC;
+	*(uint32_t *)(hdr + 4) = raw_smp_processor_id();
+	*(uint64_t *)(hdr + 8) = sched_clock();
+
+	len += stm_ost_send(addr, hdr, sizeof(hdr));
+	len += stm_ost_send(addr, current->comm, TASK_COMM_LEN);
+
+	return len;
+}
+
+static int stm_trace_data(unsigned long ch_addr, uint32_t flags,
+			  const void *data, uint32_t size)
+{
+	void *addr;
+	int len = 0;
+
+	flags &= ~STM_FLAG_TIMESTAMPED;
+	addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_DATA, flags));
+
+	/* send the data header */
+	len += stm_trace_data_header(addr);
+	/* send the actual data */
+	len += stm_ost_send(addr, data, size);
+
+	return len;
+}
+
+static int stm_trace_ost_tail(unsigned long ch_addr, uint32_t flags)
+{
+	void *addr;
+	uint32_t tail = 0x0;
+
+	addr = (void *)(ch_addr | stm_channel_off(STM_PKT_TYPE_FLAG, flags));
+
+	return stm_ost_send(addr, &tail, sizeof(tail));
+}
+
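+/*
+ * Send one complete OST frame: allocate a channel, emit the OST header,
+ * the data header (version, magic, cpu, timestamp, task name), the
+ * payload and the terminating flag packet, then release the channel.
+ * Returns the number of bytes handed to the STM.
+ */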
+static inline int __stm_trace(uint32_t flags, uint8_t entity_id,
+			      uint8_t proto_id, const void *data, uint32_t size)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+	int len = 0;
+	uint32_t ch;
+	unsigned long ch_addr;
+
+	/* allocate channel and get the channel address */
+	ch = stm_channel_alloc(0);
+
+	ch_addr = (unsigned long)stm_channel_addr(drvdata, ch);
+
+	/* send the ost header */
+	len += stm_trace_ost_header(ch_addr, flags, entity_id,
+				    proto_id);
+
+	/* send the payload data */
+	len += stm_trace_data(ch_addr, flags, data, size);
+
+	/* send the ost tail */
+	len += stm_trace_ost_tail(ch_addr, flags);
+
+	/* we are done, free the channel */
+	stm_channel_free(ch);
+
+	return len;
+}
+
+/*
+ * stm_trace - trace the binary or string data through STM
+ * @flags: tracing options - guaranteed, timestamped, etc
+ * @entity_id: entity representing the trace data
+ * @proto_id: protocol id to distinguish between different binary formats
+ * @data: pointer to binary or string data buffer
+ * @size: size of data to send
+ *
+ * Packetizes the data as the payload to an OST packet and sends it over STM
+ *
+ * CONTEXT:
+ * Can be called from any context.
+ *
+ * RETURNS:
+ * number of bytes transferred over STM
+ */
+int stm_trace(uint32_t flags, uint8_t entity_id, uint8_t proto_id,
+			const void *data, uint32_t size)
+{
+	struct stm_drvdata *drvdata = stmdrvdata;
+
+	/* only sizes that fit in 24 bits (bits 0 to 23) are supported */
+	if (!(drvdata && drvdata->enable &&
+	      test_bit(entity_id, drvdata->entities) && size &&
+	      (size < 0x1000000)))
+		return 0;
+
+	return __stm_trace(flags, entity_id, proto_id, data, size);
+}
+EXPORT_SYMBOL(stm_trace);
+
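+/*
+ * Write path used by the generic STM framework. Buffers that begin with
+ * the userspace magic bytes carry their own entity id, protocol id and
+ * flags; anything else is sent as timestamped OST_ENTITY_DEV_NODE data.
+ */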
+ssize_t stm_ost_packet(struct stm_data *stm_data,
+				  unsigned int size,
+				  const unsigned char *buf)
+{
+	struct stm_drvdata *drvdata = container_of(stm_data,
+						   struct stm_drvdata, stm);
+
+	uint8_t entity_id, proto_id;
+	uint32_t flags;
+
+	if (!drvdata->enable || !size)
+		return -EINVAL;
+
+	if (size > STM_TRACE_BUF_SIZE)
+		size = STM_TRACE_BUF_SIZE;
+
+	if (size >= STM_USERSPACE_HEADER_SIZE &&
+	    buf[0] == STM_USERSPACE_MAGIC1_VAL &&
+	    buf[1] == STM_USERSPACE_MAGIC2_VAL) {
+
+		entity_id = buf[2];
+		proto_id = buf[3];
+		flags = *(uint32_t *)(buf + 4);
+
+		if (!test_bit(entity_id, drvdata->entities) ||
+		    !(size - STM_USERSPACE_HEADER_SIZE)) {
+			return size;
+		}
+
+		__stm_trace(flags, entity_id, proto_id,
+			    buf + STM_USERSPACE_HEADER_SIZE,
+			    size - STM_USERSPACE_HEADER_SIZE);
+	} else {
+		if (!test_bit(OST_ENTITY_DEV_NODE, drvdata->entities))
+			return size;
+
+		__stm_trace(STM_FLAG_TIMESTAMPED, OST_ENTITY_DEV_NODE, 0,
+			    buf, size);
+	}
+
+	return size;
+}
+EXPORT_SYMBOL(stm_ost_packet);
+
+int stm_set_ost_params(struct stm_drvdata *drvdata, size_t bitmap_size)
+{
+	stmdrvdata = drvdata;
+
+	drvdata->chs.bitmap = devm_kzalloc(drvdata->dev, bitmap_size,
+					   GFP_KERNEL);
+	if (!drvdata->chs.bitmap)
+		return -ENOMEM;
+
+	bitmap_fill(drvdata->entities, OST_ENTITY_MAX);
+
+	return 0;
+}
+EXPORT_SYMBOL(stm_set_ost_params);
+
diff --git a/drivers/hwtracing/coresight/coresight-ost.h b/drivers/hwtracing/coresight/coresight-ost.h
new file mode 100644
index 0000000..94d47a0
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-ost.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_OST_H
+#define _CORESIGHT_CORESIGHT_OST_H
+
+#include <linux/types.h>
+#include <linux/coresight-stm.h>
+
+#ifdef CONFIG_CORESIGHT_OST
+static inline bool stm_ost_configured(void) { return 1; }
+
+extern ssize_t stm_ost_packet(struct stm_data *stm_data,
+			      unsigned int size,
+			      const unsigned char *buf);
+
+extern int stm_set_ost_params(struct stm_drvdata *drvdata,
+			      size_t bitmap_size);
+#else
+static inline bool stm_ost_configured(void) { return false; }
+
+static inline ssize_t stm_ost_packet(struct stm_data *stm_data,
+				     unsigned int size,
+				     const unsigned char *buf)
+{
+	return 0;
+}
+
+static inline int stm_set_ost_params(struct stm_drvdata *drvdata,
+				     size_t bitmap_size)
+{
+	return 0;
+}
+#endif
+#endif
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 196a14b..aa5538c 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2012, 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -33,7 +33,9 @@
 #define CORESIGHT_DEVTYPE	0xfcc
 
 #define TIMEOUT_US		100
+#define BM(lsb, msb)		((BIT(msb) - BIT(lsb)) + BIT(msb))
 #define BMVAL(val, lsb, msb)	((val & GENMASK(msb, lsb)) >> lsb)
+#define BVAL(val, n)		((val & BIT(n)) >> n)
 
 #define ETM_MODE_EXCL_KERN	BIT(30)
 #define ETM_MODE_EXCL_USER	BIT(31)
@@ -108,6 +110,27 @@
 	} while (0);
 }
 
+static inline bool coresight_authstatus_enabled(void __iomem *addr)
+{
+	int ret;
+	unsigned int auth_val;
+
+	if (!addr)
+		return false;
+
+	auth_val = readl_relaxed(addr + CORESIGHT_AUTHSTATUS);
+
+	if ((BMVAL(auth_val, 0, 1) == 0x2) ||
+	    (BMVAL(auth_val, 2, 3) == 0x2) ||
+	    (BMVAL(auth_val, 4, 5) == 0x2) ||
+	    (BMVAL(auth_val, 6, 7) == 0x2))
+		ret = false;
+	else
+		ret = true;
+
+	return ret;
+}
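+
+/*
+ * Worked example (illustrative, not from the original change): with
+ * auth_val = 0xA5 (binary 1010 0101) the four two-bit fields decode as
+ *
+ *	BMVAL(0xA5, 0, 1) = 0x1
+ *	BMVAL(0xA5, 2, 3) = 0x1
+ *	BMVAL(0xA5, 4, 5) = 0x2
+ *	BMVAL(0xA5, 6, 7) = 0x2
+ *
+ * so coresight_authstatus_enabled() returns false, since at least one
+ * field reads the 0x2 encoding. The code only checks for that encoding;
+ * the meaning of each field follows the usual AUTHSTATUS groupings.
+ */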
+
 void coresight_disable_path(struct list_head *path);
 int coresight_enable_path(struct list_head *path, u32 mode);
 struct coresight_device *coresight_get_sink(struct list_head *path);
@@ -122,4 +145,17 @@
 static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
 #endif
 
+#ifdef CONFIG_CORESIGHT_CSR
+extern void msm_qdss_csr_enable_bam_to_usb(void);
+extern void msm_qdss_csr_disable_bam_to_usb(void);
+extern void msm_qdss_csr_disable_flush(void);
+extern int coresight_csr_hwctrl_set(uint64_t addr, uint32_t val);
+#else
+static inline void msm_qdss_csr_enable_bam_to_usb(void) {}
+static inline void msm_qdss_csr_disable_bam_to_usb(void) {}
+static inline void msm_qdss_csr_disable_flush(void) {}
+static inline int coresight_csr_hwctrl_set(uint64_t addr,
+					   uint32_t val) { return -EINVAL; }
+#endif
+
 #endif
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
index 49e0f1b..34f89c8 100644
--- a/drivers/hwtracing/coresight/coresight-stm.c
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -22,9 +22,7 @@
  * generic STM API by Chunyan Zhang
  * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org>
  */
-#include <asm/local.h>
 #include <linux/amba/bus.h>
-#include <linux/bitmap.h>
 #include <linux/clk.h>
 #include <linux/coresight.h>
 #include <linux/coresight-stm.h>
@@ -36,6 +34,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/stm.h>
 
+#include "coresight-ost.h"
 #include "coresight-priv.h"
 
 #define STMDMASTARTR			0xc04
@@ -73,8 +72,6 @@
 #define STMITATBCTR0			0xef8
 
 #define STM_32_CHANNEL			32
-#define BYTES_PER_CHANNEL		256
-#define STM_TRACE_BUF_SIZE		4096
 #define STM_SW_MASTER_END		127
 
 /* Register bit definition */
@@ -82,16 +79,6 @@
 /* Reserve the first 10 channels for kernel usage */
 #define STM_CHANNEL_OFFSET		0
 
-enum stm_pkt_type {
-	STM_PKT_TYPE_DATA	= 0x98,
-	STM_PKT_TYPE_FLAG	= 0xE8,
-	STM_PKT_TYPE_TRIG	= 0xF8,
-};
-
-#define stm_channel_addr(drvdata, ch)	(drvdata->chs.base +	\
-					(ch * BYTES_PER_CHANNEL))
-#define stm_channel_off(type, opts)	(type & ~opts)
-
 static int boot_nr_channel;
 
 /*
@@ -99,59 +86,9 @@
  * remain consistent with existing use cases for now.
  */
 module_param_named(
-	boot_nr_channel, boot_nr_channel, int, S_IRUGO
+	boot_nr_channel, boot_nr_channel, int, 0444
 );
 
-/**
- * struct channel_space - central management entity for extended ports
- * @base:		memory mapped base address where channels start.
- * @phys:		physical base address of channel region.
- * @guaraneed:		is the channel delivery guaranteed.
- */
-struct channel_space {
-	void __iomem		*base;
-	phys_addr_t		phys;
-	unsigned long		*guaranteed;
-};
-
-/**
- * struct stm_drvdata - specifics associated to an STM component
- * @base:		memory mapped base address for this component.
- * @dev:		the device entity associated to this component.
- * @atclk:		optional clock for the core parts of the STM.
- * @csdev:		component vitals needed by the framework.
- * @spinlock:		only one at a time pls.
- * @chs:		the channels accociated to this STM.
- * @stm:		structure associated to the generic STM interface.
- * @mode:		this tracer's mode, i.e sysFS, or disabled.
- * @traceid:		value of the current ID for this component.
- * @write_bytes:	Maximus bytes this STM can write at a time.
- * @stmsper:		settings for register STMSPER.
- * @stmspscr:		settings for register STMSPSCR.
- * @numsp:		the total number of stimulus port support by this STM.
- * @stmheer:		settings for register STMHEER.
- * @stmheter:		settings for register STMHETER.
- * @stmhebsr:		settings for register STMHEBSR.
- */
-struct stm_drvdata {
-	void __iomem		*base;
-	struct device		*dev;
-	struct clk		*atclk;
-	struct coresight_device	*csdev;
-	spinlock_t		spinlock;
-	struct channel_space	chs;
-	struct stm_data		stm;
-	local_t			mode;
-	u8			traceid;
-	u32			write_bytes;
-	u32			stmsper;
-	u32			stmspscr;
-	u32			numsp;
-	u32			stmheer;
-	u32			stmheter;
-	u32			stmhebsr;
-};
-
 static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata)
 {
 	CS_UNLOCK(drvdata->base);
@@ -216,6 +153,7 @@
 
 	spin_lock(&drvdata->spinlock);
 	stm_enable_hw(drvdata);
+	drvdata->enable = true;
 	spin_unlock(&drvdata->spinlock);
 
 	dev_info(drvdata->dev, "STM tracing enabled\n");
@@ -307,7 +245,7 @@
 	return ((unsigned long)addr & (write_bytes - 1));
 }
 
-static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
+void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
 {
 	u8 paload[8];
 
@@ -336,6 +274,7 @@
 		break;
 	}
 }
+EXPORT_SYMBOL(stm_send);
 
 static int stm_generic_link(struct stm_data *stm_data,
 			    unsigned int master,  unsigned int channel)
@@ -356,6 +295,10 @@
 	if (!drvdata || !drvdata->csdev)
 		return;
 
+	/* If any OST entity is enabled, do not disable the device */
+	if (drvdata->entities)
+		return;
+
 	stm_disable(drvdata->csdev, NULL);
 }
 
@@ -635,6 +578,48 @@
 }
 static DEVICE_ATTR_RW(traceid);
 
+static ssize_t entities_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t len;
+
+	len = scnprintf(buf, PAGE_SIZE, "%*pb\n",
+			OST_ENTITY_MAX, drvdata->entities);
+
+	if (PAGE_SIZE - len < 2)
+		len = -EINVAL;
+	else
+		len += scnprintf(buf + len, 2, "\n");
+
+	return len;
+}
+
+static ssize_t entities_store(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t size)
+{
+	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val1, val2;
+
+	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+		return -EINVAL;
+
+	if (val1 >= OST_ENTITY_MAX)
+		return -EINVAL;
+
+	if (!stm_ost_configured())
+		return -EPERM;
+
+	if (val2)
+		__set_bit(val1, drvdata->entities);
+	else
+		__clear_bit(val1, drvdata->entities);
+
+	return size;
+}
+static DEVICE_ATTR_RW(entities);
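+
+/*
+ * Sysfs usage sketch (assumed path, shown for illustration only): the
+ * store handler above parses two hex values, "<entity> <enable>". For
+ * example, from a shell:
+ *
+ *	echo "10 1" > /sys/bus/coresight/devices/<stm>/entities
+ *
+ * sets bit 0x10 (entity 16) in drvdata->entities, while "10 0" clears
+ * it. Values at or above OST_ENTITY_MAX are rejected with -EINVAL.
+ */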
+
 #define coresight_stm_simple_func(name, offset)	\
 	coresight_simple_func(struct stm_drvdata, NULL, name, offset)
 
@@ -657,6 +642,7 @@
 	&dev_attr_port_enable.attr,
 	&dev_attr_port_select.attr,
 	&dev_attr_traceid.attr,
+	&dev_attr_entities.attr,
 	NULL,
 };
 
@@ -758,12 +744,12 @@
 	drvdata->stmsper = ~0x0;
 
 	/*
-	 * The trace ID value for *ETM* tracers start at CPU_ID * 2 + 0x10 and
-	 * anything equal to or higher than 0x70 is reserved.  Since 0x00 is
-	 * also reserved the STM trace ID needs to be higher than 0x00 and
-	 * lowner than 0x10.
+	 * The trace ID values for *ETM* tracers start at CPU_ID + 0x1, and
+	 * anything equal to or higher than 0x70 is reserved. Since 0x00 is
+	 * also reserved, the STM trace ID needs to be higher than the number
+	 * of CPUs (0x8 in our case) and lower than 0x70.
 	 */
-	drvdata->traceid = 0x1;
+	drvdata->traceid = 0x10;
 
 	/* Set invariant transaction timing on all channels */
 	bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
@@ -781,6 +767,8 @@
 	drvdata->stm.sw_end = 1;
 	drvdata->stm.hw_override = true;
 	drvdata->stm.sw_nchannels = drvdata->numsp;
+	drvdata->stm.ost_configured = stm_ost_configured;
+	drvdata->stm.ost_packet = stm_ost_packet;
 	drvdata->stm.sw_mmiosz = BYTES_PER_CHANNEL;
 	drvdata->stm.packet = stm_generic_packet;
 	drvdata->stm.mmio_addr = stm_mmio_addr;
@@ -850,6 +838,11 @@
 	}
 	bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);
 
+	/* Store the driver data pointer for use in exported functions */
+	ret = stm_set_ost_params(drvdata, bitmap_size);
+	if (ret)
+		return ret;
+
 	guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
 	if (!guaranteed)
 		return -ENOMEM;
diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c
new file mode 100644
index 0000000..c96087d
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpda.c
@@ -0,0 +1,765 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define tpda_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define tpda_readl(drvdata, off)	__raw_readl(drvdata->base + off)
+
+#define TPDA_LOCK(drvdata)						\
+do {									\
+	mb(); /* ensure configuration takes effect before we lock it */	\
+	tpda_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define TPDA_UNLOCK(drvdata)						\
+do {									\
+	tpda_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb(); /* ensure unlock takes effect before we configure */	\
+} while (0)
+
+#define TPDA_CR			(0x000)
+#define TPDA_Pn_CR(n)		(0x004 + (n * 4))
+#define TPDA_FPID_CR		(0x084)
+#define TPDA_FREQREQ_VAL	(0x088)
+#define TPDA_SYNCR		(0x08C)
+#define TPDA_FLUSH_CR		(0x090)
+#define TPDA_FLUSH_SR		(0x094)
+#define TPDA_FLUSH_ERR		(0x098)
+
+#define TPDA_MAX_INPORTS	32
+
+struct tpda_drvdata {
+	void __iomem		*base;
+	struct device		*dev;
+	struct coresight_device	*csdev;
+	struct clk		*clk;
+	struct mutex		lock;
+	bool			enable;
+	uint32_t		atid;
+	uint32_t		bc_esize[TPDA_MAX_INPORTS];
+	uint32_t		tc_esize[TPDA_MAX_INPORTS];
+	uint32_t		dsb_esize[TPDA_MAX_INPORTS];
+	uint32_t		cmb_esize[TPDA_MAX_INPORTS];
+	bool			trig_async;
+	bool			trig_flag_ts;
+	bool			trig_freq;
+	bool			freq_ts;
+	uint32_t		freq_req_val;
+	bool			freq_req;
+};
+
+static void __tpda_enable_pre_port(struct tpda_drvdata *drvdata)
+{
+	uint32_t val;
+
+	val = tpda_readl(drvdata, TPDA_CR);
+	/* Set the master id */
+	val = val & ~(0x7F << 13);
+	val = val & ~(0x7F << 6);
+	val |= (drvdata->atid << 6);
+	if (drvdata->trig_async)
+		val = val | BIT(5);
+	else
+		val = val & ~BIT(5);
+	if (drvdata->trig_flag_ts)
+		val = val | BIT(4);
+	else
+		val = val & ~BIT(4);
+	if (drvdata->trig_freq)
+		val = val | BIT(3);
+	else
+		val = val & ~BIT(3);
+	if (drvdata->freq_ts)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+	tpda_writel(drvdata, val, TPDA_CR);
+
+	/*
+	 * If the FLRIE bit is set, set the master and channel
+	 * IDs to zero
+	 */
+	if (BVAL(tpda_readl(drvdata, TPDA_CR), 4))
+		tpda_writel(drvdata, 0x0, TPDA_FPID_CR);
+}
+
+static void __tpda_enable_port(struct tpda_drvdata *drvdata, int port)
+{
+	uint32_t val;
+
+	val = tpda_readl(drvdata, TPDA_Pn_CR(port));
+	if (drvdata->bc_esize[port] == 32)
+		val = val & ~BIT(4);
+	else if (drvdata->bc_esize[port] == 64)
+		val = val | BIT(4);
+
+	if (drvdata->tc_esize[port] == 32)
+		val = val & ~BIT(5);
+	else if (drvdata->tc_esize[port] == 64)
+		val = val | BIT(5);
+
+	if (drvdata->dsb_esize[port] == 32)
+		val = val & ~BIT(8);
+	else if (drvdata->dsb_esize[port] == 64)
+		val = val | BIT(8);
+
+	val = val & ~(0x3 << 6);
+	if (drvdata->cmb_esize[port] == 8)
+		val &= ~(0x3 << 6);
+	else if (drvdata->cmb_esize[port] == 32)
+		val |= (0x1 << 6);
+	else if (drvdata->cmb_esize[port] == 64)
+		val |= (0x2 << 6);
+
+	/* Set the hold time */
+	val = val & ~(0x7 << 1);
+	val |= (0x5 << 1);
+	tpda_writel(drvdata, val, TPDA_Pn_CR(port));
+	/* Enable the port */
+	val = val | BIT(0);
+	tpda_writel(drvdata, val, TPDA_Pn_CR(port));
+}
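+
+/*
+ * Worked example (illustrative): for a port configured with
+ * dsb_esize = 64 and cmb_esize = 32, the sequence above sets BIT(8) for
+ * the 64-bit DSB element, programs 0x1 into bits [7:6] for the 32-bit
+ * CMB element, writes 0x5 into the hold-time field at bits [3:1], and
+ * finally sets BIT(0) to enable the port. BC/TC/DSB sizes left at 0
+ * leave the corresponding bits as read back, while the CMB field is
+ * always cleared first, so an unspecified CMB size ends up with the
+ * 8-bit encoding.
+ */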
+
+static void __tpda_enable_post_port(struct tpda_drvdata *drvdata)
+{
+	uint32_t val;
+
+	val = tpda_readl(drvdata, TPDA_SYNCR);
+	/* Clear the mode */
+	val = val & ~BIT(12);
+	/* Program the counter value */
+	val = val | 0xFFF;
+	tpda_writel(drvdata, val, TPDA_SYNCR);
+
+	if (drvdata->freq_req_val)
+		tpda_writel(drvdata, drvdata->freq_req_val, TPDA_FREQREQ_VAL);
+	else
+		tpda_writel(drvdata, 0x0, TPDA_FREQREQ_VAL);
+
+	val = tpda_readl(drvdata, TPDA_CR);
+	if (drvdata->freq_req)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpda_writel(drvdata, val, TPDA_CR);
+}
+
+static void __tpda_enable(struct tpda_drvdata *drvdata, int port)
+{
+	TPDA_UNLOCK(drvdata);
+
+	if (!drvdata->enable)
+		__tpda_enable_pre_port(drvdata);
+
+	__tpda_enable_port(drvdata, port);
+
+	if (!drvdata->enable)
+		__tpda_enable_post_port(drvdata);
+
+	TPDA_LOCK(drvdata);
+}
+
+static int tpda_enable(struct coresight_device *csdev, int inport, int outport)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->lock);
+	__tpda_enable(drvdata, inport);
+	drvdata->enable = true;
+	mutex_unlock(&drvdata->lock);
+
+	dev_info(drvdata->dev, "TPDA inport %d enabled\n", inport);
+	return 0;
+}
+
+static void __tpda_disable(struct tpda_drvdata *drvdata, int port)
+{
+	uint32_t val;
+
+	TPDA_UNLOCK(drvdata);
+
+	val = tpda_readl(drvdata, TPDA_Pn_CR(port));
+	val = val & ~BIT(0);
+	tpda_writel(drvdata, val, TPDA_Pn_CR(port));
+
+	TPDA_LOCK(drvdata);
+}
+
+static void tpda_disable(struct coresight_device *csdev, int inport,
+			   int outport)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->lock);
+	__tpda_disable(drvdata, inport);
+	drvdata->enable = false;
+	mutex_unlock(&drvdata->lock);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "TPDA inport %d disabled\n", inport);
+}
+
+static const struct coresight_ops_link tpda_link_ops = {
+	.enable		= tpda_enable,
+	.disable	= tpda_disable,
+};
+
+static const struct coresight_ops tpda_cs_ops = {
+	.link_ops	= &tpda_link_ops,
+};
+
+static ssize_t tpda_show_trig_async_enable(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->trig_async);
+}
+
+static ssize_t tpda_store_trig_async_enable(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->trig_async = true;
+	else
+		drvdata->trig_async = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(trig_async_enable, 0644,
+		   tpda_show_trig_async_enable,
+		   tpda_store_trig_async_enable);
+
+static ssize_t tpda_show_trig_flag_ts_enable(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->trig_flag_ts);
+}
+
+static ssize_t tpda_store_trig_flag_ts_enable(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf,
+					      size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->trig_flag_ts = true;
+	else
+		drvdata->trig_flag_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(trig_flag_ts_enable, 0644,
+		   tpda_show_trig_flag_ts_enable,
+		   tpda_store_trig_flag_ts_enable);
+
+static ssize_t tpda_show_trig_freq_enable(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->trig_freq);
+}
+
+static ssize_t tpda_store_trig_freq_enable(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf,
+					   size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->trig_freq = true;
+	else
+		drvdata->trig_freq = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(trig_freq_enable, 0644,
+		   tpda_show_trig_freq_enable,
+		   tpda_store_trig_freq_enable);
+
+static ssize_t tpda_show_freq_ts_enable(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->freq_ts);
+}
+
+static ssize_t tpda_store_freq_ts_enable(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->freq_ts = true;
+	else
+		drvdata->freq_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(freq_ts_enable, 0644, tpda_show_freq_ts_enable,
+		   tpda_store_freq_ts_enable);
+
+static ssize_t tpda_show_freq_req_val(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val = drvdata->freq_req_val;
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpda_store_freq_req_val(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->freq_req_val = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(freq_req_val, 0644, tpda_show_freq_req_val,
+		   tpda_store_freq_req_val);
+
+static ssize_t tpda_show_freq_req(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->freq_req);
+}
+
+static ssize_t tpda_store_freq_req(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->freq_req = true;
+	else
+		drvdata->freq_req = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(freq_req, 0644, tpda_show_freq_req,
+		   tpda_store_freq_req);
+
+static ssize_t tpda_show_global_flush_req(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDA_UNLOCK(drvdata);
+	val = tpda_readl(drvdata, TPDA_CR);
+	TPDA_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpda_store_global_flush_req(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf,
+					   size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDA_UNLOCK(drvdata);
+		val = tpda_readl(drvdata, TPDA_CR);
+		val = val | BIT(0);
+		tpda_writel(drvdata, val, TPDA_CR);
+		TPDA_LOCK(drvdata);
+	}
+
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(global_flush_req, 0644,
+		   tpda_show_global_flush_req, tpda_store_global_flush_req);
+
+static ssize_t tpda_show_port_flush_req(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDA_UNLOCK(drvdata);
+	val = tpda_readl(drvdata, TPDA_FLUSH_CR);
+	TPDA_LOCK(drvdata);
+
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpda_store_port_flush_req(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpda_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDA_UNLOCK(drvdata);
+		tpda_writel(drvdata, val, TPDA_FLUSH_CR);
+		TPDA_LOCK(drvdata);
+	}
+
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(port_flush_req, 0644, tpda_show_port_flush_req,
+		   tpda_store_port_flush_req);
+
+static struct attribute *tpda_attrs[] = {
+	&dev_attr_trig_async_enable.attr,
+	&dev_attr_trig_flag_ts_enable.attr,
+	&dev_attr_trig_freq_enable.attr,
+	&dev_attr_freq_ts_enable.attr,
+	&dev_attr_freq_req_val.attr,
+	&dev_attr_freq_req.attr,
+	&dev_attr_global_flush_req.attr,
+	&dev_attr_port_flush_req.attr,
+	NULL,
+};
+
+static struct attribute_group tpda_attr_grp = {
+	.attrs = tpda_attrs,
+};
+
+static const struct attribute_group *tpda_attr_grps[] = {
+	&tpda_attr_grp,
+	NULL,
+};
+
+static int tpda_parse_of_data(struct tpda_drvdata *drvdata)
+{
+	int len, port, i, ret;
+	const __be32 *prop;
+	struct device_node *node = drvdata->dev->of_node;
+
+	ret = of_property_read_u32(node, "qcom,tpda-atid", &drvdata->atid);
+	if (ret) {
+		dev_err(drvdata->dev, "TPDA ATID is not specified\n");
+		return -EINVAL;
+	}
+
+	prop = of_get_property(node, "qcom,bc-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset BC width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for BC\n");
+				return -EINVAL;
+			}
+			drvdata->bc_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+
+	prop = of_get_property(node, "qcom,tc-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset TC width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for TC\n");
+				return -EINVAL;
+			}
+			drvdata->tc_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+
+	prop = of_get_property(node, "qcom,dsb-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset DSB width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for DSB\n");
+				return -EINVAL;
+			}
+			drvdata->dsb_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+
+	prop = of_get_property(node, "qcom,cmb-elem-size", &len);
+	if (prop) {
+		len /= sizeof(__be32);
+		if (len < 2 || len > 63 || len % 2 != 0) {
+			dev_err(drvdata->dev,
+				"Dataset CMB width entries are wrong\n");
+			return -EINVAL;
+		}
+
+		for (i = 0; i < len; i++) {
+			port = be32_to_cpu(prop[i++]);
+			if (port >= TPDA_MAX_INPORTS) {
+				dev_err(drvdata->dev,
+					"Wrong port specified for CMB\n");
+				return -EINVAL;
+			}
+			drvdata->cmb_esize[port] = be32_to_cpu(prop[i]);
+		}
+	}
+	return 0;
+}
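+
+/*
+ * Illustrative devicetree fragment (values are made up): each of the
+ * qcom,*-elem-size properties is parsed above as a flat list of
+ * <port size> pairs, which is why the loops consume two cells per
+ * iteration. For example,
+ *
+ *	qcom,dsb-elem-size = <0 32>, <3 64>;
+ *
+ * would result in dsb_esize[0] = 32 and dsb_esize[3] = 64, with all
+ * other ports left at their zero-initialised default.
+ */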
+
+static void tpda_init_default_data(struct tpda_drvdata *drvdata)
+{
+	drvdata->freq_ts = true;
+}
+
+static int tpda_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct tpda_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpda-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	mutex_init(&drvdata->lock);
+
+	ret = tpda_parse_of_data(drvdata);
+	if (ret)
+		return ret;
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	if (!coresight_authstatus_enabled(drvdata->base))
+		goto err;
+
+	clk_disable_unprepare(drvdata->clk);
+
+	tpda_init_default_data(drvdata);
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_LINK;
+	desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+	desc->ops = &tpda_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = tpda_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_dbg(drvdata->dev, "TPDA initialized\n");
+	return 0;
+err:
+	clk_disable_unprepare(drvdata->clk);
+	return -EPERM;
+}
+
+static int tpda_remove(struct platform_device *pdev)
+{
+	struct tpda_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static const struct of_device_id tpda_match[] = {
+	{.compatible = "qcom,coresight-tpda"},
+	{}
+};
+
+static struct platform_driver tpda_driver = {
+	.probe          = tpda_probe,
+	.remove         = tpda_remove,
+	.driver         = {
+		.name   = "coresight-tpda",
+		.owner	= THIS_MODULE,
+		.of_match_table = tpda_match,
+	},
+};
+
+static int __init tpda_init(void)
+{
+	return platform_driver_register(&tpda_driver);
+}
+module_init(tpda_init);
+
+static void __exit tpda_exit(void)
+{
+	platform_driver_unregister(&tpda_driver);
+}
+module_exit(tpda_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Aggregator driver");
diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c
new file mode 100644
index 0000000..95d7a90
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tpdm.c
@@ -0,0 +1,4067 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+#define tpdm_writel(drvdata, val, off)	__raw_writel((val), drvdata->base + off)
+#define tpdm_readl(drvdata, off)		__raw_readl(drvdata->base + off)
+
+#define TPDM_LOCK(drvdata)						\
+do {									\
+	mb(); /* ensure configuration takes effect before we lock it */	\
+	tpdm_writel(drvdata, 0x0, CORESIGHT_LAR);			\
+} while (0)
+#define TPDM_UNLOCK(drvdata)						\
+do {									\
+	tpdm_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR);		\
+	mb(); /* ensure unlock takes effect before we configure */	\
+} while (0)
+
+/* GPR Registers */
+#define TPDM_GPR_CR(n)		(0x0 + (n * 4))
+
+/* BC Subunit Registers */
+#define TPDM_BC_CR		(0x280)
+#define TPDM_BC_SATROLL		(0x284)
+#define TPDM_BC_CNTENSET	(0x288)
+#define TPDM_BC_CNTENCLR	(0x28C)
+#define TPDM_BC_INTENSET	(0x290)
+#define TPDM_BC_INTENCLR	(0x294)
+#define TPDM_BC_TRIG_LO(n)	(0x298 + (n * 4))
+#define TPDM_BC_TRIG_HI(n)	(0x318 + (n * 4))
+#define TPDM_BC_GANG		(0x398)
+#define TPDM_BC_OVERFLOW(n)	(0x39C + (n * 4))
+#define TPDM_BC_OVSR		(0x3C0)
+#define TPDM_BC_SELR		(0x3C4)
+#define TPDM_BC_CNTR_LO		(0x3C8)
+#define TPDM_BC_CNTR_HI		(0x3CC)
+#define TPDM_BC_SHADOW_LO(n)	(0x3D0 + (n * 4))
+#define TPDM_BC_SHADOW_HI(n)	(0x450 + (n * 4))
+#define TPDM_BC_SWINC		(0x4D0)
+#define TPDM_BC_MSR(n)		(0x4F0 + (n * 4))
+
+/* TC Subunit Registers */
+#define TPDM_TC_CR		(0x500)
+#define TPDM_TC_CNTENSET	(0x504)
+#define TPDM_TC_CNTENCLR	(0x508)
+#define TPDM_TC_INTENSET	(0x50C)
+#define TPDM_TC_INTENCLR	(0x510)
+#define TPDM_TC_TRIG_SEL(n)	(0x514 + (n * 4))
+#define TPDM_TC_TRIG_LO(n)	(0x534 + (n * 4))
+#define TPDM_TC_TRIG_HI(n)	(0x554 + (n * 4))
+#define TPDM_TC_OVSR_GP		(0x580)
+#define TPDM_TC_OVSR_IMPL	(0x584)
+#define TPDM_TC_SELR		(0x588)
+#define TPDM_TC_CNTR_LO		(0x58C)
+#define TPDM_TC_CNTR_HI		(0x590)
+#define TPDM_TC_SHADOW_LO(n)	(0x594 + (n * 4))
+#define TPDM_TC_SHADOW_HI(n)	(0x644 + (n * 4))
+#define TPDM_TC_SWINC		(0x700)
+#define TPDM_TC_MSR(n)		(0x768 + (n * 4))
+
+/* DSB Subunit Registers */
+#define TPDM_DSB_CR		(0x780)
+#define TPDM_DSB_TIER		(0x784)
+#define TPDM_DSB_TPR(n)		(0x788 + (n * 4))
+#define TPDM_DSB_TPMR(n)	(0x7A8 + (n * 4))
+#define TPDM_DSB_XPR(n)		(0x7C8 + (n * 4))
+#define TPDM_DSB_XPMR(n)	(0x7E8 + (n * 4))
+#define TPDM_DSB_EDCR(n)	(0x808 + (n * 4))
+#define TPDM_DSB_EDCMR(n)	(0x848 + (n * 4))
+#define TPDM_DSB_CA_SELECT(n)	(0x86c + (n * 4))
+#define TPDM_DSB_MSR(n)		(0x980 + (n * 4))
+
+/* CMB/MCMB Subunit Registers */
+#define TPDM_CMB_CR		(0xA00)
+#define TPDM_CMB_TIER		(0xA04)
+#define TPDM_CMB_TPR(n)		(0xA08 + (n * 4))
+#define TPDM_CMB_TPMR(n)	(0xA10 + (n * 4))
+#define TPDM_CMB_XPR(n)		(0xA18 + (n * 4))
+#define TPDM_CMB_XPMR(n)	(0xA20 + (n * 4))
+#define TPDM_CMB_MARKR		(0xA28)
+#define TPDM_CMB_READCTL	(0xA70)
+#define TPDM_CMB_READVAL	(0xA74)
+#define TPDM_CMB_MSR(n)		(0xA80 + (n * 4))
+
+/* TPDM Specific Registers */
+#define TPDM_ITATBCNTRL		(0xEF0)
+#define TPDM_CLK_CTRL		(0x220)
+
+#define TPDM_DATASETS		32
+#define TPDM_BC_MAX_COUNTERS	32
+#define TPDM_BC_MAX_OVERFLOW	6
+#define TPDM_BC_MAX_MSR		4
+#define TPDM_TC_MAX_COUNTERS	44
+#define TPDM_TC_MAX_TRIG	8
+#define TPDM_TC_MAX_MSR		6
+#define TPDM_DSB_MAX_PATT	8
+#define TPDM_DSB_MAX_SELECT	8
+#define TPDM_DSB_MAX_MSR	32
+#define TPDM_DSB_MAX_EDCR	16
+#define TPDM_DSB_MAX_LINES	256
+#define TPDM_CMB_PATT_CMP	2
+#define TPDM_CMB_MAX_MSR	128
+#define TPDM_MCMB_MAX_LANES	8
+
+/* DSB programming modes */
+#define TPDM_DSB_MODE_CYCACC(val)	BMVAL(val, 0, 2)
+#define TPDM_DSB_MODE_PERF		BIT(3)
+#define TPDM_DSB_MODE_HPBYTESEL(val)	BMVAL(val, 4, 8)
+#define TPDM_MODE_ALL			(0xFFFFFFF)
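+
+/*
+ * Example decode of a DSB mode word (illustrative value): for
+ * mode = 0x1A, TPDM_DSB_MODE_CYCACC(0x1A) = 0x2, bit 3 is set so
+ * TPDM_DSB_MODE_PERF is selected, and TPDM_DSB_MODE_HPBYTESEL(0x1A) = 0x1
+ * selects byte lane 1 for the high-performance mode. __tpdm_enable_dsb()
+ * below moves these fields into TPDM_DSB_CR bits [11:9], bit 1 and
+ * bits [6:2] respectively.
+ */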
+
+#define NUM_OF_BITS		32
+#define TPDM_GPR_REGS_MAX	160
+
+#define TPDM_TRACE_ID_START	128
+
+#define TPDM_REVISION_A		0
+#define TPDM_REVISION_B		1
+
+enum tpdm_dataset {
+	TPDM_DS_IMPLDEF,
+	TPDM_DS_DSB,
+	TPDM_DS_CMB,
+	TPDM_DS_TC,
+	TPDM_DS_BC,
+	TPDM_DS_GPR,
+	TPDM_DS_MCMB,
+};
+
+enum tpdm_mode {
+	TPDM_MODE_ATB,
+	TPDM_MODE_APB,
+};
+
+enum tpdm_support_type {
+	TPDM_SUPPORT_TYPE_FULL,
+	TPDM_SUPPORT_TYPE_PARTIAL,
+	TPDM_SUPPORT_TYPE_NO,
+};
+
+enum tpdm_cmb_mode {
+	TPDM_CMB_MODE_CONTINUOUS,
+	TPDM_CMB_MODE_TRACE_ON_CHANGE,
+};
+
+enum tpdm_cmb_patt_bits {
+	TPDM_CMB_LSB,
+	TPDM_CMB_MSB,
+};
+
+#ifdef CONFIG_CORESIGHT_TPDM_DEFAULT_ENABLE
+static int boot_enable = 1;
+#else
+static int boot_enable;
+#endif
+
+module_param_named(
+	boot_enable, boot_enable, int, 0444
+);
+
+struct gpr_dataset {
+	DECLARE_BITMAP(gpr_dirty, TPDM_GPR_REGS_MAX);
+	uint32_t		gp_regs[TPDM_GPR_REGS_MAX];
+};
+
+struct bc_dataset {
+	enum tpdm_mode		capture_mode;
+	enum tpdm_mode		retrieval_mode;
+	uint32_t		sat_mode;
+	uint32_t		enable_counters;
+	uint32_t		clear_counters;
+	uint32_t		enable_irq;
+	uint32_t		clear_irq;
+	uint32_t		trig_val_lo[TPDM_BC_MAX_COUNTERS];
+	uint32_t		trig_val_hi[TPDM_BC_MAX_COUNTERS];
+	uint32_t		enable_ganging;
+	uint32_t		overflow_val[TPDM_BC_MAX_OVERFLOW];
+	uint32_t		msr[TPDM_BC_MAX_MSR];
+};
+
+struct tc_dataset {
+	enum tpdm_mode		capture_mode;
+	enum tpdm_mode		retrieval_mode;
+	bool			sat_mode;
+	uint32_t		enable_counters;
+	uint32_t		clear_counters;
+	uint32_t		enable_irq;
+	uint32_t		clear_irq;
+	uint32_t		trig_sel[TPDM_TC_MAX_TRIG];
+	uint32_t		trig_val_lo[TPDM_TC_MAX_TRIG];
+	uint32_t		trig_val_hi[TPDM_TC_MAX_TRIG];
+	uint32_t		msr[TPDM_TC_MAX_MSR];
+};
+
+struct dsb_dataset {
+	uint32_t		mode;
+	uint32_t		edge_ctrl[TPDM_DSB_MAX_EDCR];
+	uint32_t		edge_ctrl_mask[TPDM_DSB_MAX_EDCR / 2];
+	uint32_t		patt_val[TPDM_DSB_MAX_PATT];
+	uint32_t		patt_mask[TPDM_DSB_MAX_PATT];
+	bool			patt_ts;
+	bool			patt_type;
+	uint32_t		trig_patt_val[TPDM_DSB_MAX_PATT];
+	uint32_t		trig_patt_mask[TPDM_DSB_MAX_PATT];
+	bool			trig_ts;
+	uint32_t		select_val[TPDM_DSB_MAX_SELECT];
+	uint32_t		msr[TPDM_DSB_MAX_MSR];
+};
+
+struct mcmb_dataset {
+	uint8_t		mcmb_trig_lane;
+	uint8_t		mcmb_lane_select;
+};
+
+struct cmb_dataset {
+	enum tpdm_cmb_mode	mode;
+	uint32_t		patt_val[TPDM_CMB_PATT_CMP];
+	uint32_t		patt_mask[TPDM_CMB_PATT_CMP];
+	bool			patt_ts;
+	uint32_t		trig_patt_val[TPDM_CMB_PATT_CMP];
+	uint32_t		trig_patt_mask[TPDM_CMB_PATT_CMP];
+	bool			trig_ts;
+	uint32_t		msr[TPDM_CMB_MAX_MSR];
+	uint8_t			read_ctl_reg;
+	struct mcmb_dataset	*mcmb;
+};
+
+struct tpdm_drvdata {
+	void __iomem		*base;
+	struct device		*dev;
+	struct coresight_device	*csdev;
+	struct clk		*clk;
+	struct mutex		lock;
+	bool			enable;
+	bool			clk_enable;
+	DECLARE_BITMAP(datasets, TPDM_DATASETS);
+	DECLARE_BITMAP(enable_ds, TPDM_DATASETS);
+	enum tpdm_support_type	tc_trig_type;
+	enum tpdm_support_type	bc_trig_type;
+	enum tpdm_support_type	bc_gang_type;
+	uint32_t		bc_counters_avail;
+	uint32_t		tc_counters_avail;
+	struct gpr_dataset	*gpr;
+	struct bc_dataset	*bc;
+	struct tc_dataset	*tc;
+	struct dsb_dataset	*dsb;
+	struct cmb_dataset	*cmb;
+	int			traceid;
+	uint32_t		version;
+	bool			msr_support;
+	bool			msr_fix_req;
+};
+
+static void __tpdm_enable_gpr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	for (i = 0; i < TPDM_GPR_REGS_MAX; i++) {
+		if (!test_bit(i, drvdata->gpr->gpr_dirty))
+			continue;
+		tpdm_writel(drvdata, drvdata->gpr->gp_regs[i], TPDM_GPR_CR(i));
+	}
+}
+
+static void __tpdm_config_bc_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_BC_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->bc->msr[i], TPDM_BC_MSR(i));
+}
+
+static void __tpdm_config_tc_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_TC_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->tc->msr[i], TPDM_TC_MSR(i));
+}
+
+static void __tpdm_config_dsb_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_DSB_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->dsb->msr[i], TPDM_DSB_MSR(i));
+}
+
+static void __tpdm_config_cmb_msr(struct tpdm_drvdata *drvdata)
+{
+	int i;
+
+	if (!drvdata->msr_support)
+		return;
+
+	for (i = 0; i < TPDM_CMB_MAX_MSR; i++)
+		tpdm_writel(drvdata, drvdata->cmb->msr[i], TPDM_CMB_MSR(i));
+}
+
+static void __tpdm_enable_bc(struct tpdm_drvdata *drvdata)
+{
+	int i;
+	uint32_t val;
+
+	if (drvdata->bc->sat_mode)
+		tpdm_writel(drvdata, drvdata->bc->sat_mode,
+			    TPDM_BC_SATROLL);
+	else
+		tpdm_writel(drvdata, 0x0, TPDM_BC_SATROLL);
+
+	if (drvdata->bc->enable_counters) {
+		tpdm_writel(drvdata, 0xFFFFFFFF, TPDM_BC_CNTENCLR);
+		tpdm_writel(drvdata, drvdata->bc->enable_counters,
+			    TPDM_BC_CNTENSET);
+	}
+	if (drvdata->bc->clear_counters)
+		tpdm_writel(drvdata, drvdata->bc->clear_counters,
+			    TPDM_BC_CNTENCLR);
+
+	if (drvdata->bc->enable_irq) {
+		tpdm_writel(drvdata, 0xFFFFFFFF, TPDM_BC_INTENCLR);
+		tpdm_writel(drvdata, drvdata->bc->enable_irq,
+			    TPDM_BC_INTENSET);
+	}
+	if (drvdata->bc->clear_irq)
+		tpdm_writel(drvdata, drvdata->bc->clear_irq,
+			    TPDM_BC_INTENCLR);
+
+	if (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_FULL) {
+		for (i = 0; i < drvdata->bc_counters_avail; i++) {
+			tpdm_writel(drvdata, drvdata->bc->trig_val_lo[i],
+				    TPDM_BC_TRIG_LO(i));
+			tpdm_writel(drvdata, drvdata->bc->trig_val_hi[i],
+				    TPDM_BC_TRIG_HI(i));
+		}
+	} else if (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL) {
+		tpdm_writel(drvdata, drvdata->bc->trig_val_lo[0],
+			    TPDM_BC_TRIG_LO(0));
+		tpdm_writel(drvdata, drvdata->bc->trig_val_hi[0],
+			    TPDM_BC_TRIG_HI(0));
+	}
+
+	if (drvdata->bc->enable_ganging)
+		tpdm_writel(drvdata, drvdata->bc->enable_ganging, TPDM_BC_GANG);
+
+	for (i = 0; i < TPDM_BC_MAX_OVERFLOW; i++)
+		tpdm_writel(drvdata, drvdata->bc->overflow_val[i],
+			    TPDM_BC_OVERFLOW(i));
+
+	__tpdm_config_bc_msr(drvdata);
+
+	val = tpdm_readl(drvdata, TPDM_BC_CR);
+	if (drvdata->bc->retrieval_mode == TPDM_MODE_APB)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+	tpdm_writel(drvdata, val, TPDM_BC_CR);
+
+	val = tpdm_readl(drvdata, TPDM_BC_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_BC_CR);
+}
+
+static void __tpdm_enable_tc(struct tpdm_drvdata *drvdata)
+{
+	int i;
+	uint32_t val;
+
+	if (drvdata->tc->enable_counters) {
+		tpdm_writel(drvdata, 0xF, TPDM_TC_CNTENCLR);
+		tpdm_writel(drvdata, drvdata->tc->enable_counters,
+			    TPDM_TC_CNTENSET);
+	}
+	if (drvdata->tc->clear_counters)
+		tpdm_writel(drvdata, drvdata->tc->clear_counters,
+			    TPDM_TC_CNTENCLR);
+
+	if (drvdata->tc->enable_irq) {
+		tpdm_writel(drvdata, 0xF, TPDM_TC_INTENCLR);
+		tpdm_writel(drvdata, drvdata->tc->enable_irq,
+			    TPDM_TC_INTENSET);
+	}
+	if (drvdata->tc->clear_irq)
+		tpdm_writel(drvdata, drvdata->tc->clear_irq,
+			    TPDM_TC_INTENCLR);
+
+	if (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_FULL) {
+		for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+			tpdm_writel(drvdata, drvdata->tc->trig_sel[i],
+				    TPDM_TC_TRIG_SEL(i));
+			tpdm_writel(drvdata, drvdata->tc->trig_val_lo[i],
+				    TPDM_TC_TRIG_LO(i));
+			tpdm_writel(drvdata, drvdata->tc->trig_val_hi[i],
+				    TPDM_TC_TRIG_HI(i));
+		}
+	} else if (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL) {
+		tpdm_writel(drvdata, drvdata->tc->trig_sel[0],
+			    TPDM_TC_TRIG_SEL(0));
+		tpdm_writel(drvdata, drvdata->tc->trig_val_lo[0],
+			    TPDM_TC_TRIG_LO(0));
+		tpdm_writel(drvdata, drvdata->tc->trig_val_hi[0],
+			    TPDM_TC_TRIG_HI(0));
+	}
+
+	__tpdm_config_tc_msr(drvdata);
+
+	val = tpdm_readl(drvdata, TPDM_TC_CR);
+	if (drvdata->tc->sat_mode)
+		val = val | BIT(4);
+	else
+		val = val & ~BIT(4);
+	if (drvdata->tc->retrieval_mode == TPDM_MODE_APB)
+		val = val | BIT(2);
+	else
+		val = val & ~BIT(2);
+	tpdm_writel(drvdata, val, TPDM_TC_CR);
+
+	val = tpdm_readl(drvdata, TPDM_TC_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_TC_CR);
+}
+
+static void __tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t val, mode, i;
+
+	for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
+		tpdm_writel(drvdata, drvdata->dsb->edge_ctrl[i],
+			    TPDM_DSB_EDCR(i));
+	for (i = 0; i < TPDM_DSB_MAX_EDCR / 2; i++)
+		tpdm_writel(drvdata, drvdata->dsb->edge_ctrl_mask[i],
+			    TPDM_DSB_EDCMR(i));
+
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		tpdm_writel(drvdata, drvdata->dsb->patt_val[i],
+			    TPDM_DSB_TPR(i));
+		tpdm_writel(drvdata, drvdata->dsb->patt_mask[i],
+			    TPDM_DSB_TPMR(i));
+	}
+
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		tpdm_writel(drvdata, drvdata->dsb->trig_patt_val[i],
+			    TPDM_DSB_XPR(i));
+		tpdm_writel(drvdata, drvdata->dsb->trig_patt_mask[i],
+			    TPDM_DSB_XPMR(i));
+	}
+
+	for (i = 0; i < TPDM_DSB_MAX_SELECT; i++)
+		tpdm_writel(drvdata, drvdata->dsb->select_val[i],
+			    TPDM_DSB_CA_SELECT(i));
+
+	val = tpdm_readl(drvdata, TPDM_DSB_TIER);
+	if (drvdata->dsb->patt_ts == true) {
+		val = val | BIT(0);
+		if (drvdata->dsb->patt_type == true)
+			val = val | BIT(2);
+		else
+			val = val & ~BIT(2);
+	} else {
+		val = val & ~BIT(0);
+	}
+	if (drvdata->dsb->trig_ts == true)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_DSB_TIER);
+
+	if (!drvdata->msr_fix_req)
+		__tpdm_config_dsb_msr(drvdata);
+
+	val = tpdm_readl(drvdata, TPDM_DSB_CR);
+	/* Set the cycle accurate mode */
+	mode = TPDM_DSB_MODE_CYCACC(drvdata->dsb->mode);
+	val = val & ~(0x7 << 9);
+	val = val | (mode << 9);
+	/* Set the byte lane for high-performance mode */
+	mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode);
+	val = val & ~(0x1F << 2);
+	val = val | (mode << 2);
+	/* Set the performance mode */
+	if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_DSB_CR);
+
+	val = tpdm_readl(drvdata, TPDM_DSB_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_DSB_CR);
+
+	if (drvdata->msr_fix_req)
+		__tpdm_config_dsb_msr(drvdata);
+}
+
+static void __tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t val;
+
+	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
+		    TPDM_CMB_TPR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
+		    TPDM_CMB_TPMR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
+		    TPDM_CMB_TPR(TPDM_CMB_MSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
+		    TPDM_CMB_TPMR(TPDM_CMB_MSB));
+
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
+		    TPDM_CMB_XPR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
+		    TPDM_CMB_XPMR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
+		    TPDM_CMB_XPR(TPDM_CMB_MSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
+		    TPDM_CMB_XPMR(TPDM_CMB_MSB));
+
+	val = tpdm_readl(drvdata, TPDM_CMB_TIER);
+	if (drvdata->cmb->patt_ts == true)
+		val = val | BIT(0);
+	else
+		val = val & ~BIT(0);
+	if (drvdata->cmb->trig_ts == true)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_CMB_TIER);
+
+	__tpdm_config_cmb_msr(drvdata);
+
+	val = tpdm_readl(drvdata, TPDM_CMB_CR);
+	/* Set the flow control bit */
+	val = val & ~BIT(2);
+	if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
+		val = val & ~BIT(1);
+	else
+		val = val | BIT(1);
+	tpdm_writel(drvdata, val, TPDM_CMB_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_CMB_CR);
+}
+
+static void __tpdm_enable_mcmb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t val;
+	struct mcmb_dataset *mcmb = drvdata->cmb->mcmb;
+
+	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_LSB],
+		    TPDM_CMB_TPR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_LSB],
+		    TPDM_CMB_TPMR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_val[TPDM_CMB_MSB],
+		    TPDM_CMB_TPR(TPDM_CMB_MSB));
+	tpdm_writel(drvdata, drvdata->cmb->patt_mask[TPDM_CMB_MSB],
+		    TPDM_CMB_TPMR(TPDM_CMB_MSB));
+
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_LSB],
+		    TPDM_CMB_XPR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB],
+		    TPDM_CMB_XPMR(TPDM_CMB_LSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_val[TPDM_CMB_MSB],
+		    TPDM_CMB_XPR(TPDM_CMB_MSB));
+	tpdm_writel(drvdata, drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB],
+		    TPDM_CMB_XPMR(TPDM_CMB_MSB));
+
+	val = tpdm_readl(drvdata, TPDM_CMB_TIER);
+	if (drvdata->cmb->patt_ts == true)
+		val = val | BIT(0);
+	else
+		val = val & ~BIT(0);
+	if (drvdata->cmb->trig_ts == true)
+		val = val | BIT(1);
+	else
+		val = val & ~BIT(1);
+	tpdm_writel(drvdata, val, TPDM_CMB_TIER);
+
+	__tpdm_config_cmb_msr(drvdata);
+
+	val = tpdm_readl(drvdata, TPDM_CMB_CR);
+	/* Set the flow control bit */
+	val = val & ~BIT(2);
+	if (drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS)
+		val = val & ~BIT(1);
+	else
+		val = val | BIT(1);
+
+	val = val | (BMVAL(mcmb->mcmb_trig_lane, 0, 3) << 18);
+
+	val = val | (mcmb->mcmb_lane_select << 10);
+
+	tpdm_writel(drvdata, val, TPDM_CMB_CR);
+	/* Set the enable bit */
+	val = val | BIT(0);
+	tpdm_writel(drvdata, val, TPDM_CMB_CR);
+}
+
+static void __tpdm_enable(struct tpdm_drvdata *drvdata)
+{
+	TPDM_UNLOCK(drvdata);
+
+	if (drvdata->clk_enable)
+		tpdm_writel(drvdata, 0x1, TPDM_CLK_CTRL);
+
+	if (test_bit(TPDM_DS_GPR, drvdata->enable_ds))
+		__tpdm_enable_gpr(drvdata);
+
+	if (test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		__tpdm_enable_bc(drvdata);
+
+	if (test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		__tpdm_enable_tc(drvdata);
+
+	if (test_bit(TPDM_DS_DSB, drvdata->enable_ds))
+		__tpdm_enable_dsb(drvdata);
+
+	if (test_bit(TPDM_DS_CMB, drvdata->enable_ds))
+		__tpdm_enable_cmb(drvdata);
+	else if (test_bit(TPDM_DS_MCMB, drvdata->enable_ds))
+		__tpdm_enable_mcmb(drvdata);
+
+	TPDM_LOCK(drvdata);
+}
+
+static int tpdm_enable(struct coresight_device *csdev,
+		       struct perf_event_attr *attr,  u32 mode)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+	int ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	mutex_lock(&drvdata->lock);
+	__tpdm_enable(drvdata);
+	drvdata->enable = true;
+	mutex_unlock(&drvdata->lock);
+
+	dev_info(drvdata->dev, "TPDM tracing enabled\n");
+	return 0;
+}
+
+static void __tpdm_disable_bc(struct tpdm_drvdata *drvdata)
+{
+	uint32_t config;
+
+	config = tpdm_readl(drvdata, TPDM_BC_CR);
+	config = config & ~BIT(0);
+	tpdm_writel(drvdata, config, TPDM_BC_CR);
+}
+
+static void __tpdm_disable_tc(struct tpdm_drvdata *drvdata)
+{
+	uint32_t config;
+
+	config = tpdm_readl(drvdata, TPDM_TC_CR);
+	config = config & ~BIT(0);
+	tpdm_writel(drvdata, config, TPDM_TC_CR);
+}
+
+static void __tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t config;
+
+	config = tpdm_readl(drvdata, TPDM_DSB_CR);
+	config = config & ~BIT(0);
+	tpdm_writel(drvdata, config, TPDM_DSB_CR);
+}
+
+static void __tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
+{
+	uint32_t config;
+
+	config = tpdm_readl(drvdata, TPDM_CMB_CR);
+	config = config & ~BIT(0);
+	tpdm_writel(drvdata, config, TPDM_CMB_CR);
+}
+
+static void __tpdm_disable(struct tpdm_drvdata *drvdata)
+{
+	TPDM_UNLOCK(drvdata);
+
+	if (test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		__tpdm_disable_bc(drvdata);
+
+	if (test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		__tpdm_disable_tc(drvdata);
+
+	if (test_bit(TPDM_DS_DSB, drvdata->enable_ds))
+		__tpdm_disable_dsb(drvdata);
+
+	if (test_bit(TPDM_DS_CMB, drvdata->enable_ds))
+		__tpdm_disable_cmb(drvdata);
+
+	if (drvdata->clk_enable)
+		tpdm_writel(drvdata, 0x0, TPDM_CLK_CTRL);
+
+	TPDM_LOCK(drvdata);
+}
+
+static void tpdm_disable(struct coresight_device *csdev)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	mutex_lock(&drvdata->lock);
+	__tpdm_disable(drvdata);
+	drvdata->enable = false;
+	mutex_unlock(&drvdata->lock);
+
+	clk_disable_unprepare(drvdata->clk);
+
+	dev_info(drvdata->dev, "TPDM tracing disabled\n");
+}
+
+static int tpdm_trace_id(struct coresight_device *csdev)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+	return drvdata->traceid;
+}
+
+static const struct coresight_ops_source tpdm_source_ops = {
+	.trace_id	= tpdm_trace_id,
+	.enable		= tpdm_enable,
+	.disable	= tpdm_disable,
+};
+
+static const struct coresight_ops tpdm_cs_ops = {
+	.source_ops	= &tpdm_source_ops,
+};
+
+static ssize_t tpdm_show_available_datasets(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+
+	if (test_bit(TPDM_DS_IMPLDEF, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s",
+				  "IMPLDEF");
+
+	if (test_bit(TPDM_DS_DSB, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s", "DSB");
+
+	if (test_bit(TPDM_DS_CMB, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s", "CMB");
+
+	if (test_bit(TPDM_DS_TC, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s", "TC");
+
+	if (test_bit(TPDM_DS_BC, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s", "BC");
+
+	if (test_bit(TPDM_DS_GPR, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s", "GPR");
+
+	if (test_bit(TPDM_DS_MCMB, drvdata->datasets))
+		size += scnprintf(buf + size, PAGE_SIZE - size, "%-8s", "MCMB");
+
+	size += scnprintf(buf + size, PAGE_SIZE - size, "\n");
+	return size;
+}
+static DEVICE_ATTR(available_datasets, 0444, tpdm_show_available_datasets,
+		   NULL);
+
+static ssize_t tpdm_show_enable_datasets(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size;
+
+	size = scnprintf(buf, PAGE_SIZE, "%*pb\n", TPDM_DATASETS,
+			 drvdata->enable_ds);
+
+	if (PAGE_SIZE - size < 2)
+		size = -EINVAL;
+	else
+		size += scnprintf(buf + size, 2, "\n");
+	return size;
+}
+
+static ssize_t tpdm_store_enable_datasets(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+	int i;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	if (drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	for (i = 0; i < TPDM_DATASETS; i++) {
+		if (test_bit(i, drvdata->datasets) && (val & BIT(i)))
+			__set_bit(i, drvdata->enable_ds);
+		else
+			__clear_bit(i, drvdata->enable_ds);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(enable_datasets, 0644,
+		   tpdm_show_enable_datasets, tpdm_store_enable_datasets);
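+
+/*
+ * Sysfs usage sketch (assumed path, for illustration): the store handler
+ * above takes a hex bitmask indexed by enum tpdm_dataset, and only bits
+ * that are also present in drvdata->datasets are honoured. For example,
+ *
+ *	echo 6 > /sys/bus/coresight/devices/<tpdm>/enable_datasets
+ *
+ * requests the DSB (bit 1) and CMB (bit 2) datasets, assuming the TPDM
+ * instance advertises them in available_datasets. The write is rejected
+ * with -EPERM while tracing is enabled.
+ */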
+
+static ssize_t tpdm_show_gp_regs(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_GPR, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_GPR_REGS_MAX; i++) {
+		if (!test_bit(i, drvdata->gpr->gpr_dirty))
+			continue;
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->gpr->gp_regs[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_gp_regs(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_GPR, drvdata->datasets) ||
+	    index >= TPDM_GPR_REGS_MAX)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->gpr->gp_regs[index] = val;
+	__set_bit(index, drvdata->gpr->gpr_dirty);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(gp_regs, 0644, tpdm_show_gp_regs,
+		   tpdm_store_gp_regs);
+
+static ssize_t tpdm_show_bc_capture_mode(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->bc->capture_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+static ssize_t tpdm_store_bc_capture_mode(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+	uint32_t val;
+
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->bc->capture_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB") &&
+		   drvdata->bc->retrieval_mode == TPDM_MODE_APB) {
+
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_BC_CR);
+		val = val | BIT(3);
+		tpdm_writel(drvdata, val, TPDM_BC_CR);
+		TPDM_LOCK(drvdata);
+
+		drvdata->bc->capture_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_capture_mode, 0644,
+		   tpdm_show_bc_capture_mode, tpdm_store_bc_capture_mode);
+
+static ssize_t tpdm_show_bc_retrieval_mode(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->bc->retrieval_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+static ssize_t tpdm_store_bc_retrieval_mode(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->bc->retrieval_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB")) {
+		drvdata->bc->retrieval_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_retrieval_mode, 0644,
+		   tpdm_show_bc_retrieval_mode, tpdm_store_bc_retrieval_mode);
+
+static ssize_t tpdm_store_bc_reset_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_BC_CR);
+		val = val | BIT(1);
+		tpdm_writel(drvdata, val, TPDM_BC_CR);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_reset_counters, 0644, NULL,
+		   tpdm_store_bc_reset_counters);
+
+static ssize_t tpdm_show_bc_sat_mode(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->sat_mode);
+}
+
+static ssize_t tpdm_store_bc_sat_mode(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->sat_mode = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_sat_mode, 0644,
+		   tpdm_show_bc_sat_mode, tpdm_store_bc_sat_mode);
+
+static ssize_t tpdm_show_bc_enable_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->enable_counters);
+}
+
+static ssize_t tpdm_store_bc_enable_counters(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->enable_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_enable_counters, 0644,
+		   tpdm_show_bc_enable_counters, tpdm_store_bc_enable_counters);
+
+static ssize_t tpdm_show_bc_clear_counters(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->clear_counters);
+}
+
+static ssize_t tpdm_store_bc_clear_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->clear_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_clear_counters, 0644,
+		   tpdm_show_bc_clear_counters, tpdm_store_bc_clear_counters);
+
+static ssize_t tpdm_show_bc_enable_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->enable_irq);
+}
+
+static ssize_t tpdm_store_bc_enable_irq(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->enable_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_enable_irq, 0644,
+		   tpdm_show_bc_enable_irq, tpdm_store_bc_enable_irq);
+
+static ssize_t tpdm_show_bc_clear_irq(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->clear_irq);
+}
+
+static ssize_t tpdm_store_bc_clear_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->clear_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_clear_irq, 0644,
+		   tpdm_show_bc_clear_irq, tpdm_store_bc_clear_irq);
+
+static ssize_t tpdm_show_bc_trig_val_lo(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_BC_MAX_COUNTERS; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->bc->trig_val_lo[i]);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
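+/*
+ * The trigger value stores take an "<index> <value>" pair in hex; for
+ * example (sysfs path assumed) echo "0 ffffffff" > .../bc_trig_val_lo
+ * caches 0xffffffff as the low trigger value of counter 0. The index must
+ * be below the number of available counters and is further restricted by
+ * the trigger support type reported by the TPDM.
+ */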
+static ssize_t tpdm_store_bc_trig_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets) ||
+	    index >= drvdata->bc_counters_avail ||
+	    drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->trig_val_lo[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_trig_val_lo, 0644,
+		   tpdm_show_bc_trig_val_lo, tpdm_store_bc_trig_val_lo);
+
+static ssize_t tpdm_show_bc_trig_val_hi(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_BC_MAX_COUNTERS; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->bc->trig_val_hi[i]);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_bc_trig_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets) ||
+	    index >= drvdata->bc_counters_avail ||
+	    drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->bc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->trig_val_hi[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_trig_val_hi, 0644,
+		   tpdm_show_bc_trig_val_hi, tpdm_store_bc_trig_val_hi);
+
+static ssize_t tpdm_show_bc_enable_ganging(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->bc->enable_ganging);
+}
+
+static ssize_t tpdm_store_bc_enable_ganging(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->enable_ganging = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_enable_ganging, 0644,
+		   tpdm_show_bc_enable_ganging, tpdm_store_bc_enable_ganging);
+
+static ssize_t tpdm_show_bc_overflow_val(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_BC_MAX_OVERFLOW; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->bc->overflow_val[i]);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_bc_overflow_val(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets) ||
+	    index >= TPDM_BC_MAX_OVERFLOW)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->overflow_val[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_overflow_val, 0644,
+		   tpdm_show_bc_overflow_val, tpdm_store_bc_overflow_val);
+
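+/*
+ * The OVSR, counter select, count value, shadow value and SW increment
+ * attributes below access TPDM registers directly: they require the BC
+ * dataset to be enabled and drvdata->enable to be set, and they bracket
+ * every register access with TPDM_UNLOCK()/TPDM_LOCK() while holding
+ * drvdata->lock.
+ */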
+static ssize_t tpdm_show_bc_ovsr(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_OVSR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_bc_ovsr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_BC_OVSR);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_ovsr, 0644,
+		   tpdm_show_bc_ovsr, tpdm_store_bc_ovsr);
+
+static ssize_t tpdm_show_bc_counter_sel(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_bc_counter_sel(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable || val >= drvdata->bc_counters_avail) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	tpdm_writel(drvdata, val, TPDM_BC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_counter_sel, 0644,
+		   tpdm_show_bc_counter_sel, tpdm_store_bc_counter_sel);
+
+static ssize_t tpdm_show_bc_count_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_CNTR_LO);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_bc_count_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_BC_SELR);
+
+		/*
+		 * The count value can only be written while the selected
+		 * counter is disabled; re-lock and bail out otherwise.
+		 */
+		if (BVAL(tpdm_readl(drvdata, TPDM_BC_CNTENSET), select)) {
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_BC_CNTR_LO);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_count_val_lo, 0644,
+		   tpdm_show_bc_count_val_lo, tpdm_store_bc_count_val_lo);
+
+static ssize_t tpdm_show_bc_count_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_CNTR_HI);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_bc_count_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_BC_SELR);
+
+		/*
+		 * The count value can only be written while the selected
+		 * counter is disabled; re-lock and bail out otherwise.
+		 */
+		if (BVAL(tpdm_readl(drvdata, TPDM_BC_CNTENSET), select)) {
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_BC_CNTR_HI);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_count_val_hi, 0644,
+		   tpdm_show_bc_count_val_hi, tpdm_store_bc_count_val_hi);
+
+static ssize_t tpdm_show_bc_shadow_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < drvdata->bc_counters_avail; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_BC_SHADOW_LO(i)));
+	}
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_shadow_val_lo, 0644,
+		   tpdm_show_bc_shadow_val_lo, NULL);
+
+static ssize_t tpdm_show_bc_shadow_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < drvdata->bc_counters_avail; i++)
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_BC_SHADOW_HI(i)));
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_shadow_val_hi, 0644,
+		   tpdm_show_bc_shadow_val_hi, NULL);
+
+static ssize_t tpdm_show_bc_sw_inc(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_BC_SWINC);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_bc_sw_inc(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_BC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_BC_SWINC);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_sw_inc, 0644,
+		   tpdm_show_bc_sw_inc, tpdm_store_bc_sw_inc);
+
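+/*
+ * MSR attributes are write-only (0200) and only usable when the TPDM
+ * advertises MSR support. The store format is "<index> <hex value>" with a
+ * decimal index; the value is only cached here and is presumably written to
+ * the hardware when the TPDM is enabled elsewhere in the driver.
+ */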
+static ssize_t tpdm_store_bc_msr(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned int num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_BC, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_BC_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->bc->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(bc_msr, 0200, NULL, tpdm_store_bc_msr);
+
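+/*
+ * "TC" (assumed to be the tenure counter dataset) sysfs interface. The
+ * attributes mirror the BC set above: configuration values are cached under
+ * drvdata->lock, while the OVSR, counter select, count/shadow value and SW
+ * increment attributes program the hardware and therefore also require
+ * drvdata->enable.
+ */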
+static ssize_t tpdm_show_tc_capture_mode(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->tc->capture_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+static ssize_t tpdm_store_tc_capture_mode(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+	uint32_t val;
+
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->tc->capture_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB") &&
+		   drvdata->tc->retrieval_mode == TPDM_MODE_APB) {
+
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_TC_CR);
+		val = val | BIT(3);
+		tpdm_writel(drvdata, val, TPDM_TC_CR);
+		TPDM_LOCK(drvdata);
+
+		drvdata->tc->capture_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_capture_mode, 0644,
+		   tpdm_show_tc_capture_mode, tpdm_store_tc_capture_mode);
+
+static ssize_t tpdm_show_tc_retrieval_mode(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->tc->retrieval_mode == TPDM_MODE_ATB ?
+			 "ATB" : "APB");
+}
+
+static ssize_t tpdm_store_tc_retrieval_mode(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+
+	if (size >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (!strcmp(str, "ATB")) {
+		drvdata->tc->retrieval_mode = TPDM_MODE_ATB;
+	} else if (!strcmp(str, "APB")) {
+		drvdata->tc->retrieval_mode = TPDM_MODE_APB;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_retrieval_mode, 0644,
+		   tpdm_show_tc_retrieval_mode, tpdm_store_tc_retrieval_mode);
+
+static ssize_t tpdm_store_tc_reset_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		val = tpdm_readl(drvdata, TPDM_TC_CR);
+		val = val | BIT(1);
+		tpdm_writel(drvdata, val, TPDM_TC_CR);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_reset_counters, 0644, NULL,
+		   tpdm_store_tc_reset_counters);
+
+static ssize_t tpdm_show_tc_sat_mode(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->tc->sat_mode);
+}
+
+static ssize_t tpdm_store_tc_sat_mode(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->tc->sat_mode = true;
+	else
+		drvdata->tc->sat_mode = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_sat_mode, 0644,
+		   tpdm_show_tc_sat_mode, tpdm_store_tc_sat_mode);
+
+static ssize_t tpdm_show_tc_enable_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->enable_counters);
+}
+
+static ssize_t tpdm_store_tc_enable_counters(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+	if (val >> drvdata->tc_counters_avail)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->enable_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_enable_counters, 0644,
+		   tpdm_show_tc_enable_counters, tpdm_store_tc_enable_counters);
+
+static ssize_t tpdm_show_tc_clear_counters(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->clear_counters);
+}
+
+static ssize_t tpdm_store_tc_clear_counters(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+	if (val >> drvdata->tc_counters_avail)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->clear_counters = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_clear_counters, 0644,
+		   tpdm_show_tc_clear_counters, tpdm_store_tc_clear_counters);
+
+static ssize_t tpdm_show_tc_enable_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->enable_irq);
+}
+
+static ssize_t tpdm_store_tc_enable_irq(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->enable_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_enable_irq, 0644,
+		   tpdm_show_tc_enable_irq, tpdm_store_tc_enable_irq);
+
+static ssize_t tpdm_show_tc_clear_irq(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->tc->clear_irq);
+}
+
+static ssize_t tpdm_store_tc_clear_irq(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->clear_irq = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_clear_irq, 0644,
+		   tpdm_show_tc_clear_irq, tpdm_store_tc_clear_irq);
+
+static ssize_t tpdm_show_tc_trig_sel(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->tc->trig_sel[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_tc_trig_sel(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets) ||
+	    index >= TPDM_TC_MAX_TRIG ||
+	    drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->trig_sel[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_trig_sel, 0644,
+		   tpdm_show_tc_trig_sel, tpdm_store_tc_trig_sel);
+
+static ssize_t tpdm_show_tc_trig_val_lo(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->tc->trig_val_lo[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_tc_trig_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets) ||
+	    index >= TPDM_TC_MAX_TRIG ||
+	    drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->trig_val_lo[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_trig_val_lo, 0644,
+		   tpdm_show_tc_trig_val_lo, tpdm_store_tc_trig_val_lo);
+
+static ssize_t tpdm_show_tc_trig_val_hi(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_TC_MAX_TRIG; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->tc->trig_val_hi[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_tc_trig_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets) ||
+	    index >= TPDM_TC_MAX_TRIG ||
+	    drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_NO ||
+	    (drvdata->tc_trig_type == TPDM_SUPPORT_TYPE_PARTIAL && index > 0))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->trig_val_hi[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_trig_val_hi, 0644,
+		   tpdm_show_tc_trig_val_hi, tpdm_store_tc_trig_val_hi);
+
+static ssize_t tpdm_show_tc_ovsr_gp(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_OVSR_GP);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_tc_ovsr_gp(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf,
+				     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_TC_OVSR_GP);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_ovsr_gp, 0644,
+		   tpdm_show_tc_ovsr_gp, tpdm_store_tc_ovsr_gp);
+
+static ssize_t tpdm_show_tc_ovsr_impl(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_OVSR_IMPL);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_tc_ovsr_impl(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_TC_OVSR_IMPL);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_ovsr_impl, 0644,
+		   tpdm_show_tc_ovsr_impl, tpdm_store_tc_ovsr_impl);
+
+static ssize_t tpdm_show_tc_counter_sel(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_tc_counter_sel(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	tpdm_writel(drvdata, val, TPDM_TC_SELR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_counter_sel, 0644,
+		   tpdm_show_tc_counter_sel, tpdm_store_tc_counter_sel);
+
+static ssize_t tpdm_show_tc_count_val_lo(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_CNTR_LO);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_tc_count_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_TC_SELR);
+		select = (select >> 11) & 0x3;
+
+		/*
+		 * The count value can only be written while the selected
+		 * counter is disabled; re-lock and bail out otherwise.
+		 */
+		if (BVAL(tpdm_readl(drvdata, TPDM_TC_CNTENSET), select)) {
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_TC_CNTR_LO);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_count_val_lo, 0644,
+		   tpdm_show_tc_count_val_lo, tpdm_store_tc_count_val_lo);
+
+static ssize_t tpdm_show_tc_count_val_hi(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_CNTR_HI);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_tc_count_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf,
+					  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val, select;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		select = tpdm_readl(drvdata, TPDM_TC_SELR);
+		select = (select >> 11) & 0x3;
+
+		/*
+		 * The count value can only be written while the selected
+		 * counter is disabled; re-lock and bail out otherwise.
+		 */
+		if (BVAL(tpdm_readl(drvdata, TPDM_TC_CNTENSET), select)) {
+			TPDM_LOCK(drvdata);
+			mutex_unlock(&drvdata->lock);
+			return -EPERM;
+		}
+
+		tpdm_writel(drvdata, val, TPDM_TC_CNTR_HI);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_count_val_hi, 0644,
+		   tpdm_show_tc_count_val_hi, tpdm_store_tc_count_val_hi);
+
+static ssize_t tpdm_show_tc_shadow_val_lo(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < TPDM_TC_MAX_COUNTERS; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_TC_SHADOW_LO(i)));
+	}
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_shadow_val_lo, 0644,
+		   tpdm_show_tc_shadow_val_lo, NULL);
+
+static ssize_t tpdm_show_tc_shadow_val_hi(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	for (i = 0; i < TPDM_TC_MAX_COUNTERS; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  tpdm_readl(drvdata, TPDM_TC_SHADOW_HI(i)));
+	}
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_shadow_val_hi, 0644,
+		   tpdm_show_tc_shadow_val_hi, NULL);
+
+static ssize_t tpdm_show_tc_sw_inc(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_TC_SWINC);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+
+static ssize_t tpdm_store_tc_sw_inc(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_TC, drvdata->enable_ds))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!drvdata->enable) {
+		mutex_unlock(&drvdata->lock);
+		return -EPERM;
+	}
+
+	if (val) {
+		TPDM_UNLOCK(drvdata);
+		tpdm_writel(drvdata, val, TPDM_TC_SWINC);
+		TPDM_LOCK(drvdata);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_sw_inc, 0644,
+		   tpdm_show_tc_sw_inc, tpdm_store_tc_sw_inc);
+
+static ssize_t tpdm_store_tc_msr(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned int num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_TC, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_TC_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->tc->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(tc_msr, 0200, NULL, tpdm_store_tc_msr);
+
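+/*
+ * "DSB" (assumed to stand for Discrete Single Bit) dataset sysfs interface.
+ * Everything configured here is cached in drvdata->dsb under drvdata->lock;
+ * none of these attributes program the hardware directly.
+ */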
+static ssize_t tpdm_show_dsb_mode(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n",
+			 (unsigned long)drvdata->dsb->mode);
+}
+
+static ssize_t tpdm_store_dsb_mode(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->mode = val & TPDM_MODE_ALL;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_mode, 0644,
+		   tpdm_show_dsb_mode, tpdm_store_dsb_mode);
+
+static ssize_t tpdm_show_dsb_edge_ctrl(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_EDCR; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index:0x%x Val:0x%x\n", i,
+				  drvdata->dsb->edge_ctrl[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
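+/*
+ * The edge control store takes "<start> <end> <edge_ctrl>" in hex and
+ * applies the 2-bit edge_ctrl value (0-2) to every DSB line in the
+ * inclusive range [start, end]; each edge_ctrl register packs
+ * NUM_OF_BITS / 2 lines at two bits per line.
+ */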
+static ssize_t tpdm_store_dsb_edge_ctrl(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long start, end, edge_ctrl;
+	uint32_t val;
+	int i, bit, reg;
+
+	if (sscanf(buf, "%lx %lx %lx", &start, &end, &edge_ctrl) != 3)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    (start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES) ||
+	    edge_ctrl > 0x2)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = start; i <= end; i++) {
+		reg = i / (NUM_OF_BITS / 2);
+		bit = i % (NUM_OF_BITS / 2);
+		bit = bit * 2;
+
+		val = drvdata->dsb->edge_ctrl[reg];
+		/* Clear this line's two-bit field before setting the new value */
+		val &= ~(0x3U << bit);
+		val |= (edge_ctrl << bit);
+		drvdata->dsb->edge_ctrl[reg] = val;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_edge_ctrl, 0644,
+		   tpdm_show_dsb_edge_ctrl, tpdm_store_dsb_edge_ctrl);
+
+static ssize_t tpdm_show_dsb_edge_ctrl_mask(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_EDCR / 2; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index:0x%x Val:0x%x\n", i,
+				  drvdata->dsb->edge_ctrl_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_dsb_edge_ctrl_mask(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long start, end, val;
+	uint32_t set;
+	int i, bit, reg;
+
+	if (sscanf(buf, "%lx %lx %lx", &start, &end, &val) != 3)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    (start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = start; i <= end; i++) {
+		reg = i / NUM_OF_BITS;
+		bit = (i % NUM_OF_BITS);
+
+		set = drvdata->dsb->edge_ctrl_mask[reg];
+		if (val)
+			set = set | BIT(bit);
+		else
+			set = set & ~BIT(bit);
+		drvdata->dsb->edge_ctrl_mask[reg] = set;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_edge_ctrl_mask, 0644,
+		   tpdm_show_dsb_edge_ctrl_mask, tpdm_store_dsb_edge_ctrl_mask);
+
+static ssize_t tpdm_show_dsb_patt_val(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->patt_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_dsb_patt_val(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf,
+				       size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->patt_val[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_val, 0644,
+		   tpdm_show_dsb_patt_val, tpdm_store_dsb_patt_val);
+
+static ssize_t tpdm_show_dsb_patt_mask(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->patt_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_dsb_patt_mask(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf,
+					size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->patt_mask[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_mask, 0644,
+		   tpdm_show_dsb_patt_mask, tpdm_store_dsb_patt_mask);
+
+static ssize_t tpdm_show_dsb_patt_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->dsb->patt_ts);
+}
+
+static ssize_t tpdm_store_dsb_patt_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->dsb->patt_ts = true;
+	else
+		drvdata->dsb->patt_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_ts, 0644,
+		   tpdm_show_dsb_patt_ts, tpdm_store_dsb_patt_ts);
+
+static ssize_t tpdm_show_dsb_patt_type(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->dsb->patt_type);
+}
+
+static ssize_t tpdm_store_dsb_patt_type(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->dsb->patt_type = true;
+	else
+		drvdata->dsb->patt_type = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_patt_type, 0644,
+		   tpdm_show_dsb_patt_type, tpdm_store_dsb_patt_type);
+
+static ssize_t tpdm_show_dsb_trig_patt_val(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->trig_patt_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_dsb_trig_patt_val(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->trig_patt_val[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_trig_patt_val, 0644,
+		   tpdm_show_dsb_trig_patt_val, tpdm_store_dsb_trig_patt_val);
+
+static ssize_t tpdm_show_dsb_trig_patt_mask(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i = 0;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index: 0x%x Value: 0x%x\n", i,
+				  drvdata->dsb->trig_patt_mask[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_dsb_trig_patt_mask(struct device *dev,
+					     struct device_attribute *attr,
+					     const char *buf,
+					     size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long index, val;
+
+	if (sscanf(buf, "%lx %lx", &index, &val) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    index >= TPDM_DSB_MAX_PATT)
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->trig_patt_mask[index] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_trig_patt_mask, 0644,
+		   tpdm_show_dsb_trig_patt_mask, tpdm_store_dsb_trig_patt_mask);
+
+static ssize_t tpdm_show_dsb_trig_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->dsb->trig_ts);
+}
+
+static ssize_t tpdm_store_dsb_trig_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->dsb->trig_ts = true;
+	else
+		drvdata->dsb->trig_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_trig_ts, 0644,
+		   tpdm_show_dsb_trig_ts, tpdm_store_dsb_trig_ts);
+
+static ssize_t tpdm_show_dsb_select_val(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	ssize_t size = 0;
+	int i;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = 0; i < TPDM_DSB_MAX_SELECT; i++) {
+		size += scnprintf(buf + size, PAGE_SIZE - size,
+				  "Index:0x%x Val:0x%x\n", i,
+				  drvdata->dsb->select_val[i]);
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+
+static ssize_t tpdm_store_dsb_select_val(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long start, end;
+	uint32_t val;
+	int i, bit, reg;
+
+	if (sscanf(buf, "%lx %lx", &start, &end) != 2)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets) ||
+	    (start >= TPDM_DSB_MAX_LINES) || (end >= TPDM_DSB_MAX_LINES))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	for (i = start; i <= end; i++) {
+		reg = i / NUM_OF_BITS;
+		bit = (i % NUM_OF_BITS);
+
+		val = drvdata->dsb->select_val[reg];
+		val = val | BIT(bit);
+		drvdata->dsb->select_val[reg] = val;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_select_val, 0644,
+		   tpdm_show_dsb_select_val, tpdm_store_dsb_select_val);
+
+static ssize_t tpdm_store_dsb_msr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned int num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!test_bit(TPDM_DS_DSB, drvdata->datasets))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_DSB_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->dsb->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(dsb_msr, 0200, NULL, tpdm_store_dsb_msr);
+
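+/*
+ * "CMB" (assumed to stand for Continuous Multi-Bit, with MCMB its
+ * multi-lane variant) dataset sysfs interface. These attributes accept
+ * either the CMB or the MCMB dataset and cache the configuration in
+ * drvdata->cmb under drvdata->lock.
+ */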
+static ssize_t tpdm_show_cmb_available_modes(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	return scnprintf(buf, PAGE_SIZE, "%s\n", "continuous trace_on_change");
+}
+static DEVICE_ATTR(cmb_available_modes, 0444, tpdm_show_cmb_available_modes,
+		   NULL);
+
+static ssize_t tpdm_show_cmb_mode(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			 drvdata->cmb->mode == TPDM_CMB_MODE_CONTINUOUS ?
+			 "continuous" : "trace_on_change");
+}
+
+static ssize_t tpdm_store_cmb_mode(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf,
+				   size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	char str[20] = "";
+
+	if (strlen(buf) >= 20)
+		return -EINVAL;
+	if (sscanf(buf, "%s", str) != 1)
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (!strcmp(str, "continuous")) {
+		drvdata->cmb->mode = TPDM_CMB_MODE_CONTINUOUS;
+	} else if (!strcmp(str, "trace_on_change")) {
+		drvdata->cmb->mode = TPDM_CMB_MODE_TRACE_ON_CHANGE;
+	} else {
+		mutex_unlock(&drvdata->lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_mode, 0644,
+		   tpdm_show_cmb_mode, tpdm_store_cmb_mode);
+
+static ssize_t tpdm_show_cmb_patt_val_lsb(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_val[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_patt_val_lsb(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_val[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_val_lsb, 0644,
+		   tpdm_show_cmb_patt_val_lsb,
+		   tpdm_store_cmb_patt_val_lsb);
+
+static ssize_t tpdm_show_cmb_patt_mask_lsb(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_mask[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_patt_mask_lsb(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_mask[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_mask_lsb, 0644,
+		   tpdm_show_cmb_patt_mask_lsb, tpdm_store_cmb_patt_mask_lsb);
+
+static ssize_t tpdm_show_cmb_patt_val_msb(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_val[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_patt_val_msb(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_val[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_val_msb, 0644,
+		   tpdm_show_cmb_patt_val_msb,
+		   tpdm_store_cmb_patt_val_msb);
+
+static ssize_t tpdm_show_cmb_patt_mask_msb(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->patt_mask[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_patt_mask_msb(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->patt_mask[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_mask_msb, 0644,
+		   tpdm_show_cmb_patt_mask_msb, tpdm_store_cmb_patt_mask_msb);
+
+static ssize_t tpdm_show_cmb_patt_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->cmb->patt_ts);
+}
+
+static ssize_t tpdm_store_cmb_patt_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->cmb->patt_ts = true;
+	else
+		drvdata->cmb->patt_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_patt_ts, 0644,
+		   tpdm_show_cmb_patt_ts, tpdm_store_cmb_patt_ts);
+
+static ssize_t tpdm_show_cmb_trig_patt_val_lsb(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_val[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_trig_patt_val_lsb(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_val[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_val_lsb, 0644,
+		   tpdm_show_cmb_trig_patt_val_lsb,
+		   tpdm_store_cmb_trig_patt_val_lsb);
+
+static ssize_t tpdm_show_cmb_trig_patt_mask_lsb(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_trig_patt_mask_lsb(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_mask[TPDM_CMB_LSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_mask_lsb, 0644,
+		   tpdm_show_cmb_trig_patt_mask_lsb,
+		   tpdm_store_cmb_trig_patt_mask_lsb);
+
+static ssize_t tpdm_show_cmb_trig_patt_val_msb(struct device *dev,
+					       struct device_attribute *attr,
+					       char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_val[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_trig_patt_val_msb(struct device *dev,
+						struct device_attribute *attr,
+						const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_val[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_val_msb, 0644,
+		   tpdm_show_cmb_trig_patt_val_msb,
+		   tpdm_store_cmb_trig_patt_val_msb);
+
+static ssize_t tpdm_show_cmb_trig_patt_mask_msb(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	val = drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB];
+
+	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
+}
+
+static ssize_t tpdm_store_cmb_trig_patt_mask_msb(struct device *dev,
+						 struct device_attribute *attr,
+						 const char *buf, size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->trig_patt_mask[TPDM_CMB_MSB] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_patt_mask_msb, 0644,
+		   tpdm_show_cmb_trig_patt_mask_msb,
+		   tpdm_store_cmb_trig_patt_mask_msb);
+
+static ssize_t tpdm_show_cmb_trig_ts(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->cmb->trig_ts);
+}
+
+static ssize_t tpdm_store_cmb_trig_ts(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf,
+				      size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	if (val)
+		drvdata->cmb->trig_ts = true;
+	else
+		drvdata->cmb->trig_ts = false;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_trig_ts, 0644,
+		   tpdm_show_cmb_trig_ts, tpdm_store_cmb_trig_ts);
+
+static ssize_t tpdm_store_cmb_msr(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned int num, val;
+	int nval;
+
+	if (!drvdata->msr_support)
+		return -EINVAL;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	nval = sscanf(buf, "%u %x", &num, &val);
+	if (nval != 2)
+		return -EINVAL;
+
+	if (num >= TPDM_CMB_MAX_MSR)
+		return -EINVAL;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->msr[num] = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(cmb_msr, 0200, NULL, tpdm_store_cmb_msr);
+
+static ssize_t tpdm_show_cmb_read_interface_state(struct device *dev,
+						  struct device_attribute *attr,
+						  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_CMB_READVAL);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%lx\n", val);
+}
+static DEVICE_ATTR(cmb_read_interface_state, 0444,
+		   tpdm_show_cmb_read_interface_state, NULL);
+
+static ssize_t tpdm_show_cmb_read_ctl_reg(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	TPDM_UNLOCK(drvdata);
+	val = tpdm_readl(drvdata, TPDM_CMB_READCTL);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+
+	if (test_bit(TPDM_DS_CMB, drvdata->datasets))
+		return scnprintf(buf, PAGE_SIZE, "SEL: %lx\n", val);
+	else
+		return scnprintf(buf, PAGE_SIZE, "Lane %u SEL: %lx\n",
+				 (unsigned int)BMVAL(val, 1, 3), val & 0x1);
+}
+
+static ssize_t tpdm_store_cmb_read_ctl_reg(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf,
+					   size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	TPDM_UNLOCK(drvdata);
+	tpdm_writel(drvdata, val, TPDM_CMB_READCTL);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+
+	return size;
+}
+static DEVICE_ATTR(cmb_read_ctl_reg, 0644,
+		   tpdm_show_cmb_read_ctl_reg, tpdm_store_cmb_read_ctl_reg);
+
+static ssize_t tpdm_show_mcmb_trig_lane(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_MCMB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->cmb->mcmb->mcmb_trig_lane);
+}
+
+static ssize_t tpdm_store_mcmb_trig_lane(struct device *dev,
+					 struct device_attribute *attr,
+					 const char *buf,
+					 size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+	if (val >= TPDM_MCMB_MAX_LANES)
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_MCMB, drvdata->datasets))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->mcmb->mcmb_trig_lane = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(mcmb_trig_lane, 0644,
+		   tpdm_show_mcmb_trig_lane, tpdm_store_mcmb_trig_lane);
+
+static ssize_t tpdm_show_mcmb_lanes_select(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+	if (!test_bit(TPDM_DS_MCMB, drvdata->datasets))
+		return -EPERM;
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			 (unsigned int)drvdata->cmb->mcmb->mcmb_lane_select);
+}
+
+static ssize_t tpdm_store_mcmb_lanes_select(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf,
+					    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+	if (!test_bit(TPDM_DS_MCMB, drvdata->datasets))
+		return -EPERM;
+
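+	/* Keep only the bits that correspond to supported MCMB lanes */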
+	val = BMVAL(val, 0, TPDM_MCMB_MAX_LANES - 1);
+
+	mutex_lock(&drvdata->lock);
+	drvdata->cmb->mcmb->mcmb_lane_select = val;
+	mutex_unlock(&drvdata->lock);
+	return size;
+}
+static DEVICE_ATTR(mcmb_lanes_select, 0644,
+		   tpdm_show_mcmb_lanes_select, tpdm_store_mcmb_lanes_select);
+
+static ssize_t tpdm_store_cmb_markr(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf,
+				    size_t size)
+{
+	struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+	unsigned long val;
+
+	if (kstrtoul(buf, 16, &val))
+		return -EINVAL;
+
+	if (!(test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	      test_bit(TPDM_DS_MCMB, drvdata->datasets)))
+		return -EPERM;
+
+	mutex_lock(&drvdata->lock);
+	TPDM_UNLOCK(drvdata);
+	tpdm_writel(drvdata, val, TPDM_CMB_MARKR);
+	TPDM_LOCK(drvdata);
+	mutex_unlock(&drvdata->lock);
+
+	return size;
+}
+static DEVICE_ATTR(cmb_markr, 0200, NULL, tpdm_store_cmb_markr);
+
+static struct attribute *tpdm_bc_attrs[] = {
+	&dev_attr_bc_capture_mode.attr,
+	&dev_attr_bc_retrieval_mode.attr,
+	&dev_attr_bc_reset_counters.attr,
+	&dev_attr_bc_sat_mode.attr,
+	&dev_attr_bc_enable_counters.attr,
+	&dev_attr_bc_clear_counters.attr,
+	&dev_attr_bc_enable_irq.attr,
+	&dev_attr_bc_clear_irq.attr,
+	&dev_attr_bc_trig_val_lo.attr,
+	&dev_attr_bc_trig_val_hi.attr,
+	&dev_attr_bc_enable_ganging.attr,
+	&dev_attr_bc_overflow_val.attr,
+	&dev_attr_bc_ovsr.attr,
+	&dev_attr_bc_counter_sel.attr,
+	&dev_attr_bc_count_val_lo.attr,
+	&dev_attr_bc_count_val_hi.attr,
+	&dev_attr_bc_shadow_val_lo.attr,
+	&dev_attr_bc_shadow_val_hi.attr,
+	&dev_attr_bc_sw_inc.attr,
+	&dev_attr_bc_msr.attr,
+	NULL,
+};
+
+static struct attribute *tpdm_tc_attrs[] = {
+	&dev_attr_tc_capture_mode.attr,
+	&dev_attr_tc_retrieval_mode.attr,
+	&dev_attr_tc_reset_counters.attr,
+	&dev_attr_tc_sat_mode.attr,
+	&dev_attr_tc_enable_counters.attr,
+	&dev_attr_tc_clear_counters.attr,
+	&dev_attr_tc_enable_irq.attr,
+	&dev_attr_tc_clear_irq.attr,
+	&dev_attr_tc_trig_sel.attr,
+	&dev_attr_tc_trig_val_lo.attr,
+	&dev_attr_tc_trig_val_hi.attr,
+	&dev_attr_tc_ovsr_gp.attr,
+	&dev_attr_tc_ovsr_impl.attr,
+	&dev_attr_tc_counter_sel.attr,
+	&dev_attr_tc_count_val_lo.attr,
+	&dev_attr_tc_count_val_hi.attr,
+	&dev_attr_tc_shadow_val_lo.attr,
+	&dev_attr_tc_shadow_val_hi.attr,
+	&dev_attr_tc_sw_inc.attr,
+	&dev_attr_tc_msr.attr,
+	NULL,
+};
+
+static struct attribute *tpdm_dsb_attrs[] = {
+	&dev_attr_dsb_mode.attr,
+	&dev_attr_dsb_edge_ctrl.attr,
+	&dev_attr_dsb_edge_ctrl_mask.attr,
+	&dev_attr_dsb_patt_val.attr,
+	&dev_attr_dsb_patt_mask.attr,
+	&dev_attr_dsb_patt_ts.attr,
+	&dev_attr_dsb_patt_type.attr,
+	&dev_attr_dsb_trig_patt_val.attr,
+	&dev_attr_dsb_trig_patt_mask.attr,
+	&dev_attr_dsb_trig_ts.attr,
+	&dev_attr_dsb_select_val.attr,
+	&dev_attr_dsb_msr.attr,
+	NULL,
+};
+
+static struct attribute *tpdm_cmb_attrs[] = {
+	&dev_attr_cmb_available_modes.attr,
+	&dev_attr_cmb_mode.attr,
+	&dev_attr_cmb_patt_val_lsb.attr,
+	&dev_attr_cmb_patt_mask_lsb.attr,
+	&dev_attr_cmb_patt_val_msb.attr,
+	&dev_attr_cmb_patt_mask_msb.attr,
+	&dev_attr_cmb_patt_ts.attr,
+	&dev_attr_cmb_trig_patt_val_lsb.attr,
+	&dev_attr_cmb_trig_patt_mask_lsb.attr,
+	&dev_attr_cmb_trig_patt_val_msb.attr,
+	&dev_attr_cmb_trig_patt_mask_msb.attr,
+	&dev_attr_cmb_trig_ts.attr,
+	&dev_attr_cmb_msr.attr,
+	&dev_attr_cmb_read_interface_state.attr,
+	&dev_attr_cmb_read_ctl_reg.attr,
+	&dev_attr_cmb_markr.attr,
+	&dev_attr_mcmb_trig_lane.attr,
+	&dev_attr_mcmb_lanes_select.attr,
+	NULL,
+};
+
+static struct attribute_group tpdm_bc_attr_grp = {
+	.attrs = tpdm_bc_attrs,
+};
+
+static struct attribute_group tpdm_tc_attr_grp = {
+	.attrs = tpdm_tc_attrs,
+};
+
+static struct attribute_group tpdm_dsb_attr_grp = {
+	.attrs = tpdm_dsb_attrs,
+};
+
+static struct attribute_group tpdm_cmb_attr_grp = {
+	.attrs = tpdm_cmb_attrs,
+};
+
+static struct attribute *tpdm_attrs[] = {
+	&dev_attr_available_datasets.attr,
+	&dev_attr_enable_datasets.attr,
+	&dev_attr_gp_regs.attr,
+	NULL,
+};
+
+static struct attribute_group tpdm_attr_grp = {
+	.attrs = tpdm_attrs,
+};
+static const struct attribute_group *tpdm_attr_grps[] = {
+	&tpdm_attr_grp,
+	&tpdm_bc_attr_grp,
+	&tpdm_tc_attr_grp,
+	&tpdm_dsb_attr_grp,
+	&tpdm_cmb_attr_grp,
+	NULL,
+};
+
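+/*
+ * Allocate state for each dataset advertised by the hardware.  MCMB reuses
+ * the CMB state and layers an mcmb sub-structure on top of it.
+ */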
+static int tpdm_datasets_alloc(struct tpdm_drvdata *drvdata)
+{
+	if (test_bit(TPDM_DS_GPR, drvdata->datasets)) {
+		drvdata->gpr = devm_kzalloc(drvdata->dev, sizeof(*drvdata->gpr),
+					    GFP_KERNEL);
+		if (!drvdata->gpr)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_BC, drvdata->datasets)) {
+		drvdata->bc = devm_kzalloc(drvdata->dev, sizeof(*drvdata->bc),
+					   GFP_KERNEL);
+		if (!drvdata->bc)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_TC, drvdata->datasets)) {
+		drvdata->tc = devm_kzalloc(drvdata->dev, sizeof(*drvdata->tc),
+					   GFP_KERNEL);
+		if (!drvdata->tc)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_DSB, drvdata->datasets)) {
+		drvdata->dsb = devm_kzalloc(drvdata->dev, sizeof(*drvdata->dsb),
+					    GFP_KERNEL);
+		if (!drvdata->dsb)
+			return -ENOMEM;
+	}
+	if (test_bit(TPDM_DS_CMB, drvdata->datasets)) {
+		drvdata->cmb = devm_kzalloc(drvdata->dev, sizeof(*drvdata->cmb),
+					    GFP_KERNEL);
+		if (!drvdata->cmb)
+			return -ENOMEM;
+	} else if (test_bit(TPDM_DS_MCMB, drvdata->datasets)) {
+		drvdata->cmb = devm_kzalloc(drvdata->dev, sizeof(*drvdata->cmb),
+					    GFP_KERNEL);
+		if (!drvdata->cmb)
+			return -ENOMEM;
+		drvdata->cmb->mcmb = devm_kzalloc(drvdata->dev,
+						  sizeof(*drvdata->cmb->mcmb),
+						  GFP_KERNEL);
+		if (!drvdata->cmb->mcmb)
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+static void tpdm_init_default_data(struct tpdm_drvdata *drvdata)
+{
+	if (test_bit(TPDM_DS_BC, drvdata->datasets))
+		drvdata->bc->retrieval_mode = TPDM_MODE_ATB;
+
+	if (test_bit(TPDM_DS_TC, drvdata->datasets))
+		drvdata->tc->retrieval_mode = TPDM_MODE_ATB;
+
+	if (test_bit(TPDM_DS_DSB, drvdata->datasets))
+		drvdata->dsb->trig_ts = true;
+
+	if (test_bit(TPDM_DS_CMB, drvdata->datasets) ||
+	    test_bit(TPDM_DS_MCMB, drvdata->datasets))
+		drvdata->cmb->trig_ts = true;
+}
+
+static int tpdm_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	uint32_t pidr, devid;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct tpdm_drvdata *drvdata;
+	struct resource *res;
+	struct coresight_desc *desc;
+	static int traceid = TPDM_TRACE_ID_START;
+	uint32_t version;
+
+	pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+	pdev->dev.platform_data = pdata;
+
+	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+	drvdata->dev = &pdev->dev;
+	platform_set_drvdata(pdev, drvdata);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tpdm-base");
+	if (!res)
+		return -ENODEV;
+
+	drvdata->base = devm_ioremap(dev, res->start, resource_size(res));
+	if (!drvdata->base)
+		return -ENOMEM;
+
+	drvdata->clk_enable = of_property_read_bool(pdev->dev.of_node,
+						    "qcom,clk-enable");
+
+	drvdata->msr_fix_req = of_property_read_bool(pdev->dev.of_node,
+						     "qcom,msr-fix-req");
+
+	mutex_init(&drvdata->lock);
+
+	drvdata->clk = devm_clk_get(dev, "core_clk");
+	if (IS_ERR(drvdata->clk))
+		return PTR_ERR(drvdata->clk);
+
+	ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE);
+	if (ret)
+		return ret;
+
+	ret = clk_prepare_enable(drvdata->clk);
+	if (ret)
+		return ret;
+
+	version = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR2);
+	drvdata->version = BMVAL(version, 4, 7);
+
+	if (drvdata->version)
+		drvdata->msr_support = true;
+
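+	/*
+	 * Each low-order bit of PERIPHIDR0 advertises one dataset type;
+	 * mark every advertised dataset as present and enabled by default.
+	 */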
+	pidr = tpdm_readl(drvdata, CORESIGHT_PERIPHIDR0);
+	for (i = 0; i < TPDM_DATASETS; i++) {
+		if (pidr & BIT(i)) {
+			__set_bit(i, drvdata->datasets);
+			__set_bit(i, drvdata->enable_ds);
+		}
+	}
+
+	ret = tpdm_datasets_alloc(drvdata);
+	if (ret) {
+		clk_disable_unprepare(drvdata->clk);
+		return ret;
+	}
+
+	tpdm_init_default_data(drvdata);
+
+	devid = tpdm_readl(drvdata, CORESIGHT_DEVID);
+	drvdata->tc_trig_type = BMVAL(devid, 27, 28);
+	drvdata->bc_trig_type = BMVAL(devid, 25, 26);
+	drvdata->bc_gang_type = BMVAL(devid, 23, 24);
+	drvdata->bc_counters_avail = BMVAL(devid, 6, 10) + 1;
+	drvdata->tc_counters_avail = BMVAL(devid, 4, 5) + 1;
+
+	clk_disable_unprepare(drvdata->clk);
+
+	drvdata->traceid = traceid++;
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return -ENOMEM;
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+	desc->ops = &tpdm_cs_ops;
+	desc->pdata = pdev->dev.platform_data;
+	desc->dev = &pdev->dev;
+	desc->groups = tpdm_attr_grps;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev))
+		return PTR_ERR(drvdata->csdev);
+
+	dev_dbg(drvdata->dev, "TPDM initialized\n");
+
+	if (boot_enable)
+		coresight_enable(drvdata->csdev);
+
+	return 0;
+}
+
+static int tpdm_remove(struct platform_device *pdev)
+{
+	struct tpdm_drvdata *drvdata = platform_get_drvdata(pdev);
+
+	coresight_unregister(drvdata->csdev);
+	return 0;
+}
+
+static const struct of_device_id tpdm_match[] = {
+	{.compatible = "qcom,coresight-tpdm"},
+	{}
+};
+
+static struct platform_driver tpdm_driver = {
+	.probe          = tpdm_probe,
+	.remove         = tpdm_remove,
+	.driver         = {
+		.name   = "coresight-tpdm",
+		.owner	= THIS_MODULE,
+		.of_match_table = tpdm_match,
+	},
+};
+
+static int __init tpdm_init(void)
+{
+	return platform_driver_register(&tpdm_driver);
+}
+module_init(tpdm_init);
+
+static void __exit tpdm_exit(void)
+{
+	platform_driver_unregister(&tpdm_driver);
+}
+module_exit(tpdm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");
diff --git a/drivers/hwtracing/coresight/of_coresight.c b/drivers/hwtracing/coresight/of_coresight.c
index 629e031..2492f90 100644
--- a/drivers/hwtracing/coresight/of_coresight.c
+++ b/drivers/hwtracing/coresight/of_coresight.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -117,8 +117,11 @@
 	if (!pdata)
 		return ERR_PTR(-ENOMEM);
 
-	/* Use device name as sysfs handle */
-	pdata->name = dev_name(dev);
+	ret = of_property_read_string(node, "coresight-name", &pdata->name);
+	if (ret) {
+		/* Use device name as sysfs handle */
+		pdata->name = dev_name(dev);
+	}
 
 	/* Get the number of input and output port for this component */
 	of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
@@ -168,7 +171,11 @@
 			if (!rdev)
 				return ERR_PTR(-EPROBE_DEFER);
 
-			pdata->child_names[i] = dev_name(rdev);
+			ret = of_property_read_string(rparent, "coresight-name",
+						      &pdata->child_names[i]);
+			if (ret)
+				pdata->child_names[i] = dev_name(rdev);
+
 			pdata->child_ports[i] = rendpoint.id;
 
 			i++;
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index 51f81d6..c4a7f28 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -433,18 +433,24 @@
 	size_t pos;
 	ssize_t sz;
 
-	for (pos = 0, p = buf; count > pos; pos += sz, p += sz) {
-		sz = min_t(unsigned int, count - pos, 8);
-		sz = data->packet(data, master, channel, STP_PACKET_DATA, flags,
-				  sz, p);
-		flags = 0;
+	if (data->ost_configured && data->ost_configured()) {
+		pos = data->ost_packet(data, count, buf);
+	} else {
+		for (pos = 0, p = buf; count > pos; pos += sz, p += sz) {
+			sz = min_t(unsigned int, count - pos, 8);
+			sz = data->packet(data, master, channel,
+					  STP_PACKET_DATA, flags,
+					  sz, p);
+			flags = 0;
 
-		if (sz < 0)
-			break;
+			if (sz < 0)
+				break;
+		}
+
+		data->packet(data, master, channel, STP_PACKET_FLAG, 0,
+			     0, &nil);
 	}
 
-	data->packet(data, master, channel, STP_PACKET_FLAG, 0, 0, &nil);
-
 	return pos;
 }
 
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 6261874..34ffa02 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -187,6 +187,19 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called apm-power.
 
+config INPUT_KEYRESET
+	bool "Reset key"
+	depends on INPUT
+	select INPUT_KEYCOMBO
+	---help---
+	  Say Y here if you want to reboot when certain keys are pressed.
+
+config INPUT_KEYCOMBO
+	bool "Key combo"
+	depends on INPUT
+	---help---
+	  Say Y here if you want to take an action when certain keys are pressed.
+
 comment "Input Device Drivers"
 
 source "drivers/input/keyboard/Kconfig"
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index 595820b..1ee1786 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -28,3 +28,7 @@
 obj-$(CONFIG_INPUT_APMPOWER)	+= apm-power.o
 
 obj-$(CONFIG_RMI4_CORE)		+= rmi4/
+
+obj-$(CONFIG_INPUT_KEYRESET)	+= keyreset.o
+obj-$(CONFIG_INPUT_KEYCOMBO)	+= keycombo.o
+
diff --git a/drivers/input/keycombo.c b/drivers/input/keycombo.c
new file mode 100644
index 0000000..2fba451
--- /dev/null
+++ b/drivers/input/keycombo.c
@@ -0,0 +1,261 @@
+/* drivers/input/keycombo.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keycombo.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
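+/*
+ * Per-combo state: once every key in keys_down is held (and no key in
+ * keys_up is pressed), key_down_fn runs after the configured delay; if the
+ * combo is released after key_down_fn has fired, key_up_fn runs.
+ */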
+struct keycombo_state {
+	struct input_handler input_handler;
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long upbit[BITS_TO_LONGS(KEY_CNT)];
+	unsigned long key[BITS_TO_LONGS(KEY_CNT)];
+	spinlock_t lock;
+	struct workqueue_struct *wq;
+	int key_down_target;
+	int key_down;
+	int key_up;
+	struct delayed_work key_down_work;
+	int delay;
+	struct work_struct key_up_work;
+	void (*key_up_fn)(void *);
+	void (*key_down_fn)(void *);
+	void *priv;
+	int key_is_down;
+	struct wakeup_source combo_held_wake_source;
+	struct wakeup_source combo_up_wake_source;
+};
+
+static void do_key_down(struct work_struct *work)
+{
+	struct delayed_work *dwork = container_of(work, struct delayed_work,
+									work);
+	struct keycombo_state *state = container_of(dwork,
+					struct keycombo_state, key_down_work);
+	if (state->key_down_fn)
+		state->key_down_fn(state->priv);
+}
+
+static void do_key_up(struct work_struct *work)
+{
+	struct keycombo_state *state = container_of(work, struct keycombo_state,
+								key_up_work);
+	if (state->key_up_fn)
+		state->key_up_fn(state->priv);
+	__pm_relax(&state->combo_up_wake_source);
+}
+
+static void keycombo_event(struct input_handle *handle, unsigned int type,
+		unsigned int code, int value)
+{
+	unsigned long flags;
+	struct keycombo_state *state = handle->private;
+
+	if (type != EV_KEY)
+		return;
+
+	if (code >= KEY_MAX)
+		return;
+
+	if (!test_bit(code, state->keybit))
+		return;
+
+	spin_lock_irqsave(&state->lock, flags);
+	if (!test_bit(code, state->key) == !value)
+		goto done;
+	__change_bit(code, state->key);
+	if (test_bit(code, state->upbit)) {
+		if (value)
+			state->key_up++;
+		else
+			state->key_up--;
+	} else {
+		if (value)
+			state->key_down++;
+		else
+			state->key_down--;
+	}
+	if (state->key_down == state->key_down_target && state->key_up == 0) {
+		__pm_stay_awake(&state->combo_held_wake_source);
+		state->key_is_down = 1;
+		if (!queue_delayed_work(state->wq, &state->key_down_work,
+								state->delay))
+			pr_debug("Key down work already queued\n");
+	} else if (state->key_is_down) {
+		if (!cancel_delayed_work(&state->key_down_work)) {
+			__pm_stay_awake(&state->combo_up_wake_source);
+			queue_work(state->wq, &state->key_up_work);
+		}
+		__pm_relax(&state->combo_held_wake_source);
+		state->key_is_down = 0;
+	}
+done:
+	spin_unlock_irqrestore(&state->lock, flags);
+}
+
+static int keycombo_connect(struct input_handler *handler,
+		struct input_dev *dev,
+		const struct input_device_id *id)
+{
+	int i;
+	int ret;
+	struct input_handle *handle;
+	struct keycombo_state *state =
+		container_of(handler, struct keycombo_state, input_handler);
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, state->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCOMBO_NAME;
+	handle->private = state;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keycombo_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+static const struct input_device_id keycombo_ids[] = {
+		{
+				.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+				.evbit = { BIT_MASK(EV_KEY) },
+		},
+		{ },
+};
+MODULE_DEVICE_TABLE(input, keycombo_ids);
+
+static int keycombo_probe(struct platform_device *pdev)
+{
+	int ret;
+	int key, *keyp;
+	struct keycombo_state *state;
+	struct keycombo_platform_data *pdata = pdev->dev.platform_data;
+
+	if (!pdata)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	spin_lock_init(&state->lock);
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		state->key_down_target++;
+		__set_bit(key, state->keybit);
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			__set_bit(key, state->keybit);
+			__set_bit(key, state->upbit);
+		}
+	}
+
+	state->wq = alloc_ordered_workqueue("keycombo", 0);
+	if (!state->wq) {
+		kfree(state);
+		return -ENOMEM;
+	}
+
+	state->priv = pdata->priv;
+
+	if (pdata->key_down_fn)
+		state->key_down_fn = pdata->key_down_fn;
+	INIT_DELAYED_WORK(&state->key_down_work, do_key_down);
+
+	if (pdata->key_up_fn)
+		state->key_up_fn = pdata->key_up_fn;
+	INIT_WORK(&state->key_up_work, do_key_up);
+
+	wakeup_source_init(&state->combo_held_wake_source, "key combo");
+	wakeup_source_init(&state->combo_up_wake_source, "key combo up");
+	state->delay = msecs_to_jiffies(pdata->key_down_delay);
+
+	state->input_handler.event = keycombo_event;
+	state->input_handler.connect = keycombo_connect;
+	state->input_handler.disconnect = keycombo_disconnect;
+	state->input_handler.name = KEYCOMBO_NAME;
+	state->input_handler.id_table = keycombo_ids;
+	ret = input_register_handler(&state->input_handler);
+	if (ret) {
+		wakeup_source_trash(&state->combo_up_wake_source);
+		wakeup_source_trash(&state->combo_held_wake_source);
+		destroy_workqueue(state->wq);
+		kfree(state);
+		return ret;
+	}
+	platform_set_drvdata(pdev, state);
+	return 0;
+}
+
+int keycombo_remove(struct platform_device *pdev)
+{
+	struct keycombo_state *state = platform_get_drvdata(pdev);
+
+	input_unregister_handler(&state->input_handler);
+	destroy_workqueue(state->wq);
+	wakeup_source_trash(&state->combo_held_wake_source);
+	wakeup_source_trash(&state->combo_up_wake_source);
+	kfree(state);
+	return 0;
+}
+
+struct platform_driver keycombo_driver = {
+		.driver.name = KEYCOMBO_NAME,
+		.probe = keycombo_probe,
+		.remove = keycombo_remove,
+};
+
+static int __init keycombo_init(void)
+{
+	return platform_driver_register(&keycombo_driver);
+}
+
+static void __exit keycombo_exit(void)
+{
+	return platform_driver_unregister(&keycombo_driver);
+}
+
+module_init(keycombo_init);
+module_exit(keycombo_exit);
diff --git a/drivers/input/keyreset.c b/drivers/input/keyreset.c
new file mode 100644
index 0000000..7e5222a
--- /dev/null
+++ b/drivers/input/keyreset.c
@@ -0,0 +1,144 @@
+/* drivers/input/keyreset.c
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/input.h>
+#include <linux/keyreset.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/keycombo.h>
+
+struct keyreset_state {
+	int restart_requested;
+	int (*reset_fn)(void);
+	struct platform_device *pdev_child;
+	struct work_struct restart_work;
+};
+
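+/*
+ * keyreset registers a keycombo child device; when the configured key
+ * combination is held, do_reset_fn() either calls the board-provided
+ * reset_fn or schedules an orderly reboot.
+ */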
+static void do_restart(struct work_struct *unused)
+{
+	orderly_reboot();
+}
+
+static void do_reset_fn(void *priv)
+{
+	struct keyreset_state *state = priv;
+	if (state->restart_requested)
+		panic("keyboard reset failed, %d", state->restart_requested);
+	if (state->reset_fn) {
+		state->restart_requested = state->reset_fn();
+	} else {
+		pr_info("keyboard reset\n");
+		schedule_work(&state->restart_work);
+		state->restart_requested = 1;
+	}
+}
+
+static int keyreset_probe(struct platform_device *pdev)
+{
+	int ret = -ENOMEM;
+	struct keycombo_platform_data *pdata_child;
+	struct keyreset_platform_data *pdata = pdev->dev.platform_data;
+	int up_size = 0, down_size = 0, size;
+	int key, *keyp;
+	struct keyreset_state *state;
+
+	if (!pdata)
+		return -EINVAL;
+	state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+
+	state->pdev_child = platform_device_alloc(KEYCOMBO_NAME,
+							PLATFORM_DEVID_AUTO);
+	if (!state->pdev_child)
+		return -ENOMEM;
+	state->pdev_child->dev.parent = &pdev->dev;
+	INIT_WORK(&state->restart_work, do_restart);
+
+	keyp = pdata->keys_down;
+	while ((key = *keyp++)) {
+		if (key >= KEY_MAX)
+			continue;
+		down_size++;
+	}
+	if (pdata->keys_up) {
+		keyp = pdata->keys_up;
+		while ((key = *keyp++)) {
+			if (key >= KEY_MAX)
+				continue;
+			up_size++;
+		}
+	}
+	size = sizeof(struct keycombo_platform_data)
+			+ sizeof(int) * (down_size + 1);
+	pdata_child = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (!pdata_child)
+		goto error;
+	memcpy(pdata_child->keys_down, pdata->keys_down,
+						sizeof(int) * down_size);
+	if (up_size > 0) {
+		pdata_child->keys_up = devm_kzalloc(&pdev->dev,
+						sizeof(int) * (up_size + 1),
+						GFP_KERNEL);
+		if (!pdata_child->keys_up)
+			goto error;
+		memcpy(pdata_child->keys_up, pdata->keys_up,
+							sizeof(int) * up_size);
+	}
+	state->reset_fn = pdata->reset_fn;
+	pdata_child->key_down_fn = do_reset_fn;
+	pdata_child->priv = state;
+	pdata_child->key_down_delay = pdata->key_down_delay;
+	ret = platform_device_add_data(state->pdev_child, pdata_child, size);
+	if (ret)
+		goto error;
+	platform_set_drvdata(pdev, state);
+	ret = platform_device_add(state->pdev_child);
+	if (ret)
+		goto error;
+	return 0;
+error:
+	platform_device_put(state->pdev_child);
+	return ret;
+}
+
+int keyreset_remove(struct platform_device *pdev)
+{
+	struct keyreset_state *state = platform_get_drvdata(pdev);
+
+	platform_device_unregister(state->pdev_child);
+	return 0;
+}
+
+struct platform_driver keyreset_driver = {
+	.driver.name = KEYRESET_NAME,
+	.probe = keyreset_probe,
+	.remove = keyreset_remove,
+};
+
+static int __init keyreset_init(void)
+{
+	return platform_driver_register(&keyreset_driver);
+}
+
+static void __exit keyreset_exit(void)
+{
+	return platform_driver_unregister(&keyreset_driver);
+}
+
+module_init(keyreset_init);
+module_exit(keyreset_exit);
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 7ffb614..94360fe 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -367,6 +367,17 @@
 	  To compile this driver as a module, choose M here: the module will be
 	  called ati_remote2.
 
+config INPUT_KEYCHORD
+	tristate "Key chord input driver support"
+	help
+	  Say Y here if you want to enable the key chord driver
+	  accessible at /dev/keychord.  This driver can be used
+	  for receiving notifications when client-specified key
+	  combinations are pressed.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called keychord.
+
 config INPUT_KEYSPAN_REMOTE
 	tristate "Keyspan DMR USB remote control"
 	depends on USB_ARCH_HAS_HCD
@@ -535,6 +546,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called sgi_btns.
 
+config INPUT_GPIO
+	tristate "GPIO driver support"
+	help
+	  Say Y here if you want to support GPIO-based keys, wheels, etc.
+
 config HP_SDC_RTC
 	tristate "HP SDC Real Time Clock"
 	depends on (GSC || HP300) && SERIO
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 0b6d025..64bf231 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -36,10 +36,12 @@
 obj-$(CONFIG_INPUT_GPIO_BEEPER)		+= gpio-beeper.o
 obj-$(CONFIG_INPUT_GPIO_TILT_POLLED)	+= gpio_tilt_polled.o
 obj-$(CONFIG_INPUT_GPIO_DECODER)	+= gpio_decoder.o
+obj-$(CONFIG_INPUT_GPIO)		+= gpio_event.o gpio_matrix.o gpio_input.o gpio_output.o gpio_axis.o
 obj-$(CONFIG_INPUT_HISI_POWERKEY)	+= hisi_powerkey.o
 obj-$(CONFIG_HP_SDC_RTC)		+= hp_sdc_rtc.o
 obj-$(CONFIG_INPUT_IMS_PCU)		+= ims-pcu.o
 obj-$(CONFIG_INPUT_IXP4XX_BEEPER)	+= ixp4xx-beeper.o
+obj-$(CONFIG_INPUT_KEYCHORD)		+= keychord.o
 obj-$(CONFIG_INPUT_KEYSPAN_REMOTE)	+= keyspan_remote.o
 obj-$(CONFIG_INPUT_KXTJ9)		+= kxtj9.o
 obj-$(CONFIG_INPUT_M68K_BEEP)		+= m68kspkr.o
diff --git a/drivers/input/misc/gpio_axis.c b/drivers/input/misc/gpio_axis.c
new file mode 100644
index 0000000..0acf4a5
--- /dev/null
+++ b/drivers/input/misc/gpio_axis.c
@@ -0,0 +1,192 @@
+/* drivers/input/misc/gpio_axis.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+struct gpio_axis_state {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_axis_info *info;
+	uint32_t pos;
+};
+
+uint16_t gpio_axis_4bit_gray_map_table[] = {
+	[0x0] = 0x0, [0x1] = 0x1, /* 0000 0001 */
+	[0x3] = 0x2, [0x2] = 0x3, /* 0011 0010 */
+	[0x6] = 0x4, [0x7] = 0x5, /* 0110 0111 */
+	[0x5] = 0x6, [0x4] = 0x7, /* 0101 0100 */
+	[0xc] = 0x8, [0xd] = 0x9, /* 1100 1101 */
+	[0xf] = 0xa, [0xe] = 0xb, /* 1111 1110 */
+	[0xa] = 0xc, [0xb] = 0xd, /* 1010 1011 */
+	[0x9] = 0xe, [0x8] = 0xf, /* 1001 1000 */
+};
+uint16_t gpio_axis_4bit_gray_map(struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_4bit_gray_map_table[in];
+}
+
+uint16_t gpio_axis_5bit_singletrack_map_table[] = {
+	[0x10] = 0x00, [0x14] = 0x01, [0x1c] = 0x02, /*     10000 10100 11100 */
+	[0x1e] = 0x03, [0x1a] = 0x04, [0x18] = 0x05, /*     11110 11010 11000 */
+	[0x08] = 0x06, [0x0a] = 0x07, [0x0e] = 0x08, /*    01000 01010 01110  */
+	[0x0f] = 0x09, [0x0d] = 0x0a, [0x0c] = 0x0b, /*    01111 01101 01100  */
+	[0x04] = 0x0c, [0x05] = 0x0d, [0x07] = 0x0e, /*   00100 00101 00111   */
+	[0x17] = 0x0f, [0x16] = 0x10, [0x06] = 0x11, /*   10111 10110 00110   */
+	[0x02] = 0x12, [0x12] = 0x13, [0x13] = 0x14, /*  00010 10010 10011    */
+	[0x1b] = 0x15, [0x0b] = 0x16, [0x03] = 0x17, /*  11011 01011 00011    */
+	[0x01] = 0x18, [0x09] = 0x19, [0x19] = 0x1a, /* 00001 01001 11001     */
+	[0x1d] = 0x1b, [0x15] = 0x1c, [0x11] = 0x1d, /* 11101 10101 10001     */
+};
+uint16_t gpio_axis_5bit_singletrack_map(
+	struct gpio_event_axis_info *info, uint16_t in)
+{
+	return gpio_axis_5bit_singletrack_map_table[in];
+}
+
+static void gpio_event_update_axis(struct gpio_axis_state *as, int report)
+{
+	struct gpio_event_axis_info *ai = as->info;
+	int i;
+	int change;
+	uint16_t state = 0;
+	uint16_t pos;
+	uint16_t old_pos = as->pos;
+	for (i = ai->count - 1; i >= 0; i--)
+		state = (state << 1) | gpio_get_value(ai->gpio[i]);
+	pos = ai->map(ai, state);
+	if (ai->flags & GPIOEAF_PRINT_RAW)
+		pr_info("axis %d-%d raw %x, pos %d -> %d\n",
+			ai->type, ai->code, state, old_pos, pos);
+	if (report && pos != old_pos) {
+		if (ai->type == EV_REL) {
+			change = (ai->decoded_size + pos - old_pos) %
+				  ai->decoded_size;
+			if (change > ai->decoded_size / 2)
+				change -= ai->decoded_size;
+			if (change == ai->decoded_size / 2) {
+				if (ai->flags & GPIOEAF_PRINT_EVENT)
+					pr_info("axis %d-%d unknown direction, "
+						"pos %d -> %d\n", ai->type,
+						ai->code, old_pos, pos);
+				change = 0; /* no closest direction */
+			}
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d change %d\n",
+					ai->type, ai->code, change);
+			input_report_rel(as->input_devs->dev[ai->dev],
+						ai->code, change);
+		} else {
+			if (ai->flags & GPIOEAF_PRINT_EVENT)
+				pr_info("axis %d-%d now %d\n",
+					ai->type, ai->code, pos);
+			input_event(as->input_devs->dev[ai->dev],
+					ai->type, ai->code, pos);
+		}
+		input_sync(as->input_devs->dev[ai->dev]);
+	}
+	as->pos = pos;
+}
+
+static irqreturn_t gpio_axis_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_axis_state *as = dev_id;
+	gpio_event_update_axis(as, 1);
+	return IRQ_HANDLED;
+}
+
+int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			 struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	int irq;
+	struct gpio_event_axis_info *ai;
+	struct gpio_axis_state *as;
+
+	ai = container_of(info, struct gpio_event_axis_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		for (i = 0; i < ai->count; i++)
+			disable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		for (i = 0; i < ai->count; i++)
+			enable_irq(gpio_to_irq(ai->gpio[i]));
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		*data = as = kmalloc(sizeof(*as), GFP_KERNEL);
+		if (as == NULL) {
+			ret = -ENOMEM;
+			goto err_alloc_axis_state_failed;
+		}
+		as->input_devs = input_devs;
+		as->info = ai;
+		if (ai->dev >= input_devs->count) {
+			pr_err("gpio_event_axis: bad device index %d >= %d "
+				"for %d:%d\n", ai->dev, input_devs->count,
+				ai->type, ai->code);
+			ret = -EINVAL;
+			goto err_bad_device_index;
+		}
+
+		input_set_capability(input_devs->dev[ai->dev],
+				     ai->type, ai->code);
+		if (ai->type == EV_ABS) {
+			input_set_abs_params(input_devs->dev[ai->dev], ai->code,
+					     0, ai->decoded_size - 1, 0, 0);
+		}
+		for (i = 0; i < ai->count; i++) {
+			ret = gpio_request(ai->gpio[i], "gpio_event_axis");
+			if (ret < 0)
+				goto err_request_gpio_failed;
+			ret = gpio_direction_input(ai->gpio[i]);
+			if (ret < 0)
+				goto err_gpio_direction_input_failed;
+			ret = irq = gpio_to_irq(ai->gpio[i]);
+			if (ret < 0)
+				goto err_get_irq_num_failed;
+			ret = request_irq(irq, gpio_axis_irq_handler,
+					  IRQF_TRIGGER_RISING |
+					  IRQF_TRIGGER_FALLING,
+					  "gpio_event_axis", as);
+			if (ret < 0)
+				goto err_request_irq_failed;
+		}
+		gpio_event_update_axis(as, 0);
+		return 0;
+	}
+
+	ret = 0;
+	as = *data;
+	for (i = ai->count - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(ai->gpio[i]), as);
+err_request_irq_failed:
+err_get_irq_num_failed:
+err_gpio_direction_input_failed:
+		gpio_free(ai->gpio[i]);
+err_request_gpio_failed:
+		;
+	}
+err_bad_device_index:
+	kfree(as);
+	*data = NULL;
+err_alloc_axis_state_failed:
+	return ret;
+}
diff --git a/drivers/input/misc/gpio_event.c b/drivers/input/misc/gpio_event.c
new file mode 100644
index 0000000..90f07eb
--- /dev/null
+++ b/drivers/input/misc/gpio_event.c
@@ -0,0 +1,228 @@
+/* drivers/input/misc/gpio_event.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct gpio_event {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_platform_data *info;
+	void *state[0];
+};
+
+static int gpio_input_event(
+	struct input_dev *dev, unsigned int type, unsigned int code, int value)
+{
+	int i;
+	int devnr;
+	int ret = 0;
+	int tmp_ret;
+	struct gpio_event_info **ii;
+	struct gpio_event *ip = input_get_drvdata(dev);
+
+	for (devnr = 0; devnr < ip->input_devs->count; devnr++)
+		if (ip->input_devs->dev[devnr] == dev)
+			break;
+	if (devnr == ip->input_devs->count) {
+		pr_err("gpio_input_event: unknown device %p\n", dev);
+		return -EIO;
+	}
+
+	for (i = 0, ii = ip->info->info; i < ip->info->info_count; i++, ii++) {
+		if ((*ii)->event) {
+			tmp_ret = (*ii)->event(ip->input_devs, *ii,
+						&ip->state[i],
+						devnr, type, code, value);
+			if (tmp_ret)
+				ret = tmp_ret;
+		}
+	}
+	return ret;
+}
+
+static int gpio_event_call_all_func(struct gpio_event *ip, int func)
+{
+	int i;
+	int ret;
+	struct gpio_event_info **ii;
+
+	if (func == GPIO_EVENT_FUNC_INIT || func == GPIO_EVENT_FUNC_RESUME) {
+		ii = ip->info->info;
+		for (i = 0; i < ip->info->info_count; i++, ii++) {
+			if ((*ii)->func == NULL) {
+				ret = -ENODEV;
+				pr_err("gpio_event_probe: Incomplete pdata, "
+					"no function\n");
+				goto err_no_func;
+			}
+			if (func == GPIO_EVENT_FUNC_RESUME && (*ii)->no_suspend)
+				continue;
+			ret = (*ii)->func(ip->input_devs, *ii, &ip->state[i],
+					  func);
+			if (ret) {
+				pr_err("gpio_event_probe: function failed\n");
+				goto err_func_failed;
+			}
+		}
+		return 0;
+	}
+
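+	/*
+	 * Teardown path, also reached through the error gotos below: walk
+	 * the entries in reverse with bit 0 masked off, so a failed INIT
+	 * unwinds as UNINIT and a failed RESUME as SUSPEND.
+	 */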
+	ret = 0;
+	i = ip->info->info_count;
+	ii = ip->info->info + i;
+	while (i > 0) {
+		i--;
+		ii--;
+		if ((func & ~1) == GPIO_EVENT_FUNC_SUSPEND && (*ii)->no_suspend)
+			continue;
+		(*ii)->func(ip->input_devs, *ii, &ip->state[i], func & ~1);
+err_func_failed:
+err_no_func:
+		;
+	}
+	return ret;
+}
+
+static void __maybe_unused gpio_event_suspend(struct gpio_event *ip)
+{
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_SUSPEND);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+}
+
+static void __maybe_unused gpio_event_resume(struct gpio_event *ip)
+{
+	if (ip->info->power)
+		ip->info->power(ip->info, 1);
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_RESUME);
+}
+
+static int gpio_event_probe(struct platform_device *pdev)
+{
+	int err;
+	struct gpio_event *ip;
+	struct gpio_event_platform_data *event_info;
+	int dev_count = 1;
+	int i;
+	int registered = 0;
+
+	event_info = pdev->dev.platform_data;
+	if (event_info == NULL) {
+		pr_err("gpio_event_probe: No pdata\n");
+		return -ENODEV;
+	}
+	if ((!event_info->name && !event_info->names[0]) ||
+	    !event_info->info || !event_info->info_count) {
+		pr_err("gpio_event_probe: Incomplete pdata\n");
+		return -ENODEV;
+	}
+	if (!event_info->name)
+		while (event_info->names[dev_count])
+			dev_count++;
+	ip = kzalloc(sizeof(*ip) +
+		     sizeof(ip->state[0]) * event_info->info_count +
+		     sizeof(*ip->input_devs) +
+		     sizeof(ip->input_devs->dev[0]) * dev_count, GFP_KERNEL);
+	if (ip == NULL) {
+		err = -ENOMEM;
+		pr_err("gpio_event_probe: Failed to allocate private data\n");
+		goto err_kp_alloc_failed;
+	}
+	ip->input_devs = (void *)&ip->state[event_info->info_count];
+	platform_set_drvdata(pdev, ip);
+
+	for (i = 0; i < dev_count; i++) {
+		struct input_dev *input_dev = input_allocate_device();
+		if (input_dev == NULL) {
+			err = -ENOMEM;
+			pr_err("gpio_event_probe: "
+				"Failed to allocate input device\n");
+			goto err_input_dev_alloc_failed;
+		}
+		input_set_drvdata(input_dev, ip);
+		input_dev->name = event_info->name ?
+					event_info->name : event_info->names[i];
+		input_dev->event = gpio_input_event;
+		ip->input_devs->dev[i] = input_dev;
+	}
+	ip->input_devs->count = dev_count;
+	ip->info = event_info;
+	if (event_info->power)
+		ip->info->power(ip->info, 1);
+
+	err = gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_INIT);
+	if (err)
+		goto err_call_all_func_failed;
+
+	for (i = 0; i < dev_count; i++) {
+		err = input_register_device(ip->input_devs->dev[i]);
+		if (err) {
+			pr_err("gpio_event_probe: Unable to register %s "
+				"input device\n", ip->input_devs->dev[i]->name);
+			goto err_input_register_device_failed;
+		}
+		registered++;
+	}
+
+	return 0;
+
+err_input_register_device_failed:
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+err_call_all_func_failed:
+	if (event_info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < registered; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	for (i = dev_count - 1; i >= registered; i--) {
+		input_free_device(ip->input_devs->dev[i]);
+err_input_dev_alloc_failed:
+		;
+	}
+	kfree(ip);
+err_kp_alloc_failed:
+	return err;
+}
+
+static int gpio_event_remove(struct platform_device *pdev)
+{
+	struct gpio_event *ip = platform_get_drvdata(pdev);
+	int i;
+
+	gpio_event_call_all_func(ip, GPIO_EVENT_FUNC_UNINIT);
+	if (ip->info->power)
+		ip->info->power(ip->info, 0);
+	for (i = 0; i < ip->input_devs->count; i++)
+		input_unregister_device(ip->input_devs->dev[i]);
+	kfree(ip);
+	return 0;
+}
+
+static struct platform_driver gpio_event_driver = {
+	.probe		= gpio_event_probe,
+	.remove		= gpio_event_remove,
+	.driver		= {
+		.name	= GPIO_EVENT_DEV_NAME,
+	},
+};
+
+module_platform_driver(gpio_event_driver);
+
+MODULE_DESCRIPTION("GPIO Event Driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/input/misc/gpio_input.c b/drivers/input/misc/gpio_input.c
new file mode 100644
index 0000000..eefd027
--- /dev/null
+++ b/drivers/input/misc/gpio_input.c
@@ -0,0 +1,390 @@
+/* drivers/input/misc/gpio_input.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/pm_wakeup.h>
+
+enum {
+	DEBOUNCE_UNSTABLE     = BIT(0),	/* Got irq, while debouncing */
+	DEBOUNCE_PRESSED      = BIT(1),
+	DEBOUNCE_NOTPRESSED   = BIT(2),
+	DEBOUNCE_WAIT_IRQ     = BIT(3),	/* Stable irq state */
+	DEBOUNCE_POLL         = BIT(4),	/* Stable polling state */
+
+	DEBOUNCE_UNKNOWN =
+		DEBOUNCE_PRESSED | DEBOUNCE_NOTPRESSED,
+};
+
+struct gpio_key_state {
+	struct gpio_input_state *ds;
+	uint8_t debounce;
+};
+
+struct gpio_input_state {
+	struct gpio_event_input_devs *input_devs;
+	const struct gpio_event_input_info *info;
+	struct hrtimer timer;
+	int use_irq;
+	int debounce_count;
+	spinlock_t irq_lock;
+	struct wakeup_source *ws;
+	struct gpio_key_state key_state[0];
+};
+
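+/*
+ * Debounce timer: each pass samples every key that is still settling and
+ * reports the new state once a key reads the same value on two consecutive
+ * passes.  The timer re-arms while any key is debouncing, or at poll_time
+ * when interrupts are not in use.
+ */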
+static enum hrtimer_restart gpio_event_input_timer_func(struct hrtimer *timer)
+{
+	int i;
+	int pressed;
+	struct gpio_input_state *ds =
+		container_of(timer, struct gpio_input_state, timer);
+	unsigned gpio_flags = ds->info->flags;
+	unsigned npolarity;
+	int nkeys = ds->info->keymap_size;
+	const struct gpio_event_direct_entry *key_entry;
+	struct gpio_key_state *key_state;
+	unsigned long irqflags;
+	uint8_t debounce;
+	bool sync_needed;
+
+#if 0
+	key_entry = kp->keys_info->keymap;
+	key_state = kp->key_state;
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++)
+		pr_info("gpio_read_detect_status %d %d\n", key_entry->gpio,
+			gpio_read_detect_status(key_entry->gpio));
+#endif
+	key_entry = ds->info->keymap;
+	key_state = ds->key_state;
+	sync_needed = false;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	for (i = 0; i < nkeys; i++, key_entry++, key_state++) {
+		debounce = key_state->debounce;
+		if (debounce & DEBOUNCE_WAIT_IRQ)
+			continue;
+		if (key_state->debounce & DEBOUNCE_UNSTABLE) {
+			debounce = key_state->debounce = DEBOUNCE_UNKNOWN;
+			enable_irq(gpio_to_irq(key_entry->gpio));
+			if (gpio_flags & GPIOEDF_PRINT_KEY_UNSTABLE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) continue debounce\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+		}
+		npolarity = !(gpio_flags & GPIOEDF_ACTIVE_HIGH);
+		pressed = gpio_get_value(key_entry->gpio) ^ npolarity;
+		if (debounce & DEBOUNCE_POLL) {
+			if (pressed == !(debounce & DEBOUNCE_PRESSED)) {
+				ds->debounce_count++;
+				key_state->debounce = DEBOUNCE_UNKNOWN;
+				if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+					pr_info("gpio_keys_scan_keys: key %x-"
+						"%x, %d (%d) start debounce\n",
+						ds->info->type, key_entry->code,
+						i, key_entry->gpio);
+			}
+			continue;
+		}
+		if (pressed && (debounce & DEBOUNCE_NOTPRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 1\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_PRESSED;
+			continue;
+		}
+		if (!pressed && (debounce & DEBOUNCE_PRESSED)) {
+			if (gpio_flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_keys_scan_keys: key %x-%x, %d "
+					"(%d) debounce pressed 0\n",
+					ds->info->type, key_entry->code,
+					i, key_entry->gpio);
+			key_state->debounce = DEBOUNCE_NOTPRESSED;
+			continue;
+		}
+		/* key is stable */
+		ds->debounce_count--;
+		if (ds->use_irq)
+			key_state->debounce |= DEBOUNCE_WAIT_IRQ;
+		else
+			key_state->debounce |= DEBOUNCE_POLL;
+		if (gpio_flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_keys_scan_keys: key %x-%x, %d (%d) "
+				"changed to %d\n", ds->info->type,
+				key_entry->code, i, key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		sync_needed = true;
+	}
+	if (sync_needed) {
+		for (i = 0; i < ds->input_devs->count; i++)
+			input_sync(ds->input_devs->dev[i]);
+	}
+
+	if (ds->debounce_count)
+		hrtimer_start(timer, ds->info->debounce_time, HRTIMER_MODE_REL);
+	else if (!ds->use_irq)
+		hrtimer_start(timer, ds->info->poll_time, HRTIMER_MODE_REL);
+	else
+		__pm_relax(ds->ws);
+
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	return HRTIMER_NORESTART;
+}
+
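+/*
+ * Per-key interrupt handler.  With a non-zero debounce_time it defers to
+ * the debounce timer (disabling the interrupt if the key was already
+ * mid-debounce); otherwise it reports the new key state to the input core
+ * immediately.
+ */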
+static irqreturn_t gpio_event_input_irq_handler(int irq, void *dev_id)
+{
+	struct gpio_key_state *ks = dev_id;
+	struct gpio_input_state *ds = ks->ds;
+	int keymap_index = ks - ds->key_state;
+	const struct gpio_event_direct_entry *key_entry;
+	unsigned long irqflags;
+	int pressed;
+
+	if (!ds->use_irq)
+		return IRQ_HANDLED;
+
+	key_entry = &ds->info->keymap[keymap_index];
+
+	if (ds->info->debounce_time.tv64) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ks->debounce & DEBOUNCE_WAIT_IRQ) {
+			ks->debounce = DEBOUNCE_UNKNOWN;
+			if (ds->debounce_count++ == 0) {
+				__pm_stay_awake(ds->ws);
+				hrtimer_start(
+					&ds->timer, ds->info->debounce_time,
+					HRTIMER_MODE_REL);
+			}
+			if (ds->info->flags & GPIOEDF_PRINT_KEY_DEBOUNCE)
+				pr_info("gpio_event_input_irq_handler: "
+					"key %x-%x, %d (%d) start debounce\n",
+					ds->info->type, key_entry->code,
+					keymap_index, key_entry->gpio);
+		} else {
+			disable_irq_nosync(irq);
+			ks->debounce = DEBOUNCE_UNSTABLE;
+		}
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+	} else {
+		pressed = gpio_get_value(key_entry->gpio) ^
+			!(ds->info->flags & GPIOEDF_ACTIVE_HIGH);
+		if (ds->info->flags & GPIOEDF_PRINT_KEYS)
+			pr_info("gpio_event_input_irq_handler: key %x-%x, %d "
+				"(%d) changed to %d\n",
+				ds->info->type, key_entry->code, keymap_index,
+				key_entry->gpio, pressed);
+		input_event(ds->input_devs->dev[key_entry->dev], ds->info->type,
+			    key_entry->code, pressed);
+		input_sync(ds->input_devs->dev[key_entry->dev]);
+	}
+	return IRQ_HANDLED;
+}
+
+static int gpio_event_input_request_irqs(struct gpio_input_state *ds)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long req_flags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
+
+	for (i = 0; i < ds->info->keymap_size; i++) {
+		err = irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_event_input_irq_handler,
+				  req_flags, "gpio_keys", &ds->key_state[i]);
+		if (err) {
+			pr_err("gpio_event_input_request_irqs: request_irq "
+				"failed for input %d, irq %d\n",
+				ds->info->keymap[i].gpio, irq);
+			goto err_request_irq_failed;
+		}
+		if (ds->info->info.no_suspend) {
+			err = enable_irq_wake(irq);
+			if (err) {
+				pr_err("gpio_event_input_request_irqs: "
+					"enable_irq_wake failed for input %d, "
+					"irq %d\n",
+					ds->info->keymap[i].gpio, irq);
+				goto err_enable_irq_wake_failed;
+			}
+		}
+	}
+	return 0;
+
+	for (i = ds->info->keymap_size - 1; i >= 0; i--) {
+		irq = gpio_to_irq(ds->info->keymap[i].gpio);
+		if (ds->info->info.no_suspend)
+			disable_irq_wake(irq);
+err_enable_irq_wake_failed:
+		free_irq(irq, &ds->key_state[i]);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
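+/*
+ * Entry point called by the gpio_event core.  INIT allocates the per-key
+ * state, claims the GPIOs and falls back to polling if IRQ setup fails;
+ * SUSPEND/RESUME stop and restart scanning.  The tail of the function is
+ * shared by UNINIT and the INIT error paths (note the gotos into the
+ * cleanup loops).
+ */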
+int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func)
+{
+	int ret;
+	int i;
+	unsigned long irqflags;
+	struct gpio_event_input_info *di;
+	struct gpio_input_state *ds = *data;
+	char *wlname;
+
+	di = container_of(info, struct gpio_event_input_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND) {
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				disable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_cancel(&ds->timer);
+		return 0;
+	}
+	if (func == GPIO_EVENT_FUNC_RESUME) {
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		if (ds->use_irq)
+			for (i = 0; i < di->keymap_size; i++)
+				enable_irq(gpio_to_irq(di->keymap[i].gpio));
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (ktime_to_ns(di->poll_time) <= 0)
+			di->poll_time = ktime_set(0, 20 * NSEC_PER_MSEC);
+
+		*data = ds = kzalloc(sizeof(*ds) + sizeof(ds->key_state[0]) *
+					di->keymap_size, GFP_KERNEL);
+		if (ds == NULL) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate private data\n");
+			goto err_ds_alloc_failed;
+		}
+		ds->debounce_count = di->keymap_size;
+		ds->input_devs = input_devs;
+		ds->info = di;
+		wlname = kasprintf(GFP_KERNEL, "gpio_input:%s%s",
+				   input_devs->dev[0]->name,
+				   (input_devs->count > 1) ? "..." : "");
+
+		ds->ws = wakeup_source_register(wlname);
+		kfree(wlname);
+		if (!ds->ws) {
+			ret = -ENOMEM;
+			pr_err("gpio_event_input_func: "
+				"Failed to allocate wakeup source\n");
+			goto err_ws_failed;
+		}
+
+		spin_lock_init(&ds->irq_lock);
+
+		for (i = 0; i < di->keymap_size; i++) {
+			int dev = di->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_input_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					di->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], di->type,
+					     di->keymap[i].code);
+			ds->key_state[i].ds = ds;
+			ds->key_state[i].debounce = DEBOUNCE_UNKNOWN;
+		}
+
+		for (i = 0; i < di->keymap_size; i++) {
+			ret = gpio_request(di->keymap[i].gpio, "gpio_kp_in");
+			if (ret) {
+				pr_err("gpio_event_input_func: gpio_request "
+					"failed for %d\n", di->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_input(di->keymap[i].gpio);
+			if (ret) {
+				pr_err("gpio_event_input_func: "
+					"gpio_direction_input failed for %d\n",
+					di->keymap[i].gpio);
+				goto err_gpio_configure_failed;
+			}
+		}
+
+		ret = gpio_event_input_request_irqs(ds);
+
+		spin_lock_irqsave(&ds->irq_lock, irqflags);
+		ds->use_irq = ret == 0;
+
+		pr_info("GPIO Input Driver: Start gpio inputs for %s%s in %s "
+			"mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			ret == 0 ? "interrupt" : "polling");
+
+		hrtimer_init(&ds->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		ds->timer.function = gpio_event_input_timer_func;
+		hrtimer_start(&ds->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+		spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+		return 0;
+	}
+
+	ret = 0;
+	spin_lock_irqsave(&ds->irq_lock, irqflags);
+	hrtimer_cancel(&ds->timer);
+	if (ds->use_irq) {
+		for (i = di->keymap_size - 1; i >= 0; i--) {
+			int irq = gpio_to_irq(di->keymap[i].gpio);
+			if (ds->info->info.no_suspend)
+				disable_irq_wake(irq);
+			free_irq(irq, &ds->key_state[i]);
+		}
+	}
+	spin_unlock_irqrestore(&ds->irq_lock, irqflags);
+
+	for (i = di->keymap_size - 1; i >= 0; i--) {
+err_gpio_configure_failed:
+		gpio_free(di->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	wakeup_source_unregister(ds->ws);
+err_ws_failed:
+	kfree(ds);
+err_ds_alloc_failed:
+	return ret;
+}
diff --git a/drivers/input/misc/gpio_matrix.c b/drivers/input/misc/gpio_matrix.c
new file mode 100644
index 0000000..eaa9e89
--- /dev/null
+++ b/drivers/input/misc/gpio_matrix.c
@@ -0,0 +1,441 @@
+/* drivers/input/misc/gpio_matrix.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+struct gpio_kp {
+	struct gpio_event_input_devs *input_devs;
+	struct gpio_event_matrix_info *keypad_info;
+	struct hrtimer timer;
+	struct wake_lock wake_lock;
+	int current_output;
+	unsigned int use_irq:1;
+	unsigned int key_state_changed:1;
+	unsigned int last_key_state_changed:1;
+	unsigned int some_keys_pressed:2;
+	unsigned int disabled_irq:1;
+	unsigned long keys_pressed[0];
+};
+
+static void clear_phantom_key(struct gpio_kp *kp, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int key_index = out * mi->ninputs + in;
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (!test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+		__clear_bit(key_index, kp->keys_pressed);
+	} else {
+		if (mi->flags & GPIOKPF_PRINT_PHANTOM_KEYS)
+			pr_info("gpiomatrix: phantom key %x, %d-%d (%d-%d) "
+				"not cleared\n", keycode, out, in,
+				mi->output_gpios[out], mi->input_gpios[in]);
+	}
+}
+
+static int restore_keys_for_input(struct gpio_kp *kp, int out, int in)
+{
+	int rv = 0;
+	int key_index;
+
+	key_index = out * kp->keypad_info->ninputs + in;
+	while (out < kp->keypad_info->noutputs) {
+		if (test_bit(key_index, kp->keys_pressed)) {
+			rv = 1;
+			clear_phantom_key(kp, out, in);
+		}
+		key_index += kp->keypad_info->ninputs;
+		out++;
+	}
+	return rv;
+}
+
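+/*
+ * With three or more keys down, a keypad matrix can report "phantom" keys
+ * at the intersection of the driven rows and columns.  Clear any pressed
+ * entry that the input core does not already consider down, so only real
+ * key combinations are reported.
+ */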
+static void remove_phantom_keys(struct gpio_kp *kp)
+{
+	int out, in, inp;
+	int key_index;
+
+	if (kp->some_keys_pressed < 3)
+		return;
+
+	for (out = 0; out < kp->keypad_info->noutputs; out++) {
+		inp = -1;
+		key_index = out * kp->keypad_info->ninputs;
+		for (in = 0; in < kp->keypad_info->ninputs; in++, key_index++) {
+			if (test_bit(key_index, kp->keys_pressed)) {
+				if (inp == -1) {
+					inp = in;
+					continue;
+				}
+				if (inp >= 0) {
+					if (!restore_keys_for_input(kp, out + 1,
+									inp))
+						break;
+					clear_phantom_key(kp, out, inp);
+					inp = -2;
+				}
+				restore_keys_for_input(kp, out, in);
+			}
+		}
+	}
+}
+
+static void report_key(struct gpio_kp *kp, int key_index, int out, int in)
+{
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	int pressed = test_bit(key_index, kp->keys_pressed);
+	unsigned short keyentry = mi->keymap[key_index];
+	unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+	unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+
+	if (pressed != test_bit(keycode, kp->input_devs->dev[dev]->key)) {
+		if (keycode == KEY_RESERVED) {
+			if (mi->flags & GPIOKPF_PRINT_UNMAPPED_KEYS)
+				pr_info("gpiomatrix: unmapped key, %d-%d "
+					"(%d-%d) changed to %d\n",
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+		} else {
+			if (mi->flags & GPIOKPF_PRINT_MAPPED_KEYS)
+				pr_info("gpiomatrix: key %x, %d-%d (%d-%d) "
+					"changed to %d\n", keycode,
+					out, in, mi->output_gpios[out],
+					mi->input_gpios[in], pressed);
+			input_report_key(kp->input_devs->dev[dev], keycode, pressed);
+		}
+	}
+}
+
+static void report_sync(struct gpio_kp *kp)
+{
+	int i;
+
+	for (i = 0; i < kp->input_devs->count; i++)
+		input_sync(kp->input_devs->dev[i]);
+}
+
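+/*
+ * Keypad scan timer.  Each invocation samples all inputs for the currently
+ * driven output, then drives the next output and waits settle_time.  After
+ * the last output it applies debouncing and phantom-key removal, reports
+ * any changes, and either polls again or re-enables the input interrupts
+ * when no keys remain pressed.
+ */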
+static enum hrtimer_restart gpio_keypad_timer_func(struct hrtimer *timer)
+{
+	int out, in;
+	int key_index;
+	int gpio;
+	struct gpio_kp *kp = container_of(timer, struct gpio_kp, timer);
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+	unsigned polarity = !!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH);
+
+	out = kp->current_output;
+	if (out == mi->noutputs) {
+		out = 0;
+		kp->last_key_state_changed = kp->key_state_changed;
+		kp->key_state_changed = 0;
+		kp->some_keys_pressed = 0;
+	} else {
+		key_index = out * mi->ninputs;
+		for (in = 0; in < mi->ninputs; in++, key_index++) {
+			gpio = mi->input_gpios[in];
+			if (gpio_get_value(gpio) ^ !polarity) {
+				if (kp->some_keys_pressed < 3)
+					kp->some_keys_pressed++;
+				kp->key_state_changed |= !__test_and_set_bit(
+						key_index, kp->keys_pressed);
+			} else
+				kp->key_state_changed |= __test_and_clear_bit(
+						key_index, kp->keys_pressed);
+		}
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, !polarity);
+		else
+			gpio_direction_input(gpio);
+		out++;
+	}
+	kp->current_output = out;
+	if (out < mi->noutputs) {
+		gpio = mi->output_gpios[out];
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(gpio, polarity);
+		else
+			gpio_direction_output(gpio, polarity);
+		hrtimer_start(timer, mi->settle_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+	if (gpio_keypad_flags & GPIOKPF_DEBOUNCE) {
+		if (kp->key_state_changed) {
+			hrtimer_start(&kp->timer, mi->debounce_delay,
+				      HRTIMER_MODE_REL);
+			return HRTIMER_NORESTART;
+		}
+		kp->key_state_changed = kp->last_key_state_changed;
+	}
+	if (kp->key_state_changed) {
+		if (gpio_keypad_flags & GPIOKPF_REMOVE_SOME_PHANTOM_KEYS)
+			remove_phantom_keys(kp);
+		key_index = 0;
+		for (out = 0; out < mi->noutputs; out++)
+			for (in = 0; in < mi->ninputs; in++, key_index++)
+				report_key(kp, key_index, out, in);
+		report_sync(kp);
+	}
+	if (!kp->use_irq || kp->some_keys_pressed) {
+		hrtimer_start(timer, mi->poll_time, HRTIMER_MODE_REL);
+		return HRTIMER_NORESTART;
+	}
+
+	/* No keys are pressed, reenable interrupt */
+	for (out = 0; out < mi->noutputs; out++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[out], polarity);
+		else
+			gpio_direction_output(mi->output_gpios[out], polarity);
+	}
+	for (in = 0; in < mi->ninputs; in++)
+		enable_irq(gpio_to_irq(mi->input_gpios[in]));
+	wake_unlock(&kp->wake_lock);
+	return HRTIMER_NORESTART;
+}
+
+static irqreturn_t gpio_keypad_irq_handler(int irq_in, void *dev_id)
+{
+	int i;
+	struct gpio_kp *kp = dev_id;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+	unsigned gpio_keypad_flags = mi->flags;
+
+	if (!kp->use_irq) {
+		/* ignore interrupt while registering the handler */
+		kp->disabled_irq = 1;
+		disable_irq_nosync(irq_in);
+		return IRQ_HANDLED;
+	}
+
+	for (i = 0; i < mi->ninputs; i++)
+		disable_irq_nosync(gpio_to_irq(mi->input_gpios[i]));
+	for (i = 0; i < mi->noutputs; i++) {
+		if (gpio_keypad_flags & GPIOKPF_DRIVE_INACTIVE)
+			gpio_set_value(mi->output_gpios[i],
+				!(gpio_keypad_flags & GPIOKPF_ACTIVE_HIGH));
+		else
+			gpio_direction_input(mi->output_gpios[i]);
+	}
+	wake_lock(&kp->wake_lock);
+	hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+	return IRQ_HANDLED;
+}
+
+static int gpio_keypad_request_irqs(struct gpio_kp *kp)
+{
+	int i;
+	int err;
+	unsigned int irq;
+	unsigned long request_flags;
+	struct gpio_event_matrix_info *mi = kp->keypad_info;
+
+	switch (mi->flags & (GPIOKPF_ACTIVE_HIGH|GPIOKPF_LEVEL_TRIGGERED_IRQ)) {
+	default:
+		request_flags = IRQF_TRIGGER_FALLING;
+		break;
+	case GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_RISING;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ:
+		request_flags = IRQF_TRIGGER_LOW;
+		break;
+	case GPIOKPF_LEVEL_TRIGGERED_IRQ | GPIOKPF_ACTIVE_HIGH:
+		request_flags = IRQF_TRIGGER_HIGH;
+		break;
+	}
+
+	for (i = 0; i < mi->ninputs; i++) {
+		err = irq = gpio_to_irq(mi->input_gpios[i]);
+		if (err < 0)
+			goto err_gpio_get_irq_num_failed;
+		err = request_irq(irq, gpio_keypad_irq_handler, request_flags,
+				  "gpio_kp", kp);
+		if (err) {
+			pr_err("gpiomatrix: request_irq failed for input %d, "
+				"irq %d\n", mi->input_gpios[i], irq);
+			goto err_request_irq_failed;
+		}
+		err = enable_irq_wake(irq);
+		if (err) {
+			pr_err("gpiomatrix: enable_irq_wake failed for input "
+				"%d, irq %d\n", mi->input_gpios[i], irq);
+		}
+		disable_irq(irq);
+		if (kp->disabled_irq) {
+			kp->disabled_irq = 0;
+			enable_irq(irq);
+		}
+	}
+	return 0;
+
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+		free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+err_request_irq_failed:
+err_gpio_get_irq_num_failed:
+		;
+	}
+	return err;
+}
+
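+/*
+ * Entry point called by the gpio_event core.  INIT validates the keymap,
+ * configures the output and input GPIOs and starts the scan timer, falling
+ * back to polling if IRQ setup fails.  The tail of the function is shared
+ * by UNINIT and the INIT error paths.
+ */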
+int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+	struct gpio_event_info *info, void **data, int func)
+{
+	int i;
+	int err;
+	int key_count;
+	struct gpio_kp *kp;
+	struct gpio_event_matrix_info *mi;
+
+	mi = container_of(info, struct gpio_event_matrix_info, info);
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME) {
+		/* TODO: disable scanning */
+		return 0;
+	}
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		if (mi->keymap == NULL ||
+		   mi->input_gpios == NULL ||
+		   mi->output_gpios == NULL) {
+			err = -ENODEV;
+			pr_err("gpiomatrix: Incomplete pdata\n");
+			goto err_invalid_platform_data;
+		}
+		key_count = mi->ninputs * mi->noutputs;
+
+		*data = kp = kzalloc(sizeof(*kp) + sizeof(kp->keys_pressed[0]) *
+				     BITS_TO_LONGS(key_count), GFP_KERNEL);
+		if (kp == NULL) {
+			err = -ENOMEM;
+			pr_err("gpiomatrix: Failed to allocate private data\n");
+			goto err_kp_alloc_failed;
+		}
+		kp->input_devs = input_devs;
+		kp->keypad_info = mi;
+		for (i = 0; i < key_count; i++) {
+			unsigned short keyentry = mi->keymap[i];
+			unsigned short keycode = keyentry & MATRIX_KEY_MASK;
+			unsigned short dev = keyentry >> MATRIX_CODE_BITS;
+			if (dev >= input_devs->count) {
+				pr_err("gpiomatrix: bad device index %d >= "
+					"%d for key code %d\n",
+					dev, input_devs->count, keycode);
+				err = -EINVAL;
+				goto err_bad_keymap;
+			}
+			if (keycode && keycode <= KEY_MAX)
+				input_set_capability(input_devs->dev[dev],
+							EV_KEY, keycode);
+		}
+
+		for (i = 0; i < mi->noutputs; i++) {
+			err = gpio_request(mi->output_gpios[i], "gpio_kp_out");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_request_output_gpio_failed;
+			}
+			if (gpio_cansleep(mi->output_gpios[i])) {
+				pr_err("gpiomatrix: unsupported output gpio %d,"
+					" can sleep\n", mi->output_gpios[i]);
+				err = -EINVAL;
+				goto err_output_gpio_configure_failed;
+			}
+			if (mi->flags & GPIOKPF_DRIVE_INACTIVE)
+				err = gpio_direction_output(mi->output_gpios[i],
+					!(mi->flags & GPIOKPF_ACTIVE_HIGH));
+			else
+				err = gpio_direction_input(mi->output_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_configure failed for "
+					"output %d\n", mi->output_gpios[i]);
+				goto err_output_gpio_configure_failed;
+			}
+		}
+		for (i = 0; i < mi->ninputs; i++) {
+			err = gpio_request(mi->input_gpios[i], "gpio_kp_in");
+			if (err) {
+				pr_err("gpiomatrix: gpio_request failed for "
+					"input %d\n", mi->input_gpios[i]);
+				goto err_request_input_gpio_failed;
+			}
+			err = gpio_direction_input(mi->input_gpios[i]);
+			if (err) {
+				pr_err("gpiomatrix: gpio_direction_input failed"
+					" for input %d\n", mi->input_gpios[i]);
+				goto err_gpio_direction_input_failed;
+			}
+		}
+		kp->current_output = mi->noutputs;
+		kp->key_state_changed = 1;
+
+		hrtimer_init(&kp->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+		kp->timer.function = gpio_keypad_timer_func;
+		wake_lock_init(&kp->wake_lock, WAKE_LOCK_SUSPEND, "gpio_kp");
+		err = gpio_keypad_request_irqs(kp);
+		kp->use_irq = err == 0;
+
+		pr_info("GPIO Matrix Keypad Driver: Start keypad matrix for "
+			"%s%s in %s mode\n", input_devs->dev[0]->name,
+			(input_devs->count > 1) ? "..." : "",
+			kp->use_irq ? "interrupt" : "polling");
+
+		if (kp->use_irq)
+			wake_lock(&kp->wake_lock);
+		hrtimer_start(&kp->timer, ktime_set(0, 0), HRTIMER_MODE_REL);
+
+		return 0;
+	}
+
+	err = 0;
+	kp = *data;
+
+	if (kp->use_irq)
+		for (i = mi->ninputs - 1; i >= 0; i--)
+			free_irq(gpio_to_irq(mi->input_gpios[i]), kp);
+
+	hrtimer_cancel(&kp->timer);
+	wake_lock_destroy(&kp->wake_lock);
+	for (i = mi->ninputs - 1; i >= 0; i--) {
+err_gpio_direction_input_failed:
+		gpio_free(mi->input_gpios[i]);
+err_request_input_gpio_failed:
+		;
+	}
+	for (i = mi->noutputs - 1; i >= 0; i--) {
+err_output_gpio_configure_failed:
+		gpio_free(mi->output_gpios[i]);
+err_request_output_gpio_failed:
+		;
+	}
+err_bad_keymap:
+	kfree(kp);
+err_kp_alloc_failed:
+err_invalid_platform_data:
+	return err;
+}
diff --git a/drivers/input/misc/gpio_output.c b/drivers/input/misc/gpio_output.c
new file mode 100644
index 0000000..2aac2fa
--- /dev/null
+++ b/drivers/input/misc/gpio_output.c
@@ -0,0 +1,97 @@
+/* drivers/input/misc/gpio_output.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/gpio_event.h>
+
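+/*
+ * Event callback: mirrors matching input events onto the configured output
+ * GPIOs, inverting the value unless GPIOEDF_ACTIVE_HIGH is set.
+ */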
+int gpio_event_output_event(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, unsigned int dev, unsigned int type,
+	unsigned int code, int value)
+{
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+	if (type != oi->type)
+		return 0;
+	if (!(oi->flags & GPIOEDF_ACTIVE_HIGH))
+		value = !value;
+	for (i = 0; i < oi->keymap_size; i++)
+		if (dev == oi->keymap[i].dev && code == oi->keymap[i].code)
+			gpio_set_value(oi->keymap[i].gpio, value);
+	return 0;
+}
+
+int gpio_event_output_func(
+	struct gpio_event_input_devs *input_devs, struct gpio_event_info *info,
+	void **data, int func)
+{
+	int ret;
+	int i;
+	struct gpio_event_output_info *oi;
+	oi = container_of(info, struct gpio_event_output_info, info);
+
+	if (func == GPIO_EVENT_FUNC_SUSPEND || func == GPIO_EVENT_FUNC_RESUME)
+		return 0;
+
+	if (func == GPIO_EVENT_FUNC_INIT) {
+		int output_level = !(oi->flags & GPIOEDF_ACTIVE_HIGH);
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			int dev = oi->keymap[i].dev;
+			if (dev >= input_devs->count) {
+				pr_err("gpio_event_output_func: bad device "
+					"index %d >= %d for key code %d\n",
+					dev, input_devs->count,
+					oi->keymap[i].code);
+				ret = -EINVAL;
+				goto err_bad_keymap;
+			}
+			input_set_capability(input_devs->dev[dev], oi->type,
+					     oi->keymap[i].code);
+		}
+
+		for (i = 0; i < oi->keymap_size; i++) {
+			ret = gpio_request(oi->keymap[i].gpio,
+					   "gpio_event_output");
+			if (ret) {
+				pr_err("gpio_event_output_func: gpio_request "
+					"failed for %d\n", oi->keymap[i].gpio);
+				goto err_gpio_request_failed;
+			}
+			ret = gpio_direction_output(oi->keymap[i].gpio,
+						    output_level);
+			if (ret) {
+				pr_err("gpio_event_output_func: "
+					"gpio_direction_output failed for %d\n",
+					oi->keymap[i].gpio);
+				goto err_gpio_direction_output_failed;
+			}
+		}
+		return 0;
+	}
+
+	ret = 0;
+	for (i = oi->keymap_size - 1; i >= 0; i--) {
+err_gpio_direction_output_failed:
+		gpio_free(oi->keymap[i].gpio);
+err_gpio_request_failed:
+		;
+	}
+err_bad_keymap:
+	return ret;
+}
diff --git a/drivers/input/misc/keychord.c b/drivers/input/misc/keychord.c
new file mode 100644
index 0000000..a5ea27a
--- /dev/null
+++ b/drivers/input/misc/keychord.c
@@ -0,0 +1,391 @@
+/*
+ *  drivers/input/misc/keychord.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/keychord.h>
+#include <linux/sched.h>
+
+#define KEYCHORD_NAME		"keychord"
+#define BUFFER_SIZE			16
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Key chord input driver");
+MODULE_SUPPORTED_DEVICE("keychord");
+MODULE_LICENSE("GPL");
+
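+/*
+ * Keychords are passed from userspace as variable-length records packed
+ * back to back; NEXT_KEYCHORD() skips over the current record and its
+ * keycode array to reach the next one.
+ */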
+#define NEXT_KEYCHORD(kc) ((struct input_keychord *) \
+		((char *)kc + sizeof(struct input_keychord) + \
+		kc->count * sizeof(kc->keycodes[0])))
+
+struct keychord_device {
+	struct input_handler	input_handler;
+	int			registered;
+
+	/* list of keychords to monitor */
+	struct input_keychord	*keychords;
+	int			keychord_count;
+
+	/* bitmask of keys contained in our keychords */
+	unsigned long keybit[BITS_TO_LONGS(KEY_CNT)];
+	/* current state of the keys */
+	unsigned long keystate[BITS_TO_LONGS(KEY_CNT)];
+	/* number of keys that are currently pressed */
+	int key_down;
+
+	/* second input_device_id is needed for null termination */
+	struct input_device_id  device_ids[2];
+
+	spinlock_t		lock;
+	wait_queue_head_t	waitq;
+	unsigned char		head;
+	unsigned char		tail;
+	__u16			buff[BUFFER_SIZE];
+};
+
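+/*
+ * A keychord matches only when every one of its keycodes is currently down
+ * and the total number of pressed keys equals the keychord's count.
+ */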
+static int check_keychord(struct keychord_device *kdev,
+		struct input_keychord *keychord)
+{
+	int i;
+
+	if (keychord->count != kdev->key_down)
+		return 0;
+
+	for (i = 0; i < keychord->count; i++) {
+		if (!test_bit(keychord->keycodes[i], kdev->keystate))
+			return 0;
+	}
+
+	/* we have a match */
+	return 1;
+}
+
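+/*
+ * Input core event callback.  Tracks the global key state and, on key
+ * press, checks each configured keychord; a match queues the keychord id
+ * in the circular buffer and wakes any readers.
+ */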
+static void keychord_event(struct input_handle *handle, unsigned int type,
+			   unsigned int code, int value)
+{
+	struct keychord_device *kdev = handle->private;
+	struct input_keychord *keychord;
+	unsigned long flags;
+	int i, got_chord = 0;
+
+	if (type != EV_KEY || code >= KEY_MAX)
+		return;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* do nothing if key state did not change */
+	if (!test_bit(code, kdev->keystate) == !value)
+		goto done;
+	__change_bit(code, kdev->keystate);
+	if (value)
+		kdev->key_down++;
+	else
+		kdev->key_down--;
+
+	/* don't notify on key up */
+	if (!value)
+		goto done;
+	/* ignore this event if it is not one of the keys we are monitoring */
+	if (!test_bit(code, kdev->keybit))
+		goto done;
+
+	keychord = kdev->keychords;
+	if (!keychord)
+		goto done;
+
+	/* check to see if the keyboard state matches any keychords */
+	for (i = 0; i < kdev->keychord_count; i++) {
+		if (check_keychord(kdev, keychord)) {
+			kdev->buff[kdev->head] = keychord->id;
+			kdev->head = (kdev->head + 1) % BUFFER_SIZE;
+			got_chord = 1;
+			break;
+		}
+		/* skip to next keychord */
+		keychord = NEXT_KEYCHORD(keychord);
+	}
+
+done:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (got_chord) {
+		pr_info("keychord: got keychord id %d. Any tasks: %d\n",
+			keychord->id,
+			!list_empty_careful(&kdev->waitq.task_list));
+		wake_up_interruptible(&kdev->waitq);
+	}
+}
+
+static int keychord_connect(struct input_handler *handler,
+					  struct input_dev *dev,
+					  const struct input_device_id *id)
+{
+	int i, ret;
+	struct input_handle *handle;
+	struct keychord_device *kdev =
+		container_of(handler, struct keychord_device, input_handler);
+
+	/*
+	 * ignore this input device if it does not contain any keycodes
+	 * that we are monitoring
+	 */
+	for (i = 0; i < KEY_MAX; i++) {
+		if (test_bit(i, kdev->keybit) && test_bit(i, dev->keybit))
+			break;
+	}
+	if (i == KEY_MAX)
+		return -ENODEV;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return -ENOMEM;
+
+	handle->dev = dev;
+	handle->handler = handler;
+	handle->name = KEYCHORD_NAME;
+	handle->private = kdev;
+
+	ret = input_register_handle(handle);
+	if (ret)
+		goto err_input_register_handle;
+
+	ret = input_open_device(handle);
+	if (ret)
+		goto err_input_open_device;
+
+	pr_info("keychord: using input dev %s for fevent\n", dev->name);
+
+	return 0;
+
+err_input_open_device:
+	input_unregister_handle(handle);
+err_input_register_handle:
+	kfree(handle);
+	return ret;
+}
+
+static void keychord_disconnect(struct input_handle *handle)
+{
+	input_close_device(handle);
+	input_unregister_handle(handle);
+	kfree(handle);
+}
+
+/*
+ * keychord_read is used to read keychord events from the driver
+ */
+static ssize_t keychord_read(struct file *file, char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	__u16   id;
+	int retval;
+	unsigned long flags;
+
+	if (count < sizeof(id))
+		return -EINVAL;
+	count = sizeof(id);
+
+	if (kdev->head == kdev->tail && (file->f_flags & O_NONBLOCK))
+		return -EAGAIN;
+
+	retval = wait_event_interruptible(kdev->waitq,
+			kdev->head != kdev->tail);
+	if (retval)
+		return retval;
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* pop a keychord ID off the queue */
+	id = kdev->buff[kdev->tail];
+	kdev->tail = (kdev->tail + 1) % BUFFER_SIZE;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	if (copy_to_user(buffer, &id, count))
+		return -EFAULT;
+
+	return count;
+}
+
+/*
+ * keychord_write is used to configure the driver: userspace writes a packed
+ * array of struct input_keychord records, each followed by its keycodes.
+ * The new configuration replaces any previous one and re-registers the
+ * input handler.
+ */
+static ssize_t keychord_write(struct file *file, const char __user *buffer,
+		size_t count, loff_t *ppos)
+{
+	struct keychord_device *kdev = file->private_data;
+	struct input_keychord *keychords = 0;
+	struct input_keychord *keychord, *next, *end;
+	int ret, i, key;
+	unsigned long flags;
+
+	if (count < sizeof(struct input_keychord))
+		return -EINVAL;
+	keychords = kzalloc(count, GFP_KERNEL);
+	if (!keychords)
+		return -ENOMEM;
+
+	/* read list of keychords from userspace */
+	if (copy_from_user(keychords, buffer, count)) {
+		kfree(keychords);
+		return -EFAULT;
+	}
+
+	/* unregister handler before changing configuration */
+	if (kdev->registered) {
+		input_unregister_handler(&kdev->input_handler);
+		kdev->registered = 0;
+	}
+
+	spin_lock_irqsave(&kdev->lock, flags);
+	/* clear any existing configuration */
+	kfree(kdev->keychords);
+	kdev->keychords = 0;
+	kdev->keychord_count = 0;
+	kdev->key_down = 0;
+	memset(kdev->keybit, 0, sizeof(kdev->keybit));
+	memset(kdev->keystate, 0, sizeof(kdev->keystate));
+	kdev->head = kdev->tail = 0;
+
+	keychord = keychords;
+	end = (struct input_keychord *)((char *)keychord + count);
+
+	while (keychord < end) {
+		next = NEXT_KEYCHORD(keychord);
+		if (keychord->count <= 0 || next > end) {
+			pr_err("keychord: invalid keycode count %d\n",
+				keychord->count);
+			goto err_unlock_return;
+		}
+		if (keychord->version != KEYCHORD_VERSION) {
+			pr_err("keychord: unsupported version %d\n",
+				keychord->version);
+			goto err_unlock_return;
+		}
+
+		/* keep track of the keys we are monitoring in keybit */
+		for (i = 0; i < keychord->count; i++) {
+			key = keychord->keycodes[i];
+			if (key < 0 || key >= KEY_CNT) {
+				pr_err("keychord: keycode %d out of range\n",
+					key);
+				goto err_unlock_return;
+			}
+			__set_bit(key, kdev->keybit);
+		}
+
+		kdev->keychord_count++;
+		keychord = next;
+	}
+
+	kdev->keychords = keychords;
+	spin_unlock_irqrestore(&kdev->lock, flags);
+
+	ret = input_register_handler(&kdev->input_handler);
+	if (ret) {
+		kfree(keychords);
+		kdev->keychords = 0;
+		return ret;
+	}
+	kdev->registered = 1;
+
+	return count;
+
+err_unlock_return:
+	spin_unlock_irqrestore(&kdev->lock, flags);
+	kfree(keychords);
+	return -EINVAL;
+}
+
+static unsigned int keychord_poll(struct file *file, poll_table *wait)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	poll_wait(file, &kdev->waitq, wait);
+
+	if (kdev->head != kdev->tail)
+		return POLLIN | POLLRDNORM;
+
+	return 0;
+}
+
+static int keychord_open(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev;
+
+	kdev = kzalloc(sizeof(struct keychord_device), GFP_KERNEL);
+	if (!kdev)
+		return -ENOMEM;
+
+	spin_lock_init(&kdev->lock);
+	init_waitqueue_head(&kdev->waitq);
+
+	kdev->input_handler.event = keychord_event;
+	kdev->input_handler.connect = keychord_connect;
+	kdev->input_handler.disconnect = keychord_disconnect;
+	kdev->input_handler.name = KEYCHORD_NAME;
+	kdev->input_handler.id_table = kdev->device_ids;
+
+	kdev->device_ids[0].flags = INPUT_DEVICE_ID_MATCH_EVBIT;
+	__set_bit(EV_KEY, kdev->device_ids[0].evbit);
+
+	file->private_data = kdev;
+
+	return 0;
+}
+
+static int keychord_release(struct inode *inode, struct file *file)
+{
+	struct keychord_device *kdev = file->private_data;
+
+	if (kdev->registered)
+		input_unregister_handler(&kdev->input_handler);
+	kfree(kdev);
+
+	return 0;
+}
+
+static const struct file_operations keychord_fops = {
+	.owner		= THIS_MODULE,
+	.open		= keychord_open,
+	.release	= keychord_release,
+	.read		= keychord_read,
+	.write		= keychord_write,
+	.poll		= keychord_poll,
+};
+
+static struct miscdevice keychord_misc = {
+	.fops		= &keychord_fops,
+	.name		= KEYCHORD_NAME,
+	.minor		= MISC_DYNAMIC_MINOR,
+};
+
+static int __init keychord_init(void)
+{
+	return misc_register(&keychord_misc);
+}
+
+static void __exit keychord_exit(void)
+{
+	misc_deregister(&keychord_misc);
+}
+
+module_init(keychord_init);
+module_exit(keychord_exit);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 8ee54d7..fbab1f1 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -58,6 +58,42 @@
 
 	  If unsure, say N here.
 
+config IOMMU_IO_PGTABLE_FAST
+	bool "Fast ARMv7/v8 Long Descriptor Format"
+	select IOMMU_IO_PGTABLE
+	help
+	  Enable support for a subset of the ARM long descriptor pagetable
+	  format.  This allocator achieves fast performance by pre-allocating
+	  and pre-populating page table memory up front.  It only supports a
+	  32-bit virtual address space.
+
+	  This implementation is mainly optimized for use cases where the
+	  buffers are small (<= 64K), since it only supports 4K page sizes.
+
+config IOMMU_IO_PGTABLE_FAST_SELFTEST
+	bool "Fast IO pgtable selftests"
+	depends on IOMMU_IO_PGTABLE_FAST
+	help
+	  Enable self-tests for "fast" page table allocator. This performs
+	  a series of page-table consistency checks during boot.
+
+	  If unsure, say N here.
+
+config IOMMU_IO_PGTABLE_FAST_PROVE_TLB
+	bool "Prove correctness of TLB maintenance in the Fast DMA mapper"
+	depends on IOMMU_IO_PGTABLE_FAST
+	help
+	  Enables some debug features that help prove correctness of TLB
+	  maintenance routines in the Fast DMA mapper.  This option will
+	  slow things down considerably, so it should only be used in a
+	  debug configuration.  This relies on the ability to set bits in
+	  an invalid page table entry, which is disallowed on some hardware
+	  due to errata.  If you're running on such a platform, this option
+	  can only be used with unit tests.  It will break real use cases.
+
+	  If unsure, say N here.
+
 endmenu
 
 config IOMMU_IOVA
@@ -300,6 +336,7 @@
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select ARM_DMA_USE_IOMMU if ARM
+	select ARM64_DMA_USE_IOMMU if ARM64
 	help
 	  Support for implementations of the ARM System MMU architecture
 	  versions 1 and 2.
@@ -320,6 +357,18 @@
 	  Say Y here if your system includes an IOMMU device implementing
 	  the ARM SMMUv3 architecture.
 
+config QCOM_LAZY_MAPPING
+	bool "Reference counted iommu-mapping support"
+	depends on ION_MSM
+	depends on IOMMU_API
+	help
+	  ION buffers may be shared between several software clients.
+	  Reference counting the mapping may simplify coordination between
+	  these clients, and decrease latency by preventing multiple
+	  map/unmaps of the same region.
+
+	  If unsure, say N here.
+
 config S390_IOMMU
 	def_bool y if S390 && PCI
 	depends on S390 && PCI
@@ -362,4 +411,35 @@
 
 	  if unsure, say N here.
 
+menuconfig IOMMU_DEBUG
+	bool "IOMMU Profiling and Debugging"
+	help
+	  Makes available some additional IOMMU profiling and debugging
+	  options.
+
+if IOMMU_DEBUG
+
+config IOMMU_DEBUG_TRACKING
+	bool "Track key IOMMU events"
+	depends on BROKEN
+	select IOMMU_API
+	help
+	  Enables additional debug tracking in the IOMMU framework code.
+	  Tracking information and tests can be accessed through various
+	  debugfs files.
+
+	  Say Y here if you need to debug IOMMU issues and are okay with
+	  the performance penalty of the tracking.
+
+config IOMMU_TESTS
+	bool "Interactive IOMMU performance/functional tests"
+	select IOMMU_API
+	help
+	  Enables a suite of IOMMU unit tests.  The tests are runnable
+	  through debugfs.  Unlike the IOMMU_DEBUG_TRACKING option, the
+	  impact of enabling this option on overall system performance
+	  should be minimal.
+
+endif # IOMMU_DEBUG
+
 endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 195f7b9..ee0a86b 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -2,11 +2,14 @@
 obj-$(CONFIG_IOMMU_API) += iommu-traces.o
 obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o
 obj-$(CONFIG_IOMMU_DMA) += dma-iommu.o
+obj-$(CONFIG_QCOM_LAZY_MAPPING) += msm_dma_iommu_mapping.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) += io-pgtable-arm-v7s.o
 obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
 obj-$(CONFIG_IOMMU_IOVA) += iova.o
+obj-$(CONFIG_IOMMU_IO_PGTABLE_FAST) += io-pgtable-fast.o dma-mapping-fast.o
 obj-$(CONFIG_OF_IOMMU)	+= of_iommu.o
+obj-$(CONFIG_IOMMU_DEBUG) += iommu-debug.o
 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8f72814..e8d535f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -28,7 +28,6 @@
 
 #define pr_fmt(fmt) "arm-smmu: " fmt
 
-#include <linux/atomic.h>
 #include <linux/delay.h>
 #include <linux/dma-iommu.h>
 #include <linux/dma-mapping.h>
@@ -41,20 +40,28 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/of_device.h>
-#include <linux/of_iommu.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/of_platform.h>
+#include <linux/msm-bus.h>
+#include <dt-bindings/msm/msm-bus-ids.h>
 
 #include <linux/amba/bus.h>
 
 #include "io-pgtable.h"
 
+/* Maximum number of stream IDs assigned to a single device */
+#define MAX_MASTER_STREAMIDS		128
+
 /* Maximum number of context banks per SMMU */
 #define ARM_SMMU_MAX_CBS		128
 
+/* Maximum number of mapping groups per SMMU */
+#define ARM_SMMU_MAX_SMRS		128
+
 /* SMMU global address space */
 #define ARM_SMMU_GR0(smmu)		((smmu)->base)
 #define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
@@ -156,33 +163,24 @@
 #define ARM_SMMU_GR0_sTLBGSYNC		0x70
 #define ARM_SMMU_GR0_sTLBGSTATUS	0x74
 #define sTLBGSTATUS_GSACTIVE		(1 << 0)
-#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
+#define TLB_LOOP_TIMEOUT		500000	/* 500ms */
 
 /* Stream mapping registers */
 #define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
 #define SMR_VALID			(1 << 31)
 #define SMR_MASK_SHIFT			16
+#define SMR_MASK_MASK			0x7fff
 #define SMR_ID_SHIFT			0
+#define SMR_ID_MASK			0x7fff
 
 #define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
 #define S2CR_CBNDX_SHIFT		0
 #define S2CR_CBNDX_MASK			0xff
 #define S2CR_TYPE_SHIFT			16
 #define S2CR_TYPE_MASK			0x3
-enum arm_smmu_s2cr_type {
-	S2CR_TYPE_TRANS,
-	S2CR_TYPE_BYPASS,
-	S2CR_TYPE_FAULT,
-};
-
-#define S2CR_PRIVCFG_SHIFT		24
-#define S2CR_PRIVCFG_MASK		0x3
-enum arm_smmu_s2cr_privcfg {
-	S2CR_PRIVCFG_DEFAULT,
-	S2CR_PRIVCFG_DIPAN,
-	S2CR_PRIVCFG_UNPRIV,
-	S2CR_PRIVCFG_PRIV,
-};
+#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
+#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
 
 /* Context bank attribute registers */
 #define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
@@ -203,6 +201,9 @@
 #define CBAR_IRPTNDX_SHIFT		24
 #define CBAR_IRPTNDX_MASK		0xff
 
+#define ARM_SMMU_GR1_CBFRSYNRA(n)	(0x400 + ((n) << 2))
+#define CBFRSYNRA_SID_MASK		(0xffff)
+
 #define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
 #define CBA2R_RW64_32BIT		(0 << 0)
 #define CBA2R_RW64_64BIT		(1 << 0)
@@ -225,6 +226,7 @@
 #define ARM_SMMU_CB_S1_MAIR1		0x3c
 #define ARM_SMMU_CB_PAR			0x50
 #define ARM_SMMU_CB_FSR			0x58
+#define ARM_SMMU_CB_FSRRESTORE		0x5c
 #define ARM_SMMU_CB_FAR			0x60
 #define ARM_SMMU_CB_FSYNR0		0x68
 #define ARM_SMMU_CB_S1_TLBIVA		0x600
@@ -232,6 +234,9 @@
 #define ARM_SMMU_CB_S1_TLBIVAL		0x620
 #define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
 #define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
+#define ARM_SMMU_CB_TLBSYNC		0x7f0
+#define ARM_SMMU_CB_TLBSTATUS		0x7f4
+#define TLBSTATUS_SACTIVE		(1 << 0)
 #define ARM_SMMU_CB_ATS1PR		0x800
 #define ARM_SMMU_CB_ATSR		0x8f0
 
@@ -243,11 +248,16 @@
 #define SCTLR_AFE			(1 << 2)
 #define SCTLR_TRE			(1 << 1)
 #define SCTLR_M				(1 << 0)
+#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)
 
 #define ARM_MMU500_ACTLR_CPRE		(1 << 1)
 
 #define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
 
+#define ARM_SMMU_IMPL_DEF0(smmu) \
+	((smmu)->base + (2 * (1 << (smmu)->pgshift)))
+#define ARM_SMMU_IMPL_DEF1(smmu) \
+	((smmu)->base + (6 * (1 << (smmu)->pgshift)))
 #define CB_PAR_F			(1 << 0)
 
 #define ATSR_ACTIVE			(1 << 0)
@@ -297,37 +307,70 @@
 	GENERIC_SMMU,
 	ARM_MMU500,
 	CAVIUM_SMMUV2,
+	QCOM_SMMUV2,
+	QCOM_SMMUV500,
 };
 
-struct arm_smmu_s2cr {
-	struct iommu_group		*group;
-	int				count;
-	enum arm_smmu_s2cr_type		type;
-	enum arm_smmu_s2cr_privcfg	privcfg;
-	u8				cbndx;
+struct arm_smmu_device;
+struct arm_smmu_arch_ops {
+	int (*init)(struct arm_smmu_device *smmu);
+	void (*device_reset)(struct arm_smmu_device *smmu);
+	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
+					 dma_addr_t iova);
+	void (*iova_to_phys_fault)(struct iommu_domain *domain,
+				dma_addr_t iova, phys_addr_t *phys1,
+				phys_addr_t *phys_post_tlbiall);
 };
 
-#define s2cr_init_val (struct arm_smmu_s2cr){				\
-	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
-}
+struct arm_smmu_impl_def_reg {
+	u32 offset;
+	u32 value;
+};
 
 struct arm_smmu_smr {
+	u8				idx;
 	u16				mask;
 	u16				id;
-	bool				valid;
 };
 
 struct arm_smmu_master_cfg {
-	struct arm_smmu_device		*smmu;
-	s16				smendx[];
+	int				num_streamids;
+	u16				streamids[MAX_MASTER_STREAMIDS];
+	struct arm_smmu_smr		*smrs;
 };
-#define INVALID_SMENDX			-1
-#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
-#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
-#define fwspec_smendx(fw, i) \
-	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
-#define for_each_cfg_sme(fw, i, idx) \
-	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
+
+struct arm_smmu_master {
+	struct device_node		*of_node;
+	struct rb_node			node;
+	struct arm_smmu_master_cfg	cfg;
+};
+
+/*
+ * Describes resources required for on/off power operation.
+ * Separate reference count is provided for atomic/nonatomic
+ * operations.
+ */
+struct arm_smmu_power_resources {
+	struct platform_device		*pdev;
+	struct device			*dev;
+
+	struct clk			**clocks;
+	int				num_clocks;
+
+	struct regulator_bulk_data	*gdscs;
+	int				num_gdscs;
+
+	uint32_t			bus_client;
+	struct msm_bus_scale_pdata	*bus_dt_data;
+
+	/* Protects power_count */
+	struct mutex			power_lock;
+	int				power_count;
+
+	/* Protects clock_refs_count */
+	spinlock_t			clock_refs_lock;
+	int				clock_refs_count;
+};
 
 struct arm_smmu_device {
 	struct device			*dev;
@@ -351,6 +394,9 @@
 	u32				features;
 
 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
+#define ARM_SMMU_OPT_FATAL_ASF		(1 << 1)
+#define ARM_SMMU_OPT_SKIP_INIT		(1 << 2)
+#define ARM_SMMU_OPT_DYNAMIC		(1 << 3)
 	u32				options;
 	enum arm_smmu_arch_version	version;
 	enum arm_smmu_implementation	model;
@@ -361,11 +407,7 @@
 	atomic_t			irptndx;
 
 	u32				num_mapping_groups;
-	u16				streamid_mask;
-	u16				smr_mask_mask;
-	struct arm_smmu_smr		*smrs;
-	struct arm_smmu_s2cr		*s2crs;
-	struct mutex			stream_map_mutex;
+	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
 
 	unsigned long			va_size;
 	unsigned long			ipa_size;
@@ -376,7 +418,24 @@
 	u32				num_context_irqs;
 	unsigned int			*irqs;
 
+	struct list_head		list;
+	struct rb_root			masters;
+
 	u32				cavium_id_base; /* Specific to Cavium */
+	/* Specific to QCOM */
+	struct arm_smmu_impl_def_reg	*impl_def_attach_registers;
+	unsigned int			num_impl_def_attach_registers;
+
+	struct arm_smmu_power_resources *pwr;
+
+	spinlock_t			atos_lock;
+
+	/* protects idr */
+	struct mutex			idr_mutex;
+	struct idr			asid_idr;
+
+	struct arm_smmu_arch_ops	*arch_ops;
+	void				*archdata;
 };
 
 enum arm_smmu_context_fmt {
@@ -390,11 +449,20 @@
 	u8				cbndx;
 	u8				irptndx;
 	u32				cbar;
+	u32				procid;
+	u16				asid;
 	enum arm_smmu_context_fmt	fmt;
 };
 #define INVALID_IRPTNDX			0xff
+#define INVALID_CBNDX			0xff
+#define INVALID_ASID			0xffff
+/*
+ * In V7L and V8L with TTBCR2.AS == 0, ASID is 8 bits.
+ * V8L 16 with TTBCR2.AS == 1 (16 bit ASID) isn't supported yet.
+ */
+#define MAX_ASID			0xff
 
-#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
+#define ARM_SMMU_CB_ASID(smmu, cfg)		((cfg)->asid)
 #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
 
 enum arm_smmu_domain_stage {
@@ -403,16 +471,38 @@
 	ARM_SMMU_DOMAIN_NESTED,
 };
 
+struct arm_smmu_pte_info {
+	void *virt_addr;
+	size_t size;
+	struct list_head entry;
+};
+
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
+	struct io_pgtable_cfg		pgtbl_cfg;
 	spinlock_t			pgtbl_lock;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
+	u32 attributes;
+	u32				secure_vmid;
+	struct list_head		pte_info_list;
+	struct list_head		unassign_list;
+	struct mutex			assign_lock;
+	struct list_head		secure_pool_list;
 	struct iommu_domain		domain;
 };
 
+struct arm_smmu_phandle_args {
+	struct device_node *np;
+	int args_count;
+	uint32_t args[MAX_MASTER_STREAMIDS];
+};
+
+static DEFINE_SPINLOCK(arm_smmu_devices_lock);
+static LIST_HEAD(arm_smmu_devices);
+
 struct arm_smmu_option_prop {
 	u32 opt;
 	const char *prop;
@@ -420,13 +510,28 @@
 
 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
 
-static bool using_legacy_binding, using_generic_binding;
-
 static struct arm_smmu_option_prop arm_smmu_options[] = {
 	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
+	{ ARM_SMMU_OPT_FATAL_ASF, "qcom,fatal-asf" },
+	{ ARM_SMMU_OPT_SKIP_INIT, "qcom,skip-init" },
+	{ ARM_SMMU_OPT_DYNAMIC, "qcom,dynamic" },
 	{ 0, NULL},
 };
 
+static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
+					dma_addr_t iova);
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+					      dma_addr_t iova);
+static void arm_smmu_destroy_domain_context(struct iommu_domain *domain);
+
+static int arm_smmu_prepare_pgtable(void *addr, void *cookie);
+static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size);
+static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain);
+static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain);
+
+static int arm_smmu_arch_init(struct arm_smmu_device *smmu);
+static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu);
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
@@ -440,12 +545,36 @@
 		if (of_property_read_bool(smmu->dev->of_node,
 						arm_smmu_options[i].prop)) {
 			smmu->options |= arm_smmu_options[i].opt;
-			dev_notice(smmu->dev, "option %s\n",
+			dev_dbg(smmu->dev, "option %s\n",
 				arm_smmu_options[i].prop);
 		}
 	} while (arm_smmu_options[++i].opt);
 }
 
+static bool is_dynamic_domain(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	return !!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC));
+}
+
+static bool arm_smmu_is_domain_secure(struct arm_smmu_domain *smmu_domain)
+{
+	return (smmu_domain->secure_vmid != VMID_INVAL);
+}
+
+static void arm_smmu_secure_domain_lock(struct arm_smmu_domain *smmu_domain)
+{
+	if (arm_smmu_is_domain_secure(smmu_domain))
+		mutex_lock(&smmu_domain->assign_lock);
+}
+
+static void arm_smmu_secure_domain_unlock(struct arm_smmu_domain *smmu_domain)
+{
+	if (arm_smmu_is_domain_secure(smmu_domain))
+		mutex_unlock(&smmu_domain->assign_lock);
+}
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
 	if (dev_is_pci(dev)) {
@@ -453,86 +582,206 @@
 
 		while (!pci_is_root_bus(bus))
 			bus = bus->parent;
-		return of_node_get(bus->bridge->parent->of_node);
+		return bus->bridge->parent->of_node;
 	}
 
-	return of_node_get(dev->of_node);
+	return dev->of_node;
 }
 
-static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
+static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
+						struct device_node *dev_node)
 {
-	*((__be32 *)data) = cpu_to_be32(alias);
-	return 0; /* Continue walking */
-}
+	struct rb_node *node = smmu->masters.rb_node;
 
-static int __find_legacy_master_phandle(struct device *dev, void *data)
-{
-	struct of_phandle_iterator *it = *(void **)data;
-	struct device_node *np = it->node;
-	int err;
+	while (node) {
+		struct arm_smmu_master *master;
 
-	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
-			    "#stream-id-cells", 0)
-		if (it->node == np) {
-			*(void **)data = dev;
-			return 1;
-		}
-	it->node = np;
-	return err == -ENOENT ? 0 : err;
-}
+		master = container_of(node, struct arm_smmu_master, node);
 
-static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
-
-static int arm_smmu_register_legacy_master(struct device *dev,
-					   struct arm_smmu_device **smmu)
-{
-	struct device *smmu_dev;
-	struct device_node *np;
-	struct of_phandle_iterator it;
-	void *data = &it;
-	u32 *sids;
-	__be32 pci_sid;
-	int err;
-
-	np = dev_get_dev_node(dev);
-	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
-		of_node_put(np);
-		return -ENODEV;
+		if (dev_node < master->of_node)
+			node = node->rb_left;
+		else if (dev_node > master->of_node)
+			node = node->rb_right;
+		else
+			return master;
 	}
 
-	it.node = np;
-	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
-				     __find_legacy_master_phandle);
-	smmu_dev = data;
-	of_node_put(np);
-	if (err == 0)
-		return -ENODEV;
-	if (err < 0)
-		return err;
+	return NULL;
+}
 
-	if (dev_is_pci(dev)) {
-		/* "mmu-masters" assumes Stream ID == Requester ID */
-		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
-				       &pci_sid);
-		it.cur = &pci_sid;
-		it.cur_count = 1;
+static struct arm_smmu_master_cfg *
+find_smmu_master_cfg(struct device *dev)
+{
+	struct arm_smmu_master_cfg *cfg = NULL;
+	struct iommu_group *group = iommu_group_get(dev);
+
+	if (group) {
+		cfg = iommu_group_get_iommudata(group);
+		iommu_group_put(group);
 	}
 
-	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
-				&arm_smmu_ops);
-	if (err)
-		return err;
+	return cfg;
+}
 
-	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
-	if (!sids)
+static int insert_smmu_master(struct arm_smmu_device *smmu,
+			      struct arm_smmu_master *master)
+{
+	struct rb_node **new, *parent;
+
+	new = &smmu->masters.rb_node;
+	parent = NULL;
+	while (*new) {
+		struct arm_smmu_master *this
+			= container_of(*new, struct arm_smmu_master, node);
+
+		parent = *new;
+		if (master->of_node < this->of_node)
+			new = &((*new)->rb_left);
+		else if (master->of_node > this->of_node)
+			new = &((*new)->rb_right);
+		else
+			return -EEXIST;
+	}
+
+	rb_link_node(&master->node, parent, new);
+	rb_insert_color(&master->node, &smmu->masters);
+	return 0;
+}
+
+struct iommus_entry {
+	struct list_head list;
+	struct device_node *node;
+	u16 streamids[MAX_MASTER_STREAMIDS];
+	int num_sids;
+};
+
+static int register_smmu_master(struct arm_smmu_device *smmu,
+				struct iommus_entry *entry)
+{
+	int i;
+	struct arm_smmu_master *master;
+	struct device *dev = smmu->dev;
+
+	master = find_smmu_master(smmu, entry->node);
+	if (master) {
+		dev_err(dev,
+			"rejecting multiple registrations for master device %s\n",
+			entry->node->name);
+		return -EBUSY;
+	}
+
+	if (entry->num_sids > MAX_MASTER_STREAMIDS) {
+		dev_err(dev,
+			"reached maximum number (%d) of stream IDs for master device %s\n",
+			MAX_MASTER_STREAMIDS, entry->node->name);
+		return -ENOSPC;
+	}
+
+	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
+	if (!master)
 		return -ENOMEM;
 
-	*smmu = dev_get_drvdata(smmu_dev);
-	of_phandle_iterator_args(&it, sids, it.cur_count);
-	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
-	kfree(sids);
-	return err;
+	master->of_node			= entry->node;
+	master->cfg.num_streamids	= entry->num_sids;
+
+	for (i = 0; i < master->cfg.num_streamids; ++i) {
+		u16 streamid = entry->streamids[i];
+
+		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
+		     (streamid >= smmu->num_mapping_groups)) {
+			dev_err(dev,
+				"stream ID for master device %s greater than maximum allowed (%d)\n",
+				entry->node->name, smmu->num_mapping_groups);
+			return -ERANGE;
+		}
+		master->cfg.streamids[i] = streamid;
+	}
+	return insert_smmu_master(smmu, master);
+}
+
+static int arm_smmu_parse_iommus_properties(struct arm_smmu_device *smmu,
+					int *num_masters)
+{
+	struct of_phandle_args iommuspec;
+	struct device_node *master;
+
+	*num_masters = 0;
+
+	for_each_node_with_property(master, "iommus") {
+		int arg_ind = 0;
+		struct iommus_entry *entry, *n;
+		LIST_HEAD(iommus);
+
+		while (!of_parse_phandle_with_args(
+				master, "iommus", "#iommu-cells",
+				arg_ind, &iommuspec)) {
+			if (iommuspec.np != smmu->dev->of_node) {
+				arg_ind++;
+				continue;
+			}
+
+			list_for_each_entry(entry, &iommus, list)
+				if (entry->node == master)
+					break;
+			if (&entry->list == &iommus) {
+				entry = devm_kzalloc(smmu->dev, sizeof(*entry),
+						GFP_KERNEL);
+				if (!entry)
+					return -ENOMEM;
+				entry->node = master;
+				list_add(&entry->list, &iommus);
+			}
+			switch (iommuspec.args_count) {
+			case 0:
+				/*
+				 * For pci-e devices the SIDs are provided
+				 * at device attach time.
+				 */
+				break;
+			case 1:
+				entry->num_sids++;
+				entry->streamids[entry->num_sids - 1]
+					= iommuspec.args[0];
+				break;
+			default:
+				dev_err(smmu->dev, "iommus property has wrong #iommu-cells");
+				return -EINVAL;
+			}
+			arg_ind++;
+		}
+
+		list_for_each_entry_safe(entry, n, &iommus, list) {
+			int rc = register_smmu_master(smmu, entry);
+
+			if (rc) {
+				dev_err(smmu->dev, "Couldn't register %s\n",
+					entry->node->name);
+			} else {
+				(*num_masters)++;
+			}
+			list_del(&entry->list);
+			devm_kfree(smmu->dev, entry);
+		}
+	}
+
+	return 0;
+}
+
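+/*
+ * Return the SMMU instance that has @dev registered as a master, or NULL
+ * if the device is not behind any of the probed SMMUs.
+ */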
+static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
+{
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_master *master = NULL;
+	struct device_node *dev_node = dev_get_dev_node(dev);
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(smmu, &arm_smmu_devices, list) {
+		master = find_smmu_master(smmu, dev_node);
+		if (master)
+			break;
+	}
+	spin_unlock(&arm_smmu_devices_lock);
+
+	return master ? smmu : NULL;
 }
 
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
@@ -553,7 +802,243 @@
 	clear_bit(idx, map);
 }
 
+static int arm_smmu_prepare_clocks(struct arm_smmu_power_resources *pwr)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < pwr->num_clocks; ++i) {
+		ret = clk_prepare(pwr->clocks[i]);
+		if (ret) {
+			dev_err(pwr->dev, "Couldn't prepare clock #%d\n", i);
+			while (i--)
+				clk_unprepare(pwr->clocks[i]);
+			break;
+		}
+	}
+	return ret;
+}
+
+static void arm_smmu_unprepare_clocks(struct arm_smmu_power_resources *pwr)
+{
+	int i;
+
+	for (i = pwr->num_clocks; i; --i)
+		clk_unprepare(pwr->clocks[i - 1]);
+}
+
+static int arm_smmu_enable_clocks(struct arm_smmu_power_resources *pwr)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < pwr->num_clocks; ++i) {
+		ret = clk_enable(pwr->clocks[i]);
+		if (ret) {
+			dev_err(pwr->dev, "Couldn't enable clock #%d\n", i);
+			while (i--)
+				clk_disable(pwr->clocks[i]);
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static void arm_smmu_disable_clocks(struct arm_smmu_power_resources *pwr)
+{
+	int i;
+
+	for (i = pwr->num_clocks; i; --i)
+		clk_disable(pwr->clocks[i - 1]);
+}
+
+static int arm_smmu_request_bus(struct arm_smmu_power_resources *pwr)
+{
+	if (!pwr->bus_client)
+		return 0;
+	return msm_bus_scale_client_update_request(pwr->bus_client, 1);
+}
+
+static void arm_smmu_unrequest_bus(struct arm_smmu_power_resources *pwr)
+{
+	if (!pwr->bus_client)
+		return;
+	WARN_ON(msm_bus_scale_client_update_request(pwr->bus_client, 0));
+}
+
+/* Clocks must be prepared before this (arm_smmu_prepare_clocks) */
+static int arm_smmu_power_on_atomic(struct arm_smmu_power_resources *pwr)
+{
+	int ret = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
+	if (pwr->clock_refs_count > 0) {
+		pwr->clock_refs_count++;
+		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
+		return 0;
+	}
+
+	ret = arm_smmu_enable_clocks(pwr);
+	if (!ret)
+		pwr->clock_refs_count = 1;
+
+	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
+	return ret;
+}
+
+/* Clocks should be unprepared after this (arm_smmu_unprepare_clocks) */
+static void arm_smmu_power_off_atomic(struct arm_smmu_power_resources *pwr)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&pwr->clock_refs_lock, flags);
+	if (pwr->clock_refs_count == 0) {
+		WARN(1, "%s: bad clock_ref_count\n", dev_name(pwr->dev));
+		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
+		return;
+
+	} else if (pwr->clock_refs_count > 1) {
+		pwr->clock_refs_count--;
+		spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
+		return;
+	}
+
+	arm_smmu_disable_clocks(pwr);
+
+	pwr->clock_refs_count = 0;
+	spin_unlock_irqrestore(&pwr->clock_refs_lock, flags);
+}
+
+static int arm_smmu_power_on_slow(struct arm_smmu_power_resources *pwr)
+{
+	int ret;
+
+	mutex_lock(&pwr->power_lock);
+	if (pwr->power_count > 0) {
+		pwr->power_count += 1;
+		mutex_unlock(&pwr->power_lock);
+		return 0;
+	}
+
+	ret = regulator_bulk_enable(pwr->num_gdscs, pwr->gdscs);
+	if (ret)
+		goto out_unlock;
+
+	ret = arm_smmu_request_bus(pwr);
+	if (ret)
+		goto out_disable_regulators;
+
+	ret = arm_smmu_prepare_clocks(pwr);
+	if (ret)
+		goto out_disable_bus;
+
+	pwr->power_count = 1;
+	mutex_unlock(&pwr->power_lock);
+	return 0;
+
+out_disable_bus:
+	arm_smmu_unrequest_bus(pwr);
+out_disable_regulators:
+	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
+out_unlock:
+	mutex_unlock(&pwr->power_lock);
+	return ret;
+}
+
+static void arm_smmu_power_off_slow(struct arm_smmu_power_resources *pwr)
+{
+	mutex_lock(&pwr->power_lock);
+	if (pwr->power_count == 0) {
+		WARN(1, "%s: Bad power count\n", dev_name(pwr->dev));
+		mutex_unlock(&pwr->power_lock);
+		return;
+
+	} else if (pwr->power_count > 1) {
+		pwr->power_count--;
+		mutex_unlock(&pwr->power_lock);
+		return;
+	}
+
+	arm_smmu_unprepare_clocks(pwr);
+	arm_smmu_unrequest_bus(pwr);
+	regulator_bulk_disable(pwr->num_gdscs, pwr->gdscs);
+
+	mutex_unlock(&pwr->power_lock);
+}
+
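+/*
+ * Power control is split into a "slow" half (regulators, bus votes and
+ * clk_prepare, serialised by a mutex) and an "atomic" half (clk_enable,
+ * refcounted under a spinlock).  arm_smmu_power_on() takes both votes and
+ * may sleep; callers in atomic context must already hold a slow-path vote
+ * and use arm_smmu_power_on_atomic() directly.
+ */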
+static int arm_smmu_power_on(struct arm_smmu_power_resources *pwr)
+{
+	int ret;
+
+	ret = arm_smmu_power_on_slow(pwr);
+	if (ret)
+		return ret;
+
+	ret = arm_smmu_power_on_atomic(pwr);
+	if (ret)
+		goto out_disable;
+
+	return 0;
+
+out_disable:
+	arm_smmu_power_off_slow(pwr);
+	return ret;
+}
+
+static void arm_smmu_power_off(struct arm_smmu_power_resources *pwr)
+{
+	arm_smmu_power_off_atomic(pwr);
+	arm_smmu_power_off_slow(pwr);
+}
+
+/*
+ * Must be used instead of arm_smmu_power_on if it may be called from
+ * atomic context
+ */
+static int arm_smmu_domain_power_on(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+
+	if (atomic_domain)
+		return arm_smmu_power_on_atomic(smmu->pwr);
+
+	return arm_smmu_power_on(smmu->pwr);
+}
+
+/*
+ * Must be used instead of arm_smmu_power_on if it may be called from
+ * atomic context
+ */
+static void arm_smmu_domain_power_off(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+
+	if (atomic_domain) {
+		arm_smmu_power_off_atomic(smmu->pwr);
+		return;
+	}
+
+	arm_smmu_power_off(smmu->pwr);
+}
+
 /* Wait for any pending TLB invalidations to complete */
+static void arm_smmu_tlb_sync_cb(struct arm_smmu_device *smmu,
+				int cbndx)
+{
+	void __iomem *base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cbndx);
+	u32 val;
+
+	writel_relaxed(0, base + ARM_SMMU_CB_TLBSYNC);
+	if (readl_poll_timeout_atomic(base + ARM_SMMU_CB_TLBSTATUS, val,
+				      !(val & TLBSTATUS_SACTIVE),
+				      0, TLB_LOOP_TIMEOUT))
+		dev_err(smmu->dev, "TLBSYNC timeout!\n");
+}
+
 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
 {
 	int count = 0;
@@ -575,9 +1060,10 @@
 static void arm_smmu_tlb_sync(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	__arm_smmu_tlb_sync(smmu_domain->smmu);
+	arm_smmu_tlb_sync_cb(smmu_domain->smmu, smmu_domain->cfg.cbndx);
 }
 
+/* Must be called with clocks/regulators enabled */
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
@@ -590,13 +1076,13 @@
 		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
 			       base + ARM_SMMU_CB_S1_TLBIASID);
+		arm_smmu_tlb_sync_cb(smmu, cfg->cbndx);
 	} else {
 		base = ARM_SMMU_GR0(smmu);
 		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
 			       base + ARM_SMMU_GR0_TLBIVMID);
+		__arm_smmu_tlb_sync(smmu);
 	}
-
-	__arm_smmu_tlb_sync(smmu);
 }
 
 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
@@ -642,37 +1128,274 @@
 	}
 }
 
+struct arm_smmu_secure_pool_chunk {
+	void *addr;
+	size_t size;
+	struct list_head list;
+};
+
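+/*
+ * Page-table pages for secure domains are not freed immediately: they are
+ * kept in a per-domain pool (zeroed on reuse) so that already hyp-assigned
+ * pages can be recycled, and are only unassigned and released when the
+ * domain is destroyed.
+ */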
+static void *arm_smmu_secure_pool_remove(struct arm_smmu_domain *smmu_domain,
+					size_t size)
+{
+	struct arm_smmu_secure_pool_chunk *it;
+
+	list_for_each_entry(it, &smmu_domain->secure_pool_list, list) {
+		if (it->size == size) {
+			void *addr = it->addr;
+
+			list_del(&it->list);
+			kfree(it);
+			return addr;
+		}
+	}
+
+	return NULL;
+}
+
+static int arm_smmu_secure_pool_add(struct arm_smmu_domain *smmu_domain,
+				     void *addr, size_t size)
+{
+	struct arm_smmu_secure_pool_chunk *chunk;
+
+	chunk = kmalloc(sizeof(*chunk), GFP_ATOMIC);
+	if (!chunk)
+		return -ENOMEM;
+
+	chunk->addr = addr;
+	chunk->size = size;
+	memset(addr, 0, size);
+	list_add(&chunk->list, &smmu_domain->secure_pool_list);
+
+	return 0;
+}
+
+static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_secure_pool_chunk *it, *i;
+
+	list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
+		arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
+		/* pages will be freed later (after being unassigned) */
+		kfree(it);
+	}
+}
+
+static void *arm_smmu_alloc_pages_exact(void *cookie,
+					size_t size, gfp_t gfp_mask)
+{
+	int ret;
+	void *page;
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	if (!arm_smmu_is_domain_secure(smmu_domain))
+		return alloc_pages_exact(size, gfp_mask);
+
+	page = arm_smmu_secure_pool_remove(smmu_domain, size);
+	if (page)
+		return page;
+
+	page = alloc_pages_exact(size, gfp_mask);
+	if (page) {
+		ret = arm_smmu_prepare_pgtable(page, cookie);
+		if (ret) {
+			free_pages_exact(page, size);
+			return NULL;
+		}
+	}
+
+	return page;
+}
+
+static void arm_smmu_free_pages_exact(void *cookie, void *virt, size_t size)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+
+	if (!arm_smmu_is_domain_secure(smmu_domain)) {
+		free_pages_exact(virt, size);
+		return;
+	}
+
+	if (arm_smmu_secure_pool_add(smmu_domain, virt, size))
+		arm_smmu_unprepare_pgtable(smmu_domain, virt, size);
+}
+
 static struct iommu_gather_ops arm_smmu_gather_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context,
 	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
 	.tlb_sync	= arm_smmu_tlb_sync,
+	.alloc_pages_exact = arm_smmu_alloc_pages_exact,
+	.free_pages_exact = arm_smmu_free_pages_exact,
 };
 
+static phys_addr_t arm_smmu_verify_fault(struct iommu_domain *domain,
+					 dma_addr_t iova, u32 fsr)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu;
+	phys_addr_t phys;
+	phys_addr_t phys_post_tlbiall;
+
+	smmu = smmu_domain->smmu;
+
+	if (smmu->arch_ops && smmu->arch_ops->iova_to_phys_fault) {
+		smmu->arch_ops->iova_to_phys_fault(domain, iova, &phys,
+						   &phys_post_tlbiall);
+	} else {
+		phys = arm_smmu_iova_to_phys_hard(domain, iova);
+		arm_smmu_tlb_inv_context(smmu_domain);
+		phys_post_tlbiall = arm_smmu_iova_to_phys_hard(domain, iova);
+	}
+
+	if (phys != phys_post_tlbiall) {
+		dev_err(smmu->dev,
+			"ATOS results differed across TLBIALL...\n"
+			"Before: %pa After: %pa\n", &phys, &phys_post_tlbiall);
+	}
+	if (!phys_post_tlbiall) {
+		dev_err(smmu->dev,
+			"ATOS still failed. If the page tables look good (check the software table walk) then hardware might be misbehaving.\n");
+	}
+
+	return phys_post_tlbiall;
+}
+
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
 {
-	u32 fsr, fsynr;
+	int flags, ret, tmp;
+	u32 fsr, fsynr, resume;
 	unsigned long iova;
 	struct iommu_domain *domain = dev;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	void __iomem *cb_base;
+	void __iomem *gr1_base;
+	bool fatal_asf = smmu->options & ARM_SMMU_OPT_FATAL_ASF;
+	phys_addr_t phys_soft;
+	u32 frsynra;
+	bool non_fatal_fault = !!(smmu_domain->attributes &
+					(1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
 
+	static DEFINE_RATELIMIT_STATE(_rs,
+				      DEFAULT_RATELIMIT_INTERVAL,
+				      DEFAULT_RATELIMIT_BURST);
+
+	ret = arm_smmu_power_on(smmu->pwr);
+	if (ret)
+		return IRQ_NONE;
+
+	gr1_base = ARM_SMMU_GR1(smmu);
 	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
 	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
 
-	if (!(fsr & FSR_FAULT))
-		return IRQ_NONE;
+	if (!(fsr & FSR_FAULT)) {
+		ret = IRQ_NONE;
+		goto out_power_off;
+	}
+
+	if (fatal_asf && (fsr & FSR_ASF)) {
+		dev_err(smmu->dev,
+			"Took an address size fault.  Refusing to recover.\n");
+		BUG();
+	}
 
 	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
+	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
+	if (fsr & FSR_TF)
+		flags |= IOMMU_FAULT_TRANSLATION;
+	if (fsr & FSR_PF)
+		flags |= IOMMU_FAULT_PERMISSION;
+	if (fsr & FSR_EF)
+		flags |= IOMMU_FAULT_EXTERNAL;
+	if (fsr & FSR_SS)
+		flags |= IOMMU_FAULT_TRANSACTION_STALLED;
+
 	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
+	phys_soft = arm_smmu_iova_to_phys(domain, iova);
+	frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+	frsynra &= CBFRSYNRA_SID_MASK;
+	tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
+	if (!tmp || (tmp == -EBUSY)) {
+		dev_dbg(smmu->dev,
+			"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
+			iova, fsr, fsynr, cfg->cbndx);
+		dev_dbg(smmu->dev,
+			"soft iova-to-phys=%pa\n", &phys_soft);
+		ret = IRQ_HANDLED;
+		resume = RESUME_TERMINATE;
+	} else {
+		phys_addr_t phys_atos = arm_smmu_verify_fault(domain, iova,
+							      fsr);
+		if (__ratelimit(&_rs)) {
+			dev_err(smmu->dev,
+				"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
+				iova, fsr, fsynr, cfg->cbndx);
+			dev_err(smmu->dev, "FAR    = %016lx\n",
+				(unsigned long)iova);
+			dev_err(smmu->dev,
+				"FSR    = %08x [%s%s%s%s%s%s%s%s%s]\n",
+				fsr,
+				(fsr & 0x02) ? "TF " : "",
+				(fsr & 0x04) ? "AFF " : "",
+				(fsr & 0x08) ? "PF " : "",
+				(fsr & 0x10) ? "EF " : "",
+				(fsr & 0x20) ? "TLBMCF " : "",
+				(fsr & 0x40) ? "TLBLKF " : "",
+				(fsr & 0x80) ? "MHF " : "",
+				(fsr & 0x40000000) ? "SS " : "",
+				(fsr & 0x80000000) ? "MULTI " : "");
+			dev_err(smmu->dev,
+				"soft iova-to-phys=%pa\n", &phys_soft);
+			if (!phys_soft)
+				dev_err(smmu->dev,
+					"SOFTWARE TABLE WALK FAILED! Looks like %s accessed an unmapped address!\n",
+					dev_name(smmu->dev));
+			dev_err(smmu->dev,
+				"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
+			dev_err(smmu->dev, "SID=0x%x\n", frsynra);
+		}
+		ret = IRQ_NONE;
+		resume = RESUME_TERMINATE;
+		if (!non_fatal_fault) {
+			dev_err(smmu->dev,
+				"Unhandled arm-smmu context fault!\n");
+			BUG();
+		}
+	}
 
-	dev_err_ratelimited(smmu->dev,
-	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
-			    fsr, iova, fsynr, cfg->cbndx);
+	/*
+	 * If the client returns -EBUSY, do not clear FSR and do not RESUME
+	 * if stalled. This is required to keep the IOMMU client stalled on
+	 * the outstanding fault. This gives the client a chance to take any
+	 * debug action and then terminate the stalled transaction.
+	 * So, the sequence in case of stall on fault should be:
+	 * 1) Do not clear FSR or write to RESUME here
+	 * 2) Client takes any debug action
+	 * 3) Client terminates the stalled transaction and resumes the IOMMU
+	 * 4) Client clears FSR. The FSR should only be cleared after 3) and
+	 *    not before so that the fault remains outstanding. This ensures
+	 *    SCTLR.HUPCF has the desired effect if subsequent transactions also
+	 *    need to be terminated.
+	 */
+	if (tmp != -EBUSY) {
+		/* Clear the faulting FSR */
+		writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
 
-	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
-	return IRQ_HANDLED;
+		/*
+		 * Barrier required to ensure that the FSR is cleared
+		 * before resuming SMMU operation
+		 */
+		wmb();
+
+		/* Retry or terminate any stalled transactions */
+		if (fsr & FSR_SS)
+			writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
+	}
+
+out_power_off:
+	arm_smmu_power_off(smmu->pwr);
+
+	return ret;
 }
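+/*
+ * Illustration of the stall-on-fault contract above (a sketch only, not
+ * part of this driver; all names below are hypothetical): a client that
+ * wants to inspect state before the stalled transaction is terminated can
+ * install a handler with iommu_set_fault_handler() and return -EBUSY,
+ * which leaves FSR uncleared and the transaction stalled:
+ *
+ *	static int my_client_fault(struct iommu_domain *domain,
+ *				   struct device *dev, unsigned long iova,
+ *				   int flags, void *token)
+ *	{
+ *		struct my_client *c = token;
+ *
+ *		dev_err(dev, "fault at IOVA 0x%lx, flags 0x%x\n", iova, flags);
+ *		schedule_work(&c->fault_dump_work);
+ *		return -EBUSY;
+ *	}
+ *
+ * The work item would then terminate or retry the transaction via RESUME
+ * and only afterwards clear FSR, matching steps 3) and 4) above.
+ */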
 
 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
@@ -681,13 +1404,18 @@
 	struct arm_smmu_device *smmu = dev;
 	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
 
+	if (arm_smmu_power_on(smmu->pwr))
+		return IRQ_NONE;
+
 	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
 	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
 	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
 	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
 
-	if (!gfsr)
+	if (!gfsr) {
+		arm_smmu_power_off(smmu->pwr);
 		return IRQ_NONE;
+	}
 
 	dev_err_ratelimited(smmu->dev,
 		"Unexpected global fault, this could be serious\n");
@@ -696,13 +1424,14 @@
 		gfsr, gfsynr0, gfsynr1, gfsynr2);
 
 	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
+	arm_smmu_power_off(smmu->pwr);
 	return IRQ_HANDLED;
 }
 
 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
 				       struct io_pgtable_cfg *pgtbl_cfg)
 {
-	u32 reg, reg2;
+	u32 reg;
 	u64 reg64;
 	bool stage1;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
@@ -745,22 +1474,14 @@
 
 	/* TTBRs */
 	if (stage1) {
-		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);
+		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
 
-		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
-			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
-			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
-			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
-		} else {
-			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
-			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
-			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
-			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
-			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
-			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
-		}
+		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
+
+		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
+		reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
+		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
 	} else {
 		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
 		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
@@ -768,36 +1489,32 @@
 
 	/* TTBCR */
 	if (stage1) {
-		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
-			reg2 = 0;
-		} else {
-			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
-			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-			reg2 |= TTBCR2_SEP_UPSTREAM;
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
+		if (smmu->version > ARM_SMMU_V1) {
+			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
+			reg |= TTBCR2_SEP_UPSTREAM;
+			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
 		}
-		if (smmu->version > ARM_SMMU_V1)
-			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
 	} else {
 		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 	}
-	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
 
 	/* MAIRs (stage-1 only) */
 	if (stage1) {
-		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
-			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
-			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
-		} else {
-			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
-			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
-		}
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
 		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
-		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
+		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
+		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
 	}
 
 	/* SCTLR */
-	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
+	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_EAE_SBOP;
+
+	if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_S1_BYPASS)) ||
+	    !stage1)
+		reg |= SCTLR_M;
 	if (stage1)
 		reg |= SCTLR_S1_ASIDPNE;
 #ifdef __BIG_ENDIAN
@@ -806,21 +1523,81 @@
 	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
 }
 
+static int arm_smmu_init_asid(struct iommu_domain *domain,
+				struct arm_smmu_device *smmu)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	bool dynamic = is_dynamic_domain(domain);
+	int ret;
+
+	if (!dynamic) {
+		cfg->asid = cfg->cbndx + 1;
+	} else {
+		mutex_lock(&smmu->idr_mutex);
+		ret = idr_alloc_cyclic(&smmu->asid_idr, domain,
+				smmu->num_context_banks + 2,
+				MAX_ASID + 1, GFP_KERNEL);
+
+		mutex_unlock(&smmu->idr_mutex);
+		if (ret < 0) {
+			dev_err(smmu->dev, "dynamic ASID allocation failed: %d\n",
+				ret);
+			return ret;
+		}
+		cfg->asid = ret;
+	}
+	return 0;
+}
+
+static void arm_smmu_free_asid(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	bool dynamic = is_dynamic_domain(domain);
+
+	if (cfg->asid == INVALID_ASID || !dynamic)
+		return;
+
+	mutex_lock(&smmu->idr_mutex);
+	idr_remove(&smmu->asid_idr, cfg->asid);
+	mutex_unlock(&smmu->idr_mutex);
+}
+
 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 					struct arm_smmu_device *smmu)
 {
 	int irq, start, ret = 0;
 	unsigned long ias, oas;
 	struct io_pgtable_ops *pgtbl_ops;
-	struct io_pgtable_cfg pgtbl_cfg;
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
+	unsigned long quirks = 0;
+	bool dynamic;
 
 	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
+	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
+	smmu_domain->cfg.asid = INVALID_ASID;
+
+	/* We're bypassing these SIDs, so don't allocate an actual context */
+	if (domain->type == IOMMU_DOMAIN_DMA) {
+		smmu_domain->smmu = smmu;
+		goto out_unlock;
+	}
+
+	dynamic = is_dynamic_domain(domain);
+	if (dynamic && !(smmu->options & ARM_SMMU_OPT_DYNAMIC)) {
+		dev_err(smmu->dev, "dynamic domains not supported\n");
+		ret = -EPERM;
+		goto out_unlock;
+	}
+
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -854,11 +1631,6 @@
 	 */
 	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
 		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
-	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
-	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
-	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
-	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
-		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
 	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
 	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
 			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
@@ -878,14 +1650,10 @@
 		oas = smmu->ipa_size;
 		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
 			fmt = ARM_64_LPAE_S1;
-		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
+		} else {
 			fmt = ARM_32_LPAE_S1;
 			ias = min(ias, 32UL);
 			oas = min(oas, 40UL);
-		} else {
-			fmt = ARM_V7S;
-			ias = min(ias, 32UL);
-			oas = min(oas, 32UL);
 		}
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
@@ -911,12 +1679,20 @@
 		goto out_unlock;
 	}
 
-	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
-				      smmu->num_context_banks);
-	if (ret < 0)
-		goto out_unlock;
+	if (is_fast)
+		fmt = ARM_V8L_FAST;
 
-	cfg->cbndx = ret;
+	if (smmu_domain->attributes & (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT))
+		quirks |= IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT;
+
+	/* Dynamic domains must set cbndx through domain attribute */
+	if (!dynamic) {
+		ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
+				      smmu->num_context_banks);
+		if (ret < 0)
+			goto out_unlock;
+		cfg->cbndx = ret;
+	}
 	if (smmu->version < ARM_SMMU_V2) {
 		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
 		cfg->irptndx %= smmu->num_context_irqs;
@@ -924,7 +1700,8 @@
 		cfg->irptndx = cfg->cbndx;
 	}
 
-	pgtbl_cfg = (struct io_pgtable_cfg) {
+	smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
+		.quirks		= quirks,
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
@@ -933,33 +1710,51 @@
 	};
 
 	smmu_domain->smmu = smmu;
-	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
+	pgtbl_ops = alloc_io_pgtable_ops(fmt, &smmu_domain->pgtbl_cfg,
+					smmu_domain);
 	if (!pgtbl_ops) {
 		ret = -ENOMEM;
 		goto out_clear_smmu;
 	}
 
-	/* Update the domain's page sizes to reflect the page table format */
-	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
-	domain->geometry.aperture_end = (1UL << ias) - 1;
-	domain->geometry.force_aperture = true;
-
-	/* Initialise the context bank with our page table cfg */
-	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
-
 	/*
-	 * Request context fault interrupt. Do this last to avoid the
-	 * handler seeing a half-initialised domain state.
+	 * Assign any page table memory that might have been allocated
+	 * during alloc_io_pgtable_ops().
 	 */
-	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
-	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
-			       IRQF_SHARED, "arm-smmu-context-fault", domain);
-	if (ret < 0) {
-		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
-			cfg->irptndx, irq);
+	arm_smmu_secure_domain_lock(smmu_domain);
+	arm_smmu_assign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
+
+	/* Update the domain's page sizes to reflect the page table format */
+	domain->pgsize_bitmap = smmu_domain->pgtbl_cfg.pgsize_bitmap;
+
+	/* Assign an asid */
+	ret = arm_smmu_init_asid(domain, smmu);
+	if (ret)
+		goto out_clear_smmu;
+
+	if (!dynamic) {
+		/* Initialise the context bank with our page table cfg */
+		arm_smmu_init_context_bank(smmu_domain,
+						&smmu_domain->pgtbl_cfg);
+
+		/*
+		 * Request context fault interrupt. Do this last to avoid the
+		 * handler seeing a half-initialised domain state.
+		 */
+		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
+		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
+			arm_smmu_context_fault, IRQF_ONESHOT | IRQF_SHARED,
+			"arm-smmu-context-fault", domain);
+		if (ret < 0) {
+			dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
+				cfg->irptndx, irq);
+			cfg->irptndx = INVALID_IRPTNDX;
+			goto out_clear_smmu;
+		}
+	} else {
 		cfg->irptndx = INVALID_IRPTNDX;
 	}
-
 	mutex_unlock(&smmu_domain->init_mutex);
 
 	/* Publish page table ops for map/unmap */
@@ -967,12 +1762,20 @@
 	return 0;
 
 out_clear_smmu:
+	arm_smmu_destroy_domain_context(domain);
 	smmu_domain->smmu = NULL;
 out_unlock:
 	mutex_unlock(&smmu_domain->init_mutex);
 	return ret;
 }
 
+static void arm_smmu_domain_reinit(struct arm_smmu_domain *smmu_domain)
+{
+	smmu_domain->cfg.irptndx = INVALID_IRPTNDX;
+	smmu_domain->cfg.cbndx = INVALID_CBNDX;
+	smmu_domain->secure_vmid = VMID_INVAL;
+}
+
 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -980,10 +1783,32 @@
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	void __iomem *cb_base;
 	int irq;
+	bool dynamic;
+	int ret;
 
-	if (!smmu)
+	if (!smmu || domain->type == IOMMU_DOMAIN_DMA)
 		return;
 
+	ret = arm_smmu_power_on(smmu->pwr);
+	if (ret) {
+		WARN_ONCE(ret, "Woops, powering on smmu %p failed. Leaking context bank\n",
+				smmu);
+		return;
+	}
+
+	dynamic = is_dynamic_domain(domain);
+	if (dynamic) {
+		arm_smmu_free_asid(domain);
+		free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+		arm_smmu_power_off(smmu->pwr);
+		arm_smmu_secure_domain_lock(smmu_domain);
+		arm_smmu_secure_pool_destroy(smmu_domain);
+		arm_smmu_unassign_table(smmu_domain);
+		arm_smmu_secure_domain_unlock(smmu_domain);
+		arm_smmu_domain_reinit(smmu_domain);
+		return;
+	}
+
 	/*
 	 * Disable the context bank and free the page tables before freeing
 	 * it.
@@ -997,14 +1822,22 @@
 	}
 
 	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+	arm_smmu_secure_domain_lock(smmu_domain);
+	arm_smmu_secure_pool_destroy(smmu_domain);
+	arm_smmu_unassign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
+
+	arm_smmu_power_off(smmu->pwr);
+	arm_smmu_domain_reinit(smmu_domain);
 }
 
 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
 	struct arm_smmu_domain *smmu_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
+	/* Do not support DOMAIN_DMA for now */
+	if (type != IOMMU_DOMAIN_UNMANAGED)
 		return NULL;
 	/*
 	 * Allocate the domain and initialise some of its data structures.
@@ -1015,14 +1848,19 @@
 	if (!smmu_domain)
 		return NULL;
 
-	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
-	    iommu_get_dma_cookie(&smmu_domain->domain))) {
+	if (type == IOMMU_DOMAIN_DMA &&
+	    iommu_get_dma_cookie(&smmu_domain->domain)) {
 		kfree(smmu_domain);
 		return NULL;
 	}
 
 	mutex_init(&smmu_domain->init_mutex);
 	spin_lock_init(&smmu_domain->pgtbl_lock);
+	INIT_LIST_HEAD(&smmu_domain->pte_info_list);
+	INIT_LIST_HEAD(&smmu_domain->unassign_list);
+	mutex_init(&smmu_domain->assign_lock);
+	INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
+	arm_smmu_domain_reinit(smmu_domain);
 
 	return &smmu_domain->domain;
 }
@@ -1040,211 +1878,284 @@
 	kfree(smmu_domain);
 }
 
-static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
+static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
+					  struct arm_smmu_master_cfg *cfg)
 {
-	struct arm_smmu_smr *smr = smmu->smrs + idx;
-	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
+	int i;
+	struct arm_smmu_smr *smrs;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
-	if (smr->valid)
-		reg |= SMR_VALID;
-	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
-}
+	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
+		return 0;
 
-static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
-{
-	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
-	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
-		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
-		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
+	if (cfg->smrs)
+		return -EEXIST;
 
-	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
-}
-
-static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
-{
-	arm_smmu_write_s2cr(smmu, idx);
-	if (smmu->smrs)
-		arm_smmu_write_smr(smmu, idx);
-}
-
-static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
-{
-	struct arm_smmu_smr *smrs = smmu->smrs;
-	int i, free_idx = -ENOSPC;
-
-	/* Stream indexing is blissfully easy */
-	if (!smrs)
-		return id;
-
-	/* Validating SMRs is... less so */
-	for (i = 0; i < smmu->num_mapping_groups; ++i) {
-		if (!smrs[i].valid) {
-			/*
-			 * Note the first free entry we come across, which
-			 * we'll claim in the end if nothing else matches.
-			 */
-			if (free_idx < 0)
-				free_idx = i;
-			continue;
-		}
-		/*
-		 * If the new entry is _entirely_ matched by an existing entry,
-		 * then reuse that, with the guarantee that there also cannot
-		 * be any subsequent conflicting entries. In normal use we'd
-		 * expect simply identical entries for this case, but there's
-		 * no harm in accommodating the generalisation.
-		 */
-		if ((mask & smrs[i].mask) == mask &&
-		    !((id ^ smrs[i].id) & ~smrs[i].mask))
-			return i;
-		/*
-		 * If the new entry has any other overlap with an existing one,
-		 * though, then there always exists at least one stream ID
-		 * which would cause a conflict, and we can't allow that risk.
-		 */
-		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
-			return -EINVAL;
+	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
+	if (!smrs) {
+		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
+			cfg->num_streamids);
+		return -ENOMEM;
 	}
 
-	return free_idx;
-}
-
-static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
-{
-	if (--smmu->s2crs[idx].count)
-		return false;
-
-	smmu->s2crs[idx] = s2cr_init_val;
-	if (smmu->smrs)
-		smmu->smrs[idx].valid = false;
-
-	return true;
-}
-
-static int arm_smmu_master_alloc_smes(struct device *dev)
-{
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
-	struct arm_smmu_device *smmu = cfg->smmu;
-	struct arm_smmu_smr *smrs = smmu->smrs;
-	struct iommu_group *group;
-	int i, idx, ret;
-
-	mutex_lock(&smmu->stream_map_mutex);
-	/* Figure out a viable stream map entry allocation */
-	for_each_cfg_sme(fwspec, i, idx) {
-		u16 sid = fwspec->ids[i];
-		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
-
-		if (idx != INVALID_SMENDX) {
-			ret = -EEXIST;
-			goto out_err;
+	/* Allocate the SMRs on the SMMU */
+	for (i = 0; i < cfg->num_streamids; ++i) {
+		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
+						  smmu->num_mapping_groups);
+		if (idx < 0) {
+			dev_err(smmu->dev, "failed to allocate free SMR\n");
+			goto err_free_smrs;
 		}
 
-		ret = arm_smmu_find_sme(smmu, sid, mask);
-		if (ret < 0)
-			goto out_err;
-
-		idx = ret;
-		if (smrs && smmu->s2crs[idx].count == 0) {
-			smrs[idx].id = sid;
-			smrs[idx].mask = mask;
-			smrs[idx].valid = true;
-		}
-		smmu->s2crs[idx].count++;
-		cfg->smendx[i] = (s16)idx;
+		smrs[i] = (struct arm_smmu_smr) {
+			.idx	= idx,
+			.mask	= 0, /* We don't currently share SMRs */
+			.id	= cfg->streamids[i],
+		};
 	}
 
-	group = iommu_group_get_for_dev(dev);
-	if (!group)
-		group = ERR_PTR(-ENOMEM);
-	if (IS_ERR(group)) {
-		ret = PTR_ERR(group);
-		goto out_err;
-	}
-	iommu_group_put(group);
-
 	/* It worked! Now, poke the actual hardware */
-	for_each_cfg_sme(fwspec, i, idx) {
-		arm_smmu_write_sme(smmu, idx);
-		smmu->s2crs[idx].group = group;
+	for (i = 0; i < cfg->num_streamids; ++i) {
+		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
+			  smrs[i].mask << SMR_MASK_SHIFT;
+		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
 	}
 
-	mutex_unlock(&smmu->stream_map_mutex);
+	cfg->smrs = smrs;
 	return 0;
 
-out_err:
-	while (i--) {
-		arm_smmu_free_sme(smmu, cfg->smendx[i]);
-		cfg->smendx[i] = INVALID_SMENDX;
-	}
-	mutex_unlock(&smmu->stream_map_mutex);
-	return ret;
+err_free_smrs:
+	while (--i >= 0)
+		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
+	kfree(smrs);
+	return -ENOSPC;
 }
 
-static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
+static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
+				      struct arm_smmu_master_cfg *cfg)
 {
-	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
-	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
-	int i, idx;
+	int i;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	struct arm_smmu_smr *smrs = cfg->smrs;
 
-	mutex_lock(&smmu->stream_map_mutex);
-	for_each_cfg_sme(fwspec, i, idx) {
-		if (arm_smmu_free_sme(smmu, idx))
-			arm_smmu_write_sme(smmu, idx);
-		cfg->smendx[i] = INVALID_SMENDX;
+	if (!smrs)
+		return;
+
+	/* Invalidate the SMRs before freeing back to the allocator */
+	for (i = 0; i < cfg->num_streamids; ++i) {
+		u8 idx = smrs[i].idx;
+
+		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
+		__arm_smmu_free_bitmap(smmu->smr_map, idx);
 	}
-	mutex_unlock(&smmu->stream_map_mutex);
+
+	cfg->smrs = NULL;
+	kfree(smrs);
 }
 
 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
-				      struct iommu_fwspec *fwspec)
+				      struct arm_smmu_master_cfg *cfg)
 {
+	int i, ret;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
-	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
-	u8 cbndx = smmu_domain->cfg.cbndx;
-	int i, idx;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 
-	for_each_cfg_sme(fwspec, i, idx) {
-		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
-			continue;
+	/*
+	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
+	 * for all devices behind the SMMU. Note that we need to take
+	 * care configuring SMRs for devices both a platform_device and
+	 * and a PCI device (i.e. a PCI host controller)
+	 */
+	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
+		return 0;
 
-		s2cr[idx].type = type;
-		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
-		s2cr[idx].cbndx = cbndx;
-		arm_smmu_write_s2cr(smmu, idx);
+	/* Devices in an IOMMU group may already be configured */
+	ret = arm_smmu_master_configure_smrs(smmu, cfg);
+	if (ret)
+		return ret == -EEXIST ? 0 : ret;
+
+	for (i = 0; i < cfg->num_streamids; ++i) {
+		u32 idx, s2cr;
+
+		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+		s2cr = S2CR_TYPE_TRANS |
+		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
+		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
 	}
+
+	return 0;
+}
+
+static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
+					  struct arm_smmu_master_cfg *cfg)
+{
+	int i;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+
+	/* An IOMMU group is torn down by the first device to be removed */
+	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
+		return;
+
+	/*
+	 * We *must* clear the S2CR first, because freeing the SMR means
+	 * that it can be re-allocated immediately.
+	 */
+	for (i = 0; i < cfg->num_streamids; ++i) {
+		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
+		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
+
+		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
+	}
+
+	arm_smmu_master_free_smrs(smmu, cfg);
+}
+
+static void arm_smmu_detach_dev(struct iommu_domain *domain,
+				struct device *dev)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	struct arm_smmu_master_cfg *cfg;
+	int dynamic = smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC);
+	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
+
+	if (dynamic)
+		return;
+
+	cfg = find_smmu_master_cfg(dev);
+	if (!cfg)
+		return;
+
+	if (!smmu) {
+		dev_err(dev, "Domain not attached; cannot detach!\n");
+		return;
+	}
+
+	dev->archdata.iommu = NULL;
+	arm_smmu_domain_remove_master(smmu_domain, cfg);
+
+	/* Remove additional vote for atomic power */
+	if (atomic_domain) {
+		WARN_ON(arm_smmu_power_on_atomic(smmu->pwr));
+		arm_smmu_power_off(smmu->pwr);
+	}
+}
+
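+/*
+ * For secure domains, newly allocated page-table pages queued on
+ * pte_info_list are hyp-assigned to both HLOS (read/write) and the
+ * domain's secure VMID (read-only); pages queued on unassign_list are
+ * handed back to HLOS and freed.
+ */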
+static int arm_smmu_assign_table(struct arm_smmu_domain *smmu_domain)
+{
+	int ret = 0;
+	int dest_vmids[2] = {VMID_HLOS, smmu_domain->secure_vmid};
+	int dest_perms[2] = {PERM_READ | PERM_WRITE, PERM_READ};
+	int source_vmid = VMID_HLOS;
+	struct arm_smmu_pte_info *pte_info, *temp;
+
+	if (!arm_smmu_is_domain_secure(smmu_domain))
+		return ret;
+
+	list_for_each_entry(pte_info, &smmu_domain->pte_info_list, entry) {
+		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
+				      PAGE_SIZE, &source_vmid, 1,
+				      dest_vmids, dest_perms, 2);
+		if (WARN_ON(ret))
+			break;
+	}
+
+	list_for_each_entry_safe(pte_info, temp, &smmu_domain->pte_info_list,
+								entry) {
+		list_del(&pte_info->entry);
+		kfree(pte_info);
+	}
+	return ret;
+}
+
+static void arm_smmu_unassign_table(struct arm_smmu_domain *smmu_domain)
+{
+	int ret;
+	int dest_vmids = VMID_HLOS;
+	int dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
+	int source_vmlist[2] = {VMID_HLOS, smmu_domain->secure_vmid};
+	struct arm_smmu_pte_info *pte_info, *temp;
+
+	if (!arm_smmu_is_domain_secure(smmu_domain))
+		return;
+
+	list_for_each_entry(pte_info, &smmu_domain->unassign_list, entry) {
+		ret = hyp_assign_phys(virt_to_phys(pte_info->virt_addr),
+				      PAGE_SIZE, source_vmlist, 2,
+				      &dest_vmids, &dest_perms, 1);
+		if (WARN_ON(ret))
+			break;
+		free_pages_exact(pte_info->virt_addr, pte_info->size);
+	}
+
+	list_for_each_entry_safe(pte_info, temp, &smmu_domain->unassign_list,
+				 entry) {
+		list_del(&pte_info->entry);
+		kfree(pte_info);
+	}
+}
+
+static void arm_smmu_unprepare_pgtable(void *cookie, void *addr, size_t size)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_pte_info *pte_info;
+
+	BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
+
+	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
+	if (!pte_info)
+		return;
+
+	pte_info->virt_addr = addr;
+	pte_info->size = size;
+	list_add_tail(&pte_info->entry, &smmu_domain->unassign_list);
+}
+
+static int arm_smmu_prepare_pgtable(void *addr, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_pte_info *pte_info;
+
+	BUG_ON(!arm_smmu_is_domain_secure(smmu_domain));
+
+	pte_info = kzalloc(sizeof(struct arm_smmu_pte_info), GFP_ATOMIC);
+	if (!pte_info)
+		return -ENOMEM;
+	pte_info->virt_addr = addr;
+	list_add_tail(&pte_info->entry, &smmu_domain->pte_info_list);
 	return 0;
 }
 
 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 {
 	int ret;
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-	struct arm_smmu_device *smmu;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_master_cfg *cfg;
+	int atomic_domain = smmu_domain->attributes & (1 << DOMAIN_ATTR_ATOMIC);
 
-	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
+	smmu = find_smmu_for_device(dev);
+	if (!smmu) {
 		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
 		return -ENXIO;
 	}
 
-	/*
-	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
-	 * domains between of_xlate() and add_device() - we have no way to cope
-	 * with that, so until ARM gets converted to rely on groups and default
-	 * domains, just say no (but more politely than by dereferencing NULL).
-	 * This should be at least a WARN_ON once that's sorted.
-	 */
-	if (!fwspec->iommu_priv)
-		return -ENODEV;
+	/* Enable Clocks and Power */
+	ret = arm_smmu_power_on(smmu->pwr);
+	if (ret)
+		return ret;
 
-	smmu = fwspec_smmu(fwspec);
 	/* Ensure that the domain is finalised */
 	ret = arm_smmu_init_domain_context(domain, smmu);
 	if (ret < 0)
-		return ret;
+		goto out_power_off;
+
+	/* Do not modify the SIDs, HW is still running */
+	if (is_dynamic_domain(domain)) {
+		ret = 0;
+		goto out_power_off;
+	}
 
 	/*
 	 * Sanity check the domain. We don't support domains across
@@ -1254,11 +2165,38 @@
 		dev_err(dev,
 			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
 			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
-		return -EINVAL;
+		ret = -EINVAL;
+		goto out_power_off;
 	}
 
 	/* Looks ok, so add the device to the domain */
-	return arm_smmu_domain_add_master(smmu_domain, fwspec);
+	cfg = find_smmu_master_cfg(dev);
+	if (!cfg) {
+		ret = -ENODEV;
+		goto out_power_off;
+	}
+
+	/* Detach the dev from its current domain */
+	if (dev->archdata.iommu)
+		arm_smmu_detach_dev(dev->archdata.iommu, dev);
+
+	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
+	if (!ret)
+		dev->archdata.iommu = domain;
+
+out_power_off:
+	/*
+	 * Keep an additional vote for non-atomic power until domain is
+	 * detached
+	 */
+	if (!ret && atomic_domain) {
+		WARN_ON(arm_smmu_power_on(smmu->pwr));
+		arm_smmu_power_off_atomic(smmu->pwr);
+	}
+
+	arm_smmu_power_off(smmu->pwr);
+
+	return ret;
 }
 
 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
@@ -1272,9 +2210,15 @@
 	if (!ops)
 		return -ENODEV;
 
+	arm_smmu_secure_domain_lock(smmu_domain);
+
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	ret = ops->map(ops, iova, paddr, size, prot);
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+
+	arm_smmu_assign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
+
 	return ret;
 }
 
@@ -1289,13 +2233,59 @@
 	if (!ops)
 		return 0;
 
+	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
+	if (ret)
+		return ret;
+
+	arm_smmu_secure_domain_lock(smmu_domain);
+
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	ret = ops->unmap(ops, iova, size);
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+
+	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+	/*
+	 * While splitting up block mappings, we might allocate page table
+	 * memory during unmap, so the VMIDs need to be assigned to that
+	 * memory here as well.
+	 */
+	arm_smmu_assign_table(smmu_domain);
+	/* Also unassign any pages that were freed during unmap */
+	arm_smmu_unassign_table(smmu_domain);
+	arm_smmu_secure_domain_unlock(smmu_domain);
 	return ret;
 }
 
-static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
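+/*
+ * Map a whole scatterlist in one call.  If mapping fails partway through,
+ * the bytes that were already mapped (returned via @size) are unmapped
+ * again, so the operation either maps the entire list or nothing.
+ */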
+static size_t arm_smmu_map_sg(struct iommu_domain *domain, unsigned long iova,
+			   struct scatterlist *sg, unsigned int nents, int prot)
+{
+	int ret;
+	size_t size;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
+
+	if (!ops)
+		return -ENODEV;
+
+	ret = arm_smmu_domain_power_on(domain, smmu_domain->smmu);
+	if (ret)
+		return ret;
+
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	ret = ops->map_sg(ops, iova, sg, nents, prot, &size);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+
+	if (!ret)
+		arm_smmu_unmap(domain, iova, size);
+
+	arm_smmu_domain_power_off(domain, smmu_domain->smmu);
+	arm_smmu_assign_table(smmu_domain);
+
+	return ret;
+}
+
+static phys_addr_t __arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 					      dma_addr_t iova)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
@@ -1319,20 +2309,24 @@
 
 	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
 				      !(tmp & ATSR_ACTIVE), 5, 50)) {
+		phys = ops->iova_to_phys(ops, iova);
 		dev_err(dev,
-			"iova to phys timed out on %pad. Falling back to software table walk.\n",
-			&iova);
-		return ops->iova_to_phys(ops, iova);
+			"iova to phys timed out on %pad. software table walk result=%pa.\n",
+			&iova, &phys);
+		phys = 0;
+		return phys;
 	}
 
 	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
 	if (phys & CB_PAR_F) {
 		dev_err(dev, "translation fault!\n");
 		dev_err(dev, "PAR = 0x%llx\n", phys);
-		return 0;
+		phys = 0;
+	} else {
+		phys = (phys & (PHYS_MASK & ~0xfffULL)) | (iova & 0xfff);
 	}
 
-	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
+	return phys;
 }
 
 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
@@ -1347,12 +2341,33 @@
 		return 0;
 
 	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	ret = ops->iova_to_phys(ops, iova);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+
+	return ret;
+}
+
+/*
+ * This function can sleep, and cannot be called from atomic context. Will
+ * power on the register block if required. This restriction does not apply
+ * to the original iova_to_phys() op.
+ */
+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
+					dma_addr_t iova)
+{
+	phys_addr_t ret = 0;
+	unsigned long flags;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->smmu->arch_ops &&
+	    smmu_domain->smmu->arch_ops->iova_to_phys_hard)
+		return smmu_domain->smmu->arch_ops->iova_to_phys_hard(
+						domain, iova);
+
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
 	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
-			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		ret = arm_smmu_iova_to_phys_hard(domain, iova);
-	} else {
-		ret = ops->iova_to_phys(ops, iova);
-	}
+			smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+		ret = __arm_smmu_iova_to_phys_hard(domain, iova);
 
 	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
 
@@ -1377,114 +2392,111 @@
 	}
 }
 
-static int arm_smmu_match_node(struct device *dev, void *data)
+static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
 {
-	return dev->of_node == data;
+	*((u16 *)data) = alias;
+	return 0; /* Continue walking */
 }
 
-static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
+static void __arm_smmu_release_pci_iommudata(void *data)
 {
-	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
-						np, arm_smmu_match_node);
-	put_device(dev);
-	return dev ? dev_get_drvdata(dev) : NULL;
+	kfree(data);
+}
+
+static int arm_smmu_init_pci_device(struct pci_dev *pdev,
+				    struct iommu_group *group)
+{
+	struct arm_smmu_master_cfg *cfg;
+	u16 sid;
+	int i;
+
+	cfg = iommu_group_get_iommudata(group);
+	if (!cfg) {
+		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+		if (!cfg)
+			return -ENOMEM;
+
+		iommu_group_set_iommudata(group, cfg,
+					  __arm_smmu_release_pci_iommudata);
+	}
+
+	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
+		return -ENOSPC;
+
+	/*
+	 * Assume Stream ID == Requester ID for now.
+	 * We need a way to describe the ID mappings in FDT.
+	 */
+	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
+	for (i = 0; i < cfg->num_streamids; ++i)
+		if (cfg->streamids[i] == sid)
+			break;
+
+	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
+	if (i == cfg->num_streamids)
+		cfg->streamids[cfg->num_streamids++] = sid;
+
+	return 0;
+}
+
+static int arm_smmu_init_platform_device(struct device *dev,
+					 struct iommu_group *group)
+{
+	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
+	struct arm_smmu_master *master;
+
+	if (!smmu)
+		return -ENODEV;
+
+	master = find_smmu_master(smmu, dev->of_node);
+	if (!master)
+		return -ENODEV;
+
+	iommu_group_set_iommudata(group, &master->cfg, NULL);
+
+	return 0;
 }
 
 static int arm_smmu_add_device(struct device *dev)
 {
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_master_cfg *cfg;
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-	int i, ret;
+	struct iommu_group *group;
 
-	if (using_legacy_binding) {
-		ret = arm_smmu_register_legacy_master(dev, &smmu);
-		fwspec = dev->iommu_fwspec;
-		if (ret)
-			goto out_free;
-	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
-		smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
-	} else {
-		return -ENODEV;
-	}
+	group = iommu_group_get_for_dev(dev);
+	if (IS_ERR(group))
+		return PTR_ERR(group);
 
-	ret = -EINVAL;
-	for (i = 0; i < fwspec->num_ids; i++) {
-		u16 sid = fwspec->ids[i];
-		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
-
-		if (sid & ~smmu->streamid_mask) {
-			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
-				sid, smmu->streamid_mask);
-			goto out_free;
-		}
-		if (mask & ~smmu->smr_mask_mask) {
-			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
-				sid, smmu->smr_mask_mask);
-			goto out_free;
-		}
-	}
-
-	ret = -ENOMEM;
-	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
-		      GFP_KERNEL);
-	if (!cfg)
-		goto out_free;
-
-	cfg->smmu = smmu;
-	fwspec->iommu_priv = cfg;
-	while (i--)
-		cfg->smendx[i] = INVALID_SMENDX;
-
-	ret = arm_smmu_master_alloc_smes(dev);
-	if (ret)
-		goto out_free;
-
+	iommu_group_put(group);
 	return 0;
-
-out_free:
-	if (fwspec)
-		kfree(fwspec->iommu_priv);
-	iommu_fwspec_free(dev);
-	return ret;
 }
 
 static void arm_smmu_remove_device(struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-
-	if (!fwspec || fwspec->ops != &arm_smmu_ops)
-		return;
-
-	arm_smmu_master_free_smes(fwspec);
 	iommu_group_remove_device(dev);
-	kfree(fwspec->iommu_priv);
-	iommu_fwspec_free(dev);
 }
 
 static struct iommu_group *arm_smmu_device_group(struct device *dev)
 {
-	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
-	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
-	struct iommu_group *group = NULL;
-	int i, idx;
-
-	for_each_cfg_sme(fwspec, i, idx) {
-		if (group && smmu->s2crs[idx].group &&
-		    group != smmu->s2crs[idx].group)
-			return ERR_PTR(-EINVAL);
-
-		group = smmu->s2crs[idx].group;
-	}
-
-	if (group)
-		return group;
+	struct iommu_group *group;
+	int ret;
 
 	if (dev_is_pci(dev))
 		group = pci_device_group(dev);
 	else
 		group = generic_device_group(dev);
 
+	if (IS_ERR_OR_NULL(group))
+		return group;
+
+	if (dev_is_pci(dev))
+		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
+	else
+		ret = arm_smmu_init_platform_device(dev, group);
+
+	if (ret) {
+		iommu_group_put(group);
+		group = ERR_PTR(ret);
+	}
+
 	return group;
 }
 
@@ -1492,14 +2504,94 @@
 				    enum iommu_attr attr, void *data)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	int ret = 0;
 
 	switch (attr) {
 	case DOMAIN_ATTR_NESTING:
 		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
 		return 0;
+	case DOMAIN_ATTR_PT_BASE_ADDR:
+		*((phys_addr_t *)data) =
+			smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
+		return 0;
+	case DOMAIN_ATTR_CONTEXT_BANK:
+		/* context bank index isn't valid until we are attached */
+		if (smmu_domain->smmu == NULL)
+			return -ENODEV;
+
+		*((unsigned int *) data) = smmu_domain->cfg.cbndx;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_TTBR0: {
+		u64 val;
+		struct arm_smmu_device *smmu = smmu_domain->smmu;
+		/* not valid until we are attached */
+		if (smmu == NULL)
+			return -ENODEV;
+
+		val = smmu_domain->pgtbl_cfg.arm_lpae_s1_cfg.ttbr[0];
+		if (smmu_domain->cfg.cbar != CBAR_TYPE_S2_TRANS)
+			val |= (u64)ARM_SMMU_CB_ASID(smmu, &smmu_domain->cfg)
+					<< (TTBRn_ASID_SHIFT);
+		*((u64 *)data) = val;
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_CONTEXTIDR:
+		/* not valid until attached */
+		if (smmu_domain->smmu == NULL)
+			return -ENODEV;
+		*((u32 *)data) = smmu_domain->cfg.procid;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PROCID:
+		*((u32 *)data) = smmu_domain->cfg.procid;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_DYNAMIC:
+		*((int *)data) = !!(smmu_domain->attributes
+					& (1 << DOMAIN_ATTR_DYNAMIC));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_NON_FATAL_FAULTS));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_S1_BYPASS:
+		*((int *)data) = !!(smmu_domain->attributes
+				    & (1 << DOMAIN_ATTR_S1_BYPASS));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_SECURE_VMID:
+		*((int *)data) = smmu_domain->secure_vmid;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_PGTBL_INFO: {
+		struct iommu_pgtbl_info *info = data;
+
+		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST))) {
+			ret = -ENODEV;
+			break;
+		}
+		info->pmds = smmu_domain->pgtbl_cfg.av8l_fast_cfg.pmds;
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_FAST:
+		*((int *)data) = !!(smmu_domain->attributes
+					& (1 << DOMAIN_ATTR_FAST));
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_USE_UPSTREAM_HINT:
+		*((int *)data) = !!(smmu_domain->attributes &
+				   (1 << DOMAIN_ATTR_USE_UPSTREAM_HINT));
+		ret = 0;
+		break;
 	default:
 		return -ENODEV;
 	}
+	return ret;
 }
 
 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
@@ -1523,6 +2615,117 @@
 			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 
 		break;
+	case DOMAIN_ATTR_PROCID:
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change procid attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+		smmu_domain->cfg.procid = *((u32 *)data);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_DYNAMIC: {
+		int dynamic = *((int *)data);
+
+		if (smmu_domain->smmu != NULL) {
+			dev_err(smmu_domain->smmu->dev,
+			  "cannot change dynamic attribute while attached\n");
+			ret = -EBUSY;
+			break;
+		}
+
+		if (dynamic)
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_DYNAMIC;
+		else
+			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_DYNAMIC);
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_CONTEXT_BANK:
+		/* context bank can't be set while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+		/* ... and it can only be set for dynamic contexts. */
+		if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_DYNAMIC))) {
+			ret = -EINVAL;
+			break;
+		}
+
+		/* this will be validated during attach */
+		smmu_domain->cfg.cbndx = *((unsigned int *)data);
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_NON_FATAL_FAULTS: {
+		u32 non_fatal_faults = *((int *)data);
+
+		if (non_fatal_faults)
+			smmu_domain->attributes |=
+					1 << DOMAIN_ATTR_NON_FATAL_FAULTS;
+		else
+			smmu_domain->attributes &=
+					~(1 << DOMAIN_ATTR_NON_FATAL_FAULTS);
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_S1_BYPASS: {
+		int bypass = *((int *)data);
+
+		/* bypass can't be changed while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+		if (bypass)
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_S1_BYPASS;
+		else
+			smmu_domain->attributes &=
+					~(1 << DOMAIN_ATTR_S1_BYPASS);
+
+		ret = 0;
+		break;
+	}
+	case DOMAIN_ATTR_ATOMIC:
+	{
+		int atomic_ctx = *((int *)data);
+
+		/* can't be changed while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+		if (atomic_ctx)
+			smmu_domain->attributes |= (1 << DOMAIN_ATTR_ATOMIC);
+		else
+			smmu_domain->attributes &= ~(1 << DOMAIN_ATTR_ATOMIC);
+		break;
+	}
+	case DOMAIN_ATTR_SECURE_VMID:
+		if (smmu_domain->secure_vmid != VMID_INVAL) {
+			ret = -ENODEV;
+			WARN(1, "secure vmid already set!");
+			break;
+		}
+		smmu_domain->secure_vmid = *((int *)data);
+		break;
+	case DOMAIN_ATTR_FAST:
+		if (*((int *)data))
+			smmu_domain->attributes |= 1 << DOMAIN_ATTR_FAST;
+		ret = 0;
+		break;
+	case DOMAIN_ATTR_USE_UPSTREAM_HINT:
+		/* can't be changed while attached */
+		if (smmu_domain->smmu != NULL) {
+			ret = -EBUSY;
+			break;
+		}
+		if (*((int *)data))
+			smmu_domain->attributes |=
+				1 << DOMAIN_ATTR_USE_UPSTREAM_HINT;
+		ret = 0;
+		break;
 	default:
 		ret = -ENODEV;
 	}
@@ -1532,17 +2735,109 @@
 	return ret;
 }
 
-static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
+static void arm_smmu_trigger_fault(struct iommu_domain *domain,
+					unsigned long flags)
 {
-	u32 fwid = 0;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu;
+	void __iomem *cb_base;
 
-	if (args->args_count > 0)
-		fwid |= (u16)args->args[0];
+	if (!smmu_domain->smmu) {
+		pr_err("Can't trigger faults on non-attached domains\n");
+		return;
+	}
 
-	if (args->args_count > 1)
-		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
+	smmu = smmu_domain->smmu;
+	if (arm_smmu_power_on(smmu->pwr))
+		return;
 
-	return iommu_fwspec_add_ids(dev, &fwid, 1);
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	dev_err(smmu->dev, "Writing 0x%lx to FSRRESTORE on cb %d\n",
+		flags, cfg->cbndx);
+	writel_relaxed(flags, cb_base + ARM_SMMU_CB_FSRRESTORE);
+	/* give the interrupt time to fire... */
+	msleep(1000);
+
+	arm_smmu_power_off(smmu->pwr);
+}
+
+static unsigned long arm_smmu_reg_read(struct iommu_domain *domain,
+				       unsigned long offset)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	void __iomem *cb_base;
+	unsigned long val;
+
+	if (offset >= SZ_4K) {
+		pr_err("Invalid offset: 0x%lx\n", offset);
+		return 0;
+	}
+
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		WARN(1, "Can't read registers of a detached domain\n");
+		val = 0;
+		return val;
+	}
+
+	if (arm_smmu_power_on(smmu->pwr))
+		return 0;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	val = readl_relaxed(cb_base + offset);
+
+	arm_smmu_power_off(smmu->pwr);
+	return val;
+}
+
+static void arm_smmu_reg_write(struct iommu_domain *domain,
+			       unsigned long offset, unsigned long val)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu;
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	void __iomem *cb_base;
+
+	if (offset >= SZ_4K) {
+		pr_err("Invalid offset: 0x%lx\n", offset);
+		return;
+	}
+
+	smmu = smmu_domain->smmu;
+	if (!smmu) {
+		WARN(1, "Can't read registers of a detached domain\n");
+		return;
+	}
+
+	if (arm_smmu_power_on(smmu->pwr))
+		return;
+
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+	writel_relaxed(val, cb_base + offset);
+
+	arm_smmu_power_off(smmu->pwr);
+}
+
+static void arm_smmu_tlbi_domain(struct iommu_domain *domain)
+{
+	arm_smmu_tlb_inv_context(to_smmu_domain(domain));
+}
+
+static int arm_smmu_enable_config_clocks(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	return arm_smmu_power_on(smmu_domain->smmu->pwr);
+}
+
+static void arm_smmu_disable_config_clocks(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	arm_smmu_power_off(smmu_domain->smmu->pwr);
 }
 
 static struct iommu_ops arm_smmu_ops = {
@@ -1550,36 +2845,205 @@
 	.domain_alloc		= arm_smmu_domain_alloc,
 	.domain_free		= arm_smmu_domain_free,
 	.attach_dev		= arm_smmu_attach_dev,
+	.detach_dev		= arm_smmu_detach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
-	.map_sg			= default_iommu_map_sg,
+	.map_sg			= arm_smmu_map_sg,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
+	.iova_to_phys_hard	= arm_smmu_iova_to_phys_hard,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
 	.device_group		= arm_smmu_device_group,
 	.domain_get_attr	= arm_smmu_domain_get_attr,
 	.domain_set_attr	= arm_smmu_domain_set_attr,
-	.of_xlate		= arm_smmu_of_xlate,
 	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
+	.trigger_fault		= arm_smmu_trigger_fault,
+	.reg_read		= arm_smmu_reg_read,
+	.reg_write		= arm_smmu_reg_write,
+	.tlbi_domain		= arm_smmu_tlbi_domain,
+	.enable_config_clocks	= arm_smmu_enable_config_clocks,
+	.disable_config_clocks	= arm_smmu_disable_config_clocks,
 };
 
-static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
-{
-	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
-	void __iomem *cb_base;
-	int i;
-	u32 reg, major;
+#define IMPL_DEF1_MICRO_MMU_CTRL	0
+#define MICRO_MMU_CTRL_LOCAL_HALT_REQ	(1 << 2)
+#define MICRO_MMU_CTRL_IDLE		(1 << 3)
 
-	/* clear global FSR */
-	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
-	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+/* Definitions for implementation-defined registers */
+#define ACTLR_QCOM_OSH_SHIFT		28
+#define ACTLR_QCOM_OSH			1
+
+#define ACTLR_QCOM_ISH_SHIFT		29
+#define ACTLR_QCOM_ISH			1
+
+#define ACTLR_QCOM_NSH_SHIFT		30
+#define ACTLR_QCOM_NSH			1
+
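+/*
+ * Halting the qsmmuv2 micro-TBU: set LOCAL_HALT_REQ in the
+ * implementation-defined MICRO_MMU_CTRL register and, when requested,
+ * poll for IDLE so other implementation-defined state is only touched
+ * once the SMMU has stopped accepting new transactions.
+ */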
+static int qsmmuv2_wait_for_halt(struct arm_smmu_device *smmu)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 tmp;
+
+	if (readl_poll_timeout_atomic(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL,
+					tmp, (tmp & MICRO_MMU_CTRL_IDLE),
+					0, 30000)) {
+		dev_err(smmu->dev, "Couldn't halt SMMU!\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int __qsmmuv2_halt(struct arm_smmu_device *smmu, bool wait)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 reg;
+
+	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	reg |= MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+
+	return wait ? qsmmuv2_wait_for_halt(smmu) : 0;
+}
+
+static int qsmmuv2_halt(struct arm_smmu_device *smmu)
+{
+	return __qsmmuv2_halt(smmu, true);
+}
+
+static int qsmmuv2_halt_nowait(struct arm_smmu_device *smmu)
+{
+	return __qsmmuv2_halt(smmu, false);
+}
+
+static void qsmmuv2_resume(struct arm_smmu_device *smmu)
+{
+	void __iomem *impl_def1_base = ARM_SMMU_IMPL_DEF1(smmu);
+	u32 reg;
+
+	reg = readl_relaxed(impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+	reg &= ~MICRO_MMU_CTRL_LOCAL_HALT_REQ;
+	writel_relaxed(reg, impl_def1_base + IMPL_DEF1_MICRO_MMU_CTRL);
+}
+
+static void qsmmuv2_device_reset(struct arm_smmu_device *smmu)
+{
+	int i;
+	u32 val;
+	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
+	void __iomem *cb_base;
 
 	/*
-	 * Reset stream mapping groups: Initial values mark all SMRn as
-	 * invalid and all S2CRn as bypass unless overridden.
+	 * SCTLR.M must be disabled here per ARM SMMUv2 spec
+	 * to prevent table walks with an inconsistent state.
 	 */
-	for (i = 0; i < smmu->num_mapping_groups; ++i)
-		arm_smmu_write_sme(smmu, i);
+	for (i = 0; i < smmu->num_context_banks; ++i) {
+		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
+		val = ACTLR_QCOM_ISH << ACTLR_QCOM_ISH_SHIFT |
+		      ACTLR_QCOM_OSH << ACTLR_QCOM_OSH_SHIFT |
+		      ACTLR_QCOM_NSH << ACTLR_QCOM_NSH_SHIFT;
+		writel_relaxed(val, cb_base + ARM_SMMU_CB_ACTLR);
+	}
+
+	/* Program implementation defined registers */
+	qsmmuv2_halt(smmu);
+	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
+		writel_relaxed(regs[i].value,
+			ARM_SMMU_GR0(smmu) + regs[i].offset);
+	qsmmuv2_resume(smmu);
+}
+
+static phys_addr_t __qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
+					      dma_addr_t iova, bool halt)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	int ret;
+	phys_addr_t phys = 0;
+	unsigned long flags;
+
+	ret = arm_smmu_power_on(smmu_domain->smmu->pwr);
+	if (ret)
+		return 0;
+
+	if (halt) {
+		ret = qsmmuv2_halt(smmu);
+		if (ret)
+			goto out_power_off;
+	}
+
+	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
+	spin_lock(&smmu->atos_lock);
+	phys = __arm_smmu_iova_to_phys_hard(domain, iova);
+	spin_unlock(&smmu->atos_lock);
+	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
+
+	if (halt)
+		qsmmuv2_resume(smmu);
+
+out_power_off:
+	arm_smmu_power_off(smmu_domain->smmu->pwr);
+	return phys;
+}
+
+static phys_addr_t qsmmuv2_iova_to_phys_hard(struct iommu_domain *domain,
+					      dma_addr_t iova)
+{
+	return __qsmmuv2_iova_to_phys_hard(domain, iova, true);
+}
+
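+/*
+ * Intended for the context-fault path: translate the faulting IOVA with
+ * the hardware ATOS engine both before and after a full TLB invalidate,
+ * so the caller can tell a stale TLB entry apart from a genuinely bad
+ * page table entry. Stall mode is disabled momentarily so that ATOS
+ * itself can report faults.
+ */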
+static void qsmmuv2_iova_to_phys_fault(
+				struct iommu_domain *domain,
+				dma_addr_t iova, phys_addr_t *phys,
+				phys_addr_t *phys_post_tlbiall)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
+	struct arm_smmu_device *smmu;
+	void __iomem *cb_base;
+	u64 sctlr, sctlr_orig;
+	u32 fsr;
+
+	smmu = smmu_domain->smmu;
+	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
+
+	qsmmuv2_halt_nowait(smmu);
+
+	writel_relaxed(RESUME_TERMINATE, cb_base + ARM_SMMU_CB_RESUME);
+
+	qsmmuv2_wait_for_halt(smmu);
+
+	/* clear FSR to allow ATOS to log any faults */
+	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
+	writel_relaxed(fsr, cb_base + ARM_SMMU_CB_FSR);
+
+	/* disable stall mode momentarily */
+	sctlr_orig = readl_relaxed(cb_base + ARM_SMMU_CB_SCTLR);
+	sctlr = sctlr_orig & ~SCTLR_CFCFG;
+	writel_relaxed(sctlr, cb_base + ARM_SMMU_CB_SCTLR);
+
+	*phys = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
+	arm_smmu_tlb_inv_context(smmu_domain);
+	*phys_post_tlbiall = __qsmmuv2_iova_to_phys_hard(domain, iova, false);
+
+	/* restore SCTLR */
+	writel_relaxed(sctlr_orig, cb_base + ARM_SMMU_CB_SCTLR);
+
+	qsmmuv2_resume(smmu);
+}
+
+struct arm_smmu_arch_ops qsmmuv2_arch_ops = {
+	.device_reset = qsmmuv2_device_reset,
+	.iova_to_phys_hard = qsmmuv2_iova_to_phys_hard,
+	.iova_to_phys_fault = qsmmuv2_iova_to_phys_fault,
+};
+
+static void arm_smmu_context_bank_reset(struct arm_smmu_device *smmu)
+{
+	int i;
+	u32 reg, major;
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	void __iomem *cb_base;
 
 	/*
 	 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
@@ -1609,6 +3073,31 @@
 			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
 		}
 	}
+}
+
+static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
+{
+	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
+	int i = 0;
+	u32 reg;
+
+	/* clear global FSR */
+	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+	writel_relaxed(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
+
+	if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
+		/*
+		 * Mark all SMRn as invalid and all S2CRn as bypass unless
+		 * overridden
+		 */
+		reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
+		for (i = 0; i < smmu->num_mapping_groups; ++i) {
+			writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
+			writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
+		}
+
+		arm_smmu_context_bank_reset(smmu);
+	}
 
 	/* Invalidate the TLB, just in case */
 	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
@@ -1641,6 +3130,9 @@
 	/* Push the button */
 	__arm_smmu_tlb_sync(smmu);
 	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+
+	/* Manage any implementation defined features */
+	arm_smmu_arch_device_reset(smmu);
 }
 
 static int arm_smmu_id_size_to_bits(int size)
@@ -1662,16 +3154,206 @@
 	}
 }
 
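+/*
+ * "attach-impl-defs" is a flat list of <offset value> u32 pairs; each
+ * pair is written to GR0 + offset during device reset. A hypothetical
+ * devicetree snippet (offsets and values are illustrative only):
+ *
+ *	attach-impl-defs = <0x6000 0x270>,
+ *			   <0x6060 0x1055>;
+ */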
+static int arm_smmu_parse_impl_def_registers(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	int i, ntuples, ret;
+	u32 *tuples;
+	struct arm_smmu_impl_def_reg *regs, *regit;
+
+	if (!of_find_property(dev->of_node, "attach-impl-defs", &ntuples))
+		return 0;
+
+	ntuples /= sizeof(u32);
+	if (ntuples % 2) {
+		dev_err(dev,
+			"Invalid number of attach-impl-defs registers: %d\n",
+			ntuples);
+		return -EINVAL;
+	}
+
+	regs = devm_kmalloc(
+		dev, sizeof(*smmu->impl_def_attach_registers) * ntuples,
+		GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	tuples = devm_kmalloc(dev, sizeof(u32) * ntuples * 2, GFP_KERNEL);
+	if (!tuples)
+		return -ENOMEM;
+
+	ret = of_property_read_u32_array(dev->of_node, "attach-impl-defs",
+					tuples, ntuples);
+	if (ret)
+		return ret;
+
+	for (i = 0, regit = regs; i < ntuples; i += 2, ++regit) {
+		regit->offset = tuples[i];
+		regit->value = tuples[i + 1];
+	}
+
+	devm_kfree(dev, tuples);
+
+	smmu->impl_def_attach_registers = regs;
+	smmu->num_impl_def_attach_registers = ntuples / 2;
+
+	return 0;
+}
+
+static int arm_smmu_init_clocks(struct arm_smmu_power_resources *pwr)
+{
+	const char *cname;
+	struct property *prop;
+	int i;
+	struct device *dev = pwr->dev;
+
+	pwr->num_clocks =
+		of_property_count_strings(dev->of_node, "clock-names");
+
+	if (pwr->num_clocks < 1) {
+		pwr->num_clocks = 0;
+		return 0;
+	}
+
+	pwr->clocks = devm_kzalloc(
+		dev, sizeof(*pwr->clocks) * pwr->num_clocks,
+		GFP_KERNEL);
+
+	if (!pwr->clocks)
+		return -ENOMEM;
+
+	i = 0;
+	of_property_for_each_string(dev->of_node, "clock-names",
+				prop, cname) {
+		struct clk *c = devm_clk_get(dev, cname);
+
+		if (IS_ERR(c)) {
+			dev_err(dev, "Couldn't get clock: %s",
+				cname);
+			return PTR_ERR(c);
+		}
+
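+		/*
+		 * If a clock comes up with no rate configured, pick the
+		 * lowest rate it supports (rounded up from 1 kHz) so that
+		 * enabling it later runs at a sane operating point.
+		 */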
+		if (clk_get_rate(c) == 0) {
+			long rate = clk_round_rate(c, 1000);
+
+			clk_set_rate(c, rate);
+		}
+
+		pwr->clocks[i] = c;
+
+		++i;
+	}
+	return 0;
+}
+
+static int arm_smmu_init_regulators(struct arm_smmu_power_resources *pwr)
+{
+	const char *cname;
+	struct property *prop;
+	int i, ret = 0;
+	struct device *dev = pwr->dev;
+
+	pwr->num_gdscs =
+		of_property_count_strings(dev->of_node, "qcom,regulator-names");
+
+	if (pwr->num_gdscs < 1) {
+		pwr->num_gdscs = 0;
+		return 0;
+	}
+
+	pwr->gdscs = devm_kzalloc(
+			dev, sizeof(*pwr->gdscs) * pwr->num_gdscs, GFP_KERNEL);
+
+	if (!pwr->gdscs)
+		return -ENOMEM;
+
+	i = 0;
+	of_property_for_each_string(dev->of_node, "qcom,regulator-names",
+				prop, cname)
+		pwr->gdscs[i++].supply = cname;
+
+	ret = devm_regulator_bulk_get(dev, pwr->num_gdscs, pwr->gdscs);
+	return ret;
+}
+
+static int arm_smmu_init_bus_scaling(struct arm_smmu_power_resources *pwr)
+{
+	struct device *dev = pwr->dev;
+
+	/* We don't want the bus APIs to print an error message */
+	if (!of_find_property(dev->of_node, "qcom,msm-bus,name", NULL)) {
+		dev_dbg(dev, "No bus scaling info\n");
+		return 0;
+	}
+
+	pwr->bus_dt_data = msm_bus_cl_get_pdata(pwr->pdev);
+	if (!pwr->bus_dt_data) {
+		dev_err(dev, "Unable to read bus-scaling from devicetree\n");
+		return -EINVAL;
+	}
+
+	pwr->bus_client = msm_bus_scale_register_client(pwr->bus_dt_data);
+	if (!pwr->bus_client) {
+		dev_err(dev, "Bus client registration failed\n");
+		msm_bus_cl_clear_pdata(pwr->bus_dt_data);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * Cleanup done by devm. Any non-devm resources must clean up themselves.
+ */
+static struct arm_smmu_power_resources *arm_smmu_init_power_resources(
+						struct platform_device *pdev)
+{
+	struct arm_smmu_power_resources *pwr;
+	int ret;
+
+	pwr = devm_kzalloc(&pdev->dev, sizeof(*pwr), GFP_KERNEL);
+	if (!pwr)
+		return ERR_PTR(-ENOMEM);
+
+	pwr->dev = &pdev->dev;
+	pwr->pdev = pdev;
+	mutex_init(&pwr->power_lock);
+	spin_lock_init(&pwr->clock_refs_lock);
+
+	ret = arm_smmu_init_clocks(pwr);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = arm_smmu_init_regulators(pwr);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = arm_smmu_init_bus_scaling(pwr);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return pwr;
+}
+
+/*
+ * Bus APIs are not devm-safe.
+ */
+static void arm_smmu_exit_power_resources(struct arm_smmu_power_resources *pwr)
+{
+	msm_bus_scale_unregister_client(pwr->bus_client);
+	msm_bus_cl_clear_pdata(pwr->bus_dt_data);
+}
+
 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
 {
 	unsigned long size;
 	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
 	u32 id;
 	bool cttw_dt, cttw_reg;
-	int i;
 
-	dev_notice(smmu->dev, "probing hardware configuration...\n");
-	dev_notice(smmu->dev, "SMMUv%d with:\n",
+	dev_dbg(smmu->dev, "probing hardware configuration...\n");
+	dev_dbg(smmu->dev, "SMMUv%d with:\n",
 			smmu->version == ARM_SMMU_V2 ? 2 : 1);
 
 	/* ID0 */
@@ -1685,17 +3367,17 @@
 
 	if (id & ID0_S1TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
-		dev_notice(smmu->dev, "\tstage 1 translation\n");
+		dev_dbg(smmu->dev, "\tstage 1 translation\n");
 	}
 
 	if (id & ID0_S2TS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
-		dev_notice(smmu->dev, "\tstage 2 translation\n");
+		dev_dbg(smmu->dev, "\tstage 2 translation\n");
 	}
 
 	if (id & ID0_NTS) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
-		dev_notice(smmu->dev, "\tnested translation\n");
+		dev_dbg(smmu->dev, "\tnested translation\n");
 	}
 
 	if (!(smmu->features &
@@ -1707,7 +3389,7 @@
 	if ((id & ID0_S1TS) &&
 		((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
 		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
-		dev_notice(smmu->dev, "\taddress translation ops\n");
+		dev_dbg(smmu->dev, "\taddress translation ops\n");
 	}
 
 	/*
@@ -1721,61 +3403,47 @@
 	if (cttw_dt)
 		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
 	if (cttw_dt || cttw_reg)
-		dev_notice(smmu->dev, "\t%scoherent table walk\n",
+		dev_dbg(smmu->dev, "\t%scoherent table walk\n",
 			   cttw_dt ? "" : "non-");
 	if (cttw_dt != cttw_reg)
 		dev_notice(smmu->dev,
 			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
-	/* Max. number of entries we have for stream matching/indexing */
-	size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
-	smmu->streamid_mask = size - 1;
 	if (id & ID0_SMS) {
-		u32 smr;
+		u32 smr, sid, mask;
 
 		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
-		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
-		if (size == 0) {
+		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
+					   ID0_NUMSMRG_MASK;
+		if (smmu->num_mapping_groups == 0) {
 			dev_err(smmu->dev,
 				"stream-matching supported, but no SMRs present!\n");
 			return -ENODEV;
 		}
 
-		/*
-		 * SMR.ID bits may not be preserved if the corresponding MASK
-		 * bits are set, so check each one separately. We can reject
-		 * masters later if they try to claim IDs outside these masks.
-		 */
-		smr = smmu->streamid_mask << SMR_ID_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-		smmu->streamid_mask = smr >> SMR_ID_SHIFT;
+		if (!(smmu->options & ARM_SMMU_OPT_SKIP_INIT)) {
+			smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
+			smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
+			writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
+			smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
 
-		smr = smmu->streamid_mask << SMR_MASK_SHIFT;
-		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
-		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
-		smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
+			mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
+			sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
+			if ((mask & sid) != sid) {
+				dev_err(smmu->dev,
+					"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
+					mask, sid);
+				return -ENODEV;
+			}
 
-		/* Zero-initialised to mark as invalid */
-		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
-					  GFP_KERNEL);
-		if (!smmu->smrs)
-			return -ENOMEM;
-
-		dev_notice(smmu->dev,
-			   "\tstream matching with %lu register groups, mask 0x%x",
-			   size, smmu->smr_mask_mask);
+			dev_dbg(smmu->dev,
+				   "\tstream matching with %u register groups, mask 0x%x",
+				   smmu->num_mapping_groups, mask);
+		}
+	} else {
+		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
+					   ID0_NUMSIDB_MASK;
 	}
-	/* s2cr->type == 0 means translation, so initialise explicitly */
-	smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
-					 GFP_KERNEL);
-	if (!smmu->s2crs)
-		return -ENOMEM;
-	for (i = 0; i < size; i++)
-		smmu->s2crs[i] = s2cr_init_val;
-
-	smmu->num_mapping_groups = size;
-	mutex_init(&smmu->stream_map_mutex);
 
 	if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
 		smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
@@ -1801,7 +3469,7 @@
 		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
 		return -ENODEV;
 	}
-	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
+	dev_dbg(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
 		   smmu->num_context_banks, smmu->num_s2_context_banks);
 	/*
 	 * Cavium CN88xx erratum #27704.
@@ -1866,34 +3534,62 @@
 		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
 	else
 		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
-	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
+	dev_dbg(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
 		   smmu->pgsize_bitmap);
 
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
-		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
-			   smmu->va_size, smmu->ipa_size);
+		dev_dbg(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
+			smmu->va_size, smmu->ipa_size);
 
 	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
-		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
-			   smmu->ipa_size, smmu->pa_size);
+		dev_dbg(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
+			smmu->ipa_size, smmu->pa_size);
 
 	return 0;
 }
 
+static int arm_smmu_arch_init(struct arm_smmu_device *smmu)
+{
+	if (!smmu->arch_ops)
+		return 0;
+	if (!smmu->arch_ops->init)
+		return 0;
+	return smmu->arch_ops->init(smmu);
+}
+
+static void arm_smmu_arch_device_reset(struct arm_smmu_device *smmu)
+{
+	if (!smmu->arch_ops)
+		return;
+	if (!smmu->arch_ops->device_reset)
+		return;
+	return smmu->arch_ops->device_reset(smmu);
+}
+
 struct arm_smmu_match_data {
 	enum arm_smmu_arch_version version;
 	enum arm_smmu_implementation model;
+	struct arm_smmu_arch_ops *arch_ops;
 };
 
-#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
-static struct arm_smmu_match_data name = { .version = ver, .model = imp }
+#define ARM_SMMU_MATCH_DATA(name, ver, imp, ops)	\
+static struct arm_smmu_match_data name = {		\
+.version = ver,						\
+.model = imp,						\
+.arch_ops = ops,					\
+}							\
 
-ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
-ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
-ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
-ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
-ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
+struct arm_smmu_arch_ops qsmmuv500_arch_ops;
+
+ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU, NULL);
+ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU, NULL);
+ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU, NULL);
+ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500, NULL);
+ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2, NULL);
+ARM_SMMU_MATCH_DATA(qcom_smmuv2, ARM_SMMU_V2, QCOM_SMMUV2, &qsmmuv2_arch_ops);
+ARM_SMMU_MATCH_DATA(qcom_smmuv500, ARM_SMMU_V2, QCOM_SMMUV500,
+		    &qsmmuv500_arch_ops);
 
 static const struct of_device_id arm_smmu_of_match[] = {
 	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
@@ -1902,30 +3598,22 @@
 	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
 	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
 	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
+	{ .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
+	{ .compatible = "qcom,qsmmu-v500", .data = &qcom_smmuv500 },
 	{ },
 };
 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
 
+static int qsmmuv500_tbu_register(struct device *dev, void *data);
 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *of_id;
 	const struct arm_smmu_match_data *data;
 	struct resource *res;
 	struct arm_smmu_device *smmu;
 	struct device *dev = &pdev->dev;
-	int num_irqs, i, err;
-	bool legacy_binding;
-
-	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
-	if (legacy_binding && !using_generic_binding) {
-		if (!using_legacy_binding)
-			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
-		using_legacy_binding = true;
-	} else if (!legacy_binding && !using_legacy_binding) {
-		using_generic_binding = true;
-	} else {
-		dev_err(dev, "not probing due to mismatched DT properties\n");
-		return -ENODEV;
-	}
+	struct rb_node *node;
+	int num_irqs, i, err, num_masters;
 
 	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
 	if (!smmu) {
@@ -1933,10 +3621,15 @@
 		return -ENOMEM;
 	}
 	smmu->dev = dev;
+	spin_lock_init(&smmu->atos_lock);
+	idr_init(&smmu->asid_idr);
+	mutex_init(&smmu->idr_mutex);
 
-	data = of_device_get_match_data(dev);
+	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
+	data = of_id->data;
 	smmu->version = data->version;
 	smmu->model = data->model;
+	smmu->arch_ops = data->arch_ops;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	smmu->base = devm_ioremap_resource(dev, res);
@@ -1980,65 +3673,128 @@
 		smmu->irqs[i] = irq;
 	}
 
+	parse_driver_options(smmu);
+
+	smmu->pwr = arm_smmu_init_power_resources(pdev);
+	if (IS_ERR(smmu->pwr))
+		return PTR_ERR(smmu->pwr);
+
+	err = arm_smmu_power_on(smmu->pwr);
+	if (err)
+		goto out_exit_power_resources;
+
 	err = arm_smmu_device_cfg_probe(smmu);
 	if (err)
-		return err;
+		goto out_power_off;
 
-	parse_driver_options(smmu);
+	i = 0;
+	smmu->masters = RB_ROOT;
+
+	err = arm_smmu_parse_iommus_properties(smmu, &num_masters);
+	if (err)
+		goto out_put_masters;
+
+	dev_dbg(dev, "registered %d master devices\n", num_masters);
+
+	err = arm_smmu_parse_impl_def_registers(smmu);
+	if (err)
+		goto out_put_masters;
 
 	if (smmu->version == ARM_SMMU_V2 &&
 	    smmu->num_context_banks != smmu->num_context_irqs) {
 		dev_err(dev,
-			"found only %d context interrupt(s) but %d required\n",
-			smmu->num_context_irqs, smmu->num_context_banks);
-		return -ENODEV;
+			"found %d context interrupt(s) but have %d context banks. assuming %d context interrupts.\n",
+			smmu->num_context_irqs, smmu->num_context_banks,
+			smmu->num_context_banks);
+		smmu->num_context_irqs = smmu->num_context_banks;
 	}
 
 	for (i = 0; i < smmu->num_global_irqs; ++i) {
-		err = devm_request_irq(smmu->dev, smmu->irqs[i],
-				       arm_smmu_global_fault,
-				       IRQF_SHARED,
-				       "arm-smmu global fault",
-				       smmu);
+		err = devm_request_threaded_irq(smmu->dev, smmu->irqs[i],
+					NULL, arm_smmu_global_fault,
+					IRQF_ONESHOT | IRQF_SHARED,
+					"arm-smmu global fault", smmu);
 		if (err) {
 			dev_err(dev, "failed to request global IRQ %d (%u)\n",
 				i, smmu->irqs[i]);
-			return err;
+			goto out_put_masters;
 		}
 	}
 
-	of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
-	platform_set_drvdata(pdev, smmu);
-	arm_smmu_device_reset(smmu);
+	INIT_LIST_HEAD(&smmu->list);
+	spin_lock(&arm_smmu_devices_lock);
+	list_add(&smmu->list, &arm_smmu_devices);
+	spin_unlock(&arm_smmu_devices_lock);
 
-	/* Oh, for a proper bus abstraction */
-	if (!iommu_present(&platform_bus_type))
-		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
-#ifdef CONFIG_ARM_AMBA
-	if (!iommu_present(&amba_bustype))
-		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
-#endif
-#ifdef CONFIG_PCI
-	if (!iommu_present(&pci_bus_type)) {
-		pci_request_acs();
-		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
-	}
-#endif
+	err = arm_smmu_arch_init(smmu);
+	if (err)
+		goto out_put_masters;
+
+	arm_smmu_device_reset(smmu);
+	arm_smmu_power_off(smmu->pwr);
+
 	return 0;
+
+out_put_masters:
+	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+		struct arm_smmu_master *master
+			= container_of(node, struct arm_smmu_master, node);
+		of_node_put(master->of_node);
+	}
+
+out_power_off:
+	arm_smmu_power_off(smmu->pwr);
+
+out_exit_power_resources:
+	arm_smmu_exit_power_resources(smmu->pwr);
+
+	return err;
 }
 
 static int arm_smmu_device_remove(struct platform_device *pdev)
 {
-	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
+	int i;
+	struct device *dev = &pdev->dev;
+	struct arm_smmu_device *curr, *smmu = NULL;
+	struct rb_node *node;
+
+	spin_lock(&arm_smmu_devices_lock);
+	list_for_each_entry(curr, &arm_smmu_devices, list) {
+		if (curr->dev == dev) {
+			smmu = curr;
+			list_del(&smmu->list);
+			break;
+		}
+	}
+	spin_unlock(&arm_smmu_devices_lock);
 
 	if (!smmu)
 		return -ENODEV;
 
+	if (arm_smmu_power_on(smmu->pwr))
+		return -EINVAL;
+
+	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
+		struct arm_smmu_master *master
+			= container_of(node, struct arm_smmu_master, node);
+		of_node_put(master->of_node);
+	}
+
 	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
-		dev_err(&pdev->dev, "removing device with active domains!\n");
+		dev_err(dev, "removing device with active domains!\n");
+
+	for (i = 0; i < smmu->num_global_irqs; ++i)
+		devm_free_irq(smmu->dev, smmu->irqs[i], smmu);
+
+	idr_destroy(&smmu->asid_idr);
 
 	/* Turn the thing off */
 	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
+	arm_smmu_power_off(smmu->pwr);
+
+	arm_smmu_exit_power_resources(smmu->pwr);
+
 	return 0;
 }
 
@@ -2053,14 +3809,41 @@
 
 static int __init arm_smmu_init(void)
 {
-	static bool registered;
-	int ret = 0;
+	struct device_node *np;
+	int ret;
 
-	if (!registered) {
-		ret = platform_driver_register(&arm_smmu_driver);
-		registered = !ret;
+	/*
+	 * Play nice with systems that don't have an ARM SMMU by checking that
+	 * an ARM SMMU exists in the system before proceeding with the driver
+	 * and IOMMU bus operation registration.
+	 */
+	np = of_find_matching_node(NULL, arm_smmu_of_match);
+	if (!np)
+		return 0;
+
+	of_node_put(np);
+
+	ret = platform_driver_register(&arm_smmu_driver);
+	if (ret)
+		return ret;
+
+	/* Oh, for a proper bus abstraction */
+	if (!iommu_present(&platform_bus_type))
+		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+
+#ifdef CONFIG_ARM_AMBA
+	if (!iommu_present(&amba_bustype))
+		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
+
+#ifdef CONFIG_PCI
+	if (!iommu_present(&pci_bus_type)) {
+		pci_request_acs();
+		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
 	}
-	return ret;
+#endif
+
+	return 0;
 }
 
 static void __exit arm_smmu_exit(void)
@@ -2071,24 +3854,264 @@
 subsys_initcall(arm_smmu_init);
 module_exit(arm_smmu_exit);
 
-static int __init arm_smmu_of_init(struct device_node *np)
-{
-	int ret = arm_smmu_init();
+#define DEBUG_SID_HALT_REG		0x0
+#define DEBUG_SID_HALT_VAL		(0x1 << 16)
 
+#define DEBUG_SR_HALT_ACK_REG		0x20
+#define DEBUG_SR_HALT_ACK_VAL		(0x1 << 1)
+
+#define TBU_DBG_TIMEOUT_US		30000
+
+struct qsmmuv500_tbu_device {
+	struct list_head		list;
+	struct device			*dev;
+	struct arm_smmu_device		*smmu;
+	void __iomem			*base;
+	void __iomem			*status_reg;
+
+	struct arm_smmu_power_resources *pwr;
+
+	/* Protects halt count */
+	spinlock_t			halt_lock;
+	u32				halt_count;
+};
+
+static int qsmmuv500_tbu_power_on_all(struct arm_smmu_device *smmu)
+{
+	struct qsmmuv500_tbu_device *tbu;
+	struct list_head *list = smmu->archdata;
+	int ret = 0;
+
+	list_for_each_entry(tbu, list, list) {
+		ret = arm_smmu_power_on(tbu->pwr);
+		if (ret)
+			break;
+	}
+	if (!ret)
+		return 0;
+
+	list_for_each_entry_continue_reverse(tbu, list, list) {
+		arm_smmu_power_off(tbu->pwr);
+	}
+	return ret;
+}
+
+static void qsmmuv500_tbu_power_off_all(struct arm_smmu_device *smmu)
+{
+	struct qsmmuv500_tbu_device *tbu;
+	struct list_head *list = smmu->archdata;
+
+	list_for_each_entry_reverse(tbu, list, list) {
+		arm_smmu_power_off(tbu->pwr);
+	}
+}
+
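+/*
+ * TBU halt requests are reference counted under halt_lock so nested
+ * callers can halt and resume independently; the DEBUG_SID halt bit is
+ * only toggled on the first halt and the final resume.
+ */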
+static int qsmmuv500_tbu_halt(struct qsmmuv500_tbu_device *tbu)
+{
+	unsigned long flags;
+	u32 val;
+	void __iomem *base;
+
+	spin_lock_irqsave(&tbu->halt_lock, flags);
+	if (tbu->halt_count) {
+		tbu->halt_count++;
+		spin_unlock_irqrestore(&tbu->halt_lock, flags);
+		return 0;
+	}
+
+	base = tbu->base;
+	val = readl_relaxed(base + DEBUG_SID_HALT_REG);
+	val |= DEBUG_SID_HALT_VAL;
+	writel_relaxed(val, base + DEBUG_SID_HALT_REG);
+
+	if (readl_poll_timeout_atomic(base + DEBUG_SR_HALT_ACK_REG,
+					val, (val & DEBUG_SR_HALT_ACK_VAL),
+					0, TBU_DBG_TIMEOUT_US)) {
+		dev_err(tbu->dev, "Couldn't halt TBU!\n");
+		spin_unlock_irqrestore(&tbu->halt_lock, flags);
+		return -ETIMEDOUT;
+	}
+
+	tbu->halt_count = 1;
+	spin_unlock_irqrestore(&tbu->halt_lock, flags);
+	return 0;
+}
+
+static void qsmmuv500_tbu_resume(struct qsmmuv500_tbu_device *tbu)
+{
+	unsigned long flags;
+	u32 val;
+	void __iomem *base;
+
+	spin_lock_irqsave(&tbu->halt_lock, flags);
+	if (!tbu->halt_count) {
+		WARN(1, "%s: bad tbu->halt_count", dev_name(tbu->dev));
+		spin_unlock_irqrestore(&tbu->halt_lock, flags);
+		return;
+
+	} else if (tbu->halt_count > 1) {
+		tbu->halt_count--;
+		spin_unlock_irqrestore(&tbu->halt_lock, flags);
+		return;
+	}
+
+	base = tbu->base;
+	val = readl_relaxed(base + DEBUG_SID_HALT_REG);
+	val &= ~DEBUG_SID_HALT_VAL;
+	writel_relaxed(val, base + DEBUG_SID_HALT_REG);
+
+	tbu->halt_count = 0;
+	spin_unlock_irqrestore(&tbu->halt_lock, flags);
+}
+
+static int qsmmuv500_halt_all(struct arm_smmu_device *smmu)
+{
+	struct qsmmuv500_tbu_device *tbu;
+	struct list_head *list = smmu->archdata;
+	int ret = 0;
+
+	list_for_each_entry(tbu, list, list) {
+		ret = qsmmuv500_tbu_halt(tbu);
+		if (ret)
+			break;
+	}
+
+	if (!ret)
+		return 0;
+
+	list_for_each_entry_continue_reverse(tbu, list, list) {
+		qsmmuv500_tbu_resume(tbu);
+	}
+	return ret;
+}
+
+static void qsmmuv500_resume_all(struct arm_smmu_device *smmu)
+{
+	struct qsmmuv500_tbu_device *tbu;
+	struct list_head *list = smmu->archdata;
+
+	list_for_each_entry(tbu, list, list) {
+		qsmmuv500_tbu_resume(tbu);
+	}
+}
+
+static void qsmmuv500_device_reset(struct arm_smmu_device *smmu)
+{
+	int i, ret;
+	struct arm_smmu_impl_def_reg *regs = smmu->impl_def_attach_registers;
+
+	ret = qsmmuv500_tbu_power_on_all(smmu);
+	if (ret)
+		return;
+
+	/* Program implementation defined registers */
+	qsmmuv500_halt_all(smmu);
+	for (i = 0; i < smmu->num_impl_def_attach_registers; ++i)
+		writel_relaxed(regs[i].value,
+			ARM_SMMU_GR0(smmu) + regs[i].offset);
+	qsmmuv500_resume_all(smmu);
+	qsmmuv500_tbu_power_off_all(smmu);
+}
+
+static int qsmmuv500_tbu_register(struct device *dev, void *data)
+{
+	struct arm_smmu_device *smmu = data;
+	struct qsmmuv500_tbu_device *tbu;
+	struct list_head *list = smmu->archdata;
+
+	if (!dev->driver) {
+		dev_err(dev, "TBU failed probe, QSMMUV500 cannot continue!\n");
+		return -EINVAL;
+	}
+
+	tbu = dev_get_drvdata(dev);
+
+	INIT_LIST_HEAD(&tbu->list);
+	tbu->smmu = smmu;
+	list_add(&tbu->list, list);
+	return 0;
+}
+
+static int qsmmuv500_arch_init(struct arm_smmu_device *smmu)
+{
+	struct device *dev = smmu->dev;
+	struct list_head *list;
+	int ret;
+
+	list = devm_kzalloc(dev, sizeof(*list), GFP_KERNEL);
+	if (!list)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(list);
+	smmu->archdata = list;
+
+	ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
 	if (ret)
 		return ret;
 
-	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
-		return -ENODEV;
+	/* Attempt to register child devices */
+	ret = device_for_each_child(dev, smmu, qsmmuv500_tbu_register);
+	if (ret)
+		return -EINVAL;
 
 	return 0;
 }
-IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
-IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
-IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);
+
+struct arm_smmu_arch_ops qsmmuv500_arch_ops = {
+	.init = qsmmuv500_arch_init,
+	.device_reset = qsmmuv500_device_reset,
+};
+
+static const struct of_device_id qsmmuv500_tbu_of_match[] = {
+	{.compatible = "qcom,qsmmuv500-tbu"},
+	{}
+};
+
+static int qsmmuv500_tbu_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct device *dev = &pdev->dev;
+	struct qsmmuv500_tbu_device *tbu;
+
+	tbu = devm_kzalloc(dev, sizeof(*tbu), GFP_KERNEL);
+	if (!tbu)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&tbu->list);
+	tbu->dev = dev;
+	spin_lock_init(&tbu->halt_lock);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
+	tbu->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(tbu->base))
+		return PTR_ERR(tbu->base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "status-reg");
+	tbu->status_reg = devm_ioremap_resource(dev, res);
+	if (IS_ERR(tbu->status_reg))
+		return PTR_ERR(tbu->status_reg);
+
+	tbu->pwr = arm_smmu_init_power_resources(pdev);
+	if (IS_ERR(tbu->pwr))
+		return PTR_ERR(tbu->pwr);
+
+	dev_set_drvdata(dev, tbu);
+	return 0;
+}
+
+static struct platform_driver qsmmuv500_tbu_driver = {
+	.driver	= {
+		.name		= "qsmmuv500-tbu",
+		.of_match_table	= of_match_ptr(qsmmuv500_tbu_of_match),
+	},
+	.probe	= qsmmuv500_tbu_probe,
+};
+
+static int __init qsmmuv500_tbu_init(void)
+{
+	return platform_driver_register(&qsmmuv500_tbu_driver);
+}
+subsys_initcall(qsmmuv500_tbu_init);
 
 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
new file mode 100644
index 0000000..c5cbdb7
--- /dev/null
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -0,0 +1,709 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/dma-contiguous.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-mapping-fast.h>
+#include <linux/io-pgtable-fast.h>
+#include <linux/vmalloc.h>
+#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
+
+/* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
+#define FAST_PAGE_SHIFT		12
+#define FAST_PAGE_SIZE (1UL << FAST_PAGE_SHIFT)
+#define FAST_PAGE_MASK (~(PAGE_SIZE - 1))
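+/*
+ * Note: FAST_PAGE_MASK is derived from PAGE_SIZE, so it only matches
+ * FAST_PAGE_SIZE on kernels built with 4K pages; the fast mapper assumes
+ * a 4K IOMMU granule throughout.
+ */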
+#define FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
+
+/*
+ * Checks if the allocated range (ending at @end) covered the upcoming
+ * stale bit.  We don't need to know exactly where the range starts since
+ * we already know where the candidate search range started.  If, starting
+ * from the beginning of the candidate search range, we had to step over
+ * (or landed directly on top of) the upcoming stale bit, then we return
+ * true.
+ *
+ * Due to wrapping, there are two scenarios we'll need to check: (1) if the
+ * range [search_start, upcoming_stale] spans 0 (i.e. search_start >
+ * upcoming_stale), and (2) if the range [search_start, upcoming_stale]
+ * does *not* span 0 (i.e. search_start <= upcoming_stale).  For each of
+ * those two scenarios we then determine whether the allocation wrapped
+ * past the top of the bitmap (@end < search_start), and from that whether
+ * it crossed (or landed on top of) the upcoming stale bit.
+ */
+static bool __bit_covered_stale(unsigned long upcoming_stale,
+				unsigned long search_start,
+				unsigned long end)
+{
+	if (search_start > upcoming_stale) {
+		if (end >= search_start) {
+			/*
+			 * We started searching above upcoming_stale and we
+			 * didn't wrap, so we couldn't have crossed
+			 * upcoming_stale.
+			 */
+			return false;
+		}
+		/*
+		 * We wrapped. Did we cross (or land on top of)
+		 * upcoming_stale?
+		 */
+		return end >= upcoming_stale;
+	}
+
+	if (search_start <= upcoming_stale) {
+		if (end >= search_start) {
+			/*
+			 * We didn't wrap.  Did we cross (or land on top
+			 * of) upcoming_stale?
+			 */
+			return end >= upcoming_stale;
+		}
+		/*
+		 * We wrapped. So we must have crossed upcoming_stale
+		 * (since we started searching below it).
+		 */
+		return true;
+	}
+
+	/* we should have covered all logical combinations... */
+	WARN_ON(1);
+	return true;
+}
+
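+/*
+ * IOVA allocation is a rotating first-fit search over a bitmap of 4K
+ * pages, aligned to the natural order of the request. The rotation lets
+ * TLB maintenance be deferred: a full invalidate is only issued when the
+ * allocator is about to hand out a VA that may still have a stale TLB
+ * entry (see __bit_covered_stale() above).
+ */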
+static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
+					 unsigned long attrs,
+					 size_t size)
+{
+	unsigned long bit, prev_search_start, nbits = size >> FAST_PAGE_SHIFT;
+	unsigned long align = (1 << get_order(size)) - 1;
+
+	bit = bitmap_find_next_zero_area(
+		mapping->bitmap, mapping->num_4k_pages, mapping->next_start,
+		nbits, align);
+	if (unlikely(bit > mapping->num_4k_pages)) {
+		/* try wrapping */
+		mapping->next_start = 0; /* TODO: SHOULD I REALLY DO THIS?!? */
+		bit = bitmap_find_next_zero_area(
+			mapping->bitmap, mapping->num_4k_pages, 0, nbits,
+			align);
+		if (unlikely(bit > mapping->num_4k_pages))
+			return DMA_ERROR_CODE;
+	}
+
+	bitmap_set(mapping->bitmap, bit, nbits);
+	prev_search_start = mapping->next_start;
+	mapping->next_start = bit + nbits;
+	if (unlikely(mapping->next_start >= mapping->num_4k_pages))
+		mapping->next_start = 0;
+
+	/*
+	 * If we just re-allocated a VA whose TLB hasn't been invalidated
+	 * since it was last used and unmapped, we need to invalidate it
+	 * here.  We actually invalidate the entire TLB so that we don't
+	 * have to invalidate the TLB again until we wrap back around.
+	 */
+	if (mapping->have_stale_tlbs &&
+	    __bit_covered_stale(mapping->upcoming_stale_bit,
+				prev_search_start,
+				bit + nbits - 1)) {
+		bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
+
+		iommu_tlbiall(mapping->domain);
+		mapping->have_stale_tlbs = false;
+		av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
+	}
+
+	return (bit << FAST_PAGE_SHIFT) + mapping->base;
+}
+
+/*
+ * Checks whether the candidate bit will be allocated sooner than the
+ * current upcoming stale bit.  We can say candidate will be upcoming
+ * sooner than the current upcoming stale bit if it lies between the
+ * starting bit of the next search range and the upcoming stale bit
+ * (allowing for wrap-around).
+ *
+ * Stated differently, we're checking the relative ordering of three
+ * unsigned numbers.  So we need to check all 6 (i.e. 3!) permutations,
+ * namely:
+ *
+ *     0 |---A---B---C---| TOP (Case 1)
+ *     0 |---A---C---B---| TOP (Case 2)
+ *     0 |---B---A---C---| TOP (Case 3)
+ *     0 |---B---C---A---| TOP (Case 4)
+ *     0 |---C---A---B---| TOP (Case 5)
+ *     0 |---C---B---A---| TOP (Case 6)
+ *
+ * Note that since we're allowing numbers to wrap, the following three
+ * scenarios are all equivalent for Case 1:
+ *
+ *     0 |---A---B---C---| TOP
+ *     0 |---C---A---B---| TOP (C has wrapped. This is Case 5.)
+ *     0 |---B---C---A---| TOP (C and B have wrapped. This is Case 4.)
+ *
+ * In any of these cases, if we start searching from A, we will find B
+ * before we find C.
+ *
+ * We can also find two equivalent cases for Case 2:
+ *
+ *     0 |---A---C---B---| TOP
+ *     0 |---B---A---C---| TOP (B has wrapped. This is Case 3.)
+ *     0 |---C---B---A---| TOP (B and C have wrapped. This is Case 6.)
+ *
+ * In any of these cases, if we start searching from A, we will find C
+ * before we find B.
+ */
+static bool __bit_is_sooner(unsigned long candidate,
+			    struct dma_fast_smmu_mapping *mapping)
+{
+	unsigned long A = mapping->next_start;
+	unsigned long B = candidate;
+	unsigned long C = mapping->upcoming_stale_bit;
+
+	if ((A < B && B < C) ||	/* Case 1 */
+	    (C < A && A < B) ||	/* Case 5 */
+	    (B < C && C < A))	/* Case 4 */
+		return true;
+
+	if ((A < C && C < B) ||	/* Case 2 */
+	    (B < A && A < C) ||	/* Case 3 */
+	    (C < B && B < A))	/* Case 6 */
+		return false;
+
+	/*
+	 * For simplicity, we've been ignoring the possibility of any of
+	 * our three numbers being equal.  Handle those cases here (they
+	 * shouldn't happen often in practice).
+	 */
+
+	/*
+	 * If candidate is the next bit to be searched then it's definitely
+	 * sooner.
+	 */
+	if (A == B)
+		return true;
+
+	/*
+	 * If candidate is the next upcoming stale bit we'll return false
+	 * to avoid doing `upcoming = candidate' in the caller (which would
+	 * be useless since they're already equal)
+	 */
+	if (B == C)
+		return false;
+
+	/*
+	 * If next start is the upcoming stale bit then candidate can't
+	 * possibly be sooner.  The "soonest" bit is already selected.
+	 */
+	if (A == C)
+		return false;
+
+	/* We should have covered all logical combinations. */
+	WARN(1, "Well, that's awkward. A=%ld, B=%ld, C=%ld\n", A, B, C);
+	return true;
+}
+
+static void __fast_smmu_free_iova(struct dma_fast_smmu_mapping *mapping,
+				  dma_addr_t iova, size_t size)
+{
+	unsigned long start_bit = (iova - mapping->base) >> FAST_PAGE_SHIFT;
+	unsigned long nbits = size >> FAST_PAGE_SHIFT;
+
+	/*
+	 * We don't invalidate TLBs on unmap.  We invalidate TLBs on map
+	 * when we're about to re-allocate a VA that was previously
+	 * unmapped but hasn't yet been invalidated.  So we need to keep
+	 * track of which bit is the closest to being re-allocated here.
+	 */
+	if (__bit_is_sooner(start_bit, mapping))
+		mapping->upcoming_stale_bit = start_bit;
+
+	bitmap_clear(mapping->bitmap, start_bit, nbits);
+	mapping->have_stale_tlbs = true;
+}
+
+static void __fast_dma_page_cpu_to_dev(struct page *page, unsigned long off,
+				       size_t size, enum dma_data_direction dir)
+{
+	__dma_map_area(page_address(page) + off, size, dir);
+}
+
+static void __fast_dma_page_dev_to_cpu(struct page *page, unsigned long off,
+				       size_t size, enum dma_data_direction dir)
+{
+	__dma_unmap_area(page_address(page) + off, size, dir);
+
+	/* TODO: WHAT IS THIS? */
+	/*
+	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 */
+	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+		set_bit(PG_dcache_clean, &page->flags);
+}
+
+static int __fast_dma_direction_to_prot(enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		return IOMMU_READ | IOMMU_WRITE;
+	case DMA_TO_DEVICE:
+		return IOMMU_READ;
+	case DMA_FROM_DEVICE:
+		return IOMMU_WRITE;
+	default:
+		return 0;
+	}
+}
+
+static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
+				   unsigned long offset, size_t size,
+				   enum dma_data_direction dir,
+				   unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	dma_addr_t iova;
+	unsigned long flags;
+	av8l_fast_iopte *pmd;
+	phys_addr_t phys_plus_off = page_to_phys(page) + offset;
+	phys_addr_t phys_to_map = round_down(phys_plus_off, FAST_PAGE_SIZE);
+	unsigned long offset_from_phys_to_map = phys_plus_off & ~FAST_PAGE_MASK;
+	size_t len = ALIGN(size + offset_from_phys_to_map, FAST_PAGE_SIZE);
+	int nptes = len >> FAST_PAGE_SHIFT;
+	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
+	int prot = __fast_dma_direction_to_prot(dir);
+
+	if (attrs & DMA_ATTR_STRONGLY_ORDERED)
+		prot |= IOMMU_MMIO;
+
+	if (!skip_sync)
+		__fast_dma_page_cpu_to_dev(phys_to_page(phys_to_map),
+					   offset_from_phys_to_map, size, dir);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+
+	iova = __fast_smmu_alloc_iova(mapping, attrs, len);
+
+	if (unlikely(iova == DMA_ERROR_CODE))
+		goto fail;
+
+	pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+
+	if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
+		goto fail_free_iova;
+
+	if (!skip_sync)		/* TODO: should ask SMMU if coherent */
+		dmac_clean_range(pmd, pmd + nptes);
+
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	return iova + offset_from_phys_to_map;
+
+fail_free_iova:
+	__fast_smmu_free_iova(mapping, iova, size);
+fail:
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	return DMA_ERROR_CODE;
+}
+
+static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
+			       size_t size, enum dma_data_direction dir,
+			       unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	unsigned long flags;
+	av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+	unsigned long offset = iova & ~FAST_PAGE_MASK;
+	size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
+	int nptes = len >> FAST_PAGE_SHIFT;
+	struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
+	bool skip_sync = (attrs & DMA_ATTR_SKIP_CPU_SYNC);
+
+	if (!skip_sync)
+		__fast_dma_page_dev_to_cpu(page, offset, size, dir);
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	av8l_fast_unmap_public(pmd, len);
+	if (!skip_sync)		/* TODO: should ask SMMU if coherent */
+		dmac_clean_range(pmd, pmd + nptes);
+	__fast_smmu_free_iova(mapping, iova, len);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+}
+
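+/*
+ * Scatterlist mapping is not supported by the fast mapper; fail the
+ * request outright.
+ */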
+static int fast_smmu_map_sg(struct device *dev, struct scatterlist *sg,
+			    int nents, enum dma_data_direction dir,
+			    unsigned long attrs)
+{
+	return -EINVAL;
+}
+
+static void fast_smmu_unmap_sg(struct device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction dir,
+			       unsigned long attrs)
+{
+	WARN_ON_ONCE(1);
+}
+
+static void __fast_smmu_free_pages(struct page **pages, int count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		__free_page(pages[i]);
+	kvfree(pages);
+}
+
+static struct page **__fast_smmu_alloc_pages(unsigned int count, gfp_t gfp)
+{
+	struct page **pages;
+	unsigned int i = 0, array_size = count * sizeof(*pages);
+
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, GFP_KERNEL);
+	else
+		pages = vzalloc(array_size);
+	if (!pages)
+		return NULL;
+
+	/* IOMMU can map any pages, so highmem can also be used here */
+	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
+
+	for (i = 0; i < count; ++i) {
+		struct page *page = alloc_page(gfp);
+
+		if (!page) {
+			__fast_smmu_free_pages(pages, i);
+			return NULL;
+		}
+		pages[i] = page;
+	}
+	return pages;
+}
+
+static void *fast_smmu_alloc(struct device *dev, size_t size,
+			     dma_addr_t *handle, gfp_t gfp,
+			     unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct sg_table sgt;
+	dma_addr_t dma_addr, iova_iter;
+	void *addr;
+	av8l_fast_iopte *ptep;
+	unsigned long flags;
+	struct sg_mapping_iter miter;
+	unsigned int count = ALIGN(size, SZ_4K) >> PAGE_SHIFT;
+	int prot = IOMMU_READ | IOMMU_WRITE; /* TODO: extract from attrs */
+	pgprot_t remap_prot = pgprot_writecombine(PAGE_KERNEL);
+	struct page **pages;
+
+	*handle = DMA_ERROR_CODE;
+
+	pages = __fast_smmu_alloc_pages(count, gfp);
+	if (!pages) {
+		dev_err(dev, "no pages\n");
+		return NULL;
+	}
+
+	size = ALIGN(size, SZ_4K);
+	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, gfp)) {
+		dev_err(dev, "no sg tablen\n");
+		goto out_free_pages;
+	}
+
+	if (!(prot & IOMMU_CACHE)) {
+		/*
+		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
+		 * sufficient here, so skip it by using the "wrong" direction.
+		 */
+		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
+			       SG_MITER_FROM_SG);
+		while (sg_miter_next(&miter))
+			__dma_flush_area(miter.addr, miter.length);
+		sg_miter_stop(&miter);
+	}
+
+	spin_lock_irqsave(&mapping->lock, flags);
+	dma_addr = __fast_smmu_alloc_iova(mapping, attrs, size);
+	if (dma_addr == DMA_ERROR_CODE) {
+		dev_err(dev, "no iova\n");
+		spin_unlock_irqrestore(&mapping->lock, flags);
+		goto out_free_sg;
+	}
+	iova_iter = dma_addr;
+	sg_miter_start(&miter, sgt.sgl, sgt.orig_nents,
+		       SG_MITER_FROM_SG | SG_MITER_ATOMIC);
+	while (sg_miter_next(&miter)) {
+		int nptes = miter.length >> FAST_PAGE_SHIFT;
+
+		ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter);
+		if (unlikely(av8l_fast_map_public(
+				     ptep, page_to_phys(miter.page),
+				     miter.length, prot))) {
+			dev_err(dev, "no map public\n");
+			/* TODO: unwind previously successful mappings */
+			goto out_free_iova;
+		}
+		dmac_clean_range(ptep, ptep + nptes);
+		iova_iter += miter.length;
+	}
+	sg_miter_stop(&miter);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+
+	addr = dma_common_pages_remap(pages, size, VM_USERMAP, remap_prot,
+				      __builtin_return_address(0));
+	if (!addr) {
+		dev_err(dev, "no common pages\n");
+		goto out_unmap;
+	}
+
+	*handle = dma_addr;
+	sg_free_table(&sgt);
+	return addr;
+
+out_unmap:
+	/* need to take the lock again for page tables and iova */
+	spin_lock_irqsave(&mapping->lock, flags);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
+	av8l_fast_unmap_public(ptep, size);
+	dmac_clean_range(ptep, ptep + count);
+out_free_iova:
+	__fast_smmu_free_iova(mapping, dma_addr, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+out_free_sg:
+	sg_free_table(&sgt);
+out_free_pages:
+	__fast_smmu_free_pages(pages, count);
+	return NULL;
+}
+
+static void fast_smmu_free(struct device *dev, size_t size,
+			   void *vaddr, dma_addr_t dma_handle,
+			   unsigned long attrs)
+{
+	struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
+	struct vm_struct *area;
+	struct page **pages;
+	size_t count = ALIGN(size, SZ_4K) >> FAST_PAGE_SHIFT;
+	av8l_fast_iopte *ptep;
+	unsigned long flags;
+
+	size = ALIGN(size, SZ_4K);
+
+	area = find_vm_area(vaddr);
+	if (WARN_ON_ONCE(!area))
+		return;
+
+	pages = area->pages;
+	dma_common_free_remap(vaddr, size, VM_USERMAP, false);
+	ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
+	spin_lock_irqsave(&mapping->lock, flags);
+	av8l_fast_unmap_public(ptep, size);
+	dmac_clean_range(ptep, ptep + count);
+	__fast_smmu_free_iova(mapping, dma_handle, size);
+	spin_unlock_irqrestore(&mapping->lock, flags);
+	__fast_smmu_free_pages(pages, count);
+}
+
+static int fast_smmu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+				void *cpu_addr, dma_addr_t dma_addr,
+				size_t size, unsigned long attrs)
+{
+	struct vm_struct *area;
+	unsigned long uaddr = vma->vm_start;
+	struct page **pages;
+	int i, nr_pages, ret = 0;
+
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+	area = find_vm_area(cpu_addr);
+	if (!area)
+		return -EINVAL;
+
+	pages = area->pages;
+	nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	for (i = vma->vm_pgoff; i < nr_pages && uaddr < vma->vm_end; i++) {
+		ret = vm_insert_page(vma, uaddr, pages[i]);
+		if (ret)
+			break;
+		uaddr += PAGE_SIZE;
+	}
+
+	return ret;
+}
+
+static int fast_smmu_dma_supported(struct device *dev, u64 mask)
+{
+	return mask <= 0xffffffff;
+}
+
+static int fast_smmu_mapping_error(struct device *dev,
+				   dma_addr_t dma_addr)
+{
+	return dma_addr == DMA_ERROR_CODE;
+}
+
+static void __fast_smmu_mapped_over_stale(struct dma_fast_smmu_mapping *fast,
+					  void *data)
+{
+	av8l_fast_iopte *ptep = data;
+	dma_addr_t iova;
+	unsigned long bitmap_idx;
+
+	bitmap_idx = (unsigned long)(ptep - fast->pgtbl_pmds);
+	iova = bitmap_idx << FAST_PAGE_SHIFT;
+	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
+	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
+	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
+		fast->pgtbl_pmds, ptep - fast->pgtbl_pmds);
+	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
+		       32, 8, fast->bitmap, fast->bitmap_size, false);
+}
+
+static int fast_smmu_notify(struct notifier_block *self,
+			    unsigned long action, void *data)
+{
+	struct dma_fast_smmu_mapping *fast = container_of(
+		self, struct dma_fast_smmu_mapping, notifier);
+
+	switch (action) {
+	case MAPPED_OVER_STALE_TLB:
+		__fast_smmu_mapped_over_stale(fast, data);
+		return NOTIFY_OK;
+	default:
+		WARN(1, "Unhandled notifier action");
+		return NOTIFY_DONE;
+	}
+}
+
+static const struct dma_map_ops fast_smmu_dma_ops = {
+	.alloc = fast_smmu_alloc,
+	.free = fast_smmu_free,
+	.mmap = fast_smmu_mmap_attrs,
+	.map_page = fast_smmu_map_page,
+	.unmap_page = fast_smmu_unmap_page,
+	.map_sg = fast_smmu_map_sg,
+	.unmap_sg = fast_smmu_unmap_sg,
+	.dma_supported = fast_smmu_dma_supported,
+	.mapping_error = fast_smmu_mapping_error,
+};
+
+/**
+ * __fast_smmu_create_mapping_sized
+ * @base: bottom of the VA range
+ * @size: size of the VA range in bytes
+ *
+ * Creates a mapping structure which holds information about used/unused IO
+ * address ranges, which is required to perform mapping with IOMMU aware
+ * functions.  The only VA range supported is [0, 4GB).
+ *
+ * The client device needs to be attached to the mapping with the
+ * fast_smmu_attach_device() function.
+ */
+static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
+	dma_addr_t base, size_t size)
+{
+	struct dma_fast_smmu_mapping *fast;
+
+	fast = kzalloc(sizeof(struct dma_fast_smmu_mapping), GFP_KERNEL);
+	if (!fast)
+		goto err;
+
+	fast->base = base;
+	fast->size = size;
+	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
+	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);
+
+	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL);
+	if (!fast->bitmap)
+		goto err2;
+
+	spin_lock_init(&fast->lock);
+
+	return fast;
+err2:
+	kfree(fast);
+err:
+	return ERR_PTR(-ENOMEM);
+}
+
+/**
+ * fast_smmu_attach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping structure (returned from
+ *	fast_smmu_create_mapping)
+ *
+ * Attaches the specified io address space mapping to the provided device;
+ * this replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version. More than one client might be attached to
+ * the same io address space mapping.
+ */
+int fast_smmu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping)
+{
+	int atomic_domain = 1;
+	struct iommu_domain *domain = mapping->domain;
+	struct iommu_pgtbl_info info;
+	size_t size = mapping->bits << PAGE_SHIFT;
+
+	if (mapping->base + size > (SZ_1G * 4ULL))
+		return -EINVAL;
+
+	if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
+				  &atomic_domain))
+		return -EINVAL;
+
+	mapping->fast = __fast_smmu_create_mapping_sized(mapping->base, size);
+	if (IS_ERR(mapping->fast))
+		return -ENOMEM;
+	mapping->fast->domain = domain;
+	mapping->fast->dev = dev;
+
+	if (iommu_attach_device(domain, dev))
+		return -EINVAL;
+
+	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_PGTBL_INFO,
+				  &info)) {
+		dev_err(dev, "Couldn't get page table info\n");
+		fast_smmu_detach_device(dev, mapping);
+		return -EINVAL;
+	}
+	mapping->fast->pgtbl_pmds = info.pmds;
+
+	mapping->fast->notifier.notifier_call = fast_smmu_notify;
+	av8l_register_notify(&mapping->fast->notifier);
+
+	dev->archdata.mapping = mapping;
+	set_dma_ops(dev, &fast_smmu_dma_ops);
+
+	return 0;
+}
+EXPORT_SYMBOL(fast_smmu_attach_device);
+
+/**
+ * fast_smmu_detach_device
+ * @dev: valid struct device pointer
+ * @mapping: io address space mapping previously attached to @dev
+ *
+ * Detaches the provided device from a previously attached mapping.
+ * This clears the device's dma operations (dma_map_ops pointer).
+ */
+void fast_smmu_detach_device(struct device *dev,
+			     struct dma_iommu_mapping *mapping)
+{
+	iommu_detach_device(mapping->domain, dev);
+	dev->archdata.mapping = NULL;
+	set_dma_ops(dev, NULL);
+
+	kfree(mapping->fast->bitmap);
+	kfree(mapping->fast);
+}
+EXPORT_SYMBOL(fast_smmu_detach_device);
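/*
 * Editor's illustration (not part of this patch): how a client driver
 * might move a device onto the fast-SMMU DMA ops and back.  The
 * dma_iommu_mapping is assumed to come from arm_iommu_create_mapping()
 * (asm/dma-iommu.h); the bus type and VA window are examples only, and
 * mapping teardown (e.g. arm_iommu_release_mapping()) is omitted.
 */
static int example_use_fast_smmu(struct device *dev)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* the fast mapper requires the whole VA window to sit below 4GB */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   SZ_256M, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = fast_smmu_attach_device(dev, mapping);
	if (ret)
		return ret;

	/* dev now uses fast_smmu_dma_ops for all streaming DMA */

	fast_smmu_detach_device(dev, mapping);
	return 0;
}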
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index f5c90e1..f00c142 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -22,6 +22,7 @@
 
 #include <linux/iommu.h>
 #include <linux/kernel.h>
+#include <linux/scatterlist.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
 #include <linux/types.h>
@@ -101,8 +102,10 @@
 					 ARM_LPAE_PTE_ATTR_HI_MASK)
 
 /* Stage-1 PTE */
-#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
-#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_AP_PRIV_RW		(((arm_lpae_iopte)0) << 6)
+#define ARM_LPAE_PTE_AP_RW		(((arm_lpae_iopte)1) << 6)
+#define ARM_LPAE_PTE_AP_PRIV_RO		(((arm_lpae_iopte)2) << 6)
+#define ARM_LPAE_PTE_AP_RO		(((arm_lpae_iopte)3) << 6)
 #define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
 #define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
 
@@ -162,13 +165,15 @@
 #define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
 #define ARM_LPAE_MAIR_ATTR_NC		0x44
 #define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
+#define ARM_LPAE_MAIR_ATTR_UPSTREAM	0xf4
 #define ARM_LPAE_MAIR_ATTR_IDX_NC	0
 #define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
 #define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
+#define ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM	3
 
 /* IOPTE accessors */
-#define iopte_deref(pte,d)					\
-	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
+#define iopte_deref(pte, d)						\
+	(__va(iopte_val(pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
 	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))
 
 #define iopte_type(pte,l)					\
@@ -200,7 +205,78 @@
 
 typedef u64 arm_lpae_iopte;
 
+/*
+ * We'll use some ignored bits in table entries to keep track of the number
+ * of page mappings beneath the table.  The maximum number of entries
+ * beneath any table mapping in armv8 is 8192 (which is possible at the
+ * 2nd- and 3rd-level when using a 64K granule size).  The bits at our
+ * disposal are:
+ *
+ *     4k granule: [58..52], [11..2]
+ *    64k granule: [58..52], [15..2]
+ *
+ * [58..52] and [11..2] provide enough bits to track table mappings at any
+ * level for any granule, so we'll use those.
+ */
+#define BOTTOM_IGNORED_MASK 0x3ff
+#define BOTTOM_IGNORED_SHIFT 2
+#define BOTTOM_IGNORED_NUM_BITS 10
+#define TOP_IGNORED_MASK 0x7fULL
+#define TOP_IGNORED_SHIFT 52
+#define IOPTE_RESERVED_MASK ((BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT) | \
+			     (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
+
+static arm_lpae_iopte iopte_val(arm_lpae_iopte table_pte)
+{
+	return table_pte & ~IOPTE_RESERVED_MASK;
+}
+
+static arm_lpae_iopte _iopte_bottom_ignored_val(arm_lpae_iopte table_pte)
+{
+	return (table_pte & (BOTTOM_IGNORED_MASK << BOTTOM_IGNORED_SHIFT))
+		>> BOTTOM_IGNORED_SHIFT;
+}
+
+static arm_lpae_iopte _iopte_top_ignored_val(arm_lpae_iopte table_pte)
+{
+	return (table_pte & (TOP_IGNORED_MASK << TOP_IGNORED_SHIFT))
+		>> TOP_IGNORED_SHIFT;
+}
+
+static int iopte_tblcnt(arm_lpae_iopte table_pte)
+{
+	return (_iopte_bottom_ignored_val(table_pte) |
+		(_iopte_top_ignored_val(table_pte) << BOTTOM_IGNORED_NUM_BITS));
+}
+
+static void iopte_tblcnt_set(arm_lpae_iopte *table_pte, int val)
+{
+	arm_lpae_iopte pte = iopte_val(*table_pte);
+
+	pte |= ((val & BOTTOM_IGNORED_MASK) << BOTTOM_IGNORED_SHIFT) |
+		 (((val & (TOP_IGNORED_MASK << BOTTOM_IGNORED_NUM_BITS))
+		   >> BOTTOM_IGNORED_NUM_BITS) << TOP_IGNORED_SHIFT);
+	*table_pte = pte;
+}
+
+static void iopte_tblcnt_sub(arm_lpae_iopte *table_ptep, int cnt)
+{
+	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);
+
+	current_cnt -= cnt;
+	iopte_tblcnt_set(table_ptep, current_cnt);
+}
+
+static void iopte_tblcnt_add(arm_lpae_iopte *table_ptep, int cnt)
+{
+	arm_lpae_iopte current_cnt = iopte_tblcnt(*table_ptep);
+
+	current_cnt += cnt;
+	iopte_tblcnt_set(table_ptep, current_cnt);
+}
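/*
 * Editor's worked example (not part of this patch): a table count of
 * 1234 (0x4d2) is packed as follows.  The low 10 bits (0x0d2) land in
 * PTE bits [11:2] and the remainder (0x1) lands in PTE bits [58:52]:
 *
 *	iopte_tblcnt_set(&pte, 1234);
 *	// pte |= (0x0d2 << 2) | ((arm_lpae_iopte)0x1 << 52)
 *
 * iopte_tblcnt(pte) then reassembles 0x0d2 | (0x1 << 10) == 1234.
 */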
+
 static bool selftest_running = false;
+static bool suppress_map_failures;
 
 static dma_addr_t __arm_lpae_dma_addr(void *pages)
 {
@@ -208,11 +284,12 @@
 }
 
 static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
-				    struct io_pgtable_cfg *cfg)
+				    struct io_pgtable_cfg *cfg, void *cookie)
 {
 	struct device *dev = cfg->iommu_dev;
 	dma_addr_t dma;
-	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+	void *pages = io_pgtable_alloc_pages_exact(cfg, cookie, size,
+						   gfp | __GFP_ZERO);
 
 	if (!pages)
 		return NULL;
@@ -236,17 +313,17 @@
 	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
 	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
 out_free:
-	free_pages_exact(pages, size);
+	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
 	return NULL;
 }
 
 static void __arm_lpae_free_pages(void *pages, size_t size,
-				  struct io_pgtable_cfg *cfg)
+				  struct io_pgtable_cfg *cfg, void *cookie)
 {
 	if (!selftest_running)
 		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
 				 size, DMA_TO_DEVICE);
-	free_pages_exact(pages, size);
+	io_pgtable_free_pages_exact(cfg, cookie, pages, size);
 }
 
 static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
@@ -260,33 +337,19 @@
 					   sizeof(pte), DMA_TO_DEVICE);
 }
 
-static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
-			    unsigned long iova, size_t size, int lvl,
-			    arm_lpae_iopte *ptep);
-
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
 			     unsigned long iova, phys_addr_t paddr,
 			     arm_lpae_iopte prot, int lvl,
-			     arm_lpae_iopte *ptep)
+			     arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep,
+			     bool flush)
 {
 	arm_lpae_iopte pte = prot;
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
-	if (iopte_leaf(*ptep, lvl)) {
-		/* We require an unmap first */
-		WARN_ON(!selftest_running);
+	/* We require an unmap first */
+	if (*ptep & ARM_LPAE_PTE_VALID) {
+		BUG_ON(!suppress_map_failures);
 		return -EEXIST;
-	} else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
-		/*
-		 * We need to unmap and free the old table before
-		 * overwriting it with a block entry.
-		 */
-		arm_lpae_iopte *tblp;
-		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
-
-		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
-		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
-			return -EINVAL;
 	}
 
 	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
@@ -297,27 +360,83 @@
 	else
 		pte |= ARM_LPAE_PTE_TYPE_BLOCK;
 
-	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
 	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
 
-	__arm_lpae_set_pte(ptep, pte, cfg);
+	if (flush)
+		__arm_lpae_set_pte(ptep, pte, cfg);
+	else
+		*ptep = pte;
+
+	if (prev_ptep)
+		iopte_tblcnt_add(prev_ptep, 1);
 	return 0;
 }
 
+struct map_state {
+	unsigned long iova_end;
+	unsigned int pgsize;
+	arm_lpae_iopte *pgtable;
+	arm_lpae_iopte *prev_pgtable;
+	arm_lpae_iopte *pte_start;
+	unsigned int num_pte;
+};
+/* map state optimization works at level 3 (the 2nd-to-last level) */
+#define MAP_STATE_LVL 3
+
 static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
 			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
-			  int lvl, arm_lpae_iopte *ptep)
+			  int lvl, arm_lpae_iopte *ptep,
+			  arm_lpae_iopte *prev_ptep, struct map_state *ms)
 {
 	arm_lpae_iopte *cptep, pte;
 	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
 	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	void *cookie = data->iop.cookie;
+	arm_lpae_iopte *pgtable = ptep;
 
 	/* Find our entry at the current level */
 	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
 	/* If we can install a leaf entry at this level, then do so */
-	if (size == block_size && (size & cfg->pgsize_bitmap))
-		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
+	if (size == block_size && (size & cfg->pgsize_bitmap)) {
+		if (!ms)
+			return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
+						ptep, prev_ptep, true);
+
+		if (lvl == MAP_STATE_LVL) {
+			if (ms->pgtable)
+				dma_sync_single_for_device(
+					cfg->iommu_dev,
+					__arm_lpae_dma_addr(ms->pte_start),
+					ms->num_pte * sizeof(*ptep),
+					DMA_TO_DEVICE);
+
+			ms->iova_end = round_down(iova, SZ_2M) + SZ_2M;
+			ms->pgtable = pgtable;
+			ms->prev_pgtable = prev_ptep;
+			ms->pgsize = size;
+			ms->pte_start = ptep;
+			ms->num_pte = 1;
+		} else {
+			/*
+			 * We have some map state from previous page
+			 * mappings, but we're about to set up a block
+			 * mapping.  Flush out the previous page mappings.
+			 */
+			if (ms->pgtable)
+				dma_sync_single_for_device(
+					cfg->iommu_dev,
+					__arm_lpae_dma_addr(ms->pte_start),
+					ms->num_pte * sizeof(*ptep),
+					DMA_TO_DEVICE);
+			memset(ms, 0, sizeof(*ms));
+			ms = NULL;
+		}
+
+		return arm_lpae_init_pte(data, iova, paddr, prot, lvl,
+					ptep, prev_ptep, ms == NULL);
+	}
 
 	/* We can't allocate tables at the final level */
 	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
@@ -327,7 +446,7 @@
 	pte = *ptep;
 	if (!pte) {
 		cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data),
-					       GFP_ATOMIC, cfg);
+					       GFP_ATOMIC, cfg, cookie);
 		if (!cptep)
 			return -ENOMEM;
 
@@ -340,7 +459,8 @@
 	}
 
 	/* Rinse, repeat */
-	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep,
+				ptep, ms);
 }
 
 static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -350,10 +470,14 @@
 
 	if (data->iop.fmt == ARM_64_LPAE_S1 ||
 	    data->iop.fmt == ARM_32_LPAE_S1) {
-		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;
+		pte = ARM_LPAE_PTE_nG;
 
-		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
-			pte |= ARM_LPAE_PTE_AP_RDONLY;
+		if (prot & IOMMU_WRITE)
+			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RW
+					: ARM_LPAE_PTE_AP_RW;
+		else
+			pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO
+					: ARM_LPAE_PTE_AP_RO;
 
 		if (prot & IOMMU_MMIO)
 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
@@ -361,6 +485,9 @@
 		else if (prot & IOMMU_CACHE)
 			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
 				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+		else if (prot & IOMMU_USE_UPSTREAM_HINT)
+			pte |= (ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM
+				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
 	} else {
 		pte = ARM_LPAE_PTE_HAP_FAULT;
 		if (prot & IOMMU_READ)
@@ -394,7 +521,8 @@
 		return 0;
 
 	prot = arm_lpae_prot_to_pte(data, iommu_prot);
-	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, NULL,
+				NULL);
 	/*
 	 * Synchronise all PTE updates for the new mapping before there's
 	 * a chance for anything to kick off a table walk for the new iova.
@@ -404,11 +532,89 @@
 	return ret;
 }
 
+static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+			   struct scatterlist *sg, unsigned int nents,
+			   int iommu_prot, size_t *size)
+{
+	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
+	arm_lpae_iopte *ptep = data->pgd;
+	int lvl = ARM_LPAE_START_LVL(data);
+	arm_lpae_iopte prot;
+	struct scatterlist *s;
+	size_t mapped = 0;
+	int i, ret;
+	unsigned int min_pagesz;
+	struct io_pgtable_cfg *cfg = &data->iop.cfg;
+	struct map_state ms;
+
+	/* If no access, then nothing to do */
+	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
+		goto out_err;
+
+	prot = arm_lpae_prot_to_pte(data, iommu_prot);
+
+	min_pagesz = 1 << __ffs(cfg->pgsize_bitmap);
+
+	memset(&ms, 0, sizeof(ms));
+
+	for_each_sg(sg, s, nents, i) {
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
+		size_t size = s->length;
+
+		/*
+		 * We are mapping on IOMMU page boundaries, so offset within
+		 * the page must be 0. However, the IOMMU may support pages
+		 * smaller than PAGE_SIZE, so s->offset may still be nonzero
+		 * provided it is aligned to the minimum IOMMU page size.
+		 */
+		if (!IS_ALIGNED(s->offset, min_pagesz))
+			goto out_err;
+
+		while (size) {
+			size_t pgsize = iommu_pgsize(
+				cfg->pgsize_bitmap, iova | phys, size);
+
+			if (ms.pgtable && (iova < ms.iova_end)) {
+				arm_lpae_iopte *ptep = ms.pgtable +
+					ARM_LPAE_LVL_IDX(iova, MAP_STATE_LVL,
+							 data);
+				arm_lpae_init_pte(
+					data, iova, phys, prot, MAP_STATE_LVL,
+					ptep, ms.prev_pgtable, false);
+				ms.num_pte++;
+			} else {
+				ret = __arm_lpae_map(data, iova, phys, pgsize,
+						prot, lvl, ptep, NULL, &ms);
+				if (ret)
+					goto out_err;
+			}
+
+			iova += pgsize;
+			mapped += pgsize;
+			phys += pgsize;
+			size -= pgsize;
+		}
+	}
+
+	if (ms.pgtable)
+		dma_sync_single_for_device(
+			cfg->iommu_dev, __arm_lpae_dma_addr(ms.pte_start),
+			ms.num_pte * sizeof(*ptep), DMA_TO_DEVICE);
+
+	return mapped;
+
+out_err:
+	/* Return the size of the partial mapping so that it can be undone */
+	*size = mapped;
+	return 0;
+}
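/*
 * Editor's note (illustrative): for a scatterlist of 4K chunks that all
 * fall inside one 2MB-aligned region, the loop above installs the leaf
 * PTEs with arm_lpae_init_pte(..., flush = false) and issues a single
 * dma_sync_single_for_device() covering the whole run once the region
 * boundary (ms.iova_end) is crossed or the scatterlist ends, instead of
 * one cache clean per 4K entry.
 */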
+
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
 				    arm_lpae_iopte *ptep)
 {
 	arm_lpae_iopte *start, *end;
 	unsigned long table_size;
+	void *cookie = data->iop.cookie;
 
 	if (lvl == ARM_LPAE_START_LVL(data))
 		table_size = data->pgd_size;
@@ -432,7 +638,7 @@
 		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
 	}
 
-	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
+	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, cookie);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -446,7 +652,8 @@
 static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
 				    unsigned long iova, size_t size,
 				    arm_lpae_iopte prot, int lvl,
-				    arm_lpae_iopte *ptep, size_t blk_size)
+				    arm_lpae_iopte *ptep,
+				    arm_lpae_iopte *prev_ptep, size_t blk_size)
 {
 	unsigned long blk_start, blk_end;
 	phys_addr_t blk_paddr;
@@ -455,6 +662,7 @@
 	blk_start = iova & ~(blk_size - 1);
 	blk_end = blk_start + blk_size;
 	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;
+	size = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova, size);
 
 	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
 		arm_lpae_iopte *tablep;
@@ -466,7 +674,7 @@
 		/* __arm_lpae_map expects a pointer to the start of the table */
 		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
 		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
-				   tablep) < 0) {
+				   tablep, prev_ptep, NULL) < 0) {
 			if (table) {
 				/* Free the table we allocated */
 				tablep = iopte_deref(table, data);
@@ -477,14 +685,12 @@
 	}
 
 	__arm_lpae_set_pte(ptep, table, &data->iop.cfg);
-	iova &= ~(blk_size - 1);
-	io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true);
 	return size;
 }
 
 static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 			    unsigned long iova, size_t size, int lvl,
-			    arm_lpae_iopte *ptep)
+			    arm_lpae_iopte *ptep, arm_lpae_iopte *prev_ptep)
 {
 	arm_lpae_iopte pte;
 	struct io_pgtable *iop = &data->iop;
@@ -505,16 +711,52 @@
 
 		if (!iopte_leaf(pte, lvl)) {
 			/* Also flush any partial walks */
-			io_pgtable_tlb_add_flush(iop, iova, size,
-						ARM_LPAE_GRANULE(data), false);
-			io_pgtable_tlb_sync(iop);
 			ptep = iopte_deref(pte, data);
 			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
-		} else {
-			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
 		}
 
 		return size;
+	} else if ((lvl == ARM_LPAE_MAX_LEVELS - 2) && !iopte_leaf(pte, lvl)) {
+		arm_lpae_iopte *table = iopte_deref(pte, data);
+		arm_lpae_iopte *table_base = table;
+		int tl_offset = ARM_LPAE_LVL_IDX(iova, lvl + 1, data);
+		int entry_size = ARM_LPAE_GRANULE(data);
+		int max_entries = ARM_LPAE_BLOCK_SIZE(lvl, data) / entry_size;
+		int entries = min_t(int, size / entry_size,
+			max_entries - tl_offset);
+		int table_len = entries * sizeof(*table);
+
+		/*
+		 * This isn't a block mapping, so it must be a table mapping;
+		 * since this is the 2nd-to-last level, the next level must
+		 * consist entirely of page mappings.  Zero them all out in
+		 * one fell swoop.
+		 */
+
+		table += tl_offset;
+
+		memset(table, 0, table_len);
+		dma_sync_single_for_device(iop->cfg.iommu_dev,
+					   __arm_lpae_dma_addr(table),
+					   table_len, DMA_TO_DEVICE);
+
+		iopte_tblcnt_sub(ptep, entries);
+		if (!iopte_tblcnt(*ptep)) {
+			/* no valid mappings left under this table. free it. */
+			__arm_lpae_set_pte(ptep, 0, &iop->cfg);
+			io_pgtable_tlb_add_flush(iop, iova,
+						 entries * entry_size,
+						 ARM_LPAE_GRANULE(data),
+						 false);
+			__arm_lpae_free_pgtable(data, lvl + 1, table_base);
+		} else {
+			io_pgtable_tlb_add_flush(iop, iova,
+						 entries * entry_size,
+						 ARM_LPAE_GRANULE(data),
+						 true);
+		}
+
+		return entries * entry_size;
 	} else if (iopte_leaf(pte, lvl)) {
 		/*
 		 * Insert a table at the next level to map the old region,
@@ -522,25 +764,42 @@
 		 */
 		return arm_lpae_split_blk_unmap(data, iova, size,
 						iopte_prot(pte), lvl, ptep,
-						blk_size);
+						prev_ptep, blk_size);
 	}
 
 	/* Keep on walkin' */
+	prev_ptep = ptep;
 	ptep = iopte_deref(pte, data);
-	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
+	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep, prev_ptep);
 }
 
-static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
 			  size_t size)
 {
-	size_t unmapped;
+	size_t unmapped = 0;
 	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
 	arm_lpae_iopte *ptep = data->pgd;
 	int lvl = ARM_LPAE_START_LVL(data);
 
-	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
+	while (unmapped < size) {
+		size_t ret, size_to_unmap, remaining;
+
+		remaining = (size - unmapped);
+		size_to_unmap = iommu_pgsize(data->iop.cfg.pgsize_bitmap, iova,
+						remaining);
+		size_to_unmap = size_to_unmap >= SZ_2M ?
+				size_to_unmap :
+				min_t(unsigned long, remaining,
+					(ALIGN(iova + 1, SZ_2M) - iova));
+		ret = __arm_lpae_unmap(data, iova, size_to_unmap, lvl, ptep,
+					NULL);
+		if (ret == 0)
+			break;
+		unmapped += ret;
+		iova += ret;
+	}
 	if (unmapped)
-		io_pgtable_tlb_sync(&data->iop);
+		io_pgtable_tlb_flush_all(&data->iop);
 
 	return unmapped;
 }
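/*
 * Editor's note (illustrative): the loop above never lets a sub-2MB piece
 * cross a 2MB boundary.  Unmapping 4MB that starts 4KB below a 2MB
 * boundary, for example, proceeds as a 4KB call up to the boundary
 * followed by chunks of up to 2MB (or larger block sizes where they
 * apply), with a single io_pgtable_tlb_flush_all() at the end instead of
 * per-page TLB maintenance.
 */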
@@ -653,6 +912,7 @@
 
 	data->iop.ops = (struct io_pgtable_ops) {
 		.map		= arm_lpae_map,
+		.map_sg		= arm_lpae_map_sg,
 		.unmap		= arm_lpae_unmap,
 		.iova_to_phys	= arm_lpae_iova_to_phys,
 	};
@@ -666,17 +926,23 @@
 	u64 reg;
 	struct arm_lpae_io_pgtable *data;
 
-	if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS)
-		return NULL;
-
 	data = arm_lpae_alloc_pgtable(cfg);
 	if (!data)
 		return NULL;
 
 	/* TCR */
-	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
-	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
-	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	else if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
+	else
+		reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
+			(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
 
 	switch (ARM_LPAE_GRANULE(data)) {
 	case SZ_4K:
@@ -725,13 +991,16 @@
 	      (ARM_LPAE_MAIR_ATTR_WBRWA
 	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
 	      (ARM_LPAE_MAIR_ATTR_DEVICE
-	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));
+	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
+	      (ARM_LPAE_MAIR_ATTR_UPSTREAM
+	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_UPSTREAM));
 
 	cfg->arm_lpae_s1_cfg.mair[0] = reg;
 	cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
 	/* Looking good; allocate a pgd */
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
+					   cfg, cookie);
 	if (!data->pgd)
 		goto out_free_data;
 
@@ -825,7 +1094,8 @@
 	cfg->arm_lpae_s2_cfg.vtcr = reg;
 
 	/* Allocate pgd pages */
-	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
+	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL,
+					   cfg, cookie);
 	if (!data->pgd)
 		goto out_free_data;
 
@@ -908,7 +1178,6 @@
 				size_t granule, bool leaf, void *cookie)
 {
 	WARN_ON(cookie != cfg_cookie);
-	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
 }
 
 static void dummy_tlb_sync(void *cookie)
@@ -937,10 +1206,48 @@
 #define __FAIL(ops, i)	({						\
 		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
 		arm_lpae_dump_ops(ops);					\
+		suppress_map_failures = false;				\
 		selftest_running = false;				\
 		-EFAULT;						\
 })
 
+/*
+ * Returns true if there's any mapping in the given iova range in ops.
+ */
+static bool arm_lpae_range_has_mapping(struct io_pgtable_ops *ops,
+				       unsigned long iova_start, size_t size)
+{
+	unsigned long iova = iova_start;
+
+	while (iova < (iova_start + size)) {
+		if (ops->iova_to_phys(ops, iova + 42))
+			return true;
+		iova += SZ_4K;
+	}
+	return false;
+}
+
+/*
+ * Returns true if the iova range is successfully mapped to the contiguous
+ * phys range in ops.
+ */
+static bool arm_lpae_range_has_specific_mapping(struct io_pgtable_ops *ops,
+						const unsigned long iova_start,
+						const phys_addr_t phys_start,
+						const size_t size)
+{
+	unsigned long iova = iova_start;
+	phys_addr_t phys = phys_start;
+
+	while (iova < (iova_start + size)) {
+		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
+			return false;
+		iova += SZ_4K;
+		phys += SZ_4K;
+	}
+	return true;
+}
+
 static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
 {
 	static const enum io_pgtable_fmt fmts[] = {
@@ -948,14 +1255,16 @@
 		ARM_64_LPAE_S2,
 	};
 
-	int i, j;
+	int i, j, k;
 	unsigned long iova;
 	size_t size;
 	struct io_pgtable_ops *ops;
-
 	selftest_running = true;
 
 	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
+		unsigned long test_sg_sizes[] = { SZ_4K, SZ_64K, SZ_2M,
+						  SZ_1M * 12, SZ_1M * 20 };
+
 		cfg_cookie = cfg;
 		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
 		if (!ops) {
@@ -964,16 +1273,11 @@
 		}
 
 		/*
-		 * Initial sanity checks.
-		 * Empty page tables shouldn't provide any translations.
+		 * Initial sanity checks.  Empty page tables shouldn't
+		 * provide any translations.  TODO: check entire supported
+		 * range for these ops rather than first 2G
 		 */
-		if (ops->iova_to_phys(ops, 42))
-			return __FAIL(ops, i);
-
-		if (ops->iova_to_phys(ops, SZ_1G + 42))
-			return __FAIL(ops, i);
-
-		if (ops->iova_to_phys(ops, SZ_2G + 42))
+		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
 			return __FAIL(ops, i);
 
 		/*
@@ -990,12 +1294,15 @@
 							    IOMMU_CACHE))
 				return __FAIL(ops, i);
 
+			suppress_map_failures = true;
 			/* Overlapping mappings */
 			if (!ops->map(ops, iova, iova + size, size,
 				      IOMMU_READ | IOMMU_NOEXEC))
 				return __FAIL(ops, i);
+			suppress_map_failures = false;
 
-			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+			if (!arm_lpae_range_has_specific_mapping(ops, iova,
+								 iova, size))
 				return __FAIL(ops, i);
 
 			iova += SZ_1G;
@@ -1008,11 +1315,15 @@
 		if (ops->unmap(ops, SZ_1G + size, size) != size)
 			return __FAIL(ops, i);
 
+		if (arm_lpae_range_has_mapping(ops, SZ_1G + size, size))
+			return __FAIL(ops, i);
+
 		/* Remap of partial unmap */
 		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
 			return __FAIL(ops, i);
 
-		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
+		if (!arm_lpae_range_has_specific_mapping(ops, SZ_1G + size,
+							 size, size))
 			return __FAIL(ops, i);
 
 		/* Full unmap */
@@ -1034,15 +1345,108 @@
 			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
 				return __FAIL(ops, i);
 
+			if (ops->unmap(ops, iova, size) != size)
+				return __FAIL(ops, i);
+
 			iova += SZ_1G;
 			j++;
 			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
 		}
 
+		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+			return __FAIL(ops, i);
+
+		if ((cfg->pgsize_bitmap & SZ_2M) &&
+		    (cfg->pgsize_bitmap & SZ_4K)) {
+			/* mixed block + page mappings */
+			iova = 0;
+			if (ops->map(ops, iova, iova, SZ_2M, IOMMU_READ))
+				return __FAIL(ops, i);
+
+			if (ops->map(ops, iova + SZ_2M, iova + SZ_2M, SZ_4K,
+				     IOMMU_READ))
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
+				return __FAIL(ops, i);
+
+			if (ops->iova_to_phys(ops, iova + SZ_2M + 42) !=
+			    (iova + SZ_2M + 42))
+				return __FAIL(ops, i);
+
+			/* unmap both mappings at once */
+			if (ops->unmap(ops, iova, SZ_2M + SZ_4K) !=
+			    (SZ_2M + SZ_4K))
+				return __FAIL(ops, i);
+
+			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+				return __FAIL(ops, i);
+		}
+
+		/* map_sg */
+		for (j = 0; j < ARRAY_SIZE(test_sg_sizes); ++j) {
+			size_t mapped;
+			size_t unused;
+			struct page *page;
+			phys_addr_t page_phys;
+			struct sg_table table;
+			struct scatterlist *sg;
+			unsigned long total_size = test_sg_sizes[j];
+			int chunk_size = 1UL << find_first_bit(
+				&cfg->pgsize_bitmap, BITS_PER_LONG);
+			int nents = total_size / chunk_size;
+
+			if (total_size < chunk_size)
+				continue;
+
+			page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
+			page_phys = page_to_phys(page);
+
+			iova = 0;
+			BUG_ON(sg_alloc_table(&table, nents, GFP_KERNEL));
+			BUG_ON(!page);
+			for_each_sg(table.sgl, sg, table.nents, k)
+				sg_set_page(sg, page, chunk_size, 0);
+
+			mapped = ops->map_sg(ops, iova, table.sgl, table.nents,
+					     IOMMU_READ | IOMMU_WRITE, &unused);
+
+			if (mapped != total_size)
+				return __FAIL(ops, i);
+
+			if (!arm_lpae_range_has_mapping(ops, iova, total_size))
+				return __FAIL(ops, i);
+
+			if (arm_lpae_range_has_mapping(ops, iova + total_size,
+					      SZ_2G - (iova + total_size)))
+				return __FAIL(ops, i);
+
+			for_each_sg(table.sgl, sg, table.nents, k) {
+				dma_addr_t newphys =
+					ops->iova_to_phys(ops, iova + 42);
+				if (newphys != (page_phys + 42))
+					return __FAIL(ops, i);
+				iova += chunk_size;
+			}
+
+			if (ops->unmap(ops, 0, total_size) != total_size)
+				return __FAIL(ops, i);
+
+			if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+				return __FAIL(ops, i);
+
+			sg_free_table(&table);
+			__free_pages(page, get_order(chunk_size));
+		}
+
+		if (arm_lpae_range_has_mapping(ops, 0, SZ_2G))
+			return __FAIL(ops, i);
+
 		free_io_pgtable_ops(ops);
 	}
 
 	selftest_running = false;
+	suppress_map_failures = false;
 	return 0;
 }
 
@@ -1050,8 +1454,6 @@
 {
 	static const unsigned long pgsize[] = {
 		SZ_4K | SZ_2M | SZ_1G,
-		SZ_16K | SZ_32M,
-		SZ_64K | SZ_512M,
 	};
 
 	static const unsigned int ias[] = {
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
new file mode 100644
index 0000000..2c34d10f
--- /dev/null
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -0,0 +1,691 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)	"io-pgtable-fast: " fmt
+
+#include <linux/iommu.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/io-pgtable-fast.h>
+#include <asm/cacheflush.h>
+
+#include "io-pgtable.h"
+
+#define AV8L_FAST_MAX_ADDR_BITS		48
+
+/* Struct accessors */
+#define iof_pgtable_to_data(x)						\
+	container_of((x), struct av8l_fast_io_pgtable, iop)
+
+#define iof_pgtable_ops_to_pgtable(x)					\
+	container_of((x), struct io_pgtable, ops)
+
+#define iof_pgtable_ops_to_data(x)					\
+	iof_pgtable_to_data(iof_pgtable_ops_to_pgtable(x))
+
+struct av8l_fast_io_pgtable {
+	struct io_pgtable	  iop;
+	av8l_fast_iopte		 *pgd;
+	av8l_fast_iopte		 *puds[4];
+	av8l_fast_iopte		 *pmds;
+	struct page		**pages; /* page table memory */
+};
+
+/* Page table bits */
+#define AV8L_FAST_PTE_TYPE_SHIFT	0
+#define AV8L_FAST_PTE_TYPE_MASK		0x3
+
+#define AV8L_FAST_PTE_TYPE_BLOCK	1
+#define AV8L_FAST_PTE_TYPE_TABLE	3
+#define AV8L_FAST_PTE_TYPE_PAGE		3
+
+#define AV8L_FAST_PTE_NSTABLE		(((av8l_fast_iopte)1) << 63)
+#define AV8L_FAST_PTE_XN		(((av8l_fast_iopte)3) << 53)
+#define AV8L_FAST_PTE_AF		(((av8l_fast_iopte)1) << 10)
+#define AV8L_FAST_PTE_SH_NS		(((av8l_fast_iopte)0) << 8)
+#define AV8L_FAST_PTE_SH_OS		(((av8l_fast_iopte)2) << 8)
+#define AV8L_FAST_PTE_SH_IS		(((av8l_fast_iopte)3) << 8)
+#define AV8L_FAST_PTE_NS		(((av8l_fast_iopte)1) << 5)
+#define AV8L_FAST_PTE_VALID		(((av8l_fast_iopte)1) << 0)
+
+#define AV8L_FAST_PTE_ATTR_LO_MASK	(((av8l_fast_iopte)0x3ff) << 2)
+/* Ignore the contiguous bit for block splitting */
+#define AV8L_FAST_PTE_ATTR_HI_MASK	(((av8l_fast_iopte)6) << 52)
+#define AV8L_FAST_PTE_ATTR_MASK		(AV8L_FAST_PTE_ATTR_LO_MASK |	\
+					 AV8L_FAST_PTE_ATTR_HI_MASK)
+#define AV8L_FAST_PTE_ADDR_MASK		((av8l_fast_iopte)0xfffffffff000)
+
+
+/* Stage-1 PTE */
+#define AV8L_FAST_PTE_AP_PRIV_RW	(((av8l_fast_iopte)0) << 6)
+#define AV8L_FAST_PTE_AP_RW		(((av8l_fast_iopte)1) << 6)
+#define AV8L_FAST_PTE_AP_PRIV_RO	(((av8l_fast_iopte)2) << 6)
+#define AV8L_FAST_PTE_AP_RO		(((av8l_fast_iopte)3) << 6)
+#define AV8L_FAST_PTE_ATTRINDX_SHIFT	2
+#define AV8L_FAST_PTE_nG		(((av8l_fast_iopte)1) << 11)
+
+/* Stage-2 PTE */
+#define AV8L_FAST_PTE_HAP_FAULT		(((av8l_fast_iopte)0) << 6)
+#define AV8L_FAST_PTE_HAP_READ		(((av8l_fast_iopte)1) << 6)
+#define AV8L_FAST_PTE_HAP_WRITE		(((av8l_fast_iopte)2) << 6)
+#define AV8L_FAST_PTE_MEMATTR_OIWB	(((av8l_fast_iopte)0xf) << 2)
+#define AV8L_FAST_PTE_MEMATTR_NC	(((av8l_fast_iopte)0x5) << 2)
+#define AV8L_FAST_PTE_MEMATTR_DEV	(((av8l_fast_iopte)0x1) << 2)
+
+/* Register bits */
+#define ARM_32_LPAE_TCR_EAE		(1 << 31)
+#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)
+
+#define AV8L_FAST_TCR_TG0_4K		(0 << 14)
+#define AV8L_FAST_TCR_TG0_64K		(1 << 14)
+#define AV8L_FAST_TCR_TG0_16K		(2 << 14)
+
+#define AV8L_FAST_TCR_SH0_SHIFT		12
+#define AV8L_FAST_TCR_SH0_MASK		0x3
+#define AV8L_FAST_TCR_SH_NS		0
+#define AV8L_FAST_TCR_SH_OS		2
+#define AV8L_FAST_TCR_SH_IS		3
+
+#define AV8L_FAST_TCR_ORGN0_SHIFT	10
+#define AV8L_FAST_TCR_IRGN0_SHIFT	8
+#define AV8L_FAST_TCR_RGN_MASK		0x3
+#define AV8L_FAST_TCR_RGN_NC		0
+#define AV8L_FAST_TCR_RGN_WBWA		1
+#define AV8L_FAST_TCR_RGN_WT		2
+#define AV8L_FAST_TCR_RGN_WB		3
+
+#define AV8L_FAST_TCR_SL0_SHIFT		6
+#define AV8L_FAST_TCR_SL0_MASK		0x3
+
+#define AV8L_FAST_TCR_T0SZ_SHIFT	0
+#define AV8L_FAST_TCR_SZ_MASK		0xf
+
+#define AV8L_FAST_TCR_PS_SHIFT		16
+#define AV8L_FAST_TCR_PS_MASK		0x7
+
+#define AV8L_FAST_TCR_IPS_SHIFT		32
+#define AV8L_FAST_TCR_IPS_MASK		0x7
+
+#define AV8L_FAST_TCR_PS_32_BIT		0x0ULL
+#define AV8L_FAST_TCR_PS_36_BIT		0x1ULL
+#define AV8L_FAST_TCR_PS_40_BIT		0x2ULL
+#define AV8L_FAST_TCR_PS_42_BIT		0x3ULL
+#define AV8L_FAST_TCR_PS_44_BIT		0x4ULL
+#define AV8L_FAST_TCR_PS_48_BIT		0x5ULL
+
+#define AV8L_FAST_TCR_EPD1_SHIFT	23
+#define AV8L_FAST_TCR_EPD1_FAULT	1
+
+#define AV8L_FAST_MAIR_ATTR_SHIFT(n)	((n) << 3)
+#define AV8L_FAST_MAIR_ATTR_MASK	0xff
+#define AV8L_FAST_MAIR_ATTR_DEVICE	0x04
+#define AV8L_FAST_MAIR_ATTR_NC		0x44
+#define AV8L_FAST_MAIR_ATTR_WBRWA	0xff
+#define AV8L_FAST_MAIR_ATTR_UPSTREAM	0xf4
+#define AV8L_FAST_MAIR_ATTR_IDX_NC	0
+#define AV8L_FAST_MAIR_ATTR_IDX_CACHE	1
+#define AV8L_FAST_MAIR_ATTR_IDX_DEV	2
+#define AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM	3
+
+#define AV8L_FAST_PAGE_SHIFT		12
+
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB
+
+#include <asm/cacheflush.h>
+#include <linux/notifier.h>
+
+static ATOMIC_NOTIFIER_HEAD(av8l_notifier_list);
+
+void av8l_register_notify(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&av8l_notifier_list, nb);
+}
+EXPORT_SYMBOL(av8l_register_notify);
+
+static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
+{
+	if (unlikely(*ptep)) {
+		atomic_notifier_call_chain(
+			&av8l_notifier_list, MAPPED_OVER_STALE_TLB,
+			(void *) ptep);
+		pr_err("Tried to map over a non-vacant pte: 0x%llx @ %p\n",
+		       *ptep, ptep);
+		pr_err("Nearby memory:\n");
+		print_hex_dump(KERN_ERR, "pgtbl: ", DUMP_PREFIX_ADDRESS,
+			       32, 8, ptep - 16, 32 * sizeof(*ptep), false);
+	}
+}
+
+void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync)
+{
+	int i;
+	av8l_fast_iopte *pmdp = pmds;
+
+	for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) {
+		if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
+			*pmdp = 0;
+			if (!skip_sync)
+				dmac_clean_range(pmdp, pmdp + 1);
+		}
+		pmdp++;
+	}
+}
+#else
+static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
+{
+}
+#endif
+
+/* caller must take care of cache maintenance on *ptep */
+int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
+			 int prot)
+{
+	int i, nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	av8l_fast_iopte pte = AV8L_FAST_PTE_XN
+		| AV8L_FAST_PTE_TYPE_PAGE
+		| AV8L_FAST_PTE_AF
+		| AV8L_FAST_PTE_nG
+		| AV8L_FAST_PTE_SH_IS;
+
+	if (prot & IOMMU_MMIO)
+		pte |= (AV8L_FAST_MAIR_ATTR_IDX_DEV
+			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
+	else if (prot & IOMMU_CACHE)
+		pte |= (AV8L_FAST_MAIR_ATTR_IDX_CACHE
+			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
+	else if (prot & IOMMU_USE_UPSTREAM_HINT)
+		pte |= (AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM
+			<< AV8L_FAST_PTE_ATTRINDX_SHIFT);
+
+	if (!(prot & IOMMU_WRITE))
+		pte |= AV8L_FAST_PTE_AP_RO;
+	else
+		pte |= AV8L_FAST_PTE_AP_RW;
+
+	paddr &= AV8L_FAST_PTE_ADDR_MASK;
+	for (i = 0; i < nptes; i++, paddr += SZ_4K) {
+		__av8l_check_for_stale_tlb(ptep + i);
+		*(ptep + i) = pte | paddr;
+	}
+
+	return 0;
+}
+
+static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
+			 phys_addr_t paddr, size_t size, int prot)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
+	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+
+	av8l_fast_map_public(ptep, paddr, size, prot);
+	dmac_clean_range(ptep, ptep + nptes);
+
+	return 0;
+}
+
+static void __av8l_fast_unmap(av8l_fast_iopte *ptep, size_t size,
+			      bool need_stale_tlb_tracking)
+{
+	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+	int val = need_stale_tlb_tracking
+		? AV8L_FAST_PTE_UNMAPPED_NEED_TLBI
+		: 0;
+
+	memset(ptep, val, sizeof(*ptep) * nptes);
+}
+
+/* caller must take care of cache maintenance on *ptep */
+void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size)
+{
+	__av8l_fast_unmap(ptep, size, true);
+}
+
+/* upper layer must take care of TLB invalidation */
+static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
+			      size_t size)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
+	unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
+
+	__av8l_fast_unmap(ptep, size, false);
+	dmac_clean_range(ptep, ptep + nptes);
+
+	return size;
+}
+
+static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
+					  unsigned long iova)
+{
+	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
+	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+	phys_addr_t phys;
+	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
+	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
+	const unsigned long ptt = AV8L_FAST_PTE_TYPE_TABLE;
+	const unsigned long ptp = AV8L_FAST_PTE_TYPE_PAGE;
+	const av8l_fast_iopte am = AV8L_FAST_PTE_ADDR_MASK;
+
+	/* TODO: clean up some of these magic numbers... */
+
+	pgdp = (av8l_fast_iopte *)
+		(((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
+	pte = *pgdp;
+	if (((pte >> pts) & ptm) != ptt)
+		return 0;
+	pudp = phys_to_virt((pte & am) | ((iova & 0x3fe00000) >> 18));
+
+	pte = *pudp;
+	if (((pte >> pts) & ptm) != ptt)
+		return 0;
+	pmdp = phys_to_virt((pte & am) | ((iova & 0x1ff000) >> 9));
+
+	pte = *pmdp;
+	if (((pte >> pts) & ptm) != ptp)
+		return 0;
+	phys = pte & am;
+
+	return phys | (iova & 0xfff);
+}
+
+static int av8l_fast_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
+			    struct scatterlist *sg, unsigned int nents,
+			    int prot, size_t *size)
+{
+	return -ENODEV;
+}
+
+static struct av8l_fast_io_pgtable *
+av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
+{
+	struct av8l_fast_io_pgtable *data;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return NULL;
+
+	data->iop.ops = (struct io_pgtable_ops) {
+		.map		= av8l_fast_map,
+		.map_sg		= av8l_fast_map_sg,
+		.unmap		= av8l_fast_unmap,
+		.iova_to_phys	= av8l_fast_iova_to_phys,
+	};
+
+	return data;
+}
+
+/*
+ * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and
+ * 2048 pages for pmds (each pud page contains 512 table entries, each
+ * pointing to a pmd).
+ */
+#define NUM_PGD_PAGES 1
+#define NUM_PUD_PAGES 4
+#define NUM_PMD_PAGES 2048
+#define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES)
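/*
 * Editor's note (illustrative): the page budget follows from the fixed
 * 4GB VA window: 4GB / 4KB = 1048576 leaf PTEs; at 512 PTEs per pmd page
 * that is 2048 pmd pages; at 512 pmd pointers per pud page that is 4 pud
 * pages; plus 1 pgd page = 2053 pages (~8MB) of page-table memory, all
 * allocated up front.
 */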
+
+static int
+av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
+			       struct io_pgtable_cfg *cfg, void *cookie)
+{
+	int i, j, pg = 0;
+	struct page **pages, *page;
+
+	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL);
+	if (!pages)
+		return -ENOMEM;
+
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	if (!page)
+		goto err_free_pages_arr;
+	pages[pg++] = page;
+	data->pgd = page_address(page);
+
+	/*
+	 * We need 2048 entries at level 2 to map 4GB of VA space. A page
+	 * can hold 512 entries, so we need 4 pages.
+	 */
+	for (i = 0; i < 4; ++i) {
+		av8l_fast_iopte pte, *ptep;
+
+		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			goto err_free_pages;
+		pages[pg++] = page;
+		data->puds[i] = page_address(page);
+		pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
+		ptep = ((av8l_fast_iopte *)data->pgd) + i;
+		*ptep = pte;
+	}
+	dmac_clean_range(data->pgd, data->pgd + 4);
+
+	/*
+	 * We have 4 puds, each of which can point to 512 pmds, so we'll
+	 * have 2048 pmds, each of which can hold 512 ptes, for a grand
+	 * total of 2048*512=1048576 PTEs.
+	 */
+	for (i = 0; i < 4; ++i) {
+		for (j = 0; j < 512; ++j) {
+			av8l_fast_iopte pte, *pudp;
+
+			page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+			if (!page)
+				goto err_free_pages;
+			pages[pg++] = page;
+			pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
+			pudp = data->puds[i] + j;
+			*pudp = pte;
+		}
+		dmac_clean_range(data->puds[i], data->puds[i] + 512);
+	}
+
+	if (WARN_ON(pg != NUM_PGTBL_PAGES))
+		goto err_free_pages;
+
+	/*
+	 * We map the pmds into a virtually contiguous space so that we
+	 * don't have to traverse the first two levels of the page tables
+	 * to find the appropriate pud.  Instead, it will be a simple
+	 * offset from the virtual base of the pmds.
+	 */
+	data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES,
+			  VM_IOREMAP, PAGE_KERNEL);
+	if (!data->pmds)
+		goto err_free_pages;
+
+	data->pages = pages;
+	return 0;
+
+err_free_pages:
+	for (i = 0; i < pg; ++i)
+		__free_page(pages[i]);
+err_free_pages_arr:
+	kfree(pages);
+	return -ENOMEM;
+}
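/*
 * Editor's note (illustrative, not part of this patch): because the pmds
 * are vmap()ed into one contiguous VA range, av8l_fast_map() can reach the
 * leaf PTE for any iova with a single shift instead of a table walk.
 * iopte_pmd_offset() (declared in linux/io-pgtable-fast.h, not shown here)
 * is assumed to reduce to something like:
 *
 *	ptep = data->pmds + (iova >> AV8L_FAST_PAGE_SHIFT);
 */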
+
+static struct io_pgtable *
+av8l_fast_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
+{
+	u64 reg;
+	struct av8l_fast_io_pgtable *data =
+		av8l_fast_alloc_pgtable_data(cfg);
+
+	if (!data)
+		return NULL;
+
+	/* restrict according to the fast map requirements */
+	cfg->ias = 32;
+	cfg->pgsize_bitmap = SZ_4K;
+
+	/* TCR */
+	if (cfg->quirks & IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT)
+		reg = (AV8L_FAST_TCR_SH_OS << AV8L_FAST_TCR_SH0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
+			(AV8L_FAST_TCR_RGN_WBWA << AV8L_FAST_TCR_ORGN0_SHIFT);
+	else
+		reg = (AV8L_FAST_TCR_SH_IS << AV8L_FAST_TCR_SH0_SHIFT) |
+		      (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_IRGN0_SHIFT) |
+		      (AV8L_FAST_TCR_RGN_NC << AV8L_FAST_TCR_ORGN0_SHIFT);
+
+	reg |= AV8L_FAST_TCR_TG0_4K;
+
+	switch (cfg->oas) {
+	case 32:
+		reg |= (AV8L_FAST_TCR_PS_32_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 36:
+		reg |= (AV8L_FAST_TCR_PS_36_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 40:
+		reg |= (AV8L_FAST_TCR_PS_40_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 42:
+		reg |= (AV8L_FAST_TCR_PS_42_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 44:
+		reg |= (AV8L_FAST_TCR_PS_44_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	case 48:
+		reg |= (AV8L_FAST_TCR_PS_48_BIT << AV8L_FAST_TCR_IPS_SHIFT);
+		break;
+	default:
+		goto out_free_data;
+	}
+
+	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
+	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+	cfg->av8l_fast_cfg.tcr = reg;
+
+	/* MAIRs */
+	reg = (AV8L_FAST_MAIR_ATTR_NC
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_NC)) |
+	      (AV8L_FAST_MAIR_ATTR_WBRWA
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_CACHE)) |
+	      (AV8L_FAST_MAIR_ATTR_DEVICE
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_DEV)) |
+	      (AV8L_FAST_MAIR_ATTR_UPSTREAM
+	       << AV8L_FAST_MAIR_ATTR_SHIFT(AV8L_FAST_MAIR_ATTR_IDX_UPSTREAM));
+
+	cfg->av8l_fast_cfg.mair[0] = reg;
+	cfg->av8l_fast_cfg.mair[1] = 0;
+
+	/* Allocate all page table memory! */
+	if (av8l_fast_prepopulate_pgtables(data, cfg, cookie))
+		goto out_free_data;
+
+	cfg->av8l_fast_cfg.pmds = data->pmds;
+
+	/* TTBRs */
+	cfg->av8l_fast_cfg.ttbr[0] = virt_to_phys(data->pgd);
+	cfg->av8l_fast_cfg.ttbr[1] = 0;
+	return &data->iop;
+
+out_free_data:
+	kfree(data);
+	return NULL;
+}
+
+static void av8l_fast_free_pgtable(struct io_pgtable *iop)
+{
+	int i;
+	struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop);
+
+	vunmap(data->pmds);
+	for (i = 0; i < NUM_PGTBL_PAGES; ++i)
+		__free_page(data->pages[i]);
+	kfree(data->pages);
+	kfree(data);
+}
+
+struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns = {
+	.alloc	= av8l_fast_alloc_pgtable,
+	.free	= av8l_fast_free_pgtable,
+};
+
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST
+
+#include <linux/dma-contiguous.h>
+
+static struct io_pgtable_cfg *cfg_cookie;
+
+static void dummy_tlb_flush_all(void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static void dummy_tlb_add_flush(unsigned long iova, size_t size, size_t granule,
+				bool leaf, void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
+}
+
+static void dummy_tlb_sync(void *cookie)
+{
+	WARN_ON(cookie != cfg_cookie);
+}
+
+static struct iommu_gather_ops dummy_tlb_ops __initdata = {
+	.tlb_flush_all	= dummy_tlb_flush_all,
+	.tlb_add_flush	= dummy_tlb_add_flush,
+	.tlb_sync	= dummy_tlb_sync,
+};
+
+/*
+ * Returns true if the iova range is successfully mapped to the contiguous
+ * phys range in ops.
+ */
+static bool av8l_fast_range_has_specific_mapping(struct io_pgtable_ops *ops,
+						 const unsigned long iova_start,
+						 const phys_addr_t phys_start,
+						 const size_t size)
+{
+	unsigned long iova = iova_start;
+	phys_addr_t phys = phys_start;
+
+	while (iova < (iova_start + size)) {
+		/* + 42 just to make sure offsetting is working */
+		if (ops->iova_to_phys(ops, iova + 42) != (phys + 42))
+			return false;
+		iova += SZ_4K;
+		phys += SZ_4K;
+	}
+	return true;
+}
+
+static int __init av8l_fast_positive_testing(void)
+{
+	int failed = 0;
+	unsigned long iova;
+	struct io_pgtable_ops *ops;
+	struct io_pgtable_cfg cfg;
+	struct av8l_fast_io_pgtable *data;
+	av8l_fast_iopte *pmds;
+
+	cfg = (struct io_pgtable_cfg) {
+		.tlb = &dummy_tlb_ops,
+		.ias = 32,
+		.oas = 32,
+		.pgsize_bitmap = SZ_4K,
+	};
+
+	cfg_cookie = &cfg;
+	ops = alloc_io_pgtable_ops(ARM_V8L_FAST, &cfg, &cfg);
+
+	if (WARN_ON(!ops))
+		return 1;
+
+	data = iof_pgtable_ops_to_data(ops);
+	pmds = data->pmds;
+
+	/* map the entire 4GB VA space with 4K map calls */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
+							  SZ_1G * 4UL)))
+		failed++;
+
+	/* unmap it all */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
+			failed++;
+	}
+
+	/* sweep up TLB proving PTEs */
+	av8l_fast_clear_stale_ptes(pmds, false);
+
+	/* map the entire 4GB VA space with 8K map calls */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
+							  SZ_1G * 4UL)))
+		failed++;
+
+	/* unmap it all with 8K unmap calls */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
+			failed++;
+	}
+
+	/* sweep up TLB proving PTEs */
+	av8l_fast_clear_stale_ptes(pmds, false);
+
+	/* map the entire 4GB VA space with 16K map calls */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
+							  SZ_1G * 4UL)))
+		failed++;
+
+	/* unmap it all */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
+			failed++;
+	}
+
+	/* sweep up TLB proving PTEs */
+	av8l_fast_clear_stale_ptes(pmds, false);
+
+	/* map the entire 4GB VA space with 64K map calls */
+	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
+		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
+			failed++;
+			continue;
+		}
+	}
+
+	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
+							  SZ_1G * 4UL)))
+		failed++;
+
+	/* unmap it all at once */
+	if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
+		failed++;
+
+	free_io_pgtable_ops(ops);
+	return failed;
+}
+
+static int __init av8l_fast_do_selftests(void)
+{
+	int failed = 0;
+
+	failed += av8l_fast_positive_testing();
+
+	pr_err("selftest: completed with %d failures\n", failed);
+
+	return 0;
+}
+subsys_initcall(av8l_fast_do_selftests);
+#endif
diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c
index 127558d..541abb2 100644
--- a/drivers/iommu/io-pgtable.c
+++ b/drivers/iommu/io-pgtable.c
@@ -18,9 +18,15 @@
  * Author: Will Deacon <will.deacon@arm.com>
  */
 
+#define pr_fmt(fmt)	"io-pgtable: " fmt
+
 #include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/iommu.h>
+#include <linux/debugfs.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
 
 #include "io-pgtable.h"
 
@@ -35,8 +41,13 @@
 #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
 	[ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
 #endif
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+	[ARM_V8L_FAST] = &io_pgtable_av8l_fast_init_fns,
+#endif
 };
 
+static struct dentry *io_pgtable_top;
+
 struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
 					    struct io_pgtable_cfg *cfg,
 					    void *cookie)
@@ -77,3 +88,56 @@
 	io_pgtable_tlb_flush_all(iop);
 	io_pgtable_init_table[iop->fmt]->free(iop);
 }
+
+static atomic_t pages_allocated;
+
+void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				   size_t size, gfp_t gfp_mask)
+{
+	void *ret;
+
+	if (cfg->tlb->alloc_pages_exact)
+		ret = cfg->tlb->alloc_pages_exact(cookie, size, gfp_mask);
+	else
+		ret = alloc_pages_exact(size, gfp_mask);
+
+	if (likely(ret))
+		atomic_add(1 << get_order(size), &pages_allocated);
+
+	return ret;
+}
+
+void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				 void *virt, size_t size)
+{
+	if (cfg->tlb->free_pages_exact)
+		cfg->tlb->free_pages_exact(cookie, virt, size);
+	else
+		free_pages_exact(virt, size);
+
+	atomic_sub(1 << get_order(size), &pages_allocated);
+}
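/*
 * Editor's note: the running count kept above is exposed via the "pages"
 * debugfs_create_atomic_t() file under the "io-pgtable" directory created
 * in io_pgtable_init() below; its absolute debugfs path depends on where
 * iommu_debugfs_top is rooted, which is outside this patch.
 */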
+
+static int io_pgtable_init(void)
+{
+	io_pgtable_top = debugfs_create_dir("io-pgtable", iommu_debugfs_top);
+
+	if (!io_pgtable_top)
+		return -ENODEV;
+
+	if (!debugfs_create_atomic_t("pages", 0600,
+				     io_pgtable_top, &pages_allocated)) {
+		debugfs_remove_recursive(io_pgtable_top);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static void io_pgtable_exit(void)
+{
+	debugfs_remove_recursive(io_pgtable_top);
+}
+
+module_init(io_pgtable_init);
+module_exit(io_pgtable_exit);
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index 969d82c..33e0879 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -2,6 +2,8 @@
 #define __IO_PGTABLE_H
 #include <linux/bitops.h>
 
+#include <linux/scatterlist.h>
+
 /*
  * Public API for use by IOMMU drivers
  */
@@ -11,6 +13,7 @@
 	ARM_64_LPAE_S1,
 	ARM_64_LPAE_S2,
 	ARM_V7S,
+	ARM_V8L_FAST,
 	IO_PGTABLE_NUM_FMTS,
 };
 
@@ -22,6 +25,10 @@
  * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
  *                 any corresponding page table updates are visible to the
  *                 IOMMU.
+ * @alloc_pages_exact: Allocate page table memory (optional, defaults to
+ *                     alloc_pages_exact)
+ * @free_pages_exact:  Free page table memory (optional, defaults to
+ *                     free_pages_exact)
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -31,6 +38,8 @@
 	void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
 			      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
+	void *(*alloc_pages_exact)(void *cookie, size_t size, gfp_t gfp_mask);
+	void (*free_pages_exact)(void *cookie, void *virt, size_t size);
 };
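/*
 * Editor's illustration (not part of this patch): an IOMMU driver can back
 * its page tables from its own pool by filling in the optional hooks.
 * "my_pool_alloc"/"my_pool_free" and the tlb callbacks are hypothetical:
 *
 *	static void *my_alloc(void *cookie, size_t size, gfp_t gfp)
 *	{
 *		return my_pool_alloc(cookie, size, gfp);
 *	}
 *
 *	static void my_free(void *cookie, void *virt, size_t size)
 *	{
 *		my_pool_free(cookie, virt, size);
 *	}
 *
 *	static const struct iommu_gather_ops my_gather_ops = {
 *		.tlb_flush_all		= my_tlb_flush_all,
 *		.tlb_add_flush		= my_tlb_add_flush,
 *		.tlb_sync		= my_tlb_sync,
 *		.alloc_pages_exact	= my_alloc,
 *		.free_pages_exact	= my_free,
 *	};
 *
 * When the hooks are left NULL, io_pgtable_alloc_pages_exact() and
 * io_pgtable_free_pages_exact() fall back to the stock
 * alloc_pages_exact()/free_pages_exact().
 */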
 
 /**
@@ -65,11 +74,16 @@
 	 *	PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit
 	 *	when the SoC is in "4GB mode" and they can only access the high
 	 *	remap of DRAM (0x1_00000000 to 0x1_ffffffff).
+	 *
+	 * IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT: Override the attributes
+	 *	set in TCR for the page table walker. Use attributes specified
+	 *	by the upstream hw instead.
 	 */
 	#define IO_PGTABLE_QUIRK_ARM_NS		BIT(0)
 	#define IO_PGTABLE_QUIRK_NO_PERMS	BIT(1)
 	#define IO_PGTABLE_QUIRK_TLBI_ON_MAP	BIT(2)
 	#define IO_PGTABLE_QUIRK_ARM_MTK_4GB	BIT(3)
+	#define IO_PGTABLE_QUIRK_QCOM_USE_UPSTREAM_HINT	BIT(4)
 	unsigned long			quirks;
 	unsigned long			pgsize_bitmap;
 	unsigned int			ias;
@@ -96,6 +110,13 @@
 			u32	nmrr;
 			u32	prrr;
 		} arm_v7s_cfg;
+
+		struct {
+			u64	ttbr[2];
+			u64	tcr;
+			u64	mair[2];
+			void	*pmds;
+		} av8l_fast_cfg;
 	};
 };
 
@@ -103,6 +124,9 @@
  * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers.
  *
  * @map:          Map a physically contiguous memory region.
+ * @map_sg:	  Map a scatterlist.  Returns the number of bytes mapped,
+ *		  or 0 on failure.  The size parameter contains the size
+ *		  of the partial mapping in case of failure.
  * @unmap:        Unmap a physically contiguous memory region.
  * @iova_to_phys: Translate iova to physical address.
  *
@@ -112,7 +136,10 @@
 struct io_pgtable_ops {
 	int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot);
-	int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
+	int (*map_sg)(struct io_pgtable_ops *ops, unsigned long iova,
+		      struct scatterlist *sg, unsigned int nents,
+		      int prot, size_t *size);
+	size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
 		     size_t size);
 	phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
 				    unsigned long iova);
@@ -204,5 +231,30 @@
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
 extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_av8l_fast_init_fns;
+
+/**
+ * io_pgtable_alloc_pages_exact:
+ *	allocate an exact number of physically-contiguous pages.
+ * @cfg: page table configuration; its TLB ops may supply the allocator
+ * @cookie: opaque cookie passed through to the optional allocator hook
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * Like alloc_pages_exact(), but with some additional accounting for debug
+ * purposes.
+ */
+void *io_pgtable_alloc_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				   size_t size, gfp_t gfp_mask);
+
+/**
+ * io_pgtable_free_pages_exact:
+ *	release memory allocated via io_pgtable_alloc_pages_exact()
+ * @cfg: page table configuration; its TLB ops may supply the deallocator
+ * @cookie: opaque cookie passed through to the optional free hook
+ * @virt: the value returned by io_pgtable_alloc_pages_exact()
+ * @size: size of the allocation, as passed to io_pgtable_alloc_pages_exact()
+ *
+ * Like free_pages_exact(), but with some additional accounting for debug
+ * purposes.
+ */
+void io_pgtable_free_pages_exact(struct io_pgtable_cfg *cfg, void *cookie,
+				 void *virt, size_t size);
 
 #endif /* __IO_PGTABLE_H */
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
new file mode 100644
index 0000000..504d46c
--- /dev/null
+++ b/drivers/iommu/iommu-debug.c
@@ -0,0 +1,2046 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "iommu-debug: %s: " fmt, __func__
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/iommu.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/dma-contiguous.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/dma-iommu.h>
+
+#if defined(CONFIG_IOMMU_DEBUG_TRACKING) || defined(CONFIG_IOMMU_TESTS)
+
+static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
+{
+	switch (attr) {
+	case DOMAIN_ATTR_GEOMETRY:
+		return "DOMAIN_ATTR_GEOMETRY";
+	case DOMAIN_ATTR_PAGING:
+		return "DOMAIN_ATTR_PAGING";
+	case DOMAIN_ATTR_WINDOWS:
+		return "DOMAIN_ATTR_WINDOWS";
+	case DOMAIN_ATTR_FSL_PAMU_STASH:
+		return "DOMAIN_ATTR_FSL_PAMU_STASH";
+	case DOMAIN_ATTR_FSL_PAMU_ENABLE:
+		return "DOMAIN_ATTR_FSL_PAMU_ENABLE";
+	case DOMAIN_ATTR_FSL_PAMUV1:
+		return "DOMAIN_ATTR_FSL_PAMUV1";
+	case DOMAIN_ATTR_NESTING:
+		return "DOMAIN_ATTR_NESTING";
+	case DOMAIN_ATTR_PT_BASE_ADDR:
+		return "DOMAIN_ATTR_PT_BASE_ADDR";
+	case DOMAIN_ATTR_SECURE_VMID:
+		return "DOMAIN_ATTR_SECURE_VMID";
+	case DOMAIN_ATTR_ATOMIC:
+		return "DOMAIN_ATTR_ATOMIC";
+	case DOMAIN_ATTR_CONTEXT_BANK:
+		return "DOMAIN_ATTR_CONTEXT_BANK";
+	case DOMAIN_ATTR_TTBR0:
+		return "DOMAIN_ATTR_TTBR0";
+	case DOMAIN_ATTR_CONTEXTIDR:
+		return "DOMAIN_ATTR_CONTEXTIDR";
+	case DOMAIN_ATTR_PROCID:
+		return "DOMAIN_ATTR_PROCID";
+	case DOMAIN_ATTR_DYNAMIC:
+		return "DOMAIN_ATTR_DYNAMIC";
+	case DOMAIN_ATTR_NON_FATAL_FAULTS:
+		return "DOMAIN_ATTR_NON_FATAL_FAULTS";
+	case DOMAIN_ATTR_S1_BYPASS:
+		return "DOMAIN_ATTR_S1_BYPASS";
+	case DOMAIN_ATTR_FAST:
+		return "DOMAIN_ATTR_FAST";
+	default:
+		return "Unknown attr!";
+	}
+}
+#endif
+
+#ifdef CONFIG_IOMMU_DEBUG_TRACKING
+
+static DEFINE_MUTEX(iommu_debug_attachments_lock);
+static LIST_HEAD(iommu_debug_attachments);
+static struct dentry *debugfs_attachments_dir;
+
+struct iommu_debug_attachment {
+	struct iommu_domain *domain;
+	struct device *dev;
+	struct dentry *dentry;
+	struct list_head list;
+	unsigned long reg_offset;
+};
+
+static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_attachment *attach = s->private;
+	int secure_vmid;
+
+	seq_printf(s, "Domain: 0x%p\n", attach->domain);
+
+	seq_puts(s, "SECURE_VMID: ");
+	if (iommu_domain_get_attr(attach->domain,
+				  DOMAIN_ATTR_SECURE_VMID,
+				  &secure_vmid))
+		seq_puts(s, "(Unknown)\n");
+	else
+		seq_printf(s, "%s (0x%x)\n",
+			   msm_secure_vmid_to_string(secure_vmid), secure_vmid);
+
+	return 0;
+}
+
+static int iommu_debug_attachment_info_open(struct inode *inode,
+					    struct file *file)
+{
+	return single_open(file, iommu_debug_attachment_info_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_attachment_info_fops = {
+	.open	 = iommu_debug_attachment_info_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static ssize_t iommu_debug_attachment_trigger_fault_write(
+	struct file *file, const char __user *ubuf, size_t count,
+	loff_t *offset)
+{
+	struct iommu_debug_attachment *attach = file->private_data;
+	unsigned long flags;
+
+	if (kstrtoul_from_user(ubuf, count, 0, &flags)) {
+		pr_err("Invalid flags format\n");
+		return -EFAULT;
+	}
+
+	iommu_trigger_fault(attach->domain, flags);
+
+	return count;
+}
+
+static const struct file_operations
+iommu_debug_attachment_trigger_fault_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attachment_trigger_fault_write,
+};
+
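+/*
+ * The reg_offset/reg_read/reg_write attachment files give raw access to the
+ * attached IOMMU's register space: write a register offset to "reg_offset",
+ * then read "reg_read" or write a value to "reg_write" to access the
+ * register at that offset.
+ */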
+static ssize_t iommu_debug_attachment_reg_offset_write(
+	struct file *file, const char __user *ubuf, size_t count,
+	loff_t *offset)
+{
+	struct iommu_debug_attachment *attach = file->private_data;
+	unsigned long reg_offset;
+
+	if (kstrtoul_from_user(ubuf, count, 0, &reg_offset)) {
+		pr_err("Invalid reg_offset format\n");
+		return -EFAULT;
+	}
+
+	attach->reg_offset = reg_offset;
+
+	return count;
+}
+
+static const struct file_operations iommu_debug_attachment_reg_offset_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attachment_reg_offset_write,
+};
+
+static ssize_t iommu_debug_attachment_reg_read_read(
+	struct file *file, char __user *ubuf, size_t count, loff_t *offset)
+{
+	struct iommu_debug_attachment *attach = file->private_data;
+	unsigned long val;
+	char *val_str;
+	ssize_t val_str_len;
+
+	if (*offset)
+		return 0;
+
+	val = iommu_reg_read(attach->domain, attach->reg_offset);
+	val_str = kasprintf(GFP_KERNEL, "0x%lx\n", val);
+	if (!val_str)
+		return -ENOMEM;
+	val_str_len = strlen(val_str);
+
+	if (copy_to_user(ubuf, val_str, val_str_len)) {
+		pr_err("copy_to_user failed\n");
+		val_str_len = -EFAULT;
+		goto out;
+	}
+	*offset = 1;		/* non-zero means we're done */
+
+out:
+	kfree(val_str);
+	return val_str_len;
+}
+
+static const struct file_operations iommu_debug_attachment_reg_read_fops = {
+	.open	= simple_open,
+	.read	= iommu_debug_attachment_reg_read_read,
+};
+
+static ssize_t iommu_debug_attachment_reg_write_write(
+	struct file *file, const char __user *ubuf, size_t count,
+	loff_t *offset)
+{
+	struct iommu_debug_attachment *attach = file->private_data;
+	unsigned long val;
+
+	if (kstrtoul_from_user(ubuf, count, 0, &val)) {
+		pr_err("Invalid val format\n");
+		return -EFAULT;
+	}
+
+	iommu_reg_write(attach->domain, attach->reg_offset, val);
+
+	return count;
+}
+
+static const struct file_operations iommu_debug_attachment_reg_write_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attachment_reg_write_write,
+};
+
+/* should be called with iommu_debug_attachments_lock locked */
+static int iommu_debug_attach_add_debugfs(
+	struct iommu_debug_attachment *attach)
+{
+	const char *attach_name;
+	struct device *dev = attach->dev;
+	struct iommu_domain *domain = attach->domain;
+	int is_dynamic;
+
+	if (iommu_domain_get_attr(domain, DOMAIN_ATTR_DYNAMIC, &is_dynamic))
+		is_dynamic = 0;
+
+	if (is_dynamic) {
+		uuid_le uuid;
+
+		uuid_le_gen(&uuid);
+		attach_name = kasprintf(GFP_KERNEL, "%s-%pUl", dev_name(dev),
+					uuid.b);
+		if (!attach_name)
+			return -ENOMEM;
+	} else {
+		attach_name = dev_name(dev);
+	}
+
+	attach->dentry = debugfs_create_dir(attach_name,
+					    debugfs_attachments_dir);
+	if (!attach->dentry) {
+		pr_err("Couldn't create iommu/attachments/%s debugfs directory for domain 0x%p\n",
+		       attach_name, domain);
+		if (is_dynamic)
+			kfree(attach_name);
+		return -EIO;
+	}
+
+	if (is_dynamic)
+		kfree(attach_name);
+
+	if (!debugfs_create_file(
+		    "info", S_IRUSR, attach->dentry, attach,
+		    &iommu_debug_attachment_info_fops)) {
+		pr_err("Couldn't create iommu/attachments/%s/info debugfs file for domain 0x%p\n",
+		       dev_name(dev), domain);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file(
+		    "trigger_fault", S_IRUSR, attach->dentry, attach,
+		    &iommu_debug_attachment_trigger_fault_fops)) {
+		pr_err("Couldn't create iommu/attachments/%s/trigger_fault debugfs file for domain 0x%p\n",
+		       dev_name(dev), domain);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file(
+		    "reg_offset", S_IRUSR, attach->dentry, attach,
+		    &iommu_debug_attachment_reg_offset_fops)) {
+		pr_err("Couldn't create iommu/attachments/%s/reg_offset debugfs file for domain 0x%p\n",
+		       dev_name(dev), domain);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file(
+		    "reg_read", S_IRUSR, attach->dentry, attach,
+		    &iommu_debug_attachment_reg_read_fops)) {
+		pr_err("Couldn't create iommu/attachments/%s/reg_read debugfs file for domain 0x%p\n",
+		       dev_name(dev), domain);
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file(
+		    "reg_write", S_IRUSR, attach->dentry, attach,
+		    &iommu_debug_attachment_reg_write_fops)) {
+		pr_err("Couldn't create iommu/attachments/%s/reg_write debugfs file for domain 0x%p\n",
+		       dev_name(dev), domain);
+		goto err_rmdir;
+	}
+
+	return 0;
+
+err_rmdir:
+	debugfs_remove_recursive(attach->dentry);
+	return -EIO;
+}
+
+void iommu_debug_domain_add(struct iommu_domain *domain)
+{
+	struct iommu_debug_attachment *attach;
+
+	mutex_lock(&iommu_debug_attachments_lock);
+
+	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+	if (!attach)
+		goto out_unlock;
+
+	attach->domain = domain;
+	attach->dev = NULL;
+	list_add(&attach->list, &iommu_debug_attachments);
+
+out_unlock:
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
+
+void iommu_debug_domain_remove(struct iommu_domain *domain)
+{
+	struct iommu_debug_attachment *it;
+
+	mutex_lock(&iommu_debug_attachments_lock);
+	list_for_each_entry(it, &iommu_debug_attachments, list)
+		if (it->domain == domain && it->dev == NULL)
+			break;
+
+	if (&it->list == &iommu_debug_attachments) {
+		WARN(1, "Couldn't find debug attachment for domain=0x%p",
+				domain);
+	} else {
+		list_del(&it->list);
+		kfree(it);
+	}
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
+
+void iommu_debug_attach_device(struct iommu_domain *domain,
+			       struct device *dev)
+{
+	struct iommu_debug_attachment *attach;
+
+	mutex_lock(&iommu_debug_attachments_lock);
+
+	list_for_each_entry(attach, &iommu_debug_attachments, list)
+		if (attach->domain == domain && attach->dev == NULL)
+			break;
+
+	if (&attach->list == &iommu_debug_attachments) {
+		WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
+		     domain, dev_name(dev));
+	} else {
+		attach->dev = dev;
+
+		/*
+		 * we might not init until after other drivers start calling
+		 * iommu_attach_device. Only set up the debugfs nodes if we've
+		 * already init'd to avoid polluting the top-level debugfs
+		 * directory (by calling debugfs_create_dir with a NULL
+		 * parent). These will be flushed out later once we init.
+		 */
+
+		if (debugfs_attachments_dir)
+			iommu_debug_attach_add_debugfs(attach);
+	}
+
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
+
+void iommu_debug_detach_device(struct iommu_domain *domain,
+			       struct device *dev)
+{
+	struct iommu_debug_attachment *it;
+
+	mutex_lock(&iommu_debug_attachments_lock);
+	list_for_each_entry(it, &iommu_debug_attachments, list)
+		if (it->domain == domain && it->dev == dev)
+			break;
+
+	if (&it->list == &iommu_debug_attachments) {
+		WARN(1, "Couldn't find debug attachment for domain=0x%p dev=%s",
+		     domain, dev_name(dev));
+	} else {
+		/*
+		 * Just remove debugfs entry and mark dev as NULL on
+		 * iommu_detach call. We would remove the actual
+		 * attachment entry from the list only on domain_free call.
+		 * This is to ensure we keep track of unattached domains too.
+		 */
+
+		debugfs_remove_recursive(it->dentry);
+		it->dev = NULL;
+	}
+	mutex_unlock(&iommu_debug_attachments_lock);
+}
+
+static int iommu_debug_init_tracking(void)
+{
+	int ret = 0;
+	struct iommu_debug_attachment *attach;
+
+	mutex_lock(&iommu_debug_attachments_lock);
+	debugfs_attachments_dir = debugfs_create_dir("attachments",
+						     iommu_debugfs_top);
+	if (!debugfs_attachments_dir) {
+		pr_err("Couldn't create iommu/attachments debugfs directory\n");
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	/* set up debugfs entries for attachments made during early boot */
+	list_for_each_entry(attach, &iommu_debug_attachments, list)
+		if (attach->dev)
+			iommu_debug_attach_add_debugfs(attach);
+
+out_unlock:
+	mutex_unlock(&iommu_debug_attachments_lock);
+	return ret;
+}
+
+static void iommu_debug_destroy_tracking(void)
+{
+	debugfs_remove_recursive(debugfs_attachments_dir);
+}
+#else
+static inline int iommu_debug_init_tracking(void) { return 0; }
+static inline void iommu_debug_destroy_tracking(void) { }
+#endif
+
+#ifdef CONFIG_IOMMU_TESTS
+
+#ifdef CONFIG_64BIT
+
+#define kstrtoux kstrtou64
+#define kstrtox_from_user kstrtoull_from_user
+#define kstrtosize_t kstrtoul
+
+#else
+
+#define kstrtoux kstrtou32
+#define kstrtox_from_user kstrtouint_from_user
+#define kstrtosize_t kstrtouint
+
+#endif
+
+static LIST_HEAD(iommu_debug_devices);
+static struct dentry *debugfs_tests_dir;
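+/*
+ * Number of map/unmap iterations averaged per size by the profiling tests;
+ * tunable at runtime through the per-device "nr_iters" debugfs file
+ * (clamped to the range [1, 10000]).
+ */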
+static u32 iters_per_op = 1;
+
+struct iommu_debug_device {
+	struct device *dev;
+	struct iommu_domain *domain;
+	u64 iova;
+	u64 phys;
+	size_t len;
+	struct list_head list;
+};
+
+static int iommu_debug_build_phoney_sg_table(struct device *dev,
+					     struct sg_table *table,
+					     unsigned long total_size,
+					     unsigned long chunk_size)
+{
+	unsigned long nents = total_size / chunk_size;
+	struct scatterlist *sg;
+	int i;
+	struct page *page;
+
+	if (!IS_ALIGNED(total_size, PAGE_SIZE))
+		return -EINVAL;
+	if (!IS_ALIGNED(total_size, chunk_size))
+		return -EINVAL;
+	if (sg_alloc_table(table, nents, GFP_KERNEL))
+		return -EINVAL;
+	page = alloc_pages(GFP_KERNEL, get_order(chunk_size));
+	if (!page)
+		goto free_table;
+
+	/* all the same page... why not. */
+	for_each_sg(table->sgl, sg, table->nents, i)
+		sg_set_page(sg, page, chunk_size, 0);
+
+	return 0;
+
+free_table:
+	sg_free_table(table);
+	return -ENOMEM;
+}
+
+static void iommu_debug_destroy_phoney_sg_table(struct device *dev,
+						struct sg_table *table,
+						unsigned long chunk_size)
+{
+	__free_pages(sg_page(table->sgl), get_order(chunk_size));
+	sg_free_table(table);
+}
+
+static const char *_size_to_string(unsigned long size)
+{
+	switch (size) {
+	case SZ_4K:
+		return "4K";
+	case SZ_8K:
+		return "8K";
+	case SZ_16K:
+		return "16K";
+	case SZ_64K:
+		return "64K";
+	case SZ_2M:
+		return "2M";
+	case SZ_1M * 12:
+		return "12M";
+	case SZ_1M * 20:
+		return "20M";
+	}
+	return "unknown size, please add to _size_to_string";
+}
+
+static int nr_iters_set(void *data, u64 val)
+{
+	if (!val)
+		val = 1;
+	if (val > 10000)
+		val = 10000;
+	*(u32 *)data = val;
+	return 0;
+}
+
+static int nr_iters_get(void *data, u64 *val)
+{
+	*val = *(u32 *)data;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(iommu_debug_nr_iters_ops,
+			nr_iters_get, nr_iters_set, "%llu\n");
+
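+/*
+ * Allocates a fresh domain with the given attributes, attaches it to @dev,
+ * and reports the average iommu_map()/iommu_map_sg() and iommu_unmap()
+ * latency for each size in @sizes, averaged over iters_per_op iterations.
+ */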
+static void iommu_debug_device_profiling(struct seq_file *s, struct device *dev,
+					 enum iommu_attr attrs[],
+					 void *attr_values[], int nattrs,
+					 const size_t sizes[])
+{
+	int i;
+	const size_t *sz;
+	struct iommu_domain *domain;
+	unsigned long iova = 0x10000;
+	phys_addr_t paddr = 0xa000;
+
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain) {
+		seq_puts(s, "Couldn't allocate domain\n");
+		return;
+	}
+
+	seq_puts(s, "Domain attributes: [ ");
+	for (i = 0; i < nattrs; ++i) {
+		/* not all attrs are ints, but this will get us by for now */
+		seq_printf(s, "%s=%d%s", iommu_debug_attr_to_string(attrs[i]),
+			   *((int *)attr_values[i]),
+			   i < nattrs ? " " : "");
+	}
+	seq_puts(s, "]\n");
+	for (i = 0; i < nattrs; ++i) {
+		if (iommu_domain_set_attr(domain, attrs[i], attr_values[i])) {
+			seq_printf(s, "Couldn't set %s to the value at %p\n",
+				   iommu_debug_attr_to_string(attrs[i]),
+				   attr_values[i]);
+			goto out_domain_free;
+		}
+	}
+
+	if (iommu_attach_device(domain, dev)) {
+		seq_puts(s,
+			 "Couldn't attach new domain to device. Is it already attached?\n");
+		goto out_domain_free;
+	}
+
+	seq_printf(s, "(average over %d iterations)\n", iters_per_op);
+	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map", "iommu_unmap");
+	for (sz = sizes; *sz; ++sz) {
+		size_t size = *sz;
+		size_t unmapped;
+		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
+		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
+		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
+		struct timespec tbefore, tafter, diff;
+		int i;
+
+		for (i = 0; i < iters_per_op; ++i) {
+			getnstimeofday(&tbefore);
+			if (iommu_map(domain, iova, paddr, size,
+				      IOMMU_READ | IOMMU_WRITE)) {
+				seq_puts(s, "Failed to map\n");
+				continue;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			map_elapsed_ns += timespec_to_ns(&diff);
+
+			getnstimeofday(&tbefore);
+			unmapped = iommu_unmap(domain, iova, size);
+			if (unmapped != size) {
+				seq_printf(s,
+					   "Only unmapped %zx instead of %zx\n",
+					   unmapped, size);
+				continue;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			unmap_elapsed_ns += timespec_to_ns(&diff);
+		}
+
+		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
+				&map_elapsed_rem);
+		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
+				&unmap_elapsed_rem);
+
+		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
+						&map_elapsed_rem);
+		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
+						&unmap_elapsed_rem);
+
+		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
+			_size_to_string(size),
+			map_elapsed_us, map_elapsed_rem,
+			unmap_elapsed_us, unmap_elapsed_rem);
+	}
+
+	seq_putc(s, '\n');
+	seq_printf(s, "%8s %19s %16s\n", "size", "iommu_map_sg", "iommu_unmap");
+	for (sz = sizes; *sz; ++sz) {
+		size_t size = *sz;
+		size_t unmapped;
+		u64 map_elapsed_ns = 0, unmap_elapsed_ns = 0;
+		u64 map_elapsed_us = 0, unmap_elapsed_us = 0;
+		u32 map_elapsed_rem = 0, unmap_elapsed_rem = 0;
+		struct timespec tbefore, tafter, diff;
+		struct sg_table table;
+		unsigned long chunk_size = SZ_4K;
+		int i;
+
+		if (iommu_debug_build_phoney_sg_table(dev, &table, size,
+						      chunk_size)) {
+			seq_puts(s,
+				"couldn't build phoney sg table! bailing...\n");
+			goto out_detach;
+		}
+
+		for (i = 0; i < iters_per_op; ++i) {
+			getnstimeofday(&tbefore);
+			if (iommu_map_sg(domain, iova, table.sgl, table.nents,
+					 IOMMU_READ | IOMMU_WRITE) != size) {
+				seq_puts(s, "Failed to map_sg\n");
+				goto next;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			map_elapsed_ns += timespec_to_ns(&diff);
+
+			getnstimeofday(&tbefore);
+			unmapped = iommu_unmap(domain, iova, size);
+			if (unmapped != size) {
+				seq_printf(s,
+					   "Only unmapped %zx instead of %zx\n",
+					   unmapped, size);
+				goto next;
+			}
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			unmap_elapsed_ns += timespec_to_ns(&diff);
+		}
+
+		map_elapsed_ns = div_u64_rem(map_elapsed_ns, iters_per_op,
+				&map_elapsed_rem);
+		unmap_elapsed_ns = div_u64_rem(unmap_elapsed_ns, iters_per_op,
+				&unmap_elapsed_rem);
+
+		map_elapsed_us = div_u64_rem(map_elapsed_ns, 1000,
+						&map_elapsed_rem);
+		unmap_elapsed_us = div_u64_rem(unmap_elapsed_ns, 1000,
+						&unmap_elapsed_rem);
+
+		seq_printf(s, "%8s %12lld.%03d us %9lld.%03d us\n",
+			_size_to_string(size),
+			map_elapsed_us, map_elapsed_rem,
+			unmap_elapsed_us, unmap_elapsed_rem);
+
+next:
+		iommu_debug_destroy_phoney_sg_table(dev, &table, chunk_size);
+	}
+
+out_detach:
+	iommu_detach_device(domain, dev);
+out_domain_free:
+	iommu_domain_free(domain);
+}
+
+static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_device *ddev = s->private;
+	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
+					SZ_1M * 20, 0 };
+	enum iommu_attr attrs[] = {
+		DOMAIN_ATTR_ATOMIC,
+	};
+	int atomic = 1;
+	void *attr_values[] = { &atomic };
+
+	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
+				     ARRAY_SIZE(attrs), sizes);
+
+	return 0;
+}
+
+static int iommu_debug_profiling_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, iommu_debug_profiling_show, inode->i_private);
+}
+
+static const struct file_operations iommu_debug_profiling_fops = {
+	.open	 = iommu_debug_profiling_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_device *ddev = s->private;
+	const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
+					SZ_1M * 20, 0 };
+
+	enum iommu_attr attrs[] = {
+		DOMAIN_ATTR_ATOMIC,
+		DOMAIN_ATTR_SECURE_VMID,
+	};
+	int one = 1, secure_vmid = VMID_CP_PIXEL;
+	void *attr_values[] = { &one, &secure_vmid };
+
+	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
+				     ARRAY_SIZE(attrs), sizes);
+
+	return 0;
+}
+
+static int iommu_debug_secure_profiling_open(struct inode *inode,
+					     struct file *file)
+{
+	return single_open(file, iommu_debug_secure_profiling_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_secure_profiling_fops = {
+	.open	 = iommu_debug_secure_profiling_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
+{
+	struct iommu_debug_device *ddev = s->private;
+	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
+	enum iommu_attr attrs[] = {
+		DOMAIN_ATTR_FAST,
+		DOMAIN_ATTR_ATOMIC,
+	};
+	int one = 1;
+	void *attr_values[] = { &one, &one };
+
+	iommu_debug_device_profiling(s, ddev->dev, attrs, attr_values,
+				     ARRAY_SIZE(attrs), sizes);
+
+	return 0;
+}
+
+static int iommu_debug_profiling_fast_open(struct inode *inode,
+					   struct file *file)
+{
+	return single_open(file, iommu_debug_profiling_fast_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_profiling_fast_fops = {
+	.open	 = iommu_debug_profiling_fast_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_profiling_fast_dma_api_show(struct seq_file *s,
+						 void *ignored)
+{
+	int i, experiment;
+	struct iommu_debug_device *ddev = s->private;
+	struct device *dev = ddev->dev;
+	u64 map_elapsed_ns[10], unmap_elapsed_ns[10];
+	struct dma_iommu_mapping *mapping;
+	dma_addr_t dma_addr;
+	void *virt;
+	int fast = 1;
+	const char * const extra_labels[] = {
+		"not coherent",
+		"coherent",
+	};
+	unsigned long extra_attrs[] = {
+		0,
+		DMA_ATTR_SKIP_CPU_SYNC,
+	};
+
+	virt = kmalloc(1518, GFP_KERNEL);
+	if (!virt)
+		goto out;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	if (!mapping) {
+		seq_puts(s, "arm_iommu_create_mapping failed\n");
+		goto out_kfree;
+	}
+
+	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
+		seq_puts(s, "iommu_domain_set_attr failed\n");
+		goto out_release_mapping;
+	}
+
+	if (arm_iommu_attach_device(dev, mapping)) {
+		seq_puts(s, "arm_iommu_attach_device failed\n");
+		goto out_release_mapping;
+	}
+
+	if (iommu_enable_config_clocks(mapping->domain)) {
+		seq_puts(s, "Couldn't enable clocks\n");
+		goto out_detach;
+	}
+	for (experiment = 0; experiment < 2; ++experiment) {
+		size_t map_avg = 0, unmap_avg = 0;
+
+		for (i = 0; i < 10; ++i) {
+			struct timespec tbefore, tafter, diff;
+			u64 ns;
+
+			getnstimeofday(&tbefore);
+			dma_addr = dma_map_single_attrs(
+				dev, virt, SZ_4K, DMA_TO_DEVICE,
+				extra_attrs[experiment]);
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			ns = timespec_to_ns(&diff);
+			if (dma_mapping_error(dev, dma_addr)) {
+				seq_puts(s, "dma_map_single failed\n");
+				goto out_disable_config_clocks;
+			}
+			map_elapsed_ns[i] = ns;
+
+			getnstimeofday(&tbefore);
+			dma_unmap_single_attrs(
+				dev, dma_addr, SZ_4K, DMA_TO_DEVICE,
+				extra_attrs[experiment]);
+			getnstimeofday(&tafter);
+			diff = timespec_sub(tafter, tbefore);
+			ns = timespec_to_ns(&diff);
+			unmap_elapsed_ns[i] = ns;
+		}
+
+		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
+			   "dma_map_single_attrs");
+		for (i = 0; i < 10; ++i) {
+			map_avg += map_elapsed_ns[i];
+			seq_printf(s, "%5llu%s", map_elapsed_ns[i],
+				   i < 9 ? ", " : "");
+		}
+		map_avg /= 10;
+		seq_printf(s, "] (avg: %zu)\n", map_avg);
+
+		seq_printf(s, "%13s %24s (ns): [", extra_labels[experiment],
+			   "dma_unmap_single_attrs");
+		for (i = 0; i < 10; ++i) {
+			unmap_avg += unmap_elapsed_ns[i];
+			seq_printf(s, "%5llu%s", unmap_elapsed_ns[i],
+				   i < 9 ? ", " : "");
+		}
+		unmap_avg /= 10;
+		seq_printf(s, "] (avg: %zu)\n", unmap_avg);
+	}
+
+out_disable_config_clocks:
+	iommu_disable_config_clocks(mapping->domain);
+out_detach:
+	arm_iommu_detach_device(dev);
+out_release_mapping:
+	arm_iommu_release_mapping(mapping);
+out_kfree:
+	kfree(virt);
+out:
+	return 0;
+}
+
+static int iommu_debug_profiling_fast_dma_api_open(struct inode *inode,
+						 struct file *file)
+{
+	return single_open(file, iommu_debug_profiling_fast_dma_api_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_profiling_fast_dma_api_fops = {
+	.open	 = iommu_debug_profiling_fast_dma_api_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
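+/*
+ * Fills the entire 4GB iova space with 8K mappings, punches 4K/8K holes and
+ * re-maps them to check that freed iovas are recycled and that any stale TLB
+ * entries were invalidated along the way.
+ */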
+static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
+{
+	int i, ret = 0;
+	unsigned long iova;
+	const unsigned long max = SZ_1G * 4UL;
+	void *virt;
+	dma_addr_t dma_addr;
+
+	/*
+	 * we'll be doing 4K and 8K mappings.  Need to own an entire 8K
+	 * chunk that we can work with.
+	 */
+	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(SZ_8K));
+	if (!virt)
+		return -ENOMEM;
+
+	/* fill the whole 4GB space */
+	for (iova = 0, i = 0; iova < max; iova += SZ_8K, ++i) {
+		dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
+		if (dma_addr == DMA_ERROR_CODE) {
+			dev_err(dev, "Failed map on iter %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
+		dev_err(dev,
+			"dma_map_single unexpectedly succeeded (VA should have been exhausted)\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * free up 4K at the very beginning, then leave one 4K mapping,
+	 * then free up 8K.  This will result in the next 8K map to skip
+	 * over the 4K hole and take the 8K one.
+	 */
+	dma_unmap_single(dev, 0, SZ_4K, DMA_TO_DEVICE);
+	dma_unmap_single(dev, SZ_8K, SZ_4K, DMA_TO_DEVICE);
+	dma_unmap_single(dev, SZ_8K + SZ_4K, SZ_4K, DMA_TO_DEVICE);
+
+	/* remap 8K */
+	dma_addr = dma_map_single(dev, virt, SZ_8K, DMA_TO_DEVICE);
+	if (dma_addr != SZ_8K) {
+		dma_addr_t expected = SZ_8K;
+
+		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+			&dma_addr, &expected);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * now remap 4K.  We should get the first 4K chunk that was skipped
+	 * over during the previous 8K map.  If we missed a TLB invalidate
+	 * at that point this should explode.
+	 */
+	dma_addr = dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE);
+	if (dma_addr != 0) {
+		dma_addr_t expected = 0;
+
+		dev_err(dev, "Unexpected dma_addr. got: %pa expected: %pa\n",
+			&dma_addr, &expected);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (dma_map_single(dev, virt, SZ_4K, DMA_TO_DEVICE) != DMA_ERROR_CODE) {
+		dev_err(dev,
+			"dma_map_single unexpectedly succeeded after remaps (VA should have been exhausted)\n");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* we're all full again. unmap everything. */
+	for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
+		dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
+
+out:
+	free_pages((unsigned long)virt, get_order(SZ_8K));
+	return ret;
+}
+
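+/*
+ * Small Fibonacci-sequence helper used by __rand_va_sweep() to pick a
+ * deterministic but scattered set of iovas to unmap.
+ */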
+struct fib_state {
+	unsigned long cur;
+	unsigned long prev;
+};
+
+static void fib_init(struct fib_state *f)
+{
+	f->cur = f->prev = 1;
+}
+
+static unsigned long get_next_fib(struct fib_state *f)
+{
+	unsigned long next = f->cur + f->prev;
+
+	f->prev = f->cur;
+	f->cur = next;
+	return next;
+}
+
+/*
+ * Not actually random.  Just testing the fibs (and max - the fibs).
+ */
+static int __rand_va_sweep(struct device *dev, struct seq_file *s,
+			   const size_t size)
+{
+	u64 iova;
+	const unsigned long max = SZ_1G * 4UL;
+	int i, remapped, unmapped, ret = 0;
+	void *virt;
+	dma_addr_t dma_addr, dma_addr2;
+	struct fib_state fib;
+
+	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
+	if (!virt) {
+		if (size > SZ_8K) {
+			dev_err(dev,
+				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
+				_size_to_string(size));
+			return 0;
+		}
+		return -ENOMEM;
+	}
+
+	/* fill the whole 4GB space */
+	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
+		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+		if (dma_addr == DMA_ERROR_CODE) {
+			dev_err(dev, "Failed map on iter %d\n", i);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	/* now unmap "random" iovas */
+	unmapped = 0;
+	fib_init(&fib);
+	for (iova = get_next_fib(&fib) * size;
+	     iova < max - size;
+	     iova = get_next_fib(&fib) * size) {
+		dma_addr = iova;
+		dma_addr2 = max - size - iova;
+		if (dma_addr == dma_addr2) {
+			WARN(1,
+			"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
+			__func__);
+			return -EINVAL;
+		}
+		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+		dma_unmap_single(dev, dma_addr2, size, DMA_TO_DEVICE);
+		unmapped += 2;
+	}
+
+	/* and map until everything fills back up */
+	for (remapped = 0; ; ++remapped) {
+		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+		if (dma_addr == DMA_ERROR_CODE)
+			break;
+	}
+
+	if (unmapped != remapped) {
+		dev_err(dev,
+			"Unexpected random remap count! Unmapped %d but remapped %d\n",
+			unmapped, remapped);
+		ret = -EINVAL;
+	}
+
+	for (dma_addr = 0; dma_addr < max; dma_addr += size)
+		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+
+out:
+	free_pages((unsigned long)virt, get_order(size));
+	return ret;
+}
+
+static int __check_mapping(struct device *dev, struct iommu_domain *domain,
+			   dma_addr_t iova, phys_addr_t expected)
+{
+	phys_addr_t res = iommu_iova_to_phys_hard(domain, iova);
+	phys_addr_t res2 = iommu_iova_to_phys(domain, iova);
+
+	WARN(res != res2, "hard/soft iova_to_phys fns don't agree...");
+
+	if (res != expected) {
+		dev_err_ratelimited(dev,
+				    "Bad translation for %pa! Expected: %pa Got: %pa\n",
+				    &iova, &expected, &res);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __full_va_sweep(struct device *dev, struct seq_file *s,
+			   const size_t size, struct iommu_domain *domain)
+{
+	unsigned long iova;
+	dma_addr_t dma_addr;
+	void *virt;
+	phys_addr_t phys;
+	int ret = 0, i;
+
+	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
+	if (!virt) {
+		if (size > SZ_8K) {
+			dev_err(dev,
+				"Failed to allocate %s of memory, which is a lot. Skipping test for this size\n",
+				_size_to_string(size));
+			return 0;
+		}
+		return -ENOMEM;
+	}
+	phys = virt_to_phys(virt);
+
+	for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
+		unsigned long expected = iova;
+
+		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+		if (dma_addr != expected) {
+			dev_err_ratelimited(dev,
+					    "Unexpected iova on iter %d (expected: 0x%lx got: 0x%lx)\n",
+					    i, expected,
+					    (unsigned long)dma_addr);
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (domain) {
+		/* check every mapping from 0..6M */
+		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
+			phys_addr_t expected = phys;
+
+			if (__check_mapping(dev, domain, iova, expected)) {
+				dev_err(dev, "iter: %d\n", i);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+		/* and from 4G..4G-6M */
+		for (iova = 0, i = 0; iova < SZ_2M * 3; iova += size, ++i) {
+			phys_addr_t expected = phys;
+			unsigned long theiova = ((SZ_1G * 4ULL) - size) - iova;
+
+			if (__check_mapping(dev, domain, theiova, expected)) {
+				dev_err(dev, "iter: %d\n", i);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+	}
+
+	/* at this point, our VA space should be full */
+	dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
+	if (dma_addr != DMA_ERROR_CODE) {
+		dev_err_ratelimited(dev,
+				    "dma_map_single succeeded when it should have failed. Got iova: 0x%lx\n",
+				    (unsigned long)dma_addr);
+		ret = -EINVAL;
+	}
+
+out:
+	for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
+		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+
+	free_pages((unsigned long)virt, get_order(size));
+	return ret;
+}
+
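+/*
+ * Print test output both to the kernel log (dev_err) and to the debugfs
+ * seq_file so results show up in dmesg as well as in the file read.
+ */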
+#define ds_printf(d, s, fmt, ...) ({				\
+			dev_err(d, fmt, ##__VA_ARGS__);		\
+			seq_printf(s, fmt, ##__VA_ARGS__);	\
+		})
+
+static int __functional_dma_api_va_test(struct device *dev, struct seq_file *s,
+				     struct iommu_domain *domain, void *priv)
+{
+	int i, j, ret = 0;
+	size_t *sz, *sizes = priv;
+
+	for (j = 0; j < 1; ++j) {
+		for (sz = sizes; *sz; ++sz) {
+			for (i = 0; i < 2; ++i) {
+				ds_printf(dev, s, "Full VA sweep @%s %d",
+					       _size_to_string(*sz), i);
+				if (__full_va_sweep(dev, s, *sz, domain)) {
+					ds_printf(dev, s, "  -> FAILED\n");
+					ret = -EINVAL;
+				} else {
+					ds_printf(dev, s, "  -> SUCCEEDED\n");
+				}
+			}
+		}
+	}
+
+	ds_printf(dev, s, "bonus map:");
+	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+	}
+
+	for (sz = sizes; *sz; ++sz) {
+		for (i = 0; i < 2; ++i) {
+			ds_printf(dev, s, "Rand VA sweep @%s %d",
+				   _size_to_string(*sz), i);
+			if (__rand_va_sweep(dev, s, *sz)) {
+				ds_printf(dev, s, "  -> FAILED\n");
+				ret = -EINVAL;
+			} else {
+				ds_printf(dev, s, "  -> SUCCEEDED\n");
+			}
+		}
+	}
+
+	ds_printf(dev, s, "TLB stress sweep");
+	if (__tlb_stress_sweep(dev, s)) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+	}
+
+	ds_printf(dev, s, "second bonus map:");
+	if (__full_va_sweep(dev, s, SZ_4K, domain)) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+	}
+
+	return ret;
+}
+
+static int __functional_dma_api_alloc_test(struct device *dev,
+					   struct seq_file *s,
+					   struct iommu_domain *domain,
+					   void *ignored)
+{
+	size_t size = SZ_1K * 742;
+	int ret = 0;
+	u8 *data;
+	dma_addr_t iova;
+
+	/* Make sure we can allocate and use a buffer */
+	ds_printf(dev, s, "Allocating coherent buffer");
+	data = dma_alloc_coherent(dev, size, &iova, GFP_KERNEL);
+	if (!data) {
+		ds_printf(dev, s, "  -> FAILED\n");
+		ret = -EINVAL;
+	} else {
+		int i;
+
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+		ds_printf(dev, s, "Using coherent buffer");
+		for (i = 0; i < 742; ++i) {
+			int ind = SZ_1K * i;
+			u8 *p = data + ind;
+			u8 val = i % 255;
+
+			memset(data, 0xa5, size);
+			*p = val;
+			(*p)++;
+			if ((*p) != val + 1) {
+				ds_printf(dev, s,
+					  "  -> FAILED on iter %d since %d != %d\n",
+					  i, *p, val + 1);
+				ret = -EINVAL;
+			}
+		}
+		if (!ret)
+			ds_printf(dev, s, "  -> SUCCEEDED\n");
+		dma_free_coherent(dev, size, data, iova);
+	}
+
+	return ret;
+}
+
+static int __functional_dma_api_basic_test(struct device *dev,
+					   struct seq_file *s,
+					   struct iommu_domain *domain,
+					   void *ignored)
+{
+	size_t size = 1518;
+	int i, j, ret = 0;
+	u8 *data;
+	dma_addr_t iova;
+	phys_addr_t pa, pa2;
+
+	ds_printf(dev, s, "Basic DMA API test");
+	/* Make sure we can allocate and use a buffer */
+	for (i = 0; i < 1000; ++i) {
+		data = kmalloc(size, GFP_KERNEL);
+		if (!data) {
+			ds_printf(dev, s, "  -> FAILED\n");
+			ret = -EINVAL;
+			goto out;
+		}
+		memset(data, 0xa5, size);
+		iova = dma_map_single(dev, data, size, DMA_TO_DEVICE);
+		pa = iommu_iova_to_phys(domain, iova);
+		pa2 = iommu_iova_to_phys_hard(domain, iova);
+		if (pa != pa2) {
+			dev_err(dev,
+				"iova_to_phys doesn't match iova_to_phys_hard: %pa != %pa\n",
+				&pa, &pa2);
+			ret = -EINVAL;
+			goto out;
+		}
+		pa2 = virt_to_phys(data);
+		if (pa != pa2) {
+			dev_err(dev,
+				"iova_to_phys doesn't match virt_to_phys: %pa != %pa\n",
+				&pa, &pa2);
+			ret = -EINVAL;
+			goto out;
+		}
+		dma_unmap_single(dev, iova, size, DMA_TO_DEVICE);
+		for (j = 0; j < size; ++j) {
+			if (data[j] != 0xa5) {
+				dev_err(dev, "data[%d] != 0xa5 (0x%02x)\n",
+					j, data[j]);
+				ret = -EINVAL;
+				goto out;
+			}
+		}
+		kfree(data);
+	}
+
+out:
+	if (ret)
+		ds_printf(dev, s, "  -> FAILED\n");
+	else
+		ds_printf(dev, s, "  -> SUCCEEDED\n");
+
+	return ret;
+}
+
+/* Creates a fresh fast mapping and applies @fn to it */
+static int __apply_to_new_mapping(struct seq_file *s,
+				    int (*fn)(struct device *dev,
+					      struct seq_file *s,
+					      struct iommu_domain *domain,
+					      void *priv),
+				    void *priv)
+{
+	struct dma_iommu_mapping *mapping;
+	struct iommu_debug_device *ddev = s->private;
+	struct device *dev = ddev->dev;
+	int ret = -EINVAL, fast = 1;
+	phys_addr_t pt_phys;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	if (!mapping)
+		goto out;
+
+	if (iommu_domain_set_attr(mapping->domain, DOMAIN_ATTR_FAST, &fast)) {
+		seq_puts(s, "iommu_domain_set_attr failed\n");
+		goto out_release_mapping;
+	}
+
+	if (arm_iommu_attach_device(dev, mapping))
+		goto out_release_mapping;
+
+	if (iommu_domain_get_attr(mapping->domain, DOMAIN_ATTR_PT_BASE_ADDR,
+				  &pt_phys)) {
+		ds_printf(dev, s, "Couldn't get page table base address\n");
+		goto out_release_mapping;
+	}
+
+	dev_err(dev, "testing with pgtables at %pa\n", &pt_phys);
+	if (iommu_enable_config_clocks(mapping->domain)) {
+		ds_printf(dev, s, "Couldn't enable clocks\n");
+		goto out_release_mapping;
+	}
+	ret = fn(dev, s, mapping->domain, priv);
+	iommu_disable_config_clocks(mapping->domain);
+
+	arm_iommu_detach_device(dev);
+out_release_mapping:
+	arm_iommu_release_mapping(mapping);
+out:
+	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
+	return 0;
+}
+
+static int iommu_debug_functional_fast_dma_api_show(struct seq_file *s,
+						    void *ignored)
+{
+	size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
+	int ret = 0;
+
+	ret |= __apply_to_new_mapping(s, __functional_dma_api_alloc_test, NULL);
+	ret |= __apply_to_new_mapping(s, __functional_dma_api_basic_test, NULL);
+	ret |= __apply_to_new_mapping(s, __functional_dma_api_va_test, sizes);
+	return ret;
+}
+
+static int iommu_debug_functional_fast_dma_api_open(struct inode *inode,
+						    struct file *file)
+{
+	return single_open(file, iommu_debug_functional_fast_dma_api_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_functional_fast_dma_api_fops = {
+	.open	 = iommu_debug_functional_fast_dma_api_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_functional_arm_dma_api_show(struct seq_file *s,
+						   void *ignored)
+{
+	struct dma_iommu_mapping *mapping;
+	struct iommu_debug_device *ddev = s->private;
+	struct device *dev = ddev->dev;
+	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
+	int ret = -EINVAL;
+
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	if (!mapping)
+		goto out;
+
+	if (arm_iommu_attach_device(dev, mapping))
+		goto out_release_mapping;
+
+	ret = __functional_dma_api_alloc_test(dev, s, mapping->domain, sizes);
+	ret |= __functional_dma_api_basic_test(dev, s, mapping->domain, sizes);
+
+	arm_iommu_detach_device(dev);
+out_release_mapping:
+	arm_iommu_release_mapping(mapping);
+out:
+	seq_printf(s, "%s\n", ret ? "FAIL" : "SUCCESS");
+	return 0;
+}
+
+static int iommu_debug_functional_arm_dma_api_open(struct inode *inode,
+						   struct file *file)
+{
+	return single_open(file, iommu_debug_functional_arm_dma_api_show,
+			   inode->i_private);
+}
+
+static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
+	.open	 = iommu_debug_functional_arm_dma_api_open,
+	.read	 = seq_read,
+	.llseek	 = seq_lseek,
+	.release = single_release,
+};
+
+static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
+					int val, bool is_secure)
+{
+	ddev->domain = iommu_domain_alloc(&platform_bus_type);
+	if (!ddev->domain) {
+		pr_err("Couldn't allocate domain\n");
+		return -ENOMEM;
+	}
+
+	if (is_secure && iommu_domain_set_attr(ddev->domain,
+					       DOMAIN_ATTR_SECURE_VMID,
+					       &val)) {
+		pr_err("Couldn't set secure vmid to %d\n", val);
+		goto out_domain_free;
+	}
+
+	if (iommu_attach_device(ddev->domain, ddev->dev)) {
+		pr_err("Couldn't attach new domain to device. Is it already attached?\n");
+		goto out_domain_free;
+	}
+
+	return 0;
+
+out_domain_free:
+	iommu_domain_free(ddev->domain);
+	ddev->domain = NULL;
+	return -EIO;
+}
+
+static ssize_t __iommu_debug_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset,
+					  bool is_secure)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	ssize_t retval;
+	int val;
+
+	if (kstrtoint_from_user(ubuf, count, 0, &val)) {
+		pr_err("Invalid format. Expected a hex or decimal integer\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	if (val) {
+		if (ddev->domain) {
+			pr_err("Already attached.\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		if (WARN(ddev->dev->archdata.iommu,
+			 "Attachment tracking out of sync with device\n")) {
+			retval = -EINVAL;
+			goto out;
+		}
+		if (iommu_debug_attach_do_attach(ddev, val, is_secure)) {
+			retval = -EIO;
+			goto out;
+		}
+		pr_err("Attached\n");
+	} else {
+		if (!ddev->domain) {
+			pr_err("No domain. Did you already attach?\n");
+			retval = -EINVAL;
+			goto out;
+		}
+		iommu_detach_device(ddev->domain, ddev->dev);
+		iommu_domain_free(ddev->domain);
+		ddev->domain = NULL;
+		pr_err("Detached\n");
+	}
+
+	retval = count;
+out:
+	return retval;
+}
+
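+/*
+ * Illustrative usage (assuming debugfs is mounted at /sys/kernel/debug):
+ *   echo 1 > /sys/kernel/debug/iommu/tests/<device>/attach  # alloc + attach a domain
+ *   cat /sys/kernel/debug/iommu/tests/<device>/attach       # "1" while attached
+ *   echo 0 > /sys/kernel/debug/iommu/tests/<device>/attach  # detach + free the domain
+ */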
+static ssize_t iommu_debug_attach_write(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *offset)
+{
+	return __iommu_debug_attach_write(file, ubuf, count, offset,
+					  false);
+
+}
+
+static ssize_t iommu_debug_attach_read(struct file *file, char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	char c[2];
+
+	if (*offset)
+		return 0;
+
+	c[0] = ddev->domain ? '1' : '0';
+	c[1] = '\n';
+	if (copy_to_user(ubuf, &c, 2)) {
+		pr_err("copy_to_user failed\n");
+		return -EFAULT;
+	}
+	*offset = 1;		/* non-zero means we're done */
+
+	return 2;
+}
+
+static const struct file_operations iommu_debug_attach_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attach_write,
+	.read	= iommu_debug_attach_read,
+};
+
+static ssize_t iommu_debug_attach_write_secure(struct file *file,
+					       const char __user *ubuf,
+					       size_t count, loff_t *offset)
+{
+	return __iommu_debug_attach_write(file, ubuf, count, offset,
+					  true);
+
+}
+
+static const struct file_operations iommu_debug_secure_attach_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_attach_write_secure,
+	.read	= iommu_debug_attach_read,
+};
+
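+/*
+ * Illustrative usage (iova value is arbitrary):
+ *   echo 0x10000 > /sys/kernel/debug/iommu/tests/<device>/atos
+ *   cat /sys/kernel/debug/iommu/tests/<device>/atos   # hardware translation, or "FAIL"
+ */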
+static ssize_t iommu_debug_atos_write(struct file *file,
+				      const char __user *ubuf,
+				      size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	dma_addr_t iova;
+
+	if (kstrtox_from_user(ubuf, count, 0, &iova)) {
+		pr_err("Invalid format for iova\n");
+		ddev->iova = 0;
+		return -EINVAL;
+	}
+
+	ddev->iova = iova;
+	pr_err("Saved iova=%pa for future ATOS commands\n", &iova);
+	return count;
+}
+
+static ssize_t iommu_debug_atos_read(struct file *file, char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	struct iommu_debug_device *ddev = file->private_data;
+	phys_addr_t phys;
+	char buf[100];
+	ssize_t retval;
+	size_t buflen;
+
+	if (!ddev->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (*offset)
+		return 0;
+
+	memset(buf, 0, 100);
+
+	phys = iommu_iova_to_phys_hard(ddev->domain, ddev->iova);
+	if (!phys)
+		strlcpy(buf, "FAIL\n", 100);
+	else
+		snprintf(buf, 100, "%pa\n", &phys);
+
+	buflen = strlen(buf);
+	if (copy_to_user(ubuf, buf, buflen)) {
+		pr_err("Couldn't copy_to_user\n");
+		retval = -EFAULT;
+	} else {
+		*offset = 1;	/* non-zero means we're done */
+		retval = buflen;
+	}
+
+	return retval;
+}
+
+static const struct file_operations iommu_debug_atos_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_atos_write,
+	.read	= iommu_debug_atos_read,
+};
+
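+/*
+ * Input format is "iova,phys,len,prot". Illustrative example:
+ *   echo "0x10000,0xa000,0x1000,0x3" > /sys/kernel/debug/iommu/tests/<device>/map
+ * maps 4K at iova 0x10000 to phys 0xa000 with IOMMU_READ | IOMMU_WRITE.
+ */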
+static ssize_t iommu_debug_map_write(struct file *file, const char __user *ubuf,
+				     size_t count, loff_t *offset)
+{
+	ssize_t retval = -EINVAL;
+	int ret;
+	char *comma1, *comma2, *comma3;
+	char buf[100];
+	dma_addr_t iova;
+	phys_addr_t phys;
+	size_t size;
+	int prot;
+	struct iommu_debug_device *ddev = file->private_data;
+
+	if (count >= 100) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!ddev->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	comma2 = strnchr(comma1 + 1, count, ',');
+	if (!comma2)
+		goto invalid_format;
+
+	comma3 = strnchr(comma2 + 1, count, ',');
+	if (!comma3)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma1 = *comma2 = *comma3 = '\0';
+
+	if (kstrtoux(buf, 0, &iova))
+		goto invalid_format;
+
+	if (kstrtoux(comma1 + 1, 0, &phys))
+		goto invalid_format;
+
+	if (kstrtosize_t(comma2 + 1, 0, &size))
+		goto invalid_format;
+
+	if (kstrtoint(comma3 + 1, 0, &prot))
+		goto invalid_format;
+
+	ret = iommu_map(ddev->domain, iova, phys, size, prot);
+	if (ret) {
+		pr_err("iommu_map failed with %d\n", ret);
+		retval = -EIO;
+		goto out;
+	}
+
+	retval = count;
+	pr_err("Mapped %pa to %pa (len=0x%zx, prot=0x%x)\n",
+	       &iova, &phys, size, prot);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: iova,phys,len,prot where `prot' is the bitwise OR of IOMMU_READ, IOMMU_WRITE, etc.\n");
+	return -EINVAL;
+}
+
+static const struct file_operations iommu_debug_map_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_map_write,
+};
+
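+/*
+ * Input format is "iova,len". Illustrative example:
+ *   echo "0x10000,0x1000" > /sys/kernel/debug/iommu/tests/<device>/unmap
+ */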
+static ssize_t iommu_debug_unmap_write(struct file *file,
+				       const char __user *ubuf,
+				       size_t count, loff_t *offset)
+{
+	ssize_t retval = 0;
+	char *comma1;
+	char buf[100];
+	dma_addr_t iova;
+	size_t size;
+	size_t unmapped;
+	struct iommu_debug_device *ddev = file->private_data;
+
+	if (count >= 100) {
+		pr_err("Value too large\n");
+		return -EINVAL;
+	}
+
+	if (!ddev->domain) {
+		pr_err("No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	memset(buf, 0, 100);
+
+	if (copy_from_user(buf, ubuf, count)) {
+		pr_err("Couldn't copy from user\n");
+		retval = -EFAULT;
+		goto out;
+	}
+
+	comma1 = strnchr(buf, count, ',');
+	if (!comma1)
+		goto invalid_format;
+
+	/* split up the words */
+	*comma1 = '\0';
+
+	if (kstrtoux(buf, 0, &iova))
+		goto invalid_format;
+
+	if (kstrtosize_t(comma1 + 1, 0, &size))
+		goto invalid_format;
+
+	unmapped = iommu_unmap(ddev->domain, iova, size);
+	if (unmapped != size) {
+		pr_err("iommu_unmap failed. Expected to unmap: 0x%zx, unmapped: 0x%zx\n",
+		       size, unmapped);
+		return -EIO;
+	}
+
+	retval = count;
+	pr_err("Unmapped %pa (len=0x%zx)\n", &iova, size);
+out:
+	return retval;
+
+invalid_format:
+	pr_err("Invalid format. Expected: iova,len\n");
+	return -EINVAL;
+}
+
+static const struct file_operations iommu_debug_unmap_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_unmap_write,
+};
+
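+/*
+ * Write '1' to enable or '0' to disable the IOMMU configuration clocks for
+ * the currently attached domain, e.g.:
+ *   echo 1 > /sys/kernel/debug/iommu/tests/<device>/config_clocks
+ */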
+static ssize_t iommu_debug_config_clocks_write(struct file *file,
+					       const char __user *ubuf,
+					       size_t count, loff_t *offset)
+{
+	char buf;
+	struct iommu_debug_device *ddev = file->private_data;
+	struct device *dev = ddev->dev;
+
+	/* we're expecting a single character plus (optionally) a newline */
+	if (count > 2) {
+		dev_err(dev, "Invalid value\n");
+		return -EINVAL;
+	}
+
+	if (!ddev->domain) {
+		dev_err(dev, "No domain. Did you already attach?\n");
+		return -EINVAL;
+	}
+
+	if (copy_from_user(&buf, ubuf, 1)) {
+		dev_err(dev, "Couldn't copy from user\n");
+		return -EFAULT;
+	}
+
+	switch (buf) {
+	case '0':
+		dev_err(dev, "Disabling config clocks\n");
+		iommu_disable_config_clocks(ddev->domain);
+		break;
+	case '1':
+		dev_err(dev, "Enabling config clocks\n");
+		if (iommu_enable_config_clocks(ddev->domain))
+			dev_err(dev, "Failed!\n");
+		break;
+	default:
+		dev_err(dev, "Invalid value. Should be 0 or 1.\n");
+		return -EINVAL;
+	}
+
+	return count;
+}
+
+static const struct file_operations iommu_debug_config_clocks_fops = {
+	.open	= simple_open,
+	.write	= iommu_debug_config_clocks_write,
+};
+
+/*
+ * The following will only work for drivers that implement the generic
+ * device tree bindings described in
+ * Documentation/devicetree/bindings/iommu/iommu.txt
+ */
+static int snarf_iommu_devices(struct device *dev, void *ignored)
+{
+	struct iommu_debug_device *ddev;
+	struct dentry *dir;
+
+	if (!of_find_property(dev->of_node, "iommus", NULL))
+		return 0;
+
+	ddev = kzalloc(sizeof(*ddev), GFP_KERNEL);
+	if (!ddev)
+		return -ENOMEM;
+	ddev->dev = dev;
+	dir = debugfs_create_dir(dev_name(dev), debugfs_tests_dir);
+	if (!dir) {
+		pr_err("Couldn't create iommu/devices/%s debugfs dir\n",
+		       dev_name(dev));
+		goto err;
+	}
+
+	if (!debugfs_create_file("nr_iters", S_IRUSR, dir, &iters_per_op,
+				&iommu_debug_nr_iters_ops)) {
+		pr_err("Couldn't create iommu/devices/%s/nr_iters debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("profiling", S_IRUSR, dir, ddev,
+				 &iommu_debug_profiling_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/profiling debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("secure_profiling", S_IRUSR, dir, ddev,
+				 &iommu_debug_secure_profiling_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/secure_profiling debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("profiling_fast", S_IRUSR, dir, ddev,
+				 &iommu_debug_profiling_fast_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/profiling_fast debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("profiling_fast_dma_api", S_IRUSR, dir, ddev,
+				 &iommu_debug_profiling_fast_dma_api_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/profiling_fast_dma_api debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("functional_fast_dma_api", S_IRUSR, dir, ddev,
+				 &iommu_debug_functional_fast_dma_api_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/functional_fast_dma_api debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("functional_arm_dma_api", S_IRUSR, dir, ddev,
+				 &iommu_debug_functional_arm_dma_api_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/functional_arm_dma_api debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("attach", S_IRUSR, dir, ddev,
+				 &iommu_debug_attach_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/attach debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("secure_attach", S_IRUSR, dir, ddev,
+				 &iommu_debug_secure_attach_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/secure_attach debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("atos", S_IWUSR, dir, ddev,
+				 &iommu_debug_atos_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/atos debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("map", S_IWUSR, dir, ddev,
+				 &iommu_debug_map_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/map debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("unmap", S_IWUSR, dir, ddev,
+				 &iommu_debug_unmap_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/unmap debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	if (!debugfs_create_file("config_clocks", S_IWUSR, dir, ddev,
+				 &iommu_debug_config_clocks_fops)) {
+		pr_err("Couldn't create iommu/devices/%s/config_clocks debugfs file\n",
+		       dev_name(dev));
+		goto err_rmdir;
+	}
+
+	list_add(&ddev->list, &iommu_debug_devices);
+	return 0;
+
+err_rmdir:
+	debugfs_remove_recursive(dir);
+err:
+	kfree(ddev);
+	return 0;
+}
+
+static int iommu_debug_init_tests(void)
+{
+	debugfs_tests_dir = debugfs_create_dir("tests",
+					       iommu_debugfs_top);
+	if (!debugfs_tests_dir) {
+		pr_err("Couldn't create iommu/tests debugfs directory\n");
+		return -ENODEV;
+	}
+
+	return bus_for_each_dev(&platform_bus_type, NULL, NULL,
+				snarf_iommu_devices);
+}
+
+static void iommu_debug_destroy_tests(void)
+{
+	debugfs_remove_recursive(debugfs_tests_dir);
+}
+#else
+static inline int iommu_debug_init_tests(void) { return 0; }
+static inline void iommu_debug_destroy_tests(void) { }
+#endif
+
+/*
+ * This isn't really a "driver", we just need something in the device tree
+ * so that our tests can run without any client drivers, and our tests rely
+ * on parsing the device tree for nodes with the `iommus' property.
+ */
+static int iommu_debug_pass(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id iommu_debug_of_match[] = {
+	{ .compatible = "iommu-debug-test" },
+	{ },
+};
+
+static struct platform_driver iommu_debug_driver = {
+	.probe = iommu_debug_pass,
+	.remove = iommu_debug_pass,
+	.driver = {
+		.name = "iommu-debug",
+		.of_match_table = iommu_debug_of_match,
+	},
+};
+
+static int iommu_debug_init(void)
+{
+	if (iommu_debug_init_tracking())
+		return -ENODEV;
+
+	if (iommu_debug_init_tests())
+		return -ENODEV;
+
+	return platform_driver_register(&iommu_debug_driver);
+}
+
+static void iommu_debug_exit(void)
+{
+	platform_driver_unregister(&iommu_debug_driver);
+	iommu_debug_destroy_tracking();
+	iommu_debug_destroy_tests();
+}
+
+module_init(iommu_debug_init);
+module_exit(iommu_debug_exit);
diff --git a/drivers/iommu/iommu-debug.h b/drivers/iommu/iommu-debug.h
new file mode 100644
index 0000000..94a97bf
--- /dev/null
+++ b/drivers/iommu/iommu-debug.h
@@ -0,0 +1,45 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IOMMU_DEBUG_H
+#define IOMMU_DEBUG_H
+
+#ifdef CONFIG_IOMMU_DEBUG_TRACKING
+
+void iommu_debug_attach_device(struct iommu_domain *domain, struct device *dev);
+void iommu_debug_detach_device(struct iommu_domain *domain, struct device *dev);
+void iommu_debug_domain_add(struct iommu_domain *domain);
+void iommu_debug_domain_remove(struct iommu_domain *domain);
+
+#else  /* !CONFIG_IOMMU_DEBUG_TRACKING */
+
+static inline void iommu_debug_attach_device(struct iommu_domain *domain,
+					     struct device *dev)
+{
+}
+
+static inline void iommu_debug_detach_device(struct iommu_domain *domain,
+					     struct device *dev)
+{
+}
+
+static inline void iommu_debug_domain_add(struct iommu_domain *domain)
+{
+}
+
+static inline void iommu_debug_domain_remove(struct iommu_domain *domain)
+{
+}
+
+#endif  /* CONFIG_IOMMU_DEBUG_TRACKING */
+
+#endif /* IOMMU_DEBUG_H */
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 9a2f196..ba7d6f1 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -31,9 +31,12 @@
 #include <linux/err.h>
 #include <linux/pci.h>
 #include <linux/bitops.h>
+#include <linux/debugfs.h>
 #include <linux/property.h>
 #include <trace/events/iommu.h>
 
+#include "iommu-debug.h"
+
 static struct kset *iommu_group_kset;
 static DEFINE_IDA(iommu_group_ida);
 
@@ -1064,6 +1067,8 @@
 	/* Assume all sizes by default; the driver may override this later */
 	domain->pgsize_bitmap  = bus->iommu_ops->pgsize_bitmap;
 
+	iommu_debug_domain_add(domain);
+
 	return domain;
 }
 
@@ -1075,6 +1080,7 @@
 
 void iommu_domain_free(struct iommu_domain *domain)
 {
+	iommu_debug_domain_remove(domain);
 	domain->ops->domain_free(domain);
 }
 EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -1087,8 +1093,10 @@
 		return -ENODEV;
 
 	ret = domain->ops->attach_dev(domain, dev);
-	if (!ret)
+	if (!ret) {
 		trace_attach_device_to_domain(dev);
+		iommu_debug_attach_device(domain, dev);
+	}
 	return ret;
 }
 
@@ -1124,6 +1132,8 @@
 static void __iommu_detach_device(struct iommu_domain *domain,
 				  struct device *dev)
 {
+	iommu_debug_detach_device(domain, dev);
+
 	if (unlikely(domain->ops->detach_dev == NULL))
 		return;
 
@@ -1267,8 +1277,17 @@
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
 
-static size_t iommu_pgsize(struct iommu_domain *domain,
-			   unsigned long addr_merge, size_t size)
+phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+				    dma_addr_t iova)
+{
+	if (unlikely(domain->ops->iova_to_phys_hard == NULL))
+		return 0;
+
+	return domain->ops->iova_to_phys_hard(domain, iova);
+}
+
+size_t iommu_pgsize(unsigned long pgsize_bitmap,
+		    unsigned long addr_merge, size_t size)
 {
 	unsigned int pgsize_idx;
 	size_t pgsize;
@@ -1287,10 +1306,14 @@
 	pgsize = (1UL << (pgsize_idx + 1)) - 1;
 
 	/* throw away page sizes not supported by the hardware */
-	pgsize &= domain->pgsize_bitmap;
+	pgsize &= pgsize_bitmap;
 
 	/* make sure we're still sane */
-	BUG_ON(!pgsize);
+	if (!pgsize) {
+		pr_err("invalid pgsize/addr/size! 0x%lx 0x%lx 0x%zx\n",
+		       pgsize_bitmap, addr_merge, size);
+		BUG();
+	}
 
 	/* pick the biggest page */
 	pgsize_idx = __fls(pgsize);
@@ -1332,7 +1355,8 @@
 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
+		size_t pgsize = iommu_pgsize(domain->pgsize_bitmap,
+						iova | paddr, size);
 
 		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
 			 iova, &paddr, pgsize);
@@ -1390,9 +1414,9 @@
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
+		size_t left = size - unmapped;
 
-		unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+		unmapped_page = domain->ops->unmap(domain, iova, left);
 		if (!unmapped_page)
 			break;
 
@@ -1471,12 +1495,20 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
 
+struct dentry *iommu_debugfs_top;
+
 static int __init iommu_init(void)
 {
 	iommu_group_kset = kset_create_and_add("iommu_groups",
 					       NULL, kernel_kobj);
 	BUG_ON(!iommu_group_kset);
 
+	iommu_debugfs_top = debugfs_create_dir("iommu", NULL);
+	if (!iommu_debugfs_top) {
+		pr_err("Couldn't create iommu debugfs directory\n");
+		return -ENODEV;
+	}
+
 	return 0;
 }
 core_initcall(iommu_init);
@@ -1546,6 +1578,45 @@
 }
 EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
 
+/**
+ * iommu_trigger_fault() - trigger an IOMMU fault
+ * @domain: iommu domain
+ * @flags: fault flags passed through to the IOMMU driver
+ *
+ * Triggers a fault on the device to which this domain is attached.
+ *
+ * This function should only be used for debugging purposes, for obvious
+ * reasons.
+ */
+void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags)
+{
+	if (domain->ops->trigger_fault)
+		domain->ops->trigger_fault(domain, flags);
+}
+
+/**
+ * iommu_reg_read() - read an IOMMU register
+ * @domain: iommu domain
+ * @offset: offset of the register to read
+ *
+ * Reads the IOMMU register at the given offset. Returns 0 if the driver
+ * does not implement reg_read.
+ */
+unsigned long iommu_reg_read(struct iommu_domain *domain, unsigned long offset)
+{
+	if (domain->ops->reg_read)
+		return domain->ops->reg_read(domain, offset);
+	return 0;
+}
+
+/**
+ * iommu_reg_write() - write an IOMMU register
+ * @domain: iommu domain
+ * @offset: offset of the register to write
+ * @val: value to write
+ *
+ * Writes the given value to the IOMMU register at the given offset.
+ */
+void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
+		     unsigned long val)
+{
+	if (domain->ops->reg_write)
+		domain->ops->reg_write(domain, offset, val);
+}
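+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a debugfs or
+ * test harness could exercise the debug hooks above roughly as follows.
+ * The register offset 0x58 below is purely hypothetical.
+ *
+ *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
+ *	unsigned long fsr;
+ *
+ *	if (domain) {
+ *		iommu_trigger_fault(domain, 0);
+ *		fsr = iommu_reg_read(domain, 0x58);
+ *		iommu_reg_write(domain, 0x58, fsr);
+ *		iommu_domain_free(domain);
+ *	}
+ */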
+
 void iommu_get_dm_regions(struct device *dev, struct list_head *list)
 {
 	const struct iommu_ops *ops = dev->bus->iommu_ops;
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
new file mode 100644
index 0000000..ba0a6c3
--- /dev/null
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -0,0 +1,400 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/slab.h>
+#include <linux/rbtree.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+
+#include <linux/msm_dma_iommu_mapping.h>
+
+/**
+ * struct msm_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @lnode - list node to exist in the buffer's list of iommu mappings
+ * @dev - Device this is mapped to. Used as key
+ * @sgl - The scatterlist for this mapping
+ * @nents - Number of entries in sgl
+ * @dir - The direction for the unmap.
+ * @meta - Backpointer to the meta this guy belongs to.
+ * @ref - for reference counting this mapping
+ *
+ * Represents a mapping of one dma_buf buffer to a particular device
+ * and address range. There may exist other mappings of this buffer in
+ * different devices. All mappings will have the same cacheability and security.
+ */
+struct msm_iommu_map {
+	struct list_head lnode;
+	struct rb_node node;
+	struct device *dev;
+	struct scatterlist sgl;
+	unsigned int nents;
+	enum dma_data_direction dir;
+	struct msm_iommu_meta *meta;
+	struct kref ref;
+};
+
+struct msm_iommu_meta {
+	struct rb_node node;
+	struct list_head iommu_maps;
+	struct kref ref;
+	struct mutex lock;
+	void *buffer;
+};
+
+static struct rb_root iommu_root;
+static DEFINE_MUTEX(msm_iommu_map_mutex);
+
+static void msm_iommu_meta_add(struct msm_iommu_meta *meta)
+{
+	struct rb_root *root = &iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct msm_iommu_meta *entry;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct msm_iommu_meta, node);
+
+		if (meta->buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (meta->buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			pr_err("%s: dma_buf %p already exists\n", __func__,
+			       entry->buffer);
+	}
+
+	rb_link_node(&meta->node, parent, p);
+	rb_insert_color(&meta->node, root);
+}
+
+static struct msm_iommu_meta *msm_iommu_meta_lookup(void *buffer)
+{
+	struct rb_root *root = &iommu_root;
+	struct rb_node **p = &root->rb_node;
+	struct rb_node *parent = NULL;
+	struct msm_iommu_meta *entry = NULL;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct msm_iommu_meta, node);
+
+		if (buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			return entry;
+	}
+
+	return NULL;
+}
+
+static void msm_iommu_add(struct msm_iommu_meta *meta,
+			  struct msm_iommu_map *iommu)
+{
+	INIT_LIST_HEAD(&iommu->lnode);
+	list_add(&iommu->lnode, &meta->iommu_maps);
+}
+
+
+static struct msm_iommu_map *msm_iommu_lookup(struct msm_iommu_meta *meta,
+					      struct device *dev)
+{
+	struct msm_iommu_map *entry;
+
+	list_for_each_entry(entry, &meta->iommu_maps, lnode) {
+		if (entry->dev == dev)
+			return entry;
+	}
+
+	return NULL;
+}
+
+static struct msm_iommu_meta *msm_iommu_meta_create(struct dma_buf *dma_buf)
+{
+	struct msm_iommu_meta *meta;
+
+	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
+
+	if (!meta)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&meta->iommu_maps);
+	meta->buffer = dma_buf->priv;
+	kref_init(&meta->ref);
+	mutex_init(&meta->lock);
+	msm_iommu_meta_add(meta);
+
+	return meta;
+}
+
+static void msm_iommu_meta_put(struct msm_iommu_meta *meta);
+
+static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
+				   int nents, enum dma_data_direction dir,
+				   struct dma_buf *dma_buf,
+				   unsigned long attrs)
+{
+	struct msm_iommu_map *iommu_map;
+	struct msm_iommu_meta *iommu_meta = NULL;
+	int ret = 0;
+	bool extra_meta_ref_taken = false;
+	int late_unmap = !(attrs & DMA_ATTR_NO_DELAYED_UNMAP);
+
+	mutex_lock(&msm_iommu_map_mutex);
+	iommu_meta = msm_iommu_meta_lookup(dma_buf->priv);
+
+	if (!iommu_meta) {
+		iommu_meta = msm_iommu_meta_create(dma_buf);
+
+		if (IS_ERR(iommu_meta)) {
+			mutex_unlock(&msm_iommu_map_mutex);
+			ret = PTR_ERR(iommu_meta);
+			goto out;
+		}
+		if (late_unmap) {
+			kref_get(&iommu_meta->ref);
+			extra_meta_ref_taken = true;
+		}
+	} else {
+		kref_get(&iommu_meta->ref);
+	}
+
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	mutex_lock(&iommu_meta->lock);
+	iommu_map = msm_iommu_lookup(iommu_meta, dev);
+	if (!iommu_map) {
+		iommu_map = kmalloc(sizeof(*iommu_map), GFP_ATOMIC);
+
+		if (!iommu_map) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+
+		ret = dma_map_sg_attrs(dev, sg, nents, dir, attrs);
+		if (ret != nents) {
+			kfree(iommu_map);
+			goto out_unlock;
+		}
+
+		kref_init(&iommu_map->ref);
+		if (late_unmap)
+			kref_get(&iommu_map->ref);
+		iommu_map->meta = iommu_meta;
+		iommu_map->sgl.dma_address = sg->dma_address;
+		iommu_map->sgl.dma_length = sg->dma_length;
+		iommu_map->dev = dev;
+		msm_iommu_add(iommu_meta, iommu_map);
+
+	} else {
+		sg->dma_address = iommu_map->sgl.dma_address;
+		sg->dma_length = iommu_map->sgl.dma_length;
+
+		kref_get(&iommu_map->ref);
+		/*
+		 * Need to do cache operations here based on "dir" in the
+		 * future if we go with coherent mappings.
+		 */
+		ret = nents;
+	}
+	mutex_unlock(&iommu_meta->lock);
+	return ret;
+
+out_unlock:
+	mutex_unlock(&iommu_meta->lock);
+out:
+	if (!IS_ERR(iommu_meta)) {
+		if (extra_meta_ref_taken)
+			msm_iommu_meta_put(iommu_meta);
+		msm_iommu_meta_put(iommu_meta);
+	}
+	return ret;
+
+}
+
+/*
+ * We are not taking a reference to the dma_buf here. It is expected that
+ * clients hold reference to the dma_buf until they are done with mapping and
+ * unmapping.
+ */
+int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		   enum dma_data_direction dir, struct dma_buf *dma_buf,
+		   unsigned long attrs)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(dev)) {
+		pr_err("%s: dev pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(sg)) {
+		pr_err("%s: sg table pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(dma_buf)) {
+		pr_err("%s: dma_buf pointer is invalid\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = __msm_dma_map_sg(dev, sg, nents, dir, dma_buf, attrs);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_dma_map_sg_attrs);
+
+static void msm_iommu_meta_destroy(struct kref *kref)
+{
+	struct msm_iommu_meta *meta = container_of(kref, struct msm_iommu_meta,
+						ref);
+
+	if (!list_empty(&meta->iommu_maps)) {
+		WARN(1, "%s: DMA Buffer %p being destroyed with outstanding iommu mappings!\n",
+		     __func__, meta->buffer);
+	}
+	rb_erase(&meta->node, &iommu_root);
+	kfree(meta);
+}
+
+static void msm_iommu_meta_put(struct msm_iommu_meta *meta)
+{
+	/*
+	 * Need to lock here to prevent race against map/unmap
+	 */
+	mutex_lock(&msm_iommu_map_mutex);
+	kref_put(&meta->ref, msm_iommu_meta_destroy);
+	mutex_unlock(&msm_iommu_map_mutex);
+}
+
+static void msm_iommu_map_release(struct kref *kref)
+{
+	struct msm_iommu_map *map = container_of(kref, struct msm_iommu_map,
+						ref);
+
+	list_del(&map->lnode);
+	dma_unmap_sg(map->dev, &map->sgl, map->nents, map->dir);
+	kfree(map);
+}
+
+void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		      enum dma_data_direction dir, struct dma_buf *dma_buf)
+{
+	struct msm_iommu_map *iommu_map;
+	struct msm_iommu_meta *meta;
+
+	mutex_lock(&msm_iommu_map_mutex);
+	meta = msm_iommu_meta_lookup(dma_buf->priv);
+	if (!meta) {
+		WARN(1, "%s: (%p) was never mapped\n", __func__, dma_buf);
+		mutex_unlock(&msm_iommu_map_mutex);
+		goto out;
+
+	}
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	mutex_lock(&meta->lock);
+	iommu_map = msm_iommu_lookup(meta, dev);
+
+	if (!iommu_map) {
+		WARN(1, "%s: (%p) was never mapped for device %p\n", __func__,
+				dma_buf, dev);
+		mutex_unlock(&meta->lock);
+		goto out;
+	}
+
+	/*
+	 * Save direction for later use when we actually unmap.
+	 * Not used right now but in the future if we go to coherent mapping
+	 * API we might want to call the appropriate API when client asks
+	 * to unmap
+	 */
+	iommu_map->dir = dir;
+
+	kref_put(&iommu_map->ref, msm_iommu_map_release);
+	mutex_unlock(&meta->lock);
+
+	msm_iommu_meta_put(meta);
+
+out:
+	return;
+}
+EXPORT_SYMBOL(msm_dma_unmap_sg);
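+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a client that
+ * holds a dma_buf and an attached sg_table would typically pair the calls
+ * above as below; "attachment" and "dmabuf" are hypothetical names.
+ *
+ *	struct sg_table *table = dma_buf_map_attachment(attachment,
+ *							DMA_BIDIRECTIONAL);
+ *	int nents = msm_dma_map_sg_attrs(dev, table->sgl, table->nents,
+ *					 DMA_BIDIRECTIONAL, dmabuf, 0);
+ *
+ *	if (nents > 0) {
+ *		... use sg_dma_address(table->sgl) ...
+ *		msm_dma_unmap_sg(dev, table->sgl, table->nents,
+ *				 DMA_BIDIRECTIONAL, dmabuf);
+ *	}
+ */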
+
+int msm_dma_unmap_all_for_dev(struct device *dev)
+{
+	int ret = 0;
+	struct msm_iommu_meta *meta;
+	struct rb_root *root;
+	struct rb_node *meta_node;
+
+	mutex_lock(&msm_iommu_map_mutex);
+	root = &iommu_root;
+	meta_node = rb_first(root);
+	while (meta_node) {
+		struct msm_iommu_map *iommu_map;
+
+		meta = rb_entry(meta_node, struct msm_iommu_meta, node);
+		mutex_lock(&meta->lock);
+		list_for_each_entry(iommu_map, &meta->iommu_maps, lnode)
+			if (iommu_map->dev == dev)
+				if (!kref_put(&iommu_map->ref,
+						msm_iommu_map_release))
+					ret = -EINVAL;
+
+		mutex_unlock(&meta->lock);
+		meta_node = rb_next(meta_node);
+	}
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	return ret;
+}
+
+/*
+ * Only to be called by ION code when a buffer is freed
+ */
+void msm_dma_buf_freed(void *buffer)
+{
+	struct msm_iommu_map *iommu_map;
+	struct msm_iommu_map *iommu_map_next;
+	struct msm_iommu_meta *meta;
+
+	mutex_lock(&msm_iommu_map_mutex);
+	meta = msm_iommu_meta_lookup(buffer);
+	if (!meta) {
+		/* Already unmapped (assuming no late unmapping) */
+		mutex_unlock(&msm_iommu_map_mutex);
+		return;
+	}
+	mutex_unlock(&msm_iommu_map_mutex);
+
+	mutex_lock(&meta->lock);
+
+	list_for_each_entry_safe(iommu_map, iommu_map_next, &meta->iommu_maps,
+				 lnode)
+		kref_put(&iommu_map->ref, msm_iommu_map_release);
+
+	if (!list_empty(&meta->iommu_maps)) {
+		WARN(1, "%s: DMA buffer %p destroyed with outstanding iommu mappings\n",
+		     __func__, meta->buffer);
+	}
+
+	INIT_LIST_HEAD(&meta->iommu_maps);
+	mutex_unlock(&meta->lock);
+
+	msm_iommu_meta_put(meta);
+}
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index bc0af33..41515bb 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -34,6 +34,7 @@
 	select MULTI_IRQ_HANDLER
 	select IRQ_DOMAIN_HIERARCHY
 	select PARTITION_PERCPU
+	select QCOM_SHOW_RESUME_IRQ
 
 config ARM_GIC_V3_ITS
 	bool
@@ -41,6 +42,15 @@
 	depends on PCI_MSI
 	select ACPI_IORT if ACPI
 
+config ARM_GIC_V3_ACL
+	bool "GICv3 Access control"
+	depends on ARM_GIC_V3
+	help
+	  Access to GIC ITS address space is controlled by EL2.
+	  Kernel has no permission to access GIC ITS address space.
+	  If you wish to enforce the access control, set this
+	  option to Y; if you are unsure, say N.
+
 config ARM_NVIC
 	bool
 	select IRQ_DOMAIN
@@ -110,6 +120,16 @@
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
 
+config QCOM_SHOW_RESUME_IRQ
+	bool "Enable logging of interrupts that could have caused resume"
+	depends on ARM_GIC
+	default n
+	help
+	  This option logs wake up interrupts that have triggered just before
+	  the resume loop unrolls. It helps to identify unnecessary wake up
+	  interrupts that cause the system to come out of low power modes.
+	  Say Y if you want to debug why the system resumed.
+
 config DW_APB_ICTL
 	bool
 	select GENERIC_IRQ_CHIP
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index e4dbfc8..987bd89 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -67,6 +67,7 @@
 obj-$(CONFIG_ARCH_SA1100)		+= irq-sa11x0.o
 obj-$(CONFIG_INGENIC_IRQ)		+= irq-ingenic.o
 obj-$(CONFIG_IMX_GPCV2)			+= irq-imx-gpcv2.o
+obj-$(CONFIG_QCOM_SHOW_RESUME_IRQ)	+= msm_show_resume_irq.o
 obj-$(CONFIG_PIC32_EVIC)		+= irq-pic32-evic.o
 obj-$(CONFIG_MVEBU_ODMI)		+= irq-mvebu-odmi.o
 obj-$(CONFIG_MVEBU_PIC)			+= irq-mvebu-pic.o
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 19d642e..911c955 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -28,6 +28,7 @@
 #include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
+#include <linux/msm_rtb.h>
 
 #include <linux/irqchip.h>
 #include <linux/irqchip/arm-gic-common.h>
@@ -352,6 +353,7 @@
 		if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
 			int err;
 
+			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
 			if (static_key_true(&supports_deactivate))
 				gic_write_eoir(irqnr);
 
@@ -368,6 +370,7 @@
 			continue;
 		}
 		if (irqnr < 16) {
+			uncached_logk(LOGK_IRQ, (void *)(uintptr_t)irqnr);
 			gic_write_eoir(irqnr);
 			if (static_key_true(&supports_deactivate))
 				gic_write_dir(irqnr);
@@ -538,7 +541,8 @@
 	gic_cpu_config(rbase, gic_redist_wait_for_rwp);
 
 	/* Give LPIs a spin */
-	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+					!IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
 		its_cpu_init();
 
 	/* initialise system registers */
@@ -959,7 +963,8 @@
 
 	set_handle_irq(gic_handle_irq);
 
-	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() &&
+	    !IS_ENABLED(CONFIG_ARM_GIC_V3_ACL))
 		its_init(handle, &gic_data.rdists, gic_data.domain);
 
 	gic_smp_init();
diff --git a/drivers/irqchip/msm_show_resume_irq.c b/drivers/irqchip/msm_show_resume_irq.c
new file mode 100644
index 0000000..5211496
--- /dev/null
+++ b/drivers/irqchip/msm_show_resume_irq.c
@@ -0,0 +1,22 @@
+/* Copyright (c) 2011, 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+int msm_show_resume_irq_mask;
+
+module_param_named(
+	debug_mask, msm_show_resume_irq_mask, int, S_IRUGO | S_IWUSR | S_IWGRP
+);
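+
+/*
+ * Usage note (assumption, not stated in this patch): with this file built
+ * in, the mask can be toggled at runtime via
+ *
+ *	echo 1 > /sys/module/msm_show_resume_irq/parameters/debug_mask
+ *
+ * so that interrupt chip drivers which consult msm_show_resume_irq_mask can
+ * report the wake-up interrupts seen during resume.
+ */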
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 11eebfe..c041db6 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -143,4 +143,12 @@
 	  Mailbox implementation for the Broadcom PDC ring manager,
 	  which provides access to various offload engines on Broadcom
 	  SoCs. Say Y here if you want to use the Broadcom PDC.
+
+config QTI_RPMH_MBOX
+	bool "TCS Mailbox for QTI RPMH Communication"
+	depends on ARCH_QCOM
+	help
+	  Support for communication with the hardened-RPM blocks in
+	  Qualcomm Technologies, Inc. (QTI) SoCs using the TCS hardware mailbox.
+
 endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index ace6fed..0a01d79 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -29,3 +29,5 @@
 obj-$(CONFIG_HI6220_MBOX)	+= hi6220-mailbox.o
 
 obj-$(CONFIG_BCM_PDC_MBOX)	+= bcm-pdc-mailbox.o
+
+obj-$(CONFIG_QTI_RPMH_MBOX)	+= qti-tcs.o
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4a36632..6c7f6c4 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -282,6 +282,44 @@
 EXPORT_SYMBOL_GPL(mbox_send_message);
 
 /**
+ * mbox_send_controller_data - For a client to submit a message to be
+ *				sent only to the controller.
+ * @chan: Mailbox channel assigned to this client.
+ * @mssg: Client-specific message, passed as an opaque pointer.
+ *
+ * For a client to submit data directly to the controller. No ACK is expected
+ * from the controller. This request is not buffered in the mailbox framework.
+ *
+ * Return: Non-negative integer for successful submission (non-blocking mode)
+ *	or transmission over chan (blocking mode).
+ *	Negative value denotes failure.
+ */
+int mbox_send_controller_data(struct mbox_chan *chan, void *mssg)
+{
+	unsigned long flags;
+	int err;
+
+	if (!chan || !chan->cl || !chan->mbox->ops->send_controller_data)
+		return -EINVAL;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	err = chan->mbox->ops->send_controller_data(chan, mssg);
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return err;
+}
+EXPORT_SYMBOL(mbox_send_controller_data);
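+
+/*
+ * Usage sketch (illustrative only, not part of this patch): a client such
+ * as an RPMH driver would typically request a channel and then push a
+ * controller-specific message; "client" and "pkt" are hypothetical names.
+ *
+ *	struct mbox_chan *chan = mbox_request_channel(&client, 0);
+ *
+ *	if (!IS_ERR(chan)) {
+ *		int rc = mbox_send_controller_data(chan, &pkt);
+ *
+ *		if (rc < 0)
+ *			pr_err("controller write failed: %d\n", rc);
+ *	}
+ */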
+
+bool mbox_controller_is_idle(struct mbox_chan *chan)
+{
+	if (!chan || !chan->cl || !chan->mbox->is_idle)
+		return false;
+
+	return chan->mbox->is_idle(chan->mbox);
+}
+EXPORT_SYMBOL(mbox_controller_is_idle);
+
+/**
  * mbox_request_channel - Request a mailbox channel.
  * @cl: Identity of the client requesting the channel.
  * @index: Index of mailbox specifier in 'mboxes' property.
diff --git a/drivers/mailbox/qti-tcs.c b/drivers/mailbox/qti-tcs.c
new file mode 100644
index 0000000..5cd481a
--- /dev/null
+++ b/drivers/mailbox/qti-tcs.c
@@ -0,0 +1,1107 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__
+
+#include <linux/bitmap.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h> /* For dev_err */
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <linux/io.h>
+
+#include <soc/qcom/tcs.h>
+
+#include <dt-bindings/soc/qcom,tcs-mbox.h>
+
+#include "mailbox.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/rpmh.h>
+
+#define MAX_CMDS_PER_TCS		16
+#define MAX_TCS_PER_TYPE		3
+#define MAX_TCS_SLOTS			(MAX_CMDS_PER_TCS * MAX_TCS_PER_TYPE)
+
+#define TCS_DRV_TCS_OFFSET		672
+#define TCS_DRV_CMD_OFFSET		20
+
+/* DRV Configuration Information Register */
+#define DRV_PRNT_CHLD_CONFIG		0x0C
+#define DRV_NUM_TCS_MASK		0x3F
+#define DRV_NUM_TCS_SHIFT		6
+#define DRV_NCPT_MASK			0x1F
+#define DRV_NCPT_SHIFT			27
+
+/* Register offsets */
+#define TCS_DRV_IRQ_ENABLE		0x00
+#define TCS_DRV_IRQ_STATUS		0x04
+#define TCS_DRV_IRQ_CLEAR		0x08
+#define TCS_DRV_CMD_WAIT_FOR_CMPL	0x10
+#define TCS_DRV_CONTROL			0x14
+#define TCS_DRV_STATUS			0x18
+#define TCS_DRV_CMD_ENABLE		0x1C
+#define TCS_DRV_CMD_MSGID		0x30
+#define TCS_DRV_CMD_ADDR		0x34
+#define TCS_DRV_CMD_DATA		0x38
+#define TCS_DRV_CMD_STATUS		0x3C
+#define TCS_DRV_CMD_RESP_DATA		0x40
+
+#define TCS_AMC_MODE_ENABLE		BIT(16)
+#define TCS_AMC_MODE_TRIGGER		BIT(24)
+
+/* TCS CMD register bit mask */
+#define CMD_MSGID_LEN			8
+#define CMD_MSGID_RESP_REQ		BIT(8)
+#define CMD_MSGID_WRITE			BIT(16)
+#define CMD_STATUS_ISSUED		BIT(8)
+#define CMD_STATUS_COMPL		BIT(16)
+
+/* Control/Hidden TCS */
+#define TCS_HIDDEN_MAX_SLOTS		3
+#define TCS_HIDDEN_CMD0_DRV_ADDR	0x34
+#define TCS_HIDDEN_CMD0_DRV_DATA	0x38
+#define TCS_HIDDEN_CMD_SHIFT		0x08
+
+#define TCS_TYPE_NR			4
+#define TCS_MBOX_TOUT_MS		2000
+#define MAX_POOL_SIZE			(MAX_TCS_PER_TYPE * TCS_TYPE_NR)
+
+struct tcs_drv;
+
+struct tcs_response {
+	struct tcs_drv *drv;
+	struct mbox_chan *chan;
+	struct tcs_mbox_msg *msg;
+	u32 m; /* m-th TCS */
+	struct tasklet_struct tasklet;
+	struct delayed_work dwork;
+	int err;
+};
+
+struct tcs_response_pool {
+	struct tcs_response *resp;
+	spinlock_t lock;
+	DECLARE_BITMAP(avail, MAX_POOL_SIZE);
+};
+
+/* One per TCS type of a controller */
+struct tcs_mbox {
+	struct tcs_drv *drv;
+	u32 *cmd_addr;
+	int type;
+	u32 tcs_mask;
+	u32 tcs_offset;
+	int num_tcs;
+	int ncpt; /* num cmds per tcs */
+	DECLARE_BITMAP(slots, MAX_TCS_SLOTS);
+	spinlock_t tcs_lock; /* TCS type lock */
+	spinlock_t tcs_m_lock[MAX_TCS_PER_TYPE];
+	struct tcs_response *resp[MAX_TCS_PER_TYPE];
+};
+
+/* One per MBOX controller */
+struct tcs_drv {
+	void *base; /* start address of the RSC's registers */
+	void *reg_base; /* start address for DRV specific register */
+	int drv_id;
+	struct platform_device *pdev;
+	struct mbox_controller mbox;
+	struct tcs_mbox tcs[TCS_TYPE_NR];
+	int num_assigned;
+	int num_tcs;
+	struct workqueue_struct *wq;
+	struct tcs_response_pool *resp_pool;
+};
+
+static void tcs_notify_tx_done(unsigned long data);
+static void tcs_notify_timeout(struct work_struct *work);
+
+static int tcs_response_pool_init(struct tcs_drv *drv)
+{
+	struct tcs_response_pool *pool;
+	int i;
+
+	pool = devm_kzalloc(&drv->pdev->dev, sizeof(*pool), GFP_KERNEL);
+	if (!pool)
+		return -ENOMEM;
+
+	pool->resp = devm_kzalloc(&drv->pdev->dev, sizeof(*pool->resp) *
+				MAX_POOL_SIZE, GFP_KERNEL);
+	if (!pool->resp)
+		return -ENOMEM;
+
+	for (i = 0; i < MAX_POOL_SIZE; i++) {
+		tasklet_init(&pool->resp[i].tasklet, tcs_notify_tx_done,
+						(unsigned long) &pool->resp[i]);
+		INIT_DELAYED_WORK(&pool->resp[i].dwork,
+						tcs_notify_timeout);
+	}
+
+	spin_lock_init(&pool->lock);
+	drv->resp_pool = pool;
+
+	return 0;
+}
+
+static struct tcs_response *get_response_from_pool(struct tcs_drv *drv)
+{
+	struct tcs_response_pool *pool = drv->resp_pool;
+	struct tcs_response *resp = ERR_PTR(-ENOMEM);
+	unsigned long flags;
+	int pos;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	pos = find_first_zero_bit(pool->avail, MAX_POOL_SIZE);
+	if (pos != MAX_POOL_SIZE) {
+		bitmap_set(pool->avail, pos, 1);
+		resp = &pool->resp[pos];
+		memset(resp, 0, sizeof(*resp));
+		tasklet_init(&resp->tasklet, tcs_notify_tx_done,
+						(unsigned long) resp);
+		INIT_DELAYED_WORK(&resp->dwork, tcs_notify_timeout);
+		resp->drv = drv;
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	return resp;
+}
+
+static void free_response_to_pool(struct tcs_response *resp)
+{
+	struct tcs_response_pool *pool = resp->drv->resp_pool;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&pool->lock, flags);
+	i = resp - pool->resp;
+	bitmap_clear(pool->avail, i, 1);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static inline u32 read_drv_config(void __iomem *base)
+{
+	return le32_to_cpu(readl_relaxed(base + DRV_PRNT_CHLD_CONFIG));
+}
+
+static inline u32 read_tcs_reg(void __iomem *base, int reg, int m, int n)
+{
+	return le32_to_cpu(readl_relaxed(base + reg +
+			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n));
+}
+
+static inline void write_tcs_reg(void __iomem *base, int reg, int m, int n,
+				u32 data)
+{
+	writel_relaxed(cpu_to_le32(data), base + reg +
+			TCS_DRV_TCS_OFFSET * m + TCS_DRV_CMD_OFFSET * n);
+}
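+
+/*
+ * Register layout sketch: each TCS occupies a TCS_DRV_TCS_OFFSET (672 byte)
+ * stride and each command within a TCS a TCS_DRV_CMD_OFFSET (20 byte)
+ * stride. For example, TCS_DRV_CMD_ADDR for m = 1, n = 2 resolves to
+ * base + 0x34 + 672 + 40 = base + 0x2fc.
+ */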
+
+static inline void write_tcs_reg_sync(void __iomem *base, int reg, int m, int n,
+				u32 data)
+{
+	do {
+		write_tcs_reg(base, reg, m, n, data);
+		if (data == read_tcs_reg(base, reg, m, n))
+			break;
+		cpu_relax();
+	} while (1);
+}
+
+static inline bool tcs_is_free(void __iomem *base, int m)
+{
+	return read_tcs_reg(base, TCS_DRV_STATUS, m, 0);
+}
+
+static inline struct tcs_mbox *get_tcs_from_index(struct tcs_drv *drv, int m)
+{
+	struct tcs_mbox *tcs;
+	int i;
+
+	for (i = 0; i < TCS_TYPE_NR; i++) {
+		tcs = &drv->tcs[i];
+		if (tcs->tcs_mask & BIT(m))
+			break;
+	}
+
+	if (i == TCS_TYPE_NR)
+		tcs = NULL;
+
+	return tcs;
+}
+
+static inline struct tcs_mbox *get_tcs_of_type(struct tcs_drv *drv, int type)
+{
+	int i;
+	struct tcs_mbox *tcs;
+
+	for (i = 0; i < TCS_TYPE_NR; i++)
+		if (type == drv->tcs[i].type)
+			break;
+
+	if (i == TCS_TYPE_NR)
+		return ERR_PTR(-EINVAL);
+
+	tcs = &drv->tcs[i];
+	if (!tcs->num_tcs)
+		return ERR_PTR(-EINVAL);
+
+	return tcs;
+}
+
+static inline struct tcs_mbox *get_tcs_for_msg(struct tcs_drv *drv,
+						struct tcs_mbox_msg *msg)
+{
+	int type = -1;
+
+	/* Which box are we dropping this in and do we trigger the TCS */
+	switch (msg->state) {
+	case RPMH_SLEEP_STATE:
+		type = SLEEP_TCS;
+		break;
+	case RPMH_WAKE_ONLY_STATE:
+		type = WAKE_TCS;
+		break;
+	case RPMH_ACTIVE_ONLY_STATE:
+		type = ACTIVE_TCS;
+		break;
+	case RPMH_AWAKE_STATE:
+		/*
+		 * Awake state is only used when the DRV has no separate
+		 * TCS for ACTIVE requests. Switch to WAKE TCS to send
+		 * active votes. Otherwise, the caller should be explicit
+		 * about the state.
+		 */
+		if (IS_ERR(get_tcs_of_type(drv, ACTIVE_TCS)))
+			type = WAKE_TCS;
+		break;
+	}
+
+	if (msg->is_read)
+		type = ACTIVE_TCS;
+
+	if (type < 0)
+		return ERR_PTR(-EINVAL);
+
+	return get_tcs_of_type(drv, type);
+}
+
+static inline struct tcs_response *get_tcs_response(struct tcs_drv *drv, int m)
+{
+	struct tcs_mbox *tcs = get_tcs_from_index(drv, m);
+
+	return tcs ? tcs->resp[m - tcs->tcs_offset] : NULL;
+}
+
+static inline void send_tcs_response(struct tcs_response *resp)
+{
+	tasklet_schedule(&resp->tasklet);
+}
+
+static inline void schedule_tcs_err_response(struct tcs_response *resp)
+{
+	schedule_delayed_work(&resp->dwork, msecs_to_jiffies(TCS_MBOX_TOUT_MS));
+}
+
+/**
+ * tcs_irq_handler: TX Done / Recv data handler
+ */
+static irqreturn_t tcs_irq_handler(int irq, void *p)
+{
+	struct tcs_drv *drv = p;
+	void __iomem *base = drv->reg_base;
+	int m, i;
+	u32 irq_status, sts;
+	struct tcs_mbox *tcs;
+	struct tcs_response *resp;
+	u32 irq_clear = 0;
+	u32 data;
+
+	/* Know which TCSes were triggered */
+	irq_status = read_tcs_reg(base, TCS_DRV_IRQ_STATUS, 0, 0);
+
+	for (m = 0; irq_status >= BIT(m); m++) {
+		if (!(irq_status & BIT(m)))
+			continue;
+
+		/* Find the TCS that triggered */
+		resp = get_tcs_response(drv, m);
+		if (!resp) {
+			pr_err("No resp request for TCS-%d\n", m);
+			continue;
+		}
+
+		cancel_delayed_work(&resp->dwork);
+
+		/* Clear the AMC mode for non-ACTIVE TCSes */
+		tcs = get_tcs_from_index(drv, m);
+		if (!tcs) {
+			pr_err("TCS-%d doesn't exist in DRV\n", m);
+			continue;
+		}
+		if (tcs->type != ACTIVE_TCS) {
+			data = read_tcs_reg(base, TCS_DRV_CONTROL, m, 0);
+			data &= ~TCS_AMC_MODE_ENABLE;
+			write_tcs_reg(base, TCS_DRV_CONTROL, m, 0, data);
+		} else {
+			/* Clear the enable bit for the commands */
+			write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
+		}
+
+		/* Check if all commands were completed */
+		resp->err = 0;
+		for (i = 0; i < resp->msg->num_payload; i++) {
+			sts = read_tcs_reg(base, TCS_DRV_CMD_STATUS, m, i);
+			if (!(sts & CMD_STATUS_ISSUED) ||
+				(resp->msg->is_complete &&
+					!(sts & CMD_STATUS_COMPL)))
+				resp->err = -EIO;
+		}
+
+		/* Check for response if this was a read request */
+		if (resp->msg->is_read) {
+			/* Return the read data in the same request payload */
+			data = read_tcs_reg(base, TCS_DRV_CMD_RESP_DATA, m, 0);
+			resp->msg->payload[0].data = data;
+			mbox_chan_received_data(resp->chan, resp->msg);
+		}
+
+		trace_rpmh_notify_irq(m, resp->msg->payload[0].addr, resp->err);
+
+		/* Notify the client that this request is completed. */
+		send_tcs_response(resp);
+		irq_clear |= BIT(m);
+	}
+
+	/* Clear the TCS IRQ status */
+	write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, irq_clear);
+
+	return IRQ_HANDLED;
+}
+
+static inline void mbox_notify_tx_done(struct mbox_chan *chan,
+				struct tcs_mbox_msg *msg, int m, int err)
+{
+	trace_rpmh_notify(m, msg->payload[0].addr, err);
+	mbox_chan_txdone(chan, err);
+}
+
+/**
+ * tcs_notify_tx_done: TX Done for requests that do not trigger TCS
+ */
+static void tcs_notify_tx_done(unsigned long data)
+{
+	struct tcs_response *resp = (struct tcs_response *) data;
+	struct mbox_chan *chan = resp->chan;
+	struct tcs_mbox_msg *msg = resp->msg;
+	int err = resp->err;
+	int m = resp->m;
+
+	free_response_to_pool(resp);
+	mbox_notify_tx_done(chan, msg, m, err);
+}
+
+/**
+ * tcs_notify_timeout: TX Done for requests that do trigger TCS, but
+ * we do not get a response IRQ back.
+ */
+static void tcs_notify_timeout(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tcs_response *resp = container_of(dwork,
+					struct tcs_response, dwork);
+	struct mbox_chan *chan = resp->chan;
+	struct tcs_mbox_msg *msg = resp->msg;
+	struct tcs_drv *drv = resp->drv;
+	int m = resp->m;
+	int err = -EIO;
+
+	/*
+	 * In case the RPMH resource fails to respond to the completion
+	 * request, the TCS would be blocked forever waiting on the response.
+	 * There is no way to recover from this case.
+	 */
+	if (!tcs_is_free(drv->reg_base, m)) {
+		bool pending = false;
+		struct tcs_cmd *cmd;
+		int i;
+		u32 addr;
+
+		for (i = 0; i < msg->num_payload; i++) {
+			cmd = &msg->payload[i];
+			addr = read_tcs_reg(drv->reg_base, TCS_DRV_CMD_ADDR,
+						m, i);
+			pending = (cmd->addr == addr);
+		}
+		if (pending) {
+			pr_err("TCS-%d blocked waiting for RPMH to respond.\n",
+				m);
+			for (i = 0; i < msg->num_payload; i++)
+				pr_err("Addr: 0x%x Data: 0x%x\n",
+						msg->payload[i].addr,
+						msg->payload[i].data);
+			BUG();
+		}
+	}
+
+	free_response_to_pool(resp);
+	mbox_notify_tx_done(chan, msg, -1, err);
+}
+
+static void __tcs_buffer_write(void __iomem *base, int d, int m, int n,
+			struct tcs_mbox_msg *msg, bool trigger)
+{
+	u32 cmd_msgid = 0;
+	u32 cmd_enable = 0;
+	u32 cmd_complete;
+	u32 enable = TCS_AMC_MODE_ENABLE;
+	struct tcs_cmd *cmd;
+	int i;
+
+	/* We have a homologous command set, i.e. pure read or write, not a mix */
+	cmd_msgid = CMD_MSGID_LEN;
+	cmd_msgid |= (msg->is_complete) ? CMD_MSGID_RESP_REQ : 0;
+	cmd_msgid |= (!msg->is_read) ? CMD_MSGID_WRITE : 0;
+
+	/* Read the send-after-prev complete flag for those already in TCS */
+	cmd_complete = read_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0);
+
+	for (i = 0; i < msg->num_payload; i++) {
+		cmd = &msg->payload[i];
+		cmd_enable |= BIT(n + i);
+		cmd_complete |= cmd->complete << (n + i);
+		write_tcs_reg(base, TCS_DRV_CMD_MSGID, m, n + i, cmd_msgid);
+		write_tcs_reg(base, TCS_DRV_CMD_ADDR, m, n + i, cmd->addr);
+		write_tcs_reg(base, TCS_DRV_CMD_DATA, m, n + i, cmd->data);
+		trace_rpmh_send_msg(base, m, n + i,
+				cmd_msgid, cmd->addr, cmd->data, cmd->complete);
+	}
+
+	/* Write the send-after-prev completion bits for the batch */
+	write_tcs_reg(base, TCS_DRV_CMD_WAIT_FOR_CMPL, m, 0, cmd_complete);
+
+	/* Enable the new commands in TCS */
+	cmd_enable |= read_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0);
+	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, cmd_enable);
+
+	if (trigger) {
+		/* Clear pending interrupt bits for this TCS, OK to not lock */
+		write_tcs_reg(base, TCS_DRV_IRQ_CLEAR, 0, 0, BIT(m));
+		/* HW req: Clear the DRV_CONTROL and enable TCS again */
+		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, 0);
+		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
+		/* Enable the AMC mode on the TCS */
+		enable |= TCS_AMC_MODE_TRIGGER;
+		write_tcs_reg_sync(base, TCS_DRV_CONTROL, m, 0, enable);
+	}
+}
+
+/**
+ * tcs_drv_is_idle: Check if any of the AMCs are busy.
+ *
+ * @mbox: The mailbox controller.
+ *
+ * Returns true if the AMCs are not engaged or absent.
+ */
+static bool tcs_drv_is_idle(struct mbox_controller *mbox)
+{
+	int m;
+	struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
+	struct tcs_mbox *tcs = get_tcs_of_type(drv, ACTIVE_TCS);
+
+	/* Check for WAKE TCS if there are no ACTIVE TCS */
+	if (IS_ERR(tcs))
+		tcs = get_tcs_of_type(drv, WAKE_TCS);
+
+	for (m = tcs->tcs_offset; m < tcs->tcs_offset + tcs->num_tcs; m++)
+		if (!tcs_is_free(drv->reg_base, m))
+			return false;
+
+	return true;
+}
+
+static void wait_for_req_inflight(struct tcs_drv *drv, struct tcs_mbox *tcs,
+						struct tcs_mbox_msg *msg)
+{
+	u32 curr_enabled;
+	int i, j, k;
+	bool is_free;
+
+	do {
+		is_free = true;
+		for (i = 1; i > tcs->tcs_mask; i = i << 1) {
+			if (!(tcs->tcs_mask & i))
+				continue;
+			if (tcs_is_free(drv->reg_base, i))
+				continue;
+			curr_enabled = read_tcs_reg(drv->reg_base,
+						TCS_DRV_CMD_ENABLE, i, 0);
+			for (j = 0; j < msg->num_payload; j++) {
+				for (k = 0; k < curr_enabled; k++) {
+					if (!(curr_enabled & BIT(k)))
+						continue;
+					if (tcs->cmd_addr[k] ==
+						msg->payload[j].addr) {
+						is_free = false;
+						goto retry;
+					}
+				}
+			}
+		}
+retry:
+		if (!is_free)
+			cpu_relax();
+	} while (!is_free);
+}
+
+static int find_free_tcs(struct tcs_mbox *tcs)
+{
+	int slot, m = 0;
+
+	/* Loop until we find a free AMC */
+	do {
+		if (tcs_is_free(tcs->drv->reg_base, tcs->tcs_offset + m)) {
+			slot = m * tcs->ncpt;
+			break;
+		}
+		if (++m >= tcs->num_tcs)
+			m = 0;
+		cpu_relax();
+	} while (1);
+
+	return slot;
+}
+
+static int find_match(struct tcs_mbox *tcs, struct tcs_cmd *cmd, int len)
+{
+	bool found = false;
+	int i = 0, j;
+
+	/* Check for already cached commands */
+	while ((i = find_next_bit(tcs->slots, MAX_TCS_SLOTS, i)) <
+			MAX_TCS_SLOTS) {
+		if (tcs->cmd_addr[i] != cmd[0].addr) {
+			i++;
+			continue;
+		}
+		/* sanity check to ensure the seq is same */
+		for (j = 1; j < len; j++) {
+			if (WARN(tcs->cmd_addr[i + j] != cmd[j].addr,
+				 "Message does not match previous sequence.\n"))
+				return -EINVAL;
+		}
+		found = true;
+		break;
+	}
+
+	return found ? i : -1;
+}
+
+static int find_slots(struct tcs_mbox *tcs, struct tcs_mbox_msg *msg)
+{
+	int slot;
+	int n = 0;
+
+	/* For active requests find the first free AMC. */
+	if (tcs->type == ACTIVE_TCS)
+		return find_free_tcs(tcs);
+
+	/* Find if we already have the msg in our TCS */
+	slot = find_match(tcs, msg->payload, msg->num_payload);
+	if (slot >= 0)
+		return slot;
+
+	/* Do over, until we can fit the full payload in a TCS */
+	do {
+		slot = bitmap_find_next_zero_area(tcs->slots, MAX_TCS_SLOTS,
+						n, msg->num_payload, 0);
+		if (slot == MAX_TCS_SLOTS)
+			break;
+		n += tcs->ncpt;
+	} while (slot + msg->num_payload - 1 >= n);
+
+	return (slot != MAX_TCS_SLOTS) ? slot : -ENOMEM;
+}
+
+static struct tcs_response *setup_response(struct tcs_mbox *tcs,
+		struct mbox_chan *chan, struct tcs_mbox_msg *msg, int m)
+{
+	struct tcs_response *resp = get_response_from_pool(tcs->drv);
+
+	if (IS_ERR(resp))
+		return resp;
+
+	if (m < tcs->tcs_offset)
+		return ERR_PTR(-EINVAL);
+
+	tcs->resp[m - tcs->tcs_offset] = resp;
+	resp->msg = msg;
+	resp->chan = chan;
+	resp->m = m;
+	resp->err = 0;
+
+	return resp;
+}
+
+static int tcs_mbox_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg,
+				bool trigger)
+{
+	const struct device *dev = chan->cl->dev;
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+	int d = drv->drv_id;
+	struct tcs_mbox *tcs;
+	int i, slot, offset, m, n;
+	struct tcs_response *resp;
+
+	tcs = get_tcs_for_msg(drv, msg);
+	if (IS_ERR(tcs))
+		return PTR_ERR(tcs);
+
+	/* Identify the sequential slots that we can write to */
+	spin_lock(&tcs->tcs_lock);
+	slot = find_slots(tcs, msg);
+	if (slot < 0) {
+		dev_err(dev, "No TCS slot found.\n");
+		spin_unlock(&tcs->tcs_lock);
+		return slot;
+	}
+	/* Mark the slots as in-use, before we unlock */
+	if (tcs->type == SLEEP_TCS || tcs->type == WAKE_TCS)
+		bitmap_set(tcs->slots, slot, msg->num_payload);
+
+	/* Copy the addresses of the resources over to the slots */
+	for (i = 0; tcs->cmd_addr && i < msg->num_payload; i++)
+		tcs->cmd_addr[slot + i] = msg->payload[i].addr;
+
+	if (trigger)
+		resp = setup_response(tcs, chan, msg,
+				slot / tcs->ncpt + tcs->tcs_offset);
+
+	spin_unlock(&tcs->tcs_lock);
+
+	/*
+	 * Find the TCS corresponding to the slot and start writing.
+	 * Break down 'slot' into a 'n' position in the 'm'th TCS.
+	 */
+	offset = slot / tcs->ncpt;
+	m = offset + tcs->tcs_offset;
+	n = slot % tcs->ncpt;
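+	/*
+	 * For example (hypothetical numbers): with ncpt = 16 and
+	 * tcs_offset = 2, slot 35 gives offset = 35 / 16 = 2, so
+	 * m = 4 and n = 35 % 16 = 3.
+	 */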
+
+	spin_lock(&tcs->tcs_m_lock[offset]);
+	if (trigger) {
+		/* Block, if we have an address from the msg in flight */
+		wait_for_req_inflight(drv, tcs, msg);
+		/* If the TCS is busy there is nothing to do but spin wait */
+		while (!tcs_is_free(drv->reg_base, m))
+			cpu_relax();
+	}
+
+	/* Write to the TCS or AMC */
+	__tcs_buffer_write(drv->reg_base, d, m, n, msg, trigger);
+
+	/* Schedule a timeout response, in case there is no actual response */
+	if (trigger)
+		schedule_tcs_err_response(resp);
+
+	spin_unlock(&tcs->tcs_m_lock[offset]);
+
+	return 0;
+}
+
+/**
+ * chan_tcs_write: Validate the incoming message and write to the
+ * appropriate TCS block.
+ *
+ * @chan: the MBOX channel
+ * @data: the tcs_mbox_msg*
+ *
+ * Returns a negative error for an invalid message structure or invalid
+ * message combination, -EBUSY if there is another active request for
+ * the channel in progress, and otherwise bubbles up the internal error.
+ */
+static int chan_tcs_write(struct mbox_chan *chan, void *data)
+{
+	struct tcs_mbox_msg *msg = data;
+	const struct device *dev = chan->cl->dev;
+	int ret = -EINVAL;
+
+	if (!msg) {
+		dev_err(dev, "Payload error.\n");
+		goto tx_fail;
+	}
+
+	if (!msg->payload || msg->num_payload > MAX_RPMH_PAYLOAD) {
+		dev_err(dev, "Payload error.\n");
+		goto tx_fail;
+	}
+
+	if (msg->invalidate || msg->is_control) {
+		dev_err(dev, "Incorrect API.\n");
+		goto tx_fail;
+	}
+
+	if (msg->state != RPMH_ACTIVE_ONLY_STATE &&
+			msg->state != RPMH_AWAKE_STATE) {
+		dev_err(dev, "Incorrect API.\n");
+		goto tx_fail;
+	}
+
+	/* Read requests should always be single */
+	if (msg->is_read && msg->num_payload > 1) {
+		dev_err(dev, "Incorrect read request.\n");
+		goto tx_fail;
+	}
+
+	/* Post the message to the TCS and trigger */
+	ret = tcs_mbox_write(chan, msg, true);
+
+tx_fail:
+	if (ret) {
+		struct tcs_drv *drv = container_of(chan->mbox,
+							struct tcs_drv, mbox);
+		struct tcs_response *resp = get_response_from_pool(drv);
+
+		resp->chan = chan;
+		resp->msg = msg;
+		resp->err = ret;
+
+		dev_err(dev, "Error sending RPMH message %d\n", ret);
+		send_tcs_response(resp);
+	}
+
+	return 0;
+}
+
+static void __tcs_buffer_invalidate(void __iomem *base, int m)
+{
+	write_tcs_reg(base, TCS_DRV_CMD_ENABLE, m, 0, 0);
+}
+
+static int tcs_mbox_invalidate(struct mbox_chan *chan)
+{
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+	struct tcs_mbox *tcs;
+	int m, i;
+	int inv_types[] = { WAKE_TCS, SLEEP_TCS };
+	int type = 0;
+
+	do {
+		tcs = get_tcs_of_type(drv, inv_types[type]);
+		if (IS_ERR(tcs))
+			return PTR_ERR(tcs);
+
+		spin_lock(&tcs->tcs_lock);
+		for (i = 0; i < tcs->num_tcs; i++) {
+			m = i + tcs->tcs_offset;
+			spin_lock(&tcs->tcs_m_lock[i]);
+			while (!tcs_is_free(drv->reg_base, m))
+				cpu_relax();
+			__tcs_buffer_invalidate(drv->reg_base, m);
+			spin_unlock(&tcs->tcs_m_lock[i]);
+		}
+		/* Mark the TCS as free */
+		bitmap_zero(tcs->slots, MAX_TCS_SLOTS);
+		spin_unlock(&tcs->tcs_lock);
+	} while (++type < ARRAY_SIZE(inv_types));
+
+	return 0;
+}
+
+static void __tcs_write_hidden(void *base, int d, struct tcs_mbox_msg *msg)
+{
+	int i;
+	void __iomem *addr;
+	const u32 offset = TCS_HIDDEN_CMD0_DRV_DATA - TCS_HIDDEN_CMD0_DRV_ADDR;
+
+	addr = base + TCS_HIDDEN_CMD0_DRV_ADDR;
+	for (i = 0; i < msg->num_payload; i++) {
+		/* Only data is write capable */
+		writel_relaxed(cpu_to_le32(msg->payload[i].data),
+							addr + offset);
+		trace_rpmh_control_msg(addr + offset, msg->payload[i].data);
+		addr += TCS_HIDDEN_CMD_SHIFT;
+	}
+}
+
+static int tcs_control_write(struct mbox_chan *chan, struct tcs_mbox_msg *msg)
+{
+	const struct device *dev = chan->cl->dev;
+	struct tcs_drv *drv = container_of(chan->mbox, struct tcs_drv, mbox);
+	struct tcs_mbox *tcs;
+
+	tcs = get_tcs_of_type(drv, CONTROL_TCS);
+	if (IS_ERR(tcs))
+		return PTR_ERR(tcs);
+
+	if (msg->num_payload != tcs->ncpt) {
+		dev_err(dev, "Request must fit the control TCS size.\n");
+		return -EINVAL;
+	}
+
+	spin_lock(&tcs->tcs_lock);
+	__tcs_write_hidden(tcs->drv->base, drv->drv_id, msg);
+	spin_unlock(&tcs->tcs_lock);
+
+	return 0;
+}
+
+/**
+ * chan_tcs_ctrl_write: Write message to the controller, no ACK sent.
+ *
+ * @chan: the MBOX channel
+ * @data: the tcs_mbox_msg*
+ */
+static int chan_tcs_ctrl_write(struct mbox_chan *chan, void *data)
+{
+	struct tcs_mbox_msg *msg = data;
+	const struct device *dev = chan->cl->dev;
+	int ret = -EINVAL;
+
+	if (!msg) {
+		dev_err(dev, "Payload error.\n");
+		goto tx_done;
+	}
+
+	if (msg->num_payload > MAX_RPMH_PAYLOAD) {
+		dev_err(dev, "Payload error.\n");
+		goto tx_done;
+	}
+
+	/* Invalidate sleep/wake TCS */
+	if (msg->invalidate) {
+		ret = tcs_mbox_invalidate(chan);
+		goto tx_done;
+	}
+
+	/* Control slots are unique. They carry specific data. */
+	if (msg->is_control) {
+		ret = tcs_control_write(chan, msg);
+		goto tx_done;
+	}
+
+	if (msg->is_complete) {
+		dev_err(dev, "Incorrect ctrl request.\n");
+		goto tx_done;
+	}
+
+	/* Post the message to the TCS without trigger */
+	ret = tcs_mbox_write(chan, msg, false);
+
+tx_done:
+	return ret;
+}
+
+static int chan_init(struct mbox_chan *chan)
+{
+	return 0;
+}
+
+static void chan_shutdown(struct mbox_chan *chan)
+{ }
+
+static const struct mbox_chan_ops mbox_ops = {
+	.send_data = chan_tcs_write,
+	.send_controller_data = chan_tcs_ctrl_write,
+	.startup = chan_init,
+	.shutdown = chan_shutdown,
+};
+
+static struct mbox_chan *of_tcs_mbox_xlate(struct mbox_controller *mbox,
+				const struct of_phandle_args *sp)
+{
+	struct tcs_drv *drv = container_of(mbox, struct tcs_drv, mbox);
+	struct mbox_chan *chan;
+
+	if (drv->num_assigned >= mbox->num_chans) {
+		pr_err("TCS-Mbox out of channel memory\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	chan = &mbox->chans[drv->num_assigned++];
+
+	return chan;
+}
+
+static int tcs_drv_probe(struct platform_device *pdev)
+{
+	struct device_node *dn = pdev->dev.of_node;
+	struct device_node *np;
+	struct tcs_drv *drv;
+	struct mbox_chan *chans;
+	struct tcs_mbox *tcs;
+	struct of_phandle_args p;
+	int irq;
+	u32 val[8] = { 0 };
+	int num_chans = 0;
+	int st = 0;
+	int i, j, ret, nelem;
+	u32 config, max_tcs, ncpt;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+
+	of_property_read_u32(dn, "qcom,drv-id", &drv->drv_id);
+
+	drv->base = of_iomap(dn, 0);
+	if (!drv->base)
+		return -ENOMEM;
+
+	drv->reg_base = of_iomap(dn, 1);
+	if (!drv->reg_base)
+		return -ENOMEM;
+
+	config = read_drv_config(drv->base);
+	max_tcs = config & (DRV_NUM_TCS_MASK <<
+				(DRV_NUM_TCS_SHIFT * drv->drv_id));
+	max_tcs = max_tcs >> (DRV_NUM_TCS_SHIFT * drv->drv_id);
+	ncpt = config & (DRV_NCPT_MASK << DRV_NCPT_SHIFT);
+	ncpt = ncpt >> DRV_NCPT_SHIFT;
+
+	nelem = of_property_count_elems_of_size(dn, "qcom,tcs-config",
+						sizeof(u32));
+	if (!nelem || (nelem % 2) || (nelem > 2 * TCS_TYPE_NR))
+		return -EINVAL;
+
+	ret = of_property_read_u32_array(dn, "qcom,tcs-config", val, nelem);
+	if (ret)
+		return ret;
+
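+	/*
+	 * Illustrative sketch (not taken from this patch): the
+	 * "qcom,tcs-config" cells are consumed as <type count> pairs,
+	 * e.g. <SLEEP_TCS 3>, <WAKE_TCS 3>, <ACTIVE_TCS 2>, <CONTROL_TCS 1>;
+	 * the loop below walks one pair per iteration.
+	 */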
+	for (i = 0; i < (nelem / 2); i++) {
+		tcs = &drv->tcs[i];
+		tcs->drv = drv;
+		tcs->type = val[2 * i];
+		tcs->num_tcs = val[2 * i + 1];
+		tcs->ncpt = (tcs->type == CONTROL_TCS) ? TCS_HIDDEN_MAX_SLOTS
+							: ncpt;
+		spin_lock_init(&tcs->tcs_lock);
+
+		if (tcs->num_tcs <= 0 || tcs->type == CONTROL_TCS)
+			continue;
+
+		if (tcs->num_tcs > MAX_TCS_PER_TYPE)
+			return -EINVAL;
+
+		if (st > max_tcs)
+			return -EINVAL;
+
+		tcs->tcs_mask = ((1 << tcs->num_tcs) - 1) << st;
+		tcs->tcs_offset = st;
+		st += tcs->num_tcs;
+
+		tcs->cmd_addr = devm_kzalloc(&pdev->dev, sizeof(u32) *
+					tcs->num_tcs * tcs->ncpt, GFP_KERNEL);
+		if (!tcs->cmd_addr)
+			return -ENOMEM;
+
+		for (j = 0; j < tcs->num_tcs; j++)
+			spin_lock_init(&tcs->tcs_m_lock[j]);
+	}
+
+	/* Allocate only that many channels specified in DT for our MBOX */
+	for_each_node_with_property(np, "mboxes") {
+		if (!of_device_is_available(np))
+			continue;
+		i = of_count_phandle_with_args(np, "mboxes", "#mbox-cells");
+		for (j = 0; j < i; j++) {
+			ret = of_parse_phandle_with_args(np, "mboxes",
+							"#mbox-cells", j, &p);
+			if (!ret && p.np == pdev->dev.of_node) {
+				num_chans++;
+				break;
+			}
+		}
+	}
+
+	if (!num_chans) {
+		pr_err("%s: No clients for controller (%s)\n", __func__,
+							dn->full_name);
+		return -ENODEV;
+	}
+
+	chans = devm_kzalloc(&pdev->dev, num_chans * sizeof(*chans),
+				GFP_KERNEL);
+	if (!chans)
+		return -ENOMEM;
+
+	for (i = 0; i < num_chans; i++) {
+		chans[i].mbox = &drv->mbox;
+		chans[i].txdone_method = TXDONE_BY_IRQ;
+	}
+
+	drv->mbox.dev = &pdev->dev;
+	drv->mbox.ops = &mbox_ops;
+	drv->mbox.chans = chans;
+	drv->mbox.num_chans = num_chans;
+	drv->mbox.txdone_irq = true;
+	drv->mbox.of_xlate = of_tcs_mbox_xlate;
+	drv->mbox.is_idle = tcs_drv_is_idle;
+	drv->num_tcs = st;
+	drv->pdev = pdev;
+
+	ret = tcs_response_pool_init(drv);
+	if (ret)
+		return ret;
+
+	irq = of_irq_get(dn, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
+			tcs_irq_handler,
+			IRQF_ONESHOT | IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND,
+			"tcs_irq", drv);
+	if (ret)
+		return ret;
+
+	/* Enable interrupts for AMC TCS */
+	write_tcs_reg(drv->reg_base, TCS_DRV_IRQ_ENABLE, 0, 0,
+					drv->tcs[ACTIVE_TCS].tcs_mask);
+
+	ret = mbox_controller_register(&drv->mbox);
+	if (ret)
+		return ret;
+
+	pr_debug("Mailbox controller (%s, drv=%d) registered\n",
+					dn->full_name, drv->drv_id);
+
+	return 0;
+}
+
+static const struct of_device_id tcs_drv_match[] = {
+	{ .compatible = "qcom,tcs-drv", },
+	{ }
+};
+
+static struct platform_driver tcs_mbox_driver = {
+	.probe = tcs_drv_probe,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = tcs_drv_match,
+	},
+};
+
+static int __init tcs_mbox_driver_init(void)
+{
+	return platform_driver_register(&tcs_mbox_driver);
+}
+arch_initcall(tcs_mbox_driver_init);
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 02a5345..f550da3 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -500,4 +500,21 @@
 
 	  If unsure, say N.
 
+config DM_ANDROID_VERITY
+	tristate "Android verity target support"
+	depends on DM_VERITY
+	depends on X509_CERTIFICATE_PARSER
+	depends on SYSTEM_TRUSTED_KEYRING
+	depends on PUBLIC_KEY_ALGO_RSA
+	depends on KEYS
+	depends on ASYMMETRIC_KEY_TYPE
+	depends on ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+	depends on MD_LINEAR
+	---help---
+	  This device-mapper target is effectively a VERITY target. The
+	  target is set up by reading the metadata contents piggybacked
+	  onto the actual data blocks in the block device. The signature
+	  of the metadata contents is verified against the key included
+	  in the system keyring. Upon success, the underlying verity
+	  target is set up.
 endif # MD
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index 3cbda1a..f26ce41 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -59,6 +59,7 @@
 obj-$(CONFIG_DM_CACHE_CLEANER)	+= dm-cache-cleaner.o
 obj-$(CONFIG_DM_ERA)		+= dm-era.o
 obj-$(CONFIG_DM_LOG_WRITES)	+= dm-log-writes.o
+obj-$(CONFIG_DM_ANDROID_VERITY) += dm-android-verity.o
 
 ifeq ($(CONFIG_DM_UEVENT),y)
 dm-mod-objs			+= dm-uevent.o
diff --git a/drivers/md/dm-android-verity.c b/drivers/md/dm-android-verity.c
new file mode 100644
index 0000000..bb6c128
--- /dev/null
+++ b/drivers/md/dm-android-verity.c
@@ -0,0 +1,925 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/device-mapper.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/of.h>
+#include <linux/reboot.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <crypto/hash.h>
+#include <crypto/public_key.h>
+#include <crypto/sha.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+
+#include "dm-verity.h"
+#include "dm-android-verity.h"
+
+static char verifiedbootstate[VERITY_COMMANDLINE_PARAM_LENGTH];
+static char veritymode[VERITY_COMMANDLINE_PARAM_LENGTH];
+static char veritykeyid[VERITY_DEFAULT_KEY_ID_LENGTH];
+static char buildvariant[BUILD_VARIANT];
+
+static bool target_added;
+static bool verity_enabled = true;
+struct dentry *debug_dir;
+static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+
+static struct target_type android_verity_target = {
+	.name                   = "android-verity",
+	.version                = {1, 0, 0},
+	.module                 = THIS_MODULE,
+	.ctr                    = android_verity_ctr,
+	.dtr                    = verity_dtr,
+	.map                    = verity_map,
+	.status                 = verity_status,
+	.prepare_ioctl          = verity_prepare_ioctl,
+	.iterate_devices        = verity_iterate_devices,
+	.io_hints               = verity_io_hints,
+};
+
+static int __init verified_boot_state_param(char *line)
+{
+	strlcpy(verifiedbootstate, line, sizeof(verifiedbootstate));
+	return 1;
+}
+
+__setup("androidboot.verifiedbootstate=", verified_boot_state_param);
+
+static int __init verity_mode_param(char *line)
+{
+	strlcpy(veritymode, line, sizeof(veritymode));
+	return 1;
+}
+
+__setup("androidboot.veritymode=", verity_mode_param);
+
+static int __init verity_keyid_param(char *line)
+{
+	strlcpy(veritykeyid, line, sizeof(veritykeyid));
+	return 1;
+}
+
+__setup("veritykeyid=", verity_keyid_param);
+
+static int __init verity_buildvariant(char *line)
+{
+	strlcpy(buildvariant, line, sizeof(buildvariant));
+	return 1;
+}
+
+__setup("buildvariant=", verity_buildvariant);
+
+static inline bool default_verity_key_id(void)
+{
+	return veritykeyid[0] != '\0';
+}
+
+static inline bool is_eng(void)
+{
+	static const char typeeng[]  = "eng";
+
+	return !strncmp(buildvariant, typeeng, sizeof(typeeng));
+}
+
+static inline bool is_userdebug(void)
+{
+	static const char typeuserdebug[]  = "userdebug";
+
+	return !strncmp(buildvariant, typeuserdebug, sizeof(typeuserdebug));
+}
+
+
+static int table_extract_mpi_array(struct public_key_signature *pks,
+				const void *data, size_t len)
+{
+	MPI mpi = mpi_read_raw_data(data, len);
+
+	if (!mpi) {
+		DMERR("Error while allocating mpi array");
+		return -ENOMEM;
+	}
+
+	pks->mpi[0] = mpi;
+	pks->nr_mpi = 1;
+	return 0;
+}
+
+static struct public_key_signature *table_make_digest(
+						enum hash_algo hash,
+						const void *table,
+						unsigned long table_len)
+{
+	struct public_key_signature *pks = NULL;
+	struct crypto_shash *tfm;
+	struct shash_desc *desc;
+	size_t digest_size, desc_size;
+	int ret;
+
+	/* Allocate the hashing algorithm we're going to need and find out how
+	 * big the hash operational data will be.
+	 */
+	tfm = crypto_alloc_shash(hash_algo_name[hash], 0, 0);
+	if (IS_ERR(tfm))
+		return ERR_CAST(tfm);
+
+	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
+	digest_size = crypto_shash_digestsize(tfm);
+
+	/* We allocate the hash operational data storage on the end of our
+	 * context data and the digest output buffer on the end of that.
+	 */
+	ret = -ENOMEM;
+	pks = kzalloc(digest_size + sizeof(*pks) + desc_size, GFP_KERNEL);
+	if (!pks)
+		goto error;
+
+	pks->pkey_hash_algo = hash;
+	pks->digest = (u8 *)pks + sizeof(*pks) + desc_size;
+	pks->digest_size = digest_size;
+
+	desc = (struct shash_desc *)(pks + 1);
+	desc->tfm = tfm;
+	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+	ret = crypto_shash_init(desc);
+	if (ret < 0)
+		goto error;
+
+	ret = crypto_shash_finup(desc, table, table_len, pks->digest);
+	if (ret < 0)
+		goto error;
+
+	crypto_free_shash(tfm);
+	return pks;
+
+error:
+	kfree(pks);
+	crypto_free_shash(tfm);
+	return ERR_PTR(ret);
+}
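+
+/*
+ * Usage sketch (illustrative only): the verification path hashes the verity
+ * table and checks the packaged RSA signature against a key in the system
+ * keyring, roughly as below; "signature" and "sig_len" are hypothetical.
+ *
+ *	pks = table_make_digest(HASH_ALGO_SHA256, table, table_len);
+ *	if (!IS_ERR(pks) &&
+ *	    !table_extract_mpi_array(pks, signature, sig_len))
+ *		... verify pks against the keyring and free it ...
+ */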
+
+static int read_block_dev(struct bio_read *payload, struct block_device *bdev,
+		sector_t offset, int length)
+{
+	struct bio *bio;
+	int err = 0, i;
+
+	payload->number_of_pages = DIV_ROUND_UP(length, PAGE_SIZE);
+
+	bio = bio_alloc(GFP_KERNEL, payload->number_of_pages);
+	if (!bio) {
+		DMERR("Error while allocating bio");
+		return -ENOMEM;
+	}
+
+	bio->bi_bdev = bdev;
+	bio->bi_iter.bi_sector = offset;
+
+	payload->page_io = kzalloc(sizeof(struct page *) *
+		payload->number_of_pages, GFP_KERNEL);
+	if (!payload->page_io) {
+		DMERR("page_io array alloc failed");
+		err = -ENOMEM;
+		goto free_bio;
+	}
+
+	for (i = 0; i < payload->number_of_pages; i++) {
+		payload->page_io[i] = alloc_page(GFP_KERNEL);
+		if (!payload->page_io[i]) {
+			DMERR("alloc_page failed");
+			err = -ENOMEM;
+			goto free_pages;
+		}
+		if (!bio_add_page(bio, payload->page_io[i], PAGE_SIZE, 0)) {
+			DMERR("bio_add_page error");
+			err = -EIO;
+			goto free_pages;
+		}
+	}
+
+	if (!submit_bio_wait(READ, bio))
+		/* success */
+		goto free_bio;
+	DMERR("bio read failed");
+	err = -EIO;
+
+free_pages:
+	for (i = 0; i < payload->number_of_pages; i++)
+		if (payload->page_io[i])
+			__free_page(payload->page_io[i]);
+	kfree(payload->page_io);
+free_bio:
+	bio_put(bio);
+	return err;
+}
+
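+/*
+ * A 64-bit-safe DIV_ROUND_UP: for example, fec_div_round_up(10, 4) == 3
+ * and fec_div_round_up(8, 4) == 2.
+ */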
+static inline u64 fec_div_round_up(u64 x, u64 y)
+{
+	u64 remainder;
+
+	return div64_u64_rem(x, y, &remainder) +
+		(remainder > 0 ? 1 : 0);
+}
+
+static inline void populate_fec_metadata(struct fec_header *header,
+				struct fec_ecc_metadata *ecc)
+{
+	ecc->blocks = fec_div_round_up(le64_to_cpu(header->inp_size),
+			FEC_BLOCK_SIZE);
+	ecc->roots = le32_to_cpu(header->roots);
+	ecc->start = le64_to_cpu(header->inp_size);
+}
+
+static inline int validate_fec_header(struct fec_header *header, u64 offset)
+{
+	/* move offset to make the sanity check work for backup header
+	 * as well. */
+	offset -= offset % FEC_BLOCK_SIZE;
+	if (le32_to_cpu(header->magic) != FEC_MAGIC ||
+		le32_to_cpu(header->version) != FEC_VERSION ||
+		le32_to_cpu(header->size) != sizeof(struct fec_header) ||
+		le32_to_cpu(header->roots) == 0 ||
+		le32_to_cpu(header->roots) >= FEC_RSM)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int extract_fec_header(dev_t dev, struct fec_header *fec,
+				struct fec_ecc_metadata *ecc)
+{
+	u64 device_size;
+	struct bio_read payload;
+	int i, err = 0;
+	struct block_device *bdev;
+
+	bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+
+	if (IS_ERR_OR_NULL(bdev)) {
+		DMERR("bdev get error");
+		return PTR_ERR(bdev);
+	}
+
+	device_size = i_size_read(bdev->bd_inode);
+
+	/* fec metadata size is a power of 2 and PAGE_SIZE
+	 * is a power of 2 as well.
+	 */
+	BUG_ON(FEC_BLOCK_SIZE > PAGE_SIZE);
+	/* 512 byte sector alignment */
+	BUG_ON(((device_size - FEC_BLOCK_SIZE) % (1 << SECTOR_SHIFT)) != 0);
+
+	err = read_block_dev(&payload, bdev, (device_size -
+		FEC_BLOCK_SIZE) / (1 << SECTOR_SHIFT), FEC_BLOCK_SIZE);
+	if (err) {
+		DMERR("Error while reading verity metadata");
+		goto error;
+	}
+
+	BUG_ON(sizeof(struct fec_header) > PAGE_SIZE);
+	memcpy(fec, page_address(payload.page_io[0]),
+			sizeof(*fec));
+
+	ecc->valid = true;
+	if (validate_fec_header(fec, device_size - FEC_BLOCK_SIZE)) {
+		/* Try the backup header */
+		memcpy(fec, page_address(payload.page_io[0]) + FEC_BLOCK_SIZE
+			- sizeof(*fec),
+			sizeof(*fec));
+		if (validate_fec_header(fec, device_size -
+			sizeof(struct fec_header)))
+			ecc->valid = false;
+	}
+
+	if (ecc->valid)
+		populate_fec_metadata(fec, ecc);
+
+	for (i = 0; i < payload.number_of_pages; i++)
+		__free_page(payload.page_io[i]);
+	kfree(payload.page_io);
+
+error:
+	blkdev_put(bdev, FMODE_READ);
+	return err;
+}
+
+static void find_metadata_offset(struct fec_header *fec,
+		struct block_device *bdev, u64 *metadata_offset)
+{
+	u64 device_size;
+
+	device_size = i_size_read(bdev->bd_inode);
+
+	if (le32_to_cpu(fec->magic) == FEC_MAGIC)
+		*metadata_offset = le64_to_cpu(fec->inp_size) -
+					VERITY_METADATA_SIZE;
+	else
+		*metadata_offset = device_size - VERITY_METADATA_SIZE;
+}
+
+static int find_size(dev_t dev, u64 *device_size)
+{
+	struct block_device *bdev;
+
+	bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+	if (IS_ERR_OR_NULL(bdev)) {
+		DMERR("blkdev_get_by_dev failed");
+		return PTR_ERR(bdev);
+	}
+
+	*device_size = i_size_read(bdev->bd_inode);
+	*device_size >>= SECTOR_SHIFT;
+
+	DMINFO("blkdev size in sectors: %llu", *device_size);
+	blkdev_put(bdev, FMODE_READ);
+	return 0;
+}
+
+static int verify_header(struct android_metadata_header *header)
+{
+	int retval = -EINVAL;
+
+	if (is_userdebug() && le32_to_cpu(header->magic_number) ==
+			VERITY_METADATA_MAGIC_DISABLE)
+		return VERITY_STATE_DISABLE;
+
+	if (!(le32_to_cpu(header->magic_number) ==
+			VERITY_METADATA_MAGIC_NUMBER) ||
+			(le32_to_cpu(header->magic_number) ==
+			VERITY_METADATA_MAGIC_DISABLE)) {
+		DMERR("Incorrect magic number");
+		return retval;
+	}
+
+	if (le32_to_cpu(header->protocol_version) !=
+			VERITY_METADATA_VERSION) {
+		DMERR("Unsupported version %u",
+			le32_to_cpu(header->protocol_version));
+		return retval;
+	}
+
+	return 0;
+}
+
+static int extract_metadata(dev_t dev, struct fec_header *fec,
+				struct android_metadata **metadata,
+				bool *verity_enabled)
+{
+	struct block_device *bdev;
+	struct android_metadata_header *header;
+	int i;
+	u32 table_length, copy_length, offset;
+	u64 metadata_offset;
+	struct bio_read payload;
+	int err = 0;
+
+	bdev = blkdev_get_by_dev(dev, FMODE_READ, NULL);
+
+	if (IS_ERR_OR_NULL(bdev)) {
+		DMERR("blkdev_get_by_dev failed");
+		return -ENODEV;
+	}
+
+	find_metadata_offset(fec, bdev, &metadata_offset);
+
+	/* Verity metadata size is a power of 2 and PAGE_SIZE
+	 * is a power of 2 as well.
+	 * PAGE_SIZE is also a multiple of 512 bytes.
+	 */
+	if (VERITY_METADATA_SIZE > PAGE_SIZE)
+		BUG_ON(VERITY_METADATA_SIZE % PAGE_SIZE != 0);
+	/* 512 byte sector alignment */
+	BUG_ON(metadata_offset % (1 << SECTOR_SHIFT) != 0);
+
+	err = read_block_dev(&payload, bdev, metadata_offset /
+		(1 << SECTOR_SHIFT), VERITY_METADATA_SIZE);
+	if (err) {
+		DMERR("Error while reading verity metadata");
+		goto blkdev_release;
+	}
+
+	header = kzalloc(sizeof(*header), GFP_KERNEL);
+	if (!header) {
+		DMERR("kzalloc failed for header");
+		err = -ENOMEM;
+		goto free_payload;
+	}
+
+	memcpy(header, page_address(payload.page_io[0]),
+		sizeof(*header));
+
+	DMINFO("bio magic_number:%u protocol_version:%d table_length:%u",
+		le32_to_cpu(header->magic_number),
+		le32_to_cpu(header->protocol_version),
+		le32_to_cpu(header->table_length));
+
+	err = verify_header(header);
+
+	if (err == VERITY_STATE_DISABLE) {
+		DMERR("Mounting root with verity disabled");
+		*verity_enabled = false;
+		/*
+		 * We still have to read the metadata to figure out the data
+		 * block size; alternatively we could map the entire partition,
+		 * similar to mounting the device.
+		 *
+		 * Reset the error since verity_enabled has been changed.
+		 */
+		err = 0;
+	} else if (err)
+		goto free_header;
+
+	*metadata = kzalloc(sizeof(**metadata), GFP_KERNEL);
+	if (!*metadata) {
+		DMERR("kzalloc for metadata failed");
+		err = -ENOMEM;
+		goto free_header;
+	}
+
+	(*metadata)->header = header;
+	table_length = le32_to_cpu(header->table_length);
+
+	if (table_length == 0 ||
+		table_length > (VERITY_METADATA_SIZE -
+			sizeof(struct android_metadata_header))) {
+		DMERR("table_length too long");
+		err = -EINVAL;
+		goto free_metadata;
+	}
+
+	(*metadata)->verity_table = kzalloc(table_length + 1, GFP_KERNEL);
+
+	if (!(*metadata)->verity_table) {
+		DMERR("kzalloc verity_table failed");
+		err = -ENOMEM;
+		goto free_metadata;
+	}
+
+	if (sizeof(struct android_metadata_header) +
+			table_length <= PAGE_SIZE) {
+		memcpy((*metadata)->verity_table,
+			page_address(payload.page_io[0])
+			+ sizeof(struct android_metadata_header),
+			table_length);
+	} else {
+		copy_length = PAGE_SIZE -
+			sizeof(struct android_metadata_header);
+		memcpy((*metadata)->verity_table,
+			page_address(payload.page_io[0])
+			+ sizeof(struct android_metadata_header),
+			copy_length);
+		table_length -= copy_length;
+		offset = copy_length;
+		i = 1;
+		while (table_length != 0) {
+			if (table_length > PAGE_SIZE) {
+				memcpy((*metadata)->verity_table + offset,
+					page_address(payload.page_io[i]),
+					PAGE_SIZE);
+				offset += PAGE_SIZE;
+				table_length -= PAGE_SIZE;
+			} else {
+				memcpy((*metadata)->verity_table + offset,
+					page_address(payload.page_io[i]),
+					table_length);
+				table_length = 0;
+			}
+			i++;
+		}
+	}
+	/* table_length was consumed by the copy loop above */
+	(*metadata)->verity_table[le32_to_cpu(header->table_length)] = '\0';
+
+	DMINFO("verity_table: %s", (*metadata)->verity_table);
+	goto free_payload;
+
+free_metadata:
+	kfree(*metadata);
+	*metadata = NULL;
+free_header:
+	kfree(header);
+free_payload:
+	for (i = 0; i < payload.number_of_pages; i++)
+		if (payload.page_io[i])
+			__free_page(payload.page_io[i]);
+	kfree(payload.page_io);
+blkdev_release:
+	blkdev_put(bdev, FMODE_READ);
+	return err;
+}
+
+/* Helper functions to extract properties from the device tree */
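+/*
+ * Illustrative /firmware/android node (the value below is only an example;
+ * with veritymode = "enforcing", verity_mode() selects
+ * DM_VERITY_MODE_RESTART):
+ *
+ *	firmware {
+ *		android {
+ *			veritymode = "enforcing";
+ *		};
+ *	};
+ */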
+const char *find_dt_value(const char *name)
+{
+	struct device_node *firmware;
+	const char *value;
+
+	firmware = of_find_node_by_path("/firmware/android");
+	if (!firmware)
+		return NULL;
+	value = of_get_property(firmware, name, NULL);
+	of_node_put(firmware);
+
+	return value;
+}
+
+static int verity_mode(void)
+{
+	static const char enforcing[] = "enforcing";
+	static const char verified_mode_prop[] = "veritymode";
+	const char *value;
+
+	value = find_dt_value(verified_mode_prop);
+	if (!value)
+		value = veritymode;
+	if (!strncmp(value, enforcing, sizeof(enforcing) - 1))
+		return DM_VERITY_MODE_RESTART;
+
+	return DM_VERITY_MODE_EIO;
+}
+
+static int verify_verity_signature(char *key_id,
+		struct android_metadata *metadata)
+{
+	key_ref_t key_ref;
+	struct key *key;
+	struct public_key_signature *pks = NULL;
+	int retval = -EINVAL;
+
+	key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1),
+		&key_type_asymmetric, key_id);
+
+	if (IS_ERR(key_ref)) {
+		DMERR("keyring: key not found");
+		return -ENOKEY;
+	}
+
+	key = key_ref_to_ptr(key_ref);
+
+	pks = table_make_digest(HASH_ALGO_SHA256,
+			(const void *)metadata->verity_table,
+			le32_to_cpu(metadata->header->table_length));
+
+	if (IS_ERR(pks)) {
+		DMERR("hashing failed");
+		goto error;
+	}
+
+	retval = table_extract_mpi_array(pks, &metadata->header->signature[0],
+				RSANUMBYTES);
+	if (retval < 0) {
+		DMERR("Error extracting mpi %d", retval);
+		goto error;
+	}
+
+	retval = verify_signature(key, pks);
+	mpi_free(pks->rsa.s);
+error:
+	kfree(pks);
+	key_put(key);
+
+	return retval;
+}
+
+static void handle_error(void)
+{
+	int mode = verity_mode();
+	if (mode == DM_VERITY_MODE_RESTART) {
+		DMERR("triggering restart");
+		kernel_restart("dm-verity device corrupted");
+	} else {
+		DMERR("Mounting verity root failed");
+	}
+}
+
+static inline bool test_mult_overflow(sector_t a, u32 b)
+{
+	sector_t r = (sector_t)~0ULL;
+
+	sector_div(r, b);
+	return a > r;
+}
+
+static int add_as_linear_device(struct dm_target *ti, char *dev)
+{
+	/* Move to linear mapping defines */
+	char *linear_table_args[DM_LINEAR_ARGS] = {dev,
+					DM_LINEAR_TARGET_OFFSET};
+	int err = 0;
+
+	android_verity_target.dtr = dm_linear_dtr;
+	android_verity_target.map = dm_linear_map;
+	android_verity_target.status = dm_linear_status;
+	android_verity_target.prepare_ioctl = dm_linear_prepare_ioctl;
+	android_verity_target.iterate_devices = dm_linear_iterate_devices;
+	android_verity_target.io_hints = NULL;
+
+	err = dm_linear_ctr(ti, DM_LINEAR_ARGS, linear_table_args);
+
+	if (!err) {
+		DMINFO("Added android-verity as a linear target");
+		target_added = true;
+	} else
+		DMERR("Failed to add android-verity as linear target");
+
+	return err;
+}
+
+/*
+ * Target parameters:
+ *	<block device>	The block device for which dm-verity is being set up.
+ *	<key id>	(optional) Key id of the public key in the system
+ *			keyring. The verity metadata's signature is verified
+ *			against this key. If the key id contains spaces,
+ *			replace them with '#'. When omitted, the default
+ *			veritykeyid= parameter is used instead.
+ */
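+/*
+ * Illustrative dmsetup table line (sector count, device path and key id are
+ * hypothetical):
+ *
+ *	0 1048576 android-verity /dev/block/mmcblk0p25 \
+ *		ExampleOrg#ExampleCN:0123456789abcdef0123456789abcdef01234567
+ */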
+static int android_verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+{
+	dev_t uninitialized_var(dev);
+	struct android_metadata *metadata = NULL;
+	int err = 0, i, mode;
+	char *key_id, *table_ptr, dummy, *target_device,
+	*verity_table_args[VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS];
+	/* One for specifying number of opt args and one for mode */
+	sector_t data_sectors;
+	u32 data_block_size;
+	unsigned int no_of_args = VERITY_TABLE_ARGS + 2 + VERITY_TABLE_OPT_FEC_ARGS;
+	struct fec_header uninitialized_var(fec);
+	struct fec_ecc_metadata uninitialized_var(ecc);
+	char buf[FEC_ARG_LENGTH], *buf_ptr;
+	unsigned long long tmpll;
+	u64  uninitialized_var(device_size);
+
+	if (argc == 1) {
+		/* Use the default keyid */
+		if (default_verity_key_id())
+			key_id = veritykeyid;
+		else if (!is_eng()) {
+			DMERR("veritykeyid= is not set");
+			handle_error();
+			return -EINVAL;
+		}
+	} else if (argc == 2)
+		key_id = argv[1];
+	else {
+		DMERR("Incorrect number of arguments");
+		handle_error();
+		return -EINVAL;
+	}
+
+	target_device = argv[0];
+
+	dev = name_to_dev_t(target_device);
+	if (!dev) {
+		DMERR("no dev found for %s", target_device);
+		handle_error();
+		return -EINVAL;
+	}
+
+	if (is_eng()) {
+		err = find_size(dev, &device_size);
+		if (err) {
+			DMERR("error finding bdev size");
+			handle_error();
+			return err;
+		}
+
+		ti->len = device_size;
+		err = add_as_linear_device(ti, target_device);
+		if (err) {
+			handle_error();
+			return err;
+		}
+		verity_enabled = false;
+		return 0;
+	}
+
+	strreplace(key_id, '#', ' ');
+
+	DMINFO("key:%s dev:%s", key_id, target_device);
+
+	if (extract_fec_header(dev, &fec, &ecc)) {
+		DMERR("Error while extracting fec header");
+		handle_error();
+		return -EINVAL;
+	}
+
+	err = extract_metadata(dev, &fec, &metadata, &verity_enabled);
+
+	if (err) {
+		DMERR("Error while extracting metadata");
+		handle_error();
+		goto free_metadata;
+	}
+
+	if (verity_enabled) {
+		err = verify_verity_signature(key_id, metadata);
+
+		if (err) {
+			DMERR("Signature verification failed");
+			handle_error();
+			goto free_metadata;
+		} else
+			DMINFO("Signature verification success");
+	}
+
+	table_ptr = metadata->verity_table;
+
+	for (i = 0; i < VERITY_TABLE_ARGS; i++) {
+		verity_table_args[i] = strsep(&table_ptr, " ");
+		if (verity_table_args[i] == NULL)
+			break;
+	}
+
+	if (i != VERITY_TABLE_ARGS) {
+		DMERR("Verity table not in the expected format");
+		err = -EINVAL;
+		handle_error();
+		goto free_metadata;
+	}
+
+	if (sscanf(verity_table_args[5], "%llu%c", &tmpll, &dummy)
+							!= 1) {
+		DMERR("Verity table not in the expected format");
+		handle_error();
+		err = -EINVAL;
+		goto free_metadata;
+	}
+
+	if (tmpll > ULONG_MAX) {
+		DMERR("<num_data_blocks> too large. Forgot to turn on CONFIG_LBDAF?");
+		handle_error();
+		err = -EINVAL;
+		goto free_metadata;
+	}
+
+	data_sectors = tmpll;
+
+	if (sscanf(verity_table_args[3], "%u%c", &data_block_size, &dummy)
+								!= 1) {
+		DMERR("Verity table not in the expected format");
+		handle_error();
+		err = -EINVAL;
+		goto free_metadata;
+	}
+
+	if (test_mult_overflow(data_sectors, data_block_size >>
+							SECTOR_SHIFT)) {
+		DMERR("data_sectors too large");
+		handle_error();
+		err = -EOVERFLOW;
+		goto free_metadata;
+	}
+
+	data_sectors *= data_block_size >> SECTOR_SHIFT;
+	DMINFO("Data sectors %llu", (unsigned long long)data_sectors);
+
+	/* update target length */
+	ti->len = data_sectors;
+
+	/* Setup linear target and free */
+	if (!verity_enabled) {
+		err = add_as_linear_device(ti, target_device);
+		goto free_metadata;
+	}
+
+	/* Substitute data_dev and hash_dev */
+	verity_table_args[1] = target_device;
+	verity_table_args[2] = target_device;
+
+	mode = verity_mode();
+
+	if (ecc.valid && IS_BUILTIN(CONFIG_DM_VERITY_FEC)) {
+		if (mode) {
+			err = snprintf(buf, FEC_ARG_LENGTH,
+				"%u %s " VERITY_TABLE_OPT_FEC_FORMAT,
+				1 + VERITY_TABLE_OPT_FEC_ARGS,
+				mode == DM_VERITY_MODE_RESTART ?
+					VERITY_TABLE_OPT_RESTART :
+					VERITY_TABLE_OPT_LOGGING,
+				target_device,
+				ecc.start / FEC_BLOCK_SIZE, ecc.blocks,
+				ecc.roots);
+		} else {
+			err = snprintf(buf, FEC_ARG_LENGTH,
+				"%u " VERITY_TABLE_OPT_FEC_FORMAT,
+				VERITY_TABLE_OPT_FEC_ARGS, target_device,
+				ecc.start / FEC_BLOCK_SIZE, ecc.blocks,
+				ecc.roots);
+		}
+	} else if (mode) {
+		err = snprintf(buf, FEC_ARG_LENGTH,
+			"2 " VERITY_TABLE_OPT_IGNZERO " %s",
+			mode == DM_VERITY_MODE_RESTART ?
+			VERITY_TABLE_OPT_RESTART : VERITY_TABLE_OPT_LOGGING);
+	} else {
+		err = snprintf(buf, FEC_ARG_LENGTH, "1 %s",
+				 "ignore_zero_blocks");
+	}
+
+	if (err < 0 || err >= FEC_ARG_LENGTH)
+		goto free_metadata;
+
+	buf_ptr = buf;
+
+	for (i = VERITY_TABLE_ARGS; i < (VERITY_TABLE_ARGS +
+		VERITY_TABLE_OPT_FEC_ARGS + 2); i++) {
+		verity_table_args[i] = strsep(&buf_ptr, " ");
+		if (verity_table_args[i] == NULL) {
+			no_of_args = i;
+			break;
+		}
+	}
+
+	err = verity_ctr(ti, no_of_args, verity_table_args);
+
+	if (err)
+		DMERR("android-verity failed to mount as verity target");
+	else {
+		target_added = true;
+		DMINFO("android-verity mounted as verity target");
+	}
+
+free_metadata:
+	if (metadata) {
+		kfree(metadata->header);
+		kfree(metadata->verity_table);
+	}
+	kfree(metadata);
+	return err;
+}
+
+static int __init dm_android_verity_init(void)
+{
+	int r;
+	struct dentry *file;
+
+	r = dm_register_target(&android_verity_target);
+	if (r < 0)
+		DMERR("register failed %d", r);
+
+	/* Tracks the status of the last added target */
+	debug_dir = debugfs_create_dir("android_verity", NULL);
+
+	if (IS_ERR_OR_NULL(debug_dir)) {
+		DMERR("Cannot create android_verity debugfs directory: %ld",
+			PTR_ERR(debug_dir));
+		goto end;
+	}
+
+	file = debugfs_create_bool("target_added", S_IRUGO, debug_dir,
+				&target_added);
+
+	if (IS_ERR_OR_NULL(file)) {
+		DMERR("Cannot create android_verity debugfs directory: %ld",
+			PTR_ERR(debug_dir));
+		debugfs_remove_recursive(debug_dir);
+		goto end;
+	}
+
+	file = debugfs_create_bool("verity_enabled", S_IRUGO, debug_dir,
+				&verity_enabled);
+
+	if (IS_ERR_OR_NULL(file)) {
+		DMERR("Cannot create android_verity debugfs directory: %ld",
+			PTR_ERR(debug_dir));
+		debugfs_remove_recursive(debug_dir);
+	}
+
+end:
+	return r;
+}
+
+static void __exit dm_android_verity_exit(void)
+{
+	if (!IS_ERR_OR_NULL(debug_dir))
+		debugfs_remove_recursive(debug_dir);
+
+	dm_unregister_target(&android_verity_target);
+}
+
+module_init(dm_android_verity_init);
+module_exit(dm_android_verity_exit);
diff --git a/drivers/md/dm-android-verity.h b/drivers/md/dm-android-verity.h
new file mode 100644
index 0000000..0c7ff6a
--- /dev/null
+++ b/drivers/md/dm-android-verity.h
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef DM_ANDROID_VERITY_H
+#define DM_ANDROID_VERITY_H
+
+#include <crypto/sha.h>
+
+#define RSANUMBYTES 256
+#define VERITY_METADATA_MAGIC_NUMBER 0xb001b001
+#define VERITY_METADATA_MAGIC_DISABLE 0x46464f56
+#define VERITY_METADATA_VERSION 0
+#define VERITY_STATE_DISABLE 1
+#define DATA_BLOCK_SIZE (4 * 1024)
+#define VERITY_METADATA_SIZE (8 * DATA_BLOCK_SIZE)
+#define VERITY_TABLE_ARGS 10
+#define VERITY_COMMANDLINE_PARAM_LENGTH 20
+#define BUILD_VARIANT 20
+
+/*
+ * <subject>:<sha1-id> is the format for the identifier.
+ * The subject can either be the Common Name (CN) + Organization Name (O) or
+ * just the CN if it is prefixed with the O.
+ * From https://tools.ietf.org/html/rfc5280#appendix-A
+ * ub-organization-name-length INTEGER ::= 64
+ * ub-common-name-length INTEGER ::= 64
+ *
+ * http://lxr.free-electrons.com/source/crypto/asymmetric_keys/x509_cert_parser.c?v=3.9#L278
+ * ctx->o_size + 2 + ctx->cn_size + 1
+ * + 41 characters for ":" and sha1 id
+ * 64 + 2 + 64 + 1 + 1 + 40 (172)
+ * setting VERITY_DEFAULT_KEY_ID_LENGTH to 200 characters.
+ */
+#define VERITY_DEFAULT_KEY_ID_LENGTH 200
+
+#define FEC_MAGIC 0xFECFECFE
+#define FEC_BLOCK_SIZE (4 * 1024)
+#define FEC_VERSION 0
+#define FEC_RSM 255
+#define FEC_ARG_LENGTH 300
+
+#define VERITY_TABLE_OPT_RESTART "restart_on_corruption"
+#define VERITY_TABLE_OPT_LOGGING "ignore_corruption"
+#define VERITY_TABLE_OPT_IGNZERO "ignore_zero_blocks"
+
+#define VERITY_TABLE_OPT_FEC_FORMAT \
+	"use_fec_from_device %s fec_start %llu fec_blocks %llu fec_roots %u ignore_zero_blocks"
+#define VERITY_TABLE_OPT_FEC_ARGS 9
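+/*
+ * Illustrative expansion of the format above (device path and numbers are
+ * made up); it supplies exactly VERITY_TABLE_OPT_FEC_ARGS optional arguments:
+ *
+ *	use_fec_from_device /dev/block/mmcblk0p25 fec_start 262144
+ *	fec_blocks 264208 fec_roots 2 ignore_zero_blocks
+ */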
+
+#define VERITY_DEBUG 0
+
+#define DM_MSG_PREFIX                   "android-verity"
+
+#define DM_LINEAR_ARGS 2
+#define DM_LINEAR_TARGET_OFFSET "0"
+
+/*
+ * There can be two formats.
+ * If fec is present:
+ * <data_blocks> <verity_tree> <verity_metadata_32K><fec_data><fec_data_4K>
+ * If fec is not present:
+ * <data_blocks> <verity_tree> <verity_metadata_32K>
+ */
+struct fec_header {
+	__le32 magic;
+	__le32 version;
+	__le32 size;
+	__le32 roots;
+	__le32 fec_size;
+	__le64 inp_size;
+	u8 hash[SHA256_DIGEST_SIZE];
+} __attribute__((packed));
+
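+/*
+ * On-disk layout of the verity metadata block, derived from the structure
+ * below (byte offsets; the verity table text follows the header directly and
+ * is NUL-terminated only after being copied into memory):
+ *
+ *	  0 -   3	magic_number
+ *	  4 -   7	protocol_version
+ *	  8 - 263	signature (RSANUMBYTES)
+ *	264 - 267	table_length
+ *	268 -		verity table (table_length bytes)
+ */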
+struct android_metadata_header {
+	__le32 magic_number;
+	__le32 protocol_version;
+	char signature[RSANUMBYTES];
+	__le32 table_length;
+};
+
+struct android_metadata {
+	struct android_metadata_header *header;
+	char *verity_table;
+};
+
+struct fec_ecc_metadata {
+	bool valid;
+	u32 roots;
+	u64 blocks;
+	u64 rounds;
+	u64 start;
+};
+
+struct bio_read {
+	struct page **page_io;
+	int number_of_pages;
+};
+
+extern struct target_type linear_target;
+
+extern void dm_linear_dtr(struct dm_target *ti);
+extern int dm_linear_map(struct dm_target *ti, struct bio *bio);
+extern void dm_linear_status(struct dm_target *ti, status_type_t type,
+			unsigned status_flags, char *result, unsigned maxlen);
+extern int dm_linear_prepare_ioctl(struct dm_target *ti,
+                struct block_device **bdev, fmode_t *mode);
+extern int dm_linear_iterate_devices(struct dm_target *ti,
+			iterate_devices_callout_fn fn, void *data);
+extern int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv);
+#endif /* DM_ANDROID_VERITY_H */
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a276883..715cc52 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1863,16 +1863,24 @@
 	}
 
 	ret = -ENOMEM;
-	cc->io_queue = alloc_workqueue("kcryptd_io", WQ_MEM_RECLAIM, 1);
+	cc->io_queue = alloc_workqueue("kcryptd_io",
+				       WQ_HIGHPRI |
+				       WQ_MEM_RECLAIM,
+				       1);
 	if (!cc->io_queue) {
 		ti->error = "Couldn't create kcryptd io queue";
 		goto bad;
 	}
 
 	if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
-		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+		cc->crypt_queue = alloc_workqueue("kcryptd",
+						  WQ_HIGHPRI |
+						  WQ_MEM_RECLAIM, 1);
 	else
-		cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+		cc->crypt_queue = alloc_workqueue("kcryptd",
+						  WQ_HIGHPRI |
+						  WQ_MEM_RECLAIM |
+						  WQ_UNBOUND,
 						  num_online_cpus());
 	if (!cc->crypt_queue) {
 		ti->error = "Couldn't create kcryptd queue";
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 966eb4b..89ec6d2 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1927,6 +1927,45 @@
 	dm_hash_exit();
 }
 
+
+/**
+ * dm_ioctl_export - Permanently export a mapped device via the ioctl interface
+ * @md: Pointer to mapped_device
+ * @name: Buffer (size DM_NAME_LEN) for name
+ * @uuid: Buffer (size DM_UUID_LEN) for uuid or NULL if not desired
+ */
+int dm_ioctl_export(struct mapped_device *md, const char *name,
+		    const char *uuid)
+{
+	int r = 0;
+	struct hash_cell *hc;
+
+	if (!md) {
+		r = -ENXIO;
+		goto out;
+	}
+
+	/* The name and uuid can only be set once. */
+	mutex_lock(&dm_hash_cells_mutex);
+	hc = dm_get_mdptr(md);
+	mutex_unlock(&dm_hash_cells_mutex);
+	if (hc) {
+		DMERR("%s: already exported", dm_device_name(md));
+		r = -ENXIO;
+		goto out;
+	}
+
+	r = dm_hash_insert(name, uuid, md);
+	if (r) {
+		DMERR("%s: could not bind to '%s'", dm_device_name(md), name);
+		goto out;
+	}
+
+	/* Let udev know we've changed. */
+	dm_kobject_uevent(md, KOBJ_CHANGE, dm_get_event_nr(md));
+out:
+	return r;
+}
+
 /**
  * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
  * @md: Pointer to mapped_device
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4788b0b..760a464 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -25,7 +25,7 @@
 /*
  * Construct a linear mapping: <dev_path> <offset>
  */
-static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+int dm_linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
 {
 	struct linear_c *lc;
 	unsigned long long tmp;
@@ -66,14 +66,16 @@
 	kfree(lc);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(dm_linear_ctr);
 
-static void linear_dtr(struct dm_target *ti)
+void dm_linear_dtr(struct dm_target *ti)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
 
 	dm_put_device(ti, lc->dev);
 	kfree(lc);
 }
+EXPORT_SYMBOL_GPL(dm_linear_dtr);
 
 static sector_t linear_map_sector(struct dm_target *ti, sector_t bi_sector)
 {
@@ -92,14 +94,15 @@
 			linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
-static int linear_map(struct dm_target *ti, struct bio *bio)
+int dm_linear_map(struct dm_target *ti, struct bio *bio)
 {
 	linear_map_bio(ti, bio);
 
 	return DM_MAPIO_REMAPPED;
 }
+EXPORT_SYMBOL_GPL(dm_linear_map);
 
-static void linear_status(struct dm_target *ti, status_type_t type,
+void dm_linear_status(struct dm_target *ti, status_type_t type,
 			  unsigned status_flags, char *result, unsigned maxlen)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
@@ -115,8 +118,9 @@
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(dm_linear_status);
 
-static int linear_prepare_ioctl(struct dm_target *ti,
+int dm_linear_prepare_ioctl(struct dm_target *ti,
 		struct block_device **bdev, fmode_t *mode)
 {
 	struct linear_c *lc = (struct linear_c *) ti->private;
@@ -132,14 +136,16 @@
 		return 1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dm_linear_prepare_ioctl);
 
-static int linear_iterate_devices(struct dm_target *ti,
+int dm_linear_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
 	struct linear_c *lc = ti->private;
 
 	return fn(ti, lc->dev, lc->start, ti->len, data);
 }
+EXPORT_SYMBOL_GPL(dm_linear_iterate_devices);
 
 static long linear_direct_access(struct dm_target *ti, sector_t sector,
 				 void **kaddr, pfn_t *pfn, long size)
@@ -163,12 +169,12 @@
 	.name   = "linear",
 	.version = {1, 3, 0},
 	.module = THIS_MODULE,
-	.ctr    = linear_ctr,
-	.dtr    = linear_dtr,
-	.map    = linear_map,
-	.status = linear_status,
-	.prepare_ioctl = linear_prepare_ioctl,
-	.iterate_devices = linear_iterate_devices,
+	.ctr    = dm_linear_ctr,
+	.dtr    = dm_linear_dtr,
+	.map    = dm_linear_map,
+	.status = dm_linear_status,
+	.prepare_ioctl  = dm_linear_prepare_ioctl,
+	.iterate_devices = dm_linear_iterate_devices,
 	.direct_access = linear_direct_access,
 };
 
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index c4b53b3..399bcac 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -11,6 +11,7 @@
 #include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include <linux/ctype.h>
 #include <linux/string.h>
 #include <linux/slab.h>
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 0f0eb8a..a8d4d2f 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -11,6 +11,7 @@
 
 #include "dm-verity-fec.h"
 #include <linux/math64.h>
+#include <linux/sysfs.h>
 
 #define DM_MSG_PREFIX	"verity-fec"
 
@@ -175,9 +176,11 @@
 	if (r < 0 && neras)
 		DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
 			    v->data_dev->name, (unsigned long long)rsb, r);
-	else if (r > 0)
+	else if (r > 0) {
 		DMWARN_LIMIT("%s: FEC %llu: corrected %d errors",
 			     v->data_dev->name, (unsigned long long)rsb, r);
+		atomic_add_unless(&v->fec->corrected, 1, INT_MAX);
+	}
 
 	return r;
 }
@@ -439,6 +442,13 @@
 	if (!verity_fec_is_enabled(v))
 		return -EOPNOTSUPP;
 
+	if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
+		DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+		return -EIO;
+	}
+
+	fio->level++;
+
 	if (type == DM_VERITY_BLOCK_TYPE_METADATA)
 		block += v->data_blocks;
 
@@ -470,7 +480,7 @@
 	if (r < 0) {
 		r = fec_decode_rsb(v, io, fio, rsb, offset, true);
 		if (r < 0)
-			return r;
+			goto done;
 	}
 
 	if (dest)
@@ -480,6 +490,8 @@
 		r = verity_for_bv_block(v, io, iter, fec_bv_copy);
 	}
 
+done:
+	fio->level--;
 	return r;
 }
 
@@ -520,6 +532,7 @@
 	memset(fio->bufs, 0, sizeof(fio->bufs));
 	fio->nbufs = 0;
 	fio->output = NULL;
+	fio->level = 0;
 }
 
 /*
@@ -546,6 +559,7 @@
 void verity_fec_dtr(struct dm_verity *v)
 {
 	struct dm_verity_fec *f = v->fec;
+	struct kobject *kobj = &f->kobj_holder.kobj;
 
 	if (!verity_fec_is_enabled(v))
 		goto out;
@@ -562,6 +576,12 @@
 
 	if (f->dev)
 		dm_put_device(v->ti, f->dev);
+
+	if (kobj->state_initialized) {
+		kobject_put(kobj);
+		wait_for_completion(dm_get_completion_from_kobject(kobj));
+	}
+
 out:
 	kfree(f);
 	v->fec = NULL;
@@ -650,6 +670,28 @@
 	return 0;
 }
 
+static ssize_t corrected_show(struct kobject *kobj, struct kobj_attribute *attr,
+			      char *buf)
+{
+	struct dm_verity_fec *f = container_of(kobj, struct dm_verity_fec,
+					       kobj_holder.kobj);
+
+	return sprintf(buf, "%d\n", atomic_read(&f->corrected));
+}
+
+static struct kobj_attribute attr_corrected = __ATTR_RO(corrected);
+
+static struct attribute *fec_attrs[] = {
+	&attr_corrected.attr,
+	NULL
+};
+
+static struct kobj_type fec_ktype = {
+	.sysfs_ops = &kobj_sysfs_ops,
+	.default_attrs = fec_attrs,
+	.release = dm_kobject_release
+};
+
 /*
  * Allocate dm_verity_fec for v->fec. Must be called before verity_fec_ctr.
  */
@@ -673,8 +715,10 @@
  */
 int verity_fec_ctr(struct dm_verity *v)
 {
+	int r;
 	struct dm_verity_fec *f = v->fec;
 	struct dm_target *ti = v->ti;
+	struct mapped_device *md = dm_table_get_md(ti->table);
 	u64 hash_blocks;
 
 	if (!verity_fec_is_enabled(v)) {
@@ -682,6 +726,16 @@
 		return 0;
 	}
 
+	/* Create a kobject and sysfs attributes */
+	init_completion(&f->kobj_holder.completion);
+
+	r = kobject_init_and_add(&f->kobj_holder.kobj, &fec_ktype,
+				 &disk_to_dev(dm_disk(md))->kobj, "%s", "fec");
+	if (r) {
+		ti->error = "Cannot create kobject";
+		return r;
+	}
+
 	/*
 	 * FEC is computed over data blocks, possible metadata, and
 	 * hash blocks. In other words, FEC covers total of fec_blocks
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 7fa0298..f36a6772 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -12,6 +12,7 @@
 #ifndef DM_VERITY_FEC_H
 #define DM_VERITY_FEC_H
 
+#include "dm-core.h"
 #include "dm-verity.h"
 #include <linux/rslib.h>
 
@@ -27,6 +28,9 @@
 #define DM_VERITY_FEC_BUF_MAX \
 	(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
 
+/* maximum recursion level for verity_fec_decode */
+#define DM_VERITY_FEC_MAX_RECURSION	4
+
 #define DM_VERITY_OPT_FEC_DEV		"use_fec_from_device"
 #define DM_VERITY_OPT_FEC_BLOCKS	"fec_blocks"
 #define DM_VERITY_OPT_FEC_START		"fec_start"
@@ -48,6 +52,8 @@
 	mempool_t *extra_pool;	/* mempool for extra buffers */
 	mempool_t *output_pool;	/* mempool for output */
 	struct kmem_cache *cache;	/* cache for buffers */
+	atomic_t corrected;		/* corrected errors */
+	struct dm_kobject_holder kobj_holder;	/* for sysfs attributes */
 };
 
 /* per-bio data */
@@ -58,6 +64,7 @@
 	unsigned nbufs;		/* number of buffers allocated */
 	u8 *output;		/* buffer for corrected output */
 	size_t output_pos;
+	unsigned level;		/* recursion level */
 };
 
 #ifdef CONFIG_DM_VERITY_FEC
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0aba34a..b53539e 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -551,7 +551,7 @@
  * Bio map function. It allocates dm_verity_io structure and bio vector and
  * fills them. Then it issues prefetches and the I/O.
  */
-static int verity_map(struct dm_target *ti, struct bio *bio)
+int verity_map(struct dm_target *ti, struct bio *bio)
 {
 	struct dm_verity *v = ti->private;
 	struct dm_verity_io *io;
@@ -592,11 +592,12 @@
 
 	return DM_MAPIO_SUBMITTED;
 }
+EXPORT_SYMBOL_GPL(verity_map);
 
 /*
  * Status: V (valid) or C (corruption found)
  */
-static void verity_status(struct dm_target *ti, status_type_t type,
+void verity_status(struct dm_target *ti, status_type_t type,
 			  unsigned status_flags, char *result, unsigned maxlen)
 {
 	struct dm_verity *v = ti->private;
@@ -655,8 +656,9 @@
 		break;
 	}
 }
+EXPORT_SYMBOL_GPL(verity_status);
 
-static int verity_prepare_ioctl(struct dm_target *ti,
+int verity_prepare_ioctl(struct dm_target *ti,
 		struct block_device **bdev, fmode_t *mode)
 {
 	struct dm_verity *v = ti->private;
@@ -668,16 +670,18 @@
 		return 1;
 	return 0;
 }
+EXPORT_SYMBOL_GPL(verity_prepare_ioctl);
 
-static int verity_iterate_devices(struct dm_target *ti,
+int verity_iterate_devices(struct dm_target *ti,
 				  iterate_devices_callout_fn fn, void *data)
 {
 	struct dm_verity *v = ti->private;
 
 	return fn(ti, v->data_dev, v->data_start, ti->len, data);
 }
+EXPORT_SYMBOL_GPL(verity_iterate_devices);
 
-static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
+void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
 	struct dm_verity *v = ti->private;
 
@@ -689,8 +693,9 @@
 
 	blk_limits_io_min(limits, limits->logical_block_size);
 }
+EXPORT_SYMBOL_GPL(verity_io_hints);
 
-static void verity_dtr(struct dm_target *ti)
+void verity_dtr(struct dm_target *ti)
 {
 	struct dm_verity *v = ti->private;
 
@@ -719,6 +724,7 @@
 
 	kfree(v);
 }
+EXPORT_SYMBOL_GPL(verity_dtr);
 
 static int verity_alloc_zero_digest(struct dm_verity *v)
 {
@@ -817,7 +823,7 @@
  *	<digest>
  *	<salt>		Hex string or "-" if no salt.
  */
-static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
+int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
 {
 	struct dm_verity *v;
 	struct dm_arg_set as;
@@ -1053,6 +1059,7 @@
 
 	return r;
 }
+EXPORT_SYMBOL_GPL(verity_ctr);
 
 static struct target_type verity_target = {
 	.name		= "verity",
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index fb419f4..75effca 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -126,4 +126,14 @@
 extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
 				 sector_t block, u8 *digest, bool *is_zero);
 
+extern void verity_status(struct dm_target *ti, status_type_t type,
+			unsigned status_flags, char *result, unsigned maxlen);
+extern int verity_prepare_ioctl(struct dm_target *ti,
+                struct block_device **bdev, fmode_t *mode);
+extern int verity_iterate_devices(struct dm_target *ti,
+				iterate_devices_callout_fn fn, void *data);
+extern void verity_io_hints(struct dm_target *ti, struct queue_limits *limits);
+extern void verity_dtr(struct dm_target *ti);
+extern int verity_ctr(struct dm_target *ti, unsigned argc, char **argv);
+extern int verity_map(struct dm_target *ti, struct bio *bio);
 #endif /* DM_VERITY_H */
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 64971ba..407cf29 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -766,6 +766,18 @@
 	  An empty message will only clear the display at driver init time. Any other
 	  printf()-formatted message is valid with newline and escape codes.
 
+config UID_CPUTIME
+	bool "Per-UID cpu time statistics"
+	depends on PROFILING
+	help
+	  Per-UID CPU time statistics exported to /proc/uid_cputime.
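+
+	  Reading /proc/uid_cputime/show_uid_stat prints one line per UID in
+	  the form "<uid>: <user time> <system time>", with both times
+	  reported in microseconds. Writing a range such as "1000-1005" to
+	  /proc/uid_cputime/remove_uid_range drops the accumulated entries
+	  for those UIDs.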
+
+config MEMORY_STATE_TIME
+	tristate "Memory freq/bandwidth time statistics"
+	depends on PROFILING
+	help
+	  Memory frequency/bandwidth time-in-state statistics exported to
+	  /sys/kernel/memory_state_time.
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 3198336..6c2fde7 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -53,6 +53,8 @@
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
 obj-$(CONFIG_PANEL)             += panel.o
+obj-$(CONFIG_UID_CPUTIME) += uid_cputime.o
+obj-$(CONFIG_MEMORY_STATE_TIME)	+= memory_state_time.o
 
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_core.o
 lkdtm-$(CONFIG_LKDTM)		+= lkdtm_bugs.o
diff --git a/drivers/misc/memory_state_time.c b/drivers/misc/memory_state_time.c
new file mode 100644
index 0000000..34c797a
--- /dev/null
+++ b/drivers/misc/memory_state_time.c
@@ -0,0 +1,454 @@
+/* drivers/misc/memory_state_time.c
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kconfig.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/memory-state-time.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/time.h>
+#include <linux/timekeeping.h>
+#include <linux/workqueue.h>
+
+#define KERNEL_ATTR_RO(_name) \
+static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
+
+#define KERNEL_ATTR_RW(_name) \
+static struct kobj_attribute _name##_attr = \
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+#define FREQ_HASH_BITS 4
+DECLARE_HASHTABLE(freq_hash_table, FREQ_HASH_BITS);
+
+static DEFINE_MUTEX(mem_lock);
+
+#define TAG "memory_state_time"
+#define BW_NODE "/soc/memory-state-time"
+#define FREQ_TBL "freq-tbl"
+#define BW_TBL "bw-buckets"
+#define NUM_SOURCES "num-sources"
+
+#define LOWEST_FREQ 2
+
+static int curr_bw;
+static int curr_freq;
+static u32 *bw_buckets;
+static u32 *freq_buckets;
+static int num_freqs;
+static int num_buckets;
+static int registered_bw_sources;
+static u64 last_update;
+static bool init_success;
+static struct workqueue_struct *memory_wq;
+static u32 num_sources = 10;
+static int *bandwidths;
+
+struct freq_entry {
+	int freq;
+	u64 *buckets; /* Bandwidth buckets. */
+	struct hlist_node hash;
+};
+
+struct queue_container {
+	struct work_struct update_state;
+	int value;
+	u64 time_now;
+	int id;
+	struct mutex *lock;
+};
+
+static int find_bucket(int bw)
+{
+	int i;
+
+	if (bw_buckets != NULL) {
+		for (i = 0; i < num_buckets; i++) {
+			if (bw_buckets[i] > bw) {
+				pr_debug("Found bucket %d for bandwidth %d\n",
+					i, bw);
+				return i;
+			}
+		}
+		return num_buckets - 1;
+	}
+	return 0;
+}
+
+static u64 get_time_diff(u64 time_now)
+{
+	u64 ms;
+
+	ms = time_now - last_update;
+	last_update = time_now;
+	return ms;
+}
+
+static ssize_t show_stat_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int i, j;
+	int len = 0;
+	struct freq_entry *freq_entry;
+
+	for (i = 0; i < num_freqs; i++) {
+		hash_for_each_possible(freq_hash_table, freq_entry, hash,
+				freq_buckets[i]) {
+			if (freq_entry->freq == freq_buckets[i]) {
+				len += scnprintf(buf + len, PAGE_SIZE - len,
+						"%d ", freq_buckets[i]);
+				if (len >= PAGE_SIZE)
+					break;
+				for (j = 0; j < num_buckets; j++) {
+					len += scnprintf(buf + len,
+							PAGE_SIZE - len,
+							"%llu ",
+							freq_entry->buckets[j]);
+				}
+				len += scnprintf(buf + len, PAGE_SIZE - len,
+						"\n");
+			}
+		}
+	}
+	pr_debug("Current Time: %llu\n", ktime_get_boot_ns());
+	return len;
+}
+KERNEL_ATTR_RO(show_stat);
+
+static void update_table(u64 time_now)
+{
+	struct freq_entry *freq_entry;
+
+	pr_debug("Last known bw %d freq %d\n", curr_bw, curr_freq);
+	hash_for_each_possible(freq_hash_table, freq_entry, hash, curr_freq) {
+		if (curr_freq == freq_entry->freq) {
+			freq_entry->buckets[find_bucket(curr_bw)]
+					+= get_time_diff(time_now);
+			break;
+		}
+	}
+}
+
+static bool freq_exists(int freq)
+{
+	int i;
+
+	for (i = 0; i < num_freqs; i++) {
+		if (freq == freq_buckets[i])
+			return true;
+	}
+	return false;
+}
+
+static int calculate_total_bw(int bw, int index)
+{
+	int i;
+	int total_bw = 0;
+
+	pr_debug("memory_state_time New bw %d for id %d\n", bw, index);
+	bandwidths[index] = bw;
+	for (i = 0; i < registered_bw_sources; i++)
+		total_bw += bandwidths[i];
+	return total_bw;
+}
+
+static void freq_update_do_work(struct work_struct *work)
+{
+	struct queue_container *freq_state_update
+			= container_of(work, struct queue_container,
+			update_state);
+	if (freq_state_update) {
+		mutex_lock(&mem_lock);
+		update_table(freq_state_update->time_now);
+		curr_freq = freq_state_update->value;
+		mutex_unlock(&mem_lock);
+		kfree(freq_state_update);
+	}
+}
+
+static void bw_update_do_work(struct work_struct *work)
+{
+	struct queue_container *bw_state_update
+			= container_of(work, struct queue_container,
+			update_state);
+	if (bw_state_update) {
+		mutex_lock(&mem_lock);
+		update_table(bw_state_update->time_now);
+		curr_bw = calculate_total_bw(bw_state_update->value,
+				bw_state_update->id);
+		mutex_unlock(&mem_lock);
+		kfree(bw_state_update);
+	}
+}
+
+static void memory_state_freq_update(struct memory_state_update_block *ub,
+		int value)
+{
+	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+		if (freq_exists(value) && init_success) {
+			struct queue_container *freq_container
+				= kmalloc(sizeof(struct queue_container),
+				GFP_KERNEL);
+			if (!freq_container)
+				return;
+			INIT_WORK(&freq_container->update_state,
+					freq_update_do_work);
+			freq_container->time_now = ktime_get_boot_ns();
+			freq_container->value = value;
+			pr_debug("Scheduling freq update in work queue\n");
+			queue_work(memory_wq, &freq_container->update_state);
+		} else {
+			pr_debug("Freq does not exist.\n");
+		}
+	}
+}
+
+static void memory_state_bw_update(struct memory_state_update_block *ub,
+		int value)
+{
+	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+		if (init_success) {
+			struct queue_container *bw_container
+				= kmalloc(sizeof(struct queue_container),
+				GFP_KERNEL);
+			if (!bw_container)
+				return;
+			INIT_WORK(&bw_container->update_state,
+					bw_update_do_work);
+			bw_container->time_now = ktime_get_boot_ns();
+			bw_container->value = value;
+			bw_container->id = ub->id;
+			pr_debug("Scheduling bandwidth update in work queue\n");
+			queue_work(memory_wq, &bw_container->update_state);
+		}
+	}
+}
+
+struct memory_state_update_block *memory_state_register_frequency_source(void)
+{
+	struct memory_state_update_block *block;
+
+	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+		pr_debug("Allocating frequency source\n");
+		block = kmalloc(sizeof(struct memory_state_update_block),
+					GFP_KERNEL);
+		if (!block)
+			return NULL;
+		block->update_call = memory_state_freq_update;
+		return block;
+	}
+	pr_err("Config option disabled.\n");
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_frequency_source);
+
+struct memory_state_update_block *memory_state_register_bandwidth_source(void)
+{
+	struct memory_state_update_block *block;
+
+	if (IS_ENABLED(CONFIG_MEMORY_STATE_TIME)) {
+		pr_debug("Allocating bandwidth source %d\n",
+				registered_bw_sources);
+		block = kmalloc(sizeof(struct memory_state_update_block),
+					GFP_KERNEL);
+		if (!block)
+			return NULL;
+		block->update_call = memory_state_bw_update;
+		if (registered_bw_sources < num_sources) {
+			block->id = registered_bw_sources++;
+		} else {
+			pr_err("Unable to allocate source; max number reached\n");
+			kfree(block);
+			return NULL;
+		}
+		return block;
+	}
+	pr_err("Config option disabled.\n");
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(memory_state_register_bandwidth_source);
+
+/* Buckets are designated by their maximum.
+ * Reads the bandwidth buckets supported by the device from the device tree.
+ * Returns 0 on success or a negative errno.
+ */
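+/*
+ * Illustrative device tree node (property values are made up; the node is
+ * matched through the "memory-state-time" compatible string):
+ *
+ *	memory-state-time {
+ *		compatible = "memory-state-time";
+ *		freq-tbl = <100000 200000 400000>;
+ *		bw-buckets = <1000 2000 4000 8000>;
+ *		num-sources = <2>;
+ *	};
+ */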
+static int get_bw_buckets(struct device *dev)
+{
+	int ret, lenb;
+	struct device_node *node = dev->of_node;
+
+	of_property_read_u32(node, NUM_SOURCES, &num_sources);
+	if (of_find_property(node, BW_TBL, &lenb)) {
+		bandwidths = devm_kzalloc(dev,
+				sizeof(*bandwidths) * num_sources, GFP_KERNEL);
+		if (!bandwidths)
+			return -ENOMEM;
+		lenb /= sizeof(*bw_buckets);
+		bw_buckets = devm_kzalloc(dev, lenb * sizeof(*bw_buckets),
+				GFP_KERNEL);
+		if (!bw_buckets) {
+			devm_kfree(dev, bandwidths);
+			return -ENOMEM;
+		}
+		ret = of_property_read_u32_array(node, BW_TBL, bw_buckets,
+				lenb);
+		if (ret < 0) {
+			devm_kfree(dev, bandwidths);
+			devm_kfree(dev, bw_buckets);
+			pr_err("Unable to read bandwidth table from device tree.\n");
+			return ret;
+		}
+	}
+	curr_bw = 0;
+	num_buckets = lenb;
+	return 0;
+}
+
+/* Adds a struct freq_entry node to the hashtable for each supported
+ * frequency read from the device tree. Returns 0 on success or a negative
+ * errno.
+ */
+static int freq_buckets_init(struct device *dev)
+{
+	struct freq_entry *freq_entry;
+	int i;
+	int ret, lenf;
+	struct device_node *node = dev->of_node;
+
+	if (of_find_property(node, FREQ_TBL, &lenf)) {
+		lenf /= sizeof(*freq_buckets);
+		freq_buckets = devm_kzalloc(dev, lenf * sizeof(*freq_buckets),
+				GFP_KERNEL);
+		if (!freq_buckets)
+			return -ENOMEM;
+		pr_debug("freqs found len %d\n", lenf);
+		ret = of_property_read_u32_array(node, FREQ_TBL, freq_buckets,
+				lenf);
+		if (ret < 0) {
+			devm_kfree(dev, freq_buckets);
+			pr_err("Unable to read frequency table from device tree.\n");
+			return ret;
+		}
+		pr_debug("ret freq %d\n", ret);
+	}
+	num_freqs = lenf;
+	curr_freq = freq_buckets[LOWEST_FREQ];
+
+	for (i = 0; i < num_freqs; i++) {
+		freq_entry = devm_kzalloc(dev, sizeof(struct freq_entry),
+				GFP_KERNEL);
+		if (!freq_entry)
+			return -ENOMEM;
+		freq_entry->buckets = devm_kzalloc(dev, sizeof(u64)*num_buckets,
+				GFP_KERNEL);
+		if (!freq_entry->buckets) {
+			devm_kfree(dev, freq_entry);
+			return -ENOMEM;
+		}
+		pr_debug("memory_state_time Adding freq to ht %d\n",
+				freq_buckets[i]);
+		freq_entry->freq = freq_buckets[i];
+		hash_add(freq_hash_table, &freq_entry->hash, freq_buckets[i]);
+	}
+	return 0;
+}
+
+struct kobject *memory_kobj;
+EXPORT_SYMBOL_GPL(memory_kobj);
+
+static struct attribute *memory_attrs[] = {
+	&show_stat_attr.attr,
+	NULL
+};
+
+static struct attribute_group memory_attr_group = {
+	.attrs = memory_attrs,
+};
+
+static int memory_state_time_probe(struct platform_device *pdev)
+{
+	int error;
+
+	error = get_bw_buckets(&pdev->dev);
+	if (error)
+		return error;
+	error = freq_buckets_init(&pdev->dev);
+	if (error)
+		return error;
+	last_update = ktime_get_boot_ns();
+	init_success = true;
+
+	pr_debug("memory_state_time initialized with num_freqs %d\n",
+			num_freqs);
+	return 0;
+}
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "memory-state-time" },
+	{}
+};
+
+static struct platform_driver memory_state_time_driver = {
+	.probe = memory_state_time_probe,
+	.driver = {
+		.name = "memory-state-time",
+		.of_match_table = match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init memory_state_time_init(void)
+{
+	int error;
+
+	hash_init(freq_hash_table);
+	memory_wq = create_singlethread_workqueue("memory_wq");
+	if (!memory_wq) {
+		pr_err("Unable to create workqueue.\n");
+		return -EINVAL;
+	}
+	/*
+	 * Create the /sys/kernel/memory_state_time sysfs directory.
+	 */
+	memory_kobj = kobject_create_and_add(TAG, kernel_kobj);
+	if (!memory_kobj) {
+		pr_err("Unable to allocate memory_kobj for sysfs directory.\n");
+		error = -ENOMEM;
+		goto wq;
+	}
+	error = sysfs_create_group(memory_kobj, &memory_attr_group);
+	if (error) {
+		pr_err("Unable to create sysfs folder.\n");
+		goto kobj;
+	}
+
+	error = platform_driver_register(&memory_state_time_driver);
+	if (error) {
+		pr_err("Unable to register memory_state_time platform driver.\n");
+		goto group;
+	}
+	return 0;
+
+group:	sysfs_remove_group(memory_kobj, &memory_attr_group);
+kobj:	kobject_put(memory_kobj);
+wq:	destroy_workqueue(memory_wq);
+	return error;
+}
+module_init(memory_state_time_init);
diff --git a/drivers/misc/uid_cputime.c b/drivers/misc/uid_cputime.c
new file mode 100644
index 0000000..c1ad524
--- /dev/null
+++ b/drivers/misc/uid_cputime.c
@@ -0,0 +1,240 @@
+/* drivers/misc/uid_cputime.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#define UID_HASH_BITS	10
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+
+static DEFINE_MUTEX(uid_lock);
+static struct proc_dir_entry *parent;
+
+struct uid_entry {
+	uid_t uid;
+	cputime_t utime;
+	cputime_t stime;
+	cputime_t active_utime;
+	cputime_t active_stime;
+	struct hlist_node hash;
+};
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+	hash_for_each_possible(hash_table, uid_entry, hash, uid) {
+		if (uid_entry->uid == uid)
+			return uid_entry;
+	}
+	return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+	struct uid_entry *uid_entry;
+
+	uid_entry = find_uid_entry(uid);
+	if (uid_entry)
+		return uid_entry;
+
+	uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+	if (!uid_entry)
+		return NULL;
+
+	uid_entry->uid = uid;
+
+	hash_add(hash_table, &uid_entry->hash, uid);
+
+	return uid_entry;
+}
+
+static int uid_stat_show(struct seq_file *m, void *v)
+{
+	struct uid_entry *uid_entry;
+	struct task_struct *task, *temp;
+	cputime_t utime;
+	cputime_t stime;
+	unsigned long bkt;
+
+	mutex_lock(&uid_lock);
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		uid_entry->active_stime = 0;
+		uid_entry->active_utime = 0;
+	}
+
+	read_lock(&tasklist_lock);
+	do_each_thread(temp, task) {
+		uid_entry = find_or_register_uid(from_kuid_munged(
+			current_user_ns(), task_uid(task)));
+		if (!uid_entry) {
+			read_unlock(&tasklist_lock);
+			mutex_unlock(&uid_lock);
+			pr_err("%s: failed to find the uid_entry for uid %d\n",
+				__func__, from_kuid_munged(current_user_ns(),
+				task_uid(task)));
+			return -ENOMEM;
+		}
+		task_cputime_adjusted(task, &utime, &stime);
+		uid_entry->active_utime += utime;
+		uid_entry->active_stime += stime;
+	} while_each_thread(temp, task);
+	read_unlock(&tasklist_lock);
+
+	hash_for_each(hash_table, bkt, uid_entry, hash) {
+		cputime_t total_utime = uid_entry->utime +
+							uid_entry->active_utime;
+		cputime_t total_stime = uid_entry->stime +
+							uid_entry->active_stime;
+		seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
+			(unsigned long long)jiffies_to_msecs(
+				cputime_to_jiffies(total_utime)) * USEC_PER_MSEC,
+			(unsigned long long)jiffies_to_msecs(
+				cputime_to_jiffies(total_stime)) * USEC_PER_MSEC);
+	}
+
+	mutex_unlock(&uid_lock);
+	return 0;
+}
+
+static int uid_stat_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, uid_stat_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_stat_fops = {
+	.open		= uid_stat_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+			const char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct uid_entry *uid_entry;
+	struct hlist_node *tmp;
+	char uids[128];
+	char *start_uid, *end_uid = NULL;
+	long int uid_start = 0, uid_end = 0;
+
+	if (count >= sizeof(uids))
+		count = sizeof(uids) - 1;
+
+	if (copy_from_user(uids, buffer, count))
+		return -EFAULT;
+
+	uids[count] = '\0';
+	end_uid = uids;
+	start_uid = strsep(&end_uid, "-");
+
+	if (!start_uid || !end_uid)
+		return -EINVAL;
+
+	if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+		kstrtol(end_uid, 10, &uid_end) != 0) {
+		return -EINVAL;
+	}
+	mutex_lock(&uid_lock);
+
+	for (; uid_start <= uid_end; uid_start++) {
+		hash_for_each_possible_safe(hash_table, uid_entry, tmp,
+							hash, (uid_t)uid_start) {
+			if (uid_start == uid_entry->uid) {
+				hash_del(&uid_entry->hash);
+				kfree(uid_entry);
+			}
+		}
+	}
+
+	mutex_unlock(&uid_lock);
+	return count;
+}
+
+static const struct file_operations uid_remove_fops = {
+	.open		= uid_remove_open,
+	.release	= single_release,
+	.write		= uid_remove_write,
+};
+
+static int process_notifier(struct notifier_block *self,
+			unsigned long cmd, void *v)
+{
+	struct task_struct *task = v;
+	struct uid_entry *uid_entry;
+	cputime_t utime, stime;
+	uid_t uid;
+
+	if (!task)
+		return NOTIFY_OK;
+
+	mutex_lock(&uid_lock);
+	uid = from_kuid_munged(current_user_ns(), task_uid(task));
+	uid_entry = find_or_register_uid(uid);
+	if (!uid_entry) {
+		pr_err("%s: failed to find uid %d\n", __func__, uid);
+		goto exit;
+	}
+
+	task_cputime_adjusted(task, &utime, &stime);
+	uid_entry->utime += utime;
+	uid_entry->stime += stime;
+
+exit:
+	mutex_unlock(&uid_lock);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+	.notifier_call	= process_notifier,
+};
+
+static int __init proc_uid_cputime_init(void)
+{
+	hash_init(hash_table);
+
+	parent = proc_mkdir("uid_cputime", NULL);
+	if (!parent) {
+		pr_err("%s: failed to create proc entry\n", __func__);
+		return -ENOMEM;
+	}
+
+	proc_create_data("remove_uid_range", S_IWUGO, parent, &uid_remove_fops,
+					NULL);
+
+	proc_create_data("show_uid_stat", S_IRUGO, parent, &uid_stat_fops,
+					NULL);
+
+	profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+	return 0;
+}
+
+early_initcall(proc_uid_cputime_init);
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 5562308..6142ec1 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -68,3 +68,15 @@
 
 	  This driver is only of interest to those developing or
 	  testing a host driver. Most people should say N here.
+
+config MMC_SIMULATE_MAX_SPEED
+	bool "Turn on maximum speed control per block device"
+	depends on MMC_BLOCK
+	help
+	  Say Y here to enable MMC device speed limiting. Used to test and
+	  simulate the behavior of the system when confronted with a slow MMC.
+
+	  Enables the max_read_speed, max_write_speed and cache_size block
+	  device attributes, which cap the maximum read or write rate in KB/s
+	  and simulate a limited amount of high speed memory or SLC cache.
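+
+	  For example, writing a value to the per-disk sysfs attribute caps
+	  the rate (the path below is illustrative and depends on the disk
+	  name):
+
+	    echo 1024 > /sys/block/mmcblk0/max_write_speed
+
+	  Writing "off" or 0 removes the limit again.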
+
+	  If unsure, say N here.
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 709a872..817fcf8 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -287,6 +287,250 @@
 	return ret;
 }
 
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+
+static int max_read_speed, max_write_speed, cache_size = 4;
+
+module_param(max_read_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_read_speed, "maximum KB/s read speed 0=off");
+module_param(max_write_speed, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(max_write_speed, "maximum KB/s write speed 0=off");
+module_param(cache_size, int, S_IRUSR | S_IRGRP);
+MODULE_PARM_DESC(cache_size, "MB high speed memory or SLC cache");
+
+/*
+ * helper macros and expectations:
+ *  size    - unsigned long number of bytes
+ *  jiffies - unsigned long HZ timestamp difference
+ *  speed   - unsigned KB/s transfer rate
+ */
+#define size_and_speed_to_jiffies(size, speed) \
+		((size) * HZ / (speed) / 1024UL)
+#define jiffies_and_speed_to_size(jiffies, speed) \
+		(((speed) * (jiffies) * 1024UL) / HZ)
+#define jiffies_and_size_to_speed(jiffies, size) \
+		((size) * HZ / (jiffies) / 1024UL)
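+
+/*
+ * Worked example (numbers are illustrative): writing 1 MiB with
+ * max_write_speed set to 1024 KB/s gives
+ *	size_and_speed_to_jiffies(1048576, 1024) = 1048576 * HZ / 1024 / 1024
+ * = HZ, i.e. the request is held back until roughly one second has elapsed.
+ */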
+
+/* Limits to report warning */
+/* jiffies_and_size_to_speed(10*HZ, queue_max_hw_sectors(q) * 512UL) ~ 25 */
+#define MIN_SPEED(q) 250 /* 10 times faster than a floppy disk */
+#define MAX_SPEED(q) jiffies_and_size_to_speed(1, queue_max_sectors(q) * 512UL)
+
+#define speed_valid(speed) ((speed) > 0)
+
+static const char off[] = "off\n";
+
+static int max_speed_show(int speed, char *buf)
+{
+	if (speed)
+		return scnprintf(buf, PAGE_SIZE, "%uKB/s\n", speed);
+	else
+		return scnprintf(buf, PAGE_SIZE, off);
+}
+
+static int max_speed_store(const char *buf, struct request_queue *q)
+{
+	unsigned int limit, set = 0;
+
+	if (!strncasecmp(off, buf, sizeof(off) - 2))
+		return set;
+	if (kstrtouint(buf, 0, &set) || (set > INT_MAX))
+		return -EINVAL;
+	if (set == 0)
+		return set;
+	limit = MAX_SPEED(q);
+	if (set > limit)
+		pr_warn("max speed %u ineffective above %u\n", set, limit);
+	limit = MIN_SPEED(q);
+	if (set < limit)
+		pr_warn("max speed %u painful below %u\n", set, limit);
+	return set;
+}
+
+static ssize_t max_write_speed_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int ret = max_speed_show(atomic_read(&md->queue.max_write_speed), buf);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t max_write_speed_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int set = max_speed_store(buf, md->queue.queue);
+
+	if (set < 0) {
+		mmc_blk_put(md);
+		return set;
+	}
+
+	atomic_set(&md->queue.max_write_speed, set);
+	mmc_blk_put(md);
+	return count;
+}
+
+static const DEVICE_ATTR(max_write_speed, S_IRUGO | S_IWUSR,
+	max_write_speed_show, max_write_speed_store);
+
+static ssize_t max_read_speed_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int ret = max_speed_show(atomic_read(&md->queue.max_read_speed), buf);
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t max_read_speed_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	int set = max_speed_store(buf, md->queue.queue);
+
+	if (set < 0) {
+		mmc_blk_put(md);
+		return set;
+	}
+
+	atomic_set(&md->queue.max_read_speed, set);
+	mmc_blk_put(md);
+	return count;
+}
+
+static const DEVICE_ATTR(max_read_speed, S_IRUGO | S_IWUSR,
+	max_read_speed_show, max_read_speed_store);
+
+static ssize_t cache_size_show(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+	struct mmc_queue *mq = &md->queue;
+	int cache_size = atomic_read(&mq->cache_size);
+	int ret;
+
+	if (!cache_size)
+		ret = scnprintf(buf, PAGE_SIZE, off);
+	else {
+		int speed = atomic_read(&mq->max_write_speed);
+
+		if (!speed_valid(speed))
+			ret = scnprintf(buf, PAGE_SIZE, "%uMB\n", cache_size);
+		else { /* We accept race between cache_jiffies and cache_used */
+			unsigned long size = jiffies_and_speed_to_size(
+				jiffies - mq->cache_jiffies, speed);
+			long used = atomic_long_read(&mq->cache_used);
+
+			if (size >= used)
+				size = 0;
+			else
+				size = (used - size) * 100 / cache_size
+					/ 1024UL / 1024UL;
+
+			ret = scnprintf(buf, PAGE_SIZE, "%uMB %lu%% used\n",
+				cache_size, size);
+		}
+	}
+
+	mmc_blk_put(md);
+	return ret;
+}
+
+static ssize_t cache_size_store(struct device *dev,
+				  struct device_attribute *attr,
+				  const char *buf, size_t count)
+{
+	struct mmc_blk_data *md;
+	unsigned int set = 0;
+
+	if (strncasecmp(off, buf, sizeof(off) - 2)
+	 && (kstrtouint(buf, 0, &set) || (set > INT_MAX)))
+		return -EINVAL;
+
+	md = mmc_blk_get(dev_to_disk(dev));
+	atomic_set(&md->queue.cache_size, set);
+	mmc_blk_put(md);
+	return count;
+}
+
+static const DEVICE_ATTR(cache_size, S_IRUGO | S_IWUSR,
+	cache_size_show, cache_size_store);
+
+/* correct for write-back */
+static long mmc_blk_cache_used(struct mmc_queue *mq, unsigned long waitfor)
+{
+	long used = 0;
+	int speed = atomic_read(&mq->max_write_speed);
+
+	if (speed_valid(speed)) {
+		unsigned long size = jiffies_and_speed_to_size(
+					waitfor - mq->cache_jiffies, speed);
+		used = atomic_long_read(&mq->cache_used);
+
+		if (size >= used)
+			used = 0;
+		else
+			used -= size;
+	}
+
+	atomic_long_set(&mq->cache_used, used);
+	mq->cache_jiffies = waitfor;
+
+	return used;
+}
+
+static void mmc_blk_simulate_delay(
+	struct mmc_queue *mq,
+	struct request *req,
+	unsigned long waitfor)
+{
+	int max_speed;
+
+	if (!req)
+		return;
+
+	max_speed = (rq_data_dir(req) == READ)
+		? atomic_read(&mq->max_read_speed)
+		: atomic_read(&mq->max_write_speed);
+	if (speed_valid(max_speed)) {
+		unsigned long bytes = blk_rq_bytes(req);
+
+		if (rq_data_dir(req) != READ) {
+			int cache_size = atomic_read(&mq->cache_size);
+
+			if (cache_size) {
+				unsigned long size = cache_size * 1024L * 1024L;
+				long used = mmc_blk_cache_used(mq, waitfor);
+
+				used += bytes;
+				atomic_long_set(&mq->cache_used, used);
+				bytes = 0;
+				if (used > size)
+					bytes = used - size;
+			}
+		}
+		waitfor += size_and_speed_to_jiffies(bytes, max_speed);
+		if (time_is_after_jiffies(waitfor)) {
+			long msecs = jiffies_to_msecs(waitfor - jiffies);
+
+			if (likely(msecs > 0))
+				msleep(msecs);
+		}
+	}
+}
+
+#else
+
+#define mmc_blk_simulate_delay(mq, req, waitfor)
+
+#endif
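Concretely, with a max speed of 2048 KB/s, a 1 MiB request maps to size_and_speed_to_jiffies(1048576, 2048) = HZ/2 jiffies, so the issuing thread msleep()s until at least roughly 500 ms have elapsed since the request was started, pinning the effective throughput near the configured cap. When cache_size is set, writes are only charged for the bytes that overflow the simulated write-back cache.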
+
 static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
 {
 	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -1284,6 +1528,23 @@
 	if (ret)
 		ret = -EIO;
 
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	else if (atomic_read(&mq->cache_size)) {
+		long used = mmc_blk_cache_used(mq, jiffies);
+
+		if (used) {
+			int speed = atomic_read(&mq->max_write_speed);
+
+			if (speed_valid(speed)) {
+				unsigned long msecs = jiffies_to_msecs(
+					size_and_speed_to_jiffies(
+						used, speed));
+				if (msecs)
+					msleep(msecs);
+			}
+		}
+	}
+#endif
 	blk_end_request_all(req, ret);
 
 	return ret ? 0 : 1;
@@ -1965,6 +2226,9 @@
 	struct mmc_async_req *areq;
 	const u8 packed_nr = 2;
 	u8 reqs = 0;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	unsigned long waitfor = jiffies;
+#endif
 
 	if (!rqc && !mq->mqrq_prev->req)
 		return 0;
@@ -2015,6 +2279,8 @@
 			 */
 			mmc_blk_reset_success(md, type);
 
+			mmc_blk_simulate_delay(mq, rqc, waitfor);
+
 			if (mmc_packed_cmd(mq_rq->cmd_type)) {
 				ret = mmc_blk_end_packed_req(mq_rq);
 				break;
@@ -2437,6 +2703,14 @@
 					card->ext_csd.boot_ro_lockable)
 				device_remove_file(disk_to_dev(md->disk),
 					&md->power_ro_lock);
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+			device_remove_file(disk_to_dev(md->disk),
+						&dev_attr_max_write_speed);
+			device_remove_file(disk_to_dev(md->disk),
+						&dev_attr_max_read_speed);
+			device_remove_file(disk_to_dev(md->disk),
+						&dev_attr_cache_size);
+#endif
 
 			del_gendisk(md->disk);
 		}
@@ -2471,6 +2745,24 @@
 	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
 	if (ret)
 		goto force_ro_fail;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	atomic_set(&md->queue.max_write_speed, max_write_speed);
+	ret = device_create_file(disk_to_dev(md->disk),
+			&dev_attr_max_write_speed);
+	if (ret)
+		goto max_write_speed_fail;
+	atomic_set(&md->queue.max_read_speed, max_read_speed);
+	ret = device_create_file(disk_to_dev(md->disk),
+			&dev_attr_max_read_speed);
+	if (ret)
+		goto max_read_speed_fail;
+	atomic_set(&md->queue.cache_size, cache_size);
+	atomic_long_set(&md->queue.cache_used, 0);
+	md->queue.cache_jiffies = jiffies;
+	ret = device_create_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+	if (ret)
+		goto cache_size_fail;
+#endif
 
 	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
 	     card->ext_csd.boot_ro_lockable) {
@@ -2495,6 +2787,14 @@
 	return ret;
 
 power_ro_lock_fail:
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	device_remove_file(disk_to_dev(md->disk), &dev_attr_cache_size);
+cache_size_fail:
+	device_remove_file(disk_to_dev(md->disk), &dev_attr_max_read_speed);
+max_read_speed_fail:
+	device_remove_file(disk_to_dev(md->disk), &dev_attr_max_write_speed);
+max_write_speed_fail:
+#endif
 	device_remove_file(disk_to_dev(md->disk), &md->force_ro);
 force_ro_fail:
 	del_gendisk(md->disk);
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 8037f73..1810f76 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -19,6 +19,7 @@
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
+#include <linux/sched/rt.h>
 
 #include "queue.h"
 #include "block.h"
@@ -53,6 +54,11 @@
 {
 	struct mmc_queue *mq = d;
 	struct request_queue *q = mq->queue;
+	struct sched_param scheduler_params = {0};
+
+	scheduler_params.sched_priority = 1;
+
+	sched_setscheduler(current, SCHED_FIFO, &scheduler_params);
 
 	current->flags |= PF_MEMALLOC;
 
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 342f1e3..fe58d31 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -62,6 +62,14 @@
 	struct mmc_queue_req	mqrq[2];
 	struct mmc_queue_req	*mqrq_cur;
 	struct mmc_queue_req	*mqrq_prev;
+#ifdef CONFIG_MMC_SIMULATE_MAX_SPEED
+	atomic_t max_write_speed;
+	atomic_t max_read_speed;
+	atomic_t cache_size;
+	/* i/o tracking */
+	atomic_long_t cache_used;
+	unsigned long cache_jiffies;
+#endif
 };
 
 extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 250f223..daad32f 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -22,3 +22,18 @@
 
 	  This driver can also be built as a module. If so, the module
 	  will be called pwrseq_simple.
+
+config MMC_EMBEDDED_SDIO
+	boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+	help
+	  If you say Y here, support will be added for embedded SDIO
+	  devices which do not contain the necessary enumeration
+	  support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+	bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+	help
+	  If you say Y here, the MMC layer will be extra paranoid
+	  about re-trying SD init requests. This can be a useful
+	  work-around for buggy controllers and hardware. Enable
+	  if you are experiencing issues with SD detection.
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 2553d90..40ddc3e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -201,6 +201,19 @@
 			pr_debug("%s:     %d bytes transferred: %d\n",
 				mmc_hostname(host),
 				mrq->data->bytes_xfered, mrq->data->error);
+#ifdef CONFIG_BLOCK
+			if (mrq->lat_hist_enabled) {
+				ktime_t completion;
+				u_int64_t delta_us;
+
+				completion = ktime_get();
+				delta_us = ktime_us_delta(completion,
+							  mrq->io_start);
+				blk_update_latency_hist(&host->io_lat_s,
+					(mrq->data->flags & MMC_DATA_READ),
+					delta_us);
+			}
+#endif
 		}
 
 		if (mrq->stop) {
@@ -699,8 +712,16 @@
 		}
 	}
 
-	if (!err && areq)
+	if (!err && areq) {
+#ifdef CONFIG_BLOCK
+		if (host->latency_hist_enabled) {
+			areq->mrq->io_start = ktime_get();
+			areq->mrq->lat_hist_enabled = 1;
+		} else
+			areq->mrq->lat_hist_enabled = 0;
+#endif
 		start_err = __mmc_start_data_req(host, areq->mrq);
+	}
 
 	if (host->areq)
 		mmc_post_req(host, host->areq->mrq, 0);
@@ -2051,7 +2072,7 @@
 }
 
 static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card,
-				          unsigned int arg, unsigned int qty)
+					  unsigned int arg, unsigned int qty)
 {
 	unsigned int erase_timeout;
 
@@ -3026,6 +3047,22 @@
 	init_waitqueue_head(&host->context_info.wait);
 }
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				struct sdio_cis *cis,
+				struct sdio_cccr *cccr,
+				struct sdio_embedded_func *funcs,
+				int num_funcs)
+{
+	host->embedded_sdio_data.cis = cis;
+	host->embedded_sdio_data.cccr = cccr;
+	host->embedded_sdio_data.funcs = funcs;
+	host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
 static int __init mmc_init(void)
 {
 	int ret;
@@ -3058,6 +3095,56 @@
 	mmc_unregister_bus();
 }
 
+#ifdef CONFIG_BLOCK
+static ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+
+	return blk_latency_hist_show(&host->io_lat_s, buf);
+}
+
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct mmc_host *host = cls_dev_to_mmc_host(dev);
+	long value;
+
+	if (kstrtol(buf, 0, &value))
+		return -EINVAL;
+	if (value == BLK_IO_LAT_HIST_ZERO)
+		blk_zero_latency_hist(&host->io_lat_s);
+	else if (value == BLK_IO_LAT_HIST_ENABLE ||
+		 value == BLK_IO_LAT_HIST_DISABLE)
+		host->latency_hist_enabled = value;
+	return count;
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+		   latency_hist_show, latency_hist_store);
+
+void
+mmc_latency_hist_sysfs_init(struct mmc_host *host)
+{
+	if (device_create_file(&host->class_dev, &dev_attr_latency_hist))
+		dev_err(&host->class_dev,
+			"Failed to create latency_hist sysfs entry\n");
+}
+
+void
+mmc_latency_hist_sysfs_exit(struct mmc_host *host)
+{
+	device_remove_file(&host->class_dev, &dev_attr_latency_hist);
+}
+#endif
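The latency_hist store handler above only toggles host->latency_hist_enabled or zeros host->io_lat_s; the samples themselves are fed from the request-completion path added earlier in this patch (ktime_us_delta() of io_start against the completion time, passed to blk_update_latency_hist()). A minimal sketch of the same "zero, then enable" sequence done from kernel code instead of the sysfs node, using only symbols this patch already references (the helper name is hypothetical):

/* hypothetical helper mirroring "echo 2; echo 1" into the latency_hist node */
static void example_restart_latency_hist(struct mmc_host *host)
{
	blk_zero_latency_hist(&host->io_lat_s);			/* value 2 */
	host->latency_hist_enabled = BLK_IO_LAT_HIST_ENABLE;	/* value 1 */
}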
+
 subsys_initcall(mmc_init);
 module_exit(mmc_exit);
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 98f25ff..60a642a 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -31,8 +31,6 @@
 #include "slot-gpio.h"
 #include "pwrseq.h"
 
-#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
-
 static DEFINE_IDA(mmc_host_ida);
 static DEFINE_SPINLOCK(mmc_host_lock);
 
@@ -430,8 +428,13 @@
 	mmc_add_host_debugfs(host);
 #endif
 
+#ifdef CONFIG_BLOCK
+	mmc_latency_hist_sysfs_init(host);
+#endif
+
 	mmc_start_host(host);
-	mmc_register_pm_notifier(host);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		mmc_register_pm_notifier(host);
 
 	return 0;
 }
@@ -448,13 +451,19 @@
  */
 void mmc_remove_host(struct mmc_host *host)
 {
-	mmc_unregister_pm_notifier(host);
+	if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+		mmc_unregister_pm_notifier(host);
+
 	mmc_stop_host(host);
 
 #ifdef CONFIG_DEBUG_FS
 	mmc_remove_host_debugfs(host);
 #endif
 
+#ifdef CONFIG_BLOCK
+	mmc_latency_hist_sysfs_exit(host);
+#endif
+
 	device_del(&host->class_dev);
 
 	led_trigger_unregister_simple(host->led);
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 992bf53..bf38533 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -12,6 +12,8 @@
 #define _MMC_CORE_HOST_H
 #include <linux/mmc/host.h>
 
+#define cls_dev_to_mmc_host(d)	container_of(d, struct mmc_host, class_dev)
+
 int mmc_register_host_class(void);
 void mmc_unregister_host_class(void);
 
@@ -21,5 +23,8 @@
 void mmc_retune_release(struct mmc_host *host);
 int mmc_retune(struct mmc_host *host);
 
+void mmc_latency_hist_sysfs_init(struct mmc_host *host);
+void mmc_latency_hist_sysfs_exit(struct mmc_host *host);
+
 #endif
 
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 73c762a..fa7ecd1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -839,6 +839,9 @@
 	bool reinit)
 {
 	int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	if (!reinit) {
 		/*
@@ -865,7 +868,26 @@
 		/*
 		 * Fetch switch information from card.
 		 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+		for (retries = 1; retries <= 3; retries++) {
+			err = mmc_read_switch(card);
+			if (!err) {
+				if (retries > 1) {
+					printk(KERN_WARNING
+					       "%s: recovered\n",
+					       mmc_hostname(host));
+				}
+				break;
+			} else {
+				printk(KERN_WARNING
+				       "%s: read switch failed (attempt %d)\n",
+				       mmc_hostname(host), retries);
+			}
+		}
+#else
 		err = mmc_read_switch(card);
+#endif
+
 		if (err)
 			return err;
 	}
@@ -1063,7 +1085,10 @@
  */
 static void mmc_sd_detect(struct mmc_host *host)
 {
-	int err;
+	int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries = 5;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
@@ -1073,7 +1098,23 @@
 	/*
 	 * Just check if our card has been removed.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	while(retries) {
+		err = mmc_send_status(host->card, NULL);
+		if (err) {
+			retries--;
+			udelay(5);
+			continue;
+		}
+		break;
+	}
+	if (!retries) {
+		printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+		       __func__, mmc_hostname(host), err);
+	}
+#else
 	err = _mmc_detect_card_removed(host);
+#endif
 
 	mmc_put_card(host->card);
 
@@ -1135,6 +1176,9 @@
 static int _mmc_sd_resume(struct mmc_host *host)
 {
 	int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	BUG_ON(!host->card);
@@ -1145,7 +1189,23 @@
 		goto out;
 
 	mmc_power_up(host, host->card->ocr);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, host->card->ocr, host->card);
+
+		if (err) {
+			printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+			       mmc_hostname(host), err, retries);
+			mdelay(5);
+			retries--;
+			continue;
+		}
+		break;
+	}
+#else
 	err = mmc_sd_init_card(host, host->card->ocr, host->card);
+#endif
 	mmc_card_clr_suspended(host->card);
 
 out:
@@ -1220,6 +1280,9 @@
 {
 	int err;
 	u32 ocr, rocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	int retries;
+#endif
 
 	BUG_ON(!host);
 	WARN_ON(!host->claimed);
@@ -1256,9 +1319,27 @@
 	/*
 	 * Detect and init the card.
 	 */
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+	retries = 5;
+	while (retries) {
+		err = mmc_sd_init_card(host, rocr, NULL);
+		if (err) {
+			retries--;
+			continue;
+		}
+		break;
+	}
+
+	if (!retries) {
+		printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+		       mmc_hostname(host), err);
+		goto err;
+	}
+#else
 	err = mmc_sd_init_card(host, rocr, NULL);
 	if (err)
 		goto err;
+#endif
 
 	mmc_release_host(host);
 	err = mmc_add_card(host->card);
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index bd44ba8..b5ec3c8 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -10,6 +10,7 @@
  */
 
 #include <linux/err.h>
+#include <linux/module.h>
 #include <linux/pm_runtime.h>
 
 #include <linux/mmc/host.h>
@@ -21,6 +22,7 @@
 
 #include "core.h"
 #include "bus.h"
+#include "host.h"
 #include "sd.h"
 #include "sdio_bus.h"
 #include "mmc_ops.h"
@@ -28,6 +30,10 @@
 #include "sdio_ops.h"
 #include "sdio_cis.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
 static int sdio_read_fbr(struct sdio_func *func)
 {
 	int ret;
@@ -697,19 +703,35 @@
 		goto finish;
 	}
 
-	/*
-	 * Read the common registers.
-	 */
-	err = sdio_read_cccr(card, ocr);
-	if (err)
-		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cccr)
+		memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+	else {
+#endif
+		/*
+		 * Read the common registers.
+		 */
+		err = sdio_read_cccr(card,  ocr);
+		if (err)
+			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
-	/*
-	 * Read the common CIS tuples.
-	 */
-	err = sdio_read_common_cis(card);
-	if (err)
-		goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.cis)
+		memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+	else {
+#endif
+		/*
+		 * Read the common CIS tuples.
+		 */
+		err = sdio_read_common_cis(card);
+		if (err)
+			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	}
+#endif
 
 	if (oldcard) {
 		int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -1118,14 +1140,36 @@
 	funcs = (ocr & 0x70000000) >> 28;
 	card->sdio_funcs = 0;
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	if (host->embedded_sdio_data.funcs)
+		card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
 	/*
 	 * Initialize (but don't add) all present functions.
 	 */
 	for (i = 0; i < funcs; i++, card->sdio_funcs++) {
-		err = sdio_init_func(host->card, i + 1);
-		if (err)
-			goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		if (host->embedded_sdio_data.funcs) {
+			struct sdio_func *tmp;
 
+			tmp = sdio_alloc_func(host->card);
+			if (IS_ERR(tmp))
+				goto remove;
+			tmp->num = (i + 1);
+			card->sdio_func[i] = tmp;
+			tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+			tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+			tmp->vendor = card->cis.vendor;
+			tmp->device = card->cis.device;
+		} else {
+#endif
+			err = sdio_init_func(host->card, i + 1);
+			if (err)
+				goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+		}
+#endif
 		/*
 		 * Enable Runtime PM for this func (if supported)
 		 */
@@ -1173,3 +1217,42 @@
 	return err;
 }
 
+int sdio_reset_comm(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	u32 ocr;
+	u32 rocr;
+	int err;
+
+	printk("%s():\n", __func__);
+	mmc_claim_host(host);
+
+	mmc_retune_disable(host);
+
+	mmc_go_idle(host);
+
+	mmc_set_clock(host, host->f_min);
+
+	err = mmc_send_io_op_cond(host, 0, &ocr);
+	if (err)
+		goto err;
+
+	rocr = mmc_select_voltage(host, ocr);
+	if (!rocr) {
+		err = -EINVAL;
+		goto err;
+	}
+
+	err = mmc_sdio_init_card(host, rocr, card, 0);
+	if (err)
+		goto err;
+
+	mmc_release_host(host);
+	return 0;
+err:
+	printk("%s: Error resetting SDIO communications (%d)\n",
+	       mmc_hostname(host), err);
+	mmc_release_host(host);
+	return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
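sdio_reset_comm() is exported so SDIO function drivers (typically embedded WLAN chips that reset themselves without being physically removed) can re-run bus initialization against the existing mmc_card. A hedged usage sketch; the recovery path and function names are hypothetical, and only sdio_reset_comm() plus the standard sdio_* host/function helpers are assumed:

/* hypothetical recovery path in an SDIO function driver */
static int example_wlan_reinit_bus(struct sdio_func *func)
{
	int err = sdio_reset_comm(func->card);	/* re-run idle/IO_OP_COND/init */

	if (err)
		return err;

	sdio_claim_host(func);
	err = sdio_enable_func(func);		/* re-enable this function */
	sdio_release_host(func);
	return err;
}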
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 86f5b32..1499d53 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -28,6 +28,10 @@
 #include "sdio_cis.h"
 #include "sdio_bus.h"
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
 #define to_sdio_driver(d)	container_of(d, struct sdio_driver, drv)
 
 /* show configuration fields */
@@ -263,7 +267,14 @@
 {
 	struct sdio_func *func = dev_to_sdio_func(dev);
 
-	sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	/*
+	 * If this device is embedded then we never allocated
+	 * cis tables for this func
+	 */
+	if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+		sdio_free_func_cis(func);
 
 	kfree(func->info);
 
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 406e5f0..3734cba 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -390,6 +390,39 @@
 EXPORT_SYMBOL_GPL(sdio_readb);
 
 /**
+ *	sdio_readb_ext - read a single byte from a SDIO function
+ *	@func: SDIO function to access
+ *	@addr: address to read
+ *	@err_ret: optional status value from transfer
+ *	@in: value to add to argument
+ *
+ *	Reads a single byte from the address space of a given SDIO
+ *	function. If there is a problem reading the address, 0xff
+ *	is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+	int *err_ret, unsigned in)
+{
+	int ret;
+	unsigned char val;
+
+	BUG_ON(!func);
+
+	if (err_ret)
+		*err_ret = 0;
+
+	ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+	if (ret) {
+		if (err_ret)
+			*err_ret = ret;
+		return 0xFF;
+	}
+
+	return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
+/**
  *	sdio_writeb - write a single byte to a SDIO function
  *	@func: SDIO function to access
  *	@b: byte to write
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7b7a887..bfa587d 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,3 +1,10 @@
+config MTD_NAND_IDS
+	tristate "Include chip ids for known NAND devices."
+	depends on MTD
+	help
+	  Useful for NAND drivers that do not use the NAND subsystem but
+	  would still like to take advantage of the known chip information.
+
 config MTD_NAND_ECC
 	tristate
 
@@ -109,9 +116,6 @@
 config MTD_NAND_OMAP_BCH_BUILD
 	def_tristate MTD_NAND_OMAP2 && MTD_NAND_OMAP_BCH
 
-config MTD_NAND_IDS
-	tristate
-
 config MTD_NAND_RICOH
 	tristate "Ricoh xD card reader"
 	default n
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 8cc7467..9d91f96 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -112,6 +112,7 @@
 source "drivers/net/ethernet/micrel/Kconfig"
 source "drivers/net/ethernet/microchip/Kconfig"
 source "drivers/net/ethernet/moxa/Kconfig"
+source "drivers/net/ethernet/msm/Kconfig"
 source "drivers/net/ethernet/myricom/Kconfig"
 
 config FEALNX
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index a09423d..b31cbc2 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -52,6 +52,7 @@
 obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/
 obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/
 obj-$(CONFIG_NET_VENDOR_MOXART) += moxa/
+obj-$(CONFIG_ARCH_QCOM) += msm/
 obj-$(CONFIG_NET_VENDOR_MYRI) += myricom/
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_NET_VENDOR_NATSEMI) += natsemi/
diff --git a/drivers/net/ethernet/msm/Kconfig b/drivers/net/ethernet/msm/Kconfig
new file mode 100644
index 0000000..586e03e
--- /dev/null
+++ b/drivers/net/ethernet/msm/Kconfig
@@ -0,0 +1,22 @@
+#
+# msm network device configuration
+#
+
+config ECM_IPA
+	tristate "STD ECM LAN Driver support"
+	depends on IPA || IPA3
+	help
+	  Enables a LAN between the applications processor and a tethered
+	  host using the STD ECM protocol.
+	  This network interface allows the data path to go through the
+	  IPA core while using the STD ECM protocol.
+
+config RNDIS_IPA
+	tristate "RNDIS_IPA Network Interface Driver support"
+	depends on IPA || IPA3
+	help
+	  Enables a LAN between the applications processor and a tethered
+	  host using the RNDIS protocol.
+	  This network interface allows the data path to go through the
+	  IPA core while using the RNDIS protocol.
+
diff --git a/drivers/net/ethernet/msm/Makefile b/drivers/net/ethernet/msm/Makefile
new file mode 100644
index 0000000..ec2699a
--- /dev/null
+++ b/drivers/net/ethernet/msm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the msm networking support.
+#
+
+obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
+obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o
diff --git a/drivers/net/ethernet/msm/ecm_ipa.c b/drivers/net/ethernet/msm/ecm_ipa.c
new file mode 100644
index 0000000..ebc424b
--- /dev/null
+++ b/drivers/net/ethernet/msm/ecm_ipa.c
@@ -0,0 +1,1739 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/ecm_ipa.h>
+
+#define DRIVER_NAME "ecm_ipa"
+#define ECM_IPA_IPV4_HDR_NAME "ecm_eth_ipv4"
+#define ECM_IPA_IPV6_HDR_NAME "ecm_eth_ipv6"
+#define INACTIVITY_MSEC_DELAY 100
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+#define TX_TIMEOUT (5 * HZ)
+
+#define ECM_IPA_DEBUG(fmt, args...) \
+	pr_debug("ctx:%s: "\
+			fmt, current->comm, ## args)
+
+#define ECM_IPA_INFO(fmt, args...) \
+	pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args)
+
+#define ECM_IPA_ERROR(fmt, args...) \
+	pr_err(DRIVER_NAME "@%s@%d@ctx:%s: "\
+			fmt, __func__, __LINE__, current->comm, ## args)
+
+#define NULL_CHECK(ptr) \
+	do { \
+		if (!(ptr)) { \
+			ECM_IPA_ERROR("null pointer " #ptr "\n"); \
+			ret = -EINVAL; \
+		} \
+	} \
+	while (0)
+
+#define ECM_IPA_LOG_ENTRY() ECM_IPA_DEBUG("begin\n")
+#define ECM_IPA_LOG_EXIT() ECM_IPA_DEBUG("end\n")
+
+/**
+ * enum ecm_ipa_state - specify the current driver internal state
+ *  which is guarded by a state machine.
+ *
+ * The driver's internal state changes in response to its external API usage.
+ * The driver saves its internal state to guard against illegal call
+ * sequences by the caller.
+ * States:
+ * UNLOADED is the first (default) state, and also the state reached
+ *  after the driver gets unloaded (cleanup).
+ * INITIALIZED is the driver state once it has finished registering
+ *  the network device and all internal data structures were initialized.
+ * CONNECTED is the driver state once the USB pipes were connected to IPA.
+ * UP is the driver state after the interface was brought up but the
+ *  pipes are not connected yet - this is a meta-stable state.
+ * CONNECTED_AND_UP is the driver state when the pipes were connected and
+ *  the interface got an UP request from the network stack. This is the
+ *  driver's idle operating state, in which it may transmit/receive data.
+ * INVALID is a state which is not allowed.
+ */
+enum ecm_ipa_state {
+	ECM_IPA_UNLOADED = 0,
+	ECM_IPA_INITIALIZED,
+	ECM_IPA_CONNECTED,
+	ECM_IPA_UP,
+	ECM_IPA_CONNECTED_AND_UP,
+	ECM_IPA_INVALID,
+};
+
+/**
+ * enum ecm_ipa_operation - enumerations used to describe the API operations
+ *
+ * These values are used as input to the driver's state machine.
+ */
+enum ecm_ipa_operation {
+	ECM_IPA_INITIALIZE,
+	ECM_IPA_CONNECT,
+	ECM_IPA_OPEN,
+	ECM_IPA_STOP,
+	ECM_IPA_DISCONNECT,
+	ECM_IPA_CLEANUP,
+};
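The two enums above, together with the state descriptions, define a small state machine; its transition function ecm_ipa_next_state() is declared below but its body is outside this hunk. A sketch of the documented happy path only (an illustrative helper, not the driver's actual transition table, which also rejects out-of-order STOP/DISCONNECT/CLEANUP calls):

/* illustration of the documented happy-path transitions only */
static enum ecm_ipa_state ecm_ipa_happy_path(enum ecm_ipa_state cur,
					     enum ecm_ipa_operation op)
{
	if (cur == ECM_IPA_UNLOADED && op == ECM_IPA_INITIALIZE)
		return ECM_IPA_INITIALIZED;
	if (cur == ECM_IPA_INITIALIZED && op == ECM_IPA_CONNECT)
		return ECM_IPA_CONNECTED;
	if (cur == ECM_IPA_INITIALIZED && op == ECM_IPA_OPEN)
		return ECM_IPA_UP;		/* meta-stable: up before pipes */
	if (cur == ECM_IPA_CONNECTED && op == ECM_IPA_OPEN)
		return ECM_IPA_CONNECTED_AND_UP;
	if (cur == ECM_IPA_UP && op == ECM_IPA_CONNECT)
		return ECM_IPA_CONNECTED_AND_UP;
	return ECM_IPA_INVALID;			/* everything else is rejected */
}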
+
+#define ECM_IPA_STATE_DEBUG(ecm_ipa_ctx) \
+	(ECM_IPA_DEBUG("Driver state - %s\n",\
+	ecm_ipa_state_string((ecm_ipa_ctx)->state)))
+
+/**
+ * struct ecm_ipa_dev - main driver context parameters
+ * @net: network interface struct implemented by this driver
+ * @directory: debugfs directory for various debugging switches
+ * @tx_enable: flag that enables/disables the Tx path to IPA
+ * @rx_enable: flag that enables/disables the Rx path to IPA
+ * @rm_enable: flag that enables/disables Resource Manager requests prior to Tx
+ * @dma_enable: flag that allows on-the-fly DMA mode for IPA
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: save handle for IPA pipe operations
+ * @ipa_to_usb_hdl: save handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without a TX-complete ACK
+ * @outstanding_high: maximum number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets at which the netdev
+ *  queue is restarted (after it was stopped because outstanding_high
+ *  was reached)
+ * @state: current state of ecm_ipa driver
+ * @device_ready_notify: callback supplied by the USB core driver.
+ * This callback shall be called by the netdev once its internal
+ * state changes to ECM_IPA_CONNECTED_AND_UP
+ * @ipa_to_usb_client: consumer client
+ * @usb_to_ipa_client: producer client
+ * @ipa_rm_resource_name_prod: IPA resource manager producer resource
+ * @ipa_rm_resource_name_cons: IPA resource manager consumer resource
+ */
+struct ecm_ipa_dev {
+	struct net_device *net;
+	u32 tx_enable;
+	u32 rx_enable;
+	u32  rm_enable;
+	bool dma_enable;
+	struct dentry *directory;
+	u32 eth_ipv4_hdr_hdl;
+	u32 eth_ipv6_hdr_hdl;
+	u32 usb_to_ipa_hdl;
+	u32 ipa_to_usb_hdl;
+	atomic_t outstanding_pkts;
+	u8 outstanding_high;
+	u8 outstanding_low;
+	enum ecm_ipa_state state;
+	void (*device_ready_notify)(void);
+	enum ipa_client_type ipa_to_usb_client;
+	enum ipa_client_type usb_to_ipa_client;
+	enum ipa_rm_resource_name ipa_rm_resource_name_prod;
+	enum ipa_rm_resource_name ipa_rm_resource_name_cons;
+};
+
+static int ecm_ipa_open(struct net_device *net);
+static void ecm_ipa_packet_receive_notify
+	(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+static void ecm_ipa_tx_complete_notify
+	(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+static void ecm_ipa_tx_timeout(struct net_device *net);
+static int ecm_ipa_stop(struct net_device *net);
+static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_rules_cfg
+	(struct ecm_ipa_dev *ecm_ipa_ctx, const void *dst_mac,
+		const void *src_mac);
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_deregister_properties(void);
+static void ecm_ipa_rm_notify
+	(void *user_data, enum ipa_rm_event event, unsigned long data);
+static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net);
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx);
+static bool rx_filter(struct sk_buff *skb);
+static bool tx_filter(struct sk_buff *skb);
+static bool rm_enabled(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx);
+static netdev_tx_t ecm_ipa_start_xmit
+	(struct sk_buff *skb, struct net_device *net);
+static int ecm_ipa_debugfs_stall_open
+	(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_stall_write
+	(struct file *file, const char __user *buf, size_t count,
+		loff_t *ppos);
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_enable_write_dma
+	(struct file *file, const char __user *buf, size_t count,
+		loff_t *ppos);
+static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file);
+static ssize_t ecm_ipa_debugfs_enable_write
+	(struct file *file, const char __user *buf, size_t count,
+		loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_enable_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ecm_ipa_debugfs_atomic_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos);
+static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx);
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx);
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl);
+static int ecm_ipa_ep_registers_dma_cfg
+	(u32 usb_to_ipa_hdl, enum ipa_client_type prod_client);
+static int ecm_ipa_set_device_ethernet_addr
+	(u8 *dev_ethaddr, u8 device_ethaddr[]);
+static enum ecm_ipa_state ecm_ipa_next_state
+	(enum ecm_ipa_state current_state, enum ecm_ipa_operation operation);
+static const char *ecm_ipa_state_string(enum ecm_ipa_state state);
+static int ecm_ipa_init_module(void);
+static void ecm_ipa_cleanup_module(void);
+
+static const struct net_device_ops ecm_ipa_netdev_ops = {
+	.ndo_open		= ecm_ipa_open,
+	.ndo_stop		= ecm_ipa_stop,
+	.ndo_start_xmit = ecm_ipa_start_xmit,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_tx_timeout = ecm_ipa_tx_timeout,
+	.ndo_get_stats = ecm_ipa_get_stats,
+};
+
+const struct file_operations ecm_ipa_debugfs_dma_ops = {
+	.open = ecm_ipa_debugfs_dma_open,
+	.read = ecm_ipa_debugfs_enable_read,
+	.write = ecm_ipa_debugfs_enable_write_dma,
+};
+
+const struct file_operations ecm_ipa_debugfs_atomic_ops = {
+	.open = ecm_ipa_debugfs_atomic_open,
+	.read = ecm_ipa_debugfs_atomic_read,
+};
+
+const struct file_operations ecm_ipa_debugfs_stall_ops = {
+	.open = ecm_ipa_debugfs_stall_open,
+	.write = ecm_ipa_debugfs_stall_write,
+};
+
+static void ecm_ipa_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/**
+ * ecm_ipa_init() - create the network device and initialize internal
+ *  data structures
+ * @params: in/out parameters required for ecm_ipa initialization
+ *
+ * Shall be called prior to pipe connection.
+ * The out parameters (the callbacks) shall be supplied to ipa_connect.
+ * Detailed description:
+ *  - allocate the network device
+ *  - set default values for driver internals
+ *  - create debugfs folder and files
+ *  - create IPA resource manager client
+ *  - add header insertion rules for IPA driver (based on host/device
+ *    Ethernet addresses given in input params)
+ *  - register Tx/Rx properties with the IPA driver (to be used later
+ *    by the IPA configuration manager to configure the rest of the IPA rules)
+ *  - set the carrier state to "off" (until ecm_ipa_connect is called)
+ *  - register the network device
+ *  - set the out parameters
+ *
+ * Returns negative errno, or zero on success
+ */
+int ecm_ipa_init(struct ecm_ipa_params *params)
+{
+	int result = 0;
+	struct net_device *net;
+	struct ecm_ipa_dev *ecm_ipa_ctx;
+	int ret;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ECM_IPA_DEBUG("%s initializing\n", DRIVER_NAME);
+	ret = 0;
+	NULL_CHECK(params);
+	if (ret)
+		return ret;
+
+	ECM_IPA_DEBUG
+		("host_ethaddr=%pM, device_ethaddr=%pM\n",
+		params->host_ethaddr,
+		params->device_ethaddr);
+
+	net = alloc_etherdev(sizeof(struct ecm_ipa_dev));
+	if (!net) {
+		result = -ENOMEM;
+		ECM_IPA_ERROR("fail to allocate etherdev\n");
+		goto fail_alloc_etherdev;
+	}
+	ECM_IPA_DEBUG("network device was successfully allocated\n");
+
+	ecm_ipa_ctx = netdev_priv(net);
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("fail to extract netdev priv\n");
+		result = -ENOMEM;
+		goto fail_netdev_priv;
+	}
+	memset(ecm_ipa_ctx, 0, sizeof(*ecm_ipa_ctx));
+	ECM_IPA_DEBUG("ecm_ipa_ctx (private) = %p\n", ecm_ipa_ctx);
+
+	ecm_ipa_ctx->net = net;
+	ecm_ipa_ctx->tx_enable = true;
+	ecm_ipa_ctx->rx_enable = true;
+	ecm_ipa_ctx->rm_enable = true;
+	ecm_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	ecm_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+	snprintf(net->name, sizeof(net->name), "%s%%d", "ecm");
+	net->netdev_ops = &ecm_ipa_netdev_ops;
+	net->watchdog_timeo = TX_TIMEOUT;
+	ECM_IPA_DEBUG("internal data structures were initialized\n");
+
+	if (!params->device_ready_notify)
+		ECM_IPA_DEBUG("device_ready_notify() was not supplied");
+	ecm_ipa_ctx->device_ready_notify = params->device_ready_notify;
+
+	result = ecm_ipa_debugfs_init(ecm_ipa_ctx);
+	if (result)
+		goto fail_debugfs;
+	ECM_IPA_DEBUG("debugfs entries were created\n");
+
+	result = ecm_ipa_set_device_ethernet_addr
+		(net->dev_addr, params->device_ethaddr);
+	if (result) {
+		ECM_IPA_ERROR("set device MAC failed\n");
+		goto fail_set_device_ethernet;
+	}
+	ECM_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
+
+	result = ecm_ipa_rules_cfg
+		(ecm_ipa_ctx, params->host_ethaddr, params->device_ethaddr);
+	if (result) {
+		ECM_IPA_ERROR("fail on ipa rules set\n");
+		goto fail_rules_cfg;
+	}
+	ECM_IPA_DEBUG("Ethernet header insertion set\n");
+
+	netif_carrier_off(net);
+	ECM_IPA_DEBUG("netif_carrier_off() was called\n");
+
+	netif_stop_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("netif_stop_queue() was called");
+
+	result = register_netdev(net);
+	if (result) {
+		ECM_IPA_ERROR("register_netdev failed: %d\n", result);
+		goto fail_register_netdev;
+	}
+	ECM_IPA_DEBUG("register_netdev succeeded\n");
+
+	params->ecm_ipa_rx_dp_notify = ecm_ipa_packet_receive_notify;
+	params->ecm_ipa_tx_dp_notify = ecm_ipa_tx_complete_notify;
+	params->private = (void *)ecm_ipa_ctx;
+	params->skip_ep_cfg = false;
+	ecm_ipa_ctx->state = ECM_IPA_INITIALIZED;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	ECM_IPA_INFO("ECM_IPA was initialized successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_register_netdev:
+	ecm_ipa_rules_destroy(ecm_ipa_ctx);
+fail_set_device_ethernet:
+fail_rules_cfg:
+	ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
+fail_debugfs:
+fail_netdev_priv:
+	free_netdev(net);
+fail_alloc_etherdev:
+	return result;
+}
+EXPORT_SYMBOL(ecm_ipa_init);
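For context, a hedged sketch of the expected call order from a USB gadget/function driver, based only on the struct ecm_ipa_params fields this file touches (the ethaddr fields are assumed to be ETH_ALEN byte arrays, as the %pM prints above suggest; the surrounding gadget code and handle variables are hypothetical):

/* hypothetical caller, illustration only */
static int example_bring_up_ecm(const u8 *host_mac, const u8 *dev_mac,
				u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
{
	struct ecm_ipa_params params = { 0 };
	int ret;

	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);
	params.device_ready_notify = NULL;	/* optional callback */

	ret = ecm_ipa_init(&params);		/* UNLOADED -> INITIALIZED */
	if (ret)
		return ret;

	/* params.ecm_ipa_rx_dp_notify / ecm_ipa_tx_dp_notify and
	 * params.private are handed to the IPA/USB pipe setup; once the
	 * pipes exist, complete the data path:
	 */
	return ecm_ipa_connect(usb_to_ipa_hdl, ipa_to_usb_hdl, params.private);
}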
+
+/**
+ * ecm_ipa_connect() - notify ecm_ipa of the IPA<->USB pipe connection
+ * @usb_to_ipa_hdl: handle of IPA driver client for USB->IPA
+ * @ipa_to_usb_hdl: handle of IPA driver client for IPA->USB
+ * @priv: same value that was set by ecm_ipa_init(), this
+ *  parameter holds the network device pointer.
+ *
+ * Once the USB driver finishes the pipe connection between the IPA core
+ * and the USB core, this method shall be called in order to
+ * allow ecm_ipa to complete the data path configuration.
+ * The caller should make sure that it is calling this function
+ * from a context that allows it to handle device_ready_notify().
+ * Detailed description:
+ *  - configure the IPA end-point registers
+ *  - notify the Linux kernel of "carrier_on"
+ *  After this function is done the driver state changes to "Connected".
+ *  This API is expected to be called after ecm_ipa_init() or
+ *  after a call to ecm_ipa_disconnect.
+ */
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl, void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+	struct ipa_ecm_msg *ecm_msg;
+	struct ipa_msg_meta msg_meta;
+	int retval;
+	int ret;
+
+	ECM_IPA_LOG_ENTRY();
+	ret = 0;
+	NULL_CHECK(priv);
+	if (ret)
+		return ret;
+	ECM_IPA_DEBUG("usb_to_ipa_hdl = %d, ipa_to_usb_hdl = %d, priv=0x%p\n",
+		      usb_to_ipa_hdl, ipa_to_usb_hdl, priv);
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CONNECT);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't call connect before calling initialize\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	if (!ipa_is_client_handle_valid(usb_to_ipa_hdl)) {
+		ECM_IPA_ERROR
+			("usb_to_ipa_hdl(%d) is not a valid ipa handle\n",
+			usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	if (!ipa_is_client_handle_valid(ipa_to_usb_hdl)) {
+		ECM_IPA_ERROR
+			("ipa_to_usb_hdl(%d) is not a valid ipa handle\n",
+			ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+
+	ecm_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
+	ecm_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
+
+	ecm_ipa_ctx->ipa_to_usb_client = ipa_get_client_mapping(ipa_to_usb_hdl);
+	if (ecm_ipa_ctx->ipa_to_usb_client < 0) {
+		ECM_IPA_ERROR(
+			"Error getting IPA->USB client from handle %d\n",
+			ecm_ipa_ctx->ipa_to_usb_client);
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("ipa_to_usb_client = %d\n",
+		      ecm_ipa_ctx->ipa_to_usb_client);
+
+	ecm_ipa_ctx->usb_to_ipa_client = ipa_get_client_mapping(usb_to_ipa_hdl);
+	if (ecm_ipa_ctx->usb_to_ipa_client < 0) {
+		ECM_IPA_ERROR(
+			"Error getting USB->IPA client from handle %d\n",
+			ecm_ipa_ctx->usb_to_ipa_client);
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("usb_to_ipa_client = %d\n",
+		      ecm_ipa_ctx->usb_to_ipa_client);
+
+	ecm_ipa_ctx->ipa_rm_resource_name_cons =
+		ipa_get_rm_resource_from_ep(ipa_to_usb_hdl);
+	if (ecm_ipa_ctx->ipa_rm_resource_name_cons < 0) {
+		ECM_IPA_ERROR("Error getting CONS RM resource from handle %d\n",
+			      ecm_ipa_ctx->ipa_rm_resource_name_cons);
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("ipa_rm_resource_name_cons = %d\n",
+		      ecm_ipa_ctx->ipa_rm_resource_name_cons);
+
+	ecm_ipa_ctx->ipa_rm_resource_name_prod =
+		ipa_get_rm_resource_from_ep(usb_to_ipa_hdl);
+	if (ecm_ipa_ctx->ipa_rm_resource_name_prod < 0) {
+		ECM_IPA_ERROR("Error getting PROD RM resource from handle %d\n",
+			      ecm_ipa_ctx->ipa_rm_resource_name_prod);
+		return -EINVAL;
+	}
+	ECM_IPA_DEBUG("ipa_rm_resource_name_prod = %d\n",
+		      ecm_ipa_ctx->ipa_rm_resource_name_prod);
+
+	retval = ecm_ipa_create_rm_resource(ecm_ipa_ctx);
+	if (retval) {
+		ECM_IPA_ERROR("fail on RM create\n");
+		goto fail_create_rm;
+	}
+	ECM_IPA_DEBUG("RM resource was created\n");
+
+	retval = ecm_ipa_register_properties(ecm_ipa_ctx);
+	if (retval) {
+		ECM_IPA_ERROR("fail on properties set\n");
+		goto fail_create_rm;
+	}
+	ECM_IPA_DEBUG("ecm_ipa 2 Tx and 2 Rx properties were registered\n");
+
+	retval = ecm_ipa_ep_registers_cfg(usb_to_ipa_hdl, ipa_to_usb_hdl);
+	if (retval) {
+		ECM_IPA_ERROR("fail on ep cfg\n");
+		goto fail;
+	}
+	ECM_IPA_DEBUG("end-point configured\n");
+
+	netif_carrier_on(ecm_ipa_ctx->net);
+
+	ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL);
+	if (!ecm_msg) {
+		ECM_IPA_ERROR("can't alloc msg mem\n");
+		retval = -ENOMEM;
+		goto fail;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_CONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+	retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+	if (retval) {
+		ECM_IPA_ERROR("fail to send ECM_CONNECT message\n");
+		kfree(ecm_msg);
+		goto fail;
+	}
+
+	if (!netif_carrier_ok(ecm_ipa_ctx->net)) {
+		ECM_IPA_ERROR("netif_carrier_ok error\n");
+		retval = -EBUSY;
+		goto fail;
+	}
+	ECM_IPA_DEBUG("carrier_on notified\n");
+
+	if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP)
+		ecm_ipa_enable_data_path(ecm_ipa_ctx);
+	else
+		ECM_IPA_DEBUG("data path was not enabled yet\n");
+
+	ECM_IPA_INFO("ECM_IPA was connected successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+
+fail:
+	ecm_ipa_deregister_properties();
+fail_create_rm:
+	ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+	return retval;
+}
+EXPORT_SYMBOL(ecm_ipa_connect);
+
+/**
+ * ecm_ipa_open() - notify Linux network stack to start sending packets
+ * @net: the network interface supplied by the network stack
+ *
+ * Linux uses this API to notify the driver that the network interface
+ * transitions to the up state.
+ * The driver will instruct the Linux network stack to start
+ * delivering data packets.
+ */
+static int ecm_ipa_open(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx;
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ecm_ipa_ctx = netdev_priv(net);
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_OPEN);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't bring driver up before initialize\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	if (ecm_ipa_ctx->state == ECM_IPA_CONNECTED_AND_UP)
+		ecm_ipa_enable_data_path(ecm_ipa_ctx);
+	else
+		ECM_IPA_DEBUG("data path was not enabled yet\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/**
+ * ecm_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ * @skb: packet received from Linux network stack
+ * @net: the network device being used to send this packet
+ *
+ * Several conditions must be met in order to send the packet to IPA:
+ * - The transmit queue for the network driver is currently
+ *   in the "send" state
+ * - The driver's internal state is "UP".
+ * - The Tx filter switch is turned off
+ * - The IPA resource manager state for the driver's producer client
+ *   is "Granted", which implies that all the resources in the dependency
+ *   graph are valid for data flow.
+ * - The outstanding-packets high watermark has not been reached.
+ *
+ * If all of the above conditions are met, the network driver will
+ * send the packet using the IPA Tx API.
+ * If the outstanding-packets high watermark is reached, the driver will
+ * stop the send queue until enough packets have been processed by the IPA core.
+ */
+static netdev_tx_t ecm_ipa_start_xmit
+	(struct sk_buff *skb, struct net_device *net)
+{
+	int ret;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+
+	netif_trans_update(net);
+
+	ECM_IPA_DEBUG
+		("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	if (unlikely(netif_queue_stopped(net))) {
+		ECM_IPA_ERROR("interface queue is stopped\n");
+		goto out;
+	}
+
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_ERROR("Missing pipe connected and/or iface up\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(tx_filter(skb))) {
+		dev_kfree_skb_any(skb);
+		ECM_IPA_DEBUG("packet got filtered out on Tx path\n");
+		status = NETDEV_TX_OK;
+		goto out;
+	}
+	ret = resource_request(ecm_ipa_ctx);
+	if (ret) {
+		ECM_IPA_DEBUG("Waiting for resource\n");
+		netif_stop_queue(net);
+		goto resource_busy;
+	}
+
+	if (atomic_read(&ecm_ipa_ctx->outstanding_pkts) >=
+					ecm_ipa_ctx->outstanding_high) {
+		ECM_IPA_DEBUG
+			("outstanding high (%d)- stopping\n",
+			ecm_ipa_ctx->outstanding_high);
+		netif_stop_queue(net);
+		status = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	ret = ipa_tx_dp(ecm_ipa_ctx->ipa_to_usb_client, skb, NULL);
+	if (ret) {
+		ECM_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+		goto fail_tx_packet;
+	}
+
+	atomic_inc(&ecm_ipa_ctx->outstanding_pkts);
+
+	status = NETDEV_TX_OK;
+	goto out;
+
+fail_tx_packet:
+out:
+	resource_release(ecm_ipa_ctx);
+resource_busy:
+	return status;
+}
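The queue is restarted from the Tx-complete path (ecm_ipa_tx_complete_notify(), not shown in this hunk). A hedged sketch of the low-watermark wakeup that the outstanding_high/outstanding_low documentation above implies; this is an illustration, not the driver's actual completion handler:

/* illustration of the documented low-watermark wakeup, not the real body */
static void example_tx_complete(struct ecm_ipa_dev *ctx, struct sk_buff *skb)
{
	atomic_dec(&ctx->outstanding_pkts);
	if (netif_queue_stopped(ctx->net) &&
	    atomic_read(&ctx->outstanding_pkts) <= ctx->outstanding_low)
		netif_wake_queue(ctx->net);	/* resume once the backlog drains */
	dev_kfree_skb_any(skb);
}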
+
+/**
+ * ecm_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA will pass a packet to the Linux network stack with skb->data pointing
+ * to the Ethernet frame.
+ */
+static void ecm_ipa_packet_receive_notify
+	(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int result;
+	unsigned int packet_len;
+
+	if (!skb) {
+		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
+		return;
+	}
+
+	packet_len = skb->len;
+	ECM_IPA_DEBUG("packet RX, len=%d\n", skb->len);
+
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_DEBUG("Missing pipe connected and/or iface up\n");
+		return;
+	}
+
+	if (evt != IPA_RECEIVE)	{
+		ECM_IPA_ERROR("A non-IPA_RECEIVE event in ecm_ipa_receive\n");
+		return;
+	}
+
+	skb->dev = ecm_ipa_ctx->net;
+	skb->protocol = eth_type_trans(skb, ecm_ipa_ctx->net);
+	if (rx_filter(skb)) {
+		ECM_IPA_DEBUG("packet got filtered out on Rx path\n");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	result = netif_rx(skb);
+	if (result)
+		ECM_IPA_ERROR("fail on netif_rx\n");
+	ecm_ipa_ctx->net->stats.rx_packets++;
+	ecm_ipa_ctx->net->stats.rx_bytes += packet_len;
+}
+
+/** ecm_ipa_stop() - called when network device transitions to the down
+ *     state.
+ *  @net: the network device being stopped.
+ *
+ * This API is used by the Linux network stack to notify the network driver
+ * that its state was changed to "down".
+ * The driver will stop the "send" queue and change its internal
+ * state to "Connected".
+ */
+static int ecm_ipa_stop(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_STOP);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't do network interface down without up\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	netif_stop_queue(net);
+	ECM_IPA_DEBUG("network device stopped\n");
+
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/** ecm_ipa_disconnect() - called when the USB cable is unplugged.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ *  parameter holds the network device pointer.
+ *
+ * Once the USB cable is unplugged the USB driver will notify the network
+ * interface driver.
+ * The internal driver state will return to its initialized state, the
+ * Linux network stack will be informed of carrier off, and the send queue
+ * will be stopped.
+ */
+int ecm_ipa_disconnect(void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+	struct ipa_ecm_msg *ecm_msg;
+	struct ipa_msg_meta msg_meta;
+	int retval;
+	int outstanding_dropped_pkts;
+	int ret;
+
+	ECM_IPA_LOG_ENTRY();
+	ret = 0;
+	NULL_CHECK(ecm_ipa_ctx);
+	if (ret)
+		return ret;
+	ECM_IPA_DEBUG("priv=0x%p\n", priv);
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_DISCONNECT);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	netif_carrier_off(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("carrier_off notification was sent\n");
+
+	ecm_msg = kzalloc(sizeof(*ecm_msg), GFP_KERNEL);
+	if (!ecm_msg) {
+		ECM_IPA_ERROR("can't alloc msg mem\n");
+		return -ENOMEM;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = ECM_DISCONNECT;
+	msg_meta.msg_len = sizeof(struct ipa_ecm_msg);
+	strlcpy(ecm_msg->name, ecm_ipa_ctx->net->name,
+		IPA_RESOURCE_NAME_MAX);
+	ecm_msg->ifindex = ecm_ipa_ctx->net->ifindex;
+
+	retval = ipa_send_msg(&msg_meta, ecm_msg, ecm_ipa_msg_free_cb);
+	if (retval) {
+		ECM_IPA_ERROR("fail to send ECM_DISCONNECT message\n");
+		kfree(ecm_msg);
+		return -EPERM;
+	}
+
+	netif_stop_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("queue stopped\n");
+
+	ecm_ipa_destroy_rm_resource(ecm_ipa_ctx);
+
+	outstanding_dropped_pkts =
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts);
+	ecm_ipa_ctx->net->stats.tx_errors += outstanding_dropped_pkts;
+	atomic_set(&ecm_ipa_ctx->outstanding_pkts, 0);
+
+	ECM_IPA_INFO("ECM_IPA was disconnected successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+}
+EXPORT_SYMBOL(ecm_ipa_disconnect);
+
+/**
+ * ecm_ipa_cleanup() - unregister the network interface driver and free
+ *  internal data structs.
+ * @priv: same value that was set by ecm_ipa_init(), this
+ *   parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g. when the USB composition does not support ECM.
+ * This function shall be called after the pipes have been disconnected.
+ * Detailed description:
+ *  - delete the driver dependency defined for the IPA resource manager and
+ *    destroy the producer resource
+ *  - remove the debugfs entries
+ *  - deregister the network interface from the Linux network stack
+ *  - free all internal data structures
+ */
+void ecm_ipa_cleanup(void *priv)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+	int next_state;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ECM_IPA_DEBUG("priv=0x%p\n", priv);
+
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("ecm_ipa_ctx NULL pointer\n");
+		return;
+	}
+
+	next_state = ecm_ipa_next_state(ecm_ipa_ctx->state, ECM_IPA_CLEANUP);
+	if (next_state == ECM_IPA_INVALID) {
+		ECM_IPA_ERROR("can't clean driver without cable disconnect\n");
+		return;
+	}
+	ecm_ipa_ctx->state = next_state;
+	ECM_IPA_STATE_DEBUG(ecm_ipa_ctx);
+
+	ecm_ipa_rules_destroy(ecm_ipa_ctx);
+	ecm_ipa_debugfs_destroy(ecm_ipa_ctx);
+
+	unregister_netdev(ecm_ipa_ctx->net);
+	free_netdev(ecm_ipa_ctx->net);
+
+	ECM_IPA_INFO("ECM_IPA was destroyed successfully\n");
+
+	ECM_IPA_LOG_EXIT();
+}
+EXPORT_SYMBOL(ecm_ipa_cleanup);
+
+static void ecm_ipa_enable_data_path(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	if (ecm_ipa_ctx->device_ready_notify) {
+		ecm_ipa_ctx->device_ready_notify();
+		ECM_IPA_DEBUG("USB device_ready_notify() was called\n");
+	} else {
+		ECM_IPA_DEBUG("device_ready_notify() not supplied\n");
+	}
+
+	netif_start_queue(ecm_ipa_ctx->net);
+	ECM_IPA_DEBUG("queue started\n");
+}
+
+/**
+ * ecm_ipa_rules_cfg() - set header insertion and register Tx/Rx properties
+ *				Headers will be committed to HW
+ * @ecm_ipa_ctx: main driver context parameters
+ * @dst_mac: destination MAC address
+ * @src_mac: source MAC address
+ *
+ * Returns negative errno, or zero on success
+ */
+static int ecm_ipa_rules_cfg
+	(struct ecm_ipa_dev *ecm_ipa_ctx,
+	const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	struct ethhdr *eth_ipv4;
+	struct ethhdr *eth_ipv6;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+	hdrs = kzalloc
+		(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		result = -ENOMEM;
+		goto out;
+	}
+	ipv4_hdr = &hdrs->hdr[0];
+	eth_ipv4 = (struct ethhdr *)ipv4_hdr->hdr;
+	ipv6_hdr = &hdrs->hdr[1];
+	eth_ipv6 = (struct ethhdr *)ipv6_hdr->hdr;
+	strlcpy(ipv4_hdr->name, ECM_IPA_IPV4_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv4->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_ipv4->h_source, src_mac, ETH_ALEN);
+	eth_ipv4->h_proto = htons(ETH_P_IP);
+	ipv4_hdr->hdr_len = ETH_HLEN;
+	ipv4_hdr->is_partial = 0;
+	ipv4_hdr->is_eth2_ofst_valid = true;
+	ipv4_hdr->eth2_ofst = 0;
+	ipv4_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy(ipv6_hdr->name, ECM_IPA_IPV6_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv6->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_ipv6->h_source, src_mac, ETH_ALEN);
+	eth_ipv6->h_proto = htons(ETH_P_IPV6);
+	ipv6_hdr->hdr_len = ETH_HLEN;
+	ipv6_hdr->is_partial = 0;
+	ipv6_hdr->is_eth2_ofst_valid = true;
+	ipv6_hdr->eth2_ofst = 0;
+	ipv6_hdr->type = IPA_HDR_L2_ETHERNET_II;
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		ECM_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto out_free_mem;
+	}
+	if (ipv4_hdr->status) {
+		ECM_IPA_ERROR
+			("Fail on Header-Insertion ipv4(%d)\n",
+			ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto out_free_mem;
+	}
+	if (ipv6_hdr->status) {
+		ECM_IPA_ERROR
+			("Fail on Header-Insertion ipv6(%d)\n",
+			ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto out_free_mem;
+	}
+	ecm_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	ecm_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+	ECM_IPA_LOG_EXIT();
+out_free_mem:
+	kfree(hdrs);
+out:
+	return result;
+}
+
+/**
+ * ecm_ipa_rules_destroy() - remove the IPA core configuration done for
+ *  the driver data path.
+ *  @ecm_ipa_ctx: the driver context
+ *
+ *  Reverts the work done by ecm_ipa_rules_cfg().
+ */
+static void ecm_ipa_rules_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = ecm_ipa_ctx->eth_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = ecm_ipa_ctx->eth_ipv6_hdr_hdl;
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		ECM_IPA_ERROR("ipa_del_hdr failed\n");
+	kfree(del_hdr);
+}
+
+/* ecm_ipa_register_properties() - set Tx/Rx properties for ipacm
+ *
+ * Register ecm0 interface with 2 Tx properties and 2 Rx properties:
+ * The 2 Tx properties are for data flowing from IPA to USB; they
+ * have Header-Insertion properties for both IPv4 and IPv6 Ethernet framing.
+ * The 2 Rx properties are for data flowing from USB to IPA; they have
+ * a simple rule which always "hits".
+ *
+ */
+static int ecm_ipa_register_properties(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int result = 0;
+
+	ECM_IPA_LOG_ENTRY();
+
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
+	strlcpy
+		(ipv4_property->hdr_name, ECM_IPA_IPV4_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = ecm_ipa_ctx->ipa_to_usb_client;
+	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy
+		(ipv6_property->hdr_name, ECM_IPA_IPV6_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
+	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = ecm_ipa_ctx->usb_to_ipa_client;
+	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_properties.num_props = 2;
+
+	result = ipa_register_intf("ecm0", &tx_properties, &rx_properties);
+	if (result)
+		ECM_IPA_ERROR("fail on Tx/Rx properties registration\n");
+
+	ECM_IPA_LOG_EXIT();
+
+	return result;
+}
+
+static void ecm_ipa_deregister_properties(void)
+{
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+	result = ipa_deregister_intf("ecm0");
+	if (result)
+		ECM_IPA_DEBUG("Fail on Tx prop deregister\n");
+	ECM_IPA_LOG_EXIT();
+}
+
+/**
+ * ecm_ipa_configure() - make IPA core end-point specific configuration
+ * @usb_to_ipa_hdl: handle of usb_to_ipa end-point for IPA driver
+ * @ipa_to_usb_hdl: handle of ipa_to_usb end-point for IPA driver
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ *
+ * Configure the usb_to_ipa and ipa_to_usb end-point registers
+ * - USB->IPA end-point: disable de-aggregation, enable link layer
+ *   header removal (Ethernet removal), source NATing and default routing.
+ * - IPA->USB end-point: disable aggregation, add link layer header (Ethernet)
+ * - allocate Ethernet device
+ * - register to Linux network stack
+ *
+ * Returns negative errno, or zero on success
+ */
+
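+/**
+ * ecm_ipa_rm_notify() - callback supplied to the IPA resource manager
+ *  for grant/release events
+ * @user_data: the driver context registered with the resource manager
+ * @event: the event notified by the resource manager (Grant/Release)
+ * @data: reserved field supplied by the resource manager
+ *
+ * On a Grant event the Tx queue is restarted if it was stopped while
+ * waiting for the resource; any other event is only logged.
+ */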
+static void ecm_ipa_rm_notify
+	(void *user_data, enum ipa_rm_event event, unsigned long data)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = user_data;
+
+	ECM_IPA_LOG_ENTRY();
+	if
+		(event == IPA_RM_RESOURCE_GRANTED &&
+			netif_queue_stopped(ecm_ipa_ctx->net)) {
+		ECM_IPA_DEBUG("Resource Granted - starting queue\n");
+		netif_start_queue(ecm_ipa_ctx->net);
+	} else {
+		ECM_IPA_DEBUG("Resource released\n");
+	}
+	ECM_IPA_LOG_EXIT();
+}
+
+static struct net_device_stats *ecm_ipa_get_stats(struct net_device *net)
+{
+	return &net->stats;
+}
+
+static int ecm_ipa_create_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	struct ipa_rm_create_params create_params = {0};
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+	create_params.name = IPA_RM_RESOURCE_STD_ECM_PROD;
+	create_params.reg_params.user_data = ecm_ipa_ctx;
+	create_params.reg_params.notify_cb = ecm_ipa_rm_notify;
+	result = ipa_rm_create_resource(&create_params);
+	if (result) {
+		ECM_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+		goto fail_rm_create;
+	}
+	ECM_IPA_DEBUG("rm client was created");
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_STD_ECM_PROD, &profile);
+
+	result = ipa_rm_inactivity_timer_init
+		(IPA_RM_RESOURCE_STD_ECM_PROD,
+		INACTIVITY_MSEC_DELAY);
+	if (result) {
+		ECM_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+		goto fail_it;
+	}
+	ECM_IPA_DEBUG("rm_it client was created");
+
+	result = ipa_rm_add_dependency_sync
+		(IPA_RM_RESOURCE_STD_ECM_PROD,
+		ecm_ipa_ctx->ipa_rm_resource_name_cons);
+	if (result && result != -EINPROGRESS)
+		ECM_IPA_ERROR
+		("unable to add ECM/USB dependency (%d)\n", result);
+
+	result = ipa_rm_add_dependency_sync
+		(ecm_ipa_ctx->ipa_rm_resource_name_prod,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (result && result != -EINPROGRESS)
+		ECM_IPA_ERROR
+		("unable to add USB/APPS dependency (%d)\n", result);
+
+	ECM_IPA_DEBUG("rm dependency was set\n");
+
+	ECM_IPA_LOG_EXIT();
+	return 0;
+
+fail_it:
+fail_rm_create:
+	return result;
+}
+
+static void ecm_ipa_destroy_rm_resource(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_STD_ECM_PROD,
+				 ecm_ipa_ctx->ipa_rm_resource_name_cons);
+	ipa_rm_delete_dependency(ecm_ipa_ctx->ipa_rm_resource_name_prod,
+				 IPA_RM_RESOURCE_APPS_CONS);
+	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_STD_ECM_PROD);
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+	if (result)
+		ECM_IPA_ERROR("resource deletion failed\n");
+
+	ECM_IPA_LOG_EXIT();
+}
+
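+/*
+ * rx_filter()/tx_filter() - debugfs controlled packet filters.
+ * Each returns true when the corresponding rx_enable/tx_enable debugfs
+ * switch is cleared, i.e. when the packet should be dropped by the caller.
+ */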
+static bool rx_filter(struct sk_buff *skb)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(skb->dev);
+
+	return !ecm_ipa_ctx->rx_enable;
+}
+
+static bool tx_filter(struct sk_buff *skb)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(skb->dev);
+
+	return !ecm_ipa_ctx->tx_enable;
+}
+
+static bool rm_enabled(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	return ecm_ipa_ctx->rm_enable;
+}
+
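+/*
+ * resource_request()/resource_release() - thin wrappers around the IPA
+ * resource manager inactivity-timer calls; both are no-ops when the
+ * rm_enable debugfs switch is cleared.
+ */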
+static int resource_request(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	int result = 0;
+
+	if (!rm_enabled(ecm_ipa_ctx))
+		goto out;
+	result = ipa_rm_inactivity_timer_request_resource(
+			IPA_RM_RESOURCE_STD_ECM_PROD);
+out:
+	return result;
+}
+
+static void resource_release(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	if (!rm_enabled(ecm_ipa_ctx))
+		goto out;
+	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_STD_ECM_PROD);
+out:
+	return;
+}
+
+/**
+ * ecm_ipa_tx_complete_notify() - Tx completion notify
+ *
+ * @priv: ecm driver context
+ * @evt: event type
+ * @data: data provided with event (the skb that was sent)
+ *
+ * Check that the packet is the one we sent and release it.
+ * This function will be called in deferred context in the IPA wq.
+ */
+static void ecm_ipa_tx_complete_notify
+		(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct ecm_ipa_dev *ecm_ipa_ctx = priv;
+
+	if (!skb) {
+		ECM_IPA_ERROR("Bad SKB received from IPA driver\n");
+		return;
+	}
+
+	if (!ecm_ipa_ctx) {
+		ECM_IPA_ERROR("ecm_ipa_ctx is NULL pointer\n");
+		return;
+	}
+
+	ECM_IPA_DEBUG
+		("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	if (evt != IPA_WRITE_DONE) {
+		ECM_IPA_ERROR("unsupported event on Tx callback\n");
+		return;
+	}
+
+	if (unlikely(ecm_ipa_ctx->state != ECM_IPA_CONNECTED_AND_UP)) {
+		ECM_IPA_DEBUG
+			("dropping Tx-complete pkt, state=%s",
+			ecm_ipa_state_string(ecm_ipa_ctx->state));
+		goto out;
+	}
+
+	ecm_ipa_ctx->net->stats.tx_packets++;
+	ecm_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	atomic_dec(&ecm_ipa_ctx->outstanding_pkts);
+	if
+		(netif_queue_stopped(ecm_ipa_ctx->net) &&
+		netif_carrier_ok(ecm_ipa_ctx->net) &&
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts)
+		< (ecm_ipa_ctx->outstanding_low)) {
+		ECM_IPA_DEBUG
+			("outstanding low (%d) - waking up queue\n",
+			ecm_ipa_ctx->outstanding_low);
+		netif_wake_queue(ecm_ipa_ctx->net);
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+}
+
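+/*
+ * ecm_ipa_tx_timeout() - network stack watchdog callback, invoked when Tx
+ * stalls for longer than the netdev watchdog timeout; logs the number of
+ * outstanding packets and bumps the tx_errors statistic.
+ */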
+static void ecm_ipa_tx_timeout(struct net_device *net)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = netdev_priv(net);
+
+	ECM_IPA_ERROR
+		("possible IPA stall was detected, %d outstanding",
+		atomic_read(&ecm_ipa_ctx->outstanding_pkts));
+
+	net->stats.tx_errors++;
+}
+
+static int ecm_ipa_debugfs_stall_open
+	(struct inode *inode, struct file *file)
+{
+	ECM_IPA_LOG_ENTRY();
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+}
+
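+/*
+ * ecm_ipa_debugfs_stall_write() - debug hook for emulating an IPA Rx stall
+ * by writing the user supplied value directly to a hard-coded command-queue
+ * configuration register (cmdq_cfg_mmio_phy). For debug use only; the
+ * register address is target specific.
+ */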
+static ssize_t ecm_ipa_debugfs_stall_write
+	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 cmdq_cfg_mmio_phy = 0xFD4E3038;
+	void *cmdq_cfg_mmio_virt;
+	int result;
+	bool val = 0;
+
+	ECM_IPA_LOG_ENTRY();
+
+	file->private_data = &val;
+	result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
+
+	cmdq_cfg_mmio_virt = ioremap(cmdq_cfg_mmio_phy, sizeof(u32));
+	if (!cmdq_cfg_mmio_virt) {
+		ECM_IPA_ERROR
+			("fail on mmio for cmdq_cfg_mmio_phy=0x%x",
+			cmdq_cfg_mmio_phy);
+		return result;
+	}
+
+	iowrite32(val, cmdq_cfg_mmio_virt);
+	ECM_IPA_DEBUG("Value %d was written to cfgq\n", val);
+	iounmap(cmdq_cfg_mmio_virt);
+
+	ECM_IPA_LOG_EXIT();
+
+	return result;
+}
+
+static int ecm_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
+
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &ecm_ipa_ctx->outstanding_pkts;
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_write_dma
+	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = file->private_data;
+	int result;
+
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = &ecm_ipa_ctx->dma_enable;
+	result = ecm_ipa_debugfs_enable_write(file, buf, count, ppos);
+	if (ecm_ipa_ctx->dma_enable)
+		ecm_ipa_ep_registers_dma_cfg
+			(ecm_ipa_ctx->usb_to_ipa_hdl,
+			ecm_ipa_ctx->ipa_to_usb_client);
+	else
+		ecm_ipa_ep_registers_cfg
+			(ecm_ipa_ctx->usb_to_ipa_hdl,
+			ecm_ipa_ctx->usb_to_ipa_hdl);
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+static int ecm_ipa_debugfs_dma_open(struct inode *inode, struct file *file)
+{
+	struct ecm_ipa_dev *ecm_ipa_ctx = inode->i_private;
+
+	ECM_IPA_LOG_ENTRY();
+	file->private_data = ecm_ipa_ctx;
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
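+/*
+ * ecm_ipa_debugfs_enable_write() - common write handler for the boolean
+ * debugfs switches; expects a single '0'/'1' character followed by a
+ * newline and stores the result in the bool pointed to by
+ * file->private_data.
+ */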
+static ssize_t ecm_ipa_debugfs_enable_write
+	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char input;
+	bool *enable = file->private_data;
+
+	if (count != sizeof(input) + 1) {
+		ECM_IPA_ERROR("wrong input length(%zd)\n", count);
+		return -EINVAL;
+	}
+	if (!buf) {
+		ECM_IPA_ERROR("Bad argument\n");
+		return -EINVAL;
+	}
+	missing = copy_from_user(&input, buf, 1);
+	if (missing)
+		return -EFAULT;
+	ECM_IPA_DEBUG("input received %c\n", input);
+	*enable = input - '0';
+	ECM_IPA_DEBUG("value was set to %d\n", *enable);
+	return count;
+}
+
+static ssize_t ecm_ipa_debugfs_enable_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int size = 0;
+	int ret;
+	loff_t pos;
+	u8 enable_str[sizeof(char) * 3] = {0};
+	bool *enable = file->private_data;
+
+	pos = *ppos;
+	nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
+	ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
+	if (ret < 0) {
+		ECM_IPA_ERROR("simple_read_from_buffer problem\n");
+		return ret;
+	}
+	size += ret;
+	count -= nbytes;
+	*ppos = pos + size;
+	return size;
+}
+
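+/*
+ * ecm_ipa_debugfs_atomic_read() - read handler for the "outstanding"
+ * debugfs file; prints the current value of the outstanding packets
+ * counter.
+ */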
+static ssize_t ecm_ipa_debugfs_atomic_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+	atomic_t *atomic_var = file->private_data;
+
+	nbytes = scnprintf
+		(atomic_str, sizeof(atomic_str), "%d\n",
+			atomic_read(atomic_var));
+	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
+static int ecm_ipa_debugfs_init(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	const mode_t flags_read_write = 0666;
+	const mode_t flags_read_only = 0444;
+	const mode_t flags_write_only = 0222;
+	struct dentry *file;
+
+	ECM_IPA_LOG_ENTRY();
+
+	if (!ecm_ipa_ctx)
+		return -EINVAL;
+
+	ecm_ipa_ctx->directory = debugfs_create_dir("ecm_ipa", NULL);
+	if (!ecm_ipa_ctx->directory) {
+		ECM_IPA_ERROR("could not create debugfs directory entry\n");
+		goto fail_directory;
+	}
+	file = debugfs_create_bool
+		("tx_enable", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->tx_enable);
+	if (!file) {
+		ECM_IPA_ERROR("could not create debugfs tx file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_bool
+		("rx_enable", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->rx_enable);
+	if (!file) {
+		ECM_IPA_ERROR("could not create debugfs rx file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_bool
+		("rm_enable", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->rm_enable);
+	if (!file) {
+		ECM_IPA_ERROR("could not create debugfs rm file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_u8
+		("outstanding_high", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_high);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding_high file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_u8
+		("outstanding_low", flags_read_write,
+		ecm_ipa_ctx->directory, &ecm_ipa_ctx->outstanding_low);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding_low file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_file
+		("dma_enable", flags_read_write,
+		ecm_ipa_ctx->directory,
+		ecm_ipa_ctx, &ecm_ipa_debugfs_dma_ops);
+	if (!file) {
+		ECM_IPA_ERROR("could not create debugfs dma file\n");
+		goto fail_file;
+	}
+	file = debugfs_create_file
+		("outstanding", flags_read_only,
+		ecm_ipa_ctx->directory,
+		ecm_ipa_ctx, &ecm_ipa_debugfs_atomic_ops);
+	if (!file) {
+		ECM_IPA_ERROR("could not create outstanding file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_file
+		("stall_ipa_rx_proc", flags_write_only,
+		ecm_ipa_ctx->directory,
+		ecm_ipa_ctx, &ecm_ipa_debugfs_stall_ops);
+	if (!file) {
+		ECM_IPA_ERROR("could not create stall_ipa_rx_proc file\n");
+		goto fail_file;
+	}
+
+	ECM_IPA_LOG_EXIT();
+
+	return 0;
+fail_file:
+	debugfs_remove_recursive(ecm_ipa_ctx->directory);
+fail_directory:
+	return -EFAULT;
+}
+
+static void ecm_ipa_debugfs_destroy(struct ecm_ipa_dev *ecm_ipa_ctx)
+{
+	debugfs_remove_recursive(ecm_ipa_ctx->directory);
+}
+
+/**
+ * ecm_ipa_ep_registers_cfg() - configure the USB end-points for ECM
+ *
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @ipa_to_usb_hdl: handle received from ipa_connect
+ *
+ * USB to IPA pipe:
+ *  - No de-aggregation
+ *  - Remove Ethernet header
+ *  - SRC NAT
+ *  - Default routing(0)
+ * IPA to USB Pipe:
+ *  - No aggregation
+ *  - Add Ethernet header
+ */
+static int ecm_ipa_ep_registers_cfg(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl)
+{
+	int result = 0;
+	struct ipa_ep_cfg usb_to_ipa_ep_cfg;
+	struct ipa_ep_cfg ipa_to_usb_ep_cfg;
+
+	ECM_IPA_LOG_ENTRY();
+	memset(&usb_to_ipa_ep_cfg, 0, sizeof(struct ipa_ep_cfg));
+	usb_to_ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	usb_to_ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	usb_to_ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	usb_to_ipa_ep_cfg.route.rt_tbl_hdl = 0;
+	usb_to_ipa_ep_cfg.mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	usb_to_ipa_ep_cfg.mode.mode = IPA_BASIC;
+	result = ipa_cfg_ep(usb_to_ipa_hdl, &usb_to_ipa_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure USB to IPA point\n");
+		goto out;
+	}
+	memset(&ipa_to_usb_ep_cfg, 0, sizeof(struct ipa_ep_cfg));
+	ipa_to_usb_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+	ipa_to_usb_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	ipa_to_usb_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure IPA to USB end-point\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+/**
+ * ecm_ipa_ep_registers_dma_cfg() - configure the USB endpoints for ECM
+ *	DMA
+ * @usb_to_ipa_hdl: handle received from ipa_connect
+ * @prod_client: IPA client used as the DMA destination for the
+ *  Apps-to-IPA end-point
+ *
+ * This function will override the previous configuration,
+ * which is needed for cores that do not support the blocks logic.
+ * Note that client handles are the actual pipe index.
+ */
+static int ecm_ipa_ep_registers_dma_cfg(u32 usb_to_ipa_hdl,
+					enum ipa_client_type prod_client)
+{
+	int result = 0;
+	struct ipa_ep_cfg_mode cfg_mode;
+	u32 apps_to_ipa_hdl = 2;
+
+	ECM_IPA_LOG_ENTRY();
+
+	memset(&cfg_mode, 0, sizeof(cfg_mode));
+	cfg_mode.mode = IPA_DMA;
+	cfg_mode.dst = prod_client;
+	result = ipa_cfg_ep_mode(apps_to_ipa_hdl, &cfg_mode);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure Apps to IPA\n");
+		goto out;
+	}
+	memset(&cfg_mode, 0, sizeof(cfg_mode));
+	cfg_mode.mode = IPA_DMA;
+	cfg_mode.dst = IPA_CLIENT_A5_LAN_WAN_CONS;
+	result = ipa_cfg_ep_mode(usb_to_ipa_hdl, &cfg_mode);
+	if (result) {
+		ECM_IPA_ERROR("failed to configure USB to IPA\n");
+		goto out;
+	}
+	ECM_IPA_DEBUG("end-point registers successfully configured\n");
+out:
+	ECM_IPA_LOG_EXIT();
+	return result;
+}
+
+/**
+ * ecm_ipa_set_device_ethernet_addr() - set device Ethernet address
+ * @dev_ethaddr: netdev Ethernet address field to be set
+ * @device_ethaddr: device Ethernet address to copy from
+ *
+ * Returns 0 for success, negative otherwise
+ */
+static int ecm_ipa_set_device_ethernet_addr
+	(u8 *dev_ethaddr, u8 device_ethaddr[])
+{
+	if (!is_valid_ether_addr(device_ethaddr))
+		return -EINVAL;
+	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+	ECM_IPA_DEBUG("device ethernet address: %pM\n", dev_ethaddr);
+	return 0;
+}
+
+/** ecm_ipa_next_state - return the next state of the driver
+ * @current_state: the current state of the driver
+ * @operation: an enum which represent the operation being made on the driver
+ *  by its API.
+ *
+ * This function implements the driver internal state machine.
+ * Its decisions are based on the driver current state and the operation
+ * being made.
+ * In case the operation is invalid this state machine will return
+ * the value ECM_IPA_INVALID to inform the caller of a forbidden sequence.
+ */
+static enum ecm_ipa_state ecm_ipa_next_state
+	(enum ecm_ipa_state current_state, enum ecm_ipa_operation operation)
+{
+	int next_state = ECM_IPA_INVALID;
+
+	switch (current_state) {
+	case ECM_IPA_UNLOADED:
+		if (operation == ECM_IPA_INITIALIZE)
+			next_state = ECM_IPA_INITIALIZED;
+		break;
+	case ECM_IPA_INITIALIZED:
+		if (operation == ECM_IPA_CONNECT)
+			next_state = ECM_IPA_CONNECTED;
+		else if (operation == ECM_IPA_OPEN)
+			next_state = ECM_IPA_UP;
+		else if (operation == ECM_IPA_CLEANUP)
+			next_state = ECM_IPA_UNLOADED;
+		break;
+	case ECM_IPA_CONNECTED:
+		if (operation == ECM_IPA_DISCONNECT)
+			next_state = ECM_IPA_INITIALIZED;
+		else if (operation == ECM_IPA_OPEN)
+			next_state = ECM_IPA_CONNECTED_AND_UP;
+		break;
+	case ECM_IPA_UP:
+		if (operation == ECM_IPA_STOP)
+			next_state = ECM_IPA_INITIALIZED;
+		else if (operation == ECM_IPA_CONNECT)
+			next_state = ECM_IPA_CONNECTED_AND_UP;
+		else if (operation == ECM_IPA_CLEANUP)
+			next_state = ECM_IPA_UNLOADED;
+		break;
+	case ECM_IPA_CONNECTED_AND_UP:
+		if (operation == ECM_IPA_STOP)
+			next_state = ECM_IPA_CONNECTED;
+		else if (operation == ECM_IPA_DISCONNECT)
+			next_state = ECM_IPA_UP;
+		break;
+	default:
+		ECM_IPA_ERROR("State is not supported\n");
+		break;
+	}
+
+	ECM_IPA_DEBUG
+		("state transition ( %s -> %s )- %s\n",
+		ecm_ipa_state_string(current_state),
+		ecm_ipa_state_string(next_state),
+		next_state == ECM_IPA_INVALID ? "Forbidden" : "Allowed");
+
+	return next_state;
+}
+
+/**
+ * ecm_ipa_state_string - return the state string representation
+ * @state: enum which describe the state
+ */
+static const char *ecm_ipa_state_string(enum ecm_ipa_state state)
+{
+	switch (state) {
+	case ECM_IPA_UNLOADED:
+		return "ECM_IPA_UNLOADED";
+	case ECM_IPA_INITIALIZED:
+		return "ECM_IPA_INITIALIZED";
+	case ECM_IPA_CONNECTED:
+		return "ECM_IPA_CONNECTED";
+	case ECM_IPA_UP:
+		return "ECM_IPA_UP";
+	case ECM_IPA_CONNECTED_AND_UP:
+		return "ECM_IPA_CONNECTED_AND_UP";
+	default:
+		return "Not supported";
+	}
+}
+
+/**
+ * ecm_ipa_init_module() - module initialization
+ *
+ */
+static int ecm_ipa_init_module(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_LOG_EXIT();
+	return 0;
+}
+
+/**
+ * ecm_ipa_cleanup_module() - module cleanup
+ *
+ */
+static void ecm_ipa_cleanup_module(void)
+{
+	ECM_IPA_LOG_ENTRY();
+	ECM_IPA_LOG_EXIT();
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ECM IPA network interface");
+
+late_initcall(ecm_ipa_init_module);
+module_exit(ecm_ipa_cleanup_module);
diff --git a/drivers/net/ethernet/msm/rndis_ipa.c b/drivers/net/ethernet/msm/rndis_ipa.c
new file mode 100644
index 0000000..b218cb3
--- /dev/null
+++ b/drivers/net/ethernet/msm/rndis_ipa.c
@@ -0,0 +1,2857 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
+#include <linux/in.h>
+#include <linux/stddef.h>
+#include <linux/ip.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/ipa.h>
+#include <linux/random.h>
+#include <linux/rndis_ipa.h>
+#include <linux/workqueue.h>
+
+#define CREATE_TRACE_POINTS
+#include "rndis_ipa_trace.h"
+
+#define DRV_NAME "RNDIS_IPA"
+#define DEBUGFS_DIR_NAME "rndis_ipa"
+#define DEBUGFS_AGGR_DIR_NAME "rndis_ipa_aggregation"
+#define NETDEV_NAME "rndis"
+#define DRV_RESOURCE_ID IPA_RM_RESOURCE_RNDIS_PROD
+#define IPV4_HDR_NAME "rndis_eth_ipv4"
+#define IPV6_HDR_NAME "rndis_eth_ipv6"
+#define IPA_TO_USB_CLIENT IPA_CLIENT_USB_CONS
+#define INACTIVITY_MSEC_DELAY 100
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+#define DEBUGFS_TEMP_BUF_SIZE 4
+#define RNDIS_IPA_PKT_TYPE 0x00000001
+#define RNDIS_IPA_DFLT_RT_HDL 0
+#define FROM_IPA_TO_USB_BAMDMA 4
+#define FROM_USB_TO_IPA_BAMDMA 5
+#define BAM_DMA_MAX_PKT_NUMBER 10
+#define BAM_DMA_DATA_FIFO_SIZE \
+		(BAM_DMA_MAX_PKT_NUMBER * \
+			(ETH_FRAME_LEN + sizeof(struct rndis_pkt_hdr)))
+#define BAM_DMA_DESC_FIFO_SIZE \
+		(BAM_DMA_MAX_PKT_NUMBER * (sizeof(struct sps_iovec)))
+#define TX_TIMEOUT (5 * HZ)
+#define MIN_TX_ERROR_SLEEP_PERIOD 500
+#define DEFAULT_AGGR_TIME_LIMIT 1
+#define DEFAULT_AGGR_PKT_LIMIT 0
+
+#define RNDIS_IPA_ERROR(fmt, args...) \
+		pr_err(DRV_NAME "@%s@%d@ctx:%s: "\
+				fmt, __func__, __LINE__, current->comm, ## args)
+#define RNDIS_IPA_DEBUG(fmt, args...) \
+			pr_debug("ctx: %s, "fmt, current->comm, ## args)
+
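+/*
+ * NULL_CHECK_RETVAL() - log an error and set the local 'ret' variable to
+ * -EINVAL when the given pointer is NULL; the caller is expected to test
+ * 'ret' and bail out.
+ */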
+#define NULL_CHECK_RETVAL(ptr) \
+		do { \
+			if (!(ptr)) { \
+				RNDIS_IPA_ERROR("null pointer #ptr\n"); \
+				ret = -EINVAL; \
+			} \
+		} \
+		while (0)
+
+#define RNDIS_HDR_OFST(field) offsetof(struct rndis_pkt_hdr, field)
+#define RNDIS_IPA_LOG_ENTRY() RNDIS_IPA_DEBUG("begin\n")
+#define RNDIS_IPA_LOG_EXIT()  RNDIS_IPA_DEBUG("end\n")
+
+/**
+ * enum rndis_ipa_state - specify the current driver internal state
+ *  which is guarded by a state machine.
+ *
+ * The driver internal state changes due to its external API usage.
+ * The driver saves its internal state to guard from caller illegal
+ * call sequence.
+ * states:
+ * UNLOADED is the first state, which is the default one and is also the state
+ *  after the driver gets unloaded (cleanup).
+ * INITIALIZED is the driver state once it finished registering
+ *  the network device and all internal data structs were initialized.
+ * CONNECTED is the driver state once the USB pipes were connected to IPA.
+ * UP is the driver state after the interface mode was set to UP but the
+ *  pipes are not connected yet - this is a meta-stable state.
+ * CONNECTED_AND_UP is the driver state when the pipes were connected and
+ *  the interface got an UP request from the network stack. This is the
+ *  driver's idle operation state, which allows it to transmit/receive data.
+ * INVALID is a state which is not allowed.
+ */
+enum rndis_ipa_state {
+	RNDIS_IPA_UNLOADED          = 0,
+	RNDIS_IPA_INITIALIZED       = 1,
+	RNDIS_IPA_CONNECTED         = 2,
+	RNDIS_IPA_UP                = 3,
+	RNDIS_IPA_CONNECTED_AND_UP  = 4,
+	RNDIS_IPA_INVALID           = 5,
+};
+
+/**
+ * enum rndis_ipa_operation - enumerations used to describe the API operation
+ *
+ * Those enums are used as input for the driver state machine.
+ */
+enum rndis_ipa_operation {
+	RNDIS_IPA_INITIALIZE,
+	RNDIS_IPA_CONNECT,
+	RNDIS_IPA_OPEN,
+	RNDIS_IPA_STOP,
+	RNDIS_IPA_DISCONNECT,
+	RNDIS_IPA_CLEANUP,
+};
+
+#define RNDIS_IPA_STATE_DEBUG(ctx) \
+	(RNDIS_IPA_DEBUG("Driver state: %s\n",\
+	rndis_ipa_state_string((ctx)->state)))
+
+/**
+ * struct rndis_loopback_pipe - hold all information needed for
+ *  pipe loopback logic
+ */
+struct rndis_loopback_pipe {
+	struct sps_pipe          *ipa_sps;
+	struct ipa_sps_params ipa_sps_connect;
+	struct ipa_connect_params ipa_connect_params;
+
+	struct sps_pipe          *dma_sps;
+	struct sps_connect        dma_connect;
+
+	struct sps_alloc_dma_chan dst_alloc;
+	struct sps_dma_chan       ipa_sps_channel;
+	enum sps_mode mode;
+	u32 ipa_peer_bam_hdl;
+	u32 peer_pipe_index;
+	u32 ipa_drv_ep_hdl;
+	u32 ipa_pipe_index;
+	enum ipa_client_type ipa_client;
+	ipa_notify_cb ipa_callback;
+	struct ipa_ep_cfg *ipa_ep_cfg;
+};
+
+/**
+ * struct rndis_ipa_dev - main driver context parameters
+ *
+ * @net: network interface struct implemented by this driver
+ * @directory: debugfs directory holding the driver debugging switches,
+ *  kept so they can be removed recursively on driver unload
+ * @tx_filter: flag that enable/disable Tx path to continue to IPA
+ * @tx_dropped: number of filtered out Tx packets
+ * @tx_dump_enable: dump all Tx packets
+ * @rx_filter: flag that enable/disable Rx path to continue to IPA
+ * @rx_dropped: number of filtered out Rx packets
+ * @rx_dump_enable: dump all Rx packets
+ * @icmp_filter: allow all ICMP packet to pass through the filters
+ * @rm_enable: flag that enable/disable Resource manager request prior to Tx
+ * @loopback_enable:  flag that enable/disable USB stub loopback
+ * @deaggregation_enable: enable/disable IPA HW deaggregation logic
+ * @during_xmit_error: flags that indicate that the driver is in a middle
+ *  of error handling in Tx path
+ * @usb_to_ipa_loopback_pipe: usb to ipa (Rx) pipe representation for loopback
+ * @ipa_to_usb_loopback_pipe: ipa to usb (Tx) pipe representation for loopback
+ * @bam_dma_hdl: handle representing bam-dma, used for loopback logic
+ * @eth_ipv4_hdr_hdl: saved handle for ipv4 header-insertion table
+ * @eth_ipv6_hdr_hdl: saved handle for ipv6 header-insertion table
+ * @usb_to_ipa_hdl: save handle for IPA pipe operations
+ * @ipa_to_usb_hdl: save handle for IPA pipe operations
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets which shall cause
+ *  to netdev queue start (after stopped due to outstanding_high reached)
+ * @error_msec_sleep_time: number of msec for sleeping in case of Tx error
+ * @state: current state of the driver
+ * @host_ethaddr: holds the tethered PC ethernet address
+ * @device_ethaddr: holds the device ethernet address
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the Netdev internal
+ * state is changed to RNDIS_IPA_CONNECTED_AND_UP
+ * @xmit_error_delayed_work: work item for cases where IPA driver Tx fails
+ */
+struct rndis_ipa_dev {
+	struct net_device *net;
+	bool tx_filter;
+	u32 tx_dropped;
+	bool tx_dump_enable;
+	bool rx_filter;
+	u32 rx_dropped;
+	bool rx_dump_enable;
+	bool icmp_filter;
+	bool rm_enable;
+	bool loopback_enable;
+	bool deaggregation_enable;
+	bool during_xmit_error;
+	struct rndis_loopback_pipe usb_to_ipa_loopback_pipe;
+	struct rndis_loopback_pipe ipa_to_usb_loopback_pipe;
+	u32 bam_dma_hdl;
+	struct dentry *directory;
+	u32 eth_ipv4_hdr_hdl;
+	u32 eth_ipv6_hdr_hdl;
+	u32 usb_to_ipa_hdl;
+	u32 ipa_to_usb_hdl;
+	atomic_t outstanding_pkts;
+	u32 outstanding_high;
+	u32 outstanding_low;
+	u32 error_msec_sleep_time;
+	enum rndis_ipa_state state;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void (*device_ready_notify)(void);
+	struct delayed_work xmit_error_delayed_work;
+};
+
+/**
+ * rndis_pkt_hdr - RNDIS_IPA representation of REMOTE_NDIS_PACKET_MSG
+ * @msg_type: for REMOTE_NDIS_PACKET_MSG this value should be 1
+ * @msg_len:  total message length in bytes, including RNDIS header and payload
+ * @data_ofst: offset in bytes from the start of the data_ofst field to the
+ *  payload
+ * @data_len: payload size in bytes
+ * @zeroes: OOB place holder - not used for RNDIS_IPA.
+ */
+struct rndis_pkt_hdr {
+	__le32	msg_type;
+	__le32	msg_len;
+	__le32	data_ofst;
+	__le32	data_len;
+	__le32  zeroes[7];
+} __packed;
+
+static int rndis_ipa_open(struct net_device *net);
+static void rndis_ipa_packet_receive_notify
+	(void *private, enum ipa_dp_evt_type evt, unsigned long data);
+static void rndis_ipa_tx_complete_notify
+	(void *private, enum ipa_dp_evt_type evt, unsigned long data);
+static void rndis_ipa_tx_timeout(struct net_device *net);
+static int rndis_ipa_stop(struct net_device *net);
+static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb);
+static void rndis_ipa_xmit_error(struct sk_buff *skb);
+static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work);
+static void rndis_ipa_prepare_header_insertion
+	(int eth_type,
+	const char *hdr_name, struct ipa_hdr_add *add_hdr,
+	const void *dst_mac, const void *src_mac);
+static int rndis_ipa_hdrs_cfg
+	(struct rndis_ipa_dev *rndis_ipa_ctx,
+	const void *dst_mac, const void *src_mac);
+static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
+static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net);
+static int rndis_ipa_register_properties(char *netdev_name);
+static int rndis_ipa_deregister_properties(char *netdev_name);
+static void rndis_ipa_rm_notify
+	(void *user_data, enum ipa_rm_event event,
+	unsigned long data);
+static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx);
+static bool rx_filter(struct sk_buff *skb);
+static bool tx_filter(struct sk_buff *skb);
+static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx);
+static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx);
+static netdev_tx_t rndis_ipa_start_xmit
+	(struct sk_buff *skb, struct net_device *net);
+static int rndis_ipa_loopback_pipe_create
+	(struct rndis_ipa_dev *rndis_ipa_ctx,
+	struct rndis_loopback_pipe *loopback_pipe);
+static void rndis_ipa_destroy_loopback_pipe
+	(struct rndis_loopback_pipe *loopback_pipe);
+static int rndis_ipa_create_loopback(struct rndis_ipa_dev *rndis_ipa_ctx);
+static void rndis_ipa_destroy_loopback(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_setup_loopback
+	(bool enable, struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_debugfs_loopback_open
+	(struct inode *inode, struct file *file);
+static int rndis_ipa_debugfs_atomic_open
+	(struct inode *inode, struct file *file);
+static int rndis_ipa_debugfs_aggr_open
+	(struct inode *inode, struct file *file);
+static ssize_t rndis_ipa_debugfs_aggr_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_loopback_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_enable_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_enable_read
+	(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_loopback_read
+	(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t rndis_ipa_debugfs_atomic_read
+	(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static void rndis_ipa_dump_skb(struct sk_buff *skb);
+static int rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx);
+static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx);
+static int rndis_ipa_ep_registers_cfg
+	(u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl, u32 max_xfer_size_bytes_to_dev,
+	u32 max_xfer_size_bytes_to_host, u32 mtu,
+	bool deaggr_enable);
+static int rndis_ipa_set_device_ethernet_addr
+	(u8 *dev_ethaddr,
+	u8 device_ethaddr[]);
+static enum rndis_ipa_state rndis_ipa_next_state
+	(enum rndis_ipa_state current_state,
+	enum rndis_ipa_operation operation);
+static const char *rndis_ipa_state_string(enum rndis_ipa_state state);
+static int rndis_ipa_init_module(void);
+static void rndis_ipa_cleanup_module(void);
+
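+/* single instance of the driver context, set by rndis_ipa_init() */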
+struct rndis_ipa_dev *rndis_ipa;
+
+static const struct net_device_ops rndis_ipa_netdev_ops = {
+	.ndo_open		= rndis_ipa_open,
+	.ndo_stop		= rndis_ipa_stop,
+	.ndo_start_xmit = rndis_ipa_start_xmit,
+	.ndo_tx_timeout = rndis_ipa_tx_timeout,
+	.ndo_get_stats = rndis_ipa_get_stats,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+const struct file_operations rndis_ipa_debugfs_atomic_ops = {
+	.open = rndis_ipa_debugfs_atomic_open,
+	.read = rndis_ipa_debugfs_atomic_read,
+};
+
+const struct file_operations rndis_ipa_loopback_ops = {
+		.open = rndis_ipa_debugfs_loopback_open,
+		.read = rndis_ipa_debugfs_loopback_read,
+		.write = rndis_ipa_debugfs_loopback_write,
+};
+
+const struct file_operations rndis_ipa_aggr_ops = {
+		.open = rndis_ipa_debugfs_aggr_open,
+		.write = rndis_ipa_debugfs_aggr_write,
+};
+
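+/*
+ * Default end-point configuration for the IPA->USB (Tx) pipe:
+ * Ethernet + RNDIS header insertion with generic aggregation enabled,
+ * so several RNDIS packets may be batched into a single USB transfer.
+ */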
+static struct ipa_ep_cfg ipa_to_usb_ep_cfg = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = ETH_HLEN,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3 * sizeof(u32),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_AGGR,
+		.aggr = IPA_GENERIC,
+		.aggr_byte_limit = 4,
+		.aggr_time_limit = DEFAULT_AGGR_TIME_LIMIT,
+		.aggr_pkt_limit = DEFAULT_AGGR_PKT_LIMIT
+	},
+	.deaggr = {
+		.deaggr_hdr_len = 0,
+		.packet_offset_valid = 0,
+		.packet_offset_location = 0,
+		.max_packet_len = 0,
+	},
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_SRC_NAT,
+	},
+};
+
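+/*
+ * USB->IPA (Rx) end-point configuration used when HW de-aggregation is
+ * disabled; in this mode the RNDIS header reaches the Apps processor and
+ * is stripped in SW (see rndis_ipa_packet_receive_notify()).
+ */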
+static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_dis = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN + sizeof(struct rndis_pkt_hdr),
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = 0,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3 * sizeof(u32) +
+			sizeof(struct rndis_pkt_hdr),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+
+	.aggr = {
+		.aggr_en = IPA_BYPASS_AGGR,
+		.aggr = 0,
+		.aggr_byte_limit = 0,
+		.aggr_time_limit = 0,
+		.aggr_pkt_limit  = 0,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = 0,
+		.packet_offset_valid = false,
+		.packet_offset_location = 0,
+		.max_packet_len = 0,
+	},
+
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_BYPASS_NAT,
+	},
+};
+
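+/*
+ * USB->IPA (Rx) end-point configuration used when HW de-aggregation is
+ * enabled; the IPA core parses and strips the RNDIS headers, so no SW
+ * header removal is needed on the Rx path.
+ */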
+static struct ipa_ep_cfg usb_to_ipa_ep_cfg_deaggr_en = {
+	.mode = {
+		.mode = IPA_BASIC,
+		.dst  = IPA_CLIENT_APPS_LAN_CONS,
+	},
+	.hdr = {
+		.hdr_len = ETH_HLEN,
+		.hdr_ofst_metadata_valid = false,
+		.hdr_ofst_metadata = 0,
+		.hdr_additional_const_len = 0,
+		.hdr_ofst_pkt_size_valid = true,
+		.hdr_ofst_pkt_size = 3 * sizeof(u32),
+		.hdr_a5_mux = false,
+		.hdr_remove_additional = false,
+		.hdr_metadata_reg_valid = false,
+	},
+	.hdr_ext = {
+		.hdr_pad_to_alignment = 0,
+		.hdr_total_len_or_pad_offset = 1 * sizeof(u32),
+		.hdr_payload_len_inc_padding = false,
+		.hdr_total_len_or_pad = IPA_HDR_TOTAL_LEN,
+		.hdr_total_len_or_pad_valid = true,
+		.hdr_little_endian = true,
+	},
+	.aggr = {
+		.aggr_en = IPA_ENABLE_DEAGGR,
+		.aggr = IPA_GENERIC,
+		.aggr_byte_limit = 0,
+		.aggr_time_limit = 0,
+		.aggr_pkt_limit  = 0,
+	},
+	.deaggr = {
+		.deaggr_hdr_len = sizeof(struct rndis_pkt_hdr),
+		.packet_offset_valid = true,
+		.packet_offset_location = 8,
+		.max_packet_len = 8192, /* Will be overridden*/
+	},
+	.route = {
+		.rt_tbl_hdl = RNDIS_IPA_DFLT_RT_HDL,
+	},
+	.nat = {
+		.nat_en = IPA_BYPASS_NAT,
+	},
+};
+
+/**
+ * rndis_template_hdr - RNDIS template structure for RNDIS_IPA SW insertion
+ * @msg_type: set for REMOTE_NDIS_PACKET_MSG (0x00000001)
+ *  this value will be used for all data packets
+ * @msg_len:  will add the skb length to get final size
+ * @data_ofst: this field value will not be changed
+ * @data_len: set as skb length to get final size
+ * @zeroes: make sure all OOB data is not used
+ */
+struct rndis_pkt_hdr rndis_template_hdr = {
+	.msg_type = RNDIS_IPA_PKT_TYPE,
+	.msg_len = sizeof(struct rndis_pkt_hdr),
+	.data_ofst = sizeof(struct rndis_pkt_hdr) - RNDIS_HDR_OFST(data_ofst),
+	.data_len = 0,
+	.zeroes = {0},
+};
+
+/**
+ * rndis_ipa_init() - create network device and initialize internal
+ *  data structures
+ * @params: in/out parameters required for initialization,
+ *  see "struct ipa_usb_init_params" for more details
+ *
+ * Shall be called prior to pipe connection.
+ * Detailed description:
+ *  - allocate the network device
+ *  - set default values for driver internal switches and stash them inside
+ *     the netdev private field
+ *  - set needed headroom for RNDIS header
+ *  - create debugfs folder and files
+ *  - create IPA resource manager client
+ *  - set the ethernet address for the netdev to be added on SW Tx path
+ *  - add header insertion rules for IPA driver (based on host/device Ethernet
+ *     addresses given in input params and on RNDIS data template struct)
+ *  - register tx/rx properties to IPA driver (will be later used
+ *    by IPA configuration manager to configure rest of the IPA rules)
+ *  - set the carrier state to "off" (until connect is called)
+ *  - register the network device
+ *  - set the out parameters
+ *  - change driver internal state to INITIALIZED
+ *
+ * Returns negative errno, or zero on success
+ */
+int rndis_ipa_init(struct ipa_usb_init_params *params)
+{
+	int result = 0;
+	struct net_device *net;
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	int ret;
+
+	RNDIS_IPA_LOG_ENTRY();
+	RNDIS_IPA_DEBUG("%s initializing\n", DRV_NAME);
+	ret = 0;
+	NULL_CHECK_RETVAL(params);
+	if (ret)
+		return ret;
+
+	RNDIS_IPA_DEBUG
+		("host_ethaddr=%pM, device_ethaddr=%pM\n",
+		params->host_ethaddr,
+		params->device_ethaddr);
+
+	net = alloc_etherdev(sizeof(struct rndis_ipa_dev));
+	if (!net) {
+		result = -ENOMEM;
+		RNDIS_IPA_ERROR("fail to allocate Ethernet device\n");
+		goto fail_alloc_etherdev;
+	}
+	RNDIS_IPA_DEBUG("network device was successfully allocated\n");
+
+	rndis_ipa_ctx = netdev_priv(net);
+	if (!rndis_ipa_ctx) {
+		result = -ENOMEM;
+		RNDIS_IPA_ERROR("fail to extract netdev priv\n");
+		goto fail_netdev_priv;
+	}
+	memset(rndis_ipa_ctx, 0, sizeof(*rndis_ipa_ctx));
+	RNDIS_IPA_DEBUG("rndis_ipa_ctx (private)=%p\n", rndis_ipa_ctx);
+
+	rndis_ipa_ctx->net = net;
+	rndis_ipa_ctx->tx_filter = false;
+	rndis_ipa_ctx->rx_filter = false;
+	rndis_ipa_ctx->icmp_filter = true;
+	rndis_ipa_ctx->rm_enable = true;
+	rndis_ipa_ctx->tx_dropped = 0;
+	rndis_ipa_ctx->rx_dropped = 0;
+	rndis_ipa_ctx->tx_dump_enable = false;
+	rndis_ipa_ctx->rx_dump_enable = false;
+	rndis_ipa_ctx->deaggregation_enable = false;
+	rndis_ipa_ctx->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	rndis_ipa_ctx->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+	memcpy
+		(rndis_ipa_ctx->device_ethaddr, params->device_ethaddr,
+		sizeof(rndis_ipa_ctx->device_ethaddr));
+	memcpy
+		(rndis_ipa_ctx->host_ethaddr, params->host_ethaddr,
+		sizeof(rndis_ipa_ctx->host_ethaddr));
+	INIT_DELAYED_WORK
+		(&rndis_ipa_ctx->xmit_error_delayed_work,
+		rndis_ipa_xmit_error_aftercare_wq);
+	rndis_ipa_ctx->error_msec_sleep_time =
+		MIN_TX_ERROR_SLEEP_PERIOD;
+	RNDIS_IPA_DEBUG("internal data structures were set\n");
+
+	if (!params->device_ready_notify)
+		RNDIS_IPA_DEBUG("device_ready_notify() was not supplied\n");
+	rndis_ipa_ctx->device_ready_notify = params->device_ready_notify;
+
+	snprintf(net->name, sizeof(net->name), "%s%%d", NETDEV_NAME);
+	RNDIS_IPA_DEBUG
+		("Setting network interface driver name to: %s\n",
+		net->name);
+
+	net->netdev_ops = &rndis_ipa_netdev_ops;
+	net->watchdog_timeo = TX_TIMEOUT;
+
+	net->needed_headroom = sizeof(rndis_template_hdr);
+	RNDIS_IPA_DEBUG
+		("Needed headroom for RNDIS header set to %d\n",
+		net->needed_headroom);
+
+	result = rndis_ipa_debugfs_init(rndis_ipa_ctx);
+	if (result)
+		goto fail_debugfs;
+	RNDIS_IPA_DEBUG("debugfs entries were created\n");
+
+	result = rndis_ipa_set_device_ethernet_addr
+		(net->dev_addr, rndis_ipa_ctx->device_ethaddr);
+	if (result) {
+		RNDIS_IPA_ERROR("set device MAC failed\n");
+		goto fail_set_device_ethernet;
+	}
+	RNDIS_IPA_DEBUG("Device Ethernet address set %pM\n", net->dev_addr);
+
+	result = rndis_ipa_hdrs_cfg
+			(rndis_ipa_ctx,
+			params->host_ethaddr,
+			params->device_ethaddr);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on ipa hdrs set\n");
+		goto fail_hdrs_cfg;
+	}
+	RNDIS_IPA_DEBUG("IPA header-insertion configed for Ethernet+RNDIS\n");
+
+	result = rndis_ipa_register_properties(net->name);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on properties set\n");
+		goto fail_register_tx;
+	}
+	RNDIS_IPA_DEBUG("2 TX and 2 RX properties were registered\n");
+
+	netif_carrier_off(net);
+	RNDIS_IPA_DEBUG("set carrier off until pipes are connected\n");
+
+	result = register_netdev(net);
+	if (result) {
+		RNDIS_IPA_ERROR("register_netdev failed: %d\n", result);
+		goto fail_register_netdev;
+	}
+	RNDIS_IPA_DEBUG
+		("netdev:%s registration succeeded, index=%d\n",
+		net->name, net->ifindex);
+
+	rndis_ipa = rndis_ipa_ctx;
+	params->ipa_rx_notify = rndis_ipa_packet_receive_notify;
+	params->ipa_tx_notify = rndis_ipa_tx_complete_notify;
+	params->private = rndis_ipa_ctx;
+	params->skip_ep_cfg = false;
+	rndis_ipa_ctx->state = RNDIS_IPA_INITIALIZED;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+	pr_info("RNDIS_IPA NetDev was initialized");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_register_netdev:
+	rndis_ipa_deregister_properties(net->name);
+fail_register_tx:
+	rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
+fail_set_device_ethernet:
+fail_hdrs_cfg:
+	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
+fail_debugfs:
+fail_netdev_priv:
+	free_netdev(net);
+fail_alloc_etherdev:
+	return result;
+}
+EXPORT_SYMBOL(rndis_ipa_init);
+
+/**
+ * rndis_ipa_pipe_connect_notify() - notify rndis_ipa Netdev that the USB pipes
+ *  were connected
+ * @usb_to_ipa_hdl: handle from IPA driver client for USB->IPA
+ * @ipa_to_usb_hdl: handle from IPA driver client for IPA->USB
+ * @private: same value that was set by init(), this parameter holds the
+ *  network device pointer.
+ * @max_xfer_size_bytes_to_dev: RNDIS protocol specific, the maximum transfer
+ *  size, in bytes, for the host-to-device direction
+ * @max_packet_number_to_dev: RNDIS protocol specific, the maximum number of
+ *  packets per transfer for the host-to-device direction
+ * @max_xfer_size_bytes_to_host: RNDIS protocol specific, the maximum transfer
+ *  size, in bytes, that the host expects
+ *
+ * Once USB driver finishes the pipe connection between IPA core
+ * and USB core this method shall be called in order to
+ * allow the driver to complete the data path configurations.
+ * Detailed description:
+ *  - configure the IPA end-points register
+ *  - notify the Linux kernel for "carrier_on"
+ *  - change the driver internal state
+ *
+ *  After this function is done the driver state changes to Connected or
+ *  Connected and Up.
+ *  This API is expected to be called after initialization() or
+ *  after a call to disconnect().
+ *
+ * Returns negative errno, or zero on success
+ */
+int rndis_ipa_pipe_connect_notify(
+	u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl,
+	u32 max_xfer_size_bytes_to_dev,
+	u32 max_packet_number_to_dev,
+	u32 max_xfer_size_bytes_to_host,
+	void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int result;
+	int ret;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	ret = 0;
+	NULL_CHECK_RETVAL(private);
+	if (ret)
+		return ret;
+
+	RNDIS_IPA_DEBUG
+		("usb_to_ipa_hdl=%d, ipa_to_usb_hdl=%d, private=0x%p\n",
+		usb_to_ipa_hdl, ipa_to_usb_hdl, private);
+	RNDIS_IPA_DEBUG
+		("max_xfer_sz_to_dev=%d, max_pkt_num_to_dev=%d\n",
+		max_xfer_size_bytes_to_dev,
+		max_packet_number_to_dev);
+	RNDIS_IPA_DEBUG
+		("max_xfer_sz_to_host=%d\n",
+		max_xfer_size_bytes_to_host);
+
+	next_state = rndis_ipa_next_state
+		(rndis_ipa_ctx->state,
+		RNDIS_IPA_CONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		RNDIS_IPA_ERROR("use init()/disconnect() before connect()\n");
+		return -EPERM;
+	}
+
+	if (usb_to_ipa_hdl >= IPA_CLIENT_MAX) {
+		RNDIS_IPA_ERROR
+			("usb_to_ipa_hdl(%d) - not valid ipa handle\n",
+			usb_to_ipa_hdl);
+		return -EINVAL;
+	}
+	if (ipa_to_usb_hdl >= IPA_CLIENT_MAX) {
+		RNDIS_IPA_ERROR
+			("ipa_to_usb_hdl(%d) - not valid ipa handle\n",
+			ipa_to_usb_hdl);
+		return -EINVAL;
+	}
+
+	result = rndis_ipa_create_rm_resource(rndis_ipa_ctx);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on RM create\n");
+		goto fail_create_rm;
+	}
+	RNDIS_IPA_DEBUG("RM resource was created\n");
+
+	rndis_ipa_ctx->ipa_to_usb_hdl = ipa_to_usb_hdl;
+	rndis_ipa_ctx->usb_to_ipa_hdl = usb_to_ipa_hdl;
+	if (max_packet_number_to_dev > 1)
+		rndis_ipa_ctx->deaggregation_enable = true;
+	else
+		rndis_ipa_ctx->deaggregation_enable = false;
+	result = rndis_ipa_ep_registers_cfg
+		(usb_to_ipa_hdl,
+		ipa_to_usb_hdl,
+		max_xfer_size_bytes_to_dev,
+		max_xfer_size_bytes_to_host,
+		rndis_ipa_ctx->net->mtu,
+		rndis_ipa_ctx->deaggregation_enable);
+	if (result) {
+		RNDIS_IPA_ERROR("fail on ep cfg\n");
+		goto fail;
+	}
+	RNDIS_IPA_DEBUG("end-points configured\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_stop_queue() was called\n");
+
+	netif_carrier_on(rndis_ipa_ctx->net);
+	if (!netif_carrier_ok(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_ERROR("netif_carrier_ok error\n");
+		result = -EBUSY;
+		goto fail;
+	}
+	RNDIS_IPA_DEBUG("netif_carrier_on() was called\n");
+
+	rndis_ipa_ctx->state = next_state;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	if (next_state == RNDIS_IPA_CONNECTED_AND_UP)
+		rndis_ipa_enable_data_path(rndis_ipa_ctx);
+	else
+		RNDIS_IPA_DEBUG("queue shall be started after open()\n");
+
+	pr_info("RNDIS_IPA NetDev pipes were connected\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail:
+	rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
+fail_create_rm:
+	return result;
+}
+EXPORT_SYMBOL(rndis_ipa_pipe_connect_notify);
+
+/**
+ * rndis_ipa_open() - notify Linux network stack to start sending packets
+ * @net: the network interface supplied by the network stack
+ *
+ * Linux uses this API to notify the driver that the network interface
+ * transitions to the up state.
+ * The driver will instruct the Linux network stack to start
+ * delivering data packets.
+ * The driver internal state shall be changed to Up or Connected and Up
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_open(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	int next_state;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	rndis_ipa_ctx = netdev_priv(net);
+
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_OPEN);
+	if (next_state == RNDIS_IPA_INVALID) {
+		RNDIS_IPA_ERROR("can't bring driver up before initialize\n");
+		return -EPERM;
+	}
+
+	rndis_ipa_ctx->state = next_state;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	if (next_state == RNDIS_IPA_CONNECTED_AND_UP)
+		rndis_ipa_enable_data_path(rndis_ipa_ctx);
+	else
+		RNDIS_IPA_DEBUG("queue shall be started after connect()\n");
+
+	pr_info("RNDIS_IPA NetDev was opened\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/**
+ * rndis_ipa_start_xmit() - send data from APPs to USB core via IPA core
+ *  using SW path (Tx data path)
+ * Tx path for this Netdev is Apps-processor->IPA->USB
+ * @skb: packet received from Linux network stack destined for tethered PC
+ * @net: the network device being used to send this packet (rndis0)
+ *
+ * Several conditions must be met in order to send the packet to IPA:
+ * - The transmit queue for the network driver is currently
+ *   in the "started" state.
+ * - The driver internal state is Connected and Up.
+ * - The Tx filter switch is turned off.
+ * - The IPA resource manager state for the driver producer client
+ *   is "Granted", which implies that all the resources in the dependency
+ *   graph are valid for data flow.
+ * - The outstanding packets high boundary was not reached.
+ *
+ * In case the outstanding packets high boundary is reached, the driver will
+ * stop the send queue until enough packets are processed by
+ * the IPA core (based on calls to rndis_ipa_tx_complete_notify).
+ *
+ * In case all of the conditions are met, the network driver shall:
+ *  - encapsulate the Ethernet packet with RNDIS header (REMOTE_NDIS_PACKET_MSG)
+ *  - send the packet by using IPA Driver SW path (IP_PACKET_INIT)
+ *  - Netdev status fields shall be updated based on the current Tx packet
+ *
+ * Returns NETDEV_TX_BUSY if retry should be made later,
+ * or NETDEV_TX_OK on success.
+ */
+static netdev_tx_t rndis_ipa_start_xmit(struct sk_buff *skb,
+					struct net_device *net)
+{
+	int ret;
+	netdev_tx_t status = NETDEV_TX_BUSY;
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+
+	netif_trans_update(net);
+
+	RNDIS_IPA_DEBUG
+		("Tx, len=%d, skb->protocol=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely(netif_queue_stopped(net))) {
+		RNDIS_IPA_ERROR("interface queue is stopped\n");
+		goto out;
+	}
+
+	if (unlikely(rndis_ipa_ctx->tx_dump_enable))
+		rndis_ipa_dump_skb(skb);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_ERROR("Missing pipe connected and/or iface up\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(tx_filter(skb))) {
+		dev_kfree_skb_any(skb);
+		RNDIS_IPA_DEBUG("packet got filtered out on Tx path\n");
+		rndis_ipa_ctx->tx_dropped++;
+		status = NETDEV_TX_OK;
+		goto out;
+	}
+
+	ret = resource_request(rndis_ipa_ctx);
+	if (ret) {
+		RNDIS_IPA_DEBUG("Waiting to resource\n");
+		netif_stop_queue(net);
+		goto resource_busy;
+	}
+
+	if (atomic_read(&rndis_ipa_ctx->outstanding_pkts) >=
+				rndis_ipa_ctx->outstanding_high) {
+		RNDIS_IPA_DEBUG("Outstanding high boundary reached (%d)\n",
+				rndis_ipa_ctx->outstanding_high);
+		netif_stop_queue(net);
+		RNDIS_IPA_DEBUG("send  queue was stopped\n");
+		status = NETDEV_TX_BUSY;
+		goto out;
+	}
+
+	skb = rndis_encapsulate_skb(skb);
+	trace_rndis_tx_dp(skb->protocol);
+	ret = ipa_tx_dp(IPA_TO_USB_CLIENT, skb, NULL);
+	if (ret) {
+		RNDIS_IPA_ERROR("ipa transmit failed (%d)\n", ret);
+		goto fail_tx_packet;
+	}
+
+	atomic_inc(&rndis_ipa_ctx->outstanding_pkts);
+
+	status = NETDEV_TX_OK;
+	goto out;
+
+fail_tx_packet:
+	rndis_ipa_xmit_error(skb);
+out:
+	resource_release(rndis_ipa_ctx);
+resource_busy:
+	RNDIS_IPA_DEBUG
+		("packet Tx done - %s\n",
+		(status == NETDEV_TX_OK) ? "OK" : "FAIL");
+
+	return status;
+}
+
+/**
+ * rndis_ipa_tx_complete_notify() - notification for Netdev that the
+ *  last packet was successfully sent
+ * @private: driver context stashed by IPA driver upon pipe connect
+ * @evt: event type (expected to be write-done event)
+ * @data: data provided with event (this is actually the skb that
+ *  holds the sent packet)
+ *
+ * This function will be called in interrupt bottom-half deferred context.
+ * The outstanding packets counter shall be decremented.
+ * Network stack send queue will be re-started in case low outstanding
+ * boundary is reached and queue was stopped before.
+ * At the end the skb shall be freed.
+ */
+static void rndis_ipa_tx_complete_notify(
+	void *private,
+	enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int ret;
+
+	ret = 0;
+	NULL_CHECK_RETVAL(private);
+	if (ret)
+		return;
+
+	trace_rndis_status_rcvd(skb->protocol);
+
+	RNDIS_IPA_DEBUG
+		("Tx-complete, len=%d, skb->prot=%d, outstanding=%d\n",
+		skb->len, skb->protocol,
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts));
+
+	if (unlikely((evt != IPA_WRITE_DONE))) {
+		RNDIS_IPA_ERROR("unsupported event on TX call-back\n");
+		return;
+	}
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG
+		("dropping Tx-complete pkt, state=%s\n",
+		rndis_ipa_state_string(rndis_ipa_ctx->state));
+		goto out;
+	}
+
+	rndis_ipa_ctx->net->stats.tx_packets++;
+	rndis_ipa_ctx->net->stats.tx_bytes += skb->len;
+
+	atomic_dec(&rndis_ipa_ctx->outstanding_pkts);
+	if
+		(netif_queue_stopped(rndis_ipa_ctx->net) &&
+		netif_carrier_ok(rndis_ipa_ctx->net) &&
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts) <
+					(rndis_ipa_ctx->outstanding_low)) {
+		RNDIS_IPA_DEBUG("outstanding low boundary reached (%d)n",
+				rndis_ipa_ctx->outstanding_low);
+		netif_wake_queue(rndis_ipa_ctx->net);
+		RNDIS_IPA_DEBUG("send queue was awaken\n");
+	}
+
+out:
+	dev_kfree_skb_any(skb);
+}
+
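+/*
+ * rndis_ipa_tx_timeout() - network stack watchdog callback, invoked when
+ * Tx stalls for longer than TX_TIMEOUT; logs the outstanding packet count
+ * and bumps the tx_errors statistic.
+ */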
+static void rndis_ipa_tx_timeout(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+	int outstanding = atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+	RNDIS_IPA_ERROR
+		("possible IPA stall was detected, %d outstanding\n",
+		outstanding);
+
+	net->stats.tx_errors++;
+}
+
+/**
+ * rndis_ipa_rm_notify() - callback supplied to IPA resource manager
+ *   for grant/release events
+ * @user_data: the driver context supplied to the IPA resource manager during
+ *  the call to ipa_rm_create_resource().
+ * @event: the event notified to us by the IPA resource manager (Release/Grant)
+ * @data: reserved field supplied by the IPA resource manager
+ *
+ * This callback shall be called based on resource request/release sent
+ * to the IPA resource manager.
+ * In case the queue was stopped during EINPROGRESS for the Tx path and the
+ * event received is Grant, the queue shall be restarted.
+ * In case the event is a release notification, the netdev discards it.
+ */
+static void rndis_ipa_rm_notify(
+	void *user_data, enum ipa_rm_event event,
+	unsigned long data)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = user_data;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (event == IPA_RM_RESOURCE_RELEASED) {
+		RNDIS_IPA_DEBUG("Resource Released\n");
+		return;
+	}
+
+	if (event != IPA_RM_RESOURCE_GRANTED) {
+		RNDIS_IPA_ERROR
+			("Unexceoted event receieved from RM (%d\n)", event);
+		return;
+	}
+	RNDIS_IPA_DEBUG("Resource Granted\n");
+
+	if (netif_queue_stopped(rndis_ipa_ctx->net)) {
+		RNDIS_IPA_DEBUG("starting queue\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG("queue already awake\n");
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/**
+ * rndis_ipa_packet_receive_notify() - Rx notify for packet sent from
+ *  tethered PC (USB->IPA). The Rx path for this Netdev
+ *  is USB->IPA->Apps-processor.
+ * @private: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Once IPA driver receives a packet from USB client this callback will be
+ * called from bottom-half interrupt handling context (ipa Rx workqueue).
+ *
+ * Packets that shall be sent to Apps processor may be of two types:
+ * 1) Packets that are destined for Apps (e.g: WEBSERVER running on Apps)
+ * 2) Exception packets that need special handling (based on IPA core
+ *    configuration, e.g: new TCP session or any other packets that IPA core
+ *    can't handle)
+ * If the following conditions are met, the packet shall be sent up to the
+ * Linux network stack:
+ *  - The driver internal state is Connected and Up
+ *  - The notification received from the IPA driver meets the expected type
+ *    for an Rx packet
+ *  - The Rx filter switch is turned off
+ *
+ * Prior to sending to the network stack:
+ *  - The Netdev struct shall be stashed in the skb as required by the
+ *     network stack.
+ *  - The Ethernet header shall be removed (skb->data shall point to the
+ *     Ethernet payload, the Ethernet header still stashed under the MAC
+ *     header).
+ *  - skb->pkt_type shall be set based on the Ethernet destination
+ *     address; it can be Broadcast, Multicast or Other-Host. The latter
+ *     packet types shall be dropped in case the Netdev is not
+ *     in promiscuous mode.
+ *  - The skb protocol field shall be set based on the EtherType field.
+ *
+ * Netdev status fields shall be updated based on the current Rx packet
+ */
+static void rndis_ipa_packet_receive_notify(
+		void *private,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int result;
+	unsigned int packet_len = skb->len;
+
+	RNDIS_IPA_DEBUG
+		("packet Rx, len=%d\n",
+		skb->len);
+
+	if (unlikely(rndis_ipa_ctx->rx_dump_enable))
+		rndis_ipa_dump_skb(skb);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_DEBUG("use connect()/up() before receive()\n");
+		RNDIS_IPA_DEBUG("packet dropped (length=%d)\n",
+				skb->len);
+		return;
+	}
+
+	if (evt != IPA_RECEIVE) {
+		RNDIS_IPA_ERROR("a non-IPA_RECEIVE event in driver RX\n");
+		return;
+	}
+
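+	/* with HW de-aggregation disabled, the RNDIS packet header is still
+	 * attached to the frame, so strip it here in software before the
+	 * Ethernet header is parsed
+	 */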
+	if (!rndis_ipa_ctx->deaggregation_enable)
+		skb_pull(skb, sizeof(struct rndis_pkt_hdr));
+
+	skb->dev = rndis_ipa_ctx->net;
+	skb->protocol = eth_type_trans(skb, rndis_ipa_ctx->net);
+
+	if (rx_filter(skb)) {
+		RNDIS_IPA_DEBUG("packet got filtered out on RX path\n");
+		rndis_ipa_ctx->rx_dropped++;
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	trace_rndis_netif_ni(skb->protocol);
+	result = netif_rx_ni(skb);
+	if (result)
+		RNDIS_IPA_ERROR("fail on netif_rx_ni\n");
+	rndis_ipa_ctx->net->stats.rx_packets++;
+	rndis_ipa_ctx->net->stats.rx_bytes += packet_len;
+}
+
+/** rndis_ipa_stop() - notify the network interface to stop
+ *   sending/receiving data
+ *  @net: the network device being stopped.
+ *
+ * This API is used by the Linux network stack to notify the network driver
+ * that its state was changed to "down".
+ * The driver will stop the "send" queue and change its internal
+ * state to "Connected".
+ * The Netdev shall return to the "Up" state after rndis_ipa_open().
+ */
+static int rndis_ipa_stop(struct net_device *net)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(net);
+	int next_state;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	next_state = rndis_ipa_next_state(rndis_ipa_ctx->state, RNDIS_IPA_STOP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		RNDIS_IPA_DEBUG("can't do network interface down without up\n");
+		return -EPERM;
+	}
+
+	netif_stop_queue(net);
+	pr_info("RNDIS_IPA NetDev queue is stopped\n");
+
+	rndis_ipa_ctx->state = next_state;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/** rndis_ipa_pipe_disconnect_notify() - notify the rndis_ipa Netdev that the
+ *   USB pipes were disconnected
+ * @private: same value that was set by init(), this parameter holds the
+ *  network device pointer.
+ *
+ * USB shall notify the Netdev after disconnecting the pipe.
+ * - The internal driver state shall be returned to its previous
+ *   state (Up or Initialized).
+ * - The Linux network stack shall be informed of carrier off to notify
+ *   user space of the pipe disconnect
+ * - The send queue shall be stopped
+ * During the transition between the pipe disconnection and
+ * the Netdev notification, packets
+ * are expected to be dropped by the IPA driver or IPA core.
+ */
+int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int outstanding_dropped_pkts;
+	int retval;
+	int ret;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	ret = 0;
+	NULL_CHECK_RETVAL(rndis_ipa_ctx);
+	if (ret)
+		return ret;
+	RNDIS_IPA_DEBUG("private=0x%p\n", private);
+
+	next_state = rndis_ipa_next_state
+		(rndis_ipa_ctx->state,
+		RNDIS_IPA_DISCONNECT);
+	if (next_state == RNDIS_IPA_INVALID) {
+		RNDIS_IPA_ERROR("can't disconnect before connect\n");
+		return -EPERM;
+	}
+
+	if (rndis_ipa_ctx->during_xmit_error) {
+		RNDIS_IPA_DEBUG("canceling xmit-error delayed work\n");
+		cancel_delayed_work_sync(
+			&rndis_ipa_ctx->xmit_error_delayed_work);
+		rndis_ipa_ctx->during_xmit_error = false;
+	}
+
+	netif_carrier_off(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("carrier_off notification was sent\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("queue stopped\n");
+
+	outstanding_dropped_pkts =
+		atomic_read(&rndis_ipa_ctx->outstanding_pkts);
+
+	rndis_ipa_ctx->net->stats.tx_dropped += outstanding_dropped_pkts;
+	atomic_set(&rndis_ipa_ctx->outstanding_pkts, 0);
+
+	retval = rndis_ipa_destroy_rm_resource(rndis_ipa_ctx);
+	if (retval) {
+		RNDIS_IPA_ERROR("Fail to clean RM\n");
+		return retval;
+	}
+	RNDIS_IPA_DEBUG("RM was successfully destroyed\n");
+
+	rndis_ipa_ctx->state = next_state;
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	pr_info("RNDIS_IPA NetDev pipes disconnected (%d outstanding clr)\n",
+		outstanding_dropped_pkts);
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+EXPORT_SYMBOL(rndis_ipa_pipe_disconnect_notify);
+
+/**
+ * rndis_ipa_cleanup() - unregister the network interface driver and free
+ *  internal data structs.
+ * @private: same value that was set by init(), this
+ *   parameter holds the network device pointer.
+ *
+ * This function shall be called once the network interface is not
+ * needed anymore, e.g: when the USB composition does not support it.
+ * This function shall be called after the pipes were disconnected.
+ * Detailed description:
+ *  - remove the header-insertion headers from the IPA core
+ *  - delete the driver dependency defined for the IPA resource manager and
+ *    destroy the producer resource
+ *  - remove the debugfs entries
+ *  - deregister the network interface from the Linux network stack
+ *  - free all internal data structs
+ *
+ * It is assumed that no packets shall be sent through HW bridging
+ * during cleanup, to avoid packets trying to add a header that is
+ * removed during cleanup (the IPA configuration manager should have
+ * removed them at this point)
+ */
+void rndis_ipa_cleanup(void *private)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = private;
+	int next_state;
+	int retval;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("private=0x%p\n", private);
+
+	if (!rndis_ipa_ctx) {
+		RNDIS_IPA_ERROR("rndis_ipa_ctx NULL pointer\n");
+		return;
+	}
+
+	next_state = rndis_ipa_next_state
+		(rndis_ipa_ctx->state,
+		RNDIS_IPA_CLEANUP);
+	if (next_state == RNDIS_IPA_INVALID) {
+		RNDIS_IPA_ERROR("use disconnect() before cleanup()\n");
+		return;
+	}
+	RNDIS_IPA_STATE_DEBUG(rndis_ipa_ctx);
+
+	retval = rndis_ipa_deregister_properties(rndis_ipa_ctx->net->name);
+	if (retval) {
+		RNDIS_IPA_ERROR("Fail to deregister Tx/Rx properties\n");
+		return;
+	}
+	RNDIS_IPA_DEBUG("deregister Tx/Rx properties was successful\n");
+
+	retval = rndis_ipa_hdrs_destroy(rndis_ipa_ctx);
+	if (retval)
+		RNDIS_IPA_ERROR(
+			"Failed removing RNDIS headers from IPA core. Continue anyway\n");
+	else
+		RNDIS_IPA_DEBUG("RNDIS headers were removed from IPA core\n");
+
+	rndis_ipa_debugfs_destroy(rndis_ipa_ctx);
+	RNDIS_IPA_DEBUG("debugfs remove was done\n");
+
+	unregister_netdev(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netdev unregistered\n");
+
+	rndis_ipa_ctx->state = next_state;
+	free_netdev(rndis_ipa_ctx->net);
+	pr_info("RNDIS_IPA NetDev was cleaned\n");
+
+	RNDIS_IPA_LOG_EXIT();
+}
+EXPORT_SYMBOL(rndis_ipa_cleanup);
+
+static void rndis_ipa_enable_data_path(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (rndis_ipa_ctx->device_ready_notify) {
+		rndis_ipa_ctx->device_ready_notify();
+		RNDIS_IPA_DEBUG("USB device_ready_notify() was called\n");
+	} else {
+		RNDIS_IPA_DEBUG("device_ready_notify() not supplied\n");
+	}
+
+	netif_start_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
+}
+
+static void rndis_ipa_xmit_error(struct sk_buff *skb)
+{
+	bool retval;
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+	unsigned long delay_jiffies;
+	u8 rand_delay_msec;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("starting Tx-queue backoff\n");
+
+	netif_stop_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_stop_queue was called\n");
+
+	skb_pull(skb, sizeof(rndis_template_hdr));
+	rndis_ipa_ctx->net->stats.tx_errors++;
+
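+	/* add a random 0-255 msec jitter on top of the configured sleep time
+	 * before the Tx queue is restarted
+	 */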
+	get_random_bytes(&rand_delay_msec, sizeof(rand_delay_msec));
+	delay_jiffies = msecs_to_jiffies(
+		rndis_ipa_ctx->error_msec_sleep_time + rand_delay_msec);
+
+	retval = schedule_delayed_work(
+		&rndis_ipa_ctx->xmit_error_delayed_work, delay_jiffies);
+	if (!retval) {
+		RNDIS_IPA_ERROR("fail to schedule delayed work\n");
+		netif_start_queue(rndis_ipa_ctx->net);
+	} else {
+		RNDIS_IPA_DEBUG
+			("work scheduled to start Tx-queue in %d msec\n",
+			rndis_ipa_ctx->error_msec_sleep_time +
+			rand_delay_msec);
+		rndis_ipa_ctx->during_xmit_error = true;
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+static void rndis_ipa_xmit_error_aftercare_wq(struct work_struct *work)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx;
+	struct delayed_work *delayed_work;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	RNDIS_IPA_DEBUG("Starting queue after xmit error\n");
+
+	delayed_work = to_delayed_work(work);
+	rndis_ipa_ctx = container_of
+		(delayed_work, struct rndis_ipa_dev,
+		xmit_error_delayed_work);
+
+	if (unlikely(rndis_ipa_ctx->state != RNDIS_IPA_CONNECTED_AND_UP)) {
+		RNDIS_IPA_ERROR
+			("error aftercare handling in bad state (%d)\n",
+			rndis_ipa_ctx->state);
+		return;
+	}
+
+	rndis_ipa_ctx->during_xmit_error = false;
+
+	netif_start_queue(rndis_ipa_ctx->net);
+	RNDIS_IPA_DEBUG("netif_start_queue() was called\n");
+
+	RNDIS_IPA_LOG_EXIT();
+}
+
+/**
+ * rndis_ipa_prepare_header_insertion() - prepare the header insertion request
+ *  for IPA driver
+ * @eth_type: the Ethernet type for this header-insertion header
+ * @hdr_name: string that shall represent this header in the IPA database
+ * @add_hdr: output for the caller to be used with ipa_add_hdr() to configure
+ *  the IPA core
+ * @dst_mac: tethered PC MAC (Ethernet) address to be added to packets
+ *  for the IPA->USB pipe
+ * @src_mac: device MAC (Ethernet) address to be added to packets
+ *  for the IPA->USB pipe
+ *
+ * This function shall build the header-insertion block request for a
+ * single Ethernet+RNDIS header.
+ * This header shall be inserted for packets processed by IPA
+ * and destined for the USB client.
+ * This header shall be used for HW bridging of packets destined for the
+ *  tethered PC.
+ * For the SW data-path, this header won't be used.
+ */
+static void rndis_ipa_prepare_header_insertion(
+	int eth_type,
+	const char *hdr_name, struct ipa_hdr_add *add_hdr,
+	const void *dst_mac, const void *src_mac)
+{
+	struct ethhdr *eth_hdr;
+
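+	/* the resulting layout in add_hdr->hdr is
+	 * [RNDIS template header][Ethernet II header];
+	 * eth2_ofst below tells the IPA core where the Ethernet header starts
+	 */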
+	add_hdr->hdr_len = sizeof(rndis_template_hdr);
+	add_hdr->is_partial = false;
+	strlcpy(add_hdr->name, hdr_name, IPA_RESOURCE_NAME_MAX);
+
+	memcpy(add_hdr->hdr, &rndis_template_hdr, sizeof(rndis_template_hdr));
+	eth_hdr = (struct ethhdr *)(add_hdr->hdr + sizeof(rndis_template_hdr));
+	memcpy(eth_hdr->h_dest, dst_mac, ETH_ALEN);
+	memcpy(eth_hdr->h_source, src_mac, ETH_ALEN);
+	eth_hdr->h_proto = htons(eth_type);
+	add_hdr->hdr_len += ETH_HLEN;
+	add_hdr->is_eth2_ofst_valid = true;
+	add_hdr->eth2_ofst = sizeof(rndis_template_hdr);
+	add_hdr->type = IPA_HDR_L2_ETHERNET_II;
+}
+
+/**
+ * rndis_ipa_hdrs_cfg() - configure header insertion block in IPA core
+ *  to allow HW bridging
+ * @rndis_ipa_ctx: main driver context
+ * @dst_mac: destination MAC address (tethered PC)
+ * @src_mac: source MAC address (MDM device)
+ *
+ * This function shall add 2 headers:
+ * one header for IPv4 and one header for IPv6.
+ * Both headers shall contain an Ethernet header and an RNDIS header; the only
+ * difference shall be in the EtherType field.
+ * Headers will be committed to HW.
+ *
+ * Returns negative errno, or zero on success
+ */
+static int rndis_ipa_hdrs_cfg(
+	struct rndis_ipa_dev *rndis_ipa_ctx,
+	const void *dst_mac, const void *src_mac)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	hdrs = kzalloc
+		(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+		GFP_KERNEL);
+	if (!hdrs) {
+		RNDIS_IPA_ERROR("mem allocation fail for header-insertion\n");
+		result = -ENOMEM;
+		goto fail_mem;
+	}
+
+	ipv4_hdr = &hdrs->hdr[0];
+	ipv6_hdr = &hdrs->hdr[1];
+	rndis_ipa_prepare_header_insertion
+		(ETH_P_IP, IPV4_HDR_NAME,
+		ipv4_hdr, dst_mac, src_mac);
+	rndis_ipa_prepare_header_insertion
+		(ETH_P_IPV6, IPV6_HDR_NAME,
+		ipv6_hdr, dst_mac, src_mac);
+
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	result = ipa_add_hdr(hdrs);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion(%d)\n", result);
+		goto fail_add_hdr;
+	}
+	if (ipv4_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		result = ipv4_hdr->status;
+		goto fail_add_hdr;
+	}
+	if (ipv6_hdr->status) {
+		RNDIS_IPA_ERROR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		result = ipv6_hdr->status;
+		goto fail_add_hdr;
+	}
+	rndis_ipa_ctx->eth_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	rndis_ipa_ctx->eth_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+	RNDIS_IPA_LOG_EXIT();
+
+fail_add_hdr:
+	kfree(hdrs);
+fail_mem:
+	return result;
+}
+
+/**
+ * rndis_ipa_hdrs_destroy() - remove the IPA core configuration done for
+ *  the driver data path bridging.
+ * @rndis_ipa_ctx: the driver context
+ *
+ * Revert the work done by rndis_ipa_hdrs_cfg(), that is,
+ * remove the 2 Ethernet+RNDIS headers.
+ */
+static int rndis_ipa_hdrs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr) {
+		RNDIS_IPA_ERROR("memory allocation for del_hdr failed\n");
+		return -ENOMEM;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = rndis_ipa_ctx->eth_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = rndis_ipa_ctx->eth_ipv6_hdr_hdl;
+
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		RNDIS_IPA_ERROR("ipa_del_hdr failed\n");
+	else
+		RNDIS_IPA_DEBUG("hdrs deletion done\n");
+
+	kfree(del_hdr);
+	return result;
+}
+
+static struct net_device_stats *rndis_ipa_get_stats(struct net_device *net)
+{
+	return &net->stats;
+}
+
+/**
+ * rndis_ipa_register_properties() - set Tx/Rx properties needed
+ *  by IPA configuration manager
+ * @netdev_name: a string with the name of the network interface device
+ *
+ * Register Tx/Rx properties to allow user space configuration (IPA
+ * Configuration Manager):
+ *
+ * - Two Tx properties (IPA->USB): specify the header names and pipe number
+ *   that shall be used by user space for header-addition configuration
+ *   for IPv4/IPv6 packets flowing from IPA to USB for HW bridging data.
+ *   That header-addition header is added by the Netdev and used by user
+ *   space to close the HW bridge by adding filtering and routing rules
+ *   that point to this header.
+ *
+ * - Two Rx properties (USB->IPA): these properties shall be used by user space
+ *   to configure the IPA core to identify the packets destined
+ *   for Apps-processor by configuring the unicast rules destined for
+ *   the Netdev IP address.
+ *   These rules shall be added based on the attribute mask supplied by
+ *   this function, that is, an always-hit rule.
+ */
+static int rndis_ipa_register_properties(char *netdev_name)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int result = 0;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy
+		(ipv4_property->hdr_name, IPV4_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = IPA_TO_USB_CLIENT;
+	strlcpy
+		(ipv6_property->hdr_name, IPV6_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_USB_PROD;
+	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_USB_PROD;
+	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_properties.num_props = 2;
+
+	result = ipa_register_intf("rndis0", &tx_properties, &rx_properties);
+	if (result)
+		RNDIS_IPA_ERROR("fail on Tx/Rx properties registration\n");
+	else
+		RNDIS_IPA_DEBUG("Tx/Rx properties registration done\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return result;
+}
+
+/**
+ * rndis_ipa_deregister_properties() - remove the 2 Tx and 2 Rx properties
+ * @netdev_name: a string with the name of the network interface device
+ *
+ * This function reverts the work done by rndis_ipa_register_properties().
+ */
+static int rndis_ipa_deregister_properties(char *netdev_name)
+{
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	result = ipa_deregister_intf(netdev_name);
+	if (result) {
+		RNDIS_IPA_DEBUG("Fail on Tx prop deregister\n");
+		return result;
+	}
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+/**
+ * rndis_ipa_create_rm_resource() - create the resource representing
+ *  this Netdev and supply a notification callback for resource events
+ *  such as Grant/Release
+ * @rndis_ipa_ctx: this driver context
+ *
+ * In order to make sure all needed resources are available during packet
+ * transmit, this Netdev shall use the Request/Release mechanism of
+ * the IPA resource manager.
+ * This mechanism shall iterate over a dependency graph and make sure
+ * all dependent entities are ready for packet Tx
+ * transfer (Apps->IPA->USB).
+ * In this function the resource representing the Netdev is created,
+ * in addition to the basic dependency between the Netdev and the USB client.
+ * Hence, the USB client is a dependency for the Netdev and may be notified in
+ * case of a packet transmit from this Netdev to the tethered host.
+ * As implied by the "may" in the above sentence, there is a scenario where
+ * the USB is not notified. This is done thanks to the IPA resource manager
+ * inactivity timer.
+ * The inactivity timer allows the Release requests to be delayed in order
+ * to prevent ping-pong with the USB and other dependencies.
+ */
+static int rndis_ipa_create_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	struct ipa_rm_create_params create_params = {0};
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	create_params.name = DRV_RESOURCE_ID;
+	create_params.reg_params.user_data = rndis_ipa_ctx;
+	create_params.reg_params.notify_cb = rndis_ipa_rm_notify;
+	result = ipa_rm_create_resource(&create_params);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on ipa_rm_create_resource\n");
+		goto fail_rm_create;
+	}
+	RNDIS_IPA_DEBUG("RM client was created\n");
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(DRV_RESOURCE_ID, &profile);
+
+	result = ipa_rm_inactivity_timer_init
+		(DRV_RESOURCE_ID,
+		INACTIVITY_MSEC_DELAY);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail on ipa_rm_inactivity_timer_init\n");
+		goto fail_inactivity_timer;
+	}
+
+	RNDIS_IPA_DEBUG("rm_it client was created\n");
+
+	result = ipa_rm_add_dependency_sync
+		(DRV_RESOURCE_ID,
+		IPA_RM_RESOURCE_USB_CONS);
+
+	if (result && result != -EINPROGRESS)
+		RNDIS_IPA_ERROR("unable to add RNDIS/USB dependency (%d)\n",
+				result);
+	else
+		RNDIS_IPA_DEBUG("RNDIS/USB dependency was set\n");
+
+	result = ipa_rm_add_dependency_sync
+		(IPA_RM_RESOURCE_USB_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (result && result != -EINPROGRESS)
+		RNDIS_IPA_ERROR("unable to add USB/APPS dependency (%d)\n",
+				result);
+	else
+		RNDIS_IPA_DEBUG("USB/APPS dependency was set\n");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_inactivity_timer:
+fail_rm_create:
+	return result;
+}
+
+/**
+ * rndis_ipa_destroy_rm_resource() - delete the dependency and destroy
+ * the resource created by rndis_ipa_create_rm_resource()
+ * @rndis_ipa_ctx: this driver context
+ *
+ * This function shall delete the dependency created between
+ * the Netdev and the USB.
+ * In addition, the inactivity timer shall be destroyed and the resource shall
+ * be deleted.
+ */
+static int rndis_ipa_destroy_rm_resource(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	result = ipa_rm_delete_dependency
+		(DRV_RESOURCE_ID,
+		IPA_RM_RESOURCE_USB_CONS);
+	if (result && result != -EINPROGRESS) {
+		RNDIS_IPA_ERROR("Fail to delete RNDIS/USB dependency\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("RNDIS/USB dependency was successfully deleted\n");
+
+	result = ipa_rm_delete_dependency
+		(IPA_RM_RESOURCE_USB_PROD,
+		IPA_RM_RESOURCE_APPS_CONS);
+	if (result == -EINPROGRESS) {
+		RNDIS_IPA_DEBUG("RM dependency deletion is in progress");
+	} else if (result) {
+		RNDIS_IPA_ERROR("Fail to delete USB/APPS dependency\n");
+		goto bail;
+	} else {
+		RNDIS_IPA_DEBUG("USB/APPS dependency was deleted\n");
+	}
+
+	result = ipa_rm_inactivity_timer_destroy(DRV_RESOURCE_ID);
+	if (result) {
+		RNDIS_IPA_ERROR("Fail to destroy inactivity timer\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG("RM inactivity timer was successfully destroyed\n");
+
+	result = ipa_rm_delete_resource(DRV_RESOURCE_ID);
+	if (result) {
+		RNDIS_IPA_ERROR("resource deletion failed\n");
+		goto bail;
+	}
+	RNDIS_IPA_DEBUG
+		("Netdev RM resource was deleted (resid:%d)\n",
+		DRV_RESOURCE_ID);
+
+	RNDIS_IPA_LOG_EXIT();
+
+bail:
+	return result;
+}
+
+/**
+ * resource_request() - request for the Netdev resource
+ * @rndis_ipa_ctx: main driver context
+ *
+ * This function shall send the IPA resource manager inactivity timer a
+ * request to Grant the Netdev producer.
+ * In case the resource is already Granted, the function shall return
+ * immediately and "pet" the inactivity timer.
+ * In case the resource was not already Granted, this function shall
+ * return EINPROGRESS and the Netdev shall stop the send queue until
+ * the IPA resource manager notifies it that the resource is
+ * granted (done in a different context)
+ */
+static int resource_request(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int result = 0;
+
+	if (!rm_enabled(rndis_ipa_ctx))
+		goto out;
+	result = ipa_rm_inactivity_timer_request_resource(
+			DRV_RESOURCE_ID);
+out:
+	return result;
+}
+
+/**
+ * resource_release() - release the Netdev resource
+ * @rndis_ipa_ctx: main driver context
+ *
+ * Start the inactivity timer countdown by using the IPA resource
+ * manager inactivity timer.
+ * The actual resource release shall occur only if no request is made
+ * during the INACTIVITY_MSEC_DELAY.
+ */
+static void resource_release(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	if (!rm_enabled(rndis_ipa_ctx))
+		goto out;
+	ipa_rm_inactivity_timer_release_resource(DRV_RESOURCE_ID);
+out:
+	return;
+}
+
+/**
+ * rndis_encapsulate_skb() - encapsulate the given Ethernet skb with
+ *  an RNDIS header
+ * @skb: packet to be encapsulated with the RNDIS header
+ *
+ * Shall use a template header for RNDIS and update it with the given
+ * skb values.
+ * The packet is expected to be already encapsulated with an Ethernet header.
+ */
+static struct sk_buff *rndis_encapsulate_skb(struct sk_buff *skb)
+{
+	struct rndis_pkt_hdr *rndis_hdr;
+	int payload_byte_len = skb->len;
+
+	/* if there is no room in this skb, allocate a new one */
+	if (unlikely(skb_headroom(skb) < sizeof(rndis_template_hdr))) {
+		struct sk_buff *new_skb = skb_copy_expand(skb,
+			sizeof(rndis_template_hdr), 0, GFP_ATOMIC);
+		if (!new_skb) {
+			RNDIS_IPA_ERROR("no memory for skb expand\n");
+			return skb;
+		}
+		RNDIS_IPA_DEBUG("skb expanded. old %p new %p\n", skb, new_skb);
+		dev_kfree_skb_any(skb);
+		skb = new_skb;
+	}
+
+	/* make room at the head of the SKB to put the RNDIS header */
+	rndis_hdr = (struct rndis_pkt_hdr *)skb_push(skb,
+					sizeof(rndis_template_hdr));
+
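+	/* copy the template, then add the Ethernet frame length (captured in
+	 * payload_byte_len before the push) to its length fields so the RNDIS
+	 * header describes the complete message
+	 */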
+	memcpy(rndis_hdr, &rndis_template_hdr, sizeof(*rndis_hdr));
+	rndis_hdr->msg_len +=  payload_byte_len;
+	rndis_hdr->data_len +=  payload_byte_len;
+
+	return skb;
+}
+
+/**
+ * rx_filter() - logic that decides if the current skb is to be filtered out
+ * @skb: skb that may be sent up to the network stack
+ *
+ * This function shall do Rx packet filtering on the Netdev level.
+ */
+static bool rx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+
+	return rndis_ipa_ctx->rx_filter;
+}
+
+/**
+ * tx_filter() - logic that decides if the current skb is to be filtered out
+ * @skb: skb that may be sent to the USB core
+ *
+ * This function shall do Tx packet filtering on the Netdev level.
+ * An ICMP filter bypass is possible, allowing only ICMP packets to be
+ * sent (pings, etc.)
+ */
+static bool tx_filter(struct sk_buff *skb)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = netdev_priv(skb->dev);
+	bool is_icmp;
+
+	if (likely(!rndis_ipa_ctx->tx_filter))
+		return false;
+
+	is_icmp = (skb->protocol == htons(ETH_P_IP)	&&
+		ip_hdr(skb)->protocol == IPPROTO_ICMP);
+
+	if ((!rndis_ipa_ctx->icmp_filter) && is_icmp)
+		return false;
+
+	return true;
+}
+
+/**
+ * rm_enabled() - allow the use of resource manager Request/Release to
+ *  be bypassed
+ * @rndis_ipa_ctx: main driver context
+ *
+ * When the resource manager flag is disabled, the Request for the Netdev
+ * resource shall be bypassed and the packet shall be sent.
+ * Accordingly, the Release request shall be bypassed as well.
+ */
+static bool rm_enabled(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	return rndis_ipa_ctx->rm_enable;
+}
+
+/**
+ * rndis_ipa_ep_registers_cfg() - configure the USB endpoints
+ * @usb_to_ipa_hdl: handle received from ipa_connect which represents
+ *  the USB to IPA end-point
+ * @ipa_to_usb_hdl: handle received from ipa_connect which represents
+ *  the IPA to USB end-point
+ * @max_xfer_size_bytes_to_dev: the maximum size, in bytes, that the device
+ *  expects to receive from the host. Supplied in REMOTE_NDIS_INITIALIZE_CMPLT.
+ * @max_xfer_size_bytes_to_host: the maximum size, in bytes, that the host
+ *  expects to receive from the device. Supplied in REMOTE_NDIS_INITIALIZE_MSG.
+ * @mtu: the netdev MTU size, in bytes
+ * @deaggr_enable: true to enable de-aggregation on the USB->IPA pipe
+ *
+ * USB to IPA pipe:
+ *  - de-aggregation
+ *  - Remove Ethernet header
+ *  - Remove RNDIS header
+ *  - SRC NAT
+ *  - Default routing(0)
+ * IPA to USB Pipe:
+ *  - aggregation
+ *  - Add Ethernet header
+ *  - Add RNDIS header
+ */
+static int rndis_ipa_ep_registers_cfg(
+	u32 usb_to_ipa_hdl,
+	u32 ipa_to_usb_hdl,
+	u32 max_xfer_size_bytes_to_dev,
+	u32 max_xfer_size_bytes_to_host,
+	u32 mtu,
+	bool deaggr_enable)
+{
+	int result;
+	struct ipa_ep_cfg *usb_to_ipa_ep_cfg;
+
+	if (deaggr_enable) {
+		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_en;
+		RNDIS_IPA_DEBUG("deaggregation enabled\n");
+	} else {
+		usb_to_ipa_ep_cfg = &usb_to_ipa_ep_cfg_deaggr_dis;
+		RNDIS_IPA_DEBUG("deaggregation disabled\n");
+	}
+
+	usb_to_ipa_ep_cfg->deaggr.max_packet_len = max_xfer_size_bytes_to_dev;
+	result = ipa_cfg_ep(usb_to_ipa_hdl, usb_to_ipa_ep_cfg);
+	if (result) {
+		pr_err("failed to configure USB to IPA end-point\n");
+		return result;
+	}
+	RNDIS_IPA_DEBUG("IPA<-USB end-point configured\n");
+
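+	/* leave room for one MTU-sized packet; the division by 1024 converts
+	 * the remaining budget to the KB granularity expected by
+	 * aggr_byte_limit. A zero result falls back to one packet per
+	 * aggregate with no time limit.
+	 */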
+	ipa_to_usb_ep_cfg.aggr.aggr_byte_limit =
+		(max_xfer_size_bytes_to_host - mtu) / 1024;
+
+	if (ipa_to_usb_ep_cfg.aggr.aggr_byte_limit == 0) {
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit = 0;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit = 1;
+	} else {
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit =
+			DEFAULT_AGGR_TIME_LIMIT;
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit =
+			DEFAULT_AGGR_PKT_LIMIT;
+	}
+
+	RNDIS_IPA_DEBUG(
+		"RNDIS aggregation param: en=%d byte_limit=%d time_limit=%d pkt_limit=%d\n"
+		, ipa_to_usb_ep_cfg.aggr.aggr_en,
+		ipa_to_usb_ep_cfg.aggr.aggr_byte_limit,
+		ipa_to_usb_ep_cfg.aggr.aggr_time_limit,
+		ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit);
+
+	result = ipa_cfg_ep(ipa_to_usb_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		pr_err("failed to configure IPA to USB end-point\n");
+		return result;
+	}
+	RNDIS_IPA_DEBUG("IPA->USB end-point configured\n");
+
+	return 0;
+}
+
+/**
+ * rndis_ipa_set_device_ethernet_addr() - set device Ethernet address
+ * @dev_ethaddr: device Ethernet address
+ *
+ * Returns 0 for success, negative otherwise
+ */
+static int rndis_ipa_set_device_ethernet_addr(
+	u8 *dev_ethaddr,
+	u8 device_ethaddr[])
+{
+	if (!is_valid_ether_addr(device_ethaddr))
+		return -EINVAL;
+	memcpy(dev_ethaddr, device_ethaddr, ETH_ALEN);
+
+	return 0;
+}
+
+/** rndis_ipa_next_state - return the next state of the driver
+ * @current_state: the current state of the driver
+ * @operation: an enum which represent the operation being made on the driver
+ *  by its API.
+ *
+ * This function implements the driver internal state machine.
+ * Its decisions are based on the driver current state and the operation
+ * being made.
+ * In case the operation is invalid this state machine will return
+ * the value RNDIS_IPA_INVALID to inform the caller of a forbidden sequence.
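+ *
+ * Transition summary (as implemented by the switch below):
+ *  UNLOADED         --initialize--> INITIALIZED
+ *  INITIALIZED      --connect--> CONNECTED, --open--> UP,
+ *                   --cleanup--> UNLOADED
+ *  CONNECTED        --open--> CONNECTED_AND_UP, --disconnect--> INITIALIZED
+ *  UP               --connect--> CONNECTED_AND_UP, --stop--> INITIALIZED,
+ *                   --cleanup--> UNLOADED
+ *  CONNECTED_AND_UP --stop--> CONNECTED, --disconnect--> UP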
+ */
+static enum rndis_ipa_state rndis_ipa_next_state(
+		enum rndis_ipa_state current_state,
+		enum rndis_ipa_operation operation)
+{
+	int next_state = RNDIS_IPA_INVALID;
+
+	switch (current_state) {
+	case RNDIS_IPA_UNLOADED:
+		if (operation == RNDIS_IPA_INITIALIZE)
+			next_state = RNDIS_IPA_INITIALIZED;
+		break;
+	case RNDIS_IPA_INITIALIZED:
+		if (operation == RNDIS_IPA_CONNECT)
+			next_state = RNDIS_IPA_CONNECTED;
+		else if (operation == RNDIS_IPA_OPEN)
+			next_state = RNDIS_IPA_UP;
+		else if (operation == RNDIS_IPA_CLEANUP)
+			next_state = RNDIS_IPA_UNLOADED;
+		break;
+	case RNDIS_IPA_CONNECTED:
+		if (operation == RNDIS_IPA_DISCONNECT)
+			next_state = RNDIS_IPA_INITIALIZED;
+		else if (operation == RNDIS_IPA_OPEN)
+			next_state = RNDIS_IPA_CONNECTED_AND_UP;
+		break;
+	case RNDIS_IPA_UP:
+		if (operation == RNDIS_IPA_STOP)
+			next_state = RNDIS_IPA_INITIALIZED;
+		else if (operation == RNDIS_IPA_CONNECT)
+			next_state = RNDIS_IPA_CONNECTED_AND_UP;
+		else if (operation == RNDIS_IPA_CLEANUP)
+			next_state = RNDIS_IPA_UNLOADED;
+		break;
+	case RNDIS_IPA_CONNECTED_AND_UP:
+		if (operation == RNDIS_IPA_STOP)
+			next_state = RNDIS_IPA_CONNECTED;
+		else if (operation == RNDIS_IPA_DISCONNECT)
+			next_state = RNDIS_IPA_UP;
+		break;
+	default:
+		RNDIS_IPA_ERROR("State is not supported\n");
+		WARN_ON(true);
+		break;
+	}
+
+	RNDIS_IPA_DEBUG
+		("state transition (%s -> %s) - %s\n",
+		rndis_ipa_state_string(current_state),
+		rndis_ipa_state_string(next_state),
+		next_state == RNDIS_IPA_INVALID ?
+		"Forbidden" : "Allowed");
+
+	return next_state;
+}
+
+/**
+ * rndis_ipa_state_string - return the state string representation
+ * @state: enum which describe the state
+ */
+static const char *rndis_ipa_state_string(enum rndis_ipa_state state)
+{
+	switch (state) {
+	case RNDIS_IPA_UNLOADED:
+		return "RNDIS_IPA_UNLOADED";
+	case RNDIS_IPA_INITIALIZED:
+		return "RNDIS_IPA_INITIALIZED";
+	case RNDIS_IPA_CONNECTED:
+		return "RNDIS_IPA_CONNECTED";
+	case RNDIS_IPA_UP:
+		return "RNDIS_IPA_UP";
+	case RNDIS_IPA_CONNECTED_AND_UP:
+		return "RNDIS_IPA_CONNECTED_AND_UP";
+	default:
+		return "Not supported";
+	}
+}
+
+static void rndis_ipa_dump_skb(struct sk_buff *skb)
+{
+	int i;
+	u32 *cur = (u32 *)skb->data;
+	u8 *byte;
+
+	RNDIS_IPA_DEBUG
+		("packet dump start for skb->len=%d\n",
+		skb->len);
+
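+	/* dump one 32-bit word per line; any trailing bytes beyond a multiple
+	 * of four (skb->len % 4) are not printed
+	 */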
+	for (i = 0; i < (skb->len / 4); i++) {
+		byte = (u8 *)(cur + i);
+		pr_info
+			("%2d %08x   %02x %02x %02x %02x\n",
+			i, *(cur + i),
+			byte[0], byte[1], byte[2], byte[3]);
+	}
+	RNDIS_IPA_DEBUG
+		("packet dump ended for skb->len=%d\n", skb->len);
+}
+
+/**
+ * Create the debugfs root folder and entries for the driver
+ */
+static int rndis_ipa_debugfs_init(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	const mode_t flags_read_write = 0666;
+	const mode_t flags_read_only = 0444;
+	const mode_t  flags_write_only = 0222;
+	struct dentry *file;
+	struct dentry *aggr_directory;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	if (!rndis_ipa_ctx)
+		return -EINVAL;
+
+	rndis_ipa_ctx->directory = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL);
+	if (!rndis_ipa_ctx->directory) {
+		RNDIS_IPA_ERROR("could not create debugfs directory entry\n");
+		goto fail_directory;
+	}
+
+	file = debugfs_create_bool
+		("tx_filter", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_filter);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs tx_filter file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("rx_filter", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_filter);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs rx_filter file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("icmp_filter", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->icmp_filter);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs icmp_filter file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("rm_enable", flags_read_write,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->rm_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create debugfs rm file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("outstanding_high", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->outstanding_high);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create outstanding_high file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("outstanding_low", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->outstanding_low);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create outstanding_low file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_file
+		("outstanding", flags_read_only,
+		rndis_ipa_ctx->directory,
+		rndis_ipa_ctx, &rndis_ipa_debugfs_atomic_ops);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create outstanding file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_file
+		("loopback_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		rndis_ipa_ctx, &rndis_ipa_loopback_ops);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create loopback_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u8
+		("state", flags_read_only,
+		rndis_ipa_ctx->directory, (u8 *)&rndis_ipa_ctx->state);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create state file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("tx_dropped", flags_read_only,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->tx_dropped);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create tx_dropped file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("rx_dropped", flags_read_only,
+		rndis_ipa_ctx->directory, &rndis_ipa_ctx->rx_dropped);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create rx_dropped file\n");
+		goto fail_file;
+	}
+
+	aggr_directory = debugfs_create_dir
+		(DEBUGFS_AGGR_DIR_NAME,
+		rndis_ipa_ctx->directory);
+	if (!aggr_directory) {
+		RNDIS_IPA_ERROR("could not create debugfs aggr entry\n");
+		goto fail_directory;
+	}
+
+	file = debugfs_create_file
+		("aggr_value_set", flags_write_only,
+		aggr_directory,
+		rndis_ipa_ctx, &rndis_ipa_aggr_ops);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_value_set file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u8
+		("aggr_enable", flags_read_write,
+		aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr_en);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u8
+		("aggr_type", flags_read_write,
+		aggr_directory, (u8 *)&ipa_to_usb_ep_cfg.aggr.aggr);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_type file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("aggr_byte_limit", flags_read_write,
+		aggr_directory,
+		&ipa_to_usb_ep_cfg.aggr.aggr_byte_limit);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_byte_limit file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("aggr_time_limit", flags_read_write,
+		aggr_directory,
+		&ipa_to_usb_ep_cfg.aggr.aggr_time_limit);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_time_limit file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("aggr_pkt_limit", flags_read_write,
+		aggr_directory,
+		&ipa_to_usb_ep_cfg.aggr.aggr_pkt_limit);
+	if (!file) {
+		RNDIS_IPA_ERROR("could not create aggr_pkt_limit file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("tx_dump_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->tx_dump_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create tx_dump_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("rx_dump_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->rx_dump_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create rx_dump_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("deaggregation_enable", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->deaggregation_enable);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create deaggregation_enable file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_u32
+		("error_msec_sleep_time", flags_read_write,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->error_msec_sleep_time);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create error_msec_sleep_time file\n");
+		goto fail_file;
+	}
+
+	file = debugfs_create_bool
+		("during_xmit_error", flags_read_only,
+		rndis_ipa_ctx->directory,
+		&rndis_ipa_ctx->during_xmit_error);
+	if (!file) {
+		RNDIS_IPA_ERROR("fail to create during_xmit_error file\n");
+		goto fail_file;
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+fail_file:
+	debugfs_remove_recursive(rndis_ipa_ctx->directory);
+fail_directory:
+	return -EFAULT;
+}
+
+static void rndis_ipa_debugfs_destroy(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	debugfs_remove_recursive(rndis_ipa_ctx->directory);
+}
+
+static int rndis_ipa_debugfs_aggr_open
+	(struct inode *inode,
+	struct file *file)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+
+	file->private_data = rndis_ipa_ctx;
+
+	return 0;
+}
+
+static ssize_t rndis_ipa_debugfs_aggr_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data;
+	int result;
+
+	result = ipa_cfg_ep(rndis_ipa_ctx->usb_to_ipa_hdl, &ipa_to_usb_ep_cfg);
+	if (result) {
+		pr_err("failed to re-configure USB to IPA end-point\n");
+		return result;
+	}
+	pr_info("IPA<-USB end-point re-configured\n");
+
+	return count;
+}
+
+static int rndis_ipa_debugfs_loopback_open
+	(struct inode *inode, struct file *file)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+
+	file->private_data = rndis_ipa_ctx;
+
+	return 0;
+}
+
+static ssize_t rndis_ipa_debugfs_loopback_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int cnt;
+	struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data;
+
+	file->private_data = &rndis_ipa_ctx->loopback_enable;
+
+	cnt = rndis_ipa_debugfs_enable_read
+		(file, ubuf, count, ppos);
+
+	return cnt;
+}
+
+static ssize_t rndis_ipa_debugfs_loopback_write
+	(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
+{
+	int retval;
+	int cnt;
+	struct rndis_ipa_dev *rndis_ipa_ctx = file->private_data;
+	bool old_state = rndis_ipa_ctx->loopback_enable;
+
+	file->private_data = &rndis_ipa_ctx->loopback_enable;
+
+	cnt = rndis_ipa_debugfs_enable_write(file, buf, count, ppos);
+
+	RNDIS_IPA_DEBUG("loopback_enable was set to:%d->%d\n",
+			old_state, rndis_ipa_ctx->loopback_enable);
+
+	if (old_state == rndis_ipa_ctx->loopback_enable) {
+		RNDIS_IPA_ERROR("NOP - same state\n");
+		return cnt;
+	}
+
+	retval = rndis_ipa_setup_loopback(
+				rndis_ipa_ctx->loopback_enable,
+				rndis_ipa_ctx);
+	if (retval)
+		rndis_ipa_ctx->loopback_enable = old_state;
+
+	return cnt;
+}
+
+static int rndis_ipa_debugfs_atomic_open(struct inode *inode, struct file *file)
+{
+	struct rndis_ipa_dev *rndis_ipa_ctx = inode->i_private;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	file->private_data = &rndis_ipa_ctx->outstanding_pkts;
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+}
+
+static ssize_t rndis_ipa_debugfs_atomic_read
+	(struct file *file, char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u8 atomic_str[DEBUGFS_TEMP_BUF_SIZE] = {0};
+	atomic_t *atomic_var = file->private_data;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	nbytes = scnprintf
+		(atomic_str, sizeof(atomic_str), "%d\n",
+		atomic_read(atomic_var));
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return simple_read_from_buffer(ubuf, count, ppos, atomic_str, nbytes);
+}
+
+static ssize_t rndis_ipa_debugfs_enable_read
+	(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int size = 0;
+	int ret;
+	loff_t pos;
+	u8 enable_str[sizeof(char) * 3] = {0};
+	bool *enable = file->private_data;
+
+	pos = *ppos;
+	nbytes = scnprintf(enable_str, sizeof(enable_str), "%d\n", *enable);
+	ret = simple_read_from_buffer(ubuf, count, ppos, enable_str, nbytes);
+	if (ret < 0) {
+		RNDIS_IPA_ERROR("simple_read_from_buffer problem\n");
+		return ret;
+	}
+	size += ret;
+	count -= nbytes;
+	*ppos = pos + size;
+	return size;
+}
+
+static ssize_t rndis_ipa_debugfs_enable_write
+	(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	char input;
+	bool *enable = file->private_data;
+
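+	/* the input is expected to be a single character plus a trailing
+	 * newline (two bytes in total)
+	 */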
+	if (count != sizeof(input) + 1) {
+		RNDIS_IPA_ERROR("wrong input length(%zd)\n", count);
+		return -EINVAL;
+	}
+	if (!buf) {
+		RNDIS_IPA_ERROR("Bad argument\n");
+		return -EINVAL;
+	}
+	missing = copy_from_user(&input, buf, 1);
+	if (missing)
+		return -EFAULT;
+	RNDIS_IPA_DEBUG("input received %c\n", input);
+	*enable = input - '0';
+	RNDIS_IPA_DEBUG("value was set to %d\n", *enable);
+	return count;
+}
+
+/**
+ * Connects a single pipe between IPA and BAM-DMA.
+ * Depending on the requested mode, this shall simulate either the IPA->USB
+ * (Tx) leg or the USB->IPA (Rx) leg of the USB core data path.
+ */
+static int rndis_ipa_loopback_pipe_create(
+		struct rndis_ipa_dev *rndis_ipa_ctx,
+		struct rndis_loopback_pipe *loopback_pipe)
+{
+	int retval;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	/* SPS pipe has two side handshake
+	 * This is the first handshake of IPA->BAMDMA,
+	 * This is the IPA side
+	 */
+	loopback_pipe->ipa_connect_params.client = loopback_pipe->ipa_client;
+	loopback_pipe->ipa_connect_params.client_bam_hdl =
+			rndis_ipa_ctx->bam_dma_hdl;
+	loopback_pipe->ipa_connect_params.client_ep_idx =
+		loopback_pipe->peer_pipe_index;
+	loopback_pipe->ipa_connect_params.desc_fifo_sz = BAM_DMA_DESC_FIFO_SIZE;
+	loopback_pipe->ipa_connect_params.data_fifo_sz = BAM_DMA_DATA_FIFO_SIZE;
+	loopback_pipe->ipa_connect_params.notify = loopback_pipe->ipa_callback;
+	loopback_pipe->ipa_connect_params.priv = rndis_ipa_ctx;
+	loopback_pipe->ipa_connect_params.ipa_ep_cfg =
+		*loopback_pipe->ipa_ep_cfg;
+
+	/* loopback_pipe->ipa_sps_connect is out param */
+	retval = ipa_connect
+		(&loopback_pipe->ipa_connect_params,
+		&loopback_pipe->ipa_sps_connect,
+		&loopback_pipe->ipa_drv_ep_hdl);
+	if (retval) {
+		RNDIS_IPA_ERROR("ipa_connect() fail (%d)", retval);
+		return retval;
+	}
+	RNDIS_IPA_DEBUG("ipa_connect() succeeded, ipa_drv_ep_hdl=%d",
+			loopback_pipe->ipa_drv_ep_hdl);
+
+	/* SPS pipe has two side handshake
+	 * This is the second handshake of IPA->BAMDMA,
+	 * This is the BAMDMA side
+	 */
+	loopback_pipe->dma_sps = sps_alloc_endpoint();
+	if (!loopback_pipe->dma_sps) {
+		RNDIS_IPA_ERROR("sps_alloc_endpoint() failed ");
+		retval = -ENOMEM;
+		goto fail_sps_alloc;
+	}
+
+	retval = sps_get_config
+		(loopback_pipe->dma_sps,
+		&loopback_pipe->dma_connect);
+	if (retval) {
+		RNDIS_IPA_ERROR("sps_get_config() failed (%d)", retval);
+		goto fail_get_cfg;
+	}
+
+	/* Start setting the non IPA ep for SPS driver*/
+	loopback_pipe->dma_connect.mode = loopback_pipe->mode;
+
+	/* SPS_MODE_DEST: DMA end point is the dest (consumer) IPA->DMA */
+	if (loopback_pipe->mode == SPS_MODE_DEST) {
+		loopback_pipe->dma_connect.source =
+				loopback_pipe->ipa_sps_connect.ipa_bam_hdl;
+		loopback_pipe->dma_connect.src_pipe_index =
+				loopback_pipe->ipa_sps_connect.ipa_ep_idx;
+		loopback_pipe->dma_connect.destination =
+				rndis_ipa_ctx->bam_dma_hdl;
+		loopback_pipe->dma_connect.dest_pipe_index =
+				loopback_pipe->peer_pipe_index;
+
+	/* SPS_MODE_SRC: DMA end point is the source (producer) DMA->IPA */
+	} else {
+		loopback_pipe->dma_connect.source =
+				rndis_ipa_ctx->bam_dma_hdl;
+		loopback_pipe->dma_connect.src_pipe_index =
+				loopback_pipe->peer_pipe_index;
+		loopback_pipe->dma_connect.destination =
+				loopback_pipe->ipa_sps_connect.ipa_bam_hdl;
+		loopback_pipe->dma_connect.dest_pipe_index =
+				loopback_pipe->ipa_sps_connect.ipa_ep_idx;
+	}
+
+	loopback_pipe->dma_connect.desc = loopback_pipe->ipa_sps_connect.desc;
+	loopback_pipe->dma_connect.data = loopback_pipe->ipa_sps_connect.data;
+	loopback_pipe->dma_connect.event_thresh = 0x10;
+	/* BAM-to-BAM */
+	loopback_pipe->dma_connect.options = SPS_O_AUTO_ENABLE;
+
+	RNDIS_IPA_DEBUG("doing sps_connect() with - ");
+	RNDIS_IPA_DEBUG
+		("src bam_hdl:0x%lx, src_pipe#:%d",
+		loopback_pipe->dma_connect.source,
+		loopback_pipe->dma_connect.src_pipe_index);
+	RNDIS_IPA_DEBUG
+		("dst bam_hdl:0x%lx, dst_pipe#:%d",
+		loopback_pipe->dma_connect.destination,
+		loopback_pipe->dma_connect.dest_pipe_index);
+
+	retval = sps_connect
+		(loopback_pipe->dma_sps,
+		&loopback_pipe->dma_connect);
+	if (retval) {
+		RNDIS_IPA_ERROR
+			("sps_connect() fail for BAMDMA side (%d)",
+			retval);
+		goto fail_sps_connect;
+	}
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_sps_connect:
+fail_get_cfg:
+	sps_free_endpoint(loopback_pipe->dma_sps);
+fail_sps_alloc:
+	ipa_disconnect(loopback_pipe->ipa_drv_ep_hdl);
+	return retval;
+}
+
+static void rndis_ipa_destroy_loopback_pipe(
+		struct rndis_loopback_pipe *loopback_pipe)
+{
+	sps_disconnect(loopback_pipe->dma_sps);
+	sps_free_endpoint(loopback_pipe->dma_sps);
+}
+
+/**
+ * rndis_ipa_create_loopback() - create a BAM-DMA loopback
+ *  in order to replace the USB core
+ */
+static int rndis_ipa_create_loopback(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	/* The BAM handle should be used as
+	 * source/destination in the sps_connect()
+	 */
+	int retval;
+
+	RNDIS_IPA_LOG_ENTRY();
+
+	retval = sps_ctrl_bam_dma_clk(true);
+	if (retval) {
+		RNDIS_IPA_ERROR("fail on enabling BAM-DMA clocks");
+		return -ENODEV;
+	}
+
+	/* Get BAM handle instead of USB handle */
+	rndis_ipa_ctx->bam_dma_hdl = sps_dma_get_bam_handle();
+	if (!rndis_ipa_ctx->bam_dma_hdl) {
+		RNDIS_IPA_ERROR("sps_dma_get_bam_handle() failed");
+		return -ENODEV;
+	}
+	RNDIS_IPA_DEBUG("sps_dma_get_bam_handle() succeeded (0x%x)",
+			rndis_ipa_ctx->bam_dma_hdl);
+
+	/* IPA<-BAMDMA, NetDev Rx path (BAMDMA is the USB stub) */
+	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_client =
+	IPA_CLIENT_USB_PROD;
+	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.peer_pipe_index =
+		FROM_USB_TO_IPA_BAMDMA;
+	/*DMA EP mode*/
+	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.mode = SPS_MODE_SRC;
+	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_ep_cfg =
+		&usb_to_ipa_ep_cfg_deaggr_en;
+	rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_callback =
+			rndis_ipa_packet_receive_notify;
+	RNDIS_IPA_DEBUG("setting up IPA<-BAMDMA pipe (RNDIS_IPA RX path)");
+	retval = rndis_ipa_loopback_pipe_create
+		(rndis_ipa_ctx,
+		&rndis_ipa_ctx->usb_to_ipa_loopback_pipe);
+	if (retval) {
+		RNDIS_IPA_ERROR
+		("fail to create IPA<-BAMDMA pipe");
+		goto fail_to_usb;
+	}
+	RNDIS_IPA_DEBUG("IPA<-BAMDMA pipe successfully connected (RX path)");
+
+	/* IPA->BAMDMA, NetDev Tx path (BAMDMA is the USB stub)*/
+	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_client =
+		IPA_CLIENT_USB_CONS;
+	/*DMA EP mode*/
+	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.mode = SPS_MODE_DEST;
+	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_ep_cfg = &ipa_to_usb_ep_cfg;
+	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.peer_pipe_index =
+		FROM_IPA_TO_USB_BAMDMA;
+	rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_callback =
+			rndis_ipa_tx_complete_notify;
+	RNDIS_IPA_DEBUG("setting up IPA->BAMDMA pipe (RNDIS_IPA TX path)");
+	retval = rndis_ipa_loopback_pipe_create
+		(rndis_ipa_ctx,
+		&rndis_ipa_ctx->ipa_to_usb_loopback_pipe);
+	if (retval) {
+		RNDIS_IPA_ERROR("fail to create IPA->BAMDMA pipe");
+		goto fail_from_usb;
+	}
+	RNDIS_IPA_DEBUG("IPA->BAMDMA pipe successfully connected (TX path)");
+
+	RNDIS_IPA_LOG_EXIT();
+
+	return 0;
+
+fail_from_usb:
+	rndis_ipa_destroy_loopback_pipe(
+			&rndis_ipa_ctx->usb_to_ipa_loopback_pipe);
+fail_to_usb:
+
+	return retval;
+}
+
+static void rndis_ipa_destroy_loopback(struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	rndis_ipa_destroy_loopback_pipe(
+			&rndis_ipa_ctx->ipa_to_usb_loopback_pipe);
+	rndis_ipa_destroy_loopback_pipe(
+			&rndis_ipa_ctx->usb_to_ipa_loopback_pipe);
+	sps_dma_free_bam_handle(rndis_ipa_ctx->bam_dma_hdl);
+	if (sps_ctrl_bam_dma_clk(false))
+		RNDIS_IPA_ERROR("fail to disable BAM-DMA clocks");
+}
+
+/**
+ * rndis_ipa_setup_loopback() - create/destroy a loopback on IPA HW
+ *  (as USB pipes loopback) and notify RNDIS_IPA netdev for pipe connected
+ * @enable: flag that determines if the loopback should be created or destroyed
+ * @rndis_ipa_ctx: driver main context
+ *
+ * This function is the main loopback logic.
+ * It shall create/destroy the loopback by using BAM-DMA and notify
+ * the netdev accordingly.
+ */
+static int rndis_ipa_setup_loopback
+	(bool enable, struct rndis_ipa_dev *rndis_ipa_ctx)
+{
+	int retval;
+
+	if (!enable) {
+		rndis_ipa_destroy_loopback(rndis_ipa_ctx);
+		RNDIS_IPA_DEBUG("loopback destroy done");
+		retval = rndis_ipa_pipe_disconnect_notify(rndis_ipa_ctx);
+		if (retval) {
+			RNDIS_IPA_ERROR("disconnect notify fail");
+			return -ENODEV;
+		}
+		return 0;
+	}
+
+	RNDIS_IPA_DEBUG("creating loopback (instead of USB core)");
+	retval = rndis_ipa_create_loopback(rndis_ipa_ctx);
+	RNDIS_IPA_DEBUG("creating loopback- %s", (retval ? "FAIL" : "OK"));
+	if (retval) {
+		RNDIS_IPA_ERROR("Fail to connect loopback");
+		return -ENODEV;
+	}
+	retval = rndis_ipa_pipe_connect_notify(
+			rndis_ipa_ctx->usb_to_ipa_loopback_pipe.ipa_drv_ep_hdl,
+			rndis_ipa_ctx->ipa_to_usb_loopback_pipe.ipa_drv_ep_hdl,
+			BAM_DMA_DATA_FIFO_SIZE,
+			15,
+			BAM_DMA_DATA_FIFO_SIZE - rndis_ipa_ctx->net->mtu,
+			rndis_ipa_ctx);
+	if (retval) {
+		RNDIS_IPA_ERROR("connect notify fail");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+static int rndis_ipa_init_module(void)
+{
+	pr_info("RNDIS_IPA module is loaded.\n");
+	return 0;
+}
+
+static void rndis_ipa_cleanup_module(void)
+{
+	pr_info("RNDIS_IPA module is unloaded.\n");
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RNDIS_IPA network interface");
+
+late_initcall(rndis_ipa_init_module);
+module_exit(rndis_ipa_cleanup_module);
diff --git a/drivers/net/ethernet/msm/rndis_ipa_trace.h b/drivers/net/ethernet/msm/rndis_ipa_trace.h
new file mode 100644
index 0000000..bf66c1e
--- /dev/null
+++ b/drivers/net/ethernet/msm/rndis_ipa_trace.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rndis_ipa
+#define TRACE_INCLUDE_FILE rndis_ipa_trace
+
+#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RNDIS_IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+	rndis_netif_ni,
+
+	TP_PROTO(unsigned long proto),
+
+	TP_ARGS(proto),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	proto)
+	),
+
+	TP_fast_assign(
+		__entry->proto = proto;
+	),
+
+	TP_printk("proto=%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+	rndis_tx_dp,
+
+	TP_PROTO(unsigned long proto),
+
+	TP_ARGS(proto),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	proto)
+	),
+
+	TP_fast_assign(
+		__entry->proto = proto;
+	),
+
+	TP_printk("proto=%lu\n", __entry->proto)
+);
+
+TRACE_EVENT(
+	rndis_status_rcvd,
+
+	TP_PROTO(unsigned long proto),
+
+	TP_ARGS(proto),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	proto)
+	),
+
+	TP_fast_assign(
+		__entry->proto = proto;
+	),
+
+	TP_printk("proto=%lu\n", __entry->proto)
+);
+
+#endif /* _RNDIS_IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/net/ppp/Kconfig b/drivers/net/ppp/Kconfig
index 1373c6d..282aec4 100644
--- a/drivers/net/ppp/Kconfig
+++ b/drivers/net/ppp/Kconfig
@@ -149,6 +149,23 @@
 	  tunnels. L2TP is replacing PPTP for VPN uses.
 if TTY
 
+config PPPOLAC
+	tristate "PPP on L2TP Access Concentrator"
+	depends on PPP && INET
+	help
+	  L2TP (RFC 2661) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles L2TP data packets between a UDP socket
+	  and a PPP channel, but only permits one session per socket. Thus it is
+	  fairly simple and suited for clients.
+
+config PPPOPNS
+	tristate "PPP on PPTP Network Server"
+	depends on PPP && INET
+	help
+	  PPTP (RFC 2637) is a tunneling protocol widely used in virtual private
+	  networks. This driver handles PPTP data packets between a RAW socket
+	  and a PPP channel. It is fairly simple and easy to use.
+
 config PPP_ASYNC
 	tristate "PPP support for async serial ports"
 	depends on PPP
diff --git a/drivers/net/ppp/Makefile b/drivers/net/ppp/Makefile
index a6b6297..d283d03c 100644
--- a/drivers/net/ppp/Makefile
+++ b/drivers/net/ppp/Makefile
@@ -11,3 +11,5 @@
 obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 obj-$(CONFIG_PPPOL2TP) += pppox.o
 obj-$(CONFIG_PPTP) += pppox.o pptp.o
+obj-$(CONFIG_PPPOLAC) += pppox.o pppolac.o
+obj-$(CONFIG_PPPOPNS) += pppox.o pppopns.o
diff --git a/drivers/net/ppp/pppolac.c b/drivers/net/ppp/pppolac.c
new file mode 100644
index 0000000..0184c96
--- /dev/null
+++ b/drivers/net/ppp/pppolac.c
@@ -0,0 +1,448 @@
+/* drivers/net/pppolac.c
+ *
+ * Driver for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles L2TP data packets between a UDP socket and a PPP channel.
+ * The socket must stay connected, and only one session per socket is permitted.
+ * Sequencing of outgoing packets is controlled by LNS. Incoming packets with
+ * sequences are reordered within a sliding window of one second. Currently
+ * reordering only happens when a packet is received. It is done for simplicity
+ * since no additional locks or threads are required. This driver only works on
+ * IPv4 due to the lack of UDP encapsulation support in IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/udp.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <net/tcp_states.h>
+#include <asm/uaccess.h>
+
+#define L2TP_CONTROL_BIT	0x80
+#define L2TP_LENGTH_BIT		0x40
+#define L2TP_SEQUENCE_BIT	0x08
+#define L2TP_OFFSET_BIT		0x02
+#define L2TP_VERSION		0x02
+#define L2TP_VERSION_MASK	0x0F
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+union unaligned {
+	__u32 u32;
+} __attribute__((packed));
+
+static inline union unaligned *unaligned(void *ptr)
+{
+	return (union unaligned *)ptr;
+}
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppolac_recv_core(struct sock *sk_udp, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_udp->sk_user_data;
+	struct pppolac_opt *opt = &pppox_sk(sk)->proto.lac;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	__u8 bits;
+	__u8 *ptr;
+
+	/* Drop the packet if L2TP header is missing. */
+	if (skb->len < sizeof(struct udphdr) + 6)
+		goto drop;
+
+	/* Put it back if it is a control packet. */
+	if (skb->data[sizeof(struct udphdr)] & L2TP_CONTROL_BIT)
+		return opt->backlog_rcv(sk_udp, skb);
+
+	/* Skip UDP header. */
+	skb_pull(skb, sizeof(struct udphdr));
+
+	/* Check the version. */
+	if ((skb->data[1] & L2TP_VERSION_MASK) != L2TP_VERSION)
+		goto drop;
+	bits = skb->data[0];
+	ptr = &skb->data[2];
+
+	/* Check the length if it is present. */
+	if (bits & L2TP_LENGTH_BIT) {
+		if ((ptr[0] << 8 | ptr[1]) != skb->len)
+			goto drop;
+		ptr += 2;
+	}
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, 6 + (bits & L2TP_SEQUENCE_BIT ? 4 : 0) +
+			(bits & L2TP_LENGTH_BIT ? 2 : 0) +
+			(bits & L2TP_OFFSET_BIT ? 2 : 0)))
+		goto drop;
+
+	/* Skip the offset padding if it is present. */
+	if (bits & L2TP_OFFSET_BIT &&
+			!skb_pull(skb, skb->data[-2] << 8 | skb->data[-1]))
+		goto drop;
+
+	/* Check the tunnel and the session. */
+	if (unaligned(ptr)->u32 != opt->local)
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (bits & L2TP_SEQUENCE_BIT) {
+		meta->sequence = ptr[4] << 8 | ptr[5];
+		if ((__s16)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	atomic_set(&opt->sequencing, bits & L2TP_SEQUENCE_BIT);
+	if (bits & L2TP_SEQUENCE_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s16 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = (__u16)(meta->sequence + 1);
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static int pppolac_recv(struct sock *sk_udp, struct sk_buff *skb)
+{
+	sock_hold(sk_udp);
+	sk_receive_skb(sk_udp, skb, 0);
+	return 0;
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppolac_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_udp = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = { 0 };
+
+		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+			      skb->len);
+		sk_udp->sk_prot->sendmsg(sk_udp, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppolac_xmit_core);
+
+static int pppolac_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_udp = (struct sock *)chan->private;
+	struct pppolac_opt *opt = &pppox_sk(sk_udp->sk_user_data)->proto.lac;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+
+	/* Install L2TP header. */
+	if (atomic_read(&opt->sequencing)) {
+		skb_push(skb, 10);
+		skb->data[0] = L2TP_SEQUENCE_BIT;
+		skb->data[6] = opt->xmit_sequence >> 8;
+		skb->data[7] = opt->xmit_sequence;
+		skb->data[8] = 0;
+		skb->data[9] = 0;
+		opt->xmit_sequence++;
+	} else {
+		skb_push(skb, 6);
+		skb->data[0] = 0;
+	}
+	skb->data[1] = L2TP_VERSION;
+	unaligned(&skb->data[2])->u32 = opt->remote;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_udp);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppolac_channel_ops = {
+	.start_xmit = pppolac_xmit,
+};
+
+static int pppolac_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppolac *addr = (struct sockaddr_pppolac *)useraddr;
+	struct socket *sock_udp = NULL;
+	struct sock *sk_udp;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppolac) ||
+			!addr->local.tunnel || !addr->local.session ||
+			!addr->remote.tunnel || !addr->remote.session) {
+		return -EINVAL;
+	}
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_udp = sockfd_lookup(addr->udp_socket, &error);
+	if (!sock_udp)
+		goto out;
+	sk_udp = sock_udp->sk;
+	lock_sock(sk_udp);
+
+	/* Remove this check when IPv6 supports UDP encapsulation. */
+	error = -EAFNOSUPPORT;
+	if (sk_udp->sk_family != AF_INET)
+		goto out;
+	error = -EPROTONOSUPPORT;
+	if (sk_udp->sk_protocol != IPPROTO_UDP)
+		goto out;
+	error = -EDESTADDRREQ;
+	if (sk_udp->sk_state != TCP_ESTABLISHED)
+		goto out;
+	error = -EBUSY;
+	if (udp_sk(sk_udp)->encap_type || sk_udp->sk_user_data)
+		goto out;
+	if (!sk_udp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_udp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_udp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	po->chan.hdrlen = 12;
+	po->chan.private = sk_udp;
+	po->chan.ops = &pppolac_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.lac.local = unaligned(&addr->local)->u32;
+	po->proto.lac.remote = unaligned(&addr->remote)->u32;
+	atomic_set(&po->proto.lac.sequencing, 1);
+	po->proto.lac.backlog_rcv = sk_udp->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	udp_sk(sk_udp)->encap_type = UDP_ENCAP_L2TPINUDP;
+	udp_sk(sk_udp)->encap_rcv = pppolac_recv;
+	sk_udp->sk_backlog_rcv = pppolac_recv_core;
+	sk_udp->sk_user_data = sk;
+out:
+	if (sock_udp) {
+		release_sock(sk_udp);
+		if (error)
+			sockfd_put(sock_udp);
+	}
+	release_sock(sk);
+	return error;
+}
+
+static int pppolac_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_udp = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_udp);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		udp_sk(sk_udp)->encap_type = 0;
+		udp_sk(sk_udp)->encap_rcv = NULL;
+		sk_udp->sk_backlog_rcv = pppox_sk(sk)->proto.lac.backlog_rcv;
+		sk_udp->sk_user_data = NULL;
+		release_sock(sk_udp);
+		sockfd_put(sk_udp->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppolac_proto = {
+	.name = "PPPOLAC",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppolac_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppolac_release,
+	.bind = sock_no_bind,
+	.connect = pppolac_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppolac_create(struct net *net, struct socket *sock, int kern)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppolac_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppolac_proto_ops;
+	sk->sk_protocol = PX_PROTO_OLAC;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppolac_pppox_proto = {
+	.create = pppolac_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppolac_init(void)
+{
+	int error;
+
+	error = proto_register(&pppolac_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OLAC, &pppolac_pppox_proto);
+	if (error)
+		proto_unregister(&pppolac_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppolac_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OLAC);
+	proto_unregister(&pppolac_proto);
+}
+
+module_init(pppolac_init);
+module_exit(pppolac_exit);
+
+MODULE_DESCRIPTION("PPP on L2TP Access Concentrator (PPPoLAC)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
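
pppolac_recv_core() above decides whether an incoming L2TP data packet is stale with (__s16)(meta->sequence - opt->recv_sequence) < 0, the usual wrap-safe comparison for a 16-bit Ns counter. A minimal standalone sketch of that comparison, with hypothetical helper names and not part of this patch:

#include <stdint.h>
#include <stdio.h>

/* Nonzero if 16-bit sequence number a is older than b, modulo 2^16. */
static int seq16_before(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

int main(void)
{
	/* 0xFFFF counts as "before" 0x0001 although it is numerically larger. */
	printf("%d\n", seq16_before(0xFFFF, 0x0001));	/* prints 1 */
	printf("%d\n", seq16_before(0x0001, 0xFFFF));	/* prints 0 */
	return 0;
}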
diff --git a/drivers/net/ppp/pppopns.c b/drivers/net/ppp/pppopns.c
new file mode 100644
index 0000000..d9e0603
--- /dev/null
+++ b/drivers/net/ppp/pppopns.c
@@ -0,0 +1,427 @@
+/* drivers/net/ppp/pppopns.c
+ *
+ * Driver for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* This driver handles PPTP data packets between a RAW socket and a PPP channel.
+ * The socket is created in kernel space and connected to the same address as
+ * the control socket. Outgoing packets are always sent with sequence numbers
+ * but without acknowledgements. Incoming packets with sequence numbers are
+ * reordered within a sliding window of one second; reordering only happens
+ * when a packet is received, which keeps the code simple since no additional
+ * locks or threads are required. It should work on both IPv4 and IPv6. */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/file.h>
+#include <linux/netdevice.h>
+#include <linux/net.h>
+#include <linux/ppp_defs.h>
+#include <linux/if.h>
+#include <linux/if_ppp.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <asm/uaccess.h>
+
+#define GRE_HEADER_SIZE		8
+
+#define PPTP_GRE_BITS		htons(0x2001)
+#define PPTP_GRE_BITS_MASK	htons(0xEF7F)
+#define PPTP_GRE_SEQ_BIT	htons(0x1000)
+#define PPTP_GRE_ACK_BIT	htons(0x0080)
+#define PPTP_GRE_TYPE		htons(0x880B)
+
+#define PPP_ADDR	0xFF
+#define PPP_CTRL	0x03
+
+struct header {
+	__u16	bits;
+	__u16	type;
+	__u16	length;
+	__u16	call;
+	__u32	sequence;
+} __attribute__((packed));
+
+struct meta {
+	__u32 sequence;
+	__u32 timestamp;
+};
+
+static inline struct meta *skb_meta(struct sk_buff *skb)
+{
+	return (struct meta *)skb->cb;
+}
+
+/******************************************************************************/
+
+static int pppopns_recv_core(struct sock *sk_raw, struct sk_buff *skb)
+{
+	struct sock *sk = (struct sock *)sk_raw->sk_user_data;
+	struct pppopns_opt *opt = &pppox_sk(sk)->proto.pns;
+	struct meta *meta = skb_meta(skb);
+	__u32 now = jiffies;
+	struct header *hdr;
+
+	/* Skip transport header */
+	skb_pull(skb, skb_transport_header(skb) - skb->data);
+
+	/* Drop the packet if GRE header is missing. */
+	if (skb->len < GRE_HEADER_SIZE)
+		goto drop;
+	hdr = (struct header *)skb->data;
+
+	/* Check the header. */
+	if (hdr->type != PPTP_GRE_TYPE || hdr->call != opt->local ||
+			(hdr->bits & PPTP_GRE_BITS_MASK) != PPTP_GRE_BITS)
+		goto drop;
+
+	/* Skip all fields including optional ones. */
+	if (!skb_pull(skb, GRE_HEADER_SIZE +
+			(hdr->bits & PPTP_GRE_SEQ_BIT ? 4 : 0) +
+			(hdr->bits & PPTP_GRE_ACK_BIT ? 4 : 0)))
+		goto drop;
+
+	/* Check the length. */
+	if (skb->len != ntohs(hdr->length))
+		goto drop;
+
+	/* Check the sequence if it is present. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		meta->sequence = ntohl(hdr->sequence);
+		if ((__s32)(meta->sequence - opt->recv_sequence) < 0)
+			goto drop;
+	}
+
+	/* Skip PPP address and control if they are present. */
+	if (skb->len >= 2 && skb->data[0] == PPP_ADDR &&
+			skb->data[1] == PPP_CTRL)
+		skb_pull(skb, 2);
+
+	/* Fix PPP protocol if it is compressed. */
+	if (skb->len >= 1 && skb->data[0] & 1)
+		skb_push(skb, 1)[0] = 0;
+
+	/* Drop the packet if PPP protocol is missing. */
+	if (skb->len < 2)
+		goto drop;
+
+	/* Perform reordering if sequencing is enabled. */
+	if (hdr->bits & PPTP_GRE_SEQ_BIT) {
+		struct sk_buff *skb1;
+
+		/* Insert the packet into receive queue in order. */
+		skb_set_owner_r(skb, sk);
+		skb_queue_walk(&sk->sk_receive_queue, skb1) {
+			struct meta *meta1 = skb_meta(skb1);
+			__s32 order = meta->sequence - meta1->sequence;
+			if (order == 0)
+				goto drop;
+			if (order < 0) {
+				meta->timestamp = meta1->timestamp;
+				skb_insert(skb1, skb, &sk->sk_receive_queue);
+				skb = NULL;
+				break;
+			}
+		}
+		if (skb) {
+			meta->timestamp = now;
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+		}
+
+		/* Remove packets from receive queue as long as
+		 * 1. the receive buffer is full,
+		 * 2. they are queued longer than one second, or
+		 * 3. there are no missing packets before them. */
+		skb_queue_walk_safe(&sk->sk_receive_queue, skb, skb1) {
+			meta = skb_meta(skb);
+			if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
+					now - meta->timestamp < HZ &&
+					meta->sequence != opt->recv_sequence)
+				break;
+			skb_unlink(skb, &sk->sk_receive_queue);
+			opt->recv_sequence = meta->sequence + 1;
+			skb_orphan(skb);
+			ppp_input(&pppox_sk(sk)->chan, skb);
+		}
+		return NET_RX_SUCCESS;
+	}
+
+	/* Flush receive queue if sequencing is disabled. */
+	skb_queue_purge(&sk->sk_receive_queue);
+	skb_orphan(skb);
+	ppp_input(&pppox_sk(sk)->chan, skb);
+	return NET_RX_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+static void pppopns_recv(struct sock *sk_raw)
+{
+	struct sk_buff *skb;
+	while ((skb = skb_dequeue(&sk_raw->sk_receive_queue))) {
+		sock_hold(sk_raw);
+		sk_receive_skb(sk_raw, skb, 0);
+	}
+}
+
+static struct sk_buff_head delivery_queue;
+
+static void pppopns_xmit_core(struct work_struct *delivery_work)
+{
+	mm_segment_t old_fs = get_fs();
+	struct sk_buff *skb;
+
+	set_fs(KERNEL_DS);
+	while ((skb = skb_dequeue(&delivery_queue))) {
+		struct sock *sk_raw = skb->sk;
+		struct kvec iov = {.iov_base = skb->data, .iov_len = skb->len};
+		struct msghdr msg = { 0 };
+
+		iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1,
+			      skb->len);
+		sk_raw->sk_prot->sendmsg(sk_raw, &msg, skb->len);
+		kfree_skb(skb);
+	}
+	set_fs(old_fs);
+}
+
+static DECLARE_WORK(delivery_work, pppopns_xmit_core);
+
+static int pppopns_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+{
+	struct sock *sk_raw = (struct sock *)chan->private;
+	struct pppopns_opt *opt = &pppox_sk(sk_raw->sk_user_data)->proto.pns;
+	struct header *hdr;
+	__u16 length;
+
+	/* Install PPP address and control. */
+	skb_push(skb, 2);
+	skb->data[0] = PPP_ADDR;
+	skb->data[1] = PPP_CTRL;
+	length = skb->len;
+
+	/* Install PPTP GRE header. */
+	hdr = (struct header *)skb_push(skb, 12);
+	hdr->bits = PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT;
+	hdr->type = PPTP_GRE_TYPE;
+	hdr->length = htons(length);
+	hdr->call = opt->remote;
+	hdr->sequence = htonl(opt->xmit_sequence);
+	opt->xmit_sequence++;
+
+	/* Now send the packet via the delivery queue. */
+	skb_set_owner_w(skb, sk_raw);
+	skb_queue_tail(&delivery_queue, skb);
+	schedule_work(&delivery_work);
+	return 1;
+}
+
+/******************************************************************************/
+
+static struct ppp_channel_ops pppopns_channel_ops = {
+	.start_xmit = pppopns_xmit,
+};
+
+static int pppopns_connect(struct socket *sock, struct sockaddr *useraddr,
+	int addrlen, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct pppox_sock *po = pppox_sk(sk);
+	struct sockaddr_pppopns *addr = (struct sockaddr_pppopns *)useraddr;
+	struct sockaddr_storage ss;
+	struct socket *sock_tcp = NULL;
+	struct socket *sock_raw = NULL;
+	struct sock *sk_tcp;
+	struct sock *sk_raw;
+	int error;
+
+	if (addrlen != sizeof(struct sockaddr_pppopns))
+		return -EINVAL;
+
+	lock_sock(sk);
+	error = -EALREADY;
+	if (sk->sk_state != PPPOX_NONE)
+		goto out;
+
+	sock_tcp = sockfd_lookup(addr->tcp_socket, &error);
+	if (!sock_tcp)
+		goto out;
+	sk_tcp = sock_tcp->sk;
+	error = -EPROTONOSUPPORT;
+	if (sk_tcp->sk_protocol != IPPROTO_TCP)
+		goto out;
+	addrlen = sizeof(struct sockaddr_storage);
+	error = kernel_getpeername(sock_tcp, (struct sockaddr *)&ss, &addrlen);
+	if (error)
+		goto out;
+	if (!sk_tcp->sk_bound_dev_if) {
+		struct dst_entry *dst = sk_dst_get(sk_tcp);
+		error = -ENODEV;
+		if (!dst)
+			goto out;
+		sk_tcp->sk_bound_dev_if = dst->dev->ifindex;
+		dst_release(dst);
+	}
+
+	error = sock_create(ss.ss_family, SOCK_RAW, IPPROTO_GRE, &sock_raw);
+	if (error)
+		goto out;
+	sk_raw = sock_raw->sk;
+	sk_raw->sk_bound_dev_if = sk_tcp->sk_bound_dev_if;
+	error = kernel_connect(sock_raw, (struct sockaddr *)&ss, addrlen, 0);
+	if (error)
+		goto out;
+
+	po->chan.hdrlen = 14;
+	po->chan.private = sk_raw;
+	po->chan.ops = &pppopns_channel_ops;
+	po->chan.mtu = PPP_MRU - 80;
+	po->proto.pns.local = addr->local;
+	po->proto.pns.remote = addr->remote;
+	po->proto.pns.data_ready = sk_raw->sk_data_ready;
+	po->proto.pns.backlog_rcv = sk_raw->sk_backlog_rcv;
+
+	error = ppp_register_channel(&po->chan);
+	if (error)
+		goto out;
+
+	sk->sk_state = PPPOX_CONNECTED;
+	lock_sock(sk_raw);
+	sk_raw->sk_data_ready = pppopns_recv;
+	sk_raw->sk_backlog_rcv = pppopns_recv_core;
+	sk_raw->sk_user_data = sk;
+	release_sock(sk_raw);
+out:
+	if (sock_tcp)
+		sockfd_put(sock_tcp);
+	if (error && sock_raw)
+		sock_release(sock_raw);
+	release_sock(sk);
+	return error;
+}
+
+static int pppopns_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+	if (sock_flag(sk, SOCK_DEAD)) {
+		release_sock(sk);
+		return -EBADF;
+	}
+
+	if (sk->sk_state != PPPOX_NONE) {
+		struct sock *sk_raw = (struct sock *)pppox_sk(sk)->chan.private;
+		lock_sock(sk_raw);
+		skb_queue_purge(&sk->sk_receive_queue);
+		pppox_unbind_sock(sk);
+		sk_raw->sk_data_ready = pppox_sk(sk)->proto.pns.data_ready;
+		sk_raw->sk_backlog_rcv = pppox_sk(sk)->proto.pns.backlog_rcv;
+		sk_raw->sk_user_data = NULL;
+		release_sock(sk_raw);
+		sock_release(sk_raw->sk_socket);
+	}
+
+	sock_orphan(sk);
+	sock->sk = NULL;
+	release_sock(sk);
+	sock_put(sk);
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct proto pppopns_proto = {
+	.name = "PPPOPNS",
+	.owner = THIS_MODULE,
+	.obj_size = sizeof(struct pppox_sock),
+};
+
+static struct proto_ops pppopns_proto_ops = {
+	.family = PF_PPPOX,
+	.owner = THIS_MODULE,
+	.release = pppopns_release,
+	.bind = sock_no_bind,
+	.connect = pppopns_connect,
+	.socketpair = sock_no_socketpair,
+	.accept = sock_no_accept,
+	.getname = sock_no_getname,
+	.poll = sock_no_poll,
+	.ioctl = pppox_ioctl,
+	.listen = sock_no_listen,
+	.shutdown = sock_no_shutdown,
+	.setsockopt = sock_no_setsockopt,
+	.getsockopt = sock_no_getsockopt,
+	.sendmsg = sock_no_sendmsg,
+	.recvmsg = sock_no_recvmsg,
+	.mmap = sock_no_mmap,
+};
+
+static int pppopns_create(struct net *net, struct socket *sock, int kern)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppopns_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+	sock_init_data(sock, sk);
+	sock->state = SS_UNCONNECTED;
+	sock->ops = &pppopns_proto_ops;
+	sk->sk_protocol = PX_PROTO_OPNS;
+	sk->sk_state = PPPOX_NONE;
+	return 0;
+}
+
+/******************************************************************************/
+
+static struct pppox_proto pppopns_pppox_proto = {
+	.create = pppopns_create,
+	.owner = THIS_MODULE,
+};
+
+static int __init pppopns_init(void)
+{
+	int error;
+
+	error = proto_register(&pppopns_proto, 0);
+	if (error)
+		return error;
+
+	error = register_pppox_proto(PX_PROTO_OPNS, &pppopns_pppox_proto);
+	if (error)
+		proto_unregister(&pppopns_proto);
+	else
+		skb_queue_head_init(&delivery_queue);
+	return error;
+}
+
+static void __exit pppopns_exit(void)
+{
+	unregister_pppox_proto(PX_PROTO_OPNS);
+	proto_unregister(&pppopns_proto);
+}
+
+module_init(pppopns_init);
+module_exit(pppopns_exit);
+
+MODULE_DESCRIPTION("PPP on PPTP Network Server (PPPoPNS)");
+MODULE_AUTHOR("Chia-chi Yeh <chiachi@android.com>");
+MODULE_LICENSE("GPL");
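
pppopns_xmit() above prepends a 12-byte enhanced-GRE header with the sequence bit set and no acknowledgement field. A hedged user-space sketch of the same layout, not part of this patch; the struct and helper names are hypothetical and the constants are the host-order values of the definitions above:

#include <arpa/inet.h>
#include <stdint.h>

/* Mirrors "struct header" above: enhanced GRE for PPTP (RFC 2637). */
struct pptp_gre_hdr {
	uint16_t bits;
	uint16_t type;
	uint16_t length;	/* length of the PPP frame that follows */
	uint16_t call;		/* peer call ID */
	uint32_t sequence;
} __attribute__((packed));

/* Fill the header for one outgoing PPP frame of frame_len bytes. */
static void pptp_gre_fill(struct pptp_gre_hdr *h, uint16_t peer_call,
			  uint32_t seq, uint16_t frame_len)
{
	h->bits = htons(0x2001 | 0x1000);	/* PPTP_GRE_BITS | PPTP_GRE_SEQ_BIT */
	h->type = htons(0x880B);		/* PPTP_GRE_TYPE */
	h->length = htons(frame_len);
	h->call = htons(peer_call);
	h->sequence = htonl(seq);
}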
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index db6acec..929dafb8 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1991,6 +1991,12 @@
 	int le;
 	int ret;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
+		return -EPERM;
+	}
+#endif
+
 	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
 		if (copy_from_user(&ifr, argp, ifreq_len))
 			return -EFAULT;
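
With the hunk above, under CONFIG_ANDROID_PARANOID_NETWORK every tun ioctl except TUNGETIFF requires CAP_NET_ADMIN in the calling process. A small user-space sketch, with a hypothetical helper name and not part of this patch, of the one query that stays available to an unprivileged holder of an already-configured tun fd:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

/* Print the interface name and flags behind an already-attached tun fd. */
static int tun_print_iff(int tun_fd)
{
	struct ifreq ifr;

	if (ioctl(tun_fd, TUNGETIFF, &ifr) < 0) {
		perror("TUNGETIFF");
		return -1;
	}
	printf("attached to %s, flags 0x%x\n", ifr.ifr_name, ifr.ifr_flags);
	return 0;
}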
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index d0b7734..b7974b4 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -549,6 +549,11 @@
 {
 	int ret;
 
+	/* Disable filtering */
+	ret = wl1271_acx_group_address_tbl(wl, wlvif, false, NULL, 0);
+	if (ret < 0)
+		return ret;
+
 	ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
 	if (ret < 0)
 		return ret;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c89d5d2..8668808 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -1054,42 +1054,66 @@
 	return 0;
 }
 
+/*
+ * Convert configs to something easy to use in C code
+ */
+#if defined(CONFIG_CMDLINE_FORCE)
+static const int overwrite_incoming_cmdline = 1;
+static const int read_dt_cmdline;
+static const int concat_cmdline;
+#elif defined(CONFIG_CMDLINE_EXTEND)
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline = 1;
+#else /* CMDLINE_FROM_BOOTLOADER */
+static const int overwrite_incoming_cmdline;
+static const int read_dt_cmdline = 1;
+static const int concat_cmdline;
+#endif
+
+#ifdef CONFIG_CMDLINE
+static const char *config_cmdline = CONFIG_CMDLINE;
+#else
+static const char *config_cmdline = "";
+#endif
+
 int __init early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data)
 {
-	int l;
-	const char *p;
+	int l = 0;
+	const char *p = NULL;
+	char *cmdline = data;
 
 	pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname);
 
-	if (depth != 1 || !data ||
+	if (depth != 1 || !cmdline ||
 	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
 		return 0;
 
 	early_init_dt_check_for_initrd(node);
 
-	/* Retrieve command line */
-	p = of_get_flat_dt_prop(node, "bootargs", &l);
-	if (p != NULL && l > 0)
-		strlcpy(data, p, min((int)l, COMMAND_LINE_SIZE));
+	/* Use CONFIG_CMDLINE if forced or if the cmdline is empty so far */
+	if (overwrite_incoming_cmdline || !cmdline[0])
+		strlcpy(cmdline, config_cmdline, COMMAND_LINE_SIZE);
 
-	/*
-	 * CONFIG_CMDLINE is meant to be a default in case nothing else
-	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
-	 * is set in which case we override whatever was found earlier.
-	 */
-#ifdef CONFIG_CMDLINE
-#if defined(CONFIG_CMDLINE_EXTEND)
-	strlcat(data, " ", COMMAND_LINE_SIZE);
-	strlcat(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#elif defined(CONFIG_CMDLINE_FORCE)
-	strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#else
-	/* No arguments from boot loader, use kernel's  cmdl*/
-	if (!((char *)data)[0])
-		strlcpy(data, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
-#endif
-#endif /* CONFIG_CMDLINE */
+	/* Retrieve command line unless forcing */
+	if (read_dt_cmdline)
+		p = of_get_flat_dt_prop(node, "bootargs", &l);
+
+	if (p != NULL && l > 0) {
+		if (concat_cmdline) {
+			int cmdline_len;
+			int copy_len;
+			strlcat(cmdline, " ", COMMAND_LINE_SIZE);
+			cmdline_len = strlen(cmdline);
+			copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
+			copy_len = min((int)l, copy_len);
+			strncpy(cmdline + cmdline_len, p, copy_len);
+			cmdline[cmdline_len + copy_len] = '\0';
+		} else {
+			strlcpy(cmdline, p, min((int)l, COMMAND_LINE_SIZE));
+		}
+	}
 
 	pr_debug("Command line is: %s\n", (char*)data);
 
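
The CONFIG_CMDLINE_EXTEND branch above appends the device-tree bootargs to CONFIG_CMDLINE while clamping the copy so it can never overrun COMMAND_LINE_SIZE. A standalone sketch of the same clamping with plain libc calls, hypothetical names, and a deliberately small buffer; not part of this patch:

#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 32	/* small on purpose to show the clamp */

/* Append at most l bytes of bootargs p without overflowing cmdline. */
static void append_bootargs(char *cmdline, const char *p, size_t l)
{
	size_t cmdline_len, copy_len;

	strncat(cmdline, " ", COMMAND_LINE_SIZE - strlen(cmdline) - 1);
	cmdline_len = strlen(cmdline);
	copy_len = COMMAND_LINE_SIZE - cmdline_len - 1;
	if (l < copy_len)
		copy_len = l;
	memcpy(cmdline + cmdline_len, p, copy_len);
	cmdline[cmdline_len + copy_len] = '\0';
}

int main(void)
{
	char cmdline[COMMAND_LINE_SIZE] = "console=ttyS0";

	append_bootargs(cmdline, "root=/dev/ram rw loglevel=8", 27);
	printf("%s\n", cmdline);	/* tail truncated to fit 31 chars plus NUL */
	return 0;
}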
diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile
index a534cf5..1749037 100644
--- a/drivers/phy/Makefile
+++ b/drivers/phy/Makefile
@@ -52,8 +52,9 @@
 obj-$(CONFIG_PHY_STIH407_USB)		+= phy-stih407-usb.o
 obj-$(CONFIG_PHY_STIH41X_USB)		+= phy-stih41x-usb.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs.o
-obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-20nm.o
 obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-14nm.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qmp-v3.o
+obj-$(CONFIG_PHY_QCOM_UFS) 	+= phy-qcom-ufs-qrbtc-msmskunk.o
 obj-$(CONFIG_PHY_TUSB1210)		+= phy-tusb1210.o
 obj-$(CONFIG_PHY_BRCM_SATA)		+= phy-brcm-sata.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
diff --git a/drivers/phy/phy-qcom-ufs-i.h b/drivers/phy/phy-qcom-ufs-i.h
index 2bd5ce4..35179c8 100644
--- a/drivers/phy/phy-qcom-ufs-i.h
+++ b/drivers/phy/phy-qcom-ufs-i.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -91,6 +91,7 @@
 	struct clk *ref_clk_src;
 	struct clk *ref_clk_parent;
 	struct clk *ref_clk;
+	struct clk *ref_aux_clk;
 	bool is_ref_clk_enabled;
 	bool is_dev_ref_clk_enabled;
 	struct ufs_qcom_phy_vreg vdda_pll;
@@ -107,6 +108,23 @@
 	*/
 	#define UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE	BIT(0)
 
+	/*
+	 * On some UFS PHY HW revisions, the UFS PHY power up calibration
+	 * sequence cannot include the SVS mode configuration, otherwise the
+	 * calibration result cannot be used in HS-G3. So additional register
+	 * writes must be done after the PHY is initialized but before the
+	 * controller requests hibernate exit.
+	 */
+	#define UFS_QCOM_PHY_QUIRK_SVS_MODE	BIT(1)
+
+	/*
+	 * On some UFS PHY HW revisions, the UFS PHY power up calibration
+	 * sequence requires a manual VCO tuning code, and it's better to rely
+	 * on the VCO tuning code programmed by the boot loader. Enable this
+	 * quirk to program the manually tuned VCO code.
+	 */
+	#define UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING	BIT(2)
+
 	u8 host_ctrl_rev_major;
 	u16 host_ctrl_rev_minor;
 	u16 host_ctrl_rev_step;
@@ -116,6 +134,7 @@
 	int cached_regs_table_size;
 	bool is_powered_on;
 	struct ufs_qcom_phy_specific_ops *phy_spec_ops;
+	u32 vco_tune1_mode1;
 };
 
 /**
@@ -127,15 +146,21 @@
  * @is_physical_coding_sublayer_ready: pointer to a function that
  * checks pcs readiness. returns 0 for success and non-zero for error.
  * @set_tx_lane_enable: pointer to a function that enable tx lanes
+ * @ctrl_rx_linecfg: pointer to a function that controls the Host Rx LineCfg
+ * state.
  * @power_control: pointer to a function that controls analog rail of phy
  * and writes to QSERDES_RX_SIGDET_CNTRL attribute
+ * @configure_lpm: pointer to a function that configures the phy
+ * for low power mode.
  */
 struct ufs_qcom_phy_specific_ops {
 	int (*calibrate_phy)(struct ufs_qcom_phy *phy, bool is_rate_B);
 	void (*start_serdes)(struct ufs_qcom_phy *phy);
 	int (*is_physical_coding_sublayer_ready)(struct ufs_qcom_phy *phy);
 	void (*set_tx_lane_enable)(struct ufs_qcom_phy *phy, u32 val);
+	void (*ctrl_rx_linecfg)(struct ufs_qcom_phy *phy, bool ctrl);
 	void (*power_control)(struct ufs_qcom_phy *phy, bool val);
+	int (*configure_lpm)(struct ufs_qcom_phy *phy, bool enable);
 };
 
 struct ufs_qcom_phy *get_ufs_qcom_phy(struct phy *generic_phy);
@@ -156,4 +181,8 @@
 			struct ufs_qcom_phy_calibration *tbl_A, int tbl_size_A,
 			struct ufs_qcom_phy_calibration *tbl_B, int tbl_size_B,
 			bool is_rate_B);
+void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
+				struct ufs_qcom_phy_calibration *tbl,
+				int tbl_size);
+
 #endif
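
Callers reach the two new ops through ufs_qcom_phy->phy_spec_ops, and not every PHY variant necessarily provides them, so checking the op pointer before dispatching is the natural calling pattern. A short sketch with a hypothetical wrapper name, not part of this patch:

/* Toggle Rx LineCfg on PHYs that implement the op; do nothing otherwise. */
static void ufs_qcom_phy_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
{
	if (phy->phy_spec_ops && phy->phy_spec_ops->ctrl_rx_linecfg)
		phy->phy_spec_ops->ctrl_rx_linecfg(phy, ctrl);
}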
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.c b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
index 6ee5149..8cb8c02 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.c
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.c
@@ -15,19 +15,49 @@
 #include "phy-qcom-ufs-qmp-14nm.h"
 
 #define UFS_PHY_NAME "ufs_phy_qmp_14nm"
-#define UFS_PHY_VDDA_PHY_UV	(925000)
 
 static
 int ufs_qcom_phy_qmp_14nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 					bool is_rate_B)
 {
-	int tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
-	int tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
 	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
+	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
+	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
 
-	err = ufs_qcom_phy_calibrate(ufs_qcom_phy, phy_cal_table_rate_A,
-		tbl_size_A, phy_cal_table_rate_B, tbl_size_B, is_rate_B);
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
 
+	if ((major == 0x2) && (minor == 0x000) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_2_0_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_2_0_0);
+	} else if ((major == 0x2) && (minor == 0x001) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_2_1_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_2_1_0);
+	} else if ((major == 0x2) && (minor == 0x002) && (step == 0x0000)) {
+		tbl_A = phy_cal_table_rate_A_2_2_0;
+		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_2_2_0);
+		tbl_B = phy_cal_table_rate_B_2_2_0;
+		tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B_2_2_0);
+	} else {
+		dev_err(ufs_qcom_phy->dev,
+			"%s: Unknown UFS-PHY version (major 0x%x minor 0x%x step 0x%x), no calibration values\n",
+			__func__, major, minor, step);
+		err = -ENODEV;
+		goto out;
+	}
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (ufs_qcom_phy->quirks & UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING)
+		writel_relaxed(ufs_qcom_phy->vco_tune1_mode1,
+			ufs_qcom_phy->mmio + QSERDES_COM_VCO_TUNE1_MODE1);
+out:
 	if (err)
 		dev_err(ufs_qcom_phy->dev,
 			"%s: ufs_qcom_phy_calibrate() failed %d\n",
@@ -38,8 +68,15 @@
 static
 void ufs_qcom_phy_qmp_14nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
 {
-	phy_common->quirks =
-		UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
+	u8 major = phy_common->host_ctrl_rev_major;
+	u16 minor = phy_common->host_ctrl_rev_minor;
+	u16 step = phy_common->host_ctrl_rev_step;
+
+	if ((major == 0x2) && (minor == 0x000) && (step == 0x0000))
+		phy_common->quirks =
+			UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE |
+			UFS_QCOM_PHY_QUIRK_SVS_MODE |
+			UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING;
 }
 
 static int ufs_qcom_phy_qmp_14nm_init(struct phy *generic_phy)
@@ -61,24 +98,66 @@
 			__func__, err);
 		goto out;
 	}
-	phy_common->vdda_phy.max_uV = UFS_PHY_VDDA_PHY_UV;
-	phy_common->vdda_phy.min_uV = UFS_PHY_VDDA_PHY_UV;
 
 	ufs_qcom_phy_qmp_14nm_advertise_quirks(phy_common);
 
+	if (phy_common->quirks & UFS_QCOM_PHY_QUIRK_VCO_MANUAL_TUNING) {
+		phy_common->vco_tune1_mode1 = readl_relaxed(phy_common->mmio +
+						QSERDES_COM_VCO_TUNE1_MODE1);
+		dev_info(phy_common->dev, "%s: vco_tune1_mode1 0x%x\n",
+			__func__, phy_common->vco_tune1_mode1);
+	}
+
 out:
 	return err;
 }
 
 static
-void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy, bool val)
+void ufs_qcom_phy_qmp_14nm_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
 {
-	writel_relaxed(val ? 0x1 : 0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
-	/*
-	 * Before any transactions involving PHY, ensure PHY knows
-	 * that it's analog rail is powered ON (or OFF).
-	 */
-	mb();
+	bool is_workaround_req = false;
+
+	if (phy->quirks &
+	    UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE)
+		is_workaround_req = true;
+
+	if (!power_ctrl) {
+		/* apply PHY analog power collapse */
+		if (is_workaround_req) {
+			/* assert common reset before analog power collapse */
+			writel_relaxed(0x1, phy->mmio + QSERDES_COM_SW_RESET);
+			/*
+			 * make sure that reset is propagated before analog
+			 * power collapse
+			 */
+			mb();
+		}
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Before any transactions involving PHY, ensure PHY knows
+		 * that its analog rail is powered ON.
+		 */
+		mb();
+		if (is_workaround_req) {
+			/*
+			 * de-assert common reset after coming out of analog
+			 * power collapse
+			 */
+			writel_relaxed(0x0, phy->mmio + QSERDES_COM_SW_RESET);
+			/* ensure common reset is de-asserted before proceeding */
+			mb();
+		}
+	}
 }
 
 static inline
@@ -90,6 +169,23 @@
 	 */
 }
 
+static
+void ufs_qcom_phy_qmp_14nm_ctrl_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* make sure the RX LineCfg config is applied before we return */
+	mb();
+}
+
 static inline void ufs_qcom_phy_qmp_14nm_start_serdes(struct ufs_qcom_phy *phy)
 {
 	u32 tmp;
@@ -109,9 +205,24 @@
 
 	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
 		val, (val & MASK_PCS_READY), 10, 1000000);
-	if (err)
+	if (err) {
 		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
 			__func__, err);
+		goto out;
+	}
+
+	if (phy_common->quirks & UFS_QCOM_PHY_QUIRK_SVS_MODE) {
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(phy_svs_mode_config_2_0_0); i++)
+			writel_relaxed(phy_svs_mode_config_2_0_0[i].cfg_value,
+				(phy_common->mmio +
+				phy_svs_mode_config_2_0_0[i].reg_offset));
+		/* apply above configuration immediately */
+		mb();
+	}
+
+out:
 	return err;
 }
 
@@ -128,6 +239,7 @@
 	.start_serdes		= ufs_qcom_phy_qmp_14nm_start_serdes,
 	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_14nm_is_pcs_ready,
 	.set_tx_lane_enable	= ufs_qcom_phy_qmp_14nm_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_14nm_ctrl_rx_linecfg,
 	.power_control		= ufs_qcom_phy_qmp_14nm_power_control,
 };
 
@@ -140,6 +252,7 @@
 
 	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
 	if (!phy) {
+		dev_err(dev, "%s: failed to allocate phy\n", __func__);
 		err = -ENOMEM;
 		goto out;
 	}
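
ufs_qcom_phy_qmp_14nm_phy_calibrate() above selects the rate-A calibration table by matching the (major, minor, step) host controller revision in an if/else ladder. For comparison only, a hedged sketch the patch does not use, expressing the same rate-A mapping as data (the 2.2.0 revision would still need its own rate-B entry):

struct rate_a_tbl_entry {
	u8 major;
	u16 minor;
	u16 step;
	struct ufs_qcom_phy_calibration *tbl;
	int size;
};

static const struct rate_a_tbl_entry rate_a_tbls[] = {
	{ 0x2, 0x000, 0x0000, phy_cal_table_rate_A_2_0_0,
	  ARRAY_SIZE(phy_cal_table_rate_A_2_0_0) },
	{ 0x2, 0x001, 0x0000, phy_cal_table_rate_A_2_1_0,
	  ARRAY_SIZE(phy_cal_table_rate_A_2_1_0) },
	{ 0x2, 0x002, 0x0000, phy_cal_table_rate_A_2_2_0,
	  ARRAY_SIZE(phy_cal_table_rate_A_2_2_0) },
};

static const struct rate_a_tbl_entry *find_rate_a_tbl(u8 major, u16 minor,
						      u16 step)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(rate_a_tbls); i++)
		if (rate_a_tbls[i].major == major &&
		    rate_a_tbls[i].minor == minor &&
		    rate_a_tbls[i].step == step)
			return &rate_a_tbls[i];
	return NULL;	/* caller keeps the existing -ENODEV fallback */
}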
diff --git a/drivers/phy/phy-qcom-ufs-qmp-14nm.h b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
index 3aefdba..46e652f 100644
--- a/drivers/phy/phy-qcom-ufs-qmp-14nm.h
+++ b/drivers/phy/phy-qcom-ufs-qmp-14nm.h
@@ -27,12 +27,14 @@
 #define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
 #define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
 #define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
 #define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0x4C)
 #define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0x50)
 #define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0x54)
 #define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0x58)
 #define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0x5C)
 #define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0x60)
+#define QSERDES_COM_BG_TRIM			COM_OFF(0x70)
 #define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x78)
 #define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x7C)
 #define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x84)
@@ -41,6 +43,7 @@
 #define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x94)
 #define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0xAC)
 #define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0xB4)
+#define QSERDES_COM_RESCODE_DIV_NUM		COM_OFF(0xC4)
 #define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0xC8)
 #define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0xCC)
 #define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xD0)
@@ -61,19 +64,35 @@
 #define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0x130)
 #define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0x134)
 #define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x138)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x13C)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x140)
 #define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x144)
 #define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x148)
 #define QSERDES_COM_CLK_SELECT			COM_OFF(0x174)
 #define QSERDES_COM_HSCLK_SEL			COM_OFF(0x178)
 #define QSERDES_COM_CORECLK_DIV			COM_OFF(0x184)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x188)
 #define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x18C)
 #define QSERDES_COM_CMN_CONFIG			COM_OFF(0x194)
 #define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x19C)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x1A0)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x1A4)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x1A8)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x1AC)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x1B0)
+#define QSERDES_COM_CMN_MISC2			COM_OFF(0x1B8)
 #define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x1BC)
 
 /* UFS PHY registers */
 #define UFS_PHY_PHY_START			PHY_OFF(0x00)
 #define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x3C)
+#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP	PHY_OFF(0xCC)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x138)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x13C)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x148)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x154)
 #define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x168)
 
 /* UFS PHY TX registers */
@@ -81,7 +100,12 @@
 #define QSERDES_TX_LANE_MODE				TX_OFF(0, 0x94)
 
 /* UFS PHY RX registers */
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_HALF	RX_OFF(0, 0x30)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER	RX_OFF(0, 0x34)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN_EIGHTH	RX_OFF(0, 0x38)
+#define QSERDES_RX_UCDR_SVS_SO_GAIN		RX_OFF(0, 0x3C)
 #define QSERDES_RX_UCDR_FASTLOCK_FO_GAIN	RX_OFF(0, 0x40)
+#define QSERDES_RX_UCDR_SO_SATURATION_ENABLE	RX_OFF(0, 0x48)
 #define QSERDES_RX_RX_TERM_BW			RX_OFF(0, 0x90)
 #define QSERDES_RX_RX_EQ_GAIN1_LSB		RX_OFF(0, 0xC4)
 #define QSERDES_RX_RX_EQ_GAIN1_MSB		RX_OFF(0, 0xC8)
@@ -93,6 +117,8 @@
 #define QSERDES_RX_SIGDET_DEGLITCH_CNTRL	RX_OFF(0, 0x11C)
 #define QSERDES_RX_RX_INTERFACE_MODE		RX_OFF(0, 0x12C)
 
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
 /*
  * This structure represents the 14nm specific phy.
  * common_cfg MUST remain the first field in this structure
@@ -105,12 +131,102 @@
 	struct ufs_qcom_phy common_cfg;
 };
 
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_2_0_0[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xd7),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x17),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x1C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_MISC2, 0x1F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+
+	/*
+	 * The UFS_PHY_RX_PWM_GEAR_BAND configuration is changed after the
+	 * power up sequence, so make sure this register is set back to its
+	 * power on reset value here. This is required in case the power up
+	 * sequence is initiated after the register was changed to a value
+	 * other than its power on reset value.
+	 */
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x55),
+};
+
+/*
+ * For 2.1.0 revision, SVS mode configuration can be part of PHY power
+ * up sequence itself.
+ */
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_2_1_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x05),
@@ -155,23 +271,133 @@
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
 
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
 
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x02),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x18),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x0F),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESCODE_DIV_NUM, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_MISC2, 0x1F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_2_2_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x0e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x20),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_CFG, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER1, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_TIMER2, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xd6),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP3_MODE1, 0x00),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_HIGHZ_TRANSCEIVER_BIAS_DRVR_EN, 0x45),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE, 0x06),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0D),
+
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TRIM, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESCODE_DIV_NUM, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_MISC2, 0x63),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UCDR_SO_SATURATION_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6c),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAP, 0x28),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
 };
 
 static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
 	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x54),
 };
 
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B_2_2_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+/*
+ * For 2.0.0 revision, apply this SVS mode configuration after PHY power
+ * up sequence is completed.
+ */
+static struct ufs_qcom_phy_calibration phy_svs_mode_config_2_0_0[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_PWM_GEAR_BAND, 0x15),
+};
+
 #endif
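
The tables above are flat register/value pairs, and the newly declared ufs_qcom_phy_write_tbl() in phy-qcom-ufs-i.h presumably replays such a table against the PHY's MMIO window, much like the SVS-mode loop in ufs_qcom_phy_qmp_14nm_is_pcs_ready(). An assumed minimal implementation, shown only to illustrate how UFS_QCOM_PHY_CAL_ENTRY tables are consumed:

/* Assumed sketch: write every (reg_offset, cfg_value) pair in order. */
void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
				struct ufs_qcom_phy_calibration *tbl,
				int tbl_size)
{
	int i;

	for (i = 0; i < tbl_size; i++)
		writel_relaxed(tbl[i].cfg_value,
			       ufs_qcom_phy->mmio + tbl[i].reg_offset);
}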
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.c b/drivers/phy/phy-qcom-ufs-qmp-20nm.c
deleted file mode 100644
index 770087a..0000000
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.c
+++ /dev/null
@@ -1,255 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#include "phy-qcom-ufs-qmp-20nm.h"
-
-#define UFS_PHY_NAME "ufs_phy_qmp_20nm"
-
-static
-int ufs_qcom_phy_qmp_20nm_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
-					bool is_rate_B)
-{
-	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
-	int tbl_size_A, tbl_size_B;
-	u8 major = ufs_qcom_phy->host_ctrl_rev_major;
-	u16 minor = ufs_qcom_phy->host_ctrl_rev_minor;
-	u16 step = ufs_qcom_phy->host_ctrl_rev_step;
-	int err;
-
-	if ((major == 0x1) && (minor == 0x002) && (step == 0x0000)) {
-		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_2_0);
-		tbl_A = phy_cal_table_rate_A_1_2_0;
-	} else if ((major == 0x1) && (minor == 0x003) && (step == 0x0000)) {
-		tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A_1_3_0);
-		tbl_A = phy_cal_table_rate_A_1_3_0;
-	} else {
-		dev_err(ufs_qcom_phy->dev, "%s: Unknown UFS-PHY version, no calibration values\n",
-			__func__);
-		err = -ENODEV;
-		goto out;
-	}
-
-	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
-	tbl_B = phy_cal_table_rate_B;
-
-	err = ufs_qcom_phy_calibrate(ufs_qcom_phy, tbl_A, tbl_size_A,
-						tbl_B, tbl_size_B, is_rate_B);
-
-	if (err)
-		dev_err(ufs_qcom_phy->dev, "%s: ufs_qcom_phy_calibrate() failed %d\n",
-			__func__, err);
-
-out:
-	return err;
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_advertise_quirks(struct ufs_qcom_phy *phy_common)
-{
-	phy_common->quirks =
-		UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-}
-
-static int ufs_qcom_phy_qmp_20nm_init(struct phy *generic_phy)
-{
-	struct ufs_qcom_phy_qmp_20nm *phy = phy_get_drvdata(generic_phy);
-	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
-	int err = 0;
-
-	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
-	if (err) {
-		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
-	if (err) {
-		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	ufs_qcom_phy_qmp_20nm_advertise_quirks(phy_common);
-
-out:
-	return err;
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_power_control(struct ufs_qcom_phy *phy, bool val)
-{
-	bool hibern8_exit_after_pwr_collapse = phy->quirks &
-		UFS_QCOM_PHY_QUIRK_HIBERN8_EXIT_AFTER_PHY_PWR_COLLAPSE;
-
-	if (val) {
-		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
-		/*
-		 * Before any transactions involving PHY, ensure PHY knows
-		 * that it's analog rail is powered ON.
-		 */
-		mb();
-
-		if (hibern8_exit_after_pwr_collapse) {
-			/*
-			 * Give atleast 1us delay after restoring PHY analog
-			 * power.
-			 */
-			usleep_range(1, 2);
-			writel_relaxed(0x0A, phy->mmio +
-				       QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
-			writel_relaxed(0x08, phy->mmio +
-				       QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
-			/*
-			 * Make sure workaround is deactivated before proceeding
-			 * with normal PHY operations.
-			 */
-			mb();
-		}
-	} else {
-		if (hibern8_exit_after_pwr_collapse) {
-			writel_relaxed(0x0A, phy->mmio +
-				       QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
-			writel_relaxed(0x02, phy->mmio +
-				       QSERDES_COM_SYSCLK_EN_SEL_TXBAND);
-			/*
-			 * Make sure that above workaround is activated before
-			 * PHY analog power collapse.
-			 */
-			mb();
-		}
-
-		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
-		/*
-		 * ensure that PHY knows its PHY analog rail is going
-		 * to be powered down
-		 */
-		mb();
-	}
-}
-
-static
-void ufs_qcom_phy_qmp_20nm_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
-{
-	writel_relaxed(val & UFS_PHY_TX_LANE_ENABLE_MASK,
-			phy->mmio + UFS_PHY_TX_LANE_ENABLE);
-	mb();
-}
-
-static inline void ufs_qcom_phy_qmp_20nm_start_serdes(struct ufs_qcom_phy *phy)
-{
-	u32 tmp;
-
-	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
-	tmp &= ~MASK_SERDES_START;
-	tmp |= (1 << OFFSET_SERDES_START);
-	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
-	mb();
-}
-
-static int ufs_qcom_phy_qmp_20nm_is_pcs_ready(struct ufs_qcom_phy *phy_common)
-{
-	int err = 0;
-	u32 val;
-
-	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
-			val, (val & MASK_PCS_READY), 10, 1000000);
-	if (err)
-		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
-			__func__, err);
-	return err;
-}
-
-static const struct phy_ops ufs_qcom_phy_qmp_20nm_phy_ops = {
-	.init		= ufs_qcom_phy_qmp_20nm_init,
-	.exit		= ufs_qcom_phy_exit,
-	.power_on	= ufs_qcom_phy_power_on,
-	.power_off	= ufs_qcom_phy_power_off,
-	.owner		= THIS_MODULE,
-};
-
-static struct ufs_qcom_phy_specific_ops phy_20nm_ops = {
-	.calibrate_phy		= ufs_qcom_phy_qmp_20nm_phy_calibrate,
-	.start_serdes		= ufs_qcom_phy_qmp_20nm_start_serdes,
-	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_20nm_is_pcs_ready,
-	.set_tx_lane_enable	= ufs_qcom_phy_qmp_20nm_set_tx_lane_enable,
-	.power_control		= ufs_qcom_phy_qmp_20nm_power_control,
-};
-
-static int ufs_qcom_phy_qmp_20nm_probe(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct phy *generic_phy;
-	struct ufs_qcom_phy_qmp_20nm *phy;
-	int err = 0;
-
-	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
-	if (!phy) {
-		err = -ENOMEM;
-		goto out;
-	}
-
-	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
-				&ufs_qcom_phy_qmp_20nm_phy_ops, &phy_20nm_ops);
-
-	if (!generic_phy) {
-		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
-			__func__);
-		err = -EIO;
-		goto out;
-	}
-
-	phy_set_drvdata(generic_phy, phy);
-
-	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
-			sizeof(phy->common_cfg.name));
-
-out:
-	return err;
-}
-
-static int ufs_qcom_phy_qmp_20nm_remove(struct platform_device *pdev)
-{
-	struct device *dev = &pdev->dev;
-	struct phy *generic_phy = to_phy(dev);
-	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
-	int err = 0;
-
-	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
-	if (err)
-		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
-			__func__, err);
-
-	return err;
-}
-
-static const struct of_device_id ufs_qcom_phy_qmp_20nm_of_match[] = {
-	{.compatible = "qcom,ufs-phy-qmp-20nm"},
-	{},
-};
-MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_20nm_of_match);
-
-static struct platform_driver ufs_qcom_phy_qmp_20nm_driver = {
-	.probe = ufs_qcom_phy_qmp_20nm_probe,
-	.remove = ufs_qcom_phy_qmp_20nm_remove,
-	.driver = {
-		.of_match_table = ufs_qcom_phy_qmp_20nm_of_match,
-		.name = "ufs_qcom_phy_qmp_20nm",
-	},
-};
-
-module_platform_driver(ufs_qcom_phy_qmp_20nm_driver);
-
-MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP 20nm");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-20nm.h b/drivers/phy/phy-qcom-ufs-qmp-20nm.h
deleted file mode 100644
index 4f3076b..0000000
--- a/drivers/phy/phy-qcom-ufs-qmp-20nm.h
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef UFS_QCOM_PHY_QMP_20NM_H_
-#define UFS_QCOM_PHY_QMP_20NM_H_
-
-#include "phy-qcom-ufs-i.h"
-
-/* QCOM UFS PHY control registers */
-
-#define COM_OFF(x)     (0x000 + x)
-#define PHY_OFF(x)     (0xC00 + x)
-#define TX_OFF(n, x)   (0x400 + (0x400 * n) + x)
-#define RX_OFF(n, x)   (0x600 + (0x400 * n) + x)
-
-/* UFS PHY PLL block registers */
-#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x0)
-#define QSERDES_COM_PLL_VCOTAIL_EN		COM_OFF(0x04)
-#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x14)
-#define QSERDES_COM_PLL_IP_SETI			COM_OFF(0x24)
-#define QSERDES_COM_CORE_CLK_IN_SYNC_SEL	COM_OFF(0x28)
-#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x30)
-#define QSERDES_COM_PLL_CP_SETI			COM_OFF(0x34)
-#define QSERDES_COM_PLL_IP_SETP			COM_OFF(0x38)
-#define QSERDES_COM_PLL_CP_SETP			COM_OFF(0x3C)
-#define QSERDES_COM_SYSCLK_EN_SEL_TXBAND	COM_OFF(0x48)
-#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0x4C)
-#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0x50)
-#define QSERDES_COM_PLLLOCK_CMP1		COM_OFF(0x90)
-#define QSERDES_COM_PLLLOCK_CMP2		COM_OFF(0x94)
-#define QSERDES_COM_PLLLOCK_CMP3		COM_OFF(0x98)
-#define QSERDES_COM_PLLLOCK_CMP_EN		COM_OFF(0x9C)
-#define QSERDES_COM_BGTC			COM_OFF(0xA0)
-#define QSERDES_COM_DEC_START1			COM_OFF(0xAC)
-#define QSERDES_COM_PLL_AMP_OS			COM_OFF(0xB0)
-#define QSERDES_COM_RES_CODE_UP_OFFSET		COM_OFF(0xD8)
-#define QSERDES_COM_RES_CODE_DN_OFFSET		COM_OFF(0xDC)
-#define QSERDES_COM_DIV_FRAC_START1		COM_OFF(0x100)
-#define QSERDES_COM_DIV_FRAC_START2		COM_OFF(0x104)
-#define QSERDES_COM_DIV_FRAC_START3		COM_OFF(0x108)
-#define QSERDES_COM_DEC_START2			COM_OFF(0x10C)
-#define QSERDES_COM_PLL_RXTXEPCLK_EN		COM_OFF(0x110)
-#define QSERDES_COM_PLL_CRCTRL			COM_OFF(0x114)
-#define QSERDES_COM_PLL_CLKEPDIV		COM_OFF(0x118)
-
-/* TX LANE n (0, 1) registers */
-#define QSERDES_TX_EMP_POST1_LVL(n)		TX_OFF(n, 0x08)
-#define QSERDES_TX_DRV_LVL(n)			TX_OFF(n, 0x0C)
-#define QSERDES_TX_LANE_MODE(n)			TX_OFF(n, 0x54)
-
-/* RX LANE n (0, 1) registers */
-#define QSERDES_RX_CDR_CONTROL1(n)		RX_OFF(n, 0x0)
-#define QSERDES_RX_CDR_CONTROL_HALF(n)		RX_OFF(n, 0x8)
-#define QSERDES_RX_RX_EQ_GAIN1_LSB(n)		RX_OFF(n, 0xA8)
-#define QSERDES_RX_RX_EQ_GAIN1_MSB(n)		RX_OFF(n, 0xAC)
-#define QSERDES_RX_RX_EQ_GAIN2_LSB(n)		RX_OFF(n, 0xB0)
-#define QSERDES_RX_RX_EQ_GAIN2_MSB(n)		RX_OFF(n, 0xB4)
-#define QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(n)	RX_OFF(n, 0xBC)
-#define QSERDES_RX_CDR_CONTROL_QUARTER(n)	RX_OFF(n, 0xC)
-#define QSERDES_RX_SIGDET_CNTRL(n)		RX_OFF(n, 0x100)
-
-/* UFS PHY registers */
-#define UFS_PHY_PHY_START			PHY_OFF(0x00)
-#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x4)
-#define UFS_PHY_TX_LANE_ENABLE			PHY_OFF(0x44)
-#define UFS_PHY_PWM_G1_CLK_DIVIDER		PHY_OFF(0x08)
-#define UFS_PHY_PWM_G2_CLK_DIVIDER		PHY_OFF(0x0C)
-#define UFS_PHY_PWM_G3_CLK_DIVIDER		PHY_OFF(0x10)
-#define UFS_PHY_PWM_G4_CLK_DIVIDER		PHY_OFF(0x14)
-#define UFS_PHY_CORECLK_PWM_G1_CLK_DIVIDER	PHY_OFF(0x34)
-#define UFS_PHY_CORECLK_PWM_G2_CLK_DIVIDER	PHY_OFF(0x38)
-#define UFS_PHY_CORECLK_PWM_G3_CLK_DIVIDER	PHY_OFF(0x3C)
-#define UFS_PHY_CORECLK_PWM_G4_CLK_DIVIDER	PHY_OFF(0x40)
-#define UFS_PHY_OMC_STATUS_RDVAL		PHY_OFF(0x68)
-#define UFS_PHY_LINE_RESET_TIME			PHY_OFF(0x28)
-#define UFS_PHY_LINE_RESET_GRANULARITY		PHY_OFF(0x2C)
-#define UFS_PHY_TSYNC_RSYNC_CNTL		PHY_OFF(0x48)
-#define UFS_PHY_PLL_CNTL			PHY_OFF(0x50)
-#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x54)
-#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x5C)
-#define UFS_PHY_TX_LARGE_AMP_POST_EMP_LVL	PHY_OFF(0x58)
-#define UFS_PHY_TX_SMALL_AMP_POST_EMP_LVL	PHY_OFF(0x60)
-#define UFS_PHY_CFG_CHANGE_CNT_VAL		PHY_OFF(0x64)
-#define UFS_PHY_RX_SYNC_WAIT_TIME		PHY_OFF(0x6C)
-#define UFS_PHY_TX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY	PHY_OFF(0xB4)
-#define UFS_PHY_RX_MIN_SLEEP_NOCONFIG_TIME_CAPABILITY	PHY_OFF(0xE0)
-#define UFS_PHY_TX_MIN_STALL_NOCONFIG_TIME_CAPABILITY	PHY_OFF(0xB8)
-#define UFS_PHY_RX_MIN_STALL_NOCONFIG_TIME_CAPABILITY	PHY_OFF(0xE4)
-#define UFS_PHY_TX_MIN_SAVE_CONFIG_TIME_CAPABILITY	PHY_OFF(0xBC)
-#define UFS_PHY_RX_MIN_SAVE_CONFIG_TIME_CAPABILITY	PHY_OFF(0xE8)
-#define UFS_PHY_RX_PWM_BURST_CLOSURE_LENGTH_CAPABILITY	PHY_OFF(0xFC)
-#define UFS_PHY_RX_MIN_ACTIVATETIME_CAPABILITY		PHY_OFF(0x100)
-#define UFS_PHY_RX_SIGDET_CTRL3				PHY_OFF(0x14c)
-#define UFS_PHY_RMMI_ATTR_CTRL			PHY_OFF(0x160)
-#define UFS_PHY_RMMI_RX_CFGUPDT_L1	(1 << 7)
-#define UFS_PHY_RMMI_TX_CFGUPDT_L1	(1 << 6)
-#define UFS_PHY_RMMI_CFGWR_L1		(1 << 5)
-#define UFS_PHY_RMMI_CFGRD_L1		(1 << 4)
-#define UFS_PHY_RMMI_RX_CFGUPDT_L0	(1 << 3)
-#define UFS_PHY_RMMI_TX_CFGUPDT_L0	(1 << 2)
-#define UFS_PHY_RMMI_CFGWR_L0		(1 << 1)
-#define UFS_PHY_RMMI_CFGRD_L0		(1 << 0)
-#define UFS_PHY_RMMI_ATTRID			PHY_OFF(0x164)
-#define UFS_PHY_RMMI_ATTRWRVAL			PHY_OFF(0x168)
-#define UFS_PHY_RMMI_ATTRRDVAL_L0_STATUS	PHY_OFF(0x16C)
-#define UFS_PHY_RMMI_ATTRRDVAL_L1_STATUS	PHY_OFF(0x170)
-#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x174)
-
-#define UFS_PHY_TX_LANE_ENABLE_MASK		0x3
-
-/*
- * This structure represents the 20nm specific phy.
- * common_cfg MUST remain the first field in this structure
- * in case extra fields are added. This way, when calling
- * get_ufs_qcom_phy() of generic phy, we can extract the
- * common phy structure (struct ufs_qcom_phy) out of it
- * regardless of the relevant specific phy.
- */
-struct ufs_qcom_phy_qmp_20nm {
-	struct ufs_qcom_phy common_cfg;
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_2_0[] = {
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x3f),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x1b),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x0f),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(0), 0x2F),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(0), 0x20),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_EMP_POST1_LVL(1), 0x2F),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_DRV_LVL(1), 0x20),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_A_1_3_0[] = {
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
-	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL3, 0x0D),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0xe1),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0xcc),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL_TXBAND, 0x08),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x10),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x40),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x90),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL2, 0x03),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(0), 0xf2),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0c),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL1(1), 0xf2),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0c),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(0), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(0), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(0), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(0), 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_LSB(1), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN1_MSB(1), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_LSB(1), 0xff),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQ_GAIN2_MSB(1), 0x00),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x2b),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x38),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x3c),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_UP_OFFSET, 0x02),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_DN_OFFSET, 0x02),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x01),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x40),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(0), 0x68),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX_LANE_MODE(1), 0x68),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(1), 0xdc),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_EQU_ADAPTOR_CNTRL2(0), 0xdc),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3),
-};
-
-static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65),
-	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1e),
-};
-
-#endif
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.c b/drivers/phy/phy-qcom-ufs-qmp-v3.c
new file mode 100644
index 0000000..6b8dbc2
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qmp-v3.h"
+
+#define UFS_PHY_NAME "ufs_phy_qmp_v3"
+
+static
+int ufs_qcom_phy_qmp_v3_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	tbl_A = phy_cal_table_rate_A;
+	tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_init(struct phy *generic_phy)
+{
+	struct ufs_qcom_phy_qmp_v3 *phy = phy_get_drvdata(generic_phy);
+	struct ufs_qcom_phy *phy_common = &phy->common_cfg;
+	int err;
+
+	err = ufs_qcom_phy_init_clks(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_clks() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+	err = ufs_qcom_phy_init_vregulators(generic_phy, phy_common);
+	if (err) {
+		dev_err(phy_common->dev, "%s: ufs_qcom_phy_init_vregulators() failed %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static
+void ufs_qcom_phy_qmp_v3_power_control(struct ufs_qcom_phy *phy,
+					 bool power_ctrl)
+{
+	if (!power_ctrl) {
+		/* apply analog power collapse */
+		writel_relaxed(0x0, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+		/*
+		 * Make sure that PHY knows its analog rail is going to be
+		 * powered OFF.
+		 */
+		mb();
+	} else {
+		/* bring PHY out of analog power collapse */
+		writel_relaxed(0x1, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+		/*
+		 * Before any transactions involving the PHY, ensure the PHY
+		 * knows that its analog rail is powered ON.
+		 */
+		mb();
+	}
+}
+
+static inline
+void ufs_qcom_phy_qmp_v3_set_tx_lane_enable(struct ufs_qcom_phy *phy, u32 val)
+{
+	/*
+	 * The v3 PHY does not have a TX_LANE_ENABLE register.
+	 * Implement this function as a no-op so that no error is
+	 * propagated to the caller.
+	 */
+}
+
+static
+void ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg(struct ufs_qcom_phy *phy, bool ctrl)
+{
+	u32 temp;
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_LINECFG_DISABLE);
+
+	if (ctrl) /* enable RX LineCfg */
+		temp &= ~UFS_PHY_RX_LINECFG_DISABLE_BIT;
+	else /* disable RX LineCfg */
+		temp |= UFS_PHY_RX_LINECFG_DISABLE_BIT;
+
+	writel_relaxed(temp, phy->mmio + UFS_PHY_LINECFG_DISABLE);
+	/* make sure the RX LineCfg configuration is applied before we return */
+	mb();
+}
+
+static inline void ufs_qcom_phy_qmp_v3_start_serdes(struct ufs_qcom_phy *phy)
+{
+	u32 tmp;
+
+	tmp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	tmp &= ~MASK_SERDES_START;
+	tmp |= (1 << OFFSET_SERDES_START);
+	writel_relaxed(tmp, phy->mmio + UFS_PHY_PHY_START);
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qmp_v3_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	err = readl_poll_timeout(phy_common->mmio + UFS_PHY_PCS_READY_STATUS,
+		val, (val & MASK_PCS_READY), 10, 1000000);
+	if (err) {
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+		goto out;
+	}
+
+out:
+	return err;
+}
+
+static
+int ufs_qcom_phy_qmp_v3_configure_lpm(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool enable)
+{
+	int err = 0;
+	int tbl_size;
+	struct ufs_qcom_phy_calibration *tbl = NULL;
+
+	/* The default low power mode configuration is SVS2 */
+	if (enable) {
+		tbl_size = ARRAY_SIZE(phy_cal_table_svs2_enable);
+		tbl = phy_cal_table_svs2_enable;
+	} else {
+		tbl_size = ARRAY_SIZE(phy_cal_table_svs2_disable);
+		tbl = phy_cal_table_svs2_disable;
+	}
+
+	if (!tbl) {
+		dev_err(ufs_qcom_phy->dev, "%s: tbl for SVS2 %s is NULL\n",
+			__func__, enable ? "enable" : "disable");
+		err = -EINVAL;
+		goto out;
+	}
+
+	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl, tbl_size);
+
+	/* flush buffered writes */
+	mb();
+
+out:
+	return err;
+}
+
+static const struct phy_ops ufs_qcom_phy_qmp_v3_phy_ops = {
+	.init		= ufs_qcom_phy_qmp_v3_init,
+	.exit		= ufs_qcom_phy_exit,
+	.power_on	= ufs_qcom_phy_power_on,
+	.power_off	= ufs_qcom_phy_power_off,
+	.owner		= THIS_MODULE,
+};
+
+static struct ufs_qcom_phy_specific_ops phy_v3_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qmp_v3_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qmp_v3_start_serdes,
+	.is_physical_coding_sublayer_ready = ufs_qcom_phy_qmp_v3_is_pcs_ready,
+	.set_tx_lane_enable	= ufs_qcom_phy_qmp_v3_set_tx_lane_enable,
+	.ctrl_rx_linecfg	= ufs_qcom_phy_qmp_v3_ctrl_rx_linecfg,
+	.power_control		= ufs_qcom_phy_qmp_v3_power_control,
+	.configure_lpm		= ufs_qcom_phy_qmp_v3_configure_lpm,
+};
+
+static int ufs_qcom_phy_qmp_v3_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qmp_v3 *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+				&ufs_qcom_phy_qmp_v3_phy_ops, &phy_v3_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qmp_v3_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qmp_v3_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qmp-v3"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qmp_v3_of_match);
+
+static struct platform_driver ufs_qcom_phy_qmp_v3_driver = {
+	.probe = ufs_qcom_phy_qmp_v3_probe,
+	.remove = ufs_qcom_phy_qmp_v3_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qmp_v3_of_match,
+		.name = "ufs_qcom_phy_qmp_v3",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qmp_v3_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QMP v3");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qmp-v3.h b/drivers/phy/phy-qcom-ufs-qmp-v3.h
new file mode 100644
index 0000000..e9ac76b
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qmp-v3.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QMP_V3_H_
+#define UFS_QCOM_PHY_QMP_V3_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_OFF(x)	(0x000 + x)
+#define PHY_OFF(x)	(0xC00 + x)
+#define TX_OFF(n, x)	(0x400 + (0x400 * n) + x)
+#define RX_OFF(n, x)	(0x600 + (0x400 * n) + x)
+
+/* UFS PHY QSERDES COM registers */
+#define QSERDES_COM_ATB_SEL1			COM_OFF(0x00)
+#define QSERDES_COM_ATB_SEL2			COM_OFF(0x04)
+#define QSERDES_COM_FREQ_UPDATE			COM_OFF(0x08)
+#define QSERDES_COM_BG_TIMER			COM_OFF(0x0C)
+#define QSERDES_COM_SSC_EN_CENTER		COM_OFF(0x10)
+#define QSERDES_COM_SSC_ADJ_PER1		COM_OFF(0x14)
+#define QSERDES_COM_SSC_ADJ_PER2		COM_OFF(0x18)
+#define QSERDES_COM_SSC_PER1			COM_OFF(0x1C)
+#define QSERDES_COM_SSC_PER2			COM_OFF(0x20)
+#define QSERDES_COM_SSC_STEP_SIZE1		COM_OFF(0x24)
+#define QSERDES_COM_SSC_STEP_SIZE2		COM_OFF(0x28)
+#define QSERDES_COM_POST_DIV			COM_OFF(0x2C)
+#define QSERDES_COM_POST_DIV_MUX		COM_OFF(0x30)
+#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x34)
+#define QSERDES_COM_CLK_ENABLE1			COM_OFF(0x38)
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x3C)
+#define QSERDES_COM_SYSCLK_BUF_ENABLE		COM_OFF(0x40)
+#define QSERDES_COM_PLL_EN			COM_OFF(0x44)
+#define QSERDES_COM_PLL_IVCO			COM_OFF(0x48)
+#define QSERDES_COM_CMN_IETRIM			COM_OFF(0x4C)
+#define QSERDES_COM_CMN_IPTRIM			COM_OFF(0x50)
+#define QSERDES_COM_EP_CLOCK_DETECT_CTR		COM_OFF(0x54)
+#define QSERDES_COM_SYSCLK_DET_COMP_STATUS	COM_OFF(0x58)
+#define QSERDES_COM_CLK_EP_DIV			COM_OFF(0x5C)
+#define QSERDES_COM_CP_CTRL_MODE0		COM_OFF(0x60)
+#define QSERDES_COM_CP_CTRL_MODE1		COM_OFF(0x64)
+#define QSERDES_COM_PLL_RCTRL_MODE0		COM_OFF(0x68)
+#define QSERDES_COM_PLL_RCTRL_MODE1		COM_OFF(0x6C)
+#define QSERDES_COM_PLL_CCTRL_MODE0		COM_OFF(0x70)
+#define QSERDES_COM_PLL_CCTRL_MODE1		COM_OFF(0x74)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x78)
+#define SERDES_COM_BIAS_EN_CTRL_BY_PSM		COM_OFF(0x7C)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0x80)
+#define QSERDES_COM_CML_SYSCLK_SEL		COM_OFF(0x84)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0x88)
+#define QSERDES_COM_RESETSM_CNTRL2		COM_OFF(0x8C)
+#define QSERDES_COM_LOCK_CMP_EN			COM_OFF(0x90)
+#define QSERDES_COM_LOCK_CMP_CFG		COM_OFF(0x94)
+#define QSERDES_COM_LOCK_CMP1_MODE0		COM_OFF(0x98)
+#define QSERDES_COM_LOCK_CMP2_MODE0		COM_OFF(0x9C)
+#define QSERDES_COM_LOCK_CMP3_MODE0		COM_OFF(0xA0)
+#define QSERDES_COM_LOCK_CMP1_MODE1		COM_OFF(0xA4)
+#define QSERDES_COM_LOCK_CMP2_MODE1		COM_OFF(0xA8)
+#define QSERDES_COM_LOCK_CMP3_MODE1		COM_OFF(0xAC)
+#define QSERDES_COM_DEC_START_MODE0		COM_OFF(0xB0)
+#define QSERDES_COM_DEC_START_MODE1		COM_OFF(0xB4)
+#define QSERDES_COM_DIV_FRAC_START1_MODE0	COM_OFF(0xB8)
+#define QSERDES_COM_DIV_FRAC_START2_MODE0	COM_OFF(0xBC)
+#define QSERDES_COM_DIV_FRAC_START3_MODE0	COM_OFF(0xC0)
+#define QSERDES_COM_DIV_FRAC_START1_MODE1	COM_OFF(0xC4)
+#define QSERDES_COM_DIV_FRAC_START2_MODE1	COM_OFF(0xC8)
+#define QSERDES_COM_DIV_FRAC_START3_MODE1	COM_OFF(0xCC)
+#define QSERDES_COM_INTEGLOOP_INITVAL		COM_OFF(0xD0)
+#define QSERDES_COM_INTEGLOOP_EN		COM_OFF(0xD4)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE0	COM_OFF(0xD8)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE0	COM_OFF(0xDC)
+#define QSERDES_COM_INTEGLOOP_GAIN0_MODE1	COM_OFF(0xE0)
+#define QSERDES_COM_INTEGLOOP_GAIN1_MODE1	COM_OFF(0xE4)
+#define QSERDES_COM_VCOCAL_DEADMAN_CTRL		COM_OFF(0xE8)
+#define QSERDES_COM_VCO_TUNE_CTRL		COM_OFF(0xEC)
+#define QSERDES_COM_VCO_TUNE_MAP		COM_OFF(0xF0)
+#define QSERDES_COM_VCO_TUNE1_MODE0		COM_OFF(0xF4)
+#define QSERDES_COM_VCO_TUNE2_MODE0		COM_OFF(0xF8)
+#define QSERDES_COM_VCO_TUNE1_MODE1		COM_OFF(0xFC)
+#define QSERDES_COM_VCO_TUNE2_MODE1		COM_OFF(0x100)
+#define QSERDES_COM_VCO_TUNE_INITVAL1		COM_OFF(0x104)
+#define QSERDES_COM_VCO_TUNE_INITVAL2		COM_OFF(0x108)
+#define QSERDES_COM_VCO_TUNE_MINVAL1		COM_OFF(0x10C)
+#define QSERDES_COM_VCO_TUNE_MINVAL2		COM_OFF(0x110)
+#define QSERDES_COM_VCO_TUNE_MAXVAL1		COM_OFF(0x114)
+#define QSERDES_COM_VCO_TUNE_MAXVAL2		COM_OFF(0x118)
+#define QSERDES_COM_VCO_TUNE_TIMER1		COM_OFF(0x11C)
+#define QSERDES_COM_VCO_TUNE_TIMER2		COM_OFF(0x120)
+#define QSERDES_COM_CMN_STATUS			COM_OFF(0x124)
+#define QSERDES_COM_RESET_SM_STATUS		COM_OFF(0x128)
+#define QSERDES_COM_RESTRIM_CODE_STATUS		COM_OFF(0x12C)
+#define QSERDES_COM_PLLCAL_CODE1_STATUS		COM_OFF(0x130)
+#define QSERDES_COM_PLLCAL_CODE2_STATUS		COM_OFF(0x134)
+#define QSERDES_COM_CLK_SELECT			COM_OFF(0x138)
+#define QSERDES_COM_HSCLK_SEL			COM_OFF(0x13C)
+#define QSERDES_COM_INTEGLOOP_BINCODE_STATUS	COM_OFF(0x140)
+#define QSERDES_COM_PLL_ANALOG			COM_OFF(0x144)
+#define QSERDES_COM_CORECLK_DIV_MODE0		COM_OFF(0x148)
+#define QSERDES_COM_CORECLK_DIV_MODE1		COM_OFF(0x14C)
+#define QSERDES_COM_SW_RESET			COM_OFF(0x150)
+#define QSERDES_COM_CORE_CLK_EN			COM_OFF(0x154)
+#define QSERDES_COM_C_READY_STATUS		COM_OFF(0x158)
+#define QSERDES_COM_CMN_CONFIG			COM_OFF(0x15C)
+#define QSERDES_COM_CMN_RATE_OVERRIDE		COM_OFF(0x160)
+#define QSERDES_COM_SVS_MODE_CLK_SEL		COM_OFF(0x164)
+#define QSERDES_COM_DEBUG_BUS0			COM_OFF(0x168)
+#define QSERDES_COM_DEBUG_BUS1			COM_OFF(0x16C)
+#define QSERDES_COM_DEBUG_BUS2			COM_OFF(0x170)
+#define QSERDES_COM_DEBUG_BUS3			COM_OFF(0x174)
+#define QSERDES_COM_DEBUG_BUS_SEL		COM_OFF(0x178)
+#define QSERDES_COM_CMN_MISC1			COM_OFF(0x17C)
+#define QSERDES_COM_CMN_MISC2			COM_OFF(0x180)
+#define QSERDES_COM_CMN_MODE			COM_OFF(0x184)
+#define QSERDES_COM_CMN_VREG_SEL		COM_OFF(0x188)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB	PHY_OFF(0x08)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB	PHY_OFF(0x0C)
+#define UFS_PHY_TX_LARGE_AMP_DRV_LVL		PHY_OFF(0x2C)
+#define UFS_PHY_TX_SMALL_AMP_DRV_LVL		PHY_OFF(0x34)
+#define UFS_PHY_LINECFG_DISABLE			PHY_OFF(0x130)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x134)
+#define UFS_PHY_RX_SIGDET_CTRL2			PHY_OFF(0x140)
+#define UFS_PHY_RX_PWM_GEAR_BAND		PHY_OFF(0x14C)
+#define UFS_PHY_PCS_READY_STATUS		PHY_OFF(0x160)
+
+/* UFS PHY TX registers */
+#define QSERDES_TX0_TRANSCEIVER_BIAS_EN		TX_OFF(0, 0x5C)
+#define QSERDES_TX0_LANE_MODE_1			TX_OFF(0, 0x8C)
+#define QSERDES_TX0_LANE_MODE_2			TX_OFF(0, 0x90)
+#define QSERDES_TX0_LANE_MODE_3			TX_OFF(0, 0x94)
+
+/* UFS PHY RX registers */
+#define QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF		RX_OFF(0, 0x24)
+#define QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER		RX_OFF(0, 0x28)
+#define QSERDES_RX0_UCDR_SVS_SO_GAIN			RX_OFF(0, 0x2C)
+#define QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN		RX_OFF(0, 0x30)
+#define QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE	RX_OFF(0, 0x34)
+#define QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW		RX_OFF(0, 0x3C)
+#define QSERDES_RX0_UCDR_PI_CONTROLS			RX_OFF(0, 0x44)
+#define QSERDES_RX0_RX_TERM_BW				RX_OFF(0, 0x7C)
+#define QSERDES_RX0_RX_EQ_GAIN2_LSB			RX_OFF(0, 0xC8)
+#define QSERDES_RX0_RX_EQ_GAIN2_MSB			RX_OFF(0, 0xCC)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL1		RX_OFF(0, 0xD0)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2		RX_OFF(0, 0xD4)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3		RX_OFF(0, 0xD8)
+#define QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4		RX_OFF(0, 0xDC)
+#define QSERDES_RX0_SIGDET_CNTRL			RX_OFF(0, 0x104)
+#define QSERDES_RX0_SIGDET_LVL				RX_OFF(0, 0x108)
+#define QSERDES_RX0_SIGDET_DEGLITCH_CNTRL		RX_OFF(0, 0x10C)
+#define QSERDES_RX0_RX_INTERFACE_MODE			RX_OFF(0, 0x11C)
+
+#define UFS_PHY_RX_LINECFG_DISABLE_BIT		BIT(1)
+
+/*
+ * This structure represents the v3 specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qmp_v3 {
+	struct ufs_qcom_phy common_cfg;
+};
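
The layout rule spelled out in the comment above is what lets the generic layer recover the common PHY state from a specific PHY pointer. Below is a minimal sketch of that idea, assuming the drvdata holds the specific struct; example_get_common() is a hypothetical name used only for illustration, the real accessor is get_ufs_qcom_phy() declared in phy-qcom-ufs-i.h:

/*
 * Sketch only: because common_cfg is the first member, a pointer to the
 * specific PHY object and a pointer to its embedded common part share the
 * same address, so the generic code can cast without knowing the outer type.
 */
static inline struct ufs_qcom_phy *
example_get_common(struct ufs_qcom_phy_qmp_v3 *v3_phy)
{
	return &v3_phy->common_cfg;	/* == (struct ufs_qcom_phy *)v3_phy */
}
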
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CMN_CONFIG, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0xD5),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CLK_SELECT, 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BG_TIMER, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_HSCLK_SEL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP_EN, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_CTRL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORE_CLK_EN, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IVCO, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_INITVAL2, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE0, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE0, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE0, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE0, 0x34),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE0, 0xCB),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE0, 0x01),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START_MODE1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CP_CTRL_MODE1, 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RCTRL_MODE1, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CCTRL_MODE1, 0x34),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE1_MODE1, 0xB2),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE2_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_TX0_LANE_MODE_1, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_LVL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_CNTRL, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_SIGDET_DEGLITCH_CNTRL, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_INTERFACE_MODE, 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_FO_GAIN, 0x0B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_TERM_BW, 0x5B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL2, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL3, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_RX_EQU_ADAPTOR_CNTRL4, 0x1D),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_HALF, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN_QUARTER, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SVS_SO_GAIN, 0x04),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_SO_SATURATION_AND_ENABLE, 0x4B),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_PI_CONTROLS, 0xF1),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX0_UCDR_FASTLOCK_COUNT_LOW, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SIGDET_CTRL2, 0x6C),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_LARGE_AMP_DRV_LVL, 0x0A),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TX_SMALL_AMP_DRV_LVL, 0x02),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_VCO_TUNE_MAP, 0x44),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_svs2_enable[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x14),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x7e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0x7f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x7e),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x99),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x0b),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0x66),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_svs2_disable[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE0, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_CORECLK_DIV_MODE1, 0x0a),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SVS_MODE_CLK_SEL, 0x05),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE0, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE0, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE0, 0xff),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE0, 0x0c),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN0_MODE1, 0x3f),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_INTEGLOOP_GAIN1_MODE1, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP1_MODE1, 0x32),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_LOCK_CMP2_MODE1, 0x0f),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0xcc),
+};
+
+#endif
diff --git a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c b/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c
new file mode 100644
index 0000000..61f1232
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "phy-qcom-ufs-qrbtc-msmskunk.h"
+
+#define UFS_PHY_NAME "ufs_phy_qrbtc_msmskunk"
+
+static
+int ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
+					bool is_rate_B)
+{
+	int err;
+	int tbl_size_A, tbl_size_B;
+	struct ufs_qcom_phy_calibration *tbl_A, *tbl_B;
+
+	tbl_A = phy_cal_table_rate_A;
+	tbl_size_A = ARRAY_SIZE(phy_cal_table_rate_A);
+
+	tbl_size_B = ARRAY_SIZE(phy_cal_table_rate_B);
+	tbl_B = phy_cal_table_rate_B;
+
+	err = ufs_qcom_phy_calibrate(ufs_qcom_phy,
+				     tbl_A, tbl_size_A,
+				     tbl_B, tbl_size_B,
+				     is_rate_B);
+
+	if (err)
+		dev_err(ufs_qcom_phy->dev,
+			"%s: ufs_qcom_phy_calibrate() failed %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static int
+ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready(struct ufs_qcom_phy *phy_common)
+{
+	int err = 0;
+	u32 val;
+
+	/*
+	 * The value we poll for is 0x3D, which is the combination of the
+	 * following fields:
+	 * RESET_SM field: 0x5
+	 * RESTRIMDONE bit: BIT(3)
+	 * PLLLOCK bit: BIT(4)
+	 * READY bit: BIT(5)
+	 */
+	#define QSERDES_COM_RESET_SM_REG_POLL_VAL	0x3D
+	err = readl_poll_timeout(phy_common->mmio + QSERDES_COM_RESET_SM,
+		val, (val == QSERDES_COM_RESET_SM_REG_POLL_VAL), 10, 1000000);
+
+	if (err)
+		dev_err(phy_common->dev, "%s: poll for pcs failed err = %d\n",
+			__func__, err);
+
+	return err;
+}
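
As a cross-check of the polled value, the fields listed in the comment above do combine to 0x3D. A small sketch with hypothetical macro names (the bit positions are taken from the comment, not verified against a register specification):

/* RESET_SM occupies bits [2:0]; the three status bits sit above it. */
#define EXAMPLE_RESET_SM_FIELD	0x05	/* bits [2:0] = 0b101 */
#define EXAMPLE_RESTRIMDONE	BIT(3)	/* 0x08 */
#define EXAMPLE_PLLLOCK		BIT(4)	/* 0x10 */
#define EXAMPLE_READY		BIT(5)	/* 0x20 */
/* 0x05 | 0x08 | 0x10 | 0x20 == 0x3D, the value polled for above. */
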
+
+static void ufs_qcom_phy_qrbtc_msmskunk_start_serdes(struct ufs_qcom_phy *phy)
+{
+	u32 temp;
+
+	writel_relaxed(0x01, phy->mmio + UFS_PHY_POWER_DOWN_CONTROL);
+
+	temp = readl_relaxed(phy->mmio + UFS_PHY_PHY_START);
+	temp |= 0x1;
+	writel_relaxed(temp, phy->mmio + UFS_PHY_PHY_START);
+
+	/* Ensure register value is committed */
+	mb();
+}
+
+static int ufs_qcom_phy_qrbtc_msmskunk_init(struct phy *generic_phy)
+{
+	return 0;
+}
+
+static const struct phy_ops ufs_qcom_phy_qrbtc_msmskunk_phy_ops = {
+	.init		= ufs_qcom_phy_qrbtc_msmskunk_init,
+	.exit		= ufs_qcom_phy_exit,
+	.owner		= THIS_MODULE,
+};
+
+static struct ufs_qcom_phy_specific_ops phy_qrbtc_msmskunk_ops = {
+	.calibrate_phy		= ufs_qcom_phy_qrbtc_msmskunk_phy_calibrate,
+	.start_serdes		= ufs_qcom_phy_qrbtc_msmskunk_start_serdes,
+	.is_physical_coding_sublayer_ready =
+				ufs_qcom_phy_qrbtc_msmskunk_is_pcs_ready,
+};
+
+static int ufs_qcom_phy_qrbtc_msmskunk_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy;
+	struct ufs_qcom_phy_qrbtc_msmskunk *phy;
+	int err = 0;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	generic_phy = ufs_qcom_phy_generic_probe(pdev, &phy->common_cfg,
+		&ufs_qcom_phy_qrbtc_msmskunk_phy_ops, &phy_qrbtc_msmskunk_ops);
+
+	if (!generic_phy) {
+		dev_err(dev, "%s: ufs_qcom_phy_generic_probe() failed\n",
+			__func__);
+		err = -EIO;
+		goto out;
+	}
+
+	phy_set_drvdata(generic_phy, phy);
+
+	strlcpy(phy->common_cfg.name, UFS_PHY_NAME,
+		sizeof(phy->common_cfg.name));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_phy_qrbtc_msmskunk_remove(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct phy *generic_phy = to_phy(dev);
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int err = 0;
+
+	err = ufs_qcom_phy_remove(generic_phy, ufs_qcom_phy);
+	if (err)
+		dev_err(dev, "%s: ufs_qcom_phy_remove failed = %d\n",
+			__func__, err);
+
+	return err;
+}
+
+static const struct of_device_id ufs_qcom_phy_qrbtc_msmskunk_of_match[] = {
+	{.compatible = "qcom,ufs-phy-qrbtc-msmskunk"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ufs_qcom_phy_qrbtc_msmskunk_of_match);
+
+static struct platform_driver ufs_qcom_phy_qrbtc_msmskunk_driver = {
+	.probe = ufs_qcom_phy_qrbtc_msmskunk_probe,
+	.remove = ufs_qcom_phy_qrbtc_msmskunk_remove,
+	.driver = {
+		.of_match_table = ufs_qcom_phy_qrbtc_msmskunk_of_match,
+		.name = "ufs_qcom_phy_qrbtc_msmskunk",
+		.owner = THIS_MODULE,
+	},
+};
+
+module_platform_driver(ufs_qcom_phy_qrbtc_msmskunk_driver);
+
+MODULE_DESCRIPTION("Universal Flash Storage (UFS) QCOM PHY QRBTC MSMSKUNK");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h b/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h
new file mode 100644
index 0000000..66a17f89
--- /dev/null
+++ b/drivers/phy/phy-qcom-ufs-qrbtc-msmskunk.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef UFS_QCOM_PHY_QRBTC_MSMSKUNK_H_
+#define UFS_QCOM_PHY_QRBTC_MSMSKUNK_H_
+
+#include "phy-qcom-ufs-i.h"
+
+/* QCOM UFS PHY control registers */
+#define COM_OFF(x)	(0x000 + x)
+#define TX_OFF(n, x)	(0x400 + (0x400 * n) + x)
+#define RX_OFF(n, x)	(0x600 + (0x400 * n) + x)
+#define PHY_OFF(x)	(0xC00 + x)
+#define PHY_USR(x)	(x)
+
+/* UFS PHY PLL block registers */
+#define QSERDES_COM_SYS_CLK_CTRL		COM_OFF(0x00)
+#define QSERDES_COM_PLL_VCOTAIL_EN		COM_OFF(0x04)
+#define QSERDES_COM_PLL_CNTRL			COM_OFF(0x14)
+#define QSERDES_COM_PLL_IP_SETI			COM_OFF(0x18)
+#define	QSERDES_COM_BIAS_EN_CLKBUFLR_EN		COM_OFF(0x20)
+#define QSERDES_COM_PLL_CP_SETI			COM_OFF(0x24)
+#define QSERDES_COM_PLL_IP_SETP			COM_OFF(0x28)
+#define QSERDES_COM_PLL_CP_SETP			COM_OFF(0x2C)
+#define QSERDES_COM_SYSCLK_EN_SEL		COM_OFF(0x38)
+#define QSERDES_COM_RES_CODE_TXBAND		COM_OFF(0x3C)
+#define QSERDES_COM_RESETSM_CNTRL		COM_OFF(0x40)
+#define QSERDES_COM_PLLLOCK_CMP1		COM_OFF(0x44)
+#define QSERDES_COM_PLLLOCK_CMP2		COM_OFF(0x48)
+#define QSERDES_COM_PLLLOCK_CMP3		COM_OFF(0x4C)
+#define QSERDES_COM_PLLLOCK_CMP_EN		COM_OFF(0x50)
+#define QSERDES_COM_DEC_START1			COM_OFF(0x64)
+#define QSERDES_COM_DIV_FRAC_START1		COM_OFF(0x98)
+#define QSERDES_COM_DIV_FRAC_START2		COM_OFF(0x9C)
+#define QSERDES_COM_DIV_FRAC_START3		COM_OFF(0xA0)
+#define QSERDES_COM_DEC_START2			COM_OFF(0xA4)
+#define QSERDES_COM_PLL_RXTXEPCLK_EN		COM_OFF(0xA8)
+#define QSERDES_COM_PLL_CRCTRL			COM_OFF(0xAC)
+#define QSERDES_COM_PLL_CLKEPDIV		COM_OFF(0xB0)
+#define QSERDES_COM_RESET_SM			COM_OFF(0xBC)
+
+/* RX LANE n (0, 1) registers */
+#define QSERDES_RX_CDR_CONTROL(n)		RX_OFF(n, 0x0)
+#define QSERDES_RX_RX_IQ_RXDET_EN(n)		RX_OFF(n, 0x28)
+#define QSERDES_RX_SIGDET_CNTRL(n)		RX_OFF(n, 0x34)
+#define QSERDES_RX_RX_BAND(n)			RX_OFF(n, 0x38)
+#define QSERDES_RX_CDR_CONTROL_HALF(n)		RX_OFF(n, 0x98)
+#define QSERDES_RX_CDR_CONTROL_QUARTER(n)	RX_OFF(n, 0x9C)
+#define QSERDES_RX_PWM_CNTRL1(n)		RX_OFF(n, 0x80)
+#define QSERDES_RX_PWM_CNTRL2(n)		RX_OFF(n, 0x84)
+#define QSERDES_RX_PWM_NDIV(n)			RX_OFF(n, 0x88)
+#define QSERDES_RX_SIGDET_CNTRL2(n)		RX_OFF(n, 0x8C)
+#define QSERDES_RX_UFS_CNTRL(n)			RX_OFF(n, 0x90)
+
+/* UFS PHY registers */
+#define UFS_PHY_PHY_START			PHY_OFF(0x00)
+#define UFS_PHY_POWER_DOWN_CONTROL		PHY_OFF(0x04)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB	PHY_OFF(0x08)
+#define UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB	PHY_OFF(0x0C)
+#define UFS_PHY_RX_SYM_RESYNC_CTRL		PHY_OFF(0x134)
+#define UFS_PHY_MULTI_LANE_CTRL1		PHY_OFF(0x1C4)
+
+/* QRBTC V2 USER REGISTERS */
+#define U11_UFS_RESET_REG_OFFSET		PHY_USR(0x4)
+#define U11_QRBTC_CONTROL_OFFSET		PHY_USR(0x18)
+#define U11_QRBTC_TX_CLK_CTRL			PHY_USR(0x20)
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_A[] = {
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_PHY_START, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_POWER_DOWN_CONTROL, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_RX_SYM_RESYNC_CTRL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_MSB, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_TIMER_20US_CORECLK_STEPS_LSB, 0x00),
+
+	/* QSERDES Common */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x3F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYSCLK_EN_SEL, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_SYS_CLK_CTRL, 0x16),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RES_CODE_TXBAND, 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_VCOTAIL_EN, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CNTRL, 0x24),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CLKEPDIV, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_RESETSM_CNTRL, 0x10),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_RXTXEPCLK_EN, 0x13),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CRCTRL, 0x43),
+
+	/* QSERDES RX0 */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_PWM_CNTRL1(0), 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_PWM_CNTRL2(0), 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_PWM_NDIV(0), 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL(0), 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(0), 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(0), 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL(0), 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL2(0), 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_BAND(0), 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UFS_CNTRL(0), 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_IQ_RXDET_EN(0), 0xF3),
+
+	/* QSERDES RX1 */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_PWM_CNTRL1(1), 0x08),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_PWM_CNTRL2(1), 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_PWM_NDIV(1), 0x30),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL(1), 0x40),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_HALF(1), 0x0C),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_CDR_CONTROL_QUARTER(1), 0x12),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL(1), 0xC0),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_SIGDET_CNTRL2(1), 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_BAND(1), 0x06),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_UFS_CNTRL(1), 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_RX_RX_IQ_RXDET_EN(1), 0xF3),
+
+	/* QSERDES PLL Settings - Series A */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x82),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x10),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0xFF),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x19),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x01),
+
+	UFS_QCOM_PHY_CAL_ENTRY(UFS_PHY_MULTI_LANE_CTRL1, 0x02),
+};
+
+static struct ufs_qcom_phy_calibration phy_cal_table_rate_B[] = {
+	/* QSERDES PLL Settings - Series B */
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START1, 0x98),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DEC_START2, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START1, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START2, 0x80),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_DIV_FRAC_START3, 0x10),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP1, 0x65),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP2, 0x1E),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP3, 0x00),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLLLOCK_CMP_EN, 0x03),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETI, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETI, 0x0F),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_IP_SETP, 0x07),
+	UFS_QCOM_PHY_CAL_ENTRY(QSERDES_COM_PLL_CP_SETP, 0x01),
+};
+
+
+/*
+ * This structure represents the qrbtc-msmskunk specific phy.
+ * common_cfg MUST remain the first field in this structure
+ * in case extra fields are added. This way, when calling
+ * get_ufs_qcom_phy() of generic phy, we can extract the
+ * common phy structure (struct ufs_qcom_phy) out of it
+ * regardless of the relevant specific phy.
+ */
+struct ufs_qcom_phy_qrbtc_msmskunk {
+	struct ufs_qcom_phy common_cfg;
+};
+
+#endif
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 18a5b49..b8b9080 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@
 #define MAX_PROP_NAME              32
 #define VDDA_PHY_MIN_UV            1000000
 #define VDDA_PHY_MAX_UV            1000000
-#define VDDA_PLL_MIN_UV            1800000
+#define VDDA_PLL_MIN_UV            1200000
 #define VDDA_PLL_MAX_UV            1800000
 #define VDDP_REF_CLK_MIN_UV        1200000
 #define VDDP_REF_CLK_MAX_UV        1200000
@@ -29,13 +29,24 @@
 static int ufs_qcom_phy_base_init(struct platform_device *pdev,
 				  struct ufs_qcom_phy *phy_common);
 
+void ufs_qcom_phy_write_tbl(struct ufs_qcom_phy *ufs_qcom_phy,
+			   struct ufs_qcom_phy_calibration *tbl,
+			   int tbl_size)
+{
+	int i;
+
+	for (i = 0; i < tbl_size; i++)
+		writel_relaxed(tbl[i].cfg_value,
+			       ufs_qcom_phy->mmio + tbl[i].reg_offset);
+}
+EXPORT_SYMBOL(ufs_qcom_phy_write_tbl);
+
 int ufs_qcom_phy_calibrate(struct ufs_qcom_phy *ufs_qcom_phy,
 			   struct ufs_qcom_phy_calibration *tbl_A,
 			   int tbl_size_A,
 			   struct ufs_qcom_phy_calibration *tbl_B,
 			   int tbl_size_B, bool is_rate_B)
 {
-	int i;
 	int ret = 0;
 
 	if (!tbl_A) {
@@ -44,9 +55,7 @@
 		goto out;
 	}
 
-	for (i = 0; i < tbl_size_A; i++)
-		writel_relaxed(tbl_A[i].cfg_value,
-			       ufs_qcom_phy->mmio + tbl_A[i].reg_offset);
+	ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_A, tbl_size_A);
 
 	/*
 	 * In case we would like to work in rate B, we need
@@ -62,9 +71,7 @@
 			goto out;
 		}
 
-		for (i = 0; i < tbl_size_B; i++)
-			writel_relaxed(tbl_B[i].cfg_value,
-				ufs_qcom_phy->mmio + tbl_B[i].reg_offset);
+		ufs_qcom_phy_write_tbl(ufs_qcom_phy, tbl_B, tbl_size_B);
 	}
 
 	/* flush buffered writes */
@@ -135,23 +142,21 @@
 	int err = 0;
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phy_mem");
+	if (!res) {
+		dev_err(dev, "%s: phy_mem resource not found\n", __func__);
+		err = -ENOMEM;
+		goto out;
+	}
+
 	phy_common->mmio = devm_ioremap_resource(dev, res);
 	if (IS_ERR((void const *)phy_common->mmio)) {
 		err = PTR_ERR((void const *)phy_common->mmio);
 		phy_common->mmio = NULL;
 		dev_err(dev, "%s: ioremap for phy_mem resource failed %d\n",
 			__func__, err);
-		return err;
 	}
-
-	/* "dev_ref_clk_ctrl_mem" is optional resource */
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-					   "dev_ref_clk_ctrl_mem");
-	phy_common->dev_ref_clk_ctrl_mmio = devm_ioremap_resource(dev, res);
-	if (IS_ERR((void const *)phy_common->dev_ref_clk_ctrl_mmio))
-		phy_common->dev_ref_clk_ctrl_mmio = NULL;
-
-	return 0;
+out:
+	return err;
 }
 
 static int __ufs_qcom_phy_clk_get(struct phy *phy,
@@ -186,16 +191,27 @@
 		       struct ufs_qcom_phy *phy_common)
 {
 	int err;
+	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "tx_iface_clk",
 				   &phy_common->tx_iface_clk);
+	/*
+	 * tx_iface_clk does not exist in newer versions of the UFS PHY HW,
+	 * so don't return an error if it is not found.
+	 */
 	if (err)
-		goto out;
+		dev_dbg(phy->dev, "%s: failed to get tx_iface_clk\n",
+			__func__);
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "rx_iface_clk",
 				   &phy_common->rx_iface_clk);
+	/*
+	 * rx_iface_clk does not exist in newer versions of the UFS PHY HW,
+	 * so don't return an error if it is not found.
+	 */
 	if (err)
-		goto out;
+		dev_dbg(phy->dev, "%s: failed to get rx_iface_clk\n",
+			__func__);
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk_src",
 				   &phy_common->ref_clk_src);
@@ -211,7 +227,15 @@
 
 	err = ufs_qcom_phy_clk_get(generic_phy, "ref_clk",
 				   &phy_common->ref_clk);
+	if (err)
+		goto out;
 
+	/*
+	 * "ref_aux_clk" is optional and only supported by certain
+	 * PHY versions, so don't abort init if it's not found.
+	 */
+	__ufs_qcom_phy_clk_get(generic_phy, "ref_aux_clk",
+				   &phy_common->ref_aux_clk, false);
 out:
 	return err;
 }
@@ -222,6 +246,7 @@
 			      struct ufs_qcom_phy *phy_common)
 {
 	int err;
+	int vdda_phy_uV;
 
 	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_pll,
 		"vdda-pll");
@@ -230,10 +255,13 @@
 
 	err = ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vdda_phy,
 		"vdda-phy");
-
 	if (err)
 		goto out;
 
+	vdda_phy_uV = regulator_get_voltage(phy_common->vdda_phy.reg);
+	phy_common->vdda_phy.max_uV = vdda_phy_uV;
+	phy_common->vdda_phy.min_uV = vdda_phy_uV;
+
 	/* vddp-ref-clk-* properties are optional */
 	__ufs_qcom_phy_init_vreg(generic_phy, &phy_common->vddp_ref_clk,
 				 "vddp-ref-clk", true);
@@ -419,9 +447,26 @@
 		goto out_disable_parent;
 	}
 
+	/*
+	 * "ref_aux_clk" is an optional clock and only supported by certain
+	 * PHY versions, hence make sure that the clk reference is available
+	 * before trying to enable the clock.
+	 */
+	if (phy->ref_aux_clk) {
+		ret = clk_prepare_enable(phy->ref_aux_clk);
+		if (ret) {
+			dev_err(phy->dev, "%s: ref_aux_clk enable failed %d\n",
+					__func__, ret);
+			goto out_disable_ref;
+		}
+	}
+
 	phy->is_ref_clk_enabled = true;
 	goto out;
 
+out_disable_ref:
+	if (phy->ref_clk)
+		clk_disable_unprepare(phy->ref_clk);
 out_disable_parent:
 	if (phy->ref_clk_parent)
 		clk_disable_unprepare(phy->ref_clk_parent);
@@ -462,6 +507,13 @@
 	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
 	if (phy->is_ref_clk_enabled) {
+		/*
+		 * "ref_aux_clk" is an optional clock and only supported by
+		 * certain PHY versions, hence make sure that the clk
+		 * reference is available before trying to disable the clock.
+		 */
+		if (phy->ref_aux_clk)
+			clk_disable_unprepare(phy->ref_aux_clk);
 		clk_disable_unprepare(phy->ref_clk);
 		/*
 		 * "ref_clk_parent" is optional clock hence make sure that clk
@@ -475,56 +527,6 @@
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk);
 
-#define UFS_REF_CLK_EN	(1 << 5)
-
-static void ufs_qcom_phy_dev_ref_clk_ctrl(struct phy *generic_phy, bool enable)
-{
-	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
-
-	if (phy->dev_ref_clk_ctrl_mmio &&
-	    (enable ^ phy->is_dev_ref_clk_enabled)) {
-		u32 temp = readl_relaxed(phy->dev_ref_clk_ctrl_mmio);
-
-		if (enable)
-			temp |= UFS_REF_CLK_EN;
-		else
-			temp &= ~UFS_REF_CLK_EN;
-
-		/*
-		 * If we are here to disable this clock immediately after
-		 * entering into hibern8, we need to make sure that device
-		 * ref_clk is active atleast 1us after the hibern8 enter.
-		 */
-		if (!enable)
-			udelay(1);
-
-		writel_relaxed(temp, phy->dev_ref_clk_ctrl_mmio);
-		/* ensure that ref_clk is enabled/disabled before we return */
-		wmb();
-		/*
-		 * If we call hibern8 exit after this, we need to make sure that
-		 * device ref_clk is stable for atleast 1us before the hibern8
-		 * exit command.
-		 */
-		if (enable)
-			udelay(1);
-
-		phy->is_dev_ref_clk_enabled = enable;
-	}
-}
-
-void ufs_qcom_phy_enable_dev_ref_clk(struct phy *generic_phy)
-{
-	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk);
-
-void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy)
-{
-	ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false);
-}
-EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk);
-
 /* Turn ON M-PHY RMMI interface clocks */
 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy)
 {
@@ -534,6 +536,9 @@
 	if (phy->is_iface_clk_enabled)
 		goto out;
 
+	if (!phy->tx_iface_clk)
+		goto out;
+
 	ret = clk_prepare_enable(phy->tx_iface_clk);
 	if (ret) {
 		dev_err(phy->dev, "%s: tx_iface_clk enable failed %d\n",
@@ -559,6 +564,9 @@
 {
 	struct ufs_qcom_phy *phy = get_ufs_qcom_phy(generic_phy);
 
+	if (!phy->tx_iface_clk)
+		return;
+
 	if (phy->is_iface_clk_enabled) {
 		clk_disable_unprepare(phy->tx_iface_clk);
 		clk_disable_unprepare(phy->rx_iface_clk);
@@ -589,19 +597,26 @@
 	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
 	int ret = 0;
 
-	if (!ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable) {
-		dev_err(ufs_qcom_phy->dev, "%s: set_tx_lane_enable() callback is not supported\n",
-			__func__);
-		ret = -ENOTSUPP;
-	} else {
+	if (ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable)
 		ufs_qcom_phy->phy_spec_ops->set_tx_lane_enable(ufs_qcom_phy,
 							       tx_lanes);
-	}
 
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable);
 
+int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int ret = 0;
+
+	if (ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg)
+		ufs_qcom_phy->phy_spec_ops->ctrl_rx_linecfg(ufs_qcom_phy, ctrl);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ufs_qcom_phy_ctrl_rx_linecfg);
+
 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy,
 					  u8 major, u16 minor, u16 step)
 {
@@ -634,6 +649,14 @@
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy);
 
+const char *ufs_qcom_phy_name(struct phy *phy)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(phy);
+
+	return ufs_qcom_phy->name;
+}
+EXPORT_SYMBOL(ufs_qcom_phy_name);
+
 int ufs_qcom_phy_remove(struct phy *generic_phy,
 			struct ufs_qcom_phy *ufs_qcom_phy)
 {
@@ -745,3 +768,21 @@
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ufs_qcom_phy_power_off);
+
+int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable)
+{
+	struct ufs_qcom_phy *ufs_qcom_phy = get_ufs_qcom_phy(generic_phy);
+	int ret = 0;
+
+	if (ufs_qcom_phy->phy_spec_ops->configure_lpm) {
+		ret = ufs_qcom_phy->phy_spec_ops->
+				configure_lpm(ufs_qcom_phy, enable);
+		if (ret)
+			dev_err(ufs_qcom_phy->dev,
+				"%s: configure_lpm(%s) failed %d\n",
+				__func__, enable ? "enable" : "disable", ret);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ufs_qcom_phy_configure_lpm);
diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig
index 93ef268..9ddaff9 100644
--- a/drivers/pinctrl/qcom/Kconfig
+++ b/drivers/pinctrl/qcom/Kconfig
@@ -79,6 +79,15 @@
 	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
 	  Qualcomm TLMM block found on the Qualcomm 8916 platform.
 
+config PINCTRL_MSMSKUNK
+	tristate "Qualcomm Technologies Inc MSMSKUNK pin controller driver"
+	depends on GPIOLIB && OF
+	select PINCTRL_MSM
+	help
+	  This is the pinctrl, pinmux, pinconf and gpiolib driver for the
+	  Qualcomm Technologies Inc TLMM block found on the Qualcomm
+	  Technologies Inc MSMSKUNK platform.
+
 config PINCTRL_MSM8996
 	tristate "Qualcomm MSM8996 pin controller driver"
 	depends on GPIOLIB && OF
diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile
index 8319e11..6e14ef9 100644
--- a/drivers/pinctrl/qcom/Makefile
+++ b/drivers/pinctrl/qcom/Makefile
@@ -15,3 +15,4 @@
 obj-$(CONFIG_PINCTRL_QCOM_SPMI_PMIC) += pinctrl-spmi-mpp.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-gpio.o
 obj-$(CONFIG_PINCTRL_QCOM_SSBI_PMIC) += pinctrl-ssbi-mpp.o
+obj-$(CONFIG_PINCTRL_MSMSKUNK) += pinctrl-msmskunk.o
diff --git a/drivers/pinctrl/qcom/pinctrl-msmskunk.c b/drivers/pinctrl/qcom/pinctrl-msmskunk.c
new file mode 100644
index 0000000..9d3ec83
--- /dev/null
+++ b/drivers/pinctrl/qcom/pinctrl-msmskunk.c
@@ -0,0 +1,2443 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pinctrl/pinctrl.h>
+
+#include "pinctrl-msm.h"
+
+#define FUNCTION(fname)			                \
+	[msm_mux_##fname] = {		                \
+		.name = #fname,				\
+		.groups = fname##_groups,               \
+		.ngroups = ARRAY_SIZE(fname##_groups),	\
+	}
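+
+/*
+ * Editorial note (illustrative, not part of the original patch): for
+ * example, FUNCTION(qup0) expands to
+ *
+ *	[msm_mux_qup0] = {
+ *		.name = "qup0",
+ *		.groups = qup0_groups,
+ *		.ngroups = ARRAY_SIZE(qup0_groups),
+ *	}
+ *
+ * so each enum msmskunk_functions value indexes its matching entry in
+ * the msmskunk_functions[] table further below.
+ */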
+
+#define NORTH	0x00500000
+#define SOUTH	0x00900000
+#define REG_SIZE 0x1000
+#define PINGROUP(id, base, f1, f2, f3, f4, f5, f6, f7, f8, f9)	\
+	{					        \
+		.name = "gpio" #id,			\
+		.pins = gpio##id##_pins,		\
+		.npins = (unsigned int)ARRAY_SIZE(gpio##id##_pins),	\
+		.funcs = (int[]){			\
+			msm_mux_gpio, /* gpio mode */	\
+			msm_mux_##f1,			\
+			msm_mux_##f2,			\
+			msm_mux_##f3,			\
+			msm_mux_##f4,			\
+			msm_mux_##f5,			\
+			msm_mux_##f6,			\
+			msm_mux_##f7,			\
+			msm_mux_##f8,			\
+			msm_mux_##f9			\
+		},				        \
+		.nfuncs = 10,				\
+		.ctl_reg = base + REG_SIZE * id,		\
+		.io_reg = base + 0x4 + REG_SIZE * id,		\
+		.intr_cfg_reg = base + 0x8 + REG_SIZE * id,		\
+		.intr_status_reg = base + 0xc + REG_SIZE * id,	\
+		.intr_target_reg = base + 0x8 + REG_SIZE * id,	\
+		.mux_bit = 2,			\
+		.pull_bit = 0,			\
+		.drv_bit = 6,			\
+		.oe_bit = 9,			\
+		.in_bit = 0,			\
+		.out_bit = 1,			\
+		.intr_enable_bit = 0,		\
+		.intr_status_bit = 0,		\
+		.intr_target_bit = 5,		\
+		.intr_target_kpss_val = 3,	\
+		.intr_raw_status_bit = 4,	\
+		.intr_polarity_bit = 1,		\
+		.intr_detection_bit = 2,	\
+		.intr_detection_width = 2,	\
+	}
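+
+/*
+ * Editorial note (illustrative, not part of the original patch): the
+ * PINGROUP() register offsets are derived from the NORTH/SOUTH tile
+ * base address plus REG_SIZE (0x1000) per GPIO.  For instance, gpio3
+ * in the NORTH tile resolves to:
+ *
+ *	ctl_reg         = 0x00500000 + 0x1000 * 3 = 0x00503000
+ *	io_reg          = ctl_reg + 0x4
+ *	intr_cfg_reg    = ctl_reg + 0x8
+ *	intr_status_reg = ctl_reg + 0xc
+ */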
+
+#define SDC_QDSD_PINGROUP(pg_name, ctl, pull, drv)	\
+	{					        \
+		.name = #pg_name,			\
+		.pins = pg_name##_pins,			\
+		.npins = (unsigned int)ARRAY_SIZE(pg_name##_pins),	\
+		.ctl_reg = ctl,				\
+		.io_reg = 0,				\
+		.intr_cfg_reg = 0,			\
+		.intr_status_reg = 0,			\
+		.intr_target_reg = 0,			\
+		.mux_bit = -1,				\
+		.pull_bit = pull,			\
+		.drv_bit = drv,				\
+		.oe_bit = -1,				\
+		.in_bit = -1,				\
+		.out_bit = -1,				\
+		.intr_enable_bit = -1,			\
+		.intr_status_bit = -1,			\
+		.intr_target_bit = -1,			\
+		.intr_raw_status_bit = -1,		\
+		.intr_polarity_bit = -1,		\
+		.intr_detection_bit = -1,		\
+		.intr_detection_width = -1,		\
+	}
+
+static const struct pinctrl_pin_desc msmskunk_pins[] = {
+	PINCTRL_PIN(0, "GPIO_0"),
+	PINCTRL_PIN(1, "GPIO_1"),
+	PINCTRL_PIN(2, "GPIO_2"),
+	PINCTRL_PIN(3, "GPIO_3"),
+	PINCTRL_PIN(4, "GPIO_4"),
+	PINCTRL_PIN(5, "GPIO_5"),
+	PINCTRL_PIN(6, "GPIO_6"),
+	PINCTRL_PIN(7, "GPIO_7"),
+	PINCTRL_PIN(8, "GPIO_8"),
+	PINCTRL_PIN(9, "GPIO_9"),
+	PINCTRL_PIN(10, "GPIO_10"),
+	PINCTRL_PIN(11, "GPIO_11"),
+	PINCTRL_PIN(12, "GPIO_12"),
+	PINCTRL_PIN(13, "GPIO_13"),
+	PINCTRL_PIN(14, "GPIO_14"),
+	PINCTRL_PIN(15, "GPIO_15"),
+	PINCTRL_PIN(16, "GPIO_16"),
+	PINCTRL_PIN(17, "GPIO_17"),
+	PINCTRL_PIN(18, "GPIO_18"),
+	PINCTRL_PIN(19, "GPIO_19"),
+	PINCTRL_PIN(20, "GPIO_20"),
+	PINCTRL_PIN(21, "GPIO_21"),
+	PINCTRL_PIN(22, "GPIO_22"),
+	PINCTRL_PIN(23, "GPIO_23"),
+	PINCTRL_PIN(24, "GPIO_24"),
+	PINCTRL_PIN(25, "GPIO_25"),
+	PINCTRL_PIN(26, "GPIO_26"),
+	PINCTRL_PIN(27, "GPIO_27"),
+	PINCTRL_PIN(28, "GPIO_28"),
+	PINCTRL_PIN(29, "GPIO_29"),
+	PINCTRL_PIN(30, "GPIO_30"),
+	PINCTRL_PIN(31, "GPIO_31"),
+	PINCTRL_PIN(32, "GPIO_32"),
+	PINCTRL_PIN(33, "GPIO_33"),
+	PINCTRL_PIN(34, "GPIO_34"),
+	PINCTRL_PIN(35, "GPIO_35"),
+	PINCTRL_PIN(36, "GPIO_36"),
+	PINCTRL_PIN(37, "GPIO_37"),
+	PINCTRL_PIN(38, "GPIO_38"),
+	PINCTRL_PIN(39, "GPIO_39"),
+	PINCTRL_PIN(40, "GPIO_40"),
+	PINCTRL_PIN(41, "GPIO_41"),
+	PINCTRL_PIN(42, "GPIO_42"),
+	PINCTRL_PIN(43, "GPIO_43"),
+	PINCTRL_PIN(44, "GPIO_44"),
+	PINCTRL_PIN(45, "GPIO_45"),
+	PINCTRL_PIN(46, "GPIO_46"),
+	PINCTRL_PIN(47, "GPIO_47"),
+	PINCTRL_PIN(48, "GPIO_48"),
+	PINCTRL_PIN(49, "GPIO_49"),
+	PINCTRL_PIN(50, "GPIO_50"),
+	PINCTRL_PIN(51, "GPIO_51"),
+	PINCTRL_PIN(52, "GPIO_52"),
+	PINCTRL_PIN(53, "GPIO_53"),
+	PINCTRL_PIN(54, "GPIO_54"),
+	PINCTRL_PIN(55, "GPIO_55"),
+	PINCTRL_PIN(56, "GPIO_56"),
+	PINCTRL_PIN(57, "GPIO_57"),
+	PINCTRL_PIN(58, "GPIO_58"),
+	PINCTRL_PIN(59, "GPIO_59"),
+	PINCTRL_PIN(60, "GPIO_60"),
+	PINCTRL_PIN(61, "GPIO_61"),
+	PINCTRL_PIN(62, "GPIO_62"),
+	PINCTRL_PIN(63, "GPIO_63"),
+	PINCTRL_PIN(64, "GPIO_64"),
+	PINCTRL_PIN(65, "GPIO_65"),
+	PINCTRL_PIN(66, "GPIO_66"),
+	PINCTRL_PIN(67, "GPIO_67"),
+	PINCTRL_PIN(68, "GPIO_68"),
+	PINCTRL_PIN(69, "GPIO_69"),
+	PINCTRL_PIN(70, "GPIO_70"),
+	PINCTRL_PIN(71, "GPIO_71"),
+	PINCTRL_PIN(72, "GPIO_72"),
+	PINCTRL_PIN(73, "GPIO_73"),
+	PINCTRL_PIN(74, "GPIO_74"),
+	PINCTRL_PIN(75, "GPIO_75"),
+	PINCTRL_PIN(76, "GPIO_76"),
+	PINCTRL_PIN(77, "GPIO_77"),
+	PINCTRL_PIN(78, "GPIO_78"),
+	PINCTRL_PIN(79, "GPIO_79"),
+	PINCTRL_PIN(80, "GPIO_80"),
+	PINCTRL_PIN(81, "GPIO_81"),
+	PINCTRL_PIN(82, "GPIO_82"),
+	PINCTRL_PIN(83, "GPIO_83"),
+	PINCTRL_PIN(84, "GPIO_84"),
+	PINCTRL_PIN(85, "GPIO_85"),
+	PINCTRL_PIN(86, "GPIO_86"),
+	PINCTRL_PIN(87, "GPIO_87"),
+	PINCTRL_PIN(88, "GPIO_88"),
+	PINCTRL_PIN(89, "GPIO_89"),
+	PINCTRL_PIN(90, "GPIO_90"),
+	PINCTRL_PIN(91, "GPIO_91"),
+	PINCTRL_PIN(92, "GPIO_92"),
+	PINCTRL_PIN(93, "GPIO_93"),
+	PINCTRL_PIN(94, "GPIO_94"),
+	PINCTRL_PIN(95, "GPIO_95"),
+	PINCTRL_PIN(96, "GPIO_96"),
+	PINCTRL_PIN(97, "GPIO_97"),
+	PINCTRL_PIN(98, "GPIO_98"),
+	PINCTRL_PIN(99, "GPIO_99"),
+	PINCTRL_PIN(100, "GPIO_100"),
+	PINCTRL_PIN(101, "GPIO_101"),
+	PINCTRL_PIN(102, "GPIO_102"),
+	PINCTRL_PIN(103, "GPIO_103"),
+	PINCTRL_PIN(104, "GPIO_104"),
+	PINCTRL_PIN(105, "GPIO_105"),
+	PINCTRL_PIN(106, "GPIO_106"),
+	PINCTRL_PIN(107, "GPIO_107"),
+	PINCTRL_PIN(108, "GPIO_108"),
+	PINCTRL_PIN(109, "GPIO_109"),
+	PINCTRL_PIN(110, "GPIO_110"),
+	PINCTRL_PIN(111, "GPIO_111"),
+	PINCTRL_PIN(112, "GPIO_112"),
+	PINCTRL_PIN(113, "GPIO_113"),
+	PINCTRL_PIN(114, "GPIO_114"),
+	PINCTRL_PIN(115, "GPIO_115"),
+	PINCTRL_PIN(116, "GPIO_116"),
+	PINCTRL_PIN(117, "GPIO_117"),
+	PINCTRL_PIN(118, "GPIO_118"),
+	PINCTRL_PIN(119, "GPIO_119"),
+	PINCTRL_PIN(120, "GPIO_120"),
+	PINCTRL_PIN(121, "GPIO_121"),
+	PINCTRL_PIN(122, "GPIO_122"),
+	PINCTRL_PIN(123, "GPIO_123"),
+	PINCTRL_PIN(124, "GPIO_124"),
+	PINCTRL_PIN(125, "GPIO_125"),
+	PINCTRL_PIN(126, "GPIO_126"),
+	PINCTRL_PIN(127, "GPIO_127"),
+	PINCTRL_PIN(128, "GPIO_128"),
+	PINCTRL_PIN(129, "GPIO_129"),
+	PINCTRL_PIN(130, "GPIO_130"),
+	PINCTRL_PIN(131, "GPIO_131"),
+	PINCTRL_PIN(132, "GPIO_132"),
+	PINCTRL_PIN(133, "GPIO_133"),
+	PINCTRL_PIN(134, "GPIO_134"),
+	PINCTRL_PIN(135, "GPIO_135"),
+	PINCTRL_PIN(136, "GPIO_136"),
+	PINCTRL_PIN(137, "GPIO_137"),
+	PINCTRL_PIN(138, "GPIO_138"),
+	PINCTRL_PIN(139, "GPIO_139"),
+	PINCTRL_PIN(140, "GPIO_140"),
+	PINCTRL_PIN(141, "GPIO_141"),
+	PINCTRL_PIN(142, "GPIO_142"),
+	PINCTRL_PIN(143, "GPIO_143"),
+	PINCTRL_PIN(144, "GPIO_144"),
+	PINCTRL_PIN(145, "GPIO_145"),
+	PINCTRL_PIN(146, "GPIO_146"),
+	PINCTRL_PIN(147, "GPIO_147"),
+	PINCTRL_PIN(148, "GPIO_148"),
+	PINCTRL_PIN(149, "GPIO_149"),
+	PINCTRL_PIN(150, "SDC2_CLK"),
+	PINCTRL_PIN(151, "SDC2_CMD"),
+	PINCTRL_PIN(152, "SDC2_DATA"),
+};
+
+#define DECLARE_MSM_GPIO_PINS(pin) \
+	static const unsigned int gpio##pin##_pins[] = { pin }
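+/*
+ * Editorial note (illustrative, not part of the original patch):
+ * DECLARE_MSM_GPIO_PINS(0); expands to
+ *	static const unsigned int gpio0_pins[] = { 0 };
+ * so each pingroup below gets a one-element pin array.
+ */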
+DECLARE_MSM_GPIO_PINS(0);
+DECLARE_MSM_GPIO_PINS(1);
+DECLARE_MSM_GPIO_PINS(2);
+DECLARE_MSM_GPIO_PINS(3);
+DECLARE_MSM_GPIO_PINS(4);
+DECLARE_MSM_GPIO_PINS(5);
+DECLARE_MSM_GPIO_PINS(6);
+DECLARE_MSM_GPIO_PINS(7);
+DECLARE_MSM_GPIO_PINS(8);
+DECLARE_MSM_GPIO_PINS(9);
+DECLARE_MSM_GPIO_PINS(10);
+DECLARE_MSM_GPIO_PINS(11);
+DECLARE_MSM_GPIO_PINS(12);
+DECLARE_MSM_GPIO_PINS(13);
+DECLARE_MSM_GPIO_PINS(14);
+DECLARE_MSM_GPIO_PINS(15);
+DECLARE_MSM_GPIO_PINS(16);
+DECLARE_MSM_GPIO_PINS(17);
+DECLARE_MSM_GPIO_PINS(18);
+DECLARE_MSM_GPIO_PINS(19);
+DECLARE_MSM_GPIO_PINS(20);
+DECLARE_MSM_GPIO_PINS(21);
+DECLARE_MSM_GPIO_PINS(22);
+DECLARE_MSM_GPIO_PINS(23);
+DECLARE_MSM_GPIO_PINS(24);
+DECLARE_MSM_GPIO_PINS(25);
+DECLARE_MSM_GPIO_PINS(26);
+DECLARE_MSM_GPIO_PINS(27);
+DECLARE_MSM_GPIO_PINS(28);
+DECLARE_MSM_GPIO_PINS(29);
+DECLARE_MSM_GPIO_PINS(30);
+DECLARE_MSM_GPIO_PINS(31);
+DECLARE_MSM_GPIO_PINS(32);
+DECLARE_MSM_GPIO_PINS(33);
+DECLARE_MSM_GPIO_PINS(34);
+DECLARE_MSM_GPIO_PINS(35);
+DECLARE_MSM_GPIO_PINS(36);
+DECLARE_MSM_GPIO_PINS(37);
+DECLARE_MSM_GPIO_PINS(38);
+DECLARE_MSM_GPIO_PINS(39);
+DECLARE_MSM_GPIO_PINS(40);
+DECLARE_MSM_GPIO_PINS(41);
+DECLARE_MSM_GPIO_PINS(42);
+DECLARE_MSM_GPIO_PINS(43);
+DECLARE_MSM_GPIO_PINS(44);
+DECLARE_MSM_GPIO_PINS(45);
+DECLARE_MSM_GPIO_PINS(46);
+DECLARE_MSM_GPIO_PINS(47);
+DECLARE_MSM_GPIO_PINS(48);
+DECLARE_MSM_GPIO_PINS(49);
+DECLARE_MSM_GPIO_PINS(50);
+DECLARE_MSM_GPIO_PINS(51);
+DECLARE_MSM_GPIO_PINS(52);
+DECLARE_MSM_GPIO_PINS(53);
+DECLARE_MSM_GPIO_PINS(54);
+DECLARE_MSM_GPIO_PINS(55);
+DECLARE_MSM_GPIO_PINS(56);
+DECLARE_MSM_GPIO_PINS(57);
+DECLARE_MSM_GPIO_PINS(58);
+DECLARE_MSM_GPIO_PINS(59);
+DECLARE_MSM_GPIO_PINS(60);
+DECLARE_MSM_GPIO_PINS(61);
+DECLARE_MSM_GPIO_PINS(62);
+DECLARE_MSM_GPIO_PINS(63);
+DECLARE_MSM_GPIO_PINS(64);
+DECLARE_MSM_GPIO_PINS(65);
+DECLARE_MSM_GPIO_PINS(66);
+DECLARE_MSM_GPIO_PINS(67);
+DECLARE_MSM_GPIO_PINS(68);
+DECLARE_MSM_GPIO_PINS(69);
+DECLARE_MSM_GPIO_PINS(70);
+DECLARE_MSM_GPIO_PINS(71);
+DECLARE_MSM_GPIO_PINS(72);
+DECLARE_MSM_GPIO_PINS(73);
+DECLARE_MSM_GPIO_PINS(74);
+DECLARE_MSM_GPIO_PINS(75);
+DECLARE_MSM_GPIO_PINS(76);
+DECLARE_MSM_GPIO_PINS(77);
+DECLARE_MSM_GPIO_PINS(78);
+DECLARE_MSM_GPIO_PINS(79);
+DECLARE_MSM_GPIO_PINS(80);
+DECLARE_MSM_GPIO_PINS(81);
+DECLARE_MSM_GPIO_PINS(82);
+DECLARE_MSM_GPIO_PINS(83);
+DECLARE_MSM_GPIO_PINS(84);
+DECLARE_MSM_GPIO_PINS(85);
+DECLARE_MSM_GPIO_PINS(86);
+DECLARE_MSM_GPIO_PINS(87);
+DECLARE_MSM_GPIO_PINS(88);
+DECLARE_MSM_GPIO_PINS(89);
+DECLARE_MSM_GPIO_PINS(90);
+DECLARE_MSM_GPIO_PINS(91);
+DECLARE_MSM_GPIO_PINS(92);
+DECLARE_MSM_GPIO_PINS(93);
+DECLARE_MSM_GPIO_PINS(94);
+DECLARE_MSM_GPIO_PINS(95);
+DECLARE_MSM_GPIO_PINS(96);
+DECLARE_MSM_GPIO_PINS(97);
+DECLARE_MSM_GPIO_PINS(98);
+DECLARE_MSM_GPIO_PINS(99);
+DECLARE_MSM_GPIO_PINS(100);
+DECLARE_MSM_GPIO_PINS(101);
+DECLARE_MSM_GPIO_PINS(102);
+DECLARE_MSM_GPIO_PINS(103);
+DECLARE_MSM_GPIO_PINS(104);
+DECLARE_MSM_GPIO_PINS(105);
+DECLARE_MSM_GPIO_PINS(106);
+DECLARE_MSM_GPIO_PINS(107);
+DECLARE_MSM_GPIO_PINS(108);
+DECLARE_MSM_GPIO_PINS(109);
+DECLARE_MSM_GPIO_PINS(110);
+DECLARE_MSM_GPIO_PINS(111);
+DECLARE_MSM_GPIO_PINS(112);
+DECLARE_MSM_GPIO_PINS(113);
+DECLARE_MSM_GPIO_PINS(114);
+DECLARE_MSM_GPIO_PINS(115);
+DECLARE_MSM_GPIO_PINS(116);
+DECLARE_MSM_GPIO_PINS(117);
+DECLARE_MSM_GPIO_PINS(118);
+DECLARE_MSM_GPIO_PINS(119);
+DECLARE_MSM_GPIO_PINS(120);
+DECLARE_MSM_GPIO_PINS(121);
+DECLARE_MSM_GPIO_PINS(122);
+DECLARE_MSM_GPIO_PINS(123);
+DECLARE_MSM_GPIO_PINS(124);
+DECLARE_MSM_GPIO_PINS(125);
+DECLARE_MSM_GPIO_PINS(126);
+DECLARE_MSM_GPIO_PINS(127);
+DECLARE_MSM_GPIO_PINS(128);
+DECLARE_MSM_GPIO_PINS(129);
+DECLARE_MSM_GPIO_PINS(130);
+DECLARE_MSM_GPIO_PINS(131);
+DECLARE_MSM_GPIO_PINS(132);
+DECLARE_MSM_GPIO_PINS(133);
+DECLARE_MSM_GPIO_PINS(134);
+DECLARE_MSM_GPIO_PINS(135);
+DECLARE_MSM_GPIO_PINS(136);
+DECLARE_MSM_GPIO_PINS(137);
+DECLARE_MSM_GPIO_PINS(138);
+DECLARE_MSM_GPIO_PINS(139);
+DECLARE_MSM_GPIO_PINS(140);
+DECLARE_MSM_GPIO_PINS(141);
+DECLARE_MSM_GPIO_PINS(142);
+DECLARE_MSM_GPIO_PINS(143);
+DECLARE_MSM_GPIO_PINS(144);
+DECLARE_MSM_GPIO_PINS(145);
+DECLARE_MSM_GPIO_PINS(146);
+DECLARE_MSM_GPIO_PINS(147);
+DECLARE_MSM_GPIO_PINS(148);
+DECLARE_MSM_GPIO_PINS(149);
+
+static const unsigned int sdc2_clk_pins[] = { 150 };
+static const unsigned int sdc2_cmd_pins[] = { 151 };
+static const unsigned int sdc2_data_pins[] = { 152 };
+
+enum msmskunk_functions {
+	msm_mux_gpio,
+	msm_mux_qup0,
+	msm_mux_reserved0,
+	msm_mux_reserved1,
+	msm_mux_reserved2,
+	msm_mux_reserved3,
+	msm_mux_qup9,
+	msm_mux_qdss_cti,
+	msm_mux_reserved4,
+	msm_mux_reserved5,
+	msm_mux_ddr_pxi0,
+	msm_mux_reserved6,
+	msm_mux_ddr_bist,
+	msm_mux_atest_tsens2,
+	msm_mux_vsense_trigger,
+	msm_mux_atest_usb1,
+	msm_mux_reserved7,
+	msm_mux_qup_l4,
+	msm_mux_wlan1_adc1,
+	msm_mux_atest_usb13,
+	msm_mux_ddr_pxi1,
+	msm_mux_reserved8,
+	msm_mux_qup_l5,
+	msm_mux_wlan1_adc0,
+	msm_mux_atest_usb12,
+	msm_mux_reserved9,
+	msm_mux_mdp_vsync,
+	msm_mux_qup_l6,
+	msm_mux_wlan2_adc1,
+	msm_mux_atest_usb11,
+	msm_mux_ddr_pxi2,
+	msm_mux_reserved10,
+	msm_mux_edp_lcd,
+	msm_mux_dbg_out,
+	msm_mux_wlan2_adc0,
+	msm_mux_atest_usb10,
+	msm_mux_reserved11,
+	msm_mux_m_voc,
+	msm_mux_tsif1_sync,
+	msm_mux_ddr_pxi3,
+	msm_mux_reserved12,
+	msm_mux_cam_mclk,
+	msm_mux_pll_bypassnl,
+	msm_mux_qdss_gpio0,
+	msm_mux_reserved13,
+	msm_mux_pll_reset,
+	msm_mux_qdss_gpio1,
+	msm_mux_reserved14,
+	msm_mux_qdss_gpio2,
+	msm_mux_reserved15,
+	msm_mux_qdss_gpio3,
+	msm_mux_reserved16,
+	msm_mux_cci_i2c,
+	msm_mux_qup1,
+	msm_mux_qdss_gpio4,
+	msm_mux_reserved17,
+	msm_mux_qdss_gpio5,
+	msm_mux_reserved18,
+	msm_mux_qdss_gpio6,
+	msm_mux_reserved19,
+	msm_mux_qdss_gpio7,
+	msm_mux_reserved20,
+	msm_mux_cci_timer0,
+	msm_mux_gcc_gp2,
+	msm_mux_qdss_gpio8,
+	msm_mux_reserved21,
+	msm_mux_cci_timer1,
+	msm_mux_gcc_gp3,
+	msm_mux_qdss_gpio,
+	msm_mux_reserved22,
+	msm_mux_cci_timer2,
+	msm_mux_qdss_gpio9,
+	msm_mux_reserved23,
+	msm_mux_cci_timer3,
+	msm_mux_cci_async,
+	msm_mux_qdss_gpio10,
+	msm_mux_reserved24,
+	msm_mux_cci_timer4,
+	msm_mux_qdss_gpio11,
+	msm_mux_reserved25,
+	msm_mux_qdss_gpio12,
+	msm_mux_reserved26,
+	msm_mux_qup2,
+	msm_mux_qdss_gpio13,
+	msm_mux_reserved27,
+	msm_mux_qdss_gpio14,
+	msm_mux_reserved28,
+	msm_mux_phase_flag1,
+	msm_mux_qdss_gpio15,
+	msm_mux_reserved29,
+	msm_mux_phase_flag2,
+	msm_mux_reserved30,
+	msm_mux_qup11,
+	msm_mux_qup14,
+	msm_mux_reserved31,
+	msm_mux_phase_flag3,
+	msm_mux_reserved96,
+	msm_mux_ldo_en,
+	msm_mux_reserved97,
+	msm_mux_ldo_update,
+	msm_mux_reserved98,
+	msm_mux_phase_flag14,
+	msm_mux_reserved99,
+	msm_mux_phase_flag15,
+	msm_mux_reserved100,
+	msm_mux_reserved101,
+	msm_mux_pci_e1,
+	msm_mux_prng_rosc,
+	msm_mux_reserved102,
+	msm_mux_phase_flag5,
+	msm_mux_reserved103,
+	msm_mux_reserved104,
+	msm_mux_pcie1_forceon,
+	msm_mux_uim2_data,
+	msm_mux_qup13,
+	msm_mux_reserved105,
+	msm_mux_pcie1_pwren,
+	msm_mux_uim2_clk,
+	msm_mux_reserved106,
+	msm_mux_pcie1_auxen,
+	msm_mux_uim2_reset,
+	msm_mux_reserved107,
+	msm_mux_pcie1_button,
+	msm_mux_uim2_present,
+	msm_mux_reserved108,
+	msm_mux_uim1_data,
+	msm_mux_reserved109,
+	msm_mux_uim1_clk,
+	msm_mux_reserved110,
+	msm_mux_uim1_reset,
+	msm_mux_reserved111,
+	msm_mux_uim1_present,
+	msm_mux_reserved112,
+	msm_mux_pcie1_prsnt2,
+	msm_mux_uim_batt,
+	msm_mux_edp_hot,
+	msm_mux_reserved113,
+	msm_mux_nav_pps,
+	msm_mux_reserved114,
+	msm_mux_reserved115,
+	msm_mux_reserved116,
+	msm_mux_atest_char,
+	msm_mux_reserved117,
+	msm_mux_adsp_ext,
+	msm_mux_atest_char3,
+	msm_mux_reserved118,
+	msm_mux_atest_char2,
+	msm_mux_reserved119,
+	msm_mux_atest_char1,
+	msm_mux_reserved120,
+	msm_mux_atest_char0,
+	msm_mux_reserved121,
+	msm_mux_reserved122,
+	msm_mux_reserved123,
+	msm_mux_reserved124,
+	msm_mux_reserved125,
+	msm_mux_sd_card,
+	msm_mux_reserved126,
+	msm_mux_reserved127,
+	msm_mux_reserved128,
+	msm_mux_reserved129,
+	msm_mux_qlink_request,
+	msm_mux_reserved130,
+	msm_mux_qlink_enable,
+	msm_mux_reserved131,
+	msm_mux_reserved132,
+	msm_mux_reserved133,
+	msm_mux_reserved134,
+	msm_mux_pa_indicator,
+	msm_mux_reserved135,
+	msm_mux_reserved136,
+	msm_mux_phase_flag26,
+	msm_mux_reserved137,
+	msm_mux_phase_flag27,
+	msm_mux_reserved138,
+	msm_mux_phase_flag28,
+	msm_mux_reserved139,
+	msm_mux_phase_flag6,
+	msm_mux_reserved140,
+	msm_mux_phase_flag29,
+	msm_mux_reserved141,
+	msm_mux_phase_flag30,
+	msm_mux_reserved142,
+	msm_mux_phase_flag31,
+	msm_mux_reserved143,
+	msm_mux_mss_lte,
+	msm_mux_reserved144,
+	msm_mux_reserved145,
+	msm_mux_reserved146,
+	msm_mux_reserved147,
+	msm_mux_reserved148,
+	msm_mux_reserved149,
+	msm_mux_reserved32,
+	msm_mux_reserved33,
+	msm_mux_reserved34,
+	msm_mux_pci_e0,
+	msm_mux_jitter_bist,
+	msm_mux_reserved35,
+	msm_mux_pll_bist,
+	msm_mux_atest_tsens,
+	msm_mux_reserved36,
+	msm_mux_agera_pll,
+	msm_mux_reserved37,
+	msm_mux_usb_phy,
+	msm_mux_reserved38,
+	msm_mux_lpass_slimbus,
+	msm_mux_reserved39,
+	msm_mux_sd_write,
+	msm_mux_tsif1_error,
+	msm_mux_reserved40,
+	msm_mux_qup3,
+	msm_mux_reserved41,
+	msm_mux_reserved42,
+	msm_mux_reserved43,
+	msm_mux_reserved44,
+	msm_mux_bt_reset,
+	msm_mux_qup6,
+	msm_mux_reserved45,
+	msm_mux_reserved46,
+	msm_mux_reserved47,
+	msm_mux_reserved48,
+	msm_mux_qup12,
+	msm_mux_reserved49,
+	msm_mux_reserved50,
+	msm_mux_reserved51,
+	msm_mux_phase_flag16,
+	msm_mux_reserved52,
+	msm_mux_qup10,
+	msm_mux_phase_flag11,
+	msm_mux_reserved53,
+	msm_mux_phase_flag12,
+	msm_mux_reserved54,
+	msm_mux_phase_flag13,
+	msm_mux_reserved55,
+	msm_mux_phase_flag17,
+	msm_mux_reserved56,
+	msm_mux_qua_mi2s,
+	msm_mux_gcc_gp1,
+	msm_mux_phase_flag18,
+	msm_mux_reserved57,
+	msm_mux_ssc_irq,
+	msm_mux_phase_flag19,
+	msm_mux_reserved58,
+	msm_mux_phase_flag20,
+	msm_mux_reserved59,
+	msm_mux_cri_trng0,
+	msm_mux_phase_flag21,
+	msm_mux_reserved60,
+	msm_mux_cri_trng1,
+	msm_mux_phase_flag22,
+	msm_mux_reserved61,
+	msm_mux_cri_trng,
+	msm_mux_phase_flag23,
+	msm_mux_reserved62,
+	msm_mux_phase_flag24,
+	msm_mux_reserved63,
+	msm_mux_pri_mi2s,
+	msm_mux_sp_cmu,
+	msm_mux_phase_flag25,
+	msm_mux_reserved64,
+	msm_mux_qup8,
+	msm_mux_reserved65,
+	msm_mux_pri_mi2s_ws,
+	msm_mux_reserved66,
+	msm_mux_reserved67,
+	msm_mux_reserved68,
+	msm_mux_spkr_i2s,
+	msm_mux_audio_ref,
+	msm_mux_reserved69,
+	msm_mux_reserved70,
+	msm_mux_tsense_pwm1,
+	msm_mux_tsense_pwm2,
+	msm_mux_reserved71,
+	msm_mux_reserved72,
+	msm_mux_btfm_slimbus,
+	msm_mux_atest_usb2,
+	msm_mux_reserved73,
+	msm_mux_ter_mi2s,
+	msm_mux_phase_flag7,
+	msm_mux_atest_usb23,
+	msm_mux_reserved74,
+	msm_mux_phase_flag8,
+	msm_mux_atest_usb22,
+	msm_mux_reserved75,
+	msm_mux_phase_flag9,
+	msm_mux_atest_usb21,
+	msm_mux_reserved76,
+	msm_mux_phase_flag4,
+	msm_mux_atest_usb20,
+	msm_mux_reserved77,
+	msm_mux_reserved78,
+	msm_mux_sec_mi2s,
+	msm_mux_reserved79,
+	msm_mux_reserved80,
+	msm_mux_qup15,
+	msm_mux_reserved81,
+	msm_mux_reserved82,
+	msm_mux_reserved83,
+	msm_mux_reserved84,
+	msm_mux_pcie1_pwrfault,
+	msm_mux_qup5,
+	msm_mux_reserved85,
+	msm_mux_pcie1_mrl,
+	msm_mux_reserved86,
+	msm_mux_reserved87,
+	msm_mux_reserved88,
+	msm_mux_tsif1_clk,
+	msm_mux_qup4,
+	msm_mux_tgu_ch3,
+	msm_mux_phase_flag10,
+	msm_mux_reserved89,
+	msm_mux_tsif1_en,
+	msm_mux_mdp_vsync0,
+	msm_mux_mdp_vsync1,
+	msm_mux_mdp_vsync2,
+	msm_mux_mdp_vsync3,
+	msm_mux_tgu_ch0,
+	msm_mux_phase_flag0,
+	msm_mux_reserved90,
+	msm_mux_tsif1_data,
+	msm_mux_sdc4_cmd,
+	msm_mux_tgu_ch1,
+	msm_mux_reserved91,
+	msm_mux_tsif2_error,
+	msm_mux_sdc43,
+	msm_mux_vfr_1,
+	msm_mux_tgu_ch2,
+	msm_mux_reserved92,
+	msm_mux_tsif2_clk,
+	msm_mux_sdc4_clk,
+	msm_mux_qup7,
+	msm_mux_reserved93,
+	msm_mux_tsif2_en,
+	msm_mux_sdc42,
+	msm_mux_reserved94,
+	msm_mux_tsif2_data,
+	msm_mux_sdc41,
+	msm_mux_reserved95,
+	msm_mux_tsif2_sync,
+	msm_mux_sdc40,
+	msm_mux_NA,
+};
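+
+/*
+ * Editorial note (not part of the original patch): msm_mux_NA is the
+ * placeholder passed for unused function slots in the PINGROUP()
+ * entries of msmskunk_groups[] below.
+ */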
+
+static const char * const gpio_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3", "gpio4", "gpio5", "gpio6", "gpio7",
+	"gpio8", "gpio9", "gpio10", "gpio11", "gpio12", "gpio13", "gpio14",
+	"gpio15", "gpio16", "gpio17", "gpio18", "gpio19", "gpio20", "gpio21",
+	"gpio22", "gpio23", "gpio24", "gpio25", "gpio26", "gpio27", "gpio28",
+	"gpio29", "gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35",
+	"gpio36", "gpio38", "gpio39", "gpio40", "gpio41", "gpio42", "gpio43",
+	"gpio44", "gpio46", "gpio47", "gpio48", "gpio49", "gpio50", "gpio51",
+	"gpio52", "gpio53", "gpio54", "gpio55", "gpio56", "gpio57", "gpio64",
+	"gpio65", "gpio66", "gpio67", "gpio68", "gpio69", "gpio70", "gpio71",
+	"gpio72", "gpio73", "gpio74", "gpio75", "gpio76", "gpio77", "gpio81",
+	"gpio82", "gpio83", "gpio84", "gpio87", "gpio88", "gpio89", "gpio90",
+	"gpio91", "gpio92", "gpio93", "gpio94", "gpio95", "gpio96", "gpio97",
+	"gpio98", "gpio99", "gpio100", "gpio101", "gpio102", "gpio103",
+	"gpio109", "gpio110", "gpio111", "gpio112", "gpio114", "gpio115",
+	"gpio116", "gpio127", "gpio128", "gpio129", "gpio130", "gpio131",
+	"gpio132", "gpio133", "gpio134", "gpio135", "gpio136", "gpio137",
+	"gpio138", "gpio139", "gpio140", "gpio141", "gpio142", "gpio143",
+	"gpio144", "gpio145", "gpio146", "gpio147", "gpio148", "gpio149",
+};
+static const char * const qup0_groups[] = {
+	"gpio0", "gpio1", "gpio2", "gpio3",
+};
+static const char * const reserved0_groups[] = {
+	"gpio0",
+};
+static const char * const reserved1_groups[] = {
+	"gpio1",
+};
+static const char * const reserved2_groups[] = {
+	"gpio2",
+};
+static const char * const reserved3_groups[] = {
+	"gpio3",
+};
+static const char * const qup9_groups[] = {
+	"gpio4", "gpio5", "gpio6", "gpio7",
+};
+static const char * const qdss_cti_groups[] = {
+	"gpio4", "gpio5", "gpio51", "gpio52", "gpio62", "gpio63", "gpio90",
+	"gpio91",
+};
+static const char * const reserved4_groups[] = {
+	"gpio4",
+};
+static const char * const reserved5_groups[] = {
+	"gpio5",
+};
+static const char * const ddr_pxi0_groups[] = {
+	"gpio6", "gpio7",
+};
+static const char * const reserved6_groups[] = {
+	"gpio6",
+};
+static const char * const ddr_bist_groups[] = {
+	"gpio7", "gpio8", "gpio9", "gpio10",
+};
+static const char * const atest_tsens2_groups[] = {
+	"gpio7",
+};
+static const char * const vsense_trigger_groups[] = {
+	"gpio7",
+};
+static const char * const atest_usb1_groups[] = {
+	"gpio7",
+};
+static const char * const reserved7_groups[] = {
+	"gpio7",
+};
+static const char * const qup_l4_groups[] = {
+	"gpio8", "gpio35", "gpio105", "gpio123",
+};
+static const char * const wlan1_adc1_groups[] = {
+	"gpio8",
+};
+static const char * const atest_usb13_groups[] = {
+	"gpio8",
+};
+static const char * const ddr_pxi1_groups[] = {
+	"gpio8", "gpio9",
+};
+static const char * const reserved8_groups[] = {
+	"gpio8",
+};
+static const char * const qup_l5_groups[] = {
+	"gpio9", "gpio36", "gpio106", "gpio124",
+};
+static const char * const wlan1_adc0_groups[] = {
+	"gpio9",
+};
+static const char * const atest_usb12_groups[] = {
+	"gpio9",
+};
+static const char * const reserved9_groups[] = {
+	"gpio9",
+};
+static const char * const mdp_vsync_groups[] = {
+	"gpio10", "gpio11", "gpio12", "gpio97", "gpio98",
+};
+static const char * const qup_l6_groups[] = {
+	"gpio10", "gpio37", "gpio107", "gpio125",
+};
+static const char * const wlan2_adc1_groups[] = {
+	"gpio10",
+};
+static const char * const atest_usb11_groups[] = {
+	"gpio10",
+};
+static const char * const ddr_pxi2_groups[] = {
+	"gpio10", "gpio11",
+};
+static const char * const reserved10_groups[] = {
+	"gpio10",
+};
+static const char * const edp_lcd_groups[] = {
+	"gpio11",
+};
+static const char * const dbg_out_groups[] = {
+	"gpio11",
+};
+static const char * const wlan2_adc0_groups[] = {
+	"gpio11",
+};
+static const char * const atest_usb10_groups[] = {
+	"gpio11",
+};
+static const char * const reserved11_groups[] = {
+	"gpio11",
+};
+static const char * const m_voc_groups[] = {
+	"gpio12",
+};
+static const char * const tsif1_sync_groups[] = {
+	"gpio12",
+};
+static const char * const ddr_pxi3_groups[] = {
+	"gpio12", "gpio13",
+};
+static const char * const reserved12_groups[] = {
+	"gpio12",
+};
+static const char * const cam_mclk_groups[] = {
+	"gpio13", "gpio14", "gpio15", "gpio16",
+};
+static const char * const pll_bypassnl_groups[] = {
+	"gpio13",
+};
+static const char * const qdss_gpio0_groups[] = {
+	"gpio13", "gpio117",
+};
+static const char * const reserved13_groups[] = {
+	"gpio13",
+};
+static const char * const pll_reset_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio1_groups[] = {
+	"gpio14", "gpio118",
+};
+static const char * const reserved14_groups[] = {
+	"gpio14",
+};
+static const char * const qdss_gpio2_groups[] = {
+	"gpio15", "gpio119",
+};
+static const char * const reserved15_groups[] = {
+	"gpio15",
+};
+static const char * const qdss_gpio3_groups[] = {
+	"gpio16", "gpio120",
+};
+static const char * const reserved16_groups[] = {
+	"gpio16",
+};
+static const char * const cci_i2c_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qup1_groups[] = {
+	"gpio17", "gpio18", "gpio19", "gpio20",
+};
+static const char * const qdss_gpio4_groups[] = {
+	"gpio17", "gpio121",
+};
+static const char * const reserved17_groups[] = {
+	"gpio17",
+};
+static const char * const qdss_gpio5_groups[] = {
+	"gpio18", "gpio122",
+};
+static const char * const reserved18_groups[] = {
+	"gpio18",
+};
+static const char * const qdss_gpio6_groups[] = {
+	"gpio19", "gpio41",
+};
+static const char * const reserved19_groups[] = {
+	"gpio19",
+};
+static const char * const qdss_gpio7_groups[] = {
+	"gpio20", "gpio42",
+};
+static const char * const reserved20_groups[] = {
+	"gpio20",
+};
+static const char * const cci_timer0_groups[] = {
+	"gpio21",
+};
+static const char * const gcc_gp2_groups[] = {
+	"gpio21", "gpio58",
+};
+static const char * const qdss_gpio8_groups[] = {
+	"gpio21", "gpio75",
+};
+static const char * const reserved21_groups[] = {
+	"gpio21",
+};
+static const char * const cci_timer1_groups[] = {
+	"gpio22",
+};
+static const char * const gcc_gp3_groups[] = {
+	"gpio22", "gpio59",
+};
+static const char * const qdss_gpio_groups[] = {
+	"gpio22", "gpio30", "gpio123", "gpio124",
+};
+static const char * const reserved22_groups[] = {
+	"gpio22",
+};
+static const char * const cci_timer2_groups[] = {
+	"gpio23",
+};
+static const char * const qdss_gpio9_groups[] = {
+	"gpio23", "gpio76",
+};
+static const char * const reserved23_groups[] = {
+	"gpio23",
+};
+static const char * const cci_timer3_groups[] = {
+	"gpio24",
+};
+static const char * const cci_async_groups[] = {
+	"gpio24", "gpio25", "gpio26",
+};
+static const char * const qdss_gpio10_groups[] = {
+	"gpio24", "gpio77",
+};
+static const char * const reserved24_groups[] = {
+	"gpio24",
+};
+static const char * const cci_timer4_groups[] = {
+	"gpio25",
+};
+static const char * const qdss_gpio11_groups[] = {
+	"gpio25", "gpio79",
+};
+static const char * const reserved25_groups[] = {
+	"gpio25",
+};
+static const char * const qdss_gpio12_groups[] = {
+	"gpio26", "gpio80",
+};
+static const char * const reserved26_groups[] = {
+	"gpio26",
+};
+static const char * const qup2_groups[] = {
+	"gpio27", "gpio28", "gpio29", "gpio30",
+};
+static const char * const qdss_gpio13_groups[] = {
+	"gpio27", "gpio93",
+};
+static const char * const reserved27_groups[] = {
+	"gpio27",
+};
+static const char * const qdss_gpio14_groups[] = {
+	"gpio28", "gpio43",
+};
+static const char * const reserved28_groups[] = {
+	"gpio28",
+};
+static const char * const phase_flag1_groups[] = {
+	"gpio29",
+};
+static const char * const qdss_gpio15_groups[] = {
+	"gpio29", "gpio44",
+};
+static const char * const reserved29_groups[] = {
+	"gpio29",
+};
+static const char * const phase_flag2_groups[] = {
+	"gpio30",
+};
+static const char * const reserved30_groups[] = {
+	"gpio30",
+};
+static const char * const qup11_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const qup14_groups[] = {
+	"gpio31", "gpio32", "gpio33", "gpio34",
+};
+static const char * const reserved31_groups[] = {
+	"gpio31",
+};
+static const char * const phase_flag3_groups[] = {
+	"gpio96",
+};
+static const char * const reserved96_groups[] = {
+	"gpio96",
+};
+static const char * const ldo_en_groups[] = {
+	"gpio97",
+};
+static const char * const reserved97_groups[] = {
+	"gpio97",
+};
+static const char * const ldo_update_groups[] = {
+	"gpio98",
+};
+static const char * const reserved98_groups[] = {
+	"gpio98",
+};
+static const char * const phase_flag14_groups[] = {
+	"gpio99",
+};
+static const char * const reserved99_groups[] = {
+	"gpio99",
+};
+static const char * const phase_flag15_groups[] = {
+	"gpio100",
+};
+static const char * const reserved100_groups[] = {
+	"gpio100",
+};
+static const char * const reserved101_groups[] = {
+	"gpio101",
+};
+static const char * const pci_e1_groups[] = {
+	"gpio102", "gpio103", "gpio104",
+};
+static const char * const prng_rosc_groups[] = {
+	"gpio102",
+};
+static const char * const reserved102_groups[] = {
+	"gpio102",
+};
+static const char * const phase_flag5_groups[] = {
+	"gpio103",
+};
+static const char * const reserved103_groups[] = {
+	"gpio103",
+};
+static const char * const reserved104_groups[] = {
+	"gpio104",
+};
+static const char * const pcie1_forceon_groups[] = {
+	"gpio105",
+};
+static const char * const uim2_data_groups[] = {
+	"gpio105",
+};
+static const char * const qup13_groups[] = {
+	"gpio105", "gpio106", "gpio107", "gpio108",
+};
+static const char * const reserved105_groups[] = {
+	"gpio105",
+};
+static const char * const pcie1_pwren_groups[] = {
+	"gpio106",
+};
+static const char * const uim2_clk_groups[] = {
+	"gpio106",
+};
+static const char * const reserved106_groups[] = {
+	"gpio106",
+};
+static const char * const pcie1_auxen_groups[] = {
+	"gpio107",
+};
+static const char * const uim2_reset_groups[] = {
+	"gpio107",
+};
+static const char * const reserved107_groups[] = {
+	"gpio107",
+};
+static const char * const pcie1_button_groups[] = {
+	"gpio108",
+};
+static const char * const uim2_present_groups[] = {
+	"gpio108",
+};
+static const char * const reserved108_groups[] = {
+	"gpio108",
+};
+static const char * const uim1_data_groups[] = {
+	"gpio109",
+};
+static const char * const reserved109_groups[] = {
+	"gpio109",
+};
+static const char * const uim1_clk_groups[] = {
+	"gpio110",
+};
+static const char * const reserved110_groups[] = {
+	"gpio110",
+};
+static const char * const uim1_reset_groups[] = {
+	"gpio111",
+};
+static const char * const reserved111_groups[] = {
+	"gpio111",
+};
+static const char * const uim1_present_groups[] = {
+	"gpio112",
+};
+static const char * const reserved112_groups[] = {
+	"gpio112",
+};
+static const char * const pcie1_prsnt2_groups[] = {
+	"gpio113",
+};
+static const char * const uim_batt_groups[] = {
+	"gpio113",
+};
+static const char * const edp_hot_groups[] = {
+	"gpio113",
+};
+static const char * const reserved113_groups[] = {
+	"gpio113",
+};
+static const char * const nav_pps_groups[] = {
+	"gpio114", "gpio114", "gpio115", "gpio115", "gpio128", "gpio128",
+	"gpio129", "gpio129", "gpio143", "gpio143",
+};
+static const char * const reserved114_groups[] = {
+	"gpio114",
+};
+static const char * const reserved115_groups[] = {
+	"gpio115",
+};
+static const char * const reserved116_groups[] = {
+	"gpio116",
+};
+static const char * const atest_char_groups[] = {
+	"gpio117",
+};
+static const char * const reserved117_groups[] = {
+	"gpio117",
+};
+static const char * const adsp_ext_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char3_groups[] = {
+	"gpio118",
+};
+static const char * const reserved118_groups[] = {
+	"gpio118",
+};
+static const char * const atest_char2_groups[] = {
+	"gpio119",
+};
+static const char * const reserved119_groups[] = {
+	"gpio119",
+};
+static const char * const atest_char1_groups[] = {
+	"gpio120",
+};
+static const char * const reserved120_groups[] = {
+	"gpio120",
+};
+static const char * const atest_char0_groups[] = {
+	"gpio121",
+};
+static const char * const reserved121_groups[] = {
+	"gpio121",
+};
+static const char * const reserved122_groups[] = {
+	"gpio122",
+};
+static const char * const reserved123_groups[] = {
+	"gpio123",
+};
+static const char * const reserved124_groups[] = {
+	"gpio124",
+};
+static const char * const reserved125_groups[] = {
+	"gpio125",
+};
+static const char * const sd_card_groups[] = {
+	"gpio126",
+};
+static const char * const reserved126_groups[] = {
+	"gpio126",
+};
+static const char * const reserved127_groups[] = {
+	"gpio127",
+};
+static const char * const reserved128_groups[] = {
+	"gpio128",
+};
+static const char * const reserved129_groups[] = {
+	"gpio129",
+};
+static const char * const qlink_request_groups[] = {
+	"gpio130",
+};
+static const char * const reserved130_groups[] = {
+	"gpio130",
+};
+static const char * const qlink_enable_groups[] = {
+	"gpio131",
+};
+static const char * const reserved131_groups[] = {
+	"gpio131",
+};
+static const char * const reserved132_groups[] = {
+	"gpio132",
+};
+static const char * const reserved133_groups[] = {
+	"gpio133",
+};
+static const char * const reserved134_groups[] = {
+	"gpio134",
+};
+static const char * const pa_indicator_groups[] = {
+	"gpio135",
+};
+static const char * const reserved135_groups[] = {
+	"gpio135",
+};
+static const char * const reserved136_groups[] = {
+	"gpio136",
+};
+static const char * const phase_flag26_groups[] = {
+	"gpio137",
+};
+static const char * const reserved137_groups[] = {
+	"gpio137",
+};
+static const char * const phase_flag27_groups[] = {
+	"gpio138",
+};
+static const char * const reserved138_groups[] = {
+	"gpio138",
+};
+static const char * const phase_flag28_groups[] = {
+	"gpio139",
+};
+static const char * const reserved139_groups[] = {
+	"gpio139",
+};
+static const char * const phase_flag6_groups[] = {
+	"gpio140",
+};
+static const char * const reserved140_groups[] = {
+	"gpio140",
+};
+static const char * const phase_flag29_groups[] = {
+	"gpio141",
+};
+static const char * const reserved141_groups[] = {
+	"gpio141",
+};
+static const char * const phase_flag30_groups[] = {
+	"gpio142",
+};
+static const char * const reserved142_groups[] = {
+	"gpio142",
+};
+static const char * const phase_flag31_groups[] = {
+	"gpio143",
+};
+static const char * const reserved143_groups[] = {
+	"gpio143",
+};
+static const char * const mss_lte_groups[] = {
+	"gpio144", "gpio145",
+};
+static const char * const reserved144_groups[] = {
+	"gpio144",
+};
+static const char * const reserved145_groups[] = {
+	"gpio145",
+};
+static const char * const reserved146_groups[] = {
+	"gpio146",
+};
+static const char * const reserved147_groups[] = {
+	"gpio147",
+};
+static const char * const reserved148_groups[] = {
+	"gpio148",
+};
+static const char * const reserved149_groups[] = {
+	"gpio149", "gpio149",
+};
+static const char * const reserved32_groups[] = {
+	"gpio32",
+};
+static const char * const reserved33_groups[] = {
+	"gpio33",
+};
+static const char * const reserved34_groups[] = {
+	"gpio34",
+};
+static const char * const pci_e0_groups[] = {
+	"gpio35", "gpio36", "gpio37",
+};
+static const char * const jitter_bist_groups[] = {
+	"gpio35",
+};
+static const char * const reserved35_groups[] = {
+	"gpio35",
+};
+static const char * const pll_bist_groups[] = {
+	"gpio36",
+};
+static const char * const atest_tsens_groups[] = {
+	"gpio36",
+};
+static const char * const reserved36_groups[] = {
+	"gpio36",
+};
+static const char * const agera_pll_groups[] = {
+	"gpio37",
+};
+static const char * const reserved37_groups[] = {
+	"gpio37",
+};
+static const char * const usb_phy_groups[] = {
+	"gpio38",
+};
+static const char * const reserved38_groups[] = {
+	"gpio38",
+};
+static const char * const lpass_slimbus_groups[] = {
+	"gpio39", "gpio70", "gpio71", "gpio72",
+};
+static const char * const reserved39_groups[] = {
+	"gpio39",
+};
+static const char * const sd_write_groups[] = {
+	"gpio40",
+};
+static const char * const tsif1_error_groups[] = {
+	"gpio40",
+};
+static const char * const reserved40_groups[] = {
+	"gpio40",
+};
+static const char * const qup3_groups[] = {
+	"gpio41", "gpio42", "gpio43", "gpio44",
+};
+static const char * const reserved41_groups[] = {
+	"gpio41",
+};
+static const char * const reserved42_groups[] = {
+	"gpio42",
+};
+static const char * const reserved43_groups[] = {
+	"gpio43",
+};
+static const char * const reserved44_groups[] = {
+	"gpio44",
+};
+static const char * const bt_reset_groups[] = {
+	"gpio45",
+};
+static const char * const qup6_groups[] = {
+	"gpio45", "gpio46", "gpio47", "gpio48",
+};
+static const char * const reserved45_groups[] = {
+	"gpio45",
+};
+static const char * const reserved46_groups[] = {
+	"gpio46",
+};
+static const char * const reserved47_groups[] = {
+	"gpio47",
+};
+static const char * const reserved48_groups[] = {
+	"gpio48",
+};
+static const char * const qup12_groups[] = {
+	"gpio49", "gpio50", "gpio51", "gpio52",
+};
+static const char * const reserved49_groups[] = {
+	"gpio49",
+};
+static const char * const reserved50_groups[] = {
+	"gpio50",
+};
+static const char * const reserved51_groups[] = {
+	"gpio51",
+};
+static const char * const phase_flag16_groups[] = {
+	"gpio52",
+};
+static const char * const reserved52_groups[] = {
+	"gpio52",
+};
+static const char * const qup10_groups[] = {
+	"gpio53", "gpio54", "gpio55", "gpio56",
+};
+static const char * const phase_flag11_groups[] = {
+	"gpio53",
+};
+static const char * const reserved53_groups[] = {
+	"gpio53",
+};
+static const char * const phase_flag12_groups[] = {
+	"gpio54",
+};
+static const char * const reserved54_groups[] = {
+	"gpio54",
+};
+static const char * const phase_flag13_groups[] = {
+	"gpio55",
+};
+static const char * const reserved55_groups[] = {
+	"gpio55",
+};
+static const char * const phase_flag17_groups[] = {
+	"gpio56",
+};
+static const char * const reserved56_groups[] = {
+	"gpio56",
+};
+static const char * const qua_mi2s_groups[] = {
+	"gpio57", "gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63",
+};
+static const char * const gcc_gp1_groups[] = {
+	"gpio57", "gpio78",
+};
+static const char * const phase_flag18_groups[] = {
+	"gpio57",
+};
+static const char * const reserved57_groups[] = {
+	"gpio57",
+};
+static const char * const ssc_irq_groups[] = {
+	"gpio58", "gpio59", "gpio60", "gpio61", "gpio62", "gpio63", "gpio78",
+	"gpio79", "gpio80", "gpio117", "gpio118", "gpio119", "gpio120",
+	"gpio121", "gpio122", "gpio123", "gpio124", "gpio125",
+};
+static const char * const phase_flag19_groups[] = {
+	"gpio58",
+};
+static const char * const reserved58_groups[] = {
+	"gpio58",
+};
+static const char * const phase_flag20_groups[] = {
+	"gpio59",
+};
+static const char * const reserved59_groups[] = {
+	"gpio59",
+};
+static const char * const cri_trng0_groups[] = {
+	"gpio60",
+};
+static const char * const phase_flag21_groups[] = {
+	"gpio60",
+};
+static const char * const reserved60_groups[] = {
+	"gpio60",
+};
+static const char * const cri_trng1_groups[] = {
+	"gpio61",
+};
+static const char * const phase_flag22_groups[] = {
+	"gpio61",
+};
+static const char * const reserved61_groups[] = {
+	"gpio61",
+};
+static const char * const cri_trng_groups[] = {
+	"gpio62",
+};
+static const char * const phase_flag23_groups[] = {
+	"gpio62",
+};
+static const char * const reserved62_groups[] = {
+	"gpio62",
+};
+static const char * const phase_flag24_groups[] = {
+	"gpio63",
+};
+static const char * const reserved63_groups[] = {
+	"gpio63",
+};
+static const char * const pri_mi2s_groups[] = {
+	"gpio64", "gpio65", "gpio67", "gpio68",
+};
+static const char * const sp_cmu_groups[] = {
+	"gpio64",
+};
+static const char * const phase_flag25_groups[] = {
+	"gpio64",
+};
+static const char * const reserved64_groups[] = {
+	"gpio64",
+};
+static const char * const qup8_groups[] = {
+	"gpio65", "gpio66", "gpio67", "gpio68",
+};
+static const char * const reserved65_groups[] = {
+	"gpio65",
+};
+static const char * const pri_mi2s_ws_groups[] = {
+	"gpio66",
+};
+static const char * const reserved66_groups[] = {
+	"gpio66",
+};
+static const char * const reserved67_groups[] = {
+	"gpio67",
+};
+static const char * const reserved68_groups[] = {
+	"gpio68",
+};
+static const char * const spkr_i2s_groups[] = {
+	"gpio69", "gpio70", "gpio71", "gpio72",
+};
+static const char * const audio_ref_groups[] = {
+	"gpio69",
+};
+static const char * const reserved69_groups[] = {
+	"gpio69",
+};
+static const char * const reserved70_groups[] = {
+	"gpio70",
+};
+static const char * const tsense_pwm1_groups[] = {
+	"gpio71",
+};
+static const char * const tsense_pwm2_groups[] = {
+	"gpio71",
+};
+static const char * const reserved71_groups[] = {
+	"gpio71",
+};
+static const char * const reserved72_groups[] = {
+	"gpio72",
+};
+static const char * const btfm_slimbus_groups[] = {
+	"gpio73", "gpio74",
+};
+static const char * const atest_usb2_groups[] = {
+	"gpio73",
+};
+static const char * const reserved73_groups[] = {
+	"gpio73",
+};
+static const char * const ter_mi2s_groups[] = {
+	"gpio74", "gpio75", "gpio76", "gpio77", "gpio78",
+};
+static const char * const phase_flag7_groups[] = {
+	"gpio74",
+};
+static const char * const atest_usb23_groups[] = {
+	"gpio74",
+};
+static const char * const reserved74_groups[] = {
+	"gpio74",
+};
+static const char * const phase_flag8_groups[] = {
+	"gpio75",
+};
+static const char * const atest_usb22_groups[] = {
+	"gpio75",
+};
+static const char * const reserved75_groups[] = {
+	"gpio75",
+};
+static const char * const phase_flag9_groups[] = {
+	"gpio76",
+};
+static const char * const atest_usb21_groups[] = {
+	"gpio76",
+};
+static const char * const reserved76_groups[] = {
+	"gpio76",
+};
+static const char * const phase_flag4_groups[] = {
+	"gpio77",
+};
+static const char * const atest_usb20_groups[] = {
+	"gpio77",
+};
+static const char * const reserved77_groups[] = {
+	"gpio77",
+};
+static const char * const reserved78_groups[] = {
+	"gpio78",
+};
+static const char * const sec_mi2s_groups[] = {
+	"gpio79", "gpio80", "gpio81", "gpio82", "gpio83",
+};
+static const char * const reserved79_groups[] = {
+	"gpio79",
+};
+static const char * const reserved80_groups[] = {
+	"gpio80",
+};
+static const char * const qup15_groups[] = {
+	"gpio81", "gpio82", "gpio83", "gpio84",
+};
+static const char * const reserved81_groups[] = {
+	"gpio81",
+};
+static const char * const reserved82_groups[] = {
+	"gpio82",
+};
+static const char * const reserved83_groups[] = {
+	"gpio83",
+};
+static const char * const reserved84_groups[] = {
+	"gpio84",
+};
+static const char * const pcie1_pwrfault_groups[] = {
+	"gpio85",
+};
+static const char * const qup5_groups[] = {
+	"gpio85", "gpio86", "gpio87", "gpio88",
+};
+static const char * const reserved85_groups[] = {
+	"gpio85",
+};
+static const char * const pcie1_mrl_groups[] = {
+	"gpio86",
+};
+static const char * const reserved86_groups[] = {
+	"gpio86",
+};
+static const char * const reserved87_groups[] = {
+	"gpio87",
+};
+static const char * const reserved88_groups[] = {
+	"gpio88",
+};
+static const char * const tsif1_clk_groups[] = {
+	"gpio89",
+};
+static const char * const qup4_groups[] = {
+	"gpio89", "gpio90", "gpio91", "gpio92",
+};
+static const char * const tgu_ch3_groups[] = {
+	"gpio89",
+};
+static const char * const phase_flag10_groups[] = {
+	"gpio89",
+};
+static const char * const reserved89_groups[] = {
+	"gpio89",
+};
+static const char * const tsif1_en_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync0_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync1_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync2_groups[] = {
+	"gpio90",
+};
+static const char * const mdp_vsync3_groups[] = {
+	"gpio90",
+};
+static const char * const tgu_ch0_groups[] = {
+	"gpio90",
+};
+static const char * const phase_flag0_groups[] = {
+	"gpio90",
+};
+static const char * const reserved90_groups[] = {
+	"gpio90",
+};
+static const char * const tsif1_data_groups[] = {
+	"gpio91",
+};
+static const char * const sdc4_cmd_groups[] = {
+	"gpio91",
+};
+static const char * const tgu_ch1_groups[] = {
+	"gpio91",
+};
+static const char * const reserved91_groups[] = {
+	"gpio91",
+};
+static const char * const tsif2_error_groups[] = {
+	"gpio92",
+};
+static const char * const sdc43_groups[] = {
+	"gpio92",
+};
+static const char * const vfr_1_groups[] = {
+	"gpio92",
+};
+static const char * const tgu_ch2_groups[] = {
+	"gpio92",
+};
+static const char * const reserved92_groups[] = {
+	"gpio92",
+};
+static const char * const tsif2_clk_groups[] = {
+	"gpio93",
+};
+static const char * const sdc4_clk_groups[] = {
+	"gpio93",
+};
+static const char * const qup7_groups[] = {
+	"gpio93", "gpio94", "gpio95", "gpio96",
+};
+static const char * const reserved93_groups[] = {
+	"gpio93",
+};
+static const char * const tsif2_en_groups[] = {
+	"gpio94",
+};
+static const char * const sdc42_groups[] = {
+	"gpio94",
+};
+static const char * const reserved94_groups[] = {
+	"gpio94",
+};
+static const char * const tsif2_data_groups[] = {
+	"gpio95",
+};
+static const char * const sdc41_groups[] = {
+	"gpio95",
+};
+static const char * const reserved95_groups[] = {
+	"gpio95",
+};
+static const char * const tsif2_sync_groups[] = {
+	"gpio96",
+};
+static const char * const sdc40_groups[] = {
+	"gpio96",
+};
+
+static const struct msm_function msmskunk_functions[] = {
+	FUNCTION(gpio),
+	FUNCTION(qup0),
+	FUNCTION(reserved0),
+	FUNCTION(reserved1),
+	FUNCTION(reserved2),
+	FUNCTION(reserved3),
+	FUNCTION(qup9),
+	FUNCTION(qdss_cti),
+	FUNCTION(reserved4),
+	FUNCTION(reserved5),
+	FUNCTION(ddr_pxi0),
+	FUNCTION(reserved6),
+	FUNCTION(ddr_bist),
+	FUNCTION(atest_tsens2),
+	FUNCTION(vsense_trigger),
+	FUNCTION(atest_usb1),
+	FUNCTION(reserved7),
+	FUNCTION(qup_l4),
+	FUNCTION(wlan1_adc1),
+	FUNCTION(atest_usb13),
+	FUNCTION(ddr_pxi1),
+	FUNCTION(reserved8),
+	FUNCTION(qup_l5),
+	FUNCTION(wlan1_adc0),
+	FUNCTION(atest_usb12),
+	FUNCTION(reserved9),
+	FUNCTION(mdp_vsync),
+	FUNCTION(qup_l6),
+	FUNCTION(wlan2_adc1),
+	FUNCTION(atest_usb11),
+	FUNCTION(ddr_pxi2),
+	FUNCTION(reserved10),
+	FUNCTION(edp_lcd),
+	FUNCTION(dbg_out),
+	FUNCTION(wlan2_adc0),
+	FUNCTION(atest_usb10),
+	FUNCTION(reserved11),
+	FUNCTION(m_voc),
+	FUNCTION(tsif1_sync),
+	FUNCTION(ddr_pxi3),
+	FUNCTION(reserved12),
+	FUNCTION(cam_mclk),
+	FUNCTION(pll_bypassnl),
+	FUNCTION(qdss_gpio0),
+	FUNCTION(reserved13),
+	FUNCTION(pll_reset),
+	FUNCTION(qdss_gpio1),
+	FUNCTION(reserved14),
+	FUNCTION(qdss_gpio2),
+	FUNCTION(reserved15),
+	FUNCTION(qdss_gpio3),
+	FUNCTION(reserved16),
+	FUNCTION(cci_i2c),
+	FUNCTION(qup1),
+	FUNCTION(qdss_gpio4),
+	FUNCTION(reserved17),
+	FUNCTION(qdss_gpio5),
+	FUNCTION(reserved18),
+	FUNCTION(qdss_gpio6),
+	FUNCTION(reserved19),
+	FUNCTION(qdss_gpio7),
+	FUNCTION(reserved20),
+	FUNCTION(cci_timer0),
+	FUNCTION(gcc_gp2),
+	FUNCTION(qdss_gpio8),
+	FUNCTION(reserved21),
+	FUNCTION(cci_timer1),
+	FUNCTION(gcc_gp3),
+	FUNCTION(qdss_gpio),
+	FUNCTION(reserved22),
+	FUNCTION(cci_timer2),
+	FUNCTION(qdss_gpio9),
+	FUNCTION(reserved23),
+	FUNCTION(cci_timer3),
+	FUNCTION(cci_async),
+	FUNCTION(qdss_gpio10),
+	FUNCTION(reserved24),
+	FUNCTION(cci_timer4),
+	FUNCTION(qdss_gpio11),
+	FUNCTION(reserved25),
+	FUNCTION(qdss_gpio12),
+	FUNCTION(reserved26),
+	FUNCTION(qup2),
+	FUNCTION(qdss_gpio13),
+	FUNCTION(reserved27),
+	FUNCTION(qdss_gpio14),
+	FUNCTION(reserved28),
+	FUNCTION(phase_flag1),
+	FUNCTION(qdss_gpio15),
+	FUNCTION(reserved29),
+	FUNCTION(phase_flag2),
+	FUNCTION(reserved30),
+	FUNCTION(qup11),
+	FUNCTION(qup14),
+	FUNCTION(reserved31),
+	FUNCTION(phase_flag3),
+	FUNCTION(reserved96),
+	FUNCTION(ldo_en),
+	FUNCTION(reserved97),
+	FUNCTION(ldo_update),
+	FUNCTION(reserved98),
+	FUNCTION(phase_flag14),
+	FUNCTION(reserved99),
+	FUNCTION(phase_flag15),
+	FUNCTION(reserved100),
+	FUNCTION(reserved101),
+	FUNCTION(pci_e1),
+	FUNCTION(prng_rosc),
+	FUNCTION(reserved102),
+	FUNCTION(phase_flag5),
+	FUNCTION(reserved103),
+	FUNCTION(reserved104),
+	FUNCTION(pcie1_forceon),
+	FUNCTION(uim2_data),
+	FUNCTION(qup13),
+	FUNCTION(reserved105),
+	FUNCTION(pcie1_pwren),
+	FUNCTION(uim2_clk),
+	FUNCTION(reserved106),
+	FUNCTION(pcie1_auxen),
+	FUNCTION(uim2_reset),
+	FUNCTION(reserved107),
+	FUNCTION(pcie1_button),
+	FUNCTION(uim2_present),
+	FUNCTION(reserved108),
+	FUNCTION(uim1_data),
+	FUNCTION(reserved109),
+	FUNCTION(uim1_clk),
+	FUNCTION(reserved110),
+	FUNCTION(uim1_reset),
+	FUNCTION(reserved111),
+	FUNCTION(uim1_present),
+	FUNCTION(reserved112),
+	FUNCTION(pcie1_prsnt2),
+	FUNCTION(uim_batt),
+	FUNCTION(edp_hot),
+	FUNCTION(reserved113),
+	FUNCTION(nav_pps),
+	FUNCTION(reserved114),
+	FUNCTION(reserved115),
+	FUNCTION(reserved116),
+	FUNCTION(atest_char),
+	FUNCTION(reserved117),
+	FUNCTION(adsp_ext),
+	FUNCTION(atest_char3),
+	FUNCTION(reserved118),
+	FUNCTION(atest_char2),
+	FUNCTION(reserved119),
+	FUNCTION(atest_char1),
+	FUNCTION(reserved120),
+	FUNCTION(atest_char0),
+	FUNCTION(reserved121),
+	FUNCTION(reserved122),
+	FUNCTION(reserved123),
+	FUNCTION(reserved124),
+	FUNCTION(reserved125),
+	FUNCTION(sd_card),
+	FUNCTION(reserved126),
+	FUNCTION(reserved127),
+	FUNCTION(reserved128),
+	FUNCTION(reserved129),
+	FUNCTION(qlink_request),
+	FUNCTION(reserved130),
+	FUNCTION(qlink_enable),
+	FUNCTION(reserved131),
+	FUNCTION(reserved132),
+	FUNCTION(reserved133),
+	FUNCTION(reserved134),
+	FUNCTION(pa_indicator),
+	FUNCTION(reserved135),
+	FUNCTION(reserved136),
+	FUNCTION(phase_flag26),
+	FUNCTION(reserved137),
+	FUNCTION(phase_flag27),
+	FUNCTION(reserved138),
+	FUNCTION(phase_flag28),
+	FUNCTION(reserved139),
+	FUNCTION(phase_flag6),
+	FUNCTION(reserved140),
+	FUNCTION(phase_flag29),
+	FUNCTION(reserved141),
+	FUNCTION(phase_flag30),
+	FUNCTION(reserved142),
+	FUNCTION(phase_flag31),
+	FUNCTION(reserved143),
+	FUNCTION(mss_lte),
+	FUNCTION(reserved144),
+	FUNCTION(reserved145),
+	FUNCTION(reserved146),
+	FUNCTION(reserved147),
+	FUNCTION(reserved148),
+	FUNCTION(reserved149),
+	FUNCTION(reserved32),
+	FUNCTION(reserved33),
+	FUNCTION(reserved34),
+	FUNCTION(pci_e0),
+	FUNCTION(jitter_bist),
+	FUNCTION(reserved35),
+	FUNCTION(pll_bist),
+	FUNCTION(atest_tsens),
+	FUNCTION(reserved36),
+	FUNCTION(agera_pll),
+	FUNCTION(reserved37),
+	FUNCTION(usb_phy),
+	FUNCTION(reserved38),
+	FUNCTION(lpass_slimbus),
+	FUNCTION(reserved39),
+	FUNCTION(sd_write),
+	FUNCTION(tsif1_error),
+	FUNCTION(reserved40),
+	FUNCTION(qup3),
+	FUNCTION(reserved41),
+	FUNCTION(reserved42),
+	FUNCTION(reserved43),
+	FUNCTION(reserved44),
+	FUNCTION(bt_reset),
+	FUNCTION(qup6),
+	FUNCTION(reserved45),
+	FUNCTION(reserved46),
+	FUNCTION(reserved47),
+	FUNCTION(reserved48),
+	FUNCTION(qup12),
+	FUNCTION(reserved49),
+	FUNCTION(reserved50),
+	FUNCTION(reserved51),
+	FUNCTION(phase_flag16),
+	FUNCTION(reserved52),
+	FUNCTION(qup10),
+	FUNCTION(phase_flag11),
+	FUNCTION(reserved53),
+	FUNCTION(phase_flag12),
+	FUNCTION(reserved54),
+	FUNCTION(phase_flag13),
+	FUNCTION(reserved55),
+	FUNCTION(phase_flag17),
+	FUNCTION(reserved56),
+	FUNCTION(qua_mi2s),
+	FUNCTION(gcc_gp1),
+	FUNCTION(phase_flag18),
+	FUNCTION(reserved57),
+	FUNCTION(ssc_irq),
+	FUNCTION(phase_flag19),
+	FUNCTION(reserved58),
+	FUNCTION(phase_flag20),
+	FUNCTION(reserved59),
+	FUNCTION(cri_trng0),
+	FUNCTION(phase_flag21),
+	FUNCTION(reserved60),
+	FUNCTION(cri_trng1),
+	FUNCTION(phase_flag22),
+	FUNCTION(reserved61),
+	FUNCTION(cri_trng),
+	FUNCTION(phase_flag23),
+	FUNCTION(reserved62),
+	FUNCTION(phase_flag24),
+	FUNCTION(reserved63),
+	FUNCTION(pri_mi2s),
+	FUNCTION(sp_cmu),
+	FUNCTION(phase_flag25),
+	FUNCTION(reserved64),
+	FUNCTION(qup8),
+	FUNCTION(reserved65),
+	FUNCTION(pri_mi2s_ws),
+	FUNCTION(reserved66),
+	FUNCTION(reserved67),
+	FUNCTION(reserved68),
+	FUNCTION(spkr_i2s),
+	FUNCTION(audio_ref),
+	FUNCTION(reserved69),
+	FUNCTION(reserved70),
+	FUNCTION(tsense_pwm1),
+	FUNCTION(tsense_pwm2),
+	FUNCTION(reserved71),
+	FUNCTION(reserved72),
+	FUNCTION(btfm_slimbus),
+	FUNCTION(atest_usb2),
+	FUNCTION(reserved73),
+	FUNCTION(ter_mi2s),
+	FUNCTION(phase_flag7),
+	FUNCTION(atest_usb23),
+	FUNCTION(reserved74),
+	FUNCTION(phase_flag8),
+	FUNCTION(atest_usb22),
+	FUNCTION(reserved75),
+	FUNCTION(phase_flag9),
+	FUNCTION(atest_usb21),
+	FUNCTION(reserved76),
+	FUNCTION(phase_flag4),
+	FUNCTION(atest_usb20),
+	FUNCTION(reserved77),
+	FUNCTION(reserved78),
+	FUNCTION(sec_mi2s),
+	FUNCTION(reserved79),
+	FUNCTION(reserved80),
+	FUNCTION(qup15),
+	FUNCTION(reserved81),
+	FUNCTION(reserved82),
+	FUNCTION(reserved83),
+	FUNCTION(reserved84),
+	FUNCTION(pcie1_pwrfault),
+	FUNCTION(qup5),
+	FUNCTION(reserved85),
+	FUNCTION(pcie1_mrl),
+	FUNCTION(reserved86),
+	FUNCTION(reserved87),
+	FUNCTION(reserved88),
+	FUNCTION(tsif1_clk),
+	FUNCTION(qup4),
+	FUNCTION(tgu_ch3),
+	FUNCTION(phase_flag10),
+	FUNCTION(reserved89),
+	FUNCTION(tsif1_en),
+	FUNCTION(mdp_vsync0),
+	FUNCTION(mdp_vsync1),
+	FUNCTION(mdp_vsync2),
+	FUNCTION(mdp_vsync3),
+	FUNCTION(tgu_ch0),
+	FUNCTION(phase_flag0),
+	FUNCTION(reserved90),
+	FUNCTION(tsif1_data),
+	FUNCTION(sdc4_cmd),
+	FUNCTION(tgu_ch1),
+	FUNCTION(reserved91),
+	FUNCTION(tsif2_error),
+	FUNCTION(sdc43),
+	FUNCTION(vfr_1),
+	FUNCTION(tgu_ch2),
+	FUNCTION(reserved92),
+	FUNCTION(tsif2_clk),
+	FUNCTION(sdc4_clk),
+	FUNCTION(qup7),
+	FUNCTION(reserved93),
+	FUNCTION(tsif2_en),
+	FUNCTION(sdc42),
+	FUNCTION(reserved94),
+	FUNCTION(tsif2_data),
+	FUNCTION(sdc41),
+	FUNCTION(reserved95),
+	FUNCTION(tsif2_sync),
+	FUNCTION(sdc40),
+};
+
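+/*
+ * Editorial note (not part of the original patch): each PINGROUP()
+ * entry below fills the pin's ten function-select slots: slot 0 is
+ * always generic GPIO (msm_mux_gpio) and slots 1-9 come from the f1-f9
+ * arguments, which index the msmskunk_functions[] table above.
+ */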
+static const struct msm_pingroup msmskunk_groups[] = {
+	PINGROUP(0, NORTH, qup0, NA, reserved0, NA, NA, NA, NA, NA, NA),
+	PINGROUP(1, NORTH, qup0, NA, reserved1, NA, NA, NA, NA, NA, NA),
+	PINGROUP(2, NORTH, qup0, NA, reserved2, NA, NA, NA, NA, NA, NA),
+	PINGROUP(3, NORTH, qup0, NA, reserved3, NA, NA, NA, NA, NA, NA),
+	PINGROUP(4, NORTH, qup9, qdss_cti, reserved4, NA, NA, NA, NA, NA, NA),
+	PINGROUP(5, NORTH, qup9, qdss_cti, reserved5, NA, NA, NA, NA, NA, NA),
+	PINGROUP(6, NORTH, qup9, NA, ddr_pxi0, reserved6, NA, NA, NA, NA, NA),
+	PINGROUP(7, NORTH, qup9, ddr_bist, NA, atest_tsens2, vsense_trigger,
+		 atest_usb1, ddr_pxi0, reserved7, NA),
+	PINGROUP(8, NORTH, qup_l4, NA, ddr_bist, NA, NA, wlan1_adc1,
+		 atest_usb13, ddr_pxi1, reserved8),
+	PINGROUP(9, NORTH, qup_l5, ddr_bist, NA, wlan1_adc0, atest_usb12,
+		 ddr_pxi1, reserved9, NA, NA),
+	PINGROUP(10, NORTH, mdp_vsync, qup_l6, ddr_bist, wlan2_adc1,
+		 atest_usb11, ddr_pxi2, reserved10, NA, NA),
+	PINGROUP(11, NORTH, mdp_vsync, edp_lcd, dbg_out, wlan2_adc0,
+		 atest_usb10, ddr_pxi2, reserved11, NA, NA),
+	PINGROUP(12, SOUTH, mdp_vsync, m_voc, tsif1_sync, ddr_pxi3, reserved12,
+		 NA, NA, NA, NA),
+	PINGROUP(13, SOUTH, cam_mclk, pll_bypassnl, qdss_gpio0, ddr_pxi3,
+		 reserved13, NA, NA, NA, NA),
+	PINGROUP(14, SOUTH, cam_mclk, pll_reset, qdss_gpio1, reserved14, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(15, SOUTH, cam_mclk, qdss_gpio2, reserved15, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(16, SOUTH, cam_mclk, qdss_gpio3, reserved16, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(17, SOUTH, cci_i2c, qup1, qdss_gpio4, reserved17, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(18, SOUTH, cci_i2c, qup1, NA, qdss_gpio5, reserved18, NA, NA,
+		 NA, NA),
+	PINGROUP(19, SOUTH, cci_i2c, qup1, NA, qdss_gpio6, reserved19, NA, NA,
+		 NA, NA),
+	PINGROUP(20, SOUTH, cci_i2c, qup1, NA, qdss_gpio7, reserved20, NA, NA,
+		 NA, NA),
+	PINGROUP(21, SOUTH, cci_timer0, gcc_gp2, qdss_gpio8, reserved21, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(22, SOUTH, cci_timer1, gcc_gp3, qdss_gpio, reserved22, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(23, SOUTH, cci_timer2, qdss_gpio9, reserved23, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(24, SOUTH, cci_timer3, cci_async, qdss_gpio10, reserved24, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(25, SOUTH, cci_timer4, cci_async, qdss_gpio11, reserved25, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(26, SOUTH, cci_async, qdss_gpio12, reserved26, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(27, NORTH, qup2, qdss_gpio13, reserved27, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(28, NORTH, qup2, qdss_gpio14, reserved28, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(29, NORTH, qup2, NA, phase_flag1, qdss_gpio15, reserved29, NA,
+		 NA, NA, NA),
+	PINGROUP(30, NORTH, qup2, phase_flag2, qdss_gpio, reserved30, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(31, NORTH, qup11, qup14, reserved31, NA, NA, NA, NA, NA, NA),
+	PINGROUP(32, NORTH, qup11, qup14, NA, reserved32, NA, NA, NA, NA, NA),
+	PINGROUP(33, NORTH, qup11, qup14, NA, reserved33, NA, NA, NA, NA, NA),
+	PINGROUP(34, NORTH, qup11, qup14, NA, reserved34, NA, NA, NA, NA, NA),
+	PINGROUP(35, SOUTH, pci_e0, qup_l4, jitter_bist, NA, reserved35, NA,
+		 NA, NA, NA),
+	PINGROUP(36, SOUTH, pci_e0, qup_l5, pll_bist, NA, atest_tsens,
+		 reserved36, NA, NA, NA),
+	PINGROUP(37, SOUTH, qup_l6, agera_pll, NA, reserved37, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(38, NORTH, usb_phy, NA, reserved38, NA, NA, NA, NA, NA, NA),
+	PINGROUP(39, NORTH, lpass_slimbus, NA, reserved39, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(40, SOUTH, sd_write, tsif1_error, NA, reserved40, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(41, SOUTH, qup3, NA, qdss_gpio6, reserved41, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(42, SOUTH, qup3, NA, qdss_gpio7, reserved42, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(43, SOUTH, qup3, NA, qdss_gpio14, reserved43, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(44, SOUTH, qup3, NA, qdss_gpio15, reserved44, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(45, NORTH, qup6, NA, reserved45, NA, NA, NA, NA, NA, NA),
+	PINGROUP(46, NORTH, qup6, NA, reserved46, NA, NA, NA, NA, NA, NA),
+	PINGROUP(47, NORTH, qup6, reserved47, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(48, NORTH, qup6, reserved48, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(49, NORTH, qup12, reserved49, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(50, NORTH, qup12, reserved50, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(51, NORTH, qup12, qdss_cti, reserved51, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(52, NORTH, qup12, phase_flag16, qdss_cti, reserved52, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(53, NORTH, qup10, phase_flag11, reserved53, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(54, NORTH, qup10, NA, phase_flag12, reserved54, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(55, NORTH, qup10, phase_flag13, reserved55, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(56, NORTH, qup10, phase_flag17, reserved56, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(57, NORTH, qua_mi2s, gcc_gp1, phase_flag18, reserved57, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(58, NORTH, qua_mi2s, gcc_gp2, phase_flag19, reserved58, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(59, NORTH, qua_mi2s, gcc_gp3, phase_flag20, reserved59, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(60, NORTH, qua_mi2s, cri_trng0, phase_flag21, reserved60, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(61, NORTH, qua_mi2s, cri_trng1, phase_flag22, reserved61, NA,
+		 NA, NA, NA, NA),
+	PINGROUP(62, NORTH, qua_mi2s, cri_trng, phase_flag23, qdss_cti,
+		 reserved62, NA, NA, NA, NA),
+	PINGROUP(63, NORTH, qua_mi2s, NA, phase_flag24, qdss_cti, reserved63,
+		 NA, NA, NA, NA),
+	PINGROUP(64, NORTH, pri_mi2s, sp_cmu, phase_flag25, reserved64, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(65, NORTH, pri_mi2s, qup8, reserved65, NA, NA, NA, NA, NA, NA),
+	PINGROUP(66, NORTH, pri_mi2s_ws, qup8, reserved66, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(67, NORTH, pri_mi2s, qup8, reserved67, NA, NA, NA, NA, NA, NA),
+	PINGROUP(68, NORTH, pri_mi2s, qup8, reserved68, NA, NA, NA, NA, NA, NA),
+	PINGROUP(69, NORTH, spkr_i2s, audio_ref, reserved69, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(70, NORTH, lpass_slimbus, spkr_i2s, reserved70, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(71, NORTH, lpass_slimbus, spkr_i2s, tsense_pwm1, tsense_pwm2,
+		 reserved71, NA, NA, NA, NA),
+	PINGROUP(72, NORTH, lpass_slimbus, spkr_i2s, reserved72, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(73, NORTH, btfm_slimbus, atest_usb2, reserved73, NA, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(74, NORTH, btfm_slimbus, ter_mi2s, phase_flag7, atest_usb23,
+		 reserved74, NA, NA, NA, NA),
+	PINGROUP(75, NORTH, ter_mi2s, phase_flag8, qdss_gpio8, atest_usb22,
+		 reserved75, NA, NA, NA, NA),
+	PINGROUP(76, NORTH, ter_mi2s, phase_flag9, qdss_gpio9, atest_usb21,
+		 reserved76, NA, NA, NA, NA),
+	PINGROUP(77, NORTH, ter_mi2s, phase_flag4, qdss_gpio10, atest_usb20,
+		 reserved77, NA, NA, NA, NA),
+	PINGROUP(78, NORTH, ter_mi2s, gcc_gp1, reserved78, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(79, NORTH, sec_mi2s, NA, NA, qdss_gpio11, reserved79, NA, NA,
+		 NA, NA),
+	PINGROUP(80, NORTH, sec_mi2s, NA, qdss_gpio12, reserved80, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(81, NORTH, sec_mi2s, qup15, NA, reserved81, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(82, NORTH, sec_mi2s, qup15, NA, reserved82, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(83, NORTH, sec_mi2s, qup15, NA, reserved83, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(84, NORTH, qup15, NA, reserved84, NA, NA, NA, NA, NA, NA),
+	PINGROUP(85, SOUTH, qup5, NA, reserved85, NA, NA, NA, NA, NA, NA),
+	PINGROUP(86, SOUTH, qup5, NA, NA, reserved86, NA, NA, NA, NA, NA),
+	PINGROUP(87, SOUTH, qup5, NA, reserved87, NA, NA, NA, NA, NA, NA),
+	PINGROUP(88, SOUTH, qup5, NA, reserved88, NA, NA, NA, NA, NA, NA),
+	PINGROUP(89, SOUTH, tsif1_clk, qup4, tgu_ch3, phase_flag10, reserved89,
+		 NA, NA, NA, NA),
+	PINGROUP(90, SOUTH, tsif1_en, mdp_vsync0, qup4, mdp_vsync1, mdp_vsync2,
+		 mdp_vsync3, tgu_ch0, phase_flag0, qdss_cti),
+	PINGROUP(91, SOUTH, tsif1_data, sdc4_cmd, qup4, tgu_ch1, NA, qdss_cti,
+		 reserved91, NA, NA),
+	PINGROUP(92, SOUTH, tsif2_error, sdc43, qup4, vfr_1, tgu_ch2, NA,
+		 reserved92, NA, NA),
+	PINGROUP(93, SOUTH, tsif2_clk, sdc4_clk, qup7, NA, qdss_gpio13,
+		 reserved93, NA, NA, NA),
+	PINGROUP(94, SOUTH, tsif2_en, sdc42, qup7, NA, reserved94, NA, NA, NA,
+		 NA),
+	PINGROUP(95, SOUTH, tsif2_data, sdc41, qup7, NA, NA, reserved95, NA,
+		 NA, NA),
+	PINGROUP(96, SOUTH, tsif2_sync, sdc40, qup7, phase_flag3, reserved96,
+		 NA, NA, NA, NA),
+	PINGROUP(97, NORTH, NA, NA, mdp_vsync, ldo_en, reserved97, NA, NA, NA,
+		 NA),
+	PINGROUP(98, NORTH, NA, mdp_vsync, ldo_update, reserved98, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(99, NORTH, phase_flag14, reserved99, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(100, NORTH, phase_flag15, reserved100, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(101, NORTH, NA, reserved101, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(102, NORTH, pci_e1, prng_rosc, reserved102, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(103, NORTH, pci_e1, phase_flag5, reserved103, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(104, NORTH, NA, reserved104, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(105, NORTH, uim2_data, qup13, qup_l4, NA, reserved105, NA, NA,
+		 NA, NA),
+	PINGROUP(106, NORTH, uim2_clk, qup13, qup_l5, NA, reserved106, NA, NA,
+		 NA, NA),
+	PINGROUP(107, NORTH, uim2_reset, qup13, qup_l6, reserved107, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(108, NORTH, uim2_present, qup13, reserved108, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(109, NORTH, uim1_data, reserved109, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(110, NORTH, uim1_clk, reserved110, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(111, NORTH, uim1_reset, reserved111, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(112, NORTH, uim1_present, reserved112, NA, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(113, NORTH, uim_batt, edp_hot, reserved113, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(114, NORTH, NA, nav_pps, nav_pps, NA, NA, reserved114, NA, NA,
+		 NA),
+	PINGROUP(115, NORTH, NA, nav_pps, nav_pps, NA, NA, reserved115, NA, NA,
+		 NA),
+	PINGROUP(116, NORTH, NA, reserved116, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(117, NORTH, NA, qdss_gpio0, atest_char, reserved117, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(118, NORTH, adsp_ext, NA, qdss_gpio1, atest_char3,
+		 reserved118, NA, NA, NA, NA),
+	PINGROUP(119, NORTH, NA, qdss_gpio2, atest_char2, reserved119, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(120, NORTH, NA, qdss_gpio3, atest_char1, reserved120, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(121, NORTH, NA, qdss_gpio4, atest_char0, reserved121, NA, NA,
+		 NA, NA, NA),
+	PINGROUP(122, NORTH, NA, qdss_gpio5, reserved122, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(123, NORTH, qup_l4, NA, qdss_gpio, reserved123, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(124, NORTH, qup_l5, NA, qdss_gpio, reserved124, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(125, NORTH, qup_l6, NA, reserved125, NA, NA, NA, NA, NA, NA),
+	PINGROUP(126, NORTH, NA, reserved126, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(127, NORTH, NA, reserved127, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(128, NORTH, nav_pps, nav_pps, NA, NA, reserved128, NA, NA, NA,
+		 NA),
+	PINGROUP(129, NORTH, nav_pps, nav_pps, NA, NA, reserved129, NA, NA, NA,
+		 NA),
+	PINGROUP(130, NORTH, qlink_request, NA, reserved130, NA, NA, NA, NA,
+		 NA, NA),
+	PINGROUP(131, NORTH, qlink_enable, NA, reserved131, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(132, NORTH, NA, NA, reserved132, NA, NA, NA, NA, NA, NA),
+	PINGROUP(133, NORTH, NA, reserved133, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(134, NORTH, NA, reserved134, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(135, NORTH, NA, pa_indicator, NA, reserved135, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(136, NORTH, NA, reserved136, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(137, NORTH, NA, NA, phase_flag26, reserved137, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(138, NORTH, NA, NA, phase_flag27, reserved138, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(139, NORTH, NA, phase_flag28, reserved139, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(140, NORTH, NA, NA, phase_flag6, reserved140, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(141, NORTH, NA, phase_flag29, reserved141, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(142, NORTH, NA, phase_flag30, reserved142, NA, NA, NA, NA, NA,
+		 NA),
+	PINGROUP(143, NORTH, NA, nav_pps, nav_pps, NA, phase_flag31,
+		 reserved143, NA, NA, NA),
+	PINGROUP(144, NORTH, mss_lte, reserved144, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(145, NORTH, mss_lte, NA, reserved145, NA, NA, NA, NA, NA, NA),
+	PINGROUP(146, NORTH, NA, NA, reserved146, NA, NA, NA, NA, NA, NA),
+	PINGROUP(147, NORTH, NA, NA, reserved147, NA, NA, NA, NA, NA, NA),
+	PINGROUP(148, NORTH, NA, reserved148, NA, NA, NA, NA, NA, NA, NA),
+	PINGROUP(149, NORTH, NA, reserved149, NA, NA, NA, NA, NA, NA, NA),
+	SDC_QDSD_PINGROUP(sdc2_clk, 0x59a000, 14, 6),
+	SDC_QDSD_PINGROUP(sdc2_cmd, 0x59a000, 11, 3),
+	SDC_QDSD_PINGROUP(sdc2_data, 0x59a000, 9, 0),
+};
+
+static const struct msm_pinctrl_soc_data msmskunk_pinctrl = {
+	.pins = msmskunk_pins,
+	.npins = ARRAY_SIZE(msmskunk_pins),
+	.functions = msmskunk_functions,
+	.nfunctions = ARRAY_SIZE(msmskunk_functions),
+	.groups = msmskunk_groups,
+	.ngroups = ARRAY_SIZE(msmskunk_groups),
+	.ngpios = 150,
+};
+
+static int msmskunk_pinctrl_probe(struct platform_device *pdev)
+{
+	return msm_pinctrl_probe(pdev, &msmskunk_pinctrl);
+}
+
+static const struct of_device_id msmskunk_pinctrl_of_match[] = {
+	{ .compatible = "qcom,msmskunk-pinctrl", },
+	{ },
+};
+
+static struct platform_driver msmskunk_pinctrl_driver = {
+	.driver = {
+		.name = "msmskunk-pinctrl",
+		.owner = THIS_MODULE,
+		.of_match_table = msmskunk_pinctrl_of_match,
+	},
+	.probe = msmskunk_pinctrl_probe,
+	.remove = msm_pinctrl_remove,
+};
+
+static int __init msmskunk_pinctrl_init(void)
+{
+	return platform_driver_register(&msmskunk_pinctrl_driver);
+}
+arch_initcall(msmskunk_pinctrl_init);
+
+static void __exit msmskunk_pinctrl_exit(void)
+{
+	platform_driver_unregister(&msmskunk_pinctrl_driver);
+}
+module_exit(msmskunk_pinctrl_exit);
+
+MODULE_DESCRIPTION("QTI msmskunk pinctrl driver");
+MODULE_LICENSE("GPL v2");
+MODULE_DEVICE_TABLE(of, msmskunk_pinctrl_of_match);
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index 664b641..77e9dd7 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -40,6 +40,8 @@
 #define PMIC_GPIO_SUBTYPE_GPIOC_4CH		0x5
 #define PMIC_GPIO_SUBTYPE_GPIO_8CH		0x9
 #define PMIC_GPIO_SUBTYPE_GPIOC_8CH		0xd
+#define PMIC_GPIO_SUBTYPE_GPIO_LV		0x10
+#define PMIC_GPIO_SUBTYPE_GPIO_MV		0x11
 
 #define PMIC_MPP_REG_RT_STS			0x10
 #define PMIC_MPP_REG_RT_STS_VAL_MASK		0x1
@@ -48,8 +50,11 @@
 #define PMIC_GPIO_REG_MODE_CTL			0x40
 #define PMIC_GPIO_REG_DIG_VIN_CTL		0x41
 #define PMIC_GPIO_REG_DIG_PULL_CTL		0x42
+#define PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL	0x44
+#define PMIC_GPIO_REG_DIG_IN_CTL		0x43
 #define PMIC_GPIO_REG_DIG_OUT_CTL		0x45
 #define PMIC_GPIO_REG_EN_CTL			0x46
+#define PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL	0x4A
 
 /* PMIC_GPIO_REG_MODE_CTL */
 #define PMIC_GPIO_REG_MODE_VALUE_SHIFT		0x1
@@ -58,6 +63,13 @@
 #define PMIC_GPIO_REG_MODE_DIR_SHIFT		4
 #define PMIC_GPIO_REG_MODE_DIR_MASK		0x7
 
+#define PMIC_GPIO_MODE_DIGITAL_INPUT		0
+#define PMIC_GPIO_MODE_DIGITAL_OUTPUT		1
+#define PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT	2
+#define PMIC_GPIO_MODE_ANALOG_PASS_THRU		3
+
+#define PMIC_GPIO_REG_LV_MV_MODE_DIR_MASK	0x3
+
 /* PMIC_GPIO_REG_DIG_VIN_CTL */
 #define PMIC_GPIO_REG_VIN_SHIFT			0
 #define PMIC_GPIO_REG_VIN_MASK			0x7
@@ -69,6 +81,16 @@
 #define PMIC_GPIO_PULL_DOWN			4
 #define PMIC_GPIO_PULL_DISABLE			5
 
+/* PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL for LV/MV */
+#define PMIC_GPIO_LV_MV_OUTPUT_INVERT		0x80
+#define PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT	7
+#define PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK	0xF
+
+/* PMIC_GPIO_REG_DIG_IN_CTL */
+#define PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN		0x80
+#define PMIC_GPIO_LV_MV_DIG_IN_DTEST_SEL_MASK	0x7
+#define PMIC_GPIO_DIG_IN_DTEST_SEL_MASK		0xf
+
 /* PMIC_GPIO_REG_DIG_OUT_CTL */
 #define PMIC_GPIO_REG_OUT_STRENGTH_SHIFT	0
 #define PMIC_GPIO_REG_OUT_STRENGTH_MASK		0x3
@@ -88,9 +110,29 @@
 
 #define PMIC_GPIO_PHYSICAL_OFFSET		1
 
+/* PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL */
+#define PMIC_GPIO_LV_MV_ANA_MUX_SEL_MASK		0x3
+
 /* Qualcomm specific pin configurations */
 #define PMIC_GPIO_CONF_PULL_UP			(PIN_CONFIG_END + 1)
 #define PMIC_GPIO_CONF_STRENGTH			(PIN_CONFIG_END + 2)
+#define PMIC_GPIO_CONF_ATEST			(PIN_CONFIG_END + 3)
+#define PMIC_GPIO_CONF_DTEST_BUFFER		(PIN_CONFIG_END + 4)
+
+/* The index of each function in pmic_gpio_functions[] array */
+enum pmic_gpio_func_index {
+	PMIC_GPIO_FUNC_INDEX_NORMAL	= 0x00,
+	PMIC_GPIO_FUNC_INDEX_PAIRED	= 0x01,
+	PMIC_GPIO_FUNC_INDEX_FUNC1	= 0x02,
+	PMIC_GPIO_FUNC_INDEX_FUNC2	= 0x03,
+	PMIC_GPIO_FUNC_INDEX_FUNC3	= 0x04,
+	PMIC_GPIO_FUNC_INDEX_FUNC4	= 0x05,
+	PMIC_GPIO_FUNC_INDEX_DTEST1	= 0x06,
+	PMIC_GPIO_FUNC_INDEX_DTEST2	= 0x07,
+	PMIC_GPIO_FUNC_INDEX_DTEST3	= 0x08,
+	PMIC_GPIO_FUNC_INDEX_DTEST4	= 0x09,
+	PMIC_GPIO_FUNC_INDEX_ANALOG	= 0x10,
+};
 
 /**
  * struct pmic_gpio_pad - keep current GPIO settings
@@ -102,12 +144,16 @@
  *	open-drain or open-source mode.
  * @output_enabled: Set to true if GPIO output logic is enabled.
  * @input_enabled: Set to true if GPIO input buffer logic is enabled.
+ * @lv_mv_type: Set to true if GPIO subtype is GPIO_LV(0x10) or GPIO_MV(0x11).
  * @num_sources: Number of power-sources supported by this GPIO.
  * @power_source: Current power-source used.
  * @buffer_type: Push-pull, open-drain or open-source.
  * @pullup: Constant current which flows through the GPIO output buffer.
  * @strength: No, Low, Medium, High
  * @function: See pmic_gpio_functions[]
+ * @atest: the ATEST selection for GPIO analog-pass-through mode
+ * @dtest_buffer: the DTEST buffer selection for digital input mode;
+ *	set to INT_MAX when not used.
  */
 struct pmic_gpio_pad {
 	u16		base;
@@ -117,12 +163,15 @@
 	bool		have_buffer;
 	bool		output_enabled;
 	bool		input_enabled;
+	bool		lv_mv_type;
 	unsigned int	num_sources;
 	unsigned int	power_source;
 	unsigned int	buffer_type;
 	unsigned int	pullup;
 	unsigned int	strength;
 	unsigned int	function;
+	unsigned int	atest;
+	unsigned int	dtest_buffer;
 };
 
 struct pmic_gpio_state {
@@ -135,12 +184,15 @@
 static const struct pinconf_generic_params pmic_gpio_bindings[] = {
 	{"qcom,pull-up-strength",	PMIC_GPIO_CONF_PULL_UP,		0},
 	{"qcom,drive-strength",		PMIC_GPIO_CONF_STRENGTH,	0},
+	{"qcom,atest",			PMIC_GPIO_CONF_ATEST,	0},
+	{"qcom,dtest-buffer",		PMIC_GPIO_CONF_DTEST_BUFFER,	0},
 };
 
 #ifdef CONFIG_DEBUG_FS
 static const struct pin_config_item pmic_conf_items[ARRAY_SIZE(pmic_gpio_bindings)] = {
 	PCONFDUMP(PMIC_GPIO_CONF_PULL_UP,  "pull up strength", NULL, true),
 	PCONFDUMP(PMIC_GPIO_CONF_STRENGTH, "drive-strength", NULL, true),
+	PCONFDUMP(PMIC_GPIO_CONF_ATEST, "atest", NULL, true),
 };
 #endif
 
@@ -152,11 +204,25 @@
 	"gpio30", "gpio31", "gpio32", "gpio33", "gpio34", "gpio35", "gpio36",
 };
 
+/*
+ * Treat the LV/MV GPIO analog-pass-through mode as a function and add it
+ * to the end of the function list. Add placeholders for the reserved
+ * functions defined in the LV/MV OUTPUT_SOURCE_SEL register.
+ */
 static const char *const pmic_gpio_functions[] = {
-	PMIC_GPIO_FUNC_NORMAL, PMIC_GPIO_FUNC_PAIRED,
-	PMIC_GPIO_FUNC_FUNC1, PMIC_GPIO_FUNC_FUNC2,
-	PMIC_GPIO_FUNC_DTEST1, PMIC_GPIO_FUNC_DTEST2,
-	PMIC_GPIO_FUNC_DTEST3, PMIC_GPIO_FUNC_DTEST4,
+	[PMIC_GPIO_FUNC_INDEX_NORMAL]	= PMIC_GPIO_FUNC_NORMAL,
+	[PMIC_GPIO_FUNC_INDEX_PAIRED]	= PMIC_GPIO_FUNC_PAIRED,
+	[PMIC_GPIO_FUNC_INDEX_FUNC1]	= PMIC_GPIO_FUNC_FUNC1,
+	[PMIC_GPIO_FUNC_INDEX_FUNC2]	= PMIC_GPIO_FUNC_FUNC2,
+	[PMIC_GPIO_FUNC_INDEX_FUNC3]	= PMIC_GPIO_FUNC_FUNC3,
+	[PMIC_GPIO_FUNC_INDEX_FUNC4]	= PMIC_GPIO_FUNC_FUNC4,
+	[PMIC_GPIO_FUNC_INDEX_DTEST1]	= PMIC_GPIO_FUNC_DTEST1,
+	[PMIC_GPIO_FUNC_INDEX_DTEST2]	= PMIC_GPIO_FUNC_DTEST2,
+	[PMIC_GPIO_FUNC_INDEX_DTEST3]	= PMIC_GPIO_FUNC_DTEST3,
+	[PMIC_GPIO_FUNC_INDEX_DTEST4]	= PMIC_GPIO_FUNC_DTEST4,
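+	/* indices 0x0a to 0x0f below are reserved placeholders */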
+	"reserved-a", "reserved-b", "reserved-c",
+	"reserved-d", "reserved-e", "reserved-f",
+	[PMIC_GPIO_FUNC_INDEX_ANALOG]	= PMIC_GPIO_FUNC_ANALOG,
 };
 
 static int pmic_gpio_read(struct pmic_gpio_state *state,
@@ -248,21 +314,74 @@
 
 	pad->function = function;
 
-	val = 0;
+	val = PMIC_GPIO_MODE_DIGITAL_INPUT;
 	if (pad->output_enabled) {
 		if (pad->input_enabled)
-			val = 2;
+			val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT;
 		else
-			val = 1;
+			val = PMIC_GPIO_MODE_DIGITAL_OUTPUT;
 	}
 
-	val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+	if (function > PMIC_GPIO_FUNC_INDEX_DTEST4 &&
+			function < PMIC_GPIO_FUNC_INDEX_ANALOG) {
+		pr_err("reserved function: %s hasn't been enabled\n",
+				pmic_gpio_functions[function]);
+		return -EINVAL;
+	}
 
-	ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
-	if (ret < 0)
-		return ret;
+	if (pad->lv_mv_type) {
+		if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+			val = PMIC_GPIO_MODE_ANALOG_PASS_THRU;
+			ret = pmic_gpio_write(state, pad,
+					PMIC_GPIO_REG_MODE_CTL, val);
+			if (ret < 0)
+				return ret;
+
+			ret = pmic_gpio_write(state, pad,
+					PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL,
+					pad->atest);
+			if (ret < 0)
+				return ret;
+		} else {
+			ret = pmic_gpio_write(state, pad,
+					PMIC_GPIO_REG_MODE_CTL, val);
+			if (ret < 0)
+				return ret;
+
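+			/*
+			 * For LV/MV GPIOs the output level and function are
+			 * programmed through DIG_OUT_SOURCE_CTL rather than
+			 * the MODE_CTL register.
+			 */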
+			val = pad->out_value
+				<< PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT;
+			val |= pad->function
+				& PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK;
+			ret = pmic_gpio_write(state, pad,
+				PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL, val);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		/*
+		 * GPIOs that are not of the LV/MV subtype don't have the
+		 * "func3", "func4" and "analog" functions, and their
+		 * "dtest1" to "dtest4" functions are programmed with a
+		 * register value 2 lower than the function index in
+		 * pmic_gpio_functions[].
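+		 * For example, "dtest1" has index 6 in pmic_gpio_functions[]
+		 * but is written as value 4 into the FUNCTION field.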
+		 */
+		if (function == PMIC_GPIO_FUNC_INDEX_FUNC3
+				|| function == PMIC_GPIO_FUNC_INDEX_FUNC4
+				|| function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+			return -EINVAL;
+		} else if (function >= PMIC_GPIO_FUNC_INDEX_DTEST1 &&
+				function <= PMIC_GPIO_FUNC_INDEX_DTEST4) {
+			pad->function -= (PMIC_GPIO_FUNC_INDEX_DTEST1 -
+					PMIC_GPIO_FUNC_INDEX_FUNC3);
+		}
+
+		val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
+		val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+		val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+		ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+		if (ret < 0)
+			return ret;
+	}
 
 	val = pad->is_enabled << PMIC_GPIO_REG_MASTER_EN_SHIFT;
 
@@ -322,6 +441,12 @@
 	case PMIC_GPIO_CONF_STRENGTH:
 		arg = pad->strength;
 		break;
+	case PMIC_GPIO_CONF_ATEST:
+		arg = pad->atest;
+		break;
+	case PMIC_GPIO_CONF_DTEST_BUFFER:
+		arg = pad->dtest_buffer;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -375,7 +500,7 @@
 			pad->is_enabled = false;
 			break;
 		case PIN_CONFIG_POWER_SOURCE:
-			if (arg > pad->num_sources)
+			if (arg >= pad->num_sources)
 				return -EINVAL;
 			pad->power_source = arg;
 			break;
@@ -396,6 +521,18 @@
 				return -EINVAL;
 			pad->strength = arg;
 			break;
+		case PMIC_GPIO_CONF_ATEST:
+			if (arg > PMIC_GPIO_AOUT_ATEST4)
+				return -EINVAL;
+			pad->atest = arg;
+			break;
+		case PMIC_GPIO_CONF_DTEST_BUFFER:
+			if ((pad->lv_mv_type && arg > PMIC_GPIO_DIN_DTEST4)
+					|| (!pad->lv_mv_type && arg >
+					PMIC_GPIO_DIG_IN_DTEST_SEL_MASK))
+				return -EINVAL;
+			pad->dtest_buffer = arg;
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -420,19 +557,64 @@
 	if (ret < 0)
 		return ret;
 
-	val = 0;
+	val = PMIC_GPIO_MODE_DIGITAL_INPUT;
 	if (pad->output_enabled) {
 		if (pad->input_enabled)
-			val = 2;
+			val = PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT;
 		else
-			val = 1;
+			val = PMIC_GPIO_MODE_DIGITAL_OUTPUT;
 	}
 
-	val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
-	val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
-	val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+	if (pad->dtest_buffer != INT_MAX) {
+		val = pad->dtest_buffer;
+		if (pad->lv_mv_type)
+			val |= PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN;
 
-	return pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+		ret = pmic_gpio_write(state, pad,
+				PMIC_GPIO_REG_DIG_IN_CTL, val);
+		if (ret < 0)
+			return ret;
+	}
+
+	if (pad->lv_mv_type) {
+		if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+			val = PMIC_GPIO_MODE_ANALOG_PASS_THRU;
+			ret = pmic_gpio_write(state, pad,
+					PMIC_GPIO_REG_MODE_CTL, val);
+			if (ret < 0)
+				return ret;
+
+			ret = pmic_gpio_write(state, pad,
+					PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL,
+					pad->atest);
+			if (ret < 0)
+				return ret;
+		} else {
+			ret = pmic_gpio_write(state, pad,
+					PMIC_GPIO_REG_MODE_CTL, val);
+			if (ret < 0)
+				return ret;
+
+			val = pad->out_value
+				<< PMIC_GPIO_LV_MV_OUTPUT_INVERT_SHIFT;
+			val |= pad->function
+				& PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK;
+			ret = pmic_gpio_write(state, pad,
+				PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL, val);
+			if (ret < 0)
+				return ret;
+		}
+	} else {
+		val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
+		val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+		val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+		ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_MODE_CTL, val);
+		if (ret < 0)
+			return ret;
+	}
+
+	return ret;
 }
 
 static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
@@ -440,7 +622,7 @@
 {
 	struct pmic_gpio_state *state = pinctrl_dev_get_drvdata(pctldev);
 	struct pmic_gpio_pad *pad;
-	int ret, val;
+	int ret, val, function;
 
 	static const char *const biases[] = {
 		"pull-up 30uA", "pull-up 1.5uA", "pull-up 31.5uA",
@@ -471,14 +653,28 @@
 			ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
 			pad->out_value = ret;
 		}
+		/*
+		 * For GPIOs that are not of the LV/MV subtype, the register
+		 * value programmed for "dtest1" to "dtest4" is 2 lower than
+		 * the function index in pmic_gpio_functions[], so add the
+		 * offset back before printing the function name.
+		 */
+		if (!pad->lv_mv_type &&
+				pad->function >= PMIC_GPIO_FUNC_INDEX_FUNC3) {
+			function = pad->function + (PMIC_GPIO_FUNC_INDEX_DTEST1
+					- PMIC_GPIO_FUNC_INDEX_FUNC3);
+		} else {
+			function = pad->function;
+		}
 
 		seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
-		seq_printf(s, " %-7s", pmic_gpio_functions[pad->function]);
+		seq_printf(s, " %-7s", pmic_gpio_functions[function]);
 		seq_printf(s, " vin-%d", pad->power_source);
 		seq_printf(s, " %-27s", biases[pad->pullup]);
 		seq_printf(s, " %-10s", buffer_types[pad->buffer_type]);
 		seq_printf(s, " %-4s", pad->out_value ? "high" : "low");
 		seq_printf(s, " %-7s", strengths[pad->strength]);
+		if (pad->dtest_buffer != INT_MAX)
+			seq_printf(s, " dtest buffer %d", pad->dtest_buffer);
 	}
 }
 
@@ -618,40 +814,72 @@
 	case PMIC_GPIO_SUBTYPE_GPIOC_8CH:
 		pad->num_sources = 8;
 		break;
+	case PMIC_GPIO_SUBTYPE_GPIO_LV:
+		pad->num_sources = 1;
+		pad->have_buffer = true;
+		pad->lv_mv_type = true;
+		break;
+	case PMIC_GPIO_SUBTYPE_GPIO_MV:
+		pad->num_sources = 2;
+		pad->have_buffer = true;
+		pad->lv_mv_type = true;
+		break;
 	default:
 		dev_err(state->dev, "unknown GPIO type 0x%x\n", subtype);
 		return -ENODEV;
 	}
 
-	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
-	if (val < 0)
-		return val;
+	if (pad->lv_mv_type) {
+		val = pmic_gpio_read(state, pad,
+				PMIC_GPIO_REG_LV_MV_DIG_OUT_SOURCE_CTL);
+		if (val < 0)
+			return val;
 
-	pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+		pad->out_value = !!(val & PMIC_GPIO_LV_MV_OUTPUT_INVERT);
+		pad->function = val & PMIC_GPIO_LV_MV_OUTPUT_SOURCE_SEL_MASK;
 
-	dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT;
-	dir &= PMIC_GPIO_REG_MODE_DIR_MASK;
+		val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
+		if (val < 0)
+			return val;
+
+		dir = val & PMIC_GPIO_REG_LV_MV_MODE_DIR_MASK;
+	} else {
+		val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_MODE_CTL);
+		if (val < 0)
+			return val;
+
+		pad->out_value = val & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
+
+		dir = val >> PMIC_GPIO_REG_MODE_DIR_SHIFT;
+		dir &= PMIC_GPIO_REG_MODE_DIR_MASK;
+		pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
+		pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK;
+	}
+
 	switch (dir) {
-	case 0:
+	case PMIC_GPIO_MODE_DIGITAL_INPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = false;
 		break;
-	case 1:
+	case PMIC_GPIO_MODE_DIGITAL_OUTPUT:
 		pad->input_enabled = false;
 		pad->output_enabled = true;
 		break;
-	case 2:
+	case PMIC_GPIO_MODE_DIGITAL_INPUT_OUTPUT:
 		pad->input_enabled = true;
 		pad->output_enabled = true;
 		break;
+	case PMIC_GPIO_MODE_ANALOG_PASS_THRU:
+		if (pad->lv_mv_type)
+			pad->function = PMIC_GPIO_FUNC_INDEX_ANALOG;
+		else
+			return -ENODEV;
+		break;
 	default:
 		dev_err(state->dev, "unknown GPIO direction\n");
 		return -ENODEV;
 	}
 
-	pad->function = val >> PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
-	pad->function &= PMIC_GPIO_REG_MODE_FUNCTION_MASK;
-
 	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_VIN_CTL);
 	if (val < 0)
 		return val;
@@ -666,6 +894,17 @@
 	pad->pullup = val >> PMIC_GPIO_REG_PULL_SHIFT;
 	pad->pullup &= PMIC_GPIO_REG_PULL_MASK;
 
+	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_IN_CTL);
+	if (val < 0)
+		return val;
+
+	if (pad->lv_mv_type && (val & PMIC_GPIO_LV_MV_DIG_IN_DTEST_EN))
+		pad->dtest_buffer = val & PMIC_GPIO_LV_MV_DIG_IN_DTEST_SEL_MASK;
+	else if (!pad->lv_mv_type)
+		pad->dtest_buffer = val & PMIC_GPIO_DIG_IN_DTEST_SEL_MASK;
+	else
+		pad->dtest_buffer = INT_MAX;
+
 	val = pmic_gpio_read(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL);
 	if (val < 0)
 		return val;
@@ -676,6 +915,13 @@
 	pad->buffer_type = val >> PMIC_GPIO_REG_OUT_TYPE_SHIFT;
 	pad->buffer_type &= PMIC_GPIO_REG_OUT_TYPE_MASK;
 
+	if (pad->function == PMIC_GPIO_FUNC_INDEX_ANALOG) {
+		val = pmic_gpio_read(state, pad,
+				PMIC_GPIO_REG_LV_MV_ANA_PASS_THRU_SEL);
+		if (val < 0)
+			return val;
+		pad->atest = val & PMIC_GPIO_LV_MV_ANA_MUX_SEL_MASK;
+	}
 	/* Pin could be disabled with PIN_CONFIG_BIAS_HIGH_IMPEDANCE */
 	pad->is_enabled = true;
 	return 0;
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 6556dbe..3e6ee7f 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -88,6 +88,10 @@
 #define PMIC_MPP_REG_AIN_ROUTE_SHIFT		0
 #define PMIC_MPP_REG_AIN_ROUTE_MASK		0x7
 
+/* PMIC_MPP_REG_SINK_CTL */
+#define PMIC_MPP_REG_CURRENT_SINK_MASK		0x7
+#define MPP_CURRENT_SINK_MA_STEP_SIZE		5
+
 #define PMIC_MPP_MODE_DIGITAL_INPUT		0
 #define PMIC_MPP_MODE_DIGITAL_OUTPUT		1
 #define PMIC_MPP_MODE_DIGITAL_BIDIR		2
@@ -107,6 +111,7 @@
 #define PMIC_MPP_CONF_ANALOG_LEVEL		(PIN_CONFIG_END + 2)
 #define PMIC_MPP_CONF_DTEST_SELECTOR		(PIN_CONFIG_END + 3)
 #define PMIC_MPP_CONF_PAIRED			(PIN_CONFIG_END + 4)
+#define PMIC_MPP_CONF_DTEST_BUFFER		(PIN_CONFIG_END + 5)
 
 /**
  * struct pmic_mpp_pad - keep current MPP settings
@@ -126,6 +131,7 @@
  * @function: See pmic_mpp_functions[].
  * @drive_strength: Amount of current in sink mode
  * @dtest: DTEST route selector
+ * @dtest_buffer: the DTEST buffer selection for digital input mode
  */
 struct pmic_mpp_pad {
 	u16		base;
@@ -144,6 +150,7 @@
 	unsigned int	function;
 	unsigned int	drive_strength;
 	unsigned int	dtest;
+	unsigned int	dtest_buffer;
 };
 
 struct pmic_mpp_state {
@@ -158,6 +165,7 @@
 	{"qcom,analog-level",	PMIC_MPP_CONF_ANALOG_LEVEL,	0},
 	{"qcom,dtest",		PMIC_MPP_CONF_DTEST_SELECTOR,	0},
 	{"qcom,paired",		PMIC_MPP_CONF_PAIRED,		0},
+	{"qcom,dtest-buffer",	PMIC_MPP_CONF_DTEST_BUFFER,	0},
 };
 
 #ifdef CONFIG_DEBUG_FS
@@ -166,6 +174,7 @@
 	PCONFDUMP(PMIC_MPP_CONF_ANALOG_LEVEL, "analog level", NULL, true),
 	PCONFDUMP(PMIC_MPP_CONF_DTEST_SELECTOR, "dtest", NULL, true),
 	PCONFDUMP(PMIC_MPP_CONF_PAIRED, "paired", NULL, false),
+	PCONFDUMP(PMIC_MPP_CONF_DTEST_BUFFER, "dtest buffer", NULL, true),
 };
 #endif
 
@@ -390,6 +399,9 @@
 	case PMIC_MPP_CONF_ANALOG_LEVEL:
 		arg = pad->aout_level;
 		break;
+	case PMIC_MPP_CONF_DTEST_BUFFER:
+		arg = pad->dtest_buffer;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -455,7 +467,7 @@
 			pad->dtest = arg;
 			break;
 		case PIN_CONFIG_DRIVE_STRENGTH:
-			arg = pad->drive_strength;
+			pad->drive_strength = arg;
 			break;
 		case PMIC_MPP_CONF_AMUX_ROUTE:
 			if (arg >= PMIC_MPP_AMUX_ROUTE_ABUS4)
@@ -468,6 +480,15 @@
 		case PMIC_MPP_CONF_PAIRED:
 			pad->paired = !!arg;
 			break;
+		case PMIC_MPP_CONF_DTEST_BUFFER:
+			/*
+			 * 0xf is the maximum value; it selects all
+			 * 4 dtest rails simultaneously.
+			 */
+			if (arg > 0xf)
+				return -EINVAL;
+			pad->dtest_buffer = arg;
+			break;
 		default:
 			return -EINVAL;
 		}
@@ -479,6 +500,11 @@
 	if (ret < 0)
 		return ret;
 
+	val = pad->dtest_buffer;
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_DIG_IN_CTL, val);
+	if (ret < 0)
+		return ret;
+
 	if (pad->has_pullup) {
 		val = pad->pullup << PMIC_MPP_REG_PULL_SHIFT;
 
@@ -498,6 +524,16 @@
 	if (ret < 0)
 		return ret;
 
+	val = 0;
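+	/*
+	 * Each sink-current step is 5 mA, so e.g. a requested 5 mA maps to
+	 * register code 0 and 40 mA maps to code 7.
+	 */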
+	if (pad->drive_strength >= MPP_CURRENT_SINK_MA_STEP_SIZE)
+		val = DIV_ROUND_UP(pad->drive_strength,
+				MPP_CURRENT_SINK_MA_STEP_SIZE) - 1;
+
+	val &= PMIC_MPP_REG_CURRENT_SINK_MASK;
+	ret = pmic_mpp_write(state, pad, PMIC_MPP_REG_SINK_CTL, val);
+	if (ret < 0)
+		return ret;
+
 	ret = pmic_mpp_write_mode_ctl(state, pad);
 	if (ret < 0)
 		return ret;
@@ -546,6 +582,8 @@
 			seq_printf(s, " dtest%d", pad->dtest);
 		if (pad->paired)
 			seq_puts(s, " paired");
+		if (pad->dtest_buffer)
+			seq_printf(s, " dtest buffer %d", pad->dtest_buffer);
 	}
 }
 
@@ -743,7 +781,7 @@
 	sel &= PMIC_MPP_REG_MODE_FUNCTION_MASK;
 
 	if (sel >= PMIC_MPP_SELECTOR_DTEST_FIRST)
-		pad->dtest = sel + 1;
+		pad->dtest = sel - PMIC_MPP_SELECTOR_DTEST_FIRST + 1;
 	else if (sel == PMIC_MPP_SELECTOR_PAIRED)
 		pad->paired = true;
 
@@ -754,6 +792,12 @@
 	pad->power_source = val >> PMIC_MPP_REG_VIN_SHIFT;
 	pad->power_source &= PMIC_MPP_REG_VIN_MASK;
 
+	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_IN_CTL);
+	if (val < 0)
+		return val;
+
+	pad->dtest_buffer = val;
+
 	if (subtype != PMIC_MPP_SUBTYPE_ULT_4CH_NO_ANA_OUT &&
 	    subtype != PMIC_MPP_SUBTYPE_ULT_4CH_NO_SINK) {
 		val = pmic_mpp_read(state, pad, PMIC_MPP_REG_DIG_PULL_CTL);
@@ -776,7 +820,8 @@
 	if (val < 0)
 		return val;
 
-	pad->drive_strength = val;
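+	/* the register code is zero-based: code N means (N + 1) * 5 mA */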
+	val &= PMIC_MPP_REG_CURRENT_SINK_MASK;
+	pad->drive_strength = (val + 1) * MPP_CURRENT_SINK_MA_STEP_SIZE;
 
 	val = pmic_mpp_read(state, pad, PMIC_MPP_REG_AOUT_CTL);
 	if (val < 0)
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index c11db8b..e29f6c2 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -8,3 +8,5 @@
 source "drivers/platform/goldfish/Kconfig"
 
 source "drivers/platform/chrome/Kconfig"
+
+source "drivers/platform/msm/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index ca26925..3fef6b2 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -7,3 +7,4 @@
 obj-$(CONFIG_OLPC)		+= olpc/
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_CHROME_PLATFORMS)	+= chrome/
+obj-$(CONFIG_ARCH_QCOM)		+= msm/
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 1aba2c7..198d16d 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -662,7 +662,6 @@
 	.remove = goldfish_pipe_remove,
 	.driver = {
 		.name = "goldfish_pipe",
-		.owner = THIS_MODULE,
 		.of_match_table = goldfish_pipe_of_match,
 		.acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
 	}
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
new file mode 100644
index 0000000..ac9545e
--- /dev/null
+++ b/drivers/platform/msm/Kconfig
@@ -0,0 +1,68 @@
+menu "Qualcomm technologies inc. MSM specific device drivers"
+	depends on ARCH_QCOM
+
+config IPA
+	tristate "IPA support"
+	depends on SPS && NET
+	help
+	  This driver supports the Internet Packet Accelerator (IPA) core.
+	  IPA is a programmable protocol processor HW block.
+	  It is designed to support generic HW processing of UL/DL IP packets
+	  for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
+config RMNET_IPA
+	tristate "IPA RMNET WWAN Network Device"
+	depends on IPA && MSM_QMI_INTERFACE
+	help
+	  This WWAN network driver implements a network stack class device.
+	  It supports embedded data transfer from the A7 to the Q6, configures
+	  the IPA HW for the RmNet Data Driver, and handles the exchange of
+	  QMI messages between the A7 and the Q6 IPA driver.
+
+config GSI
+	bool "GSI support"
+	help
+	  This driver provides the transport needed to talk to the
+	  IPA core. It replaces the BAM transport used previously.
+
+	  The GSI connects to a peripheral component via a uniform TLV
+	  interface and allows it to interface with other peripherals
+	  and CPUs over various types of interfaces such as MHI, xDCI,
+	  xHCI, GPI, WDI, Ethernet, etc.
+
+config IPA3
+	tristate "IPA3 support"
+	depends on GSI && NET
+	help
+	  This driver supports the Internet Packet Accelerator (IPA3) core.
+	  IPA is a programmable protocol processor HW block.
+	  It is designed to support generic HW processing of UL/DL IP packets
+	  for various use cases independent of radio technology.
+	  The driver supports client connection and configuration
+	  for the IPA core.
+	  Kernel and user-space processes can call the IPA driver
+	  to configure the IPA core.
+
+config RMNET_IPA3
+	tristate "IPA3 RMNET WWAN Network Device"
+	depends on IPA3 && MSM_QMI_INTERFACE
+	help
+	  This WWAN network driver implements a network stack class device.
+	  It supports embedded data transfer from the A7 to the Q6, configures
+	  the IPA HW for the RmNet Data Driver, and handles the exchange of
+	  QMI messages between the A7 and the Q6 IPA driver.
+
+config IPA_UT
+	tristate "IPA Unit-Test Framework and Test Suites"
+	depends on IPA3 && DEBUG_FS
+	help
+	  This module implements the IPA in-kernel test framework.
+	  The framework supports defining and running tests, grouped
+	  into suites according to the sub-unit of the IPA being tested.
+	  The user interface to run and control the tests is the debugfs
+	  file system.
+endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
new file mode 100644
index 0000000..1f9e11b
--- /dev/null
+++ b/drivers/platform/msm/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the MSM specific device drivers.
+#
+obj-$(CONFIG_GSI) += gsi/
+obj-$(CONFIG_IPA) += ipa/
+obj-$(CONFIG_IPA3) += ipa/
diff --git a/drivers/platform/msm/gsi/Makefile b/drivers/platform/msm/gsi/Makefile
new file mode 100644
index 0000000..b350a59
--- /dev/null
+++ b/drivers/platform/msm/gsi/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_GSI) += gsi.o gsi_dbg.o
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
new file mode 100644
index 0000000..df39010
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -0,0 +1,2763 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/log2.h>
+#include <linux/module.h>
+#include <linux/msm_gsi.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include "gsi.h"
+#include "gsi_reg.h"
+
+#define GSI_CMD_TIMEOUT (5*HZ)
+#define GSI_STOP_CMD_TIMEOUT_MS 1
+#define GSI_MAX_CH_LOW_WEIGHT 15
+#define GSI_MHI_ER_START 10
+#define GSI_MHI_ER_END 16
+
+#define GSI_RESET_WA_MIN_SLEEP 1000
+#define GSI_RESET_WA_MAX_SLEEP 2000
+static const struct of_device_id msm_gsi_match[] = {
+	{ .compatible = "qcom,msm_gsi", },
+	{ },
+};
+
+struct gsi_ctx *gsi_ctx;
+
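+/*
+ * The __gsi_config_*_irq() helpers below read-modify-write the corresponding
+ * IRQ mask/enable register: only the bits set in @mask are updated with the
+ * matching bits from @val.
+ */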
+static void __gsi_config_type_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_ch_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_evt_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_ieob_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
+}
+
+static void __gsi_config_glob_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(ee));
+}
+
+static void __gsi_config_gen_irq(int ee, uint32_t mask, uint32_t val)
+{
+	uint32_t curr;
+
+	curr = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
+	gsi_writel((curr & ~mask) | (val & mask), gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(ee));
+}
+
+static void gsi_handle_ch_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(ee));
+	GSIDBG("ch %x\n", ch);
+	for (i = 0; i < 32; i++) {
+		if ((1 << i) & ch) {
+			if (i >= gsi_ctx->max_ch || i >= GSI_CHAN_MAX) {
+				GSIERR("invalid channel %d\n", i);
+				break;
+			}
+
+			ctx = &gsi_ctx->chan[i];
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(i, ee));
+			ctx->state = (val &
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+			GSIDBG("ch %u state updated to %u\n", i, ctx->state);
+			complete(&ctx->compl);
+			gsi_ctx->ch_dbg[i].cmd_completed++;
+		}
+	}
+
+	gsi_writel(ch, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_handle_ev_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+	uint32_t val;
+	struct gsi_evt_ctx *ctx;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(ee));
+	GSIDBG("ev %x\n", ch);
+	for (i = 0; i < 32; i++) {
+		if ((1 << i) & ch) {
+			if (i >= gsi_ctx->max_ev || i >= GSI_EVT_RING_MAX) {
+				GSIERR("invalid event %d\n", i);
+				break;
+			}
+
+			ctx = &gsi_ctx->evtr[i];
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(i, ee));
+			ctx->state = (val &
+				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
+			GSIDBG("evt %u state updated to %u\n", i, ctx->state);
+			complete(&ctx->compl);
+		}
+	}
+
+	gsi_writel(ch, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_handle_glob_err(uint32_t err)
+{
+	struct gsi_log_err *log;
+	struct gsi_chan_ctx *ch;
+	struct gsi_evt_ctx *ev;
+	struct gsi_chan_err_notify chan_notify;
+	struct gsi_evt_err_notify evt_notify;
+	struct gsi_per_notify per_notify;
+	uint32_t val;
+
+	log = (struct gsi_log_err *)&err;
+	GSIERR("log err_type=%u ee=%u idx=%u\n", log->err_type, log->ee,
+			log->virt_idx);
+	GSIERR("code=%u arg1=%u arg2=%u arg3=%u\n", log->code, log->arg1,
+			log->arg2, log->arg3);
+	switch (log->err_type) {
+	case GSI_ERR_TYPE_GLOB:
+		per_notify.evt_id = GSI_PER_EVT_GLOB_ERROR;
+		per_notify.user_data = gsi_ctx->per.user_data;
+		per_notify.data.err_desc = err & 0xFFFF;
+		gsi_ctx->per.notify_cb(&per_notify);
+		break;
+	case GSI_ERR_TYPE_CHAN:
+		if (log->virt_idx >= gsi_ctx->max_ch) {
+			GSIERR("Unexpected ch %d\n", log->virt_idx);
+			WARN_ON(1);
+			return;
+		}
+
+		ch = &gsi_ctx->chan[log->virt_idx];
+		chan_notify.chan_user_data = ch->props.chan_user_data;
+		chan_notify.err_desc = err & 0xFFFF;
+		if (log->code == GSI_INVALID_TRE_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			val = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(log->virt_idx,
+					gsi_ctx->per.ee));
+			ch->state = (val &
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+				GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+			GSIDBG("ch %u state updated to %u\n", log->virt_idx,
+					ch->state);
+			ch->stats.invalid_tre_error++;
+			BUG_ON(ch->state != GSI_CHAN_STATE_ERROR);
+			chan_notify.evt_id = GSI_CHAN_INVALID_TRE_ERR;
+		} else if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			chan_notify.evt_id = GSI_CHAN_OUT_OF_BUFFERS_ERR;
+		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			chan_notify.evt_id = GSI_CHAN_OUT_OF_RESOURCES_ERR;
+			complete(&ch->compl);
+		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
+			chan_notify.evt_id =
+				GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR;
+		} else if (log->code == GSI_NON_ALLOCATED_EVT_ACCESS_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			chan_notify.evt_id =
+				GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR;
+		} else if (log->code == GSI_HWO_1_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			chan_notify.evt_id = GSI_CHAN_HWO_1_ERR;
+		} else {
+			BUG();
+		}
+		if (ch->props.err_cb)
+			ch->props.err_cb(&chan_notify);
+		else
+			WARN_ON(1);
+		break;
+	case GSI_ERR_TYPE_EVT:
+		if (log->virt_idx >= gsi_ctx->max_ev) {
+			GSIERR("Unexpected ev %d\n", log->virt_idx);
+			WARN_ON(1);
+			return;
+		}
+
+		ev = &gsi_ctx->evtr[log->virt_idx];
+		evt_notify.user_data = ev->props.user_data;
+		evt_notify.err_desc = err & 0xFFFF;
+		if (log->code == GSI_OUT_OF_BUFFERS_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			evt_notify.evt_id = GSI_EVT_OUT_OF_BUFFERS_ERR;
+		} else if (log->code == GSI_OUT_OF_RESOURCES_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			evt_notify.evt_id = GSI_EVT_OUT_OF_RESOURCES_ERR;
+			complete(&ev->compl);
+		} else if (log->code == GSI_UNSUPPORTED_INTER_EE_OP_ERR) {
+			evt_notify.evt_id = GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR;
+		} else if (log->code == GSI_EVT_RING_EMPTY_ERR) {
+			BUG_ON(log->ee != gsi_ctx->per.ee);
+			evt_notify.evt_id = GSI_EVT_EVT_RING_EMPTY_ERR;
+		} else {
+			BUG();
+		}
+		if (ev->props.err_cb)
+			ev->props.err_cb(&evt_notify);
+		else
+			WARN_ON(1);
+		break;
+	default:
+		WARN_ON(1);
+	}
+}
+
+static void gsi_handle_glob_ee(int ee)
+{
+	uint32_t val;
+	uint32_t err;
+	struct gsi_per_notify notify;
+	uint32_t clr = ~0;
+
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(ee));
+
+	notify.user_data = gsi_ctx->per.user_data;
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
+		err = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_ERROR_LOG_OFFS(ee));
+		if (gsi_ctx->per.ver >= GSI_VER_1_2)
+			gsi_writel(0, gsi_ctx->base +
+				GSI_EE_n_ERROR_LOG_OFFS(ee));
+		gsi_writel(clr, gsi_ctx->base +
+			GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
+		gsi_handle_glob_err(err);
+	}
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
+		notify.evt_id = GSI_PER_EVT_GLOB_GP1;
+		gsi_ctx->per.notify_cb(&notify);
+	}
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
+		notify.evt_id = GSI_PER_EVT_GLOB_GP2;
+		gsi_ctx->per.notify_cb(&notify);
+	}
+
+	if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK) {
+		notify.evt_id = GSI_PER_EVT_GLOB_GP3;
+		gsi_ctx->per.notify_cb(&notify);
+	}
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_incr_ring_wp(struct gsi_ring_ctx *ctx)
+{
+	ctx->wp_local += ctx->elem_sz;
+	if (ctx->wp_local == ctx->end)
+		ctx->wp_local = ctx->base;
+}
+
+static void gsi_incr_ring_rp(struct gsi_ring_ctx *ctx)
+{
+	ctx->rp_local += ctx->elem_sz;
+	if (ctx->rp_local == ctx->end)
+		ctx->rp_local = ctx->base;
+}
+
+uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr)
+{
+	BUG_ON(addr < ctx->base || addr >= ctx->end);
+
+	return (uint32_t)(addr - ctx->base)/ctx->elem_sz;
+}
+
+static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
+		struct gsi_chan_xfer_notify *notify, bool callback)
+{
+	uint32_t ch_id;
+	struct gsi_chan_ctx *ch_ctx;
+	uint16_t rp_idx;
+	uint64_t rp;
+
+	ch_id = evt->chid;
+	if (ch_id >= gsi_ctx->max_ch) {
+		GSIERR("Unexpected ch %d\n", ch_id);
+		WARN_ON(1);
+		return;
+	}
+
+	ch_ctx = &gsi_ctx->chan[ch_id];
+	BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
+	rp = evt->xfer_ptr;
+
+	while (ch_ctx->ring.rp_local != rp) {
+		gsi_incr_ring_rp(&ch_ctx->ring);
+		ch_ctx->stats.completed++;
+	}
+
+	/* the element at RP is also processed */
+	gsi_incr_ring_rp(&ch_ctx->ring);
+	ch_ctx->stats.completed++;
+
+	ch_ctx->ring.rp = ch_ctx->ring.rp_local;
+
+	rp_idx = gsi_find_idx_from_addr(&ch_ctx->ring, rp);
+	notify->xfer_user_data = ch_ctx->user_data[rp_idx];
+	notify->chan_user_data = ch_ctx->props.chan_user_data;
+	notify->evt_id = evt->code;
+	notify->bytes_xfered = evt->len;
+	if (callback)
+		ch_ctx->props.xfer_cb(notify);
+}
+
+static void gsi_process_evt_re(struct gsi_evt_ctx *ctx,
+		struct gsi_chan_xfer_notify *notify, bool callback)
+{
+	struct gsi_xfer_compl_evt *evt;
+	uint16_t idx;
+
+	idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.rp_local);
+	evt = (struct gsi_xfer_compl_evt *)(ctx->ring.base_va +
+			idx * ctx->ring.elem_sz);
+	gsi_process_chan(evt, notify, callback);
+	gsi_incr_ring_rp(&ctx->ring);
+	/* recycle this element */
+	gsi_incr_ring_wp(&ctx->ring);
+	ctx->stats.completed++;
+}
+
+static void gsi_ring_evt_doorbell(struct gsi_evt_ctx *ctx)
+{
+	uint32_t val;
+
+	/* write order MUST be MSB followed by LSB */
+	val = ((ctx->ring.wp_local >> 32) &
+			GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
+			GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(ctx->id,
+				gsi_ctx->per.ee));
+
+	val = (ctx->ring.wp_local &
+			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
+			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(ctx->id,
+				gsi_ctx->per.ee));
+}
+
+static void gsi_ring_chan_doorbell(struct gsi_chan_ctx *ctx)
+{
+	uint32_t val;
+
+	/* write order MUST be MSB followed by LSB */
+	val = ((ctx->ring.wp_local >> 32) &
+			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK) <<
+			GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(ctx->props.ch_id,
+				gsi_ctx->per.ee));
+
+	val = (ctx->ring.wp_local &
+			GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
+			GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ctx->props.ch_id,
+				gsi_ctx->per.ee));
+}
+
+static void gsi_handle_ieob(int ee)
+{
+	uint32_t ch;
+	int i;
+	uint64_t rp;
+	struct gsi_evt_ctx *ctx;
+	struct gsi_chan_xfer_notify notify;
+	unsigned long flags;
+	unsigned long cntr;
+	uint32_t msk;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(ee));
+	msk = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(ee));
+
+	for (i = 0; i < 32; i++) {
+		if ((1 << i) & ch & msk) {
+			ctx = &gsi_ctx->evtr[i];
+			BUG_ON(ctx->props.intf != GSI_EVT_CHTYPE_GPI_EV);
+			spin_lock_irqsave(&ctx->ring.slock, flags);
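+			/*
+			 * Re-read the event ring RP after each pass and keep
+			 * processing until no new completions arrived while
+			 * the previous batch was being handled.
+			 */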
+check_again:
+			cntr = 0;
+			rp = gsi_readl(gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(i, ee));
+			rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+				GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(i, ee))) << 32;
+			ctx->ring.rp = rp;
+			while (ctx->ring.rp_local != rp) {
+				++cntr;
+				gsi_process_evt_re(ctx, &notify, true);
+				if (ctx->props.exclusive &&
+					atomic_read(&ctx->chan->poll_mode)) {
+					cntr = 0;
+					break;
+				}
+			}
+			gsi_ring_evt_doorbell(ctx);
+			if (cntr != 0)
+				goto check_again;
+			spin_unlock_irqrestore(&ctx->ring.slock, flags);
+		}
+	}
+
+	gsi_writel(ch & msk, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_handle_inter_ee_ch_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(ee));
+	for (i = 0; i < 32; i++) {
+		if ((1 << i) & ch) {
+			/* not currently expected */
+			GSIERR("ch %u was inter-EE changed\n", i);
+		}
+	}
+
+	gsi_writel(ch, gsi_ctx->base +
+			GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_handle_inter_ee_ev_ctrl(int ee)
+{
+	uint32_t ch;
+	int i;
+
+	ch = gsi_readl(gsi_ctx->base +
+		GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(ee));
+	for (i = 0; i < 32; i++) {
+		if ((1 << i) & ch) {
+			/* not currently expected */
+			GSIERR("evt %u was inter-EE changed\n", i);
+		}
+	}
+
+	gsi_writel(ch, gsi_ctx->base +
+			GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(ee));
+}
+
+static void gsi_handle_general(int ee)
+{
+	uint32_t val;
+	struct gsi_per_notify notify;
+
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(ee));
+
+	notify.user_data = gsi_ctx->per.user_data;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_BUS_ERROR;
+
+	if (val & GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK)
+		notify.evt_id = GSI_PER_EVT_GENERAL_BREAK_POINT;
+
+	if (gsi_ctx->per.notify_cb)
+		gsi_ctx->per.notify_cb(&notify);
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(ee));
+}
+
+#define GSI_ISR_MAX_ITER 50
+
+static void gsi_handle_irq(void)
+{
+	uint32_t type;
+	int ee = gsi_ctx->per.ee;
+	unsigned long cnt = 0;
+
+	while (1) {
+		type = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(ee));
+
+		if (!type)
+			break;
+
+		GSIDBG("type %x\n", type);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK)
+			gsi_handle_ch_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK)
+			gsi_handle_ev_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK)
+			gsi_handle_glob_ee(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK)
+			gsi_handle_ieob(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK)
+			gsi_handle_inter_ee_ch_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK)
+			gsi_handle_inter_ee_ev_ctrl(ee);
+
+		if (type & GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK)
+			gsi_handle_general(ee);
+
+		if (++cnt > GSI_ISR_MAX_ITER)
+			BUG();
+	}
+}
+
+static irqreturn_t gsi_isr(int irq, void *ctxt)
+{
+	BUG_ON(ctxt != gsi_ctx);
+
+	if (gsi_ctx->per.req_clk_cb) {
+		bool granted = false;
+
+		gsi_ctx->per.req_clk_cb(gsi_ctx->per.user_data, &granted);
+		if (granted) {
+			gsi_handle_irq();
+			gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
+		}
+	} else {
+		gsi_handle_irq();
+	}
+
+	return IRQ_HANDLED;
+}
+
+static uint32_t gsi_get_max_channels(enum gsi_ver ver)
+{
+	uint32_t reg;
+
+	switch (ver) {
+	case GSI_VER_1_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
+			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_2:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_3:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+		break;
+	default:
+		GSIERR("bad gsi version %d\n", ver);
+		WARN_ON(1);
+		reg = 0;
+	}
+
+	GSIDBG("max channels %d\n", reg);
+
+	return reg;
+}
+
+static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
+{
+	uint32_t reg;
+
+	switch (ver) {
+	case GSI_VER_1_0:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
+			GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_2:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
+		break;
+	case GSI_VER_1_3:
+		reg = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		reg = (reg &
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+		break;
+	default:
+		GSIERR("bad gsi version %d\n", ver);
+		WARN_ON(1);
+		reg = 0;
+	}
+
+	GSIDBG("max event rings %d\n", reg);
+
+	return reg;
+}
+
+int gsi_complete_clk_grant(unsigned long dev_hdl)
+{
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->per_registered) {
+		GSIERR("no client registered\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
+				gsi_ctx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	spin_lock_irqsave(&gsi_ctx->slock, flags);
+	gsi_handle_irq();
+	gsi_ctx->per.rel_clk_cb(gsi_ctx->per.user_data);
+	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_complete_clk_grant);
+
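+/*
+ * Register the peripheral (single client per EE): request the GSI IRQ,
+ * map the register space, discover channel/event-ring limits and unmask
+ * the GSI interrupts.
+ */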
+int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
+{
+	int res;
+	uint32_t val;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !dev_hdl) {
+		GSIERR("bad params props=%p dev_hdl=%p\n", props, dev_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
+		GSIERR("bad params gsi_ver=%d\n", props->ver);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->notify_cb) {
+		GSIERR("notify callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->req_clk_cb && !props->rel_clk_cb) {
+		GSIERR("rel callback  must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->per_registered) {
+		GSIERR("per already registered\n");
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	spin_lock_init(&gsi_ctx->slock);
+	if (props->intr == GSI_INTR_IRQ) {
+		if (!props->irq) {
+			GSIERR("bad irq specified %u\n", props->irq);
+			return -GSI_STATUS_INVALID_PARAMS;
+		}
+
+		res = devm_request_irq(gsi_ctx->dev, props->irq,
+				(irq_handler_t) gsi_isr,
+				props->req_clk_cb ? IRQF_TRIGGER_RISING :
+					IRQF_TRIGGER_HIGH,
+				"gsi",
+				gsi_ctx);
+		if (res) {
+			GSIERR("failed to register isr for %u\n", props->irq);
+			return -GSI_STATUS_ERROR;
+		}
+
+		res = enable_irq_wake(props->irq);
+		if (res)
+			GSIERR("failed to enable wake irq %u\n", props->irq);
+		else
+			GSIERR("GSI irq is wake enabled %u\n", props->irq);
+
+	} else {
+		GSIERR("do not support interrupt type %u\n", props->intr);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	gsi_ctx->base = devm_ioremap_nocache(gsi_ctx->dev, props->phys_addr,
+				props->size);
+	if (!gsi_ctx->base) {
+		GSIERR("failed to remap GSI HW\n");
+		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	gsi_ctx->per = *props;
+	gsi_ctx->per_registered = true;
+	mutex_init(&gsi_ctx->mlock);
+	atomic_set(&gsi_ctx->num_chan, 0);
+	atomic_set(&gsi_ctx->num_evt_ring, 0);
+	gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
+	if (gsi_ctx->max_ch == 0) {
+		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+		GSIERR("failed to get max channels\n");
+		return -GSI_STATUS_ERROR;
+	}
+	gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
+	if (gsi_ctx->max_ev == 0) {
+		devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+		devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+		GSIERR("failed to get max event rings\n");
+		return -GSI_STATUS_ERROR;
+	}
+
+	/* mark event IDs beyond max_ev and the reserved MHI range as used */
+	gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
+	gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
+		((1 << GSI_MHI_ER_START) - 1);
+
+	/*
+	 * Enable all interrupts except GSI_BREAK_POINT.
+	 * Inter-EE commands / interrupts are not supported.
+	 */
+	__gsi_config_type_irq(props->ee, ~0, ~0);
+	__gsi_config_ch_irq(props->ee, ~0, ~0);
+	__gsi_config_evt_irq(props->ee, ~0, ~0);
+	__gsi_config_ieob_irq(props->ee, ~0, ~0);
+	__gsi_config_glob_irq(props->ee, ~0, ~0);
+	__gsi_config_gen_irq(props->ee, ~0,
+		~GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK);
+
+	gsi_writel(props->intr, gsi_ctx->base +
+			GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
+
+	val = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
+	if (val & GSI_EE_n_GSI_STATUS_ENABLED_BMSK)
+		gsi_ctx->enabled = true;
+	else
+		GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
+
+	if (gsi_ctx->per.ver >= GSI_VER_1_2)
+		gsi_writel(0, gsi_ctx->base +
+			GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+
+	*dev_hdl = (uintptr_t)gsi_ctx;
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_register_device);
+
+int gsi_write_device_scratch(unsigned long dev_hdl,
+		struct gsi_device_scratch *val)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->per_registered) {
+		GSIERR("no client registered\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
+				gsi_ctx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (val->max_usb_pkt_size_valid &&
+			val->max_usb_pkt_size != 1024 &&
+			val->max_usb_pkt_size != 512) {
+		GSIERR("bad USB max pkt size dev_hdl=0x%lx sz=%u\n", dev_hdl,
+				val->max_usb_pkt_size);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	if (val->mhi_base_chan_idx_valid)
+		gsi_ctx->scratch.word0.s.mhi_base_chan_idx =
+			val->mhi_base_chan_idx;
+	if (val->max_usb_pkt_size_valid)
+		gsi_ctx->scratch.word0.s.max_usb_pkt_size =
+			(val->max_usb_pkt_size == 1024) ? 1 : 0;
+	gsi_writel(gsi_ctx->scratch.word0.val,
+			gsi_ctx->base +
+			GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_device_scratch);
+
+int gsi_deregister_device(unsigned long dev_hdl, bool force)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!gsi_ctx->per_registered) {
+		GSIERR("no client registered\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params dev_hdl=0x%lx gsi_ctx=0x%p\n", dev_hdl,
+				gsi_ctx);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!force && atomic_read(&gsi_ctx->num_chan)) {
+		GSIERR("%u channels are allocated\n",
+				atomic_read(&gsi_ctx->num_chan));
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (!force && atomic_read(&gsi_ctx->num_evt_ring)) {
+		GSIERR("%u evt rings are allocated\n",
+				atomic_read(&gsi_ctx->num_evt_ring));
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* disable all interrupts */
+	__gsi_config_type_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_ch_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_evt_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_ieob_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_glob_irq(gsi_ctx->per.ee, ~0, 0);
+	__gsi_config_gen_irq(gsi_ctx->per.ee, ~0, 0);
+
+	devm_free_irq(gsi_ctx->dev, gsi_ctx->per.irq, gsi_ctx);
+	devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+	memset(gsi_ctx, 0, sizeof(*gsi_ctx));
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_deregister_device);
+
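+/* Program the EV_CH_k_CNTXT_* registers of an event ring from its props. */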
+static void gsi_program_evt_ring_ctx(struct gsi_evt_ring_props *props,
+		uint8_t evt_id, unsigned int ee)
+{
+	uint32_t val;
+
+	GSIDBG("intf=%u intr=%u re=%u\n", props->intf, props->intr,
+			props->re_size);
+
+	val = (((props->intf << GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT) &
+			GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK) |
+		((props->intr << GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT) &
+			GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK) |
+		((props->re_size << GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
+			& GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
+
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(evt_id, ee));
+
+	val = (props->ring_len & GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(evt_id, ee));
+
+	val = (props->ring_base_addr &
+			GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(evt_id, ee));
+
+	val = ((props->ring_base_addr >> 32) &
+		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(evt_id, ee));
+
+	val = (((props->int_modt << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT) &
+		GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK) |
+		((props->int_modc << GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT) &
+		 GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(evt_id, ee));
+
+	val = (props->intvec & GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(evt_id, ee));
+
+	val = (props->msi_addr & GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(evt_id, ee));
+
+	val = ((props->msi_addr >> 32) &
+		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(evt_id, ee));
+
+	val = (props->rp_update_addr &
+			GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(evt_id, ee));
+
+	val = ((props->rp_update_addr >> 32) &
+		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK) <<
+		GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(evt_id, ee));
+}
+
+static void gsi_init_evt_ring(struct gsi_evt_ring_props *props,
+		struct gsi_ring_ctx *ctx)
+{
+	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
+	ctx->base = props->ring_base_addr;
+	ctx->wp = ctx->base;
+	ctx->rp = ctx->base;
+	ctx->wp_local = ctx->base;
+	ctx->rp_local = ctx->base;
+	ctx->len = props->ring_len;
+	ctx->elem_sz = props->re_size;
+	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
+	ctx->end = ctx->base + (ctx->max_num_elem + 1) * ctx->elem_sz;
+}
+
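+/*
+ * Hand the whole (zeroed) event ring to the GSI HW by advancing the local
+ * write pointer to the last element and ringing the event doorbell.
+ */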
+static void gsi_prime_evt_ring(struct gsi_evt_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->ring.slock, flags);
+	memset((void *)ctx->ring.base_va, 0, ctx->ring.len);
+	ctx->ring.wp_local = ctx->ring.base +
+		ctx->ring.max_num_elem * ctx->ring.elem_sz;
+	gsi_ring_evt_doorbell(ctx);
+	spin_unlock_irqrestore(&ctx->ring.slock, flags);
+}
+
+static int gsi_validate_evt_ring_props(struct gsi_evt_ring_props *props)
+{
+	uint64_t ra;
+
+	if ((props->re_size == GSI_EVT_RING_RE_SIZE_4B &&
+				props->ring_len % 4) ||
+			(props->re_size == GSI_EVT_RING_RE_SIZE_16B &&
+				 props->ring_len % 16)) {
+		GSIERR("bad params ring_len %u not a multiple of RE size %u\n",
+				props->ring_len, props->re_size);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ra = props->ring_base_addr;
+	do_div(ra, roundup_pow_of_two(props->ring_len));
+
+	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
+		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
+				props->ring_base_addr,
+				roundup_pow_of_two(props->ring_len));
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->intf == GSI_EVT_CHTYPE_GPI_EV &&
+			!props->ring_base_vaddr) {
+		GSIERR("protocol %u requires ring base VA\n", props->intf);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->intf == GSI_EVT_CHTYPE_MHI_EV &&
+			(!props->evchid_valid ||
+			props->evchid > GSI_MHI_ER_END ||
+			props->evchid < GSI_MHI_ER_START)) {
+		GSIERR("MHI requires evchid valid=%d val=%u\n",
+				props->evchid_valid, props->evchid);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->intf != GSI_EVT_CHTYPE_MHI_EV &&
+			props->evchid_valid) {
+		GSIERR("protocol %u cannot specify evchid\n", props->intf);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->err_cb) {
+		GSIERR("err callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	return GSI_STATUS_SUCCESS;
+}
+
+int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
+		unsigned long *evt_ring_hdl)
+{
+	unsigned long evt_id;
+	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_ALLOCATE;
+	uint32_t val;
+	struct gsi_evt_ctx *ctx;
+	int res;
+	int ee;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	ee = gsi_ctx->per.ee;
+
+	if (!props || !evt_ring_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params props=%p dev_hdl=0x%lx evt_ring_hdl=%p\n",
+				props, dev_hdl, evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_validate_evt_ring_props(props)) {
+		GSIERR("invalid params\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->evchid_valid) {
+		mutex_lock(&gsi_ctx->mlock);
+		evt_id = find_first_zero_bit(&gsi_ctx->evt_bmap,
+				sizeof(unsigned long) * BITS_PER_BYTE);
+		if (evt_id == sizeof(unsigned long) * BITS_PER_BYTE) {
+			GSIERR("failed to alloc event ID\n");
+			mutex_unlock(&gsi_ctx->mlock);
+			return -GSI_STATUS_RES_ALLOC_FAILURE;
+		}
+		set_bit(evt_id, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+	} else {
+		evt_id = props->evchid;
+	}
+	GSIDBG("Using %lu as virt evt id\n", evt_id);
+
+	ctx = &gsi_ctx->evtr[evt_id];
+	memset(ctx, 0, sizeof(*ctx));
+	mutex_init(&ctx->mlock);
+	init_completion(&ctx->compl);
+	atomic_set(&ctx->chan_ref_cnt, 0);
+	ctx->props = *props;
+
+	mutex_lock(&gsi_ctx->mlock);
+	val = (((evt_id << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
+			GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_CMD_OFFS(ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("evt_id=%lu timed out\n", evt_id);
+		if (!props->evchid_valid)
+			clear_bit(evt_id, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("evt_id=%lu allocation failed state=%u\n",
+				evt_id, ctx->state);
+		if (!props->evchid_valid)
+			clear_bit(evt_id, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	gsi_program_evt_ring_ctx(props, evt_id, gsi_ctx->per.ee);
+
+	spin_lock_init(&ctx->ring.slock);
+	gsi_init_evt_ring(props, &ctx->ring);
+
+	ctx->id = evt_id;
+	*evt_ring_hdl = evt_id;
+	atomic_inc(&gsi_ctx->num_evt_ring);
+	if (props->intf == GSI_EVT_CHTYPE_GPI_EV)
+		gsi_prime_evt_ring(ctx);
+	mutex_unlock(&gsi_ctx->mlock);
+
+	spin_lock_irqsave(&gsi_ctx->slock, flags);
+	gsi_writel(1 << evt_id, gsi_ctx->base +
+			GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(ee));
+	if (props->intf != GSI_EVT_CHTYPE_GPI_EV)
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << evt_id, 0);
+	else
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->id, ~0);
+	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_alloc_evt_ring);
+
+static void __gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+		union __packed gsi_evt_scratch val)
+{
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+	gsi_writel(val.data.word2, gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(evt_ring_hdl,
+			gsi_ctx->per.ee));
+}
+
+int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+		union __packed gsi_evt_scratch val)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->evtr[evt_ring_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	ctx->scratch = val;
+	__gsi_write_evt_ring_scratch(evt_ring_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_evt_ring_scratch);
+
+int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
+{
+	uint32_t val;
+	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_DE_ALLOC;
+	struct gsi_evt_ctx *ctx;
+	int res;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (atomic_read(&ctx->chan_ref_cnt)) {
+		GSIERR("%d channels still using this event ring\n",
+			atomic_read(&ctx->chan_ref_cnt));
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* TODO: add check for ERROR state */
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
+			 GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
+				ctx->state);
+		BUG();
+	}
+	mutex_unlock(&gsi_ctx->mlock);
+
+	if (!ctx->props.evchid_valid) {
+		mutex_lock(&gsi_ctx->mlock);
+		clear_bit(evt_ring_hdl, &gsi_ctx->evt_bmap);
+		mutex_unlock(&gsi_ctx->mlock);
+	}
+	atomic_dec(&gsi_ctx->num_evt_ring);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_dealloc_evt_ring);
+
+int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
+		GSIERR("bad params msb=%p lsb=%p\n", db_addr_wp_msb,
+				db_addr_wp_lsb);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->evtr[evt_ring_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
+	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(evt_ring_hdl, gsi_ctx->per.ee);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_query_evt_ring_db_addr);
+
+int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
+{
+	uint32_t val;
+	enum gsi_evt_ch_cmd_opcode op = GSI_EVT_RESET;
+	struct gsi_evt_ctx *ctx;
+	int res;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	val = (((evt_ring_hdl << GSI_EE_n_EV_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_EV_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_EV_CH_CMD_OPCODE_SHFT) &
+			 GSI_EE_n_EV_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_EV_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("evt_id=%lu timed out\n", evt_ring_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("evt_id=%lu unexpected state=%u\n", evt_ring_hdl,
+				ctx->state);
+		BUG();
+	}
+
+	gsi_program_evt_ring_ctx(&ctx->props, evt_ring_hdl, gsi_ctx->per.ee);
+	gsi_init_evt_ring(&ctx->props, &ctx->ring);
+
+	/* restore scratch */
+	__gsi_write_evt_ring_scratch(evt_ring_hdl, ctx->scratch);
+
+	if (ctx->props.intf == GSI_EVT_CHTYPE_GPI_EV)
+		gsi_prime_evt_ring(ctx);
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_reset_evt_ring);
+
+int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !scr) {
+		GSIERR("bad params props=%p scr=%p\n", props, scr);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state == GSI_EVT_RING_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	*props = ctx->props;
+	*scr = ctx->scratch;
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_get_evt_ring_cfg);
+
+int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	struct gsi_evt_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || gsi_validate_evt_ring_props(props)) {
+		GSIERR("bad params props=%p\n", props);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (evt_ring_hdl >= gsi_ctx->max_ev) {
+		GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->evtr[evt_ring_hdl];
+
+	if (ctx->state != GSI_EVT_RING_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->props.exclusive != props->exclusive) {
+		GSIERR("changing immutable fields not supported\n");
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	ctx->props = *props;
+	if (scr)
+		ctx->scratch = *scr;
+	mutex_unlock(&ctx->mlock);
+
+	return gsi_reset_evt_ring(evt_ring_hdl);
+}
+EXPORT_SYMBOL(gsi_set_evt_ring_cfg);
+
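+/* Program a channel's GSI_CH_k_CNTXT_* and QOS registers from its props. */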
+static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
+		uint8_t erindex)
+{
+	uint32_t val;
+
+	val = (((props->prot << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT)
+			& GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK) |
+		((props->dir << GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT) &
+			 GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK) |
+		((erindex << GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT) &
+			 GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK) |
+		((props->re_size << GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT)
+			 & GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(props->ch_id, ee));
+
+	val = (props->ring_len & GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK) <<
+		GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(props->ch_id, ee));
+
+	val = (props->ring_base_addr &
+			GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK) <<
+		GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(props->ch_id, ee));
+
+	val = ((props->ring_base_addr >> 32) &
+		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK) <<
+		GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT;
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(props->ch_id, ee));
+
+	val = (((props->low_weight << GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT) &
+				GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK) |
+		((props->max_prefetch <<
+			 GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT) &
+			 GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK) |
+		((props->use_db_eng << GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT) &
+			 GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_k_QOS_OFFS(props->ch_id, ee));
+}
+
+static void gsi_init_chan_ring(struct gsi_chan_props *props,
+		struct gsi_ring_ctx *ctx)
+{
+	ctx->base_va = (uintptr_t)props->ring_base_vaddr;
+	ctx->base = props->ring_base_addr;
+	ctx->wp = ctx->base;
+	ctx->rp = ctx->base;
+	ctx->wp_local = ctx->base;
+	ctx->rp_local = ctx->base;
+	ctx->len = props->ring_len;
+	ctx->elem_sz = props->re_size;
+	ctx->max_num_elem = ctx->len / ctx->elem_sz - 1;
+	ctx->end = ctx->base + (ctx->max_num_elem + 1) *
+		ctx->elem_sz;
+}
+
+static int gsi_validate_channel_props(struct gsi_chan_props *props)
+{
+	uint64_t ra;
+
+	if (props->ch_id >= gsi_ctx->max_ch) {
+		GSIERR("ch_id %u invalid\n", props->ch_id);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if ((props->re_size == GSI_CHAN_RE_SIZE_4B &&
+				props->ring_len % 4) ||
+			(props->re_size == GSI_CHAN_RE_SIZE_16B &&
+				 props->ring_len % 16) ||
+			(props->re_size == GSI_CHAN_RE_SIZE_32B &&
+				 props->ring_len % 32)) {
+		GSIERR("bad params ring_len %u not a multiple of re size %u\n",
+				props->ring_len, props->re_size);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ra = props->ring_base_addr;
+	do_div(ra, roundup_pow_of_two(props->ring_len));
+
+	if (props->ring_base_addr != ra * roundup_pow_of_two(props->ring_len)) {
+		GSIERR("bad params ring base not aligned 0x%llx align 0x%lx\n",
+				props->ring_base_addr,
+				roundup_pow_of_two(props->ring_len));
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->prot == GSI_CHAN_PROT_GPI &&
+			!props->ring_base_vaddr) {
+		GSIERR("protocol %u requires ring base VA\n", props->prot);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->low_weight > GSI_MAX_CH_LOW_WEIGHT) {
+		GSIERR("invalid channel low weight %u\n", props->low_weight);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->prot == GSI_CHAN_PROT_GPI && !props->xfer_cb) {
+		GSIERR("xfer callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (!props->err_cb) {
+		GSIERR("err callback must be provided\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	return GSI_STATUS_SUCCESS;
+}
+
+int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
+		unsigned long *chan_hdl)
+{
+	struct gsi_chan_ctx *ctx;
+	uint32_t val;
+	int res;
+	int ee;
+	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
+	uint8_t erindex;
+	void **user_data;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	ee = gsi_ctx->per.ee;
+
+	if (!props || !chan_hdl || dev_hdl != (uintptr_t)gsi_ctx) {
+		GSIERR("bad params props=%p dev_hdl=0x%lx chan_hdl=%p\n",
+				props, dev_hdl, chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_validate_channel_props(props)) {
+		GSIERR("bad params\n");
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (props->evt_ring_hdl != ~0 &&
+		atomic_read(&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
+		gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) {
+		GSIERR("evt ring=%lu already in exclusive use chan_hdl=%p\n",
+				props->evt_ring_hdl, chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[props->ch_id];
+	if (ctx->allocated) {
+		GSIERR("chan %d already allocated\n", props->ch_id);
+		return -GSI_STATUS_NODEV;
+	}
+
+	memset(ctx, 0, sizeof(*ctx));
+	user_data = devm_kzalloc(gsi_ctx->dev,
+		(props->ring_len / props->re_size) * sizeof(void *),
+		GFP_KERNEL);
+	if (user_data == NULL) {
+		GSIERR("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	mutex_init(&ctx->mlock);
+	init_completion(&ctx->compl);
+	atomic_set(&ctx->poll_mode, GSI_CHAN_MODE_CALLBACK);
+	ctx->props = *props;
+
+	mutex_lock(&gsi_ctx->mlock);
+	gsi_ctx->ch_dbg[props->ch_id].ch_allocate++;
+	val = (((props->ch_id << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+				GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+			 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("chan_hdl=%u timed out\n", props->ch_id);
+		mutex_unlock(&gsi_ctx->mlock);
+		devm_kfree(gsi_ctx->dev, user_data);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("chan_hdl=%u allocation failed state=%d\n",
+				props->ch_id, ctx->state);
+		mutex_unlock(&gsi_ctx->mlock);
+		devm_kfree(gsi_ctx->dev, user_data);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+	mutex_unlock(&gsi_ctx->mlock);
+
+	erindex = props->evt_ring_hdl != ~0 ? props->evt_ring_hdl :
+		GSI_NO_EVT_ERINDEX;
+	if (erindex != GSI_NO_EVT_ERINDEX) {
+		ctx->evtr = &gsi_ctx->evtr[erindex];
+		atomic_inc(&ctx->evtr->chan_ref_cnt);
+		if (ctx->evtr->props.exclusive)
+			ctx->evtr->chan = ctx;
+	}
+
+	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
+
+	spin_lock_init(&ctx->ring.slock);
+	gsi_init_chan_ring(props, &ctx->ring);
+	if (!props->max_re_expected)
+		ctx->props.max_re_expected = ctx->ring.max_num_elem;
+	ctx->user_data = user_data;
+	*chan_hdl = props->ch_id;
+	ctx->allocated = true;
+	ctx->stats.dp.last_timestamp = jiffies_to_msecs(jiffies);
+	atomic_inc(&gsi_ctx->num_chan);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_alloc_channel);
+
+static void __gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val)
+{
+	uint32_t reg;
+
+	gsi_writel(val.data.word1, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	gsi_writel(val.data.word2, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	gsi_writel(val.data.word3, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	/*
+	 * The read-modify-write below is not atomic; it assumes the
+	 * sequencer-specific fields in SCRATCH_3 remain unchanged across
+	 * this sequence.
+	 */
+	reg = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+	reg &= 0xFFFF;
+	reg |= (val.data.word4 & 0xFFFF0000);
+	gsi_writel(reg, gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(chan_hdl,
+			gsi_ctx->per.ee));
+}
+
+int gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_ALLOCATED &&
+		gsi_ctx->chan[chan_hdl].state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	mutex_lock(&ctx->mlock);
+	ctx->scratch = val;
+	__gsi_write_channel_scratch(chan_hdl, val);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_write_channel_scratch);
+
+int gsi_query_channel_db_addr(unsigned long chan_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!db_addr_wp_msb || !db_addr_wp_lsb) {
+		GSIERR("bad params msb=%p lsb=%p\n", db_addr_wp_msb,
+				db_addr_wp_lsb);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (gsi_ctx->chan[chan_hdl].state == GSI_CHAN_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n",
+				gsi_ctx->chan[chan_hdl].state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	*db_addr_wp_lsb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(chan_hdl, gsi_ctx->per.ee);
+	*db_addr_wp_msb = gsi_ctx->per.phys_addr +
+		GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(chan_hdl, gsi_ctx->per.ee);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_query_channel_db_addr);
+
+int gsi_start_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_START;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
+		ctx->state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	init_completion(&ctx->compl);
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_start++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
+		BUG();
+	}
+
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_start_channel);
+
+int gsi_stop_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_STOP;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
+		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
+		return GSI_STATUS_SUCCESS;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC &&
+		ctx->state != GSI_CHAN_STATE_ERROR) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	init_completion(&ctx->compl);
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_stop++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl,
+			msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
+	if (res == 0) {
+		GSIDBG("chan_hdl=%lu timed out\n", chan_hdl);
+		res = -GSI_STATUS_TIMED_OUT;
+		goto free_lock;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
+		res = -GSI_STATUS_BAD_STATE;
+		goto free_lock;
+	}
+
+	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu busy try again\n", chan_hdl);
+		res = -GSI_STATUS_AGAIN;
+		goto free_lock;
+	}
+
+	res = GSI_STATUS_SUCCESS;
+
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+	return res;
+}
+EXPORT_SYMBOL(gsi_stop_channel);
+
+int gsi_stop_db_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_DB_STOP;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state == GSI_CHAN_STATE_STOPPED) {
+		GSIDBG("chan_hdl=%lu already stopped\n", chan_hdl);
+		return GSI_STATUS_SUCCESS;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	init_completion(&ctx->compl);
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_db_stop++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl,
+			msecs_to_jiffies(GSI_STOP_CMD_TIMEOUT_MS));
+	if (res == 0) {
+		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+		res = -GSI_STATUS_TIMED_OUT;
+		goto free_lock;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STOPPED &&
+		ctx->state != GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu unexpected state=%u\n", chan_hdl, ctx->state);
+		res = -GSI_STATUS_BAD_STATE;
+		goto free_lock;
+	}
+
+	if (ctx->state == GSI_CHAN_STATE_STOP_IN_PROC) {
+		GSIERR("chan=%lu busy try again\n", chan_hdl);
+		res = -GSI_STATUS_AGAIN;
+		goto free_lock;
+	}
+
+	res = GSI_STATUS_SUCCESS;
+
+free_lock:
+	mutex_unlock(&gsi_ctx->mlock);
+	return res;
+}
+EXPORT_SYMBOL(gsi_stop_db_channel);
+
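+/*
+ * Reset a STOPPED channel. Channels where GSI is the producer
+ * (GSI_CHAN_DIR_FROM_GSI) are reset twice as a HW workaround, then the
+ * channel context, ring state and scratch are reprogrammed.
+ */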
+int gsi_reset_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_RESET;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+	bool reset_done = false;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_STOPPED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+
+reset:
+	init_completion(&ctx->compl);
+	gsi_ctx->ch_dbg[chan_hdl].ch_reset++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
+				ctx->state);
+		BUG();
+	}
+
+	/* workaround: reset GSI producers again */
+	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI && !reset_done) {
+		usleep_range(GSI_RESET_WA_MIN_SLEEP, GSI_RESET_WA_MAX_SLEEP);
+		reset_done = true;
+		goto reset;
+	}
+
+	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
+			ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
+	gsi_init_chan_ring(&ctx->props, &ctx->ring);
+
+	/* restore scratch */
+	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
+
+	mutex_unlock(&gsi_ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_reset_channel);
+
+int gsi_dealloc_channel(unsigned long chan_hdl)
+{
+	enum gsi_ch_cmd_opcode op = GSI_CH_DE_ALLOC;
+	int res;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&gsi_ctx->mlock);
+	init_completion(&ctx->compl);
+
+	gsi_ctx->ch_dbg[chan_hdl].ch_de_alloc++;
+	val = (((chan_hdl << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+			GSI_EE_n_GSI_CH_CMD_CHID_BMSK) |
+		((op << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+		 GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK));
+	gsi_writel(val, gsi_ctx->base +
+			GSI_EE_n_GSI_CH_CMD_OFFS(gsi_ctx->per.ee));
+	res = wait_for_completion_timeout(&ctx->compl, GSI_CMD_TIMEOUT);
+	if (res == 0) {
+		GSIERR("chan_hdl=%lu timed out\n", chan_hdl);
+		mutex_unlock(&gsi_ctx->mlock);
+		return -GSI_STATUS_TIMED_OUT;
+	}
+	if (ctx->state != GSI_CHAN_STATE_NOT_ALLOCATED) {
+		GSIERR("chan_hdl=%lu unexpected state=%u\n", chan_hdl,
+				ctx->state);
+		BUG();
+	}
+
+	mutex_unlock(&gsi_ctx->mlock);
+
+	devm_kfree(gsi_ctx->dev, ctx->user_data);
+	ctx->allocated = false;
+	if (ctx->evtr)
+		atomic_dec(&ctx->evtr->chan_ref_cnt);
+	atomic_dec(&gsi_ctx->num_chan);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_dealloc_channel);
+
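+/*
+ * Track datapath occupancy: record the longest observed empty period and
+ * bucket ring usage relative to max_re_expected (below 1/3, 1/3 to 2/3,
+ * above 2/3).
+ */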
+void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used)
+{
+	unsigned long now = jiffies_to_msecs(jiffies);
+	unsigned long elapsed;
+
+	if (used == 0) {
+		elapsed = now - ctx->stats.dp.last_timestamp;
+		if (ctx->stats.dp.empty_time < elapsed)
+			ctx->stats.dp.empty_time = elapsed;
+	}
+
+	if (used <= ctx->props.max_re_expected / 3)
+		++ctx->stats.dp.ch_below_lo;
+	else if (used <= 2 * ctx->props.max_re_expected / 3)
+		++ctx->stats.dp.ch_below_hi;
+	else
+		++ctx->stats.dp.ch_above_hi;
+	ctx->stats.dp.last_timestamp = now;
+}
+
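+/*
+ * Compute the number of free ring elements from the local read pointer
+ * (or the HW read pointer when no event ring is associated) and the local
+ * write pointer, and feed the HW-based occupancy into the DP stats.
+ */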
+static void __gsi_query_channel_free_re(struct gsi_chan_ctx *ctx,
+		uint16_t *num_free_re)
+{
+	uint16_t start;
+	uint16_t start_hw;
+	uint16_t end;
+	uint64_t rp;
+	uint64_t rp_hw;
+	int ee = gsi_ctx->per.ee;
+	uint16_t used;
+	uint16_t used_hw;
+
+	rp_hw = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+	rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
+		<< 32;
+
+	if (!ctx->evtr) {
+		rp = rp_hw;
+		ctx->ring.rp = rp;
+	} else {
+		rp = ctx->ring.rp_local;
+	}
+
+	start = gsi_find_idx_from_addr(&ctx->ring, rp);
+	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
+	end = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
+
+	if (end >= start)
+		used = end - start;
+	else
+		used = ctx->ring.max_num_elem + 1 - (start - end);
+
+	if (end >= start_hw)
+		used_hw = end - start_hw;
+	else
+		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end);
+
+	*num_free_re = ctx->ring.max_num_elem - used;
+	gsi_update_ch_dp_stats(ctx, used_hw);
+}
+
+int gsi_query_channel_info(unsigned long chan_hdl,
+		struct gsi_chan_info *info)
+{
+	struct gsi_chan_ctx *ctx;
+	spinlock_t *slock;
+	unsigned long flags;
+	uint64_t rp;
+	uint64_t wp;
+	int ee;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	ee = gsi_ctx->per.ee;
+
+	if (chan_hdl >= gsi_ctx->max_ch || !info) {
+		GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	if (ctx->evtr) {
+		slock = &ctx->evtr->ring.slock;
+		info->evt_valid = true;
+	} else {
+		slock = &ctx->ring.slock;
+		info->evt_valid = false;
+	}
+
+	spin_lock_irqsave(slock, flags);
+
+	rp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
+	ctx->ring.rp = rp;
+	info->rp = rp;
+
+	wp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
+	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
+	ctx->ring.wp = wp;
+	info->wp = wp;
+
+	if (info->evt_valid) {
+		rp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+		rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee)))
+			<< 32;
+		info->evt_rp = rp;
+
+		wp = gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(ctx->evtr->id, ee));
+		wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+			GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(ctx->evtr->id, ee)))
+			<< 32;
+		info->evt_wp = wp;
+	}
+
+	spin_unlock_irqrestore(slock, flags);
+
+	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx ev_valid=%d ERP=0x%llx EWP=0x%llx\n",
+			chan_hdl, info->rp, info->wp,
+			info->evt_valid, info->evt_rp, info->evt_wp);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_query_channel_info);
+
+int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
+{
+	struct gsi_chan_ctx *ctx;
+	spinlock_t *slock;
+	unsigned long flags;
+	uint64_t rp;
+	uint64_t wp;
+	int ee;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	ee = gsi_ctx->per.ee;
+
+	if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
+		GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
+				chan_hdl, is_empty);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->evtr)
+		slock = &ctx->evtr->ring.slock;
+	else
+		slock = &ctx->ring.slock;
+
+	spin_lock_irqsave(slock, flags);
+
+	rp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee))) << 32;
+	ctx->ring.rp = rp;
+
+	wp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
+	wp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee))) << 32;
+	ctx->ring.wp = wp;
+
+	if (ctx->props.dir == GSI_CHAN_DIR_FROM_GSI)
+		*is_empty = (ctx->ring.rp_local == rp) ? true : false;
+	else
+		*is_empty = (wp == rp) ? true : false;
+
+	spin_unlock_irqrestore(slock, flags);
+
+	GSIDBG("ch=%lu RP=0x%llx WP=0x%llx RP_LOCAL=0x%llx\n",
+			chan_hdl, rp, wp, ctx->ring.rp_local);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_is_channel_empty);
+
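+/*
+ * Build TREs from the caller's transfer elements, copy them into the ring
+ * at the local write pointer and optionally ring the channel doorbell.
+ * Submission is all-or-nothing: on a bad element the local write pointer
+ * is rolled back and nothing is queued.
+ */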
+int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+		struct gsi_xfer_elem *xfer, bool ring_db)
+{
+	struct gsi_chan_ctx *ctx;
+	uint16_t free;
+	struct gsi_tre tre;
+	struct gsi_tre *tre_ptr;
+	uint16_t idx;
+	uint64_t wp_rollback;
+	int i;
+	spinlock_t *slock;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
+		GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
+				chan_hdl, num_xfers, xfer);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->evtr)
+		slock = &ctx->evtr->ring.slock;
+	else
+		slock = &ctx->ring.slock;
+
+	spin_lock_irqsave(slock, flags);
+	__gsi_query_channel_free_re(ctx, &free);
+
+	if (num_xfers > free) {
+		GSIERR("chan_hdl=%lu num_xfers=%u free=%u\n",
+				chan_hdl, num_xfers, free);
+		spin_unlock_irqrestore(slock, flags);
+		return -GSI_STATUS_RING_INSUFFICIENT_SPACE;
+	}
+
+	wp_rollback = ctx->ring.wp_local;
+	for (i = 0; i < num_xfers; i++) {
+		memset(&tre, 0, sizeof(tre));
+		tre.buffer_ptr = xfer[i].addr;
+		tre.buf_len = xfer[i].len;
+		if (xfer[i].type == GSI_XFER_ELEM_DATA) {
+			tre.re_type = GSI_RE_XFER;
+		} else if (xfer[i].type == GSI_XFER_ELEM_IMME_CMD) {
+			tre.re_type = GSI_RE_IMMD_CMD;
+		} else {
+			GSIERR("chan_hdl=%lu bad RE type=%u\n", chan_hdl,
+				xfer[i].type);
+			break;
+		}
+		tre.bei = (xfer[i].flags & GSI_XFER_FLAG_BEI) ? 1 : 0;
+		tre.ieot = (xfer[i].flags & GSI_XFER_FLAG_EOT) ? 1 : 0;
+		tre.ieob = (xfer[i].flags & GSI_XFER_FLAG_EOB) ? 1 : 0;
+		tre.chain = (xfer[i].flags & GSI_XFER_FLAG_CHAIN) ? 1 : 0;
+
+		idx = gsi_find_idx_from_addr(&ctx->ring, ctx->ring.wp_local);
+		tre_ptr = (struct gsi_tre *)(ctx->ring.base_va +
+				idx * ctx->ring.elem_sz);
+
+		/* write the TRE to ring */
+		*tre_ptr = tre;
+		ctx->user_data[idx] = xfer[i].xfer_user_data;
+		gsi_incr_ring_wp(&ctx->ring);
+	}
+
+	if (i != num_xfers) {
+		/* reject all the xfers */
+		ctx->ring.wp_local = wp_rollback;
+		spin_unlock_irqrestore(slock, flags);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx->stats.queued += num_xfers;
+
+	/* ensure TRE is set before ringing doorbell */
+	wmb();
+
+	if (ring_db)
+		gsi_ring_chan_doorbell(ctx);
+
+	spin_unlock_irqrestore(slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_queue_xfer);
+
+int gsi_start_xfer(unsigned long chan_hdl)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->state != GSI_CHAN_STATE_STARTED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	gsi_ring_chan_doorbell(ctx);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_start_xfer);
+
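+/*
+ * Poll one completed event from the channel's event ring. Returns
+ * GSI_STATUS_POLL_EMPTY when the local read pointer has caught up with
+ * the HW read pointer.
+ */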
+int gsi_poll_channel(unsigned long chan_hdl,
+		struct gsi_chan_xfer_notify *notify)
+{
+	struct gsi_chan_ctx *ctx;
+	uint64_t rp;
+	int ee;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	ee = gsi_ctx->per.ee;
+
+	if (chan_hdl >= gsi_ctx->max_ch || !notify) {
+		GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (!ctx->evtr) {
+		GSIERR("no event ring associated chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	spin_lock_irqsave(&ctx->evtr->ring.slock, flags);
+	rp = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(ctx->evtr->id, ee));
+	rp |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(ctx->evtr->id, ee))) << 32;
+	ctx->evtr->ring.rp = rp;
+	if (rp == ctx->evtr->ring.rp_local) {
+		spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
+		ctx->stats.poll_empty++;
+		return GSI_STATUS_POLL_EMPTY;
+	}
+
+	gsi_process_evt_re(ctx->evtr, notify, false);
+	gsi_ring_evt_doorbell(ctx->evtr);
+	spin_unlock_irqrestore(&ctx->evtr->ring.slock, flags);
+	ctx->stats.poll_ok++;
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_poll_channel);
+
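+/*
+ * Switch a channel that owns an exclusive event ring between callback
+ * mode (IEOB interrupt unmasked) and polling mode (IEOB interrupt masked).
+ */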
+int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
+{
+	struct gsi_chan_ctx *ctx;
+	enum gsi_chan_mode curr;
+	unsigned long flags;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (!ctx->evtr || !ctx->evtr->props.exclusive) {
+		GSIERR("cannot configure mode on chan_hdl=%lu\n",
+				chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (atomic_read(&ctx->poll_mode))
+		curr = GSI_CHAN_MODE_POLL;
+	else
+		curr = GSI_CHAN_MODE_CALLBACK;
+
+	if (mode == curr) {
+		GSIERR("already in requested mode %u chan_hdl=%lu\n",
+				curr, chan_hdl);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	spin_lock_irqsave(&gsi_ctx->slock, flags);
+	if (curr == GSI_CHAN_MODE_CALLBACK &&
+			mode == GSI_CHAN_MODE_POLL) {
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, 0);
+		ctx->stats.callback_to_poll++;
+	}
+
+	if (curr == GSI_CHAN_MODE_POLL &&
+			mode == GSI_CHAN_MODE_CALLBACK) {
+		__gsi_config_ieob_irq(gsi_ctx->per.ee, 1 << ctx->evtr->id, ~0);
+		ctx->stats.poll_to_callback++;
+	}
+	atomic_set(&ctx->poll_mode, mode);
+	spin_unlock_irqrestore(&gsi_ctx->slock, flags);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_config_channel_mode);
+
+int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || !scr) {
+		GSIERR("bad params props=%p scr=%p\n", props, scr);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state == GSI_CHAN_STATE_NOT_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	*props = ctx->props;
+	*scr = ctx->scratch;
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_get_channel_cfg);
+
+int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	struct gsi_chan_ctx *ctx;
+
+	if (!gsi_ctx) {
+		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+		return -GSI_STATUS_NODEV;
+	}
+
+	if (!props || gsi_validate_channel_props(props)) {
+		GSIERR("bad params props=%p\n", props);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	if (chan_hdl >= gsi_ctx->max_ch) {
+		GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
+		return -GSI_STATUS_INVALID_PARAMS;
+	}
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	if (ctx->props.ch_id != props->ch_id ||
+		ctx->props.evt_ring_hdl != props->evt_ring_hdl) {
+		GSIERR("changing immutable fields not supported\n");
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	mutex_lock(&ctx->mlock);
+	ctx->props = *props;
+	if (scr)
+		ctx->scratch = *scr;
+	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
+			ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
+	gsi_init_chan_ring(&ctx->props, &ctx->ring);
+
+	/* restore scratch */
+	__gsi_write_channel_scratch(chan_hdl, ctx->scratch);
+	mutex_unlock(&ctx->mlock);
+
+	return GSI_STATUS_SUCCESS;
+}
+EXPORT_SYMBOL(gsi_set_channel_cfg);
+
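+/* Program the GSI_GSI_IRAM_PTR_* registers with their fixed default values. */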
+static void gsi_configure_ieps(void __iomem *gsi_base)
+{
+	gsi_writel(1, gsi_base + GSI_GSI_IRAM_PTR_CH_CMD_OFFS);
+	gsi_writel(2, gsi_base + GSI_GSI_IRAM_PTR_CH_DB_OFFS);
+	gsi_writel(3, gsi_base + GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS);
+	gsi_writel(4, gsi_base + GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS);
+	gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
+	gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
+	gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
+	gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+	gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+	gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
+	gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
+	gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
+	gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
+}
+
+static void gsi_configure_bck_prs_matrix(void __iomem *gsi_base)
+{
+	/*
+	 * For now, these are default values. In the future, GSI FW image will
+	 * produce optimized back-pressure values based on the FW image.
+	 */
+	gsi_writel(0xfffffffe,
+		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff,
+		gsi_base + GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffbf, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffefff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffefff,
+		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff,
+		gsi_base + GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS);
+	gsi_writel(0x00000000,
+		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
+	gsi_writel(0x00000000,
+		gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_READ_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffefff, gsi_base + GSI_IC_READ_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff, gsi_base + GSI_IC_WRITE_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xffffdfff, gsi_base + GSI_IC_WRITE_BCK_PRS_MSB_OFFS);
+	gsi_writel(0xffffffff,
+		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS);
+	gsi_writel(0xff03ffff,
+		gsi_base + GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS);
+}
+
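+/*
+ * Map the GSI register space, program the peripheral base address and the
+ * default back-pressure matrix and IEP configuration, then unmap.
+ */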
+int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
+		phys_addr_t per_base_addr)
+{
+	void __iomem *gsi_base;
+
+	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
+	if (!gsi_base) {
+		GSIERR("ioremap failed for 0x%pa\n", &gsi_base_addr);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+	gsi_writel(0, gsi_base + GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS);
+	gsi_writel(per_base_addr,
+			gsi_base + GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS);
+	gsi_configure_bck_prs_matrix((void *)gsi_base);
+	gsi_configure_ieps((void *)gsi_base);
+	iounmap(gsi_base);
+
+	return 0;
+}
+EXPORT_SYMBOL(gsi_configure_regs);
+
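+/*
+ * Map the GSI register space and enable the GSI core. On GSI v1.2 and later
+ * the MCS enable bit lives in GSI_GSI_MCS_CFG; on earlier versions it is
+ * part of GSI_GSI_CFG. In both cases the MCS clock is set to double
+ * frequency.
+ */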
+int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
+{
+	void __iomem *gsi_base;
+	uint32_t value;
+
+	gsi_base = ioremap_nocache(gsi_base_addr, gsi_size);
+	if (!gsi_base) {
+		GSIERR("ioremap failed for 0x%pa\n", &gsi_base_addr);
+		return -GSI_STATUS_RES_ALLOC_FAILURE;
+	}
+
+	/* Enable GSI and the MCS, and double the MCS clock frequency */
+	if (gsi_ctx->per.ver >= GSI_VER_1_2) {
+		value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
+				GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
+		gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
+
+		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+			((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+				GSI_GSI_CFG_UC_IS_MCS_BMSK) |
+			((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
+				GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
+			((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
+				GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
+		gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+	} else {
+		value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+				GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+			((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+				GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+			((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+				GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+			((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+				GSI_GSI_CFG_UC_IS_MCS_BMSK));
+		gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+	}
+
+	iounmap(gsi_base);
+
+	return 0;
+}
+EXPORT_SYMBOL(gsi_enable_fw);
+
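+/* Platform probe: allocate the global GSI context and set up debugfs. */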
+static int msm_gsi_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+
+	pr_debug("gsi_probe\n");
+	gsi_ctx = devm_kzalloc(dev, sizeof(*gsi_ctx), GFP_KERNEL);
+	if (!gsi_ctx) {
+		dev_err(dev, "failed to allocate gsi context\n");
+		return -ENOMEM;
+	}
+
+	gsi_ctx->dev = dev;
+	gsi_debugfs_init();
+
+	return 0;
+}
+
+static struct platform_driver msm_gsi_driver = {
+	.probe          = msm_gsi_probe,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "gsi",
+		.of_match_table = msm_gsi_match,
+	},
+};
+
+/* Module init */
+static int __init gsi_init(void)
+{
+	pr_debug("gsi_init\n");
+	return platform_driver_register(&msm_gsi_driver);
+}
+
+arch_initcall(gsi_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Generic Software Interface (GSI)");
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
new file mode 100644
index 0000000..750ae2b
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -0,0 +1,235 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef GSI_H
+#define GSI_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/msm_gsi.h>
+
+#define GSI_CHAN_MAX      31
+#define GSI_EVT_RING_MAX  23
+#define GSI_NO_EVT_ERINDEX 31
+
+#define gsi_readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })
+#define gsi_writel(v, c)	({ __iowmb(); writel_relaxed((v), (c)); })
+
+#define GSIERR(fmt, args...) \
+		dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, ## args)
+#define GSIDBG(fmt, args...) \
+		dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, ## args)
+
+enum gsi_evt_ring_state {
+	GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
+	GSI_EVT_RING_STATE_ALLOCATED = 0x1,
+	GSI_EVT_RING_STATE_ERROR = 0xf
+};
+
+enum gsi_chan_state {
+	GSI_CHAN_STATE_NOT_ALLOCATED = 0x0,
+	GSI_CHAN_STATE_ALLOCATED = 0x1,
+	GSI_CHAN_STATE_STARTED = 0x2,
+	GSI_CHAN_STATE_STOPPED = 0x3,
+	GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
+	GSI_CHAN_STATE_ERROR = 0xf
+};
+
+struct gsi_ring_ctx {
+	spinlock_t slock;
+	unsigned long base_va;
+	uint64_t base;
+	uint64_t wp;
+	uint64_t rp;
+	uint64_t wp_local;
+	uint64_t rp_local;
+	uint16_t len;
+	uint8_t elem_sz;
+	uint16_t max_num_elem;
+	uint64_t end;
+};
+
+struct gsi_chan_dp_stats {
+	unsigned long ch_below_lo;
+	unsigned long ch_below_hi;
+	unsigned long ch_above_hi;
+	unsigned long empty_time;
+	unsigned long last_timestamp;
+};
+
+struct gsi_chan_stats {
+	unsigned long queued;
+	unsigned long completed;
+	unsigned long callback_to_poll;
+	unsigned long poll_to_callback;
+	unsigned long invalid_tre_error;
+	unsigned long poll_ok;
+	unsigned long poll_empty;
+	struct gsi_chan_dp_stats dp;
+};
+
+struct gsi_chan_ctx {
+	struct gsi_chan_props props;
+	enum gsi_chan_state state;
+	struct gsi_ring_ctx ring;
+	void **user_data;
+	struct gsi_evt_ctx *evtr;
+	struct mutex mlock;
+	struct completion compl;
+	bool allocated;
+	atomic_t poll_mode;
+	union __packed gsi_channel_scratch scratch;
+	struct gsi_chan_stats stats;
+	bool enable_dp_stats;
+	bool print_dp_stats;
+};
+
+struct gsi_evt_stats {
+	unsigned long completed;
+};
+
+struct gsi_evt_ctx {
+	struct gsi_evt_ring_props props;
+	enum gsi_evt_ring_state state;
+	uint8_t id;
+	struct gsi_ring_ctx ring;
+	struct mutex mlock;
+	struct completion compl;
+	struct gsi_chan_ctx *chan;
+	atomic_t chan_ref_cnt;
+	union __packed gsi_evt_scratch scratch;
+	struct gsi_evt_stats stats;
+};
+
+struct gsi_ee_scratch {
+	union __packed {
+		struct {
+			uint32_t resvd1:15;
+			uint32_t max_usb_pkt_size:1;
+			uint32_t resvd2:8;
+			uint32_t mhi_base_chan_idx:8;
+		} s;
+		uint32_t val;
+	} word0;
+	uint32_t word1;
+};
+
+struct ch_debug_stats {
+	unsigned long ch_allocate;
+	unsigned long ch_start;
+	unsigned long ch_stop;
+	unsigned long ch_reset;
+	unsigned long ch_de_alloc;
+	unsigned long ch_db_stop;
+	unsigned long cmd_completed;
+};
+
+struct gsi_ctx {
+	void __iomem *base;
+	struct device *dev;
+	struct gsi_per_props per;
+	bool per_registered;
+	struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+	struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
+	struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
+	struct mutex mlock;
+	spinlock_t slock;
+	unsigned long evt_bmap;
+	bool enabled;
+	atomic_t num_chan;
+	atomic_t num_evt_ring;
+	struct gsi_ee_scratch scratch;
+	int num_ch_dp_stats;
+	struct workqueue_struct *dp_stat_wq;
+	u32 max_ch;
+	u32 max_ev;
+};
+
+enum gsi_re_type {
+	GSI_RE_XFER = 0x2,
+	GSI_RE_IMMD_CMD = 0x3,
+};
+
+struct __packed gsi_tre {
+	uint64_t buffer_ptr;
+	uint16_t buf_len;
+	uint16_t resvd1;
+	uint16_t chain:1;
+	uint16_t resvd4:7;
+	uint16_t ieob:1;
+	uint16_t ieot:1;
+	uint16_t bei:1;
+	uint16_t resvd3:5;
+	uint8_t re_type;
+	uint8_t resvd2;
+};
+
+struct __packed gsi_xfer_compl_evt {
+	uint64_t xfer_ptr;
+	uint16_t len;
+	uint8_t resvd1;
+	uint8_t code;  /* see gsi_chan_evt */
+	uint16_t resvd;
+	uint8_t type;
+	uint8_t chid;
+};
+
+enum gsi_err_type {
+	GSI_ERR_TYPE_GLOB = 0x1,
+	GSI_ERR_TYPE_CHAN = 0x2,
+	GSI_ERR_TYPE_EVT = 0x3,
+};
+
+enum gsi_err_code {
+	GSI_INVALID_TRE_ERR = 0x1,
+	GSI_OUT_OF_BUFFERS_ERR = 0x2,
+	GSI_OUT_OF_RESOURCES_ERR = 0x3,
+	GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+	GSI_EVT_RING_EMPTY_ERR = 0x5,
+	GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
+	GSI_HWO_1_ERR = 0x8
+};
+
+struct __packed gsi_log_err {
+	uint32_t arg3:4;
+	uint32_t arg2:4;
+	uint32_t arg1:4;
+	uint32_t code:4;
+	uint32_t resvd:3;
+	uint32_t virt_idx:5;
+	uint32_t err_type:4;
+	uint32_t ee:4;
+};
+
+enum gsi_ch_cmd_opcode {
+	GSI_CH_ALLOCATE = 0x0,
+	GSI_CH_START = 0x1,
+	GSI_CH_STOP = 0x2,
+	GSI_CH_RESET = 0x9,
+	GSI_CH_DE_ALLOC = 0xa,
+	GSI_CH_DB_STOP = 0xb,
+};
+
+enum gsi_evt_ch_cmd_opcode {
+	GSI_EVT_ALLOCATE = 0x0,
+	GSI_EVT_RESET = 0x9,  /* TODO: is this valid? */
+	GSI_EVT_DE_ALLOC = 0xa,
+};
+
+extern struct gsi_ctx *gsi_ctx;
+void gsi_debugfs_init(void);
+uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
+void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used);
+
+#endif
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
new file mode 100644
index 0000000..5eb9084
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -0,0 +1,869 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/dma-mapping.h>
+#include <linux/random.h>
+#include <linux/uaccess.h>
+#include <linux/msm_gsi.h>
+#include "gsi_reg.h"
+#include "gsi.h"
+
+#define TERR(fmt, args...) \
+		pr_err("%s:%d " fmt, __func__, __LINE__, ## args)
+#define TDBG(fmt, args...) \
+		pr_debug("%s:%d " fmt, __func__, __LINE__, ## args)
+#define PRT_STAT(fmt, args...) \
+		pr_err(fmt, ## args)
+
+static struct dentry *dent;
+static char dbg_buff[4096];
+
+static void gsi_wq_print_dp_stats(struct work_struct *work);
+static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats);
+static void gsi_wq_update_dp_stats(struct work_struct *work);
+static DECLARE_DELAYED_WORK(gsi_update_dp_stats_work, gsi_wq_update_dp_stats);
+
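+/*
+ * debugfs "ev_dump" write handler. Input: "<evt_ring_id> <flag>". Dumps the
+ * event ring context and scratch registers and, if flag is non-zero, the
+ * ring contents (when a kernel virtual address is available).
+ */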
+static ssize_t gsi_dump_evt(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 arg1;
+	u32 arg2;
+	unsigned long missing;
+	char *sptr, *token;
+	uint32_t val;
+	struct gsi_evt_ctx *ctx;
+	uint16_t i;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg1))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg2))
+		return -EINVAL;
+
+	TDBG("arg1=%u arg2=%u\n", arg1, arg2);
+
+	if (arg1 >= gsi_ctx->max_ev) {
+		TERR("invalid evt ring id %u\n", arg1);
+		return -EFAULT;
+	}
+
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX1  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX2  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX3  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX4  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX5  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX6  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX7  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX8  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX9  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX10 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX11 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX12 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d CTX13 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d SCR0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("EV%2d SCR1  0x%x\n", arg1, val);
+
+	if (arg2) {
+		ctx = &gsi_ctx->evtr[arg1];
+
+		if (ctx->props.ring_base_vaddr) {
+			for (i = 0; i < ctx->props.ring_len / 16; i++)
+				TERR("EV%2d (0x%08llx) %08x %08x %08x %08x\n",
+				arg1, ctx->props.ring_base_addr + i * 16,
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 0),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 4),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 8),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 12));
+		} else {
+			TERR("No VA supplied for event ring id %u\n", arg1);
+		}
+	}
+
+	return count;
+}
+
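+/*
+ * debugfs "ch_dump" write handler. Input: "<chan_id> <flag>". Dumps the
+ * channel context, QOS and scratch registers and, if flag is non-zero, the
+ * ring contents (when a kernel virtual address is available).
+ */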
+static ssize_t gsi_dump_ch(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 arg1;
+	u32 arg2;
+	unsigned long missing;
+	char *sptr, *token;
+	uint32_t val;
+	struct gsi_chan_ctx *ctx;
+	uint16_t i;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg1))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &arg2))
+		return -EINVAL;
+
+	TDBG("arg1=%u arg2=%u\n", arg1, arg2);
+
+	if (arg1 >= gsi_ctx->max_ch) {
+		TERR("invalid chan id %u\n", arg1);
+		return -EFAULT;
+	}
+
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX1  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX2  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX3  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX4  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX5  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX6  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d CTX7  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(arg1,
+			gsi_ctx->per.ee));
+	TERR("CH%2d REFRP 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(arg1,
+			gsi_ctx->per.ee));
+	TERR("CH%2d REFWP 0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d QOS   0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR0  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR1  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR2  0x%x\n", arg1, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(arg1, gsi_ctx->per.ee));
+	TERR("CH%2d SCR3  0x%x\n", arg1, val);
+
+	if (arg2) {
+		ctx = &gsi_ctx->chan[arg1];
+
+		if (ctx->props.ring_base_vaddr) {
+			for (i = 0; i < ctx->props.ring_len / 16; i++)
+				TERR("CH%2d (0x%08llx) %08x %08x %08x %08x\n",
+				arg1, ctx->props.ring_base_addr + i * 16,
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 0),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 4),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 8),
+				*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
+					i * 16 + 12));
+		} else {
+			TERR("No VA supplied for chan id %u\n", arg1);
+		}
+	}
+
+	return count;
+}
+
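+/*
+ * debugfs "ee_dump" write handler: dumps the execution environment status,
+ * HW parameter and interrupt mask/enable registers.
+ */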
+static ssize_t gsi_dump_ee(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	uint32_t val;
+
+	val = gsi_readl(gsi_ctx->base +
+		GSI_GSI_MANAGER_EE_QOS_n_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d QOS 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
+	if (gsi_ctx->per.ver == GSI_VER_1_0) {
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+	} else if (gsi_ctx->per.ver == GSI_VER_1_2) {
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+	} else if (gsi_ctx->per.ver == GSI_VER_1_3) {
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+		val = gsi_readl(gsi_ctx->base +
+			GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+		TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
+	} else {
+		WARN_ON(1);
+	}
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_MCS_CODE_VER_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d MCS_CODE_VER 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d TYPE_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d CH_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d EV_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d IEOB_IRQ_MSK 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d GLOB_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d GSI_IRQ_EN 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_INTSET_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d INTSET 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d MSI_BASE_LSB 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d MSI_BASE_MSB 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_INT_VEC_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d INT_VEC 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d SCR0 0x%x\n", gsi_ctx->per.ee, val);
+	val = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_CNTXT_SCRATCH_1_OFFS(gsi_ctx->per.ee));
+	TERR("EE%2d SCR1 0x%x\n", gsi_ctx->per.ee, val);
+
+	return count;
+}
+
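+/*
+ * debugfs "map" write handler: prints the virtual-to-physical channel and
+ * event ring mappings of all allocated channels.
+ */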
+static ssize_t gsi_dump_map(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct gsi_chan_ctx *ctx;
+	uint32_t val1;
+	uint32_t val2;
+	int i;
+
+	TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
+	for (i = 0; i < gsi_ctx->max_ch; i++) {
+		ctx = &gsi_ctx->chan[i];
+
+		if (ctx->allocated) {
+			TERR("VIRT CH%2d -> VIRT EV%2d\n", ctx->props.ch_id,
+				ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
+			val1 = gsi_readl(gsi_ctx->base +
+				GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(i,
+					gsi_ctx->per.ee));
+			TERR("VIRT CH%2d -> PHYS CH%2d\n", ctx->props.ch_id,
+				val1 &
+				GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK);
+			if (ctx->evtr) {
+				val2 = gsi_readl(gsi_ctx->base +
+				GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(
+					ctx->evtr->id, gsi_ctx->per.ee));
+				TERR("VIRT EV%2d -> PHYS EV%2d\n", ctx->evtr->id,
+				val2 &
+				GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK);
+			}
+			TERR("\n");
+		}
+	}
+
+	return count;
+}
+
+static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
+{
+	if (!ctx->allocated)
+		return;
+
+	PRT_STAT("CH%2d:\n", ctx->props.ch_id);
+	PRT_STAT("queued=%lu compl=%lu\n",
+		ctx->stats.queued,
+		ctx->stats.completed);
+	PRT_STAT("cb->poll=%lu poll->cb=%lu\n",
+		ctx->stats.callback_to_poll,
+		ctx->stats.poll_to_callback);
+	PRT_STAT("invalid_tre_error=%lu\n",
+		ctx->stats.invalid_tre_error);
+	PRT_STAT("poll_ok=%lu poll_empty=%lu\n",
+		ctx->stats.poll_ok, ctx->stats.poll_empty);
+	if (ctx->evtr)
+		PRT_STAT("compl_evt=%lu\n",
+			ctx->evtr->stats.completed);
+
+	PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo);
+	PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi);
+	PRT_STAT("ch_above_hi=%lu\n", ctx->stats.dp.ch_above_hi);
+	PRT_STAT("time_empty=%lums\n", ctx->stats.dp.empty_time);
+	PRT_STAT("\n");
+}
+
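+/*
+ * debugfs "stats" write handler: dumps the statistics of a single channel,
+ * or of all allocated channels when -1 is given.
+ */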
+static ssize_t gsi_dump_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	int min, max;
+
+	if (sizeof(dbg_buff) < count + 1)
+		goto error;
+
+	if (copy_from_user(dbg_buff, buf, count))
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	if (kstrtos32(dbg_buff, 0, &ch_id))
+		goto error;
+
+	if (ch_id == -1) {
+		min = 0;
+		max = gsi_ctx->max_ch;
+	} else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+		   !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	} else {
+		min = ch_id;
+		max = ch_id + 1;
+	}
+
+	for (ch_id = min; ch_id < max; ch_id++)
+		gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
+
+	return count;
+error:
+	TERR("Usage: echo ch_id > stats. Use -1 for all\n");
+	return -EFAULT;
+}
+
+static int gsi_dbg_create_stats_wq(void)
+{
+	gsi_ctx->dp_stat_wq =
+		create_singlethread_workqueue("gsi_stat");
+	if (!gsi_ctx->dp_stat_wq) {
+		TERR("failed create workqueue\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void gsi_dbg_destroy_stats_wq(void)
+{
+	cancel_delayed_work_sync(&gsi_update_dp_stats_work);
+	cancel_delayed_work_sync(&gsi_print_dp_stats_work);
+	flush_workqueue(gsi_ctx->dp_stat_wq);
+	destroy_workqueue(gsi_ctx->dp_stat_wq);
+	gsi_ctx->dp_stat_wq = NULL;
+}
+
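+/*
+ * debugfs "enable_dp_stats" write handler: "+<ch_id>" enables and "-<ch_id>"
+ * disables periodic data path statistics sampling for a channel. The stats
+ * workqueue is created on the first enable and destroyed when the enable
+ * count drops back to zero.
+ */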
+static ssize_t gsi_enable_dp_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	bool enable;
+	int ret;
+
+	if (sizeof(dbg_buff) < count + 1)
+		goto error;
+
+	if (copy_from_user(dbg_buff, buf, count))
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	if (dbg_buff[0] != '+' && dbg_buff[0] != '-')
+		goto error;
+
+	enable = (dbg_buff[0] == '+');
+
+	if (kstrtos32(dbg_buff + 1, 0, &ch_id))
+		goto error;
+
+	if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+	    !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	}
+
+	if (gsi_ctx->chan[ch_id].props.prot == GSI_CHAN_PROT_GPI) {
+		TERR("valid for non-GPI channels only\n");
+		goto error;
+	}
+
+	if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) {
+		TERR("ch_%d: already enabled/disabled\n", ch_id);
+		return -EFAULT;
+	}
+	gsi_ctx->chan[ch_id].enable_dp_stats = enable;
+
+	if (enable)
+		gsi_ctx->num_ch_dp_stats++;
+	else
+		gsi_ctx->num_ch_dp_stats--;
+
+	if (enable) {
+		if (gsi_ctx->num_ch_dp_stats == 1) {
+			ret = gsi_dbg_create_stats_wq();
+			if (ret)
+				return ret;
+		}
+		cancel_delayed_work_sync(&gsi_update_dp_stats_work);
+		queue_delayed_work(gsi_ctx->dp_stat_wq,
+			&gsi_update_dp_stats_work, msecs_to_jiffies(10));
+	} else if (!enable && gsi_ctx->num_ch_dp_stats == 0) {
+		gsi_dbg_destroy_stats_wq();
+	}
+
+	return count;
+error:
+	TERR("Usage: echo [+-]ch_id > enable_dp_stats\n");
+	return -EFAULT;
+}
+
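+/*
+ * debugfs "max_elem_dp_stats" write handler: "<ch_id> <max_elem>" sets the
+ * channel's max_re_expected value; "<ch_id>" alone prints the current value.
+ */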
+static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	u32 ch_id;
+	u32 max_elem;
+	unsigned long missing;
+	char *sptr, *token;
+
+	if (sizeof(dbg_buff) < count + 1)
+		goto error;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token) {
+		TERR("\n");
+		goto error;
+	}
+
+	if (kstrtou32(token, 0, &ch_id)) {
+		TERR("\n");
+		goto error;
+	}
+
+	token = strsep(&sptr, " ");
+	if (!token) {
+		/* get */
+		if (kstrtou32(dbg_buff, 0, &ch_id))
+			goto error;
+		if (ch_id >= gsi_ctx->max_ch)
+			goto error;
+		PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
+			gsi_ctx->chan[ch_id].props.max_re_expected);
+		return count;
+	}
+	if (kstrtou32(token, 0, &max_elem)) {
+		TERR("\n");
+		goto error;
+	}
+
+	TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
+
+	if (ch_id >= gsi_ctx->max_ch) {
+		TERR("invalid chan id %u\n", ch_id);
+		goto error;
+	}
+
+	gsi_ctx->chan[ch_id].props.max_re_expected = max_elem;
+
+	return count;
+
+error:
+	TERR("Usage: (set) echo <ch_id> <max_elem> > max_elem_dp_stats\n");
+	TERR("Usage: (get) echo <ch_id> > max_elem_dp_stats\n");
+	return -EFAULT;
+}
+
+static void gsi_wq_print_dp_stats(struct work_struct *work)
+{
+	int ch_id;
+
+	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
+		if (gsi_ctx->chan[ch_id].print_dp_stats)
+			gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
+	}
+
+	queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_print_dp_stats_work,
+		msecs_to_jiffies(1000));
+}
+
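+/*
+ * Read the channel's hardware read/write pointers from CNTXT_4..7, convert
+ * the difference into the number of ring elements currently in use and feed
+ * it into the channel data path statistics.
+ */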
+static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx)
+{
+	uint16_t start_hw;
+	uint16_t end_hw;
+	uint64_t rp_hw;
+	uint64_t wp_hw;
+	int ee = gsi_ctx->per.ee;
+	uint16_t used_hw;
+
+	rp_hw = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
+	rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
+		<< 32;
+
+	wp_hw = gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
+	wp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
+		GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee)))
+		<< 32;
+
+	start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
+	end_hw = gsi_find_idx_from_addr(&ctx->ring, wp_hw);
+
+	if (end_hw >= start_hw)
+		used_hw = end_hw - start_hw;
+	else
+		used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw);
+
+	TERR("ch %d used %d\n", ctx->props.ch_id, used_hw);
+	gsi_update_ch_dp_stats(ctx, used_hw);
+}
+
+static void gsi_wq_update_dp_stats(struct work_struct *work)
+{
+	int ch_id;
+
+	for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
+		if (gsi_ctx->chan[ch_id].allocated &&
+		    gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
+		    gsi_ctx->chan[ch_id].enable_dp_stats)
+			gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]);
+	}
+
+	queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_update_dp_stats_work,
+		msecs_to_jiffies(10));
+}
+
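+/*
+ * debugfs "rst_stats" write handler: resets the statistics of a single
+ * channel, or of all channels when -1 is given.
+ */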
+static ssize_t gsi_rst_stats(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	int min, max;
+
+	if (sizeof(dbg_buff) < count + 1)
+		goto error;
+
+	if (copy_from_user(dbg_buff, buf, count))
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	if (kstrtos32(dbg_buff, 0, &ch_id))
+		goto error;
+
+	if (ch_id == -1) {
+		min = 0;
+		max = gsi_ctx->max_ch;
+	} else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+		   !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	} else {
+		min = ch_id;
+		max = ch_id + 1;
+	}
+
+	for (ch_id = min; ch_id < max; ch_id++)
+		memset(&gsi_ctx->chan[ch_id].stats, 0,
+			sizeof(gsi_ctx->chan[ch_id].stats));
+
+	return count;
+error:
+	TERR("Usage: echo ch_id > rst_stats. Use -1 for all\n");
+	return -EFAULT;
+}
+
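+/*
+ * debugfs "print_dp_stats" write handler: "+<ch_id>"/"-<ch_id>" toggles
+ * periodic printing of a channel's statistics from the stats workqueue.
+ */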
+static ssize_t gsi_print_dp_stats(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	int ch_id;
+	bool enable;
+	int ret;
+
+	if (sizeof(dbg_buff) < count + 1)
+		goto error;
+
+	if (copy_from_user(dbg_buff, buf, count))
+		goto error;
+
+	dbg_buff[count] = '\0';
+
+	if (dbg_buff[0] != '+' && dbg_buff[0] != '-')
+		goto error;
+
+	enable = (dbg_buff[0] == '+');
+
+	if (kstrtos32(dbg_buff + 1, 0, &ch_id))
+		goto error;
+
+	if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
+	    !gsi_ctx->chan[ch_id].allocated) {
+		goto error;
+	}
+
+	if (gsi_ctx->chan[ch_id].print_dp_stats == enable) {
+		TERR("ch_%d: already enabled/disabled\n", ch_id);
+		return -EFAULT;
+	}
+	gsi_ctx->chan[ch_id].print_dp_stats = enable;
+
+	if (enable)
+		gsi_ctx->num_ch_dp_stats++;
+	else
+		gsi_ctx->num_ch_dp_stats--;
+
+	if (enable) {
+		if (gsi_ctx->num_ch_dp_stats == 1) {
+			ret = gsi_dbg_create_stats_wq();
+			if (ret)
+				return ret;
+		}
+		cancel_delayed_work_sync(&gsi_print_dp_stats_work);
+		queue_delayed_work(gsi_ctx->dp_stat_wq,
+			&gsi_print_dp_stats_work, msecs_to_jiffies(10));
+	} else if (!enable && gsi_ctx->num_ch_dp_stats == 0) {
+		gsi_dbg_destroy_stats_wq();
+	}
+
+	return count;
+error:
+	TERR("Usage: echo [+-]ch_id > print_dp_stats\n");
+	return -EFAULT;
+}
+
+const struct file_operations gsi_ev_dump_ops = {
+	.write = gsi_dump_evt,
+};
+
+const struct file_operations gsi_ch_dump_ops = {
+	.write = gsi_dump_ch,
+};
+
+const struct file_operations gsi_ee_dump_ops = {
+	.write = gsi_dump_ee,
+};
+
+const struct file_operations gsi_map_ops = {
+	.write = gsi_dump_map,
+};
+
+const struct file_operations gsi_stats_ops = {
+	.write = gsi_dump_stats,
+};
+
+const struct file_operations gsi_enable_dp_stats_ops = {
+	.write = gsi_enable_dp_stats,
+};
+
+const struct file_operations gsi_max_elem_dp_stats_ops = {
+	.write = gsi_set_max_elem_dp_stats,
+};
+
+const struct file_operations gsi_rst_stats_ops = {
+	.write = gsi_rst_stats,
+};
+
+const struct file_operations gsi_print_dp_stats_ops = {
+	.write = gsi_print_dp_stats,
+};
+
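+/*
+ * Create the "gsi" debugfs directory and its dump and statistics control
+ * files; on any failure the whole directory is removed.
+ */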
+void gsi_debugfs_init(void)
+{
+	static struct dentry *dfile;
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+
+	dent = debugfs_create_dir("gsi", 0);
+	if (IS_ERR(dent)) {
+		TERR("fail to create dir\n");
+		return;
+	}
+
+	dfile = debugfs_create_file("ev_dump", write_only_mode,
+			dent, 0, &gsi_ev_dump_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create ev_dump file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("ch_dump", write_only_mode,
+			dent, 0, &gsi_ch_dump_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create ch_dump file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("ee_dump", read_only_mode, dent,
+			0, &gsi_ee_dump_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create ee_dump file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("map", read_only_mode, dent,
+			0, &gsi_map_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create map file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("stats", write_only_mode, dent,
+			0, &gsi_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("enable_dp_stats", write_only_mode, dent,
+			0, &gsi_enable_dp_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create enable_dp_stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("max_elem_dp_stats", write_only_mode,
+		dent, 0, &gsi_max_elem_dp_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create max_elem_dp_stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("rst_stats", write_only_mode,
+		dent, 0, &gsi_rst_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create rst_stats file\n");
+		goto fail;
+	}
+
+	dfile = debugfs_create_file("print_dp_stats",
+		write_only_mode, dent, 0, &gsi_print_dp_stats_ops);
+	if (!dfile || IS_ERR(dfile)) {
+		TERR("fail to create print_dp_stats file\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+#else
+void gsi_debugfs_init(void)
+{
+}
+#endif
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
new file mode 100644
index 0000000..fa1e848
--- /dev/null
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -0,0 +1,1842 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __GSI_REG_H__
+#define __GSI_REG_H__
+
+#define GSI_GSI_REG_BASE_OFFS 0
+
+#define GSI_GSI_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000000)
+#define GSI_GSI_CFG_RMSK 0xf
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
+#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
+#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
+#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
+#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2
+#define GSI_GSI_CFG_MCS_ENABLE_BMSK 0x2
+#define GSI_GSI_CFG_MCS_ENABLE_SHFT 0x1
+#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
+#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+
+#define GSI_GSI_MCS_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
+#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000008)
+#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff
+#define GSI_GSI_MANAGER_MCS_CODE_VER_VER_BMSK 0xffffffff
+#define GSI_GSI_MANAGER_MCS_CODE_VER_VER_SHFT 0x0
+
+#define GSI_GSI_ZEROS_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000010)
+#define GSI_GSI_ZEROS_RMSK 0xffffffff
+#define GSI_GSI_ZEROS_ZEROS_BMSK 0xffffffff
+#define GSI_GSI_ZEROS_ZEROS_SHFT 0x0
+
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000018)
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_RMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_BMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_LSB_BASE_ADDR_SHFT 0x0
+
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000001c)
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_RMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_BMSK 0xffffffff
+#define GSI_GSI_PERIPH_BASE_ADDR_MSB_BASE_ADDR_SHFT 0x0
+
+#define GSI_GSI_MOQA_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000030)
+#define GSI_GSI_MOQA_CFG_RMSK 0xffffff
+#define GSI_GSI_MOQA_CFG_CLIENT_OOWR_BMSK 0xff0000
+#define GSI_GSI_MOQA_CFG_CLIENT_OOWR_SHFT 0x10
+#define GSI_GSI_MOQA_CFG_CLIENT_OORD_BMSK 0xff00
+#define GSI_GSI_MOQA_CFG_CLIENT_OORD_SHFT 0x8
+#define GSI_GSI_MOQA_CFG_CLIENT_REQ_PRIO_BMSK 0xff
+#define GSI_GSI_MOQA_CFG_CLIENT_REQ_PRIO_SHFT 0x0
+
+#define GSI_GSI_REE_CFG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000038)
+#define GSI_GSI_REE_CFG_RMSK 0xff01
+#define GSI_GSI_REE_CFG_MAX_BURST_SIZE_BMSK 0xff00
+#define GSI_GSI_REE_CFG_MAX_BURST_SIZE_SHFT 0x8
+#define GSI_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_BMSK 0x1
+#define GSI_GSI_REE_CFG_MOVE_TO_ESC_CLR_MODE_TRSH_SHFT 0x0
+
+#define GSI_GSI_SHRAM_WR_WRR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000050)
+#define GSI_GSI_SHRAM_WR_WRR_RMSK 0xffff
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT3_WR_WEIGHT_BMSK 0xf000
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT3_WR_WEIGHT_SHFT 0xc
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT2_WR_WEIGHT_BMSK 0xf00
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT2_WR_WEIGHT_SHFT 0x8
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT1_WR_WEIGHT_BMSK 0xf0
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT1_WR_WEIGHT_SHFT 0x4
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT0_WR_WEIGHT_BMSK 0xf
+#define GSI_GSI_SHRAM_WR_WRR_CLIENT0_WR_WEIGHT_SHFT 0x0
+
+#define GSI_GSI_SHRAM_RD_WRR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000058)
+#define GSI_GSI_SHRAM_RD_WRR_RMSK 0xffffff
+#define GSI_GSI_SHRAM_RD_WRR_ACH_SHRAM_RD_WEIGHT_BMSK 0xf00000
+#define GSI_GSI_SHRAM_RD_WRR_ACH_SHRAM_RD_WEIGHT_SHFT 0x14
+#define GSI_GSI_SHRAM_RD_WRR_IE_SHRAM_RD_WEIGHT_BMSK 0xf0000
+#define GSI_GSI_SHRAM_RD_WRR_IE_SHRAM_RD_WEIGHT_SHFT 0x10
+#define GSI_GSI_SHRAM_RD_WRR_CSR_SHRAM_RD_WEIGHT_BMSK 0xf000
+#define GSI_GSI_SHRAM_RD_WRR_CSR_SHRAM_RD_WEIGHT_SHFT 0xc
+#define GSI_GSI_SHRAM_RD_WRR_RE_CNTXT_SHRAM_RD_WEIGHT_BMSK 0xf00
+#define GSI_GSI_SHRAM_RD_WRR_RE_CNTXT_SHRAM_RD_WEIGHT_SHFT 0x8
+#define GSI_GSI_SHRAM_RD_WRR_MCS_LD_SHRAM_RD_WEIGHT_BMSK 0xf0
+#define GSI_GSI_SHRAM_RD_WRR_MCS_LD_SHRAM_RD_WEIGHT_SHFT 0x4
+#define GSI_GSI_SHRAM_RD_WRR_EV_ENG_SHRAM_RD_WEIGHT_BMSK 0xf
+#define GSI_GSI_SHRAM_RD_WRR_EV_ENG_SHRAM_RD_WEIGHT_SHFT 0x0
+
+#define GSI_GSI_CGC_CTRL_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000060)
+#define GSI_GSI_CGC_CTRL_RMSK 0x3f
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5
+#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10
+#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4
+#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8
+#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_SHFT 0x3
+#define GSI_GSI_CGC_CTRL_REGION_3_HW_CGC_EN_BMSK 0x4
+#define GSI_GSI_CGC_CTRL_REGION_3_HW_CGC_EN_SHFT 0x2
+#define GSI_GSI_CGC_CTRL_REGION_2_HW_CGC_EN_BMSK 0x2
+#define GSI_GSI_CGC_CTRL_REGION_2_HW_CGC_EN_SHFT 0x1
+#define GSI_GSI_CGC_CTRL_REGION_1_HW_CGC_EN_BMSK 0x1
+#define GSI_GSI_CGC_CTRL_REGION_1_HW_CGC_EN_SHFT 0x0
+
+#define GSI_GSI_MSI_CACHEATTR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000080)
+#define GSI_GSI_MSI_CACHEATTR_RMSK 0x3f
+#define GSI_GSI_MSI_CACHEATTR_AREQPRIORITY_BMSK 0x30
+#define GSI_GSI_MSI_CACHEATTR_AREQPRIORITY_SHFT 0x4
+#define GSI_GSI_MSI_CACHEATTR_ATRANSIENT_BMSK 0x8
+#define GSI_GSI_MSI_CACHEATTR_ATRANSIENT_SHFT 0x3
+#define GSI_GSI_MSI_CACHEATTR_ANOALLOCATE_BMSK 0x4
+#define GSI_GSI_MSI_CACHEATTR_ANOALLOCATE_SHFT 0x2
+#define GSI_GSI_MSI_CACHEATTR_AINNERSHARED_BMSK 0x2
+#define GSI_GSI_MSI_CACHEATTR_AINNERSHARED_SHFT 0x1
+#define GSI_GSI_MSI_CACHEATTR_ASHARED_BMSK 0x1
+#define GSI_GSI_MSI_CACHEATTR_ASHARED_SHFT 0x0
+
+#define GSI_GSI_EVENT_CACHEATTR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000084)
+#define GSI_GSI_EVENT_CACHEATTR_RMSK 0x3f
+#define GSI_GSI_EVENT_CACHEATTR_AREQPRIORITY_BMSK 0x30
+#define GSI_GSI_EVENT_CACHEATTR_AREQPRIORITY_SHFT 0x4
+#define GSI_GSI_EVENT_CACHEATTR_ATRANSIENT_BMSK 0x8
+#define GSI_GSI_EVENT_CACHEATTR_ATRANSIENT_SHFT 0x3
+#define GSI_GSI_EVENT_CACHEATTR_ANOALLOCATE_BMSK 0x4
+#define GSI_GSI_EVENT_CACHEATTR_ANOALLOCATE_SHFT 0x2
+#define GSI_GSI_EVENT_CACHEATTR_AINNERSHARED_BMSK 0x2
+#define GSI_GSI_EVENT_CACHEATTR_AINNERSHARED_SHFT 0x1
+#define GSI_GSI_EVENT_CACHEATTR_ASHARED_BMSK 0x1
+#define GSI_GSI_EVENT_CACHEATTR_ASHARED_SHFT 0x0
+
+#define GSI_GSI_DATA_CACHEATTR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000088)
+#define GSI_GSI_DATA_CACHEATTR_RMSK 0x3f
+#define GSI_GSI_DATA_CACHEATTR_AREQPRIORITY_BMSK 0x30
+#define GSI_GSI_DATA_CACHEATTR_AREQPRIORITY_SHFT 0x4
+#define GSI_GSI_DATA_CACHEATTR_ATRANSIENT_BMSK 0x8
+#define GSI_GSI_DATA_CACHEATTR_ATRANSIENT_SHFT 0x3
+#define GSI_GSI_DATA_CACHEATTR_ANOALLOCATE_BMSK 0x4
+#define GSI_GSI_DATA_CACHEATTR_ANOALLOCATE_SHFT 0x2
+#define GSI_GSI_DATA_CACHEATTR_AINNERSHARED_BMSK 0x2
+#define GSI_GSI_DATA_CACHEATTR_AINNERSHARED_SHFT 0x1
+#define GSI_GSI_DATA_CACHEATTR_ASHARED_BMSK 0x1
+#define GSI_GSI_DATA_CACHEATTR_ASHARED_SHFT 0x0
+
+#define GSI_GSI_TRE_CACHEATTR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000090)
+#define GSI_GSI_TRE_CACHEATTR_RMSK 0x3f
+#define GSI_GSI_TRE_CACHEATTR_AREQPRIORITY_BMSK 0x30
+#define GSI_GSI_TRE_CACHEATTR_AREQPRIORITY_SHFT 0x4
+#define GSI_GSI_TRE_CACHEATTR_ATRANSIENT_BMSK 0x8
+#define GSI_GSI_TRE_CACHEATTR_ATRANSIENT_SHFT 0x3
+#define GSI_GSI_TRE_CACHEATTR_ANOALLOCATE_BMSK 0x4
+#define GSI_GSI_TRE_CACHEATTR_ANOALLOCATE_SHFT 0x2
+#define GSI_GSI_TRE_CACHEATTR_AINNERSHARED_BMSK 0x2
+#define GSI_GSI_TRE_CACHEATTR_AINNERSHARED_SHFT 0x1
+#define GSI_GSI_TRE_CACHEATTR_ASHARED_BMSK 0x1
+#define GSI_GSI_TRE_CACHEATTR_ASHARED_SHFT 0x0
+
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a0)
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a4)
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_DISABLE_CHNL_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000a8)
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_GEN_EVNT_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000ac)
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_GEN_EVNT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b0)
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_GEN_INT_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b4)
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_GEN_INT_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000b8)
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000bc)
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_STOP_INT_MOD_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c0)
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_PROCESS_DESC_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c4)
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_PROCESS_DESC_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000c8)
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_TLV_STOP_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000cc)
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_TLV_STOP_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d0)
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_TLV_RESET_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d4)
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_TLV_RESET_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000d8)
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000dc)
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_READ_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e0)
+#define GSI_IC_READ_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_READ_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_READ_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_READ_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_READ_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_READ_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_READ_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e4)
+#define GSI_IC_READ_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_READ_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_READ_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_READ_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_READ_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_WRITE_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000e8)
+#define GSI_IC_WRITE_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_WRITE_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_WRITE_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_WRITE_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_WRITE_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_WRITE_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_WRITE_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000ec)
+#define GSI_IC_WRITE_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_WRITE_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_WRITE_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_WRITE_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_WRITE_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000f0)
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_RMSK 0x3ffc1047
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_BMSK 0x3f000000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_TLV_INT_SHFT 0x18
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_BMSK 0xfc0000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_CSR_INT_SHFT 0x12
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_BMSK 0x1000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_INT_END_INT_SHFT 0xc
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_BMSK 0x40
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_EV_ENG_INT_SHFT 0x6
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_BMSK 0x7
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_LSB_REE_INT_SHFT 0x0
+
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x000000f4)
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RMSK 0xfc3041
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_BMSK 0xfc0000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_UCONTROLLER_INT_SHFT 0x12
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_BMSK 0x3000
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_RD_WR_INT_SHFT 0xc
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_BMSK 0x40
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_DB_ENG_INT_SHFT 0x6
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_BMSK 0x1
+#define GSI_IC_UCONTROLLER_GPR_BCK_PRS_MSB_TIMER_INT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_REE_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000100)
+#define GSI_IC_INT_WEIGHT_REE_RMSK 0xfff
+#define GSI_IC_INT_WEIGHT_REE_CH_EMPTY_INT_WEIGHT_BMSK 0xf00
+#define GSI_IC_INT_WEIGHT_REE_CH_EMPTY_INT_WEIGHT_SHFT 0x8
+#define GSI_IC_INT_WEIGHT_REE_NEW_RE_INT_WEIGHT_BMSK 0xf0
+#define GSI_IC_INT_WEIGHT_REE_NEW_RE_INT_WEIGHT_SHFT 0x4
+#define GSI_IC_INT_WEIGHT_REE_STOP_CH_COMP_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_REE_STOP_CH_COMP_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_EVT_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000104)
+#define GSI_IC_INT_WEIGHT_EVT_ENG_RMSK 0xf
+#define GSI_IC_INT_WEIGHT_EVT_ENG_EVNT_ENG_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_EVT_ENG_EVNT_ENG_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_INT_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000108)
+#define GSI_IC_INT_WEIGHT_INT_ENG_RMSK 0xf
+#define GSI_IC_INT_WEIGHT_INT_ENG_INT_ENG_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_INT_ENG_INT_ENG_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_CSR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000010c)
+#define GSI_IC_INT_WEIGHT_CSR_RMSK 0xffffff
+#define GSI_IC_INT_WEIGHT_CSR_CH_START_CMD_INT_WEIGHT_BMSK 0xf00000
+#define GSI_IC_INT_WEIGHT_CSR_CH_START_CMD_INT_WEIGHT_SHFT 0x14
+#define GSI_IC_INT_WEIGHT_CSR_CH_STOP_CMD_INT_WEIGHT_BMSK 0xf0000
+#define GSI_IC_INT_WEIGHT_CSR_CH_STOP_CMD_INT_WEIGHT_SHFT 0x10
+#define GSI_IC_INT_WEIGHT_CSR_CH_RESET_CMD_INT_WEIGHT_BMSK 0xf000
+#define GSI_IC_INT_WEIGHT_CSR_CH_RESET_CMD_INT_WEIGHT_SHFT 0xc
+#define GSI_IC_INT_WEIGHT_CSR_CH_ALLOC_CMD_INT_WEIGHT_BMSK 0xf00
+#define GSI_IC_INT_WEIGHT_CSR_CH_ALLOC_CMD_INT_WEIGHT_SHFT 0x8
+#define GSI_IC_INT_WEIGHT_CSR_EV_RESET_CMD_INT_WEIGHT_BMSK 0xf0
+#define GSI_IC_INT_WEIGHT_CSR_EV_RESET_CMD_INT_WEIGHT_SHFT 0x4
+#define GSI_IC_INT_WEIGHT_CSR_EV_ALLOC_CMD_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_CSR_EV_ALLOC_CMD_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_TLV_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000110)
+#define GSI_IC_INT_WEIGHT_TLV_ENG_RMSK 0xf
+#define GSI_IC_INT_WEIGHT_TLV_ENG_TLV_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_TLV_ENG_TLV_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_TIMER_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000114)
+#define GSI_IC_INT_WEIGHT_TIMER_ENG_RMSK 0xf
+#define GSI_IC_INT_WEIGHT_TIMER_ENG_TIMER_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_TIMER_ENG_TIMER_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_DB_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000118)
+#define GSI_IC_INT_WEIGHT_DB_ENG_RMSK 0xf
+#define GSI_IC_INT_WEIGHT_DB_ENG_NEW_DB_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_DB_ENG_NEW_DB_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_RD_WR_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000011c)
+#define GSI_IC_INT_WEIGHT_RD_WR_ENG_RMSK 0xff
+#define GSI_IC_INT_WEIGHT_RD_WR_ENG_WRITE_INT_WEIGHT_BMSK 0xf0
+#define GSI_IC_INT_WEIGHT_RD_WR_ENG_WRITE_INT_WEIGHT_SHFT 0x4
+#define GSI_IC_INT_WEIGHT_RD_WR_ENG_READ_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_RD_WR_ENG_READ_INT_WEIGHT_SHFT 0x0
+
+#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000120)
+#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_RMSK 0xf
+#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_GP_INT_WEIGHT_BMSK 0xf
+#define GSI_IC_INT_WEIGHT_UCONTROLLER_ENG_GP_INT_WEIGHT_SHFT 0x0
+
+#define GSI_GSI_MANAGER_EE_QOS_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000300 + 0x4 * (n))
+#define GSI_GSI_MANAGER_EE_QOS_n_RMSK 0x1f1f03
+#define GSI_GSI_MANAGER_EE_QOS_n_MAXn 3
+#define GSI_GSI_MANAGER_EE_QOS_n_MAX_EV_ALLOC_BMSK 0x1f0000
+#define GSI_GSI_MANAGER_EE_QOS_n_MAX_EV_ALLOC_SHFT 0x10
+#define GSI_GSI_MANAGER_EE_QOS_n_MAX_CH_ALLOC_BMSK 0x1f00
+#define GSI_GSI_MANAGER_EE_QOS_n_MAX_CH_ALLOC_SHFT 0x8
+#define GSI_GSI_MANAGER_EE_QOS_n_EE_PRIO_BMSK 0x3
+#define GSI_GSI_MANAGER_EE_QOS_n_EE_PRIO_SHFT 0x0
+
+#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000200)
+#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_RMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_CH_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000204)
+#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_RMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_BMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_EV_CNTXT_BASE_ADDR_SHRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000208)
+#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_RMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_BMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_RE_STORAGE_BASE_ADDR_SHRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000020c)
+#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_RMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_BMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_RE_ESC_BUF_BASE_ADDR_SHRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000240)
+#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_RMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_BMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_EE_SCRACH_BASE_ADDR_SHRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000244)
+#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_RMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_BMSK 0xffff
+#define GSI_GSI_SHRAM_PTR_FUNC_STACK_BASE_ADDR_SHRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_CMD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000400)
+#define GSI_GSI_IRAM_PTR_CH_CMD_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_CMD_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000404)
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_DB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000418)
+#define GSI_GSI_IRAM_PTR_CH_DB_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DB_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EV_DB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000041c)
+#define GSI_GSI_IRAM_PTR_EV_DB_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EV_DB_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_NEW_RE_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000420)
+#define GSI_GSI_IRAM_PTR_NEW_RE_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_NEW_RE_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000424)
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_DIS_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000428)
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_CH_EMPTY_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000042c)
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000430)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000434)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000438)
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000440)
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_WRITE_ENG_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000444)
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_READ_ENG_COMP_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000448)
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_UC_GP_INT_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000044c)
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_INT_MOD_STOPED_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_INST_RAM_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00004000 + 0x4 * (n))
+#define GSI_GSI_INST_RAM_n_RMSK 0xffffffff
+#define GSI_GSI_INST_RAM_n_MAXn 4095
+#define GSI_GSI_INST_RAM_n_INST_BYTE_3_BMSK 0xff000000
+#define GSI_GSI_INST_RAM_n_INST_BYTE_3_SHFT 0x18
+#define GSI_GSI_INST_RAM_n_INST_BYTE_2_BMSK 0xff0000
+#define GSI_GSI_INST_RAM_n_INST_BYTE_2_SHFT 0x10
+#define GSI_GSI_INST_RAM_n_INST_BYTE_1_BMSK 0xff00
+#define GSI_GSI_INST_RAM_n_INST_BYTE_1_SHFT 0x8
+#define GSI_GSI_INST_RAM_n_INST_BYTE_0_BMSK 0xff
+#define GSI_GSI_INST_RAM_n_INST_BYTE_0_SHFT 0x0
+
+#define GSI_GSI_SHRAM_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00002000 + 0x4 * (n))
+#define GSI_GSI_SHRAM_n_RMSK 0xffffffff
+#define GSI_GSI_SHRAM_n_MAXn 1023
+#define GSI_GSI_SHRAM_n_SHRAM_BMSK 0xffffffff
+#define GSI_GSI_SHRAM_n_SHRAM_SHFT 0x0
+
+#define GSI_GSI_TEST_BUS_SEL_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001000)
+#define GSI_GSI_TEST_BUS_SEL_RMSK 0xff
+#define GSI_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_BMSK 0xff
+#define GSI_GSI_TEST_BUS_SEL_GSI_TESTBUS_SEL_SHFT 0x0
+
+#define GSI_GSI_TEST_BUS_REG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001008)
+#define GSI_GSI_TEST_BUS_REG_RMSK 0xffffffff
+#define GSI_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_BMSK 0xffffffff
+#define GSI_GSI_TEST_BUS_REG_GSI_TESTBUS_REG_SHFT 0x0
+
+#define GSI_GSI_DEBUG_BUSY_REG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001010)
+#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
+#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
+#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
+#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
+#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_SHFT 0x5
+#define GSI_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_BMSK 0x10
+#define GSI_GSI_DEBUG_BUSY_REG_RD_WR_BUSY_SHFT 0x4
+#define GSI_GSI_DEBUG_BUSY_REG_TIMER_BUSY_BMSK 0x8
+#define GSI_GSI_DEBUG_BUSY_REG_TIMER_BUSY_SHFT 0x3
+#define GSI_GSI_DEBUG_BUSY_REG_MCS_BUSY_BMSK 0x4
+#define GSI_GSI_DEBUG_BUSY_REG_MCS_BUSY_SHFT 0x2
+#define GSI_GSI_DEBUG_BUSY_REG_REE_BUSY_BMSK 0x2
+#define GSI_GSI_DEBUG_BUSY_REG_REE_BUSY_SHFT 0x1
+#define GSI_GSI_DEBUG_BUSY_REG_CSR_BUSY_BMSK 0x1
+#define GSI_GSI_DEBUG_BUSY_REG_CSR_BUSY_SHFT 0x0
+
+#define GSI_GSI_DEBUG_COUNTER_CFGn_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001200 + 0x4 * (n))
+#define GSI_GSI_DEBUG_COUNTER_CFGn_RMSK 0x3ffffff7
+#define GSI_GSI_DEBUG_COUNTER_CFGn_MAXn 7
+#define GSI_GSI_DEBUG_COUNTER_CFGn_TIMER_VALUE_BMSK 0x3ff80000
+#define GSI_GSI_DEBUG_COUNTER_CFGn_TIMER_VALUE_SHFT 0x13
+#define GSI_GSI_DEBUG_COUNTER_CFGn_VIRTUAL_CHNL_BMSK 0x7f000
+#define GSI_GSI_DEBUG_COUNTER_CFGn_VIRTUAL_CHNL_SHFT 0xc
+#define GSI_GSI_DEBUG_COUNTER_CFGn_EE_BMSK 0xf00
+#define GSI_GSI_DEBUG_COUNTER_CFGn_EE_SHFT 0x8
+#define GSI_GSI_DEBUG_COUNTER_CFGn_EVNT_TYPE_BMSK 0xf0
+#define GSI_GSI_DEBUG_COUNTER_CFGn_EVNT_TYPE_SHFT 0x4
+#define GSI_GSI_DEBUG_COUNTER_CFGn_CLR_AT_READ_BMSK 0x4
+#define GSI_GSI_DEBUG_COUNTER_CFGn_CLR_AT_READ_SHFT 0x2
+#define GSI_GSI_DEBUG_COUNTER_CFGn_STOP_AT_WRAP_ARND_BMSK 0x2
+#define GSI_GSI_DEBUG_COUNTER_CFGn_STOP_AT_WRAP_ARND_SHFT 0x1
+#define GSI_GSI_DEBUG_COUNTER_CFGn_ENABLE_BMSK 0x1
+#define GSI_GSI_DEBUG_COUNTER_CFGn_ENABLE_SHFT 0x0
+
+#define GSI_GSI_DEBUG_COUNTERn_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001240 + 0x4 * (n))
+#define GSI_GSI_DEBUG_COUNTERn_RMSK 0xffff
+#define GSI_GSI_DEBUG_COUNTERn_MAXn 7
+#define GSI_GSI_DEBUG_COUNTERn_COUNTER_VALUE_BMSK 0xffff
+#define GSI_GSI_DEBUG_COUNTERn_COUNTER_VALUE_SHFT 0x0
+
+#define GSI_GSI_DEBUG_PC_FROM_SW_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001040)
+#define GSI_GSI_DEBUG_PC_FROM_SW_RMSK 0xfff
+#define GSI_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_DEBUG_PC_FROM_SW_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_DEBUG_SW_STALL_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001044)
+#define GSI_GSI_DEBUG_SW_STALL_RMSK 0x1
+#define GSI_GSI_DEBUG_SW_STALL_MCS_STALL_BMSK 0x1
+#define GSI_GSI_DEBUG_SW_STALL_MCS_STALL_SHFT 0x0
+
+#define GSI_GSI_DEBUG_PC_FOR_DEBUG_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001048)
+#define GSI_GSI_DEBUG_PC_FOR_DEBUG_RMSK 0xfff
+#define GSI_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_DEBUG_PC_FOR_DEBUG_IRAM_PTR_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001050)
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_RMSK 0xffff01
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_MID_BMSK 0xff0000
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_MID_SHFT 0x10
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_TID_BMSK 0xff00
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_TID_SHFT 0x8
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_WRITE_BMSK 0x1
+#define GSI_GSI_DEBUG_QSB_LOG_SEL_SEL_WRITE_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_CLR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001058)
+#define GSI_GSI_DEBUG_QSB_LOG_CLR_RMSK 0x1
+#define GSI_GSI_DEBUG_QSB_LOG_CLR_LOG_CLR_BMSK 0x1
+#define GSI_GSI_DEBUG_QSB_LOG_CLR_LOG_CLR_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001060)
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_RMSK 0x1ffff01
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_BMSK 0x1000000
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_SAVED_SHFT 0x18
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_BMSK 0xff0000
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_MID_SHFT 0x10
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_BMSK 0xff00
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_TID_SHFT 0x8
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_BMSK 0x1
+#define GSI_GSI_DEBUG_QSB_LOG_ERR_TRNS_ID_ERR_WRITE_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_0_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001064)
+#define GSI_GSI_DEBUG_QSB_LOG_0_RMSK 0xffffffff
+#define GSI_GSI_DEBUG_QSB_LOG_0_ADDR_31_0_BMSK 0xffffffff
+#define GSI_GSI_DEBUG_QSB_LOG_0_ADDR_31_0_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_1_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001068)
+#define GSI_GSI_DEBUG_QSB_LOG_1_RMSK 0xfff7ffff
+#define GSI_GSI_DEBUG_QSB_LOG_1_AREQPRIORITY_BMSK 0xf0000000
+#define GSI_GSI_DEBUG_QSB_LOG_1_AREQPRIORITY_SHFT 0x1c
+#define GSI_GSI_DEBUG_QSB_LOG_1_ASIZE_BMSK 0xf000000
+#define GSI_GSI_DEBUG_QSB_LOG_1_ASIZE_SHFT 0x18
+#define GSI_GSI_DEBUG_QSB_LOG_1_ALEN_BMSK 0xf00000
+#define GSI_GSI_DEBUG_QSB_LOG_1_ALEN_SHFT 0x14
+#define GSI_GSI_DEBUG_QSB_LOG_1_AOOOWR_BMSK 0x40000
+#define GSI_GSI_DEBUG_QSB_LOG_1_AOOOWR_SHFT 0x12
+#define GSI_GSI_DEBUG_QSB_LOG_1_AOOORD_BMSK 0x20000
+#define GSI_GSI_DEBUG_QSB_LOG_1_AOOORD_SHFT 0x11
+#define GSI_GSI_DEBUG_QSB_LOG_1_ATRANSIENT_BMSK 0x10000
+#define GSI_GSI_DEBUG_QSB_LOG_1_ATRANSIENT_SHFT 0x10
+#define GSI_GSI_DEBUG_QSB_LOG_1_ACACHEABLE_BMSK 0x8000
+#define GSI_GSI_DEBUG_QSB_LOG_1_ACACHEABLE_SHFT 0xf
+#define GSI_GSI_DEBUG_QSB_LOG_1_ASHARED_BMSK 0x4000
+#define GSI_GSI_DEBUG_QSB_LOG_1_ASHARED_SHFT 0xe
+#define GSI_GSI_DEBUG_QSB_LOG_1_ANOALLOCATE_BMSK 0x2000
+#define GSI_GSI_DEBUG_QSB_LOG_1_ANOALLOCATE_SHFT 0xd
+#define GSI_GSI_DEBUG_QSB_LOG_1_AINNERSHARED_BMSK 0x1000
+#define GSI_GSI_DEBUG_QSB_LOG_1_AINNERSHARED_SHFT 0xc
+#define GSI_GSI_DEBUG_QSB_LOG_1_ADDR_43_32_BMSK 0xfff
+#define GSI_GSI_DEBUG_QSB_LOG_1_ADDR_43_32_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_2_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000106c)
+#define GSI_GSI_DEBUG_QSB_LOG_2_RMSK 0xffff
+#define GSI_GSI_DEBUG_QSB_LOG_2_AMEMTYPE_BMSK 0xf000
+#define GSI_GSI_DEBUG_QSB_LOG_2_AMEMTYPE_SHFT 0xc
+#define GSI_GSI_DEBUG_QSB_LOG_2_AMMUSID_BMSK 0xfff
+#define GSI_GSI_DEBUG_QSB_LOG_2_AMMUSID_SHFT 0x0
+
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001070 + 0x4 * (n))
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_RMSK 0xffffffff
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MAXn 3
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MID_BMSK 0xf8000000
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_MID_SHFT 0x1b
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_TID_BMSK 0x7c00000
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_TID_SHFT 0x16
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_WRITE_BMSK 0x200000
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_WRITE_SHFT 0x15
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR_20_0_BMSK 0x1fffff
+#define GSI_GSI_DEBUG_QSB_LOG_LAST_MISC_IDn_ADDR_20_0_SHFT 0x0
+
+#define GSI_GSI_DEBUG_SW_RF_n_WRITE_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001080 + 0x4 * (n))
+#define GSI_GSI_DEBUG_SW_RF_n_WRITE_RMSK 0xffffffff
+#define GSI_GSI_DEBUG_SW_RF_n_WRITE_MAXn 31
+#define GSI_GSI_DEBUG_SW_RF_n_WRITE_DATA_IN_BMSK 0xffffffff
+#define GSI_GSI_DEBUG_SW_RF_n_WRITE_DATA_IN_SHFT 0x0
+
+#define GSI_GSI_DEBUG_SW_RF_n_READ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001100 + 0x4 * (n))
+#define GSI_GSI_DEBUG_SW_RF_n_READ_RMSK 0xffffffff
+#define GSI_GSI_DEBUG_SW_RF_n_READ_MAXn 31
+#define GSI_GSI_DEBUG_SW_RF_n_READ_RF_REG_BMSK 0xffffffff
+#define GSI_GSI_DEBUG_SW_RF_n_READ_RF_REG_SHFT 0x0
+
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001400 + 0x80 * (n) + 0x4 * (k))
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_RMSK 0x3f
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_MAXk 30
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_MAXn 3
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_BMSK 0x20
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_VALID_SHFT 0x5
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_BMSK 0x1f
+#define GSI_GSI_DEBUG_EE_n_CH_k_VP_TABLE_PHY_CH_SHFT 0x0
+
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00001600 + 0x80 * (n) + 0x4 * (k))
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_RMSK 0x3f
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXk 15
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_MAXn 3
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_BMSK 0x20
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_VALID_SHFT 0x5
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_BMSK 0x1f
+#define GSI_GSI_DEBUG_EE_n_EV_k_VP_TABLE_PHY_EV_CH_SHFT 0x0
+
+#define GSI_GSI_UC_SRC_IRQ_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000500)
+#define GSI_GSI_UC_SRC_IRQ_RMSK 0xf
+#define GSI_GSI_UC_SRC_IRQ_IC_2_UC_MCS_INT_VLD_BMSK 0x8
+#define GSI_GSI_UC_SRC_IRQ_IC_2_UC_MCS_INT_VLD_SHFT 0x3
+#define GSI_GSI_UC_SRC_IRQ_ACC_2_UC_MCS_GO_ACK_BMSK 0x4
+#define GSI_GSI_UC_SRC_IRQ_ACC_2_UC_MCS_GO_ACK_SHFT 0x2
+#define GSI_GSI_UC_SRC_IRQ_UC_ACC_CMPLT_BMSK 0x2
+#define GSI_GSI_UC_SRC_IRQ_UC_ACC_CMPLT_SHFT 0x1
+#define GSI_GSI_UC_SRC_IRQ_UC_ACC_GO_BMSK 0x1
+#define GSI_GSI_UC_SRC_IRQ_UC_ACC_GO_SHFT 0x0
+
+#define GSI_GSI_UC_SRC_IRQ_MSK_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000504)
+#define GSI_GSI_UC_SRC_IRQ_MSK_RMSK 0xf
+#define GSI_GSI_UC_SRC_IRQ_MSK_IC_2_UC_MCS_INT_VLD_BMSK 0x8
+#define GSI_GSI_UC_SRC_IRQ_MSK_IC_2_UC_MCS_INT_VLD_SHFT 0x3
+#define GSI_GSI_UC_SRC_IRQ_MSK_ACC_2_UC_MCS_GO_ACK_BMSK 0x4
+#define GSI_GSI_UC_SRC_IRQ_MSK_ACC_2_UC_MCS_GO_ACK_SHFT 0x2
+#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_CMPLT_BMSK 0x2
+#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_CMPLT_SHFT 0x1
+#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_GO_BMSK 0x1
+#define GSI_GSI_UC_SRC_IRQ_MSK_UC_ACC_GO_SHFT 0x0
+
+#define GSI_GSI_UC_SRC_IRQ_CLR_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000508)
+#define GSI_GSI_UC_SRC_IRQ_CLR_RMSK 0xf
+#define GSI_GSI_UC_SRC_IRQ_CLR_IC_2_UC_MCS_INT_VLD_BMSK 0x8
+#define GSI_GSI_UC_SRC_IRQ_CLR_IC_2_UC_MCS_INT_VLD_SHFT 0x3
+#define GSI_GSI_UC_SRC_IRQ_CLR_ACC_2_UC_MCS_GO_ACK_BMSK 0x4
+#define GSI_GSI_UC_SRC_IRQ_CLR_ACC_2_UC_MCS_GO_ACK_SHFT 0x2
+#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_CMPLT_BMSK 0x2
+#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_CMPLT_SHFT 0x1
+#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_GO_BMSK 0x1
+#define GSI_GSI_UC_SRC_IRQ_CLR_UC_ACC_GO_SHFT 0x0
+
+#define GSI_GSI_ACC_ARGS_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000050c + 0x4 * (n))
+#define GSI_GSI_ACC_ARGS_n_RMSK 0xffffffff
+#define GSI_GSI_ACC_ARGS_n_MAXn 5
+#define GSI_GSI_ACC_ARGS_n_GSI_ACC_ARGS_BMSK 0xffffffff
+#define GSI_GSI_ACC_ARGS_n_GSI_ACC_ARGS_SHFT 0x0
+
+#define GSI_GSI_ACC_ROUTINE_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000524)
+#define GSI_GSI_ACC_ROUTINE_RMSK 0xffffffff
+#define GSI_GSI_ACC_ROUTINE_GSI_ACC_ROUTINE_BMSK 0xffffffff
+#define GSI_GSI_ACC_ROUTINE_GSI_ACC_ROUTINE_SHFT 0x0
+
+#define GSI_GSI_ACC_GO_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000528)
+#define GSI_GSI_ACC_GO_RMSK 0x7f
+#define GSI_GSI_ACC_GO_TIMER_GO_BMSK 0x40
+#define GSI_GSI_ACC_GO_TIMER_GO_SHFT 0x6
+#define GSI_GSI_ACC_GO_RW_ENG_GO_BMSK 0x20
+#define GSI_GSI_ACC_GO_RW_ENG_GO_SHFT 0x5
+#define GSI_GSI_ACC_GO_INT_ENG_GO_BMSK 0x10
+#define GSI_GSI_ACC_GO_INT_ENG_GO_SHFT 0x4
+#define GSI_GSI_ACC_GO_TLV_OUT_GO_BMSK 0x8
+#define GSI_GSI_ACC_GO_TLV_OUT_GO_SHFT 0x3
+#define GSI_GSI_ACC_GO_CSR_GO_BMSK 0x4
+#define GSI_GSI_ACC_GO_CSR_GO_SHFT 0x2
+#define GSI_GSI_ACC_GO_RE_ENG_GO_BMSK 0x2
+#define GSI_GSI_ACC_GO_RE_ENG_GO_SHFT 0x1
+#define GSI_GSI_ACC_GO_EV_ENG_GO_BMSK 0x1
+#define GSI_GSI_ACC_GO_EV_ENG_GO_SHFT 0x0
+
+#define GSI_GSI_ACC_2_UC_MCS_STTS_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000052c)
+#define GSI_GSI_ACC_2_UC_MCS_STTS_RMSK 0xffffffff
+#define GSI_GSI_ACC_2_UC_MCS_STTS_GSI_ACC_2_UC_MCS_STTS_BMSK 0xffffffff
+#define GSI_GSI_ACC_2_UC_MCS_STTS_GSI_ACC_2_UC_MCS_STTS_SHFT 0x0
+
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000530)
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_RMSK 0xffffffff
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_GSI_ACC_2_UC_MCS_RET_VAL_BMSK \
+	0xffffffff
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_LSB_GSI_ACC_2_UC_MCS_RET_VAL_SHFT \
+	0x0
+
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000534)
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_RMSK 0xffffffff
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_GSI_ACC_2_UC_MCS_RET_VAL_BMSK \
+	0xffffffff
+#define GSI_GSI_ACC_2_UC_MCS_RET_VAL_MSB_GSI_ACC_2_UC_MCS_RET_VAL_SHFT \
+	0x0
+
+#define GSI_GSI_IC_2_UC_MCS_VLD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000538)
+#define GSI_GSI_IC_2_UC_MCS_VLD_RMSK 0xffffffff
+#define GSI_GSI_IC_2_UC_MCS_VLD_GSI_IC_2_UC_MCS_VLD_BMSK 0xffffffff
+#define GSI_GSI_IC_2_UC_MCS_VLD_GSI_IC_2_UC_MCS_VLD_SHFT 0x0
+
+#define GSI_GSI_IC_2_UC_MCS_PC_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000053c)
+#define GSI_GSI_IC_2_UC_MCS_PC_RMSK 0xffffffff
+#define GSI_GSI_IC_2_UC_MCS_PC_GSI_IC_2_UC_MCS_PC_BMSK 0xffffffff
+#define GSI_GSI_IC_2_UC_MCS_PC_GSI_IC_2_UC_MCS_PC_SHFT 0x0
+
+#define GSI_GSI_IC_2_UC_MCS_ARGS_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000540 + 0x4 * (n))
+#define GSI_GSI_IC_2_UC_MCS_ARGS_n_RMSK 0xffffffff
+#define GSI_GSI_IC_2_UC_MCS_ARGS_n_MAXn 5
+#define GSI_GSI_IC_2_UC_MCS_ARGS_n_GSI_IC_2_UC_MCS_ARGS_BMSK 0xffffffff
+#define GSI_GSI_IC_2_UC_MCS_ARGS_n_GSI_IC_2_UC_MCS_ARGS_SHFT 0x0
+
+#define GSI_GSI_UC_TLV_IN_VLD_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000558)
+#define GSI_GSI_UC_TLV_IN_VLD_RMSK 0x1
+#define GSI_GSI_UC_TLV_IN_VLD_GSI_UC_TLV_IN_VLD_BMSK 0x1
+#define GSI_GSI_UC_TLV_IN_VLD_GSI_UC_TLV_IN_VLD_SHFT 0x0
+
+#define GSI_GSI_UC_TLV_IN_ROUTINE_OFFS \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000055c)
+#define GSI_GSI_UC_TLV_IN_ROUTINE_RMSK 0xffffffff
+#define GSI_GSI_UC_TLV_IN_ROUTINE_GSI_UC_TLV_IN_ROUTINE_BMSK 0xffffffff
+#define GSI_GSI_UC_TLV_IN_ROUTINE_GSI_UC_TLV_IN_ROUTINE_SHFT 0x0
+
+#define GSI_GSI_UC_TLV_IN_ARGS_n_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x00000560 + 0x4 * (n))
+#define GSI_GSI_UC_TLV_IN_ARGS_n_RMSK 0xffffffff
+#define GSI_GSI_UC_TLV_IN_ARGS_n_MAXn 5
+#define GSI_GSI_UC_TLV_IN_ARGS_n_GSI_UC_TLV_IN_ARGS_BMSK 0xffffffff
+#define GSI_GSI_UC_TLV_IN_ARGS_n_GSI_UC_TLV_IN_ARGS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c000 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_RMSK 0xfff7dfff
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_BMSK 0x7c000
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_ERINDEX_SHFT 0xe
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_BMSK 0x1f00
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHID_SHFT 0x8
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_EE_SHFT 0x4
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_BMSK 0x8
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_DIR_SHFT 0x3
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_BMSK 0x7
+#define GSI_EE_n_GSI_CH_k_CNTXT_0_CHTYPE_PROTOCOL_SHFT 0x0
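+
+/*
+ * Illustrative sketch (not part of the register map): reading the channel
+ * state of virtual channel "ch" owned by execution environment "ee". Note
+ * the (k, n) argument order of the parameterized offset macros, i.e.
+ * channel index first, EE index second:
+ *
+ *   u32 ctx0 = readl_relaxed(gsi_base +
+ *                            GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(ch, ee));
+ *   u32 chstate = (ctx0 & GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_BMSK) >>
+ *                 GSI_EE_n_GSI_CH_k_CNTXT_0_CHSTATE_SHFT;
+ */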
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c008 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c00c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c010 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c014 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c018 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c01c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_MAXk 30
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_MAXn 3
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c054 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXk 30
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_MAXn 3
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_READ_PTR_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c058 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXk 30
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_MAXn 3
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_BMSK 0xffff
+#define GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_RE_INTR_DB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_QOS_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c05c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_QOS_RMSK 0x303
+#define GSI_EE_n_GSI_CH_k_QOS_MAXk 30
+#define GSI_EE_n_GSI_CH_k_QOS_MAXn 3
+#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_BMSK 0x200
+#define GSI_EE_n_GSI_CH_k_QOS_USE_DB_ENG_SHFT 0x9
+#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_BMSK 0x100
+#define GSI_EE_n_GSI_CH_k_QOS_MAX_PREFETCH_SHFT 0x8
+#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_BMSK 0xf
+#define GSI_EE_n_GSI_CH_k_QOS_WRR_WEIGHT_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c060 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_MAXk 30
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_MAXn 3
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c064 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_MAXk 30
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_MAXn 3
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c068 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_MAXk 30
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_MAXn 3
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_2_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001c06c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_MAXk 30
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_MAXn 3
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_SCRATCH_3_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d000 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_0_RMSK 0xfff1ffff
+#define GSI_EE_n_EV_CH_k_CNTXT_0_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_0_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_ELEMENT_SIZE_SHFT 0x18
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK 0xf00000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT 0x14
+#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_BMSK 0x10000
+#define GSI_EE_n_EV_CH_k_CNTXT_0_INTYPE_SHFT 0x10
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_BMSK 0xff00
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EVCHID_SHFT 0x8
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_BMSK 0xf0
+#define GSI_EE_n_EV_CH_k_CNTXT_0_EE_SHFT 0x4
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_BMSK 0xf
+#define GSI_EE_n_EV_CH_k_CNTXT_0_CHTYPE_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d004 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_1_RMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_1_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_1_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_BMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_1_R_LENGTH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d008 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_2_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_2_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_2_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_2_R_BASE_ADDR_LSBS_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d00c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_3_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_3_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_3_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_3_R_BASE_ADDR_MSBS_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d010 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_4_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_4_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_4_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_4_READ_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d014 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_5_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_5_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_5_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_5_READ_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d018 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_6_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_6_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_6_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_6_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d01c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_7_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_7_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_7_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_7_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d020 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_8_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_8_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_8_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MOD_CNT_SHFT 0x18
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_BMSK 0xff0000
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODC_SHFT 0x10
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_BMSK 0xffff
+#define GSI_EE_n_EV_CH_k_CNTXT_8_INT_MODT_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d024 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_9_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_9_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_9_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_9_INTVEC_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d028 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_10_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_10_MSI_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d02c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_11_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_11_MSI_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d030 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_12_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_12_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_12_RP_UPDATE_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d034 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_13_MAXk 15
+#define GSI_EE_n_EV_CH_k_CNTXT_13_MAXn 3
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_CNTXT_13_RP_UPDATE_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d048 + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_MAXk 15
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_MAXn 3
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001d04c + 0x4000 * (n) + 0x80 * (k))
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_MAXk 15
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_MAXn 3
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e000 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_MAXk 30
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_MAXn 3
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e004 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_RMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_MAXk 30
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_MAXn 3
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_GSI_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0
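+
+/*
+ * Illustrative sketch only: ringing the doorbell of channel "ch" in EE
+ * "ee" by writing the low 32 bits of the updated write pointer
+ * ("wp_lsb" is a placeholder for the new write-pointer value):
+ *
+ *   writel_relaxed(wp_lsb,
+ *                  gsi_base + GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(ch, ee));
+ */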
+
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e100 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_MAXk 15
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_MAXn 3
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT 0x0
+
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_OFFS(k, n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001e104 + 0x4000 * (n) + 0x8 * (k))
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_RMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_MAXk 15
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_MAXn 3
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_EV_CH_k_DOORBELL_1_WRITE_PTR_MSB_SHFT 0x0
+
+#define GSI_EE_n_GSI_STATUS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f000 + 0x4000 * (n))
+#define GSI_EE_n_GSI_STATUS_RMSK 0x1
+#define GSI_EE_n_GSI_STATUS_MAXn 3
+#define GSI_EE_n_GSI_STATUS_ENABLED_BMSK 0x1
+#define GSI_EE_n_GSI_STATUS_ENABLED_SHFT 0x0
+
+#define GSI_EE_n_GSI_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f008 + 0x4000 * (n))
+#define GSI_EE_n_GSI_CH_CMD_RMSK 0xff0000ff
+#define GSI_EE_n_GSI_CH_CMD_MAXn 3
+#define GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_EE_n_GSI_CH_CMD_CHID_BMSK 0xff
+#define GSI_EE_n_GSI_CH_CMD_CHID_SHFT 0x0
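+
+/*
+ * Illustrative sketch only: issuing a channel command for channel "chid"
+ * from EE "ee". The command opcode encodings are not defined in this
+ * header, so "opcode" below is a placeholder:
+ *
+ *   u32 cmd = ((opcode << GSI_EE_n_GSI_CH_CMD_OPCODE_SHFT) &
+ *              GSI_EE_n_GSI_CH_CMD_OPCODE_BMSK) |
+ *             ((chid << GSI_EE_n_GSI_CH_CMD_CHID_SHFT) &
+ *              GSI_EE_n_GSI_CH_CMD_CHID_BMSK);
+ *   writel_relaxed(cmd, gsi_base + GSI_EE_n_GSI_CH_CMD_OFFS(ee));
+ */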
+
+#define GSI_EE_n_EV_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f010 + 0x4000 * (n))
+#define GSI_EE_n_EV_CH_CMD_RMSK 0xff0000ff
+#define GSI_EE_n_EV_CH_CMD_MAXn 3
+#define GSI_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_EE_n_EV_CH_CMD_CHID_BMSK 0xff
+#define GSI_EE_n_EV_CH_CMD_CHID_SHFT 0x0
+
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n))
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 0xffffffff
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+
+/* GSI core v1.0 HW parameter register layout */
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+/* GSI core v1.2 HW parameter register layout */
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+	0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+	0x40000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+/* GSI core v1.3 HW parameter register layout */
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+	0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+	0x40000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
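+
+/*
+ * Illustrative sketch only: decoding the IRAM size field of HW_PARAM_2
+ * for EE "ee" with the *_FVAL encodings above (0x0 = 1 KB, 0x1 = 2 KB):
+ *
+ *   u32 p2 = readl_relaxed(gsi_base +
+ *                          GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(ee));
+ *   u32 iram = (p2 & GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK) >>
+ *              GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT;
+ */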
+
+#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
+#define GSI_EE_n_GSI_SW_VERSION_RMSK 0xffffffff
+#define GSI_EE_n_GSI_SW_VERSION_MAXn 3
+#define GSI_EE_n_GSI_SW_VERSION_MAJOR_BMSK 0xf0000000
+#define GSI_EE_n_GSI_SW_VERSION_MAJOR_SHFT 0x1c
+#define GSI_EE_n_GSI_SW_VERSION_MINOR_BMSK 0xfff0000
+#define GSI_EE_n_GSI_SW_VERSION_MINOR_SHFT 0x10
+#define GSI_EE_n_GSI_SW_VERSION_STEP_BMSK 0xffff
+#define GSI_EE_n_GSI_SW_VERSION_STEP_SHFT 0x0
+
+#define GSI_EE_n_GSI_MCS_CODE_VER_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f048 + 0x4000 * (n))
+#define GSI_EE_n_GSI_MCS_CODE_VER_RMSK 0xffffffff
+#define GSI_EE_n_GSI_MCS_CODE_VER_MAXn 3
+#define GSI_EE_n_GSI_MCS_CODE_VER_VER_BMSK 0xffffffff
+#define GSI_EE_n_GSI_MCS_CODE_VER_VER_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_TYPE_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f080 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_TYPE_IRQ_RMSK 0x7f
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MAXn 3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_BMSK 0x40
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GENERAL_SHFT 0x6
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_BMSK 0x20
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_EV_CTRL_SHFT 0x5
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_BMSK 0x10
+#define GSI_EE_n_CNTXT_TYPE_IRQ_INTER_EE_CH_CTRL_SHFT 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_BMSK 0x8
+#define GSI_EE_n_CNTXT_TYPE_IRQ_IEOB_SHFT 0x3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_BMSK 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_GLOB_EE_SHFT 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_BMSK 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_EV_CTRL_SHFT 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_BMSK 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_CH_CTRL_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f088 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_RMSK 0x7f
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_MAXn 3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_BMSK 0x40
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GENERAL_SHFT 0x6
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_BMSK 0x20
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_EV_CTRL_SHFT 0x5
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_BMSK 0x10
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_INTER_EE_CH_CTRL_SHFT 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK 0x8
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_SHFT 0x3
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_BMSK 0x4
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_GLOB_EE_SHFT 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_BMSK 0x2
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL_SHFT 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_BMSK 0x1
+#define GSI_EE_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL_SHFT 0x0
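+
+/*
+ * Illustrative sketch only: setting the IEOB bit of the TYPE_IRQ_MSK
+ * register for EE "ee" with a read-modify-write (whether a set bit
+ * enables or masks the interrupt type is not defined by this header):
+ *
+ *   u32 msk = readl_relaxed(gsi_base +
+ *                           GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
+ *   msk |= GSI_EE_n_CNTXT_TYPE_IRQ_MSK_IEOB_BMSK;
+ *   writel_relaxed(msk, gsi_base + GSI_EE_n_CNTXT_TYPE_IRQ_MSK_OFFS(ee));
+ */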
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f090 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f094 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f098 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f09c + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0a0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0a4 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0b0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0b8 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f0c0 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_MAXn 3
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SRC_IEOB_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f100 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_RMSK 0xf
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_MAXn 3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f108 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_RMSK 0xf
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_MAXn 3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_EN_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f110 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_RMSK 0xf
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_MAXn 3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_BMSK 0x8
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT3_SHFT 0x3
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_BMSK 0x4
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT2_SHFT 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_BMSK 0x2
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_GP_INT1_SHFT 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GLOB_IRQ_CLR_ERROR_INT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f118 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_RMSK 0xf
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_MAXn 3
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_STTS_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f120 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_RMSK 0xf
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_MAXn 3
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_EN_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f128 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_RMSK 0xf
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_MAXn 3
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_BMSK 0x8
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_MCS_STACK_OVRFLOW_SHFT 0x3
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_BMSK 0x4
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_CMD_FIFO_OVRFLOW_SHFT 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_BMSK 0x2
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BUS_ERROR_SHFT 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_BMSK 0x1
+#define GSI_EE_n_CNTXT_GSI_IRQ_CLR_GSI_BREAK_POINT_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_INTSET_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f180 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_INTSET_RMSK 0x1
+#define GSI_EE_n_CNTXT_INTSET_MAXn 3
+#define GSI_EE_n_CNTXT_INTSET_INTYPE_BMSK 0x1
+#define GSI_EE_n_CNTXT_INTSET_INTYPE_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f188 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MAXn 3
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_MSI_BASE_LSB_MSI_ADDR_LSB_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f18c + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MAXn 3
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_MSI_BASE_MSB_MSI_ADDR_MSB_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_INT_VEC_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f190 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_INT_VEC_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_INT_VEC_MAXn 3
+#define GSI_EE_n_CNTXT_INT_VEC_INT_VEC_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_INT_VEC_INT_VEC_SHFT 0x0
+
+#define GSI_EE_n_ERROR_LOG_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f200 + 0x4000 * (n))
+#define GSI_EE_n_ERROR_LOG_RMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_MAXn 3
+#define GSI_EE_n_ERROR_LOG_TODO_BMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_TODO_SHFT 0x0
+
+#define GSI_EE_n_ERROR_LOG_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f210 + 0x4000 * (n))
+#define GSI_EE_n_ERROR_LOG_CLR_RMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_CLR_MAXn 3
+#define GSI_EE_n_ERROR_LOG_CLR_TODO_BMSK 0xffffffff
+#define GSI_EE_n_ERROR_LOG_CLR_TODO_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SCRATCH_0_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f400 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SCRATCH_0_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SCRATCH_0_MAXn 3
+#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SCRATCH_0_SCRATCH_SHFT 0x0
+
+#define GSI_EE_n_CNTXT_SCRATCH_1_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0001f404 + 0x4000 * (n))
+#define GSI_EE_n_CNTXT_SCRATCH_1_RMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SCRATCH_1_MAXn 3
+#define GSI_EE_n_CNTXT_SCRATCH_1_SCRATCH_BMSK 0xffffffff
+#define GSI_EE_n_CNTXT_SCRATCH_1_SCRATCH_SHFT 0x0
+
+#define GSI_INTER_EE_n_ORIGINATOR_EE_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c000 + 0x1000 * (n))
+#define GSI_INTER_EE_n_ORIGINATOR_EE_RMSK 0xf
+#define GSI_INTER_EE_n_ORIGINATOR_EE_MAXn 3
+#define GSI_INTER_EE_n_ORIGINATOR_EE_EE_NUMBER_BMSK 0xf
+#define GSI_INTER_EE_n_ORIGINATOR_EE_EE_NUMBER_SHFT 0x0
+
+#define GSI_INTER_EE_n_GSI_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c008 + 0x1000 * (n))
+#define GSI_INTER_EE_n_GSI_CH_CMD_RMSK 0xff0000ff
+#define GSI_INTER_EE_n_GSI_CH_CMD_MAXn 3
+#define GSI_INTER_EE_n_GSI_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_INTER_EE_n_GSI_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_INTER_EE_n_GSI_CH_CMD_CHID_BMSK 0xff
+#define GSI_INTER_EE_n_GSI_CH_CMD_CHID_SHFT 0x0
+
+#define GSI_INTER_EE_n_EV_CH_CMD_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c010 + 0x1000 * (n))
+#define GSI_INTER_EE_n_EV_CH_CMD_RMSK 0xff0000ff
+#define GSI_INTER_EE_n_EV_CH_CMD_MAXn 3
+#define GSI_INTER_EE_n_EV_CH_CMD_OPCODE_BMSK 0xff000000
+#define GSI_INTER_EE_n_EV_CH_CMD_OPCODE_SHFT 0x18
+#define GSI_INTER_EE_n_EV_CH_CMD_CHID_BMSK 0xff
+#define GSI_INTER_EE_n_EV_CH_CMD_CHID_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c018 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_RMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MAXn 3
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c01c + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_RMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MAXn 3
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_EV_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c020 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
+	0x00003fff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c024 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
+	0x000003ff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c028 + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_RMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_MAXn 3
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_GSI_CH_BIT_MAP_SHFT 0x0
+
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_OFFS(n) \
+	(GSI_GSI_REG_BASE_OFFS + 0x0000c02c + 0x1000 * (n))
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_RMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_MAXn 3
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_BMSK 0xffffffff
+#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_EV_CH_BIT_MAP_SHFT 0x0
+
+#endif /* __GSI_REG_H__ */
diff --git a/drivers/platform/msm/ipa/Makefile b/drivers/platform/msm/ipa/Makefile
new file mode 100644
index 0000000..15ed471
--- /dev/null
+++ b/drivers/platform/msm/ipa/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_IPA) += ipa_v2/ ipa_clients/ ipa_common
+obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
+obj-$(CONFIG_IPA_UT) += test/
+
+ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
new file mode 100644
index 0000000..06881d3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -0,0 +1,2951 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/ipa_uc_offload.h>
+#include "ipa_api.h"
+
+#define DRV_NAME "ipa"
+
+#define IPA_API_DISPATCH_RETURN(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+			ret = -EPERM; \
+		} else { \
+			if (ipa_api_ctrl->api) { \
+				ret = ipa_api_ctrl->api(p); \
+			} else { \
+				pr_err("%s not implemented for IPA ver %d\n", \
+						__func__, ipa_api_hw_type); \
+				WARN_ON(1); \
+				ret = -EPERM; \
+			} \
+		} \
+	} while (0)
+
+#define IPA_API_DISPATCH(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+		else { \
+			if (ipa_api_ctrl->api) { \
+				ipa_api_ctrl->api(p); \
+			} else { \
+				pr_err("%s not implemented for IPA ver %d\n", \
+						__func__, ipa_api_hw_type); \
+				WARN_ON(1); \
+			} \
+		} \
+	} while (0)
+
+#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+			ret = NULL; \
+		} else { \
+			if (ipa_api_ctrl->api) { \
+				ret = ipa_api_ctrl->api(p); \
+			} else { \
+				pr_err("%s not implemented for IPA ver %d\n", \
+						__func__, ipa_api_hw_type); \
+				WARN_ON(1); \
+				ret = NULL; \
+			} \
+		} \
+	} while (0)
+
+#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
+	do { \
+		if (!ipa_api_ctrl) { \
+			pr_err("%s:%d IPA HW is not supported\n", \
+				__func__, __LINE__); \
+			ret = false; \
+		} else { \
+			if (ipa_api_ctrl->api) { \
+				ret = ipa_api_ctrl->api(p); \
+			} else { \
+				pr_err("%s not implemented for IPA ver %d\n", \
+						__func__, ipa_api_hw_type); \
+				WARN_ON(1); \
+				ret = false; \
+			} \
+		} \
+	} while (0)
+
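+/*
+ * A note on the dispatch macros above: every exported wrapper in this file
+ * is expected to reduce to the same shape, e.g. (illustrative sketch only,
+ * ipa_do_something is not a real IPA API):
+ *
+ *	int ipa_do_something(u32 arg)
+ *	{
+ *		int ret;
+ *
+ *		IPA_API_DISPATCH_RETURN(ipa_do_something, arg);
+ *
+ *		return ret;
+ *	}
+ *
+ * where ipa_api_ctrl->ipa_do_something is filled in by the IPA v2 or v3
+ * back-end at probe time. The macros only add the "HW not supported" and
+ * "not implemented for this IPA version" error handling around that call.
+ */
+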
+static enum ipa_hw_type ipa_api_hw_type;
+static struct ipa_api_controller *ipa_api_ctrl;
+
+const char *ipa_clients_strings[IPA_CLIENT_MAX] = {
+	__stringify(IPA_CLIENT_HSIC1_PROD),
+	__stringify(IPA_CLIENT_WLAN1_PROD),
+	__stringify(IPA_CLIENT_HSIC2_PROD),
+	__stringify(IPA_CLIENT_USB2_PROD),
+	__stringify(IPA_CLIENT_HSIC3_PROD),
+	__stringify(IPA_CLIENT_USB3_PROD),
+	__stringify(IPA_CLIENT_HSIC4_PROD),
+	__stringify(IPA_CLIENT_USB4_PROD),
+	__stringify(IPA_CLIENT_HSIC5_PROD),
+	__stringify(IPA_CLIENT_USB_PROD),
+	__stringify(IPA_CLIENT_A5_WLAN_AMPDU_PROD),
+	__stringify(IPA_CLIENT_A2_EMBEDDED_PROD),
+	__stringify(IPA_CLIENT_A2_TETHERED_PROD),
+	__stringify(IPA_CLIENT_APPS_LAN_WAN_PROD),
+	__stringify(IPA_CLIENT_APPS_CMD_PROD),
+	__stringify(IPA_CLIENT_ODU_PROD),
+	__stringify(IPA_CLIENT_MHI_PROD),
+	__stringify(IPA_CLIENT_Q6_LAN_PROD),
+	__stringify(IPA_CLIENT_Q6_WAN_PROD),
+	__stringify(IPA_CLIENT_Q6_CMD_PROD),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD),
+	__stringify(IPA_CLIENT_Q6_DECOMP_PROD),
+	__stringify(IPA_CLIENT_Q6_DECOMP2_PROD),
+	__stringify(IPA_CLIENT_UC_USB_PROD),
+
+	/* Below PROD client type is only for test purpose */
+	__stringify(IPA_CLIENT_TEST_PROD),
+	__stringify(IPA_CLIENT_TEST1_PROD),
+	__stringify(IPA_CLIENT_TEST2_PROD),
+	__stringify(IPA_CLIENT_TEST3_PROD),
+	__stringify(IPA_CLIENT_TEST4_PROD),
+
+	__stringify(IPA_CLIENT_HSIC1_CONS),
+	__stringify(IPA_CLIENT_WLAN1_CONS),
+	__stringify(IPA_CLIENT_HSIC2_CONS),
+	__stringify(IPA_CLIENT_USB2_CONS),
+	__stringify(IPA_CLIENT_WLAN2_CONS),
+	__stringify(IPA_CLIENT_HSIC3_CONS),
+	__stringify(IPA_CLIENT_USB3_CONS),
+	__stringify(IPA_CLIENT_WLAN3_CONS),
+	__stringify(IPA_CLIENT_HSIC4_CONS),
+	__stringify(IPA_CLIENT_USB4_CONS),
+	__stringify(IPA_CLIENT_WLAN4_CONS),
+	__stringify(IPA_CLIENT_HSIC5_CONS),
+	__stringify(IPA_CLIENT_USB_CONS),
+	__stringify(IPA_CLIENT_USB_DPL_CONS),
+	__stringify(IPA_CLIENT_A2_EMBEDDED_CONS),
+	__stringify(IPA_CLIENT_A2_TETHERED_CONS),
+	__stringify(IPA_CLIENT_A5_LAN_WAN_CONS),
+	__stringify(IPA_CLIENT_APPS_LAN_CONS),
+	__stringify(IPA_CLIENT_APPS_WAN_CONS),
+	__stringify(IPA_CLIENT_ODU_EMB_CONS),
+	__stringify(IPA_CLIENT_ODU_TETH_CONS),
+	__stringify(IPA_CLIENT_MHI_CONS),
+	__stringify(IPA_CLIENT_Q6_LAN_CONS),
+	__stringify(IPA_CLIENT_Q6_WAN_CONS),
+	__stringify(IPA_CLIENT_Q6_DUN_CONS),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS),
+	__stringify(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS),
+	__stringify(IPA_CLIENT_Q6_DECOMP_CONS),
+	__stringify(IPA_CLIENT_Q6_DECOMP2_CONS),
+	__stringify(IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS),
+	/* Below CONS client type is only for test purpose */
+	__stringify(IPA_CLIENT_TEST_CONS),
+	__stringify(IPA_CLIENT_TEST1_CONS),
+	__stringify(IPA_CLIENT_TEST2_CONS),
+	__stringify(IPA_CLIENT_TEST3_CONS),
+	__stringify(IPA_CLIENT_TEST4_CONS),
+};
+
+/**
+ * ipa_write_64() - convert 64 bit value to byte array
+ * @w: 64 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the value written to @dest
+ */
+u8 *ipa_write_64(u64 w, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("ipa_write_64: NULL address!\n");
+		return dest;
+	}
+	*dest++ = (u8)((w) & 0xFF);
+	*dest++ = (u8)((w >> 8) & 0xFF);
+	*dest++ = (u8)((w >> 16) & 0xFF);
+	*dest++ = (u8)((w >> 24) & 0xFF);
+	*dest++ = (u8)((w >> 32) & 0xFF);
+	*dest++ = (u8)((w >> 40) & 0xFF);
+	*dest++ = (u8)((w >> 48) & 0xFF);
+	*dest++ = (u8)((w >> 56) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_32() - convert 32 bit value to byte array
+ * @w: 32 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the value written to @dest
+ */
+u8 *ipa_write_32(u32 w, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("ipa_write_32: NULL address!\n");
+		return dest;
+	}
+	*dest++ = (u8)((w) & 0xFF);
+	*dest++ = (u8)((w >> 8) & 0xFF);
+	*dest++ = (u8)((w >> 16) & 0xFF);
+	*dest++ = (u8)((w >> 24) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_16() - convert 16 bit value to byte array
+ * @hw: 16 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the value written to @dest
+ */
+u8 *ipa_write_16(u16 hw, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("ipa_write_16: NULL address!\n");
+		return dest;
+	}
+	*dest++ = (u8)((hw) & 0xFF);
+	*dest++ = (u8)((hw >> 8) & 0xFF);
+
+	return dest;
+}
+
+/**
+ * ipa_write_8() - convert 8 bit value to byte array
+ * @b: 8 bit integer
+ * @dest: byte array
+ *
+ * Return value: pointer to the byte just past the value written to @dest
+ */
+u8 *ipa_write_8(u8 b, u8 *dest)
+{
+	if (unlikely(dest == NULL)) {
+		pr_err("ipa_write_8: NULL address!\n");
+		return dest;
+	}
+	*dest++ = (b) & 0xFF;
+
+	return dest;
+}
+
+/**
+ * ipa_pad_to_64() - pad byte array to 64 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer to @dest advanced past any zero padding added
+ */
+u8 *ipa_pad_to_64(u8 *dest)
+{
+	int i = (long)dest & 0x7;
+	int j;
+
+	if (i)
+		for (j = 0; j < (8 - i); j++)
+			*dest++ = 0;
+
+	return dest;
+}
+
+/**
+ * ipa_pad_to_32() - pad byte array to 32 bit value
+ * @dest: byte array
+ *
+ * Return value: pointer to @dest advanced past any zero padding added
+ */
+u8 *ipa_pad_to_32(u8 *dest)
+{
+	int i = (long)dest & 0x3;
+	int j;
+
+	if (i)
+		for (j = 0; j < (4 - i); j++)
+			*dest++ = 0;
+
+	return dest;
+}
+
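+/*
+ * Illustrative sketch (not part of the driver): the ipa_write_*() and
+ * ipa_pad_to_*() helpers above are meant to be chained when serializing a
+ * command payload into a little-endian byte stream, for example:
+ *
+ *	u8 *build_example_blob(u8 *buf, u16 id, u32 len, u64 addr)
+ *	{
+ *		u8 *p = buf;
+ *
+ *		p = ipa_write_16(id, p);
+ *		p = ipa_write_32(len, p);
+ *		p = ipa_pad_to_64(p);		(align the 64-bit field)
+ *		p = ipa_write_64(addr, p);
+ *
+ *		return p;			(just past the written bytes)
+ *	}
+ *
+ * build_example_blob() is a hypothetical caller shown only to document the
+ * intended usage; buf must be large enough for the fields plus padding.
+ */
+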
+/**
+ * ipa_connect() - low-level IPA client connect
+ * @in:	[in] input parameters from client
+ * @sps:	[out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode; these peripherals are USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+	u32 *clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_connect, in, sps, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_connect);
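+
+/*
+ * Example (sketch only; the connect parameters are client specific and not
+ * shown): a BAM-BAM peripheral driver typically zero-fills the parameters,
+ * fills in its pipe configuration, and keeps the returned handle for the
+ * matching ipa_disconnect():
+ *
+ *	struct ipa_connect_params in;
+ *	struct ipa_sps_params sps;
+ *	u32 hdl;
+ *	int rc;
+ *
+ *	memset(&in, 0, sizeof(in));
+ *	memset(&sps, 0, sizeof(sps));
+ *	(fill 'in' with the client type, descriptor FIFO sizes, etc.)
+ *	rc = ipa_connect(&in, &sps, &hdl);
+ *	if (rc)
+ *		return rc;
+ *	(use 'sps' for sps_connect(); call ipa_disconnect(hdl) on teardown)
+ */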
+
+/**
+ * ipa_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take responsibility
+ * for freeing any needed headers, routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disconnect(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconnect, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect);
+
+/**
+* ipa_clear_endpoint_delay() - Clear ep_delay.
+* @clnt_hdl:	[in] IPA client handle
+*
+* Returns:	0 on success, negative on failure
+*
+* Note:		Should not be called from atomic context
+*/
+int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_clear_endpoint_delay, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_clear_endpoint_delay);
+
+/**
+* ipa_reset_endpoint() - reset an endpoint from BAM perspective
+* @clnt_hdl:	[in] IPA client handle
+*
+* Returns:	0 on success, negative on failure
+*
+* Note:		Should not be called from atomic context
+*/
+int ipa_reset_endpoint(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_endpoint, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_endpoint);
+
+/**
+* ipa_disable_endpoint() - Disable an endpoint from IPA perspective
+* @clnt_hdl:	[in] IPA client handle
+*
+* Returns:	0 on success, negative on failure
+*
+* Note:		Should not be called from atomic context
+*/
+int ipa_disable_endpoint(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_endpoint, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_endpoint);
+
+
+/**
+ * ipa_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a one
+ * shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep, clnt_hdl, ipa_ep_cfg);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep);
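+
+/*
+ * Example (sketch only; field names follow struct ipa_ep_cfg in
+ * <linux/ipa.h> and should be adjusted to the actual client): a pipe that
+ * only needs basic mode, with the other sub-configurations zero-filled, can
+ * be configured in one shot:
+ *
+ *	struct ipa_ep_cfg cfg;
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.mode.mode = IPA_BASIC;
+ *	rc = ipa_cfg_ep(clnt_hdl, &cfg);
+ *
+ * where clnt_hdl is the handle returned by ipa_connect() or
+ * ipa_setup_sys_pipe().
+ */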
+
+/**
+ * ipa_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_nat, clnt_hdl, ep_nat);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_nat);
+
+/**
+ * ipa_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr, clnt_hdl, ep_hdr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr);
+
+/**
+ * ipa_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+		       const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_hdr_ext, clnt_hdl, ep_hdr_ext);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_hdr_ext);
+
+/**
+ * ipa_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_mode, clnt_hdl, ep_mode);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_mode);
+
+/**
+ * ipa_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_aggr, clnt_hdl, ep_aggr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_aggr);
+
+/**
+ * ipa_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_deaggr:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_deaggr, clnt_hdl, ep_deaggr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_deaggr);
+
+/**
+ * ipa_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_route, clnt_hdl, ep_route);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_route);
+
+/**
+ * ipa_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb, clnt_hdl, ep_holb);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_holb);
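+
+/*
+ * Example (sketch only; see struct ipa_ep_cfg_holb in <linux/ipa.h> for the
+ * exact field names): enabling HOLB drop on a stuck consumer pipe looks
+ * roughly like:
+ *
+ *	struct ipa_ep_cfg_holb holb;
+ *
+ *	memset(&holb, 0, sizeof(holb));
+ *	holb.en = 1;
+ *	holb.tmr_val = 0;		(no hold-off timer before dropping)
+ *	rc = ipa_cfg_ep_holb(clnt_hdl, &holb);
+ *
+ * Clients without a handle can use ipa_cfg_ep_holb_by_client() below with an
+ * IPA_CLIENT_*_CONS type instead.
+ */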
+
+
+/**
+ * ipa_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_cfg, clnt_hdl, cfg);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_cfg);
+
+/**
+ * ipa_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask
+		*metadata_mask)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_metadata_mask, clnt_hdl,
+			metadata_mask);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_metadata_mask);
+
+/**
+ * ipa_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa_cfg_ep_holb() with client name instead of
+ * client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client:	[in] client name
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ep_holb)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_holb_by_client, client, ep_holb);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_holb_by_client);
+
+/**
+ * ipa_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg_ctrl:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_cfg_ep_ctrl, clnt_hdl, ep_ctrl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_cfg_ep_ctrl);
+
+/**
+ * ipa_add_hdr() - add the specified headers to SW and optionally commit them to
+ * IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_hdr, hdrs);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr);
+
+/**
+ * ipa_del_hdr() - Remove the specified headers from SW and optionally
+ * commit them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_hdr, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_hdr);
+
+/**
+ * ipa_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_hdr(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_commit_hdr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_hdr);
+
+/**
+ * ipa_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_hdr(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_hdr);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_hdr);
+
+/**
+ * ipa_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return handle if it exists
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_hdr, lookup);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_hdr);
+
+/**
+ * ipa_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_hdr(u32 hdr_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_put_hdr, hdr_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_put_hdr);
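+
+/*
+ * Example (sketch only; "my_hdr" is a placeholder name and the field names
+ * follow struct ipa_ioc_get_hdr): a successful ipa_get_hdr() takes a
+ * reference on the header, so it must be balanced by ipa_put_hdr() once the
+ * handle is no longer needed:
+ *
+ *	struct ipa_ioc_get_hdr lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	strlcpy(lookup.name, "my_hdr", sizeof(lookup.name));
+ *	if (!ipa_get_hdr(&lookup)) {
+ *		(use lookup.hdl, e.g. when adding routing rules)
+ *		ipa_put_hdr(lookup.hdl);
+ *	}
+ */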
+
+/**
+ * ipa_copy_hdr() - Lookup the specified header resource and return a copy of it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists, this would be called for partial headers
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_copy_hdr, copy);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_copy_hdr);
+
+/**
+ * ipa_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs:	[inout] set of processing context headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_hdr_proc_ctx, proc_ctxs);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_hdr_proc_ctx);
+
+/**
+ * ipa_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_hdr_proc_ctx, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_hdr_proc_ctx);
+
+/**
+ * ipa_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_rt_rule, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_rt_rule);
+
+/**
+ * ipa_del_rt_rule() - Remove the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_rt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_rt_rule);
+
+/**
+ * ipa_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_commit_rt, ip);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_rt);
+
+/**
+ * ipa_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_rt, ip);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_rt);
+
+/**
+ * ipa_get_rt_tbl() - lookup the specified routing table and return handle if it
+ * exists; if the lookup succeeds, the routing table ref count is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa_put_rt_tbl later if this function succeeds
+ */
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_rt_tbl, lookup);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_rt_tbl);
+
+/**
+ * ipa_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_put_rt_tbl, rt_tbl_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_put_rt_tbl);
+
+/**
+ * ipa_query_rt_index() - find the routing table index
+ *			whose name and IP type are given as parameters
+ * @in:	[out] the index of the wanted routing table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_query_rt_index, in);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_query_rt_index);
+
+/**
+ * ipa_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_rt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_rt_rule);
+
+/**
+ * ipa_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_flt_rule, rules);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_flt_rule);
+
+/**
+ * ipa_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_del_flt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_del_flt_rule);
+
+/**
+ * ipa_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mdfy_flt_rule, hdls);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mdfy_flt_rule);
+
+/**
+ * ipa_commit_flt() - Commit the current SW filtering table of specified type to
+ * IPA HW
+ * @ip:	[in] the family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_commit_flt, ip);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_commit_flt);
+
+/**
+ * ipa_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_reset_flt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_reset_flt, ip);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_reset_flt);
+
+/**
+ * allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(allocate_nat_device, mem);
+
+	return ret;
+}
+EXPORT_SYMBOL(allocate_nat_device);
+
+/**
+ * ipa_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_init_cmd, init);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_init_cmd);
+
+/**
+ * ipa_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_dma_cmd, dma);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_dma_cmd);
+
+/**
+ * ipa_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_nat_del_cmd, del);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_nat_del_cmd);
+
+/**
+ * ipa_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_send_msg, meta, buff, callback);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_send_msg);
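+
+/*
+ * Example (sketch only; the meta-data field names follow struct ipa_msg_meta
+ * and the free callback must match ipa_msg_free_fn): a client hands
+ * ownership of a kmalloc'd payload to the IPA driver and frees it only from
+ * the callback:
+ *
+ *	static void example_msg_free(void *buff, u32 len, u32 type)
+ *	{
+ *		kfree(buff);
+ *	}
+ *	...
+ *	struct ipa_msg_meta meta;
+ *
+ *	memset(&meta, 0, sizeof(meta));
+ *	meta.msg_type = msg_type;	(client specific event type)
+ *	meta.msg_len = msg_len;
+ *	rc = ipa_send_msg(&meta, msg, example_msg_free);
+ *	if (rc)
+ *		kfree(msg);		(only on failure, never after success)
+ */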
+
+/**
+ * ipa_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_pull_msg, meta, callback);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_pull_msg);
+
+/**
+ * ipa_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_deregister_pull_msg, meta);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_deregister_pull_msg);
+
+/**
+ * ipa_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_intf, name, tx, rx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_intf);
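+
+/*
+ * Example (sketch only; "rmnet_data0" and the property arrays are
+ * placeholders, field names follow struct ipa_tx_intf/ipa_rx_intf): a netdev
+ * driver usually registers its interface once at init and deregisters it on
+ * teardown:
+ *
+ *	struct ipa_tx_intf tx = { .num_props = ARRAY_SIZE(tx_prop),
+ *				  .prop = tx_prop };
+ *	struct ipa_rx_intf rx = { .num_props = ARRAY_SIZE(rx_prop),
+ *				  .prop = rx_prop };
+ *
+ *	rc = ipa_register_intf("rmnet_data0", &tx, &rx);
+ *	...
+ *	ipa_deregister_intf("rmnet_data0");
+ */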
+
+/**
+ * ipa_register_intf_ext() - register "logical" interface which has only
+ * extended properties
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+	const struct ipa_rx_intf *rx,
+	const struct ipa_ext_intf *ext)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_intf_ext, name, tx, rx, ext);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_intf_ext);
+
+/**
+ * ipa_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_deregister_intf(const char *name)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_deregister_intf, name);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_deregister_intf);
+
+/**
+ * ipa_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_aggr_mode, mode);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_aggr_mode);
+
+
+/**
+ * ipa_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_qcncm_ndp_sig, sig);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_qcncm_ndp_sig);
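+
+/*
+ * Example: per the note above only the first three bytes are set here; the
+ * trailing 'P' is supplied through the header addition mechanism:
+ *
+ *	char sig[3] = { 'Q', 'N', 'D' };
+ *
+ *	rc = ipa_set_qcncm_ndp_sig(sig);
+ */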
+
+/**
+ * ipa_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_single_ndp_per_mbim, enable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_single_ndp_per_mbim);
+
+/**
+ * ipa_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @metadata:	[in] TX packet meta-data
+ *
+ * Data-path tx handler, this is used for both SW data-path which by-passes most
+ * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke client callback if one was supplied. That
+ * callback should free the skb. If no callback supplied, IPA driver will free
+ * the skb internally
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tx_dp, dst, skb, meta);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp);
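+
+/*
+ * Example (sketch only, using a consumer pipe as the destination): per the
+ * ownership rules above, the caller frees the skb only on an immediate
+ * error; on success it is released by the client callback, or by the IPA
+ * driver when no callback was supplied:
+ *
+ *	if (ipa_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL)) {
+ *		dev_kfree_skb_any(skb);
+ *		(update the client's tx drop statistics)
+ *	}
+ */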
+
+/**
+ * ipa_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @ipa_tx_data_desc:	[in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received
+ * from the WLAN1_PROD pipe to IPA HW.
+ *
+ * The function will send data descriptors from WLAN1_PROD (one
+ * at a time) using sps_transfer_one and will set the EOT flag for the last
+ * descriptor. Once this send is done from the SPS point-of-view, the
+ * IPA driver will get notified by the supplied callback -
+ * ipa_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa_sps_irq_tx_no_aggr_notify will call to the user supplied
+ * callback (from ipa_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_tx_dp_mul(enum ipa_client_type src,
+			struct ipa_tx_data_desc *data_desc)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tx_dp_mul, src, data_desc);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_tx_dp_mul);
+
+void ipa_free_skb(struct ipa_rx_data *data)
+{
+	IPA_API_DISPATCH(ipa_free_skb, data);
+}
+EXPORT_SYMBOL(ipa_free_skb);
+
+/**
+ * ipa_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - call SPS APIs to create a system-to-bam connection with IPA.
+ *  - allocate descriptor FIFO
 *  - register a callback function (ipa_sps_irq_rx_notify or
 *    ipa_sps_irq_tx_notify, depending on client type) in case the driver is
 *    not configured for polling mode
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_setup_sys_pipe, sys_in, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_setup_sys_pipe);
+
+/**
+ * ipa_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_teardown_sys_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_teardown_sys_pipe);
+
+int ipa_sys_setup(struct ipa_sys_connect_params *sys_in,
+	unsigned long *ipa_bam_or_gsi_hdl,
+	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_sys_setup, sys_in, ipa_bam_or_gsi_hdl,
+			ipa_pipe_num, clnt_hdl, en_status);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_sys_setup);
+
+int ipa_sys_teardown(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_sys_teardown, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_sys_teardown);
+
+int ipa_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_sys_update_gsi_hdls, clnt_hdl,
+		gsi_ch_hdl, gsi_ev_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_sys_update_gsi_hdls);
+
+/**
+ * ipa_connect_wdi_pipe() - WDI client connect
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_connect_wdi_pipe, in, out);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_connect_wdi_pipe);
+
+/**
+ * ipa_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconnect_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect_wdi_pipe);
+
+/**
+ * ipa_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_enable_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_enable_wdi_pipe);
+
+/**
+ * ipa_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_wdi_pipe);
+
+/**
+ * ipa_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_resume_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_resume_wdi_pipe);
+
+/**
+ * ipa_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_suspend_wdi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_wdi_pipe);
+
+/**
+ * ipa_get_wdi_stats() - Query WDI statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_wdi_stats, stats);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_wdi_stats);
+
+/**
+ * ipa_get_smem_restr_bytes() - Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa_get_smem_restr_bytes(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_smem_restr_bytes);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_smem_restr_bytes);
+
+/**
+ * ipa_uc_wdi_get_dbpa() - To retrieve
+ * doorbell physical address of wlan pipes
+ * @param:  [in/out] input/output parameters
+ *          from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *param)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_wdi_get_dbpa, param);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_wdi_get_dbpa);
+
+/**
+ * ipa_uc_reg_rdyCB() - To register uC
+ * ready CB if uC not ready
+ * @inout:	[in/out] input/output parameters
+ * from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_reg_rdyCB, inout);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_reg_rdyCB);
+
+/**
+ * ipa_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa_uc_dereg_rdyCB(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_dereg_rdyCB);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_dereg_rdyCB);
+
+/**
+* teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for USB initialization API (please look at struct
+*  definition for more info)
+*
+* USB driver gets a pointer to a callback function (usb_notify_cb) and an
+* associated data. USB driver installs this callback function in the call to
+* ipa_connect().
+*
+* Builds IPA resource manager dependency graph.
+*
+* Return codes: 0: success,
+*		-EINVAL - Bad parameter
+*		Other negative value - Failure
+*/
+int teth_bridge_init(struct teth_bridge_init_params *params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(teth_bridge_init, params);
+
+	return ret;
+}
+EXPORT_SYMBOL(teth_bridge_init);
+
+/**
+* teth_bridge_disconnect() - Disconnect tethering bridge module
+*/
+int teth_bridge_disconnect(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(teth_bridge_disconnect, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(teth_bridge_disconnect);
+
+/**
+* teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params:	Connection info
+*
+* Return codes: 0: success
+*		-EINVAL: invalid parameters
+*		-EPERM: Operation not permitted as the bridge is already
+*		connected
+*/
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(teth_bridge_connect, connect_params);
+
+	return ret;
+}
+EXPORT_SYMBOL(teth_bridge_connect);
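+
+/*
+ * Example (sketch only; the parameter structs depend on the USB/rmnet
+ * integration, see the teth_bridge_*_params definitions): the expected call
+ * order is init first, connect once the pipes exist, and disconnect on
+ * teardown:
+ *
+ *	struct teth_bridge_init_params init_params;
+ *	struct teth_bridge_connect_params connect_params;
+ *
+ *	memset(&init_params, 0, sizeof(init_params));
+ *	memset(&connect_params, 0, sizeof(connect_params));
+ *	(fill both structs with the USB/IPA pipe information)
+ *	rc = teth_bridge_init(&init_params);
+ *	...
+ *	rc = teth_bridge_connect(&connect_params);
+ *	...
+ *	teth_bridge_disconnect(client);
+ */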
+
+/**
+ * ipa_set_client() - provide client mapping
+ * @index: pipe index to map
+ * @client: client type
+ * @uplink: whether the mapping is for the uplink direction
+ *
+ * Return value: none
+ */
+void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+	IPA_API_DISPATCH(ipa_set_client, index, client, uplink);
+}
+EXPORT_SYMBOL(ipa_set_client);
+
+/**
+ * ipa_get_client() - provide client mapping
+ * @pipe_idx: pipe index
+ *
+ * Return value: the client type mapped to the given pipe index
+ */
+enum ipacm_client_enum ipa_get_client(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_client, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_client);
+
+/**
+ * ipa_get_client_uplink() - provide client mapping
+ * @pipe_idx: pipe index
+ *
+ * Return value: true if the given pipe index is mapped as uplink
+ */
+bool ipa_get_client_uplink(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_client_uplink, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_client_uplink);
+
+/**
+ * ipa_dma_init() -Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the DMA pipes:
+ *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
+ *	MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_SYNC_CONS
+ *
+ * Return codes: 0: success
+ *		-EFAULT: IPADMA is already initialized
+ *		-ENOMEM: allocating memory error
+ *		-EPERM: pipe connection failed
+ */
+int ipa_dma_init(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_init);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_init);
+
+/**
+ * ipa_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *		 enabled
+ */
+int ipa_dma_enable(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_enable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_enable);
+
+/**
+ * ipa_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enter power save mode.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: can not disable ipa_dma as there are pending
+ *			memcopy works
+ */
+int ipa_dma_disable(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_disable);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_disable);
+
+/**
+ * ipa_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: other
+ */
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_sync_memcpy, dest, src, len);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_sync_memcpy);
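+
+/*
+ * Example (sketch only; dst_phys/src_phys must be IPA-accessible physical
+ * addresses, e.g. from dma_alloc_coherent()): the expected lifecycle around
+ * a synchronous copy is:
+ *
+ *	rc = ipa_dma_init();
+ *	if (!rc)
+ *		rc = ipa_dma_enable();
+ *	if (!rc)
+ *		rc = ipa_dma_sync_memcpy(dst_phys, src_phys, len);
+ *	...
+ *	ipa_dma_disable();
+ *	ipa_dma_destroy();
+ */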
+
+/**
+ * ipa_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: descr fifo is full.
+ */
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_async_memcpy, dest, src, len, user_cb,
+		user_param);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_async_memcpy);
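+
+/*
+ * Illustrative callback pattern (an assumption, not part of the driver): the
+ * cookie passed as user_param is handed back to user_cb when the copy
+ * completes, so a caller that needs to block can pass a completion object.
+ * copy_done, dst_pa, src_pa and len are hypothetical.
+ *
+ *	static void my_copy_done_cb(void *user1)
+ *	{
+ *		complete((struct completion *)user1);
+ *	}
+ *
+ *	...
+ *	init_completion(&copy_done);
+ *	if (!ipa_dma_async_memcpy(dst_pa, src_pa, len,
+ *		my_copy_done_cb, &copy_done))
+ *		wait_for_completion(&copy_done);
+ */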
+
+/**
+ * ipa_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_dma_uc_memcpy, dest, src, len);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_dma_uc_memcpy);
+
+/**
+ * ipa_dma_destroy() - Tear down IPADMA pipes and release IPADMA.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa_dma_destroy(void)
+{
+	IPA_API_DISPATCH(ipa_dma_destroy);
+}
+EXPORT_SYMBOL(ipa_dma_destroy);
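+
+/*
+ * Illustrative IPADMA lifecycle (an assumption, not part of the driver):
+ * a client is expected to pair the calls above roughly as follows before
+ * and after issuing copies. dst_pa, src_pa and len are hypothetical.
+ *
+ *	if (ipa_dma_init())
+ *		return;
+ *	if (ipa_dma_enable())
+ *		goto destroy;
+ *	if (ipa_dma_sync_memcpy(dst_pa, src_pa, len))
+ *		pr_err("IPADMA sync copy failed\n");
+ *	ipa_dma_disable();
+ *destroy:
+ *	ipa_dma_destroy();
+ */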
+
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_init_engine, params);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_mhi_init_engine);
+
+/**
+ * ipa_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after the MHI engine was started.
+ * This function does the following:
+ *	- Send command to uC to start corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_connect_mhi_pipe, in, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_connect_mhi_pipe);
+
+/**
+ * ipa_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel reset.
+ * This function is called after the MHI channel was started.
+ * This function does the following:
+ *	- Send command to uC to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disconnect_mhi_pipe, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disconnect_mhi_pipe);
+
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_stop_gsi_channel, client);
+
+	return ret;
+}
+
+int ipa_uc_mhi_reset_channel(int channelHandle)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_reset_channel, channelHandle);
+
+	return ret;
+}
+
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_mhi_sps_channel_empty, client);
+
+	return ret;
+}
+
+int ipa_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_qmi_enable_force_clear_datapath_send, req);
+
+	return ret;
+}
+
+int ipa_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_qmi_disable_force_clear_datapath_send, req);
+
+	return ret;
+}
+
+int ipa_generate_tag_process(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_generate_tag_process);
+
+	return ret;
+}
+
+int ipa_disable_sps_pipe(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_sps_pipe, client);
+
+	return ret;
+}
+
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_reset_channel_internal, client);
+
+	return ret;
+}
+
+int ipa_mhi_start_channel_internal(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_start_channel_internal, client);
+
+	return ret;
+}
+
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+	IPA_API_DISPATCH(ipa_get_holb, ep_idx, holb);
+}
+
+void ipa_set_tag_process_before_gating(bool val)
+{
+	IPA_API_DISPATCH(ipa_set_tag_process_before_gating, val);
+}
+
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_query_ch_info, client, ch_info);
+
+	return ret;
+}
+
+int ipa_uc_mhi_suspend_channel(int channelHandle)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_suspend_channel, channelHandle);
+
+	return ret;
+}
+
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_stop_event_update_channel,
+			channelHandle);
+
+	return ret;
+}
+
+bool ipa_has_open_aggr_frame(enum ipa_client_type client)
+{
+	bool ret;
+
+	IPA_API_DISPATCH_RETURN_BOOL(ipa_has_open_aggr_frame, client);
+
+	return ret;
+}
+
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_resume_channels_internal, client,
+			LPTransitionRejected, brstmode_enabled, ch_scratch,
+			index);
+
+	return ret;
+}
+
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_send_dl_ul_sync_info,
+			cmd);
+
+	return ret;
+}
+
+int ipa_mhi_destroy_channel(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_mhi_destroy_channel, client);
+
+	return ret;
+}
+
+int ipa_uc_mhi_init(void (*ready_cb)(void),
+		void (*wakeup_request_cb)(void))
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_init, ready_cb, wakeup_request_cb);
+
+	return ret;
+}
+
+void ipa_uc_mhi_cleanup(void)
+{
+	IPA_API_DISPATCH(ipa_uc_mhi_cleanup);
+}
+
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_mhi_print_stats, dbg_buff, size);
+
+	return ret;
+}
+
+/**
+ * ipa_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ *               and there was no recent failure in one of the commands.
+ *               A negative value is returned otherwise.
+ */
+int ipa_uc_state_check(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_uc_state_check);
+
+	return ret;
+}
+
+int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_write_qmap_id, param_in);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_write_qmap_id);
+
+/**
+ * ipa_add_interrupt_handler() - Adds a handler for an interrupt type
+ * @interrupt:		Interrupt type
+ * @handler:		The handler to be added
+ * @deferred_flag:	whether handler processing should be deferred to
+ *			a workqueue
+ * @private_data:	the client's private data
+ *
+ * Adds a handler for an interrupt type and enables the corresponding bit in
+ * the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+ * will be enabled as well.
+ */
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+	ipa_irq_handler_t handler,
+	bool deferred_flag,
+	void *private_data)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_add_interrupt_handler, interrupt, handler,
+		deferred_flag, private_data);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_add_interrupt_handler);
+
+/**
+ * ipa_remove_interrupt_handler() - Removes the handler for an interrupt type
+ * @interrupt:		Interrupt type
+ *
+ * Removes the handler and disables the corresponding bit in the IRQ_EN
+ * register.
+ */
+int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_remove_interrupt_handler, interrupt);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_remove_interrupt_handler);
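+
+/*
+ * Illustrative registration sketch (an assumption, not part of the driver):
+ * a client that wants an interrupt handled in workqueue context registers it
+ * with deferred_flag set and removes it on teardown. my_irq_handler is a
+ * hypothetical callback with the ipa_irq_handler_t prototype, my_ctx is
+ * hypothetical private data and IPA_TX_SUSPEND_IRQ is assumed to be one of
+ * the enum ipa_irq_type values.
+ *
+ *	if (ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+ *		my_irq_handler, true, my_ctx))
+ *		pr_err("failed to add IPA interrupt handler\n");
+ *	...
+ *	ipa_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+ */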
+
+/**
+* ipa_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa_restore_suspend_handler(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_restore_suspend_handler);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_restore_suspend_handler);
+
+/**
+ * ipa_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa_bam_reg_dump(void)
+{
+	IPA_API_DISPATCH(ipa_bam_reg_dump);
+}
+EXPORT_SYMBOL(ipa_bam_reg_dump);
+
+/**
+ * ipa_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_ep_mapping, client);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_ep_mapping);
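+
+/*
+ * Illustrative lookup (an assumption, not part of the driver): resolving the
+ * pipe number behind a logical client before touching per-pipe state.
+ * IPA_CLIENT_APPS_LAN_CONS is assumed to be one of the enum ipa_client_type
+ * values and a negative result is assumed to mean the client has no pipe on
+ * this hardware.
+ *
+ *	int ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ *
+ *	if (ep_idx < 0)
+ *		pr_err("client has no pipe on this HW\n");
+ */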
+
+/**
+ * ipa_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa_is_ready(void)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_ready)
+		return false;
+	return ipa_api_ctrl->ipa_is_ready();
+}
+EXPORT_SYMBOL(ipa_is_ready);
+
+/**
+ * ipa_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa_proxy_clk_vote(void)
+{
+	IPA_API_DISPATCH(ipa_proxy_clk_vote);
+}
+EXPORT_SYMBOL(ipa_proxy_clk_vote);
+
+/**
+ * ipa_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa_proxy_clk_unvote(void)
+{
+	IPA_API_DISPATCH(ipa_proxy_clk_unvote);
+}
+EXPORT_SYMBOL(ipa_proxy_clk_unvote);
+
+/**
+ * ipa_get_hw_type() - Return IPA HW version
+ *
+ * Return value: enum ipa_hw_type
+ */
+enum ipa_hw_type ipa_get_hw_type(void)
+{
+	return ipa_api_hw_type;
+}
+EXPORT_SYMBOL(ipa_get_hw_type);
+
+/**
+ * ipa_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_is_client_handle_valid)
+		return false;
+	return ipa_api_ctrl->ipa_is_client_handle_valid(clnt_hdl);
+}
+EXPORT_SYMBOL(ipa_is_client_handle_valid);
+
+/**
+ * ipa_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_client_mapping, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_client_mapping);
+
+/**
+ * ipa_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx: IPA end-point index
+ *
+ * Return value: IPA_RM resource related to the pipe, -1 if a resource was not
+ * found.
+ */
+enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_rm_resource_from_ep, pipe_idx);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_rm_resource_from_ep);
+
+/**
+ * ipa_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt)
+		return false;
+	return ipa_api_ctrl->ipa_get_modem_cfg_emb_pipe_flt();
+}
+EXPORT_SYMBOL(ipa_get_modem_cfg_emb_pipe_flt);
+
+/**
+ * ipa_get_transport_type()- Return ipa_ctx->transport_prototype
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa_get_transport_type(void)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_get_transport_type);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_transport_type);
+
+/**
+ * ipa_get_smmu_domain()- Return the smmu domain
+ *
+ * Return value: pointer to iommu domain if smmu_cb valid, NULL otherwise
+ */
+struct iommu_domain *ipa_get_smmu_domain(void)
+{
+	struct iommu_domain *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_smmu_domain);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_smmu_domain);
+
+/**
+ * ipa_disable_apps_wan_cons_deaggr()- set
+ * ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_disable_apps_wan_cons_deaggr, agg_size,
+		agg_count);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_disable_apps_wan_cons_deaggr);
+
+/**
+ * ipa_get_dma_dev()- Returns ipa_ctx dma dev pointer
+ *
+ * Return value: pointer to ipa_ctx dma dev pointer
+ */
+struct device *ipa_get_dma_dev(void)
+{
+	struct device *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_dma_dev);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_dma_dev);
+
+/**
+ * ipa_release_wdi_mapping() - release iommu mapping
+ *
+ *
+ * @num_buffers: number of buffers to be released
+ *
+ * @info: pointer to wdi buffers info array
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_release_wdi_mapping, num_buffers, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_release_wdi_mapping);
+
+/**
+ * ipa_create_wdi_mapping() - Perform iommu mapping
+ *
+ *
+ * @num_buffers: number of buffers to be mapped
+ *
+ * @info: pointer to wdi buffers info array
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_create_wdi_mapping, num_buffers, info);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_create_wdi_mapping);
+
+/**
+ * ipa_get_gsi_ep_info() - provide gsi ep information
+ * @ipa_ep_idx: IPA endpoint index
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+{
+	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_get_gsi_ep_info)
+		return NULL;
+	return ipa_api_ctrl->ipa_get_gsi_ep_info(ipa_ep_idx);
+}
+EXPORT_SYMBOL(ipa_get_gsi_ep_info);
+
+/**
+ * ipa_stop_gsi_channel()- Stops a GSI channel in IPA
+ * @clnt_hdl: client handle for this pipe
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_stop_gsi_channel, clnt_hdl);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_stop_gsi_channel);
+
+/**
+ * ipa_get_version_string() - Get string representation of IPA version
+ * @ver: IPA version
+ *
+ * Return: Constant string representation
+ */
+const char *ipa_get_version_string(enum ipa_hw_type ver)
+{
+	const char *str;
+
+	switch (ver) {
+	case IPA_HW_v1_0:
+		str = "1.0";
+		break;
+	case IPA_HW_v1_1:
+		str = "1.1";
+		break;
+	case IPA_HW_v2_0:
+		str = "2.0";
+		break;
+	case IPA_HW_v2_1:
+		str = "2.1";
+		break;
+	case IPA_HW_v2_5:
+		str = "2.5/2.6";
+		break;
+	case IPA_HW_v2_6L:
+		str = "2.6L";
+		break;
+	case IPA_HW_v3_0:
+		str = "3.0";
+		break;
+	case IPA_HW_v3_1:
+		str = "3.1";
+		break;
+	case IPA_HW_v3_5:
+		str = "3.5";
+		break;
+	case IPA_HW_v3_5_1:
+		str = "3.5.1";
+		break;
+	default:
+		str = "Invalid version";
+		break;
+	}
+
+	return str;
+}
+EXPORT_SYMBOL(ipa_get_version_string);
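+
+/*
+ * Illustrative use of the two helpers above (an assumption, not part of the
+ * driver): logging the detected IPA hardware revision in readable form.
+ *
+ *	pr_info("IPA HW rev: %s\n",
+ *		ipa_get_version_string(ipa_get_hw_type()));
+ */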
+
+static const struct of_device_id ipa_plat_drv_match[] = {
+	{ .compatible = "qcom,ipa", },
+	{ .compatible = "qcom,ipa-smmu-ap-cb", },
+	{ .compatible = "qcom,ipa-smmu-wlan-cb", },
+	{ .compatible = "qcom,ipa-smmu-uc-cb", },
+	{ .compatible = "qcom,smp2pgpio-map-ipa-1-in", },
+	{ .compatible = "qcom,smp2pgpio-map-ipa-1-out", },
+	{}
+};
+
+static int ipa_generic_plat_drv_probe(struct platform_device *pdev_p)
+{
+	int result;
+
+	/*
+	 * The IPA probe function can be called multiple times, as the same
+	 * probe function handles multiple compatible strings.
+	 */
+	pr_debug("ipa: IPA driver probing started for %s\n",
+		pdev_p->dev.of_node->name);
+
+	if (!ipa_api_ctrl) {
+		ipa_api_ctrl = kzalloc(sizeof(*ipa_api_ctrl), GFP_KERNEL);
+		if (!ipa_api_ctrl)
+			return -ENOMEM;
+
+		/* Get IPA HW Version */
+		result = of_property_read_u32(pdev_p->dev.of_node,
+			"qcom,ipa-hw-ver", &ipa_api_hw_type);
+		if ((result) || (ipa_api_hw_type == 0)) {
+			pr_err("ipa: get resource failed for ipa-hw-ver!\n");
+			kfree(ipa_api_ctrl);
+			ipa_api_ctrl = NULL;
+			return -ENODEV;
+		}
+		pr_debug("ipa: ipa_api_hw_type = %d\n", ipa_api_hw_type);
+	}
+
+	/* call probe based on IPA HW version */
+	switch (ipa_api_hw_type) {
+	case IPA_HW_v2_0:
+	case IPA_HW_v2_1:
+	case IPA_HW_v2_5:
+	case IPA_HW_v2_6L:
+		result = ipa_plat_drv_probe(pdev_p, ipa_api_ctrl,
+			ipa_plat_drv_match);
+		break;
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+	case IPA_HW_v3_5:
+	case IPA_HW_v3_5_1:
+		result = ipa3_plat_drv_probe(pdev_p, ipa_api_ctrl,
+			ipa_plat_drv_match);
+		break;
+	default:
+		pr_err("ipa: unsupported version %d\n", ipa_api_hw_type);
+		return -EPERM;
+	}
+
+	if (result && result != -EPROBE_DEFER)
+		pr_err("ipa: ipa_plat_drv_probe failed\n");
+
+	return result;
+}
+
+static int ipa_ap_suspend(struct device *dev)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_ap_suspend, dev);
+
+	return ret;
+}
+
+static int ipa_ap_resume(struct device *dev)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_ap_resume, dev);
+
+	return ret;
+}
+
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+			      void *user_data)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_register_ipa_ready_cb,
+				ipa_ready_cb, user_data);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_register_ipa_ready_cb);
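+
+/*
+ * Illustrative readiness check (an assumption, not part of the driver):
+ * client drivers may probe before IPA is up, so a common pattern is to call
+ * the callback directly when ipa_is_ready() already holds and register it
+ * otherwise. my_ipa_ready_cb and my_priv are hypothetical.
+ *
+ *	if (ipa_is_ready())
+ *		my_ipa_ready_cb(my_priv);
+ *	else if (ipa_register_ipa_ready_cb(my_ipa_ready_cb, my_priv))
+ *		pr_err("failed to register IPA ready callback\n");
+ */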
+
+/**
+ * ipa_inc_client_enable_clks() - Increase active clients counter, and
+ * enable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_INC_XXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	IPA_API_DISPATCH(ipa_inc_client_enable_clks, id);
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks);
+
+/**
+ * ipa_dec_client_disable_clks() - Decrease active clients counter, and
+ * disable ipa clocks if necessary
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA_ACTIVE_CLIENTS_DEC_XXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+	IPA_API_DISPATCH(ipa_dec_client_disable_clks, id);
+}
+EXPORT_SYMBOL(ipa_dec_client_disable_clks);
+
+/**
+ * ipa_inc_client_enable_clks_no_block() - Only increment the number of active
+ * clients if no asynchronous action is required. Asynchronous actions are
+ * locking a mutex and waking up IPA HW.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ *
+ * Return codes: 0 for success
+ *		-EPERM if an asynchronous action should have been done
+ */
+int ipa_inc_client_enable_clks_no_block(
+	struct ipa_active_client_logging_info *id)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_inc_client_enable_clks_no_block, id);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_inc_client_enable_clks_no_block);
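+
+/*
+ * Illustrative atomic-context pattern (an assumption, not part of the
+ * driver): from a context that must not sleep, try the no_block variant and
+ * defer the work if the clock vote would have required blocking. log_info,
+ * my_wq and my_work are hypothetical.
+ *
+ *	if (ipa_inc_client_enable_clks_no_block(&log_info)) {
+ *		queue_work(my_wq, &my_work);
+ *		return;
+ *	}
+ *	... work that needs IPA clocks ...
+ *	ipa_dec_client_disable_clks(&log_info);
+ */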
+
+/**
+* ipa_suspend_resource_no_block() - suspend client endpoints related to the
+* IPA_RM resource and decrement active clients counter. This function is
+* guaranteed to avoid sleeping.
+*
+* @resource: [IN] IPA Resource Manager resource
+*
+* Return codes: 0 on success, negative on failure.
+*/
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_suspend_resource_no_block, resource);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_no_block);
+
+/**
+ * ipa_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_resume_resource(enum ipa_rm_resource_name resource)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_resume_resource, resource);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_resume_resource);
+
+/**
+ * ipa_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_suspend_resource_sync, resource);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_suspend_resource_sync);
+
+/**
+ * ipa_set_required_perf_profile() - set IPA to the specified performance
+ *	profile based on the bandwidth, unless minimum voltage required is
+ *	higher. In this case the floor_voltage specified will be used.
+ * @floor_voltage: minimum voltage to operate
+ * @bandwidth_mbps: needed bandwidth from IPA
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+	u32 bandwidth_mbps)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_set_required_perf_profile, floor_voltage,
+		bandwidth_mbps);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_set_required_perf_profile);
+
+/**
+ * ipa_get_ipc_logbuf() - return a pointer to IPA driver IPC log
+ */
+void *ipa_get_ipc_logbuf(void)
+{
+	void *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_ipc_logbuf);
+
+/**
+ * ipa_get_ipc_logbuf_low() - return a pointer to IPA driver IPC low prio log
+ */
+void *ipa_get_ipc_logbuf_low(void)
+{
+	void *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_ipc_logbuf_low);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_ipc_logbuf_low);
+
+/**
+ * ipa_assert() - general function for assertion
+ */
+void ipa_assert(void)
+{
+	pr_err("IPA: unrecoverable error has occurred, asserting\n");
+	BUG();
+}
+
+/**
+ * ipa_rx_poll() - Poll the rx packets from IPA HW in the
+ * softirq context
+ *
+ * @budget: number of packets to be polled in single iteration
+ *
+ * Return codes: >= 0  : Actual number of packets polled
+ *
+ */
+int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_rx_poll, clnt_hdl, budget);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_rx_poll);
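+
+/*
+ * Illustrative NAPI-style poll (an assumption, not part of the driver): a
+ * netdev client would typically call this from its napi poll routine and
+ * complete NAPI once fewer packets than the budget were processed.
+ * my_clnt_hdl is a hypothetical handle obtained from ipa_setup_sys_pipe().
+ *
+ *	static int my_napi_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int done = ipa_rx_poll(my_clnt_hdl, budget);
+ *
+ *		if (done < budget)
+ *			napi_complete(napi);
+ *		return done;
+ *	}
+ */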
+
+/**
+ * ipa_recycle_wan_skb() - Recycle the WAN skb
+ * @skb: skb that needs to be recycled
+ */
+void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+	IPA_API_DISPATCH(ipa_recycle_wan_skb, skb);
+}
+EXPORT_SYMBOL(ipa_recycle_wan_skb);
+
+/**
+ * ipa_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_setup_uc_ntn_pipes, inp,
+		notify, priv, hdr_len, outp);
+
+	return ret;
+}
+
+/**
+ * ipa_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl)
+{
+	int ret;
+
+	IPA_API_DISPATCH_RETURN(ipa_tear_down_uc_offload_pipes, ipa_ep_idx_ul,
+		ipa_ep_idx_dl);
+
+	return ret;
+}
+
+/**
+ * ipa_get_pdev() - return a pointer to IPA dev struct
+ *
+ * Return value: a pointer to IPA dev struct
+ *
+ */
+struct device *ipa_get_pdev(void)
+{
+	struct device *ret;
+
+	IPA_API_DISPATCH_RETURN_PTR(ipa_get_pdev);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_get_pdev);
+
+static const struct dev_pm_ops ipa_pm_ops = {
+	.suspend_noirq = ipa_ap_suspend,
+	.resume_noirq = ipa_ap_resume,
+};
+
+static struct platform_driver ipa_plat_drv = {
+	.probe = ipa_generic_plat_drv_probe,
+	.driver = {
+		.name = DRV_NAME,
+		.owner = THIS_MODULE,
+		.pm = &ipa_pm_ops,
+		.of_match_table = ipa_plat_drv_match,
+	},
+};
+
+static int __init ipa_module_init(void)
+{
+	pr_debug("IPA module init\n");
+
+	/* Register as a platform device driver */
+	return platform_driver_register(&ipa_plat_drv);
+}
+subsys_initcall(ipa_module_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_api.h b/drivers/platform/msm/ipa/ipa_api.h
new file mode 100644
index 0000000..171c9fb
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_api.h
@@ -0,0 +1,402 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_uc_offload.h>
+#include "ipa_common_i.h"
+
+#ifndef _IPA_API_H_
+#define _IPA_API_H_
+
+struct ipa_api_controller {
+	int (*ipa_connect)(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps, u32 *clnt_hdl);
+
+	int (*ipa_disconnect)(u32 clnt_hdl);
+
+	int (*ipa_reset_endpoint)(u32 clnt_hdl);
+
+	int (*ipa_clear_endpoint_delay)(u32 clnt_hdl);
+
+	int (*ipa_disable_endpoint)(u32 clnt_hdl);
+
+	int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_hdr)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_hdr_ext)(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_mode)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_aggr)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_deaggr)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_route)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_holb)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_cfg)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_holb_by_client)(enum ipa_client_type client,
+		const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+	int (*ipa_cfg_ep_ctrl)(u32 clnt_hdl,
+		const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+	int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs);
+
+	int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls);
+
+	int (*ipa_commit_hdr)(void);
+
+	int (*ipa_reset_hdr)(void);
+
+	int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup);
+
+	int (*ipa_put_hdr)(u32 hdr_hdl);
+
+	int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy);
+
+	int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+	int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+	int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules);
+
+	int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls);
+
+	int (*ipa_commit_rt)(enum ipa_ip_type ip);
+
+	int (*ipa_reset_rt)(enum ipa_ip_type ip);
+
+	int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup);
+
+	int (*ipa_put_rt_tbl)(u32 rt_tbl_hdl);
+
+	int (*ipa_query_rt_index)(struct ipa_ioc_get_rt_tbl_indx *in);
+
+	int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules);
+
+	int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules);
+
+	int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls);
+
+	int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules);
+
+	int (*ipa_commit_flt)(enum ipa_ip_type ip);
+
+	int (*ipa_reset_flt)(enum ipa_ip_type ip);
+
+	int (*allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
+
+	int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init);
+
+	int (*ipa_nat_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
+
+	int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del);
+
+	int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff,
+		ipa_msg_free_fn callback);
+
+	int (*ipa_register_pull_msg)(struct ipa_msg_meta *meta,
+		ipa_msg_pull_fn callback);
+
+	int (*ipa_deregister_pull_msg)(struct ipa_msg_meta *meta);
+
+	int (*ipa_register_intf)(const char *name,
+		const struct ipa_tx_intf *tx,
+		const struct ipa_rx_intf *rx);
+
+	int (*ipa_register_intf_ext)(const char *name,
+		const struct ipa_tx_intf *tx,
+		const struct ipa_rx_intf *rx,
+		const struct ipa_ext_intf *ext);
+
+	int (*ipa_deregister_intf)(const char *name);
+
+	int (*ipa_set_aggr_mode)(enum ipa_aggr_mode mode);
+
+	int (*ipa_set_qcncm_ndp_sig)(char sig[3]);
+
+	int (*ipa_set_single_ndp_per_mbim)(bool enable);
+
+	int (*ipa_tx_dp)(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+	int (*ipa_tx_dp_mul)(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
+
+	void (*ipa_free_skb)(struct ipa_rx_data *);
+
+	int (*ipa_setup_sys_pipe)(struct ipa_sys_connect_params *sys_in,
+		u32 *clnt_hdl);
+
+	int (*ipa_teardown_sys_pipe)(u32 clnt_hdl);
+
+	int (*ipa_sys_setup)(struct ipa_sys_connect_params *sys_in,
+		unsigned long *ipa_bam_hdl,
+		u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+	int (*ipa_sys_teardown)(u32 clnt_hdl);
+
+	int (*ipa_sys_update_gsi_hdls)(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+		unsigned long gsi_ev_hdl);
+
+	int (*ipa_connect_wdi_pipe)(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+
+	int (*ipa_disconnect_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_enable_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_disable_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_resume_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_suspend_wdi_pipe)(u32 clnt_hdl);
+
+	int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
+
+	u16 (*ipa_get_smem_restr_bytes)(void);
+
+	int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
+
+	int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
+
+	int (*ipa_uc_dereg_rdyCB)(void);
+
+	int (*teth_bridge_init)(struct teth_bridge_init_params *params);
+
+	int (*teth_bridge_disconnect)(enum ipa_client_type client);
+
+	int (*teth_bridge_connect)(
+		struct teth_bridge_connect_params *connect_params);
+
+	void (*ipa_set_client)(
+		int index, enum ipacm_client_enum client, bool uplink);
+
+	enum ipacm_client_enum (*ipa_get_client)(int pipe_idx);
+
+	bool (*ipa_get_client_uplink)(int pipe_idx);
+
+	int (*ipa_dma_init)(void);
+
+	int (*ipa_dma_enable)(void);
+
+	int (*ipa_dma_disable)(void);
+
+	int (*ipa_dma_sync_memcpy)(u64 dest, u64 src, int len);
+
+	int (*ipa_dma_async_memcpy)(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param);
+
+	int (*ipa_dma_uc_memcpy)(phys_addr_t dest, phys_addr_t src, int len);
+
+	void (*ipa_dma_destroy)(void);
+
+	bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client);
+
+	int (*ipa_generate_tag_process)(void);
+
+	int (*ipa_disable_sps_pipe)(enum ipa_client_type client);
+
+	void (*ipa_set_tag_process_before_gating)(bool val);
+
+	int (*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params);
+
+	int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+
+	int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl);
+
+	bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client);
+
+	int (*ipa_qmi_disable_force_clear)(u32 request_id);
+
+	int (*ipa_qmi_enable_force_clear_datapath_send)(
+		struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+	int (*ipa_qmi_disable_force_clear_datapath_send)(
+		struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+	bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client);
+
+	int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client);
+
+	int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client);
+
+	void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb);
+
+	int (*ipa_mhi_query_ch_info)(enum ipa_client_type client,
+			struct gsi_chan_info *ch_info);
+
+	int (*ipa_mhi_resume_channels_internal)(
+			enum ipa_client_type client,
+			bool LPTransitionRejected,
+			bool brstmode_enabled,
+			union __packed gsi_channel_scratch ch_scratch,
+			u8 index);
+
+	int  (*ipa_mhi_destroy_channel)(enum ipa_client_type client);
+
+	int (*ipa_uc_mhi_send_dl_ul_sync_info)
+		(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+
+	int (*ipa_uc_mhi_init)
+		(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+
+	void (*ipa_uc_mhi_cleanup)(void);
+
+	int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size);
+
+	int (*ipa_uc_mhi_reset_channel)(int channelHandle);
+
+	int (*ipa_uc_mhi_suspend_channel)(int channelHandle);
+
+	int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle);
+
+	int (*ipa_uc_state_check)(void);
+
+	int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in);
+
+	int (*ipa_add_interrupt_handler)(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+	int (*ipa_remove_interrupt_handler)(enum ipa_irq_type interrupt);
+
+	int (*ipa_restore_suspend_handler)(void);
+
+	void (*ipa_bam_reg_dump)(void);
+
+	int (*ipa_get_ep_mapping)(enum ipa_client_type client);
+
+	bool (*ipa_is_ready)(void);
+
+	void (*ipa_proxy_clk_vote)(void);
+
+	void (*ipa_proxy_clk_unvote)(void);
+
+	bool (*ipa_is_client_handle_valid)(u32 clnt_hdl);
+
+	enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx);
+
+	enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx);
+
+	bool (*ipa_get_modem_cfg_emb_pipe_flt)(void);
+
+	enum ipa_transport_type (*ipa_get_transport_type)(void);
+
+	int (*ipa_ap_suspend)(struct device *dev);
+
+	int (*ipa_ap_resume)(struct device *dev);
+
+	int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
+
+	struct iommu_domain *(*ipa_get_smmu_domain)(void);
+
+	int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
+						uint32_t agg_count);
+
+	struct device *(*ipa_get_dma_dev)(void);
+
+	int (*ipa_release_wdi_mapping)(u32 num_buffers,
+		struct ipa_wdi_buffer_info *info);
+
+	int (*ipa_create_wdi_mapping)(u32 num_buffers,
+		struct ipa_wdi_buffer_info *info);
+
+	struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)(int ipa_ep_idx);
+
+	int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
+		void *user_data);
+
+	void (*ipa_inc_client_enable_clks)(
+		struct ipa_active_client_logging_info *id);
+
+	void (*ipa_dec_client_disable_clks)(
+		struct ipa_active_client_logging_info *id);
+
+	int (*ipa_inc_client_enable_clks_no_block)(
+		struct ipa_active_client_logging_info *id);
+
+	int (*ipa_suspend_resource_no_block)(
+		enum ipa_rm_resource_name resource);
+
+	int (*ipa_resume_resource)(enum ipa_rm_resource_name name);
+
+	int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource);
+
+	int (*ipa_set_required_perf_profile)(
+		enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps);
+
+	void *(*ipa_get_ipc_logbuf)(void);
+
+	void *(*ipa_get_ipc_logbuf_low)(void);
+
+	int (*ipa_rx_poll)(u32 clnt_hdl, int budget);
+
+	void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
+
+	int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *);
+
+	int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl);
+
+	struct device *(*ipa_get_pdev)(void);
+};
+
+#ifdef CONFIG_IPA
+int ipa_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match);
+#else
+static inline int ipa_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	return -ENODEV;
+}
+#endif /* (CONFIG_IPA) */
+
+#ifdef CONFIG_IPA3
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match);
+#else
+static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	return -ENODEV;
+}
+#endif /* (CONFIG_IPA3) */
+
+#endif /* _IPA_API_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_clients/Makefile b/drivers/platform/msm/ipa/ipa_clients/Makefile
new file mode 100644
index 0000000..61cef2d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
+obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
new file mode 100644
index 0000000..a02247d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -0,0 +1,2629 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi_client"
+#define IPA_MHI_DBG(fmt, args...) \
+	pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define IPA_MHI_ERR(fmt, args...) \
+	pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPA_MHI_FUNC_ENTRY() \
+	IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+	IPA_MHI_DBG("EXIT\n")
+
+#define IPA_MHI_RM_TIMEOUT_MSEC 10000
+#define IPA_MHI_CH_EMPTY_TIMEOUT_MSEC 10
+
+#define IPA_MHI_SUSPEND_SLEEP_MIN 900
+#define IPA_MHI_SUSPEND_SLEEP_MAX 1100
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 1
+
+#if (IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) > \
+	(IPA_MHI_GSI_ER_END - IPA_MHI_GSI_ER_START)
+#error not enough event rings for MHI
+#endif
+
+/* bit #40 in the address should be asserted for MHI transfers over PCIe */
+#define IPA_MHI_CLIENT_HOST_ADDR_COND(addr) \
+	((ipa_mhi_client_ctx->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
+
+enum ipa_mhi_rm_state {
+	IPA_MHI_RM_STATE_RELEASED,
+	IPA_MHI_RM_STATE_REQUESTED,
+	IPA_MHI_RM_STATE_GRANTED,
+	IPA_MHI_RM_STATE_MAX
+};
+
+enum ipa_mhi_state {
+	IPA_MHI_STATE_INITIALIZED,
+	IPA_MHI_STATE_READY,
+	IPA_MHI_STATE_STARTED,
+	IPA_MHI_STATE_SUSPEND_IN_PROGRESS,
+	IPA_MHI_STATE_SUSPENDED,
+	IPA_MHI_STATE_RESUME_IN_PROGRESS,
+	IPA_MHI_STATE_MAX
+};
+
+static char *ipa_mhi_state_str[] = {
+	__stringify(IPA_MHI_STATE_INITIALIZED),
+	__stringify(IPA_MHI_STATE_READY),
+	__stringify(IPA_MHI_STATE_STARTED),
+	__stringify(IPA_MHI_STATE_SUSPEND_IN_PROGRESS),
+	__stringify(IPA_MHI_STATE_SUSPENDED),
+	__stringify(IPA_MHI_STATE_RESUME_IN_PROGRESS),
+};
+
+#define MHI_STATE_STR(state) \
+	(((state) >= 0 && (state) < IPA_MHI_STATE_MAX) ? \
+		ipa_mhi_state_str[(state)] : \
+		"INVALID")
+
+enum ipa_mhi_dma_dir {
+	IPA_MHI_DMA_TO_HOST,
+	IPA_MHI_DMA_FROM_HOST,
+};
+
+/**
+ * struct ipa_mhi_channel_ctx - MHI Channel context
+ * @valid: entry is valid
+ * @id: MHI channel ID
+ * @index: channel context index
+ * @client: IPA Client
+ * @state: Channel state
+ */
+struct ipa_mhi_channel_ctx {
+	bool valid;
+	u8 id;
+	u8 index;
+	enum ipa_client_type client;
+	enum ipa_hw_mhi_channel_states state;
+	bool stop_in_proc;
+	struct gsi_chan_info ch_info;
+	u64 channel_context_addr;
+	struct ipa_mhi_ch_ctx ch_ctx_host;
+	u64 event_context_addr;
+	struct ipa_mhi_ev_ctx ev_ctx_host;
+	bool brstmode_enabled;
+	union __packed gsi_channel_scratch ch_scratch;
+	unsigned long cached_gsi_evt_ring_hdl;
+};
+
+struct ipa_mhi_client_ctx {
+	enum ipa_mhi_state state;
+	spinlock_t state_lock;
+	mhi_client_cb cb_notify;
+	void *cb_priv;
+	struct completion rm_prod_granted_comp;
+	enum ipa_mhi_rm_state rm_cons_state;
+	struct completion rm_cons_comp;
+	bool trigger_wakeup;
+	bool wakeup_notified;
+	struct workqueue_struct *wq;
+	struct ipa_mhi_channel_ctx ul_channels[IPA_MHI_MAX_UL_CHANNELS];
+	struct ipa_mhi_channel_ctx dl_channels[IPA_MHI_MAX_DL_CHANNELS];
+	u32 total_channels;
+	struct ipa_mhi_msi_info msi;
+	u32 mmio_addr;
+	u32 first_ch_idx;
+	u32 first_er_idx;
+	u32 host_ctrl_addr;
+	u32 host_data_addr;
+	u64 channel_context_array_addr;
+	u64 event_context_array_addr;
+	u32 qmi_req_id;
+	u32 use_ipadma;
+	bool assert_bit40;
+	bool test_mode;
+};
+
+static struct ipa_mhi_client_ctx *ipa_mhi_client_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define IPA_MHI_MAX_MSG_LEN 512
+static char dbg_buff[IPA_MHI_MAX_MSG_LEN];
+static struct dentry *dent;
+
+static char *ipa_mhi_channel_state_str[] = {
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_DISABLE),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_ENABLE),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_RUN),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_SUSPEND),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_STOP),
+	__stringify(IPA_HW_MHI_CHANNEL_STATE_ERROR),
+};
+
+#define MHI_CH_STATE_STR(state) \
+	(((state) >= 0 && (state) <= IPA_HW_MHI_CHANNEL_STATE_ERROR) ? \
+	ipa_mhi_channel_state_str[(state)] : \
+	"INVALID")
+
+static int ipa_mhi_read_write_host(enum ipa_mhi_dma_dir dir, void *dev_addr,
+	u64 host_addr, int size)
+{
+	struct ipa_mem_buffer mem;
+	int res;
+	struct device *pdev;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (ipa_mhi_client_ctx->use_ipadma) {
+		pdev = ipa_get_dma_dev();
+		host_addr = IPA_MHI_CLIENT_HOST_ADDR_COND(host_addr);
+
+		mem.size = size;
+		mem.base = dma_alloc_coherent(pdev, mem.size,
+			&mem.phys_base, GFP_KERNEL);
+		if (!mem.base) {
+			IPA_MHI_ERR(
+				"dma_alloc_coherent failed, DMA buff size %d\n"
+					, mem.size);
+			return -ENOMEM;
+		}
+
+		if (dir == IPA_MHI_DMA_FROM_HOST) {
+			res = ipa_dma_sync_memcpy(mem.phys_base, host_addr,
+				size);
+			if (res) {
+				IPA_MHI_ERR(
+					"ipa_dma_sync_memcpy from host fail %d\n"
+					, res);
+				goto fail_memcopy;
+			}
+			memcpy(dev_addr, mem.base, size);
+		} else {
+			memcpy(mem.base, dev_addr, size);
+			res = ipa_dma_sync_memcpy(host_addr, mem.phys_base,
+				size);
+			if (res) {
+				IPA_MHI_ERR(
+					"ipa_dma_sync_memcpy to host fail %d\n"
+					, res);
+				goto fail_memcopy;
+			}
+		}
+		dma_free_coherent(pdev, mem.size, mem.base,
+			mem.phys_base);
+	} else {
+		void *host_ptr;
+
+		if (!ipa_mhi_client_ctx->test_mode)
+			host_ptr = ioremap(host_addr, size);
+		else
+			host_ptr = phys_to_virt(host_addr);
+		if (!host_ptr) {
+			IPA_MHI_ERR("ioremap failed for 0x%llx\n", host_addr);
+			return -EFAULT;
+		}
+		if (dir == IPA_MHI_DMA_FROM_HOST)
+			memcpy(dev_addr, host_ptr, size);
+		else
+			memcpy(host_ptr, dev_addr, size);
+		if (!ipa_mhi_client_ctx->test_mode)
+			iounmap(host_ptr);
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_memcopy:
+	dma_free_coherent(ipa_get_dma_dev(), mem.size, mem.base,
+			mem.phys_base);
+	return res;
+}
+
+static int ipa_mhi_print_channel_info(struct ipa_mhi_channel_ctx *channel,
+	char *buff, int len)
+{
+	int nbytes = 0;
+
+	if (channel->valid) {
+		nbytes += scnprintf(&buff[nbytes],
+			len - nbytes,
+			"channel idx=%d ch_id=%d client=%d state=%s\n",
+			channel->index, channel->id, channel->client,
+			MHI_CH_STATE_STR(channel->state));
+
+		nbytes += scnprintf(&buff[nbytes],
+			len - nbytes,
+			"	ch_ctx=%llx\n",
+			channel->channel_context_addr);
+
+		nbytes += scnprintf(&buff[nbytes],
+			len - nbytes,
+			"	gsi_evt_ring_hdl=%ld ev_ctx=%llx\n",
+			channel->cached_gsi_evt_ring_hdl,
+			channel->event_context_addr);
+	}
+	return nbytes;
+}
+
+static int ipa_mhi_print_host_channel_ctx_info(
+		struct ipa_mhi_channel_ctx *channel, char *buff, int len)
+{
+	int res, nbytes = 0;
+	struct ipa_mhi_ch_ctx ch_ctx_host;
+
+	memset(&ch_ctx_host, 0, sizeof(ch_ctx_host));
+
+	/* reading ch context from host */
+	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+		&ch_ctx_host, channel->channel_context_addr,
+		sizeof(ch_ctx_host));
+	if (res) {
+		nbytes += scnprintf(&buff[nbytes], len - nbytes,
+			"Failed to read from host %d\n", res);
+		return nbytes;
+	}
+
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"ch_id: %d\n", channel->id);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"chstate: 0x%x\n", ch_ctx_host.chstate);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"brstmode: 0x%x\n", ch_ctx_host.brstmode);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"chtype: 0x%x\n", ch_ctx_host.chtype);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"erindex: 0x%x\n", ch_ctx_host.erindex);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"rbase: 0x%llx\n", ch_ctx_host.rbase);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"rlen: 0x%llx\n", ch_ctx_host.rlen);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"rp: 0x%llx\n", ch_ctx_host.rp);
+	nbytes += scnprintf(&buff[nbytes], len - nbytes,
+		"wp: 0x%llx\n", ch_ctx_host.wp);
+
+	return nbytes;
+}
+
+static ssize_t ipa_mhi_debugfs_stats(struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int nbytes = 0;
+	int i;
+	struct ipa_mhi_channel_ctx *channel;
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+		IPA_MHI_MAX_MSG_LEN - nbytes,
+		"IPA MHI state: %s\n",
+		MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		nbytes += ipa_mhi_print_channel_info(channel,
+			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+		nbytes += ipa_mhi_print_channel_info(channel,
+			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_uc_stats(struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int nbytes = 0;
+
+	nbytes += ipa_uc_mhi_print_stats(dbg_buff, IPA_MHI_MAX_MSG_LEN);
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_mhi_debugfs_dump_host_ch_ctx_arr(struct file *file,
+	char __user *ubuf,
+	size_t count,
+	loff_t *ppos)
+{
+	int i, nbytes = 0;
+	struct ipa_mhi_channel_ctx *channel;
+
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_INITIALIZED ||
+	    ipa_mhi_client_ctx->state == IPA_MHI_STATE_READY) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+		IPA_MHI_MAX_MSG_LEN - nbytes,
+			"Cannot dump host channel context ");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+				IPA_MHI_MAX_MSG_LEN - nbytes,
+				"before IPA MHI was STARTED\n");
+		return simple_read_from_buffer(ubuf, count, ppos,
+			dbg_buff, nbytes);
+	}
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"IPA MHI is suspended, cannot dump channel ctx array");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			" from host -PCIe can be in D3 state\n");
+		return simple_read_from_buffer(ubuf, count, ppos,
+			dbg_buff, nbytes);
+	}
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"channel context array - dump from host\n");
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"***** UL channels *******\n");
+
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		if (!channel->valid)
+			continue;
+		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+			&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			IPA_MHI_MAX_MSG_LEN - nbytes,
+			"\n***** DL channels *******\n");
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+		if (!channel->valid)
+			continue;
+		nbytes += ipa_mhi_print_host_channel_ctx_info(channel,
+			&dbg_buff[nbytes], IPA_MHI_MAX_MSG_LEN - nbytes);
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+const struct file_operations ipa_mhi_stats_ops = {
+	.read = ipa_mhi_debugfs_stats,
+};
+
+const struct file_operations ipa_mhi_uc_stats_ops = {
+	.read = ipa_mhi_debugfs_uc_stats,
+};
+
+const struct file_operations ipa_mhi_dump_host_ch_ctx_ops = {
+	.read = ipa_mhi_debugfs_dump_host_ch_ctx_arr,
+};
+
+
+static void ipa_mhi_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+		S_IWUSR | S_IWGRP;
+	struct dentry *file;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	dent = debugfs_create_dir("ipa_mhi", 0);
+	if (IS_ERR(dent)) {
+		IPA_MHI_ERR("fail to create folder ipa_mhi\n");
+		return;
+	}
+
+	file = debugfs_create_file("stats", read_only_mode, dent,
+		0, &ipa_mhi_stats_ops);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	file = debugfs_create_file("uc_stats", read_only_mode, dent,
+		0, &ipa_mhi_uc_stats_ops);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file uc_stats\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("use_ipadma", read_write_mode, dent,
+		&ipa_mhi_client_ctx->use_ipadma);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file use_ipadma\n");
+		goto fail;
+	}
+
+	file = debugfs_create_file("dump_host_channel_ctx_array",
+		read_only_mode, dent, 0, &ipa_mhi_dump_host_ch_ctx_ops);
+	if (!file || IS_ERR(file)) {
+		IPA_MHI_ERR("fail to create file dump_host_channel_ctx_arr\n");
+		goto fail;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
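+
+/*
+ * Assumed counterpart to the empty !CONFIG_DEBUG_FS stub below: tears down
+ * the debugfs tree created by ipa_mhi_debugfs_init().
+ */
+static void ipa_mhi_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}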
+
+#else
+static void ipa_mhi_debugfs_init(void) {}
+static void ipa_mhi_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+static union IpaHwMhiDlUlSyncCmdData_t ipa_cached_dl_ul_sync_info;
+
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_wakeup_work, ipa_mhi_wq_notify_wakeup);
+
+static void ipa_mhi_wq_notify_ready(struct work_struct *work);
+static DECLARE_WORK(ipa_mhi_notify_ready_work, ipa_mhi_wq_notify_ready);
+
+/**
+ * ipa_mhi_notify_wakeup() - Schedule work to notify data available
+ *
+ * This function will schedule a work to notify data available event.
+ * In case this function is called more than once, only one notification will
+ * be sent to the MHI client driver. No further notifications will be sent
+ * until the IPA MHI state becomes STARTED.
+ */
+static void ipa_mhi_notify_wakeup(void)
+{
+	IPA_MHI_FUNC_ENTRY();
+	if (ipa_mhi_client_ctx->wakeup_notified) {
+		IPA_MHI_DBG("wakeup already called\n");
+		return;
+	}
+	queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_wakeup_work);
+	ipa_mhi_client_ctx->wakeup_notified = true;
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_rm_cons_request() - callback function for IPA RM request resource
+ *
+ * In case IPA MHI is not suspended, MHI CONS will be granted immediately.
+ * In case IPA MHI is suspended, MHI CONS will be granted after resume.
+ */
+static int ipa_mhi_rm_cons_request(void)
+{
+	unsigned long flags;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	IPA_MHI_DBG("%s\n", MHI_STATE_STR(ipa_mhi_client_ctx->state));
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_REQUESTED;
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_STARTED) {
+		ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+		res = 0;
+	} else if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED) {
+		ipa_mhi_notify_wakeup();
+		res = -EINPROGRESS;
+	} else if (ipa_mhi_client_ctx->state ==
+			IPA_MHI_STATE_SUSPEND_IN_PROGRESS) {
+		/* wakeup event will be triggered after suspend finishes */
+		ipa_mhi_client_ctx->trigger_wakeup = true;
+		res = -EINPROGRESS;
+	} else {
+		res = -EINPROGRESS;
+	}
+
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_DBG("EXIT with %d\n", res);
+	return res;
+}
+
+static int ipa_mhi_rm_cons_release(void)
+{
+	unsigned long flags;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+	complete_all(&ipa_mhi_client_ctx->rm_cons_comp);
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_rm_prod_notify(void *user_data, enum ipa_rm_event event,
+	unsigned long data)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPA_MHI_DBG("IPA_RM_RESOURCE_GRANTED\n");
+		complete_all(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+		break;
+
+	case IPA_RM_RESOURCE_RELEASED:
+		IPA_MHI_DBG("IPA_RM_RESOURCE_RELEASED\n");
+		break;
+
+	default:
+		IPA_MHI_ERR("unexpected event %d\n", event);
+		WARN_ON(1);
+		break;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_wakeup() - Notify MHI client on data available
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on data available event.
+ */
+static void ipa_mhi_wq_notify_wakeup(struct work_struct *work)
+{
+	IPA_MHI_FUNC_ENTRY();
+	ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+		IPA_MHI_EVENT_DATA_AVAILABLE, 0);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_wq_notify_ready() - Notify MHI client on ready
+ *
+ * This function is called from IPA MHI workqueue to notify
+ * MHI client driver on ready event when IPA uC is loaded
+ */
+static void ipa_mhi_wq_notify_ready(struct work_struct *work)
+{
+	IPA_MHI_FUNC_ENTRY();
+	ipa_mhi_client_ctx->cb_notify(ipa_mhi_client_ctx->cb_priv,
+		IPA_MHI_EVENT_READY, 0);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_notify_ready() - Schedule work to notify ready
+ *
+ * This function will schedule a work to notify ready event.
+ */
+static void ipa_mhi_notify_ready(void)
+{
+	IPA_MHI_FUNC_ENTRY();
+	queue_work(ipa_mhi_client_ctx->wq, &ipa_mhi_notify_ready_work);
+	IPA_MHI_FUNC_EXIT();
+}
+
+/**
+ * ipa_mhi_set_state() - Set new state to IPA MHI
+ * @state: new state
+ *
+ * Sets a new state to IPA MHI if possible according to IPA MHI state machine.
+ * In some state transitions a wakeup request will be triggered.
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+static int ipa_mhi_set_state(enum ipa_mhi_state new_state)
+{
+	unsigned long flags;
+	int res = -EPERM;
+
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_DBG("Current state: %s\n",
+			MHI_STATE_STR(ipa_mhi_client_ctx->state));
+
+	switch (ipa_mhi_client_ctx->state) {
+	case IPA_MHI_STATE_INITIALIZED:
+		if (new_state == IPA_MHI_STATE_READY) {
+			ipa_mhi_notify_ready();
+			res = 0;
+		}
+		break;
+
+	case IPA_MHI_STATE_READY:
+		if (new_state == IPA_MHI_STATE_READY)
+			res = 0;
+		if (new_state == IPA_MHI_STATE_STARTED)
+			res = 0;
+		break;
+
+	case IPA_MHI_STATE_STARTED:
+		if (new_state == IPA_MHI_STATE_INITIALIZED)
+			res = 0;
+		else if (new_state == IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+			res = 0;
+		break;
+
+	case IPA_MHI_STATE_SUSPEND_IN_PROGRESS:
+		if (new_state == IPA_MHI_STATE_SUSPENDED) {
+			if (ipa_mhi_client_ctx->trigger_wakeup) {
+				ipa_mhi_client_ctx->trigger_wakeup = false;
+				ipa_mhi_notify_wakeup();
+			}
+			res = 0;
+		} else if (new_state == IPA_MHI_STATE_STARTED) {
+			ipa_mhi_client_ctx->wakeup_notified = false;
+			ipa_mhi_client_ctx->trigger_wakeup = false;
+			if (ipa_mhi_client_ctx->rm_cons_state ==
+				IPA_MHI_RM_STATE_REQUESTED) {
+				ipa_rm_notify_completion(
+					IPA_RM_RESOURCE_GRANTED,
+					IPA_RM_RESOURCE_MHI_CONS);
+				ipa_mhi_client_ctx->rm_cons_state =
+					IPA_MHI_RM_STATE_GRANTED;
+			}
+			res = 0;
+		}
+		break;
+
+	case IPA_MHI_STATE_SUSPENDED:
+		if (new_state == IPA_MHI_STATE_RESUME_IN_PROGRESS)
+			res = 0;
+		break;
+
+	case IPA_MHI_STATE_RESUME_IN_PROGRESS:
+		if (new_state == IPA_MHI_STATE_SUSPENDED) {
+			if (ipa_mhi_client_ctx->trigger_wakeup) {
+				ipa_mhi_client_ctx->trigger_wakeup = false;
+				ipa_mhi_notify_wakeup();
+			}
+			res = 0;
+		} else if (new_state == IPA_MHI_STATE_STARTED) {
+			ipa_mhi_client_ctx->trigger_wakeup = false;
+			ipa_mhi_client_ctx->wakeup_notified = false;
+			if (ipa_mhi_client_ctx->rm_cons_state ==
+				IPA_MHI_RM_STATE_REQUESTED) {
+				ipa_rm_notify_completion(
+					IPA_RM_RESOURCE_GRANTED,
+					IPA_RM_RESOURCE_MHI_CONS);
+				ipa_mhi_client_ctx->rm_cons_state =
+					IPA_MHI_RM_STATE_GRANTED;
+			}
+			res = 0;
+		}
+		break;
+
+	default:
+		IPA_MHI_ERR("Invalid state %d\n", ipa_mhi_client_ctx->state);
+		WARN_ON(1);
+	}
+
+	if (res)
+		IPA_MHI_ERR("Invalid state change to %s\n",
+						MHI_STATE_STR(new_state));
+	else {
+		IPA_MHI_DBG("New state change to %s\n",
+						MHI_STATE_STR(new_state));
+		ipa_mhi_client_ctx->state = new_state;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	return res;
+}
+
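+/*
+ * For reference, the transitions accepted by ipa_mhi_set_state() above, as
+ * implemented by its switch statement (any other transition fails with
+ * -EPERM):
+ *
+ *	INITIALIZED         -> READY
+ *	READY               -> READY, STARTED
+ *	STARTED             -> INITIALIZED, SUSPEND_IN_PROGRESS
+ *	SUSPEND_IN_PROGRESS -> SUSPENDED, STARTED
+ *	SUSPENDED           -> RESUME_IN_PROGRESS
+ *	RESUME_IN_PROGRESS  -> SUSPENDED, STARTED
+ */
+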
+static void ipa_mhi_uc_ready_cb(void)
+{
+	IPA_MHI_FUNC_ENTRY();
+	ipa_mhi_set_state(IPA_MHI_STATE_READY);
+	IPA_MHI_FUNC_EXIT();
+}
+
+static void ipa_mhi_uc_wakeup_request_cb(void)
+{
+	unsigned long flags;
+
+	IPA_MHI_FUNC_ENTRY();
+	IPA_MHI_DBG("MHI state: %s\n",
+			MHI_STATE_STR(ipa_mhi_client_ctx->state));
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state == IPA_MHI_STATE_SUSPENDED)
+		ipa_mhi_notify_wakeup();
+	else if (ipa_mhi_client_ctx->state ==
+			IPA_MHI_STATE_SUSPEND_IN_PROGRESS)
+		/* wakeup event will be triggered after suspend finishes */
+		ipa_mhi_client_ctx->trigger_wakeup = true;
+
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+	IPA_MHI_FUNC_EXIT();
+}
+
+static int ipa_mhi_request_prod(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	reinit_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+	IPA_MHI_DBG("requesting mhi prod\n");
+	res = ipa_rm_request_resource(IPA_RM_RESOURCE_MHI_PROD);
+	if (res) {
+		if (res != -EINPROGRESS) {
+			IPA_MHI_ERR("failed to request mhi prod %d\n", res);
+			return res;
+		}
+		res = wait_for_completion_timeout(
+			&ipa_mhi_client_ctx->rm_prod_granted_comp,
+			msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
+		if (res == 0) {
+			IPA_MHI_ERR("timeout request mhi prod\n");
+			return -ETIME;
+		}
+	}
+
+	IPA_MHI_DBG("mhi prod granted\n");
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+}
+
+static int ipa_mhi_release_prod(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_rm_release_resource(IPA_RM_RESOURCE_MHI_PROD);
+
+	IPA_MHI_FUNC_EXIT();
+	return res;
+
+}
+
+/**
+ * ipa_mhi_start() - Start IPA MHI engine
+ * @params: pcie addresses for MHI
+ *
+ * This function is called by MHI client driver on MHI engine start for
+ * handling MHI accelerated channels. This function is called after
+ * ipa_mhi_init() was called and can be called after MHI reset to restart MHI
+ * engine. When this function returns device can move to M0 state.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+	int res;
+	struct ipa_mhi_init_engine init_params;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("not initialized\n");
+		return -EPERM;
+	}
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state %d\n", res);
+		return res;
+	}
+
+	ipa_mhi_client_ctx->host_ctrl_addr = params->host_ctrl_addr;
+	ipa_mhi_client_ctx->host_data_addr = params->host_data_addr;
+	ipa_mhi_client_ctx->channel_context_array_addr =
+		params->channel_context_array_addr;
+	ipa_mhi_client_ctx->event_context_array_addr =
+		params->event_context_array_addr;
+	IPA_MHI_DBG("host_ctrl_addr 0x%x\n",
+			ipa_mhi_client_ctx->host_ctrl_addr);
+	IPA_MHI_DBG("host_data_addr 0x%x\n",
+			ipa_mhi_client_ctx->host_data_addr);
+	IPA_MHI_DBG("channel_context_array_addr 0x%llx\n",
+		ipa_mhi_client_ctx->channel_context_array_addr);
+	IPA_MHI_DBG("event_context_array_addr 0x%llx\n",
+		ipa_mhi_client_ctx->event_context_array_addr);
+
+	/* Add MHI <-> Q6 dependencies to IPA RM */
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_MHI_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (res && res != -EINPROGRESS) {
+		IPA_MHI_ERR("failed to add dependency %d\n", res);
+		goto fail_add_mhi_q6_dep;
+	}
+
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+		IPA_RM_RESOURCE_MHI_CONS);
+	if (res && res != -EINPROGRESS) {
+		IPA_MHI_ERR("failed to add dependency %d\n", res);
+		goto fail_add_q6_mhi_dep;
+	}
+
+	res = ipa_mhi_request_prod();
+	if (res) {
+		IPA_MHI_ERR("failed request prod %d\n", res);
+		goto fail_request_prod;
+	}
+
+	/* gsi params */
+	init_params.gsi.first_ch_idx =
+			ipa_mhi_client_ctx->first_ch_idx;
+	/* uC params */
+	init_params.uC.first_ch_idx =
+			ipa_mhi_client_ctx->first_ch_idx;
+	init_params.uC.first_er_idx =
+			ipa_mhi_client_ctx->first_er_idx;
+	init_params.uC.host_ctrl_addr = params->host_ctrl_addr;
+	init_params.uC.host_data_addr = params->host_data_addr;
+	init_params.uC.mmio_addr = ipa_mhi_client_ctx->mmio_addr;
+	init_params.uC.msi = &ipa_mhi_client_ctx->msi;
+	init_params.uC.ipa_cached_dl_ul_sync_info =
+			&ipa_cached_dl_ul_sync_info;
+
+	res = ipa_mhi_init_engine(&init_params);
+	if (res) {
+		IPA_MHI_ERR("IPA core failed to start MHI %d\n", res);
+		goto fail_init_engine;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_init_engine:
+	ipa_mhi_release_prod();
+fail_request_prod:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+		IPA_RM_RESOURCE_MHI_CONS);
+fail_add_q6_mhi_dep:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+fail_add_mhi_q6_dep:
+	ipa_mhi_set_state(IPA_MHI_STATE_INITIALIZED);
+	return res;
+}
+
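+/*
+ * Illustrative sketch (not part of this driver and kept out of the build):
+ * one plausible way for an MHI core driver to invoke ipa_mhi_start() once
+ * the host context array addresses are known. The helper name and its
+ * arguments are hypothetical; the ipa_mhi_start_params fields are the ones
+ * consumed by ipa_mhi_start() above.
+ */
+#if 0
+static int example_mhi_core_start_ipa(u32 ctrl_base, u32 data_base,
+	u64 ch_ctx_array, u64 ev_ctx_array)
+{
+	struct ipa_mhi_start_params sp;
+
+	memset(&sp, 0, sizeof(sp));
+	sp.host_ctrl_addr = ctrl_base;
+	sp.host_data_addr = data_base;
+	sp.channel_context_array_addr = ch_ctx_array;
+	sp.event_context_array_addr = ev_ctx_array;
+
+	/* on success the device may be moved to M0 */
+	return ipa_mhi_start(&sp);
+}
+#endif
+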
+/**
+ * ipa_mhi_get_channel_context() - Get corresponding channel context
+ * @client: IPA client type of the channel
+ * @channel_id: Channel ID
+ *
+ * This function returns the corresponding channel context, or allocates a new
+ * one if no channel context exists yet for this channel.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context(
+	enum ipa_client_type client, u8 channel_id)
+{
+	int ch_idx;
+	struct ipa_mhi_channel_ctx *channels;
+	int max_channels;
+
+	if (IPA_CLIENT_IS_PROD(client)) {
+		channels = ipa_mhi_client_ctx->ul_channels;
+		max_channels = IPA_MHI_MAX_UL_CHANNELS;
+	} else {
+		channels = ipa_mhi_client_ctx->dl_channels;
+		max_channels = IPA_MHI_MAX_DL_CHANNELS;
+	}
+
+	/* find the channel context according to channel id */
+	for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+		if (channels[ch_idx].valid &&
+		    channels[ch_idx].id == channel_id)
+			return &channels[ch_idx];
+	}
+
+	/* channel context does not exist, allocate a new one */
+	for (ch_idx = 0; ch_idx < max_channels; ch_idx++) {
+		if (!channels[ch_idx].valid)
+			break;
+	}
+
+	if (ch_idx == max_channels) {
+		IPA_MHI_ERR("no more channels available\n");
+		return NULL;
+	}
+
+	channels[ch_idx].valid = true;
+	channels[ch_idx].id = channel_id;
+	channels[ch_idx].index = ipa_mhi_client_ctx->total_channels++;
+	channels[ch_idx].client = client;
+	channels[ch_idx].state = IPA_HW_MHI_CHANNEL_STATE_INVALID;
+
+	return &channels[ch_idx];
+}
+
+/**
+ * ipa_mhi_get_channel_context_by_clnt_hdl() - Get corresponding channel
+ * context
+ * @clnt_hdl: client handle as provided in ipa_mhi_connect_pipe()
+ *
+ * This function will return the corresponding channel context or NULL in case
+ * that channel does not exist.
+ */
+static struct ipa_mhi_channel_ctx *ipa_mhi_get_channel_context_by_clnt_hdl(
+	u32 clnt_hdl)
+{
+	int ch_idx;
+
+	for (ch_idx = 0; ch_idx < IPA_MHI_MAX_UL_CHANNELS; ch_idx++) {
+		if (ipa_mhi_client_ctx->ul_channels[ch_idx].valid &&
+		ipa_get_ep_mapping(
+			ipa_mhi_client_ctx->ul_channels[ch_idx].client)
+				== clnt_hdl)
+			return &ipa_mhi_client_ctx->ul_channels[ch_idx];
+	}
+
+	for (ch_idx = 0; ch_idx < IPA_MHI_MAX_DL_CHANNELS; ch_idx++) {
+		if (ipa_mhi_client_ctx->dl_channels[ch_idx].valid &&
+		ipa_get_ep_mapping(
+			ipa_mhi_client_ctx->dl_channels[ch_idx].client)
+				== clnt_hdl)
+			return &ipa_mhi_client_ctx->dl_channels[ch_idx];
+	}
+
+	return NULL;
+}
+
+static void ipa_mhi_dump_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+	IPA_MHI_DBG("ch_id %d\n", channel->id);
+	IPA_MHI_DBG("chstate 0x%x\n", channel->ch_ctx_host.chstate);
+	IPA_MHI_DBG("brstmode 0x%x\n", channel->ch_ctx_host.brstmode);
+	IPA_MHI_DBG("pollcfg 0x%x\n", channel->ch_ctx_host.pollcfg);
+	IPA_MHI_DBG("chtype 0x%x\n", channel->ch_ctx_host.chtype);
+	IPA_MHI_DBG("erindex 0x%x\n", channel->ch_ctx_host.erindex);
+	IPA_MHI_DBG("rbase 0x%llx\n", channel->ch_ctx_host.rbase);
+	IPA_MHI_DBG("rlen 0x%llx\n", channel->ch_ctx_host.rlen);
+	IPA_MHI_DBG("rp 0x%llx\n", channel->ch_ctx_host.rp);
+	IPA_MHI_DBG("wp 0x%llx\n", channel->ch_ctx_host.wp);
+}
+
+static void ipa_mhi_dump_ev_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+	IPA_MHI_DBG("ch_id %d event id %d\n", channel->id,
+		channel->ch_ctx_host.erindex);
+
+	IPA_MHI_DBG("intmodc 0x%x\n", channel->ev_ctx_host.intmodc);
+	IPA_MHI_DBG("intmodt 0x%x\n", channel->ev_ctx_host.intmodt);
+	IPA_MHI_DBG("ertype 0x%x\n", channel->ev_ctx_host.ertype);
+	IPA_MHI_DBG("msivec 0x%x\n", channel->ev_ctx_host.msivec);
+	IPA_MHI_DBG("rbase 0x%llx\n", channel->ev_ctx_host.rbase);
+	IPA_MHI_DBG("rlen 0x%llx\n", channel->ev_ctx_host.rlen);
+	IPA_MHI_DBG("rp 0x%llx\n", channel->ev_ctx_host.rp);
+	IPA_MHI_DBG("wp 0x%llx\n", channel->ev_ctx_host.wp);
+}
+
+static int ipa_mhi_read_ch_ctx(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+
+	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+		&channel->ch_ctx_host, channel->channel_context_addr,
+		sizeof(channel->ch_ctx_host));
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+		return res;
+
+	}
+	ipa_mhi_dump_ch_ctx(channel);
+
+	channel->event_context_addr =
+		ipa_mhi_client_ctx->event_context_array_addr +
+		channel->ch_ctx_host.erindex * sizeof(struct ipa_mhi_ev_ctx);
+	IPA_MHI_DBG("ch %d event_context_addr 0x%llx\n", channel->id,
+		channel->event_context_addr);
+
+	res = ipa_mhi_read_write_host(IPA_MHI_DMA_FROM_HOST,
+		&channel->ev_ctx_host, channel->event_context_addr,
+		sizeof(channel->ev_ctx_host));
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+		return res;
+
+	}
+	ipa_mhi_dump_ev_ctx(channel);
+
+	return 0;
+}
+
+static void ipa_mhi_gsi_ev_err_cb(struct gsi_evt_err_notify *notify)
+{
+	struct ipa_mhi_channel_ctx *channel = notify->user_data;
+
+	IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+		channel->id, channel->client, channel->state);
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPA_MHI_ERR("Received GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+}
+
+static void ipa_mhi_gsi_ch_err_cb(struct gsi_chan_err_notify *notify)
+{
+	struct ipa_mhi_channel_ctx *channel = notify->chan_user_data;
+
+	IPA_MHI_ERR("channel id=%d client=%d state=%d\n",
+		channel->id, channel->client, channel->state);
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPA_MHI_ERR("Received GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPA_MHI_ERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+	IPA_MHI_ERR("err_desc=0x%x\n", notify->err_desc);
+}
+
+
+static bool ipa_mhi_gsi_channel_empty(struct ipa_mhi_channel_ctx *channel)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!channel->stop_in_proc) {
+		IPA_MHI_DBG("Channel is not in STOP_IN_PROC\n");
+		return true;
+	}
+
+	if (ipa_mhi_stop_gsi_channel(channel->client)) {
+		channel->stop_in_proc = false;
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * ipa_mhi_wait_for_ul_empty_timeout() - wait for pending packets in uplink
+ * @msecs: timeout to wait
+ *
+ * This function polls until no packets are pending in the uplink channels
+ * or the timeout expires.
+ *
+ * Return code: true - no pending packets in uplink channels
+ *		false - timeout occurred
+ */
+static bool ipa_mhi_wait_for_ul_empty_timeout(unsigned int msecs)
+{
+	unsigned long jiffies_timeout = msecs_to_jiffies(msecs);
+	unsigned long jiffies_start = jiffies;
+	bool empty = false;
+	int i;
+
+	IPA_MHI_FUNC_ENTRY();
+	while (!empty) {
+		empty = true;
+		for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+			if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+				continue;
+			if (ipa_get_transport_type() ==
+			    IPA_TRANSPORT_TYPE_GSI)
+				empty &= ipa_mhi_gsi_channel_empty(
+					&ipa_mhi_client_ctx->ul_channels[i]);
+			else
+				empty &= ipa_mhi_sps_channel_empty(
+				ipa_mhi_client_ctx->ul_channels[i].client);
+		}
+
+		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+			IPA_MHI_DBG("finished waiting for UL empty\n");
+			break;
+		}
+
+		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI &&
+		    IPA_MHI_MAX_UL_CHANNELS == 1)
+			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+	}
+
+	IPA_MHI_DBG("IPA UL is %s\n", (empty) ? "empty" : "not empty");
+
+	IPA_MHI_FUNC_EXIT();
+	return empty;
+}
+
+static int ipa_mhi_enable_force_clear(u32 request_id, bool throttle_source)
+{
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+	int i;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	req.source_pipe_bitmask = 0;
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		if (!ipa_mhi_client_ctx->ul_channels[i].valid)
+			continue;
+		req.source_pipe_bitmask |= 1 << ipa_get_ep_mapping(
+				ipa_mhi_client_ctx->ul_channels[i].client);
+	}
+	if (throttle_source) {
+		req.throttle_source_valid = 1;
+		req.throttle_source = 1;
+	}
+	IPA_MHI_DBG("req_id=0x%x src_pipe_btmk=0x%x throt_src=%d\n",
+		req.request_id, req.source_pipe_bitmask,
+		req.throttle_source);
+	res = ipa_qmi_enable_force_clear_datapath_send(&req);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_qmi_enable_force_clear_datapath_send failed %d\n"
+				, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_disable_force_clear(u32 request_id)
+{
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	IPA_MHI_DBG("req_id=0x%x\n", req.request_id);
+	res = ipa_qmi_disable_force_clear_datapath_send(&req);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_qmi_disable_force_clear_datapath_send failed %d\n"
+				, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_set_holb_on_dl_channels(bool enable,
+	struct ipa_ep_cfg_holb old_holb[])
+{
+	int i;
+	struct ipa_ep_cfg_holb ep_holb;
+	int ep_idx;
+	int res;
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		if (!ipa_mhi_client_ctx->dl_channels[i].valid)
+			continue;
+		if (ipa_mhi_client_ctx->dl_channels[i].state ==
+			IPA_HW_MHI_CHANNEL_STATE_INVALID)
+			continue;
+		ep_idx = ipa_get_ep_mapping(
+			ipa_mhi_client_ctx->dl_channels[i].client);
+		if (ep_idx == -1) {
+			IPA_MHI_ERR("Client %u is not mapped\n",
+				ipa_mhi_client_ctx->dl_channels[i].client);
+			ipa_assert();
+			return;
+		}
+		memset(&ep_holb, 0, sizeof(ep_holb));
+		if (enable) {
+			ipa_get_holb(ep_idx, &old_holb[i]);
+			ep_holb.en = 1;
+			ep_holb.tmr_val = 0;
+		} else {
+			ep_holb = old_holb[i];
+		}
+		res = ipa_cfg_ep_holb(ep_idx, &ep_holb);
+		if (res) {
+			IPA_MHI_ERR("ipa_cfg_ep_holb failed %d\n", res);
+			ipa_assert();
+			return;
+		}
+	}
+}
+
+static int ipa_mhi_suspend_gsi_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int clnt_hdl;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	clnt_hdl = ipa_get_ep_mapping(channel->client);
+	if (clnt_hdl < 0)
+		return -EFAULT;
+
+	res = ipa_stop_gsi_channel(clnt_hdl);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+	    res != -GSI_STATUS_TIMED_OUT) {
+		IPA_MHI_ERR("GSI stop channel failed %d\n", res);
+		return -EFAULT;
+	}
+
+	/* check if channel was stopped completely */
+	if (res)
+		channel->stop_in_proc = true;
+
+	IPA_MHI_DBG("GSI channel is %s\n", (channel->stop_in_proc) ?
+		"STOP_IN_PROC" : "STOP");
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_reset_ul_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+	bool empty;
+	struct ipa_ep_cfg_holb old_ep_holb[IPA_MHI_MAX_DL_CHANNELS];
+
+	IPA_MHI_FUNC_ENTRY();
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_suspend_gsi_channel(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n",
+				 res);
+			return res;
+		}
+	} else {
+		res = ipa_uc_mhi_reset_channel(channel->index);
+		if (res) {
+			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+				res);
+			return res;
+		}
+	}
+
+	empty = ipa_mhi_wait_for_ul_empty_timeout(
+			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+	if (!empty) {
+		IPA_MHI_DBG("%s not empty\n",
+			(ipa_get_transport_type() ==
+				IPA_TRANSPORT_TYPE_GSI) ? "GSI" : "BAM");
+		res = ipa_mhi_enable_force_clear(
+				ipa_mhi_client_ctx->qmi_req_id, false);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_enable_force_clear failed %d\n",
+				res);
+			ipa_assert();
+			return res;
+		}
+
+		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+			empty = ipa_mhi_wait_for_ul_empty_timeout(
+				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+			IPA_MHI_DBG("empty=%d\n", empty);
+		} else {
+			/* enable packet drop on all DL channels */
+			ipa_mhi_set_holb_on_dl_channels(true, old_ep_holb);
+			ipa_generate_tag_process();
+			/* disable packet drop on all DL channels */
+			ipa_mhi_set_holb_on_dl_channels(false, old_ep_holb);
+
+			res = ipa_disable_sps_pipe(channel->client);
+			if (res) {
+				IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
+				ipa_assert();
+				return res;
+			}
+		}
+
+		res =
+		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_disable_force_clear failed %d\n",
+				res);
+			ipa_assert();
+			return res;
+		}
+		ipa_mhi_client_ctx->qmi_req_id++;
+	}
+
+	res = ipa_mhi_reset_channel_internal(channel->client);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_reset_ul_channel_internal failed %d\n"
+				, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+static int ipa_mhi_reset_dl_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_suspend_gsi_channel(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_suspend_gsi_channel failed %d\n"
+					, res);
+			return res;
+		}
+
+		res = ipa_mhi_reset_channel_internal(channel->client);
+		if (res) {
+			IPA_MHI_ERR(
+				"ipa_mhi_reset_ul_channel_internal failed %d\n"
+				, res);
+			return res;
+		}
+	} else {
+		res = ipa_mhi_reset_channel_internal(channel->client);
+		if (res) {
+			IPA_MHI_ERR(
+				"ipa_mhi_reset_ul_channel_internal failed %d\n"
+				, res);
+			return res;
+		}
+
+		res = ipa_uc_mhi_reset_channel(channel->index);
+		if (res) {
+			IPA_MHI_ERR("ipa_uc_mhi_reset_channel failed %d\n",
+				res);
+			ipa_mhi_start_channel_internal(channel->client);
+			return res;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_reset_channel(struct ipa_mhi_channel_ctx *channel)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (IPA_CLIENT_IS_PROD(channel->client))
+		res = ipa_mhi_reset_ul_channel(channel);
+	else
+		res = ipa_mhi_reset_dl_channel(channel);
+	if (res) {
+		IPA_MHI_ERR("failed to reset channel error %d\n", res);
+		return res;
+	}
+
+	channel->state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+				offsetof(struct ipa_mhi_ch_ctx, chstate),
+				sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed %d\n", res);
+			return res;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa_mhi_connect_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl)
+{
+	int res;
+	unsigned long flags;
+	struct ipa_mhi_channel_ctx *channel = NULL;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!in || !clnt_hdl) {
+		IPA_MHI_ERR("NULL args\n");
+		return -EINVAL;
+	}
+
+	if (in->sys.client >= IPA_CLIENT_MAX) {
+		IPA_MHI_ERR("bad param client:%d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	if (!IPA_CLIENT_IS_MHI(in->sys.client)) {
+		IPA_MHI_ERR(
+			"Invalid MHI client, client: %d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	IPA_MHI_DBG("channel=%d\n", in->channel_id);
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("IPA MHI was not initialized\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_STARTED) {
+		IPA_MHI_ERR("IPA MHI was not started\n");
+		spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+		return -EINVAL;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+	channel = ipa_mhi_get_channel_context(in->sys.client, in->channel_id);
+	if (!channel) {
+		IPA_MHI_ERR("ipa_mhi_get_channel_context failed\n");
+		return -EINVAL;
+	}
+
+	if (channel->state != IPA_HW_MHI_CHANNEL_STATE_INVALID &&
+	    channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+		IPA_MHI_ERR("Invalid channel state %d\n", channel->state);
+		return -EFAULT;
+	}
+
+	channel->channel_context_addr =
+		ipa_mhi_client_ctx->channel_context_array_addr +
+			channel->id * sizeof(struct ipa_mhi_ch_ctx);
+
+	/* event ctx address is derived from the ER index read from host */
+
+	IPA_MHI_DBG("client %d channelIndex %d channelID %d, state %d\n",
+		channel->client, channel->index, channel->id, channel->state);
+	IPA_MHI_DBG("channel_context_addr 0x%llx cached_gsi_evt_ring_hdl %lu\n",
+		channel->channel_context_addr,
+		channel->cached_gsi_evt_ring_hdl);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		struct ipa_mhi_connect_params_internal internal;
+
+		IPA_MHI_DBG("reading ch/ev context from host\n");
+		res = ipa_mhi_read_ch_ctx(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+			goto fail_start_channel;
+		}
+
+		internal.channel_id = in->channel_id;
+		internal.sys = &in->sys;
+		internal.start.gsi.state = channel->state;
+		internal.start.gsi.msi = &ipa_mhi_client_ctx->msi;
+		internal.start.gsi.ev_ctx_host = &channel->ev_ctx_host;
+		internal.start.gsi.event_context_addr =
+				channel->event_context_addr;
+		internal.start.gsi.ch_ctx_host = &channel->ch_ctx_host;
+		internal.start.gsi.channel_context_addr =
+				channel->channel_context_addr;
+		internal.start.gsi.ch_err_cb = ipa_mhi_gsi_ch_err_cb;
+		internal.start.gsi.channel = (void *)channel;
+		internal.start.gsi.ev_err_cb = ipa_mhi_gsi_ev_err_cb;
+		internal.start.gsi.assert_bit40 =
+				ipa_mhi_client_ctx->assert_bit40;
+		internal.start.gsi.mhi = &channel->ch_scratch.mhi;
+		internal.start.gsi.cached_gsi_evt_ring_hdl =
+				&channel->cached_gsi_evt_ring_hdl;
+		internal.start.gsi.evchid =
+				channel->index + IPA_MHI_GSI_ER_START;
+
+		res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+		if (res) {
+			IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+			goto fail_connect_pipe;
+		}
+		channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+		channel->brstmode_enabled =
+				channel->ch_scratch.mhi.burst_mode_enabled;
+
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+				offsetof(struct ipa_mhi_ch_ctx, chstate),
+				sizeof(channel->state));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			return res;
+
+		}
+	} else {
+		struct ipa_mhi_connect_params_internal internal;
+
+		internal.channel_id = in->channel_id;
+		internal.sys = &in->sys;
+		internal.start.uC.index = channel->index;
+		internal.start.uC.id = channel->id;
+		internal.start.uC.state = channel->state;
+		res = ipa_connect_mhi_pipe(&internal, clnt_hdl);
+		if (res) {
+			IPA_MHI_ERR("ipa_connect_mhi_pipe failed %d\n", res);
+			goto fail_connect_pipe;
+		}
+		channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	}
+
+	if (!in->sys.keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+fail_connect_pipe:
+	ipa_mhi_reset_channel(channel);
+fail_start_channel:
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+	return -EPERM;
+}
+
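+/*
+ * Illustrative sketch (not built): connecting one UL and one DL accelerated
+ * channel the way an MHI core driver might, using only the fields that
+ * ipa_mhi_connect_pipe() above reads. The client types and channel IDs are
+ * placeholders.
+ */
+#if 0
+static int example_connect_mhi_channels(u32 *ul_hdl, u32 *dl_hdl)
+{
+	struct ipa_mhi_connect_params in;
+	int ret;
+
+	memset(&in, 0, sizeof(in));
+	in.sys.client = IPA_CLIENT_MHI_PROD;	/* UL: host -> IPA */
+	in.channel_id = 0;
+	ret = ipa_mhi_connect_pipe(&in, ul_hdl);
+	if (ret)
+		return ret;
+
+	memset(&in, 0, sizeof(in));
+	in.sys.client = IPA_CLIENT_MHI_CONS;	/* DL: IPA -> host */
+	in.channel_id = 1;
+	return ipa_mhi_connect_pipe(&in, dl_hdl);
+}
+#endif
+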
+/**
+ * ipa_mhi_disconnect_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ *	- Send command to uC/GSI to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+	int res;
+	enum ipa_client_type client;
+	struct ipa_mhi_channel_ctx *channel;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("IPA MHI was not initialized\n");
+		return -EINVAL;
+	}
+
+	client = ipa_get_client_mapping(clnt_hdl);
+
+	if (!IPA_CLIENT_IS_MHI(client)) {
+		IPA_MHI_ERR("invalid IPA MHI client, client: %d\n", client);
+		return -EINVAL;
+	}
+
+	channel = ipa_mhi_get_channel_context_by_clnt_hdl(clnt_hdl);
+	if (!channel) {
+		IPA_MHI_ERR("invalid clnt index\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa_get_client_mapping(clnt_hdl));
+
+	res = ipa_mhi_reset_channel(channel);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_reset_channel failed %d\n", res);
+		goto fail_reset_channel;
+	}
+
+	res = ipa_disconnect_mhi_pipe(clnt_hdl);
+	if (res) {
+		IPA_MHI_ERR(
+			"IPA core driver failed to disconnect the pipe hdl %d, res %d"
+				, clnt_hdl, res);
+		return res;
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+
+	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+fail_reset_channel:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa_get_client_mapping(clnt_hdl));
+	return res;
+}
+
+static int ipa_mhi_wait_for_cons_release(void)
+{
+	unsigned long flags;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	reinit_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+	spin_lock_irqsave(&ipa_mhi_client_ctx->state_lock, flags);
+	if (ipa_mhi_client_ctx->rm_cons_state != IPA_MHI_RM_STATE_GRANTED) {
+		spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ipa_mhi_client_ctx->state_lock, flags);
+
+	res = wait_for_completion_timeout(
+		&ipa_mhi_client_ctx->rm_cons_comp,
+		msecs_to_jiffies(IPA_MHI_RM_TIMEOUT_MSEC));
+	if (res == 0) {
+		IPA_MHI_ERR("timeout release mhi cons\n");
+		return -ETIME;
+	}
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels)
+{
+	int i;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		if (!channels[i].valid)
+			continue;
+		if (channels[i].state !=
+		    IPA_HW_MHI_CHANNEL_STATE_RUN)
+			continue;
+		IPA_MHI_DBG("suspending channel %d\n",
+			channels[i].id);
+
+		if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+			res = ipa_mhi_suspend_gsi_channel(
+				&channels[i]);
+		else
+			res = ipa_uc_mhi_suspend_channel(
+				channels[i].index);
+
+		if (res) {
+			IPA_MHI_ERR("failed to suspend channel %d error %d\n",
+				i, res);
+			return res;
+		}
+		channels[i].state =
+			IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static int ipa_mhi_stop_event_update_channels(
+		struct ipa_mhi_channel_ctx *channels)
+{
+	int i;
+	int res;
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		return 0;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		if (!channels[i].valid)
+			continue;
+		if (channels[i].state !=
+		    IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+			continue;
+		IPA_MHI_DBG("stop update event channel %d\n",
+			channels[i].id);
+		res = ipa_uc_mhi_stop_event_update_channel(
+			channels[i].index);
+		if (res) {
+			IPA_MHI_ERR("failed stop event channel %d error %d\n",
+				i, res);
+			return res;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa_mhi_check_pending_packets_from_host(void)
+{
+	int i;
+	int res;
+	struct ipa_mhi_channel_ctx *channel;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		if (!channel->valid)
+			continue;
+
+		res = ipa_mhi_query_ch_info(channel->client,
+				&channel->ch_info);
+		if (res) {
+			IPA_MHI_ERR("gsi_query_channel_info failed\n");
+			return true;
+		}
+		res = ipa_mhi_read_ch_ctx(channel);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_ch_ctx failed %d\n", res);
+			return true;
+		}
+
+		if (channel->ch_info.rp != channel->ch_ctx_host.wp) {
+			IPA_MHI_DBG("There are pending packets from host\n");
+			IPA_MHI_DBG("device rp 0x%llx host 0x%llx\n",
+				channel->ch_info.rp, channel->ch_ctx_host.wp);
+
+			return true;
+		}
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return false;
+}
+
+static int ipa_mhi_resume_channels(bool LPTransitionRejected,
+		struct ipa_mhi_channel_ctx *channels)
+{
+	int i;
+	int res;
+	struct ipa_mhi_channel_ctx *channel;
+
+	IPA_MHI_FUNC_ENTRY();
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		if (!channels[i].valid)
+			continue;
+		if (channels[i].state !=
+		    IPA_HW_MHI_CHANNEL_STATE_SUSPEND)
+			continue;
+		channel = &channels[i];
+		IPA_MHI_DBG("resuming channel %d\n", channel->id);
+
+		res = ipa_mhi_resume_channels_internal(channel->client,
+			LPTransitionRejected, channel->brstmode_enabled,
+			channel->ch_scratch, channel->index);
+
+		if (res) {
+			IPA_MHI_ERR("failed to resume channel %d error %d\n",
+				i, res);
+			return res;
+		}
+
+		channel->stop_in_proc = false;
+		channel->state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa_mhi_suspend_ul() - Suspend MHI accelerated uplink channels
+ * @force:
+ *	false: in case of data pending in IPA, MHI channels will not be
+ *		suspended and function will fail.
+ *	true:  in case of data pending in IPA, make sure no further access from
+ *		IPA to PCIe is possible. In this case suspend cannot fail.
+ * @empty: [out] true if the uplink channels drained within the timeout
+ * @force_clear: [out] true if force-clear datapath was enabled; on failure
+ *		the caller is expected to disable it
+ *
+ * Called from ipa_mhi_suspend() to suspend the UL channels and drain pending
+ * uplink data before the device moves to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+static int ipa_mhi_suspend_ul(bool force, bool *empty, bool *force_clear)
+{
+	int res;
+
+	*force_clear = false;
+
+	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
+		goto fail_suspend_ul_channel;
+	}
+
+	*empty = ipa_mhi_wait_for_ul_empty_timeout(
+			IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+
+	if (!*empty) {
+		if (force) {
+			res = ipa_mhi_enable_force_clear(
+				ipa_mhi_client_ctx->qmi_req_id, false);
+			if (res) {
+				IPA_MHI_ERR("failed to enable force clear\n");
+				ipa_assert();
+				return res;
+			}
+			*force_clear = true;
+			IPA_MHI_DBG("force clear datapath enabled\n");
+
+			*empty = ipa_mhi_wait_for_ul_empty_timeout(
+				IPA_MHI_CH_EMPTY_TIMEOUT_MSEC);
+			IPA_MHI_DBG("empty=%d\n", *empty);
+			if (!*empty && ipa_get_transport_type()
+				== IPA_TRANSPORT_TYPE_GSI) {
+				IPA_MHI_ERR("Failed to suspend UL channels\n");
+				if (ipa_mhi_client_ctx->test_mode) {
+					res = -EAGAIN;
+					goto fail_suspend_ul_channel;
+				}
+
+				ipa_assert();
+			}
+		} else {
+			IPA_MHI_DBG("IPA not empty\n");
+			res = -EAGAIN;
+			goto fail_suspend_ul_channel;
+		}
+	}
+
+	if (*force_clear) {
+		res =
+		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id);
+		if (res) {
+			IPA_MHI_ERR("failed to disable force clear\n");
+			ipa_assert();
+			return res;
+		}
+		IPA_MHI_DBG("force clear datapath disabled\n");
+		ipa_mhi_client_ctx->qmi_req_id++;
+	}
+
+	if (!force && ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		if (ipa_mhi_check_pending_packets_from_host()) {
+			res = -EAGAIN;
+			goto fail_suspend_ul_channel;
+		}
+	}
+
+	res = ipa_mhi_stop_event_update_channels(
+		ipa_mhi_client_ctx->ul_channels);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_mhi_stop_event_update_ul_channels failed %d\n",
+			res);
+		goto fail_suspend_ul_channel;
+	}
+
+	return 0;
+
+fail_suspend_ul_channel:
+	return res;
+}
+
+static bool ipa_mhi_has_open_aggr_frame(void)
+{
+	struct ipa_mhi_channel_ctx *channel;
+	int i;
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+
+		if (!channel->valid)
+			continue;
+
+		if (ipa_has_open_aggr_frame(channel->client))
+			return true;
+	}
+
+	return false;
+}
+
+static void ipa_mhi_update_host_ch_state(bool update_rp)
+{
+	int i;
+	int res;
+	struct ipa_mhi_channel_ctx *channel;
+
+	for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->ul_channels[i];
+		if (!channel->valid)
+			continue;
+
+		if (update_rp) {
+			res = ipa_mhi_query_ch_info(channel->client,
+				&channel->ch_info);
+			if (res) {
+				IPA_MHI_ERR("gsi_query_channel_info failed\n");
+				ipa_assert();
+				return;
+			}
+
+			res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+				&channel->ch_info.rp,
+				channel->channel_context_addr +
+					offsetof(struct ipa_mhi_ch_ctx, rp),
+				sizeof(channel->ch_info.rp));
+			if (res) {
+				IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+				ipa_assert();
+				return;
+			}
+		}
+
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+				offsetof(struct ipa_mhi_ch_ctx, chstate),
+			sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			ipa_assert();
+			return;
+		}
+	}
+
+	for (i = 0; i < IPA_MHI_MAX_DL_CHANNELS; i++) {
+		channel = &ipa_mhi_client_ctx->dl_channels[i];
+		if (!channel->valid)
+			continue;
+
+		if (update_rp) {
+			res = ipa_mhi_query_ch_info(channel->client,
+				&channel->ch_info);
+			if (res) {
+				IPA_MHI_ERR("gsi_query_channel_info failed\n");
+				ipa_assert();
+				return;
+			}
+
+			res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+				&channel->ch_info.rp,
+				channel->channel_context_addr +
+					offsetof(struct ipa_mhi_ch_ctx, rp),
+				sizeof(channel->ch_info.rp));
+			if (res) {
+				IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+				ipa_assert();
+				return;
+			}
+		}
+
+		res = ipa_mhi_read_write_host(IPA_MHI_DMA_TO_HOST,
+			&channel->state, channel->channel_context_addr +
+			offsetof(struct ipa_mhi_ch_ctx, chstate),
+			sizeof(((struct ipa_mhi_ch_ctx *)0)->chstate));
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_read_write_host failed\n");
+			ipa_assert();
+		}
+	}
+}
+
+static int ipa_mhi_suspend_dl(bool force)
+{
+	int res;
+
+	res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+	if (res) {
+		IPA_MHI_ERR(
+			"ipa_mhi_suspend_channels for dl failed %d\n", res);
+		goto fail_suspend_dl_channel;
+	}
+
+	res = ipa_mhi_stop_event_update_channels
+			(ipa_mhi_client_ctx->dl_channels);
+	if (res) {
+		IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
+		goto fail_stop_event_update_dl_channel;
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		if (ipa_mhi_has_open_aggr_frame()) {
+			IPA_MHI_DBG("There is an open aggr frame\n");
+			if (force) {
+				ipa_mhi_client_ctx->trigger_wakeup = true;
+			} else {
+				res = -EAGAIN;
+				goto fail_stop_event_update_dl_channel;
+			}
+		}
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		ipa_mhi_update_host_ch_state(true);
+
+	return 0;
+
+fail_stop_event_update_dl_channel:
+		ipa_mhi_resume_channels(true,
+				ipa_mhi_client_ctx->dl_channels);
+fail_suspend_dl_channel:
+		return res;
+}
+
+/**
+ * ipa_mhi_suspend() - Suspend MHI accelerated channels
+ * @force:
+ *	false: in case of data pending in IPA, MHI channels will not be
+ *		suspended and function will fail.
+ *	true:  in case of data pending in IPA, make sure no further access from
+ *		IPA to PCIe is possible. In this case suspend cannot fail.
+ *
+ * This function is called by MHI client driver on MHI suspend.
+ * This function is called after MHI channel was started.
+ * When this function returns device can move to M1/M2/M3/D3cold state.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_suspend(bool force)
+{
+	int res;
+	bool empty;
+	bool force_clear;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPEND_IN_PROGRESS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		return res;
+	}
+	res = ipa_mhi_suspend_ul(force, &empty, &force_clear);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_suspend_ul failed %d\n", res);
+		goto fail_suspend_ul_channel;
+	}
+
+	/*
+	 * hold IPA clocks and release them only after all IPA RM resources
+	 * are released, to make sure the tag process will not start
+	 */
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	IPA_MHI_DBG("release prod\n");
+	res = ipa_mhi_release_prod();
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+		goto fail_release_prod;
+	}
+
+	IPA_MHI_DBG("wait for cons release\n");
+	res = ipa_mhi_wait_for_cons_release();
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n", res);
+		goto fail_release_cons;
+	}
+
+	usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN, IPA_MHI_SUSPEND_SLEEP_MAX);
+
+	res = ipa_mhi_suspend_dl(force);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_suspend_dl failed %d\n", res);
+		goto fail_suspend_dl_channel;
+	}
+
+	if (!empty)
+		ipa_set_tag_process_before_gating(false);
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		goto fail_release_cons;
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_suspend_dl_channel:
+fail_release_cons:
+	ipa_mhi_request_prod();
+fail_release_prod:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail_suspend_ul_channel:
+	ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels);
+	ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	if (force_clear) {
+		if (
+		ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
+			IPA_MHI_ERR("failed to disable force clear\n");
+			ipa_assert();
+		}
+		IPA_MHI_DBG("force clear datapath disabled\n");
+		ipa_mhi_client_ctx->qmi_req_id++;
+	}
+	return res;
+}
+
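+/*
+ * Illustrative sketch (not built): a possible low-power entry sequence in
+ * the MHI core driver, based only on the behaviour documented above: a
+ * non-forced suspend may return -EAGAIN while data is pending, and the
+ * caller may then abort the transition or retry with force=true.
+ */
+#if 0
+static int example_enter_low_power(bool allow_force)
+{
+	int ret;
+
+	ret = ipa_mhi_suspend(false);
+	if (ret == -EAGAIN && allow_force)
+		ret = ipa_mhi_suspend(true);
+	return ret;
+}
+#endif
+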
+/**
+ * ipa_mhi_resume() - Resume MHI accelerated channels
+ *
+ * This function is called by MHI client driver on MHI resume.
+ * This function is called after MHI channel was suspended.
+ * When this function returns device can move to M0 state.
+ * This function is doing the following:
+ *	- Send command to uC/GSI to resume corresponding MHI channel
+ *	- Request MHI_PROD in IPA RM
+ *	- Resume data to IPA
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_resume(void)
+{
+	int res;
+	bool dl_channel_resumed = false;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_RESUME_IN_PROGRESS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		return res;
+	}
+
+	if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
+		/* resume all DL channels */
+		res = ipa_mhi_resume_channels(false,
+				ipa_mhi_client_ctx->dl_channels);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+				res);
+			goto fail_resume_dl_channels;
+		}
+		dl_channel_resumed = true;
+
+		ipa_rm_notify_completion(IPA_RM_RESOURCE_GRANTED,
+			IPA_RM_RESOURCE_MHI_CONS);
+		ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_GRANTED;
+	}
+
+	res = ipa_mhi_request_prod();
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_request_prod failed %d\n", res);
+		goto fail_request_prod;
+	}
+
+	/* resume all UL channels */
+	res = ipa_mhi_resume_channels(false,
+					ipa_mhi_client_ctx->ul_channels);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
+		goto fail_resume_ul_channels;
+	}
+
+	if (!dl_channel_resumed) {
+		res = ipa_mhi_resume_channels(false,
+					ipa_mhi_client_ctx->dl_channels);
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
+				res);
+			goto fail_resume_dl_channels2;
+		}
+	}
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI)
+		ipa_mhi_update_host_ch_state(false);
+
+	res = ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_set_state failed %d\n", res);
+		goto fail_set_state;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_set_state:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels2:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+fail_resume_ul_channels:
+	ipa_mhi_release_prod();
+fail_request_prod:
+	ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+fail_resume_dl_channels:
+	ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
+	return res;
+}
+
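+/*
+ * Illustrative sketch (not built): after the cb_notify registered in
+ * ipa_mhi_init() reports IPA_MHI_EVENT_DATA_AVAILABLE and the link is
+ * brought back to M0, the MHI core is expected to resume the accelerated
+ * channels. The helper below is hypothetical.
+ */
+#if 0
+static void example_complete_m0_transition(void)
+{
+	int ret;
+
+	ret = ipa_mhi_resume();
+	if (ret)
+		pr_err("example: ipa_mhi_resume failed %d\n", ret);
+}
+#endif
+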
+
+static int ipa_mhi_destroy_channels(struct ipa_mhi_channel_ctx *channels,
+	int num_of_channels)
+{
+	struct ipa_mhi_channel_ctx *channel;
+	int i, res;
+	u32 clnt_hdl;
+
+	for (i = 0; i < num_of_channels; i++) {
+		channel = &channels[i];
+		if (!channel->valid)
+			continue;
+		if (channel->state == IPA_HW_MHI_CHANNEL_STATE_INVALID)
+			continue;
+		if (channel->state != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+			clnt_hdl = ipa_get_ep_mapping(channel->client);
+			IPA_MHI_DBG("disconnect pipe (ep: %d)\n", clnt_hdl);
+			res = ipa_mhi_disconnect_pipe(clnt_hdl);
+			if (res) {
+				IPA_MHI_ERR(
+					"failed to disconnect pipe %d, err %d\n"
+					, clnt_hdl, res);
+				goto fail;
+			}
+		}
+		res = ipa_mhi_destroy_channel(channel->client);
+		if (res) {
+			IPA_MHI_ERR(
+				"ipa_mhi_destroy_channel failed %d"
+					, res);
+			goto fail;
+		}
+	}
+	return 0;
+fail:
+	return res;
+}
+
+/**
+ * ipa_mhi_destroy_all_channels() - Destroy MHI IPA channels
+ *
+ * This function is called by IPA MHI client driver on MHI reset to destroy all
+ * IPA MHI channels.
+ */
+int ipa_mhi_destroy_all_channels(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	/* reset all UL and DL acc channels and their associated event rings */
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->ul_channels,
+		IPA_MHI_MAX_UL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(ul_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All UL channels are disconnected\n");
+
+	res = ipa_mhi_destroy_channels(ipa_mhi_client_ctx->dl_channels,
+		IPA_MHI_MAX_DL_CHANNELS);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_destroy_channels(dl_channels) failed %d\n",
+			res);
+		return -EPERM;
+	}
+	IPA_MHI_DBG("All DL channels are disconnected\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+static void ipa_mhi_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+/**
+ * ipa_mhi_destroy() - Destroy MHI IPA
+ *
+ * This function is called by MHI client driver on MHI reset to destroy all IPA
+ * MHI resources.
+ * When this function returns ipa_mhi can re-initialize.
+ */
+void ipa_mhi_destroy(void)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_DBG("IPA MHI was not initialized, already destroyed\n");
+		return;
+	}
+	/* reset all UL and DL acc channels and their associated event rings */
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_GSI) {
+		res = ipa_mhi_destroy_all_channels();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_destroy_all_channels failed %d\n",
+				res);
+			goto fail;
+		}
+	}
+	IPA_MHI_DBG("All channels are disconnected\n");
+
+	if (ipa_get_transport_type() == IPA_TRANSPORT_TYPE_SPS) {
+		IPA_MHI_DBG("cleanup uC MHI\n");
+		ipa_uc_mhi_cleanup();
+	}
+
+
+	if (ipa_mhi_client_ctx->state != IPA_MHI_STATE_INITIALIZED  &&
+			ipa_mhi_client_ctx->state != IPA_MHI_STATE_READY) {
+		IPA_MHI_DBG("release prod\n");
+		res = ipa_mhi_release_prod();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_release_prod failed %d\n", res);
+			goto fail;
+		}
+		IPA_MHI_DBG("wait for cons release\n");
+		res = ipa_mhi_wait_for_cons_release();
+		if (res) {
+			IPA_MHI_ERR("ipa_mhi_wait_for_cons_release failed %d\n",
+				res);
+			goto fail;
+		}
+		usleep_range(IPA_MHI_SUSPEND_SLEEP_MIN,
+				IPA_MHI_SUSPEND_SLEEP_MAX);
+
+		IPA_MHI_DBG("deleate dependency Q6_PROD->MHI_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_MHI_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n"
+				, IPA_RM_RESOURCE_Q6_PROD,
+				IPA_RM_RESOURCE_MHI_CONS,
+				res);
+			goto fail;
+		}
+		IPA_MHI_DBG("deleate dependency MHI_PROD->Q6_CONS\n");
+		res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+		if (res) {
+			IPA_MHI_ERR(
+				"Error deleting dependency %d->%d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_PROD,
+			IPA_RM_RESOURCE_Q6_CONS,
+			res);
+			goto fail;
+		}
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_PROD, res);
+		goto fail;
+	}
+
+	res = ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+	if (res) {
+		IPA_MHI_ERR("Error deleting resource %d, res=%d\n",
+			IPA_RM_RESOURCE_MHI_CONS, res);
+		goto fail;
+	}
+
+	ipa_mhi_debugfs_destroy();
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+	IPA_MHI_DBG("IPA MHI was reset, ready for re-init\n");
+
+	IPA_MHI_FUNC_EXIT();
+	return;
+fail:
+	ipa_assert();
+}
+
+/**
+ * ipa_mhi_init() - Initialize IPA MHI driver
+ * @params: initialization params
+ *
+ * This function is called by MHI client driver on boot to initialize IPA MHI
+ * Driver. When this function returns device can move to READY state.
+ * This function is doing the following:
+ *	- Initialize MHI IPA internal data structures
+ *	- Create IPA RM resources
+ *	- Initialize debugfs
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+	int res;
+	struct ipa_rm_create_params mhi_prod_params;
+	struct ipa_rm_create_params mhi_cons_params;
+	struct ipa_rm_perf_profile profile;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if (!params->notify) {
+		IPA_MHI_ERR("null notify function\n");
+		return -EINVAL;
+	}
+
+	if (ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("already initialized\n");
+		return -EPERM;
+	}
+
+	IPA_MHI_DBG("notify = %pF priv = %p\n", params->notify, params->priv);
+	IPA_MHI_DBG("msi: addr_lo = 0x%x addr_hi = 0x%x\n",
+		params->msi.addr_low, params->msi.addr_hi);
+	IPA_MHI_DBG("msi: data = 0x%x mask = 0x%x\n",
+		params->msi.data, params->msi.mask);
+	IPA_MHI_DBG("mmio_addr = 0x%x\n", params->mmio_addr);
+	IPA_MHI_DBG("first_ch_idx = 0x%x\n", params->first_ch_idx);
+	IPA_MHI_DBG("first_er_idx = 0x%x\n", params->first_er_idx);
+	IPA_MHI_DBG("assert_bit40=%d\n", params->assert_bit40);
+	IPA_MHI_DBG("test_mode=%d\n", params->test_mode);
+
+	/* Initialize context */
+	ipa_mhi_client_ctx = kzalloc(sizeof(*ipa_mhi_client_ctx), GFP_KERNEL);
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("no memory\n");
+		res = -EFAULT;
+		goto fail_alloc_ctx;
+	}
+
+	ipa_mhi_client_ctx->state = IPA_MHI_STATE_INITIALIZED;
+	ipa_mhi_client_ctx->cb_notify = params->notify;
+	ipa_mhi_client_ctx->cb_priv = params->priv;
+	ipa_mhi_client_ctx->rm_cons_state = IPA_MHI_RM_STATE_RELEASED;
+	init_completion(&ipa_mhi_client_ctx->rm_prod_granted_comp);
+	spin_lock_init(&ipa_mhi_client_ctx->state_lock);
+	init_completion(&ipa_mhi_client_ctx->rm_cons_comp);
+	ipa_mhi_client_ctx->msi = params->msi;
+	ipa_mhi_client_ctx->mmio_addr = params->mmio_addr;
+	ipa_mhi_client_ctx->first_ch_idx = params->first_ch_idx;
+	ipa_mhi_client_ctx->first_er_idx = params->first_er_idx;
+	ipa_mhi_client_ctx->qmi_req_id = 0;
+	ipa_mhi_client_ctx->use_ipadma = true;
+	ipa_mhi_client_ctx->assert_bit40 = !!params->assert_bit40;
+	ipa_mhi_client_ctx->test_mode = params->test_mode;
+
+	ipa_mhi_client_ctx->wq = create_singlethread_workqueue("ipa_mhi_wq");
+	if (!ipa_mhi_client_ctx->wq) {
+		IPA_MHI_ERR("failed to create workqueue\n");
+		res = -EFAULT;
+		goto fail_create_wq;
+	}
+
+	/* Create PROD in IPA RM */
+	memset(&mhi_prod_params, 0, sizeof(mhi_prod_params));
+	mhi_prod_params.name = IPA_RM_RESOURCE_MHI_PROD;
+	mhi_prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+	mhi_prod_params.reg_params.notify_cb = ipa_mhi_rm_prod_notify;
+	res = ipa_rm_create_resource(&mhi_prod_params);
+	if (res) {
+		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_PROD\n");
+		goto fail_create_rm_prod;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_PROD, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_PROD\n");
+		goto fail_perf_rm_prod;
+	}
+
+	/* Create CONS in IPA RM */
+	memset(&mhi_cons_params, 0, sizeof(mhi_cons_params));
+	mhi_cons_params.name = IPA_RM_RESOURCE_MHI_CONS;
+	mhi_cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+	mhi_cons_params.request_resource = ipa_mhi_rm_cons_request;
+	mhi_cons_params.release_resource = ipa_mhi_rm_cons_release;
+	res = ipa_rm_create_resource(&mhi_cons_params);
+	if (res) {
+		IPA_MHI_ERR("fail to create IPA_RM_RESOURCE_MHI_CONS\n");
+		goto fail_create_rm_cons;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 1000;
+	res = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_MHI_CONS, &profile);
+	if (res) {
+		IPA_MHI_ERR("fail to set profile to MHI_CONS\n");
+		goto fail_perf_rm_cons;
+	}
+
+	/* Initialize uC interface */
+	ipa_uc_mhi_init(ipa_mhi_uc_ready_cb,
+		ipa_mhi_uc_wakeup_request_cb);
+	if (ipa_uc_state_check() == 0)
+		ipa_mhi_set_state(IPA_MHI_STATE_READY);
+
+	/* Initialize debugfs */
+	ipa_mhi_debugfs_init();
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_perf_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_CONS);
+fail_create_rm_cons:
+fail_perf_rm_prod:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_MHI_PROD);
+fail_create_rm_prod:
+	destroy_workqueue(ipa_mhi_client_ctx->wq);
+fail_create_wq:
+	kfree(ipa_mhi_client_ctx);
+	ipa_mhi_client_ctx = NULL;
+fail_alloc_ctx:
+	return res;
+}
+
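+/*
+ * Illustrative sketch (not built): minimal initialization as an MHI core
+ * driver might perform it. Only fields consumed by ipa_mhi_init() above are
+ * populated, and all MSI/MMIO values are placeholders. The notify callback
+ * signature is inferred from the cb_notify call sites earlier in this file
+ * (priv, event, data) and should be checked against ipa_mhi.h.
+ */
+#if 0
+static void example_mhi_notify(void *priv, enum ipa_mhi_event_type event,
+	unsigned long data)
+{
+	pr_info("example: IPA MHI event %d\n", event);
+}
+
+static int example_mhi_core_init_ipa(void)
+{
+	struct ipa_mhi_init_params ip;
+
+	memset(&ip, 0, sizeof(ip));
+	ip.notify = example_mhi_notify;
+	ip.priv = NULL;
+	ip.msi.addr_low = 0;	/* placeholder MSI address/data */
+	ip.msi.addr_hi = 0;
+	ip.msi.data = 0;
+	ip.msi.mask = 0;
+	ip.mmio_addr = 0;	/* placeholder MMIO base */
+	ip.first_ch_idx = 0;
+	ip.first_er_idx = 0;
+	ip.assert_bit40 = false;
+	ip.test_mode = false;
+
+	return ipa_mhi_init(&ip);
+}
+#endif
+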
+static void ipa_mhi_cache_dl_ul_sync_info(
+	struct ipa_config_req_msg_v01 *config_req)
+{
+	ipa_cached_dl_ul_sync_info.params.isDlUlSyncEnabled = true;
+	ipa_cached_dl_ul_sync_info.params.UlAccmVal =
+		(config_req->ul_accumulation_time_limit_valid) ?
+		config_req->ul_accumulation_time_limit : 0;
+	ipa_cached_dl_ul_sync_info.params.ulMsiEventThreshold =
+		(config_req->ul_msi_event_threshold_valid) ?
+		config_req->ul_msi_event_threshold : 0;
+	ipa_cached_dl_ul_sync_info.params.dlMsiEventThreshold =
+		(config_req->dl_msi_event_threshold_valid) ?
+		config_req->dl_msi_event_threshold : 0;
+}
+
+/**
+ * ipa_mhi_handle_ipa_config_req() - handle IPA CONFIG QMI message
+ *
+ * This function is called by the IPA QMI service to indicate that an IPA
+ * CONFIG message was sent from the modem. IPA MHI forwards this information
+ * to the IPA uC, or caches it until IPA MHI is initialized.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (ipa_get_transport_type() != IPA_TRANSPORT_TYPE_GSI) {
+		ipa_mhi_cache_dl_ul_sync_info(config_req);
+		if (ipa_mhi_client_ctx &&
+				ipa_mhi_client_ctx->state !=
+						IPA_MHI_STATE_INITIALIZED)
+			ipa_uc_mhi_send_dl_ul_sync_info(
+				&ipa_cached_dl_ul_sync_info);
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa_mhi_is_using_dma(bool *flag)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!ipa_mhi_client_ctx) {
+		IPA_MHI_ERR("not initialized\n");
+		return -EPERM;
+	}
+
+	*flag = ipa_mhi_client_ctx->use_ipadma;
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(ipa_mhi_is_using_dma);
+
+const char *ipa_mhi_get_state_str(int state)
+{
+	return MHI_STATE_STR(state);
+}
+EXPORT_SYMBOL(ipa_mhi_get_state_str);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI client driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
new file mode 100644
index 0000000..069f0a2
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -0,0 +1,597 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_uc_offload.h>
+#include <linux/msm_ipa.h>
+#include "../ipa_common_i.h"
+
+#define IPA_NTN_DMA_POOL_ALIGNMENT 8
+#define OFFLOAD_DRV_NAME "ipa_uc_offload"
+#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
+	do { \
+		pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
+	do { \
+		pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
+	do { \
+		pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+enum ipa_uc_offload_state {
+	IPA_UC_OFFLOAD_STATE_INVALID,
+	IPA_UC_OFFLOAD_STATE_INITIALIZED,
+	IPA_UC_OFFLOAD_STATE_UP,
+	IPA_UC_OFFLOAD_STATE_DOWN,
+};
+
+struct ipa_uc_offload_ctx {
+	enum ipa_uc_offload_proto proto;
+	enum ipa_uc_offload_state state;
+	void *priv;
+	u8 hdr_len;
+	u32 partial_hdr_hdl[IPA_IP_MAX];
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	ipa_notify_cb notify;
+	struct completion ntn_completion;
+};
+
+static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
+
+static int ipa_commit_partial_hdr(
+	struct ipa_ioc_add_hdr *hdr,
+	const char *netdev_name,
+	struct ipa_hdr_info *hdr_info)
+{
+	int i;
+
+	if (hdr == NULL || hdr_info == NULL) {
+		IPA_UC_OFFLOAD_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	hdr->commit = 1;
+	hdr->num_hdrs = 2;
+
+	snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
+			 "%s_ipv4", netdev_name);
+	snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
+			 "%s_ipv6", netdev_name);
+	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
+		hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
+		memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
+		hdr->hdr[i].type = hdr_info[i].hdr_type;
+		hdr->hdr[i].is_partial = 1;
+		hdr->hdr[i].is_eth2_ofst_valid = 1;
+		hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
+	}
+
+	if (ipa_add_hdr(hdr)) {
+		IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa_uc_offload_ntn_reg_intf(
+	struct ipa_uc_offload_intf_params *inp,
+	struct ipa_uc_offload_out_params *outp,
+	struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_tx_intf tx;
+	struct ipa_rx_intf rx;
+	struct ipa_ioc_tx_intf_prop tx_prop[2];
+	struct ipa_ioc_rx_intf_prop rx_prop[2];
+	u32 len;
+	int ret = 0;
+
+	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
+					 inp->netdev_name);
+
+	memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
+	ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
+	ntn_ctx->notify = inp->notify;
+	ntn_ctx->priv = inp->priv;
+
+	/* add partial header */
+	len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+		return -ENOMEM;
+	}
+
+	if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
+		IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	/* populate tx prop */
+	tx.num_props = 2;
+	tx.prop = tx_prop;
+
+	memset(tx_prop, 0, sizeof(tx_prop));
+	tx_prop[0].ip = IPA_IP_v4;
+	tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+	tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+	memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
+		sizeof(tx_prop[0].hdr_name));
+
+	tx_prop[1].ip = IPA_IP_v6;
+	tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+	tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+	memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
+		sizeof(tx_prop[1].hdr_name));
+
+	/* populate rx prop */
+	rx.num_props = 2;
+	rx.prop = rx_prop;
+
+	memset(rx_prop, 0, sizeof(rx_prop));
+	rx_prop[0].ip = IPA_IP_v4;
+	rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
+	if (inp->is_meta_data_valid) {
+		rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
+		rx_prop[0].attrib.meta_data = inp->meta_data;
+		rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
+	}
+
+	rx_prop[1].ip = IPA_IP_v6;
+	rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
+	if (inp->is_meta_data_valid) {
+		rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
+		rx_prop[1].attrib.meta_data = inp->meta_data;
+		rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
+	}
+
+	if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
+		IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
+		memset(ntn_ctx, 0, sizeof(*ntn_ctx));
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
+	ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
+	init_completion(&ntn_ctx->ntn_completion);
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
+
+fail:
+	kfree(hdr);
+	return ret;
+}
+
+int ipa_uc_offload_reg_intf(
+	struct ipa_uc_offload_intf_params *inp,
+	struct ipa_uc_offload_out_params *outp)
+{
+	struct ipa_uc_offload_ctx *ctx;
+	int ret = 0;
+
+	if (inp == NULL || outp == NULL) {
+		IPA_UC_OFFLOAD_ERR("invalid params in=%p out=%p\n", inp, outp);
+		return -EINVAL;
+	}
+
+	if (inp->proto <= IPA_UC_INVALID ||
+		inp->proto >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
+		return -EINVAL;
+	}
+
+	if (!ipa_uc_offload_ctx[inp->proto]) {
+		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+		if (ctx == NULL) {
+			IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
+			return -ENOMEM;
+		}
+		ipa_uc_offload_ctx[inp->proto] = ctx;
+		ctx->proto = inp->proto;
+	} else {
+		ctx = ipa_uc_offload_ctx[inp->proto];
+	}
+
+	if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
+		IPA_UC_OFFLOAD_ERR("Already Initialized\n");
+		return -EINVAL;
+	}
+
+	if (ctx->proto == IPA_UC_NTN) {
+		ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
+		if (!ret)
+			outp->clnt_hndl = IPA_UC_NTN;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
+
+static int ipa_uc_ntn_cons_release(void)
+{
+	return 0;
+}
+
+static int ipa_uc_ntn_cons_request(void)
+{
+	int ret = 0;
+	struct ipa_uc_offload_ctx *ntn_ctx;
+
+	ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
+	if (!ntn_ctx) {
+		IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
+		ret = -EFAULT;
+	} else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+		IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
+		ret = -EFAULT;
+	}
+
+	return ret;
+}
+
+static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
+		unsigned long data)
+{
+	struct ipa_uc_offload_ctx *offload_ctx;
+
+	offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
+	if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
+		  offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
+		IPA_UC_OFFLOAD_ERR("Invalid user data\n");
+		return;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
+		IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
+
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		complete_all(&offload_ctx->ntn_completion);
+		break;
+
+	case IPA_RM_RESOURCE_RELEASED:
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d\n", event);
+		break;
+	}
+}
+
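+/*
+ * Connect the NTN offload pipes: verify ring/buffer-pool alignment, create
+ * the ODU_ADAPT PROD/CONS RM resources, add the PROD->APPS_CONS dependency,
+ * set up the uC NTN pipes and request the PROD resource.
+ */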
+int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
+			struct ipa_ntn_conn_out_params *outp,
+			struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	struct ipa_rm_create_params param;
+	int result = 0;
+
+	if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+		inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+		IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
+		return -EINVAL;
+	}
+	if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
+		inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
+		IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
+		return -EINVAL;
+	}
+
+	memset(&param, 0, sizeof(param));
+	param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	param.reg_params.user_data = ntn_ctx;
+	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
+	param.floor_voltage = IPA_VOLTAGE_SVS;
+	result = ipa_rm_create_resource(&param);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+		return -EFAULT;
+	}
+
+	memset(&param, 0, sizeof(param));
+	param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	param.request_resource = ipa_uc_ntn_cons_request;
+	param.release_resource = ipa_uc_ntn_cons_release;
+	result = ipa_rm_create_resource(&param);
+	if (result) {
+		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+		goto fail_create_rm_cons;
+	}
+
+	if (ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_APPS_CONS)) {
+		IPA_UC_OFFLOAD_ERR("fail to add rm dependency\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
+		ntn_ctx->priv, ntn_ctx->hdr_len, outp)) {
+		IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
+	result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	if (result == -EINPROGRESS) {
+		if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
+			10*HZ) == 0) {
+			IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+			result = -EFAULT;
+			goto fail;
+		}
+	} else if (result != 0) {
+		IPA_UC_OFFLOAD_ERR("fail to request resource\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_create_rm_cons:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+
+	return result;
+}
+
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
+			struct ipa_uc_offload_conn_out_params *outp)
+{
+	int ret = 0;
+	struct ipa_uc_offload_ctx *offload_ctx;
+
+	if (!(inp && outp)) {
+		IPA_UC_OFFLOAD_ERR("bad parm. in=%p out=%p\n", inp, outp);
+		return -EINVAL;
+	}
+
+	if (inp->clnt_hndl <= IPA_UC_INVALID ||
+		inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
+						   inp->clnt_hndl);
+		return -EINVAL;
+	}
+
+	offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
+	if (!offload_ctx) {
+		IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
+		return -EINVAL;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
+		IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
+		return -EPERM;
+	}
+
+	switch (offload_ctx->proto) {
+	case IPA_UC_NTN:
+		ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
+						offload_ctx);
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
+
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+	struct ipa_rm_perf_profile rm_profile;
+	enum ipa_rm_resource_name resource_name;
+
+	if (profile == NULL) {
+		IPA_UC_OFFLOAD_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	rm_profile.max_supported_bandwidth_mbps =
+		profile->max_supported_bw_mbps;
+
+	if (profile->client == IPA_CLIENT_ODU_PROD) {
+		resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	} else if (profile->client == IPA_CLIENT_ODU_TETH_CONS) {
+		resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	} else {
+		IPA_UC_OFFLOAD_ERR("not supported\n");
+		return -EINVAL;
+	}
+
+	if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
+		IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_perf_profile);
+
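+/*
+ * Bring down the NTN offload: remove the RM dependency and resources, then
+ * tear down the uC offload pipes.
+ */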
+static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int ipa_ep_idx_ul, ipa_ep_idx_dl;
+
+	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
+	if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+				IPA_RM_RESOURCE_APPS_CONS)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete rm dependency\n");
+		return -EFAULT;
+	}
+
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+		return -EFAULT;
+	}
+
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+		return -EFAULT;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+	if (ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl)) {
+		IPA_UC_OFFLOAD_ERR("fail to tear down uc offload pipes\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+	struct ipa_uc_offload_ctx *offload_ctx;
+	int ret = 0;
+
+	if (clnt_hdl <= IPA_UC_INVALID ||
+		clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+	if (!offload_ctx) {
+		IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
+		return -EINVAL;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
+		IPA_UC_OFFLOAD_ERR("Invalid state\n");
+		return -EINVAL;
+	}
+
+	switch (offload_ctx->proto) {
+	case IPA_UC_NTN:
+		ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
+
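+/* Delete the partial headers added at registration and deregister the intf */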
+static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
+{
+	int len, result = 0;
+	struct ipa_ioc_del_hdr *hdr;
+
+	len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
+	hdr = kzalloc(len, GFP_KERNEL);
+	if (hdr == NULL) {
+		IPA_UC_OFFLOAD_ERR("fail to alloc %d bytes\n", len);
+		return -ENOMEM;
+	}
+
+	hdr->commit = 1;
+	hdr->num_hdls = 2;
+	hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
+	hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
+
+	if (ipa_del_hdr(hdr)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+fail:
+	kfree(hdr);
+	return result;
+}
+
+int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+	struct ipa_uc_offload_ctx *offload_ctx;
+	int ret = 0;
+
+	if (clnt_hdl <= IPA_UC_INVALID ||
+		clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
+		IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
+	if (!offload_ctx) {
+		IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_DOWN) {
+		IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
+		return -EINVAL;
+	}
+
+	switch (offload_ctx->proto) {
+	case IPA_UC_NTN:
+		ret = ipa_uc_ntn_cleanup(offload_ctx);
+		break;
+
+	default:
+		IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
+		ret = -EINVAL;
+		break;
+	}
+
+	if (!ret) {
+		kfree(offload_ctx);
+		offload_ctx = NULL;
+		ipa_uc_offload_ctx[clnt_hdl] = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_offload_cleanup);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
new file mode 100644
index 0000000..d183083
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -0,0 +1,2711 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/rndis_ipa.h>
+#include <linux/ecm_ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_USB_RM_TIMEOUT_MSEC 10000
+#define IPA_USB_DEV_READY_TIMEOUT_MSEC 10000
+
+#define IPA_HOLB_TMR_EN 0x1
+
+/* GSI channels weights */
+#define IPA_USB_DL_CHAN_LOW_WEIGHT 0x5
+#define IPA_USB_UL_CHAN_LOW_WEIGHT 0x4
+
+#define IPA_USB_MAX_MSG_LEN 4096
+
+#define IPA_USB_DRV_NAME "ipa_usb"
+
+#define IPA_USB_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_USB_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_USB_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_USB_INFO(fmt, args...) \
+	do { \
+		pr_info(IPA_USB_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_USB_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+struct ipa_usb_xdci_connect_params_internal {
+	enum ipa_usb_max_usb_packet_size max_pkt_size;
+	u32 ipa_to_usb_clnt_hdl;
+	u8 ipa_to_usb_xferrscidx;
+	bool ipa_to_usb_xferrscidx_valid;
+	u32 usb_to_ipa_clnt_hdl;
+	u8 usb_to_ipa_xferrscidx;
+	bool usb_to_ipa_xferrscidx_valid;
+	enum ipa_usb_teth_prot teth_prot;
+	struct ipa_usb_teth_prot_params teth_prot_params;
+	u32 max_supported_bandwidth_mbps;
+};
+
+enum ipa3_usb_teth_prot_state {
+	IPA_USB_TETH_PROT_INITIALIZED,
+	IPA_USB_TETH_PROT_CONNECTED,
+	IPA_USB_TETH_PROT_INVALID
+};
+
+struct ipa3_usb_teth_prot_context {
+	union {
+		struct ipa_usb_init_params rndis;
+		struct ecm_ipa_params ecm;
+		struct teth_bridge_init_params teth_bridge;
+	} teth_prot_params;
+	enum ipa3_usb_teth_prot_state state;
+	void *user_data;
+};
+
+enum ipa3_usb_cons_state {
+	IPA_USB_CONS_GRANTED,
+	IPA_USB_CONS_RELEASED
+};
+
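+/* RM resources and consumer state for a single transport type (TETH/DPL) */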
+struct ipa3_usb_rm_context {
+	struct ipa_rm_create_params prod_params;
+	struct ipa_rm_create_params cons_params;
+	bool prod_valid;
+	bool cons_valid;
+	struct completion prod_comp;
+	enum ipa3_usb_cons_state cons_state;
+	/* consumer was requested */
+	bool cons_requested;
+	/* consumer was requested and released before it was granted */
+	bool cons_requested_released;
+};
+
+enum ipa3_usb_state {
+	IPA_USB_INVALID,
+	IPA_USB_INITIALIZED,
+	IPA_USB_CONNECTED,
+	IPA_USB_STOPPED,
+	IPA_USB_SUSPEND_REQUESTED,
+	IPA_USB_SUSPEND_IN_PROGRESS,
+	IPA_USB_SUSPENDED,
+	IPA_USB_RESUME_IN_PROGRESS
+};
+
+enum ipa3_usb_transport_type {
+	IPA_USB_TRANSPORT_TETH,
+	IPA_USB_TRANSPORT_DPL,
+	IPA_USB_TRANSPORT_MAX
+};
+
+/* Get transport type from tethering protocol */
+#define IPA3_USB_GET_TTYPE(__teth_prot) \
+	(((__teth_prot) == IPA_USB_DIAG) ? \
+	IPA_USB_TRANSPORT_DPL : IPA_USB_TRANSPORT_TETH)
+
+/* Is the given transport type DPL? */
+#define IPA3_USB_IS_TTYPE_DPL(__ttype) \
+	((__ttype) == IPA_USB_TRANSPORT_DPL)
+
+struct finish_suspend_work_context {
+	struct work_struct work;
+	enum ipa3_usb_transport_type ttype;
+	u32 dl_clnt_hdl;
+	u32 ul_clnt_hdl;
+};
+
+/**
+ * Transport type - could be either data tethering or DPL.
+ * Each transport has its own RM resources and statuses.
+ */
+struct ipa3_usb_transport_type_ctx {
+	struct ipa3_usb_rm_context rm_ctx;
+	int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event, void *user_data);
+	void *user_data;
+	enum ipa3_usb_state state;
+	struct finish_suspend_work_context finish_suspend_work;
+	struct ipa_usb_xdci_chan_params ch_params;
+};
+
+struct ipa3_usb_smmu_reg_map {
+	int cnt;
+	phys_addr_t addr;
+};
+
+struct ipa3_usb_context {
+	struct ipa3_usb_teth_prot_context
+		teth_prot_ctx[IPA_USB_MAX_TETH_PROT_SIZE];
+	int num_init_prot; /* without dpl */
+	struct teth_bridge_init_params teth_bridge_params;
+	struct completion dev_ready_comp;
+	u32 qmi_req_id;
+	spinlock_t state_lock;
+	bool dl_data_pending;
+	struct workqueue_struct *wq;
+	struct mutex general_mutex;
+	struct ipa3_usb_transport_type_ctx
+		ttype_ctx[IPA_USB_TRANSPORT_MAX];
+	struct dentry *dfile_state_info;
+	struct dentry *dent;
+	struct ipa3_usb_smmu_reg_map smmu_reg_map;
+};
+
+enum ipa3_usb_op {
+	IPA_USB_INIT_TETH_PROT,
+	IPA_USB_REQUEST_CHANNEL,
+	IPA_USB_CONNECT,
+	IPA_USB_DISCONNECT,
+	IPA_USB_RELEASE_CHANNEL,
+	IPA_USB_DEINIT_TETH_PROT,
+	IPA_USB_SUSPEND,
+	IPA_USB_RESUME
+};
+
+struct ipa3_usb_status_dbg_info {
+	const char *teth_state;
+	const char *dpl_state;
+	int num_init_prot;
+	const char *inited_prots[IPA_USB_MAX_TETH_PROT_SIZE];
+	const char *teth_connected_prot;
+	const char *dpl_connected_prot;
+	const char *teth_cons_state;
+	const char *dpl_cons_state;
+};
+
+static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work);
+static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work);
+static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work);
+static DECLARE_WORK(ipa3_usb_notify_remote_wakeup_work,
+	ipa3_usb_wq_notify_remote_wakeup);
+static DECLARE_WORK(ipa3_usb_dpl_notify_remote_wakeup_work,
+	ipa3_usb_wq_dpl_notify_remote_wakeup);
+static DECLARE_WORK(ipa3_usb_notify_suspend_completed_work,
+	ipa3_usb_wq_notify_suspend_completed);
+static DECLARE_WORK(ipa3_usb_dpl_notify_suspend_completed_work,
+	ipa3_usb_wq_dpl_notify_suspend_completed);
+
+struct ipa3_usb_context *ipa3_usb_ctx;
+
+static char *ipa3_usb_op_to_string(enum ipa3_usb_op op)
+{
+	switch (op) {
+	case IPA_USB_INIT_TETH_PROT:
+		return "IPA_USB_INIT_TETH_PROT";
+	case IPA_USB_REQUEST_CHANNEL:
+		return "IPA_USB_REQUEST_CHANNEL";
+	case IPA_USB_CONNECT:
+		return "IPA_USB_CONNECT";
+	case IPA_USB_DISCONNECT:
+		return "IPA_USB_DISCONNECT";
+	case IPA_USB_RELEASE_CHANNEL:
+		return "IPA_USB_RELEASE_CHANNEL";
+	case IPA_USB_DEINIT_TETH_PROT:
+		return "IPA_USB_DEINIT_TETH_PROT";
+	case IPA_USB_SUSPEND:
+		return "IPA_USB_SUSPEND";
+	case IPA_USB_RESUME:
+		return "IPA_USB_RESUME";
+	}
+
+	return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_state_to_string(enum ipa3_usb_state state)
+{
+	switch (state) {
+	case IPA_USB_INVALID:
+		return "IPA_USB_INVALID";
+	case IPA_USB_INITIALIZED:
+		return "IPA_USB_INITIALIZED";
+	case IPA_USB_CONNECTED:
+		return "IPA_USB_CONNECTED";
+	case IPA_USB_STOPPED:
+		return "IPA_USB_STOPPED";
+	case IPA_USB_SUSPEND_REQUESTED:
+		return "IPA_USB_SUSPEND_REQUESTED";
+	case IPA_USB_SUSPEND_IN_PROGRESS:
+		return "IPA_USB_SUSPEND_IN_PROGRESS";
+	case IPA_USB_SUSPENDED:
+		return "IPA_USB_SUSPENDED";
+	case IPA_USB_RESUME_IN_PROGRESS:
+		return "IPA_USB_RESUME_IN_PROGRESS";
+	}
+
+	return "UNSUPPORTED";
+}
+
+static char *ipa3_usb_notify_event_to_string(enum ipa_usb_notify_event event)
+{
+	switch (event) {
+	case IPA_USB_DEVICE_READY:
+		return "IPA_USB_DEVICE_READY";
+	case IPA_USB_REMOTE_WAKEUP:
+		return "IPA_USB_REMOTE_WAKEUP";
+	case IPA_USB_SUSPEND_COMPLETED:
+		return "IPA_USB_SUSPEND_COMPLETED";
+	}
+
+	return "UNSUPPORTED";
+}
+
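+/*
+ * Validate and apply a state transition for the given transport type.
+ * Returns true if the transition is legal (and was applied), else false.
+ * On a legal transition to CONNECTED, a pending CONS request is granted.
+ */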
+static bool ipa3_usb_set_state(enum ipa3_usb_state new_state, bool err_permit,
+	enum ipa3_usb_transport_type ttype)
+{
+	unsigned long flags;
+	bool state_legal = false;
+	enum ipa3_usb_state state;
+	struct ipa3_usb_rm_context *rm_ctx;
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	switch (new_state) {
+	case IPA_USB_INVALID:
+		if (state == IPA_USB_INITIALIZED)
+			state_legal = true;
+		break;
+	case IPA_USB_INITIALIZED:
+		if (state == IPA_USB_STOPPED || state == IPA_USB_INVALID ||
+			((!IPA3_USB_IS_TTYPE_DPL(ttype)) &&
+			(state == IPA_USB_INITIALIZED)))
+			state_legal = true;
+		break;
+	case IPA_USB_CONNECTED:
+		if (state == IPA_USB_INITIALIZED ||
+			state == IPA_USB_STOPPED ||
+			state == IPA_USB_RESUME_IN_PROGRESS ||
+			/*
+			 * In case of failure during suspend request
+			 * handling, state is reverted to connected.
+			 */
+			(err_permit && state == IPA_USB_SUSPEND_REQUESTED) ||
+			/*
+			 * In case of failure while completing the suspend,
+			 * the state is reverted to connected.
+			 */
+			(err_permit && state == IPA_USB_SUSPEND_IN_PROGRESS))
+			state_legal = true;
+		break;
+	case IPA_USB_STOPPED:
+		if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
+			state == IPA_USB_CONNECTED ||
+			state == IPA_USB_SUSPENDED)
+			state_legal = true;
+		break;
+	case IPA_USB_SUSPEND_REQUESTED:
+		if (state == IPA_USB_CONNECTED)
+			state_legal = true;
+		break;
+	case IPA_USB_SUSPEND_IN_PROGRESS:
+		if (state == IPA_USB_SUSPEND_REQUESTED ||
+			/*
+			 * In case of failure during resume, state is reverted
+			 * to original, which could be suspend_in_progress.
+			 * Allow it.
+			 */
+			(err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
+			state_legal = true;
+		break;
+	case IPA_USB_SUSPENDED:
+		if (state == IPA_USB_SUSPEND_REQUESTED ||
+			state == IPA_USB_SUSPEND_IN_PROGRESS ||
+			/*
+			 * In case of failure during resume, state is reverted
+			 * to original, which could be suspended. Allow it
+			 */
+			(err_permit && state == IPA_USB_RESUME_IN_PROGRESS))
+			state_legal = true;
+		break;
+	case IPA_USB_RESUME_IN_PROGRESS:
+		if (state == IPA_USB_SUSPEND_IN_PROGRESS ||
+			state == IPA_USB_SUSPENDED)
+			state_legal = true;
+		break;
+	default:
+		state_legal = false;
+		break;
+
+	}
+	if (state_legal) {
+		if (state != new_state) {
+			IPA_USB_DBG("ipa_usb %s state changed %s -> %s\n",
+				IPA3_USB_IS_TTYPE_DPL(ttype) ? "DPL" : "",
+				ipa3_usb_state_to_string(state),
+				ipa3_usb_state_to_string(new_state));
+			ipa3_usb_ctx->ttype_ctx[ttype].state = new_state;
+		}
+	} else {
+		IPA_USB_ERR("invalid state change %s -> %s\n",
+			ipa3_usb_state_to_string(state),
+			ipa3_usb_state_to_string(new_state));
+	}
+
+	if (state_legal && (new_state == IPA_USB_CONNECTED)) {
+		rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+		if ((rm_ctx->cons_state == IPA_USB_CONS_GRANTED) ||
+			rm_ctx->cons_requested_released) {
+			rm_ctx->cons_requested = false;
+			rm_ctx->cons_requested_released = false;
+		}
+		/* Notify RM that consumer is granted */
+		if (rm_ctx->cons_requested) {
+			ipa_rm_notify_completion(
+				IPA_RM_RESOURCE_GRANTED,
+				rm_ctx->cons_params.name);
+			rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
+			rm_ctx->cons_requested = false;
+		}
+	}
+
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	return state_legal;
+}
+
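+/* Check whether the requested operation is legal in the current state */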
+static bool ipa3_usb_check_legal_op(enum ipa3_usb_op op,
+	enum ipa3_usb_transport_type ttype)
+{
+	unsigned long flags;
+	bool is_legal = false;
+	enum ipa3_usb_state state;
+	bool is_dpl;
+
+	if (ipa3_usb_ctx == NULL) {
+		IPA_USB_ERR("ipa_usb_ctx is not initialized!\n");
+		return false;
+	}
+
+	is_dpl = IPA3_USB_IS_TTYPE_DPL(ttype);
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	switch (op) {
+	case IPA_USB_INIT_TETH_PROT:
+		if (state == IPA_USB_INVALID ||
+			(!is_dpl && state == IPA_USB_INITIALIZED))
+			is_legal = true;
+		break;
+	case IPA_USB_REQUEST_CHANNEL:
+		if (state == IPA_USB_INITIALIZED)
+			is_legal = true;
+		break;
+	case IPA_USB_CONNECT:
+		if (state == IPA_USB_INITIALIZED || state == IPA_USB_STOPPED)
+			is_legal = true;
+		break;
+	case IPA_USB_DISCONNECT:
+		if (state == IPA_USB_CONNECTED ||
+			state == IPA_USB_SUSPEND_IN_PROGRESS ||
+			state == IPA_USB_SUSPENDED)
+			is_legal = true;
+		break;
+	case IPA_USB_RELEASE_CHANNEL:
+		/* when releasing the 1st channel, state has already changed */
+		if (state == IPA_USB_STOPPED ||
+			(!is_dpl && state == IPA_USB_INITIALIZED))
+			is_legal = true;
+		break;
+	case IPA_USB_DEINIT_TETH_PROT:
+		/*
+		 * For data tethering, always allow deinit of an inited
+		 * protocol. E.g. rmnet is inited and rndis is connected;
+		 * USB can deinit rmnet first and then disconnect rndis
+		 * on cable disconnect.
+		 */
+		if (!is_dpl || state == IPA_USB_INITIALIZED)
+			is_legal = true;
+		break;
+	case IPA_USB_SUSPEND:
+		if (state == IPA_USB_CONNECTED)
+			is_legal = true;
+		break;
+	case IPA_USB_RESUME:
+		if (state == IPA_USB_SUSPENDED ||
+			state == IPA_USB_SUSPEND_IN_PROGRESS)
+			is_legal = true;
+		break;
+	default:
+		is_legal = false;
+		break;
+	}
+
+	if (!is_legal) {
+		IPA_USB_ERR("Illegal %s operation: state=%s operation=%s\n",
+			is_dpl ? "DPL" : "",
+			ipa3_usb_state_to_string(state),
+			ipa3_usb_op_to_string(op));
+	}
+
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	return is_legal;
+}
+
+static void ipa3_usb_notify_do(enum ipa3_usb_transport_type ttype,
+	enum ipa_usb_notify_event event)
+{
+	int (*cb)(enum ipa_usb_notify_event, void *user_data);
+	void *user_data;
+	int res;
+
+	IPA_USB_DBG("Trying to notify USB with %s\n",
+		ipa3_usb_notify_event_to_string(event));
+
+	cb = ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb;
+	user_data = ipa3_usb_ctx->ttype_ctx[ttype].user_data;
+
+	if (cb) {
+		res = cb(event, user_data);
+		IPA_USB_DBG("Notified USB with %s. is_dpl=%d result=%d\n",
+			ipa3_usb_notify_event_to_string(event),
+			IPA3_USB_IS_TTYPE_DPL(ttype), res);
+	}
+}
+
+/*
+ * This callback is called from the ECM or RNDIS drivers.
+ * Both are data tethering drivers, not DPL.
+ */
+void ipa3_usb_device_ready_notify_cb(void)
+{
+	IPA_USB_DBG_LOW("entry\n");
+	ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH,
+		IPA_USB_DEVICE_READY);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_prod_notify_cb_do(enum ipa_rm_event event,
+		enum  ipa3_usb_transport_type ttype)
+{
+	struct ipa3_usb_rm_context *rm_ctx;
+
+	IPA_USB_DBG_LOW("entry\n");
+
+	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPA_USB_DBG(":%s granted\n",
+			ipa_rm_resource_str(rm_ctx->prod_params.name));
+		complete_all(&rm_ctx->prod_comp);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		IPA_USB_DBG(":%s released\n",
+			ipa_rm_resource_str(rm_ctx->prod_params.name));
+		complete_all(&rm_ctx->prod_comp);
+		break;
+	}
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static void ipa3_usb_prod_notify_cb(void *user_data, enum ipa_rm_event event,
+			     unsigned long data)
+{
+	ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_TETH);
+}
+
+static void ipa3_usb_dpl_dummy_prod_notify_cb(void *user_data,
+		enum ipa_rm_event event, unsigned long data)
+{
+	ipa3_usb_prod_notify_cb_do(event, IPA_USB_TRANSPORT_DPL);
+}
+
+static void ipa3_usb_wq_notify_remote_wakeup(struct work_struct *work)
+{
+	ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_REMOTE_WAKEUP);
+}
+
+static void ipa3_usb_wq_dpl_notify_remote_wakeup(struct work_struct *work)
+{
+	ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_REMOTE_WAKEUP);
+}
+
+static void ipa3_usb_wq_notify_suspend_completed(struct work_struct *work)
+{
+	ipa3_usb_notify_do(IPA_USB_TRANSPORT_TETH, IPA_USB_SUSPEND_COMPLETED);
+}
+
+static void ipa3_usb_wq_dpl_notify_suspend_completed(struct work_struct *work)
+{
+	ipa3_usb_notify_do(IPA_USB_TRANSPORT_DPL, IPA_USB_SUSPEND_COMPLETED);
+}
+
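+/*
+ * Deferred suspend completion: stop the DL/DPL channel, move to SUSPENDED and
+ * notify that suspend completed. On failure the channel is resumed, the state
+ * reverts to CONNECTED and a remote-wakeup notification is queued.
+ */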
+static void ipa3_usb_wq_finish_suspend_work(struct work_struct *work)
+{
+	struct finish_suspend_work_context *finish_suspend_work_ctx;
+	unsigned long flags;
+	int result = -EFAULT;
+	struct ipa3_usb_transport_type_ctx *tctx;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	finish_suspend_work_ctx = container_of(work,
+		struct finish_suspend_work_context, work);
+	tctx = &ipa3_usb_ctx->ttype_ctx[finish_suspend_work_ctx->ttype];
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	if (tctx->state != IPA_USB_SUSPEND_IN_PROGRESS) {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+		return;
+	}
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	/* Stop DL/DPL channel */
+	result = ipa3_stop_gsi_channel(finish_suspend_work_ctx->dl_clnt_hdl);
+	if (result) {
+		IPAERR("Error stopping DL/DPL channel: %d, resuming channel\n",
+			result);
+		ipa3_xdci_resume(finish_suspend_work_ctx->ul_clnt_hdl,
+			finish_suspend_work_ctx->dl_clnt_hdl,
+			IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype));
+		/* Change state back to CONNECTED */
+		if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true,
+			finish_suspend_work_ctx->ttype))
+			IPA_USB_ERR("failed to change state to connected\n");
+		queue_work(ipa3_usb_ctx->wq,
+			IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
+			&ipa3_usb_dpl_notify_remote_wakeup_work :
+			&ipa3_usb_notify_remote_wakeup_work);
+		mutex_unlock(&ipa3_usb_ctx->general_mutex);
+		return;
+	}
+
+	/* Change ipa_usb state to SUSPENDED */
+	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false,
+		finish_suspend_work_ctx->ttype))
+		IPA_USB_ERR("failed to change state to suspended\n");
+
+	queue_work(ipa3_usb_ctx->wq,
+		IPA3_USB_IS_TTYPE_DPL(finish_suspend_work_ctx->ttype) ?
+		&ipa3_usb_dpl_notify_suspend_completed_work :
+		&ipa3_usb_notify_suspend_completed_work);
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+}
+
+static int ipa3_usb_cons_request_resource_cb_do(
+	enum ipa3_usb_transport_type ttype,
+	struct work_struct *remote_wakeup_work)
+{
+	struct ipa3_usb_rm_context *rm_ctx;
+	unsigned long flags;
+	int result;
+
+	IPA_USB_DBG_LOW("entry\n");
+	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG("state is %s\n",
+			ipa3_usb_state_to_string(
+				ipa3_usb_ctx->ttype_ctx[ttype].state));
+	switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
+	case IPA_USB_CONNECTED:
+		rm_ctx->cons_state = IPA_USB_CONS_GRANTED;
+		result = 0;
+		break;
+	case IPA_USB_SUSPEND_REQUESTED:
+		rm_ctx->cons_requested = true;
+		if (rm_ctx->cons_state == IPA_USB_CONS_GRANTED)
+			result = 0;
+		else
+			result = -EINPROGRESS;
+		break;
+	case IPA_USB_SUSPEND_IN_PROGRESS:
+		/*
+		 * This case happens due to a suspend interrupt.
+		 * CONS is granted.
+		 */
+		if (!rm_ctx->cons_requested) {
+			rm_ctx->cons_requested = true;
+			queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
+		}
+		result = 0;
+		break;
+	case IPA_USB_SUSPENDED:
+		if (!rm_ctx->cons_requested) {
+			rm_ctx->cons_requested = true;
+			queue_work(ipa3_usb_ctx->wq, remote_wakeup_work);
+		}
+		result = -EINPROGRESS;
+		break;
+	default:
+		rm_ctx->cons_requested = true;
+		result = -EINPROGRESS;
+		break;
+	}
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG_LOW("exit with %d\n", result);
+	return result;
+}
+
+static int ipa3_usb_cons_request_resource_cb(void)
+{
+	return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_TETH,
+		&ipa3_usb_notify_remote_wakeup_work);
+}
+
+static int ipa3_usb_dpl_cons_request_resource_cb(void)
+{
+	return ipa3_usb_cons_request_resource_cb_do(IPA_USB_TRANSPORT_DPL,
+		&ipa3_usb_dpl_notify_remote_wakeup_work);
+}
+
+static int ipa3_usb_cons_release_resource_cb_do(
+	enum ipa3_usb_transport_type ttype)
+{
+	unsigned long flags;
+	struct ipa3_usb_rm_context *rm_ctx;
+
+	IPA_USB_DBG_LOW("entry\n");
+	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG("state is %s\n",
+			ipa3_usb_state_to_string(
+			ipa3_usb_ctx->ttype_ctx[ttype].state));
+	switch (ipa3_usb_ctx->ttype_ctx[ttype].state) {
+	case IPA_USB_SUSPEND_IN_PROGRESS:
+		/* Proceed with the suspend if no DL/DPL data */
+		if (rm_ctx->cons_requested)
+			rm_ctx->cons_requested_released = true;
+		else {
+			queue_work(ipa3_usb_ctx->wq,
+				&ipa3_usb_ctx->ttype_ctx[ttype].
+				finish_suspend_work.work);
+		}
+		break;
+	case IPA_USB_SUSPEND_REQUESTED:
+		if (rm_ctx->cons_requested)
+			rm_ctx->cons_requested_released = true;
+		break;
+	case IPA_USB_STOPPED:
+	case IPA_USB_RESUME_IN_PROGRESS:
+		if (rm_ctx->cons_requested)
+			rm_ctx->cons_requested = false;
+		break;
+	case IPA_USB_CONNECTED:
+	case IPA_USB_INITIALIZED:
+		break;
+	default:
+		IPA_USB_ERR("received cons_release_cb in bad state: %s!\n",
+			ipa3_usb_state_to_string(
+				ipa3_usb_ctx->ttype_ctx[ttype].state));
+		WARN_ON(1);
+		break;
+	}
+
+	rm_ctx->cons_state = IPA_USB_CONS_RELEASED;
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+}
+
+static int ipa3_usb_cons_release_resource_cb(void)
+{
+	return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_TETH);
+}
+
+static int ipa3_usb_dpl_cons_release_resource_cb(void)
+{
+	return ipa3_usb_cons_release_resource_cb_do(IPA_USB_TRANSPORT_DPL);
+}
+
+static char *ipa3_usb_teth_prot_to_string(enum ipa_usb_teth_prot teth_prot)
+{
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+		return "rndis_ipa";
+	case IPA_USB_ECM:
+		return "ecm_ipa";
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		return "teth_bridge";
+	case IPA_USB_DIAG:
+		return "dpl";
+	default:
+		break;
+	}
+
+	return "unsupported";
+}
+
+static char *ipa3_usb_teth_bridge_prot_to_string(
+	enum ipa_usb_teth_prot teth_prot)
+{
+	switch (teth_prot) {
+	case IPA_USB_RMNET:
+		return "rmnet";
+	case IPA_USB_MBIM:
+		return "mbim";
+	default:
+		break;
+	}
+
+	return "unsupported";
+}
+
+static int ipa3_usb_init_teth_bridge(void)
+{
+	int result;
+
+	result = teth_bridge_init(&ipa3_usb_ctx->teth_bridge_params);
+	if (result) {
+		IPA_USB_ERR("Failed to initialize teth_bridge.\n");
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_create_rm_resources(enum ipa3_usb_transport_type ttype)
+{
+	struct ipa3_usb_rm_context *rm_ctx;
+	int result = -EFAULT;
+	bool created = false;
+
+	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+
+	/* create PROD */
+	if (!rm_ctx->prod_valid) {
+		rm_ctx->prod_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD :
+			IPA_RM_RESOURCE_USB_PROD;
+		rm_ctx->prod_params.floor_voltage = IPA_VOLTAGE_SVS;
+		rm_ctx->prod_params.reg_params.user_data = NULL;
+		rm_ctx->prod_params.reg_params.notify_cb =
+			IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			ipa3_usb_dpl_dummy_prod_notify_cb :
+			ipa3_usb_prod_notify_cb;
+		rm_ctx->prod_params.request_resource = NULL;
+		rm_ctx->prod_params.release_resource = NULL;
+		result = ipa_rm_create_resource(&rm_ctx->prod_params);
+		if (result) {
+			IPA_USB_ERR("Failed to create %s RM resource\n",
+				ipa_rm_resource_str(rm_ctx->prod_params.name));
+			return result;
+		}
+		rm_ctx->prod_valid = true;
+		created = true;
+		IPA_USB_DBG("Created %s RM resource\n",
+			ipa_rm_resource_str(rm_ctx->prod_params.name));
+	}
+
+	/* Create CONS */
+	if (!rm_ctx->cons_valid) {
+		rm_ctx->cons_params.name = IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			IPA_RM_RESOURCE_USB_DPL_CONS :
+			IPA_RM_RESOURCE_USB_CONS;
+		rm_ctx->cons_params.floor_voltage = IPA_VOLTAGE_SVS;
+		rm_ctx->cons_params.reg_params.user_data = NULL;
+		rm_ctx->cons_params.reg_params.notify_cb = NULL;
+		rm_ctx->cons_params.request_resource =
+			IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			ipa3_usb_dpl_cons_request_resource_cb :
+			ipa3_usb_cons_request_resource_cb;
+		rm_ctx->cons_params.release_resource =
+			IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			ipa3_usb_dpl_cons_release_resource_cb :
+			ipa3_usb_cons_release_resource_cb;
+		result = ipa_rm_create_resource(&rm_ctx->cons_params);
+		if (result) {
+			IPA_USB_ERR("Failed to create %s RM resource\n",
+				ipa_rm_resource_str(rm_ctx->cons_params.name));
+			goto create_cons_rsc_fail;
+		}
+		rm_ctx->cons_valid = true;
+		IPA_USB_DBG("Created %s RM resource\n",
+			ipa_rm_resource_str(rm_ctx->cons_params.name));
+	}
+
+	return 0;
+
+create_cons_rsc_fail:
+	if (created) {
+		rm_ctx->prod_valid = false;
+		ipa_rm_delete_resource(rm_ctx->prod_params.name);
+	}
+	return result;
+}
+
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+			   struct ipa_usb_teth_params *teth_params,
+			   int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+			   void *),
+			   void *user_data)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (teth_prot > IPA_USB_MAX_TETH_PROT_SIZE ||
+		((teth_prot == IPA_USB_RNDIS || teth_prot == IPA_USB_ECM) &&
+		teth_params == NULL) || ipa_usb_notify_cb == NULL ||
+		user_data == NULL) {
+		IPA_USB_ERR("bad parameters.\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_INIT_TETH_PROT, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	/* Create IPA RM USB resources */
+	result = ipa3_usb_create_rm_resources(ttype);
+	if (result) {
+		IPA_USB_ERR("Failed creating IPA RM USB resources\n");
+		goto bad_params;
+	}
+
+	if (!ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb) {
+		ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb =
+			ipa_usb_notify_cb;
+	} else if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		if (ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb !=
+			ipa_usb_notify_cb) {
+			IPA_USB_ERR("Got different notify_cb\n");
+			result = -EINVAL;
+			goto bad_params;
+		}
+	} else {
+		IPA_USB_ERR("Already has dpl_notify_cb\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	/* Initialize tethering protocol */
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_DBG("%s already initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EPERM;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+		if (teth_prot == IPA_USB_RNDIS) {
+			ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.rndis.device_ready_notify =
+				ipa3_usb_device_ready_notify_cb;
+			memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.rndis.host_ethaddr,
+				teth_params->host_ethaddr,
+				sizeof(teth_params->host_ethaddr));
+			memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.rndis.device_ethaddr,
+				teth_params->device_ethaddr,
+				sizeof(teth_params->device_ethaddr));
+
+			result = rndis_ipa_init(&ipa3_usb_ctx->
+				teth_prot_ctx[teth_prot].
+				teth_prot_params.rndis);
+			if (result) {
+				IPA_USB_ERR("Failed to initialize %s\n",
+					ipa3_usb_teth_prot_to_string(
+					teth_prot));
+				goto teth_prot_init_fail;
+			}
+		} else {
+			ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.ecm.device_ready_notify =
+				ipa3_usb_device_ready_notify_cb;
+			memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.ecm.host_ethaddr,
+				teth_params->host_ethaddr,
+				sizeof(teth_params->host_ethaddr));
+			memcpy(ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.ecm.device_ethaddr,
+				teth_params->device_ethaddr,
+				sizeof(teth_params->device_ethaddr));
+
+			result = ecm_ipa_init(&ipa3_usb_ctx->
+				teth_prot_ctx[teth_prot].teth_prot_params.ecm);
+			if (result) {
+				IPA_USB_ERR("Failed to initialize %s\n",
+					ipa3_usb_teth_prot_to_string(
+					teth_prot));
+				goto teth_prot_init_fail;
+			}
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		ipa3_usb_ctx->num_init_prot++;
+		IPA_USB_DBG("initialized %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_DBG("%s already initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EPERM;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+		result = ipa3_usb_init_teth_bridge();
+		if (result)
+			goto teth_prot_init_fail;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		ipa3_usb_ctx->num_init_prot++;
+		IPA_USB_DBG("initialized %s %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_DBG("DPL already initialized\n");
+			result = -EPERM;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = user_data;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("initialized DPL\n");
+		break;
+	default:
+		IPA_USB_ERR("unexpected tethering protocol\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+		IPA_USB_ERR("failed to change state to initialized\n");
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+teth_prot_init_fail:
+	if ((IPA3_USB_IS_TTYPE_DPL(ttype))
+		|| (ipa3_usb_ctx->num_init_prot == 0)) {
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
+		ipa_rm_delete_resource(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
+		ipa_rm_delete_resource(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
+	}
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_init_teth_prot);
+
+void ipa3_usb_gsi_evt_err_cb(struct gsi_evt_err_notify *notify)
+{
+	IPA_USB_DBG_LOW("entry\n");
+	if (!notify)
+		return;
+	IPA_USB_ERR("Received event error %d, description: %d\n",
+		notify->evt_id, notify->err_desc);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+void ipa3_usb_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	IPA_USB_DBG_LOW("entry\n");
+	if (!notify)
+		return;
+	IPA_USB_ERR("Received channel error %d, description: %d\n",
+		notify->evt_id, notify->err_desc);
+	IPA_USB_DBG_LOW("exit\n");
+}
+
+static bool ipa3_usb_check_chan_params(struct ipa_usb_xdci_chan_params *params)
+{
+	IPA_USB_DBG_LOW("gevntcount_low_addr = %x\n",
+			params->gevntcount_low_addr);
+	IPA_USB_DBG_LOW("gevntcount_hi_addr = %x\n",
+			params->gevntcount_hi_addr);
+	IPA_USB_DBG_LOW("dir = %d\n", params->dir);
+	IPA_USB_DBG_LOW("xfer_ring_len = %d\n", params->xfer_ring_len);
+	IPA_USB_DBG_LOW("xfer_ring_base_addr = %llx\n",
+		params->xfer_ring_base_addr);
+	IPA_USB_DBG_LOW("last_trb_addr_iova = %x\n",
+		params->xfer_scratch.last_trb_addr_iova);
+	IPA_USB_DBG_LOW("const_buffer_size = %d\n",
+		params->xfer_scratch.const_buffer_size);
+	IPA_USB_DBG_LOW("depcmd_low_addr = %x\n",
+		params->xfer_scratch.depcmd_low_addr);
+	IPA_USB_DBG_LOW("depcmd_hi_addr = %x\n",
+		params->xfer_scratch.depcmd_hi_addr);
+
+	if (params->client >= IPA_CLIENT_MAX ||
+		params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE ||
+		params->xfer_ring_len % GSI_CHAN_RE_SIZE_16B ||
+		params->xfer_scratch.const_buffer_size < 1 ||
+		params->xfer_scratch.const_buffer_size > 31) {
+		IPA_USB_ERR("Invalid params\n");
+		return false;
+	}
+	switch (params->teth_prot) {
+	case IPA_USB_DIAG:
+		if (!IPA_CLIENT_IS_CONS(params->client)) {
+			IPA_USB_ERR("DPL supports only DL channel\n");
+			return false;
+		}
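+		/* fall through: DIAG also needs an initialized protocol */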
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			return false;
+		}
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+			IPA_USB_TETH_PROT_INVALID) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_bridge_prot_to_string(
+				params->teth_prot));
+			return false;
+		}
+		break;
+	default:
+		IPA_USB_ERR("Unknown tethering protocol (%d)\n",
+			params->teth_prot);
+		return false;
+	}
+	return true;
+}
+
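+/*
+ * Map or unmap the xDCI channel resources for SMMU: the shared USB register
+ * page (reference counted via smmu_reg_map), the transfer ring and the data
+ * buffers.
+ */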
+static int ipa3_usb_smmu_map_xdci_channel(
+	struct ipa_usb_xdci_chan_params *params, bool map)
+{
+	int result;
+	u32 gevntcount_r = rounddown(params->gevntcount_low_addr, PAGE_SIZE);
+	u32 xfer_scratch_r =
+		rounddown(params->xfer_scratch.depcmd_low_addr, PAGE_SIZE);
+
+	if (gevntcount_r != xfer_scratch_r) {
+		IPA_USB_ERR("only 1 page map is supported for USB regs\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	if (map) {
+		if (ipa3_usb_ctx->smmu_reg_map.cnt == 0) {
+			ipa3_usb_ctx->smmu_reg_map.addr = gevntcount_r;
+			result = ipa3_smmu_map_peer_reg(
+				ipa3_usb_ctx->smmu_reg_map.addr, true);
+			if (result) {
+				IPA_USB_ERR("failed to map USB regs %d\n",
+					result);
+				return result;
+			}
+		} else {
+			if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) {
+				IPA_USB_ERR("cannot map a different reg\n");
+				return -EINVAL;
+			}
+		}
+		ipa3_usb_ctx->smmu_reg_map.cnt++;
+	} else {
+		if (gevntcount_r != ipa3_usb_ctx->smmu_reg_map.addr) {
+			IPA_USB_ERR("cannot map a different reg\n");
+			return -EINVAL;
+		}
+
+		if (ipa3_usb_ctx->smmu_reg_map.cnt == 1) {
+			result = ipa3_smmu_map_peer_reg(
+				ipa3_usb_ctx->smmu_reg_map.addr, false);
+			if (result) {
+				IPA_USB_ERR("failed to unmap USB regs %d\n",
+					result);
+				return result;
+			}
+		}
+		ipa3_usb_ctx->smmu_reg_map.cnt--;
+	}
+
+	result = ipa3_smmu_map_peer_buff(params->xfer_ring_base_addr_iova,
+		params->xfer_ring_base_addr, params->xfer_ring_len, map);
+	if (result) {
+		IPA_USB_ERR("failed to map Xfer ring %d\n", result);
+		return result;
+	}
+
+	result = ipa3_smmu_map_peer_buff(params->data_buff_base_addr_iova,
+		params->data_buff_base_addr, params->data_buff_base_len, map);
+	if (result) {
+		IPA_USB_ERR("failed to map TRBs buff %d\n", result);
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_request_xdci_channel(
+	struct ipa_usb_xdci_chan_params *params,
+	struct ipa_req_chan_out_params *out_params)
+{
+	int result = -EFAULT;
+	struct ipa_request_gsi_channel_params chan_params;
+	enum ipa3_usb_transport_type ttype;
+
+	IPA_USB_DBG_LOW("entry\n");
+	if (params == NULL || out_params == NULL ||
+		!ipa3_usb_check_chan_params(params)) {
+		IPA_USB_ERR("bad parameters\n");
+		return -EINVAL;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(params->teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_REQUEST_CHANNEL, ttype)) {
+		IPA_USB_ERR("Illegal operation\n");
+		return -EPERM;
+	}
+
+	memset(&chan_params, 0, sizeof(struct ipa_request_gsi_channel_params));
+	memcpy(&chan_params.ipa_ep_cfg, &params->ipa_ep_cfg,
+		sizeof(struct ipa_ep_cfg));
+	chan_params.client = params->client;
+	switch (params->teth_prot) {
+	case IPA_USB_RNDIS:
+		chan_params.priv = ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+			teth_prot_params.rndis.private;
+		if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+			chan_params.notify =
+				ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+				teth_prot_params.rndis.ipa_tx_notify;
+		else
+			chan_params.notify =
+				ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+				teth_prot_params.rndis.ipa_rx_notify;
+		chan_params.skip_ep_cfg =
+			ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+			teth_prot_params.rndis.skip_ep_cfg;
+		break;
+	case IPA_USB_ECM:
+		chan_params.priv = ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+			teth_prot_params.ecm.private;
+		if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+			chan_params.notify =
+				ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+				teth_prot_params.ecm.ecm_ipa_tx_dp_notify;
+		else
+			chan_params.notify =
+				ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+				teth_prot_params.ecm.ecm_ipa_rx_dp_notify;
+		chan_params.skip_ep_cfg =
+			ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+			teth_prot_params.ecm.skip_ep_cfg;
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		chan_params.priv =
+			ipa3_usb_ctx->teth_bridge_params.private_data;
+		chan_params.notify =
+			ipa3_usb_ctx->teth_bridge_params.usb_notify_cb;
+		chan_params.skip_ep_cfg =
+			ipa3_usb_ctx->teth_bridge_params.skip_ep_cfg;
+		break;
+	case IPA_USB_DIAG:
+		chan_params.priv = NULL;
+		chan_params.notify = NULL;
+		chan_params.skip_ep_cfg = true;
+		break;
+	default:
+		break;
+	}
+
+	result = ipa3_usb_smmu_map_xdci_channel(params, true);
+	if (result) {
+		IPA_USB_ERR("failed to smmu map %d\n", result);
+		return result;
+	}
+
+	/* store channel params for SMMU unmap */
+	ipa3_usb_ctx->ttype_ctx[ttype].ch_params = *params;
+
+	chan_params.keep_ipa_awake = params->keep_ipa_awake;
+	chan_params.evt_ring_params.intf = GSI_EVT_CHTYPE_XDCI_EV;
+	chan_params.evt_ring_params.intr = GSI_INTR_IRQ;
+	chan_params.evt_ring_params.re_size = GSI_EVT_RING_RE_SIZE_16B;
+	chan_params.evt_ring_params.ring_len = params->xfer_ring_len -
+		chan_params.evt_ring_params.re_size;
+	chan_params.evt_ring_params.ring_base_addr =
+		params->xfer_ring_base_addr;
+	chan_params.evt_ring_params.ring_base_vaddr = NULL;
+	chan_params.evt_ring_params.int_modt = 0;
+	chan_params.evt_ring_params.intvec = 0;
+	chan_params.evt_ring_params.msi_addr = 0;
+	chan_params.evt_ring_params.rp_update_addr = 0;
+	chan_params.evt_ring_params.exclusive = true;
+	chan_params.evt_ring_params.err_cb = ipa3_usb_gsi_evt_err_cb;
+	chan_params.evt_ring_params.user_data = NULL;
+	chan_params.evt_scratch.xdci.gevntcount_low_addr =
+		params->gevntcount_low_addr;
+	chan_params.evt_scratch.xdci.gevntcount_hi_addr =
+		params->gevntcount_hi_addr;
+	chan_params.chan_params.prot = GSI_CHAN_PROT_XDCI;
+	chan_params.chan_params.dir = params->dir;
+	/* chan_id is set in ipa3_request_gsi_channel() */
+	chan_params.chan_params.re_size = GSI_CHAN_RE_SIZE_16B;
+	chan_params.chan_params.ring_len = params->xfer_ring_len;
+	chan_params.chan_params.ring_base_addr =
+		params->xfer_ring_base_addr;
+	chan_params.chan_params.ring_base_vaddr = NULL;
+	chan_params.chan_params.use_db_eng = GSI_CHAN_DB_MODE;
+	chan_params.chan_params.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	if (params->dir == GSI_CHAN_DIR_FROM_GSI)
+		chan_params.chan_params.low_weight =
+			IPA_USB_DL_CHAN_LOW_WEIGHT;
+	else
+		chan_params.chan_params.low_weight =
+			IPA_USB_UL_CHAN_LOW_WEIGHT;
+	chan_params.chan_params.xfer_cb = NULL;
+	chan_params.chan_params.err_cb = ipa3_usb_gsi_chan_err_cb;
+	chan_params.chan_params.chan_user_data = NULL;
+	chan_params.chan_scratch.xdci.last_trb_addr =
+		params->xfer_scratch.last_trb_addr_iova;
+	/* xferrscidx will be updated later */
+	chan_params.chan_scratch.xdci.xferrscidx = 0;
+	chan_params.chan_scratch.xdci.const_buffer_size =
+		params->xfer_scratch.const_buffer_size;
+	chan_params.chan_scratch.xdci.depcmd_low_addr =
+		params->xfer_scratch.depcmd_low_addr;
+	chan_params.chan_scratch.xdci.depcmd_hi_addr =
+		params->xfer_scratch.depcmd_hi_addr;
+	chan_params.chan_scratch.xdci.outstanding_threshold =
+		((params->teth_prot == IPA_USB_MBIM) ? 1 : 2) *
+		chan_params.chan_params.re_size;
+	/* max_outstanding_tre is set in ipa3_request_gsi_channel() */
+	result = ipa3_request_gsi_channel(&chan_params, out_params);
+	if (result) {
+		IPA_USB_ERR("failed to allocate GSI channel\n");
+		ipa3_usb_smmu_map_xdci_channel(params, false);
+		return result;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+}
+
+static int ipa3_usb_release_xdci_channel(u32 clnt_hdl,
+	enum ipa3_usb_transport_type ttype)
+{
+	int result = 0;
+
+	IPA_USB_DBG_LOW("entry\n");
+	if (ttype >= IPA_USB_TRANSPORT_MAX) {
+		IPA_USB_ERR("bad parameter.\n");
+		return -EINVAL;
+	}
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_RELEASE_CHANNEL, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		return -EPERM;
+	}
+
+	/* Release channel */
+	result = ipa3_release_gsi_channel(clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to deallocate channel.\n");
+		return result;
+	}
+
+	result = ipa3_usb_smmu_map_xdci_channel(
+		&ipa3_usb_ctx->ttype_ctx[ttype].ch_params, false);
+
+	/* Change ipa_usb state to INITIALIZED */
+	if (!ipa3_usb_set_state(IPA_USB_INITIALIZED, false, ttype))
+		IPA_USB_ERR("failed to change state to initialized\n");
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+}
+
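+/*
+ * Request the PROD RM resource for this transport type and, if needed, wait
+ * up to IPA_USB_RM_TIMEOUT_MSEC for the grant.
+ */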
+static int ipa3_usb_request_prod(enum ipa3_usb_transport_type ttype)
+{
+	int result;
+	struct ipa3_usb_rm_context *rm_ctx;
+	const char *rsrc_str;
+
+	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+	rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
+
+	IPA_USB_DBG_LOW("requesting %s\n", rsrc_str);
+	init_completion(&rm_ctx->prod_comp);
+	result = ipa_rm_request_resource(rm_ctx->prod_params.name);
+	if (result) {
+		if (result != -EINPROGRESS) {
+			IPA_USB_ERR("failed to request %s: %d\n",
+				rsrc_str, result);
+			return result;
+		}
+		result = wait_for_completion_timeout(&rm_ctx->prod_comp,
+				msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
+		if (result == 0) {
+			IPA_USB_ERR("timeout request %s\n", rsrc_str);
+			return -ETIME;
+		}
+	}
+
+	IPA_USB_DBG_LOW("%s granted\n", rsrc_str);
+	return 0;
+}
+
+static int ipa3_usb_release_prod(enum ipa3_usb_transport_type ttype)
+{
+	int result;
+	struct ipa3_usb_rm_context *rm_ctx;
+	const char *rsrc_str;
+
+	rm_ctx = &ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx;
+	rsrc_str = ipa_rm_resource_str(rm_ctx->prod_params.name);
+
+	IPA_USB_DBG_LOW("releasing %s\n", rsrc_str);
+
+	init_completion(&rm_ctx->prod_comp);
+	result = ipa_rm_release_resource(rm_ctx->prod_params.name);
+	if (result) {
+		if (result != -EINPROGRESS) {
+			IPA_USB_ERR("failed to release %s: %d\n",
+				rsrc_str, result);
+			return result;
+		}
+		result = wait_for_completion_timeout(&rm_ctx->prod_comp,
+			msecs_to_jiffies(IPA_USB_RM_TIMEOUT_MSEC));
+		if (result == 0) {
+			IPA_USB_ERR("timeout release %s\n", rsrc_str);
+			return -ETIME;
+		}
+	}
+
+	IPA_USB_DBG_LOW("%s released\n", rsrc_str);
+	return 0;
+}
+
+static bool ipa3_usb_check_connect_params(
+	struct ipa_usb_xdci_connect_params_internal *params)
+{
+	IPA_USB_DBG_LOW("ul xferrscidx = %d\n", params->usb_to_ipa_xferrscidx);
+	IPA_USB_DBG_LOW("dl xferrscidx = %d\n", params->ipa_to_usb_xferrscidx);
+	IPA_USB_DBG_LOW("max_supported_bandwidth_mbps = %d\n",
+		params->max_supported_bandwidth_mbps);
+
+	if (params->max_pkt_size < IPA_USB_HIGH_SPEED_512B ||
+		params->max_pkt_size > IPA_USB_SUPER_SPEED_1024B ||
+		params->ipa_to_usb_xferrscidx < 0 ||
+		params->ipa_to_usb_xferrscidx > 127 ||
+		(params->teth_prot != IPA_USB_DIAG &&
+		(params->usb_to_ipa_xferrscidx < 0 ||
+		params->usb_to_ipa_xferrscidx > 127)) ||
+		params->teth_prot > IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("Invalid params\n");
+		return false;
+	}
+
+	if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+		IPA_USB_TETH_PROT_INVALID) {
+		IPA_USB_ERR("%s is not initialized\n",
+			ipa3_usb_teth_prot_to_string(
+			params->teth_prot));
+		return false;
+	}
+
+	return true;
+}
+
+static int ipa3_usb_connect_teth_bridge(
+	struct teth_bridge_connect_params *params)
+{
+	int result;
+
+	result = teth_bridge_connect(params);
+	if (result) {
+		IPA_USB_ERR("failed to connect teth_bridge (%s)\n",
+			params->tethering_mode == TETH_TETHERING_MODE_RMNET ?
+			"rmnet" : "mbim");
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_connect_dpl(void)
+{
+	int res = 0;
+
+	/*
+	 * Add the DPL dependency to the RM dependency graph. The first
+	 * add_dependency call is synchronous to make sure the IPA clocks are
+	 * up before we continue and notify the USB driver that it may proceed.
+	 */
+	res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+				    IPA_RM_RESOURCE_Q6_CONS);
+	if (res < 0) {
+		IPA_USB_ERR("ipa_rm_add_dependency_sync() failed.\n");
+		return res;
+	}
+
+	/*
+	 * this add_dependency call can't be sync since it will block until DPL
+	 * status is connected (which can happen only later in the flow),
+	 * the clocks are already up so the call doesn't need to block.
+	 */
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				    IPA_RM_RESOURCE_USB_DPL_CONS);
+	if (res < 0 && res != -EINPROGRESS) {
+		IPA_USB_ERR("ipa_rm_add_dependency() failed.\n");
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+				IPA_RM_RESOURCE_Q6_CONS);
+		return res;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_connect_teth_prot(
+	struct ipa_usb_xdci_connect_params_internal *params,
+	enum ipa3_usb_transport_type ttype)
+{
+	int result;
+	struct teth_bridge_connect_params teth_bridge_params;
+
+	IPA_USB_DBG("connecting protocol = %d\n",
+		params->teth_prot);
+	switch (params->teth_prot) {
+	case IPA_USB_RNDIS:
+		if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected.\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			break;
+		}
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].user_data;
+		result = rndis_ipa_pipe_connect_notify(
+			params->usb_to_ipa_clnt_hdl,
+			params->ipa_to_usb_clnt_hdl,
+			params->teth_prot_params.max_xfer_size_bytes_to_dev,
+			params->teth_prot_params.max_packet_number_to_dev,
+			params->teth_prot_params.max_xfer_size_bytes_to_host,
+			ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].
+			teth_prot_params.rndis.private);
+		if (result) {
+			IPA_USB_ERR("failed to connect %s.\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[IPA_USB_RNDIS].state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		IPA_USB_DBG("%s is connected.\n",
+			ipa3_usb_teth_prot_to_string(
+			params->teth_prot));
+		break;
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected.\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			break;
+		}
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].user_data;
+		result = ecm_ipa_connect(params->usb_to_ipa_clnt_hdl,
+			params->ipa_to_usb_clnt_hdl,
+			ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].
+			teth_prot_params.ecm.private);
+		if (result) {
+			IPA_USB_ERR("failed to connect %s.\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[IPA_USB_ECM].state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		IPA_USB_DBG("%s is connected.\n",
+			ipa3_usb_teth_prot_to_string(
+			params->teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected.\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			break;
+		}
+		result = ipa3_usb_init_teth_bridge();
+		if (result)
+			return result;
+
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
+			user_data;
+		teth_bridge_params.ipa_usb_pipe_hdl =
+			params->ipa_to_usb_clnt_hdl;
+		teth_bridge_params.usb_ipa_pipe_hdl =
+			params->usb_to_ipa_clnt_hdl;
+		teth_bridge_params.tethering_mode =
+			(params->teth_prot == IPA_USB_RMNET) ?
+			(TETH_TETHERING_MODE_RMNET):(TETH_TETHERING_MODE_MBIM);
+		teth_bridge_params.client_type = IPA_CLIENT_USB_PROD;
+		result = ipa3_usb_connect_teth_bridge(&teth_bridge_params);
+		if (result) {
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
+		IPA_USB_DBG("%s (%s) is connected.\n",
+			ipa3_usb_teth_prot_to_string(
+			params->teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(
+			params->teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is already connected.\n",
+				ipa3_usb_teth_prot_to_string(
+				params->teth_prot));
+			break;
+		}
+
+		ipa3_usb_ctx->ttype_ctx[ttype].user_data =
+			ipa3_usb_ctx->teth_prot_ctx[params->teth_prot].
+			user_data;
+		result = ipa3_usb_connect_dpl();
+		if (result) {
+			IPA_USB_ERR("Failed connecting DPL result=%d\n",
+				result);
+			ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+			return result;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[IPA_USB_DIAG].state =
+			IPA_USB_TETH_PROT_CONNECTED;
+		ipa3_usb_notify_do(ttype, IPA_USB_DEVICE_READY);
+		IPA_USB_DBG("%s is connected.\n",
+			ipa3_usb_teth_prot_to_string(
+			params->teth_prot));
+		break;
+	default:
+		IPA_USB_ERR("Invalid tethering protocol\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_disconnect_teth_bridge(void)
+{
+	int result;
+
+	result = teth_bridge_disconnect(IPA_CLIENT_USB_PROD);
+	if (result) {
+		IPA_USB_ERR("failed to disconnect teth_bridge.\n");
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_usb_disconnect_dpl(void)
+{
+	int res;
+
+	/* Remove DPL RM dependency */
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+				    IPA_RM_RESOURCE_Q6_CONS);
+	if (res)
+		IPA_USB_ERR("deleting DPL_DUMMY_PROD rsrc dependency failed\n");
+
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				 IPA_RM_RESOURCE_USB_DPL_CONS);
+	if (res)
+		IPA_USB_ERR("deleting DPL_CONS rsrc dependency failed\n");
+
+	return 0;
+}
+
+static int ipa3_usb_disconnect_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	enum ipa3_usb_transport_type ttype;
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is not connected.\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			return -EPERM;
+		}
+		if (teth_prot == IPA_USB_RNDIS) {
+			result = rndis_ipa_pipe_disconnect_notify(
+				ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.rndis.private);
+		} else {
+			result = ecm_ipa_disconnect(
+				ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.ecm.private);
+		}
+		if (result) {
+			IPA_USB_ERR("failed to disconnect %s.\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			break;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("disconnected %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s (%s) is not connected.\n",
+				ipa3_usb_teth_prot_to_string(teth_prot),
+				ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+			return -EPERM;
+		}
+		result = ipa3_usb_disconnect_teth_bridge();
+		if (result)
+			break;
+
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("disconnected %s (%s)\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_CONNECTED) {
+			IPA_USB_DBG("%s is not connected.\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			return -EPERM;
+		}
+		result = ipa3_usb_disconnect_dpl();
+		if (result)
+			break;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INITIALIZED;
+		IPA_USB_DBG("disconnected %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	default:
+		break;
+	}
+
+	ipa3_usb_ctx->ttype_ctx[ttype].user_data = NULL;
+	return result;
+}
+
+static int ipa3_usb_xdci_connect_internal(
+	struct ipa_usb_xdci_connect_params_internal *params)
+{
+	int result = -EFAULT;
+	struct ipa_rm_perf_profile profile;
+	enum ipa3_usb_transport_type ttype;
+
+	IPA_USB_DBG_LOW("entry\n");
+	if (params == NULL || !ipa3_usb_check_connect_params(params)) {
+		IPA_USB_ERR("bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ttype = (params->teth_prot == IPA_USB_DIAG) ? IPA_USB_TRANSPORT_DPL :
+		IPA_USB_TRANSPORT_TETH;
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_CONNECT, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		return -EPERM;
+	}
+
+	/* Set EE xDCI specific scratch */
+	result = ipa3_set_usb_max_packet_size(params->max_pkt_size);
+	if (result) {
+		IPA_USB_ERR("failed setting xDCI EE scratch field\n");
+		return result;
+	}
+
+	/* Set RM PROD & CONS perf profile */
+	profile.max_supported_bandwidth_mbps =
+			params->max_supported_bandwidth_mbps;
+	result = ipa_rm_set_perf_profile(
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name,
+		&profile);
+	if (result) {
+		IPA_USB_ERR("failed to set %s perf profile\n",
+			ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+				rm_ctx.prod_params.name));
+		return result;
+	}
+	result = ipa_rm_set_perf_profile(
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name,
+		&profile);
+	if (result) {
+		IPA_USB_ERR("failed to set %s perf profile\n",
+			ipa_rm_resource_str(ipa3_usb_ctx->ttype_ctx[ttype].
+				rm_ctx.cons_params.name));
+		return result;
+	}
+
+	/* Request PROD */
+	result = ipa3_usb_request_prod(ttype);
+	if (result)
+		return result;
+
+	if (params->teth_prot != IPA_USB_DIAG) {
+		/* Start UL channel */
+		result = ipa3_xdci_connect(params->usb_to_ipa_clnt_hdl,
+			params->usb_to_ipa_xferrscidx,
+			params->usb_to_ipa_xferrscidx_valid);
+		if (result) {
+			IPA_USB_ERR("failed to connect UL channel.\n");
+			goto connect_ul_fail;
+		}
+	}
+
+	/* Start DL/DPL channel */
+	result = ipa3_xdci_connect(params->ipa_to_usb_clnt_hdl,
+		params->ipa_to_usb_xferrscidx,
+		params->ipa_to_usb_xferrscidx_valid);
+	if (result) {
+		IPA_USB_ERR("failed to connect DL/DPL channel.\n");
+		goto connect_dl_fail;
+	}
+
+	/* Connect tethering protocol */
+	result = ipa3_usb_connect_teth_prot(params, ttype);
+	if (result) {
+		IPA_USB_ERR("failed to connect teth protocol\n");
+		goto connect_teth_prot_fail;
+	}
+
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+		IPA_USB_ERR(
+			"failed to change state to connected\n");
+		goto state_change_connected_fail;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	return 0;
+
+state_change_connected_fail:
+	ipa3_usb_disconnect_teth_prot(params->teth_prot);
+connect_teth_prot_fail:
+	ipa3_xdci_disconnect(params->ipa_to_usb_clnt_hdl, false, -1);
+	ipa3_reset_gsi_channel(params->ipa_to_usb_clnt_hdl);
+	ipa3_reset_gsi_event_ring(params->ipa_to_usb_clnt_hdl);
+connect_dl_fail:
+	if (params->teth_prot != IPA_USB_DIAG) {
+		ipa3_xdci_disconnect(params->usb_to_ipa_clnt_hdl, false, -1);
+		ipa3_reset_gsi_channel(params->usb_to_ipa_clnt_hdl);
+		ipa3_reset_gsi_event_ring(params->usb_to_ipa_clnt_hdl);
+	}
+connect_ul_fail:
+	ipa3_usb_release_prod(ttype);
+	return result;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static char dbg_buff[IPA_USB_MAX_MSG_LEN];
+
+static char *ipa3_usb_cons_state_to_string(enum ipa3_usb_cons_state state)
+{
+	switch (state) {
+	case IPA_USB_CONS_GRANTED:
+		return "CONS_GRANTED";
+	case IPA_USB_CONS_RELEASED:
+		return "CONS_RELEASED";
+	}
+
+	return "UNSUPPORTED";
+}
+
+static int ipa3_usb_get_status_dbg_info(struct ipa3_usb_status_dbg_info *status)
+{
+	int res;
+	int i;
+	unsigned long flags;
+
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (ipa3_usb_ctx == NULL) {
+		IPA_USB_ERR("IPA USB was not inited yet\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+
+	if (!status) {
+		IPA_USB_ERR("Invalid input\n");
+		res = -EINVAL;
+		goto bail;
+	}
+
+	memset(status, 0, sizeof(struct ipa3_usb_status_dbg_info));
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	status->teth_state = ipa3_usb_state_to_string(
+		ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].state);
+	status->dpl_state = ipa3_usb_state_to_string(
+		ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].state);
+	if (ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].rm_ctx.cons_valid)
+		status->teth_cons_state = ipa3_usb_cons_state_to_string(
+			ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_TETH].
+			rm_ctx.cons_state);
+	if (ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].rm_ctx.cons_valid)
+		status->dpl_cons_state = ipa3_usb_cons_state_to_string(
+			ipa3_usb_ctx->ttype_ctx[IPA_USB_TRANSPORT_DPL].
+			rm_ctx.cons_state);
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	for (i = 0 ; i < IPA_USB_MAX_TETH_PROT_SIZE ; i++) {
+		if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			if ((i == IPA_USB_RMNET) || (i == IPA_USB_MBIM))
+				status->inited_prots[status->num_init_prot++] =
+					ipa3_usb_teth_bridge_prot_to_string(i);
+			else
+				status->inited_prots[status->num_init_prot++] =
+					ipa3_usb_teth_prot_to_string(i);
+		} else if (ipa3_usb_ctx->teth_prot_ctx[i].state ==
+			IPA_USB_TETH_PROT_CONNECTED) {
+			switch (i) {
+			case IPA_USB_RMNET:
+			case IPA_USB_MBIM:
+				status->teth_connected_prot =
+					ipa3_usb_teth_bridge_prot_to_string(i);
+				break;
+			case IPA_USB_DIAG:
+				status->dpl_connected_prot =
+					ipa3_usb_teth_prot_to_string(i);
+				break;
+			default:
+				status->teth_connected_prot =
+					ipa3_usb_teth_prot_to_string(i);
+			}
+		}
+	}
+
+	res = 0;
+	IPA_USB_DBG_LOW("exit\n");
+bail:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return res;
+}
+
+static ssize_t ipa3_read_usb_state_info(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ipa3_usb_status_dbg_info status;
+	int result;
+	int nbytes;
+	int cnt = 0;
+	int i;
+
+	result = ipa3_usb_get_status_dbg_info(&status);
+	if (result) {
+		nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+				"Fail to read IPA USB status\n");
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_USB_MAX_MSG_LEN,
+			"Tethering Data State: %s\n"
+			"DPL State: %s\n"
+			"Protocols in Initialized State: ",
+			status.teth_state,
+			status.dpl_state);
+		cnt += nbytes;
+
+		for (i = 0 ; i < status.num_init_prot ; i++) {
+			nbytes = scnprintf(dbg_buff + cnt,
+					IPA_USB_MAX_MSG_LEN - cnt,
+					"%s ", status.inited_prots[i]);
+			cnt += nbytes;
+		}
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				status.num_init_prot ? "\n" : "None\n");
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				"Protocols in Connected State: ");
+		cnt += nbytes;
+		if (status.teth_connected_prot) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_USB_MAX_MSG_LEN - cnt,
+				"%s ", status.teth_connected_prot);
+			cnt += nbytes;
+		}
+		if (status.dpl_connected_prot) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_USB_MAX_MSG_LEN - cnt,
+				"%s ", status.dpl_connected_prot);
+			cnt += nbytes;
+		}
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				(status.teth_connected_prot ||
+				status.dpl_connected_prot) ? "\n" : "None\n");
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				"USB Tethering Consumer State: %s\n",
+				status.teth_cons_state ?
+				status.teth_cons_state : "Invalid");
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_USB_MAX_MSG_LEN - cnt,
+				"DPL Consumer State: %s\n",
+				status.dpl_cons_state ? status.dpl_cons_state :
+				"Invalid");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+const struct file_operations ipa3_ipa_usb_ops = {
+	.read = ipa3_read_usb_state_info,
+};
+
+static void ipa_usb_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+
+	ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
+	if (IS_ERR(ipa3_usb_ctx->dent)) {
+		pr_err("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	ipa3_usb_ctx->dfile_state_info = debugfs_create_file("state_info",
+			read_only_mode, ipa3_usb_ctx->dent, 0,
+			&ipa3_ipa_usb_ops);
+	if (!ipa3_usb_ctx->dfile_state_info ||
+		IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
+		pr_err("failed to create file for state_info\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(ipa3_usb_ctx->dent);
+	ipa3_usb_ctx->dent = NULL;
+}
+
+static void ipa_usb_debugfs_remove(void)
+{
+	if (IS_ERR(ipa3_usb_ctx->dent)) {
+		IPA_USB_ERR("ipa_usb debugfs folder was not created.\n");
+		return;
+	}
+
+	debugfs_remove_recursive(ipa3_usb_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipa_usb_debugfs_init(void){}
+static void ipa_usb_debugfs_remove(void){}
+#endif /* CONFIG_DEBUG_FS */
+
+
+
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+			 struct ipa_usb_xdci_chan_params *dl_chan_params,
+			 struct ipa_req_chan_out_params *ul_out_params,
+			 struct ipa_req_chan_out_params *dl_out_params,
+			 struct ipa_usb_xdci_connect_params *connect_params)
+{
+	int result = -EFAULT;
+	struct ipa_usb_xdci_connect_params_internal conn_params;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (connect_params == NULL || dl_chan_params == NULL ||
+		dl_out_params == NULL ||
+		(connect_params->teth_prot != IPA_USB_DIAG &&
+		(ul_chan_params == NULL || ul_out_params == NULL))) {
+		IPA_USB_ERR("bad parameters.\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (connect_params->teth_prot != IPA_USB_DIAG) {
+		result = ipa3_usb_request_xdci_channel(ul_chan_params,
+			ul_out_params);
+		if (result) {
+			IPA_USB_ERR("failed to allocate UL channel.\n");
+			goto bad_params;
+		}
+	}
+
+	result = ipa3_usb_request_xdci_channel(dl_chan_params, dl_out_params);
+	if (result) {
+		IPA_USB_ERR("failed to allocate DL/DPL channel.\n");
+		goto alloc_dl_chan_fail;
+	}
+
+	memset(&conn_params, 0,
+		sizeof(struct ipa_usb_xdci_connect_params_internal));
+	conn_params.max_pkt_size = connect_params->max_pkt_size;
+	conn_params.ipa_to_usb_clnt_hdl = dl_out_params->clnt_hdl;
+	conn_params.ipa_to_usb_xferrscidx =
+		connect_params->ipa_to_usb_xferrscidx;
+	conn_params.ipa_to_usb_xferrscidx_valid =
+		connect_params->ipa_to_usb_xferrscidx_valid;
+	if (connect_params->teth_prot != IPA_USB_DIAG) {
+		conn_params.usb_to_ipa_clnt_hdl = ul_out_params->clnt_hdl;
+		conn_params.usb_to_ipa_xferrscidx =
+			connect_params->usb_to_ipa_xferrscidx;
+		conn_params.usb_to_ipa_xferrscidx_valid =
+			connect_params->usb_to_ipa_xferrscidx_valid;
+	}
+	conn_params.teth_prot = connect_params->teth_prot;
+	conn_params.teth_prot_params = connect_params->teth_prot_params;
+	conn_params.max_supported_bandwidth_mbps =
+		connect_params->max_supported_bandwidth_mbps;
+	result = ipa3_usb_xdci_connect_internal(&conn_params);
+	if (result) {
+		IPA_USB_ERR("failed to connect.\n");
+		goto connect_fail;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+connect_fail:
+	ipa3_usb_release_xdci_channel(dl_out_params->clnt_hdl,
+		IPA3_USB_GET_TTYPE(dl_chan_params->teth_prot));
+alloc_dl_chan_fail:
+	if (connect_params->teth_prot != IPA_USB_DIAG)
+		ipa3_usb_release_xdci_channel(ul_out_params->clnt_hdl,
+			IPA3_USB_GET_TTYPE(ul_chan_params->teth_prot));
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_connect);
+
+static int ipa3_usb_check_disconnect_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameter.\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+		IPA_USB_TETH_PROT_CONNECTED) {
+		IPA_USB_ERR("%s is not connected.\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			    enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	struct ipa_ep_cfg_holb holb_cfg;
+	unsigned long flags;
+	enum ipa3_usb_state orig_state;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (ipa3_usb_check_disconnect_prot(teth_prot)) {
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_DISCONNECT, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	if (ipa3_usb_ctx->ttype_ctx[ttype].state != IPA_USB_SUSPENDED) {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+		/* Stop DL/DPL channel */
+		result = ipa3_xdci_disconnect(dl_clnt_hdl, false, -1);
+		if (result) {
+			IPA_USB_ERR("failed to disconnect DL/DPL channel.\n");
+			goto bad_params;
+		}
+	} else {
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
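+		/*
+		 * Channel is already suspended; configure HOLB with a zero
+		 * timer on the DL/DPL endpoint instead of stopping it.
+		 */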
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		holb_cfg.tmr_val = 0;
+		ipa3_cfg_ep_holb(dl_clnt_hdl, &holb_cfg);
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	orig_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
+			orig_state != IPA_USB_SUSPENDED) {
+			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+				flags);
+			/* Stop UL channel */
+			result = ipa3_xdci_disconnect(ul_clnt_hdl,
+				true,
+				ipa3_usb_ctx->qmi_req_id);
+			if (result) {
+				IPA_USB_ERR("failed disconnect UL channel\n");
+				goto bad_params;
+			}
+			ipa3_usb_ctx->qmi_req_id++;
+		} else
+			spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock,
+				flags);
+	} else
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	/* Reset DL channel */
+	result = ipa3_reset_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to reset DL channel.\n");
+		goto bad_params;
+	}
+
+	/* Reset DL event ring */
+	result = ipa3_reset_gsi_event_ring(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("failed to reset DL event ring.\n");
+		goto bad_params;
+	}
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		/* Reset UL channel */
+		result = ipa3_reset_gsi_channel(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to reset UL channel.\n");
+			goto bad_params;
+		}
+
+		/* Reset UL event ring */
+		result = ipa3_reset_gsi_event_ring(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to reset UL event ring.\n");
+			goto bad_params;
+		}
+	}
+
+	/* Change state to STOPPED */
+	if (!ipa3_usb_set_state(IPA_USB_STOPPED, false, ttype))
+		IPA_USB_ERR("failed to change state to stopped\n");
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		result = ipa3_usb_release_xdci_channel(ul_clnt_hdl, ttype);
+		if (result) {
+			IPA_USB_ERR("failed to release UL channel.\n");
+			goto bad_params;
+		}
+	}
+
+	result = ipa3_usb_release_xdci_channel(dl_clnt_hdl, ttype);
+	if (result) {
+		IPA_USB_ERR("failed to release DL channel.\n");
+		goto bad_params;
+	}
+
+	/* Disconnect tethering protocol */
+	result = ipa3_usb_disconnect_teth_prot(teth_prot);
+	if (result)
+		goto bad_params;
+
+	if (orig_state != IPA_USB_SUSPEND_IN_PROGRESS &&
+		orig_state != IPA_USB_SUSPENDED) {
+		result = ipa3_usb_release_prod(ttype);
+		if (result) {
+			IPA_USB_ERR("failed to release PROD.\n");
+			goto bad_params;
+		}
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+
+}
+EXPORT_SYMBOL(ipa_usb_xdci_disconnect);
+
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameters.\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_DEINIT_TETH_PROT, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	/* Clean-up tethering protocol */
+	switch (teth_prot) {
+	case IPA_USB_RNDIS:
+	case IPA_USB_ECM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EINVAL;
+			goto bad_params;
+		}
+		if (teth_prot == IPA_USB_RNDIS)
+			rndis_ipa_cleanup(
+				ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.rndis.private);
+		else
+			ecm_ipa_cleanup(
+				ipa3_usb_ctx->teth_prot_ctx[teth_prot].
+				teth_prot_params.ecm.private);
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data = NULL;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INVALID;
+		ipa3_usb_ctx->num_init_prot--;
+		IPA_USB_DBG("deinitialized %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_RMNET:
+	case IPA_USB_MBIM:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			IPA_USB_ERR("%s (%s) is not initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot),
+				ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+			result = -EINVAL;
+			goto bad_params;
+		}
+
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data =
+			NULL;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INVALID;
+		ipa3_usb_ctx->num_init_prot--;
+		IPA_USB_DBG("deinitialized %s (%s)\n",
+			ipa3_usb_teth_prot_to_string(teth_prot),
+			ipa3_usb_teth_bridge_prot_to_string(teth_prot));
+		break;
+	case IPA_USB_DIAG:
+		if (ipa3_usb_ctx->teth_prot_ctx[teth_prot].state !=
+			IPA_USB_TETH_PROT_INITIALIZED) {
+			IPA_USB_ERR("%s is not initialized\n",
+				ipa3_usb_teth_prot_to_string(teth_prot));
+			result = -EINVAL;
+			goto bad_params;
+		}
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].user_data =
+			NULL;
+		ipa3_usb_ctx->teth_prot_ctx[teth_prot].state =
+			IPA_USB_TETH_PROT_INVALID;
+		IPA_USB_DBG("deinitialized %s\n",
+			ipa3_usb_teth_prot_to_string(teth_prot));
+		break;
+	default:
+		IPA_USB_ERR("unexpected tethering protocol\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	if (IPA3_USB_IS_TTYPE_DPL(ttype) ||
+		(ipa3_usb_ctx->num_init_prot == 0)) {
+		if (!ipa3_usb_set_state(IPA_USB_INVALID, false, ttype))
+			IPA_USB_ERR("failed to change state to invalid\n");
+		ipa_rm_delete_resource(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_params.name);
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.prod_valid = false;
+		ipa_rm_delete_resource(
+			ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_params.name);
+		ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_valid = false;
+		ipa3_usb_ctx->ttype_ctx[ttype].ipa_usb_notify_cb = NULL;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_deinit_teth_prot);
+
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	enum ipa_usb_teth_prot teth_prot)
+{
+	int result = 0;
+	unsigned long flags;
+	enum ipa3_usb_cons_state curr_cons_state;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+	if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameters.\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_SUSPEND, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	IPA_USB_DBG("Start suspend sequence: %s\n",
+		IPA3_USB_IS_TTYPE_DPL(ttype) ?
+		"DPL channel":"Data Tethering channels");
+
+	/* Change state to SUSPEND_REQUESTED */
+	if (!ipa3_usb_set_state(IPA_USB_SUSPEND_REQUESTED, false, ttype)) {
+		IPA_USB_ERR(
+			"fail changing state to suspend_req.\n");
+		result = -EFAULT;
+		goto bad_params;
+	}
+
+	/* Stop UL channel & suspend DL/DPL EP */
+	result = ipa3_xdci_suspend(ul_clnt_hdl, dl_clnt_hdl,
+		true,
+		ipa3_usb_ctx->qmi_req_id, IPA3_USB_IS_TTYPE_DPL(ttype));
+	if (result) {
+		IPA_USB_ERR("failed to suspend\n");
+		goto suspend_fail;
+	}
+	ipa3_usb_ctx->qmi_req_id++;
+
+	result = ipa3_usb_release_prod(ttype);
+	if (result) {
+		IPA_USB_ERR("failed to release PROD\n");
+		goto release_prod_fail;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	curr_cons_state = ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_state;
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+	if (curr_cons_state == IPA_USB_CONS_GRANTED) {
+		/* Change state to SUSPEND_IN_PROGRESS */
+		if (!ipa3_usb_set_state(IPA_USB_SUSPEND_IN_PROGRESS,
+			false, ttype))
+			IPA_USB_ERR("failed to set state to suspend_in_progress\n");
+
+		/* Check if DL/DPL data pending */
+		spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+		if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+			IPA_USB_DBG(
+				"DL/DPL data pending, invoke remote wakeup\n");
+			queue_work(ipa3_usb_ctx->wq,
+				IPA3_USB_IS_TTYPE_DPL(ttype) ?
+				&ipa3_usb_dpl_notify_remote_wakeup_work :
+				&ipa3_usb_notify_remote_wakeup_work);
+		}
+		spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ttype =
+			ttype;
+		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.dl_clnt_hdl =
+			dl_clnt_hdl;
+		ipa3_usb_ctx->ttype_ctx[ttype].finish_suspend_work.ul_clnt_hdl =
+			ul_clnt_hdl;
+		INIT_WORK(&ipa3_usb_ctx->ttype_ctx[ttype].
+			finish_suspend_work.work,
+			ipa3_usb_wq_finish_suspend_work);
+
+		result = -EINPROGRESS;
+		IPA_USB_DBG("exit with suspend_in_progress\n");
+		goto bad_params;
+	}
+
+	/* Stop DL channel */
+	result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+	if (result) {
+		IPA_USB_ERR("Error stopping DL/DPL channel: %d\n", result);
+		result = -EFAULT;
+		goto release_prod_fail;
+	}
+	/* Change state to SUSPENDED */
+	if (!ipa3_usb_set_state(IPA_USB_SUSPENDED, false, ttype))
+		IPA_USB_ERR("failed to change state to suspended\n");
+
+	/* Check if DL/DPL data pending */
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	if (ipa3_usb_ctx->ttype_ctx[ttype].rm_ctx.cons_requested) {
+		IPA_USB_DBG_LOW(
+			"DL/DPL data is pending, invoking remote wakeup\n");
+		queue_work(ipa3_usb_ctx->wq, IPA3_USB_IS_TTYPE_DPL(ttype) ?
+			&ipa3_usb_dpl_notify_remote_wakeup_work :
+			&ipa3_usb_notify_remote_wakeup_work);
+	}
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+release_prod_fail:
+	ipa3_xdci_resume(ul_clnt_hdl, dl_clnt_hdl,
+		IPA3_USB_IS_TTYPE_DPL(ttype));
+suspend_fail:
+	/* Change state back to CONNECTED */
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, true, ttype))
+		IPA_USB_ERR("failed to change state back to connected\n");
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_suspend);
+
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	enum ipa_usb_teth_prot teth_prot)
+{
+	int result = -EFAULT;
+	enum ipa3_usb_state prev_state;
+	unsigned long flags;
+	enum ipa3_usb_transport_type ttype;
+
+	mutex_lock(&ipa3_usb_ctx->general_mutex);
+	IPA_USB_DBG_LOW("entry\n");
+
+	if (teth_prot >= IPA_USB_MAX_TETH_PROT_SIZE) {
+		IPA_USB_ERR("bad parameters.\n");
+		result = -EINVAL;
+		goto bad_params;
+	}
+
+	ttype = IPA3_USB_GET_TTYPE(teth_prot);
+
+	if (!ipa3_usb_check_legal_op(IPA_USB_RESUME, ttype)) {
+		IPA_USB_ERR("Illegal operation.\n");
+		result = -EPERM;
+		goto bad_params;
+	}
+
+	IPA_USB_DBG_LOW("Start resume sequence: %s\n",
+		IPA3_USB_IS_TTYPE_DPL(ttype) ?
+		"DPL channel" : "Data Tethering channels");
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	prev_state = ipa3_usb_ctx->ttype_ctx[ttype].state;
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	/* Change state to RESUME_IN_PROGRESS */
+	if (!ipa3_usb_set_state(IPA_USB_RESUME_IN_PROGRESS, false, ttype)) {
+		IPA_USB_ERR("failed to change state to resume_in_progress\n");
+		result = -EFAULT;
+		goto bad_params;
+	}
+
+	/* Request USB_PROD */
+	result = ipa3_usb_request_prod(ttype);
+	if (result)
+		goto prod_req_fail;
+
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		/* Start UL channel */
+		result = ipa3_start_gsi_channel(ul_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to start UL channel.\n");
+			goto start_ul_fail;
+		}
+	}
+
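+	/*
+	 * When suspend was only in progress the DL/DPL channel was never
+	 * stopped, so it only needs to be restarted after a full suspend.
+	 */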
+	if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
+		/* Start DL/DPL channel */
+		result = ipa3_start_gsi_channel(dl_clnt_hdl);
+		if (result) {
+			IPA_USB_ERR("failed to start DL/DPL channel.\n");
+			goto start_dl_fail;
+		}
+	}
+
+	/* Change state to CONNECTED */
+	if (!ipa3_usb_set_state(IPA_USB_CONNECTED, false, ttype)) {
+		IPA_USB_ERR("failed to change state to connected\n");
+		result = -EFAULT;
+		goto state_change_connected_fail;
+	}
+
+	IPA_USB_DBG_LOW("exit\n");
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return 0;
+
+state_change_connected_fail:
+	if (prev_state != IPA_USB_SUSPEND_IN_PROGRESS) {
+		result = ipa3_stop_gsi_channel(dl_clnt_hdl);
+		if (result)
+			IPA_USB_ERR("Error stopping DL/DPL channel: %d\n",
+				result);
+	}
+start_dl_fail:
+	if (!IPA3_USB_IS_TTYPE_DPL(ttype)) {
+		result = ipa3_stop_gsi_channel(ul_clnt_hdl);
+		if (result)
+			IPA_USB_ERR("Error stopping UL channel: %d\n", result);
+	}
+start_ul_fail:
+	ipa3_usb_release_prod(ttype);
+prod_req_fail:
+	/* Change state back to prev_state */
+	if (!ipa3_usb_set_state(prev_state, true, ttype))
+		IPA_USB_ERR("failed to change state back to %s\n",
+			ipa3_usb_state_to_string(prev_state));
+bad_params:
+	mutex_unlock(&ipa3_usb_ctx->general_mutex);
+	return result;
+}
+EXPORT_SYMBOL(ipa_usb_xdci_resume);
+
+static int __init ipa3_usb_init(void)
+{
+	int i;
+	unsigned long flags;
+	int res;
+
+	pr_debug("entry\n");
+	ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
+	if (ipa3_usb_ctx == NULL) {
+		pr_err("failed to allocate memory\n");
+		pr_err(":ipa_usb init failed\n");
+		return -EFAULT;
+	}
+	memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
+
+	for (i = 0; i < IPA_USB_MAX_TETH_PROT_SIZE; i++)
+		ipa3_usb_ctx->teth_prot_ctx[i].state =
+			IPA_USB_TETH_PROT_INVALID;
+	ipa3_usb_ctx->num_init_prot = 0;
+	init_completion(&ipa3_usb_ctx->dev_ready_comp);
+	ipa3_usb_ctx->qmi_req_id = 0;
+	spin_lock_init(&ipa3_usb_ctx->state_lock);
+	ipa3_usb_ctx->dl_data_pending = false;
+	mutex_init(&ipa3_usb_ctx->general_mutex);
+
+	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
+		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_valid = false;
+		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_valid = false;
+		init_completion(&ipa3_usb_ctx->ttype_ctx[i].rm_ctx.prod_comp);
+		ipa3_usb_ctx->ttype_ctx[i].user_data = NULL;
+	}
+
+	spin_lock_irqsave(&ipa3_usb_ctx->state_lock, flags);
+	for (i = 0; i < IPA_USB_TRANSPORT_MAX; i++) {
+		ipa3_usb_ctx->ttype_ctx[i].state = IPA_USB_INVALID;
+		ipa3_usb_ctx->ttype_ctx[i].rm_ctx.cons_state =
+			IPA_USB_CONS_RELEASED;
+	}
+	spin_unlock_irqrestore(&ipa3_usb_ctx->state_lock, flags);
+
+	ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
+	if (!ipa3_usb_ctx->wq) {
+		pr_err("failed to create workqueue\n");
+		res = -EFAULT;
+		goto ipa_usb_workqueue_fail;
+	}
+
+	ipa_usb_debugfs_init();
+
+	pr_info("exit: IPA_USB init success!\n");
+
+	return 0;
+
+ipa_usb_workqueue_fail:
+	pr_err(":init failed (%d)\n", -res);
+	kfree(ipa3_usb_ctx);
+	return res;
+}
+
+static void ipa3_usb_exit(void)
+{
+	IPA_USB_DBG_LOW("IPA_USB exit\n");
+	ipa_usb_debugfs_remove();
+	kfree(ipa3_usb_ctx);
+}
+
+arch_initcall(ipa3_usb_init);
+module_exit(ipa3_usb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA USB client driver");
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
new file mode 100644
index 0000000..79da63e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -0,0 +1,1251 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <net/addrconf.h>
+#include <linux/ipa.h>
+#include <linux/cdev.h>
+#include <linux/ipa_odu_bridge.h>
+#include "../ipa_common_i.h"
+
+#define ODU_BRIDGE_DRV_NAME "odu_ipa_bridge"
+
+#define ODU_BRIDGE_DBG(fmt, args...) \
+	do { \
+		pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define ODU_BRIDGE_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define ODU_BRIDGE_ERR(fmt, args...) \
+	do { \
+		pr_err(ODU_BRIDGE_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			ODU_BRIDGE_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define ODU_BRIDGE_FUNC_ENTRY() \
+	ODU_BRIDGE_DBG_LOW("ENTRY\n")
+#define ODU_BRIDGE_FUNC_EXIT() \
+	ODU_BRIDGE_DBG_LOW("EXIT\n")
+
+
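+/* true if daddr is the ODU interface link-local IPv6 address (QMI traffic) */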
+#define ODU_BRIDGE_IS_QMI_ADDR(daddr) \
+	(memcmp(&(daddr), &odu_bridge_ctx->llv6_addr, sizeof((daddr))) \
+		== 0)
+
+#define ODU_BRIDGE_IPV4_HDR_NAME "odu_br_ipv4"
+#define ODU_BRIDGE_IPV6_HDR_NAME "odu_br_ipv6"
+
+#define IPA_ODU_SYS_DESC_FIFO_SZ 0x800
+
+#ifdef CONFIG_COMPAT
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR32 _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+				compat_uptr_t)
+#endif
+
+#define IPA_ODU_VER_CHECK() \
+	do { \
+		ret = 0;\
+		if (ipa_get_hw_type() == IPA_HW_None) { \
+			pr_err("IPA HW is unknown\n"); \
+			ret = -EFAULT; \
+		} \
+		else if (ipa_get_hw_type() < IPA_HW_v3_0) \
+			ret = 1; \
+	} while (0)
+
+/**
+ * struct stats - driver statistics, viewable using debugfs
+ * @num_ul_packets: number of packets bridged in uplink direction
+ * @num_dl_packets: number of packets bridged in downlink direction
+ * @num_lan_packets: number of packets bridged to APPS in bridge mode
+ */
+struct stats {
+	u64 num_ul_packets;
+	u64 num_dl_packets;
+	u64 num_lan_packets;
+};
+
+/**
+ * struct odu_bridge_ctx - ODU bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ * @netdev_name: network interface name
+ * @device_ethaddr: network interface ethernet address
+ * @priv: client's private data, to be used in the client's callbacks
+ * @tx_dp_notify: client callback for handling IPA ODU_PROD callback
+ * @send_dl_skb: client callback for sending skb in downlink direction
+ * @stats: statistics, how many packets were transmitted using the SW bridge
+ * @is_connected: is the bridge connected?
+ * @mode: ODU mode (router/bridge)
+ * @lock: for the initialization, connect and disconnect synchronization
+ * @llv6_addr: link local IPv6 address of ODU network interface
+ * @odu_br_ipv4_hdr_hdl: handle for partial ipv4 ethernet header
+ * @odu_br_ipv6_hdr_hdl: handle for partial ipv6 ethernet header
+ * @odu_prod_hdl: handle for IPA_CLIENT_ODU_PROD pipe
+ * @odu_emb_cons_hdl: handle for IPA_CLIENT_ODU_EMB_CONS pipe
+ * @odu_teth_cons_hdl: handle for IPA_CLIENT_ODU_TETH_CONS pipe
+ */
+struct odu_bridge_ctx {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	char netdev_name[IPA_RESOURCE_NAME_MAX];
+	u8 device_ethaddr[ETH_ALEN];
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	struct stats stats;
+	bool is_connected;
+	enum odu_bridge_mode mode;
+	struct mutex lock;
+	struct in6_addr llv6_addr;
+	uint32_t odu_br_ipv4_hdr_hdl;
+	uint32_t odu_br_ipv6_hdr_hdl;
+	u32 odu_prod_hdl;
+	u32 odu_emb_cons_hdl;
+	u32 odu_teth_cons_hdl;
+	u32 ipa_sys_desc_size;
+	void *logbuf;
+	void *logbuf_low;
+};
+static struct odu_bridge_ctx *odu_bridge_ctx;
+
+#ifdef CONFIG_DEBUG_FS
+#define ODU_MAX_MSG_LEN 512
+static char dbg_buff[ODU_MAX_MSG_LEN];
+#endif
+
+static void odu_bridge_emb_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		ODU_BRIDGE_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+	odu_bridge_ctx->send_dl_skb(priv, (struct sk_buff *)data);
+	odu_bridge_ctx->stats.num_dl_packets++;
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+static void odu_bridge_teth_cons_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct ipv6hdr *ipv6hdr;
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct sk_buff *skb_copied;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		ODU_BRIDGE_ERR("unexpected event\n");
+		WARN_ON(1);
+		return;
+	}
+
+	ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+	if (ipv6hdr->version == 6 &&
+	    ipv6_addr_is_multicast(&ipv6hdr->daddr)) {
+		ODU_BRIDGE_DBG_LOW("Multicast pkt, send to APPS and adapter\n");
+		skb_copied = skb_clone(skb, GFP_KERNEL);
+		if (skb_copied) {
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						IPA_RECEIVE,
+						(unsigned long) skb_copied);
+			odu_bridge_ctx->stats.num_lan_packets++;
+		} else {
+			ODU_BRIDGE_ERR("No memory\n");
+		}
+	}
+
+	odu_bridge_ctx->send_dl_skb(priv, skb);
+	odu_bridge_ctx->stats.num_dl_packets++;
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+static int odu_bridge_connect_router(void)
+{
+	struct ipa_sys_connect_params odu_prod_params;
+	struct ipa_sys_connect_params odu_emb_cons_params;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+
+	/* configure RX (ODU->IPA) EP */
+	odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+	odu_prod_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	odu_prod_params.ipa_ep_cfg.nat.nat_en = IPA_SRC_NAT;
+	odu_prod_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size;
+	odu_prod_params.priv = odu_bridge_ctx->priv;
+	odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify;
+	res = ipa_setup_sys_pipe(&odu_prod_params,
+		&odu_bridge_ctx->odu_prod_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+		goto fail_odu_prod;
+	}
+
+	/* configure TX (IPA->ODU) EP */
+	odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	odu_emb_cons_params.desc_fifo_sz = odu_bridge_ctx->ipa_sys_desc_size;
+	odu_emb_cons_params.priv = odu_bridge_ctx->priv;
+	odu_emb_cons_params.notify = odu_bridge_emb_cons_cb;
+	res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+		&odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+		goto fail_odu_emb_cons;
+	}
+
+	ODU_BRIDGE_DBG("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n",
+		odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl);
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+
+fail_odu_emb_cons:
+	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	odu_bridge_ctx->odu_prod_hdl = 0;
+fail_odu_prod:
+	return res;
+}
+
+static int odu_bridge_connect_bridge(void)
+{
+	struct ipa_sys_connect_params odu_prod_params;
+	struct ipa_sys_connect_params odu_emb_cons_params;
+	struct ipa_sys_connect_params odu_teth_cons_params;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	memset(&odu_prod_params, 0, sizeof(odu_prod_params));
+	memset(&odu_emb_cons_params, 0, sizeof(odu_emb_cons_params));
+	memset(&odu_teth_cons_params, 0, sizeof(odu_teth_cons_params));
+
+	/* Build IPA Resource manager dependency graph */
+	ODU_BRIDGE_DBG_LOW("build dependency graph\n");
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+					IPA_RM_RESOURCE_Q6_CONS);
+	if (res && res != -EINPROGRESS) {
+		ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
+		goto fail_add_dependency_1;
+	}
+
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+					IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+	if (res && res != -EINPROGRESS) {
+		ODU_BRIDGE_ERR("ipa_rm_add_dependency() failed\n");
+		goto fail_add_dependency_2;
+	}
+
+	/* configure RX (ODU->IPA) EP */
+	odu_prod_params.client = IPA_CLIENT_ODU_PROD;
+	odu_prod_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+	odu_prod_params.priv = odu_bridge_ctx->priv;
+	odu_prod_params.notify = odu_bridge_ctx->tx_dp_notify;
+	odu_prod_params.skip_ep_cfg = true;
+	res = ipa_setup_sys_pipe(&odu_prod_params,
+		&odu_bridge_ctx->odu_prod_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_PROD %d\n", res);
+		goto fail_odu_prod;
+	}
+
+	/* configure TX tethered (IPA->ODU) EP */
+	odu_teth_cons_params.client = IPA_CLIENT_ODU_TETH_CONS;
+	odu_teth_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+	odu_teth_cons_params.priv = odu_bridge_ctx->priv;
+	odu_teth_cons_params.notify = odu_bridge_teth_cons_cb;
+	odu_teth_cons_params.skip_ep_cfg = true;
+	res = ipa_setup_sys_pipe(&odu_teth_cons_params,
+		&odu_bridge_ctx->odu_teth_cons_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_TETH_CONS %d\n",
+				res);
+		goto fail_odu_teth_cons;
+	}
+
+	/* configure TX embedded(IPA->ODU) EP */
+	odu_emb_cons_params.client = IPA_CLIENT_ODU_EMB_CONS;
+	odu_emb_cons_params.ipa_ep_cfg.hdr.hdr_len = ETH_HLEN;
+	odu_emb_cons_params.ipa_ep_cfg.nat.nat_en = IPA_BYPASS_NAT;
+	odu_emb_cons_params.desc_fifo_sz = IPA_ODU_SYS_DESC_FIFO_SZ;
+	odu_emb_cons_params.priv = odu_bridge_ctx->priv;
+	odu_emb_cons_params.notify = odu_bridge_emb_cons_cb;
+	res = ipa_setup_sys_pipe(&odu_emb_cons_params,
+		&odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res) {
+		ODU_BRIDGE_ERR("fail to setup sys pipe ODU_EMB_CONS %d\n", res);
+		goto fail_odu_emb_cons;
+	}
+
+	ODU_BRIDGE_DBG_LOW("odu_prod_hdl = %d, odu_emb_cons_hdl = %d\n",
+		odu_bridge_ctx->odu_prod_hdl, odu_bridge_ctx->odu_emb_cons_hdl);
+	ODU_BRIDGE_DBG_LOW("odu_teth_cons_hdl = %d\n",
+		odu_bridge_ctx->odu_teth_cons_hdl);
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+
+fail_odu_emb_cons:
+	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl);
+	odu_bridge_ctx->odu_teth_cons_hdl = 0;
+fail_odu_teth_cons:
+	ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	odu_bridge_ctx->odu_prod_hdl = 0;
+fail_odu_prod:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+fail_add_dependency_2:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+				IPA_RM_RESOURCE_Q6_CONS);
+fail_add_dependency_1:
+	return res;
+}
+
+static int odu_bridge_disconnect_router(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU PROD failed\n");
+	odu_bridge_ctx->odu_prod_hdl = 0;
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
+	odu_bridge_ctx->odu_emb_cons_hdl = 0;
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+}
+
+static int odu_bridge_disconnect_bridge(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_prod_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU PROD failed\n");
+	odu_bridge_ctx->odu_prod_hdl = 0;
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_teth_cons_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU TETH CONS failed\n");
+	odu_bridge_ctx->odu_teth_cons_hdl = 0;
+
+	res = ipa_teardown_sys_pipe(odu_bridge_ctx->odu_emb_cons_hdl);
+	if (res)
+		ODU_BRIDGE_ERR("teardown ODU EMB CONS failed\n");
+	odu_bridge_ctx->odu_emb_cons_hdl = 0;
+
+	/* Delete IPA Resource manager dependency graph */
+	ODU_BRIDGE_DBG("deleting dependency graph\n");
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (res && res != -EINPROGRESS)
+		ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+
+	res = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+		IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+	if (res && res != -EINPROGRESS)
+		ODU_BRIDGE_ERR("ipa_rm_delete_dependency() failed\n");
+
+	return 0;
+}
+
+/**
+ * odu_bridge_disconnect() - Disconnect odu bridge
+ *
+ * Disconnects all pipes and, in bridge mode, deletes the IPA RM dependencies
+ *
+ * Return codes: 0- success, error otherwise
+ */
+int odu_bridge_disconnect(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("Not connected\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&odu_bridge_ctx->lock);
+	if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+		res = odu_bridge_disconnect_router();
+		if (res) {
+			ODU_BRIDGE_ERR("disconnect_router failed %d\n", res);
+			goto out;
+		}
+	} else {
+		res = odu_bridge_disconnect_bridge();
+		if (res) {
+			ODU_BRIDGE_ERR("disconnect_bridge failed %d\n", res);
+			goto out;
+		}
+	}
+
+	odu_bridge_ctx->is_connected = false;
+	res = 0;
+out:
+	mutex_unlock(&odu_bridge_ctx->lock);
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_disconnect);
+
+/**
+ * odu_bridge_connect() - Connect odu bridge.
+ *
+ * Calls the mode-specific connect function to connect the IPA pipes
+ * and, in bridge mode, to add the IPA RM dependencies.
+ *
+ * Return codes: 0: success
+ *		-EFAULT: bridge is not initialized or is already
+ *		connected
+ */
+int odu_bridge_connect(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("already connected\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&odu_bridge_ctx->lock);
+	if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+		res = odu_bridge_connect_router();
+		if (res) {
+			ODU_BRIDGE_ERR("connect_router failed\n");
+			goto bail;
+		}
+	} else {
+		res = odu_bridge_connect_bridge();
+		if (res) {
+			ODU_BRIDGE_ERR("connect_bridge failed\n");
+			goto bail;
+		}
+	}
+
+	odu_bridge_ctx->is_connected = true;
+	res = 0;
+bail:
+	mutex_unlock(&odu_bridge_ctx->lock);
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_connect);
+
+/**
+ * odu_bridge_set_mode() - Set bridge mode to Router/Bridge
+ * @mode: mode to be set
+ */
+static int odu_bridge_set_mode(enum odu_bridge_mode mode)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (mode < 0 || mode >= ODU_BRIDGE_MODE_MAX) {
+		ODU_BRIDGE_ERR("Unsupported mode: %d\n", mode);
+		return -EFAULT;
+	}
+
+	ODU_BRIDGE_DBG_LOW("setting mode: %d\n", mode);
+	mutex_lock(&odu_bridge_ctx->lock);
+
+	if (odu_bridge_ctx->mode == mode) {
+		ODU_BRIDGE_DBG_LOW("same mode\n");
+		res = 0;
+		goto bail;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		/* first disconnect the old configuration */
+		if (odu_bridge_ctx->mode == ODU_BRIDGE_MODE_ROUTER) {
+			res = odu_bridge_disconnect_router();
+			if (res) {
+				ODU_BRIDGE_ERR("disconnect_router failed\n");
+				goto bail;
+			}
+		} else {
+			res = odu_bridge_disconnect_bridge();
+			if (res) {
+				ODU_BRIDGE_ERR("disconnect_bridge failed\n");
+				goto bail;
+			}
+		}
+
+		/* connect the new configuration */
+		if (mode == ODU_BRIDGE_MODE_ROUTER) {
+			res = odu_bridge_connect_router();
+			if (res) {
+				ODU_BRIDGE_ERR("connect_router failed\n");
+				goto bail;
+			}
+		} else {
+			res = odu_bridge_connect_bridge();
+			if (res) {
+				ODU_BRIDGE_ERR("connect_bridge failed\n");
+				goto bail;
+			}
+		}
+	}
+	odu_bridge_ctx->mode = mode;
+	res = 0;
+bail:
+	mutex_unlock(&odu_bridge_ctx->lock);
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+};
+
+/**
+ * odu_bridge_set_llv6_addr() - Set link local ipv6 address
+ * @llv6_addr: odu network interface link local address
+ *
+ * This function sets the link local ipv6 address provided by IOCTL
+ */
+static int odu_bridge_set_llv6_addr(struct in6_addr *llv6_addr)
+{
+	struct in6_addr llv6_addr_host;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	llv6_addr_host.s6_addr32[0] = ntohl(llv6_addr->s6_addr32[0]);
+	llv6_addr_host.s6_addr32[1] = ntohl(llv6_addr->s6_addr32[1]);
+	llv6_addr_host.s6_addr32[2] = ntohl(llv6_addr->s6_addr32[2]);
+	llv6_addr_host.s6_addr32[3] = ntohl(llv6_addr->s6_addr32[3]);
+
+	memcpy(&odu_bridge_ctx->llv6_addr, &llv6_addr_host,
+				sizeof(odu_bridge_ctx->llv6_addr));
+	ODU_BRIDGE_DBG_LOW("LLV6 addr: %pI6c\n", &odu_bridge_ctx->llv6_addr);
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return 0;
+};
+
+static long odu_bridge_ioctl(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	int res = 0;
+	struct in6_addr llv6_addr;
+
+	ODU_BRIDGE_DBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if ((_IOC_TYPE(cmd) != ODU_BRIDGE_IOC_MAGIC) ||
+	    (_IOC_NR(cmd) >= ODU_BRIDGE_IOCTL_MAX)) {
+		ODU_BRIDGE_ERR("Invalid ioctl\n");
+		return -ENOIOCTLCMD;
+	}
+
+	switch (cmd) {
+	case ODU_BRIDGE_IOC_SET_MODE:
+		ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_MODE ioctl called\n");
+		res = odu_bridge_set_mode(arg);
+		if (res) {
+			ODU_BRIDGE_ERR("Error, res = %d\n", res);
+			break;
+		}
+		break;
+
+	case ODU_BRIDGE_IOC_SET_LLV6_ADDR:
+		ODU_BRIDGE_DBG("ODU_BRIDGE_IOC_SET_LLV6_ADDR ioctl called\n");
+		res = copy_from_user(&llv6_addr,
+			(struct in6_addr *)arg,
+			sizeof(llv6_addr));
+		if (res) {
+			ODU_BRIDGE_ERR("Error, res = %d\n", res);
+			res = -EFAULT;
+			break;
+		}
+
+		res = odu_bridge_set_llv6_addr(&llv6_addr);
+		if (res) {
+			ODU_BRIDGE_ERR("Error, res = %d\n", res);
+			break;
+		}
+		break;
+
+	default:
+		ODU_BRIDGE_ERR("Unknown ioctl: %d\n", cmd);
+		WARN_ON(1);
+	}
+
+	return res;
+}
+
+#ifdef CONFIG_COMPAT
+static long compat_odu_bridge_ioctl(struct file *file,
+	unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ODU_BRIDGE_IOC_SET_LLV6_ADDR32:
+		cmd = ODU_BRIDGE_IOC_SET_LLV6_ADDR;
+		break;
+	case ODU_BRIDGE_IOC_SET_MODE:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return odu_bridge_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_mode;
+
+static ssize_t odu_debugfs_stats(struct file *file,
+				  char __user *ubuf,
+				  size_t count,
+				  loff_t *ppos)
+{
+	int nbytes = 0;
+
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			    ODU_MAX_MSG_LEN - nbytes,
+			   "UL packets: %lld\n",
+			    odu_bridge_ctx->stats.num_ul_packets);
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			    ODU_MAX_MSG_LEN - nbytes,
+			   "DL packets: %lld\n",
+			    odu_bridge_ctx->stats.num_dl_packets);
+	nbytes += scnprintf(&dbg_buff[nbytes],
+			    ODU_MAX_MSG_LEN - nbytes,
+			    "LAN packets: %lld\n",
+			    odu_bridge_ctx->stats.num_lan_packets);
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t odu_debugfs_hw_bridge_mode_write(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	enum odu_bridge_mode mode;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	if (count > 0)
+		dbg_buff[count-1] = '\0';
+
+	if (strcmp(dbg_buff, "router") == 0) {
+		mode = ODU_BRIDGE_MODE_ROUTER;
+	} else if (strcmp(dbg_buff, "bridge") == 0) {
+		mode = ODU_BRIDGE_MODE_BRIDGE;
+	} else {
+		ODU_BRIDGE_ERR("Bad mode, got %s,\n"
+			 "Use <router> or <bridge>.\n", dbg_buff);
+		return count;
+	}
+
+	odu_bridge_set_mode(mode);
+	return count;
+}
+
+static ssize_t odu_debugfs_hw_bridge_mode_read(struct file *file,
+					     char __user *ubuf,
+					     size_t count,
+					     loff_t *ppos)
+{
+	int nbytes = 0;
+
+	switch (odu_bridge_ctx->mode) {
+	case ODU_BRIDGE_MODE_ROUTER:
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			ODU_MAX_MSG_LEN - nbytes,
+			"router\n");
+		break;
+	case ODU_BRIDGE_MODE_BRIDGE:
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			ODU_MAX_MSG_LEN - nbytes,
+			"bridge\n");
+		break;
+	default:
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			ODU_MAX_MSG_LEN - nbytes,
+			"mode error\n");
+		break;
+
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+const struct file_operations odu_stats_ops = {
+	.read = odu_debugfs_stats,
+};
+
+const struct file_operations odu_hw_bridge_mode_ops = {
+	.read = odu_debugfs_hw_bridge_mode_read,
+	.write = odu_debugfs_hw_bridge_mode_write,
+};
+
+static void odu_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+		S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("odu_ipa_bridge", 0);
+	if (IS_ERR(dent)) {
+		ODU_BRIDGE_ERR("fail to create folder odu_ipa_bridge\n");
+		return;
+	}
+
+	dfile_stats =
+		debugfs_create_file("stats", read_only_mode, dent,
+				    0, &odu_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		ODU_BRIDGE_ERR("fail to create file stats\n");
+		goto fail;
+	}
+
+	dfile_mode =
+		debugfs_create_file("mode", read_write_mode,
+				    dent, 0, &odu_hw_bridge_mode_ops);
+	if (!dfile_mode ||
+	    IS_ERR(dfile_mode)) {
+		ODU_BRIDGE_ERR("fail to create file dfile_mode\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void odu_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#else
+static void odu_debugfs_init(void) {}
+static void odu_debugfs_destroy(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+
+static const struct file_operations odu_bridge_drv_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = odu_bridge_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_odu_bridge_ioctl,
+#endif
+};
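+
+/*
+ * Userspace usage sketch for the ioctl interface (illustrative only; the
+ * device node path shown is an assumption based on ODU_BRIDGE_DRV_NAME and
+ * may differ on a given target):
+ *
+ *	int fd = open("/dev/odu_ipa_bridge", O_RDWR);
+ *
+ *	if (fd >= 0) {
+ *		ioctl(fd, ODU_BRIDGE_IOC_SET_MODE, ODU_BRIDGE_MODE_BRIDGE);
+ *		close(fd);
+ *	}
+ *
+ * 32-bit callers go through compat_odu_bridge_ioctl() above, which maps
+ * ODU_BRIDGE_IOC_SET_LLV6_ADDR32 to ODU_BRIDGE_IOC_SET_LLV6_ADDR.
+ */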
+
+/**
+ * odu_bridge_tx_dp() - Send skb to ODU bridge
+ * @skb: skb to send
+ * @metadata: metadata on packet
+ *
+ * This function handles an uplink packet.
+ * In Router Mode:
+ *	the packet is sent directly to IPA.
+ * In Bridge Mode:
+ *	the packet is classified to decide whether it should reach the
+ *	network stack:
+ *	a QMI IP packet should arrive at the APPS network stack
+ *	an IPv6 multicast packet should arrive at the APPS network stack
+ *	and at Q6
+ *
+ * Return codes: 0 - success, error otherwise
+ */
+int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata)
+{
+	struct sk_buff *skb_copied = NULL;
+	struct ipv6hdr *ipv6hdr;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	switch (odu_bridge_ctx->mode) {
+	case ODU_BRIDGE_MODE_ROUTER:
+		/* Router mode - pass skb to IPA */
+		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+		if (res) {
+			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+			goto out;
+		}
+		odu_bridge_ctx->stats.num_ul_packets++;
+		goto out;
+
+	case ODU_BRIDGE_MODE_BRIDGE:
+		ipv6hdr = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+		if (ipv6hdr->version == 6 &&
+		    ODU_BRIDGE_IS_QMI_ADDR(ipv6hdr->daddr)) {
+			ODU_BRIDGE_DBG_LOW("QMI packet\n");
+			skb_copied = skb_clone(skb, GFP_KERNEL);
+			if (!skb_copied) {
+				ODU_BRIDGE_ERR("No memory\n");
+				return -ENOMEM;
+			}
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						     IPA_RECEIVE,
+						     (unsigned long)skb_copied);
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						     IPA_WRITE_DONE,
+						     (unsigned long)skb);
+			odu_bridge_ctx->stats.num_ul_packets++;
+			odu_bridge_ctx->stats.num_lan_packets++;
+			res = 0;
+			goto out;
+		}
+
+		if (ipv6hdr->version == 6 &&
+		    ipv6_addr_is_multicast(&ipv6hdr->daddr)) {
+			ODU_BRIDGE_DBG_LOW(
+				"Multicast pkt, send to APPS and IPA\n");
+			skb_copied = skb_clone(skb, GFP_KERNEL);
+			if (!skb_copied) {
+				ODU_BRIDGE_ERR("No memory\n");
+				return -ENOMEM;
+			}
+
+			res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+			if (res) {
+				ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+				dev_kfree_skb(skb_copied);
+				goto out;
+			}
+
+			odu_bridge_ctx->tx_dp_notify(odu_bridge_ctx->priv,
+						     IPA_RECEIVE,
+						     (unsigned long)skb_copied);
+			odu_bridge_ctx->stats.num_ul_packets++;
+			odu_bridge_ctx->stats.num_lan_packets++;
+			goto out;
+		}
+
+		res = ipa_tx_dp(IPA_CLIENT_ODU_PROD, skb, metadata);
+		if (res) {
+			ODU_BRIDGE_DBG("tx dp failed %d\n", res);
+			goto out;
+		}
+		odu_bridge_ctx->stats.num_ul_packets++;
+		goto out;
+
+	default:
+		ODU_BRIDGE_ERR("Unsupported mode: %d\n", odu_bridge_ctx->mode);
+		WARN_ON(1);
+		res = -EFAULT;
+
+	}
+out:
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_tx_dp);
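+
+/*
+ * Caller sketch for odu_bridge_tx_dp() (illustrative only; the adapter-side
+ * skb source, metadata handling and error handling are assumptions, not
+ * defined in this file):
+ *
+ *	struct ipa_tx_meta meta;
+ *	int ret;
+ *
+ *	memset(&meta, 0, sizeof(meta));
+ *	ret = odu_bridge_tx_dp(skb, &meta);
+ *	if (ret)
+ *		pr_debug("odu bridge uplink send failed %d\n", ret);
+ */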
+
+static int odu_bridge_add_hdrs(void)
+{
+	struct ipa_ioc_add_hdr *hdrs;
+	struct ipa_hdr_add *ipv4_hdr;
+	struct ipa_hdr_add *ipv6_hdr;
+	struct ethhdr *eth_ipv4;
+	struct ethhdr *eth_ipv6;
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+	hdrs = kzalloc(sizeof(*hdrs) + sizeof(*ipv4_hdr) + sizeof(*ipv6_hdr),
+			GFP_KERNEL);
+	if (!hdrs) {
+		ODU_BRIDGE_ERR("no mem\n");
+		res = -ENOMEM;
+		goto out;
+	}
+	ipv4_hdr = &hdrs->hdr[0];
+	eth_ipv4 = (struct ethhdr *)(ipv4_hdr->hdr);
+	ipv6_hdr = &hdrs->hdr[1];
+	eth_ipv6 = (struct ethhdr *)(ipv6_hdr->hdr);
+	strlcpy(ipv4_hdr->name, ODU_BRIDGE_IPV4_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv4->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN);
+	eth_ipv4->h_proto = htons(ETH_P_IP);
+	ipv4_hdr->hdr_len = ETH_HLEN;
+	ipv4_hdr->is_partial = 1;
+	ipv4_hdr->is_eth2_ofst_valid = 1;
+	ipv4_hdr->eth2_ofst = 0;
+	strlcpy(ipv6_hdr->name, ODU_BRIDGE_IPV6_HDR_NAME,
+		IPA_RESOURCE_NAME_MAX);
+	memcpy(eth_ipv6->h_source, odu_bridge_ctx->device_ethaddr, ETH_ALEN);
+	eth_ipv6->h_proto = htons(ETH_P_IPV6);
+	ipv6_hdr->hdr_len = ETH_HLEN;
+	ipv6_hdr->is_partial = 1;
+	ipv6_hdr->is_eth2_ofst_valid = 1;
+	ipv6_hdr->eth2_ofst = 0;
+	hdrs->commit = 1;
+	hdrs->num_hdrs = 2;
+	res = ipa_add_hdr(hdrs);
+	if (res) {
+		ODU_BRIDGE_ERR("Fail on Header-Insertion(%d)\n", res);
+		goto out_free_mem;
+	}
+	if (ipv4_hdr->status) {
+		ODU_BRIDGE_ERR("Fail on Header-Insertion ipv4(%d)\n",
+				ipv4_hdr->status);
+		res = ipv4_hdr->status;
+		goto out_free_mem;
+	}
+	if (ipv6_hdr->status) {
+		ODU_BRIDGE_ERR("Fail on Header-Insertion ipv6(%d)\n",
+				ipv6_hdr->status);
+		res = ipv6_hdr->status;
+		goto out_free_mem;
+	}
+	odu_bridge_ctx->odu_br_ipv4_hdr_hdl = ipv4_hdr->hdr_hdl;
+	odu_bridge_ctx->odu_br_ipv6_hdr_hdl = ipv6_hdr->hdr_hdl;
+
+	res = 0;
+out_free_mem:
+	kfree(hdrs);
+out:
+	ODU_BRIDGE_FUNC_EXIT();
+	return res;
+}
+
+static void odu_bridge_del_hdrs(void)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *ipv4;
+	struct ipa_hdr_del *ipv6;
+	int result;
+
+	del_hdr = kzalloc(sizeof(*del_hdr) + sizeof(*ipv4) +
+			sizeof(*ipv6), GFP_KERNEL);
+	if (!del_hdr)
+		return;
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 2;
+	ipv4 = &del_hdr->hdl[0];
+	ipv4->hdl = odu_bridge_ctx->odu_br_ipv4_hdr_hdl;
+	ipv6 = &del_hdr->hdl[1];
+	ipv6->hdl = odu_bridge_ctx->odu_br_ipv6_hdr_hdl;
+	result = ipa_del_hdr(del_hdr);
+	if (result || ipv4->status || ipv6->status)
+		ODU_BRIDGE_ERR("ipa_del_hdr failed");
+	kfree(del_hdr);
+}
+
+/**
+ * odu_bridge_register_properties() - set Tx/Rx properties for ipacm
+ *
+ * Register the network interface with Tx and Rx properties.
+ * Tx properties are for data flowing from IPA to the adapter; they
+ * have Header-Insertion properties for both IPv4 and IPv6 Ethernet framing.
+ * Rx properties are for data flowing from the adapter to IPA; they have a
+ * simple rule which always "hits".
+ *
+ */
+static int odu_bridge_register_properties(void)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *ipv4_property;
+	struct ipa_ioc_tx_intf_prop *ipv6_property;
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	int res = 0;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	tx_properties.prop = properties;
+	ipv4_property = &tx_properties.prop[0];
+	ipv4_property->ip = IPA_IP_v4;
+	ipv4_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy(ipv4_property->hdr_name, ODU_BRIDGE_IPV4_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	ipv6_property = &tx_properties.prop[1];
+	ipv6_property->ip = IPA_IP_v6;
+	ipv6_property->dst_pipe = IPA_CLIENT_ODU_EMB_CONS;
+	ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	strlcpy(ipv6_property->hdr_name, ODU_BRIDGE_IPV6_HDR_NAME,
+			IPA_RESOURCE_NAME_MAX);
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask = 0;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_ipv4_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask = 0;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_ipv6_property->hdr_l2_type = IPA_HDR_L2_ETHERNET_II;
+	rx_properties.num_props = 2;
+
+	res = ipa_register_intf(odu_bridge_ctx->netdev_name, &tx_properties,
+		&rx_properties);
+	if (res) {
+		ODU_BRIDGE_ERR("fail on Tx/Rx properties registration %d\n",
+									res);
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+
+	return res;
+}
+
+static void odu_bridge_deregister_properties(void)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+	res = ipa_deregister_intf(odu_bridge_ctx->netdev_name);
+	if (res)
+		ODU_BRIDGE_ERR("Fail on Tx prop deregister %d\n", res);
+	ODU_BRIDGE_FUNC_EXIT();
+}
+
+/**
+ * odu_bridge_init() - Initialize the ODU bridge driver
+ * @params: initialization parameters
+ *
+ * This function initializes all bridge internal data and registers the odu
+ * bridge with the kernel for IOCTL and debugfs.
+ * Header addition and interface properties are registered with the IPA
+ * driver.
+ *
+ * Return codes: 0: success,
+ *		-EINVAL - Bad parameter
+ *		Other negative value - Failure
+ */
+int odu_bridge_init(struct odu_bridge_params *params)
+{
+	int res;
+
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!params) {
+		ODU_BRIDGE_ERR("null pointer params\n");
+		return -EINVAL;
+	}
+	if (!params->netdev_name) {
+		ODU_BRIDGE_ERR("null pointer params->netdev_name\n");
+		return -EINVAL;
+	}
+	if (!params->tx_dp_notify) {
+		ODU_BRIDGE_ERR("null pointer params->tx_dp_notify\n");
+		return -EINVAL;
+	}
+	if (!params->send_dl_skb) {
+		ODU_BRIDGE_ERR("null pointer params->send_dl_skb\n");
+		return -EINVAL;
+	}
+	if (odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Already initialized\n");
+		return -EFAULT;
+	}
+	if (!ipa_is_ready()) {
+		ODU_BRIDGE_ERR("IPA is not ready\n");
+		return -EFAULT;
+	}
+
+	ODU_BRIDGE_DBG("device_ethaddr=%pM\n", params->device_ethaddr);
+
+	odu_bridge_ctx = kzalloc(sizeof(*odu_bridge_ctx), GFP_KERNEL);
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	odu_bridge_ctx->class = class_create(THIS_MODULE, ODU_BRIDGE_DRV_NAME);
+	if (IS_ERR(odu_bridge_ctx->class)) {
+		ODU_BRIDGE_ERR("Class_create err.\n");
+		res = -ENODEV;
+		goto fail_class_create;
+	}
+
+	res = alloc_chrdev_region(&odu_bridge_ctx->dev_num, 0, 1,
+				  ODU_BRIDGE_DRV_NAME);
+	if (res) {
+		ODU_BRIDGE_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	odu_bridge_ctx->dev = device_create(odu_bridge_ctx->class, NULL,
+		odu_bridge_ctx->dev_num, odu_bridge_ctx, ODU_BRIDGE_DRV_NAME);
+	if (IS_ERR(odu_bridge_ctx->dev)) {
+		ODU_BRIDGE_ERR(":device_create err.\n");
+		res = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&odu_bridge_ctx->cdev, &odu_bridge_drv_fops);
+	odu_bridge_ctx->cdev.owner = THIS_MODULE;
+	odu_bridge_ctx->cdev.ops = &odu_bridge_drv_fops;
+
+	res = cdev_add(&odu_bridge_ctx->cdev, odu_bridge_ctx->dev_num, 1);
+	if (res) {
+		ODU_BRIDGE_ERR(":cdev_add err=%d\n", -res);
+		res = -ENODEV;
+		goto fail_cdev_add;
+	}
+
+	odu_debugfs_init();
+
+	strlcpy(odu_bridge_ctx->netdev_name, params->netdev_name,
+		IPA_RESOURCE_NAME_MAX);
+	odu_bridge_ctx->priv = params->priv;
+	odu_bridge_ctx->tx_dp_notify = params->tx_dp_notify;
+	odu_bridge_ctx->send_dl_skb = params->send_dl_skb;
+	memcpy(odu_bridge_ctx->device_ethaddr, params->device_ethaddr,
+		ETH_ALEN);
+	odu_bridge_ctx->ipa_sys_desc_size = params->ipa_desc_size;
+	odu_bridge_ctx->mode = ODU_BRIDGE_MODE_ROUTER;
+
+	mutex_init(&odu_bridge_ctx->lock);
+
+	res = odu_bridge_add_hdrs();
+	if (res) {
+		ODU_BRIDGE_ERR("fail on odu_bridge_add_hdr %d\n", res);
+		goto fail_add_hdrs;
+	}
+
+	res = odu_bridge_register_properties();
+	if (res) {
+		ODU_BRIDGE_ERR("fail on register properties %d\n", res);
+		goto fail_register_properties;
+	}
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+
+fail_register_properties:
+	odu_bridge_del_hdrs();
+fail_add_hdrs:
+	odu_debugfs_destroy();
+fail_cdev_add:
+	device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(odu_bridge_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	class_destroy(odu_bridge_ctx->class);
+fail_class_create:
+	kfree(odu_bridge_ctx);
+	odu_bridge_ctx = NULL;
+	return res;
+}
+EXPORT_SYMBOL(odu_bridge_init);
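+
+/*
+ * Initialization sketch for odu_bridge_init() (illustrative only; the
+ * adapter callbacks, context pointer, ethaddr source and descriptor size
+ * below are hypothetical values chosen for the example):
+ *
+ *	struct odu_bridge_params params;
+ *
+ *	memset(&params, 0, sizeof(params));
+ *	params.netdev_name = "odu0";
+ *	params.priv = adapter_ctx;
+ *	params.tx_dp_notify = adapter_tx_dp_notify_cb;
+ *	params.send_dl_skb = adapter_send_dl_skb_cb;
+ *	memcpy(params.device_ethaddr, adapter_ethaddr, ETH_ALEN);
+ *	params.ipa_desc_size = 2048;
+ *
+ *	ret = odu_bridge_init(&params);
+ *	if (ret)
+ *		return ret;
+ */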
+
+/**
+ * odu_bridge_cleanup() - De-Initialize the ODU bridge driver
+ *
+ * Return codes: 0: success,
+ *		-EINVAL - Bad parameter
+ *		Other negative value - Failure
+ */
+int odu_bridge_cleanup(void)
+{
+	ODU_BRIDGE_FUNC_ENTRY();
+
+	if (!odu_bridge_ctx) {
+		ODU_BRIDGE_ERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (odu_bridge_ctx->is_connected) {
+		ODU_BRIDGE_ERR("cannot deinit while bridge is connected\n");
+		return -EFAULT;
+	}
+
+	odu_bridge_deregister_properties();
+	odu_bridge_del_hdrs();
+	odu_debugfs_destroy();
+	cdev_del(&odu_bridge_ctx->cdev);
+	device_destroy(odu_bridge_ctx->class, odu_bridge_ctx->dev_num);
+	unregister_chrdev_region(odu_bridge_ctx->dev_num, 1);
+	class_destroy(odu_bridge_ctx->class);
+	ipc_log_context_destroy(odu_bridge_ctx->logbuf);
+	ipc_log_context_destroy(odu_bridge_ctx->logbuf_low);
+	kfree(odu_bridge_ctx);
+	odu_bridge_ctx = NULL;
+
+	ODU_BRIDGE_FUNC_EXIT();
+	return 0;
+}
+EXPORT_SYMBOL(odu_bridge_cleanup);
+
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("ODU bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_common_i.h b/drivers/platform/msm/ipa/ipa_common_i.h
new file mode 100644
index 0000000..981129e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_common_i.h
@@ -0,0 +1,383 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_COMMON_I_H_
+#define _IPA_COMMON_I_H_
+#include <linux/ipc_logging.h>
+#include <linux/ipa.h>
+#include <linux/ipa_uc_offload.h>
+
+#define __FILENAME__ \
+	(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
+
+#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = EP; \
+		log_info.id_string = ipa_clients_strings[client]
+
+#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = SIMPLE; \
+		log_info.id_string = __func__
+
+#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = RESOURCE; \
+		log_info.id_string = resource_name
+
+#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
+		log_info.file = __FILENAME__; \
+		log_info.line = __LINE__; \
+		log_info.type = SPECIAL; \
+		log_info.id_string = id_str
+
+#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+		ipa_inc_client_enable_clks(&log_info); \
+	} while (0)
+
+#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
+	do { \
+		struct ipa_active_client_logging_info log_info; \
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
+		ipa_dec_client_disable_clks(&log_info); \
+	} while (0)
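+
+/*
+ * Usage sketch for the active-clients helpers (illustrative only; the
+ * register-access function is hypothetical): a caller votes for IPA clocks
+ * for the duration of a hardware access and logs the vote with file/line
+ * information via the PREP macros above.
+ *
+ *	static void my_dump_ipa_regs(void)
+ *	{
+ *		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+ *		... access IPA registers here ...
+ *		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+ *	}
+ */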
+
+#define ipa_assert_on(condition)\
+do {\
+	if (unlikely(condition))\
+		ipa_assert();\
+} while (0)
+
+#define IPA_CLIENT_IS_PROD(x) (x >= IPA_CLIENT_PROD && x < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) (x >= IPA_CLIENT_CONS && x < IPA_CLIENT_MAX)
+
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
+
+enum ipa_active_client_log_type {
+	EP,
+	SIMPLE,
+	RESOURCE,
+	SPECIAL,
+	INVALID
+};
+
+struct ipa_active_client_logging_info {
+	const char *id_string;
+	char *file;
+	int line;
+	enum ipa_active_client_log_type type;
+};
+
+/**
+ * struct ipa_mem_buffer - IPA memory buffer
+ * @base: base
+ * @phys_base: physical base address
+ * @size: size of memory buffer
+ */
+struct ipa_mem_buffer {
+	void *base;
+	dma_addr_t phys_base;
+	u32 size;
+};
+
+#define IPA_MHI_GSI_ER_START 10
+#define IPA_MHI_GSI_ER_END 16
+
+/**
+ * enum ipa3_mhi_burst_mode - MHI channel burst mode state
+ *
+ * Values are according to MHI specification
+ * @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
+ * disabled for SW channels
+ * @IPA_MHI_BURST_MODE_RESERVED:
+ * @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
+ * @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
+ *
+ */
+enum ipa3_mhi_burst_mode {
+	IPA_MHI_BURST_MODE_DEFAULT,
+	IPA_MHI_BURST_MODE_RESERVED,
+	IPA_MHI_BURST_MODE_DISABLE,
+	IPA_MHI_BURST_MODE_ENABLE,
+};
+
+/**
+ * enum ipa_hw_mhi_channel_states - MHI channel state machine
+ *
+ * Values are according to MHI specification
+ * @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
+ *	the host or device.
+ * @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
+ *	initialized and configured by host, including its channel context and
+ *	associated transfer ring. While in this state, the channel is not
+ *	active and the device does not process transfers.
+ * @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
+ *	for channels.
+ * @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
+ *	The device does not process transfers for the channel in this state.
+ *	This state is typically used to synchronize the transition to low power
+ *	modes.
+ * @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
+ *	The device does not process transfers for the channel in this state.
+ * @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
+ *	from the transfer ring associated with the channel.
+ * @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in
+ *	operational scenario.
+ */
+enum ipa_hw_mhi_channel_states {
+	IPA_HW_MHI_CHANNEL_STATE_DISABLE	= 0,
+	IPA_HW_MHI_CHANNEL_STATE_ENABLE		= 1,
+	IPA_HW_MHI_CHANNEL_STATE_RUN		= 2,
+	IPA_HW_MHI_CHANNEL_STATE_SUSPEND	= 3,
+	IPA_HW_MHI_CHANNEL_STATE_STOP		= 4,
+	IPA_HW_MHI_CHANNEL_STATE_ERROR		= 5,
+	IPA_HW_MHI_CHANNEL_STATE_INVALID	= 0xFF
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+ * command. Parameters are sent as 32b immediate parameters.
+ * @isDlUlSyncEnabled: Flag to indicate if DL/UL synchronization is enabled
+ * @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
+ *	for UL data)
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+union IpaHwMhiDlUlSyncCmdData_t {
+	struct IpaHwMhiDlUlSyncCmdParams_t {
+		u32 isDlUlSyncEnabled:8;
+		u32 UlAccmVal:8;
+		u32 ulMsiEventThreshold:8;
+		u32 dlMsiEventThreshold:8;
+	} params;
+	u32 raw32b;
+};
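+
+/*
+ * Packing sketch (illustrative only; the threshold values are arbitrary):
+ * the union allows the four 8-bit parameters to be filled by name and then
+ * passed to the uC as a single 32-bit immediate through raw32b.
+ *
+ *	union IpaHwMhiDlUlSyncCmdData_t cmd;
+ *
+ *	cmd.raw32b = 0;
+ *	cmd.params.isDlUlSyncEnabled = 1;
+ *	cmd.params.UlAccmVal = 10;
+ *	cmd.params.ulMsiEventThreshold = 4;
+ *	cmd.params.dlMsiEventThreshold = 4;
+ *	ipa_uc_mhi_send_dl_ul_sync_info(&cmd);
+ */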
+
+struct ipa_mhi_ch_ctx {
+	u8 chstate;/*0-7*/
+	u8 brstmode:2;/*8-9*/
+	u8 pollcfg:6;/*10-15*/
+	u16 rsvd;/*16-31*/
+	u32 chtype;
+	u32 erindex;
+	u64 rbase;
+	u64 rlen;
+	u64 rp;
+	u64 wp;
+} __packed;
+
+struct ipa_mhi_ev_ctx {
+	u32 intmodc:16;
+	u32 intmodt:16;
+	u32 ertype;
+	u32 msivec;
+	u64 rbase;
+	u64 rlen;
+	u64 rp;
+	u64 wp;
+} __packed;
+
+struct ipa_mhi_init_uc_engine {
+	struct ipa_mhi_msi_info *msi;
+	u32 mmio_addr;
+	u32 host_ctrl_addr;
+	u32 host_data_addr;
+	u32 first_ch_idx;
+	u32 first_er_idx;
+	union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info;
+};
+
+struct ipa_mhi_init_gsi_engine {
+	u32 first_ch_idx;
+};
+
+struct ipa_mhi_init_engine {
+	struct ipa_mhi_init_uc_engine uC;
+	struct ipa_mhi_init_gsi_engine gsi;
+};
+
+struct start_gsi_channel {
+	enum ipa_hw_mhi_channel_states state;
+	struct ipa_mhi_msi_info *msi;
+	struct ipa_mhi_ev_ctx *ev_ctx_host;
+	u64 event_context_addr;
+	struct ipa_mhi_ch_ctx *ch_ctx_host;
+	u64 channel_context_addr;
+	void (*ch_err_cb)(struct gsi_chan_err_notify *notify);
+	void (*ev_err_cb)(struct gsi_evt_err_notify *notify);
+	void *channel;
+	bool assert_bit40;
+	struct gsi_mhi_channel_scratch *mhi;
+	unsigned long *cached_gsi_evt_ring_hdl;
+	uint8_t evchid;
+};
+
+struct start_uc_channel {
+	enum ipa_hw_mhi_channel_states state;
+	u8 index;
+	u8 id;
+};
+
+struct start_mhi_channel {
+	struct start_uc_channel uC;
+	struct start_gsi_channel gsi;
+};
+
+struct ipa_mhi_connect_params_internal {
+	struct ipa_sys_connect_params *sys;
+	u8 channel_id;
+	struct start_mhi_channel start;
+};
+
+/**
+ * struct ipa_hdr_offset_entry - IPA header offset entry
+ * @link: entry's link in global header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+};
+
+extern const char *ipa_clients_strings[];
+
+#define IPA_IPC_LOGGING(buf, fmt, args...) \
+	do { \
+		if (buf) \
+			ipc_log_string((buf), fmt, __func__, __LINE__, \
+				## args); \
+	} while (0)
+
+void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+int ipa_inc_client_enable_clks_no_block(
+	struct ipa_active_client_logging_info *id);
+int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource);
+int ipa_resume_resource(enum ipa_rm_resource_name name);
+int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource);
+int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+	u32 bandwidth_mbps);
+void *ipa_get_ipc_logbuf(void);
+void *ipa_get_ipc_logbuf_low(void);
+void ipa_assert(void);
+
+/* MHI */
+int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params);
+int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+int ipa_disconnect_mhi_pipe(u32 clnt_hdl);
+bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client);
+int ipa_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+int ipa_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+int ipa_generate_tag_process(void);
+int ipa_disable_sps_pipe(enum ipa_client_type client);
+int ipa_mhi_reset_channel_internal(enum ipa_client_type client);
+int ipa_mhi_start_channel_internal(enum ipa_client_type client);
+bool ipa_mhi_sps_channel_empty(enum ipa_client_type client);
+int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index);
+int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info);
+int ipa_mhi_destroy_channel(enum ipa_client_type client);
+int ipa_mhi_is_using_dma(bool *flag);
+const char *ipa_mhi_get_state_str(int state);
+
+/* MHI uC */
+int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa_uc_mhi_init(void (*ready_cb)(void),
+	void (*wakeup_request_cb)(void));
+void ipa_uc_mhi_cleanup(void);
+int ipa_uc_mhi_reset_channel(int channelHandle);
+int ipa_uc_mhi_suspend_channel(int channelHandle);
+int ipa_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
+
+/* uC */
+int ipa_uc_state_check(void);
+
+/* general */
+void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
+void ipa_set_tag_process_before_gating(bool val);
+bool ipa_has_open_aggr_frame(enum ipa_client_type client);
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp);
+
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+u8 *ipa_write_64(u64 w, u8 *dest);
+u8 *ipa_write_32(u32 w, u8 *dest);
+u8 *ipa_write_16(u16 hw, u8 *dest);
+u8 *ipa_write_8(u8 b, u8 *dest);
+u8 *ipa_pad_to_64(u8 *dest);
+u8 *ipa_pad_to_32(u8 *dest);
+const char *ipa_get_version_string(enum ipa_hw_type ver);
+
+#endif /* _IPA_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
new file mode 100644
index 0000000..1431dcf
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -0,0 +1,1198 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
+
+static const char *resource_name_to_str[IPA_RM_RESOURCE_MAX] = {
+	__stringify(IPA_RM_RESOURCE_Q6_PROD),
+	__stringify(IPA_RM_RESOURCE_USB_PROD),
+	__stringify(IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD),
+	__stringify(IPA_RM_RESOURCE_HSIC_PROD),
+	__stringify(IPA_RM_RESOURCE_STD_ECM_PROD),
+	__stringify(IPA_RM_RESOURCE_RNDIS_PROD),
+	__stringify(IPA_RM_RESOURCE_WWAN_0_PROD),
+	__stringify(IPA_RM_RESOURCE_WLAN_PROD),
+	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
+	__stringify(IPA_RM_RESOURCE_MHI_PROD),
+	__stringify(IPA_RM_RESOURCE_Q6_CONS),
+	__stringify(IPA_RM_RESOURCE_USB_CONS),
+	__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
+	__stringify(IPA_RM_RESOURCE_HSIC_CONS),
+	__stringify(IPA_RM_RESOURCE_WLAN_CONS),
+	__stringify(IPA_RM_RESOURCE_APPS_CONS),
+	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
+	__stringify(IPA_RM_RESOURCE_MHI_CONS),
+};
+
+struct ipa_rm_profile_vote_type {
+	enum ipa_voltage_level volt[IPA_RM_RESOURCE_MAX];
+	enum ipa_voltage_level curr_volt;
+	u32 bw_prods[IPA_RM_RESOURCE_PROD_MAX];
+	u32 bw_cons[IPA_RM_RESOURCE_CONS_MAX];
+	u32 curr_bw;
+};
+
+struct ipa_rm_context_type {
+	struct ipa_rm_dep_graph *dep_graph;
+	struct workqueue_struct *ipa_rm_wq;
+	spinlock_t ipa_rm_lock;
+	struct ipa_rm_profile_vote_type prof_vote;
+};
+static struct ipa_rm_context_type *ipa_rm_ctx;
+
+struct ipa_rm_notify_ipa_work_type {
+	struct work_struct		work;
+	enum ipa_voltage_level		volt;
+	u32				bandwidth_mbps;
+};
+
+/**
+ * ipa_rm_create_resource() - create resource
+ * @create_params: [in] parameters needed
+ *                  for resource initialization
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by IPA RM client to initialize client's resources.
+ * This API should be called before any other IPA RM API on a given resource
+ * name.
+ *
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params)
+{
+	struct ipa_rm_resource *resource;
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!create_params) {
+		IPA_RM_ERR("invalid args\n");
+		return -EINVAL;
+	}
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(create_params->name));
+
+	if (create_params->floor_voltage < 0 ||
+		create_params->floor_voltage >= IPA_VOLTAGE_MAX) {
+		IPA_RM_ERR("invalid voltage %d\n",
+			create_params->floor_voltage);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  create_params->name,
+					  &resource) == 0) {
+		IPA_RM_ERR("resource already exists\n");
+		result = -EEXIST;
+		goto bail;
+	}
+	result = ipa_rm_resource_create(create_params,
+			&resource);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_create() failed\n");
+		goto bail;
+	}
+	result = ipa_rm_dep_graph_add(ipa_rm_ctx->dep_graph, resource);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_dep_graph_add() failed\n");
+		ipa_rm_resource_delete(resource);
+		goto bail;
+	}
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_create_resource);
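+
+/*
+ * Usage sketch for ipa_rm_create_resource() (illustrative only; callback
+ * registration fields of the create params are omitted here and the floor
+ * voltage is just an example value):
+ *
+ *	struct ipa_rm_create_params create_params;
+ *
+ *	memset(&create_params, 0, sizeof(create_params));
+ *	create_params.name = IPA_RM_RESOURCE_USB_PROD;
+ *	create_params.floor_voltage = IPA_VOLTAGE_UNSPECIFIED;
+ *
+ *	ret = ipa_rm_create_resource(&create_params);
+ *	if (ret)
+ *		return ret;
+ */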
+
+/**
+ * ipa_rm_delete_resource() - delete resource
+ * @resource_name: name of resource to be deleted
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * This function is called by IPA RM client to delete client's resources.
+ *
+ */
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name)
+{
+	struct ipa_rm_resource *resource;
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					resource_name,
+						&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EINVAL;
+		goto bail;
+	}
+	result = ipa_rm_resource_delete(resource);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_delete() failed\n");
+		goto bail;
+	}
+	result = ipa_rm_dep_graph_remove(ipa_rm_ctx->dep_graph,
+								resource_name);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_dep_graph_remove() failed\n");
+		goto bail;
+	}
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_delete_resource);
+
+static int _ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name,
+			bool userspace_dep)
+{
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+				 ipa_rm_resource_str(depends_on_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	result = ipa_rm_dep_graph_add_dependency(
+						ipa_rm_ctx->dep_graph,
+						resource_name,
+						depends_on_name,
+						userspace_dep);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_add_dependency() - create dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case a client registered with IPA RM
+ */
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency(resource_name, depends_on_name, false);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency);
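+
+/*
+ * Usage sketch for ipa_rm_add_dependency() (illustrative only): make the
+ * USB producer depend on the Q6 consumer, so that granting USB_PROD also
+ * requests Q6_CONS. Treating -EINPROGRESS as a non-fatal, asynchronous
+ * grant is an assumption borrowed from the sync variant below.
+ *
+ *	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+ *				    IPA_RM_RESOURCE_Q6_CONS);
+ *	if (ret && ret != -EINPROGRESS)
+ *		pr_err("add dependency failed %d\n", ret);
+ */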
+
+/**
+ * ipa_rm_add_dependency_from_ioctl() - create dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from an IOCTL and the dependency
+ * will be marked as if it was added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case a client registered with IPA RM
+ */
+int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency(resource_name, depends_on_name, true);
+}
+
+static int _ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name,
+		bool userspace_dep)
+{
+	int result;
+	struct ipa_rm_resource *consumer;
+	unsigned long time;
+	unsigned long flags;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+				 ipa_rm_resource_str(depends_on_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	result = ipa_rm_dep_graph_add_dependency(
+						ipa_rm_ctx->dep_graph,
+						resource_name,
+						depends_on_name,
+						userspace_dep);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (result == -EINPROGRESS) {
+		ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				depends_on_name,
+				&consumer);
+		IPA_RM_DBG("%s waits for GRANT of %s.\n",
+				ipa_rm_resource_str(resource_name),
+				ipa_rm_resource_str(depends_on_name));
+		time = wait_for_completion_timeout(
+				&((struct ipa_rm_resource_cons *)consumer)->
+				request_consumer_in_progress,
+				HZ * 5);
+		result = 0;
+		if (!time) {
+			IPA_RM_ERR("TIMEOUT waiting for %s GRANT event.",
+					ipa_rm_resource_str(depends_on_name));
+			result = -ETIME;
+		} else {
+			IPA_RM_DBG("%s waited for %s GRANT %lu time.\n",
+				ipa_rm_resource_str(resource_name),
+				ipa_rm_resource_str(depends_on_name),
+				time);
+		}
+	}
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+/**
+ * ipa_rm_add_dependency_sync() - Create a dependency between 2 resources
+ * in a synchronized fashion. In case a producer resource is in GRANTED state
+ * and the newly added consumer resource is in RELEASED state, the consumer
+ * entity will be requested and the function will block until the consumer
+ * is granted.
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Unlike ipa_rm_add_dependency_sync_from_ioctl(), this variant marks the
+ * dependency as added by the kernel rather than by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: May block. See documentation above.
+ */
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency_sync(resource_name, depends_on_name,
+		false);
+}
+EXPORT_SYMBOL(ipa_rm_add_dependency_sync);
+
+/**
+ * ipa_rm_add_dependency_sync_from_ioctl() - Create a dependency between 2
+ * resources in a synchronized fashion. In case a producer resource is in
+ * GRANTED state and the newly added consumer resource is in RELEASED state,
+ * the consumer entity will be requested and the function will block until
+ * the consumer is granted.
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: May block. See documentation above.
+ */
+int ipa_rm_add_dependency_sync_from_ioctl(
+	enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_add_dependency_sync(resource_name, depends_on_name,
+		true);
+}
+
+static int _ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name,
+			bool userspace_dep)
+{
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s -> %s\n", ipa_rm_resource_str(resource_name),
+				 ipa_rm_resource_str(depends_on_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	result = ipa_rm_dep_graph_delete_dependency(
+			  ipa_rm_ctx->dep_graph,
+			  resource_name,
+			  depends_on_name,
+			  userspace_dep);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_delete_dependency() - delete dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case a client registered with IPA RM
+ */
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_delete_dependency(resource_name, depends_on_name, false);
+}
+EXPORT_SYMBOL(ipa_rm_delete_dependency);
+
+/**
+ * ipa_rm_delete_dependency_from_ioctl() - delete dependency between 2 resources
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ *
+ * This function is expected to be called from an IOCTL and the dependency
+ * will be marked as if it was added by userspace.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Side effects: IPA_RM_RESOURCE_GRANTED could be generated
+ * in case a client registered with IPA RM
+ */
+int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name)
+{
+	return _ipa_rm_delete_dependency(resource_name, depends_on_name, true);
+}
+
+/**
+ * ipa_rm_request_resource() - request resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_GRANTED
+ * on successful completion of this operation.
+ */
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name)
+{
+	struct ipa_rm_resource *resource;
+	unsigned long flags;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_request(
+			(struct ipa_rm_resource_prod *)resource);
+
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_request_resource);
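+
+/*
+ * Usage sketch for ipa_rm_request_resource() (illustrative only; the
+ * -EINPROGRESS handling is an assumption about the asynchronous grant
+ * path, where the grant is delivered later through the callback registered
+ * with ipa_rm_register()):
+ *
+ *	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	if (ret == -EINPROGRESS)
+ *		wait for IPA_RM_RESOURCE_GRANTED in the notify callback
+ *	else if (ret)
+ *		pr_err("request failed %d\n", ret);
+ *
+ *	... transfer data ...
+ *
+ *	ipa_rm_release_resource(IPA_RM_RESOURCE_USB_PROD);
+ */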
+
+void delayed_release_work_func(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_delayed_release_work_type *rwork = container_of(
+			to_delayed_work(work),
+			struct ipa_rm_delayed_release_work_type,
+			work);
+
+	if (!IPA_RM_RESORCE_IS_CONS(rwork->resource_name)) {
+		IPA_RM_ERR("can be called on CONS only\n");
+		kfree(rwork);
+		return;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					rwork->resource_name,
+					&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		goto bail;
+	}
+
+	ipa_rm_resource_consumer_release(
+		(struct ipa_rm_resource_cons *)resource, rwork->needed_bw,
+		rwork->dec_usage_count);
+
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	kfree(rwork);
+
+}
+
+/**
+ * ipa_rm_request_resource_with_timer() - requests the specified consumer
+ * resource and releases it after 1 second
+ * @resource_name: name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_delayed_release_work_type *release_work;
+	int result;
+
+	if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		IPA_RM_ERR("can be called on CONS only\n");
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_consumer_request(
+		(struct ipa_rm_resource_cons *)resource, 0, false, true);
+	if (result != 0 && result != -EINPROGRESS) {
+		IPA_RM_ERR("consumer request returned error %d\n", result);
+		result = -EPERM;
+		goto bail;
+	}
+
+	release_work = kzalloc(sizeof(*release_work), GFP_ATOMIC);
+	if (!release_work) {
+		result = -ENOMEM;
+		goto bail;
+	}
+	release_work->resource_name = resource->name;
+	release_work->needed_bw = 0;
+	release_work->dec_usage_count = false;
+	INIT_DELAYED_WORK(&release_work->work, delayed_release_work_func);
+	schedule_delayed_work(&release_work->work,
+			msecs_to_jiffies(IPA_RM_RELEASE_DELAY_IN_MSEC));
+	result = 0;
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+
+/**
+ * ipa_rm_release_resource() - release resource
+ * @resource_name: [in] name of the requested resource
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * All registered callbacks are called with IPA_RM_RESOURCE_RELEASED
+ * on successful completion of this operation.
+ */
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  resource_name,
+					  &resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_release(
+		    (struct ipa_rm_resource_prod *)resource);
+
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_release_resource);
+
+/**
+ * ipa_rm_register() - register for event
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as provided later in  ipa_rm_deregister() call.
+ */
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	int result;
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				resource_name,
+				&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_register(
+			(struct ipa_rm_resource_prod *)resource,
+			reg_params,
+			true);
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_register);
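+
+/*
+ * Registration sketch (illustrative only; the contents of the register
+ * params, i.e. the client's notify callback and private data, are declared
+ * in the included ipa headers and are not shown here): the exact same
+ * params structure must later be passed to ipa_rm_deregister().
+ *
+ *	struct ipa_rm_register_params reg_params;
+ *
+ *	... fill reg_params with the notify callback and private data ...
+ *	ret = ipa_rm_register(IPA_RM_RESOURCE_USB_PROD, &reg_params);
+ *	...
+ *	ipa_rm_deregister(IPA_RM_RESOURCE_USB_PROD, &reg_params);
+ *
+ * The callback is invoked with IPA_RM_RESOURCE_GRANTED or
+ * IPA_RM_RESOURCE_RELEASED as the resource changes state.
+ */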
+
+/**
+ * ipa_rm_deregister() - cancel the registration
+ * @resource_name: resource name
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Registration parameters provided here should be the same
+ * as provided in  ipa_rm_register() call.
+ */
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	int result;
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+
+	if (!IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		IPA_RM_ERR("can be called on PROD only\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+			resource_name,
+			&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_producer_deregister(
+			(struct ipa_rm_resource_prod *)resource,
+			reg_params);
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_deregister);
+
+/**
+ * ipa_rm_set_perf_profile() - set performance profile
+ * @resource_name: resource name
+ * @profile: [in] profile information.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Set resource performance profile.
+ * Updates IPA driver if performance level changed.
+ */
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_perf_profile *profile)
+{
+	int result;
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("%s\n", ipa_rm_resource_str(resource_name));
+	if (profile)
+		IPA_RM_DBG("BW: %d\n", profile->max_supported_bandwidth_mbps);
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+				resource_name,
+				&resource) != 0) {
+		IPA_RM_ERR("resource does not exist\n");
+		result = -EPERM;
+		goto bail;
+	}
+	result = ipa_rm_resource_set_perf_profile(resource, profile);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_set_perf_profile failed %d\n",
+			result);
+		goto bail;
+	}
+
+	result = 0;
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_set_perf_profile);
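+
+/*
+ * Usage sketch for ipa_rm_set_perf_profile() (illustrative only; the
+ * bandwidth figure is arbitrary):
+ *
+ *	struct ipa_rm_perf_profile profile;
+ *
+ *	memset(&profile, 0, sizeof(profile));
+ *	profile.max_supported_bandwidth_mbps = 800;
+ *
+ *	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_USB_PROD, &profile);
+ *	if (ret)
+ *		pr_err("set perf profile failed %d\n", ret);
+ */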
+
+/**
+ * ipa_rm_notify_completion() -
+ *	consumer driver notification for
+ *	request_resource / release_resource operations
+ *	completion
+ * @event: notified event
+ * @resource_name: resource name
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name)
+{
+	int result;
+
+	if (unlikely(!ipa_rm_ctx)) {
+		IPA_RM_ERR("IPA RM was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("event %d on %s\n", event,
+				ipa_rm_resource_str(resource_name));
+	if (!IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		IPA_RM_ERR("can be called on CONS only\n");
+		result = -EINVAL;
+		goto bail;
+	}
+	ipa_rm_wq_send_cmd(IPA_RM_WQ_RESOURCE_CB,
+			resource_name,
+			event,
+			false);
+	result = 0;
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+EXPORT_SYMBOL(ipa_rm_notify_completion);
+
+static void ipa_rm_wq_handler(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_work_type *ipa_rm_work =
+			container_of(work,
+					struct ipa_rm_wq_work_type,
+					work);
+	IPA_RM_DBG_LOW("%s cmd=%d event=%d notify_registered_only=%d\n",
+		ipa_rm_resource_str(ipa_rm_work->resource_name),
+		ipa_rm_work->wq_cmd,
+		ipa_rm_work->event,
+		ipa_rm_work->notify_registered_only);
+	switch (ipa_rm_work->wq_cmd) {
+	case IPA_RM_WQ_NOTIFY_PROD:
+		if (!IPA_RM_RESORCE_IS_PROD(ipa_rm_work->resource_name)) {
+			IPA_RM_ERR("resource is not PROD\n");
+			goto free_work;
+		}
+		spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+		if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+						ipa_rm_work->resource_name,
+						&resource) != 0){
+			IPA_RM_ERR("resource does not exist\n");
+			spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+			goto free_work;
+		}
+		ipa_rm_resource_producer_notify_clients(
+				(struct ipa_rm_resource_prod *)resource,
+				ipa_rm_work->event,
+				ipa_rm_work->notify_registered_only);
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		break;
+	case IPA_RM_WQ_NOTIFY_CONS:
+		break;
+	case IPA_RM_WQ_RESOURCE_CB:
+		spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+		if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+						ipa_rm_work->resource_name,
+						&resource) != 0){
+			IPA_RM_ERR("resource does not exist\n");
+			spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+			goto free_work;
+		}
+		ipa_rm_resource_consumer_handle_cb(
+				(struct ipa_rm_resource_cons *)resource,
+				ipa_rm_work->event);
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		break;
+	default:
+		break;
+	}
+
+free_work:
+	kfree((void *) work);
+}
+
+static void ipa_rm_wq_resume_handler(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work =
+			container_of(work,
+			struct ipa_rm_wq_suspend_resume_work_type,
+			work);
+	IPA_RM_DBG_LOW("resume work handler: %s",
+		ipa_rm_resource_str(ipa_rm_work->resource_name));
+
+	if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
+		IPA_RM_ERR("resource is not CONS\n");
+		return;
+	}
+	IPA_ACTIVE_CLIENTS_INC_RESOURCE(ipa_rm_resource_str(
+			ipa_rm_work->resource_name));
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					ipa_rm_work->resource_name,
+					&resource) != 0){
+		IPA_RM_ERR("resource does not exist\n");
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(
+				ipa_rm_work->resource_name));
+		goto bail;
+	}
+	ipa_rm_resource_consumer_request_work(
+			(struct ipa_rm_resource_cons *)resource,
+			ipa_rm_work->prev_state, ipa_rm_work->needed_bw, true,
+			ipa_rm_work->inc_usage_count);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+bail:
+	kfree(ipa_rm_work);
+}
+
+
+static void ipa_rm_wq_suspend_handler(struct work_struct *work)
+{
+	unsigned long flags;
+	struct ipa_rm_resource *resource;
+	struct ipa_rm_wq_suspend_resume_work_type *ipa_rm_work =
+			container_of(work,
+			struct ipa_rm_wq_suspend_resume_work_type,
+			work);
+	IPA_RM_DBG_LOW("suspend work handler: %s",
+		ipa_rm_resource_str(ipa_rm_work->resource_name));
+
+	if (!IPA_RM_RESORCE_IS_CONS(ipa_rm_work->resource_name)) {
+		IPA_RM_ERR("resource is not CONS\n");
+		return;
+	}
+	ipa_suspend_resource_sync(ipa_rm_work->resource_name);
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					ipa_rm_work->resource_name,
+					&resource) != 0){
+		IPA_RM_ERR("resource does not exist\n");
+		spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+		return;
+	}
+	ipa_rm_resource_consumer_release_work(
+			(struct ipa_rm_resource_cons *)resource,
+			ipa_rm_work->prev_state,
+			true);
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	kfree(ipa_rm_work);
+}
+
+/**
+ * ipa_rm_wq_send_cmd() - send a command for deferred work
+ * @wq_cmd: command that should be executed
+ * @resource_name: resource on which command should be executed
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_event event,
+		bool notify_registered_only)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_work_type *work = kzalloc(sizeof(*work), GFP_ATOMIC);
+
+	if (work) {
+		INIT_WORK((struct work_struct *)work, ipa_rm_wq_handler);
+		work->wq_cmd = wq_cmd;
+		work->resource_name = resource_name;
+		work->event = event;
+		work->notify_registered_only = notify_registered_only;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	} else {
+		IPA_RM_ERR("no mem\n");
+	}
+
+	return result;
+}
+
+int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
+			GFP_ATOMIC);
+	if (work) {
+		INIT_WORK((struct work_struct *)work,
+				ipa_rm_wq_suspend_handler);
+		work->resource_name = resource_name;
+		work->prev_state = prev_state;
+		work->needed_bw = needed_bw;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	} else {
+		IPA_RM_ERR("no mem\n");
+	}
+
+	return result;
+}
+
+int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw,
+		bool inc_usage_count)
+{
+	int result = -ENOMEM;
+	struct ipa_rm_wq_suspend_resume_work_type *work = kzalloc(sizeof(*work),
+			GFP_ATOMIC);
+	if (work) {
+		INIT_WORK((struct work_struct *)work, ipa_rm_wq_resume_handler);
+		work->resource_name = resource_name;
+		work->prev_state = prev_state;
+		work->needed_bw = needed_bw;
+		work->inc_usage_count = inc_usage_count;
+		result = queue_work(ipa_rm_ctx->ipa_rm_wq,
+				(struct work_struct *)work);
+	} else {
+		IPA_RM_ERR("no mem\n");
+	}
+
+	return result;
+}
+/**
+ * ipa_rm_initialize() - initialize IPA RM component
+ *
+ * Returns: 0 on success, negative otherwise
+ */
+int ipa_rm_initialize(void)
+{
+	int result;
+
+	ipa_rm_ctx = kzalloc(sizeof(*ipa_rm_ctx), GFP_KERNEL);
+	if (!ipa_rm_ctx) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	ipa_rm_ctx->ipa_rm_wq = create_singlethread_workqueue("ipa_rm_wq");
+	if (!ipa_rm_ctx->ipa_rm_wq) {
+		IPA_RM_ERR("create workqueue failed\n");
+		result = -ENOMEM;
+		goto create_wq_fail;
+	}
+	result = ipa_rm_dep_graph_create(&(ipa_rm_ctx->dep_graph));
+	if (result) {
+		IPA_RM_ERR("create dependency graph failed\n");
+		goto graph_alloc_fail;
+	}
+	spin_lock_init(&ipa_rm_ctx->ipa_rm_lock);
+	IPA_RM_DBG("SUCCESS\n");
+
+	return 0;
+graph_alloc_fail:
+	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+create_wq_fail:
+	kfree(ipa_rm_ctx);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_stat() - print RM stat
+ * @buf: [in] The buffer used for printing
+ * @size: [in] The size of buf
+ * Returns: number of bytes used on success, negative on failure
+ *
+ * This function is called by ipa_debugfs in order to receive
+ * a full picture of the current state of the RM
+ */
+
+int ipa_rm_stat(char *buf, int size)
+{
+	unsigned long flags;
+	int i, cnt = 0, result = -EINVAL;
+	struct ipa_rm_resource *resource = NULL;
+	u32 sum_bw_prod = 0;
+	u32 sum_bw_cons = 0;
+
+	if (!buf || size < 0)
+		return result;
+
+	spin_lock_irqsave(&ipa_rm_ctx->ipa_rm_lock, flags);
+	for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; ++i) {
+		result = ipa_rm_dep_graph_get_resource(
+				ipa_rm_ctx->dep_graph,
+				i,
+				&resource);
+		if (!result) {
+			result = ipa_rm_resource_producer_print_stat(
+							resource, buf + cnt,
+							size-cnt);
+			if (result < 0)
+				goto bail;
+			cnt += result;
+		}
+	}
+
+	for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
+		sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i];
+
+	for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
+		sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i];
+
+	result = scnprintf(buf + cnt, size - cnt,
+		"All prod bandwidth: %d, All cons bandwidth: %d\n",
+		sum_bw_prod, sum_bw_cons);
+	cnt += result;
+
+	result = scnprintf(buf + cnt, size - cnt,
+		"Voting: voltage %d, bandwidth %d\n",
+		ipa_rm_ctx->prof_vote.curr_volt,
+		ipa_rm_ctx->prof_vote.curr_bw);
+	cnt += result;
+
+	result = cnt;
+bail:
+	spin_unlock_irqrestore(&ipa_rm_ctx->ipa_rm_lock, flags);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_str() - returns string that represent the resource
+ * @resource_name: [in] resource name
+ */
+const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name)
+{
+	if (resource_name < 0 || resource_name >= IPA_RM_RESOURCE_MAX)
+		return "INVALID RESOURCE";
+
+	return resource_name_to_str[resource_name];
+}
+
+static void ipa_rm_perf_profile_notify_to_ipa_work(struct work_struct *work)
+{
+	struct ipa_rm_notify_ipa_work_type *notify_work = container_of(work,
+				struct ipa_rm_notify_ipa_work_type,
+				work);
+	int res;
+
+	IPA_RM_DBG_LOW("calling to IPA driver. voltage %d bandwidth %d\n",
+		notify_work->volt, notify_work->bandwidth_mbps);
+
+	res = ipa_set_required_perf_profile(notify_work->volt,
+		notify_work->bandwidth_mbps);
+	if (res) {
+		IPA_RM_ERR("ipa_set_required_perf_profile failed %d\n", res);
+		goto bail;
+	}
+
+	IPA_RM_DBG_LOW("IPA driver notified\n");
+bail:
+	kfree(notify_work);
+}
+
+static void ipa_rm_perf_profile_notify_to_ipa(enum ipa_voltage_level volt,
+					      u32 bandwidth)
+{
+	struct ipa_rm_notify_ipa_work_type *work;
+
+	work = kzalloc(sizeof(*work), GFP_ATOMIC);
+	if (!work) {
+		IPA_RM_ERR("no mem\n");
+		return;
+	}
+
+	INIT_WORK(&work->work, ipa_rm_perf_profile_notify_to_ipa_work);
+	work->volt = volt;
+	work->bandwidth_mbps = bandwidth;
+	queue_work(ipa_rm_ctx->ipa_rm_wq, &work->work);
+}
+
+/**
+ * ipa_rm_perf_profile_change() - change performance profile vote for resource
+ * @resource_name: [in] resource name
+ *
+ * Change the bandwidth and voltage vote based on the resource state.
+ */
+void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name)
+{
+	enum ipa_voltage_level old_volt;
+	u32 *bw_ptr;
+	u32 old_bw;
+	struct ipa_rm_resource *resource;
+	int i;
+	u32 sum_bw_prod = 0;
+	u32 sum_bw_cons = 0;
+
+	IPA_RM_DBG_LOW("%s\n", ipa_rm_resource_str(resource_name));
+
+	if (ipa_rm_dep_graph_get_resource(ipa_rm_ctx->dep_graph,
+					  resource_name,
+					  &resource) != 0) {
+		IPA_RM_ERR("resource does not exists\n");
+		WARN_ON(1);
+		return;
+	}
+
+	old_volt = ipa_rm_ctx->prof_vote.curr_volt;
+	old_bw = ipa_rm_ctx->prof_vote.curr_bw;
+
+	if (IPA_RM_RESORCE_IS_PROD(resource_name)) {
+		bw_ptr = &ipa_rm_ctx->prof_vote.bw_prods[resource_name];
+	} else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		bw_ptr = &ipa_rm_ctx->prof_vote.bw_cons[
+				resource_name - IPA_RM_RESOURCE_PROD_MAX];
+	} else {
+		IPA_RM_ERR("Invalid resource_name\n");
+		return;
+	}
+
+	switch (resource->state) {
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		IPA_RM_DBG_LOW("max_bw = %d, needed_bw = %d\n",
+			resource->max_bw, resource->needed_bw);
+		*bw_ptr = min(resource->max_bw, resource->needed_bw);
+		ipa_rm_ctx->prof_vote.volt[resource_name] =
+						resource->floor_voltage;
+		break;
+
+	case IPA_RM_RELEASE_IN_PROGRESS:
+	case IPA_RM_RELEASED:
+		*bw_ptr = 0;
+		ipa_rm_ctx->prof_vote.volt[resource_name] = 0;
+		break;
+
+	default:
+		IPA_RM_ERR("unknown state %d\n", resource->state);
+		WARN_ON(1);
+		return;
+	}
+	IPA_RM_DBG_LOW("resource bandwidth: %d voltage: %d\n", *bw_ptr,
+					resource->floor_voltage);
+
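+	/*
+	 * Aggregate the overall vote (illustrative summary): the voltage vote
+	 * is the maximum floor voltage over all resources, and the bandwidth
+	 * vote is the minimum of the summed producer and summed consumer
+	 * bandwidths, e.g. producers voting 100 + 200 Mbps against a consumer
+	 * voting 150 Mbps yields a 150 Mbps vote (example numbers only).
+	 */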
+	ipa_rm_ctx->prof_vote.curr_volt = IPA_VOLTAGE_UNSPECIFIED;
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		if (ipa_rm_ctx->prof_vote.volt[i] >
+				ipa_rm_ctx->prof_vote.curr_volt) {
+			ipa_rm_ctx->prof_vote.curr_volt =
+				ipa_rm_ctx->prof_vote.volt[i];
+		}
+	}
+
+	for (i = 0; i < IPA_RM_RESOURCE_PROD_MAX; i++)
+		sum_bw_prod += ipa_rm_ctx->prof_vote.bw_prods[i];
+
+	for (i = 0; i < IPA_RM_RESOURCE_CONS_MAX; i++)
+		sum_bw_cons += ipa_rm_ctx->prof_vote.bw_cons[i];
+
+	IPA_RM_DBG_LOW("all prod bandwidth: %d all cons bandwidth: %d\n",
+		sum_bw_prod, sum_bw_cons);
+	ipa_rm_ctx->prof_vote.curr_bw = min(sum_bw_prod, sum_bw_cons);
+
+	if (ipa_rm_ctx->prof_vote.curr_volt == old_volt &&
+		ipa_rm_ctx->prof_vote.curr_bw == old_bw) {
+		IPA_RM_DBG_LOW("same voting\n");
+		return;
+	}
+
+	IPA_RM_DBG_LOW("new voting: voltage %d bandwidth %d\n",
+		ipa_rm_ctx->prof_vote.curr_volt,
+		ipa_rm_ctx->prof_vote.curr_bw);
+
+	ipa_rm_perf_profile_notify_to_ipa(ipa_rm_ctx->prof_vote.curr_volt,
+			ipa_rm_ctx->prof_vote.curr_bw);
+}
+
+/**
+ * ipa_rm_exit() - free all IPA RM resources
+ */
+void ipa_rm_exit(void)
+{
+	IPA_RM_DBG("ENTER\n");
+	ipa_rm_dep_graph_delete(ipa_rm_ctx->dep_graph);
+	destroy_workqueue(ipa_rm_ctx->ipa_rm_wq);
+	kfree(ipa_rm_ctx);
+	ipa_rm_ctx = NULL;
+	IPA_RM_DBG("EXIT\n");
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
new file mode 100644
index 0000000..54cad88
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.c
@@ -0,0 +1,251 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_dependency_graph.h"
+#include "ipa_rm_i.h"
+
+static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
+{
+	int resource_index = IPA_RM_INDEX_INVALID;
+
+	if (IPA_RM_RESORCE_IS_PROD(resource_name))
+		resource_index = ipa_rm_prod_index(resource_name);
+	else if (IPA_RM_RESORCE_IS_CONS(resource_name))
+		resource_index = ipa_rm_cons_index(resource_name);
+
+	return resource_index;
+}
+
+/**
+ * ipa_rm_dep_graph_create() - creates the dependency graph
+ * @dep_graph: [out] created dependency graph
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph)
+{
+	int result = 0;
+
+	*dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
+	if (!*dep_graph) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_delete() - destroys the graph
+ * @graph: [in] dependency graph
+ *
+ * Frees all resources.
+ */
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph)
+{
+	int resource_index;
+
+	if (!graph) {
+		IPA_RM_ERR("invalid params\n");
+		return;
+	}
+	for (resource_index = 0;
+			resource_index < IPA_RM_RESOURCE_MAX;
+			resource_index++)
+		kfree(graph->resource_table[resource_index]);
+	memset(graph->resource_table, 0, sizeof(graph->resource_table));
+}
+
+/**
+ * ipa_rm_dep_graph_get_resource() - provides a resource by name
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource
+ * @resource: [out] resource in case of success
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_get_resource(
+				struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				struct ipa_rm_resource **resource)
+{
+	int result;
+	int resource_index;
+
+	if (!graph) {
+		result = -EINVAL;
+		goto bail;
+	}
+	resource_index = ipa_rm_dep_get_index(resource_name);
+	if (resource_index == IPA_RM_INDEX_INVALID) {
+		result = -EINVAL;
+		goto bail;
+	}
+	*resource = graph->resource_table[resource_index];
+	if (!*resource) {
+		result = -EINVAL;
+		goto bail;
+	}
+	result = 0;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_add() - adds resource to graph
+ * @graph: [in] dependency graph
+ * @resource: [in] resource to add
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+			 struct ipa_rm_resource *resource)
+{
+	int result = 0;
+	int resource_index;
+
+	if (!graph || !resource) {
+		result = -EINVAL;
+		goto bail;
+	}
+	resource_index = ipa_rm_dep_get_index(resource->name);
+	if (resource_index == IPA_RM_INDEX_INVALID) {
+		result = -EINVAL;
+		goto bail;
+	}
+	graph->resource_table[resource_index] = resource;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_dep_graph_remove() - removes resource from graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the resource to remove
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
+		enum ipa_rm_resource_name resource_name)
+{
+	if (!graph)
+		return -EINVAL;
+	graph->resource_table[resource_name] = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa_rm_dep_graph_add_dependency() - adds dependency between
+ *				two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the dependent resource
+ * @depends_on_name: [in] name of the resource it depends on
+ * @userspace_dep: [in] whether the operation was requested by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+				    enum ipa_rm_resource_name resource_name,
+				    enum ipa_rm_resource_name depends_on_name,
+				    bool userspace_dep)
+{
+	struct ipa_rm_resource *dependent = NULL;
+	struct ipa_rm_resource *dependency = NULL;
+	int result;
+
+	if (!graph ||
+		!IPA_RM_RESORCE_IS_PROD(resource_name) ||
+		!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+		IPA_RM_ERR("invalid params\n");
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  resource_name,
+					  &dependent)) {
+		IPA_RM_ERR("%s does not exist\n",
+					ipa_rm_resource_str(resource_name));
+		result = -EINVAL;
+		goto bail;
+	}
+	if (ipa_rm_dep_graph_get_resource(graph,
+					depends_on_name,
+					  &dependency)) {
+		IPA_RM_ERR("%s does not exist\n",
+					ipa_rm_resource_str(depends_on_name));
+		result = -EINVAL;
+		goto bail;
+	}
+	result = ipa_rm_resource_add_dependency(dependent, dependency,
+		userspace_dep);
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
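+
+/*
+ * Illustrative call (example only, not taken from this patch): dependencies
+ * are always added from a producer to a consumer, e.g.
+ *
+ *	ipa_rm_dep_graph_add_dependency(graph, IPA_RM_RESOURCE_USB_PROD,
+ *					IPA_RM_RESOURCE_Q6_CONS, false);
+ */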
+
+/**
+ * ipa_rm_dep_graph_delete_dependency() - deletes the dependency between
+ *				two nodes in graph
+ * @graph: [in] dependency graph
+ * @resource_name: [in] name of the dependent resource
+ * @depends_on_name: [in] name of the resource it depends on
+ * @userspace_dep: [in] whether the operation was requested by userspace
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ */
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep)
+{
+	struct ipa_rm_resource *dependent = NULL;
+	struct ipa_rm_resource *dependency = NULL;
+	int result;
+
+	if (!graph ||
+		!IPA_RM_RESORCE_IS_PROD(resource_name) ||
+		!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
+		IPA_RM_ERR("invalid params\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  resource_name,
+					  &dependent)) {
+		IPA_RM_ERR("%s does not exist\n",
+					ipa_rm_resource_str(resource_name));
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (ipa_rm_dep_graph_get_resource(graph,
+					  depends_on_name,
+					  &dependency)) {
+		IPA_RM_ERR("%s does not exist\n",
+					ipa_rm_resource_str(depends_on_name));
+		result = -EINVAL;
+		goto bail;
+	}
+
+	result = ipa_rm_resource_delete_dependency(dependent, dependency,
+		userspace_dep);
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
new file mode 100644
index 0000000..e322d81
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_dependency_graph.h
@@ -0,0 +1,49 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
+#define _IPA_RM_DEPENDENCY_GRAPH_H_
+
+#include <linux/list.h>
+#include <linux/ipa.h>
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_dep_graph {
+	struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
+};
+
+int ipa_rm_dep_graph_get_resource(
+				struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name name,
+				struct ipa_rm_resource **resource);
+
+int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph);
+
+void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph);
+
+int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
+			 struct ipa_rm_resource *resource);
+
+int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep);
+
+int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
+				enum ipa_rm_resource_name resource_name,
+				enum ipa_rm_resource_name depends_on_name,
+				bool userspace_dep);
+
+#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_i.h b/drivers/platform/msm/ipa/ipa_rm_i.h
new file mode 100644
index 0000000..1610bb1
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_i.h
@@ -0,0 +1,159 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_I_H_
+#define _IPA_RM_I_H_
+
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_resource.h"
+#include "ipa_common_i.h"
+
+#define IPA_RM_DRV_NAME "ipa_rm"
+
+#define IPA_RM_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+#define IPA_RM_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_RM_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_RM_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_RM_RESOURCE_CONS_MAX \
+	(IPA_RM_RESOURCE_MAX - IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_PROD(x) \
+	(x >= IPA_RM_RESOURCE_PROD && x < IPA_RM_RESOURCE_PROD_MAX)
+#define IPA_RM_RESORCE_IS_CONS(x) \
+	(x >= IPA_RM_RESOURCE_PROD_MAX && x < IPA_RM_RESOURCE_MAX)
+#define IPA_RM_INDEX_INVALID	(-1)
+#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000
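+
+/*
+ * Illustrative note: the macros above assume that enum ipa_rm_resource_name
+ * lists all producers first, in [IPA_RM_RESOURCE_PROD,
+ * IPA_RM_RESOURCE_PROD_MAX), followed by all consumers in
+ * [IPA_RM_RESOURCE_PROD_MAX, IPA_RM_RESOURCE_MAX), so a consumer's slot in a
+ * consumer-sized array is (resource_name - IPA_RM_RESOURCE_PROD_MAX).
+ */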
+
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name);
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name);
+
+/**
+ * struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release
+ *				work type
+ * @delayed_work: work struct
+ * @resource_name: name of the resource on which this work should be done
+ * @needed_bw: bandwidth required for the resource in Mbps
+ * @dec_usage_count: whether to decrease the usage count on release
+ */
+struct ipa_rm_delayed_release_work_type {
+	struct delayed_work		work;
+	enum ipa_rm_resource_name	resource_name;
+	u32				needed_bw;
+	bool				dec_usage_count;
+
+};
+
+/**
+ * enum ipa_rm_wq_cmd - workqueue commands
+ */
+enum ipa_rm_wq_cmd {
+	IPA_RM_WQ_NOTIFY_PROD,
+	IPA_RM_WQ_NOTIFY_CONS,
+	IPA_RM_WQ_RESOURCE_CB
+};
+
+/**
+ * struct ipa_rm_wq_work_type - IPA RM workqueue specific
+ *				work type
+ * @work: work struct
+ * @wq_cmd: command that should be processed in workqueue context
+ * @resource_name: name of the resource on which this work
+ *			should be done
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ */
+struct ipa_rm_wq_work_type {
+	struct work_struct		work;
+	enum ipa_rm_wq_cmd		wq_cmd;
+	enum ipa_rm_resource_name	resource_name;
+	enum ipa_rm_event		event;
+	bool				notify_registered_only;
+};
+
+/**
+ * struct ipa_rm_wq_suspend_resume_work_type - IPA RM workqueue resume or
+ *				suspend work type
+ * @work: work struct
+ * @resource_name: name of the resource on which this work
+ *			should be done
+ * @prev_state: previous state of the resource
+ * @needed_bw: bandwidth required for the resource in Mbps
+ * @inc_usage_count: whether to increase the usage count on resume
+ */
+struct ipa_rm_wq_suspend_resume_work_type {
+	struct work_struct		work;
+	enum ipa_rm_resource_name	resource_name;
+	enum ipa_rm_resource_state	prev_state;
+	u32				needed_bw;
+	bool				inc_usage_count;
+
+};
+
+int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_event event,
+		bool notify_registered_only);
+
+int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw,
+		bool inc_usage_count);
+
+int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw);
+
+int ipa_rm_initialize(void);
+
+int ipa_rm_stat(char *buf, int size);
+
+const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
+
+void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name);
+
+void delayed_release_work_func(struct work_struct *work);
+
+int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
+	enum ipa_rm_resource_name depends_on_name);
+
+void ipa_rm_exit(void);
+
+#endif /* _IPA_RM_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
new file mode 100644
index 0000000..8e33d71
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_inactivity_timer.c
@@ -0,0 +1,273 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/unistd.h>
+#include <linux/workqueue.h>
+#include <linux/ipa.h>
+#include "ipa_rm_i.h"
+
+/**
+ * struct ipa_rm_it_private - IPA RM Inactivity Timer private
+ *	data
+ * @initied: indicates if the instance was initialized
+ * @lock: spinlock for mutual exclusion
+ * @resource_name: resource name
+ * @work: delayed work object for running the delayed release
+ *	function
+ * @resource_requested: boolean flag, indicates if the resource was requested
+ * @reschedule_work: boolean flag, indicates not to release but to
+ *	reschedule the release work instead
+ * @work_in_progress: boolean flag, indicates whether the release work was
+ *	scheduled
+ * @jiffies: number of jiffies for the timeout
+ */
+struct ipa_rm_it_private {
+	bool initied;
+	enum ipa_rm_resource_name resource_name;
+	spinlock_t lock;
+	struct delayed_work work;
+	bool resource_requested;
+	bool reschedule_work;
+	bool work_in_progress;
+	unsigned long jiffies;
+};
+
+static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
+
+/**
+ * ipa_rm_inactivity_timer_func() - called when the timer expires, in
+ * the context of the shared workqueue. Checks internally if the
+ * reschedule_work flag is set. In case it is not set, this function calls
+ * ipa_rm_release_resource(). In case reschedule_work is set, this function
+ * reschedules the work. This flag is cleared when calling
+ * ipa_rm_inactivity_timer_release_resource().
+ *
+ * @work: work object provided by the work queue
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_inactivity_timer_func(struct work_struct *work)
+{
+
+	struct ipa_rm_it_private *me = container_of(to_delayed_work(work),
+						    struct ipa_rm_it_private,
+						    work);
+	unsigned long flags;
+
+	IPA_RM_DBG_LOW("%s: timer expired for resource %d!\n", __func__,
+	    me->resource_name);
+
+	spin_lock_irqsave(
+		&ipa_rm_it_handles[me->resource_name].lock, flags);
+	if (ipa_rm_it_handles[me->resource_name].reschedule_work) {
+		IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
+		ipa_rm_it_handles[me->resource_name].reschedule_work = false;
+		queue_delayed_work(system_unbound_wq,
+			&ipa_rm_it_handles[me->resource_name].work,
+			ipa_rm_it_handles[me->resource_name].jiffies);
+	} else if (ipa_rm_it_handles[me->resource_name].resource_requested) {
+		IPA_RM_DBG_LOW("%s: not calling release\n", __func__);
+		ipa_rm_it_handles[me->resource_name].work_in_progress = false;
+	} else {
+		IPA_RM_DBG_LOW("%s: calling release_resource on resource %d!\n",
+			__func__, me->resource_name);
+		ipa_rm_release_resource(me->resource_name);
+		ipa_rm_it_handles[me->resource_name].work_in_progress = false;
+	}
+	spin_unlock_irqrestore(
+		&ipa_rm_it_handles[me->resource_name].lock, flags);
+}
+
+/**
+* ipa_rm_inactivity_timer_init() - Init function for IPA RM
+* inactivity timer. This function shall be called prior to calling
+* any other API of the IPA RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+* @msecs: time in milliseconds that the IPA RM inactivity timer
+* shall wait before calling ipa_rm_release_resource().
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+				 unsigned long msecs)
+{
+	IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("%s: resource %d already inited\n",
+		    __func__, resource_name);
+		return -EINVAL;
+	}
+
+	spin_lock_init(&ipa_rm_it_handles[resource_name].lock);
+	ipa_rm_it_handles[resource_name].resource_name = resource_name;
+	ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
+	ipa_rm_it_handles[resource_name].resource_requested = false;
+	ipa_rm_it_handles[resource_name].reschedule_work = false;
+	ipa_rm_it_handles[resource_name].work_in_progress = false;
+
+	INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
+			  ipa_rm_inactivity_timer_func);
+	ipa_rm_it_handles[resource_name].initied = 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_init);
+
+/**
+* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
+* RM inactivity timer.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
+{
+	IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("%s: resource %d already inited\n",
+		    __func__, resource_name);
+		return -EINVAL;
+	}
+
+	cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work);
+
+	memset(&ipa_rm_it_handles[resource_name], 0,
+	       sizeof(struct ipa_rm_it_private));
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy);
+
+/**
+* ipa_rm_inactivity_timer_request_resource() - Same as
+* ipa_rm_request_resource(), with the difference that calling
+* this function will also cancel the inactivity timer, if
+* ipa_rm_inactivity_timer_release_resource() was called earlier.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	int ret;
+	unsigned long flags;
+
+	IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("%s: Not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+	ipa_rm_it_handles[resource_name].resource_requested = true;
+	spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+	ret = ipa_rm_request_resource(resource_name);
+	IPA_RM_DBG_LOW("%s: resource %d: returning %d\n", __func__,
+		resource_name, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource);
+
+/**
+* ipa_rm_inactivity_timer_release_resource() - Sets the
+* inactivity timer to the timeout set by
+* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
+* RM inactivity timer will call ipa_rm_release_resource().
+* If a call to ipa_rm_inactivity_timer_request_resource() was
+* made BEFORE the timeout has expired, the timer will be
+* cancelled.
+*
+* @resource_name: Resource name. @see ipa_rm.h
+*
+* Return codes:
+* 0: success
+* -EINVAL: invalid parameters
+*/
+int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	unsigned long flags;
+
+	IPA_RM_DBG_LOW("%s: resource %d\n", __func__, resource_name);
+
+	if (resource_name < 0 ||
+	    resource_name >= IPA_RM_RESOURCE_MAX) {
+		IPA_RM_ERR("%s: Invalid parameter\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_it_handles[resource_name].initied) {
+		IPA_RM_ERR("%s: Not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
+	ipa_rm_it_handles[resource_name].resource_requested = false;
+	if (ipa_rm_it_handles[resource_name].work_in_progress) {
+		IPA_RM_DBG_LOW("%s: Timer already set, no sched again %d\n",
+		    __func__, resource_name);
+		ipa_rm_it_handles[resource_name].reschedule_work = true;
+		spin_unlock_irqrestore(
+			&ipa_rm_it_handles[resource_name].lock, flags);
+		return 0;
+	}
+	ipa_rm_it_handles[resource_name].work_in_progress = true;
+	ipa_rm_it_handles[resource_name].reschedule_work = false;
+	IPA_RM_DBG_LOW("%s: setting delayed work\n", __func__);
+	queue_delayed_work(system_unbound_wq,
+			      &ipa_rm_it_handles[resource_name].work,
+			      ipa_rm_it_handles[resource_name].jiffies);
+	spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource);
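+
+/*
+ * Illustrative usage sketch (example only): a client driver would typically
+ * initialize the timer once, request the resource when traffic starts and
+ * arm the inactivity timeout when it goes idle. The resource name and the
+ * 1000 ms timeout below are examples.
+ *
+ *	ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 1000);
+ *	...
+ *	ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	...
+ *	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);
+ *	...
+ *	ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);
+ */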
+
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.c b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
new file mode 100644
index 0000000..fe8e781
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.c
@@ -0,0 +1,280 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_i.h"
+
+/**
+ * ipa_rm_peers_list_get_resource_index() - resource name to index
+ *	of this resource in corresponding peers list
+ * @resource_name: [in] resource name
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ * in case provided resource name isn't contained in enum
+ * ipa_rm_resource_name.
+ *
+ */
+static int ipa_rm_peers_list_get_resource_index(
+		enum ipa_rm_resource_name resource_name)
+{
+	int resource_index = IPA_RM_INDEX_INVALID;
+
+	if (IPA_RM_RESORCE_IS_PROD(resource_name))
+		resource_index = ipa_rm_prod_index(resource_name);
+	else if (IPA_RM_RESORCE_IS_CONS(resource_name)) {
+		resource_index = ipa_rm_cons_index(resource_name);
+		if (resource_index != IPA_RM_INDEX_INVALID)
+			resource_index =
+				resource_index - IPA_RM_RESOURCE_PROD_MAX;
+	}
+
+	return resource_index;
+}
+
+static bool ipa_rm_peers_list_check_index(int index,
+		struct ipa_rm_peers_list *peers_list)
+{
+	return !(index >= peers_list->max_peers || index < 0);
+}
+
+/**
+ * ipa_rm_peers_list_create() - creates the peers list
+ *
+ * @max_peers: maximum number of peers in new list
+ * @peers_list: [out] newly created peers list
+ *
+ * Returns: 0 in case of SUCCESS, negative otherwise
+ */
+int ipa_rm_peers_list_create(int max_peers,
+		struct ipa_rm_peers_list **peers_list)
+{
+	int result;
+
+	*peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC);
+	if (!*peers_list) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	(*peers_list)->max_peers = max_peers;
+	(*peers_list)->peers = kzalloc((*peers_list)->max_peers *
+			sizeof(*((*peers_list)->peers)), GFP_ATOMIC);
+	if (!((*peers_list)->peers)) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto list_alloc_fail;
+	}
+
+	return 0;
+
+list_alloc_fail:
+	kfree(*peers_list);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_delete() - deletes the peers list
+ *
+ * @peers_list: peers list
+ *
+ */
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list)
+{
+	if (peers_list) {
+		kfree(peers_list->peers);
+		kfree(peers_list);
+	}
+}
+
+/**
+ * ipa_rm_peers_list_remove_peer() - removes peer from the list
+ *
+ * @peers_list: peers list
+ * @resource_name: name of the resource to remove
+ *
+ */
+void ipa_rm_peers_list_remove_peer(
+		struct ipa_rm_peers_list *peers_list,
+		enum ipa_rm_resource_name resource_name)
+{
+	if (!peers_list)
+		return;
+
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource_name)].resource = NULL;
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource_name)].userspace_dep = false;
+	peers_list->peers_count--;
+}
+
+/**
+ * ipa_rm_peers_list_add_peer() - adds peer to the list
+ *
+ * @peers_list: peers list
+ * @resource: resource to add
+ *
+ */
+void ipa_rm_peers_list_add_peer(
+		struct ipa_rm_peers_list *peers_list,
+		struct ipa_rm_resource *resource,
+		bool userspace_dep)
+{
+	if (!peers_list || !resource)
+		return;
+
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+			resource->name)].resource = resource;
+	peers_list->peers[ipa_rm_peers_list_get_resource_index(
+		resource->name)].userspace_dep = userspace_dep;
+	peers_list->peers_count++;
+}
+
+/**
+ * ipa_rm_peers_list_is_empty() - checks
+ *	if resource peers list is empty
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list is empty, false otherwise
+ */
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list)
+{
+	bool result = true;
+
+	if (!peers_list)
+		goto bail;
+
+	if (peers_list->peers_count > 0)
+		result = false;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_has_last_peer() - checks
+ *	if resource peers list has exactly one peer
+ *
+ * @peers_list: peers list
+ *
+ * Returns: true if the list has exactly one peer, false otherwise
+ */
+bool ipa_rm_peers_list_has_last_peer(
+		struct ipa_rm_peers_list *peers_list)
+{
+	bool result = false;
+
+	if (!peers_list)
+		goto bail;
+
+	if (peers_list->peers_count == 1)
+		result = true;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_check_dependency() - check dependency
+ *	between 2 peer lists
+ * @resource_peers: first peers list
+ * @resource_name: first peers list resource name
+ * @depends_on_peers: second peers list
+ * @depends_on_name: second peers list resource name
+ * @userspace_dep: [out] dependency was created by userspace
+ *
+ * Returns: true if there is dependency, false otherwise
+ *
+ */
+bool ipa_rm_peers_list_check_dependency(
+		struct ipa_rm_peers_list *resource_peers,
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_peers_list *depends_on_peers,
+		enum ipa_rm_resource_name depends_on_name,
+		bool *userspace_dep)
+{
+	bool result = false;
+	int resource_index;
+
+	if (!resource_peers || !depends_on_peers || !userspace_dep)
+		return result;
+
+	resource_index = ipa_rm_peers_list_get_resource_index(depends_on_name);
+	if (resource_peers->peers[resource_index].resource != NULL) {
+		result = true;
+		*userspace_dep = resource_peers->peers[resource_index].
+			userspace_dep;
+	}
+
+	resource_index = ipa_rm_peers_list_get_resource_index(resource_name);
+	if (depends_on_peers->peers[resource_index].resource != NULL) {
+		result = true;
+		*userspace_dep = depends_on_peers->peers[resource_index].
+			userspace_dep;
+	}
+
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_resource() - get resource by
+ *	resource index
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: the resource if found, NULL otherwise
+ */
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+		struct ipa_rm_peers_list *resource_peers)
+{
+	struct ipa_rm_resource *result = NULL;
+
+	if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+		goto bail;
+
+	result = resource_peers->peers[resource_index].resource;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_userspace_dep() - returns whether resource dependency
+ * was added by userspace
+ * @resource_index: resource index
+ * @resource_peers: peers list
+ *
+ * Returns: true if the dependency was added by userspace, false otherwise
+ */
+bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
+		struct ipa_rm_peers_list *resource_peers)
+{
+	bool result = false;
+
+	if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
+		goto bail;
+
+	result = resource_peers->peers[resource_index].userspace_dep;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_peers_list_get_size() - get peers list size
+ *
+ * @peers_list: peers list
+ *
+ * Returns: the size of the peers list
+ */
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list)
+{
+	return peers_list->max_peers;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_peers_list.h b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
new file mode 100644
index 0000000..cf1c157
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_peers_list.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_PEERS_LIST_H_
+#define _IPA_RM_PEERS_LIST_H_
+
+#include "ipa_rm_resource.h"
+
+struct ipa_rm_resource_peer {
+	struct ipa_rm_resource *resource;
+	bool userspace_dep;
+};
+
+/**
+ * struct ipa_rm_peers_list - IPA RM resource peers list
+ * @peers: the list of references to resources dependent on this resource
+ *          in case of producer or list of dependencies in case of consumer
+ * @max_peers: maximum number of peers for this resource
+ * @peers_count: actual number of peers for this resource
+ */
+struct ipa_rm_peers_list {
+	struct ipa_rm_resource_peer	*peers;
+	int				max_peers;
+	int				peers_count;
+};
+
+int ipa_rm_peers_list_create(int max_peers,
+		struct ipa_rm_peers_list **peers_list);
+void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list);
+void ipa_rm_peers_list_remove_peer(
+		struct ipa_rm_peers_list *peers_list,
+		enum ipa_rm_resource_name resource_name);
+void ipa_rm_peers_list_add_peer(
+		struct ipa_rm_peers_list *peers_list,
+		struct ipa_rm_resource *resource,
+		bool userspace_dep);
+bool ipa_rm_peers_list_check_dependency(
+		struct ipa_rm_peers_list *resource_peers,
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_peers_list *depends_on_peers,
+		enum ipa_rm_resource_name depends_on_name,
+		bool *userspace_dep);
+struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
+		struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
+		struct ipa_rm_peers_list *resource_peers);
+int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list);
+bool ipa_rm_peers_list_has_last_peer(
+		struct ipa_rm_peers_list *peers_list);
+
+#endif /* _IPA_RM_PEERS_LIST_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
new file mode 100644
index 0000000..6657bd9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -0,0 +1,1212 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include "ipa_rm_resource.h"
+#include "ipa_rm_i.h"
+#include "ipa_common_i.h"
+/**
+ * ipa_rm_prod_index() - producer name to producer index mapping
+ * @resource_name: [in] resource name (should be a producer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ *	in case the provided resource name isn't contained
+ *	in enum ipa_rm_resource_name or is not a producer.
+ *
+ */
+int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name)
+{
+	int result = resource_name;
+
+	switch (resource_name) {
+	case IPA_RM_RESOURCE_Q6_PROD:
+	case IPA_RM_RESOURCE_USB_PROD:
+	case IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD:
+	case IPA_RM_RESOURCE_HSIC_PROD:
+	case IPA_RM_RESOURCE_STD_ECM_PROD:
+	case IPA_RM_RESOURCE_RNDIS_PROD:
+	case IPA_RM_RESOURCE_WWAN_0_PROD:
+	case IPA_RM_RESOURCE_WLAN_PROD:
+	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
+	case IPA_RM_RESOURCE_MHI_PROD:
+		break;
+	default:
+		result = IPA_RM_INDEX_INVALID;
+		break;
+	}
+
+	return result;
+}
+
+/**
+ * ipa_rm_cons_index() - consumer name to consumer index mapping
+ * @resource_name: [in] resource name (should be a consumer)
+ *
+ * Returns: resource index mapping, IPA_RM_INDEX_INVALID
+ *	in case the provided resource name isn't contained
+ *	in enum ipa_rm_resource_name or is not a consumer.
+ *
+ */
+int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name)
+{
+	int result = resource_name;
+
+	switch (resource_name) {
+	case IPA_RM_RESOURCE_Q6_CONS:
+	case IPA_RM_RESOURCE_USB_CONS:
+	case IPA_RM_RESOURCE_HSIC_CONS:
+	case IPA_RM_RESOURCE_WLAN_CONS:
+	case IPA_RM_RESOURCE_APPS_CONS:
+	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
+	case IPA_RM_RESOURCE_MHI_CONS:
+	case IPA_RM_RESOURCE_USB_DPL_CONS:
+		break;
+	default:
+		result = IPA_RM_INDEX_INVALID;
+		break;
+	}
+
+	return result;
+}
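+
+/*
+ * Informal overview of the consumer state machine implemented in this file:
+ * a request moves a consumer from IPA_RM_RELEASED (or
+ * IPA_RM_RELEASE_IN_PROGRESS) through IPA_RM_REQUEST_IN_PROGRESS to
+ * IPA_RM_GRANTED, and a release moves it from IPA_RM_GRANTED through
+ * IPA_RM_RELEASE_IN_PROGRESS back to IPA_RM_RELEASED; an -EINPROGRESS
+ * return code indicates that the transition will complete asynchronously.
+ */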
+
+int ipa_rm_resource_consumer_release_work(
+		struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		bool notify_completion)
+{
+	int driver_result;
+
+	IPA_RM_DBG_LOW("calling driver CB\n");
+	driver_result = consumer->release_resource();
+	IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result);
+	/*
+	 * Treat IPA_RM_RELEASE_IN_PROGRESS as IPA_RM_RELEASED
+	 * for CONS which remains in RELEASE_IN_PROGRESS.
+	 */
+	if (driver_result == -EINPROGRESS)
+		driver_result = 0;
+	if (driver_result != 0 && driver_result != -EINPROGRESS) {
+		IPA_RM_ERR("driver CB returned error %d\n", driver_result);
+		consumer->resource.state = prev_state;
+		goto bail;
+	}
+	if (driver_result == 0) {
+		if (notify_completion)
+			ipa_rm_resource_consumer_handle_cb(consumer,
+					IPA_RM_RESOURCE_RELEASED);
+		else
+			consumer->resource.state = IPA_RM_RELEASED;
+	}
+	complete_all(&consumer->request_consumer_in_progress);
+
+	ipa_rm_perf_profile_change(consumer->resource.name);
+bail:
+	return driver_result;
+}
+
+int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		u32 prod_needed_bw,
+		bool notify_completion,
+		bool dec_client_on_err)
+{
+	int driver_result;
+
+	IPA_RM_DBG_LOW("calling driver CB\n");
+	driver_result = consumer->request_resource();
+	IPA_RM_DBG_LOW("driver CB returned with %d\n", driver_result);
+	if (driver_result == 0) {
+		if (notify_completion) {
+			ipa_rm_resource_consumer_handle_cb(consumer,
+					IPA_RM_RESOURCE_GRANTED);
+		} else {
+			consumer->resource.state = IPA_RM_GRANTED;
+			ipa_rm_perf_profile_change(consumer->resource.name);
+			ipa_resume_resource(consumer->resource.name);
+		}
+	} else if (driver_result != -EINPROGRESS) {
+		consumer->resource.state = prev_state;
+		consumer->resource.needed_bw -= prod_needed_bw;
+		if (dec_client_on_err)
+			consumer->usage_count--;
+	}
+
+	return driver_result;
+}
+
+int ipa_rm_resource_consumer_request(
+		struct ipa_rm_resource_cons *consumer,
+		u32 prod_needed_bw,
+		bool inc_usage_count,
+		bool wake_client)
+{
+	int result = 0;
+	enum ipa_rm_resource_state prev_state;
+	struct ipa_active_client_logging_info log_info;
+
+	IPA_RM_DBG_LOW("%s state: %d\n",
+			ipa_rm_resource_str(consumer->resource.name),
+			consumer->resource.state);
+
+	prev_state = consumer->resource.state;
+	consumer->resource.needed_bw += prod_needed_bw;
+	switch (consumer->resource.state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		reinit_completion(&consumer->request_consumer_in_progress);
+		consumer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+				ipa_rm_resource_str(consumer->resource.name));
+		if (prev_state == IPA_RM_RELEASE_IN_PROGRESS ||
+			ipa_inc_client_enable_clks_no_block(&log_info) != 0) {
+			IPA_RM_DBG_LOW("async resume work for %s\n",
+				ipa_rm_resource_str(consumer->resource.name));
+			ipa_rm_wq_send_resume_cmd(consumer->resource.name,
+						prev_state,
+						prod_needed_bw,
+						inc_usage_count);
+			result = -EINPROGRESS;
+			break;
+		}
+		result = ipa_rm_resource_consumer_request_work(consumer,
+						prev_state,
+						prod_needed_bw,
+						false,
+						inc_usage_count);
+		break;
+	case IPA_RM_GRANTED:
+		if (wake_client) {
+			result = ipa_rm_resource_consumer_request_work(
+				consumer, prev_state, prod_needed_bw, false,
+				inc_usage_count);
+			break;
+		}
+		ipa_rm_perf_profile_change(consumer->resource.name);
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		result = -EINPROGRESS;
+		break;
+	default:
+		consumer->resource.needed_bw -= prod_needed_bw;
+		result = -EPERM;
+		goto bail;
+	}
+	if (inc_usage_count)
+		consumer->usage_count++;
+bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+	IPA_RM_DBG_LOW("EXIT with %d\n", result);
+
+	return result;
+}
+
+int ipa_rm_resource_consumer_release(
+		struct ipa_rm_resource_cons *consumer,
+		u32 prod_needed_bw,
+		bool dec_usage_count)
+{
+	int result = 0;
+	enum ipa_rm_resource_state save_state;
+
+	IPA_RM_DBG_LOW("%s state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+	save_state = consumer->resource.state;
+	consumer->resource.needed_bw -= prod_needed_bw;
+	switch (consumer->resource.state) {
+	case IPA_RM_RELEASED:
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (dec_usage_count && consumer->usage_count > 0)
+			consumer->usage_count--;
+		if (consumer->usage_count == 0) {
+			consumer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+			if (save_state == IPA_RM_REQUEST_IN_PROGRESS ||
+			    ipa_suspend_resource_no_block(
+						consumer->resource.name) != 0) {
+				ipa_rm_wq_send_suspend_cmd(
+						consumer->resource.name,
+						save_state,
+						prod_needed_bw);
+				result = -EINPROGRESS;
+				goto bail;
+			}
+			result = ipa_rm_resource_consumer_release_work(consumer,
+					save_state, false);
+			goto bail;
+		} else if (consumer->resource.state == IPA_RM_GRANTED) {
+			ipa_rm_perf_profile_change(consumer->resource.name);
+		}
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (dec_usage_count && consumer->usage_count > 0)
+			consumer->usage_count--;
+		result = -EINPROGRESS;
+		break;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+	IPA_RM_DBG_LOW("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_notify_clients() - notify
+ *	all registered clients of given producer
+ * @producer: producer
+ * @event: event to notify
+ * @notify_registered_only: notify only clients registered by
+ *	ipa_rm_register()
+ */
+void ipa_rm_resource_producer_notify_clients(
+				struct ipa_rm_resource_prod *producer,
+				enum ipa_rm_event event,
+				bool notify_registered_only)
+{
+	struct ipa_rm_notification_info *reg_info;
+
+	IPA_RM_DBG_LOW("%s event: %d notify_registered_only: %d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		event,
+		notify_registered_only);
+
+	list_for_each_entry(reg_info, &(producer->event_listeners), link) {
+		if (notify_registered_only && !reg_info->explicit)
+			continue;
+
+		IPA_RM_DBG_LOW("Notifying %s event: %d\n",
+			   ipa_rm_resource_str(producer->resource.name), event);
+		reg_info->reg_params.notify_cb(reg_info->reg_params.user_data,
+					       event,
+					       0);
+		IPA_RM_DBG_LOW("back from client CB\n");
+	}
+}
+
+static int ipa_rm_resource_producer_create(struct ipa_rm_resource **resource,
+		struct ipa_rm_resource_prod **producer,
+		struct ipa_rm_create_params *create_params,
+		int *max_peers)
+{
+	int result = 0;
+
+	*producer = kzalloc(sizeof(**producer), GFP_ATOMIC);
+	if (*producer == NULL) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	INIT_LIST_HEAD(&((*producer)->event_listeners));
+	result = ipa_rm_resource_producer_register(*producer,
+			&(create_params->reg_params),
+			false);
+	if (result) {
+		IPA_RM_ERR("ipa_rm_resource_producer_register() failed\n");
+		goto register_fail;
+	}
+
+	(*resource) = (struct ipa_rm_resource *) (*producer);
+	(*resource)->type = IPA_RM_PRODUCER;
+	*max_peers = IPA_RM_RESOURCE_CONS_MAX;
+	goto bail;
+register_fail:
+	kfree(*producer);
+bail:
+	return result;
+}
+
+static void ipa_rm_resource_producer_delete(
+				struct ipa_rm_resource_prod *producer)
+{
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos, *q;
+
+	ipa_rm_resource_producer_release(producer);
+	list_for_each_safe(pos, q, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+				struct ipa_rm_notification_info,
+				link);
+		list_del(pos);
+		kfree(reg_info);
+	}
+}
+
+static int ipa_rm_resource_consumer_create(struct ipa_rm_resource **resource,
+		struct ipa_rm_resource_cons **consumer,
+		struct ipa_rm_create_params *create_params,
+		int *max_peers)
+{
+	int result = 0;
+
+	*consumer = kzalloc(sizeof(**consumer), GFP_ATOMIC);
+	if (*consumer == NULL) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	(*consumer)->request_resource = create_params->request_resource;
+	(*consumer)->release_resource = create_params->release_resource;
+	(*resource) = (struct ipa_rm_resource *) (*consumer);
+	(*resource)->type = IPA_RM_CONSUMER;
+	init_completion(&((*consumer)->request_consumer_in_progress));
+	*max_peers = IPA_RM_RESOURCE_PROD_MAX;
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_create() - creates resource
+ * @create_params: [in] parameters needed
+ *			for resource initialization with IPA RM
+ * @resource: [out] created resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_create(
+		struct ipa_rm_create_params *create_params,
+		struct ipa_rm_resource **resource)
+{
+	struct ipa_rm_resource_cons *consumer;
+	struct ipa_rm_resource_prod *producer;
+	int max_peers;
+	int result = 0;
+
+	if (!create_params) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (IPA_RM_RESORCE_IS_PROD(create_params->name)) {
+		result = ipa_rm_resource_producer_create(resource,
+				&producer,
+				create_params,
+				&max_peers);
+		if (result) {
+			IPA_RM_ERR("ipa_rm_resource_producer_create failed\n");
+			goto bail;
+		}
+	} else if (IPA_RM_RESORCE_IS_CONS(create_params->name)) {
+		result = ipa_rm_resource_consumer_create(resource,
+				&consumer,
+				create_params,
+				&max_peers);
+		if (result) {
+			IPA_RM_ERR("ipa_rm_resource_producer_create failed\n");
+			goto bail;
+		}
+	} else {
+		IPA_RM_ERR("invalied resource\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	result = ipa_rm_peers_list_create(max_peers,
+			&((*resource)->peers_list));
+	if (result) {
+		IPA_RM_ERR("ipa_rm_peers_list_create failed\n");
+		goto peers_alloc_fail;
+	}
+	(*resource)->name = create_params->name;
+	(*resource)->floor_voltage = create_params->floor_voltage;
+	(*resource)->state = IPA_RM_RELEASED;
+	goto bail;
+
+peers_alloc_fail:
+	ipa_rm_resource_delete(*resource);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_delete() - deletes resource
+ * @resource: [in] resource to delete
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_delete(struct ipa_rm_resource *resource)
+{
+	struct ipa_rm_resource *consumer;
+	struct ipa_rm_resource *producer;
+	int peers_index;
+	int result = 0;
+	int list_size;
+	bool userspace_dep;
+
+	if (!resource) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	IPA_RM_DBG("ipa_rm_resource_delete ENTER with resource %d\n",
+					resource->name);
+	if (resource->type == IPA_RM_PRODUCER) {
+		if (resource->peers_list) {
+			list_size = ipa_rm_peers_list_get_size(
+				resource->peers_list);
+			for (peers_index = 0;
+				peers_index < list_size;
+				peers_index++) {
+				consumer = ipa_rm_peers_list_get_resource(
+						peers_index,
+						resource->peers_list);
+				if (consumer) {
+					userspace_dep =
+					ipa_rm_peers_list_get_userspace_dep(
+							peers_index,
+							resource->peers_list);
+					ipa_rm_resource_delete_dependency(
+						resource,
+						consumer,
+						userspace_dep);
+				}
+			}
+		}
+
+		ipa_rm_resource_producer_delete(
+				(struct ipa_rm_resource_prod *) resource);
+	} else if (resource->type == IPA_RM_CONSUMER) {
+		if (resource->peers_list) {
+			list_size = ipa_rm_peers_list_get_size(
+				resource->peers_list);
+			for (peers_index = 0;
+					peers_index < list_size;
+					peers_index++){
+				producer = ipa_rm_peers_list_get_resource(
+							peers_index,
+							resource->peers_list);
+				if (producer) {
+					userspace_dep =
+					ipa_rm_peers_list_get_userspace_dep(
+						peers_index,
+						resource->peers_list);
+					ipa_rm_resource_delete_dependency(
+							producer,
+							resource,
+							userspace_dep);
+				}
+			}
+		}
+	}
+	ipa_rm_peers_list_delete(resource->peers_list);
+	kfree(resource);
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_register() - register a producer resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ * @explicit: [in] registered explicitly by ipa_rm_register()
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ *
+ */
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+		struct ipa_rm_register_params *reg_params,
+		bool explicit)
+{
+	int result = 0;
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos;
+
+	if (!producer || !reg_params) {
+		IPA_RM_ERR("invalid params\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	list_for_each(pos, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+					struct ipa_rm_notification_info,
+					link);
+		if (reg_info->reg_params.notify_cb ==
+						reg_params->notify_cb) {
+			IPA_RM_ERR("already registered\n");
+			result = -EPERM;
+			goto bail;
+		}
+
+	}
+
+	reg_info = kzalloc(sizeof(*reg_info), GFP_ATOMIC);
+	if (reg_info == NULL) {
+		IPA_RM_ERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	reg_info->reg_params.user_data = reg_params->user_data;
+	reg_info->reg_params.notify_cb = reg_params->notify_cb;
+	reg_info->explicit = explicit;
+	INIT_LIST_HEAD(&reg_info->link);
+	list_add(&reg_info->link, &producer->event_listeners);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_deregister() - deregister a producer resource
+ * @producer: [in] producer resource
+ * @reg_params: [in] registration parameters
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Producer resource is expected for this call.
+ * This function deletes only a single instance of the
+ * registration info.
+ *
+ */
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+		struct ipa_rm_register_params *reg_params)
+{
+	int result = -EINVAL;
+	struct ipa_rm_notification_info *reg_info;
+	struct list_head *pos, *q;
+
+	if (!producer || !reg_params) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	list_for_each_safe(pos, q, &(producer->event_listeners)) {
+		reg_info = list_entry(pos,
+				struct ipa_rm_notification_info,
+				link);
+		if (reg_info->reg_params.notify_cb ==
+						reg_params->notify_cb) {
+			list_del(pos);
+			kfree(reg_info);
+			result = 0;
+			goto bail;
+		}
+	}
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_add_dependency() - add dependency between two
+ *				given resources
+ * @resource: [in] dependent resource
+ * @depends_on: [in] resource it depends on
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on,
+				   bool userspace_dep)
+{
+	int result = 0;
+	int consumer_result;
+	bool add_dep_by_userspace;
+
+	if (!resource || !depends_on) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (ipa_rm_peers_list_check_dependency(resource->peers_list,
+			resource->name,
+			depends_on->peers_list,
+			depends_on->name,
+			&add_dep_by_userspace)) {
+		IPA_RM_ERR("dependency already exists, added by %s\n",
+			add_dep_by_userspace ? "userspace" : "kernel");
+		return -EEXIST;
+	}
+
+	ipa_rm_peers_list_add_peer(resource->peers_list, depends_on,
+		userspace_dep);
+	ipa_rm_peers_list_add_peer(depends_on->peers_list, resource,
+		userspace_dep);
+	IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name),
+				resource->state);
+
+	resource->needed_bw += depends_on->max_bw;
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+	{
+		enum ipa_rm_resource_state prev_state = resource->state;
+
+		resource->state = IPA_RM_REQUEST_IN_PROGRESS;
+		((struct ipa_rm_resource_prod *)
+					resource)->pending_request++;
+		consumer_result = ipa_rm_resource_consumer_request(
+				(struct ipa_rm_resource_cons *)depends_on,
+				resource->max_bw,
+				true, false);
+		if (consumer_result != -EINPROGRESS) {
+			resource->state = prev_state;
+			((struct ipa_rm_resource_prod *)
+					resource)->pending_request--;
+			ipa_rm_perf_profile_change(resource->name);
+		}
+		result = consumer_result;
+		break;
+	}
+	default:
+		IPA_RM_ERR("invalid state\n");
+		result = -EPERM;
+		goto bail;
+	}
+bail:
+	IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name),
+					resource->state);
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_delete_dependency() - delete the dependency between two
+ *				given resources
+ * @resource: [in] dependent resource
+ * @depends_on: [in] resource it depends on
+ *
+ * Returns: 0 on success, negative on failure
+ * In case the resource state was changed, a notification
+ * will be sent to the RM client
+ */
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on,
+				   bool userspace_dep)
+{
+	int result = 0;
+	bool state_changed = false;
+	bool release_consumer = false;
+	enum ipa_rm_event evt;
+	bool add_dep_by_userspace;
+
+	if (!resource || !depends_on) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_rm_peers_list_check_dependency(resource->peers_list,
+			resource->name,
+			depends_on->peers_list,
+			depends_on->name,
+			&add_dep_by_userspace)) {
+		IPA_RM_ERR("dependency does not exist\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * to avoid race conditions between kernel and userspace
+	 * need to check that the dependency was added by same entity
+	 */
+	if (add_dep_by_userspace != userspace_dep) {
+		IPA_RM_DBG("dependency was added by %s\n",
+			add_dep_by_userspace ? "userspace" : "kernel");
+		IPA_RM_DBG("ignore request to delete dependency by %s\n",
+			userspace_dep ? "userspace" : "kernel");
+		return 0;
+	}
+
+	IPA_RM_DBG("%s state: %d\n", ipa_rm_resource_str(resource->name),
+				resource->state);
+
+	resource->needed_bw -= depends_on->max_bw;
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+		break;
+	case IPA_RM_GRANTED:
+		ipa_rm_perf_profile_change(resource->name);
+		release_consumer = true;
+		break;
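+	/*
+	 * While a release or request is still in flight, dropping this peer
+	 * may be the last thing the producer is waiting for; if so, complete
+	 * the transition and notify the RM client below.
+	 */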
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (((struct ipa_rm_resource_prod *)
+			resource)->pending_release > 0)
+			((struct ipa_rm_resource_prod *)
+				resource)->pending_release--;
+		if (depends_on->state == IPA_RM_RELEASE_IN_PROGRESS &&
+			((struct ipa_rm_resource_prod *)
+			resource)->pending_release == 0) {
+			resource->state = IPA_RM_RELEASED;
+			state_changed = true;
+			evt = IPA_RM_RESOURCE_RELEASED;
+			ipa_rm_perf_profile_change(resource->name);
+		}
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		release_consumer = true;
+		if (((struct ipa_rm_resource_prod *)
+			resource)->pending_request > 0)
+			((struct ipa_rm_resource_prod *)
+				resource)->pending_request--;
+		if (depends_on->state == IPA_RM_REQUEST_IN_PROGRESS &&
+			((struct ipa_rm_resource_prod *)
+				resource)->pending_request == 0) {
+			resource->state = IPA_RM_GRANTED;
+			state_changed = true;
+			evt = IPA_RM_RESOURCE_GRANTED;
+			ipa_rm_perf_profile_change(resource->name);
+		}
+		break;
+	default:
+		result = -EINVAL;
+		goto bail;
+	}
+	if (state_changed) {
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+				resource->name,
+				evt,
+				false);
+	}
+	IPA_RM_DBG("%s new state: %d\n", ipa_rm_resource_str(resource->name),
+					resource->state);
+	ipa_rm_peers_list_remove_peer(resource->peers_list,
+			depends_on->name);
+	ipa_rm_peers_list_remove_peer(depends_on->peers_list,
+			resource->name);
+	if (release_consumer)
+		(void) ipa_rm_resource_consumer_release(
+				(struct ipa_rm_resource_cons *)depends_on,
+				resource->max_bw,
+				true);
+bail:
+	IPA_RM_DBG("EXIT with %d\n", result);
+
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_request() - producer resource request
+ * @producer: [in] producer
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer)
+{
+	int peers_index;
+	int result = 0;
+	struct ipa_rm_resource *consumer;
+	int consumer_result;
+	enum ipa_rm_resource_state state;
+
+	state = producer->resource.state;
+	switch (producer->resource.state) {
+	case IPA_RM_RELEASED:
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		producer->resource.state = IPA_RM_REQUEST_IN_PROGRESS;
+		break;
+	case IPA_RM_GRANTED:
+		goto unlock_and_bail;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		result = -EINPROGRESS;
+		goto unlock_and_bail;
+	default:
+		result = -EINVAL;
+		goto unlock_and_bail;
+	}
+
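+	/*
+	 * Request every consumer this producer depends on. Consumers that
+	 * answer -EINPROGRESS are tracked in pending_request; the producer is
+	 * granted only once that count drops back to zero (here or in the
+	 * consumer completion callback).
+	 */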
+	producer->pending_request = 0;
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				producer->resource.peers_list);
+		peers_index++) {
+		consumer = ipa_rm_peers_list_get_resource(peers_index,
+				producer->resource.peers_list);
+		if (consumer) {
+			producer->pending_request++;
+			consumer_result = ipa_rm_resource_consumer_request(
+				(struct ipa_rm_resource_cons *)consumer,
+				producer->resource.max_bw,
+				true, false);
+			if (consumer_result == -EINPROGRESS) {
+				result = -EINPROGRESS;
+			} else {
+				producer->pending_request--;
+				if (consumer_result != 0) {
+					result = consumer_result;
+					goto bail;
+				}
+			}
+		}
+	}
+
+	if (producer->pending_request == 0) {
+		producer->resource.state = IPA_RM_GRANTED;
+		ipa_rm_perf_profile_change(producer->resource.name);
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+			producer->resource.name,
+			IPA_RM_RESOURCE_GRANTED,
+			true);
+		result = 0;
+	}
+unlock_and_bail:
+	if (state != producer->resource.state)
+		IPA_RM_DBG_LOW("%s state changed %d->%d\n",
+			ipa_rm_resource_str(producer->resource.name),
+			state,
+			producer->resource.state);
+bail:
+	return result;
+}
+
+/**
+ * ipa_rm_resource_producer_release() - producer resource release
+ * @producer: [in] producer resource
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer)
+{
+	int peers_index;
+	int result = 0;
+	struct ipa_rm_resource *consumer;
+	int consumer_result;
+	enum ipa_rm_resource_state state;
+
+	state = producer->resource.state;
+	switch (producer->resource.state) {
+	case IPA_RM_RELEASED:
+		goto bail;
+	case IPA_RM_GRANTED:
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		producer->resource.state = IPA_RM_RELEASE_IN_PROGRESS;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		result = -EINPROGRESS;
+		goto bail;
+	default:
+		result = -EPERM;
+		goto bail;
+	}
+
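+	/*
+	 * Release every consumer this producer depends on; pending_release
+	 * tracks releases still outstanding, and the producer is marked
+	 * RELEASED below once that count reaches zero.
+	 */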
+	producer->pending_release = 0;
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				producer->resource.peers_list);
+		peers_index++) {
+		consumer = ipa_rm_peers_list_get_resource(peers_index,
+				producer->resource.peers_list);
+		if (consumer) {
+			producer->pending_release++;
+			consumer_result = ipa_rm_resource_consumer_release(
+				(struct ipa_rm_resource_cons *)consumer,
+				producer->resource.max_bw,
+				true);
+			producer->pending_release--;
+		}
+	}
+
+	if (producer->pending_release == 0) {
+		producer->resource.state = IPA_RM_RELEASED;
+		ipa_rm_perf_profile_change(producer->resource.name);
+		(void) ipa_rm_wq_send_cmd(IPA_RM_WQ_NOTIFY_PROD,
+			producer->resource.name,
+			IPA_RM_RESOURCE_RELEASED,
+			true);
+	}
+bail:
+	if (state != producer->resource.state)
+		IPA_RM_DBG_LOW("%s state changed %d->%d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		state,
+		producer->resource.state);
+
+	return result;
+}
+
+static void ipa_rm_resource_producer_handle_cb(
+		struct ipa_rm_resource_prod *producer,
+		enum ipa_rm_event event)
+{
+	IPA_RM_DBG_LOW("%s state: %d event: %d pending_request: %d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		producer->resource.state,
+		event,
+		producer->pending_request);
+
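+	/*
+	 * Each consumer completion decrements the matching pending counter;
+	 * only the last one moves the producer to its final state and
+	 * notifies its registered clients.
+	 */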
+	switch (producer->resource.state) {
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (event != IPA_RM_RESOURCE_GRANTED)
+			goto unlock_and_bail;
+		if (producer->pending_request > 0) {
+			producer->pending_request--;
+			if (producer->pending_request == 0) {
+				producer->resource.state =
+						IPA_RM_GRANTED;
+				ipa_rm_perf_profile_change(
+					producer->resource.name);
+				ipa_rm_resource_producer_notify_clients(
+						producer,
+						IPA_RM_RESOURCE_GRANTED,
+						false);
+				goto bail;
+			}
+		}
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (event != IPA_RM_RESOURCE_RELEASED)
+			goto unlock_and_bail;
+		if (producer->pending_release > 0) {
+			producer->pending_release--;
+			if (producer->pending_release == 0) {
+				producer->resource.state =
+						IPA_RM_RELEASED;
+				ipa_rm_perf_profile_change(
+					producer->resource.name);
+				ipa_rm_resource_producer_notify_clients(
+						producer,
+						IPA_RM_RESOURCE_RELEASED,
+						false);
+				goto bail;
+			}
+		}
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_RELEASED:
+	default:
+		goto unlock_and_bail;
+	}
+unlock_and_bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(producer->resource.name),
+		producer->resource.state);
+bail:
+	return;
+}
+
+/**
+ * ipa_rm_resource_consumer_handle_cb() - propagates resource
+ *	notification to all dependent producers
+ * @consumer: [in] notifying resource
+ * @event: [in] event reported by the consumer
+ */
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+				enum ipa_rm_event event)
+{
+	int peers_index;
+	struct ipa_rm_resource *producer;
+
+	if (!consumer) {
+		IPA_RM_ERR("invalid params\n");
+		return;
+	}
+	IPA_RM_DBG_LOW("%s state: %d event: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state,
+		event);
+
+	switch (consumer->resource.state) {
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		if (event == IPA_RM_RESOURCE_RELEASED)
+			goto bail;
+		consumer->resource.state = IPA_RM_GRANTED;
+		ipa_rm_perf_profile_change(consumer->resource.name);
+		ipa_resume_resource(consumer->resource.name);
+		complete_all(&consumer->request_consumer_in_progress);
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		if (event == IPA_RM_RESOURCE_GRANTED)
+			goto bail;
+		consumer->resource.state = IPA_RM_RELEASED;
+		break;
+	case IPA_RM_GRANTED:
+	case IPA_RM_RELEASED:
+	default:
+		goto bail;
+	}
+
+	for (peers_index = 0;
+		peers_index < ipa_rm_peers_list_get_size(
+				consumer->resource.peers_list);
+		peers_index++) {
+		producer = ipa_rm_peers_list_get_resource(peers_index,
+				consumer->resource.peers_list);
+		if (producer)
+			ipa_rm_resource_producer_handle_cb(
+					(struct ipa_rm_resource_prod *)
+						producer,
+						event);
+	}
+
+	return;
+bail:
+	IPA_RM_DBG_LOW("%s new state: %d\n",
+		ipa_rm_resource_str(consumer->resource.name),
+		consumer->resource.state);
+}
+
+/*
+ * ipa_rm_resource_set_perf_profile() - set the performance profile of a
+ *					resource
+ *
+ * @resource: [in] resource
+ * @profile: [in] profile to be set
+ *
+ * Sets the profile on the given resource. In case the resource is
+ * granted, its bandwidth vote is updated as well.
+ */
+int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
+				     struct ipa_rm_perf_profile *profile)
+{
+	int peers_index;
+	struct ipa_rm_resource *peer;
+
+	if (!resource || !profile) {
+		IPA_RM_ERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (profile->max_supported_bandwidth_mbps == resource->max_bw) {
+		IPA_RM_DBG_LOW("same profile\n");
+		return 0;
+	}
+
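+	/*
+	 * An active producer (or any consumer) propagates the new bandwidth
+	 * figure to all of its peers; peers that are already granted get
+	 * their bandwidth vote refreshed immediately.
+	 */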
+	if ((resource->type == IPA_RM_PRODUCER &&
+	    (resource->state == IPA_RM_GRANTED ||
+	    resource->state == IPA_RM_REQUEST_IN_PROGRESS)) ||
+	    resource->type == IPA_RM_CONSUMER) {
+		for (peers_index = 0;
+		     peers_index < ipa_rm_peers_list_get_size(
+		     resource->peers_list);
+		     peers_index++) {
+			peer = ipa_rm_peers_list_get_resource(peers_index,
+				resource->peers_list);
+			if (!peer)
+				continue;
+			peer->needed_bw -= resource->max_bw;
+			peer->needed_bw +=
+				profile->max_supported_bandwidth_mbps;
+			if (peer->state == IPA_RM_GRANTED)
+				ipa_rm_perf_profile_change(peer->name);
+		}
+	}
+
+	resource->max_bw = profile->max_supported_bandwidth_mbps;
+	if (resource->state == IPA_RM_GRANTED)
+		ipa_rm_perf_profile_change(resource->name);
+
+	return 0;
+}
+
+/*
+ * ipa_rm_resource_producer_print_stat() - print the
+ * resource status and all its dependencies
+ *
+ * @resource: [in] producer resource
+ * @buf: [in] buffer to print into
+ * @size: [in] buffer size
+ *
+ * Returns: number of bytes used on success, negative on failure
+ */
+int ipa_rm_resource_producer_print_stat(
+				struct ipa_rm_resource *resource,
+				char *buf,
+				int size)
+{
+	int i;
+	int nbytes;
+	int cnt = 0;
+	struct ipa_rm_resource *consumer;
+
+	if (!buf || size < 0)
+		return -EINVAL;
+
+	nbytes = scnprintf(buf + cnt, size - cnt, "%s",
+		ipa_rm_resource_str(resource->name));
+	cnt += nbytes;
+	nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ", resource->max_bw);
+	cnt += nbytes;
+
+	switch (resource->state) {
+	case IPA_RM_RELEASED:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Released] -> ");
+		cnt += nbytes;
+		break;
+	case IPA_RM_REQUEST_IN_PROGRESS:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Request In Progress] -> ");
+		cnt += nbytes;
+		break;
+	case IPA_RM_GRANTED:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Granted] -> ");
+		cnt += nbytes;
+		break;
+	case IPA_RM_RELEASE_IN_PROGRESS:
+		nbytes = scnprintf(buf + cnt, size - cnt,
+			"Release In Progress] -> ");
+		cnt += nbytes;
+		break;
+	default:
+		return -EPERM;
+	}
+
+	for (i = 0; i < resource->peers_list->max_peers; ++i) {
+		consumer =
+			ipa_rm_peers_list_get_resource(
+			i,
+			resource->peers_list);
+		if (consumer) {
+			nbytes = scnprintf(buf + cnt, size - cnt, "%s",
+				ipa_rm_resource_str(consumer->name));
+			cnt += nbytes;
+			nbytes = scnprintf(buf + cnt, size - cnt, "[%d, ",
+				consumer->max_bw);
+			cnt += nbytes;
+
+			switch (consumer->state) {
+			case IPA_RM_RELEASED:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+					"Released], ");
+				cnt += nbytes;
+				break;
+			case IPA_RM_REQUEST_IN_PROGRESS:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+						"Request In Progress], ");
+				cnt += nbytes;
+				break;
+			case IPA_RM_GRANTED:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+						"Granted], ");
+				cnt += nbytes;
+				break;
+			case IPA_RM_RELEASE_IN_PROGRESS:
+				nbytes = scnprintf(buf + cnt, size - cnt,
+						"Release In Progress], ");
+				cnt += nbytes;
+				break;
+			default:
+				return -EPERM;
+			}
+		}
+	}
+	nbytes = scnprintf(buf + cnt, size - cnt, "\n");
+	cnt += nbytes;
+
+	return cnt;
+}
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.h b/drivers/platform/msm/ipa/ipa_rm_resource.h
new file mode 100644
index 0000000..da149c5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.h
@@ -0,0 +1,166 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RM_RESOURCE_H_
+#define _IPA_RM_RESOURCE_H_
+
+#include <linux/list.h>
+#include <linux/ipa.h>
+#include "ipa_rm_peers_list.h"
+
+/**
+ * enum ipa_rm_resource_state - resource state
+ */
+enum ipa_rm_resource_state {
+	IPA_RM_RELEASED,
+	IPA_RM_REQUEST_IN_PROGRESS,
+	IPA_RM_GRANTED,
+	IPA_RM_RELEASE_IN_PROGRESS
+};
+
+/**
+ * enum ipa_rm_resource_type - IPA resource manager resource type
+ */
+enum ipa_rm_resource_type {
+	IPA_RM_PRODUCER,
+	IPA_RM_CONSUMER
+};
+
+/**
+ * struct ipa_rm_notification_info - notification information
+ *				of IPA RM client
+ * @reg_params: registration parameters
+ * @explicit: registered explicitly by ipa_rm_register()
+ * @link: link to the list of all registered clients information
+ */
+struct ipa_rm_notification_info {
+	struct ipa_rm_register_params	reg_params;
+	bool				explicit;
+	struct list_head		link;
+};
+
+/**
+ * struct ipa_rm_resource - IPA RM resource
+ * @name: name identifying resource
+ * @type: type of resource (PRODUCER or CONSUMER)
+ * @floor_voltage: minimum voltage level for operation
+ * @max_bw: maximum bandwidth required for resource in Mbps
+ * @needed_bw: aggregate bandwidth in Mbps required by this resource's peers
+ * @state: state of the resource
+ * @peers_list: list of the peers of the resource
+ */
+struct ipa_rm_resource {
+	enum ipa_rm_resource_name	name;
+	enum ipa_rm_resource_type	type;
+	enum ipa_voltage_level		floor_voltage;
+	u32				max_bw;
+	u32				needed_bw;
+	enum ipa_rm_resource_state	state;
+	struct ipa_rm_peers_list	*peers_list;
+};
+
+/**
+ * struct ipa_rm_resource_cons - IPA RM consumer
+ * @resource: resource
+ * @usage_count: number of producers in GRANTED / REQUESTED state
+ *		using this consumer
+ * @request_consumer_in_progress: when set, the consumer is during its request
+ *		phase
+ * @request_resource: function which should be called to request resource
+ *			from resource manager
+ * @release_resource: function which should be called to release resource
+ *			from resource manager
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_cons {
+	struct ipa_rm_resource resource;
+	int usage_count;
+	struct completion request_consumer_in_progress;
+	int (*request_resource)(void);
+	int (*release_resource)(void);
+};
+
+/**
+ * struct ipa_rm_resource_prod - IPA RM producer
+ * @resource: resource
+ * @event_listeners: list of clients registered with this producer
+ *		for notifications on resource state changes
+ * @pending_request: number of consumer requests still in progress
+ * @pending_release: number of consumer releases still in progress
+ *
+ * Add new fields after @resource only.
+ */
+struct ipa_rm_resource_prod {
+	struct ipa_rm_resource	resource;
+	struct list_head	event_listeners;
+	int			pending_request;
+	int			pending_release;
+};
+
+int ipa_rm_resource_create(
+		struct ipa_rm_create_params *create_params,
+		struct ipa_rm_resource **resource);
+
+int ipa_rm_resource_delete(struct ipa_rm_resource *resource);
+
+int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
+				struct ipa_rm_register_params *reg_params,
+				bool explicit);
+
+int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
+				struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
+				   struct ipa_rm_resource *depends_on,
+				   bool userspace_dep);
+
+int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
+				      struct ipa_rm_resource *depends_on,
+				      bool userspace_dep);
+
+int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
+
+int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer,
+				u32 needed_bw,
+				bool inc_usage_count,
+				bool wake_client);
+
+int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer,
+				u32 needed_bw,
+				bool dec_usage_count);
+
+int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
+				     struct ipa_rm_perf_profile *profile);
+
+void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
+				enum ipa_rm_event event);
+
+void ipa_rm_resource_producer_notify_clients(
+				struct ipa_rm_resource_prod *producer,
+				enum ipa_rm_event event,
+				bool notify_registered_only);
+
+int ipa_rm_resource_producer_print_stat(
+		struct ipa_rm_resource *resource,
+		char *buf,
+		int size);
+
+int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		u32 needed_bw,
+		bool notify_completion,
+		bool dec_client_on_err);
+
+int ipa_rm_resource_consumer_release_work(
+		struct ipa_rm_resource_cons *consumer,
+		enum ipa_rm_resource_state prev_state,
+		bool notify_completion);
+
+#endif /* _IPA_RM_RESOURCE_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
new file mode 100644
index 0000000..ae6cfc4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_uc_offload_common_i.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa_mhi.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
+#define _IPA_UC_OFFLOAD_COMMON_I_H_
+
+int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp);
+int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/Makefile b/drivers/platform/msm/ipa/ipa_v2/Makefile
new file mode 100644
index 0000000..69b8a4c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_IPA) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+obj-$(CONFIG_RMNET_IPA) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
new file mode 100644
index 0000000..84a5180
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -0,0 +1,4937 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/qcom_iommu.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/hash.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+			       x == IPA_MODE_MOBILE_AP_WAN || \
+			       x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+#define IPA_ROUTING_RULE_BYTE_SIZE (4)
+#define IPA_BAM_CNFG_BITS_VALv1_1 (0x7FFFE004)
+#define IPA_BAM_CNFG_BITS_VALv2_0 (0xFFFFE004)
+#define IPA_STATUS_CLEAR_OFST (0x3f28)
+#define IPA_STATUS_CLEAR_SIZE (32)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 150
+
+#define IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define MAX_POLLING_ITERATION 40
+#define MIN_POLLING_ITERATION 1
+#define ONE_MSEC 1
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_SPS_PROD_TIMEOUT_MSEC 100
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_FLT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_RT_RULE, \
+				compat_uptr_t)
+
+/**
+ * struct ipa_ioc_nat_alloc_mem32 - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, name of the table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem32 {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	compat_size_t size;
+	compat_off_t offset;
+};
+#endif
+
+static void ipa_start_tag_process(struct work_struct *work);
+static DECLARE_WORK(ipa_tag_work, ipa_start_tag_process);
+
+static void ipa_sps_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_sps_release_resource_work,
+	ipa_sps_release_resource);
+
+static struct ipa_plat_drv_res ipa_res = {0, };
+
+struct msm_bus_scale_pdata *bus_scale_table;
+
+static struct clk *ipa_clk_src;
+static struct clk *ipa_clk;
+static struct clk *smmu_clk;
+static struct clk *sys_noc_ipa_axi_clk;
+static struct clk *ipa_cnoc_clk;
+static struct clk *ipa_inactivity_clk;
+
+struct ipa_context *ipa_ctx;
+static struct device *master_dev;
+struct platform_device *ipa_pdev;
+static struct {
+	bool present;
+	bool arm_smmu;
+	bool fast_map;
+	bool s1_bypass;
+	u32 ipa_base;
+	u32 ipa_size;
+} smmu_info;
+
+static char *active_clients_table_buf;
+
+int ipa2_active_clients_log_print_buffer(char *buf, int size)
+{
+	int i;
+	int nbytes;
+	int cnt = 0;
+	int start_idx;
+	int end_idx;
+
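+	/* the log is a circular buffer: the oldest line sits just past tail */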
+	start_idx = (ipa_ctx->ipa2_active_clients_logging.log_tail + 1) %
+			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	end_idx = ipa_ctx->ipa2_active_clients_logging.log_head;
+	for (i = start_idx; i != end_idx;
+		i = (i + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+				ipa_ctx->ipa2_active_clients_logging
+				.log_buffer[i]);
+		cnt += nbytes;
+	}
+
+	return cnt;
+}
+
+int ipa2_active_clients_log_print_table(char *buf, int size)
+{
+	int i;
+	struct ipa2_active_client_htable_entry *iterator;
+	int cnt = 0;
+
+	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+	hash_for_each(ipa_ctx->ipa2_active_clients_logging.htable, i,
+			iterator, list) {
+		switch (iterator->type) {
+		case IPA2_ACTIVE_CLIENT_LOG_TYPE_EP:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d ENDPOINT\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA2_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SIMPLE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA2_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d RESOURCE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA2_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SPECIAL\n",
+					iterator->id_string, iterator->count);
+			break;
+		default:
+			IPAERR("Trying to print illegal active_clients type");
+			break;
+		}
+	}
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"\nTotal active clients count: %d\n",
+			ipa_ctx->ipa_active_clients.cnt);
+
+	return cnt;
+}
+
+static int ipa2_active_clients_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	ipa_active_clients_lock();
+	ipa2_active_clients_log_print_table(active_clients_table_buf,
+			IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+	IPAERR("%s", active_clients_table_buf);
+	ipa_active_clients_unlock();
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa2_active_clients_panic_blk = {
+	.notifier_call  = ipa2_active_clients_panic_notifier,
+};
+
+static int ipa2_active_clients_log_insert(const char *string)
+{
+	int head;
+	int tail;
+
+	head = ipa_ctx->ipa2_active_clients_logging.log_head;
+	tail = ipa_ctx->ipa2_active_clients_logging.log_tail;
+
+	if (!ipa_ctx->ipa2_active_clients_logging.log_rdy)
+		return -EPERM;
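+	/* write at head; if head catches up with tail, drop the oldest line */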
+	memset(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], '_',
+			IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	strlcpy(ipa_ctx->ipa2_active_clients_logging.log_buffer[head], string,
+			(size_t)IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	head = (head + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	if (tail == head)
+		tail = (tail + 1) % IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+	ipa_ctx->ipa2_active_clients_logging.log_tail = tail;
+	ipa_ctx->ipa2_active_clients_logging.log_head = head;
+
+	return 0;
+}
+
+static int ipa2_active_clients_log_init(void)
+{
+	int i;
+
+	ipa_ctx->ipa2_active_clients_logging.log_buffer[0] = kzalloc(
+			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
+			sizeof(char[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+			GFP_KERNEL);
+	active_clients_table_buf = kzalloc(sizeof(
+			char[IPA2_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+	if (ipa_ctx->ipa2_active_clients_logging.log_buffer[0] == NULL) {
+		IPAERR("Active Clients Logging memory allocation failed");
+		goto bail;
+	}
+	for (i = 0; i < IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+		ipa_ctx->ipa2_active_clients_logging.log_buffer[i] =
+			ipa_ctx->ipa2_active_clients_logging.log_buffer[0] +
+			(IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+	}
+	ipa_ctx->ipa2_active_clients_logging.log_head = 0;
+	ipa_ctx->ipa2_active_clients_logging.log_tail =
+			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	hash_init(ipa_ctx->ipa2_active_clients_logging.htable);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&ipa2_active_clients_panic_blk);
+	ipa_ctx->ipa2_active_clients_logging.log_rdy = 1;
+
+	return 0;
+
+bail:
+	return -ENOMEM;
+}
+
+void ipa2_active_clients_log_clear(void)
+{
+	ipa_active_clients_lock();
+	ipa_ctx->ipa2_active_clients_logging.log_head = 0;
+	ipa_ctx->ipa2_active_clients_logging.log_tail =
+			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	ipa_active_clients_unlock();
+}
+
+static void ipa2_active_clients_log_destroy(void)
+{
+	ipa_ctx->ipa2_active_clients_logging.log_rdy = 0;
+	kfree(ipa_ctx->ipa2_active_clients_logging.log_buffer[0]);
+	ipa_ctx->ipa2_active_clients_logging.log_head = 0;
+	ipa_ctx->ipa2_active_clients_logging.log_tail =
+			IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+}
+
+enum ipa_smmu_cb_type {
+	IPA_SMMU_CB_AP,
+	IPA_SMMU_CB_WLAN,
+	IPA_SMMU_CB_UC,
+	IPA_SMMU_CB_MAX
+
+};
+
+static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
+
+struct iommu_domain *ipa2_get_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_AP].valid)
+		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa2_get_uc_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_UC].valid)
+		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct device *ipa2_get_dma_dev(void)
+{
+	return ipa_ctx->pdev;
+}
+
+/**
+ * ipa2_get_smmu_ctx()- Return the smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+
+/**
+ * ipa2_get_wlan_smmu_ctx()- Return the wlan smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_WLAN];
+}
+
+/**
+ * ipa2_get_uc_smmu_ctx()- Return the uc smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_UC];
+}
+
+static int ipa_open(struct inode *inode, struct file *filp)
+{
+	struct ipa_context *ctx = NULL;
+
+	IPADBG("ENTER\n");
+	ctx = container_of(inode->i_cdev, struct ipa_context, cdev);
+	filp->private_data = ctx;
+
+	return 0;
+}
+
+/**
+ * ipa_flow_control() - Enable/Disable flow control on a particular client.
+ * @ipa_client: [in] client to apply flow control to
+ * @enable: [in] true to enable data flow, false to disable it
+ * @qmap_id: [in] QMAP ID the flow control indication applies to
+ *
+ * Return codes:
+ * None
+ */
+void ipa_flow_control(enum ipa_client_type ipa_client,
+		bool enable, uint32_t qmap_id)
+{
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	int ep_idx;
+	struct ipa_ep_context *ep;
+
+	/* Check if tethered flow control is needed or not.*/
+	if (!ipa_ctx->tethered_flow_control) {
+		IPADBG("Apps flow control is not needed\n");
+		return;
+	}
+
+	/* Check if ep is valid. */
+	ep_idx = ipa2_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPADBG("Invalid IPA client\n");
+		return;
+	}
+
+	ep = &ipa_ctx->ep[ep_idx];
+	if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
+		IPADBG("EP not valid/Not applicable for client.\n");
+		return;
+	}
+
+	spin_lock(&ipa_ctx->disconnect_lock);
+	/* Check if the QMAP_ID matches. */
+	if (ep->cfg.meta.qmap_id != qmap_id) {
+		IPADBG("Flow control ind not for same flow: %u %u\n",
+			ep->cfg.meta.qmap_id, qmap_id);
+		spin_unlock(&ipa_ctx->disconnect_lock);
+		return;
+	}
+	if (!ep->disconnect_in_progress) {
+		if (enable) {
+			IPADBG("Enabling Flow\n");
+			ep_ctrl.ipa_ep_delay = false;
+			IPA_STATS_INC_CNT(ipa_ctx->stats.flow_enable);
+		} else {
+			IPADBG("Disabling Flow\n");
+			ep_ctrl.ipa_ep_delay = true;
+			IPA_STATS_INC_CNT(ipa_ctx->stats.flow_disable);
+		}
+		ep_ctrl.ipa_ep_suspend = false;
+		ipa2_cfg_ep_ctrl(ep_idx, &ep_ctrl);
+	} else {
+		IPADBG("EP disconnect is in progress\n");
+	}
+	spin_unlock(&ipa_ctx->disconnect_lock);
+}
+
+static void ipa_wan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	if (type != WAN_UPSTREAM_ROUTE_ADD &&
+	    type != WAN_UPSTREAM_ROUTE_DEL &&
+	    type != WAN_EMBMS_CONNECT) {
+		IPAERR("Wrong type given. buff %p type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
+static int ipa_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
+{
+	int retval;
+	struct ipa_wan_msg *wan_msg;
+	struct ipa_msg_meta msg_meta;
+
+	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
+	if (!wan_msg) {
+		IPAERR("no memory\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
+		sizeof(struct ipa_wan_msg))) {
+		kfree(wan_msg);
+		return -EFAULT;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = msg_type;
+	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+	retval = ipa2_send_msg(&msg_meta, wan_msg, ipa_wan_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa2_send_msg failed: %d\n", retval);
+		kfree(wan_msg);
+		return retval;
+	}
+
+	return 0;
+}
+
+
+static long ipa_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+	struct ipa_ioc_rm_dependency rm_depend;
+	size_t sz;
+	int pre_entry;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+		return -ENOTTY;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa2_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_DMA:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
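+		/*
+		 * Two-phase copy: the fixed-size header read above sizes the
+		 * variable payload; the count is re-checked after the full
+		 * copy below to guard against a racing userspace writer.
+		 */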
+		pre_entry =
+			((struct ipa_ioc_nat_dma_cmd *)header)->entries;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_nat_dma_cmd) +
+		   pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   pre_entry * sizeof(struct ipa_hdr_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   pre_entry * sizeof(struct ipa_hdr_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_mdfy);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_flt_rule *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_mdfy);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa2_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa2_reset_hdr();
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa2_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa2_reset_rt(arg);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa2_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa2_reset_flt(arg);
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa2_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa2_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_query_intf((struct ipa_ioc_query_intf *)header)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_tx_props *)
+			header)->num_tx_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_tx_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+			param)->num_tx_props
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_tx_props *)
+				param)->num_tx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_query_intf_tx_props(
+				(struct ipa_ioc_query_intf_tx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_rx_props *)
+			header)->num_rx_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_rx_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+			param)->num_rx_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_rx_props *)
+				param)->num_rx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_query_intf_rx_props(
+				(struct ipa_ioc_query_intf_rx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_ext_props *)
+				header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_ext_props *)
+			header)->num_ext_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_ext_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+			param)->num_ext_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_ext_props *)
+				param)->num_ext_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_query_intf_ext_props(
+				(struct ipa_ioc_query_intf_ext_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PULL_MSG:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_msg_meta))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+		   ((struct ipa_msg_meta *)header)->msg_len;
+		pyld_sz = sizeof(struct ipa_msg_meta) +
+		   pre_entry;
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_msg_meta *)param)->msg_len,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa_pull_msg((struct ipa_msg_meta *)param,
+				 (char *)param + sizeof(struct ipa_msg_meta),
+				 ((struct ipa_msg_meta *)param)->msg_len) !=
+		       ((struct ipa_msg_meta *)param)->msg_len) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY:
+		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+				sizeof(struct ipa_ioc_rm_dependency))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa_rm_add_dependency_from_ioctl(
+			rm_depend.resource_name, rm_depend.depends_on_name);
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY:
+		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+				sizeof(struct ipa_ioc_rm_dependency))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa_rm_delete_dependency_from_ioctl(
+			rm_depend.resource_name, rm_depend.depends_on_name);
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ:
+		{
+			struct ipa_ioc_generate_flt_eq flt_eq;
+
+			if (copy_from_user(&flt_eq, (u8 *)arg,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			if (ipa_generate_flt_eq(flt_eq.ip, &flt_eq.attrib,
+						&flt_eq.eq_attrib)) {
+				retval = -EFAULT;
+				break;
+			}
+			if (copy_to_user((u8 *)arg, &flt_eq,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			break;
+		}
+	case IPA_IOC_QUERY_EP_MAPPING:
+		{
+			retval = ipa2_get_ep_mapping(arg);
+			break;
+		}
+	case IPA_IOC_QUERY_RT_TBL_INDEX:
+		if (copy_from_user(header, (u8 *)arg,
+				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_query_rt_index(
+			 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_WRITE_QMAPID:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
+		retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
+		if (retval) {
+			IPAERR("ipa_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
+		retval = ipa_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
+		if (retval) {
+			IPAERR("ipa_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
+		retval = ipa_send_wan_msg(arg, WAN_EMBMS_CONNECT);
+		if (retval) {
+			IPAERR("ipa_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_HDR_PROC_CTX:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr_proc_ctx *)
+			header)->num_proc_ctxs;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module was compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+			param)->num_proc_ctxs != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr_proc_ctx *)
+				param)->num_proc_ctxs, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_add_hdr_proc_ctx(
+			(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_HDR_PROC_CTX:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module was compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+			param)->num_hdls != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa2_del_hdr_proc_ctx(
+			(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_GET_HW_VERSION:
+		pyld_sz = sizeof(enum ipa_hw_type);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		memcpy(param, &ipa_ctx->ipa_hw_type, pyld_sz);
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	default:        /* redundant, as cmd was checked against MAXNR */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -ENOTTY;
+	}
+	kfree(param);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return retval;
+}
+
+/**
+* ipa_setup_dflt_rt_tables() - Setup default routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+int ipa_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa_ctx->excp_hdr_hdl;
+
+	if (ipa2_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa2_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * because these tables are the very first to be added, they will both
+	 * have the same index (0) which is essential for programming the
+	 * "route" end-point config
+	 */
+
+	kfree(rt_rule);
+
+	return 0;
+}
+
+static int ipa_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipa_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr) {
+		IPAERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
+		strlcpy(hdr_entry->name, IPA_A5_MUX_HDR_NAME,
+				IPA_RESOURCE_NAME_MAX);
+		/* set template for the A5_MUX hdr in header addition block */
+		hdr_entry->hdr_len = IPA_A5_MUX_HEADER_LENGTH;
+	} else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+		strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME,
+				IPA_RESOURCE_NAME_MAX);
+		hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+	} else {
+		WARN_ON(1);
+	}
+
+	if (ipa2_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* set the route register to pass exception packets to Apps */
+	route.route_def_pipe = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	route.route_frag_def_pipe = ipa2_get_ep_mapping(
+		IPA_CLIENT_APPS_LAN_CONS);
+	route.route_def_hdr_table = !ipa_ctx->hdr_tbl_lcl;
+
+	if (ipa_cfg_route(&route)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static int ipa_init_smem_region(int memory_region_size,
+				int memory_region_offset)
+{
+	struct ipa_hw_imm_cmd_dma_shared_mem cmd;
+	struct ipa_desc desc;
+	struct ipa_mem_buffer mem;
+	int rc;
+
+	if (memory_region_size == 0)
+		return 0;
+
+	memset(&desc, 0, sizeof(desc));
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&mem, 0, sizeof(mem));
+
+	mem.size = memory_region_size;
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
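+	/*
+	 * Zero the region: DMA a cleared system-memory buffer into local
+	 * IPA shared memory at the requested offset.
+	 */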
+	memset(mem.base, 0, mem.size);
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa_ctx->smem_restricted_bytes +
+		memory_region_offset;
+	desc.opcode = IPA_DMA_SHARED_MEM;
+	desc.pyld = &cmd;
+	desc.len = sizeof(cmd);
+	desc.type = IPA_IMM_CMD_DESC;
+
+	rc = ipa_send_cmd(1, &desc);
+	if (rc) {
+		IPAERR("failed to send immediate command (error %d)\n", rc);
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+		mem.phys_base);
+
+	return rc;
+}
+
+/**
+* ipa_init_q6_smem() - Initialize Q6 general memory and
+*                      header memory regions in IPA.
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate dma memory
+* -EFAULT: failed to send IPA command to initialize the memory
+*/
+int ipa_init_q6_smem(void)
+{
+	int rc;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v2_0)
+		rc = ipa_init_smem_region(IPA_MEM_PART(modem_size) -
+			IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE,
+			IPA_MEM_PART(modem_ofst));
+	else
+		rc = ipa_init_smem_region(IPA_MEM_PART(modem_size),
+			IPA_MEM_PART(modem_ofst));
+
+	if (rc) {
+		IPAERR("failed to initialize Modem RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_size),
+		IPA_MEM_PART(modem_hdr_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem HDRs RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem proc ctx RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
+		IPA_MEM_PART(modem_comp_decomp_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return rc;
+}
+
+static void ipa_free_buffer(void *user1, int user2)
+{
+	kfree(user1);
+}
+
+int ipa_q6_pipe_delay(bool zip_pipes)
+{
+	u32 reg_val = 0;
+	int client_idx;
+	int ep_idx;
+
+	/* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		/* Skip the processing for non Q6 pipes. */
+		if (!IPA_CLIENT_IS_Q6_PROD(client_idx))
+			continue;
+		/* Skip the processing for NON-ZIP pipes. */
+		else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx))
+			continue;
+		/* Skip the processing for ZIP pipes. */
+		else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx))
+			continue;
+
+		ep_idx = ipa2_get_ep_mapping(client_idx);
+		if (ep_idx == -1)
+			continue;
+
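+		/* set the endpoint-delay bit for this Q6 producer pipe */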
+		IPA_SETFIELD_IN_REG(reg_val, 1,
+			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
+			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
+
+		ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_CTRL_N_OFST(ep_idx), reg_val);
+	}
+
+	return 0;
+}
+
+int ipa_q6_monitor_holb_mitigation(bool enable)
+{
+	int ep_idx;
+	int client_idx;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx)) {
+			ep_idx = ipa2_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+			/* Send a command to the uC to enable/disable
+			 * HOLB monitoring.
+			 */
+			ipa_uc_monitor_holb(client_idx, enable);
+		}
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+static int ipa_q6_avoid_holb(bool zip_pipes)
+{
+	u32 reg_val;
+	int ep_idx;
+	int client_idx;
+	struct ipa_ep_cfg_ctrl avoid_holb;
+
+	memset(&avoid_holb, 0, sizeof(avoid_holb));
+	avoid_holb.ipa_ep_suspend = true;
+
+	/* For ZIP pipes, processing is done in AFTER_SHUTDOWN callback. */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		/* Skip the processing for non Q6 pipes. */
+		if (!IPA_CLIENT_IS_Q6_CONS(client_idx))
+			continue;
+		/* Skip the processing for NON-ZIP pipes. */
+		else if (zip_pipes && IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx))
+			continue;
+		/* Skip the processing for ZIP pipes. */
+		else if (!zip_pipes && IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx))
+			continue;
+
+		ep_idx = ipa2_get_ep_mapping(client_idx);
+		if (ep_idx == -1)
+			continue;
+
+		/*
+		 * ipa2_cfg_ep_holb is not used here because we are
+		 * setting HOLB on Q6 pipes, and from APPS perspective
+		 * they are not valid, therefore, the above function
+		 * will fail.
+		 */
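+		/* zero the HOLB timer, then set the HOLB-drop enable bit */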
+		reg_val = 0;
+		IPA_SETFIELD_IN_REG(reg_val, 0,
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT,
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK);
+
+		ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(ep_idx),
+			reg_val);
+
+		reg_val = 0;
+		IPA_SETFIELD_IN_REG(reg_val, 1,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK);
+
+		ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(ep_idx),
+			reg_val);
+
+		ipa2_cfg_ep_ctrl(ep_idx, &avoid_holb);
+	}
+
+	return 0;
+}
+
+static u32 ipa_get_max_flt_rt_cmds(u32 num_pipes)
+{
+	u32 max_cmds = 0;
+
+	/* As many filter tables as there are pipes, x2 for IPv4 and IPv6 */
+	max_cmds += num_pipes * 2;
+
+	/* For each of the Modem routing tables */
+	max_cmds += (IPA_MEM_PART(v4_modem_rt_index_hi) -
+		     IPA_MEM_PART(v4_modem_rt_index_lo) + 1);
+
+	max_cmds += (IPA_MEM_PART(v6_modem_rt_index_hi) -
+		     IPA_MEM_PART(v6_modem_rt_index_lo) + 1);
+
+	return max_cmds;
+}
+
+static int ipa_q6_clean_q6_tables(void)
+{
+	struct ipa_desc *desc;
+	struct ipa_hw_imm_cmd_dma_shared_mem *cmd = NULL;
+	int pipe_idx;
+	int num_cmds = 0;
+	int index;
+	int retval;
+	struct ipa_mem_buffer mem = { 0 };
+	u32 *entry;
+	u32 max_cmds = ipa_get_max_flt_rt_cmds(ipa_ctx->ipa_num_pipes);
+
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, 4, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size 4\n");
+		return -ENOMEM;
+	}
+
+	mem.size = 4;
+	entry = mem.base;
+	*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+
+	desc = kcalloc(max_cmds, sizeof(struct ipa_desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto bail_dma;
+	}
+
+	cmd = kcalloc(max_cmds, sizeof(struct ipa_hw_imm_cmd_dma_shared_mem),
+		GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto bail_desc;
+	}
+
+	/*
+	 * Iterating over all the pipes which are either invalid but connected
+	 * or connected but not configured by AP.
+	 */
+	for (pipe_idx = 0; pipe_idx < ipa_ctx->ipa_num_pipes; pipe_idx++) {
+		if (!ipa_ctx->ep[pipe_idx].valid ||
+		    ipa_ctx->ep[pipe_idx].skip_ep_cfg) {
+			/*
+			 * Need to point v4 and v6 fltr tables to an empty
+			 * table
+			 */
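+			/*
+			 * Per-pipe filter entries are 4 bytes each and start
+			 * 8 bytes into the table, past the header words laid
+			 * out in _ipa_init_flt4_v2()/_ipa_init_flt6_v2().
+			 */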
+			cmd[num_cmds].size = mem.size;
+			cmd[num_cmds].system_addr = mem.phys_base;
+			cmd[num_cmds].local_addr =
+				ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(v4_flt_ofst) + 8 + pipe_idx * 4;
+
+			desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+			desc[num_cmds].pyld = &cmd[num_cmds];
+			desc[num_cmds].len = sizeof(*cmd);
+			desc[num_cmds].type = IPA_IMM_CMD_DESC;
+			num_cmds++;
+
+			cmd[num_cmds].size = mem.size;
+			cmd[num_cmds].system_addr =  mem.phys_base;
+			cmd[num_cmds].local_addr =
+				ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(v6_flt_ofst) + 8 + pipe_idx * 4;
+
+			desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+			desc[num_cmds].pyld = &cmd[num_cmds];
+			desc[num_cmds].len = sizeof(*cmd);
+			desc[num_cmds].type = IPA_IMM_CMD_DESC;
+			num_cmds++;
+		}
+	}
+
+	/* Need to point v4/v6 modem routing tables to an empty table */
+	for (index = IPA_MEM_PART(v4_modem_rt_index_lo);
+		 index <= IPA_MEM_PART(v4_modem_rt_index_hi);
+		 index++) {
+		cmd[num_cmds].size = mem.size;
+		cmd[num_cmds].system_addr =  mem.phys_base;
+		cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_ofst) + index * 4;
+
+		desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+		desc[num_cmds].pyld = &cmd[num_cmds];
+		desc[num_cmds].len = sizeof(*cmd);
+		desc[num_cmds].type = IPA_IMM_CMD_DESC;
+		num_cmds++;
+	}
+
+	for (index = IPA_MEM_PART(v6_modem_rt_index_lo);
+		 index <= IPA_MEM_PART(v6_modem_rt_index_hi);
+		 index++) {
+		cmd[num_cmds].size = mem.size;
+		cmd[num_cmds].system_addr =  mem.phys_base;
+		cmd[num_cmds].local_addr = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_ofst) + index * 4;
+
+		desc[num_cmds].opcode = IPA_DMA_SHARED_MEM;
+		desc[num_cmds].pyld = &cmd[num_cmds];
+		desc[num_cmds].len = sizeof(*cmd);
+		desc[num_cmds].type = IPA_IMM_CMD_DESC;
+		num_cmds++;
+	}
+
+	retval = ipa_send_cmd(num_cmds, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (error %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+	kfree(cmd);
+
+bail_desc:
+	kfree(desc);
+
+bail_dma:
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	return retval;
+}
+
+static void ipa_q6_disable_agg_reg(struct ipa_register_write *reg_write,
+				   int ep_idx)
+{
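+	/*
+	 * Build a register-write payload that force-closes any open
+	 * aggregation frame and clears the aggregation-enable bit for
+	 * the given endpoint.
+	 */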
+	reg_write->skip_pipeline_clear = 0;
+
+	reg_write->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(ep_idx);
+	reg_write->value =
+		(1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+	reg_write->value_mask =
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+	reg_write->value |=
+		((0 & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) <<
+		IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT);
+	reg_write->value_mask |=
+		((IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK <<
+		IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT));
+}
+
+static int ipa_q6_set_ex_path_dis_agg(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa_desc *desc;
+	int num_descs = 0;
+	int index;
+	struct ipa_register_write *reg_write;
+	int retval;
+
+	desc = kcalloc(ipa_ctx->ipa_num_pipes, sizeof(struct ipa_desc),
+			GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Set the exception path to AP */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		ep_idx = ipa2_get_ep_mapping(client_idx);
+		if (ep_idx == -1)
+			continue;
+
+		if (ipa_ctx->ep[ep_idx].valid &&
+			ipa_ctx->ep[ep_idx].skip_ep_cfg) {
+			BUG_ON(num_descs >= ipa_ctx->ipa_num_pipes);
+			reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+
+			if (!reg_write) {
+				IPAERR("failed to allocate memory\n");
+				BUG();
+			}
+			reg_write->skip_pipeline_clear = 0;
+			reg_write->offset = IPA_ENDP_STATUS_n_OFST(ep_idx);
+			reg_write->value =
+				(ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) &
+				IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+				IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+			reg_write->value_mask =
+				IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+				IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+			desc[num_descs].opcode = IPA_REGISTER_WRITE;
+			desc[num_descs].pyld = reg_write;
+			desc[num_descs].len = sizeof(*reg_write);
+			desc[num_descs].type = IPA_IMM_CMD_DESC;
+			desc[num_descs].callback = ipa_free_buffer;
+			desc[num_descs].user1 = reg_write;
+			num_descs++;
+		}
+	}
+
+	/* Disable AGGR on IPA->Q6 pipes */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		ep_idx = ipa2_get_ep_mapping(client_idx);
+		if (ep_idx == -1)
+			continue;
+		if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+			IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx)) {
+			reg_write = kzalloc(sizeof(*reg_write), GFP_KERNEL);
+
+			if (!reg_write) {
+				IPAERR("failed to allocate memory\n");
+				BUG();
+			}
+
+			ipa_q6_disable_agg_reg(reg_write, ep_idx);
+
+			desc[num_descs].opcode = IPA_REGISTER_WRITE;
+			desc[num_descs].pyld = reg_write;
+			desc[num_descs].len = sizeof(*reg_write);
+			desc[num_descs].type = IPA_IMM_CMD_DESC;
+			desc[num_descs].callback = ipa_free_buffer;
+			desc[num_descs].user1 = reg_write;
+			num_descs++;
+		}
+	}
+
+	/* Will wait 150msecs for IPA tag process completion */
+	retval = ipa_tag_process(desc, num_descs,
+				 msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+	if (retval) {
+		IPAERR("TAG process failed! (error %d)\n", retval);
+		/* For timeout error ipa_free_buffer cb will free user1 */
+		if (retval != -ETIME) {
+			for (index = 0; index < num_descs; index++)
+				kfree(desc[index].user1);
+			retval = -EINVAL;
+		}
+	}
+
+	kfree(desc);
+
+	return retval;
+}
+
+/**
+* ipa_q6_pre_shutdown_cleanup() - A cleanup for all Q6 related configuration
+*                    in IPA HW before modem shutdown. This is performed in
+*                    case of SSR.
+*
+* Return codes:
+* 0: success
+* This is a mandatory procedure; if one of the steps fails, the
+* AP needs to restart.
+*/
+int ipa_q6_pre_shutdown_cleanup(void)
+{
+	/* If the uC has notified the APPS of a ZIP engine error,
+	 * the APPS needs to assert (this is a non-recoverable error).
+	 */
+	if (ipa_ctx->uc_ctx.uc_zip_error)
+		BUG();
+
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("Q6");
+
+	/*
+	 * Do not delay Q6 pipes here. This may result in IPA reading a
+	 * DMA_TASK with lock bit set and then Q6 pipe delay is set. In this
+	 * situation IPA will remain locked as the DMA_TASK with unlock
+	 * bit will not be read by IPA as pipe delay is enabled. IPA uC will
+	 * wait for pipe to be empty before issuing a BAM pipe reset.
+	 */
+
+	if (ipa_q6_monitor_holb_mitigation(false)) {
+		IPAERR("Failed to disable HOLB monitoring on Q6 pipes\n");
+		BUG();
+	}
+
+	if (ipa_q6_avoid_holb(false)) {
+		IPAERR("Failed to set HOLB on Q6 pipes\n");
+		BUG();
+	}
+	if (ipa_q6_clean_q6_tables()) {
+		IPAERR("Failed to clean Q6 tables\n");
+		BUG();
+	}
+	if (ipa_q6_set_ex_path_dis_agg()) {
+		IPAERR("Failed to disable aggregation on Q6 pipes\n");
+		BUG();
+	}
+
+	ipa_ctx->q6_proxy_clk_vote_valid = true;
+	return 0;
+}
+
+/**
+* ipa_q6_post_shutdown_cleanup() - A cleanup for the Q6 pipes
+*                    in IPA HW after modem shutdown. This is performed
+*                    in case of SSR.
+*
+* Return codes:
+* 0: success
+* This is a mandatory procedure; if one of the steps fails, the
+* AP needs to restart.
+*/
+int ipa_q6_post_shutdown_cleanup(void)
+{
+	int client_idx;
+	int res;
+
+	/*
+	 * Do not delay Q6 pipes here. This may result in IPA reading a
+	 * DMA_TASK with lock bit set and then Q6 pipe delay is set. In this
+	 * situation IPA will remain locked as the DMA_TASK with unlock
+	 * bit will not be read by IPA as pipe delay is enabled. IPA uC will
+	 * wait for pipe to be empty before issuing a BAM pipe reset.
+	 */
+
+	if (ipa_q6_avoid_holb(true)) {
+		IPAERR("Failed to set HOLB on Q6 ZIP pipes\n");
+		BUG();
+	}
+
+	if (!ipa_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded, won't reset Q6 pipes\n");
+		return 0;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+		if (IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client_idx) ||
+			IPA_CLIENT_IS_Q6_ZIP_CONS(client_idx) ||
+			IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client_idx) ||
+			IPA_CLIENT_IS_Q6_ZIP_PROD(client_idx)) {
+			res = ipa_uc_reset_pipe(client_idx);
+			if (res)
+				BUG();
+		}
+	return 0;
+}
+
+int _ipa_init_sram_v2(void)
+{
+	u32 *ipa_sram_mmio;
+	unsigned long phys_addr;
+	struct ipa_hw_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipa_desc desc = {0};
+	struct ipa_mem_buffer mem;
+	int rc = 0;
+
+	phys_addr = ipa_ctx->ipa_wrapper_base +
+		ipa_ctx->ctrl->ipa_reg_base_ofst +
+		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(
+			ipa_ctx->smem_restricted_bytes / 4);
+
+	ipa_sram_mmio = ioremap(phys_addr,
+			ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
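+/* write a 32-bit word into the u32 slot just before byte offset 'ofst' */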
+#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val)
+
+	IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(uc_info_ofst), IPA_MEM_CANARY_VAL);
+
+	iounmap(ipa_sram_mmio);
+
+	mem.size = IPA_STATUS_CLEAR_SIZE;
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+			GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = IPA_STATUS_CLEAR_OFST;
+	desc.opcode = IPA_DMA_SHARED_MEM;
+	desc.pyld = &cmd;
+	desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+	desc.type = IPA_IMM_CMD_DESC;
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return rc;
+}
+
+int _ipa_init_sram_v2_5(void)
+{
+	u32 *ipa_sram_mmio;
+	unsigned long phys_addr;
+
+	phys_addr = ipa_ctx->ipa_wrapper_base +
+			ipa_ctx->ctrl->ipa_reg_base_ofst +
+			IPA_SRAM_SW_FIRST_v2_5;
+
+	ipa_sram_mmio = ioremap(phys_addr,
+		ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+#define IPA_SRAM_SET(ofst, val) (ipa_sram_mmio[(ofst - 4) / 4] = val)
+
+	IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v4_flt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst) - 4, IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v6_flt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst) - 4, IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v4_rt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(v6_rt_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4,
+							IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(modem_hdr_proc_ctx_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(modem_ofst), IPA_MEM_CANARY_VAL);
+	IPA_SRAM_SET(IPA_MEM_PART(end_ofst), IPA_MEM_CANARY_VAL);
+
+	iounmap(ipa_sram_mmio);
+
+	return 0;
+}
+
+static inline void ipa_sram_set_canary(u32 *sram_mmio, int offset)
+{
+	/* Set 4 bytes of CANARY before the offset */
+	sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
+}
+
+int _ipa_init_sram_v2_6L(void)
+{
+	u32 *ipa_sram_mmio;
+	unsigned long phys_addr;
+
+	phys_addr = ipa_ctx->ipa_wrapper_base +
+		ipa_ctx->ctrl->ipa_reg_base_ofst +
+		IPA_SRAM_SW_FIRST_v2_5;
+
+	ipa_sram_mmio = ioremap(phys_addr,
+		ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	/* Consult ipa_ram_mmap.h for the location of the CANARY values */
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst) - 4);
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst) - 4);
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst) - 4);
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio,
+			    IPA_MEM_PART(modem_comp_decomp_ofst) - 4);
+	ipa_sram_set_canary(ipa_sram_mmio,
+			    IPA_MEM_PART(modem_comp_decomp_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
+	ipa_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+
+	iounmap(ipa_sram_mmio);
+
+	return 0;
+}
+
+int _ipa_init_hdr_v2(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_hdr_init_local cmd;
+	int rc = 0;
+
+	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+			GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+
+	cmd.hdr_table_src_addr = mem.phys_base;
+	cmd.size_hdr_table = mem.size;
+	cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_ofst);
+
+	desc.opcode = IPA_HDR_INIT_LOCAL;
+	desc.pyld = &cmd;
+	desc.len = sizeof(struct ipa_hdr_init_local);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return rc;
+}
+
+int _ipa_init_hdr_v2_5(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_hdr_init_local cmd = { 0 };
+	struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+
+	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+
+	cmd.hdr_table_src_addr = mem.phys_base;
+	cmd.size_hdr_table = mem.size;
+	cmd.hdr_table_dst_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_ofst);
+
+	desc.opcode = IPA_HDR_INIT_LOCAL;
+	desc.pyld = &cmd;
+	desc.len = sizeof(struct ipa_hdr_init_local);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		dma_free_coherent(ipa_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
+		IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+	memset(&desc, 0, sizeof(desc));
+
+	dma_cmd.system_addr = mem.phys_base;
+	dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
+	dma_cmd.size = mem.size;
+	desc.opcode = IPA_DMA_SHARED_MEM;
+	desc.pyld = &dma_cmd;
+	desc.len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		dma_free_coherent(ipa_ctx->pdev,
+			mem.size,
+			mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST,
+		dma_cmd.local_addr);
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	return 0;
+}
+
+int _ipa_init_hdr_v2_6L(void)
+{
+	/* Same implementation as IPAv2 */
+	return _ipa_init_hdr_v2();
+}
+
+int _ipa_init_rt4_v2(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_ip_v4_routing_init v4_cmd;
+	u32 *entry;
+	int i;
+	int rc = 0;
+
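+	/* set the modem's v4 routing table index range in the rt index bitmap */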
+	for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v4_modem_rt_index_hi);
+		i++)
+		ipa_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
+	IPADBG("v4 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v4]);
+
+	mem.size = IPA_MEM_PART(v4_rt_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+			GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	entry = mem.base;
+	for (i = 0; i < IPA_MEM_PART(v4_num_index); i++) {
+		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entry++;
+	}
+
+	desc.opcode = IPA_IP_V4_ROUTING_INIT;
+	v4_cmd.ipv4_rules_addr = mem.phys_base;
+	v4_cmd.size_ipv4_rules = mem.size;
+	v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_rt_ofst);
+	IPADBG("putting Routing IPv4 rules to phys 0x%x",
+				v4_cmd.ipv4_addr);
+
+	desc.pyld = &v4_cmd;
+	desc.len = sizeof(struct ipa_ip_v4_routing_init);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return rc;
+}
+
+int _ipa_init_rt6_v2(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_ip_v6_routing_init v6_cmd;
+	u32 *entry;
+	int i;
+	int rc = 0;
+
+	for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v6_modem_rt_index_hi);
+		i++)
+		ipa_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
+	IPADBG("v6 rt bitmap 0x%lx\n", ipa_ctx->rt_idx_bitmap[IPA_IP_v6]);
+
+	mem.size = IPA_MEM_PART(v6_rt_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+			GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	entry = mem.base;
+	for (i = 0; i < IPA_MEM_PART(v6_num_index); i++) {
+		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entry++;
+	}
+
+	desc.opcode = IPA_IP_V6_ROUTING_INIT;
+	v6_cmd.ipv6_rules_addr = mem.phys_base;
+	v6_cmd.size_ipv6_rules = mem.size;
+	v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_rt_ofst);
+	IPADBG("putting Routing IPv6 rules to phys 0x%x",
+				v6_cmd.ipv6_addr);
+
+	desc.pyld = &v6_cmd;
+	desc.len = sizeof(struct ipa_ip_v6_routing_init);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return rc;
+}
+
+int _ipa_init_flt4_v2(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_ip_v4_filter_init v4_cmd;
+	u32 *entry;
+	int i;
+	int rc = 0;
+
+	mem.size = IPA_MEM_PART(v4_flt_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+			GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	entry = mem.base;
+
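+	/* first word of the table: bit 0 plus bits 1..20 set (global + per-pipe slots) */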
+	*entry = ((0xFFFFF << 1) | 0x1);
+	entry++;
+
+	for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) {
+		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entry++;
+	}
+
+	desc.opcode = IPA_IP_V4_FILTER_INIT;
+	v4_cmd.ipv4_rules_addr = mem.phys_base;
+	v4_cmd.size_ipv4_rules = mem.size;
+	v4_cmd.ipv4_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_flt_ofst);
+	IPADBG("putting Filtering IPv4 rules to phys 0x%x",
+				v4_cmd.ipv4_addr);
+
+	desc.pyld = &v4_cmd;
+	desc.len = sizeof(struct ipa_ip_v4_filter_init);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return rc;
+}
+
+int _ipa_init_flt6_v2(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_ip_v6_filter_init v6_cmd;
+	u32 *entry;
+	int i;
+	int rc = 0;
+
+	mem.size = IPA_MEM_PART(v6_flt_size);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+			GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	entry = mem.base;
+
+	*entry = (0xFFFFF << 1) | 0x1;
+	entry++;
+
+	for (i = 0; i <= ipa_ctx->ipa_num_pipes; i++) {
+		*entry = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entry++;
+	}
+
+	desc.opcode = IPA_IP_V6_FILTER_INIT;
+	v6_cmd.ipv6_rules_addr = mem.phys_base;
+	v6_cmd.size_ipv6_rules = mem.size;
+	v6_cmd.ipv6_addr = ipa_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_flt_ofst);
+	IPADBG("putting Filtering IPv6 rules to phys 0x%x",
+				v6_cmd.ipv6_addr);
+
+	desc.pyld = &v6_cmd;
+	desc.len = sizeof(struct ipa_ip_v6_filter_init);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return rc;
+}
+
+static int ipa_setup_apps_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	/* CMD OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+	sys_in.skip_ep_cfg = true;
+	if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+	IPADBG("Apps to IPA cmd pipe is connected\n");
+
+	ipa_ctx->ctrl->ipa_init_sram();
+	IPADBG("SRAM initialized\n");
+
+	ipa_ctx->ctrl->ipa_init_hdr();
+	IPADBG("HDR initialized\n");
+
+	ipa_ctx->ctrl->ipa_init_rt4();
+	IPADBG("V4 RT initialized\n");
+
+	ipa_ctx->ctrl->ipa_init_rt6();
+	IPADBG("V6 RT initialized\n");
+
+	ipa_ctx->ctrl->ipa_init_flt4();
+	IPADBG("V4 FLT initialized\n");
+
+	ipa_ctx->ctrl->ipa_init_flt6();
+	IPADBG("V6 FLT initialized\n");
+
+	if (ipa_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("Exception path was successfully set");
+
+	if (ipa_setup_dflt_rt_tables()) {
+		IPAERR(":fail to setup dflt routes\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("default routing was set\n");
+
+	/* LAN IN (IPA->A5) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
+		sys_in.ipa_ep_cfg.hdr.hdr_a5_mux = 1;
+		sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_A5_MUX_HEADER_LENGTH;
+	} else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+		sys_in.notify = ipa_lan_rx_cb;
+		sys_in.priv = NULL;
+		sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+		sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
+		sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+	} else {
+		WARN_ON(1);
+	}
+
+	/*
+	 * ipa_lan_rx_cb() is intended to notify the source EP about a packet
+	 * being received on LAN_CONS by calling the source EP call-back.
+	 * There could be a race condition with calling this call-back: another
+	 * thread may nullify it, e.g. on EP disconnect.
+	 * This lock protects the access to the source EP call-back.
+	 */
+	spin_lock_init(&ipa_ctx->disconnect_lock);
+	if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+
+	/* LAN-WAN OUT (A5->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	if (ipa2_setup_sys_pipe(&sys_in, &ipa_ctx->clnt_hdl_data_out)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_data_out;
+	}
+
+	return 0;
+
+fail_data_out:
+	ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+fail_schedule_delayed_work:
+	if (ipa_ctx->dflt_v6_rt_rule_hdl)
+		__ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
+	if (ipa_ctx->dflt_v4_rt_rule_hdl)
+		__ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
+	if (ipa_ctx->excp_hdr_hdl)
+		__ipa_del_hdr(ipa_ctx->excp_hdr_hdl);
+	ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+fail_cmd:
+	return result;
+}
+
+static void ipa_teardown_apps_pipes(void)
+{
+	ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_out);
+	ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_data_in);
+	__ipa_del_rt_rule(ipa_ctx->dflt_v6_rt_rule_hdl);
+	__ipa_del_rt_rule(ipa_ctx->dflt_v4_rt_rule_hdl);
+	__ipa_del_hdr(ipa_ctx->excp_hdr_hdl);
+	ipa2_teardown_sys_pipe(ipa_ctx->clnt_hdl_cmd);
+}
+
+#ifdef CONFIG_COMPAT
+long compat_ipa_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	struct ipa_ioc_nat_alloc_mem32 nat_mem32;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+
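+	/*
+	 * Translate 32-bit ioctl codes to their native counterparts before
+	 * calling ipa_ioctl(); IPA_IOC_ALLOC_NAT_MEM32 is handled inline
+	 * since its size/offset fields use compat types.
+	 */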
+	switch (cmd) {
+	case IPA_IOC_ADD_HDR32:
+		cmd = IPA_IOC_ADD_HDR;
+		break;
+	case IPA_IOC_DEL_HDR32:
+		cmd = IPA_IOC_DEL_HDR;
+		break;
+	case IPA_IOC_ADD_RT_RULE32:
+		cmd = IPA_IOC_ADD_RT_RULE;
+		break;
+	case IPA_IOC_DEL_RT_RULE32:
+		cmd = IPA_IOC_DEL_RT_RULE;
+		break;
+	case IPA_IOC_ADD_FLT_RULE32:
+		cmd = IPA_IOC_ADD_FLT_RULE;
+		break;
+	case IPA_IOC_DEL_FLT_RULE32:
+		cmd = IPA_IOC_DEL_FLT_RULE;
+		break;
+	case IPA_IOC_GET_RT_TBL32:
+		cmd = IPA_IOC_GET_RT_TBL;
+		break;
+	case IPA_IOC_COPY_HDR32:
+		cmd = IPA_IOC_COPY_HDR;
+		break;
+	case IPA_IOC_QUERY_INTF32:
+		cmd = IPA_IOC_QUERY_INTF;
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
+		break;
+	case IPA_IOC_GET_HDR32:
+		cmd = IPA_IOC_GET_HDR;
+		break;
+	case IPA_IOC_ALLOC_NAT_MEM32:
+		if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
+			sizeof(struct ipa_ioc_nat_alloc_mem32))) {
+			retval = -EFAULT;
+			goto ret;
+		}
+		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+				IPA_RESOURCE_NAME_MAX);
+		nat_mem.size = (size_t)nat_mem32.size;
+		nat_mem.offset = (off_t)nat_mem32.offset;
+
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa2_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			goto ret;
+		}
+		nat_mem32.offset = (compat_off_t)nat_mem.offset;
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
+			sizeof(struct ipa_ioc_nat_alloc_mem32))) {
+			retval = -EFAULT;
+		}
+ret:
+		return retval;
+	case IPA_IOC_V4_INIT_NAT32:
+		cmd = IPA_IOC_V4_INIT_NAT;
+		break;
+	case IPA_IOC_NAT_DMA32:
+		cmd = IPA_IOC_NAT_DMA;
+		break;
+	case IPA_IOC_V4_DEL_NAT32:
+		cmd = IPA_IOC_V4_DEL_NAT;
+		break;
+	case IPA_IOC_GET_NAT_OFFSET32:
+		cmd = IPA_IOC_GET_NAT_OFFSET;
+		break;
+	case IPA_IOC_PULL_MSG32:
+		cmd = IPA_IOC_PULL_MSG;
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY32:
+		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY32:
+		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ32:
+		cmd = IPA_IOC_GENERATE_FLT_EQ;
+		break;
+	case IPA_IOC_QUERY_RT_TBL_INDEX32:
+		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
+		break;
+	case IPA_IOC_WRITE_QMAPID32:
+		cmd = IPA_IOC_WRITE_QMAPID;
+		break;
+	case IPA_IOC_MDFY_FLT_RULE32:
+		cmd = IPA_IOC_MDFY_FLT_RULE;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
+		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
+		break;
+	case IPA_IOC_MDFY_RT_RULE32:
+		cmd = IPA_IOC_MDFY_RT_RULE;
+		break;
+	case IPA_IOC_COMMIT_HDR:
+	case IPA_IOC_RESET_HDR:
+	case IPA_IOC_COMMIT_RT:
+	case IPA_IOC_RESET_RT:
+	case IPA_IOC_COMMIT_FLT:
+	case IPA_IOC_RESET_FLT:
+	case IPA_IOC_DUMP:
+	case IPA_IOC_PUT_RT_TBL:
+	case IPA_IOC_PUT_HDR:
+	case IPA_IOC_SET_FLT:
+	case IPA_IOC_QUERY_EP_MAPPING:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return ipa_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations ipa_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_open,
+	.read = ipa_read,
+	.unlocked_ioctl = ipa_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_ipa_ioctl,
+#endif
+};
+
+static int ipa_get_clks(struct device *dev)
+{
+	ipa_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa_clk)) {
+		if (ipa_clk != ERR_PTR(-EPROBE_DEFER))
+			IPAERR("fail to get ipa clk\n");
+		return PTR_ERR(ipa_clk);
+	}
+
+	if (smmu_info.present && smmu_info.arm_smmu) {
+		smmu_clk = clk_get(dev, "smmu_clk");
+		if (IS_ERR(smmu_clk)) {
+			if (smmu_clk != ERR_PTR(-EPROBE_DEFER))
+				IPAERR("fail to get smmu clk\n");
+			return PTR_ERR(smmu_clk);
+		}
+
+		if (clk_get_rate(smmu_clk) == 0) {
+			long rate = clk_round_rate(smmu_clk, 1000);
+
+			clk_set_rate(smmu_clk, rate);
+		}
+	}
+
+	if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
+		ipa_cnoc_clk = clk_get(dev, "iface_clk");
+		if (IS_ERR(ipa_cnoc_clk)) {
+			ipa_cnoc_clk = NULL;
+			IPAERR("fail to get cnoc clk\n");
+			return -ENODEV;
+		}
+
+		ipa_clk_src = clk_get(dev, "core_src_clk");
+		if (IS_ERR(ipa_clk_src)) {
+			ipa_clk_src = NULL;
+			IPAERR("fail to get ipa clk src\n");
+			return -ENODEV;
+		}
+
+		sys_noc_ipa_axi_clk = clk_get(dev, "bus_clk");
+		if (IS_ERR(sys_noc_ipa_axi_clk)) {
+			sys_noc_ipa_axi_clk = NULL;
+			IPAERR("fail to get sys_noc_ipa_axi clk\n");
+			return -ENODEV;
+		}
+
+		ipa_inactivity_clk = clk_get(dev, "inactivity_clk");
+		if (IS_ERR(ipa_inactivity_clk)) {
+			ipa_inactivity_clk = NULL;
+			IPAERR("fail to get inactivity clk\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+void _ipa_enable_clks_v2_0(void)
+{
+	IPADBG("enabling gcc_ipa_clk\n");
+	if (ipa_clk) {
+		clk_prepare(ipa_clk);
+		clk_enable(ipa_clk);
+		IPADBG("curr_ipa_clk_rate=%d", ipa_ctx->curr_ipa_clk_rate);
+		clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
+		ipa_uc_notify_clk_state(true);
+	} else {
+		WARN_ON(1);
+	}
+
+	if (smmu_clk)
+		clk_prepare_enable(smmu_clk);
+	/* Enable the BAM IRQ. */
+	ipa_sps_irq_control_all(true);
+	ipa_suspend_apps_pipes(false);
+}
+
+void _ipa_enable_clks_v1_1(void)
+{
+
+	if (ipa_cnoc_clk) {
+		clk_prepare(ipa_cnoc_clk);
+		clk_enable(ipa_cnoc_clk);
+		clk_set_rate(ipa_cnoc_clk, IPA_CNOC_CLK_RATE);
+	} else {
+		WARN_ON(1);
+	}
+
+	if (ipa_clk_src)
+		clk_set_rate(ipa_clk_src,
+				ipa_ctx->curr_ipa_clk_rate);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_prepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_prepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_inactivity_clk)
+		clk_prepare(ipa_inactivity_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_enable(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_enable(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_inactivity_clk)
+		clk_enable(ipa_inactivity_clk);
+	else
+		WARN_ON(1);
+
+}
+
+static unsigned int ipa_get_bus_vote(void)
+{
+	unsigned int idx = 1;
+
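+	/* map the current IPA clock rate to a bus-scaling usecase index */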
+	if (ipa_ctx->curr_ipa_clk_rate == ipa_ctx->ctrl->ipa_clk_rate_svs) {
+		idx = 1;
+	} else if (ipa_ctx->curr_ipa_clk_rate ==
+			ipa_ctx->ctrl->ipa_clk_rate_nominal) {
+		if (ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
+			idx = 1;
+		else
+			idx = 2;
+	} else if (ipa_ctx->curr_ipa_clk_rate ==
+			ipa_ctx->ctrl->ipa_clk_rate_turbo) {
+		idx = ipa_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
+	} else {
+		WARN_ON(1);
+	}
+
+	IPADBG("curr %d idx %d\n", ipa_ctx->curr_ipa_clk_rate, idx);
+
+	return idx;
+}
+
+/**
+* ipa_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_enable_clks(void)
+{
+	IPADBG("enabling IPA clocks and bus voting\n");
+
+	ipa_ctx->ctrl->ipa_enable_clks();
+
+	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+		if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl,
+		    ipa_get_bus_vote()))
+			WARN_ON(1);
+}
+
+void _ipa_disable_clks_v1_1(void)
+{
+
+	if (ipa_inactivity_clk)
+		clk_disable_unprepare(ipa_inactivity_clk);
+	else
+		WARN_ON(1);
+
+	if (sys_noc_ipa_axi_clk)
+		clk_disable_unprepare(sys_noc_ipa_axi_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_clk)
+		clk_disable_unprepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (ipa_cnoc_clk)
+		clk_disable_unprepare(ipa_cnoc_clk);
+	else
+		WARN_ON(1);
+
+}
+
+void _ipa_disable_clks_v2_0(void)
+{
+	IPADBG("disabling gcc_ipa_clk\n");
+	ipa_suspend_apps_pipes(true);
+	ipa_sps_irq_control_all(false);
+	ipa_uc_notify_clk_state(false);
+	if (ipa_clk)
+		clk_disable_unprepare(ipa_clk);
+	else
+		WARN_ON(1);
+
+	if (smmu_clk)
+		clk_disable_unprepare(smmu_clk);
+}
+
+/**
+* ipa_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa_disable_clks(void)
+{
+	IPADBG("disabling IPA clocks and bus voting\n");
+
+	ipa_ctx->ctrl->ipa_disable_clks();
+
+	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+		if (msm_bus_scale_client_update_request(ipa_ctx->ipa_bus_hdl,
+		    0))
+			WARN_ON(1);
+}
+
+/**
+ * ipa_start_tag_process() - Send TAG packet and wait for it to come back
+ *
+ * This function is called prior to clock gating when active client counter
+ * is 1. TAG process ensures that there are no packets inside IPA HW that
+ * were not submitted to peer's BAM. During TAG process all aggregation frames
+ * are (force) closed.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_start_tag_process(struct work_struct *work)
+{
+	int res;
+
+	IPADBG("starting TAG process\n");
+	/* close aggregation frames on all pipes */
+	res = ipa_tag_aggr_force_close(-1);
+	if (res)
+		IPAERR("ipa_tag_aggr_force_close failed %d\n", res);
+
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+
+	IPADBG("TAG process done\n");
+}
+
+/**
+* ipa2_active_clients_log_mod() - Log a modification in the active clients
+* reference count
+*
+* This method logs any modification in the active clients reference count:
+* It logs the modification in the circular history buffer
+* It logs the modification in the hash table - looking for an entry,
+* creating one if needed and deleting one if needed.
+*
+* @id: ipa2_active client logging info struct to hold the log information
+* @inc: a boolean variable to indicate whether the modification is an increase
+* or decrease
+* @int_ctx: a boolean variable to indicate whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Find the hash in the table
+*    1) If found, increase or decrease the reference count
+*    2) If not found, allocate a new hash table entry struct and initialize it
+* - Remove and deallocate unneeded data structure
+* - Log the call in the circular history buffer (unless it is a simple call)
+*/
+void ipa2_active_clients_log_mod(struct ipa_active_client_logging_info *id,
+		bool inc, bool int_ctx)
+{
+	char temp_str[IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN];
+	unsigned long long t;
+	unsigned long nanosec_rem;
+	struct ipa2_active_client_htable_entry *hentry;
+	struct ipa2_active_client_htable_entry *hfound;
+	u32 hkey;
+	char str_to_hash[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+	hfound = NULL;
+	memset(str_to_hash, 0, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	strlcpy(str_to_hash, id->id_string, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	hkey = arch_fast_hash(str_to_hash, IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN,
+			0);
+	hash_for_each_possible(ipa_ctx->ipa2_active_clients_logging.htable,
+			hentry, list, hkey) {
+		if (!strcmp(hentry->id_string, id->id_string)) {
+			hentry->count = hentry->count + (inc ? 1 : -1);
+			hfound = hentry;
+		}
+	}
+	if (hfound == NULL) {
+		hentry = NULL;
+		hentry = kzalloc(sizeof(
+				struct ipa2_active_client_htable_entry),
+				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+		if (hentry == NULL) {
+			IPAERR("failed allocating active clients hash entry");
+			return;
+		}
+		hentry->type = id->type;
+		strlcpy(hentry->id_string, id->id_string,
+				IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN);
+		INIT_HLIST_NODE(&hentry->list);
+		hentry->count = inc ? 1 : -1;
+		hash_add(ipa_ctx->ipa2_active_clients_logging.htable,
+				&hentry->list, hkey);
+	} else if (hfound->count == 0) {
+		hash_del(&hfound->list);
+		kfree(hfound);
+	}
+
+	if (id->type != SIMPLE) {
+		t = local_clock();
+		nanosec_rem = do_div(t, 1000000000) / 1000;
+		snprintf(temp_str, IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN,
+				inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+						"[%5lu.%06lu] v %s, %s: %d",
+				(unsigned long)t, nanosec_rem,
+				id->id_string, id->file, id->line);
+		ipa2_active_clients_log_insert(temp_str);
+	}
+}
+
+void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa2_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa2_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+* ipa2_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Please do not use this API, use the wrapper macros instead (ipa_i.h)
+* IPA2_ACTIVE_CLIENTS_INC_XXXX();
+*
+* Return codes:
+* None
+*/
+void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	ipa_active_clients_lock();
+	ipa2_active_clients_log_inc(id, false);
+	ipa_ctx->ipa_active_clients.cnt++;
+	if (ipa_ctx->ipa_active_clients.cnt == 1)
+		ipa_enable_clks();
+	IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
+	ipa_active_clients_unlock();
+}
+
+/**
+* ipa2_inc_client_enable_clks_no_block() - Only increment the number of active
+* clients if no asynchronous actions should be done. Asynchronous actions are
+* locking a mutex and waking up IPA HW.
+*
+* Please do not use this API, use the wrapper macros instead (ipa_i.h)
+*
+*
+* Return codes: 0 for success
+*		-EPERM if an asynchronous action should have been done
+*/
+int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id)
+{
+	int res = 0;
+	unsigned long flags;
+
+	if (ipa_active_clients_trylock(&flags) == 0)
+		return -EPERM;
+
+	if (ipa_ctx->ipa_active_clients.cnt == 0) {
+		res = -EPERM;
+		goto bail;
+	}
+
+	ipa2_active_clients_log_inc(id, true);
+
+	ipa_ctx->ipa_active_clients.cnt++;
+	IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
+bail:
+	ipa_active_clients_trylock_unlock(&flags);
+
+	return res;
+}
+
+/**
+ * ipa2_dec_client_disable_clks() - Decrease active clients counter
+ *
+ * If there are no active clients, this function also starts the TAG
+ * process. When the TAG process ends, IPA clocks will be gated.
+ * The start_tag_process_again flag is set during this function to signal the
+ * TAG process to start again, as another client may have sent data to IPA.
+ *
+ * Please do not use this API, use the wrapper macros instead (ipa_i.h)
+ * IPA2_ACTIVE_CLIENTS_DEC_XXXX();
+ *
+ * Return codes:
+ * None
+ */
+void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+	struct ipa_active_client_logging_info log_info;
+
+	ipa_active_clients_lock();
+	ipa2_active_clients_log_dec(id, false);
+	ipa_ctx->ipa_active_clients.cnt--;
+	IPADBG("active clients = %d\n", ipa_ctx->ipa_active_clients.cnt);
+	if (ipa_ctx->ipa_active_clients.cnt == 0) {
+		if (ipa_ctx->tag_process_before_gating) {
+			IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+					"TAG_PROCESS");
+			ipa2_active_clients_log_inc(&log_info, false);
+			ipa_ctx->tag_process_before_gating = false;
+			/*
+			 * When TAG process ends, active clients will be
+			 * decreased
+			 */
+			ipa_ctx->ipa_active_clients.cnt = 1;
+			queue_work(ipa_ctx->power_mgmt_wq, &ipa_tag_work);
+		} else {
+			ipa_disable_clks();
+		}
+	}
+	ipa_active_clients_unlock();
+}
+
+/**
+* ipa_inc_acquire_wakelock() - Increase active clients counter, and
+* acquire wakelock if necessary
+*
+* Return codes:
+* None
+*/
+void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client)
+{
+	unsigned long flags;
+
+	if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
+		return;
+	spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+	if (ipa_ctx->wakelock_ref_cnt.cnt & (1 << ref_client))
+		IPAERR("client enum %d mask already set. ref cnt = %d\n",
+		ref_client, ipa_ctx->wakelock_ref_cnt.cnt);
+	ipa_ctx->wakelock_ref_cnt.cnt |= (1 << ref_client);
+	if (ipa_ctx->wakelock_ref_cnt.cnt)
+		__pm_stay_awake(&ipa_ctx->w_lock);
+	IPADBG("active wakelock ref cnt = %d client enum %d\n",
+		ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
+	spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+/**
+ * ipa_dec_release_wakelock() - Decrease active clients counter
+ *
+ * If the ref count drops to 0, release the wakelock.
+ *
+ * Return codes:
+ * None
+ */
+void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client)
+{
+	unsigned long flags;
+
+	if (ref_client >= IPA_WAKELOCK_REF_CLIENT_MAX)
+		return;
+	spin_lock_irqsave(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa_ctx->wakelock_ref_cnt.cnt &= ~(1 << ref_client);
+	IPADBG("active wakelock ref cnt = %d client enum %d\n",
+		ipa_ctx->wakelock_ref_cnt.cnt, ref_client);
+	if (ipa_ctx->wakelock_ref_cnt.cnt == 0)
+		__pm_relax(&ipa_ctx->w_lock);
+	spin_unlock_irqrestore(&ipa_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+static int ipa_setup_bam_cfg(const struct ipa_plat_drv_res *res)
+{
+	void *ipa_bam_mmio;
+	int reg_val;
+	int retval = 0;
+
+	ipa_bam_mmio = ioremap(res->ipa_mem_base + IPA_BAM_REG_BASE_OFST,
+			IPA_BAM_REMAP_SIZE);
+	if (!ipa_bam_mmio)
+		return -ENOMEM;
+	switch (ipa_ctx->ipa_hw_type) {
+	case IPA_HW_v1_1:
+		reg_val = IPA_BAM_CNFG_BITS_VALv1_1;
+		break;
+	case IPA_HW_v2_0:
+	case IPA_HW_v2_5:
+	case IPA_HW_v2_6L:
+		reg_val = IPA_BAM_CNFG_BITS_VALv2_0;
+		break;
+	default:
+		retval = -EPERM;
+		goto fail;
+	}
+	if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5)
+		ipa_write_reg(ipa_bam_mmio, IPA_BAM_CNFG_BITS_OFST, reg_val);
+fail:
+	iounmap(ipa_bam_mmio);
+
+	return retval;
+}
+
+int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps)
+{
+	enum ipa_voltage_level needed_voltage;
+	u32 clk_rate;
+
+	IPADBG("floor_voltage=%d, bandwidth_mbps=%u",
+					floor_voltage, bandwidth_mbps);
+
+	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
+		floor_voltage >= IPA_VOLTAGE_MAX) {
+		IPAERR("bad voltage\n");
+		return -EINVAL;
+	}
+
+	if (ipa_ctx->enable_clock_scaling) {
+		IPADBG("Clock scaling is enabled\n");
+		if (bandwidth_mbps >=
+			ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo)
+			needed_voltage = IPA_VOLTAGE_TURBO;
+		else if (bandwidth_mbps >=
+			ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal)
+			needed_voltage = IPA_VOLTAGE_NOMINAL;
+		else
+			needed_voltage = IPA_VOLTAGE_SVS;
+	} else {
+		IPADBG("Clock scaling is disabled\n");
+		needed_voltage = IPA_VOLTAGE_NOMINAL;
+	}
+
+	needed_voltage = max(needed_voltage, floor_voltage);
+	switch (needed_voltage) {
+	case IPA_VOLTAGE_SVS:
+		clk_rate = ipa_ctx->ctrl->ipa_clk_rate_svs;
+		break;
+	case IPA_VOLTAGE_NOMINAL:
+		clk_rate = ipa_ctx->ctrl->ipa_clk_rate_nominal;
+		break;
+	case IPA_VOLTAGE_TURBO:
+		clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo;
+		break;
+	default:
+		IPAERR("bad voltage\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (clk_rate == ipa_ctx->curr_ipa_clk_rate) {
+		IPADBG("Same voltage\n");
+		return 0;
+	}
+
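+	/* The new rate is stored unconditionally but applied only while clients hold the clocks on */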
+	ipa_active_clients_lock();
+	ipa_ctx->curr_ipa_clk_rate = clk_rate;
+	IPADBG("setting clock rate to %u\n", ipa_ctx->curr_ipa_clk_rate);
+	if (ipa_ctx->ipa_active_clients.cnt > 0) {
+		clk_set_rate(ipa_clk, ipa_ctx->curr_ipa_clk_rate);
+		if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+			if (msm_bus_scale_client_update_request(
+			    ipa_ctx->ipa_bus_hdl, ipa_get_bus_vote()))
+				WARN_ON(1);
+	} else {
+		IPADBG("clocks are gated, not setting rate\n");
+	}
+	ipa_active_clients_unlock();
+	IPADBG("Done\n");
+	return 0;
+}
+
+static int ipa_init_flt_block(void)
+{
+	int result = 0;
+
+	/*
+	 * SW workaround for improper filter behavior when neither global nor
+	 * pipe rules are present: always configure a dummy global filter rule
+	 * which results in a miss
+	 */
+	struct ipa_ioc_add_flt_rule *rules;
+	struct ipa_flt_rule_add *rule;
+	struct ipa_ioc_get_rt_tbl rt_lookup;
+	enum ipa_ip_type ip;
+
+	if (ipa_ctx->ipa_hw_type >= IPA_HW_v1_1) {
+		size_t sz = sizeof(struct ipa_ioc_add_flt_rule) +
+		   sizeof(struct ipa_flt_rule_add);
+
+		rules = kmalloc(sz, GFP_KERNEL);
+		if (rules == NULL) {
+			IPAERR("fail to alloc mem for dummy filter rule\n");
+			return -ENOMEM;
+		}
+
+		IPADBG("Adding global rules for IPv4 and IPv6");
+		for (ip = IPA_IP_v4; ip < IPA_IP_MAX; ip++) {
+			memset(&rt_lookup, 0,
+					sizeof(struct ipa_ioc_get_rt_tbl));
+			rt_lookup.ip = ip;
+			strlcpy(rt_lookup.name, IPA_DFLT_RT_TBL_NAME,
+					IPA_RESOURCE_NAME_MAX);
+			ipa2_get_rt_tbl(&rt_lookup);
+			ipa2_put_rt_tbl(rt_lookup.hdl);
+
+			memset(rules, 0, sz);
+			rule = &rules->rules[0];
+			rules->commit = 1;
+			rules->ip = ip;
+			rules->global = 1;
+			rules->num_rules = 1;
+			rule->at_rear = 1;
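+			/*
+			 * Use an invalid L4 protocol / next header and an
+			 * all-ones destination address so the dummy rule
+			 * never matches real traffic.
+			 */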
+			if (ip == IPA_IP_v4) {
+				rule->rule.attrib.attrib_mask =
+					IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR;
+				rule->rule.attrib.u.v4.protocol =
+				   IPA_INVALID_L4_PROTOCOL;
+				rule->rule.attrib.u.v4.dst_addr_mask = ~0;
+				rule->rule.attrib.u.v4.dst_addr = ~0;
+			} else if (ip == IPA_IP_v6) {
+				rule->rule.attrib.attrib_mask =
+					IPA_FLT_NEXT_HDR | IPA_FLT_DST_ADDR;
+				rule->rule.attrib.u.v6.next_hdr =
+					IPA_INVALID_L4_PROTOCOL;
+				rule->rule.attrib.u.v6.dst_addr_mask[0] = ~0;
+				rule->rule.attrib.u.v6.dst_addr_mask[1] = ~0;
+				rule->rule.attrib.u.v6.dst_addr_mask[2] = ~0;
+				rule->rule.attrib.u.v6.dst_addr_mask[3] = ~0;
+				rule->rule.attrib.u.v6.dst_addr[0] = ~0;
+				rule->rule.attrib.u.v6.dst_addr[1] = ~0;
+				rule->rule.attrib.u.v6.dst_addr[2] = ~0;
+				rule->rule.attrib.u.v6.dst_addr[3] = ~0;
+			} else {
+				result = -EINVAL;
+				WARN_ON(1);
+				break;
+			}
+			rule->rule.action = IPA_PASS_TO_ROUTING;
+			rule->rule.rt_tbl_hdl = rt_lookup.hdl;
+			rule->rule.retain_hdr = true;
+
+			if (ipa2_add_flt_rule(rules) ||
+			rules->rules[0].status) {
+
+				result = -EINVAL;
+				WARN_ON(1);
+				break;
+			}
+		}
+		kfree(rules);
+	}
+	return result;
+}
+
+static void ipa_sps_process_irq_schedule_rel(void)
+{
+	queue_delayed_work(ipa_ctx->sps_power_mgmt_wq,
+		&ipa_sps_release_resource_work,
+		msecs_to_jiffies(IPA_SPS_PROD_TIMEOUT_MSEC));
+}
+
+/**
+* ipa_suspend_handler() - Handles the suspend interrupt:
+* wakes up the suspended peripheral by requesting its consumer
+* @interrupt:		Interrupt type
+* @private_data:	The client's private data
+* @interrupt_data:	Interrupt specific information data
+*/
+void ipa_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	enum ipa_rm_resource_name resource;
+	u32 suspend_data =
+		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
+	u32 bmsk = 1;
+	u32 i = 0;
+	int res;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	IPADBG("interrupt=%d, interrupt_data=%u\n", interrupt, suspend_data);
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.tmr_val = 0;
+
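+	/* Walk the per-endpoint suspend bitmap; bit i corresponds to pipe i */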
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		if ((suspend_data & bmsk) && (ipa_ctx->ep[i].valid)) {
+			if (IPA_CLIENT_IS_APPS_CONS(ipa_ctx->ep[i].client)) {
+				/*
+				 * pipe will be unsuspended as part of
+				 * enabling IPA clocks
+				 */
+				if (!atomic_read(
+					&ipa_ctx->sps_pm.dec_clients)
+					) {
+					IPA_ACTIVE_CLIENTS_INC_EP(
+							ipa_ctx->ep[i].client);
+					IPADBG("Pipes un-suspended.\n");
+					IPADBG("Enter poll mode.\n");
+					atomic_set(
+						&ipa_ctx->sps_pm.dec_clients,
+						1);
+					ipa_sps_process_irq_schedule_rel();
+				}
+			} else {
+				resource = ipa2_get_rm_resource_from_ep(i);
+				res = ipa_rm_request_resource_with_timer(
+					resource);
+				if (res == -EPERM &&
+				    IPA_CLIENT_IS_CONS(
+					ipa_ctx->ep[i].client)) {
+					holb_cfg.en = 1;
+					res = ipa2_cfg_ep_holb_by_client(
+					   ipa_ctx->ep[i].client, &holb_cfg);
+					if (res) {
+						IPAERR("holb en fail\n");
+						IPAERR("IPAHW stall\n");
+						BUG();
+					}
+				}
+			}
+		}
+		bmsk = bmsk << 1;
+	}
+}
+
+/**
+* ipa2_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa2_restore_suspend_handler(void)
+{
+	int result = 0;
+
+	result  = ipa2_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+	if (result) {
+		IPAERR("remove handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	result = ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa_suspend_handler, true, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		result = -EPERM;
+	}
+
+	return result;
+}
+
+static int apps_cons_release_resource(void)
+{
+	return 0;
+}
+
+static int apps_cons_request_resource(void)
+{
+	return 0;
+}
+
+static void ipa_sps_release_resource(struct work_struct *work)
+{
+	mutex_lock(&ipa_ctx->sps_pm.sps_pm_lock);
+	/* check whether still need to decrease client usage */
+	if (atomic_read(&ipa_ctx->sps_pm.dec_clients)) {
+		if (atomic_read(&ipa_ctx->sps_pm.eot_activity)) {
+			IPADBG("EOT pending Re-scheduling\n");
+			ipa_sps_process_irq_schedule_rel();
+		} else {
+			atomic_set(&ipa_ctx->sps_pm.dec_clients, 0);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+		}
+	}
+	atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
+	mutex_unlock(&ipa_ctx->sps_pm.sps_pm_lock);
+}
+
+int ipa_create_apps_resource(void)
+{
+	struct ipa_rm_create_params apps_cons_create_params;
+	struct ipa_rm_perf_profile profile;
+	int result = 0;
+
+	memset(&apps_cons_create_params, 0,
+				sizeof(apps_cons_create_params));
+	apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
+	apps_cons_create_params.request_resource = apps_cons_request_resource;
+	apps_cons_create_params.release_resource = apps_cons_release_resource;
+	result = ipa_rm_create_resource(&apps_cons_create_params);
+	if (result) {
+		IPAERR("ipa_rm_create_resource failed\n");
+		return result;
+	}
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+
+	return result;
+}
+
+
+/**
+* ipa_init() - Initialize the IPA Driver
+* @resource_p:	contains platform-specific values from the DTS file
+* @ipa_dev:	the device structure representing the IPA driver
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize the ipa_ctx with:
+*    1) parsed values from the DTS file
+*    2) parameters passed to the module initialization
+*    3) HW values read at runtime (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart the IPA core (HW reset)
+* - Register the IPA BAM with the SPS driver and get a BAM handle
+* - Set the configuration for the IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+*   routing and IPA-tree
+* - Create a memory pool with 4 objects for DMA operations (each object
+*   is 512 bytes long); these objects will be used for TX (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for the ipa_ctx and NAT memory)
+* - Initialize spinlocks (for lists related to A5<->IPA pipes)
+* - Initialize two single-threaded workqueues named "ipa rx wq" and "ipa tx wq"
+* - Initialize Red-Black Trees for the handles of headers, routing rules,
+*   routing tables and filtering rules
+* - Set up all A5<->IPA pipes by calling ipa_setup_apps_pipes
+* - Prepare the descriptors for the system pipes
+* - Initialize the filter block by committing IPv4 and IPv6 default rules
+* - Create an empty routing table in system memory (without committing)
+* - Initialize the pipe memory pool with ipa_pipe_mem_init on supported platforms
+* - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
+*/
+static int ipa_init(const struct ipa_plat_drv_res *resource_p,
+		struct device *ipa_dev)
+{
+	int result = 0;
+	int i;
+	struct sps_bam_props bam_props = { 0 };
+	struct ipa_flt_tbl *flt_tbl;
+	struct ipa_rt_tbl_set *rset;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("IPA Driver initialization started\n");
+
+	/*
+	 * since structure alignment is implementation-dependent, add a
+	 * compile-time check to avoid different and incompatible data layouts
+	 */
+	BUILD_BUG_ON(sizeof(struct ipa_hw_pkt_status) != IPA_PKT_STATUS_SIZE);
+
+	ipa_ctx = kzalloc(sizeof(*ipa_ctx), GFP_KERNEL);
+	if (!ipa_ctx) {
+		IPAERR(":kzalloc err.\n");
+		result = -ENOMEM;
+		goto fail_mem_ctx;
+	}
+
+	ipa_ctx->pdev = ipa_dev;
+	ipa_ctx->uc_pdev = ipa_dev;
+	ipa_ctx->smmu_present = smmu_info.present;
+	if (!ipa_ctx->smmu_present)
+		ipa_ctx->smmu_s1_bypass = true;
+	else
+		ipa_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
+	ipa_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+	ipa_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+	ipa_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+	ipa_ctx->ipa_hw_mode = resource_p->ipa_hw_mode;
+	ipa_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+	ipa_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
+	ipa_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+	ipa_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
+	ipa_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+	ipa_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+	ipa_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+	ipa_ctx->use_dma_zone = resource_p->use_dma_zone;
+	ipa_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+
+	/* Set up the IPA RX polling sleep timeout (milliseconds) */
+	ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
+		&ipa_ctx->ipa_rx_max_timeout_usec,
+		resource_p->ipa_rx_polling_sleep_msec);
+
+	/* Setting up ipa polling iteration */
+	if ((resource_p->ipa_polling_iteration >= MIN_POLLING_ITERATION)
+		&& (resource_p->ipa_polling_iteration <= MAX_POLLING_ITERATION))
+		ipa_ctx->ipa_polling_iteration =
+			resource_p->ipa_polling_iteration;
+	else
+		ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;
+
+	/* default aggregation parameters */
+	ipa_ctx->aggregation_type = IPA_MBIM_16;
+	ipa_ctx->aggregation_byte_limit = 1;
+	ipa_ctx->aggregation_time_limit = 0;
+	ipa_ctx->ipa2_active_clients_logging.log_rdy = false;
+
+	ipa_ctx->ctrl = kzalloc(sizeof(*ipa_ctx->ctrl), GFP_KERNEL);
+	if (!ipa_ctx->ctrl) {
+		IPAERR("memory allocation error for ctrl\n");
+		result = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+	result = ipa_controller_static_bind(ipa_ctx->ctrl,
+			ipa_ctx->ipa_hw_type);
+	if (result) {
+		IPAERR("fail to static bind IPA ctrl.\n");
+		result = -EFAULT;
+		goto fail_bind;
+	}
+
+	IPADBG("hdr_lcl=%u ip4_rt=%u ip6_rt=%u ip4_flt=%u ip6_flt=%u\n",
+	       ipa_ctx->hdr_tbl_lcl, ipa_ctx->ip4_rt_tbl_lcl,
+	       ipa_ctx->ip6_rt_tbl_lcl, ipa_ctx->ip4_flt_tbl_lcl,
+	       ipa_ctx->ip6_flt_tbl_lcl);
+
+	if (bus_scale_table) {
+		IPADBG("Use bus scaling info from device tree\n");
+		ipa_ctx->ctrl->msm_bus_data_ptr = bus_scale_table;
+	}
+
+	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL) {
+		/* get BUS handle */
+		ipa_ctx->ipa_bus_hdl =
+			msm_bus_scale_register_client(
+				ipa_ctx->ctrl->msm_bus_data_ptr);
+		if (!ipa_ctx->ipa_bus_hdl) {
+			IPAERR("fail to register with bus mgr!\n");
+			result = -ENODEV;
+			goto fail_bus_reg;
+		}
+	} else {
+		IPADBG("Skipping bus scaling registration on Virtual plat\n");
+	}
+
+	if (ipa2_active_clients_log_init())
+		goto fail_init_active_client;
+
+	/* get IPA clocks */
+	result = ipa_get_clks(master_dev);
+	if (result)
+		goto fail_clk;
+
+	/* Enable ipa_ctx->enable_clock_scaling */
+	/* enable clock scaling by default */
+	ipa_ctx->curr_ipa_clk_rate = ipa_ctx->ctrl->ipa_clk_rate_turbo;
+
+	/* enable IPA clocks explicitly to allow the initialization */
+	ipa_enable_clks();
+
+	/* setup IPA register access */
+	ipa_ctx->mmio = ioremap(resource_p->ipa_mem_base +
+			ipa_ctx->ctrl->ipa_reg_base_ofst,
+			resource_p->ipa_mem_size);
+	if (!ipa_ctx->mmio) {
+		IPAERR(":ipa-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_remap;
+	}
+
+	result = ipa_init_hw();
+	if (result) {
+		IPAERR(":error initializing HW.\n");
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+	IPADBG("IPA HW initialization sequence completed");
+
+	ipa_ctx->ipa_num_pipes = ipa_get_num_pipes();
+	ipa_ctx->ctrl->ipa_sram_read_settings();
+	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
+		ipa_ctx->smem_sz, ipa_ctx->smem_restricted_bytes);
+
+	if (ipa_ctx->smem_reqd_sz >
+		ipa_ctx->smem_sz - ipa_ctx->smem_restricted_bytes) {
+		IPAERR("SW expect more core memory, needed %d, avail %d\n",
+			ipa_ctx->smem_reqd_sz, ipa_ctx->smem_sz -
+			ipa_ctx->smem_restricted_bytes);
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	mutex_init(&ipa_ctx->ipa_active_clients.mutex);
+	spin_lock_init(&ipa_ctx->ipa_active_clients.spinlock);
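+	/* The proxy clock vote logged below is counted as the first active client */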
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+	ipa2_active_clients_log_inc(&log_info, false);
+	ipa_ctx->ipa_active_clients.cnt = 1;
+
+	/* Create workqueues for power management */
+	ipa_ctx->power_mgmt_wq =
+		create_singlethread_workqueue("ipa_power_mgmt");
+	if (!ipa_ctx->power_mgmt_wq) {
+		IPAERR("failed to create power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	ipa_ctx->sps_power_mgmt_wq =
+		create_singlethread_workqueue("sps_ipa_power_mgmt");
+	if (!ipa_ctx->sps_power_mgmt_wq) {
+		IPAERR("failed to create sps power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_create_sps_wq;
+	}
+
+	/* register IPA with SPS driver */
+	bam_props.phys_addr = resource_p->bam_mem_base;
+	bam_props.virt_size = resource_p->bam_mem_size;
+	bam_props.irq = resource_p->bam_irq;
+	bam_props.num_pipes = ipa_ctx->ipa_num_pipes;
+	bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+	bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+	bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
+	if (ipa_ctx->ipa_hw_mode != IPA_HW_MODE_VIRTUAL)
+		bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
+	if (ipa_ctx->ipa_bam_remote_mode == true)
+		bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
+	if (!ipa_ctx->smmu_s1_bypass)
+		bam_props.options |= SPS_BAM_SMMU_EN;
+	bam_props.options |= SPS_BAM_CACHED_WP;
+	bam_props.ee = resource_p->ee;
+	bam_props.ipc_loglevel = 3;
+
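+	/* A BAM registration failure defers the probe (-EPROBE_DEFER) rather than failing it */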
+	result = sps_register_bam_device(&bam_props, &ipa_ctx->bam_handle);
+	if (result) {
+		IPAERR(":bam register err.\n");
+		result = -EPROBE_DEFER;
+		goto fail_register_bam_device;
+	}
+	IPADBG("IPA BAM is registered\n");
+
+	if (ipa_setup_bam_cfg(resource_p)) {
+		IPAERR(":bam cfg err.\n");
+		result = -ENODEV;
+		goto fail_flt_rule_cache;
+	}
+
+	/* init the lookaside cache */
+	ipa_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
+			sizeof(struct ipa_flt_entry), 0, 0, NULL);
+	if (!ipa_ctx->flt_rule_cache) {
+		IPAERR(":ipa flt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_flt_rule_cache;
+	}
+	ipa_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
+			sizeof(struct ipa_rt_entry), 0, 0, NULL);
+	if (!ipa_ctx->rt_rule_cache) {
+		IPAERR(":ipa rt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_rule_cache;
+	}
+	ipa_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
+			sizeof(struct ipa_hdr_entry), 0, 0, NULL);
+	if (!ipa_ctx->hdr_cache) {
+		IPAERR(":ipa hdr cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_cache;
+	}
+	ipa_ctx->hdr_offset_cache =
+	   kmem_cache_create("IPA_HDR_OFFSET",
+			   sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
+	if (!ipa_ctx->hdr_offset_cache) {
+		IPAERR(":ipa hdr off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_offset_cache;
+	}
+	ipa_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
+		sizeof(struct ipa_hdr_proc_ctx_entry), 0, 0, NULL);
+	if (!ipa_ctx->hdr_proc_ctx_cache) {
+		IPAERR(":ipa hdr proc ctx cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_cache;
+	}
+	ipa_ctx->hdr_proc_ctx_offset_cache =
+		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
+		sizeof(struct ipa_hdr_proc_ctx_offset_entry), 0, 0, NULL);
+	if (!ipa_ctx->hdr_proc_ctx_offset_cache) {
+		IPAERR(":ipa hdr proc ctx off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_offset_cache;
+	}
+	ipa_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
+			sizeof(struct ipa_rt_tbl), 0, 0, NULL);
+	if (!ipa_ctx->rt_tbl_cache) {
+		IPAERR(":ipa rt tbl cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_tbl_cache;
+	}
+	ipa_ctx->tx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_TX_PKT_WRAPPER",
+			   sizeof(struct ipa_tx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa_ctx->tx_pkt_wrapper_cache) {
+		IPAERR(":ipa tx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tx_pkt_wrapper_cache;
+	}
+	ipa_ctx->rx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_RX_PKT_WRAPPER",
+			   sizeof(struct ipa_rx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa_ctx->rx_pkt_wrapper_cache) {
+		IPAERR(":ipa rx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rx_pkt_wrapper_cache;
+	}
+
+	/* Setup DMA pool */
+	ipa_ctx->dma_pool = dma_pool_create("ipa_tx", ipa_ctx->pdev,
+		IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+		0, 0);
+	if (!ipa_ctx->dma_pool) {
+		IPAERR("cannot alloc DMA pool.\n");
+		result = -ENOMEM;
+		goto fail_dma_pool;
+	}
+
+	ipa_ctx->glob_flt_tbl[IPA_IP_v4].in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+	ipa_ctx->glob_flt_tbl[IPA_IP_v6].in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+
+	/* init the various list heads */
+	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v4].head_flt_rule_list);
+	INIT_LIST_HEAD(&ipa_ctx->glob_flt_tbl[IPA_IP_v6].head_flt_rule_list);
+	INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_hdr_entry_list);
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa_ctx->hdr_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa_ctx->
+				hdr_proc_ctx_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	INIT_LIST_HEAD(&ipa_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys = !ipa_ctx->ip4_flt_tbl_lcl;
+
+		flt_tbl = &ipa_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys = !ipa_ctx->ip6_flt_tbl_lcl;
+	}
+
+	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	rset = &ipa_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+	INIT_LIST_HEAD(&ipa_ctx->intf_list);
+	INIT_LIST_HEAD(&ipa_ctx->msg_list);
+	INIT_LIST_HEAD(&ipa_ctx->pull_msg_list);
+	init_waitqueue_head(&ipa_ctx->msg_waitq);
+	mutex_init(&ipa_ctx->msg_lock);
+
+	mutex_init(&ipa_ctx->lock);
+	mutex_init(&ipa_ctx->nat_mem.lock);
+
+	idr_init(&ipa_ctx->ipa_idr);
+	spin_lock_init(&ipa_ctx->idr_lock);
+
+	/* wlan related member */
+	memset(&ipa_ctx->wc_memb, 0, sizeof(ipa_ctx->wc_memb));
+	spin_lock_init(&ipa_ctx->wc_memb.wlan_spinlock);
+	spin_lock_init(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+	INIT_LIST_HEAD(&ipa_ctx->wc_memb.wlan_comm_desc_list);
+	/*
+	 * setup an empty routing table in system memory, this will be used
+	 * to delete a routing table cleanly and safely
+	 */
+	ipa_ctx->empty_rt_tbl_mem.size = IPA_ROUTING_RULE_BYTE_SIZE;
+
+	ipa_ctx->empty_rt_tbl_mem.base =
+		dma_alloc_coherent(ipa_ctx->pdev,
+				ipa_ctx->empty_rt_tbl_mem.size,
+				    &ipa_ctx->empty_rt_tbl_mem.phys_base,
+				    GFP_KERNEL);
+	if (!ipa_ctx->empty_rt_tbl_mem.base) {
+		IPAERR("DMA buff alloc fail %d bytes for empty routing tbl\n",
+				ipa_ctx->empty_rt_tbl_mem.size);
+		result = -ENOMEM;
+		goto fail_apps_pipes;
+	}
+	memset(ipa_ctx->empty_rt_tbl_mem.base, 0,
+			ipa_ctx->empty_rt_tbl_mem.size);
+	IPADBG("empty routing table was allocated in system memory");
+
+	/* setup the A5-IPA pipes */
+	if (ipa_setup_apps_pipes()) {
+		IPAERR(":failed to setup IPA-Apps pipes.\n");
+		result = -ENODEV;
+		goto fail_empty_rt_tbl;
+	}
+	IPADBG("IPA System2Bam pipes were connected\n");
+
+	if (ipa_init_flt_block()) {
+		IPAERR("fail to setup dummy filter rules\n");
+		result = -ENODEV;
+		goto fail_empty_rt_tbl;
+	}
+	IPADBG("filter block was set with dummy filter rules");
+
+	/* setup the IPA pipe mem pool */
+	if (resource_p->ipa_pipe_mem_size)
+		ipa_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+				resource_p->ipa_pipe_mem_size);
+
+	ipa_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+	result = alloc_chrdev_region(&ipa_ctx->dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa_ctx->dev = device_create(ipa_ctx->class, NULL, ipa_ctx->dev_num,
+			ipa_ctx, DRV_NAME);
+	if (IS_ERR(ipa_ctx->dev)) {
+		IPAERR(":device_create err.\n");
+		result = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa_ctx->cdev, &ipa_drv_fops);
+	ipa_ctx->cdev.owner = THIS_MODULE;
+	ipa_ctx->cdev.ops = &ipa_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa_ctx->cdev, ipa_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+	IPADBG("ipa cdev added successfully. major:%d minor:%d\n",
+			MAJOR(ipa_ctx->dev_num),
+			MINOR(ipa_ctx->dev_num));
+
+	if (create_nat_device()) {
+		IPAERR("unable to create nat device\n");
+		result = -ENODEV;
+		goto fail_nat_dev_add;
+	}
+
+
+
+	/* Create a wakeup source. */
+	wakeup_source_init(&ipa_ctx->w_lock, "IPA_WS");
+	spin_lock_init(&ipa_ctx->wakelock_ref_cnt.spinlock);
+
+	/* Initialize the SPS PM lock. */
+	mutex_init(&ipa_ctx->sps_pm.sps_pm_lock);
+
+	/* Initialize IPA RM (resource manager) */
+	result = ipa_rm_initialize();
+	if (result) {
+		IPAERR("RM initialization failed (%d)\n", -result);
+		result = -ENODEV;
+		goto fail_ipa_rm_init;
+	}
+	IPADBG("IPA resource manager initialized");
+
+	result = ipa_create_apps_resource();
+	if (result) {
+		IPAERR("Failed to create APPS_CONS resource\n");
+		result = -ENODEV;
+		goto fail_create_apps_resource;
+	}
+
+	/*register IPA IRQ handler*/
+	result = ipa_interrupts_init(resource_p->ipa_irq, 0,
+			master_dev);
+	if (result) {
+		IPAERR("ipa interrupts initialization failed\n");
+		result = -ENODEV;
+		goto fail_ipa_interrupts_init;
+	}
+
+	/*add handler for suspend interrupt*/
+	result = ipa_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		result = -ENODEV;
+		goto fail_add_interrupt_handler;
+	}
+
+	if (ipa_ctx->use_ipa_teth_bridge) {
+		/* Initialize the tethering bridge driver */
+		result = teth_bridge_driver_init();
+		if (result) {
+			IPAERR(":teth_bridge init failed (%d)\n", -result);
+			result = -ENODEV;
+			goto fail_add_interrupt_handler;
+		}
+		IPADBG("teth_bridge initialized");
+	}
+
+	ipa_debugfs_init();
+
+	result = ipa_uc_interface_init();
+	if (result)
+		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
+	else
+		IPADBG(":ipa Uc interface init ok\n");
+
+	result = ipa_wdi_init();
+	if (result)
+		IPAERR(":wdi init failed (%d)\n", -result);
+	else
+		IPADBG(":wdi init ok\n");
+
+	result = ipa_ntn_init();
+	if (result)
+		IPAERR(":ntn init failed (%d)\n", -result);
+	else
+		IPADBG(":ntn init ok\n");
+
+	ipa_ctx->q6_proxy_clk_vote_valid = true;
+
+	ipa_register_panic_hdlr();
+
+	pr_info("IPA driver initialization was successful.\n");
+
+	return 0;
+
+fail_add_interrupt_handler:
+	free_irq(resource_p->ipa_irq, master_dev);
+fail_ipa_interrupts_init:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+fail_create_apps_resource:
+	ipa_rm_exit();
+fail_ipa_rm_init:
+fail_nat_dev_add:
+	cdev_del(&ipa_ctx->cdev);
+fail_cdev_add:
+	device_destroy(ipa_ctx->class, ipa_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	if (ipa_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa_ctx->pipe_mem_pool);
+fail_empty_rt_tbl:
+	ipa_teardown_apps_pipes();
+	dma_free_coherent(ipa_ctx->pdev,
+			  ipa_ctx->empty_rt_tbl_mem.size,
+			  ipa_ctx->empty_rt_tbl_mem.base,
+			  ipa_ctx->empty_rt_tbl_mem.phys_base);
+fail_apps_pipes:
+	idr_destroy(&ipa_ctx->ipa_idr);
+fail_dma_pool:
+	kmem_cache_destroy(ipa_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_offset_cache);
+fail_hdr_proc_ctx_offset_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_proc_ctx_cache);
+fail_hdr_proc_ctx_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+	kmem_cache_destroy(ipa_ctx->hdr_cache);
+fail_hdr_cache:
+	kmem_cache_destroy(ipa_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+	kmem_cache_destroy(ipa_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+	sps_deregister_bam_device(ipa_ctx->bam_handle);
+fail_register_bam_device:
+	destroy_workqueue(ipa_ctx->sps_power_mgmt_wq);
+fail_create_sps_wq:
+	destroy_workqueue(ipa_ctx->power_mgmt_wq);
+fail_init_hw:
+	iounmap(ipa_ctx->mmio);
+fail_remap:
+	ipa_disable_clks();
+fail_clk:
+	ipa2_active_clients_log_destroy();
+fail_init_active_client:
+	msm_bus_scale_unregister_client(ipa_ctx->ipa_bus_hdl);
+fail_bus_reg:
+	if (bus_scale_table) {
+		msm_bus_cl_clear_pdata(bus_scale_table);
+		bus_scale_table = NULL;
+	}
+fail_bind:
+	kfree(ipa_ctx->ctrl);
+fail_mem_ctrl:
+	kfree(ipa_ctx);
+	ipa_ctx = NULL;
+fail_mem_ctx:
+	return result;
+}
+
+static int get_ipa_dts_configuration(struct platform_device *pdev,
+		struct ipa_plat_drv_res *ipa_drv_res)
+{
+	int result;
+	struct resource *resource;
+
+	/* initialize ipa_res */
+	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+	ipa_drv_res->ipa_hw_type = 0;
+	ipa_drv_res->ipa_hw_mode = 0;
+	ipa_drv_res->ipa_bam_remote_mode = false;
+	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+	ipa_drv_res->ipa_wdi2 = false;
+	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+
+	/* Get IPA HW Version */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
+					&ipa_drv_res->ipa_hw_type);
+	if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
+		IPAERR(":get resource failed for ipa-hw-ver!\n");
+		return -ENODEV;
+	}
+	IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
+
+	/* Get IPA HW mode */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
+			&ipa_drv_res->ipa_hw_mode);
+	if (result)
+		IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
+	else
+		IPADBG(": found ipa_drv_res->ipa_hw_mode = %d",
+				ipa_drv_res->ipa_hw_mode);
+
+	/* Get IPA WAN / LAN RX  pool sizes */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-ring-size",
+			&ipa_drv_res->wan_rx_ring_size);
+	if (result)
+		IPADBG("using default for wan-rx-ring-size = %u\n",
+				ipa_drv_res->wan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
+				ipa_drv_res->wan_rx_ring_size);
+
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,lan-rx-ring-size",
+			&ipa_drv_res->lan_rx_ring_size);
+	if (result)
+		IPADBG("using default for lan-rx-ring-size = %u\n",
+				ipa_drv_res->lan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
+				ipa_drv_res->lan_rx_ring_size);
+
+	ipa_drv_res->use_ipa_teth_bridge =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-ipa-tethering-bridge");
+	IPADBG(": using TBDr = %s",
+		ipa_drv_res->use_ipa_teth_bridge
+		? "True" : "False");
+
+	ipa_drv_res->ipa_bam_remote_mode =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-bam-remote-mode");
+	IPADBG(": ipa bam remote mode = %s\n",
+			ipa_drv_res->ipa_bam_remote_mode
+			? "True" : "False");
+
+	ipa_drv_res->modem_cfg_emb_pipe_flt =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,modem-cfg-emb-pipe-flt");
+	IPADBG(": modem configure embedded pipe filtering = %s\n",
+			ipa_drv_res->modem_cfg_emb_pipe_flt
+			? "True" : "False");
+
+	ipa_drv_res->ipa_wdi2 =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,ipa-wdi2");
+	IPADBG(": WDI-2.0 = %s\n",
+		ipa_drv_res->ipa_wdi2
+		? "True" : "False");
+
+	ipa_drv_res->skip_uc_pipe_reset =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,skip-uc-pipe-reset");
+	IPADBG(": skip uC pipe reset = %s\n",
+		ipa_drv_res->skip_uc_pipe_reset
+		? "True" : "False");
+
+	ipa_drv_res->use_dma_zone =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-dma-zone");
+	IPADBG(": use dma zone = %s\n",
+		ipa_drv_res->use_dma_zone
+		? "True" : "False");
+
+	ipa_drv_res->tethered_flow_control =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,tethered-flow-control");
+	IPADBG(": Use apps based flow control = %s\n",
+		ipa_drv_res->tethered_flow_control
+		? "True" : "False");
+
+	/* Get IPA wrapper address */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-base");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_mem_base = resource->start;
+	ipa_drv_res->ipa_mem_size = resource_size(resource);
+	IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
+			ipa_drv_res->ipa_mem_base,
+			ipa_drv_res->ipa_mem_size);
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+	/* Get IPA BAM address */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"bam-base");
+	if (!resource) {
+		IPAERR(":get resource failed for bam-base!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->bam_mem_base = resource->start;
+	ipa_drv_res->bam_mem_size = resource_size(resource);
+	IPADBG(": bam-base = 0x%x, size = 0x%x\n",
+			ipa_drv_res->bam_mem_base,
+			ipa_drv_res->bam_mem_size);
+
+	/* Get IPA pipe mem start ofst */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+	if (!resource) {
+		IPADBG(":not using pipe memory - resource does not exist\n");
+	} else {
+		ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
+		ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
+		IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
+				ipa_drv_res->ipa_pipe_mem_start_ofst,
+				ipa_drv_res->ipa_pipe_mem_size);
+	}
+
+	/* Get IPA IRQ number */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+			"ipa-irq");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-irq!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_irq = resource->start;
+	IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
+
+	/* Get IPA BAM IRQ number */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+			"bam-irq");
+	if (!resource) {
+		IPAERR(":get resource failed for bam-irq!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->bam_irq = resource->start;
+	IPADBG(":bam-irq = %d\n", ipa_drv_res->bam_irq);
+
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
+			&ipa_drv_res->ee);
+	if (result)
+		ipa_drv_res->ee = 0;
+
+	/* Get the IPA RX polling sleep timeout (milliseconds) */
+	result = of_property_read_u32(pdev->dev.of_node,
+				"qcom,rx-polling-sleep-ms",
+				&ipa_drv_res->ipa_rx_polling_sleep_msec);
+
+	if (result) {
+		ipa_drv_res->ipa_rx_polling_sleep_msec = ONE_MSEC;
+		IPADBG("using default polling timeout of 1 msec\n");
+	} else {
+		IPADBG(": found ipa_drv_res->ipa_rx_polling_sleep_msec = %d",
+			ipa_drv_res->ipa_rx_polling_sleep_msec);
+	}
+
+	/* Get IPA Polling Iteration */
+	result = of_property_read_u32(pdev->dev.of_node,
+				"qcom,ipa-polling-iteration",
+				&ipa_drv_res->ipa_polling_iteration);
+	if (result) {
+		ipa_drv_res->ipa_polling_iteration = MAX_POLLING_ITERATION;
+		IPADBG("using default polling iteration\n");
+	} else {
+		IPADBG(": found ipa_drv_res->ipa_polling_iteration = %d",
+			ipa_drv_res->ipa_polling_iteration);
+	}
+
+	return 0;
+}
+
+static int ipa_smmu_wlan_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
+	int atomic_ctx = 1;
+	int fast = 1;
+	int bypass = 1;
+	int ret;
+
+	IPADBG("sub pdev=%p\n", dev);
+
+	cb->dev = dev;
+	cb->iommu = iommu_domain_alloc(msm_iommu_get_bus(dev));
+	if (!cb->iommu) {
+		IPAERR("could not alloc iommu domain\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	cb->valid = true;
+
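+	/* Configure the domain: full S1 bypass, or atomic mode with an optional fast map */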
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->iommu,
+			DOMAIN_ATTR_S1_BYPASS,
+			&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->iommu,
+			DOMAIN_ATTR_ATOMIC,
+			&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+		if (smmu_info.fast_map) {
+			if (iommu_domain_set_attr(cb->iommu,
+				DOMAIN_ATTR_FAST,
+				&fast)) {
+				IPAERR("couldn't set fast map\n");
+				cb->valid = false;
+				return -EIO;
+			}
+			IPADBG("SMMU fast map set\n");
+		}
+	}
+
+	ret = iommu_attach_device(cb->iommu, dev);
+	if (ret) {
+		IPAERR("could not attach device ret=%d\n", ret);
+		cb->valid = false;
+		return ret;
+	}
+
+	if (!smmu_info.s1_bypass) {
+		IPAERR("map IPA region to WLAN_CB IOMMU\n");
+		ret = ipa_iommu_map(cb->iommu,
+			rounddown(smmu_info.ipa_base, PAGE_SIZE),
+			rounddown(smmu_info.ipa_base, PAGE_SIZE),
+			roundup(smmu_info.ipa_size, PAGE_SIZE),
+			IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+		if (ret) {
+			IPAERR("map IPA to WLAN_CB IOMMU failed ret=%d\n",
+				ret);
+			arm_iommu_detach_device(cb->dev);
+			cb->valid = false;
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ipa_smmu_uc_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+	int atomic_ctx = 1;
+	int ret;
+	int fast = 1;
+	int bypass = 1;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+		iova_ap_mapping, 2);
+	if (ret) {
+		IPAERR("Fail to read UC start/size iova addresses\n");
+		return ret;
+	}
+	cb->va_start = iova_ap_mapping[0];
+	cb->va_size = iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("UC va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+		IPAERR("DMA set mask failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
+	cb->dev = dev;
+	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+				cb->va_start, cb->va_size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		IPADBG("Fail to create mapping\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	IPADBG("SMMU mapping created\n");
+	cb->valid = true;
+
+	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_S1_BYPASS,
+			&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_ATOMIC,
+			&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+		if (smmu_info.fast_map) {
+			if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_FAST,
+				&fast)) {
+				IPAERR("couldn't set fast map\n");
+				arm_iommu_release_mapping(cb->mapping);
+				cb->valid = false;
+				return -EIO;
+			}
+			IPADBG("SMMU fast map set\n");
+		}
+	}
+
+	IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+	ret = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (ret) {
+		IPAERR("could not attach device ret=%d\n", ret);
+		arm_iommu_release_mapping(cb->mapping);
+		cb->valid = false;
+		return ret;
+	}
+
+	cb->next_addr = cb->va_end;
+	ipa_ctx->uc_pdev = dev;
+
+	IPADBG("UC CB PROBE pdev=%p attached\n", dev);
+	return 0;
+}
+
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
+	int result;
+	int atomic_ctx = 1;
+	int fast = 1;
+	int bypass = 1;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+	result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+		 iova_ap_mapping, 2);
+	if (result) {
+		IPAERR("Fail to read AP start/size iova addresses\n");
+		return result;
+	}
+	cb->va_start = iova_ap_mapping[0];
+	cb->va_size = iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+	if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+		    dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+		IPAERR("DMA set mask failed\n");
+		return -EOPNOTSUPP;
+	}
+
+	cb->dev = dev;
+	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+					       cb->va_start,
+					       cb->va_size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		IPADBG("Fail to create mapping\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	IPADBG("SMMU mapping created\n");
+	cb->valid = true;
+
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_S1_BYPASS,
+			&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_ATOMIC,
+			&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+
+		if (iommu_domain_set_attr(cb->mapping->domain,
+			DOMAIN_ATTR_FAST,
+			&fast)) {
+			IPAERR("couldn't set fast map\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU fast map set\n");
+	}
+
+	result = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (result) {
+		IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+		cb->valid = false;
+		return result;
+	}
+
+	if (!smmu_info.s1_bypass) {
+		IPAERR("map IPA region to AP_CB IOMMU\n");
+		result = ipa_iommu_map(cb->mapping->domain,
+				rounddown(smmu_info.ipa_base, PAGE_SIZE),
+				rounddown(smmu_info.ipa_base, PAGE_SIZE),
+				roundup(smmu_info.ipa_size, PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+		if (result) {
+			IPAERR("map IPA region to AP_CB IOMMU failed ret=%d\n",
+				result);
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return result;
+		}
+	}
+
+	smmu_info.present = true;
+
+	if (!bus_scale_table)
+		bus_scale_table = msm_bus_cl_get_pdata(ipa_pdev);
+
+	/* Proceed to real initialization */
+	result = ipa_init(&ipa_res, dev);
+	if (result) {
+		IPAERR("ipa_init failed\n");
+		arm_iommu_detach_device(cb->dev);
+		arm_iommu_release_mapping(cb->mapping);
+		cb->valid = false;
+		return result;
+	}
+
+	return result;
+}
+
+int ipa_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct device *dev = &pdev_p->dev;
+
+	IPADBG("IPA driver probing started\n");
+
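+	/* SMMU context-bank sub-devices are probed through this same entry point */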
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
+		return ipa_smmu_ap_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
+		return ipa_smmu_wlan_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
+		return ipa_smmu_uc_cb_probe(dev);
+
+	master_dev = dev;
+	if (!ipa_pdev)
+		ipa_pdev = pdev_p;
+
+	result = get_ipa_dts_configuration(pdev_p, &ipa_res);
+	if (result) {
+		IPAERR("IPA dts parsing failed\n");
+		return result;
+	}
+
+	result = ipa2_bind_api_controller(ipa_res.ipa_hw_type, api_ctrl);
+	if (result) {
+		IPAERR("IPA API binding failed\n");
+		return result;
+	}
+
+	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+		    "qcom,smmu-s1-bypass"))
+			smmu_info.s1_bypass = true;
+		if (of_property_read_bool(pdev_p->dev.of_node,
+		    "qcom,smmu-fast-map"))
+			smmu_info.fast_map = true;
+		smmu_info.arm_smmu = true;
+		pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+			smmu_info.s1_bypass, smmu_info.fast_map);
+		result = of_platform_populate(pdev_p->dev.of_node,
+				pdrv_match, NULL, &pdev_p->dev);
+	} else if (of_property_read_bool(pdev_p->dev.of_node,
+				"qcom,msm-smmu")) {
+		IPAERR("Legacy IOMMU not supported\n");
+		result = -EOPNOTSUPP;
+	} else {
+		if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(32))) {
+			IPAERR("DMA set mask failed\n");
+			return -EOPNOTSUPP;
+		}
+
+		if (!bus_scale_table)
+			bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
+
+		/* Proceed to real initialization */
+		result = ipa_init(&ipa_res, dev);
+		if (result) {
+			IPAERR("ipa_init failed\n");
+			return result;
+		}
+	}
+
+	return result;
+}
+
+/**
+ * ipa2_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, usually by pressing a suspend button.
+ *
+ * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
+ * This will postpone the suspend operation until IPA is no longer used by AP.
+ */
+int ipa2_ap_suspend(struct device *dev)
+{
+	int i;
+
+	IPADBG("Enter...\n");
+
+	/* If any tx/rx handler is in polling mode, fail to suspend */
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		if (ipa_ctx->ep[i].sys &&
+			atomic_read(&ipa_ctx->ep[i].sys->curr_polling_state)) {
+			IPAERR("EP %d is in polling state, do not suspend\n",
+				i);
+			return -EAGAIN;
+		}
+	}
+
+	/* release SPS IPA resource without waiting for inactivity timer */
+	atomic_set(&ipa_ctx->sps_pm.eot_activity, 0);
+	ipa_sps_release_resource(NULL);
+	IPADBG("Exit\n");
+
+	return 0;
+}
+
+/**
+* ipa2_ap_resume() - resume callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP resume
+* operation is invoked.
+*
+* Always returns 0 since resume should always succeed.
+*/
+int ipa2_ap_resume(struct device *dev)
+{
+	return 0;
+}
+
+struct ipa_context *ipa_get_ctx(void)
+{
+	return ipa_ctx;
+}
+
+int ipa_iommu_map(struct iommu_domain *domain,
+	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+	struct ipa_smmu_cb_ctx *ap_cb = ipa2_get_smmu_ctx();
+	struct ipa_smmu_cb_ctx *uc_cb = ipa2_get_uc_smmu_ctx();
+
+	IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+	IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+	/* make sure the mapping does not overlap a context bank's IOVA range */
+	if (domain == ipa2_get_smmu_domain()) {
+		if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else if (domain == ipa2_get_wlan_smmu_domain()) {
+		/* wlan is one time map */
+	} else if (domain == ipa2_get_uc_smmu_domain()) {
+		if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else {
+		IPAERR("Unexpected domain 0x%p\n", domain);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return iommu_map(domain, iova, paddr, size, prot);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
+
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
new file mode 100644
index 0000000..fd37395
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -0,0 +1,897 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/barrier.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "ipa_i.h"
+
+/*
+ * These values were determined empirically and show good end-to-end
+ * bidirectional throughput
+ */
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff
+
+#define IPA_PKT_FLUSH_TO_US 100
+
+int ipa_enable_data_path(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int res = 0;
+
+	IPADBG("Enabling data path\n");
+	/* From IPA 2.0, disable HOLB */
+	if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) &&
+		IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Enable the pipe */
+	if (IPA_CLIENT_IS_CONS(ep->client) &&
+	    (ep->keep_ipa_awake ||
+	     ipa_ctx->resume_on_connect[ep->client] ||
+	     !ipa_should_pipe_be_suspended(ep->client))) {
+		memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	return res;
+}
+
+int ipa_disable_data_path(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep = &ipa_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 aggr_init;
+	int res = 0;
+
+	IPADBG("Disabling data path\n");
+	/* On IPA 2.0, enable HOLB in order to prevent IPA from stalling */
+	if ((ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) &&
+		IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		holb_cfg.tmr_val = 0;
+		res = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Suspend the pipe */
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
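+	/* Allow in-flight packets to flush before reading the aggregation state */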
+	udelay(IPA_PKT_FLUSH_TO_US);
+	aggr_init = ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_AGGR_N_OFST_v2_0(clnt_hdl));
+	if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
+	    IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) == IPA_ENABLE_AGGR) {
+		res = ipa_tag_aggr_force_close(clnt_hdl);
+		if (res) {
+			IPAERR("tag process timeout, client:%d err:%d\n",
+				clnt_hdl, res);
+			BUG();
+		}
+	}
+
+	return res;
+}
+
+static int ipa2_smmu_map_peer_bam(unsigned long dev)
+{
+	phys_addr_t base;
+	u32 size;
+	struct iommu_domain *smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
+
+	if (!ipa_ctx->smmu_s1_bypass) {
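+		/* The peer BAM mapping is created once and reference counted */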
+		if (ipa_ctx->peer_bam_map_cnt == 0) {
+			if (sps_get_bam_addr(dev, &base, &size)) {
+				IPAERR("Fail to get addr\n");
+				return -EINVAL;
+			}
+			smmu_domain = ipa2_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				if (ipa_iommu_map(smmu_domain,
+					cb->va_end,
+					rounddown(base, PAGE_SIZE),
+					roundup(size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+					IOMMU_READ | IOMMU_WRITE |
+					IOMMU_DEVICE)) {
+					IPAERR("Fail to ipa_iommu_map\n");
+					return -EINVAL;
+				}
+			}
+
+			ipa_ctx->peer_bam_iova = cb->va_end;
+			ipa_ctx->peer_bam_pa = base;
+			ipa_ctx->peer_bam_map_size = size;
+			ipa_ctx->peer_bam_dev = dev;
+
+			IPADBG("Peer bam %lu mapped\n", dev);
+		} else {
+			WARN_ON(dev != ipa_ctx->peer_bam_dev);
+		}
+
+		ipa_ctx->peer_bam_map_cnt++;
+	}
+
+	return 0;
+}
+
+static int ipa_connect_configure_sps(const struct ipa_connect_params *in,
+				     struct ipa_ep_context *ep, int ipa_ep_idx)
+{
+	int result = -EFAULT;
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+
+	if (ipa2_smmu_map_peer_bam(in->client_bam_hdl)) {
+		IPAERR("fail to iommu map peer BAM.\n");
+		return -EFAULT;
+	}
+
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP alloc failed EP.\n");
+		return -EFAULT;
+	}
+
+	result = sps_get_config(ep->ep_hdl,
+		&ep->connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		return -EFAULT;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(in->client)) {
+		ep->connect.mode = SPS_MODE_SRC;
+		ep->connect.destination =
+			in->client_bam_hdl;
+		ep->connect.dest_iova = ipa_ctx->peer_bam_iova;
+		ep->connect.source = ipa_ctx->bam_handle;
+		ep->connect.dest_pipe_index =
+			in->client_ep_idx;
+		ep->connect.src_pipe_index = ipa_ep_idx;
+	} else {
+		ep->connect.mode = SPS_MODE_DEST;
+		ep->connect.source = in->client_bam_hdl;
+		ep->connect.source_iova = ipa_ctx->peer_bam_iova;
+		ep->connect.destination = ipa_ctx->bam_handle;
+		ep->connect.src_pipe_index = in->client_ep_idx;
+		ep->connect.dest_pipe_index = ipa_ep_idx;
+	}
+
+	return 0;
+}
+
+static int ipa_connect_allocate_fifo(const struct ipa_connect_params *in,
+				     struct sps_mem_buffer *mem_buff_ptr,
+				     bool *fifo_in_pipe_mem_ptr,
+				     u32 *fifo_pipe_mem_ofst_ptr,
+				     u32 fifo_size, int ipa_ep_idx)
+{
+	dma_addr_t dma_addr;
+	u32 ofst;
+	int result = -EFAULT;
+	struct iommu_domain *smmu_domain;
+
+	mem_buff_ptr->size = fifo_size;
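+	/* Prefer pipe memory when requested; fall back to coherent DMA memory if allocation fails */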
+	if (in->pipe_mem_preferred) {
+		if (ipa_pipe_mem_alloc(&ofst, fifo_size)) {
+			IPAERR("FIFO pipe mem alloc fail ep %u\n",
+				ipa_ep_idx);
+			mem_buff_ptr->base =
+				dma_alloc_coherent(ipa_ctx->pdev,
+				mem_buff_ptr->size,
+				&dma_addr, GFP_KERNEL);
+		} else {
+			memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+			result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+				fifo_size, 1);
+			WARN_ON(result);
+			*fifo_in_pipe_mem_ptr = 1;
+			dma_addr = mem_buff_ptr->phys_base;
+			*fifo_pipe_mem_ofst_ptr = ofst;
+		}
+	} else {
+		mem_buff_ptr->base =
+			dma_alloc_coherent(ipa_ctx->pdev, mem_buff_ptr->size,
+			&dma_addr, GFP_KERNEL);
+	}
+	if (ipa_ctx->smmu_s1_bypass) {
+		mem_buff_ptr->phys_base = dma_addr;
+	} else {
+		mem_buff_ptr->iova = dma_addr;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			mem_buff_ptr->phys_base =
+				iommu_iova_to_phys(smmu_domain, dma_addr);
+		}
+	}
+	if (mem_buff_ptr->base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa2_connect() - low-level IPA client connect
+ * @in:	[in] input parameters from client
+ * @sps:	[out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of a peripheral that wants to connect to
+ * IPA in BAM-BAM mode; these peripherals are USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables, and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps, u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa_ep_context *ep;
+	struct ipa_ep_cfg_status ep_status;
+	unsigned long base;
+	struct iommu_domain *smmu_domain;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPADBG("connecting client\n");
+
+	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+	    in->client >= IPA_CLIENT_MAX ||
+	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa2_get_ep_mapping(in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->client);
+
+
+	ep->skip_ep_cfg = in->skip_ep_cfg;
+	ep->valid = 1;
+	ep->client = in->client;
+	ep->client_notify = in->notify;
+	ep->priv = in->priv;
+	ep->keep_ipa_awake = in->keep_ipa_awake;
+
+	/* Notify uc to start monitoring holb on USB BAM Producer pipe. */
+	if (IPA_CLIENT_IS_USB_CONS(in->client)) {
+		ipa_uc_monitor_holb(in->client, true);
+		IPADBG("Enabling holb monitor for client:%d", in->client);
+	}
+
+	result = ipa_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				ipa_ep_idx);
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa2_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		/* Setting EP status 0 */
+		memset(&ep_status, 0, sizeof(ep_status));
+		if (ipa2_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	result = ipa_connect_configure_sps(in, ep, ipa_ep_idx);
+	if (result) {
+		IPAERR("fail to configure SPS.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (!ipa_ctx->smmu_s1_bypass &&
+			(in->desc.base == NULL ||
+			 in->data.base == NULL)) {
+		IPAERR(" allocate FIFOs data_fifo=0x%p desc_fifo=0x%p.\n",
+				in->data.base, in->desc.base);
+		goto desc_mem_alloc_fail;
+	}
+
+	if (in->desc.base == NULL) {
+		result = ipa_connect_allocate_fifo(in, &ep->connect.desc,
+						  &ep->desc_fifo_in_pipe_mem,
+						  &ep->desc_fifo_pipe_mem_ofst,
+						  in->desc_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DESC FIFO.\n");
+			goto desc_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DESC FIFO\n");
+		ep->connect.desc = in->desc;
+		ep->desc_fifo_client_allocated = 1;
+	}
+	IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base,
+	       ep->connect.desc.size);
+
+	if (in->data.base == NULL) {
+		result = ipa_connect_allocate_fifo(in, &ep->connect.data,
+						&ep->data_fifo_in_pipe_mem,
+						&ep->data_fifo_pipe_mem_ofst,
+						in->data_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DATA FIFO.\n");
+			goto data_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DATA FIFO\n");
+		ep->connect.data = in->data;
+		ep->data_fifo_client_allocated = 1;
+	}
+	IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base,
+	       ep->connect.data.size);
+
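+	/* With SMMU enabled, map the data and descriptor FIFOs into the IPA SMMU domain */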
+	if (!ipa_ctx->smmu_s1_bypass) {
+		ep->connect.data.iova = ep->connect.data.phys_base;
+		base = ep->connect.data.iova;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			if (ipa_iommu_map(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE)) {
+				IPAERR("Fail to ipa_iommu_map data FIFO\n");
+				goto iommu_map_data_fail;
+			}
+		}
+		ep->connect.desc.iova = ep->connect.desc.phys_base;
+		base = ep->connect.desc.iova;
+		if (smmu_domain != NULL) {
+			if (ipa_iommu_map(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE)) {
+				IPAERR("Fail to ipa_iommu_map desc FIFO\n");
+				goto iommu_map_desc_fail;
+			}
+		}
+	}
+
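+	/* USB consumer pipes on IPA HW v2.0/v2.5/v2.6L use the USB-specific
+	 * SPS event threshold; all other pipes use the default threshold.
+	 */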
+	if ((ipa_ctx->ipa_hw_type == IPA_HW_v2_0 ||
+		ipa_ctx->ipa_hw_type == IPA_HW_v2_5 ||
+		ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) &&
+		IPA_CLIENT_IS_USB_CONS(in->client))
+		ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
+	else
+		ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */
+
+	result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto sps_connect_fail;
+	}
+
+	sps->ipa_bam_hdl = ipa_ctx->bam_handle;
+	sps->ipa_ep_idx = ipa_ep_idx;
+	*clnt_hdl = ipa_ep_idx;
+	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client))
+		ipa_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+
+	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
+
+	return 0;
+
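+/*
+ * Error unwind: each label below reverses the steps that completed before
+ * the corresponding failure, in reverse order of setup.
+ */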
+sps_connect_fail:
+	if (!ipa_ctx->smmu_s1_bypass) {
+		base = ep->connect.desc.iova;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+iommu_map_desc_fail:
+	if (!ipa_ctx->smmu_s1_bypass) {
+		base = ep->connect.data.iova;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+iommu_map_data_fail:
+	if (!ep->data_fifo_client_allocated) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(ipa_ctx->pdev,
+				  ep->connect.data.size,
+				  ep->connect.data.base,
+				  ep->connect.data.phys_base);
+		else
+			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+				  ep->connect.data.size);
+	}
+data_mem_alloc_fail:
+	if (!ep->desc_fifo_client_allocated) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(ipa_ctx->pdev,
+				  ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+		else
+			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+				  ep->connect.desc.size);
+	}
+desc_mem_alloc_fail:
+	sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+fail:
+	return result;
+}
+
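+/*
+ * Drop one reference on the peer BAM SMMU mapping; the mapping itself is
+ * torn down only when the last connected pipe that used it disconnects.
+ */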
+static int ipa2_smmu_unmap_peer_bam(unsigned long dev)
+{
+	size_t len;
+	struct iommu_domain *smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
+
+	if (!ipa_ctx->smmu_s1_bypass) {
+		WARN_ON(dev != ipa_ctx->peer_bam_dev);
+		ipa_ctx->peer_bam_map_cnt--;
+		if (ipa_ctx->peer_bam_map_cnt == 0) {
+			len = roundup(ipa_ctx->peer_bam_map_size +
+					ipa_ctx->peer_bam_pa -
+					rounddown(ipa_ctx->peer_bam_pa,
+						PAGE_SIZE), PAGE_SIZE);
+			smmu_domain = ipa2_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				if (iommu_unmap(smmu_domain,
+					cb->va_end, len) != len) {
+					IPAERR("Fail to iommu_unmap\n");
+					return -EINVAL;
+				}
+				IPADBG("Peer bam %lu unmapped\n", dev);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa2_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any headers, routing and filtering tables, and
+ * rules as needed.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_disconnect(u32 clnt_hdl)
+{
+	int result;
+	struct ipa_ep_context *ep;
+	unsigned long peer_bam;
+	unsigned long base;
+	struct iommu_domain *smmu_domain;
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+	enum ipa_client_type client_type;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+	client_type = ipa2_get_client_mapping(clnt_hdl);
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	/* For the USB 2.0 controller the EP is disabled first (via
+	 * ipa2_disable_endpoint()), so this sequence is not needed again
+	 * when disconnecting the pipe.
+	 */
+	if (!ep->ep_disabled) {
+		/* Set Disconnect in Progress flag. */
+		spin_lock(&ipa_ctx->disconnect_lock);
+		ep->disconnect_in_progress = true;
+		spin_unlock(&ipa_ctx->disconnect_lock);
+
+		/* Notify uc to stop monitoring holb on USB BAM
+		 * Producer pipe.
+		 */
+		if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+			ipa_uc_monitor_holb(ep->client, false);
+			IPADBG("Disabling holb monitor for client: %d\n",
+				ep->client);
+		}
+
+		result = ipa_disable_data_path(clnt_hdl);
+		if (result) {
+			IPAERR("disable data path failed res=%d clnt=%d.\n",
+				result, clnt_hdl);
+			return -EPERM;
+		}
+	}
+
+	result = sps_disconnect(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS disconnect failed.\n");
+		return -EPERM;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		peer_bam = ep->connect.destination;
+	else
+		peer_bam = ep->connect.source;
+
+	if (ipa2_smmu_unmap_peer_bam(peer_bam)) {
+		IPAERR("fail to iommu unmap peer BAM.\n");
+		return -EPERM;
+	}
+
+	if (!ep->desc_fifo_client_allocated &&
+	     ep->connect.desc.base) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(ipa_ctx->pdev,
+					  ep->connect.desc.size,
+					  ep->connect.desc.base,
+					  ep->connect.desc.phys_base);
+		else
+			ipa_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+					  ep->connect.desc.size);
+	}
+
+	if (!ep->data_fifo_client_allocated &&
+	     ep->connect.data.base) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(ipa_ctx->pdev,
+					  ep->connect.data.size,
+					  ep->connect.data.base,
+					  ep->connect.data.phys_base);
+		else
+			ipa_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+					  ep->connect.data.size);
+	}
+
+	if (!ipa_ctx->smmu_s1_bypass) {
+		base = ep->connect.desc.iova;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+
+	if (!ipa_ctx->smmu_s1_bypass) {
+		base = ep->connect.data.iova;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+
+	result = sps_free_endpoint(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS de-alloc EP failed.\n");
+		return -EPERM;
+	}
+
+	ipa_delete_dflt_flt_rules(clnt_hdl);
+
+	/* If APPS flow control is not enabled and a force-clear request was
+	 * sent earlier, ask the modem to resume honoring flow control.
+	 */
+	if (!ipa_ctx->tethered_flow_control && ep->qmi_request_sent) {
+		/* Tell the modem to stop force-clearing the datapath. */
+		req.request_id = clnt_hdl;
+		res = qmi_disable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("disable_force_clear_datapath failed %d\n",
+				res);
+		}
+	}
+
+	spin_lock(&ipa_ctx->disconnect_lock);
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+	spin_unlock(&ipa_ctx->disconnect_lock);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
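+
+/*
+ * Illustrative teardown order for a peripheral driver (not mandated by this
+ * file): optionally ipa2_clear_endpoint_delay() and/or ipa2_disable_endpoint()
+ * first, then ipa2_disconnect() with the client handle obtained at connect
+ * time.
+ */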
+
+/**
+ * ipa2_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_reset_endpoint(u32 clnt_hdl)
+{
+	int res;
+	struct ipa_ep_context *ep;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
+		IPAERR("Bad parameters.\n");
+		return -EFAULT;
+	}
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	res = sps_disconnect(ep->ep_hdl);
+	if (res) {
+		IPAERR("sps_disconnect() failed, res=%d.\n", res);
+		goto bail;
+	} else {
+		res = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect,
+			ep->client);
+		if (res) {
+			IPAERR("sps_connect() failed, res=%d.\n", res);
+			goto bail;
+		}
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return res;
+}
+
+/**
+ * ipa2_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * the EP delay on an IPA consumer pipe before disconnect in BAM-BAM mode.
+ * This API expects the caller to take responsibility for freeing any
+ * headers, routing and filtering tables, and rules as needed.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_clear_endpoint_delay(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (!ipa_ctx->tethered_flow_control) {
+		IPADBG("APPS flow control is not enabled\n");
+		/* Send a message to modem to disable flow control honoring. */
+		req.request_id = clnt_hdl;
+		req.source_pipe_bitmask = 1 << clnt_hdl;
+		res = qmi_enable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("enable_force_clear_datapath failed %d\n",
+				res);
+		}
+		ep->qmi_request_sent = true;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	/* Set disconnect in progress flag so further flow control events are
+	 * not honored.
+	 */
+	spin_lock(&ipa_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa_ctx->disconnect_lock);
+
+	/* If flow is disabled at this point, restore the ep state.*/
+	ep_ctrl.ipa_ep_delay = false;
+	ep_ctrl.ipa_ep_suspend = false;
+	ipa2_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa2_disable_endpoint() - low-level IPA client disable endpoint
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to
+ * disable the pipe from IPA in BAM-BAM mode.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl)
+{
+	int result;
+	struct ipa_ep_context *ep;
+	enum ipa_client_type client_type;
+	unsigned long bam;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+	client_type = ipa2_get_client_mapping(clnt_hdl);
+	IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	/* Set Disconnect in Progress flag. */
+	spin_lock(&ipa_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa_ctx->disconnect_lock);
+
+	/* Notify uc to stop monitoring holb on USB BAM Producer pipe. */
+	if (IPA_CLIENT_IS_USB_CONS(ep->client)) {
+		ipa_uc_monitor_holb(ep->client, false);
+		IPADBG("Disabling holb monitor for client: %d\n", ep->client);
+	}
+
+	result = ipa_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+				clnt_hdl);
+		goto fail;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		bam = ep->connect.source;
+	else
+		bam = ep->connect.destination;
+
+	result = sps_pipe_reset(bam, clnt_hdl);
+	if (result) {
+		IPAERR("SPS pipe reset failed.\n");
+		goto fail;
+	}
+
+	ep->ep_disabled = true;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+	return 0;
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+	return -EPERM;
+}
+
+/**
+ * ipa_sps_connect_safe() - connect endpoint from BAM perspective
+ * @h: [in] sps pipe handle
+ * @connect: [in] sps connect parameters
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * This function connects a BAM pipe using the SPS driver sps_connect() API
+ * and, by first requesting the uC interface to reset the pipe, works around
+ * an IPA HW limitation that does not allow resetting a BAM pipe while there
+ * is traffic in the IPA TX command queue.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+			 enum ipa_client_type ipa_client)
+{
+	int res;
+
+	if (ipa_ctx->ipa_hw_type > IPA_HW_v2_5 || ipa_ctx->skip_uc_pipe_reset) {
+		IPADBG("uC pipe reset is not required\n");
+	} else {
+		res = ipa_uc_reset_pipe(ipa_client);
+		if (res)
+			return res;
+	}
+	return sps_connect(h, connect);
+}
+EXPORT_SYMBOL(ipa_sps_connect_safe);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
new file mode 100644
index 0000000..c9e20d3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -0,0 +1,2149 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_CNTR_ON 127265
+#define IPA_DBG_CNTR_OFF 127264
+#define IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE ((IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN \
+			* IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) \
+			+ IPA_MAX_MSG_LEN)
+
+#define RX_MIN_POLL_CNT "Rx Min Poll Count"
+#define RX_MAX_POLL_CNT "Rx Max Poll Count"
+#define MAX_COUNT_LENGTH 6
+#define MAX_POLLING_ITERATION 40
+#define MIN_POLLING_ITERATION 1
+
+#define IPA_DUMP_STATUS_FIELD(f) \
+	pr_err(#f "=0x%x\n", status->f)
+
+const char *ipa_excp_name[] = {
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
+const char *ipa_status_excp_name[] = {
+	__stringify_1(IPA_EXCP_DEAGGR),
+	__stringify_1(IPA_EXCP_REPLICATION),
+	__stringify_1(IPA_EXCP_IP),
+	__stringify_1(IPA_EXCP_IHL),
+	__stringify_1(IPA_EXCP_FRAG_MISS),
+	__stringify_1(IPA_EXCP_SW),
+	__stringify_1(IPA_EXCP_NAT),
+	__stringify_1(IPA_EXCP_NONE),
+};
+
+const char *ipa_event_name[] = {
+	__stringify(WLAN_CLIENT_CONNECT),
+	__stringify(WLAN_CLIENT_DISCONNECT),
+	__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+	__stringify(WLAN_CLIENT_NORMAL_MODE),
+	__stringify(SW_ROUTING_ENABLE),
+	__stringify(SW_ROUTING_DISABLE),
+	__stringify(WLAN_AP_CONNECT),
+	__stringify(WLAN_AP_DISCONNECT),
+	__stringify(WLAN_STA_CONNECT),
+	__stringify(WLAN_STA_DISCONNECT),
+	__stringify(WLAN_CLIENT_CONNECT_EX),
+	__stringify(WLAN_SWITCH_TO_SCC),
+	__stringify(WLAN_SWITCH_TO_MCC),
+	__stringify(WLAN_WDI_ENABLE),
+	__stringify(WLAN_WDI_DISABLE),
+	__stringify(WAN_UPSTREAM_ROUTE_ADD),
+	__stringify(WAN_UPSTREAM_ROUTE_DEL),
+	__stringify(WAN_EMBMS_CONNECT),
+	__stringify(WAN_XLAT_CONNECT),
+	__stringify(ECM_CONNECT),
+	__stringify(ECM_DISCONNECT),
+	__stringify(IPA_TETHERING_STATS_UPDATE_STATS),
+	__stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+};
+
+const char *ipa_hdr_l2_type_name[] = {
+	__stringify(IPA_HDR_L2_NONE),
+	__stringify(IPA_HDR_L2_ETHERNET_II),
+	__stringify(IPA_HDR_L2_802_3),
+};
+
+const char *ipa_hdr_proc_type_name[] = {
+	__stringify(IPA_HDR_PROC_NONE),
+	__stringify(IPA_HDR_PROC_ETHII_TO_ETHII),
+	__stringify(IPA_HDR_PROC_ETHII_TO_802_3),
+	__stringify(IPA_HDR_PROC_802_3_TO_ETHII),
+	__stringify(IPA_HDR_PROC_802_3_TO_802_3),
+};
+
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_keep_awake;
+static struct dentry *dfile_ep_holb;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_proc_ctx;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip6_flt;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_wstats;
+static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
+static struct dentry *dfile_ip4_nat;
+static struct dentry *dfile_rm_stats;
+static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
+static struct dentry *dfile_ipa_rx_poll_timeout;
+static struct dentry *dfile_ipa_poll_iteration;
+
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+static s8 ep_reg_idx;
+
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
+{
+	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_FILTER=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n",
+			ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1),
+			ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1),
+			ipa_read_reg(ipa_ctx->mmio,
+					IPA_SHARED_MEM_SIZE_OFST_v1_1));
+}
+
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len)
+{
+	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_FILTER=0x%x\n"
+			"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n",
+			ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_COMP_HW_VERSION_OFST),
+			ipa_read_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1),
+			ipa_read_reg(ipa_ctx->mmio, IPA_FILTER_OFST_v1_1),
+			ipa_read_reg_field(ipa_ctx->mmio,
+				IPA_SHARED_MEM_SIZE_OFST_v2_0,
+				IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+				IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0),
+			ipa_read_reg_field(ipa_ctx->mmio,
+				IPA_SHARED_MEM_SIZE_OFST_v2_0,
+				IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+				IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0));
+}
+
+static ssize_t ipa_read_gen_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	nbytes = ipa_ctx->ctrl->ipa_read_gen_reg(dbg_buff, IPA_MAX_MSG_LEN);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
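+/*
+ * debugfs write handler for EP HOLB configuration. Input is three
+ * space-separated values, "<ep_idx> <en> <tmr_val>"; e.g. "3 1 2000" would
+ * configure head-of-line-blocking handling on pipe 3 with timer value 2000.
+ */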
+static ssize_t ipa_write_ep_holb(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ipa_ep_cfg_holb holb;
+	u32 en;
+	u32 tmr_val;
+	u32 ep_idx;
+	unsigned long missing;
+	char *sptr, *token;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &ep_idx))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &en))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &tmr_val))
+		return -EINVAL;
+
+	holb.en = en;
+	holb.tmr_val = tmr_val;
+
+	ipa2_cfg_ep_holb(ep_idx, &holb);
+
+	return count;
+}
+
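+/*
+ * debugfs write handler selecting which pipe's endpoint registers the
+ * corresponding read handler dumps; a negative value selects all pipes.
+ */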
+static ssize_t ipa_write_ep_reg(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option >= ipa_ctx->ipa_num_pipes) {
+		IPAERR("bad pipe specified %u\n", option);
+		return count;
+	}
+
+	ep_reg_idx = option;
+
+	return count;
+}
+
+int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe)
+{
+	return scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+			"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+			"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+			"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+			"IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+			"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+			"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+			"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n",
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_NAT_N_OFST_v1_1(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_CTRL_N_OFST(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe)),
+			pipe, ipa_read_reg(ipa_ctx->mmio,
+				IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe))
+				);
+}
+
+int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+		"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+		"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+		"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+		"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CFG_%u=0x%x\n",
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_NAT_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_CTRL_N_OFST(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(pipe)),
+		pipe, ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_CFG_n_OFST(pipe)));
+}
+
+static ssize_t ipa_read_ep_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int start_idx;
+	int end_idx;
+	int size = 0;
+	int ret;
+	loff_t pos;
+
+	/* negative ep_reg_idx means all registers */
+	if (ep_reg_idx < 0) {
+		start_idx = 0;
+		end_idx = ipa_ctx->ipa_num_pipes;
+	} else {
+		start_idx = ep_reg_idx;
+		end_idx = start_idx + 1;
+	}
+	pos = *ppos;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = start_idx; i < end_idx; i++) {
+
+		nbytes = ipa_ctx->ctrl->ipa_read_ep_reg(dbg_buff,
+				IPA_MAX_MSG_LEN, i);
+
+		*ppos = pos;
+		ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+					      nbytes);
+		if (ret < 0) {
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return ret;
+		}
+
+		size += ret;
+		ubuf += nbytes;
+		count -= nbytes;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	*ppos = pos + size;
+	return size;
+}
+
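+/*
+ * debugfs write handler for the keep-awake control: writing 1 takes an IPA
+ * active-clients vote (the power state then reads as ON), writing 0
+ * releases it.
+ */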
+static ssize_t ipa_write_keep_awake(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option == 1)
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	else if (option == 0)
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	else
+		return -EFAULT;
+
+	return count;
+}
+
+static ssize_t ipa_read_keep_awake(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	ipa_active_clients_lock();
+	if (ipa_ctx->ipa_active_clients.cnt)
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is ON\n");
+	else
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is OFF\n");
+	ipa_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_hdr(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int i = 0;
+	struct ipa_hdr_entry *entry;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ipa_ctx->hdr_tbl_lcl)
+		pr_err("Table resides on local memory\n");
+	else
+		pr_err("Table resides on system (ddr) memory\n");
+
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		nbytes = scnprintf(
+			dbg_buff,
+			IPA_MAX_MSG_LEN,
+			"name:%s len=%d ref=%d partial=%d type=%s ",
+			entry->name,
+			entry->hdr_len,
+			entry->ref_cnt,
+			entry->is_partial,
+			ipa_hdr_l2_type_name[entry->type]);
+
+		if (entry->is_hdr_proc_ctx) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"phys_base=0x%pa ",
+				&entry->phys_base);
+		} else {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ofst=%u ",
+				entry->offset_entry->offset >> 2);
+		}
+		for (i = 0; i < entry->hdr_len; i++) {
+			scnprintf(dbg_buff + nbytes + i * 2,
+				  IPA_MAX_MSG_LEN - nbytes - i * 2,
+				  "%02x", entry->hdr[i]);
+		}
+		scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
+			  IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
+			  "\n");
+		pr_err("%s", dbg_buff);
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
+static int ipa_attrib_dump(struct ipa_rule_attrib *attrib,
+		enum ipa_ip_type ip)
+{
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int i;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		pr_err("tos_value:%d ", attrib->tos_value);
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		pr_err("tos_mask:%d ", attrib->tos_mask);
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
+		pr_err("protocol:%d ", attrib->u.v4.protocol);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.src_addr);
+			mask[0] = htonl(attrib->u.v4.src_addr_mask);
+			pr_err(
+					"src_addr:%pI4 src_addr_mask:%pI4 ",
+					addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.src_addr[i]);
+				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+			}
+			pr_err(
+					   "src_addr:%pI6 src_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.dst_addr);
+			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+			pr_err(
+					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+					   addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+			}
+			pr_err(
+					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		pr_err("src_port_range:%u %u ",
+				   attrib->src_port_lo,
+			     attrib->src_port_hi);
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		pr_err("dst_port_range:%u %u ",
+				   attrib->dst_port_lo,
+			     attrib->dst_port_hi);
+	}
+	if (attrib->attrib_mask & IPA_FLT_TYPE)
+		pr_err("type:%d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_CODE)
+		pr_err("code:%d ", attrib->code);
+
+	if (attrib->attrib_mask & IPA_FLT_SPI)
+		pr_err("spi:%x ", attrib->spi);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
+		pr_err("src_port:%u ", attrib->src_port);
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT)
+		pr_err("dst_port:%u ", attrib->dst_port);
+
+	if (attrib->attrib_mask & IPA_FLT_TC)
+		pr_err("tc:%d ", attrib->u.v6.tc);
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
+		pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
+		pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		pr_err(
+				   "metadata:%x metadata_mask:%x",
+				   attrib->meta_data, attrib->meta_data_mask);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		pr_err("frg ");
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+		pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+	}
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+		pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
+		pr_err("ether_type:%x ", attrib->ether_type);
+
+	pr_err("\n");
+	return 0;
+}
+
+static int ipa_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
+{
+	uint8_t addr[16];
+	uint8_t mask[16];
+	int i;
+	int j;
+
+	if (attrib->tos_eq_present)
+		pr_err("tos_value:%d ", attrib->tos_eq);
+
+	if (attrib->protocol_eq_present)
+		pr_err("protocol:%d ", attrib->protocol_eq);
+
+	for (i = 0; i < attrib->num_ihl_offset_range_16; i++) {
+		pr_err(
+			   "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+			   attrib->ihl_offset_range_16[i].offset,
+			   attrib->ihl_offset_range_16[i].range_low,
+			   attrib->ihl_offset_range_16[i].range_high);
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_32; i++) {
+		pr_err(
+			   "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
+			   attrib->offset_meq_32[i].offset,
+			   attrib->offset_meq_32[i].mask,
+			   attrib->offset_meq_32[i].value);
+	}
+
+	if (attrib->tc_eq_present)
+		pr_err("tc:%d ", attrib->tc_eq);
+
+	if (attrib->fl_eq_present)
+		pr_err("flow_label:%d ", attrib->fl_eq);
+
+	if (attrib->ihl_offset_eq_16_present) {
+		pr_err(
+				"(ihl_ofst_eq16:%d val:0x%x) ",
+				attrib->ihl_offset_eq_16.offset,
+				attrib->ihl_offset_eq_16.value);
+	}
+
+	for (i = 0; i < attrib->num_ihl_offset_meq_32; i++) {
+		pr_err(
+				"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+				attrib->ihl_offset_meq_32[i].offset,
+				attrib->ihl_offset_meq_32[i].mask,
+				attrib->ihl_offset_meq_32[i].value);
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_128; i++) {
+		for (j = 0; j < 16; j++) {
+			addr[j] = attrib->offset_meq_128[i].value[j];
+			mask[j] = attrib->offset_meq_128[i].mask[j];
+		}
+		pr_err(
+				"(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+				attrib->offset_meq_128[i].offset,
+				mask + 0,
+				addr + 0);
+	}
+
+	if (attrib->metadata_meq32_present) {
+		pr_err(
+				"(metadata: ofst:%u mask:0x%x val:0x%x) ",
+				attrib->metadata_meq32.offset,
+				attrib->metadata_meq32.mask,
+				attrib->metadata_meq32.value);
+	}
+
+	if (attrib->ipv4_frag_eq_present)
+		pr_err("frg ");
+
+	pr_err("\n");
+	return 0;
+}
+
+static int ipa_open_dbg(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipa_read_rt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i = 0;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_rt_tbl_set *set;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 ofst;
+	u32 ofst_words;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ip ==  IPA_IP_v6) {
+		if (ipa_ctx->ip6_rt_tbl_lcl)
+			pr_err("Table resides on local memory\n");
+		else
+			pr_err("Table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa_ctx->ip4_rt_tbl_lcl)
+			pr_err("Table resides on local memory\n");
+		else
+			pr_err("Table resides on system (ddr) memory\n");
+	}
+
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (entry->proc_ctx) {
+				ofst = entry->proc_ctx->offset_entry->offset;
+				ofst_words =
+					(ofst +
+					ipa_ctx->hdr_proc_ctx_tbl.start_offset)
+					>> 5;
+
+				pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa2_get_ep_mapping(entry->rule.dst),
+					!ipa_ctx->hdr_tbl_lcl);
+				pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
+					ofst_words,
+					entry->rule.attrib.attrib_mask);
+			} else {
+				if (entry->hdr)
+					ofst = entry->hdr->offset_entry->offset;
+				else
+					ofst = 0;
+
+				pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa2_get_ep_mapping(entry->rule.dst),
+					!ipa_ctx->hdr_tbl_lcl);
+				pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
+					ofst >> 2,
+					entry->rule.attrib.attrib_mask);
+			}
+
+			ipa_attrib_dump(&entry->rule.attrib, ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
+static ssize_t ipa_read_proc_ctx(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa_hdr_proc_ctx_tbl *tbl;
+	struct ipa_hdr_proc_ctx_entry *entry;
+	u32 ofst_words;
+
+	tbl = &ipa_ctx->hdr_proc_ctx_tbl;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ipa_ctx->hdr_proc_ctx_tbl_lcl)
+		pr_info("Table resides on local memory\n");
+	else
+		pr_info("Table resides on system(ddr) memory\n");
+
+	list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) {
+		ofst_words = (entry->offset_entry->offset +
+			ipa_ctx->hdr_proc_ctx_tbl.start_offset)
+			>> 5;
+		if (entry->hdr->is_hdr_proc_ctx) {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr_phys_base:0x%pa\n",
+				&entry->hdr->phys_base);
+		} else {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr[words]:%u\n",
+				entry->hdr->offset_entry->offset >> 2);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i;
+	int j;
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	struct ipa_rt_tbl *rt_tbl;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	bool eq;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	i = 0;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		if (entry->rule.eq_attrib_type) {
+			rt_tbl_idx = entry->rule.rt_tbl_idx;
+			bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+			eq = true;
+		} else {
+			rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
+			if (rt_tbl)
+				rt_tbl_idx = rt_tbl->idx;
+			else
+				rt_tbl_idx = ~0;
+			bitmap = entry->rule.attrib.attrib_mask;
+			eq = false;
+		}
+		pr_err("ep_idx:global rule_idx:%d act:%d rt_tbl_idx:%d ",
+			i, entry->rule.action, rt_tbl_idx);
+		pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
+			bitmap, entry->rule.retain_hdr, eq);
+		if (eq)
+			ipa_attrib_dump_eq(
+				&entry->rule.eq_attrib);
+		else
+			ipa_attrib_dump(
+				&entry->rule.attrib, ip);
+		i++;
+	}
+
+	for (j = 0; j < ipa_ctx->ipa_num_pipes; j++) {
+		tbl = &ipa_ctx->flt_tbl[j][ip];
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (entry->rule.eq_attrib_type) {
+				rt_tbl_idx = entry->rule.rt_tbl_idx;
+				bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+				eq = true;
+			} else {
+				rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
+				if (rt_tbl)
+					rt_tbl_idx = rt_tbl->idx;
+				else
+					rt_tbl_idx = ~0;
+				bitmap = entry->rule.attrib.attrib_mask;
+				eq = false;
+			}
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				j, i, entry->rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d ",
+				bitmap, entry->rule.retain_hdr);
+			pr_err("eq:%d ", eq);
+			if (eq)
+				ipa_attrib_dump_eq(
+					&entry->rule.eq_attrib);
+			else
+				ipa_attrib_dump(
+					&entry->rule.attrib, ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
+static ssize_t ipa_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int cnt = 0;
+	uint connect = 0;
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++)
+		connect |= (ipa_ctx->ep[i].valid << i);
+
+	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"sw_tx=%u\n"
+			"hw_tx=%u\n"
+			"tx_non_linear=%u\n"
+			"tx_compl=%u\n"
+			"wan_rx=%u\n"
+			"stat_compl=%u\n"
+			"lan_aggr_close=%u\n"
+			"wan_aggr_close=%u\n"
+			"act_clnt=%u\n"
+			"con_clnt_bmap=0x%x\n"
+			"wan_rx_empty=%u\n"
+			"wan_repl_rx_empty=%u\n"
+			"lan_rx_empty=%u\n"
+			"lan_repl_rx_empty=%u\n"
+			"flow_enable=%u\n"
+			"flow_disable=%u\n",
+			ipa_ctx->stats.tx_sw_pkts,
+			ipa_ctx->stats.tx_hw_pkts,
+			ipa_ctx->stats.tx_non_linear,
+			ipa_ctx->stats.tx_pkts_compl,
+			ipa_ctx->stats.rx_pkts,
+			ipa_ctx->stats.stat_compl,
+			ipa_ctx->stats.aggr_close,
+			ipa_ctx->stats.wan_aggr_close,
+			ipa_ctx->ipa_active_clients.cnt,
+			connect,
+			ipa_ctx->stats.wan_rx_empty,
+			ipa_ctx->stats.wan_repl_rx_empty,
+			ipa_ctx->stats.lan_rx_empty,
+			ipa_ctx->stats.lan_repl_rx_empty,
+			ipa_ctx->stats.flow_enable,
+			ipa_ctx->stats.flow_disable);
+		cnt += nbytes;
+
+		for (i = 0; i < MAX_NUM_EXCP; i++) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt,
+				"lan_rx_excp[%u:%20s]=%u\n", i,
+				ipa_status_excp_name[i],
+				ipa_ctx->stats.rx_excp_pkts[i]);
+			cnt += nbytes;
+		}
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"sw_tx=%u\n"
+			"hw_tx=%u\n"
+			"rx=%u\n"
+			"rx_repl_repost=%u\n"
+			"rx_q_len=%u\n"
+			"act_clnt=%u\n"
+			"con_clnt_bmap=0x%x\n",
+			ipa_ctx->stats.tx_sw_pkts,
+			ipa_ctx->stats.tx_hw_pkts,
+			ipa_ctx->stats.rx_pkts,
+			ipa_ctx->stats.rx_repl_repost,
+			ipa_ctx->stats.rx_q_len,
+			ipa_ctx->ipa_active_clients.cnt,
+			connect);
+		cnt += nbytes;
+
+		for (i = 0; i < MAX_NUM_EXCP; i++) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt,
+				"rx_excp[%u:%35s]=%u\n", i, ipa_excp_name[i],
+				ipa_ctx->stats.rx_excp_pkts[i]);
+			cnt += nbytes;
+		}
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_wstats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+
+#define HEAD_FRMT_STR "%25s\n"
+#define FRMT_STR "%25s %10u\n"
+#define FRMT_STR1 "%25s %10u\n\n"
+
+	int cnt = 0;
+	int nbytes;
+	int ipa_ep_idx;
+	enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD;
+	struct ipa_ep_context *ep;
+
+	do {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
+		cnt += nbytes;
+
+		ipa_ep_idx = ipa2_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		ep = &ipa_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Avail Fifo Desc:",
+			atomic_read(&ep->avail_fifo_desc));
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Status Rcvd:",
+			ep->wstats.rx_pkts_status_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Processed:",
+			ep->wstats.rx_hd_processed);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail);
+		cnt += nbytes;
+
+	} while (0);
+
+	client = IPA_CLIENT_WLAN1_CONS;
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+		"Client IPA_CLIENT_WLAN1_CONS Stats:");
+	cnt += nbytes;
+	while (1) {
+		ipa_ep_idx = ipa2_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		ep = &ipa_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Tx Pkts Dropped:",
+			ep->wstats.tx_pkts_dropped);
+		cnt += nbytes;
+
+nxt_clnt_cons:
+			switch (client) {
+			case IPA_CLIENT_WLAN1_CONS:
+				client = IPA_CLIENT_WLAN2_CONS;
+				nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+					"Client IPA_CLIENT_WLAN2_CONS Stats:");
+				cnt += nbytes;
+				continue;
+			case IPA_CLIENT_WLAN2_CONS:
+				client = IPA_CLIENT_WLAN3_CONS;
+				nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+					"Client IPA_CLIENT_WLAN3_CONS Stats:");
+				cnt += nbytes;
+				continue;
+			case IPA_CLIENT_WLAN3_CONS:
+				client = IPA_CLIENT_WLAN4_CONS;
+				nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+					"Client IPA_CLIENT_WLAN4_CONS Stats:");
+				cnt += nbytes;
+				continue;
+			case IPA_CLIENT_WLAN4_CONS:
+			default:
+				break;
+			}
+		break;
+	}
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+		"\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:");
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Allocated:",
+		ipa_ctx->wc_memb.wlan_comm_total_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Avail:", ipa_ctx->wc_memb.wlan_comm_free_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1,
+		"Total Tx Pkts Freed:", ipa_ctx->wc_memb.total_tx_pkts_freed);
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_ntn(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	struct IpaHwStatsNTNInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa2_get_ntn_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX tail_ptr_val=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n"
+			"TX num_bam_int_handled_while_not_in_bam=%u\n",
+			TX_STATS(num_pkts_processed),
+			TX_STATS(tail_ptr_val),
+			TX_STATS(num_db_fired),
+			TX_STATS(tx_comp_ring_stats.ringFull),
+			TX_STATS(tx_comp_ring_stats.ringEmpty),
+			TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+			TX_STATS(tx_comp_ring_stats.ringUsageLow),
+			TX_STATS(tx_comp_ring_stats.RingUtilCount),
+			TX_STATS(bam_stats.bamFifoFull),
+			TX_STATS(bam_stats.bamFifoEmpty),
+			TX_STATS(bam_stats.bamFifoUsageHigh),
+			TX_STATS(bam_stats.bamFifoUsageLow),
+			TX_STATS(bam_stats.bamUtilCount),
+			TX_STATS(num_db),
+			TX_STATS(num_unexpected_db),
+			TX_STATS(num_bam_int_handled),
+			TX_STATS(num_bam_int_in_non_running_state),
+			TX_STATS(num_qmb_int_handled),
+			TX_STATS(num_bam_int_handled_while_wait_for_bam),
+			TX_STATS(num_bam_int_handled_while_not_in_bam));
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n",
+			RX_STATS(max_outstanding_pkts),
+			RX_STATS(num_pkts_processed),
+			RX_STATS(rx_ring_rp_value),
+			RX_STATS(rx_ind_ring_stats.ringFull),
+			RX_STATS(rx_ind_ring_stats.ringEmpty),
+			RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+			RX_STATS(rx_ind_ring_stats.ringUsageLow),
+			RX_STATS(rx_ind_ring_stats.RingUtilCount),
+			RX_STATS(bam_stats.bamFifoFull),
+			RX_STATS(bam_stats.bamFifoEmpty),
+			RX_STATS(bam_stats.bamFifoUsageHigh),
+			RX_STATS(bam_stats.bamFifoUsageLow),
+			RX_STATS(bam_stats.bamUtilCount),
+			RX_STATS(num_bam_int_handled),
+			RX_STATS(num_db),
+			RX_STATS(num_unexpected_db),
+			RX_STATS(num_pkts_in_dis_uninit_state),
+			RX_STATS(num_bam_int_handled_while_not_in_bam),
+			RX_STATS(num_bam_int_handled_while_in_bam_state));
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read NTN stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_wdi(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct IpaHwStatsWDIInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa2_get_wdi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX copy_engine_doorbell_value=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n",
+			stats.tx_ch_stats.num_pkts_processed,
+			stats.tx_ch_stats.copy_engine_doorbell_value,
+			stats.tx_ch_stats.num_db_fired,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringFull,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
+			stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount,
+			stats.tx_ch_stats.bam_stats.bamFifoFull,
+			stats.tx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.tx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.tx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.tx_ch_stats.bam_stats.bamUtilCount,
+			stats.tx_ch_stats.num_db,
+			stats.tx_ch_stats.num_unexpected_db,
+			stats.tx_ch_stats.num_bam_int_handled,
+			stats.tx_ch_stats.num_bam_int_in_non_running_state,
+			stats.tx_ch_stats.num_qmb_int_handled,
+			stats.tx_ch_stats.
+				num_bam_int_handled_while_wait_for_bam);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n"
+			"RX reserved1=%u\n"
+			"RX reserved2=%u\n",
+			stats.rx_ch_stats.max_outstanding_pkts,
+			stats.rx_ch_stats.num_pkts_processed,
+			stats.rx_ch_stats.rx_ring_rp_value,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringFull,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
+			stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount,
+			stats.rx_ch_stats.bam_stats.bamFifoFull,
+			stats.rx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.rx_ch_stats.bam_stats.bamUtilCount,
+			stats.rx_ch_stats.num_bam_int_handled,
+			stats.rx_ch_stats.num_db,
+			stats.rx_ch_stats.num_unexpected_db,
+			stats.rx_ch_stats.num_pkts_in_dis_uninit_state,
+			stats.rx_ch_stats.num_ic_inj_vdev_change,
+			stats.rx_ch_stats.num_ic_inj_fw_desc_change,
+			stats.rx_ch_stats.reserved1,
+			stats.rx_ch_stats.reserved2);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read WDI stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+void _ipa_write_dbg_cnt_v1_1(int option)
+{
+	if (option == 1)
+		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0),
+				IPA_DBG_CNTR_ON);
+	else
+		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(0),
+				IPA_DBG_CNTR_OFF);
+}
+
+void _ipa_write_dbg_cnt_v2_0(int option)
+{
+	if (option == 1)
+		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0),
+				IPA_DBG_CNTR_ON);
+	else
+		ipa_write_reg(ipa_ctx->mmio, IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(0),
+				IPA_DBG_CNTR_OFF);
+}
+
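+/*
+ * debugfs write handler for the debug counter control: writing 1 enables
+ * HW debug counter 0, any other value disables it.
+ */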
+static ssize_t ipa_write_dbg_cnt(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	u32 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtou32(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_ctx->ctrl->ipa_write_dbg_cnt(option);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return count;
+}
+
+int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len)
+{
+	int regval;
+
+	regval = ipa_read_reg(ipa_ctx->mmio,
+			IPA_DEBUG_CNT_REG_N_OFST_v1_1(0));
+
+	return scnprintf(buf, max_len,
+			"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+}
+
+int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len)
+{
+	int regval;
+
+	regval = ipa_read_reg(ipa_ctx->mmio,
+			IPA_DEBUG_CNT_REG_N_OFST_v2_0(0));
+
+	return scnprintf(buf, max_len,
+			"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+}
+
+static ssize_t ipa_read_dbg_cnt(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	nbytes = ipa_ctx->ctrl->ipa_read_dbg_cnt(dbg_buff, IPA_MAX_MSG_LEN);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_read_msg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	int i;
+
+	for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				"msg[%u:%27s] W:%u R:%u\n", i,
+				ipa_event_name[i],
+				ipa_ctx->stats.msg_w[i],
+				ipa_ctx->stats.msg_r[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa_read_nat4(struct file *file,
+		char __user *ubuf, size_t count,
+		loff_t *ppos) {
+
+#define ENTRY_U32_FIELDS 8
+#define NAT_ENTRY_ENABLE 0x8000
+#define NAT_ENTRY_RST_FIN_BIT 0x4000
+#define BASE_TABLE 0
+#define EXPANSION_TABLE 1
+
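+	/*
+	 * Each NAT rule entry spans ENTRY_U32_FIELDS (8) 32-bit words; the
+	 * enable flag lives in bit 15 of the upper halfword of word 4, which
+	 * also carries the RST/FIN flag and the IP checksum delta.
+	 */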
+	u32 *base_tbl, *indx_tbl;
+	u32 tbl_size, *tmp;
+	u32 value, i, j, rule_id;
+	u16 enable, tbl_entry, flag;
+	u32 no_entrys = 0;
+
+	mutex_lock(&ipa_ctx->nat_mem.lock);
+	value = ipa_ctx->nat_mem.public_ip_addr;
+	pr_err(
+				"Table IP Address:%d.%d.%d.%d\n",
+				((value & 0xFF000000) >> 24),
+				((value & 0x00FF0000) >> 16),
+				((value & 0x0000FF00) >> 8),
+				((value & 0x000000FF)));
+
+	pr_err("Table Size:%d\n",
+				ipa_ctx->nat_mem.size_base_tables);
+
+	pr_err("Expansion Table Size:%d\n",
+				ipa_ctx->nat_mem.size_expansion_tables-1);
+
+	if (!ipa_ctx->nat_mem.is_sys_mem)
+		pr_err("Not supported for local(shared) memory\n");
+
+	/* Print Base tables */
+	rule_id = 0;
+	for (j = 0; j < 2; j++) {
+		if (j == BASE_TABLE) {
+			tbl_size = ipa_ctx->nat_mem.size_base_tables;
+			base_tbl = (u32 *)ipa_ctx->nat_mem.ipv4_rules_addr;
+
+			pr_err("\nBase Table:\n");
+		} else {
+			tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1;
+			base_tbl =
+			 (u32 *)ipa_ctx->nat_mem.ipv4_expansion_rules_addr;
+
+			pr_err("\nExpansion Base Table:\n");
+		}
+
+		if (base_tbl != NULL) {
+			for (i = 0; i <= tbl_size; i++, rule_id++) {
+				tmp = base_tbl;
+				value = tmp[4];
+				enable = ((value & 0xFFFF0000) >> 16);
+
+				if (enable & NAT_ENTRY_ENABLE) {
+					no_entrys++;
+					pr_err("Rule:%d ", rule_id);
+
+					value = *tmp;
+					pr_err(
+						"Private_IP:%d.%d.%d.%d ",
+						((value & 0xFF000000) >> 24),
+						((value & 0x00FF0000) >> 16),
+						((value & 0x0000FF00) >> 8),
+						((value & 0x000000FF)));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Target_IP:%d.%d.%d.%d ",
+						((value & 0xFF000000) >> 24),
+						((value & 0x00FF0000) >> 16),
+						((value & 0x0000FF00) >> 8),
+						((value & 0x000000FF)));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Next_Index:%d  Public_Port:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Private_Port:%d  Target_Port:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					flag = ((value & 0xFFFF0000) >> 16);
+					if (flag & NAT_ENTRY_RST_FIN_BIT) {
+						pr_err(
+							"IP_CKSM_delta:0x%x  Flags:%s ",
+							(value & 0x0000FFFF),
+							"Direct_To_A5");
+					} else {
+						pr_err(
+							"IP_CKSM_delta:0x%x  Flags:%s ",
+							(value & 0x0000FFFF),
+							"Fwd_to_route");
+					}
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Time_stamp:0x%x Proto:%d ",
+						(value & 0x00FFFFFF),
+						((value & 0xFF000000) >> 24));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Prev_Index:%d  Indx_tbl_entry:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"TCP_UDP_cksum_delta:0x%x\n",
+						((value & 0xFFFF0000) >> 16));
+				}
+
+				base_tbl += ENTRY_U32_FIELDS;
+
+			}
+		}
+	}
+
+	/* Print Index tables */
+	rule_id = 0;
+	for (j = 0; j < 2; j++) {
+		if (j == BASE_TABLE) {
+			tbl_size = ipa_ctx->nat_mem.size_base_tables;
+			indx_tbl = (u32 *)ipa_ctx->nat_mem.index_table_addr;
+
+			pr_err("\nIndex Table:\n");
+		} else {
+			tbl_size = ipa_ctx->nat_mem.size_expansion_tables-1;
+			indx_tbl =
+			 (u32 *)ipa_ctx->nat_mem.index_table_expansion_addr;
+
+			pr_err("\nExpansion Index Table:\n");
+		}
+
+		if (indx_tbl != NULL) {
+			for (i = 0; i <= tbl_size; i++, rule_id++) {
+				tmp = indx_tbl;
+				value = *tmp;
+				tbl_entry = (value & 0x0000FFFF);
+
+				if (tbl_entry) {
+					pr_err("Rule:%d ", rule_id);
+
+					value = *tmp;
+					pr_err(
+						"Table_Entry:%d  Next_Index:%d\n",
+						tbl_entry,
+						((value & 0xFFFF0000) >> 16));
+				}
+
+				indx_tbl++;
+			}
+		}
+	}
+	pr_err("Current No. Nat Entries: %d\n", no_entrys);
+	mutex_unlock(&ipa_ctx->nat_mem.lock);
+
+	return 0;
+}
+
+static ssize_t ipa_rm_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, nbytes, cnt = 0;
+
+	result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Error in printing RM stat %d\n", result);
+		cnt += nbytes;
+	} else
+		cnt += result;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
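+/*
+ * Dump each field of a HW packet status entry; the filter-related fields
+ * use either the v2.0 or the v2.5 layout, selected by the detected IPA HW
+ * version.
+ */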
+static void ipa_dump_status(struct ipa_hw_pkt_status *status)
+{
+	IPA_DUMP_STATUS_FIELD(status_opcode);
+	IPA_DUMP_STATUS_FIELD(exception);
+	IPA_DUMP_STATUS_FIELD(status_mask);
+	IPA_DUMP_STATUS_FIELD(pkt_len);
+	IPA_DUMP_STATUS_FIELD(endp_src_idx);
+	IPA_DUMP_STATUS_FIELD(endp_dest_idx);
+	IPA_DUMP_STATUS_FIELD(metadata);
+
+	if (ipa_ctx->ipa_hw_type < IPA_HW_v2_5) {
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_local);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_global);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_pipe_idx);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_match);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.filt_rule_idx);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.ret_hdr);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_0_pkt_status.tag_f_1);
+	} else {
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_local);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_global);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_pipe_idx);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.ret_hdr);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.filt_rule_idx);
+		IPA_DUMP_STATUS_FIELD(ipa_hw_v2_5_pkt_status.tag_f_1);
+	}
+
+	IPA_DUMP_STATUS_FIELD(tag_f_2);
+	IPA_DUMP_STATUS_FIELD(time_day_ctr);
+	IPA_DUMP_STATUS_FIELD(nat_hit);
+	IPA_DUMP_STATUS_FIELD(nat_tbl_idx);
+	IPA_DUMP_STATUS_FIELD(nat_type);
+	IPA_DUMP_STATUS_FIELD(route_local);
+	IPA_DUMP_STATUS_FIELD(route_tbl_idx);
+	IPA_DUMP_STATUS_FIELD(route_match);
+	IPA_DUMP_STATUS_FIELD(ucp);
+	IPA_DUMP_STATUS_FIELD(route_rule_idx);
+	IPA_DUMP_STATUS_FIELD(hdr_local);
+	IPA_DUMP_STATUS_FIELD(hdr_offset);
+	IPA_DUMP_STATUS_FIELD(frag_hit);
+	IPA_DUMP_STATUS_FIELD(frag_rule);
+}
+
+static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ipa_status_stats *stats;
+	int i, j;
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats)
+		return -ENOMEM;
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		if (!ipa_ctx->ep[i].sys || !ipa_ctx->ep[i].sys->status_stat)
+			continue;
+
+		memcpy(stats, ipa_ctx->ep[i].sys->status_stat, sizeof(*stats));
+		stats->curr = (stats->curr + IPA_MAX_STATUS_STAT_NUM - 1)
+			% IPA_MAX_STATUS_STAT_NUM;
+		pr_err("Statuses for pipe %d\n", i);
+		for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) {
+			pr_err("curr=%d\n", stats->curr);
+			ipa_dump_status(&stats->status[stats->curr]);
+			pr_err("\n\n\n");
+			stats->curr = (stats->curr + 1) %
+				IPA_MAX_STATUS_STAT_NUM;
+		}
+	}
+
+	kfree(stats);
+	return 0;
+}
+
+static ssize_t ipa2_print_active_clients_log(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int cnt;
+	int table_size;
+
+	if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated");
+		return 0;
+	}
+	memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+	ipa_active_clients_lock();
+	cnt = ipa2_active_clients_log_print_buffer(active_clients_buf,
+			IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE - IPA_MAX_MSG_LEN);
+	table_size = ipa2_active_clients_log_print_table(active_clients_buf
+			+ cnt, IPA_MAX_MSG_LEN);
+	ipa_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+			cnt + table_size);
+}
+
+static ssize_t ipa2_clear_active_clients_log(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	ipa2_active_clients_log_clear();
+
+	return count;
+}
+
+static ssize_t ipa_read_rx_polling_timeout(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int min_cnt;
+	int max_cnt;
+
+	if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated");
+		return 0;
+	}
+	memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+	min_cnt = scnprintf(active_clients_buf,
+		IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+		"Rx Min Poll count = %u\n",
+		ipa_ctx->ipa_rx_min_timeout_usec);
+
+	max_cnt = scnprintf(active_clients_buf + min_cnt,
+		IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+		"Rx Max Poll count = %u\n",
+		ipa_ctx->ipa_rx_max_timeout_usec);
+
+	return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+			min_cnt + max_cnt);
+}
+
+static ssize_t ipa_write_rx_polling_timeout(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 polltime = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	if (copy_from_user(dbg_buff, ubuf, count))
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	if (kstrtos8(dbg_buff, 0, &polltime))
+		return -EFAULT;
+
+	ipa_rx_timeout_min_max_calc(&ipa_ctx->ipa_rx_min_timeout_usec,
+		&ipa_ctx->ipa_rx_max_timeout_usec, polltime);
+	return count;
+}
+
+static ssize_t ipa_read_polling_iteration(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int cnt;
+
+	if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated");
+		return 0;
+	}
+
+	memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE);
+
+	cnt = scnprintf(active_clients_buf, IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+			"Polling Iteration count = %u\n",
+			ipa_ctx->ipa_polling_iteration);
+
+	return simple_read_from_buffer(ubuf, count, ppos, active_clients_buf,
+			cnt);
+}
+
+static ssize_t ipa_write_polling_iteration(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	s8 iteration_cnt = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	if (copy_from_user(dbg_buff, ubuf, count))
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	if (kstrtos8(dbg_buff, 0, &iteration_cnt))
+		return -EFAULT;
+
+	if ((iteration_cnt >= MIN_POLLING_ITERATION) &&
+		(iteration_cnt <= MAX_POLLING_ITERATION))
+		ipa_ctx->ipa_polling_iteration = iteration_cnt;
+	else
+		ipa_ctx->ipa_polling_iteration = MAX_POLLING_ITERATION;
+
+	return count;
+}
+
+const struct file_operations ipa_gen_reg_ops = {
+	.read = ipa_read_gen_reg,
+};
+
+const struct file_operations ipa_ep_reg_ops = {
+	.read = ipa_read_ep_reg,
+	.write = ipa_write_ep_reg,
+};
+
+const struct file_operations ipa_keep_awake_ops = {
+	.read = ipa_read_keep_awake,
+	.write = ipa_write_keep_awake,
+};
+
+const struct file_operations ipa_ep_holb_ops = {
+	.write = ipa_write_ep_holb,
+};
+
+const struct file_operations ipa_hdr_ops = {
+	.read = ipa_read_hdr,
+};
+
+const struct file_operations ipa_rt_ops = {
+	.read = ipa_read_rt,
+	.open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_proc_ctx_ops = {
+	.read = ipa_read_proc_ctx,
+};
+
+const struct file_operations ipa_flt_ops = {
+	.read = ipa_read_flt,
+	.open = ipa_open_dbg,
+};
+
+const struct file_operations ipa_stats_ops = {
+	.read = ipa_read_stats,
+};
+
+const struct file_operations ipa_wstats_ops = {
+	.read = ipa_read_wstats,
+};
+
+const struct file_operations ipa_wdi_ops = {
+	.read = ipa_read_wdi,
+};
+
+const struct file_operations ipa_ntn_ops = {
+	.read = ipa_read_ntn,
+};
+
+const struct file_operations ipa_msg_ops = {
+	.read = ipa_read_msg,
+};
+
+const struct file_operations ipa_dbg_cnt_ops = {
+	.read = ipa_read_dbg_cnt,
+	.write = ipa_write_dbg_cnt,
+};
+
+const struct file_operations ipa_nat4_ops = {
+	.read = ipa_read_nat4,
+};
+
+const struct file_operations ipa_rm_stats = {
+	.read = ipa_rm_read_stats,
+};
+
+const struct file_operations ipa_status_stats_ops = {
+	.read = ipa_status_stats_read,
+};
+
+const struct file_operations ipa2_active_clients = {
+	.read = ipa2_print_active_clients_log,
+	.write = ipa2_clear_active_clients_log,
+};
+
+const struct file_operations ipa_rx_poll_time_ops = {
+	.read = ipa_read_rx_polling_timeout,
+	.write = ipa_write_rx_polling_timeout,
+};
+
+const struct file_operations ipa_poll_iteration_ops = {
+	.read = ipa_read_polling_iteration,
+	.write = ipa_write_polling_iteration,
+};
+
+void ipa_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP;
+	const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+	struct dentry *file;
+
+	dent = debugfs_create_dir("ipa", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	file = debugfs_create_u32("hw_type", read_only_mode,
+			dent, &ipa_ctx->ipa_hw_type);
+	if (!file) {
+		IPAERR("could not create hw_type file\n");
+		goto fail;
+	}
+
+
+	dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+			&ipa_gen_reg_ops);
+	if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+		IPAERR("fail to create file for debug_fs gen_reg\n");
+		goto fail;
+	}
+
+	dfile_active_clients = debugfs_create_file("active_clients",
+			read_write_mode, dent, 0, &ipa2_active_clients);
+	if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+		IPAERR("fail to create file for debug_fs active_clients\n");
+		goto fail;
+	}
+
+	active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENTS_BUF_SIZE,
+			GFP_KERNEL);
+	if (active_clients_buf == NULL)
+		IPAERR("fail to allocate active clients memory buffer");
+
+	dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+			&ipa_ep_reg_ops);
+	if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+		IPAERR("fail to create file for debug_fs ep_reg\n");
+		goto fail;
+	}
+
+	dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode,
+			dent, 0, &ipa_keep_awake_ops);
+	if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) {
+		IPAERR("fail to create file for debug_fs dfile_keep_awake\n");
+		goto fail;
+	}
+
+	dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent,
+			0, &ipa_ep_holb_ops);
+	if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) {
+		IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+		goto fail;
+	}
+
+	dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+			&ipa_hdr_ops);
+	if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+		IPAERR("fail to create file for debug_fs hdr\n");
+		goto fail;
+	}
+
+	dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent,
+		0, &ipa_proc_ctx_ops);
+	if (!dfile_proc_ctx || IS_ERR(dfile_proc_ctx)) {
+		IPAERR("fail to create file for debug_fs proc_ctx\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa_rt_ops);
+	if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+		IPAERR("fail to create file for debug_fs ip4 rt\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa_rt_ops);
+	if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6:w rt\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa_flt_ops);
+	if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+		IPAERR("fail to create file for debug_fs ip4 flt\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa_flt_ops);
+	if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+		IPAERR("fail to create file for debug_fs ip6 flt\n");
+		goto fail;
+	}
+
+	dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0,
+			&ipa_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		IPAERR("fail to create file for debug_fs stats\n");
+		goto fail;
+	}
+
+	dfile_wstats = debugfs_create_file("wstats", read_only_mode,
+			dent, 0, &ipa_wstats_ops);
+	if (!dfile_wstats || IS_ERR(dfile_wstats)) {
+		IPAERR("fail to create file for debug_fs wstats\n");
+		goto fail;
+	}
+
+	dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0,
+			&ipa_wdi_ops);
+	if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) {
+		IPAERR("fail to create file for debug_fs wdi stats\n");
+		goto fail;
+	}
+
+	dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+			&ipa_ntn_ops);
+	if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+		IPAERR("fail to create file for debug_fs ntn stats\n");
+		goto fail;
+	}
+
+	dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
+			&ipa_dbg_cnt_ops);
+	if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
+		IPAERR("fail to create file for debug_fs dbg_cnt\n");
+		goto fail;
+	}
+
+	dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
+			&ipa_msg_ops);
+	if (!dfile_msg || IS_ERR(dfile_msg)) {
+		IPAERR("fail to create file for debug_fs msg\n");
+		goto fail;
+	}
+
+	dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent,
+			0, &ipa_nat4_ops);
+	if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) {
+		IPAERR("fail to create file for debug_fs ip4 nat\n");
+		goto fail;
+	}
+
+	dfile_rm_stats = debugfs_create_file("rm_stats",
+			read_only_mode, dent, 0, &ipa_rm_stats);
+	if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
+		IPAERR("fail to create file for debug_fs rm_stats\n");
+		goto fail;
+	}
+
+	dfile_status_stats = debugfs_create_file("status_stats",
+			read_only_mode, dent, 0, &ipa_status_stats_ops);
+	if (!dfile_status_stats || IS_ERR(dfile_status_stats)) {
+		IPAERR("fail to create file for debug_fs status_stats\n");
+		goto fail;
+	}
+
+	dfile_ipa_rx_poll_timeout = debugfs_create_file("ipa_rx_poll_time",
+			read_write_mode, dent, 0, &ipa_rx_poll_time_ops);
+	if (!dfile_ipa_rx_poll_timeout || IS_ERR(dfile_ipa_rx_poll_timeout)) {
+		IPAERR("fail to create file for debug_fs rx poll timeout\n");
+		goto fail;
+	}
+
+	dfile_ipa_poll_iteration = debugfs_create_file("ipa_poll_iteration",
+			read_write_mode, dent, 0, &ipa_poll_iteration_ops);
+	if (!dfile_ipa_poll_iteration || IS_ERR(dfile_ipa_poll_iteration)) {
+		IPAERR("fail to create file for debug_fs poll iteration\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
+		dent, &ipa_ctx->enable_clock_scaling);
+	if (!file) {
+		IPAERR("could not create enable_clock_scaling file\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
+		read_write_mode, dent,
+		&ipa_ctx->ctrl->clock_scaling_bw_threshold_nominal);
+	if (!file) {
+		IPAERR("could not create bw_threshold_nominal_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
+		read_write_mode, dent,
+		&ipa_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+	if (!file) {
+		IPAERR("could not create bw_threshold_turbo_mbps\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+void ipa_debugfs_remove(void)
+{
+	if (IS_ERR(dent)) {
+		IPAERR("ipa_debugfs_remove: folder was not created.\n");
+		return;
+	}
+	if (active_clients_buf != NULL) {
+		kfree(active_clients_buf);
+		active_clients_buf = NULL;
+	}
+	debugfs_remove_recursive(dent);
+}
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa_debugfs_init(void) {}
+void ipa_debugfs_remove(void) {}
+int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len)
+{
+	return 0;
+}
+int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe)
+{
+	return 0;
+}
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len)
+{
+	return 0;
+}
+void _ipa_write_dbg_cnt_v1_1(int option) {}
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len)
+{
+	return 0;
+}
+int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe)
+{
+	return 0;
+}
+void _ipa_write_dbg_cnt_v2_0(int option) {}
+int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len)
+{
+	return 0;
+}
+#endif
+
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
new file mode 100644
index 0000000..bee6331
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dma.c
@@ -0,0 +1,884 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
+#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
+#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
+#define IPA_DMA_MAX_PKT_SZ 0xFFFF
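+/*
+ * The sync/async pending-request limits are derived from the descriptor
+ * FIFO sizes: one sps_iovec slot per in-flight copy, with one slot left
+ * unused (the "- 1"), presumably so a completely full FIFO can be told
+ * apart from an empty one.
+ */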
+#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \
+	sizeof(struct sps_iovec) - 1)
+#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \
+	sizeof(struct sps_iovec) - 1)
+
+#define IPADMA_DRV_NAME "ipa_dma"
+
+#define IPADMA_DBG(fmt, args...) \
+	pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define IPADMA_ERR(fmt, args...) \
+	pr_err(IPADMA_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define IPADMA_FUNC_ENTRY() \
+	IPADMA_DBG("ENTRY\n")
+
+#define IPADMA_FUNC_EXIT() \
+	IPADMA_DBG("EXIT\n")
+
+#ifdef CONFIG_DEBUG_FS
+#define IPADMA_MAX_MSG_LEN 1024
+static char dbg_buff[IPADMA_MAX_MSG_LEN];
+static void ipa_dma_debugfs_init(void);
+static void ipa_dma_debugfs_destroy(void);
+#else
+static void ipa_dma_debugfs_init(void) {}
+static void ipa_dma_debugfs_destroy(void) {}
+#endif
+
+/**
+ * struct ipa_dma_xfer_wrapper - IPADMA transfer descr wrapper
+ * @phys_addr_src: physical address of the source data to copy
+ * @phys_addr_dest: physical address to store the copied data
+ * @len: len in bytes to copy
+ * @link: linked to the wrappers list on the proper(sync/async) cons pipe
+ * @xfer_done: completion object for sync_memcpy completion
+ * @callback: IPADMA client provided completion callback
+ * @user1: cookie1 for above callback
+ *
+ * This struct can wrap both sync and async memcpy transfer descriptors.
+ */
+struct ipa_dma_xfer_wrapper {
+	u64 phys_addr_src;
+	u64 phys_addr_dest;
+	u16 len;
+	struct list_head link;
+	struct completion xfer_done;
+	void (*callback)(void *user1);
+	void *user1;
+};
+
+/**
+ * struct ipa_dma_ctx -IPADMA driver context information
+ * @is_enabled: is ipa_dma enabled?
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy
+ * @ipa_dma_xfer_wrapper_cache: cache of ipa_dma_xfer_wrapper structs
+ * @sync_lock: lock for synchronisation in sync_memcpy
+ * @async_lock: lock for synchronisation in async_memcpy
+ * @enable_lock: lock for is_enabled
+ * @pending_lock: lock to synchronize is_enabled and the pending counters
+ * @done: completed once no work is pending and ipadma can be destroyed
+ * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
+ * @ipa_dma_async_prod_hdl: handle of async memcpy producer
+ * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
+ * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
+ * @sync_memcpy_pending_cnt: number of pending sync memcpy operations
+ * @async_memcpy_pending_cnt: number of pending async memcpy operations
+ * @uc_memcpy_pending_cnt: number of pending uc memcpy operations
+ * @total_sync_memcpy: total number of sync memcpy (statistics)
+ * @total_async_memcpy: total number of async memcpy (statistics)
+ * @total_uc_memcpy: total number of uc memcpy (statistics)
+ */
+struct ipa_dma_ctx {
+	bool is_enabled;
+	bool destroy_pending;
+	struct kmem_cache *ipa_dma_xfer_wrapper_cache;
+	struct mutex sync_lock;
+	spinlock_t async_lock;
+	struct mutex enable_lock;
+	spinlock_t pending_lock;
+	struct completion done;
+	u32 ipa_dma_sync_prod_hdl;
+	u32 ipa_dma_async_prod_hdl;
+	u32 ipa_dma_sync_cons_hdl;
+	u32 ipa_dma_async_cons_hdl;
+	atomic_t sync_memcpy_pending_cnt;
+	atomic_t async_memcpy_pending_cnt;
+	atomic_t uc_memcpy_pending_cnt;
+	atomic_t total_sync_memcpy;
+	atomic_t total_async_memcpy;
+	atomic_t total_uc_memcpy;
+};
+static struct ipa_dma_ctx *ipa_dma_ctx;
+
+/**
+ * ipa2_dma_init() -Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the
+ * DMA pipes:
+ *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
+ *	MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ *		-EFAULT: IPADMA is already initialized
+ *		-ENOMEM: allocating memory error
+ *		-EPERM: pipe connection failed
+ */
+int ipa2_dma_init(void)
+{
+	struct ipa_dma_ctx *ipa_dma_ctx_t;
+	struct ipa_sys_connect_params sys_in;
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+
+	if (ipa_dma_ctx) {
+		IPADMA_ERR("Already initialized.\n");
+		return -EFAULT;
+	}
+	ipa_dma_ctx_t = kzalloc(sizeof(*(ipa_dma_ctx)), GFP_KERNEL);
+
+	if (!ipa_dma_ctx_t) {
+		IPADMA_ERR("kzalloc error.\n");
+		return -ENOMEM;
+	}
+
+	ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
+		kmem_cache_create("IPA_DMA_XFER_WRAPPER",
+			sizeof(struct ipa_dma_xfer_wrapper), 0, 0, NULL);
+	if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
+		IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
+		res = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+
+	mutex_init(&ipa_dma_ctx_t->enable_lock);
+	spin_lock_init(&ipa_dma_ctx_t->async_lock);
+	mutex_init(&ipa_dma_ctx_t->sync_lock);
+	spin_lock_init(&ipa_dma_ctx_t->pending_lock);
+	init_completion(&ipa_dma_ctx_t->done);
+	ipa_dma_ctx_t->is_enabled = false;
+	ipa_dma_ctx_t->destroy_pending = false;
+	atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
+
+	/* IPADMA SYNC PROD-source for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+		IPADMA_ERR(":setup sync prod pipe failed\n");
+		res = -EPERM;
+		goto fail_sync_prod;
+	}
+
+	/* IPADMA SYNC CONS-destination for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = NULL;
+	sys_in.priv = NULL;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+		IPADMA_ERR(":setup sync cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_sync_cons;
+	}
+
+	IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+	/* IPADMA ASYNC PROD-source for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	sys_in.notify = NULL;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+		IPADMA_ERR(":setup async prod pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_prod;
+	}
+
+	/* IPADMA ASYNC CONS-destination for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = ipa_dma_async_memcpy_notify_cb;
+	sys_in.priv = NULL;
+	if (ipa2_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+		IPADMA_ERR(":setup async cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_cons;
+	}
+	ipa_dma_debugfs_init();
+	ipa_dma_ctx = ipa_dma_ctx_t;
+	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+	IPADMA_FUNC_EXIT();
+	return res;
+fail_async_cons:
+	ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+	ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+	ipa2_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+	kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+	kfree(ipa_dma_ctx_t);
+	ipa_dma_ctx = NULL;
+	return res;
+
+}
+
+
+/**
+ * ipa2_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ *		-EPERM: IPADMA is not initialized, or is already
+ *		 enabled
+ */
+int ipa2_dma_enable(void)
+{
+	IPADMA_FUNC_ENTRY();
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_dma_ctx->enable_lock);
+	if (ipa_dma_ctx->is_enabled) {
+		IPADMA_DBG("Already enabled.\n");
+		mutex_unlock(&ipa_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+	ipa_dma_ctx->is_enabled = true;
+	mutex_unlock(&ipa_dma_ctx->enable_lock);
+
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa_dma_work_pending(void)
+{
+	if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending sync\n");
+		return true;
+	}
+	if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending async\n");
+		return true;
+	}
+	if (atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending uc\n");
+		return true;
+	}
+	IPADMA_DBG("no pending work\n");
+	return false;
+}
+
+/**
+ * ipa2_dma_disable()- Unvote for IPA clocks.
+ *
+ * enter to power save mode.
+ *
+ * Return codes: 0: success
+ *		-EPERM: IPADMA is not initialized or is already
+ *			disabled
+ *		-EFAULT: cannot disable ipa_dma as there is pending
+ *			memcpy work
+ */
+int ipa2_dma_disable(void)
+{
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa_dma_ctx->enable_lock);
+	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+	if (!ipa_dma_ctx->is_enabled) {
+		IPADMA_DBG("Already disabled.\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	if (ipa_dma_work_pending()) {
+		IPADMA_ERR("There is pending work, can't disable.\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa_dma_ctx->enable_lock);
+		return -EFAULT;
+	}
+	ipa_dma_ctx->is_enabled = false;
+	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+	mutex_unlock(&ipa_dma_ctx->enable_lock);
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa2_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: other
+ */
+int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ep_idx;
+	int res;
+	int i = 0;
+	struct ipa_sys_context *cons_sys;
+	struct ipa_sys_context *prod_sys;
+	struct sps_iovec iov;
+	struct ipa_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa_dma_xfer_wrapper *head_descr = NULL;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (((u32)src != src) || ((u32)dest != dest)) {
+		IPADMA_ERR("Bad addr - only 32b addr supported for BAM");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+	if (!ipa_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+	if (atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt) >=
+		IPA_DMA_MAX_PENDING_SYNC) {
+		atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+		IPADMA_DBG("Reached pending requests limit\n");
+		return -EFAULT;
+	}
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	init_completion(&xfer_descr->xfer_done);
+
+	mutex_lock(&ipa_dma_ctx->sync_lock);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
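+	/*
+	 * Queue the destination buffer on the SYNC CONS pipe first, then the
+	 * source buffer on the SYNC PROD pipe with the EOT flag set;
+	 * completion is then polled for on the consumer side below.
+	 */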
+	res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, NULL, 0);
+	if (res) {
+		IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+		goto fail_sps_send;
+	}
+	res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+		NULL, SPS_IOVEC_FLAG_EOT);
+	if (res) {
+		IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+		BUG();
+	}
+	head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa_dma_xfer_wrapper, link);
+
+	/* in case we are not the head of the list, wait for head to wake us */
+	if (xfer_descr != head_descr) {
+		mutex_unlock(&ipa_dma_ctx->sync_lock);
+		wait_for_completion(&xfer_descr->xfer_done);
+		mutex_lock(&ipa_dma_ctx->sync_lock);
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+					struct ipa_dma_xfer_wrapper, link);
+		BUG_ON(xfer_descr != head_descr);
+	}
+	mutex_unlock(&ipa_dma_ctx->sync_lock);
+
+	do {
+		/* wait for transfer to complete */
+		res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
+		if (res)
+			IPADMA_ERR("Failed: get_iovec, returned %d loop#:%d\n"
+			, res, i);
+
+		usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
+			IPA_DMA_POLLING_MAX_SLEEP_RX);
+		i++;
+	} while (iov.addr == 0);
+
+	mutex_lock(&ipa_dma_ctx->sync_lock);
+	list_del(&head_descr->link);
+	cons_sys->len--;
+	kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+	/* wake the head of the list */
+	if (!list_empty(&cons_sys->head_desc_list)) {
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa_dma_xfer_wrapper, link);
+		complete(&head_descr->xfer_done);
+	}
+	mutex_unlock(&ipa_dma_ctx->sync_lock);
+
+	BUG_ON(dest != iov.addr);
+	BUG_ON(len != iov.size);
+	atomic_inc(&ipa_dma_ctx->total_sync_memcpy);
+	atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+		complete(&ipa_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_sps_send:
+	list_del(&xfer_descr->link);
+	cons_sys->len--;
+	mutex_unlock(&ipa_dma_ctx->sync_lock);
+	kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+		complete(&ipa_dma_ctx->done);
+	return res;
+}
+
+/**
+ * ipa2_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: descr fifo is full.
+ */
+int ipa2_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ep_idx;
+	int res = 0;
+	struct ipa_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa_sys_context *prod_sys;
+	struct ipa_sys_context *cons_sys;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (((u32)src != src) || ((u32)dest != dest)) {
+		IPADMA_ERR("Bad addr - only 32b addr supported for BAM");
+		return -EINVAL;
+	}
+	if (!user_cb) {
+		IPADMA_ERR("null pointer: user_cb\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+	if (!ipa_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa_dma_ctx->async_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+	if (atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt) >=
+		IPA_DMA_MAX_PENDING_ASYNC) {
+		atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt);
+		IPADMA_DBG("Reached pending requests limit\n");
+		return -EFAULT;
+	}
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfrer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	xfer_descr->callback = user_cb;
+	xfer_descr->user1 = user_param;
+
+	spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len, xfer_descr, 0);
+	if (res) {
+		IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+		goto fail_sps_send;
+	}
+	res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+		NULL, SPS_IOVEC_FLAG_EOT);
+	if (res) {
+		IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+		BUG();
+		goto fail_sps_send;
+	}
+	spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags);
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_sps_send:
+	list_del(&xfer_descr->link);
+	spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags);
+	kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt);
+	if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+		complete(&ipa_dma_ctx->done);
+	return res;
+}
+
+/**
+ * ipa2_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa_dma_ctx->pending_lock, flags);
+	if (!ipa_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa_dma_ctx->uc_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa_dma_ctx->pending_lock, flags);
+
+	res = ipa_uc_memcpy(dest, src, len);
+	if (res) {
+		IPADMA_ERR("ipa_uc_memcpy failed %d\n", res);
+		goto dec_and_exit;
+	}
+
+	atomic_inc(&ipa_dma_ctx->total_uc_memcpy);
+	res = 0;
+dec_and_exit:
+	atomic_dec(&ipa_dma_ctx->uc_memcpy_pending_cnt);
+	if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+		complete(&ipa_dma_ctx->done);
+	IPADMA_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * ipa2_dma_destroy() - Tear down IPADMA pipes and release IPADMA.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa2_dma_destroy(void)
+{
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+	if (!ipa_dma_ctx) {
+		IPADMA_DBG("IPADMA isn't initialized\n");
+		return;
+	}
+
+	if (ipa_dma_work_pending()) {
+		ipa_dma_ctx->destroy_pending = true;
+		IPADMA_DBG("There are pending memcpy, wait for completion\n");
+		wait_for_completion(&ipa_dma_ctx->done);
+	}
+
+	res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
+	ipa_dma_ctx->ipa_dma_async_cons_hdl = 0;
+	res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
+	ipa_dma_ctx->ipa_dma_sync_cons_hdl = 0;
+	res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_async_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
+	ipa_dma_ctx->ipa_dma_async_prod_hdl = 0;
+	res = ipa2_teardown_sys_pipe(ipa_dma_ctx->ipa_dma_sync_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
+	ipa_dma_ctx->ipa_dma_sync_prod_hdl = 0;
+
+	ipa_dma_debugfs_destroy();
+	kmem_cache_destroy(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache);
+	kfree(ipa_dma_ctx);
+	ipa_dma_ctx = NULL;
+
+	IPADMA_FUNC_EXIT();
+}
+
+/**
+ * ipa_dma_async_memcpy_notify_cb() - Callback called by the IPA driver after a
+ * notification from the SPS driver (or in poll mode) that an Rx operation has
+ * completed (data was written to the dest descriptor on the async_cons ep).
+ *
+ * @priv: not in use.
+ * @evt: event type - IPA_RECEIVE.
+ * @data: the completed iovec.
+ */
+void ipa_dma_async_memcpy_notify_cb(void *priv,
+		enum ipa_dp_evt_type evt, unsigned long data)
+{
+	int ep_idx = 0;
+	struct sps_iovec *iov = (struct sps_iovec *) data;
+	struct ipa_dma_xfer_wrapper *xfer_descr_expected;
+	struct ipa_sys_context *sys;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	sys = ipa_ctx->ep[ep_idx].sys;
+
+	spin_lock_irqsave(&ipa_dma_ctx->async_lock, flags);
+	xfer_descr_expected = list_first_entry(&sys->head_desc_list,
+				 struct ipa_dma_xfer_wrapper, link);
+	list_del(&xfer_descr_expected->link);
+	sys->len--;
+	spin_unlock_irqrestore(&ipa_dma_ctx->async_lock, flags);
+
+	BUG_ON(xfer_descr_expected->phys_addr_dest != iov->addr);
+	BUG_ON(xfer_descr_expected->len != iov->size);
+
+	atomic_inc(&ipa_dma_ctx->total_async_memcpy);
+	atomic_dec(&ipa_dma_ctx->async_memcpy_pending_cnt);
+	xfer_descr_expected->callback(xfer_descr_expected->user1);
+
+	kmem_cache_free(ipa_dma_ctx->ipa_dma_xfer_wrapper_cache,
+		xfer_descr_expected);
+
+	if (ipa_dma_ctx->destroy_pending && !ipa_dma_work_pending())
+		complete(&ipa_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_info;
+
+static ssize_t ipa_dma_debugfs_read(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+
+	if (!ipa_dma_ctx) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Not initialized\n");
+	} else {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Status:\n	IPADMA is %s\n",
+			(ipa_dma_ctx->is_enabled) ? "Enabled" : "Disabled");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Statistics:\n	total sync memcpy: %d\n	",
+			atomic_read(&ipa_dma_ctx->total_sync_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"total async memcpy: %d\n	",
+			atomic_read(&ipa_dma_ctx->total_async_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending sync memcpy jobs: %d\n	",
+			atomic_read(&ipa_dma_ctx->sync_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending async memcpy jobs: %d\n",
+			atomic_read(&ipa_dma_ctx->async_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending uc memcpy jobs: %d\n",
+			atomic_read(&ipa_dma_ctx->uc_memcpy_pending_cnt));
+	}
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa_dma_debugfs_reset_statistics(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	s8 in_num = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &in_num))
+		return -EFAULT;
+	switch (in_num) {
+	case 0:
+		if (ipa_dma_work_pending())
+			IPADMA_DBG("Note, there are pending memcpy\n");
+
+		atomic_set(&ipa_dma_ctx->total_async_memcpy, 0);
+		atomic_set(&ipa_dma_ctx->total_sync_memcpy, 0);
+		break;
+	default:
+		IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
+		break;
+	}
+	return count;
+}
+
+const struct file_operations ipadma_stats_ops = {
+	.read = ipa_dma_debugfs_read,
+	.write = ipa_dma_debugfs_reset_statistics,
+};
+
+static void ipa_dma_debugfs_init(void)
+{
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("ipa_dma", 0);
+	if (IS_ERR(dent)) {
+		IPADMA_ERR("fail to create folder ipa_dma\n");
+		return;
+	}
+
+	dfile_info =
+		debugfs_create_file("info", read_write_mode, dent,
+				 0, &ipadma_stats_ops);
+	if (!dfile_info || IS_ERR(dfile_info)) {
+		IPADMA_ERR("fail to create file stats\n");
+		goto fail;
+	}
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa_dma_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
new file mode 100644
index 0000000..179a31b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -0,0 +1,3717 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+#include "ipa_trace.h"
+
+#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY_RX 40
+#define POLLING_INACTIVITY_TX 40
+#define POLLING_MIN_SLEEP_TX 400
+#define POLLING_MAX_SLEEP_TX 500
+#define IPA_MTU 1500
+/* 8K less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
+#define IPA_GENERIC_AGGR_TIME_LIMIT 1
+#define IPA_GENERIC_AGGR_PKT_LIMIT 0
+
+#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
+#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
+		(X) + NET_SKB_PAD) +\
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
+		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
+#define IPA_GENERIC_RX_BUFF_LIMIT (\
+		IPA_REAL_GENERIC_RX_BUFF_SZ(\
+		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
+		IPA_GENERIC_RX_BUFF_BASE_SZ)
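+/*
+ * IPA_REAL_GENERIC_RX_BUFF_SZ() is the size actually consumed by an RX
+ * buffer once NET_SKB_PAD and the aligned skb_shared_info overhead are
+ * added; IPA_GENERIC_RX_BUFF_SZ() subtracts that overhead from the
+ * requested size so the real allocation stays close to the base size.
+ */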
+
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
+/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
+
+#define IPA_WLAN_RX_POOL_SZ 100
+#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
+#define IPA_WLAN_RX_BUFF_SZ 2048
+#define IPA_WLAN_COMM_RX_POOL_LOW 100
+#define IPA_WLAN_COMM_RX_POOL_HIGH 900
+
+#define IPA_ODU_RX_BUFF_SZ 2048
+#define IPA_ODU_RX_POOL_SZ 32
+#define IPA_SIZE_DL_CSUM_META_TRAILER 8
+
+#define IPA_HEADROOM 128
+
+static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags);
+static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys);
+static void ipa_replenish_rx_cache(struct ipa_sys_context *sys);
+static void replenish_rx_work_func(struct work_struct *work);
+static void ipa_wq_handle_rx(struct work_struct *work);
+static void ipa_wq_handle_tx(struct work_struct *work);
+static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size);
+static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys,
+				u32 size);
+static int ipa_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa_sys_context *sys);
+static void ipa_cleanup_rx(struct ipa_sys_context *sys);
+static void ipa_wq_rx_avail(struct work_struct *work);
+static void ipa_alloc_wlan_rx_common_cache(u32 size);
+static void ipa_cleanup_wlan_rx_common_cache(void);
+static void ipa_wq_repl_rx(struct work_struct *work);
+static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
+		struct sps_iovec *iovec);
+
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys);
+
+static void ipa_wq_write_done_common(struct ipa_sys_context *sys, u32 cnt)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+	int i;
+
+	for (i = 0; i < cnt; i++) {
+		spin_lock_bh(&sys->spinlock);
+		if (unlikely(list_empty(&sys->head_desc_list))) {
+			spin_unlock_bh(&sys->spinlock);
+			return;
+		}
+		tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+						   struct ipa_tx_pkt_wrapper,
+						   link);
+		list_del(&tx_pkt_expected->link);
+		sys->len--;
+		spin_unlock_bh(&sys->spinlock);
+		if (!tx_pkt_expected->no_unmap_dma) {
+			if (tx_pkt_expected->type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa_ctx->pdev,
+					tx_pkt_expected->mem.phys_base,
+					tx_pkt_expected->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa_ctx->pdev,
+					tx_pkt_expected->mem.phys_base,
+					tx_pkt_expected->mem.size,
+					DMA_TO_DEVICE);
+			}
+		}
+		if (tx_pkt_expected->callback)
+			tx_pkt_expected->callback(tx_pkt_expected->user1,
+					tx_pkt_expected->user2);
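+		/*
+		 * For multi-descriptor sets, release the iovec array that
+		 * ipa_send() allocated: sets of IPA_NUM_DESC_PER_SW_TX
+		 * descriptors come from the DMA pool, anything else was
+		 * kmalloc'd and DMA mapped.
+		 */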
+		if (tx_pkt_expected->cnt > 1 &&
+				tx_pkt_expected->cnt != IPA_LAST_DESC_CNT) {
+			if (tx_pkt_expected->cnt == IPA_NUM_DESC_PER_SW_TX) {
+				dma_pool_free(ipa_ctx->dma_pool,
+					tx_pkt_expected->mult.base,
+					tx_pkt_expected->mult.phys_base);
+			} else {
+				dma_unmap_single(ipa_ctx->pdev,
+					tx_pkt_expected->mult.phys_base,
+					tx_pkt_expected->mult.size,
+					DMA_TO_DEVICE);
+				kfree(tx_pkt_expected->mult.base);
+			}
+		}
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt_expected);
+	}
+}
+
+static void ipa_wq_write_done_status(int src_pipe)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt_expected;
+	struct ipa_sys_context *sys;
+	u32 cnt;
+
+	WARN_ON(src_pipe >= ipa_ctx->ipa_num_pipes);
+
+	if (!ipa_ctx->ep[src_pipe].status.status_en)
+		return;
+
+	sys = ipa_ctx->ep[src_pipe].sys;
+	if (!sys)
+		return;
+
+	spin_lock_bh(&sys->spinlock);
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		spin_unlock_bh(&sys->spinlock);
+		return;
+	}
+	tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa_tx_pkt_wrapper,
+					   link);
+	cnt = tx_pkt_expected->cnt;
+	spin_unlock_bh(&sys->spinlock);
+	ipa_wq_write_done_common(sys, cnt);
+}
+
+/**
+ * ipa_wq_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work:	work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ *   the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ *   pipe context (not needed anymore)
+ * - return the tx buffer back to dma_pool
+ */
+static void ipa_wq_write_done(struct work_struct *work)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	u32 cnt;
+	struct ipa_sys_context *sys;
+
+	tx_pkt = container_of(work, struct ipa_tx_pkt_wrapper, work);
+	cnt = tx_pkt->cnt;
+	sys = tx_pkt->sys;
+
+	ipa_wq_write_done_common(sys, cnt);
+}
+
+static int ipa_handle_tx_core(struct ipa_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	struct sps_iovec iov;
+	int ret;
+	int cnt = 0;
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+				!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			IPAERR("sps_get_iovec failed %d\n", ret);
+			break;
+		}
+
+		if (iov.addr == 0)
+			break;
+
+		ipa_wq_write_done_common(sys, 1);
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
+ */
+static void ipa_tx_switch_to_intr_mode(struct ipa_sys_context *sys)
+{
+	int ret;
+
+	if (!atomic_read(&sys->curr_polling_state)) {
+		IPAERR("already in intr mode\n");
+		goto fail;
+	}
+
+	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		goto fail;
+	}
+	sys->event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		goto fail;
+	}
+	sys->ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		goto fail;
+	}
+	atomic_set(&sys->curr_polling_state, 0);
+	ipa_handle_tx_core(sys, true, false);
+	return;
+
+fail:
+	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+			msecs_to_jiffies(1));
+}
+
+static void ipa_handle_tx(struct ipa_sys_context *sys)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
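+	/*
+	 * Stay in polling mode while completions keep arriving; after
+	 * POLLING_INACTIVITY_TX consecutive idle iterations (each separated
+	 * by a short sleep) switch the pipe back to interrupt mode.
+	 */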
+	do {
+		cnt = ipa_handle_tx_core(sys, true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			usleep_range(POLLING_MIN_SLEEP_TX,
+					POLLING_MAX_SLEEP_TX);
+		} else {
+			inactive_cycles = 0;
+		}
+	} while (inactive_cycles <= POLLING_INACTIVITY_TX);
+
+	ipa_tx_switch_to_intr_mode(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa_wq_handle_tx(struct work_struct *work)
+{
+	struct ipa_sys_context *sys;
+
+	sys = container_of(work, struct ipa_sys_context, work);
+
+	ipa_handle_tx(sys);
+}
+
+/**
+ * ipa_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer was done the SPS will
+ *   notify the sending user via ipa_sps_irq_comp_tx()
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
+		bool in_atomic)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	int result;
+	u16 sps_flags = SPS_IOVEC_FLAG_EOT;
+	dma_addr_t dma_address;
+	u16 len;
+	u32 mem_flag = GFP_ATOMIC;
+	struct sps_iovec iov;
+	int ret;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache, mem_flag);
+	if (!tx_pkt) {
+		IPAERR("failed to alloc tx wrapper\n");
+		goto fail_mem_alloc;
+	}
+
+	if (!desc->dma_address_valid) {
+		dma_address = dma_map_single(ipa_ctx->pdev, desc->pyld,
+			desc->len, DMA_TO_DEVICE);
+	} else {
+		dma_address = desc->dma_address;
+		tx_pkt->no_unmap_dma = true;
+	}
+	if (!dma_address) {
+		IPAERR("failed to DMA wrap\n");
+		goto fail_dma_map;
+	}
+
+	INIT_LIST_HEAD(&tx_pkt->link);
+	tx_pkt->type = desc->type;
+	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
+
+	tx_pkt->mem.phys_base = dma_address;
+	tx_pkt->mem.base = desc->pyld;
+	tx_pkt->mem.size = desc->len;
+	tx_pkt->sys = sys;
+	tx_pkt->callback = desc->callback;
+	tx_pkt->user1 = desc->user1;
+	tx_pkt->user2 = desc->user2;
+
+	/*
+	 * Special treatment for immediate commands, where the structure of the
+	 * descriptor is different
+	 */
+	if (desc->type == IPA_IMM_CMD_DESC) {
+		sps_flags |= SPS_IOVEC_FLAG_IMME;
+		len = desc->opcode;
+		IPADBG("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+				desc->opcode, desc->len, sps_flags);
+		IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+	} else {
+		len = desc->len;
+	}
+
+	INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
+
+	spin_lock_bh(&sys->spinlock);
+	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+	if (sys->policy == IPA_POLICY_NOINTR_MODE) {
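+		/*
+		 * In no-interrupt (polling) mode completions are not
+		 * signalled, so reclaim any already-completed iovecs from
+		 * the descriptor FIFO before queuing the new descriptor.
+		 */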
+		do {
+			ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+			if (ret) {
+				IPADBG("sps_get_iovec failed %d\n", ret);
+				break;
+			}
+			if ((iov.addr == 0x0) && (iov.size == 0x0))
+				break;
+		} while (1);
+	}
+	result = sps_transfer_one(sys->ep->ep_hdl, dma_address, len, tx_pkt,
+			sps_flags);
+	if (result) {
+		IPAERR("sps_transfer_one failed rc=%d\n", result);
+		goto fail_sps_send;
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+
+	return 0;
+
+fail_sps_send:
+	list_del(&tx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
+	dma_unmap_single(ipa_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
+fail_dma_map:
+	kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+	return -EFAULT;
+}
+
+/**
+ * ipa_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send (may be immediate command or data)
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * This function is used for a system-to-BAM connection.
+ * - The SPS driver expects a struct sps_transfer which contains all the data
+ *   for a transaction
+ * - An ipa_tx_pkt_wrapper will be used for each ipa
+ *   descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ *   contain information which will be later used by the user callbacks
+ * - Each transfer will be made by calling sps_transfer()
+ * - Each packet (command or data) that will be sent will also be saved in
+ *   ipa_sys_context for a later check that all data was sent
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
+		bool in_atomic)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+	struct ipa_tx_pkt_wrapper *next_pkt;
+	struct sps_transfer transfer = { 0 };
+	struct sps_iovec *iovec;
+	dma_addr_t dma_addr;
+	int i = 0;
+	int j;
+	int result;
+	int fail_dma_wrap = 0;
+	uint size = num_desc * sizeof(struct sps_iovec);
+	u32 mem_flag = GFP_ATOMIC;
+	struct sps_iovec iov;
+	int ret;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+		transfer.iovec = dma_pool_alloc(ipa_ctx->dma_pool, mem_flag,
+				&dma_addr);
+		if (!transfer.iovec) {
+			IPAERR("fail to alloc dma mem for sps xfr buff\n");
+			return -EFAULT;
+		}
+	} else {
+		transfer.iovec = kmalloc(size, mem_flag);
+		if (!transfer.iovec) {
+			IPAERR("fail to alloc mem for sps xfr buff ");
+			IPAERR("num_desc = %d size = %d\n", num_desc, size);
+			return -EFAULT;
+		}
+		dma_addr  = dma_map_single(ipa_ctx->pdev,
+				transfer.iovec, size, DMA_TO_DEVICE);
+		if (!dma_addr) {
+			IPAERR("dma_map_single failed for sps xfr buff\n");
+			kfree(transfer.iovec);
+			return -EFAULT;
+		}
+	}
+
+	transfer.iovec_phys = dma_addr;
+	transfer.iovec_count = num_desc;
+	spin_lock_bh(&sys->spinlock);
+
+	for (i = 0; i < num_desc; i++) {
+		fail_dma_wrap = 0;
+		tx_pkt = kmem_cache_zalloc(ipa_ctx->tx_pkt_wrapper_cache,
+					   mem_flag);
+		if (!tx_pkt) {
+			IPAERR("failed to alloc tx wrapper\n");
+			goto failure;
+		}
+		/*
+		 * first desc of set is "special" as it holds the count and
+		 * other info
+		 */
+		if (i == 0) {
+			transfer.user = tx_pkt;
+			tx_pkt->mult.phys_base = dma_addr;
+			tx_pkt->mult.base = transfer.iovec;
+			tx_pkt->mult.size = size;
+			tx_pkt->cnt = num_desc;
+			INIT_WORK(&tx_pkt->work, ipa_wq_write_done);
+		}
+
+		iovec = &transfer.iovec[i];
+		iovec->flags = 0;
+
+		INIT_LIST_HEAD(&tx_pkt->link);
+		tx_pkt->type = desc[i].type;
+
+		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+			tx_pkt->mem.base = desc[i].pyld;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					dma_map_single(ipa_ctx->pdev,
+					tx_pkt->mem.base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				tx_pkt->mem.phys_base = desc[i].dma_address;
+				tx_pkt->no_unmap_dma = true;
+			}
+		} else {
+			tx_pkt->mem.base = desc[i].frag;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					skb_frag_dma_map(ipa_ctx->pdev,
+					desc[i].frag,
+					0, tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				tx_pkt->mem.phys_base = desc[i].dma_address;
+				tx_pkt->no_unmap_dma = true;
+			}
+		}
+
+		if (!tx_pkt->mem.phys_base) {
+			IPAERR("failed to DMA map the descriptor payload\n");
+			fail_dma_wrap = 1;
+			goto failure;
+		}
+
+		tx_pkt->sys = sys;
+		tx_pkt->callback = desc[i].callback;
+		tx_pkt->user1 = desc[i].user1;
+		tx_pkt->user2 = desc[i].user2;
+
+		/*
+		 * Point the iovec to the buffer and
+		 * add this packet to system pipe context.
+		 */
+		iovec->addr = tx_pkt->mem.phys_base;
+		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+		/*
+		 * Special treatment for immediate commands, where the structure
+		 * of the descriptor is different
+		 */
+		if (desc[i].type == IPA_IMM_CMD_DESC) {
+			iovec->size = desc[i].opcode;
+			iovec->flags |= SPS_IOVEC_FLAG_IMME;
+			IPA_DUMP_BUFF(desc[i].pyld,
+					tx_pkt->mem.phys_base, desc[i].len);
+		} else {
+			iovec->size = desc[i].len;
+		}
+
+		if (i == (num_desc - 1)) {
+			iovec->flags |= SPS_IOVEC_FLAG_EOT;
+			/* "mark" the last desc */
+			tx_pkt->cnt = IPA_LAST_DESC_CNT;
+		}
+	}
+
+	if (sys->policy == IPA_POLICY_NOINTR_MODE) {
+		do {
+			ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+			if (ret) {
+				IPADBG("sps_get_iovec failed %d\n", ret);
+				break;
+			}
+			if ((iov.addr == 0x0) && (iov.size == 0x0))
+				break;
+		} while (1);
+	}
+	result = sps_transfer(sys->ep->ep_hdl, &transfer);
+	if (result) {
+		IPAERR("sps_transfer failed rc=%d\n", result);
+		goto failure;
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+	return 0;
+
+failure:
+	tx_pkt = transfer.user;
+	for (j = 0; j < i; j++) {
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+			dma_unmap_single(ipa_ctx->pdev, tx_pkt->mem.phys_base,
+				tx_pkt->mem.size,
+				DMA_TO_DEVICE);
+		} else {
+			dma_unmap_page(ipa_ctx->pdev, tx_pkt->mem.phys_base,
+				tx_pkt->mem.size,
+				DMA_TO_DEVICE);
+		}
+		kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+	if (j < num_desc) {
+		/* last desc failed */
+		if (fail_dma_wrap)
+			kmem_cache_free(ipa_ctx->tx_pkt_wrapper_cache, tx_pkt);
+	}
+	if (transfer.iovec_phys) {
+		if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+			dma_pool_free(ipa_ctx->dma_pool, transfer.iovec,
+					transfer.iovec_phys);
+		} else {
+			dma_unmap_single(ipa_ctx->pdev, transfer.iovec_phys,
+					size, DMA_TO_DEVICE);
+			kfree(transfer.iovec);
+		}
+	}
+	spin_unlock_bh(&sys->spinlock);
+	return -EFAULT;
+}
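+
+/*
+ * Usage sketch for ipa_send() (illustrative only; this is essentially what
+ * ipa_send_cmd() and ipa2_tx_dp() below do).  "cmd" and "my_done_cb" are
+ * hypothetical caller-owned objects:
+ *
+ *	struct ipa_desc d[2];
+ *
+ *	memset(d, 0, sizeof(d));
+ *	d[0].opcode = IPA_IP_PACKET_INIT;	// immediate command first
+ *	d[0].pyld = cmd;
+ *	d[0].len = sizeof(*cmd);
+ *	d[0].type = IPA_IMM_CMD_DESC;
+ *	d[1].pyld = skb->data;			// then the data payload
+ *	d[1].len = skb_headlen(skb);
+ *	d[1].type = IPA_DATA_DESC_SKB;
+ *	d[1].callback = my_done_cb;
+ *	d[1].user1 = skb;
+ *	if (ipa_send(sys, 2, d, true))		// true: atomic context
+ *		IPAERR("ipa_send failed\n");
+ */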
+
+/**
+ * ipa_sps_irq_cmd_ack() - callback invoked by the SPS driver after an
+ * immediate command is complete.
+ * @user1:	pointer to the descriptor of the transfer
+ * @user2:	not used by this callback
+ *
+ * Completes the immediate command's completion object; this releases the
+ * thread waiting on that completion object in ipa_send_cmd().
+ */
+static void ipa_sps_irq_cmd_ack(void *user1, int user2)
+{
+	struct ipa_desc *desc = (struct ipa_desc *)user1;
+
+	if (!desc) {
+		IPAERR("desc is NULL\n");
+		WARN_ON(1);
+		return;
+	}
+	IPADBG("got ack for cmd=%d\n", desc->opcode);
+	complete(&desc->xfer_done);
+}
+
+/**
+ * ipa_send_cmd - send immediate commands
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ *
+ * This function blocks until the command is ACKed by the IPA HW; the caller
+ * needs to free any resources it allocated after the function returns.
+ * The callback in ipa_desc should not be set by the caller
+ * for this function.
+ *
+ * A usage sketch follows the function body.
+ */
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr)
+{
+	struct ipa_desc *desc;
+	int result = 0;
+	struct ipa_sys_context *sys;
+	int ep_idx;
+
+	IPADBG("sending command\n");
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+	sys = ipa_ctx->ep[ep_idx].sys;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa_sps_irq_cmd_ack;
+		descr->user1 = descr;
+		if (ipa_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa_sps_irq_cmd_ack;
+		desc->user1 = desc;
+		if (ipa_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
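+
+/*
+ * Usage sketch for ipa_send_cmd() (illustrative only): a caller builds an
+ * immediate-command descriptor and blocks until the HW ACKs it.  "cmd" is a
+ * hypothetical, caller-allocated immediate command payload:
+ *
+ *	struct ipa_desc desc;
+ *
+ *	memset(&desc, 0, sizeof(desc));
+ *	desc.opcode = IPA_IP_PACKET_INIT;	// any supported opcode
+ *	desc.pyld = cmd;
+ *	desc.len = sizeof(*cmd);
+ *	desc.type = IPA_IMM_CMD_DESC;
+ *	if (ipa_send_cmd(1, &desc))		// blocks until ACKed
+ *		IPAERR("command failed\n");
+ *	kfree(cmd);		// caller frees its own resources afterwards
+ */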
+
+/**
+ * ipa_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver to start a Tx poll operation.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ */
+static void ipa_sps_irq_tx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
+	int ret;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			ret = sps_get_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+			if (ret) {
+				IPAERR("sps_get_config() failed %d\n", ret);
+				break;
+			}
+			sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+			ret = sps_set_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+			if (ret) {
+				IPAERR("sps_set_config() failed %d\n", ret);
+				break;
+			}
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * The event is later handled by ipa_wq_write_done().
+ */
+static void ipa_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
+{
+	struct ipa_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		tx_pkt = notify->data.transfer.user;
+		if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
+			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa_poll_pkt() - Poll a packet from the SPS BAM
+ * @sys:	system pipe context to poll
+ * @iov:	[out] the polled transfer descriptor
+ *
+ * Returns 0 when a packet was successfully polled, a negative error code
+ * otherwise (-EIO when there is nothing left to poll).
+ */
+static int ipa_poll_pkt(struct ipa_sys_context *sys,
+		struct sps_iovec *iov)
+{
+	int ret;
+
+	ret = sps_get_iovec(sys->ep->ep_hdl, iov);
+	if (ret) {
+		IPAERR("sps_get_iovec failed %d\n", ret);
+		return ret;
+	}
+
+	if (iov->addr == 0)
+		return -EIO;
+
+	return 0;
+}
+
+/**
+ * ipa_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ *  - Disconnect the packet from the system pipe linked list
+ *  - Unmap the packet's skb so it is no longer DMA mapped
+ *  - Free the packet wrapper back to the cache
+ *  - Prepare a proper skb
+ *  - Call the endpoint's notify function, passing the skb as a parameter
+ *  - Replenish the Rx cache
+ */
+static int ipa_handle_rx_core(struct ipa_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	struct sps_iovec iov;
+	int ret;
+	int cnt = 0;
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+				!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
+		ret = ipa_poll_pkt(sys, &iov);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+			ipa_dma_memcpy_notify(sys, &iov);
+		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+			ipa_wlan_wq_rx_common(sys, iov.size);
+		else
+			ipa_wq_rx_common(sys, iov.size);
+
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa_rx_switch_to_intr_mode(struct ipa_sys_context *sys)
+{
+	int ret;
+
+	if (!sys->ep || !sys->ep->valid) {
+		IPAERR("EP Not Valid, no need to cleanup.\n");
+		return;
+	}
+
+	ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_get_config() failed %d\n", ret);
+		goto fail;
+	}
+
+	if (!atomic_read(&sys->curr_polling_state) &&
+		((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
+		IPADBG("already in intr mode\n");
+		return;
+	}
+
+	if (!atomic_read(&sys->curr_polling_state)) {
+		IPAERR("already in intr mode\n");
+		goto fail;
+	}
+
+	sys->event.options = SPS_O_EOT;
+	ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+	if (ret) {
+		IPAERR("sps_register_event() failed %d\n", ret);
+		goto fail;
+	}
+	sys->ep->connect.options =
+		SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+	ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+	if (ret) {
+		IPAERR("sps_set_config() failed %d\n", ret);
+		goto fail;
+	}
+	atomic_set(&sys->curr_polling_state, 0);
+	if (!sys->ep->napi_enabled)
+		ipa_handle_rx_core(sys, true, false);
+	ipa_dec_release_wakelock(sys->ep->wakelock_client);
+	return;
+
+fail:
+	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+			msecs_to_jiffies(1));
+}
+
+
+/**
+ * ipa_sps_irq_control() - Function to enable or disable BAM IRQ.
+ */
+static void ipa_sps_irq_control(struct ipa_sys_context *sys, bool enable)
+{
+	int ret;
+
+	/*
+	 * Do not change sps config in case we are in polling mode as this
+	 * indicates that sps driver already notified EOT event and sps config
+	 * should not change until ipa driver processes the packet.
+	 */
+	if (atomic_read(&sys->curr_polling_state)) {
+		IPADBG("in polling mode, do not change config\n");
+		return;
+	}
+
+	if (enable) {
+		ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			return;
+		}
+		sys->event.options = SPS_O_EOT;
+		ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+		if (ret) {
+			IPAERR("sps_register_event() failed %d\n", ret);
+			return;
+		}
+		sys->ep->connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+		ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			return;
+		}
+	} else {
+		ret = sps_get_config(sys->ep->ep_hdl,
+				&sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			return;
+		}
+		sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+			SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		ret = sps_set_config(sys->ep->ep_hdl,
+				&sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			return;
+		}
+	}
+}
+
+void ipa_sps_irq_control_all(bool enable)
+{
+	struct ipa_ep_context *ep;
+	int ipa_ep_idx, client_num;
+
+	IPADBG("\n");
+
+	for (client_num = IPA_CLIENT_CONS;
+		client_num < IPA_CLIENT_MAX; client_num++) {
+		if (!IPA_CLIENT_IS_APPS_CONS(client_num))
+			continue;
+
+		ipa_ep_idx = ipa_get_ep_mapping(client_num);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			continue;
+		}
+		ep = &ipa_ctx->ep[ipa_ep_idx];
+		if (!ep->valid) {
+			IPAERR("EP (%d) not allocated.\n", ipa_ep_idx);
+			continue;
+		}
+		ipa_sps_irq_control(ep->sys, enable);
+	}
+}
+
+/**
+ * ipa_sps_irq_rx_notify() - Callback function which is called by the SPS
+ * driver when a packet is received
+ * @notify:	SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred to a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa_sps_irq_rx_notify(struct sps_event_notify *notify)
+{
+	struct ipa_sys_context *sys = (struct ipa_sys_context *)notify->user;
+	int ret;
+
+	IPADBG("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+
+		if (atomic_read(&sys->curr_polling_state)) {
+			sys->ep->eot_in_poll_err++;
+			break;
+		}
+
+		ret = sps_get_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			break;
+		}
+		sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+			  SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		ret = sps_set_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			break;
+		}
+		ipa_inc_acquire_wakelock(sys->ep->wakelock_client);
+		atomic_set(&sys->curr_polling_state, 1);
+		trace_intr_to_poll(sys->ep->client);
+		queue_work(sys->wq, &sys->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+static void switch_to_intr_tx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
+	ipa_handle_tx(sys);
+}
+
+/**
+ * ipa_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @sys: system pipe context on which packets are received
+ *
+ * ipa_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+static void ipa_handle_rx(struct ipa_sys_context *sys)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	do {
+		cnt = ipa_handle_rx_core(sys, true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			trace_idle_sleep_enter(sys->ep->client);
+			usleep_range(ipa_ctx->ipa_rx_min_timeout_usec,
+					ipa_ctx->ipa_rx_max_timeout_usec);
+			trace_idle_sleep_exit(sys->ep->client);
+		} else {
+			inactive_cycles = 0;
+		}
+
+		/* if pipe is out of buffers there is no point polling for
+		 * completed descs; release the worker so delayed work can
+		 * run in a timely manner
+		 */
+		if (sys->len == 0)
+			break;
+
+	} while (inactive_cycles <= ipa_ctx->ipa_polling_iteration);
+
+	trace_poll_to_intr(sys->ep->client);
+	ipa_rx_switch_to_intr_mode(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa2_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in softirq context.
+ * @clnt_hdl:	client handle obtained from ipa2_setup_sys_pipe()
+ * @weight:	poll budget
+ *
+ * When no packets are polled within the budget, the driver schedules a
+ * switch back to interrupt mode.
+ *
+ * Returns the number of polled packets, or 0 on error.
+ *
+ * A usage sketch follows the function body.
+ */
+int ipa2_rx_poll(u32 clnt_hdl, int weight)
+{
+	struct ipa_ep_context *ep;
+	int ret;
+	int cnt = 0;
+	unsigned int delay = 1;
+	struct sps_iovec iov;
+
+	IPADBG("\n");
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm 0x%x\n", clnt_hdl);
+		return cnt;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+	while (cnt < weight &&
+		   atomic_read(&ep->sys->curr_polling_state)) {
+
+		ret = ipa_poll_pkt(ep->sys, &iov);
+		if (ret)
+			break;
+
+		ipa_wq_rx_common(ep->sys, iov.size);
+		cnt += 5;
+	}
+
+	if (cnt == 0) {
+		ep->inactive_cycles++;
+		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+			ep->switch_to_intr = true;
+			delay = 0;
+		}
+		queue_delayed_work(ep->sys->wq,
+			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+	} else
+		ep->inactive_cycles = 0;
+
+	return cnt;
+}
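+
+/*
+ * Usage sketch for ipa2_rx_poll() (illustrative only): a NAPI-style client
+ * typically calls it from its poll handler.  "my_clnt_hdl" and "budget" are
+ * hypothetical caller values:
+ *
+ *	int done = ipa2_rx_poll(my_clnt_hdl, budget);
+ *
+ *	// When nothing was polled, the driver notifies the client with
+ *	// IPA_CLIENT_COMP_NAPI and schedules the switch back to interrupt
+ *	// mode on its own; the client typically just returns "done" here.
+ */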
+
+static void switch_to_intr_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa_sys_context, switch_to_intr_work);
+
+	if (sys->ep->napi_enabled) {
+		if (sys->ep->switch_to_intr) {
+			ipa_rx_switch_to_intr_mode(sys);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+			sys->ep->switch_to_intr = false;
+			sys->ep->inactive_cycles = 0;
+		} else
+			sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+	} else
+		ipa_handle_rx(sys);
+}
+
+/**
+ * ipa_update_repl_threshold() - Update the repl_threshold for the client.
+ * @ipa_client: the IPA client whose replenish threshold should be updated
+ *
+ * A worked example of the threshold arithmetic follows the function body.
+ *
+ * Return value: None.
+ */
+void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
+{
+	int ep_idx;
+	struct ipa_ep_context *ep;
+
+	/* Check if ep is valid. */
+	ep_idx = ipa2_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPADBG("Invalid IPA client\n");
+		return;
+	}
+
+	ep = &ipa_ctx->ep[ep_idx];
+	if (!ep->valid) {
+		IPADBG("EP not valid/Not applicable for client.\n");
+		return;
+	}
+	/*
+	 * Determine how many remaining buffers/descriptors would make the
+	 * pipe drop below the yellow WM bar.
+	 */
+	if (ep->sys->rx_buff_sz)
+		ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
+						/ ep->sys->rx_buff_sz;
+	else
+		ep->rx_replenish_threshold = 0;
+}
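+
+/*
+ * Worked example of the threshold arithmetic above (illustrative numbers):
+ * with a yellow watermark of 32768 bytes and rx_buff_sz of 2048 bytes,
+ * rx_replenish_threshold = 32768 / 2048 = 16, i.e. the fast replenish path
+ * falls back to the delayed replenish work once 16 or fewer Rx buffers
+ * remain queued.
+ */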
+
+/**
+ * ipa2_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - call SPS APIs to create a system-to-BAM connection with IPA.
+ *  - allocate descriptor FIFO
+ *  - register a callback function (ipa_sps_irq_rx_notify or
+ *    ipa_sps_irq_tx_notify - depending on the client type) in case the
+ *    driver is not configured for polling mode
+ *
+ * A usage sketch follows the function body.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+	dma_addr_t dma_addr;
+	char buff[IPA_RESOURCE_NAME_MAX];
+	struct iommu_domain *smmu_domain;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm client:%d fifo_sz:%d\n",
+			sys_in->client, sys_in->desc_fifo_sz);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail_gen;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			IPAERR("EP already allocated.\n");
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa2_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP.\n");
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa2_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP.\n");
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+
+	if (!ep->sys) {
+		ep->sys = kzalloc(sizeof(struct ipa_sys_context), GFP_KERNEL);
+		if (!ep->sys) {
+			IPAERR("failed to alloc sys ctx for client %d\n",
+					sys_in->client);
+			result = -ENOMEM;
+			goto fail_and_disable_clocks;
+		}
+
+		ep->sys->ep = ep;
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
+				sys_in->client);
+		ep->sys->wq = alloc_workqueue(buff,
+				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+		if (!ep->sys->wq) {
+			IPAERR("failed to create wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq;
+		}
+
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
+				sys_in->client);
+		ep->sys->repl_wq = alloc_workqueue(buff,
+				WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+		if (!ep->sys->repl_wq) {
+			IPAERR("failed to create rep wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq2;
+		}
+
+		INIT_LIST_HEAD(&ep->sys->head_desc_list);
+		INIT_LIST_HEAD(&ep->sys->rcycl_list);
+		spin_lock_init(&ep->sys->spinlock);
+	} else {
+		memset(ep->sys, 0, offsetof(struct ipa_sys_context, ep));
+	}
+
+	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
+	if (ipa_assign_policy(sys_in, ep->sys)) {
+		IPAERR("failed to assign policy for client %d\n",
+			sys_in->client);
+		result = -ENOMEM;
+		goto fail_gen2;
+	}
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->napi_enabled = sys_in->napi_enabled;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
+	atomic_set(&ep->avail_fifo_desc,
+		((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
+
+	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
+	    ep->sys->status_stat == NULL) {
+		ep->sys->status_stat =
+			kzalloc(sizeof(struct ipa_status_stats), GFP_KERNEL);
+		if (!ep->sys->status_stat) {
+			IPAERR("no memory\n");
+			goto fail_gen2;
+		}
+	}
+
+	result = ipa_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		goto fail_gen2;
+	}
+
+	result = sps_get_config(ep->ep_hdl, &ep->connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		goto fail_sps_cfg;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+		ep->connect.mode = SPS_MODE_SRC;
+		ep->connect.destination = SPS_DEV_HANDLE_MEM;
+		ep->connect.source = ipa_ctx->bam_handle;
+		ep->connect.dest_pipe_index = ipa_ctx->a5_pipe_index++;
+		ep->connect.src_pipe_index = ipa_ep_idx;
+		/*
+		 * Determine how many remaining buffers/descriptors would make
+		 * the pipe drop below the yellow WM bar.
+		 */
+		if (ep->sys->rx_buff_sz)
+			ep->rx_replenish_threshold =
+			   ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz;
+		else
+			ep->rx_replenish_threshold = 0;
+		/* The actual threshold is read from the register only once
+		 * the WAN pipes are set up, so update the LAN_CONS EP again
+		 * with the right value.
+		 */
+		if (sys_in->client == IPA_CLIENT_APPS_WAN_CONS)
+			ipa_update_repl_threshold(IPA_CLIENT_APPS_LAN_CONS);
+	} else {
+		ep->connect.mode = SPS_MODE_DEST;
+		ep->connect.source = SPS_DEV_HANDLE_MEM;
+		ep->connect.destination = ipa_ctx->bam_handle;
+		ep->connect.src_pipe_index = ipa_ctx->a5_pipe_index++;
+		ep->connect.dest_pipe_index = ipa_ep_idx;
+	}
+
+	IPADBG("client:%d ep:%d\n",
+		sys_in->client, ipa_ep_idx);
+
+	IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
+		ep->connect.dest_pipe_index,
+		ep->connect.src_pipe_index);
+
+	ep->connect.options = ep->sys->sps_option;
+	ep->connect.desc.size = sys_in->desc_fifo_sz;
+	ep->connect.desc.base = dma_alloc_coherent(ipa_ctx->pdev,
+			ep->connect.desc.size, &dma_addr, GFP_KERNEL);
+	if (ipa_ctx->smmu_s1_bypass) {
+		ep->connect.desc.phys_base = dma_addr;
+	} else {
+		ep->connect.desc.iova = dma_addr;
+		smmu_domain = ipa2_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			ep->connect.desc.phys_base =
+				iommu_iova_to_phys(smmu_domain, dma_addr);
+		}
+	}
+	if (ep->connect.desc.base == NULL) {
+		IPAERR("fail to get DMA desc memory.\n");
+		goto fail_sps_cfg;
+	}
+
+	ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+	result = ipa_sps_connect_safe(ep->ep_hdl, &ep->connect, sys_in->client);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto fail_sps_connect;
+	}
+
+	ep->sys->event.options = SPS_O_EOT;
+	ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
+	ep->sys->event.xfer_done = NULL;
+	ep->sys->event.user = ep->sys;
+	ep->sys->event.callback = ep->sys->sps_callback;
+	result = sps_register_event(ep->ep_hdl, &ep->sys->event);
+	if (result < 0) {
+		IPAERR("register event error %d\n", result);
+		goto fail_register_event;
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (ep->sys->repl_hdlr == ipa_fast_replenish_rx_cache) {
+		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
+		ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
+				sizeof(void *), GFP_KERNEL);
+		if (!ep->sys->repl.cache) {
+			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
+			ep->sys->repl_hdlr = ipa_replenish_rx_cache;
+			ep->sys->repl.capacity = 0;
+		} else {
+			atomic_set(&ep->sys->repl.head_idx, 0);
+			atomic_set(&ep->sys->repl.tail_idx, 0);
+			ipa_wq_repl_rx(&ep->sys->repl_work);
+		}
+	}
+
+	if (IPA_CLIENT_IS_CONS(sys_in->client))
+		ipa_replenish_rx_cache(ep->sys);
+
+	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+		ipa_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+		atomic_inc(&ipa_ctx->wc_memb.active_clnt_cnt);
+	}
+
+	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
+		if (ipa_ctx->modem_cfg_emb_pipe_flt &&
+			sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa_install_dflt_flt_rules(ipa_ep_idx);
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_register_event:
+	sps_disconnect(ep->ep_hdl);
+fail_sps_connect:
+	dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
+			  ep->connect.desc.base,
+			  ep->connect.desc.phys_base);
+fail_sps_cfg:
+	sps_free_endpoint(ep->ep_hdl);
+fail_gen2:
+	destroy_workqueue(ep->sys->repl_wq);
+fail_wq2:
+	destroy_workqueue(ep->sys->wq);
+fail_wq:
+	kfree(ep->sys);
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
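+
+/*
+ * Usage sketch for ipa2_setup_sys_pipe() (illustrative only).  The client,
+ * FIFO size and notify callback below are hypothetical:
+ *
+ *	struct ipa_sys_connect_params sys_in;
+ *	u32 hdl;
+ *
+ *	memset(&sys_in, 0, sizeof(sys_in));
+ *	sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ *	sys_in.desc_fifo_sz = 0x800;		// hypothetical FIFO size
+ *	sys_in.notify = my_notify_cb;		// hypothetical callback
+ *	sys_in.priv = my_priv;
+ *	if (ipa2_setup_sys_pipe(&sys_in, &hdl))
+ *		return -EFAULT;
+ *	...
+ *	ipa2_teardown_sys_pipe(hdl);		// undo on shutdown
+ */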
+
+/**
+ * ipa2_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa2_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_teardown_sys_pipe(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+	int empty;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_disable_data_path(clnt_hdl);
+	if (ep->napi_enabled) {
+		ep->switch_to_intr = true;
+		do {
+			usleep_range(95, 105);
+		} while (atomic_read(&ep->sys->curr_polling_state));
+	}
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		do {
+			spin_lock_bh(&ep->sys->spinlock);
+			empty = list_empty(&ep->sys->head_desc_list);
+			spin_unlock_bh(&ep->sys->spinlock);
+			if (!empty)
+				usleep_range(95, 105);
+			else
+				break;
+		} while (1);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+		cancel_delayed_work_sync(&ep->sys->switch_to_intr_work);
+	}
+
+	flush_workqueue(ep->sys->wq);
+	sps_disconnect(ep->ep_hdl);
+	dma_free_coherent(ipa_ctx->pdev, ep->connect.desc.size,
+			  ep->connect.desc.base,
+			  ep->connect.desc.phys_base);
+	sps_free_endpoint(ep->ep_hdl);
+	if (ep->sys->repl_wq)
+		flush_workqueue(ep->sys->repl_wq);
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		ipa_cleanup_rx(ep->sys);
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
+		if (ipa_ctx->modem_cfg_emb_pipe_flt &&
+			ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa_delete_dflt_flt_rules(clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+		atomic_dec(&ipa_ctx->wc_memb.active_clnt_cnt);
+
+	memset(&ep->wstats, 0, sizeof(struct ipa_wlan_stats));
+
+	if (!atomic_read(&ipa_ctx->wc_memb.active_clnt_cnt))
+		ipa_cleanup_wlan_rx_common_cache();
+
+	ep->valid = 0;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa_tx_comp_usr_notify_release() - Callback which invokes the user-supplied
+ * callback to release the skb, or releases it itself if no callback was
+ * supplied.
+ * @user1:	the skb to release
+ * @user2:	endpoint index
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa_connect.
+ */
+static void ipa_tx_comp_usr_notify_release(void *user1, int user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	int ep_idx = user2;
+
+	IPADBG("skb=%p ep=%d\n", skb, ep_idx);
+
+	IPA_STATS_INC_CNT(ipa_ctx->stats.tx_pkts_compl);
+
+	if (ipa_ctx->ep[ep_idx].client_notify)
+		ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+static void ipa_tx_cmd_comp(void *user1, int user2)
+{
+	kfree(user1);
+}
+
+/**
+ * ipa2_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @meta:	[in] TX packet meta-data
+ *
+ * Data-path tx handler. It is used both for the SW data path, which bypasses
+ * most IPA HW blocks, and for the regular HW data path (WLAN AMPDU traffic
+ * only). If dst is a "valid" CONS type, the SW data path is used. If dst is
+ * the WLAN_AMPDU PROD type, the HW data path for WLAN AMPDU is used. Anything
+ * else is an error. On error, the client needs to free the skb as needed. On
+ * success, the IPA driver will later invoke the client callback if one was
+ * supplied; that callback should free the skb. If no callback was supplied,
+ * the IPA driver frees the skb internally.
+ *
+ * The function uses two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor is sent):
+ * the first descriptor is used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once the send is done from the SPS point of view, the IPA driver is
+ * notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp() will call the user supplied
+ * callback (from ipa_connect)
+ *
+ * A usage sketch follows the function body.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa_desc *desc;
+	struct ipa_desc _desc[2];
+	int dst_ep_idx;
+	struct ipa_ip_packet_init *cmd;
+	struct ipa_sys_context *sys;
+	int src_ep_idx;
+	int num_frags, f;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (skb->len == 0) {
+		IPAERR("packet size is 0\n");
+		return -EINVAL;
+	}
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	if (num_frags) {
+		/* 1 desc is needed for the linear portion of skb;
+		 * 1 desc may be needed for the PACKET_INIT;
+		 * 1 desc for each frag
+		 */
+		desc = kzalloc(sizeof(*desc) * (num_frags + 2), GFP_ATOMIC);
+		if (!desc) {
+			IPAERR("failed to alloc desc array\n");
+			goto fail_mem;
+		}
+	} else {
+		memset(_desc, 0, 2 * sizeof(struct ipa_desc));
+		desc = &_desc[0];
+	}
+
+	/*
+	 * USB_CONS: PKT_INIT ep_idx = dst pipe
+	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
+	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
+	 *
+	 * LAN TX: all PKT_INIT
+	 * WAN TX: PKT_INIT (cmd) + HW (data)
+	 *
+	 */
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		src_ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n",
+				IPA_CLIENT_APPS_LAN_WAN_PROD);
+			goto fail_gen;
+		}
+		dst_ep_idx = ipa2_get_ep_mapping(dst);
+	} else {
+		src_ep_idx = ipa2_get_ep_mapping(dst);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n", dst);
+			goto fail_gen;
+		}
+		if (meta && meta->pkt_init_dst_ep_valid)
+			dst_ep_idx = meta->pkt_init_dst_ep;
+		else
+			dst_ep_idx = -1;
+	}
+
+	sys = ipa_ctx->ep[src_ep_idx].sys;
+
+	if (!sys->ep->valid) {
+		IPAERR("pipe not valid\n");
+		goto fail_gen;
+	}
+
+	if (dst_ep_idx != -1) {
+		/* SW data path */
+		cmd = kzalloc(sizeof(struct ipa_ip_packet_init), GFP_ATOMIC);
+		if (!cmd) {
+			IPAERR("failed to alloc immediate command object\n");
+			goto fail_gen;
+		}
+
+		cmd->destination_pipe_index = dst_ep_idx;
+		desc[0].opcode = IPA_IP_PACKET_INIT;
+		desc[0].pyld = cmd;
+		desc[0].len = sizeof(struct ipa_ip_packet_init);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa_tx_cmd_comp;
+		desc[0].user1 = cmd;
+		desc[1].pyld = skb->data;
+		desc[1].len = skb_headlen(skb);
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].callback = ipa_tx_comp_usr_notify_release;
+		desc[1].user1 = skb;
+		desc[1].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+				meta->pkt_init_dst_ep_remote) ?
+				src_ep_idx :
+				dst_ep_idx;
+		if (meta && meta->dma_address_valid) {
+			desc[1].dma_address_valid = true;
+			desc[1].dma_address = meta->dma_address;
+		}
+
+		for (f = 0; f < num_frags; f++) {
+			desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+			desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+			desc[2+f].len = skb_frag_size(desc[2+f].frag);
+		}
+
+		/* don't free skb till frag mappings are released */
+		if (num_frags) {
+			desc[2+f-1].callback = desc[1].callback;
+			desc[2+f-1].user1 = desc[1].user1;
+			desc[2+f-1].user2 = desc[1].user2;
+			desc[1].callback = NULL;
+		}
+
+		if (ipa_send(sys, num_frags + 2, desc, true)) {
+			IPAERR("fail to send skb %p num_frags %u SWP\n",
+					skb, num_frags);
+			goto fail_send;
+		}
+		IPA_STATS_INC_CNT(ipa_ctx->stats.tx_sw_pkts);
+	} else {
+		/* HW data path */
+		desc[0].pyld = skb->data;
+		desc[0].len = skb_headlen(skb);
+		desc[0].type = IPA_DATA_DESC_SKB;
+		desc[0].callback = ipa_tx_comp_usr_notify_release;
+		desc[0].user1 = skb;
+		desc[0].user2 = src_ep_idx;
+
+		if (meta && meta->dma_address_valid) {
+			desc[0].dma_address_valid = true;
+			desc[0].dma_address = meta->dma_address;
+		}
+
+		if (num_frags == 0) {
+			if (ipa_send_one(sys, desc, true)) {
+				IPAERR("fail to send skb %p HWP\n", skb);
+				goto fail_gen;
+			}
+		} else {
+			for (f = 0; f < num_frags; f++) {
+				desc[1+f].frag = &skb_shinfo(skb)->frags[f];
+				desc[1+f].type = IPA_DATA_DESC_SKB_PAGED;
+				desc[1+f].len = skb_frag_size(desc[1+f].frag);
+			}
+
+			/* don't free skb till frag mappings are released */
+			desc[1+f-1].callback = desc[0].callback;
+			desc[1+f-1].user1 = desc[0].user1;
+			desc[1+f-1].user2 = desc[0].user2;
+			desc[0].callback = NULL;
+
+			if (ipa_send(sys, num_frags + 1, desc, true)) {
+				IPAERR("fail to send skb %p num_frags %u HWP\n",
+						skb, num_frags);
+				goto fail_gen;
+			}
+		}
+
+		IPA_STATS_INC_CNT(ipa_ctx->stats.tx_hw_pkts);
+	}
+
+	if (num_frags) {
+		kfree(desc);
+		IPA_STATS_INC_CNT(ipa_ctx->stats.tx_non_linear);
+	}
+
+	return 0;
+
+fail_send:
+	kfree(cmd);
+fail_gen:
+	if (num_frags)
+		kfree(desc);
+fail_mem:
+	return -EFAULT;
+}
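+
+/*
+ * Usage sketch for ipa2_tx_dp() (illustrative only).  The destination and
+ * skb come from the hypothetical caller; on success the skb is freed by the
+ * completion callback (or by the IPA driver if no callback was registered):
+ *
+ *	if (ipa2_tx_dp(IPA_CLIENT_USB_CONS, skb, NULL)) {
+ *		dev_kfree_skb_any(skb);	// on error the caller owns the skb
+ *		return -EFAULT;
+ *	}
+ */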
+
+static void ipa_wq_handle_rx(struct work_struct *work)
+{
+	struct ipa_sys_context *sys;
+
+	sys = container_of(work, struct ipa_sys_context, work);
+
+	if (sys->ep->napi_enabled) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+		sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+	} else
+		ipa_handle_rx(sys);
+}
+
+static void ipa_wq_repl_rx(struct work_struct *work)
+{
+	struct ipa_sys_context *sys;
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	gfp_t flag = GFP_KERNEL | (ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+	u32 next;
+	u32 curr;
+
+	sys = container_of(work, struct ipa_sys_context, repl_work);
+	curr = atomic_read(&sys->repl.tail_idx);
+
+begin:
+	while (1) {
+		next = (curr + 1) % sys->repl.capacity;
+		if (next == atomic_read(&sys->repl.head_idx))
+			goto fail_kmem_cache_alloc;
+
+		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
+					__func__, sys);
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			pr_err_ratelimited("%s fail alloc skb sys=%p\n",
+					__func__, sys);
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
+			       __func__, (void *)rx_pkt->data.dma_addr,
+			       ptr, sys);
+			goto fail_dma_mapping;
+		}
+
+		sys->repl.cache[curr] = rx_pkt;
+		curr = next;
+		/* ensure write is done before setting tail index */
+		mb();
+		atomic_set(&sys->repl.tail_idx, next);
+	}
+
+	return;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (atomic_read(&sys->repl.tail_idx) ==
+			atomic_read(&sys->repl.head_idx)) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+			IPA_STATS_INC_CNT(ipa_ctx->stats.wan_repl_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa_ctx->stats.lan_repl_rx_empty);
+		else
+			WARN_ON(1);
+		pr_err_ratelimited("%s sys=%p repl ring empty\n",
+				__func__, sys);
+		goto begin;
+	}
+}
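+
+/*
+ * Note on the repl ring used above (descriptive only): repl.cache behaves as
+ * a single-producer/single-consumer ring.  This worker produces wrappers and
+ * advances tail_idx; ipa_fast_replenish_rx_cache() consumes them and advances
+ * head_idx.  The ring is treated as full when (tail + 1) % capacity == head
+ * and as empty when head == tail, which is why the capacity is sized as
+ * rx_pool_sz + 1 in ipa2_setup_sys_pipe().
+ */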
+
+static void ipa_replenish_wlan_rx_cache(struct ipa_sys_context *sys)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt = NULL;
+	struct ipa_rx_pkt_wrapper *tmp;
+	int ret;
+	u32 rx_len_cached = 0;
+
+	IPADBG("\n");
+
+	spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+	rx_len_cached = sys->len;
+
+	if (rx_len_cached < sys->rx_pool_sz) {
+		list_for_each_entry_safe(rx_pkt, tmp,
+			&ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
+			list_del(&rx_pkt->link);
+
+			if (ipa_ctx->wc_memb.wlan_comm_free_cnt > 0)
+				ipa_ctx->wc_memb.wlan_comm_free_cnt--;
+
+			INIT_LIST_HEAD(&rx_pkt->link);
+			rx_pkt->len = 0;
+			rx_pkt->sys = sys;
+
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr,
+				IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				goto fail_sps_transfer;
+			}
+
+			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+			rx_len_cached = ++sys->len;
+
+			if (rx_len_cached >= sys->rx_pool_sz) {
+				spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+				return;
+			}
+		}
+	}
+	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+
+	if (rx_len_cached < sys->rx_pool_sz &&
+			ipa_ctx->wc_memb.wlan_comm_total_cnt <
+			 IPA_WLAN_COMM_RX_POOL_HIGH) {
+		ipa_replenish_rx_cache(sys);
+		ipa_ctx->wc_memb.wlan_comm_total_cnt +=
+			(sys->rx_pool_sz - rx_len_cached);
+	}
+
+	return;
+
+fail_sps_transfer:
+	list_del(&rx_pkt->link);
+	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+}
+
+static void ipa_cleanup_wlan_rx_common_cache(void)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_rx_pkt_wrapper *tmp;
+
+	list_for_each_entry_safe(rx_pkt, tmp,
+		&ipa_ctx->wc_memb.wlan_comm_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+			IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->data.skb);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+		ipa_ctx->wc_memb.wlan_comm_free_cnt--;
+		ipa_ctx->wc_memb.wlan_comm_total_cnt--;
+	}
+	ipa_ctx->wc_memb.total_tx_pkts_freed = 0;
+
+	if (ipa_ctx->wc_memb.wlan_comm_free_cnt != 0)
+		IPAERR("wlan comm buff free cnt: %d\n",
+			ipa_ctx->wc_memb.wlan_comm_free_cnt);
+
+	if (ipa_ctx->wc_memb.wlan_comm_total_cnt != 0)
+		IPAERR("wlan comm buff total cnt: %d\n",
+			ipa_ctx->wc_memb.wlan_comm_total_cnt);
+
+}
+
+static void ipa_alloc_wlan_rx_common_cache(u32 size)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int rx_len_cached = 0;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+		(ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+	rx_len_cached = ipa_ctx->wc_memb.wlan_comm_total_cnt;
+	while (rx_len_cached < size) {
+		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+
+		rx_pkt->data.skb =
+			ipa_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
+						flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
+				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		list_add_tail(&rx_pkt->link,
+			&ipa_ctx->wc_memb.wlan_comm_desc_list);
+		rx_len_cached = ++ipa_ctx->wc_memb.wlan_comm_total_cnt;
+
+		ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	}
+
+	return;
+
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	return;
+}
+
+
+/**
+ * ipa_replenish_rx_cache() - Replenish the Rx packets cache.
+ *
+ * The function allocates buffers from the rx_pkt_wrapper_cache until the
+ * system pipe holds sys->rx_pool_sz buffers. For each buffer:
+ *   - Allocate a wrapper from the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - DMA-map the packet
+ *   - Add the packet to the system pipe linked list
+ *   - Initiate an SPS transfer so that the SPS driver will use this packet
+ *     later.
+ */
+static void ipa_replenish_rx_cache(struct ipa_sys_context *sys)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+		(ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		rx_pkt = kmem_cache_zalloc(ipa_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+
+		ret = sps_transfer_one(sys->ep->ep_hdl,
+			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			goto fail_sps_transfer;
+		}
+	}
+
+	return;
+
+fail_sps_transfer:
+	list_del(&rx_pkt->link);
+	rx_len_cached = --sys->len;
+	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+}
+
+static void ipa_replenish_rx_cache_recycle(struct ipa_sys_context *sys)
+{
+	void *ptr;
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		spin_lock_bh(&sys->spinlock);
+		if (list_empty(&sys->rcycl_list))
+			goto fail_kmem_cache_alloc;
+
+		rx_pkt = list_first_entry(&sys->rcycl_list,
+				struct ipa_rx_pkt_wrapper, link);
+		list_del(&rx_pkt->link);
+		spin_unlock_bh(&sys->spinlock);
+		INIT_LIST_HEAD(&rx_pkt->link);
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa_ctx->pdev,
+			ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+			rx_pkt->data.dma_addr == ~0)
+			goto fail_dma_mapping;
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+
+		ret = sps_transfer_one(sys->ep->ep_hdl,
+			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			goto fail_sps_transfer;
+		}
+	}
+
+	return;
+fail_sps_transfer:
+	rx_len_cached = --sys->len;
+	list_del(&rx_pkt->link);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+		sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	spin_lock_bh(&sys->spinlock);
+	/* return the buffer to the recycle list so it can be retried later */
+	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+	/* fall through to the unlock below; do not unlock twice */
+fail_kmem_cache_alloc:
+	spin_unlock_bh(&sys->spinlock);
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+		msecs_to_jiffies(1));
+}
+
+static void ipa_fast_replenish_rx_cache(struct ipa_sys_context *sys)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	u32 curr;
+
+	rx_len_cached = sys->len;
+	curr = atomic_read(&sys->repl.head_idx);
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (curr == atomic_read(&sys->repl.tail_idx)) {
+			queue_work(sys->repl_wq, &sys->repl_work);
+			break;
+		}
+
+		rx_pkt = sys->repl.cache[curr];
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+
+		ret = sps_transfer_one(sys->ep->ep_hdl,
+			rx_pkt->data.dma_addr, sys->rx_buff_sz, rx_pkt, 0);
+
+		if (ret) {
+			IPAERR("sps_transfer_one failed %d\n", ret);
+			list_del(&rx_pkt->link);
+			break;
+		}
+		rx_len_cached = ++sys->len;
+		sys->repl_trig_cnt++;
+		curr = (curr + 1) % sys->repl.capacity;
+		/* ensure write is done before setting head index */
+		mb();
+		atomic_set(&sys->repl.head_idx, curr);
+	}
+
+	if (sys->repl_trig_cnt % sys->repl_trig_thresh == 0)
+		queue_work(sys->repl_wq, &sys->repl_work);
+
+	if (rx_len_cached <= sys->ep->rx_replenish_threshold) {
+		if (rx_len_cached == 0) {
+			if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+				IPA_STATS_INC_CNT(ipa_ctx->stats.wan_rx_empty);
+			else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+				IPA_STATS_INC_CNT(ipa_ctx->stats.lan_rx_empty);
+			else
+				WARN_ON(1);
+		}
+		sys->repl_trig_cnt = 0;
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+			msecs_to_jiffies(1));
+	}
+}
+
+static void replenish_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa_sys_context, replenish_rx_work);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	sys->repl_hdlr(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa_cleanup_rx() - release RX queue resources
+ *
+ */
+static void ipa_cleanup_rx(struct ipa_sys_context *sys)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_rx_pkt_wrapper *r;
+	u32 head;
+	u32 tail;
+
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->head_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+
+	list_for_each_entry_safe(rx_pkt, r,
+			&sys->rcycl_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+
+	if (sys->repl.cache) {
+		head = atomic_read(&sys->repl.head_idx);
+		tail = atomic_read(&sys->repl.tail_idx);
+		while (head != tail) {
+			rx_pkt = sys->repl.cache[head];
+			dma_unmap_single(ipa_ctx->pdev, rx_pkt->data.dma_addr,
+					sys->rx_buff_sz, DMA_FROM_DEVICE);
+			sys->free_skb(rx_pkt->data.skb);
+			kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt);
+			head = (head + 1) % sys->repl.capacity;
+		}
+		kfree(sys->repl.cache);
+	}
+}
+
+static struct sk_buff *ipa_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+	struct sk_buff *skb2 = NULL;
+
+	skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+	if (likely(skb2)) {
+		/* Set the data pointer */
+		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+		memcpy(skb2->data, skb->data, len);
+		skb2->len = len;
+		skb_set_tail_pointer(skb2, len);
+	}
+
+	return skb2;
+}
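+
+/*
+ * Note (descriptive, partly an assumption): the copy above reserves
+ * IPA_RX_BUFF_CLIENT_HEADROOM so the client can push its own headers without
+ * reallocating, and hands the client a private skb, presumably so that the
+ * large Rx buffer can keep being consumed and recycled independently of the
+ * client's skb lifetime.
+ */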
+
+static int ipa_lan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa_sys_context *sys)
+{
+	int rc = 0;
+	struct ipa_hw_pkt_status *status;
+	struct sk_buff *skb2;
+	int pad_len_byte;
+	int len;
+	unsigned char *buf;
+	int src_pipe;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		sys->free_skb(skb);
+		return rc;
+	}
+
+	if (sys->len_partial) {
+		IPADBG("len_partial %d\n", sys->len_partial);
+		buf = skb_push(skb, sys->len_partial);
+		memcpy(buf, sys->prev_skb->data, sys->len_partial);
+		sys->len_partial = 0;
+		sys->free_skb(sys->prev_skb);
+		sys->prev_skb = NULL;
+		goto begin;
+	}
+
+	/* this pipe has TX comp (status only) + mux-ed LAN RX data
+	 * (status+data)
+	 */
+	if (sys->len_rem) {
+		IPADBG("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+				sys->len_pad);
+		if (sys->len_rem <= skb->len) {
+			if (sys->prev_skb) {
+				skb2 = skb_copy_expand(sys->prev_skb, 0,
+						sys->len_rem, GFP_KERNEL);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, sys->len_rem),
+						skb->data, sys->len_rem);
+					skb_trim(skb2,
+						skb2->len - sys->len_pad);
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff);
+					if (sys->drop_packet)
+						dev_kfree_skb_any(skb2);
+					else
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+			}
+			skb_pull(skb, sys->len_rem);
+			sys->prev_skb = NULL;
+			sys->len_rem = 0;
+			sys->len_pad = 0;
+		} else {
+			if (sys->prev_skb) {
+				skb2 = skb_copy_expand(sys->prev_skb, 0,
+					skb->len, GFP_KERNEL);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, skb->len),
+						skb->data, skb->len);
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+				sys->prev_skb = skb2;
+			}
+			sys->len_rem -= skb->len;
+			sys->free_skb(skb);
+			return rc;
+		}
+	}
+
+begin:
+	while (skb->len) {
+		sys->drop_packet = false;
+		IPADBG("LEN_REM %d\n", skb->len);
+
+		if (skb->len < IPA_PKT_STATUS_SIZE) {
+			WARN_ON(sys->prev_skb != NULL);
+			IPADBG("status straddles buffer\n");
+			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+			sys->len_partial = skb->len;
+			return rc;
+		}
+
+		status = (struct ipa_hw_pkt_status *)skb->data;
+		IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status->status_opcode, status->endp_src_idx,
+				status->endp_dest_idx, status->pkt_len);
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				*status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if (status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
+			status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_PACKET &&
+			status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET &&
+			status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
+			IPAERR("unsupported opcode(%d)\n",
+				status->status_opcode);
+			skb_pull(skb, IPA_PKT_STATUS_SIZE);
+			continue;
+		}
+		IPA_STATS_EXCP_CNT(status->exception,
+				ipa_ctx->stats.rx_excp_pkts);
+		if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
+			status->endp_src_idx >= ipa_ctx->ipa_num_pipes) {
+			IPAERR("status fields invalid\n");
+			IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status->status_opcode, status->endp_src_idx,
+				status->endp_dest_idx, status->pkt_len);
+			WARN_ON(1);
+			BUG();
+		}
+		if (status->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID) {
+			struct ipa_tag_completion *comp;
+
+			IPADBG("TAG packet arrived\n");
+			if (status->tag_f_2 == IPA_COOKIE) {
+				skb_pull(skb, IPA_PKT_STATUS_SIZE);
+				if (skb->len < sizeof(comp)) {
+					IPAERR("TAG arrived without packet\n");
+					return rc;
+				}
+				memcpy(&comp, skb->data, sizeof(comp));
+				skb_pull(skb, sizeof(comp) +
+						IPA_SIZE_DL_CSUM_META_TRAILER);
+				complete(&comp->comp);
+				if (atomic_dec_return(&comp->cnt) == 0)
+					kfree(comp);
+				continue;
+			} else {
+				IPADBG("ignoring TAG with wrong cookie\n");
+			}
+		}
+		if (status->pkt_len == 0) {
+			IPADBG("Skip aggr close status\n");
+			skb_pull(skb, IPA_PKT_STATUS_SIZE);
+			IPA_STATS_INC_CNT(ipa_ctx->stats.aggr_close);
+			IPA_STATS_DEC_CNT(
+				ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
+			continue;
+		}
+		if (status->endp_dest_idx == (sys->ep - ipa_ctx->ep)) {
+			/* RX data */
+			src_pipe = status->endp_src_idx;
+
+			/*
+			 * A packet which is received back to the AP after
+			 * there was no route match.
+			 */
+			if (!status->exception && !status->route_match)
+				sys->drop_packet = true;
+
+			if (skb->len == IPA_PKT_STATUS_SIZE &&
+					!status->exception) {
+				WARN_ON(sys->prev_skb != NULL);
+				IPADBG("Ins header in next buffer\n");
+				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+				sys->len_partial = skb->len;
+				return rc;
+			}
+
+			pad_len_byte = ((status->pkt_len + 3) & ~3) -
+					status->pkt_len;
+
+			len = status->pkt_len + pad_len_byte +
+				IPA_SIZE_DL_CSUM_META_TRAILER;
+			IPADBG("pad %d pkt_len %d len %d\n", pad_len_byte,
+					status->pkt_len, len);
+
+			if (status->exception ==
+					IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR) {
+				IPADBG("Dropping packet on DeAggr Exception\n");
+				sys->drop_packet = true;
+			}
+
+			skb2 = ipa_skb_copy_for_client(skb,
+				status->pkt_len + IPA_PKT_STATUS_SIZE);
+			if (likely(skb2)) {
+				if (skb->len < len + IPA_PKT_STATUS_SIZE) {
+					IPADBG("SPL skb len %d len %d\n",
+							skb->len, len);
+					sys->prev_skb = skb2;
+					sys->len_rem = len - skb->len +
+						IPA_PKT_STATUS_SIZE;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_trim(skb2, status->pkt_len +
+							IPA_PKT_STATUS_SIZE);
+					IPADBG("rx avail for %d\n",
+							status->endp_dest_idx);
+					if (sys->drop_packet) {
+						dev_kfree_skb_any(skb2);
+					} else if (status->pkt_len >
+						   IPA_GENERIC_AGGR_BYTE_LIMIT *
+						   1024) {
+						IPAERR("packet size invalid\n");
+						IPAERR("STATUS opcode=%d\n",
+							status->status_opcode);
+						IPAERR("src=%d dst=%d len=%d\n",
+							status->endp_src_idx,
+							status->endp_dest_idx,
+							status->pkt_len);
+						BUG();
+					} else {
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff) +
+						(ALIGN(len +
+						IPA_PKT_STATUS_SIZE, 32) *
+						unused / used_align);
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+					}
+					skb_pull(skb, len +
+						IPA_PKT_STATUS_SIZE);
+				}
+			} else {
+				IPAERR("fail to alloc skb\n");
+				if (skb->len < len) {
+					sys->prev_skb = NULL;
+					sys->len_rem = len - skb->len +
+						IPA_PKT_STATUS_SIZE;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_pull(skb, len +
+						IPA_PKT_STATUS_SIZE);
+				}
+			}
+			/* TX comp */
+			ipa_wq_write_done_status(src_pipe);
+			IPADBG("tx comp imp for %d\n", src_pipe);
+		} else {
+			/* TX comp */
+			ipa_wq_write_done_status(status->endp_src_idx);
+			IPADBG("tx comp exp for %d\n", status->endp_src_idx);
+			skb_pull(skb, IPA_PKT_STATUS_SIZE);
+			IPA_STATS_INC_CNT(ipa_ctx->stats.stat_compl);
+			IPA_STATS_DEC_CNT(
+				ipa_ctx->stats.rx_excp_pkts[MAX_NUM_EXCP - 1]);
+		}
+	}
+
+	sys->free_skb(skb);
+	return rc;
+}
+
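+/*
+ * Illustrative sketch (not part of the original change): the LAN handler
+ * above derives the trailing pad with ((pkt_len + 3) & ~3) - pkt_len, i.e.
+ * the distance to the next 4-byte boundary.  The helper name and sample
+ * value below are hypothetical.
+ */
+static inline unsigned int ipa_example_pad_len(unsigned int pkt_len)
+{
+	/* e.g. pkt_len = 61 -> aligned = 64 -> pad = 3 */
+	unsigned int aligned = (pkt_len + 3) & ~3U;
+
+	return aligned - pkt_len;
+}
+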
+static struct sk_buff *join_prev_skb(struct sk_buff *prev_skb,
+		struct sk_buff *skb, unsigned int len)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_copy_expand(prev_skb, 0,
+			len, GFP_KERNEL);
+	if (likely(skb2)) {
+		memcpy(skb_put(skb2, len),
+			skb->data, len);
+	} else {
+		IPAERR("copy expand failed\n");
+		skb2 = NULL;
+	}
+	dev_kfree_skb_any(prev_skb);
+
+	return skb2;
+}
+
+static void wan_rx_handle_splt_pyld(struct sk_buff *skb,
+		struct ipa_sys_context *sys)
+{
+	struct sk_buff *skb2;
+
+	IPADBG("rem %d skb %d\n", sys->len_rem, skb->len);
+	if (sys->len_rem <= skb->len) {
+		if (sys->prev_skb) {
+			skb2 = join_prev_skb(sys->prev_skb, skb,
+					sys->len_rem);
+			if (likely(skb2)) {
+				IPADBG(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, IPA_PKT_STATUS_SIZE);
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE,
+					(unsigned long)(skb2));
+			}
+		}
+		skb_pull(skb, sys->len_rem);
+		sys->prev_skb = NULL;
+		sys->len_rem = 0;
+	} else {
+		if (sys->prev_skb) {
+			skb2 = join_prev_skb(sys->prev_skb, skb,
+					skb->len);
+			sys->prev_skb = skb2;
+		}
+		sys->len_rem -= skb->len;
+		skb_pull(skb, skb->len);
+	}
+}
+
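+/*
+ * Illustrative sketch (hypothetical helper, not used by the driver): the
+ * split-payload handling above keeps len_rem as the number of bytes of the
+ * current frame still expected from the next buffer, e.g. a 2000-byte
+ * frame landing in a 1500-byte buffer leaves len_rem = 500.
+ */
+static inline unsigned int ipa_example_split_rem(unsigned int frame_len,
+		unsigned int buf_len)
+{
+	return frame_len > buf_len ? frame_len - buf_len : 0;
+}
+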
+static int ipa_wan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa_sys_context *sys)
+{
+	int rc = 0;
+	struct ipa_hw_pkt_status *status;
+	struct sk_buff *skb2;
+	u16 pkt_len_with_pad;
+	u32 qmap_hdr;
+	int checksum_trailer_exists;
+	int frame_len;
+	int ep_idx;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		goto bail;
+	}
+
+	if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
+		sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE, (unsigned long)(skb));
+		return rc;
+	}
+	if (sys->repl_hdlr == ipa_replenish_rx_cache_recycle) {
+		IPAERR("Recycle should be enabled only with GRO Aggr\n");
+		ipa_assert();
+	}
+	/*
+	 * The payload may split across two or more buffers;
+	 * take the start of the payload from prev_skb.
+	 */
+	if (sys->len_rem)
+		wan_rx_handle_splt_pyld(skb, sys);
+
+	while (skb->len) {
+		IPADBG("LEN_REM %d\n", skb->len);
+		if (skb->len < IPA_PKT_STATUS_SIZE) {
+			IPAERR("status straddles buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		status = (struct ipa_hw_pkt_status *)skb->data;
+		IPADBG("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status->status_opcode, status->endp_src_idx,
+				status->endp_dest_idx, status->pkt_len);
+
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				*status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if (status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_DROPPED_PACKET &&
+			status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_PACKET &&
+			status->status_opcode !=
+			IPA_HW_STATUS_OPCODE_XLAT_PACKET) {
+			IPAERR("unsupported opcode\n");
+			skb_pull(skb, IPA_PKT_STATUS_SIZE);
+			continue;
+		}
+		IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
+		if (status->endp_dest_idx >= ipa_ctx->ipa_num_pipes ||
+			status->endp_src_idx >= ipa_ctx->ipa_num_pipes ||
+			status->pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+			IPAERR("status fields invalid\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		if (status->pkt_len == 0) {
+			IPADBG("Skip aggr close status\n");
+			skb_pull(skb, IPA_PKT_STATUS_SIZE);
+			IPA_STATS_DEC_CNT(ipa_ctx->stats.rx_pkts);
+			IPA_STATS_INC_CNT(ipa_ctx->stats.wan_aggr_close);
+			continue;
+		}
+		ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+		if (status->endp_dest_idx != ep_idx) {
+			IPAERR("expected endp_dest_idx %d received %d\n",
+					ep_idx, status->endp_dest_idx);
+			WARN_ON(1);
+			goto bail;
+		}
+		/* RX data */
+		if (skb->len == IPA_PKT_STATUS_SIZE) {
+			IPAERR("Ins header in next buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		qmap_hdr = *(u32 *)(status+1);
+		/*
+		 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
+		 * header
+		 */
+
+		/*QMAP is BE: convert the pkt_len field from BE to LE*/
+		pkt_len_with_pad = ntohs((qmap_hdr>>16) & 0xffff);
+		IPADBG("pkt_len with pad %d\n", pkt_len_with_pad);
+		/*get the CHECKSUM_PROCESS bit*/
+		checksum_trailer_exists = status->status_mask &
+				IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS;
+		IPADBG("checksum_trailer_exists %d\n",
+				checksum_trailer_exists);
+
+		frame_len = IPA_PKT_STATUS_SIZE +
+			    IPA_QMAP_HEADER_LENGTH +
+			    pkt_len_with_pad;
+		if (checksum_trailer_exists)
+			frame_len += IPA_DL_CHECKSUM_LENGTH;
+		IPADBG("frame_len %d\n", frame_len);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (likely(skb2)) {
+			/*
+			 * the actual data is shorter than expected, so the
+			 * payload is split across two buffers
+			 */
+			if (skb->len < frame_len) {
+				IPADBG("SPL skb len %d len %d\n",
+						skb->len, frame_len);
+				sys->prev_skb = skb2;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_trim(skb2, frame_len);
+				IPADBG("rx avail for %d\n",
+						status->endp_dest_idx);
+				IPADBG(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, IPA_PKT_STATUS_SIZE);
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff) +
+					(ALIGN(frame_len, 32) *
+					 unused / used_align);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE, (unsigned long)(skb2));
+				skb_pull(skb, frame_len);
+			}
+		} else {
+			IPAERR("fail to clone\n");
+			if (skb->len < frame_len) {
+				sys->prev_skb = NULL;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_pull(skb, frame_len);
+			}
+		}
+	}
+bail:
+	sys->free_skb(skb);
+	return rc;
+}
+
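+/*
+ * Illustrative sketch, assuming the layout the WAN handler above relies
+ * on: a 4-byte QMAP header whose last two bytes carry the packet length
+ * (including pad) in big-endian order.  The helper name is hypothetical;
+ * the arithmetic mirrors ipa_wan_rx_pyld_hdlr().
+ */
+static inline u16 ipa_example_qmap_pkt_len(u32 qmap_hdr)
+{
+	/* keep the upper 16 bits of the raw word and byte-swap them */
+	return ntohs((qmap_hdr >> 16) & 0xffff);
+}
+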
+static int ipa_rx_pyld_hdlr(struct sk_buff *rx_skb, struct ipa_sys_context *sys)
+{
+	struct ipa_a5_mux_hdr *mux_hdr;
+	unsigned int pull_len;
+	unsigned int padding;
+	struct ipa_ep_context *ep;
+	unsigned int src_pipe;
+
+	mux_hdr = (struct ipa_a5_mux_hdr *)rx_skb->data;
+
+	src_pipe = mux_hdr->src_pipe_index;
+
+	IPADBG("RX pkt len=%d IID=0x%x src=%d, flags=0x%x, meta=0x%x\n",
+		rx_skb->len, ntohs(mux_hdr->interface_id),
+		src_pipe, mux_hdr->flags, ntohl(mux_hdr->metadata));
+
+	IPA_DUMP_BUFF(rx_skb->data, 0, rx_skb->len);
+
+	IPA_STATS_INC_CNT(ipa_ctx->stats.rx_pkts);
+	IPA_STATS_EXCP_CNT(mux_hdr->flags, ipa_ctx->stats.rx_excp_pkts);
+
+	/*
+	 * Any packets arriving over AMPDU_TX should be dispatched
+	 * to the regular WLAN RX data-path.
+	 */
+	if (unlikely(src_pipe == WLAN_AMPDU_TX_EP))
+		src_pipe = WLAN_PROD_TX_EP;
+
+	ep = &ipa_ctx->ep[src_pipe];
+	spin_lock(&ipa_ctx->disconnect_lock);
+	if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
+		!ep->valid || !ep->client_notify)) {
+		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+		  src_pipe, ep->valid, ep->client_notify);
+		dev_kfree_skb_any(rx_skb);
+		spin_unlock(&ipa_ctx->disconnect_lock);
+		return 0;
+	}
+
+	pull_len = sizeof(struct ipa_a5_mux_hdr);
+
+	/*
+	 * IP packet starts on word boundary
+	 * remove the MUX header and any padding and pass the frame to
+	 * the client which registered a rx callback on the "src pipe"
+	 */
+	padding = ep->cfg.hdr.hdr_len & 0x3;
+	if (padding)
+		pull_len += 4 - padding;
+
+	IPADBG("pulling %d bytes from skb\n", pull_len);
+	skb_pull(rx_skb, pull_len);
+	ep->client_notify(ep->priv, IPA_RECEIVE,
+			(unsigned long)(rx_skb));
+	spin_unlock(&ipa_ctx->disconnect_lock);
+	return 0;
+}
+
+static struct sk_buff *ipa_get_skb_ipa_rx(unsigned int len, gfp_t flags)
+{
+	return __dev_alloc_skb(len, flags);
+}
+
+static struct sk_buff *ipa_get_skb_ipa_rx_headroom(unsigned int len,
+		gfp_t flags)
+{
+	struct sk_buff *skb;
+
+	skb = __dev_alloc_skb(len + IPA_HEADROOM, flags);
+	if (skb)
+		skb_reserve(skb, IPA_HEADROOM);
+
+	return skb;
+}
+
+static void ipa_free_skb_rx(struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
+void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *rx_skb = (struct sk_buff *)data;
+	struct ipa_hw_pkt_status *status;
+	struct ipa_ep_context *ep;
+	unsigned int src_pipe;
+	u32 metadata;
+
+	status = (struct ipa_hw_pkt_status *)rx_skb->data;
+	src_pipe = status->endp_src_idx;
+	metadata = status->metadata;
+	ep = &ipa_ctx->ep[src_pipe];
+	if (unlikely(src_pipe >= ipa_ctx->ipa_num_pipes ||
+		!ep->valid ||
+		!ep->client_notify)) {
+		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+		  src_pipe, ep->valid, ep->client_notify);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	if (!status->exception)
+		skb_pull(rx_skb, IPA_PKT_STATUS_SIZE +
+				IPA_LAN_RX_HEADER_LENGTH);
+	else
+		skb_pull(rx_skb, IPA_PKT_STATUS_SIZE);
+
+	/*
+	 *  Metadata Info
+	 *  ------------------------------------------
+	 *  |   3     |   2     |    1        |  0   |
+	 *  | fw_desc | vdev_id | qmap mux id | Resv |
+	 *  ------------------------------------------
+	 */
+	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	IPADBG("meta_data: 0x%x cb: 0x%x\n",
+			metadata, *(u32 *)rx_skb->cb);
+
+	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+}
+
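+/*
+ * Illustrative sketch of the metadata layout documented in ipa_lan_rx_cb()
+ * above: byte 1 holds the QMAP mux id and byte 2 the vdev id, which is why
+ * the callback stores bytes 2..3 of the metadata word in rx_skb->cb.  The
+ * helper names are hypothetical.
+ */
+static inline u8 ipa_example_meta_qmap_mux_id(u32 metadata)
+{
+	return (metadata >> 8) & 0xFF;		/* byte 1: qmap mux id */
+}
+
+static inline u8 ipa_example_meta_vdev_id(u32 metadata)
+{
+	return (metadata >> 16) & 0xFF;		/* byte 2: vdev_id */
+}
+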
+void ipa2_recycle_wan_skb(struct sk_buff *skb)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	int ep_idx = ipa2_get_ep_mapping(
+	   IPA_CLIENT_APPS_WAN_CONS);
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN |
+		(ipa_ctx->use_dma_zone ? GFP_DMA : 0);
+
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist\n");
+		ipa_assert();
+	}
+
+	rx_pkt = kmem_cache_zalloc(
+		ipa_ctx->rx_pkt_wrapper_cache, flag);
+	if (!rx_pkt)
+		ipa_assert();
+
+	INIT_WORK(&rx_pkt->work, ipa_wq_rx_avail);
+	rx_pkt->sys = ipa_ctx->ep[ep_idx].sys;
+
+	rx_pkt->data.skb = skb;
+	rx_pkt->data.dma_addr = 0;
+	ipa_skb_recycle(rx_pkt->data.skb);
+	skb_reserve(rx_pkt->data.skb, IPA_HEADROOM);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
+static void ipa_wq_rx_common(struct ipa_sys_context *sys, u32 size)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		WARN_ON(1);
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa_rx_pkt_wrapper,
+					   link);
+	list_del(&rx_pkt_expected->link);
+	sys->len--;
+	if (size)
+		rx_pkt_expected->len = size;
+	rx_skb = rx_pkt_expected->data.skb;
+	dma_unmap_single(ipa_ctx->pdev, rx_pkt_expected->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	*(unsigned int *)rx_skb->cb = rx_skb->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->pyld_hdlr(rx_skb, sys);
+	sys->repl_hdlr(sys);
+	kmem_cache_free(ipa_ctx->rx_pkt_wrapper_cache, rx_pkt_expected);
+
+}
+
+static void ipa_wlan_wq_rx_common(struct ipa_sys_context *sys, u32 size)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		WARN_ON(1);
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa_rx_pkt_wrapper,
+					   link);
+	list_del(&rx_pkt_expected->link);
+	sys->len--;
+
+	if (size)
+		rx_pkt_expected->len = size;
+
+	rx_skb = rx_pkt_expected->data.skb;
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->ep->wstats.tx_pkts_rcvd++;
+	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
+		ipa2_free_skb(&rx_pkt_expected->data);
+		sys->ep->wstats.tx_pkts_dropped++;
+	} else {
+		sys->ep->wstats.tx_pkts_sent++;
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(&rx_pkt_expected->data));
+	}
+	ipa_replenish_wlan_rx_cache(sys);
+}
+
+static void ipa_dma_memcpy_notify(struct ipa_sys_context *sys,
+	struct sps_iovec *iovec)
+{
+	IPADBG("ENTER.\n");
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		IPAERR("descriptor list is empty!\n");
+		WARN_ON(1);
+		return;
+	}
+	if (!(iovec->flags & SPS_IOVEC_FLAG_EOT)) {
+		IPAERR("received unexpected event. sps flag is 0x%x\n"
+			, iovec->flags);
+		WARN_ON(1);
+		return;
+	}
+	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(iovec));
+	IPADBG("EXIT\n");
+}
+
+static void ipa_wq_rx_avail(struct work_struct *work)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+	struct ipa_sys_context *sys;
+
+	rx_pkt = container_of(work, struct ipa_rx_pkt_wrapper, work);
+	if (unlikely(rx_pkt == NULL))
+		WARN_ON(1);
+	sys = rx_pkt->sys;
+	ipa_wq_rx_common(sys, 0);
+}
+
+/**
+ * ipa_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Rx operation is complete.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to a workqueue.
+ */
+void ipa_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		rx_pkt = notify->data.transfer.user;
+		if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
+			atomic_set(&ipa_ctx->sps_pm.eot_activity, 1);
+		rx_pkt->len = notify->data.transfer.iovec.size;
+		IPADBG("event %d notified sys=%p len=%u\n", notify->event_id,
+				notify->user, rx_pkt->len);
+		queue_work(rx_pkt->sys->wq, &rx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d sys=%p\n",
+				notify->event_id, notify->user);
+	}
+}
+
+static int ipa_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
+	struct ipa_sys_context *sys)
+{
+	if (sys->ep->client_notify) {
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+			(unsigned long)(rx_skb));
+	} else {
+		dev_kfree_skb_any(rx_skb);
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
+static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
+		struct ipa_sys_context *sys)
+{
+	unsigned long int aggr_byte_limit;
+
+	sys->ep->status.status_en = true;
+	sys->ep->wakelock_client = IPA_WAKELOCK_REF_CLIENT_MAX;
+	if (IPA_CLIENT_IS_PROD(in->client)) {
+		if (!sys->ep->skip_ep_cfg) {
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+			sys->sps_option = SPS_O_AUTO_ENABLE;
+			sys->sps_callback = NULL;
+			sys->ep->status.status_ep = ipa2_get_ep_mapping(
+					IPA_CLIENT_APPS_LAN_CONS);
+			if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client))
+				sys->ep->status.status_en = false;
+		} else {
+			sys->policy = IPA_POLICY_INTR_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE |
+					SPS_O_EOT);
+			sys->sps_callback =
+				ipa_sps_irq_tx_no_aggr_notify;
+		}
+		return 0;
+	}
+
+	aggr_byte_limit =
+	(unsigned long int)IPA_GENERIC_RX_BUFF_SZ(
+		ipa_adjust_ra_buff_base_sz(
+			in->ipa_ep_cfg.aggr.aggr_byte_limit));
+
+	if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+		in->client == IPA_CLIENT_APPS_WAN_CONS) {
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+						   | SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+						  switch_to_intr_rx_work_func);
+		INIT_DELAYED_WORK(&sys->replenish_rx_work,
+						  replenish_rx_work_func);
+		INIT_WORK(&sys->repl_work, ipa_wq_repl_rx);
+		atomic_set(&sys->curr_polling_state, 0);
+		sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+		   IPA_GENERIC_RX_BUFF_BASE_SZ) -
+		   IPA_HEADROOM;
+		sys->get_skb = ipa_get_skb_ipa_rx_headroom;
+		sys->free_skb = ipa_free_skb_rx;
+		in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+		in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+		in->ipa_ep_cfg.aggr.aggr_time_limit =
+		   IPA_GENERIC_AGGR_TIME_LIMIT;
+		if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+			sys->pyld_hdlr = ipa_lan_rx_pyld_hdlr;
+			if (nr_cpu_ids > 1) {
+				sys->repl_hdlr =
+				   ipa_fast_replenish_rx_cache;
+				sys->repl_trig_thresh =
+				   sys->rx_pool_sz / 8;
+			} else {
+				sys->repl_hdlr =
+				   ipa_replenish_rx_cache;
+			}
+			sys->rx_pool_sz =
+			   ipa_ctx->lan_rx_ring_size;
+			in->ipa_ep_cfg.aggr.aggr_byte_limit =
+			   IPA_GENERIC_AGGR_BYTE_LIMIT;
+			in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+			   IPA_GENERIC_AGGR_PKT_LIMIT;
+			sys->ep->wakelock_client =
+			   IPA_WAKELOCK_REF_CLIENT_LAN_RX;
+		} else if (in->client ==
+					  IPA_CLIENT_APPS_WAN_CONS) {
+			sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
+			sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
+			if (nr_cpu_ids > 1) {
+				sys->repl_hdlr =
+				   ipa_fast_replenish_rx_cache;
+				sys->repl_trig_thresh =
+				   sys->rx_pool_sz / 8;
+			} else {
+				sys->repl_hdlr =
+				   ipa_replenish_rx_cache;
+			}
+			if (in->napi_enabled) {
+				sys->rx_pool_sz =
+					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+				if (in->recycle_enabled) {
+					sys->repl_hdlr =
+					   ipa_replenish_rx_cache_recycle;
+				}
+			}
+			sys->ep->wakelock_client =
+			   IPA_WAKELOCK_REF_CLIENT_WAN_RX;
+			in->ipa_ep_cfg.aggr.aggr_sw_eof_active
+			   = true;
+			if (ipa_ctx->ipa_client_apps_wan_cons_agg_gro) {
+				IPAERR("get close-by %u\n",
+					   ipa_adjust_ra_buff_base_sz(
+						  in->ipa_ep_cfg.aggr.
+						  aggr_byte_limit));
+				IPAERR("set rx_buff_sz %lu\n", aggr_byte_limit);
+				/* disable ipa_status */
+				sys->ep->status.
+				   status_en = false;
+				sys->rx_buff_sz =
+				   IPA_GENERIC_RX_BUFF_SZ(
+				   ipa_adjust_ra_buff_base_sz(
+					  in->ipa_ep_cfg.aggr.
+					  aggr_byte_limit - IPA_HEADROOM));
+				in->ipa_ep_cfg.aggr.
+				   aggr_byte_limit =
+				   sys->rx_buff_sz < in->
+					ipa_ep_cfg.aggr.aggr_byte_limit ?
+				   IPA_ADJUST_AGGR_BYTE_LIMIT(
+				   sys->rx_buff_sz) :
+				   IPA_ADJUST_AGGR_BYTE_LIMIT(
+				   in->ipa_ep_cfg.
+				   aggr.aggr_byte_limit);
+				IPAERR("set aggr_limit %lu\n",
+					   (unsigned long int)
+					   in->ipa_ep_cfg.aggr.
+					   aggr_byte_limit);
+			} else {
+				in->ipa_ep_cfg.aggr.
+				   aggr_byte_limit =
+				   IPA_GENERIC_AGGR_BYTE_LIMIT;
+				in->ipa_ep_cfg.aggr.
+				   aggr_pkt_limit =
+				   IPA_GENERIC_AGGR_PKT_LIMIT;
+			}
+		}
+	} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+		IPADBG("assigning policy to client:%d",
+			   in->client);
+
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+						   | SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+						  switch_to_intr_rx_work_func);
+		INIT_DELAYED_WORK(&sys->replenish_rx_work,
+						  replenish_rx_work_func);
+		atomic_set(&sys->curr_polling_state, 0);
+		sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+		sys->rx_pool_sz = in->desc_fifo_sz /
+		   sizeof(struct sps_iovec) - 1;
+		if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+			sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+		sys->pyld_hdlr = NULL;
+		sys->repl_hdlr = ipa_replenish_wlan_rx_cache;
+		sys->get_skb = ipa_get_skb_ipa_rx;
+		sys->free_skb = ipa_free_skb_rx;
+		in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		sys->ep->wakelock_client =
+		   IPA_WAKELOCK_REF_CLIENT_WLAN_RX;
+	} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+		IPADBG("assigning policy to client:%d",
+			   in->client);
+
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+						   | SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+						  switch_to_intr_rx_work_func);
+		INIT_DELAYED_WORK(&sys->replenish_rx_work,
+						  replenish_rx_work_func);
+		atomic_set(&sys->curr_polling_state, 0);
+		sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+		sys->rx_pool_sz = in->desc_fifo_sz /
+		   sizeof(struct sps_iovec) - 1;
+		if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+			sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+		sys->pyld_hdlr = ipa_odu_rx_pyld_hdlr;
+		sys->get_skb = ipa_get_skb_ipa_rx;
+		sys->free_skb = ipa_free_skb_rx;
+		sys->repl_hdlr = ipa_replenish_rx_cache;
+		sys->ep->wakelock_client =
+		   IPA_WAKELOCK_REF_CLIENT_ODU_RX;
+	} else if (in->client ==
+				  IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+		IPADBG("assigning policy to client:%d",
+			   in->client);
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_INTR_POLL_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+						   | SPS_O_ACK_TRANSFERS);
+		sys->sps_callback = ipa_sps_irq_rx_notify;
+		INIT_WORK(&sys->work, ipa_wq_handle_rx);
+		INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+						  switch_to_intr_rx_work_func);
+	} else if (in->client ==
+				  IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+		IPADBG("assigning policy to client:%d",
+			   in->client);
+		sys->ep->status.status_en = false;
+		sys->policy = IPA_POLICY_NOINTR_MODE;
+		sys->sps_option = SPS_O_AUTO_ENABLE |
+			  SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+	} else {
+		IPAERR("Need to install a RX pipe hdlr\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int ipa_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa_sys_context *sys)
+{
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
+		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+		sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
+		return 0;
+	}
+
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v1_1) {
+		if (in->client == IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
+					SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa_sps_irq_tx_notify;
+			INIT_WORK(&sys->work, ipa_wq_handle_tx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				switch_to_intr_tx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+		} else if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT |
+					SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+					replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_RX_SKB_SIZE;
+			sys->rx_pool_sz = IPA_RX_POOL_CEIL;
+			sys->pyld_hdlr = ipa_rx_pyld_hdlr;
+			sys->get_skb = ipa_get_skb_ipa_rx;
+			sys->free_skb = ipa_free_skb_rx;
+			sys->repl_hdlr = ipa_replenish_rx_cache;
+		} else if (IPA_CLIENT_IS_PROD(in->client)) {
+			sys->policy = IPA_POLICY_INTR_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+			sys->sps_callback = ipa_sps_irq_tx_no_aggr_notify;
+		} else {
+			IPAERR("Need to install a RX pipe hdlr\n");
+			WARN_ON(1);
+			return -EINVAL;
+		}
+
+		return 0;
+	} else if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
+		return ipa_assign_policy_v2(in, sys);
+
+	IPAERR("Unsupported HW type %d\n", ipa_ctx->ipa_hw_type);
+	WARN_ON(1);
+	return -EINVAL;
+}
+
+/**
+ * ipa_tx_client_rx_notify_release() - Callback function
+ * which calls the user-supplied callback function to
+ * release the skb once the transfer is complete; if no
+ * callback was supplied, only the descriptor accounting
+ * and statistics are updated
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notified callback is for the destination client
+ * This function is supplied in ipa_tx_dp_mul
+ */
+static void ipa_tx_client_rx_notify_release(void *user1, int user2)
+{
+	struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
+	int ep_idx = user2;
+
+	IPADBG("Received data desc anchor:%p\n", dd);
+
+	atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+
+	/* wlan host driver waits until TX completes before unloading */
+	IPADBG("ep=%d fifo_desc_free_count=%d\n",
+		ep_idx, atomic_read(&ipa_ctx->ep[ep_idx].avail_fifo_desc));
+	IPADBG("calling client notify callback with priv:%p\n",
+		ipa_ctx->ep[ep_idx].priv);
+
+	if (ipa_ctx->ep[ep_idx].client_notify) {
+		ipa_ctx->ep[ep_idx].client_notify(ipa_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)user1);
+		ipa_ctx->ep[ep_idx].wstats.rx_hd_reply++;
+	}
+}
+/**
+ * ipa_tx_client_rx_pkt_status() - Callback function
+ * which increases the available FIFO descriptor count and
+ * updates the status-received statistics for the endpoint
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notified callback is for the destination client
+ * This function is supplied in ipa_tx_dp_mul
+ */
+static void ipa_tx_client_rx_pkt_status(void *user1, int user2)
+{
+	int ep_idx = user2;
+
+	atomic_inc(&ipa_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+/**
+ * ipa2_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @ipa_tx_data_desc:	[in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to the IPA HW.
+ *
+ * The function sends the data descriptors from WLAN1_PROD one
+ * at a time using sps_transfer_one() and sets the EOT flag on
+ * the last descriptor. Once the send is done from the SPS
+ * point of view, the IPA driver is notified through the
+ * supplied callback - ipa_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa_sps_irq_tx_no_aggr_notify() will then call the
+ * user-supplied callback (from ipa_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_tx_dp_mul(enum ipa_client_type src,
+			struct ipa_tx_data_desc *data_desc)
+{
+	/* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+	struct ipa_tx_data_desc *entry;
+	struct ipa_sys_context *sys;
+	struct ipa_desc desc = { 0 };
+	u32 num_desc, cnt;
+	int ep_idx;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	IPADBG("Received data desc anchor:%p\n", data_desc);
+
+	spin_lock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+	ep_idx = ipa2_get_ep_mapping(src);
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_send;
+	}
+	IPADBG("ep idx:%d\n", ep_idx);
+	sys = ipa_ctx->ep[ep_idx].sys;
+
+	if (unlikely(ipa_ctx->ep[ep_idx].valid == 0)) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_send;
+	}
+	sys->ep->wstats.rx_hd_rcvd++;
+
+	/* Calculate the number of descriptors */
+	num_desc = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		num_desc++;
+	}
+	IPADBG("Number of Data Descriptors:%d", num_desc);
+
+	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+		IPAERR("Insufficient data descriptors available\n");
+		goto fail_send;
+	}
+
+	/* Assign callback only for last data descriptor */
+	cnt = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		IPADBG("Parsing data desc :%d\n", cnt);
+		cnt++;
+		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+			(u8)sys->ep->cfg.meta.qmap_id;
+		desc.pyld = entry->pyld_buffer;
+		desc.len = entry->pyld_len;
+		desc.type = IPA_DATA_DESC_SKB;
+		desc.user1 = data_desc;
+		desc.user2 = ep_idx;
+		IPADBG("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+			entry->priv, desc.pyld, desc.len);
+
+		/* In case of last descriptor populate callback */
+		if (cnt == num_desc) {
+			IPADBG("data desc:%p\n", data_desc);
+			desc.callback = ipa_tx_client_rx_notify_release;
+		} else {
+			desc.callback = ipa_tx_client_rx_pkt_status;
+		}
+
+		IPADBG("calling ipa_send_one()\n");
+		if (ipa_send_one(sys, &desc, true)) {
+			IPAERR("fail to send skb\n");
+			sys->ep->wstats.rx_pkt_leak += (cnt-1);
+			sys->ep->wstats.rx_dp_fail++;
+			goto fail_send;
+		}
+
+		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+			atomic_dec(&sys->ep->avail_fifo_desc);
+
+		sys->ep->wstats.rx_pkts_rcvd++;
+		IPADBG("ep=%d fifo desc=%d\n",
+			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+	}
+
+	sys->ep->wstats.rx_hd_processed++;
+	spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return 0;
+
+fail_send:
+	spin_unlock_bh(&ipa_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return -EFAULT;
+
+}
+
+void ipa2_free_skb(struct ipa_rx_data *data)
+{
+	struct ipa_rx_pkt_wrapper *rx_pkt;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return;
+	}
+
+	spin_lock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+
+	ipa_ctx->wc_memb.total_tx_pkts_freed++;
+	rx_pkt = container_of(data, struct ipa_rx_pkt_wrapper, data);
+
+	ipa_skb_recycle(rx_pkt->data.skb);
+	(void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+
+	list_add_tail(&rx_pkt->link,
+		&ipa_ctx->wc_memb.wlan_comm_desc_list);
+	ipa_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	spin_unlock_bh(&ipa_ctx->wc_memb.wlan_spinlock);
+}
+
+/* Functions added to support kernel tests */
+
+int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
+			unsigned long *ipa_bam_hdl,
+			u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+	struct ipa_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (ipa_bam_hdl == NULL || ipa_pipe_num == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+	if (sys_in->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa2_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client :%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			IPAERR("EP %d already allocated\n", ipa_ep_idx);
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa2_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa2_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = true;
+
+	result = ipa_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n",
+				 result, ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa2_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	*ipa_pipe_num = ipa_ep_idx;
+	*ipa_bam_hdl = ipa_ctx->bam_handle;
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_gen2:
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
+
+int ipa2_sys_teardown(u32 clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_disable_data_path(clnt_hdl);
+	ep->valid = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl)
+{
+	IPAERR("GSI not supported in IPAv2");
+	return -EFAULT;
+}
+
+/**
+ * ipa_adjust_ra_buff_base_sz()
+ *
+ * Return value: the largest power of two which is smaller
+ * than the input value after IPA_MTU and
+ * IPA_GENERIC_RX_BUFF_LIMIT have been added to it
+ */
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
+{
+	aggr_byte_limit += IPA_MTU;
+	aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
+	aggr_byte_limit--;
+	aggr_byte_limit |= aggr_byte_limit >> 1;
+	aggr_byte_limit |= aggr_byte_limit >> 2;
+	aggr_byte_limit |= aggr_byte_limit >> 4;
+	aggr_byte_limit |= aggr_byte_limit >> 8;
+	aggr_byte_limit |= aggr_byte_limit >> 16;
+	aggr_byte_limit++;
+	return aggr_byte_limit >> 1;
+}
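+
+/*
+ * Illustrative walk-through of the bit trick above (hypothetical helper,
+ * not called anywhere): smearing the set bits of (n - 1) to the right and
+ * adding one rounds n up to the next power of two; one right shift then
+ * yields the largest power of two below the adjusted input.
+ */
+static inline u32 ipa_example_round_down_pow2(u32 n)
+{
+	/* e.g. n = 5000: 4999 smears to 8191, +1 -> 8192, >>1 -> 4096 */
+	n--;
+	n |= n >> 1;
+	n |= n >> 2;
+	n |= n >> 4;
+	n |= n >> 8;
+	n |= n >> 16;
+	n++;
+	return n >> 1;
+}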
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
new file mode 100644
index 0000000..12eaae8
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_flt.c
@@ -0,0 +1,1473 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
+#define IPA_FLT_TABLE_WORD_SIZE			(4)
+#define IPA_FLT_ENTRY_MEMORY_ALLIGNMENT		(0x3)
+#define IPA_FLT_BIT_MASK			(0x1)
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND		(-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_MDFY_FAILED		(-1)
+
+static int ipa_generate_hw_rule_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+	int num_offset_meq_32 = attrib->num_offset_meq_32;
+	int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+	int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+	int num_offset_meq_128 = attrib->num_offset_meq_128;
+	int i;
+
+	if (attrib->tos_eq_present) {
+		*buf = ipa_write_8(attrib->tos_eq, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (attrib->protocol_eq_present) {
+		*buf = ipa_write_8(attrib->protocol_eq, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (num_offset_meq_32) {
+		*buf = ipa_write_8(attrib->offset_meq_32[0].offset, *buf);
+		*buf = ipa_write_32(attrib->offset_meq_32[0].mask, *buf);
+		*buf = ipa_write_32(attrib->offset_meq_32[0].value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_offset_meq_32--;
+	}
+
+	if (num_offset_meq_32) {
+		*buf = ipa_write_8(attrib->offset_meq_32[1].offset, *buf);
+		*buf = ipa_write_32(attrib->offset_meq_32[1].mask, *buf);
+		*buf = ipa_write_32(attrib->offset_meq_32[1].value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_range_16) {
+		*buf = ipa_write_8(attrib->ihl_offset_range_16[0].offset, *buf);
+		*buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+				*buf);
+		*buf = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+				*buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_ihl_offset_range_16--;
+	}
+
+	if (num_ihl_offset_range_16) {
+		*buf = ipa_write_8(attrib->ihl_offset_range_16[1].offset, *buf);
+		*buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+				*buf);
+		*buf = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+				*buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_ihl_offset_range_16--;
+	}
+
+	if (attrib->ihl_offset_eq_16_present) {
+		*buf = ipa_write_8(attrib->ihl_offset_eq_16.offset, *buf);
+		*buf = ipa_write_16(attrib->ihl_offset_eq_16.value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (attrib->ihl_offset_eq_32_present) {
+		*buf = ipa_write_8(attrib->ihl_offset_eq_32.offset, *buf);
+		*buf = ipa_write_32(attrib->ihl_offset_eq_32.value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (num_ihl_offset_meq_32) {
+		*buf = ipa_write_8(attrib->ihl_offset_meq_32[0].offset, *buf);
+		*buf = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, *buf);
+		*buf = ipa_write_32(attrib->ihl_offset_meq_32[0].value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_ihl_offset_meq_32--;
+	}
+
+	/* TODO check layout of 16 byte mask and value */
+	if (num_offset_meq_128) {
+		*buf = ipa_write_8(attrib->offset_meq_128[0].offset, *buf);
+		for (i = 0; i < 16; i++)
+			*buf = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+					*buf);
+		for (i = 0; i < 16; i++)
+			*buf = ipa_write_8(attrib->offset_meq_128[0].value[i],
+					*buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_128) {
+		*buf = ipa_write_8(attrib->offset_meq_128[1].offset, *buf);
+		for (i = 0; i < 16; i++)
+			*buf = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+					*buf);
+		for (i = 0; i < 16; i++)
+			*buf = ipa_write_8(attrib->offset_meq_128[1].value[i],
+					*buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_offset_meq_128--;
+	}
+
+	if (attrib->tc_eq_present) {
+		*buf = ipa_write_8(attrib->tc_eq, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (attrib->fl_eq_present) {
+		*buf = ipa_write_32(attrib->fl_eq, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (num_ihl_offset_meq_32) {
+		*buf = ipa_write_8(attrib->ihl_offset_meq_32[1].offset, *buf);
+		*buf = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, *buf);
+		*buf = ipa_write_32(attrib->ihl_offset_meq_32[1].value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (attrib->metadata_meq32_present) {
+		*buf = ipa_write_8(attrib->metadata_meq32.offset, *buf);
+		*buf = ipa_write_32(attrib->metadata_meq32.mask, *buf);
+		*buf = ipa_write_32(attrib->metadata_meq32.value, *buf);
+		*buf = ipa_pad_to_32(*buf);
+	}
+
+	if (attrib->ipv4_frag_eq_present)
+		*buf = ipa_pad_to_32(*buf);
+
+	return 0;
+}
+
+/**
+ * ipa_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer; buf == NULL means the caller only wants to
+ *		know the size of the rule as seen by HW, so no valid
+ *		buffer was passed and a scratch buffer is used instead.
+ *		With this scheme the rule is generated twice: once to
+ *		learn its size using the scratch buffer, and a second
+ *		time to write it into the caller-supplied buffer of the
+ *		required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa_flt_entry *entry, u8 *buf)
+{
+	struct ipa_flt_rule_hw_hdr *hdr;
+	const struct ipa_flt_rule *rule =
+		(const struct ipa_flt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
+	u8 *start;
+
+	if (buf == NULL) {
+		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+		buf = (u8 *)tmp;
+	}
+
+	start = buf;
+	hdr = (struct ipa_flt_rule_hw_hdr *)buf;
+	hdr->u.hdr.action = entry->rule.action;
+	hdr->u.hdr.retain_hdr = entry->rule.retain_hdr;
+	hdr->u.hdr.to_uc = entry->rule.to_uc;
+	if (entry->rt_tbl)
+		hdr->u.hdr.rt_tbl_idx = entry->rt_tbl->idx;
+	else
+		hdr->u.hdr.rt_tbl_idx = entry->rule.rt_tbl_idx;
+	hdr->u.hdr.rsvd = 0;
+	buf += sizeof(struct ipa_flt_rule_hw_hdr);
+
+	if (rule->eq_attrib_type) {
+		if (ipa_generate_hw_rule_from_eq(&rule->eq_attrib, &buf)) {
+			IPAERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+		en_rule = rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+			IPAERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+
+	IPADBG("en_rule 0x%x, action=%d, rt_idx=%d, uc=%d, retain_hdr=%d\n",
+			en_rule,
+			hdr->u.hdr.action,
+			hdr->u.hdr.rt_tbl_idx,
+			hdr->u.hdr.to_uc,
+			hdr->u.hdr.retain_hdr);
+
+	hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(hdr->u.word, (u8 *)hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=%x calc=%td\n",
+		       entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
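+/*
+ * Illustrative sketch of the two-pass scheme described in the kernel-doc
+ * above (hypothetical helper, not used by the driver): the first call with
+ * a NULL buffer only computes entry->hw_len, and the second call writes
+ * the rule into a buffer of at least that size.
+ */
+static inline int ipa_example_two_pass_flt_rule(enum ipa_ip_type ip,
+		struct ipa_flt_entry *entry, u8 *body)
+{
+	/* pass 1: size only, scratch buffer used internally */
+	if (ipa_generate_flt_hw_rule(ip, entry, NULL))
+		return -EPERM;
+	/* pass 2: write into the real buffer of entry->hw_len bytes */
+	return ipa_generate_flt_hw_rule(ip, entry, body);
+}
+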
+/**
+ * ipa_get_flt_hw_tbl_size() - returns the size of HW filtering table
+ * @ip: the ip address family type
+ * @hdr_sz: header size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_get_flt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	u32 total_sz = 0;
+	u32 rule_set_sz;
+	int i;
+
+	*hdr_sz = 0;
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	rule_set_sz = 0;
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+		if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to find HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG("glob ip %d len %d\n", ip, entry->hw_len);
+		rule_set_sz += entry->hw_len;
+	}
+
+	if (rule_set_sz) {
+		tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+		/* this rule-set uses a word in header block */
+		*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+		if (!tbl->in_sys) {
+			/* add the terminator */
+			total_sz += (rule_set_sz + IPA_FLT_TABLE_WORD_SIZE);
+			total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		rule_set_sz = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (ipa_generate_flt_hw_rule(ip, entry, NULL)) {
+				IPAERR("failed to find HW FLT rule size\n");
+				return -EPERM;
+			}
+			IPADBG("pipe %d len %d\n", i, entry->hw_len);
+			rule_set_sz += entry->hw_len;
+		}
+
+		if (rule_set_sz) {
+			tbl->sz = rule_set_sz + IPA_FLT_TABLE_WORD_SIZE;
+			/* this rule-set uses a word in header block */
+			*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+			if (!tbl->in_sys) {
+				/* add the terminator */
+				total_sz += (rule_set_sz +
+					    IPA_FLT_TABLE_WORD_SIZE);
+				total_sz = (total_sz +
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) &
+					~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			}
+		}
+	}
+
+	*hdr_sz += IPA_FLT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	IPADBG("FLT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
+
+static int ipa_generate_flt_hw_tbl_common(enum ipa_ip_type ip, u8 *base,
+		u8 *hdr, u32 body_start_offset, u8 *hdr2, u32 *hdr_top)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	int i;
+	u32 offset;
+	u8 *body;
+	struct ipa_mem_buffer flt_tbl_mem;
+	u8 *ftbl_membody;
+
+	*hdr_top = 0;
+	body = base;
+
+#define IPA_WRITE_FLT_HDR(idx, val) {			\
+	if (idx <= 5) {					\
+		*((u32 *)hdr + 1 + idx) = val;		\
+	} else if (idx >= 6 && idx <= 10) {		\
+		WARN_ON(1);				\
+	} else if (idx >= 11 && idx <= 19) {		\
+		*((u32 *)hdr2 + idx - 11) = val;	\
+	} else {					\
+		WARN_ON(1);				\
+	}						\
+}
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+
+	if (!list_empty(&tbl->head_flt_rule_list)) {
+		*hdr_top |= IPA_FLT_BIT_MASK;
+
+		if (!tbl->in_sys) {
+			offset = body - base + body_start_offset;
+			if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+				IPAERR("offset is not word multiple %d\n",
+						offset);
+				goto proc_err;
+			}
+
+			offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_FLT_BIT_MASK;
+
+			if (hdr2)
+				*(u32 *)hdr = offset;
+			else
+				hdr = ipa_write_32(offset, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry, body)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((long)body & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_FLT_TABLE_WORD_SIZE -
+					((long)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the flt tbl */
+			flt_tbl_mem.size = tbl->sz;
+			flt_tbl_mem.base =
+			   dma_alloc_coherent(ipa_ctx->pdev, flt_tbl_mem.size,
+					   &flt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!flt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						flt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(flt_tbl_mem.phys_base &
+				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+			ftbl_membody = flt_tbl_mem.base;
+			memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+
+			if (hdr2)
+				*(u32 *)hdr = flt_tbl_mem.phys_base;
+			else
+				hdr = ipa_write_32(flt_tbl_mem.phys_base, hdr);
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+					link) {
+				if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+					IPAERR("failed to gen HW FLT rule\n");
+					WARN_ON(1);
+				}
+				ftbl_membody += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			ftbl_membody = ipa_write_32(0, ftbl_membody);
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = flt_tbl_mem;
+		}
+	}
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (!list_empty(&tbl->head_flt_rule_list)) {
+			/* pipe "i" is at bit "i+1" */
+			*hdr_top |= (1 << (i + 1));
+
+			if (!tbl->in_sys) {
+				offset = body - base + body_start_offset;
+				if (offset & IPA_FLT_ENTRY_MEMORY_ALLIGNMENT) {
+					IPAERR("ofst is not word multiple %d\n",
+					       offset);
+					goto proc_err;
+				}
+				offset &= ~IPA_FLT_ENTRY_MEMORY_ALLIGNMENT;
+				/* rule is at an offset from base */
+				offset |= IPA_FLT_BIT_MASK;
+
+				if (hdr2)
+					IPA_WRITE_FLT_HDR(i, offset)
+				else
+					hdr = ipa_write_32(offset, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+								body)) {
+						IPAERR("fail gen FLT rule\n");
+						goto proc_err;
+					}
+					body += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				body = ipa_write_32(0, body);
+				if ((long)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT)
+					/* advance body to next word boundary */
+					body = body + (IPA_FLT_TABLE_WORD_SIZE -
+						((long)body &
+					IPA_FLT_ENTRY_MEMORY_ALLIGNMENT));
+			} else {
+				WARN_ON(tbl->sz == 0);
+				/* allocate memory for the flt tbl */
+				flt_tbl_mem.size = tbl->sz;
+				flt_tbl_mem.base =
+				   dma_alloc_coherent(ipa_ctx->pdev,
+						   flt_tbl_mem.size,
+						   &flt_tbl_mem.phys_base,
+						   GFP_KERNEL);
+				if (!flt_tbl_mem.base) {
+					IPAERR("fail alloc DMA buff size %d\n",
+							flt_tbl_mem.size);
+					WARN_ON(1);
+					goto proc_err;
+				}
+
+				WARN_ON(flt_tbl_mem.phys_base &
+				IPA_FLT_ENTRY_MEMORY_ALLIGNMENT);
+
+				ftbl_membody = flt_tbl_mem.base;
+				memset(flt_tbl_mem.base, 0, flt_tbl_mem.size);
+
+				if (hdr2)
+					IPA_WRITE_FLT_HDR(i,
+						flt_tbl_mem.phys_base)
+				else
+					hdr = ipa_write_32(
+						flt_tbl_mem.phys_base, hdr);
+
+				/* generate the rule-set */
+				list_for_each_entry(entry,
+						&tbl->head_flt_rule_list,
+						link) {
+					if (ipa_generate_flt_hw_rule(ip, entry,
+							ftbl_membody)) {
+						IPAERR("fail gen FLT rule\n");
+						WARN_ON(1);
+					}
+					ftbl_membody += entry->hw_len;
+				}
+
+				/* write the rule-set terminator */
+				ftbl_membody =
+					ipa_write_32(0, ftbl_membody);
+				if (tbl->curr_mem.phys_base) {
+					WARN_ON(tbl->prev_mem.phys_base);
+					tbl->prev_mem = tbl->curr_mem;
+				}
+				tbl->curr_mem = flt_tbl_mem;
+			}
+		}
+	}
+
+	return 0;
+
+proc_err:
+	return -EPERM;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl() - generates the filtering hardware table
+ * @ip:	[in] the ip address family type
+ * @mem:	[out] buffer to put the filtering table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa_generate_flt_hw_tbl_v1_1(enum ipa_ip_type ip,
+		struct ipa_mem_buffer *mem)
+{
+	u32 hdr_top = 0;
+	u32 hdr_sz;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+
+	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+	if (mem->size == 0) {
+		IPAERR("flt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the flt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* write a dummy header to move cursor */
+	hdr = ipa_write_32(hdr_top, hdr);
+
+	if (ipa_generate_flt_hw_tbl_common(ip, body, hdr, hdr_sz, 0,
+				&hdr_top)) {
+		IPAERR("fail to generate FLT HW table\n");
+		goto proc_err;
+	}
+
+	/* now write the hdr_top */
+	ipa_write_32(hdr_top, base);
+
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	return 0;
+
+proc_err:
+	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+error:
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	int i;
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	if (tbl->prev_mem.phys_base) {
+		IPADBG("reaping glob flt tbl (prev) ip=%d\n", ip);
+		dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+				tbl->prev_mem.base, tbl->prev_mem.phys_base);
+		memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+	}
+
+	if (list_empty(&tbl->head_flt_rule_list)) {
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping glob flt tbl (curr) ip=%d\n", ip);
+			dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			memset(&tbl->curr_mem, 0, sizeof(tbl->curr_mem));
+		}
+	}
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping flt tbl (prev) pipe=%d ip=%d\n", i, ip);
+			dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem.phys_base) {
+				IPADBG("reaping flt tbl (curr) pipe=%d ip=%d\n",
+						i, ip);
+				dma_free_coherent(ipa_ctx->pdev,
+						tbl->curr_mem.size,
+						tbl->curr_mem.base,
+						tbl->curr_mem.phys_base);
+				memset(&tbl->curr_mem, 0,
+						sizeof(tbl->curr_mem));
+			}
+		}
+	}
+}
+
+int __ipa_commit_flt_v1_1(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_filter_init *v4;
+	struct ipa_ip_v6_filter_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = ipa_ctx->ip4_flt_tbl_lcl ? IPA_MEM_v1_RAM_V4_FLT_SIZE :
+			IPA_MEM_PART(v4_flt_size_ddr);
+		size = sizeof(struct ipa_ip_v4_filter_init);
+	} else {
+		avail = ipa_ctx->ip6_flt_tbl_lcl ? IPA_MEM_v1_RAM_V6_FLT_SIZE :
+			IPA_MEM_PART(v6_flt_size_ddr);
+		size = sizeof(struct ipa_ip_v6_filter_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_flt_hw_tbl_v1_1(ip, mem)) {
+		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_send_cmd;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_filter_init *)cmd;
+		desc.opcode = IPA_IP_V4_FILTER_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_MEM_v1_RAM_V4_FLT_OFST;
+	} else {
+		v6 = (struct ipa_ip_v6_filter_init *)cmd;
+		desc.opcode = IPA_IP_V6_FILTER_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_MEM_v1_RAM_V6_FLT_OFST;
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	__ipa_reap_sys_flt_tbls(ip);
+	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->phys_base)
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+static int ipa_generate_flt_hw_tbl_v2(enum ipa_ip_type ip,
+		struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head1,
+		struct ipa_mem_buffer *head2)
+{
+	int i;
+	u32 hdr_sz;
+	int num_words;
+	u32 *entr;
+	u32 body_start_offset;
+	u32 hdr_top;
+
+	if (ip == IPA_IP_v4)
+		body_start_offset = IPA_MEM_PART(apps_v4_flt_ofst) -
+			IPA_MEM_PART(v4_flt_ofst);
+	else
+		body_start_offset = IPA_MEM_PART(apps_v6_flt_ofst) -
+			IPA_MEM_PART(v6_flt_ofst);
+
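+	/*
+	 * Both header blocks are pre-seeded with the address of the empty
+	 * table (empty_rt_tbl_mem) so that pipes with no filter rules still
+	 * point at valid memory: head1 (7 words) covers the global table and
+	 * pipes 0-5, head2 (9 words) covers pipes 11 and up, as consumed by
+	 * __ipa_commit_flt_v2() below.
+	 */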
+	num_words = 7;
+	head1->size = num_words * 4;
+	head1->base = dma_alloc_coherent(ipa_ctx->pdev, head1->size,
+			&head1->phys_base, GFP_KERNEL);
+	if (!head1->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", head1->size);
+		goto err;
+	}
+	entr = (u32 *)head1->base;
+	for (i = 0; i < num_words; i++) {
+		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entr++;
+	}
+
+	num_words = 9;
+	head2->size = num_words * 4;
+	head2->base = dma_alloc_coherent(ipa_ctx->pdev, head2->size,
+			&head2->phys_base, GFP_KERNEL);
+	if (!head2->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", head2->size);
+		goto head_err;
+	}
+	entr = (u32 *)head2->base;
+	for (i = 0; i < num_words; i++) {
+		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entr++;
+	}
+
+	mem->size = ipa_get_flt_hw_tbl_size(ip, &hdr_sz);
+	mem->size -= hdr_sz;
+	mem->size = IPA_HW_TABLE_ALIGNMENT(mem->size);
+
+	if (mem->size) {
+		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+				&mem->phys_base, GFP_KERNEL);
+		if (!mem->base) {
+			IPAERR("fail to alloc DMA buff of size %d\n",
+					mem->size);
+			goto body_err;
+		}
+		memset(mem->base, 0, mem->size);
+	}
+
+	if (ipa_generate_flt_hw_tbl_common(ip, mem->base, head1->base,
+				body_start_offset, head2->base, &hdr_top)) {
+		IPAERR("fail to generate FLT HW table\n");
+		goto proc_err;
+	}
+
+	IPADBG("HEAD1\n");
+	IPA_DUMP_BUFF(head1->base, head1->phys_base, head1->size);
+	IPADBG("HEAD2\n");
+	IPA_DUMP_BUFF(head2->base, head2->phys_base, head2->size);
+	if (mem->size) {
+		IPADBG("BODY\n");
+		IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+	}
+
+	return 0;
+
+proc_err:
+	if (mem->size)
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
+body_err:
+	dma_free_coherent(ipa_ctx->pdev, head2->size, head2->base,
+			head2->phys_base);
+head_err:
+	dma_free_coherent(ipa_ctx->pdev, head1->size, head1->base,
+			head1->phys_base);
+err:
+	return -EPERM;
+}
+
+int __ipa_commit_flt_v2(enum ipa_ip_type ip)
+{
+	struct ipa_desc *desc;
+	struct ipa_hw_imm_cmd_dma_shared_mem *cmd;
+	struct ipa_mem_buffer body;
+	struct ipa_mem_buffer head1;
+	struct ipa_mem_buffer head2;
+	int rc = 0;
+	u32 local_addrb;
+	u32 local_addrh;
+	bool lcl;
+	int num_desc = 0;
+	int i;
+	u16 avail;
+
+	desc = kzalloc(16 * sizeof(*desc), GFP_ATOMIC);
+	if (desc == NULL) {
+		IPAERR("fail to alloc desc blob ip %d\n", ip);
+		rc = -ENOMEM;
+		goto fail_desc;
+	}
+
+	cmd = kzalloc(16 * sizeof(*cmd), GFP_ATOMIC);
+	if (cmd == NULL) {
+		IPAERR("fail to alloc cmd blob ip %d\n", ip);
+		rc = -ENOMEM;
+		goto fail_imm;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = ipa_ctx->ip4_flt_tbl_lcl ?
+			IPA_MEM_PART(apps_v4_flt_size) :
+			IPA_MEM_PART(v4_flt_size_ddr);
+		local_addrh = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_ofst) + 4;
+		local_addrb = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_ofst);
+		lcl = ipa_ctx->ip4_flt_tbl_lcl;
+	} else {
+		avail = ipa_ctx->ip6_flt_tbl_lcl ?
+			IPA_MEM_PART(apps_v6_flt_size) :
+			IPA_MEM_PART(v6_flt_size_ddr);
+		local_addrh = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_ofst) + 4;
+		local_addrb = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_ofst);
+		lcl = ipa_ctx->ip6_flt_tbl_lcl;
+	}
+
+	if (ipa_generate_flt_hw_tbl_v2(ip, &body, &head1, &head2)) {
+		IPAERR("fail to generate FLT HW TBL ip %d\n", ip);
+		rc = -EFAULT;
+		goto fail_gen;
+	}
+
+	if (body.size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
+		goto fail_send_cmd;
+	}
+
+	cmd[num_desc].size = 4;
+	cmd[num_desc].system_addr = head1.phys_base;
+	cmd[num_desc].local_addr = local_addrh;
+
+	desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+	desc[num_desc].pyld = &cmd[num_desc];
+	desc[num_desc].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+	desc[num_desc++].type = IPA_IMM_CMD_DESC;
+
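+	/*
+	 * Copy the per-pipe header words into local memory: pipes 0-5 are
+	 * served from head1 (word 1 + i), pipes 11 and up from head2. Pipes
+	 * whose filter pointer is managed elsewhere (see the skip conditions)
+	 * are left pointing at the empty table.
+	 */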
+	for (i = 0; i < 6; i++) {
+		if (ipa_ctx->skip_ep_cfg_shadow[i]) {
+			IPADBG("skip %d\n", i);
+			continue;
+		}
+
+		if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS) == i ||
+			ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS) == i ||
+			ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD) == i ||
+			(ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i
+			&& ipa_ctx->modem_cfg_emb_pipe_flt)) {
+			IPADBG("skip %d\n", i);
+			continue;
+		}
+
+		if (ip == IPA_IP_v4) {
+			local_addrh = ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(v4_flt_ofst) +
+				8 + i * 4;
+		} else {
+			local_addrh = ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(v6_flt_ofst) +
+				8 + i * 4;
+		}
+		cmd[num_desc].size = 4;
+		cmd[num_desc].system_addr = head1.phys_base + 4 + i * 4;
+		cmd[num_desc].local_addr = local_addrh;
+
+		desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+		desc[num_desc].pyld = &cmd[num_desc];
+		desc[num_desc].len =
+			sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		desc[num_desc++].type = IPA_IMM_CMD_DESC;
+	}
+
+	for (i = 11; i < ipa_ctx->ipa_num_pipes; i++) {
+		if (ipa_ctx->skip_ep_cfg_shadow[i]) {
+			IPADBG("skip %d\n", i);
+			continue;
+		}
+		if (ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == i &&
+			ipa_ctx->modem_cfg_emb_pipe_flt) {
+			IPADBG("skip %d\n", i);
+			continue;
+		}
+		if (ip == IPA_IP_v4) {
+			local_addrh = ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(v4_flt_ofst) +
+				13 * 4 + (i - 11) * 4;
+		} else {
+			local_addrh = ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(v6_flt_ofst) +
+				13 * 4 + (i - 11) * 4;
+		}
+		cmd[num_desc].size = 4;
+		cmd[num_desc].system_addr = head2.phys_base + (i - 11) * 4;
+		cmd[num_desc].local_addr = local_addrh;
+
+		desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+		desc[num_desc].pyld = &cmd[num_desc];
+		desc[num_desc].len =
+			sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		desc[num_desc++].type = IPA_IMM_CMD_DESC;
+	}
+
+	if (lcl) {
+		cmd[num_desc].size = body.size;
+		cmd[num_desc].system_addr = body.phys_base;
+		cmd[num_desc].local_addr = local_addrb;
+
+		desc[num_desc].opcode = IPA_DMA_SHARED_MEM;
+		desc[num_desc].pyld = &cmd[num_desc];
+		desc[num_desc].len =
+			sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		desc[num_desc++].type = IPA_IMM_CMD_DESC;
+
+		if (ipa_send_cmd(num_desc, desc)) {
+			IPAERR("fail to send immediate command\n");
+			rc = -EFAULT;
+			goto fail_send_cmd;
+		}
+	} else {
+		if (ipa_send_cmd(num_desc, desc)) {
+			IPAERR("fail to send immediate command\n");
+			rc = -EFAULT;
+			goto fail_send_cmd;
+		}
+	}
+
+	__ipa_reap_sys_flt_tbls(ip);
+
+fail_send_cmd:
+	if (body.size)
+		dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
+				body.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, head1.size, head1.base,
+			head1.phys_base);
+	dma_free_coherent(ipa_ctx->pdev, head2.size, head2.base,
+			head2.phys_base);
+fail_gen:
+	kfree(cmd);
+fail_imm:
+	kfree(desc);
+fail_desc:
+	return rc;
+}
+
+static int __ipa_add_flt_rule(struct ipa_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      u32 *rule_hdl)
+{
+	struct ipa_flt_entry *entry;
+	struct ipa_rt_tbl *rt_tbl = NULL;
+	int id;
+
+	if (rule->action != IPA_PASS_TO_EXCEPTION) {
+		if (!rule->eq_attrib_type) {
+			if (!rule->rt_tbl_hdl) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+
+			rt_tbl = ipa_id_find(rule->rt_tbl_hdl);
+			if (rt_tbl == NULL) {
+				IPAERR("RT tbl not found\n");
+				goto error;
+			}
+
+			if (rt_tbl->cookie != IPA_COOKIE) {
+				IPAERR("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc FLT rule object\n");
+		goto error;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->rule = *rule;
+	entry->cookie = IPA_COOKIE;
+	entry->rt_tbl = rt_tbl;
+	entry->tbl = tbl;
+	if (add_rear) {
+		if (tbl->sticky_rear)
+			list_add_tail(&entry->link,
+					tbl->head_flt_rule_list.prev);
+		else
+			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	} else {
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	}
+	tbl->rule_cnt++;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	id = ipa_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+	*rule_hdl = id;
+	entry->id = id;
+	IPADBG("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa_flt_entry *entry;
+	int id;
+
+	entry = ipa_id_find(rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d\n", entry->tbl->rule_cnt);
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa_id_remove(id);
+
+	return 0;
+}
+
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
+		enum ipa_ip_type ip)
+{
+	struct ipa_flt_entry *entry;
+	struct ipa_rt_tbl *rt_tbl = NULL;
+
+	entry = ipa_id_find(frule->rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+
+	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
+		if (!frule->rule.eq_attrib_type) {
+			if (!frule->rule.rt_tbl_hdl) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+
+			rt_tbl = ipa_id_find(frule->rule.rt_tbl_hdl);
+			if (rt_tbl == NULL) {
+				IPAERR("RT tbl not found\n");
+				goto error;
+			}
+
+			if (rt_tbl->cookie != IPA_COOKIE) {
+				IPAERR("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	}
+
+	entry->rule = frule->rule;
+	entry->rt_tbl = rt_tbl;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	entry->hw_len = 0;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_global_flt_rule(enum ipa_ip_type ip,
+		const struct ipa_flt_rule *rule, u8 add_rear, u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+
+	if (rule == NULL || rule_hdl == NULL) {
+		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule, rule_hdl);
+
+		return -EINVAL;
+	}
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	IPADBG("add global flt rule ip=%d\n", ip);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 u32 *rule_hdl)
+{
+	struct ipa_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+				rule_hdl, ep);
+
+		return -EINVAL;
+	}
+	ipa_ep_idx = ipa2_get_ep_mapping(ep);
+	if (ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+		IPAERR("ep not valid ep=%d\n", ep);
+		return -EINVAL;
+	}
+	if (ipa_ctx->ep[ipa_ep_idx].valid == 0)
+		IPADBG("ep not connected ep_idx=%d\n", ipa_ep_idx);
+
+	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa2_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of filtering rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (rules->global)
+			result = __ipa_add_global_flt_rule(rules->ip,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		else
+			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa_ctx->ctrl->ipa_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
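+
+/*
+ * Illustrative caller sketch (not part of the driver): adding a single IPv4
+ * exception rule at the rear of an endpoint's table. Field names follow the
+ * accesses in ipa2_add_flt_rule() above; the exact layout of
+ * struct ipa_ioc_add_flt_rule (trailing rules[] array) is assumed here.
+ *
+ *	struct ipa_ioc_add_flt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	req->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+ *	req->global = false;
+ *	req->num_rules = 1;
+ *	req->rules[0].at_rear = 1;
+ *	req->rules[0].rule.action = IPA_PASS_TO_EXCEPTION;
+ *	if (ipa2_add_flt_rule(req) || req->rules[0].status)
+ *		IPAERR("add flt rule failed\n");
+ *	kfree(req);
+ */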
+
+/**
+ * ipa2_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of handles of filtering rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa2_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ * @hdls:	[inout] set of filtering rules to modify
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
+			IPAERR("failed to mdfy flt rule %i\n", i);
+			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa_ctx->ctrl->ipa_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+
+
+/**
+ * ipa2_commit_flt() - Commit the current SW filtering table of specified type
+ * to IPA HW
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+
+	if (ipa_ctx->ctrl->ipa_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa2_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_reset_flt(enum ipa_ip_type ip)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_flt_entry *entry;
+	struct ipa_flt_entry *next;
+	int i;
+	int id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	tbl = &ipa_ctx->glob_flt_tbl[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset flt ip=%d\n", ip);
+	list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list, link) {
+		if (ipa_id_find(entry->id) == NULL) {
+			WARN_ON(1);
+			mutex_unlock(&ipa_ctx->lock);
+			return -EFAULT;
+		}
+
+		if ((ip == IPA_IP_v4 &&
+		     entry->rule.attrib.attrib_mask == IPA_FLT_PROTOCOL &&
+		     entry->rule.attrib.u.v4.protocol ==
+		      IPA_INVALID_L4_PROTOCOL) ||
+		    (ip == IPA_IP_v6 &&
+		     entry->rule.attrib.attrib_mask == IPA_FLT_NEXT_HDR &&
+		     entry->rule.attrib.u.v6.next_hdr ==
+		      IPA_INVALID_L4_PROTOCOL))
+			continue;
+
+		list_del(&entry->link);
+		entry->tbl->rule_cnt--;
+		if (entry->rt_tbl)
+			entry->rt_tbl->ref_cnt--;
+		entry->cookie = 0;
+		id = entry->id;
+		kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+		/* remove the handle from the database */
+		ipa_id_remove(id);
+	}
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		tbl = &ipa_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			if (ipa_id_find(entry->id) == NULL) {
+				WARN_ON(1);
+				mutex_unlock(&ipa_ctx->lock);
+				return -EFAULT;
+			}
+			list_del(&entry->link);
+			entry->tbl->rule_cnt--;
+			if (entry->rt_tbl)
+				entry->rt_tbl->ref_cnt--;
+			entry->cookie = 0;
+			id = entry->id;
+			kmem_cache_free(ipa_ctx->flt_rule_cache, entry);
+
+			/* remove the handle from the database */
+			ipa_id_remove(id);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
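+/**
+ * ipa_install_dflt_flt_rules() - install the default exception filter rules
+ * (IPv4 and IPv6) on the given pipe and commit them to HW
+ * @ipa_ep_idx:	index of the end point to install the default rules on
+ *
+ * The tables are marked sticky_rear so that rules subsequently added at the
+ * rear land in front of the default exception rule.
+ */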
+void ipa_install_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa_flt_tbl *tbl;
+	struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
+	struct ipa_flt_rule rule;
+
+	memset(&rule, 0, sizeof(rule));
+
+	mutex_lock(&ipa_ctx->lock);
+	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+	tbl->sticky_rear = true;
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, false,
+			&ep->dflt_flt4_rule_hdl);
+	ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+
+	tbl = &ipa_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+	tbl->sticky_rear = true;
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, false,
+			&ep->dflt_flt6_rule_hdl);
+	ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+	mutex_unlock(&ipa_ctx->lock);
+}
+
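+/**
+ * ipa_delete_dflt_flt_rules() - remove the default filter rules previously
+ * installed on the given pipe and commit the change to HW
+ * @ipa_ep_idx:	index of the end point to remove the default rules from
+ */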
+void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa_ep_context *ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	mutex_lock(&ipa_ctx->lock);
+	if (ep->dflt_flt4_rule_hdl) {
+		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
+		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v4);
+		ep->dflt_flt4_rule_hdl = 0;
+	}
+	if (ep->dflt_flt6_rule_hdl) {
+		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
+		ipa_ctx->ctrl->ipa_commit_flt(IPA_IP_v6);
+		ep->dflt_flt6_rule_hdl = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
new file mode 100644
index 0000000..ab14cb7
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hdr.c
@@ -0,0 +1,1369 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+
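+/*
+ * Headers and processing contexts are packed into fixed-size bins; an entry
+ * is stored in the smallest bin that fits it, and offsets of deleted entries
+ * are recycled through the per-bin free offset lists.
+ */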
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
+
+#define HDR_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_L2_MAX)
+
+#define HDR_PROC_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+
+/**
+ * ipa_generate_hdr_hw_tbl() - generates the headers table
+ * @mem:	[out] buffer to put the header table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa_hdr_entry *entry;
+
+	mem->size = ipa_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_tbl.end);
+
+	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	memset(mem->base, 0, mem->size);
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (entry->is_hdr_proc_ctx)
+			continue;
+		IPADBG("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		memcpy(mem->base + entry->offset_entry->offset, entry->hdr,
+				entry->hdr_len);
+	}
+
+	return 0;
+}
+
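+/**
+ * ipa_hdr_proc_ctx_to_hw_format() - write the processing context entries in
+ * their HW format
+ * @mem:		[in] the (aligned) processing context table buffer
+ * @hdr_base_addr:	[in] base address of the header table
+ *
+ * Each entry is emitted as a TLV sequence: a HDR_ADD TLV carrying the header
+ * address and length, an optional PROC_CMD TLV selecting the uCP conversion
+ * command, and a terminating END TLV.
+ */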
+static void ipa_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
+	u32 hdr_base_addr)
+{
+	struct ipa_hdr_proc_ctx_entry *entry;
+
+	list_for_each_entry(entry,
+			&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+			link) {
+		IPADBG("processing type %d ofst=%d\n",
+			entry->type, entry->offset_entry->offset);
+		if (entry->type == IPA_HDR_PROC_NONE) {
+			struct ipa_hdr_proc_ctx_add_hdr_seq *ctx;
+
+			ctx = (struct ipa_hdr_proc_ctx_add_hdr_seq *)
+				(mem->base + entry->offset_entry->offset);
+			ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+			ctx->hdr_add.tlv.length = 1;
+			ctx->hdr_add.tlv.value = entry->hdr->hdr_len;
+			ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ?
+				entry->hdr->phys_base :
+				hdr_base_addr +
+				entry->hdr->offset_entry->offset;
+			IPADBG("header address 0x%x\n",
+				ctx->hdr_add.hdr_addr);
+			ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+			ctx->end.length = 0;
+			ctx->end.value = 0;
+		} else {
+			struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+			ctx = (struct ipa_hdr_proc_ctx_add_hdr_cmd_seq *)
+				(mem->base + entry->offset_entry->offset);
+			ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+			ctx->hdr_add.tlv.length = 1;
+			ctx->hdr_add.tlv.value = entry->hdr->hdr_len;
+			ctx->hdr_add.hdr_addr = (entry->hdr->is_hdr_proc_ctx) ?
+				entry->hdr->phys_base :
+				hdr_base_addr +
+				entry->hdr->offset_entry->offset;
+			IPADBG("header address 0x%x\n",
+				ctx->hdr_add.hdr_addr);
+			ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+			ctx->cmd.length = 0;
+			if (entry->type == IPA_HDR_PROC_ETHII_TO_ETHII)
+				ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+			else if (entry->type == IPA_HDR_PROC_ETHII_TO_802_3)
+				ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+			else if (entry->type == IPA_HDR_PROC_802_3_TO_ETHII)
+				ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+			else if (entry->type == IPA_HDR_PROC_802_3_TO_802_3)
+				ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+			IPADBG("command id %d\n", ctx->cmd.value);
+			ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+			ctx->end.length = 0;
+			ctx->end.value = 0;
+		}
+	}
+}
+
+/**
+ * ipa_generate_hdr_proc_ctx_hw_tbl() -
+ * generates the headers processing context table.
+ * @hdr_sys_addr:	[in] system memory address of the header table
+ * @mem:		[out] buffer to put the processing context table
+ * @aligned_mem:	[out] actual processing context table (with alignment).
+ *			Processing context table needs to be 8 Bytes aligned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
+{
+	u32 hdr_base_addr;
+
+	mem->size = (ipa_ctx->hdr_proc_ctx_tbl.end) ? : 4;
+
+	/* make sure table is aligned */
+	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+
+	IPADBG("tbl_sz=%d\n", ipa_ctx->hdr_proc_ctx_tbl.end);
+
+	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
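+	/*
+	 * The buffer was over-allocated by the alignment amount above; round
+	 * the physical address up to the required boundary and expose only
+	 * the aligned window to the caller.
+	 */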
+	aligned_mem->phys_base =
+		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
+	aligned_mem->base = mem->base +
+		(aligned_mem->phys_base - mem->phys_base);
+	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+	memset(aligned_mem->base, 0, aligned_mem->size);
+	hdr_base_addr = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
+		hdr_sys_addr;
+	ipa_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
+
+	return 0;
+}
+
+/*
+ * __ipa_commit_hdr_v1_1() commits the header table to HW.
+ * This function needs to be called with a locked mutex.
+ */
+int __ipa_commit_hdr_v1_1(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	struct ipa_hdr_init_local *cmd;
+	u16 len;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	/* the immediate command param size is the same for local and system */
+	len = sizeof(struct ipa_hdr_init_local);
+
+	/*
+	 * we can use the init_local pointer for init_system due to the
+	 * layout of the struct
+	 */
+	cmd = kmalloc(len, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_hdr_hw_tbl(mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto fail_hw_tbl_gen;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		if (mem->size > IPA_MEM_v1_RAM_HDR_SIZE) {
+			IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+				IPA_MEM_v1_RAM_HDR_SIZE);
+			goto fail_send_cmd;
+		}
+	} else {
+		if (mem->size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+			IPAERR("tbl too big, needed %d avail %d\n", mem->size,
+				IPA_MEM_PART(apps_hdr_size_ddr));
+			goto fail_send_cmd;
+		}
+	}
+
+	cmd->hdr_table_src_addr = mem->phys_base;
+	if (ipa_ctx->hdr_tbl_lcl) {
+		cmd->size_hdr_table = mem->size;
+		cmd->hdr_table_dst_addr = IPA_MEM_v1_RAM_HDR_OFST;
+		desc.opcode = IPA_HDR_INIT_LOCAL;
+	} else {
+		desc.opcode = IPA_HDR_INIT_SYSTEM;
+	}
+	desc.pyld = cmd;
+	desc.len = sizeof(struct ipa_hdr_init_local);
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
+	} else {
+		if (ipa_ctx->hdr_mem.phys_base) {
+			dma_free_coherent(ipa_ctx->pdev, ipa_ctx->hdr_mem.size,
+					  ipa_ctx->hdr_mem.base,
+					  ipa_ctx->hdr_mem.phys_base);
+		}
+		ipa_ctx->hdr_mem = *mem;
+	}
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->base)
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+
+	return -EPERM;
+}
+
+int __ipa_commit_hdr_v2(void)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipa_hdr_init_system cmd;
+	struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd;
+	int rc = -EFAULT;
+
+	if (ipa_generate_hdr_hw_tbl(&mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto end;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		if (mem.size > IPA_MEM_PART(apps_hdr_size)) {
+			IPAERR("tbl too big, needed %d avail %d\n", mem.size,
+				IPA_MEM_PART(apps_hdr_size));
+			goto end;
+		} else {
+			dma_cmd.system_addr = mem.phys_base;
+			dma_cmd.size = mem.size;
+			dma_cmd.local_addr = ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(apps_hdr_ofst);
+			desc.opcode = IPA_DMA_SHARED_MEM;
+			desc.pyld = &dma_cmd;
+			desc.len =
+				sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		}
+	} else {
+		if (mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+			IPAERR("tbl too big, needed %d avail %d\n", mem.size,
+				IPA_MEM_PART(apps_hdr_size_ddr));
+			goto end;
+		} else {
+			cmd.hdr_table_addr = mem.phys_base;
+			desc.opcode = IPA_HDR_INIT_SYSTEM;
+			desc.pyld = &cmd;
+			desc.len = sizeof(struct ipa_hdr_init_system);
+		}
+	}
+
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa_send_cmd(1, &desc))
+		IPAERR("fail to send immediate command\n");
+	else
+		rc = 0;
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+				mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa_ctx->hdr_mem.phys_base)
+				dma_free_coherent(ipa_ctx->pdev,
+						ipa_ctx->hdr_mem.size,
+						ipa_ctx->hdr_mem.base,
+						ipa_ctx->hdr_mem.phys_base);
+			ipa_ctx->hdr_mem = mem;
+		}
+	}
+
+end:
+	return rc;
+}
+
+int __ipa_commit_hdr_v2_5(void)
+{
+	struct ipa_desc desc[2];
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer ctx_mem;
+	struct ipa_mem_buffer aligned_ctx_mem;
+	struct ipa_hdr_init_system hdr_init_cmd = {0};
+	struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+	struct ipa_hw_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+	struct ipa_register_write reg_write_cmd = {0};
+	int rc = -EFAULT;
+	u32 proc_ctx_size;
+	u32 proc_ctx_ofst;
+	u32 proc_ctx_size_ddr;
+
+	memset(desc, 0, 2 * sizeof(struct ipa_desc));
+
+	if (ipa_generate_hdr_hw_tbl(&hdr_mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto end;
+	}
+
+	if (ipa_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
+	    &aligned_ctx_mem)) {
+		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
+		goto end;
+	}
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size));
+			goto end;
+		} else {
+			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
+			dma_cmd_hdr.size = hdr_mem.size;
+			dma_cmd_hdr.local_addr =
+				ipa_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(apps_hdr_ofst);
+			desc[0].opcode = IPA_DMA_SHARED_MEM;
+			desc[0].pyld = &dma_cmd_hdr;
+			desc[0].len =
+				sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		}
+	} else {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size_ddr));
+			goto end;
+		} else {
+			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
+			desc[0].opcode = IPA_HDR_INIT_SYSTEM;
+			desc[0].pyld = &hdr_init_cmd;
+			desc[0].len = sizeof(struct ipa_hdr_init_system);
+		}
+	}
+	desc[0].type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+
+	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
+	if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
+		if (aligned_ctx_mem.size > proc_ctx_size) {
+			IPAERR("tbl too big needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size);
+			goto end;
+		} else {
+			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
+			dma_cmd_ctx.size = aligned_ctx_mem.size;
+			dma_cmd_ctx.local_addr =
+				ipa_ctx->smem_restricted_bytes +
+				proc_ctx_ofst;
+			desc[1].opcode = IPA_DMA_SHARED_MEM;
+			desc[1].pyld = &dma_cmd_ctx;
+			desc[1].len =
+				sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		}
+	} else {
+		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
+			IPAERR("tbl too big, needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size_ddr);
+			goto end;
+		} else {
+			reg_write_cmd.offset = IPA_SYS_PKT_PROC_CNTXT_BASE_OFST;
+			reg_write_cmd.value = aligned_ctx_mem.phys_base;
+			reg_write_cmd.value_mask =
+				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
+			desc[1].pyld = &reg_write_cmd;
+			desc[1].opcode = IPA_REGISTER_WRITE;
+			desc[1].len = sizeof(reg_write_cmd);
+		}
+	}
+	desc[1].type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
+
+	if (ipa_send_cmd(2, desc))
+		IPAERR("fail to send immediate command\n");
+	else
+		rc = 0;
+
+	if (ipa_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(ipa_ctx->pdev, hdr_mem.size, hdr_mem.base,
+			hdr_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa_ctx->hdr_mem.phys_base)
+				dma_free_coherent(ipa_ctx->pdev,
+				ipa_ctx->hdr_mem.size,
+				ipa_ctx->hdr_mem.base,
+				ipa_ctx->hdr_mem.phys_base);
+			ipa_ctx->hdr_mem = hdr_mem;
+		}
+	}
+
+	if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
+		dma_free_coherent(ipa_ctx->pdev, ctx_mem.size, ctx_mem.base,
+			ctx_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa_ctx->hdr_proc_ctx_mem.phys_base)
+				dma_free_coherent(ipa_ctx->pdev,
+					ipa_ctx->hdr_proc_ctx_mem.size,
+					ipa_ctx->hdr_proc_ctx_mem.base,
+					ipa_ctx->hdr_proc_ctx_mem.phys_base);
+			ipa_ctx->hdr_proc_ctx_mem = ctx_mem;
+		}
+	}
+
+end:
+	return rc;
+}
+
+/**
+ * __ipa_commit_hdr_v2_6L() - Commits a header to the IPA HW.
+ *
+ * This function needs to be called with a locked mutex.
+ */
+int __ipa_commit_hdr_v2_6L(void)
+{
+	/* Same implementation as IPAv2 */
+	return __ipa_commit_hdr_v2();
+}
+
+static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
+	bool add_ref_hdr)
+{
+	struct ipa_hdr_entry *hdr_entry;
+	struct ipa_hdr_proc_ctx_entry *entry;
+	struct ipa_hdr_proc_ctx_offset_entry *offset;
+	u32 bin;
+	struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
+	int id;
+	int needed_len;
+	int mem_size;
+
+	IPADBG("processing type %d hdr_hdl %d\n",
+		proc_ctx->type, proc_ctx->hdr_hdl);
+
+	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
+		IPAERR("invalid processing type %d\n", proc_ctx->type);
+		return -EINVAL;
+	}
+
+	hdr_entry = ipa_id_find(proc_ctx->hdr_hdl);
+	if (!hdr_entry || (hdr_entry->cookie != IPA_COOKIE)) {
+		IPAERR("hdr_hdl is invalid\n");
+		return -EINVAL;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc proc_ctx object\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	entry->type = proc_ctx->type;
+	entry->hdr = hdr_entry;
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt++;
+	entry->cookie = IPA_COOKIE;
+
+	needed_len = (proc_ctx->type == IPA_HDR_PROC_NONE) ?
+			sizeof(struct ipa_hdr_proc_ctx_add_hdr_seq) :
+			sizeof(struct ipa_hdr_proc_ctx_add_hdr_cmd_seq);
+
+	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
+		bin = IPA_HDR_PROC_CTX_BIN0;
+	} else if (needed_len <=
+			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
+		bin = IPA_HDR_PROC_CTX_BIN1;
+	} else {
+		IPAERR("unexpected needed len %d\n", needed_len);
+		WARN_ON(1);
+		goto bad_len;
+	}
+
+	mem_size = (ipa_ctx->hdr_proc_ctx_tbl_lcl) ?
+		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+		IPAERR("hdr proc ctx table overflow\n");
+		goto bad_len;
+	}
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		offset = kmem_cache_zalloc(ipa_ctx->hdr_proc_ctx_offset_cache,
+					   GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc offset object\n");
+			goto bad_len;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * for a first item grow, set the bin and offset which are set
+		 * in stone
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				    struct ipa_hdr_proc_ctx_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
+	htbl->proc_ctx_cnt++;
+	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+			htbl->proc_ctx_cnt, offset->offset);
+
+	id = ipa_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to alloc id\n");
+		WARN_ON(1);
+	}
+	entry->id = id;
+	proc_ctx->proc_ctx_hdl = id;
+	entry->ref_cnt++;
+
+	return 0;
+
+bad_len:
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry);
+	return -EPERM;
+}
+
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset;
+	u32 bin;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+	int id;
+	int mem_size;
+
+	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	if (!HDR_TYPE_IS_VALID(hdr->type)) {
+		IPAERR("invalid hdr type %d\n", hdr->type);
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc hdr object\n");
+		goto error;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->type = hdr->type;
+	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
+	entry->eth2_ofst = hdr->eth2_ofst;
+	entry->cookie = IPA_COOKIE;
+
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
+		bin = IPA_HDR_BIN4;
+	else {
+		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	mem_size = (ipa_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
+		IPA_MEM_PART(apps_hdr_size_ddr);
+
+	/*
+	 * If the header does not fit in the local table, place it in DDR.
+	 * This is valid for IPA 2.5 and on, with the exception of IPA 2.6L.
+	 */
+	if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+		if (ipa_ctx->ipa_hw_type != IPA_HW_v2_5) {
+			IPAERR("not enough room for header\n");
+			goto bad_hdr_len;
+		} else {
+			entry->is_hdr_proc_ctx = true;
+			entry->phys_base = dma_map_single(ipa_ctx->pdev,
+				entry->hdr,
+				entry->hdr_len,
+				DMA_TO_DEVICE);
+		}
+	} else {
+		entry->is_hdr_proc_ctx = false;
+		if (list_empty(&htbl->head_free_offset_list[bin])) {
+			offset = kmem_cache_zalloc(ipa_ctx->hdr_offset_cache,
+						   GFP_KERNEL);
+			if (!offset) {
+				IPAERR("failed to alloc hdr offset object\n");
+				goto bad_hdr_len;
+			}
+			INIT_LIST_HEAD(&offset->link);
+			/*
+			 * for a first item grow, set the bin and offset which
+			 * are set in stone
+			 */
+			offset->offset = htbl->end;
+			offset->bin = bin;
+			htbl->end += ipa_hdr_bin_sz[bin];
+			list_add(&offset->link,
+					&htbl->head_offset_list[bin]);
+		} else {
+			/* get the first free slot */
+			offset =
+			list_first_entry(&htbl->head_free_offset_list[bin],
+					struct ipa_hdr_offset_entry, link);
+			list_move(&offset->link, &htbl->head_offset_list[bin]);
+		}
+
+		entry->offset_entry = offset;
+	}
+
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			&entry->phys_base);
+	else
+		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			entry->offset_entry->offset);
+
+	id = ipa_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to alloc id\n");
+		WARN_ON(1);
+	}
+	entry->id = id;
+	hdr->hdr_hdl = id;
+	entry->ref_cnt++;
+
+	if (entry->is_hdr_proc_ctx) {
+		struct ipa_hdr_proc_ctx_add proc_ctx;
+
+		IPADBG("adding processing context for header %s\n", hdr->name);
+		proc_ctx.type = IPA_HDR_PROC_NONE;
+		proc_ctx.hdr_hdl = id;
+		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+			IPAERR("failed to add hdr proc ctx\n");
+			goto fail_add_proc_ctx;
+		}
+		entry->proc_ctx = ipa_id_find(proc_ctx.proc_ctx_hdl);
+	}
+
+	return 0;
+
+fail_add_proc_ctx:
+	entry->ref_cnt--;
+	hdr->hdr_hdl = 0;
+	ipa_id_remove(id);
+	htbl->hdr_cnt--;
+	list_del(&entry->link);
+	dma_unmap_single(ipa_ctx->pdev, entry->phys_base,
+			entry->hdr_len, DMA_TO_DEVICE);
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
+{
+	struct ipa_hdr_proc_ctx_entry *entry;
+	struct ipa_hdr_proc_ctx_tbl *htbl = &ipa_ctx->hdr_proc_ctx_tbl;
+
+	entry = ipa_id_find(proc_ctx_hdl);
+	if (!entry || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	IPADBG("del ctx proc cnt=%d ofst=%d\n",
+		htbl->proc_ctx_cnt, entry->offset_entry->offset);
+
+	if (--entry->ref_cnt) {
+		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
+			proc_ctx_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (release_hdr)
+		__ipa_del_hdr(entry->hdr->id);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+		&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->proc_ctx_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, entry);
+
+	/* remove the handle from the database */
+	ipa_id_remove(proc_ctx_hdl);
+
+	return 0;
+}
+
+
+int __ipa_del_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_tbl *htbl = &ipa_ctx->hdr_tbl;
+
+	entry = ipa_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (!entry || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("del hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
+	else
+		IPADBG("del hdr of sz=%d hdr_cnt=%d ofst=%d\n", entry->hdr_len,
+			htbl->hdr_cnt, entry->offset_entry->offset);
+
+	if (--entry->ref_cnt) {
+		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (entry->is_hdr_proc_ctx) {
+		dma_unmap_single(ipa_ctx->pdev,
+			entry->phys_base,
+			entry->hdr_len,
+			DMA_TO_DEVICE);
+		__ipa_del_hdr_proc_ctx(entry->proc_ctx->id, false);
+	} else {
+		/* move the offset entry to appropriate free list */
+		list_move(&entry->offset_entry->link,
+			&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	}
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	ipa_id_remove(hdr_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa2_add_hdr() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("adding %d headers to IPA driver internal data struct\n",
+			hdrs->num_hdrs);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i])) {
+			IPAERR("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+		if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
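+
+/*
+ * Illustrative caller sketch (not part of the driver): registering a raw
+ * Ethernet II header and committing it. Field names follow the accesses in
+ * __ipa_add_hdr() above; the trailing hdr[] array layout of
+ * struct ipa_ioc_add_hdr, the IPA_HDR_L2_ETHERNET_II type constant and the
+ * eth_hdr_template buffer are assumed/hypothetical here.
+ *
+ *	struct ipa_ioc_add_hdr *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->hdr[0]), GFP_KERNEL);
+ *	if (!req)
+ *		return -ENOMEM;
+ *	req->commit = 1;
+ *	req->num_hdrs = 1;
+ *	strlcpy(req->hdr[0].name, "eth2_lan", IPA_RESOURCE_NAME_MAX);
+ *	memcpy(req->hdr[0].hdr, eth_hdr_template, ETH_HLEN);
+ *	req->hdr[0].hdr_len = ETH_HLEN;
+ *	req->hdr[0].type = IPA_HDR_L2_ETHERNET_II;
+ *	if (ipa2_add_hdr(req) || req->hdr[0].status)
+ *		IPAERR("add hdr failed\n");
+ *	kfree(req);
+ */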
+
+/**
+ * ipa2_del_hdr() - Remove the specified headers from SW and optionally commit
+ * them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_hdr(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa2_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs:	[inout] set of processing context headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
+	    ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+		IPAERR("Processing context not supported on IPA HW %d\n",
+			ipa_ctx->ipa_hw_type);
+		return -EFAULT;
+	}
+
+	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("adding %d header processing contexts to IPA driver\n",
+			proc_ctxs->num_proc_ctxs);
+	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+			IPAERR("failed to add hdr proc ctx %d\n", i);
+			proc_ctxs->proc_ctx[i].status = -1;
+		} else {
+			proc_ctxs->proc_ctx[i].status = 0;
+		}
+	}
+
+	if (proc_ctxs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+		if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa2_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	int i;
+	int result;
+
+	if (ipa_ctx->ipa_hw_type <= IPA_HW_v2_0 ||
+	    ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+		IPAERR("Processing context not supported on IPA HW %d\n",
+			ipa_ctx->ipa_hw_type);
+		return -EFAULT;
+	}
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_hdr_proc_ctx(hdls->hdl[i].hdl, true)) {
+			IPAERR("failed to del hdr proc ctx %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa2_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa2_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa2_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa2_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_reset_hdr(void)
+{
+	struct ipa_hdr_entry *entry;
+	struct ipa_hdr_entry *next;
+	struct ipa_hdr_proc_ctx_entry *ctx_entry;
+	struct ipa_hdr_proc_ctx_entry *ctx_next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa_hdr_proc_ctx_offset_entry *ctx_off_entry;
+	struct ipa_hdr_proc_ctx_offset_entry *ctx_off_next;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa2_reset_rt(IPA_IP_v4))
+		IPAERR("fail to reset v4 rt\n");
+	if (ipa2_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default header */
+		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+			if (entry->is_hdr_proc_ctx) {
+				mutex_unlock(&ipa_ctx->lock);
+				WARN_ON(1);
+				IPAERR("default header is proc ctx\n");
+				return -EFAULT;
+			}
+			continue;
+		}
+
+		if (ipa_id_find(entry->id) == NULL) {
+			mutex_unlock(&ipa_ctx->lock);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		if (entry->is_hdr_proc_ctx) {
+			dma_unmap_single(ipa_ctx->pdev,
+				entry->phys_base,
+				entry->hdr_len,
+				DMA_TO_DEVICE);
+			entry->proc_ctx = NULL;
+		}
+		list_del(&entry->link);
+		entry->ref_cnt = 0;
+		entry->cookie = 0;
+
+		/* remove the handle from the database */
+		ipa_id_remove(entry->id);
+		kmem_cache_free(ipa_ctx->hdr_cache, entry);
+
+	}
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		list_for_each_entry_safe(off_entry, off_next,
+					 &ipa_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+
+			/*
+			 * do not remove the default exception header which is
+			 * at offset 0
+			 */
+			if (off_entry->offset == 0)
+				continue;
+
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+		list_for_each_entry_safe(off_entry, off_next,
+				&ipa_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_offset_cache, off_entry);
+		}
+	}
+	/* there is one header of size 8 */
+	ipa_ctx->hdr_tbl.end = 8;
+	ipa_ctx->hdr_tbl.hdr_cnt = 1;
+
+	IPADBG("reset hdr proc ctx\n");
+	list_for_each_entry_safe(
+		ctx_entry,
+		ctx_next,
+		&ipa_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+		link) {
+
+		if (ipa_id_find(ctx_entry->id) == NULL) {
+			mutex_unlock(&ipa_ctx->lock);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		list_del(&ctx_entry->link);
+		ctx_entry->ref_cnt = 0;
+		ctx_entry->cookie = 0;
+
+		/* remove the handle from the database */
+		ipa_id_remove(ctx_entry->id);
+		kmem_cache_free(ipa_ctx->hdr_proc_ctx_cache, ctx_entry);
+
+	}
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&ipa_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
+				link) {
+
+			list_del(&ctx_off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache,
+					ctx_off_entry);
+		}
+		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+			    &ipa_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
+			    link) {
+			list_del(&ctx_off_entry->link);
+			kmem_cache_free(ipa_ctx->hdr_proc_ctx_offset_cache,
+				ctx_off_entry);
+		}
+	}
+	ipa_ctx->hdr_proc_ctx_tbl.end = 0;
+	ipa_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
+static struct ipa_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa_hdr_entry *entry;
+
+	list_for_each_entry(entry, &ipa_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa2_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * Look up the specified header resource and return its handle if it exists.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa_put_hdr later if this function succeeds
+ */
+int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -1;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (lookup == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		lookup->hdl = entry->id;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+
+/**
+ * __ipa_release_hdr() - drop reference to header and cause
+ * deletion if reference count permits
+ * @hdr_hdl:	[in] handle of header to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa_release_hdr(u32 hdr_hdl)
+{
+	int result = 0;
+
+	if (__ipa_del_hdr(hdr_hdl)) {
+		IPADBG("fail to del hdr %x\n", hdr_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * __ipa_release_hdr_proc_ctx() - drop reference to processing context
+ *  and cause deletion if reference count permits
+ * @proc_ctx_hdl:	[in] handle of processing context to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl)
+{
+	int result = 0;
+
+	if (__ipa_del_hdr_proc_ctx(proc_ctx_hdl, true)) {
+		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa_ctx->ctrl->ipa_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * ipa2_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_put_hdr(u32 hdr_hdl)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -EFAULT;
+
+	mutex_lock(&ipa_ctx->lock);
+
+	entry = ipa_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("invalid header entry\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa2_copy_hdr() - Lookup the specified header resource and return a copy of
+ * it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * Look up the specified header resource and return a copy of it (along with
+ * its attributes) if it exists; this would typically be called for partial
+ * headers.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->type = entry->type;
+		copy->is_partial = entry->is_partial;
+		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
+		copy->eth2_ofst = entry->eth2_ofst;
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h
new file mode 100644
index 0000000..f12a3c6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_hw_defs.h
@@ -0,0 +1,450 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+/* immediate command op-codes */
+#define IPA_DECIPH_INIT           (1)
+#define IPA_PPP_FRM_INIT          (2)
+#define IPA_IP_V4_FILTER_INIT     (3)
+#define IPA_IP_V6_FILTER_INIT     (4)
+#define IPA_IP_V4_NAT_INIT        (5)
+#define IPA_IP_V6_NAT_INIT        (6)
+#define IPA_IP_V4_ROUTING_INIT    (7)
+#define IPA_IP_V6_ROUTING_INIT    (8)
+#define IPA_HDR_INIT_LOCAL        (9)
+#define IPA_HDR_INIT_SYSTEM      (10)
+#define IPA_DECIPH_SETUP         (11)
+#define IPA_REGISTER_WRITE       (12)
+#define IPA_NAT_DMA              (14)
+#define IPA_IP_PACKET_TAG        (15)
+#define IPA_IP_PACKET_INIT       (16)
+#define IPA_DMA_SHARED_MEM       (19)
+#define IPA_IP_PACKET_TAG_STATUS (20)
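+/*
+ * The op-codes above are placed in ipa_desc.opcode for IPA_IMM_CMD_DESC
+ * descriptors handed to ipa_send_cmd() (see e.g. __ipa_commit_flt_v1_1()).
+ */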
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+
+/**
+ * struct ipa_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post routing action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: set to add back to the packet the header that was removed
+ *  as part of header removal. This is done by the
+ *  header insertion block.
+ * @to_uc: direct IPA to send the packet to the uC instead of
+ *  the intended destination. This is performed just after
+ *  routing block processing, so routing will have determined the
+ *  destination end point and the uC will receive this information
+ *  together with the packet as part of the HW packet TX commands
+ * @rsvd: reserved bits
+ */
+struct ipa_flt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 action:5;
+			u32 rt_tbl_idx:5;
+			u32 retain_hdr:1;
+			u32 to_uc:1;
+			u32 rsvd:4;
+		} hdr;
+	} u;
+};
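The union allows the 32-bit rule header to be built field-by-field and then read back as the raw word written to HW; a hedged sketch with arbitrary example values:

static u32 example_pack_flt_rule_hdr(void)
{
	struct ipa_flt_rule_hw_hdr hdr;

	hdr.u.word = 0;
	hdr.u.hdr.en_rule = 0x4;	/* example equation bitmap */
	hdr.u.hdr.action = 0;		/* example action code */
	hdr.u.hdr.rt_tbl_idx = 3;	/* route matches to table 3 */
	hdr.u.hdr.retain_hdr = 1;	/* re-add the removed header */

	return hdr.u.word;		/* raw value for the HW table */
}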
+
+/**
+ * struct ipa_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule properties
+ * @en_rule: enable rule
+ * @pipe_dest_idx: destination pipe index
+ * @system: changed from local to system due to HW change
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ */
+struct ipa_rt_rule_hw_hdr {
+	union {
+		u32 word;
+		struct {
+			u32 en_rule:16;
+			u32 pipe_dest_idx:5;
+			u32 system:1;
+			u32 hdr_offset:10;
+		} hdr;
+		struct {
+			u32 en_rule:16;
+			u32 pipe_dest_idx:5;
+			u32 system:1;
+			u32 hdr_offset:9;
+			u32 proc_ctx:1;
+		} hdr_v2_5;
+	} u;
+};
+
+/**
+ * struct ipa_ip_v4_filter_init - IPA_IP_V4_FILTER_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_filter_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_filter_init - IPA_IP_V6_FILTER_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_filter_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_ip_v4_routing_init - IPA_IP_V4_ROUTING_INIT command payload
+ * @ipv4_rules_addr: address of ipv4 rules
+ * @size_ipv4_rules: size of the above
+ * @ipv4_addr: ipv4 address
+ * @rsvd: reserved
+ */
+struct ipa_ip_v4_routing_init {
+	u64 ipv4_rules_addr:32;
+	u64 size_ipv4_rules:12;
+	u64 ipv4_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_ip_v6_routing_init - IPA_IP_V6_ROUTING_INIT command payload
+ * @ipv6_rules_addr: address of ipv6 rules
+ * @size_ipv6_rules: size of the above
+ * @ipv6_addr: ipv6 address
+ */
+struct ipa_ip_v6_routing_init {
+	u64 ipv6_rules_addr:32;
+	u64 size_ipv6_rules:16;
+	u64 ipv6_addr:16;
+};
+
+/**
+ * struct ipa_hdr_init_local - IPA_HDR_INIT_LOCAL command payload
+ * @hdr_table_src_addr: word address of header table in system memory where the
+ *  table starts (use as source for memory copying)
+ * @size_hdr_table: size of the above (in bytes)
+ * @hdr_table_dst_addr: header address in IPA sram (used as dst for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_local {
+	u64 hdr_table_src_addr:32;
+	u64 size_hdr_table:12;
+	u64 hdr_table_dst_addr:16;
+	u64 rsvd:4;
+};
+
+/**
+ * struct ipa_hdr_init_system - IPA_HDR_INIT_SYSTEM command payload
+ * @hdr_table_addr: word address of header table in system memory where the
+ *  table starts (use as source for memory copying)
+ * @rsvd: reserved
+ */
+struct ipa_hdr_init_system {
+	u64 hdr_table_addr:32;
+	u64 rsvd:32;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ *        1 - header addition type
+ *        3 - processing command type
+ * @length: number of bytes after tlv
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header addition length
+ *        3 - number of 32B including type and length.
+ * @value: specific value for type
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header length
+ *        3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hdr_proc_ctx_tlv {
+	u32 type:8;
+	u32 length:8;
+	u32 value:16;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hdr_proc_ctx_hdr_add {
+	struct ipa_hdr_proc_ctx_tlv tlv;
+	u32 hdr_addr;
+};
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP		BIT(7)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT		BIT(6)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT	BIT(5)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG		BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED	BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL		BIT(2)
+
+/**
+ * struct ipa_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa_a5_mux_hdr {
+	u16 interface_id;
+	u8 src_pipe_index;
+	u8 flags;
+	u32 metadata;
+};
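Since the header arrives big-endian while the A5 runs little-endian, a receive handler would byte-swap the multi-byte fields before using them; a minimal sketch (the surrounding Rx path is assumed):

static void example_parse_a5_mux_hdr(const struct ipa_a5_mux_hdr *mux)
{
	u16 interface_id = ntohs(mux->interface_id);
	u32 metadata = ntohl(mux->metadata);

	if (mux->flags & IPA_A5_MUX_HDR_EXCP_FLAG_IP)
		pr_debug("IP exception: pipe %u if %u meta 0x%x\n",
			 mux->src_pipe_index, interface_id, metadata);
}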
+
+/**
+ * struct ipa_register_write - IPA_REGISTER_WRITE command payload
+ * @rsvd: reserved
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear
+ * @offset: offset from IPA base address
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ */
+struct ipa_register_write {
+	u32 rsvd:15;
+	u32 skip_pipeline_clear:1;
+	u32 offset:16;
+	u32 value:32;
+	u32 value_mask:32;
+};
+
+/**
+ * struct ipa_nat_dma - IPA_NAT_DMA command payload
+ * @table_index: NAT table index
+ * @rsvd1: reserved
+ * @base_addr: base address
+ * @rsvd2: reserved
+ * @offset: offset
+ * @data: metadata
+ * @rsvd3: reserved
+ */
+struct ipa_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/**
+ * struct ipa_ip_packet_init - IPA_IP_PACKET_INIT command payload
+ * @destination_pipe_index: destination pipe index
+ * @rsvd1: reserved
+ * @metadata: metadata
+ * @rsvd2: reserved
+ */
+struct ipa_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsvd1:3;
+	u64 metadata:32;
+	u64 rsvd2:24;
+};
+
+/**
+ * struct ipa_ip_v4_nat_init - IPA_IP_V4_NAT_INIT command payload
+ * @ipv4_rules_addr: ipv4 rules address
+ * @ipv4_expansion_rules_addr: ipv4 expansion rules address
+ * @index_table_addr: index tables address
+ * @index_table_expansion_addr: index expansion table address
+ * @table_index: index in table
+ * @ipv4_rules_addr_type: ipv4 address type
+ * @ipv4_expansion_rules_addr_type: ipv4 expansion address type
+ * @index_table_addr_type: index table address type
+ * @index_table_expansion_addr_type: index expansion table type
+ * @size_base_tables: size of base tables
+ * @size_expansion_tables: size of expansion tables
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_ip_v4_nat_init {
+	u64 ipv4_rules_addr:32;
+	u64 ipv4_expansion_rules_addr:32;
+	u64 index_table_addr:32;
+	u64 index_table_expansion_addr:32;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_ip_addr:32;
+};
+
+/**
+ * struct ipa_ip_packet_tag - IPA_IP_PACKET_TAG command payload
+ * @tag: tag value returned with response
+ */
+struct ipa_ip_packet_tag {
+	u32 tag;
+};
+
+/**
+ * struct ipa_ip_packet_tag_status - IPA_IP_PACKET_TAG_STATUS command payload
+ * @rsvd: reserved
+ * @tag_f_1: tag value returned within status
+ * @tag_f_2: tag value returned within status
+ */
+struct ipa_ip_packet_tag_status {
+	u32 rsvd:16;
+	u32 tag_f_1:16;
+	u32 tag_f_2:32;
+};
+
+/*! @brief Struct for the IPAv2.0 and IPAv2.5 UL packet status header */
+struct ipa_hw_pkt_status {
+	u32 status_opcode:8;
+	u32 exception:8;
+	u32 status_mask:16;
+	u32 pkt_len:16;
+	u32 endp_src_idx:5;
+	u32 reserved_1:3;
+	u32 endp_dest_idx:5;
+	u32 reserved_2:3;
+	u32 metadata:32;
+	union {
+		struct {
+			u32 filt_local:1;
+			u32 filt_global:1;
+			u32 filt_pipe_idx:5;
+			u32 filt_match:1;
+			u32 filt_rule_idx:6;
+			u32 ret_hdr:1;
+			u32 reserved_3:1;
+			u32 tag_f_1:16;
+
+		} ipa_hw_v2_0_pkt_status;
+		struct {
+			u32 filt_local:1;
+			u32 filt_global:1;
+			u32 filt_pipe_idx:5;
+			u32 ret_hdr:1;
+			u32 filt_rule_idx:8;
+			u32 tag_f_1:16;
+
+		} ipa_hw_v2_5_pkt_status;
+	};
+
+	u32 tag_f_2:32;
+	u32 time_day_ctr:32;
+	u32 nat_hit:1;
+	u32 nat_tbl_idx:13;
+	u32 nat_type:2;
+	u32 route_local:1;
+	u32 route_tbl_idx:5;
+	u32 route_match:1;
+	u32 ucp:1;
+	u32 route_rule_idx:8;
+	u32 hdr_local:1;
+	u32 hdr_offset:10;
+	u32 frag_hit:1;
+	u32 frag_rule:4;
+	u32 reserved_4:16;
+};
+
+#define IPA_PKT_STATUS_SIZE 32
+
+/*! @brief Status header opcodes */
+enum ipa_hw_status_opcode {
+	IPA_HW_STATUS_OPCODE_MIN,
+	IPA_HW_STATUS_OPCODE_PACKET = IPA_HW_STATUS_OPCODE_MIN,
+	IPA_HW_STATUS_OPCODE_NEW_FRAG_RULE,
+	IPA_HW_STATUS_OPCODE_DROPPED_PACKET,
+	IPA_HW_STATUS_OPCODE_SUSPENDED_PACKET,
+	IPA_HW_STATUS_OPCODE_XLAT_PACKET = 6,
+	IPA_HW_STATUS_OPCODE_MAX
+};
+
+/*! @brief Possible Masks received in status */
+enum ipa_hw_pkt_status_mask {
+	IPA_HW_PKT_STATUS_MASK_FRAG_PROCESS      = 0x1,
+	IPA_HW_PKT_STATUS_MASK_FILT_PROCESS      = 0x2,
+	IPA_HW_PKT_STATUS_MASK_NAT_PROCESS       = 0x4,
+	IPA_HW_PKT_STATUS_MASK_ROUTE_PROCESS     = 0x8,
+	IPA_HW_PKT_STATUS_MASK_TAG_VALID         = 0x10,
+	IPA_HW_PKT_STATUS_MASK_FRAGMENT          = 0x20,
+	IPA_HW_PKT_STATUS_MASK_FIRST_FRAGMENT    = 0x40,
+	IPA_HW_PKT_STATUS_MASK_V4                = 0x80,
+	IPA_HW_PKT_STATUS_MASK_CKSUM_PROCESS     = 0x100,
+	IPA_HW_PKT_STATUS_MASK_AGGR_PROCESS      = 0x200,
+	IPA_HW_PKT_STATUS_MASK_DEST_EOT          = 0x400,
+	IPA_HW_PKT_STATUS_MASK_DEAGGR_PROCESS    = 0x800,
+	IPA_HW_PKT_STATUS_MASK_DEAGG_FIRST       = 0x1000,
+	IPA_HW_PKT_STATUS_MASK_SRC_EOT           = 0x2000
+};
+
+/*! @brief Possible Exceptions received in status */
+enum ipa_hw_pkt_status_exception {
+	IPA_HW_PKT_STATUS_EXCEPTION_NONE           = 0x0,
+	IPA_HW_PKT_STATUS_EXCEPTION_DEAGGR         = 0x1,
+	IPA_HW_PKT_STATUS_EXCEPTION_REPL           = 0x2,
+	IPA_HW_PKT_STATUS_EXCEPTION_IPTYPE         = 0x4,
+	IPA_HW_PKT_STATUS_EXCEPTION_IHL            = 0x8,
+	IPA_HW_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS = 0x10,
+	IPA_HW_PKT_STATUS_EXCEPTION_SW_FILT        = 0x20,
+	IPA_HW_PKT_STATUS_EXCEPTION_NAT            = 0x40,
+	IPA_HW_PKT_STATUS_EXCEPTION_ACTUAL_MAX,
+	IPA_HW_PKT_STATUS_EXCEPTION_MAX            = 0xFF
+};
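To show how the opcode, mask and exception values above fit together, a hedged sketch of inspecting one status entry on the Rx path:

static bool example_status_has_valid_tag(const struct ipa_hw_pkt_status *st)
{
	if (st->status_opcode != IPA_HW_STATUS_OPCODE_PACKET)
		return false;
	if (st->exception != IPA_HW_PKT_STATUS_EXCEPTION_NONE)
		return false;

	/* TAG_VALID means the tag_f_* fields carry a meaningful tag */
	return !!(st->status_mask & IPA_HW_PKT_STATUS_MASK_TAG_VALID);
}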
+
+/*! @brief IPA_HW_IMM_CMD_DMA_SHARED_MEM Immediate Command Parameters */
+struct ipa_hw_imm_cmd_dma_shared_mem {
+	u32 reserved_1:16;
+	u32 size:16;
+	u32 system_addr:32;
+	u32 local_addr:16;
+	u32 direction:1;
+	u32 skip_pipeline_clear:1;
+	u32 reserved_2:14;
+	u32 padding:32;
+};
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
new file mode 100644
index 0000000..350b5a1
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -0,0 +1,1853 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_I_H_
+#define _IPA_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/ipa.h>
+#include <linux/msm-sps.h>
+#include <linux/platform_device.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/ipa_uc_offload.h>
+#include "ipa_hw_defs.h"
+#include "ipa_ram_mmap.h"
+#include "ipa_reg.h"
+#include "ipa_qmi_service.h"
+#include "../ipa_api.h"
+#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
+
+#define DRV_NAME "ipa"
+#define NAT_DEV_NAME "ipaNatTable"
+#define IPA_COOKIE 0x57831603
+#define MTU_BYTE 1500
+
+#define IPA_MAX_NUM_PIPES 0x14
+#define IPA_SYS_DESC_FIFO_SZ 0x2000
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_LAN_RX_HEADER_LENGTH (2)
+#define IPA_QMAP_HEADER_LENGTH (4)
+#define IPA_DL_CHECKSUM_LENGTH (8)
+#define IPA_NUM_DESC_PER_SW_TX (2)
+#define IPA_GENERIC_RX_POOL_SZ 1000
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
+#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
+
+#define IPA_MAX_STATUS_STAT_NUM 30
+
+#define IPADBG(fmt, args...) \
+	pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAERR(fmt, args...) \
+	pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP  19
+#define WLAN1_CONS_RX_EP  14
+#define WLAN2_CONS_RX_EP  16
+#define WLAN3_CONS_RX_EP  17
+#define WLAN4_CONS_RX_EP  18
+
+#define MAX_NUM_EXCP     8
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
+#define IPA_STATS_EXCP_CNT(flags, base) do {			\
+			int i;					\
+			for (i = 0; i < MAX_NUM_EXCP; i++)	\
+				if (flags & BIT(i))		\
+					++base[i];		\
+			if (flags == 0)				\
+				++base[MAX_NUM_EXCP - 1];	\
+			} while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x)
+#define IPA_STATS_EXCP_CNT(flags, base) do { } while (0)
+#endif
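As an illustration of IPA_STATS_EXCP_CNT above, each set bit in an exception flags byte bumps one slot of a MAX_NUM_EXCP-sized counter array, and a zero flags value bumps the last slot; the counter array below is a placeholder:

static void example_count_rx_exceptions(u8 excp_flags)
{
	static u32 excp_cnt[MAX_NUM_EXCP];

	IPA_STATS_EXCP_CNT(excp_flags, excp_cnt);
}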
+
+#define IPA_TOS_EQ			BIT(0)
+#define IPA_PROTOCOL_EQ			BIT(1)
+#define IPA_OFFSET_MEQ32_0		BIT(2)
+#define IPA_OFFSET_MEQ32_1		BIT(3)
+#define IPA_IHL_OFFSET_RANGE16_0	BIT(4)
+#define IPA_IHL_OFFSET_RANGE16_1	BIT(5)
+#define IPA_IHL_OFFSET_EQ_16		BIT(6)
+#define IPA_IHL_OFFSET_EQ_32		BIT(7)
+#define IPA_IHL_OFFSET_MEQ32_0		BIT(8)
+#define IPA_OFFSET_MEQ128_0		BIT(9)
+#define IPA_OFFSET_MEQ128_1		BIT(10)
+#define IPA_TC_EQ			BIT(11)
+#define IPA_FL_EQ			BIT(12)
+#define IPA_IHL_OFFSET_MEQ32_1		BIT(13)
+#define IPA_METADATA_COMPARE		BIT(14)
+#define IPA_IS_FRAG			BIT(15)
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+/*
+ * Due to a ZLT issue with the USB 3.0 core, the IPA BAM threshold needs to be
+ * set to max packet size + 1. After setting the threshold, the USB core
+ * will not be notified on ZLTs
+ */
+#define IPA_USB_EVENT_THRESHOLD 0x4001
+
+#define IPA_RX_POOL_CEIL 32
+#define IPA_RX_SKB_SIZE 1792
+
+#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr"
+#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
+#define IPA_INVALID_L4_PROTOCOL 0xFF
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+			(reg |= ((val) << (shift)) & (mask))
+
+#define IPA_HW_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + 127) & ~127)
+#define IPA_RT_FLT_HW_RULE_BUF_SIZE	(128)
+
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
+	~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1))
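A few worked examples of the helpers above, assuming nothing beyond the macro definitions themselves:

/*
 * IPA_SETFIELD(0x5, 4, 0xF0)            == 0x50
 * IPA_HW_TABLE_ALIGNMENT(130)           == 256 (128-byte boundary)
 * IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(13)  == 16  (8-byte boundary)
 */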
+
+#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
+#define IPA_MEM_PART(x_) (ipa_ctx->ctrl->mem_partition.x_)
+
+#define IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA2_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+
+struct ipa2_active_client_htable_entry {
+	struct hlist_node list;
+	char id_string[IPA2_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	int count;
+	enum ipa_active_client_log_type type;
+};
+
+struct ipa2_active_clients_log_ctx {
+	char *log_buffer[IPA2_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+	int log_head;
+	int log_tail;
+	bool log_rdy;
+	struct hlist_head htable[IPA2_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
+
+struct ipa_client_names {
+	enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
+	int length;
+};
+
+struct ipa_smmu_cb_ctx {
+	bool valid;
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	struct iommu_domain *iommu;
+	unsigned long next_addr;
+	u32 va_start;
+	u32 va_size;
+	u32 va_end;
+};
+
+/**
+ * struct ipa_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ */
+struct ipa_flt_entry {
+	struct list_head link;
+	struct ipa_flt_rule rule;
+	u32 cookie;
+	struct ipa_flt_tbl *tbl;
+	struct ipa_rt_tbl *rt_tbl;
+	u32 hw_len;
+	int id;
+};
+
+/**
+ * struct ipa_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ * @id: routing table id
+ */
+struct ipa_rt_tbl {
+	struct list_head link;
+	struct list_head head_rt_rule_list;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u32 idx;
+	u32 rule_cnt;
+	u32 ref_cnt;
+	struct ipa_rt_tbl_set *set;
+	u32 cookie;
+	bool in_sys;
+	u32 sz;
+	struct ipa_mem_buffer curr_mem;
+	struct ipa_mem_buffer prev_mem;
+	int id;
+};
+
+/**
+ * struct ipa_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @type: l2 header type
+ * @is_partial: flag indicating if header table entry is partial
+ * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
+ * true - hdr entry resides in DDR and pointed to by proc ctx
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
+ * else 0
+ * @proc_ctx: processing context header
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of routing table
+ * @id: header entry id
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_hdr_entry {
+	struct list_head link;
+	u8 hdr[IPA_HDR_MAX_SIZE];
+	u32 hdr_len;
+	char name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type type;
+	u8 is_partial;
+	bool is_hdr_proc_ctx;
+	dma_addr_t phys_base;
+	struct ipa_hdr_proc_ctx_entry *proc_ctx;
+	struct ipa_hdr_offset_entry *offset_entry;
+	u32 cookie;
+	u32 ref_cnt;
+	int id;
+	u8 is_eth2_ofst_valid;
+	u16 eth2_ofst;
+};
+
+/**
+ * struct ipa_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa_hdr_tbl {
+	struct list_head head_hdr_entry_list;
+	struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+	u32 hdr_cnt;
+	u32 end;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_offset_entry - IPA processing context header
+ * offset entry
+ * @link: entry's link in global processing context header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa_hdr_proc_ctx_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hdr_proc_ctx_add_hdr_seq {
+	struct ipa_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hdr_proc_ctx_add_hdr_cmd_seq {
+	struct ipa_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hdr_proc_ctx_tlv cmd;
+	struct ipa_hdr_proc_ctx_tlv end;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_entry - IPA processing context header table entry
+ * @link: entry's link in global header table entries list
+ * @type: processing context type
+ * @offset_entry: entry's offset
+ * @hdr: the header
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of routing table
+ * @id: processing context header entry id
+ */
+struct ipa_hdr_proc_ctx_entry {
+	struct list_head link;
+	enum ipa_hdr_proc_type type;
+	struct ipa_hdr_proc_ctx_offset_entry *offset_entry;
+	struct ipa_hdr_entry *hdr;
+	u32 cookie;
+	u32 ref_cnt;
+	int id;
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_tbl - IPA processing context header table
+ * @head_proc_ctx_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @proc_ctx_cnt: number of processing context headers
+ * @end: the last processing context header index
+ * @start_offset: offset in words of processing context header table
+ */
+struct ipa_hdr_proc_ctx_tbl {
+	struct list_head head_proc_ctx_entry_list;
+	struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	u32 proc_ctx_cnt;
+	u32 end;
+	u32 start_offset;
+};
+
+/**
+ * struct ipa_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter table
+ * @end: the last header index
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ */
+struct ipa_flt_tbl {
+	struct list_head head_flt_rule_list;
+	u32 rule_cnt;
+	bool in_sys;
+	u32 sz;
+	struct ipa_mem_buffer curr_mem;
+	struct ipa_mem_buffer prev_mem;
+	bool sticky_rear;
+};
+
+/**
+ * struct ipa_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @proc_ctx: processing context table
+ * @hw_len: the length of the table
+ */
+struct ipa_rt_entry {
+	struct list_head link;
+	struct ipa_rt_rule rule;
+	u32 cookie;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_hdr_entry *hdr;
+	struct ipa_hdr_proc_ctx_entry *proc_ctx;
+	u32 hw_len;
+	int id;
+};
+
+/**
+ * struct ipa_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa_rt_tbl_set {
+	struct list_head head_rt_tbl_list;
+	u32 tbl_cnt;
+};
+
+/**
+ * struct ipa_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ *	set this bit in order to enable Statuses. Output Pipe - send
+ *	Status indications only if bit is set. Input Pipe - forward Status
+ *	indication to STATUS_ENDP only if bit is set. Valid for Input
+ *	and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ *	specified Status End Point. Status endpoint needs to be
+ *	configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ *	Consumer)
+ */
+struct ipa_ep_cfg_status {
+	bool status_en;
+	u8 status_ep;
+};
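A hedged sketch of how this configuration might be applied through the core-specific ipa_cfg_ep_status operation declared later in this header; the pipe handle and status endpoint are placeholders:

static void example_enable_ep_status(u32 clnt_hdl, u8 status_ep)
{
	struct ipa_ep_cfg_status cfg = {
		.status_en = true,
		.status_ep = status_ep,	/* pipe that receives the statuses */
	};

	ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, &cfg);
}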
+
+/**
+ * struct ipa_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Count of packets that were not recycled
+ * @rx_dp_fail: Packets failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa_wlan_stats {
+	u32 rx_pkts_rcvd;
+	u32 rx_pkts_status_rcvd;
+	u32 rx_hd_processed;
+	u32 rx_hd_reply;
+	u32 rx_hd_rcvd;
+	u32 rx_pkt_leak;
+	u32 rx_dp_fail;
+	u32 tx_pkts_rcvd;
+	u32 tx_pkts_sent;
+	u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ */
+struct ipa_wlan_comm_memb {
+	spinlock_t wlan_spinlock;
+	spinlock_t ipa_tx_mul_spinlock;
+	u32 wlan_comm_total_cnt;
+	u32 wlan_comm_free_cnt;
+	u32 total_tx_pkts_freed;
+	struct list_head wlan_comm_desc_list;
+	atomic_t active_clnt_cnt;
+};
+
+struct ipa_status_stats {
+	struct ipa_hw_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+	int curr;
+};
+
+enum ipa_wakelock_ref_client {
+	IPA_WAKELOCK_REF_CLIENT_TX  = 0,
+	IPA_WAKELOCK_REF_CLIENT_LAN_RX = 1,
+	IPA_WAKELOCK_REF_CLIENT_WAN_RX = 2,
+	IPA_WAKELOCK_REF_CLIENT_WLAN_RX = 3,
+	IPA_WAKELOCK_REF_CLIENT_ODU_RX = 4,
+	IPA_WAKELOCK_REF_CLIENT_SPS = 5,
+	IPA_WAKELOCK_REF_CLIENT_MAX
+};
+
+/**
+ * struct ipa_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information which will be forwarded once the user is
+ *        notified of new data availability
+ * @client_notify: user provided CB for EP event notification; the event is
+ *                 data received.
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @rx_replenish_threshold: Indicates the WM value which requires the RX
+ *                          descriptors replenish function to be called to
+ *                          avoid the RX pipe to run out of descriptors
+ *                          and cause HOLB.
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether the QMI request to enable the clear
+ *					data path has been sent.
+ * @napi_enabled: when true, IPA calls the client callback to start polling
+ */
+struct ipa_ep_context {
+	int valid;
+	enum ipa_client_type client;
+	struct sps_pipe *ep_hdl;
+	struct ipa_ep_cfg cfg;
+	struct ipa_ep_cfg_holb holb;
+	struct ipa_ep_cfg_status status;
+	u32 dst_pipe_index;
+	u32 rt_tbl_idx;
+	struct sps_connect connect;
+	void *priv;
+	void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+	bool desc_fifo_in_pipe_mem;
+	bool data_fifo_in_pipe_mem;
+	u32 desc_fifo_pipe_mem_ofst;
+	u32 data_fifo_pipe_mem_ofst;
+	bool desc_fifo_client_allocated;
+	bool data_fifo_client_allocated;
+	atomic_t avail_fifo_desc;
+	u32 dflt_flt4_rule_hdl;
+	u32 dflt_flt6_rule_hdl;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct ipa_wlan_stats wstats;
+	u32 uc_offload_state;
+	u32 rx_replenish_threshold;
+	bool disconnect_in_progress;
+	u32 qmi_request_sent;
+	enum ipa_wakelock_ref_client wakelock_client;
+	bool napi_enabled;
+	bool switch_to_intr;
+	int inactive_cycles;
+	u32 eot_in_poll_err;
+	bool ep_disabled;
+
+	/* sys MUST be the last element of this struct */
+	struct ipa_sys_context *sys;
+};
+
+enum ipa_sys_pipe_policy {
+	IPA_POLICY_INTR_MODE,
+	IPA_POLICY_NOINTR_MODE,
+	IPA_POLICY_INTR_POLL_MODE,
+};
+
+struct ipa_repl_ctx {
+	struct ipa_rx_pkt_wrapper **cache;
+	atomic_t head_idx;
+	atomic_t tail_idx;
+	u32 capacity;
+};
+
+/**
+ * struct ipa_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa_sys_context {
+	u32 len;
+	struct sps_register_event event;
+	atomic_t curr_polling_state;
+	struct delayed_work switch_to_intr_work;
+	enum ipa_sys_pipe_policy policy;
+	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa_sys_context *sys);
+	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
+	void (*free_skb)(struct sk_buff *skb);
+	u32 rx_buff_sz;
+	u32 rx_pool_sz;
+	struct sk_buff *prev_skb;
+	unsigned int len_rem;
+	unsigned int len_pad;
+	unsigned int len_partial;
+	bool drop_packet;
+	struct work_struct work;
+	void (*sps_callback)(struct sps_event_notify *notify);
+	enum sps_option sps_option;
+	struct delayed_work replenish_rx_work;
+	struct work_struct repl_work;
+	void (*repl_hdlr)(struct ipa_sys_context *sys);
+	struct ipa_repl_ctx repl;
+	unsigned int repl_trig_cnt;
+	unsigned int repl_trig_thresh;
+
+	/* ordering is important - mutable fields go above */
+	struct ipa_ep_context *ep;
+	struct list_head head_desc_list;
+	struct list_head rcycl_list;
+	spinlock_t spinlock;
+	struct workqueue_struct *wq;
+	struct workqueue_struct *repl_wq;
+	struct ipa_status_stats *status_stat;
+	/* ordering is important - other immutable fields go below */
+};
+
+/**
+ * enum ipa_desc_type - IPA descriptor type
+ *
+ * IPA descriptor types; IPA supports DD and ICD but not CD
+ */
+enum ipa_desc_type {
+	IPA_DATA_DESC,
+	IPA_DATA_DESC_SKB,
+	IPA_DATA_DESC_SKB_PAGED,
+	IPA_IMM_CMD_DESC
+};
+
+/**
+ * struct ipa_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: specify if this packet is for the skb or immediate command
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ * @no_unmap_dma: when true, the buffer will not be dma unmapped
+ *
+ * This struct can wrap both data packet and immediate command packet.
+ */
+struct ipa_tx_pkt_wrapper {
+	enum ipa_desc_type type;
+	struct ipa_mem_buffer mem;
+	struct work_struct work;
+	struct list_head link;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct ipa_sys_context *sys;
+	struct ipa_mem_buffer mult;
+	u32 cnt;
+	void *bounce;
+	bool no_unmap_dma;
+};
+
+/**
+ * struct ipa_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * @frag: points to paged fragment
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @dma_address: dma mapped address of pyld
+ * @dma_address_valid: valid field for dma_address
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa_desc {
+	enum ipa_desc_type type;
+	void *pyld;
+	skb_frag_t *frag;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+	u16 len;
+	u16 opcode;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct completion xfer_done;
+};
+
+/**
+ * struct ipa_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa_rx_pkt_wrapper {
+	struct list_head link;
+	struct ipa_rx_data data;
+	u32 len;
+	struct work_struct work;
+	struct ipa_sys_context *sys;
+};
+
+/**
+ * struct ipa_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ * @nat_base_address: nat table virtual address
+ * @ipv4_rules_addr: base nat table address
+ * @ipv4_expansion_rules_addr: expansion table address
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index expansion table address
+ * @size_base_tables: base table size
+ * @size_expansion_tables: expansion table size
+ * @public_ip_addr: ip address of nat table
+ */
+struct ipa_nat_mem {
+	struct class *class;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t dev_num;
+	void *vaddr;
+	dma_addr_t dma_handle;
+	size_t size;
+	bool is_mapped;
+	bool is_sys_mem;
+	bool is_dev_init;
+	bool is_dev;
+	struct mutex lock;
+	void *nat_base_address;
+	char *ipv4_rules_addr;
+	char *ipv4_expansion_rules_addr;
+	char *index_table_addr;
+	char *index_table_expansion_addr;
+	u32 size_base_tables;
+	u32 size_expansion_tables;
+	u32 public_ip_addr;
+	void *tmp_vaddr;
+	dma_addr_t tmp_dma_handle;
+	bool is_tmp_mem;
+};
+
+/**
+ * enum ipa_hw_mode - IPA hardware mode
+ * @IPA_HW_MODE_NORMAL: Regular IPA hardware
+ * @IPA_HW_MODE_VIRTUAL: IPA hardware supporting virtual memory allocation
+ * @IPA_HW_MODE_PCIE: IPA hardware supporting memory allocation over PCIE Bridge
+ */
+enum ipa_hw_mode {
+	IPA_HW_MODE_NORMAL  = 0,
+	IPA_HW_MODE_VIRTUAL = 1,
+	IPA_HW_MODE_PCIE    = 2
+};
+
+enum ipa_config_this_ep {
+	IPA_CONFIGURE_THIS_EP,
+	IPA_DO_NOT_CONFIGURE_THIS_EP,
+};
+
+struct ipa_stats {
+	u32 tx_sw_pkts;
+	u32 tx_hw_pkts;
+	u32 rx_pkts;
+	u32 rx_excp_pkts[MAX_NUM_EXCP];
+	u32 rx_repl_repost;
+	u32 tx_pkts_compl;
+	u32 rx_q_len;
+	u32 msg_w[IPA_EVENT_MAX_NUM];
+	u32 msg_r[IPA_EVENT_MAX_NUM];
+	u32 stat_compl;
+	u32 aggr_close;
+	u32 wan_aggr_close;
+	u32 wan_rx_empty;
+	u32 wan_repl_rx_empty;
+	u32 lan_rx_empty;
+	u32 lan_repl_rx_empty;
+	u32 flow_enable;
+	u32 flow_disable;
+	u32 tx_non_linear;
+};
+
+struct ipa_active_clients {
+	struct mutex mutex;
+	spinlock_t spinlock;
+	bool mutex_locked;
+	int cnt;
+};
+
+struct ipa_wakelock_ref_cnt {
+	spinlock_t spinlock;
+	u32 cnt;
+};
+
+struct ipa_tag_completion {
+	struct completion comp;
+	atomic_t cnt;
+};
+
+struct ipa_controller;
+
+/**
+ * struct ipa_uc_hdlrs - IPA uC callback functions
+ * @ipa_uc_loaded_hdlr: Function handler when uC is loaded
+ * @ipa_uc_event_hdlr: Event handler function
+ * @ipa_uc_response_hdlr: Response handler function
+ * @ipa_uc_event_log_info_hdlr: Log event handler function
+ */
+struct ipa_uc_hdlrs {
+	void (*ipa_uc_loaded_hdlr)(void);
+
+	void (*ipa_uc_event_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio);
+	int (*ipa_uc_response_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio,
+		u32 *uc_status);
+	void (*ipa_uc_event_log_info_hdlr)
+		(struct IpaHwEventLogInfoData_t *uc_event_top_mmio);
+};
+
+/**
+ * enum ipa_hw_flags - flags which defines the behavior of HW
+ *
+ * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert
+ *	failure.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported
+ *	in the event ring only. No event to CPU.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event
+ *	IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST
+ * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by
+ *	QMB (avoid memcpy)
+ * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in
+ *	IN Channel
+ * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is
+ *	entering a mode where it expects a doorbell to be rung for OUT Channel
+ * @IPA_HW_FLAG_NO_START_OOB_TIMER
+ */
+enum ipa_hw_flags {
+	IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE	= 0x01,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR		= 0x02,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP	= 0x04,
+	IPA_HW_FLAG_WORK_OVER_DDR			= 0x08,
+	IPA_HW_FLAG_NO_REPORT_OOB			= 0x10,
+	IPA_HW_FLAG_NO_REPORT_DB_MODE			= 0x20,
+	IPA_HW_FLAG_NO_START_OOB_TIMER			= 0x40
+};
+
+/**
+ * struct ipa_uc_ctx - IPA uC context
+ * @uc_inited: Indicates if uC interface has been initialized
+ * @uc_loaded: Indicates if uC has loaded
+ * @uc_failed: Indicates if uC has failed / returned an error
+ * @uc_lock: uC interface lock to allow only one uC interaction at a time
+ * @uc_completion: Completion mechanism to wait for uC commands
+ * @uc_sram_mmio: Pointer to uC mapped memory
+ * @pending_cmd: The last command sent waiting to be ACKed
+ * @uc_status: The last status provided by the uC
+ * @uc_zip_error: uC has notified the APPS upon a ZIP engine error
+ * @uc_error_type: error type from uC error event
+ */
+struct ipa_uc_ctx {
+	bool uc_inited;
+	bool uc_loaded;
+	bool uc_failed;
+	struct mutex uc_lock;
+	struct completion uc_completion;
+	struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio;
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio;
+	u32 uc_event_top_ofst;
+	u32 pending_cmd;
+	u32 uc_status;
+	bool uc_zip_error;
+	u32 uc_error_type;
+	phys_addr_t rdy_ring_base_pa;
+	phys_addr_t rdy_ring_rp_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct ipa_uc_wdi_ctx
+ * @wdi_uc_top_ofst:
+ * @wdi_uc_top_mmio:
+ * @wdi_uc_stats_ofst:
+ * @wdi_uc_stats_mmio:
+ */
+struct ipa_uc_wdi_ctx {
+	/* WDI specific fields */
+	u32 wdi_uc_stats_ofst;
+	struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * struct ipa_sps_pm - SPS power management related members
+ * @dec_clients: true if need to decrease active clients count
+ * @eot_activity: represent EOT interrupt activity to determine to reset
+ *  the inactivity timer
+ * @sps_pm_lock: Lock to protect the sps_pm functionality.
+ */
+struct ipa_sps_pm {
+	atomic_t dec_clients;
+	atomic_t eot_activity;
+	struct mutex sps_pm_lock;
+};
+
+/**
+ * struct ipacm_client_info - the client-info indicated from IPACM
+ * @ipacm_client_enum: the enum to indicate tether-client
+ * @ipacm_client_uplink: the bool to indicate pipe for uplink
+ */
+struct ipacm_client_info {
+	enum ipacm_client_enum client_enum;
+	bool uplink;
+};
+
+/**
+ * struct ipa_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @skip_ep_cfg_shadow: state to update filter table correctly across
+ *  power-save
+ * @resume_on_connect: resume ep on ipa_connect
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @glob_flt_tbl: global filter table
+ * @hdr_tbl: IPA header table
+ * @hdr_proc_ctx_tbl: IPA processing context table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @hdr_proc_ctx_cache: processing context cache
+ * @hdr_proc_ctx_offset_cache: processing context offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa_sys_context
+ * @smem_sz: shared memory size available for SW use starting
+ *  from non-restricted bytes
+ * @smem_restricted_bytes: the bytes that SW should not use in the shared mem
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
+ * @hdr_mem: header memory
+ * @hdr_proc_ctx_mem: processing context memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @empty_rt_tbl_mem: empty routing tables memory
+ * @power_mgmt_wq: workqueue for power management
+ * @sps_power_mgmt_wq: workqueue SPS related power management
+ * @tag_process_before_gating: indicates whether to start tag process before
+ *  gating IPA clocks
+ * @sps_pm: sps power management related information
+ * @disconnect_lock: protects LAN_CONS packet receive notification CB
+ * @pipe_mem_pool: pipe memory pool
+ * @dma_pool: special purpose DMA pool
+ * @ipa_active_clients: structure for reference counting connected IPA clients
+ * @ipa_hw_type: version of the IPA HW (e.g. IPA 1.0, IPA 1.1, etc.)
+ * @ipa_hw_mode: mode of the IPA HW (e.g. Normal, Virtual or over PCIe)
+ * @use_ipa_teth_bridge: use tethering bridge driver
+ * @ipa_bam_remote_mode: ipa bam is in remote mode
+ * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @ipa_wdi2: using wdi-2.0
+ * @ipa_bus_hdl: msm driver handle for the data path bus
+ * @ctrl: holds the core specific operations based on
+ *  core version (vtable like)
+ * @enable_clock_scaling: whether clock scaling is enabled
+ * @curr_ipa_clk_rate: ipa_clk current rate
+ * @wcstats: wlan common buffer stats
+ * @uc_ctx: uC interface context
+ * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @ipa_num_pipes: The number of pipes used by IPA HW
+ * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
+ * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
+ * @w_lock: Indicates the wakeup source.
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
+ *
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	unsigned long bam_handle;
+	struct ipa_ep_context ep[IPA_MAX_NUM_PIPES];
+	bool skip_ep_cfg_shadow[IPA_MAX_NUM_PIPES];
+	bool resume_on_connect[IPA_CLIENT_MAX];
+	struct ipa_flt_tbl flt_tbl[IPA_MAX_NUM_PIPES][IPA_IP_MAX];
+	void __iomem *mmio;
+	u32 ipa_wrapper_base;
+	u32 ipa_wrapper_size;
+	struct ipa_flt_tbl glob_flt_tbl[IPA_IP_MAX];
+	struct ipa_hdr_tbl hdr_tbl;
+	struct ipa_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
+	struct ipa_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+	struct ipa_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+	struct kmem_cache *flt_rule_cache;
+	struct kmem_cache *rt_rule_cache;
+	struct kmem_cache *hdr_cache;
+	struct kmem_cache *hdr_offset_cache;
+	struct kmem_cache *hdr_proc_ctx_cache;
+	struct kmem_cache *hdr_proc_ctx_offset_cache;
+	struct kmem_cache *rt_tbl_cache;
+	struct kmem_cache *tx_pkt_wrapper_cache;
+	struct kmem_cache *rx_pkt_wrapper_cache;
+	unsigned long rt_idx_bitmap[IPA_IP_MAX];
+	struct mutex lock;
+	u16 smem_sz;
+	u16 smem_restricted_bytes;
+	u16 smem_reqd_sz;
+	struct ipa_nat_mem nat_mem;
+	u32 excp_hdr_hdl;
+	u32 dflt_v4_rt_rule_hdl;
+	u32 dflt_v6_rt_rule_hdl;
+	uint aggregation_type;
+	uint aggregation_byte_limit;
+	uint aggregation_time_limit;
+	bool hdr_tbl_lcl;
+	bool hdr_proc_ctx_tbl_lcl;
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer hdr_proc_ctx_mem;
+	bool ip4_rt_tbl_lcl;
+	bool ip6_rt_tbl_lcl;
+	bool ip4_flt_tbl_lcl;
+	bool ip6_flt_tbl_lcl;
+	struct ipa_mem_buffer empty_rt_tbl_mem;
+	struct gen_pool *pipe_mem_pool;
+	struct dma_pool *dma_pool;
+	struct ipa_active_clients ipa_active_clients;
+	struct ipa2_active_clients_log_ctx ipa2_active_clients_logging;
+	struct workqueue_struct *power_mgmt_wq;
+	struct workqueue_struct *sps_power_mgmt_wq;
+	bool tag_process_before_gating;
+	struct ipa_sps_pm sps_pm;
+	u32 clnt_hdl_cmd;
+	u32 clnt_hdl_data_in;
+	u32 clnt_hdl_data_out;
+	spinlock_t disconnect_lock;
+	u8 a5_pipe_index;
+	struct list_head intf_list;
+	struct list_head msg_list;
+	struct list_head pull_msg_list;
+	struct mutex msg_lock;
+	wait_queue_head_t msg_waitq;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa_hw_mode ipa_hw_mode;
+	bool use_ipa_teth_bridge;
+	bool ipa_bam_remote_mode;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	/* featurize if memory footprint becomes a concern */
+	struct ipa_stats stats;
+	void *smem_pipe_mem;
+	u32 ipa_bus_hdl;
+	struct ipa_controller *ctrl;
+	struct idr ipa_idr;
+	struct device *pdev;
+	struct device *uc_pdev;
+	spinlock_t idr_lock;
+	u32 enable_clock_scaling;
+	u32 curr_ipa_clk_rate;
+	bool q6_proxy_clk_vote_valid;
+	u32 ipa_num_pipes;
+
+	struct ipa_wlan_comm_memb wc_memb;
+
+	struct ipa_uc_ctx uc_ctx;
+
+	struct ipa_uc_wdi_ctx uc_wdi_ctx;
+	struct ipa_uc_ntn_ctx uc_ntn_ctx;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	bool smmu_present;
+	bool smmu_s1_bypass;
+	unsigned long peer_bam_iova;
+	phys_addr_t peer_bam_pa;
+	u32 peer_bam_map_size;
+	unsigned long peer_bam_dev;
+	u32 peer_bam_map_cnt;
+	u32 wdi_map_cnt;
+	bool use_dma_zone;
+	struct wakeup_source w_lock;
+	struct ipa_wakelock_ref_cnt wakelock_ref_cnt;
+
+	/* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
+	bool ipa_client_apps_wan_cons_agg_gro;
+	/* M-release support to know client pipes */
+	struct ipacm_client_info ipacm_client[IPA_MAX_NUM_PIPES];
+	bool tethered_flow_control;
+	u32 ipa_rx_min_timeout_usec;
+	u32 ipa_rx_max_timeout_usec;
+	u32 ipa_polling_iteration;
+};
+
+/**
+ * struct ipa_route - IPA route
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ *    packets and frag new rule statuses, if source pipe does not have
+ *    a notification status pipe defined.
+ */
+struct ipa_route {
+	u32 route_dis;
+	u32 route_def_pipe;
+	u32 route_def_hdr_table;
+	u32 route_def_hdr_ofst;
+	u8  route_frag_def_pipe;
+};
+
+/**
+ * enum ipa_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa_pipe_mem_type {
+	IPA_SPS_PIPE_MEM = 0,
+	IPA_PRIVATE_MEM  = 1,
+	IPA_SYSTEM_MEM   = 2,
+};
+
+struct ipa_plat_drv_res {
+	bool use_ipa_teth_bridge;
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 bam_mem_base;
+	u32 bam_mem_size;
+	u32 ipa_irq;
+	u32 bam_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa_hw_mode ipa_hw_mode;
+	u32 ee;
+	bool ipa_bam_remote_mode;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	bool use_dma_zone;
+	bool tethered_flow_control;
+	u32 ipa_rx_polling_sleep_msec;
+	u32 ipa_polling_iteration;
+};
+
+struct ipa_mem_partition {
+	u16 ofst_start;
+	u16 nat_ofst;
+	u16 nat_size;
+	u16 v4_flt_ofst;
+	u16 v4_flt_size;
+	u16 v4_flt_size_ddr;
+	u16 v6_flt_ofst;
+	u16 v6_flt_size;
+	u16 v6_flt_size_ddr;
+	u16 v4_rt_ofst;
+	u16 v4_num_index;
+	u16 v4_modem_rt_index_lo;
+	u16 v4_modem_rt_index_hi;
+	u16 v4_apps_rt_index_lo;
+	u16 v4_apps_rt_index_hi;
+	u16 v4_rt_size;
+	u16 v4_rt_size_ddr;
+	u16 v6_rt_ofst;
+	u16 v6_num_index;
+	u16 v6_modem_rt_index_lo;
+	u16 v6_modem_rt_index_hi;
+	u16 v6_apps_rt_index_lo;
+	u16 v6_apps_rt_index_hi;
+	u16 v6_rt_size;
+	u16 v6_rt_size_ddr;
+	u16 modem_hdr_ofst;
+	u16 modem_hdr_size;
+	u16 apps_hdr_ofst;
+	u16 apps_hdr_size;
+	u16 apps_hdr_size_ddr;
+	u16 modem_hdr_proc_ctx_ofst;
+	u16 modem_hdr_proc_ctx_size;
+	u16 apps_hdr_proc_ctx_ofst;
+	u16 apps_hdr_proc_ctx_size;
+	u16 apps_hdr_proc_ctx_size_ddr;
+	u16 modem_comp_decomp_ofst;
+	u16 modem_comp_decomp_size;
+	u16 modem_ofst;
+	u16 modem_size;
+	u16 apps_v4_flt_ofst;
+	u16 apps_v4_flt_size;
+	u16 apps_v6_flt_ofst;
+	u16 apps_v6_flt_size;
+	u16 uc_info_ofst;
+	u16 uc_info_size;
+	u16 end_ofst;
+	u16 apps_v4_rt_ofst;
+	u16 apps_v4_rt_size;
+	u16 apps_v6_rt_ofst;
+	u16 apps_v6_rt_size;
+};
+
+struct ipa_controller {
+	struct ipa_mem_partition mem_partition;
+	u32 ipa_clk_rate_turbo;
+	u32 ipa_clk_rate_nominal;
+	u32 ipa_clk_rate_svs;
+	u32 clock_scaling_bw_threshold_turbo;
+	u32 clock_scaling_bw_threshold_nominal;
+	u32 ipa_reg_base_ofst;
+	u32 max_holb_tmr_val;
+	void (*ipa_sram_read_settings)(void);
+	int (*ipa_init_sram)(void);
+	int (*ipa_init_hdr)(void);
+	int (*ipa_init_rt4)(void);
+	int (*ipa_init_rt6)(void);
+	int (*ipa_init_flt4)(void);
+	int (*ipa_init_flt6)(void);
+	void (*ipa_cfg_ep_hdr)(u32 pipe_number,
+			const struct ipa_ep_cfg_hdr *ipa_ep_hdr_cfg);
+	int (*ipa_cfg_ep_hdr_ext)(u32 pipe_number,
+		const struct ipa_ep_cfg_hdr_ext *ipa_ep_hdr_ext_cfg);
+	void (*ipa_cfg_ep_aggr)(u32 pipe_number,
+			const struct ipa_ep_cfg_aggr *ipa_ep_agrr_cfg);
+	int (*ipa_cfg_ep_deaggr)(u32 pipe_index,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr);
+	void (*ipa_cfg_ep_nat)(u32 pipe_number,
+			const struct ipa_ep_cfg_nat *ipa_ep_nat_cfg);
+	void (*ipa_cfg_ep_mode)(u32 pipe_number, u32 dst_pipe_number,
+			const struct ipa_ep_cfg_mode *ep_mode);
+	void (*ipa_cfg_ep_route)(u32 pipe_index, u32 rt_tbl_index);
+	void (*ipa_cfg_ep_holb)(u32 pipe_index,
+			const struct ipa_ep_cfg_holb *ep_holb);
+	void (*ipa_cfg_route)(struct ipa_route *route);
+	int (*ipa_read_gen_reg)(char *buff, int max_len);
+	int (*ipa_read_ep_reg)(char *buff, int max_len, int pipe);
+	void (*ipa_write_dbg_cnt)(int option);
+	int (*ipa_read_dbg_cnt)(char *buf, int max_len);
+	void (*ipa_cfg_ep_status)(u32 clnt_hdl,
+			const struct ipa_ep_cfg_status *ep_status);
+	int (*ipa_commit_flt)(enum ipa_ip_type ip);
+	int (*ipa_commit_rt)(enum ipa_ip_type ip);
+	int (*ipa_generate_rt_hw_rule)(enum ipa_ip_type ip,
+		struct ipa_rt_entry *entry, u8 *buf);
+	int (*ipa_commit_hdr)(void);
+	void (*ipa_cfg_ep_cfg)(u32 clnt_hdl,
+			const struct ipa_ep_cfg_cfg *cfg);
+	void (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl,
+			const struct ipa_ep_cfg_metadata_mask *metadata_mask);
+	void (*ipa_enable_clks)(void);
+	void (*ipa_disable_clks)(void);
+	struct msm_bus_scale_pdata *msm_bus_data_ptr;
+
+	void (*ipa_cfg_ep_metadata)(u32 pipe_number,
+			const struct ipa_ep_cfg_metadata *);
+};
+
+extern struct ipa_context *ipa_ctx;
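As the comment on @ctrl above notes, core-version-specific behaviour is dispatched through this vtable-like controller; a minimal sketch (locking elided) of invoking one op and of the IPA_MEM_PART() accessor defined earlier in this header:

static int example_commit_headers(void)
{
	/* offset of the modem header region in the SRAM partition */
	u16 modem_hdr_ofst = IPA_MEM_PART(modem_hdr_ofst);

	IPADBG("modem hdr ofst 0x%x\n", modem_hdr_ofst);

	/* dispatch through the per-core-version controller */
	return ipa_ctx->ctrl->ipa_commit_hdr();
}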
+
+/* public APIs */
+/*
+ * Connect / Disconnect
+ */
+int ipa2_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps, u32 *clnt_hdl);
+int ipa2_disconnect(u32 clnt_hdl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa2_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa2_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Disable ep
+ */
+int ipa2_disable_endpoint(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
+		      const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
+	const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa2_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa2_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa2_commit_hdr(void);
+
+int ipa2_reset_hdr(void);
+
+int ipa2_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa2_put_hdr(u32 hdr_hdl);
+
+int ipa2_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa2_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa2_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa2_commit_rt(enum ipa_ip_type ip);
+
+int ipa2_reset_rt(enum ipa_ip_type ip);
+
+int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa2_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa2_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa2_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa2_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa2_commit_flt(enum ipa_ip_type ip);
+
+int ipa2_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback);
+int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx);
+int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext);
+int ipa2_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa2_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa2_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa2_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa2_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc not list_head
+*/
+int ipa2_tx_dp_mul(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
+
+void ipa2_free_skb(struct ipa_rx_data *);
+
+/*
+ * System pipes
+ */
+int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa2_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa2_sys_setup(struct ipa_sys_connect_params *sys_in,
+	unsigned long *ipa_bam_hdl,
+	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+int ipa2_sys_teardown(u32 clnt_hdl);
+
+int ipa2_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl);
+
+int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+int ipa2_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa2_enable_wdi_pipe(u32 clnt_hdl);
+int ipa2_disable_wdi_pipe(u32 clnt_hdl);
+int ipa2_resume_wdi_pipe(u32 clnt_hdl);
+int ipa2_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa2_get_smem_restr_bytes(void);
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *inp,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp);
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+/*
+ * Retrieve the doorbell physical address of
+ * the WLAN pipes
+ */
+int ipa2_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * Check uC readiness; if the uC is not yet ready,
+ * register a callback to be invoked once it becomes ready
+ */
+int ipa2_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa2_uc_dereg_rdyCB(void);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int ipa2_teth_bridge_init(struct teth_bridge_init_params *params);
+
+int ipa2_teth_bridge_disconnect(enum ipa_client_type client);
+
+int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa2_get_client(int pipe_idx);
+
+bool ipa2_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa2_dma_init(void);
+
+int ipa2_dma_enable(void);
+
+int ipa2_dma_disable(void);
+
+int ipa2_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa2_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param);
+
+int ipa2_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa2_dma_destroy(void);
+
+/*
+ * MHI APIs for IPA MHI client driver
+ */
+int ipa2_init_mhi(struct ipa_mhi_init_params *params);
+
+int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+
+int ipa2_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client);
+
+int ipa2_disable_sps_pipe(enum ipa_client_type client);
+
+int ipa2_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa2_mhi_start_channel_internal(enum ipa_client_type client);
+
+int ipa2_mhi_suspend_ul_channels(void);
+
+int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index);
+
+/*
+ * mux id
+ */
+int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
+/*
+ * Miscellaneous
+ */
+void ipa2_bam_reg_dump(void);
+
+int ipa2_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa2_is_ready(void);
+
+void ipa2_proxy_clk_vote(void);
+void ipa2_proxy_clk_unvote(void);
+
+bool ipa2_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa2_get_client_mapping(int pipe_idx);
+
+enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa2_get_modem_cfg_emb_pipe_flt(void);
+
+/* internal functions */
+
+int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl);
+
+int ipa_send_one(struct ipa_sys_context *sys, struct ipa_desc *desc,
+		bool in_atomic);
+int ipa_send(struct ipa_sys_context *sys, u32 num_desc, struct ipa_desc *desc,
+		bool in_atomic);
+
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+			 const struct ipa_rule_attrib *attrib,
+			 u8 **buf,
+			 u16 *en_rule);
+int ipa_init_hw(void);
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+int ipa_set_single_ndp_per_mbim(bool);
+int ipa_set_hw_timer_fix_for_mbim_aggr(bool);
+void ipa_debugfs_init(void);
+void ipa_debugfs_remove(void);
+
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+
+void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time);
+
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+	ipa_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+int ipa_controller_static_bind(struct ipa_controller *controller,
+		enum ipa_hw_type ipa_hw_type);
+int ipa_cfg_route(struct ipa_route *route);
+int ipa_send_cmd(u16 num_desc, struct ipa_desc *descr);
+int ipa_cfg_filter(u32 disable);
+int ipa_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa_pipe_mem_free(u32 ofst, u32 size);
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa_context *ipa_get_ctx(void);
+void ipa_enable_clks(void);
+void ipa_disable_clks(void);
+void ipa2_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa2_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id);
+void ipa2_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa2_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+void ipa2_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+int ipa2_active_clients_log_print_buffer(char *buf, int size);
+int ipa2_active_clients_log_print_table(char *buf, int size);
+void ipa2_active_clients_log_clear(void);
+int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
+int __ipa_del_rt_rule(u32 rule_hdl);
+int __ipa_del_hdr(u32 hdr_hdl);
+int __ipa_release_hdr(u32 hdr_hdl);
+int __ipa_release_hdr_proc_ctx(u32 proc_ctx_hdl);
+int _ipa_read_gen_reg_v1_1(char *buff, int max_len);
+int _ipa_read_gen_reg_v2_0(char *buff, int max_len);
+int _ipa_read_ep_reg_v1_1(char *buf, int max_len, int pipe);
+int _ipa_read_ep_reg_v2_0(char *buf, int max_len, int pipe);
+void _ipa_write_dbg_cnt_v1_1(int option);
+void _ipa_write_dbg_cnt_v2_0(int option);
+int _ipa_read_dbg_cnt_v1_1(char *buf, int max_len);
+int _ipa_read_dbg_cnt_v2_0(char *buf, int max_len);
+void _ipa_enable_clks_v1_1(void);
+void _ipa_enable_clks_v2_0(void);
+void _ipa_disable_clks_v1_1(void);
+void _ipa_disable_clks_v2_0(void);
+
+static inline u32 ipa_read_reg(void *base, u32 offset)
+{
+	return ioread32(base + offset);
+}
+
+static inline u32 ipa_read_reg_field(void *base, u32 offset,
+		u32 mask, u32 shift)
+{
+	return (ipa_read_reg(base, offset) & mask) >> shift;
+}
+
+static inline void ipa_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+}
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+
+ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+		 loff_t *f_pos);
+int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
+int ipa_query_intf(struct ipa_ioc_query_intf *lookup);
+int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
+int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext);
+
+void wwan_cleanup(void);
+
+int teth_bridge_driver_init(void);
+void ipa_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+
+int _ipa_init_sram_v2(void);
+int _ipa_init_sram_v2_5(void);
+int _ipa_init_sram_v2_6L(void);
+int _ipa_init_hdr_v2(void);
+int _ipa_init_hdr_v2_5(void);
+int _ipa_init_hdr_v2_6L(void);
+int _ipa_init_rt4_v2(void);
+int _ipa_init_rt6_v2(void);
+int _ipa_init_flt4_v2(void);
+int _ipa_init_flt6_v2(void);
+
+int __ipa_commit_flt_v1_1(enum ipa_ip_type ip);
+int __ipa_commit_flt_v2(enum ipa_ip_type ip);
+int __ipa_commit_rt_v1_1(enum ipa_ip_type ip);
+int __ipa_commit_rt_v2(enum ipa_ip_type ip);
+int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
+	struct ipa_rt_entry *entry, u8 *buf);
+int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
+	struct ipa_rt_entry *entry, u8 *buf);
+int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
+	struct ipa_rt_entry *entry, u8 *buf);
+
+int __ipa_commit_hdr_v1_1(void);
+int __ipa_commit_hdr_v2(void);
+int __ipa_commit_hdr_v2_5(void);
+int __ipa_commit_hdr_v2_6L(void);
+int ipa_generate_flt_eq(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_attrib);
+void ipa_skb_recycle(struct sk_buff *skb);
+void ipa_install_dflt_flt_rules(u32 ipa_ep_idx);
+void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx);
+
+int ipa_enable_data_path(u32 clnt_hdl);
+int ipa_disable_data_path(u32 clnt_hdl);
+int ipa_id_alloc(void *ptr);
+void *ipa_id_find(u32 id);
+void ipa_id_remove(u32 id);
+
+int ipa2_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps);
+
+int ipa2_cfg_ep_status(u32 clnt_hdl,
+			const struct ipa_ep_cfg_status *ipa_ep_cfg);
+int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity);
+int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity);
+
+int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa2_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa2_resume_resource(enum ipa_rm_resource_name name);
+bool ipa_should_pipe_be_suspended(enum ipa_client_type client);
+int ipa_tag_aggr_force_close(int pipe_num);
+
+void ipa_active_clients_lock(void);
+int ipa_active_clients_trylock(unsigned long *flags);
+void ipa_active_clients_unlock(void);
+void ipa_active_clients_trylock_unlock(unsigned long *flags);
+int ipa_wdi_init(void);
+int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa_tag_process(struct ipa_desc *desc, int num_descs,
+		    unsigned long timeout);
+
+int ipa_q6_pre_shutdown_cleanup(void);
+int ipa_q6_post_shutdown_cleanup(void);
+int ipa_init_q6_smem(void);
+int ipa_q6_monitor_holb_mitigation(bool enable);
+
+int ipa_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+			 enum ipa_client_type ipa_client);
+
+int ipa_uc_interface_init(void);
+int ipa_uc_reset_pipe(enum ipa_client_type ipa_client);
+int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable);
+int ipa2_uc_state_check(void);
+int ipa_uc_loaded_check(void);
+int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies);
+void ipa_register_panic_hdlr(void);
+void ipa_uc_register_handlers(enum ipa_hw_features feature,
+			      struct ipa_uc_hdlrs *hdlrs);
+int create_nat_device(void);
+int ipa_uc_notify_clk_state(bool enabled);
+void ipa_dma_async_memcpy_notify_cb(void *priv,
+		enum ipa_dp_evt_type evt, unsigned long data);
+
+int ipa_uc_update_hw_flags(u32 flags);
+
+int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa2_uc_mhi_cleanup(void);
+int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx);
+int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection);
+int ipa2_uc_mhi_reset_channel(int channelHandle);
+int ipa2_uc_mhi_suspend_channel(int channelHandle);
+int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
+int ipa2_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa2_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+u32 ipa_get_num_pipes(void);
+u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys);
+struct ipa_smmu_cb_ctx *ipa2_get_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa2_get_wlan_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa2_get_uc_smmu_ctx(void);
+struct iommu_domain *ipa_get_uc_smmu_domain(void);
+struct iommu_domain *ipa2_get_wlan_smmu_domain(void);
+int ipa2_ap_suspend(struct device *dev);
+int ipa2_ap_resume(struct device *dev);
+struct iommu_domain *ipa2_get_smmu_domain(void);
+struct device *ipa2_get_dma_dev(void);
+int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+void ipa_suspend_apps_pipes(bool suspend);
+void ipa_update_repl_threshold(enum ipa_client_type ipa_client);
+void ipa_flow_control(enum ipa_client_type ipa_client, bool enable,
+			uint32_t qmap_id);
+int ipa2_restore_suspend_handler(void);
+void ipa_sps_irq_control_all(bool enable);
+void ipa_inc_acquire_wakelock(enum ipa_wakelock_ref_client ref_client);
+void ipa_dec_release_wakelock(enum ipa_wakelock_ref_client ref_client);
+int ipa_iommu_map(struct iommu_domain *domain, unsigned long iova,
+	phys_addr_t paddr, size_t size, int prot);
+int ipa2_rx_poll(u32 clnt_hdl, int budget);
+void ipa2_recycle_wan_skb(struct sk_buff *skb);
+int ipa_ntn_init(void);
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats);
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *),
+				void *user_data);
+struct device *ipa2_get_pdev(void);
+#endif /* _IPA_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
new file mode 100644
index 0000000..17f577a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_interrupts.c
@@ -0,0 +1,381 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include "ipa_i.h"
+
+#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
+#define IPA_IRQ_NUM_MAX 32
+
+struct ipa_interrupt_info {
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	bool deferred_flag;
+};
+
+struct ipa_interrupt_work_wrap {
+	struct work_struct interrupt_work;
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	void *interrupt_data;
+};
+
+static struct ipa_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
+static struct workqueue_struct *ipa_interrupt_wq;
+static u32 ipa_ee;
+
+static void ipa_interrupt_defer(struct work_struct *work);
+static DECLARE_WORK(ipa_interrupt_defer_work, ipa_interrupt_defer);
+
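+/*
+ * Maps each logical ipa_irq_type to its bit position in the per-EE
+ * IRQ_EN/IRQ_STTS/IRQ_CLR registers (and to its slot in the
+ * ipa_interrupt_to_cb table).
+ */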
+static int ipa2_irq_mapping[IPA_IRQ_MAX] = {
+	[IPA_BAD_SNOC_ACCESS_IRQ]		= 0,
+	[IPA_EOT_COAL_IRQ]			= 1,
+	[IPA_UC_IRQ_0]				= 2,
+	[IPA_UC_IRQ_1]				= 3,
+	[IPA_UC_IRQ_2]				= 4,
+	[IPA_UC_IRQ_3]				= 5,
+	[IPA_UC_IN_Q_NOT_EMPTY_IRQ]		= 6,
+	[IPA_UC_RX_CMD_Q_NOT_FULL_IRQ]		= 7,
+	[IPA_UC_TX_CMD_Q_NOT_FULL_IRQ]		= 8,
+	[IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ]	= 9,
+	[IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ]	= 10,
+	[IPA_RX_ERR_IRQ]			= 11,
+	[IPA_DEAGGR_ERR_IRQ]			= 12,
+	[IPA_TX_ERR_IRQ]			= 13,
+	[IPA_STEP_MODE_IRQ]			= 14,
+	[IPA_PROC_ERR_IRQ]			= 15,
+	[IPA_TX_SUSPEND_IRQ]			= 16,
+	[IPA_TX_HOLB_DROP_IRQ]			= 17,
+	[IPA_BAM_IDLE_IRQ]			= 18,
+};
+
+static void deferred_interrupt_work(struct work_struct *work)
+{
+	struct ipa_interrupt_work_wrap *work_data =
+			container_of(work,
+			struct ipa_interrupt_work_wrap,
+			interrupt_work);
+	IPADBG("call handler from workq...\n");
+	work_data->handler(work_data->interrupt, work_data->private_data,
+			work_data->interrupt_data);
+	kfree(work_data->interrupt_data);
+	kfree(work_data);
+}
+
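+/*
+ * Returns true if at least one endpoint flagged in the TX_SUSPEND
+ * bitmask corresponds to a valid (allocated) pipe.
+ */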
+static bool is_valid_ep(u32 ep_suspend_data)
+{
+	u32 bmsk = 1;
+	u32 i = 0;
+
+	for (i = 0; i < ipa_ctx->ipa_num_pipes; i++) {
+		if ((ep_suspend_data & bmsk) && (ipa_ctx->ep[i].valid))
+			return true;
+		bmsk = bmsk << 1;
+	}
+	return false;
+}
+
+static int handle_interrupt(int irq_num, bool isr_context)
+{
+	struct ipa_interrupt_info interrupt_info;
+	struct ipa_interrupt_work_wrap *work_data;
+	u32 suspend_data;
+	void *interrupt_data = NULL;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
+	int res;
+
+	interrupt_info = ipa_interrupt_to_cb[irq_num];
+	if (interrupt_info.handler == NULL) {
+		IPAERR("A callback function wasn't set for interrupt num %d\n",
+			irq_num);
+		return -EINVAL;
+	}
+
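+	/*
+	 * TX_SUSPEND carries a per-endpoint bitmask in a separate register;
+	 * snapshot it here and pass it to the handler as interrupt_data.
+	 */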
+	switch (interrupt_info.interrupt) {
+	case IPA_TX_SUSPEND_IRQ:
+		suspend_data = ipa_read_reg(ipa_ctx->mmio,
+					IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(ipa_ee));
+		if (!is_valid_ep(suspend_data))
+			return 0;
+
+		suspend_interrupt_data =
+			kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return -ENOMEM;
+		}
+		suspend_interrupt_data->endpoints = suspend_data;
+		interrupt_data = suspend_interrupt_data;
+		break;
+	default:
+		break;
+	}
+
+	/* Force defer processing if in ISR context. */
+	if (interrupt_info.deferred_flag || isr_context) {
+		work_data = kzalloc(sizeof(struct ipa_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa_interrupt_work_wrap\n");
+			res = -ENOMEM;
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work, deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = interrupt_info.interrupt;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+
+	} else {
+		interrupt_info.handler(interrupt_info.interrupt,
+			interrupt_info.private_data,
+			interrupt_data);
+		kfree(interrupt_data);
+	}
+
+	return 0;
+
+fail_alloc_work:
+	kfree(interrupt_data);
+	return res;
+}
+
+static inline bool is_uc_irq(int irq_num)
+{
+	if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
+		ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
+		return true;
+	else
+		return false;
+}
+
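+/*
+ * Walks the enabled-and-pending interrupt bits and dispatches each one to
+ * its registered handler, re-reading the status register until no enabled
+ * interrupt remains pending.
+ */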
+static void ipa_process_interrupts(bool isr_context)
+{
+	u32 reg;
+	u32 bmsk;
+	u32 i = 0;
+	u32 en;
+	bool uc_irq;
+
+	en = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+	reg = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
+	while (en & reg) {
+		bmsk = 1;
+		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
+			if (!(en & reg & bmsk)) {
+				bmsk = bmsk << 1;
+				continue;
+			}
+			uc_irq = is_uc_irq(i);
+			/*
+			 * Clear uC interrupt before processing to avoid
+			 * clearing unhandled interrupts
+			 */
+			if (uc_irq)
+				ipa_write_reg(ipa_ctx->mmio,
+					IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk);
+
+			/* Process the interrupts */
+			handle_interrupt(i, isr_context);
+
+			/*
+			 * Clear non uC interrupt after processing
+			 * to avoid clearing interrupt data
+			 */
+			if (!uc_irq)
+				ipa_write_reg(ipa_ctx->mmio,
+				   IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), bmsk);
+
+			bmsk = bmsk << 1;
+		}
+		/*
+		 * Check pending interrupts that may have
+		 * been raised since last read
+		 */
+		reg = ipa_read_reg(ipa_ctx->mmio,
+				IPA_IRQ_STTS_EE_n_ADDR(ipa_ee));
+	}
+}
+
+static void ipa_interrupt_defer(struct work_struct *work)
+{
+	IPADBG("processing interrupts in wq\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("Done\n");
+}
+
+static irqreturn_t ipa_isr(int irq, void *ctxt)
+{
+	unsigned long flags;
+
+	/* defer interrupt handling in case IPA is not clocked on */
+	if (ipa_active_clients_trylock(&flags) == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work);
+		return IRQ_HANDLED;
+	}
+
+	if (ipa_ctx->ipa_active_clients.cnt == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa_ctx->power_mgmt_wq, &ipa_interrupt_defer_work);
+		goto bail;
+	}
+
+	ipa_process_interrupts(true);
+
+bail:
+	ipa_active_clients_trylock_unlock(&flags);
+	return IRQ_HANDLED;
+}
+
+/**
+ * ipa2_add_interrupt_handler() - Add a handler for an interrupt type
+ * @interrupt:		Interrupt type
+ * @handler:		The handler to be added
+ * @deferred_flag:	whether the handler processing should be deferred to
+ *			a workqueue
+ * @private_data:	the client's private data
+ *
+ * Adds a handler for an interrupt type and enables the corresponding bit in
+ * the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+ * will be enabled.
+ */
+int ipa2_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+
+	IPADBG("in ipa2_add_interrupt_handler\n");
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa2_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+	ipa_interrupt_to_cb[irq_num].handler = handler;
+	ipa_interrupt_to_cb[irq_num].private_data = private_data;
+	ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+	val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+	IPADBG("read IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+	bmsk = 1 << irq_num;
+	val |= bmsk;
+	ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+	IPADBG("wrote IPA_IRQ_EN_EE_n_ADDR register. reg = %d\n", val);
+	return 0;
+}
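+
+/*
+ * Illustrative usage (not part of this patch): a client that wants the
+ * TX_SUSPEND interrupt handled from workqueue context could register a
+ * deferred handler as sketched below, where my_suspend_cb and my_ctx are
+ * hypothetical and my_suspend_cb matches the ipa_irq_handler_t prototype:
+ *
+ *	ipa2_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_cb,
+ *				   true, my_ctx);
+ */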
+
+/**
+ * ipa2_remove_interrupt_handler() - Remove the handler for an interrupt type
+ * @interrupt:		Interrupt type
+ *
+ * Removes the handler and disables the corresponding bit in the IRQ_EN
+ * register
+ */
+int ipa2_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa2_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	kfree(ipa_interrupt_to_cb[irq_num].private_data);
+	ipa_interrupt_to_cb[irq_num].deferred_flag = false;
+	ipa_interrupt_to_cb[irq_num].handler = NULL;
+	ipa_interrupt_to_cb[irq_num].private_data = NULL;
+	ipa_interrupt_to_cb[irq_num].interrupt = -1;
+
+	val = ipa_read_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee));
+	bmsk = 1 << irq_num;
+	val &= ~bmsk;
+	ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EN_EE_n_ADDR(ipa_ee), val);
+
+	return 0;
+}
+
+/**
+ * ipa_interrupts_init() - Initialize the IPA interrupts framework
+ * @ipa_irq:	The interrupt number to allocate
+ * @ee:		Execution environment
+ * @ipa_dev:	The basic device structure representing the IPA driver
+ *
+ * - Initialize the ipa_interrupt_to_cb array
+ * - Clear interrupts status
+ * - Register the ipa interrupt handler - ipa_isr
+ * - Enable apps processor wakeup by IPA interrupts
+ */
+int ipa_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
+{
+	int idx;
+	u32 reg = 0xFFFFFFFF;
+	int res = 0;
+
+	ipa_ee = ee;
+	for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
+		ipa_interrupt_to_cb[idx].deferred_flag = false;
+		ipa_interrupt_to_cb[idx].handler = NULL;
+		ipa_interrupt_to_cb[idx].private_data = NULL;
+		ipa_interrupt_to_cb[idx].interrupt = -1;
+	}
+
+	ipa_interrupt_wq = create_singlethread_workqueue(
+			INTERRUPT_WORKQUEUE_NAME);
+	if (!ipa_interrupt_wq) {
+		IPAERR("workqueue creation failed\n");
+		return -ENOMEM;
+	}
+
+	/* Clear all pending interrupt status bits */
+	ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_CLR_EE_n_ADDR(ipa_ee), reg);
+
+	res = request_irq(ipa_irq, (irq_handler_t) ipa_isr,
+				IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+	if (res) {
+		IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
+		destroy_workqueue(ipa_interrupt_wq);
+		ipa_interrupt_wq = NULL;
+		return -ENODEV;
+	}
+	IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+	res = enable_irq_wake(ipa_irq);
+	if (res)
+		IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+				ipa_irq, res);
+	else
+		IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
+
+	return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
new file mode 100644
index 0000000..2a68970
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -0,0 +1,623 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "ipa_i.h"
+
+struct ipa_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct list_head link;
+	u32 num_tx_props;
+	u32 num_rx_props;
+	u32 num_ext_props;
+	struct ipa_ioc_tx_intf_prop *tx;
+	struct ipa_ioc_rx_intf_prop *rx;
+	struct ipa_ioc_ext_intf_prop *ext;
+	enum ipa_client_type excp_pipe;
+};
+
+struct ipa_push_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_free_fn callback;
+	void *buff;
+	struct list_head link;
+};
+
+struct ipa_pull_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_pull_fn callback;
+	struct list_head link;
+};
+
+/**
+ * ipa2_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx)
+{
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	return ipa2_register_intf_ext(name, tx, rx, NULL);
+}
+
+/**
+ * ipa2_register_intf_ext() - register "logical" interface which has only
+ * extended properties
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext)
+{
+	struct ipa_intf *intf;
+	u32 len;
+
+	if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
+		IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name,
+				tx, rx, ext);
+		return -EINVAL;
+	}
+
+	if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	len = sizeof(struct ipa_intf);
+	intf = kzalloc(len, GFP_KERNEL);
+	if (intf == NULL) {
+		IPAERR("fail to alloc 0x%x bytes\n", len);
+		return -ENOMEM;
+	}
+
+	strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
+
+	if (tx) {
+		intf->num_tx_props = tx->num_props;
+		len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
+		intf->tx = kzalloc(len, GFP_KERNEL);
+		if (intf->tx == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->tx, tx->prop, len);
+	}
+
+	if (rx) {
+		intf->num_rx_props = rx->num_props;
+		len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
+		intf->rx = kzalloc(len, GFP_KERNEL);
+		if (intf->rx == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->rx, rx->prop, len);
+	}
+
+	if (ext) {
+		intf->num_ext_props = ext->num_props;
+		len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
+		intf->ext = kzalloc(len, GFP_KERNEL);
+		if (intf->ext == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf->rx);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->ext, ext->prop, len);
+	}
+
+	if (ext && ext->excp_pipe_valid)
+		intf->excp_pipe = ext->excp_pipe;
+	else
+		intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
+
+	mutex_lock(&ipa_ctx->lock);
+	list_add_tail(&intf->link, &ipa_ctx->intf_list);
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
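+
+/*
+ * Illustrative usage (not part of this patch): a network driver could
+ * register its logical interface roughly as follows, where tx_prop,
+ * rx_prop and the interface name are hypothetical placeholders:
+ *
+ *	struct ipa_tx_intf tx = { .num_props = 1, .prop = tx_prop };
+ *	struct ipa_rx_intf rx = { .num_props = 1, .prop = rx_prop };
+ *
+ *	ipa2_register_intf("rmnet0", &tx, &rx);
+ */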
+
+/**
+ * ipa2_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_deregister_intf(const char *name)
+{
+	struct ipa_intf *entry;
+	struct ipa_intf *next;
+	int result = -EINVAL;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (name == NULL) {
+		IPAERR("invalid param name=%p\n", name);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry_safe(entry, next, &ipa_ctx->intf_list, link) {
+		if (!strcmp(entry->name, name)) {
+			list_del(&entry->link);
+			kfree(entry->ext);
+			kfree(entry->rx);
+			kfree(entry->tx);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa_query_intf() - query logical interface properties
+ * @lookup:	[inout] interface name and number of properties
+ *
+ * Obtain the handle and number of tx and rx properties for the named
+ * interface, used as part of querying the tx and rx properties for
+ * configuration of various rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_query_intf(struct ipa_ioc_query_intf *lookup)
+{
+	struct ipa_intf *entry;
+	int result = -EINVAL;
+
+	if (lookup == NULL) {
+		IPAERR("invalid param lookup=%p\n", lookup);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+		if (!strcmp(entry->name, lookup->name)) {
+			lookup->num_tx_props = entry->num_tx_props;
+			lookup->num_rx_props = entry->num_rx_props;
+			lookup->num_ext_props = entry->num_ext_props;
+			lookup->excp_pipe = entry->excp_pipe;
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa_query_intf_tx_props() - query TX props of an interface
+ * @tx:  [inout] interface tx attributes
+ *
+ * Obtain the tx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
+{
+	struct ipa_intf *entry;
+	int result = -EINVAL;
+
+	if (tx == NULL) {
+		IPAERR("invalid param tx=%p\n", tx);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+		if (!strcmp(entry->name, tx->name)) {
+			memcpy(tx->tx, entry->tx, entry->num_tx_props *
+			       sizeof(struct ipa_ioc_tx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa_query_intf_rx_props() - query RX props of an interface
+ * @rx:  [inout] interface rx attributes
+ *
+ * Obtain the rx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
+{
+	struct ipa_intf *entry;
+	int result = -EINVAL;
+
+	if (rx == NULL) {
+		IPAERR("invalid param rx=%p\n", rx);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+		if (!strcmp(entry->name, rx->name)) {
+			memcpy(rx->rx, entry->rx, entry->num_rx_props *
+					sizeof(struct ipa_ioc_rx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa_query_intf_ext_props() - query EXT props of an interface
+ * @ext:  [inout] interface ext attributes
+ *
+ * Obtain the ext properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
+{
+	struct ipa_intf *entry;
+	int result = -EINVAL;
+
+	if (ext == NULL) {
+		IPAERR("invalid param ext=%p\n", ext);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	list_for_each_entry(entry, &ipa_ctx->intf_list, link) {
+		if (!strcmp(entry->name, ext->name)) {
+			memcpy(ext->ext, entry->ext, entry->num_ext_props *
+					sizeof(struct ipa_ioc_ext_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+	return result;
+}
+
+static void ipa2_send_msg_free(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/**
+ * ipa2_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	struct ipa_push_msg *msg;
+	void *data = NULL;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (meta == NULL || (buff == NULL && callback != NULL) ||
+	    (buff != NULL && callback == NULL)) {
+		IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+		       meta, buff, callback);
+		return -EINVAL;
+	}
+
+	if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
+		IPAERR("unsupported message type %d\n", meta->msg_type);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa_push_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		IPAERR("fail to alloc ipa_msg container\n");
+		return -ENOMEM;
+	}
+
+	msg->meta = *meta;
+	if (meta->msg_len > 0 && buff) {
+		data = kmalloc(meta->msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			IPAERR("fail to alloc data container\n");
+			kfree(msg);
+			return -ENOMEM;
+		}
+		memcpy(data, buff, meta->msg_len);
+		msg->buff = data;
+		msg->callback = ipa2_send_msg_free;
+	}
+
+	mutex_lock(&ipa_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa_ctx->msg_list);
+	mutex_unlock(&ipa_ctx->msg_lock);
+	IPA_STATS_INC_CNT(ipa_ctx->stats.msg_w[meta->msg_type]);
+
+	wake_up(&ipa_ctx->msg_waitq);
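+	/*
+	 * The payload was copied into a private buffer above, so the
+	 * caller's buffer can be released immediately via its callback.
+	 */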
+	if (buff)
+		callback(buff, meta->msg_len, meta->msg_type);
+
+	return 0;
+}
+
+/**
+ * ipa2_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+	struct ipa_pull_msg *msg;
+
+	if (meta == NULL || callback == NULL) {
+		IPAERR("invalid param meta=%p callback=%p\n", meta, callback);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa_pull_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		IPAERR("fail to alloc ipa_msg container\n");
+		return -ENOMEM;
+	}
+
+	msg->meta = *meta;
+	msg->callback = callback;
+
+	mutex_lock(&ipa_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa_ctx->pull_msg_list);
+	mutex_unlock(&ipa_ctx->msg_lock);
+
+	return 0;
+}
+
+/**
+ * ipa2_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	struct ipa_pull_msg *entry;
+	struct ipa_pull_msg *next;
+	int result = -EINVAL;
+
+	if (meta == NULL) {
+		IPAERR("invalid param name=%p\n", meta);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->msg_lock);
+	list_for_each_entry_safe(entry, next, &ipa_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			list_del(&entry->link);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->msg_lock);
+	return result;
+}
+
+/**
+ * ipa_read() - read message from IPA device
+ * @filp:	[in] file pointer
+ * @buf:	[out] buffer to read into
+ * @count:	[in] size of above buffer
+ * @f_pos:	[inout] file position
+ *
+ * User-space should continually read from /dev/ipa; the read will block when
+ * there are no messages to read. Upon return, user-space should read the
+ * ipa_msg_meta
+ * from the start of the buffer to know what type of message was read and its
+ * length in the remainder of the buffer. Buffer supplied must be big enough to
+ * hold the message meta-data and the largest defined message type
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+ssize_t ipa_read(struct file *filp, char __user *buf, size_t count,
+		  loff_t *f_pos)
+{
+	char __user *start;
+	struct ipa_push_msg *msg = NULL;
+	int ret;
+	DEFINE_WAIT(wait);
+	int locked;
+
+	start = buf;
+
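+	/*
+	 * Block until a message is queued (unless O_NONBLOCK): copy the
+	 * meta-data followed by the payload to user-space, then free the
+	 * payload through the client's free callback.
+	 */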
+	while (1) {
+		mutex_lock(&ipa_ctx->msg_lock);
+		locked = 1;
+		prepare_to_wait(&ipa_ctx->msg_waitq, &wait, TASK_INTERRUPTIBLE);
+		if (!list_empty(&ipa_ctx->msg_list)) {
+			msg = list_first_entry(&ipa_ctx->msg_list,
+					struct ipa_push_msg, link);
+			list_del(&msg->link);
+		}
+
+		IPADBG("msg=%p\n", msg);
+
+		if (msg) {
+			locked = 0;
+			mutex_unlock(&ipa_ctx->msg_lock);
+			if (copy_to_user(buf, &msg->meta,
+					  sizeof(struct ipa_msg_meta))) {
+				ret = -EFAULT;
+				break;
+			}
+			buf += sizeof(struct ipa_msg_meta);
+			count -= sizeof(struct ipa_msg_meta);
+			if (msg->buff) {
+				if (copy_to_user(buf, msg->buff,
+						  msg->meta.msg_len)) {
+					ret = -EFAULT;
+					break;
+				}
+				buf += msg->meta.msg_len;
+				count -= msg->meta.msg_len;
+				msg->callback(msg->buff, msg->meta.msg_len,
+					       msg->meta.msg_type);
+			}
+			IPA_STATS_INC_CNT(
+				ipa_ctx->stats.msg_r[msg->meta.msg_type]);
+			kfree(msg);
+		}
+
+		ret = -EAGAIN;
+		if (filp->f_flags & O_NONBLOCK)
+			break;
+
+		ret = -EINTR;
+		if (signal_pending(current))
+			break;
+
+		if (start != buf)
+			break;
+
+		locked = 0;
+		mutex_unlock(&ipa_ctx->msg_lock);
+		schedule();
+	}
+
+	finish_wait(&ipa_ctx->msg_waitq, &wait);
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
+
+	if (locked)
+		mutex_unlock(&ipa_ctx->msg_lock);
+
+	return ret;
+}
+
+/**
+ * ipa_pull_msg() - pull the specified message from client
+ * @meta: [in] message meta-data
+ * @buff: [out] buffer to read into
+ * @count: [in] size of above buffer
+ *
+ * Populate the supplied buffer with the pull message fetched from the
+ * client; the message must have previously been registered with
+ * the IPA driver
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
+{
+	struct ipa_pull_msg *entry;
+	int result = -EINVAL;
+
+	if (meta == NULL || buff == NULL || !count) {
+		IPAERR("invalid param name=%p buff=%p count=%zu\n",
+				meta, buff, count);
+		return result;
+	}
+
+	mutex_lock(&ipa_ctx->msg_lock);
+	list_for_each_entry(entry, &ipa_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			result = entry->callback(buff, count, meta->msg_type);
+			break;
+		}
+	}
+	mutex_unlock(&ipa_ctx->msg_lock);
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
new file mode 100644
index 0000000..e8f25c9
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_mhi.c
@@ -0,0 +1,319 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/ipa_mhi.h>
+#include "ipa_i.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MHI_DRV_NAME
+#define IPA_MHI_DBG(fmt, args...) \
+	pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define IPA_MHI_ERR(fmt, args...) \
+	pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPA_MHI_FUNC_ENTRY() \
+	IPA_MHI_DBG("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+	IPA_MHI_DBG("EXIT\n")
+
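+/*
+ * Returns true when the BAM pipe mapped to @client has no pending
+ * descriptors, i.e. the channel is empty.
+ */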
+bool ipa2_mhi_sps_channel_empty(enum ipa_client_type client)
+{
+	u32 pipe_idx;
+	bool pending;
+
+	pipe_idx = ipa2_get_ep_mapping(client);
+	if (sps_pipe_pending_desc(ipa_ctx->bam_handle,
+		pipe_idx, &pending)) {
+		IPA_MHI_ERR("sps_pipe_pending_desc failed\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	return !pending;
+}
+
+int ipa2_disable_sps_pipe(enum ipa_client_type client)
+{
+	int ipa_ep_index;
+	int res;
+
+	ipa_ep_index = ipa2_get_ep_mapping(client);
+
+	res = sps_pipe_disable(ipa_ctx->bam_handle, ipa_ep_index);
+	if (res) {
+		IPA_MHI_ERR("sps_pipe_disable fail %d\n", res);
+		return res;
+	}
+
+	return 0;
+}
+
+int ipa2_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_disable_data_path(ipa2_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa_disable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+int ipa2_mhi_start_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa_enable_data_path(ipa2_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa_enable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+int ipa2_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	if (ipa2_uc_state_check()) {
+		IPA_MHI_ERR("IPA uc is not loaded\n");
+		return -EAGAIN;
+	}
+
+	/* Initialize IPA MHI engine */
+	res = ipa_uc_mhi_init_engine(params->uC.msi, params->uC.mmio_addr,
+		params->uC.host_ctrl_addr, params->uC.host_data_addr,
+		params->uC.first_ch_idx, params->uC.first_er_idx);
+	if (res) {
+		IPA_MHI_ERR("failed to start MHI engine %d\n", res);
+		goto fail_init_engine;
+	}
+
+	/* Update UL/DL sync if valid */
+	res = ipa2_uc_mhi_send_dl_ul_sync_info(
+		params->uC.ipa_cached_dl_ul_sync_info);
+	if (res) {
+		IPA_MHI_ERR("failed to update ul/dl sync %d\n", res);
+		goto fail_init_engine;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_init_engine:
+	return res;
+}
+
+/**
+ * ipa2_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ * This function does the following:
+ *	- Send command to uC to start corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa2_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl)
+{
+	struct ipa_ep_context *ep;
+	int ipa_ep_idx;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!in || !clnt_hdl) {
+		IPA_MHI_ERR("NULL args\n");
+		return -EINVAL;
+	}
+
+	if (in->sys->client >= IPA_CLIENT_MAX) {
+		IPA_MHI_ERR("bad parm client:%d\n", in->sys->client);
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa2_get_ep_mapping(in->sys->client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	IPA_MHI_DBG("client %d channelHandle %d channelIndex %d\n",
+		in->sys->client, in->start.uC.index, in->start.uC.id);
+
+	if (ep->valid == 1) {
+		IPA_MHI_ERR("EP already allocated.\n");
+		goto fail_ep_exists;
+	}
+
+	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+	ep->valid = 1;
+	ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+	ep->client = in->sys->client;
+	ep->client_notify = in->sys->notify;
+	ep->priv = in->sys->priv;
+	ep->keep_ipa_awake = in->sys->keep_ipa_awake;
+
+	/* start channel in uC */
+	if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+		IPA_MHI_DBG("Initializing channel\n");
+		res = ipa_uc_mhi_init_channel(ipa_ep_idx, in->start.uC.index,
+			in->start.uC.id,
+			(IPA_CLIENT_IS_PROD(ep->client) ? 1 : 2));
+		if (res) {
+			IPA_MHI_ERR("init_channel failed %d\n", res);
+			goto fail_init_channel;
+		}
+	} else if (in->start.uC.state == IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+		IPA_MHI_DBG("Starting channel\n");
+		res = ipa_uc_mhi_resume_channel(in->start.uC.index, false);
+		if (res) {
+			IPA_MHI_ERR("init_channel failed %d\n", res);
+			goto fail_init_channel;
+		}
+	} else {
+		IPA_MHI_ERR("Invalid channel state %d\n", in->start.uC.state);
+		goto fail_init_channel;
+	}
+
+	res = ipa_enable_data_path(ipa_ep_idx);
+	if (res) {
+		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+			ipa_ep_idx);
+		goto fail_enable_dp;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa2_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_ep_cfg;
+		}
+		if (ipa2_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_ep_cfg;
+		}
+		IPA_MHI_DBG("ep configuration successful\n");
+	} else {
+		IPA_MHI_DBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys->client))
+		ipa_install_dflt_flt_rules(ipa_ep_idx);
+
+	ipa_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPA_MHI_DBG("client %d (ep: %d) connected\n", in->sys->client,
+		ipa_ep_idx);
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+
+fail_ep_cfg:
+	ipa_disable_data_path(ipa_ep_idx);
+fail_enable_dp:
+	ipa_uc_mhi_reset_channel(in->start.uC.index);
+fail_init_channel:
+	memset(ep, 0, offsetof(struct ipa_ep_context, sys));
+fail_ep_exists:
+	return -EPERM;
+}
+
+/**
+ * ipa2_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function does the following:
+ *	- Send command to uC to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa2_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+	IPA_MHI_FUNC_ENTRY();
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes) {
+		IPAERR("invalid handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("pipe was not connected %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ipa_ctx->ep[clnt_hdl].valid = 0;
+
+	ipa_delete_dflt_flt_rules(clnt_hdl);
+
+	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa2_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+	res = ipa_uc_mhi_resume_channel(index, LPTransitionRejected);
+	if (res) {
+		IPA_MHI_ERR("failed to resume channel %u error %d\n",
+			index, res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI driver");
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
new file mode 100644
index 0000000..9b97f57
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_nat.c
@@ -0,0 +1,769 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET  0
+#define IPA_NAT_PHYS_MEM_SIZE  IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_SYSTEM_MEMORY  0
+#define IPA_NAT_SHARED_MEMORY  1
+#define IPA_NAT_TEMP_MEM_SIZE 128
+
+static int ipa_nat_vma_fault_remap(
+	 struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	IPADBG("\n");
+	vmf->page = NULL;
+
+	return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa_nat_remap_vm_ops = {
+	.fault = ipa_nat_vma_fault_remap,
+};
+
+static int ipa_nat_open(struct inode *inode, struct file *filp)
+{
+	struct ipa_nat_mem *nat_ctx;
+
+	IPADBG("\n");
+	nat_ctx = container_of(inode->i_cdev, struct ipa_nat_mem, cdev);
+	filp->private_data = nat_ctx;
+	IPADBG("return\n");
+
+	return 0;
+}
+
+static int ipa_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct ipa_nat_mem *nat_ctx = (struct ipa_nat_mem *)filp->private_data;
+	unsigned long phys_addr;
+	int result;
+
+	mutex_lock(&nat_ctx->lock);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
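+	/*
+	 * NAT entries live either in system memory allocated with
+	 * dma_alloc_coherent() or in IPA-local (shared) memory; map
+	 * whichever was selected at allocation time.
+	 */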
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("Mapping system memory\n");
+		if (nat_ctx->is_mapped) {
+			IPAERR("mapping already exists, only 1 supported\n");
+			result = -EINVAL;
+			goto bail;
+		}
+		IPADBG("map sz=0x%zx\n", nat_ctx->size);
+		result =
+			dma_mmap_coherent(
+				 ipa_ctx->pdev, vma,
+				 nat_ctx->vaddr, nat_ctx->dma_handle,
+				 nat_ctx->size);
+
+		if (result) {
+			IPAERR("unable to map memory. Err:%d\n", result);
+			goto bail;
+		}
+		ipa_ctx->nat_mem.nat_base_address = nat_ctx->vaddr;
+	} else {
+		IPADBG("Mapping shared(local) memory\n");
+		IPADBG("map sz=0x%lx\n", vsize);
+
+		if ((IPA_NAT_PHYS_MEM_SIZE == 0) ||
+				(vsize > IPA_NAT_PHYS_MEM_SIZE)) {
+			result = -EINVAL;
+			goto bail;
+		}
+		phys_addr = ipa_ctx->ipa_wrapper_base +
+			ipa_ctx->ctrl->ipa_reg_base_ofst +
+			IPA_SRAM_DIRECT_ACCESS_N_OFST(IPA_NAT_PHYS_MEM_OFFSET);
+
+		if (remap_pfn_range(
+			 vma, vma->vm_start,
+			 phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+			IPAERR("remap failed\n");
+			result = -EAGAIN;
+			goto bail;
+		}
+		ipa_ctx->nat_mem.nat_base_address = (void *)vma->vm_start;
+	}
+	nat_ctx->is_mapped = true;
+	vma->vm_ops = &ipa_nat_remap_vm_ops;
+	IPADBG("return\n");
+	result = 0;
+bail:
+	mutex_unlock(&nat_ctx->lock);
+	return result;
+}
+
+static const struct file_operations ipa_nat_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa_nat_open,
+	.mmap = ipa_nat_mmap
+};
+
+/**
+ * allocate_temp_nat_memory() - Allocates temp nat memory
+ *
+ * Called during nat table delete
+ */
+void allocate_temp_nat_memory(void)
+{
+	struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+
+	nat_ctx->tmp_vaddr =
+		dma_alloc_coherent(ipa_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE,
+				&nat_ctx->tmp_dma_handle, gfp_flags);
+
+	if (nat_ctx->tmp_vaddr == NULL) {
+		IPAERR("Temp Memory alloc failed\n");
+		nat_ctx->is_tmp_mem = false;
+		return;
+	}
+
+	nat_ctx->is_tmp_mem = true;
+	IPADBG("IPA NAT allocated temp memory successfully\n");
+}
+
+/**
+ * create_nat_device() - Create the NAT device
+ *
+ * Called during ipa init to create nat device
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int create_nat_device(void)
+{
+	struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	int result;
+
+	IPADBG("\n");
+
+	mutex_lock(&nat_ctx->lock);
+	nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME);
+	if (IS_ERR(nat_ctx->class)) {
+		IPAERR("unable to create the class\n");
+		result = -ENODEV;
+		goto vaddr_alloc_fail;
+	}
+	result = alloc_chrdev_region(&nat_ctx->dev_num,
+					0,
+					1,
+					NAT_DEV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto alloc_chrdev_region_fail;
+	}
+
+	nat_ctx->dev =
+	   device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+			"%s", NAT_DEV_NAME);
+
+	if (IS_ERR(nat_ctx->dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+		result = -ENODEV;
+		goto device_create_fail;
+	}
+
+	cdev_init(&nat_ctx->cdev, &ipa_nat_fops);
+	nat_ctx->cdev.owner = THIS_MODULE;
+	nat_ctx->cdev.ops = &ipa_nat_fops;
+
+	result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev_add_fail;
+	}
+	IPADBG("ipa nat dev added successful. major:%d minor:%d\n",
+			MAJOR(nat_ctx->dev_num),
+			MINOR(nat_ctx->dev_num));
+
+	nat_ctx->is_dev = true;
+	allocate_temp_nat_memory();
+	IPADBG("IPA NAT device created successfully\n");
+	result = 0;
+	goto bail;
+
+cdev_add_fail:
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+	class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+	if (nat_ctx->vaddr) {
+		IPADBG("Releasing system memory\n");
+		dma_free_coherent(
+			 ipa_ctx->pdev, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->vaddr = NULL;
+		nat_ctx->dma_handle = 0;
+		nat_ctx->size = 0;
+	}
+
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa2_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	struct ipa_nat_mem *nat_ctx = &(ipa_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	int result;
+
+	IPADBG("passed memory size %zu\n", mem->size);
+
+	mutex_lock(&nat_ctx->lock);
+	if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
+		IPAERR("Nat device name mismatch\n");
+		IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_ctx->is_dev != true) {
+		IPAERR("Nat device not created successfully during boot up\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_ctx->is_dev_init == true) {
+		IPAERR("Device already init\n");
+		result = 0;
+		goto bail;
+	}
+
+	if (mem->size <= 0 ||
+			nat_ctx->is_dev_init == true) {
+		IPAERR("Invalid Parameters or device is already init\n");
+		result = -EPERM;
+		goto bail;
+	}
+
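+	/*
+	 * Requests larger than the IPA-local NAT region are served from
+	 * system memory; smaller requests use the shared (local) memory.
+	 */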
+	if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+		IPADBG("Allocating system memory\n");
+		nat_ctx->is_sys_mem = true;
+		nat_ctx->vaddr =
+		   dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+				   &nat_ctx->dma_handle, gfp_flags);
+		if (nat_ctx->vaddr == NULL) {
+			IPAERR("memory alloc failed\n");
+			result = -ENOMEM;
+			goto bail;
+		}
+		nat_ctx->size = mem->size;
+	} else {
+		IPADBG("using shared(local) memory\n");
+		nat_ctx->is_sys_mem = false;
+	}
+
+	nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT dev init successfully\n");
+	result = 0;
+
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
+
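+/*
+ * Illustrative usage sketch (hypothetical caller code; tbl_bytes and ret are
+ * placeholders): the NAT client fills an ipa_ioc_nat_alloc_mem and the
+ * requested size picks the backing store.
+ *
+ *	struct ipa_ioc_nat_alloc_mem mem;
+ *
+ *	memset(&mem, 0, sizeof(mem));
+ *	strlcpy(mem.dev_name, NAT_DEV_NAME, sizeof(mem.dev_name));
+ *	mem.size = tbl_bytes;	// > IPA_NAT_PHYS_MEM_SIZE -> dma_alloc_coherent()
+ *				//   system memory, otherwise IPA shared RAM
+ *	ret = ipa2_allocate_nat_device(&mem);
+ */
+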
+/* IOCTL function handlers */
+/**
+ * ipa2_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+#define TBL_ENTRY_SIZE 32
+#define INDX_TBL_ENTRY_SIZE 4
+
+	struct ipa_register_write *reg_write_nop;
+	struct ipa_desc desc[2];
+	struct ipa_ip_v4_nat_init *cmd;
+	u16 size = sizeof(struct ipa_ip_v4_nat_init);
+	int result;
+	u32 offset = 0;
+	size_t tmp;
+
+	IPADBG("\n");
+	if (init->table_entries == 0) {
+		IPADBG("Table entries is zero\n");
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->ipv4_rules_offset >
+		UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->ipv4_rules_offset +
+		(TBL_ENTRY_SIZE * (init->table_entries + 1));
+	if (tmp > ipa_ctx->nat_mem.size) {
+		IPAERR("Table rules offset not valid\n");
+		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+			init->ipv4_rules_offset, (init->table_entries + 1),
+			tmp, ipa_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->expn_rules_offset >
+		UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Expn Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->expn_rules_offset +
+		(TBL_ENTRY_SIZE * init->expn_table_entries);
+	if (tmp > ipa_ctx->nat_mem.size) {
+		IPAERR("Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+			init->expn_rules_offset, init->expn_table_entries,
+			tmp, ipa_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->index_offset >
+		UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Indx Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->index_offset +
+		(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
+	if (tmp > ipa_ctx->nat_mem.size) {
+		IPAERR("Indx Table rules offset not valid\n");
+		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+			init->index_offset, (init->table_entries + 1),
+			tmp, ipa_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->index_expn_offset >
+		UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Expn Table entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->index_expn_offset +
+		(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
+	if (tmp > ipa_ctx->nat_mem.size) {
+		IPAERR("Indx Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entrys:%d size:%zu mem_size:%zu\n",
+			init->index_expn_offset, init->expn_table_entries,
+			tmp, ipa_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	memset(&desc, 0, sizeof(desc));
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	if (!reg_write_nop) {
+		IPAERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	reg_write_nop->skip_pipeline_clear = 0;
+	reg_write_nop->value_mask = 0x0;
+
+	desc[0].opcode = IPA_REGISTER_WRITE;
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = (void *)reg_write_nop;
+	desc[0].len = sizeof(*reg_write_nop);
+
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("Failed to alloc immediate command object\n");
+		result = -ENOMEM;
+		goto free_nop;
+	}
+	if (ipa_ctx->nat_mem.vaddr) {
+		IPADBG("using system memory for nat table\n");
+		cmd->ipv4_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->index_table_addr_type = IPA_NAT_SYSTEM_MEMORY;
+		cmd->index_table_expansion_addr_type = IPA_NAT_SYSTEM_MEMORY;
+
+		offset = UINT_MAX - ipa_ctx->nat_mem.dma_handle;
+
+		if ((init->ipv4_rules_offset > offset) ||
+				(init->expn_rules_offset > offset) ||
+				(init->index_offset > offset) ||
+				(init->index_expn_offset > offset)) {
+			IPAERR("Failed due to integer overflow\n");
+			IPAERR("nat.mem.dma_handle: 0x%pa\n",
+				&ipa_ctx->nat_mem.dma_handle);
+			IPAERR("ipv4_rules_offset: 0x%x\n",
+				init->ipv4_rules_offset);
+			IPAERR("expn_rules_offset: 0x%x\n",
+				init->expn_rules_offset);
+			IPAERR("index_offset: 0x%x\n",
+				init->index_offset);
+			IPAERR("index_expn_offset: 0x%x\n",
+				init->index_expn_offset);
+			result = -EPERM;
+			goto free_mem;
+		}
+		cmd->ipv4_rules_addr =
+			ipa_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+		IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+		cmd->ipv4_expansion_rules_addr =
+		   ipa_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+		IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+		cmd->index_table_addr =
+			ipa_ctx->nat_mem.dma_handle + init->index_offset;
+		IPADBG("index_offset:0x%x\n", init->index_offset);
+
+		cmd->index_table_expansion_addr =
+		   ipa_ctx->nat_mem.dma_handle + init->index_expn_offset;
+		IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+	} else {
+		IPADBG("using shared(local) memory for nat table\n");
+		cmd->ipv4_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->ipv4_expansion_rules_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->index_table_addr_type = IPA_NAT_SHARED_MEMORY;
+		cmd->index_table_expansion_addr_type = IPA_NAT_SHARED_MEMORY;
+
+		cmd->ipv4_rules_addr = init->ipv4_rules_offset +
+				IPA_RAM_NAT_OFST;
+
+		cmd->ipv4_expansion_rules_addr = init->expn_rules_offset +
+				IPA_RAM_NAT_OFST;
+
+		cmd->index_table_addr = init->index_offset  +
+				IPA_RAM_NAT_OFST;
+
+		cmd->index_table_expansion_addr = init->index_expn_offset +
+				IPA_RAM_NAT_OFST;
+	}
+	cmd->table_index = init->tbl_index;
+	IPADBG("Table index:0x%x\n", cmd->table_index);
+	cmd->size_base_tables = init->table_entries;
+	IPADBG("Base Table size:0x%x\n", cmd->size_base_tables);
+	cmd->size_expansion_tables = init->expn_table_entries;
+	IPADBG("Expansion Table size:0x%x\n", cmd->size_expansion_tables);
+	cmd->public_ip_addr = init->ip_addr;
+	IPADBG("Public ip address:0x%x\n", cmd->public_ip_addr);
+	desc[1].opcode = IPA_IP_V4_NAT_INIT;
+	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[1].callback = NULL;
+	desc[1].user1 = NULL;
+	desc[1].user2 = 0;
+	desc[1].pyld = (void *)cmd;
+	desc[1].len = size;
+	IPADBG("posting v4 init command\n");
+	if (ipa_send_cmd(2, desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto free_mem;
+	}
+
+	ipa_ctx->nat_mem.public_ip_addr = init->ip_addr;
+	IPADBG("Table ip address:0x%x", ipa_ctx->nat_mem.public_ip_addr);
+
+	ipa_ctx->nat_mem.ipv4_rules_addr =
+	 (char *)ipa_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset;
+	IPADBG("ipv4_rules_addr: 0x%p\n",
+				 ipa_ctx->nat_mem.ipv4_rules_addr);
+
+	ipa_ctx->nat_mem.ipv4_expansion_rules_addr =
+	 (char *)ipa_ctx->nat_mem.nat_base_address + init->expn_rules_offset;
+	IPADBG("ipv4_expansion_rules_addr: 0x%p\n",
+				 ipa_ctx->nat_mem.ipv4_expansion_rules_addr);
+
+	ipa_ctx->nat_mem.index_table_addr =
+		 (char *)ipa_ctx->nat_mem.nat_base_address + init->index_offset;
+	IPADBG("index_table_addr: 0x%p\n",
+				 ipa_ctx->nat_mem.index_table_addr);
+
+	ipa_ctx->nat_mem.index_table_expansion_addr =
+	 (char *)ipa_ctx->nat_mem.nat_base_address + init->index_expn_offset;
+	IPADBG("index_table_expansion_addr: 0x%p\n",
+				 ipa_ctx->nat_mem.index_table_expansion_addr);
+
+	IPADBG("size_base_tables: %d\n", init->table_entries);
+	ipa_ctx->nat_mem.size_base_tables  = init->table_entries;
+
+	IPADBG("size_expansion_tables: %d\n", init->expn_table_entries);
+	ipa_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
+
+	IPADBG("return\n");
+	result = 0;
+free_mem:
+	kfree(cmd);
+free_nop:
+	kfree(reg_write_nop);
+bail:
+	return result;
+}
+
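+/*
+ * Worked example of the bounds checks above (numbers are illustrative):
+ * with table_entries = 999 the base table spans
+ *
+ *	TBL_ENTRY_SIZE * (table_entries + 1) = 32 * 1000 = 32000 bytes
+ *
+ * so ipv4_rules_offset must satisfy both
+ *
+ *	ipv4_rules_offset <= UINT_MAX - 32000           (no u32 overflow)
+ *	ipv4_rules_offset + 32000 <= nat_mem.size       (inside the allocation)
+ *
+ * The expansion and index tables are validated with the same pattern using
+ * their own entry sizes.
+ */
+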
+/**
+ * ipa2_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] NAT DMA command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+#define NUM_OF_DESC 2
+
+	struct ipa_register_write *reg_write_nop = NULL;
+	struct ipa_nat_dma *cmd = NULL;
+	struct ipa_desc *desc = NULL;
+	u16 size = 0, cnt = 0;
+	int ret = 0;
+
+	IPADBG("\n");
+	if (dma->entries <= 0) {
+		IPAERR("Invalid number of commands %d\n",
+			dma->entries);
+		ret = -EPERM;
+		goto bail;
+	}
+
+	size = sizeof(struct ipa_desc) * NUM_OF_DESC;
+	desc = kzalloc(size, GFP_KERNEL);
+	if (desc == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	size = sizeof(struct ipa_nat_dma);
+	cmd = kzalloc(size, GFP_KERNEL);
+	if (cmd == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	if (!reg_write_nop) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	reg_write_nop->skip_pipeline_clear = 0;
+	reg_write_nop->value_mask = 0x0;
+
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].opcode = IPA_REGISTER_WRITE;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].len = sizeof(*reg_write_nop);
+	desc[0].pyld = (void *)reg_write_nop;
+
+	for (cnt = 0; cnt < dma->entries; cnt++) {
+		cmd->table_index = dma->dma[cnt].table_index;
+		cmd->base_addr = dma->dma[cnt].base_addr;
+		cmd->offset = dma->dma[cnt].offset;
+		cmd->data = dma->dma[cnt].data;
+
+		desc[1].type = IPA_IMM_CMD_DESC;
+		desc[1].opcode = IPA_NAT_DMA;
+		desc[1].callback = NULL;
+		desc[1].user1 = NULL;
+		desc[1].user2 = 0;
+		desc[1].len = sizeof(struct ipa_nat_dma);
+		desc[1].pyld = (void *)cmd;
+
+		ret = ipa_send_cmd(NUM_OF_DESC, desc);
+		if (ret == -EPERM)
+			IPAERR("Fail to send immediate command %d\n", cnt);
+	}
+
+bail:
+	kfree(cmd);
+	kfree(desc);
+	kfree(reg_write_nop);
+
+	return ret;
+}
+
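+/*
+ * Illustrative sketch (hypothetical values base_sel, entry_offset, new_value):
+ * each ipa_ioc_nat_dma_cmd entry above is sent as its own two-descriptor
+ * command - descriptor 0 is the register-write no-op that waits for the
+ * pipeline to clear, descriptor 1 carries the NAT_DMA payload, e.g.
+ *
+ *	dma->entries = 1;
+ *	dma->dma[0].table_index = 0;
+ *	dma->dma[0].base_addr = base_sel;	// which NAT table to patch
+ *	dma->dma[0].offset = entry_offset;
+ *	dma->dma[0].data = new_value;
+ *	ipa2_nat_dma_cmd(dma);
+ */
+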
+/**
+ * ipa_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx:	[in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa_nat_free_mem_and_device(struct ipa_nat_mem *nat_ctx)
+{
+	IPADBG("\n");
+	mutex_lock(&nat_ctx->lock);
+
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("freeing the dma memory\n");
+		dma_free_coherent(
+			 ipa_ctx->pdev, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->size = 0;
+		nat_ctx->vaddr = NULL;
+	}
+	nat_ctx->is_mapped = false;
+	nat_ctx->is_sys_mem = false;
+	nat_ctx->is_dev_init = false;
+
+	mutex_unlock(&nat_ctx->lock);
+	IPADBG("return\n");
+}
+
+/**
+ * ipa2_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] NAT table deletion parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	struct ipa_register_write *reg_write_nop;
+	struct ipa_desc desc[2];
+	struct ipa_ip_v4_nat_init *cmd;
+	u16 size = sizeof(struct ipa_ip_v4_nat_init);
+	u8 mem_type = IPA_NAT_SHARED_MEMORY;
+	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+	int result;
+
+	IPADBG("\n");
+	if (ipa_ctx->nat_mem.is_tmp_mem) {
+		IPAERR("using temp memory during nat del\n");
+		mem_type = IPA_NAT_SYSTEM_MEMORY;
+		base_addr = ipa_ctx->nat_mem.tmp_dma_handle;
+	}
+
+	if (del->public_ip_addr == 0) {
+		IPADBG("Bad Parameter\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	memset(&desc, 0, sizeof(desc));
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	if (!reg_write_nop) {
+		IPAERR("no mem\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	reg_write_nop->skip_pipeline_clear = 0;
+	reg_write_nop->value_mask = 0x0;
+
+	desc[0].opcode = IPA_REGISTER_WRITE;
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = (void *)reg_write_nop;
+	desc[0].len = sizeof(*reg_write_nop);
+
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (cmd == NULL) {
+		IPAERR("Failed to alloc immediate command object\n");
+		result = -ENOMEM;
+		goto free_nop;
+	}
+	cmd->table_index = del->table_index;
+	cmd->ipv4_rules_addr = base_addr;
+	cmd->ipv4_rules_addr_type = mem_type;
+	cmd->ipv4_expansion_rules_addr = base_addr;
+	cmd->ipv4_expansion_rules_addr_type = mem_type;
+	cmd->index_table_addr = base_addr;
+	cmd->index_table_addr_type = mem_type;
+	cmd->index_table_expansion_addr = base_addr;
+	cmd->index_table_expansion_addr_type = mem_type;
+	cmd->size_base_tables = 0;
+	cmd->size_expansion_tables = 0;
+	cmd->public_ip_addr = 0;
+
+	desc[1].opcode = IPA_IP_V4_NAT_INIT;
+	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[1].callback = NULL;
+	desc[1].user1 = NULL;
+	desc[1].user2 = 0;
+	desc[1].pyld = (void *)cmd;
+	desc[1].len = size;
+	if (ipa_send_cmd(2, desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto free_mem;
+	}
+
+	ipa_ctx->nat_mem.size_base_tables = 0;
+	ipa_ctx->nat_mem.size_expansion_tables = 0;
+	ipa_ctx->nat_mem.public_ip_addr = 0;
+	ipa_ctx->nat_mem.ipv4_rules_addr = 0;
+	ipa_ctx->nat_mem.ipv4_expansion_rules_addr = 0;
+	ipa_ctx->nat_mem.index_table_addr = 0;
+	ipa_ctx->nat_mem.index_table_expansion_addr = 0;
+
+	ipa_nat_free_mem_and_device(&ipa_ctx->nat_mem);
+	IPADBG("return\n");
+	result = 0;
+free_mem:
+	kfree(cmd);
+free_nop:
+	kfree(reg_write_nop);
+bail:
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
new file mode 100644
index 0000000..7291a44
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -0,0 +1,1216 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/qmi_encdec.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "ipa_qmi_service.h"
+#include "ipa_ram_mmap.h"
+#include "../ipa_common_i.h"
+
+#define IPA_Q6_SVC_VERS 1
+#define IPA_A5_SVC_VERS 1
+#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ)
+
+#define IPA_A5_SERVICE_SVC_ID 0x31
+#define IPA_A5_SERVICE_INS_ID 1
+#define IPA_Q6_SERVICE_SVC_ID 0x31
+#define IPA_Q6_SERVICE_INS_ID 2
+
+#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
+#define QMI_SEND_REQ_TIMEOUT_MS 60000
+
+static struct qmi_handle *ipa_svc_handle;
+static void ipa_a5_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg);
+static struct workqueue_struct *ipa_svc_workqueue;
+static struct workqueue_struct *ipa_clnt_req_workqueue;
+static struct workqueue_struct *ipa_clnt_resp_workqueue;
+static void *curr_conn;
+static bool qmi_modem_init_fin, qmi_indication_fin;
+static uint32_t ipa_wan_platform;
+struct ipa_qmi_context *ipa_qmi_ctx;
+static bool first_time_handshake;
+static atomic_t workqueues_stopped;
+static atomic_t ipa_qmi_initialized;
+struct mutex ipa_qmi_lock;
+
+/* QMI A5 service */
+
+static struct msg_desc ipa_indication_reg_req_desc = {
+	.max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
+	.ei_array = ipa_indication_reg_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_indication_reg_resp_desc = {
+	.max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01,
+	.ei_array = ipa_indication_reg_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa_master_driver_complete_indication_desc = {
+	.max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+	.ei_array = ipa_master_driver_init_complt_ind_msg_data_v01_ei,
+};
+static struct msg_desc ipa_install_fltr_rule_req_desc = {
+	.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+	.ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_install_fltr_rule_resp_desc = {
+	.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01,
+	.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa_filter_installed_notif_req_desc = {
+	.max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+	.ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_filter_installed_notif_resp_desc = {
+	.max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01,
+	.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa_config_req_desc = {
+	.max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_CONFIG_REQ_V01,
+	.ei_array = ipa_config_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa_config_resp_desc = {
+	.max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_CONFIG_RESP_V01,
+	.ei_array = ipa_config_resp_msg_data_v01_ei,
+};
+
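+/*
+ * Handshake note: the "master driver init complete" indication reaches the
+ * modem from whichever side finishes last. If the modem registers before
+ * qmi_init_modem_send_sync_msg() has completed, handle_indication_req()
+ * only sets qmi_indication_fin and ipa_q6_clnt_svc_arrive() sends the
+ * indication later; if modem init already finished, it is sent from the
+ * callback below.
+ */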
+static int handle_indication_req(void *req_h, void *req)
+{
+	struct ipa_indication_reg_req_msg_v01 *indication_req;
+	struct ipa_indication_reg_resp_msg_v01 resp;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+	int rc;
+
+	indication_req = (struct ipa_indication_reg_req_msg_v01 *)req;
+	IPAWANDBG("Received INDICATION Request\n");
+
+	memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+			&ipa_indication_reg_resp_desc, &resp, sizeof(resp));
+	qmi_indication_fin = true;
+	/* check if need sending indication to modem */
+	if (qmi_modem_init_fin)	{
+		IPAWANDBG("send indication to modem (%d)\n",
+		qmi_modem_init_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind_from_cb(ipa_svc_handle, curr_conn,
+			&ipa_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+	} else {
+		IPAWANERR("not send indication\n");
+	}
+	return rc;
+}
+
+
+static int handle_install_filter_rule_req(void *req_h, void *req)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req;
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	uint32_t rule_hdl[MAX_NUM_Q6_RULE];
+	int rc = 0, i;
+
+	rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req;
+	memset(rule_hdl, 0, sizeof(rule_hdl));
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	IPAWANDBG("Received install filter Request\n");
+
+	rc = copy_ul_filter_rule_to_ipa((struct
+		ipa_install_fltr_rule_req_msg_v01*)req, rule_hdl);
+	if (rc)
+		IPAWANERR("copy UL rules from modem is failed\n");
+
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	if (rule_req->filter_spec_list_valid == true) {
+		resp.filter_handle_list_valid = true;
+		if (rule_req->filter_spec_list_len > MAX_NUM_Q6_RULE) {
+			resp.filter_handle_list_len = MAX_NUM_Q6_RULE;
+			IPAWANERR("installed (%d) max Q6-UL rules ",
+			MAX_NUM_Q6_RULE);
+			IPAWANERR("but modem gives total (%u)\n",
+			rule_req->filter_spec_list_len);
+		} else {
+			resp.filter_handle_list_len =
+				rule_req->filter_spec_list_len;
+		}
+	} else {
+		resp.filter_handle_list_valid = false;
+	}
+
+	/* construct UL filter rules response to Modem*/
+	for (i = 0; i < resp.filter_handle_list_len; i++) {
+		resp.filter_handle_list[i].filter_spec_identifier =
+			rule_req->filter_spec_list[i].filter_spec_identifier;
+		resp.filter_handle_list[i].filter_handle = rule_hdl[i];
+	}
+
+	rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+			&ipa_install_fltr_rule_resp_desc, &resp, sizeof(resp));
+
+	IPAWANDBG("Replied to install filter request\n");
+	return rc;
+}
+
+static int handle_filter_installed_notify_req(void *req_h, void *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	int rc = 0;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	IPAWANDBG("Received filter_install_notify Request\n");
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+			&ipa_filter_installed_notif_resp_desc,
+			&resp, sizeof(resp));
+
+	IPAWANDBG("Responsed filter_install_notify Request\n");
+	return rc;
+}
+
+static int handle_ipa_config_req(void *req_h, void *req)
+{
+	struct ipa_config_resp_msg_v01 resp;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	IPAWANDBG("Received IPA CONFIG Request\n");
+	rc = ipa_mhi_handle_ipa_config_req(
+		(struct ipa_config_req_msg_v01 *)req);
+	if (rc) {
+		IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc);
+		resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+	}
+	rc = qmi_send_resp_from_cb(ipa_svc_handle, curr_conn, req_h,
+		&ipa_config_resp_desc,
+		&resp, sizeof(resp));
+	IPAWANDBG("Responsed IPA CONFIG Request\n");
+	return rc;
+}
+
+static int ipa_a5_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	if (ipa_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	if (curr_conn) {
+		IPAWANERR("Service is busy\n");
+		return -ECONNREFUSED;
+	}
+	curr_conn = conn_h;
+	return 0;
+}
+
+static int ipa_a5_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	if (ipa_svc_handle != handle || curr_conn != conn_h)
+		return -EINVAL;
+
+	curr_conn = NULL;
+	return 0;
+}
+
+static int ipa_a5_svc_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
+	switch (msg_id) {
+	case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+		*req_desc = &ipa_indication_reg_req_desc;
+		rc = sizeof(struct ipa_indication_reg_req_msg_v01);
+		break;
+
+	case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+		*req_desc = &ipa_install_fltr_rule_req_desc;
+		rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+		break;
+	case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+		*req_desc = &ipa_filter_installed_notif_req_desc;
+		rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+		break;
+	case QMI_IPA_CONFIG_REQ_V01:
+		*req_desc = &ipa_config_req_desc;
+		rc = sizeof(struct ipa_config_req_msg_v01);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int ipa_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int rc;
+
+	if (ipa_svc_handle != handle || curr_conn != conn_h)
+		return -EINVAL;
+
+	switch (msg_id) {
+	case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+		rc = handle_indication_req(req_h, req);
+		break;
+	case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+		rc = handle_install_filter_rule_req(req_h, req);
+		rc = wwan_update_mux_channel_prop();
+		break;
+	case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+		rc = handle_filter_installed_notify_req(req_h, req);
+		break;
+	case QMI_IPA_CONFIG_REQ_V01:
+		rc = handle_ipa_config_req(req_h, req);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static void ipa_a5_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		IPAWANDBG("Notified about a Receive Event");
+		rc = qmi_recv_msg(ipa_svc_handle);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message\n");
+}
+
+static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		if (!atomic_read(&workqueues_stopped))
+			queue_delayed_work(ipa_svc_workqueue,
+					   &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options ipa_a5_svc_ops_options = {
+	.version = 1,
+	.service_id = IPA_A5_SERVICE_SVC_ID,
+	.service_vers = IPA_A5_SVC_VERS,
+	.service_ins = IPA_A5_SERVICE_INS_ID,
+	.connect_cb = ipa_a5_svc_connect_cb,
+	.disconnect_cb = ipa_a5_svc_disconnect_cb,
+	.req_desc_cb = ipa_a5_svc_req_desc_cb,
+	.req_cb = ipa_a5_svc_req_cb,
+};
+
+
+/****************************************************/
+/*                 QMI A5 client ->Q6               */
+/****************************************************/
+static void ipa_q6_clnt_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg_client, ipa_q6_clnt_recv_msg);
+static void ipa_q6_clnt_svc_arrive(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_svc_arrive, ipa_q6_clnt_svc_arrive);
+static void ipa_q6_clnt_svc_exit(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_svc_exit, ipa_q6_clnt_svc_exit);
+/* Test client port for IPC Router */
+static struct qmi_handle *ipa_q6_clnt;
+static int ipa_q6_clnt_reset;
+
+static int ipa_check_qmi_response(int rc,
+				  int req_id,
+				  enum ipa_qmi_result_type_v01 result,
+				  enum ipa_qmi_error_type_v01 error,
+				  char *resp_type)
+{
+	if (rc < 0) {
+		if (rc == -ETIMEDOUT && ipa_rmnet_ctx.ipa_rmnet_ssr) {
+			IPAWANERR(
+			"Timeout for qmi request id %d\n", req_id);
+			return rc;
+		}
+		if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+			IPAWANERR(
+			"SSR while waiting for qmi request id %d\n", req_id);
+			return rc;
+		}
+		IPAWANERR("Error sending qmi request id %d, rc = %d\n",
+			req_id, rc);
+		return rc;
+	}
+	if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
+	    ipa_rmnet_ctx.ipa_rmnet_ssr) {
+		IPAWANERR(
+		"Got bad response %d from request id %d (error %d)\n",
+		req_id, result, error);
+		return result;
+	}
+	IPAWANDBG("Received %s successfully\n", resp_type);
+	return 0;
+}
+
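+/*
+ * Minimal usage sketch (timeout_ms, msg_id and "resp_name" are placeholders):
+ * most of the synchronous QMI exchanges below funnel their result through
+ * ipa_check_qmi_response(), which separates transport failures (negative rc)
+ * from a QMI-level failure reported in resp.resp.result/resp.resp.error:
+ *
+ *	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+ *			&resp_desc, &resp, sizeof(resp), timeout_ms);
+ *	return ipa_check_qmi_response(rc, msg_id, resp.resp.result,
+ *			resp.resp.error, "resp_name");
+ */
+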
+static int qmi_init_modem_send_sync_msg(void)
+{
+	struct ipa_init_modem_driver_req_msg_v01 req;
+	struct ipa_init_modem_driver_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+	u16 smem_restr_bytes = ipa2_get_smem_restr_bytes();
+
+	memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
+
+	req.platform_type_valid = true;
+	req.platform_type = ipa_wan_platform;
+
+	req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0);
+	req.hdr_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
+	req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
+		smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
+
+	req.v4_route_tbl_info_valid = true;
+	req.v4_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v4_rt_ofst) +
+		smem_restr_bytes;
+	req.v4_route_tbl_info.num_indices = IPA_MEM_PART(v4_modem_rt_index_hi);
+	req.v6_route_tbl_info_valid = true;
+
+	req.v6_route_tbl_info.route_tbl_start_addr = IPA_MEM_PART(v6_rt_ofst) +
+		smem_restr_bytes;
+	req.v6_route_tbl_info.num_indices = IPA_MEM_PART(v6_modem_rt_index_hi);
+
+	req.v4_filter_tbl_start_addr_valid = true;
+	req.v4_filter_tbl_start_addr =
+		IPA_MEM_PART(v4_flt_ofst) + smem_restr_bytes;
+
+	req.v6_filter_tbl_start_addr_valid = true;
+	req.v6_filter_tbl_start_addr =
+		IPA_MEM_PART(v6_flt_ofst) + smem_restr_bytes;
+
+	req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0);
+	req.modem_mem_info.block_start_addr =
+		IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
+	req.modem_mem_info.size = IPA_MEM_PART(modem_size);
+
+	req.ctrl_comm_dest_end_pt_valid = true;
+	req.ctrl_comm_dest_end_pt =
+		ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+	req.hdr_proc_ctx_tbl_info_valid =
+		(IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0);
+	req.hdr_proc_ctx_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
+	req.hdr_proc_ctx_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+		IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
+
+	req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0);
+	req.zip_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_comp_decomp_ofst) + smem_restr_bytes;
+	req.zip_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_comp_decomp_ofst) +
+		IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1;
+
+	if (!ipa_uc_loaded_check()) {  /* First time boot */
+		req.is_ssr_bootup_valid = false;
+		req.is_ssr_bootup = 0;
+	} else {  /* After SSR boot */
+		req.is_ssr_bootup_valid = true;
+		req.is_ssr_bootup = 1;
+	}
+
+	IPAWANDBG("platform_type %d\n", req.platform_type);
+	IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
+			req.hdr_tbl_info.modem_offset_start);
+	IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
+			req.hdr_tbl_info.modem_offset_end);
+	IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v4_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
+			req.v4_route_tbl_info.num_indices);
+	IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v6_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
+			req.v6_route_tbl_info.num_indices);
+	IPAWANDBG("v4_filter_tbl_start_addr %d\n",
+			req.v4_filter_tbl_start_addr);
+	IPAWANDBG("v6_filter_tbl_start_addr %d\n",
+			req.v6_filter_tbl_start_addr);
+	IPAWANDBG("modem_mem_info.block_start_addr %d\n",
+			req.modem_mem_info.block_start_addr);
+	IPAWANDBG("modem_mem_info.size %d\n",
+			req.modem_mem_info.size);
+	IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
+			req.ctrl_comm_dest_end_pt);
+	IPAWANDBG("is_ssr_bootup %d\n",
+			req.is_ssr_bootup);
+
+	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
+	req_desc.ei_array = ipa_init_modem_driver_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
+	resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;
+
+	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
+}
+
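+/*
+ * Worked example of the offset math above (illustrative numbers): with a
+ * modem header partition at offset 0x140 of size 0x320 and an smem
+ * restriction of 0x100 bytes, the request carries
+ *
+ *	modem_offset_start = 0x140 + 0x100              = 0x240
+ *	modem_offset_end   = 0x140 + 0x100 + 0x320 - 1  = 0x55f
+ *
+ * i.e. inclusive start/end addresses shifted past the restricted region;
+ * every table block in the request is shifted by smem_restr_bytes the same
+ * way.
+ */
+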
+/* sending filter-install-request to modem*/
+int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	/* check if the filter rules from IPACM is valid */
+	if (req->filter_spec_list_len == 0) {
+		IPAWANDBG("IPACM pass zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM pass %u rules to Q6\n",
+		req->filter_spec_list_len);
+	}
+
+	mutex_lock(&ipa_qmi_lock);
+	if (ipa_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+			ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+		ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+		ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa_qmi_lock);
+
+	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
+	req_desc.ei_array = ipa_install_fltr_rule_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
+	resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei;
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_install_filter");
+}
+
+
+int qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_enable_force_clear_datapath_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+
+	if (!req || !req->source_pipe_bitmask) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+	QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array = ipa_enable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa_enable_force_clear_datapath_resp_msg_data_v01_ei;
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), 0);
+	if (rc < 0) {
+		IPAWANERR("send req failed %d\n", rc);
+		return rc;
+	}
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+	IPAWANDBG("SUCCESS\n");
+	return rc;
+}
+
+int qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_disable_force_clear_datapath_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+
+	if (!req) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa_disable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+	resp_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa_disable_force_clear_datapath_resp_msg_data_v01_ei;
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), 0);
+	if (rc < 0) {
+		IPAWANERR("send req failed %d\n", rc);
+		return rc;
+	}
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+	IPAWANDBG("SUCCESS\n");
+	return rc;
+}
+
+/* sending filter-installed-notify-request to modem*/
+int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0, i = 0;
+
+	/* check if the filter rules from IPACM is valid */
+	if (req->filter_index_list_len == 0) {
+		IPAWANERR(" delete UL filter rule for pipe %d\n",
+		req->source_pipe_index);
+		return -EINVAL;
+	} else if (req->filter_index_list_len > QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+		req->source_pipe_index,
+		req->filter_index_list_len);
+		return -EINVAL;
+	} else if (req->filter_index_list[0].filter_index == 0 &&
+		req->source_pipe_index !=
+		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD)) {
+		IPAWANERR(" get index wrong for pipe %d\n",
+			req->source_pipe_index);
+		for (i = 0; i < req->filter_index_list_len; i++)
+			IPAWANERR(" %d-st handle %d index %d\n",
+				i,
+				req->filter_index_list[i].filter_handle,
+				req->filter_index_list[i].filter_index);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_qmi_lock);
+	if (ipa_qmi_ctx != NULL) {
+		/* cache the qmi_filter_request */
+		memcpy(&(ipa_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+			ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+		ipa_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %=
+			MAX_NUM_QMI_RULE_CACHE;
+	}
+	mutex_unlock(&ipa_qmi_lock);
+	req_desc.max_msg_len =
+	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
+	req_desc.ei_array = ipa_fltr_installed_notif_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
+	resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_fltr_installed_notif_resp");
+}
+
+static void ipa_q6_clnt_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		IPAWANDBG("Notified about a Receive Event");
+		rc = qmi_recv_msg(ipa_q6_clnt);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message\n");
+}
+
+static void ipa_q6_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		IPAWANDBG("client qmi recv message called");
+		if (!atomic_read(&workqueues_stopped))
+			queue_delayed_work(ipa_clnt_resp_workqueue,
+					   &work_recv_msg_client, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ipa_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			       void *msg, unsigned int msg_len,
+			       void *ind_cb_priv)
+{
+	struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+	struct msg_desc qmi_ind_desc;
+	int rc = 0;
+
+	if (handle != ipa_q6_clnt) {
+		IPAWANERR("Wrong client\n");
+		return;
+	}
+
+	if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
+		memset(&qmi_ind, 0, sizeof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01));
+		qmi_ind_desc.max_msg_len =
+			QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01;
+		qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01;
+		qmi_ind_desc.ei_array =
+			ipa_data_usage_quota_reached_ind_msg_data_v01_ei;
+
+		rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len);
+		if (rc < 0) {
+			IPAWANERR("Error decoding msg_id %d\n", msg_id);
+			return;
+		}
+		IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
+			  qmi_ind.apn.mux_id,
+			  (unsigned long int) qmi_ind.apn.num_Mbytes);
+		ipa_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+	}
+}
+
+static void ipa_q6_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+
+	/* Create a Local client port for QMI communication */
+	ipa_q6_clnt = qmi_handle_create(ipa_q6_clnt_notify, NULL);
+	if (!ipa_q6_clnt) {
+		IPAWANERR("QMI client handle alloc failed\n");
+		return;
+	}
+
+	IPAWANDBG("Lookup server name, get client-hdl(%p)\n",
+		ipa_q6_clnt);
+	rc = qmi_connect_to_service(ipa_q6_clnt,
+			IPA_Q6_SERVICE_SVC_ID,
+			IPA_Q6_SVC_VERS,
+			IPA_Q6_SERVICE_INS_ID);
+	if (rc < 0) {
+		IPAWANERR("Server not found\n");
+		ipa_q6_clnt_svc_exit(0);
+		return;
+	}
+
+	rc = qmi_register_ind_cb(ipa_q6_clnt, ipa_q6_clnt_ind_cb, NULL);
+	if (rc < 0)
+		IPAWANERR("Unable to register for indications\n");
+
+	ipa_q6_clnt_reset = 0;
+	IPAWANDBG("Q6 QMI service available now\n");
+	/* Initialize modem IPA-driver */
+	IPAWANDBG("send qmi_init_modem_send_sync_msg to modem\n");
+	rc = qmi_init_modem_send_sync_msg();
+	if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+		IPAWANERR("qmi_init_modem_send_sync_msg failed due to SSR!\n");
+		/* Cleanup will take place when ipa_wwan_remove is called */
+		return;
+	}
+	if (rc != 0) {
+		IPAWANERR("qmi_init_modem_send_sync_msg failed\n");
+		/*
+		 * This is a very unexpected scenario, which requires a kernel
+		 * panic in order to force dumps for QMI/Q6 side analysis.
+		 */
+		BUG();
+		return;
+	}
+	qmi_modem_init_fin = true;
+
+	/* In cold-bootup, first_time_handshake = false */
+	ipa_q6_handshake_complete(first_time_handshake);
+	first_time_handshake = true;
+
+	IPAWANDBG("complete, qmi_modem_init_fin : %d\n",
+		qmi_modem_init_fin);
+
+	if (qmi_indication_fin)	{
+		IPAWANDBG("send indication to modem (%d)\n",
+		qmi_indication_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind(ipa_svc_handle, curr_conn,
+			&ipa_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+		IPAWANDBG("ipa_qmi_service_client good\n");
+	} else {
+		IPAWANERR("not send indication (%d)\n",
+		qmi_indication_fin);
+	}
+}
+
+
+static void ipa_q6_clnt_svc_exit(struct work_struct *work)
+{
+	mutex_lock(&ipa_qmi_lock);
+
+	if (ipa_q6_clnt)
+		qmi_handle_destroy(ipa_q6_clnt);
+	ipa_q6_clnt_reset = 1;
+	ipa_q6_clnt = NULL;
+
+	mutex_unlock(&ipa_qmi_lock);
+}
+
+
+static int ipa_q6_clnt_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	IPAWANDBG("event %ld\n", code);
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		if (!atomic_read(&workqueues_stopped))
+			queue_delayed_work(ipa_clnt_req_workqueue,
+					   &work_svc_arrive, 0);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+
+static struct notifier_block ipa_q6_clnt_nb = {
+	.notifier_call = ipa_q6_clnt_svc_event_notify,
+};
+
+static void ipa_qmi_service_init_worker(void)
+{
+	int rc;
+
+	/* Initialize the QMI service */
+	IPAWANDBG("IPA A7 QMI service init\n");
+
+	/* start the QMI msg cache */
+	ipa_qmi_ctx = vzalloc(sizeof(*ipa_qmi_ctx));
+	if (!ipa_qmi_ctx) {
+		IPAWANERR(":kzalloc err.\n");
+		return;
+	}
+	ipa_qmi_ctx->modem_cfg_emb_pipe_flt =
+		ipa2_get_modem_cfg_emb_pipe_flt();
+
+	ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc");
+	if (!ipa_svc_workqueue) {
+		IPAWANERR("Creating ipa_A7_svc workqueue failed\n");
+		vfree(ipa_qmi_ctx);
+		ipa_qmi_ctx = NULL;
+		return;
+	}
+
+	ipa_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL);
+	if (!ipa_svc_handle) {
+		IPAWANERR("Creating ipa_A7_svc qmi handle failed\n");
+		goto destroy_ipa_A7_svc_wq;
+	}
+
+	/*
+	 * Setting the current connection to NULL, as due to a race between
+	 * server and client clean-up in SSR, the disconnect_cb might not
+	 * have necessarily been called
+	 */
+	curr_conn = NULL;
+
+	rc = qmi_svc_register(ipa_svc_handle, &ipa_a5_svc_ops_options);
+	if (rc < 0) {
+		IPAWANERR("Registering ipa_a5 svc failed %d\n",
+				rc);
+		goto destroy_qmi_handle;
+	}
+
+	/* Initialize QMI-client */
+
+	ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req");
+	if (!ipa_clnt_req_workqueue) {
+		IPAWANERR("Creating clnt_req workqueue failed\n");
+		goto deregister_qmi_srv;
+	}
+
+	ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp");
+	if (!ipa_clnt_resp_workqueue) {
+		IPAWANERR("Creating clnt_resp workqueue failed\n");
+		goto destroy_clnt_req_wq;
+	}
+
+	rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID,
+				IPA_Q6_SVC_VERS,
+				IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb);
+	if (rc < 0) {
+		IPAWANERR("notifier register failed\n");
+		goto destroy_clnt_resp_wq;
+	}
+
+	atomic_set(&ipa_qmi_initialized, 1);
+	/* wait for the Q6 service, then send the modem-init request */
+	IPAWANDBG("waiting for Q6 service\n");
+	return;
+
+destroy_clnt_resp_wq:
+	destroy_workqueue(ipa_clnt_resp_workqueue);
+	ipa_clnt_resp_workqueue = NULL;
+destroy_clnt_req_wq:
+	destroy_workqueue(ipa_clnt_req_workqueue);
+	ipa_clnt_req_workqueue = NULL;
+deregister_qmi_srv:
+	qmi_svc_unregister(ipa_svc_handle);
+destroy_qmi_handle:
+	qmi_handle_destroy(ipa_svc_handle);
+	ipa_svc_handle = 0;
+destroy_ipa_A7_svc_wq:
+	destroy_workqueue(ipa_svc_workqueue);
+	ipa_svc_workqueue = NULL;
+	vfree(ipa_qmi_ctx);
+	ipa_qmi_ctx = NULL;
+}
+
+int ipa_qmi_service_init(uint32_t wan_platform_type)
+{
+	ipa_wan_platform = wan_platform_type;
+	qmi_modem_init_fin = false;
+	qmi_indication_fin = false;
+	atomic_set(&workqueues_stopped, 0);
+
+	if (atomic_read(&ipa_qmi_initialized) == 0)
+		ipa_qmi_service_init_worker();
+	return 0;
+}
+
+void ipa_qmi_service_exit(void)
+{
+	int ret = 0;
+
+	atomic_set(&workqueues_stopped, 1);
+
+	/* qmi-service */
+	if (ipa_svc_handle) {
+		ret = qmi_svc_unregister(ipa_svc_handle);
+		if (ret < 0)
+			IPAWANERR("unregister qmi handle %p failed, ret=%d\n",
+			ipa_svc_handle, ret);
+	}
+	if (ipa_svc_workqueue) {
+		flush_workqueue(ipa_svc_workqueue);
+		destroy_workqueue(ipa_svc_workqueue);
+		ipa_svc_workqueue = NULL;
+	}
+
+	if (ipa_svc_handle) {
+		ret = qmi_handle_destroy(ipa_svc_handle);
+		if (ret < 0)
+			IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
+			ipa_svc_handle, ret);
+	}
+	ipa_svc_handle = 0;
+
+	/* qmi-client */
+
+	/* Unregister from events */
+	ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
+				IPA_Q6_SVC_VERS,
+				IPA_Q6_SERVICE_INS_ID, &ipa_q6_clnt_nb);
+	if (ret < 0)
+		IPAWANERR(
+		"Error qmi_svc_event_notifier_unregister service %d, ret=%d\n",
+		IPA_Q6_SERVICE_SVC_ID, ret);
+
+	/* Release client handle */
+	ipa_q6_clnt_svc_exit(0);
+
+	if (ipa_clnt_req_workqueue) {
+		destroy_workqueue(ipa_clnt_req_workqueue);
+		ipa_clnt_req_workqueue = NULL;
+	}
+	if (ipa_clnt_resp_workqueue) {
+		destroy_workqueue(ipa_clnt_resp_workqueue);
+		ipa_clnt_resp_workqueue = NULL;
+	}
+
+	mutex_lock(&ipa_qmi_lock);
+	/* clean the QMI msg cache */
+	if (ipa_qmi_ctx != NULL) {
+		vfree(ipa_qmi_ctx);
+		ipa_qmi_ctx = NULL;
+	}
+	mutex_unlock(&ipa_qmi_lock);
+	qmi_modem_init_fin = false;
+	qmi_indication_fin = false;
+	atomic_set(&ipa_qmi_initialized, 0);
+}
+
+void ipa_qmi_stop_workqueues(void)
+{
+	IPAWANDBG("Stopping all QMI workqueues\n");
+
+	/* Stopping all workqueues so new work won't be scheduled */
+	atomic_set(&workqueues_stopped, 1);
+
+	/* Cancel pending work that has not started executing yet */
+	cancel_delayed_work(&work_recv_msg);
+	cancel_delayed_work(&work_recv_msg_client);
+	cancel_delayed_work(&work_svc_arrive);
+	cancel_delayed_work(&work_svc_exit);
+}
+
+/* voting for bus BW to ipa_rm*/
+int vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	struct ipa_rm_perf_profile profile;
+	int ret;
+
+	if (bw_mbps == NULL) {
+		IPAWANERR("Bus BW is invalid\n");
+		return -EINVAL;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = *bw_mbps;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	if (ret)
+		IPAWANERR("Failed to set perf profile to BW %u\n",
+			profile.max_supported_bandwidth_mbps);
+	else
+		IPAWANDBG("Succeeded to set perf profile to BW %u\n",
+			profile.max_supported_bandwidth_mbps);
+
+	return ret;
+}
+
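+/*
+ * Illustrative call (hypothetical value): a caller votes for Q6 producer
+ * bandwidth before a burst of tethered traffic, e.g.
+ *
+ *	uint32_t bw_mbps = 800;
+ *
+ *	if (vote_for_bus_bw(&bw_mbps))
+ *		IPAWANERR("bus BW vote failed\n");
+ */
+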
+int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+			   struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa_get_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_data_stats_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_data_stats_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n");
+
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_data_stats_resp_msg_v01");
+}
+
+int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+			      struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa_get_apn_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n");
+
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01");
+}
+
+int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	struct ipa_set_data_usage_quota_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa_set_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01");
+}
+
+int ipa_qmi_stop_data_qouta(void)
+{
+	struct ipa_stop_data_usage_quota_req_msg_v01 req;
+	struct ipa_stop_data_usage_quota_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa_stop_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+		&resp_desc, &resp, sizeof(resp),
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa_check_qmi_response(rc,
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
+}
+
+void ipa_qmi_init(void)
+{
+	mutex_init(&ipa_qmi_lock);
+}
+
+void ipa_qmi_cleanup(void)
+{
+	mutex_destroy(&ipa_qmi_lock);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
new file mode 100644
index 0000000..7793fc0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.h
@@ -0,0 +1,280 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_QMI_SERVICE_H
+#define IPA_QMI_SERVICE_H
+
+#include <linux/ipa.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_i.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+/**
+ * Name of the A7 QMAP header and of the DL WWAN default routing table
+ * (the same routing table name is used for v4 and v6)
+ */
+#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
+#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
+#define MAX_NUM_Q6_RULE 35
+#define MAX_NUM_QMI_RULE_CACHE 10
+#define DEV_NAME "ipa-wan"
+#define SUBSYS_MODEM "modem"
+
+#define IPAWANDBG(fmt, args...) \
+	pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+#define IPAWANERR(fmt, args...) \
+	pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+extern struct ipa_qmi_context *ipa_qmi_ctx;
+extern struct mutex ipa_qmi_lock;
+
+struct ipa_qmi_context {
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
+		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
+		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
+};
+
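+/*
+ * The two *_req_msg_cache arrays act as small rings: each send path copies
+ * the request into the next slot and wraps the counter with
+ * MAX_NUM_QMI_RULE_CACHE, so the last ten install / installed-notify
+ * requests stay available (e.g. for re-sending after modem SSR). A
+ * hypothetical reader of the newest cached install request:
+ *
+ *	idx = (ipa_qmi_ctx->num_ipa_install_fltr_rule_req_msg +
+ *		MAX_NUM_QMI_RULE_CACHE - 1) % MAX_NUM_QMI_RULE_CACHE;
+ *	req = &ipa_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[idx];
+ */
+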
+struct rmnet_mux_val {
+	uint32_t  mux_id;
+	int8_t    vchannel_name[IFNAMSIZ];
+	bool mux_channel_set;
+	bool ul_flt_reg;
+	bool mux_hdr_set;
+	uint32_t  hdr_hdl;
+};
+
+extern struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[];
+extern struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_indication_reg_req_msg_data_v01_ei[];
+extern struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[];
+extern struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[];
+extern struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[];
+extern struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_config_req_msg_data_v01_ei[];
+extern struct elem_info ipa_config_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[];
+
+/**
+ * struct ipa_rmnet_context - IPA rmnet context
+ * @ipa_rmnet_ssr: support modem SSR
+ * @polling_interval: Requested interval for polling tethered statistics
+ * @metered_mux_id: The mux ID on which quota has been set
+ */
+struct ipa_rmnet_context {
+	bool ipa_rmnet_ssr;
+	u64 polling_interval;
+	u32 metered_mux_id;
+};
+
+extern struct ipa_rmnet_context ipa_rmnet_ctx;
+
+#ifdef CONFIG_RMNET_IPA
+
+int ipa_qmi_service_init(uint32_t wan_platform_type);
+
+void ipa_qmi_service_exit(void);
+
+/* sending filter-install-request to modem */
+int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req);
+
+/* sending filter-installed-notify-request to modem */
+int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req);
+
+/* voting for bus BW to ipa_rm */
+int vote_for_bus_bw(uint32_t *bw_mbps);
+
+int qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+int qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+	*rule_req, uint32_t *rule_hdl);
+
+int wwan_update_mux_channel_prop(void);
+
+int wan_ioctl_init(void);
+
+void wan_ioctl_stop_qmi_messages(void);
+
+void wan_ioctl_enable_qmi_messages(void);
+
+void wan_ioctl_deinit(void);
+
+void ipa_qmi_stop_workqueues(void);
+
+int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data);
+
+int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data);
+
+void ipa_broadcast_quota_reach_ind(uint32_t mux_id);
+
+int rmnet_ipa_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
+	*data);
+
+int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset);
+
+int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp);
+
+int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
+
+int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
+
+int ipa_qmi_stop_data_qouta(void);
+
+void ipa_q6_handshake_complete(bool ssr_bootup);
+
+void ipa_qmi_init(void);
+
+void ipa_qmi_cleanup(void);
+
+#else /* CONFIG_RMNET_IPA */
+
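+/*
+ * Stub implementations for builds without CONFIG_RMNET_IPA: functions that
+ * return a status fail with -EPERM, the void helpers are no-ops.
+ */
+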
+static inline int ipa_qmi_service_init(uint32_t wan_platform_type)
+{
+	return -EPERM;
+}
+
+static inline void ipa_qmi_service_exit(void) { }
+
+/* sending filter-install-request to modem */
+static inline int qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+/* sending filter-installed-notify-request to modem */
+static inline int qmi_filter_notify_send(
+	struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int copy_ul_filter_rule_to_ipa(
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req, uint32_t *rule_hdl)
+{
+	return -EPERM;
+}
+
+static inline int wwan_update_mux_channel_prop(void)
+{
+	return -EPERM;
+}
+
+static inline int wan_ioctl_init(void)
+{
+	return -EPERM;
+}
+
+static inline void wan_ioctl_stop_qmi_messages(void) { }
+
+static inline void wan_ioctl_enable_qmi_messages(void) { }
+
+static inline void wan_ioctl_deinit(void) { }
+
+static inline void ipa_qmi_stop_workqueues(void) { }
+
+static inline int vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa_poll_tethering_stats(
+	struct wan_ioctl_poll_tethering_stats *data)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa_set_data_quota(
+	struct wan_ioctl_set_data_quota *data)
+{
+	return -EPERM;
+}
+
+static inline void ipa_broadcast_quota_reach_ind(uint32_t mux_id) { }
+
+static inline int ipa_qmi_get_data_stats(
+	struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa_qmi_get_network_stats(
+	struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa_qmi_set_data_quota(
+	struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa_qmi_stop_data_qouta(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa_q6_handshake_complete(bool ssr_bootup) { }
+
+static inline void ipa_qmi_init(void)
+{
+}
+
+static inline void ipa_qmi_cleanup(void)
+{
+}
+
+#endif /* CONFIG_RMNET_IPA */
+
+#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
new file mode 100644
index 0000000..dd59140
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service_v01.c
@@ -0,0 +1,2366 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+/* Type Definitions */
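+/*
+ * Each elem_info entry below describes one field of the corresponding
+ * *_v01 structure for the QMI encode/decode core: data_type is the wire
+ * type, elem_len the (maximum) element count, elem_size the in-memory size
+ * of a single element, tlv_type the TLV tag, offset the field's offsetof()
+ * within the C structure, and ei_array a nested descriptor table used for
+ * QMI_STRUCT members.
+ */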
+static struct elem_info ipa_hdr_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_route_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					route_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					num_indices),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_modem_mem_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					block_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_zip_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_ipfltr_range_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_low),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_high),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_ipfltr_mask_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_32_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_ipfltr_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_16_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_ipfltr_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_ipfltr_mask_eq_128_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_rule_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			rule_eq_bitmap),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			tos_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tos_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_range_16),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_range_eq_16_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_range_16),
+		.ei_array	= ipa_ipfltr_range_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					offset_meq_32),
+		.ei_array	= ipa_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_16_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16),
+		.ei_array	= ipa_ipfltr_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_32_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32),
+		.ei_array	= ipa_ipfltr_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_meq_32),
+		.ei_array	= ipa_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_128),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	=
+			QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_mask_eq_128_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			offset_meq_128),
+		.ei_array	= ipa_ipfltr_mask_eq_128_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32),
+		.ei_array	= ipa_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ipv4_frag_eq_present),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_spec_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_rule),
+		.ei_array	= ipa_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info
+	ipa_filter_rule_identifier_to_handle_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_handle_to_index_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_index),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_init_modem_driver_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_hdr_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info),
+		.ei_array	= ipa_hdr_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info),
+		.ei_array	= ipa_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info),
+		.ei_array	= ipa_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_modem_mem_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info),
+		.ei_array	= ipa_modem_mem_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info),
+		.ei_array	= ipa_hdr_proc_ctx_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_zip_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info),
+		.ei_array	= ipa_zip_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_init_modem_driver_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_indication_reg_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_indication_reg_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_indication_reg_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_master_driver_init_complt_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+			ipa_master_driver_init_complt_ind_msg_v01,
+			master_driver_init_status),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
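+/*
+ * Optional TLVs follow the usual QMI pattern: a QMI_OPT_FLAG "<field>_valid"
+ * entry shares its tlv_type with the payload entry that follows it, and
+ * variable-length arrays pair a QMI_DATA_LEN "<field>_len" entry with a
+ * VAR_LEN_ARRAY payload entry.
+ */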
+struct elem_info ipa_install_fltr_rule_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list),
+		.ei_array	= ipa_filter_spec_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_install_fltr_rule_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list),
+		.ei_array	=
+			ipa_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_fltr_installed_notif_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			install_status),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_handle_to_index_map_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list),
+		.ei_array	= ipa_filter_handle_to_index_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_fltr_installed_notif_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_enable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			source_pipe_bitmask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_enable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_disable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_disable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_config_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			max_aggr_frame_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+					max_aggr_frame_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_config_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_config_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_get_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_pipe_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_stats_type_filter_rule_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					filter_rule_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					num_packets),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_get_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list),
+		.ei_array	= ipa_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list),
+		.ei_array	= ipa_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct
+					ipa_stats_type_filter_rule_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list),
+		.ei_array	= ipa_stats_type_filter_rule_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_apn_data_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_get_apn_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_get_apn_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_apn_data_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list),
+		.ei_array	= ipa_apn_data_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_data_usage_quota_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					num_Mbytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_set_data_usage_quota_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list),
+		.ei_array	= ipa_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_set_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_data_usage_quota_reached_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01,
+			apn),
+		.ei_array	= ipa_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_stop_data_usage_quota_req_msg_data_v01_ei[] = {
+	/* ipa_stop_data_usage_quota_req_msg is empty */
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa_stop_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_stop_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h
new file mode 100644
index 0000000..56ada21b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_ram_mmap.h
@@ -0,0 +1,560 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_RAM_MMAP_H_
+#define _IPA_RAM_MMAP_H_
+
+/*
+ * This header defines the memory map of the IPA RAM (not all of the SRAM
+ * is available for SW use).
+ * In case of restricted bytes, the actual starting address will be
+ * advanced by the number of restricted bytes.
+ */
+
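+/*
+ * Illustrative example (IPA_MEM_PART() is assumed to resolve to the
+ * offsets defined in this file): the routing-table commit code in
+ * ipa_rt.c programs a local table address as
+ *
+ *	local_addr = ipa_ctx->smem_restricted_bytes +
+ *		IPA_MEM_PART(v4_rt_ofst) + ...;
+ *
+ * i.e. the offsets below are advanced by the restricted bytes before
+ * being written to HW.
+ */
+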
+#define IPA_RAM_NAT_OFST    0
+#define IPA_RAM_NAT_SIZE    0
+
+#define IPA_MEM_v1_RAM_HDR_OFST    (IPA_RAM_NAT_OFST + IPA_RAM_NAT_SIZE)
+#define IPA_MEM_v1_RAM_HDR_SIZE    1664
+#define IPA_MEM_v1_RAM_V4_FLT_OFST (IPA_MEM_v1_RAM_HDR_OFST +\
+	IPA_MEM_v1_RAM_HDR_SIZE)
+#define IPA_MEM_v1_RAM_V4_FLT_SIZE 2176
+#define IPA_MEM_v1_RAM_V4_RT_OFST  (IPA_MEM_v1_RAM_V4_FLT_OFST +\
+	IPA_MEM_v1_RAM_V4_FLT_SIZE)
+#define IPA_MEM_v1_RAM_V4_RT_SIZE  512
+#define IPA_MEM_v1_RAM_V6_FLT_OFST (IPA_MEM_v1_RAM_V4_RT_OFST +\
+	IPA_MEM_v1_RAM_V4_RT_SIZE)
+#define IPA_MEM_v1_RAM_V6_FLT_SIZE 1792
+#define IPA_MEM_v1_RAM_V6_RT_OFST  (IPA_MEM_v1_RAM_V6_FLT_OFST +\
+	IPA_MEM_v1_RAM_V6_FLT_SIZE)
+#define IPA_MEM_v1_RAM_V6_RT_SIZE  512
+#define IPA_MEM_v1_RAM_END_OFST    (IPA_MEM_v1_RAM_V6_RT_OFST +\
+	IPA_MEM_v1_RAM_V6_RT_SIZE)
+
+#define IPA_MEM_RAM_V6_RT_SIZE_DDR 16384
+#define IPA_MEM_RAM_V4_RT_SIZE_DDR 16384
+#define IPA_MEM_RAM_V6_FLT_SIZE_DDR 16384
+#define IPA_MEM_RAM_V4_FLT_SIZE_DDR 16384
+#define IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR 0
+
+#define IPA_MEM_CANARY_SIZE 4
+#define IPA_MEM_CANARY_VAL 0xdeadbeef
+
+#define IPA_MEM_RAM_MODEM_NETWORK_STATS_SIZE 256
+/*
+ * IPA v2.0 and v2.1 SRAM memory layout:
+ * +-------------+
+ * | V4 FLT HDR  |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * | V6 FLT HDR  |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * | V4 RT HDR   |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * | V6 RT HDR   |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * |  MODEM HDR  |
+ * +-------------+
+ * |  APPS  HDR  |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * |  MODEM MEM  |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * | APPS V4 FLT |
+ * +-------------+
+ * | APPS V6 FLT |
+ * +-------------+
+ * |    CANARY   |
+ * +-------------+
+ * |   UC INFO   |
+ * +-------------+
+ */
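+/*
+ * Illustrative example of how the offsets below chain: the V6 filter
+ * table starts right after the V4 filter table plus two canaries,
+ * 128 + 88 + 2 * 4 = 224, which passes the 8B-alignment check that
+ * guards each region.
+ */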
+#define IPA_MEM_v2_RAM_OFST_START 128
+#define IPA_MEM_v2_RAM_V4_FLT_OFST IPA_MEM_v2_RAM_OFST_START
+#define IPA_MEM_v2_RAM_V4_FLT_SIZE 88
+
+/* V4 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V4_FLT_OFST & 7)
+#error V4 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_V6_FLT_OFST (IPA_MEM_v2_RAM_V4_FLT_OFST + \
+		IPA_MEM_v2_RAM_V4_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_V6_FLT_SIZE 88
+
+/* V6 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V6_FLT_OFST & 7)
+#error V6 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_V4_RT_OFST (IPA_MEM_v2_RAM_V6_FLT_OFST + \
+		IPA_MEM_v2_RAM_V6_FLT_SIZE + 2*IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_V4_NUM_INDEX 11
+#define IPA_MEM_v2_V4_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_V4_MODEM_RT_INDEX_HI 3
+#define IPA_MEM_v2_V4_APPS_RT_INDEX_LO 4
+#define IPA_MEM_v2_V4_APPS_RT_INDEX_HI 10
+#define IPA_MEM_v2_RAM_V4_RT_SIZE (IPA_MEM_v2_RAM_V4_NUM_INDEX * 4)
+
+/* V4 routing header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V4_RT_OFST & 7)
+#error V4 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_V6_RT_OFST (IPA_MEM_v2_RAM_V4_RT_OFST + \
+		IPA_MEM_v2_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_V6_NUM_INDEX 11
+#define IPA_MEM_v2_V6_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_V6_MODEM_RT_INDEX_HI 3
+#define IPA_MEM_v2_V6_APPS_RT_INDEX_LO 4
+#define IPA_MEM_v2_V6_APPS_RT_INDEX_HI 10
+#define IPA_MEM_v2_RAM_V6_RT_SIZE (IPA_MEM_v2_RAM_V6_NUM_INDEX * 4)
+
+/* V6 routing header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_V6_RT_OFST & 7)
+#error V6 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_MODEM_HDR_OFST (IPA_MEM_v2_RAM_V6_RT_OFST + \
+		IPA_MEM_v2_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_MODEM_HDR_SIZE 320
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_MODEM_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_APPS_HDR_OFST (IPA_MEM_v2_RAM_MODEM_HDR_OFST + \
+		IPA_MEM_v2_RAM_MODEM_HDR_SIZE)
+#define IPA_MEM_v2_RAM_APPS_HDR_SIZE 72
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_RAM_APPS_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_MODEM_OFST (IPA_MEM_v2_RAM_APPS_HDR_OFST + \
+		IPA_MEM_v2_RAM_APPS_HDR_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_MODEM_SIZE 3532
+
+/* modem memory is 4B aligned */
+#if (IPA_MEM_v2_RAM_MODEM_OFST & 3)
+#error modem memory is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_RAM_MODEM_OFST + \
+		IPA_MEM_v2_RAM_MODEM_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE 1920
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_RAM_APPS_V4_FLT_OFST + \
+		IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE)
+#define IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE 1372
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_UC_INFO_OFST (IPA_MEM_v2_RAM_APPS_V6_FLT_OFST + \
+		IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_RAM_UC_INFO_SIZE 292
+
+/* uC info is 4B aligned */
+#if (IPA_MEM_v2_RAM_UC_INFO_OFST & 3)
+#error uC info is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_RAM_END_OFST (IPA_MEM_v2_RAM_UC_INFO_OFST + \
+		IPA_MEM_v2_RAM_UC_INFO_SIZE)
+#define IPA_MEM_v2_RAM_APPS_V4_RT_OFST IPA_MEM_v2_RAM_END_OFST
+#define IPA_MEM_v2_RAM_APPS_V4_RT_SIZE 0
+#define IPA_MEM_v2_RAM_APPS_V6_RT_OFST IPA_MEM_v2_RAM_END_OFST
+#define IPA_MEM_v2_RAM_APPS_V6_RT_SIZE 0
+#define IPA_MEM_v2_RAM_HDR_SIZE_DDR 4096
+
+/*
+ * IPA v2.5/v2.6 SRAM memory layout:
+ * +----------------+
+ * |    UC INFO     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V4 FLT HDR     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V6 FLT HDR     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V4 RT HDR      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V6 RT HDR      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |  MODEM HDR     |
+ * +----------------+
+ * |  APPS  HDR     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | MODEM PROC CTX |
+ * +----------------+
+ * | APPS PROC CTX  |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |  MODEM MEM     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ */
+
+#define IPA_MEM_v2_5_RAM_UC_MEM_SIZE 128
+#define IPA_MEM_v2_5_RAM_UC_INFO_OFST IPA_MEM_v2_5_RAM_UC_MEM_SIZE
+#define IPA_MEM_v2_5_RAM_UC_INFO_SIZE 512
+
+/* uC info is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_UC_INFO_OFST & 3)
+#error uC info is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_OFST_START (IPA_MEM_v2_5_RAM_UC_INFO_OFST + \
+	IPA_MEM_v2_5_RAM_UC_INFO_SIZE)
+
+#define IPA_MEM_v2_5_RAM_V4_FLT_OFST (IPA_MEM_v2_5_RAM_OFST_START + \
+	2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V4_FLT_SIZE 88
+
+/* V4 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V4_FLT_OFST & 7)
+#error V4 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_V6_FLT_OFST (IPA_MEM_v2_5_RAM_V4_FLT_OFST + \
+	IPA_MEM_v2_5_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V6_FLT_SIZE 88
+
+/* V6 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V6_FLT_OFST & 7)
+#error V6 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_V4_RT_OFST (IPA_MEM_v2_5_RAM_V6_FLT_OFST + \
+	IPA_MEM_v2_5_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V4_NUM_INDEX 15
+#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO \
+					(IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI \
+					(IPA_MEM_v2_5_RAM_V4_NUM_INDEX - 1)
+#define IPA_MEM_v2_5_RAM_V4_RT_SIZE (IPA_MEM_v2_5_RAM_V4_NUM_INDEX * 4)
+
+/* V4 routing header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V4_RT_OFST & 7)
+#error V4 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_V6_RT_OFST (IPA_MEM_v2_5_RAM_V4_RT_OFST + \
+	IPA_MEM_v2_5_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_V6_NUM_INDEX 15
+#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO \
+					(IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI \
+					(IPA_MEM_v2_5_RAM_V6_NUM_INDEX - 1)
+#define IPA_MEM_v2_5_RAM_V6_RT_SIZE (IPA_MEM_v2_5_RAM_V6_NUM_INDEX * 4)
+
+/* V6 routing header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_V6_RT_OFST & 7)
+#error V6 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_OFST (IPA_MEM_v2_5_RAM_V6_RT_OFST + \
+	IPA_MEM_v2_5_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE 320
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_HDR_OFST (IPA_MEM_v2_5_RAM_MODEM_HDR_OFST + \
+	IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_HDR_SIZE 0
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST \
+	(IPA_MEM_v2_5_RAM_APPS_HDR_OFST + IPA_MEM_v2_5_RAM_APPS_HDR_SIZE + \
+	2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE 512
+
+/* header processing context table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST & 7)
+#error header processing context table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST \
+	(IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST + \
+	IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE 512
+
+/* header processing context table is 8B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST & 7)
+#error header processing context table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_MODEM_OFST (IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST + \
+	IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_MODEM_SIZE 5800
+
+/* modem memory is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_MODEM_OFST & 3)
+#error modem memory is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_5_RAM_MODEM_OFST + \
+	IPA_MEM_v2_5_RAM_MODEM_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST (IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST + \
+	IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_5_RAM_END_OFST (IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST + \
+	IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST IPA_MEM_v2_5_RAM_END_OFST
+#define IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE 0
+#define IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST IPA_MEM_v2_5_RAM_END_OFST
+#define IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE 0
+#define IPA_MEM_v2_5_RAM_HDR_SIZE_DDR 2048
+
+/*
+ * IPA v2.6Lite SRAM memory layout:
+ * +----------------+
+ * |   UC INFO      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V4 FLT HDR     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V6 FLT HDR     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V4 RT HDR      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | V6 RT HDR      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |  MODEM HDR     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * | COMP / DECOMP  |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ * |  MODEM MEM     |
+ * +----------------+
+ * |    CANARY      |
+ * +----------------+
+ */
+
+#define IPA_MEM_v2_6L_RAM_UC_MEM_SIZE 128
+#define IPA_MEM_v2_6L_RAM_UC_INFO_OFST IPA_MEM_v2_6L_RAM_UC_MEM_SIZE
+#define IPA_MEM_v2_6L_RAM_UC_INFO_SIZE 512
+
+/* uC info is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_UC_INFO_OFST & 3)
+#error uC info is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_OFST_START (IPA_MEM_v2_6L_RAM_UC_INFO_OFST + \
+	IPA_MEM_v2_6L_RAM_UC_INFO_SIZE)
+
+#define IPA_MEM_v2_6L_RAM_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_OFST_START + \
+	2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V4_FLT_SIZE 88
+
+/* V4 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V4_FLT_OFST & 7)
+#error V4 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_V6_FLT_OFST (IPA_MEM_v2_6L_RAM_V4_FLT_OFST + \
+	IPA_MEM_v2_6L_RAM_V4_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V6_FLT_SIZE 88
+
+/* V6 filtering header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V6_FLT_OFST & 7)
+#error V6 filtering header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_V4_RT_OFST (IPA_MEM_v2_6L_RAM_V6_FLT_OFST + \
+	IPA_MEM_v2_6L_RAM_V6_FLT_SIZE + 2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V4_NUM_INDEX 15
+#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO \
+	(IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI \
+	(IPA_MEM_v2_6L_RAM_V4_NUM_INDEX - 1)
+#define IPA_MEM_v2_6L_RAM_V4_RT_SIZE (IPA_MEM_v2_6L_RAM_V4_NUM_INDEX * 4)
+
+/* V4 routing header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V4_RT_OFST & 7)
+#error V4 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_V6_RT_OFST (IPA_MEM_v2_6L_RAM_V4_RT_OFST + \
+	IPA_MEM_v2_6L_RAM_V4_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_V6_NUM_INDEX 15
+#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO 0
+#define IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI 6
+#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO \
+	(IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI + 1)
+#define IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI \
+	(IPA_MEM_v2_6L_RAM_V6_NUM_INDEX - 1)
+#define IPA_MEM_v2_6L_RAM_V6_RT_SIZE (IPA_MEM_v2_6L_RAM_V6_NUM_INDEX * 4)
+
+/* V6 routing header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_V6_RT_OFST & 7)
+#error V6 routing header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST (IPA_MEM_v2_6L_RAM_V6_RT_OFST + \
+	IPA_MEM_v2_6L_RAM_V6_RT_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE 320
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_APPS_HDR_OFST (IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST + \
+	IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE)
+#define IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE 0
+
+/* header table is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_APPS_HDR_OFST & 7)
+#error header table is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST \
+	(IPA_MEM_v2_6L_RAM_APPS_HDR_OFST + IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE + \
+	2 * IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE 512
+
+/* comp/decomp memory region is 8B aligned */
+#if (IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST & 7)
+#error comp/decomp memory region is not 8B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_MODEM_OFST \
+	(IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST + \
+	IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE + IPA_MEM_CANARY_SIZE)
+#define IPA_MEM_v2_6L_RAM_MODEM_SIZE 6376
+
+/* modem memory is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_MODEM_OFST & 3)
+#error modem memory is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST (IPA_MEM_v2_6L_RAM_MODEM_OFST + \
+	IPA_MEM_v2_6L_RAM_MODEM_SIZE)
+#define IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST \
+	(IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST + \
+	IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE)
+#define IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE 0
+
+/* filtering rule is 4B aligned */
+#if (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST & 3)
+#error filtering rule is not 4B aligned
+#endif
+
+#define IPA_MEM_v2_6L_RAM_END_OFST (IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST + \
+	IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE + IPA_MEM_CANARY_SIZE)
+
+#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST
+#define IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE 0
+#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST IPA_MEM_v2_6L_RAM_END_OFST
+#define IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE 0
+#define IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR 2048
+
+#endif /* _IPA_RAM_MMAP_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h
new file mode 100644
index 0000000..6487a2f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_reg.h
@@ -0,0 +1,319 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __IPA_REG_H__
+#define __IPA_REG_H__
+
+/*
+ * IPA's BAM specific registers
+ * Used for IPA HW 1.0 only
+ */
+
+#define IPA_BAM_REG_BASE_OFST 0x00004000
+#define IPA_BAM_CNFG_BITS_OFST 0x7c
+#define IPA_BAM_REMAP_SIZE (0x1000)
+
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+#define IPA_AGGREGATION_QCNCM_SIG0_SHFT 16
+#define IPA_AGGREGATION_QCNCM_SIG1_SHFT 8
+
+#define IPA_AGGREGATION_SPARE_REG_1_OFST 0x00002090
+#define IPA_AGGREGATION_SPARE_REG_2_OFST 0x00002094
+
+#define IPA_AGGREGATION_SINGLE_NDP_MSK 0x1
+#define IPA_AGGREGATION_SINGLE_NDP_BMSK 0xfffffffe
+
+#define IPA_AGGREGATION_MODE_MSK 0x1
+#define IPA_AGGREGATION_MODE_SHFT 31
+#define IPA_AGGREGATION_MODE_BMSK 0x7fffffff
+
+#define IPA_AGGREGATION_QCNCM_SIG_BMSK 0xff000000
+
+#define IPA_FILTER_FILTER_EN_BMSK 0x1
+#define IPA_FILTER_FILTER_EN_SHFT 0x0
+
+#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT 2
+#define IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK 0x4
+
+#define IPA_HEAD_OF_LINE_BLOCK_EN_OFST 0x00000044
+
+/*
+ * End of IPA 1.0 Registers
+ */
+
+
+/*
+ * IPA HW 2.0 Registers
+ */
+#define IPA_REG_BASE 0x0
+
+#define IPA_IRQ_STTS_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001008 + 0x1000 * (n))
+#define IPA_IRQ_STTS_EE_n_MAXn 3
+
+#define IPA_IRQ_EN_EE_n_ADDR(n) (IPA_REG_BASE + 0x0000100c + 0x1000 * (n))
+#define IPA_IRQ_EN_EE_n_MAXn 3
+
+
+#define IPA_IRQ_CLR_EE_n_ADDR(n) (IPA_REG_BASE + 0x00001010 + 0x1000 * (n))
+#define IPA_IRQ_CLR_EE_n_MAXn 3
+
+#define IPA_IRQ_SUSPEND_INFO_EE_n_ADDR(n) \
+				(IPA_REG_BASE + 0x00001098 + 0x1000 * (n))
+#define IPA_IRQ_SUSPEND_INFO_EE_n_MAXn 3
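+
+/*
+ * Illustrative example: the per-EE registers above are spaced 0x1000
+ * apart, so IPA_IRQ_STTS_EE_n_ADDR(0) is IPA_REG_BASE + 0x1008 and
+ * IPA_IRQ_STTS_EE_n_ADDR(1) is IPA_REG_BASE + 0x2008, with n bounded by
+ * the corresponding *_MAXn value.
+ */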
+/*
+ * End of IPA 2.0 Registers
+ */
+
+/*
+ * IPA HW 2.5 Registers
+ */
+#define IPA_BCR_OFST 0x000005B0
+#define IPA_COUNTER_CFG_OFST 0x000005E8
+#define IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK 0xF
+#define IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT 0x0
+#define IPA_COUNTER_CFG_AGGR_GRAN_BMSK 0x1F0
+#define IPA_COUNTER_CFG_AGGR_GRAN_SHFT 0x4
+/*
+ * End of IPA 2.5 Registers
+ */
+
+/*
+ * IPA HW 2.6/2.6L Registers
+ */
+#define IPA_ENABLED_PIPES_OFST 0x000005DC
+#define IPA_YELLOW_MARKER_SYS_CFG_OFST 0x00000728
+/*
+ * End of IPA 2.6/2.6L Registers
+ */
+
+/*
+ * Common Registers
+ */
+#define IPA_REG_BASE_OFST_v2_0 0x00020000
+#define IPA_REG_BASE_OFST_v2_5 0x00040000
+#define IPA_REG_BASE_OFST_v2_6L IPA_REG_BASE_OFST_v2_5
+#define IPA_COMP_SW_RESET_OFST 0x0000003c
+
+#define IPA_VERSION_OFST 0x00000034
+#define IPA_COMP_HW_VERSION_OFST 0x00000030
+
+#define IPA_SHARED_MEM_SIZE_OFST_v1_1 0x00000050
+#define IPA_SHARED_MEM_SIZE_OFST_v2_0 0x00000050
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0  0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0  0x0
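+
+/*
+ * Illustrative example of how the *_BMSK/*_SHFT pairs in this file are
+ * meant to be used to extract a register field:
+ *
+ *	sram_size = (reg & IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0) >>
+ *		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0;
+ */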
+
+#define IPA_ENDP_INIT_AGGR_N_OFST_v1_1(n) (0x000001c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_AGGR_N_OFST_v2_0(n) (0x00000320 + 0x4 * (n))
+
+#define IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(n) (0x00000220 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(n) (0x00000370 + 0x4 * (n))
+#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT 0x0
+
+#define IPA_ROUTE_OFST_v1_1 0x00000044
+
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+
+#define IPA_FILTER_OFST_v1_1 0x00000048
+
+#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v1_1(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(n) (0x00005000 + 0x4 * (n))
+#define IPA_SRAM_DIRECT_ACCESS_N_OFST(n) (0x00004000 + 0x4 * (n))
+#define IPA_SRAM_SW_FIRST_v2_5 0x00005000
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0
+#define IPA_COMP_CFG_OFST 0x00000038
+
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x1
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK 0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT 0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_MODE_N_OFST_v1_1(n) (0x00000170 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_N_OFST_v2_0(n) (0x000002c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_MODE_N_RMSK 0x7f
+#define IPA_ENDP_INIT_MODE_N_MAX 19
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1 0x7c
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1 0x2
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0 0x1f0
+#define IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0 0x4
+#define IPA_ENDP_INIT_MODE_N_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_N_MODE_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_N_OFST_v1_1(n) (0x00000120 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_N_OFST_v2_0(n) (0x00000170 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK 0x1f80
+
+#define IPA_ENDP_INIT_NAT_N_OFST_v1_1(n) (0x000000c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_N_OFST_v2_0(n) (0x00000120 + 0x4 * (n))
+#define IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT 0x0
+
+
+#define IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(n) (0x000001c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0 0x1c00
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5 0x3c00
+
+
+
+/*
+ * IPA HW 1.1 specific Registers
+ */
+
+#define IPA_FILTER_FILTER_DIS_BMSK 0x1
+#define IPA_FILTER_FILTER_DIS_SHFT 0x0
+#define IPA_SINGLE_NDP_MODE_OFST 0x00000064
+#define IPA_QCNCM_OFST 0x00000060
+
+#define IPA_ENDP_INIT_CTRL_N_OFST(n) (0x00000070 + 0x4 * (n))
+#define IPA_ENDP_INIT_CTRL_N_RMSK 0x1
+#define IPA_ENDP_INIT_CTRL_N_MAX 19
+#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT 0x1
+
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(n) (0x00000270 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(n) (0x000003c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_N_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(n) (0x00000470 + 0x04 * (n))
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(n) (0x000002c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(n) (0x00000420 + 0x4 * (n))
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_RMSK 0x1ff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_BMSK 0x1ff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_TIMER_SHFT 0x0
+
+#define IPA_DEBUG_CNT_REG_N_OFST_v1_1(n) (0x00000340 + 0x4 * (n))
+#define IPA_DEBUG_CNT_REG_N_OFST_v2_0(n) (0x00000600 + 0x4 * (n))
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+#define IPA_DEBUG_CNT_CTRL_N_OFST_v1_1(n) (0x00000380 + 0x4 * (n))
+#define IPA_DEBUG_CNT_CTRL_N_OFST_v2_0(n) (0x00000640 + 0x4 * (n))
+#define IPA_DEBUG_CNT_CTRL_N_RMSK 0x1ff1f171
+#define IPA_DEBUG_CNT_CTRL_N_MAX 15
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_BMSK 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_N_DBG_CNT_EN_SHFT 0x0
+
+#define IPA_ENDP_STATUS_n_OFST(n) (0x000004c0 + 0x4 * (n))
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_CFG_n_OFST(n) (0x000000c0 + 0x4 * (n))
+#define IPA_ENDP_INIT_CFG_n_RMSK 0x7f
+#define IPA_ENDP_INIT_CFG_n_MAXn 19
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(n) (0x00000220 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_RMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_MAXn 19
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+#define IPA_ENDP_INIT_HDR_METADATA_n_OFST(n) (0x00000270 + 0x4 * (n))
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
+#define IPA_IRQ_EE_UC_n_OFFS(n) (0x0000101c + 0x1000 * (n))
+#define IPA_IRQ_EE_UC_n_RMSK 0x1
+#define IPA_IRQ_EE_UC_n_MAXn 3
+#define IPA_IRQ_EE_UC_n_INT_BMSK 0x1
+#define IPA_IRQ_EE_UC_n_INT_SHFT 0x0
+
+#define IPA_UC_MAILBOX_m_n_OFFS(m, n) (0x0001a000 + 0x80 * (m) + 0x4 * (n))
+#define IPA_UC_MAILBOX_m_n_OFFS_v2_5(m, n) (0x00022000 + 0x80 * (m) + 0x4 * (n))
+
+#define IPA_SYS_PKT_PROC_CNTXT_BASE_OFST (0x000005d8)
+#define IPA_LOCAL_PKT_PROC_CNTXT_BASE_OFST (0x000005e0)
+
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
new file mode 100644
index 0000000..164e94b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_rt.c
@@ -0,0 +1,1457 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include "ipa_i.h"
+
+#define IPA_RT_TABLE_INDEX_NOT_FOUND	(-1)
+#define IPA_RT_TABLE_WORD_SIZE		(4)
+#define IPA_RT_INDEX_BITMAP_SIZE	(32)
+#define IPA_RT_TABLE_MEMORY_ALLIGNMENT	(127)
+#define IPA_RT_ENTRY_MEMORY_ALLIGNMENT	(3)
+#define IPA_RT_BIT_MASK			(0x1)
+#define IPA_RT_STATUS_OF_ADD_FAILED	(-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
+#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
+
+/**
+ * __ipa_generate_rt_hw_rule_v2() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *		the size of the rule as seen by HW, so no valid buffer was
+ *		passed and a scratch buffer is used instead.
+ *		With this scheme the rule is generated twice: once to learn
+ *		its size using the scratch buffer, and a second time to
+ *		write the rule into the caller-supplied buffer of the
+ *		required size.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+int __ipa_generate_rt_hw_rule_v2(enum ipa_ip_type ip,
+		struct ipa_rt_entry *entry, u8 *buf)
+{
+	struct ipa_rt_rule_hw_hdr *rule_hdr;
+	const struct ipa_rt_rule *rule =
+		(const struct ipa_rt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
+	u8 *start;
+	int pipe_idx;
+
+	if (buf == NULL) {
+		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+		buf = (u8 *)tmp;
+	}
+
+	start = buf;
+	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+	pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
+	if (pipe_idx == -1) {
+		IPAERR("Wrong destination pipe specified in RT rule\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+		IPAERR("No RT rule on IPA_client_producer pipe.\n");
+		IPAERR("pipe_idx: %d dst_pipe: %d\n",
+				pipe_idx, entry->rule.dst);
+		WARN_ON(1);
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.pipe_dest_idx = pipe_idx;
+	rule_hdr->u.hdr.system = !ipa_ctx->hdr_tbl_lcl;
+	if (entry->hdr) {
+		rule_hdr->u.hdr.hdr_offset =
+			entry->hdr->offset_entry->offset >> 2;
+	} else {
+		rule_hdr->u.hdr.hdr_offset = 0;
+	}
+	buf += sizeof(struct ipa_rt_rule_hw_hdr);
+
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule 0x%x\n", en_rule);
+
+	rule_hdr->u.hdr.en_rule = en_rule;
+	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%tx\n",
+			entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
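+/*
+ * Usage sketch (illustrative): the generator above is invoked twice by
+ * its callers: first with buf == NULL so that entry->hw_len is learned
+ * (see ipa_get_rt_hw_tbl_size()), then with a real buffer once enough
+ * space has been allocated (see ipa_generate_rt_hw_tbl_common()); the
+ * second pass must produce exactly entry->hw_len bytes or the rule is
+ * rejected.
+ */
+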
+/**
+ * __ipa_generate_rt_hw_rule_v2_5() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *		the size of the rule as seen by HW, so no valid buffer was
+ *		passed and a scratch buffer is used instead.
+ *		With this scheme the rule is generated twice: once to learn
+ *		its size using the scratch buffer, and a second time to
+ *		write the rule into the caller-supplied buffer of the
+ *		required size.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+int __ipa_generate_rt_hw_rule_v2_5(enum ipa_ip_type ip,
+		struct ipa_rt_entry *entry, u8 *buf)
+{
+	struct ipa_rt_rule_hw_hdr *rule_hdr;
+	const struct ipa_rt_rule *rule =
+		(const struct ipa_rt_rule *)&entry->rule;
+	u16 en_rule = 0;
+	u32 tmp[IPA_RT_FLT_HW_RULE_BUF_SIZE/4];
+	u8 *start;
+	int pipe_idx;
+
+	if (buf == NULL) {
+		memset(tmp, 0, IPA_RT_FLT_HW_RULE_BUF_SIZE);
+		buf = (u8 *)tmp;
+	}
+
+	start = buf;
+	rule_hdr = (struct ipa_rt_rule_hw_hdr *)buf;
+	pipe_idx = ipa2_get_ep_mapping(entry->rule.dst);
+	if (pipe_idx == -1) {
+		IPAERR("Wrong destination pipe specified in RT rule\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+		IPAERR("No RT rule on IPA_client_producer pipe.\n");
+		IPAERR("pipe_idx: %d dst_pipe: %d\n",
+				pipe_idx, entry->rule.dst);
+		WARN_ON(1);
+		return -EPERM;
+	}
+	rule_hdr->u.hdr_v2_5.pipe_dest_idx = pipe_idx;
+	if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+		struct ipa_hdr_proc_ctx_entry *proc_ctx;
+
+		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
+		rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_proc_ctx_tbl_lcl;
+		BUG_ON(proc_ctx->offset_entry->offset & 31);
+		rule_hdr->u.hdr_v2_5.proc_ctx = 1;
+		rule_hdr->u.hdr_v2_5.hdr_offset =
+			(proc_ctx->offset_entry->offset +
+			ipa_ctx->hdr_proc_ctx_tbl.start_offset) >> 5;
+	} else if (entry->hdr) {
+		rule_hdr->u.hdr_v2_5.system = !ipa_ctx->hdr_tbl_lcl;
+		BUG_ON(entry->hdr->offset_entry->offset & 3);
+		rule_hdr->u.hdr_v2_5.proc_ctx = 0;
+		rule_hdr->u.hdr_v2_5.hdr_offset =
+				entry->hdr->offset_entry->offset >> 2;
+	} else {
+		rule_hdr->u.hdr_v2_5.proc_ctx = 0;
+		rule_hdr->u.hdr_v2_5.hdr_offset = 0;
+	}
+	buf += sizeof(struct ipa_rt_rule_hw_hdr);
+
+	if (ipa_generate_hw_rule(ip, &rule->attrib, &buf, &en_rule)) {
+		IPAERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+
+	IPADBG("en_rule 0x%x\n", en_rule);
+
+	rule_hdr->u.hdr_v2_5.en_rule = en_rule;
+	ipa_write_32(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (entry->hw_len == 0) {
+		entry->hw_len = buf - start;
+	} else if (entry->hw_len != (buf - start)) {
+		IPAERR("hw_len differs b/w passes passed=0x%x calc=0x%tx\n",
+			entry->hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/**
+ * __ipa_generate_rt_hw_rule_v2_6L() - generates the routing hardware rule
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer, buf == NULL means that the caller wants to know the size
+ *       of the rule as seen by HW so they did not pass a valid buffer, we will
+ *       use a scratch buffer instead.
+ *       With this scheme we are going to generate the rule twice, once to know
+ *       size using scratch buffer and second to write the rule to the actual
+ *       caller supplied buffer which is of required size.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+int __ipa_generate_rt_hw_rule_v2_6L(enum ipa_ip_type ip,
+		struct ipa_rt_entry *entry, u8 *buf)
+{
+	/* Same implementation as IPAv2 */
+	return __ipa_generate_rt_hw_rule_v2(ip, entry, buf);
+}
+
+/**
+ * ipa_get_rt_hw_tbl_size() - returns the size of HW routing table
+ * @ip: the ip address family type
+ * @hdr_sz: [out] size of the routing table header
+ * @max_rt_idx: [out] highest routing table index in use
+ *
+ * Returns:	size of the HW routing table in bytes on success,
+ *		negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ * the highest bit set in rt_idx_bitmap determines the size of the
+ * routing table header
+ */
+static int ipa_get_rt_hw_tbl_size(enum ipa_ip_type ip, u32 *hdr_sz,
+		int *max_rt_idx)
+{
+	struct ipa_rt_tbl_set *set;
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	u32 total_sz = 0;
+	u32 tbl_sz;
+	u32 bitmap = ipa_ctx->rt_idx_bitmap[ip];
+	int highest_bit_set = IPA_RT_TABLE_INDEX_NOT_FOUND;
+	int i;
+	int res;
+
+	*hdr_sz = 0;
+	set = &ipa_ctx->rt_tbl_set[ip];
+
+	for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+		if (bitmap & IPA_RT_BIT_MASK)
+			highest_bit_set = i;
+		bitmap >>= 1;
+	}
+
+	*max_rt_idx = highest_bit_set;
+	if (highest_bit_set == IPA_RT_TABLE_INDEX_NOT_FOUND) {
+		IPAERR("no rt tbls present\n");
+		total_sz = IPA_RT_TABLE_WORD_SIZE;
+		*hdr_sz = IPA_RT_TABLE_WORD_SIZE;
+		return total_sz;
+	}
+
+	*hdr_sz = (highest_bit_set + 1) * IPA_RT_TABLE_WORD_SIZE;
+	total_sz += *hdr_sz;
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		tbl_sz = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
+				ip,
+				entry,
+				NULL);
+			if (res) {
+				IPAERR("failed to find HW RT rule size\n");
+				return -EPERM;
+			}
+			tbl_sz += entry->hw_len;
+		}
+
+		if (tbl_sz)
+			tbl->sz = tbl_sz + IPA_RT_TABLE_WORD_SIZE;
+
+		if (tbl->in_sys)
+			continue;
+
+		if (tbl_sz) {
+			/* add the terminator */
+			total_sz += (tbl_sz + IPA_RT_TABLE_WORD_SIZE);
+			/* every rule-set should start at word boundary */
+			total_sz = (total_sz + IPA_RT_ENTRY_MEMORY_ALLIGNMENT) &
+						~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+		}
+	}
+
+	IPADBG("RT HW TBL SZ %d HDR SZ %d IP %d\n", total_sz, *hdr_sz, ip);
+
+	return total_sz;
+}
+
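+/*
+ * Note (illustrative): IPA_RT_ENTRY_MEMORY_ALLIGNMENT is 3, so the
+ * (sz + align) & ~align pattern used above and in the table generators
+ * rounds a size up to the next 4-byte word, e.g. 13 bytes become 16.
+ */
+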
+static int ipa_generate_rt_hw_tbl_common(enum ipa_ip_type ip, u8 *base, u8 *hdr,
+		u32 body_ofst, u32 apps_start_idx)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_rt_tbl_set *set;
+	u32 offset;
+	u8 *body;
+	struct ipa_mem_buffer rt_tbl_mem;
+	u8 *rt_tbl_mem_body;
+	int res;
+
+	/* build the rt tbl in the DMA buffer to submit to IPA HW */
+	body = base;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (!tbl->in_sys) {
+			offset = body - base + body_ofst;
+			if (offset & IPA_RT_ENTRY_MEMORY_ALLIGNMENT) {
+				IPAERR("offset is not word multiple %d\n",
+						offset);
+				goto proc_err;
+			}
+
+			/* convert offset to words from bytes */
+			offset &= ~IPA_RT_ENTRY_MEMORY_ALLIGNMENT;
+			/* rule is at an offset from base */
+			offset |= IPA_RT_BIT_MASK;
+
+			/* update the hdr at the right index */
+			ipa_write_32(offset, hdr +
+					((tbl->idx - apps_start_idx) *
+					 IPA_RT_TABLE_WORD_SIZE));
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
+					ip,
+					entry,
+					body);
+				if (res) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto proc_err;
+				}
+				body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			body = ipa_write_32(0, body);
+			if ((long)body & IPA_RT_ENTRY_MEMORY_ALLIGNMENT)
+				/* advance body to next word boundary */
+				body = body + (IPA_RT_TABLE_WORD_SIZE -
+					      ((long)body &
+					      IPA_RT_ENTRY_MEMORY_ALLIGNMENT));
+		} else {
+			WARN_ON(tbl->sz == 0);
+			/* allocate memory for the RT tbl */
+			rt_tbl_mem.size = tbl->sz;
+			rt_tbl_mem.base =
+			   dma_alloc_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
+					   &rt_tbl_mem.phys_base, GFP_KERNEL);
+			if (!rt_tbl_mem.base) {
+				IPAERR("fail to alloc DMA buff of size %d\n",
+						rt_tbl_mem.size);
+				WARN_ON(1);
+				goto proc_err;
+			}
+
+			WARN_ON(rt_tbl_mem.phys_base &
+					IPA_RT_ENTRY_MEMORY_ALLIGNMENT);
+			rt_tbl_mem_body = rt_tbl_mem.base;
+			memset(rt_tbl_mem.base, 0, rt_tbl_mem.size);
+			/* update the hdr at the right index */
+			ipa_write_32(rt_tbl_mem.phys_base,
+					hdr + ((tbl->idx - apps_start_idx) *
+					IPA_RT_TABLE_WORD_SIZE));
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				res = ipa_ctx->ctrl->ipa_generate_rt_hw_rule(
+					ip,
+					entry,
+					rt_tbl_mem_body);
+				if (res) {
+					IPAERR("failed to gen HW RT rule\n");
+					WARN_ON(1);
+					goto rt_table_mem_alloc_failed;
+				}
+				rt_tbl_mem_body += entry->hw_len;
+			}
+
+			/* write the rule-set terminator */
+			rt_tbl_mem_body = ipa_write_32(0, rt_tbl_mem_body);
+
+			if (tbl->curr_mem.phys_base) {
+				WARN_ON(tbl->prev_mem.phys_base);
+				tbl->prev_mem = tbl->curr_mem;
+			}
+			tbl->curr_mem = rt_tbl_mem;
+		}
+	}
+
+	return 0;
+
+rt_table_mem_alloc_failed:
+	dma_free_coherent(ipa_ctx->pdev, rt_tbl_mem.size,
+			  rt_tbl_mem.base, rt_tbl_mem.phys_base);
+proc_err:
+	return -EPERM;
+}
+
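+/*
+ * Note (illustrative): ipa_generate_rt_hw_tbl_common() writes one of two
+ * encodings into each routing header slot: for local (SRAM) rule-sets,
+ * the offset from the table base with IPA_RT_BIT_MASK set in the LSB;
+ * for system-memory rule-sets, the physical address of the DMA buffer
+ * that holds them.
+ */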
+
+/**
+ * ipa_generate_rt_hw_tbl_v1_1() - generates the routing hardware table
+ * @ip:	[in] the ip address family type
+ * @mem:	[out] buffer to put the routing table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa_generate_rt_hw_tbl_v1_1(enum ipa_ip_type ip,
+		struct ipa_mem_buffer *mem)
+{
+	u32 hdr_sz;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	int max_rt_idx;
+	int i;
+
+	mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+				~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+	if (mem->size == 0) {
+		IPAERR("rt tbl empty ip=%d\n", ip);
+		goto error;
+	}
+	mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		goto error;
+	}
+
+	memset(mem->base, 0, mem->size);
+
+	/* build the rt tbl in the DMA buffer to submit to IPA HW */
+	base = hdr = (u8 *)mem->base;
+	body = base + hdr_sz;
+
+	/* setup all indices to point to the empty sys rt tbl */
+	for (i = 0; i <= max_rt_idx; i++)
+		ipa_write_32(ipa_ctx->empty_rt_tbl_mem.phys_base,
+				hdr + (i * IPA_RT_TABLE_WORD_SIZE));
+
+	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, 0, 0)) {
+		IPAERR("fail to generate RT tbl\n");
+		goto proc_err;
+	}
+
+	return 0;
+
+proc_err:
+	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+	mem->base = NULL;
+error:
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_tbl *next;
+	struct ipa_rt_tbl_set *set;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (tbl->prev_mem.phys_base) {
+			IPADBG("reaping rt tbl name=%s ip=%d\n", tbl->name, ip);
+			dma_free_coherent(ipa_ctx->pdev, tbl->prev_mem.size,
+					tbl->prev_mem.base,
+					tbl->prev_mem.phys_base);
+			memset(&tbl->prev_mem, 0, sizeof(tbl->prev_mem));
+		}
+	}
+
+	set = &ipa_ctx->reap_rt_tbl_set[ip];
+	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+		list_del(&tbl->link);
+		WARN_ON(tbl->prev_mem.phys_base != 0);
+		if (tbl->curr_mem.phys_base) {
+			IPADBG("reaping sys rt tbl name=%s ip=%d\n", tbl->name,
+					ip);
+			dma_free_coherent(ipa_ctx->pdev, tbl->curr_mem.size,
+					tbl->curr_mem.base,
+					tbl->curr_mem.phys_base);
+			kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+		}
+	}
+}
+
+int __ipa_commit_rt_v1_1(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc = { 0 };
+	struct ipa_mem_buffer *mem;
+	void *cmd;
+	struct ipa_ip_v4_routing_init *v4;
+	struct ipa_ip_v6_routing_init *v6;
+	u16 avail;
+	u16 size;
+
+	mem = kmalloc(sizeof(struct ipa_mem_buffer), GFP_KERNEL);
+	if (!mem) {
+		IPAERR("failed to alloc memory object\n");
+		goto fail_alloc_mem;
+	}
+
+	if (ip == IPA_IP_v4) {
+		avail = ipa_ctx->ip4_rt_tbl_lcl ? IPA_MEM_v1_RAM_V4_RT_SIZE :
+			IPA_MEM_PART(v4_rt_size_ddr);
+		size = sizeof(struct ipa_ip_v4_routing_init);
+	} else {
+		avail = ipa_ctx->ip6_rt_tbl_lcl ? IPA_MEM_v1_RAM_V6_RT_SIZE :
+			IPA_MEM_PART(v6_rt_size_ddr);
+		size = sizeof(struct ipa_ip_v6_routing_init);
+	}
+	cmd = kmalloc(size, GFP_KERNEL);
+	if (!cmd) {
+		IPAERR("failed to alloc immediate command object\n");
+		goto fail_alloc_cmd;
+	}
+
+	if (ipa_generate_rt_hw_tbl_v1_1(ip, mem)) {
+		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+		goto fail_hw_tbl_gen;
+	}
+
+	if (mem->size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", mem->size, avail);
+		goto fail_send_cmd;
+	}
+
+	if (ip == IPA_IP_v4) {
+		v4 = (struct ipa_ip_v4_routing_init *)cmd;
+		desc.opcode = IPA_IP_V4_ROUTING_INIT;
+		v4->ipv4_rules_addr = mem->phys_base;
+		v4->size_ipv4_rules = mem->size;
+		v4->ipv4_addr = IPA_MEM_v1_RAM_V4_RT_OFST;
+		IPADBG("putting Routing IPv4 rules to phys 0x%x",
+				v4->ipv4_addr);
+	} else {
+		v6 = (struct ipa_ip_v6_routing_init *)cmd;
+		desc.opcode = IPA_IP_V6_ROUTING_INIT;
+		v6->ipv6_rules_addr = mem->phys_base;
+		v6->size_ipv6_rules = mem->size;
+		v6->ipv6_addr = IPA_MEM_v1_RAM_V6_RT_OFST;
+		IPADBG("putting Routing IPv6 rules to phys 0x%x",
+				v6->ipv6_addr);
+	}
+
+	desc.pyld = cmd;
+	desc.len = size;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem->base, mem->phys_base, mem->size);
+
+	if (ipa_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		goto fail_send_cmd;
+	}
+
+	__ipa_reap_sys_rt_tbls(ip);
+	dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base, mem->phys_base);
+	kfree(cmd);
+	kfree(mem);
+
+	return 0;
+
+fail_send_cmd:
+	if (mem->base)
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+				mem->phys_base);
+fail_hw_tbl_gen:
+	kfree(cmd);
+fail_alloc_cmd:
+	kfree(mem);
+fail_alloc_mem:
+	return -EPERM;
+}
+
+static int ipa_generate_rt_hw_tbl_v2(enum ipa_ip_type ip,
+		struct ipa_mem_buffer *mem, struct ipa_mem_buffer *head)
+{
+	u32 hdr_sz;
+	u8 *hdr;
+	u8 *body;
+	u8 *base;
+	int max_rt_idx;
+	int i;
+	u32 *entr;
+	int num_index;
+	u32 body_start_offset;
+	u32 apps_start_idx;
+
+	if (ip == IPA_IP_v4) {
+		num_index = IPA_MEM_PART(v4_apps_rt_index_hi) -
+			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
+		body_start_offset = IPA_MEM_PART(apps_v4_rt_ofst) -
+			IPA_MEM_PART(v4_rt_ofst);
+		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+	} else {
+		num_index = IPA_MEM_PART(v6_apps_rt_index_hi) -
+			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
+		body_start_offset = IPA_MEM_PART(apps_v6_rt_ofst) -
+			IPA_MEM_PART(v6_rt_ofst);
+		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+	}
+
+	head->size = num_index * 4;
+	head->base = dma_alloc_coherent(ipa_ctx->pdev, head->size,
+			&head->phys_base, GFP_KERNEL);
+	if (!head->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", head->size);
+		goto err;
+	}
+	entr = (u32 *)head->base;
+	hdr = (u8 *)head->base;
+	for (i = 1; i <= num_index; i++) {
+		*entr = ipa_ctx->empty_rt_tbl_mem.phys_base;
+		entr++;
+	}
+
+	mem->size = ipa_get_rt_hw_tbl_size(ip, &hdr_sz, &max_rt_idx);
+	mem->size -= hdr_sz;
+	mem->size = (mem->size + IPA_RT_TABLE_MEMORY_ALLIGNMENT) &
+				~IPA_RT_TABLE_MEMORY_ALLIGNMENT;
+
+	if (mem->size > 0) {
+		mem->base = dma_alloc_coherent(ipa_ctx->pdev, mem->size,
+				&mem->phys_base, GFP_KERNEL);
+		if (!mem->base) {
+			IPAERR("fail to alloc DMA buff of size %d\n",
+					mem->size);
+			goto base_err;
+		}
+		memset(mem->base, 0, mem->size);
+	}
+
+	/* build the rt tbl in the DMA buffer to submit to IPA HW */
+	body = base = (u8 *)mem->base;
+
+	if (ipa_generate_rt_hw_tbl_common(ip, base, hdr, body_start_offset,
+				apps_start_idx)) {
+		IPAERR("fail to generate RT tbl\n");
+		goto proc_err;
+	}
+
+	return 0;
+
+proc_err:
+	if (mem->size)
+		dma_free_coherent(ipa_ctx->pdev, mem->size, mem->base,
+			mem->phys_base);
+base_err:
+	dma_free_coherent(ipa_ctx->pdev, head->size, head->base,
+			head->phys_base);
+err:
+	return -EPERM;
+}
+
+int __ipa_commit_rt_v2(enum ipa_ip_type ip)
+{
+	struct ipa_desc desc[2];
+	struct ipa_mem_buffer body;
+	struct ipa_mem_buffer head;
+	struct ipa_hw_imm_cmd_dma_shared_mem cmd1 = {0};
+	struct ipa_hw_imm_cmd_dma_shared_mem cmd2 = {0};
+	u16 avail;
+	u32 num_modem_rt_index;
+	int rc = 0;
+	u32 local_addr1;
+	u32 local_addr2;
+	bool lcl;
+
+	memset(desc, 0, 2 * sizeof(struct ipa_desc));
+
+	if (ip == IPA_IP_v4) {
+		avail = ipa_ctx->ip4_rt_tbl_lcl ?
+			IPA_MEM_PART(apps_v4_rt_size) :
+			IPA_MEM_PART(v4_rt_size_ddr);
+		num_modem_rt_index =
+			IPA_MEM_PART(v4_modem_rt_index_hi) -
+			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
+		local_addr1 = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_ofst) +
+			num_modem_rt_index * 4;
+		local_addr2 = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_ofst);
+		lcl = ipa_ctx->ip4_rt_tbl_lcl;
+	} else {
+		avail = ipa_ctx->ip6_rt_tbl_lcl ?
+			IPA_MEM_PART(apps_v6_rt_size) :
+			IPA_MEM_PART(v6_rt_size_ddr);
+		num_modem_rt_index =
+			IPA_MEM_PART(v6_modem_rt_index_hi) -
+			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
+		local_addr1 = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_ofst) +
+			num_modem_rt_index * 4;
+		local_addr2 = ipa_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_ofst);
+		lcl = ipa_ctx->ip6_rt_tbl_lcl;
+	}
+
+	if (ipa_generate_rt_hw_tbl_v2(ip, &body, &head)) {
+		IPAERR("fail to generate RT HW TBL ip %d\n", ip);
+		rc = -EFAULT;
+		goto fail_gen;
+	}
+
+	if (body.size > avail) {
+		IPAERR("tbl too big, needed %d avail %d\n", body.size, avail);
+		rc = -EFAULT;
+		goto fail_send_cmd;
+	}
+
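+	/*
+	 * Descriptor 0 always DMAs the index/header part into IPA local
+	 * memory. Descriptor 1 DMAs the rule bodies only when the table is
+	 * local (lcl); otherwise the bodies stay in system memory and only
+	 * the first descriptor is sent.
+	 */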
+	cmd1.size = head.size;
+	cmd1.system_addr = head.phys_base;
+	cmd1.local_addr = local_addr1;
+	desc[0].opcode = IPA_DMA_SHARED_MEM;
+	desc[0].pyld = &cmd1;
+	desc[0].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+	desc[0].type = IPA_IMM_CMD_DESC;
+
+	if (lcl) {
+		cmd2.size = body.size;
+		cmd2.system_addr = body.phys_base;
+		cmd2.local_addr = local_addr2;
+
+		desc[1].opcode = IPA_DMA_SHARED_MEM;
+		desc[1].pyld = &cmd2;
+		desc[1].len = sizeof(struct ipa_hw_imm_cmd_dma_shared_mem);
+		desc[1].type = IPA_IMM_CMD_DESC;
+
+		if (ipa_send_cmd(2, desc)) {
+			IPAERR("fail to send immediate command\n");
+			rc = -EFAULT;
+			goto fail_send_cmd;
+		}
+	} else {
+		if (ipa_send_cmd(1, desc)) {
+			IPAERR("fail to send immediate command\n");
+			rc = -EFAULT;
+			goto fail_send_cmd;
+		}
+	}
+
+	IPADBG("HEAD\n");
+	IPA_DUMP_BUFF(head.base, head.phys_base, head.size);
+	if (body.size) {
+		IPADBG("BODY\n");
+		IPA_DUMP_BUFF(body.base, body.phys_base, body.size);
+	}
+	__ipa_reap_sys_rt_tbls(ip);
+fail_send_cmd:
+	dma_free_coherent(ipa_ctx->pdev, head.size, head.base, head.phys_base);
+	if (body.size)
+		dma_free_coherent(ipa_ctx->pdev, body.size, body.base,
+				body.phys_base);
+fail_gen:
+	return rc;
+}
+
+/**
+ * __ipa_find_rt_tbl() - find the routing table whose name is given as
+ *			parameter
+ * @ip:	[in] the ip address family type of the wanted routing table
+ * @name:	[in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as parameter, or NULL if it
+ * doesn't exist
+ */
+struct ipa_rt_tbl *__ipa_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+	struct ipa_rt_tbl *entry;
+	struct ipa_rt_tbl_set *set;
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa2_query_rt_index() - find the index of the routing table
+ *			whose name and ip type are given as parameters
+ * @in:	[inout] the name and ip family of the wanted routing table; on
+ *	success, in->idx is set to the table index
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa2_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	struct ipa_rt_tbl *entry;
+
+	if (in->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	/* check if this table exists */
+	entry = __ipa_find_rt_tbl(in->ip, in->name);
+	if (!entry)
+		return -EFAULT;
+
+	in->idx  = entry->idx;
+	return 0;
+}
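+
+/*
+ * Minimal usage sketch (hypothetical caller, not part of this driver):
+ *
+ *	struct ipa_ioc_get_rt_tbl_indx q;
+ *
+ *	memset(&q, 0, sizeof(q));
+ *	q.ip = IPA_IP_v4;
+ *	strlcpy(q.name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa2_query_rt_index(&q))
+ *		IPADBG("rt tbl idx=%d\n", q.idx);
+ */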
+
+static struct ipa_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+		const char *name)
+{
+	struct ipa_rt_tbl *entry;
+	struct ipa_rt_tbl_set *set;
+	int i;
+	int id;
+
+	if (ip >= IPA_IP_MAX || name == NULL) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	/* check if this table exists */
+	entry = __ipa_find_rt_tbl(ip, name);
+	if (!entry) {
+		entry = kmem_cache_zalloc(ipa_ctx->rt_tbl_cache, GFP_KERNEL);
+		if (!entry) {
+			IPAERR("failed to alloc RT tbl object\n");
+			goto error;
+		}
+		/* find a routing tbl index */
+		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+			if (!test_bit(i, &ipa_ctx->rt_idx_bitmap[ip])) {
+				entry->idx = i;
+				set_bit(i, &ipa_ctx->rt_idx_bitmap[ip]);
+				break;
+			}
+		}
+		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("not free RT tbl indices left\n");
+			goto fail_rt_idx_alloc;
+		}
+
+		INIT_LIST_HEAD(&entry->head_rt_rule_list);
+		INIT_LIST_HEAD(&entry->link);
+		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+		entry->set = set;
+		entry->cookie = IPA_COOKIE;
+		entry->in_sys = (ip == IPA_IP_v4) ?
+			!ipa_ctx->ip4_rt_tbl_lcl : !ipa_ctx->ip6_rt_tbl_lcl;
+		set->tbl_cnt++;
+		list_add(&entry->link, &set->head_rt_tbl_list);
+
+		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+				set->tbl_cnt, ip);
+
+		id = ipa_id_alloc(entry);
+		if (id < 0) {
+			IPAERR("failed to add to tree\n");
+			WARN_ON(1);
+		}
+		entry->id = id;
+	}
+
+	return entry;
+
+fail_rt_idx_alloc:
+	entry->cookie = 0;
+	kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+error:
+	return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa_rt_tbl *entry)
+{
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	u32 id;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+	if (ipa_id_find(id) == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	if (!entry->in_sys) {
+		list_del(&entry->link);
+		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+				entry->set->tbl_cnt);
+		kmem_cache_free(ipa_ctx->rt_tbl_cache, entry);
+	} else {
+		list_move(&entry->link,
+				&ipa_ctx->reap_rt_tbl_set[ip].head_rt_tbl_list);
+		clear_bit(entry->idx, &ipa_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d\n", entry->idx,
+				entry->set->tbl_cnt);
+	}
+
+	/* remove the handle from the database */
+	ipa_id_remove(id);
+	return 0;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_entry *entry;
+	struct ipa_hdr_entry *hdr = NULL;
+	struct ipa_hdr_proc_ctx_entry *proc_ctx = NULL;
+	int id;
+
+	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
+		IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+		goto error;
+	}
+
+	if (rule->hdr_hdl) {
+		hdr = ipa_id_find(rule->hdr_hdl);
+		if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid hdr\n");
+			goto error;
+		}
+	} else if (rule->hdr_proc_ctx_hdl) {
+		proc_ctx = ipa_id_find(rule->hdr_proc_ctx_hdl);
+		if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid proc ctx\n");
+			goto error;
+		}
+	}
+
+
+	tbl = __ipa_add_rt_tbl(ip, name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+		       tbl->rule_cnt, at_rear);
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa_ctx->rt_rule_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc RT rule object\n");
+		goto error;
+	}
+	INIT_LIST_HEAD(&entry->link);
+	entry->cookie = IPA_COOKIE;
+	entry->rule = *rule;
+	entry->tbl = tbl;
+	entry->hdr = hdr;
+	entry->proc_ctx = proc_ctx;
+	if (at_rear)
+		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_rt_rule_list);
+	tbl->rule_cnt++;
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+	id = ipa_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+		goto ipa_insert_failed;
+	}
+	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d\n", tbl->idx, tbl->rule_cnt);
+	*rule_hdl = id;
+	entry->id = id;
+
+	return 0;
+
+ipa_insert_failed:
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+	list_del(&entry->link);
+	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa2_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa_ctx->ctrl->ipa_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
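+
+/*
+ * Usage sketch (hypothetical caller; assumes the request is laid out as the
+ * ioctl header followed by one rule entry, the single-entry allocation is an
+ * assumption of this sketch):
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(req->rules[0]), GFP_KERNEL);
+ *	req->ip = IPA_IP_v4;
+ *	req->num_rules = 1;
+ *	req->commit = 1;
+ *	strlcpy(req->rt_tbl_name, "my_rt_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->rules[0].at_rear = 1;
+ *	... fill req->rules[0].rule with the match criteria ...
+ *	if (!ipa2_add_rt_rule(req) && !req->rules[0].status)
+ *		... req->rules[0].rt_rule_hdl now holds the rule handle ...
+ */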
+
+int __ipa_del_rt_rule(u32 rule_hdl)
+{
+	struct ipa_rt_entry *entry;
+	int id;
+
+	entry = ipa_id_find(rule_hdl);
+
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+
+	if (entry->hdr)
+		__ipa_release_hdr(entry->hdr->id);
+	else if (entry->proc_ctx)
+		__ipa_release_hdr_proc_ctx(entry->proc_ctx->id);
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d\n", entry->tbl->idx,
+			entry->tbl->rule_cnt);
+	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry->tbl))
+			IPAERR("fail to del RT tbl\n");
+	}
+	entry->cookie = 0;
+	id = entry->id;
+	kmem_cache_free(ipa_ctx->rt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa_id_remove(id);
+
+	return 0;
+}
+
+/**
+ * ipa2_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally commit to IPA HW
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int i;
+	int ret;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_rt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
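+
+/*
+ * Usage sketch (hypothetical caller, deleting a rule whose handle was saved
+ * in rt_rule_hdl; the single-entry allocation is an assumption):
+ *
+ *	struct ipa_ioc_del_rt_rule *del;
+ *
+ *	del = kzalloc(sizeof(*del) + sizeof(del->hdl[0]), GFP_KERNEL);
+ *	del->ip = IPA_IP_v4;
+ *	del->num_hdls = 1;
+ *	del->commit = 1;
+ *	del->hdl[0].hdl = rt_rule_hdl;
+ *	if (ipa2_del_rt_rule(del) || del->hdl[0].status)
+ *		IPAERR("del rt rule failed\n");
+ */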
+
+/**
+ * ipa2_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * issue a commit on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa2_commit_flt(ip))
+		return -EPERM;
+
+	mutex_lock(&ipa_ctx->lock);
+	if (ipa_ctx->ctrl->ipa_commit_rt(ip)) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa2_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_reset_rt(enum ipa_ip_type ip)
+{
+	struct ipa_rt_tbl *tbl;
+	struct ipa_rt_tbl *tbl_next;
+	struct ipa_rt_tbl_set *set;
+	struct ipa_rt_entry *rule;
+	struct ipa_rt_entry *rule_next;
+	struct ipa_rt_tbl_set *rset;
+	u32 apps_start_idx;
+	int id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0) {
+		if (ip == IPA_IP_v4)
+			apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+		else
+			apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+	} else {
+		apps_start_idx = 0;
+	}
+
+	/*
+	 * issue a reset on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa2_reset_flt(ip))
+		IPAERR("fail to reset flt ip=%d\n", ip);
+
+	set = &ipa_ctx->rt_tbl_set[ip];
+	rset = &ipa_ctx->reap_rt_tbl_set[ip];
+	mutex_lock(&ipa_ctx->lock);
+	IPADBG("reset rt ip=%d\n", ip);
+	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+		list_for_each_entry_safe(rule, rule_next,
+					 &tbl->head_rt_rule_list, link) {
+			if (ipa_id_find(rule->id) == NULL) {
+				WARN_ON(1);
+				mutex_unlock(&ipa_ctx->lock);
+				return -EFAULT;
+			}
+
+			/*
+			 * for the "default" routing tbl, remove all but the
+			 *  last rule
+			 */
+			if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
+				continue;
+
+			list_del(&rule->link);
+			tbl->rule_cnt--;
+			if (rule->hdr)
+				__ipa_release_hdr(rule->hdr->id);
+			else if (rule->proc_ctx)
+				__ipa_release_hdr_proc_ctx(rule->proc_ctx->id);
+			rule->cookie = 0;
+			id = rule->id;
+			kmem_cache_free(ipa_ctx->rt_rule_cache, rule);
+
+			/* remove the handle from the database */
+			ipa_id_remove(id);
+		}
+
+		if (ipa_id_find(tbl->id) == NULL) {
+			WARN_ON(1);
+			mutex_unlock(&ipa_ctx->lock);
+			return -EFAULT;
+		}
+		id = tbl->id;
+
+		/* do not remove the "default" routing tbl which has index 0 */
+		if (tbl->idx != apps_start_idx) {
+			if (!tbl->in_sys) {
+				list_del(&tbl->link);
+				set->tbl_cnt--;
+				clear_bit(tbl->idx,
+					  &ipa_ctx->rt_idx_bitmap[ip]);
+				IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+				kmem_cache_free(ipa_ctx->rt_tbl_cache, tbl);
+			} else {
+				list_move(&tbl->link, &rset->head_rt_tbl_list);
+				clear_bit(tbl->idx,
+					  &ipa_ctx->rt_idx_bitmap[ip]);
+				set->tbl_cnt--;
+				IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+			}
+			/* remove the handle from the database */
+			ipa_id_remove(id);
+		}
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return 0;
+}
+
+/**
+ * ipa2_get_rt_tbl() - lookup the specified routing table and return handle if
+ * it exists, if lookup succeeds the routing table ref cnt is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa2_put_rt_tbl later if this function succeeds
+ */
+int ipa2_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	struct ipa_rt_tbl *entry;
+	int result = -EFAULT;
+
+	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa_ctx->lock);
+	entry = __ipa_find_rt_tbl(lookup->ip, lookup->name);
+	if (entry && entry->cookie == IPA_COOKIE) {
+		entry->ref_cnt++;
+		lookup->hdl = entry->id;
+
+		/* commit for get */
+		if (ipa_ctx->ctrl->ipa_commit_rt(lookup->ip))
+			IPAERR("fail to commit RT tbl\n");
+
+		result = 0;
+	}
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa2_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	struct ipa_rt_tbl *entry;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	int result;
+
+	mutex_lock(&ipa_ctx->lock);
+	entry = ipa_id_find(rt_tbl_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
+		IPAERR("bad parms\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry))
+			IPAERR("fail to del RT tbl\n");
+		/* commit for put */
+		if (ipa_ctx->ctrl->ipa_commit_rt(ip))
+			IPAERR("fail to commit RT tbl\n");
+	}
+
+	result = 0;
+
+ret:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
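+
+/*
+ * Usage sketch for the get/put pair (hypothetical caller):
+ *
+ *	struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	lookup.ip = IPA_IP_v4;
+ *	strlcpy(lookup.name, IPA_DFLT_RT_TBL_NAME, IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa2_get_rt_tbl(&lookup)) {
+ *		... use lookup.hdl while the reference is held ...
+ *		ipa2_put_rt_tbl(lookup.hdl);
+ *	}
+ */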
+
+
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
+{
+	struct ipa_rt_entry *entry;
+	struct ipa_hdr_entry *hdr = NULL;
+
+	if (rtrule->rule.hdr_hdl) {
+		hdr = ipa_id_find(rtrule->rule.hdr_hdl);
+		if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid hdr\n");
+			goto error;
+		}
+	}
+
+	entry = ipa_id_find(rtrule->rt_rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+
+	entry->rule = rtrule->rule;
+	entry->hdr = hdr;
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa2_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to modify
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
+			IPAERR("failed to mdfy rt rule %i\n", i);
+			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa_ctx->ctrl->ipa_commit_rt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa_ctx->lock);
+
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
new file mode 100644
index 0000000..a03a49a
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_trace.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+	intr_to_poll,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	poll_to_intr,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_enter,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_exit,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifni,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifrx,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netif_rcv_skb,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
new file mode 100644
index 0000000..01eea36
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc.c
@@ -0,0 +1,923 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/delay.h>
+
+#define IPA_RAM_UC_SMEM_SIZE 128
+#define IPA_HW_INTERFACE_VERSION     0x0111
+#define IPA_PKT_FLUSH_TO_US 100
+#define IPA_UC_POLL_SLEEP_USEC 100
+#define IPA_UC_POLL_MAX_RETRY 10000
+#define HOLB_WORKQUEUE_NAME "ipa_holb_wq"
+
+static struct workqueue_struct *ipa_holb_wq;
+static void ipa_start_monitor_holb(struct work_struct *work);
+static DECLARE_WORK(ipa_holb_work, ipa_start_monitor_holb);
+
+/**
+ * enum ipa_cpu_2_hw_commands - Values that represent the commands from the CPU
+ * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior
+ *                                 of HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
+ * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
+ *                              handling.
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to go to Clock Gated state.
+ * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to go to Clock Ungated state.
+ * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
+ * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
+ * IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING : Enable or disable HOLB monitoring
+ *                                           on a given pipe.
+ */
+enum ipa_cpu_2_hw_commands {
+	IPA_CPU_2_HW_CMD_NO_OP                     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_CPU_2_HW_CMD_UPDATE_FLAGS              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_CPU_2_HW_CMD_DEBUG_GET_INFO            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_CPU_2_HW_CMD_ERR_FATAL                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_CPU_2_HW_CMD_CLK_GATE                  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_CPU_2_HW_CMD_CLK_UNGATE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_CPU_2_HW_CMD_MEMCPY                    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_CPU_2_HW_CMD_RESET_PIPE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+	IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING    =
+			FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+};
+
+/**
+ * enum ipa_hw_2_cpu_responses -  Values that represent common HW responses
+ * to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
+ * boot sequence is completed and HW is ready to serve commands from CPU
+ * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
+ */
+enum ipa_hw_2_cpu_responses {
+	IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ * device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa_hw_2_cpu_events {
+	IPA_HW_2_CPU_EVENT_ERROR     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_EVENT_LOG_INFO  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_ZIP_ENGINE_ERROR : ZIP engine error
+ */
+enum ipa_hw_errors {
+	IPA_HW_ERROR_NONE              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_INVALID_DOORBELL_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_DMA_ERROR               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_FATAL_SYSTEM_ERROR      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_HW_INVALID_OPCODE          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_HW_ZIP_ENGINE_ERROR        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5)
+};
+
+/**
+ * struct IpaHwMemCopyData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_MEMCPY command.
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+struct IpaHwMemCopyData_t  {
+	u32 destination_addr;
+	u32 source_addr;
+	u32 dest_buffer_size;
+	u32 source_buffer_size;
+};
+
+/**
+ * union IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_RESET_PIPE command.
+ * @pipeNum : Pipe number to be reset
+ * @direction : 1 - IPA Producer, 0 - IPA Consumer
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwResetPipeCmdData_t {
+	struct IpaHwResetPipeCmdParams_t {
+		u8     pipeNum;
+		u8     direction;
+		u32    reserved_02_03;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwmonitorHolbCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING command.
+ * @monitorPipe : Indication whether to monitor the pipe. 0 - Do not Monitor
+ *		  Pipe, 1 - Monitor Pipe
+ * @pipeNum : Pipe to be monitored/not monitored
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwmonitorHolbCmdData_t {
+	struct IpaHwmonitorHolbCmdParams_t {
+		u8     monitorPipe;
+		u8     pipeNum;
+		u32    reserved_02_03:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+
+/**
+ * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
+ * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
+ * @originalCmdOp : The original command opcode
+ * @status : 0 for success indication, otherwise failure
+ * @reserved : Reserved
+ *
+ * Parameters are sent as 32b immediate parameters.
+ */
+union IpaHwCpuCmdCompletedResponseData_t {
+	struct IpaHwCpuCmdCompletedResponseParams_t {
+		u32 originalCmdOp:8;
+		u32 status:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+	struct IpaHwErrorEventParams_t {
+		u32 errorType:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
+ * @newFlags: SW flags defined the behavior of HW.
+ *	This field is expected to be used as bitmask for enum ipa_hw_flags
+ */
+union IpaHwUpdateFlagsCmdData_t {
+	struct IpaHwUpdateFlagsCmdParams_t {
+		u32 newFlags;
+	} params;
+	u32 raw32b;
+};
+
+struct ipa_uc_hdlrs uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
+
+static inline const char *ipa_hw_error_str(enum ipa_hw_errors err_type)
+{
+	const char *str;
+
+	switch (err_type) {
+	case IPA_HW_ERROR_NONE:
+		str = "IPA_HW_ERROR_NONE";
+		break;
+	case IPA_HW_INVALID_DOORBELL_ERROR:
+		str = "IPA_HW_INVALID_DOORBELL_ERROR";
+		break;
+	case IPA_HW_FATAL_SYSTEM_ERROR:
+		str = "IPA_HW_FATAL_SYSTEM_ERROR";
+		break;
+	case IPA_HW_INVALID_OPCODE:
+		str = "IPA_HW_INVALID_OPCODE";
+		break;
+	case IPA_HW_ZIP_ENGINE_ERROR:
+		str = "IPA_HW_ZIP_ENGINE_ERROR";
+		break;
+	default:
+		str = "INVALID ipa_hw_errors type";
+	}
+
+	return str;
+}
+
+static void ipa_log_evt_hdlr(void)
+{
+	int i;
+
+	if (!ipa_ctx->uc_ctx.uc_event_top_ofst) {
+		ipa_ctx->uc_ctx.uc_event_top_ofst =
+			ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		if (ipa_ctx->uc_ctx.uc_event_top_ofst +
+			sizeof(struct IpaHwEventLogInfoData_t) >=
+			ipa_ctx->ctrl->ipa_reg_base_ofst +
+			IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+			ipa_ctx->smem_sz) {
+			IPAERR("uc_top 0x%x outside SRAM\n",
+				ipa_ctx->uc_ctx.uc_event_top_ofst);
+			goto bad_uc_top_ofst;
+		}
+
+		ipa_ctx->uc_ctx.uc_event_top_mmio = ioremap(
+			ipa_ctx->ipa_wrapper_base +
+			ipa_ctx->uc_ctx.uc_event_top_ofst,
+			sizeof(struct IpaHwEventLogInfoData_t));
+		if (!ipa_ctx->uc_ctx.uc_event_top_mmio) {
+			IPAERR("fail to ioremap uc top\n");
+			goto bad_uc_top_ofst;
+		}
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
+				uc_hdlrs[i].ipa_uc_event_log_info_hdlr
+					(ipa_ctx->uc_ctx.uc_event_top_mmio);
+		}
+	} else {
+
+		if (ipa_ctx->uc_ctx.uc_sram_mmio->eventParams !=
+			ipa_ctx->uc_ctx.uc_event_top_ofst) {
+			IPAERR("uc top ofst changed new=%u cur=%u\n",
+				ipa_ctx->uc_ctx.uc_sram_mmio->
+					eventParams,
+				ipa_ctx->uc_ctx.uc_event_top_ofst);
+		}
+	}
+
+	return;
+
+bad_uc_top_ofst:
+	ipa_ctx->uc_ctx.uc_event_top_ofst = 0;
+}
+
+/**
+ * ipa2_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ *               and there was no recent failure in one of the commands.
+ *               A negative value is returned otherwise.
+ */
+int ipa2_uc_state_check(void)
+{
+	if (!ipa_ctx->uc_ctx.uc_inited) {
+		IPAERR("uC interface not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded\n");
+		return -EFAULT;
+	}
+
+	if (ipa_ctx->uc_ctx.uc_failed) {
+		IPAERR("uC has failed its last command\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa2_uc_state_check);
+
+/**
+ * ipa_uc_loaded_check() - Check the uC has been loaded
+ *
+ * Return value: 1 if the uC is loaded, 0 otherwise
+ */
+int ipa_uc_loaded_check(void)
+{
+	return ipa_ctx->uc_ctx.uc_loaded;
+}
+EXPORT_SYMBOL(ipa_uc_loaded_check);
+
+static void ipa_uc_event_handler(enum ipa_irq_type interrupt,
+				 void *private_data,
+				 void *interrupt_data)
+{
+	union IpaHwErrorEventData_t evt;
+	u8 feature;
+
+	WARN_ON(private_data != ipa_ctx);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	IPADBG("uC evt opcode=%u\n",
+		ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+
+	feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for event %u\n",
+			feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+	/* Feature specific handling */
+	if (uc_hdlrs[feature].ipa_uc_event_hdlr)
+		uc_hdlrs[feature].ipa_uc_event_hdlr
+			(ipa_ctx->uc_ctx.uc_sram_mmio);
+
+	/* General handling */
+	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_ERROR) {
+		evt.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		IPAERR("uC Error, evt errorType = %s\n",
+			ipa_hw_error_str(evt.params.errorType));
+		ipa_ctx->uc_ctx.uc_failed = true;
+		ipa_ctx->uc_ctx.uc_error_type = evt.params.errorType;
+		if (evt.params.errorType == IPA_HW_ZIP_ENGINE_ERROR) {
+			IPAERR("IPA has encountered a ZIP engine error\n");
+			ipa_ctx->uc_ctx.uc_zip_error = true;
+		}
+		BUG();
+	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_LOG_INFO) {
+		IPADBG("uC evt log info ofst=0x%x\n",
+			ipa_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa_log_evt_hdlr();
+	} else {
+		IPADBG("unsupported uC evt opcode=%u\n",
+				ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+}
+
+static int ipa_uc_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	int result = 0;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
+
+	result = ipa2_uc_state_check();
+	if (result)
+		goto fail;
+	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+	if (ipa2_inc_client_enable_clks_no_block(&log_info))
+		goto fail;
+
+	ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp =
+		IPA_CPU_2_HW_CMD_ERR_FATAL;
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+	ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
+	/* give uc enough time to save state */
+	udelay(IPA_PKT_FLUSH_TO_US);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("err_fatal issued\n");
+
+fail:
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa_uc_panic_blk = {
+	.notifier_call  = ipa_uc_panic_notifier,
+};
+
+void ipa_register_panic_hdlr(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&ipa_uc_panic_blk);
+}
+
+static void ipa_uc_response_hdlr(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	u8 feature;
+	int res;
+	int i;
+
+	WARN_ON(private_data != ipa_ctx);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	IPADBG("uC rsp opcode=%u\n",
+			ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	feature = EXTRACT_UC_FEATURE(ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for event %u\n",
+			feature, ipa_ctx->uc_ctx.uc_sram_mmio->eventOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	/* Feature specific handling */
+	if (uc_hdlrs[feature].ipa_uc_response_hdlr) {
+		res = uc_hdlrs[feature].ipa_uc_response_hdlr(
+			ipa_ctx->uc_ctx.uc_sram_mmio,
+			&ipa_ctx->uc_ctx.uc_status);
+		if (res == 0) {
+			IPADBG("feature %d specific response handler\n",
+				feature);
+			complete_all(&ipa_ctx->uc_ctx.uc_completion);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return;
+		}
+	}
+
+	/* General handling */
+	if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
+		ipa_ctx->uc_ctx.uc_loaded = true;
+		IPAERR("IPA uC loaded\n");
+		/*
+		 * The proxy vote is held until uC is loaded to ensure that
+		 * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
+		 */
+		ipa2_proxy_clk_unvote();
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (uc_hdlrs[i].ipa_uc_loaded_hdlr)
+				uc_hdlrs[i].ipa_uc_loaded_hdlr();
+		}
+		/* Queue the work to enable holb monitoring on IPA-USB Producer
+		 * pipe if valid.
+		 */
+		if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
+			queue_work(ipa_holb_wq, &ipa_holb_work);
+	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+		   IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+		uc_rsp.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->responseParams;
+		IPADBG("uC cmd response opcode=%u status=%u\n",
+		       uc_rsp.params.originalCmdOp,
+		       uc_rsp.params.status);
+		if (uc_rsp.params.originalCmdOp ==
+		    ipa_ctx->uc_ctx.pending_cmd) {
+			ipa_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+			complete_all(&ipa_ctx->uc_ctx.uc_completion);
+		} else {
+			IPAERR("Expected cmd=%u rcvd cmd=%u\n",
+			       ipa_ctx->uc_ctx.pending_cmd,
+			       uc_rsp.params.originalCmdOp);
+		}
+	} else {
+		IPAERR("Unsupported uC rsp opcode = %u\n",
+		       ipa_ctx->uc_ctx.uc_sram_mmio->responseOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa_uc_interface_init() - Initialize the interface with the uC
+ *
+ * Return value: 0 on success, negative value otherwise
+ */
+int ipa_uc_interface_init(void)
+{
+	int result;
+	unsigned long phys_addr;
+
+	if (ipa_ctx->uc_ctx.uc_inited) {
+		IPADBG("uC interface already initialized\n");
+		return 0;
+	}
+
+	ipa_holb_wq = create_singlethread_workqueue(
+			HOLB_WORKQUEUE_NAME);
+	if (!ipa_holb_wq) {
+		IPAERR("HOLB workqueue creation failed\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&ipa_ctx->uc_ctx.uc_lock);
+
+	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+		phys_addr = ipa_ctx->ipa_wrapper_base +
+			ipa_ctx->ctrl->ipa_reg_base_ofst +
+			IPA_SRAM_SW_FIRST_v2_5;
+	} else {
+		phys_addr = ipa_ctx->ipa_wrapper_base +
+			ipa_ctx->ctrl->ipa_reg_base_ofst +
+			IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(
+			ipa_ctx->smem_restricted_bytes / 4);
+	}
+
+	ipa_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
+					       IPA_RAM_UC_SMEM_SIZE);
+	if (!ipa_ctx->uc_ctx.uc_sram_mmio) {
+		IPAERR("Fail to ioremap IPA uC SRAM\n");
+		result = -ENOMEM;
+		goto remap_fail;
+	}
+
+	result = ipa2_add_interrupt_handler(IPA_UC_IRQ_0,
+		ipa_uc_event_handler, true,
+		ipa_ctx);
+	if (result) {
+		IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n");
+		result = -EFAULT;
+		goto irq_fail0;
+	}
+
+	result = ipa2_add_interrupt_handler(IPA_UC_IRQ_1,
+		ipa_uc_response_hdlr, true,
+		ipa_ctx);
+	if (result) {
+		IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
+		result = -EFAULT;
+		goto irq_fail1;
+	}
+
+	ipa_ctx->uc_ctx.uc_inited = true;
+
+	IPADBG("IPA uC interface is initialized\n");
+	return 0;
+
+irq_fail1:
+	ipa2_remove_interrupt_handler(IPA_UC_IRQ_0);
+irq_fail0:
+	iounmap(ipa_ctx->uc_ctx.uc_sram_mmio);
+remap_fail:
+	return result;
+}
+EXPORT_SYMBOL(ipa_uc_interface_init);
+
+/**
+ * ipa_uc_send_cmd() - Send a command to the uC
+ *
+ * Note: In case the operation times out (No response from the uC) or
+ *       polling maximal amount of retries has reached, the logic
+ *       considers it as an invalid state of the uC/IPA, and
+ *       issues a kernel panic.
+ *
+ * Returns: 0 on success.
+ *          -EINVAL in case of invalid input.
+ *          -EBADF in case uC interface is not initialized /
+ *                 or the uC has failed previously.
+ *          -EFAULT in case the received status doesn't match
+ *                  the expected.
+ */
+int ipa_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies)
+{
+	int index;
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+
+	mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
+
+	if (ipa2_uc_state_check()) {
+		IPADBG("uC send command aborted\n");
+		mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+		return -EBADF;
+	}
+
+	init_completion(&ipa_ctx->uc_ctx.uc_completion);
+
+	ipa_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd;
+	ipa_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
+	ipa_ctx->uc_ctx.pending_cmd = opcode;
+
+	ipa_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
+	ipa_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;
+
+	ipa_ctx->uc_ctx.uc_status = 0;
+
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_IRQ_EE_UC_n_OFFS(0), 0x1);
+
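+	/*
+	 * Two completion paths: in polling mode the shared-memory response
+	 * area is sampled up to IPA_UC_POLL_MAX_RETRY times; otherwise the
+	 * uC response interrupt handler completes uc_completion (or the
+	 * wait below times out).
+	 */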
+	if (polling_mode) {
+		for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
+			if (ipa_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			    IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+				uc_rsp.raw32b = ipa_ctx->uc_ctx.uc_sram_mmio->
+						responseParams;
+				if (uc_rsp.params.originalCmdOp ==
+				    ipa_ctx->uc_ctx.pending_cmd) {
+					ipa_ctx->uc_ctx.pending_cmd = -1;
+					break;
+				}
+			}
+			usleep_range(IPA_UC_POLL_SLEEP_USEC,
+					IPA_UC_POLL_SLEEP_USEC);
+		}
+
+		if (index == IPA_UC_POLL_MAX_RETRY) {
+			IPAERR("uC max polling retries reached\n");
+			if (ipa_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(ipa_ctx->
+					uc_ctx.uc_error_type));
+			}
+			mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+			BUG();
+			return -EFAULT;
+		}
+	} else {
+		if (wait_for_completion_timeout(&ipa_ctx->uc_ctx.uc_completion,
+			timeout_jiffies) == 0) {
+			IPAERR("uC timed out\n");
+			if (ipa_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(ipa_ctx->
+					uc_ctx.uc_error_type));
+			}
+			mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+			BUG();
+			return -EFAULT;
+		}
+	}
+
+	if (ipa_ctx->uc_ctx.uc_status != expected_status) {
+		IPAERR("Recevied status %u, Expected status %u\n",
+			ipa_ctx->uc_ctx.uc_status, expected_status);
+		ipa_ctx->uc_ctx.pending_cmd = -1;
+		mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+		return -EFAULT;
+	}
+
+	ipa_ctx->uc_ctx.pending_cmd = -1;
+	mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+
+	IPADBG("uC cmd %u send succeeded\n", opcode);
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa_uc_send_cmd);
+
+/**
+ * ipa_uc_register_handlers() - Registers event, response and log event
+ *                              handlers for a specific feature. Please note
+ *                              that currently only one handler can be
+ *                              registered per feature.
+ *
+ * Return value: None
+ */
+void ipa_uc_register_handlers(enum ipa_hw_features feature,
+			      struct ipa_uc_hdlrs *hdlrs)
+{
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Feature %u is invalid, not registering hdlrs\n",
+		       feature);
+		return;
+	}
+
+	mutex_lock(&ipa_ctx->uc_ctx.uc_lock);
+	uc_hdlrs[feature] = *hdlrs;
+	mutex_unlock(&ipa_ctx->uc_ctx.uc_lock);
+
+	IPADBG("uC handlers registered for feature %u\n", feature);
+}
+EXPORT_SYMBOL(ipa_uc_register_handlers);
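+
+/*
+ * Usage sketch (hypothetical feature code; my_uc_loaded_cb is an assumed
+ * callback, not part of this driver):
+ *
+ *	struct ipa_uc_hdlrs hdlrs;
+ *
+ *	memset(&hdlrs, 0, sizeof(hdlrs));
+ *	hdlrs.ipa_uc_loaded_hdlr = my_uc_loaded_cb;
+ *	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+ */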
+
+/**
+ * ipa_uc_reset_pipe() - reset a BAM pipe using the uC interface
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to issue a BAM
+ * PIPE reset request. The uC makes sure there's no traffic in
+ * the TX command queue before issuing the reset.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_uc_reset_pipe(enum ipa_client_type ipa_client)
+{
+	union IpaHwResetPipeCmdData_t cmd;
+	int ep_idx;
+	int ret;
+
+	ep_idx = ipa2_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPAERR("Invalid IPA client\n");
+		return 0;
+	}
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * continue with the sequence without resetting the
+	 * pipe.
+	 */
+	if (ipa2_uc_state_check()) {
+		IPADBG("uC interface will not be used to reset %s pipe %d\n",
+		       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+		       ep_idx);
+		return 0;
+	}
+
+	/*
+	 * IPA consumer = 0, IPA producer = 1.
+	 * IPA driver concept of PROD/CONS is the opposite of the
+	 * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+	 * and vice-versa.
+	 */
+	cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1);
+	cmd.params.pipeNum = (u8)ep_idx;
+
+	IPADBG("uC pipe reset on IPA %s pipe %d\n",
+	       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
+
+	ret = ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
+			      false, 10*HZ);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_reset_pipe);
+
+/**
+ * ipa_uc_monitor_holb() - Enable/Disable holb monitoring of a producer pipe.
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to disable/enable holb
+ * monitoring.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_uc_monitor_holb(enum ipa_client_type ipa_client, bool enable)
+{
+	union IpaHwmonitorHolbCmdData_t cmd;
+	int ep_idx;
+	int ret;
+
+	/* HOLB monitoring is applicable only to 2.6L. */
+	if (ipa_ctx->ipa_hw_type != IPA_HW_v2_6L) {
+		IPADBG("Not applicable on this target\n");
+		return 0;
+	}
+
+	ep_idx = ipa2_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPAERR("Invalid IPA client\n");
+		return 0;
+	}
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * continue with the sequence without updating HOLB
+	 * monitoring for the pipe.
+	 */
+	if (ipa2_uc_state_check()) {
+		IPADBG("uC interface will not be used for holb on %s pipe %d\n",
+		       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+		       ep_idx);
+		return 0;
+	}
+
+	/*
+	 * IPA consumer = 0, IPA producer = 1.
+	 * IPA driver concept of PROD/CONS is the opposite of the
+	 * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+	 * and vice-versa.
+	 */
+	cmd.params.monitorPipe = (u8)(enable ? 1 : 0);
+	cmd.params.pipeNum = (u8)ep_idx;
+
+	IPADBG("uC holb monitoring on IPA pipe %d, Enable: %d\n",
+	       ep_idx, enable);
+
+	ret = ipa_uc_send_cmd(cmd.raw32b,
+				IPA_CPU_2_HW_CMD_UPDATE_HOLB_MONITORING, 0,
+				false, 10*HZ);
+
+	return ret;
+}
+EXPORT_SYMBOL(ipa_uc_monitor_holb);
+
+/**
+ * ipa_start_monitor_holb() - Send HOLB command to monitor IPA-USB
+ * producer pipe.
+ *
+ * This function is called after uc is loaded to start monitoring
+ * IPA pipe towards USB in case USB is already connected.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_start_monitor_holb(struct work_struct *work)
+{
+	IPADBG("starting holb monitoring on IPA_CLIENT_USB_CONS\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_uc_monitor_holb(IPA_CLIENT_USB_CONS, true);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+
+/**
+ * ipa_uc_notify_clk_state() - notify to uC of clock enable / disable
+ * @enabled: true if clock are enabled
+ *
+ * The function uses the uC interface in order to notify uC before IPA clocks
+ * are disabled to make sure uC is not in the middle of operation.
+ * Also, after clocks are enabled, the uC needs to be notified to start
+ * processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_notify_clk_state(bool enabled)
+{
+	u32 opcode;
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * don't notify the uC on the enable/disable
+	 */
+	if (ipa2_uc_state_check()) {
+		IPADBG("uC interface will not notify the UC on clock state\n");
+		return 0;
+	}
+
+	IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+	opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+			     IPA_CPU_2_HW_CMD_CLK_GATE;
+
+	return ipa_uc_send_cmd(0, opcode, 0, true, 0);
+}
+EXPORT_SYMBOL(ipa_uc_notify_clk_state);
+
+/**
+ * ipa_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_update_hw_flags(u32 flags)
+{
+	union IpaHwUpdateFlagsCmdData_t cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.newFlags = flags;
+	return ipa_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+		false, HZ);
+}
+EXPORT_SYMBOL(ipa_uc_update_hw_flags);
+
+/**
+ * ipa_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMemCopyData_t *cmd;
+
+	IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
+	mem.size = sizeof(*cmd);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	cmd = (struct IpaHwMemCopyData_t *)mem.base;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->destination_addr = dest;
+	cmd->dest_buffer_size = len;
+	cmd->source_addr = src;
+	cmd->source_buffer_size = len;
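+	/*
+	 * The 32-bit command parameter carries the physical address of the
+	 * IpaHwMemCopyData_t block; the copy parameters themselves are not
+	 * passed as immediate data.
+	 */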
+	res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
+		true, 10 * HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto free_coherent;
+	}
+
+	res = 0;
+free_coherent:
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return res;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
new file mode 100644
index 0000000..08d7363
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_mhi.c
@@ -0,0 +1,966 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/* MHI uC interface definitions */
+#define IPA_HW_INTERFACE_MHI_VERSION            0x0004
+
+#define IPA_HW_MAX_NUMBER_OF_CHANNELS	2
+#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS	2
+#define IPA_HW_MAX_CHANNEL_HANDLE	(IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
+
+/**
+ * Values that represent the MHI commands from CPU to IPA HW.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready
+ *	to serve MHI transfers. Once initialization was completed HW shall
+ *	respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ *		IPA_HW_MHI_CHANNEL_STATE_ENABLE
+ * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel
+ *	processing state following host request. Once operation was completed
+ *	HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL synchronization.
+ * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
+ */
+enum ipa_cpu_2_hw_mhi_commands {
+	IPA_CPU_2_HW_CMD_MHI_INIT
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
+	IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
+};
+
+/**
+ * Values that represent MHI related HW responses to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
+ *	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
+ *	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specify the device detected an
+ *	error in an element from the transfer ring associated with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specify a bam
+ *	interrupt was asserted when MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error persists.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+	IPA_HW_CHANNEL_ERROR_NONE,
+	IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ *	secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+	IPA_HW_INVALID_MMIO_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_INVALID_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_HW_INVALID_EVENT_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_HW_NO_ED_IN_RING_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_HW_LINK_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located in offset zero of SW Partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ *	The state carries information regarding the error type.
+ *	See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u16 interfaceVersionMhi;
+	u8 mhiState;
+	u8 reserved_2B;
+	u8 mhiCnl0State;
+	u8 mhiCnl1State;
+	u8 mhiCnl2State;
+	u8 mhiCnl3State;
+	u8 mhiCnl4State;
+	u8 mhiCnl5State;
+	u8 mhiCnl6State;
+	u8 mhiCnl7State;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+};
+
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer and thus should reside in an address
+ * accessible to HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ *	(MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of MMIO structure in
+ *	host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI control data structures are allocated by
+ *	the host, including channel context array, event context array,
+ *	and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI data buffers are allocated by the host.
+ *	This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ *	event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+	u32 msiAddress;
+	u32 mmioBaseAddress;
+	u32 deviceMhiCtrlBaseAddress;
+	u32 deviceMhiDataBaseAddress;
+	u32 firstChannelIndex;
+	u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ *	command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ *	used as an index in channel context array structures.
+ * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ *	type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+	struct IpaHwMhiInitChannelCmdParams_t {
+		u32 channelHandle:8;
+		u32 contexArrayIndex:8;
+		u32 bamPipeId:6;
+		u32 channelDirection:2;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
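+/*
+ * Usage note (illustrative): unions of this form carry the 32-bit immediate
+ * command parameter. Callers fill the bit-fields in .params and pass
+ * .raw32b to ipa_uc_send_cmd(), as done in ipa_uc_mhi_init_channel() below.
+ */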
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ *	Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ *	rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+	struct IpaHwMhiChangeChannelStateCmdParams_t {
+		u32 requestedState:8;
+		u32 channelHandle:8;
+		u32 LPTransitionRejected:8;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+	struct IpaHwMhiStopEventUpdateDataParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case state is not as requested this is
+ *	error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending bam descriptors currently
+ *	queued
+*/
+union IpaHwMhiChangeChannelStateResponseData_t {
+	struct IpaHwMhiChangeChannelStateResponseParams_t {
+		u32 state:8;
+		u32 channelHandle:8;
+		u32 additonalParams:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+	struct IpaHwMhiChannelErrorEventParams_t {
+		u32 errorType:8;
+		u32 channelHandle:8;
+		u32 reserved:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+	struct IpaHwMhiChannelWakeupEventParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity was triggered due to DL activity
+ * @numULTimerExpired: Number of times UL Accm Timer expired
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+	u32 numULDLSync;
+	u32 numULTimerExpired;
+	u32 numChEvCtxWpRead;
+	u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell interrupts
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ *	Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ *	events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ *	after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ *	sending OOB and hitting OOB again before we processed threshold
+ *	number of packets
+ * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
+ */
+struct IpaHwStatsMhiCnlInfoData_t {
+	u32 doorbellInt;
+	u32 reProccesed;
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamInt;
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 delayedMsi;
+	u32 immediateMsi;
+	u32 thresholdMsi;
+	u32 numSuspend;
+	u32 numResume;
+	u32 num_OOB;
+	u32 num_OOB_timer_expiry;
+	u32 num_OOB_moderation_timer_start;
+	u32 num_db_mode_evt;
+};
+
+/**
+ * Structure holding the MHI statistics
+ * @mhiCmnStats: Stats pertaining to MHI
+ * @mhiCnlStats: Stats pertaining to each channel
+ */
+struct IpaHwStatsMhiInfoData_t {
+	struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
+	struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+};
+
+/**
+ * Structure holding the MHI Common Config info
+ * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
+ * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
+ *	enabled
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+struct IpaHwConfigMhiCmnInfoData_t {
+	u8 isDlUlSyncEnabled;
+	u8 UlAccmVal;
+	u8 ulMsiEventThreshold;
+	u8 dlMsiEventThreshold;
+};
+
+/**
+ * Structure holding the parameters for MSI info data
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwConfigMhiMsiInfoData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the MHI Channel Config info
+ * @transferRingSize: The Transfer Ring size in terms of Ring Elements
+ * @transferRingIndex: The Transfer Ring channel number as defined by host
+ * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
+ * @bamPipeIndex: The BAM Pipe associated with this channel
+ * @isOutChannel: Indication for the direction of channel
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiCnlInfoData_t {
+	u16 transferRingSize;
+	u8  transferRingIndex;
+	u8  eventRingIndex;
+	u8  bamPipeIndex;
+	u8  isOutChannel;
+	u8  reserved_0;
+	u8  reserved_1;
+};
+
+/**
+ * Structure holding the MHI Event Config info
+ * @msiVec: msi vector to invoke MSI interrupt
+ * @intmodtValue: Interrupt moderation timer (in milliseconds)
+ * @eventRingSize: The Event Ring size in terms of Ring Elements
+ * @eventRingIndex: The Event Ring number as defined by host
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ * @reserved_2: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiEventInfoData_t {
+	u32 msiVec;
+	u16 intmodtValue;
+	u16 eventRingSize;
+	u8  eventRingIndex;
+	u8  reserved_0;
+	u8  reserved_1;
+	u8  reserved_2;
+};
+
+/**
+ * Structure holding the MHI Config info
+ * @mhiCmnCfg: Common Config pertaining to MHI
+ * @mhiMsiCfg: Config pertaining to MSI config
+ * @mhiCnlCfg: Config pertaining to each channel
+ * @mhiEvtCfg: Config pertaining to each event Ring
+ */
+struct IpaHwConfigMhiInfoData_t {
+	struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
+	struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
+	struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+	struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
+					IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
+};
+
+
+struct ipa_uc_mhi_ctx {
+	u8 expected_responseOp;
+	u32 expected_responseParams;
+	void (*ready_cb)(void);
+	void (*wakeup_request_cb)(void);
+	u32 mhi_uc_stats_ofst;
+	struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
+};
+
+#define PRINT_COMMON_STATS(x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
+
+#define PRINT_CHANNEL_STATS(ch, x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
+
+struct ipa_uc_mhi_ctx *ipa_uc_mhi_ctx;
+
+static int ipa_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio, u32 *uc_status)
+{
+	IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
+	if (uc_sram_mmio->responseOp == ipa_uc_mhi_ctx->expected_responseOp &&
+	    uc_sram_mmio->responseParams ==
+	    ipa_uc_mhi_ctx->expected_responseParams) {
+		*uc_status = 0;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void ipa_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+{
+	if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
+		union IpaHwMhiChannelErrorEventData_t evt;
+
+		IPAERR("Channel error\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
+			evt.params.errorType, evt.params.channelHandle,
+			evt.params.reserved);
+	} else if (ipa_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
+		union IpaHwMhiChannelWakeupEventData_t evt;
+
+		IPADBG("WakeUp channel request\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("channelHandle=%d reserved=%d\n",
+			evt.params.channelHandle, evt.params.reserved);
+		ipa_uc_mhi_ctx->wakeup_request_cb();
+	}
+}
+
+static void ipa_uc_mhi_event_log_info_hdlr(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
+		IPAERR("MHI feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
+		params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
+		IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsMhiInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_MHI].params.size);
+		return;
+	}
+
+	ipa_uc_mhi_ctx->mhi_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_MHI].params.offset;
+	IPAERR("MHI stats ofst=0x%x\n", ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
+	if (ipa_uc_mhi_ctx->mhi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsMhiInfoData_t) >=
+		ipa_ctx->ctrl->ipa_reg_base_ofst +
+		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+		ipa_ctx->smem_sz) {
+		IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
+			ipa_uc_mhi_ctx->mhi_uc_stats_ofst);
+		return;
+	}
+
+	ipa_uc_mhi_ctx->mhi_uc_stats_mmio =
+		ioremap(ipa_ctx->ipa_wrapper_base +
+		ipa_uc_mhi_ctx->mhi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsMhiInfoData_t));
+	if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc mhi stats\n");
+		return;
+	}
+}
+
+int ipa2_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+{
+	struct ipa_uc_hdlrs hdlrs;
+
+	if (ipa_uc_mhi_ctx) {
+		IPAERR("Already initialized\n");
+		return -EFAULT;
+	}
+
+	ipa_uc_mhi_ctx = kzalloc(sizeof(*ipa_uc_mhi_ctx), GFP_KERNEL);
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	ipa_uc_mhi_ctx->ready_cb = ready_cb;
+	ipa_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
+
+	memset(&hdlrs, 0, sizeof(hdlrs));
+	hdlrs.ipa_uc_loaded_hdlr = ipa_uc_mhi_ctx->ready_cb;
+	hdlrs.ipa_uc_response_hdlr = ipa_uc_mhi_response_hdlr;
+	hdlrs.ipa_uc_event_hdlr = ipa_uc_mhi_event_hdlr;
+	hdlrs.ipa_uc_event_log_info_hdlr = ipa_uc_mhi_event_log_info_hdlr;
+	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+
+	IPADBG("Done\n");
+	return 0;
+}
+
+void ipa2_uc_mhi_cleanup(void)
+{
+	struct ipa_uc_hdlrs null_hdlrs = { 0 };
+
+	IPADBG("Enter\n");
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+		return;
+	}
+	ipa_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+	kfree(ipa_uc_mhi_ctx);
+	ipa_uc_mhi_ctx = NULL;
+
+	IPADBG("Done\n");
+}
+
+int ipa_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMhiInitCmdData_t *init_cmd_data;
+	struct IpaHwMhiMsiCmdData_t *msi_cmd;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa_uc_update_hw_flags(0);
+	if (res) {
+		IPAERR("ipa_uc_update_hw_flags failed %d\n", res);
+		goto disable_clks;
+	}
+
+	mem.size = sizeof(*init_cmd_data);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+	memset(mem.base, 0, mem.size);
+	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
+	init_cmd_data->msiAddress = msi->addr_low;
+	init_cmd_data->mmioBaseAddress = mmio_addr;
+	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
+	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
+	init_cmd_data->firstChannelIndex = first_ch_idx;
+	init_cmd_data->firstEventRingIndex = first_evt_idx;
+	res = ipa_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
+		false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	mem.size = sizeof(*msi_cmd);
+	mem.base = dma_alloc_coherent(ipa_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+
+	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
+	msi_cmd->msiAddress_hi = msi->addr_hi;
+	msi_cmd->msiAddress_low = msi->addr_low;
+	msi_cmd->msiData = msi->data;
+	msi_cmd->msiMask = msi->mask;
+	res = ipa_uc_send_cmd((u32)mem.phys_base,
+		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+
+}
+
+int ipa_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection)
+
+{
+	int res;
+	union IpaHwMhiInitChannelCmdData_t init_cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (ipa_ep_idx < 0  || ipa_ep_idx >= ipa_ctx->ipa_num_pipes) {
+		IPAERR("Invalid ipa_ep_idx.\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&init_cmd, 0, sizeof(init_cmd));
+	init_cmd.params.channelHandle = channelHandle;
+	init_cmd.params.contexArrayIndex = contexArrayIndex;
+	init_cmd.params.bamPipeId = ipa_ep_idx;
+	init_cmd.params.channelDirection = channelDirection;
+
+	res = ipa_uc_send_cmd(init_cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+
+int ipa2_uc_mhi_reset_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa2_uc_mhi_suspend_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	cmd.params.channelHandle = channelHandle;
+	cmd.params.LPTransitionRejected = LPTransitionRejected;
+	res = ipa_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa2_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+	union IpaHwMhiStopEventUpdateData_t cmd;
+	int res;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.channelHandle = channelHandle;
+
+	ipa_uc_mhi_ctx->expected_responseOp =
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
+	ipa_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
+
+	res = ipa_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa2_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+	int res;
+
+	if (!ipa_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
+		cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
+	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
+		cmd->params.ulMsiEventThreshold,
+		cmd->params.dlMsiEventThreshold);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa_uc_send_cmd(cmd->raw32b,
+		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa2_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+	int nBytes = 0;
+	int i;
+
+	if (!ipa_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("MHI uc stats is not valid\n");
+		return 0;
+	}
+
+	nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+		"Common Stats:\n");
+	PRINT_COMMON_STATS(numULDLSync);
+	PRINT_COMMON_STATS(numULTimerExpired);
+	PRINT_COMMON_STATS(numChEvCtxWpRead);
+
+	for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
+		nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+			"Channel %d Stats:\n", i);
+		PRINT_CHANNEL_STATS(i, doorbellInt);
+		PRINT_CHANNEL_STATS(i, reProccesed);
+		PRINT_CHANNEL_STATS(i, bamFifoFull);
+		PRINT_CHANNEL_STATS(i, bamFifoEmpty);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
+		PRINT_CHANNEL_STATS(i, bamInt);
+		PRINT_CHANNEL_STATS(i, ringFull);
+		PRINT_CHANNEL_STATS(i, ringEmpty);
+		PRINT_CHANNEL_STATS(i, ringUsageHigh);
+		PRINT_CHANNEL_STATS(i, ringUsageLow);
+		PRINT_CHANNEL_STATS(i, delayedMsi);
+		PRINT_CHANNEL_STATS(i, immediateMsi);
+		PRINT_CHANNEL_STATS(i, thresholdMsi);
+		PRINT_CHANNEL_STATS(i, numSuspend);
+		PRINT_CHANNEL_STATS(i, numResume);
+		PRINT_CHANNEL_STATS(i, num_OOB);
+		PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
+		PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
+		PRINT_CHANNEL_STATS(i, num_db_mode_evt);
+	}
+
+	return nBytes;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
new file mode 100644
index 0000000..d14f8da
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_ntn.c
@@ -0,0 +1,444 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa_uc_ntn_event_handler(
+		struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio)
+{
+	union IpaHwNTNErrorEventData_t ntn_evt;
+
+	if (uc_sram_mmio->eventOp == IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+		ntn_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+			ntn_evt.params.ntn_error_type,
+			ntn_evt.params.ipa_pipe_number,
+			ntn_evt.params.ntn_ch_err_type);
+	}
+}
+
+static void ipa_uc_ntn_event_log_info_handler(
+		struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+		IPAERR("NTN feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+		params.size != sizeof(struct IpaHwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsNTNInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_NTN].params.size);
+		return;
+	}
+
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	IPAERR("NTN stats ofst=0x%x\n", ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	if (ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct IpaHwStatsNTNInfoData_t) >=
+		ipa_ctx->ctrl->ipa_reg_base_ofst +
+		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+		ipa_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa_ctx->ipa_wrapper_base +
+		ipa_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+		sizeof(struct IpaHwStatsNTNInfoData_t));
+	if (!ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa2_get_ntn_stats() - Query NTN statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_ntn_stats(struct IpaHwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!stats || !ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p ntn_stats=%p\n",
+			stats,
+			ipa_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(tail_ptr_val);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+	TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(num_bam_int_handled_while_not_in_bam);
+	RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+int ipa2_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+	int ret;
+
+	if (!ipa_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return -ENXIO;
+	}
+
+	ret = ipa2_uc_state_check();
+	if (ret) {
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
+		ipa_ctx->uc_ntn_ctx.priv = user_data;
+		return 0;
+	}
+
+	return -EEXIST;
+}
+
+static void ipa_uc_ntn_loaded_handler(void)
+{
+	if (!ipa_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa_ctx->uc_ntn_ctx.uc_ready_cb) {
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb(
+			ipa_ctx->uc_ntn_ctx.priv);
+
+		ipa_ctx->uc_ntn_ctx.uc_ready_cb =
+			NULL;
+		ipa_ctx->uc_ntn_ctx.priv = NULL;
+	}
+}
+
+int ipa_ntn_init(void)
+{
+	struct ipa_uc_hdlrs uc_ntn_cbs = { 0 };
+
+	uc_ntn_cbs.ipa_uc_event_hdlr = ipa_uc_ntn_event_handler;
+	uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+		ipa_uc_ntn_event_log_info_handler;
+	uc_ntn_cbs.ipa_uc_loaded_hdlr =
+		ipa_uc_ntn_loaded_handler;
+
+	ipa_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+	return 0;
+}
+
+static int ipa2_uc_send_ntn_setup_pipe_cmd(
+	struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+	int ipa_ep_idx;
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwNtnSetUpCmdData_t *Ntn_params;
+	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+	if (ntn_info == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to get ep idx.\n");
+		return -EFAULT;
+	}
+
+	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+	IPADBG("ring_base_pa = 0x%pa\n",
+			&ntn_info->ring_base_pa);
+	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+	Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+	Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+	Ntn_params->num_buffers = ntn_info->num_buffers;
+	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->ipa_pipe_number = ipa_ep_idx;
+	Ntn_params->dir = dir;
+
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result)
+		result = -EFAULT;
+
+	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
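+
+/*
+ * Note: the setup command above follows the same pattern as the MHI init
+ * path: allocate a coherent DMA buffer, fill the command payload, pass its
+ * physical address to ipa_uc_send_cmd(), and free the buffer when done.
+ */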
+
+/**
+ * ipa2_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa2_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp)
+{
+	int ipa_ep_idx_ul, ipa_ep_idx_dl;
+	struct ipa_ep_context *ep_ul, *ep_dl;
+	int result = 0;
+
+	if (in == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+	if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+
+	ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->valid || ep_dl->valid) {
+		IPAERR("EP already allocated ul:%d dl:%d\n",
+			   ep_ul->valid, ep_dl->valid);
+		return -EFAULT;
+	}
+
+	memset(ep_ul, 0, offsetof(struct ipa_ep_context, sys));
+	memset(ep_dl, 0, offsetof(struct ipa_ep_context, sys));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup ul ep cfg */
+	ep_ul->valid = 1;
+	ep_ul->client = in->ul.client;
+	result = ipa_enable_data_path(ipa_ep_idx_ul);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_ul);
+		return -EFAULT;
+	}
+	ep_ul->client_notify = notify;
+	ep_ul->priv = priv;
+
+	memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+	ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_ul->cfg.hdr.hdr_len = hdr_len;
+	ep_ul->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa2_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+		IPAERR("fail to setup ul pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+		IPAERR("fail to send cmd to uc for ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa_install_dflt_flt_rules(ipa_ep_idx_ul);
+	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPAERR("client %d (ep: %d) connected\n", in->ul.client,
+		ipa_ep_idx_ul);
+
+	/* setup dl ep cfg */
+	ep_dl->valid = 1;
+	ep_dl->client = in->dl.client;
+	result = ipa_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_ul->cfg));
+	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+	ep_dl->cfg.hdr.hdr_len = hdr_len;
+	ep_dl->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa2_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+		IPAERR("fail to setup dl pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa2_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+		IPAERR("fail to send cmd to uc for dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPAERR("client %d (ep: %d) connected\n", in->dl.client,
+		ipa_ep_idx_dl);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa2_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+
+int ipa2_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl)
+{
+	struct ipa_mem_buffer cmd;
+	struct ipa_ep_context *ep_ul, *ep_dl;
+	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+	union IpaHwNtnCommonChCmdData_t *tear;
+	int result = 0;
+
+	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+	ep_ul = &ipa_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+		ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+		IPAERR("channel bad state: ul %d dl %d\n",
+			ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+		return -EFAULT;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	/* teardown the UL pipe */
+	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa_disable_data_path(ipa_ep_idx_ul);
+	ipa_delete_dflt_flt_rules(ipa_ep_idx_ul);
+	memset(&ipa_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa_ep_context));
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+	/* teardown the DL pipe */
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa_disable_data_path(ipa_ep_idx_dl);
+	memset(&ipa_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
new file mode 100644
index 0000000..3bec471
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_offload_i.h
@@ -0,0 +1,514 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ *  @brief   Enum value determined based on the feature it
+ *           corresponds to
+ *  +----------------+----------------+
+ *  |    3 bits      |     5 bits     |
+ *  +----------------+----------------+
+ *  |   HW_FEATURE   |     OPCODE     |
+ *  +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
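+
+/*
+ * Worked example (values per enum ipa_hw_features below): for
+ * IPA_HW_FEATURE_MHI (0x1), FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1) is
+ * (0x1 << 5) | 1 = 0x21, and EXTRACT_UC_FEATURE(0x21) yields 0x1 again.
+ */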
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa_hw_features - Values that represent the features supported in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+*/
+enum ipa_hw_features {
+	IPA_HW_FEATURE_COMMON = 0x0,
+	IPA_HW_FEATURE_MHI    = 0x1,
+	IPA_HW_FEATURE_WDI    = 0x3,
+	IPA_HW_FEATURE_NTN    = 0x4,
+	IPA_HW_FEATURE_OFFLOAD = 0x5,
+	IPA_HW_FEATURE_MAX    = IPA_HW_NUM_FEATURES
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter. The parameter filed can hold 32 bits
+ *		of parameters (immediate parameters) and point on structure in
+ *		system memory (in such case the address must be accessible
+ *		for HW)
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter filed can hold
+ *			32 bits of parameters (immediate parameters) and point
+ *			on structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter filed can hold 32 bits
+ *			of parameters (immediate parameters) and point on
+ *			structure in system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the error
+ *				type.
+ * @warningCounter : The warnings counter. The counter carries information
+ *			regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+	u8  cmdOp;
+	u8  reserved_01;
+	u16 reserved_03_02;
+	u32 cmdParams;
+	u8  responseOp;
+	u8  reserved_09;
+	u16 reserved_0B_0A;
+	u32 responseParams;
+	u8  eventOp;
+	u8  reserved_11;
+	u16 reserved_13_12;
+	u32 eventParams;
+	u32 reserved_1B_18;
+	u32 firstErrorAddress;
+	u8  hwState;
+	u8  warningCounter;
+	u16 reserved_23_22;
+	u16 interfaceVersionCommon;
+	u16 reserved_27_26;
+} __packed;
+
+/**
+ * union IpaHwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union IpaHwFeatureInfoData_t {
+	struct IpaHwFeatureInfoParams_t {
+		u32 offset:16;
+		u32 size:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note    Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct IpaHwEventInfoData_t {
+	u32 baseAddrOffset;
+	union IpaHwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note    The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+	u32 featureMask;
+	u32 circBuffBaseAddrOffset;
+	struct IpaHwEventInfoData_t statsInfo;
+	struct IpaHwEventInfoData_t configInfo;
+
+} __packed;
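+
+/*
+ * Illustrative use: a feature's stats block is located by adding
+ * statsInfo.baseAddrOffset and statsInfo.featureInfo[feature].params.offset
+ * to the IPA wrapper base, as done in the MHI and NTN event log info
+ * handlers.
+ */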
+
+/**
+ * struct ipa_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa_uc_ntn_ctx {
+	u32 ntn_uc_stats_ofst;
+	struct IpaHwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa_hw_2_cpu_ntn_events - Values that represent HW event
+ *			to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ *			detected an error in NTN
+ *
+ */
+enum ipa_hw_2_cpu_ntn_events {
+	IPA_HW_2_CPU_EVENT_NTN_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_ntn_errors {
+	IPA_HW_NTN_ERROR_NONE    = 0,
+	IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ *     Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_states {
+	IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_NTN_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_NTN_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_NTN_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+/**
+ * enum ipa_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ *		num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ *		failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa_hw_ntn_channel_errors {
+	IPA_HW_NTN_CH_ERR_NONE            = 0,
+	IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_NTN_TX_FSM_ERROR           = 2,
+	IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL  = 3,
+	IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+	IPA_HW_NTN_RX_FSM_ERROR           = 5,
+	IPA_HW_NTN_RX_CACHE_NON_EMPTY     = 6,
+	IPA_HW_NTN_CH_ERR_RESERVED        = 0xFF
+};
+
+
+/**
+ * struct IpaHwNtnSetUpCmdData_t  - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ *  ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ *  buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ *  Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ *  Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ *  DDR
+ */
+struct IpaHwNtnSetUpCmdData_t {
+	u32 ring_base_pa;
+	u32 buff_pool_base_pa;
+	u16 ntn_ring_size;
+	u16 num_buffers;
+	u32 ntn_reg_base_ptr_pa;
+	u8  ipa_pipe_number;
+	u8  dir;
+	u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union IpaHwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ * @ipa_pipe_number: IPA pipe number. This could be Tx or an Rx pipe
+ */
+union IpaHwNtnCommonChCmdData_t {
+	struct IpaHwNtnCommonChCmdParams_t {
+		u32  ipa_pipe_number :8;
+		u32  reserved        :24;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union IpaHwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ * @ntn_error_type: type of NTN error (IPA_HW_NTN_ERRORS)
+ * @ipa_pipe_number: IPA pipe number on which the error has happened.
+ *   Applicable only if error type indicates channel error
+ * @ntn_ch_err_type: Information about the channel error (if
+ *		available)
+ */
+union IpaHwNTNErrorEventData_t {
+	struct IpaHwNTNErrorEventParams_t {
+		u32  ntn_error_type  :8;
+		u32  reserved        :8;
+		u32  ipa_pipe_number :8;
+		u32  ntn_ch_err_type :8;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTNRxInfoData_t - NTN Structure holding the
+ * Rx pipe information
+ *
+ * @max_outstanding_pkts: Number of outstanding packets in Rx
+ *		Ring
+ * @num_pkts_processed: Number of packets processed - cumulative
+ * @rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ * @rx_ind_ring_stats:
+ * @bam_stats:
+ * @num_bam_int_handled: Number of Bam Interrupts handled by FW
+ * @num_db: Number of times the doorbell was rung
+ * @num_unexpected_db: Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state:
+ * @num_bam_int_handled_while_not_in_bam: Number of Bam
+ *		Interrupts handled by FW
+ * @num_bam_int_handled_while_in_bam_state: Number of Bam
+ *   Interrupts handled by FW
+ */
+struct NTNRxInfoData_t {
+	u32  max_outstanding_pkts;
+	u32  num_pkts_processed;
+	u32  rx_ring_rp_value;
+	struct IpaHwRingStats_t rx_ind_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32  num_bam_int_handled;
+	u32  num_db;
+	u32  num_unexpected_db;
+	u32  num_pkts_in_dis_uninit_state;
+	u32  num_bam_int_handled_while_not_in_bam;
+	u32  num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel statistics.
+ * Ensure that this is always word aligned
+ *
+ * @num_pkts_processed: Number of packets processed - cumulative
+ * @tail_ptr_val: Latest value of doorbell written to copy engine
+ * @num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ * @tx_comp_ring_stats:
+ * @bam_stats:
+ * @num_db: Number of times the doorbell was rung
+ * @num_unexpected_db: Number of unexpected doorbells
+ * @num_bam_int_handled: Number of Bam Interrupts handled by FW
+ * @num_bam_int_in_non_running_state: Number of Bam interrupts
+ *			while not in Running state
+ * @num_qmb_int_handled: Number of QMB interrupts handled
+ * @num_bam_int_handled_while_wait_for_bam: Number of times the
+ *		Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+	u32  num_pkts_processed;
+	u32  tail_ptr_val;
+	u32  num_db_fired;
+	struct IpaHwRingStats_t tx_comp_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32  num_db;
+	u32  num_unexpected_db;
+	u32  num_bam_int_handled;
+	u32  num_bam_int_in_non_running_state;
+	u32  num_qmb_int_handled;
+	u32  num_bam_int_handled_while_wait_for_bam;
+	u32  num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct IpaHwStatsNTNInfoData_t - Structure holding the NTN Rx/Tx
+ * channel statistics. Ensure that this is always word aligned
+ *
+ */
+struct IpaHwStatsNTNInfoData_t {
+	struct NTNRxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+	struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
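+
+/*
+ * Note: these flags are OR-ed into the endpoint's uc_offload_state as an
+ * offload pipe progresses; e.g. ipa2_setup_uc_ntn_pipes() sets
+ * IPA_UC_OFFLOAD_CONNECTED after the uC setup command succeeds.
+ */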
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands -  Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ *				Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ *				Offload protocol's Tx/Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is initialized
+ *		but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ *		SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use
+ *		in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa_hw_offload_channel_states {
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_offload_cmd_resp_status {
+	IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+	IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+	IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+	IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+	IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Channel setup parameters for the
+ * supported offload protocols
+ *
+ */
+union IpaHwSetUpCmd {
+	struct IpaHwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Parameters for the
+ * IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP command
+ *
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+	u8 protocol;
+	union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwCommonChCmd  - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ *
+ *
+ */
+union IpaHwCommonChCmd {
+	union IpaHwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+	u8 protocol;
+	union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
new file mode 100644
index 0000000..aca6d05
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -0,0 +1,1896 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+
+#define IPA_HOLB_TMR_DIS 0x0
+
+#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
+#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
+#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
+#define IPA_WDI_RING_ALIGNMENT 8
+
+#define IPA_WDI_CONNECTED BIT(0)
+#define IPA_WDI_ENABLED BIT(1)
+#define IPA_WDI_RESUMED BIT(2)
+#define IPA_UC_POLL_SLEEP_USEC 100
+
+#define IPA_WDI_RX_RING_RES 0
+#define IPA_WDI_RX_RING_RP_RES 1
+#define IPA_WDI_RX_COMP_RING_RES 2
+#define IPA_WDI_RX_COMP_RING_WP_RES 3
+#define IPA_WDI_TX_RING_RES 4
+#define IPA_WDI_CE_RING_RES 5
+#define IPA_WDI_CE_DB_RES 6
+#define IPA_WDI_MAX_RES 7
+
+struct ipa_wdi_res {
+	struct ipa_wdi_buffer_info *res;
+	unsigned int nents;
+	bool valid;
+};
+
+static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES];
+
+static void ipa_uc_wdi_loaded_handler(void);
+
+/**
+ * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to
+ * CPU.
+ * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
+ * in WDI
+ */
+enum ipa_hw_2_cpu_wdi_events {
+	IPA_HW_2_CPU_EVENT_WDI_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+};
+
+/**
+ * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
+ * machine.
+ * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
+ * disabled
+ * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
+ * suspended state
+ * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
+ * operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_wdi_channel_states {
+	IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+	IPA_HW_WDI_CHANNEL_STATE_RUNNING         = 3,
+	IPA_HW_WDI_CHANNEL_STATE_ERROR           = 4,
+	IPA_HW_WDI_CHANNEL_STATE_INVALID         = 0xFF
+};
+
+/**
+ * enum ipa_cpu_2_hw_wdi_commands - Values that represent the WDI commands
+ * from CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/Rx path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+	IPA_CPU_2_HW_CMD_WDI_TX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_CPU_2_HW_CMD_WDI_RX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_CPU_2_HW_CMD_WDI_CH_ENABLE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_CPU_2_HW_CMD_WDI_CH_RESUME  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error persists
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_wdi_errors {
+	IPA_HW_WDI_ERROR_NONE    = 0,
+	IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI channel error types. This is
+ * present in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error persists
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
+ * Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating the number of
+ * ring elements (RE) to fetch
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+ */
+enum ipa_hw_wdi_ch_errors {
+	IPA_HW_WDI_CH_ERR_NONE                 = 0,
+	IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_WDI_TX_FSM_ERROR                = 2,
+	IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL       = 3,
+	IPA_HW_WDI_CH_ERR_RESERVED             = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t  - Structure referring to the common and
+ * WDI section of the 128B shared memory located at offset zero of the SW
+ * Partition in IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u32 reserved_2B_28;
+	u32 reserved_2F_2C;
+	u32 reserved_33_30;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+	u16 interfaceVersionWdi;
+	u16 reserved_43_42;
+	u8  wdi_tx_ch_0_state;
+	u8  wdi_rx_ch_0_state;
+	u16 reserved_47_46;
+} __packed;
+
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring has to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer, thus they should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u32 comp_ring_base_pa_hi;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u32 ce_ring_base_pa_hi;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u32 ce_ring_doorbell_pa_hi;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t -  Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * IPA uc is expected to communicate about the Read pointer into the Rx Ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer, thus they should reside in an address
+ * accessible to HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u8  ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_base_pa_hi;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u32 rx_ring_rp_pa_hi;
+	u32 rx_comp_ring_base_pa;
+	u32 rx_comp_ring_base_pa_hi;
+	u32 rx_comp_ring_size;
+	u32 rx_comp_ring_wp_pa;
+	u32 rx_comp_ring_wp_pa_hi;
+	u8  ipa_pipe_number;
+} __packed;
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+	struct IpaHwWdiRxExtCfgCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 qmap_id:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t -  Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number :  The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+	struct IpaHwWdiCommonChCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
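[Editor's note] The immediate-parameter unions above are all used the same way: the caller fills the bitfields through .params and passes the whole word to the uC via .raw32b, exactly as the channel enable/disable/suspend/resume/tear-down helpers later in this file do. A minimal sketch, not part of the patch; the helper name is invented, while ipa_uc_send_cmd() and its arguments are copied from the callers below:

static int wdi_send_common_ch_cmd(u32 clnt_hdl, u32 opcode)
{
	union IpaHwWdiCommonChCmdData_t ch_cmd;

	ch_cmd.raw32b = 0;			/* clear reserved bits */
	ch_cmd.params.ipa_pipe_number = clnt_hdl;

	/* opcode is one of IPA_CPU_2_HW_CMD_WDI_CH_* or _TEAR_DOWN */
	return ipa_uc_send_cmd(ch_cmd.raw32b, opcode,
			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
			false, 10*HZ);
}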
+/**
+ * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : Type of WDI error that was detected (see enum
+ * ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable
+ * only if error type indicates channel error
+ * @wdi_ch_err_type : Information about the channel error (if available)
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiErrorEventData_t {
+	struct IpaHwWdiErrorEventParams_t {
+		u32 wdi_error_type:8;
+		u32 reserved:8;
+		u32 ipa_pipe_number:8;
+		u32 wdi_ch_err_type:8;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+static void ipa_uc_wdi_event_log_info_handler(
+		struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) {
+		IPAERR("WDI feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].
+		params.size != sizeof(struct IpaHwStatsWDIInfoData_t)) {
+		IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsWDIInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_WDI].params.size);
+		return;
+	}
+
+	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_WDI].params.offset;
+	IPAERR("WDI stats ofst=0x%x\n", ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+	if (ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsWDIInfoData_t) >=
+		ipa_ctx->ctrl->ipa_reg_base_ofst +
+		IPA_SRAM_DIRECT_ACCESS_N_OFST_v2_0(0) +
+		ipa_ctx->smem_sz) {
+		IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
+			ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+		return;
+	}
+
+	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
+		ioremap(ipa_ctx->ipa_wrapper_base +
+		ipa_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsWDIInfoData_t));
+	if (!ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc wdi stats\n");
+		return;
+	}
+}
+
+static void ipa_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
+				     *uc_sram_mmio)
+{
+	union IpaHwWdiErrorEventData_t wdi_evt;
+	struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_WDI_ERROR) {
+		wdi_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
+			wdi_evt.params.wdi_error_type,
+			wdi_evt.params.ipa_pipe_number,
+			wdi_evt.params.wdi_ch_err_type);
+		wdi_sram_mmio_ext =
+			(struct IpaHwSharedMemWdiMapping_t *)
+			uc_sram_mmio;
+		IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
+			wdi_sram_mmio_ext->wdi_tx_ch_0_state,
+			wdi_sram_mmio_ext->wdi_rx_ch_0_state);
+	}
+}
+
+/**
+ * ipa2_get_wdi_stats() - Query WDI statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa2_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats.y = \
+	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
+#define RX_STATS(y) stats->rx_ch_stats.y = \
+	ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!stats || !ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p wdi_stats=%p\n",
+			stats,
+			ipa_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(copy_engine_doorbell_value);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(reserved1);
+	RX_STATS(reserved2);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
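[Editor's note] A hedged caller-side sketch of the stats query above, not part of the patch: the function name is invented, while the struct and the num_pkts_processed fields come from the TX_STATS()/RX_STATS() copies in ipa2_get_wdi_stats().

static void wdi_dump_stats_sketch(void)
{
	struct IpaHwStatsWDIInfoData_t stats;

	if (ipa2_get_wdi_stats(&stats))
		return;		/* uC stats not mapped yet, or bad param */

	pr_info("WDI tx pkts=%u rx pkts=%u\n",
		stats.tx_ch_stats.num_pkts_processed,
		stats.rx_ch_stats.num_pkts_processed);
}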
+int ipa_wdi_init(void)
+{
+	struct ipa_uc_hdlrs uc_wdi_cbs = { 0 };
+
+	uc_wdi_cbs.ipa_uc_event_hdlr = ipa_uc_wdi_event_handler;
+	uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
+		ipa_uc_wdi_event_log_info_handler;
+	uc_wdi_cbs.ipa_uc_loaded_hdlr =
+		ipa_uc_wdi_loaded_handler;
+
+	ipa_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);
+
+	return 0;
+}
+
+static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	ret = ipa_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_DEVICE) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
+
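[Editor's note] The rounding in ipa_create_uc_smmu_mapping_pa() maps whole pages even when the caller passes an unaligned physical address, and returns an IOVA that preserves the offset within the first page. A worked example of the arithmetic, assuming 4 KB pages (the addresses are made up):

/*
 *	pa       = 0x81234570, len = 0x20
 *	page_pa  = rounddown(pa, PAGE_SIZE)               = 0x81234000
 *	true_len = roundup(len + pa - page_pa, PAGE_SIZE) = 0x1000
 *	*iova    = va + (pa - page_pa)                    = va + 0x570
 *
 * i.e. the whole enclosing page is mapped and the returned IOVA keeps the
 * original offset of pa inside that page.
 */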
+static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret;
+	int i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		phys = page_to_phys(sg_page(sg));
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa_iommu_map(cb->mapping->domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
+
+static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
+	int i;
+	int j;
+	int start;
+	int end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa_ctx->ipa_wdi2)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->mapping->domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+
+}
+
+static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
+		unsigned long iova, size_t len)
+{
+	IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&pa, iova, len);
+	wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+	if (!wdi_res[res_idx].res)
+		BUG();
+	wdi_res[res_idx].nents = 1;
+	wdi_res[res_idx].valid = true;
+	wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE);
+	wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE);
+	wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa,
+				PAGE_SIZE), PAGE_SIZE);
+	IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova,
+			wdi_res[res_idx].res->size);
+}
+
+static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
+		unsigned long iova)
+{
+	int i;
+	struct scatterlist *sg;
+	unsigned long curr_iova = iova;
+
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return;
+	}
+
+	wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
+			GFP_KERNEL);
+	if (!wdi_res[res_idx].res)
+		BUG();
+	wdi_res[res_idx].nents = sgt->nents;
+	wdi_res[res_idx].valid = true;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		wdi_res[res_idx].res[i].pa = page_to_phys(sg_page(sg));
+		wdi_res[res_idx].res[i].iova = curr_iova;
+		wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
+				sg->length);
+		IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res[i].pa,
+			wdi_res[res_idx].res[i].iova,
+			wdi_res[res_idx].res[i].size);
+		curr_iova += wdi_res[res_idx].res[i].size;
+	}
+}
+
+static int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova)
+{
+	/* SMMU on WLAN but no SMMU on IPA: not supported */
+	if (wlan_smmu_en && ipa_ctx->smmu_s1_bypass) {
+		IPAERR("Unsupported SMMU pairing\n");
+		return -EINVAL;
+	}
+
+	/* legacy: no SMMUs on either end */
+	if (!wlan_smmu_en && ipa_ctx->smmu_s1_bypass) {
+		*iova = pa;
+		return 0;
+	}
+
+	/* no SMMU on WLAN but SMMU on IPA */
+	if (!wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) {
+		if (ipa_create_uc_smmu_mapping_pa(pa, len,
+			(res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) {
+			IPAERR("Fail to create mapping res %d\n", res_idx);
+			return -EFAULT;
+		}
+		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+		return 0;
+	}
+
+	/* SMMU on WLAN and SMMU on IPA */
+	if (wlan_smmu_en && !ipa_ctx->smmu_s1_bypass) {
+		switch (res_idx) {
+		case IPA_WDI_RX_RING_RP_RES:
+		case IPA_WDI_CE_DB_RES:
+			if (ipa_create_uc_smmu_mapping_pa(pa, len,
+				(res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+				iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+			break;
+		case IPA_WDI_RX_RING_RES:
+		case IPA_WDI_TX_RING_RES:
+		case IPA_WDI_CE_RING_RES:
+			if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	return 0;
+}
+
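[Editor's note] For reference, the four WLAN/IPA SMMU pairings handled by ipa_create_uc_smmu_mapping(), summarized from the code above (not part of the patch):

/*
 *	WLAN SMMU | IPA SMMU (S1) | behavior
 *	----------+---------------+--------------------------------------------
 *	off       | bypass        | legacy: IOVA == physical address
 *	off       | enabled       | map pa via ipa_create_uc_smmu_mapping_pa()
 *	on        | enabled       | rings via sg_table, doorbell/RP via pa
 *	on        | bypass        | rejected (unsupported pairing)
 */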
+/**
+ * ipa2_connect_wdi_pipe() - WDI client connect
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa_ep_context *ep;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwWdiTxSetUpCmdData_t *tx;
+	struct IpaHwWdiRxSetUpCmdData_t *rx;
+	struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+	struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	unsigned long va;
+	phys_addr_t pa;
+	u32 len;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. in=%p out=%p\n", in, out);
+		if (in)
+			IPAERR("client = %d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
+			in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+			IPAERR("alignment failure on TX\n");
+			return -EINVAL;
+		}
+	} else {
+		if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+			IPAERR("alignment failure on RX\n");
+			return -EINVAL;
+		}
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	ipa_ep_idx = ipa2_get_ep_mapping(in->sys.client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa_ctx->ipa_wdi2)
+			cmd.size = sizeof(*tx_2);
+		else
+			cmd.size = sizeof(*tx);
+		IPADBG("comp_ring_base_pa=0x%pa\n",
+				&in->u.dl.comp_ring_base_pa);
+		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+		IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
+		IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+		IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+				&in->u.dl.ce_door_bell_pa);
+		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+	} else {
+		if (ipa_ctx->ipa_wdi2) {
+			/* WDI2.0 feature */
+			cmd.size = sizeof(*rx_2);
+			IPADBG("rdy_ring_rp value =%d\n",
+				*in->u.ul.rdy_ring_rp_va);
+			IPADBG("rx_comp_ring_wp value=%d\n",
+				*in->u.ul.rdy_comp_ring_wp_va);
+			ipa_ctx->uc_ctx.rdy_ring_rp_va =
+				in->u.ul.rdy_ring_rp_va;
+			ipa_ctx->uc_ctx.rdy_comp_ring_wp_va =
+				in->u.ul.rdy_comp_ring_wp_va;
+		} else {
+			cmd.size = sizeof(*rx);
+		}
+		IPADBG("rx_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_base_pa);
+		IPADBG("rx_ring_size=%d\n",
+			in->u.ul.rdy_ring_size);
+		IPADBG("rx_ring_rp_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_rp_pa);
+
+		IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_base_pa);
+		IPADBG("rx_comp_ring_size=%d\n",
+			in->u.ul.rdy_comp_ring_size);
+		IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_wp_pa);
+
+		ipa_ctx->uc_ctx.rdy_ring_base_pa =
+			in->u.ul.rdy_ring_base_pa;
+		ipa_ctx->uc_ctx.rdy_ring_rp_pa =
+			in->u.ul.rdy_ring_rp_pa;
+		ipa_ctx->uc_ctx.rdy_ring_size =
+			in->u.ul.rdy_ring_size;
+		ipa_ctx->uc_ctx.rdy_comp_ring_base_pa =
+			in->u.ul.rdy_comp_ring_base_pa;
+		ipa_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+			in->u.ul.rdy_comp_ring_wp_pa;
+		ipa_ctx->uc_ctx.rdy_comp_ring_size =
+			in->u.ul.rdy_comp_ring_size;
+
+		/* check if the VA is empty */
+		if (!in->u.ul.rdy_ring_rp_va && ipa_ctx->ipa_wdi2) {
+			IPAERR("rdy_ring_rp_va is empty, wdi2.0(%d)\n",
+				ipa_ctx->ipa_wdi2);
+			goto dma_alloc_fail;
+		}
+		if (!in->u.ul.rdy_comp_ring_wp_va && ipa_ctx->ipa_wdi2) {
+			IPAERR("comp_ring_wp_va is empty, wdi2.0(%d)\n",
+				ipa_ctx->ipa_wdi2);
+			goto dma_alloc_fail;
+		}
+	}
+
+	cmd.base = dma_alloc_coherent(ipa_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		result = -ENOMEM;
+		goto dma_alloc_fail;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa_ctx->ipa_wdi2) {
+			tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.dl_smmu.comp_ring_size,
+				in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+					in->smmu_enabled,
+					in->u.dl.comp_ring_base_pa,
+					&in->u.dl_smmu.comp_ring,
+					len,
+					false,
+					&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->comp_ring_size = len;
+			IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->comp_ring_base_pa_hi,
+					tx_2->comp_ring_base_pa);
+
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->ce_ring_size = len;
+			IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_base_pa_hi,
+					tx_2->ce_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_doorbell_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_doorbell_pa_hi,
+					tx_2->ce_ring_doorbell_pa);
+
+			tx_2->num_tx_buffers = in->u.dl.num_tx_buffers;
+			tx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.comp_ring_size,
+					in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.comp_ring_base_pa,
+						&in->u.dl_smmu.comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->comp_ring_base_pa = va;
+			tx->comp_ring_size = len;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_base_pa = va;
+			tx->ce_ring_size = len;
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_doorbell_pa = va;
+			tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+			tx->ipa_pipe_number = ipa_ep_idx;
+		}
+
+		if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+			out->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_5 +
+				IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+		} else {
+			out->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_0 +
+				IPA_UC_MAILBOX_m_n_OFFS(
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+		}
+	} else {
+		if (ipa_ctx->ipa_wdi2) {
+			rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.ul_smmu.rdy_ring_size,
+				in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_ring_size = len;
+			IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_ring_base_pa_hi,
+					rx_2->rx_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_rp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+					rx_2->rx_ring_rp_pa_hi,
+					rx_2->rx_ring_rp_pa);
+			len = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_size :
+				in->u.ul.rdy_comp_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_comp_ring_size,
+					in->u.ul.rdy_comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_comp_ring_base_pa,
+						&in->u.ul_smmu.rdy_comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_comp_ring_size = len;
+			IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_base_pa_hi,
+					rx_2->rx_comp_ring_base_pa);
+
+			pa = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_pa :
+				in->u.ul.rdy_comp_ring_wp_pa;
+			if (ipa_create_uc_smmu_mapping(
+						IPA_WDI_RX_COMP_RING_WP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_rng WP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_wp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_wp_pa_hi,
+					rx_2->rx_comp_ring_wp_pa);
+			rx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_ring_size,
+					in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_base_pa = va;
+			rx->rx_ring_size = len;
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_rp_pa = va;
+			rx->ipa_pipe_number = ipa_ep_idx;
+		}
+
+		if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+			out->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_5 +
+				IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+				IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+		} else {
+			out->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_0 +
+				IPA_UC_MAILBOX_m_n_OFFS(
+				IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+		}
+	}
+
+	ep->valid = 1;
+	ep->client = in->sys.client;
+	ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+	result = ipa_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		goto uc_timeout;
+	}
+	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa2_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	result = ipa_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CLIENT_IS_CONS(in->sys.client) ?
+				IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
+				IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+	ep->client_notify = in->sys.notify;
+	ep->priv = in->sys.priv;
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa2_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	out->clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+		ipa_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	ep->uc_offload_state |= IPA_WDI_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
+
+	return 0;
+
+ipa_cfg_ep_fail:
+	memset(&ipa_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa_ep_context));
+uc_timeout:
+	ipa_release_uc_smmu_mappings(in->sys.client);
+	dma_free_coherent(ipa_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+dma_alloc_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+	return result;
+}
+
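[Editor's note] A hedged client-side sketch of setting up a WDI Tx (downlink, CONS) pipe with ipa2_connect_wdi_pipe() above. Not part of the patch: the function and its parameters are invented for illustration; the ipa_wdi_in_params/ipa_wdi_out_params fields are the ones the connect path reads, and their exact types are assumed to follow the public ipa_wdi header.

static int wlan_connect_wdi_tx_sketch(phys_addr_t comp_ring_pa,
		u32 comp_ring_sz, phys_addr_t ce_ring_pa, u32 ce_ring_sz,
		phys_addr_t ce_db_pa, u16 num_bufs, u32 *clnt_hdl)
{
	struct ipa_wdi_in_params in;
	struct ipa_wdi_out_params out;
	int ret;

	memset(&in, 0, sizeof(in));
	in.sys.client = IPA_CLIENT_WLAN1_CONS;		/* Tx (DL) pipe */
	in.u.dl.comp_ring_base_pa = comp_ring_pa;	/* 8-byte aligned */
	in.u.dl.comp_ring_size = comp_ring_sz;
	in.u.dl.ce_ring_base_pa = ce_ring_pa;		/* 8-byte aligned */
	in.u.dl.ce_ring_size = ce_ring_sz;
	in.u.dl.ce_door_bell_pa = ce_db_pa;
	in.u.dl.num_tx_buffers = num_bufs;

	ret = ipa2_connect_wdi_pipe(&in, &out);
	if (ret)
		return ret;

	*clnt_hdl = out.clnt_hdl;
	/* out.uc_door_bell_pa is the mailbox WLAN rings to wake the uC */
	return 0;
}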
+
+/**
+ * ipa2_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t tear;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	tear.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa_uc_send_cmd(tear.raw32b,
+				IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ipa_delete_dflt_flt_rules(clnt_hdl);
+	ipa_release_uc_smmu_mappings(ep->client);
+
+	memset(&ipa_ctx->ep[clnt_hdl], 0, sizeof(struct ipa_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa2_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_enable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t enable;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	enable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa_uc_send_cmd(enable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		result = ipa2_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state |= IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa2_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_disable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t disable;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 prod_hdl;
+	int i;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	/* on WDI 2.0, wait until the rdy ring RP matches the comp ring WP */
+	if (ipa_ctx->ipa_wdi2) {
+		for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
+			IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
+					i,
+					*ipa_ctx->uc_ctx.rdy_ring_rp_va,
+					*ipa_ctx->uc_ctx.rdy_comp_ring_wp_va);
+			if (*ipa_ctx->uc_ctx.rdy_ring_rp_va !=
+				*ipa_ctx->uc_ctx.rdy_comp_ring_wp_va) {
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+			} else {
+				break;
+			}
+		}
+		/* In case the IPA uC still hasn't processed all
+		 * pending descriptors, we have to assert
+		 */
+		if (i == IPA_UC_FINISH_MAX)
+			ipa_assert();
+	}
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	result = ipa_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			clnt_hdl);
+		result = -EPERM;
+		goto uc_timeout;
+	}
+
+	/*
+	 * To avoid a data stall during continuous SAP on/off, before
+	 * setting delay to the IPA Consumer pipe, remove delay and enable
+	 * HOLB on the IPA Producer pipe
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+		prod_hdl = ipa2_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		if (ipa_ctx->ep[prod_hdl].valid == 1) {
+			result = ipa_disable_data_path(prod_hdl);
+			if (result) {
+				IPAERR("disable data path failed\n");
+				IPAERR("res=%d clnt=%d\n",
+					result, prod_hdl);
+				result = -EPERM;
+				goto uc_timeout;
+			}
+		}
+		usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+			IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+	}
+
+	disable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa_uc_send_cmd(disable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa2_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_resume_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t resume;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	resume.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa_uc_send_cmd(resume.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (result)
+		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+				clnt_hdl, result);
+	else
+		IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+	ep->uc_offload_state |= IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) resumed\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa2_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t suspend;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+				IPA_WDI_RESUMED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	suspend.params.ipa_pipe_number = clnt_hdl;
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Post suspend event first for IPA Producer\n");
+		IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
+		result = ipa_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to suspend result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+	} else {
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		result = ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to delay result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) delayed\n", clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		result = ipa_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	ipa_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
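[Editor's note] The uc_offload_state checks in the calls above imply the following call order for a WDI client; a summary derived from the code, with error handling omitted:

/*
 *	ipa2_connect_wdi_pipe(&in, &out);	sets IPA_WDI_CONNECTED
 *	ipa2_enable_wdi_pipe(out.clnt_hdl);	adds IPA_WDI_ENABLED
 *	ipa2_resume_wdi_pipe(out.clnt_hdl);	adds IPA_WDI_RESUMED
 *		... data transfer ...
 *	ipa2_suspend_wdi_pipe(out.clnt_hdl);	clears IPA_WDI_RESUMED
 *	ipa2_disable_wdi_pipe(out.clnt_hdl);	clears IPA_WDI_ENABLED
 *	ipa2_disconnect_wdi_pipe(out.clnt_hdl);	clears the EP context
 */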
+int ipa_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa_ep_context *ep;
+	union IpaHwWdiRxExtCfgCmdData_t qmap;
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+	qmap.params.ipa_pipe_number = clnt_hdl;
+	qmap.params.qmap_id = qmap_id;
+
+	result = ipa_uc_send_cmd(qmap.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa2_uc_reg_rdyCB() - Register the uC ready CB if the uC is not ready
+ * @inout:	[in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa2_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	int result = 0;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (inout == NULL) {
+		IPAERR("bad parm. inout=%p ", inout);
+		return -EINVAL;
+	}
+
+	result = ipa2_uc_state_check();
+	if (result) {
+		inout->is_uC_ready = false;
+		ipa_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
+		ipa_ctx->uc_wdi_ctx.priv = inout->priv;
+	} else {
+		inout->is_uC_ready = true;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa2_uc_dereg_rdyCB() - To de-register uC ready CB
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa2_uc_dereg_rdyCB(void)
+{
+	ipa_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+	ipa_ctx->uc_wdi_ctx.priv = NULL;
+
+	return 0;
+}
+
+
+/**
+ * ipa2_uc_wdi_get_dbpa() - Retrieve the doorbell physical address of
+ * the WLAN pipes
+ * @param:  [in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa2_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *param)
+{
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (param == NULL || param->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. param=%p ", param);
+		if (param)
+			IPAERR("client = %d\n", param->client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(param->client)) {
+		if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+			param->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_5 +
+				IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+		} else {
+			param->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_0 +
+				IPA_UC_MAILBOX_m_n_OFFS(
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+		}
+	} else {
+		if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+			param->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_5 +
+				IPA_UC_MAILBOX_m_n_OFFS_v2_5(
+				IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+		} else {
+			param->uc_door_bell_pa =
+				ipa_ctx->ipa_wrapper_base +
+				IPA_REG_BASE_OFST_v2_0 +
+				IPA_UC_MAILBOX_m_n_OFFS(
+				IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+		}
+	}
+
+	return 0;
+}
+
+static void ipa_uc_wdi_loaded_handler(void)
+{
+	if (!ipa_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa_ctx->uc_wdi_ctx.uc_ready_cb) {
+		ipa_ctx->uc_wdi_ctx.uc_ready_cb(
+			ipa_ctx->uc_wdi_ctx.priv);
+
+		ipa_ctx->uc_wdi_ctx.uc_ready_cb =
+			NULL;
+		ipa_ctx->uc_wdi_ctx.priv = NULL;
+	}
+}
+
+int ipa2_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
+	int i;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (!info) {
+		IPAERR("info = %p\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = ipa_iommu_map(cb->iommu,
+			rounddown(info[i].iova, PAGE_SIZE),
+			rounddown(info[i].pa, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+			prot);
+	}
+
+	return ret;
+}
+
+int ipa2_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
+	int i;
+	int ret = 0;
+
+	if (!info) {
+		IPAERR("info = %p\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = iommu_unmap(cb->iommu,
+			rounddown(info[i].iova, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+	}
+
+	return ret;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
new file mode 100644
index 0000000..f399bae
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -0,0 +1,5200 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_V1_CLK_RATE (92.31 * 1000 * 1000UL)
+#define IPA_V1_1_CLK_RATE (100 * 1000 * 1000UL)
+#define IPA_V2_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
+#define IPA_V2_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
+#define IPA_V2_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+#define IPA_V1_MAX_HOLB_TMR_VAL (512 - 1)
+#define IPA_V2_0_MAX_HOLB_TMR_VAL (65536 - 1)
+#define IPA_V2_5_MAX_HOLB_TMR_VAL (4294967296 - 1)
+#define IPA_V2_6L_MAX_HOLB_TMR_VAL IPA_V2_5_MAX_HOLB_TMR_VAL
+
+#define IPA_V2_0_BW_THRESHOLD_TURBO_MBPS (1000)
+#define IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+
+/* Max pipes + ICs for TAG process */
+#define IPA_TAG_MAX_DESC (IPA_MAX_NUM_PIPES + 6)
+
+#define IPA_TAG_SLEEP_MIN_USEC (1000)
+#define IPA_TAG_SLEEP_MAX_USEC (2000)
+#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
+#define IPA_BCR_REG_VAL (0x001FFF7F)
+#define IPA_AGGR_GRAN_MIN (1)
+#define IPA_AGGR_GRAN_MAX (32)
+#define IPA_EOT_COAL_GRAN_MIN (1)
+#define IPA_EOT_COAL_GRAN_MAX (16)
+#define MSEC 1000
+#define MIN_RX_POLL_TIME 1
+#define MAX_RX_POLL_TIME 5
+#define UPPER_CUTOFF 50
+#define LOWER_CUTOFF 10
+
+#define IPA_DEFAULT_SYS_YELLOW_WM 32
+
+#define IPA_AGGR_BYTE_LIMIT (\
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
+#define IPA_AGGR_PKT_LIMIT (\
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
+
+static const int ipa_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+					IPA_OFFSET_MEQ32_1, -1 };
+static const int ipa_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+					IPA_OFFSET_MEQ128_1, -1 };
+static const int ipa_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+					IPA_IHL_OFFSET_RANGE16_1, -1 };
+static const int ipa_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+					IPA_IHL_OFFSET_MEQ32_1, -1 };
+#define IPA_1_1 (0)
+#define IPA_2_0 (1)
+#define IPA_2_6L (2)
+
+#define INVALID_EP_MAPPING_INDEX (-1)
+
+static const int ep_mapping[3][IPA_CLIENT_MAX] = {
+	[IPA_1_1][IPA_CLIENT_HSIC1_PROD]         = 19,
+	[IPA_1_1][IPA_CLIENT_WLAN1_PROD]         = -1,
+	[IPA_1_1][IPA_CLIENT_HSIC2_PROD]         = 12,
+	[IPA_1_1][IPA_CLIENT_USB2_PROD]          = 12,
+	[IPA_1_1][IPA_CLIENT_HSIC3_PROD]         = 13,
+	[IPA_1_1][IPA_CLIENT_USB3_PROD]          = 13,
+	[IPA_1_1][IPA_CLIENT_HSIC4_PROD]         =  0,
+	[IPA_1_1][IPA_CLIENT_USB4_PROD]          =  0,
+	[IPA_1_1][IPA_CLIENT_HSIC5_PROD]         = -1,
+	[IPA_1_1][IPA_CLIENT_USB_PROD]           = 11,
+	[IPA_1_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = 15,
+	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_PROD]   =  8,
+	[IPA_1_1][IPA_CLIENT_A2_TETHERED_PROD]   =  6,
+	[IPA_1_1][IPA_CLIENT_APPS_LAN_WAN_PROD]  =  2,
+	[IPA_1_1][IPA_CLIENT_APPS_CMD_PROD]      =  1,
+	[IPA_1_1][IPA_CLIENT_ODU_PROD]           = -1,
+	[IPA_1_1][IPA_CLIENT_MHI_PROD]           = -1,
+	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD]        =  5,
+	[IPA_1_1][IPA_CLIENT_Q6_WAN_PROD]        = -1,
+	[IPA_1_1][IPA_CLIENT_Q6_CMD_PROD]        = -1,
+
+	[IPA_1_1][IPA_CLIENT_HSIC1_CONS]         = 14,
+	[IPA_1_1][IPA_CLIENT_WLAN1_CONS]         = -1,
+	[IPA_1_1][IPA_CLIENT_HSIC2_CONS]         = 16,
+	[IPA_1_1][IPA_CLIENT_USB2_CONS]          = 16,
+	[IPA_1_1][IPA_CLIENT_WLAN2_CONS]         = -1,
+	[IPA_1_1][IPA_CLIENT_HSIC3_CONS]         = 17,
+	[IPA_1_1][IPA_CLIENT_USB3_CONS]          = 17,
+	[IPA_1_1][IPA_CLIENT_WLAN3_CONS]         = -1,
+	[IPA_1_1][IPA_CLIENT_HSIC4_CONS]         = 18,
+	[IPA_1_1][IPA_CLIENT_USB4_CONS]          = 18,
+	[IPA_1_1][IPA_CLIENT_WLAN4_CONS]         = -1,
+	[IPA_1_1][IPA_CLIENT_HSIC5_CONS]         = -1,
+	[IPA_1_1][IPA_CLIENT_USB_CONS]           = 10,
+	[IPA_1_1][IPA_CLIENT_USB_DPL_CONS]       = -1,
+	[IPA_1_1][IPA_CLIENT_A2_EMBEDDED_CONS]   =  9,
+	[IPA_1_1][IPA_CLIENT_A2_TETHERED_CONS]   =  7,
+	[IPA_1_1][IPA_CLIENT_A5_LAN_WAN_CONS]    =  3,
+	[IPA_1_1][IPA_CLIENT_APPS_LAN_CONS]      = -1,
+	[IPA_1_1][IPA_CLIENT_APPS_WAN_CONS]      = -1,
+	[IPA_1_1][IPA_CLIENT_ODU_EMB_CONS]       = -1,
+	[IPA_1_1][IPA_CLIENT_ODU_TETH_CONS]      = -1,
+	[IPA_1_1][IPA_CLIENT_MHI_CONS]           = -1,
+	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS]        =  4,
+	[IPA_1_1][IPA_CLIENT_Q6_WAN_CONS]        = -1,
+
+
+	[IPA_2_0][IPA_CLIENT_HSIC1_PROD]         = 12,
+	[IPA_2_0][IPA_CLIENT_WLAN1_PROD]         = 18,
+	[IPA_2_0][IPA_CLIENT_HSIC2_PROD]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB2_PROD]          = 12,
+	[IPA_2_0][IPA_CLIENT_HSIC3_PROD]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB3_PROD]          = 13,
+	[IPA_2_0][IPA_CLIENT_HSIC4_PROD]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB4_PROD]          =  0,
+	[IPA_2_0][IPA_CLIENT_HSIC5_PROD]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB_PROD]           = 11,
+	[IPA_2_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1,
+	[IPA_2_0][IPA_CLIENT_A2_EMBEDDED_PROD]   = -1,
+	[IPA_2_0][IPA_CLIENT_A2_TETHERED_PROD]   = -1,
+	[IPA_2_0][IPA_CLIENT_APPS_LAN_WAN_PROD]  =  4,
+	[IPA_2_0][IPA_CLIENT_APPS_CMD_PROD]      =  3,
+	[IPA_2_0][IPA_CLIENT_ODU_PROD]           = 12,
+	[IPA_2_0][IPA_CLIENT_MHI_PROD]           = 18,
+	[IPA_2_0][IPA_CLIENT_Q6_LAN_PROD]        =  6,
+	[IPA_2_0][IPA_CLIENT_Q6_WAN_PROD]        = -1,
+	[IPA_2_0][IPA_CLIENT_Q6_CMD_PROD]        =  7,
+	[IPA_2_0][IPA_CLIENT_Q6_DECOMP_PROD]     = -1,
+	[IPA_2_0][IPA_CLIENT_Q6_DECOMP2_PROD]    = -1,
+	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+						 =  12,
+	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+						 =  19,
+	/* Only for test purposes */
+	[IPA_2_0][IPA_CLIENT_TEST_PROD]          = 19,
+	[IPA_2_0][IPA_CLIENT_TEST1_PROD]         = 19,
+	[IPA_2_0][IPA_CLIENT_TEST2_PROD]         = 12,
+	[IPA_2_0][IPA_CLIENT_TEST3_PROD]         = 11,
+	[IPA_2_0][IPA_CLIENT_TEST4_PROD]         =  0,
+
+	[IPA_2_0][IPA_CLIENT_HSIC1_CONS]         = 13,
+	[IPA_2_0][IPA_CLIENT_WLAN1_CONS]         = 17,
+	[IPA_2_0][IPA_CLIENT_HSIC2_CONS]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB2_CONS]          = -1,
+	[IPA_2_0][IPA_CLIENT_WLAN2_CONS]         = 16,
+	[IPA_2_0][IPA_CLIENT_HSIC3_CONS]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB3_CONS]          = -1,
+	[IPA_2_0][IPA_CLIENT_WLAN3_CONS]         = 14,
+	[IPA_2_0][IPA_CLIENT_HSIC4_CONS]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB4_CONS]          = -1,
+	[IPA_2_0][IPA_CLIENT_WLAN4_CONS]         = 19,
+	[IPA_2_0][IPA_CLIENT_HSIC5_CONS]         = -1,
+	[IPA_2_0][IPA_CLIENT_USB_CONS]           = 15,
+	[IPA_2_0][IPA_CLIENT_USB_DPL_CONS]       =  0,
+	[IPA_2_0][IPA_CLIENT_A2_EMBEDDED_CONS]   = -1,
+	[IPA_2_0][IPA_CLIENT_A2_TETHERED_CONS]   = -1,
+	[IPA_2_0][IPA_CLIENT_A5_LAN_WAN_CONS]    = -1,
+	[IPA_2_0][IPA_CLIENT_APPS_LAN_CONS]      =  2,
+	[IPA_2_0][IPA_CLIENT_APPS_WAN_CONS]      =  5,
+	[IPA_2_0][IPA_CLIENT_ODU_EMB_CONS]       = 13,
+	[IPA_2_0][IPA_CLIENT_ODU_TETH_CONS]      =  1,
+	[IPA_2_0][IPA_CLIENT_MHI_CONS]           = 17,
+	[IPA_2_0][IPA_CLIENT_Q6_LAN_CONS]        =  8,
+	[IPA_2_0][IPA_CLIENT_Q6_WAN_CONS]        =  9,
+	[IPA_2_0][IPA_CLIENT_Q6_DUN_CONS]        = -1,
+	[IPA_2_0][IPA_CLIENT_Q6_DECOMP_CONS]     = -1,
+	[IPA_2_0][IPA_CLIENT_Q6_DECOMP2_CONS]    = -1,
+	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+						 =  13,
+	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+						 =  16,
+	[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
+						 =  10,
+	/* Only for test purposes */
+	[IPA_2_0][IPA_CLIENT_TEST_CONS]          = 1,
+	[IPA_2_0][IPA_CLIENT_TEST1_CONS]         = 1,
+	[IPA_2_0][IPA_CLIENT_TEST2_CONS]         = 16,
+	[IPA_2_0][IPA_CLIENT_TEST3_CONS]         = 13,
+	[IPA_2_0][IPA_CLIENT_TEST4_CONS]         = 15,
+
+
+	[IPA_2_6L][IPA_CLIENT_HSIC1_PROD]         = -1,
+	[IPA_2_6L][IPA_CLIENT_WLAN1_PROD]         = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC2_PROD]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB2_PROD]          = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC3_PROD]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB3_PROD]          = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC4_PROD]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB4_PROD]          = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC5_PROD]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB_PROD]           =  1,
+	[IPA_2_6L][IPA_CLIENT_A5_WLAN_AMPDU_PROD] = -1,
+	[IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_PROD]   = -1,
+	[IPA_2_6L][IPA_CLIENT_A2_TETHERED_PROD]   = -1,
+	[IPA_2_6L][IPA_CLIENT_APPS_LAN_WAN_PROD]  =  4,
+	[IPA_2_6L][IPA_CLIENT_APPS_CMD_PROD]      =  3,
+	[IPA_2_6L][IPA_CLIENT_ODU_PROD]           = -1,
+	[IPA_2_6L][IPA_CLIENT_MHI_PROD]           = -1,
+	[IPA_2_6L][IPA_CLIENT_Q6_LAN_PROD]        =  6,
+	[IPA_2_6L][IPA_CLIENT_Q6_WAN_PROD]        = -1,
+	[IPA_2_6L][IPA_CLIENT_Q6_CMD_PROD]        =  7,
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_PROD]     = 11,
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_PROD]    = 13,
+	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+						 =  -1,
+	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+						 =  -1,
+	/* Only for test purposes */
+	[IPA_2_6L][IPA_CLIENT_TEST_PROD]          = 11,
+	[IPA_2_6L][IPA_CLIENT_TEST1_PROD]         = 11,
+	[IPA_2_6L][IPA_CLIENT_TEST2_PROD]         = 12,
+	[IPA_2_6L][IPA_CLIENT_TEST3_PROD]         = 13,
+	[IPA_2_6L][IPA_CLIENT_TEST4_PROD]         = 14,
+
+	[IPA_2_6L][IPA_CLIENT_HSIC1_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_WLAN1_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC2_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB2_CONS]          = -1,
+	[IPA_2_6L][IPA_CLIENT_WLAN2_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC3_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB3_CONS]          = -1,
+	[IPA_2_6L][IPA_CLIENT_WLAN3_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC4_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB4_CONS]          = -1,
+	[IPA_2_6L][IPA_CLIENT_WLAN4_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_HSIC5_CONS]         = -1,
+	[IPA_2_6L][IPA_CLIENT_USB_CONS]           =  0,
+	[IPA_2_6L][IPA_CLIENT_USB_DPL_CONS]       = 10,
+	[IPA_2_6L][IPA_CLIENT_A2_EMBEDDED_CONS]   = -1,
+	[IPA_2_6L][IPA_CLIENT_A2_TETHERED_CONS]   = -1,
+	[IPA_2_6L][IPA_CLIENT_A5_LAN_WAN_CONS]    = -1,
+	[IPA_2_6L][IPA_CLIENT_APPS_LAN_CONS]      =  2,
+	[IPA_2_6L][IPA_CLIENT_APPS_WAN_CONS]      =  5,
+	[IPA_2_6L][IPA_CLIENT_ODU_EMB_CONS]       = -1,
+	[IPA_2_6L][IPA_CLIENT_ODU_TETH_CONS]      = -1,
+	[IPA_2_6L][IPA_CLIENT_MHI_CONS]           = -1,
+	[IPA_2_6L][IPA_CLIENT_Q6_LAN_CONS]        =  8,
+	[IPA_2_6L][IPA_CLIENT_Q6_WAN_CONS]        =  9,
+	[IPA_2_6L][IPA_CLIENT_Q6_DUN_CONS]        = -1,
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP_CONS]     = 12,
+	[IPA_2_6L][IPA_CLIENT_Q6_DECOMP2_CONS]    = 14,
+	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+						 =  -1,
+	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+						 =  -1,
+	[IPA_2_6L][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
+						 =  -1,
+	/* Only for test purposes */
+	[IPA_2_6L][IPA_CLIENT_TEST_CONS]          = 15,
+	[IPA_2_6L][IPA_CLIENT_TEST1_CONS]         = 15,
+	[IPA_2_6L][IPA_CLIENT_TEST2_CONS]         = 0,
+	[IPA_2_6L][IPA_CLIENT_TEST3_CONS]         = 1,
+	[IPA_2_6L][IPA_CLIENT_TEST4_CONS]         = 10,
+};
+
+static struct msm_bus_vectors ipa_init_vectors_v1_1[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 0,
+		.ib = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 0,
+		.ib = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 0,
+		.ib = 0,
+	},
+};
+
+static struct msm_bus_vectors ipa_init_vectors_v2_0[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 0,
+		.ib = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 0,
+		.ib = 0,
+	},
+};
+
+static struct msm_bus_vectors ipa_max_perf_vectors_v1_1[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 50000000,
+		.ib = 960000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 50000000,
+		.ib = 960000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_BAM_DMA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 50000000,
+		.ib = 960000000,
+	},
+};
+
+static struct msm_bus_vectors ipa_nominal_perf_vectors_v2_0[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 100000000,
+		.ib = 1300000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 100000000,
+		.ib = 1300000000,
+	},
+};
+
+static struct msm_bus_paths ipa_usecases_v1_1[]  = {
+	{
+		ARRAY_SIZE(ipa_init_vectors_v1_1),
+		ipa_init_vectors_v1_1,
+	},
+	{
+		ARRAY_SIZE(ipa_max_perf_vectors_v1_1),
+		ipa_max_perf_vectors_v1_1,
+	},
+};
+
+static struct msm_bus_paths ipa_usecases_v2_0[]  = {
+	{
+		ARRAY_SIZE(ipa_init_vectors_v2_0),
+		ipa_init_vectors_v2_0,
+	},
+	{
+		ARRAY_SIZE(ipa_nominal_perf_vectors_v2_0),
+		ipa_nominal_perf_vectors_v2_0,
+	},
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v1_1 = {
+	ipa_usecases_v1_1,
+	ARRAY_SIZE(ipa_usecases_v1_1),
+	.name = "ipa",
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v2_0 = {
+	ipa_usecases_v2_0,
+	ARRAY_SIZE(ipa_usecases_v2_0),
+	.name = "ipa",
+};
+
+void ipa_active_clients_lock(void)
+{
+	unsigned long flags;
+
+	mutex_lock(&ipa_ctx->ipa_active_clients.mutex);
+	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
+	ipa_ctx->ipa_active_clients.mutex_locked = true;
+	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
+}
+
+int ipa_active_clients_trylock(unsigned long *flags)
+{
+	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, *flags);
+	if (ipa_ctx->ipa_active_clients.mutex_locked) {
+		spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock,
+					 *flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+void ipa_active_clients_trylock_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, *flags);
+}
+
+void ipa_active_clients_unlock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa_ctx->ipa_active_clients.spinlock, flags);
+	ipa_ctx->ipa_active_clients.mutex_locked = false;
+	spin_unlock_irqrestore(&ipa_ctx->ipa_active_clients.spinlock, flags);
+	mutex_unlock(&ipa_ctx->ipa_active_clients.mutex);
+}
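+
+/*
+ * Usage sketch (illustrative only, not part of the driver): a caller that
+ * must not sleep can pair the trylock/unlock helpers above, mirroring
+ * ipa2_suspend_resource_no_block() below. do_non_sleeping_work() is a
+ * hypothetical placeholder.
+ *
+ *	unsigned long flags;
+ *
+ *	if (ipa_active_clients_trylock(&flags) == 0)
+ *		return -EPERM;
+ *	do_non_sleeping_work();
+ *	ipa_active_clients_trylock_unlock(&flags);
+ */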
+
+/**
+ * ipa_get_clients_from_rm_resource() - get IPA clients which are related to an
+ * IPA_RM resource
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ * @clients: [OUT] Empty array which will contain the list of clients. The
+ *         caller must initialize this array.
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa_get_clients_from_rm_resource(
+	enum ipa_rm_resource_name resource,
+	struct ipa_client_names *clients)
+{
+	int i = 0;
+
+	if (resource < 0 ||
+	    resource >= IPA_RM_RESOURCE_MAX ||
+	    !clients) {
+		IPAERR("Bad parameters\n");
+		return -EINVAL;
+	}
+
+	switch (resource) {
+	case IPA_RM_RESOURCE_USB_CONS:
+		clients->names[i++] = IPA_CLIENT_USB_CONS;
+		break;
+	case IPA_RM_RESOURCE_HSIC_CONS:
+		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
+		break;
+	case IPA_RM_RESOURCE_WLAN_CONS:
+		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
+		break;
+	case IPA_RM_RESOURCE_MHI_CONS:
+		clients->names[i++] = IPA_CLIENT_MHI_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_PROD:
+		clients->names[i++] = IPA_CLIENT_USB_PROD;
+		break;
+	case IPA_RM_RESOURCE_HSIC_PROD:
+		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
+		break;
+	case IPA_RM_RESOURCE_MHI_PROD:
+		clients->names[i++] = IPA_CLIENT_MHI_PROD;
+		break;
+	default:
+		break;
+	}
+	clients->length = i;
+
+	return 0;
+}
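+
+/*
+ * Usage sketch (illustrative only): iterate over the clients tied to an
+ * IPA_RM resource, much like the suspend/resume helpers below do.
+ *
+ *	struct ipa_client_names clients;
+ *	int i;
+ *
+ *	memset(&clients, 0, sizeof(clients));
+ *	if (ipa_get_clients_from_rm_resource(IPA_RM_RESOURCE_WLAN_CONS,
+ *					     &clients))
+ *		return;
+ *	for (i = 0; i < clients.length; i++)
+ *		IPADBG("client %d\n", clients.names[i]);
+ */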
+
+/**
+ * ipa_should_pipe_be_suspended() - returns true when the client's pipe should
+ * be suspended during a power save scenario. False otherwise.
+ *
+ * @client: [IN] IPA client
+ */
+bool ipa_should_pipe_be_suspended(enum ipa_client_type client)
+{
+	struct ipa_ep_context *ep;
+	int ipa_ep_idx;
+
+	ipa_ep_idx = ipa2_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+
+	if (ep->keep_ipa_awake)
+		return false;
+
+	if (client == IPA_CLIENT_USB_CONS   ||
+	    client == IPA_CLIENT_MHI_CONS   ||
+	    client == IPA_CLIENT_HSIC1_CONS ||
+	    client == IPA_CLIENT_WLAN1_CONS ||
+	    client == IPA_CLIENT_WLAN2_CONS ||
+	    client == IPA_CLIENT_WLAN3_CONS ||
+	    client == IPA_CLIENT_WLAN4_CONS)
+		return true;
+
+	return false;
+}
+
+/**
+ * ipa2_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa2_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+	struct ipa_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+	bool pipe_suspended = false;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("Bad params.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa2_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa_ctx->resume_on_connect[client] = false;
+		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa_should_pipe_be_suspended(client)) {
+			if (ipa_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				pipe_suspended = true;
+			}
+		}
+	}
+	/* Sleep ~1 msec */
+	if (pipe_suspended)
+		usleep_range(1000, 2000);
+
+	/* before gating IPA clocks do TAG process */
+	ipa_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+
+	return 0;
+}
+
+/**
+ * ipa2_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa2_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+	int res;
+	struct ipa_client_names clients;
+	int index;
+	enum ipa_client_type client;
+	struct ipa_ep_cfg_ctrl suspend;
+	int ipa_ep_idx;
+	unsigned long flags;
+	struct ipa_active_client_logging_info log_info;
+
+	if (ipa_active_clients_trylock(&flags) == 0)
+		return -EPERM;
+	if (ipa_ctx->ipa_active_clients.cnt == 1) {
+		res = -EPERM;
+		goto bail;
+	}
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("ipa_get_clients_from_rm_resource() failed, name = %d.\n"
+		       , resource);
+		goto bail;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa2_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa_ctx->resume_on_connect[client] = false;
+		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa_should_pipe_be_suspended(client)) {
+			if (ipa_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+	}
+
+	if (res == 0) {
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+				ipa_rm_resource_str(resource));
+		ipa2_active_clients_log_dec(&log_info, true);
+		ipa_ctx->ipa_active_clients.cnt--;
+		IPADBG("active clients = %d\n",
+		       ipa_ctx->ipa_active_clients.cnt);
+	}
+bail:
+	ipa_active_clients_trylock_unlock(&flags);
+
+	return res;
+}
+
+/**
+ * ipa2_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa2_resume_resource(enum ipa_rm_resource_name resource)
+{
+
+	struct ipa_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("ipa_get_clients_from_rm_resource() failed.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa2_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		/*
+		 * The related ep will be resumed on connect
+		 * while its resource is granted.
+		 */
+		ipa_ctx->resume_on_connect[client] = true;
+		IPADBG("%d will be resumed on connect.\n", client);
+		if (ipa_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa_should_pipe_be_suspended(client)) {
+			spin_lock(&ipa_ctx->disconnect_lock);
+			if (ipa_ctx->ep[ipa_ep_idx].valid &&
+			!ipa_ctx->ep[ipa_ep_idx].disconnect_in_progress) {
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = false;
+				ipa2_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+			spin_unlock(&ipa_ctx->disconnect_lock);
+		}
+	}
+
+	return res;
+}
+
+/* Read how much SRAM is available for SW use.
+ * On IPA v2.0 and later this also supplies the offset from
+ * which SW can start writing.
+ */
+void _ipa_sram_settings_read_v1_1(void)
+{
+	ipa_ctx->smem_restricted_bytes = 0;
+	ipa_ctx->smem_sz = ipa_read_reg(ipa_ctx->mmio,
+			IPA_SHARED_MEM_SIZE_OFST_v1_1);
+	ipa_ctx->smem_reqd_sz = IPA_MEM_v1_RAM_END_OFST;
+	ipa_ctx->hdr_tbl_lcl = 1;
+	ipa_ctx->ip4_rt_tbl_lcl = 0;
+	ipa_ctx->ip6_rt_tbl_lcl = 0;
+	ipa_ctx->ip4_flt_tbl_lcl = 1;
+	ipa_ctx->ip6_flt_tbl_lcl = 1;
+}
+
+void _ipa_sram_settings_read_v2_0(void)
+{
+	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
+			IPA_SHARED_MEM_SIZE_OFST_v2_0,
+			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+			IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
+	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
+			IPA_SHARED_MEM_SIZE_OFST_v2_0,
+			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+			IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
+	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+	ipa_ctx->hdr_tbl_lcl = 0;
+	ipa_ctx->ip4_rt_tbl_lcl = 0;
+	ipa_ctx->ip6_rt_tbl_lcl = 0;
+	ipa_ctx->ip4_flt_tbl_lcl = 0;
+	ipa_ctx->ip6_flt_tbl_lcl = 0;
+}
+
+void _ipa_sram_settings_read_v2_5(void)
+{
+	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
+		IPA_SHARED_MEM_SIZE_OFST_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
+	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
+		IPA_SHARED_MEM_SIZE_OFST_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
+	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+	ipa_ctx->hdr_tbl_lcl = 0;
+	ipa_ctx->hdr_proc_ctx_tbl_lcl = 1;
+
+	/*
+	 * When the proc ctx table is located in internal memory,
+	 * the modem entries reside first.
+	 */
+	if (ipa_ctx->hdr_proc_ctx_tbl_lcl) {
+		ipa_ctx->hdr_proc_ctx_tbl.start_offset =
+			IPA_MEM_PART(modem_hdr_proc_ctx_size);
+	}
+	ipa_ctx->ip4_rt_tbl_lcl = 0;
+	ipa_ctx->ip6_rt_tbl_lcl = 0;
+	ipa_ctx->ip4_flt_tbl_lcl = 0;
+	ipa_ctx->ip6_flt_tbl_lcl = 0;
+}
+
+void _ipa_sram_settings_read_v2_6L(void)
+{
+	ipa_ctx->smem_restricted_bytes = ipa_read_reg_field(ipa_ctx->mmio,
+		IPA_SHARED_MEM_SIZE_OFST_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT_v2_0);
+	ipa_ctx->smem_sz = ipa_read_reg_field(ipa_ctx->mmio,
+		IPA_SHARED_MEM_SIZE_OFST_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK_v2_0,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT_v2_0);
+	ipa_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+	ipa_ctx->hdr_tbl_lcl = 0;
+	ipa_ctx->ip4_rt_tbl_lcl = 0;
+	ipa_ctx->ip6_rt_tbl_lcl = 0;
+	ipa_ctx->ip4_flt_tbl_lcl = 0;
+	ipa_ctx->ip6_flt_tbl_lcl = 0;
+}
+
+void _ipa_cfg_route_v1_1(struct ipa_route *route)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
+			IPA_ROUTE_ROUTE_DIS_SHFT,
+			IPA_ROUTE_ROUTE_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
+			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
+			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
+			IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+			IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
+}
+
+void _ipa_cfg_route_v2_0(struct ipa_route *route)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_dis,
+			IPA_ROUTE_ROUTE_DIS_SHFT,
+			IPA_ROUTE_ROUTE_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_def_pipe,
+			IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+			IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_table,
+			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+			IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_def_hdr_ofst,
+			IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+			IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, route->route_frag_def_pipe,
+			IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+			IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ROUTE_OFST_v1_1, reg_val);
+}
+
+/**
+ * ipa_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_route(struct ipa_route *route)
+{
+
+	IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
+		route->route_dis,
+		route->route_def_pipe,
+		route->route_def_hdr_table);
+	IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
+		route->route_def_hdr_ofst,
+		route->route_frag_def_pipe);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipa_ctx->ctrl->ipa_cfg_route(route);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
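+
+/*
+ * Usage sketch (illustrative only): field names are taken from the
+ * version-specific helpers above; the default pipe choice is an assumption
+ * and error handling is omitted.
+ *
+ *	struct ipa_route route = {
+ *		.route_dis = 0,
+ *		.route_def_pipe =
+ *			ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
+ *		.route_def_hdr_table = 0,
+ *		.route_def_hdr_ofst = 0,
+ *		.route_frag_def_pipe = 0,
+ *	};
+ *
+ *	ipa_cfg_route(&route);
+ */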
+
+/**
+ * ipa_cfg_filter() - configure filter
+ * @disable: non-zero to disable filtering, zero to enable it
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_cfg_filter(u32 disable)
+{
+	u32 ipa_filter_ofst = IPA_FILTER_OFST_v1_1;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa_write_reg(ipa_ctx->mmio, ipa_filter_ofst,
+			IPA_SETFIELD(!disable,
+					IPA_FILTER_FILTER_EN_SHFT,
+					IPA_FILTER_FILTER_EN_BMSK));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa_init_hw(void)
+{
+	u32 ipa_version = 0;
+
+	/* do soft reset of IPA */
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 1);
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_SW_RESET_OFST, 0);
+
+	/* enable IPA */
+	ipa_write_reg(ipa_ctx->mmio, IPA_COMP_CFG_OFST, 1);
+
+	/* Read IPA version and make sure we have access to the registers */
+	ipa_version = ipa_read_reg(ipa_ctx->mmio, IPA_VERSION_OFST);
+	if (ipa_version == 0)
+		return -EFAULT;
+
+	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_5) {
+		/* program IPA_BCR with IPA_BCR_REG_VAL to enable new IPA behavior */
+		ipa_write_reg(ipa_ctx->mmio, IPA_BCR_OFST, IPA_BCR_REG_VAL);
+	}
+	return 0;
+}
+
+/**
+ * ipa2_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint index, or INVALID_EP_MAPPING_INDEX on failure
+ */
+int ipa2_get_ep_mapping(enum ipa_client_type client)
+{
+	u8 hw_type_index = IPA_1_1;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return INVALID_EP_MAPPING_INDEX;
+	}
+
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return INVALID_EP_MAPPING_INDEX;
+	}
+
+	switch (ipa_ctx->ipa_hw_type) {
+	case IPA_HW_v2_0:
+	case IPA_HW_v2_5:
+		hw_type_index = IPA_2_0;
+		break;
+	case IPA_HW_v2_6L:
+		hw_type_index = IPA_2_6L;
+		break;
+	default:
+		hw_type_index = IPA_1_1;
+		break;
+	}
+
+	return ep_mapping[hw_type_index][client];
+}
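+
+/*
+ * Usage sketch (illustrative only): callers should treat
+ * INVALID_EP_MAPPING_INDEX as "client not supported on this HW version".
+ *
+ *	int ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_USB_CONS);
+ *
+ *	if (ep_idx == INVALID_EP_MAPPING_INDEX)
+ *		return -EINVAL;
+ */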
+
+/**
+ * ipa2_set_client() - set the client mapping for a pipe
+ * @index: IPA pipe index
+ * @client: IPACM client type
+ * @uplink: true if the pipe carries uplink traffic
+ *
+ * Return value: none
+ */
+void ipa2_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+	if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+		IPAERR("Bad client number! client =%d\n", client);
+	} else if (index >= IPA_MAX_NUM_PIPES || index < 0) {
+		IPAERR("Bad pipe index! index =%d\n", index);
+	} else {
+		ipa_ctx->ipacm_client[index].client_enum = client;
+		ipa_ctx->ipacm_client[index].uplink = uplink;
+	}
+}
+
+/**
+ * ipa2_get_client() - provide the client mapping for a pipe
+ * @pipe_idx: IPA pipe index
+ *
+ * Return value: IPACM client type, or IPACM_CLIENT_MAX for a bad pipe index
+ */
+enum ipacm_client_enum ipa2_get_client(int pipe_idx)
+{
+	if (pipe_idx >= IPA_MAX_NUM_PIPES || pipe_idx < 0) {
+		IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
+		return IPACM_CLIENT_MAX;
+	} else {
+		return ipa_ctx->ipacm_client[pipe_idx].client_enum;
+	}
+}
+
+/**
+ * ipa2_get_client_uplink() - report whether a pipe carries uplink traffic
+ * @pipe_idx: IPA pipe index
+ *
+ * Return value: true if the pipe is used for uplink
+ */
+bool ipa2_get_client_uplink(int pipe_idx)
+{
+	return ipa_ctx->ipacm_client[pipe_idx].uplink;
+}
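+
+/*
+ * Usage sketch (illustrative only): record and query the IPACM client that
+ * owns a pipe; the client and direction used here are example values.
+ *
+ *	int ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_USB_PROD);
+ *
+ *	ipa2_set_client(ep_idx, IPACM_CLIENT_USB, true);
+ *	if (ipa2_get_client(ep_idx) == IPACM_CLIENT_USB &&
+ *	    ipa2_get_client_uplink(ep_idx))
+ *		IPADBG("pipe %d carries USB uplink traffic\n", ep_idx);
+ */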
+
+/**
+ * ipa2_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx: IPA pipe index
+ *
+ * Return value: IPA_RM resource related to the pipe, negative on failure or
+ * if no resource is found.
+ */
+enum ipa_rm_resource_name ipa2_get_rm_resource_from_ep(int pipe_idx)
+{
+	int i;
+	int j;
+	enum ipa_client_type client;
+	struct ipa_client_names clients;
+	bool found = false;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	client = ipa_ctx->ep[pipe_idx].client;
+
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		memset(&clients, 0, sizeof(clients));
+		ipa_get_clients_from_rm_resource(i, &clients);
+		for (j = 0; j < clients.length; j++) {
+			if (clients.names[j] == client) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			break;
+	}
+
+	if (!found)
+		return -EFAULT;
+
+	return i;
+}
+
+/**
+ * ipa2_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client type mapped to the pipe, negative on failure
+ */
+enum ipa_client_type ipa2_get_client_mapping(int pipe_idx)
+{
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (pipe_idx >= ipa_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	return ipa_ctx->ep[pipe_idx].client;
+}
+
+void ipa_generate_mac_addr_hw_rule(u8 **buf, u8 hdr_mac_addr_offset,
+	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN])
+{
+	*buf = ipa_write_8(hdr_mac_addr_offset, *buf);
+
+	/* MAC addr mask copied in little-endian order, 4 bytes at a time */
+	*buf = ipa_write_8(mac_addr_mask[3], *buf);
+	*buf = ipa_write_8(mac_addr_mask[2], *buf);
+	*buf = ipa_write_8(mac_addr_mask[1], *buf);
+	*buf = ipa_write_8(mac_addr_mask[0], *buf);
+	*buf = ipa_write_16(0, *buf);
+	*buf = ipa_write_8(mac_addr_mask[5], *buf);
+	*buf = ipa_write_8(mac_addr_mask[4], *buf);
+	*buf = ipa_write_32(0, *buf);
+	*buf = ipa_write_32(0, *buf);
+
+	/* MAC addr copied in little-endian order, 4 bytes at a time */
+	*buf = ipa_write_8(mac_addr[3], *buf);
+	*buf = ipa_write_8(mac_addr[2], *buf);
+	*buf = ipa_write_8(mac_addr[1], *buf);
+	*buf = ipa_write_8(mac_addr[0], *buf);
+	*buf = ipa_write_16(0, *buf);
+	*buf = ipa_write_8(mac_addr[5], *buf);
+	*buf = ipa_write_8(mac_addr[4], *buf);
+	*buf = ipa_write_32(0, *buf);
+	*buf = ipa_write_32(0, *buf);
+	*buf = ipa_pad_to_32(*buf);
+}
+
+/**
+ * ipa_generate_hw_rule() - generate HW rule
+ * @ip: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer
+ * @en_rule: rule
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+int ipa_generate_hw_rule(enum ipa_ip_type ip,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+
+	if (ip == IPA_IP_v4) {
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+		    IPA_FLT_FLOW_LABEL) {
+			IPAERR("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS) {
+			*en_rule |= IPA_TOS_EQ;
+			*buf = ipa_write_8(attrib->u.v4.tos, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 0 => offset of TOS in v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32((attrib->tos_mask << 16), *buf);
+			*buf = ipa_write_32((attrib->tos_value << 16), *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			*buf = ipa_write_8(attrib->u.v4.protocol, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 12 => offset of src ip in v4 header */
+			*buf = ipa_write_8(12, *buf);
+			*buf = ipa_write_32(attrib->u.v4.src_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.src_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* 16 => offset of dst ip in v4 header */
+			*buf = ipa_write_8(16, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr_mask, *buf);
+			*buf = ipa_write_32(attrib->u.v4.dst_addr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* -2 => offset of ether type in L2 hdr */
+			*buf = ipa_write_8((u8)-2, *buf);
+			*buf = ipa_write_16(0, *buf);
+			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
+			*buf = ipa_write_16(0, *buf);
+			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port_hi, *buf);
+			*buf = ipa_write_16(attrib->src_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v4 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
+			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of type after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->type, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 1  => offset of code after v4 header */
+			*buf = ipa_write_8(1, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->code, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of SPI after v4 header FIXME */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFFFFFFFF, *buf);
+			*buf = ipa_write_32(attrib->spi, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v4 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v4 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -14 => offset of dst mac addr in Ethernet II hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-14,
+				attrib->dst_mac_addr_mask,
+				attrib->dst_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -8 => offset of src mac addr in Ethernet II hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-8,
+				attrib->src_mac_addr_mask,
+				attrib->src_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -22 => offset of dst mac addr in 802.3 hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-22,
+				attrib->dst_mac_addr_mask,
+				attrib->dst_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -16 => offset of src mac addr in 802.3 hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-16,
+				attrib->src_mac_addr_mask,
+				attrib->src_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			*buf = ipa_write_8(0, *buf);    /* offset, reserved */
+			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
+			*buf = ipa_write_32(attrib->meta_data, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			*en_rule |= IPA_IS_FRAG;
+			*buf = ipa_pad_to_32(*buf);
+		}
+	} else if (ip == IPA_IP_v6) {
+
+		/* v6 code below assumes no extension headers TODO: fix this */
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			IPAERR("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			*buf = ipa_write_8(attrib->u.v6.next_hdr, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			/* -2 => offset of ether type in L2 hdr */
+			*buf = ipa_write_8((u8)-2, *buf);
+			*buf = ipa_write_16(0, *buf);
+			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
+			*buf = ipa_write_16(0, *buf);
+			*buf = ipa_write_16(htons(attrib->ether_type), *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of type after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->type, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 1  => offset of code after v6 header */
+			*buf = ipa_write_8(1, *buf);
+			*buf = ipa_write_32(0xFF, *buf);
+			*buf = ipa_write_32(attrib->code, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			/* 0  => offset of SPI after v6 header FIXME */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32(0xFFFFFFFF, *buf);
+			*buf = ipa_write_32(attrib->spi, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_write_16(attrib->src_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v6 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_write_16(attrib->dst_port, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 0  => offset of src port after v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_16(attrib->src_port_hi, *buf);
+			*buf = ipa_write_16(attrib->src_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			/* 2  => offset of dst port after v6 header */
+			*buf = ipa_write_8(2, *buf);
+			*buf = ipa_write_16(attrib->dst_port_hi, *buf);
+			*buf = ipa_write_16(attrib->dst_port_lo, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 8 => offset of src ip in v6 header */
+			*buf = ipa_write_8(8, *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[0],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[1],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[2],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr_mask[3],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[0], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[1], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[2], *buf);
+			*buf = ipa_write_32(attrib->u.v6.src_addr[3], *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 24 => offset of dst ip in v6 header */
+			*buf = ipa_write_8(24, *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[0],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[1],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[2],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr_mask[3],
+					*buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[0], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[1], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[2], *buf);
+			*buf = ipa_write_32(attrib->u.v6.dst_addr[3], *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TC) {
+			*en_rule |= IPA_FLT_TC;
+			*buf = ipa_write_8(attrib->u.v6.tc, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			/* 0 => offset of TOS in v6 header */
+			*buf = ipa_write_8(0, *buf);
+			*buf = ipa_write_32((attrib->tos_mask << 20), *buf);
+			*buf = ipa_write_32(0, *buf);
+			*buf = ipa_write_32(0, *buf);
+			*buf = ipa_write_32(0, *buf);
+
+			*buf = ipa_write_32((attrib->tos_value << 20), *buf);
+			*buf = ipa_write_32(0, *buf);
+			*buf = ipa_write_32(0, *buf);
+			*buf = ipa_write_32(0, *buf);
+			*buf = ipa_pad_to_32(*buf);
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -14 => offset of dst mac addr in Ethernet II hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-14,
+				attrib->dst_mac_addr_mask,
+				attrib->dst_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -8 => offset of src mac addr in Ethernet II hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-8,
+				attrib->src_mac_addr_mask,
+				attrib->src_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -22 => offset of dst mac addr in 802.3 hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-22,
+				attrib->dst_mac_addr_mask,
+				attrib->dst_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -16 => offset of src mac addr in 802.3 hdr */
+			ipa_generate_mac_addr_hw_rule(
+				buf,
+				-16,
+				attrib->src_mac_addr_mask,
+				attrib->src_mac_addr);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			*en_rule |= IPA_FLT_FLOW_LABEL;
+			 /* FIXME FL is only 20 bits */
+			*buf = ipa_write_32(attrib->u.v6.flow_label, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			*buf = ipa_write_8(0, *buf);    /* offset, reserved */
+			*buf = ipa_write_32(attrib->meta_data_mask, *buf);
+			*buf = ipa_write_32(attrib->meta_data, *buf);
+			*buf = ipa_pad_to_32(*buf);
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			*en_rule |= IPA_IS_FRAG;
+			*buf = ipa_pad_to_32(*buf);
+		}
+	} else {
+		IPAERR("unsupported ip %d\n", ip);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		if (ipa_ofst_meq32[ofst_meq32] == -1) {
+			IPAERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= ipa_ofst_meq32[ofst_meq32];
+		*buf = ipa_write_8(0, *buf);    /* offset */
+		*buf = ipa_write_32(0, *buf);   /* mask */
+		*buf = ipa_write_32(0, *buf);   /* val */
+		*buf = ipa_pad_to_32(*buf);
+		ofst_meq32++;
+	}
+
+	return 0;
+}
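+
+/*
+ * Usage sketch (illustrative only): generate the HW rule bytes for a simple
+ * IPv4 destination-address filter. The buffer size and address value are
+ * assumptions made for the example.
+ *
+ *	struct ipa_rule_attrib attrib;
+ *	u8 rule[128];
+ *	u8 *wp = rule;
+ *	u16 en_rule = 0;
+ *
+ *	memset(&attrib, 0, sizeof(attrib));
+ *	attrib.attrib_mask = IPA_FLT_DST_ADDR;
+ *	attrib.u.v4.dst_addr = 0xC0000201;	/* 192.0.2.1 */
+ *	attrib.u.v4.dst_addr_mask = 0xFFFFFFFF;
+ *	if (ipa_generate_hw_rule(IPA_IP_v4, &attrib, &wp, &en_rule))
+ *		return -EPERM;
+ */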
+
+void ipa_generate_flt_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+	u8 hdr_mac_addr_offset,	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+	eq_atrb->offset_meq_128[ofst_meq128].mask[0] = mac_addr_mask[3];
+	eq_atrb->offset_meq_128[ofst_meq128].mask[1] = mac_addr_mask[2];
+	eq_atrb->offset_meq_128[ofst_meq128].mask[2] = mac_addr_mask[1];
+	eq_atrb->offset_meq_128[ofst_meq128].mask[3] = mac_addr_mask[0];
+	eq_atrb->offset_meq_128[ofst_meq128].mask[4] = 0;
+	eq_atrb->offset_meq_128[ofst_meq128].mask[5] = 0;
+	eq_atrb->offset_meq_128[ofst_meq128].mask[6] = mac_addr_mask[5];
+	eq_atrb->offset_meq_128[ofst_meq128].mask[7] = mac_addr_mask[4];
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 8);
+	eq_atrb->offset_meq_128[ofst_meq128].value[0] =	mac_addr[3];
+	eq_atrb->offset_meq_128[ofst_meq128].value[1] =	mac_addr[2];
+	eq_atrb->offset_meq_128[ofst_meq128].value[2] =	mac_addr[1];
+	eq_atrb->offset_meq_128[ofst_meq128].value[3] =	mac_addr[0];
+	eq_atrb->offset_meq_128[ofst_meq128].value[4] = 0;
+	eq_atrb->offset_meq_128[ofst_meq128].value[5] = 0;
+	eq_atrb->offset_meq_128[ofst_meq128].value[6] =	mac_addr[5];
+	eq_atrb->offset_meq_128[ofst_meq128].value[7] =	mac_addr[4];
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 8);
+}
+
+int ipa_generate_flt_eq(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (ip == IPA_IP_v4) {
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC || attrib->attrib_mask &
+		    IPA_FLT_FLOW_LABEL) {
+			IPAERR("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS) {
+			*en_rule |= IPA_TOS_EQ;
+			eq_atrb->tos_eq_present = 1;
+			eq_atrb->tos_eq = attrib->u.v4.tos;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+			eq_atrb->offset_meq_32[ofst_meq32].mask =
+				attrib->tos_mask << 16;
+			eq_atrb->offset_meq_32[ofst_meq32].value =
+				attrib->tos_value << 16;
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			eq_atrb->protocol_eq_present = 1;
+			eq_atrb->protocol_eq = attrib->u.v4.protocol;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+			eq_atrb->offset_meq_32[ofst_meq32].mask =
+				attrib->u.v4.src_addr_mask;
+			eq_atrb->offset_meq_32[ofst_meq32].value =
+				attrib->u.v4.src_addr;
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+			eq_atrb->offset_meq_32[ofst_meq32].mask =
+				attrib->u.v4.dst_addr_mask;
+			eq_atrb->offset_meq_32[ofst_meq32].value =
+				attrib->u.v4.dst_addr;
+			ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->src_port_lo;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->src_port_hi;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->dst_port_lo;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->dst_port_hi;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->type;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->code;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xFFFFFFFF;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->spi;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->src_port;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->src_port;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->dst_port;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->dst_port;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			eq_atrb->metadata_meq32_present = 1;
+			eq_atrb->metadata_meq32.offset = 0;
+			eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+			eq_atrb->metadata_meq32.value = attrib->meta_data;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			*en_rule |= IPA_IS_FRAG;
+			eq_atrb->ipv4_frag_eq_present = 1;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -14 => offset of dst mac addr in Ethernet II hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
+				attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -8 => offset of src mac addr in Ethernet II hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
+				attrib->src_mac_addr_mask, attrib->src_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -22 => offset of dst mac addr in 802.3 hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
+				attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -16 => offset of src mac addr in 802.3 hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
+				attrib->src_mac_addr_mask, attrib->src_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+			eq_atrb->offset_meq_32[ofst_meq32].mask =
+				htons(attrib->ether_type);
+			eq_atrb->offset_meq_32[ofst_meq32].value =
+				htons(attrib->ether_type);
+			ofst_meq32++;
+		}
+	} else if (ip == IPA_IP_v6) {
+
+		/* v6 code below assumes no extension headers TODO: fix this */
+
+		/* error check */
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			IPAERR("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+			*en_rule |= IPA_PROTOCOL_EQ;
+			eq_atrb->protocol_eq_present = 1;
+			eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TYPE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->type;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_CODE) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->code;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SPI) {
+			if (ipa_ihl_ofst_meq32[ihl_ofst_meq32] == -1) {
+				IPAERR("ran out of ihl_meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_meq32[ihl_ofst_meq32];
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+				0xFFFFFFFF;
+			eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+				attrib->spi;
+			ihl_ofst_meq32++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->src_port;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->src_port;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->dst_port;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->dst_port;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->src_port_hi < attrib->src_port_lo) {
+				IPAERR("bad src port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->src_port_lo;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->src_port_hi;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+			if (ipa_ihl_ofst_rng16[ihl_ofst_rng16] == -1) {
+				IPAERR("ran out of ihl_rng16 eq\n");
+				return -EPERM;
+			}
+			if (attrib->dst_port_hi < attrib->dst_port_lo) {
+				IPAERR("bad dst port range param\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ihl_ofst_rng16[ihl_ofst_rng16];
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+				= attrib->dst_port_lo;
+			eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+				= attrib->dst_port_hi;
+			ihl_ofst_rng16++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+				= attrib->u.v6.src_addr_mask[0];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+				= attrib->u.v6.src_addr_mask[1];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+				= attrib->u.v6.src_addr_mask[2];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+				= attrib->u.v6.src_addr_mask[3];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+				= attrib->u.v6.src_addr[0];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+				= attrib->u.v6.src_addr[1];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+				= attrib->u.v6.src_addr[2];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+					12) = attrib->u.v6.src_addr[3];
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+				= attrib->u.v6.dst_addr_mask[0];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+				= attrib->u.v6.dst_addr_mask[1];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+				= attrib->u.v6.dst_addr_mask[2];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+				= attrib->u.v6.dst_addr_mask[3];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+				= attrib->u.v6.dst_addr[0];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+				= attrib->u.v6.dst_addr[1];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+				= attrib->u.v6.dst_addr[2];
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+					12) = attrib->u.v6.dst_addr[3];
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TC) {
+			*en_rule |= IPA_FLT_TC;
+			eq_atrb->tc_eq_present = 1;
+			eq_atrb->tc_eq = attrib->u.v6.tc;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+			eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
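+			/*
+			 * IPv6 word 0 is version:4, traffic class:8,
+			 * flow label:20, so shifting by 20 places the TOS
+			 * value/mask over the traffic class field (assuming
+			 * the HW compares the word in big-endian order).
+			 */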
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+				= attrib->tos_mask << 20;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+				= 0;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+				= 0;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+				= 0;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+				= attrib->tos_value << 20;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+				= 0;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+				= 0;
+			*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+					12) = 0;
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			*en_rule |= IPA_FLT_FLOW_LABEL;
+			eq_atrb->fl_eq_present = 1;
+			eq_atrb->fl_eq = attrib->u.v6.flow_label;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+			*en_rule |= IPA_METADATA_COMPARE;
+			eq_atrb->metadata_meq32_present = 1;
+			eq_atrb->metadata_meq32.offset = 0;
+			eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+			eq_atrb->metadata_meq32.value = attrib->meta_data;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+			*en_rule |= IPA_IS_FRAG;
+			eq_atrb->ipv4_frag_eq_present = 1;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -14 => offset of dst mac addr in Ethernet II hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -14,
+				attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -8 => offset of src mac addr in Ethernet II hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -8,
+				attrib->src_mac_addr_mask, attrib->src_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -22 => offset of dst mac addr in 802.3 hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -22,
+				attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+			if (ipa_ofst_meq128[ofst_meq128] == -1) {
+				IPAERR("ran out of meq128 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq128[ofst_meq128];
+
+			/* -16 => offset of src mac addr in 802.3 hdr */
+			ipa_generate_flt_mac_addr_eq(eq_atrb, -16,
+				attrib->src_mac_addr_mask, attrib->src_mac_addr,
+				ofst_meq128);
+
+			ofst_meq128++;
+		}
+
+		if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+			if (ipa_ofst_meq32[ofst_meq32] == -1) {
+				IPAERR("ran out of meq32 eq\n");
+				return -EPERM;
+			}
+			*en_rule |= ipa_ofst_meq32[ofst_meq32];
+			eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+			eq_atrb->offset_meq_32[ofst_meq32].mask =
+				htons(attrib->ether_type);
+			eq_atrb->offset_meq_32[ofst_meq32].value =
+				htons(attrib->ether_type);
+			ofst_meq32++;
+		}
+
+	} else {
+		IPAERR("unsupported ip %d\n", ip);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		if (ipa_ofst_meq32[ofst_meq32] == -1) {
+			IPAERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= ipa_ofst_meq32[ofst_meq32];
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+		eq_atrb->offset_meq_32[ofst_meq32].mask = 0;
+		eq_atrb->offset_meq_32[ofst_meq32].value = 0;
+		ofst_meq32++;
+	}
+
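+	/*
+	 * Report back the generated rule bitmap and how many equations of
+	 * each type were consumed.
+	 */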
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+/**
+ * ipa2_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a
+ * one-shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int result = -EINVAL;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	result = ipa2_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+	if (result)
+		return result;
+
+	result = ipa2_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+	if (result)
+		return result;
+
+	result = ipa2_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+	if (result)
+		return result;
+
+	result = ipa2_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+	if (result)
+		return result;
+
+	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+		result = ipa2_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+		if (result)
+			return result;
+
+		result = ipa2_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+		if (result)
+			return result;
+
+		result = ipa2_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+		if (result)
+			return result;
+
+		result = ipa2_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+		if (result)
+			return result;
+	} else {
+		result = ipa2_cfg_ep_metadata_mask(clnt_hdl,
+				&ipa_ep_cfg->metadata_mask);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+
+const char *ipa_get_nat_en_str(enum ipa_nat_en_type nat_en)
+{
+	switch (nat_en) {
+	case (IPA_BYPASS_NAT):
+		return "NAT disabled";
+	case (IPA_SRC_NAT):
+		return "Source NAT";
+	case (IPA_DST_NAT):
+		return "Dst NAT";
+	}
+
+	return "undefined";
+}
+
+void _ipa_cfg_ep_nat_v1_1(u32 clnt_hdl,
+		const struct ipa_ep_cfg_nat *ep_nat)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
+			IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
+			IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_NAT_N_OFST_v1_1(clnt_hdl),
+			reg_val);
+}
+
+void _ipa_cfg_ep_nat_v2_0(u32 clnt_hdl,
+		const struct ipa_ep_cfg_nat *ep_nat)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_nat->nat_en,
+			IPA_ENDP_INIT_NAT_N_NAT_EN_SHFT,
+			IPA_ENDP_INIT_NAT_N_NAT_EN_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_NAT_N_OFST_v2_0(clnt_hdl),
+			reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, nat_en=%d(%s)\n",
+			clnt_hdl,
+			ep_nat->nat_en,
+			ipa_get_nat_en_str(ep_nat->nat_en));
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_nat(clnt_hdl, ep_nat);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+static void _ipa_cfg_ep_status_v1_1(u32 clnt_hdl,
+				const struct ipa_ep_cfg_status *ep_status)
+{
+	IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_status_v2_0(u32 clnt_hdl,
+		const struct ipa_ep_cfg_status *ep_status)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_status->status_en,
+			IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_status->status_ep,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_STATUS_n_OFST(clnt_hdl),
+			reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_status() - IPA end-point status configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_status(u32 clnt_hdl, const struct ipa_ep_cfg_status *ep_status)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, status_en=%d status_ep=%d\n",
+			clnt_hdl,
+			ep_status->status_en,
+			ep_status->status_ep);
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].status = *ep_status;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_status(clnt_hdl, ep_status);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+static void _ipa_cfg_ep_cfg_v1_1(u32 clnt_hdl,
+				const struct ipa_ep_cfg_cfg *cfg)
+{
+	IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_cfg_v2_0(u32 clnt_hdl,
+		const struct ipa_ep_cfg_cfg *cfg)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, cfg->frag_offload_en,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(reg_val, cfg->cs_offload_en,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(reg_val, cfg->cs_metadata_hdr_offset,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio, IPA_ENDP_INIT_CFG_n_OFST(clnt_hdl),
+			reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d\n",
+			clnt_hdl,
+			cfg->frag_offload_en,
+			cfg->cs_offload_en,
+			cfg->cs_metadata_hdr_offset);
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_cfg(clnt_hdl, cfg);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+static void _ipa_cfg_ep_metadata_mask_v1_1(u32 clnt_hdl,
+			const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+	IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_metadata_mask_v2_0(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, metadata_mask->metadata_mask,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_OFST(clnt_hdl),
+			reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_metadata_mask(u32 clnt_hdl,
+	const struct ipa_ep_cfg_metadata_mask *metadata_mask)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, metadata_mask=0x%x\n",
+			clnt_hdl,
+			metadata_mask->metadata_mask);
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_metadata_mask(clnt_hdl, metadata_mask);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+void _ipa_cfg_ep_hdr_v1_1(u32 pipe_number,
+		const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	u32 val = 0;
+
+	val = IPA_SETFIELD(ep_hdr->hdr_len,
+		   IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK) |
+	      IPA_SETFIELD(ep_hdr->hdr_ofst_metadata_valid,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK) |
+	      IPA_SETFIELD(ep_hdr->hdr_ofst_metadata,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK) |
+	      IPA_SETFIELD(ep_hdr->hdr_additional_const_len,
+		   IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK) |
+	      IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size_valid,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK) |
+	      IPA_SETFIELD(ep_hdr->hdr_ofst_pkt_size,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK) |
+	      IPA_SETFIELD(ep_hdr->hdr_a5_mux,
+		   IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
+		   IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HDR_N_OFST_v1_1(pipe_number), val);
+}
+
+void _ipa_cfg_ep_hdr_v2_0(u32 pipe_number,
+		const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_metadata_reg_valid,
+			IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_SHFT_v2,
+			IPA_ENDP_INIT_HDR_N_HDR_METADATA_REG_VALID_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_remove_additional,
+			IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+			IPA_ENDP_INIT_HDR_N_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_a5_mux,
+			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_A5_MUX_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_pkt_size_valid,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_additional_const_len,
+			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_ofst_metadata_valid,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_OFST_METADATA_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr->hdr_len,
+			IPA_ENDP_INIT_HDR_N_HDR_LEN_SHFT,
+			IPA_ENDP_INIT_HDR_N_HDR_LEN_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HDR_N_OFST_v2_0(pipe_number), reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+	IPADBG("pipe=%d remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+		clnt_hdl,
+		ep_hdr->hdr_remove_additional,
+		ep_hdr->hdr_a5_mux,
+		ep_hdr->hdr_ofst_pkt_size);
+
+	IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
+		ep_hdr->hdr_ofst_pkt_size_valid,
+		ep_hdr->hdr_additional_const_len);
+
+	IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n",
+		ep_hdr->hdr_ofst_metadata,
+		ep_hdr->hdr_ofst_metadata_valid,
+		ep_hdr->hdr_len);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr = *ep_hdr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ep->cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+static int _ipa_cfg_ep_hdr_ext_v1_1(u32 clnt_hdl,
+				const struct ipa_ep_cfg_hdr_ext *ep_hdr)
+{
+	IPADBG("Not supported for version 1.1\n");
+	return 0;
+}
+
+static int _ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+		const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext, u32 reg_val)
+{
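+	/*
+	 * Common HDR_EXT programming; the caller has already placed the
+	 * version-specific PAD_TO_ALIGNMENT field into reg_val.
+	 */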
+	u8 hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_payload_len_inc_padding,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, hdr_endianness,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_HDR_EXT_n_OFST_v2_0(clnt_hdl), reg_val);
+
+	return 0;
+}
+
+static int _ipa_cfg_ep_hdr_ext_v2_0(u32 clnt_hdl,
+				const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_0);
+
+	return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
+}
+
+static int _ipa_cfg_ep_hdr_ext_v2_5(u32 clnt_hdl,
+				const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
+
+	return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
+
+}
+
+static int _ipa_cfg_ep_hdr_ext_v2_6L(u32 clnt_hdl,
+				const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v2_5);
+
+	return _ipa_cfg_ep_hdr_ext(clnt_hdl, ep_hdr_ext, reg_val);
+
+}
+
+/**
+ * ipa2_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_hdr_ext(u32 clnt_hdl,
+		       const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
+		clnt_hdl,
+		ep_hdr_ext->hdr_pad_to_alignment);
+
+	IPADBG("hdr_total_len_or_pad_offset=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_offset);
+
+	IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
+		ep_hdr_ext->hdr_payload_len_inc_padding,
+		ep_hdr_ext->hdr_total_len_or_pad);
+
+	IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_valid,
+		ep_hdr_ext->hdr_little_endian);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr_ext = *ep_hdr_ext;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_hdr_ext(clnt_hdl, &ep->cfg.hdr_ext);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa2_cfg_ep_ctrl() - IPA end-point control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_ctrl:	[in] IPA end-point control configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	u32 reg_val = 0;
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes || ep_ctrl == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
+		clnt_hdl,
+		ep_ctrl->ipa_ep_suspend,
+		ep_ctrl->ipa_ep_delay);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_suspend,
+		IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_SHFT,
+		IPA_ENDP_INIT_CTRL_N_ENDP_SUSPEND_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_ctrl->ipa_ep_delay,
+			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_SHFT,
+			IPA_ENDP_INIT_CTRL_N_ENDP_DELAY_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_CTRL_N_OFST(clnt_hdl), reg_val);
+
+	return 0;
+
+}
+
+/**
+ * ipa_cfg_aggr_cntr_granularity() - set the AGGR timer granularity
+ * @aggr_granularity:	[in] granularity of the AGGR timers, in units of
+ *			1/32 msec
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_aggr_cntr_granularity(u8 aggr_granularity)
+{
+	u32 reg_val = 0;
+
+	if (aggr_granularity <= IPA_AGGR_GRAN_MIN ||
+			aggr_granularity > IPA_AGGR_GRAN_MAX) {
+		IPAERR("bad param, aggr_granularity = %d\n",
+				aggr_granularity);
+		return -EINVAL;
+	}
+	IPADBG("aggr_granularity=%d\n", aggr_granularity);
+
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
+	reg_val = (reg_val & ~IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
+
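+	/* the HW field encodes the granularity as (value - 1) */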
+	IPA_SETFIELD_IN_REG(reg_val, aggr_granularity - 1,
+			IPA_COUNTER_CFG_AGGR_GRAN_SHFT,
+			IPA_COUNTER_CFG_AGGR_GRAN_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_COUNTER_CFG_OFST, reg_val);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(ipa_cfg_aggr_cntr_granularity);
+
+/**
+ * ipa_cfg_eot_coal_cntr_granularity() - set the EOT_COAL timer granularity
+ * @eot_coal_granularity: granularity of the EOT_COAL timer, in units of
+ *			  1/32 msec
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa_cfg_eot_coal_cntr_granularity(u8 eot_coal_granularity)
+{
+	u32 reg_val = 0;
+
+	if (eot_coal_granularity <= IPA_EOT_COAL_GRAN_MIN ||
+			eot_coal_granularity > IPA_EOT_COAL_GRAN_MAX) {
+		IPAERR("bad parm, eot_coal_granularity = %d\n",
+				eot_coal_granularity);
+		return -EINVAL;
+	}
+	IPADBG("eot_coal_granularity=%d\n", eot_coal_granularity);
+
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_COUNTER_CFG_OFST);
+	reg_val = (reg_val & ~IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, eot_coal_granularity - 1,
+			IPA_COUNTER_CFG_EOT_COAL_GRAN_SHFT,
+			IPA_COUNTER_CFG_EOT_COAL_GRAN_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_COUNTER_CFG_OFST, reg_val);
+
+	return 0;
+
+}
+EXPORT_SYMBOL(ipa_cfg_eot_coal_cntr_granularity);
+
+const char *ipa_get_mode_type_str(enum ipa_mode_type mode)
+{
+	switch (mode) {
+	case (IPA_BASIC):
+		return "Basic";
+	case (IPA_ENABLE_FRAMING_HDLC):
+		return "HDLC framing";
+	case (IPA_ENABLE_DEFRAMING_HDLC):
+		return "HDLC de-framing";
+	case (IPA_DMA):
+		return "DMA";
+	}
+
+	return "undefined";
+}
+
+void _ipa_cfg_ep_mode_v1_1(u32 pipe_number, u32 dst_pipe_number,
+		const struct ipa_ep_cfg_mode *ep_mode)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
+			IPA_ENDP_INIT_MODE_N_MODE_SHFT,
+			IPA_ENDP_INIT_MODE_N_MODE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
+			IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v1_1,
+			IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v1_1);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_MODE_N_OFST_v1_1(pipe_number), reg_val);
+}
+
+void _ipa_cfg_ep_mode_v2_0(u32 pipe_number, u32 dst_pipe_number,
+		const struct ipa_ep_cfg_mode *ep_mode)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_mode->mode,
+			IPA_ENDP_INIT_MODE_N_MODE_SHFT,
+			IPA_ENDP_INIT_MODE_N_MODE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, dst_pipe_number,
+			IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_SHFT_v2_0,
+			IPA_ENDP_INIT_MODE_N_DEST_PIPE_INDEX_BMSK_v2_0);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_MODE_N_OFST_v2_0(pipe_number), reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+	int ep;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = ipa2_get_ep_mapping(ep_mode->dst);
+	if (ep == -1 && ep_mode->mode == IPA_DMA) {
+		IPAERR("dst %d does not exist\n", ep_mode->dst);
+		return -EINVAL;
+	}
+
+	WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
+
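+	/* non-consumer destinations map to the APPS LAN consumer pipe */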
+	if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
+		ep = ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+	IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
+			clnt_hdl,
+			ep_mode->mode,
+			ipa_get_mode_type_str(ep_mode->mode),
+			ep_mode->dst);
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
+	ipa_ctx->ep[clnt_hdl].dst_pipe_index = ep;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_mode(clnt_hdl,
+			ipa_ctx->ep[clnt_hdl].dst_pipe_index,
+			ep_mode);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+const char *get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
+{
+	switch (aggr_en) {
+	case (IPA_BYPASS_AGGR):
+		return "no aggregation";
+	case (IPA_ENABLE_AGGR):
+		return "aggregation enabled";
+	case (IPA_ENABLE_DEAGGR):
+		return "de-aggregation enabled";
+	}
+
+	return "undefined";
+}
+
+const char *get_aggr_type_str(enum ipa_aggr_type aggr_type)
+{
+	switch (aggr_type) {
+	case (IPA_MBIM_16):
+		return "MBIM_16";
+	case (IPA_HDLC):
+		return "HDLC";
+	case (IPA_TLP):
+		return "TLP";
+	case (IPA_RNDIS):
+		return "RNDIS";
+	case (IPA_GENERIC):
+		return "GENERIC";
+	case (IPA_QCMAP):
+		return "QCMAP";
+	}
+	return "undefined";
+}
+
+void _ipa_cfg_ep_aggr_v1_1(u32 pipe_number,
+		const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
+			IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
+			IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_AGGR_N_OFST_v1_1(pipe_number), reg_val);
+}
+
+void _ipa_cfg_ep_aggr_v2_0(u32 pipe_number,
+		const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_en,
+			IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_byte_limit,
+			IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_time_limit,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_SHFT,
+			IPA_ENDP_INIT_AGGR_N_AGGR_TIME_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_pkt_limit,
+			IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+			IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_aggr->aggr_sw_eof_active,
+			IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+			IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_AGGR_N_OFST_v2_0(pipe_number), reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
+			clnt_hdl,
+			ep_aggr->aggr_en,
+			get_aggr_enable_str(ep_aggr->aggr_en),
+			ep_aggr->aggr,
+			get_aggr_type_str(ep_aggr->aggr),
+			ep_aggr->aggr_byte_limit,
+			ep_aggr->aggr_time_limit);
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_aggr(clnt_hdl, ep_aggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+void _ipa_cfg_ep_route_v1_1(u32 pipe_index, u32 rt_tbl_index)
+{
+	int reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
+			IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
+			IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_ROUTE_N_OFST_v1_1(pipe_index),
+			reg_val);
+}
+
+void _ipa_cfg_ep_route_v2_0(u32 pipe_index, u32 rt_tbl_index)
+{
+	int reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, rt_tbl_index,
+			IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_SHFT,
+			IPA_ENDP_INIT_ROUTE_N_ROUTE_TABLE_INDEX_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_ROUTE_N_OFST_v2_0(pipe_index),
+			reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("ROUTE does not apply to IPA out EP %d\n",
+				clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * if DMA mode was configured previously for this EP, return with
+	 * success
+	 */
+	if (ipa_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+		IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
+				clnt_hdl);
+		return 0;
+	}
+
+	if (ep_route->rt_tbl_hdl)
+		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+	IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
+			clnt_hdl,
+			ep_route->rt_tbl_hdl);
+
+	/* always use "default" routing table when programming EP ROUTE reg */
+	if (ipa_ctx->ipa_hw_type >= IPA_HW_v2_0)
+		ipa_ctx->ep[clnt_hdl].rt_tbl_idx =
+			IPA_MEM_PART(v4_apps_rt_index_lo);
+	else
+		ipa_ctx->ep[clnt_hdl].rt_tbl_idx = 0;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_route(clnt_hdl,
+			ipa_ctx->ep[clnt_hdl].rt_tbl_idx);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+void _ipa_cfg_ep_holb_v1_1(u32 pipe_number,
+			const struct ipa_ep_cfg_holb *ep_holb)
+{
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v1_1(pipe_number),
+			ep_holb->en);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v1_1(pipe_number),
+			(u16)ep_holb->tmr_val);
+}
+
+void _ipa_cfg_ep_holb_v2_0(u32 pipe_number,
+			const struct ipa_ep_cfg_holb *ep_holb)
+{
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
+			ep_holb->en);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
+			(u16)ep_holb->tmr_val);
+}
+
+void _ipa_cfg_ep_holb_v2_5(u32 pipe_number,
+			const struct ipa_ep_cfg_holb *ep_holb)
+{
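+	/*
+	 * Unlike the v1.1/v2.0 variants, the full tmr_val is written here;
+	 * presumably the HOLB timer field is wider on v2.5+ hardware.
+	 */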
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
+			ep_holb->en);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
+			ep_holb->tmr_val);
+}
+
+void _ipa_cfg_ep_holb_v2_6L(u32 pipe_number,
+			const struct ipa_ep_cfg_holb *ep_holb)
+{
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_N_OFST_v2_0(pipe_number),
+		ep_holb->en);
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_N_OFST_v2_0(pipe_number),
+		ep_holb->tmr_val);
+}
+
+/**
+ * ipa2_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
+	    ep_holb->tmr_val > ipa_ctx->ctrl->max_holb_tmr_val ||
+	    ep_holb->en > 1) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_PROD(ipa_ctx->ep[clnt_hdl].client)) {
+		IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (!ipa_ctx->ctrl->ipa_cfg_ep_holb) {
+		IPAERR("HOLB is not supported for this IPA core\n");
+		return -EINVAL;
+	}
+
+	ipa_ctx->ep[clnt_hdl].holb = *ep_holb;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_holb(clnt_hdl, ep_holb);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+				ep_holb->tmr_val);
+
+	return 0;
+}
+
+/**
+ * ipa2_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper for ipa2_cfg_ep_holb() that takes a client name instead of a
+ * client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client:	[in] client name
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ep_holb)
+{
+	return ipa2_cfg_ep_holb(ipa2_get_ep_mapping(client), ep_holb);
+}
+
+static int _ipa_cfg_ep_deaggr_v1_1(u32 clnt_hdl,
+				const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	IPADBG("Not supported for version 1.1\n");
+	return 0;
+}
+
+static int _ipa_cfg_ep_deaggr_v2_0(u32 clnt_hdl,
+				   const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->deaggr_hdr_len,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_valid,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->packet_offset_location,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+	IPA_SETFIELD_IN_REG(reg_val, ep_deaggr->max_packet_len,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+		IPA_ENDP_INIT_DEAGGR_n_OFST_v2_0(clnt_hdl), reg_val);
+
+	return 0;
+}
+
+/**
+ * ipa2_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_deaggr:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_deaggr(u32 clnt_hdl,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	struct ipa_ep_context *ep;
+
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+	    ipa_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d deaggr_hdr_len=%d\n",
+		clnt_hdl,
+		ep_deaggr->deaggr_hdr_len);
+
+	IPADBG("packet_offset_valid=%d\n",
+		ep_deaggr->packet_offset_valid);
+
+	IPADBG("packet_offset_location=%d max_packet_len=%d\n",
+		ep_deaggr->packet_offset_location,
+		ep_deaggr->max_packet_len);
+
+	ep = &ipa_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.deaggr = *ep_deaggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_deaggr(clnt_hdl, &ep->cfg.deaggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+static void _ipa_cfg_ep_metadata_v1_1(u32 pipe_number,
+					const struct ipa_ep_cfg_metadata *meta)
+{
+	IPADBG("Not supported for version 1.1\n");
+}
+
+static void _ipa_cfg_ep_metadata_v2_0(u32 pipe_number,
+					const struct ipa_ep_cfg_metadata *meta)
+{
+	u32 reg_val = 0;
+
+	IPA_SETFIELD_IN_REG(reg_val, meta->qmap_id,
+			IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK);
+
+	ipa_write_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_HDR_METADATA_n_OFST(pipe_number),
+			reg_val);
+}
+
+/**
+ * ipa2_cfg_ep_metadata() - IPA end-point metadata configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa2_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
+{
+	if (clnt_hdl >= ipa_ctx->ipa_num_pipes ||
+		ipa_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl, ipa_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
+
+	/* copy over EP cfg */
+	ipa_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	ipa_ctx->ctrl->ipa_cfg_ep_metadata(clnt_hdl, ep_md);
+	ipa_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
+	ipa_ctx->ctrl->ipa_cfg_ep_hdr(clnt_hdl, &ipa_ctx->ep[clnt_hdl].cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+EXPORT_SYMBOL(ipa2_cfg_ep_metadata);
+
+int ipa2_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	struct ipa_ep_cfg_metadata meta;
+	struct ipa_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (param_in->client  >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", param_in->client);
+		goto fail;
+	}
+
+	ipa_ep_idx = ipa2_get_ep_mapping(param_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail;
+	}
+
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+	if (!ep->valid) {
+		IPAERR("EP not allocated.\n");
+		goto fail;
+	}
+
+	meta.qmap_id = param_in->qmap_id;
+	if (param_in->client == IPA_CLIENT_USB_PROD ||
+	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
+	    param_in->client == IPA_CLIENT_ODU_PROD) {
+		result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
+	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
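+		/*
+		 * The WLAN producer is a WDI pipe, so the QMAP ID is
+		 * programmed through the WDI-specific helper.
+		 */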
+		ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
+		result = ipa_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
+		if (result)
+			IPAERR("qmap_id %d write failed on ep=%d\n",
+					meta.qmap_id, ipa_ep_idx);
+		result = 0;
+	}
+
+fail:
+	return result;
+}
+
+/**
+ * ipa_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+
+	IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+				byt[0], byt[1], byt[2], byt[3]);
+	}
+	IPADBG("END\n");
+}
+
+/**
+ * ipa_rx_timeout_min_max_calc() - calculate min/max Rx polling timeouts
+ * @min: Rx polling minimum timeout
+ * @max: Rx polling maximum timeout
+ * @time: time from the DTSI entry or from the debugfs file
+ *
+ * The maximum allowed time is 10 msec.
+ */
+void ipa_rx_timeout_min_max_calc(u32 *min, u32 *max, s8 time)
+{
+	if ((time >= MIN_RX_POLL_TIME) &&
+		(time <= MAX_RX_POLL_TIME)) {
+		*min = (time * MSEC) + LOWER_CUTOFF;
+		*max = (time * MSEC) + UPPER_CUTOFF;
+	} else {
+		/* Setting up the default min max time */
+		IPADBG("Setting up default rx polling timeout\n");
+		*min = (MIN_RX_POLL_TIME * MSEC) +
+			LOWER_CUTOFF;
+		*max = (MIN_RX_POLL_TIME * MSEC) +
+			UPPER_CUTOFF;
+	}
+	IPADBG("Rx polling timeout Min = %u Max = %u\n", *min, *max);
+}
+
+/**
+ * ipa_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa_pipe_mem_init(u32 start_ofst, u32 size)
+{
+	int res;
+	u32 aligned_start_ofst;
+	u32 aligned_size;
+	struct gen_pool *pool;
+
+	if (!size) {
+		IPAERR("no IPA pipe memory allocated\n");
+		goto fail;
+	}
+
+	aligned_start_ofst = IPA_HW_TABLE_ALIGNMENT(start_ofst);
+	aligned_size = size - (aligned_start_ofst - start_ofst);
+
+	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+	       start_ofst, aligned_start_ofst, size, aligned_size);
+
+	/* allocation order of 8 i.e. 128 bytes, global pool */
+	pool = gen_pool_create(8, -1);
+	if (!pool) {
+		IPAERR("Failed to create a new memory pool.\n");
+		goto fail;
+	}
+
+	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+	if (res) {
+		IPAERR("Failed to add memory to IPA pipe pool\n");
+		goto err_pool_add;
+	}
+
+	ipa_ctx->pipe_mem_pool = pool;
+	return 0;
+
+err_pool_add:
+	gen_pool_destroy(pool);
+fail:
+	return -ENOMEM;
+}
+
+/**
+ * ipa_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+	u32 vaddr;
+	int res = -1;
+
+	if (!ipa_ctx->pipe_mem_pool || !size) {
+		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+				ipa_ctx->pipe_mem_pool);
+		return res;
+	}
+
+	vaddr = gen_pool_alloc(ipa_ctx->pipe_mem_pool, size);
+
+	if (vaddr) {
+		*ofst = vaddr;
+		res = 0;
+		IPADBG("size=%u ofst=%u\n", size, vaddr);
+	} else {
+		IPAERR("size=%u failed\n", size);
+	}
+
+	return res;
+}
+
+/**
+ * ipa_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa_pipe_mem_free(u32 ofst, u32 size)
+{
+	IPADBG("size=%u ofst=%u\n", size, ofst);
+	if (ipa_ctx->pipe_mem_pool && size)
+		gen_pool_free(ipa_ctx->pipe_mem_pool, ofst, size);
+	return 0;
+}
+
+/**
+ * ipa2_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa2_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	u32 reg_val;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
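+	/*
+	 * The aggregation mode occupies bit 0 of the QCNCM register; the
+	 * remaining bits are preserved.
+	 */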
+	ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, (mode & 0x1) |
+			(reg_val & 0xfffffffe));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa2_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa2_set_qcncm_ndp_sig(char sig[3])
+{
+	u32 reg_val;
+
+	if (sig == NULL) {
+		IPAERR("bad argument for ipa2_set_qcncm_ndp_sig\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_QCNCM_OFST);
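+	/*
+	 * Pack the three signature bytes into bits 27:4 of the QCNCM
+	 * register while preserving bits 31:28 and 3:0.
+	 */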
+	ipa_write_reg(ipa_ctx->mmio, IPA_QCNCM_OFST, sig[0] << 20 |
+			(sig[1] << 12) | (sig[2] << 4) |
+			(reg_val & 0xf000000f));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa2_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa2_set_single_ndp_per_mbim(bool enable)
+{
+	u32 reg_val;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_SINGLE_NDP_MODE_OFST,
+			(enable & 0x1) | (reg_val & 0xfffffffe));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa_set_hw_timer_fix_for_mbim_aggr() - Enable/disable HW timer fix
+ * for MBIM aggregation.
+ * @enable:	[in] true for enable HW fix; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa_set_hw_timer_fix_for_mbim_aggr(bool enable)
+{
+	u32 reg_val;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	reg_val = ipa_read_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST);
+	ipa_write_reg(ipa_ctx->mmio, IPA_AGGREGATION_SPARE_REG_1_OFST,
+		(enable << IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_SHFT) |
+		(reg_val & ~IPA_AGGREGATION_HW_TIMER_FIX_MBIM_AGGR_BMSK));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return 0;
+}
+EXPORT_SYMBOL(ipa_set_hw_timer_fix_for_mbim_aggr);
+
+/**
+ * ipa_straddle_boundary() - Checks whether a memory buffer straddles a boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+	u32 next_start;
+	u32 prev_end;
+
+	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
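+	/*
+	 * next_start is @start rounded up to a multiple of @boundary;
+	 * prev_end is the highest boundary multiple below the rounded-up
+	 * @end. The buffer straddles a boundary when stepping next_start
+	 * up in @boundary increments lands exactly on prev_end.
+	 */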
+	next_start = (start + (boundary - 1)) & ~(boundary - 1);
+	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+	while (next_start < prev_end)
+		next_start += boundary;
+
+	if (next_start == prev_end)
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * ipa2_bam_reg_dump() - Dump selected BAM registers for IPA and DMA-BAM
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa2_bam_reg_dump(void)
+{
+	static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
+
+	if (__ratelimit(&_rs)) {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		pr_err("IPA BAM START\n");
+		if (ipa_ctx->ipa_hw_type < IPA_HW_v2_0) {
+			sps_get_bam_debug_info(ipa_ctx->bam_handle, 5,
+			511950, 0, 0);
+			sps_get_bam_debug_info(ipa_ctx->bam_handle, 93, 0,
+			0, 0);
+		} else {
+			sps_get_bam_debug_info(ipa_ctx->bam_handle, 93,
+			(SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_CONS))
+			|
+			SPS_BAM_PIPE(ipa_get_ep_mapping(IPA_CLIENT_USB_PROD))),
+			0, 2);
+		}
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	}
+}
+
+static void ipa_init_mem_partition_v2(void)
+{
+	IPADBG("Memory partition IPA 2\n");
+	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
+	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
+	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+		IPA_MEM_PART(nat_size));
+
+	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_RAM_OFST_START;
+	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_RAM_V4_FLT_OFST;
+	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_RAM_V4_FLT_SIZE;
+	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
+	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
+		IPA_MEM_PART(v4_flt_size_ddr));
+
+	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_RAM_V6_FLT_OFST;
+	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_RAM_V6_FLT_SIZE;
+	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
+	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
+		IPA_MEM_PART(v6_flt_size_ddr));
+
+	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_RAM_V4_RT_OFST;
+	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
+
+	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_RAM_V4_NUM_INDEX;
+	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
+
+	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_V4_MODEM_RT_INDEX_LO;
+	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_V4_MODEM_RT_INDEX_HI;
+	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_modem_rt_index_lo),
+		IPA_MEM_PART(v4_modem_rt_index_hi));
+
+	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_V4_APPS_RT_INDEX_LO;
+	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_V4_APPS_RT_INDEX_HI;
+	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_apps_rt_index_lo),
+		IPA_MEM_PART(v4_apps_rt_index_hi));
+
+	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_RAM_V4_RT_SIZE;
+	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
+	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
+		IPA_MEM_PART(v4_rt_size_ddr));
+
+	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_RAM_V6_RT_OFST;
+	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
+
+	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_RAM_V6_NUM_INDEX;
+	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
+
+	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_V6_MODEM_RT_INDEX_LO;
+	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_V6_MODEM_RT_INDEX_HI;
+	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_modem_rt_index_lo),
+		IPA_MEM_PART(v6_modem_rt_index_hi));
+
+	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_V6_APPS_RT_INDEX_LO;
+	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_V6_APPS_RT_INDEX_HI;
+	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_apps_rt_index_lo),
+		IPA_MEM_PART(v6_apps_rt_index_hi));
+
+	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_RAM_V6_RT_SIZE;
+	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
+	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
+		IPA_MEM_PART(v6_rt_size_ddr));
+
+	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_RAM_MODEM_HDR_OFST;
+	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_RAM_MODEM_HDR_SIZE;
+	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_RAM_APPS_HDR_OFST;
+	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_RAM_APPS_HDR_SIZE;
+	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_RAM_HDR_SIZE_DDR;
+	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+		IPA_MEM_PART(apps_hdr_size_ddr));
+
+	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_RAM_MODEM_OFST;
+	IPA_MEM_PART(modem_size) = IPA_MEM_v2_RAM_MODEM_SIZE;
+	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+		IPA_MEM_PART(modem_size));
+
+	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_RAM_APPS_V4_FLT_OFST;
+	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_RAM_APPS_V4_FLT_SIZE;
+	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
+
+	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_RAM_APPS_V6_FLT_OFST;
+	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_RAM_APPS_V6_FLT_SIZE;
+	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
+
+	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_RAM_UC_INFO_OFST;
+	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_RAM_UC_INFO_SIZE;
+	IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
+		IPA_MEM_PART(uc_info_size));
+
+	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_RAM_END_OFST;
+	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_RAM_APPS_V4_RT_OFST;
+	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_RAM_APPS_V4_RT_SIZE;
+	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_RAM_APPS_V6_RT_OFST;
+	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_RAM_APPS_V6_RT_SIZE;
+}
+
+static void ipa_init_mem_partition_v2_5(void)
+{
+	IPADBG("Memory partition IPA 2.5\n");
+	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
+	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
+	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+		IPA_MEM_PART(nat_size));
+
+	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_5_RAM_UC_INFO_OFST;
+	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_5_RAM_UC_INFO_SIZE;
+	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
+		IPA_MEM_PART(uc_info_size));
+
+	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_5_RAM_OFST_START;
+	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_5_RAM_V4_FLT_OFST;
+	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_5_RAM_V4_FLT_SIZE;
+	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
+	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
+		IPA_MEM_PART(v4_flt_size_ddr));
+
+	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_5_RAM_V6_FLT_OFST;
+	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_5_RAM_V6_FLT_SIZE;
+	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
+	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
+		IPA_MEM_PART(v6_flt_size_ddr));
+
+	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_5_RAM_V4_RT_OFST;
+	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
+
+	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_5_RAM_V4_NUM_INDEX;
+	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
+
+	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_LO;
+	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_5_V4_MODEM_RT_INDEX_HI;
+	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_modem_rt_index_lo),
+		IPA_MEM_PART(v4_modem_rt_index_hi));
+
+	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_LO;
+	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_5_V4_APPS_RT_INDEX_HI;
+	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_apps_rt_index_lo),
+		IPA_MEM_PART(v4_apps_rt_index_hi));
+
+	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_5_RAM_V4_RT_SIZE;
+	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
+	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
+		IPA_MEM_PART(v4_rt_size_ddr));
+
+	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_5_RAM_V6_RT_OFST;
+	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
+
+	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_5_RAM_V6_NUM_INDEX;
+	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
+
+	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_LO;
+	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_5_V6_MODEM_RT_INDEX_HI;
+	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_modem_rt_index_lo),
+		IPA_MEM_PART(v6_modem_rt_index_hi));
+
+	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_LO;
+	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_5_V6_APPS_RT_INDEX_HI;
+	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_apps_rt_index_lo),
+		IPA_MEM_PART(v6_apps_rt_index_hi));
+
+	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_5_RAM_V6_RT_SIZE;
+	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
+	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
+		IPA_MEM_PART(v6_rt_size_ddr));
+
+	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_5_RAM_MODEM_HDR_OFST;
+	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_5_RAM_MODEM_HDR_SIZE;
+	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_5_RAM_APPS_HDR_OFST;
+	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_5_RAM_APPS_HDR_SIZE;
+	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_5_RAM_HDR_SIZE_DDR;
+	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+		IPA_MEM_PART(apps_hdr_size_ddr));
+
+	IPA_MEM_PART(modem_hdr_proc_ctx_ofst) =
+		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_OFST;
+	IPA_MEM_PART(modem_hdr_proc_ctx_size) =
+		IPA_MEM_v2_5_RAM_MODEM_HDR_PROC_CTX_SIZE;
+	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(modem_hdr_proc_ctx_size));
+
+	IPA_MEM_PART(apps_hdr_proc_ctx_ofst) =
+		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_OFST;
+	IPA_MEM_PART(apps_hdr_proc_ctx_size) =
+		IPA_MEM_v2_5_RAM_APPS_HDR_PROC_CTX_SIZE;
+	IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr) =
+		IPA_MEM_RAM_HDR_PROC_CTX_SIZE_DDR;
+	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+
+	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_5_RAM_MODEM_OFST;
+	IPA_MEM_PART(modem_size) = IPA_MEM_v2_5_RAM_MODEM_SIZE;
+	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+		IPA_MEM_PART(modem_size));
+
+	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_OFST;
+	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_5_RAM_APPS_V4_FLT_SIZE;
+	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
+
+	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_OFST;
+	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_5_RAM_APPS_V6_FLT_SIZE;
+	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
+
+	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_5_RAM_END_OFST;
+	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V4_RT_OFST;
+	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_5_RAM_APPS_V4_RT_SIZE;
+	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_5_RAM_APPS_V6_RT_OFST;
+	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_5_RAM_APPS_V6_RT_SIZE;
+}
+
+static void ipa_init_mem_partition_v2_6L(void)
+{
+	IPADBG("Memory partition IPA 2.6Lite\n");
+	IPA_MEM_PART(nat_ofst) = IPA_RAM_NAT_OFST;
+	IPA_MEM_PART(nat_size) = IPA_RAM_NAT_SIZE;
+	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+		IPA_MEM_PART(nat_size));
+
+	IPA_MEM_PART(uc_info_ofst) = IPA_MEM_v2_6L_RAM_UC_INFO_OFST;
+	IPA_MEM_PART(uc_info_size) = IPA_MEM_v2_6L_RAM_UC_INFO_SIZE;
+	IPADBG("V6 UC INFO OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(uc_info_ofst),
+		IPA_MEM_PART(uc_info_size));
+
+	IPA_MEM_PART(ofst_start) = IPA_MEM_v2_6L_RAM_OFST_START;
+	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+	IPA_MEM_PART(v4_flt_ofst) = IPA_MEM_v2_6L_RAM_V4_FLT_OFST;
+	IPA_MEM_PART(v4_flt_size) = IPA_MEM_v2_6L_RAM_V4_FLT_SIZE;
+	IPA_MEM_PART(v4_flt_size_ddr) = IPA_MEM_RAM_V4_FLT_SIZE_DDR;
+	IPADBG("V4 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_ofst), IPA_MEM_PART(v4_flt_size),
+		IPA_MEM_PART(v4_flt_size_ddr));
+
+	IPA_MEM_PART(v6_flt_ofst) = IPA_MEM_v2_6L_RAM_V6_FLT_OFST;
+	IPA_MEM_PART(v6_flt_size) = IPA_MEM_v2_6L_RAM_V6_FLT_SIZE;
+	IPA_MEM_PART(v6_flt_size_ddr) = IPA_MEM_RAM_V6_FLT_SIZE_DDR;
+	IPADBG("V6 FLT OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_ofst), IPA_MEM_PART(v6_flt_size),
+		IPA_MEM_PART(v6_flt_size_ddr));
+
+	IPA_MEM_PART(v4_rt_ofst) = IPA_MEM_v2_6L_RAM_V4_RT_OFST;
+	IPADBG("V4 RT OFST 0x%x\n", IPA_MEM_PART(v4_rt_ofst));
+
+	IPA_MEM_PART(v4_num_index) = IPA_MEM_v2_6L_RAM_V4_NUM_INDEX;
+	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_num_index));
+
+	IPA_MEM_PART(v4_modem_rt_index_lo) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_LO;
+	IPA_MEM_PART(v4_modem_rt_index_hi) = IPA_MEM_v2_6L_V4_MODEM_RT_INDEX_HI;
+	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_modem_rt_index_lo),
+		IPA_MEM_PART(v4_modem_rt_index_hi));
+
+	IPA_MEM_PART(v4_apps_rt_index_lo) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_LO;
+	IPA_MEM_PART(v4_apps_rt_index_hi) = IPA_MEM_v2_6L_V4_APPS_RT_INDEX_HI;
+	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_apps_rt_index_lo),
+		IPA_MEM_PART(v4_apps_rt_index_hi));
+
+	IPA_MEM_PART(v4_rt_size) = IPA_MEM_v2_6L_RAM_V4_RT_SIZE;
+	IPA_MEM_PART(v4_rt_size_ddr) = IPA_MEM_RAM_V4_RT_SIZE_DDR;
+	IPADBG("V4 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v4_rt_size),
+		IPA_MEM_PART(v4_rt_size_ddr));
+
+	IPA_MEM_PART(v6_rt_ofst) = IPA_MEM_v2_6L_RAM_V6_RT_OFST;
+	IPADBG("V6 RT OFST 0x%x\n", IPA_MEM_PART(v6_rt_ofst));
+
+	IPA_MEM_PART(v6_num_index) = IPA_MEM_v2_6L_RAM_V6_NUM_INDEX;
+	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_num_index));
+
+	IPA_MEM_PART(v6_modem_rt_index_lo) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_LO;
+	IPA_MEM_PART(v6_modem_rt_index_hi) = IPA_MEM_v2_6L_V6_MODEM_RT_INDEX_HI;
+	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_modem_rt_index_lo),
+		IPA_MEM_PART(v6_modem_rt_index_hi));
+
+	IPA_MEM_PART(v6_apps_rt_index_lo) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_LO;
+	IPA_MEM_PART(v6_apps_rt_index_hi) = IPA_MEM_v2_6L_V6_APPS_RT_INDEX_HI;
+	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_apps_rt_index_lo),
+		IPA_MEM_PART(v6_apps_rt_index_hi));
+
+	IPA_MEM_PART(v6_rt_size) = IPA_MEM_v2_6L_RAM_V6_RT_SIZE;
+	IPA_MEM_PART(v6_rt_size_ddr) = IPA_MEM_RAM_V6_RT_SIZE_DDR;
+	IPADBG("V6 RT SIZE 0x%x DDR SIZE 0x%x\n", IPA_MEM_PART(v6_rt_size),
+		IPA_MEM_PART(v6_rt_size_ddr));
+
+	IPA_MEM_PART(modem_hdr_ofst) = IPA_MEM_v2_6L_RAM_MODEM_HDR_OFST;
+	IPA_MEM_PART(modem_hdr_size) = IPA_MEM_v2_6L_RAM_MODEM_HDR_SIZE;
+	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+	IPA_MEM_PART(apps_hdr_ofst) = IPA_MEM_v2_6L_RAM_APPS_HDR_OFST;
+	IPA_MEM_PART(apps_hdr_size) = IPA_MEM_v2_6L_RAM_APPS_HDR_SIZE;
+	IPA_MEM_PART(apps_hdr_size_ddr) = IPA_MEM_v2_6L_RAM_HDR_SIZE_DDR;
+	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+		IPA_MEM_PART(apps_hdr_size_ddr));
+
+	IPA_MEM_PART(modem_comp_decomp_ofst) =
+		IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_OFST;
+	IPA_MEM_PART(modem_comp_decomp_size) =
+		IPA_MEM_v2_6L_RAM_MODEM_COMP_DECOMP_SIZE;
+	IPADBG("MODEM COMP DECOMP OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_comp_decomp_ofst),
+		IPA_MEM_PART(modem_comp_decomp_size));
+
+	IPA_MEM_PART(modem_ofst) = IPA_MEM_v2_6L_RAM_MODEM_OFST;
+	IPA_MEM_PART(modem_size) = IPA_MEM_v2_6L_RAM_MODEM_SIZE;
+	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+		IPA_MEM_PART(modem_size));
+
+	IPA_MEM_PART(apps_v4_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_OFST;
+	IPA_MEM_PART(apps_v4_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_FLT_SIZE;
+	IPADBG("V4 APPS FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_ofst), IPA_MEM_PART(apps_v4_flt_size));
+
+	IPA_MEM_PART(apps_v6_flt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_OFST;
+	IPA_MEM_PART(apps_v6_flt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_FLT_SIZE;
+	IPADBG("V6 APPS FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_ofst), IPA_MEM_PART(apps_v6_flt_size));
+
+	IPA_MEM_PART(end_ofst) = IPA_MEM_v2_6L_RAM_END_OFST;
+	IPA_MEM_PART(apps_v4_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_OFST;
+	IPA_MEM_PART(apps_v4_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V4_RT_SIZE;
+	IPA_MEM_PART(apps_v6_rt_ofst) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_OFST;
+	IPA_MEM_PART(apps_v6_rt_size) = IPA_MEM_v2_6L_RAM_APPS_V6_RT_SIZE;
+}
+
+/**
+ * ipa_controller_shared_static_bind() - set the shared methods common to
+ * IPA HW versions 2.0, 2.5 and 2.6L
+ *
+ *  @ctrl: data structure which holds the function pointers
+ */
+void ipa_controller_shared_static_bind(struct ipa_controller *ctrl)
+{
+	ctrl->ipa_init_rt4 = _ipa_init_rt4_v2;
+	ctrl->ipa_init_rt6 = _ipa_init_rt6_v2;
+	ctrl->ipa_init_flt4 = _ipa_init_flt4_v2;
+	ctrl->ipa_init_flt6 = _ipa_init_flt6_v2;
+	ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v2_0;
+	ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v2_0;
+	ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v2_0;
+	ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v2_0;
+	ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v2_0;
+	ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v2_0;
+	ctrl->ipa_cfg_route = _ipa_cfg_route_v2_0;
+	ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v2_0;
+	ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v2_0;
+	ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v2_0;
+	ctrl->ipa_clk_rate_turbo = IPA_V2_0_CLK_RATE_TURBO;
+	ctrl->ipa_clk_rate_nominal = IPA_V2_0_CLK_RATE_NOMINAL;
+	ctrl->ipa_clk_rate_svs = IPA_V2_0_CLK_RATE_SVS;
+	ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v2_0;
+	ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v2_0;
+	ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v2_0;
+	ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v2_0;
+	ctrl->ipa_commit_flt = __ipa_commit_flt_v2;
+	ctrl->ipa_commit_rt = __ipa_commit_rt_v2;
+	ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
+	ctrl->ipa_enable_clks = _ipa_enable_clks_v2_0;
+	ctrl->ipa_disable_clks = _ipa_disable_clks_v2_0;
+	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v2_0;
+	ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v2_0;
+	ctrl->clock_scaling_bw_threshold_nominal =
+		IPA_V2_0_BW_THRESHOLD_NOMINAL_MBPS;
+	ctrl->clock_scaling_bw_threshold_turbo =
+		IPA_V2_0_BW_THRESHOLD_TURBO_MBPS;
+}
+
+/**
+ * ipa_controller_static_bind() - set the appropriate methods for the
+ * IPA driver based on the HW version
+ *
+ * @ctrl: data structure which holds the function pointers
+ * @hw_type: the HW type in use
+ *
+ * The runtime assignments below could be replaced by C99 designated
+ * struct initializers, trading static memory for initialization time.
+ */
+int ipa_controller_static_bind(struct ipa_controller *ctrl,
+		enum ipa_hw_type hw_type)
+{
+	switch (hw_type) {
+	case (IPA_HW_v1_1):
+		ipa_init_mem_partition_v2();
+		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v1_1;
+		ctrl->ipa_cfg_ep_hdr = _ipa_cfg_ep_hdr_v1_1;
+		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v1_1;
+		ctrl->ipa_cfg_ep_aggr = _ipa_cfg_ep_aggr_v1_1;
+		ctrl->ipa_cfg_ep_deaggr = _ipa_cfg_ep_deaggr_v1_1;
+		ctrl->ipa_cfg_ep_nat = _ipa_cfg_ep_nat_v1_1;
+		ctrl->ipa_cfg_ep_mode = _ipa_cfg_ep_mode_v1_1;
+		ctrl->ipa_cfg_ep_route = _ipa_cfg_ep_route_v1_1;
+		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v1_1;
+		ctrl->ipa_cfg_route = _ipa_cfg_route_v1_1;
+		ctrl->ipa_cfg_ep_status = _ipa_cfg_ep_status_v1_1;
+		ctrl->ipa_cfg_ep_cfg = _ipa_cfg_ep_cfg_v1_1;
+		ctrl->ipa_cfg_ep_metadata_mask = _ipa_cfg_ep_metadata_mask_v1_1;
+		ctrl->ipa_clk_rate_turbo = IPA_V1_1_CLK_RATE;
+		ctrl->ipa_clk_rate_nominal = IPA_V1_1_CLK_RATE;
+		ctrl->ipa_clk_rate_svs = IPA_V1_1_CLK_RATE;
+		ctrl->ipa_read_gen_reg = _ipa_read_gen_reg_v1_1;
+		ctrl->ipa_read_ep_reg = _ipa_read_ep_reg_v1_1;
+		ctrl->ipa_write_dbg_cnt = _ipa_write_dbg_cnt_v1_1;
+		ctrl->ipa_read_dbg_cnt = _ipa_read_dbg_cnt_v1_1;
+		ctrl->ipa_commit_flt = __ipa_commit_flt_v1_1;
+		ctrl->ipa_commit_rt = __ipa_commit_rt_v1_1;
+		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v1_1;
+		ctrl->ipa_enable_clks = _ipa_enable_clks_v1_1;
+		ctrl->ipa_disable_clks = _ipa_disable_clks_v1_1;
+		ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v1_1;
+		ctrl->ipa_cfg_ep_metadata = _ipa_cfg_ep_metadata_v1_1;
+		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
+		ctrl->max_holb_tmr_val = IPA_V1_MAX_HOLB_TMR_VAL;
+		break;
+	case (IPA_HW_v2_0):
+		ipa_init_mem_partition_v2();
+		ipa_controller_shared_static_bind(ctrl);
+		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_0;
+		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_0;
+		ctrl->max_holb_tmr_val = IPA_V2_0_MAX_HOLB_TMR_VAL;
+		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_0;
+		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_0;
+		ctrl->ipa_init_sram = _ipa_init_sram_v2;
+		ctrl->ipa_init_hdr = _ipa_init_hdr_v2;
+		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2;
+		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2;
+		break;
+	case (IPA_HW_v2_5):
+		ipa_init_mem_partition_v2_5();
+		ipa_controller_shared_static_bind(ctrl);
+		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_5;
+		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_5;
+		ctrl->max_holb_tmr_val = IPA_V2_5_MAX_HOLB_TMR_VAL;
+		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_5;
+		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_5;
+		ctrl->ipa_init_sram = _ipa_init_sram_v2_5;
+		ctrl->ipa_init_hdr = _ipa_init_hdr_v2_5;
+		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_5;
+		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_5;
+		break;
+	case (IPA_HW_v2_6L):
+		ipa_init_mem_partition_v2_6L();
+		ipa_controller_shared_static_bind(ctrl);
+		ctrl->ipa_cfg_ep_holb = _ipa_cfg_ep_holb_v2_6L;
+		ctrl->ipa_reg_base_ofst = IPA_REG_BASE_OFST_v2_6L;
+		ctrl->max_holb_tmr_val = IPA_V2_6L_MAX_HOLB_TMR_VAL;
+		ctrl->ipa_cfg_ep_hdr_ext = _ipa_cfg_ep_hdr_ext_v2_6L;
+		ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v2_6L;
+		ctrl->ipa_init_sram = _ipa_init_sram_v2_6L;
+		ctrl->ipa_init_hdr = _ipa_init_hdr_v2_6L;
+		ctrl->ipa_commit_hdr = __ipa_commit_hdr_v2_6L;
+		ctrl->ipa_generate_rt_hw_rule = __ipa_generate_rt_hw_rule_v2_6L;
+		break;
+	default:
+		return -EPERM;
+	}
+
+	return 0;
+}
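+
+/*
+ * For reference, the C99 alternative mentioned in the kernel-doc comment
+ * of ipa_controller_static_bind() above would look roughly like the
+ * hypothetical sketch below (only a few fields are shown, and the table
+ * name is made up for illustration); it trades a static per-HW-version
+ * table for the run-time assignments done here:
+ *
+ *	static const struct ipa_controller ipa_ctrl_v2_0 = {
+ *		.ipa_init_rt4		= _ipa_init_rt4_v2,
+ *		.ipa_cfg_ep_hdr		= _ipa_cfg_ep_hdr_v2_0,
+ *		.ipa_clk_rate_turbo	= IPA_V2_0_CLK_RATE_TURBO,
+ *	};
+ */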
+
+void ipa_skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+
+int ipa_id_alloc(void *ptr)
+{
+	int id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa_ctx->idr_lock);
+	id = idr_alloc(&ipa_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
+	spin_unlock(&ipa_ctx->idr_lock);
+	idr_preload_end();
+
+	return id;
+}
+
+void *ipa_id_find(u32 id)
+{
+	void *ptr;
+
+	spin_lock(&ipa_ctx->idr_lock);
+	ptr = idr_find(&ipa_ctx->ipa_idr, id);
+	spin_unlock(&ipa_ctx->idr_lock);
+
+	return ptr;
+}
+
+void ipa_id_remove(u32 id)
+{
+	spin_lock(&ipa_ctx->idr_lock);
+	idr_remove(&ipa_ctx->ipa_idr, id);
+	spin_unlock(&ipa_ctx->idr_lock);
+}
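+
+/*
+ * Illustrative lifetime of an object tracked through the three idr
+ * helpers above ("obj" and "hdl" are placeholder names, not driver
+ * symbols):
+ *
+ *	hdl = ipa_id_alloc(obj);	returns a handle or a negative errno
+ *	obj = ipa_id_find(hdl);		NULL once the handle is removed
+ *	ipa_id_remove(hdl);		drops the mapping; obj is not freed
+ */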
+
+static void ipa_tag_free_buf(void *user1, int user2)
+{
+	kfree(user1);
+}
+
+static void ipa_tag_free_skb(void *user1, int user2)
+{
+	dev_kfree_skb_any((struct sk_buff *)user1);
+}
+
+#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
+
+/**
+ * ipa_tag_process() - Initiate a tag process, incorporating the input
+ * descriptors
+ *
+ * @desc:	descriptors with commands for IC
+ * @descs_num:	number of descriptors in the above array
+ * @timeout:	time (in jiffies) to wait for the TAG response
+ *
+ * Note: the descriptors are copied (if there is room); the caller remains
+ * responsible for freeing its own descriptors afterwards.
+ *
+ * Return: 0 on success or negative in case of failure
+ */
+int ipa_tag_process(struct ipa_desc desc[],
+	int descs_num,
+	unsigned long timeout)
+{
+	struct ipa_sys_context *sys;
+	struct ipa_desc *tag_desc;
+	int desc_idx = 0;
+	struct ipa_ip_packet_init *pkt_init;
+	struct ipa_register_write *reg_write_nop;
+	struct ipa_ip_packet_tag_status *status;
+	int i;
+	struct sk_buff *dummy_skb;
+	int res;
+	struct ipa_tag_completion *comp;
+	int ep_idx;
+
+	/* Not enough room for the required descriptors for the tag process */
+	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
+		IPAERR("up to %d descriptors are allowed (received %d)\n",
+		       IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
+		       descs_num);
+		return -ENOMEM;
+	}
+
+	ep_idx = ipa2_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+	sys = ipa_ctx->ep[ep_idx].sys;
+
+	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+	if (!tag_desc) {
+		IPAERR("failed to allocate memory\n");
+		res = -ENOMEM;
+		goto fail_alloc_desc;
+	}
+
+	/* IP_PACKET_INIT IC for tag status to be sent to apps */
+	pkt_init = kzalloc(sizeof(*pkt_init), GFP_KERNEL);
+	if (!pkt_init) {
+		IPAERR("failed to allocate memory\n");
+		res = -ENOMEM;
+		goto fail_alloc_pkt_init;
+	}
+
+	pkt_init->destination_pipe_index =
+		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+	tag_desc[desc_idx].opcode = IPA_IP_PACKET_INIT;
+	tag_desc[desc_idx].pyld = pkt_init;
+	tag_desc[desc_idx].len = sizeof(*pkt_init);
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa_tag_free_buf;
+	tag_desc[desc_idx].user1 = pkt_init;
+	desc_idx++;
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	reg_write_nop = kzalloc(sizeof(*reg_write_nop), GFP_KERNEL);
+	if (!reg_write_nop) {
+		IPAERR("no mem\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+
+	reg_write_nop->skip_pipeline_clear = 0;
+	reg_write_nop->value_mask = 0x0;
+
+	tag_desc[desc_idx].opcode = IPA_REGISTER_WRITE;
+	tag_desc[desc_idx].pyld = reg_write_nop;
+	tag_desc[desc_idx].len = sizeof(*reg_write_nop);
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa_tag_free_buf;
+	tag_desc[desc_idx].user1 = reg_write_nop;
+	desc_idx++;
+
+	/* status IC */
+	status = kzalloc(sizeof(*status), GFP_KERNEL);
+	if (!status) {
+		IPAERR("no mem\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+
+	status->tag_f_2 = IPA_COOKIE;
+
+	tag_desc[desc_idx].opcode = IPA_IP_PACKET_TAG_STATUS;
+	tag_desc[desc_idx].pyld = status;
+	tag_desc[desc_idx].len = sizeof(*status);
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa_tag_free_buf;
+	tag_desc[desc_idx].user1 = status;
+	desc_idx++;
+
+	/* Copy the required descriptors from the client now */
+	if (desc) {
+		memcpy(&(tag_desc[desc_idx]), desc, descs_num *
+			sizeof(struct ipa_desc));
+		desc_idx += descs_num;
+	}
+
+	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+	if (!comp) {
+		IPAERR("no mem\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	init_completion(&comp->comp);
+
+	/* completion needs to be released from both here and rx handler */
+	atomic_set(&comp->cnt, 2);
+
+	/*
+	 * dummy packet to send to IPA; its payload is a pointer to the
+	 * completion object
+	 */
+	dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+	if (!dummy_skb) {
+		IPAERR("failed to allocate memory\n");
+		res = -ENOMEM;
+		goto fail_free_skb;
+	}
+
+	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+
+	tag_desc[desc_idx].pyld = dummy_skb->data;
+	tag_desc[desc_idx].len = dummy_skb->len;
+	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
+	tag_desc[desc_idx].callback = ipa_tag_free_skb;
+	tag_desc[desc_idx].user1 = dummy_skb;
+	desc_idx++;
+
+	/* send all descriptors to IPA with single EOT */
+	res = ipa_send(sys, desc_idx, tag_desc, true);
+	if (res) {
+		IPAERR("failed to send TAG packets %d\n", res);
+		res = -ENOMEM;
+		goto fail_send;
+	}
+	kfree(tag_desc);
+	tag_desc = NULL;
+
+	IPADBG("waiting for TAG response\n");
+	res = wait_for_completion_timeout(&comp->comp, timeout);
+	if (res == 0) {
+		IPAERR("timeout (%lu jiffies) waiting for TAG response\n",
+			timeout);
+		WARN_ON(1);
+		if (atomic_dec_return(&comp->cnt) == 0)
+			kfree(comp);
+		return -ETIME;
+	}
+
+	IPADBG("TAG response arrived!\n");
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
+	/* sleep for short period to ensure IPA wrote all packets to BAM */
+	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+	return 0;
+
+fail_send:
+	dev_kfree_skb_any(dummy_skb);
+	desc_idx--;
+fail_free_skb:
+	kfree(comp);
+fail_free_desc:
+	/*
+	 * Free only the descriptors allocated here
+	 * [pkt_init, reg_write_nop, status]; the caller is responsible
+	 * for freeing its own allocations in case of failure.
+	 * The min() is needed because we may have failed during one of
+	 * the initial allocations above.
+	 */
+	for (i = 0; i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS-1, desc_idx); i++)
+		kfree(tag_desc[i].user1);
+
+fail_alloc_pkt_init:
+	kfree(tag_desc);
+fail_alloc_desc:
+	return res;
+}
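+
+/*
+ * Summary of the descriptor chain built by ipa_tag_process() above, in the
+ * order it is handed to ipa_send(): IP_PACKET_INIT, a no-op REGISTER_WRITE
+ * (to ensure the pipeline is empty), IP_PACKET_TAG_STATUS, the caller's
+ * descriptors (if any), and finally a data descriptor whose payload is the
+ * pointer to the completion object that the receive handler uses to signal
+ * that the TAG status arrived.
+ */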
+
+/**
+ * ipa_tag_generate_force_close_desc() - generate descriptors for force close
+ *					 immediate command
+ *
+ * @desc: descriptors for IC
+ * @desc_size: desc array size
+ * @start_pipe: first pipe to close aggregation
+ * @end_pipe: last (non-inclusive) pipe to close aggregation
+ *
+ * Return: number of descriptors written or negative in case of failure
+ */
+static int ipa_tag_generate_force_close_desc(struct ipa_desc desc[],
+	int desc_size, int start_pipe, int end_pipe)
+{
+	int i;
+	u32 aggr_init;
+	int desc_idx = 0;
+	int res;
+	struct ipa_register_write *reg_write_agg_close;
+
+	for (i = start_pipe; i < end_pipe; i++) {
+		aggr_init = ipa_read_reg(ipa_ctx->mmio,
+			IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i));
+		if (((aggr_init & IPA_ENDP_INIT_AGGR_N_AGGR_EN_BMSK) >>
+			IPA_ENDP_INIT_AGGR_N_AGGR_EN_SHFT) != IPA_ENABLE_AGGR)
+			continue;
+		IPADBG("Force close ep: %d\n", i);
+		if (desc_idx + 1 > desc_size) {
+			IPAERR("Internal error - no descriptors\n");
+			res = -EFAULT;
+			goto fail_no_desc;
+		}
+
+		reg_write_agg_close = kzalloc(sizeof(*reg_write_agg_close),
+			GFP_KERNEL);
+		if (!reg_write_agg_close) {
+			IPAERR("no mem\n");
+			res = -ENOMEM;
+			goto fail_alloc_reg_write_agg_close;
+		}
+
+		reg_write_agg_close->skip_pipeline_clear = 0;
+		reg_write_agg_close->offset = IPA_ENDP_INIT_AGGR_N_OFST_v2_0(i);
+		reg_write_agg_close->value =
+			(1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+			IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+		reg_write_agg_close->value_mask =
+			IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+			IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+		desc[desc_idx].opcode = IPA_REGISTER_WRITE;
+		desc[desc_idx].pyld = reg_write_agg_close;
+		desc[desc_idx].len = sizeof(*reg_write_agg_close);
+		desc[desc_idx].type = IPA_IMM_CMD_DESC;
+		desc[desc_idx].callback = ipa_tag_free_buf;
+		desc[desc_idx].user1 = reg_write_agg_close;
+		desc_idx++;
+	}
+
+	return desc_idx;
+
+fail_alloc_reg_write_agg_close:
+	for (i = 0; i < desc_idx; i++)
+		kfree(desc[i].user1);
+fail_no_desc:
+	return res;
+}
+
+/**
+ * ipa_tag_aggr_force_close() - Force close aggregation
+ *
+ * @pipe_num: pipe number or -1 for all pipes
+ *
+ * Return: 0 on success or negative in case of failure
+ */
+int ipa_tag_aggr_force_close(int pipe_num)
+{
+	struct ipa_desc *desc;
+	int res = -1;
+	int start_pipe;
+	int end_pipe;
+	int num_descs;
+	int num_aggr_descs;
+
+	if (pipe_num < -1 || pipe_num >= (int)ipa_ctx->ipa_num_pipes) {
+		IPAERR("Invalid pipe number %d\n", pipe_num);
+		return -EINVAL;
+	}
+
+	if (pipe_num == -1) {
+		start_pipe = 0;
+		end_pipe = ipa_ctx->ipa_num_pipes;
+	} else {
+		start_pipe = pipe_num;
+		end_pipe = pipe_num + 1;
+	}
+
+	num_descs = end_pipe - start_pipe;
+
+	desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	/* Force close aggregation on all valid pipes with aggregation */
+	num_aggr_descs = ipa_tag_generate_force_close_desc(desc, num_descs,
+						start_pipe, end_pipe);
+	if (num_aggr_descs < 0) {
+		IPAERR("ipa_tag_generate_force_close_desc failed %d\n",
+			num_aggr_descs);
+		goto fail_free_desc;
+	}
+
+	res = ipa_tag_process(desc, num_aggr_descs,
+			      IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
+
+fail_free_desc:
+	kfree(desc);
+
+	return res;
+}
+
+/**
+ * ipa2_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa2_is_ready(void)
+{
+	return ipa_ctx != NULL;
+}
+
+/**
+ * ipa2_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa2_is_client_handle_valid(u32 clnt_hdl)
+{
+	if (unlikely(!ipa_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return false;
+	}
+
+	if (clnt_hdl < ipa_ctx->ipa_num_pipes)
+		return true;
+	return false;
+}
+
+/**
+ * ipa2_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa2_proxy_clk_unvote(void)
+{
+	if (ipa2_is_ready() && ipa_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+		ipa_ctx->q6_proxy_clk_vote_valid = false;
+	}
+}
+
+/**
+ * ipa2_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa2_proxy_clk_vote(void)
+{
+	if (ipa2_is_ready() && !ipa_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+		ipa_ctx->q6_proxy_clk_vote_valid = true;
+	}
+}
+
+
+/**
+ * ipa2_get_smem_restr_bytes()- Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa2_get_smem_restr_bytes(void)
+{
+	if (ipa_ctx)
+		return ipa_ctx->smem_restricted_bytes;
+
+	IPAERR("IPA Driver not initialized\n");
+
+	return 0;
+}
+
+/**
+ * ipa2_get_modem_cfg_emb_pipe_flt()- Return ipa_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa2_get_modem_cfg_emb_pipe_flt(void)
+{
+	if (ipa_ctx)
+		return ipa_ctx->modem_cfg_emb_pipe_flt;
+
+	IPAERR("IPA driver has not been initialized\n");
+
+	return false;
+}
+
+/**
+ * ipa2_get_transport_type()- Return IPA_TRANSPORT_TYPE_SPS
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa2_get_transport_type(void)
+{
+	return IPA_TRANSPORT_TYPE_SPS;
+}
+
+u32 ipa_get_num_pipes(void)
+{
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L)
+		return ipa_read_reg(ipa_ctx->mmio, IPA_ENABLED_PIPES_OFST);
+	else
+		return IPA_MAX_NUM_PIPES;
+}
+EXPORT_SYMBOL(ipa_get_num_pipes);
+
+/**
+ * ipa2_disable_apps_wan_cons_deaggr()-
+ * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa2_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+	int res = -1;
+
+	/* check whether the IPA HW can support the requested aggregation */
+	if ((agg_size >> 10) > IPA_AGGR_BYTE_LIMIT) {
+		IPAWANERR("agg_size exceeds IPA-AGG byte limit (%d)\n",
+			IPA_AGGR_BYTE_LIMIT);
+		return res;
+	}
+	if (agg_count > IPA_AGGR_PKT_LIMIT) {
+		IPAWANERR("agg_count exceeds IPA-AGG pkt limit (%d)\n",
+			IPA_AGGR_PKT_LIMIT);
+		return res;
+	}
+
+	if (ipa_ctx) {
+		ipa_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+		return 0;
+	}
+	return res;
+}
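+
+/*
+ * Note on the checks above (an interpretation, not stated in the code):
+ * the agg_size >> 10 comparison suggests that agg_size is supplied in
+ * bytes while IPA_AGGR_BYTE_LIMIT is expressed in KB, so e.g. a
+ * 32768-byte request is checked as 32 against the limit; agg_count is
+ * compared directly against IPA_AGGR_PKT_LIMIT.
+ */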
+
+static struct ipa_gsi_ep_config *ipa2_get_gsi_ep_info(int ipa_ep_idx)
+{
+	IPAERR("Not supported for IPA 2.x\n");
+	return NULL;
+}
+
+static int ipa2_stop_gsi_channel(u32 clnt_hdl)
+{
+	IPAERR("Not supported for IPA 2.x\n");
+	return -EFAULT;
+}
+
+static void *ipa2_get_ipc_logbuf(void)
+{
+	/* no support for IPC logging in IPAv2 */
+	return NULL;
+}
+
+static void *ipa2_get_ipc_logbuf_low(void)
+{
+	/* no support for IPC logging in IPAv2 */
+	return NULL;
+}
+
+static void ipa2_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+	*holb = ipa_ctx->ep[ep_idx].holb;
+}
+
+static int ipa2_generate_tag_process(void)
+{
+	int res;
+
+	res = ipa_tag_process(NULL, 0, HZ);
+	if (res)
+		IPAERR("TAG process failed\n");
+
+	return res;
+}
+
+static void ipa2_set_tag_process_before_gating(bool val)
+{
+	ipa_ctx->tag_process_before_gating = val;
+}
+
+int ipa2_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl)
+{
+	if (ipa_hw_type < IPA_HW_v2_0 || ipa_hw_type >= IPA_HW_v3_0) {
+		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	api_ctrl->ipa_connect = ipa2_connect;
+	api_ctrl->ipa_disconnect = ipa2_disconnect;
+	api_ctrl->ipa_reset_endpoint = ipa2_reset_endpoint;
+	api_ctrl->ipa_clear_endpoint_delay = ipa2_clear_endpoint_delay;
+	api_ctrl->ipa_disable_endpoint = ipa2_disable_endpoint;
+	api_ctrl->ipa_cfg_ep = ipa2_cfg_ep;
+	api_ctrl->ipa_cfg_ep_nat = ipa2_cfg_ep_nat;
+	api_ctrl->ipa_cfg_ep_hdr = ipa2_cfg_ep_hdr;
+	api_ctrl->ipa_cfg_ep_hdr_ext = ipa2_cfg_ep_hdr_ext;
+	api_ctrl->ipa_cfg_ep_mode = ipa2_cfg_ep_mode;
+	api_ctrl->ipa_cfg_ep_aggr = ipa2_cfg_ep_aggr;
+	api_ctrl->ipa_cfg_ep_deaggr = ipa2_cfg_ep_deaggr;
+	api_ctrl->ipa_cfg_ep_route = ipa2_cfg_ep_route;
+	api_ctrl->ipa_cfg_ep_holb = ipa2_cfg_ep_holb;
+	api_ctrl->ipa_get_holb = ipa2_get_holb;
+	api_ctrl->ipa_set_tag_process_before_gating =
+			ipa2_set_tag_process_before_gating;
+	api_ctrl->ipa_cfg_ep_cfg = ipa2_cfg_ep_cfg;
+	api_ctrl->ipa_cfg_ep_metadata_mask = ipa2_cfg_ep_metadata_mask;
+	api_ctrl->ipa_cfg_ep_holb_by_client = ipa2_cfg_ep_holb_by_client;
+	api_ctrl->ipa_cfg_ep_ctrl = ipa2_cfg_ep_ctrl;
+	api_ctrl->ipa_add_hdr = ipa2_add_hdr;
+	api_ctrl->ipa_del_hdr = ipa2_del_hdr;
+	api_ctrl->ipa_commit_hdr = ipa2_commit_hdr;
+	api_ctrl->ipa_reset_hdr = ipa2_reset_hdr;
+	api_ctrl->ipa_get_hdr = ipa2_get_hdr;
+	api_ctrl->ipa_put_hdr = ipa2_put_hdr;
+	api_ctrl->ipa_copy_hdr = ipa2_copy_hdr;
+	api_ctrl->ipa_add_hdr_proc_ctx = ipa2_add_hdr_proc_ctx;
+	api_ctrl->ipa_del_hdr_proc_ctx = ipa2_del_hdr_proc_ctx;
+	api_ctrl->ipa_add_rt_rule = ipa2_add_rt_rule;
+	api_ctrl->ipa_del_rt_rule = ipa2_del_rt_rule;
+	api_ctrl->ipa_commit_rt = ipa2_commit_rt;
+	api_ctrl->ipa_reset_rt = ipa2_reset_rt;
+	api_ctrl->ipa_get_rt_tbl = ipa2_get_rt_tbl;
+	api_ctrl->ipa_put_rt_tbl = ipa2_put_rt_tbl;
+	api_ctrl->ipa_query_rt_index = ipa2_query_rt_index;
+	api_ctrl->ipa_mdfy_rt_rule = ipa2_mdfy_rt_rule;
+	api_ctrl->ipa_add_flt_rule = ipa2_add_flt_rule;
+	api_ctrl->ipa_del_flt_rule = ipa2_del_flt_rule;
+	api_ctrl->ipa_mdfy_flt_rule = ipa2_mdfy_flt_rule;
+	api_ctrl->ipa_commit_flt = ipa2_commit_flt;
+	api_ctrl->ipa_reset_flt = ipa2_reset_flt;
+	api_ctrl->allocate_nat_device = ipa2_allocate_nat_device;
+	api_ctrl->ipa_nat_init_cmd = ipa2_nat_init_cmd;
+	api_ctrl->ipa_nat_dma_cmd = ipa2_nat_dma_cmd;
+	api_ctrl->ipa_nat_del_cmd = ipa2_nat_del_cmd;
+	api_ctrl->ipa_send_msg = ipa2_send_msg;
+	api_ctrl->ipa_register_pull_msg = ipa2_register_pull_msg;
+	api_ctrl->ipa_deregister_pull_msg = ipa2_deregister_pull_msg;
+	api_ctrl->ipa_register_intf = ipa2_register_intf;
+	api_ctrl->ipa_register_intf_ext = ipa2_register_intf_ext;
+	api_ctrl->ipa_deregister_intf = ipa2_deregister_intf;
+	api_ctrl->ipa_set_aggr_mode = ipa2_set_aggr_mode;
+	api_ctrl->ipa_set_qcncm_ndp_sig = ipa2_set_qcncm_ndp_sig;
+	api_ctrl->ipa_set_single_ndp_per_mbim = ipa2_set_single_ndp_per_mbim;
+	api_ctrl->ipa_tx_dp = ipa2_tx_dp;
+	api_ctrl->ipa_tx_dp_mul = ipa2_tx_dp_mul;
+	api_ctrl->ipa_free_skb = ipa2_free_skb;
+	api_ctrl->ipa_setup_sys_pipe = ipa2_setup_sys_pipe;
+	api_ctrl->ipa_teardown_sys_pipe = ipa2_teardown_sys_pipe;
+	api_ctrl->ipa_sys_update_gsi_hdls = ipa2_sys_update_gsi_hdls;
+	api_ctrl->ipa_sys_setup = ipa2_sys_setup;
+	api_ctrl->ipa_sys_teardown = ipa2_sys_teardown;
+	api_ctrl->ipa_connect_wdi_pipe = ipa2_connect_wdi_pipe;
+	api_ctrl->ipa_disconnect_wdi_pipe = ipa2_disconnect_wdi_pipe;
+	api_ctrl->ipa_enable_wdi_pipe = ipa2_enable_wdi_pipe;
+	api_ctrl->ipa_disable_wdi_pipe = ipa2_disable_wdi_pipe;
+	api_ctrl->ipa_resume_wdi_pipe = ipa2_resume_wdi_pipe;
+	api_ctrl->ipa_suspend_wdi_pipe = ipa2_suspend_wdi_pipe;
+	api_ctrl->ipa_get_wdi_stats = ipa2_get_wdi_stats;
+	api_ctrl->ipa_get_smem_restr_bytes = ipa2_get_smem_restr_bytes;
+	api_ctrl->ipa_uc_wdi_get_dbpa = ipa2_uc_wdi_get_dbpa;
+	api_ctrl->ipa_uc_reg_rdyCB = ipa2_uc_reg_rdyCB;
+	api_ctrl->ipa_uc_dereg_rdyCB = ipa2_uc_dereg_rdyCB;
+	api_ctrl->ipa_create_wdi_mapping = ipa2_create_wdi_mapping;
+	api_ctrl->ipa_release_wdi_mapping = ipa2_release_wdi_mapping;
+	api_ctrl->teth_bridge_init = ipa2_teth_bridge_init;
+	api_ctrl->teth_bridge_disconnect = ipa2_teth_bridge_disconnect;
+	api_ctrl->teth_bridge_connect = ipa2_teth_bridge_connect;
+	api_ctrl->ipa_set_client = ipa2_set_client;
+	api_ctrl->ipa_get_client = ipa2_get_client;
+	api_ctrl->ipa_get_client_uplink = ipa2_get_client_uplink;
+	api_ctrl->ipa_dma_init = ipa2_dma_init;
+	api_ctrl->ipa_dma_enable = ipa2_dma_enable;
+	api_ctrl->ipa_dma_disable = ipa2_dma_disable;
+	api_ctrl->ipa_dma_sync_memcpy = ipa2_dma_sync_memcpy;
+	api_ctrl->ipa_dma_async_memcpy = ipa2_dma_async_memcpy;
+	api_ctrl->ipa_dma_uc_memcpy = ipa2_dma_uc_memcpy;
+	api_ctrl->ipa_dma_destroy = ipa2_dma_destroy;
+	api_ctrl->ipa_mhi_init_engine = ipa2_mhi_init_engine;
+	api_ctrl->ipa_connect_mhi_pipe = ipa2_connect_mhi_pipe;
+	api_ctrl->ipa_disconnect_mhi_pipe = ipa2_disconnect_mhi_pipe;
+	api_ctrl->ipa_uc_mhi_reset_channel = ipa2_uc_mhi_reset_channel;
+	api_ctrl->ipa_mhi_sps_channel_empty = ipa2_mhi_sps_channel_empty;
+	api_ctrl->ipa_generate_tag_process = ipa2_generate_tag_process;
+	api_ctrl->ipa_disable_sps_pipe = ipa2_disable_sps_pipe;
+	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+			qmi_enable_force_clear_datapath_send;
+	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+			qmi_disable_force_clear_datapath_send;
+	api_ctrl->ipa_mhi_reset_channel_internal =
+			ipa2_mhi_reset_channel_internal;
+	api_ctrl->ipa_mhi_start_channel_internal =
+			ipa2_mhi_start_channel_internal;
+	api_ctrl->ipa_mhi_resume_channels_internal =
+			ipa2_mhi_resume_channels_internal;
+	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+			ipa2_uc_mhi_send_dl_ul_sync_info;
+	api_ctrl->ipa_uc_mhi_init = ipa2_uc_mhi_init;
+	api_ctrl->ipa_uc_mhi_suspend_channel = ipa2_uc_mhi_suspend_channel;
+	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+			ipa2_uc_mhi_stop_event_update_channel;
+	api_ctrl->ipa_uc_mhi_cleanup = ipa2_uc_mhi_cleanup;
+	api_ctrl->ipa_uc_mhi_print_stats = ipa2_uc_mhi_print_stats;
+	api_ctrl->ipa_uc_state_check = ipa2_uc_state_check;
+	api_ctrl->ipa_write_qmap_id = ipa2_write_qmap_id;
+	api_ctrl->ipa_add_interrupt_handler = ipa2_add_interrupt_handler;
+	api_ctrl->ipa_remove_interrupt_handler = ipa2_remove_interrupt_handler;
+	api_ctrl->ipa_restore_suspend_handler = ipa2_restore_suspend_handler;
+	api_ctrl->ipa_bam_reg_dump = ipa2_bam_reg_dump;
+	api_ctrl->ipa_get_ep_mapping = ipa2_get_ep_mapping;
+	api_ctrl->ipa_is_ready = ipa2_is_ready;
+	api_ctrl->ipa_proxy_clk_vote = ipa2_proxy_clk_vote;
+	api_ctrl->ipa_proxy_clk_unvote = ipa2_proxy_clk_unvote;
+	api_ctrl->ipa_is_client_handle_valid = ipa2_is_client_handle_valid;
+	api_ctrl->ipa_get_client_mapping = ipa2_get_client_mapping;
+	api_ctrl->ipa_get_rm_resource_from_ep = ipa2_get_rm_resource_from_ep;
+	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
+		ipa2_get_modem_cfg_emb_pipe_flt;
+	api_ctrl->ipa_get_transport_type = ipa2_get_transport_type;
+	api_ctrl->ipa_ap_suspend = ipa2_ap_suspend;
+	api_ctrl->ipa_ap_resume = ipa2_ap_resume;
+	api_ctrl->ipa_get_smmu_domain = ipa2_get_smmu_domain;
+	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
+		ipa2_disable_apps_wan_cons_deaggr;
+	api_ctrl->ipa_get_dma_dev = ipa2_get_dma_dev;
+	api_ctrl->ipa_get_gsi_ep_info = ipa2_get_gsi_ep_info;
+	api_ctrl->ipa_stop_gsi_channel = ipa2_stop_gsi_channel;
+	api_ctrl->ipa_register_ipa_ready_cb = ipa2_register_ipa_ready_cb;
+	api_ctrl->ipa_inc_client_enable_clks = ipa2_inc_client_enable_clks;
+	api_ctrl->ipa_dec_client_disable_clks = ipa2_dec_client_disable_clks;
+	api_ctrl->ipa_inc_client_enable_clks_no_block =
+		ipa2_inc_client_enable_clks_no_block;
+	api_ctrl->ipa_suspend_resource_no_block =
+		ipa2_suspend_resource_no_block;
+	api_ctrl->ipa_resume_resource = ipa2_resume_resource;
+	api_ctrl->ipa_suspend_resource_sync = ipa2_suspend_resource_sync;
+	api_ctrl->ipa_set_required_perf_profile =
+		ipa2_set_required_perf_profile;
+	api_ctrl->ipa_get_ipc_logbuf = ipa2_get_ipc_logbuf;
+	api_ctrl->ipa_get_ipc_logbuf_low = ipa2_get_ipc_logbuf_low;
+	api_ctrl->ipa_rx_poll = ipa2_rx_poll;
+	api_ctrl->ipa_recycle_wan_skb = ipa2_recycle_wan_skb;
+	api_ctrl->ipa_setup_uc_ntn_pipes = ipa2_setup_uc_ntn_pipes;
+	api_ctrl->ipa_tear_down_uc_offload_pipes =
+		ipa2_tear_down_uc_offload_pipes;
+	api_ctrl->ipa_get_pdev = ipa2_get_pdev;
+
+	return 0;
+}
+
+/**
+ * ipa_get_sys_yellow_wm()- Return yellow WM value for IPA SYS pipes.
+ *
+ * Return value: the IPA_YELLOW_MARKER_SYS_CFG_OFST register value on
+ *               IPA_HW_v2_6L; otherwise IPA_DEFAULT_SYS_YELLOW_WM scaled
+ *               by the pipe's RX buffer size (0 if @sys is NULL).
+ */
+u32 ipa_get_sys_yellow_wm(struct ipa_sys_context *sys)
+{
+	if (ipa_ctx->ipa_hw_type == IPA_HW_v2_6L) {
+		return ipa_read_reg(ipa_ctx->mmio,
+			IPA_YELLOW_MARKER_SYS_CFG_OFST);
+	} else {
+		if (!sys)
+			return 0;
+
+		return IPA_DEFAULT_SYS_YELLOW_WM * sys->rx_buff_sz;
+	}
+}
+EXPORT_SYMBOL(ipa_get_sys_yellow_wm);
+
+void ipa_suspend_apps_pipes(bool suspend)
+{
+	struct ipa_ep_cfg_ctrl cfg;
+	int ipa_ep_idx;
+	u32 lan_empty = 0, wan_empty = 0;
+	int ret;
+	struct sps_event_notify notify;
+	struct ipa_ep_context *ep;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.ipa_ep_suspend = suspend;
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		/* Check if the pipes are empty. */
+		ret = sps_is_pipe_empty(ep->ep_hdl, &lan_empty);
+		if (ret) {
+			IPAERR("%s: sps_is_pipe_empty failed with %d\n",
+				__func__, ret);
+		}
+		if (!lan_empty) {
+			IPADBG("LAN Cons is not-empty. Enter poll mode.\n");
+			notify.user = ep->sys;
+			notify.event_id = SPS_EVENT_EOT;
+			if (ep->sys->sps_callback)
+				ep->sys->sps_callback(&notify);
+		}
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	/* Considering the case for SSR. */
+	if (ipa_ep_idx == -1) {
+		IPADBG("Invalid client.\n");
+		return;
+	}
+	ep = &ipa_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		ipa2_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		/* Check if the pipes are empty. */
+		ret = sps_is_pipe_empty(ep->ep_hdl, &wan_empty);
+		if (ret) {
+			IPAERR("%s: sps_is_pipe_empty failed with %d\n",
+				__func__, ret);
+		}
+		if (!wan_empty) {
+			IPADBG("WAN Cons is not-empty. Enter poll mode.\n");
+			notify.user = ep->sys;
+			notify.event_id = SPS_EVENT_EOT;
+			if (ep->sys->sps_callback)
+				ep->sys->sps_callback(&notify);
+		}
+	}
+}
+
+/**
+ * ipa2_get_pdev() - return a pointer to IPA dev struct
+ *
+ * Return value: a pointer to IPA dev struct
+ *
+ */
+struct device *ipa2_get_pdev(void)
+{
+	if (!ipa_ctx)
+		return NULL;
+
+	return ipa_ctx->pdev;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
new file mode 100644
index 0000000..02e2c5f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -0,0 +1,2897 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Transport Network Driver.
+ */
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "ipa_qmi_service.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include <linux/ipa.h>
+#include <uapi/linux/net_map.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <net/rmnet_config.h>
+
+#include "ipa_trace.h"
+
+#define WWAN_METADATA_SHFT 24
+#define WWAN_METADATA_MASK 0xFF000000
+#define WWAN_DATA_LEN 2000
+#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
+#define HEADROOM_FOR_QMAP   8 /* for mux header */
+#define TAILROOM            0 /* for padding by mux layer */
+#define MAX_NUM_OF_MUX_CHANNEL  10 /* max mux channels */
+#define UL_FILTER_RULE_HANDLE_START 69
+#define DEFAULT_OUTSTANDING_HIGH_CTL 96
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+
+#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+#define IPA_WWAN_DEVICE_COUNT (1)
+
+#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
+
+#define INVALID_MUX_ID 0xFF
+#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
+#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
+#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+
+#define NAPI_WEIGHT 60
+
+static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
+static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
+static u32 qmap_hdr_hdl, dflt_v4_wan_rt_hdl, dflt_v6_wan_rt_hdl;
+static struct rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
+static int num_q6_rule, old_num_q6_rule;
+static int rmnet_index;
+static bool egress_set, a7_ul_flt_set;
+static struct workqueue_struct *ipa_rm_q6_workqueue; /* IPA_RM workqueue*/
+static atomic_t is_initialized;
+static atomic_t is_ssr;
+static void *subsys_notify_handle;
+
+u32 apps_to_ipa_hdl, ipa_to_apps_hdl; /* handles obtained from IPA */
+static struct mutex ipa_to_apps_pipe_handle_guard;
+static int wwan_add_ul_flt_rule_to_ipa(void);
+static int wwan_del_ul_flt_rule_to_ipa(void);
+static void ipa_wwan_msg_free_cb(void*, u32, u32);
+static void ipa_rmnet_rx_cb(void *priv);
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget);
+
+static void wake_tx_queue(struct work_struct *work);
+static DECLARE_WORK(ipa_tx_wakequeue_work, wake_tx_queue);
+
+static void tethering_stats_poll_queue(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
+			    tethering_stats_poll_queue);
+
+enum wwan_device_status {
+	WWAN_DEVICE_INACTIVE = 0,
+	WWAN_DEVICE_ACTIVE   = 1
+};
+
+struct ipa_rmnet_plat_drv_res {
+	bool ipa_rmnet_ssr;
+	bool ipa_loaduC;
+	bool ipa_advertise_sg_support;
+	bool ipa_napi_enable;
+};
+
+static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
+/**
+ * struct wwan_private - WWAN private data
+ * @net: network interface struct implemented by this driver
+ * @stats: iface statistics
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high_ctl: maximum number of outstanding packets allowed for
+ * control traffic
+ * @outstanding_high: maximum number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which the TX queue
+ * is woken up again
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @resource_granted_completion: completion signaled when the IPA resource
+ * is granted
+ * @device_status: holds device status
+ * @napi: NAPI context used when IPA NAPI polling is enabled
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct wwan_private {
+	struct net_device *net;
+	struct net_device_stats stats;
+	atomic_t outstanding_pkts;
+	int outstanding_high_ctl;
+	int outstanding_high;
+	int outstanding_low;
+	uint32_t ch_id;
+	spinlock_t lock;
+	struct completion resource_granted_completion;
+	enum wwan_device_status device_status;
+	struct napi_struct napi;
+};
+
+/**
+ * ipa_setup_a7_qmap_hdr() - Setup default A7 QMAP hdr
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the header
+ */
+static int ipa_setup_a7_qmap_hdr(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	u32 pyld_sz;
+	int ret;
+
+	/* install the basic exception header */
+	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!hdr) {
+		IPAWANERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
+				IPA_RESOURCE_NAME_MAX);
+	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+
+	if (ipa2_add_hdr(hdr)) {
+		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+	qmap_hdr_hdl = hdr_entry->hdr_hdl;
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static void ipa_del_a7_qmap_hdr(void)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *hdl_entry;
+	u32 pyld_sz;
+	int ret;
+
+	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+		      sizeof(struct ipa_hdr_del);
+	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!del_hdr) {
+		IPAWANERR("fail to alloc exception hdr_del\n");
+		return;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	hdl_entry = &del_hdr->hdl[0];
+	hdl_entry->hdl = qmap_hdr_hdl;
+
+	ret = ipa2_del_hdr(del_hdr);
+	if (ret || hdl_entry->status)
+		IPAWANERR("ipa2_del_hdr failed\n");
+	else
+		IPAWANDBG("hdrs deletion done\n");
+
+	qmap_hdr_hdl = 0;
+	kfree(del_hdr);
+}
+
+static void ipa_del_qmap_hdr(uint32_t hdr_hdl)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *hdl_entry;
+	u32 pyld_sz;
+	int ret;
+
+	if (hdr_hdl == 0) {
+		IPAWANERR("Invalid hdr_hdl provided\n");
+		return;
+	}
+
+	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+		sizeof(struct ipa_hdr_del);
+	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!del_hdr) {
+		IPAWANERR("fail to alloc exception hdr_del\n");
+		return;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	hdl_entry = &del_hdr->hdl[0];
+	hdl_entry->hdl = hdr_hdl;
+
+	ret = ipa2_del_hdr(del_hdr);
+	if (ret || hdl_entry->status)
+		IPAWANERR("ipa2_del_hdr failed\n");
+	else
+		IPAWANDBG("header deletion done\n");
+
+	qmap_hdr_hdl = 0;
+	kfree(del_hdr);
+}
+
+static void ipa_del_mux_qmap_hdrs(void)
+{
+	int index;
+
+	for (index = 0; index < rmnet_index; index++) {
+		ipa_del_qmap_hdr(mux_channel[index].hdr_hdl);
+		mux_channel[index].hdr_hdl = 0;
+	}
+}
+
+static int ipa_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+	u32 pyld_sz;
+	int ret;
+
+	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!hdr) {
+		IPAWANERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 mux_id);
+	strlcpy(hdr_entry->name, hdr_name,
+		IPA_RESOURCE_NAME_MAX);
+
+	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+	hdr_entry->hdr[1] = (uint8_t) mux_id;
+	IPAWANDBG("header (%s) with mux-id: (%d)\n",
+		hdr_name,
+		hdr_entry->hdr[1]);
+	if (ipa2_add_hdr(hdr)) {
+		IPAWANERR("fail to add IPA_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAWANERR("fail to add IPA_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+	*hdr_hdl = hdr_entry->hdr_hdl;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+/**
+ * ipa_setup_dflt_wan_rt_tables() - Setup default WAN routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+static int ipa_setup_dflt_wan_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAWANERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = qmap_hdr_hdl;
+
+	if (ipa2_add_rt_rule(rt_rule)) {
+		IPAWANERR("fail to add dflt_wan v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+
+	IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to Apps */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa2_add_rt_rule(rt_rule)) {
+		IPAWANERR("fail to add dflt_wan v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+	kfree(rt_rule);
+	return 0;
+}
+
+static void ipa_del_dflt_wan_rt_tables(void)
+{
+	struct ipa_ioc_del_rt_rule *rt_rule;
+	struct ipa_rt_rule_del *rt_rule_entry;
+	int len;
+
+	len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_del);
+	rt_rule = kzalloc(len, GFP_KERNEL);
+	if (!rt_rule) {
+		IPAWANERR("unable to allocate memory for del route rule\n");
+		return;
+	}
+
+	rt_rule->commit = 1;
+	rt_rule->num_hdls = 1;
+	rt_rule->ip = IPA_IP_v4;
+
+	rt_rule_entry = &rt_rule->hdl[0];
+	rt_rule_entry->status = -1;
+	rt_rule_entry->hdl = dflt_v4_wan_rt_hdl;
+
+	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+		rt_rule_entry->hdl, IPA_IP_v4);
+	if (ipa2_del_rt_rule(rt_rule) ||
+			(rt_rule_entry->status)) {
+		IPAWANERR("Routing rule deletion failed!\n");
+	}
+
+	rt_rule->ip = IPA_IP_v6;
+	rt_rule_entry->hdl = dflt_v6_wan_rt_hdl;
+	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+		rt_rule_entry->hdl, IPA_IP_v6);
+	if (ipa2_del_rt_rule(rt_rule) ||
+			(rt_rule_entry->status)) {
+		IPAWANERR("Routing rule deletion failed!\n");
+	}
+
+	kfree(rt_rule);
+}
+
+int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+		*rule_req, uint32_t *rule_hdl)
+{
+	int i, j;
+
+	if (rule_req->filter_spec_list_valid == true) {
+		num_q6_rule = rule_req->filter_spec_list_len;
+		IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
+	} else {
+		num_q6_rule = 0;
+		IPAWANERR("got no UL rules from modem\n");
+		return -EINVAL;
+	}
+
+	/* copy UL filter rules from the modem */
+	for (i = 0; i < num_q6_rule; i++) {
+		/* check if the rules exceed the cache size */
+		if (i == MAX_NUM_Q6_RULE) {
+			IPAWANERR("reached max rule cache (%d), total (%d)\n",
+				MAX_NUM_Q6_RULE, num_q6_rule);
+			goto failure;
+		}
+		/* construct the UL_filter_rule handle for the QMI use-case */
+		ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl =
+			UL_FILTER_RULE_HANDLE_START + i;
+		rule_hdl[i] = ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].ip =
+			rule_req->filter_spec_list[i].ip_type;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].action =
+			rule_req->filter_spec_list[i].filter_action;
+		if (rule_req->filter_spec_list[i].is_routing_table_index_valid
+			== true)
+			ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
+			rule_req->filter_spec_list[i].route_table_index;
+		if (rule_req->filter_spec_list[i].is_mux_id_valid == true)
+			ipa_qmi_ctx->q6_ul_filter_rule[i].mux_id =
+			rule_req->filter_spec_list[i].mux_id;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
+			rule_req->filter_spec_list[i].filter_rule.
+			rule_eq_bitmap;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
+			rule_req->filter_spec_list[i].filter_rule.
+			tos_eq_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
+			rule_req->filter_spec_list[i].filter_rule.tos_eq;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			protocol_eq_present = rule_req->filter_spec_list[i].
+			filter_rule.protocol_eq_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
+			rule_req->filter_spec_list[i].filter_rule.
+			protocol_eq;
+
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_ihl_offset_range_16 = rule_req->filter_spec_list[i].
+			filter_rule.num_ihl_offset_range_16;
+		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_ihl_offset_range_16; j++) {
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].offset = rule_req->
+			filter_spec_list[i].filter_rule.
+			ihl_offset_range_16[j].offset;
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].range_low = rule_req->
+			filter_spec_list[i].filter_rule.
+			ihl_offset_range_16[j].range_low;
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].range_high = rule_req->
+			filter_spec_list[i].filter_rule.
+			ihl_offset_range_16[j].range_high;
+		}
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
+			rule_req->filter_spec_list[i].filter_rule.
+			num_offset_meq_32;
+		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				num_offset_meq_32; j++) {
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].offset = rule_req->filter_spec_list[i].
+			filter_rule.offset_meq_32[j].offset;
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].mask = rule_req->filter_spec_list[i].
+			filter_rule.offset_meq_32[j].mask;
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].value = rule_req->filter_spec_list[i].
+			filter_rule.offset_meq_32[j].value;
+		}
+
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
+			rule_req->filter_spec_list[i].filter_rule.tc_eq_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
+			rule_req->filter_spec_list[i].filter_rule.tc_eq;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
+			rule_req->filter_spec_list[i].filter_rule.
+			flow_eq_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
+			rule_req->filter_spec_list[i].filter_rule.flow_eq;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16_present = rule_req->filter_spec_list[i].
+		filter_rule.ihl_offset_eq_16_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16.offset = rule_req->filter_spec_list[i].
+		filter_rule.ihl_offset_eq_16.offset;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16.value = rule_req->filter_spec_list[i].
+		filter_rule.ihl_offset_eq_16.value;
+
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32_present = rule_req->filter_spec_list[i].
+		filter_rule.ihl_offset_eq_32_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32.offset = rule_req->filter_spec_list[i].
+		filter_rule.ihl_offset_eq_32.offset;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32.value = rule_req->filter_spec_list[i].
+		filter_rule.ihl_offset_eq_32.value;
+
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		num_ihl_offset_meq_32 = rule_req->filter_spec_list[i].
+		filter_rule.num_ihl_offset_meq_32;
+		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].
+			eq_attrib.num_ihl_offset_meq_32; j++) {
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].offset = rule_req->
+				filter_spec_list[i].filter_rule.
+				ihl_offset_meq_32[j].offset;
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].mask = rule_req->
+				filter_spec_list[i].filter_rule.
+				ihl_offset_meq_32[j].mask;
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].value = rule_req->
+				filter_spec_list[i].filter_rule.
+				ihl_offset_meq_32[j].value;
+		}
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
+			rule_req->filter_spec_list[i].filter_rule.
+			num_offset_meq_128;
+		for (j = 0; j < ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_offset_meq_128; j++) {
+			ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				offset_meq_128[j].offset = rule_req->
+				filter_spec_list[i].filter_rule.
+				offset_meq_128[j].offset;
+			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+					offset_meq_128[j].mask,
+					rule_req->filter_spec_list[i].
+					filter_rule.offset_meq_128[j].mask, 16);
+			memcpy(ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+					offset_meq_128[j].value, rule_req->
+					filter_spec_list[i].filter_rule.
+					offset_meq_128[j].value, 16);
+		}
+
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32_present = rule_req->filter_spec_list[i].
+				filter_rule.metadata_meq32_present;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32.offset = rule_req->filter_spec_list[i].
+			filter_rule.metadata_meq32.offset;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32.mask = rule_req->filter_spec_list[i].
+			filter_rule.metadata_meq32.mask;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
+			value = rule_req->filter_spec_list[i].filter_rule.
+			metadata_meq32.value;
+		ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ipv4_frag_eq_present = rule_req->filter_spec_list[i].
+			filter_rule.ipv4_frag_eq_present;
+	}
+
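+	/*
+	 * The modem can additionally flag a subset of the UL rules as
+	 * XLAT (464xlat) rules by listing their indices; those indices are
+	 * validated against num_q6_rule and the matching cached rules are
+	 * only marked here via is_xlat_rule.
+	 */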
+	if (rule_req->xlat_filter_indices_list_valid) {
+		if (rule_req->xlat_filter_indices_list_len > num_q6_rule) {
+			IPAWANERR("Number of xlat indices is not valid: %d\n",
+					rule_req->xlat_filter_indices_list_len);
+			goto failure;
+		}
+		IPAWANDBG("Receive %d XLAT indices: ",
+				rule_req->xlat_filter_indices_list_len);
+		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
+			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
+		IPAWANDBG("\n");
+
+		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
+			if (rule_req->xlat_filter_indices_list[i]
+				>= num_q6_rule) {
+				IPAWANERR("Xlat rule idx is wrong: %d\n",
+					rule_req->xlat_filter_indices_list[i]);
+				goto failure;
+			} else {
+				ipa_qmi_ctx->q6_ul_filter_rule
+				[rule_req->xlat_filter_indices_list[i]]
+				.is_xlat_rule = 1;
+				IPAWANDBG("Rule %d is xlat rule\n",
+					rule_req->xlat_filter_indices_list[i]);
+			}
+		}
+	}
+	goto success;
+
+failure:
+	num_q6_rule = 0;
+	memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
+		sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
+	return -EINVAL;
+
+success:
+	return 0;
+}
+
+static int wwan_add_ul_flt_rule_to_ipa(void)
+{
+	u32 pyld_sz;
+	int i, retval = 0;
+	int num_v4_rule = 0, num_v6_rule = 0;
+	struct ipa_ioc_add_flt_rule *param;
+	struct ipa_flt_rule_add flt_rule_entry;
+	struct ipa_fltr_installed_notif_req_msg_v01 *req;
+
+	if (ipa_qmi_ctx == NULL) {
+		IPAWANERR("ipa_qmi_ctx is NULL!\n");
+		return -EFAULT;
+	}
+
+	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
+	   sizeof(struct ipa_flt_rule_add);
+	param = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!param)
+		return -ENOMEM;
+
+	req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		kfree(param);
+		return -ENOMEM;
+	}
+
+	param->commit = 1;
+	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	param->global = false;
+	param->num_rules = (uint8_t)1;
+
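+	/*
+	 * Install each cached Q6 UL rule on the APPS_LAN_WAN producer pipe
+	 * as an equation-attribute filter rule, and keep the handle returned
+	 * by the IPA driver so wwan_del_ul_flt_rule_to_ipa() can remove the
+	 * rule later.
+	 */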
+	mutex_lock(&ipa_qmi_lock);
+	for (i = 0; i < num_q6_rule; i++) {
+		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
+		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
+		flt_rule_entry.at_rear = true;
+		flt_rule_entry.rule.action =
+			ipa_qmi_ctx->q6_ul_filter_rule[i].action;
+		flt_rule_entry.rule.rt_tbl_idx
+		= ipa_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
+		flt_rule_entry.rule.retain_hdr = true;
+
+		/* debug rt-hdl*/
+		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
+			i, flt_rule_entry.rule.rt_tbl_idx);
+		flt_rule_entry.rule.eq_attrib_type = true;
+		memcpy(&(flt_rule_entry.rule.eq_attrib),
+			&ipa_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
+			sizeof(struct ipa_ipfltri_rule_eq));
+		memcpy(&(param->rules[0]), &flt_rule_entry,
+			sizeof(struct ipa_flt_rule_add));
+		if (ipa2_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
+		} else {
+			/* store the rule handler */
+			ipa_qmi_ctx->q6_ul_filter_rule_hdl[i] =
+				param->rules[0].flt_rule_hdl;
+		}
+	}
+	mutex_unlock(&ipa_qmi_lock);
+
+	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
+	req->source_pipe_index =
+		ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+	req->install_status = QMI_RESULT_SUCCESS_V01;
+	req->filter_index_list_len = num_q6_rule;
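+	/*
+	 * Report the installed rules back to Q6: indices are counted per IP
+	 * family (separate v4/v6 running counters) and each entry carries
+	 * the QMI-side filter handle cached in copy_ul_filter_rule_to_ipa().
+	 */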
+	mutex_lock(&ipa_qmi_lock);
+	for (i = 0; i < num_q6_rule; i++) {
+		if (ipa_qmi_ctx->q6_ul_filter_rule[i].ip == IPA_IP_v4) {
+			req->filter_index_list[i].filter_index = num_v4_rule;
+			num_v4_rule++;
+		} else {
+			req->filter_index_list[i].filter_index = num_v6_rule;
+			num_v6_rule++;
+		}
+		req->filter_index_list[i].filter_handle =
+			ipa_qmi_ctx->q6_ul_filter_rule[i].filter_hdl;
+	}
+	mutex_unlock(&ipa_qmi_lock);
+	if (qmi_filter_notify_send(req)) {
+		IPAWANDBG("add filter rule index on A7-RX failed\n");
+		retval = -EFAULT;
+	}
+	old_num_q6_rule = num_q6_rule;
+	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
+			old_num_q6_rule);
+	kfree(param);
+	kfree(req);
+	return retval;
+}
+
+static int wwan_del_ul_flt_rule_to_ipa(void)
+{
+	u32 pyld_sz;
+	int i, retval = 0;
+	struct ipa_ioc_del_flt_rule *param;
+	struct ipa_flt_rule_del flt_rule_entry;
+
+	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
+	   sizeof(struct ipa_flt_rule_del);
+	param = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!param) {
+		IPAWANERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	param->commit = 1;
+	param->num_hdls = (uint8_t) 1;
+
+	for (i = 0; i < old_num_q6_rule; i++) {
+		param->ip = ipa_qmi_ctx->q6_ul_filter_rule[i].ip;
+		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
+		flt_rule_entry.hdl = ipa_qmi_ctx->q6_ul_filter_rule_hdl[i];
+		/* debug rt-hdl*/
+		IPAWANDBG("delete-IPA rule index(%d)\n", i);
+		memcpy(&(param->hdl[0]), &flt_rule_entry,
+			sizeof(struct ipa_flt_rule_del));
+		if (ipa2_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
+			kfree(param);
+			return -EFAULT;
+		}
+	}
+
+	/* set UL filter-rule add-indication */
+	a7_ul_flt_set = false;
+	old_num_q6_rule = 0;
+
+	kfree(param);
+	return retval;
+}
+
+static int find_mux_channel_index(uint32_t mux_id)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (mux_id == mux_channel[i].mux_id)
+			return i;
+	}
+	return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int find_vchannel_name_index(const char *vchannel_name)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (strcmp(mux_channel[i].vchannel_name, vchannel_name) == 0)
+			return i;
+	}
+	return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int wwan_register_to_ipa(int index)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	struct ipa_ext_intf ext_properties = {0};
+	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
+	u32 pyld_sz;
+	int ret = 0, i;
+
+	IPAWANDBG("index(%d) device[%s]:\n", index,
+		mux_channel[index].vchannel_name);
+	if (!mux_channel[index].mux_hdr_set) {
+		ret = ipa_add_qmap_hdr(mux_channel[index].mux_id,
+		      &mux_channel[index].hdr_hdl);
+		if (ret) {
+			IPAWANERR("ipa_add_qmap_hdr failed (%d)\n", index);
+			return ret;
+		}
+		mux_channel[index].mux_hdr_set = true;
+	}
+	tx_properties.prop = tx_ioc_properties;
+	tx_ipv4_property = &tx_properties.prop[0];
+	tx_ipv4_property->ip = IPA_IP_v4;
+	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 mux_channel[index].mux_id);
+	tx_ipv6_property = &tx_properties.prop[1];
+	tx_ipv6_property->ip = IPA_IP_v6;
+	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	/* no need use A2_MUX_HDR_NAME_V6_PREF, same header */
+	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 mux_channel[index].mux_id);
+	tx_properties.num_props = 2;
+
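+	/*
+	 * Downlink demux: both RX properties match on the QMAP mux_id that
+	 * IPA carries in the packet metadata, so traffic for this logical
+	 * channel is steered to this net_device.
+	 */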
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv4_property->attrib.meta_data =
+		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv6_property->attrib.meta_data =
+		mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	rx_properties.num_props = 2;
+
+	pyld_sz = num_q6_rule *
+	   sizeof(struct ipa_ioc_ext_intf_prop);
+	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
+	if (!ext_ioc_properties) {
+		IPAWANERR("Error allocating memory\n");
+		return -ENOMEM;
+	}
+
+	ext_properties.prop = ext_ioc_properties;
+	ext_properties.excp_pipe_valid = true;
+	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	ext_properties.num_props = num_q6_rule;
+	for (i = 0; i < num_q6_rule; i++) {
+		memcpy(&(ext_properties.prop[i]),
+			&(ipa_qmi_ctx->q6_ul_filter_rule[i]),
+			sizeof(struct ipa_ioc_ext_intf_prop));
+		ext_properties.prop[i].mux_id = mux_channel[index].mux_id;
+		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
+			ext_properties.prop[i].ip,
+			ext_properties.prop[i].rt_tbl_idx);
+		IPAWANDBG("action: %d mux:%d\n",
+			ext_properties.prop[i].action,
+			ext_properties.prop[i].mux_id);
+	}
+	ret = ipa2_register_intf_ext(mux_channel[index].
+		vchannel_name, &tx_properties,
+		&rx_properties, &ext_properties);
+	if (ret) {
+		IPAWANERR("[%s]:ipa2_register_intf failed %d\n",
+			mux_channel[index].vchannel_name, ret);
+		goto fail;
+	}
+	mux_channel[index].ul_flt_reg = true;
+fail:
+	kfree(ext_ioc_properties);
+	return ret;
+}
+
+static void ipa_cleanup_deregister_intf(void)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < rmnet_index; i++) {
+		if (mux_channel[i].ul_flt_reg) {
+			ret = ipa2_deregister_intf(
+				mux_channel[i].vchannel_name);
+			if (ret < 0) {
+				IPAWANERR("de-register device %s(%d) failed\n",
+					mux_channel[i].vchannel_name,
+					i);
+				return;
+			}
+			IPAWANDBG("de-register device %s(%d) success\n",
+					mux_channel[i].vchannel_name,
+					i);
+		}
+		mux_channel[i].ul_flt_reg = false;
+	}
+}
+
+int wwan_update_mux_channel_prop(void)
+{
+	int ret = 0, i;
+	/* install UL filter rules */
+	if (egress_set) {
+		if (ipa_qmi_ctx &&
+			ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+			IPAWANDBG("setup UL filter rules\n");
+			if (a7_ul_flt_set) {
+				IPAWANDBG("del previous UL filter rules\n");
+				/* delete rule hdlers */
+				ret = wwan_del_ul_flt_rule_to_ipa();
+				if (ret) {
+					IPAWANERR("failed to del old rules\n");
+					return -EINVAL;
+				}
+				IPAWANDBG("deleted old UL rules\n");
+			}
+			ret = wwan_add_ul_flt_rule_to_ipa();
+		}
+		if (ret)
+			IPAWANERR("failed to install UL rules\n");
+		else
+			a7_ul_flt_set = true;
+	}
+	/* update Tx/Rx/Ext property */
+	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
+	if (rmnet_index == 0) {
+		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
+		return ret;
+	}
+
+	ipa_cleanup_deregister_intf();
+
+	for (i = 0; i < rmnet_index; i++) {
+		ret = wwan_register_to_ipa(i);
+		if (ret < 0) {
+			IPAWANERR("failed to re-register %s, mux %d, index %d\n",
+				mux_channel[i].vchannel_name,
+				mux_channel[i].mux_id,
+				i);
+			return -ENODEV;
+		}
+		IPAWANERR("dev(%s) has registered to IPA\n",
+			mux_channel[i].vchannel_name);
+		mux_channel[i].ul_flt_reg = true;
+	}
+	return ret;
+}
+
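+/*
+ * Compatibility shim: older kernels only provide the INIT_COMPLETION()
+ * macro, so map reinit_completion() onto it in that case.
+ */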
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif /* INIT_COMPLETION */
+
+static int __ipa_wwan_open(struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+	IPAWANDBG("[%s] __ipa_wwan_open()\n", dev->name);
+	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
+		reinit_completion(&wwan_ptr->resource_granted_completion);
+	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+	if (ipa_rmnet_res.ipa_napi_enable)
+		napi_enable(&(wwan_ptr->napi));
+	return 0;
+}
+
+/**
+ * ipa_wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int ipa_wwan_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	IPAWANDBG("[%s] ipa_wwan_open()\n", dev->name);
+	rc = __ipa_wwan_open(dev);
+	if (rc == 0)
+		netif_start_queue(dev);
+	return rc;
+}
+
+static int __ipa_wwan_close(struct net_device *dev)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	int rc = 0;
+
+	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+		/* do not close wwan port once up, this causes the
+		 * remote side to hang if it is opened again
+		 */
+		reinit_completion(&wwan_ptr->resource_granted_completion);
+		if (ipa_rmnet_res.ipa_napi_enable)
+			napi_disable(&(wwan_ptr->napi));
+		rc = ipa2_deregister_intf(dev->name);
+		if (rc) {
+			IPAWANERR("[%s]: ipa2_deregister_intf failed %d\n",
+			       dev->name, rc);
+			return rc;
+		}
+		return rc;
+	} else {
+		return -EBADF;
+	}
+}
+
+/**
+ * ipa_wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while closing logical channel on A2 MUX driver
+ */
+static int ipa_wwan_stop(struct net_device *dev)
+{
+	IPAWANDBG("[%s] ipa_wwan_stop()\n", dev->name);
+	__ipa_wwan_close(dev);
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int ipa_wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
+		return -EINVAL;
+	IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/**
+ * ipa_wwan_xmit() - Transmits an skb.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again
+ * later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int ipa_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int ret = 0;
+	bool qmap_check;
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+	struct ipa_tx_meta meta;
+
+	if (skb->protocol != htons(ETH_P_MAP)) {
+		IPAWANDBG("SW filtering out non-QMAP packet received from %s\n",
+			current->comm);
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
+	if (netif_queue_stopped(dev)) {
+		if (qmap_check &&
+			atomic_read(&wwan_ptr->outstanding_pkts) <
+					wwan_ptr->outstanding_high_ctl) {
+			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
+			goto send;
+		} else {
+			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	/* checking High WM hit */
+	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
+					wwan_ptr->outstanding_high) {
+		if (!qmap_check) {
+			IPAWANDBG("pending(%d)/(%d)- stop(%d), qmap_chk(%d)\n",
+				atomic_read(&wwan_ptr->outstanding_pkts),
+				wwan_ptr->outstanding_high,
+				netif_queue_stopped(dev),
+				qmap_check);
+			netif_stop_queue(dev);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
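+	/*
+	 * From here the packet is handed to IPA: request the WWAN0_PROD
+	 * resource (returning TX_BUSY while the grant is still in progress),
+	 * send QMAP control commands to the Q6 LAN consumer and data to the
+	 * APPS_LAN_WAN producer, and account the packet in outstanding_pkts;
+	 * apps_ipa_tx_complete_notify() decrements that count and re-wakes
+	 * the queue once it drops below outstanding_low.
+	 */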
+send:
+	/* IPA_RM checking start */
+	ret = ipa_rm_inactivity_timer_request_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret == -EINPROGRESS) {
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+	if (ret) {
+		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+		       dev->name, ret);
+		return -EFAULT;
+	}
+	/* IPA_RM checking end */
+
+	if (qmap_check) {
+		memset(&meta, 0, sizeof(meta));
+		meta.pkt_init_dst_ep_valid = true;
+		meta.pkt_init_dst_ep_remote = true;
+		ret = ipa2_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
+	} else {
+		ret = ipa2_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
+	}
+
+	if (ret) {
+		ret = NETDEV_TX_BUSY;
+		dev->stats.tx_dropped++;
+		goto out;
+	}
+
+	atomic_inc(&wwan_ptr->outstanding_pkts);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	ret = NETDEV_TX_OK;
+out:
+	ipa_rm_inactivity_timer_release_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+	return ret;
+}
+
+static void ipa_wwan_tx_timeout(struct net_device *dev)
+{
+	IPAWANERR("[%s] ipa_wwan_tx_timeout(), data stall in UL\n", dev->name);
+}
+
+/**
+ * apps_ipa_tx_complete_notify() - Tx complete notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it.
+ * This function will be called in deferred context in the IPA wq.
+ */
+static void apps_ipa_tx_complete_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct net_device *dev = (struct net_device *)priv;
+	struct wwan_private *wwan_ptr;
+
+	if (dev != ipa_netdevs[0]) {
+		IPAWANDBG("Received pre-SSR packet completion\n");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (evt != IPA_WRITE_DONE) {
+		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		return;
+	}
+
+	wwan_ptr = netdev_priv(dev);
+	atomic_dec(&wwan_ptr->outstanding_pkts);
+	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
+	if (!atomic_read(&is_ssr) &&
+		netif_queue_stopped(wwan_ptr->net) &&
+		atomic_read(&wwan_ptr->outstanding_pkts) <
+					(wwan_ptr->outstanding_low)) {
+		IPAWANDBG("Outstanding low (%d) - wake up queue\n",
+				wwan_ptr->outstanding_low);
+		netif_wake_queue(wwan_ptr->net);
+	}
+	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
+	dev_kfree_skb_any(skb);
+	ipa_rm_inactivity_timer_release_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+}
+
+/**
+ * apps_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA hands the received packet (pointed to by skb->data) to the Linux
+ * network stack.
+ */
+static void apps_ipa_packet_receive_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)priv;
+
+	if (evt == IPA_RECEIVE) {
+		struct sk_buff *skb = (struct sk_buff *)data;
+		int result;
+		unsigned int packet_len = skb->len;
+
+		IPAWANDBG("Rx packet was received\n");
+		skb->dev = ipa_netdevs[0];
+		skb->protocol = htons(ETH_P_MAP);
+
+		if (ipa_rmnet_res.ipa_napi_enable) {
+			trace_rmnet_ipa_netif_rcv_skb(dev->stats.rx_packets);
+			result = netif_receive_skb(skb);
+		} else {
+			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+					== 0) {
+				trace_rmnet_ipa_netifni(dev->stats.rx_packets);
+				result = netif_rx_ni(skb);
+			} else {
+				trace_rmnet_ipa_netifrx(dev->stats.rx_packets);
+				result = netif_rx(skb);
+			}
+		}
+
+		if (result) {
+			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+							   __func__, __LINE__);
+			dev->stats.rx_dropped++;
+		}
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += packet_len;
+	} else if (evt == IPA_CLIENT_START_POLL) {
+		ipa_rmnet_rx_cb(priv);
+	} else if (evt == IPA_CLIENT_COMP_NAPI) {
+		struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+		if (ipa_rmnet_res.ipa_napi_enable)
+			napi_complete(&(wwan_ptr->napi));
+	} else {
+		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
+	}
+}
+
+static int handle_ingress_format(struct net_device *dev,
+			struct rmnet_ioctl_extended_s *in)
+{
+	int ret = 0;
+	struct rmnet_phys_ep_conf_s *ep_cfg;
+
+	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+		ipa_to_apps_ep_cfg.ipa_ep_cfg.cfg.cs_offload_en =
+		   IPA_ENABLE_CS_OFFLOAD_DL;
+
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+		IPAWANERR("get AGG size %d count %d\n",
+				  in->u.ingress_format.agg_size,
+				  in->u.ingress_format.agg_count);
+
+		ret = ipa_disable_apps_wan_cons_deaggr(
+			  in->u.ingress_format.agg_size,
+			  in->u.ingress_format.agg_count);
+
+		if (!ret) {
+			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_byte_limit =
+				in->u.ingress_format.agg_size;
+			ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
+				in->u.ingress_format.agg_count;
+
+			if (ipa_rmnet_res.ipa_napi_enable) {
+				ipa_to_apps_ep_cfg.recycle_enabled = true;
+				ep_cfg = (struct rmnet_phys_ep_conf_s *)
+				   rcu_dereference(dev->rx_handler_data);
+				ep_cfg->recycle = ipa_recycle_wan_skb;
+				pr_info("Wan Recycle Enabled\n");
+			}
+		}
+	}
+
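+	/*
+	 * The ingress header parsing configured below assumes the 4-byte
+	 * QMAP (MAP) header: the mux_id byte at offset 1 is used as packet
+	 * metadata and the 16-bit packet length sits at offset 2.
+	 */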
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding =
+		true;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+	ipa_to_apps_ep_cfg.ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+	ipa_to_apps_ep_cfg.client = IPA_CLIENT_APPS_WAN_CONS;
+	ipa_to_apps_ep_cfg.notify = apps_ipa_packet_receive_notify;
+	ipa_to_apps_ep_cfg.priv = dev;
+
+	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
+	if (ipa_to_apps_ep_cfg.napi_enabled)
+		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+	else
+		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
+	mutex_lock(&ipa_to_apps_pipe_handle_guard);
+	if (atomic_read(&is_ssr)) {
+		IPAWANDBG("In SSR sequence/recovery\n");
+		mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+		return -EFAULT;
+	}
+	ret = ipa2_setup_sys_pipe(&ipa_to_apps_ep_cfg, &ipa_to_apps_hdl);
+	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+
+	if (ret)
+		IPAWANERR("failed to configure ingress\n");
+
+	return ret;
+}
+
+/**
+ * ipa_wwan_ioctl() - I/O control for wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: interface request block carrying the ioctl data
+ * @cmd: command to be executed; one of the RMNET_IOCTL_* commands
+ * handled below
+ *
+ * Return codes:
+ * 0: success
+ * -EFAULT: failed to copy data to/from user space
+ * -EINVAL: unsupported command
+ */
+static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int rc = 0;
+	int mru = 1000, epid = 1, mux_index, len;
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg *wan_msg = NULL;
+	struct rmnet_ioctl_extended_s extend_ioctl_data;
+	struct rmnet_ioctl_data_s ioctl_data;
+
+	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
+	switch (cmd) {
+	/*  Set Ethernet protocol  */
+	case RMNET_IOCTL_SET_LLP_ETHERNET:
+		break;
+	/*  Set RAWIP protocol  */
+	case RMNET_IOCTL_SET_LLP_IP:
+		break;
+	/*  Get link protocol  */
+	case RMNET_IOCTL_GET_LLP:
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Set QoS header enabled  */
+	case RMNET_IOCTL_SET_QOS_ENABLE:
+		return -EINVAL;
+	/*  Set QoS header disabled  */
+	case RMNET_IOCTL_SET_QOS_DISABLE:
+		break;
+	/*  Get QoS header state  */
+	case RMNET_IOCTL_GET_QOS:
+		ioctl_data.u.operation_mode = RMNET_MODE_NONE;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Get operation mode  */
+	case RMNET_IOCTL_GET_OPMODE:
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Open transport port  */
+	case RMNET_IOCTL_OPEN:
+		break;
+	/*  Close transport port  */
+	case RMNET_IOCTL_CLOSE:
+		break;
+	/*  Flow enable  */
+	case RMNET_IOCTL_FLOW_ENABLE:
+		IPAWANDBG("Received flow enable\n");
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		ipa_flow_control(IPA_CLIENT_USB_PROD, true,
+			ioctl_data.u.tcm_handle);
+		break;
+	/*  Flow disable  */
+	case RMNET_IOCTL_FLOW_DISABLE:
+		IPAWANDBG("Received flow disable\n");
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		ipa_flow_control(IPA_CLIENT_USB_PROD, false,
+			ioctl_data.u.tcm_handle);
+		break;
+	/*  Set flow handle  */
+	case RMNET_IOCTL_FLOW_SET_HNDL:
+		break;
+
+	/*  Extended IOCTLs  */
+	case RMNET_IOCTL_EXTENDED:
+		IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
+		if (copy_from_user(&extend_ioctl_data,
+			(u8 *)ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_extended_s))) {
+			IPAWANERR("failed to copy extended ioctl data\n");
+			rc = -EFAULT;
+			break;
+		}
+		switch (extend_ioctl_data.extended_ioctl) {
+		/*  Get features  */
+		case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+			IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
+			extend_ioctl_data.u.data =
+				(RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
+				RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
+				RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Set MRU  */
+		case RMNET_IOCTL_SET_MRU:
+			mru = extend_ioctl_data.u.data;
+			IPAWANDBG("get MRU size %d\n",
+				extend_ioctl_data.u.data);
+			break;
+		/*  Get MRU  */
+		case RMNET_IOCTL_GET_MRU:
+			extend_ioctl_data.u.data = mru;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/* GET SG support */
+		case RMNET_IOCTL_GET_SG_SUPPORT:
+			extend_ioctl_data.u.data =
+				ipa_rmnet_res.ipa_advertise_sg_support;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Get endpoint ID  */
+		case RMNET_IOCTL_GET_EPID:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
+			extend_ioctl_data.u.data = epid;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&extend_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
+					extend_ioctl_data.u.data);
+			break;
+		/*  Endpoint pair  */
+		case RMNET_IOCTL_GET_EP_PAIR:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
+			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
+			ipa2_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
+			ipa2_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&extend_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
+			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
+			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
+			break;
+		/*  Get driver name  */
+		case RMNET_IOCTL_GET_DRIVER_NAME:
+			memcpy(&extend_ioctl_data.u.if_name,
+				ipa_netdevs[0]->name, IFNAMSIZ);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+					&extend_ioctl_data,
+					sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Add MUX ID  */
+		case RMNET_IOCTL_ADD_MUX_CHANNEL:
+			mux_index = find_mux_channel_index(
+				extend_ioctl_data.u.rmnet_mux_val.mux_id);
+			if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
+				IPAWANDBG("already setup mux(%d)\n",
+					extend_ioctl_data.u.
+					rmnet_mux_val.mux_id);
+				return rc;
+			}
+			if (rmnet_index >= MAX_NUM_OF_MUX_CHANNEL) {
+				IPAWANERR("Exceed mux_channel limit(%d)\n",
+				rmnet_index);
+				return -EFAULT;
+			}
+			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
+			extend_ioctl_data.u.rmnet_mux_val.mux_id,
+			extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+			/* cache the mux name and id */
+			mux_channel[rmnet_index].mux_id =
+				extend_ioctl_data.u.rmnet_mux_val.mux_id;
+			memcpy(mux_channel[rmnet_index].vchannel_name,
+				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
+				sizeof(mux_channel[rmnet_index].vchannel_name));
+			IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n",
+				mux_channel[rmnet_index].vchannel_name,
+				mux_channel[rmnet_index].mux_id,
+				rmnet_index);
+			/* check if UL filter rules have already arrived */
+			if (num_q6_rule != 0) {
+				IPAWANERR("dev(%s) register to IPA\n",
+					extend_ioctl_data.u.rmnet_mux_val.
+					vchannel_name);
+				rc = wwan_register_to_ipa(rmnet_index);
+				if (rc < 0) {
+					IPAWANERR("device %s reg IPA failed\n",
+						extend_ioctl_data.u.
+						rmnet_mux_val.vchannel_name);
+					return -ENODEV;
+				}
+				mux_channel[rmnet_index].mux_channel_set = true;
+				mux_channel[rmnet_index].ul_flt_reg = true;
+			} else {
+				IPAWANDBG("dev(%s) haven't registered to IPA\n",
+					extend_ioctl_data.u.
+					rmnet_mux_val.vchannel_name);
+				mux_channel[rmnet_index].mux_channel_set = true;
+				mux_channel[rmnet_index].ul_flt_reg = false;
+			}
+			rmnet_index++;
+			break;
+		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
+			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
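+			/*
+			 * With UL checksum offload the egress header grows to
+			 * 8 bytes: the 4-byte QMAP header plus a 4-byte
+			 * checksum metadata word (cs_metadata_hdr_offset = 1
+			 * presumably counts 32-bit words); otherwise only the
+			 * 4-byte QMAP header is prepended.
+			 */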
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
+				apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 8;
+				apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
+					cs_offload_en =
+					IPA_ENABLE_CS_OFFLOAD_UL;
+				apps_to_ipa_ep_cfg.ipa_ep_cfg.cfg.
+					cs_metadata_hdr_offset = 1;
+			} else {
+				apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_len = 4;
+			}
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
+				apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
+					IPA_ENABLE_AGGR;
+			else
+				apps_to_ipa_ep_cfg.ipa_ep_cfg.aggr.aggr_en =
+					IPA_BYPASS_AGGR;
+			apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_metadata_valid = 1;
+			/* the modem wants the offset at 0 */
+			apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.hdr_ofst_metadata = 0;
+			apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.dst =
+					IPA_CLIENT_APPS_LAN_WAN_PROD;
+			apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.mode = IPA_BASIC;
+
+			apps_to_ipa_ep_cfg.client =
+				IPA_CLIENT_APPS_LAN_WAN_PROD;
+			apps_to_ipa_ep_cfg.notify =
+				apps_ipa_tx_complete_notify;
+			apps_to_ipa_ep_cfg.desc_fifo_sz =
+			IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+			apps_to_ipa_ep_cfg.priv = dev;
+
+			rc = ipa2_setup_sys_pipe(&apps_to_ipa_ep_cfg,
+				&apps_to_ipa_hdl);
+			if (rc)
+				IPAWANERR("failed to config egress endpoint\n");
+
+			if (num_q6_rule != 0) {
+				/* already got Q6 UL filter rules*/
+				if (ipa_qmi_ctx &&
+					ipa_qmi_ctx->modem_cfg_emb_pipe_flt
+					== false)
+					rc = wwan_add_ul_flt_rule_to_ipa();
+				else
+					rc = 0;
+				egress_set = true;
+				if (rc)
+					IPAWANERR("install UL rules failed\n");
+				else
+					a7_ul_flt_set = true;
+			} else {
+				/* wait Q6 UL filter rules*/
+				egress_set = true;
+				IPAWANDBG("no UL-rules, egress_set(%d)\n",
+					egress_set);
+			}
+			break;
+		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/*  Set IDF  */
+			rc = handle_ingress_format(dev, &extend_ioctl_data);
+			break;
+		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
+			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
+						GFP_KERNEL);
+			if (!wan_msg) {
+				IPAWANERR("Failed to allocate memory.\n");
+				return -ENOMEM;
+			}
+			len = sizeof(wan_msg->upstream_ifname) >
+			sizeof(extend_ioctl_data.u.if_name) ?
+				sizeof(extend_ioctl_data.u.if_name) :
+				sizeof(wan_msg->upstream_ifname);
+			strlcpy(wan_msg->upstream_ifname,
+				extend_ioctl_data.u.if_name, len);
+			memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+			msg_meta.msg_type = WAN_XLAT_CONNECT;
+			msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+			rc = ipa2_send_msg(&msg_meta, wan_msg,
+						ipa_wwan_msg_free_cb);
+			if (rc) {
+				IPAWANERR("Failed to send XLAT_CONNECT msg\n");
+				kfree(wan_msg);
+			}
+			break;
+		/*  Get agg count  */
+		case RMNET_IOCTL_GET_AGGREGATION_COUNT:
+			break;
+		/*  Set agg count  */
+		case RMNET_IOCTL_SET_AGGREGATION_COUNT:
+			break;
+		/*  Get agg size  */
+		case RMNET_IOCTL_GET_AGGREGATION_SIZE:
+			break;
+		/*  Set agg size  */
+		case RMNET_IOCTL_SET_AGGREGATION_SIZE:
+			break;
+		/*  Do flow control  */
+		case RMNET_IOCTL_FLOW_CONTROL:
+			break;
+		/*  For legacy use  */
+		case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
+			break;
+		/*  Get HW/SW map  */
+		case RMNET_IOCTL_GET_HWSW_MAP:
+			break;
+		/*  Set RX Headroom  */
+		case RMNET_IOCTL_SET_RX_HEADROOM:
+			break;
+		default:
+			IPAWANERR("[%s] unsupported extended cmd[%d]\n",
+				dev->name,
+				extend_ioctl_data.extended_ioctl);
+			rc = -EINVAL;
+		}
+		break;
+	default:
+		IPAWANERR("[%s] unsupported cmd[%d]\n",
+			dev->name, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static const struct net_device_ops ipa_wwan_ops_ip = {
+	.ndo_open = ipa_wwan_open,
+	.ndo_stop = ipa_wwan_stop,
+	.ndo_start_xmit = ipa_wwan_xmit,
+	.ndo_tx_timeout = ipa_wwan_tx_timeout,
+	.ndo_do_ioctl = ipa_wwan_ioctl,
+	.ndo_change_mtu = ipa_wwan_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/**
+ * ipa_wwan_setup() - Sets up the wwan network device.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_wwan_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &ipa_wwan_ops_ip;
+	ether_setup(dev);
+	/* set this after calling ether_setup */
+	dev->header_ops = 0;  /* No header */
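+	/* Raw-IP QMAP device: no L2 header or MAC address, broadcast and
+	 * multicast are disabled, and headroom is reserved for the QMAP
+	 * header prepended on transmit.
+	 */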
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->mtu = WWAN_DATA_LEN;
+	dev->addr_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+	dev->needed_headroom = HEADROOM_FOR_QMAP;
+	dev->needed_tailroom = TAILROOM;
+	dev->watchdog_timeo = 1000;
+}
+
+/* IPA_RM related functions start*/
+static void q6_prod_rm_request_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(q6_con_rm_request, q6_prod_rm_request_resource);
+static void q6_prod_rm_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(q6_con_rm_release, q6_prod_rm_release_resource);
+
+static void q6_prod_rm_request_resource(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+		       ret);
+		return;
+	}
+}
+
+static int q6_rm_request_resource(void)
+{
+	queue_delayed_work(ipa_rm_q6_workqueue,
+	   &q6_con_rm_request, 0);
+	return 0;
+}
+
+static void q6_prod_rm_release_resource(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
+		      ret);
+		return;
+	}
+}
+
+
+static int q6_rm_release_resource(void)
+{
+	queue_delayed_work(ipa_rm_q6_workqueue,
+	   &q6_con_rm_release, 0);
+	return 0;
+}
+
+
+static void q6_rm_notify_cb(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data)
+{
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPAWANDBG("%s: Q6_PROD GRANTED CB\n", __func__);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		IPAWANDBG("%s: Q6_PROD RELEASED CB\n", __func__);
+		break;
+	default:
+		return;
+	}
+}
+static int q6_initialize_rm(void)
+{
+	struct ipa_rm_create_params create_params;
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	/* Initialize IPA_RM workqueue */
+	ipa_rm_q6_workqueue = create_singlethread_workqueue("clnt_req");
+	if (!ipa_rm_q6_workqueue)
+		return -ENOMEM;
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
+	create_params.reg_params.notify_cb = &q6_rm_notify_cb;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto create_rsrc_err1;
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
+	create_params.release_resource = &q6_rm_release_resource;
+	create_params.request_resource = &q6_rm_request_resource;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto create_rsrc_err2;
+	/* add dependency*/
+	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (result)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 100;
+	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	if (result)
+		goto set_perf_err;
+	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+			&profile);
+	if (result)
+		goto set_perf_err;
+	return result;
+
+set_perf_err:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+add_dpnd_err:
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+	if (result < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_CONS, result);
+create_rsrc_err2:
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (result < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, result);
+create_rsrc_err1:
+	destroy_workqueue(ipa_rm_q6_workqueue);
+	return result;
+}
+
+void q6_deinitialize_rm(void)
+{
+	int ret;
+
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
+			ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_CONS, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, ret);
+	destroy_workqueue(ipa_rm_q6_workqueue);
+}
+
+static void wake_tx_queue(struct work_struct *work)
+{
+	if (ipa_netdevs[0]) {
+		__netif_tx_lock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
+		netif_wake_queue(ipa_netdevs[0]);
+		__netif_tx_unlock_bh(netdev_get_tx_queue(ipa_netdevs[0], 0));
+	}
+}
+
+/**
+ * ipa_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_resource_granted(void *dev)
+{
+	IPAWANDBG("Resource Granted - starting queue\n");
+	schedule_work(&ipa_tx_wakequeue_work);
+}
+
+/**
+ * ipa_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa_rm_notify(void *dev, enum ipa_rm_event event,
+			  unsigned long data)
+{
+	struct wwan_private *wwan_ptr = netdev_priv(dev);
+
+	pr_debug("%s: event %d\n", __func__, event);
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+			complete_all(&wwan_ptr->resource_granted_completion);
+			break;
+		}
+		ipa_rm_resource_granted(dev);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		break;
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+		break;
+	}
+}
+
+/* IPA_RM related functions end*/
+
+static int ssr_notifier_cb(struct notifier_block *this,
+			   unsigned long code,
+			   void *data);
+
+static struct notifier_block ssr_notifier = {
+	.notifier_call = ssr_notifier_cb,
+};
+
+static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
+		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
+{
+	ipa_rmnet_drv_res->ipa_rmnet_ssr =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,rmnet-ipa-ssr");
+	pr_info("IPA SSR support = %s\n",
+		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
+	ipa_rmnet_drv_res->ipa_loaduC =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-loaduC");
+	pr_info("IPA ipa-loaduC = %s\n",
+		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
+
+	ipa_rmnet_drv_res->ipa_advertise_sg_support =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-advertise-sg-support");
+	pr_info("IPA SG support = %s\n",
+		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+	ipa_rmnet_drv_res->ipa_napi_enable =
+		of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-napi-enable");
+	pr_info("IPA Napi Enable = %s\n",
+		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+	return 0;
+}
+
+struct ipa_rmnet_context ipa_rmnet_ctx;
+
+/**
+ * ipa_wwan_probe() - Initializes the module and registers a network
+ * interface with the network stack
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ * -ENODEV: IPA driver not loaded
+ */
+static int ipa_wwan_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct net_device *dev;
+	struct wwan_private *wwan_ptr;
+	struct ipa_rm_create_params ipa_rm_params;	/* IPA_RM */
+	struct ipa_rm_perf_profile profile;			/* IPA_RM */
+
+	pr_info("rmnet_ipa started initialization\n");
+
+	if (!ipa2_is_ready()) {
+		IPAWANERR("IPA driver not loaded\n");
+		return -ENODEV;
+	}
+
+	ret = get_ipa_rmnet_dts_configuration(pdev, &ipa_rmnet_res);
+	ipa_rmnet_ctx.ipa_rmnet_ssr = ipa_rmnet_res.ipa_rmnet_ssr;
+
+	ret = ipa_init_q6_smem();
+	if (ret) {
+		IPAWANERR("ipa_init_q6_smem failed!\n");
+		return ret;
+	}
+
+	/* initialize tx/rx endpoint setup */
+	memset(&apps_to_ipa_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
+	memset(&ipa_to_apps_ep_cfg, 0, sizeof(struct ipa_sys_connect_params));
+
+	/* initialize ex property setup */
+	num_q6_rule = 0;
+	old_num_q6_rule = 0;
+	rmnet_index = 0;
+	egress_set = false;
+	a7_ul_flt_set = false;
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
+		memset(&mux_channel[i], 0, sizeof(struct rmnet_mux_val));
+
+	/* start A7 QMI service/client */
+	if (ipa_rmnet_res.ipa_loaduC)
+		/* Android platform loads uC */
+		ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
+	else
+		/* LE platform does not load uC */
+		ipa_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
+
+	/* construct default WAN RT tbl for IPACM */
+	ret = ipa_setup_a7_qmap_hdr();
+	if (ret)
+		goto setup_a7_qmap_hdr_err;
+	ret = ipa_setup_dflt_wan_rt_tables();
+	if (ret)
+		goto setup_dflt_wan_rt_tables_err;
+
+	if (!atomic_read(&is_ssr)) {
+		/* Start transport-driver fd ioctl for ipacm for first init */
+		ret = wan_ioctl_init();
+		if (ret)
+			goto wan_ioctl_init_err;
+	} else {
+		/* Enable sending QMI messages after SSR */
+		wan_ioctl_enable_qmi_messages();
+	}
+
+	/* initialize wan-driver netdev */
+	dev = alloc_netdev(sizeof(struct wwan_private),
+			   IPA_WWAN_DEV_NAME,
+			   NET_NAME_UNKNOWN,
+			   ipa_wwan_setup);
+	if (!dev) {
+		IPAWANERR("no memory for netdev\n");
+		ret = -ENOMEM;
+		goto alloc_netdev_err;
+	}
+	ipa_netdevs[0] = dev;
+	wwan_ptr = netdev_priv(dev);
+	memset(wwan_ptr, 0, sizeof(*wwan_ptr));
+	IPAWANDBG("wwan_ptr (private) = %p\n", wwan_ptr);
+	wwan_ptr->net = dev;
+	wwan_ptr->outstanding_high_ctl = DEFAULT_OUTSTANDING_HIGH_CTL;
+	wwan_ptr->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	wwan_ptr->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&wwan_ptr->outstanding_pkts, 0);
+	spin_lock_init(&wwan_ptr->lock);
+	init_completion(&wwan_ptr->resource_granted_completion);
+
+	if (!atomic_read(&is_ssr)) {
+		/* IPA_RM configuration starts */
+		ret = q6_initialize_rm();
+		if (ret) {
+			IPAWANERR("%s: q6_initialize_rm failed, ret: %d\n",
+				__func__, ret);
+			goto q6_init_err;
+		}
+	}
+
+	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
+	ipa_rm_params.reg_params.user_data = dev;
+	ipa_rm_params.reg_params.notify_cb = ipa_rm_notify;
+	ret = ipa_rm_create_resource(&ipa_rm_params);
+	if (ret) {
+		pr_err("%s: unable to create resource %d in IPA RM\n",
+		       __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto create_rsrc_err;
+	}
+	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+					   IPA_RM_INACTIVITY_TIMER);
+	if (ret) {
+		pr_err("%s: ipa rm timer init failed %d on resource %d\n",
+		       __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto timer_init_err;
+	}
+	/* add dependency */
+	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+			&profile);
+	if (ret)
+		goto set_perf_err;
+	/* IPA_RM configuration ends */
+
+	/* Enable SG support in netdevice. */
+	if (ipa_rmnet_res.ipa_advertise_sg_support)
+		dev->hw_features |= NETIF_F_SG;
+
+	/* Enable NAPI support in netdevice. */
+	if (ipa_rmnet_res.ipa_napi_enable) {
+		netif_napi_add(dev, &(wwan_ptr->napi),
+			ipa_rmnet_poll, NAPI_WEIGHT);
+	}
+
+	ret = register_netdev(dev);
+	if (ret) {
+		IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
+			0, ret);
+		goto set_perf_err;
+	}
+
+	IPAWANDBG("IPA-WWAN devices (%s) initialization ok :>>>>\n",
+			ipa_netdevs[0]->name);
+	if (ret) {
+		IPAWANERR("default configuration failed rc=%d\n",
+				ret);
+		goto config_err;
+	}
+	atomic_set(&is_initialized, 1);
+	if (!atomic_read(&is_ssr)) {
+		/* offline charging mode */
+		ipa2_proxy_clk_unvote();
+	}
+	atomic_set(&is_ssr, 0);
+
+	pr_info("rmnet_ipa completed initialization\n");
+	return 0;
+config_err:
+	if (ipa_rmnet_res.ipa_napi_enable)
+		netif_napi_del(&(wwan_ptr->napi));
+	unregister_netdev(ipa_netdevs[0]);
+set_perf_err:
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+			ret);
+add_dpnd_err:
+	ret = ipa_rm_inactivity_timer_destroy(
+		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
+	if (ret)
+		IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+timer_init_err:
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+create_rsrc_err:
+	q6_deinitialize_rm();
+q6_init_err:
+	free_netdev(ipa_netdevs[0]);
+	ipa_netdevs[0] = NULL;
+alloc_netdev_err:
+	wan_ioctl_deinit();
+wan_ioctl_init_err:
+	ipa_del_dflt_wan_rt_tables();
+setup_dflt_wan_rt_tables_err:
+	ipa_del_a7_qmap_hdr();
+setup_a7_qmap_hdr_err:
+	ipa_qmi_service_exit();
+	atomic_set(&is_ssr, 0);
+	return ret;
+}
+
+static int ipa_wwan_remove(struct platform_device *pdev)
+{
+	int ret;
+	struct wwan_private *wwan_ptr;
+
+	wwan_ptr = netdev_priv(ipa_netdevs[0]);
+
+	pr_info("rmnet_ipa started deinitialization\n");
+	mutex_lock(&ipa_to_apps_pipe_handle_guard);
+	ret = ipa2_teardown_sys_pipe(ipa_to_apps_hdl);
+	if (ret < 0)
+		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
+	else
+		ipa_to_apps_hdl = -1;
+	if (ipa_rmnet_res.ipa_napi_enable)
+		netif_napi_del(&(wwan_ptr->napi));
+	mutex_unlock(&ipa_to_apps_pipe_handle_guard);
+	unregister_netdev(ipa_netdevs[0]);
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+			ret);
+	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR(
+		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	cancel_work_sync(&ipa_tx_wakequeue_work);
+	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+	free_netdev(ipa_netdevs[0]);
+	ipa_netdevs[0] = NULL;
+	/* No need to remove wwan_ioctl during SSR */
+	if (!atomic_read(&is_ssr))
+		wan_ioctl_deinit();
+	ipa_del_dflt_wan_rt_tables();
+	ipa_del_a7_qmap_hdr();
+	ipa_del_mux_qmap_hdrs();
+	if (ipa_qmi_ctx && ipa_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+		wwan_del_ul_flt_rule_to_ipa();
+	ipa_cleanup_deregister_intf();
+	atomic_set(&is_initialized, 0);
+	pr_info("rmnet_ipa completed deinitialization\n");
+	return 0;
+}
+
+/**
+* rmnet_ipa_ap_suspend() - suspend callback (noirq phase)
+* @dev: pointer to device
+*
+* This callback is invoked by the PM framework when an AP suspend
+* operation is initiated.
+*
+* Returns -EAGAIN to the PM framework in case there are pending packets
+* in the Tx queue. This postpones the suspend operation until all the
+* pending packets have been transmitted.
+*
+* In case there are no packets to send, releases the WWAN0_PROD entity.
+* As an outcome, the number of IPA active clients should be decremented
+* until IPA clocks can be gated.
+*/
+static int rmnet_ipa_ap_suspend(struct device *dev)
+{
+	struct net_device *netdev = ipa_netdevs[0];
+	struct wwan_private *wwan_ptr = netdev_priv(netdev);
+
+	IPAWANDBG("Enter...\n");
+	/* Do not allow A7 to suspend in case there are outstanding packets */
+	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
+		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
+		return -EAGAIN;
+	}
+
+	/* Make sure that there is no Tx operation ongoing */
+	netif_tx_lock_bh(netdev);
+	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	netif_tx_unlock_bh(netdev);
+	IPAWANDBG("Exit\n");
+
+	return 0;
+}
+
+/**
+* rmnet_ipa_ap_resume() - resume callback (noirq phase)
+* @dev: pointer to device
+*
+* This callback is invoked by the PM framework when an AP resume
+* operation is initiated.
+*
+* Enables the network interface queue and returns success to the
+* PM framework.
+*/
+static int rmnet_ipa_ap_resume(struct device *dev)
+{
+	struct net_device *netdev = ipa_netdevs[0];
+
+	IPAWANDBG("Enter...\n");
+	netif_wake_queue(netdev);
+	IPAWANDBG("Exit\n");
+
+	return 0;
+}
+
+static void ipa_stop_polling_stats(void)
+{
+	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+	ipa_rmnet_ctx.polling_interval = 0;
+}
+
+static const struct of_device_id rmnet_ipa_dt_match[] = {
+	{.compatible = "qcom,rmnet-ipa"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
+
+static const struct dev_pm_ops rmnet_ipa_pm_ops = {
+	.suspend_noirq = rmnet_ipa_ap_suspend,
+	.resume_noirq = rmnet_ipa_ap_resume,
+};
+
+static struct platform_driver rmnet_ipa_driver = {
+	.driver = {
+		.name = "rmnet_ipa",
+		.owner = THIS_MODULE,
+		.pm = &rmnet_ipa_pm_ops,
+		.of_match_table = rmnet_ipa_dt_match,
+	},
+	.probe = ipa_wwan_probe,
+	.remove = ipa_wwan_remove,
+};
+
+static int ssr_notifier_cb(struct notifier_block *this,
+			   unsigned long code,
+			   void *data)
+{
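+	/*
+	 * Modem SSR sequence as handled here: BEFORE_SHUTDOWN tears the
+	 * driver down (Q6 pre-shutdown cleanup, stop queue/QMI/stats polling
+	 * and unregister the platform driver), AFTER_SHUTDOWN finishes the
+	 * Q6 cleanup, BEFORE_POWERUP re-votes the proxy clock and clears
+	 * cached QMI state, and AFTER_POWERUP re-registers the platform
+	 * driver so probe runs again.
+	 */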
+	if (ipa_rmnet_ctx.ipa_rmnet_ssr) {
+		if (code == SUBSYS_BEFORE_SHUTDOWN) {
+			pr_info("IPA received MPSS BEFORE_SHUTDOWN\n");
+			atomic_set(&is_ssr, 1);
+			ipa_q6_pre_shutdown_cleanup();
+			if (ipa_netdevs[0])
+				netif_stop_queue(ipa_netdevs[0]);
+			ipa_qmi_stop_workqueues();
+			wan_ioctl_stop_qmi_messages();
+			ipa_stop_polling_stats();
+			if (atomic_read(&is_initialized))
+				platform_driver_unregister(&rmnet_ipa_driver);
+			pr_info("IPA BEFORE_SHUTDOWN handling is complete\n");
+			return NOTIFY_DONE;
+		}
+		if (code == SUBSYS_AFTER_SHUTDOWN) {
+			pr_info("IPA received MPSS AFTER_SHUTDOWN\n");
+			if (atomic_read(&is_ssr))
+				ipa_q6_post_shutdown_cleanup();
+			pr_info("IPA AFTER_SHUTDOWN handling is complete\n");
+			return NOTIFY_DONE;
+		}
+		if (code == SUBSYS_AFTER_POWERUP) {
+			pr_info("IPA received MPSS AFTER_POWERUP\n");
+			if (!atomic_read(&is_initialized)
+				&& atomic_read(&is_ssr))
+				platform_driver_register(&rmnet_ipa_driver);
+			pr_info("IPA AFTER_POWERUP handling is complete\n");
+			return NOTIFY_DONE;
+		}
+		if (code == SUBSYS_BEFORE_POWERUP) {
+			pr_info("IPA received MPSS BEFORE_POWERUP\n");
+			if (atomic_read(&is_ssr))
+				/* clean up cached QMI msg/handlers */
+				ipa_qmi_service_exit();
+			ipa2_proxy_clk_vote();
+			pr_info("IPA BEFORE_POWERUP handling is complete\n");
+			return NOTIFY_DONE;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+/**
+ * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa2_send_msg
+ * @buff: pointer to buffer containing the message
+ * @len: message len
+ * @type: message type
+ *
+ * This function is invoked when ipa2_send_msg is complete (Provided as a
+ * free function pointer along with the message).
+ */
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAWANERR("Null buffer\n");
+		return;
+	}
+
+	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
+		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+		IPAWANERR("Wrong type given. buff %p type %d\n",
+			  buff, type);
+	}
+	kfree(buff);
+}
+
+/**
+ * rmnet_ipa_get_stats_and_update() - Get pipe stats from the Modem
+ * @reset: whether to also reset the stats on the Modem side
+ *
+ * This function queries the IPA Modem driver for the pipe stats
+ * via QMI, and updates the user space IPA entity.
+ */
+static void rmnet_ipa_get_stats_and_update(bool reset)
+{
+	struct ipa_get_data_stats_req_msg_v01 req;
+	struct ipa_get_data_stats_resp_msg_v01 *resp;
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+		       GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+	req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+	if (reset == true) {
+		req.reset_stats_valid = true;
+		req.reset_stats = true;
+		IPAWANERR("Get the latest pipe-stats and reset it\n");
+	}
+
+	rc = ipa_qmi_get_data_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa_qmi_get_data_stats failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
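+	/*
+	 * On success, ownership of resp passes to the messaging layer, which
+	 * frees it via rmnet_ipa_free_msg once the message has been consumed.
+	 */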
+	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+}
+
+/**
+ * tethering_stats_poll_queue() - Stats polling function
+ * @work - Work entry
+ *
+ * This function is scheduled periodically (per the interval) in
+ * order to poll the IPA Modem driver for the pipe stats.
+ */
+static void tethering_stats_poll_queue(struct work_struct *work)
+{
+	rmnet_ipa_get_stats_and_update(false);
+
+	/* Schedule again only if there's an active polling interval */
+	if (ipa_rmnet_ctx.polling_interval != 0)
+		schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
+			msecs_to_jiffies(ipa_rmnet_ctx.polling_interval*1000));
+}
+
+/**
+ * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
+ *
+ * This function retrieves the data usage (used quota) from the IPA Modem driver
+ * via QMI, and updates IPA user space entity.
+ */
+static void rmnet_ipa_get_network_stats_and_update(void)
+{
+	struct ipa_get_apn_data_stats_req_msg_v01 req;
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+		       GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for network stats message\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
+
+	req.mux_id_list_valid = true;
+	req.mux_id_list_len = 1;
+	req.mux_id_list[0] = ipa_rmnet_ctx.metered_mux_id;
+
+	rc = ipa_qmi_get_network_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa_qmi_get_network_stats failed %d\n", rc);
+		kfree(resp);
+		return;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
+	rc = ipa2_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa2_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+}
+
+/**
+ * rmnet_ipa_poll_tethering_stats() - Tethering stats polling IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_POLL_TETHERING_STATS.
+ * If the received polling interval is 0, polling stops (any poll already in
+ * progress is allowed to finish), the final network and pipe stats are
+ * fetched, and the IPA user space entity is updated.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
+{
+	ipa_rmnet_ctx.polling_interval = data->polling_interval_secs;
+
+	cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
+
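+	/*
+	 * A zero interval disables polling: stop data-quota monitoring on the
+	 * Modem side and push a final stats snapshot (with a reset) to user
+	 * space.
+	 */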
+	if (ipa_rmnet_ctx.polling_interval == 0) {
+		ipa_qmi_stop_data_qouta();
+		rmnet_ipa_get_network_stats_and_update();
+		rmnet_ipa_get_stats_and_update(true);
+		return 0;
+	}
+
+	schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
+	return 0;
+}
+
+/**
+ * rmnet_ipa_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the quota request to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa_qmi_set_data_quota
+ */
+int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+	u32 mux_id;
+	int index;
+	struct ipa_set_data_usage_quota_req_msg_v01 req;
+
+	index = find_vchannel_name_index(data->interface_name);
+	IPAWANERR("iface name %s, quota %lu\n",
+			  data->interface_name,
+			  (unsigned long int) data->quota_mbytes);
+
+	if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%s is an invalid iface name\n",
+			  data->interface_name);
+		return -EFAULT;
+	}
+
+	mux_id = mux_channel[index].mux_id;
+
+	ipa_rmnet_ctx.metered_mux_id = mux_id;
+
+	memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
+	req.apn_quota_list_valid = true;
+	req.apn_quota_list_len = 1;
+	req.apn_quota_list[0].mux_id = mux_id;
+	req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
+
+	return ipa_qmi_set_data_quota(&req);
+}
+
+/**
+ * rmnet_ipa_set_tether_client_pipe() - Tether client pipe setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
+ * It tags (or, when reset is requested, clears) the given UL source and
+ * DL destination pipes with the specified tethering client.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa_set_tether_client_pipe(
+	struct wan_ioctl_set_tether_client_pipe *data)
+{
+	int number, i;
+
+	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
+	data->ipa_client,
+	data->ul_src_pipe_len,
+	data->dl_dst_pipe_len,
+	data->reset_client);
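+	/* Tag each UL source pipe with the tethering client, or clear it on reset */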
+	number = data->ul_src_pipe_len;
+	for (i = 0; i < number; i++) {
+		IPAWANDBG("UL index-%d pipe %d\n", i,
+			data->ul_src_pipe_list[i]);
+		if (data->reset_client)
+			ipa_set_client(data->ul_src_pipe_list[i],
+				0, false);
+		else
+			ipa_set_client(data->ul_src_pipe_list[i],
+				data->ipa_client, true);
+	}
+	number = data->dl_dst_pipe_len;
+	for (i = 0; i < number; i++) {
+		IPAWANDBG("DL index-%d pipe %d\n", i,
+			data->dl_dst_pipe_list[i]);
+		if (data->reset_client)
+			ipa_set_client(data->dl_dst_pipe_list[i],
+				0, false);
+		else
+			ipa_set_client(data->dl_dst_pipe_list[i],
+				data->ipa_client, false);
+	}
+	return 0;
+}
+
+int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset)
+{
+	struct ipa_get_data_stats_req_msg_v01 *req;
+	struct ipa_get_data_stats_resp_msg_v01 *resp;
+	int pipe_len, rc;
+
+	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("failed to allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("failed to allocate memory for stats message\n");
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+	if (reset) {
+		req->reset_stats_valid = true;
+		req->reset_stats = true;
+		IPAWANERR("reset the pipe stats\n");
+	} else {
+		/* print tethered-client enum */
+		IPAWANDBG("Tethered-client enum(%d)\n", data->ipa_client);
+	}
+
+	rc = ipa_qmi_get_data_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
+		kfree(req);
+		kfree(resp);
+		return rc;
+	} else if (reset) {
+		kfree(req);
+		kfree(resp);
+		return 0;
+	}
+
+	if (resp->dl_dst_pipe_stats_list_valid) {
+		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
+			pipe_len++) {
+			IPAWANDBG("Check entry(%d) dl_dst_pipe(%d)\n",
+				pipe_len, resp->dl_dst_pipe_stats_list
+					[pipe_len].pipe_index);
+			IPAWANDBG("dl_p_v4(%lu)v6(%lu) dl_b_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv4_packets,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv6_packets,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv4_bytes,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv6_bytes);
+			if (ipa_get_client_uplink(resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				pipe_index) == false) {
+				if (data->ipa_client == ipa_get_client(resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					pipe_index)) {
+					/* update the DL stats */
+					data->ipv4_rx_packets += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv4_packets;
+					data->ipv6_rx_packets += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv6_packets;
+					data->ipv4_rx_bytes += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv4_bytes;
+					data->ipv6_rx_bytes += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv6_bytes;
+				}
+			}
+		}
+	}
+	IPAWANDBG("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long int) data->ipv4_rx_packets,
+		(unsigned long int) data->ipv6_rx_packets,
+		(unsigned long int) data->ipv4_rx_bytes,
+		(unsigned long int) data->ipv6_rx_bytes);
+
+	if (resp->ul_src_pipe_stats_list_valid) {
+		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
+			pipe_len++) {
+			IPAWANDBG("Check entry(%d) ul_dst_pipe(%d)\n",
+				pipe_len,
+				resp->ul_src_pipe_stats_list[pipe_len].
+				pipe_index);
+			IPAWANDBG("ul_p_v4(%lu)v6(%lu)ul_b_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv4_packets,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv6_packets,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv4_bytes,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv6_bytes);
+			if (ipa_get_client_uplink(resp->
+				ul_src_pipe_stats_list[pipe_len].
+				pipe_index) == true) {
+				if (data->ipa_client == ipa_get_client(resp->
+				ul_src_pipe_stats_list[pipe_len].
+				pipe_index)) {
+					/* update the UL stats */
+					data->ipv4_tx_packets += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv4_packets;
+					data->ipv6_tx_packets += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv6_packets;
+					data->ipv4_tx_bytes += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv4_bytes;
+					data->ipv6_tx_bytes += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv6_bytes;
+				}
+			}
+		}
+	}
+	IPAWANDBG("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+		(unsigned long int) data->ipv4_tx_packets,
+		(unsigned long int) data->ipv6_tx_packets,
+		(unsigned long int) data->ipv4_tx_bytes,
+		(unsigned long int) data->ipv6_tx_bytes);
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
+
+/**
+ * ipa_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
+ * @mux_id - The MUX ID on which the quota has been reached
+ *
+ * This function broadcasts a Netlink event using the kobject of the
+ * rmnet_ipa interface in order to alert the user space that the quota
+ * on the specific interface which matches the mux_id has been reached.
+ *
+ */
+void ipa_broadcast_quota_reach_ind(u32 mux_id)
+{
+	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
+	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+	char *envp[IPA_UEVENT_NUM_EVNP] = {
+		alert_msg, iface_name_l, iface_name_m, NULL };
+	int res;
+	int index;
+
+	index = find_mux_channel_index(mux_id);
+
+	if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%u is an mux ID\n", mux_id);
+		return;
+	}
+
+	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
+			"ALERT_NAME=%s", "quotaReachedAlert");
+	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+	/* posting msg for L-release for CNE */
+	res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+		       "UPSTREAM=%s", mux_channel[index].vchannel_name);
+	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+	/* posting msg for M-release for CNE */
+	res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+		       "INTERFACE=%s", mux_channel[index].vchannel_name);
+	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+
+	IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
+		alert_msg, iface_name_l, iface_name_m);
+	kobject_uevent_env(&(ipa_netdevs[0]->dev.kobj), KOBJ_CHANGE, envp);
+}
+
+/**
+ * ipa_q6_handshake_complete() - Perform operations once Q6 is up
+ * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
+ *
+ * This function is invoked once the handshake between the IPA AP driver
+ * and IPA Q6 driver is complete. At this point, it is possible to perform
+ * operations which can't be performed until IPA Q6 driver is up.
+ *
+ */
+void ipa_q6_handshake_complete(bool ssr_bootup)
+{
+	/* It is required to recover the network stats after SSR recovery */
+	if (ssr_bootup) {
+		/*
+		 * In case the uC is required to be loaded by the Modem,
+		 * the proxy vote will be removed only when uC loading is
+		 * complete and indication is received by the AP. After SSR,
+		 * uC is already loaded. Therefore, proxy vote can be removed
+		 * once Modem init is complete.
+		 */
+		ipa2_proxy_clk_unvote();
+
+		/*
+		 * It is required to recover the network stats after
+		 * SSR recovery
+		 */
+		rmnet_ipa_get_network_stats_and_update();
+
+		/* Enable holb monitoring on Q6 pipes. */
+		ipa_q6_monitor_holb_mitigation(true);
+	}
+}
+
+static int __init ipa_wwan_init(void)
+{
+	atomic_set(&is_initialized, 0);
+	atomic_set(&is_ssr, 0);
+
+	mutex_init(&ipa_to_apps_pipe_handle_guard);
+	ipa_to_apps_hdl = -1;
+
+	ipa_qmi_init();
+
+	/* Register for Modem SSR */
+	subsys_notify_handle = subsys_notif_register_notifier(SUBSYS_MODEM,
+						&ssr_notifier);
+	if (!IS_ERR(subsys_notify_handle))
+		return platform_driver_register(&rmnet_ipa_driver);
+	else
+		return (int)PTR_ERR(subsys_notify_handle);
+}
+
+static void __exit ipa_wwan_cleanup(void)
+{
+	int ret;
+
+	ipa_qmi_cleanup();
+	mutex_destroy(&ipa_to_apps_pipe_handle_guard);
+	ret = subsys_notif_unregister_notifier(subsys_notify_handle,
+					&ssr_notifier);
+	if (ret)
+		IPAWANERR(
+		"Error subsys_notif_unregister_notifier system %s, ret=%d\n",
+		SUBSYS_MODEM, ret);
+	platform_driver_unregister(&rmnet_ipa_driver);
+}
+
+static void ipa_wwan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff)
+		IPAWANERR("Null buffer.\n");
+	kfree(buff);
+}
+
+static void ipa_rmnet_rx_cb(void *priv)
+{
+	struct net_device *dev = priv;
+	struct wwan_private *wwan_ptr;
+
+	IPAWANDBG("\n");
+
+	if (dev != ipa_netdevs[0]) {
+		IPAWANERR("Not matching with netdev\n");
+		return;
+	}
+
+	wwan_ptr = netdev_priv(dev);
+	napi_schedule(&(wwan_ptr->napi));
+}
+
+static int ipa_rmnet_poll(struct napi_struct *napi, int budget)
+{
+	int rcvd_pkts = 0;
+
+	rcvd_pkts = ipa_rx_poll(ipa_to_apps_hdl, NAPI_WEIGHT);
+	IPAWANDBG("rcvd packets: %d\n", rcvd_pkts);
+	return rcvd_pkts;
+}
+
+late_initcall(ipa_wwan_init);
+module_exit(ipa_wwan_cleanup);
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
new file mode 100644
index 0000000..811dba4
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa_fd_ioctl.c
@@ -0,0 +1,391 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include "ipa_qmi_service.h"
+
+#define DRIVER_NAME "wwan_ioctl"
+
+#ifdef CONFIG_COMPAT
+#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE, \
+		compat_uptr_t)
+#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_INDEX, \
+		compat_uptr_t)
+#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_POLL_TETHERING_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA, \
+		compat_uptr_t)
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RESET_TETHER_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+		compat_uptr_t)
+#endif
+
+static unsigned int dev_num = 1;
+static struct cdev wan_ioctl_cdev;
+static unsigned int process_ioctl = 1;
+static struct class *class;
+static dev_t device;
+
+static long wan_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 *param = NULL;
+
+	IPAWANDBG("device %s got ioctl events :>>>\n",
+		DRIVER_NAME);
+
+	if (!process_ioctl) {
+		IPAWANDBG("modem is in SSR, ignoring ioctl\n");
+		return -EAGAIN;
+	}
+
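+	/*
+	 * Each command follows the same pattern: copy the user payload into a
+	 * kernel buffer, hand it to the matching rmnet_ipa/QMI handler, and
+	 * (where the handler fills in results) copy it back to user space.
+	 */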
+	switch (cmd) {
+	case WAN_IOC_ADD_FLT_RULE:
+		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (qmi_filter_request_send(
+			(struct ipa_install_fltr_rule_req_msg_v01 *)param)) {
+			IPAWANDBG("IPACM->Q6 add filter rule failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_ADD_FLT_RULE_INDEX:
+		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (qmi_filter_notify_send(
+		(struct ipa_fltr_installed_notif_req_msg_v01 *)param)) {
+			IPAWANDBG("IPACM->Q6 rule index fail\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_VOTE_FOR_BW_MBPS:
+		IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(uint32_t);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (vote_for_bus_bw((uint32_t *)param)) {
+			IPAWANERR("Failed to vote for bus BW\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_POLL_TETHERING_STATS:
+		IPAWANDBG("device %s got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n",
+			  DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa_poll_tethering_stats(
+		(struct wan_ioctl_poll_tethering_stats *)param)) {
+			IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_DATA_QUOTA:
+		IPAWANDBG("device %s got WAN_IOCTL_SET_DATA_QUOTA :>>>\n",
+			  DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_set_data_quota);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa_set_data_quota(
+		(struct wan_ioctl_set_data_quota *)param)) {
+			IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_TETHER_CLIENT_PIPE:
+		IPAWANDBG("device %s got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n",
+				DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa_set_tether_client_pipe(
+			(struct wan_ioctl_set_tether_client_pipe *)param)) {
+			IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_QUERY_TETHER_STATS:
+		IPAWANDBG("device %s got WAN_IOC_QUERY_TETHER_STATS :>>>\n",
+				DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_query_tether_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (rmnet_ipa_query_tethering_stats(
+			(struct wan_ioctl_query_tether_stats *)param, false)) {
+			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_RESET_TETHER_STATS:
+		IPAWANDBG("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
+				DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (rmnet_ipa_query_tethering_stats(NULL, true)) {
+			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	default:
+		retval = -ENOTTY;
+	}
+	kfree(param);
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+long compat_wan_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
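+	/*
+	 * Translate 32-bit ioctl command codes to their native equivalents
+	 * and forward the compat user pointer to the regular handler.
+	 */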
+	switch (cmd) {
+	case WAN_IOC_ADD_FLT_RULE32:
+		cmd = WAN_IOC_ADD_FLT_RULE;
+		break;
+	case WAN_IOC_ADD_FLT_RULE_INDEX32:
+		cmd = WAN_IOC_ADD_FLT_RULE_INDEX;
+		break;
+	case WAN_IOC_POLL_TETHERING_STATS32:
+		cmd = WAN_IOC_POLL_TETHERING_STATS;
+		break;
+	case WAN_IOC_SET_DATA_QUOTA32:
+		cmd = WAN_IOC_SET_DATA_QUOTA;
+		break;
+	case WAN_IOC_SET_TETHER_CLIENT_PIPE32:
+		cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE;
+		break;
+	case WAN_IOC_QUERY_TETHER_STATS32:
+		cmd = WAN_IOC_QUERY_TETHER_STATS;
+		break;
+	case WAN_IOC_RESET_TETHER_STATS32:
+		cmd = WAN_IOC_RESET_TETHER_STATS;
+		break;
+	case WAN_IOC_QUERY_DL_FILTER_STATS32:
+		cmd = WAN_IOC_QUERY_DL_FILTER_STATS;
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static int wan_ioctl_open(struct inode *inode, struct file *filp)
+{
+	IPAWANDBG("\n IPA A7 wan_ioctl open OK :>>>> ");
+	return 0;
+}
+
+const struct file_operations fops = {
+	.owner = THIS_MODULE,
+	.open = wan_ioctl_open,
+	.read = NULL,
+	.unlocked_ioctl = wan_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_wan_ioctl,
+#endif
+};
+
+int wan_ioctl_init(void)
+{
+	unsigned int wan_ioctl_major = 0;
+	int ret;
+	struct device *dev;
+
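+	/*
+	 * Standard character device bring-up: reserve a dev_t region, create
+	 * the class and device node, then add the cdev. Failures unwind in
+	 * reverse order through the labels below.
+	 */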
+	device = MKDEV(wan_ioctl_major, 0);
+
+	ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME);
+	if (ret) {
+		IPAWANERR(":device_alloc err.\n");
+		goto dev_alloc_err;
+	}
+	wan_ioctl_major = MAJOR(device);
+
+	class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(class)) {
+		IPAWANERR(":class_create err.\n");
+		goto class_err;
+	}
+
+	dev = device_create(class, NULL, device,
+		NULL, DRIVER_NAME);
+	if (IS_ERR(dev)) {
+		IPAWANERR(":device_create err.\n");
+		goto device_err;
+	}
+
+	cdev_init(&wan_ioctl_cdev, &fops);
+	ret = cdev_add(&wan_ioctl_cdev, device, dev_num);
+	if (ret) {
+		IPAWANERR(":cdev_add err.\n");
+		goto cdev_add_err;
+	}
+
+	process_ioctl = 1;
+
+	IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n",
+	DRIVER_NAME, wan_ioctl_major);
+	return 0;
+
+cdev_add_err:
+	device_destroy(class, device);
+device_err:
+	class_destroy(class);
+class_err:
+	unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+	return -ENODEV;
+}
+
+void wan_ioctl_stop_qmi_messages(void)
+{
+	process_ioctl = 0;
+}
+
+void wan_ioctl_enable_qmi_messages(void)
+{
+	process_ioctl = 1;
+}
+
+void wan_ioctl_deinit(void)
+{
+	cdev_del(&wan_ioctl_cdev);
+	device_destroy(class, device);
+	class_destroy(class);
+	unregister_chrdev_region(device, dev_num);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
new file mode 100644
index 0000000..110ee03
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v2/teth_bridge.c
@@ -0,0 +1,240 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipa.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#define TETH_DBG(fmt, args...) \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#define TETH_ERR(fmt, args...) \
+	pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+/**
+ * struct teth_bridge_ctx - Tethering bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ */
+struct teth_bridge_ctx {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+};
+static struct teth_bridge_ctx *teth_ctx;
+
+/**
+* teth_bridge_ipa_cb() - Callback to handle IPA data path events
+* @priv - private data
+* @evt - event type
+* @data - event specific data (usually skb)
+*
+* This callback is called by IPA driver for exception packets from USB.
+* All exception packets are handled by Q6 and should not reach this function.
+* Packets arrive at the AP exception pipe only when they are sent from USB
+* before Q6 has set up the call.
+*/
+static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		TETH_ERR("unexpected event %d\n", evt);
+		WARN_ON(1);
+		return;
+	}
+
+	TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+	dev_kfree_skb_any(skb);
+	TETH_DBG_FUNC_EXIT();
+}
+
+/**
+* ipa2_teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for USB initialization API (please look at struct
+*  definition for more info)
+*
+* USB driver gets a pointer to a callback function (usb_notify_cb) and an
+* associated data. USB driver installs this callback function in the call to
+* ipa_connect().
+*
+* Builds IPA resource manager dependency graph.
+*
+* Return codes: 0: success,
+*		-EINVAL - Bad parameter
+*		Other negative value - Failure
+*/
+int ipa2_teth_bridge_init(struct teth_bridge_init_params *params)
+{
+	int res = 0;
+
+	TETH_DBG_FUNC_ENTRY();
+
+	if (!params) {
+		TETH_ERR("Bad parameter\n");
+		TETH_DBG_FUNC_EXIT();
+		return -EINVAL;
+	}
+
+	params->usb_notify_cb = teth_bridge_ipa_cb;
+	params->private_data = NULL;
+	params->skip_ep_cfg = true;
+
+	/* Build dependency graph */
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_USB_PROD,
+				    IPA_RM_RESOURCE_Q6_CONS);
+	if (res < 0 && res != -EINPROGRESS) {
+		TETH_ERR("ipa_rm_add_dependency() failed.\n");
+		goto bail;
+	}
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				    IPA_RM_RESOURCE_USB_CONS);
+	if (res < 0 && res != -EINPROGRESS) {
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+					IPA_RM_RESOURCE_Q6_CONS);
+		TETH_ERR("ipa_rm_add_dependency() failed.\n");
+		goto bail;
+	}
+
+	res = 0;
+	goto bail;
+
+bail:
+	TETH_DBG_FUNC_EXIT();
+	return res;
+}
+
+/**
+* ipa2_teth_bridge_disconnect() - Disconnect tethering bridge module
+*/
+int ipa2_teth_bridge_disconnect(enum ipa_client_type client)
+{
+	TETH_DBG_FUNC_ENTRY();
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+				 IPA_RM_RESOURCE_Q6_CONS);
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				 IPA_RM_RESOURCE_USB_CONS);
+	TETH_DBG_FUNC_EXIT();
+
+	return 0;
+}
+
+/**
+* ipa2_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params:	Connection info
+*
+* Return codes: 0: success
+*		-EINVAL: invalid parameters
+*		-EPERM: Operation not permitted as the bridge is already
+*		connected
+*/
+int ipa2_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+	return 0;
+}
+
+static long teth_bridge_ioctl(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	IPAERR("No ioctls are supported !\n");
+	return -ENOIOCTLCMD;
+}
+
+static const struct file_operations teth_bridge_drv_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = teth_bridge_ioctl,
+};
+
+/**
+* teth_bridge_driver_init() - Initialize tethering bridge driver
+*
+*/
+int teth_bridge_driver_init(void)
+{
+	int res;
+
+	TETH_DBG("Tethering bridge driver init\n");
+	teth_ctx = kzalloc(sizeof(*teth_ctx), GFP_KERNEL);
+	if (!teth_ctx) {
+		TETH_ERR("kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+
+	res = alloc_chrdev_region(&teth_ctx->dev_num, 0, 1,
+				  TETH_BRIDGE_DRV_NAME);
+	if (res) {
+		TETH_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	teth_ctx->dev = device_create(teth_ctx->class, NULL, teth_ctx->dev_num,
+				      teth_ctx, TETH_BRIDGE_DRV_NAME);
+	if (IS_ERR(teth_ctx->dev)) {
+		TETH_ERR(":device_create err.\n");
+		res = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&teth_ctx->cdev, &teth_bridge_drv_fops);
+	teth_ctx->cdev.owner = THIS_MODULE;
+	teth_ctx->cdev.ops = &teth_bridge_drv_fops;
+
+	res = cdev_add(&teth_ctx->cdev, teth_ctx->dev_num, 1);
+	if (res) {
+		TETH_ERR(":cdev_add err=%d\n", -res);
+		res = -ENODEV;
+		goto fail_cdev_add;
+	}
+	TETH_DBG("Tethering bridge driver init OK\n");
+
+	return 0;
+fail_cdev_add:
+	device_destroy(teth_ctx->class, teth_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	kfree(teth_ctx);
+	teth_ctx = NULL;
+
+	return res;
+}
+EXPORT_SYMBOL(teth_bridge_driver_init);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/Makefile b/drivers/platform/msm/ipa/ipa_v3/Makefile
new file mode 100644
index 0000000..a4faaea
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_IPA3) += ipahal/
+
+obj-$(CONFIG_IPA3) += ipat.o
+ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
+	ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
+	ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o
+
+obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
new file mode 100644
index 0000000..2575878
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -0,0 +1,5732 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/compat.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/fs.h>
+#include <linux/genalloc.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/rbtree.h>
+#include <linux/of_gpio.h>
+#include <linux/uaccess.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/msm_gsi.h>
+#include <linux/qcom_iommu.h>
+#include <linux/time.h>
+#include <linux/hashtable.h>
+#include <linux/jhash.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/scm.h>
+
+#ifdef CONFIG_ARM64
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_flush_range(x, y)
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#endif
+
+#define IPA_SUBSYSTEM_NAME "ipa_fws"
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define CREATE_TRACE_POINTS
+#include "ipa_trace.h"
+
+#define IPA_GPIO_IN_QUERY_CLK_IDX 0
+#define IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX 0
+#define IPA_GPIO_OUT_CLK_VOTE_IDX 1
+
+#define IPA_SUMMING_THRESHOLD (0x10)
+#define IPA_PIPE_MEM_START_OFST (0x0)
+#define IPA_PIPE_MEM_SIZE (0x0)
+#define IPA_MOBILE_AP_MODE(x) (x == IPA_MODE_MOBILE_AP_ETH || \
+			       x == IPA_MODE_MOBILE_AP_WAN || \
+			       x == IPA_MODE_MOBILE_AP_WLAN)
+#define IPA_CNOC_CLK_RATE (75 * 1000 * 1000UL)
+#define IPA_A5_MUX_HEADER_LENGTH (8)
+
+#define IPA_AGGR_MAX_STR_LENGTH (10)
+
+#define CLEANUP_TAG_PROCESS_TIMEOUT 500
+
+#define IPA_AGGR_STR_IN_BYTES(str) \
+	(strnlen((str), IPA_AGGR_MAX_STR_LENGTH - 1) + 1)
+
+#define IPA_TRANSPORT_PROD_TIMEOUT_MSEC 100
+
+#define IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE 2048
+
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_EP 0
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE 1
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE 2
+#define IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL 3
+
+#define IPA_SMEM_SIZE (8 * 1024)
+
+/* round addresses to the closest page boundary, per SMMU requirements */
+#define IPA_SMMU_ROUND_TO_PAGE(iova, pa, size, iova_p, pa_p, size_p) \
+	do { \
+		(iova_p) = rounddown((iova), PAGE_SIZE); \
+		(pa_p) = rounddown((pa), PAGE_SIZE); \
+		(size_p) = roundup((size) + (pa) - (pa_p), PAGE_SIZE); \
+	} while (0)
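+/*
+ * For example (assuming 4 KiB pages): iova = 0x12345678, pa = 0x9abcd678,
+ * size = 0x100 yields iova_p = 0x12345000, pa_p = 0x9abcd000 and
+ * size_p = 0x1000, since the 0x678 in-page offset plus 0x100 rounds up to
+ * a single page.
+ */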
+
+
+/* The relative location in /lib/firmware where the FWs will reside */
+#define IPA_FWS_PATH "ipa/ipa_fws.elf"
+
+#ifdef CONFIG_COMPAT
+#define IPA_IOC_ADD_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_HDR32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_DEL_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					compat_uptr_t)
+#define IPA_IOC_GET_RT_TBL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				compat_uptr_t)
+#define IPA_IOC_COPY_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_TX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_RX_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					compat_uptr_t)
+#define IPA_IOC_GET_HDR32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				compat_uptr_t)
+#define IPA_IOC_ALLOC_NAT_MEM32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				compat_uptr_t)
+#define IPA_IOC_V4_INIT_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_NAT_DMA32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				compat_uptr_t)
+#define IPA_IOC_V4_DEL_NAT32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				compat_uptr_t)
+#define IPA_IOC_GET_NAT_OFFSET32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				compat_uptr_t)
+#define IPA_IOC_PULL_MSG32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				compat_uptr_t)
+#define IPA_IOC_RM_ADD_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_RM_DEL_DEPENDENCY32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				compat_uptr_t)
+#define IPA_IOC_GENERATE_FLT_EQ32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				compat_uptr_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				compat_uptr_t)
+#define IPA_IOC_WRITE_QMAPID32  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_FLT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_FLT_RULE, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				compat_uptr_t)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32 _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+					compat_uptr_t)
+#define IPA_IOC_ADD_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_DEL_HDR_PROC_CTX32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				compat_uptr_t)
+#define IPA_IOC_MDFY_RT_RULE32 _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_MDFY_RT_RULE, \
+				compat_uptr_t)
+
+/**
+ * struct ipa3_ioc_nat_alloc_mem32 - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa3_ioc_nat_alloc_mem32 {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	compat_size_t size;
+	compat_off_t offset;
+};
+#endif
+
+#define IPA_TZ_UNLOCK_ATTRIBUTE 0x0C0311
+#define TZ_MEM_PROTECT_REGION_ID 0x10
+
+struct tz_smmu_ipa_protect_region_iovec_s {
+	u64 input_addr;
+	u64 output_addr;
+	u64 size;
+	u32 attr;
+} __packed;
+
+struct tz_smmu_ipa_protect_region_s {
+	phys_addr_t iovec_buf;
+	u32 size_bytes;
+} __packed;
+
+static void ipa3_start_tag_process(struct work_struct *work);
+static DECLARE_WORK(ipa3_tag_work, ipa3_start_tag_process);
+
+static void ipa3_sps_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_sps_release_resource_work,
+	ipa3_sps_release_resource);
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify);
+
+static void ipa_gsi_request_resource(struct work_struct *work);
+static DECLARE_WORK(ipa_gsi_request_resource_work,
+	ipa_gsi_request_resource);
+
+static void ipa_gsi_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_gsi_release_resource_work,
+	ipa_gsi_release_resource);
+
+static struct ipa3_plat_drv_res ipa3_res = {0, };
+struct msm_bus_scale_pdata *ipa3_bus_scale_table;
+
+static struct clk *ipa3_clk;
+
+struct ipa3_context *ipa3_ctx;
+static struct device *master_dev;
+struct platform_device *ipa3_pdev;
+static struct {
+	bool present;
+	bool arm_smmu;
+	bool fast_map;
+	bool s1_bypass;
+	bool use_64_bit_dma_mask;
+	u32 ipa_base;
+	u32 ipa_size;
+} smmu_info;
+
+static char *active_clients_table_buf;
+
+int ipa3_active_clients_log_print_buffer(char *buf, int size)
+{
+	int i;
+	int nbytes;
+	int cnt = 0;
+	int start_idx;
+	int end_idx;
+
+	start_idx = (ipa3_ctx->ipa3_active_clients_logging.log_tail + 1) %
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	end_idx = ipa3_ctx->ipa3_active_clients_logging.log_head;
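+	/*
+	 * The log is a circular buffer: start one entry past the tail and
+	 * walk towards the head, wrapping modulo the number of lines.
+	 */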
+	for (i = start_idx; i != end_idx;
+		i = (i + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) {
+		nbytes = scnprintf(buf + cnt, size - cnt, "%s\n",
+				ipa3_ctx->ipa3_active_clients_logging
+				.log_buffer[i]);
+		cnt += nbytes;
+	}
+
+	return cnt;
+}
+
+int ipa3_active_clients_log_print_table(char *buf, int size)
+{
+	int i;
+	struct ipa3_active_client_htable_entry *iterator;
+	int cnt = 0;
+
+	cnt = scnprintf(buf, size, "\n---- Active Clients Table ----\n");
+	hash_for_each(ipa3_ctx->ipa3_active_clients_logging.htable, i,
+			iterator, list) {
+		switch (iterator->type) {
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_EP:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d ENDPOINT\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SIMPLE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SIMPLE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_RESOURCE:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d RESOURCE\n",
+					iterator->id_string, iterator->count);
+			break;
+		case IPA3_ACTIVE_CLIENT_LOG_TYPE_SPECIAL:
+			cnt += scnprintf(buf + cnt, size - cnt,
+					"%-40s %-3d SPECIAL\n",
+					iterator->id_string, iterator->count);
+			break;
+		default:
+			IPAERR("Trying to print illegal active_clients type");
+			break;
+		}
+	}
+	cnt += scnprintf(buf + cnt, size - cnt,
+			"\nTotal active clients count: %d\n",
+			ipa3_ctx->ipa3_active_clients.cnt);
+
+	return cnt;
+}
+
+static int ipa3_active_clients_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	ipa3_active_clients_lock();
+	ipa3_active_clients_log_print_table(active_clients_table_buf,
+			IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE);
+	IPAERR("%s", active_clients_table_buf);
+	ipa3_active_clients_unlock();
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_active_clients_panic_blk = {
+	.notifier_call  = ipa3_active_clients_panic_notifier,
+};
+
+static int ipa3_active_clients_log_insert(const char *string)
+{
+	int head;
+	int tail;
+
+	if (!ipa3_ctx->ipa3_active_clients_logging.log_rdy)
+		return -EPERM;
+
+	head = ipa3_ctx->ipa3_active_clients_logging.log_head;
+	tail = ipa3_ctx->ipa3_active_clients_logging.log_tail;
+
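+	/*
+	 * Pad the slot with '_' before copying so that leftovers from a longer
+	 * previous entry are cleared. When the buffer is full, advancing the
+	 * tail below drops the oldest entry.
+	 */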
+	memset(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], '_',
+			IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	strlcpy(ipa3_ctx->ipa3_active_clients_logging.log_buffer[head], string,
+			(size_t)IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN);
+	head = (head + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+	if (tail == head)
+		tail = (tail + 1) % IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES;
+
+	ipa3_ctx->ipa3_active_clients_logging.log_tail = tail;
+	ipa3_ctx->ipa3_active_clients_logging.log_head = head;
+
+	return 0;
+}
+
+static int ipa3_active_clients_log_init(void)
+{
+	int i;
+
+	ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] = kzalloc(
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES *
+			sizeof(char[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN]),
+			GFP_KERNEL);
+	active_clients_table_buf = kzalloc(sizeof(
+			char[IPA3_ACTIVE_CLIENTS_TABLE_BUF_SIZE]), GFP_KERNEL);
+	if (ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] == NULL) {
+		pr_err("Active Clients Logging memory allocation failed");
+		goto bail;
+	}
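+	/* The log is one contiguous allocation; point each line at its slot */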
+	for (i = 0; i < IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES; i++) {
+		ipa3_ctx->ipa3_active_clients_logging.log_buffer[i] =
+			ipa3_ctx->ipa3_active_clients_logging.log_buffer[0] +
+			(IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN * i);
+	}
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	hash_init(ipa3_ctx->ipa3_active_clients_logging.htable);
+	atomic_notifier_chain_register(&panic_notifier_list,
+			&ipa3_active_clients_panic_blk);
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 1;
+
+	return 0;
+
+bail:
+	return -ENOMEM;
+}
+
+void ipa3_active_clients_log_clear(void)
+{
+	ipa3_active_clients_lock();
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+	ipa3_active_clients_unlock();
+}
+
+static void ipa3_active_clients_log_destroy(void)
+{
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = 0;
+	kfree(ipa3_ctx->ipa3_active_clients_logging.log_buffer[0]);
+	ipa3_ctx->ipa3_active_clients_logging.log_head = 0;
+	ipa3_ctx->ipa3_active_clients_logging.log_tail =
+			IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES - 1;
+}
+
+enum ipa_smmu_cb_type {
+	IPA_SMMU_CB_AP,
+	IPA_SMMU_CB_WLAN,
+	IPA_SMMU_CB_UC,
+	IPA_SMMU_CB_MAX
+
+};
+
+static struct ipa_smmu_cb_ctx smmu_cb[IPA_SMMU_CB_MAX];
+
+struct iommu_domain *ipa3_get_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_AP].valid)
+		return smmu_cb[IPA_SMMU_CB_AP].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa3_get_uc_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_UC].valid)
+		return smmu_cb[IPA_SMMU_CB_UC].mapping->domain;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void)
+{
+	if (smmu_cb[IPA_SMMU_CB_WLAN].valid)
+		return smmu_cb[IPA_SMMU_CB_WLAN].iommu;
+
+	IPAERR("CB not valid\n");
+
+	return NULL;
+}
+
+
+struct device *ipa3_get_dma_dev(void)
+{
+	return ipa3_ctx->pdev;
+}
+
+/**
+ * ipa3_get_smmu_ctx() - Return the AP smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_AP];
+}
+
+/**
+ * ipa3_get_wlan_smmu_ctx()- Return the wlan smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_WLAN];
+}
+
+/**
+ * ipa3_get_uc_smmu_ctx()- Return the uc smmu context
+ *
+ * Return value: pointer to smmu context address
+ */
+struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void)
+{
+	return &smmu_cb[IPA_SMMU_CB_UC];
+}
+
+static int ipa3_open(struct inode *inode, struct file *filp)
+{
+	struct ipa3_context *ctx = NULL;
+
+	IPADBG_LOW("ENTER\n");
+	ctx = container_of(inode->i_cdev, struct ipa3_context, cdev);
+	filp->private_data = ctx;
+
+	return 0;
+}
+
+/**
+* ipa3_flow_control() - Enable/Disable flow control on a particular client.
+* Return codes:
+* None
+*/
+void ipa3_flow_control(enum ipa_client_type ipa_client,
+		bool enable, uint32_t qmap_id)
+{
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	int ep_idx;
+	struct ipa3_ep_context *ep;
+
+	/* Check if tethered flow control is needed or not.*/
+	if (!ipa3_ctx->tethered_flow_control) {
+		IPADBG("Apps flow control is not needed\n");
+		return;
+	}
+
+	/* Check if ep is valid. */
+	ep_idx = ipa3_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPADBG("Invalid IPA client\n");
+		return;
+	}
+
+	ep = &ipa3_ctx->ep[ep_idx];
+	if (!ep->valid || (ep->client != IPA_CLIENT_USB_PROD)) {
+		IPADBG("EP not valid/Not applicable for client.\n");
+		return;
+	}
+
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	/* Check if the QMAP_ID matches. */
+	if (ep->cfg.meta.qmap_id != qmap_id) {
+		IPADBG("Flow control ind not for same flow: %u %u\n",
+			ep->cfg.meta.qmap_id, qmap_id);
+		spin_unlock(&ipa3_ctx->disconnect_lock);
+		return;
+	}
+	if (!ep->disconnect_in_progress) {
+		if (enable) {
+			IPADBG("Enabling Flow\n");
+			ep_ctrl.ipa_ep_delay = false;
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_enable);
+		} else {
+			IPADBG("Disabling Flow\n");
+			ep_ctrl.ipa_ep_delay = true;
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.flow_disable);
+		}
+		ep_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(ep_idx, &ep_ctrl);
+	} else {
+		IPADBG("EP disconnect is in progress\n");
+	}
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+}
+
+static void ipa3_wan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAERR("Null buffer\n");
+		return;
+	}
+
+	if (type != WAN_UPSTREAM_ROUTE_ADD &&
+	    type != WAN_UPSTREAM_ROUTE_DEL &&
+	    type != WAN_EMBMS_CONNECT) {
+		IPAERR("Wrong type given. buff %p type %d\n", buff, type);
+		return;
+	}
+
+	kfree(buff);
+}
+
+static int ipa3_send_wan_msg(unsigned long usr_param, uint8_t msg_type)
+{
+	int retval;
+	struct ipa_wan_msg *wan_msg;
+	struct ipa_msg_meta msg_meta;
+
+	wan_msg = kzalloc(sizeof(struct ipa_wan_msg), GFP_KERNEL);
+	if (!wan_msg) {
+		IPAERR("no memory\n");
+		return -ENOMEM;
+	}
+
+	if (copy_from_user((u8 *)wan_msg, (u8 *)usr_param,
+		sizeof(struct ipa_wan_msg))) {
+		kfree(wan_msg);
+		return -EFAULT;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = msg_type;
+	msg_meta.msg_len = sizeof(struct ipa_wan_msg);
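+	/* On success, wan_msg is freed by ipa3_wan_msg_free_cb once consumed */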
+	retval = ipa3_send_msg(&msg_meta, wan_msg, ipa3_wan_msg_free_cb);
+	if (retval) {
+		IPAERR("ipa3_send_msg failed: %d\n", retval);
+		kfree(wan_msg);
+		return retval;
+	}
+
+	return 0;
+}
+
+
+static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 header[128] = { 0 };
+	u8 *param = NULL;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+	struct ipa_ioc_v4_nat_init nat_init;
+	struct ipa_ioc_v4_nat_del nat_del;
+	struct ipa_ioc_rm_dependency rm_depend;
+	size_t sz;
+	int pre_entry;
+
+	IPADBG("cmd=%x nr=%d\n", cmd, _IOC_NR(cmd));
+
+	if (!ipa3_is_ready()) {
+		IPAERR("IPA not ready, waiting for init completion\n");
+		wait_for_completion(&ipa3_ctx->init_completion_obj);
+	}
+
+	if (_IOC_TYPE(cmd) != IPA_IOC_MAGIC)
+		return -ENOTTY;
+	if (_IOC_NR(cmd) >= IPA_IOCTL_MAX)
+		return -ENOTTY;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	switch (cmd) {
+	case IPA_IOC_ALLOC_NAT_MEM:
+		if (copy_from_user((u8 *)&nat_mem, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa3_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem,
+					sizeof(struct ipa_ioc_nat_alloc_mem))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_V4_INIT_NAT:
+		if (copy_from_user((u8 *)&nat_init, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_init))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_init_cmd(&nat_init)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_NAT_DMA:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_nat_dma_cmd))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_nat_dma_cmd *)header)->entries;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_nat_dma_cmd) +
+		   pre_entry * sizeof(struct ipa_ioc_nat_dma_one);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_nat_dma_cmd *)param)->entries
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_nat_dma_cmd *)param)->entries,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_dma_cmd((struct ipa_ioc_nat_dma_cmd *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_V4_DEL_NAT:
+		if (copy_from_user((u8 *)&nat_del, (u8 *)arg,
+					sizeof(struct ipa_ioc_v4_nat_del))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_nat_del_cmd(&nat_del)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr *)header)->num_hdrs;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr) +
+		   pre_entry * sizeof(struct ipa_hdr_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr *)param)->num_hdrs
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr *)param)->num_hdrs,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_hdr((struct ipa_ioc_add_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr) +
+		   pre_entry * sizeof(struct ipa_hdr_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_hdr((struct ipa_ioc_del_hdr *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_rt_rule((struct ipa_ioc_add_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_RT_RULE_AFTER:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_add_rt_rule_after))) {
+
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_rt_rule_after *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_rt_rule_after) +
+		   pre_entry * sizeof(struct ipa_rt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_rt_rule_after *)param)->
+			num_rules != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_rt_rule_after *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_rt_rule_after(
+			(struct ipa_ioc_add_rt_rule_after *)param)) {
+
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_mdfy_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_rt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_mdfy);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_rt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_rt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_mdfy_rt_rule((struct ipa_ioc_mdfy_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_RT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_rt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_rt_rule *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_rt_rule) +
+		   pre_entry * sizeof(struct ipa_rt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_rt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_rt_rule *)param)->num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_rt_rule((struct ipa_ioc_del_rt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_add_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_ADD_FLT_RULE_AFTER:
+		if (copy_from_user(header, (u8 *)arg,
+				sizeof(struct ipa_ioc_add_flt_rule_after))) {
+
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_flt_rule_after *)header)->
+			num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_flt_rule_after) +
+		   pre_entry * sizeof(struct ipa_flt_rule_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_flt_rule_after *)param)->
+			num_rules != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_flt_rule_after *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_flt_rule_after(
+				(struct ipa_ioc_add_flt_rule_after *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_DEL_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_del_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_flt_rule *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_flt_rule *)param)->num_hdls
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_flt_rule *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_MDFY_FLT_RULE:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_mdfy_flt_rule))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_mdfy_flt_rule *)header)->num_rules;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_mdfy_flt_rule) +
+		   pre_entry * sizeof(struct ipa_flt_rule_mdfy);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_mdfy_flt_rule *)param)->num_rules
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_mdfy_flt_rule *)param)->
+				num_rules,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_mdfy_flt_rule((struct ipa_ioc_mdfy_flt_rule *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_COMMIT_HDR:
+		retval = ipa3_commit_hdr();
+		break;
+	case IPA_IOC_RESET_HDR:
+		retval = ipa3_reset_hdr();
+		break;
+	case IPA_IOC_COMMIT_RT:
+		retval = ipa3_commit_rt(arg);
+		break;
+	case IPA_IOC_RESET_RT:
+		retval = ipa3_reset_rt(arg);
+		break;
+	case IPA_IOC_COMMIT_FLT:
+		retval = ipa3_commit_flt(arg);
+		break;
+	case IPA_IOC_RESET_FLT:
+		retval = ipa3_reset_flt(arg);
+		break;
+	case IPA_IOC_GET_RT_TBL:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_get_rt_tbl((struct ipa_ioc_get_rt_tbl *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_rt_tbl))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_RT_TBL:
+		retval = ipa3_put_rt_tbl(arg);
+		break;
+	case IPA_IOC_GET_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_get_hdr((struct ipa_ioc_get_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_get_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PUT_HDR:
+		retval = ipa3_put_hdr(arg);
+		break;
+	case IPA_IOC_SET_FLT:
+		retval = ipa3_cfg_filter(arg);
+		break;
+	case IPA_IOC_COPY_HDR:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_copy_hdr((struct ipa_ioc_copy_hdr *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_copy_hdr))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf((struct ipa_ioc_query_intf *)header)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_query_intf))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_tx_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
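+		/* bound the user-supplied count before sizing the allocation */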
+		if (((struct ipa_ioc_query_intf_tx_props *)header)->num_tx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_tx_props *)
+			header)->num_tx_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_tx_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_tx_props *)
+			param)->num_tx_props
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_tx_props *)
+				param)->num_tx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_tx_props(
+				(struct ipa_ioc_query_intf_tx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_rx_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_rx_props *)header)->num_rx_props
+				> IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_rx_props *)
+			header)->num_rx_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_rx_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_rx_props *)
+			param)->num_rx_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_rx_props *)
+				param)->num_rx_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_rx_props(
+				(struct ipa_ioc_query_intf_rx_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS:
+		sz = sizeof(struct ipa_ioc_query_intf_ext_props);
+		if (copy_from_user(header, (u8 *)arg, sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (((struct ipa_ioc_query_intf_ext_props *)
+				header)->num_ext_props > IPA_NUM_PROPS_MAX) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_query_intf_ext_props *)
+			header)->num_ext_props;
+		pyld_sz = sz + pre_entry *
+			sizeof(struct ipa_ioc_ext_intf_prop);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_query_intf_ext_props *)
+			param)->num_ext_props != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_query_intf_ext_props *)
+				param)->num_ext_props, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_intf_ext_props(
+				(struct ipa_ioc_query_intf_ext_props *)param)) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_PULL_MSG:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_msg_meta))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+		   ((struct ipa_msg_meta *)header)->msg_len;
+		pyld_sz = sizeof(struct ipa_msg_meta) +
+		   pre_entry;
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_msg_meta *)param)->msg_len
+			!= pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_msg_meta *)param)->msg_len,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_pull_msg((struct ipa_msg_meta *)param,
+				 (char *)param + sizeof(struct ipa_msg_meta),
+				 ((struct ipa_msg_meta *)param)->msg_len) !=
+		       ((struct ipa_msg_meta *)param)->msg_len) {
+			retval = -1;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY:
+		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+				sizeof(struct ipa_ioc_rm_dependency))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa_rm_add_dependency_from_ioctl(
+			rm_depend.resource_name, rm_depend.depends_on_name);
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY:
+		if (copy_from_user((u8 *)&rm_depend, (u8 *)arg,
+				sizeof(struct ipa_ioc_rm_dependency))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = ipa_rm_delete_dependency_from_ioctl(
+			rm_depend.resource_name, rm_depend.depends_on_name);
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ:
+		{
+			struct ipa_ioc_generate_flt_eq flt_eq;
+
+			if (copy_from_user(&flt_eq, (u8 *)arg,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			if (ipahal_flt_generate_equation(flt_eq.ip,
+				&flt_eq.attrib, &flt_eq.eq_attrib)) {
+				retval = -EFAULT;
+				break;
+			}
+			if (copy_to_user((u8 *)arg, &flt_eq,
+				sizeof(struct ipa_ioc_generate_flt_eq))) {
+				retval = -EFAULT;
+				break;
+			}
+			break;
+		}
+	case IPA_IOC_QUERY_EP_MAPPING:
+		{
+			retval = ipa3_get_ep_mapping(arg);
+			break;
+		}
+	case IPA_IOC_QUERY_RT_TBL_INDEX:
+		if (copy_from_user(header, (u8 *)arg,
+				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_query_rt_index(
+			 (struct ipa_ioc_get_rt_tbl_indx *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+				sizeof(struct ipa_ioc_get_rt_tbl_indx))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_WRITE_QMAPID:
+		if (copy_from_user(header, (u8 *)arg,
+					sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_write_qmap_id((struct ipa_ioc_write_qmapid *)header)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, header,
+					sizeof(struct ipa_ioc_write_qmapid))) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD:
+		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_ADD);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL:
+		retval = ipa3_send_wan_msg(arg, WAN_UPSTREAM_ROUTE_DEL);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED:
+		retval = ipa3_send_wan_msg(arg, WAN_EMBMS_CONNECT);
+		if (retval) {
+			IPAERR("ipa3_send_wan_msg failed: %d\n", retval);
+			break;
+		}
+		break;
+	case IPA_IOC_ADD_HDR_PROC_CTX:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_add_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_add_hdr_proc_ctx *)
+			header)->num_proc_ctxs;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_add_hdr_proc_ctx) +
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_add);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_add_hdr_proc_ctx *)
+			param)->num_proc_ctxs != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_add_hdr_proc_ctx *)
+				param)->num_proc_ctxs, pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_add_hdr_proc_ctx(
+			(struct ipa_ioc_add_hdr_proc_ctx *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+	case IPA_IOC_DEL_HDR_PROC_CTX:
+		if (copy_from_user(header, (u8 *)arg,
+			sizeof(struct ipa_ioc_del_hdr_proc_ctx))) {
+			retval = -EFAULT;
+			break;
+		}
+		pre_entry =
+			((struct ipa_ioc_del_hdr_proc_ctx *)header)->num_hdls;
+		pyld_sz =
+		   sizeof(struct ipa_ioc_del_hdr_proc_ctx) +
+		   pre_entry * sizeof(struct ipa_hdr_proc_ctx_del);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		/* add check in case user-space module compromised */
+		if (unlikely(((struct ipa_ioc_del_hdr_proc_ctx *)
+			param)->num_hdls != pre_entry)) {
+			IPAERR("current %d pre %d\n",
+				((struct ipa_ioc_del_hdr_proc_ctx *)param)->
+				num_hdls,
+				pre_entry);
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_del_hdr_proc_ctx(
+			(struct ipa_ioc_del_hdr_proc_ctx *)param)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case IPA_IOC_GET_HW_VERSION:
+		pyld_sz = sizeof(enum ipa_hw_type);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		memcpy(param, &ipa3_ctx->ipa_hw_type, pyld_sz);
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	default:        /* redundant, as cmd was checked against MAXNR */
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -ENOTTY;
+	}
+	kfree(param);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return retval;
+}
+
+/**
+ * ipa3_setup_dflt_rt_tables() - Setup default routing tables
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate memory
+ * -EPERM: failed to add the tables
+ */
+int ipa3_setup_dflt_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = ipa3_ctx->excp_hdr_hdl;
+	rt_rule_entry->rule.retain_hdr = 1;
+
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa3_ctx->dflt_v4_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAERR("fail to add dflt v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPADBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	ipa3_ctx->dflt_v6_rt_rule_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/*
+	 * Because these tables are the very first to be added, they both get
+	 * index 0, which is essential for programming the "route" end-point
+	 * configuration.
+	 */
+
+	kfree(rt_rule);
+
+	return 0;
+}
+
+static int ipa3_setup_exception_path(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	struct ipahal_reg_route route = { 0 };
+	int ret;
+
+	/* install the basic exception header */
+	hdr = kzalloc(sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add), GFP_KERNEL);
+	if (!hdr) {
+		IPAERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	strlcpy(hdr_entry->name, IPA_LAN_RX_HDR_NAME, IPA_RESOURCE_NAME_MAX);
+	hdr_entry->hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+
+	if (ipa3_add_hdr(hdr)) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAERR("fail to add exception hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ipa3_ctx->excp_hdr_hdl = hdr_entry->hdr_hdl;
+
+	/* set the route register to pass exception packets to Apps */
+	route.route_def_pipe = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	route.route_frag_def_pipe = ipa3_get_ep_mapping(
+		IPA_CLIENT_APPS_LAN_CONS);
+	route.route_def_hdr_table = !ipa3_ctx->hdr_tbl_lcl;
+	route.route_def_retain_hdr = 1;
+
+	if (ipa3_cfg_route(&route)) {
+		IPAERR("fail to configure exception route\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static int ipa3_init_smem_region(int memory_region_size,
+				int memory_region_offset)
+{
+	struct ipahal_imm_cmd_dma_shared_mem cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa3_desc desc;
+	struct ipa_mem_buffer mem;
+	int rc;
+
+	if (memory_region_size == 0)
+		return 0;
+
+	memset(&desc, 0, sizeof(desc));
+	memset(&cmd, 0, sizeof(cmd));
+	memset(&mem, 0, sizeof(mem));
+
+	mem.size = memory_region_size;
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size,
+		&mem.phys_base, GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("failed to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+
+	memset(mem.base, 0, mem.size);
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr = mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		memory_region_offset;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		return -ENOMEM;
+	}
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	rc = ipa3_send_cmd(1, &desc);
+	if (rc) {
+		IPAERR("failed to send immediate command (error %d)\n", rc);
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+		mem.phys_base);
+
+	return rc;
+}
+
+/**
+ * ipa3_init_q6_smem() - Initialize Q6 general memory and
+ *                       header memory regions in IPA.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: failed to allocate dma memory
+ * -EFAULT: failed to send IPA command to initialize the memory
+ */
+int ipa3_init_q6_smem(void)
+{
+	int rc;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_size),
+		IPA_MEM_PART(modem_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_size),
+		IPA_MEM_PART(modem_hdr_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem HDRs RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_hdr_proc_ctx_size),
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem proc ctx RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+
+	rc = ipa3_init_smem_region(IPA_MEM_PART(modem_comp_decomp_size),
+		IPA_MEM_PART(modem_comp_decomp_ofst));
+	if (rc) {
+		IPAERR("failed to initialize Modem Comp/Decomp RAM memory\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return rc;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return rc;
+}
+
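+/* Descriptor completion callback that frees the immediate command payload */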
+static void ipa3_destroy_imm(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
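+/*
+ * Set or clear the DELAY flag on every valid Q6 producer endpoint, so that
+ * traffic from Q6 can be held back (or released) around modem SSR.
+ */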
+static void ipa3_q6_pipe_delay(bool delay)
+{
+	int client_idx;
+	int ep_idx;
+	struct ipa_ep_cfg_ctrl ep_ctrl;
+
+	memset(&ep_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_ctrl.ipa_ep_delay = delay;
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n,
+				ep_idx, &ep_ctrl);
+		}
+	}
+}
+
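+/*
+ * Suspend every valid Q6 consumer endpoint and enable immediate HOLB drop
+ * (timer = 0) so traffic toward Q6 is discarded rather than blocking the
+ * pipeline while the modem is down.
+ */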
+static void ipa3_q6_avoid_holb(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa_ep_cfg_ctrl ep_suspend;
+	struct ipa_ep_cfg_holb ep_holb;
+
+	memset(&ep_suspend, 0, sizeof(ep_suspend));
+	memset(&ep_holb, 0, sizeof(ep_holb));
+
+	ep_suspend.ipa_ep_suspend = true;
+	ep_holb.tmr_val = 0;
+	ep_holb.en = 1;
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+			ep_idx = ipa3_get_ep_mapping(client_idx);
+			if (ep_idx == -1)
+				continue;
+
+			/*
+			 * ipa3_cfg_ep_holb() is not used here because it
+			 * rejects Q6 pipes: from the APPS perspective they
+			 * are not valid endpoints, so the call would fail.
+			 */
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+				ep_idx, &ep_holb);
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+				ep_idx, &ep_holb);
+
+			ipahal_write_reg_n_fields(
+				IPA_ENDP_INIT_CTRL_n,
+				ep_idx, &ep_suspend);
+		}
+	}
+}
+
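+/*
+ * Overwrite the Q6-owned filter table headers of the given IP version and
+ * rule type with an empty table image, for every filtering pipe that the
+ * AP did not configure.
+ */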
+static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int retval = 0;
+	int pipe_idx;
+	int flt_idx = 0;
+	int num_cmds = 0;
+	int index;
+	u32 lcl_addr_mem_part;
+	u32 lcl_hdr_sz;
+	struct ipa_mem_buffer mem;
+
+	IPADBG("Entry\n");
+
+	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+		return -EINVAL;
+	}
+
+	/* At most one filtering table (hence one descriptor) per filtering pipe */
+	desc = kcalloc(ipa3_ctx->ep_flt_num, sizeof(struct ipa3_desc),
+		GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	cmd_pyld = kcalloc(ipa3_ctx->ep_flt_num,
+		sizeof(struct ipahal_imm_cmd_pyld *), GFP_KERNEL);
+	if (!cmd_pyld) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+
+	if (ip == IPA_IP_v4) {
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_flt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+		}
+	} else {
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_hash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_flt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+		}
+	}
+
+	retval = ipahal_flt_generate_empty_img(1, lcl_hdr_sz, lcl_hdr_sz,
+		0, &mem);
+	if (retval) {
+		IPAERR("failed to generate flt single tbl empty img\n");
+		goto free_cmd_pyld;
+	}
+
+	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes; pipe_idx++) {
+		if (!ipa_is_ep_support_flt(pipe_idx))
+			continue;
+
+		/*
+		 * Only clear tables of filtering pipes that the AP did not
+		 * configure: pipes that are not valid from the AP's point of
+		 * view, or that are connected with EP configuration skipped.
+		 */
+		if (!ipa3_ctx->ep[pipe_idx].valid ||
+		    ipa3_ctx->ep[pipe_idx].skip_ep_cfg) {
+
+			cmd.is_read = false;
+			cmd.skip_pipeline_clear = false;
+			cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			cmd.size = mem.size;
+			cmd.system_addr = mem.phys_base;
+			cmd.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				lcl_addr_mem_part +
+				ipahal_get_hw_tbl_hdr_width() +
+				flt_idx * ipahal_get_hw_tbl_hdr_width();
+			cmd_pyld[num_cmds] = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+			if (!cmd_pyld[num_cmds]) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				retval = -ENOMEM;
+				goto free_empty_img;
+			}
+			desc[num_cmds].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[num_cmds].pyld = cmd_pyld[num_cmds]->data;
+			desc[num_cmds].len = cmd_pyld[num_cmds]->len;
+			desc[num_cmds].type = IPA_IMM_CMD_DESC;
+			num_cmds++;
+		}
+
+		flt_idx++;
+	}
+
+	IPADBG("Sending %d descriptors for flt tbl clearing\n", num_cmds);
+	retval = ipa3_send_cmd(num_cmds, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+free_empty_img:
+	ipahal_free_dma_mem(&mem);
+free_cmd_pyld:
+	for (index = 0; index < num_cmds; index++)
+		ipahal_destroy_imm_cmd(cmd_pyld[index]);
+	kfree(cmd_pyld);
+free_desc:
+	kfree(desc);
+	return retval;
+}
+
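+/*
+ * Overwrite the modem-reserved routing table indices of the given IP
+ * version and rule type with an empty table image.
+ */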
+static int ipa3_q6_clean_q6_rt_tbls(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_dma_shared_mem cmd = {0};
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	int retval = 0;
+	u32 modem_rt_index_lo;
+	u32 modem_rt_index_hi;
+	u32 lcl_addr_mem_part;
+	u32 lcl_hdr_sz;
+	struct ipa_mem_buffer mem;
+
+	IPADBG("Entry\n");
+
+	if ((ip >= IPA_IP_MAX) || (rlt >= IPA_RULE_TYPE_MAX)) {
+		IPAERR("Input Err: ip=%d ; rlt=%d\n", ip, rlt);
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4) {
+		modem_rt_index_lo = IPA_MEM_PART(v4_modem_rt_index_lo);
+		modem_rt_index_hi = IPA_MEM_PART(v4_modem_rt_index_hi);
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_hash_ofst);
+			lcl_hdr_sz =  IPA_MEM_PART(v4_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v4_rt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v4_flt_nhash_size);
+		}
+	} else {
+		modem_rt_index_lo = IPA_MEM_PART(v6_modem_rt_index_lo);
+		modem_rt_index_hi = IPA_MEM_PART(v6_modem_rt_index_hi);
+		if (rlt == IPA_RULE_HASHABLE) {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_hash_ofst);
+			lcl_hdr_sz =  IPA_MEM_PART(v6_flt_hash_size);
+		} else {
+			lcl_addr_mem_part = IPA_MEM_PART(v6_rt_nhash_ofst);
+			lcl_hdr_sz = IPA_MEM_PART(v6_flt_nhash_size);
+		}
+	}
+
+	retval = ipahal_rt_generate_empty_img(
+		modem_rt_index_hi - modem_rt_index_lo + 1,
+		lcl_hdr_sz, lcl_hdr_sz, &mem);
+	if (retval) {
+		IPAERR("fail generate empty rt img\n");
+		return -ENOMEM;
+	}
+
+	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		retval = -ENOMEM;
+		goto free_empty_img;
+	}
+
+	cmd.is_read = false;
+	cmd.skip_pipeline_clear = false;
+	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	cmd.size = mem.size;
+	cmd.system_addr =  mem.phys_base;
+	cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		lcl_addr_mem_part +
+		modem_rt_index_lo * ipahal_get_hw_tbl_hdr_width();
+	cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct dma_shared_mem imm cmd\n");
+		retval = -ENOMEM;
+		goto free_desc;
+	}
+	desc->opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc->pyld = cmd_pyld->data;
+	desc->len = cmd_pyld->len;
+	desc->type = IPA_IMM_CMD_DESC;
+
+	IPADBG("Sending 1 descriptor for rt tbl clearing\n");
+	retval = ipa3_send_cmd(1, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_desc:
+	kfree(desc);
+free_empty_img:
+	ipahal_free_dma_mem(&mem);
+	return retval;
+}
+
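+/*
+ * Clean all Q6 filter and routing tables (v4/v6, hashable and
+ * non-hashable) and then flush the filter/route hash caches.
+ */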
+static int ipa3_q6_clean_q6_tables(void)
+{
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	int retval;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+
+	IPADBG("Entry\n");
+
+
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v4/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v6/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v4/non-hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_flt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 flt tbls (v6/non-hashable)\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v4/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v6/hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v4, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v4/non-hashable)\n");
+		return -EFAULT;
+	}
+	if (ipa3_q6_clean_q6_rt_tbls(IPA_IP_v6, IPA_RULE_NON_HASHABLE)) {
+		IPAERR("failed to clean q6 rt tbls (v6/non-hashable)\n");
+		return -EFAULT;
+	}
+
+	/* Flush rules cache */
+	desc = kzalloc(sizeof(struct ipa3_desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	flush.v4_flt = true;
+	flush.v4_rt = true;
+	flush.v6_flt = true;
+	flush.v6_rt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&reg_write_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct register_write imm cmd\n");
+		retval = -EFAULT;
+		goto bail_desc;
+	}
+	desc->opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc->pyld = cmd_pyld->data;
+	desc->len = cmd_pyld->len;
+	desc->type = IPA_IMM_CMD_DESC;
+
+	IPADBG("Sending 1 descriptor for tbls flush\n");
+	retval = ipa3_send_cmd(1, desc);
+	if (retval) {
+		IPAERR("failed to send immediate command (err %d)\n", retval);
+		retval = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+bail_desc:
+	kfree(desc);
+	IPADBG("Done - retval = %d\n", retval);
+	return retval;
+}
+
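+/*
+ * For every valid endpoint that the AP did not configure, program the
+ * endpoint status register to point at the Apps LAN consumer, then hand
+ * the register-write descriptors to a TAG process and wait for completion.
+ */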
+static int ipa3_q6_set_ex_path_to_apps(void)
+{
+	int ep_idx;
+	int client_idx;
+	struct ipa3_desc *desc;
+	int num_descs = 0;
+	int index;
+	struct ipahal_imm_cmd_register_write reg_write;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int retval;
+	struct ipahal_reg_valmask valmask;
+
+	desc = kcalloc(ipa3_ctx->ipa_num_pipes, sizeof(struct ipa3_desc),
+			GFP_KERNEL);
+	if (!desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Set the exception path to AP */
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		ep_idx = ipa3_get_ep_mapping(client_idx);
+		if (ep_idx == -1)
+			continue;
+
+		if (ipa3_ctx->ep[ep_idx].valid &&
+			ipa3_ctx->ep[ep_idx].skip_ep_cfg) {
+			BUG_ON(num_descs >= ipa3_ctx->ipa_num_pipes);
+
+			reg_write.skip_pipeline_clear = false;
+			reg_write.pipeline_clear_options =
+				IPAHAL_HPS_CLEAR;
+			reg_write.offset =
+				ipahal_get_reg_ofst(IPA_ENDP_STATUS_n);
+			ipahal_get_status_ep_valmask(
+				ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS),
+				&valmask);
+			reg_write.value = valmask.val;
+			reg_write.value_mask = valmask.mask;
+			cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE, &reg_write, false);
+			if (!cmd_pyld) {
+				IPAERR("fail construct register_write cmd\n");
+				BUG();
+			}
+
+			desc[num_descs].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[num_descs].type = IPA_IMM_CMD_DESC;
+			desc[num_descs].callback = ipa3_destroy_imm;
+			desc[num_descs].user1 = cmd_pyld;
+			desc[num_descs].pyld = cmd_pyld->data;
+			desc[num_descs].len = cmd_pyld->len;
+			num_descs++;
+		}
+	}
+
+	/* Will wait 500msecs for IPA tag process completion */
+	retval = ipa3_tag_process(desc, num_descs,
+		msecs_to_jiffies(CLEANUP_TAG_PROCESS_TIMEOUT));
+	if (retval) {
+		IPAERR("TAG process failed! (error %d)\n", retval);
+		/* For timeout error ipa3_destroy_imm cb will destroy user1 */
+		if (retval != -ETIME) {
+			for (index = 0; index < num_descs; index++)
+				if (desc[index].callback)
+					desc[index].callback(desc[index].user1,
+						desc[index].user2);
+			retval = -EINVAL;
+		}
+	}
+
+	kfree(desc);
+
+	return retval;
+}
+
+/**
+ * ipa3_q6_pre_shutdown_cleanup() - Clean up all Q6-related configuration
+ *                    in IPA HW. This is performed in case of SSR.
+ *
+ * This is a mandatory procedure; if one of the steps fails, the
+ * AP needs to restart.
+ */
+void ipa3_q6_pre_shutdown_cleanup(void)
+{
+	IPADBG_LOW("ENTER\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipa3_q6_pipe_delay(true);
+	ipa3_q6_avoid_holb();
+	if (ipa3_q6_clean_q6_tables()) {
+		IPAERR("Failed to clean Q6 tables\n");
+		BUG();
+	}
+	if (ipa3_q6_set_ex_path_to_apps()) {
+		IPAERR("Failed to redirect exceptions to APPS\n");
+		BUG();
+	}
+	/*
+	 * Remove delay from Q6 PRODs to avoid pending descriptors during
+	 * the pipe reset procedure.
+	 */
+	ipa3_q6_pipe_delay(false);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+/*
+ * ipa3_q6_post_shutdown_cleanup() - As part of this cleanup, check that
+ * the GSI channels of the Q6 producer clients are empty.
+ *
+ * Q6 GSI channel emptiness is needed to guarantee that no descriptors with
+ * invalid info are injected into IPA RX from IPA_IF while the modem is
+ * restarting.
+ */
+void ipa3_q6_post_shutdown_cleanup(void)
+{
+	int client_idx;
+
+	IPADBG_LOW("ENTER\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded. Skipping\n");
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+		if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+			if (ipa3_uc_is_gsi_channel_empty(client_idx)) {
+				IPAERR("fail to validate Q6 ch emptiness %d\n",
+					client_idx);
+				BUG();
+				return;
+			}
+		}
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG_LOW("Exit with success\n");
+}
+
+static inline void ipa3_sram_set_canary(u32 *sram_mmio, int offset)
+{
+	/* Set 4 bytes of CANARY before the offset */
+	sram_mmio[(offset - 4) / 4] = IPA_MEM_CANARY_VAL;
+}
+
+/**
+ * _ipa_init_sram_v3_0() - Initialize IPA local SRAM.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_sram_v3_0(void)
+{
+	u32 *ipa_sram_mmio;
+	unsigned long phys_addr;
+
+	phys_addr = ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4);
+
+	ipa_sram_mmio = ioremap(phys_addr, ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	/* Consult with ipa_i.h on the location of the CANARY values */
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(v4_flt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_flt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(v6_flt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_flt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v4_rt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_hash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(v6_rt_nhash_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_hdr_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio,
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst) - 4);
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(modem_ofst));
+	ipa3_sram_set_canary(ipa_sram_mmio, IPA_MEM_PART(end_ofst));
+
+	iounmap(ipa_sram_mmio);
+
+	return 0;
+}
+
+/**
+ * _ipa_init_hdr_v3_0() - Initialize IPA header block.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_hdr_v3_0(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_hdr_init_local cmd = {0};
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd = { 0 };
+
+	mem.size = IPA_MEM_PART(modem_hdr_size) + IPA_MEM_PART(apps_hdr_size);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+
+	cmd.hdr_table_addr = mem.phys_base;
+	cmd.size_hdr_table = mem.size;
+	cmd.hdr_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_ofst);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_HDR_INIT_LOCAL, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail to construct hdr_init_local imm cmd\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_HDR_INIT_LOCAL);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
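+	/*
+	 * Second stage: zero the modem header processing context area in
+	 * local SRAM via DMA_SHARED_MEM, then record its base address in
+	 * the local packet processing context register.
+	 */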
+	mem.size = IPA_MEM_PART(modem_hdr_proc_ctx_size) +
+		IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	memset(mem.base, 0, mem.size);
+	memset(&desc, 0, sizeof(desc));
+
+	dma_cmd.is_read = false;
+	dma_cmd.skip_pipeline_clear = false;
+	dma_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	dma_cmd.system_addr = mem.phys_base;
+	dma_cmd.local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst);
+	dma_cmd.size = mem.size;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &dma_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail to construct dma_shared_mem imm\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size, mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		ipahal_destroy_imm_cmd(cmd_pyld);
+		dma_free_coherent(ipa3_ctx->pdev,
+			mem.size,
+			mem.base,
+			mem.phys_base);
+		return -EFAULT;
+	}
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+	ipahal_write_reg(IPA_LOCAL_PKT_PROC_CNTXT_BASE, dma_cmd.local_addr);
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	return 0;
+}
+
+/**
+ * _ipa_init_rt4_v3() - Initialize IPA routing block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt4_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v4_routing_init v4_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int i;
+	int rc = 0;
+
+	for (i = IPA_MEM_PART(v4_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v4_modem_rt_index_hi);
+		i++)
+		ipa3_ctx->rt_idx_bitmap[IPA_IP_v4] |= (1 << i);
+	IPADBG("v4 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v4]);
+
+	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v4_rt_num_index),
+		IPA_MEM_PART(v4_rt_hash_size), IPA_MEM_PART(v4_rt_nhash_size),
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v4 rt img\n");
+		return rc;
+	}
+
+	v4_cmd.hash_rules_addr = mem.phys_base;
+	v4_cmd.hash_rules_size = mem.size;
+	v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_rt_hash_ofst);
+	v4_cmd.nhash_rules_addr = mem.phys_base;
+	v4_cmd.nhash_rules_size = mem.size;
+	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_rt_nhash_ofst);
+	IPADBG("putting hashable routing IPv4 rules to phys 0x%x\n",
+				v4_cmd.hash_local_addr);
+	IPADBG("putting non-hashable routing IPv4 rules to phys 0x%x\n",
+				v4_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_ROUTING_INIT, &v4_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v4_rt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_ROUTING_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_rt6_v3() - Initialize IPA routing block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_rt6_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v6_routing_init v6_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int i;
+	int rc = 0;
+
+	for (i = IPA_MEM_PART(v6_modem_rt_index_lo);
+		i <= IPA_MEM_PART(v6_modem_rt_index_hi);
+		i++)
+		ipa3_ctx->rt_idx_bitmap[IPA_IP_v6] |= (1 << i);
+	IPADBG("v6 rt bitmap 0x%lx\n", ipa3_ctx->rt_idx_bitmap[IPA_IP_v6]);
+
+	rc = ipahal_rt_generate_empty_img(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v6_rt_hash_size), IPA_MEM_PART(v6_rt_nhash_size),
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v6 rt img\n");
+		return rc;
+	}
+
+	v6_cmd.hash_rules_addr = mem.phys_base;
+	v6_cmd.hash_rules_size = mem.size;
+	v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_rt_hash_ofst);
+	v6_cmd.nhash_rules_addr = mem.phys_base;
+	v6_cmd.nhash_rules_size = mem.size;
+	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_rt_nhash_ofst);
+	IPADBG("putting hashable routing IPv6 rules to phys 0x%x\n",
+				v6_cmd.hash_local_addr);
+	IPADBG("putting non-hashable routing IPv6 rules to phys 0x%x\n",
+				v6_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_ROUTING_INIT, &v6_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v6_rt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_ROUTING_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_flt4_v3() - Initialize IPA filtering block for IPv4.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt4_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v4_filter_init v4_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int rc;
+
+	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+		IPA_MEM_PART(v4_flt_hash_size),
+		IPA_MEM_PART(v4_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v4 flt img\n");
+		return rc;
+	}
+
+	v4_cmd.hash_rules_addr = mem.phys_base;
+	v4_cmd.hash_rules_size = mem.size;
+	v4_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_flt_hash_ofst);
+	v4_cmd.nhash_rules_addr = mem.phys_base;
+	v4_cmd.nhash_rules_size = mem.size;
+	v4_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v4_flt_nhash_ofst);
+	IPADBG("putting hashable filtering IPv4 rules to phys 0x%x\n",
+				v4_cmd.hash_local_addr);
+	IPADBG("putting non-hashable filtering IPv4 rules to phys 0x%x\n",
+				v4_cmd.nhash_local_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_FILTER_INIT, &v4_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v4_flt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_FILTER_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
+/**
+ * _ipa_init_flt6_v3() - Initialize IPA filtering block for IPv6.
+ *
+ * Return codes: 0 for success, negative value for failure
+ */
+int _ipa_init_flt6_v3(void)
+{
+	struct ipa3_desc desc = { 0 };
+	struct ipa_mem_buffer mem;
+	struct ipahal_imm_cmd_ip_v6_filter_init v6_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	int rc;
+
+	rc = ipahal_flt_generate_empty_img(ipa3_ctx->ep_flt_num,
+		IPA_MEM_PART(v6_flt_hash_size),
+		IPA_MEM_PART(v6_flt_nhash_size), ipa3_ctx->ep_flt_bitmap,
+		&mem);
+	if (rc) {
+		IPAERR("fail generate empty v6 flt img\n");
+		return rc;
+	}
+
+	v6_cmd.hash_rules_addr = mem.phys_base;
+	v6_cmd.hash_rules_size = mem.size;
+	v6_cmd.hash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_flt_hash_ofst);
+	v6_cmd.nhash_rules_addr = mem.phys_base;
+	v6_cmd.nhash_rules_size = mem.size;
+	v6_cmd.nhash_local_addr = ipa3_ctx->smem_restricted_bytes +
+		IPA_MEM_PART(v6_flt_nhash_ofst);
+	IPADBG("putting hashable filtering IPv6 rules to phys 0x%x\n",
+				v6_cmd.hash_local_addr);
+	IPADBG("putting non-hashable filtering IPv6 rules to phys 0x%x\n",
+				v6_cmd.nhash_local_addr);
+
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V6_FILTER_INIT, &v6_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("fail construct ip_v6_flt_init imm cmd\n");
+		rc = -EPERM;
+		goto free_mem;
+	}
+
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V6_FILTER_INIT);
+	desc.type = IPA_IMM_CMD_DESC;
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	IPA_DUMP_BUFF(mem.base, mem.phys_base, mem.size);
+
+	if (ipa3_send_cmd(1, &desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+	}
+
+	ipahal_destroy_imm_cmd(cmd_pyld);
+
+free_mem:
+	ipahal_free_dma_mem(&mem);
+	return rc;
+}
+
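+/*
+ * Zero the filter hash tuple mask of every AP-owned filtering pipe
+ * (modem pipes are skipped).
+ */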
+static int ipa3_setup_flt_hash_tuple(void)
+{
+	int pipe_idx;
+	struct ipahal_reg_hash_tuple tuple;
+
+	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
+	for (pipe_idx = 0; pipe_idx < ipa3_ctx->ipa_num_pipes ; pipe_idx++) {
+		if (!ipa_is_ep_support_flt(pipe_idx))
+			continue;
+
+		if (ipa_is_modem_pipe(pipe_idx))
+			continue;
+
+		if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple)) {
+			IPAERR("failed to setup pipe %d flt tuple\n", pipe_idx);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
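+/*
+ * Zero the routing hash tuple mask of every AP-owned routing table index
+ * (modem-reserved index ranges are skipped).
+ */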
+static int ipa3_setup_rt_hash_tuple(void)
+{
+	int tbl_idx;
+	struct ipahal_reg_hash_tuple tuple;
+
+	memset(&tuple, 0, sizeof(struct ipahal_reg_hash_tuple));
+
+	for (tbl_idx = 0;
+		tbl_idx < max(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v4_rt_num_index));
+		tbl_idx++) {
+
+		if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+			tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi))
+			continue;
+
+		if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+			tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi))
+			continue;
+
+		if (ipa3_set_rt_tuple_mask(tbl_idx, &tuple)) {
+			IPAERR("failed to setup tbl %d rt tuple\n", tbl_idx);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+static int ipa3_setup_apps_pipes(void)
+{
+	struct ipa_sys_connect_params sys_in;
+	int result = 0;
+
+	if (ipa3_ctx->gsi_ch20_wa) {
+		IPADBG("Allocating GSI physical channel 20\n");
+		result = ipa_gsi_ch20_wa();
+		if (result) {
+			IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
+			goto fail_cmd;
+		}
+	}
+
+	/* CMD OUT (AP->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_APPS_LAN_CONS;
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_cmd)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_cmd;
+	}
+	IPADBG("Apps to IPA cmd pipe is connected\n");
+
+	ipa3_ctx->ctrl->ipa_init_sram();
+	IPADBG("SRAM initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_hdr();
+	IPADBG("HDR initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_rt4();
+	IPADBG("V4 RT initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_rt6();
+	IPADBG("V6 RT initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_flt4();
+	IPADBG("V4 FLT initialized\n");
+
+	ipa3_ctx->ctrl->ipa_init_flt6();
+	IPADBG("V6 FLT initialized\n");
+
+	if (ipa3_setup_flt_hash_tuple()) {
+		IPAERR(":fail to configure flt hash tuple\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("flt hash tuple is configured\n");
+
+	if (ipa3_setup_rt_hash_tuple()) {
+		IPAERR(":fail to configure rt hash tuple\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("rt hash tuple is configured\n");
+
+	if (ipa3_setup_exception_path()) {
+		IPAERR(":fail to setup excp path\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("Exception path was successfully set\n");
+
+	if (ipa3_setup_dflt_rt_tables()) {
+		IPAERR(":fail to setup dflt routes\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+	IPADBG("default routing was set\n");
+
+	/* LAN IN (IPA->A5) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.notify = ipa3_lan_rx_cb;
+	sys_in.priv = NULL;
+	sys_in.ipa_ep_cfg.hdr.hdr_len = IPA_LAN_RX_HEADER_LENGTH;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_little_endian = false;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = IPA_HDR_PAD;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = false;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	sys_in.ipa_ep_cfg.hdr_ext.hdr_pad_to_alignment = 2;
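+	/* enable HW checksum offload in the downlink (IPA->Apps) direction */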
+	sys_in.ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
+
+	/*
+	 * ipa3_lan_rx_cb() is intended to notify the source EP about a packet
+	 * being received on the LAN_CONS by calling the source EP's callback.
+	 * There is a potential race when calling this callback: another
+	 * thread may nullify it, e.g. on EP disconnect.
+	 * This lock protects access to the source EP callback.
+	 */
+	spin_lock_init(&ipa3_ctx->disconnect_lock);
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_in)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_schedule_delayed_work;
+	}
+
+	/* LAN-WAN OUT (AP->IPA) */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	if (ipa3_setup_sys_pipe(&sys_in, &ipa3_ctx->clnt_hdl_data_out)) {
+		IPAERR(":setup sys pipe failed.\n");
+		result = -EPERM;
+		goto fail_data_out;
+	}
+
+	return 0;
+
+fail_data_out:
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+fail_schedule_delayed_work:
+	if (ipa3_ctx->dflt_v6_rt_rule_hdl)
+		__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+	if (ipa3_ctx->dflt_v4_rt_rule_hdl)
+		__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+	if (ipa3_ctx->excp_hdr_hdl)
+		__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+fail_cmd:
+	return result;
+}
+
+static void ipa3_teardown_apps_pipes(void)
+{
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_out);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_data_in);
+	__ipa3_del_rt_rule(ipa3_ctx->dflt_v6_rt_rule_hdl);
+	__ipa3_del_rt_rule(ipa3_ctx->dflt_v4_rt_rule_hdl);
+	__ipa3_del_hdr(ipa3_ctx->excp_hdr_hdl);
+	ipa3_teardown_sys_pipe(ipa3_ctx->clnt_hdl_cmd);
+}
+
+#ifdef CONFIG_COMPAT
+long compat_ipa3_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	int retval = 0;
+	struct ipa3_ioc_nat_alloc_mem32 nat_mem32;
+	struct ipa_ioc_nat_alloc_mem nat_mem;
+
+	switch (cmd) {
+	case IPA_IOC_ADD_HDR32:
+		cmd = IPA_IOC_ADD_HDR;
+		break;
+	case IPA_IOC_DEL_HDR32:
+		cmd = IPA_IOC_DEL_HDR;
+		break;
+	case IPA_IOC_ADD_RT_RULE32:
+		cmd = IPA_IOC_ADD_RT_RULE;
+		break;
+	case IPA_IOC_DEL_RT_RULE32:
+		cmd = IPA_IOC_DEL_RT_RULE;
+		break;
+	case IPA_IOC_ADD_FLT_RULE32:
+		cmd = IPA_IOC_ADD_FLT_RULE;
+		break;
+	case IPA_IOC_DEL_FLT_RULE32:
+		cmd = IPA_IOC_DEL_FLT_RULE;
+		break;
+	case IPA_IOC_GET_RT_TBL32:
+		cmd = IPA_IOC_GET_RT_TBL;
+		break;
+	case IPA_IOC_COPY_HDR32:
+		cmd = IPA_IOC_COPY_HDR;
+		break;
+	case IPA_IOC_QUERY_INTF32:
+		cmd = IPA_IOC_QUERY_INTF;
+		break;
+	case IPA_IOC_QUERY_INTF_TX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_TX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_RX_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_RX_PROPS;
+		break;
+	case IPA_IOC_QUERY_INTF_EXT_PROPS32:
+		cmd = IPA_IOC_QUERY_INTF_EXT_PROPS;
+		break;
+	case IPA_IOC_GET_HDR32:
+		cmd = IPA_IOC_GET_HDR;
+		break;
+	case IPA_IOC_ALLOC_NAT_MEM32:
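+		/*
+		 * Unlike the commands above, which only need their command
+		 * number remapped, this ioctl carries a struct with size_t
+		 * and off_t members, so it is converted explicitly between
+		 * the 32-bit and native layouts.
+		 */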
+		if (copy_from_user((u8 *)&nat_mem32, (u8 *)arg,
+			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
+			retval = -EFAULT;
+			goto ret;
+		}
+		memcpy(nat_mem.dev_name, nat_mem32.dev_name,
+				IPA_RESOURCE_NAME_MAX);
+		nat_mem.size = (size_t)nat_mem32.size;
+		nat_mem.offset = (off_t)nat_mem32.offset;
+
+		/* null terminate the string */
+		nat_mem.dev_name[IPA_RESOURCE_NAME_MAX - 1] = '\0';
+
+		if (ipa3_allocate_nat_device(&nat_mem)) {
+			retval = -EFAULT;
+			goto ret;
+		}
+		nat_mem32.offset = (compat_off_t)nat_mem.offset;
+		if (copy_to_user((u8 *)arg, (u8 *)&nat_mem32,
+			sizeof(struct ipa3_ioc_nat_alloc_mem32))) {
+			retval = -EFAULT;
+		}
+ret:
+		return retval;
+	case IPA_IOC_V4_INIT_NAT32:
+		cmd = IPA_IOC_V4_INIT_NAT;
+		break;
+	case IPA_IOC_NAT_DMA32:
+		cmd = IPA_IOC_NAT_DMA;
+		break;
+	case IPA_IOC_V4_DEL_NAT32:
+		cmd = IPA_IOC_V4_DEL_NAT;
+		break;
+	case IPA_IOC_GET_NAT_OFFSET32:
+		cmd = IPA_IOC_GET_NAT_OFFSET;
+		break;
+	case IPA_IOC_PULL_MSG32:
+		cmd = IPA_IOC_PULL_MSG;
+		break;
+	case IPA_IOC_RM_ADD_DEPENDENCY32:
+		cmd = IPA_IOC_RM_ADD_DEPENDENCY;
+		break;
+	case IPA_IOC_RM_DEL_DEPENDENCY32:
+		cmd = IPA_IOC_RM_DEL_DEPENDENCY;
+		break;
+	case IPA_IOC_GENERATE_FLT_EQ32:
+		cmd = IPA_IOC_GENERATE_FLT_EQ;
+		break;
+	case IPA_IOC_QUERY_RT_TBL_INDEX32:
+		cmd = IPA_IOC_QUERY_RT_TBL_INDEX;
+		break;
+	case IPA_IOC_WRITE_QMAPID32:
+		cmd = IPA_IOC_WRITE_QMAPID;
+		break;
+	case IPA_IOC_MDFY_FLT_RULE32:
+		cmd = IPA_IOC_MDFY_FLT_RULE;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD;
+		break;
+	case IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL32:
+		cmd = IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL;
+		break;
+	case IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED32:
+		cmd = IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED;
+		break;
+	case IPA_IOC_MDFY_RT_RULE32:
+		cmd = IPA_IOC_MDFY_RT_RULE;
+		break;
+	case IPA_IOC_COMMIT_HDR:
+	case IPA_IOC_RESET_HDR:
+	case IPA_IOC_COMMIT_RT:
+	case IPA_IOC_RESET_RT:
+	case IPA_IOC_COMMIT_FLT:
+	case IPA_IOC_RESET_FLT:
+	case IPA_IOC_DUMP:
+	case IPA_IOC_PUT_RT_TBL:
+	case IPA_IOC_PUT_HDR:
+	case IPA_IOC_SET_FLT:
+	case IPA_IOC_QUERY_EP_MAPPING:
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return ipa3_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos);
+
+static const struct file_operations ipa3_drv_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_open,
+	.read = ipa3_read,
+	.write = ipa3_write,
+	.unlocked_ioctl = ipa3_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_ipa3_ioctl,
+#endif
+};
+
+static int ipa3_get_clks(struct device *dev)
+{
+	ipa3_clk = clk_get(dev, "core_clk");
+	if (IS_ERR(ipa3_clk)) {
+		if (ipa3_clk != ERR_PTR(-EPROBE_DEFER))
+			IPAERR("fail to get ipa clk\n");
+		return PTR_ERR(ipa3_clk);
+	}
+	return 0;
+}
+
+/**
+ * _ipa_enable_clks_v3_0() - Enable IPA clocks.
+ */
+void _ipa_enable_clks_v3_0(void)
+{
+	IPADBG_LOW("enabling gcc_ipa_clk\n");
+	if (ipa3_clk) {
+		clk_prepare(ipa3_clk);
+		clk_enable(ipa3_clk);
+		IPADBG_LOW("curr_ipa_clk_rate=%d", ipa3_ctx->curr_ipa_clk_rate);
+		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		ipa3_uc_notify_clk_state(true);
+	} else {
+		WARN_ON(1);
+	}
+
+	ipa3_suspend_apps_pipes(false);
+}
+
+static unsigned int ipa3_get_bus_vote(void)
+{
+	unsigned int idx = 1;
+
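+	/*
+	 * Map the current IPA clock rate to an index in the bus scaling
+	 * use-case table: index 1 for SVS, a higher index for nominal (when
+	 * the table has more than two use cases) and the last index for
+	 * turbo.
+	 */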
+	if (ipa3_ctx->curr_ipa_clk_rate == ipa3_ctx->ctrl->ipa_clk_rate_svs) {
+		idx = 1;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+			ipa3_ctx->ctrl->ipa_clk_rate_nominal) {
+		if (ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases <= 2)
+			idx = 1;
+		else
+			idx = 2;
+	} else if (ipa3_ctx->curr_ipa_clk_rate ==
+			ipa3_ctx->ctrl->ipa_clk_rate_turbo) {
+		idx = ipa3_ctx->ctrl->msm_bus_data_ptr->num_usecases - 1;
+	} else {
+		WARN_ON(1);
+	}
+
+	IPADBG("curr %d idx %d\n", ipa3_ctx->curr_ipa_clk_rate, idx);
+
+	return idx;
+}
+
+/**
+* ipa3_enable_clks() - Turn on IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa3_enable_clks(void)
+{
+	IPADBG("enabling IPA clocks and bus voting\n");
+
+	ipa3_ctx->ctrl->ipa3_enable_clks();
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+		    ipa3_get_bus_vote()))
+			WARN_ON(1);
+}
+
+
+/**
+ * _ipa_disable_clks_v3_0() - Disable IPA clocks.
+ */
+void _ipa_disable_clks_v3_0(void)
+{
+	IPADBG_LOW("disabling gcc_ipa_clk\n");
+	ipa3_suspend_apps_pipes(true);
+	ipa3_uc_notify_clk_state(false);
+	if (ipa3_clk)
+		clk_disable_unprepare(ipa3_clk);
+	else
+		WARN_ON(1);
+}
+
+/**
+* ipa3_disable_clks() - Turn off IPA clocks
+*
+* Return codes:
+* None
+*/
+void ipa3_disable_clks(void)
+{
+	IPADBG("disabling IPA clocks and bus voting\n");
+
+	ipa3_ctx->ctrl->ipa3_disable_clks();
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+		if (msm_bus_scale_client_update_request(ipa3_ctx->ipa_bus_hdl,
+		    0))
+			WARN_ON(1);
+}
+
+/**
+ * ipa3_start_tag_process() - Send TAG packet and wait for it to come back
+ *
+ * This function is called prior to clock gating, when the active client
+ * counter is 1. The TAG process ensures that there are no packets inside
+ * IPA HW that were not submitted to the peer's BAM. During the TAG process
+ * all aggregation frames are force-closed.
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_start_tag_process(struct work_struct *work)
+{
+	int res;
+
+	IPADBG("starting TAG process\n");
+	/* close aggregation frames on all pipes */
+	res = ipa3_tag_aggr_force_close(-1);
+	if (res)
+		IPAERR("ipa3_tag_aggr_force_close failed %d\n", res);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("TAG_PROCESS");
+
+	IPADBG("TAG process done\n");
+}
+
+/**
+* ipa3_active_clients_log_mod() - Log a modification in the active clients
+* reference count
+*
+* This method logs any modification of the active clients reference count:
+* it records the modification in the circular history buffer and in the
+* hash table - looking up an entry, creating one if needed and deleting
+* one when its count drops to zero.
+*
+* @id: ipa_active_client_logging_info struct holding the log information
+* @inc: a boolean indicating whether the modification is an increase or
+* a decrease
+* @int_ctx: a boolean variable to indicate whether this call is being made from
+* an interrupt context and therefore should allocate GFP_ATOMIC memory
+*
+* Method process:
+* - Hash the unique identifier string
+* - Find the hash in the table
+*    1)If found, increase or decrease the reference count
+*    2)If not found, allocate a new hash table entry struct and initialize it
+* - Remove and deallocate unneeded data structure
+* - Log the call in the circular history buffer (unless it is a simple call)
+*/
+void ipa3_active_clients_log_mod(struct ipa_active_client_logging_info *id,
+		bool inc, bool int_ctx)
+{
+	char temp_str[IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN];
+	unsigned long long t;
+	unsigned long nanosec_rem;
+	struct ipa3_active_client_htable_entry *hentry;
+	struct ipa3_active_client_htable_entry *hfound;
+	u32 hkey;
+	char str_to_hash[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+
+	hfound = NULL;
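+	/*
+	 * Copy the id string into a fixed-size, zero-padded buffer so that
+	 * jhash always runs over the same number of bytes regardless of the
+	 * string length.
+	 */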
+	memset(str_to_hash, 0, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	strlcpy(str_to_hash, id->id_string, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+	hkey = jhash(str_to_hash, IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN,
+			0);
+	hash_for_each_possible(ipa3_ctx->ipa3_active_clients_logging.htable,
+			hentry, list, hkey) {
+		if (!strcmp(hentry->id_string, id->id_string)) {
+			hentry->count = hentry->count + (inc ? 1 : -1);
+			hfound = hentry;
+		}
+	}
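+	/*
+	 * No matching entry: allocate and insert one. If an existing entry's
+	 * count just dropped to zero, remove and free it.
+	 */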
+	if (hfound == NULL) {
+		hentry = NULL;
+		hentry = kzalloc(sizeof(
+				struct ipa3_active_client_htable_entry),
+				int_ctx ? GFP_ATOMIC : GFP_KERNEL);
+		if (hentry == NULL) {
+			IPAERR("failed allocating active clients hash entry");
+			return;
+		}
+		hentry->type = id->type;
+		strlcpy(hentry->id_string, id->id_string,
+				IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN);
+		INIT_HLIST_NODE(&hentry->list);
+		hentry->count = inc ? 1 : -1;
+		hash_add(ipa3_ctx->ipa3_active_clients_logging.htable,
+				&hentry->list, hkey);
+	} else if (hfound->count == 0) {
+		hash_del(&hfound->list);
+		kfree(hfound);
+	}
+
+	if (id->type != SIMPLE) {
+		t = local_clock();
+		nanosec_rem = do_div(t, 1000000000) / 1000;
+		snprintf(temp_str, IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN,
+				inc ? "[%5lu.%06lu] ^ %s, %s: %d" :
+						"[%5lu.%06lu] v %s, %s: %d",
+				(unsigned long)t, nanosec_rem,
+				id->id_string, id->file, id->line);
+		ipa3_active_clients_log_insert(temp_str);
+	}
+}
+
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa3_active_clients_log_mod(id, false, int_ctx);
+}
+
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx)
+{
+	ipa3_active_clients_log_mod(id, true, int_ctx);
+}
+
+/**
+* ipa3_inc_client_enable_clks() - Increase active clients counter, and
+* enable ipa clocks if necessary
+*
+* Return codes:
+* None
+*/
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id)
+{
+	ipa3_active_clients_lock();
+	ipa3_active_clients_log_inc(id, false);
+	ipa3_ctx->ipa3_active_clients.cnt++;
+	if (ipa3_ctx->ipa3_active_clients.cnt == 1)
+		ipa3_enable_clks();
+	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+	ipa3_active_clients_unlock();
+}
+
+/**
+* ipa3_inc_client_enable_clks_no_block() - Only increment the number of active
+* clients if no asynchronous actions should be done. Asynchronous actions are
+* locking a mutex and waking up IPA HW.
+*
+* Return codes: 0 for success
+*		-EPERM if an asynchronous action should have been done
+*/
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id)
+{
+	int res = 0;
+	unsigned long flags;
+
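+	/*
+	 * Bail out with -EPERM if the active-clients lock cannot be taken
+	 * without blocking, or if the clocks are currently gated (cnt == 0)
+	 * and would have to be woken up.
+	 */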
+	if (ipa3_active_clients_trylock(&flags) == 0)
+		return -EPERM;
+
+	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+		res = -EPERM;
+		goto bail;
+	}
+	ipa3_active_clients_log_inc(id, true);
+	ipa3_ctx->ipa3_active_clients.cnt++;
+	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+bail:
+	ipa3_active_clients_trylock_unlock(&flags);
+
+	return res;
+}
+
+/**
+ * ipa3_dec_client_disable_clks() - Decrease active clients counter
+ *
+ * If there are no remaining active clients, this function also starts the
+ * TAG process. When the TAG process ends, the IPA clocks will be gated.
+ * The tag_process_before_gating flag, when set, causes a TAG process to run
+ * before gating, since another client may have sent data that is still
+ * inside IPA HW.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id)
+{
+	struct ipa_active_client_logging_info log_info;
+
+	ipa3_active_clients_lock();
+	ipa3_active_clients_log_dec(id, false);
+	ipa3_ctx->ipa3_active_clients.cnt--;
+	IPADBG_LOW("active clients = %d\n", ipa3_ctx->ipa3_active_clients.cnt);
+	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+		if (ipa3_ctx->tag_process_before_gating) {
+			ipa3_ctx->tag_process_before_gating = false;
+			/*
+			 * When TAG process ends, active clients will be
+			 * decreased
+			 */
+			IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info,
+					"TAG_PROCESS");
+			ipa3_active_clients_log_inc(&log_info, false);
+			ipa3_ctx->ipa3_active_clients.cnt = 1;
+			queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_tag_work);
+		} else {
+			ipa3_disable_clks();
+		}
+	}
+	ipa3_active_clients_unlock();
+}
+
+/**
+* ipa3_inc_acquire_wakelock() - Increase the wakelock reference count, and
+* acquire the wakelock if necessary
+*
+* Return codes:
+* None
+*/
+void ipa3_inc_acquire_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa3_ctx->wakelock_ref_cnt.cnt++;
+	if (ipa3_ctx->wakelock_ref_cnt.cnt == 1)
+		__pm_stay_awake(&ipa3_ctx->w_lock);
+	IPADBG_LOW("active wakelock ref cnt = %d\n",
+		ipa3_ctx->wakelock_ref_cnt.cnt);
+	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+/**
+ * ipa3_dec_release_wakelock() - Decrease the wakelock reference count
+ *
+ * If the reference count reaches 0, release the wakelock.
+ *
+ * Return codes:
+ * None
+ */
+void ipa3_dec_release_wakelock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+	ipa3_ctx->wakelock_ref_cnt.cnt--;
+	IPADBG_LOW("active wakelock ref cnt = %d\n",
+		ipa3_ctx->wakelock_ref_cnt.cnt);
+	if (ipa3_ctx->wakelock_ref_cnt.cnt == 0)
+		__pm_relax(&ipa3_ctx->w_lock);
+	spin_unlock_irqrestore(&ipa3_ctx->wakelock_ref_cnt.spinlock, flags);
+}
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps)
+{
+	enum ipa_voltage_level needed_voltage;
+	u32 clk_rate;
+
+	IPADBG_LOW("floor_voltage=%d, bandwidth_mbps=%u",
+					floor_voltage, bandwidth_mbps);
+
+	if (floor_voltage < IPA_VOLTAGE_UNSPECIFIED ||
+		floor_voltage >= IPA_VOLTAGE_MAX) {
+		IPAERR("bad voltage\n");
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->enable_clock_scaling) {
+		IPADBG_LOW("Clock scaling is enabled\n");
+		if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo)
+			needed_voltage = IPA_VOLTAGE_TURBO;
+		else if (bandwidth_mbps >=
+			ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal)
+			needed_voltage = IPA_VOLTAGE_NOMINAL;
+		else
+			needed_voltage = IPA_VOLTAGE_SVS;
+	} else {
+		IPADBG_LOW("Clock scaling is disabled\n");
+		needed_voltage = IPA_VOLTAGE_NOMINAL;
+	}
+
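+	/* use the higher of the bandwidth-derived level and the caller's floor */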
+	needed_voltage = max(needed_voltage, floor_voltage);
+	switch (needed_voltage) {
+	case IPA_VOLTAGE_SVS:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_svs;
+		break;
+	case IPA_VOLTAGE_NOMINAL:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_nominal;
+		break;
+	case IPA_VOLTAGE_TURBO:
+		clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+		break;
+	default:
+		IPAERR("bad voltage\n");
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (clk_rate == ipa3_ctx->curr_ipa_clk_rate) {
+		IPADBG_LOW("Same voltage\n");
+		return 0;
+	}
+
+	ipa3_active_clients_lock();
+	ipa3_ctx->curr_ipa_clk_rate = clk_rate;
+	IPADBG_LOW("setting clock rate to %u\n", ipa3_ctx->curr_ipa_clk_rate);
+	if (ipa3_ctx->ipa3_active_clients.cnt > 0) {
+		clk_set_rate(ipa3_clk, ipa3_ctx->curr_ipa_clk_rate);
+		if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+			if (msm_bus_scale_client_update_request(
+			    ipa3_ctx->ipa_bus_hdl, ipa3_get_bus_vote()))
+				WARN_ON(1);
+	} else {
+		IPADBG_LOW("clocks are gated, not setting rate\n");
+	}
+	ipa3_active_clients_unlock();
+	IPADBG_LOW("Done\n");
+	return 0;
+}
+
+static void ipa3_sps_process_irq_schedule_rel(void)
+{
+	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+		&ipa3_sps_release_resource_work,
+		msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+}
+
+/**
+* ipa3_suspend_handler() - Handles the suspend interrupt:
+* wakes up the suspended peripheral by requesting its IPA RM resource
+* @interrupt:		Interrupt type
+* @private_data:	The client's private data
+* @interrupt_data:	Interrupt specific information data
+*/
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	enum ipa_rm_resource_name resource;
+	u32 suspend_data =
+		((struct ipa_tx_suspend_irq_data *)interrupt_data)->endpoints;
+	u32 bmsk = 1;
+	u32 i = 0;
+	int res;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	IPADBG("interrupt=%d, interrupt_data=%u\n",
+		interrupt, suspend_data);
+	memset(&holb_cfg, 0, sizeof(holb_cfg));
+	holb_cfg.tmr_val = 0;
+
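+	/*
+	 * Bit i of the suspend IRQ data is set when pipe i raised the TX
+	 * suspend interrupt; walk all pipes and handle each flagged one.
+	 */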
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if ((suspend_data & bmsk) && (ipa3_ctx->ep[i].valid)) {
+			if (IPA_CLIENT_IS_APPS_CONS(ipa3_ctx->ep[i].client)) {
+				/*
+				 * pipe will be unsuspended as part of
+				 * enabling IPA clocks
+				 */
+				if (!atomic_read(
+					&ipa3_ctx->transport_pm.dec_clients)
+					) {
+					IPA_ACTIVE_CLIENTS_INC_EP(
+						ipa3_ctx->ep[i].client);
+					IPADBG_LOW("Pipes un-suspended.\n");
+					IPADBG_LOW("Enter poll mode.\n");
+					atomic_set(
+					&ipa3_ctx->transport_pm.dec_clients,
+					1);
+					ipa3_sps_process_irq_schedule_rel();
+				}
+			} else {
+				resource = ipa3_get_rm_resource_from_ep(i);
+				res =
+				ipa_rm_request_resource_with_timer(resource);
+				if (res == -EPERM &&
+					IPA_CLIENT_IS_CONS(
+					   ipa3_ctx->ep[i].client)) {
+					holb_cfg.en = 1;
+					res = ipa3_cfg_ep_holb_by_client(
+					   ipa3_ctx->ep[i].client, &holb_cfg);
+					if (res) {
+						IPAERR("holb en fail, stall\n");
+						BUG();
+					}
+				}
+			}
+		}
+		bmsk = bmsk << 1;
+	}
+}
+
+/**
+* ipa3_restore_suspend_handler() - restores the original suspend IRQ handler
+* as it was registered in the IPA init sequence.
+* Return codes:
+* 0: success
+* -EPERM: failed to remove current handler or failed to add original handler
+*/
+int ipa3_restore_suspend_handler(void)
+{
+	int result = 0;
+
+	result  = ipa3_remove_interrupt_handler(IPA_TX_SUSPEND_IRQ);
+	if (result) {
+		IPAERR("remove handler for suspend interrupt failed\n");
+		return -EPERM;
+	}
+
+	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		result = -EPERM;
+	}
+
+	IPADBG("suspend handler successfully restored\n");
+
+	return result;
+}
+
+static int ipa3_apps_cons_release_resource(void)
+{
+	return 0;
+}
+
+static int ipa3_apps_cons_request_resource(void)
+{
+	return 0;
+}
+
+static void ipa3_sps_release_resource(struct work_struct *work)
+{
+	/* check whether still need to decrease client usage */
+	if (atomic_read(&ipa3_ctx->transport_pm.dec_clients)) {
+		if (atomic_read(&ipa3_ctx->transport_pm.eot_activity)) {
+			IPADBG("EOT pending Re-scheduling\n");
+			ipa3_sps_process_irq_schedule_rel();
+		} else {
+			atomic_set(&ipa3_ctx->transport_pm.dec_clients, 0);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("SPS_RESOURCE");
+		}
+	}
+	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+}
+
+int ipa3_create_apps_resource(void)
+{
+	struct ipa_rm_create_params apps_cons_create_params;
+	struct ipa_rm_perf_profile profile;
+	int result = 0;
+
+	memset(&apps_cons_create_params, 0,
+				sizeof(apps_cons_create_params));
+	apps_cons_create_params.name = IPA_RM_RESOURCE_APPS_CONS;
+	apps_cons_create_params.request_resource =
+		ipa3_apps_cons_request_resource;
+	apps_cons_create_params.release_resource =
+		ipa3_apps_cons_release_resource;
+	result = ipa_rm_create_resource(&apps_cons_create_params);
+	if (result) {
+		IPAERR("ipa_rm_create_resource failed\n");
+		return result;
+	}
+
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ipa_rm_set_perf_profile(IPA_RM_RESOURCE_APPS_CONS, &profile);
+
+	return result;
+}
+
+/**
+ * ipa3_init_interrupts() - Register to IPA IRQs
+ *
+ * Return codes: 0 on success, negative on failure
+ *
+ */
+int ipa3_init_interrupts(void)
+{
+	int result;
+
+	/*register IPA IRQ handler*/
+	result = ipa3_interrupts_init(ipa3_res.ipa_irq, 0,
+			master_dev);
+	if (result) {
+		IPAERR("ipa interrupts initialization failed\n");
+		return -ENODEV;
+	}
+
+	/*add handler for suspend interrupt*/
+	result = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
+			ipa3_suspend_handler, false, NULL);
+	if (result) {
+		IPAERR("register handler for suspend interrupt failed\n");
+		result = -ENODEV;
+		goto fail_add_interrupt_handler;
+	}
+
+	return 0;
+
+fail_add_interrupt_handler:
+	free_irq(ipa3_res.ipa_irq, master_dev);
+	return result;
+}
+
+/**
+ * ipa3_destroy_flt_tbl_idrs() - destroy the idr structure for flt tables
+ *  The idr structure per filtering table is intended for rule id generation
+ *  per filtering rule.
+ */
+static void ipa3_destroy_flt_tbl_idrs(void)
+{
+	int i;
+	struct ipa3_flt_tbl *flt_tbl;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		idr_destroy(&flt_tbl->rule_ids);
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		idr_destroy(&flt_tbl->rule_ids);
+	}
+}
+
+static void ipa3_freeze_clock_vote_and_notify_modem(void)
+{
+	int res;
+	u32 ipa_clk_state;
+	struct ipa_active_client_logging_info log_info;
+
+	if (ipa3_ctx->smp2p_info.res_sent)
+		return;
+
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "FREEZE_VOTE");
+	res = ipa3_inc_client_enable_clks_no_block(&log_info);
+	if (res)
+		ipa_clk_state = 0;
+	else
+		ipa_clk_state = 1;
+
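+	/*
+	 * Report the clock vote result to the modem over smp2p: one GPIO
+	 * carries the current clock state, a second signals that the
+	 * response is complete.
+	 */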
+	if (ipa3_ctx->smp2p_info.out_base_id) {
+		gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+			IPA_GPIO_OUT_CLK_VOTE_IDX, ipa_clk_state);
+		gpio_set_value(ipa3_ctx->smp2p_info.out_base_id +
+			IPA_GPIO_OUT_CLK_RSP_CMPLT_IDX, 1);
+		ipa3_ctx->smp2p_info.res_sent = true;
+	} else {
+		IPAERR("smp2p out gpio not assigned\n");
+	}
+
+	IPADBG("IPA clocks are %s\n", ipa_clk_state ? "ON" : "OFF");
+}
+
+static int ipa3_panic_notifier(struct notifier_block *this,
+	unsigned long event, void *ptr)
+{
+	int res;
+
+	ipa3_freeze_clock_vote_and_notify_modem();
+
+	IPADBG("Calling uC panic handler\n");
+	res = ipa3_uc_panic_notifier(this, event, ptr);
+	if (res)
+		IPAERR("uC panic handler failed %d\n", res);
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block ipa3_panic_blk = {
+	.notifier_call = ipa3_panic_notifier,
+	/* IPA panic handler needs to run before modem shuts down */
+	.priority = INT_MAX,
+};
+
+static void ipa3_register_panic_hdlr(void)
+{
+	atomic_notifier_chain_register(&panic_notifier_list,
+		&ipa3_panic_blk);
+}
+
+static void ipa3_trigger_ipa_ready_cbs(void)
+{
+	struct ipa3_ready_cb_info *info;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	/* Call all the CBs */
+	list_for_each_entry(info, &ipa3_ctx->ipa_ready_cb_list, link)
+		if (info->ready_cb)
+			info->ready_cb(info->user_data);
+
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+static int ipa3_gsi_pre_fw_load_init(void)
+{
+	int result;
+
+	result = gsi_configure_regs(ipa3_res.transport_mem_base,
+		ipa3_res.transport_mem_size,
+		ipa3_res.ipa_mem_base);
+	if (result) {
+		IPAERR("Failed to configure GSI registers\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ipa3_uc_is_loaded(void)
+{
+	IPADBG("\n");
+	complete_all(&ipa3_ctx->uc_loaded_completion_obj);
+}
+
+static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
+{
+	enum gsi_ver gsi_ver;
+
+	switch (ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		gsi_ver = GSI_VER_1_0;
+		break;
+	case IPA_HW_v3_5:
+		gsi_ver = GSI_VER_1_2;
+		break;
+	case IPA_HW_v3_5_1:
+		gsi_ver = GSI_VER_1_3;
+		break;
+	default:
+		IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+		WARN_ON(1);
+		gsi_ver = GSI_VER_ERR;
+	}
+
+	IPADBG("GSI version %d\n", gsi_ver);
+
+	return gsi_ver;
+}
+
+/**
+ * ipa3_post_init() - Initialize the IPA Driver (Part II).
+ * This part contains all initialization which requires interaction with
+ * IPA HW (via SPS BAM or GSI).
+ *
+ * @resource_p:	contains platform-specific values from the DTS file
+ * @ipa_dev:	The device structure representing the IPA driver
+ *
+ * Function initialization process:
+ * - Register BAM/SPS or GSI
+ * - Setup APPS pipes
+ * - Initialize tethering bridge
+ * - Initialize IPA debugfs
+ * - Initialize IPA uC interface
+ * - Initialize WDI interface
+ * - Initialize USB interface
+ * - Register for panic handler
+ * - Trigger IPA ready callbacks (to all subscribers)
+ * - Trigger IPA completion object (to all who wait on it)
+ */
+static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
+			  struct device *ipa_dev)
+{
+	int result;
+	struct sps_bam_props bam_props = { 0 };
+	struct gsi_per_props gsi_props;
+	struct ipa3_uc_hdlrs uc_hdlrs = { 0 };
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		memset(&gsi_props, 0, sizeof(gsi_props));
+		gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
+		gsi_props.ee = resource_p->ee;
+		gsi_props.intr = GSI_INTR_IRQ;
+		gsi_props.irq = resource_p->transport_irq;
+		gsi_props.phys_addr = resource_p->transport_mem_base;
+		gsi_props.size = resource_p->transport_mem_size;
+		gsi_props.notify_cb = ipa_gsi_notify_cb;
+		gsi_props.req_clk_cb = NULL;
+		gsi_props.rel_clk_cb = NULL;
+
+		result = gsi_register_device(&gsi_props,
+			&ipa3_ctx->gsi_dev_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR(":gsi register error - %d\n", result);
+			result = -ENODEV;
+			goto fail_register_device;
+		}
+		IPADBG("IPA gsi is registered\n");
+	} else {
+		/* register IPA with SPS driver */
+		bam_props.phys_addr = resource_p->transport_mem_base;
+		bam_props.virt_size = resource_p->transport_mem_size;
+		bam_props.irq = resource_p->transport_irq;
+		bam_props.num_pipes = ipa3_ctx->ipa_num_pipes;
+		bam_props.summing_threshold = IPA_SUMMING_THRESHOLD;
+		bam_props.event_threshold = IPA_EVENT_THRESHOLD;
+		bam_props.options |= SPS_BAM_NO_LOCAL_CLK_GATING;
+		if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL)
+			bam_props.options |= SPS_BAM_OPT_IRQ_WAKEUP;
+		if (ipa3_ctx->ipa_bam_remote_mode == true)
+			bam_props.manage |= SPS_BAM_MGR_DEVICE_REMOTE;
+		if (!ipa3_ctx->smmu_s1_bypass)
+			bam_props.options |= SPS_BAM_SMMU_EN;
+		bam_props.ee = resource_p->ee;
+		bam_props.ipc_loglevel = 3;
+
+		result = sps_register_bam_device(&bam_props,
+			&ipa3_ctx->bam_handle);
+		if (result) {
+			IPAERR(":bam register error - %d\n", result);
+			result = -EPROBE_DEFER;
+			goto fail_register_device;
+		}
+		IPADBG("IPA BAM is registered\n");
+	}
+
+	/* setup the AP-IPA pipes */
+	if (ipa3_setup_apps_pipes()) {
+		IPAERR(":failed to setup IPA-Apps pipes\n");
+		result = -ENODEV;
+		goto fail_setup_apps_pipes;
+	}
+	IPADBG("IPA System2Bam pipes were connected\n");
+
+	if (ipa3_ctx->use_ipa_teth_bridge) {
+		/* Initialize the tethering bridge driver */
+		result = ipa3_teth_bridge_driver_init();
+		if (result) {
+			IPAERR(":teth_bridge init failed (%d)\n", -result);
+			result = -ENODEV;
+			goto fail_teth_bridge_driver_init;
+		}
+		IPADBG("teth_bridge initialized");
+	}
+
+	ipa3_debugfs_init();
+
+	result = ipa3_uc_interface_init();
+	if (result)
+		IPAERR(":ipa Uc interface init failed (%d)\n", -result);
+	else
+		IPADBG(":ipa Uc interface init ok\n");
+
+	uc_hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_is_loaded;
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_COMMON, &uc_hdlrs);
+
+	result = ipa3_wdi_init();
+	if (result)
+		IPAERR(":wdi init failed (%d)\n", -result);
+	else
+		IPADBG(":wdi init ok\n");
+
+	result = ipa3_ntn_init();
+	if (result)
+		IPAERR(":ntn init failed (%d)\n", -result);
+	else
+		IPADBG(":ntn init ok\n");
+
+	ipa3_register_panic_hdlr();
+
+	ipa3_ctx->q6_proxy_clk_vote_valid = true;
+
+	mutex_lock(&ipa3_ctx->lock);
+	ipa3_ctx->ipa_initialization_complete = true;
+	mutex_unlock(&ipa3_ctx->lock);
+
+	ipa3_trigger_ipa_ready_cbs();
+	complete_all(&ipa3_ctx->init_completion_obj);
+	pr_info("IPA driver initialization was successful.\n");
+
+	return 0;
+
+fail_teth_bridge_driver_init:
+	ipa3_teardown_apps_pipes();
+fail_setup_apps_pipes:
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+		gsi_deregister_device(ipa3_ctx->gsi_dev_hdl, false);
+	else
+		sps_deregister_bam_device(ipa3_ctx->bam_handle);
+fail_register_device:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+	ipa_rm_exit();
+	cdev_del(&ipa3_ctx->cdev);
+	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
+	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+	if (ipa3_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
+	ipa3_destroy_flt_tbl_idrs();
+	idr_destroy(&ipa3_ctx->ipa_idr);
+	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+	kmem_cache_destroy(ipa3_ctx->hdr_cache);
+	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+	iounmap(ipa3_ctx->mmio);
+	ipa3_disable_clks();
+	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+	if (ipa3_bus_scale_table) {
+		msm_bus_cl_clear_pdata(ipa3_bus_scale_table);
+		ipa3_bus_scale_table = NULL;
+	}
+	kfree(ipa3_ctx->ctrl);
+	kfree(ipa3_ctx);
+	ipa3_ctx = NULL;
+	return result;
+}
+
+static int ipa3_trigger_fw_loading_mdms(void)
+{
+	int result;
+	const struct firmware *fw;
+
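+	/*
+	 * On MDM targets the AP loads the IPA/GSI firmware images itself:
+	 * fetch them with request_firmware(), program them, then enable the
+	 * GSI firmware.
+	 */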
+	IPADBG("FW loading process initiated\n");
+
+	result = request_firmware(&fw, IPA_FWS_PATH, ipa3_ctx->dev);
+	if (result < 0) {
+		IPAERR("request_firmware failed, error %d\n", result);
+		return result;
+	}
+	if (fw == NULL) {
+		IPAERR("Firmware is NULL!\n");
+		return -EINVAL;
+	}
+
+	IPADBG("FWs are available for loading\n");
+
+	result = ipa3_load_fws(fw);
+	if (result) {
+		IPAERR("IPA FWs loading has failed\n");
+		release_firmware(fw);
+		return result;
+	}
+
+	result = gsi_enable_fw(ipa3_res.transport_mem_base,
+				ipa3_res.transport_mem_size);
+	if (result) {
+		IPAERR("Failed to enable GSI FW\n");
+		release_firmware(fw);
+		return result;
+	}
+
+	release_firmware(fw);
+
+	IPADBG("FW loading process is complete\n");
+	return 0;
+}
+
+static int ipa3_trigger_fw_loading_msms(void)
+{
+	void *subsystem_get_retval = NULL;
+
+	IPADBG("FW loading process initiated\n");
+
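+	/*
+	 * On MSM targets firmware loading is delegated to the PIL: getting
+	 * the IPA subsystem triggers it.
+	 */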
+	subsystem_get_retval = subsystem_get(IPA_SUBSYSTEM_NAME);
+	if (IS_ERR_OR_NULL(subsystem_get_retval)) {
+		IPAERR("Unable to trigger PIL process for FW loading\n");
+		return -EINVAL;
+	}
+
+	IPADBG("FW loading process is complete\n");
+	return 0;
+}
+
+static ssize_t ipa3_write(struct file *file, const char __user *buf,
+			  size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	int result = -EINVAL;
+
+	char dbg_buff[16] = { 0 };
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+
+	if (missing) {
+		IPAERR("Unable to copy data from user\n");
+		return -EFAULT;
+	}
+
+	/* Prevent subsequent calls from trying to load the FW again. */
+	if (ipa3_is_ready())
+		return count;
+
+	/*
+	 * We will trigger the process only if we're in GSI mode; otherwise
+	 * we just ignore the write.
+	 */
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+		if (ipa3_is_msm_device())
+			result = ipa3_trigger_fw_loading_msms();
+		else
+			result = ipa3_trigger_fw_loading_mdms();
+		/* every IPAv3.x chipset supports FW loading */
+
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+		if (result) {
+			IPAERR("FW loading process has failed\n");
+			BUG();
+		} else
+			ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
+	}
+	return count;
+}
+
+static int ipa3_tz_unlock_reg(struct ipa3_context *ipa3_ctx)
+{
+	int i, size, ret, resp;
+	struct tz_smmu_ipa_protect_region_iovec_s *ipa_tz_unlock_vec;
+	struct tz_smmu_ipa_protect_region_s cmd_buf;
+
+	if (ipa3_ctx && ipa3_ctx->ipa_tz_unlock_reg_num > 0) {
+		size = ipa3_ctx->ipa_tz_unlock_reg_num *
+			sizeof(struct tz_smmu_ipa_protect_region_iovec_s);
+		ipa_tz_unlock_vec = kzalloc(PAGE_ALIGN(size), GFP_KERNEL);
+		if (ipa_tz_unlock_vec == NULL)
+			return -ENOMEM;
+
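+		/*
+		 * Build one iovec per region to unlock. Clearing the low 12
+		 * bits (addr ^ (addr & 0xFFF)) rounds the register address
+		 * down to its 4 KB page, e.g. 0x1234ABC becomes 0x1234000.
+		 */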
+		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+			ipa_tz_unlock_vec[i].input_addr =
+				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
+				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
+				0xFFF);
+			ipa_tz_unlock_vec[i].output_addr =
+				ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr ^
+				(ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr &
+				0xFFF);
+			ipa_tz_unlock_vec[i].size =
+				ipa3_ctx->ipa_tz_unlock_reg[i].size;
+			ipa_tz_unlock_vec[i].attr = IPA_TZ_UNLOCK_ATTRIBUTE;
+		}
+
+		/* pass physical address of command buffer */
+		cmd_buf.iovec_buf = virt_to_phys((void *)ipa_tz_unlock_vec);
+		cmd_buf.size_bytes = size;
+
+		/* flush cache to DDR */
+		__cpuc_flush_dcache_area((void *)ipa_tz_unlock_vec, size);
+		outer_flush_range(cmd_buf.iovec_buf, cmd_buf.iovec_buf + size);
+
+		ret = scm_call(SCM_SVC_MP, TZ_MEM_PROTECT_REGION_ID, &cmd_buf,
+				sizeof(cmd_buf), &resp, sizeof(resp));
+		if (ret) {
+			IPAERR("scm call SCM_SVC_MP failed: %d\n", ret);
+			kfree(ipa_tz_unlock_vec);
+			return -EFAULT;
+		}
+		kfree(ipa_tz_unlock_vec);
+	}
+	return 0;
+}
+
+/**
+* ipa3_pre_init() - Initialize the IPA Driver.
+* This part contains all initialization which doesn't require IPA HW, such
+* as structure allocations and initializations, register writes, etc.
+*
+* @resource_p:	contains platform-specific values from the DTS file
+* @ipa_dev:	The device structure representing the IPA driver
+*
+* Function initialization process:
+* - Allocate memory for the driver context data struct
+* - Initialize the ipa3_ctx with:
+*    1) parsed values from the DTS file
+*    2) parameters passed to the module initialization
+*    3) HW values read at runtime (such as core memory size)
+* - Map IPA core registers to CPU memory
+* - Restart the IPA core (HW reset)
+* - Set configuration for IPA BAM via BAM_CNFG_BITS
+* - Initialize the look-aside caches (kmem_cache/slab) for filter,
+*   routing and IPA-tree entries
+* - Create a memory pool with 4 objects for DMA operations (each object
+*   is 512 bytes long); these objects are used for TX (A5->IPA)
+* - Initialize list heads (routing, filter, hdr, system pipes)
+* - Initialize mutexes (for ipa_ctx and NAT memory)
+* - Initialize spinlocks (for lists related to A5<->IPA pipes)
+* - Initialize 2 single-threaded work-queues named "ipa rx wq" and "ipa tx wq"
+* - Initialize red-black trees for handles of headers, routing rules,
+*   routing tables and filtering rules
+* - Initialize the filter block by committing IPV4 and IPV6 default rules
+* - Create an empty routing table in system memory (no commit)
+* - Initialize the pipe memory pool with ipa3_pipe_mem_init on supported
+*   platforms
+* - Create a char-device for IPA
+* - Initialize IPA RM (resource manager)
+* - Configure GSI registers (in GSI case)
+*/
+static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
+		struct device *ipa_dev)
+{
+	int result = 0;
+	int i;
+	struct ipa3_flt_tbl *flt_tbl;
+	struct ipa3_rt_tbl_set *rset;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("IPA Driver initialization started\n");
+
+	ipa3_ctx = kzalloc(sizeof(*ipa3_ctx), GFP_KERNEL);
+	if (!ipa3_ctx) {
+		IPAERR(":kzalloc err.\n");
+		result = -ENOMEM;
+		goto fail_mem_ctx;
+	}
+
+	ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
+	if (ipa3_ctx->logbuf == NULL) {
+		IPAERR("failed to get logbuf\n");
+		result = -ENOMEM;
+		goto fail_logbuf;
+	}
+
+	ipa3_ctx->pdev = ipa_dev;
+	ipa3_ctx->uc_pdev = ipa_dev;
+	ipa3_ctx->smmu_present = smmu_info.present;
+	if (!ipa3_ctx->smmu_present)
+		ipa3_ctx->smmu_s1_bypass = true;
+	else
+		ipa3_ctx->smmu_s1_bypass = smmu_info.s1_bypass;
+	ipa3_ctx->ipa_wrapper_base = resource_p->ipa_mem_base;
+	ipa3_ctx->ipa_wrapper_size = resource_p->ipa_mem_size;
+	ipa3_ctx->ipa_hw_type = resource_p->ipa_hw_type;
+	ipa3_ctx->ipa3_hw_mode = resource_p->ipa3_hw_mode;
+	ipa3_ctx->use_ipa_teth_bridge = resource_p->use_ipa_teth_bridge;
+	ipa3_ctx->ipa_bam_remote_mode = resource_p->ipa_bam_remote_mode;
+	ipa3_ctx->modem_cfg_emb_pipe_flt = resource_p->modem_cfg_emb_pipe_flt;
+	ipa3_ctx->ipa_wdi2 = resource_p->ipa_wdi2;
+	ipa3_ctx->use_64_bit_dma_mask = resource_p->use_64_bit_dma_mask;
+	ipa3_ctx->wan_rx_ring_size = resource_p->wan_rx_ring_size;
+	ipa3_ctx->lan_rx_ring_size = resource_p->lan_rx_ring_size;
+	ipa3_ctx->skip_uc_pipe_reset = resource_p->skip_uc_pipe_reset;
+	ipa3_ctx->tethered_flow_control = resource_p->tethered_flow_control;
+	ipa3_ctx->transport_prototype = resource_p->transport_prototype;
+	ipa3_ctx->ee = resource_p->ee;
+	ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
+	ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
+	ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
+	if (resource_p->ipa_tz_unlock_reg) {
+		ipa3_ctx->ipa_tz_unlock_reg_num =
+			resource_p->ipa_tz_unlock_reg_num;
+		ipa3_ctx->ipa_tz_unlock_reg = kcalloc(
+			ipa3_ctx->ipa_tz_unlock_reg_num,
+			sizeof(*ipa3_ctx->ipa_tz_unlock_reg),
+			GFP_KERNEL);
+		if (ipa3_ctx->ipa_tz_unlock_reg == NULL) {
+			result = -ENOMEM;
+			goto fail_tz_unlock_reg;
+		}
+		for (i = 0; i < ipa3_ctx->ipa_tz_unlock_reg_num; i++) {
+			ipa3_ctx->ipa_tz_unlock_reg[i].reg_addr =
+				resource_p->ipa_tz_unlock_reg[i].reg_addr;
+			ipa3_ctx->ipa_tz_unlock_reg[i].size =
+				resource_p->ipa_tz_unlock_reg[i].size;
+		}
+	}
+
+	/* unlock registers for uc */
+	ipa3_tz_unlock_reg(ipa3_ctx);
+
+	/* default aggregation parameters */
+	ipa3_ctx->aggregation_type = IPA_MBIM_16;
+	ipa3_ctx->aggregation_byte_limit = 1;
+	ipa3_ctx->aggregation_time_limit = 0;
+
+	ipa3_ctx->ctrl = kzalloc(sizeof(*ipa3_ctx->ctrl), GFP_KERNEL);
+	if (!ipa3_ctx->ctrl) {
+		IPAERR("memory allocation error for ctrl\n");
+		result = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+	result = ipa3_controller_static_bind(ipa3_ctx->ctrl,
+			ipa3_ctx->ipa_hw_type);
+	if (result) {
+		IPAERR("fail to static bind IPA ctrl.\n");
+		result = -EFAULT;
+		goto fail_bind;
+	}
+
+	result = ipa3_init_mem_partition(master_dev->of_node);
+	if (result) {
+		IPAERR(":ipa3_init_mem_partition failed!\n");
+		result = -ENODEV;
+		goto fail_init_mem_partition;
+	}
+
+	if (ipa3_bus_scale_table) {
+		IPADBG("Use bus scaling info from device tree\n");
+		ipa3_ctx->ctrl->msm_bus_data_ptr = ipa3_bus_scale_table;
+	}
+
+	if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_VIRTUAL) {
+		/* get BUS handle */
+		ipa3_ctx->ipa_bus_hdl =
+			msm_bus_scale_register_client(
+				ipa3_ctx->ctrl->msm_bus_data_ptr);
+		if (!ipa3_ctx->ipa_bus_hdl) {
+			IPAERR("fail to register with bus mgr!\n");
+			result = -ENODEV;
+			goto fail_bus_reg;
+		}
+	} else {
+		IPADBG("Skipping bus scaling registration on Virtual plat\n");
+	}
+
+	/* get IPA clocks */
+	result = ipa3_get_clks(master_dev);
+	if (result)
+		goto fail_clk;
+
+	/* init active_clients_log after getting ipa-clk */
+	if (ipa3_active_clients_log_init())
+		goto fail_init_active_client;
+
+	/* Enable ipa3_ctx->enable_clock_scaling */
+	ipa3_ctx->enable_clock_scaling = 1;
+	ipa3_ctx->curr_ipa_clk_rate = ipa3_ctx->ctrl->ipa_clk_rate_turbo;
+
+	/* enable IPA clocks explicitly to allow the initialization */
+	ipa3_enable_clks();
+
+	/* setup IPA register access */
+	IPADBG("Mapping 0x%x\n", resource_p->ipa_mem_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst);
+	ipa3_ctx->mmio = ioremap(resource_p->ipa_mem_base +
+			ipa3_ctx->ctrl->ipa_reg_base_ofst,
+			resource_p->ipa_mem_size);
+	if (!ipa3_ctx->mmio) {
+		IPAERR(":ipa-base ioremap err.\n");
+		result = -EFAULT;
+		goto fail_remap;
+	}
+
+	if (ipahal_init(ipa3_ctx->ipa_hw_type, ipa3_ctx->mmio,
+		ipa3_ctx->pdev)) {
+		IPAERR("fail to init ipahal\n");
+		result = -EFAULT;
+		goto fail_ipahal;
+	}
+
+	result = ipa3_init_hw();
+	if (result) {
+		IPAERR(":error initializing HW.\n");
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+	IPADBG("IPA HW initialization sequence completed");
+
+	ipa3_ctx->ipa_num_pipes = ipa3_get_num_pipes();
+	if (ipa3_ctx->ipa_num_pipes > IPA3_MAX_NUM_PIPES) {
+		IPAERR("IPA has more pipes then supported! has %d, max %d\n",
+			ipa3_ctx->ipa_num_pipes, IPA3_MAX_NUM_PIPES);
+		result = -ENODEV;
+		goto fail_init_hw;
+	}
+
+	ipa_init_ep_flt_bitmap();
+	IPADBG("EP with flt support bitmap 0x%x (%u pipes)\n",
+		ipa3_ctx->ep_flt_bitmap, ipa3_ctx->ep_flt_num);
+
+	ipa3_ctx->ctrl->ipa_sram_read_settings();
+	IPADBG("SRAM, size: 0x%x, restricted bytes: 0x%x\n",
+		ipa3_ctx->smem_sz, ipa3_ctx->smem_restricted_bytes);
+
+	IPADBG("hdr_lcl=%u ip4_rt_hash=%u ip4_rt_nonhash=%u\n",
+		ipa3_ctx->hdr_tbl_lcl, ipa3_ctx->ip4_rt_tbl_hash_lcl,
+		ipa3_ctx->ip4_rt_tbl_nhash_lcl);
+
+	IPADBG("ip6_rt_hash=%u ip6_rt_nonhash=%u\n",
+		ipa3_ctx->ip6_rt_tbl_hash_lcl, ipa3_ctx->ip6_rt_tbl_nhash_lcl);
+
+	IPADBG("ip4_flt_hash=%u ip4_flt_nonhash=%u\n",
+		ipa3_ctx->ip4_flt_tbl_hash_lcl,
+		ipa3_ctx->ip4_flt_tbl_nhash_lcl);
+
+	IPADBG("ip6_flt_hash=%u ip6_flt_nonhash=%u\n",
+		ipa3_ctx->ip6_flt_tbl_hash_lcl,
+		ipa3_ctx->ip6_flt_tbl_nhash_lcl);
+
+	if (ipa3_ctx->smem_reqd_sz > ipa3_ctx->smem_sz) {
+		IPAERR("SW expect more core memory, needed %d, avail %d\n",
+			ipa3_ctx->smem_reqd_sz, ipa3_ctx->smem_sz);
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	mutex_init(&ipa3_ctx->ipa3_active_clients.mutex);
+	spin_lock_init(&ipa3_ctx->ipa3_active_clients.spinlock);
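+	/* record the initial clock vote as one active client (PROXY_CLK_VOTE) */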
+	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "PROXY_CLK_VOTE");
+	ipa3_active_clients_log_inc(&log_info, false);
+	ipa3_ctx->ipa3_active_clients.cnt = 1;
+
+	/* Assign resource limitation to each group */
+	ipa3_set_resorce_groups_min_max_limits();
+
+	/* Create workqueues for power management */
+	ipa3_ctx->power_mgmt_wq =
+		create_singlethread_workqueue("ipa_power_mgmt");
+	if (!ipa3_ctx->power_mgmt_wq) {
+		IPAERR("failed to create power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_init_hw;
+	}
+
+	ipa3_ctx->transport_power_mgmt_wq =
+		create_singlethread_workqueue("transport_power_mgmt");
+	if (!ipa3_ctx->transport_power_mgmt_wq) {
+		IPAERR("failed to create transport power mgmt wq\n");
+		result = -ENOMEM;
+		goto fail_create_transport_wq;
+	}
+
+	spin_lock_init(&ipa3_ctx->transport_pm.lock);
+	ipa3_ctx->transport_pm.res_granted = false;
+	ipa3_ctx->transport_pm.res_rel_in_prog = false;
+
+	/* init the lookaside cache */
+	ipa3_ctx->flt_rule_cache = kmem_cache_create("IPA_FLT",
+			sizeof(struct ipa3_flt_entry), 0, 0, NULL);
+	if (!ipa3_ctx->flt_rule_cache) {
+		IPAERR(":ipa flt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_flt_rule_cache;
+	}
+	ipa3_ctx->rt_rule_cache = kmem_cache_create("IPA_RT",
+			sizeof(struct ipa3_rt_entry), 0, 0, NULL);
+	if (!ipa3_ctx->rt_rule_cache) {
+		IPAERR(":ipa rt cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_rule_cache;
+	}
+	ipa3_ctx->hdr_cache = kmem_cache_create("IPA_HDR",
+			sizeof(struct ipa3_hdr_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_cache) {
+		IPAERR(":ipa hdr cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_cache;
+	}
+	ipa3_ctx->hdr_offset_cache =
+	   kmem_cache_create("IPA_HDR_OFFSET",
+			   sizeof(struct ipa_hdr_offset_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_offset_cache) {
+		IPAERR(":ipa hdr off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_offset_cache;
+	}
+	ipa3_ctx->hdr_proc_ctx_cache = kmem_cache_create("IPA_HDR_PROC_CTX",
+		sizeof(struct ipa3_hdr_proc_ctx_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_proc_ctx_cache) {
+		IPAERR(":ipa hdr proc ctx cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_cache;
+	}
+	ipa3_ctx->hdr_proc_ctx_offset_cache =
+		kmem_cache_create("IPA_HDR_PROC_CTX_OFFSET",
+		sizeof(struct ipa3_hdr_proc_ctx_offset_entry), 0, 0, NULL);
+	if (!ipa3_ctx->hdr_proc_ctx_offset_cache) {
+		IPAERR(":ipa hdr proc ctx off cache create failed\n");
+		result = -ENOMEM;
+		goto fail_hdr_proc_ctx_offset_cache;
+	}
+	ipa3_ctx->rt_tbl_cache = kmem_cache_create("IPA_RT_TBL",
+			sizeof(struct ipa3_rt_tbl), 0, 0, NULL);
+	if (!ipa3_ctx->rt_tbl_cache) {
+		IPAERR(":ipa rt tbl cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rt_tbl_cache;
+	}
+	ipa3_ctx->tx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_TX_PKT_WRAPPER",
+			   sizeof(struct ipa3_tx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa3_ctx->tx_pkt_wrapper_cache) {
+		IPAERR(":ipa tx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_tx_pkt_wrapper_cache;
+	}
+	ipa3_ctx->rx_pkt_wrapper_cache =
+	   kmem_cache_create("IPA_RX_PKT_WRAPPER",
+			   sizeof(struct ipa3_rx_pkt_wrapper), 0, 0, NULL);
+	if (!ipa3_ctx->rx_pkt_wrapper_cache) {
+		IPAERR(":ipa rx pkt wrapper cache create failed\n");
+		result = -ENOMEM;
+		goto fail_rx_pkt_wrapper_cache;
+	}
+
+	/* Setup DMA pool */
+	ipa3_ctx->dma_pool = dma_pool_create("ipa_tx", ipa3_ctx->pdev,
+		IPA_NUM_DESC_PER_SW_TX * sizeof(struct sps_iovec),
+		0, 0);
+	if (!ipa3_ctx->dma_pool) {
+		IPAERR("cannot alloc DMA pool.\n");
+		result = -ENOMEM;
+		goto fail_dma_pool;
+	}
+
+	/* init the various list heads */
+	INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_hdr_entry_list);
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list);
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		INIT_LIST_HEAD(&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i]);
+		INIT_LIST_HEAD(&ipa3_ctx->
+				hdr_proc_ctx_tbl.head_free_offset_list[i]);
+	}
+	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v4].head_rt_tbl_list);
+	INIT_LIST_HEAD(&ipa3_ctx->rt_tbl_set[IPA_IP_v6].head_rt_tbl_list);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v4];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+
+		flt_tbl = &ipa3_ctx->flt_tbl[i][IPA_IP_v6];
+		INIT_LIST_HEAD(&flt_tbl->head_flt_rule_list);
+		flt_tbl->in_sys[IPA_RULE_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		flt_tbl->in_sys[IPA_RULE_NON_HASHABLE] =
+			!ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+		idr_init(&flt_tbl->rule_ids);
+	}
+
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v4];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+	rset = &ipa3_ctx->reap_rt_tbl_set[IPA_IP_v6];
+	INIT_LIST_HEAD(&rset->head_rt_tbl_list);
+
+	INIT_LIST_HEAD(&ipa3_ctx->intf_list);
+	INIT_LIST_HEAD(&ipa3_ctx->msg_list);
+	INIT_LIST_HEAD(&ipa3_ctx->pull_msg_list);
+	init_waitqueue_head(&ipa3_ctx->msg_waitq);
+	mutex_init(&ipa3_ctx->msg_lock);
+
+	mutex_init(&ipa3_ctx->lock);
+	mutex_init(&ipa3_ctx->nat_mem.lock);
+
+	idr_init(&ipa3_ctx->ipa_idr);
+	spin_lock_init(&ipa3_ctx->idr_lock);
+
+	/* wlan related member */
+	memset(&ipa3_ctx->wc_memb, 0, sizeof(ipa3_ctx->wc_memb));
+	spin_lock_init(&ipa3_ctx->wc_memb.wlan_spinlock);
+	spin_lock_init(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	INIT_LIST_HEAD(&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+
+	/* setup the IPA pipe mem pool */
+	if (resource_p->ipa_pipe_mem_size)
+		ipa3_pipe_mem_init(resource_p->ipa_pipe_mem_start_ofst,
+				resource_p->ipa_pipe_mem_size);
+
+	ipa3_ctx->class = class_create(THIS_MODULE, DRV_NAME);
+
+	result = alloc_chrdev_region(&ipa3_ctx->dev_num, 0, 1, DRV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa3_ctx->dev = device_create(ipa3_ctx->class, NULL, ipa3_ctx->dev_num,
+			ipa3_ctx, DRV_NAME);
+	if (IS_ERR(ipa3_ctx->dev)) {
+		IPAERR(":device_create err.\n");
+		result = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa3_ctx->cdev, &ipa3_drv_fops);
+	ipa3_ctx->cdev.owner = THIS_MODULE;
+	ipa3_ctx->cdev.ops = &ipa3_drv_fops;  /* from LDD3 */
+
+	result = cdev_add(&ipa3_ctx->cdev, ipa3_ctx->dev_num, 1);
+	if (result) {
+		IPAERR(":cdev_add err=%d\n", -result);
+		result = -ENODEV;
+		goto fail_cdev_add;
+	}
+	IPADBG("ipa cdev added successful. major:%d minor:%d\n",
+			MAJOR(ipa3_ctx->dev_num),
+			MINOR(ipa3_ctx->dev_num));
+
+	if (ipa3_create_nat_device()) {
+		IPAERR("unable to create nat device\n");
+		result = -ENODEV;
+		goto fail_nat_dev_add;
+	}
+
+	/* Create a wakeup source. */
+	wakeup_source_init(&ipa3_ctx->w_lock, "IPA_WS");
+	spin_lock_init(&ipa3_ctx->wakelock_ref_cnt.spinlock);
+
+	/* Initialize IPA RM (resource manager) */
+	result = ipa_rm_initialize();
+	if (result) {
+		IPAERR("RM initialization failed (%d)\n", -result);
+		result = -ENODEV;
+		goto fail_ipa_rm_init;
+	}
+	IPADBG("IPA resource manager initialized");
+
+	result = ipa3_create_apps_resource();
+	if (result) {
+		IPAERR("Failed to create APPS_CONS resource\n");
+		result = -ENODEV;
+		goto fail_create_apps_resource;
+	}
+
+	if (!ipa3_ctx->apply_rg10_wa) {
+		result = ipa3_init_interrupts();
+		if (result) {
+			IPAERR("ipa initialization of interrupts failed\n");
+			result = -ENODEV;
+			goto fail_ipa_init_interrupts;
+		}
+	} else {
+		IPADBG("Initialization of ipa interrupts skipped\n");
+	}
+
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_5)
+		ipa3_enable_dcd();
+
+	INIT_LIST_HEAD(&ipa3_ctx->ipa_ready_cb_list);
+
+	init_completion(&ipa3_ctx->init_completion_obj);
+	init_completion(&ipa3_ctx->uc_loaded_completion_obj);
+
+	/*
+	 * For GSI, we can't register the GSI driver yet, as it expects
+	 * the GSI FW to be up and running before the registration.
+	 */
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		/*
+		 * For IPA3.0, the GSI configuration is done by the GSI driver.
+		 * For IPA3.1 (and on), the GSI configuration is done by TZ.
+		 */
+		if (ipa3_ctx->ipa_hw_type == IPA_HW_v3_0) {
+			result = ipa3_gsi_pre_fw_load_init();
+			if (result) {
+				IPAERR("gsi pre FW loading config failed\n");
+				result = -ENODEV;
+				goto fail_ipa_init_interrupts;
+			}
+		}
+	}
+	/* For BAM (the only other mode), we can carry on with initialization */
+	else
+		return ipa3_post_init(resource_p, ipa_dev);
+
+	return 0;
+
+fail_ipa_init_interrupts:
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_APPS_CONS);
+fail_create_apps_resource:
+	ipa_rm_exit();
+fail_ipa_rm_init:
+fail_nat_dev_add:
+	cdev_del(&ipa3_ctx->cdev);
+fail_cdev_add:
+	device_destroy(ipa3_ctx->class, ipa3_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa3_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	if (ipa3_ctx->pipe_mem_pool)
+		gen_pool_destroy(ipa3_ctx->pipe_mem_pool);
+	ipa3_destroy_flt_tbl_idrs();
+	idr_destroy(&ipa3_ctx->ipa_idr);
+fail_dma_pool:
+	kmem_cache_destroy(ipa3_ctx->rx_pkt_wrapper_cache);
+fail_rx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa3_ctx->tx_pkt_wrapper_cache);
+fail_tx_pkt_wrapper_cache:
+	kmem_cache_destroy(ipa3_ctx->rt_tbl_cache);
+fail_rt_tbl_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_offset_cache);
+fail_hdr_proc_ctx_offset_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_proc_ctx_cache);
+fail_hdr_proc_ctx_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_offset_cache);
+fail_hdr_offset_cache:
+	kmem_cache_destroy(ipa3_ctx->hdr_cache);
+fail_hdr_cache:
+	kmem_cache_destroy(ipa3_ctx->rt_rule_cache);
+fail_rt_rule_cache:
+	kmem_cache_destroy(ipa3_ctx->flt_rule_cache);
+fail_flt_rule_cache:
+	destroy_workqueue(ipa3_ctx->transport_power_mgmt_wq);
+fail_create_transport_wq:
+	destroy_workqueue(ipa3_ctx->power_mgmt_wq);
+fail_init_hw:
+	ipahal_destroy();
+fail_ipahal:
+	iounmap(ipa3_ctx->mmio);
+fail_remap:
+	ipa3_disable_clks();
+	ipa3_active_clients_log_destroy();
+fail_init_active_client:
+fail_clk:
+	msm_bus_scale_unregister_client(ipa3_ctx->ipa_bus_hdl);
+fail_bus_reg:
+fail_init_mem_partition:
+fail_bind:
+	kfree(ipa3_ctx->ctrl);
+fail_mem_ctrl:
+	kfree(ipa3_ctx->ipa_tz_unlock_reg);
+fail_tz_unlock_reg:
+	ipc_log_context_destroy(ipa3_ctx->logbuf);
+fail_logbuf:
+	kfree(ipa3_ctx);
+	ipa3_ctx = NULL;
+fail_mem_ctx:
+	return result;
+}
+
+static int get_ipa_dts_configuration(struct platform_device *pdev,
+		struct ipa3_plat_drv_res *ipa_drv_res)
+{
+	int i, result, pos;
+	struct resource *resource;
+	u32 *ipa_tz_unlock_reg;
+	int elem_num;
+
+	/* initialize ipa3_res */
+	ipa_drv_res->ipa_pipe_mem_start_ofst = IPA_PIPE_MEM_START_OFST;
+	ipa_drv_res->ipa_pipe_mem_size = IPA_PIPE_MEM_SIZE;
+	ipa_drv_res->ipa_hw_type = 0;
+	ipa_drv_res->ipa3_hw_mode = 0;
+	ipa_drv_res->ipa_bam_remote_mode = false;
+	ipa_drv_res->modem_cfg_emb_pipe_flt = false;
+	ipa_drv_res->ipa_wdi2 = false;
+	ipa_drv_res->use_64_bit_dma_mask = false;
+	ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
+	ipa_drv_res->apply_rg10_wa = false;
+	ipa_drv_res->gsi_ch20_wa = false;
+	ipa_drv_res->ipa_tz_unlock_reg_num = 0;
+	ipa_drv_res->ipa_tz_unlock_reg = NULL;
+
+	/* Get IPA HW Version */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
+					&ipa_drv_res->ipa_hw_type);
+	if ((result) || (ipa_drv_res->ipa_hw_type == 0)) {
+		IPAERR(":get resource failed for ipa-hw-ver!\n");
+		return -ENODEV;
+	}
+	IPADBG(": ipa_hw_type = %d", ipa_drv_res->ipa_hw_type);
+
+	if (ipa_drv_res->ipa_hw_type < IPA_HW_v3_0) {
+		IPAERR(":IPA version below 3.0 not supported!\n");
+		return -ENODEV;
+	}
+
+	/* Get IPA HW mode */
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-mode",
+			&ipa_drv_res->ipa3_hw_mode);
+	if (result)
+		IPADBG("using default (IPA_MODE_NORMAL) for ipa-hw-mode\n");
+	else
+		IPADBG(": found ipa_drv_res->ipa3_hw_mode = %d",
+				ipa_drv_res->ipa3_hw_mode);
+
+	/* Get IPA WAN / LAN RX pool size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-ring-size",
+			&ipa_drv_res->wan_rx_ring_size);
+	if (result)
+		IPADBG("using default for wan-rx-ring-size = %u\n",
+				ipa_drv_res->wan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->wan-rx-ring-size = %u",
+				ipa_drv_res->wan_rx_ring_size);
+
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,lan-rx-ring-size",
+			&ipa_drv_res->lan_rx_ring_size);
+	if (result)
+		IPADBG("using default for lan-rx-ring-size = %u\n",
+			ipa_drv_res->lan_rx_ring_size);
+	else
+		IPADBG(": found ipa_drv_res->lan-rx-ring-size = %u",
+			ipa_drv_res->lan_rx_ring_size);
+
+	ipa_drv_res->use_ipa_teth_bridge =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-ipa-tethering-bridge");
+	IPADBG(": using TBDr = %s",
+		ipa_drv_res->use_ipa_teth_bridge
+		? "True" : "False");
+
+	ipa_drv_res->ipa_bam_remote_mode =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-bam-remote-mode");
+	IPADBG(": ipa bam remote mode = %s\n",
+			ipa_drv_res->ipa_bam_remote_mode
+			? "True" : "False");
+
+	ipa_drv_res->modem_cfg_emb_pipe_flt =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,modem-cfg-emb-pipe-flt");
+	IPADBG(": modem configure embedded pipe filtering = %s\n",
+			ipa_drv_res->modem_cfg_emb_pipe_flt
+			? "True" : "False");
+
+	ipa_drv_res->ipa_wdi2 =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-wdi2");
+	IPADBG(": WDI-2.0 = %s\n",
+			ipa_drv_res->ipa_wdi2
+			? "True" : "False");
+
+	ipa_drv_res->use_64_bit_dma_mask =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,use-64-bit-dma-mask");
+	IPADBG(": use_64_bit_dma_mask = %s\n",
+			ipa_drv_res->use_64_bit_dma_mask
+			? "True" : "False");
+
+	ipa_drv_res->skip_uc_pipe_reset =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,skip-uc-pipe-reset");
+	IPADBG(": skip uC pipe reset = %s\n",
+		ipa_drv_res->skip_uc_pipe_reset
+		? "True" : "False");
+
+	ipa_drv_res->tethered_flow_control =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,tethered-flow-control");
+	IPADBG(": Use apps based flow control = %s\n",
+		ipa_drv_res->tethered_flow_control
+		? "True" : "False");
+
+	if (of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-gsi"))
+		ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_GSI;
+	else
+		ipa_drv_res->transport_prototype = IPA_TRANSPORT_TYPE_SPS;
+
+	IPADBG(": transport type = %s\n",
+		ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS
+		? "SPS" : "GSI");
+
+	/* Get IPA wrapper address */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-base");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-base!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_mem_base = resource->start;
+	ipa_drv_res->ipa_mem_size = resource_size(resource);
+	IPADBG(": ipa-base = 0x%x, size = 0x%x\n",
+			ipa_drv_res->ipa_mem_base,
+			ipa_drv_res->ipa_mem_size);
+
+	smmu_info.ipa_base = ipa_drv_res->ipa_mem_base;
+	smmu_info.ipa_size = ipa_drv_res->ipa_mem_size;
+
+	if (ipa_drv_res->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+		/* Get IPA BAM address */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"bam-base");
+		if (!resource) {
+			IPAERR(":get resource failed for bam-base!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_mem_base = resource->start;
+		ipa_drv_res->transport_mem_size = resource_size(resource);
+		IPADBG(": bam-base = 0x%x, size = 0x%x\n",
+				ipa_drv_res->transport_mem_base,
+				ipa_drv_res->transport_mem_size);
+
+		/* Get IPA BAM IRQ number */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+				"bam-irq");
+		if (!resource) {
+			IPAERR(":get resource failed for bam-irq!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_irq = resource->start;
+		IPADBG(": bam-irq = %d\n", ipa_drv_res->transport_irq);
+	} else {
+		/* Get IPA GSI address */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+				"gsi-base");
+		if (!resource) {
+			IPAERR(":get resource failed for gsi-base!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_mem_base = resource->start;
+		ipa_drv_res->transport_mem_size = resource_size(resource);
+		IPADBG(": gsi-base = 0x%x, size = 0x%x\n",
+				ipa_drv_res->transport_mem_base,
+				ipa_drv_res->transport_mem_size);
+
+		/* Get IPA GSI IRQ number */
+		resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+				"gsi-irq");
+		if (!resource) {
+			IPAERR(":get resource failed for gsi-irq!\n");
+			return -ENODEV;
+		}
+		ipa_drv_res->transport_irq = resource->start;
+		IPADBG(": gsi-irq = %d\n", ipa_drv_res->transport_irq);
+	}
+
+	/* Get IPA pipe mem start ofst */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"ipa-pipe-mem");
+	if (!resource) {
+		IPADBG(":not using pipe memory - resource does not exist\n");
+	} else {
+		ipa_drv_res->ipa_pipe_mem_start_ofst = resource->start;
+		ipa_drv_res->ipa_pipe_mem_size = resource_size(resource);
+		IPADBG(":using pipe memory - at 0x%x of size 0x%x\n",
+				ipa_drv_res->ipa_pipe_mem_start_ofst,
+				ipa_drv_res->ipa_pipe_mem_size);
+	}
+
+	/* Get IPA IRQ number */
+	resource = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
+			"ipa-irq");
+	if (!resource) {
+		IPAERR(":get resource failed for ipa-irq!\n");
+		return -ENODEV;
+	}
+	ipa_drv_res->ipa_irq = resource->start;
+	IPADBG(":ipa-irq = %d\n", ipa_drv_res->ipa_irq);
+
+	result = of_property_read_u32(pdev->dev.of_node, "qcom,ee",
+			&ipa_drv_res->ee);
+	if (result)
+		ipa_drv_res->ee = 0;
+
+	ipa_drv_res->apply_rg10_wa =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,use-rg10-limitation-mitigation");
+	IPADBG(": Use Register Group 10 limitation mitigation = %s\n",
+		ipa_drv_res->apply_rg10_wa
+		? "True" : "False");
+
+	ipa_drv_res->gsi_ch20_wa =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,do-not-use-ch-gsi-20");
+	IPADBG(": GSI CH 20 WA is = %s\n",
+		ipa_drv_res->gsi_ch20_wa
+		? "Needed" : "Not needed");
+
+	elem_num = of_property_count_elems_of_size(pdev->dev.of_node,
+		"qcom,ipa-tz-unlock-reg", sizeof(u32));
+
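+	/*
+	 * The property is a flat list of u32 values laid out as <addr size>
+	 * pairs, so the element count must be even and the number of unlock
+	 * regions is elem_num / 2.
+	 */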
+	if (elem_num > 0 && elem_num % 2 == 0) {
+		ipa_drv_res->ipa_tz_unlock_reg_num = elem_num / 2;
+
+		ipa_tz_unlock_reg = kcalloc(elem_num, sizeof(u32), GFP_KERNEL);
+		if (ipa_tz_unlock_reg == NULL)
+			return -ENOMEM;
+
+		ipa_drv_res->ipa_tz_unlock_reg = kcalloc(
+			ipa_drv_res->ipa_tz_unlock_reg_num,
+			sizeof(*ipa_drv_res->ipa_tz_unlock_reg),
+			GFP_KERNEL);
+		if (ipa_drv_res->ipa_tz_unlock_reg == NULL) {
+			kfree(ipa_tz_unlock_reg);
+			return -ENOMEM;
+		}
+
+		if (of_property_read_u32_array(pdev->dev.of_node,
+			"qcom,ipa-tz-unlock-reg", ipa_tz_unlock_reg,
+			elem_num)) {
+			IPAERR("failed to read register addresses\n");
+			kfree(ipa_tz_unlock_reg);
+			kfree(ipa_drv_res->ipa_tz_unlock_reg);
+			return -EFAULT;
+		}
+
+		pos = 0;
+		for (i = 0; i < ipa_drv_res->ipa_tz_unlock_reg_num; i++) {
+			ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr =
+				ipa_tz_unlock_reg[pos++];
+			ipa_drv_res->ipa_tz_unlock_reg[i].size =
+				ipa_tz_unlock_reg[pos++];
+			IPADBG("tz unlock reg %d: addr 0x%pa size %d\n", i,
+				&ipa_drv_res->ipa_tz_unlock_reg[i].reg_addr,
+				ipa_drv_res->ipa_tz_unlock_reg[i].size);
+		}
+		kfree(ipa_tz_unlock_reg);
+	}
+	return 0;
+}
+
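+/*
+ * ipa_smmu_wlan_cb_probe() - set up the WLAN SMMU context bank.
+ * Allocates an IOMMU domain, applies the S1-bypass or atomic/fast-map
+ * attributes according to smmu_info, attaches the device and statically maps
+ * any <iova pa size> triplets listed in the qcom,additional-mapping DT
+ * property (e.g. IPA uC RAM).
+ */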
+static int ipa_smmu_wlan_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+	int atomic_ctx = 1;
+	int fast = 1;
+	int bypass = 1;
+	int ret;
+	u32 add_map_size;
+	const u32 *add_map;
+	int i;
+
+	IPADBG("sub pdev=%p\n", dev);
+
+	cb->dev = dev;
+	cb->iommu = iommu_domain_alloc(msm_iommu_get_bus(dev));
+	if (!cb->iommu) {
+		IPAERR("could not alloc iommu domain\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	cb->valid = true;
+
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->iommu,
+					DOMAIN_ATTR_S1_BYPASS,
+					&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->iommu,
+					DOMAIN_ATTR_ATOMIC,
+					&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU ATTR ATOMIC\n");
+
+		if (smmu_info.fast_map) {
+			if (iommu_domain_set_attr(cb->iommu,
+						DOMAIN_ATTR_FAST,
+						&fast)) {
+				IPAERR("couldn't set fast map\n");
+				cb->valid = false;
+				return -EIO;
+			}
+			IPADBG("SMMU fast map set\n");
+		}
+	}
+
+	ret = iommu_attach_device(cb->iommu, dev);
+	if (ret) {
+		IPAERR("could not attach device ret=%d\n", ret);
+		cb->valid = false;
+		return ret;
+	}
+	/* MAP ipa-uc ram */
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* the property is a flat array of <iova pa size> u32 triplets */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			u32 iova = be32_to_cpu(add_map[i]);
+			u32 pa = be32_to_cpu(add_map[i + 1]);
+			u32 size = be32_to_cpu(add_map[i + 2]);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->iommu,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+		}
+	}
+	return 0;
+}
+
+static int ipa_smmu_uc_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	int atomic_ctx = 1;
+	int bypass = 1;
+	int fast = 1;
+	int ret;
+	u32 iova_ap_mapping[2];
+
+	IPADBG("UC CB PROBE sub pdev=%p\n", dev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+			iova_ap_mapping, 2);
+	if (ret) {
+		IPAERR("Fail to read UC start/size iova addresses\n");
+		return ret;
+	}
+	cb->va_start = iova_ap_mapping[0];
+	cb->va_size = iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("UC va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+	IPADBG("UC CB PROBE=%p create IOMMU mapping\n", dev);
+
+	cb->dev = dev;
+	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+			cb->va_start, cb->va_size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		IPADBG("Fail to create mapping\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	IPADBG("SMMU mapping created\n");
+	cb->valid = true;
+
+	IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_S1_BYPASS,
+				&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_ATOMIC,
+				&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+
+		if (smmu_info.fast_map) {
+			if (iommu_domain_set_attr(cb->mapping->domain,
+					DOMAIN_ATTR_FAST,
+					&fast)) {
+				IPAERR("couldn't set fast map\n");
+				arm_iommu_release_mapping(cb->mapping);
+				cb->valid = false;
+				return -EIO;
+			}
+			IPADBG("SMMU fast map set\n");
+		}
+	}
+
+	IPADBG("UC CB PROBE sub pdev=%p attaching IOMMU device\n", dev);
+	ret = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (ret) {
+		IPAERR("could not attach device ret=%d\n", ret);
+		arm_iommu_release_mapping(cb->mapping);
+		cb->valid = false;
+		return ret;
+	}
+
+	cb->next_addr = cb->va_end;
+	ipa3_ctx->uc_pdev = dev;
+
+	return 0;
+}
+
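+/*
+ * ipa_smmu_ap_cb_probe() - set up the AP SMMU context bank.
+ * Creates an IOMMU mapping over the qcom,iova-mapping range, applies the
+ * S1-bypass or atomic/fast attributes, attaches the device, statically maps
+ * any qcom,additional-mapping triplets and the SMEM IPA filter table, and
+ * then kicks off the real driver initialization via ipa3_pre_init().
+ */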
+static int ipa_smmu_ap_cb_probe(struct device *dev)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+	int result;
+	int atomic_ctx = 1;
+	int fast = 1;
+	int bypass = 1;
+	u32 iova_ap_mapping[2];
+	u32 add_map_size;
+	const u32 *add_map;
+	void *smem_addr;
+	int i;
+
+	IPADBG("AP CB probe: sub pdev=%p\n", dev);
+
+	result = of_property_read_u32_array(dev->of_node, "qcom,iova-mapping",
+		iova_ap_mapping, 2);
+	if (result) {
+		IPAERR("Fail to read AP start/size iova addresses\n");
+		return result;
+	}
+	cb->va_start = iova_ap_mapping[0];
+	cb->va_size = iova_ap_mapping[1];
+	cb->va_end = cb->va_start + cb->va_size;
+	IPADBG("AP va_start=0x%x va_size=0x%x\n", cb->va_start, cb->va_size);
+
+	if (smmu_info.use_64_bit_dma_mask) {
+		if (dma_set_mask(dev, DMA_BIT_MASK(64)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(64))) {
+			IPAERR("DMA set 64bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	} else {
+		if (dma_set_mask(dev, DMA_BIT_MASK(32)) ||
+				dma_set_coherent_mask(dev, DMA_BIT_MASK(32))) {
+			IPAERR("DMA set 32bit mask failed\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	cb->dev = dev;
+	cb->mapping = arm_iommu_create_mapping(msm_iommu_get_bus(dev),
+					cb->va_start, cb->va_size);
+	if (IS_ERR_OR_NULL(cb->mapping)) {
+		IPADBG("Fail to create mapping\n");
+		/* assume this failure is because iommu driver is not ready */
+		return -EPROBE_DEFER;
+	}
+	IPADBG("SMMU mapping created\n");
+	cb->valid = true;
+
+	if (smmu_info.s1_bypass) {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_S1_BYPASS,
+				&bypass)) {
+			IPAERR("couldn't set bypass\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU S1 BYPASS\n");
+	} else {
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_ATOMIC,
+				&atomic_ctx)) {
+			IPAERR("couldn't set domain as atomic\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU atomic set\n");
+
+		if (iommu_domain_set_attr(cb->mapping->domain,
+				DOMAIN_ATTR_FAST,
+				&fast)) {
+			IPAERR("couldn't set fast map\n");
+			arm_iommu_release_mapping(cb->mapping);
+			cb->valid = false;
+			return -EIO;
+		}
+		IPADBG("SMMU fast map set\n");
+	}
+
+	result = arm_iommu_attach_device(cb->dev, cb->mapping);
+	if (result) {
+		IPAERR("couldn't attach to IOMMU ret=%d\n", result);
+		cb->valid = false;
+		return result;
+	}
+
+	add_map = of_get_property(dev->of_node,
+		"qcom,additional-mapping", &add_map_size);
+	if (add_map) {
+		/* the property is a flat array of <iova pa size> u32 triplets */
+		if (add_map_size % (3 * sizeof(u32))) {
+			IPAERR("wrong additional mapping format\n");
+			cb->valid = false;
+			return -EFAULT;
+		}
+
+		/* iterate over each entry of the additional mapping array */
+		for (i = 0; i < add_map_size / sizeof(u32); i += 3) {
+			u32 iova = be32_to_cpu(add_map[i]);
+			u32 pa = be32_to_cpu(add_map[i + 1]);
+			u32 size = be32_to_cpu(add_map[i + 2]);
+			unsigned long iova_p;
+			phys_addr_t pa_p;
+			u32 size_p;
+
+			IPA_SMMU_ROUND_TO_PAGE(iova, pa, size,
+				iova_p, pa_p, size_p);
+			IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+				iova_p, &pa_p, size_p);
+			ipa3_iommu_map(cb->mapping->domain,
+				iova_p, pa_p, size_p,
+				IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+		}
+	}
+
+	/* map SMEM memory for IPA table accesses */
+	smem_addr = smem_alloc(SMEM_IPA_FILTER_TABLE, IPA_SMEM_SIZE,
+		SMEM_MODEM, 0);
+	if (smem_addr) {
+		phys_addr_t iova = smem_virt_to_phys(smem_addr);
+		phys_addr_t pa = iova;
+		unsigned long iova_p;
+		phys_addr_t pa_p;
+		u32 size_p;
+
+		IPA_SMMU_ROUND_TO_PAGE(iova, pa, IPA_SMEM_SIZE,
+			iova_p, pa_p, size_p);
+		IPADBG("mapping 0x%lx to 0x%pa size %d\n",
+			iova_p, &pa_p, size_p);
+		ipa3_iommu_map(cb->mapping->domain,
+			iova_p, pa_p, size_p,
+			IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+	}
+
+
+	smmu_info.present = true;
+
+	if (!ipa3_bus_scale_table)
+		ipa3_bus_scale_table = msm_bus_cl_get_pdata(ipa3_pdev);
+
+	/* Proceed to real initialization */
+	result = ipa3_pre_init(&ipa3_res, dev);
+	if (result) {
+		IPAERR("ipa_init failed\n");
+		arm_iommu_detach_device(cb->dev);
+		arm_iommu_release_mapping(cb->mapping);
+		cb->valid = false;
+		return result;
+	}
+
+	return result;
+}
+
+static irqreturn_t ipa3_smp2p_modem_clk_query_isr(int irq, void *ctxt)
+{
+	ipa3_freeze_clock_vote_and_notify_modem();
+
+	return IRQ_HANDLED;
+}
+
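+/*
+ * ipa3_smp2p_probe() - probe the smp2p GPIO sub-nodes.
+ * The "out" node provides the outbound smp2p GPIO base (stored in
+ * smp2p_info.out_base_id). The "in" node provides the inbound GPIO base;
+ * the GPIO at offset IPA_GPIO_IN_QUERY_CLK_IDX is registered as an IRQ
+ * (ipa3_smp2p_modem_clk_query_isr) so the modem can query the IPA clock
+ * vote.
+ */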
+static int ipa3_smp2p_probe(struct device *dev)
+{
+	struct device_node *node = dev->of_node;
+	int res;
+
+	IPADBG("node->name=%s\n", node->name);
+	if (strcmp("qcom,smp2pgpio_map_ipa_1_out", node->name) == 0) {
+		res = of_get_gpio(node, 0);
+		if (res < 0) {
+			IPADBG("of_get_gpio returned %d\n", res);
+			return res;
+		}
+
+		ipa3_ctx->smp2p_info.out_base_id = res;
+		IPADBG("smp2p out_base_id=%d\n",
+			ipa3_ctx->smp2p_info.out_base_id);
+	} else if (strcmp("qcom,smp2pgpio_map_ipa_1_in", node->name) == 0) {
+		int irq;
+
+		res = of_get_gpio(node, 0);
+		if (res < 0) {
+			IPADBG("of_get_gpio returned %d\n", res);
+			return res;
+		}
+
+		ipa3_ctx->smp2p_info.in_base_id = res;
+		IPADBG("smp2p in_base_id=%d\n",
+			ipa3_ctx->smp2p_info.in_base_id);
+
+		/* register for modem clk query */
+		irq = gpio_to_irq(ipa3_ctx->smp2p_info.in_base_id +
+			IPA_GPIO_IN_QUERY_CLK_IDX);
+		if (irq < 0) {
+			IPAERR("gpio_to_irq failed %d\n", irq);
+			return -ENODEV;
+		}
+		IPADBG("smp2p irq#=%d\n", irq);
+		res = request_irq(irq,
+			(irq_handler_t)ipa3_smp2p_modem_clk_query_isr,
+			IRQF_TRIGGER_RISING, "ipa_smp2p_clk_vote", dev);
+		if (res) {
+			IPAERR("fail to register smp2p irq=%d\n", irq);
+			return -ENODEV;
+		}
+		res = enable_irq_wake(ipa3_ctx->smp2p_info.in_base_id +
+			IPA_GPIO_IN_QUERY_CLK_IDX);
+		if (res)
+			IPAERR("failed to enable irq wake\n");
+	}
+
+	return 0;
+}
+
+int ipa3_plat_drv_probe(struct platform_device *pdev_p,
+	struct ipa_api_controller *api_ctrl,
+	const struct of_device_id *pdrv_match)
+{
+	int result;
+	struct device *dev = &pdev_p->dev;
+
+	IPADBG("IPA driver probing started\n");
+	IPADBG("dev->of_node->name = %s\n", dev->of_node->name);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-ap-cb"))
+		return ipa_smmu_ap_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-wlan-cb"))
+		return ipa_smmu_wlan_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node, "qcom,ipa-smmu-uc-cb"))
+		return ipa_smmu_uc_cb_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+	    "qcom,smp2pgpio-map-ipa-1-in"))
+		return ipa3_smp2p_probe(dev);
+
+	if (of_device_is_compatible(dev->of_node,
+	    "qcom,smp2pgpio-map-ipa-1-out"))
+		return ipa3_smp2p_probe(dev);
+
+	master_dev = dev;
+	if (!ipa3_pdev)
+		ipa3_pdev = pdev_p;
+
+	result = get_ipa_dts_configuration(pdev_p, &ipa3_res);
+	if (result) {
+		IPAERR("IPA dts parsing failed\n");
+		return result;
+	}
+
+	result = ipa3_bind_api_controller(ipa3_res.ipa_hw_type, api_ctrl);
+	if (result) {
+		IPAERR("IPA API binding failed\n");
+		return result;
+	}
+
+	result = of_platform_populate(pdev_p->dev.of_node,
+		pdrv_match, NULL, &pdev_p->dev);
+	if (result) {
+		IPAERR("failed to populate platform\n");
+		return result;
+	}
+
+	if (of_property_read_bool(pdev_p->dev.of_node, "qcom,arm-smmu")) {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+		    "qcom,smmu-s1-bypass"))
+			smmu_info.s1_bypass = true;
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,smmu-fast-map"))
+			smmu_info.fast_map = true;
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,use-64-bit-dma-mask"))
+			smmu_info.use_64_bit_dma_mask = true;
+		smmu_info.arm_smmu = true;
+		pr_info("IPA smmu_info.s1_bypass=%d smmu_info.fast_map=%d\n",
+			smmu_info.s1_bypass, smmu_info.fast_map);
+	} else if (of_property_read_bool(pdev_p->dev.of_node,
+				"qcom,msm-smmu")) {
+		IPAERR("Legacy IOMMU not supported\n");
+		result = -EOPNOTSUPP;
+	} else {
+		if (of_property_read_bool(pdev_p->dev.of_node,
+			"qcom,use-64-bit-dma-mask")) {
+			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(64)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(64))) {
+				IPAERR("DMA set 64bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		} else {
+			if (dma_set_mask(&pdev_p->dev, DMA_BIT_MASK(32)) ||
+			    dma_set_coherent_mask(&pdev_p->dev,
+			    DMA_BIT_MASK(32))) {
+				IPAERR("DMA set 32bit mask failed\n");
+				return -EOPNOTSUPP;
+			}
+		}
+
+		if (!ipa3_bus_scale_table)
+			ipa3_bus_scale_table = msm_bus_cl_get_pdata(pdev_p);
+		/* Proceed to real initialization */
+		result = ipa3_pre_init(&ipa3_res, dev);
+		if (result) {
+			IPAERR("ipa3_init failed\n");
+			return result;
+		}
+	}
+
+	return result;
+}
+
+/**
+ * ipa3_ap_suspend() - suspend callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP suspend
+ * operation is invoked, typically when the system enters suspend.
+ *
+ * Returns -EAGAIN to runtime_pm framework in case IPA is in use by AP.
+ * This will postpone the suspend operation until IPA is no longer used by AP.
+ */
+int ipa3_ap_suspend(struct device *dev)
+{
+	int i;
+
+	IPADBG("Enter...\n");
+
+	/* If any tx/rx handler is in polling mode, fail to suspend */
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (ipa3_ctx->ep[i].sys &&
+			atomic_read(&ipa3_ctx->ep[i].sys->curr_polling_state)) {
+			IPAERR("EP %d is in polling state, do not suspend\n",
+				i);
+			return -EAGAIN;
+		}
+	}
+
+	/* release SPS IPA resource without waiting for inactivity timer */
+	atomic_set(&ipa3_ctx->transport_pm.eot_activity, 0);
+	ipa3_sps_release_resource(NULL);
+	IPADBG("Exit\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_ap_resume() - resume callback for runtime_pm
+ * @dev: pointer to device
+ *
+ * This callback will be invoked by the runtime_pm framework when an AP resume
+ * operation is invoked.
+ *
+ * Always returns 0 since resume should always succeed.
+ */
+int ipa3_ap_resume(struct device *dev)
+{
+	return 0;
+}
+
+struct ipa3_context *ipa3_get_ctx(void)
+{
+	return ipa3_ctx;
+}
+
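+/*
+ * GSI transport resource (clock) handshake:
+ * - ipa_gsi_req_res_cb() is called by GSI when it needs the transport
+ *   resource. If the IPA clocks can be voted without blocking, the grant is
+ *   immediate; otherwise ipa_gsi_request_resource_work is queued and the
+ *   grant is completed asynchronously via gsi_complete_clk_grant().
+ * - ipa_gsi_rel_res_cb() defers the release by
+ *   IPA_TRANSPORT_PROD_TIMEOUT_MSEC; a request arriving in that window
+ *   cancels the delayed release so the clock vote is kept.
+ */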
+static void ipa_gsi_request_resource(struct work_struct *work)
+{
+	unsigned long flags;
+	int ret;
+
+	/* request IPA clocks */
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* mark transport resource as granted */
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+	ipa3_ctx->transport_pm.res_granted = true;
+
+	IPADBG("IPA is ON, calling gsi driver\n");
+	ret = gsi_complete_clk_grant(ipa3_ctx->gsi_dev_hdl);
+	if (ret != GSI_STATUS_SUCCESS)
+		IPAERR("gsi_complete_clk_grant failed %d\n", ret);
+
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+}
+
+void ipa_gsi_req_res_cb(void *user_data, bool *granted)
+{
+	unsigned long flags;
+	struct ipa_active_client_logging_info log_info;
+
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+
+	/* make sure no release will happen */
+	cancel_delayed_work(&ipa_gsi_release_resource_work);
+	ipa3_ctx->transport_pm.res_rel_in_prog = false;
+
+	if (ipa3_ctx->transport_pm.res_granted) {
+		*granted = true;
+	} else {
+		IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, "GSI_RESOURCE");
+		if (ipa3_inc_client_enable_clks_no_block(&log_info) == 0) {
+			ipa3_ctx->transport_pm.res_granted = true;
+			*granted = true;
+		} else {
+			queue_work(ipa3_ctx->transport_power_mgmt_wq,
+				   &ipa_gsi_request_resource_work);
+			*granted = false;
+		}
+	}
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+}
+
+static void ipa_gsi_release_resource(struct work_struct *work)
+{
+	unsigned long flags;
+	bool dec_clients = false;
+
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+	/* check whether still need to decrease client usage */
+	if (ipa3_ctx->transport_pm.res_rel_in_prog) {
+		dec_clients = true;
+		ipa3_ctx->transport_pm.res_rel_in_prog = false;
+		ipa3_ctx->transport_pm.res_granted = false;
+	}
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+	if (dec_clients)
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("GSI_RESOURCE");
+}
+
+int ipa_gsi_rel_res_cb(void *user_data)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->transport_pm.lock, flags);
+
+	ipa3_ctx->transport_pm.res_rel_in_prog = true;
+	queue_delayed_work(ipa3_ctx->transport_power_mgmt_wq,
+			   &ipa_gsi_release_resource_work,
+			   msecs_to_jiffies(IPA_TRANSPORT_PROD_TIMEOUT_MSEC));
+
+	spin_unlock_irqrestore(&ipa3_ctx->transport_pm.lock, flags);
+	return 0;
+}
+
+static void ipa_gsi_notify_cb(struct gsi_per_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_PER_EVT_GLOB_ERROR:
+		IPAERR("Got GSI_PER_EVT_GLOB_ERROR\n");
+		IPAERR("Err_desc = 0x%04x\n", notify->data.err_desc);
+		break;
+	case GSI_PER_EVT_GLOB_GP1:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP1\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GLOB_GP2:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP2\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GLOB_GP3:
+		IPAERR("Got GSI_PER_EVT_GLOB_GP3\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GENERAL_BREAK_POINT:
+		IPAERR("Got GSI_PER_EVT_GENERAL_BREAK_POINT\n");
+		break;
+	case GSI_PER_EVT_GENERAL_BUS_ERROR:
+		IPAERR("Got GSI_PER_EVT_GENERAL_BUS_ERROR\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW:
+		IPAERR("Got GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW\n");
+		BUG();
+		break;
+	case GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW:
+		IPAERR("Got GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW\n");
+		BUG();
+		break;
+	default:
+		IPAERR("Received unexpected evt: %d\n",
+			notify->evt_id);
+		BUG();
+	}
+}
+
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data)
+{
+	struct ipa3_ready_cb_info *cb_info = NULL;
+
+	/* check ipa3_ctx existed or not */
+	if (!ipa3_ctx) {
+		IPADBG("IPA driver has not been initialized yet\n");
+		return -ENXIO;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ipa_initialization_complete) {
+		mutex_unlock(&ipa3_ctx->lock);
+		IPADBG("IPA driver has already finished initialization\n");
+		return -EEXIST;
+	}
+
+	cb_info = kmalloc(sizeof(struct ipa3_ready_cb_info), GFP_KERNEL);
+	if (!cb_info) {
+		mutex_unlock(&ipa3_ctx->lock);
+		return -ENOMEM;
+	}
+
+	cb_info->ready_cb = ipa_ready_cb;
+	cb_info->user_data = user_data;
+
+	list_add_tail(&cb_info->link, &ipa3_ctx->ipa_ready_cb_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
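+/*
+ * ipa3_iommu_map() - map a region into one of the IPA SMMU domains.
+ * Guards against mapping an IOVA that falls inside the AP or uC context-bank
+ * VA windows before handing the request to iommu_map(); the WLAN domain is
+ * mapped once and is not range-checked here.
+ */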
+int ipa3_iommu_map(struct iommu_domain *domain,
+	unsigned long iova, phys_addr_t paddr, size_t size, int prot)
+{
+	struct ipa_smmu_cb_ctx *ap_cb = ipa3_get_smmu_ctx();
+	struct ipa_smmu_cb_ctx *uc_cb = ipa3_get_uc_smmu_ctx();
+
+	IPADBG("domain =0x%p iova 0x%lx\n", domain, iova);
+	IPADBG("paddr =0x%pa size 0x%x\n", &paddr, (u32)size);
+
+	/* make sure no overlapping */
+	if (domain == ipa3_get_smmu_domain()) {
+		if (iova >= ap_cb->va_start && iova < ap_cb->va_end) {
+			IPAERR("iommu AP overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else if (domain == ipa3_get_wlan_smmu_domain()) {
+		/* wlan is one time map */
+	} else if (domain == ipa3_get_uc_smmu_domain()) {
+		if (iova >= uc_cb->va_start && iova < uc_cb->va_end) {
+			IPAERR("iommu uC overlap addr 0x%lx\n", iova);
+			ipa_assert();
+			return -EFAULT;
+		}
+	} else {
+		IPAERR("Unexpected domain 0x%p\n", domain);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return iommu_map(domain, iova, paddr, size, prot);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA HW device driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
new file mode 100644
index 0000000..f3b07f5
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -0,0 +1,2000 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/barrier.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include "ipa_i.h"
+#include "linux/msm_gsi.h"
+
+/*
+ * These values were determined empirically and show good E2E
+ * bidirectional throughput
+ */
+#define IPA_HOLB_TMR_EN 0x1
+#define IPA_HOLB_TMR_DIS 0x0
+#define IPA_HOLB_TMR_DEFAULT_VAL 0x1ff
+#define IPA_POLL_AGGR_STATE_RETRIES_NUM 3
+#define IPA_POLL_AGGR_STATE_SLEEP_MSEC 1
+
+#define IPA_PKT_FLUSH_TO_US 100
+
+#define IPA_POLL_FOR_EMPTINESS_NUM 50
+#define IPA_POLL_FOR_EMPTINESS_SLEEP_USEC 20
+#define IPA_CHANNEL_STOP_IN_PROC_TO_MSEC 5
+#define IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC 200
+
+/* xfer_rsc_idx should be 7 bits */
+#define IPA_XFER_RSC_IDX_MAX 127
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+	bool *is_empty);
+
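+/*
+ * ipa3_enable_data_path() - enable the data path for a connected endpoint.
+ * For consumer pipes this disables the HOLB timer and clears the pipe
+ * suspend bit when allowed; the endpoint is then assigned to its resource
+ * group.
+ */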
+int ipa3_enable_data_path(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	int res = 0;
+	struct ipahal_reg_endp_init_rsrc_grp rsrc_grp;
+
+	IPADBG("Enabling data path\n");
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Enable the pipe */
+	if (IPA_CLIENT_IS_CONS(ep->client) &&
+	    (ep->keep_ipa_awake ||
+	     ipa3_ctx->resume_on_connect[ep->client] ||
+	     !ipa3_should_pipe_be_suspended(ep->client))) {
+		memset(&ep_cfg_ctrl, 0, sizeof(ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = false;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	/* Assign the resource group for pipe */
+	memset(&rsrc_grp, 0, sizeof(rsrc_grp));
+	rsrc_grp.rsrc_grp = ipa_get_ep_group(ep->client);
+	if (rsrc_grp.rsrc_grp == -1) {
+		IPAERR("invalid group for client %d\n", ep->client);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPADBG("Setting group %d for pipe %d\n",
+		rsrc_grp.rsrc_grp, clnt_hdl);
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_RSRC_GRP_n, clnt_hdl,
+		&rsrc_grp);
+
+	return res;
+}
+
+int ipa3_disable_data_path(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[clnt_hdl];
+	struct ipa_ep_cfg_holb holb_cfg;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	int res = 0;
+
+	IPADBG("Disabling data path\n");
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_EN;
+		holb_cfg.tmr_val = 0;
+		res = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+
+	/* Suspend the pipe */
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		/*
+		 * For the RG10 workaround, the uC needs to be loaded before
+		 * the pipe can be suspended in this case.
+		 */
+		if (ipa3_ctx->apply_rg10_wa && ipa3_uc_state_check()) {
+			IPADBG("uC is not loaded yet, waiting...\n");
+			res = wait_for_completion_timeout(
+				&ipa3_ctx->uc_loaded_completion_obj, 60 * HZ);
+			if (res == 0)
+				IPADBG("timeout waiting for uC to load\n");
+		}
+
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+
+	udelay(IPA_PKT_FLUSH_TO_US);
+	ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, &ep_aggr);
+	if (ep_aggr.aggr_en) {
+		res = ipa3_tag_aggr_force_close(clnt_hdl);
+		if (res) {
+			IPAERR("tag process timeout, client:%d err:%d\n",
+				   clnt_hdl, res);
+			BUG();
+		}
+	}
+
+	return res;
+}
+
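+/*
+ * ipa3_smmu_map_peer_bam() - map the peer BAM register space into the AP
+ * SMMU domain. Only needed when S1 translation is enabled. The peer BAM is
+ * mapped at cb->va_end (just past the AP IOVA window) and the mapping is
+ * refcounted via peer_bam_map_cnt so that multiple pipes to the same BAM
+ * share it.
+ */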
+static int ipa3_smmu_map_peer_bam(unsigned long dev)
+{
+	phys_addr_t base;
+	u32 size;
+	struct iommu_domain *smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		if (ipa3_ctx->peer_bam_map_cnt == 0) {
+			if (sps_get_bam_addr(dev, &base, &size)) {
+				IPAERR("Fail to get addr\n");
+				return -EINVAL;
+			}
+			smmu_domain = ipa3_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				if (ipa3_iommu_map(smmu_domain,
+					cb->va_end,
+					rounddown(base, PAGE_SIZE),
+					roundup(size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+					IOMMU_READ | IOMMU_WRITE |
+					IOMMU_DEVICE)) {
+					IPAERR("Fail to ipa3_iommu_map\n");
+					return -EINVAL;
+				}
+			}
+
+			ipa3_ctx->peer_bam_iova = cb->va_end;
+			ipa3_ctx->peer_bam_pa = base;
+			ipa3_ctx->peer_bam_map_size = size;
+			ipa3_ctx->peer_bam_dev = dev;
+
+			IPADBG("Peer bam %lu mapped\n", dev);
+		} else {
+			WARN_ON(dev != ipa3_ctx->peer_bam_dev);
+		}
+
+		ipa3_ctx->peer_bam_map_cnt++;
+	}
+
+	return 0;
+}
+
+static int ipa3_connect_configure_sps(const struct ipa_connect_params *in,
+				     struct ipa3_ep_context *ep, int ipa_ep_idx)
+{
+	int result = -EFAULT;
+
+	/* Default Config */
+	ep->ep_hdl = sps_alloc_endpoint();
+
+	if (ipa3_smmu_map_peer_bam(in->client_bam_hdl)) {
+		IPAERR("fail to iommu map peer BAM.\n");
+		return -EFAULT;
+	}
+
+	if (ep->ep_hdl == NULL) {
+		IPAERR("SPS EP allocation failed.\n");
+		return -EFAULT;
+	}
+
+	result = sps_get_config(ep->ep_hdl,
+		&ep->connect);
+	if (result) {
+		IPAERR("fail to get config.\n");
+		return -EFAULT;
+	}
+
+	/* Specific Config */
+	if (IPA_CLIENT_IS_CONS(in->client)) {
+		ep->connect.mode = SPS_MODE_SRC;
+		ep->connect.destination =
+			in->client_bam_hdl;
+		ep->connect.dest_iova = ipa3_ctx->peer_bam_iova;
+		ep->connect.source = ipa3_ctx->bam_handle;
+		ep->connect.dest_pipe_index =
+			in->client_ep_idx;
+		ep->connect.src_pipe_index = ipa_ep_idx;
+	} else {
+		ep->connect.mode = SPS_MODE_DEST;
+		ep->connect.source = in->client_bam_hdl;
+		ep->connect.source_iova = ipa3_ctx->peer_bam_iova;
+		ep->connect.destination = ipa3_ctx->bam_handle;
+		ep->connect.src_pipe_index = in->client_ep_idx;
+		ep->connect.dest_pipe_index = ipa_ep_idx;
+	}
+
+	return 0;
+}
+
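+/*
+ * ipa3_connect_allocate_fifo() - allocate a descriptor or data FIFO.
+ * Uses IPA pipe memory when the client prefers it and the allocation
+ * succeeds; otherwise falls back to DMA-coherent memory. When S1 translation
+ * is enabled, the returned DMA address is an IOVA and phys_base is resolved
+ * via iommu_iova_to_phys().
+ */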
+static int ipa3_connect_allocate_fifo(const struct ipa_connect_params *in,
+				     struct sps_mem_buffer *mem_buff_ptr,
+				     bool *fifo_in_pipe_mem_ptr,
+				     u32 *fifo_pipe_mem_ofst_ptr,
+				     u32 fifo_size, int ipa_ep_idx)
+{
+	dma_addr_t dma_addr;
+	u32 ofst;
+	int result = -EFAULT;
+	struct iommu_domain *smmu_domain;
+
+	mem_buff_ptr->size = fifo_size;
+	if (in->pipe_mem_preferred) {
+		if (ipa3_pipe_mem_alloc(&ofst, fifo_size)) {
+			IPAERR("FIFO pipe mem alloc fail ep %u\n",
+				ipa_ep_idx);
+			mem_buff_ptr->base =
+				dma_alloc_coherent(ipa3_ctx->pdev,
+				mem_buff_ptr->size,
+				&dma_addr, GFP_KERNEL);
+		} else {
+			memset(mem_buff_ptr, 0, sizeof(struct sps_mem_buffer));
+			result = sps_setup_bam2bam_fifo(mem_buff_ptr, ofst,
+				fifo_size, 1);
+			WARN_ON(result);
+			*fifo_in_pipe_mem_ptr = 1;
+			dma_addr = mem_buff_ptr->phys_base;
+			*fifo_pipe_mem_ofst_ptr = ofst;
+		}
+	} else {
+		mem_buff_ptr->base =
+			dma_alloc_coherent(ipa3_ctx->pdev, mem_buff_ptr->size,
+			&dma_addr, GFP_KERNEL);
+	}
+	if (ipa3_ctx->smmu_s1_bypass) {
+		mem_buff_ptr->phys_base = dma_addr;
+	} else {
+		mem_buff_ptr->iova = dma_addr;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			mem_buff_ptr->phys_base =
+				iommu_iova_to_phys(smmu_domain, dma_addr);
+		}
+	}
+	if (mem_buff_ptr->base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_connect() - low-level IPA client connect
+ * @in:	[in] input parameters from client
+ * @sps:	[out] sps output from IPA needed by client for sps_connect
+ * @clnt_hdl:	[out] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to connect to
+ * IPA in BAM-BAM mode. These peripherals are USB and HSIC. This API
+ * expects the caller to take responsibility for adding any needed headers,
+ * routing and filtering tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps,
+		u32 *clnt_hdl)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipahal_reg_ep_cfg_status ep_status;
+	unsigned long base;
+	struct iommu_domain *smmu_domain;
+
+	IPADBG("connecting client\n");
+
+	if (in == NULL || sps == NULL || clnt_hdl == NULL ||
+	    in->client >= IPA_CLIENT_MAX ||
+	    in->desc_fifo_sz == 0 || in->data_fifo_sz == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->client);
+
+	ep->skip_ep_cfg = in->skip_ep_cfg;
+	ep->valid = 1;
+	ep->client = in->client;
+	ep->client_notify = in->notify;
+	ep->priv = in->priv;
+	ep->keep_ipa_awake = in->keep_ipa_awake;
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				ipa_ep_idx);
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		/* Setting EP status 0 */
+		memset(&ep_status, 0, sizeof(ep_status));
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	result = ipa3_connect_configure_sps(in, ep, ipa_ep_idx);
+	if (result) {
+		IPAERR("fail to configure SPS.\n");
+		goto ipa_cfg_ep_fail;
+	}
+
+	if (!ipa3_ctx->smmu_s1_bypass &&
+			(in->desc.base == NULL ||
+			 in->data.base == NULL)) {
+		IPAERR("client must allocate FIFOs when SMMU is enabled: data_fifo=0x%p desc_fifo=0x%p\n",
+				in->data.base, in->desc.base);
+		goto desc_mem_alloc_fail;
+	}
+
+	if (in->desc.base == NULL) {
+		result = ipa3_connect_allocate_fifo(in, &ep->connect.desc,
+						  &ep->desc_fifo_in_pipe_mem,
+						  &ep->desc_fifo_pipe_mem_ofst,
+						  in->desc_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DESC FIFO.\n");
+			goto desc_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DESC FIFO\n");
+		ep->connect.desc = in->desc;
+		ep->desc_fifo_client_allocated = 1;
+	}
+	IPADBG("Descriptor FIFO pa=%pa, size=%d\n", &ep->connect.desc.phys_base,
+	       ep->connect.desc.size);
+
+	if (in->data.base == NULL) {
+		result = ipa3_connect_allocate_fifo(in, &ep->connect.data,
+						&ep->data_fifo_in_pipe_mem,
+						&ep->data_fifo_pipe_mem_ofst,
+						in->data_fifo_sz, ipa_ep_idx);
+		if (result) {
+			IPAERR("fail to allocate DATA FIFO.\n");
+			goto data_mem_alloc_fail;
+		}
+	} else {
+		IPADBG("client allocated DATA FIFO\n");
+		ep->connect.data = in->data;
+		ep->data_fifo_client_allocated = 1;
+	}
+	IPADBG("Data FIFO pa=%pa, size=%d\n", &ep->connect.data.phys_base,
+	       ep->connect.data.size);
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		ep->connect.data.iova = ep->connect.data.phys_base;
+		base = ep->connect.data.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			if (ipa3_iommu_map(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE)) {
+				IPAERR("Fail to ipa3_iommu_map data FIFO\n");
+				goto iommu_map_data_fail;
+			}
+		}
+		ep->connect.desc.iova = ep->connect.desc.phys_base;
+		base = ep->connect.desc.iova;
+		if (smmu_domain != NULL) {
+			if (ipa3_iommu_map(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE),
+				IOMMU_READ | IOMMU_WRITE)) {
+				IPAERR("Fail to ipa3_iommu_map desc FIFO\n");
+				goto iommu_map_desc_fail;
+			}
+		}
+	}
+
+	if (IPA_CLIENT_IS_USB_CONS(in->client))
+		ep->connect.event_thresh = IPA_USB_EVENT_THRESHOLD;
+	else
+		ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+	ep->connect.options = SPS_O_AUTO_ENABLE;    /* BAM-to-BAM */
+
+	result = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect, in->client);
+	if (result) {
+		IPAERR("sps_connect fails.\n");
+		goto sps_connect_fail;
+	}
+
+	sps->ipa_bam_hdl = ipa3_ctx->bam_handle;
+	sps->ipa_ep_idx = ipa_ep_idx;
+	*clnt_hdl = ipa_ep_idx;
+	memcpy(&sps->desc, &ep->connect.desc, sizeof(struct sps_mem_buffer));
+	memcpy(&sps->data, &ep->connect.data, sizeof(struct sps_mem_buffer));
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+
+	IPADBG("client %d (ep: %d) connected\n", in->client, ipa_ep_idx);
+
+	return 0;
+
+sps_connect_fail:
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.desc.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+iommu_map_desc_fail:
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.data.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+iommu_map_data_fail:
+	if (!ep->data_fifo_client_allocated) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+				  ep->connect.data.size,
+				  ep->connect.data.base,
+				  ep->connect.data.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+				  ep->connect.data.size);
+	}
+data_mem_alloc_fail:
+	if (!ep->desc_fifo_client_allocated) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+				  ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+				  ep->connect.desc.size);
+	}
+desc_mem_alloc_fail:
+	sps_free_endpoint(ep->ep_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->client);
+fail:
+	return result;
+}
+
+static int ipa3_smmu_unmap_peer_bam(unsigned long dev)
+{
+	size_t len;
+	struct iommu_domain *smmu_domain;
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		WARN_ON(dev != ipa3_ctx->peer_bam_dev);
+		ipa3_ctx->peer_bam_map_cnt--;
+		if (ipa3_ctx->peer_bam_map_cnt == 0) {
+			len = roundup(ipa3_ctx->peer_bam_map_size +
+					ipa3_ctx->peer_bam_pa -
+					rounddown(ipa3_ctx->peer_bam_pa,
+						PAGE_SIZE), PAGE_SIZE);
+			smmu_domain = ipa3_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				if (iommu_unmap(smmu_domain,
+					cb->va_end, len) != len) {
+					IPAERR("Fail to iommu_unmap\n");
+					return -EINVAL;
+				}
+				IPADBG("Peer bam %lu unmapped\n", dev);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_disconnect() - low-level IPA client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to disconnect
+ * from IPA in BAM-BAM mode. This API expects the caller to take
+ * responsibility for freeing any needed headers, routing and filtering
+ * tables and rules.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disconnect(u32 clnt_hdl)
+{
+	int result;
+	struct ipa3_ep_context *ep;
+	unsigned long peer_bam;
+	unsigned long base;
+	struct iommu_domain *smmu_domain;
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+	enum ipa_client_type client_type;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	client_type = ipa3_get_client_mapping(clnt_hdl);
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(client_type);
+
+	/* Set Disconnect in Progress flag. */
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+
+	result = ipa3_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+				clnt_hdl);
+		return -EPERM;
+	}
+
+	result = sps_disconnect(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS disconnect failed.\n");
+		return -EPERM;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		peer_bam = ep->connect.destination;
+	else
+		peer_bam = ep->connect.source;
+
+	if (ipa3_smmu_unmap_peer_bam(peer_bam)) {
+		IPAERR("fail to iommu unmap peer BAM.\n");
+		return -EPERM;
+	}
+
+	if (!ep->desc_fifo_client_allocated &&
+	     ep->connect.desc.base) {
+		if (!ep->desc_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+					  ep->connect.desc.size,
+					  ep->connect.desc.base,
+					  ep->connect.desc.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->desc_fifo_pipe_mem_ofst,
+					  ep->connect.desc.size);
+	}
+
+	if (!ep->data_fifo_client_allocated &&
+	     ep->connect.data.base) {
+		if (!ep->data_fifo_in_pipe_mem)
+			dma_free_coherent(ipa3_ctx->pdev,
+					  ep->connect.data.size,
+					  ep->connect.data.base,
+					  ep->connect.data.phys_base);
+		else
+			ipa3_pipe_mem_free(ep->data_fifo_pipe_mem_ofst,
+					  ep->connect.data.size);
+	}
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.desc.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.desc.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+
+	if (!ipa3_ctx->smmu_s1_bypass) {
+		base = ep->connect.data.iova;
+		smmu_domain = ipa_get_smmu_domain();
+		if (smmu_domain != NULL) {
+			iommu_unmap(smmu_domain,
+				rounddown(base, PAGE_SIZE),
+				roundup(ep->connect.data.size + base -
+					rounddown(base, PAGE_SIZE), PAGE_SIZE));
+		}
+	}
+
+	result = sps_free_endpoint(ep->ep_hdl);
+	if (result) {
+		IPAERR("SPS de-alloc EP failed.\n");
+		return -EPERM;
+	}
+
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	/*
+	 * If APPS-based flow control is not used and a force-clear request
+	 * was previously sent for this pipe, ask the modem to disable the
+	 * force-clear for this datapath.
+	 */
+	if (!ipa3_ctx->tethered_flow_control && ep->qmi_request_sent) {
+		req.request_id = clnt_hdl;
+		res = ipa3_qmi_disable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("disable_force_clear_datapath failed %d\n",
+				res);
+		}
+	}
+
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+	IPA_ACTIVE_CLIENTS_DEC_EP(client_type);
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_reset_endpoint() - reset an endpoint from BAM perspective
+ * @clnt_hdl: [in] IPA client handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_endpoint(u32 clnt_hdl)
+{
+	int res;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Bad parameters.\n");
+		return -EFAULT;
+	}
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	res = sps_disconnect(ep->ep_hdl);
+	if (res) {
+		IPAERR("sps_disconnect() failed, res=%d.\n", res);
+		goto bail;
+	} else {
+		res = ipa3_sps_connect_safe(ep->ep_hdl, &ep->connect,
+			ep->client);
+		if (res) {
+			IPAERR("sps_connect() failed, res=%d.\n", res);
+			goto bail;
+		}
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return res;
+}
+
+/**
+ * ipa3_sps_connect_safe() - connect endpoint from BAM perspective
+ * @h: [in] sps pipe handle
+ * @connect: [in] sps connect parameters
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * This function connects a BAM pipe using the SPS driver sps_connect() API.
+ * By first requesting the uC interface to reset the pipe, it avoids an IPA HW
+ * limitation that does not allow resetting a BAM pipe while there is traffic
+ * in the IPA TX command queue.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+			 enum ipa_client_type ipa_client)
+{
+	int res;
+
+	if (ipa3_ctx->ipa_hw_type > IPA_HW_v2_5 ||
+			ipa3_ctx->skip_uc_pipe_reset) {
+		IPADBG("uC pipe reset is not required\n");
+	} else {
+		res = ipa3_uc_reset_pipe(ipa_client);
+		if (res)
+			return res;
+	}
+	return sps_connect(h, connect);
+}
+
+static void ipa_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	if (notify) {
+		switch (notify->evt_id) {
+		case GSI_CHAN_INVALID_TRE_ERR:
+			IPAERR("Received GSI_CHAN_INVALID_TRE_ERR\n");
+			break;
+		case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+			IPAERR("Received GSI_CHAN_NON_ALLOC_EVT_ACCESS_ERR\n");
+			break;
+		case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+			IPAERR("Received GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+			break;
+		case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+			IPAERR("Received GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+			break;
+		case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+			IPAERR("Received GSI_CHAN_UNSUPP_INTER_EE_OP_ERR\n");
+			break;
+		case GSI_CHAN_HWO_1_ERR:
+			IPAERR("Received GSI_CHAN_HWO_1_ERR\n");
+			break;
+		default:
+			IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+		}
+		BUG();
+	}
+}
+
+static void ipa_xfer_cb(struct gsi_chan_xfer_notify *notify)
+{
+}
+
+static int ipa3_reconfigure_channel_to_gpi(struct ipa3_ep_context *ep,
+	struct gsi_chan_props *orig_chan_props,
+	struct ipa_mem_buffer *chan_dma)
+{
+	struct gsi_chan_props chan_props;
+	enum gsi_status gsi_res;
+	dma_addr_t chan_dma_addr;
+	int result;
+
+	/* Set up channel properties */
+	memset(&chan_props, 0, sizeof(struct gsi_chan_props));
+	chan_props.prot = GSI_CHAN_PROT_GPI;
+	chan_props.dir = GSI_CHAN_DIR_FROM_GSI;
+	chan_props.ch_id = orig_chan_props->ch_id;
+	chan_props.evt_ring_hdl = orig_chan_props->evt_ring_hdl;
+	chan_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	chan_props.ring_len = 2 * GSI_CHAN_RE_SIZE_16B;
+	chan_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, chan_props.ring_len,
+		&chan_dma_addr, 0);
+	chan_props.ring_base_addr = chan_dma_addr;
+	chan_dma->base = chan_props.ring_base_vaddr;
+	chan_dma->phys_base = chan_props.ring_base_addr;
+	chan_dma->size = chan_props.ring_len;
+	chan_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
+	chan_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	chan_props.low_weight = 1;
+	chan_props.chan_user_data = NULL;
+	chan_props.err_cb = ipa_chan_err_cb;
+	chan_props.xfer_cb = ipa_xfer_cb;
+
+	gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, &chan_props, NULL);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error setting channel properties\n");
+		result = -EFAULT;
+		goto set_chan_cfg_fail;
+	}
+
+	return 0;
+
+set_chan_cfg_fail:
+	dma_free_coherent(ipa3_ctx->pdev, chan_dma->size,
+		chan_dma->base, chan_dma->phys_base);
+	return result;
+
+}
+
+static int ipa3_restore_channel_properties(struct ipa3_ep_context *ep,
+	struct gsi_chan_props *chan_props,
+	union gsi_channel_scratch *chan_scratch)
+{
+	enum gsi_status gsi_res;
+
+	gsi_res = gsi_set_channel_cfg(ep->gsi_chan_hdl, chan_props,
+		chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error restoring channel properties\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
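+/*
+ * ipa3_reset_with_open_aggr_frame_wa() - reset a channel that still has an
+ * open aggregation frame.
+ * Sequence (as implemented below): force-close the aggregation frame, reset
+ * the channel, temporarily reconfigure it as a small dummy GPI channel,
+ * start it and queue a 1-byte transfer to flush the frame, poll
+ * IPA_STATE_AGGR_ACTIVE until the frame closes, then stop and reset the
+ * channel again and restore its original properties.
+ */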
+static int ipa3_reset_with_open_aggr_frame_wa(u32 clnt_hdl,
+	struct ipa3_ep_context *ep)
+{
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	struct gsi_chan_props orig_chan_props;
+	union gsi_channel_scratch orig_chan_scratch;
+	struct ipa_mem_buffer chan_dma;
+	void *buff;
+	dma_addr_t dma_addr;
+	struct gsi_xfer_elem xfer_elem;
+	int i;
+	int aggr_active_bitmap = 0;
+
+	IPADBG("Applying reset channel with open aggregation frame WA\n");
+	ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+	/* Reset channel */
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		return -EFAULT;
+	}
+
+	/* Reconfigure channel to dummy GPI channel */
+	memset(&orig_chan_props, 0, sizeof(struct gsi_chan_props));
+	memset(&orig_chan_scratch, 0, sizeof(union gsi_channel_scratch));
+	gsi_res = gsi_get_channel_cfg(ep->gsi_chan_hdl, &orig_chan_props,
+		&orig_chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error getting channel properties: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	memset(&chan_dma, 0, sizeof(struct ipa_mem_buffer));
+	result = ipa3_reconfigure_channel_to_gpi(ep, &orig_chan_props,
+		&chan_dma);
+	if (result)
+		return -EFAULT;
+
+	/* Start channel and put 1 Byte descriptor on it */
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto start_chan_fail;
+	}
+
+	memset(&xfer_elem, 0, sizeof(struct gsi_xfer_elem));
+	buff = dma_alloc_coherent(ipa3_ctx->pdev, 1, &dma_addr,
+		GFP_KERNEL);
+	xfer_elem.addr = dma_addr;
+	xfer_elem.len = 1;
+	xfer_elem.flags = GSI_XFER_FLAG_EOT;
+	xfer_elem.type = GSI_XFER_ELEM_DATA;
+
+	gsi_res = gsi_queue_xfer(ep->gsi_chan_hdl, 1, &xfer_elem,
+		true);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error queueing xfer: %d\n", gsi_res);
+		result = -EFAULT;
+		goto queue_xfer_fail;
+	}
+
+	/* Wait for aggregation frame to be closed and stop channel*/
+	for (i = 0; i < IPA_POLL_AGGR_STATE_RETRIES_NUM; i++) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (!(aggr_active_bitmap & (1 << clnt_hdl)))
+			break;
+		msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+	}
+
+	if (aggr_active_bitmap & (1 << clnt_hdl)) {
+		IPAERR("Failed closing aggr frame for client: %d\n",
+			clnt_hdl);
+		BUG();
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
+
+	result = ipa3_stop_gsi_channel(clnt_hdl);
+	if (result) {
+		IPAERR("Error stopping channel: %d\n", result);
+		goto start_chan_fail;
+	}
+
+	/* Reset channel */
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		result = -EFAULT;
+		goto start_chan_fail;
+	}
+
+	/*
+	 * Need to sleep for 1ms, as required by the HW-verified
+	 * sequence for resetting a GSI channel
+	 */
+	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+
+	/* Restore channels properties */
+	result = ipa3_restore_channel_properties(ep, &orig_chan_props,
+		&orig_chan_scratch);
+	if (result)
+		goto restore_props_fail;
+	dma_free_coherent(ipa3_ctx->pdev, chan_dma.size,
+		chan_dma.base, chan_dma.phys_base);
+
+	return 0;
+
+queue_xfer_fail:
+	ipa3_stop_gsi_channel(clnt_hdl);
+	dma_free_coherent(ipa3_ctx->pdev, 1, buff, dma_addr);
+start_chan_fail:
+	ipa3_restore_channel_properties(ep, &orig_chan_props,
+		&orig_chan_scratch);
+restore_props_fail:
+	dma_free_coherent(ipa3_ctx->pdev, chan_dma.size,
+		chan_dma.base, chan_dma.phys_base);
+	return result;
+}
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+	int aggr_active_bitmap = 0;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/*
+	 * Check for open aggregation frame on Consumer EP -
+	 * reset with open aggregation frame WA
+	 */
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (aggr_active_bitmap & (1 << clnt_hdl)) {
+			result = ipa3_reset_with_open_aggr_frame_wa(clnt_hdl,
+				ep);
+			if (result)
+				goto reset_chan_fail;
+			goto finish_reset;
+		}
+	}
+
+	/*
+	 * Reset channel
+	 * If the reset called after stop, need to wait 1ms
+	 */
+	msleep(IPA_POLL_AGGR_STATE_SLEEP_MSEC);
+	gsi_res = gsi_reset_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting channel: %d\n", gsi_res);
+		result = -EFAULT;
+		goto reset_chan_fail;
+	}
+
+finish_reset:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+reset_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/* Reset event ring */
+	gsi_res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error resetting event: %d\n", gsi_res);
+		result = -EFAULT;
+		goto reset_evt_fail;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+reset_evt_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+static bool ipa3_is_legal_params(struct ipa_request_gsi_channel_params *params)
+{
+	if (params->client >= IPA_CLIENT_MAX)
+		return false;
+	else
+		return true;
+}
+
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map)
+{
+	struct iommu_domain *smmu_domain;
+	int res;
+
+	if (ipa3_ctx->smmu_s1_bypass)
+		return 0;
+
+	smmu_domain = ipa3_get_smmu_domain();
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
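+	/* Identity-map (IOVA == PA) or unmap a single page covering the
+	 * peer's register.
+	 */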
+	if (map) {
+		res = ipa3_iommu_map(smmu_domain, phys_addr, phys_addr,
+			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE | IOMMU_DEVICE);
+	} else {
+		res = iommu_unmap(smmu_domain, phys_addr, PAGE_SIZE);
+		res = (res != PAGE_SIZE);
+	}
+	if (res) {
+		IPAERR("Fail to %s reg 0x%pa\n", map ? "map" : "unmap",
+			&phys_addr);
+		return -EINVAL;
+	}
+
+	IPADBG("Peer reg 0x%pa %s\n", &phys_addr, map ? "map" : "unmap");
+
+	return 0;
+}
+
+int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr, u32 size, bool map)
+{
+	struct iommu_domain *smmu_domain;
+	int res;
+
+	if (ipa3_ctx->smmu_s1_bypass)
+		return 0;
+
+	smmu_domain = ipa3_get_smmu_domain();
+	if (!smmu_domain) {
+		IPAERR("invalid smmu domain\n");
+		return -EINVAL;
+	}
+
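+	/*
+	 * Map or unmap the smallest page-aligned range that fully covers
+	 * [iova, iova + size): IOVA and PA are rounded down to a page
+	 * boundary and the length is rounded up accordingly.
+	 */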
+	if (map) {
+		res = ipa3_iommu_map(smmu_domain,
+			rounddown(iova, PAGE_SIZE),
+			rounddown(phys_addr, PAGE_SIZE),
+			roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE),
+			IOMMU_READ | IOMMU_WRITE);
+		if (res) {
+			IPAERR("Fail to map 0x%llx->0x%pa\n", iova, &phys_addr);
+			return -EINVAL;
+		}
+	} else {
+		res = iommu_unmap(smmu_domain,
+			rounddown(iova, PAGE_SIZE),
+			roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE));
+		if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE),
+			PAGE_SIZE)) {
+			IPAERR("Fail to unmap 0x%llx->0x%pa\n",
+				iova, &phys_addr);
+			return -EINVAL;
+		}
+	}
+
+	IPADBG("Peer buff %s 0x%llx->0x%pa\n", map ? "map" : "unmap",
+		iova, &phys_addr);
+
+	return 0;
+}
+
+
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+			     struct ipa_req_chan_out_params *out_params)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipahal_reg_ep_cfg_status ep_status;
+	unsigned long gsi_dev_hdl;
+	enum gsi_status gsi_res;
+	struct ipa_gsi_ep_config gsi_ep_cfg;
+	struct ipa_gsi_ep_config *gsi_ep_cfg_ptr = &gsi_ep_cfg;
+
+	IPADBG("entry\n");
+	if (params == NULL || out_params == NULL ||
+		!ipa3_is_legal_params(params)) {
+		IPAERR("bad parameters\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(params->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ep->skip_ep_cfg = params->skip_ep_cfg;
+	ep->valid = 1;
+	ep->client = params->client;
+	ep->client_notify = params->notify;
+	ep->priv = params->priv;
+	ep->keep_ipa_awake = params->keep_ipa_awake;
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &params->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		/* Setting EP status 0 */
+		memset(&ep_status, 0, sizeof(ep_status));
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep_status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	out_params->clnt_hdl = ipa_ep_idx;
+
+	result = ipa3_enable_data_path(out_params->clnt_hdl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				out_params->clnt_hdl);
+		goto ipa_cfg_ep_fail;
+	}
+
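+	/*
+	 * GSI setup sequence: allocate the event ring, write its scratch,
+	 * allocate the channel, write the channel scratch and finally query
+	 * the channel doorbell register addresses for the caller.
+	 */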
+	gsi_dev_hdl = ipa3_ctx->gsi_dev_hdl;
+	gsi_res = gsi_alloc_evt_ring(&params->evt_ring_params, gsi_dev_hdl,
+		&ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating event ring: %d\n", gsi_res);
+		result = -EFAULT;
+		goto ipa_cfg_ep_fail;
+	}
+
+	gsi_res = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl,
+		params->evt_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing event ring scratch: %d\n", gsi_res);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	memset(gsi_ep_cfg_ptr, 0, sizeof(struct ipa_gsi_ep_config));
+	gsi_ep_cfg_ptr = ipa_get_gsi_ep_info(ipa_ep_idx);
+	if (gsi_ep_cfg_ptr == NULL) {
+		IPAERR("Error ipa_get_gsi_ep_info ret NULL\n");
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+	params->chan_params.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	params->chan_params.ch_id = gsi_ep_cfg_ptr->ipa_gsi_chan_num;
+	gsi_res = gsi_alloc_channel(&params->chan_params, gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error allocating channel: %d, chan_id: %d\n", gsi_res,
+			params->chan_params.ch_id);
+		result = -EFAULT;
+		goto write_evt_scratch_fail;
+	}
+
+	memcpy(&ep->chan_scratch, &params->chan_scratch,
+		sizeof(union __packed gsi_channel_scratch));
+	ep->chan_scratch.xdci.max_outstanding_tre =
+		params->chan_params.re_size * gsi_ep_cfg_ptr->ipa_if_tlv;
+	gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		params->chan_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing channel scratch: %d\n", gsi_res);
+		result = -EFAULT;
+		goto write_chan_scratch_fail;
+	}
+
+	gsi_res = gsi_query_channel_db_addr(ep->gsi_chan_hdl,
+		&out_params->db_reg_phs_addr_lsb,
+		&out_params->db_reg_phs_addr_msb);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error querying channel DB registers addresses: %d\n",
+			gsi_res);
+		result = -EFAULT;
+		goto write_chan_scratch_fail;
+	}
+
+	ep->gsi_mem_info.evt_ring_len = params->evt_ring_params.ring_len;
+	ep->gsi_mem_info.evt_ring_base_addr =
+		params->evt_ring_params.ring_base_addr;
+	ep->gsi_mem_info.evt_ring_base_vaddr =
+		params->evt_ring_params.ring_base_vaddr;
+	ep->gsi_mem_info.chan_ring_len = params->chan_params.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		params->chan_params.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		params->chan_params.ring_base_vaddr;
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(params->client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("client %d (ep: %d) connected\n", params->client, ipa_ep_idx);
+	IPADBG("exit\n");
+
+	return 0;
+
+write_chan_scratch_fail:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+write_evt_scratch_fail:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+fail:
+	return result;
+}
+
+int ipa3_set_usb_max_packet_size(
+	enum ipa_usb_max_usb_packet_size usb_max_packet_size)
+{
+	struct gsi_device_scratch dev_scratch;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&dev_scratch, 0, sizeof(struct gsi_device_scratch));
+	dev_scratch.mhi_base_chan_idx_valid = false;
+	dev_scratch.max_usb_pkt_size_valid = true;
+	dev_scratch.max_usb_pkt_size = usb_max_packet_size;
+
+	gsi_res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+		&dev_scratch);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error writing device scratch: %d\n", gsi_res);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		xferrscidx > IPA_XFER_RSC_IDX_MAX) {
+		IPAERR("Bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	if (xferrscidx_valid) {
+		ep->chan_scratch.xdci.xferrscidx = xferrscidx;
+		gsi_res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+			ep->chan_scratch);
+		if (gsi_res != GSI_STATUS_SUCCESS) {
+			IPAERR("Error writing channel scratch: %d\n", gsi_res);
+			goto write_chan_scratch_fail;
+		}
+	}
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto write_chan_scratch_fail;
+	}
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+write_chan_scratch_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+static int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
+	unsigned long chan_hdl)
+{
+	enum gsi_status gsi_res;
+
+	memset(gsi_chan_info, 0, sizeof(struct gsi_chan_info));
+	gsi_res = gsi_query_channel_info(chan_hdl, gsi_chan_info);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error querying channel info: %d\n", gsi_res);
+		return -EFAULT;
+	}
+	if (!gsi_chan_info->evt_valid) {
+		IPAERR("Event info invalid\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
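+/*
+ * A UL (producer) channel is empty when its read and write pointers match.
+ * A DL (consumer) channel is empty when the event ring read pointer has
+ * caught up with the channel write pointer, or with the channel ring base
+ * when the write pointer sits on the link TRB.
+ */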
+static bool ipa3_is_xdci_channel_with_given_info_empty(
+	struct ipa3_ep_context *ep, struct gsi_chan_info *chan_info)
+{
+	bool is_empty = false;
+
+	if (!IPA_CLIENT_IS_CONS(ep->client)) {
+		/* For UL channel: chan.RP == chan.WP */
+		is_empty = (chan_info->rp == chan_info->wp);
+	} else {
+		/* For DL channel: */
+		if (chan_info->wp !=
+		    (ep->gsi_mem_info.chan_ring_base_addr +
+		     ep->gsi_mem_info.chan_ring_len -
+		     GSI_CHAN_RE_SIZE_16B)) {
+			/*  if chan.WP != LINK TRB: chan.WP == evt.RP */
+			is_empty = (chan_info->wp == chan_info->evt_rp);
+		} else {
+			/*
+			 * if chan.WP == LINK TRB: chan.base_xfer_ring_addr
+			 * == evt.RP
+			 */
+			is_empty = (ep->gsi_mem_info.chan_ring_base_addr ==
+				chan_info->evt_rp);
+		}
+	}
+
+	return is_empty;
+}
+
+static int ipa3_is_xdci_channel_empty(struct ipa3_ep_context *ep,
+	bool *is_empty)
+{
+	struct gsi_chan_info chan_info;
+	int res;
+
+	if (!ep || !is_empty || !ep->valid) {
+		IPAERR("Input Error\n");
+		return -EFAULT;
+	}
+
+	res = ipa3_get_gsi_chan_info(&chan_info, ep->gsi_chan_hdl);
+	if (res) {
+		IPAERR("Failed to get GSI channel info\n");
+		return -EFAULT;
+	}
+
+	*is_empty = ipa3_is_xdci_channel_with_given_info_empty(ep, &chan_info);
+
+	return 0;
+}
+
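+/*
+ * Ask the modem over QMI to force-clear the data path for the pipes in
+ * source_pipe_bitmask, optionally throttling the source as well.
+ */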
+static int ipa3_enable_force_clear(u32 request_id, bool throttle_source,
+	u32 source_pipe_bitmask)
+{
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+	int result;
+
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	req.source_pipe_bitmask = source_pipe_bitmask;
+	if (throttle_source) {
+		req.throttle_source_valid = 1;
+		req.throttle_source = 1;
+	}
+	result = ipa3_qmi_enable_force_clear_datapath_send(&req);
+	if (result) {
+		IPAERR("ipa3_qmi_enable_force_clear_datapath_send failed %d\n",
+			result);
+		return result;
+	}
+
+	return 0;
+}
+
+static int ipa3_disable_force_clear(u32 request_id)
+{
+	struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+	int result;
+
+	memset(&req, 0, sizeof(req));
+	req.request_id = request_id;
+	result = ipa3_qmi_disable_force_clear_datapath_send(&req);
+	if (result) {
+		IPAERR("ipa3_qmi_disable_force_clear_datapath_send failed %d\n",
+			result);
+		return result;
+	}
+
+	return 0;
+}
+
+/* Clocks should be voted before invoking this function */
+static int ipa3_xdci_stop_gsi_channel(u32 clnt_hdl, bool *stop_in_proc)
+{
+	int res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		!stop_in_proc) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	res = ipa3_stop_gsi_channel(clnt_hdl);
+	if (res != 0 && res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPAERR("xDCI stop channel failed res=%d\n", res);
+		return -EFAULT;
+	}
+
+	*stop_in_proc = (res != 0);
+
+	IPADBG("xDCI channel is %s (result=%d)\n",
+		res ? "STOP_IN_PROC/TimeOut" : "STOP", res);
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+/* Clocks should be voted before invoking this function */
+static int ipa3_xdci_stop_gsi_ch_brute_force(u32 clnt_hdl,
+	bool *stop_in_proc)
+{
+	unsigned long jiffies_start;
+	unsigned long jiffies_timeout =
+		msecs_to_jiffies(IPA_CHANNEL_STOP_IN_PROC_TO_MSEC);
+	int res;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 ||
+		!stop_in_proc) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
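+	/*
+	 * Keep issuing stop requests until the channel reports STOP or the
+	 * IPA_CHANNEL_STOP_IN_PROC_TO_MSEC timeout expires.
+	 */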
+	jiffies_start = jiffies;
+	while (1) {
+		res = ipa3_xdci_stop_gsi_channel(clnt_hdl,
+			stop_in_proc);
+		if (res) {
+			IPAERR("failed to stop xDCI channel hdl=%d\n",
+				clnt_hdl);
+			return res;
+		}
+
+		if (!*stop_in_proc) {
+			IPADBG("xDCI channel STOP hdl=%d\n", clnt_hdl);
+			return res;
+		}
+
+		/*
+		 * Give the previous stop request a chance to complete
+		 * before retrying
+		 */
+		udelay(IPA_CHANNEL_STOP_IN_PROC_SLEEP_USEC);
+
+		if (time_after(jiffies, jiffies_start + jiffies_timeout)) {
+			IPADBG("timeout waiting for xDCI channel emptiness\n");
+			return res;
+		}
+	}
+}
+
+/* Clocks should be voted for before invoking this function */
+static int ipa3_stop_ul_chan_with_data_drain(u32 qmi_req_id,
+		u32 source_pipe_bitmask, bool should_force_clear, u32 clnt_hdl)
+{
+	int result;
+	bool is_empty = false;
+	int i;
+	bool stop_in_proc;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* first try to stop the channel */
+	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+			&stop_in_proc);
+	if (result) {
+		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		goto exit;
+	}
+	if (!stop_in_proc)
+		goto exit;
+
+	/* if the stop is still in progress, wait for the channel to drain */
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+		if (result)
+			goto exit;
+		if (is_empty)
+			break;
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+	/* If the channel is empty, try to stop it again */
+	if (is_empty) {
+		result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+			&stop_in_proc);
+		if (result) {
+			IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+				clnt_hdl, ep->client);
+			goto exit;
+		}
+		if (!stop_in_proc)
+			goto exit;
+	}
+	/* if still stop_in_proc or not empty, activate force clear */
+	if (should_force_clear) {
+		result = ipa3_enable_force_clear(qmi_req_id, false,
+			source_pipe_bitmask);
+		if (result)
+			goto exit;
+	}
+	/* with force clear, wait for emptiness */
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		result = ipa3_is_xdci_channel_empty(ep, &is_empty);
+		if (result)
+			goto disable_force_clear_and_exit;
+		if (is_empty)
+			break;
+
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+	/* try to stop for the last time */
+	result = ipa3_xdci_stop_gsi_ch_brute_force(clnt_hdl,
+		&stop_in_proc);
+	if (result) {
+		IPAERR("fail to stop UL channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		goto disable_force_clear_and_exit;
+	}
+	result = stop_in_proc ? -EFAULT : 0;
+
+disable_force_clear_and_exit:
+	if (should_force_clear)
+		ipa3_disable_force_clear(qmi_req_id);
+exit:
+	return result;
+}
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id)
+{
+	struct ipa3_ep_context *ep;
+	int result;
+	u32 source_pipe_bitmask = 0;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+
+	if (!IPA_CLIENT_IS_CONS(ep->client)) {
+		IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		source_pipe_bitmask = 1 <<
+			ipa3_get_ep_mapping(ep->client);
+		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+			source_pipe_bitmask, should_force_clear, clnt_hdl);
+		if (result) {
+			IPAERR("Fail to stop UL channel with data drain\n");
+			WARN_ON(1);
+			goto stop_chan_fail;
+		}
+	} else {
+		IPADBG("Stopping CONS channel - hdl=%d clnt=%d\n",
+			clnt_hdl, ep->client);
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result) {
+			IPAERR("Error stopping channel (CONS client): %d\n",
+				result);
+			goto stop_chan_fail;
+		}
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+stop_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_release_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	gsi_res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error deallocating channel: %d\n", gsi_res);
+		goto dealloc_chan_fail;
+	}
+
+	gsi_res = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error deallocating event: %d\n", gsi_res);
+		goto dealloc_chan_fail;
+	}
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client))
+		ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+
+	IPADBG("exit\n");
+	return 0;
+
+dealloc_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	bool should_force_clear, u32 qmi_req_id, bool is_dpl)
+{
+	struct ipa3_ep_context *ul_ep, *dl_ep;
+	int result = -EFAULT;
+	u32 source_pipe_bitmask = 0;
+	bool dl_data_pending = true;
+	bool ul_data_pending = true;
+	int i;
+	bool is_empty = false;
+	struct gsi_chan_info ul_gsi_chan_info, dl_gsi_chan_info;
+	int aggr_active_bitmap = 0;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	/* In case of DPL, dl is the DPL channel/client */
+
+	IPADBG("entry\n");
+	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+	if (!is_dpl)
+		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	result = ipa3_get_gsi_chan_info(&dl_gsi_chan_info,
+		dl_ep->gsi_chan_hdl);
+	if (result)
+		goto disable_clk_and_exit;
+
+	if (!is_dpl) {
+		result = ipa3_get_gsi_chan_info(&ul_gsi_chan_info,
+			ul_ep->gsi_chan_hdl);
+		if (result)
+			goto disable_clk_and_exit;
+	}
+
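+	/*
+	 * Poll the DL/DPL channel (and, unless this is DPL, the UL channel)
+	 * until both are drained or IPA_POLL_FOR_EMPTINESS_NUM iterations
+	 * elapse.
+	 */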
+	for (i = 0; i < IPA_POLL_FOR_EMPTINESS_NUM; i++) {
+		if (!dl_data_pending && !ul_data_pending)
+			break;
+		result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+		if (result)
+			goto disable_clk_and_exit;
+		if (!is_empty) {
+			dl_data_pending = true;
+			break;
+		}
+		dl_data_pending = false;
+		if (!is_dpl) {
+			result = ipa3_is_xdci_channel_empty(ul_ep, &is_empty);
+			if (result)
+				goto disable_clk_and_exit;
+			ul_data_pending = !is_empty;
+		} else {
+			ul_data_pending = false;
+		}
+
+		udelay(IPA_POLL_FOR_EMPTINESS_SLEEP_USEC);
+	}
+
+	if (!dl_data_pending) {
+		aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+		if (aggr_active_bitmap & (1 << dl_clnt_hdl)) {
+			IPADBG("DL/DPL data pending due to open aggr. frame\n");
+			dl_data_pending = true;
+		}
+	}
+	if (dl_data_pending) {
+		IPAERR("DL/DPL data pending, can't suspend\n");
+		result = -EFAULT;
+		goto disable_clk_and_exit;
+	}
+
+	/* Suspend the DL/DPL EP */
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_cfg_ctrl.ipa_ep_suspend = true;
+	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+
+	/*
+	 * Check if DL/DPL channel is empty again, data could enter the channel
+	 * before its IPA EP was suspended
+	 */
+	result = ipa3_is_xdci_channel_empty(dl_ep, &is_empty);
+	if (result)
+		goto unsuspend_dl_and_exit;
+	if (!is_empty) {
+		IPAERR("DL/DPL data pending, can't suspend\n");
+		result = -EFAULT;
+		goto unsuspend_dl_and_exit;
+	}
+
+	/* STOP UL channel */
+	if (!is_dpl) {
+		source_pipe_bitmask = 1 << ipa3_get_ep_mapping(ul_ep->client);
+		result = ipa3_stop_ul_chan_with_data_drain(qmi_req_id,
+			source_pipe_bitmask, should_force_clear, ul_clnt_hdl);
+		if (result) {
+			IPAERR("Error stopping UL channel: result = %d\n",
+				result);
+			goto unsuspend_dl_and_exit;
+		}
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+unsuspend_dl_and_exit:
+	/* Unsuspend the DL EP */
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_cfg_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+disable_clk_and_exit:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+	return result;
+}
+
+int ipa3_start_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int result = -EFAULT;
+	enum gsi_status gsi_res;
+
+	IPADBG("entry\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("Bad parameters.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	gsi_res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (gsi_res != GSI_STATUS_SUCCESS) {
+		IPAERR("Error starting channel: %d\n", gsi_res);
+		goto start_chan_fail;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+
+start_chan_fail:
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	return result;
+}
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl)
+{
+	struct ipa3_ep_context *ul_ep, *dl_ep;
+	enum gsi_status gsi_res;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	/* In case of DPL, dl is the DPL channel/client */
+
+	IPADBG("entry\n");
+	if (dl_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[dl_clnt_hdl].valid == 0 ||
+		(!is_dpl && (ul_clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[ul_clnt_hdl].valid == 0))) {
+		IPAERR("Bad parameter.\n");
+		return -EINVAL;
+	}
+
+	dl_ep = &ipa3_ctx->ep[dl_clnt_hdl];
+	if (!is_dpl)
+		ul_ep = &ipa3_ctx->ep[ul_clnt_hdl];
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	/* Unsuspend the DL/DPL EP */
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	ep_cfg_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(dl_clnt_hdl, &ep_cfg_ctrl);
+
+	/* Start UL channel */
+	if (!is_dpl) {
+		gsi_res = gsi_start_channel(ul_ep->gsi_chan_hdl);
+		if (gsi_res != GSI_STATUS_SUCCESS)
+			IPAERR("Error starting UL channel: %d\n", gsi_res);
+	}
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(dl_clnt_hdl));
+
+	IPADBG("exit\n");
+	return 0;
+}
+
+/**
+ * ipa3_clear_endpoint_delay() - Remove ep delay set on the IPA pipe before
+ * client disconnect.
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Should be called by the driver of the peripheral that wants to remove
+ * ep delay on an IPA consumer pipe before disconnect in BAM-BAM mode. This
+ * API expects the caller to take responsibility for freeing any headers,
+ * routing and filtering tables and rules as needed.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	struct ipa_ep_cfg_ctrl ep_ctrl = {0};
+	struct ipa_enable_force_clear_datapath_req_msg_v01 req = {0};
+	int res;
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ipa3_ctx->tethered_flow_control) {
+		IPADBG("APPS flow control is not enabled\n");
+		/* Send a message to the modem so it stops honoring flow control. */
+		req.request_id = clnt_hdl;
+		req.source_pipe_bitmask = 1 << clnt_hdl;
+		res = ipa3_qmi_enable_force_clear_datapath_send(&req);
+		if (res) {
+			IPADBG("enable_force_clear_datapath failed %d\n",
+				res);
+		}
+		ep->qmi_request_sent = true;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	/* Set disconnect in progress flag so further flow control events are
+	 * not honored.
+	 */
+	spin_lock(&ipa3_ctx->disconnect_lock);
+	ep->disconnect_in_progress = true;
+	spin_unlock(&ipa3_ctx->disconnect_lock);
+
+	/* If flow is disabled at this point, restore the ep state.*/
+	ep_ctrl.ipa_ep_delay = false;
+	ep_ctrl.ipa_ep_suspend = false;
+	ipa3_cfg_ep_ctrl(clnt_hdl, &ep_ctrl);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) removed ep delay\n", clnt_hdl);
+
+	return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
new file mode 100644
index 0000000..5912d3f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -0,0 +1,2145 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include "ipa_i.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_MAX_MSG_LEN 4096
+#define IPA_DBG_MAX_RULE_IN_TBL 128
+#define IPA_DBG_ACTIVE_CLIENT_BUF_SIZE ((IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN \
+	* IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES) + IPA_MAX_MSG_LEN)
+
+#define IPA_DUMP_STATUS_FIELD(f) \
+	pr_err(#f "=0x%x\n", status->f)
+
+const char *ipa3_excp_name[] = {
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD0),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_RSVD1),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IHL),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_TAG),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_NAT),
+	__stringify_1(IPA_A5_MUX_HDR_EXCP_FLAG_IP),
+};
+
+const char *ipa3_event_name[] = {
+	__stringify(WLAN_CLIENT_CONNECT),
+	__stringify(WLAN_CLIENT_DISCONNECT),
+	__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
+	__stringify(WLAN_CLIENT_NORMAL_MODE),
+	__stringify(SW_ROUTING_ENABLE),
+	__stringify(SW_ROUTING_DISABLE),
+	__stringify(WLAN_AP_CONNECT),
+	__stringify(WLAN_AP_DISCONNECT),
+	__stringify(WLAN_STA_CONNECT),
+	__stringify(WLAN_STA_DISCONNECT),
+	__stringify(WLAN_CLIENT_CONNECT_EX),
+	__stringify(WLAN_SWITCH_TO_SCC),
+	__stringify(WLAN_SWITCH_TO_MCC),
+	__stringify(WLAN_WDI_ENABLE),
+	__stringify(WLAN_WDI_DISABLE),
+	__stringify(WAN_UPSTREAM_ROUTE_ADD),
+	__stringify(WAN_UPSTREAM_ROUTE_DEL),
+	__stringify(WAN_EMBMS_CONNECT),
+	__stringify(WAN_XLAT_CONNECT),
+	__stringify(ECM_CONNECT),
+	__stringify(ECM_DISCONNECT),
+	__stringify(IPA_TETHERING_STATS_UPDATE_STATS),
+	__stringify(IPA_TETHERING_STATS_UPDATE_NETWORK_STATS),
+};
+
+const char *ipa3_hdr_l2_type_name[] = {
+	__stringify(IPA_HDR_L2_NONE),
+	__stringify(IPA_HDR_L2_ETHERNET_II),
+	__stringify(IPA_HDR_L2_802_3),
+};
+
+const char *ipa3_hdr_proc_type_name[] = {
+	__stringify(IPA_HDR_PROC_NONE),
+	__stringify(IPA_HDR_PROC_ETHII_TO_ETHII),
+	__stringify(IPA_HDR_PROC_ETHII_TO_802_3),
+	__stringify(IPA_HDR_PROC_802_3_TO_ETHII),
+	__stringify(IPA_HDR_PROC_802_3_TO_802_3),
+};
+
+static struct dentry *dent;
+static struct dentry *dfile_gen_reg;
+static struct dentry *dfile_ep_reg;
+static struct dentry *dfile_keep_awake;
+static struct dentry *dfile_ep_holb;
+static struct dentry *dfile_hdr;
+static struct dentry *dfile_proc_ctx;
+static struct dentry *dfile_ip4_rt;
+static struct dentry *dfile_ip4_rt_hw;
+static struct dentry *dfile_ip6_rt;
+static struct dentry *dfile_ip6_rt_hw;
+static struct dentry *dfile_ip4_flt;
+static struct dentry *dfile_ip4_flt_hw;
+static struct dentry *dfile_ip6_flt;
+static struct dentry *dfile_ip6_flt_hw;
+static struct dentry *dfile_stats;
+static struct dentry *dfile_wstats;
+static struct dentry *dfile_wdi_stats;
+static struct dentry *dfile_ntn_stats;
+static struct dentry *dfile_dbg_cnt;
+static struct dentry *dfile_msg;
+static struct dentry *dfile_ip4_nat;
+static struct dentry *dfile_rm_stats;
+static struct dentry *dfile_status_stats;
+static struct dentry *dfile_active_clients;
+static char dbg_buff[IPA_MAX_MSG_LEN];
+static char *active_clients_buf;
+
+static s8 ep_reg_idx;
+
+
+static ssize_t ipa3_read_gen_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	struct ipahal_reg_shared_mem_size smem_sz;
+
+	memset(&smem_sz, 0, sizeof(smem_sz));
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_VERSION=0x%x\n"
+			"IPA_COMP_HW_VERSION=0x%x\n"
+			"IPA_ROUTE=0x%x\n"
+			"IPA_SHARED_MEM_RESTRICTED=0x%x\n"
+			"IPA_SHARED_MEM_SIZE=0x%x\n",
+			ipahal_read_reg(IPA_VERSION),
+			ipahal_read_reg(IPA_COMP_HW_VERSION),
+			ipahal_read_reg(IPA_ROUTE),
+			smem_sz.shared_mem_baddr,
+			smem_sz.shared_mem_sz);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
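+/*
+ * Debugfs write format: "<ep_idx> <en> <tmr_val>". For example (hypothetical
+ * values), writing "5 1 100" enables HOLB on pipe 5 with a timer value of 100.
+ */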
+static ssize_t ipa3_write_ep_holb(struct file *file,
+		const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ipa_ep_cfg_holb holb;
+	u32 en;
+	u32 tmr_val;
+	u32 ep_idx;
+	unsigned long missing;
+	char *sptr, *token;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+
+	sptr = dbg_buff;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &ep_idx))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &en))
+		return -EINVAL;
+
+	token = strsep(&sptr, " ");
+	if (!token)
+		return -EINVAL;
+	if (kstrtou32(token, 0, &tmr_val))
+		return -EINVAL;
+
+	holb.en = en;
+	holb.tmr_val = tmr_val;
+
+	ipa3_cfg_ep_holb(ep_idx, &holb);
+
+	return count;
+}
+
+static ssize_t ipa3_write_ep_reg(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("bad pipe specified %d\n", option);
+		return count;
+	}
+
+	ep_reg_idx = option;
+
+	return count;
+}
+
+/**
+ * _ipa_read_ep_reg_v3_0() - Reads and prints endpoint configuration registers
+ *
+ * Returns the number of characters printed
+ */
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe)
+{
+	return scnprintf(
+		dbg_buff, IPA_MAX_MSG_LEN,
+		"IPA_ENDP_INIT_NAT_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_%u=0x%x\n"
+		"IPA_ENDP_INIT_HDR_EXT_%u=0x%x\n"
+		"IPA_ENDP_INIT_MODE_%u=0x%x\n"
+		"IPA_ENDP_INIT_AGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_ROUTE_%u=0x%x\n"
+		"IPA_ENDP_INIT_CTRL_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_EN_%u=0x%x\n"
+		"IPA_ENDP_INIT_HOL_TIMER_%u=0x%x\n"
+		"IPA_ENDP_INIT_DEAGGR_%u=0x%x\n"
+		"IPA_ENDP_INIT_CFG_%u=0x%x\n",
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_NAT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HDR_EXT_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_MODE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_ROUTE_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CTRL_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_EN_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_DEAGGR_n, pipe),
+		pipe, ipahal_read_reg_n(IPA_ENDP_INIT_CFG_n, pipe));
+}
+
+static ssize_t ipa3_read_ep_reg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int start_idx;
+	int end_idx;
+	int size = 0;
+	int ret;
+	loff_t pos;
+
+	/* negative ep_reg_idx means all registers */
+	if (ep_reg_idx < 0) {
+		start_idx = 0;
+		end_idx = ipa3_ctx->ipa_num_pipes;
+	} else {
+		start_idx = ep_reg_idx;
+		end_idx = start_idx + 1;
+	}
+	pos = *ppos;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	for (i = start_idx; i < end_idx; i++) {
+
+		nbytes = ipa3_ctx->ctrl->ipa3_read_ep_reg(dbg_buff,
+				IPA_MAX_MSG_LEN, i);
+
+		*ppos = pos;
+		ret = simple_read_from_buffer(ubuf, count, ppos, dbg_buff,
+					      nbytes);
+		if (ret < 0) {
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return ret;
+		}
+
+		size += ret;
+		ubuf += nbytes;
+		count -= nbytes;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	*ppos = pos + size;
+	return size;
+}
+
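+/*
+ * Debugfs write: "1" takes an IPA active-clients vote (keeps IPA awake),
+ * "0" releases it; any other value is rejected.
+ */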
+static ssize_t ipa3_write_keep_awake(struct file *file, const char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option == 1)
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	else if (option == 0)
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	else
+		return -EFAULT;
+
+	return count;
+}
+
+static ssize_t ipa3_read_keep_awake(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int nbytes;
+
+	ipa3_active_clients_lock();
+	if (ipa3_ctx->ipa3_active_clients.cnt)
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is ON\n");
+	else
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"IPA APPS power state is OFF\n");
+	ipa3_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_hdr(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int nbytes = 0;
+	int i = 0;
+	struct ipa3_hdr_entry *entry;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->hdr_tbl_lcl)
+		pr_err("Table resides on local memory\n");
+	else
+		pr_err("Table resides on system (ddr) memory\n");
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		nbytes = scnprintf(
+			dbg_buff,
+			IPA_MAX_MSG_LEN,
+			"name:%s len=%d ref=%d partial=%d type=%s ",
+			entry->name,
+			entry->hdr_len,
+			entry->ref_cnt,
+			entry->is_partial,
+			ipa3_hdr_l2_type_name[entry->type]);
+
+		if (entry->is_hdr_proc_ctx) {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"phys_base=0x%pa ",
+				&entry->phys_base);
+		} else {
+			nbytes += scnprintf(
+				dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"ofst=%u ",
+				entry->offset_entry->offset >> 2);
+		}
+		for (i = 0; i < entry->hdr_len; i++) {
+			scnprintf(dbg_buff + nbytes + i * 2,
+				  IPA_MAX_MSG_LEN - nbytes - i * 2,
+				  "%02x", entry->hdr[i]);
+		}
+		scnprintf(dbg_buff + nbytes + entry->hdr_len * 2,
+			  IPA_MAX_MSG_LEN - nbytes - entry->hdr_len * 2,
+			  "\n");
+		pr_err("%s", dbg_buff);
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static int ipa3_attrib_dump(struct ipa_rule_attrib *attrib,
+		enum ipa_ip_type ip)
+{
+	uint32_t addr[4];
+	uint32_t mask[4];
+	int i;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		pr_err("tos_value:%d ", attrib->tos_value);
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED)
+		pr_err("tos_mask:%d ", attrib->tos_mask);
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL)
+		pr_err("protocol:%d ", attrib->u.v4.protocol);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.src_addr);
+			mask[0] = htonl(attrib->u.v4.src_addr_mask);
+			pr_err(
+					"src_addr:%pI4 src_addr_mask:%pI4 ",
+					addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.src_addr[i]);
+				mask[i] = htonl(attrib->u.v6.src_addr_mask[i]);
+			}
+			pr_err(
+					   "src_addr:%pI6 src_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (ip == IPA_IP_v4) {
+			addr[0] = htonl(attrib->u.v4.dst_addr);
+			mask[0] = htonl(attrib->u.v4.dst_addr_mask);
+			pr_err(
+					   "dst_addr:%pI4 dst_addr_mask:%pI4 ",
+					   addr + 0, mask + 0);
+		} else if (ip == IPA_IP_v6) {
+			for (i = 0; i < 4; i++) {
+				addr[i] = htonl(attrib->u.v6.dst_addr[i]);
+				mask[i] = htonl(attrib->u.v6.dst_addr_mask[i]);
+			}
+			pr_err(
+					   "dst_addr:%pI6 dst_addr_mask:%pI6 ",
+					   addr + 0, mask + 0);
+		} else {
+			WARN_ON(1);
+		}
+	}
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		pr_err("src_port_range:%u %u ",
+				   attrib->src_port_lo,
+			     attrib->src_port_hi);
+	}
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		pr_err("dst_port_range:%u %u ",
+				   attrib->dst_port_lo,
+			     attrib->dst_port_hi);
+	}
+	if (attrib->attrib_mask & IPA_FLT_TYPE)
+		pr_err("type:%d ", attrib->type);
+
+	if (attrib->attrib_mask & IPA_FLT_CODE)
+		pr_err("code:%d ", attrib->code);
+
+	if (attrib->attrib_mask & IPA_FLT_SPI)
+		pr_err("spi:%x ", attrib->spi);
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT)
+		pr_err("src_port:%u ", attrib->src_port);
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT)
+		pr_err("dst_port:%u ", attrib->dst_port);
+
+	if (attrib->attrib_mask & IPA_FLT_TC)
+		pr_err("tc:%d ", attrib->u.v6.tc);
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL)
+		pr_err("flow_label:%x ", attrib->u.v6.flow_label);
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR)
+		pr_err("next_hdr:%d ", attrib->u.v6.next_hdr);
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		pr_err(
+				   "metadata:%x metadata_mask:%x ",
+				   attrib->meta_data, attrib->meta_data_mask);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		pr_err("frg ");
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3)) {
+		pr_err("src_mac_addr:%pM ", attrib->src_mac_addr);
+	}
+
+	if ((attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) ||
+		(attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3)) {
+		pr_err("dst_mac_addr:%pM ", attrib->dst_mac_addr);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE)
+		pr_err("ether_type:%x ", attrib->ether_type);
+
+	pr_err("\n");
+	return 0;
+}
+
+static int ipa3_attrib_dump_eq(struct ipa_ipfltri_rule_eq *attrib)
+{
+	uint8_t addr[16];
+	uint8_t mask[16];
+	int i;
+	int j;
+
+	if (attrib->tos_eq_present)
+		pr_err("tos_value:%d ", attrib->tos_eq);
+
+	if (attrib->protocol_eq_present)
+		pr_err("protocol:%d ", attrib->protocol_eq);
+
+	if (attrib->tc_eq_present)
+		pr_err("tc:%d ", attrib->tc_eq);
+
+	for (i = 0; i < attrib->num_offset_meq_128; i++) {
+		for (j = 0; j < 16; j++) {
+			addr[j] = attrib->offset_meq_128[i].value[j];
+			mask[j] = attrib->offset_meq_128[i].mask[j];
+		}
+		pr_err(
+			"(ofst_meq128: ofst:%d mask:%pI6 val:%pI6) ",
+			attrib->offset_meq_128[i].offset,
+			mask, addr);
+	}
+
+	for (i = 0; i < attrib->num_offset_meq_32; i++)
+		pr_err(
+			   "(ofst_meq32: ofst:%u mask:0x%x val:0x%x) ",
+			   attrib->offset_meq_32[i].offset,
+			   attrib->offset_meq_32[i].mask,
+			   attrib->offset_meq_32[i].value);
+
+	for (i = 0; i < attrib->num_ihl_offset_meq_32; i++)
+		pr_err(
+			"(ihl_ofst_meq32: ofts:%d mask:0x%x val:0x%x) ",
+			attrib->ihl_offset_meq_32[i].offset,
+			attrib->ihl_offset_meq_32[i].mask,
+			attrib->ihl_offset_meq_32[i].value);
+
+	if (attrib->metadata_meq32_present)
+		pr_err(
+			"(metadata: ofst:%u mask:0x%x val:0x%x) ",
+			attrib->metadata_meq32.offset,
+			attrib->metadata_meq32.mask,
+			attrib->metadata_meq32.value);
+
+	for (i = 0; i < attrib->num_ihl_offset_range_16; i++)
+		pr_err(
+			   "(ihl_ofst_range16: ofst:%u lo:%u hi:%u) ",
+			   attrib->ihl_offset_range_16[i].offset,
+			   attrib->ihl_offset_range_16[i].range_low,
+			   attrib->ihl_offset_range_16[i].range_high);
+
+	if (attrib->ihl_offset_eq_32_present)
+		pr_err(
+			"(ihl_ofst_eq32:%d val:0x%x) ",
+			attrib->ihl_offset_eq_32.offset,
+			attrib->ihl_offset_eq_32.value);
+
+	if (attrib->ihl_offset_eq_16_present)
+		pr_err(
+			"(ihl_ofst_eq16:%d val:0x%x) ",
+			attrib->ihl_offset_eq_16.offset,
+			attrib->ihl_offset_eq_16.value);
+
+	if (attrib->fl_eq_present)
+		pr_err("flow_label:%d ", attrib->fl_eq);
+
+	if (attrib->ipv4_frag_eq_present)
+		pr_err("frag ");
+
+	pr_err("\n");
+	return 0;
+}
+
+static int ipa3_open_dbg(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t ipa3_read_rt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i = 0;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_entry *entry;
+	struct ipa3_rt_tbl_set *set;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 ofst;
+	u32 ofst_words;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ip == IPA_IP_v6) {
+		if (ipa3_ctx->ip6_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip6_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	} else if (ip == IPA_IP_v4) {
+		if (ipa3_ctx->ip4_rt_tbl_hash_lcl)
+			pr_err("Hashable table resides on local memory\n");
+		else
+			pr_err("Hashable table resides on system (ddr) memory\n");
+		if (ipa3_ctx->ip4_rt_tbl_nhash_lcl)
+			pr_err("Non-Hashable table resides on local memory\n");
+		else
+			pr_err("Non-Hashable table resides on system (ddr) memory\n");
+	}
+
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+			if (entry->proc_ctx) {
+				ofst = entry->proc_ctx->offset_entry->offset;
+				ofst_words =
+					(ofst +
+					ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+					>> 5;
+
+				pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa3_get_ep_mapping(entry->rule.dst),
+					!ipa3_ctx->hdr_proc_ctx_tbl_lcl);
+				pr_err("proc_ctx[32B]:%u attrib_mask:%08x ",
+					ofst_words,
+					entry->rule.attrib.attrib_mask);
+				pr_err("rule_id:%u max_prio:%u prio:%u ",
+					entry->rule_id, entry->rule.max_prio,
+					entry->prio);
+				pr_err("hashable:%u retain_hdr:%u ",
+					entry->rule.hashable,
+					entry->rule.retain_hdr);
+			} else {
+				if (entry->hdr)
+					ofst = entry->hdr->offset_entry->offset;
+				else
+					ofst = 0;
+
+				pr_err("tbl_idx:%d tbl_name:%s tbl_ref:%u ",
+					entry->tbl->idx, entry->tbl->name,
+					entry->tbl->ref_cnt);
+				pr_err("rule_idx:%d dst:%d ep:%d S:%u ",
+					i, entry->rule.dst,
+					ipa3_get_ep_mapping(entry->rule.dst),
+					!ipa3_ctx->hdr_tbl_lcl);
+				pr_err("hdr_ofst[words]:%u attrib_mask:%08x ",
+					ofst >> 2,
+					entry->rule.attrib.attrib_mask);
+				pr_err("rule_id:%u max_prio:%u prio:%u ",
+					entry->rule_id, entry->rule.max_prio,
+					entry->prio);
+				pr_err("hashable:%u retain_hdr:%u ",
+					entry->rule.hashable,
+					entry->rule.retain_hdr);
+			}
+
+			ipa3_attrib_dump(&entry->rule.attrib, ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static ssize_t ipa3_read_rt_hw(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	int tbls_num;
+	int rules_num;
+	int tbl;
+	int rl;
+	int res = 0;
+	struct ipahal_rt_rule_entry *rules = NULL;
+
+	switch (ip) {
+	case IPA_IP_v4:
+		tbls_num = IPA_MEM_PART(v4_rt_num_index);
+		break;
+	case IPA_IP_v6:
+		tbls_num = IPA_MEM_PART(v6_rt_num_index);
+		break;
+	default:
+		IPAERR("ip type error %d\n", ip);
+		return -EINVAL;
+	}
+
+	IPADBG("Trying to parse %d H/W routing tables - IP=%d\n", tbls_num, ip);
+
+	rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+	if (!rules) {
+		IPAERR("failed to allocate mem for tbl rules\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	mutex_lock(&ipa3_ctx->lock);
+
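+	/* Dump each routing table twice: hashable rules first, then
+	 * non-hashable rules, as read back from H/W.
+	 */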
+	for (tbl = 0 ; tbl < tbls_num ; tbl++) {
+		pr_err("=== Routing Table %d = Hashable Rules ===\n", tbl);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_rt_read_tbl_from_hw(tbl, ip, true, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem system table\n");
+
+		for (rl = 0 ; rl < rules_num ; rl++) {
+			pr_err("rule_idx:%d dst ep:%d L:%u ",
+				rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+			if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+				pr_err("proc_ctx:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			else
+				pr_err("hdr_ofst:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+
+			pr_err("rule_id:%u prio:%u retain_hdr:%u ",
+				rules[rl].id, rules[rl].priority,
+				rules[rl].retain_hdr);
+			ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+		}
+
+		pr_err("=== Routing Table %d = Non-Hashable Rules ===\n", tbl);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_rt_read_tbl_from_hw(tbl, ip, false, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem system table\n");
+
+		for (rl = 0 ; rl < rules_num ; rl++) {
+			pr_err("rule_idx:%d dst ep:%d L:%u ",
+				rl, rules[rl].dst_pipe_idx, rules[rl].hdr_lcl);
+
+			if (rules[rl].hdr_type == IPAHAL_RT_RULE_HDR_PROC_CTX)
+				pr_err("proc_ctx:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+			else
+				pr_err("hdr_ofst:%u attrib_mask:%08x ",
+					rules[rl].hdr_ofst,
+					rules[rl].eq_attrib.rule_eq_bitmap);
+
+			pr_err("rule_id:%u prio:%u retain_hdr:%u\n",
+				rules[rl].id, rules[rl].priority,
+				rules[rl].retain_hdr);
+			ipa3_attrib_dump_eq(&rules[rl].eq_attrib);
+		}
+		pr_err("\n");
+	}
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	kfree(rules);
+	return res;
+}
+
+static ssize_t ipa3_read_proc_ctx(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+	struct ipa3_hdr_proc_ctx_tbl *tbl;
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	u32 ofst_words;
+
+	tbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl)
+		pr_info("Table resides on local memory\n");
+	else
+		pr_info("Table resides on system(ddr) memory\n");
+
+	list_for_each_entry(entry, &tbl->head_proc_ctx_entry_list, link) {
+		ofst_words = (entry->offset_entry->offset +
+			ipa3_ctx->hdr_proc_ctx_tbl.start_offset)
+			>> 5;
+		if (entry->hdr->is_hdr_proc_ctx) {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa3_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr_phys_base:0x%pa\n",
+				&entry->hdr->phys_base);
+		} else {
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"id:%u hdr_proc_type:%s proc_ctx[32B]:%u ",
+				entry->id,
+				ipa3_hdr_proc_type_name[entry->type],
+				ofst_words);
+			nbytes += scnprintf(dbg_buff + nbytes,
+				IPA_MAX_MSG_LEN - nbytes,
+				"hdr[words]:%u\n",
+				entry->hdr->offset_entry->offset >> 2);
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
+		loff_t *ppos)
+{
+	int i;
+	int j;
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_flt_entry *entry;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	struct ipa3_rt_tbl *rt_tbl;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	bool eq;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	for (j = 0; j < ipa3_ctx->ipa_num_pipes; j++) {
+		if (!ipa_is_ep_support_flt(j))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[j][ip];
+		i = 0;
+		list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+			if (entry->rule.eq_attrib_type) {
+				rt_tbl_idx = entry->rule.rt_tbl_idx;
+				bitmap = entry->rule.eq_attrib.rule_eq_bitmap;
+				eq = true;
+			} else {
+				rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
+				if (rt_tbl)
+					rt_tbl_idx = rt_tbl->idx;
+				else
+					rt_tbl_idx = ~0;
+				bitmap = entry->rule.attrib.attrib_mask;
+				eq = false;
+			}
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				j, i, entry->rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d eq:%d ",
+				bitmap, entry->rule.retain_hdr, eq);
+			pr_err("hashable:%u rule_id:%u max_prio:%u prio:%u ",
+				entry->rule.hashable, entry->rule_id,
+				entry->rule.max_prio, entry->prio);
+			if (eq)
+				ipa3_attrib_dump_eq(
+					&entry->rule.eq_attrib);
+			else
+				ipa3_attrib_dump(
+					&entry->rule.attrib, ip);
+			i++;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static ssize_t ipa3_read_flt_hw(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	int pipe;
+	int rl;
+	int rules_num;
+	struct ipahal_flt_rule_entry *rules;
+	enum ipa_ip_type ip = (enum ipa_ip_type)file->private_data;
+	u32 rt_tbl_idx;
+	u32 bitmap;
+	int res = 0;
+
+	IPADBG("Trying to parse %d H/W filtering tables - IP=%d\n",
+		ipa3_ctx->ep_flt_num, ip);
+
+	rules = kzalloc(sizeof(*rules) * IPA_DBG_MAX_RULE_IN_TBL, GFP_KERNEL);
+	if (!rules) {
+		IPAERR("failed to allocate mem for tbl rules\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	mutex_lock(&ipa3_ctx->lock);
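+	/* For every pipe that supports filtering, dump the hashable and then
+	 * the non-hashable rules read back from H/W.
+	 */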
+	for (pipe = 0; pipe < ipa3_ctx->ipa_num_pipes; pipe++) {
+		if (!ipa_is_ep_support_flt(pipe))
+			continue;
+		pr_err("=== Filtering Table ep:%d = Hashable Rules ===\n",
+			pipe);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_flt_read_tbl_from_hw(pipe, ip, true, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem sys table\n");
+
+		for (rl = 0; rl < rules_num; rl++) {
+			rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+			bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d ",
+				bitmap, rules[rl].rule.retain_hdr);
+			pr_err("rule_id:%u prio:%u ",
+				rules[rl].id, rules[rl].priority);
+			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+		}
+
+		pr_err("=== Filtering Table ep:%d = Non-Hashable Rules ===\n",
+			pipe);
+		rules_num = IPA_DBG_MAX_RULE_IN_TBL;
+		res = ipa3_flt_read_tbl_from_hw(pipe, ip, false, rules,
+			&rules_num);
+		if (res) {
+			pr_err("ERROR - Check the logs\n");
+			IPAERR("failed reading tbl from hw\n");
+			goto bail;
+		}
+		if (!rules_num)
+			pr_err("-->No rules. Empty tbl or modem sys table\n");
+		for (rl = 0; rl < rules_num; rl++) {
+			rt_tbl_idx = rules[rl].rule.rt_tbl_idx;
+			bitmap = rules[rl].rule.eq_attrib.rule_eq_bitmap;
+			pr_err("ep_idx:%d rule_idx:%d act:%d rt_tbl_idx:%d ",
+				pipe, rl, rules[rl].rule.action, rt_tbl_idx);
+			pr_err("attrib_mask:%08x retain_hdr:%d ",
+				bitmap, rules[rl].rule.retain_hdr);
+			pr_err("rule_id:%u prio:%u ",
+				rules[rl].id, rules[rl].priority);
+			ipa3_attrib_dump_eq(&rules[rl].rule.eq_attrib);
+		}
+		pr_err("\n");
+	}
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	kfree(rules);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int i;
+	int cnt = 0;
+	uint connect = 0;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++)
+		connect |= (ipa3_ctx->ep[i].valid << i);
+
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+		"sw_tx=%u\n"
+		"hw_tx=%u\n"
+		"tx_non_linear=%u\n"
+		"tx_compl=%u\n"
+		"wan_rx=%u\n"
+		"stat_compl=%u\n"
+		"lan_aggr_close=%u\n"
+		"wan_aggr_close=%u\n"
+		"act_clnt=%u\n"
+		"con_clnt_bmap=0x%x\n"
+		"wan_rx_empty=%u\n"
+		"wan_repl_rx_empty=%u\n"
+		"lan_rx_empty=%u\n"
+		"lan_repl_rx_empty=%u\n"
+		"flow_enable=%u\n"
+		"flow_disable=%u\n",
+		ipa3_ctx->stats.tx_sw_pkts,
+		ipa3_ctx->stats.tx_hw_pkts,
+		ipa3_ctx->stats.tx_non_linear,
+		ipa3_ctx->stats.tx_pkts_compl,
+		ipa3_ctx->stats.rx_pkts,
+		ipa3_ctx->stats.stat_compl,
+		ipa3_ctx->stats.aggr_close,
+		ipa3_ctx->stats.wan_aggr_close,
+		ipa3_ctx->ipa3_active_clients.cnt,
+		connect,
+		ipa3_ctx->stats.wan_rx_empty,
+		ipa3_ctx->stats.wan_repl_rx_empty,
+		ipa3_ctx->stats.lan_rx_empty,
+		ipa3_ctx->stats.lan_repl_rx_empty,
+		ipa3_ctx->stats.flow_enable,
+		ipa3_ctx->stats.flow_disable);
+	cnt += nbytes;
+
+	for (i = 0; i < IPAHAL_PKT_STATUS_EXCEPTION_MAX; i++) {
+		nbytes = scnprintf(dbg_buff + cnt,
+			IPA_MAX_MSG_LEN - cnt,
+			"lan_rx_excp[%u:%20s]=%u\n", i,
+			ipahal_pkt_status_exception_str(i),
+			ipa3_ctx->stats.rx_excp_pkts[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wstats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+
+#define HEAD_FRMT_STR "%25s\n"
+#define FRMT_STR "%25s %10u\n"
+#define FRMT_STR1 "%25s %10u\n\n"
+
+	int cnt = 0;
+	int nbytes;
+	int ipa_ep_idx;
+	enum ipa_client_type client = IPA_CLIENT_WLAN1_PROD;
+	struct ipa3_ep_context *ep;
+
+	do {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			HEAD_FRMT_STR, "Client IPA_CLIENT_WLAN1_PROD Stats:");
+		cnt += nbytes;
+
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			break;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Avail Fifo Desc:",
+			atomic_read(&ep->avail_fifo_desc));
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Rcvd:", ep->wstats.rx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkts Status Rcvd:",
+			ep->wstats.rx_pkts_status_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Rcvd:", ep->wstats.rx_hd_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Processed:",
+			ep->wstats.rx_hd_processed);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx DH Sent Back:", ep->wstats.rx_hd_reply);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Rx Pkt Leak:", ep->wstats.rx_pkt_leak);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Rx DP Fail:", ep->wstats.rx_dp_fail);
+		cnt += nbytes;
+
+	} while (0);
+
+	client = IPA_CLIENT_WLAN1_CONS;
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+		"Client IPA_CLIENT_WLAN1_CONS Stats:");
+	cnt += nbytes;
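+	/* Walk the WLAN consumer clients (WLAN1_CONS..WLAN4_CONS) and dump
+	 * the stats of every pipe that is up.
+	 */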
+	while (1) {
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		ep = &ipa3_ctx->ep[ipa_ep_idx];
+		if (ep->valid != 1) {
+			nbytes = scnprintf(dbg_buff + cnt,
+				IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR, "Not up");
+			cnt += nbytes;
+			goto nxt_clnt_cons;
+		}
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Received:", ep->wstats.tx_pkts_rcvd);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR, "Tx Pkts Sent:", ep->wstats.tx_pkts_sent);
+		cnt += nbytes;
+
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			FRMT_STR1, "Tx Pkts Dropped:",
+			ep->wstats.tx_pkts_dropped);
+		cnt += nbytes;
+
+nxt_clnt_cons:
+			switch (client) {
+			case IPA_CLIENT_WLAN1_CONS:
+				client = IPA_CLIENT_WLAN2_CONS;
+				nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+					"Client IPA_CLIENT_WLAN2_CONS Stats:");
+				cnt += nbytes;
+				continue;
+			case IPA_CLIENT_WLAN2_CONS:
+				client = IPA_CLIENT_WLAN3_CONS;
+				nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+					"Client IPA_CLIENT_WLAN3_CONS Stats:");
+				cnt += nbytes;
+				continue;
+			case IPA_CLIENT_WLAN3_CONS:
+				client = IPA_CLIENT_WLAN4_CONS;
+				nbytes = scnprintf(dbg_buff + cnt,
+					IPA_MAX_MSG_LEN - cnt, HEAD_FRMT_STR,
+					"Client IPA_CLIENT_WLAN4_CONS Stats:");
+				cnt += nbytes;
+				continue;
+			case IPA_CLIENT_WLAN4_CONS:
+			default:
+				break;
+			}
+		break;
+	}
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+		"\n"HEAD_FRMT_STR, "All Wlan Consumer pipes stats:");
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Allocated:",
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR,
+		"Tx Comm Buff Avail:", ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+	cnt += nbytes;
+
+	nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, FRMT_STR1,
+		"Total Tx Pkts Freed:", ipa3_ctx->wc_memb.total_tx_pkts_freed);
+	cnt += nbytes;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_ntn(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+#define TX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
+
+	struct Ipa3HwStatsNTNInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa3_get_ntn_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX tail_ptr_val=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n"
+			"TX num_bam_int_handled_while_not_in_bam=%u\n",
+			TX_STATS(num_pkts_processed),
+			TX_STATS(tail_ptr_val),
+			TX_STATS(num_db_fired),
+			TX_STATS(tx_comp_ring_stats.ringFull),
+			TX_STATS(tx_comp_ring_stats.ringEmpty),
+			TX_STATS(tx_comp_ring_stats.ringUsageHigh),
+			TX_STATS(tx_comp_ring_stats.ringUsageLow),
+			TX_STATS(tx_comp_ring_stats.RingUtilCount),
+			TX_STATS(bam_stats.bamFifoFull),
+			TX_STATS(bam_stats.bamFifoEmpty),
+			TX_STATS(bam_stats.bamFifoUsageHigh),
+			TX_STATS(bam_stats.bamFifoUsageLow),
+			TX_STATS(bam_stats.bamUtilCount),
+			TX_STATS(num_db),
+			TX_STATS(num_unexpected_db),
+			TX_STATS(num_bam_int_handled),
+			TX_STATS(num_bam_int_in_non_running_state),
+			TX_STATS(num_qmb_int_handled),
+			TX_STATS(num_bam_int_handled_while_wait_for_bam),
+			TX_STATS(num_bam_int_handled_while_not_in_bam));
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n",
+			RX_STATS(max_outstanding_pkts),
+			RX_STATS(num_pkts_processed),
+			RX_STATS(rx_ring_rp_value),
+			RX_STATS(rx_ind_ring_stats.ringFull),
+			RX_STATS(rx_ind_ring_stats.ringEmpty),
+			RX_STATS(rx_ind_ring_stats.ringUsageHigh),
+			RX_STATS(rx_ind_ring_stats.ringUsageLow),
+			RX_STATS(rx_ind_ring_stats.RingUtilCount),
+			RX_STATS(bam_stats.bamFifoFull),
+			RX_STATS(bam_stats.bamFifoEmpty),
+			RX_STATS(bam_stats.bamFifoUsageHigh),
+			RX_STATS(bam_stats.bamFifoUsageLow),
+			RX_STATS(bam_stats.bamUtilCount),
+			RX_STATS(num_bam_int_handled),
+			RX_STATS(num_db),
+			RX_STATS(num_unexpected_db),
+			RX_STATS(num_pkts_in_dis_uninit_state),
+			RX_STATS(num_bam_int_handled_while_not_in_bam),
+			RX_STATS(num_bam_int_handled_while_in_bam_state));
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read NTN stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_wdi(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct IpaHwStatsWDIInfoData_t stats;
+	int nbytes;
+	int cnt = 0;
+
+	if (!ipa3_get_wdi_stats(&stats)) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"TX num_pkts_processed=%u\n"
+			"TX copy_engine_doorbell_value=%u\n"
+			"TX num_db_fired=%u\n"
+			"TX ringFull=%u\n"
+			"TX ringEmpty=%u\n"
+			"TX ringUsageHigh=%u\n"
+			"TX ringUsageLow=%u\n"
+			"TX RingUtilCount=%u\n"
+			"TX bamFifoFull=%u\n"
+			"TX bamFifoEmpty=%u\n"
+			"TX bamFifoUsageHigh=%u\n"
+			"TX bamFifoUsageLow=%u\n"
+			"TX bamUtilCount=%u\n"
+			"TX num_db=%u\n"
+			"TX num_unexpected_db=%u\n"
+			"TX num_bam_int_handled=%u\n"
+			"TX num_bam_int_in_non_running_state=%u\n"
+			"TX num_qmb_int_handled=%u\n"
+			"TX num_bam_int_handled_while_wait_for_bam=%u\n",
+			stats.tx_ch_stats.num_pkts_processed,
+			stats.tx_ch_stats.copy_engine_doorbell_value,
+			stats.tx_ch_stats.num_db_fired,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringFull,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringEmpty,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringUsageHigh,
+			stats.tx_ch_stats.tx_comp_ring_stats.ringUsageLow,
+			stats.tx_ch_stats.tx_comp_ring_stats.RingUtilCount,
+			stats.tx_ch_stats.bam_stats.bamFifoFull,
+			stats.tx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.tx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.tx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.tx_ch_stats.bam_stats.bamUtilCount,
+			stats.tx_ch_stats.num_db,
+			stats.tx_ch_stats.num_unexpected_db,
+			stats.tx_ch_stats.num_bam_int_handled,
+			stats.tx_ch_stats.num_bam_int_in_non_running_state,
+			stats.tx_ch_stats.num_qmb_int_handled,
+			stats.tx_ch_stats.
+				num_bam_int_handled_while_wait_for_bam);
+		cnt += nbytes;
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+			"RX max_outstanding_pkts=%u\n"
+			"RX num_pkts_processed=%u\n"
+			"RX rx_ring_rp_value=%u\n"
+			"RX ringFull=%u\n"
+			"RX ringEmpty=%u\n"
+			"RX ringUsageHigh=%u\n"
+			"RX ringUsageLow=%u\n"
+			"RX RingUtilCount=%u\n"
+			"RX bamFifoFull=%u\n"
+			"RX bamFifoEmpty=%u\n"
+			"RX bamFifoUsageHigh=%u\n"
+			"RX bamFifoUsageLow=%u\n"
+			"RX bamUtilCount=%u\n"
+			"RX num_bam_int_handled=%u\n"
+			"RX num_db=%u\n"
+			"RX num_unexpected_db=%u\n"
+			"RX num_pkts_in_dis_uninit_state=%u\n"
+			"num_ic_inj_vdev_change=%u\n"
+			"num_ic_inj_fw_desc_change=%u\n"
+			"RX reserved1=%u\n"
+			"RX reserved2=%u\n",
+			stats.rx_ch_stats.max_outstanding_pkts,
+			stats.rx_ch_stats.num_pkts_processed,
+			stats.rx_ch_stats.rx_ring_rp_value,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringFull,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringEmpty,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageHigh,
+			stats.rx_ch_stats.rx_ind_ring_stats.ringUsageLow,
+			stats.rx_ch_stats.rx_ind_ring_stats.RingUtilCount,
+			stats.rx_ch_stats.bam_stats.bamFifoFull,
+			stats.rx_ch_stats.bam_stats.bamFifoEmpty,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageHigh,
+			stats.rx_ch_stats.bam_stats.bamFifoUsageLow,
+			stats.rx_ch_stats.bam_stats.bamUtilCount,
+			stats.rx_ch_stats.num_bam_int_handled,
+			stats.rx_ch_stats.num_db,
+			stats.rx_ch_stats.num_unexpected_db,
+			stats.rx_ch_stats.num_pkts_in_dis_uninit_state,
+			stats.rx_ch_stats.num_ic_inj_vdev_change,
+			stats.rx_ch_stats.num_ic_inj_fw_desc_change,
+			stats.rx_ch_stats.reserved1,
+			stats.rx_ch_stats.reserved2);
+		cnt += nbytes;
+	} else {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Fail to read WDI stats\n");
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_write_dbg_cnt(struct file *file, const char __user *buf,
+		size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	u32 option = 0;
+	struct ipahal_reg_debug_cnt_ctrl dbg_cnt_ctrl;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, buf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtou32(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	memset(&dbg_cnt_ctrl, 0, sizeof(dbg_cnt_ctrl));
+	dbg_cnt_ctrl.type = DBG_CNT_TYPE_GENERAL;
+	dbg_cnt_ctrl.product = true;
+	dbg_cnt_ctrl.src_pipe = 0xff;
+	dbg_cnt_ctrl.rule_idx_pipe_rule = false;
+	dbg_cnt_ctrl.rule_idx = 0;
+	if (option == 1)
+		dbg_cnt_ctrl.en = true;
+	else
+		dbg_cnt_ctrl.en = false;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg_cnt_ctrl);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return count;
+}
+
+static ssize_t ipa3_read_dbg_cnt(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	u32 regval;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	regval =
+		ipahal_read_reg_n(IPA_DEBUG_CNT_REG_n, 0);
+	nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+			"IPA_DEBUG_CNT_REG_0=0x%x\n", regval);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_read_msg(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int nbytes;
+	int cnt = 0;
+	int i;
+
+	for (i = 0; i < IPA_EVENT_MAX_NUM; i++) {
+		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
+				"msg[%u:%27s] W:%u R:%u\n", i,
+				ipa3_event_name[i],
+				ipa3_ctx->stats.msg_w[i],
+				ipa3_ctx->stats.msg_r[i]);
+		cnt += nbytes;
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static ssize_t ipa3_read_nat4(struct file *file,
+		char __user *ubuf, size_t count,
+		loff_t *ppos) {
+
+#define ENTRY_U32_FIELDS 8
+#define NAT_ENTRY_ENABLE 0x8000
+#define NAT_ENTRY_RST_FIN_BIT 0x4000
+#define BASE_TABLE 0
+#define EXPANSION_TABLE 1
+
+	u32 *base_tbl, *indx_tbl;
+	u32 tbl_size, *tmp;
+	u32 value, i, j, rule_id;
+	u16 enable, tbl_entry, flag;
+	u32 no_entrys = 0;
+
+	mutex_lock(&ipa3_ctx->nat_mem.lock);
+	value = ipa3_ctx->nat_mem.public_ip_addr;
+	pr_err(
+				"Table IP Address:%d.%d.%d.%d\n",
+				((value & 0xFF000000) >> 24),
+				((value & 0x00FF0000) >> 16),
+				((value & 0x0000FF00) >> 8),
+				((value & 0x000000FF)));
+
+	pr_err("Table Size:%d\n",
+				ipa3_ctx->nat_mem.size_base_tables);
+
+	pr_err("Expansion Table Size:%d\n",
+				ipa3_ctx->nat_mem.size_expansion_tables-1);
+
+	if (!ipa3_ctx->nat_mem.is_sys_mem)
+		pr_err("Not supported for local(shared) memory\n");
+
+	/* Print Base tables */
+	rule_id = 0;
+	for (j = 0; j < 2; j++) {
+		if (j == BASE_TABLE) {
+			tbl_size = ipa3_ctx->nat_mem.size_base_tables;
+			base_tbl = (u32 *)ipa3_ctx->nat_mem.ipv4_rules_addr;
+
+			pr_err("\nBase Table:\n");
+		} else {
+			tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
+			base_tbl =
+			 (u32 *)ipa3_ctx->nat_mem.ipv4_expansion_rules_addr;
+
+			pr_err("\nExpansion Base Table:\n");
+		}
+
+		if (base_tbl != NULL) {
+			for (i = 0; i <= tbl_size; i++, rule_id++) {
+				tmp = base_tbl;
+				value = tmp[4];
+				enable = ((value & 0xFFFF0000) >> 16);
+
+				if (enable & NAT_ENTRY_ENABLE) {
+					no_entrys++;
+					pr_err("Rule:%d ", rule_id);
+
+					value = *tmp;
+					pr_err(
+						"Private_IP:%d.%d.%d.%d ",
+						((value & 0xFF000000) >> 24),
+						((value & 0x00FF0000) >> 16),
+						((value & 0x0000FF00) >> 8),
+						((value & 0x000000FF)));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Target_IP:%d.%d.%d.%d ",
+						((value & 0xFF000000) >> 24),
+						((value & 0x00FF0000) >> 16),
+						((value & 0x0000FF00) >> 8),
+						((value & 0x000000FF)));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Next_Index:%d  Public_Port:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Private_Port:%d  Target_Port:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					flag = ((value & 0xFFFF0000) >> 16);
+					if (flag & NAT_ENTRY_RST_FIN_BIT) {
+						pr_err(
+								"IP_CKSM_delta:0x%x  Flags:%s ",
+							  (value & 0x0000FFFF),
+								"Direct_To_A5");
+					} else {
+						pr_err(
+							"IP_CKSM_delta:0x%x  Flags:%s ",
+							(value & 0x0000FFFF),
+							"Fwd_to_route");
+					}
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Time_stamp:0x%x Proto:%d ",
+						(value & 0x00FFFFFF),
+						((value & 0xFF000000) >> 24));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"Prev_Index:%d  Indx_tbl_entry:%d ",
+						(value & 0x0000FFFF),
+						((value & 0xFFFF0000) >> 16));
+					tmp++;
+
+					value = *tmp;
+					pr_err(
+						"TCP_UDP_cksum_delta:0x%x\n",
+						((value & 0xFFFF0000) >> 16));
+				}
+
+				base_tbl += ENTRY_U32_FIELDS;
+
+			}
+		}
+	}
+
+	/* Print Index tables */
+	rule_id = 0;
+	for (j = 0; j < 2; j++) {
+		if (j == BASE_TABLE) {
+			tbl_size = ipa3_ctx->nat_mem.size_base_tables;
+			indx_tbl = (u32 *)ipa3_ctx->nat_mem.index_table_addr;
+
+			pr_err("\nIndex Table:\n");
+		} else {
+			tbl_size = ipa3_ctx->nat_mem.size_expansion_tables-1;
+			indx_tbl =
+			 (u32 *)ipa3_ctx->nat_mem.index_table_expansion_addr;
+
+			pr_err("\nExpansion Index Table:\n");
+		}
+
+		if (indx_tbl != NULL) {
+			for (i = 0; i <= tbl_size; i++, rule_id++) {
+				tmp = indx_tbl;
+				value = *tmp;
+				tbl_entry = (value & 0x0000FFFF);
+
+				if (tbl_entry) {
+					pr_err("Rule:%d ", rule_id);
+
+					value = *tmp;
+					pr_err(
+						"Table_Entry:%d  Next_Index:%d\n",
+						tbl_entry,
+						((value & 0xFFFF0000) >> 16));
+				}
+
+				indx_tbl++;
+			}
+		}
+	}
+	pr_err("Current No. Nat Entries: %d\n", no_entrys);
+	mutex_unlock(&ipa3_ctx->nat_mem.lock);
+
+	return 0;
+}
+
+static ssize_t ipa3_rm_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	int result, nbytes, cnt = 0;
+
+	result = ipa_rm_stat(dbg_buff, IPA_MAX_MSG_LEN);
+	if (result < 0) {
+		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
+				"Error in printing RM stat %d\n", result);
+		cnt += nbytes;
+	} else
+		cnt += result;
+
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
+}
+
+static void ipa_dump_status(struct ipahal_pkt_status *status)
+{
+	IPA_DUMP_STATUS_FIELD(status_opcode);
+	IPA_DUMP_STATUS_FIELD(exception);
+	IPA_DUMP_STATUS_FIELD(status_mask);
+	IPA_DUMP_STATUS_FIELD(pkt_len);
+	IPA_DUMP_STATUS_FIELD(endp_src_idx);
+	IPA_DUMP_STATUS_FIELD(endp_dest_idx);
+	IPA_DUMP_STATUS_FIELD(metadata);
+	IPA_DUMP_STATUS_FIELD(flt_local);
+	IPA_DUMP_STATUS_FIELD(flt_hash);
+	IPA_DUMP_STATUS_FIELD(flt_global);
+	IPA_DUMP_STATUS_FIELD(flt_ret_hdr);
+	IPA_DUMP_STATUS_FIELD(flt_miss);
+	IPA_DUMP_STATUS_FIELD(flt_rule_id);
+	IPA_DUMP_STATUS_FIELD(rt_local);
+	IPA_DUMP_STATUS_FIELD(rt_hash);
+	IPA_DUMP_STATUS_FIELD(ucp);
+	IPA_DUMP_STATUS_FIELD(rt_tbl_idx);
+	IPA_DUMP_STATUS_FIELD(rt_miss);
+	IPA_DUMP_STATUS_FIELD(rt_rule_id);
+	IPA_DUMP_STATUS_FIELD(nat_hit);
+	IPA_DUMP_STATUS_FIELD(nat_entry_idx);
+	IPA_DUMP_STATUS_FIELD(nat_type);
+	pr_err("tag = 0x%llx\n", (u64)status->tag_info & 0xFFFFFFFFFFFF);
+	IPA_DUMP_STATUS_FIELD(seq_num);
+	IPA_DUMP_STATUS_FIELD(time_of_day_ctr);
+	IPA_DUMP_STATUS_FIELD(hdr_local);
+	IPA_DUMP_STATUS_FIELD(hdr_offset);
+	IPA_DUMP_STATUS_FIELD(frag_hit);
+	IPA_DUMP_STATUS_FIELD(frag_rule);
+}
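IPA_DUMP_STATUS_FIELD() is defined earlier in this file, outside the lines shown here. As a point of reference only, a plausible shape for such a macro, assuming it simply prints the named member of the local status pointer, would be:

/* Assumed shape, for illustration only - the real macro is defined above. */
#define IPA_DUMP_STATUS_FIELD(f) \
	pr_err(#f " = %d\n", (int)status->f)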
+
+static ssize_t ipa_status_stats_read(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	struct ipa3_status_stats *stats;
+	int i, j;
+
+	stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+	if (!stats)
+		return -EFAULT;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa3_ctx->ep[i].sys || !ipa3_ctx->ep[i].sys->status_stat)
+			continue;
+
+		memcpy(stats, ipa3_ctx->ep[i].sys->status_stat, sizeof(*stats));
+		pr_err("Statuses for pipe %d\n", i);
+		for (j = 0; j < IPA_MAX_STATUS_STAT_NUM; j++) {
+			pr_err("curr=%d\n", stats->curr);
+			ipa_dump_status(&stats->status[stats->curr]);
+			pr_err("\n\n\n");
+			stats->curr = (stats->curr + 1) %
+				IPA_MAX_STATUS_STAT_NUM;
+		}
+	}
+
+	kfree(stats);
+	return 0;
+}
+
+static ssize_t ipa3_print_active_clients_log(struct file *file,
+		char __user *ubuf, size_t count, loff_t *ppos)
+{
+	int cnt;
+	int table_size;
+
+	if (active_clients_buf == NULL) {
+		IPAERR("Active Clients buffer is not allocated");
+		return 0;
+	}
+	memset(active_clients_buf, 0, IPA_DBG_ACTIVE_CLIENT_BUF_SIZE);
+	ipa3_active_clients_lock();
+	cnt = ipa3_active_clients_log_print_buffer(active_clients_buf,
+			IPA_DBG_ACTIVE_CLIENT_BUF_SIZE - IPA_MAX_MSG_LEN);
+	table_size = ipa3_active_clients_log_print_table(active_clients_buf
+			+ cnt, IPA_MAX_MSG_LEN);
+	ipa3_active_clients_unlock();
+
+	return simple_read_from_buffer(ubuf, count, ppos,
+			active_clients_buf, cnt + table_size);
+}
+
+static ssize_t ipa3_clear_active_clients_log(struct file *file,
+		const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	ipa3_active_clients_log_clear();
+
+	return count;
+}
+
+static ssize_t ipa3_enable_ipc_low(struct file *file,
+	const char __user *ubuf, size_t count, loff_t *ppos)
+{
+	unsigned long missing;
+	s8 option = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &option))
+		return -EFAULT;
+
+	if (option) {
+		if (!ipa3_ctx->logbuf_low) {
+			ipa3_ctx->logbuf_low =
+				ipc_log_context_create(IPA_IPC_LOG_PAGES,
+					"ipa_low", 0);
+		}
+
+		if (ipa3_ctx->logbuf_low == NULL) {
+			IPAERR("failed to get logbuf_low\n");
+			return -EFAULT;
+		}
+	} else {
+		if (ipa3_ctx->logbuf_low)
+			ipc_log_context_destroy(ipa3_ctx->logbuf_low);
+		ipa3_ctx->logbuf_low = NULL;
+	}
+
+	return count;
+}
+
+const struct file_operations ipa3_gen_reg_ops = {
+	.read = ipa3_read_gen_reg,
+};
+
+const struct file_operations ipa3_ep_reg_ops = {
+	.read = ipa3_read_ep_reg,
+	.write = ipa3_write_ep_reg,
+};
+
+const struct file_operations ipa3_keep_awake_ops = {
+	.read = ipa3_read_keep_awake,
+	.write = ipa3_write_keep_awake,
+};
+
+const struct file_operations ipa3_ep_holb_ops = {
+	.write = ipa3_write_ep_holb,
+};
+
+const struct file_operations ipa3_hdr_ops = {
+	.read = ipa3_read_hdr,
+};
+
+const struct file_operations ipa3_rt_ops = {
+	.read = ipa3_read_rt,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_rt_hw_ops = {
+	.read = ipa3_read_rt_hw,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_proc_ctx_ops = {
+	.read = ipa3_read_proc_ctx,
+};
+
+const struct file_operations ipa3_flt_ops = {
+	.read = ipa3_read_flt,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_flt_hw_ops = {
+	.read = ipa3_read_flt_hw,
+	.open = ipa3_open_dbg,
+};
+
+const struct file_operations ipa3_stats_ops = {
+	.read = ipa3_read_stats,
+};
+
+const struct file_operations ipa3_wstats_ops = {
+	.read = ipa3_read_wstats,
+};
+
+const struct file_operations ipa3_wdi_ops = {
+	.read = ipa3_read_wdi,
+};
+
+const struct file_operations ipa3_ntn_ops = {
+	.read = ipa3_read_ntn,
+};
+
+const struct file_operations ipa3_msg_ops = {
+	.read = ipa3_read_msg,
+};
+
+const struct file_operations ipa3_dbg_cnt_ops = {
+	.read = ipa3_read_dbg_cnt,
+	.write = ipa3_write_dbg_cnt,
+};
+
+const struct file_operations ipa3_status_stats_ops = {
+	.read = ipa_status_stats_read,
+};
+
+const struct file_operations ipa3_nat4_ops = {
+	.read = ipa3_read_nat4,
+};
+
+const struct file_operations ipa3_rm_stats = {
+	.read = ipa3_rm_read_stats,
+};
+
+const struct file_operations ipa3_active_clients = {
+	.read = ipa3_print_active_clients_log,
+	.write = ipa3_clear_active_clients_log,
+};
+
+const struct file_operations ipa3_ipc_low_ops = {
+	.write = ipa3_enable_ipc_low,
+};
+
+void ipa3_debugfs_init(void)
+{
+	const mode_t read_only_mode = S_IRUSR | S_IRGRP | S_IROTH;
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP;
+	const mode_t write_only_mode = S_IWUSR | S_IWGRP;
+	struct dentry *file;
+
+	dent = debugfs_create_dir("ipa", 0);
+	if (IS_ERR(dent)) {
+		IPAERR("fail to create folder in debug_fs.\n");
+		return;
+	}
+
+	file = debugfs_create_u32("hw_type", read_only_mode,
+			dent, &ipa3_ctx->ipa_hw_type);
+	if (!file) {
+		IPAERR("could not create hw_type file\n");
+		goto fail;
+	}
+
+
+	dfile_gen_reg = debugfs_create_file("gen_reg", read_only_mode, dent, 0,
+			&ipa3_gen_reg_ops);
+	if (!dfile_gen_reg || IS_ERR(dfile_gen_reg)) {
+		IPAERR("fail to create file for debug_fs gen_reg\n");
+		goto fail;
+	}
+
+	dfile_active_clients = debugfs_create_file("active_clients",
+			read_write_mode, dent, 0, &ipa3_active_clients);
+	if (!dfile_active_clients || IS_ERR(dfile_active_clients)) {
+		IPAERR("fail to create file for debug_fs active_clients\n");
+		goto fail;
+	}
+
+	active_clients_buf = NULL;
+	active_clients_buf = kzalloc(IPA_DBG_ACTIVE_CLIENT_BUF_SIZE,
+			GFP_KERNEL);
+	if (active_clients_buf == NULL)
+		IPAERR("fail to allocate active clients memory buffer");
+
+	dfile_ep_reg = debugfs_create_file("ep_reg", read_write_mode, dent, 0,
+			&ipa3_ep_reg_ops);
+	if (!dfile_ep_reg || IS_ERR(dfile_ep_reg)) {
+		IPAERR("fail to create file for debug_fs ep_reg\n");
+		goto fail;
+	}
+
+	dfile_keep_awake = debugfs_create_file("keep_awake", read_write_mode,
+			dent, 0, &ipa3_keep_awake_ops);
+	if (!dfile_keep_awake || IS_ERR(dfile_keep_awake)) {
+		IPAERR("fail to create file for debug_fs dfile_keep_awake\n");
+		goto fail;
+	}
+
+	dfile_ep_holb = debugfs_create_file("holb", write_only_mode, dent,
+			0, &ipa3_ep_holb_ops);
+	if (!dfile_ep_holb || IS_ERR(dfile_ep_holb)) {
+		IPAERR("fail to create file for debug_fs dfile_ep_hol_en\n");
+		goto fail;
+	}
+
+	dfile_hdr = debugfs_create_file("hdr", read_only_mode, dent, 0,
+			&ipa3_hdr_ops);
+	if (!dfile_hdr || IS_ERR(dfile_hdr)) {
+		IPAERR("fail to create file for debug_fs hdr\n");
+		goto fail;
+	}
+
+	dfile_proc_ctx = debugfs_create_file("proc_ctx", read_only_mode, dent,
+		0, &ipa3_proc_ctx_ops);
+	if (!dfile_proc_ctx || IS_ERR(dfile_proc_ctx)) {
+		IPAERR("fail to create file for debug_fs proc_ctx\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt = debugfs_create_file("ip4_rt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa3_rt_ops);
+	if (!dfile_ip4_rt || IS_ERR(dfile_ip4_rt)) {
+		IPAERR("fail to create file for debug_fs ip4 rt\n");
+		goto fail;
+	}
+
+	dfile_ip4_rt_hw = debugfs_create_file("ip4_rt_hw", read_only_mode, dent,
+		(void *)IPA_IP_v4, &ipa3_rt_hw_ops);
+	if (!dfile_ip4_rt_hw || IS_ERR(dfile_ip4_rt_hw)) {
+		IPAERR("fail to create file for debug_fs ip4 rt hw\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt = debugfs_create_file("ip6_rt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa3_rt_ops);
+	if (!dfile_ip6_rt || IS_ERR(dfile_ip6_rt)) {
+		IPAERR("fail to create file for debug_fs ip6:w rt\n");
+		goto fail;
+	}
+
+	dfile_ip6_rt_hw = debugfs_create_file("ip6_rt_hw", read_only_mode, dent,
+		(void *)IPA_IP_v6, &ipa3_rt_hw_ops);
+	if (!dfile_ip6_rt_hw || IS_ERR(dfile_ip6_rt_hw)) {
+		IPAERR("fail to create file for debug_fs ip6 rt hw\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt = debugfs_create_file("ip4_flt", read_only_mode, dent,
+			(void *)IPA_IP_v4, &ipa3_flt_ops);
+	if (!dfile_ip4_flt || IS_ERR(dfile_ip4_flt)) {
+		IPAERR("fail to create file for debug_fs ip4 flt\n");
+		goto fail;
+	}
+
+	dfile_ip4_flt_hw = debugfs_create_file("ip4_flt_hw", read_only_mode,
+			dent, (void *)IPA_IP_v4, &ipa3_flt_hw_ops);
+	if (!dfile_ip4_flt_hw || IS_ERR(dfile_ip4_flt_hw)) {
+		IPAERR("fail to create file for debug_fs ip4 flt\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt = debugfs_create_file("ip6_flt", read_only_mode, dent,
+			(void *)IPA_IP_v6, &ipa3_flt_ops);
+	if (!dfile_ip6_flt || IS_ERR(dfile_ip6_flt)) {
+		IPAERR("fail to create file for debug_fs ip6 flt\n");
+		goto fail;
+	}
+
+	dfile_ip6_flt_hw = debugfs_create_file("ip6_flt_hw", read_only_mode,
+			dent, (void *)IPA_IP_v6, &ipa3_flt_hw_ops);
+	if (!dfile_ip6_flt_hw || IS_ERR(dfile_ip6_flt_hw)) {
+		IPAERR("fail to create file for debug_fs ip6 flt\n");
+		goto fail;
+	}
+
+	dfile_stats = debugfs_create_file("stats", read_only_mode, dent, 0,
+			&ipa3_stats_ops);
+	if (!dfile_stats || IS_ERR(dfile_stats)) {
+		IPAERR("fail to create file for debug_fs stats\n");
+		goto fail;
+	}
+
+	dfile_wstats = debugfs_create_file("wstats", read_only_mode,
+			dent, 0, &ipa3_wstats_ops);
+	if (!dfile_wstats || IS_ERR(dfile_wstats)) {
+		IPAERR("fail to create file for debug_fs wstats\n");
+		goto fail;
+	}
+
+	dfile_wdi_stats = debugfs_create_file("wdi", read_only_mode, dent, 0,
+			&ipa3_wdi_ops);
+	if (!dfile_wdi_stats || IS_ERR(dfile_wdi_stats)) {
+		IPAERR("fail to create file for debug_fs wdi stats\n");
+		goto fail;
+	}
+
+	dfile_ntn_stats = debugfs_create_file("ntn", read_only_mode, dent, 0,
+			&ipa3_ntn_ops);
+	if (!dfile_ntn_stats || IS_ERR(dfile_ntn_stats)) {
+		IPAERR("fail to create file for debug_fs ntn stats\n");
+		goto fail;
+	}
+
+	dfile_dbg_cnt = debugfs_create_file("dbg_cnt", read_write_mode, dent, 0,
+			&ipa3_dbg_cnt_ops);
+	if (!dfile_dbg_cnt || IS_ERR(dfile_dbg_cnt)) {
+		IPAERR("fail to create file for debug_fs dbg_cnt\n");
+		goto fail;
+	}
+
+	dfile_msg = debugfs_create_file("msg", read_only_mode, dent, 0,
+			&ipa3_msg_ops);
+	if (!dfile_msg || IS_ERR(dfile_msg)) {
+		IPAERR("fail to create file for debug_fs msg\n");
+		goto fail;
+	}
+
+	dfile_ip4_nat = debugfs_create_file("ip4_nat", read_only_mode, dent,
+			0, &ipa3_nat4_ops);
+	if (!dfile_ip4_nat || IS_ERR(dfile_ip4_nat)) {
+		IPAERR("fail to create file for debug_fs ip4 nat\n");
+		goto fail;
+	}
+
+	dfile_rm_stats = debugfs_create_file("rm_stats",
+			read_only_mode, dent, 0, &ipa3_rm_stats);
+	if (!dfile_rm_stats || IS_ERR(dfile_rm_stats)) {
+		IPAERR("fail to create file for debug_fs rm_stats\n");
+		goto fail;
+	}
+
+	dfile_status_stats = debugfs_create_file("status_stats",
+			read_only_mode, dent, 0, &ipa3_status_stats_ops);
+	if (!dfile_status_stats || IS_ERR(dfile_status_stats)) {
+		IPAERR("fail to create file for debug_fs status_stats\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("enable_clock_scaling", read_write_mode,
+		dent, &ipa3_ctx->enable_clock_scaling);
+	if (!file) {
+		IPAERR("could not create enable_clock_scaling file\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_nominal_mbps",
+		read_write_mode, dent,
+		&ipa3_ctx->ctrl->clock_scaling_bw_threshold_nominal);
+	if (!file) {
+		IPAERR("could not create bw_threshold_nominal_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_u32("clock_scaling_bw_threshold_turbo_mbps",
+		read_write_mode, dent,
+		&ipa3_ctx->ctrl->clock_scaling_bw_threshold_turbo);
+	if (!file) {
+		IPAERR("could not create bw_threshold_turbo_mbps\n");
+		goto fail;
+	}
+
+	file = debugfs_create_file("enable_low_prio_print", write_only_mode,
+		dent, 0, &ipa3_ipc_low_ops);
+	if (!file) {
+		IPAERR("could not create enable_low_prio_print file\n");
+		goto fail;
+	}
+
+	return;
+
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+void ipa3_debugfs_remove(void)
+{
+	if (IS_ERR(dent)) {
+		IPAERR("ipa3_debugfs_remove: folder was not created.\n");
+		return;
+	}
+	if (active_clients_buf != NULL) {
+		kfree(active_clients_buf);
+		active_clients_buf = NULL;
+	}
+	debugfs_remove_recursive(dent);
+}
+
+struct dentry *ipa_debugfs_get_root(void)
+{
+	return dent;
+}
+EXPORT_SYMBOL(ipa_debugfs_get_root);
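ipa_debugfs_get_root() is exported so that other IPA modules can place their own debugfs entries under the same "ipa" directory. A minimal sketch of how a client might use it is shown below; the sub-module name is hypothetical and only the standard debugfs helpers are assumed.

/* Illustrative sketch only - "my_submodule" is a hypothetical client. */
static struct dentry *my_submodule_dent;

static int my_submodule_debugfs_init(void)
{
	struct dentry *ipa_root = ipa_debugfs_get_root();

	if (ipa_root == NULL || IS_ERR(ipa_root))
		return -ENODEV;

	/* creates <debugfs>/ipa/my_submodule/ under the IPA root */
	my_submodule_dent = debugfs_create_dir("my_submodule", ipa_root);
	if (my_submodule_dent == NULL || IS_ERR(my_submodule_dent))
		return -ENOMEM;

	return 0;
}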
+
+#else /* !CONFIG_DEBUG_FS */
+void ipa3_debugfs_init(void) {}
+void ipa3_debugfs_remove(void) {}
+#endif
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
new file mode 100644
index 0000000..2a1c286
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -0,0 +1,990 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include "linux/msm_gsi.h"
+#include "ipa_i.h"
+
+#define IPA_DMA_POLLING_MIN_SLEEP_RX 1010
+#define IPA_DMA_POLLING_MAX_SLEEP_RX 1050
+#define IPA_DMA_SYS_DESC_MAX_FIFO_SZ 0x7FF8
+#define IPA_DMA_MAX_PKT_SZ 0xFFFF
+#define IPA_DMA_MAX_PENDING_SYNC (IPA_SYS_DESC_FIFO_SZ / \
+	sizeof(struct sps_iovec) - 1)
+#define IPA_DMA_MAX_PENDING_ASYNC (IPA_DMA_SYS_DESC_MAX_FIFO_SZ / \
+	sizeof(struct sps_iovec) - 1)
+
+#define IPADMA_DRV_NAME "ipa_dma"
+
+#define IPADMA_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_ERR(fmt, args...) \
+	do { \
+		pr_err(IPADMA_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPADMA_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPADMA_FUNC_ENTRY() \
+	IPADMA_DBG_LOW("ENTRY\n")
+
+#define IPADMA_FUNC_EXIT() \
+	IPADMA_DBG_LOW("EXIT\n")
+
+#ifdef CONFIG_DEBUG_FS
+#define IPADMA_MAX_MSG_LEN 1024
+static char dbg_buff[IPADMA_MAX_MSG_LEN];
+static void ipa3_dma_debugfs_init(void);
+static void ipa3_dma_debugfs_destroy(void);
+#else
+static void ipa3_dma_debugfs_init(void) {}
+static void ipa3_dma_debugfs_destroy(void) {}
+#endif
+
+/**
+ * struct ipa3_dma_ctx - IPADMA driver context information
+ * @is_enabled: is ipa_dma enabled?
+ * @destroy_pending: destroy ipa_dma after handling all pending memcpy
+ * @ipa_dma_xfer_wrapper_cache: cache of ipa3_dma_xfer_wrapper structs
+ * @sync_lock: lock for synchronisation in sync_memcpy
+ * @async_lock: lock for synchronisation in async_memcpy
+ * @enable_lock: lock for is_enabled
+ * @pending_lock: lock to synchronize is_enabled and the pending counts
+ * @done: no pending work - ipadma can be destroyed
+ * @ipa_dma_sync_prod_hdl: handle of sync memcpy producer
+ * @ipa_dma_async_prod_hdl: handle of async memcpy producer
+ * @ipa_dma_sync_cons_hdl: handle of sync memcpy consumer
+ * @ipa_dma_async_cons_hdl: handle of async memcpy consumer
+ * @sync_memcpy_pending_cnt: number of pending sync memcpy operations
+ * @async_memcpy_pending_cnt: number of pending async memcpy operations
+ * @uc_memcpy_pending_cnt: number of pending uc memcpy operations
+ * @total_sync_memcpy: total number of sync memcpy (statistics)
+ * @total_async_memcpy: total number of async memcpy (statistics)
+ * @total_uc_memcpy: total number of uc memcpy (statistics)
+ */
+struct ipa3_dma_ctx {
+	bool is_enabled;
+	bool destroy_pending;
+	struct kmem_cache *ipa_dma_xfer_wrapper_cache;
+	struct mutex sync_lock;
+	spinlock_t async_lock;
+	struct mutex enable_lock;
+	spinlock_t pending_lock;
+	struct completion done;
+	u32 ipa_dma_sync_prod_hdl;
+	u32 ipa_dma_async_prod_hdl;
+	u32 ipa_dma_sync_cons_hdl;
+	u32 ipa_dma_async_cons_hdl;
+	atomic_t sync_memcpy_pending_cnt;
+	atomic_t async_memcpy_pending_cnt;
+	atomic_t uc_memcpy_pending_cnt;
+	atomic_t total_sync_memcpy;
+	atomic_t total_async_memcpy;
+	atomic_t total_uc_memcpy;
+};
+static struct ipa3_dma_ctx *ipa3_dma_ctx;
+
+/**
+ * ipa3_dma_init() -Initialize IPADMA.
+ *
+ * This function initializes all IPADMA internal data and connects the
+ * DMA pipes:
+ *	MEMCPY_DMA_SYNC_PROD ->MEMCPY_DMA_SYNC_CONS
+ *	MEMCPY_DMA_ASYNC_PROD->MEMCPY_DMA_ASYNC_CONS
+ *
+ * Return codes: 0: success
+ *		-EFAULT: IPADMA is already initialized
+ *		-EINVAL: IPA driver is not initialized
+ *		-ENOMEM: memory allocation error
+ *		-EPERM: pipe connection failed
+ */
+int ipa3_dma_init(void)
+{
+	struct ipa3_dma_ctx *ipa_dma_ctx_t;
+	struct ipa_sys_connect_params sys_in;
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+
+	if (ipa3_dma_ctx) {
+		IPADMA_ERR("Already initialized.\n");
+		return -EFAULT;
+	}
+
+	if (!ipa3_is_ready()) {
+		IPADMA_ERR("IPA is not ready yet\n");
+		return -EINVAL;
+	}
+
+	ipa_dma_ctx_t = kzalloc(sizeof(*(ipa3_dma_ctx)), GFP_KERNEL);
+
+	if (!ipa_dma_ctx_t) {
+		IPADMA_ERR("kzalloc error.\n");
+		return -ENOMEM;
+	}
+
+	ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache =
+		kmem_cache_create("IPA DMA XFER WRAPPER",
+			sizeof(struct ipa3_dma_xfer_wrapper), 0, 0, NULL);
+	if (!ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache) {
+		IPAERR(":failed to create ipa dma xfer wrapper cache.\n");
+		res = -ENOMEM;
+		goto fail_mem_ctrl;
+	}
+
+	mutex_init(&ipa_dma_ctx_t->enable_lock);
+	spin_lock_init(&ipa_dma_ctx_t->async_lock);
+	mutex_init(&ipa_dma_ctx_t->sync_lock);
+	spin_lock_init(&ipa_dma_ctx_t->pending_lock);
+	init_completion(&ipa_dma_ctx_t->done);
+	ipa_dma_ctx_t->is_enabled = false;
+	ipa_dma_ctx_t->destroy_pending = false;
+	atomic_set(&ipa_dma_ctx_t->async_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->sync_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->uc_memcpy_pending_cnt, 0);
+	atomic_set(&ipa_dma_ctx_t->total_async_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_sync_memcpy, 0);
+	atomic_set(&ipa_dma_ctx_t->total_uc_memcpy, 0);
+
+	/* IPADMA SYNC PROD-source for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_prod_hdl)) {
+		IPADMA_ERR(":setup sync prod pipe failed\n");
+		res = -EPERM;
+		goto fail_sync_prod;
+	}
+
+	/* IPADMA SYNC CONS-destination for sync memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_SYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = NULL;
+	sys_in.priv = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_sync_cons_hdl)) {
+		IPADMA_ERR(":setup sync cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_sync_cons;
+	}
+
+	IPADMA_DBG("SYNC MEMCPY pipes are connected\n");
+
+	/* IPADMA ASYNC PROD - source for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_DMA;
+	sys_in.ipa_ep_cfg.mode.dst = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.skip_ep_cfg = false;
+	sys_in.notify = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_prod_hdl)) {
+		IPADMA_ERR(":setup async prod pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_prod;
+	}
+
+	/* IPADMA ASYNC CONS - destination for async memcpy */
+	memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
+	sys_in.client = IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS;
+	sys_in.desc_fifo_sz = IPA_DMA_SYS_DESC_MAX_FIFO_SZ;
+	sys_in.skip_ep_cfg = false;
+	sys_in.ipa_ep_cfg.mode.mode = IPA_BASIC;
+	sys_in.notify = ipa3_dma_async_memcpy_notify_cb;
+	sys_in.priv = NULL;
+	if (ipa3_setup_sys_pipe(&sys_in,
+		&ipa_dma_ctx_t->ipa_dma_async_cons_hdl)) {
+		IPADMA_ERR(":setup async cons pipe failed.\n");
+		res = -EPERM;
+		goto fail_async_cons;
+	}
+	ipa3_dma_debugfs_init();
+	ipa3_dma_ctx = ipa_dma_ctx_t;
+	IPADMA_DBG("ASYNC MEMCPY pipes are connected\n");
+
+	IPADMA_FUNC_EXIT();
+	return res;
+fail_async_cons:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_async_prod_hdl);
+fail_async_prod:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_cons_hdl);
+fail_sync_cons:
+	ipa3_teardown_sys_pipe(ipa_dma_ctx_t->ipa_dma_sync_prod_hdl);
+fail_sync_prod:
+	kmem_cache_destroy(ipa_dma_ctx_t->ipa_dma_xfer_wrapper_cache);
+fail_mem_ctrl:
+	kfree(ipa_dma_ctx_t);
+	ipa3_dma_ctx = NULL;
+	return res;
+
+}
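A minimal bring-up sketch for an in-kernel IPADMA client, assuming it runs only after the IPA driver reports ready; the called functions are the ones defined in this file, while the wrapper itself is hypothetical.

/* Illustrative sketch only - not part of the patch. */
static int my_client_start_ipa_dma(void)
{
	int ret;

	ret = ipa3_dma_init();		/* connect SYNC/ASYNC memcpy pipes */
	if (ret)
		return ret;

	ret = ipa3_dma_enable();	/* vote for IPA clocks */
	if (ret)
		ipa3_dma_destroy();

	return ret;
}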
+
+/**
+ * ipa3_dma_enable() - Vote for IPA clocks.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *		 enabled
+ */
+int ipa3_dma_enable(void)
+{
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't enable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa3_dma_ctx->enable_lock);
+	if (ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("Already enabled.\n");
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("DMA");
+	ipa3_dma_ctx->is_enabled = true;
+	mutex_unlock(&ipa3_dma_ctx->enable_lock);
+
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+static bool ipa3_dma_work_pending(void)
+{
+	if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending sync\n");
+		return true;
+	}
+	if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending async\n");
+		return true;
+	}
+	if (atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt)) {
+		IPADMA_DBG("pending uc\n");
+		return true;
+	}
+	IPADMA_DBG_LOW("no pending work\n");
+	return false;
+}
+
+/**
+ * ipa3_dma_disable() - Unvote for IPA clocks.
+ *
+ * Enters power save mode.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: IPADMA is not initialized
+ *		-EPERM: Operation not permitted as ipa_dma is already
+ *			disabled
+ *		-EFAULT: cannot disable ipa_dma as there is pending
+ *			memcpy work
+ */
+int ipa3_dma_disable(void)
+{
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't disable\n");
+		return -EPERM;
+	}
+	mutex_lock(&ipa3_dma_ctx->enable_lock);
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("Already disabled.\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return -EPERM;
+	}
+	if (ipa3_dma_work_pending()) {
+		IPADMA_ERR("There is pending work, can't disable.\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		mutex_unlock(&ipa3_dma_ctx->enable_lock);
+		return -EFAULT;
+	}
+	ipa3_dma_ctx->is_enabled = false;
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("DMA");
+	mutex_unlock(&ipa3_dma_ctx->enable_lock);
+	IPADMA_FUNC_EXIT();
+	return 0;
+}
+
+/**
+ * ipa3_dma_sync_memcpy()- Perform synchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: other
+ */
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	int ep_idx;
+	int res;
+	int i = 0;
+	struct ipa3_sys_context *cons_sys;
+	struct ipa3_sys_context *prod_sys;
+	struct sps_iovec iov;
+	struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa3_dma_xfer_wrapper *head_descr = NULL;
+	struct gsi_xfer_elem xfer_elem;
+	struct gsi_chan_xfer_notify gsi_notify;
+	unsigned long flags;
+	bool stop_polling = false;
+
+	IPADMA_FUNC_ENTRY();
+	IPADMA_DBG_LOW("dest =  0x%llx, src = 0x%llx, len = %d\n",
+		dest, src, len);
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+		if (((u32)src != src) || ((u32)dest != dest)) {
+			IPADMA_ERR("Bad addr, only 32b addr supported for BAM");
+			return -EINVAL;
+		}
+	}
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+		if (atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt) >=
+				IPA_DMA_MAX_PENDING_SYNC) {
+			atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+			IPADMA_ERR("Reached pending requests limit\n");
+			return -EFAULT;
+		}
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	init_completion(&xfer_descr->xfer_done);
+
+	mutex_lock(&ipa3_dma_ctx->sync_lock);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		xfer_elem.addr = dest;
+		xfer_elem.len = len;
+		xfer_elem.type = GSI_XFER_ELEM_DATA;
+		xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem.xfer_user_data = xfer_descr;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer dest descr res:%d\n",
+				res);
+			goto fail_send;
+		}
+		xfer_elem.addr = src;
+		xfer_elem.len = len;
+		xfer_elem.type = GSI_XFER_ELEM_DATA;
+		xfer_elem.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem.xfer_user_data = NULL;
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer src descr res:%d\n",
+				 res);
+			BUG();
+		}
+	} else {
+		res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
+			NULL, 0);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+			goto fail_send;
+		}
+		res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+			NULL, SPS_IOVEC_FLAG_EOT);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+			BUG();
+		}
+	}
+	head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa3_dma_xfer_wrapper, link);
+
+	/* in case we are not the head of the list, wait for head to wake us */
+	if (xfer_descr != head_descr) {
+		mutex_unlock(&ipa3_dma_ctx->sync_lock);
+		wait_for_completion(&xfer_descr->xfer_done);
+		mutex_lock(&ipa3_dma_ctx->sync_lock);
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+					struct ipa3_dma_xfer_wrapper, link);
+		BUG_ON(xfer_descr != head_descr);
+	}
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+	do {
+		/* wait for transfer to complete */
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+			res = gsi_poll_channel(cons_sys->ep->gsi_chan_hdl,
+				&gsi_notify);
+			if (res == GSI_STATUS_SUCCESS)
+				stop_polling = true;
+			else if (res != GSI_STATUS_POLL_EMPTY)
+				IPADMA_ERR(
+					"Failed: gsi_poll_channel, returned %d loop#:%d\n",
+					res, i);
+		} else {
+			res = sps_get_iovec(cons_sys->ep->ep_hdl, &iov);
+			if (res)
+				IPADMA_ERR(
+					"Failed: get_iovec, returned %d loop#:%d\n",
+					res, i);
+			if (iov.addr != 0)
+				stop_polling = true;
+		}
+		usleep_range(IPA_DMA_POLLING_MIN_SLEEP_RX,
+			IPA_DMA_POLLING_MAX_SLEEP_RX);
+		i++;
+	} while (!stop_polling);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		BUG_ON(len != gsi_notify.bytes_xfered);
+		BUG_ON(dest != ((struct ipa3_dma_xfer_wrapper *)
+				(gsi_notify.xfer_user_data))->phys_addr_dest);
+	} else {
+		BUG_ON(dest != iov.addr);
+		BUG_ON(len != iov.size);
+	}
+
+	mutex_lock(&ipa3_dma_ctx->sync_lock);
+	list_del(&head_descr->link);
+	cons_sys->len--;
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+	/* wake the head of the list */
+	if (!list_empty(&cons_sys->head_desc_list)) {
+		head_descr = list_first_entry(&cons_sys->head_desc_list,
+				struct ipa3_dma_xfer_wrapper, link);
+		complete(&head_descr->xfer_done);
+	}
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+
+	atomic_inc(&ipa3_dma_ctx->total_sync_memcpy);
+	atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_send:
+	list_del(&xfer_descr->link);
+	cons_sys->len--;
+	mutex_unlock(&ipa3_dma_ctx->sync_lock);
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa3_dma_ctx->sync_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	return res;
+}
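ipa3_dma_sync_memcpy() operates on physical (DMA) addresses, so callers typically use DMA-coherent buffers. A hedged usage sketch follows; the struct device pointer, the buffer size and all names are illustrative assumptions, and <linux/dma-mapping.h> is assumed to be included.

/* Illustrative sketch only - 'dev', 'len' and the names are assumptions. */
static int my_sync_copy_example(struct device *dev)
{
	const int len = 2048;
	dma_addr_t src_pa = 0, dst_pa = 0;
	void *src_va, *dst_va;
	int ret = -ENOMEM;

	src_va = dma_alloc_coherent(dev, len, &src_pa, GFP_KERNEL);
	dst_va = dma_alloc_coherent(dev, len, &dst_pa, GFP_KERNEL);
	if (!src_va || !dst_va)
		goto out;

	memset(src_va, 0xAB, len);

	/* blocks, polling the consumer pipe, until the copy completes */
	ret = ipa3_dma_sync_memcpy(dst_pa, src_pa, len);

out:
	if (src_va)
		dma_free_coherent(dev, len, src_va, src_pa);
	if (dst_va)
		dma_free_coherent(dev, len, dst_va, dst_pa);
	return ret;
}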
+
+/**
+ * ipa3_dma_async_memcpy()- Perform asynchronous memcpy using IPA.
+ *
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ * @user_cb: callback function to notify the client when the copy was done.
+ * @user_param: cookie for user_cb.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-SPS_ERROR: on sps failures
+ *		-EFAULT: descr fifo is full.
+ */
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+		void (*user_cb)(void *user1), void *user_param)
+{
+	int ep_idx;
+	int res = 0;
+	struct ipa3_dma_xfer_wrapper *xfer_descr = NULL;
+	struct ipa3_sys_context *prod_sys;
+	struct ipa3_sys_context *cons_sys;
+	struct gsi_xfer_elem xfer_elem_cons, xfer_elem_prod;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	IPADMA_DBG_LOW("dest =  0x%llx, src = 0x%llx, len = %d\n",
+		dest, src, len);
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+	if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+		if (((u32)src != src) || ((u32)dest != dest)) {
+			IPADMA_ERR(
+				"Bad addr - only 32b addr supported for BAM");
+			return -EINVAL;
+		}
+	}
+	if (!user_cb) {
+		IPADMA_ERR("null pointer: user_cb\n");
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPA_DMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS) {
+		if (atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt) >=
+				IPA_DMA_MAX_PENDING_ASYNC) {
+			atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+			IPADMA_ERR("Reached pending requests limit\n");
+			return -EFAULT;
+		}
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+		return -EFAULT;
+	}
+	cons_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD);
+	if (-1 == ep_idx) {
+		IPADMA_ERR("Client %u is not mapped\n",
+			IPA_CLIENT_MEMCPY_DMA_SYNC_PROD);
+		return -EFAULT;
+	}
+	prod_sys = ipa3_ctx->ep[ep_idx].sys;
+
+	xfer_descr = kmem_cache_zalloc(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+					GFP_KERNEL);
+	if (!xfer_descr) {
+		IPADMA_ERR("failed to alloc xfrer descr wrapper\n");
+		res = -ENOMEM;
+		goto fail_mem_alloc;
+	}
+	xfer_descr->phys_addr_dest = dest;
+	xfer_descr->phys_addr_src = src;
+	xfer_descr->len = len;
+	xfer_descr->callback = user_cb;
+	xfer_descr->user1 = user_param;
+
+	spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+	list_add_tail(&xfer_descr->link, &cons_sys->head_desc_list);
+	cons_sys->len++;
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		xfer_elem_cons.addr = dest;
+		xfer_elem_cons.len = len;
+		xfer_elem_cons.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_cons.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_cons.xfer_user_data = xfer_descr;
+		xfer_elem_prod.addr = src;
+		xfer_elem_prod.len = len;
+		xfer_elem_prod.type = GSI_XFER_ELEM_DATA;
+		xfer_elem_prod.flags = GSI_XFER_FLAG_EOT;
+		xfer_elem_prod.xfer_user_data = NULL;
+		res = gsi_queue_xfer(cons_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem_cons, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on dest descr res: %d\n",
+				res);
+			goto fail_send;
+		}
+		res = gsi_queue_xfer(prod_sys->ep->gsi_chan_hdl, 1,
+				&xfer_elem_prod, true);
+		if (res) {
+			IPADMA_ERR(
+				"Failed: gsi_queue_xfer on src descr res: %d\n",
+				res);
+			BUG();
+			goto fail_send;
+		}
+	} else {
+		res = sps_transfer_one(cons_sys->ep->ep_hdl, dest, len,
+			xfer_descr, 0);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on dest descr\n");
+			goto fail_send;
+		}
+		res = sps_transfer_one(prod_sys->ep->ep_hdl, src, len,
+			NULL, SPS_IOVEC_FLAG_EOT);
+		if (res) {
+			IPADMA_ERR("Failed: sps_transfer_one on src descr\n");
+			BUG();
+			goto fail_send;
+		}
+	}
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	IPADMA_FUNC_EXIT();
+	return res;
+
+fail_send:
+	list_del(&xfer_descr->link);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache, xfer_descr);
+fail_mem_alloc:
+	atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	return res;
+}
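ipa3_dma_async_memcpy() returns as soon as both descriptors are queued, so a caller that needs to block provides a completion through user_cb/user_param. A hedged sketch, assuming src_pa/dst_pa/len already describe valid DMA buffers (set up as in the synchronous example above) and that <linux/completion.h> is included:

/* Illustrative sketch only - buffer setup is omitted. */
static void my_async_copy_done(void *user1)
{
	complete((struct completion *)user1);
}

static int my_async_copy_example(dma_addr_t dst_pa, dma_addr_t src_pa, int len)
{
	struct completion done;
	int ret;

	init_completion(&done);

	ret = ipa3_dma_async_memcpy(dst_pa, src_pa, len,
			my_async_copy_done, &done);
	if (ret)
		return ret;

	/* my_async_copy_done() runs from the async consumer notify path */
	wait_for_completion(&done);
	return 0;
}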
+
+/**
+ * ipa3_dma_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Return codes: 0: success
+ *		-EINVAL: invalid params
+ *		-EPERM: operation not permitted as ipa_dma isn't enabled or
+ *			initialized
+ *		-EBADF: IPA uC is not loaded
+ */
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	unsigned long flags;
+
+	IPADMA_FUNC_ENTRY();
+	if (ipa3_dma_ctx == NULL) {
+		IPADMA_ERR("IPADMA isn't initialized, can't memcpy\n");
+		return -EPERM;
+	}
+	if ((max(src, dest) - min(src, dest)) < len) {
+		IPADMA_ERR("invalid addresses - overlapping buffers\n");
+		return -EINVAL;
+	}
+	if (len > IPA_DMA_MAX_PKT_SZ || len <= 0) {
+		IPADMA_ERR("invalid len, %d\n", len);
+		return	-EINVAL;
+	}
+
+	spin_lock_irqsave(&ipa3_dma_ctx->pending_lock, flags);
+	if (!ipa3_dma_ctx->is_enabled) {
+		IPADMA_ERR("can't memcpy, IPADMA isn't enabled\n");
+		spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+		return -EPERM;
+	}
+	atomic_inc(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+	spin_unlock_irqrestore(&ipa3_dma_ctx->pending_lock, flags);
+
+	res = ipa3_uc_memcpy(dest, src, len);
+	if (res) {
+		IPADMA_ERR("ipa3_uc_memcpy failed %d\n", res);
+		goto dec_and_exit;
+	}
+
+	atomic_inc(&ipa3_dma_ctx->total_uc_memcpy);
+	res = 0;
+dec_and_exit:
+	atomic_dec(&ipa3_dma_ctx->uc_memcpy_pending_cnt);
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+	IPADMA_FUNC_EXIT();
+	return res;
+}
+
+/**
+ * ipa3_dma_destroy() - Teardown IPADMA pipes and release IPADMA.
+ *
+ * This is a blocking function; it returns only after IPADMA is destroyed.
+ */
+void ipa3_dma_destroy(void)
+{
+	int res = 0;
+
+	IPADMA_FUNC_ENTRY();
+	if (!ipa3_dma_ctx) {
+		IPADMA_ERR("IPADMA isn't initialized\n");
+		return;
+	}
+
+	if (ipa3_dma_work_pending()) {
+		ipa3_dma_ctx->destroy_pending = true;
+		IPADMA_DBG("There are pending memcpy, wait for completion\n");
+		wait_for_completion(&ipa3_dma_ctx->done);
+	}
+
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC CONS failed\n");
+	ipa3_dma_ctx->ipa_dma_async_cons_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_cons_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC CONS failed\n");
+	ipa3_dma_ctx->ipa_dma_sync_cons_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_async_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA ASYNC PROD failed\n");
+	ipa3_dma_ctx->ipa_dma_async_prod_hdl = 0;
+	res = ipa3_teardown_sys_pipe(ipa3_dma_ctx->ipa_dma_sync_prod_hdl);
+	if (res)
+		IPADMA_ERR("teardown IPADMA SYNC PROD failed\n");
+	ipa3_dma_ctx->ipa_dma_sync_prod_hdl = 0;
+
+	ipa3_dma_debugfs_destroy();
+	kmem_cache_destroy(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache);
+	kfree(ipa3_dma_ctx);
+	ipa3_dma_ctx = NULL;
+
+	IPADMA_FUNC_EXIT();
+}
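Teardown mirrors the bring-up: the client drops its clock vote and then releases the pipes. A minimal sketch, assuming no other user still depends on IPADMA; the wrapper name is hypothetical.

/* Illustrative sketch only - not part of the patch. */
static void my_client_stop_ipa_dma(void)
{
	/* fails with -EFAULT while memcpy work is still pending */
	if (ipa3_dma_disable())
		pr_err("my_client: ipa3_dma_disable failed\n");

	/* blocks until pending transfers drain, then tears down the pipes */
	ipa3_dma_destroy();
}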
+
+/**
+ * ipa3_dma_async_memcpy_notify_cb() - Callback invoked by the IPA driver,
+ * after a notification from the SPS driver or from poll mode, once an Rx
+ * operation has completed (data was written to the dest descriptor on the
+ * async_cons ep).
+ *
+ * @priv - not in use.
+ * @evt - event name - IPA_RECEIVE.
+ * @data - the ipa_mem_buffer.
+ */
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+			enum ipa_dp_evt_type evt, unsigned long data)
+{
+	int ep_idx = 0;
+	struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
+	struct ipa3_sys_context *sys;
+	unsigned long flags;
+	struct ipa_mem_buffer *mem_info;
+
+	IPADMA_FUNC_ENTRY();
+
+	mem_info = (struct ipa_mem_buffer *)data;
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
+	xfer_descr_expected = list_first_entry(&sys->head_desc_list,
+				 struct ipa3_dma_xfer_wrapper, link);
+	list_del(&xfer_descr_expected->link);
+	sys->len--;
+	spin_unlock_irqrestore(&ipa3_dma_ctx->async_lock, flags);
+	if (ipa3_ctx->transport_prototype != IPA_TRANSPORT_TYPE_GSI) {
+		BUG_ON(xfer_descr_expected->phys_addr_dest !=
+				mem_info->phys_base);
+		BUG_ON(xfer_descr_expected->len != mem_info->size);
+	}
+	atomic_inc(&ipa3_dma_ctx->total_async_memcpy);
+	atomic_dec(&ipa3_dma_ctx->async_memcpy_pending_cnt);
+	xfer_descr_expected->callback(xfer_descr_expected->user1);
+
+	kmem_cache_free(ipa3_dma_ctx->ipa_dma_xfer_wrapper_cache,
+		xfer_descr_expected);
+
+	if (ipa3_dma_ctx->destroy_pending && !ipa3_dma_work_pending())
+		complete(&ipa3_dma_ctx->done);
+
+	IPADMA_FUNC_EXIT();
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *dent;
+static struct dentry *dfile_info;
+
+static ssize_t ipa3_dma_debugfs_read(struct file *file, char __user *ubuf,
+				 size_t count, loff_t *ppos)
+{
+	int nbytes = 0;
+
+	if (!ipa3_dma_ctx) {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Not initialized\n");
+	} else {
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Status:\n	IPADMA is %s\n",
+			(ipa3_dma_ctx->is_enabled) ? "Enabled" : "Disabled");
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"Statistics:\n	total sync memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_sync_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"total async memcpy: %d\n	",
+			atomic_read(&ipa3_dma_ctx->total_async_memcpy));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending sync memcpy jobs: %d\n	",
+			atomic_read(&ipa3_dma_ctx->sync_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending async memcpy jobs: %d\n",
+			atomic_read(&ipa3_dma_ctx->async_memcpy_pending_cnt));
+		nbytes += scnprintf(&dbg_buff[nbytes],
+			IPADMA_MAX_MSG_LEN - nbytes,
+			"pending uc memcpy jobs: %d\n",
+			atomic_read(&ipa3_dma_ctx->uc_memcpy_pending_cnt));
+	}
+	return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, nbytes);
+}
+
+static ssize_t ipa3_dma_debugfs_reset_statistics(struct file *file,
+					const char __user *ubuf,
+					size_t count,
+					loff_t *ppos)
+{
+	unsigned long missing;
+	s8 in_num = 0;
+
+	if (sizeof(dbg_buff) < count + 1)
+		return -EFAULT;
+
+	missing = copy_from_user(dbg_buff, ubuf, count);
+	if (missing)
+		return -EFAULT;
+
+	dbg_buff[count] = '\0';
+	if (kstrtos8(dbg_buff, 0, &in_num))
+		return -EFAULT;
+	switch (in_num) {
+	case 0:
+		if (ipa3_dma_work_pending())
+			IPADMA_ERR("Note, there are pending memcpy\n");
+
+		atomic_set(&ipa3_dma_ctx->total_async_memcpy, 0);
+		atomic_set(&ipa3_dma_ctx->total_sync_memcpy, 0);
+		break;
+	default:
+		IPADMA_ERR("invalid argument: To reset statistics echo 0\n");
+		break;
+	}
+	return count;
+}
+
+const struct file_operations ipa3_ipadma_stats_ops = {
+	.read = ipa3_dma_debugfs_read,
+	.write = ipa3_dma_debugfs_reset_statistics,
+};
+
+static void ipa3_dma_debugfs_init(void)
+{
+	const mode_t read_write_mode = S_IRUSR | S_IRGRP | S_IROTH |
+			S_IWUSR | S_IWGRP | S_IWOTH;
+
+	dent = debugfs_create_dir("ipa_dma", 0);
+	if (IS_ERR(dent)) {
+		IPADMA_ERR("fail to create folder ipa_dma\n");
+		return;
+	}
+
+	dfile_info =
+		debugfs_create_file("info", read_write_mode, dent,
+				 0, &ipa3_ipadma_stats_ops);
+	if (!dfile_info || IS_ERR(dfile_info)) {
+		IPADMA_ERR("fail to create file stats\n");
+		goto fail;
+	}
+	return;
+fail:
+	debugfs_remove_recursive(dent);
+}
+
+static void ipa3_dma_debugfs_destroy(void)
+{
+	debugfs_remove_recursive(dent);
+}
+
+#endif /* CONFIG_DEBUG_FS */
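+
+/*
+ * A minimal userspace sketch of how the debugfs interface above can be
+ * exercised, assuming debugfs is mounted at /sys/kernel/debug: reading
+ * "info" dumps status and statistics, writing "0" resets the memcpy
+ * counters. Kept under #if 0 since it is a standalone userspace program,
+ * not kernel code.
+ */
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+	char buf[512];
+	ssize_t n;
+	int fd = open("/sys/kernel/debug/ipa_dma/info", O_RDWR);
+
+	if (fd < 0)
+		return 1;
+	n = read(fd, buf, sizeof(buf) - 1);	/* dump current statistics */
+	if (n > 0) {
+		buf[n] = '\0';
+		printf("%s", buf);
+	}
+	write(fd, "0", 1);			/* reset memcpy counters */
+	close(fd);
+	return 0;
+}
+#endif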
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
new file mode 100644
index 0000000..574e81c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -0,0 +1,4414 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmapool.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/msm_gsi.h>
+#include "ipa_i.h"
+#include "ipa_trace.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_LAST_DESC_CNT 0xFFFF
+#define POLLING_INACTIVITY_RX 40
+#define POLLING_MIN_SLEEP_RX 1010
+#define POLLING_MAX_SLEEP_RX 1050
+#define POLLING_INACTIVITY_TX 40
+#define POLLING_MIN_SLEEP_TX 400
+#define POLLING_MAX_SLEEP_TX 500
+#define IPA_MTU 1500
+/* 8K buffer less 1 nominal MTU (1500 bytes), rounded down to units of KB */
+#define IPA_GENERIC_AGGR_BYTE_LIMIT 6
+#define IPA_GENERIC_AGGR_TIME_LIMIT 1
+#define IPA_GENERIC_AGGR_PKT_LIMIT 0
+
+#define IPA_GENERIC_RX_BUFF_BASE_SZ 8192
+#define IPA_REAL_GENERIC_RX_BUFF_SZ(X) (SKB_DATA_ALIGN(\
+		(X) + NET_SKB_PAD) +\
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define IPA_GENERIC_RX_BUFF_SZ(X) ((X) -\
+		(IPA_REAL_GENERIC_RX_BUFF_SZ(X) - (X)))
+#define IPA_GENERIC_RX_BUFF_LIMIT (\
+		IPA_REAL_GENERIC_RX_BUFF_SZ(\
+		IPA_GENERIC_RX_BUFF_BASE_SZ) -\
+		IPA_GENERIC_RX_BUFF_BASE_SZ)
+
+/* less 1 nominal MTU (1500 bytes) rounded to units of KB */
+#define IPA_ADJUST_AGGR_BYTE_LIMIT(X) (((X) - IPA_MTU)/1000)
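+
+/*
+ * Worked example of the adjustment above (illustrative): applied to the 8K
+ * base buffer size, IPA_ADJUST_AGGR_BYTE_LIMIT(8192) = (8192 - 1500) / 1000
+ * = 6, i.e. the IPA_GENERIC_AGGR_BYTE_LIMIT default of 6 KB defined earlier.
+ */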
+
+#define IPA_RX_BUFF_CLIENT_HEADROOM 256
+
+#define IPA_WLAN_RX_POOL_SZ 100
+#define IPA_WLAN_RX_POOL_SZ_LOW_WM 5
+#define IPA_WLAN_RX_BUFF_SZ 2048
+#define IPA_WLAN_COMM_RX_POOL_LOW 100
+#define IPA_WLAN_COMM_RX_POOL_HIGH 900
+
+#define IPA_ODU_RX_BUFF_SZ 2048
+#define IPA_ODU_RX_POOL_SZ 64
+#define IPA_SIZE_DL_CSUM_META_TRAILER 8
+
+#define IPA_GSI_EVT_RING_LEN 4096
+#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
+#define IPA_GSI_EVT_RING_INT_MODT 3200 /* 0.1s under 32KHz clock */
+
+#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
+/* The below virtual channel cannot be used by any entity */
+#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
+
+#define IPA_DEFAULT_SYS_YELLOW_WM 32
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_replenish_rx_work_func(struct work_struct *work);
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
+static void ipa3_wq_handle_rx(struct work_struct *work);
+static void ipa3_wq_handle_tx(struct work_struct *work);
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
+				u32 size);
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys);
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
+static void ipa3_wq_rx_avail(struct work_struct *work);
+static void ipa3_alloc_wlan_rx_common_cache(u32 size);
+static void ipa3_cleanup_wlan_rx_common_cache(void);
+static void ipa3_wq_repl_rx(struct work_struct *work);
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
+		struct ipa_mem_buffer *mem_info);
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep);
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+		struct ipa3_tx_pkt_wrapper *tx_pkt,
+		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state);
+static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state);
+static unsigned long tag_to_pointer_wa(uint64_t tag);
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
+
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit);
+
+static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
+				struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	struct ipa3_tx_pkt_wrapper *next_pkt;
+	int i, cnt;
+
+	if (unlikely(tx_pkt == NULL)) {
+		IPAERR("tx_pkt is NULL\n");
+		return;
+	}
+
+	cnt = tx_pkt->cnt;
+	IPADBG_LOW("cnt: %d\n", cnt);
+	for (i = 0; i < cnt; i++) {
+		spin_lock_bh(&sys->spinlock);
+		if (unlikely(list_empty(&sys->head_desc_list))) {
+			spin_unlock_bh(&sys->spinlock);
+			return;
+		}
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		sys->len--;
+		spin_unlock_bh(&sys->spinlock);
+		if (!tx_pkt->no_unmap_dma) {
+			if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+			} else {
+				dma_unmap_page(ipa3_ctx->pdev,
+					next_pkt->mem.phys_base,
+					next_pkt->mem.size,
+					DMA_TO_DEVICE);
+			}
+		}
+		if (tx_pkt->callback)
+			tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
+
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_SPS
+			&& tx_pkt->cnt > 1
+			&& tx_pkt->cnt != IPA_LAST_DESC_CNT) {
+			if (tx_pkt->cnt == IPA_NUM_DESC_PER_SW_TX) {
+				dma_pool_free(ipa3_ctx->dma_pool,
+					tx_pkt->mult.base,
+					tx_pkt->mult.phys_base);
+			} else {
+				dma_unmap_single(ipa3_ctx->pdev,
+					tx_pkt->mult.phys_base,
+					tx_pkt->mult.size,
+					DMA_TO_DEVICE);
+				kfree(tx_pkt->mult.base);
+			}
+		}
+
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+}
+
+static void ipa3_wq_write_done_status(int src_pipe,
+			struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	struct ipa3_sys_context *sys;
+
+	WARN_ON(src_pipe >= ipa3_ctx->ipa_num_pipes);
+
+	if (!ipa3_ctx->ep[src_pipe].status.status_en)
+		return;
+
+	sys = ipa3_ctx->ep[src_pipe].sys;
+	if (!sys)
+		return;
+
+	ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+/**
+ * ipa3_wq_write_done() - this function will be (eventually) called when a Tx
+ * operation is complete
+ * @work:	work_struct used by the work queue
+ *
+ * Will be called in deferred context.
+ * - invoke the callback supplied by the client who sent this command
+ * - iterate over all packets and validate that
+ *   the order for sent packet is the same as expected
+ * - delete all the tx packet descriptors from the system
+ *   pipe context (not needed anymore)
+ * - return the tx buffer back to dma_pool
+ */
+static void ipa3_wq_write_done(struct work_struct *work)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+	struct ipa3_sys_context *sys;
+
+	tx_pkt = container_of(work, struct ipa3_tx_pkt_wrapper, work);
+	sys = tx_pkt->sys;
+
+	ipa3_wq_write_done_common(sys, tx_pkt);
+}
+
+static int ipa3_handle_tx_core(struct ipa3_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	struct sps_iovec iov;
+	struct ipa3_tx_pkt_wrapper *tx_pkt_expected;
+	int ret;
+	int cnt = 0;
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+				!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+		ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+		if (ret) {
+			IPAERR("sps_get_iovec failed %d\n", ret);
+			break;
+		}
+
+		if (iov.addr == 0)
+			break;
+
+		tx_pkt_expected = list_first_entry(&sys->head_desc_list,
+						   struct ipa3_tx_pkt_wrapper,
+						   link);
+		ipa3_wq_write_done_common(sys, tx_pkt_expected);
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa3_tx_switch_to_intr_mode() - Operate the Tx data path in interrupt mode
+ */
+static void ipa3_tx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+	int ret;
+
+	if (!atomic_read(&sys->curr_polling_state)) {
+		IPAERR("already in intr mode\n");
+		goto fail;
+	}
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_dec_release_wakelock();
+		ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+		if (ret != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to switch to intr mode.\n");
+			goto fail;
+		}
+	} else {
+		ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			goto fail;
+		}
+		sys->event.options = SPS_O_EOT;
+		ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+		if (ret) {
+			IPAERR("sps_register_event() failed %d\n", ret);
+			goto fail;
+		}
+		sys->ep->connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+		ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			goto fail;
+		}
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_handle_tx_core(sys, true, false);
+		ipa3_dec_release_wakelock();
+	}
+	return;
+
+fail:
+	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+			msecs_to_jiffies(1));
+}
+
+static void ipa3_handle_tx(struct ipa3_sys_context *sys)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	do {
+		cnt = ipa3_handle_tx_core(sys, true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			usleep_range(POLLING_MIN_SLEEP_TX,
+					POLLING_MAX_SLEEP_TX);
+		} else {
+			inactive_cycles = 0;
+		}
+	} while (inactive_cycles <= POLLING_INACTIVITY_TX);
+
+	ipa3_tx_switch_to_intr_mode(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_wq_handle_tx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, work);
+
+	ipa3_handle_tx(sys);
+}
+
+/**
+ * ipa3_send_one() - Send a single descriptor
+ * @sys:	system pipe context
+ * @desc:	descriptor to send
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * - Allocate tx_packet wrapper
+ * - transfer data to the IPA
+ * - after the transfer was done the SPS will
+ *   notify the sending user via ipa_sps_irq_comp_tx()
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+		bool in_atomic)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+	struct gsi_xfer_elem gsi_xfer;
+	int result;
+	u16 sps_flags = SPS_IOVEC_FLAG_EOT;
+	dma_addr_t dma_address;
+	u16 len;
+	u32 mem_flag = GFP_ATOMIC;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache, mem_flag);
+	if (!tx_pkt) {
+		IPAERR("failed to alloc tx wrapper\n");
+		goto fail_mem_alloc;
+	}
+
+	if (!desc->dma_address_valid) {
+		dma_address = dma_map_single(ipa3_ctx->pdev, desc->pyld,
+			desc->len, DMA_TO_DEVICE);
+	} else {
+		dma_address = desc->dma_address;
+		tx_pkt->no_unmap_dma = true;
+	}
+	if (!dma_address) {
+		IPAERR("failed to DMA wrap\n");
+		goto fail_dma_map;
+	}
+
+	INIT_LIST_HEAD(&tx_pkt->link);
+	tx_pkt->type = desc->type;
+	tx_pkt->cnt = 1;    /* only 1 desc in this "set" */
+
+	tx_pkt->mem.phys_base = dma_address;
+	tx_pkt->mem.base = desc->pyld;
+	tx_pkt->mem.size = desc->len;
+	tx_pkt->sys = sys;
+	tx_pkt->callback = desc->callback;
+	tx_pkt->user1 = desc->user1;
+	tx_pkt->user2 = desc->user2;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		memset(&gsi_xfer, 0, sizeof(gsi_xfer));
+		gsi_xfer.addr = dma_address;
+		gsi_xfer.flags |= GSI_XFER_FLAG_EOT;
+		gsi_xfer.xfer_user_data = tx_pkt;
+		if (desc->type == IPA_IMM_CMD_DESC) {
+			gsi_xfer.len = desc->opcode;
+			gsi_xfer.type = GSI_XFER_ELEM_IMME_CMD;
+		} else {
+			gsi_xfer.len = desc->len;
+			gsi_xfer.type = GSI_XFER_ELEM_DATA;
+		}
+	} else {
+		/*
+		 * Special treatment for immediate commands, where the
+		 * structure of the descriptor is different
+		 */
+		if (desc->type == IPA_IMM_CMD_DESC) {
+			sps_flags |= SPS_IOVEC_FLAG_IMME;
+			len = desc->opcode;
+			IPADBG_LOW("sending cmd=%d pyld_len=%d sps_flags=%x\n",
+					desc->opcode, desc->len, sps_flags);
+			IPA_DUMP_BUFF(desc->pyld, dma_address, desc->len);
+		} else {
+			len = desc->len;
+		}
+	}
+
+	INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
+
+	spin_lock_bh(&sys->spinlock);
+	list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+					&gsi_xfer, true);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("GSI xfer failed.\n");
+			goto fail_transport_send;
+		}
+	} else {
+		result = sps_transfer_one(sys->ep->ep_hdl, dma_address,
+					len, tx_pkt, sps_flags);
+		if (result) {
+			IPAERR("sps_transfer_one failed rc=%d\n", result);
+			goto fail_transport_send;
+		}
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+
+	return 0;
+
+fail_transport_send:
+	list_del(&tx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
+	dma_unmap_single(ipa3_ctx->pdev, dma_address, desc->len, DMA_TO_DEVICE);
+fail_dma_map:
+	kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+fail_mem_alloc:
+	return -EFAULT;
+}
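+
+/*
+ * A minimal usage sketch for ipa3_send_one() (illustrative, not part of the
+ * driver API): queue a single data descriptor on an already set-up system
+ * pipe. The client chosen for the endpoint lookup and the completion
+ * callback wiring are example assumptions.
+ */
+static int ipa3_send_one_sketch(void *payload, u16 len,
+	void (*done_cb)(void *user1, int user2))
+{
+	struct ipa3_desc desc;
+	int ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+
+	if (ep_idx == -1)
+		return -EFAULT;
+
+	memset(&desc, 0, sizeof(desc));
+	desc.pyld = payload;		/* DMA-mapped internally by ipa3_send_one */
+	desc.len = len;
+	desc.type = IPA_DATA_DESC_SKB;	/* plain (non-paged) data descriptor */
+	desc.callback = done_cb;	/* invoked from the Tx-done work item */
+	desc.user1 = payload;
+
+	return ipa3_send_one(ipa3_ctx->ep[ep_idx].sys, &desc, true);
+}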
+
+/**
+ * ipa3_send() - Send multiple descriptors in one HW transaction
+ * @sys: system pipe context
+ * @num_desc: number of packets
+ * @desc: packets to send (may be immediate command or data)
+ * @in_atomic:  whether caller is in atomic context
+ *
+ * This function is used for system-to-bam connection.
+ * - SPS driver expects struct sps_transfer which will contain all the data
+ *   for a transaction
+ * - ipa3_tx_pkt_wrapper will be used for each ipa
+ *   descriptor (allocated from wrappers cache)
+ * - The wrapper struct will be configured for each ipa-desc payload and will
+ *   contain information which will be later used by the user callbacks
+ * - each transfer will be made by calling sps_transfer()
+ * - Each packet (command or data) that will be sent will also be saved in
+ *   ipa3_sys_context for later check that all data was sent
+ *
+ * Return codes: 0: success, -EFAULT: failure
+ */
+int ipa3_send(struct ipa3_sys_context *sys,
+		u32 num_desc,
+		struct ipa3_desc *desc,
+		bool in_atomic)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt, *tx_pkt_first;
+	struct ipahal_imm_cmd_pyld *tag_pyld_ret = NULL;
+	struct ipa3_tx_pkt_wrapper *next_pkt;
+	struct sps_transfer transfer = { 0 };
+	struct sps_iovec *iovec;
+	struct gsi_xfer_elem *gsi_xfer_elem_array = NULL;
+	dma_addr_t dma_addr;
+	int i = 0;
+	int j;
+	int result;
+	int fail_dma_wrap = 0;
+	uint size;
+	u32 mem_flag = GFP_ATOMIC;
+	int ipa_ep_idx;
+	struct ipa_gsi_ep_config *gsi_ep_cfg;
+
+	if (unlikely(!in_atomic))
+		mem_flag = GFP_KERNEL;
+
+	size = num_desc * sizeof(struct sps_iovec);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		ipa_ep_idx = ipa3_get_ep_mapping(sys->ep->client);
+		if (unlikely(ipa_ep_idx < 0)) {
+			IPAERR("invalid ep_index of client = %d\n",
+				sys->ep->client);
+			return -EFAULT;
+		}
+		gsi_ep_cfg = ipa3_get_gsi_ep_info(ipa_ep_idx);
+		if (unlikely(!gsi_ep_cfg)) {
+			IPAERR("failed to get gsi EP config of ep_idx=%d\n",
+				ipa_ep_idx);
+			return -EFAULT;
+		}
+		if (unlikely(num_desc > gsi_ep_cfg->ipa_if_tlv)) {
+			IPAERR("Too many chained descriptors need=%d max=%d\n",
+				num_desc, gsi_ep_cfg->ipa_if_tlv);
+			WARN_ON(1);
+			return -EPERM;
+		}
+
+		gsi_xfer_elem_array =
+			kzalloc(num_desc * sizeof(struct gsi_xfer_elem),
+			mem_flag);
+		if (!gsi_xfer_elem_array) {
+			IPAERR("Failed to alloc mem for gsi xfer array.\n");
+			return -EFAULT;
+		}
+	} else {
+		if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+			transfer.iovec = dma_pool_alloc(ipa3_ctx->dma_pool,
+					mem_flag, &dma_addr);
+			if (!transfer.iovec) {
+				IPAERR("fail to alloc dma mem\n");
+				return -EFAULT;
+			}
+		} else {
+			transfer.iovec = kmalloc(size, mem_flag);
+			if (!transfer.iovec) {
+				IPAERR("fail to alloc mem for sps xfr buff ");
+				IPAERR("num_desc = %d size = %d\n",
+						num_desc, size);
+				return -EFAULT;
+			}
+			dma_addr  = dma_map_single(ipa3_ctx->pdev,
+					transfer.iovec, size, DMA_TO_DEVICE);
+			if (!dma_addr) {
+				IPAERR("dma_map_single failed\n");
+				kfree(transfer.iovec);
+				return -EFAULT;
+			}
+		}
+		transfer.iovec_phys = dma_addr;
+		transfer.iovec_count = num_desc;
+	}
+
+	spin_lock_bh(&sys->spinlock);
+
+	for (i = 0; i < num_desc; i++) {
+		fail_dma_wrap = 0;
+		tx_pkt = kmem_cache_zalloc(ipa3_ctx->tx_pkt_wrapper_cache,
+					   mem_flag);
+		if (!tx_pkt) {
+			IPAERR("failed to alloc tx wrapper\n");
+			goto failure;
+		}
+
+		INIT_LIST_HEAD(&tx_pkt->link);
+
+		if (i == 0) {
+			tx_pkt_first = tx_pkt;
+			tx_pkt->cnt = num_desc;
+			INIT_WORK(&tx_pkt->work, ipa3_wq_write_done);
+		}
+
+		/* populate tag field */
+		if (desc[i].opcode ==
+			ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS)) {
+			if (ipa_populate_tag_field(&desc[i], tx_pkt,
+				&tag_pyld_ret)) {
+				IPAERR("Failed to populate tag field\n");
+				goto failure;
+			}
+		}
+
+		tx_pkt->type = desc[i].type;
+
+		if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+			tx_pkt->mem.base = desc[i].pyld;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					dma_map_single(ipa3_ctx->pdev,
+					tx_pkt->mem.base,
+					tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+				if (!tx_pkt->mem.phys_base) {
+					IPAERR("failed to do dma map.\n");
+					fail_dma_wrap = 1;
+					goto failure;
+				}
+			} else {
+					tx_pkt->mem.phys_base =
+						desc[i].dma_address;
+					tx_pkt->no_unmap_dma = true;
+			}
+		} else {
+			tx_pkt->mem.base = desc[i].frag;
+			tx_pkt->mem.size = desc[i].len;
+
+			if (!desc[i].dma_address_valid) {
+				tx_pkt->mem.phys_base =
+					skb_frag_dma_map(ipa3_ctx->pdev,
+					desc[i].frag,
+					0, tx_pkt->mem.size,
+					DMA_TO_DEVICE);
+				if (!tx_pkt->mem.phys_base) {
+					IPAERR("dma map failed\n");
+					fail_dma_wrap = 1;
+					goto failure;
+				}
+			} else {
+				tx_pkt->mem.phys_base =
+					desc[i].dma_address;
+				tx_pkt->no_unmap_dma = true;
+			}
+		}
+		tx_pkt->sys = sys;
+		tx_pkt->callback = desc[i].callback;
+		tx_pkt->user1 = desc[i].user1;
+		tx_pkt->user2 = desc[i].user2;
+
+		list_add_tail(&tx_pkt->link, &sys->head_desc_list);
+
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+			gsi_xfer_elem_array[i].addr = tx_pkt->mem.phys_base;
+
+			/*
+			 * Special treatment for immediate commands, where
+			 * the structure of the descriptor is different
+			 */
+			if (desc[i].type == IPA_IMM_CMD_DESC) {
+				gsi_xfer_elem_array[i].len = desc[i].opcode;
+				gsi_xfer_elem_array[i].type =
+					GSI_XFER_ELEM_IMME_CMD;
+			} else {
+				gsi_xfer_elem_array[i].len = desc[i].len;
+				gsi_xfer_elem_array[i].type =
+					GSI_XFER_ELEM_DATA;
+			}
+
+			if (i == (num_desc - 1)) {
+				gsi_xfer_elem_array[i].flags |=
+					GSI_XFER_FLAG_EOT;
+				gsi_xfer_elem_array[i].xfer_user_data =
+					tx_pkt_first;
+				/* "mark" the last desc */
+				tx_pkt->cnt = IPA_LAST_DESC_CNT;
+			} else
+				gsi_xfer_elem_array[i].flags |=
+					GSI_XFER_FLAG_CHAIN;
+		} else {
+			/*
+			 * first desc of set is "special" as it
+			 * holds the count and other info
+			 */
+			if (i == 0) {
+				transfer.user = tx_pkt;
+				tx_pkt->mult.phys_base = dma_addr;
+				tx_pkt->mult.base = transfer.iovec;
+				tx_pkt->mult.size = size;
+			}
+
+			iovec = &transfer.iovec[i];
+			iovec->flags = 0;
+			/* Point the iovec to the buffer */
+			iovec->addr = tx_pkt->mem.phys_base;
+			/*
+			 * Special treatment for immediate commands, where
+			 * the structure of the descriptor is different
+			 */
+			if (desc[i].type == IPA_IMM_CMD_DESC) {
+				iovec->size = desc[i].opcode;
+				iovec->flags |= SPS_IOVEC_FLAG_IMME;
+				IPA_DUMP_BUFF(desc[i].pyld,
+					tx_pkt->mem.phys_base, desc[i].len);
+			} else {
+				iovec->size = desc[i].len;
+			}
+
+			if (i == (num_desc - 1)) {
+				iovec->flags |= SPS_IOVEC_FLAG_EOT;
+				/* "mark" the last desc */
+				tx_pkt->cnt = IPA_LAST_DESC_CNT;
+			}
+		}
+	}
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = gsi_queue_xfer(sys->ep->gsi_chan_hdl, num_desc,
+				gsi_xfer_elem_array, true);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("GSI xfer failed.\n");
+			goto failure;
+		}
+		kfree(gsi_xfer_elem_array);
+	} else {
+		result = sps_transfer(sys->ep->ep_hdl, &transfer);
+		if (result) {
+			IPAERR("sps_transfer failed rc=%d\n", result);
+			goto failure;
+		}
+	}
+
+	spin_unlock_bh(&sys->spinlock);
+	return 0;
+
+failure:
+	ipahal_destroy_imm_cmd(tag_pyld_ret);
+	tx_pkt = tx_pkt_first;
+	for (j = 0; j < i; j++) {
+		next_pkt = list_next_entry(tx_pkt, link);
+		list_del(&tx_pkt->link);
+		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+			dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
+				tx_pkt->mem.size,
+				DMA_TO_DEVICE);
+		} else {
+			dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
+				tx_pkt->mem.size,
+				DMA_TO_DEVICE);
+		}
+		kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+		tx_pkt = next_pkt;
+	}
+	if (j < num_desc)
+		/* last desc failed */
+		if (fail_dma_wrap)
+			kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		kfree(gsi_xfer_elem_array);
+	} else {
+		if (transfer.iovec_phys) {
+			if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
+				dma_pool_free(ipa3_ctx->dma_pool,
+					transfer.iovec, transfer.iovec_phys);
+			} else {
+				dma_unmap_single(ipa3_ctx->pdev,
+					transfer.iovec_phys, size,
+					DMA_TO_DEVICE);
+				kfree(transfer.iovec);
+			}
+		}
+	}
+	spin_unlock_bh(&sys->spinlock);
+	return -EFAULT;
+}
+
+/**
+ * ipa3_transport_irq_cmd_ack - callback function which will be called by
+ * SPS/GSI driver after an immediate command is complete.
+ * @user1:	pointer to the descriptor of the transfer
+ * @user2:	not used
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa3_send_cmd())
+ */
+static void ipa3_transport_irq_cmd_ack(void *user1, int user2)
+{
+	struct ipa3_desc *desc = (struct ipa3_desc *)user1;
+
+	if (!desc) {
+		IPAERR("desc is NULL\n");
+		WARN_ON(1);
+		return;
+	}
+	IPADBG_LOW("got ack for cmd=%d\n", desc->opcode);
+	complete(&desc->xfer_done);
+}
+
+/**
+ * ipa3_transport_irq_cmd_ack_free - callback function which will be
+ * called by SPS/GSI driver after an immediate command is complete.
+ * This function will also free the completion object once it is done.
+ * @tag_comp: pointer to the completion object
+ * @ignored: parameter not used
+ *
+ * Complete the immediate commands completion object, this will release the
+ * thread which waits on this completion object (ipa3_send_cmd())
+ */
+static void ipa3_transport_irq_cmd_ack_free(void *tag_comp, int ignored)
+{
+	struct ipa3_tag_completion *comp = tag_comp;
+
+	if (!comp) {
+		IPAERR("comp is NULL\n");
+		return;
+	}
+
+	complete(&comp->comp);
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+}
+
+/**
+ * ipa3_send_cmd - send immediate commands
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ *
+ * Function will block till command gets ACK from IPA HW; caller needs
+ * to free any resources it allocated after the function returns.
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr)
+{
+	struct ipa3_desc *desc;
+	int i, result = 0;
+	struct ipa3_sys_context *sys;
+	int ep_idx;
+
+	for (i = 0; i < num_desc; i++)
+		IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+
+	sys = ipa3_ctx->ep[ep_idx].sys;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (num_desc == 1) {
+		init_completion(&descr->xfer_done);
+
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa3_transport_irq_cmd_ack;
+		descr->user1 = descr;
+		if (ipa3_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&descr->xfer_done);
+	} else {
+		desc = &descr[num_desc - 1];
+		init_completion(&desc->xfer_done);
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa3_transport_irq_cmd_ack;
+		desc->user1 = desc;
+		if (ipa3_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			result = -EFAULT;
+			goto bail;
+		}
+		wait_for_completion(&desc->xfer_done);
+	}
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
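+
+/*
+ * A usage sketch for ipa3_send_cmd() (illustrative): build one immediate
+ * command payload through ipahal and block until IPA HW ACKs it. The choice
+ * of IP_PACKET_INIT and the destination pipe index are example values only.
+ */
+static int ipa3_send_cmd_sketch(int dst_ep_idx)
+{
+	struct ipahal_imm_cmd_ip_packet_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa3_desc desc;
+	int rc;
+
+	memset(&desc, 0, sizeof(desc));
+	cmd.destination_pipe_index = dst_ep_idx;
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+		&cmd, true);
+	if (!cmd_pyld)
+		return -ENOMEM;
+
+	desc.opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	rc = ipa3_send_cmd(1, &desc);	/* blocks until the command is ACKed */
+	ipahal_destroy_imm_cmd(cmd_pyld);
+	return rc;
+}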
+
+/**
+ * ipa3_send_cmd_timeout - send immediate commands with limited time
+ *	waiting for ACK from IPA HW
+ * @num_desc:	number of descriptors within the desc struct
+ * @descr:	descriptor structure
+ * @timeout:	milliseconds to wait for an ACK from IPA HW
+ *
+ * Function will block till command gets ACK from IPA HW or timeout expires.
+ * Caller needs to free any resources it allocated after the function returns.
+ * The callback in ipa3_desc should not be set by the caller
+ * for this function.
+ */
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout)
+{
+	struct ipa3_desc *desc;
+	int i, result = 0;
+	struct ipa3_sys_context *sys;
+	int ep_idx;
+	int completed;
+	struct ipa3_tag_completion *comp;
+
+	for (i = 0; i < num_desc; i++)
+		IPADBG("sending imm cmd %d\n", descr[i].opcode);
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+
+	comp = kzalloc(sizeof(*comp), GFP_ATOMIC);
+	if (!comp) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+	init_completion(&comp->comp);
+
+	/* completion needs to be released from both here and in ack callback */
+	atomic_set(&comp->cnt, 2);
+
+	sys = ipa3_ctx->ep[ep_idx].sys;
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	if (num_desc == 1) {
+		if (descr->callback || descr->user1)
+			WARN_ON(1);
+
+		descr->callback = ipa3_transport_irq_cmd_ack_free;
+		descr->user1 = comp;
+		if (ipa3_send_one(sys, descr, true)) {
+			IPAERR("fail to send immediate command\n");
+			kfree(comp);
+			result = -EFAULT;
+			goto bail;
+		}
+	} else {
+		desc = &descr[num_desc - 1];
+
+		if (desc->callback || desc->user1)
+			WARN_ON(1);
+
+		desc->callback = ipa3_transport_irq_cmd_ack_free;
+		desc->user1 = comp;
+		if (ipa3_send(sys, num_desc, descr, true)) {
+			IPAERR("fail to send multiple immediate command set\n");
+			kfree(comp);
+			result = -EFAULT;
+			goto bail;
+		}
+	}
+
+	completed = wait_for_completion_timeout(
+		&comp->comp, msecs_to_jiffies(timeout));
+	if (!completed)
+		IPADBG("timeout waiting for imm-cmd ACK\n");
+
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
+bail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa3_sps_irq_tx_notify() - Callback function which will be called by
+ * the SPS driver to start a Tx poll operation.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ */
+static void ipa3_sps_irq_tx_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
+	int ret;
+
+	IPADBG_LOW("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			ret = sps_get_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+			if (ret) {
+				IPAERR("sps_get_config() failed %d\n", ret);
+				break;
+			}
+			sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+				SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+			ret = sps_set_config(sys->ep->ep_hdl,
+					&sys->ep->connect);
+			if (ret) {
+				IPAERR("sps_set_config() failed %d\n", ret);
+				break;
+			}
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa3_sps_irq_tx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Tx operation is complete.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to the tx workqueue.
+ * This event will be later handled by ipa3_wq_write_done().
+ */
+static void ipa3_sps_irq_tx_no_aggr_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG_LOW("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		tx_pkt = notify->data.transfer.user;
+		if (IPA_CLIENT_IS_APPS_CONS(tx_pkt->sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa3_handle_rx_core() - The core functionality of packet reception. This
+ * function is called from multiple code paths.
+ *
+ * All the packets on the Rx data path are received on the IPA_A5_LAN_WAN_IN
+ * endpoint. The function runs as long as there are packets in the pipe.
+ * For each packet:
+ *  - Disconnect the packet from the system pipe linked list
+ *  - Unmap the packets skb, make it non DMAable
+ *  - Free the packet from the cache
+ *  - Prepare a proper skb
+ *  - Call the endpoints notify function, passing the skb in the parameters
+ *  - Replenish the rx cache
+ */
+static int ipa3_handle_rx_core(struct ipa3_sys_context *sys, bool process_all,
+		bool in_poll_state)
+{
+	int cnt;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+		cnt = ipa_handle_rx_core_gsi(sys, process_all, in_poll_state);
+	else
+		cnt = ipa_handle_rx_core_sps(sys, process_all, in_poll_state);
+
+	return cnt;
+}
+
+/**
+ * ipa3_rx_switch_to_intr_mode() - Operate the Rx data path in interrupt mode
+ */
+static void ipa3_rx_switch_to_intr_mode(struct ipa3_sys_context *sys)
+{
+	int ret;
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		if (!atomic_read(&sys->curr_polling_state)) {
+			IPAERR("already in intr mode\n");
+			goto fail;
+		}
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_dec_release_wakelock();
+		ret = gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+			GSI_CHAN_MODE_CALLBACK);
+		if (ret != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to switch to intr mode.\n");
+			goto fail;
+		}
+	} else {
+		ret = sps_get_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			goto fail;
+		}
+		if (!atomic_read(&sys->curr_polling_state) &&
+			((sys->ep->connect.options & SPS_O_EOT) == SPS_O_EOT)) {
+			IPADBG("already in intr mode\n");
+			return;
+		}
+		if (!atomic_read(&sys->curr_polling_state)) {
+			IPAERR("already in intr mode\n");
+			goto fail;
+		}
+		sys->event.options = SPS_O_EOT;
+		ret = sps_register_event(sys->ep->ep_hdl, &sys->event);
+		if (ret) {
+			IPAERR("sps_register_event() failed %d\n", ret);
+			goto fail;
+		}
+		sys->ep->connect.options =
+			SPS_O_AUTO_ENABLE | SPS_O_ACK_TRANSFERS | SPS_O_EOT;
+		ret = sps_set_config(sys->ep->ep_hdl, &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			goto fail;
+		}
+		atomic_set(&sys->curr_polling_state, 0);
+		ipa3_handle_rx_core(sys, true, false);
+		ipa3_dec_release_wakelock();
+	}
+	return;
+
+fail:
+	queue_delayed_work(sys->wq, &sys->switch_to_intr_work,
+			msecs_to_jiffies(1));
+}
+
+/**
+ * ipa3_sps_irq_rx_notify() - Callback function which is called by the SPS
+ * driver when a packet is received
+ * @notify:	SPS driver supplied notification information
+ *
+ * Called in an interrupt context, therefore the majority of the work is
+ * deferred using a work queue.
+ *
+ * After receiving a packet, the driver goes to polling mode and keeps pulling
+ * packets until the rx buffer is empty, then it goes back to interrupt mode.
+ * This prevents the CPU from handling too many interrupts when the
+ * throughput is high.
+ */
+static void ipa3_sps_irq_rx_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)notify->user;
+	int ret;
+
+	IPADBG_LOW("event %d notified\n", notify->event_id);
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		if (IPA_CLIENT_IS_APPS_CONS(sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			sys->ep->eot_in_poll_err++;
+			break;
+		}
+
+		ret = sps_get_config(sys->ep->ep_hdl,
+							 &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_get_config() failed %d\n", ret);
+			break;
+		}
+		sys->ep->connect.options = SPS_O_AUTO_ENABLE |
+			  SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		ret = sps_set_config(sys->ep->ep_hdl,
+							 &sys->ep->connect);
+		if (ret) {
+			IPAERR("sps_set_config() failed %d\n", ret);
+			break;
+		}
+		ipa3_inc_acquire_wakelock();
+		atomic_set(&sys->curr_polling_state, 1);
+		trace_intr_to_poll3(sys->ep->client);
+		queue_work(sys->wq, &sys->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->event_id);
+	}
+}
+
+/**
+ * ipa3_switch_to_intr_tx_work_func() - Wrapper function to move from polling
+ *	to interrupt mode
+ * @work: work struct
+ */
+void ipa3_switch_to_intr_tx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+	ipa3_handle_tx(sys);
+}
+
+/**
+ * ipa3_handle_rx() - handle packet reception. This function is executed in the
+ * context of a work queue.
+ * @sys: system pipe context
+ *
+ * ipa3_handle_rx_core() is run in polling mode. After all packets have been
+ * received, the driver switches back to interrupt mode.
+ */
+static void ipa3_handle_rx(struct ipa3_sys_context *sys)
+{
+	int inactive_cycles = 0;
+	int cnt;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	do {
+		cnt = ipa3_handle_rx_core(sys, true, true);
+		if (cnt == 0) {
+			inactive_cycles++;
+			trace_idle_sleep_enter3(sys->ep->client);
+			usleep_range(POLLING_MIN_SLEEP_RX,
+					POLLING_MAX_SLEEP_RX);
+			trace_idle_sleep_exit3(sys->ep->client);
+		} else {
+			inactive_cycles = 0;
+		}
+	} while (inactive_cycles <= POLLING_INACTIVITY_RX);
+
+	trace_poll_to_intr3(sys->ep->client);
+	ipa3_rx_switch_to_intr_mode(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static void ipa3_switch_to_intr_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
+
+	if (sys->ep->napi_enabled) {
+		if (sys->ep->switch_to_intr) {
+			ipa3_rx_switch_to_intr_mode(sys);
+			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
+			sys->ep->switch_to_intr = false;
+			sys->ep->inactive_cycles = 0;
+		} else
+			sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+	} else
+		ipa3_handle_rx(sys);
+}
+
+/**
+ * ipa3_setup_sys_pipe() - Setup an IPA end-point in system-BAM mode and perform
+ * IPA EP configuration
+ * @sys_in:	[in] input needed to setup BAM pipe and configure EP
+ * @clnt_hdl:	[out] client handle
+ *
+ *  - configure the end-point registers with the supplied
+ *    parameters from the user.
+ *  - call SPS APIs to create a system-to-bam connection with IPA.
+ *  - allocate descriptor FIFO
+ *  - register a callback function (ipa3_sps_irq_rx_notify or
+ *    ipa3_sps_irq_tx_notify - depends on client type) in case the driver is
+ *    not configured to polling mode
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+	dma_addr_t dma_addr;
+	char buff[IPA_RESOURCE_NAME_MAX];
+	struct iommu_domain *smmu_domain;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (sys_in->client >= IPA_CLIENT_MAX || sys_in->desc_fifo_sz == 0) {
+		IPAERR("bad parm client:%d fifo_sz:%d\n",
+			sys_in->client, sys_in->desc_fifo_sz);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail_gen;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			IPAERR("EP already allocated.\n");
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP.\n");
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP.\n");
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPADBG("client %d (ep: %d) overlay ok sys=%p\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	if (!ep->sys) {
+		ep->sys = kzalloc(sizeof(struct ipa3_sys_context), GFP_KERNEL);
+		if (!ep->sys) {
+			IPAERR("failed to sys ctx for client %d\n",
+					sys_in->client);
+			result = -ENOMEM;
+			goto fail_and_disable_clocks;
+		}
+
+		ep->sys->ep = ep;
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
+				sys_in->client);
+		ep->sys->wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+		if (!ep->sys->wq) {
+			IPAERR("failed to create wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq;
+		}
+
+		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
+				sys_in->client);
+		ep->sys->repl_wq = alloc_workqueue(buff,
+				WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+		if (!ep->sys->repl_wq) {
+			IPAERR("failed to create rep wq for client %d\n",
+					sys_in->client);
+			result = -EFAULT;
+			goto fail_wq2;
+		}
+
+		INIT_LIST_HEAD(&ep->sys->head_desc_list);
+		INIT_LIST_HEAD(&ep->sys->rcycl_list);
+		spin_lock_init(&ep->sys->spinlock);
+	} else {
+		memset(ep->sys, 0, offsetof(struct ipa3_sys_context, ep));
+	}
+
+	ep->skip_ep_cfg = sys_in->skip_ep_cfg;
+	if (ipa3_assign_policy(sys_in, ep->sys)) {
+		IPAERR("failed to sys ctx for client %d\n", sys_in->client);
+		result = -ENOMEM;
+		goto fail_gen2;
+	}
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->napi_enabled = sys_in->napi_enabled;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = sys_in->keep_ipa_awake;
+	atomic_set(&ep->avail_fifo_desc,
+		((sys_in->desc_fifo_sz/sizeof(struct sps_iovec))-1));
+
+	if (ep->status.status_en && IPA_CLIENT_IS_CONS(ep->client) &&
+	    ep->sys->status_stat == NULL) {
+		ep->sys->status_stat =
+			kzalloc(sizeof(struct ipa3_status_stats), GFP_KERNEL);
+		if (!ep->sys->status_stat) {
+			IPAERR("no memory\n");
+			goto fail_gen2;
+		}
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+				ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = ipa_gsi_setup_channel(sys_in, ep);
+		if (result) {
+			IPAERR("Failed to setup GSI channel\n");
+			goto fail_gen2;
+		}
+	} else {
+		/* Default Config */
+		ep->ep_hdl = sps_alloc_endpoint();
+		if (ep->ep_hdl == NULL) {
+			IPAERR("SPS EP allocation failed.\n");
+			goto fail_gen2;
+		}
+
+		result = sps_get_config(ep->ep_hdl, &ep->connect);
+		if (result) {
+			IPAERR("fail to get config.\n");
+			goto fail_sps_cfg;
+		}
+
+		/* Specific Config */
+		if (IPA_CLIENT_IS_CONS(sys_in->client)) {
+			ep->connect.mode = SPS_MODE_SRC;
+			ep->connect.destination = SPS_DEV_HANDLE_MEM;
+			ep->connect.source = ipa3_ctx->bam_handle;
+			ep->connect.dest_pipe_index = ipa3_ctx->a5_pipe_index++;
+			ep->connect.src_pipe_index = ipa_ep_idx;
+		} else {
+			ep->connect.mode = SPS_MODE_DEST;
+			ep->connect.source = SPS_DEV_HANDLE_MEM;
+			ep->connect.destination = ipa3_ctx->bam_handle;
+			ep->connect.src_pipe_index = ipa3_ctx->a5_pipe_index++;
+			ep->connect.dest_pipe_index = ipa_ep_idx;
+		}
+
+		IPADBG("client:%d ep:%d",
+			sys_in->client, ipa_ep_idx);
+
+		IPADBG("dest_pipe_index:%d src_pipe_index:%d\n",
+			ep->connect.dest_pipe_index,
+			ep->connect.src_pipe_index);
+
+		ep->connect.options = ep->sys->sps_option;
+		ep->connect.desc.size = sys_in->desc_fifo_sz;
+		ep->connect.desc.base = dma_alloc_coherent(ipa3_ctx->pdev,
+				ep->connect.desc.size, &dma_addr, 0);
+		if (ipa3_ctx->smmu_s1_bypass) {
+			ep->connect.desc.phys_base = dma_addr;
+		} else {
+			ep->connect.desc.iova = dma_addr;
+			smmu_domain = ipa3_get_smmu_domain();
+			if (smmu_domain != NULL) {
+				ep->connect.desc.phys_base =
+					iommu_iova_to_phys(smmu_domain,
+							dma_addr);
+			}
+		}
+		if (ep->connect.desc.base == NULL) {
+			IPAERR("fail to get DMA desc memory.\n");
+			goto fail_sps_cfg;
+		}
+
+		ep->connect.event_thresh = IPA_EVENT_THRESHOLD;
+
+		result = ipa3_sps_connect_safe(ep->ep_hdl,
+				&ep->connect, sys_in->client);
+		if (result) {
+			IPAERR("sps_connect fails.\n");
+			goto fail_sps_connect;
+		}
+
+		ep->sys->event.options = SPS_O_EOT;
+		ep->sys->event.mode = SPS_TRIGGER_CALLBACK;
+		ep->sys->event.xfer_done = NULL;
+		ep->sys->event.user = ep->sys;
+		ep->sys->event.callback = ep->sys->sps_callback;
+		result = sps_register_event(ep->ep_hdl, &ep->sys->event);
+		if (result < 0) {
+			IPAERR("register event error %d\n", result);
+			goto fail_register_event;
+		}
+	}	/* end of sps config */
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (ep->sys->repl_hdlr == ipa3_fast_replenish_rx_cache) {
+		ep->sys->repl.capacity = ep->sys->rx_pool_sz + 1;
+		ep->sys->repl.cache = kzalloc(ep->sys->repl.capacity *
+				sizeof(void *), GFP_KERNEL);
+		if (!ep->sys->repl.cache) {
+			IPAERR("ep=%d fail to alloc repl cache\n", ipa_ep_idx);
+			ep->sys->repl_hdlr = ipa3_replenish_rx_cache;
+			ep->sys->repl.capacity = 0;
+		} else {
+			atomic_set(&ep->sys->repl.head_idx, 0);
+			atomic_set(&ep->sys->repl.tail_idx, 0);
+			ipa3_wq_repl_rx(&ep->sys->repl_work);
+		}
+	}
+
+	if (IPA_CLIENT_IS_CONS(sys_in->client))
+		ipa3_replenish_rx_cache(ep->sys);
+
+	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
+		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
+		atomic_inc(&ipa3_ctx->wc_memb.active_clnt_cnt);
+	}
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(sys_in->client)) {
+		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+			sys_in->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa3_install_dflt_flt_rules(ipa_ep_idx);
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_register_event:
+	sps_disconnect(ep->ep_hdl);
+fail_sps_connect:
+	dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
+			  ep->connect.desc.base,
+			  ep->connect.desc.phys_base);
+fail_sps_cfg:
+	sps_free_endpoint(ep->ep_hdl);
+fail_gen2:
+	destroy_workqueue(ep->sys->repl_wq);
+fail_wq2:
+	destroy_workqueue(ep->sys->wq);
+fail_wq:
+	kfree(ep->sys);
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
+
+/**
+ * ipa3_teardown_sys_pipe() - Teardown the system-BAM pipe and cleanup IPA EP
+ * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_teardown_sys_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int empty;
+	int result;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+	if (ep->napi_enabled) {
+		ep->switch_to_intr = true;
+		do {
+			usleep_range(95, 105);
+		} while (atomic_read(&ep->sys->curr_polling_state));
+	}
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		do {
+			spin_lock_bh(&ep->sys->spinlock);
+			empty = list_empty(&ep->sys->head_desc_list);
+			spin_unlock_bh(&ep->sys->spinlock);
+			if (!empty)
+				usleep_range(95, 105);
+			else
+				break;
+		} while (1);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		cancel_delayed_work_sync(&ep->sys->replenish_rx_work);
+	flush_workqueue(ep->sys->wq);
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		result = ipa3_stop_gsi_channel(clnt_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("GSI stop chan err: %d.\n", result);
+			BUG();
+			return result;
+		}
+		result = gsi_reset_channel(ep->gsi_chan_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to reset chan: %d.\n", result);
+			BUG();
+			return result;
+		}
+		dma_free_coherent(ipa3_ctx->pdev,
+			ep->gsi_mem_info.chan_ring_len,
+			ep->gsi_mem_info.chan_ring_base_vaddr,
+			ep->gsi_mem_info.chan_ring_base_addr);
+		result = gsi_dealloc_channel(ep->gsi_chan_hdl);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("Failed to dealloc chan: %d.\n", result);
+			BUG();
+			return result;
+		}
+
+		/* free event ring only when it is present */
+		if (ep->gsi_evt_ring_hdl != ~0) {
+			result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+			if (result != GSI_STATUS_SUCCESS) {
+				IPAERR("Failed to reset evt ring: %d.\n",
+						result);
+				BUG();
+				return result;
+			}
+			dma_free_coherent(ipa3_ctx->pdev,
+				ep->gsi_mem_info.evt_ring_len,
+				ep->gsi_mem_info.evt_ring_base_vaddr,
+				ep->gsi_mem_info.evt_ring_base_addr);
+			result = gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+			if (result != GSI_STATUS_SUCCESS) {
+				IPAERR("Failed to dealloc evt ring: %d.\n",
+						result);
+				BUG();
+				return result;
+			}
+		}
+	} else {
+		sps_disconnect(ep->ep_hdl);
+		dma_free_coherent(ipa3_ctx->pdev, ep->connect.desc.size,
+				  ep->connect.desc.base,
+				  ep->connect.desc.phys_base);
+		sps_free_endpoint(ep->ep_hdl);
+	}
+	if (ep->sys->repl_wq)
+		flush_workqueue(ep->sys->repl_wq);
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		ipa3_cleanup_rx(ep->sys);
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(ep->client)) {
+		if (ipa3_ctx->modem_cfg_emb_pipe_flt &&
+			ep->client == IPA_CLIENT_APPS_LAN_WAN_PROD)
+			IPADBG("modem cfg emb pipe flt\n");
+		else
+			ipa3_delete_dflt_flt_rules(clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_WLAN_CONS(ep->client))
+		atomic_dec(&ipa3_ctx->wc_memb.active_clnt_cnt);
+
+	memset(&ep->wstats, 0, sizeof(struct ipa3_wlan_stats));
+
+	if (!atomic_read(&ipa3_ctx->wc_memb.active_clnt_cnt))
+		ipa3_cleanup_wlan_rx_common_cache();
+
+	ep->valid = 0;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
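+
+/*
+ * A usage sketch for the ipa3_setup_sys_pipe()/ipa3_teardown_sys_pipe() pair
+ * above (illustrative): bring up an APPS consumer pipe with a client notify
+ * callback, then tear it down with the returned handle. The client type,
+ * FIFO size and notify handler are example assumptions.
+ */
+static u32 example_sys_pipe_hdl;
+
+static void example_sys_pipe_notify(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	/* minimal handler: just free received skbs */
+	if (evt == IPA_RECEIVE)
+		dev_kfree_skb_any((struct sk_buff *)data);
+}
+
+static int example_sys_pipe_setup(void)
+{
+	struct ipa_sys_connect_params sys_in;
+
+	memset(&sys_in, 0, sizeof(sys_in));
+	sys_in.client = IPA_CLIENT_APPS_LAN_CONS;	/* example client */
+	sys_in.desc_fifo_sz = 0x800;			/* example FIFO size */
+	sys_in.notify = example_sys_pipe_notify;
+	sys_in.priv = NULL;
+
+	return ipa3_setup_sys_pipe(&sys_in, &example_sys_pipe_hdl);
+}
+
+static void example_sys_pipe_teardown(void)
+{
+	ipa3_teardown_sys_pipe(example_sys_pipe_hdl);
+}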
+
+/**
+ * ipa3_tx_comp_usr_notify_release() - Callback function which will call the
+ * user supplied callback function to release the skb, or release it on
+ * its own if no callback function was supplied.
+ * @user1: the skb to release
+ * @user2: the endpoint index
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa3_connect.
+ */
+static void ipa3_tx_comp_usr_notify_release(void *user1, int user2)
+{
+	struct sk_buff *skb = (struct sk_buff *)user1;
+	int ep_idx = user2;
+
+	IPADBG_LOW("skb=%p ep=%d\n", skb, ep_idx);
+
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_pkts_compl);
+
+	if (ipa3_ctx->ep[ep_idx].client_notify)
+		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)skb);
+	else
+		dev_kfree_skb_any(skb);
+}
+
+static void ipa3_tx_cmd_comp(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+/**
+ * ipa3_tx_dp() - Data-path tx handler
+ * @dst:	[in] which IPA destination to route tx packets to
+ * @skb:	[in] the packet to send
+ * @metadata:	[in] TX packet meta-data
+ *
+ * Data-path tx handler, this is used for both SW data-path which by-passes most
+ * IPA HW blocks AND the regular HW data-path for WLAN AMPDU traffic only. If
+ * dst is a "valid" CONS type, then SW data-path is used. If dst is the
+ * WLAN_AMPDU PROD type, then HW data-path for WLAN AMPDU is used. Anything else
+ * is an error. For errors, client needs to free the skb as needed. For success,
+ * IPA driver will later invoke client callback if one was supplied. That
+ * callback should free the skb. If no callback supplied, IPA driver will free
+ * the skb internally
+ *
+ * The function will use two descriptors for this send command
+ * (for A5_WLAN_AMPDU_PROD only one descriptor will be sent),
+ * the first descriptor will be used to inform the IPA hardware that
+ * apps need to push data into the IPA (IP_PACKET_INIT immediate command).
+ * Once this send was done from SPS point-of-view the IPA driver will
+ * get notified by the supplied callback - ipa_sps_irq_tx_comp()
+ *
+ * ipa_sps_irq_tx_comp will call to the user supplied
+ * callback (from ipa3_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *meta)
+{
+	struct ipa3_desc *desc;
+	struct ipa3_desc _desc[3];
+	int dst_ep_idx;
+	struct ipahal_imm_cmd_ip_packet_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipa3_sys_context *sys;
+	int src_ep_idx;
+	int num_frags, f;
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA3 driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (skb->len == 0) {
+		IPAERR("packet size is 0\n");
+		return -EINVAL;
+	}
+
+	num_frags = skb_shinfo(skb)->nr_frags;
+	if (num_frags) {
+		/* 1 desc for tag to resolve status out-of-order issue;
+		 * 1 desc is needed for the linear portion of skb;
+		 * 1 desc may be needed for the PACKET_INIT;
+		 * 1 desc for each frag
+		 */
+		desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
+		if (!desc) {
+			IPAERR("failed to alloc desc array\n");
+			goto fail_mem;
+		}
+	} else {
+		memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
+		desc = &_desc[0];
+	}
+
+	/*
+	 * USB_CONS: PKT_INIT ep_idx = dst pipe
+	 * Q6_CONS: PKT_INIT ep_idx = sender pipe
+	 * A5_LAN_WAN_PROD: HW path ep_idx = sender pipe
+	 *
+	 * LAN TX: all PKT_INIT
+	 * WAN TX: PKT_INIT (cmd) + HW (data)
+	 *
+	 */
+	if (IPA_CLIENT_IS_CONS(dst)) {
+		src_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n",
+				IPA_CLIENT_APPS_LAN_WAN_PROD);
+			goto fail_gen;
+		}
+		dst_ep_idx = ipa3_get_ep_mapping(dst);
+	} else {
+		src_ep_idx = ipa3_get_ep_mapping(dst);
+		if (-1 == src_ep_idx) {
+			IPAERR("Client %u is not mapped\n", dst);
+			goto fail_gen;
+		}
+		if (meta && meta->pkt_init_dst_ep_valid)
+			dst_ep_idx = meta->pkt_init_dst_ep;
+		else
+			dst_ep_idx = -1;
+	}
+
+	sys = ipa3_ctx->ep[src_ep_idx].sys;
+
+	if (!sys->ep->valid) {
+		IPAERR("pipe not valid\n");
+		goto fail_gen;
+	}
+
+	if (dst_ep_idx != -1) {
+		/* SW data path */
+		cmd.destination_pipe_index = dst_ep_idx;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_IP_PACKET_INIT, &cmd, true);
+		if (unlikely(!cmd_pyld)) {
+			IPAERR("failed to construct ip_packet_init imm cmd\n");
+			goto fail_gen;
+		}
+
+		/* the tag field will be populated in ipa3_send() function */
+		desc[0].opcode = ipahal_imm_cmd_get_opcode(
+			IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[1].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+		desc[1].pyld = cmd_pyld->data;
+		desc[1].len = cmd_pyld->len;
+		desc[1].type = IPA_IMM_CMD_DESC;
+		desc[1].callback = ipa3_tx_cmd_comp;
+		desc[1].user1 = cmd_pyld;
+		desc[2].pyld = skb->data;
+		desc[2].len = skb_headlen(skb);
+		desc[2].type = IPA_DATA_DESC_SKB;
+		desc[2].callback = ipa3_tx_comp_usr_notify_release;
+		desc[2].user1 = skb;
+		desc[2].user2 = (meta && meta->pkt_init_dst_ep_valid &&
+				meta->pkt_init_dst_ep_remote) ?
+				src_ep_idx :
+				dst_ep_idx;
+		if (meta && meta->dma_address_valid) {
+			desc[2].dma_address_valid = true;
+			desc[2].dma_address = meta->dma_address;
+		}
+
+		for (f = 0; f < num_frags; f++) {
+			desc[3+f].frag = &skb_shinfo(skb)->frags[f];
+			desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
+			desc[3+f].len = skb_frag_size(desc[3+f].frag);
+		}
+		/* don't free skb till frag mappings are released */
+		if (num_frags) {
+			desc[3+f-1].callback = desc[2].callback;
+			desc[3+f-1].user1 = desc[2].user1;
+			desc[3+f-1].user2 = desc[2].user2;
+			desc[2].callback = NULL;
+		}
+
+		if (ipa3_send(sys, num_frags + 3, desc, true)) {
+			IPAERR("fail to send skb %p num_frags %u SWP\n",
+				skb, num_frags);
+			goto fail_send;
+		}
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
+	} else {
+		/* HW data path */
+		desc[0].opcode =
+			ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[1].pyld = skb->data;
+		desc[1].len = skb_headlen(skb);
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].callback = ipa3_tx_comp_usr_notify_release;
+		desc[1].user1 = skb;
+		desc[1].user2 = src_ep_idx;
+
+		if (meta && meta->dma_address_valid) {
+			desc[1].dma_address_valid = true;
+			desc[1].dma_address = meta->dma_address;
+		}
+		if (num_frags == 0) {
+			if (ipa3_send(sys, 2, desc, true)) {
+				IPAERR("fail to send skb %p HWP\n", skb);
+				goto fail_gen;
+			}
+		} else {
+			for (f = 0; f < num_frags; f++) {
+				desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+				desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+				desc[2+f].len = skb_frag_size(desc[2+f].frag);
+			}
+			/* don't free skb till frag mappings are released */
+			desc[2+f-1].callback = desc[1].callback;
+			desc[2+f-1].user1 = desc[1].user1;
+			desc[2+f-1].user2 = desc[1].user2;
+			desc[1].callback = NULL;
+
+			if (ipa3_send(sys, num_frags + 2, desc, true)) {
+				IPAERR("fail to send skb %p num_frags %u HWP\n",
+					skb, num_frags);
+				goto fail_gen;
+			}
+		}
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
+	}
+
+	if (num_frags) {
+		kfree(desc);
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
+	}
+	return 0;
+
+fail_send:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+fail_gen:
+	if (num_frags)
+		kfree(desc);
+fail_mem:
+	return -EFAULT;
+}
+
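+/**
+ * ipa3_wq_handle_rx() - work func to handle RX on a sys pipe
+ * @work: work struct embedded in the sys context
+ *
+ * For NAPI-enabled endpoints the client is asked to start polling;
+ * otherwise the RX path is handled directly in the workqueue.
+ */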
+static void ipa3_wq_handle_rx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+
+	sys = container_of(work, struct ipa3_sys_context, work);
+
+	if (sys->ep->napi_enabled) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("NAPI");
+		sys->ep->client_notify(sys->ep->priv,
+				IPA_CLIENT_START_POLL, 0);
+	} else
+		ipa3_handle_rx(sys);
+}
+
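+/**
+ * ipa3_wq_repl_rx() - replenish-ring work func
+ * @work: work struct embedded in the sys context
+ *
+ * Pre-allocates RX packet wrappers with DMA-mapped skbs and pushes them
+ * onto the repl ring (repl.cache) until it is full, so that
+ * ipa3_fast_replenish_rx_cache() can consume them without allocating.
+ * If an allocation fails while the ring is completely empty, it retries
+ * immediately; otherwise it simply returns.
+ */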
+static void ipa3_wq_repl_rx(struct work_struct *work)
+{
+	struct ipa3_sys_context *sys;
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	gfp_t flag = GFP_KERNEL;
+	u32 next;
+	u32 curr;
+
+	sys = container_of(work, struct ipa3_sys_context, repl_work);
+	curr = atomic_read(&sys->repl.tail_idx);
+
+begin:
+	while (1) {
+		next = (curr + 1) % sys->repl.capacity;
+		if (next == atomic_read(&sys->repl.head_idx))
+			goto fail_kmem_cache_alloc;
+
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			pr_err_ratelimited("%s fail alloc rx wrapper sys=%p\n",
+					__func__, sys);
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			pr_err_ratelimited("%s fail alloc skb sys=%p\n",
+					__func__, sys);
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			pr_err_ratelimited("%s dma map fail %p for %p sys=%p\n",
+			       __func__, (void *)rx_pkt->data.dma_addr,
+			       ptr, sys);
+			goto fail_dma_mapping;
+		}
+
+		sys->repl.cache[curr] = rx_pkt;
+		curr = next;
+		/* ensure write is done before setting tail index */
+		mb();
+		atomic_set(&sys->repl.tail_idx, next);
+	}
+
+	return;
+
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (atomic_read(&sys->repl.tail_idx) ==
+			atomic_read(&sys->repl.head_idx)) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_repl_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_repl_rx_empty);
+		else
+			WARN_ON(1);
+		pr_err_ratelimited("%s sys=%p repl ring empty\n",
+				__func__, sys);
+		goto begin;
+	}
+}
+
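+/**
+ * ipa3_replenish_wlan_rx_cache() - replenish a WLAN consumer pipe
+ * @sys: system pipe context
+ *
+ * Moves buffers from the WLAN common descriptor pool onto this pipe and
+ * queues them to the transport (GSI or SPS). If the pipe is still below
+ * its pool size and the common pool is under the high watermark, the
+ * generic replenish path is used to top it up.
+ */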
+static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt = NULL;
+	struct ipa3_rx_pkt_wrapper *tmp;
+	int ret;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	u32 rx_len_cached = 0;
+
+	IPADBG_LOW("\n");
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+	rx_len_cached = sys->len;
+
+	if (rx_len_cached < sys->rx_pool_sz) {
+		list_for_each_entry_safe(rx_pkt, tmp,
+			&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+			list_del(&rx_pkt->link);
+
+			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
+				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+
+			INIT_LIST_HEAD(&rx_pkt->link);
+			rx_pkt->len = 0;
+			rx_pkt->sys = sys;
+
+			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+			if (ipa3_ctx->transport_prototype ==
+					IPA_TRANSPORT_TYPE_GSI) {
+				memset(&gsi_xfer_elem_one, 0,
+					sizeof(gsi_xfer_elem_one));
+				gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+				gsi_xfer_elem_one.len = IPA_WLAN_RX_BUFF_SZ;
+				gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+				gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+				gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+				gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+				ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+					&gsi_xfer_elem_one, true);
+			} else {
+				ret = sps_transfer_one(sys->ep->ep_hdl,
+					rx_pkt->data.dma_addr,
+					IPA_WLAN_RX_BUFF_SZ, rx_pkt, 0);
+			}
+
+			if (ret) {
+				IPAERR("failed to provide buffer: %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+
+			rx_len_cached = ++sys->len;
+
+			if (rx_len_cached >= sys->rx_pool_sz) {
+				spin_unlock_bh(
+					&ipa3_ctx->wc_memb.wlan_spinlock);
+				return;
+			}
+		}
+	}
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	if (rx_len_cached < sys->rx_pool_sz &&
+			ipa3_ctx->wc_memb.wlan_comm_total_cnt <
+			 IPA_WLAN_COMM_RX_POOL_HIGH) {
+		ipa3_replenish_rx_cache(sys);
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt +=
+			(sys->rx_pool_sz - rx_len_cached);
+	}
+
+	return;
+
+fail_provide_rx_buffer:
+	list_del(&rx_pkt->link);
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
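+/**
+ * ipa3_cleanup_wlan_rx_common_cache() - free the WLAN common RX pool
+ *
+ * Unmaps and frees every buffer on the WLAN common descriptor list and
+ * adjusts the pool counters, warning if they do not reach zero.
+ */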
+static void ipa3_cleanup_wlan_rx_common_cache(void)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_rx_pkt_wrapper *tmp;
+
+	list_for_each_entry_safe(rx_pkt, tmp,
+		&ipa3_ctx->wc_memb.wlan_comm_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				IPA_WLAN_COMM_RX_POOL_LOW, DMA_FROM_DEVICE);
+		dev_kfree_skb_any(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+		ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
+		ipa3_ctx->wc_memb.wlan_comm_total_cnt--;
+	}
+	ipa3_ctx->wc_memb.total_tx_pkts_freed = 0;
+
+	if (ipa3_ctx->wc_memb.wlan_comm_free_cnt != 0)
+		IPAERR("wlan comm buff free cnt: %d\n",
+			ipa3_ctx->wc_memb.wlan_comm_free_cnt);
+
+	if (ipa3_ctx->wc_memb.wlan_comm_total_cnt != 0)
+		IPAERR("wlan comm buff total cnt: %d\n",
+			ipa3_ctx->wc_memb.wlan_comm_total_cnt);
+
+}
+
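+/**
+ * ipa3_alloc_wlan_rx_common_cache() - pre-allocate the WLAN common RX pool
+ * @size: number of buffers to keep in the common pool
+ *
+ * Allocates wrappers with DMA-mapped skbs of IPA_WLAN_RX_BUFF_SZ bytes
+ * and adds them to the WLAN common descriptor list until the pool holds
+ * @size buffers. Stops early on any allocation or mapping failure.
+ */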
+static void ipa3_alloc_wlan_rx_common_cache(u32 size)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int rx_len_cached = 0;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+	while (rx_len_cached < size) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+
+		rx_pkt->data.skb =
+			ipa3_get_skb_ipa_rx(IPA_WLAN_RX_BUFF_SZ,
+						flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+				IPA_WLAN_RX_BUFF_SZ, DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		list_add_tail(&rx_pkt->link,
+			&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+		rx_len_cached = ++ipa3_ctx->wc_memb.wlan_comm_total_cnt;
+
+		ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	}
+
+	return;
+
+fail_dma_mapping:
+	dev_kfree_skb_any(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	return;
+}
+
+
+/**
+ * ipa3_replenish_rx_cache() - Replenish the Rx packets cache.
+ * @sys: system pipe context to replenish
+ *
+ * The function allocates buffers from the rx_pkt_wrapper_cache cache until
+ * the pipe holds sys->rx_pool_sz buffers. For each buffer:
+ *   - Allocate a wrapper from the cache
+ *   - Initialize the packet's link
+ *   - Initialize the packet's work struct
+ *   - Allocate the packet's socket buffer (skb)
+ *   - Fill the packet's skb with data
+ *   - Make the packet DMAable
+ *   - Add the packet to the system pipe linked list
+ *   - Initiate an SPS (or GSI) transfer so that the transport driver will
+ *     use this packet later.
+ */
+static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					   flag);
+		if (!rx_pkt) {
+			IPAERR("failed to alloc rx wrapper\n");
+			goto fail_kmem_cache_alloc;
+		}
+
+		INIT_LIST_HEAD(&rx_pkt->link);
+		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+		rx_pkt->sys = sys;
+
+		rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+		if (rx_pkt->data.skb == NULL) {
+			IPAERR("failed to alloc skb\n");
+			goto fail_skb_alloc;
+		}
+		ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+		rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev, ptr,
+						     sys->rx_buff_sz,
+						     DMA_FROM_DEVICE);
+		if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+			IPAERR("dma_map_single failure %p for %p\n",
+			       (void *)rx_pkt->data.dma_addr, ptr);
+			goto fail_dma_mapping;
+		}
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+
+		if (ipa3_ctx->transport_prototype ==
+				IPA_TRANSPORT_TYPE_GSI) {
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = sys->rx_buff_sz;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+					1, &gsi_xfer_elem_one, true);
+			if (ret != GSI_STATUS_SUCCESS) {
+				IPAERR("failed to provide buffer: %d\n",
+					ret);
+				goto fail_provide_rx_buffer;
+			}
+		} else {
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr, sys->rx_buff_sz,
+				rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+		}
+	}
+
+	return;
+
+fail_provide_rx_buffer:
+	list_del(&rx_pkt->link);
+	rx_len_cached = --sys->len;
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	sys->free_skb(rx_pkt->data.skb);
+fail_skb_alloc:
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+}
+
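+/**
+ * ipa3_replenish_rx_cache_recycle() - replenish RX using recycled buffers
+ * @sys: system pipe context to replenish
+ *
+ * Same as ipa3_replenish_rx_cache(), but wrappers and skbs are taken from
+ * the pipe's recycle list (rcycl_list) when available and only allocated
+ * fresh when the list is empty.
+ */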
+static void ipa3_replenish_rx_cache_recycle(struct ipa3_sys_context *sys)
+{
+	void *ptr;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	rx_len_cached = sys->len;
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (list_empty(&sys->rcycl_list)) {
+			rx_pkt = kmem_cache_zalloc(
+				ipa3_ctx->rx_pkt_wrapper_cache, flag);
+			if (!rx_pkt) {
+				IPAERR("failed to alloc rx wrapper\n");
+				goto fail_kmem_cache_alloc;
+			}
+
+			INIT_LIST_HEAD(&rx_pkt->link);
+			INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+			rx_pkt->sys = sys;
+
+			rx_pkt->data.skb = sys->get_skb(sys->rx_buff_sz, flag);
+			if (rx_pkt->data.skb == NULL) {
+				IPAERR("failed to alloc skb\n");
+				kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache,
+					rx_pkt);
+				goto fail_kmem_cache_alloc;
+			}
+			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+			if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+				IPAERR("dma_map_single failure %p for %p\n",
+					(void *)rx_pkt->data.dma_addr, ptr);
+				goto fail_dma_mapping;
+			}
+		} else {
+			spin_lock_bh(&sys->spinlock);
+			rx_pkt = list_first_entry(&sys->rcycl_list,
+				struct ipa3_rx_pkt_wrapper, link);
+			list_del(&rx_pkt->link);
+			spin_unlock_bh(&sys->spinlock);
+			INIT_LIST_HEAD(&rx_pkt->link);
+			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
+			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
+				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
+			if (rx_pkt->data.dma_addr == 0 ||
+				rx_pkt->data.dma_addr == ~0) {
+				IPAERR("dma_map_single failure %p for %p\n",
+					(void *)rx_pkt->data.dma_addr, ptr);
+				goto fail_dma_mapping;
+			}
+		}
+
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+		rx_len_cached = ++sys->len;
+		if (ipa3_ctx->transport_prototype ==
+				IPA_TRANSPORT_TYPE_GSI) {
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = sys->rx_buff_sz;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl,
+					1, &gsi_xfer_elem_one, true);
+			if (ret != GSI_STATUS_SUCCESS) {
+				IPAERR("failed to provide buffer: %d\n",
+					ret);
+				goto fail_provide_rx_buffer;
+			}
+		} else {
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr, sys->rx_buff_sz,
+				rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				goto fail_provide_rx_buffer;
+			}
+		}
+	}
+
+	return;
+fail_provide_rx_buffer:
+	rx_len_cached = --sys->len;
+	list_del(&rx_pkt->link);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+		sys->rx_buff_sz, DMA_FROM_DEVICE);
+fail_dma_mapping:
+	spin_lock_bh(&sys->spinlock);
+	list_add_tail(&rx_pkt->link, &sys->rcycl_list);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_unlock_bh(&sys->spinlock);
+fail_kmem_cache_alloc:
+	if (rx_len_cached == 0)
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+		msecs_to_jiffies(1));
+}
+
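+/**
+ * ipa3_fast_replenish_rx_cache() - replenish RX from the repl ring
+ * @sys: system pipe context to replenish
+ *
+ * Consumes buffers that ipa3_wq_repl_rx() pre-allocated into the repl
+ * ring and queues them to the transport, then kicks the repl worker to
+ * refill the ring. If the pipe drops to the yellow watermark, a delayed
+ * replenish is also scheduled.
+ */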
+static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ret;
+	int rx_len_cached = 0;
+	struct gsi_xfer_elem gsi_xfer_elem_one;
+	u32 curr;
+
+	rx_len_cached = sys->len;
+	curr = atomic_read(&sys->repl.head_idx);
+
+	while (rx_len_cached < sys->rx_pool_sz) {
+		if (curr == atomic_read(&sys->repl.tail_idx))
+			break;
+
+		rx_pkt = sys->repl.cache[curr];
+		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
+
+		if (ipa3_ctx->transport_prototype ==
+				IPA_TRANSPORT_TYPE_GSI) {
+			memset(&gsi_xfer_elem_one, 0,
+				sizeof(gsi_xfer_elem_one));
+			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
+			gsi_xfer_elem_one.len = sys->rx_buff_sz;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOT;
+			gsi_xfer_elem_one.flags |= GSI_XFER_FLAG_EOB;
+			gsi_xfer_elem_one.type = GSI_XFER_ELEM_DATA;
+			gsi_xfer_elem_one.xfer_user_data = rx_pkt;
+
+			ret = gsi_queue_xfer(sys->ep->gsi_chan_hdl, 1,
+				&gsi_xfer_elem_one, true);
+			if (ret != GSI_STATUS_SUCCESS) {
+				IPAERR("failed to provide buffer: %d\n",
+					ret);
+				break;
+			}
+		} else {
+			ret = sps_transfer_one(sys->ep->ep_hdl,
+				rx_pkt->data.dma_addr, sys->rx_buff_sz,
+				rx_pkt, 0);
+
+			if (ret) {
+				IPAERR("sps_transfer_one failed %d\n", ret);
+				list_del(&rx_pkt->link);
+				break;
+			}
+		}
+		rx_len_cached = ++sys->len;
+		curr = (curr + 1) % sys->repl.capacity;
+		/* ensure write is done before setting head index */
+		mb();
+		atomic_set(&sys->repl.head_idx, curr);
+	}
+
+	queue_work(sys->repl_wq, &sys->repl_work);
+
+	if (rx_len_cached <= IPA_DEFAULT_SYS_YELLOW_WM) {
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_rx_empty);
+		else if (sys->ep->client == IPA_CLIENT_APPS_LAN_CONS)
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.lan_rx_empty);
+		else
+			WARN_ON(1);
+		queue_delayed_work(sys->wq, &sys->replenish_rx_work,
+				msecs_to_jiffies(1));
+	}
+}
+
+static void ipa3_replenish_rx_work_func(struct work_struct *work)
+{
+	struct delayed_work *dwork;
+	struct ipa3_sys_context *sys;
+
+	dwork = container_of(work, struct delayed_work, work);
+	sys = container_of(dwork, struct ipa3_sys_context, replenish_rx_work);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	sys->repl_hdlr(sys);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+/**
+ * ipa3_cleanup_rx() - release RX queue resources
+ * @sys: system pipe context whose RX buffers should be freed
+ */
+static void ipa3_cleanup_rx(struct ipa3_sys_context *sys)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_rx_pkt_wrapper *r;
+	u32 head;
+	u32 tail;
+
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->head_desc_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+
+	list_for_each_entry_safe(rx_pkt, r,
+				 &sys->rcycl_list, link) {
+		list_del(&rx_pkt->link);
+		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+		sys->free_skb(rx_pkt->data.skb);
+		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+	}
+
+	if (sys->repl.cache) {
+		head = atomic_read(&sys->repl.head_idx);
+		tail = atomic_read(&sys->repl.tail_idx);
+		while (head != tail) {
+			rx_pkt = sys->repl.cache[head];
+			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+					sys->rx_buff_sz, DMA_FROM_DEVICE);
+			sys->free_skb(rx_pkt->data.skb);
+			kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+			head = (head + 1) % sys->repl.capacity;
+		}
+		kfree(sys->repl.cache);
+	}
+}
+
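+/**
+ * ipa3_skb_copy_for_client() - copy an RX payload into a fresh skb
+ * @skb: source skb
+ * @len: number of bytes to copy
+ *
+ * Allocates a new skb with IPA_RX_BUFF_CLIENT_HEADROOM of headroom and
+ * copies @len bytes of data into it; returns NULL on allocation failure.
+ */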
+static struct sk_buff *ipa3_skb_copy_for_client(struct sk_buff *skb, int len)
+{
+	struct sk_buff *skb2 = NULL;
+
+	skb2 = __dev_alloc_skb(len + IPA_RX_BUFF_CLIENT_HEADROOM, GFP_KERNEL);
+	if (likely(skb2)) {
+		/* Set the data pointer */
+		skb_reserve(skb2, IPA_RX_BUFF_CLIENT_HEADROOM);
+		memcpy(skb2->data, skb->data, len);
+		skb2->len = len;
+		skb_set_tail_pointer(skb2, len);
+	}
+
+	return skb2;
+}
+
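+/**
+ * ipa3_lan_rx_pyld_hdlr() - LAN RX payload handler
+ * @skb: RX buffer holding status elements and mux-ed LAN RX data
+ * @sys: system pipe context
+ *
+ * Walks the status elements in the buffer, handling TAG completions,
+ * TX completion statuses and RX data packets. Packets or statuses that
+ * straddle buffer boundaries are stitched together using the prev_skb,
+ * len_partial and len_rem state kept in @sys.
+ */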
+static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	int rc = 0;
+	struct ipahal_pkt_status status;
+	u32 pkt_status_sz;
+	struct sk_buff *skb2;
+	int pad_len_byte;
+	int len;
+	unsigned char *buf;
+	int src_pipe;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+	struct ipa3_tx_pkt_wrapper *tx_pkt = NULL;
+	unsigned long ptr;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		return rc;
+	}
+
+	if (sys->len_partial) {
+		IPADBG_LOW("len_partial %d\n", sys->len_partial);
+		buf = skb_push(skb, sys->len_partial);
+		memcpy(buf, sys->prev_skb->data, sys->len_partial);
+		sys->len_partial = 0;
+		sys->free_skb(sys->prev_skb);
+		sys->prev_skb = NULL;
+		goto begin;
+	}
+
+	/* this pipe has TX comp (status only) + mux-ed LAN RX data
+	 * (status+data)
+	 */
+	if (sys->len_rem) {
+		IPADBG_LOW("rem %d skb %d pad %d\n", sys->len_rem, skb->len,
+				sys->len_pad);
+		if (sys->len_rem <= skb->len) {
+			if (sys->prev_skb) {
+				skb2 = skb_copy_expand(sys->prev_skb, 0,
+						sys->len_rem, GFP_KERNEL);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, sys->len_rem),
+						skb->data, sys->len_rem);
+					skb_trim(skb2,
+						skb2->len - sys->len_pad);
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff);
+					if (sys->drop_packet)
+						dev_kfree_skb_any(skb2);
+					else
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+			}
+			skb_pull(skb, sys->len_rem);
+			sys->prev_skb = NULL;
+			sys->len_rem = 0;
+			sys->len_pad = 0;
+		} else {
+			if (sys->prev_skb) {
+				skb2 = skb_copy_expand(sys->prev_skb, 0,
+					skb->len, GFP_KERNEL);
+				if (likely(skb2)) {
+					memcpy(skb_put(skb2, skb->len),
+						skb->data, skb->len);
+				} else {
+					IPAERR("copy expand failed\n");
+				}
+				dev_kfree_skb_any(sys->prev_skb);
+				sys->prev_skb = skb2;
+			}
+			sys->len_rem -= skb->len;
+			return rc;
+		}
+	}
+
+begin:
+	pkt_status_sz = ipahal_pkt_status_get_size();
+	while (skb->len) {
+		sys->drop_packet = false;
+		IPADBG_LOW("LEN_REM %d\n", skb->len);
+
+		if (skb->len < pkt_status_sz) {
+			WARN_ON(sys->prev_skb != NULL);
+			IPADBG_LOW("status straddles buffer\n");
+			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+			sys->len_partial = skb->len;
+			return rc;
+		}
+
+		ipahal_pkt_status_parse(skb->data, &status);
+		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if ((status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+			IPAERR("unsupported opcode(%d)\n",
+				status.status_opcode);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		}
+		IPA_STATS_EXCP_CNT(status.exception,
+				ipa3_ctx->stats.rx_excp_pkts);
+		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes) {
+			IPAERR("status fields invalid\n");
+			IPAERR("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+			WARN_ON(1);
+			BUG();
+		}
+		if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+			IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT, &status)) {
+			struct ipa3_tag_completion *comp;
+
+			IPADBG_LOW("TAG packet arrived\n");
+			if (status.tag_info == IPA_COOKIE) {
+				skb_pull(skb, pkt_status_sz);
+				if (skb->len < sizeof(comp)) {
+					IPAERR("TAG arrived without packet\n");
+					return rc;
+				}
+				memcpy(&comp, skb->data, sizeof(comp));
+				skb_pull(skb, sizeof(comp) +
+						IPA_SIZE_DL_CSUM_META_TRAILER);
+				complete(&comp->comp);
+				if (atomic_dec_return(&comp->cnt) == 0)
+					kfree(comp);
+				continue;
+			} else {
+				ptr = tag_to_pointer_wa(status.tag_info);
+				tx_pkt = (struct ipa3_tx_pkt_wrapper *)ptr;
+				IPADBG_LOW("tx_pkt recv = %p\n", tx_pkt);
+			}
+		}
+		if (status.pkt_len == 0) {
+			IPADBG_LOW("Skip aggr close status\n");
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.aggr_close);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+			continue;
+		}
+
+		if (status.endp_dest_idx == (sys->ep - ipa3_ctx->ep)) {
+			/* RX data */
+			src_pipe = status.endp_src_idx;
+
+			/*
+			 * A packet which is received back to the AP after
+			 * there was no route match.
+			 */
+			if (status.exception ==
+				IPAHAL_PKT_STATUS_EXCEPTION_NONE &&
+				ipahal_is_rule_miss_id(status.rt_rule_id))
+				sys->drop_packet = true;
+
+			if (skb->len == pkt_status_sz &&
+				status.exception ==
+				IPAHAL_PKT_STATUS_EXCEPTION_NONE) {
+				WARN_ON(sys->prev_skb != NULL);
+				IPADBG_LOW("Ins header in next buffer\n");
+				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
+				sys->len_partial = skb->len;
+				return rc;
+			}
+
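+			/* payloads are padded to a 4-byte boundary */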
+			pad_len_byte = ((status.pkt_len + 3) & ~3) -
+					status.pkt_len;
+
+			len = status.pkt_len + pad_len_byte +
+				IPA_SIZE_DL_CSUM_META_TRAILER;
+			IPADBG_LOW("pad %d pkt_len %d len %d\n", pad_len_byte,
+					status.pkt_len, len);
+
+			if (status.exception ==
+					IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR) {
+				IPADBG_LOW(
+					"Dropping packet on DeAggr Exception\n");
+				sys->drop_packet = true;
+			}
+
+			skb2 = ipa3_skb_copy_for_client(skb,
+				min(status.pkt_len + pkt_status_sz, skb->len));
+			if (likely(skb2)) {
+				if (skb->len < len + pkt_status_sz) {
+					IPADBG_LOW("SPL skb len %d len %d\n",
+							skb->len, len);
+					sys->prev_skb = skb2;
+					sys->len_rem = len - skb->len +
+						pkt_status_sz;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_trim(skb2, status.pkt_len +
+							pkt_status_sz);
+					IPADBG_LOW("rx avail for %d\n",
+							status.endp_dest_idx);
+					if (sys->drop_packet) {
+						dev_kfree_skb_any(skb2);
+					} else if (status.pkt_len >
+						   IPA_GENERIC_AGGR_BYTE_LIMIT *
+						   1024) {
+						IPAERR("packet size invalid\n");
+						IPAERR("STATUS opcode=%d\n",
+							status.status_opcode);
+						IPAERR("src=%d dst=%d len=%d\n",
+							status.endp_src_idx,
+							status.endp_dest_idx,
+							status.pkt_len);
+						BUG();
+					} else {
+					skb2->truesize = skb2->len +
+						sizeof(struct sk_buff) +
+						(ALIGN(len +
+						pkt_status_sz, 32) *
+						unused / used_align);
+						sys->ep->client_notify(
+							sys->ep->priv,
+							IPA_RECEIVE,
+							(unsigned long)(skb2));
+					}
+					skb_pull(skb, len + pkt_status_sz);
+				}
+			} else {
+				IPAERR("fail to alloc skb\n");
+				if (skb->len < len) {
+					sys->prev_skb = NULL;
+					sys->len_rem = len - skb->len +
+						pkt_status_sz;
+					sys->len_pad = pad_len_byte;
+					skb_pull(skb, skb->len);
+				} else {
+					skb_pull(skb, len + pkt_status_sz);
+				}
+			}
+			/* TX comp */
+			ipa3_wq_write_done_status(src_pipe, tx_pkt);
+			IPADBG_LOW("tx comp imp for %d\n", src_pipe);
+		} else {
+			/* TX comp */
+			ipa3_wq_write_done_status(status.endp_src_idx, tx_pkt);
+			IPADBG_LOW("tx comp exp for %d\n",
+				status.endp_src_idx);
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.stat_compl);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_excp_pkts
+				[IPAHAL_PKT_STATUS_EXCEPTION_NONE]);
+		}
+	}
+
+	return rc;
+}
+
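+/**
+ * ipa3_join_prev_skb() - append data from @skb to a copy of @prev_skb
+ * @prev_skb: previously saved partial packet
+ * @skb: current RX buffer
+ * @len: number of bytes to append from @skb
+ *
+ * Returns the expanded copy (or NULL on failure) and frees @prev_skb.
+ */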
+static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
+		struct sk_buff *skb, unsigned int len)
+{
+	struct sk_buff *skb2;
+
+	skb2 = skb_copy_expand(prev_skb, 0,
+			len, GFP_KERNEL);
+	if (likely(skb2)) {
+		memcpy(skb_put(skb2, len),
+			skb->data, len);
+	} else {
+		IPAERR("copy expand failed\n");
+		skb2 = NULL;
+	}
+	dev_kfree_skb_any(prev_skb);
+
+	return skb2;
+}
+
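+/**
+ * ipa3_wan_rx_handle_splt_pyld() - complete a payload split across buffers
+ * @skb: current RX buffer
+ * @sys: system pipe context holding prev_skb/len_rem state
+ *
+ * If the remainder fits in @skb the joined packet is delivered to the
+ * WAN client (minus the status element); otherwise the partial packet
+ * is saved back into @sys for the next buffer.
+ */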
+static void ipa3_wan_rx_handle_splt_pyld(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	struct sk_buff *skb2;
+
+	IPADBG_LOW("rem %d skb %d\n", sys->len_rem, skb->len);
+	if (sys->len_rem <= skb->len) {
+		if (sys->prev_skb) {
+			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+					sys->len_rem);
+			if (likely(skb2)) {
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, ipahal_pkt_status_get_size());
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE,
+					(unsigned long)(skb2));
+			}
+		}
+		skb_pull(skb, sys->len_rem);
+		sys->prev_skb = NULL;
+		sys->len_rem = 0;
+	} else {
+		if (sys->prev_skb) {
+			skb2 = ipa3_join_prev_skb(sys->prev_skb, skb,
+					skb->len);
+			sys->prev_skb = skb2;
+		}
+		sys->len_rem -= skb->len;
+		skb_pull(skb, skb->len);
+	}
+}
+
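+/**
+ * ipa3_wan_rx_pyld_hdlr() - WAN RX payload handler
+ * @skb: RX buffer holding status elements and QMAP-framed packets
+ * @sys: system pipe context
+ *
+ * When WAN GRO aggregation is enabled the whole buffer is handed to the
+ * client as-is. Otherwise each status element and the QMAP header that
+ * follows it are parsed to compute the frame length, the frame is cloned
+ * and delivered to the WAN client, and frames split across buffers are
+ * completed via ipa3_wan_rx_handle_splt_pyld().
+ */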
+static int ipa3_wan_rx_pyld_hdlr(struct sk_buff *skb,
+		struct ipa3_sys_context *sys)
+{
+	int rc = 0;
+	struct ipahal_pkt_status status;
+	unsigned char *skb_data;
+	u32 pkt_status_sz;
+	struct sk_buff *skb2;
+	u16 pkt_len_with_pad;
+	u32 qmap_hdr;
+	int checksum_trailer_exists;
+	int frame_len;
+	int ep_idx;
+	unsigned int used = *(unsigned int *)skb->cb;
+	unsigned int used_align = ALIGN(used, 32);
+	unsigned long unused = IPA_GENERIC_RX_BUFF_BASE_SZ - used;
+
+	IPA_DUMP_BUFF(skb->data, 0, skb->len);
+	if (skb->len == 0) {
+		IPAERR("ZLT\n");
+		goto bail;
+	}
+
+	if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
+		sys->ep->client_notify(sys->ep->priv,
+			IPA_RECEIVE, (unsigned long)(skb));
+		return rc;
+	}
+	if (sys->repl_hdlr == ipa3_replenish_rx_cache_recycle) {
+		IPAERR("Recycle should enable only with GRO Aggr\n");
+		ipa_assert();
+	}
+
+	/*
+	 * The payload is split across two or more buffers; take the start
+	 * of the payload from prev_skb.
+	 */
+	if (sys->len_rem)
+		ipa3_wan_rx_handle_splt_pyld(skb, sys);
+
+	pkt_status_sz = ipahal_pkt_status_get_size();
+	while (skb->len) {
+		IPADBG_LOW("LEN_REM %d\n", skb->len);
+		if (skb->len < pkt_status_sz) {
+			IPAERR("status straddles buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		ipahal_pkt_status_parse(skb->data, &status);
+		skb_data = skb->data;
+		IPADBG_LOW("STATUS opcode=%d src=%d dst=%d len=%d\n",
+				status.status_opcode, status.endp_src_idx,
+				status.endp_dest_idx, status.pkt_len);
+
+		if (sys->status_stat) {
+			sys->status_stat->status[sys->status_stat->curr] =
+				status;
+			sys->status_stat->curr++;
+			if (sys->status_stat->curr == IPA_MAX_STATUS_STAT_NUM)
+				sys->status_stat->curr = 0;
+		}
+
+		if ((status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET) &&
+			(status.status_opcode !=
+			IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS)) {
+			IPAERR("unsupported opcode(%d)\n",
+				status.status_opcode);
+			skb_pull(skb, pkt_status_sz);
+			continue;
+		}
+
+		IPA_STATS_INC_CNT(ipa3_ctx->stats.rx_pkts);
+		if (status.endp_dest_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.endp_src_idx >= ipa3_ctx->ipa_num_pipes ||
+			status.pkt_len > IPA_GENERIC_AGGR_BYTE_LIMIT * 1024) {
+			IPAERR("status fields invalid\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		if (status.pkt_len == 0) {
+			IPADBG_LOW("Skip aggr close status\n");
+			skb_pull(skb, pkt_status_sz);
+			IPA_STATS_DEC_CNT(ipa3_ctx->stats.rx_pkts);
+			IPA_STATS_INC_CNT(ipa3_ctx->stats.wan_aggr_close);
+			continue;
+		}
+		ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+		if (status.endp_dest_idx != ep_idx) {
+			IPAERR("expected endp_dest_idx %d received %d\n",
+					ep_idx, status.endp_dest_idx);
+			WARN_ON(1);
+			goto bail;
+		}
+		/* RX data */
+		if (skb->len == pkt_status_sz) {
+			IPAERR("Ins header in next buffer\n");
+			WARN_ON(1);
+			goto bail;
+		}
+		qmap_hdr = *(u32 *)(skb_data + pkt_status_sz);
+		/*
+		 * Take the pkt_len_with_pad from the last 2 bytes of the QMAP
+		 * header
+		 */
+
+		/* QMAP is BE: convert the pkt_len field from BE to LE */
+		pkt_len_with_pad = ntohs((qmap_hdr >> 16) & 0xffff);
+		IPADBG_LOW("pkt_len with pad %d\n", pkt_len_with_pad);
+		/* get the CHECKSUM_PROCESS bit */
+		checksum_trailer_exists = IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+			IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT, &status);
+		IPADBG_LOW("checksum_trailer_exists %d\n",
+				checksum_trailer_exists);
+
+		frame_len = pkt_status_sz + IPA_QMAP_HEADER_LENGTH +
+			    pkt_len_with_pad;
+		if (checksum_trailer_exists)
+			frame_len += IPA_DL_CHECKSUM_LENGTH;
+		IPADBG_LOW("frame_len %d\n", frame_len);
+
+		skb2 = skb_clone(skb, GFP_KERNEL);
+		if (likely(skb2)) {
+			/*
+			 * The length of the actual data is smaller than
+			 * expected: the payload is split across two buffers.
+			 */
+			if (skb->len < frame_len) {
+				IPADBG_LOW("SPL skb len %d len %d\n",
+						skb->len, frame_len);
+				sys->prev_skb = skb2;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_trim(skb2, frame_len);
+				IPADBG_LOW("rx avail for %d\n",
+						status.endp_dest_idx);
+				IPADBG_LOW(
+					"removing Status element from skb and sending to WAN client");
+				skb_pull(skb2, pkt_status_sz);
+				skb2->truesize = skb2->len +
+					sizeof(struct sk_buff) +
+					(ALIGN(frame_len, 32) *
+					 unused / used_align);
+				sys->ep->client_notify(sys->ep->priv,
+					IPA_RECEIVE, (unsigned long)(skb2));
+				skb_pull(skb, frame_len);
+			}
+		} else {
+			IPAERR("fail to clone\n");
+			if (skb->len < frame_len) {
+				sys->prev_skb = NULL;
+				sys->len_rem = frame_len - skb->len;
+				skb_pull(skb, skb->len);
+			} else {
+				skb_pull(skb, frame_len);
+			}
+		}
+	}
+bail:
+	sys->free_skb(skb);
+	return rc;
+}
+
+static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags)
+{
+	return __dev_alloc_skb(len, flags);
+}
+
+static void ipa3_free_skb_rx(struct sk_buff *skb)
+{
+	dev_kfree_skb_any(skb);
+}
+
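+/**
+ * ipa3_lan_rx_cb() - deliver a LAN RX packet to the registered client
+ * @priv: private data registered with the pipe
+ * @evt: event type
+ * @data: the received skb (cast from unsigned long)
+ *
+ * Parses the packet status to find the source pipe, drops the packet if
+ * that pipe is invalid or has no notify callback, strips the status (and
+ * the LAN RX header for non-exception packets), stores the QMAP metadata
+ * in skb->cb and invokes the source pipe's client_notify.
+ */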
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data)
+{
+	struct sk_buff *rx_skb = (struct sk_buff *)data;
+	struct ipahal_pkt_status status;
+	struct ipa3_ep_context *ep;
+	unsigned int src_pipe;
+	u32 metadata;
+
+	ipahal_pkt_status_parse(rx_skb->data, &status);
+	src_pipe = status.endp_src_idx;
+	metadata = status.metadata;
+	ep = &ipa3_ctx->ep[src_pipe];
+	if (unlikely(src_pipe >= ipa3_ctx->ipa_num_pipes ||
+		!ep->valid ||
+		!ep->client_notify)) {
+		IPAERR("drop pipe=%d ep_valid=%d client_notify=%p\n",
+		  src_pipe, ep->valid, ep->client_notify);
+		dev_kfree_skb_any(rx_skb);
+		return;
+	}
+	if (status.exception == IPAHAL_PKT_STATUS_EXCEPTION_NONE)
+		skb_pull(rx_skb, ipahal_pkt_status_get_size() +
+				IPA_LAN_RX_HEADER_LENGTH);
+	else
+		skb_pull(rx_skb, ipahal_pkt_status_get_size());
+
+	/* Metadata Info
+	 *  ------------------------------------------
+	 *  |   3     |   2     |    1        |  0   |
+	 *  | fw_desc | vdev_id | qmap mux id | Resv |
+	 *  ------------------------------------------
+	 */
+	*(u16 *)rx_skb->cb = ((metadata >> 16) & 0xFFFF);
+	IPADBG_LOW("meta_data: 0x%x cb: 0x%x\n",
+			metadata, *(u32 *)rx_skb->cb);
+
+	ep->client_notify(ep->priv, IPA_RECEIVE, (unsigned long)(rx_skb));
+}
+
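+/**
+ * ipa3_recycle_rx_wrapper() - return a wrapper to the recycle list
+ * @rx_pkt: RX packet wrapper whose skb has been consumed
+ *
+ * Recycles the skb and places the wrapper on the owning pipe's
+ * rcycl_list for reuse by ipa3_replenish_rx_cache_recycle().
+ */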
+static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	rx_pkt->data.dma_addr = 0;
+	ipa3_skb_recycle(rx_pkt->data.skb);
+	INIT_LIST_HEAD(&rx_pkt->link);
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+}
+
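+/**
+ * ipa3_recycle_wan_skb() - recycle a WAN RX skb back to the WAN pipe
+ * @skb: skb that the client has finished with
+ *
+ * Wraps the skb in a freshly allocated RX wrapper and puts it on the
+ * WAN consumer pipe's recycle list.
+ */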
+void ipa3_recycle_wan_skb(struct sk_buff *skb)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	int ep_idx = ipa3_get_ep_mapping(
+	   IPA_CLIENT_APPS_WAN_CONS);
+	gfp_t flag = GFP_NOWAIT | __GFP_NOWARN;
+
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist\n");
+		ipa_assert();
+	}
+
+	rx_pkt = kmem_cache_zalloc(ipa3_ctx->rx_pkt_wrapper_cache,
+					flag);
+	if (!rx_pkt)
+		ipa_assert();
+
+	INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
+	rx_pkt->sys = ipa3_ctx->ep[ep_idx].sys;
+
+	rx_pkt->data.skb = skb;
+	ipa3_recycle_rx_wrapper(rx_pkt);
+}
+
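+/**
+ * ipa3_wq_rx_common() - process one completed RX transfer
+ * @sys: system pipe context
+ * @size: number of bytes transferred (0 to keep the stored length)
+ *
+ * Pops the head descriptor, unmaps its buffer, passes the skb to the
+ * pipe's payload handler and then replenishes the pipe.
+ */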
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		WARN_ON(1);
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa3_rx_pkt_wrapper,
+					   link);
+	list_del(&rx_pkt_expected->link);
+	sys->len--;
+	if (size)
+		rx_pkt_expected->len = size;
+	rx_skb = rx_pkt_expected->data.skb;
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
+			sys->rx_buff_sz, DMA_FROM_DEVICE);
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	*(unsigned int *)rx_skb->cb = rx_skb->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->pyld_hdlr(rx_skb, sys);
+	sys->free_rx_wrapper(rx_pkt_expected);
+	sys->repl_hdlr(sys);
+}
+
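+/**
+ * ipa3_wlan_wq_rx_common() - process one completed WLAN RX transfer
+ * @sys: system pipe context
+ * @size: number of bytes transferred (0 to keep the stored length)
+ *
+ * Delivers the buffer to the WLAN client unless the pipe has dropped to
+ * its low watermark, in which case the buffer is freed and counted as
+ * dropped; the WLAN cache is then replenished.
+ */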
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+	struct sk_buff *rx_skb;
+
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		WARN_ON(1);
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa3_rx_pkt_wrapper,
+					   link);
+	list_del(&rx_pkt_expected->link);
+	sys->len--;
+
+	if (size)
+		rx_pkt_expected->len = size;
+
+	rx_skb = rx_pkt_expected->data.skb;
+	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
+	rx_skb->len = rx_pkt_expected->len;
+	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
+	sys->ep->wstats.tx_pkts_rcvd++;
+	if (sys->len <= IPA_WLAN_RX_POOL_SZ_LOW_WM) {
+		ipa3_free_skb(&rx_pkt_expected->data);
+		sys->ep->wstats.tx_pkts_dropped++;
+	} else {
+		sys->ep->wstats.tx_pkts_sent++;
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(&rx_pkt_expected->data));
+	}
+	ipa3_replenish_wlan_rx_cache(sys);
+}
+
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
+	struct ipa_mem_buffer *mem_info)
+{
+	IPADBG_LOW("ENTER.\n");
+	if (unlikely(list_empty(&sys->head_desc_list))) {
+		IPAERR("descriptor list is empty!\n");
+		WARN_ON(1);
+		return;
+	}
+	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+				(unsigned long)(mem_info));
+	IPADBG_LOW("EXIT\n");
+}
+
+static void ipa3_wq_rx_avail(struct work_struct *work)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+	struct ipa3_sys_context *sys;
+
+	rx_pkt = container_of(work, struct ipa3_rx_pkt_wrapper, work);
+	if (unlikely(rx_pkt == NULL)) {
+		WARN_ON(1);
+		return;
+	}
+	sys = rx_pkt->sys;
+	ipa3_wq_rx_common(sys, 0);
+}
+
+/**
+ * ipa3_sps_irq_rx_no_aggr_notify() - Callback function which will be called by
+ * the SPS driver after a Rx operation is complete.
+ * Called in an interrupt context.
+ * @notify:	SPS driver supplied notification struct
+ *
+ * This function defers the work for this event to a workqueue.
+ */
+void ipa3_sps_irq_rx_no_aggr_notify(struct sps_event_notify *notify)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	switch (notify->event_id) {
+	case SPS_EVENT_EOT:
+		rx_pkt = notify->data.transfer.user;
+		if (IPA_CLIENT_IS_APPS_CONS(rx_pkt->sys->ep->client))
+			atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		rx_pkt->len = notify->data.transfer.iovec.size;
+		IPADBG_LOW("event %d notified sys=%p len=%u\n",
+				notify->event_id,
+				notify->user, rx_pkt->len);
+		queue_work(rx_pkt->sys->wq, &rx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d sys=%p\n",
+				notify->event_id, notify->user);
+	}
+}
+
+static int ipa3_odu_rx_pyld_hdlr(struct sk_buff *rx_skb,
+	struct ipa3_sys_context *sys)
+{
+	if (sys->ep->client_notify) {
+		sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
+			(unsigned long)(rx_skb));
+	} else {
+		dev_kfree_skb_any(rx_skb);
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
+static void ipa3_free_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
+{
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
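+/**
+ * ipa3_assign_policy() - assign per-client data path policy
+ * @in: connection parameters supplied by the client
+ * @sys: system pipe context to configure
+ *
+ * Selects the interrupt/polling policy, SPS options, callbacks, buffer
+ * sizes, aggregation settings and replenish/payload handlers based on
+ * the client type.
+ */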
+static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
+		struct ipa3_sys_context *sys)
+{
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
+		sys->policy = IPA_POLICY_INTR_MODE;
+		sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT);
+		sys->sps_callback = ipa3_sps_irq_tx_no_aggr_notify;
+		return 0;
+	}
+
+	if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(in->client)) {
+		sys->policy = IPA_POLICY_NOINTR_MODE;
+		sys->sps_option = SPS_O_AUTO_ENABLE;
+		sys->sps_callback = NULL;
+		return 0;
+	}
+
+	if (IPA_CLIENT_IS_PROD(in->client)) {
+		if (sys->ep->skip_ep_cfg) {
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE|
+				SPS_O_EOT | SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_tx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_tx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_tx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+		} else {
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+			sys->sps_option = SPS_O_AUTO_ENABLE;
+			sys->sps_callback = NULL;
+			sys->ep->status.status_en = true;
+			sys->ep->status.status_ep = ipa3_get_ep_mapping(
+					IPA_CLIENT_APPS_LAN_CONS);
+		}
+	} else {
+		if (in->client == IPA_CLIENT_APPS_LAN_CONS ||
+		    in->client == IPA_CLIENT_APPS_WAN_CONS) {
+			sys->ep->status.status_en = true;
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+					| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+					ipa3_replenish_rx_work_func);
+			INIT_WORK(&sys->repl_work, ipa3_wq_repl_rx);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_GENERIC_RX_BUFF_SZ(
+				IPA_GENERIC_RX_BUFF_BASE_SZ);
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			in->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
+			in->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
+			in->ipa_ep_cfg.aggr.aggr_time_limit =
+				IPA_GENERIC_AGGR_TIME_LIMIT;
+			if (in->client == IPA_CLIENT_APPS_LAN_CONS) {
+				sys->pyld_hdlr = ipa3_lan_rx_pyld_hdlr;
+				sys->repl_hdlr =
+					ipa3_replenish_rx_cache_recycle;
+				sys->free_rx_wrapper =
+					ipa3_recycle_rx_wrapper;
+				sys->rx_pool_sz =
+					ipa3_ctx->lan_rx_ring_size;
+				in->ipa_ep_cfg.aggr.aggr_byte_limit =
+				IPA_GENERIC_AGGR_BYTE_LIMIT;
+				in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+				IPA_GENERIC_AGGR_PKT_LIMIT;
+			} else if (in->client ==
+					IPA_CLIENT_APPS_WAN_CONS) {
+				sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
+				sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+				sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
+				if (nr_cpu_ids > 1) {
+					sys->repl_hdlr =
+					   ipa3_fast_replenish_rx_cache;
+				} else {
+					sys->repl_hdlr =
+					   ipa3_replenish_rx_cache;
+				}
+				if (in->napi_enabled)
+					sys->rx_pool_sz =
+					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+				if (in->napi_enabled && in->recycle_enabled)
+					sys->repl_hdlr =
+					 ipa3_replenish_rx_cache_recycle;
+				in->ipa_ep_cfg.aggr.aggr_sw_eof_active =
+					true;
+				if (ipa3_ctx->ipa_client_apps_wan_cons_agg_gro) {
+					IPAERR("get close-by %u\n",
+						ipa_adjust_ra_buff_base_sz(
+						in->ipa_ep_cfg.aggr.aggr_byte_limit));
+					IPAERR("set rx_buff_sz %lu\n",
+						(unsigned long int)
+						IPA_GENERIC_RX_BUFF_SZ(
+						ipa_adjust_ra_buff_base_sz(
+						in->ipa_ep_cfg.aggr.aggr_byte_limit)));
+					/* disable ipa_status */
+					sys->ep->status.status_en = false;
+					sys->rx_buff_sz =
+						IPA_GENERIC_RX_BUFF_SZ(
+						ipa_adjust_ra_buff_base_sz(
+						in->ipa_ep_cfg.aggr.aggr_byte_limit));
+					in->ipa_ep_cfg.aggr.aggr_byte_limit =
+						sys->rx_buff_sz <
+						in->ipa_ep_cfg.aggr.aggr_byte_limit ?
+						IPA_ADJUST_AGGR_BYTE_LIMIT(
+							sys->rx_buff_sz) :
+						IPA_ADJUST_AGGR_BYTE_LIMIT(
+						in->ipa_ep_cfg.aggr.aggr_byte_limit);
+					IPAERR("set aggr_limit %lu\n",
+						(unsigned long int)
+						in->ipa_ep_cfg.aggr.aggr_byte_limit);
+				} else {
+					in->ipa_ep_cfg.aggr.aggr_byte_limit =
+						IPA_GENERIC_AGGR_BYTE_LIMIT;
+					in->ipa_ep_cfg.aggr.aggr_pkt_limit =
+						IPA_GENERIC_AGGR_PKT_LIMIT;
+				}
+			}
+		} else if (IPA_CLIENT_IS_WLAN_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+				| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_WLAN_RX_BUFF_SZ;
+			sys->rx_pool_sz = in->desc_fifo_sz/
+				sizeof(struct sps_iovec) - 1;
+			if (sys->rx_pool_sz > IPA_WLAN_RX_POOL_SZ)
+				sys->rx_pool_sz = IPA_WLAN_RX_POOL_SZ;
+			sys->pyld_hdlr = NULL;
+			sys->repl_hdlr = ipa3_replenish_wlan_rx_cache;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+			in->ipa_ep_cfg.aggr.aggr_en = IPA_BYPASS_AGGR;
+		} else if (IPA_CLIENT_IS_ODU_CONS(in->client)) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+				| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+			ipa3_switch_to_intr_rx_work_func);
+			INIT_DELAYED_WORK(&sys->replenish_rx_work,
+				ipa3_replenish_rx_work_func);
+			atomic_set(&sys->curr_polling_state, 0);
+			sys->rx_buff_sz = IPA_ODU_RX_BUFF_SZ;
+			sys->rx_pool_sz = in->desc_fifo_sz /
+				sizeof(struct sps_iovec) - 1;
+			if (sys->rx_pool_sz > IPA_ODU_RX_POOL_SZ)
+				sys->rx_pool_sz = IPA_ODU_RX_POOL_SZ;
+			sys->pyld_hdlr = ipa3_odu_rx_pyld_hdlr;
+			sys->get_skb = ipa3_get_skb_ipa_rx;
+			sys->free_skb = ipa3_free_skb_rx;
+			sys->free_rx_wrapper = ipa3_free_rx_wrapper;
+			sys->repl_hdlr = ipa3_replenish_rx_cache;
+		} else if (in->client ==
+				IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_INTR_POLL_MODE;
+			sys->sps_option = (SPS_O_AUTO_ENABLE | SPS_O_EOT
+					| SPS_O_ACK_TRANSFERS);
+			sys->sps_callback = ipa3_sps_irq_rx_notify;
+			INIT_WORK(&sys->work, ipa3_wq_handle_rx);
+			INIT_DELAYED_WORK(&sys->switch_to_intr_work,
+				ipa3_switch_to_intr_rx_work_func);
+		} else if (in->client ==
+				IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+			IPADBG("assigning policy to client:%d",
+				in->client);
+
+			sys->policy = IPA_POLICY_NOINTR_MODE;
+			sys->sps_option = SPS_O_AUTO_ENABLE |
+			SPS_O_ACK_TRANSFERS | SPS_O_POLL;
+		} else {
+			IPAERR("Need to install a RX pipe hdlr\n");
+			WARN_ON(1);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_tx_client_rx_notify_release() - Callback function
+ * which will call the user supplied callback function to
+ * release the skb, or release it on its own if no callback
+ * function was supplied
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa3_tx_dp_mul().
+ */
+static void ipa3_tx_client_rx_notify_release(void *user1, int user2)
+{
+	struct ipa_tx_data_desc *dd = (struct ipa_tx_data_desc *)user1;
+	int ep_idx = user2;
+
+	IPADBG_LOW("Received data desc anchor:%p\n", dd);
+
+	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+
+	/* wlan host driver waits until tx completes before unloading */
+	IPADBG_LOW("ep=%d fifo_desc_free_count=%d\n",
+		ep_idx, atomic_read(&ipa3_ctx->ep[ep_idx].avail_fifo_desc));
+	IPADBG_LOW("calling client notify callback with priv:%p\n",
+		ipa3_ctx->ep[ep_idx].priv);
+
+	if (ipa3_ctx->ep[ep_idx].client_notify) {
+		ipa3_ctx->ep[ep_idx].client_notify(ipa3_ctx->ep[ep_idx].priv,
+				IPA_WRITE_DONE, (unsigned long)user1);
+		ipa3_ctx->ep[ep_idx].wstats.rx_hd_reply++;
+	}
+}
+/**
+ * ipa3_tx_client_rx_pkt_status() - Callback function which
+ * increments the available fifo descriptor count when the
+ * status of a non-final data descriptor is received
+ *
+ * @user1: [in] - Data Descriptor
+ * @user2: [in] - endpoint idx
+ *
+ * This notify callback is for the destination client.
+ * This function is supplied in ipa3_tx_dp_mul().
+ */
+static void ipa3_tx_client_rx_pkt_status(void *user1, int user2)
+{
+	int ep_idx = user2;
+
+	atomic_inc(&ipa3_ctx->ep[ep_idx].avail_fifo_desc);
+	ipa3_ctx->ep[ep_idx].wstats.rx_pkts_status_rcvd++;
+}
+
+
+/**
+ * ipa3_tx_dp_mul() - Data-path tx handler for multiple packets
+ * @src: [in] - Client that is sending data
+ * @data_desc:	[in] data descriptors from wlan
+ *
+ * This is used to transfer data descriptors received from the
+ * WLAN1_PROD pipe to the IPA HW.
+ *
+ * The function sends the data descriptors from WLAN1_PROD one at
+ * a time using sps_transfer_one and sets the EOT flag on the last
+ * descriptor. Once the send is done from the SPS point of view,
+ * the IPA driver is notified by the supplied callback -
+ * ipa3_sps_irq_tx_no_aggr_notify()
+ *
+ * ipa3_sps_irq_tx_no_aggr_notify() will call the user supplied
+ * callback (from ipa3_connect)
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type src,
+			struct ipa_tx_data_desc *data_desc)
+{
+	/* The second byte in wlan header holds qmap id */
+#define IPA_WLAN_HDR_QMAP_ID_OFFSET 1
+	struct ipa_tx_data_desc *entry;
+	struct ipa3_sys_context *sys;
+	struct ipa3_desc desc[2];
+	u32 num_desc, cnt;
+	int ep_idx;
+
+	IPADBG_LOW("Received data desc anchor:%p\n", data_desc);
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+
+	ep_idx = ipa3_get_ep_mapping(src);
+	if (unlikely(ep_idx == -1)) {
+		IPAERR("dest EP does not exist.\n");
+		goto fail_send;
+	}
+	IPADBG_LOW("ep idx:%d\n", ep_idx);
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	if (unlikely(ipa3_ctx->ep[ep_idx].valid == 0)) {
+		IPAERR("dest EP not valid.\n");
+		goto fail_send;
+	}
+	sys->ep->wstats.rx_hd_rcvd++;
+
+	/* Calculate the number of descriptors */
+	num_desc = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		num_desc++;
+	}
+	IPADBG_LOW("Number of Data Descriptors:%d", num_desc);
+
+	if (atomic_read(&sys->ep->avail_fifo_desc) < num_desc) {
+		IPAERR("Insufficient data descriptors available\n");
+		goto fail_send;
+	}
+
+	/* Assign callback only for last data descriptor */
+	cnt = 0;
+	list_for_each_entry(entry, &data_desc->link, link) {
+		memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+		IPADBG_LOW("Parsing data desc :%d\n", cnt);
+		cnt++;
+		((u8 *)entry->pyld_buffer)[IPA_WLAN_HDR_QMAP_ID_OFFSET] =
+			(u8)sys->ep->cfg.meta.qmap_id;
+
+		/* the tag field will be populated in the ipa3_send() function */
+		desc[0].opcode =
+			ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+		desc[0].type = IPA_IMM_CMD_DESC;
+		desc[0].callback = ipa3_tag_destroy_imm;
+		desc[1].pyld = entry->pyld_buffer;
+		desc[1].len = entry->pyld_len;
+		desc[1].type = IPA_DATA_DESC_SKB;
+		desc[1].user1 = data_desc;
+		desc[1].user2 = ep_idx;
+		IPADBG_LOW("priv:%p pyld_buf:0x%p pyld_len:%d\n",
+			entry->priv, desc[1].pyld, desc[1].len);
+
+		/* In case of last descriptor populate callback */
+		if (cnt == num_desc) {
+			IPADBG_LOW("data desc:%p\n", data_desc);
+			desc[1].callback = ipa3_tx_client_rx_notify_release;
+		} else {
+			desc[1].callback = ipa3_tx_client_rx_pkt_status;
+		}
+
+		IPADBG_LOW("calling ipa3_send()\n");
+		if (ipa3_send(sys, 2, desc, true)) {
+			IPAERR("fail to send skb\n");
+			sys->ep->wstats.rx_pkt_leak += (cnt-1);
+			sys->ep->wstats.rx_dp_fail++;
+			goto fail_send;
+		}
+
+		if (atomic_read(&sys->ep->avail_fifo_desc) >= 0)
+			atomic_dec(&sys->ep->avail_fifo_desc);
+
+		sys->ep->wstats.rx_pkts_rcvd++;
+		IPADBG_LOW("ep=%d fifo desc=%d\n",
+			ep_idx, atomic_read(&sys->ep->avail_fifo_desc));
+	}
+
+	sys->ep->wstats.rx_hd_processed++;
+	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return 0;
+
+fail_send:
+	spin_unlock_bh(&ipa3_ctx->wc_memb.ipa_tx_mul_spinlock);
+	return -EFAULT;
+
+}
+
+void ipa3_free_skb(struct ipa_rx_data *data)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
+	spin_lock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+
+	ipa3_ctx->wc_memb.total_tx_pkts_freed++;
+	rx_pkt = container_of(data, struct ipa3_rx_pkt_wrapper, data);
+
+	ipa3_skb_recycle(rx_pkt->data.skb);
+	(void)skb_put(rx_pkt->data.skb, IPA_WLAN_RX_BUFF_SZ);
+
+	list_add_tail(&rx_pkt->link,
+		&ipa3_ctx->wc_memb.wlan_comm_desc_list);
+	ipa3_ctx->wc_memb.wlan_comm_free_cnt++;
+
+	spin_unlock_bh(&ipa3_ctx->wc_memb.wlan_spinlock);
+}
+
+/* Functions added to support kernel tests */
+
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+			unsigned long *ipa_bam_or_gsi_hdl,
+			u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (sys_in == NULL || clnt_hdl == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+
+	if (ipa_bam_or_gsi_hdl == NULL || ipa_pipe_num == NULL) {
+		IPAERR("NULL args\n");
+		goto fail_gen;
+	}
+	if (sys_in->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client :%d\n", sys_in->client);
+		goto fail_gen;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_ACTIVE_CLIENTS_INC_EP(sys_in->client);
+
+	if (ep->valid == 1) {
+		if (sys_in->client != IPA_CLIENT_APPS_LAN_WAN_PROD) {
+			IPAERR("EP %d already allocated\n", ipa_ep_idx);
+			goto fail_and_disable_clocks;
+		} else {
+			if (ipa3_cfg_ep_hdr(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.hdr)) {
+				IPAERR("fail to configure hdr prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			if (ipa3_cfg_ep_cfg(ipa_ep_idx,
+						&sys_in->ipa_ep_cfg.cfg)) {
+				IPAERR("fail to configure cfg prop of EP %d\n",
+						ipa_ep_idx);
+				result = -EFAULT;
+				goto fail_and_disable_clocks;
+			}
+			IPAERR("client %d (ep: %d) overlay ok sys=%p\n",
+					sys_in->client, ipa_ep_idx, ep->sys);
+			ep->client_notify = sys_in->notify;
+			ep->priv = sys_in->priv;
+			*clnt_hdl = ipa_ep_idx;
+			if (!ep->keep_ipa_awake)
+				IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+			return 0;
+		}
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+
+	ep->valid = 1;
+	ep->client = sys_in->client;
+	ep->client_notify = sys_in->notify;
+	ep->priv = sys_in->priv;
+	ep->keep_ipa_awake = true;
+	if (en_status) {
+		ep->status.status_en = true;
+		ep->status.status_ep = ipa_ep_idx;
+	}
+
+	result = ipa3_enable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n",
+				 result, ipa_ep_idx);
+		goto fail_gen2;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &sys_in->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_gen2;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_gen2;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	*ipa_pipe_num = ipa_ep_idx;
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+		*ipa_bam_or_gsi_hdl = ipa3_ctx->gsi_dev_hdl;
+	else
+		*ipa_bam_or_gsi_hdl = ipa3_ctx->bam_handle;
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPADBG("client %d (ep: %d) connected sys=%p\n", sys_in->client,
+			ipa_ep_idx, ep->sys);
+
+	return 0;
+
+fail_gen2:
+fail_and_disable_clocks:
+	IPA_ACTIVE_CLIENTS_DEC_EP(sys_in->client);
+fail_gen:
+	return result;
+}
+
+int ipa3_sys_teardown(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipa3_disable_data_path(clnt_hdl);
+	ep->valid = 0;
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+	return 0;
+}
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm(Either endpoint or client hdl invalid)\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	ep->gsi_chan_hdl = gsi_ch_hdl;
+	ep->gsi_evt_ring_hdl = gsi_ev_hdl;
+
+	return 0;
+}
+
+static void ipa_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_EVT_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_EVT_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_EVT_EVT_RING_EMPTY_ERR:
+		IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
+{
+	switch (notify->evt_id) {
+	case GSI_CHAN_INVALID_TRE_ERR:
+		IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
+		break;
+	case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
+		IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_BUFFERS_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
+		break;
+	case GSI_CHAN_OUT_OF_RESOURCES_ERR:
+		IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
+		break;
+	case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
+		IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
+		break;
+	case GSI_CHAN_HWO_1_ERR:
+		IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
+		break;
+	default:
+		IPAERR("Unexpected err evt: %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_irq_tx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_tx_pkt_wrapper *tx_pkt;
+
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		tx_pkt = notify->xfer_user_data;
+		queue_work(tx_pkt->sys->wq, &tx_pkt->work);
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+
+	if (!notify) {
+		IPAERR("gsi notify is NULL.\n");
+		return;
+	}
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	sys = (struct ipa3_sys_context *)notify->chan_user_data;
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+					   struct ipa3_rx_pkt_wrapper, link);
+	rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data;
+
+	if (rx_pkt_expected != rx_pkt_rcvd) {
+		IPAERR("Pkt was not filled in head of rx buffer.\n");
+		WARN_ON(1);
+		return;
+	}
+	sys->ep->bytes_xfered_valid = true;
+	sys->ep->bytes_xfered = notify->bytes_xfered;
+	sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+	case GSI_CHAN_EVT_EOB:
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&sys->curr_polling_state)) {
+			/* put the gsi channel into polling mode */
+			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
+
+	if (!notify) {
+		IPAERR("gsi notify is NULL.\n");
+		return;
+	}
+	IPADBG_LOW("event %d notified\n", notify->evt_id);
+
+	sys = (struct ipa3_sys_context *)notify->chan_user_data;
+	if (sys->ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS) {
+		IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
+		return;
+	}
+	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
+		struct ipa3_dma_xfer_wrapper, link);
+	rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)
+		notify->xfer_user_data;
+	if (rx_pkt_expected != rx_pkt_rcvd) {
+		IPAERR("Pkt was not filled in head of rx buffer.\n");
+		WARN_ON(1);
+		return;
+	}
+
+	sys->ep->bytes_xfered_valid = true;
+	sys->ep->bytes_xfered = notify->bytes_xfered;
+	sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest;
+
+	switch (notify->evt_id) {
+	case GSI_CHAN_EVT_EOT:
+		if (!atomic_read(&sys->curr_polling_state)) {
+			/* put the gsi channel into polling mode */
+			gsi_config_channel_mode(sys->ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+			ipa3_inc_acquire_wakelock();
+			atomic_set(&sys->curr_polling_state, 1);
+			queue_work(sys->wq, &sys->work);
+		}
+		break;
+	default:
+		IPAERR("received unexpected event id %d\n", notify->evt_id);
+	}
+}
+
+static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
+	struct ipa3_ep_context *ep)
+{
+	struct gsi_evt_ring_props gsi_evt_ring_props;
+	struct gsi_chan_props gsi_channel_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	struct ipa_gsi_ep_config *gsi_ep_info;
+	dma_addr_t dma_addr;
+	dma_addr_t evt_dma_addr;
+	int result;
+
+	if (!ep) {
+		IPAERR("EP context is empty\n");
+		return -EINVAL;
+	}
+
+	ep->gsi_evt_ring_hdl = ~0;
+	memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
+	/*
+	 * allocate event ring for all interrupt-policy
+	 * pipes and IPA consumer pipes
+	 */
+	if (ep->sys->policy != IPA_POLICY_NOINTR_MODE ||
+	     IPA_CLIENT_IS_CONS(ep->client)) {
+		gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+		gsi_evt_ring_props.intr = GSI_INTR_IRQ;
+		gsi_evt_ring_props.re_size =
+			GSI_EVT_RING_RE_SIZE_16B;
+
+		gsi_evt_ring_props.ring_len = IPA_GSI_EVT_RING_LEN;
+		gsi_evt_ring_props.ring_base_vaddr =
+			dma_alloc_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+			&evt_dma_addr, GFP_KERNEL);
+		if (!gsi_evt_ring_props.ring_base_vaddr) {
+			IPAERR("fail to dma alloc %u bytes\n",
+				IPA_GSI_EVT_RING_LEN);
+			return -ENOMEM;
+		}
+		gsi_evt_ring_props.ring_base_addr = evt_dma_addr;
+
+		/* copy mem info */
+		ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
+		ep->gsi_mem_info.evt_ring_base_addr =
+			gsi_evt_ring_props.ring_base_addr;
+		ep->gsi_mem_info.evt_ring_base_vaddr =
+			gsi_evt_ring_props.ring_base_vaddr;
+
+		gsi_evt_ring_props.int_modt = IPA_GSI_EVT_RING_INT_MODT;
+		gsi_evt_ring_props.int_modc = 1;
+		gsi_evt_ring_props.rp_update_addr = 0;
+		gsi_evt_ring_props.exclusive = true;
+		gsi_evt_ring_props.err_cb = ipa_gsi_evt_ring_err_cb;
+		gsi_evt_ring_props.user_data = NULL;
+
+		result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
+			ipa3_ctx->gsi_dev_hdl, &ep->gsi_evt_ring_hdl);
+		if (result != GSI_STATUS_SUCCESS)
+			goto fail_alloc_evt_ring;
+	}
+
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	} else {
+		gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
+		gsi_channel_props.max_re_expected = ep->sys->rx_pool_sz;
+	}
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ep->client));
+	if (!gsi_ep_info) {
+		IPAERR("Invalid ep number\n");
+		result = -EINVAL;
+		goto fail_get_gsi_ep_info;
+	} else {
+		gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
+	}
+
+	gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+
+	/*
+	 * GSI ring length is calculated based on the desc_fifo_sz which was
+	 * meant to define the BAM desc fifo. GSI descriptors are 16B as opposed
+	 * to 8B for BAM. For PROD pipes there is also an additional descriptor
+	 * for TAG STATUS immediate command.
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		gsi_channel_props.ring_len = 4 * in->desc_fifo_sz;
+	else
+		gsi_channel_props.ring_len = 2 * in->desc_fifo_sz;
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			&dma_addr, GFP_KERNEL);
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_channel_props.ring_len);
+		result = -ENOMEM;
+		goto fail_alloc_channel_ring;
+	}
+	gsi_channel_props.ring_base_addr = dma_addr;
+
+	/* copy mem info */
+	ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
+	ep->gsi_mem_info.chan_ring_base_addr =
+		gsi_channel_props.ring_base_addr;
+	ep->gsi_mem_info.chan_ring_base_vaddr =
+		gsi_channel_props.ring_base_vaddr;
+
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	if (ep->client == IPA_CLIENT_APPS_CMD_PROD)
+		gsi_channel_props.low_weight = IPA_GSI_MAX_CH_LOW_WEIGHT;
+	else
+		gsi_channel_props.low_weight = 1;
+	gsi_channel_props.chan_user_data = ep->sys;
+	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+	if (IPA_CLIENT_IS_PROD(ep->client))
+		gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+	else
+		gsi_channel_props.xfer_cb = ipa_gsi_irq_rx_notify_cb;
+	if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
+		gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_alloc_channel;
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.gpi.max_outstanding_tre = gsi_ep_info->ipa_if_tlv *
+		GSI_CHAN_RE_SIZE_16B;
+	ch_scratch.gpi.outstanding_threshold = 2 * GSI_CHAN_RE_SIZE_16B;
+	result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to write scratch %d\n", result);
+		goto fail_write_channel_scratch;
+	}
+
+	result = gsi_start_channel(ep->gsi_chan_hdl);
+	if (result != GSI_STATUS_SUCCESS)
+		goto fail_start_channel;
+	if (ep->client == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS)
+		gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_POLL);
+	return 0;
+
+fail_start_channel:
+fail_write_channel_scratch:
+	if (gsi_dealloc_channel(ep->gsi_chan_hdl)
+		!= GSI_STATUS_SUCCESS) {
+		IPAERR("Failed to dealloc GSI chan.\n");
+		BUG();
+	}
+fail_alloc_channel:
+	dma_free_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+			gsi_channel_props.ring_base_vaddr, dma_addr);
+fail_alloc_channel_ring:
+fail_get_gsi_ep_info:
+	if (ep->gsi_evt_ring_hdl != ~0) {
+		gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = ~0;
+	}
+fail_alloc_evt_ring:
+	if (gsi_evt_ring_props.ring_base_vaddr)
+		dma_free_coherent(ipa3_ctx->pdev, IPA_GSI_EVT_RING_LEN,
+			gsi_evt_ring_props.ring_base_vaddr, evt_dma_addr);
+	IPAERR("Return with err: %d\n", result);
+	return result;
+}
+
+static int ipa_populate_tag_field(struct ipa3_desc *desc,
+		struct ipa3_tx_pkt_wrapper *tx_pkt,
+		struct ipahal_imm_cmd_pyld **tag_pyld_ret)
+{
+	struct ipahal_imm_cmd_pyld *tag_pyld;
+	struct ipahal_imm_cmd_ip_packet_tag_status tag_cmd = {0};
+
+	/* populate tag field only if it is NULL */
+	if (desc->pyld == NULL) {
+		tag_cmd.tag = pointer_to_tag_wa(tx_pkt);
+		tag_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &tag_cmd, true);
+		if (unlikely(!tag_pyld)) {
+			IPAERR("Failed to construct ip_packet_tag_status\n");
+			return -EFAULT;
+		}
+		/*
+		 * This is for 32-bit pointer, will need special
+		 * handling if 64-bit pointer is used
+		 */
+		IPADBG_LOW("tx_pkt sent in tag: 0x%p\n", tx_pkt);
+		desc->pyld = tag_pyld->data;
+		desc->len = tag_pyld->len;
+		desc->user1 = tag_pyld;
+
+		*tag_pyld_ret = tag_pyld;
+	}
+	return 0;
+}
+
+static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
+		struct ipa_mem_buffer *mem_info)
+{
+	int ret;
+	struct gsi_chan_xfer_notify xfer_notify;
+	struct ipa3_rx_pkt_wrapper *rx_pkt;
+
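+	/*
+	 * Fast path: if the rx notify callback already recorded a completed
+	 * transfer for this EP, consume it here instead of polling the GSI
+	 * channel again.
+	 */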
+	if (sys->ep->bytes_xfered_valid) {
+		mem_info->phys_base = sys->ep->phys_base;
+		mem_info->size = (u32)sys->ep->bytes_xfered;
+		sys->ep->bytes_xfered_valid = false;
+		return GSI_STATUS_SUCCESS;
+	}
+
+	ret = gsi_poll_channel(sys->ep->gsi_chan_hdl,
+		&xfer_notify);
+	if (ret == GSI_STATUS_POLL_EMPTY)
+		return ret;
+	else if (ret != GSI_STATUS_SUCCESS) {
+		IPAERR("Poll channel err: %d\n", ret);
+		return ret;
+	}
+
+	rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+		xfer_notify.xfer_user_data;
+	mem_info->phys_base = rx_pkt->data.dma_addr;
+	mem_info->size = xfer_notify.bytes_xfered;
+
+	return ret;
+}
+
+static int ipa_handle_rx_core_gsi(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state)
+{
+	int ret;
+	int cnt = 0;
+	struct ipa_mem_buffer mem_info = {0};
+
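+	/*
+	 * Keep draining packets only while curr_polling_state still matches
+	 * the mode this call was invoked for; stop as soon as another
+	 * context flips it.
+	 */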
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+			!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
+		ret = ipa_poll_gsi_pkt(sys, &mem_info);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+			ipa3_dma_memcpy_notify(sys, &mem_info);
+		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+			ipa3_wlan_wq_rx_common(sys, mem_info.size);
+		else
+			ipa3_wq_rx_common(sys, mem_info.size);
+
+		cnt++;
+	}
+	return cnt;
+}
+
+static int ipa_poll_sps_pkt(struct ipa3_sys_context *sys,
+		struct ipa_mem_buffer *mem_info)
+{
+	int ret;
+	struct sps_iovec iov;
+
+	ret = sps_get_iovec(sys->ep->ep_hdl, &iov);
+	if (ret) {
+		IPAERR("sps_get_iovec failed %d\n", ret);
+		return ret;
+	}
+
+	if (iov.addr == 0)
+		return -EIO;
+
+	mem_info->phys_base = iov.addr;
+	mem_info->size = iov.size;
+	return 0;
+}
+
+static int ipa_handle_rx_core_sps(struct ipa3_sys_context *sys,
+	bool process_all, bool in_poll_state)
+{
+	int ret;
+	int cnt = 0;
+	struct ipa_mem_buffer mem_info = {0};
+
+	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
+			!atomic_read(&sys->curr_polling_state))) {
+		if (cnt && !process_all)
+			break;
+
+		ret = ipa_poll_sps_pkt(sys, &mem_info);
+		if (ret)
+			break;
+
+		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
+			ipa3_dma_memcpy_notify(sys, &mem_info);
+		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
+			ipa3_wlan_wq_rx_common(sys, mem_info.size);
+		else
+			ipa3_wq_rx_common(sys, mem_info.size);
+
+		cnt++;
+	}
+
+	return cnt;
+}
+
+/**
+ * ipa3_rx_poll() - Poll the rx packets from IPA HW. This
+ * function is executed in the softirq context
+ * @clnt_hdl: client handle of the polled endpoint
+ * @weight: NAPI poll weight (budget) for this call
+ *
+ * If no packets are polled, the driver schedules a switch back to
+ * interrupt mode
+ *
+ * Return: number of polled packets, 0 on error
+ */
+int ipa3_rx_poll(u32 clnt_hdl, int weight)
+{
+	struct ipa3_ep_context *ep;
+	int ret;
+	int cnt = 0;
+	unsigned int delay = 1;
+	struct ipa_mem_buffer mem_info = {0};
+
+	IPADBG("\n");
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm 0x%x\n", clnt_hdl);
+		return cnt;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	while (cnt < weight &&
+		   atomic_read(&ep->sys->curr_polling_state)) {
+
+		if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+			ret = ipa_poll_gsi_pkt(ep->sys, &mem_info);
+		else
+			ret = ipa_poll_sps_pkt(ep->sys, &mem_info);
+
+		if (ret)
+			break;
+
+		ipa3_wq_rx_common(ep->sys, mem_info.size);
+		cnt += 5;
+	}
+
+	if (cnt == 0) {
+		ep->inactive_cycles++;
+		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
+
+		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
+			ep->switch_to_intr = true;
+			delay = 0;
+		}
+		queue_delayed_work(ep->sys->wq,
+			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
+	} else
+		ep->inactive_cycles = 0;
+
+	return cnt;
+}
+
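+/*
+ * TAG workaround helpers. The assumption here is that kernel pointers on
+ * these targets have their 16 most significant bits all set, so only the
+ * low 48 bits are carried in the TAG status and the 0xFFFF prefix is
+ * restored when converting a tag back into a pointer.
+ */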
+static unsigned long tag_to_pointer_wa(uint64_t tag)
+{
+	return 0xFFFF000000000000 | (unsigned long) tag;
+}
+
+static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
+{
+	u16 temp;
+	/* keep this check even though it may cost some throughput */
+	if (ipa3_is_msm_device()) {
+		temp = (u16) (~((unsigned long) tx_pkt &
+			0xFFFF000000000000) >> 48);
+		if (temp) {
+			IPAERR("The 16 MSB prefix is not all 1s (%p)\n",
+				tx_pkt);
+			BUG();
+		}
+	}
+	return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
+}
+
+/**
+ * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
+ *
+ * A hardware limitation requires avoiding the use of GSI physical
+ * channel 20. This function allocates GSI physical channel 20 and holds
+ * it to prevent others from using it.
+ *
+ * Return codes: 0 on success, negative on failure
+ */
+int ipa_gsi_ch20_wa(void)
+{
+	struct gsi_chan_props gsi_channel_props;
+	dma_addr_t dma_addr;
+	int result;
+	int i;
+	unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
+	unsigned long chan_hdl_to_keep;
+
+
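+	/*
+	 * Approach: allocate placeholder channels for the ids preceding
+	 * channel 20 so that channel 20 itself can be claimed next, then
+	 * release the placeholders while keeping channel 20 allocated.
+	 */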
+	memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+	gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+	gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+	gsi_channel_props.evt_ring_hdl = ~0;
+	gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
+	gsi_channel_props.ring_base_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+		&dma_addr, 0);
+	if (!gsi_channel_props.ring_base_vaddr) {
+		IPAERR("fail to dma alloc %u bytes\n",
+			gsi_channel_props.ring_len);
+		return -ENOMEM;
+	}
+	gsi_channel_props.ring_base_addr = dma_addr;
+	gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+	gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	gsi_channel_props.low_weight = 1;
+	gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+	gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+
+	/* first allocate channels up to channel 20 */
+	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+		gsi_channel_props.ch_id = i;
+		result = gsi_alloc_channel(&gsi_channel_props,
+			ipa3_ctx->gsi_dev_hdl,
+			&chan_hdl[i]);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("failed to alloc channel %d err %d\n",
+				i, result);
+			return result;
+		}
+	}
+
+	/* allocate channel 20 */
+	gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
+	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+		&chan_hdl_to_keep);
+	if (result != GSI_STATUS_SUCCESS) {
+		IPAERR("failed to alloc channel %d err %d\n",
+			i, result);
+		return result;
+	}
+
+	/* release all other channels */
+	for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+		result = gsi_dealloc_channel(chan_hdl[i]);
+		if (result != GSI_STATUS_SUCCESS) {
+			IPAERR("failed to dealloc channel %d err %d\n",
+				i, result);
+			return result;
+		}
+	}
+
+	/* DMA memory shall not be freed as it is used by channel 20 */
+	return 0;
+}
+
+/**
+ * ipa_adjust_ra_buff_base_sz()
+ *
+ * Return value: the largest power of two which is smaller
+ * than the given aggregation byte limit after the IPA_MTU and
+ * IPA_GENERIC_RX_BUFF_LIMIT headroom is added to it
+ */
+static u32 ipa_adjust_ra_buff_base_sz(u32 aggr_byte_limit)
+{
+	aggr_byte_limit += IPA_MTU;
+	aggr_byte_limit += IPA_GENERIC_RX_BUFF_LIMIT;
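+	/*
+	 * Round up to the next power of two by smearing the set bits
+	 * downwards, then halve the result to get the largest power of two
+	 * below the adjusted limit. E.g. an adjusted limit of 5000 smears
+	 * to 8191, increments to 8192 and is returned as 4096.
+	 */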
+	aggr_byte_limit--;
+	aggr_byte_limit |= aggr_byte_limit >> 1;
+	aggr_byte_limit |= aggr_byte_limit >> 2;
+	aggr_byte_limit |= aggr_byte_limit >> 4;
+	aggr_byte_limit |= aggr_byte_limit >> 8;
+	aggr_byte_limit |= aggr_byte_limit >> 16;
+	aggr_byte_limit++;
+	return aggr_byte_limit >> 1;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
new file mode 100644
index 0000000..340bacab0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -0,0 +1,1592 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_FLT_TABLE_INDEX_NOT_FOUND		(-1)
+#define IPA_FLT_STATUS_OF_ADD_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_DEL_FAILED		(-1)
+#define IPA_FLT_STATUS_OF_MDFY_FAILED		(-1)
+
+#define IPA_FLT_GET_RULE_TYPE(__entry) \
+	( \
+	((__entry)->rule.hashable) ? \
+	(IPA_RULE_HASHABLE):(IPA_RULE_NON_HASHABLE) \
+	)
+
+/**
+ * ipa3_generate_flt_hw_rule() - generates the filtering hardware rule
+ * @ip: the ip address family type
+ * @entry: filtering entry
+ * @buf: output buffer, buf == NULL means the caller only wants to know
+ *		the size of the rule as seen by HW, so a scratch buffer is
+ *		used instead. With this scheme the rule is generated twice:
+ *		once to learn its size using the scratch buffer, and a
+ *		second time to write the rule into the caller-supplied
+ *		buffer of the required size
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa3_generate_flt_hw_rule(enum ipa_ip_type ip,
+		struct ipa3_flt_entry *entry, u8 *buf)
+{
+	struct ipahal_flt_rule_gen_params gen_params;
+	int res = 0;
+
+	memset(&gen_params, 0, sizeof(gen_params));
+
+	gen_params.ipt = ip;
+	if (entry->rt_tbl)
+		gen_params.rt_tbl_idx = entry->rt_tbl->idx;
+	else
+		gen_params.rt_tbl_idx = entry->rule.rt_tbl_idx;
+
+	gen_params.priority = entry->prio;
+	gen_params.id = entry->rule_id;
+	gen_params.rule = (const struct ipa_flt_rule *)&entry->rule;
+
+	res = ipahal_flt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res)
+		IPAERR("failed to generate flt h/w rule\n");
+
+	return res;
+}
+
+static void __ipa_reap_sys_flt_tbls(enum ipa_ip_type ip, enum ipa_rule_type rlt)
+{
+	struct ipa3_flt_tbl *tbl;
+	int i;
+
+	IPADBG_LOW("reaping sys flt tbls ip=%d rlt=%d\n", ip, rlt);
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->prev_mem[rlt].phys_base) {
+			IPADBG_LOW("reaping flt tbl (prev) pipe=%d\n", i);
+			ipahal_free_dma_mem(&tbl->prev_mem[rlt]);
+		}
+
+		if (list_empty(&tbl->head_flt_rule_list)) {
+			if (tbl->curr_mem[rlt].phys_base) {
+				IPADBG_LOW("reaping flt tbl (curr) pipe=%d\n",
+					i);
+				ipahal_free_dma_mem(&tbl->curr_mem[rlt]);
+			}
+		}
+	}
+}
+
+/**
+ * ipa_prep_flt_tbl_for_cmt() - preparing the flt table for commit
+ *  assign priorities to the rules, calculate their sizes and calculate
+ *  the overall table size
+ * @ip: the ip address family type
+ * @tbl: the flt tbl to be prepared
+ * @pipe_idx: the ep pipe appropriate for the given tbl
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_flt_tbl_for_cmt(enum ipa_ip_type ip,
+	struct ipa3_flt_tbl *tbl, int pipe_idx)
+{
+	struct ipa3_flt_entry *entry;
+	int prio_i;
+	int max_prio;
+	u32 hdr_width;
+
+	tbl->sz[IPA_RULE_HASHABLE] = 0;
+	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+	max_prio = ipahal_get_rule_max_priority();
+
+	prio_i = max_prio;
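+	/*
+	 * Rules flagged with max_prio all share the maximum rule priority;
+	 * every other rule gets the next, strictly decreasing, priority in
+	 * list order.
+	 */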
+	list_for_each_entry(entry, &tbl->head_flt_rule_list, link) {
+
+		if (entry->rule.max_prio) {
+			entry->prio = max_prio;
+		} else {
+			if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot decrease rule priority - %d\n",
+					prio_i);
+				return -EPERM;
+			}
+			entry->prio = prio_i;
+		}
+
+		if (ipa3_generate_flt_hw_rule(ip, entry, NULL)) {
+			IPAERR("failed to calculate HW FLT rule size\n");
+			return -EPERM;
+		}
+		IPADBG_LOW("pipe %d rule_id(handle) %u hw_len %d priority %u\n",
+			pipe_idx, entry->rule_id, entry->hw_len, entry->prio);
+
+		if (entry->rule.hashable)
+			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+		else
+			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+	}
+
+	if ((tbl->sz[IPA_RULE_HASHABLE] +
+		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		IPADBG_LOW("flt tbl pipe %d is with zero total size\n",
+			pipe_idx);
+		return 0;
+	}
+
+	hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+	/* for the header word */
+	if (tbl->sz[IPA_RULE_HASHABLE])
+		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+	if (tbl->sz[IPA_RULE_NON_HASHABLE])
+		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+	IPADBG_LOW("FLT tbl pipe idx %d hash sz %u non-hash sz %u\n", pipe_idx,
+		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+	return 0;
+}
+
+/**
+ * ipa_translate_flt_tbl_to_hw_fmt() - translate the flt driver structures
+ *  (rules and tables) to HW format and fill it in the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ *  ipa sram
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_flt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt, u8 *base, u8 *hdr, u32 body_ofst)
+{
+	u64 offset;
+	u8 *body_i;
+	int res;
+	struct ipa3_flt_entry *entry;
+	u8 *tbl_mem_buf;
+	struct ipa_mem_buffer tbl_mem;
+	struct ipa3_flt_tbl *tbl;
+	int i;
+	int hdr_idx = 0;
+
+	body_i = base;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (tbl->sz[rlt] == 0) {
+			hdr_idx++;
+			continue;
+		}
+		if (tbl->in_sys[rlt]) {
+			/* only body (no header) */
+			tbl_mem.size = tbl->sz[rlt] -
+				ipahal_get_hw_tbl_hdr_width();
+			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+				IPAERR("fail to alloc sys tbl of size %d\n",
+					tbl_mem.size);
+				goto err;
+			}
+
+			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+				hdr, hdr_idx, true)) {
+				IPAERR("fail to wrt sys tbl addr to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			tbl_mem_buf = tbl_mem.base;
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+				link) {
+				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa3_generate_flt_hw_rule(
+					ip, entry, tbl_mem_buf);
+				if (res) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto hdr_update_fail;
+				}
+				tbl_mem_buf += entry->hw_len;
+			}
+
+			if (tbl->curr_mem[rlt].phys_base) {
+				WARN_ON(tbl->prev_mem[rlt].phys_base);
+				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+			}
+			tbl->curr_mem[rlt] = tbl_mem;
+		} else {
+			offset = body_i - base + body_ofst;
+
+			/* update the hdr at the right index */
+			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+				hdr_idx, true)) {
+				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_flt_rule_list,
+				link) {
+				if (IPA_FLT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa3_generate_flt_hw_rule(
+					ip, entry, body_i);
+				if (res) {
+					IPAERR("failed to gen HW FLT rule\n");
+					goto err;
+				}
+				body_i += entry->hw_len;
+			}
+
+			/*
+			 * advance body_i to the next table alignment as
+			 * local tables are ordered back-to-back
+			 */
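+			/*
+			 * Note: this assumes the alignment helper returns an
+			 * alignment mask (alignment - 1), so adding it and
+			 * then clearing the mask bits rounds body_i up to the
+			 * next aligned address.
+			 */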
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+		}
+		hdr_idx++;
+	}
+
+	return 0;
+
+hdr_update_fail:
+	ipahal_free_dma_mem(&tbl_mem);
+err:
+	return -EPERM;
+}
+
+/**
+ * ipa_generate_flt_hw_tbl_img() - generates the flt hw tbls.
+ *  headers and bodies are created in buffers that will later be written
+ *  into the local memory (sram)
+ * @ip: the ip address family type
+ * @alloc_params: In and Out parameters for the allocations of the buffers
+ *  4 buffers: hdr and bdy, each hashable and non-hashable
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_flt_hw_tbl_img(enum ipa_ip_type ip,
+	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+	int rc = 0;
+
+	if (ip == IPA_IP_v4) {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_nhash_ofst) -
+			IPA_MEM_PART(v4_flt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_flt_hash_ofst) -
+			IPA_MEM_PART(v4_flt_hash_ofst);
+	} else {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_nhash_ofst) -
+			IPA_MEM_PART(v6_flt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_flt_hash_ofst) -
+			IPA_MEM_PART(v6_flt_hash_ofst);
+	}
+
+	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+		IPAERR("fail to allocate FLT HW TBL images. IP %d\n", ip);
+		rc = -ENOMEM;
+		goto allocate_failed;
+	}
+
+	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+		hash_bdy_start_ofst)) {
+		IPAERR("fail to translate hashable flt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+	if (ipa_translate_flt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+		nhash_bdy_start_ofst)) {
+		IPAERR("fail to translate non-hash flt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+
+	return rc;
+
+translate_fail:
+	if (alloc_params->hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params->hash_hdr);
+	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+	if (alloc_params->hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->hash_bdy);
+	if (alloc_params->nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_failed:
+	return rc;
+}
+
+/**
+ * ipa_flt_valid_lcl_tbl_size() - validate if the space allocated for flt
+ * tbl bodies at the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ *
+ * Return: true if enough space available or false in other cases
+ */
+static bool ipa_flt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+	u16 avail;
+
+	if (!bdy) {
+		IPAERR("Bad parameters, bdy = NULL\n");
+		return false;
+	}
+
+	if (ipt == IPA_IP_v4)
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v4_flt_hash_size) :
+			IPA_MEM_PART(apps_v4_flt_nhash_size);
+	else
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v6_flt_hash_size) :
+			IPA_MEM_PART(apps_v6_flt_nhash_size);
+
+	if (bdy->size <= avail)
+		return true;
+
+	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+	       bdy->size, avail, ipt, rlt);
+	return false;
+}
+
+/**
+ * ipa_flt_alloc_cmd_buffers() - alloc descriptors and imm cmds
+ *  payload pointers buffers for headers and bodies of flt structure
+ *  as well as place for flush imm.
+ * @ip: the ip address family type
+ * @desc: [OUT] descriptor buffer
+ * @cmd_pyld: [OUT] imm commands payload pointers buffer
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_flt_alloc_cmd_buffers(enum ipa_ip_type ip,
+	struct ipa3_desc **desc, struct ipahal_imm_cmd_pyld ***cmd_pyld)
+{
+	u16 entries;
+
+	/* +3: 2 for bodies (hashable and non-hashable) and 1 for flushing */
+	entries = (ipa3_ctx->ep_flt_num) * 2 + 3;
+
+	*desc = kcalloc(entries, sizeof(**desc), GFP_ATOMIC);
+	if (*desc == NULL) {
+		IPAERR("fail to alloc desc blob ip %d\n", ip);
+		goto fail_desc_alloc;
+	}
+
+	*cmd_pyld = kcalloc(entries, sizeof(**cmd_pyld), GFP_ATOMIC);
+	if (*cmd_pyld == NULL) {
+		IPAERR("fail to alloc cmd pyld blob ip %d\n", ip);
+		goto fail_cmd_alloc;
+	}
+
+	return 0;
+
+fail_cmd_alloc:
+	kfree(*desc);
+fail_desc_alloc:
+	return -ENOMEM;
+}
+
+/**
+ * ipa_flt_skip_pipe_config() - should the flt configuration of this EP
+ *  be skipped?
+ *  Configuration is skipped for modem-owned pipes and for pipes marked
+ *  to be skipped by pre-configuration.
+ * @pipe: the EP pipe index
+ *
+ * Return: true if the configuration should be skipped, false otherwise
+ */
+static bool ipa_flt_skip_pipe_config(int pipe)
+{
+	if (ipa_is_modem_pipe(pipe)) {
+		IPADBG_LOW("skip %d - modem owned pipe\n", pipe);
+		return true;
+	}
+
+	if (ipa3_ctx->skip_ep_cfg_shadow[pipe]) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
+	if ((ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD) == pipe
+		&& ipa3_ctx->modem_cfg_emb_pipe_flt)) {
+		IPADBG_LOW("skip %d\n", pipe);
+		return true;
+	}
+
+	return false;
+}
+
+/**
+ * __ipa_commit_flt_v3() - commit flt tables to the hw
+ *  commit the headers, and the bodies if they are local, with internal
+ *  cache flushing.
+ *  The headers (and local bodies) are first created in dma buffers and
+ *  then written to the SRAM via immediate commands
+ * @ip: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_flt_v3(enum ipa_ip_type ip)
+{
+	struct ipahal_fltrt_alloc_imgs_params alloc_params;
+	int rc = 0;
+	struct ipa3_desc *desc;
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_dma_shared_mem mem_cmd = {0};
+	struct ipahal_imm_cmd_pyld **cmd_pyld;
+	int num_cmd = 0;
+	int i;
+	int hdr_idx;
+	u32 lcl_hash_hdr, lcl_nhash_hdr;
+	u32 lcl_hash_bdy, lcl_nhash_bdy;
+	bool lcl_hash, lcl_nhash;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	u32 tbl_hdr_width;
+	struct ipa3_flt_tbl *tbl;
+
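+	/*
+	 * Commit sequence: size and prioritize each per-pipe table, build
+	 * the header/body images in DMA memory, then push them to SRAM via
+	 * DMA_SHARED_MEM immediate commands preceded by a hash-cache flush.
+	 */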
+	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+	memset(&alloc_params, 0, sizeof(alloc_params));
+	alloc_params.ipt = ip;
+	alloc_params.tbls_num = ipa3_ctx->ep_flt_num;
+
+	if (ip == IPA_IP_v4) {
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_hash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_flt_nhash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_flt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip4_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_flt_tbl_nhash_lcl;
+	} else {
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_hash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_flt_nhash_ofst) +
+			tbl_hdr_width; /* to skip the bitmap */
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_flt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip6_flt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_flt_tbl_nhash_lcl;
+	}
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		if (ipa_prep_flt_tbl_for_cmt(ip, tbl, i)) {
+			rc = -EPERM;
+			goto prep_failed;
+		}
+		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+			tbl->sz[IPA_RULE_HASHABLE]) {
+			alloc_params.num_lcl_hash_tbls++;
+			alloc_params.total_sz_lcl_hash_tbls +=
+				tbl->sz[IPA_RULE_HASHABLE];
+			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+
+		}
+		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+			tbl->sz[IPA_RULE_NON_HASHABLE]) {
+			alloc_params.num_lcl_nhash_tbls++;
+			alloc_params.total_sz_lcl_nhash_tbls +=
+				tbl->sz[IPA_RULE_NON_HASHABLE];
+			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+		}
+	}
+
+	if (ipa_generate_flt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR("fail to generate FLT HW TBL image. IP %d\n", ip);
+		rc = -EFAULT;
+		goto prep_failed;
+	}
+
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+		&alloc_params.hash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	if (!ipa_flt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+
+	if (ipa_flt_alloc_cmd_buffers(ip, &desc, &cmd_pyld)) {
+		rc = -ENOMEM;
+		goto fail_size_valid;
+	}
+
+	/* flushing ipa internal hashable flt rules cache */
+	memset(&flush, 0, sizeof(flush));
+	if (ip == IPA_IP_v4)
+		flush.v4_flt = true;
+	else
+		flush.v6_flt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld[0] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+	if (!cmd_pyld[0]) {
+		IPAERR("fail construct register_write imm cmd: IP %d\n", ip);
+		rc = -EFAULT;
+		goto fail_reg_write_construct;
+	}
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].pyld = cmd_pyld[0]->data;
+	desc[0].len = cmd_pyld[0]->len;
+	desc[0].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	hdr_idx = 0;
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i)) {
+			IPADBG_LOW("skip %d - not filtering pipe\n", i);
+			continue;
+		}
+
+		if (ipa_flt_skip_pipe_config(i)) {
+			hdr_idx++;
+			continue;
+		}
+
+		IPADBG_LOW("Prepare imm cmd for hdr at index %d for pipe %d\n",
+			hdr_idx, i);
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = tbl_hdr_width;
+		mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base +
+			hdr_idx * tbl_hdr_width;
+		mem_cmd.local_addr = lcl_nhash_hdr +
+			hdr_idx * tbl_hdr_width;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = tbl_hdr_width;
+		mem_cmd.system_addr = alloc_params.hash_hdr.phys_base +
+			hdr_idx * tbl_hdr_width;
+		mem_cmd.local_addr = lcl_hash_hdr +
+			hdr_idx * tbl_hdr_width;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+
+		hdr_idx++;
+	}
+
+	if (lcl_nhash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.nhash_bdy.size;
+		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_nhash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+	}
+	if (lcl_hash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_bdy.size;
+		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_hash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd: IP = %d\n",
+				ip);
+			rc = -EFAULT;
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd++].type = IPA_IMM_CMD_DESC;
+	}
+
+	if (ipa3_send_cmd(num_cmd, desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+
+	IPADBG_LOW("Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+	IPADBG_LOW("Non-Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+	if (alloc_params.hash_bdy.size) {
+		IPADBG_LOW("Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+			alloc_params.hash_bdy.phys_base,
+			alloc_params.hash_bdy.size);
+	}
+
+	if (alloc_params.nhash_bdy.size) {
+		IPADBG_LOW("Non-Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+			alloc_params.nhash_bdy.phys_base,
+			alloc_params.nhash_bdy.size);
+	}
+
+	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_HASHABLE);
+	__ipa_reap_sys_flt_tbls(ip, IPA_RULE_NON_HASHABLE);
+
+fail_imm_cmd_construct:
+	for (i = 0 ; i < num_cmd ; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_reg_write_construct:
+	kfree(desc);
+	kfree(cmd_pyld);
+fail_size_valid:
+	if (alloc_params.hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params.hash_hdr);
+	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+	if (alloc_params.hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.hash_bdy);
+	if (alloc_params.nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+prep_failed:
+	return rc;
+}
+
+static int __ipa_validate_flt_rule(const struct ipa_flt_rule *rule,
+		struct ipa3_rt_tbl **rt_tbl, enum ipa_ip_type ip)
+{
+	if (rule->action != IPA_PASS_TO_EXCEPTION) {
+		if (!rule->eq_attrib_type) {
+			if (!rule->rt_tbl_hdl) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+
+			*rt_tbl = ipa3_id_find(rule->rt_tbl_hdl);
+			if (*rt_tbl == NULL) {
+				IPAERR("RT tbl not found\n");
+				goto error;
+			}
+
+			if ((*rt_tbl)->cookie != IPA_COOKIE) {
+				IPAERR("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (rule->rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	}
+
+	if (rule->rule_id) {
+		if (!(rule->rule_id & ipahal_get_rule_id_hi_bit())) {
+			IPAERR("invalid rule_id provided 0x%x\n"
+				"rule_id with bit 0x%x are auto generated\n",
+				rule->rule_id, ipahal_get_rule_id_hi_bit());
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_create_flt_entry(struct ipa3_flt_entry **entry,
+		const struct ipa_flt_rule *rule, struct ipa3_rt_tbl *rt_tbl,
+		struct ipa3_flt_tbl *tbl)
+{
+	int id;
+
+	*entry = kmem_cache_zalloc(ipa3_ctx->flt_rule_cache, GFP_KERNEL);
+	if (!*entry) {
+		IPAERR("failed to alloc FLT rule object\n");
+		goto error;
+	}
+	INIT_LIST_HEAD(&((*entry)->link));
+	(*entry)->rule = *rule;
+	(*entry)->cookie = IPA_COOKIE;
+	(*entry)->rt_tbl = rt_tbl;
+	(*entry)->tbl = tbl;
+	if (rule->rule_id) {
+		id = rule->rule_id;
+	} else {
+		id = ipa3_alloc_rule_id(&tbl->rule_ids);
+		if (id < 0) {
+			IPAERR("failed to allocate rule id\n");
+			WARN_ON(1);
+			goto rule_id_fail;
+		}
+	}
+	(*entry)->rule_id = id;
+
+	return 0;
+
+rule_id_fail:
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, *entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_finish_flt_rule_add(struct ipa3_flt_tbl *tbl,
+		struct ipa3_flt_entry *entry, u32 *rule_hdl)
+{
+	int id;
+
+	tbl->rule_cnt++;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+	}
+	*rule_hdl = id;
+	entry->id = id;
+	IPADBG_LOW("add flt rule rule_cnt=%d\n", tbl->rule_cnt);
+
+	return 0;
+}
+
+static int __ipa_add_flt_rule(struct ipa3_flt_tbl *tbl, enum ipa_ip_type ip,
+			      const struct ipa_flt_rule *rule, u8 add_rear,
+			      u32 *rule_hdl)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+		goto error;
+
+	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+		goto error;
+
+	if (add_rear) {
+		if (tbl->sticky_rear)
+			list_add_tail(&entry->link,
+					tbl->head_flt_rule_list.prev);
+		else
+			list_add_tail(&entry->link, &tbl->head_flt_rule_list);
+	} else {
+		list_add(&entry->link, &tbl->head_flt_rule_list);
+	}
+
+	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_flt_rule_after(struct ipa3_flt_tbl *tbl,
+				const struct ipa_flt_rule *rule,
+				u32 *rule_hdl,
+				enum ipa_ip_type ip,
+				struct ipa3_flt_entry **add_after_entry)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	if (!*add_after_entry)
+		goto error;
+
+	if (rule == NULL || rule_hdl == NULL) {
+		IPAERR("bad parms rule=%p rule_hdl=%p\n", rule,
+				rule_hdl);
+		goto error;
+	}
+
+	if (__ipa_validate_flt_rule(rule, &rt_tbl, ip))
+		goto error;
+
+	if (__ipa_create_flt_entry(&entry, rule, rt_tbl, tbl))
+		goto error;
+
+	list_add(&entry->link, &((*add_after_entry)->link));
+
+	__ipa_finish_flt_rule_add(tbl, entry, rule_hdl);
+
+	/*
+	 * prepare for next insertion
+	 */
+	*add_after_entry = entry;
+
+	return 0;
+
+error:
+	*add_after_entry = NULL;
+	return -EPERM;
+}
+
+static int __ipa_del_flt_rule(u32 rule_hdl)
+{
+	struct ipa3_flt_entry *entry;
+	int id;
+
+	entry = ipa3_id_find(rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+	IPADBG("del flt rule rule_cnt=%d rule_id=%d\n",
+		entry->tbl->rule_cnt, entry->rule_id);
+	entry->cookie = 0;
+	/* if rule id was allocated from idr, remove it */
+	if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+		idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+
+	kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+
+	return 0;
+}
+
+static int __ipa_mdfy_flt_rule(struct ipa_flt_rule_mdfy *frule,
+		enum ipa_ip_type ip)
+{
+	struct ipa3_flt_entry *entry;
+	struct ipa3_rt_tbl *rt_tbl = NULL;
+
+	entry = ipa3_id_find(frule->rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt--;
+
+	if (frule->rule.action != IPA_PASS_TO_EXCEPTION) {
+		if (!frule->rule.eq_attrib_type) {
+			if (!frule->rule.rt_tbl_hdl) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+
+			rt_tbl = ipa3_id_find(frule->rule.rt_tbl_hdl);
+			if (rt_tbl == NULL) {
+				IPAERR("RT tbl not found\n");
+				goto error;
+			}
+
+			if (rt_tbl->cookie != IPA_COOKIE) {
+				IPAERR("RT table cookie is invalid\n");
+				goto error;
+			}
+		} else {
+			if (frule->rule.rt_tbl_idx > ((ip == IPA_IP_v4) ?
+				IPA_MEM_PART(v4_modem_rt_index_hi) :
+				IPA_MEM_PART(v6_modem_rt_index_hi))) {
+				IPAERR("invalid RT tbl\n");
+				goto error;
+			}
+		}
+	}
+
+	entry->rule = frule->rule;
+	entry->rt_tbl = rt_tbl;
+	if (entry->rt_tbl)
+		entry->rt_tbl->ref_cnt++;
+	entry->hw_len = 0;
+	entry->prio = 0;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
+{
+	*ipa_ep_idx = ipa3_get_ep_mapping(ep);
+	if (*ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+		IPAERR("ep not valid ep=%d\n", ep);
+		return -EINVAL;
+	}
+	if (ipa3_ctx->ep[*ipa_ep_idx].valid == 0)
+		IPADBG("ep not connected ep_idx=%d\n", *ipa_ep_idx);
+
+	if (!ipa_is_ep_support_flt(*ipa_ep_idx)) {
+		IPAERR("ep do not support filtering ep=%d\n", ep);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int __ipa_add_ep_flt_rule(enum ipa_ip_type ip, enum ipa_client_type ep,
+				 const struct ipa_flt_rule *rule, u8 add_rear,
+				 u32 *rule_hdl)
+{
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+
+	if (rule == NULL || rule_hdl == NULL || ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms rule=%p rule_hdl=%p ep=%d\n", rule,
+				rule_hdl, ep);
+
+		return -EINVAL;
+	}
+
+	if (__ipa_add_flt_get_ep_idx(ep, &ipa_ep_idx))
+		return -EINVAL;
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][ip];
+	IPADBG_LOW("add ep flt rule ip=%d ep=%d\n", ip, ep);
+
+	return __ipa_add_flt_rule(tbl, ip, rule, add_rear, rule_hdl);
+}
+
+/**
+ * ipa3_add_flt_rule() - Add the specified filtering rules to SW and optionally
+ * commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	int i;
+	int result;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (!rules->global)
+			result = __ipa_add_ep_flt_rule(rules->ip, rules->ep,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].flt_rule_hdl);
+		else
+			result = -1;
+
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->global) {
+		IPAERR("no support for global filter rules\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_add_flt_rule_after() - Add the specified filtering rules to SW after
+ *  the rule which its handle is given and optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
+{
+	int i;
+	int result;
+	struct ipa3_flt_tbl *tbl;
+	int ipa_ep_idx;
+	struct ipa3_flt_entry *entry;
+
+	if (rules == NULL || rules->num_rules == 0 ||
+			rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (rules->ep >= IPA_CLIENT_MAX) {
+		IPAERR("bad parms ep=%d\n", rules->ep);
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (__ipa_add_flt_get_ep_idx(rules->ep, &ipa_ep_idx)) {
+		result = -EINVAL;
+		goto bail;
+	}
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][rules->ip];
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR("given entry does not match the table\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (tbl->sticky_rear)
+		if (&entry->link == tbl->head_flt_rule_list.prev) {
+			IPAERR("cannot add rule at end of a sticky table");
+			result = -EINVAL;
+			goto bail;
+		}
+
+	IPADBG("add ep flt rule ip=%d ep=%d after hdl %d\n",
+			rules->ip, rules->ep, rules->add_after_hdl);
+
+	/*
+	 * we add the rules one after the other. If one insertion fails, it
+	 * cuts the chain: all following rules will receive a fail status,
+	 * since subsequent calls to __ipa_add_flt_rule_after will fail
+	 * (entry == NULL)
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		result = __ipa_add_flt_rule_after(tbl,
+				&rules->rules[i].rule,
+				&rules->rules[i].flt_rule_hdl,
+				rules->ip,
+				&entry);
+
+		if (result) {
+			IPAERR("failed to add flt rule %d\n", i);
+			rules->rules[i].status = IPA_FLT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(rules->ip)) {
+			IPAERR("failed to commit flt rules\n");
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_del_flt_rule() - Remove the specified filtering rules from SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa_del_flt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del flt rule %i\n", i);
+			hdls->hdl[i].status = IPA_FLT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_mdfy_flt_rule() - Modify the specified filtering rules in SW and
+ * optionally commit to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		if (__ipa_mdfy_flt_rule(&hdls->rules[i], hdls->ip)) {
+			IPAERR("failed to mdfy flt rule %i\n", i);
+			hdls->rules[i].status = IPA_FLT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_flt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+
+/**
+ * ipa3_commit_flt() - Commit the current SW filtering table of specified type
+ * to IPA HW
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_flt(enum ipa_ip_type ip)
+{
+	int result;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	if (ipa3_ctx->ctrl->ipa3_commit_flt(ip)) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_reset_flt() - Reset the current SW filtering table of specified type
+ * (does not commit to HW)
+ * @ip:	[in] the family of filtering tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_flt(enum ipa_ip_type ip)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_flt_entry *entry;
+	struct ipa3_flt_entry *next;
+	int i;
+	int id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if (!ipa_is_ep_support_flt(i))
+			continue;
+
+		tbl = &ipa3_ctx->flt_tbl[i][ip];
+		list_for_each_entry_safe(entry, next, &tbl->head_flt_rule_list,
+				link) {
+			if (ipa3_id_find(entry->id) == NULL) {
+				WARN_ON(1);
+				mutex_unlock(&ipa3_ctx->lock);
+				return -EFAULT;
+			}
+			list_del(&entry->link);
+			entry->tbl->rule_cnt--;
+			if (entry->rt_tbl)
+				entry->rt_tbl->ref_cnt--;
+			/* if rule id was allocated from idr, remove it */
+			if (!(entry->rule_id & ipahal_get_rule_id_hi_bit()))
+				idr_remove(&entry->tbl->rule_ids,
+					entry->rule_id);
+			entry->cookie = 0;
+			id = entry->id;
+			kmem_cache_free(ipa3_ctx->flt_rule_cache, entry);
+
+			/* remove the handle from the database */
+			ipa3_id_remove(id);
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+	struct ipa_flt_rule rule;
+
+	if (!ipa_is_ep_support_flt(ipa_ep_idx)) {
+		IPADBG("cannot add flt rules to non filtering pipe num %d\n",
+			ipa_ep_idx);
+		return;
+	}
+
+	memset(&rule, 0, sizeof(rule));
+
+	mutex_lock(&ipa3_ctx->lock);
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v4];
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v4, &rule, true,
+			&ep->dflt_flt4_rule_hdl);
+	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+	tbl->sticky_rear = true;
+
+	tbl = &ipa3_ctx->flt_tbl[ipa_ep_idx][IPA_IP_v6];
+	rule.action = IPA_PASS_TO_EXCEPTION;
+	__ipa_add_flt_rule(tbl, IPA_IP_v6, &rule, true,
+			&ep->dflt_flt6_rule_hdl);
+	ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+	tbl->sticky_rear = true;
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx)
+{
+	struct ipa3_ep_context *ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ep->dflt_flt4_rule_hdl) {
+		__ipa_del_flt_rule(ep->dflt_flt4_rule_hdl);
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v4);
+		ep->dflt_flt4_rule_hdl = 0;
+	}
+	if (ep->dflt_flt6_rule_hdl) {
+		__ipa_del_flt_rule(ep->dflt_flt6_rule_hdl);
+		ipa3_ctx->ctrl->ipa3_commit_flt(IPA_IP_v6);
+		ep->dflt_flt6_rule_hdl = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+}
+
+/**
+ * ipa3_set_flt_tuple_mask() - Sets the flt tuple masking for the given pipe
+ *  Pipe must be for AP EP (not modem) and support filtering
+ *  updates the filtering masking values without changing the rt ones.
+ *
+ * @pipe_idx: filter pipe index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+	if (!tuple) {
+		IPAERR("bad tuple\n");
+		return -EINVAL;
+	}
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	if (!ipa_is_ep_support_flt(pipe_idx)) {
+		IPAERR("pipe %d not filtering pipe\n", pipe_idx);
+		return -EINVAL;
+	}
+
+	if (ipa_is_modem_pipe(pipe_idx)) {
+		IPAERR("modem pipe tuple is not configured by AP\n");
+		return -EINVAL;
+	}
+
+	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		pipe_idx, &fltrt_tuple);
+	fltrt_tuple.flt = *tuple;
+	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		pipe_idx, &fltrt_tuple);
+
+	return 0;
+}
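+
+/*
+ * Usage sketch (illustrative only): clearing every tuple member for an
+ * AP filtering pipe; a zeroed tuple is assumed to exclude all packet
+ * fields from the filter hash for that pipe.
+ *
+ *	struct ipahal_reg_hash_tuple tuple;
+ *
+ *	memset(&tuple, 0, sizeof(tuple));
+ *	if (ipa3_set_flt_tuple_mask(pipe_idx, &tuple))
+ *		IPAERR("fail to clear flt tuple mask\n");
+ */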
+
+/**
+ * ipa3_flt_read_tbl_from_hw() - Read filtering table from IPA HW
+ * @pipe_idx: IPA endpoint index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in entry array. set by the caller to indicate
+ *  entry array size. Then set by this function as an output parameter to
+ *  indicate the number of entries in the array
+ *
+ * This function reads the filtering table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * If empty table or Modem Apps table, zero entries will be returned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx, enum ipa_ip_type ip_type,
+	bool hashable, struct ipahal_flt_rule_entry entry[], int *num_entry)
+{
+	void *ipa_sram_mmio;
+	u64 hdr_base_ofst;
+	int tbl_entry_idx;
+	int i;
+	int res = 0;
+	u64 tbl_addr;
+	bool is_sys;
+	u8 *rule_addr;
+	struct ipa_mem_buffer *sys_tbl_mem;
+	int rule_idx;
+
+	IPADBG("pipe_idx=%d ip=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+		pipe_idx, ip_type, hashable, entry, num_entry);
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || ip_type >= IPA_IP_MAX ||
+	    !entry || !num_entry) {
+		IPAERR("Invalid params\n");
+		return -EFAULT;
+	}
+
+	if (!ipa_is_ep_support_flt(pipe_idx)) {
+		IPAERR("pipe %d does not support filtering\n", pipe_idx);
+		return -EINVAL;
+	}
+
+	/* map IPA SRAM */
+	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4),
+		ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	memset(entry, 0, sizeof(*entry) * (*num_entry));
+	if (hashable) {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_flt_hash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_flt_hash_ofst);
+	} else {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_flt_nhash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_flt_nhash_ofst);
+	}
+
+	/* calculate the index of the tbl entry */
+	tbl_entry_idx = 1; /* skip the bitmap */
+	for (i = 0; i < pipe_idx; i++)
+		if (ipa3_ctx->ep_flt_bitmap & (1 << i))
+			tbl_entry_idx++;
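+	/*
+	 * Example (assumed bitmap value): with ep_flt_bitmap = 0xB
+	 * (pipes 0, 1 and 3 support filtering) and pipe_idx = 3, the
+	 * loop above counts pipes 0 and 1, so tbl_entry_idx becomes
+	 * 1 (bitmap) + 2 = 3.
+	 */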
+
+	IPADBG("hdr_base_ofst=0x%llx tbl_entry_idx=%d\n",
+		hdr_base_ofst, tbl_entry_idx);
+
+	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+		tbl_entry_idx, &tbl_addr, &is_sys);
+	if (res) {
+		IPAERR("failed to read table address from header structure\n");
+		goto bail;
+	}
+	IPADBG("flt tbl ep=%d: tbl_addr=0x%llx is_sys=%d\n",
+		pipe_idx, tbl_addr, is_sys);
+	if (!tbl_addr) {
+		IPAERR("invalid flt tbl addr\n");
+		res = -EFAULT;
+		goto bail;
+	}
+
+	/* for tables residing in DDR, access them through virtual memory */
+	if (is_sys) {
+		sys_tbl_mem = &ipa3_ctx->flt_tbl[pipe_idx][ip_type].
+			curr_mem[hashable ? IPA_RULE_HASHABLE :
+				IPA_RULE_NON_HASHABLE];
+		if (sys_tbl_mem->phys_base &&
+			sys_tbl_mem->phys_base != tbl_addr) {
+			IPAERR("mismatch addr: parsed=%llx sw=%pad\n",
+				tbl_addr, &sys_tbl_mem->phys_base);
+		}
+		if (sys_tbl_mem->phys_base)
+			rule_addr = sys_tbl_mem->base;
+		else
+			rule_addr = NULL;
+	} else {
+		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+	}
+
+	IPADBG("First rule addr 0x%p\n", rule_addr);
+
+	if (!rule_addr) {
+		/* Modem table in system memory or empty table */
+		*num_entry = 0;
+		goto bail;
+	}
+
+	rule_idx = 0;
+	while (rule_idx < *num_entry) {
+		res = ipahal_flt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+		if (res) {
+			IPAERR("failed parsing flt rule\n");
+			goto bail;
+		}
+
+		IPADBG("rule_size=%d\n", entry[rule_idx].rule_size);
+		if (!entry[rule_idx].rule_size)
+			break;
+
+		rule_addr += entry[rule_idx].rule_size;
+		rule_idx++;
+	}
+	*num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
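+
+/*
+ * Usage sketch (illustrative only; pipe index and array size are
+ * arbitrary): dump the IPv4 hashable filter table of a filtering pipe
+ * for debug.
+ *
+ *	struct ipahal_flt_rule_entry rules[16];
+ *	int num = ARRAY_SIZE(rules);
+ *
+ *	if (!ipa3_flt_read_tbl_from_hw(pipe_idx, IPA_IP_v4, true,
+ *			rules, &num))
+ *		IPADBG("read %d flt rules\n", num);
+ */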
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
new file mode 100644
index 0000000..6274579
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hdr.c
@@ -0,0 +1,1181 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+static const u32 ipa_hdr_bin_sz[IPA_HDR_BIN_MAX] = { 8, 16, 24, 36, 60};
+static const u32 ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN_MAX] = { 32, 64};
+
+#define HDR_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_L2_MAX)
+
+#define HDR_PROC_TYPE_IS_VALID(type) \
+	((type) >= 0 && (type) < IPA_HDR_PROC_MAX)
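+
+/*
+ * Bin selection example (derived from ipa_hdr_bin_sz above): a header
+ * of 20 bytes does not fit bin 1 (16 bytes) but fits bin 2 (24 bytes),
+ * so __ipa_add_hdr() places it in IPA_HDR_BIN2 and the header table
+ * grows by 24 bytes.
+ */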
+
+/**
+ * ipa3_generate_hdr_hw_tbl() - generates the headers table
+ * @mem:	[out] buffer to put the header table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_hw_tbl(struct ipa_mem_buffer *mem)
+{
+	struct ipa3_hdr_entry *entry;
+
+	mem->size = ipa3_ctx->hdr_tbl.end;
+
+	if (mem->size == 0) {
+		IPAERR("hdr tbl empty\n");
+		return -EPERM;
+	}
+	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_tbl.end);
+
+	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	memset(mem->base, 0, mem->size);
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (entry->is_hdr_proc_ctx)
+			continue;
+		IPADBG_LOW("hdr of len %d ofst=%d\n", entry->hdr_len,
+				entry->offset_entry->offset);
+		ipahal_cp_hdr_to_hw_buff(mem->base, entry->offset_entry->offset,
+				entry->hdr, entry->hdr_len);
+	}
+
+	return 0;
+}
+
+static int ipa3_hdr_proc_ctx_to_hw_format(struct ipa_mem_buffer *mem,
+	u32 hdr_base_addr)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	int ret;
+
+	list_for_each_entry(entry,
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+			link) {
+		IPADBG_LOW("processing type %d ofst=%d\n",
+			entry->type, entry->offset_entry->offset);
+		ret = ipahal_cp_proc_ctx_to_hw_buff(entry->type, mem->base,
+				entry->offset_entry->offset,
+				entry->hdr->hdr_len,
+				entry->hdr->is_hdr_proc_ctx,
+				entry->hdr->phys_base,
+				hdr_base_addr,
+				entry->hdr->offset_entry);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_generate_hdr_proc_ctx_hw_tbl() -
+ * generates the headers processing context table.
+ * @hdr_sys_addr:	system memory address of the header table, used when
+ *			the header table resides in system memory (DDR)
+ * @mem:		[out] buffer to put the processing context table
+ * @aligned_mem:	[out] actual processing context table (with alignment).
+ *			Processing context table needs to be 8 Bytes aligned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+static int ipa3_generate_hdr_proc_ctx_hw_tbl(u32 hdr_sys_addr,
+	struct ipa_mem_buffer *mem, struct ipa_mem_buffer *aligned_mem)
+{
+	u32 hdr_base_addr;
+
+	mem->size = (ipa3_ctx->hdr_proc_ctx_tbl.end) ? : 4;
+
+	/* make sure table is aligned */
+	mem->size += IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+
+	IPADBG_LOW("tbl_sz=%d\n", ipa3_ctx->hdr_proc_ctx_tbl.end);
+
+	mem->base = dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+			&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
+	aligned_mem->phys_base =
+		IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(mem->phys_base);
+	aligned_mem->base = mem->base +
+		(aligned_mem->phys_base - mem->phys_base);
+	aligned_mem->size = mem->size - IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE;
+	memset(aligned_mem->base, 0, aligned_mem->size);
+	hdr_base_addr = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_ofst) :
+		hdr_sys_addr;
+	return ipa3_hdr_proc_ctx_to_hw_format(aligned_mem, hdr_base_addr);
+}
+
+/**
+ * __ipa_commit_hdr_v3_0() - Commits the header table from memory to HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa_commit_hdr_v3_0(void)
+{
+	struct ipa3_desc desc[2];
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer ctx_mem;
+	struct ipa_mem_buffer aligned_ctx_mem;
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_hdr = {0};
+	struct ipahal_imm_cmd_dma_shared_mem dma_cmd_ctx = {0};
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_hdr_init_system hdr_init_cmd = {0};
+	struct ipahal_imm_cmd_pyld *hdr_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_pyld *ctx_cmd_pyld = NULL;
+	int rc = -EFAULT;
+	u32 proc_ctx_size;
+	u32 proc_ctx_ofst;
+	u32 proc_ctx_size_ddr;
+
+	memset(desc, 0, 2 * sizeof(struct ipa3_desc));
+
+	if (ipa3_generate_hdr_hw_tbl(&hdr_mem)) {
+		IPAERR("fail to generate HDR HW TBL\n");
+		goto end;
+	}
+
+	if (ipa3_generate_hdr_proc_ctx_hw_tbl(hdr_mem.phys_base, &ctx_mem,
+	    &aligned_ctx_mem)) {
+		IPAERR("fail to generate HDR PROC CTX HW TBL\n");
+		goto end;
+	}
+
+	if (ipa3_ctx->hdr_tbl_lcl) {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size));
+			goto end;
+		} else {
+			dma_cmd_hdr.is_read = false; /* write operation */
+			dma_cmd_hdr.skip_pipeline_clear = false;
+			dma_cmd_hdr.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			dma_cmd_hdr.system_addr = hdr_mem.phys_base;
+			dma_cmd_hdr.size = hdr_mem.size;
+			dma_cmd_hdr.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				IPA_MEM_PART(apps_hdr_ofst);
+			hdr_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM,
+				&dma_cmd_hdr, false);
+			if (!hdr_cmd_pyld) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				goto end;
+			}
+			desc[0].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[0].pyld = hdr_cmd_pyld->data;
+			desc[0].len = hdr_cmd_pyld->len;
+		}
+	} else {
+		if (hdr_mem.size > IPA_MEM_PART(apps_hdr_size_ddr)) {
+			IPAERR("tbl too big needed %d avail %d\n", hdr_mem.size,
+				IPA_MEM_PART(apps_hdr_size_ddr));
+			goto end;
+		} else {
+			hdr_init_cmd.hdr_table_addr = hdr_mem.phys_base;
+			hdr_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_HDR_INIT_SYSTEM,
+				&hdr_init_cmd, false);
+			if (!hdr_cmd_pyld) {
+				IPAERR("fail construct hdr_init_system cmd\n");
+				goto end;
+			}
+			desc[0].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_HDR_INIT_SYSTEM);
+			desc[0].pyld = hdr_cmd_pyld->data;
+			desc[0].len = hdr_cmd_pyld->len;
+		}
+	}
+	desc[0].type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(hdr_mem.base, hdr_mem.phys_base, hdr_mem.size);
+
+	proc_ctx_size = IPA_MEM_PART(apps_hdr_proc_ctx_size);
+	proc_ctx_ofst = IPA_MEM_PART(apps_hdr_proc_ctx_ofst);
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		if (aligned_ctx_mem.size > proc_ctx_size) {
+			IPAERR("tbl too big needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size);
+			goto end;
+		} else {
+			dma_cmd_ctx.is_read = false; /* Write operation */
+			dma_cmd_ctx.skip_pipeline_clear = false;
+			dma_cmd_ctx.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+			dma_cmd_ctx.system_addr = aligned_ctx_mem.phys_base;
+			dma_cmd_ctx.size = aligned_ctx_mem.size;
+			dma_cmd_ctx.local_addr =
+				ipa3_ctx->smem_restricted_bytes +
+				proc_ctx_ofst;
+			ctx_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_DMA_SHARED_MEM,
+				&dma_cmd_ctx, false);
+			if (!ctx_cmd_pyld) {
+				IPAERR("fail construct dma_shared_mem cmd\n");
+				goto end;
+			}
+			desc[1].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_DMA_SHARED_MEM);
+			desc[1].pyld = ctx_cmd_pyld->data;
+			desc[1].len = ctx_cmd_pyld->len;
+		}
+	} else {
+		proc_ctx_size_ddr = IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+		if (aligned_ctx_mem.size > proc_ctx_size_ddr) {
+			IPAERR("tbl too big, needed %d avail %d\n",
+				aligned_ctx_mem.size,
+				proc_ctx_size_ddr);
+			goto end;
+		} else {
+			reg_write_cmd.skip_pipeline_clear = false;
+			reg_write_cmd.pipeline_clear_options =
+				IPAHAL_HPS_CLEAR;
+			reg_write_cmd.offset =
+				ipahal_get_reg_ofst(
+				IPA_SYS_PKT_PROC_CNTXT_BASE);
+			reg_write_cmd.value = aligned_ctx_mem.phys_base;
+			reg_write_cmd.value_mask =
+				~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1);
+			ctx_cmd_pyld = ipahal_construct_imm_cmd(
+				IPA_IMM_CMD_REGISTER_WRITE,
+				&reg_write_cmd, false);
+			if (!ctx_cmd_pyld) {
+				IPAERR("fail construct register_write cmd\n");
+				goto end;
+			}
+			desc[1].opcode = ipahal_imm_cmd_get_opcode(
+				IPA_IMM_CMD_REGISTER_WRITE);
+			desc[1].pyld = ctx_cmd_pyld->data;
+			desc[1].len = ctx_cmd_pyld->len;
+		}
+	}
+	desc[1].type = IPA_IMM_CMD_DESC;
+	IPA_DUMP_BUFF(ctx_mem.base, ctx_mem.phys_base, ctx_mem.size);
+
+	if (ipa3_send_cmd(2, desc))
+		IPAERR("fail to send immediate command\n");
+	else
+		rc = 0;
+
+	if (ipa3_ctx->hdr_tbl_lcl) {
+		dma_free_coherent(ipa3_ctx->pdev, hdr_mem.size, hdr_mem.base,
+			hdr_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa3_ctx->hdr_mem.phys_base)
+				dma_free_coherent(ipa3_ctx->pdev,
+				ipa3_ctx->hdr_mem.size,
+				ipa3_ctx->hdr_mem.base,
+				ipa3_ctx->hdr_mem.phys_base);
+			ipa3_ctx->hdr_mem = hdr_mem;
+		}
+	}
+
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		dma_free_coherent(ipa3_ctx->pdev, ctx_mem.size, ctx_mem.base,
+			ctx_mem.phys_base);
+	} else {
+		if (!rc) {
+			if (ipa3_ctx->hdr_proc_ctx_mem.phys_base)
+				dma_free_coherent(ipa3_ctx->pdev,
+					ipa3_ctx->hdr_proc_ctx_mem.size,
+					ipa3_ctx->hdr_proc_ctx_mem.base,
+					ipa3_ctx->hdr_proc_ctx_mem.phys_base);
+			ipa3_ctx->hdr_proc_ctx_mem = ctx_mem;
+		}
+	}
+
+end:
+	if (ctx_cmd_pyld)
+		ipahal_destroy_imm_cmd(ctx_cmd_pyld);
+
+	if (hdr_cmd_pyld)
+		ipahal_destroy_imm_cmd(hdr_cmd_pyld);
+
+	return rc;
+}
+
+static int __ipa_add_hdr_proc_ctx(struct ipa_hdr_proc_ctx_add *proc_ctx,
+	bool add_ref_hdr)
+{
+	struct ipa3_hdr_entry *hdr_entry;
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	struct ipa3_hdr_proc_ctx_offset_entry *offset;
+	u32 bin;
+	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+	int id;
+	int needed_len;
+	int mem_size;
+
+	IPADBG_LOW("Add processing type %d hdr_hdl %d\n",
+		proc_ctx->type, proc_ctx->hdr_hdl);
+
+	if (!HDR_PROC_TYPE_IS_VALID(proc_ctx->type)) {
+		IPAERR("invalid processing type %d\n", proc_ctx->type);
+		return -EINVAL;
+	}
+
+	hdr_entry = ipa3_id_find(proc_ctx->hdr_hdl);
+	if (!hdr_entry) {
+		IPAERR("hdr_hdl is invalid\n");
+		return -EINVAL;
+	}
+	if (hdr_entry->cookie != IPA_COOKIE) {
+		IPAERR("Invalid header cookie %u\n", hdr_entry->cookie);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	IPADBG("Associated header is name=%s is_hdr_proc_ctx=%d\n",
+		hdr_entry->name, hdr_entry->is_hdr_proc_ctx);
+
+	entry = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc proc_ctx object\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	entry->type = proc_ctx->type;
+	entry->hdr = hdr_entry;
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt++;
+	entry->cookie = IPA_COOKIE;
+
+	needed_len = ipahal_get_proc_ctx_needed_len(proc_ctx->type);
+
+	if (needed_len <= ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN0]) {
+		bin = IPA_HDR_PROC_CTX_BIN0;
+	} else if (needed_len <=
+			ipa_hdr_proc_ctx_bin_sz[IPA_HDR_PROC_CTX_BIN1]) {
+		bin = IPA_HDR_PROC_CTX_BIN1;
+	} else {
+		IPAERR("unexpected needed len %d\n", needed_len);
+		WARN_ON(1);
+		goto bad_len;
+	}
+
+	mem_size = (ipa3_ctx->hdr_proc_ctx_tbl_lcl) ?
+		IPA_MEM_PART(apps_hdr_proc_ctx_size) :
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr);
+	if (htbl->end + ipa_hdr_proc_ctx_bin_sz[bin] > mem_size) {
+		IPAERR("hdr proc ctx table overflow\n");
+		goto bad_len;
+	}
+
+	if (list_empty(&htbl->head_free_offset_list[bin])) {
+		offset = kmem_cache_zalloc(ipa3_ctx->hdr_proc_ctx_offset_cache,
+					   GFP_KERNEL);
+		if (!offset) {
+			IPAERR("failed to alloc offset object\n");
+			goto bad_len;
+		}
+		INIT_LIST_HEAD(&offset->link);
+		/*
+		 * when growing the table for a new item, set the bin and
+		 * offset, which are then fixed for the entry's lifetime
+		 */
+		offset->offset = htbl->end;
+		offset->bin = bin;
+		htbl->end += ipa_hdr_proc_ctx_bin_sz[bin];
+		list_add(&offset->link,
+				&htbl->head_offset_list[bin]);
+	} else {
+		/* get the first free slot */
+		offset =
+		    list_first_entry(&htbl->head_free_offset_list[bin],
+				struct ipa3_hdr_proc_ctx_offset_entry, link);
+		list_move(&offset->link, &htbl->head_offset_list[bin]);
+	}
+
+	entry->offset_entry = offset;
+	list_add(&entry->link, &htbl->head_proc_ctx_entry_list);
+	htbl->proc_ctx_cnt++;
+	IPADBG("add proc ctx of sz=%d cnt=%d ofst=%d\n", needed_len,
+			htbl->proc_ctx_cnt, offset->offset);
+
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to alloc id\n");
+		WARN_ON(1);
+	}
+	entry->id = id;
+	proc_ctx->proc_ctx_hdl = id;
+	entry->ref_cnt++;
+
+	return 0;
+
+bad_len:
+	if (add_ref_hdr)
+		hdr_entry->ref_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+	return -EPERM;
+}
+
+
+static int __ipa_add_hdr(struct ipa_hdr_add *hdr)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa_hdr_offset_entry *offset;
+	u32 bin;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+	int id;
+	int mem_size;
+
+	if (hdr->hdr_len == 0 || hdr->hdr_len > IPA_HDR_MAX_SIZE) {
+		IPAERR("bad parm\n");
+		goto error;
+	}
+
+	if (!HDR_TYPE_IS_VALID(hdr->type)) {
+		IPAERR("invalid hdr type %d\n", hdr->type);
+		goto error;
+	}
+
+	entry = kmem_cache_zalloc(ipa3_ctx->hdr_cache, GFP_KERNEL);
+	if (!entry) {
+		IPAERR("failed to alloc hdr object\n");
+		goto error;
+	}
+
+	INIT_LIST_HEAD(&entry->link);
+
+	memcpy(entry->hdr, hdr->hdr, hdr->hdr_len);
+	entry->hdr_len = hdr->hdr_len;
+	strlcpy(entry->name, hdr->name, IPA_RESOURCE_NAME_MAX);
+	entry->is_partial = hdr->is_partial;
+	entry->type = hdr->type;
+	entry->is_eth2_ofst_valid = hdr->is_eth2_ofst_valid;
+	entry->eth2_ofst = hdr->eth2_ofst;
+	entry->cookie = IPA_COOKIE;
+
+	if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN0])
+		bin = IPA_HDR_BIN0;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN1])
+		bin = IPA_HDR_BIN1;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN2])
+		bin = IPA_HDR_BIN2;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN3])
+		bin = IPA_HDR_BIN3;
+	else if (hdr->hdr_len <= ipa_hdr_bin_sz[IPA_HDR_BIN4])
+		bin = IPA_HDR_BIN4;
+	else {
+		IPAERR("unexpected hdr len %d\n", hdr->hdr_len);
+		goto bad_hdr_len;
+	}
+
+	mem_size = (ipa3_ctx->hdr_tbl_lcl) ? IPA_MEM_PART(apps_hdr_size) :
+		IPA_MEM_PART(apps_hdr_size_ddr);
+
+	/* if the header does not fit in the table, place it in DDR */
+	if (htbl->end + ipa_hdr_bin_sz[bin] > mem_size) {
+		entry->is_hdr_proc_ctx = true;
+		entry->phys_base = dma_map_single(ipa3_ctx->pdev,
+			entry->hdr,
+			entry->hdr_len,
+			DMA_TO_DEVICE);
+	} else {
+		entry->is_hdr_proc_ctx = false;
+		if (list_empty(&htbl->head_free_offset_list[bin])) {
+			offset = kmem_cache_zalloc(ipa3_ctx->hdr_offset_cache,
+						   GFP_KERNEL);
+			if (!offset) {
+				IPAERR("failed to alloc hdr offset object\n");
+				goto bad_hdr_len;
+			}
+			INIT_LIST_HEAD(&offset->link);
+			/*
+			 * when growing the table for a new item, set the bin
+			 * and offset, which are then fixed
+			 */
+			offset->offset = htbl->end;
+			offset->bin = bin;
+			htbl->end += ipa_hdr_bin_sz[bin];
+			list_add(&offset->link,
+					&htbl->head_offset_list[bin]);
+		} else {
+			/* get the first free slot */
+			offset =
+			list_first_entry(&htbl->head_free_offset_list[bin],
+					struct ipa_hdr_offset_entry, link);
+			list_move(&offset->link, &htbl->head_offset_list[bin]);
+		}
+
+		entry->offset_entry = offset;
+	}
+
+	list_add(&entry->link, &htbl->head_hdr_entry_list);
+	htbl->hdr_cnt++;
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("add hdr of sz=%d hdr_cnt=%d phys_base=%pa\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			&entry->phys_base);
+	else
+		IPADBG("add hdr of sz=%d hdr_cnt=%d ofst=%d\n",
+			hdr->hdr_len,
+			htbl->hdr_cnt,
+			entry->offset_entry->offset);
+
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to alloc id\n");
+		WARN_ON(1);
+	}
+	entry->id = id;
+	hdr->hdr_hdl = id;
+	entry->ref_cnt++;
+
+	if (entry->is_hdr_proc_ctx) {
+		struct ipa_hdr_proc_ctx_add proc_ctx;
+
+		IPADBG("adding processing context for header %s\n", hdr->name);
+		proc_ctx.type = IPA_HDR_PROC_NONE;
+		proc_ctx.hdr_hdl = id;
+		if (__ipa_add_hdr_proc_ctx(&proc_ctx, false)) {
+			IPAERR("failed to add hdr proc ctx\n");
+			goto fail_add_proc_ctx;
+		}
+		entry->proc_ctx = ipa3_id_find(proc_ctx.proc_ctx_hdl);
+	}
+
+	return 0;
+
+fail_add_proc_ctx:
+	entry->ref_cnt--;
+	hdr->hdr_hdl = 0;
+	ipa3_id_remove(id);
+	htbl->hdr_cnt--;
+	list_del(&entry->link);
+	dma_unmap_single(ipa3_ctx->pdev, entry->phys_base,
+			entry->hdr_len, DMA_TO_DEVICE);
+bad_hdr_len:
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa3_del_hdr_proc_ctx(u32 proc_ctx_hdl, bool release_hdr)
+{
+	struct ipa3_hdr_proc_ctx_entry *entry;
+	struct ipa3_hdr_proc_ctx_tbl *htbl = &ipa3_ctx->hdr_proc_ctx_tbl;
+
+	entry = ipa3_id_find(proc_ctx_hdl);
+	if (!entry || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	IPADBG("del proc ctx cnt=%d ofst=%d\n",
+		htbl->proc_ctx_cnt, entry->offset_entry->offset);
+
+	if (--entry->ref_cnt) {
+		IPADBG("proc_ctx_hdl %x ref_cnt %d\n",
+			proc_ctx_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (release_hdr)
+		__ipa3_del_hdr(entry->hdr->id);
+
+	/* move the offset entry to appropriate free list */
+	list_move(&entry->offset_entry->link,
+		&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	list_del(&entry->link);
+	htbl->proc_ctx_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(proc_ctx_hdl);
+
+	return 0;
+}
+
+
+int __ipa3_del_hdr(u32 hdr_hdl)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_tbl *htbl = &ipa3_ctx->hdr_tbl;
+
+	entry = ipa3_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (entry->is_hdr_proc_ctx)
+		IPADBG("del hdr of len=%d hdr_cnt=%d phys_base=%pa\n",
+			entry->hdr_len, htbl->hdr_cnt, &entry->phys_base);
+	else
+		IPADBG("del hdr of len=%d hdr_cnt=%d ofst=%d\n",
+			entry->hdr_len, htbl->hdr_cnt,
+			entry->offset_entry->offset);
+
+	if (--entry->ref_cnt) {
+		IPADBG("hdr_hdl %x ref_cnt %d\n", hdr_hdl, entry->ref_cnt);
+		return 0;
+	}
+
+	if (entry->is_hdr_proc_ctx) {
+		dma_unmap_single(ipa3_ctx->pdev,
+			entry->phys_base,
+			entry->hdr_len,
+			DMA_TO_DEVICE);
+		__ipa3_del_hdr_proc_ctx(entry->proc_ctx->id, false);
+	} else {
+		/* move the offset entry to appropriate free list */
+		list_move(&entry->offset_entry->link,
+			&htbl->head_free_offset_list[entry->offset_entry->bin]);
+	}
+	list_del(&entry->link);
+	htbl->hdr_cnt--;
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(hdr_hdl);
+
+	return 0;
+}
+
+/**
+ * ipa3_add_hdr() - add the specified headers to SW and optionally commit them
+ * to IPA HW
+ * @hdrs:	[inout] set of headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdrs == NULL || hdrs->num_hdrs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d headers to IPA driver internal data struct\n",
+			hdrs->num_hdrs);
+	for (i = 0; i < hdrs->num_hdrs; i++) {
+		if (__ipa_add_hdr(&hdrs->hdr[i])) {
+			IPAERR("failed to add hdr %d\n", i);
+			hdrs->hdr[i].status = -1;
+		} else {
+			hdrs->hdr[i].status = 0;
+		}
+	}
+
+	if (hdrs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
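+
+/*
+ * Usage sketch for ipa3_add_hdr() (illustrative only; assumes the uapi
+ * definition of struct ipa_ioc_add_hdr ends with a variable length
+ * hdr[] array, and "example_hdr" is a caller chosen name):
+ *
+ *	struct ipa_ioc_add_hdr *hdrs;
+ *
+ *	hdrs = kzalloc(sizeof(*hdrs) + sizeof(struct ipa_hdr_add),
+ *		GFP_KERNEL);
+ *	if (!hdrs)
+ *		return -ENOMEM;
+ *	hdrs->commit = 1;
+ *	hdrs->num_hdrs = 1;
+ *	hdrs->hdr[0].hdr_len = 8;
+ *	strlcpy(hdrs->hdr[0].name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa3_add_hdr(hdrs) && !hdrs->hdr[0].status)
+ *		IPADBG("new hdr handle %u\n", hdrs->hdr[0].hdr_hdl);
+ *	kfree(hdrs);
+ */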
+
+/**
+ * ipa3_del_hdr() - Remove the specified headers from SW and optionally commit
+ * them to IPA HW
+ * @hdls:	[inout] set of headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_hdr(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del hdr %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_add_hdr_proc_ctx() - add the specified headers to SW
+ * and optionally commit them to IPA HW
+ * @proc_ctxs:	[inout] set of processing context headers to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+	int i;
+	int result = -EFAULT;
+
+	if (proc_ctxs == NULL || proc_ctxs->num_proc_ctxs == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("adding %d header processing contexts to IPA driver\n",
+			proc_ctxs->num_proc_ctxs);
+	for (i = 0; i < proc_ctxs->num_proc_ctxs; i++) {
+		if (__ipa_add_hdr_proc_ctx(&proc_ctxs->proc_ctx[i], true)) {
+			IPAERR("failed to add hdr proc ctx %d\n", i);
+			proc_ctxs->proc_ctx[i].status = -1;
+		} else {
+			proc_ctxs->proc_ctx[i].status = 0;
+		}
+	}
+
+	if (proc_ctxs->commit) {
+		IPADBG("committing all headers to IPA core\n");
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_del_hdr_proc_ctx() -
+ * Remove the specified processing context headers from SW and
+ * optionally commit them to IPA HW.
+ * @hdls:	[inout] set of processing context headers to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_hdls == 0) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_hdr_proc_ctx(hdls->hdl[i].hdl, true)) {
+			IPAERR("failed to del hdr proc ctx %i\n", i);
+			hdls->hdl[i].status = -1;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit) {
+		if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+			result = -EPERM;
+			goto bail;
+		}
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_commit_hdr() - commit to IPA HW the current header table in SW
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_hdr(void)
+{
+	int result = -EFAULT;
+
+	/*
+	 * issue a commit on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa3_commit_rt(IPA_IP_v4))
+		return -EPERM;
+	if (ipa3_commit_rt(IPA_IP_v6))
+		return -EPERM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		result = -EPERM;
+		goto bail;
+	}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_reset_hdr() - reset the current header table in SW (does not commit to
+ * HW)
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_hdr(void)
+{
+	struct ipa3_hdr_entry *entry;
+	struct ipa3_hdr_entry *next;
+	struct ipa3_hdr_proc_ctx_entry *ctx_entry;
+	struct ipa3_hdr_proc_ctx_entry *ctx_next;
+	struct ipa_hdr_offset_entry *off_entry;
+	struct ipa_hdr_offset_entry *off_next;
+	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_entry;
+	struct ipa3_hdr_proc_ctx_offset_entry *ctx_off_next;
+	int i;
+
+	/*
+	 * issue a reset on the routing module since routing rules point to
+	 * header table entries
+	 */
+	if (ipa3_reset_rt(IPA_IP_v4))
+		IPAERR("fail to reset v4 rt\n");
+	if (ipa3_reset_rt(IPA_IP_v6))
+		IPAERR("fail to reset v6 rt\n");
+
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("reset hdr\n");
+	list_for_each_entry_safe(entry, next,
+			&ipa3_ctx->hdr_tbl.head_hdr_entry_list, link) {
+
+		/* do not remove the default header */
+		if (!strcmp(entry->name, IPA_LAN_RX_HDR_NAME)) {
+			if (entry->is_hdr_proc_ctx) {
+				IPAERR("default header is proc ctx\n");
+				mutex_unlock(&ipa3_ctx->lock);
+				WARN_ON(1);
+				return -EFAULT;
+			}
+			continue;
+		}
+
+		if (ipa3_id_find(entry->id) == NULL) {
+			mutex_unlock(&ipa3_ctx->lock);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		if (entry->is_hdr_proc_ctx) {
+			dma_unmap_single(ipa3_ctx->pdev,
+				entry->phys_base,
+				entry->hdr_len,
+				DMA_TO_DEVICE);
+			entry->proc_ctx = NULL;
+		}
+		list_del(&entry->link);
+		entry->ref_cnt = 0;
+		entry->cookie = 0;
+
+		/* remove the handle from the database */
+		ipa3_id_remove(entry->id);
+		kmem_cache_free(ipa3_ctx->hdr_cache, entry);
+
+	}
+	for (i = 0; i < IPA_HDR_BIN_MAX; i++) {
+		list_for_each_entry_safe(off_entry, off_next,
+					 &ipa3_ctx->hdr_tbl.head_offset_list[i],
+					 link) {
+
+			/*
+			 * do not remove the default exception header which is
+			 * at offset 0
+			 */
+			if (off_entry->offset == 0)
+				continue;
+
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+		}
+		list_for_each_entry_safe(off_entry, off_next,
+				&ipa3_ctx->hdr_tbl.head_free_offset_list[i],
+				link) {
+			list_del(&off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_offset_cache, off_entry);
+		}
+	}
+	/* there is one header of size 8 */
+	ipa3_ctx->hdr_tbl.end = 8;
+	ipa3_ctx->hdr_tbl.hdr_cnt = 1;
+
+	IPADBG("reset hdr proc ctx\n");
+	list_for_each_entry_safe(
+		ctx_entry,
+		ctx_next,
+		&ipa3_ctx->hdr_proc_ctx_tbl.head_proc_ctx_entry_list,
+		link) {
+
+		if (ipa3_id_find(ctx_entry->id) == NULL) {
+			mutex_unlock(&ipa3_ctx->lock);
+			WARN_ON(1);
+			return -EFAULT;
+		}
+		list_del(&ctx_entry->link);
+		ctx_entry->ref_cnt = 0;
+		ctx_entry->cookie = 0;
+
+		/* remove the handle from the database */
+		ipa3_id_remove(ctx_entry->id);
+		kmem_cache_free(ipa3_ctx->hdr_proc_ctx_cache, ctx_entry);
+
+	}
+	for (i = 0; i < IPA_HDR_PROC_CTX_BIN_MAX; i++) {
+		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+				&ipa3_ctx->hdr_proc_ctx_tbl.head_offset_list[i],
+				link) {
+
+			list_del(&ctx_off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+					ctx_off_entry);
+		}
+		list_for_each_entry_safe(ctx_off_entry, ctx_off_next,
+			&ipa3_ctx->hdr_proc_ctx_tbl.head_free_offset_list[i],
+			link) {
+			list_del(&ctx_off_entry->link);
+			kmem_cache_free(ipa3_ctx->hdr_proc_ctx_offset_cache,
+				ctx_off_entry);
+		}
+	}
+	ipa3_ctx->hdr_proc_ctx_tbl.end = 0;
+	ipa3_ctx->hdr_proc_ctx_tbl.proc_ctx_cnt = 0;
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+static struct ipa3_hdr_entry *__ipa_find_hdr(const char *name)
+{
+	struct ipa3_hdr_entry *entry;
+
+	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Header name too long: %s\n", name);
+		return NULL;
+	}
+
+	list_for_each_entry(entry, &ipa3_ctx->hdr_tbl.head_hdr_entry_list,
+			link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa3_get_hdr() - Lookup the specified header resource
+ * @lookup:	[inout] header to lookup and its handle
+ *
+ * lookup the specified header resource and return handle if it exists
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *		Caller should call ipa3_put_hdr later if this function succeeds
+ */
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -1;
+
+	if (lookup == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	entry = __ipa_find_hdr(lookup->name);
+	if (entry) {
+		lookup->hdl = entry->id;
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
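+
+/*
+ * Lookup sketch (illustrative only; "example_hdr" is a caller chosen
+ * name): pairing ipa3_get_hdr() with ipa3_put_hdr() as the note above
+ * requires.
+ *
+ *	struct ipa_ioc_get_hdr lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	strlcpy(lookup.name, "example_hdr", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa3_get_hdr(&lookup)) {
+ *		... use lookup.hdl, e.g. as the hdr_hdl of a rule ...
+ *		ipa3_put_hdr(lookup.hdl);
+ *	}
+ */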
+
+/**
+ * __ipa3_release_hdr() - drop reference to header and cause
+ * deletion if reference count permits
+ * @hdr_hdl:	[in] handle of header to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa3_release_hdr(u32 hdr_hdl)
+{
+	int result = 0;
+
+	if (__ipa3_del_hdr(hdr_hdl)) {
+		IPADBG("fail to del hdr %x\n", hdr_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * __ipa3_release_hdr_proc_ctx() - drop reference to processing context
+ *  and cause deletion if reference count permits
+ * @proc_ctx_hdl:	[in] handle of processing context to be released
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl)
+{
+	int result = 0;
+
+	if (__ipa3_del_hdr_proc_ctx(proc_ctx_hdl, true)) {
+		IPADBG("fail to del hdr proc ctx %x\n", proc_ctx_hdl);
+		result = -EFAULT;
+		goto bail;
+	}
+
+	/* commit for put */
+	if (ipa3_ctx->ctrl->ipa3_commit_hdr()) {
+		IPAERR("fail to commit hdr\n");
+		result = -EFAULT;
+		goto bail;
+	}
+
+bail:
+	return result;
+}
+
+/**
+ * ipa3_put_hdr() - Release the specified header handle
+ * @hdr_hdl:	[in] the header handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_put_hdr(u32 hdr_hdl)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -EFAULT;
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	entry = ipa3_id_find(hdr_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("invalid header entry\n");
+		result = -EINVAL;
+		goto bail;
+	}
+
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+/**
+ * ipa3_copy_hdr() - Lookup the specified header resource and return a copy of
+ * it
+ * @copy:	[inout] header to lookup and its copy
+ *
+ * lookup the specified header resource and return a copy of it (along with its
+ * attributes) if it exists, this would be called for partial headers
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	struct ipa3_hdr_entry *entry;
+	int result = -EFAULT;
+
+	if (copy == NULL) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	entry = __ipa_find_hdr(copy->name);
+	if (entry) {
+		memcpy(copy->hdr, entry->hdr, entry->hdr_len);
+		copy->hdr_len = entry->hdr_len;
+		copy->type = entry->type;
+		copy->is_partial = entry->is_partial;
+		copy->is_eth2_ofst_valid = entry->is_eth2_ofst_valid;
+		copy->eth2_ofst = entry->eth2_ofst;
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
new file mode 100644
index 0000000..dff3a3f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_hw_defs.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_HW_DEFS_H
+#define _IPA_HW_DEFS_H
+#include <linux/bitops.h>
+
+/* This header defines various HW related data types */
+
+
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IP		BIT(7)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_NAT		BIT(6)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_SW_FLT	BIT(5)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_TAG		BIT(4)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_REPLICATED	BIT(3)
+#define IPA_A5_MUX_HDR_EXCP_FLAG_IHL		BIT(2)
+
+/**
+ * struct ipa3_a5_mux_hdr - A5 MUX header definition
+ * @interface_id: interface ID
+ * @src_pipe_index: source pipe index
+ * @flags: flags
+ * @metadata: metadata
+ *
+ * A5 MUX header is in BE, A5 runs in LE. This struct definition
+ * allows A5 SW to correctly parse the header
+ */
+struct ipa3_a5_mux_hdr {
+	u16 interface_id;
+	u8 src_pipe_index;
+	u8 flags;
+	u32 metadata;
+};
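+
+/*
+ * Parsing sketch (illustrative): since the header arrives in big-endian
+ * order while the A5 runs little-endian, multi-byte fields are expected
+ * to be converted before use, e.g.:
+ *
+ *	u16 if_id = be16_to_cpu((__force __be16)mux_hdr->interface_id);
+ *	u32 meta = be32_to_cpu((__force __be32)mux_hdr->metadata);
+ */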
+
+#endif /* _IPA_HW_DEFS_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
new file mode 100644
index 0000000..d0c5c9d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -0,0 +1,2035 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA3_I_H_
+#define _IPA3_I_H_
+
+#include <linux/bitops.h>
+#include <linux/cdev.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/ipa.h>
+#include <linux/ipa_usb.h>
+#include <linux/msm-sps.h>
+#include <asm/dma-iommu.h>
+#include <linux/iommu.h>
+#include <linux/platform_device.h>
+#include <linux/firmware.h>
+#include "ipa_hw_defs.h"
+#include "ipa_qmi_service.h"
+#include "../ipa_api.h"
+#include "ipahal/ipahal_reg.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "../ipa_common_i.h"
+#include "ipa_uc_offload_i.h"
+
+#define DRV_NAME "ipa"
+#define NAT_DEV_NAME "ipaNatTable"
+#define IPA_COOKIE 0x57831603
+#define MTU_BYTE 1500
+
+#define IPA3_MAX_NUM_PIPES 31
+#define IPA_SYS_DESC_FIFO_SZ 0x800
+#define IPA_SYS_TX_DATA_DESC_FIFO_SZ 0x1000
+#define IPA_LAN_RX_HEADER_LENGTH (2)
+#define IPA_QMAP_HEADER_LENGTH (4)
+#define IPA_DL_CHECKSUM_LENGTH (8)
+#define IPA_NUM_DESC_PER_SW_TX (3)
+#define IPA_GENERIC_RX_POOL_SZ 192
+#define IPA_UC_FINISH_MAX 6
+#define IPA_UC_WAIT_MIN_SLEEP 1000
+#define IPA_UC_WAII_MAX_SLEEP 1200
+#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
+#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
+
+#define IPA_MAX_STATUS_STAT_NUM 30
+
+#define IPA_IPC_LOG_PAGES 50
+
+#define IPADBG(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define IPADBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAERR(fmt, args...) \
+	do { \
+		pr_err(DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args);\
+		if (ipa3_ctx) { \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+			IPA_IPC_LOGGING(ipa3_ctx->logbuf_low, \
+				DRV_NAME " %s:%d " fmt, ## args); \
+		} \
+	} while (0)
+
+#define WLAN_AMPDU_TX_EP 15
+#define WLAN_PROD_TX_EP  19
+#define WLAN1_CONS_RX_EP  14
+#define WLAN2_CONS_RX_EP  16
+#define WLAN3_CONS_RX_EP  17
+#define WLAN4_CONS_RX_EP  18
+
+#define IPA_RAM_NAT_OFST    0
+#define IPA_RAM_NAT_SIZE    0
+#define IPA_MEM_CANARY_VAL 0xdeadbeef
+
+#define IPA_STATS
+
+#ifdef IPA_STATS
+#define IPA_STATS_INC_CNT(val) (++val)
+#define IPA_STATS_DEC_CNT(val) (--val)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do {				\
+	if (__excp < 0 || __excp >= IPAHAL_PKT_STATUS_EXCEPTION_MAX)	\
+		break;							\
+	++__base[__excp];						\
+	} while (0)
+#else
+#define IPA_STATS_INC_CNT(x) do { } while (0)
+#define IPA_STATS_DEC_CNT(x)
+#define IPA_STATS_EXCP_CNT(__excp, __base) do { } while (0)
+#endif
+
+#define IPA_HDR_BIN0 0
+#define IPA_HDR_BIN1 1
+#define IPA_HDR_BIN2 2
+#define IPA_HDR_BIN3 3
+#define IPA_HDR_BIN4 4
+#define IPA_HDR_BIN_MAX 5
+
+#define IPA_HDR_PROC_CTX_BIN0 0
+#define IPA_HDR_PROC_CTX_BIN1 1
+#define IPA_HDR_PROC_CTX_BIN_MAX 2
+
+#define IPA_EVENT_THRESHOLD 0x10
+
+/*
+ * Due to a ZLT issue with the USB 3.0 core, the IPA BAM threshold needs
+ * to be set to max packet size + 1. After setting the threshold, the USB
+ * core will not be notified on ZLTs
+ */
+#define IPA_USB_EVENT_THRESHOLD 0x4001
+
+#define IPA_RX_POOL_CEIL 32
+#define IPA_RX_SKB_SIZE 1792
+
+#define IPA_A5_MUX_HDR_NAME "ipa_excp_hdr"
+#define IPA_LAN_RX_HDR_NAME "ipa_lan_hdr"
+#define IPA_INVALID_L4_PROTOCOL 0xFF
+
+#define IPA_CLIENT_IS_PROD(x) ((x) >= IPA_CLIENT_PROD && (x) < IPA_CLIENT_CONS)
+#define IPA_CLIENT_IS_CONS(x) ((x) >= IPA_CLIENT_CONS && (x) < IPA_CLIENT_MAX)
+
+#define IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst) \
+	(((start_ofst) + 127) & ~127)
+
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE 8
+#define IPA_HDR_PROC_CTX_TABLE_ALIGNMENT(start_ofst) \
+	(((start_ofst) + IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1) & \
+	~(IPA_HDR_PROC_CTX_TABLE_ALIGNMENT_BYTE - 1))
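+
+/*
+ * Alignment example: with the 8 byte requirement above, a start offset
+ * of 0x1001 is rounded up by IPA_HDR_PROC_CTX_TABLE_ALIGNMENT() to
+ * 0x1008, while an already aligned offset such as 0x1008 is returned
+ * unchanged.
+ */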
+
+#define MAX_RESOURCE_TO_CLIENTS (IPA_CLIENT_MAX)
+#define IPA_MEM_PART(x_) (ipa3_ctx->ctrl->mem_partition.x_)
+
+#define IPA_GSI_CHANNEL_STOP_MAX_RETRY 10
+#define IPA_GSI_CHANNEL_STOP_PKT_SIZE 1
+
+#define IPA_GSI_CHANNEL_EMPTY_MAX_RETRY 15
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC (1000)
+#define IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC (2000)
+
+#define IPA_SLEEP_CLK_RATE_KHZ (32)
+
+#define IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES 120
+#define IPA3_ACTIVE_CLIENTS_LOG_LINE_LEN 96
+#define IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE 50
+#define IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN 40
+
+struct ipa3_active_client_htable_entry {
+	struct hlist_node list;
+	char id_string[IPA3_ACTIVE_CLIENTS_LOG_NAME_LEN];
+	int count;
+	enum ipa_active_client_log_type type;
+};
+
+struct ipa3_active_clients_log_ctx {
+	char *log_buffer[IPA3_ACTIVE_CLIENTS_LOG_BUFFER_SIZE_LINES];
+	int log_head;
+	int log_tail;
+	bool log_rdy;
+	struct hlist_head htable[IPA3_ACTIVE_CLIENTS_LOG_HASHTABLE_SIZE];
+};
+
+struct ipa3_client_names {
+	enum ipa_client_type names[MAX_RESOURCE_TO_CLIENTS];
+	int length;
+};
+
+struct ipa_smmu_cb_ctx {
+	bool valid;
+	struct device *dev;
+	struct dma_iommu_mapping *mapping;
+	struct iommu_domain *iommu;
+	unsigned long next_addr;
+	u32 va_start;
+	u32 va_size;
+	u32 va_end;
+};
+
+/**
+ * struct ipa3_flt_entry - IPA filtering table entry
+ * @link: entry's link in global filtering entries list
+ * @rule: filter rule
+ * @cookie: cookie used for validity check
+ * @tbl: filter table
+ * @rt_tbl: routing table
+ * @hw_len: entry's size
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ *  among other rules at the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ */
+struct ipa3_flt_entry {
+	struct list_head link;
+	struct ipa_flt_rule rule;
+	u32 cookie;
+	struct ipa3_flt_tbl *tbl;
+	struct ipa3_rt_tbl *rt_tbl;
+	u32 hw_len;
+	int id;
+	u16 prio;
+	u16 rule_id;
+};
+
+/**
+ * struct ipa3_rt_tbl - IPA routing table
+ * @link: table's link in global routing tables list
+ * @head_rt_rule_list: head of routing rules list
+ * @name: routing table name
+ * @idx: routing table index
+ * @rule_cnt: number of rules in routing table
+ * @ref_cnt: reference counter of routing table
+ * @set: collection of routing tables
+ * @cookie: cookie used for validity check
+ * @in_sys: flag indicating if the table is located in system memory
+ * @sz: the size of the routing table
+ * @curr_mem: current routing tables block in sys memory
+ * @prev_mem: previous routing table block in sys memory
+ * @id: routing table id
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_rt_tbl {
+	struct list_head link;
+	struct list_head head_rt_rule_list;
+	char name[IPA_RESOURCE_NAME_MAX];
+	u32 idx;
+	u32 rule_cnt;
+	u32 ref_cnt;
+	struct ipa3_rt_tbl_set *set;
+	u32 cookie;
+	bool in_sys[IPA_RULE_TYPE_MAX];
+	u32 sz[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+	int id;
+	struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_hdr_entry - IPA header table entry
+ * @link: entry's link in global header table entries list
+ * @hdr: the header
+ * @hdr_len: header length
+ * @name: name of header table entry
+ * @type: l2 header type
+ * @is_partial: flag indicating if header table entry is partial
+ * @is_hdr_proc_ctx: false - hdr entry resides in hdr table,
+ * true - hdr entry resides in DDR and pointed to by proc ctx
+ * @phys_base: physical address of entry in DDR when is_hdr_proc_ctx is true,
+ * else 0
+ * @proc_ctx: processing context header
+ * @offset_entry: entry's offset
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of routing table
+ * @id: header entry id
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa3_hdr_entry {
+	struct list_head link;
+	u8 hdr[IPA_HDR_MAX_SIZE];
+	u32 hdr_len;
+	char name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type type;
+	u8 is_partial;
+	bool is_hdr_proc_ctx;
+	dma_addr_t phys_base;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+	struct ipa_hdr_offset_entry *offset_entry;
+	u32 cookie;
+	u32 ref_cnt;
+	int id;
+	u8 is_eth2_ofst_valid;
+	u16 eth2_ofst;
+};
+
+/**
+ * struct ipa3_hdr_tbl - IPA header table
+ * @head_hdr_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @hdr_cnt: number of headers
+ * @end: the last header index
+ */
+struct ipa3_hdr_tbl {
+	struct list_head head_hdr_entry_list;
+	struct list_head head_offset_list[IPA_HDR_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_BIN_MAX];
+	u32 hdr_cnt;
+	u32 end;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_offset_entry - IPA proc ctx header offset entry
+ * @link: entry's link in global processing context header offset entries list
+ * @offset: the offset
+ * @bin: bin
+ */
+struct ipa3_hdr_proc_ctx_offset_entry {
+	struct list_head link;
+	u32 offset;
+	u32 bin;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_entry - IPA processing context header table entry
+ * @link: entry's link in global header table entries list
+ * @type: header processing context type
+ * @offset_entry: entry's offset
+ * @hdr: the header
+ * @cookie: cookie used for validity check
+ * @ref_cnt: reference counter of routing table
+ * @id: processing context header entry id
+ */
+struct ipa3_hdr_proc_ctx_entry {
+	struct list_head link;
+	enum ipa_hdr_proc_type type;
+	struct ipa3_hdr_proc_ctx_offset_entry *offset_entry;
+	struct ipa3_hdr_entry *hdr;
+	u32 cookie;
+	u32 ref_cnt;
+	int id;
+};
+
+/**
+ * struct ipa3_hdr_proc_ctx_tbl - IPA processing context header table
+ * @head_proc_ctx_entry_list: header entries list
+ * @head_offset_list: header offset list
+ * @head_free_offset_list: header free offset list
+ * @proc_ctx_cnt: number of processing context headers
+ * @end: the last processing context header index
+ * @start_offset: offset in words of processing context header table
+ */
+struct ipa3_hdr_proc_ctx_tbl {
+	struct list_head head_proc_ctx_entry_list;
+	struct list_head head_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	struct list_head head_free_offset_list[IPA_HDR_PROC_CTX_BIN_MAX];
+	u32 proc_ctx_cnt;
+	u32 end;
+	u32 start_offset;
+};
+
+/**
+ * struct ipa3_flt_tbl - IPA filter table
+ * @head_flt_rule_list: filter rules list
+ * @rule_cnt: number of filter rules
+ * @in_sys: flag indicating if filter table is located in system memory
+ * @sz: the size of the filter tables
+ * @end: the last header index
+ * @curr_mem: current filter tables block in sys memory
+ * @prev_mem: previous filter table block in sys memory
+ * @rule_ids: idr structure that holds the rule_id for each rule
+ */
+struct ipa3_flt_tbl {
+	struct list_head head_flt_rule_list;
+	u32 rule_cnt;
+	bool in_sys[IPA_RULE_TYPE_MAX];
+	u32 sz[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer curr_mem[IPA_RULE_TYPE_MAX];
+	struct ipa_mem_buffer prev_mem[IPA_RULE_TYPE_MAX];
+	bool sticky_rear;
+	struct idr rule_ids;
+};
+
+/**
+ * struct ipa3_rt_entry - IPA routing table entry
+ * @link: entry's link in global routing table entries list
+ * @rule: routing rule
+ * @cookie: cookie used for validity check
+ * @tbl: routing table
+ * @hdr: header table
+ * @proc_ctx: processing context table
+ * @hw_len: the length of the table
+ * @id: rule handle - globally unique
+ * @prio: rule 10bit priority which defines the order of the rule
+ *  among other rules at the same integrated table
+ * @rule_id: rule 10bit ID to be returned in packet status
+ */
+struct ipa3_rt_entry {
+	struct list_head link;
+	struct ipa_rt_rule rule;
+	u32 cookie;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_hdr_entry *hdr;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+	u32 hw_len;
+	int id;
+	u16 prio;
+	u16 rule_id;
+};
+
+/**
+ * struct ipa3_rt_tbl_set - collection of routing tables
+ * @head_rt_tbl_list: collection of routing tables
+ * @tbl_cnt: number of routing tables
+ */
+struct ipa3_rt_tbl_set {
+	struct list_head head_rt_tbl_list;
+	u32 tbl_cnt;
+};
+
+/**
+ * struct ipa3_wlan_stats - Wlan stats for each wlan endpoint
+ * @rx_pkts_rcvd: Packets sent by wlan driver
+ * @rx_pkts_status_rcvd: Status packets received from ipa hw
+ * @rx_hd_processed: Data Descriptors processed by IPA Driver
+ * @rx_hd_reply: Data Descriptors recycled by wlan driver
+ * @rx_hd_rcvd: Data Descriptors sent by wlan driver
+ * @rx_pkt_leak: Packet count that are not recycled
+ * @rx_dp_fail: Packets failed to transfer to IPA HW
+ * @tx_pkts_rcvd: SKB Buffers received from ipa hw
+ * @tx_pkts_sent: SKB Buffers sent to wlan driver
+ * @tx_pkts_dropped: Dropped packets count
+ */
+struct ipa3_wlan_stats {
+	u32 rx_pkts_rcvd;
+	u32 rx_pkts_status_rcvd;
+	u32 rx_hd_processed;
+	u32 rx_hd_reply;
+	u32 rx_hd_rcvd;
+	u32 rx_pkt_leak;
+	u32 rx_dp_fail;
+	u32 tx_pkts_rcvd;
+	u32 tx_pkts_sent;
+	u32 tx_pkts_dropped;
+};
+
+/**
+ * struct ipa3_wlan_comm_memb - Wlan comm members
+ * @wlan_spinlock: protects wlan comm buff list and its size
+ * @ipa_tx_mul_spinlock: protects tx dp mul transfer
+ * @wlan_comm_total_cnt: wlan common skb buffers allocated count
+ * @wlan_comm_free_cnt: wlan common skb buffer free count
+ * @total_tx_pkts_freed: Recycled Buffer count
+ * @wlan_comm_desc_list: wlan common skb buffer list
+ */
+struct ipa3_wlan_comm_memb {
+	spinlock_t wlan_spinlock;
+	spinlock_t ipa_tx_mul_spinlock;
+	u32 wlan_comm_total_cnt;
+	u32 wlan_comm_free_cnt;
+	u32 total_tx_pkts_freed;
+	struct list_head wlan_comm_desc_list;
+	atomic_t active_clnt_cnt;
+};
+
+struct ipa_gsi_ep_mem_info {
+	u16 evt_ring_len;
+	u64 evt_ring_base_addr;
+	void *evt_ring_base_vaddr;
+	u16 chan_ring_len;
+	u64 chan_ring_base_addr;
+	void *chan_ring_base_vaddr;
+};
+
+struct ipa3_status_stats {
+	struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
+	unsigned int curr;
+};
+
+/**
+ * struct ipa3_ep_context - IPA end point context
+ * @valid: flag indicating if EP context is valid
+ * @client: EP client type
+ * @ep_hdl: EP's client SPS handle
+ * @gsi_chan_hdl: EP's GSI channel handle
+ * @gsi_evt_ring_hdl: EP's GSI channel event ring handle
+ * @gsi_mem_info: EP's GSI channel rings info
+ * @chan_scratch: EP's GSI channel scratch info
+ * @cfg: EP configuration
+ * @dst_pipe_index: destination pipe index
+ * @rt_tbl_idx: routing table index
+ * @connect: SPS connect
+ * @priv: user provided information which will be forwarded once the user is
+ *        notified of new data avail
+ * @client_notify: user provided CB for EP events notification, the event is
+ *                 data received.
+ * @desc_fifo_in_pipe_mem: flag indicating if descriptors FIFO uses pipe memory
+ * @data_fifo_in_pipe_mem: flag indicating if data FIFO uses pipe memory
+ * @desc_fifo_pipe_mem_ofst: descriptors FIFO pipe memory offset
+ * @data_fifo_pipe_mem_ofst: data FIFO pipe memory offset
+ * @desc_fifo_client_allocated: if descriptors FIFO was allocated by a client
+ * @data_fifo_client_allocated: if data FIFO was allocated by a client
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @disconnect_in_progress: Indicates client disconnect in progress.
+ * @qmi_request_sent: Indicates whether QMI request to enable clear data path
+ *					request is sent or not.
+ * @napi_enabled: when true, IPA call client callback to start polling
+ */
+struct ipa3_ep_context {
+	int valid;
+	enum ipa_client_type client;
+	struct sps_pipe *ep_hdl;
+	unsigned long gsi_chan_hdl;
+	unsigned long gsi_evt_ring_hdl;
+	struct ipa_gsi_ep_mem_info gsi_mem_info;
+	union __packed gsi_channel_scratch chan_scratch;
+	bool bytes_xfered_valid;
+	u16 bytes_xfered;
+	dma_addr_t phys_base;
+	struct ipa_ep_cfg cfg;
+	struct ipa_ep_cfg_holb holb;
+	struct ipahal_reg_ep_cfg_status status;
+	u32 dst_pipe_index;
+	u32 rt_tbl_idx;
+	struct sps_connect connect;
+	void *priv;
+	void (*client_notify)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+	bool desc_fifo_in_pipe_mem;
+	bool data_fifo_in_pipe_mem;
+	u32 desc_fifo_pipe_mem_ofst;
+	u32 data_fifo_pipe_mem_ofst;
+	bool desc_fifo_client_allocated;
+	bool data_fifo_client_allocated;
+	atomic_t avail_fifo_desc;
+	u32 dflt_flt4_rule_hdl;
+	u32 dflt_flt6_rule_hdl;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct ipa3_wlan_stats wstats;
+	u32 uc_offload_state;
+	bool disconnect_in_progress;
+	u32 qmi_request_sent;
+	bool napi_enabled;
+	bool switch_to_intr;
+	int inactive_cycles;
+	u32 eot_in_poll_err;
+
+	/* sys MUST be the last element of this struct */
+	struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa_request_gsi_channel_params - GSI channel request properties
+ *
+ * @ipa_ep_cfg:          IPA EP configuration
+ * @client:              type of "client"
+ * @priv:                callback cookie
+ * @notify:              callback
+ *           priv - callback cookie
+ *           evt - type of event
+ *           data - data relevant to event. May not be valid.
+ *           See event_type enum for valid cases.
+ * @skip_ep_cfg:         boolean field that determines if EP should be
+ *                       configured by IPA driver
+ * @keep_ipa_awake:      when true, IPA will not be clock gated
+ * @evt_ring_params:     parameters for the channel's event ring
+ * @evt_scratch:         parameters for the channel's event ring scratch
+ * @chan_params:         parameters for the channel
+ * @chan_scratch:        parameters for the channel's scratch
+ *
+ */
+struct ipa_request_gsi_channel_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	void *priv;
+	ipa_notify_cb notify;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	struct gsi_evt_ring_props evt_ring_params;
+	union __packed gsi_evt_scratch evt_scratch;
+	struct gsi_chan_props chan_params;
+	union __packed gsi_channel_scratch chan_scratch;
+};
+
+enum ipa3_sys_pipe_policy {
+	IPA_POLICY_INTR_MODE,
+	IPA_POLICY_NOINTR_MODE,
+	IPA_POLICY_INTR_POLL_MODE,
+};
+
+struct ipa3_repl_ctx {
+	struct ipa3_rx_pkt_wrapper **cache;
+	atomic_t head_idx;
+	atomic_t tail_idx;
+	u32 capacity;
+};
+
+/**
+ * struct ipa3_sys_context - IPA endpoint context for system to BAM pipes
+ * @head_desc_list: header descriptors list
+ * @len: the size of the above list
+ * @spinlock: protects the list and its size
+ * @event: used to request CALLBACK mode from SPS driver
+ * @ep: IPA EP context
+ *
+ * IPA context specific to the system-bam pipes a.k.a LAN IN/OUT and WAN
+ */
+struct ipa3_sys_context {
+	u32 len;
+	struct sps_register_event event;
+	atomic_t curr_polling_state;
+	struct delayed_work switch_to_intr_work;
+	enum ipa3_sys_pipe_policy policy;
+	int (*pyld_hdlr)(struct sk_buff *skb, struct ipa3_sys_context *sys);
+	struct sk_buff * (*get_skb)(unsigned int len, gfp_t flags);
+	void (*free_skb)(struct sk_buff *skb);
+	void (*free_rx_wrapper)(struct ipa3_rx_pkt_wrapper *rx_pkt);
+	u32 rx_buff_sz;
+	u32 rx_pool_sz;
+	struct sk_buff *prev_skb;
+	unsigned int len_rem;
+	unsigned int len_pad;
+	unsigned int len_partial;
+	bool drop_packet;
+	struct work_struct work;
+	void (*sps_callback)(struct sps_event_notify *notify);
+	enum sps_option sps_option;
+	struct delayed_work replenish_rx_work;
+	struct work_struct repl_work;
+	void (*repl_hdlr)(struct ipa3_sys_context *sys);
+	struct ipa3_repl_ctx repl;
+
+	/* ordering is important - mutable fields go above */
+	struct ipa3_ep_context *ep;
+	struct list_head head_desc_list;
+	struct list_head rcycl_list;
+	spinlock_t spinlock;
+	struct workqueue_struct *wq;
+	struct workqueue_struct *repl_wq;
+	struct ipa3_status_stats *status_stat;
+	/* ordering is important - other immutable fields go below */
+};
+
+/**
+ * enum ipa3_desc_type - IPA descriptors type
+ *
+ * IPA descriptors type, IPA supports DD and ICD but no CD
+ */
+enum ipa3_desc_type {
+	IPA_DATA_DESC,
+	IPA_DATA_DESC_SKB,
+	IPA_DATA_DESC_SKB_PAGED,
+	IPA_IMM_CMD_DESC,
+};
+
+/**
+ * struct ipa3_tx_pkt_wrapper - IPA Tx packet wrapper
+ * @type: specify if this packet is for the skb or immediate command
+ * @mem: memory buffer used by this Tx packet
+ * @work: work struct for current Tx packet
+ * @link: linked to the wrappers on that pipe
+ * @callback: IPA client provided callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @sys: corresponding IPA sys context
+ * @mult: valid only for first of a "multiple" transfer,
+ * holds info for the "sps_transfer" buffer
+ * @cnt: 1 for single transfers,
+ * >1 and <0xFFFF for first of a "multiple" transfer,
+ * 0xFFFF for last desc, 0 for rest of "multiple" transfer
+ * @bounce: va of bounce buffer
+ * @no_unmap_dma: when true, the buffer will not be DMA unmapped
+ *
+ * This struct can wrap both data packet and immediate command packet.
+ */
+struct ipa3_tx_pkt_wrapper {
+	enum ipa3_desc_type type;
+	struct ipa_mem_buffer mem;
+	struct work_struct work;
+	struct list_head link;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct ipa3_sys_context *sys;
+	struct ipa_mem_buffer mult;
+	u32 cnt;
+	void *bounce;
+	bool no_unmap_dma;
+};
+
+/**
+ * struct ipa3_dma_xfer_wrapper - IPADMA transfer descr wrapper
+ * @phys_addr_src: physical address of the source data to copy
+ * @phys_addr_dest: physical address to store the copied data
+ * @len: len in bytes to copy
+ * @link: linked to the wrappers list on the proper (sync/async) cons pipe
+ * @xfer_done: completion object for sync_memcpy completion
+ * @callback: IPADMA client provided completion callback
+ * @user1: cookie1 for above callback
+ *
+ * This struct can wrap both sync and async memcpy transfers descriptors.
+ */
+struct ipa3_dma_xfer_wrapper {
+	u64 phys_addr_src;
+	u64 phys_addr_dest;
+	u16 len;
+	struct list_head link;
+	struct completion xfer_done;
+	void (*callback)(void *user1);
+	void *user1;
+};
+
+/**
+ * struct ipa3_desc - IPA descriptor
+ * @type: skb or immediate command or plain old data
+ * @pyld: points to skb
+ * or kmalloc'ed immediate command parameters/plain old data
+ * @frag: points to paged fragment
+ * @dma_address: dma mapped address of pyld
+ * @dma_address_valid: valid field for dma_address
+ * @len: length of the pyld
+ * @opcode: for immediate commands
+ * @callback: IPA client provided completion callback
+ * @user1: cookie1 for above callback
+ * @user2: cookie2 for above callback
+ * @xfer_done: completion object for sync completion
+ */
+struct ipa3_desc {
+	enum ipa3_desc_type type;
+	void *pyld;
+	skb_frag_t *frag;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+	u16 len;
+	u16 opcode;
+	void (*callback)(void *user1, int user2);
+	void *user1;
+	int user2;
+	struct completion xfer_done;
+};
+
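+/*
+ * Illustrative sketch (hypothetical helper, not part of the driver API):
+ * how a plain data payload could be described with struct ipa3_desc before
+ * being handed to a send routine such as ipa3_send_one(). Field usage is
+ * inferred from the structure documentation above; the DMA-mapping
+ * behaviour noted below is an assumption.
+ */
+static inline void ipa3_example_fill_data_desc(struct ipa3_desc *desc,
+	void *payload, u16 len,
+	void (*done)(void *user1, int user2), void *ctx)
+{
+	desc->type = IPA_DATA_DESC;	/* plain old data, not skb/imm cmd */
+	desc->pyld = payload;
+	desc->frag = NULL;
+	desc->dma_address = 0;
+	/* leave dma_address invalid; assume the send path maps pyld */
+	desc->dma_address_valid = false;
+	desc->len = len;
+	desc->opcode = 0;		/* meaningful only for imm commands */
+	desc->callback = done;		/* completion callback */
+	desc->user1 = ctx;		/* cookie1 passed back to callback */
+	desc->user2 = 0;		/* cookie2, unused in this sketch */
+}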
+/**
+ * struct ipa3_rx_pkt_wrapper - IPA Rx packet wrapper
+ * @skb: skb
+ * @dma_address: DMA address of this Rx packet
+ * @link: linked to the Rx packets on that pipe
+ * @len: how many bytes are copied into skb's flat buffer
+ */
+struct ipa3_rx_pkt_wrapper {
+	struct list_head link;
+	struct ipa_rx_data data;
+	u32 len;
+	struct work_struct work;
+	struct ipa3_sys_context *sys;
+};
+
+/**
+ * struct ipa3_nat_mem - IPA NAT memory description
+ * @class: pointer to the struct class
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @dev_num: device number
+ * @vaddr: virtual address
+ * @dma_handle: DMA handle
+ * @size: NAT memory size
+ * @is_mapped: flag indicating if NAT memory is mapped
+ * @is_sys_mem: flag indicating if NAT memory is sys memory
+ * @is_dev_init: flag indicating if NAT device is initialized
+ * @lock: NAT memory mutex
+ * @nat_base_address: nat table virtual address
+ * @ipv4_rules_addr: base nat table address
+ * @ipv4_expansion_rules_addr: expansion table address
+ * @index_table_addr: index table address
+ * @index_table_expansion_addr: index expansion table address
+ * @size_base_tables: base table size
+ * @size_expansion_tables: expansion table size
+ * @public_ip_addr: ip address of nat table
+ */
+struct ipa3_nat_mem {
+	struct class *class;
+	struct device *dev;
+	struct cdev cdev;
+	dev_t dev_num;
+	void *vaddr;
+	dma_addr_t dma_handle;
+	size_t size;
+	bool is_mapped;
+	bool is_sys_mem;
+	bool is_dev_init;
+	bool is_dev;
+	struct mutex lock;
+	void *nat_base_address;
+	char *ipv4_rules_addr;
+	char *ipv4_expansion_rules_addr;
+	char *index_table_addr;
+	char *index_table_expansion_addr;
+	u32 size_base_tables;
+	u32 size_expansion_tables;
+	u32 public_ip_addr;
+	void *tmp_vaddr;
+	dma_addr_t tmp_dma_handle;
+	bool is_tmp_mem;
+};
+
+/**
+ * enum ipa3_hw_mode - IPA hardware mode
+ * @IPA_HW_MODE_NORMAL: Regular IPA hardware
+ * @IPA_HW_MODE_VIRTUAL: IPA hardware supporting virtual memory allocation
+ * @IPA_HW_MODE_PCIE: IPA hardware supporting memory allocation over PCIE Bridge
+ */
+enum ipa3_hw_mode {
+	IPA_HW_MODE_NORMAL  = 0,
+	IPA_HW_MODE_VIRTUAL = 1,
+	IPA_HW_MODE_PCIE    = 2
+};
+
+enum ipa3_config_this_ep {
+	IPA_CONFIGURE_THIS_EP,
+	IPA_DO_NOT_CONFIGURE_THIS_EP,
+};
+
+struct ipa3_stats {
+	u32 tx_sw_pkts;
+	u32 tx_hw_pkts;
+	u32 rx_pkts;
+	u32 rx_excp_pkts[IPAHAL_PKT_STATUS_EXCEPTION_MAX];
+	u32 rx_repl_repost;
+	u32 tx_pkts_compl;
+	u32 rx_q_len;
+	u32 msg_w[IPA_EVENT_MAX_NUM];
+	u32 msg_r[IPA_EVENT_MAX_NUM];
+	u32 stat_compl;
+	u32 aggr_close;
+	u32 wan_aggr_close;
+	u32 wan_rx_empty;
+	u32 wan_repl_rx_empty;
+	u32 lan_rx_empty;
+	u32 lan_repl_rx_empty;
+	u32 flow_enable;
+	u32 flow_disable;
+	u32 tx_non_linear;
+};
+
+struct ipa3_active_clients {
+	struct mutex mutex;
+	spinlock_t spinlock;
+	bool mutex_locked;
+	int cnt;
+};
+
+struct ipa3_wakelock_ref_cnt {
+	spinlock_t spinlock;
+	int cnt;
+};
+
+struct ipa3_tag_completion {
+	struct completion comp;
+	atomic_t cnt;
+};
+
+struct ipa3_controller;
+
+/**
+ * struct ipa3_uc_hdlrs - IPA uC callback functions
+ * @ipa_uc_loaded_hdlr: Function handler when uC is loaded
+ * @ipa_uc_event_hdlr: Event handler function
+ * @ipa3_uc_response_hdlr: Response handler function
+ * @ipa_uc_event_log_info_hdlr: Log event handler function
+ */
+struct ipa3_uc_hdlrs {
+	void (*ipa_uc_loaded_hdlr)(void);
+
+	void (*ipa_uc_event_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio);
+
+	int (*ipa3_uc_response_hdlr)
+		(struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio,
+		u32 *uc_status);
+
+	void (*ipa_uc_event_log_info_hdlr)
+		(struct IpaHwEventLogInfoData_t *uc_event_top_mmio);
+};
+
+/**
+ * enum ipa3_hw_flags - flags which defines the behavior of HW
+ *
+ * @IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE: Halt system in case of assert
+ *	failure.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR: Channel error would be reported
+ *	in the event ring only. No event to CPU.
+ * @IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP: No need to report event
+ *	IPA_HW_2_CPU_EVENT_MHI_WAKE_UP_REQUEST
+ * @IPA_HW_FLAG_WORK_OVER_DDR: Perform all transaction to external addresses by
+ *	QMB (avoid memcpy)
+ * @IPA_HW_FLAG_NO_REPORT_OOB: If set do not report that the device is OOB in
+ *	IN Channel
+ * @IPA_HW_FLAG_NO_REPORT_DB_MODE: If set, do not report that the device is
+ *	entering a mode where it expects a doorbell to be rung for OUT Channel
+ * @IPA_HW_FLAG_NO_START_OOB_TIMER
+ */
+enum ipa3_hw_flags {
+	IPA_HW_FLAG_HALT_SYSTEM_ON_ASSERT_FAILURE	= 0x01,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR		= 0x02,
+	IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP	= 0x04,
+	IPA_HW_FLAG_WORK_OVER_DDR			= 0x08,
+	IPA_HW_FLAG_NO_REPORT_OOB			= 0x10,
+	IPA_HW_FLAG_NO_REPORT_DB_MODE			= 0x20,
+	IPA_HW_FLAG_NO_START_OOB_TIMER			= 0x40
+};
+
+/**
+ * struct ipa3_uc_ctx - IPA uC context
+ * @uc_inited: Indicates if uC interface has been initialized
+ * @uc_loaded: Indicates if uC has loaded
+ * @uc_failed: Indicates if uC has failed / returned an error
+ * @uc_lock: uC interface lock to allow only one uC interaction at a time
+ * @uc_spinlock: same as uc_lock but for irq contexts
+ * @uc_completion: Completion mechanism to wait for uC commands
+ * @uc_sram_mmio: Pointer to uC mapped memory
+ * @pending_cmd: The last command sent waiting to be ACKed
+ * @uc_status: The last status provided by the uC
+ * @uc_error_type: error type from uC error event
+ * @uc_error_timestamp: tag timer sampled after uC crashed
+ */
+struct ipa3_uc_ctx {
+	bool uc_inited;
+	bool uc_loaded;
+	bool uc_failed;
+	struct mutex uc_lock;
+	spinlock_t uc_spinlock;
+	struct completion uc_completion;
+	struct IpaHwSharedMemCommonMapping_t *uc_sram_mmio;
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio;
+	u32 uc_event_top_ofst;
+	u32 pending_cmd;
+	u32 uc_status;
+	u32 uc_error_type;
+	u32 uc_error_timestamp;
+	phys_addr_t rdy_ring_base_pa;
+	phys_addr_t rdy_ring_rp_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct ipa3_uc_wdi_ctx
+ * @wdi_uc_stats_ofst:
+ * @wdi_uc_stats_mmio:
+ * @priv:
+ * @uc_ready_cb:
+ */
+struct ipa3_uc_wdi_ctx {
+	/* WDI specific fields */
+	u32 wdi_uc_stats_ofst;
+	struct IpaHwStatsWDIInfoData_t *wdi_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * struct ipa3_transport_pm - transport power management related members
+ * @lock: lock for ensuring atomic operations
+ * @res_granted: true if SPS requested IPA resource and IPA granted it
+ * @res_rel_in_prog: true if releasing IPA resource is in progress
+ */
+struct ipa3_transport_pm {
+	spinlock_t lock;
+	bool res_granted;
+	bool res_rel_in_prog;
+	atomic_t dec_clients;
+	atomic_t eot_activity;
+};
+
+/**
+ * struct ipa3cm_client_info - the client-info indicated from IPACM
+ * @client_enum: the enum to indicate tether-client
+ * @uplink: the bool to indicate pipe for uplink
+ */
+struct ipa3cm_client_info {
+	enum ipacm_client_enum client_enum;
+	bool uplink;
+};
+
+struct ipa3_smp2p_info {
+	u32 out_base_id;
+	u32 in_base_id;
+	bool res_sent;
+};
+
+/**
+ * struct ipa3_ready_cb_info - A list of all the registrations
+ *  for an indication of IPA driver readiness
+ *
+ * @link: linked list link
+ * @ready_cb: callback
+ * @user_data: User data
+ *
+ */
+struct ipa3_ready_cb_info {
+	struct list_head link;
+	ipa_ready_cb ready_cb;
+	void *user_data;
+};
+
+struct ipa_tz_unlock_reg_info {
+	u64 reg_addr;
+	u32 size;
+};
+
+/**
+ * struct ipa3_context - IPA context
+ * @class: pointer to the struct class
+ * @dev_num: device number
+ * @dev: the dev_t of the device
+ * @cdev: cdev of the device
+ * @bam_handle: IPA driver's BAM handle
+ * @ep: list of all end points
+ * @skip_ep_cfg_shadow: state to update filter table correctly across
+ *  power-save
+ * @ep_flt_bitmap: End-points supporting filtering bitmap
+ * @ep_flt_num: End-points supporting filtering number
+ * @resume_on_connect: resume ep on ipa3_connect
+ * @flt_tbl: list of all IPA filter tables
+ * @mode: IPA operating mode
+ * @mmio: iomem
+ * @ipa_wrapper_base: IPA wrapper base address
+ * @hdr_tbl: IPA header table
+ * @hdr_proc_ctx_tbl: IPA processing context table
+ * @rt_tbl_set: list of routing tables each of which is a list of rules
+ * @reap_rt_tbl_set: list of sys mem routing tables waiting to be reaped
+ * @flt_rule_cache: filter rule cache
+ * @rt_rule_cache: routing rule cache
+ * @hdr_cache: header cache
+ * @hdr_offset_cache: header offset cache
+ * @hdr_proc_ctx_cache: processing context cache
+ * @hdr_proc_ctx_offset_cache: processing context offset cache
+ * @rt_tbl_cache: routing table cache
+ * @tx_pkt_wrapper_cache: Tx packets cache
+ * @rx_pkt_wrapper_cache: Rx packets cache
+ * @rt_idx_bitmap: routing table index bitmap
+ * @lock: this does NOT protect the linked lists within ipa3_sys_context
+ * @smem_sz: shared memory size available for SW use starting
+ *  from non-restricted bytes
+ * @smem_restricted_bytes: the bytes that SW should not use in the shared mem
+ * @nat_mem: NAT memory
+ * @excp_hdr_hdl: exception header handle
+ * @dflt_v4_rt_rule_hdl: default v4 routing rule handle
+ * @dflt_v6_rt_rule_hdl: default v6 routing rule handle
+ * @aggregation_type: aggregation type used on USB client endpoint
+ * @aggregation_byte_limit: aggregation byte limit used on USB client endpoint
+ * @aggregation_time_limit: aggregation time limit used on USB client endpoint
+ * @hdr_tbl_lcl: where hdr tbl resides 1-local, 0-system
+ * @hdr_proc_ctx_tbl_lcl: where proc_ctx tbl resides true-local, false-system
+ * @hdr_mem: header memory
+ * @hdr_proc_ctx_mem: processing context memory
+ * @ip4_rt_tbl_lcl: where ip4 rt tables reside 1-local; 0-system
+ * @ip6_rt_tbl_lcl: where ip6 rt tables reside 1-local; 0-system
+ * @ip4_flt_tbl_lcl: where ip4 flt tables reside 1-local; 0-system
+ * @ip6_flt_tbl_lcl: where ip6 flt tables reside 1-local; 0-system
+ * @power_mgmt_wq: workqueue for power management
+ * @transport_power_mgmt_wq: workqueue transport related power management
+ * @tag_process_before_gating: indicates whether to start tag process before
+ *  gating IPA clocks
+ * @transport_pm: transport power management related information
+ * @disconnect_lock: protects LAN_CONS packet receive notification CB
+ * @pipe_mem_pool: pipe memory pool
+ * @dma_pool: special purpose DMA pool
+ * @ipa3_active_clients: structure for reference counting connected IPA clients
+ * @ipa_hw_type: type of IPA HW (e.g. IPA 1.0, IPA 1.1, etc.)
+ * @ipa3_hw_mode: IPA HW mode (e.g. Normal, Virtual or over PCIe)
+ * @use_ipa_teth_bridge: use tethering bridge driver
+ * @ipa_bam_remote_mode: ipa bam is in remote mode
+ * @modem_cfg_emb_pipe_flt: modem configure embedded pipe filtering rules
+ * @logbuf: ipc log buffer for high priority messages
+ * @logbuf_low: ipc log buffer for low priority messages
+ * @ipa_wdi2: using wdi-2.0
+ * @use_64_bit_dma_mask: using 64bits dma mask
+ * @ipa_bus_hdl: msm driver handle for the data path bus
+ * @ctrl: holds the core specific operations based on
+ *  core version (vtable like)
+ * @enable_clock_scaling: indicates whether clock scaling is enabled
+ * @curr_ipa_clk_rate: ipa3_clk current rate
+ * @wcstats: wlan common buffer stats
+ * @uc_ctx: uC interface context
+ * @uc_wdi_ctx: WDI specific fields for uC interface
+ * @ipa_num_pipes: The number of pipes used by IPA HW
+ * @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
+ * @ipa_client_apps_wan_cons_agg_gro: RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA
+ * @apply_rg10_wa: Indicates whether to use register group 10 workaround
+ * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround
+ * @w_lock: Indicates the wakeup source.
+ * @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
+ * @ipa_initialization_complete: Indicates that IPA is fully initialized
+ * @ipa_ready_cb_list: A list of all the clients who require a CB when IPA
+ *  driver is ready/initialized.
+ * @init_completion_obj: Completion object to be used in case IPA driver hasn't
+ *  finished initializing. Example of use - IOCTLs to /dev/ipa
+ * IPA context - holds all relevant info about IPA driver and its state
+ */
+struct ipa3_context {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+	unsigned long bam_handle;
+	struct ipa3_ep_context ep[IPA3_MAX_NUM_PIPES];
+	bool skip_ep_cfg_shadow[IPA3_MAX_NUM_PIPES];
+	u32 ep_flt_bitmap;
+	u32 ep_flt_num;
+	bool resume_on_connect[IPA_CLIENT_MAX];
+	struct ipa3_flt_tbl flt_tbl[IPA3_MAX_NUM_PIPES][IPA_IP_MAX];
+	void __iomem *mmio;
+	u32 ipa_wrapper_base;
+	u32 ipa_wrapper_size;
+	struct ipa3_hdr_tbl hdr_tbl;
+	struct ipa3_hdr_proc_ctx_tbl hdr_proc_ctx_tbl;
+	struct ipa3_rt_tbl_set rt_tbl_set[IPA_IP_MAX];
+	struct ipa3_rt_tbl_set reap_rt_tbl_set[IPA_IP_MAX];
+	struct kmem_cache *flt_rule_cache;
+	struct kmem_cache *rt_rule_cache;
+	struct kmem_cache *hdr_cache;
+	struct kmem_cache *hdr_offset_cache;
+	struct kmem_cache *hdr_proc_ctx_cache;
+	struct kmem_cache *hdr_proc_ctx_offset_cache;
+	struct kmem_cache *rt_tbl_cache;
+	struct kmem_cache *tx_pkt_wrapper_cache;
+	struct kmem_cache *rx_pkt_wrapper_cache;
+	unsigned long rt_idx_bitmap[IPA_IP_MAX];
+	struct mutex lock;
+	u16 smem_sz;
+	u16 smem_restricted_bytes;
+	u16 smem_reqd_sz;
+	struct ipa3_nat_mem nat_mem;
+	u32 excp_hdr_hdl;
+	u32 dflt_v4_rt_rule_hdl;
+	u32 dflt_v6_rt_rule_hdl;
+	uint aggregation_type;
+	uint aggregation_byte_limit;
+	uint aggregation_time_limit;
+	bool hdr_tbl_lcl;
+	bool hdr_proc_ctx_tbl_lcl;
+	struct ipa_mem_buffer hdr_mem;
+	struct ipa_mem_buffer hdr_proc_ctx_mem;
+	bool ip4_rt_tbl_hash_lcl;
+	bool ip4_rt_tbl_nhash_lcl;
+	bool ip6_rt_tbl_hash_lcl;
+	bool ip6_rt_tbl_nhash_lcl;
+	bool ip4_flt_tbl_hash_lcl;
+	bool ip4_flt_tbl_nhash_lcl;
+	bool ip6_flt_tbl_hash_lcl;
+	bool ip6_flt_tbl_nhash_lcl;
+	struct gen_pool *pipe_mem_pool;
+	struct dma_pool *dma_pool;
+	struct ipa3_active_clients ipa3_active_clients;
+	struct ipa3_active_clients_log_ctx ipa3_active_clients_logging;
+	struct workqueue_struct *power_mgmt_wq;
+	struct workqueue_struct *transport_power_mgmt_wq;
+	bool tag_process_before_gating;
+	struct ipa3_transport_pm transport_pm;
+	u32 clnt_hdl_cmd;
+	u32 clnt_hdl_data_in;
+	u32 clnt_hdl_data_out;
+	spinlock_t disconnect_lock;
+	u8 a5_pipe_index;
+	struct list_head intf_list;
+	struct list_head msg_list;
+	struct list_head pull_msg_list;
+	struct mutex msg_lock;
+	wait_queue_head_t msg_waitq;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa3_hw_mode ipa3_hw_mode;
+	bool use_ipa_teth_bridge;
+	bool ipa_bam_remote_mode;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	bool use_64_bit_dma_mask;
+	/* featurize if memory footprint becomes a concern */
+	struct ipa3_stats stats;
+	void *smem_pipe_mem;
+	void *logbuf;
+	void *logbuf_low;
+	u32 ipa_bus_hdl;
+	struct ipa3_controller *ctrl;
+	struct idr ipa_idr;
+	struct device *pdev;
+	struct device *uc_pdev;
+	spinlock_t idr_lock;
+	u32 enable_clock_scaling;
+	u32 curr_ipa_clk_rate;
+	bool q6_proxy_clk_vote_valid;
+	u32 ipa_num_pipes;
+
+	struct ipa3_wlan_comm_memb wc_memb;
+
+	struct ipa3_uc_ctx uc_ctx;
+
+	struct ipa3_uc_wdi_ctx uc_wdi_ctx;
+	struct ipa3_uc_ntn_ctx uc_ntn_ctx;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	enum ipa_transport_type transport_prototype;
+	unsigned long gsi_dev_hdl;
+	u32 ee;
+	bool apply_rg10_wa;
+	bool gsi_ch20_wa;
+	bool smmu_present;
+	bool smmu_s1_bypass;
+	unsigned long peer_bam_iova;
+	phys_addr_t peer_bam_pa;
+	u32 peer_bam_map_size;
+	unsigned long peer_bam_dev;
+	u32 peer_bam_map_cnt;
+	u32 wdi_map_cnt;
+	struct wakeup_source w_lock;
+	struct ipa3_wakelock_ref_cnt wakelock_ref_cnt;
+	/* RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA */
+	bool ipa_client_apps_wan_cons_agg_gro;
+	/* M-release support to know client pipes */
+	struct ipa3cm_client_info ipacm_client[IPA3_MAX_NUM_PIPES];
+	bool tethered_flow_control;
+	bool ipa_initialization_complete;
+	struct list_head ipa_ready_cb_list;
+	struct completion init_completion_obj;
+	struct completion uc_loaded_completion_obj;
+	struct ipa3_smp2p_info smp2p_info;
+	u32 ipa_tz_unlock_reg_num;
+	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
+};
+
+/**
+ * enum ipa3_pipe_mem_type - IPA pipe memory type
+ * @IPA_SPS_PIPE_MEM: Default, SPS dedicated pipe memory
+ * @IPA_PRIVATE_MEM: IPA's private memory
+ * @IPA_SYSTEM_MEM: System RAM, requires allocation
+ */
+enum ipa3_pipe_mem_type {
+	IPA_SPS_PIPE_MEM = 0,
+	IPA_PRIVATE_MEM  = 1,
+	IPA_SYSTEM_MEM   = 2,
+};
+
+struct ipa3_plat_drv_res {
+	bool use_ipa_teth_bridge;
+	u32 ipa_mem_base;
+	u32 ipa_mem_size;
+	u32 transport_mem_base;
+	u32 transport_mem_size;
+	u32 ipa_irq;
+	u32 transport_irq;
+	u32 ipa_pipe_mem_start_ofst;
+	u32 ipa_pipe_mem_size;
+	enum ipa_hw_type ipa_hw_type;
+	enum ipa3_hw_mode ipa3_hw_mode;
+	u32 ee;
+	bool ipa_bam_remote_mode;
+	bool modem_cfg_emb_pipe_flt;
+	bool ipa_wdi2;
+	bool use_64_bit_dma_mask;
+	u32 wan_rx_ring_size;
+	u32 lan_rx_ring_size;
+	bool skip_uc_pipe_reset;
+	enum ipa_transport_type transport_prototype;
+	bool apply_rg10_wa;
+	bool gsi_ch20_wa;
+	bool tethered_flow_control;
+	u32 ipa_tz_unlock_reg_num;
+	struct ipa_tz_unlock_reg_info *ipa_tz_unlock_reg;
+};
+
+/**
+ * struct ipa3_mem_partition - represents IPA RAM Map as read from DTS
+ * Order and type of members should not be changed without a suitable change
+ * to DTS file or the code that reads it.
+ *
+ * IPA v3.0 SRAM memory layout:
+ * +-------------------------+
+ * |    UC INFO              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT HDR HASHABLE     |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT HDR HASHABLE     |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 FLT HDR NON-HASHABLE |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT HDR HASHABLE      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V4 RT HDR NON-HASHABLE  |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT HDR HASHABLE      |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | V6 RT HDR NON-HASHABLE  |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |  MODEM HDR              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * | MODEM PROC CTX          |
+ * +-------------------------+
+ * | APPS PROC CTX           |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ * |  MODEM MEM              |
+ * +-------------------------+
+ * |    CANARY               |
+ * +-------------------------+
+ */
+struct ipa3_mem_partition {
+	u32 ofst_start;
+	u32 nat_ofst;
+	u32 nat_size;
+	u32 v4_flt_hash_ofst;
+	u32 v4_flt_hash_size;
+	u32 v4_flt_hash_size_ddr;
+	u32 v4_flt_nhash_ofst;
+	u32 v4_flt_nhash_size;
+	u32 v4_flt_nhash_size_ddr;
+	u32 v6_flt_hash_ofst;
+	u32 v6_flt_hash_size;
+	u32 v6_flt_hash_size_ddr;
+	u32 v6_flt_nhash_ofst;
+	u32 v6_flt_nhash_size;
+	u32 v6_flt_nhash_size_ddr;
+	u32 v4_rt_num_index;
+	u32 v4_modem_rt_index_lo;
+	u32 v4_modem_rt_index_hi;
+	u32 v4_apps_rt_index_lo;
+	u32 v4_apps_rt_index_hi;
+	u32 v4_rt_hash_ofst;
+	u32 v4_rt_hash_size;
+	u32 v4_rt_hash_size_ddr;
+	u32 v4_rt_nhash_ofst;
+	u32 v4_rt_nhash_size;
+	u32 v4_rt_nhash_size_ddr;
+	u32 v6_rt_num_index;
+	u32 v6_modem_rt_index_lo;
+	u32 v6_modem_rt_index_hi;
+	u32 v6_apps_rt_index_lo;
+	u32 v6_apps_rt_index_hi;
+	u32 v6_rt_hash_ofst;
+	u32 v6_rt_hash_size;
+	u32 v6_rt_hash_size_ddr;
+	u32 v6_rt_nhash_ofst;
+	u32 v6_rt_nhash_size;
+	u32 v6_rt_nhash_size_ddr;
+	u32 modem_hdr_ofst;
+	u32 modem_hdr_size;
+	u32 apps_hdr_ofst;
+	u32 apps_hdr_size;
+	u32 apps_hdr_size_ddr;
+	u32 modem_hdr_proc_ctx_ofst;
+	u32 modem_hdr_proc_ctx_size;
+	u32 apps_hdr_proc_ctx_ofst;
+	u32 apps_hdr_proc_ctx_size;
+	u32 apps_hdr_proc_ctx_size_ddr;
+	u32 modem_comp_decomp_ofst;
+	u32 modem_comp_decomp_size;
+	u32 modem_ofst;
+	u32 modem_size;
+	u32 apps_v4_flt_hash_ofst;
+	u32 apps_v4_flt_hash_size;
+	u32 apps_v4_flt_nhash_ofst;
+	u32 apps_v4_flt_nhash_size;
+	u32 apps_v6_flt_hash_ofst;
+	u32 apps_v6_flt_hash_size;
+	u32 apps_v6_flt_nhash_ofst;
+	u32 apps_v6_flt_nhash_size;
+	u32 uc_info_ofst;
+	u32 uc_info_size;
+	u32 end_ofst;
+	u32 apps_v4_rt_hash_ofst;
+	u32 apps_v4_rt_hash_size;
+	u32 apps_v4_rt_nhash_ofst;
+	u32 apps_v4_rt_nhash_size;
+	u32 apps_v6_rt_hash_ofst;
+	u32 apps_v6_rt_hash_size;
+	u32 apps_v6_rt_nhash_ofst;
+	u32 apps_v6_rt_nhash_size;
+};
+
+struct ipa3_controller {
+	struct ipa3_mem_partition mem_partition;
+	u32 ipa_clk_rate_turbo;
+	u32 ipa_clk_rate_nominal;
+	u32 ipa_clk_rate_svs;
+	u32 clock_scaling_bw_threshold_turbo;
+	u32 clock_scaling_bw_threshold_nominal;
+	u32 ipa_reg_base_ofst;
+	u32 max_holb_tmr_val;
+	void (*ipa_sram_read_settings)(void);
+	int (*ipa_init_sram)(void);
+	int (*ipa_init_hdr)(void);
+	int (*ipa_init_rt4)(void);
+	int (*ipa_init_rt6)(void);
+	int (*ipa_init_flt4)(void);
+	int (*ipa_init_flt6)(void);
+	int (*ipa3_read_ep_reg)(char *buff, int max_len, int pipe);
+	int (*ipa3_commit_flt)(enum ipa_ip_type ip);
+	int (*ipa3_commit_rt)(enum ipa_ip_type ip);
+	int (*ipa3_commit_hdr)(void);
+	void (*ipa3_enable_clks)(void);
+	void (*ipa3_disable_clks)(void);
+	struct msm_bus_scale_pdata *msm_bus_data_ptr;
+};
+
+extern struct ipa3_context *ipa3_ctx;
+
+/* public APIs */
+/*
+ * Connect / Disconnect
+ */
+int ipa3_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps,
+		u32 *clnt_hdl);
+int ipa3_disconnect(u32 clnt_hdl);
+
+/* Generic GSI channels functions */
+int ipa3_request_gsi_channel(struct ipa_request_gsi_channel_params *params,
+			     struct ipa_req_chan_out_params *out_params);
+
+int ipa3_release_gsi_channel(u32 clnt_hdl);
+
+int ipa3_start_gsi_channel(u32 clnt_hdl);
+
+int ipa3_stop_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_channel(u32 clnt_hdl);
+
+int ipa3_reset_gsi_event_ring(u32 clnt_hdl);
+
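+/*
+ * Illustrative sketch (hypothetical caller): an assumed teardown ordering
+ * for a GSI channel that was previously requested and started through the
+ * APIs above - stop, then reset, then release. The ordering is an
+ * assumption based on the prototypes, not a documented requirement.
+ */
+static inline int ipa3_example_gsi_channel_teardown(u32 clnt_hdl)
+{
+	int ret;
+
+	/* stop data transfer on the channel first */
+	ret = ipa3_stop_gsi_channel(clnt_hdl);
+	if (ret)
+		return ret;
+
+	/* bring the channel back to a clean state */
+	ret = ipa3_reset_gsi_channel(clnt_hdl);
+	if (ret)
+		return ret;
+
+	/* hand the channel back to the IPA driver */
+	return ipa3_release_gsi_channel(clnt_hdl);
+}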
+/* Specific xDCI channels functions */
+int ipa3_set_usb_max_packet_size(
+	enum ipa_usb_max_usb_packet_size usb_max_packet_size);
+
+int ipa3_xdci_connect(u32 clnt_hdl, u8 xferrscidx, bool xferrscidx_valid);
+
+int ipa3_xdci_disconnect(u32 clnt_hdl, bool should_force_clear, u32 qmi_req_id);
+
+int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+	bool should_force_clear, u32 qmi_req_id, bool is_dpl);
+
+int ipa3_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl, bool is_dpl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa3_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa3_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+		      const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
+
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
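+/*
+ * Illustrative sketch (hypothetical caller): enabling head-of-line-blocking
+ * drop on a client endpoint via the configuration API above. The
+ * ipa_ep_cfg_holb field names (en, tmr_val) are assumptions based on the
+ * public IPA API and may differ.
+ */
+static inline int ipa3_example_enable_holb(enum ipa_client_type client,
+	u32 timer_val)
+{
+	struct ipa_ep_cfg_holb holb = { };
+
+	holb.en = 1;			/* drop packets when the client stalls */
+	holb.tmr_val = timer_val;	/* HOLB timer value */
+
+	return ipa3_cfg_ep_holb_by_client(client, &holb);
+}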
+/*
+ * Header removal / addition
+ */
+int ipa3_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa3_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa3_commit_hdr(void);
+
+int ipa3_reset_hdr(void);
+
+int ipa3_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa3_put_hdr(u32 hdr_hdl);
+
+int ipa3_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa3_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa3_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
+
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa3_commit_rt(enum ipa_ip_type ip);
+
+int ipa3_reset_rt(enum ipa_ip_type ip);
+
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa3_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules);
+
+int ipa3_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa3_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa3_commit_flt(enum ipa_ip_type ip);
+
+int ipa3_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback);
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx);
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext);
+int ipa3_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa3_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa3_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc not list_head
+ */
+int ipa3_tx_dp_mul(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
+
+void ipa3_free_skb(struct ipa_rx_data *);
+
+/*
+ * System pipes
+ */
+int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa3_teardown_sys_pipe(u32 clnt_hdl);
+
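+/*
+ * Illustrative sketch (hypothetical caller): setting up and tearing down a
+ * system pipe with the APIs above. The ipa_sys_connect_params field names
+ * used here (client, desc_fifo_sz, notify, priv) are assumptions based on
+ * the public IPA API and may differ.
+ */
+static inline int ipa3_example_sys_pipe(enum ipa_client_type client,
+	u32 desc_fifo_sz, ipa_notify_cb notify, void *priv, u32 *clnt_hdl)
+{
+	struct ipa_sys_connect_params sys_in = { };
+	int ret;
+
+	sys_in.client = client;		/* e.g. an APPS LAN/WAN client */
+	sys_in.desc_fifo_sz = desc_fifo_sz;
+	sys_in.notify = notify;		/* data-path event callback */
+	sys_in.priv = priv;		/* cookie passed back to notify */
+
+	ret = ipa3_setup_sys_pipe(&sys_in, clnt_hdl);
+	if (ret)
+		return ret;
+
+	/* ... use the pipe, then release it with the returned handle ... */
+	return ipa3_teardown_sys_pipe(*clnt_hdl);
+}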
+int ipa3_sys_setup(struct ipa_sys_connect_params *sys_in,
+	unsigned long *ipa_bam_hdl,
+	u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
+
+int ipa3_sys_teardown(u32 clnt_hdl);
+
+int ipa3_sys_update_gsi_hdls(u32 clnt_hdl, unsigned long gsi_ch_hdl,
+	unsigned long gsi_ev_hdl);
+
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa3_enable_wdi_pipe(u32 clnt_hdl);
+int ipa3_disable_wdi_pipe(u32 clnt_hdl);
+int ipa3_resume_wdi_pipe(u32 clnt_hdl);
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa3_get_smem_restr_bytes(void);
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+		ipa_notify_cb notify, void *priv, u8 hdr_len,
+		struct ipa_ntn_conn_out_params *outp);
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl);
+
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa3_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To check uC readiness and register a uC ready callback;
+ * the callback is registered only if the uC is not yet ready
+ */
+int ipa3_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa3_uc_dereg_rdyCB(void);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params);
+
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client);
+
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa3_get_client(int pipe_idx);
+
+bool ipa3_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa3_dma_init(void);
+
+int ipa3_dma_enable(void);
+
+int ipa3_dma_disable(void);
+
+int ipa3_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa3_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param);
+
+int ipa3_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa3_dma_destroy(void);
+
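+/*
+ * Illustrative sketch (hypothetical caller): the assumed IPADMA call
+ * sequence for one synchronous copy - enable, copy, disable - based only
+ * on the prototypes above (ipa3_dma_init() is assumed to have been called
+ * once beforehand).
+ */
+static inline int ipa3_example_dma_sync_copy(u64 dest, u64 src, int len)
+{
+	int ret;
+
+	ret = ipa3_dma_enable();	/* make IPADMA pipes usable */
+	if (ret)
+		return ret;
+
+	/* blocking copy of len bytes between physical addresses */
+	ret = ipa3_dma_sync_memcpy(dest, src, len);
+
+	ipa3_dma_disable();		/* balance the enable above */
+	return ret;
+}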
+/*
+ * MHI
+ */
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params);
+
+int ipa3_connect_mhi_pipe(
+		struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl);
+
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl);
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client);
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client);
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client);
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client);
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index);
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client);
+
+/*
+ * mux id
+ */
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
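+/*
+ * Illustrative sketch (hypothetical caller): registering a deferred handler
+ * for the TX_SUSPEND interrupt through the API above. With deferred_flag set
+ * the handler is assumed to run from the IPA interrupt workqueue rather than
+ * in atomic context.
+ */
+static inline int ipa3_example_register_suspend_irq(ipa_irq_handler_t handler,
+	void *priv)
+{
+	return ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, handler,
+		true /* deferred_flag */, priv);
+}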
+/*
+ * Miscellaneous
+ */
+void ipa3_bam_reg_dump(void);
+
+int ipa3_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa3_is_ready(void);
+
+void ipa3_proxy_clk_vote(void);
+void ipa3_proxy_clk_unvote(void);
+
+bool ipa3_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx);
+
+void ipa_init_ep_flt_bitmap(void);
+
+bool ipa_is_ep_support_flt(int pipe_idx);
+
+enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa3_get_modem_cfg_emb_pipe_flt(void);
+
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client);
+
+/* internal functions */
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl);
+
+bool ipa_is_modem_pipe(int pipe_idx);
+
+int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
+		bool in_atomic);
+int ipa3_send(struct ipa3_sys_context *sys,
+		u32 num_desc,
+		struct ipa3_desc *desc,
+		bool in_atomic);
+int ipa3_get_ep_mapping(enum ipa_client_type client);
+int ipa_get_ep_group(enum ipa_client_type client);
+
+int ipa3_generate_hw_rule(enum ipa_ip_type ip,
+			 const struct ipa_rule_attrib *attrib,
+			 u8 **buf,
+			 u16 *en_rule);
+int ipa3_init_hw(void);
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
+int ipa3_set_single_ndp_per_mbim(bool);
+void ipa3_debugfs_init(void);
+void ipa3_debugfs_remove(void);
+
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
+#ifdef IPA_DEBUG
+#define IPA_DUMP_BUFF(base, phy_base, size) \
+	ipa3_dump_buff_internal(base, phy_base, size)
+#else
+#define IPA_DUMP_BUFF(base, phy_base, size)
+#endif
+int ipa3_init_mem_partition(struct device_node *dev_node);
+int ipa3_controller_static_bind(struct ipa3_controller *controller,
+		enum ipa_hw_type ipa_hw_type);
+int ipa3_cfg_route(struct ipahal_reg_route *route);
+int ipa3_send_cmd_timeout(u16 num_desc, struct ipa3_desc *descr, u32 timeout);
+int ipa3_send_cmd(u16 num_desc, struct ipa3_desc *descr);
+int ipa3_cfg_filter(u32 disable);
+int ipa3_pipe_mem_init(u32 start_ofst, u32 size);
+int ipa3_pipe_mem_alloc(u32 *ofst, u32 size);
+int ipa3_pipe_mem_free(u32 ofst, u32 size);
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary);
+struct ipa3_context *ipa3_get_ctx(void);
+void ipa3_enable_clks(void);
+void ipa3_disable_clks(void);
+void ipa3_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
+int ipa3_inc_client_enable_clks_no_block(struct ipa_active_client_logging_info
+		*id);
+void ipa3_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
+void ipa3_active_clients_log_dec(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+void ipa3_active_clients_log_inc(struct ipa_active_client_logging_info *id,
+		bool int_ctx);
+int ipa3_active_clients_log_print_buffer(char *buf, int size);
+int ipa3_active_clients_log_print_table(char *buf, int size);
+void ipa3_active_clients_log_clear(void);
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev);
+int __ipa3_del_rt_rule(u32 rule_hdl);
+int __ipa3_del_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr(u32 hdr_hdl);
+int __ipa3_release_hdr_proc_ctx(u32 proc_ctx_hdl);
+int _ipa_read_ep_reg_v3_0(char *buf, int max_len, int pipe);
+void _ipa_enable_clks_v3_0(void);
+void _ipa_disable_clks_v3_0(void);
+struct device *ipa3_get_dma_dev(void);
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl);
+void ipa3_suspend_handler(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data);
+
+
+int ipa_bridge_init(void);
+void ipa_bridge_cleanup(void);
+
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+		 loff_t *f_pos);
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count);
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup);
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx);
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx);
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext);
+
+void wwan_cleanup(void);
+
+int ipa3_teth_bridge_driver_init(void);
+void ipa3_lan_rx_cb(void *priv, enum ipa_dp_evt_type evt, unsigned long data);
+
+int _ipa_init_sram_v3_0(void);
+int _ipa_init_hdr_v3_0(void);
+int _ipa_init_rt4_v3(void);
+int _ipa_init_rt6_v3(void);
+int _ipa_init_flt4_v3(void);
+int _ipa_init_flt6_v3(void);
+
+int __ipa_commit_flt_v3(enum ipa_ip_type ip);
+int __ipa_commit_rt_v3(enum ipa_ip_type ip);
+
+int __ipa_commit_hdr_v3_0(void);
+void ipa3_skb_recycle(struct sk_buff *skb);
+void ipa3_install_dflt_flt_rules(u32 ipa_ep_idx);
+void ipa3_delete_dflt_flt_rules(u32 ipa_ep_idx);
+
+int ipa3_enable_data_path(u32 clnt_hdl);
+int ipa3_disable_data_path(u32 clnt_hdl);
+int ipa3_alloc_rule_id(struct idr *rule_ids);
+int ipa3_id_alloc(void *ptr);
+void *ipa3_id_find(u32 id);
+void ipa3_id_remove(u32 id);
+
+int ipa3_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
+				  u32 bandwidth_mbps);
+
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+		const struct ipahal_reg_ep_cfg_status *ipa_ep_cfg);
+
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name name);
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name name);
+int ipa3_resume_resource(enum ipa_rm_resource_name name);
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client);
+int ipa3_tag_aggr_force_close(int pipe_num);
+
+void ipa3_active_clients_lock(void);
+int ipa3_active_clients_trylock(unsigned long *flags);
+void ipa3_active_clients_unlock(void);
+void ipa3_active_clients_trylock_unlock(unsigned long *flags);
+int ipa3_wdi_init(void);
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id);
+int ipa3_tag_process(struct ipa3_desc *desc, int num_descs,
+		    unsigned long timeout);
+
+void ipa3_q6_pre_shutdown_cleanup(void);
+void ipa3_q6_post_shutdown_cleanup(void);
+int ipa3_init_q6_smem(void);
+
+int ipa3_sps_connect_safe(struct sps_pipe *h, struct sps_connect *connect,
+			 enum ipa_client_type ipa_client);
+
+int ipa3_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info);
+
+int ipa3_uc_interface_init(void);
+int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client);
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client);
+int ipa3_uc_state_check(void);
+int ipa3_uc_loaded_check(void);
+void ipa3_uc_load_notify(void);
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies);
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+			      struct ipa3_uc_hdlrs *hdlrs);
+int ipa3_create_nat_device(void);
+int ipa3_uc_notify_clk_state(bool enabled);
+void ipa3_dma_async_memcpy_notify_cb(void *priv,
+		enum ipa_dp_evt_type evt, unsigned long data);
+
+int ipa3_uc_update_hw_flags(u32 flags);
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
+void ipa3_uc_mhi_cleanup(void);
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx);
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection);
+int ipa3_uc_mhi_reset_channel(int channelHandle);
+int ipa3_uc_mhi_suspend_channel(int channelHandle);
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected);
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle);
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size);
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+void ipa3_tag_destroy_imm(void *user1, int user2);
+struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx);
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val);
+
+u32 ipa3_get_num_pipes(void);
+struct ipa_smmu_cb_ctx *ipa3_get_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa3_get_wlan_smmu_ctx(void);
+struct ipa_smmu_cb_ctx *ipa3_get_uc_smmu_ctx(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+struct iommu_domain *ipa3_get_uc_smmu_domain(void);
+struct iommu_domain *ipa3_get_wlan_smmu_domain(void);
+int ipa3_iommu_map(struct iommu_domain *domain, unsigned long iova,
+	phys_addr_t paddr, size_t size, int prot);
+int ipa3_ap_suspend(struct device *dev);
+int ipa3_ap_resume(struct device *dev);
+int ipa3_init_interrupts(void);
+struct iommu_domain *ipa3_get_smmu_domain(void);
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa3_set_flt_tuple_mask(int pipe_idx, struct ipahal_reg_hash_tuple *tuple);
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple);
+void ipa3_set_resorce_groups_min_max_limits(void);
+void ipa3_suspend_apps_pipes(bool suspend);
+void ipa3_flow_control(enum ipa_client_type ipa_client, bool enable,
+			uint32_t qmap_id);
+int ipa3_flt_read_tbl_from_hw(u32 pipe_idx,
+	enum ipa_ip_type ip_type,
+	bool hashable,
+	struct ipahal_flt_rule_entry entry[],
+	int *num_entry);
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx,
+	enum ipa_ip_type ip_type,
+	bool hashable,
+	struct ipahal_rt_rule_entry entry[],
+	int *num_entry);
+int ipa3_restore_suspend_handler(void);
+int ipa3_inject_dma_task_for_gsi(void);
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+	unsigned long event, void *ptr);
+void ipa3_inc_acquire_wakelock(void);
+void ipa3_dec_release_wakelock(void);
+int ipa3_load_fws(const struct firmware *firmware);
+int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
+int ipa_gsi_ch20_wa(void);
+int ipa3_rx_poll(u32 clnt_hdl, int budget);
+void ipa3_recycle_wan_skb(struct sk_buff *skb);
+int ipa3_smmu_map_peer_reg(phys_addr_t phys_addr, bool map);
+int ipa3_smmu_map_peer_buff(u64 iova, phys_addr_t phys_addr,
+	u32 size, bool map);
+int ipa3_ntn_init(void);
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats);
+struct dentry *ipa_debugfs_get_root(void);
+bool ipa3_is_msm_device(void);
+struct device *ipa3_get_pdev(void);
+void ipa3_enable_dcd(void);
+#endif /* _IPA3_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
new file mode 100644
index 0000000..75711c0
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_interrupts.c
@@ -0,0 +1,567 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include "ipa_i.h"
+
+#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
+#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
+#define IPA_IRQ_NUM_MAX 32
+
+struct ipa3_interrupt_info {
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	bool deferred_flag;
+};
+
+struct ipa3_interrupt_work_wrap {
+	struct work_struct interrupt_work;
+	ipa_irq_handler_t handler;
+	enum ipa_irq_type interrupt;
+	void *private_data;
+	void *interrupt_data;
+};
+
+static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
+static struct workqueue_struct *ipa_interrupt_wq;
+static u32 ipa_ee;
+
+static void ipa3_tx_suspend_interrupt_wa(void);
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
+static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
+						ipa3_enable_tx_suspend_wa);
+static spinlock_t suspend_wa_lock;
+static void ipa3_process_interrupts(bool isr_context);
+
+static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
+	[IPA_UC_TX_CMD_Q_NOT_FULL_IRQ]		= -1,
+	[IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ]	= -1,
+	[IPA_BAD_SNOC_ACCESS_IRQ]		= 0,
+	[IPA_EOT_COAL_IRQ]			= -1,
+	[IPA_UC_IRQ_0]				= 2,
+	[IPA_UC_IRQ_1]				= 3,
+	[IPA_UC_IRQ_2]				= 4,
+	[IPA_UC_IRQ_3]				= 5,
+	[IPA_UC_IN_Q_NOT_EMPTY_IRQ]		= 6,
+	[IPA_UC_RX_CMD_Q_NOT_FULL_IRQ]		= 7,
+	[IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ]	= 8,
+	[IPA_RX_ERR_IRQ]			= 9,
+	[IPA_DEAGGR_ERR_IRQ]			= 10,
+	[IPA_TX_ERR_IRQ]			= 11,
+	[IPA_STEP_MODE_IRQ]			= 12,
+	[IPA_PROC_ERR_IRQ]			= 13,
+	[IPA_TX_SUSPEND_IRQ]			= 14,
+	[IPA_TX_HOLB_DROP_IRQ]			= 15,
+	[IPA_BAM_GSI_IDLE_IRQ]			= 16,
+};
+
+static void ipa3_interrupt_defer(struct work_struct *work);
+static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);
+
+static void ipa3_deferred_interrupt_work(struct work_struct *work)
+{
+	struct ipa3_interrupt_work_wrap *work_data =
+			container_of(work,
+			struct ipa3_interrupt_work_wrap,
+			interrupt_work);
+	IPADBG("call handler from workq...\n");
+	work_data->handler(work_data->interrupt, work_data->private_data,
+			work_data->interrupt_data);
+	kfree(work_data->interrupt_data);
+	kfree(work_data);
+}
+
+static bool ipa3_is_valid_ep(u32 ep_suspend_data)
+{
+	u32 bmsk = 1;
+	u32 i = 0;
+
+	for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
+		if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
+			return true;
+		bmsk = bmsk << 1;
+	}
+	return false;
+}
+
+static int ipa3_handle_interrupt(int irq_num, bool isr_context)
+{
+	struct ipa3_interrupt_info interrupt_info;
+	struct ipa3_interrupt_work_wrap *work_data;
+	u32 suspend_data;
+	void *interrupt_data = NULL;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
+	int res;
+
+	interrupt_info = ipa_interrupt_to_cb[irq_num];
+	if (interrupt_info.handler == NULL) {
+		IPAERR("A callback function wasn't set for interrupt num %d\n",
+			irq_num);
+		return -EINVAL;
+	}
+
+	switch (interrupt_info.interrupt) {
+	case IPA_TX_SUSPEND_IRQ:
+		IPADBG_LOW("processing TX_SUSPEND interrupt work-around\n");
+		ipa3_tx_suspend_interrupt_wa();
+		suspend_data = ipahal_read_reg_n(IPA_IRQ_SUSPEND_INFO_EE_n,
+			ipa_ee);
+		IPADBG_LOW("get interrupt %d\n", suspend_data);
+
+		if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+			/* Clearing L2 interrupts status */
+			ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
+				ipa_ee, suspend_data);
+		}
+		if (!ipa3_is_valid_ep(suspend_data))
+			return 0;
+
+		suspend_interrupt_data =
+			kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return -ENOMEM;
+		}
+		suspend_interrupt_data->endpoints = suspend_data;
+		interrupt_data = suspend_interrupt_data;
+		break;
+	case IPA_UC_IRQ_0:
+		if (ipa3_ctx->apply_rg10_wa) {
+			/*
+			 * Early detection of a uC crash. If the RG10
+			 * workaround is enabled, a uC crash would otherwise
+			 * not be detected, since before processing the uC
+			 * event the interrupt is cleared using a uC register
+			 * write, which times out because the uC has already
+			 * crashed.
+			 */
+			if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+			    IPA_HW_2_CPU_EVENT_ERROR)
+				ipa3_ctx->uc_ctx.uc_failed = true;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* Force defer processing if in ISR context. */
+	if (interrupt_info.deferred_flag || isr_context) {
+		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+			res = -ENOMEM;
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work,
+				ipa3_deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = interrupt_info.interrupt;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+
+	} else {
+		interrupt_info.handler(interrupt_info.interrupt,
+			interrupt_info.private_data,
+			interrupt_data);
+		kfree(interrupt_data);
+	}
+
+	return 0;
+
+fail_alloc_work:
+	kfree(interrupt_data);
+	return res;
+}
+
+static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
+{
+	u32 en;
+	u32 suspend_bmask;
+	int irq_num;
+
+	IPADBG_LOW("Enter\n");
+
+	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+	BUG_ON(irq_num == -1);
+
+	/* make sure ipa hw is clocked on*/
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	suspend_bmask = 1 << irq_num;
+	/*enable  TX_SUSPEND_IRQ*/
+	en |= suspend_bmask;
+	IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
+		, en);
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, en);
+	ipa3_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_tx_suspend_interrupt_wa(void)
+{
+	u32 val;
+	u32 suspend_bmask;
+	int irq_num;
+
+	IPADBG_LOW("Enter\n");
+	irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+	BUG_ON(irq_num == -1);
+
+	/*disable TX_SUSPEND_IRQ*/
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	suspend_bmask = 1 << irq_num;
+	val &= ~suspend_bmask;
+	IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
+		val);
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
+	queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
+			msecs_to_jiffies(DIS_SUSPEND_INTERRUPT_TIMEOUT));
+
+	IPADBG_LOW("Exit\n");
+}
+
+static inline bool is_uc_irq(int irq_num)
+{
+	if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
+		ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
+		return true;
+	else
+		return false;
+}
+
+static void ipa3_process_interrupts(bool isr_context)
+{
+	u32 reg;
+	u32 bmsk;
+	u32 i = 0;
+	u32 en;
+	unsigned long flags;
+	bool uc_irq;
+
+	IPADBG_LOW("Enter\n");
+
+	spin_lock_irqsave(&suspend_wa_lock, flags);
+	en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+	while (en & reg) {
+		bmsk = 1;
+		for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
+			if (en & reg & bmsk) {
+				uc_irq = is_uc_irq(i);
+
+				/*
+				 * Clear uC interrupt before processing to avoid
+				 * clearing unhandled interrupts
+				 */
+				if (uc_irq)
+					ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n,
+							ipa_ee, bmsk);
+
+				/*
+				 * Handle the interrupt with the spinlock
+				 * unlocked to avoid calling the client in
+				 * atomic context. Mutual exclusion is still
+				 * preserved as the read/clear is done with
+				 * the spinlock held.
+				 */
+				spin_unlock_irqrestore(&suspend_wa_lock, flags);
+				ipa3_handle_interrupt(i, isr_context);
+				spin_lock_irqsave(&suspend_wa_lock, flags);
+
+				/*
+				 * Clear non uC interrupt after processing
+				 * to avoid clearing interrupt data
+				 */
+				if (!uc_irq)
+					ipa3_uc_rg10_write_reg(IPA_IRQ_CLR_EE_n,
+							ipa_ee, bmsk);
+			}
+			bmsk = bmsk << 1;
+		}
+		/*
+		 * If the uC has failed, the interrupt cannot be cleared.
+		 * The device will crash as part of handling the uC event.
+		 */
+		if (ipa3_ctx->apply_rg10_wa && ipa3_ctx->uc_ctx.uc_failed)
+			break;
+
+		reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
+		/*
+		 * Due to the suspend interrupt HW bug we must re-read the EN
+		 * register, otherwise the loop is endless
+		 */
+		en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	}
+
+	spin_unlock_irqrestore(&suspend_wa_lock, flags);
+	IPADBG_LOW("Exit\n");
+}
+
+static void ipa3_interrupt_defer(struct work_struct *work)
+{
+	IPADBG("processing interrupts in wq\n");
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa3_process_interrupts(false);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("Done\n");
+}
+
+static irqreturn_t ipa3_isr(int irq, void *ctxt)
+{
+	unsigned long flags;
+
+	IPADBG_LOW("Enter\n");
+	/* defer interrupt handling in case IPA is not clocked on */
+	if (ipa3_active_clients_trylock(&flags) == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+		return IRQ_HANDLED;
+	}
+
+	if (ipa3_ctx->ipa3_active_clients.cnt == 0) {
+		IPADBG("defer interrupt processing\n");
+		queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
+		goto bail;
+	}
+
+	ipa3_process_interrupts(true);
+	IPADBG_LOW("Exit\n");
+
+bail:
+	ipa3_active_clients_trylock_unlock(&flags);
+	return IRQ_HANDLED;
+}
+/**
+* ipa3_add_interrupt_handler() - Adds a handler for an interrupt type
+* @interrupt:		Interrupt type
+* @handler:		The handler to be added
+* @deferred_flag:	whether the handler processing should be deferred in
+*			a workqueue
+* @private_data:	the client's private data
+*
+* Adds a handler for an interrupt type and enables the specific bit
+* in the IRQ_EN register; the associated interrupt in the IRQ_STTS register
+* will be enabled
+*/
+int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+	int client_idx, ep_idx;
+
+	IPADBG("in ipa3_add_interrupt_handler interrupt_enum(%d)\n", interrupt);
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa3_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);
+
+	ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
+	ipa_interrupt_to_cb[irq_num].handler = handler;
+	ipa_interrupt_to_cb[irq_num].private_data = private_data;
+	ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
+
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+	bmsk = 1 << irq_num;
+	val |= bmsk;
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+	IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
+
+	/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
+	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+		val = ~0;
+		for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
+			if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
+				IPA_CLIENT_IS_Q6_PROD(client_idx)) {
+				ep_idx = ipa3_get_ep_mapping(client_idx);
+				IPADBG("modem ep_idx(%d) client_idx = %d\n",
+					ep_idx, client_idx);
+				if (ep_idx == -1)
+					IPADBG("Invalid IPA client\n");
+				else
+					val &= ~(1 << ep_idx);
+			}
+
+		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
+		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
+	}
+	return 0;
+}
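+
+/*
+ * Illustrative sketch, not part of this change: a client could register a
+ * deferred handler for the TX suspend interrupt roughly as below. The
+ * callback name my_suspend_cb is hypothetical, and the callback signature is
+ * assumed to follow ipa_irq_handler_t, i.e. (interrupt, private_data,
+ * interrupt_data), matching how struct ipa3_interrupt_work_wrap is populated
+ * in this file.
+ *
+ *	static void my_suspend_cb(enum ipa_irq_type interrupt,
+ *		void *private_data, void *interrupt_data)
+ *	{
+ *		struct ipa_tx_suspend_irq_data *d = interrupt_data;
+ *
+ *		IPADBG("suspended endpoints bitmap 0x%x\n", d->endpoints);
+ *	}
+ *
+ *	ret = ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ, my_suspend_cb,
+ *		true, NULL);
+ */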
+
+/**
+* ipa3_remove_interrupt_handler() - Removes the handler for an interrupt type
+* @interrupt:		Interrupt type
+*
+* Removes the handler and disables the corresponding bit in the IRQ_EN register
+*/
+int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	u32 val;
+	u32 bmsk;
+	int irq_num;
+
+	if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
+		interrupt >= IPA_IRQ_MAX) {
+		IPAERR("invalid interrupt number %d\n", interrupt);
+		return -EINVAL;
+	}
+
+	irq_num = ipa3_irq_mapping[interrupt];
+	if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
+		IPAERR("interrupt %d not supported\n", interrupt);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	kfree(ipa_interrupt_to_cb[irq_num].private_data);
+	ipa_interrupt_to_cb[irq_num].deferred_flag = false;
+	ipa_interrupt_to_cb[irq_num].handler = NULL;
+	ipa_interrupt_to_cb[irq_num].private_data = NULL;
+	ipa_interrupt_to_cb[irq_num].interrupt = -1;
+
+	/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
+	if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
+		(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
+		ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
+		IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
+	}
+
+	val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
+	bmsk = 1 << irq_num;
+	val &= ~bmsk;
+	ipa3_uc_rg10_write_reg(IPA_IRQ_EN_EE_n, ipa_ee, val);
+
+	return 0;
+}
+
+/**
+* ipa3_interrupts_init() - Initialize the IPA interrupts framework
+* @ipa_irq:	The interrupt number to allocate
+* @ee:		Execution environment
+* @ipa_dev:	The basic device structure representing the IPA driver
+*
+* - Initialize the ipa_interrupt_to_cb array
+* - Clear interrupts status
+* - Register the ipa interrupt handler - ipa3_isr
+* - Enable apps processor wakeup by IPA interrupts
+*/
+int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
+{
+	int idx;
+	int res = 0;
+
+	ipa_ee = ee;
+	for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
+		ipa_interrupt_to_cb[idx].deferred_flag = false;
+		ipa_interrupt_to_cb[idx].handler = NULL;
+		ipa_interrupt_to_cb[idx].private_data = NULL;
+		ipa_interrupt_to_cb[idx].interrupt = -1;
+	}
+
+	ipa_interrupt_wq = create_singlethread_workqueue(
+			INTERRUPT_WORKQUEUE_NAME);
+	if (!ipa_interrupt_wq) {
+		IPAERR("workqueue creation failed\n");
+		return -ENOMEM;
+	}
+
+	res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
+				IRQF_TRIGGER_RISING, "ipa", ipa_dev);
+	if (res) {
+		IPAERR("fail to register IPA IRQ handler irq=%d\n", ipa_irq);
+		return -ENODEV;
+	}
+	IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
+
+	res = enable_irq_wake(ipa_irq);
+	if (res)
+		IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
+				ipa_irq, res);
+	else
+		IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
+
+	spin_lock_init(&suspend_wa_lock);
+	return 0;
+}
+
+/**
+* ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
+* @clnt_hdl:		suspended client handle, IRQ is emulated for this pipe
+*
+*  Emulate a suspend IRQ in order to unsuspend a client that was suspended
+*  with an open aggregation frame, working around the HW bug where no IRQ is
+*  generated when an endpoint is suspended during open aggregation.
+*/
+void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
+{
+	struct ipa3_interrupt_info interrupt_info;
+	struct ipa3_interrupt_work_wrap *work_data;
+	struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
+	int irq_num;
+	int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+
+	if (aggr_active_bitmap & (1 << clnt_hdl)) {
+		/* force close aggregation */
+		ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
+
+		/* simulate suspend IRQ */
+		irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
+		interrupt_info = ipa_interrupt_to_cb[irq_num];
+		if (interrupt_info.handler == NULL) {
+			IPAERR("no CB function for IPA_TX_SUSPEND_IRQ!\n");
+			return;
+		}
+		suspend_interrupt_data = kzalloc(
+				sizeof(*suspend_interrupt_data),
+				GFP_ATOMIC);
+		if (!suspend_interrupt_data) {
+			IPAERR("failed allocating suspend_interrupt_data\n");
+			return;
+		}
+		suspend_interrupt_data->endpoints = 1 << clnt_hdl;
+
+		work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
+				GFP_ATOMIC);
+		if (!work_data) {
+			IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
+			goto fail_alloc_work;
+		}
+		INIT_WORK(&work_data->interrupt_work,
+				ipa3_deferred_interrupt_work);
+		work_data->handler = interrupt_info.handler;
+		work_data->interrupt = IPA_TX_SUSPEND_IRQ;
+		work_data->private_data = interrupt_info.private_data;
+		work_data->interrupt_data = (void *)suspend_interrupt_data;
+		queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
+		return;
+fail_alloc_work:
+		kfree(suspend_interrupt_data);
+	}
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
new file mode 100644
index 0000000..b9f5755
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -0,0 +1,632 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include "ipa_i.h"
+
+struct ipa3_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	struct list_head link;
+	u32 num_tx_props;
+	u32 num_rx_props;
+	u32 num_ext_props;
+	struct ipa_ioc_tx_intf_prop *tx;
+	struct ipa_ioc_rx_intf_prop *rx;
+	struct ipa_ioc_ext_intf_prop *ext;
+	enum ipa_client_type excp_pipe;
+};
+
+struct ipa3_push_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_free_fn callback;
+	void *buff;
+	struct list_head link;
+};
+
+struct ipa3_pull_msg {
+	struct ipa_msg_meta meta;
+	ipa_msg_pull_fn callback;
+	struct list_head link;
+};
+
+/**
+ * ipa3_register_intf() - register "logical" interface
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ *
+ * Register an interface and its tx and rx properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx)
+{
+	return ipa3_register_intf_ext(name, tx, rx, NULL);
+}
+
+/**
+ * ipa3_register_intf_ext() - register "logical" interface which also has
+ * extended properties
+ * @name: [in] interface name
+ * @tx:	[in] TX properties of the interface
+ * @rx:	[in] RX properties of the interface
+ * @ext: [in] EXT properties of the interface
+ *
+ * Register an interface and its tx, rx and ext properties, this allows
+ * configuration of rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext)
+{
+	struct ipa3_intf *intf;
+	u32 len;
+
+	if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
+		IPAERR("invalid params name=%p tx=%p rx=%p ext=%p\n", name,
+				tx, rx, ext);
+		return -EINVAL;
+	}
+
+	if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid tx num_props=%d max=%d\n", tx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid rx num_props=%d max=%d\n", rx->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
+		IPAERR("invalid ext num_props=%d max=%d\n", ext->num_props,
+				IPA_NUM_PROPS_MAX);
+		return -EINVAL;
+	}
+
+	len = sizeof(struct ipa3_intf);
+	intf = kzalloc(len, GFP_KERNEL);
+	if (intf == NULL) {
+		IPAERR("fail to alloc 0x%x bytes\n", len);
+		return -ENOMEM;
+	}
+
+	strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
+
+	if (tx) {
+		intf->num_tx_props = tx->num_props;
+		len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
+		intf->tx = kzalloc(len, GFP_KERNEL);
+		if (intf->tx == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->tx, tx->prop, len);
+	}
+
+	if (rx) {
+		intf->num_rx_props = rx->num_props;
+		len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
+		intf->rx = kzalloc(len, GFP_KERNEL);
+		if (intf->rx == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->rx, rx->prop, len);
+	}
+
+	if (ext) {
+		intf->num_ext_props = ext->num_props;
+		len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
+		intf->ext = kzalloc(len, GFP_KERNEL);
+		if (intf->ext == NULL) {
+			IPAERR("fail to alloc 0x%x bytes\n", len);
+			kfree(intf->rx);
+			kfree(intf->tx);
+			kfree(intf);
+			return -ENOMEM;
+		}
+		memcpy(intf->ext, ext->prop, len);
+	}
+
+	if (ext && ext->excp_pipe_valid)
+		intf->excp_pipe = ext->excp_pipe;
+	else
+		intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_add_tail(&intf->link, &ipa3_ctx->intf_list);
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
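+
+/*
+ * Illustrative sketch, not part of this change: a client driver could
+ * register a logical interface with one TX and one RX property roughly as
+ * below. The interface name "rmnet_data0" and the (zeroed) property contents
+ * are hypothetical; only the num_props/prop fields consumed above are shown.
+ *
+ *	struct ipa_ioc_tx_intf_prop tx_prop[1] = { };
+ *	struct ipa_ioc_rx_intf_prop rx_prop[1] = { };
+ *	struct ipa_tx_intf tx = { .num_props = 1, .prop = tx_prop };
+ *	struct ipa_rx_intf rx = { .num_props = 1, .prop = rx_prop };
+ *
+ *	ret = ipa3_register_intf("rmnet_data0", &tx, &rx);
+ *	...
+ *	ipa3_deregister_intf("rmnet_data0");
+ */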
+
+/**
+ * ipa3_deregister_intf() - de-register previously registered logical interface
+ * @name: [in] interface name
+ *
+ * De-register a previously registered interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_deregister_intf(const char *name)
+{
+	struct ipa3_intf *entry;
+	struct ipa3_intf *next;
+	int result = -EINVAL;
+
+	if ((name == NULL) ||
+	    (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) {
+		IPAERR("invalid param name=%s\n", name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, name)) {
+			list_del(&entry->link);
+			kfree(entry->ext);
+			kfree(entry->rx);
+			kfree(entry->tx);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf() - query logical interface properties
+ * @lookup:	[inout] interface name and number of properties
+ *
+ * Obtain the handle and number of tx and rx properties for the named
+ * interface, used as part of querying the tx and rx properties for
+ * configuration of various rules from user-space
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (lookup == NULL) {
+		IPAERR("invalid param lookup=%p\n", lookup);
+		return result;
+	}
+
+	if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
+			IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Interface name too long. (%s)\n", lookup->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, lookup->name)) {
+			lookup->num_tx_props = entry->num_tx_props;
+			lookup->num_rx_props = entry->num_rx_props;
+			lookup->num_ext_props = entry->num_ext_props;
+			lookup->excp_pipe = entry->excp_pipe;
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_tx_props() - query TX props of an interface
+ * @tx:  [inout] interface tx attributes
+ *
+ * Obtain the tx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (tx == NULL) {
+		IPAERR("invalid param tx=%p\n", tx);
+		return result;
+	}
+
+	if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Interface name too long. (%s)\n", tx->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, tx->name)) {
+			memcpy(tx->tx, entry->tx, entry->num_tx_props *
+			       sizeof(struct ipa_ioc_tx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_rx_props() - query RX props of an interface
+ * @rx:  [inout] interface rx attributes
+ *
+ * Obtain the rx properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (rx == NULL) {
+		IPAERR("invalid param rx=%p\n", rx);
+		return result;
+	}
+
+	if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Interface name too long. (%s)\n", rx->name);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, rx->name)) {
+			memcpy(rx->rx, entry->rx, entry->num_rx_props *
+					sizeof(struct ipa_ioc_rx_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_query_intf_ext_props() - query EXT props of an interface
+ * @ext:  [inout] interface ext attributes
+ *
+ * Obtain the ext properties for the specified interface
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
+{
+	struct ipa3_intf *entry;
+	int result = -EINVAL;
+
+	if (ext == NULL) {
+		IPAERR("invalid param ext=%p\n", ext);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
+		if (!strcmp(entry->name, ext->name)) {
+			memcpy(ext->ext, entry->ext, entry->num_ext_props *
+					sizeof(struct ipa_ioc_ext_intf_prop));
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+	return result;
+}
+
+static void ipa3_send_msg_free(void *buff, u32 len, u32 type)
+{
+	kfree(buff);
+}
+
+/**
+ * ipa3_send_msg() - Send "message" from kernel client to IPA driver
+ * @meta: [in] message meta-data
+ * @buff: [in] the payload for message
+ * @callback: [in] free callback
+ *
+ * Client supplies the message meta-data and payload which IPA driver buffers
+ * till read by user-space. After read from user space IPA driver invokes the
+ * callback supplied to free the message payload. Client must not touch/free
+ * the message payload after calling this API.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback)
+{
+	struct ipa3_push_msg *msg;
+	void *data = NULL;
+
+	if (meta == NULL || (buff == NULL && callback != NULL) ||
+	    (buff != NULL && callback == NULL)) {
+		IPAERR("invalid param meta=%p buff=%p, callback=%p\n",
+		       meta, buff, callback);
+		return -EINVAL;
+	}
+
+	if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
+		IPAERR("unsupported message type %d\n", meta->msg_type);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		IPAERR("fail to alloc ipa_msg container\n");
+		return -ENOMEM;
+	}
+
+	msg->meta = *meta;
+	if (meta->msg_len > 0 && buff) {
+		data = kmalloc(meta->msg_len, GFP_KERNEL);
+		if (data == NULL) {
+			IPAERR("fail to alloc data container\n");
+			kfree(msg);
+			return -ENOMEM;
+		}
+		memcpy(data, buff, meta->msg_len);
+		msg->buff = data;
+		msg->callback = ipa3_send_msg_free;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa3_ctx->msg_list);
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
+
+	wake_up(&ipa3_ctx->msg_waitq);
+	if (buff)
+		callback(buff, meta->msg_len, meta->msg_type);
+
+	return 0;
+}
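+
+/*
+ * Illustrative sketch, not part of this change: a kernel client could push an
+ * event towards user-space roughly as below. The payload struct my_event and
+ * its free routine are hypothetical; msg_type must be a valid value below
+ * IPA_EVENT_MAX_NUM, and the free callback signature matches ipa_msg_free_fn
+ * as invoked above (buff, len, type).
+ *
+ *	static void my_event_free(void *buff, u32 len, u32 type)
+ *	{
+ *		kfree(buff);
+ *	}
+ *
+ *	struct ipa_msg_meta meta = { 0 };
+ *	struct my_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+ *
+ *	meta.msg_type = <valid msg type>;
+ *	meta.msg_len = sizeof(*evt);
+ *	ret = ipa3_send_msg(&meta, evt, my_event_free);
+ */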
+
+/**
+ * ipa3_register_pull_msg() - register pull message type
+ * @meta: [in] message meta-data
+ * @callback: [in] pull callback
+ *
+ * Register message callback by kernel client with IPA driver for IPA driver to
+ * pull message on-demand.
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
+{
+	struct ipa3_pull_msg *msg;
+
+	if (meta == NULL || callback == NULL) {
+		IPAERR("invalid param meta=%p callback=%p\n", meta, callback);
+		return -EINVAL;
+	}
+
+	msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL);
+	if (msg == NULL) {
+		IPAERR("fail to alloc ipa_msg container\n");
+		return -ENOMEM;
+	}
+
+	msg->meta = *meta;
+	msg->callback = callback;
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list);
+	mutex_unlock(&ipa3_ctx->msg_lock);
+
+	return 0;
+}
+
+/**
+ * ipa3_deregister_pull_msg() - De-register pull message type
+ * @meta: [in] message meta-data
+ *
+ * De-register "message" by kernel client from IPA driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	struct ipa3_pull_msg *entry;
+	struct ipa3_pull_msg *next;
+	int result = -EINVAL;
+
+	if (meta == NULL) {
+		IPAERR("invalid param name=%p\n", meta);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			list_del(&entry->link);
+			kfree(entry);
+			result = 0;
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	return result;
+}
+
+/**
+ * ipa3_read() - read message from IPA device
+ * @filp:	[in] file pointer
+ * @buf:	[out] buffer to read into
+ * @count:	[in] size of above buffer
+ * @f_pos:	[inout] file position
+ *
+ * User-space should continually read from /dev/ipa; the read will block when there
+ * are no messages to read. Upon return, user-space should read the ipa_msg_meta
+ * from the start of the buffer to know what type of message was read and its
+ * length in the remainder of the buffer. Buffer supplied must be big enough to
+ * hold the message meta-data and the largest defined message type
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
+		  loff_t *f_pos)
+{
+	char __user *start;
+	struct ipa3_push_msg *msg = NULL;
+	int ret;
+	DEFINE_WAIT(wait);
+	int locked;
+
+	start = buf;
+
+	while (1) {
+		mutex_lock(&ipa3_ctx->msg_lock);
+		locked = 1;
+		prepare_to_wait(&ipa3_ctx->msg_waitq,
+				&wait,
+				TASK_INTERRUPTIBLE);
+
+		if (!list_empty(&ipa3_ctx->msg_list)) {
+			msg = list_first_entry(&ipa3_ctx->msg_list,
+					struct ipa3_push_msg, link);
+			list_del(&msg->link);
+		}
+
+		IPADBG_LOW("msg=%p\n", msg);
+
+		if (msg) {
+			locked = 0;
+			mutex_unlock(&ipa3_ctx->msg_lock);
+			if (copy_to_user(buf, &msg->meta,
+					  sizeof(struct ipa_msg_meta))) {
+				ret = -EFAULT;
+				break;
+			}
+			buf += sizeof(struct ipa_msg_meta);
+			count -= sizeof(struct ipa_msg_meta);
+			if (msg->buff) {
+				if (copy_to_user(buf, msg->buff,
+						  msg->meta.msg_len)) {
+					ret = -EFAULT;
+					break;
+				}
+				buf += msg->meta.msg_len;
+				count -= msg->meta.msg_len;
+				msg->callback(msg->buff, msg->meta.msg_len,
+					       msg->meta.msg_type);
+			}
+			IPA_STATS_INC_CNT(
+				ipa3_ctx->stats.msg_r[msg->meta.msg_type]);
+			kfree(msg);
+		}
+
+		ret = -EAGAIN;
+		if (filp->f_flags & O_NONBLOCK)
+			break;
+
+		ret = -EINTR;
+		if (signal_pending(current))
+			break;
+
+		if (start != buf)
+			break;
+
+		locked = 0;
+		mutex_unlock(&ipa3_ctx->msg_lock);
+		schedule();
+	}
+
+	finish_wait(&ipa3_ctx->msg_waitq, &wait);
+	if (start != buf && ret != -EFAULT)
+		ret = buf - start;
+
+	if (locked)
+		mutex_unlock(&ipa3_ctx->msg_lock);
+
+	return ret;
+}
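+
+/*
+ * Illustrative sketch, not part of this change: the user-space reader of
+ * /dev/ipa described above would typically loop over read() roughly as
+ * below; error handling is elided, the 4 KB buffer is a hypothetical
+ * "large enough" choice and process_msg() is a hypothetical consumer.
+ *
+ *	char msg_buf[4096];
+ *	ssize_t n = read(fd, msg_buf, sizeof(msg_buf));
+ *
+ *	if (n >= (ssize_t)sizeof(struct ipa_msg_meta)) {
+ *		struct ipa_msg_meta *meta = (struct ipa_msg_meta *)msg_buf;
+ *
+ *		process_msg(meta, msg_buf + sizeof(*meta), meta->msg_len);
+ *	}
+ */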
+
+/**
+ * ipa3_pull_msg() - pull the specified message from client
+ * @meta: [in] message meta-data
+ * @buf:  [out] buffer to read into
+ * @count: [in] size of above buffer
+ *
+ * Populate the supplied buffer with the pull message which is fetched
+ * from client, the message must have previously been registered with
+ * the IPA driver
+ *
+ * Returns:	how many bytes copied to buffer
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
+{
+	struct ipa3_pull_msg *entry;
+	int result = -EINVAL;
+
+	if (meta == NULL || buff == NULL || !count) {
+		IPAERR("invalid param name=%p buff=%p count=%zu\n",
+				meta, buff, count);
+		return result;
+	}
+
+	mutex_lock(&ipa3_ctx->msg_lock);
+	list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) {
+		if (entry->meta.msg_len == meta->msg_len &&
+		    entry->meta.msg_type == meta->msg_type) {
+			result = entry->callback(buff, count, meta->msg_type);
+			break;
+		}
+	}
+	mutex_unlock(&ipa3_ctx->msg_lock);
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
new file mode 100644
index 0000000..4ef1a96
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -0,0 +1,629 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/ipa.h>
+#include <linux/msm_gsi.h>
+#include <linux/ipa_mhi.h>
+#include "../ipa_common_i.h"
+#include "ipa_i.h"
+#include "ipa_qmi_service.h"
+
+#define IPA_MHI_DRV_NAME "ipa_mhi"
+
+
+#define IPA_MHI_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_MHI_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MHI_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+				IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPA_MHI_FUNC_ENTRY() \
+	IPA_MHI_DBG_LOW("ENTRY\n")
+#define IPA_MHI_FUNC_EXIT() \
+	IPA_MHI_DBG_LOW("EXIT\n")
+
+#define IPA_MHI_MAX_UL_CHANNELS 1
+#define IPA_MHI_MAX_DL_CHANNELS 1
+
+/* bit #40 in address should be asserted for MHI transfers over pcie */
+#define IPA_MHI_HOST_ADDR_COND(addr) \
+		((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
+
+enum ipa3_mhi_polling_mode {
+	IPA_MHI_POLLING_MODE_DB_MODE,
+	IPA_MHI_POLLING_MODE_POLL_MODE,
+};
+
+bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
+	res = gsi_stop_channel(ep->gsi_chan_hdl);
+	if (res != 0 &&
+		res != -GSI_STATUS_AGAIN &&
+		res != -GSI_STATUS_TIMED_OUT) {
+		IPA_MHI_ERR("GSI stop channel failed %d\n",
+			res);
+		WARN_ON(1);
+		return false;
+	}
+
+	if (res == 0) {
+		IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
+			ep->gsi_chan_hdl);
+		return true;
+	}
+
+	return false;
+}
+
+static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
+{
+	int res;
+	int clnt_hdl;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	clnt_hdl = ipa3_get_ep_mapping(client);
+	if (clnt_hdl < 0)
+		return -EFAULT;
+
+	res = ipa3_reset_gsi_channel(clnt_hdl);
+	if (res) {
+		IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
+		return -EFAULT;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa3_mhi_reset_gsi_channel(client);
+	if (res) {
+		IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
+		ipa_assert();
+		return res;
+	}
+
+	res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
+{
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	res = ipa3_enable_data_path(ipa3_get_ep_mapping(client));
+	if (res) {
+		IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
+		return res;
+	}
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+}
+
+static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
+		struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
+{
+	switch (ch_ctx_host->pollcfg) {
+	case 0:
+		/* set default polling configuration according to MHI spec */
+		if (IPA_CLIENT_IS_PROD(client))
+			return 7;
+		else
+			return (ring_size/2)/8;
+	default:
+		return ch_ctx_host->pollcfg;
+	}
+}
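+
+/*
+ * Illustrative note, not part of this change: with the default pollcfg of 0,
+ * a hypothetical consumer channel with ring_size = 128 elements gets a
+ * polling configuration of (128 / 2) / 8 = 8, while any producer channel
+ * gets the fixed value 7.
+ */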
+
+static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
+	int ipa_ep_idx, struct start_gsi_channel *params)
+{
+	int res;
+	struct gsi_evt_ring_props ev_props;
+	struct ipa_mhi_msi_info *msi;
+	struct gsi_chan_props ch_props;
+	union __packed gsi_channel_scratch ch_scratch;
+	struct ipa3_ep_context *ep;
+	struct ipa_gsi_ep_config *ep_cfg;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	msi = params->msi;
+	ep_cfg = ipa_get_gsi_ep_info(ipa_ep_idx);
+	if (!ep_cfg) {
+		IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
+		return -EPERM;
+	}
+
+	/* allocate event ring only for the first time pipe is connected */
+	if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
+		memset(&ev_props, 0, sizeof(ev_props));
+		ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
+		ev_props.intr = GSI_INTR_MSI;
+		ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+		ev_props.ring_len = params->ev_ctx_host->rlen;
+		ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+				params->ev_ctx_host->rbase);
+		ev_props.int_modt = params->ev_ctx_host->intmodt *
+				IPA_SLEEP_CLK_RATE_KHZ;
+		ev_props.int_modc = params->ev_ctx_host->intmodc;
+		ev_props.intvec = ((msi->data & ~msi->mask) |
+				(params->ev_ctx_host->msivec & msi->mask));
+		ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
+				(((u64)msi->addr_hi << 32) | msi->addr_low));
+		ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
+				params->event_context_addr +
+				offsetof(struct ipa_mhi_ev_ctx, rp));
+		ev_props.exclusive = true;
+		ev_props.err_cb = params->ev_err_cb;
+		ev_props.user_data = params->channel;
+		ev_props.evchid_valid = true;
+		ev_props.evchid = params->evchid;
+		IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
+			ipa_ep_idx, ev_props.evchid);
+		res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
+			&ep->gsi_evt_ring_hdl);
+		if (res) {
+			IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
+			goto fail_alloc_evt;
+		}
+		IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
+				client,
+				ep->gsi_evt_ring_hdl);
+		*params->cached_gsi_evt_ring_hdl =
+			ep->gsi_evt_ring_hdl;
+
+	} else {
+		IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
+			*params->cached_gsi_evt_ring_hdl);
+		ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+	}
+
+	memset(&ch_props, 0, sizeof(ch_props));
+	ch_props.prot = GSI_CHAN_PROT_MHI;
+	ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
+		GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
+	ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
+	ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
+	ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
+	ch_props.ring_len = params->ch_ctx_host->rlen;
+	ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
+			params->ch_ctx_host->rbase);
+	ch_props.use_db_eng = GSI_CHAN_DB_MODE;
+	ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+	ch_props.low_weight = 1;
+	ch_props.err_cb = params->ch_err_cb;
+	ch_props.chan_user_data = params->channel;
+	res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
+		&ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
+			res);
+		goto fail_alloc_ch;
+	}
+
+	memset(&ch_scratch, 0, sizeof(ch_scratch));
+	ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
+			params->channel_context_addr +
+			offsetof(struct ipa_mhi_ch_ctx, wp));
+	ch_scratch.mhi.assert_bit40 = params->assert_bit40;
+	ch_scratch.mhi.max_outstanding_tre =
+		ep_cfg->ipa_if_tlv * ch_props.re_size;
+	ch_scratch.mhi.outstanding_threshold =
+		min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
+	ch_scratch.mhi.oob_mod_threshold = 4;
+	if (params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
+		params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE) {
+		ch_scratch.mhi.burst_mode_enabled = true;
+		ch_scratch.mhi.polling_configuration =
+			ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
+				(ch_props.ring_len / ch_props.re_size));
+		ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
+	} else {
+		ch_scratch.mhi.burst_mode_enabled = false;
+	}
+	res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
+		ch_scratch);
+	if (res) {
+		IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
+			res);
+		goto fail_ch_scratch;
+	}
+
+	*params->mhi = ch_scratch.mhi;
+
+	IPA_MHI_DBG("Starting channel\n");
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
+		goto fail_ch_start;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_ch_start:
+fail_ch_scratch:
+	gsi_dealloc_channel(ep->gsi_chan_hdl);
+fail_alloc_ch:
+	gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
+	ep->gsi_evt_ring_hdl = ~0;
+fail_alloc_evt:
+	return res;
+}
+
+int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
+{
+	int res;
+	struct gsi_device_scratch gsi_scratch;
+	struct ipa_gsi_ep_config *gsi_ep_info;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!params) {
+		IPA_MHI_ERR("null args\n");
+		return -EINVAL;
+	}
+
+	/* Initialize IPA MHI engine */
+	gsi_ep_info = ipa_get_gsi_ep_info(
+		ipa_get_ep_mapping(IPA_CLIENT_MHI_PROD));
+	if (!gsi_ep_info) {
+		IPAERR("MHI PROD has no ep allocated\n");
+		ipa_assert();
+	}
+	memset(&gsi_scratch, 0, sizeof(gsi_scratch));
+	gsi_scratch.mhi_base_chan_idx_valid = true;
+	gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
+		params->gsi.first_ch_idx;
+	res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
+		&gsi_scratch);
+	if (res) {
+		IPA_MHI_ERR("failed to write device scratch %d\n", res);
+		goto fail_init_engine;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_init_engine:
+	return res;
+}
+
+/**
+ * ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
+ * MHI channel
+ * @in: connect parameters
+ * @clnt_hdl: [out] client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel start.
+ * This function is called after MHI engine was started.
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
+		u32 *clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int res;
+	enum ipa_client_type client;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (!in || !clnt_hdl) {
+		IPA_MHI_ERR("NULL args\n");
+		return -EINVAL;
+	}
+
+	client = in->sys->client;
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPA_MHI_ERR("Invalid client.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid == 1) {
+		IPA_MHI_ERR("EP already allocated.\n");
+		return -EPERM;
+	}
+
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	ep->valid = 1;
+	ep->skip_ep_cfg = in->sys->skip_ep_cfg;
+	ep->client = client;
+	ep->client_notify = in->sys->notify;
+	ep->priv = in->sys->priv;
+	ep->keep_ipa_awake = in->sys->keep_ipa_awake;
+
+	res = ipa_mhi_start_gsi_channel(client,
+					ipa_ep_idx, &in->start.gsi);
+	if (res) {
+		IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
+			res);
+		goto fail_start_channel;
+	}
+
+	res = ipa3_enable_data_path(ipa_ep_idx);
+	if (res) {
+		IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
+			ipa_ep_idx);
+		goto fail_ep_cfg;
+	}
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto fail_ep_cfg;
+		}
+		if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
+			IPAERR("fail to configure status of EP.\n");
+			goto fail_ep_cfg;
+		}
+		IPA_MHI_DBG("ep configuration successful\n");
+	} else {
+		IPA_MHI_DBG("skipping ep configuration\n");
+	}
+
+	*clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
+	IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
+		ipa_ep_idx);
+
+	IPA_MHI_FUNC_EXIT();
+
+	return 0;
+
+fail_ep_cfg:
+	ipa3_disable_data_path(ipa_ep_idx);
+fail_start_channel:
+	memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
+	return -EPERM;
+}
+
+/**
+ * ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
+ * MHI channel
+ * @clnt_hdl: client handle for this pipe
+ *
+ * This function is called by IPA MHI client driver on MHI channel reset.
+ * This function is called after MHI channel was started.
+ * This function is doing the following:
+ *	- Send command to uC/GSI to reset corresponding MHI channel
+ *	- Configure IPA EP control
+ *
+ * Return codes: 0	  : success
+ *		 negative : error
+ */
+int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
+{
+	struct ipa3_ep_context *ep;
+	int res;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("invalid handle %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("pipe was not connected %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
+		res = gsi_dealloc_channel(ep->gsi_chan_hdl);
+		if (res) {
+			IPAERR("gsi_dealloc_channel failed %d\n", res);
+			goto fail_reset_channel;
+		}
+	}
+
+	ep->valid = 0;
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+
+	IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+
+fail_reset_channel:
+	return res;
+}
+
+int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
+		bool LPTransitionRejected, bool brstmode_enabled,
+		union __packed gsi_channel_scratch ch_scratch, u8 index)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (brstmode_enabled && !LPTransitionRejected) {
+		/*
+		 * set polling mode bit to DB mode before
+		 * resuming the channel
+		 */
+		res = gsi_write_channel_scratch(
+			ep->gsi_chan_hdl, ch_scratch);
+		if (res) {
+			IPA_MHI_ERR("write ch scratch fail %d\n"
+				, res);
+			return res;
+		}
+	}
+
+	res = gsi_start_channel(ep->gsi_chan_hdl);
+	if (res) {
+		IPA_MHI_ERR("failed to resume channel error %d\n", res);
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+int ipa3_mhi_query_ch_info(enum ipa_client_type client,
+		struct gsi_chan_info *ch_info)
+{
+	int ipa_ep_idx;
+	int res;
+	struct ipa3_ep_context *ep;
+
+	IPA_MHI_FUNC_ENTRY();
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
+	if (res) {
+		IPAERR("gsi_query_channel_info failed\n");
+		return res;
+	}
+
+	IPA_MHI_FUNC_EXIT();
+	return 0;
+}
+
+bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
+{
+	u32 aggr_state_active;
+	int ipa_ep_idx;
+
+	aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+	IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
+
+	ipa_ep_idx = ipa_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		ipa_assert();
+		return false;
+	}
+
+	if ((1 << ipa_ep_idx) & aggr_state_active)
+		return true;
+
+	return false;
+}
+
+int ipa3_mhi_destroy_channel(enum ipa_client_type client)
+{
+	int res;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
+		ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR(" failed to reset evt ring %lu, err %d\n"
+			, ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+	IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
+		ep->gsi_evt_ring_hdl, ipa_ep_idx);
+
+	res = gsi_dealloc_evt_ring(
+		ep->gsi_evt_ring_hdl);
+	if (res) {
+		IPAERR("dealloc evt ring %lu failed, err %d\n"
+			, ep->gsi_evt_ring_hdl, res);
+		goto fail;
+	}
+
+	return 0;
+fail:
+	return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA MHI driver");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
new file mode 100644
index 0000000..4b22203
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -0,0 +1,763 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+
+#define IPA_NAT_PHYS_MEM_OFFSET  0
+#define IPA_NAT_PHYS_MEM_SIZE  IPA_RAM_NAT_SIZE
+
+#define IPA_NAT_TEMP_MEM_SIZE 128
+
+static int ipa3_nat_vma_fault_remap(
+	 struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	IPADBG("\n");
+	vmf->page = NULL;
+
+	return VM_FAULT_SIGBUS;
+}
+
+/* VMA related file operations functions */
+static struct vm_operations_struct ipa3_nat_remap_vm_ops = {
+	.fault = ipa3_nat_vma_fault_remap,
+};
+
+static int ipa3_nat_open(struct inode *inode, struct file *filp)
+{
+	struct ipa3_nat_mem *nat_ctx;
+
+	IPADBG("\n");
+	nat_ctx = container_of(inode->i_cdev, struct ipa3_nat_mem, cdev);
+	filp->private_data = nat_ctx;
+	IPADBG("return\n");
+
+	return 0;
+}
+
+static int ipa3_nat_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	unsigned long vsize = vma->vm_end - vma->vm_start;
+	struct ipa3_nat_mem *nat_ctx =
+		(struct ipa3_nat_mem *)filp->private_data;
+	unsigned long phys_addr;
+	int result;
+
+	mutex_lock(&nat_ctx->lock);
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("Mapping system memory\n");
+		if (nat_ctx->is_mapped) {
+			IPAERR("mapping already exists, only 1 supported\n");
+			result = -EINVAL;
+			goto bail;
+		}
+		IPADBG("map sz=0x%zx\n", nat_ctx->size);
+		result =
+			dma_mmap_coherent(
+				 ipa3_ctx->pdev, vma,
+				 nat_ctx->vaddr, nat_ctx->dma_handle,
+				 nat_ctx->size);
+
+		if (result) {
+			IPAERR("unable to map memory. Err:%d\n", result);
+			goto bail;
+		}
+		ipa3_ctx->nat_mem.nat_base_address = nat_ctx->vaddr;
+	} else {
+		IPADBG("Mapping shared(local) memory\n");
+		IPADBG("map sz=0x%lx\n", vsize);
+
+		if ((IPA_NAT_PHYS_MEM_SIZE == 0) ||
+				(vsize > IPA_NAT_PHYS_MEM_SIZE)) {
+			result = -EINVAL;
+			goto bail;
+		}
+		phys_addr = ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->ctrl->ipa_reg_base_ofst +
+			ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			IPA_NAT_PHYS_MEM_OFFSET);
+
+		if (remap_pfn_range(
+			 vma, vma->vm_start,
+			 phys_addr >> PAGE_SHIFT, vsize, vma->vm_page_prot)) {
+			IPAERR("remap failed\n");
+			result = -EAGAIN;
+			goto bail;
+		}
+		ipa3_ctx->nat_mem.nat_base_address = (void *)vma->vm_start;
+	}
+	nat_ctx->is_mapped = true;
+	vma->vm_ops = &ipa3_nat_remap_vm_ops;
+	IPADBG("return\n");
+	result = 0;
+bail:
+	mutex_unlock(&nat_ctx->lock);
+	return result;
+}
+
+static const struct file_operations ipa3_nat_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_nat_open,
+	.mmap = ipa3_nat_mmap
+};
+
+/**
+ * ipa3_allocate_temp_nat_memory() - Allocates temp nat memory
+ *
+ * Called during nat table delete
+ */
+void ipa3_allocate_temp_nat_memory(void)
+{
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+
+	nat_ctx->tmp_vaddr =
+		dma_alloc_coherent(ipa3_ctx->pdev, IPA_NAT_TEMP_MEM_SIZE,
+				&nat_ctx->tmp_dma_handle, gfp_flags);
+
+	if (nat_ctx->tmp_vaddr == NULL) {
+		IPAERR("Temp Memory alloc failed\n");
+		nat_ctx->is_tmp_mem = false;
+		return;
+	}
+
+	nat_ctx->is_tmp_mem = true;
+	IPADBG("IPA NAT allocated temp memory successfully\n");
+}
+
+/**
+ * ipa3_create_nat_device() - Create the NAT device
+ *
+ * Called during ipa init to create nat device
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_create_nat_device(void)
+{
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	int result;
+
+	IPADBG("\n");
+
+	mutex_lock(&nat_ctx->lock);
+	nat_ctx->class = class_create(THIS_MODULE, NAT_DEV_NAME);
+	if (IS_ERR(nat_ctx->class)) {
+		IPAERR("unable to create the class\n");
+		result = -ENODEV;
+		goto vaddr_alloc_fail;
+	}
+	result = alloc_chrdev_region(&nat_ctx->dev_num,
+					0,
+					1,
+					NAT_DEV_NAME);
+	if (result) {
+		IPAERR("alloc_chrdev_region err.\n");
+		result = -ENODEV;
+		goto alloc_chrdev_region_fail;
+	}
+
+	nat_ctx->dev =
+	   device_create(nat_ctx->class, NULL, nat_ctx->dev_num, nat_ctx,
+			"%s", NAT_DEV_NAME);
+
+	if (IS_ERR(nat_ctx->dev)) {
+		IPAERR("device_create err:%ld\n", PTR_ERR(nat_ctx->dev));
+		result = -ENODEV;
+		goto device_create_fail;
+	}
+
+	cdev_init(&nat_ctx->cdev, &ipa3_nat_fops);
+	nat_ctx->cdev.owner = THIS_MODULE;
+	nat_ctx->cdev.ops = &ipa3_nat_fops;
+
+	result = cdev_add(&nat_ctx->cdev, nat_ctx->dev_num, 1);
+	if (result) {
+		IPAERR("cdev_add err=%d\n", -result);
+		goto cdev_add_fail;
+	}
+	IPADBG("ipa nat dev added successful. major:%d minor:%d\n",
+			MAJOR(nat_ctx->dev_num),
+			MINOR(nat_ctx->dev_num));
+
+	nat_ctx->is_dev = true;
+	ipa3_allocate_temp_nat_memory();
+	IPADBG("IPA NAT device created successfully\n");
+	result = 0;
+	goto bail;
+
+cdev_add_fail:
+	device_destroy(nat_ctx->class, nat_ctx->dev_num);
+device_create_fail:
+	unregister_chrdev_region(nat_ctx->dev_num, 1);
+alloc_chrdev_region_fail:
+	class_destroy(nat_ctx->class);
+vaddr_alloc_fail:
+	if (nat_ctx->vaddr) {
+		IPADBG("Releasing system memory\n");
+		dma_free_coherent(
+			 ipa3_ctx->pdev, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->vaddr = NULL;
+		nat_ctx->dma_handle = 0;
+		nat_ctx->size = 0;
+	}
+
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_allocate_nat_device() - Allocates memory for the NAT device
+ * @mem:	[in/out] memory parameters
+ *
+ * Called by NAT client driver to allocate memory for the NAT entries. Based on
+ * the request size either shared or system memory will be used.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	struct ipa3_nat_mem *nat_ctx = &(ipa3_ctx->nat_mem);
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO;
+	int result;
+
+	IPADBG("passed memory size %zu\n", mem->size);
+
+	mutex_lock(&nat_ctx->lock);
+	if (strcmp(mem->dev_name, NAT_DEV_NAME)) {
+		IPAERR("Nat device name mismatch\n");
+		IPAERR("Expect: %s Recv: %s\n", NAT_DEV_NAME, mem->dev_name);
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_ctx->is_dev != true) {
+		IPAERR("Nat device not created successfully during boot up\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (nat_ctx->is_dev_init == true) {
+		IPAERR("Device already init\n");
+		result = 0;
+		goto bail;
+	}
+
+	if (mem->size <= 0 ||
+			nat_ctx->is_dev_init == true) {
+		IPAERR("Invalid Parameters or device is already init\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	if (mem->size > IPA_NAT_PHYS_MEM_SIZE) {
+		IPADBG("Allocating system memory\n");
+		nat_ctx->is_sys_mem = true;
+		nat_ctx->vaddr =
+		   dma_alloc_coherent(ipa3_ctx->pdev, mem->size,
+				   &nat_ctx->dma_handle, gfp_flags);
+		if (nat_ctx->vaddr == NULL) {
+			IPAERR("memory alloc failed\n");
+			result = -ENOMEM;
+			goto bail;
+		}
+		nat_ctx->size = mem->size;
+	} else {
+		IPADBG("using shared(local) memory\n");
+		nat_ctx->is_sys_mem = false;
+	}
+
+	nat_ctx->is_dev_init = true;
+	IPADBG("IPA NAT dev init successfully\n");
+	result = 0;
+
+bail:
+	mutex_unlock(&nat_ctx->lock);
+
+	return result;
+}
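+
+/*
+ * Illustrative sketch, not part of this change: a NAT client would typically
+ * request memory roughly as below; the 64 KB size is hypothetical and is
+ * assumed here to exceed IPA_NAT_PHYS_MEM_SIZE, so the system-memory
+ * (dma_alloc_coherent) path above is taken rather than local SRAM. dev_name
+ * is assumed to be a fixed-size char array.
+ *
+ *	struct ipa_ioc_nat_alloc_mem mem = { 0 };
+ *
+ *	strlcpy(mem.dev_name, NAT_DEV_NAME, sizeof(mem.dev_name));
+ *	mem.size = SZ_64K;
+ *	ret = ipa3_allocate_nat_device(&mem);
+ */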
+
+/* IOCTL function handlers */
+/**
+ * ipa3_nat_init_cmd() - Post IP_V4_NAT_INIT command to IPA HW
+ * @init:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post IP_V4_NAT_INIT command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+#define TBL_ENTRY_SIZE 32
+#define INDX_TBL_ENTRY_SIZE 4
+
+	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+	struct ipa3_desc desc[2];
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	int result;
+	u32 offset = 0;
+	size_t tmp;
+
+	IPADBG("\n");
+	if (init->table_entries == 0) {
+		IPADBG("Table entries is zero\n");
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->ipv4_rules_offset >
+		UINT_MAX - (TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->ipv4_rules_offset +
+		(TBL_ENTRY_SIZE * (init->table_entries + 1));
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->ipv4_rules_offset, (init->table_entries + 1),
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->expn_rules_offset >
+		UINT_MAX - (TBL_ENTRY_SIZE * init->expn_table_entries)) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Expn Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->expn_rules_offset +
+		(TBL_ENTRY_SIZE * init->expn_table_entries);
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->expn_rules_offset, init->expn_table_entries,
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->index_offset >
+		UINT_MAX - (INDX_TBL_ENTRY_SIZE * (init->table_entries + 1))) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Indx Table Entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->index_offset +
+		(INDX_TBL_ENTRY_SIZE * (init->table_entries + 1));
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Indx Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->index_offset, (init->table_entries + 1),
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	/* check for integer overflow */
+	if (init->index_expn_offset >
+		UINT_MAX - (INDX_TBL_ENTRY_SIZE * init->expn_table_entries)) {
+		IPAERR("Detected overflow\n");
+		return -EPERM;
+	}
+	/* Check Expn Table entry offset is not
+	 * beyond allocated size
+	 */
+	tmp = init->index_expn_offset +
+		(INDX_TBL_ENTRY_SIZE * init->expn_table_entries);
+	if (tmp > ipa3_ctx->nat_mem.size) {
+		IPAERR("Indx Expn Table rules offset not valid\n");
+		IPAERR("offset:%d entries:%d size:%zu mem_size:%zu\n",
+			init->index_expn_offset, init->expn_table_entries,
+			tmp, ipa3_ctx->nat_mem.size);
+		return -EPERM;
+	}
+
+	memset(&desc, 0, sizeof(desc));
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	nop_cmd_pyld =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!nop_cmd_pyld) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = nop_cmd_pyld->data;
+	desc[0].len = nop_cmd_pyld->len;
+
+	if (ipa3_ctx->nat_mem.vaddr) {
+		IPADBG("using system memory for nat table\n");
+		cmd.ipv4_rules_addr_shared = false;
+		cmd.ipv4_expansion_rules_addr_shared = false;
+		cmd.index_table_addr_shared = false;
+		cmd.index_table_expansion_addr_shared = false;
+
+		offset = UINT_MAX - ipa3_ctx->nat_mem.dma_handle;
+
+		if ((init->ipv4_rules_offset > offset) ||
+				(init->expn_rules_offset > offset) ||
+				(init->index_offset > offset) ||
+				(init->index_expn_offset > offset)) {
+			IPAERR("Failed due to integer overflow\n");
+			IPAERR("nat.mem.dma_handle: 0x%pa\n",
+				&ipa3_ctx->nat_mem.dma_handle);
+			IPAERR("ipv4_rules_offset: 0x%x\n",
+				init->ipv4_rules_offset);
+			IPAERR("expn_rules_offset: 0x%x\n",
+				init->expn_rules_offset);
+			IPAERR("index_offset: 0x%x\n",
+				init->index_offset);
+			IPAERR("index_expn_offset: 0x%x\n",
+				init->index_expn_offset);
+			result = -EPERM;
+			goto free_nop;
+		}
+		cmd.ipv4_rules_addr =
+			ipa3_ctx->nat_mem.dma_handle + init->ipv4_rules_offset;
+		IPADBG("ipv4_rules_offset:0x%x\n", init->ipv4_rules_offset);
+
+		cmd.ipv4_expansion_rules_addr =
+		   ipa3_ctx->nat_mem.dma_handle + init->expn_rules_offset;
+		IPADBG("expn_rules_offset:0x%x\n", init->expn_rules_offset);
+
+		cmd.index_table_addr =
+			ipa3_ctx->nat_mem.dma_handle + init->index_offset;
+		IPADBG("index_offset:0x%x\n", init->index_offset);
+
+		cmd.index_table_expansion_addr =
+		   ipa3_ctx->nat_mem.dma_handle + init->index_expn_offset;
+		IPADBG("index_expn_offset:0x%x\n", init->index_expn_offset);
+	} else {
+		IPADBG("using shared(local) memory for nat table\n");
+		cmd.ipv4_rules_addr_shared = true;
+		cmd.ipv4_expansion_rules_addr_shared = true;
+		cmd.index_table_addr_shared = true;
+		cmd.index_table_expansion_addr_shared = true;
+
+		cmd.ipv4_rules_addr = init->ipv4_rules_offset +
+				IPA_RAM_NAT_OFST;
+
+		cmd.ipv4_expansion_rules_addr = init->expn_rules_offset +
+				IPA_RAM_NAT_OFST;
+
+		cmd.index_table_addr = init->index_offset  +
+				IPA_RAM_NAT_OFST;
+
+		cmd.index_table_expansion_addr = init->index_expn_offset +
+				IPA_RAM_NAT_OFST;
+	}
+	cmd.table_index = init->tbl_index;
+	IPADBG("Table index:0x%x\n", cmd.table_index);
+	cmd.size_base_tables = init->table_entries;
+	IPADBG("Base Table size:0x%x\n", cmd.size_base_tables);
+	cmd.size_expansion_tables = init->expn_table_entries;
+	IPADBG("Expansion Table size:0x%x\n", cmd.size_expansion_tables);
+	cmd.public_ip_addr = init->ip_addr;
+	IPADBG("Public ip address:0x%x\n", cmd.public_ip_addr);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+		result = -EPERM;
+		goto free_nop;
+	}
+
+	desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[1].callback = NULL;
+	desc[1].user1 = NULL;
+	desc[1].user2 = 0;
+	desc[1].pyld = cmd_pyld->data;
+	desc[1].len = cmd_pyld->len;
+	IPADBG("posting v4 init command\n");
+	if (ipa3_send_cmd(2, desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_ctx->nat_mem.public_ip_addr = init->ip_addr;
+	IPADBG("Table ip address:0x%x\n", ipa3_ctx->nat_mem.public_ip_addr);
+
+	ipa3_ctx->nat_mem.ipv4_rules_addr =
+	 (char *)ipa3_ctx->nat_mem.nat_base_address + init->ipv4_rules_offset;
+	IPADBG("ipv4_rules_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.ipv4_rules_addr);
+
+	ipa3_ctx->nat_mem.ipv4_expansion_rules_addr =
+	 (char *)ipa3_ctx->nat_mem.nat_base_address + init->expn_rules_offset;
+	IPADBG("ipv4_expansion_rules_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.ipv4_expansion_rules_addr);
+
+	ipa3_ctx->nat_mem.index_table_addr =
+		 (char *)ipa3_ctx->nat_mem.nat_base_address +
+		 init->index_offset;
+	IPADBG("index_table_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.index_table_addr);
+
+	ipa3_ctx->nat_mem.index_table_expansion_addr =
+	 (char *)ipa3_ctx->nat_mem.nat_base_address + init->index_expn_offset;
+	IPADBG("index_table_expansion_addr: 0x%p\n",
+				 ipa3_ctx->nat_mem.index_table_expansion_addr);
+
+	IPADBG("size_base_tables: %d\n", init->table_entries);
+	ipa3_ctx->nat_mem.size_base_tables  = init->table_entries;
+
+	IPADBG("size_expansion_tables: %d\n", init->expn_table_entries);
+	ipa3_ctx->nat_mem.size_expansion_tables = init->expn_table_entries;
+
+	IPADBG("return\n");
+	result = 0;
+destroy_imm_cmd:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+free_nop:
+	ipahal_destroy_imm_cmd(nop_cmd_pyld);
+bail:
+	return result;
+}
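+
+/*
+ * Illustrative note, not part of this change: the bounds checks above use
+ * TBL_ENTRY_SIZE = 32 and INDX_TBL_ENTRY_SIZE = 4 bytes. For a hypothetical
+ * table with table_entries = 99 and ipv4_rules_offset = 0, the base rules
+ * region spans 32 * (99 + 1) = 3200 bytes, so nat_mem.size must be at least
+ * 3200 for that region alone; the expansion, index and index-expansion
+ * regions are validated the same way against their own offsets.
+ */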
+
+/**
+ * ipa3_nat_dma_cmd() - Post NAT_DMA command to IPA HW
+ * @dma:	[in] initialization command attributes
+ *
+ * Called by NAT client driver to post NAT_DMA command to IPA HW
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+#define NUM_OF_DESC 2
+
+	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+	struct ipahal_imm_cmd_nat_dma cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipa3_desc *desc = NULL;
+	u16 size = 0, cnt = 0;
+	int ret = 0;
+
+	IPADBG("\n");
+	if (dma->entries <= 0) {
+		IPAERR("Invalid number of commands %d\n",
+			dma->entries);
+		ret = -EPERM;
+		goto bail;
+	}
+
+	size = sizeof(struct ipa3_desc) * NUM_OF_DESC;
+	desc = kzalloc(size, GFP_KERNEL);
+	if (desc == NULL) {
+		IPAERR("Failed to alloc memory\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	nop_cmd_pyld =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!nop_cmd_pyld) {
+		IPAERR("Failed to construct NOP imm cmd\n");
+		ret = -ENOMEM;
+		goto bail;
+	}
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = nop_cmd_pyld->data;
+	desc[0].len = nop_cmd_pyld->len;
+
+	for (cnt = 0; cnt < dma->entries; cnt++) {
+		cmd.table_index = dma->dma[cnt].table_index;
+		cmd.base_addr = dma->dma[cnt].base_addr;
+		cmd.offset = dma->dma[cnt].offset;
+		cmd.data = dma->dma[cnt].data;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_NAT_DMA, &cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("Fail to construct nat_dma imm cmd\n");
+			continue;
+		}
+		desc[1].type = IPA_IMM_CMD_DESC;
+		desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_NAT_DMA);
+		desc[1].callback = NULL;
+		desc[1].user1 = NULL;
+		desc[1].user2 = 0;
+		desc[1].pyld = cmd_pyld->data;
+		desc[1].len = cmd_pyld->len;
+
+		ret = ipa3_send_cmd(NUM_OF_DESC, desc);
+		if (ret == -EPERM)
+			IPAERR("Fail to send immediate command %d\n", cnt);
+		ipahal_destroy_imm_cmd(cmd_pyld);
+	}
+
+bail:
+	kfree(desc);
+
+	if (nop_cmd_pyld != NULL)
+		ipahal_destroy_imm_cmd(nop_cmd_pyld);
+
+	return ret;
+}
+
+/**
+ * ipa3_nat_free_mem_and_device() - free the NAT memory and remove the device
+ * @nat_ctx:	[in] the IPA NAT memory to free
+ *
+ * Called by NAT client driver to free the NAT memory and remove the device
+ */
+void ipa3_nat_free_mem_and_device(struct ipa3_nat_mem *nat_ctx)
+{
+	IPADBG("\n");
+	mutex_lock(&nat_ctx->lock);
+
+	if (nat_ctx->is_sys_mem) {
+		IPADBG("freeing the dma memory\n");
+		dma_free_coherent(
+			 ipa3_ctx->pdev, nat_ctx->size,
+			 nat_ctx->vaddr, nat_ctx->dma_handle);
+		nat_ctx->size = 0;
+		nat_ctx->vaddr = NULL;
+	}
+	nat_ctx->is_mapped = false;
+	nat_ctx->is_sys_mem = false;
+	nat_ctx->is_dev_init = false;
+
+	mutex_unlock(&nat_ctx->lock);
+	IPADBG("return\n");
+}
+
+/**
+ * ipa3_nat_del_cmd() - Delete a NAT table
+ * @del:	[in] delete table parameters
+ *
+ * Called by NAT client driver to delete the nat table
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	struct ipahal_imm_cmd_pyld *nop_cmd_pyld = NULL;
+	struct ipa3_desc desc[2];
+	struct ipahal_imm_cmd_ip_v4_nat_init cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	bool mem_type_shared = true;
+	u32 base_addr = IPA_NAT_PHYS_MEM_OFFSET;
+	int result;
+
+	IPADBG("\n");
+	if (ipa3_ctx->nat_mem.is_tmp_mem) {
+		IPAERR("using temp memory during nat del\n");
+		mem_type_shared = false;
+		base_addr = ipa3_ctx->nat_mem.tmp_dma_handle;
+	}
+
+	if (del->public_ip_addr == 0) {
+		IPAERR("Bad Parameter\n");
+		result = -EPERM;
+		goto bail;
+	}
+
+	memset(&desc, 0, sizeof(desc));
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	nop_cmd_pyld =
+		ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+	if (!nop_cmd_pyld) {
+		IPAERR("Failed to construct NOP imm cmd\n");
+		result = -ENOMEM;
+		goto bail;
+	}
+	desc[0].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[0].type = IPA_IMM_CMD_DESC;
+	desc[0].callback = NULL;
+	desc[0].user1 = NULL;
+	desc[0].user2 = 0;
+	desc[0].pyld = nop_cmd_pyld->data;
+	desc[0].len = nop_cmd_pyld->len;
+
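+	/*
+	 * Deletion is done by re-issuing IP_V4_NAT_INIT with zero-sized
+	 * tables, pointing every table address at a harmless base.
+	 */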
+	cmd.table_index = del->table_index;
+	cmd.ipv4_rules_addr = base_addr;
+	cmd.ipv4_rules_addr_shared = mem_type_shared;
+	cmd.ipv4_expansion_rules_addr = base_addr;
+	cmd.ipv4_expansion_rules_addr_shared = mem_type_shared;
+	cmd.index_table_addr = base_addr;
+	cmd.index_table_addr_shared = mem_type_shared;
+	cmd.index_table_expansion_addr = base_addr;
+	cmd.index_table_expansion_addr_shared = mem_type_shared;
+	cmd.size_base_tables = 0;
+	cmd.size_expansion_tables = 0;
+	cmd.public_ip_addr = 0;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_V4_NAT_INIT, &cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("Fail to construct ip_v4_nat_init imm cmd\n");
+		result = -EPERM;
+		goto destroy_regwrt_imm_cmd;
+	}
+	desc[1].opcode = ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_V4_NAT_INIT);
+	desc[1].type = IPA_IMM_CMD_DESC;
+	desc[1].callback = NULL;
+	desc[1].user1 = NULL;
+	desc[1].user2 = 0;
+	desc[1].pyld = cmd_pyld->data;
+	desc[1].len = cmd_pyld->len;
+
+	if (ipa3_send_cmd(2, desc)) {
+		IPAERR("Fail to send immediate command\n");
+		result = -EPERM;
+		goto destroy_imm_cmd;
+	}
+
+	ipa3_ctx->nat_mem.size_base_tables = 0;
+	ipa3_ctx->nat_mem.size_expansion_tables = 0;
+	ipa3_ctx->nat_mem.public_ip_addr = 0;
+	ipa3_ctx->nat_mem.ipv4_rules_addr = 0;
+	ipa3_ctx->nat_mem.ipv4_expansion_rules_addr = 0;
+	ipa3_ctx->nat_mem.index_table_addr = 0;
+	ipa3_ctx->nat_mem.index_table_expansion_addr = 0;
+
+	ipa3_nat_free_mem_and_device(&ipa3_ctx->nat_mem);
+	IPADBG("return\n");
+	result = 0;
+
+destroy_imm_cmd:
+	ipahal_destroy_imm_cmd(cmd_pyld);
+destroy_regwrt_imm_cmd:
+	ipahal_destroy_imm_cmd(nop_cmd_pyld);
+bail:
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
new file mode 100644
index 0000000..b49fb32
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -0,0 +1,1283 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/qmi_encdec.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <linux/ipa.h>
+#include <linux/vmalloc.h>
+
+#include "ipa_qmi_service.h"
+
+#define IPA_Q6_SVC_VERS 1
+#define IPA_A5_SVC_VERS 1
+#define Q6_QMI_COMPLETION_TIMEOUT (60*HZ)
+
+#define IPA_A5_SERVICE_SVC_ID 0x31
+#define IPA_A5_SERVICE_INS_ID 1
+#define IPA_Q6_SERVICE_SVC_ID 0x31
+#define IPA_Q6_SERVICE_INS_ID 2
+
+#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
+#define QMI_SEND_REQ_TIMEOUT_MS 60000
+
+static struct qmi_handle *ipa3_svc_handle;
+static void ipa3_a5_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, ipa3_a5_svc_recv_msg);
+static struct workqueue_struct *ipa_svc_workqueue;
+static struct workqueue_struct *ipa_clnt_req_workqueue;
+static struct workqueue_struct *ipa_clnt_resp_workqueue;
+static void *curr_conn;
+static bool ipa3_qmi_modem_init_fin, ipa3_qmi_indication_fin;
+static struct work_struct ipa3_qmi_service_init_work;
+static uint32_t ipa_wan_platform;
+struct ipa3_qmi_context *ipa3_qmi_ctx;
+static bool workqueues_stopped;
+static bool ipa3_modem_init_cmplt;
+static bool first_time_handshake;
+/* QMI A5 service */
+
+static struct msg_desc ipa3_indication_reg_req_desc = {
+	.max_msg_len = QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INDICATION_REGISTER_REQ_V01,
+	.ei_array = ipa3_indication_reg_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_indication_reg_resp_desc = {
+	.max_msg_len = QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INDICATION_REGISTER_RESP_V01,
+	.ei_array = ipa3_indication_reg_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_master_driver_complete_indication_desc = {
+	.max_msg_len = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01,
+	.ei_array = ipa3_master_driver_init_complt_ind_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_install_fltr_rule_req_desc = {
+	.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01,
+	.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_install_fltr_rule_resp_desc = {
+	.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01,
+	.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_filter_installed_notif_req_desc = {
+	.max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01,
+	.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_filter_installed_notif_resp_desc = {
+	.max_msg_len = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01,
+	.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_config_req_desc = {
+	.max_msg_len = QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_CONFIG_REQ_V01,
+	.ei_array = ipa3_config_req_msg_data_v01_ei,
+};
+static struct msg_desc ipa3_config_resp_desc = {
+	.max_msg_len = QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_CONFIG_RESP_V01,
+	.ei_array = ipa3_config_resp_msg_data_v01_ei,
+};
+
+static struct msg_desc ipa3_init_modem_driver_cmplt_req_desc = {
+	.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01,
+	.ei_array = ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei,
+};
+
+static struct msg_desc ipa3_init_modem_driver_cmplt_resp_desc = {
+	.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01,
+	.msg_id = QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01,
+	.ei_array = ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei,
+};
+
+static int ipa3_handle_indication_req(void *req_h, void *req)
+{
+	struct ipa_indication_reg_req_msg_v01 *indication_req;
+	struct ipa_indication_reg_resp_msg_v01 resp;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+	int rc;
+
+	indication_req = (struct ipa_indication_reg_req_msg_v01 *)req;
+	IPAWANDBG("Received INDICATION Request\n");
+
+	memset(&resp, 0, sizeof(struct ipa_indication_reg_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_indication_reg_resp_desc, &resp, sizeof(resp));
+	ipa3_qmi_indication_fin = true;
+	/* check whether the init-complete indication needs to be sent to modem */
+	if (ipa3_qmi_modem_init_fin)	{
+		IPAWANDBG("send indication to modem (%d)\n",
+		ipa3_qmi_modem_init_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind_from_cb(ipa3_svc_handle, curr_conn,
+			&ipa3_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+	} else {
+		IPAWANERR("not sending indication, modem init not finished\n");
+	}
+	return rc;
+}
+
+
+static int ipa3_handle_install_filter_rule_req(void *req_h, void *req)
+{
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req;
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	uint32_t rule_hdl[MAX_NUM_Q6_RULE];
+	int rc = 0, i;
+
+	rule_req = (struct ipa_install_fltr_rule_req_msg_v01 *)req;
+	memset(rule_hdl, 0, sizeof(rule_hdl));
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	IPAWANDBG("Received install filter Request\n");
+
+	rc = ipa3_copy_ul_filter_rule_to_ipa(rule_req);
+	if (rc)
+		IPAWANERR("failed to copy UL filter rules from modem\n");
+
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	if (rule_req->filter_spec_ex_list_valid == true) {
+		resp.rule_id_valid = 1;
+		if (rule_req->filter_spec_ex_list_len > MAX_NUM_Q6_RULE) {
+			resp.rule_id_len = MAX_NUM_Q6_RULE;
+			IPAWANERR("installed (%d) max Q6-UL rules ",
+			MAX_NUM_Q6_RULE);
+			IPAWANERR("but modem gives total (%u)\n",
+			rule_req->filter_spec_ex_list_len);
+		} else {
+			resp.rule_id_len =
+				rule_req->filter_spec_ex_list_len;
+		}
+	} else {
+		resp.rule_id_valid = 0;
+		resp.rule_id_len = 0;
+	}
+
+	/* construct UL filter rules response to Modem*/
+	for (i = 0; i < resp.rule_id_len; i++) {
+		resp.rule_id[i] =
+			rule_req->filter_spec_ex_list[i].rule_id;
+	}
+
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_install_fltr_rule_resp_desc, &resp, sizeof(resp));
+
+	IPAWANDBG("Replied to install filter request\n");
+	return rc;
+}
+
+static int ipa3_handle_filter_installed_notify_req(void *req_h, void *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	int rc = 0;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	IPAWANDBG("Received filter_install_notify Request\n");
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_filter_installed_notif_resp_desc,
+			&resp, sizeof(resp));
+
+	IPAWANDBG("Responded to filter_install_notify Request\n");
+	return rc;
+}
+
+static int handle_ipa_config_req(void *req_h, void *req)
+{
+	struct ipa_config_resp_msg_v01 resp;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_config_resp_msg_v01));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+	IPAWANDBG("Received IPA CONFIG Request\n");
+	rc = ipa_mhi_handle_ipa_config_req(
+		(struct ipa_config_req_msg_v01 *)req);
+	if (rc) {
+		IPAERR("ipa_mhi_handle_ipa_config_req failed %d\n", rc);
+		resp.resp.result = IPA_QMI_RESULT_FAILURE_V01;
+	}
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+		&ipa3_config_resp_desc,
+		&resp, sizeof(resp));
+	IPAWANDBG("Responded to IPA CONFIG Request\n");
+	return rc;
+}
+
+static int ipa3_handle_modem_init_cmplt_req(void *req_h, void *req)
+{
+	struct ipa_init_modem_driver_cmplt_req_msg_v01 *cmplt_req;
+	struct ipa_init_modem_driver_cmplt_resp_msg_v01 resp;
+	int rc;
+
+	IPAWANDBG("Received QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01\n");
+	cmplt_req = (struct ipa_init_modem_driver_cmplt_req_msg_v01 *)req;
+
+	if (ipa3_modem_init_cmplt == false) {
+		ipa3_modem_init_cmplt = true;
+		if (ipa3_qmi_modem_init_fin == true) {
+			IPAWANDBG("load uc related registers (%d)\n",
+			ipa3_qmi_modem_init_fin);
+			ipa3_uc_load_notify();
+		}
+	}
+
+	memset(&resp, 0, sizeof(resp));
+	resp.resp.result = IPA_QMI_RESULT_SUCCESS_V01;
+
+	rc = qmi_send_resp_from_cb(ipa3_svc_handle, curr_conn, req_h,
+			&ipa3_init_modem_driver_cmplt_resp_desc,
+			&resp, sizeof(resp));
+
+	IPAWANDBG("Sent QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01\n");
+	return rc;
+}
+
+static int ipa3_a5_svc_connect_cb(struct qmi_handle *handle,
+			       void *conn_h)
+{
+	if (ipa3_svc_handle != handle || !conn_h)
+		return -EINVAL;
+
+	if (curr_conn) {
+		IPAWANERR("Service is busy\n");
+		return -ECONNREFUSED;
+	}
+	curr_conn = conn_h;
+	return 0;
+}
+
+static int ipa3_a5_svc_disconnect_cb(struct qmi_handle *handle,
+				  void *conn_h)
+{
+	if (ipa3_svc_handle != handle || curr_conn != conn_h)
+		return -EINVAL;
+
+	curr_conn = NULL;
+	return 0;
+}
+
+static int ipa3_a5_svc_req_desc_cb(unsigned int msg_id,
+				struct msg_desc **req_desc)
+{
+	int rc;
+
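+	/*
+	 * Map each message id to its descriptor; rc carries the size of
+	 * the corresponding request structure (or -ENOTSUPP).
+	 */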
+	switch (msg_id) {
+	case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+		*req_desc = &ipa3_indication_reg_req_desc;
+		rc = sizeof(struct ipa_indication_reg_req_msg_v01);
+		break;
+
+	case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+		*req_desc = &ipa3_install_fltr_rule_req_desc;
+		rc = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+		break;
+	case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+		*req_desc = &ipa3_filter_installed_notif_req_desc;
+		rc = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+		break;
+	case QMI_IPA_CONFIG_REQ_V01:
+		*req_desc = &ipa3_config_req_desc;
+		rc = sizeof(struct ipa_config_req_msg_v01);
+		break;
+	case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01:
+		*req_desc = &ipa3_init_modem_driver_cmplt_req_desc;
+		rc = sizeof(struct ipa_init_modem_driver_cmplt_req_msg_v01);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static int ipa3_a5_svc_req_cb(struct qmi_handle *handle, void *conn_h,
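+/*
+ * WAN logging helpers: besides printk, each macro mirrors the message into
+ * the IPA IPC logging buffers.
+ */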
+			void *req_h, unsigned int msg_id, void *req)
+{
+	int rc;
+
+	if (ipa3_svc_handle != handle || curr_conn != conn_h)
+		return -EINVAL;
+
+	switch (msg_id) {
+	case QMI_IPA_INDICATION_REGISTER_REQ_V01:
+		rc = ipa3_handle_indication_req(req_h, req);
+		break;
+	case QMI_IPA_INSTALL_FILTER_RULE_REQ_V01:
+		rc = ipa3_handle_install_filter_rule_req(req_h, req);
+		rc = ipa3_wwan_update_mux_channel_prop();
+		break;
+	case QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01:
+		rc = ipa3_handle_filter_installed_notify_req(req_h, req);
+		break;
+	case QMI_IPA_CONFIG_REQ_V01:
+		rc = handle_ipa_config_req(req_h, req);
+		break;
+	case QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01:
+		rc = ipa3_handle_modem_init_cmplt_req(req_h, req);
+		break;
+	default:
+		rc = -ENOTSUPP;
+		break;
+	}
+	return rc;
+}
+
+static void ipa3_a5_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		IPAWANDBG_LOW("Notified about a Receive Event");
+		rc = qmi_recv_msg(ipa3_svc_handle);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message\n");
+}
+
+static void qmi_ipa_a5_svc_ntfy(struct qmi_handle *handle,
+		enum qmi_event_type event, void *priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_svc_workqueue,
+					   &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options ipa3_a5_svc_ops_options = {
+	.version = 1,
+	.service_id = IPA_A5_SERVICE_SVC_ID,
+	.service_vers = IPA_A5_SVC_VERS,
+	.service_ins = IPA_A5_SERVICE_INS_ID,
+	.connect_cb = ipa3_a5_svc_connect_cb,
+	.disconnect_cb = ipa3_a5_svc_disconnect_cb,
+	.req_desc_cb = ipa3_a5_svc_req_desc_cb,
+	.req_cb = ipa3_a5_svc_req_cb,
+};
+
+
+/****************************************************/
+/*                 QMI A5 client ->Q6               */
+/****************************************************/
+static void ipa3_q6_clnt_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_recv_msg_client, ipa3_q6_clnt_recv_msg);
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_arrive, ipa3_q6_clnt_svc_arrive);
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_work_svc_exit, ipa3_q6_clnt_svc_exit);
+/* QMI client handle towards the Q6 IPA service, over IPC Router */
+static struct qmi_handle *ipa_q6_clnt;
+static int ipa_q6_clnt_reset;
+
+static int ipa3_check_qmi_response(int rc,
+				  int req_id,
+				  enum ipa_qmi_result_type_v01 result,
+				  enum ipa_qmi_error_type_v01 error,
+				  char *resp_type)
+{
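+	/*
+	 * Fold transport errors (rc < 0) and the QMI result code into a
+	 * single return value.
+	 */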
+	if (rc < 0) {
+		if (rc == -ETIMEDOUT && ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+			IPAWANERR(
+			"Timeout for qmi request id %d\n", req_id);
+			return rc;
+		}
+		if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+			IPAWANERR(
+			"SSR while waiting for qmi request id %d\n", req_id);
+			return rc;
+		}
+		IPAWANERR("Error sending qmi request id %d, rc = %d\n",
+			req_id, rc);
+		return rc;
+	}
+	if (result != IPA_QMI_RESULT_SUCCESS_V01 &&
+	    ipa3_rmnet_ctx.ipa_rmnet_ssr) {
+		IPAWANERR(
+		"Got bad response %d from request id %d (error %d)\n",
+		result, req_id, error);
+		return result;
+	}
+	IPAWANDBG_LOW("Received %s successfully\n", resp_type);
+	return 0;
+}
+
+static int ipa3_qmi_init_modem_send_sync_msg(void)
+{
+	struct ipa_init_modem_driver_req_msg_v01 req;
+	struct ipa_init_modem_driver_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+	u16 smem_restr_bytes = ipa3_get_smem_restr_bytes();
+
+	memset(&req, 0, sizeof(struct ipa_init_modem_driver_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_init_modem_driver_resp_msg_v01));
+
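+	/*
+	 * All IPA SRAM offsets reported to Q6 below are shifted by the
+	 * SMEM restricted-bytes region.
+	 */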
+	req.platform_type_valid = true;
+	req.platform_type = ipa_wan_platform;
+
+	req.hdr_tbl_info_valid = (IPA_MEM_PART(modem_hdr_size) != 0);
+	req.hdr_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_ofst) + smem_restr_bytes;
+	req.hdr_tbl_info.modem_offset_end = IPA_MEM_PART(modem_hdr_ofst) +
+		smem_restr_bytes + IPA_MEM_PART(modem_hdr_size) - 1;
+
+	req.v4_route_tbl_info_valid = true;
+	req.v4_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v4_rt_nhash_ofst) + smem_restr_bytes;
+	req.v4_route_tbl_info.num_indices =
+		IPA_MEM_PART(v4_modem_rt_index_hi);
+	req.v6_route_tbl_info_valid = true;
+
+	req.v6_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v6_rt_nhash_ofst) + smem_restr_bytes;
+	req.v6_route_tbl_info.num_indices =
+		IPA_MEM_PART(v6_modem_rt_index_hi);
+
+	req.v4_filter_tbl_start_addr_valid = true;
+	req.v4_filter_tbl_start_addr =
+		IPA_MEM_PART(v4_flt_nhash_ofst) + smem_restr_bytes;
+
+	req.v6_filter_tbl_start_addr_valid = true;
+	req.v6_filter_tbl_start_addr =
+		IPA_MEM_PART(v6_flt_nhash_ofst) + smem_restr_bytes;
+
+	req.modem_mem_info_valid = (IPA_MEM_PART(modem_size) != 0);
+	req.modem_mem_info.block_start_addr =
+		IPA_MEM_PART(modem_ofst) + smem_restr_bytes;
+	req.modem_mem_info.size = IPA_MEM_PART(modem_size);
+
+	req.ctrl_comm_dest_end_pt_valid = true;
+	req.ctrl_comm_dest_end_pt =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+
+	req.hdr_proc_ctx_tbl_info_valid =
+		(IPA_MEM_PART(modem_hdr_proc_ctx_size) != 0);
+	req.hdr_proc_ctx_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) + smem_restr_bytes;
+	req.hdr_proc_ctx_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst) +
+		IPA_MEM_PART(modem_hdr_proc_ctx_size) + smem_restr_bytes - 1;
+
+	req.zip_tbl_info_valid = (IPA_MEM_PART(modem_comp_decomp_size) != 0);
+	req.zip_tbl_info.modem_offset_start =
+		IPA_MEM_PART(modem_comp_decomp_ofst) + smem_restr_bytes;
+	req.zip_tbl_info.modem_offset_end =
+		IPA_MEM_PART(modem_comp_decomp_ofst) +
+		IPA_MEM_PART(modem_comp_decomp_size) + smem_restr_bytes - 1;
+
+	req.v4_hash_route_tbl_info_valid = true;
+	req.v4_hash_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v4_rt_hash_ofst) + smem_restr_bytes;
+	req.v4_hash_route_tbl_info.num_indices =
+		IPA_MEM_PART(v4_modem_rt_index_hi);
+
+	req.v6_hash_route_tbl_info_valid = true;
+	req.v6_hash_route_tbl_info.route_tbl_start_addr =
+		IPA_MEM_PART(v6_rt_hash_ofst) + smem_restr_bytes;
+	req.v6_hash_route_tbl_info.num_indices =
+		IPA_MEM_PART(v6_modem_rt_index_hi);
+
+	req.v4_hash_filter_tbl_start_addr_valid = true;
+	req.v4_hash_filter_tbl_start_addr =
+		IPA_MEM_PART(v4_flt_hash_ofst) + smem_restr_bytes;
+
+	req.v6_hash_filter_tbl_start_addr_valid = true;
+	req.v6_hash_filter_tbl_start_addr =
+		IPA_MEM_PART(v6_flt_hash_ofst) + smem_restr_bytes;
+
+	if (!ipa3_uc_loaded_check()) {  /* First time boot */
+		req.is_ssr_bootup_valid = false;
+		req.is_ssr_bootup = 0;
+	} else {  /* After SSR boot */
+		req.is_ssr_bootup_valid = true;
+		req.is_ssr_bootup = 1;
+	}
+
+	IPAWANDBG("platform_type %d\n", req.platform_type);
+	IPAWANDBG("hdr_tbl_info.modem_offset_start %d\n",
+			req.hdr_tbl_info.modem_offset_start);
+	IPAWANDBG("hdr_tbl_info.modem_offset_end %d\n",
+			req.hdr_tbl_info.modem_offset_end);
+	IPAWANDBG("v4_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v4_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_route_tbl_info.num_indices %d\n",
+			req.v4_route_tbl_info.num_indices);
+	IPAWANDBG("v6_route_tbl_info.route_tbl_start_addr %d\n",
+			req.v6_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_route_tbl_info.num_indices %d\n",
+			req.v6_route_tbl_info.num_indices);
+	IPAWANDBG("v4_filter_tbl_start_addr %d\n",
+			req.v4_filter_tbl_start_addr);
+	IPAWANDBG("v6_filter_tbl_start_addr %d\n",
+			req.v6_filter_tbl_start_addr);
+	IPAWANDBG("modem_mem_info.block_start_addr %d\n",
+			req.modem_mem_info.block_start_addr);
+	IPAWANDBG("modem_mem_info.size %d\n",
+			req.modem_mem_info.size);
+	IPAWANDBG("ctrl_comm_dest_end_pt %d\n",
+			req.ctrl_comm_dest_end_pt);
+	IPAWANDBG("is_ssr_bootup %d\n",
+			req.is_ssr_bootup);
+	IPAWANDBG("v4_hash_route_tbl_info.route_tbl_start_addr %d\n",
+		req.v4_hash_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v4_hash_route_tbl_info.num_indices %d\n",
+		req.v4_hash_route_tbl_info.num_indices);
+	IPAWANDBG("v6_hash_route_tbl_info.route_tbl_start_addr %d\n",
+		req.v6_hash_route_tbl_info.route_tbl_start_addr);
+	IPAWANDBG("v6_hash_route_tbl_info.num_indices %d\n",
+		req.v6_hash_route_tbl_info.num_indices);
+	IPAWANDBG("v4_hash_filter_tbl_start_addr %d\n",
+		req.v4_hash_filter_tbl_start_addr);
+	IPAWANDBG("v6_hash_filter_tbl_start_addr %d\n",
+		req.v6_hash_filter_tbl_start_addr);
+
+	req_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_REQ_V01;
+	req_desc.ei_array = ipa3_init_modem_driver_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INIT_MODEM_DRIVER_RESP_V01;
+	resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
+
+	pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	pr_info("QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 response received\n");
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INIT_MODEM_DRIVER_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_init_modem_driver_resp_msg_v01");
+}
+
+/* sending filter-install-request to modem*/
+int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	struct ipa_install_fltr_rule_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	/* check if the filter rules from IPACM is valid */
+	if (req->filter_spec_ex_list_len == 0) {
+		IPAWANDBG("IPACM pass zero rules to Q6\n");
+	} else {
+		IPAWANDBG("IPACM pass %u rules to Q6\n",
+		req->filter_spec_ex_list_len);
+	}
+
+	/* cache the qmi_filter_request */
+	memcpy(&(ipa3_qmi_ctx->ipa_install_fltr_rule_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg]),
+			req, sizeof(struct ipa_install_fltr_rule_req_msg_v01));
+	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg++;
+	ipa3_qmi_ctx->num_ipa_install_fltr_rule_req_msg %=
+		MAX_NUM_QMI_RULE_CACHE;
+
+	req_desc.max_msg_len = QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_REQ_V01;
+	req_desc.ei_array = ipa3_install_fltr_rule_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_install_fltr_rule_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
+	resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+			req,
+			sizeof(struct ipa_install_fltr_rule_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_INSTALL_FILTER_RULE_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_install_filter");
+}
+
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_enable_force_clear_datapath_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+
+	if (!req || !req->source_pipe_bitmask) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+	QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa3_enable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+	resp_desc.max_msg_len =
+		QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), 0);
+	if (rc < 0) {
+		IPAWANERR("send req failed %d\n", rc);
+		return rc;
+	}
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+	IPAWANDBG("SUCCESS\n");
+	return rc;
+}
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	struct ipa_disable_force_clear_datapath_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+
+	if (!req) {
+		IPAWANERR("invalid params\n");
+		return -EINVAL;
+	}
+
+	req_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01;
+	req_desc.ei_array =
+		ipa3_disable_force_clear_datapath_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(resp));
+	resp_desc.max_msg_len =
+		QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
+	resp_desc.ei_array =
+		ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(*req),
+			&resp_desc, &resp, sizeof(resp), 0);
+	if (rc < 0) {
+		IPAWANERR("send req failed %d\n", rc);
+		return rc;
+	}
+	if (resp.resp.result != IPA_QMI_RESULT_SUCCESS_V01) {
+		IPAWANERR("filter_notify failed %d\n",
+			resp.resp.result);
+		return resp.resp.result;
+	}
+	IPAWANDBG("SUCCESS\n");
+	return rc;
+}
+
+/* sending filter-installed-notify-request to modem*/
+int ipa3_qmi_filter_notify_send(
+		struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	struct ipa_fltr_installed_notif_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc = 0;
+
+	/* check if the filter rules from IPACM is valid */
+	if (req->rule_id_len == 0) {
+		IPAWANERR(" delete UL filter rule for pipe %d\n",
+		req->source_pipe_index);
+		return -EINVAL;
+	} else if (req->rule_id_len > QMI_IPA_MAX_FILTERS_V01) {
+		IPAWANERR(" UL filter rule for pipe %d exceed max (%u)\n",
+		req->source_pipe_index,
+		req->rule_id_len);
+		return -EINVAL;
+	}
+
+	/* cache the qmi_filter_request */
+	memcpy(&(ipa3_qmi_ctx->ipa_fltr_installed_notif_req_msg_cache[
+		ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg]),
+		req, sizeof(struct ipa_fltr_installed_notif_req_msg_v01));
+	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg++;
+	ipa3_qmi_ctx->num_ipa_fltr_installed_notif_req_msg %=
+		MAX_NUM_QMI_RULE_CACHE;
+
+	req_desc.max_msg_len =
+	QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01;
+	req_desc.ei_array = ipa3_fltr_installed_notif_req_msg_data_v01_ei;
+
+	memset(&resp, 0, sizeof(struct ipa_fltr_installed_notif_resp_msg_v01));
+	resp_desc.max_msg_len =
+		QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
+	resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt,
+			&req_desc,
+			req,
+			sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_REQ_TIMEOUT_MS);
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_fltr_installed_notif_resp");
+}
+
+static void ipa3_q6_clnt_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		IPAWANDBG_LOW("Notified about a Receive Event");
+		rc = qmi_recv_msg(ipa_q6_clnt);
+	} while (rc == 0);
+	if (rc != -ENOMSG)
+		IPAWANERR("Error receiving message\n");
+}
+
+static void ipa3_q6_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		IPAWANDBG_LOW("client qmi recv message called");
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_clnt_resp_workqueue,
+					   &ipa3_work_recv_msg_client, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			       void *msg, unsigned int msg_len,
+			       void *ind_cb_priv)
+{
+	struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+	struct msg_desc qmi_ind_desc;
+	int rc = 0;
+
+	if (handle != ipa_q6_clnt) {
+		IPAWANERR("Wrong client\n");
+		return;
+	}
+
+	if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
+		memset(&qmi_ind, 0, sizeof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01));
+		qmi_ind_desc.max_msg_len =
+			QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01;
+		qmi_ind_desc.msg_id = QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01;
+		qmi_ind_desc.ei_array =
+			ipa3_data_usage_quota_reached_ind_msg_data_v01_ei;
+
+		rc = qmi_kernel_decode(&qmi_ind_desc, &qmi_ind, msg, msg_len);
+		if (rc < 0) {
+			IPAWANERR("Error decoding msg_id %d\n", msg_id);
+			return;
+		}
+		IPAWANDBG("Quota reached indication on qmux(%d) Mbytes(%lu)\n",
+			  qmi_ind.apn.mux_id,
+			  (unsigned long int) qmi_ind.apn.num_Mbytes);
+		ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id);
+	}
+}
+
+static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct ipa_master_driver_init_complt_ind_msg_v01 ind;
+
+	/* Create a Local client port for QMI communication */
+	ipa_q6_clnt = qmi_handle_create(ipa3_q6_clnt_notify, NULL);
+	if (!ipa_q6_clnt) {
+		IPAWANERR("QMI client handle alloc failed\n");
+		return;
+	}
+
+	IPAWANDBG("Lookup server name, get client-hdl(%p)\n",
+		ipa_q6_clnt);
+	rc = qmi_connect_to_service(ipa_q6_clnt,
+			IPA_Q6_SERVICE_SVC_ID,
+			IPA_Q6_SVC_VERS,
+			IPA_Q6_SERVICE_INS_ID);
+	if (rc < 0) {
+		IPAWANERR("Server not found\n");
+		qmi_handle_destroy(ipa_q6_clnt);
+		ipa_q6_clnt = NULL;
+		return;
+	}
+
+	rc = qmi_register_ind_cb(ipa_q6_clnt, ipa3_q6_clnt_ind_cb, NULL);
+	if (rc < 0)
+		IPAWANERR("Unable to register for indications\n");
+
+	ipa_q6_clnt_reset = 0;
+	IPAWANDBG("Q6 QMI service available now\n");
+	/* Initialize modem IPA-driver */
+	IPAWANDBG("send ipa3_qmi_init_modem_send_sync_msg to modem\n");
+	rc = ipa3_qmi_init_modem_send_sync_msg();
+	if ((rc == -ENETRESET) || (rc == -ENODEV)) {
+		IPAWANERR(
+			"ipa3_qmi_init_modem_send_sync_msg failed due to SSR!\n");
+		/* Cleanup will take place when ipa3_wwan_remove is called */
+		return;
+	}
+	if (rc != 0) {
+		IPAWANERR("ipa3_qmi_init_modem_send_sync_msg failed\n");
+		/*
+		 * This is a very unexpected scenario, which requires a kernel
+		 * panic in order to force dumps for QMI/Q6 side analysis.
+		 */
+		BUG();
+		return;
+	}
+	ipa3_qmi_modem_init_fin = true;
+
+	/* got modem_init_cmplt_req already, load uc-related register */
+	if (ipa3_modem_init_cmplt == true) {
+		IPAWANDBG("load uc related registers (%d)\n",
+		ipa3_modem_init_cmplt);
+		ipa3_uc_load_notify();
+	}
+
+	/* In cold-bootup, first_time_handshake = false */
+	ipa3_q6_handshake_complete(first_time_handshake);
+	first_time_handshake = true;
+	IPAWANDBG("complete, ipa3_qmi_modem_init_fin : %d\n",
+		ipa3_qmi_modem_init_fin);
+
+	if (ipa3_qmi_indication_fin)	{
+		IPAWANDBG("send indication to modem (%d)\n",
+		ipa3_qmi_indication_fin);
+		memset(&ind, 0, sizeof(struct
+				ipa_master_driver_init_complt_ind_msg_v01));
+		ind.master_driver_init_status.result =
+			IPA_QMI_RESULT_SUCCESS_V01;
+		rc = qmi_send_ind(ipa3_svc_handle, curr_conn,
+			&ipa3_master_driver_complete_indication_desc,
+			&ind,
+			sizeof(ind));
+		IPAWANDBG("ipa_qmi_service_client good\n");
+	} else {
+		IPAWANERR("not sending indication (%d)\n",
+		ipa3_qmi_indication_fin);
+	}
+}
+
+
+static void ipa3_q6_clnt_svc_exit(struct work_struct *work)
+{
+	qmi_handle_destroy(ipa_q6_clnt);
+	ipa_q6_clnt_reset = 1;
+	ipa_q6_clnt = NULL;
+}
+
+
+static int ipa3_q6_clnt_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	IPAWANDBG("event %ld\n", code);
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_clnt_req_workqueue,
+					   &ipa3_work_svc_arrive, 0);
+		break;
+	case QMI_SERVER_EXIT:
+		if (!workqueues_stopped)
+			queue_delayed_work(ipa_clnt_req_workqueue,
+					   &ipa3_work_svc_exit, 0);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+
+static struct notifier_block ipa3_q6_clnt_nb = {
+	.notifier_call = ipa3_q6_clnt_svc_event_notify,
+};
+
+static void ipa3_qmi_service_init_worker(struct work_struct *work)
+{
+	int rc;
+
+	/* Initialize QMI service */
+	IPAWANDBG("IPA A7 QMI service init\n");
+
+	/* start the QMI msg cache */
+	ipa3_qmi_ctx = vzalloc(sizeof(*ipa3_qmi_ctx));
+	if (!ipa3_qmi_ctx) {
+		IPAWANERR(":vzalloc err.\n");
+		return;
+	}
+	ipa3_qmi_ctx->modem_cfg_emb_pipe_flt =
+		ipa3_get_modem_cfg_emb_pipe_flt();
+
+	ipa_svc_workqueue = create_singlethread_workqueue("ipa_A7_svc");
+	if (!ipa_svc_workqueue) {
+		IPAWANERR("Creating ipa_A7_svc workqueue failed\n");
+		vfree(ipa3_qmi_ctx);
+		ipa3_qmi_ctx = NULL;
+		return;
+	}
+
+	ipa3_svc_handle = qmi_handle_create(qmi_ipa_a5_svc_ntfy, NULL);
+	if (!ipa3_svc_handle) {
+		IPAWANERR("Creating ipa_A7_svc qmi handle failed\n");
+		goto destroy_ipa_A7_svc_wq;
+	}
+
+	/*
+	 * Setting the current connection to NULL, as due to a race between
+	 * server and client clean-up in SSR, the disconnect_cb might not
+	 * have necessarily been called
+	 */
+	curr_conn = NULL;
+
+	rc = qmi_svc_register(ipa3_svc_handle, &ipa3_a5_svc_ops_options);
+	if (rc < 0) {
+		IPAWANERR("Registering ipa_a5 svc failed %d\n",
+				rc);
+		goto destroy_qmi_handle;
+	}
+
+	/* Initialize QMI-client */
+
+	ipa_clnt_req_workqueue = create_singlethread_workqueue("clnt_req");
+	if (!ipa_clnt_req_workqueue) {
+		IPAWANERR("Creating clnt_req workqueue failed\n");
+		goto deregister_qmi_srv;
+	}
+
+	ipa_clnt_resp_workqueue = create_singlethread_workqueue("clnt_resp");
+	if (!ipa_clnt_resp_workqueue) {
+		IPAWANERR("Creating clnt_resp workqueue failed\n");
+		goto destroy_clnt_req_wq;
+	}
+
+	rc = qmi_svc_event_notifier_register(IPA_Q6_SERVICE_SVC_ID,
+				IPA_Q6_SVC_VERS,
+				IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb);
+	if (rc < 0) {
+		IPAWANERR("notifier register failed\n");
+		goto destroy_clnt_resp_wq;
+	}
+
+	/* get Q6 service and start send modem-initial to Q6 */
+	IPAWANDBG("wait service available\n");
+	return;
+
+destroy_clnt_resp_wq:
+	destroy_workqueue(ipa_clnt_resp_workqueue);
+	ipa_clnt_resp_workqueue = NULL;
+destroy_clnt_req_wq:
+	destroy_workqueue(ipa_clnt_req_workqueue);
+	ipa_clnt_req_workqueue = NULL;
+deregister_qmi_srv:
+	qmi_svc_unregister(ipa3_svc_handle);
+destroy_qmi_handle:
+	qmi_handle_destroy(ipa3_svc_handle);
+	ipa3_svc_handle = NULL;
+destroy_ipa_A7_svc_wq:
+	destroy_workqueue(ipa_svc_workqueue);
+	ipa_svc_workqueue = NULL;
+	vfree(ipa3_qmi_ctx);
+	ipa3_qmi_ctx = NULL;
+}
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+	ipa_wan_platform = wan_platform_type;
+	ipa3_qmi_modem_init_fin = false;
+	ipa3_qmi_indication_fin = false;
+	ipa3_modem_init_cmplt = false;
+	workqueues_stopped = false;
+
+	if (!ipa3_svc_handle) {
+		INIT_WORK(&ipa3_qmi_service_init_work,
+			ipa3_qmi_service_init_worker);
+		schedule_work(&ipa3_qmi_service_init_work);
+	}
+	return 0;
+}
+
+void ipa3_qmi_service_exit(void)
+{
+	int ret = 0;
+
+	workqueues_stopped = true;
+
+	/* qmi-service */
+	if (ipa3_svc_handle) {
+		ret = qmi_svc_unregister(ipa3_svc_handle);
+		if (ret < 0)
+			IPAWANERR("unregister qmi handle %p failed, ret=%d\n",
+			ipa3_svc_handle, ret);
+	}
+	if (ipa_svc_workqueue) {
+		flush_workqueue(ipa_svc_workqueue);
+		destroy_workqueue(ipa_svc_workqueue);
+		ipa_svc_workqueue = NULL;
+	}
+
+	if (ipa3_svc_handle) {
+		ret = qmi_handle_destroy(ipa3_svc_handle);
+		if (ret < 0)
+			IPAWANERR("Error destroying qmi handle %p, ret=%d\n",
+			ipa3_svc_handle, ret);
+	}
+
+	/* qmi-client */
+
+	/* Unregister from events */
+	ret = qmi_svc_event_notifier_unregister(IPA_Q6_SERVICE_SVC_ID,
+				IPA_Q6_SVC_VERS,
+				IPA_Q6_SERVICE_INS_ID, &ipa3_q6_clnt_nb);
+	if (ret < 0)
+		IPAWANERR(
+		"Error qmi_svc_event_notifier_unregister service %d, ret=%d\n",
+		IPA_Q6_SERVICE_SVC_ID, ret);
+
+	/* Release client handle */
+	ipa3_q6_clnt_svc_exit(NULL);
+
+	if (ipa_clnt_req_workqueue) {
+		destroy_workqueue(ipa_clnt_req_workqueue);
+		ipa_clnt_req_workqueue = NULL;
+	}
+	if (ipa_clnt_resp_workqueue) {
+		destroy_workqueue(ipa_clnt_resp_workqueue);
+		ipa_clnt_resp_workqueue = NULL;
+	}
+
+	/* clean the QMI msg cache */
+	if (ipa3_qmi_ctx != NULL) {
+		vfree(ipa3_qmi_ctx);
+		ipa3_qmi_ctx = NULL;
+	}
+	ipa3_svc_handle = NULL;
+	ipa3_qmi_modem_init_fin = false;
+	ipa3_qmi_indication_fin = false;
+	ipa3_modem_init_cmplt = false;
+}
+
+void ipa3_qmi_stop_workqueues(void)
+{
+	IPAWANDBG("Stopping all QMI workqueues\n");
+
+	/* Stopping all workqueues so new work won't be scheduled */
+	workqueues_stopped = true;
+
+	/* Making sure that the current scheduled work won't be executed */
+	cancel_delayed_work(&work_recv_msg);
+	cancel_delayed_work(&ipa3_work_recv_msg_client);
+	cancel_delayed_work(&ipa3_work_svc_arrive);
+	cancel_delayed_work(&ipa3_work_svc_exit);
+}
+
+
+/* voting for bus BW to ipa_rm*/
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	struct ipa_rm_perf_profile profile;
+	int ret;
+
+	if (bw_mbps == NULL) {
+		IPAWANERR("Bus BW is invalid\n");
+		return -EINVAL;
+	}
+
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = *bw_mbps;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	if (ret)
+		IPAWANERR("Failed to set perf profile to BW %u\n",
+			profile.max_supported_bandwidth_mbps);
+	else
+		IPAWANDBG("Set perf profile to BW %u\n",
+			profile.max_supported_bandwidth_mbps);
+
+	return ret;
+}
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+			   struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa3_get_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa3_get_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_data_stats_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_data_stats_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_GET_DATA_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_data_stats_resp_msg_v01");
+}
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+			      struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	req_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_REQ_V01;
+	req_desc.ei_array = ipa3_get_apn_data_stats_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len = QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_GET_APN_DATA_STATS_RESP_V01;
+	resp_desc.ei_array = ipa3_get_apn_data_stats_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
+			&resp_desc, resp,
+			sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_GET_APN_DATA_STATS_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_GET_APN_DATA_STATS_REQ_V01, resp->resp.result,
+		resp->resp.error, "ipa_get_apn_data_stats_req_msg_v01");
+}
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	struct ipa_set_data_usage_quota_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&resp, 0, sizeof(struct ipa_set_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa3_set_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+			sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
+			&resp_desc, &resp, sizeof(resp),
+			QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_set_data_usage_quota_req_msg_v01");
+}
+
+int ipa3_qmi_stop_data_qouta(void)
+{
+	struct ipa_stop_data_usage_quota_req_msg_v01 req;
+	struct ipa_stop_data_usage_quota_resp_msg_v01 resp;
+	struct msg_desc req_desc, resp_desc;
+	int rc;
+
+	memset(&req, 0, sizeof(struct ipa_stop_data_usage_quota_req_msg_v01));
+	memset(&resp, 0, sizeof(struct ipa_stop_data_usage_quota_resp_msg_v01));
+
+	req_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01;
+	req_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01;
+	req_desc.ei_array = ipa3_stop_data_usage_quota_req_msg_data_v01_ei;
+
+	resp_desc.max_msg_len =
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01;
+	resp_desc.msg_id = QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01;
+	resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
+
+	IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
+	if (unlikely(!ipa_q6_clnt))
+		return -ETIMEDOUT;
+	rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
+		&resp_desc, &resp, sizeof(resp),
+		QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+	IPAWANDBG_LOW("QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 received\n");
+
+	return ipa3_check_qmi_response(rc,
+		QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01, resp.resp.result,
+		resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
new file mode 100644
index 0000000..0f64120
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -0,0 +1,303 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_QMI_SERVICE_H
+#define IPA_QMI_SERVICE_H
+
+#include <linux/ipa.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include "ipa_i.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+
+/**
+ * name of the DL wwan default routing tables for v4 and v6
+ */
+#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
+#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
+#define MAX_NUM_Q6_RULE 35
+#define MAX_NUM_QMI_RULE_CACHE 10
+#define DEV_NAME "ipa-wan"
+#define SUBSYS_MODEM "modem"
+
+#define IPAWANDBG(fmt, args...) \
+	do { \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+
+#define IPAWANDBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAWANERR(fmt, args...) \
+	do { \
+		pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAWANINFO(fmt, args...) \
+	do { \
+		pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			DEV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+extern struct ipa3_qmi_context *ipa3_qmi_ctx;
+
+struct ipa3_qmi_context {
+	struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+	u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+	int num_ipa_install_fltr_rule_req_msg;
+	struct ipa_install_fltr_rule_req_msg_v01
+		ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	int num_ipa_fltr_installed_notif_req_msg;
+	struct ipa_fltr_installed_notif_req_msg_v01
+		ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
+	bool modem_cfg_emb_pipe_flt;
+};
+
+struct ipa3_rmnet_mux_val {
+	uint32_t  mux_id;
+	int8_t    vchannel_name[IFNAMSIZ];
+	bool mux_channel_set;
+	bool ul_flt_reg;
+	bool mux_hdr_set;
+	uint32_t  hdr_hdl;
+};
+
+extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+	ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+
+/**
+ * struct ipa3_rmnet_context - IPA rmnet context
+ * @ipa_rmnet_ssr: support modem SSR
+ * @polling_interval: Requested interval for polling tethered statistics
+ * @metered_mux_id: The mux ID on which quota has been set
+ */
+struct ipa3_rmnet_context {
+	bool ipa_rmnet_ssr;
+	u64 polling_interval;
+	u32 metered_mux_id;
+};
+
+extern struct ipa3_rmnet_context ipa3_rmnet_ctx;
+
+#ifdef CONFIG_RMNET_IPA3
+
+int ipa3_qmi_service_init(uint32_t wan_platform_type);
+
+void ipa3_qmi_service_exit(void);
+
+/* sending filter-install-request to modem*/
+int ipa3_qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req);
+
+/* sending filter-installed-notify-request to modem*/
+int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
+		*req);
+
+/* voting for bus BW to ipa_rm*/
+int ipa3_vote_for_bus_bw(uint32_t *bw_mbps);
+
+int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+	*rule_req);
+
+int ipa3_wwan_update_mux_channel_prop(void);
+
+int ipa3_wan_ioctl_init(void);
+
+void ipa3_wan_ioctl_stop_qmi_messages(void);
+
+void ipa3_wan_ioctl_enable_qmi_messages(void);
+
+void ipa3_wan_ioctl_deinit(void);
+
+void ipa3_qmi_stop_workqueues(void);
+
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
+		*data);
+
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
+
+void ipa3_broadcast_quota_reach_ind(uint32_t mux_id);
+
+int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
+	*data);
+
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset);
+
+int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
+
+int ipa3_qmi_stop_data_qouta(void);
+
+void ipa3_q6_handshake_complete(bool ssr_bootup);
+
+#else /* CONFIG_RMNET_IPA3 */
+
+static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_qmi_service_exit(void) { }
+
+/* sending filter-install-request to modem*/
+static inline int ipa3_qmi_filter_request_send(
+	struct ipa_install_fltr_rule_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+/* sending filter-installed-notify-request to modem*/
+static inline int ipa3_qmi_filter_notify_send(
+	struct ipa_fltr_installed_notif_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_enable_force_clear_datapath_send(
+	struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_disable_force_clear_datapath_send(
+	struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_copy_ul_filter_rule_to_ipa(
+	struct ipa_install_fltr_rule_req_msg_v01 *rule_req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_wwan_update_mux_channel_prop(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_wan_ioctl_init(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { }
+
+static inline void ipa3_wan_ioctl_deinit(void) { }
+
+static inline void ipa3_qmi_stop_workqueues(void) { }
+
+static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa3_poll_tethering_stats(
+	struct wan_ioctl_poll_tethering_stats *data)
+{
+	return -EPERM;
+}
+
+static inline int rmnet_ipa3_set_data_quota(
+	struct wan_ioctl_set_data_quota *data)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id) { }
+
+static inline int ipa3_qmi_get_data_stats(
+	struct ipa_get_data_stats_req_msg_v01 *req,
+	struct ipa_get_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_get_network_stats(
+	struct ipa_get_apn_data_stats_req_msg_v01 *req,
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_set_data_quota(
+	struct ipa_set_data_usage_quota_req_msg_v01 *req)
+{
+	return -EPERM;
+}
+
+static inline int ipa3_qmi_stop_data_qouta(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+
+#endif /* CONFIG_RMNET_IPA3 */
+
+#endif /* IPA_QMI_SERVICE_H */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
new file mode 100644
index 0000000..6907811
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service_v01.c
@@ -0,0 +1,2746 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/qmi_encdec.h>
+#include <linux/ipa_qmi_service_v01.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+/* Type Definitions  */
+static struct elem_info ipa3_hdr_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_hdr_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_route_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					route_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_route_tbl_info_type_v01,
+					num_indices),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_modem_mem_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					block_start_addr),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_modem_mem_info_type_v01,
+					size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01,
+			modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_zip_tbl_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_start),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_zip_tbl_info_type_v01,
+					modem_offset_end),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_range_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_low),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_range_eq_16_type_v01,
+			range_high),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_mask_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+				struct ipa_ipfltr_mask_eq_32_type_v01,
+				mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_32_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_eq_16_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_eq_16_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_16_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_eq_32_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_ipfltr_eq_32_type_v01,
+					value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_ipfltr_mask_eq_128_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			offset),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			mask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 16,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_ipfltr_mask_eq_128_type_v01,
+			value),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_filter_rule_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			rule_eq_bitmap),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			tos_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tos_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					protocol_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_range_16),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_range_eq_16_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_range_16),
+		.ei_array	= ipa3_ipfltr_range_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					offset_meq_32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					tc_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq_present),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					flow_eq),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_16_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_16),
+		.ei_array	= ipa3_ipfltr_eq_16_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_eq_32_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_eq_32),
+		.ei_array	= ipa3_ipfltr_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_ihl_offset_meq_32),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ihl_offset_meq_32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					num_offset_meq_128),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	=
+			QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01,
+		.elem_size	= sizeof(
+			struct ipa_ipfltr_mask_eq_128_type_v01),
+		.is_array	= STATIC_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_type_v01,
+			offset_meq_128),
+		.ei_array	= ipa3_ipfltr_mask_eq_128_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32_present),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_ipfltr_mask_eq_32_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					metadata_meq32),
+		.ei_array	= ipa3_ipfltr_mask_eq_32_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_rule_type_v01,
+					ipv4_frag_eq_present),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_spec_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa_filter_spec_ex_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					ip_type),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_filter_rule_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					filter_rule),
+		.ei_array	= ipa3_filter_rule_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					filter_action),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_routing_table_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					route_table_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					rule_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_filter_spec_ex_type_v01,
+					is_rule_hashable),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct
+elem_info ipa3_filter_rule_identifier_to_handle_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_spec_identifier),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_filter_handle_to_index_map_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_handle),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(
+			struct ipa_filter_handle_to_index_map_v01,
+			filter_index),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
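+/*
+ * Message-level tables. Per QMI convention, TLV types 0x01/0x02 carry
+ * mandatory fields (0x02 being the standard response result), while
+ * types 0x10 and above are optional: each optional field is preceded by
+ * a QMI_OPT_FLAG entry that maps to its *_valid flag in the C structure.
+ */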
+struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			platform_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_hdr_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_tbl_info),
+		.ei_array	= ipa3_hdr_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_modem_mem_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			modem_mem_info),
+		.ei_array	= ipa3_modem_mem_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			is_ssr_bootup),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(
+			struct ipa_hdr_proc_ctx_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			hdr_proc_ctx_tbl_info),
+		.ei_array	= ipa3_hdr_proc_ctx_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_zip_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			zip_tbl_info),
+		.ei_array	= ipa3_zip_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_route_tbl_info_valid),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct ipa_route_tbl_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_route_tbl_info),
+		.ei_array	= ipa3_route_tbl_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v4_hash_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_filter_tbl_start_addr_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_req_msg_v01,
+			v6_hash_filter_tbl_start_addr),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			ctrl_comm_dest_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			default_end_pt),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			modem_driver_init_pending_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_resp_msg_v01,
+			modem_driver_init_pending),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_cmplt_req_msg_v01,
+			status),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_init_modem_driver_cmplt_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			master_driver_init_complete),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_indication_reg_req_msg_v01,
+			data_usage_quota_reached),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_indication_reg_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(struct
+			ipa_master_driver_init_complt_ind_msg_v01,
+			master_driver_init_status),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
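+/*
+ * Variable-length lists follow the QMI_DATA_LEN pattern: the length entry
+ * decodes into the *_len member, and the matching VAR_LEN_ARRAY entry
+ * (bounded here by QMI_IPA_MAX_FILTERS_V01) decodes the list itself.
+ */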
+struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_list),
+		.ei_array	= ipa_filter_spec_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			xlat_filter_indices_list),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_filter_spec_ex_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_req_msg_v01,
+			filter_spec_ex_list),
+		.ei_array	= ipa_filter_spec_ex_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			resp),
+		.ei_array       = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_rule_identifier_to_handle_map_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			filter_handle_list),
+		.ei_array	=
+			ipa3_filter_rule_identifier_to_handle_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_install_fltr_rule_resp_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			source_pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_2_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			install_status),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(
+			struct ipa_filter_handle_to_index_map_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x03,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			filter_index_list),
+		.ei_array	= ipa3_filter_handle_to_index_map_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_pipe_index),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			retain_header),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			embedded_call_mux_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv4_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			num_ipv6_filters),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv4_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			start_ipv6_filter_idx),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_req_msg_v01,
+			rule_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_fltr_installed_notif_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			source_pipe_bitmask),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_req_msg_v01,
+			throttle_source),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_enable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_req_msg_v01,
+			request_id),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_disable_force_clear_datapath_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_config_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_deaggr_supported),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			max_aggr_frame_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+					max_aggr_frame_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ipa_ingress_pipe_mode),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x14,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			peripheral_speed_info),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x15,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x16,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_pkt_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x17,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_accumulation_byte_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x18,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_accumulation_time_limit),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x19,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			hw_control_flags),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1A,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1B,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_msi_event_threshold),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_fifo_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1C,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			ul_fifo_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_fifo_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1D,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_fifo_size),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_buf_size_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x1E,
+		.offset		= offsetof(
+			struct ipa_config_req_msg_v01,
+			dl_buf_size),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_config_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_config_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats_valid),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_1_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_req_msg_v01,
+			reset_stats),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_pipe_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					pipe_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv4_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct ipa_pipe_stats_info_type_v01,
+					num_ipv6_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_stats_type_filter_rule_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					filter_rule_index),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_stats_type_filter_rule_v01,
+					num_packets),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type_valid),
+	},
+	{
+		.data_type	= QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ipa_stats_type),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x11,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			ul_src_pipe_stats_list),
+		.ei_array	= ipa3_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_PIPES_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x12,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_dst_pipe_stats_list),
+		.ei_array	= ipa3_pipe_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_FILTERS_V01,
+		.elem_size	= sizeof(struct ipa_pipe_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x13,
+		.offset		= offsetof(
+			struct ipa_get_data_stats_resp_msg_v01,
+			dl_filter_rule_stats_list),
+		.ei_array	= ipa3_stats_type_filter_rule_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_apn_data_stats_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_ul_bytes),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_packets),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_apn_data_stats_info_type_v01,
+					num_dl_bytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list_len),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_req_msg_v01,
+			mux_id_list),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_apn_data_stats_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_get_apn_data_stats_resp_msg_v01,
+			apn_data_stats_list),
+		.ei_array	= ipa3_apn_data_stats_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static struct elem_info ipa3_data_usage_quota_info_type_data_v01_ei[] = {
+	{
+		.data_type	= QMI_UNSIGNED_4_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint32_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					mux_id),
+	},
+	{
+		.data_type	= QMI_UNSIGNED_8_BYTE,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint64_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct
+					ipa_data_usage_quota_info_type_v01,
+					num_Mbytes),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_OPT_FLAG,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_valid),
+	},
+	{
+		.data_type	= QMI_DATA_LEN,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint8_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list_len),
+	},
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= QMI_IPA_MAX_APN_V01,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.is_array	= VAR_LEN_ARRAY,
+		.tlv_type	= 0x10,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_req_msg_v01,
+			apn_quota_list),
+		.ei_array	= ipa3_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_set_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct
+					ipa_data_usage_quota_info_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x01,
+		.offset		= offsetof(
+			struct ipa_data_usage_quota_reached_ind_msg_v01,
+			apn),
+		.ei_array	= ipa3_data_usage_quota_info_type_data_v01_ei,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[] = {
+	/* ipa_stop_data_usage_quota_req_msg is empty */
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[] = {
+	{
+		.data_type	= QMI_STRUCT,
+		.elem_len	= 1,
+		.elem_size	= sizeof(struct qmi_response_type_v01),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= 0x02,
+		.offset		= offsetof(
+			struct ipa_stop_data_usage_quota_resp_msg_v01,
+			resp),
+		.ei_array	= get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+	},
+};
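+
+/*
+ * Note: each elem_info array above describes the TLV wire layout of one QMI
+ * message for the generic QMI encode/decode engine. A QMI_EOTI entry
+ * terminates every array, and each VAR_LEN_ARRAY member is preceded by a
+ * QMI_DATA_LEN element (same tlv_type) carrying the actual element count.
+ */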
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
new file mode 100644
index 0000000..273877c
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -0,0 +1,1795 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/idr.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+
+#define IPA_RT_INDEX_BITMAP_SIZE	(32)
+#define IPA_RT_STATUS_OF_ADD_FAILED	(-1)
+#define IPA_RT_STATUS_OF_DEL_FAILED	(-1)
+#define IPA_RT_STATUS_OF_MDFY_FAILED (-1)
+
+#define IPA_RT_GET_RULE_TYPE(__entry) \
+	( \
+	((__entry)->rule.hashable) ? \
+	(IPA_RULE_HASHABLE) : (IPA_RULE_NON_HASHABLE) \
+	)
+
+/**
+ * ipa_generate_rt_hw_rule() - generate a single RT H/W rule
+ *  This function does the core driver preparation work and then calls
+ *  the HAL layer for the actual rule generation.
+ * @ip: the ip address family type
+ * @entry: routing entry
+ * @buf: output buffer; buf == NULL means the caller only wants to know
+ *	the size of the rule as seen by HW, so a scratch buffer is used
+ *	instead. With this scheme the rule is generated twice: once with
+ *	a scratch buffer just to learn the size, and a second time to
+ *	write the rule into the caller-supplied buffer of the required
+ *	size.
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ */
+static int ipa_generate_rt_hw_rule(enum ipa_ip_type ip,
+	struct ipa3_rt_entry *entry, u8 *buf)
+{
+	struct ipahal_rt_rule_gen_params gen_params;
+	int res = 0;
+
+	memset(&gen_params, 0, sizeof(gen_params));
+
+	gen_params.ipt = ip;
+	gen_params.dst_pipe_idx = ipa3_get_ep_mapping(entry->rule.dst);
+	if (gen_params.dst_pipe_idx == -1) {
+		IPAERR("Wrong destination pipe specified in RT rule\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+	if (!IPA_CLIENT_IS_CONS(entry->rule.dst)) {
+		IPAERR("No RT rule on IPA_client_producer pipe.\n");
+		IPAERR("pipe_idx: %d dst_pipe: %d\n",
+				gen_params.dst_pipe_idx, entry->rule.dst);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	if (entry->proc_ctx || (entry->hdr && entry->hdr->is_hdr_proc_ctx)) {
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx;
+
+		proc_ctx = (entry->proc_ctx) ? : entry->hdr->proc_ctx;
+		gen_params.hdr_lcl = ipa3_ctx->hdr_proc_ctx_tbl_lcl;
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		gen_params.hdr_ofst = proc_ctx->offset_entry->offset +
+			ipa3_ctx->hdr_proc_ctx_tbl.start_offset;
+	} else if (entry->hdr) {
+		gen_params.hdr_lcl = ipa3_ctx->hdr_tbl_lcl;
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		gen_params.hdr_ofst = entry->hdr->offset_entry->offset;
+	} else {
+		gen_params.hdr_type = IPAHAL_RT_RULE_HDR_NONE;
+		gen_params.hdr_ofst = 0;
+	}
+
+	gen_params.priority = entry->prio;
+	gen_params.id = entry->rule_id;
+	gen_params.rule = (const struct ipa_rt_rule *)&entry->rule;
+
+	res = ipahal_rt_generate_hw_rule(&gen_params, &entry->hw_len, buf);
+	if (res)
+		IPAERR("failed to generate rt h/w rule\n");
+
+	return res;
+}
+
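+/*
+ * Illustrative sketch (not part of the driver flow) of the two-pass scheme
+ * described above; the caller is assumed to hold the needed locks and "buf"
+ * is a hypothetical, caller-managed buffer:
+ *
+ *	pass 1 - size only, fills entry->hw_len:
+ *		if (ipa_generate_rt_hw_rule(ip, entry, NULL))
+ *			return -EPERM;
+ *	pass 2 - encode into a buffer of the reported size:
+ *		buf = kzalloc(entry->hw_len, GFP_KERNEL);
+ *		if (buf && ipa_generate_rt_hw_rule(ip, entry, buf))
+ *			return -EPERM;
+ */
+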
+/**
+ * ipa_translate_rt_tbl_to_hw_fmt() - translate the routing driver structures
+ *  (rules and tables) to HW format and fill it in the given buffers
+ * @ip: the ip address family type
+ * @rlt: the type of the rules to translate (hashable or non-hashable)
+ * @base: the rules body buffer to be filled
+ * @hdr: the rules header (addresses/offsets) buffer to be filled
+ * @body_ofst: the offset of the rules body from the rules header at
+ *  ipa sram (for local body usage)
+ * @apps_start_idx: the first rt table index of apps tables
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * caller needs to hold any needed locks to ensure integrity
+ *
+ */
+static int ipa_translate_rt_tbl_to_hw_fmt(enum ipa_ip_type ip,
+	enum ipa_rule_type rlt, u8 *base, u8 *hdr,
+	u32 body_ofst, u32 apps_start_idx)
+{
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_tbl *tbl;
+	struct ipa_mem_buffer tbl_mem;
+	u8 *tbl_mem_buf;
+	struct ipa3_rt_entry *entry;
+	int res;
+	u64 offset;
+	u8 *body_i;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	body_i = base;
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (tbl->sz[rlt] == 0)
+			continue;
+		if (tbl->in_sys[rlt]) {
+			/* only body (no header) */
+			tbl_mem.size = tbl->sz[rlt] -
+				ipahal_get_hw_tbl_hdr_width();
+			if (ipahal_fltrt_allocate_hw_sys_tbl(&tbl_mem)) {
+				IPAERR("fail to alloc sys tbl of size %d\n",
+					tbl_mem.size);
+				goto err;
+			}
+
+			if (ipahal_fltrt_write_addr_to_hdr(tbl_mem.phys_base,
+				hdr, tbl->idx - apps_start_idx, true)) {
+				IPAERR("fail to wrt sys tbl addr to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			tbl_mem_buf = tbl_mem.base;
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa_generate_rt_hw_rule(ip, entry,
+					tbl_mem_buf);
+				if (res) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto hdr_update_fail;
+				}
+				tbl_mem_buf += entry->hw_len;
+			}
+
+			if (tbl->curr_mem[rlt].phys_base) {
+				WARN_ON(tbl->prev_mem[rlt].phys_base);
+				tbl->prev_mem[rlt] = tbl->curr_mem[rlt];
+			}
+			tbl->curr_mem[rlt] = tbl_mem;
+		} else {
+			offset = body_i - base + body_ofst;
+
+			/* update the hdr at the right index */
+			if (ipahal_fltrt_write_addr_to_hdr(offset, hdr,
+				tbl->idx - apps_start_idx, true)) {
+				IPAERR("fail to wrt lcl tbl ofst to hdr\n");
+				goto hdr_update_fail;
+			}
+
+			/* generate the rule-set */
+			list_for_each_entry(entry, &tbl->head_rt_rule_list,
+					link) {
+				if (IPA_RT_GET_RULE_TYPE(entry) != rlt)
+					continue;
+				res = ipa_generate_rt_hw_rule(ip, entry,
+					body_i);
+				if (res) {
+					IPAERR("failed to gen HW RT rule\n");
+					goto err;
+				}
+				body_i += entry->hw_len;
+			}
+
+			/*
+			 * advance body_i to the next table alignment, as
+			 * local tables are ordered back-to-back
+			 */
+			body_i += ipahal_get_lcl_tbl_addr_alignment();
+			body_i = (u8 *)((long)body_i &
+				~ipahal_get_lcl_tbl_addr_alignment());
+		}
+	}
+
+	return 0;
+
+hdr_update_fail:
+	ipahal_free_dma_mem(&tbl_mem);
+err:
+	return -EPERM;
+}
+
+static void __ipa_reap_sys_rt_tbls(enum ipa_ip_type ip)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_tbl *next;
+	struct ipa3_rt_tbl_set *set;
+	int i;
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+			if (tbl->prev_mem[i].phys_base) {
+				IPADBG_LOW(
+				"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+				tbl->name, ip, i);
+				ipahal_free_dma_mem(&tbl->prev_mem[i]);
+				memset(&tbl->prev_mem[i], 0,
+					sizeof(tbl->prev_mem[i]));
+			}
+		}
+	}
+
+	set = &ipa3_ctx->reap_rt_tbl_set[ip];
+	list_for_each_entry_safe(tbl, next, &set->head_rt_tbl_list, link) {
+		for (i = 0; i < IPA_RULE_TYPE_MAX; i++) {
+			WARN_ON(tbl->prev_mem[i].phys_base != 0);
+			if (tbl->curr_mem[i].phys_base) {
+				IPADBG_LOW(
+				"reaping sys rt tbl name=%s ip=%d rlt=%d\n",
+				tbl->name, ip, i);
+				ipahal_free_dma_mem(&tbl->curr_mem[i]);
+			}
+		}
+		list_del(&tbl->link);
+		kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+	}
+}
+
+/**
+ * ipa_prep_rt_tbl_for_cmt() - prepare the rt table for commit:
+ *  assign priorities to the rules, calculate their sizes and the
+ *  overall table size
+ * @ip: the ip address family type
+ * @tbl: the rt tbl to be prepared
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_prep_rt_tbl_for_cmt(enum ipa_ip_type ip,
+	struct ipa3_rt_tbl *tbl)
+{
+	struct ipa3_rt_entry *entry;
+	int prio_i;
+	int res;
+	int max_prio;
+	u32 hdr_width;
+
+	tbl->sz[IPA_RULE_HASHABLE] = 0;
+	tbl->sz[IPA_RULE_NON_HASHABLE] = 0;
+
+	max_prio = ipahal_get_rule_max_priority();
+
+	prio_i = max_prio;
+	list_for_each_entry(entry, &tbl->head_rt_rule_list, link) {
+
+		if (entry->rule.max_prio) {
+			entry->prio = max_prio;
+		} else {
+			if (ipahal_rule_decrease_priority(&prio_i)) {
+				IPAERR("cannot decrease rule priority - %d\n",
+					prio_i);
+				return -EPERM;
+			}
+			entry->prio = prio_i;
+		}
+
+		res = ipa_generate_rt_hw_rule(ip, entry, NULL);
+		if (res) {
+			IPAERR("failed to calculate HW RT rule size\n");
+			return -EPERM;
+		}
+
+		IPADBG_LOW("RT rule id (handle) %d hw_len %u priority %u\n",
+			entry->id, entry->hw_len, entry->prio);
+
+		if (entry->rule.hashable)
+			tbl->sz[IPA_RULE_HASHABLE] += entry->hw_len;
+		else
+			tbl->sz[IPA_RULE_NON_HASHABLE] += entry->hw_len;
+	}
+
+	if ((tbl->sz[IPA_RULE_HASHABLE] +
+		tbl->sz[IPA_RULE_NON_HASHABLE]) == 0) {
+		WARN_ON(1);
+		IPAERR("rt tbl %s has zero total size\n", tbl->name);
+	}
+
+	hdr_width = ipahal_get_hw_tbl_hdr_width();
+
+	if (tbl->sz[IPA_RULE_HASHABLE])
+		tbl->sz[IPA_RULE_HASHABLE] += hdr_width;
+	if (tbl->sz[IPA_RULE_NON_HASHABLE])
+		tbl->sz[IPA_RULE_NON_HASHABLE] += hdr_width;
+
+	IPADBG("RT tbl index %u hash_sz %u non-hash sz %u\n", tbl->idx,
+		tbl->sz[IPA_RULE_HASHABLE], tbl->sz[IPA_RULE_NON_HASHABLE]);
+
+	return 0;
+}
+
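+/*
+ * Note on the priority assignment below: rules flagged with max_prio all get
+ * the maximum rule priority from ipahal_get_rule_max_priority(), while every
+ * other rule gets the next lower priority via ipahal_rule_decrease_priority(),
+ * so the relative order of the non-max rules in the list is preserved by the
+ * HW lookup. The exact numeric values are HAL-specific.
+ */
+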
+/**
+ * ipa_generate_rt_hw_tbl_img() - generate the rt hw tbl images.
+ *  Headers and bodies are created in DMA buffers; headers and local bodies
+ *  are later copied into the local memory (sram), sys bodies stay in DDR
+ * @ip: the ip address family type
+ * @alloc_params: IN/OUT parameters holding info regarding the table headers
+ *  and bodies on DDR (DMA buffers), and the allocation info needed by
+ *  the HAL
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_generate_rt_hw_tbl_img(enum ipa_ip_type ip,
+	struct ipahal_fltrt_alloc_imgs_params *alloc_params)
+{
+	u32 hash_bdy_start_ofst, nhash_bdy_start_ofst;
+	u32 apps_start_idx;
+	int rc = 0;
+
+	if (ip == IPA_IP_v4) {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_nhash_ofst) -
+			IPA_MEM_PART(v4_rt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v4_rt_hash_ofst) -
+			IPA_MEM_PART(v4_rt_hash_ofst);
+		apps_start_idx = IPA_MEM_PART(v4_apps_rt_index_lo);
+	} else {
+		nhash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_nhash_ofst) -
+			IPA_MEM_PART(v6_rt_nhash_ofst);
+		hash_bdy_start_ofst = IPA_MEM_PART(apps_v6_rt_hash_ofst) -
+			IPA_MEM_PART(v6_rt_hash_ofst);
+		apps_start_idx = IPA_MEM_PART(v6_apps_rt_index_lo);
+	}
+
+	if (ipahal_fltrt_allocate_hw_tbl_imgs(alloc_params)) {
+		IPAERR("fail to allocate RT HW TBL images. IP %d\n", ip);
+		rc = -ENOMEM;
+		goto allocate_fail;
+	}
+
+	if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_HASHABLE,
+		alloc_params->hash_bdy.base, alloc_params->hash_hdr.base,
+		hash_bdy_start_ofst, apps_start_idx)) {
+		IPAERR("fail to translate hashable rt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+	if (ipa_translate_rt_tbl_to_hw_fmt(ip, IPA_RULE_NON_HASHABLE,
+		alloc_params->nhash_bdy.base, alloc_params->nhash_hdr.base,
+		nhash_bdy_start_ofst, apps_start_idx)) {
+		IPAERR("fail to translate non-hashable rt tbls to hw format\n");
+		rc = -EPERM;
+		goto translate_fail;
+	}
+
+	return rc;
+
+translate_fail:
+	if (alloc_params->hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params->hash_hdr);
+	ipahal_free_dma_mem(&alloc_params->nhash_hdr);
+	if (alloc_params->hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->hash_bdy);
+	if (alloc_params->nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params->nhash_bdy);
+allocate_fail:
+	return rc;
+}
+
+/**
+ * ipa_rt_valid_lcl_tbl_size() - validate that the space allocated for rt tbl
+ *  bodies at the sram is enough for the commit
+ * @ipt: the ip address family type
+ * @rlt: the rule type (hashable or non-hashable)
+ * @bdy: the table body buffer to be checked against the available space
+ *
+ * Return: true if enough space available or false in other cases
+ */
+static bool ipa_rt_valid_lcl_tbl_size(enum ipa_ip_type ipt,
+	enum ipa_rule_type rlt, struct ipa_mem_buffer *bdy)
+{
+	u16 avail;
+
+	if (ipt == IPA_IP_v4)
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v4_rt_hash_size) :
+			IPA_MEM_PART(apps_v4_rt_nhash_size);
+	else
+		avail = (rlt == IPA_RULE_HASHABLE) ?
+			IPA_MEM_PART(apps_v6_rt_hash_size) :
+			IPA_MEM_PART(apps_v6_rt_nhash_size);
+
+	if (bdy->size <= avail)
+		return true;
+
+	IPAERR("tbl too big, needed %d avail %d ipt %d rlt %d\n",
+		bdy->size, avail, ipt, rlt);
+	return false;
+}
+
+/**
+ * __ipa_commit_rt_v3() - commit rt tables to the hw
+ * commit the headers and, if local, the bodies, with internal cache flushing
+ * @ip: the ip address family type
+ *
+ * Return: 0 on success, negative on failure
+ */
+int __ipa_commit_rt_v3(enum ipa_ip_type ip)
+{
+	struct ipa3_desc desc[5];
+	struct ipahal_imm_cmd_register_write reg_write_cmd = {0};
+	struct ipahal_imm_cmd_dma_shared_mem  mem_cmd = {0};
+	struct ipahal_imm_cmd_pyld *cmd_pyld[5];
+	int num_cmd = 0;
+	struct ipahal_fltrt_alloc_imgs_params alloc_params;
+	u32 num_modem_rt_index;
+	int rc = 0;
+	u32 lcl_hash_hdr, lcl_nhash_hdr;
+	u32 lcl_hash_bdy, lcl_nhash_bdy;
+	bool lcl_hash, lcl_nhash;
+	struct ipahal_reg_fltrt_hash_flush flush;
+	struct ipahal_reg_valmask valmask;
+	int i;
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_tbl *tbl;
+	u32 tbl_hdr_width;
+
+	tbl_hdr_width = ipahal_get_hw_tbl_hdr_width();
+	memset(desc, 0, sizeof(desc));
+	memset(cmd_pyld, 0, sizeof(cmd_pyld));
+	memset(&alloc_params, 0, sizeof(alloc_params));
+	alloc_params.ipt = ip;
+
+	if (ip == IPA_IP_v4) {
+		num_modem_rt_index =
+			IPA_MEM_PART(v4_modem_rt_index_hi) -
+			IPA_MEM_PART(v4_modem_rt_index_lo) + 1;
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_hash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v4_rt_nhash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v4_rt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip4_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip4_rt_tbl_nhash_lcl;
+		alloc_params.tbls_num = IPA_MEM_PART(v4_apps_rt_index_hi) -
+			IPA_MEM_PART(v4_apps_rt_index_lo) + 1;
+	} else {
+		num_modem_rt_index =
+			IPA_MEM_PART(v6_modem_rt_index_hi) -
+			IPA_MEM_PART(v6_modem_rt_index_lo) + 1;
+		lcl_hash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_hash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_nhash_hdr = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(v6_rt_nhash_ofst) +
+			num_modem_rt_index * tbl_hdr_width;
+		lcl_hash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_hash_ofst);
+		lcl_nhash_bdy = ipa3_ctx->smem_restricted_bytes +
+			IPA_MEM_PART(apps_v6_rt_nhash_ofst);
+		lcl_hash = ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		lcl_nhash = ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+		alloc_params.tbls_num = IPA_MEM_PART(v6_apps_rt_index_hi) -
+			IPA_MEM_PART(v6_apps_rt_index_lo) + 1;
+	}
+
+	if (!ipa3_ctx->rt_idx_bitmap[ip]) {
+		IPAERR("no rt tbls present\n");
+		rc = -EPERM;
+		goto no_rt_tbls;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+		if (ipa_prep_rt_tbl_for_cmt(ip, tbl)) {
+			rc = -EPERM;
+			goto no_rt_tbls;
+		}
+		if (!tbl->in_sys[IPA_RULE_HASHABLE] &&
+			tbl->sz[IPA_RULE_HASHABLE]) {
+			alloc_params.num_lcl_hash_tbls++;
+			alloc_params.total_sz_lcl_hash_tbls +=
+				tbl->sz[IPA_RULE_HASHABLE];
+			alloc_params.total_sz_lcl_hash_tbls -= tbl_hdr_width;
+		}
+		if (!tbl->in_sys[IPA_RULE_NON_HASHABLE] &&
+			tbl->sz[IPA_RULE_NON_HASHABLE]) {
+			alloc_params.num_lcl_nhash_tbls++;
+			alloc_params.total_sz_lcl_nhash_tbls +=
+				tbl->sz[IPA_RULE_NON_HASHABLE];
+			alloc_params.total_sz_lcl_nhash_tbls -= tbl_hdr_width;
+		}
+	}
+
+	if (ipa_generate_rt_hw_tbl_img(ip, &alloc_params)) {
+		IPAERR("fail to generate RT HW TBL images. IP %d\n", ip);
+		rc = -EFAULT;
+		goto no_rt_tbls;
+	}
+
+	if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_HASHABLE,
+		&alloc_params.hash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+	if (!ipa_rt_valid_lcl_tbl_size(ip, IPA_RULE_NON_HASHABLE,
+		&alloc_params.nhash_bdy)) {
+		rc = -EFAULT;
+		goto fail_size_valid;
+	}
+
+	/* flushing ipa internal hashable rt rules cache */
+	memset(&flush, 0, sizeof(flush));
+	if (ip == IPA_IP_v4)
+		flush.v4_rt = true;
+	else
+		flush.v6_rt = true;
+	ipahal_get_fltrt_hash_flush_valmask(&flush, &valmask);
+	reg_write_cmd.skip_pipeline_clear = false;
+	reg_write_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	reg_write_cmd.offset = ipahal_get_reg_ofst(IPA_FILT_ROUT_HASH_FLUSH);
+	reg_write_cmd.value = valmask.val;
+	reg_write_cmd.value_mask = valmask.mask;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_REGISTER_WRITE, &reg_write_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct register_write imm cmd. IP %d\n", ip);
+		goto fail_size_valid;
+	}
+	desc[num_cmd].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	mem_cmd.is_read = false;
+	mem_cmd.skip_pipeline_clear = false;
+	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd.size = alloc_params.nhash_hdr.size;
+	mem_cmd.system_addr = alloc_params.nhash_hdr.phys_base;
+	mem_cmd.local_addr = lcl_nhash_hdr;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+		goto fail_imm_cmd_construct;
+	}
+	desc[num_cmd].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	mem_cmd.is_read = false;
+	mem_cmd.skip_pipeline_clear = false;
+	mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+	mem_cmd.size = alloc_params.hash_hdr.size;
+	mem_cmd.system_addr = alloc_params.hash_hdr.phys_base;
+	mem_cmd.local_addr = lcl_hash_hdr;
+	cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+	if (!cmd_pyld[num_cmd]) {
+		IPAERR("fail construct dma_shared_mem imm cmd. IP %d\n", ip);
+		goto fail_imm_cmd_construct;
+	}
+	desc[num_cmd].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+	desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+	desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+	desc[num_cmd].type = IPA_IMM_CMD_DESC;
+	num_cmd++;
+
+	if (lcl_nhash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.nhash_bdy.size;
+		mem_cmd.system_addr = alloc_params.nhash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_nhash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+				ip);
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd].type = IPA_IMM_CMD_DESC;
+		num_cmd++;
+	}
+	if (lcl_hash) {
+		mem_cmd.is_read = false;
+		mem_cmd.skip_pipeline_clear = false;
+		mem_cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
+		mem_cmd.size = alloc_params.hash_bdy.size;
+		mem_cmd.system_addr = alloc_params.hash_bdy.phys_base;
+		mem_cmd.local_addr = lcl_hash_bdy;
+		cmd_pyld[num_cmd] = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_SHARED_MEM, &mem_cmd, false);
+		if (!cmd_pyld[num_cmd]) {
+			IPAERR("fail construct dma_shared_mem cmd. IP %d\n",
+				ip);
+			goto fail_imm_cmd_construct;
+		}
+		desc[num_cmd].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_DMA_SHARED_MEM);
+		desc[num_cmd].pyld = cmd_pyld[num_cmd]->data;
+		desc[num_cmd].len = cmd_pyld[num_cmd]->len;
+		desc[num_cmd].type = IPA_IMM_CMD_DESC;
+		num_cmd++;
+	}
+
+	if (ipa3_send_cmd(num_cmd, desc)) {
+		IPAERR("fail to send immediate command\n");
+		rc = -EFAULT;
+		goto fail_imm_cmd_construct;
+	}
+
+	IPADBG_LOW("Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.hash_hdr.base,
+		alloc_params.hash_hdr.phys_base, alloc_params.hash_hdr.size);
+
+	IPADBG_LOW("Non-Hashable HEAD\n");
+	IPA_DUMP_BUFF(alloc_params.nhash_hdr.base,
+		alloc_params.nhash_hdr.phys_base, alloc_params.nhash_hdr.size);
+
+	if (alloc_params.hash_bdy.size) {
+		IPADBG_LOW("Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.hash_bdy.base,
+			alloc_params.hash_bdy.phys_base,
+			alloc_params.hash_bdy.size);
+	}
+
+	if (alloc_params.nhash_bdy.size) {
+		IPADBG_LOW("Non-Hashable BODY\n");
+		IPA_DUMP_BUFF(alloc_params.nhash_bdy.base,
+			alloc_params.nhash_bdy.phys_base,
+			alloc_params.nhash_bdy.size);
+	}
+
+	__ipa_reap_sys_rt_tbls(ip);
+
+fail_imm_cmd_construct:
+	for (i = 0 ; i < num_cmd ; i++)
+		ipahal_destroy_imm_cmd(cmd_pyld[i]);
+fail_size_valid:
+	if (alloc_params.hash_hdr.size)
+		ipahal_free_dma_mem(&alloc_params.hash_hdr);
+	ipahal_free_dma_mem(&alloc_params.nhash_hdr);
+	if (alloc_params.hash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.hash_bdy);
+	if (alloc_params.nhash_bdy.size)
+		ipahal_free_dma_mem(&alloc_params.nhash_bdy);
+
+no_rt_tbls:
+	return rc;
+}
+
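+/*
+ * Note on the descriptor budget in __ipa_commit_rt_v3() above: desc[5] and
+ * cmd_pyld[5] cover the worst case of one REGISTER_WRITE (hash flush) plus
+ * up to four DMA_SHARED_MEM commands (non-hash header, hash header, and
+ * optionally the non-hash and hash bodies when the tables are SRAM-resident).
+ */
+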
+/**
+ * __ipa3_find_rt_tbl() - find the routing table
+ *			whose name is given as a parameter
+ * @ip:	[in] the ip address family type of the wanted routing table
+ * @name:	[in] the name of the wanted routing table
+ *
+ * Returns: the routing table whose name is given as a parameter, or NULL if it
+ * doesn't exist
+ */
+struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name)
+{
+	struct ipa3_rt_tbl *entry;
+	struct ipa3_rt_tbl_set *set;
+
+	if (strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
+		IPAERR("Name too long: %s\n", name);
+		return NULL;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	list_for_each_entry(entry, &set->head_rt_tbl_list, link) {
+		if (!strcmp(name, entry->name))
+			return entry;
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa3_query_rt_index() - find the routing table index
+ *			whose name and ip type are given as parameters
+ * @in:	[inout] the name and ip type of the wanted routing table; on
+ *	success its index is returned in this structure
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	struct ipa3_rt_tbl *entry;
+
+	if (in->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	/* check if this table exists */
+	entry = __ipa3_find_rt_tbl(in->ip, in->name);
+	if (!entry)
+		return -EFAULT;
+
+	in->idx  = entry->idx;
+	return 0;
+}
+
+static struct ipa3_rt_tbl *__ipa_add_rt_tbl(enum ipa_ip_type ip,
+		const char *name)
+{
+	struct ipa3_rt_tbl *entry;
+	struct ipa3_rt_tbl_set *set;
+	int i;
+	int id;
+	int max_tbl_indx;
+
+	if (name == NULL) {
+		IPAERR("no tbl name\n");
+		goto error;
+	}
+
+	if (ip == IPA_IP_v4) {
+		max_tbl_indx =
+			max(IPA_MEM_PART(v4_modem_rt_index_hi),
+			IPA_MEM_PART(v4_apps_rt_index_hi));
+	} else if (ip == IPA_IP_v6) {
+		max_tbl_indx =
+			max(IPA_MEM_PART(v6_modem_rt_index_hi),
+			IPA_MEM_PART(v6_apps_rt_index_hi));
+	} else {
+		IPAERR("bad ip family type\n");
+		goto error;
+	}
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	/* check if this table exists */
+	entry = __ipa3_find_rt_tbl(ip, name);
+	if (!entry) {
+		entry = kmem_cache_zalloc(ipa3_ctx->rt_tbl_cache, GFP_KERNEL);
+		if (!entry) {
+			IPAERR("failed to alloc RT tbl object\n");
+			goto error;
+		}
+		/* find a routing tbl index */
+		for (i = 0; i < IPA_RT_INDEX_BITMAP_SIZE; i++) {
+			if (!test_bit(i, &ipa3_ctx->rt_idx_bitmap[ip])) {
+				entry->idx = i;
+				set_bit(i, &ipa3_ctx->rt_idx_bitmap[ip]);
+				break;
+			}
+		}
+		if (i == IPA_RT_INDEX_BITMAP_SIZE) {
+			IPAERR("no free RT tbl indices left\n");
+			goto fail_rt_idx_alloc;
+		}
+		if (i > max_tbl_indx) {
+			IPAERR("rt tbl index is above max\n");
+			goto fail_rt_idx_alloc;
+		}
+
+		INIT_LIST_HEAD(&entry->head_rt_rule_list);
+		INIT_LIST_HEAD(&entry->link);
+		strlcpy(entry->name, name, IPA_RESOURCE_NAME_MAX);
+		entry->set = set;
+		entry->cookie = IPA_COOKIE;
+		entry->in_sys[IPA_RULE_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_hash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_hash_lcl;
+		entry->in_sys[IPA_RULE_NON_HASHABLE] = (ip == IPA_IP_v4) ?
+			!ipa3_ctx->ip4_rt_tbl_nhash_lcl :
+			!ipa3_ctx->ip6_rt_tbl_nhash_lcl;
+		set->tbl_cnt++;
+		idr_init(&entry->rule_ids);
+		list_add(&entry->link, &set->head_rt_tbl_list);
+
+		IPADBG("add rt tbl idx=%d tbl_cnt=%d ip=%d\n", entry->idx,
+				set->tbl_cnt, ip);
+
+		id = ipa3_id_alloc(entry);
+		if (id < 0) {
+			IPAERR("failed to add to tree\n");
+			WARN_ON(1);
+		}
+		entry->id = id;
+	}
+
+	return entry;
+
+fail_rt_idx_alloc:
+	entry->cookie = 0;
+	kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+error:
+	return NULL;
+}
+
+static int __ipa_del_rt_tbl(struct ipa3_rt_tbl *entry)
+{
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	u32 id;
+	struct ipa3_rt_tbl_set *rset;
+
+	if (entry == NULL || (entry->cookie != IPA_COOKIE)) {
+		IPAERR("bad parms\n");
+		return -EINVAL;
+	}
+	id = entry->id;
+	if (ipa3_id_find(id) == NULL) {
+		IPAERR("lookup failed\n");
+		return -EPERM;
+	}
+
+	if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+
+	idr_destroy(&entry->rule_ids);
+	if (entry->in_sys[IPA_RULE_HASHABLE] ||
+		entry->in_sys[IPA_RULE_NON_HASHABLE]) {
+		list_move(&entry->link, &rset->head_rt_tbl_list);
+		clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del sys rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+			entry->idx, entry->set->tbl_cnt, ip);
+	} else {
+		list_del(&entry->link);
+		clear_bit(entry->idx, &ipa3_ctx->rt_idx_bitmap[ip]);
+		entry->set->tbl_cnt--;
+		IPADBG("del rt tbl_idx=%d tbl_cnt=%d ip=%d\n",
+			entry->idx, entry->set->tbl_cnt, ip);
+		kmem_cache_free(ipa3_ctx->rt_tbl_cache, entry);
+	}
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+	return 0;
+}
+
+static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
+				struct ipa3_hdr_entry **hdr,
+				struct ipa3_hdr_proc_ctx_entry **proc_ctx)
+{
+	if (rule->hdr_hdl && rule->hdr_proc_ctx_hdl) {
+		IPAERR("rule contains both hdr_hdl and hdr_proc_ctx_hdl\n");
+		return -EPERM;
+	}
+
+	if (rule->hdr_hdl) {
+		*hdr = ipa3_id_find(rule->hdr_hdl);
+		if ((*hdr == NULL) || ((*hdr)->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid hdr\n");
+			return -EPERM;
+		}
+	} else if (rule->hdr_proc_ctx_hdl) {
+		*proc_ctx = ipa3_id_find(rule->hdr_proc_ctx_hdl);
+		if ((*proc_ctx == NULL) ||
+			((*proc_ctx)->cookie != IPA_COOKIE)) {
+
+			IPAERR("rt rule does not point to valid proc ctx\n");
+			return -EPERM;
+		}
+	}
+
+	return 0;
+}
+
+static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
+		const struct ipa_rt_rule *rule,
+		struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
+		struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+{
+	int id;
+
+	*entry = kmem_cache_zalloc(ipa3_ctx->rt_rule_cache, GFP_KERNEL);
+	if (!*entry) {
+		IPAERR("failed to alloc RT rule object\n");
+		goto error;
+	}
+	INIT_LIST_HEAD(&(*entry)->link);
+	(*(entry))->cookie = IPA_COOKIE;
+	(*(entry))->rule = *rule;
+	(*(entry))->tbl = tbl;
+	(*(entry))->hdr = hdr;
+	(*(entry))->proc_ctx = proc_ctx;
+	id = ipa3_alloc_rule_id(&tbl->rule_ids);
+	if (id < 0) {
+		IPAERR("failed to allocate rule id\n");
+		WARN_ON(1);
+		goto alloc_rule_id_fail;
+	}
+	(*(entry))->rule_id = id;
+
+	return 0;
+
+alloc_rule_id_fail:
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, *entry);
+error:
+	return -EPERM;
+}
+
+static int __ipa_finish_rt_rule_add(struct ipa3_rt_entry *entry, u32 *rule_hdl,
+		struct ipa3_rt_tbl *tbl)
+{
+	int id;
+
+	tbl->rule_cnt++;
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+	id = ipa3_id_alloc(entry);
+	if (id < 0) {
+		IPAERR("failed to add to tree\n");
+		WARN_ON(1);
+		goto ipa_insert_failed;
+	}
+	IPADBG("add rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n",
+		tbl->idx, tbl->rule_cnt, entry->rule_id);
+	*rule_hdl = id;
+	entry->id = id;
+
+	return 0;
+
+ipa_insert_failed:
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	else if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+	idr_remove(&tbl->rule_ids, entry->rule_id);
+	list_del(&entry->link);
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+	return -EPERM;
+}
+
+static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
+		const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+		goto error;
+
+
+	tbl = __ipa_add_rt_tbl(ip, name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("failed adding rt tbl name = %s\n",
+			name ? name : "");
+		goto error;
+	}
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+	    (tbl->rule_cnt > 0) && (at_rear != 0)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d at_rear=%d\n",
+		       tbl->rule_cnt, at_rear);
+		goto error;
+	}
+
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+		goto error;
+
+	if (at_rear)
+		list_add_tail(&entry->link, &tbl->head_rt_rule_list);
+	else
+		list_add(&entry->link, &tbl->head_rt_rule_list);
+
+	if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+		goto error;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
+		const struct ipa_rt_rule *rule, u32 *rule_hdl,
+		struct ipa3_rt_entry **add_after_entry)
+{
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (!*add_after_entry)
+		goto error;
+
+	if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
+		goto error;
+
+	if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+		goto error;
+
+	list_add(&entry->link, &((*add_after_entry)->link));
+
+	if (__ipa_finish_rt_rule_add(entry, rule_hdl, tbl))
+		goto error;
+
+	/*
+	 * prepare for next insertion
+	 */
+	*add_after_entry = entry;
+
+	return 0;
+
+error:
+	*add_after_entry = NULL;
+	return -EPERM;
+}
+
+/**
+ * ipa3_add_rt_rule() - Add the specified routing rules to SW and optionally
+ * commit to IPA HW
+ * @rules:	[inout] set of routing rules to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	int i;
+	int ret;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+					&rules->rules[i].rule,
+					rules->rules[i].at_rear,
+					&rules->rules[i].rt_rule_hdl)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
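+/*
+ * Minimal caller-side sketch of feeding a single rule to ipa3_add_rt_rule()
+ * (illustrative only; error handling and copy from user space are omitted,
+ * the struct ipa_rt_rule_add element type is assumed from the msm_ipa UAPI,
+ * and the table name and client are example values):
+ *
+ *	struct ipa_ioc_add_rt_rule *req;
+ *
+ *	req = kzalloc(sizeof(*req) + sizeof(struct ipa_rt_rule_add),
+ *		GFP_KERNEL);
+ *	req->commit = 1;
+ *	req->ip = IPA_IP_v4;
+ *	req->num_rules = 1;
+ *	strlcpy(req->rt_tbl_name, "example_rt_tbl", IPA_RESOURCE_NAME_MAX);
+ *	req->rules[0].at_rear = 1;
+ *	req->rules[0].rule.dst = IPA_CLIENT_APPS_LAN_CONS;
+ *	if (!ipa3_add_rt_rule(req))
+ *		check req->rules[0].status and use req->rules[0].rt_rule_hdl
+ */
+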
+/**
+ * ipa3_add_rt_rule_after() - Add the given routing rules after the
+ * specified rule to SW and optionally commit to IPA HW
+ * @rules:	[inout] set of routing rules to add + handle where to add
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
+{
+	int i;
+	int ret = 0;
+	struct ipa3_rt_tbl *tbl = NULL;
+	struct ipa3_rt_entry *entry = NULL;
+
+	if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+
+	tbl = __ipa3_find_rt_tbl(rules->ip, rules->rt_tbl_name);
+	if (tbl == NULL || (tbl->cookie != IPA_COOKIE)) {
+		IPAERR("failed finding rt tbl name = %s\n",
+			rules->rt_tbl_name ? rules->rt_tbl_name : "");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (tbl->rule_cnt <= 0) {
+		IPAERR("tbl->rule_cnt <= 0");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	entry = ipa3_id_find(rules->add_after_hdl);
+	if (!entry) {
+		IPAERR("failed finding rule %d in rt tbls\n",
+			rules->add_after_hdl);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	if (entry->tbl != tbl) {
+		IPAERR("given rt rule does not match the table\n");
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * do not allow any rules to be added at end of the "default" routing
+	 * tables
+	 */
+	if (!strcmp(tbl->name, IPA_DFLT_RT_TBL_NAME) &&
+			(&entry->link == tbl->head_rt_rule_list.prev)) {
+		IPAERR("cannot add rule at end of tbl rule_cnt=%d\n",
+			tbl->rule_cnt);
+		ret = -EINVAL;
+		goto bail;
+	}
+
+	/*
+	 * we add all rules one after the other; if one insertion fails, it
+	 * cuts the chain (all following rules will receive a fail status)
+	 * since subsequent calls to __ipa_add_rt_rule_after will fail
+	 * (entry == NULL)
+	 */
+
+	for (i = 0; i < rules->num_rules; i++) {
+		if (__ipa_add_rt_rule_after(tbl,
+					&rules->rules[i].rule,
+					&rules->rules[i].rt_rule_hdl,
+					&entry)) {
+			IPAERR("failed to add rt rule %d\n", i);
+			rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+		} else {
+			rules->rules[i].status = 0;
+		}
+	}
+
+	if (rules->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+			IPAERR("failed to commit\n");
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+	goto bail;
+
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+int __ipa3_del_rt_rule(u32 rule_hdl)
+{
+	struct ipa3_rt_entry *entry;
+	int id;
+
+	entry = ipa3_id_find(rule_hdl);
+
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		return -EINVAL;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		return -EINVAL;
+	}
+
+	if (entry->hdr)
+		__ipa3_release_hdr(entry->hdr->id);
+	else if (entry->proc_ctx)
+		__ipa3_release_hdr_proc_ctx(entry->proc_ctx->id);
+	list_del(&entry->link);
+	entry->tbl->rule_cnt--;
+	IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d ref_cnt=%u\n",
+		entry->tbl->idx, entry->tbl->rule_cnt,
+		entry->rule_id, entry->tbl->ref_cnt);
+	idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+	if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
+		if (__ipa_del_rt_tbl(entry->tbl))
+			IPAERR("fail to del RT tbl\n");
+	}
+	entry->cookie = 0;
+	id = entry->id;
+	kmem_cache_free(ipa3_ctx->rt_rule_cache, entry);
+
+	/* remove the handle from the database */
+	ipa3_id_remove(id);
+
+	return 0;
+}
+
+/**
+ * ipa3_del_rt_rule() - Remove the specified routing rules from SW and
+ * optionally commit to IPA HW
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to delete
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	int i;
+	int ret;
+
+	if (hdls == NULL || hdls->num_hdls == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_hdls; i++) {
+		if (__ipa3_del_rt_rule(hdls->hdl[i].hdl)) {
+			IPAERR("failed to del rt rule %i\n", i);
+			hdls->hdl[i].status = IPA_RT_STATUS_OF_DEL_FAILED;
+		} else {
+			hdls->hdl[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			ret = -EPERM;
+			goto bail;
+		}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_commit_rt() - Commit the current SW routing table of specified type
+ * to IPA HW
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_commit_rt(enum ipa_ip_type ip)
+{
+	int ret;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * issue a commit on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa3_commit_flt(ip))
+		return -EPERM;
+
+	mutex_lock(&ipa3_ctx->lock);
+	if (ipa3_ctx->ctrl->ipa3_commit_rt(ip)) {
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa3_reset_rt() - reset the current SW routing table of specified type
+ * (does not commit to HW)
+ * @ip:	The family of routing tables
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_reset_rt(enum ipa_ip_type ip)
+{
+	struct ipa3_rt_tbl *tbl;
+	struct ipa3_rt_tbl *tbl_next;
+	struct ipa3_rt_tbl_set *set;
+	struct ipa3_rt_entry *rule;
+	struct ipa3_rt_entry *rule_next;
+	struct ipa3_rt_tbl_set *rset;
+	u32 apps_start_idx;
+	int id;
+
+	if (ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	if (ip == IPA_IP_v4)
+		apps_start_idx =
+			IPA_MEM_PART(v4_apps_rt_index_lo);
+	else
+		apps_start_idx =
+			IPA_MEM_PART(v6_apps_rt_index_lo);
+
+	/*
+	 * issue a reset on the filtering module of same IP type since
+	 * filtering rules point to routing tables
+	 */
+	if (ipa3_reset_flt(ip))
+		IPAERR("fail to reset flt ip=%d\n", ip);
+
+	set = &ipa3_ctx->rt_tbl_set[ip];
+	rset = &ipa3_ctx->reap_rt_tbl_set[ip];
+	mutex_lock(&ipa3_ctx->lock);
+	IPADBG("reset rt ip=%d\n", ip);
+	list_for_each_entry_safe(tbl, tbl_next, &set->head_rt_tbl_list, link) {
+		list_for_each_entry_safe(rule, rule_next,
+					 &tbl->head_rt_rule_list, link) {
+			if (ipa3_id_find(rule->id) == NULL) {
+				WARN_ON(1);
+				mutex_unlock(&ipa3_ctx->lock);
+				return -EFAULT;
+			}
+
+			/*
+			 * for the "default" routing tbl, remove all but the
+			 *  last rule
+			 */
+			if (tbl->idx == apps_start_idx && tbl->rule_cnt == 1)
+				continue;
+
+			list_del(&rule->link);
+			tbl->rule_cnt--;
+			if (rule->hdr)
+				__ipa3_release_hdr(rule->hdr->id);
+			else if (rule->proc_ctx)
+				__ipa3_release_hdr_proc_ctx(rule->proc_ctx->id);
+			rule->cookie = 0;
+			idr_remove(&tbl->rule_ids, rule->rule_id);
+			id = rule->id;
+			kmem_cache_free(ipa3_ctx->rt_rule_cache, rule);
+
+			/* remove the handle from the database */
+			ipa3_id_remove(id);
+		}
+
+		if (ipa3_id_find(tbl->id) == NULL) {
+			WARN_ON(1);
+			mutex_unlock(&ipa3_ctx->lock);
+			return -EFAULT;
+		}
+		id = tbl->id;
+
+		/* do not remove the "default" routing tbl which has index 0 */
+		if (tbl->idx != apps_start_idx) {
+			idr_destroy(&tbl->rule_ids);
+			if (tbl->in_sys[IPA_RULE_HASHABLE] ||
+				tbl->in_sys[IPA_RULE_NON_HASHABLE]) {
+				list_move(&tbl->link, &rset->head_rt_tbl_list);
+				clear_bit(tbl->idx,
+					  &ipa3_ctx->rt_idx_bitmap[ip]);
+				set->tbl_cnt--;
+				IPADBG("rst sys rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+			} else {
+				list_del(&tbl->link);
+				set->tbl_cnt--;
+				clear_bit(tbl->idx,
+					  &ipa3_ctx->rt_idx_bitmap[ip]);
+				IPADBG("rst rt tbl_idx=%d tbl_cnt=%d\n",
+						tbl->idx, set->tbl_cnt);
+				kmem_cache_free(ipa3_ctx->rt_tbl_cache, tbl);
+			}
+			/* remove the handle from the database */
+			ipa3_id_remove(id);
+		}
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return 0;
+}
+
+/**
+ * ipa3_get_rt_tbl() - lookup the specified routing table and return its handle
+ * if it exists; if the lookup succeeds the routing table ref cnt is increased
+ * @lookup:	[inout] routing table to lookup and its handle
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ *	Caller should call ipa3_put_rt_tbl later if this function succeeds
+ */
+int ipa3_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	struct ipa3_rt_tbl *entry;
+	int result = -EFAULT;
+
+	if (lookup == NULL || lookup->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+	mutex_lock(&ipa3_ctx->lock);
+	entry = __ipa3_find_rt_tbl(lookup->ip, lookup->name);
+	if (entry && entry->cookie == IPA_COOKIE) {
+		entry->ref_cnt++;
+		lookup->hdl = entry->id;
+
+		/* commit for get */
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(lookup->ip))
+			IPAERR("fail to commit RT tbl\n");
+
+		result = 0;
+	}
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_put_rt_tbl() - Release the specified routing table handle
+ * @rt_tbl_hdl:	[in] the routing table handle to release
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	struct ipa3_rt_tbl *entry;
+	enum ipa_ip_type ip = IPA_IP_MAX;
+	int result;
+
+	mutex_lock(&ipa3_ctx->lock);
+	entry = ipa3_id_find(rt_tbl_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if ((entry->cookie != IPA_COOKIE) || entry->ref_cnt == 0) {
+		IPAERR("bad parms\n");
+		result = -EINVAL;
+		goto ret;
+	}
+
+	if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v4])
+		ip = IPA_IP_v4;
+	else if (entry->set == &ipa3_ctx->rt_tbl_set[IPA_IP_v6])
+		ip = IPA_IP_v6;
+	else
+		WARN_ON(1);
+
+	entry->ref_cnt--;
+	if (entry->ref_cnt == 0 && entry->rule_cnt == 0) {
+		IPADBG("zero ref_cnt, delete rt tbl (idx=%u)\n",
+			entry->idx);
+		if (__ipa_del_rt_tbl(entry))
+			IPAERR("fail to del RT tbl\n");
+		/* commit for put */
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(ip))
+			IPAERR("fail to commit RT tbl\n");
+	}
+
+	result = 0;
+
+ret:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
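+/*
+ * Typical pairing of ipa3_get_rt_tbl()/ipa3_put_rt_tbl() above (illustrative
+ * sketch; the table name is a made-up example):
+ *
+ *	struct ipa_ioc_get_rt_tbl lookup;
+ *
+ *	memset(&lookup, 0, sizeof(lookup));
+ *	lookup.ip = IPA_IP_v4;
+ *	strlcpy(lookup.name, "example_rt_tbl", IPA_RESOURCE_NAME_MAX);
+ *	if (!ipa3_get_rt_tbl(&lookup)) {
+ *		... use lookup.hdl while holding the reference ...
+ *		ipa3_put_rt_tbl(lookup.hdl);
+ *	}
+ */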
+
+static int __ipa_mdfy_rt_rule(struct ipa_rt_rule_mdfy *rtrule)
+{
+	struct ipa3_rt_entry *entry;
+	struct ipa3_hdr_entry *hdr = NULL;
+	struct ipa3_hdr_proc_ctx_entry *proc_ctx = NULL;
+
+	if (rtrule->rule.hdr_hdl) {
+		hdr = ipa3_id_find(rtrule->rule.hdr_hdl);
+		if ((hdr == NULL) || (hdr->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid hdr\n");
+			goto error;
+		}
+	} else if (rtrule->rule.hdr_proc_ctx_hdl) {
+		proc_ctx = ipa3_id_find(rtrule->rule.hdr_proc_ctx_hdl);
+		if ((proc_ctx == NULL) || (proc_ctx->cookie != IPA_COOKIE)) {
+			IPAERR("rt rule does not point to valid proc ctx\n");
+			goto error;
+		}
+	}
+
+	entry = ipa3_id_find(rtrule->rt_rule_hdl);
+	if (entry == NULL) {
+		IPAERR("lookup failed\n");
+		goto error;
+	}
+
+	if (entry->cookie != IPA_COOKIE) {
+		IPAERR("bad params\n");
+		goto error;
+	}
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt--;
+	if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt--;
+
+	entry->rule = rtrule->rule;
+	entry->hdr = hdr;
+	entry->proc_ctx = proc_ctx;
+
+	if (entry->hdr)
+		entry->hdr->ref_cnt++;
+	if (entry->proc_ctx)
+		entry->proc_ctx->ref_cnt++;
+
+	entry->hw_len = 0;
+	entry->prio = 0;
+
+	return 0;
+
+error:
+	return -EPERM;
+}
+
+/**
+ * ipa3_mdfy_rt_rule() - Modify the specified routing rules in SW and optionally
+ * commit to IPA HW
+ * @hdls:	[inout] set of routing rules to modify
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *hdls)
+{
+	int i;
+	int result;
+
+	if (hdls == NULL || hdls->num_rules == 0 || hdls->ip >= IPA_IP_MAX) {
+		IPAERR("bad parm\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ipa3_ctx->lock);
+	for (i = 0; i < hdls->num_rules; i++) {
+		if (__ipa_mdfy_rt_rule(&hdls->rules[i])) {
+			IPAERR("failed to mdfy rt rule %i\n", i);
+			hdls->rules[i].status = IPA_RT_STATUS_OF_MDFY_FAILED;
+		} else {
+			hdls->rules[i].status = 0;
+		}
+	}
+
+	if (hdls->commit)
+		if (ipa3_ctx->ctrl->ipa3_commit_rt(hdls->ip)) {
+			result = -EPERM;
+			goto bail;
+		}
+	result = 0;
+bail:
+	mutex_unlock(&ipa3_ctx->lock);
+
+	return result;
+}
+
+/**
+ * ipa3_set_rt_tuple_mask() - Sets the rt tuple masking for the given tbl
+ *  table index must be for AP EP (not modem)
+ *  updates the routing masking values without changing the flt ones.
+ *
+ * @tbl_idx: routing table index to configure the tuple masking
+ * @tuple: the tuple members masking
+ * Returns:	0 on success, negative on failure
+ *
+ */
+int ipa3_set_rt_tuple_mask(int tbl_idx, struct ipahal_reg_hash_tuple *tuple)
+{
+	struct ipahal_reg_fltrt_hash_tuple fltrt_tuple;
+
+	if (!tuple) {
+		IPAERR("bad tuple\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >=
+		max(IPA_MEM_PART(v6_rt_num_index),
+		IPA_MEM_PART(v4_rt_num_index)) ||
+		tbl_idx < 0) {
+		IPAERR("bad table index\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >= IPA_MEM_PART(v4_modem_rt_index_lo) &&
+		tbl_idx <= IPA_MEM_PART(v4_modem_rt_index_hi)) {
+		IPAERR("cannot configure modem v4 rt tuple by AP\n");
+		return -EINVAL;
+	}
+
+	if (tbl_idx >= IPA_MEM_PART(v6_modem_rt_index_lo) &&
+		tbl_idx <= IPA_MEM_PART(v6_modem_rt_index_hi)) {
+		IPAERR("cannot configure modem v6 rt tuple by AP\n");
+		return -EINVAL;
+	}
+
+	ipahal_read_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		tbl_idx, &fltrt_tuple);
+	fltrt_tuple.rt = *tuple;
+	ipahal_write_reg_n_fields(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+		tbl_idx, &fltrt_tuple);
+
+	return 0;
+}
+
+/**
+ * ipa3_rt_read_tbl_from_hw() -Read routing table from IPA HW
+ * @tbl_idx: routing table index
+ * @ip_type: IPv4 or IPv6 table
+ * @hashable: hashable or non-hashable table
+ * @entry: array to fill the table entries
+ * @num_entry: number of entries in the entry array. Set by the caller to the
+ *  array size; updated by this function on return to the number of entries
+ *  actually filled in
+ *
+ * This function reads the routing table from IPA SRAM and prepares an array
+ * of entries. This function is mainly used for debugging purposes.
+ *
+ * If empty table or Modem Apps table, zero entries will be returned.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_rt_read_tbl_from_hw(u32 tbl_idx, enum ipa_ip_type ip_type,
+	bool hashable, struct ipahal_rt_rule_entry entry[], int *num_entry)
+{
+	void *ipa_sram_mmio;
+	u64 hdr_base_ofst;
+	int res = 0;
+	u64 tbl_addr;
+	bool is_sys;
+	struct ipa_mem_buffer *sys_tbl_mem;
+	u8 *rule_addr;
+	int rule_idx;
+
+	IPADBG_LOW("tbl_idx=%d ip_t=%d hashable=%d entry=0x%p num_entry=0x%p\n",
+		tbl_idx, ip_type, hashable, entry, num_entry);
+
+	if (ip_type == IPA_IP_v4 && tbl_idx >= IPA_MEM_PART(v4_rt_num_index)) {
+		IPAERR("Invalid params\n");
+		return -EFAULT;
+	}
+
+	if (ip_type == IPA_IP_v6 && tbl_idx >= IPA_MEM_PART(v6_rt_num_index)) {
+		IPAERR("Invalid params\n");
+		return -EFAULT;
+	}
+
+	/* map IPA SRAM */
+	ipa_sram_mmio = ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n,
+			ipa3_ctx->smem_restricted_bytes / 4),
+		ipa3_ctx->smem_sz);
+	if (!ipa_sram_mmio) {
+		IPAERR("fail to ioremap IPA SRAM\n");
+		return -ENOMEM;
+	}
+
+	memset(entry, 0, sizeof(*entry) * (*num_entry));
+	if (hashable) {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_rt_hash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_rt_hash_ofst);
+	} else {
+		if (ip_type == IPA_IP_v4)
+			hdr_base_ofst =
+				IPA_MEM_PART(v4_rt_nhash_ofst);
+		else
+			hdr_base_ofst =
+				IPA_MEM_PART(v6_rt_nhash_ofst);
+	}
+
+	IPADBG_LOW("hdr_base_ofst=0x%llx\n", hdr_base_ofst);
+
+	res = ipahal_fltrt_read_addr_from_hdr(ipa_sram_mmio + hdr_base_ofst,
+		tbl_idx, &tbl_addr, &is_sys);
+	if (res) {
+		IPAERR("failed to read table address from header structure\n");
+		goto bail;
+	}
+	IPADBG_LOW("rt tbl %d: tbl_addr=0x%llx is_sys=%d\n",
+		tbl_idx, tbl_addr, is_sys);
+	if (!tbl_addr) {
+		IPAERR("invalid rt tbl addr\n");
+		res = -EFAULT;
+		goto bail;
+	}
+
+	/* for tables which reside in DDR access it from the virtual memory */
+	if (is_sys) {
+		struct ipa3_rt_tbl_set *set;
+		struct ipa3_rt_tbl *tbl;
+
+		set = &ipa3_ctx->rt_tbl_set[ip_type];
+		rule_addr = NULL;
+		list_for_each_entry(tbl, &set->head_rt_tbl_list, link) {
+			if (tbl->idx == tbl_idx) {
+				sys_tbl_mem = &(tbl->curr_mem[hashable ?
+					IPA_RULE_HASHABLE :
+					IPA_RULE_NON_HASHABLE]);
+				if (sys_tbl_mem->phys_base &&
+					sys_tbl_mem->phys_base != tbl_addr) {
+					IPAERR("mismatch:parsed=%llx sw=%pad\n"
+						, tbl_addr,
+						&sys_tbl_mem->phys_base);
+				}
+				if (sys_tbl_mem->phys_base)
+					rule_addr = sys_tbl_mem->base;
+				else
+					rule_addr = NULL;
+			}
+		}
+	} else {
+		rule_addr = ipa_sram_mmio + hdr_base_ofst + tbl_addr;
+	}
+
+	IPADBG_LOW("First rule addr 0x%p\n", rule_addr);
+
+	if (!rule_addr) {
+		/* Modem table in system memory or empty table */
+		*num_entry = 0;
+		goto bail;
+	}
+
+	rule_idx = 0;
+	while (rule_idx < *num_entry) {
+		res = ipahal_rt_parse_hw_rule(rule_addr, &entry[rule_idx]);
+		if (res) {
+			IPAERR("failed parsing rt rule\n");
+			goto bail;
+		}
+
+		IPADBG_LOW("rule_size=%d\n", entry[rule_idx].rule_size);
+		if (!entry[rule_idx].rule_size)
+			break;
+
+		rule_addr += entry[rule_idx].rule_size;
+		rule_idx++;
+	}
+	*num_entry = rule_idx;
+bail:
+	iounmap(ipa_sram_mmio);
+	return res;
+}
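+
+/*
+ * Illustrative debug sketch (an assumption, not part of the code above):
+ * dump the hashable IPv4 routing table at index 0. The array size of 16 is
+ * arbitrary.
+ *
+ *	struct ipahal_rt_rule_entry rules[16];
+ *	int num = ARRAY_SIZE(rules);
+ *	int i;
+ *
+ *	if (!ipa3_rt_read_tbl_from_hw(0, IPA_IP_v4, true, rules, &num))
+ *		for (i = 0; i < num; i++)
+ *			IPADBG("rule %d size %d\n", i, rules[i].rule_size);
+ */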
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
new file mode 100644
index 0000000..b67899b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_trace.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ipa
+#define TRACE_INCLUDE_FILE ipa_trace
+
+#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _IPA_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(
+	intr_to_poll3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	poll_to_intr3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_enter3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	idle_sleep_exit3,
+
+	TP_PROTO(unsigned long client),
+
+	TP_ARGS(client),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	client)
+	),
+
+	TP_fast_assign(
+		__entry->client = client;
+	),
+
+	TP_printk("client=%lu", __entry->client)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifni3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netifrx3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+TRACE_EVENT(
+	rmnet_ipa_netif_rcv_skb3,
+
+	TP_PROTO(unsigned long rx_pkt_cnt),
+
+	TP_ARGS(rx_pkt_cnt),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	rx_pkt_cnt)
+	),
+
+	TP_fast_assign(
+		__entry->rx_pkt_cnt = rx_pkt_cnt;
+	),
+
+	TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
+);
+
+#endif /* _IPA_TRACE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
new file mode 100644
index 0000000..69d0f5b
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -0,0 +1,995 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/delay.h>
+
+#define IPA_RAM_UC_SMEM_SIZE 128
+#define IPA_HW_INTERFACE_VERSION     0x2000
+#define IPA_PKT_FLUSH_TO_US 100
+#define IPA_UC_POLL_SLEEP_USEC 100
+#define IPA_UC_POLL_MAX_RETRY 10000
+
+/**
+ * Mailbox register used to interrupt the HWP for a CPU command.
+ * The IPA_UC_MAILBOX_m_n doorbell is used instead of IPA_IRQ_EE_UC_0
+ * due to a HW limitation.
+ */
+#define IPA_CPU_2_HW_CMD_MBOX_m          0
+#define IPA_CPU_2_HW_CMD_MBOX_n         23
+
+/**
+ * enum ipa3_cpu_2_hw_commands - Values that represent the commands from the CPU
+ * IPA_CPU_2_HW_CMD_NO_OP : No operation is required.
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS : Update SW flags which defines the behavior
+ *                                 of HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST : Launch predefined test over HW.
+ * IPA_CPU_2_HW_CMD_DEBUG_GET_INFO : Read HW internal debug information.
+ * IPA_CPU_2_HW_CMD_ERR_FATAL : CPU instructs HW to perform error fatal
+ *                              handling.
+ * IPA_CPU_2_HW_CMD_CLK_GATE : CPU instructs HW to goto Clock Gated state.
+ * IPA_CPU_2_HW_CMD_CLK_UNGATE : CPU instructs HW to goto Clock Ungated state.
+ * IPA_CPU_2_HW_CMD_MEMCPY : CPU instructs HW to do memcopy using QMB.
+ * IPA_CPU_2_HW_CMD_RESET_PIPE : Command to reset a pipe - SW WA for a HW bug.
+ * IPA_CPU_2_HW_CMD_REG_WRITE : CPU instructs HW to write a value to a register.
+ * IPA_CPU_2_HW_CMD_GSI_CH_EMPTY : Command to check for GSI channel emptiness.
+ */
+enum ipa3_cpu_2_hw_commands {
+	IPA_CPU_2_HW_CMD_NO_OP                     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_CPU_2_HW_CMD_UPDATE_FLAGS              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_CPU_2_HW_CMD_DEBUG_RUN_TEST            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_CPU_2_HW_CMD_DEBUG_GET_INFO            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_CPU_2_HW_CMD_ERR_FATAL                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_CPU_2_HW_CMD_CLK_GATE                  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_CPU_2_HW_CMD_CLK_UNGATE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_CPU_2_HW_CMD_MEMCPY                    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_CPU_2_HW_CMD_RESET_PIPE                =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
+	IPA_CPU_2_HW_CMD_REG_WRITE                 =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
+	IPA_CPU_2_HW_CMD_GSI_CH_EMPTY              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10),
+};
+
+/**
+ * enum ipa3_hw_2_cpu_responses -  Values that represent common HW responses
+ *  to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_NO_OP : No operation response
+ * @IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED : HW shall send this command once
+ *  boot sequence is completed and HW is ready to serve commands from CPU
+ * @IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED: Response to CPU commands
+ * @IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO : Response to
+ *  IPA_CPU_2_HW_CMD_DEBUG_GET_INFO command
+ */
+enum ipa3_hw_2_cpu_responses {
+	IPA_HW_2_CPU_RESPONSE_NO_OP          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_2_CPU_RESPONSE_DEBUG_GET_INFO =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+};
+
+/**
+ * struct IpaHwMemCopyData_t - Structure holding the parameters
+ * for the IPA_CPU_2_HW_CMD_MEMCPY command.
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+struct IpaHwMemCopyData_t  {
+	u32 destination_addr;
+	u32 source_addr;
+	u32 dest_buffer_size;
+	u32 source_buffer_size;
+};
+
+/**
+ * union IpaHwResetPipeCmdData_t - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_RESET_PIPE command.
+ * @pipeNum : Pipe number to be reset
+ * @direction : 1 - IPA Producer, 0 - IPA Consumer
+ * @reserved_02_03 : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwResetPipeCmdData_t {
+	struct IpaHwResetPipeCmdParams_t {
+		u8     pipeNum;
+		u8     direction;
+		u32    reserved_02_03;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * struct IpaHwRegWriteCmdData_t - holds the parameters for
+ * IPA_CPU_2_HW_CMD_REG_WRITE command. Parameters are
+ * sent as 64b immediate parameters.
+ * @RegisterAddress: RG10 register address where the value needs to be written
+ * @RegisterValue: 32-Bit value to be written into the register
+ */
+struct IpaHwRegWriteCmdData_t {
+	u32 RegisterAddress;
+	u32 RegisterValue;
+};
+
+/**
+ * union IpaHwCpuCmdCompletedResponseData_t - Structure holding the parameters
+ * for IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED response.
+ * @originalCmdOp : The original command opcode
+ * @status : 0 for success indication, otherwise failure
+ * @reserved : Reserved
+ *
+ * Parameters are sent as 32b immediate parameters.
+ */
+union IpaHwCpuCmdCompletedResponseData_t {
+	struct IpaHwCpuCmdCompletedResponseParams_t {
+		u32 originalCmdOp:8;
+		u32 status:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwUpdateFlagsCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_UPDATE_FLAGS command
+ * @newFlags: SW flags defined the behavior of HW.
+ *	This field is expected to be used as bitmask for enum ipa3_hw_flags
+ */
+union IpaHwUpdateFlagsCmdData_t {
+	struct IpaHwUpdateFlagsCmdParams_t {
+		u32 newFlags;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * union IpaHwChkChEmptyCmdData_t -  Structure holding the parameters for
+ *  IPA_CPU_2_HW_CMD_GSI_CH_EMPTY command. Parameters are sent as 32b
+ *  immediate parameters.
+ * @ee_n : EE owner of the channel
+ * @vir_ch_id : GSI virtual channel ID of the channel to check for emptiness
+ * @reserved_02_04 : Reserved
+ */
+union IpaHwChkChEmptyCmdData_t {
+	struct IpaHwChkChEmptyCmdParams_t {
+		u8 ee_n;
+		u8 vir_ch_id;
+		u16 reserved_02_04;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * When the resource group 10 limitation workaround is enabled, the uC
+ * send-command path must be able to run in interrupt context, so a
+ * spinlock is used instead of a mutex.
+ */
+#define IPA3_UC_LOCK(flags)						 \
+do {									 \
+	if (ipa3_ctx->apply_rg10_wa)					 \
+		spin_lock_irqsave(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \
+	else								 \
+		mutex_lock(&ipa3_ctx->uc_ctx.uc_lock);			 \
+} while (0)
+
+#define IPA3_UC_UNLOCK(flags)						      \
+do {									      \
+	if (ipa3_ctx->apply_rg10_wa)					      \
+		spin_unlock_irqrestore(&ipa3_ctx->uc_ctx.uc_spinlock, flags); \
+	else								      \
+		mutex_unlock(&ipa3_ctx->uc_ctx.uc_lock);		      \
+} while (0)
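+
+/*
+ * Usage sketch (for illustration only; mirrors the pattern used by
+ * ipa3_uc_send_cmd_64b_param() below): the flags variable is only consumed
+ * when the RG10 workaround selects the IRQ-safe spinlock variant.
+ *
+ *	unsigned long flags;
+ *
+ *	IPA3_UC_LOCK(flags);
+ *	... access ipa3_ctx->uc_ctx shared state ...
+ *	IPA3_UC_UNLOCK(flags);
+ */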
+
+struct ipa3_uc_hdlrs ipa3_uc_hdlrs[IPA_HW_NUM_FEATURES] = { { 0 } };
+
+const char *ipa_hw_error_str(enum ipa3_hw_errors err_type)
+{
+	const char *str;
+
+	switch (err_type) {
+	case IPA_HW_ERROR_NONE:
+		str = "IPA_HW_ERROR_NONE";
+		break;
+	case IPA_HW_INVALID_DOORBELL_ERROR:
+		str = "IPA_HW_INVALID_DOORBELL_ERROR";
+		break;
+	case IPA_HW_DMA_ERROR:
+		str = "IPA_HW_DMA_ERROR";
+		break;
+	case IPA_HW_FATAL_SYSTEM_ERROR:
+		str = "IPA_HW_FATAL_SYSTEM_ERROR";
+		break;
+	case IPA_HW_INVALID_OPCODE:
+		str = "IPA_HW_INVALID_OPCODE";
+		break;
+	case IPA_HW_INVALID_PARAMS:
+		str = "IPA_HW_INVALID_PARAMS";
+		break;
+	case IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE:
+		str = "IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE";
+		break;
+	case IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE:
+		str = "IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE";
+		break;
+	case IPA_HW_GSI_CH_NOT_EMPTY_FAILURE:
+		str = "IPA_HW_GSI_CH_NOT_EMPTY_FAILURE";
+		break;
+	default:
+		str = "INVALID ipa_hw_errors type";
+	}
+
+	return str;
+}
+
+static void ipa3_log_evt_hdlr(void)
+{
+	int i;
+
+	if (!ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+		ipa3_ctx->uc_ctx.uc_event_top_ofst =
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		if (ipa3_ctx->uc_ctx.uc_event_top_ofst +
+			sizeof(struct IpaHwEventLogInfoData_t) >=
+			ipa3_ctx->ctrl->ipa_reg_base_ofst +
+			ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+			ipa3_ctx->smem_sz) {
+			IPAERR("uc_top 0x%x outside SRAM\n",
+				ipa3_ctx->uc_ctx.uc_event_top_ofst);
+			goto bad_uc_top_ofst;
+		}
+
+		ipa3_ctx->uc_ctx.uc_event_top_mmio = ioremap(
+			ipa3_ctx->ipa_wrapper_base +
+			ipa3_ctx->uc_ctx.uc_event_top_ofst,
+			sizeof(struct IpaHwEventLogInfoData_t));
+		if (!ipa3_ctx->uc_ctx.uc_event_top_mmio) {
+			IPAERR("fail to ioremap uc top\n");
+			goto bad_uc_top_ofst;
+		}
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr)
+				ipa3_uc_hdlrs[i].ipa_uc_event_log_info_hdlr
+					(ipa3_ctx->uc_ctx.uc_event_top_mmio);
+		}
+	} else {
+
+		if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams !=
+			ipa3_ctx->uc_ctx.uc_event_top_ofst) {
+			IPAERR("uc top ofst changed new=%u cur=%u\n",
+				ipa3_ctx->uc_ctx.uc_sram_mmio->
+				eventParams,
+				ipa3_ctx->uc_ctx.uc_event_top_ofst);
+		}
+	}
+
+	return;
+
+bad_uc_top_ofst:
+	ipa3_ctx->uc_ctx.uc_event_top_ofst = 0;
+}
+
+/**
+ * ipa3_uc_state_check() - Check the status of the uC interface
+ *
+ * Return value: 0 if the uC is loaded, interface is initialized
+ *               and there was no recent failure in one of the commands.
+ *               A negative value is returned otherwise.
+ */
+int ipa3_uc_state_check(void)
+{
+	if (!ipa3_ctx->uc_ctx.uc_inited) {
+		IPAERR("uC interface not initialized\n");
+		return -EFAULT;
+	}
+
+	if (!ipa3_ctx->uc_ctx.uc_loaded) {
+		IPAERR("uC is not loaded\n");
+		return -EFAULT;
+	}
+
+	if (ipa3_ctx->uc_ctx.uc_failed) {
+		IPAERR("uC has failed its last command\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_loaded_check() - Check the uC has been loaded
+ *
+ * Return value: 1 if the uC is loaded, 0 otherwise
+ */
+int ipa3_uc_loaded_check(void)
+{
+	return ipa3_ctx->uc_ctx.uc_loaded;
+}
+EXPORT_SYMBOL(ipa3_uc_loaded_check);
+
+static void ipa3_uc_event_handler(enum ipa_irq_type interrupt,
+				 void *private_data,
+				 void *interrupt_data)
+{
+	union IpaHwErrorEventData_t evt;
+	u8 feature;
+
+	WARN_ON(private_data != ipa3_ctx);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	IPADBG("uC evt opcode=%u\n",
+		ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+
+	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for event %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+	/* Feature specific handling */
+	if (ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr)
+		ipa3_uc_hdlrs[feature].ipa_uc_event_hdlr
+			(ipa3_ctx->uc_ctx.uc_sram_mmio);
+
+	/* General handling */
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_ERROR) {
+		evt.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams;
+		IPAERR("uC Error, evt errorType = %s\n",
+			ipa_hw_error_str(evt.params.errorType));
+		ipa3_ctx->uc_ctx.uc_failed = true;
+		ipa3_ctx->uc_ctx.uc_error_type = evt.params.errorType;
+		ipa3_ctx->uc_ctx.uc_error_timestamp =
+			ipahal_read_reg(IPA_TAG_TIMER);
+		BUG();
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_LOG_INFO) {
+		IPADBG("uC evt log info ofst=0x%x\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->eventParams);
+		ipa3_log_evt_hdlr();
+	} else {
+		IPADBG("unsupported uC evt opcode=%u\n",
+				ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+}
+
+int ipa3_uc_panic_notifier(struct notifier_block *this,
+		unsigned long event, void *ptr)
+{
+	int result = 0;
+	struct ipa_active_client_logging_info log_info;
+
+	IPADBG("this=%p evt=%lu ptr=%p\n", this, event, ptr);
+
+	result = ipa3_uc_state_check();
+	if (result)
+		goto fail;
+
+	IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
+	if (ipa3_inc_client_enable_clks_no_block(&log_info))
+		goto fail;
+
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp =
+		IPA_CPU_2_HW_CMD_ERR_FATAL;
+	ipa3_ctx->uc_ctx.pending_cmd = ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp;
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+
+	if (ipa3_ctx->apply_rg10_wa)
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+			IPA_CPU_2_HW_CMD_MBOX_m,
+			IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
+	else
+		ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+	/* give uc enough time to save state */
+	udelay(IPA_PKT_FLUSH_TO_US);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	IPADBG("err_fatal issued\n");
+
+fail:
+	return NOTIFY_DONE;
+}
+
+static void ipa3_uc_response_hdlr(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data)
+{
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	u8 feature;
+	int res;
+	int i;
+
+	WARN_ON(private_data != ipa3_ctx);
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	IPADBG("uC rsp opcode=%u\n",
+			ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	feature = EXTRACT_UC_FEATURE(ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Invalid feature %u for response %u\n",
+			feature, ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+		return;
+	}
+
+	/* Feature specific handling */
+	if (ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr) {
+		res = ipa3_uc_hdlrs[feature].ipa3_uc_response_hdlr(
+			ipa3_ctx->uc_ctx.uc_sram_mmio,
+			&ipa3_ctx->uc_ctx.uc_status);
+		if (res == 0) {
+			IPADBG("feature %d specific response handler\n",
+				feature);
+			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+			IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+			return;
+		}
+	}
+
+	/* General handling */
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED) {
+		ipa3_ctx->uc_ctx.uc_loaded = true;
+
+		IPADBG("IPA uC loaded\n");
+		/*
+		 * The proxy vote is held until uC is loaded to ensure that
+		 * IPA_HW_2_CPU_RESPONSE_INIT_COMPLETED is received.
+		 */
+		ipa3_proxy_clk_unvote();
+
+		for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+			if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+				ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+		}
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+		   IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+		uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams;
+		IPADBG("uC cmd response opcode=%u status=%u\n",
+		       uc_rsp.params.originalCmdOp,
+		       uc_rsp.params.status);
+		if (uc_rsp.params.originalCmdOp ==
+		    ipa3_ctx->uc_ctx.pending_cmd) {
+			ipa3_ctx->uc_ctx.uc_status = uc_rsp.params.status;
+			complete_all(&ipa3_ctx->uc_ctx.uc_completion);
+		} else {
+			IPAERR("Expected cmd=%u rcvd cmd=%u\n",
+			       ipa3_ctx->uc_ctx.pending_cmd,
+			       uc_rsp.params.originalCmdOp);
+		}
+	} else {
+		IPAERR("Unsupported uC rsp opcode = %u\n",
+		       ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+
+static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
+	u32 expected_status, bool polling_mode, unsigned long timeout_jiffies)
+{
+	int index;
+	union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
+	unsigned long flags;
+	int retries = 0;
+
+send_cmd_lock:
+	IPA3_UC_LOCK(flags);
+
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC send command aborted\n");
+		IPA3_UC_UNLOCK(flags);
+		return -EBADF;
+	}
+send_cmd:
+	if (ipa3_ctx->apply_rg10_wa) {
+		if (!polling_mode)
+			IPADBG("Overriding mode to polling mode\n");
+		polling_mode = true;
+	} else {
+		init_completion(&ipa3_ctx->uc_ctx.uc_completion);
+	}
+
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams = cmd_lo;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdParams_hi = cmd_hi;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->cmdOp = opcode;
+	ipa3_ctx->uc_ctx.pending_cmd = opcode;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp = 0;
+	ipa3_ctx->uc_ctx.uc_sram_mmio->responseParams = 0;
+
+	ipa3_ctx->uc_ctx.uc_status = 0;
+
+	/* ensure write to shared memory is done before triggering uc */
+	wmb();
+
+	if (ipa3_ctx->apply_rg10_wa)
+		ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n,
+			IPA_CPU_2_HW_CMD_MBOX_m,
+			IPA_CPU_2_HW_CMD_MBOX_n, 0x1);
+	else
+		ipahal_write_reg_n(IPA_IRQ_EE_UC_n, 0, 0x1);
+
+	if (polling_mode) {
+		for (index = 0; index < IPA_UC_POLL_MAX_RETRY; index++) {
+			if (ipa3_ctx->uc_ctx.uc_sram_mmio->responseOp ==
+			    IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED) {
+				uc_rsp.raw32b = ipa3_ctx->uc_ctx.uc_sram_mmio->
+						responseParams;
+				if (uc_rsp.params.originalCmdOp ==
+					ipa3_ctx->uc_ctx.pending_cmd) {
+					ipa3_ctx->uc_ctx.uc_status =
+						uc_rsp.params.status;
+					break;
+				}
+			}
+			if (ipa3_ctx->apply_rg10_wa)
+				udelay(IPA_UC_POLL_SLEEP_USEC);
+			else
+				usleep_range(IPA_UC_POLL_SLEEP_USEC,
+					IPA_UC_POLL_SLEEP_USEC);
+		}
+
+		if (index == IPA_UC_POLL_MAX_RETRY) {
+			IPAERR("uC max polling retries reached\n");
+			if (ipa3_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(ipa3_ctx->
+					uc_ctx.uc_error_type));
+			}
+			IPA3_UC_UNLOCK(flags);
+			BUG();
+			return -EFAULT;
+		}
+	} else {
+		if (wait_for_completion_timeout(&ipa3_ctx->uc_ctx.uc_completion,
+			timeout_jiffies) == 0) {
+			IPAERR("uC timed out\n");
+			if (ipa3_ctx->uc_ctx.uc_failed) {
+				IPAERR("uC reported on Error, errorType = %s\n",
+					ipa_hw_error_str(ipa3_ctx->
+					uc_ctx.uc_error_type));
+			}
+			IPA3_UC_UNLOCK(flags);
+			BUG();
+			return -EFAULT;
+		}
+	}
+
+	if (ipa3_ctx->uc_ctx.uc_status != expected_status) {
+		if (ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE ||
+		    ipa3_ctx->uc_ctx.uc_status ==
+		    IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE) {
+			retries++;
+			if (retries == IPA_GSI_CHANNEL_STOP_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+				IPA3_UC_UNLOCK(flags);
+				BUG();
+				return -EFAULT;
+			}
+			IPA3_UC_UNLOCK(flags);
+			if (ipa3_ctx->uc_ctx.uc_status ==
+			    IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE)
+				ipa3_inject_dma_task_for_gsi();
+			/* sleep for short period to flush IPA */
+			usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+				IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+			goto send_cmd_lock;
+		}
+
+		if (ipa3_ctx->uc_ctx.uc_status ==
+			IPA_HW_GSI_CH_NOT_EMPTY_FAILURE) {
+			retries++;
+			if (retries >= IPA_GSI_CHANNEL_EMPTY_MAX_RETRY) {
+				IPAERR("Failed after %d tries\n", retries);
+				IPA3_UC_UNLOCK(flags);
+				return -EFAULT;
+			}
+			if (ipa3_ctx->apply_rg10_wa)
+				udelay(
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC / 2 +
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC / 2);
+			else
+				usleep_range(
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MIN_USEC,
+				IPA_GSI_CHANNEL_EMPTY_SLEEP_MAX_USEC);
+			goto send_cmd;
+		}
+
+		IPAERR("Received status %u, expected status %u\n",
+			ipa3_ctx->uc_ctx.uc_status, expected_status);
+		IPA3_UC_UNLOCK(flags);
+		return -EFAULT;
+	}
+
+	IPA3_UC_UNLOCK(flags);
+
+	IPADBG("uC cmd %u send succeeded\n", opcode);
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_interface_init() - Initialize the interface with the uC
+ *
+ * Return value: 0 on success, negative value otherwise
+ */
+int ipa3_uc_interface_init(void)
+{
+	int result;
+	unsigned long phys_addr;
+
+	if (ipa3_ctx->uc_ctx.uc_inited) {
+		IPADBG("uC interface already initialized\n");
+		return 0;
+	}
+
+	mutex_init(&ipa3_ctx->uc_ctx.uc_lock);
+	spin_lock_init(&ipa3_ctx->uc_ctx.uc_spinlock);
+
+	phys_addr = ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0);
+	ipa3_ctx->uc_ctx.uc_sram_mmio = ioremap(phys_addr,
+					       IPA_RAM_UC_SMEM_SIZE);
+	if (!ipa3_ctx->uc_ctx.uc_sram_mmio) {
+		IPAERR("Fail to ioremap IPA uC SRAM\n");
+		result = -ENOMEM;
+		goto remap_fail;
+	}
+
+	if (!ipa3_ctx->apply_rg10_wa) {
+		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+			ipa3_uc_event_handler, true,
+			ipa3_ctx);
+		if (result) {
+			IPAERR("Fail to register for UC_IRQ0 rsp interrupt\n");
+			result = -EFAULT;
+			goto irq_fail0;
+		}
+
+		result = ipa3_add_interrupt_handler(IPA_UC_IRQ_1,
+			ipa3_uc_response_hdlr, true,
+			ipa3_ctx);
+		if (result) {
+			IPAERR("fail to register for UC_IRQ1 rsp interrupt\n");
+			result = -EFAULT;
+			goto irq_fail1;
+		}
+	}
+
+	ipa3_ctx->uc_ctx.uc_inited = true;
+
+	IPADBG("IPA uC interface is initialized\n");
+	return 0;
+
+irq_fail1:
+	ipa3_remove_interrupt_handler(IPA_UC_IRQ_0);
+irq_fail0:
+	iounmap(ipa3_ctx->uc_ctx.uc_sram_mmio);
+remap_fail:
+	return result;
+}
+
+/**
+ * ipa3_uc_load_notify() - Notification about uC loading
+ *
+ * This function should be called when the IPA uC interface layer cannot
+ * determine by itself that the uC has been loaded and must wait for an
+ * external notification. An example is the resource group 10 limitation,
+ * where the IPA driver does not get uC interrupts.
+ * The function performs the actions that were deferred at init time because
+ * the uC was not loaded yet.
+ */
+void ipa3_uc_load_notify(void)
+{
+	int i;
+	int result;
+
+	if (!ipa3_ctx->apply_rg10_wa)
+		return;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipa3_ctx->uc_ctx.uc_loaded = true;
+	IPADBG("IPA uC loaded\n");
+
+	ipa3_proxy_clk_unvote();
+
+	ipa3_init_interrupts();
+
+	result = ipa3_add_interrupt_handler(IPA_UC_IRQ_0,
+		ipa3_uc_event_handler, true,
+		ipa3_ctx);
+	if (result)
+		IPAERR("Fail to register for UC_IRQ0 rsp interrupt.\n");
+
+	for (i = 0; i < IPA_HW_NUM_FEATURES; i++) {
+		if (ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr)
+			ipa3_uc_hdlrs[i].ipa_uc_loaded_hdlr();
+	}
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+}
+EXPORT_SYMBOL(ipa3_uc_load_notify);
+
+/**
+ * ipa3_uc_send_cmd() - Send a command to the uC
+ *
+ * Note1: This function sends a command with a 32bit parameter and does not
+ *	use the upper 32 bits of the command parameter (they are set to zero).
+ *
+ * Note2: In case the operation times out (no response from the uC) or the
+ *       maximal number of polling retries is reached, the logic
+ *       considers it an invalid state of the uC/IPA and
+ *       issues a kernel panic.
+ *
+ * Returns: 0 on success.
+ *          -EINVAL in case of invalid input.
+ *          -EBADF in case uC interface is not initialized /
+ *                 or the uC has failed previously.
+ *          -EFAULT in case the received status doesn't match
+ *                  the expected.
+ */
+int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
+		    bool polling_mode, unsigned long timeout_jiffies)
+{
+	return ipa3_uc_send_cmd_64b_param(cmd, 0, opcode,
+		expected_status, polling_mode, timeout_jiffies);
+}
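+
+/*
+ * Illustrative sketch (an assumed caller, for clarity only): issue a NO_OP
+ * command and wait up to one second for the uC to acknowledge it with
+ * IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ *
+ *	if (ipa3_uc_send_cmd(0, IPA_CPU_2_HW_CMD_NO_OP, 0, false, HZ))
+ *		IPAERR("uC NO_OP command failed\n");
+ */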
+
+/**
+ * ipa3_uc_register_handlers() - Registers event, response and log event
+ *                              handlers for a specific feature. Please note
+ *                              that currently only one set of handlers can
+ *                              be registered per feature.
+ *
+ * Return value: None
+ */
+void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
+			      struct ipa3_uc_hdlrs *hdlrs)
+{
+	unsigned long flags;
+
+	if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
+		IPAERR("Feature %u is invalid, not registering hdlrs\n",
+		       feature);
+		return;
+	}
+
+	IPA3_UC_LOCK(flags);
+	ipa3_uc_hdlrs[feature] = *hdlrs;
+	IPA3_UC_UNLOCK(flags);
+
+	IPADBG("uC handlers registered for feature %u\n", feature);
+}
+
+/**
+ * ipa3_uc_reset_pipe() - reset a BAM pipe using the uC interface
+ * @ipa_client: [in] ipa client handle representing the pipe
+ *
+ * The function uses the uC interface in order to issue a BAM
+ * PIPE reset request. The uC makes sure there's no traffic in
+ * the TX command queue before issuing the reset.
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_reset_pipe(enum ipa_client_type ipa_client)
+{
+	union IpaHwResetPipeCmdData_t cmd;
+	int ep_idx;
+	int ret;
+
+	ep_idx = ipa3_get_ep_mapping(ipa_client);
+	if (ep_idx == -1) {
+		IPAERR("Invalid IPA client\n");
+		return 0;
+	}
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * continue with the sequence without resetting the
+	 * pipe.
+	 */
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC interface will not be used to reset %s pipe %d\n",
+		       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD",
+		       ep_idx);
+		return 0;
+	}
+
+	/*
+	 * IPA consumer = 0, IPA producer = 1.
+	 * IPA driver concept of PROD/CONS is the opposite of the
+	 * IPA HW concept. Therefore, IPA AP CLIENT PRODUCER = IPA CONSUMER,
+	 * and vice-versa.
+	 */
+	cmd.params.direction = (u8)(IPA_CLIENT_IS_PROD(ipa_client) ? 0 : 1);
+	cmd.params.pipeNum = (u8)ep_idx;
+
+	IPADBG("uC pipe reset on IPA %s pipe %d\n",
+	       IPA_CLIENT_IS_PROD(ipa_client) ? "CONS" : "PROD", ep_idx);
+
+	ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_RESET_PIPE, 0,
+			      false, 10*HZ);
+
+	return ret;
+}
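+
+/*
+ * Illustrative sketch (an assumed caller; IPA_CLIENT_USB_PROD is only an
+ * example client): reset the BAM pipe backing a client before tearing it
+ * down.
+ *
+ *	if (ipa3_uc_reset_pipe(IPA_CLIENT_USB_PROD))
+ *		IPAERR("uC pipe reset failed\n");
+ */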
+
+int ipa3_uc_is_gsi_channel_empty(enum ipa_client_type ipa_client)
+{
+	struct ipa_gsi_ep_config *gsi_ep_info;
+	union IpaHwChkChEmptyCmdData_t cmd;
+	int ret;
+
+	gsi_ep_info = ipa3_get_gsi_ep_info(ipa3_get_ep_mapping(ipa_client));
+	if (!gsi_ep_info) {
+		IPAERR("Invalid IPA ep index\n");
+		return 0;
+	}
+
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC cannot be used to validate ch emptiness clnt=%d\n"
+			, ipa_client);
+		return 0;
+	}
+
+	cmd.params.ee_n = gsi_ep_info->ee;
+	cmd.params.vir_ch_id = gsi_ep_info->ipa_gsi_chan_num;
+
+	IPADBG("uC emptiness check for IPA GSI Channel %d\n",
+	       gsi_ep_info->ipa_gsi_chan_num);
+
+	ret = ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_GSI_CH_EMPTY, 0,
+			      false, 10*HZ);
+
+	return ret;
+}
+
+
+/**
+ * ipa3_uc_notify_clk_state() - notify the uC of clock enable / disable
+ * @enabled: true if the clocks are enabled
+ *
+ * The function uses the uC interface to notify the uC before IPA clocks
+ * are disabled, to make sure the uC is not in the middle of an operation.
+ * After clocks are enabled it also needs to notify the uC to start processing.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_notify_clk_state(bool enabled)
+{
+	u32 opcode;
+
+	/*
+	 * If the uC interface has not been initialized yet,
+	 * don't notify the uC on the enable/disable
+	 */
+	if (ipa3_uc_state_check()) {
+		IPADBG("uC interface will not notify the UC on clock state\n");
+		return 0;
+	}
+
+	IPADBG("uC clock %s notification\n", (enabled) ? "UNGATE" : "GATE");
+
+	opcode = (enabled) ? IPA_CPU_2_HW_CMD_CLK_UNGATE :
+			     IPA_CPU_2_HW_CMD_CLK_GATE;
+
+	return ipa3_uc_send_cmd(0, opcode, 0, true, 0);
+}
+
+/**
+ * ipa3_uc_update_hw_flags() - send uC the HW flags to be used
+ * @flags: This field is expected to be used as bitmask for enum ipa3_hw_flags
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_update_hw_flags(u32 flags)
+{
+	union IpaHwUpdateFlagsCmdData_t cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.newFlags = flags;
+	return ipa3_uc_send_cmd(cmd.raw32b, IPA_CPU_2_HW_CMD_UPDATE_FLAGS, 0,
+		false, HZ);
+}
+
+/**
+ * ipa3_uc_rg10_write_reg() - write to a register, possibly via the uC
+ *
+ * If the RG10 limitation workaround is enabled, the register write
+ * is proxied by the uC due to a H/W limitation.
+ * This function should be called for RG10 registers only.
+ *
+ * @Parameters: same as for ipahal_write_reg_n()
+ *
+ */
+void ipa3_uc_rg10_write_reg(enum ipahal_reg_name reg, u32 n, u32 val)
+{
+	int ret;
+	u32 paddr;
+
+	if (!ipa3_ctx->apply_rg10_wa)
+		return ipahal_write_reg_n(reg, n, val);
+
+
+	/* calculate register physical address */
+	paddr = ipa3_ctx->ipa_wrapper_base + ipa3_ctx->ctrl->ipa_reg_base_ofst;
+	paddr += ipahal_get_reg_n_ofst(reg, n);
+
+	IPADBG("Sending uC cmd to reg write: addr=0x%x val=0x%x\n",
+		paddr, val);
+	ret = ipa3_uc_send_cmd_64b_param(paddr, val,
+		IPA_CPU_2_HW_CMD_REG_WRITE, 0, true, 0);
+	if (ret) {
+		IPAERR("failed to send cmd to uC for reg write\n");
+		BUG();
+	}
+}
+
+/**
+ * ipa3_uc_memcpy() - Perform a memcpy action using IPA uC
+ * @dest: physical address to store the copied data.
+ * @src: physical address of the source data to copy.
+ * @len: number of bytes to copy.
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa3_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMemCopyData_t *cmd;
+
+	IPADBG("dest 0x%pa src 0x%pa len %d\n", &dest, &src, len);
+	mem.size = sizeof(*cmd);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		return -ENOMEM;
+	}
+	cmd = (struct IpaHwMemCopyData_t *)mem.base;
+	memset(cmd, 0, sizeof(*cmd));
+	cmd->destination_addr = dest;
+	cmd->dest_buffer_size = len;
+	cmd->source_addr = src;
+	cmd->source_buffer_size = len;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MEMCPY, 0,
+		true, 10 * HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto free_coherent;
+	}
+
+	res = 0;
+free_coherent:
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+	return res;
+}
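+
+/*
+ * Illustrative sketch (an assumed caller; dst_mem and src_mem stand for two
+ * previously allocated DMA-able buffers): copy 64 bytes between them using
+ * the uC QMB memcpy command.
+ *
+ *	if (ipa3_uc_memcpy(dst_mem.phys_base, src_mem.phys_base, 64))
+ *		IPAERR("uC memcpy failed\n");
+ */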
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
new file mode 100644
index 0000000..7949d91
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -0,0 +1,962 @@
+/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/* MHI uC interface definitions */
+#define IPA_HW_INTERFACE_MHI_VERSION            0x0004
+
+#define IPA_HW_MAX_NUMBER_OF_CHANNELS	2
+#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS	2
+#define IPA_HW_MAX_CHANNEL_HANDLE	(IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
+
+/**
+ * Values that represent the MHI commands from CPU to IPA HW.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready
+ *	to serve MHI transfers. Once initialization was completed HW shall
+ *	respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ *		IPA_HW_MHI_CHANNEL_STATE_ENABLE
+ * @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data.
+ *	Once operation was completed HW shall respond with
+ *	IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
+ * @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel
+ *	processing state following host request. Once operation was completed
+ *	HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
+ * @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL synchronization.
+ * @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
+ */
+enum ipa_cpu_2_hw_mhi_commands {
+	IPA_CPU_2_HW_CMD_MHI_INIT
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
+	IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
+};
+
+/**
+ * Values that represent MHI related HW responses to CPU commands.
+ * @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
+ *	IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
+ *	IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
+ */
+enum ipa_hw_2_cpu_mhi_responses {
+	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+};
+
+/**
+ * Values that represent MHI related HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specifying that the device
+ *	detected an error in an element from the transfer ring associated
+ *	with the channel
+ * @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specifying that a
+ *	BAM interrupt was asserted while the MHI engine is suspended
+ */
+enum ipa_hw_2_cpu_mhi_events {
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+};
+
+/**
+ * Channel error types.
+ * @IPA_HW_CHANNEL_ERROR_NONE: No error persists.
+ * @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
+ */
+enum ipa_hw_channel_errors {
+	IPA_HW_CHANNEL_ERROR_NONE,
+	IPA_HW_CHANNEL_INVALID_RE_ERROR
+};
+
+/**
+ * MHI error types.
+ * @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
+ * @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
+ * @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
+ * @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
+ *	secondary event ring
+ * @IPA_HW_LINK_ERROR: Link error
+ */
+enum ipa_hw_mhi_errors {
+	IPA_HW_INVALID_MMIO_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
+	IPA_HW_INVALID_CHANNEL_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
+	IPA_HW_INVALID_EVENT_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
+	IPA_HW_NO_ED_IN_RING_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
+	IPA_HW_LINK_ERROR
+		= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
+};
+
+
+/**
+ * Structure referring to the common and MHI section of 128B shared memory
+ * located in offset zero of SW Partition in IPA SRAM.
+ * The shared memory is used for communication between IPA HW and CPU.
+ * @common: common section in IPA SRAM
+ * @interfaceVersionMhi: The MHI interface version as reported by HW
+ * @mhiState: Overall MHI state
+ * @reserved_2B: reserved
+ * @mhiCnl0State: State of MHI channel 0.
+ *	The state carries information regarding the error type.
+ *	See IPA_HW_MHI_CHANNEL_STATES.
+ * @mhiCnl1State: State of MHI channel 1.
+ * @mhiCnl2State: State of MHI channel 2.
+ * @mhiCnl3State: State of MHI channel 3.
+ * @mhiCnl4State: State of MHI channel 4.
+ * @mhiCnl5State: State of MHI channel 5.
+ * @mhiCnl6State: State of MHI channel 6.
+ * @mhiCnl7State: State of MHI channel 7.
+ * @reserved_37_34: reserved
+ * @reserved_3B_38: reserved
+ * @reserved_3F_3C: reserved
+ */
+struct IpaHwSharedMemMhiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u16 interfaceVersionMhi;
+	u8 mhiState;
+	u8 reserved_2B;
+	u8 mhiCnl0State;
+	u8 mhiCnl1State;
+	u8 mhiCnl2State;
+	u8 mhiCnl3State;
+	u8 mhiCnl4State;
+	u8 mhiCnl5State;
+	u8 mhiCnl6State;
+	u8 mhiCnl7State;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+};
+
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
+ * Parameters are sent as a pointer and thus must reside at an address
+ * accessible to HW.
+ * @msiAddress: The MSI base (in device space) used for asserting the interrupt
+ *	(MSI) associated with the event ring
+ * @mmioBaseAddress: The address (in device space) of the MMIO structure in
+ *	host space
+ * @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI control data structures are allocated by
+ *	the host, including channel context array, event context array,
+ *	and rings. This value is used for host/device address translation.
+ * @deviceMhiDataBaseAddress: Base address of the memory region in the device
+ *	address space where the MHI data buffers are allocated by the host.
+ *	This value is used for host/device address translation.
+ * @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel
+ * @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
+ *	event ring.
+ */
+struct IpaHwMhiInitCmdData_t {
+	u32 msiAddress;
+	u32 mmioBaseAddress;
+	u32 deviceMhiCtrlBaseAddress;
+	u32 deviceMhiDataBaseAddress;
+	u32 firstChannelIndex;
+	u32 firstEventRingIndex;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
+ *	command. Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
+ *	used as an index in channel context array structures.
+ * @bamPipeId: The BAM pipe number for pipe dedicated for this channel
+ * @channelDirection: The direction of the channel as defined in the channel
+ *	type field (CHTYPE) in the channel context data structure.
+ * @reserved: reserved.
+ */
+union IpaHwMhiInitChannelCmdData_t {
+	struct IpaHwMhiInitChannelCmdParams_t {
+		u32 channelHandle:8;
+		u32 contexArrayIndex:8;
+		u32 bamPipeId:6;
+		u32 channelDirection:2;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwMhiMsiCmdData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @requestedState: The requested channel state as was indicated from Host.
+ *	Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @LPTransitionRejected: Indication that low power state transition was
+ *	rejected
+ * @reserved: reserved
+ */
+union IpaHwMhiChangeChannelStateCmdData_t {
+	struct IpaHwMhiChangeChannelStateCmdParams_t {
+		u32 requestedState:8;
+		u32 channelHandle:8;
+		u32 LPTransitionRejected:8;
+		u32 reserved:8;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiStopEventUpdateData_t {
+	struct IpaHwMhiStopEventUpdateDataParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
+ * Parameters are sent as 32b immediate parameters.
+ * @state: The new channel state. In case state is not as requested this is
+ *	error indication for the last command
+ * @channelHandle: The channel identifier
+ * @additonalParams: For stop: the number of pending bam descriptors currently
+ *	queued
+ */
+union IpaHwMhiChangeChannelStateResponseData_t {
+	struct IpaHwMhiChangeChannelStateResponseParams_t {
+		u32 state:8;
+		u32 channelHandle:8;
+		u32 additonalParams:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
+ * Parameters are sent as 32b immediate parameters.
+ * @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelErrorEventData_t {
+	struct IpaHwMhiChannelErrorEventParams_t {
+		u32 errorType:8;
+		u32 channelHandle:8;
+		u32 reserved:16;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the parameters for
+ *	IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
+ * Parameters are sent as 32b immediate parameters.
+ * @channelHandle: The channel identifier as allocated by driver.
+ *	value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
+ * @reserved: reserved
+ */
+union IpaHwMhiChannelWakeupEventData_t {
+	struct IpaHwMhiChannelWakeupEventParams_t {
+		u32 channelHandle:8;
+		u32 reserved:24;
+	} params;
+	u32 raw32b;
+};
+
+/**
+ * Structure holding the MHI Common statistics
+ * @numULDLSync: Number of times UL activity was triggered due to DL activity
+ * @numULTimerExpired: Number of times UL Accm Timer expired
+ */
+struct IpaHwStatsMhiCmnInfoData_t {
+	u32 numULDLSync;
+	u32 numULTimerExpired;
+	u32 numChEvCtxWpRead;
+	u32 reserved;
+};
+
+/**
+ * Structure holding the MHI Channel statistics
+ * @doorbellInt: The number of doorbell int
+ * @reProccesed: The number of ring elements processed
+ * @bamFifoFull: Number of times Bam Fifo got full
+ * @bamFifoEmpty: Number of times Bam Fifo got empty
+ * @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
+ * @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
+ * @bamInt: Number of BAM Interrupts
+ * @ringFull: Number of times Transfer Ring got full
+ * @ringEmpty: Number of times Transfer Ring got empty
+ * @ringUsageHigh: Number of times Transfer Ring usage went above 75%
+ * @ringUsageLow: Number of times Transfer Ring usage went below 25%
+ * @delayedMsi: Number of times device triggered MSI to host after
+ *	Interrupt Moderation Timer expiry
+ * @immediateMsi: Number of times device triggered MSI to host immediately
+ * @thresholdMsi: Number of times device triggered MSI due to max pending
+ *	events threshold reached
+ * @numSuspend: Number of times channel was suspended
+ * @numResume: Number of times channel was resumed
+ * @num_OOB: Number of times we indicated that we are OOB
+ * @num_OOB_timer_expiry: Number of times we indicated that we are OOB
+ *	after timer expiry
+ * @num_OOB_moderation_timer_start: Number of times we started timer after
+ *	sending OOB and hitting OOB again before we processed threshold
+ *	number of packets
+ * @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
+ */
+struct IpaHwStatsMhiCnlInfoData_t {
+	u32 doorbellInt;
+	u32 reProccesed;
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamInt;
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 delayedMsi;
+	u32 immediateMsi;
+	u32 thresholdMsi;
+	u32 numSuspend;
+	u32 numResume;
+	u32 num_OOB;
+	u32 num_OOB_timer_expiry;
+	u32 num_OOB_moderation_timer_start;
+	u32 num_db_mode_evt;
+};
+
+/**
+ * Structure holding the MHI statistics
+ * @mhiCmnStats: Stats pertaining to MHI
+ * @mhiCnlStats: Stats pertaining to each channel
+ */
+struct IpaHwStatsMhiInfoData_t {
+	struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
+	struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+};
+
+/**
+ * Structure holding the MHI Common Config info
+ * @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
+ * @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
+ *	enabled
+ * @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
+ * @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
+ */
+struct IpaHwConfigMhiCmnInfoData_t {
+	u8 isDlUlSyncEnabled;
+	u8 UlAccmVal;
+	u8 ulMsiEventThreshold;
+	u8 dlMsiEventThreshold;
+};
+
+/**
+ * Structure holding the parameters for MSI info data
+ * @msiAddress_low: The MSI lower base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
+ *	the interrupt (MSI) associated with the event ring.
+ * @msiMask: Mask indicating number of messages assigned by the host to device
+ * @msiData: Data Pattern to use when generating the MSI
+ */
+struct IpaHwConfigMhiMsiInfoData_t {
+	u32 msiAddress_low;
+	u32 msiAddress_hi;
+	u32 msiMask;
+	u32 msiData;
+};
+
+/**
+ * Structure holding the MHI Channel Config info
+ * @transferRingSize: The Transfer Ring size in terms of Ring Elements
+ * @transferRingIndex: The Transfer Ring channel number as defined by host
+ * @eventRingIndex: The Event Ring Index associated with this Transfer Ring
+ * @bamPipeIndex: The BAM Pipe associated with this channel
+ * @isOutChannel: Indication for the direction of channel
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiCnlInfoData_t {
+	u16 transferRingSize;
+	u8  transferRingIndex;
+	u8  eventRingIndex;
+	u8  bamPipeIndex;
+	u8  isOutChannel;
+	u8  reserved_0;
+	u8  reserved_1;
+};
+
+/**
+ * Structure holding the MHI Event Config info
+ * @msiVec: msi vector to invoke MSI interrupt
+ * @intmodtValue: Interrupt moderation timer (in milliseconds)
+ * @eventRingSize: The Event Ring size in terms of Ring Elements
+ * @eventRingIndex: The Event Ring number as defined by host
+ * @reserved_0: Reserved byte for maintaining 4byte alignment
+ * @reserved_1: Reserved byte for maintaining 4byte alignment
+ * @reserved_2: Reserved byte for maintaining 4byte alignment
+ */
+struct IpaHwConfigMhiEventInfoData_t {
+	u32 msiVec;
+	u16 intmodtValue;
+	u16 eventRingSize;
+	u8  eventRingIndex;
+	u8  reserved_0;
+	u8  reserved_1;
+	u8  reserved_2;
+};
+
+/**
+ * Structure holding the MHI Config info
+ * @mhiCmnCfg: Common Config pertaining to MHI
+ * @mhiMsiCfg: Config pertaining to MSI config
+ * @mhiCnlCfg: Config pertaining to each channel
+ * @mhiEvtCfg: Config pertaining to each event Ring
+ */
+struct IpaHwConfigMhiInfoData_t {
+	struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
+	struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
+	struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
+						IPA_HW_MAX_NUMBER_OF_CHANNELS];
+	struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
+					IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
+};
+
+
+struct ipa3_uc_mhi_ctx {
+	u8 expected_responseOp;
+	u32 expected_responseParams;
+	void (*ready_cb)(void);
+	void (*wakeup_request_cb)(void);
+	u32 mhi_uc_stats_ofst;
+	struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
+};
+
+#define PRINT_COMMON_STATS(x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
+
+#define PRINT_CHANNEL_STATS(ch, x) \
+	(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
+	#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
+
+struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx;
+
+static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio, u32 *uc_status)
+{
+	IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
+	if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp &&
+	    uc_sram_mmio->responseParams ==
+	    ipa3_uc_mhi_ctx->expected_responseParams) {
+		*uc_status = 0;
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
+	*uc_sram_mmio)
+{
+	if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+	    IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
+		union IpaHwMhiChannelErrorEventData_t evt;
+
+		IPAERR("Channel error\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
+			evt.params.errorType, evt.params.channelHandle,
+			evt.params.reserved);
+	} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
+		   IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
+		union IpaHwMhiChannelWakeupEventData_t evt;
+
+		IPADBG("WakeUp channel request\n");
+		evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("channelHandle=%d reserved=%d\n",
+			evt.params.channelHandle, evt.params.reserved);
+		ipa3_uc_mhi_ctx->wakeup_request_cb();
+	}
+}
+
+static void ipa3_uc_mhi_event_log_info_hdlr(
+	struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
+		IPAERR("MHI feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_MHI].
+		params.size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
+		IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsMhiInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_MHI].params.size);
+		return;
+	}
+
+	ipa3_uc_mhi_ctx->mhi_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_MHI].params.offset;
+	IPAERR("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+	if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsMhiInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
+			ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_uc_mhi_ctx->mhi_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_uc_mhi_ctx->mhi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsMhiInfoData_t));
+	if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc mhi stats\n");
+		return;
+	}
+}
+
+int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
+{
+	struct ipa3_uc_hdlrs hdlrs;
+
+	if (ipa3_uc_mhi_ctx) {
+		IPAERR("Already initialized\n");
+		return -EFAULT;
+	}
+
+	ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL);
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	ipa3_uc_mhi_ctx->ready_cb = ready_cb;
+	ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
+
+	memset(&hdlrs, 0, sizeof(hdlrs));
+	hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb;
+	hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr;
+	hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr;
+	hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr;
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
+
+	IPADBG("Done\n");
+	return 0;
+}
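+
+/*
+ * Illustrative sketch (hypothetical callbacks, for clarity only): register
+ * the MHI uC handlers once during MHI client initialization.
+ *
+ *	static void my_mhi_ready_cb(void) { ... }
+ *	static void my_mhi_wakeup_cb(void) { ... }
+ *
+ *	if (ipa3_uc_mhi_init(my_mhi_ready_cb, my_mhi_wakeup_cb))
+ *		IPAERR("uC MHI init failed\n");
+ */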
+
+void ipa3_uc_mhi_cleanup(void)
+{
+	struct ipa3_uc_hdlrs null_hdlrs = { 0 };
+
+	IPADBG("Enter\n");
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
+		return;
+	}
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
+	kfree(ipa3_uc_mhi_ctx);
+	ipa3_uc_mhi_ctx = NULL;
+
+	IPADBG("Done\n");
+}
+
+int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
+	u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
+	u32 first_evt_idx)
+{
+	int res;
+	struct ipa_mem_buffer mem;
+	struct IpaHwMhiInitCmdData_t *init_cmd_data;
+	struct IpaHwMhiMsiCmdData_t *msi_cmd;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_update_hw_flags(0);
+	if (res) {
+		IPAERR("ipa3_uc_update_hw_flags failed %d\n", res);
+		goto disable_clks;
+	}
+
+	mem.size = sizeof(*init_cmd_data);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+	memset(mem.base, 0, mem.size);
+	init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
+	init_cmd_data->msiAddress = msi->addr_low;
+	init_cmd_data->mmioBaseAddress = mmio_addr;
+	init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
+	init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
+	init_cmd_data->firstChannelIndex = first_ch_idx;
+	init_cmd_data->firstEventRingIndex = first_evt_idx;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
+		false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
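+	/* The INIT payload is no longer needed once ipa3_uc_send_cmd()
+	 * returns, so 'mem' is freed and then reused for the MSI update
+	 * command below.
+	 */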
+
+	mem.size = sizeof(*msi_cmd);
+	mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
+		GFP_KERNEL);
+	if (!mem.base) {
+		IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
+		res = -ENOMEM;
+		goto disable_clks;
+	}
+
+	msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
+	msi_cmd->msiAddress_hi = msi->addr_hi;
+	msi_cmd->msiAddress_low = msi->addr_low;
+	msi_cmd->msiData = msi->data;
+	msi_cmd->msiMask = msi->mask;
+	res = ipa3_uc_send_cmd((u32)mem.phys_base,
+		IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
+			mem.phys_base);
+		goto disable_clks;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+
+}
+
+int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
+	int contexArrayIndex, int channelDirection)
+{
+	int res;
+	union IpaHwMhiInitChannelCmdData_t init_cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Invalid ipa_ep_idx.\n");
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
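+	/* Record the reply we expect from the uC: a CHANGE_CHANNEL_STATE
+	 * response reporting the RUN state for this channel handle. The
+	 * response handler compares against these fields.
+	 */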
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&init_cmd, 0, sizeof(init_cmd));
+	init_cmd.params.channelHandle = channelHandle;
+	init_cmd.params.contexArrayIndex = contexArrayIndex;
+	init_cmd.params.bamPipeId = ipa_ep_idx;
+	init_cmd.params.channelDirection = channelDirection;
+
+	res = ipa3_uc_send_cmd(init_cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+
+int ipa3_uc_mhi_reset_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_suspend_channel(int channelHandle)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
+	cmd.params.channelHandle = channelHandle;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
+{
+	union IpaHwMhiChangeChannelStateCmdData_t cmd;
+	union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&uc_rsp, 0, sizeof(uc_rsp));
+	uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	uc_rsp.params.channelHandle = channelHandle;
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
+	cmd.params.channelHandle = channelHandle;
+	cmd.params.LPTransitionRejected = LPTransitionRejected;
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
+{
+	union IpaHwMhiStopEventUpdateData_t cmd;
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.params.channelHandle = channelHandle;
+
+	ipa3_uc_mhi_ctx->expected_responseOp =
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
+	ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
+
+	res = ipa3_uc_send_cmd(cmd.raw32b,
+		IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
+{
+	int res;
+
+	if (!ipa3_uc_mhi_ctx) {
+		IPAERR("Not initialized\n");
+		return -EFAULT;
+	}
+
+	IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
+		cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
+	IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
+		cmd->params.ulMsiEventThreshold,
+		cmd->params.dlMsiEventThreshold);
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	res = ipa3_uc_send_cmd(cmd->raw32b,
+		IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
+	if (res) {
+		IPAERR("ipa3_uc_send_cmd failed %d\n", res);
+		goto disable_clks;
+	}
+
+	res = 0;
+disable_clks:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return res;
+}
+
+int ipa3_uc_mhi_print_stats(char *dbg_buff, int size)
+{
+	int nBytes = 0;
+	int i;
+
+	if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
+		IPAERR("MHI uc stats is not valid\n");
+		return 0;
+	}
+
+	nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+		"Common Stats:\n");
+	PRINT_COMMON_STATS(numULDLSync);
+	PRINT_COMMON_STATS(numULTimerExpired);
+	PRINT_COMMON_STATS(numChEvCtxWpRead);
+
+	for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
+		nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
+			"Channel %d Stats:\n", i);
+		PRINT_CHANNEL_STATS(i, doorbellInt);
+		PRINT_CHANNEL_STATS(i, reProccesed);
+		PRINT_CHANNEL_STATS(i, bamFifoFull);
+		PRINT_CHANNEL_STATS(i, bamFifoEmpty);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
+		PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
+		PRINT_CHANNEL_STATS(i, bamInt);
+		PRINT_CHANNEL_STATS(i, ringFull);
+		PRINT_CHANNEL_STATS(i, ringEmpty);
+		PRINT_CHANNEL_STATS(i, ringUsageHigh);
+		PRINT_CHANNEL_STATS(i, ringUsageLow);
+		PRINT_CHANNEL_STATS(i, delayedMsi);
+		PRINT_CHANNEL_STATS(i, immediateMsi);
+		PRINT_CHANNEL_STATS(i, thresholdMsi);
+		PRINT_CHANNEL_STATS(i, numSuspend);
+		PRINT_CHANNEL_STATS(i, numResume);
+		PRINT_CHANNEL_STATS(i, num_OOB);
+		PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
+		PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
+		PRINT_CHANNEL_STATS(i, num_db_mode_evt);
+	}
+
+	return nBytes;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
new file mode 100644
index 0000000..7b89184
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -0,0 +1,410 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+
+#define IPA_UC_NTN_DB_PA_TX 0x79620DC
+#define IPA_UC_NTN_DB_PA_RX 0x79620D8
+
+static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
+				     *uc_sram_mmio)
+{
+	union Ipa3HwNTNErrorEventData_t ntn_evt;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_NTN_ERROR) {
+		ntn_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
+			   ntn_evt.params.ntn_error_type,
+			   ntn_evt.params.ipa_pipe_number,
+			   ntn_evt.params.ntn_ch_err_type);
+	}
+}
+
+static void ipa3_uc_ntn_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
+		IPAERR("NTN feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_NTN].
+		params.size != sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
+		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
+			   sizeof(struct Ipa3HwStatsNTNInfoData_t),
+			   uc_event_top_mmio->statsInfo.
+			   featureInfo[IPA_HW_FEATURE_NTN].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+	IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
+		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
+			   ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
+		sizeof(struct Ipa3HwStatsNTNInfoData_t));
+	if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc ntn stats\n");
+		return;
+	}
+}
+
+/**
+ * ipa3_get_ntn_stats() - Query NTN statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
+#define RX_STATS(y) stats->rx_ch_stats[0].y = \
+	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
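+/* Each *_STATS(field) copies one counter from the uC stats MMIO area into
+ * the caller-provided blob.
+ */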
+
+	if (unlikely(!ipa3_ctx)) {
+		IPAERR("IPA driver was not initialized\n");
+		return -EINVAL;
+	}
+
+	if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p ntn_stats=%p\n",
+			stats,
+			ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
+		return -EINVAL;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(tail_ptr_val);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+	TX_STATS(num_bam_int_handled_while_not_in_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(num_bam_int_handled_while_not_in_bam);
+	RX_STATS(num_bam_int_handled_while_in_bam_state);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+int ipa3_ntn_init(void)
+{
+	struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
+
+	uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
+	uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
+		ipa3_uc_ntn_event_log_info_handler;
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
+
+	return 0;
+}
+
+static int ipa3_uc_send_ntn_setup_pipe_cmd(
+	struct ipa_ntn_setup_info *ntn_info, u8 dir)
+{
+	int ipa_ep_idx;
+	int result = 0;
+	struct ipa_mem_buffer cmd;
+	struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
+	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
+
+	if (ntn_info == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to get ep idx.\n");
+		return -EFAULT;
+	}
+
+	IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
+
+	IPADBG("ring_base_pa = 0x%pa\n",
+			&ntn_info->ring_base_pa);
+	IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
+	IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
+	IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
+	IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
+	IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
+	Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
+	Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
+	Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
+	Ntn_params->num_buffers = ntn_info->num_buffers;
+	Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
+	Ntn_params->data_buff_size = ntn_info->data_buff_size;
+	Ntn_params->ipa_pipe_number = ipa_ep_idx;
+	Ntn_params->dir = dir;
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result)
+		result = -EFAULT;
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	return result;
+}
+
+/**
+ * ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
+ */
+int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
+	ipa_notify_cb notify, void *priv, u8 hdr_len,
+	struct ipa_ntn_conn_out_params *outp)
+{
+	struct ipa3_ep_context *ep_ul;
+	struct ipa3_ep_context *ep_dl;
+	int ipa_ep_idx_ul;
+	int ipa_ep_idx_dl;
+	int result = 0;
+
+	if (in == NULL) {
+		IPAERR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
+	if (ipa_ep_idx_ul == -1 || ipa_ep_idx_dl == -1) {
+		IPAERR("fail to alloc EP.\n");
+		return -EFAULT;
+	}
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->valid || ep_dl->valid) {
+		IPAERR("EP already allocated.\n");
+		return -EFAULT;
+	}
+
+	memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
+	memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
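+	/* memset up to offsetof(..., sys): fields from 'sys' onward are left
+	 * untouched, presumably to preserve any existing sys pipe context.
+	 */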
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	/* setup ul ep cfg */
+	ep_ul->valid = 1;
+	ep_ul->client = in->ul.client;
+	result = ipa3_enable_data_path(ipa_ep_idx_ul);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_ul);
+		result = -EFAULT;
+		goto fail;
+	}
+	ep_ul->client_notify = notify;
+	ep_ul->priv = priv;
+
+	memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
+	ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
+	ep_ul->cfg.hdr.hdr_len = hdr_len;
+	ep_ul->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
+		IPAERR("fail to setup ul pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
+		IPAERR("fail to send cmd to uc for ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
+	outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
+	ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->ul.client,
+		ipa_ep_idx_ul);
+
+	/* setup dl ep cfg */
+	ep_dl->valid = 1;
+	ep_dl->client = in->dl.client;
+	result = ipa3_enable_data_path(ipa_ep_idx_dl);
+	if (result) {
+		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx_dl);
+		result = -EFAULT;
+		goto fail;
+	}
+
+	memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
+	ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
+	ep_dl->cfg.hdr.hdr_len = hdr_len;
+	ep_dl->cfg.mode.mode = IPA_BASIC;
+
+	if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
+		IPAERR("fail to setup dl pipe cfg\n");
+		result = -EFAULT;
+		goto fail;
+	}
+
+	if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
+		IPAERR("fail to send cmd to uc for dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
+	ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->dl.client,
+		ipa_ep_idx_dl);
+
+fail:
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
+
+/**
+ * ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
+ */
+int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
+		int ipa_ep_idx_dl)
+{
+	struct ipa_mem_buffer cmd;
+	struct ipa3_ep_context *ep_ul, *ep_dl;
+	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
+	union Ipa3HwNtnCommonChCmdData_t *tear;
+	int result = 0;
+
+	IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
+	IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
+
+	ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
+	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
+
+	if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
+		ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
+		IPAERR("channel bad state: ul %d dl %d\n",
+			ep_ul->uc_offload_state, ep_dl->uc_offload_state);
+		return -EFAULT;
+	}
+
+	cmd.size = sizeof(*cmd_data);
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+		&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		return -ENOMEM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	/* teardown the UL pipe */
+	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
+	cmd_data->protocol = IPA_HW_FEATURE_NTN;
+
+	tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
+	tear->params.ipa_pipe_number = ipa_ep_idx_ul;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down ul pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_disable_data_path(ipa_ep_idx_ul);
+	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
+
+	/* teardown the DL pipe */
+	tear->params.ipa_pipe_number = ipa_ep_idx_dl;
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+	if (result) {
+		IPAERR("fail to tear down dl pipe\n");
+		result = -EFAULT;
+		goto fail;
+	}
+	ipa3_disable_data_path(ipa_ep_idx_dl);
+	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
+
+fail:
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	return result;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
new file mode 100644
index 0000000..946fc7e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -0,0 +1,580 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_I_H_
+#define _IPA_UC_OFFLOAD_I_H_
+
+#include <linux/ipa.h>
+#include "ipa_i.h"
+
+/*
+ * Neutrino protocol related data structures
+ */
+
+#define IPA_UC_MAX_NTN_TX_CHANNELS 1
+#define IPA_UC_MAX_NTN_RX_CHANNELS 1
+
+#define IPA_NTN_TX_DIR 1
+#define IPA_NTN_RX_DIR 2
+
+/**
+ *  @brief   Enum value determined based on the feature it
+ *           corresponds to
+ *  +----------------+----------------+
+ *  |    3 bits      |     5 bits     |
+ *  +----------------+----------------+
+ *  |   HW_FEATURE   |     OPCODE     |
+ *  +----------------+----------------+
+ *
+ */
+#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
+#define EXTRACT_UC_FEATURE(value) (value >> 5)
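+/*
+ * Example: IPA_HW_2_CPU_EVENT_NTN_ERROR below is
+ * FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0) = (0x5 << 5) | 0 = 0xA0, and
+ * EXTRACT_UC_FEATURE(0xA0) recovers IPA_HW_FEATURE_NTN (0x5).
+ */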
+
+#define IPA_HW_NUM_FEATURES 0x8
+
+/**
+ * enum ipa3_hw_features - Values that represent the features supported
+ * in IPA HW
+ * @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
+ * @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
+ * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
+ * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_ZIP: Feature related to CMP/DCMP operation in IPA HW
+ * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature related to offload operation in IPA HW
+ */
+enum ipa3_hw_features {
+	IPA_HW_FEATURE_COMMON		=	0x0,
+	IPA_HW_FEATURE_MHI		=	0x1,
+	IPA_HW_FEATURE_POWER_COLLAPSE	=	0x2,
+	IPA_HW_FEATURE_WDI		=	0x3,
+	IPA_HW_FEATURE_ZIP		=	0x4,
+	IPA_HW_FEATURE_NTN		=	0x5,
+	IPA_HW_FEATURE_OFFLOAD	=	0x6,
+	IPA_HW_FEATURE_MAX		=	IPA_HW_NUM_FEATURES
+};
+
+/**
+ * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
+ * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
+ * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
+ *  device
+ * @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
+ */
+enum ipa3_hw_2_cpu_events {
+	IPA_HW_2_CPU_EVENT_NO_OP     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_2_CPU_EVENT_ERROR     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_2_CPU_EVENT_LOG_INFO  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+};
+
+/**
+ * enum ipa3_hw_errors - Common error types.
+ * @IPA_HW_ERROR_NONE : No error persists
+ * @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
+ * @IPA_HW_DMA_ERROR : Unexpected DMA error
+ * @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
+ * @IPA_HW_INVALID_OPCODE : Invalid opcode sent
+ * @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
+ * @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
+ */
+enum ipa3_hw_errors {
+	IPA_HW_ERROR_NONE              =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
+	IPA_HW_INVALID_DOORBELL_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
+	IPA_HW_DMA_ERROR               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
+	IPA_HW_FATAL_SYSTEM_ERROR      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
+	IPA_HW_INVALID_OPCODE          =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
+	IPA_HW_INVALID_PARAMS        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
+	IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
+	IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
+	IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8)
+};
+
+/**
+ * struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
+ * section in 128B shared memory located in offset zero of SW Partition in IPA
+ * SRAM.
+ * @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
+ * @cmdParams : CPU->HW command parameter, lower 32 bits. The parameter field
+ * can hold immediate parameters or point to a structure in system memory
+ * (in such case the address must be accessible to HW)
+ * @cmdParams_hi : CPU->HW command parameter, higher 32 bits.
+ * @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
+ * @responseParams : HW->CPU response parameter. The parameter field can hold
+ * 32 bits of immediate parameters or point to a structure in system memory
+ * @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
+ * @eventParams : HW->CPU event parameter. The parameter field can hold
+ *		32 bits of immediate parameters or point to a structure in
+ *		system memory
+ * @firstErrorAddress : Contains the address of first error-source on SNOC
+ * @hwState : State of HW. The state carries information regarding the
+ *				error type.
+ * @warningCounter : The warnings counter. The counter carries information
+ *						regarding non fatal errors in HW
+ * @interfaceVersionCommon : The Common interface version as reported by HW
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemCommonMapping_t {
+	u8  cmdOp;
+	u8  reserved_01;
+	u16 reserved_03_02;
+	u32 cmdParams;
+	u32 cmdParams_hi;
+	u8  responseOp;
+	u8  reserved_0D;
+	u16 reserved_0F_0E;
+	u32 responseParams;
+	u8  eventOp;
+	u8  reserved_15;
+	u16 reserved_17_16;
+	u32 eventParams;
+	u32 firstErrorAddress;
+	u8  hwState;
+	u8  warningCounter;
+	u16 reserved_23_22;
+	u16 interfaceVersionCommon;
+	u16 reserved_27_26;
+} __packed;
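+/*
+ * Note: the reserved_* field names encode the byte offsets they occupy in
+ * the shared memory map (e.g. reserved_0F_0E covers bytes 0x0E-0x0F).
+ */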
+
+/**
+ * union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
+ *
+ * @offset : Location of a feature within the EventInfoData
+ * @size : Size of the feature
+ */
+union Ipa3HwFeatureInfoData_t {
+	struct IpaHwFeatureInfoParams_t {
+		u32 offset:16;
+		u32 size:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwErrorEventData_t - HW->CPU Common Events
+ * @errorType : Entered when a system error is detected by the HW. Type of
+ * error is specified by IPA_HW_ERRORS
+ * @reserved : Reserved
+ */
+union IpaHwErrorEventData_t {
+	struct IpaHwErrorEventParams_t {
+		u32 errorType:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * struct Ipa3HwEventInfoData_t - Structure holding the parameters for
+ * statistics and config info
+ *
+ * @baseAddrOffset : Base Address Offset of the statistics or config
+ * structure from IPA_WRAPPER_BASE
+ * @featureInfo : Location and size of each feature within
+ * the statistics or config structure
+ *
+ * @note    Information about each feature in the featureInfo[]
+ * array is populated at predefined indices per the IPA_HW_FEATURES
+ * enum definition
+ */
+struct Ipa3HwEventInfoData_t {
+	u32 baseAddrOffset;
+	union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
+} __packed;
+
+/**
+ * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
+ * IPA_HW_2_CPU_EVENT_LOG_INFO Event
+ *
+ * @featureMask : Mask indicating the features enabled in HW.
+ * Refer IPA_HW_FEATURE_MASK
+ * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
+ * Log Buffer structure
+ * @statsInfo : Statistics related information
+ * @configInfo : Configuration related information
+ *
+ * @note    The offset location of this structure from IPA_WRAPPER_BASE
+ * will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
+ * Event
+ */
+struct IpaHwEventLogInfoData_t {
+	u32 featureMask;
+	u32 circBuffBaseAddrOffset;
+	struct Ipa3HwEventInfoData_t statsInfo;
+	struct Ipa3HwEventInfoData_t configInfo;
+
+} __packed;
+
+/**
+ * struct ipa3_uc_ntn_ctx
+ * @ntn_uc_stats_ofst: Neutrino stats offset
+ * @ntn_uc_stats_mmio: Neutrino stats
+ * @priv: private data of client
+ * @uc_ready_cb: uc Ready cb
+ */
+struct ipa3_uc_ntn_ctx {
+	u32 ntn_uc_stats_ofst;
+	struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
+	void *priv;
+	ipa_uc_ready_cb uc_ready_cb;
+};
+
+/**
+ * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
+ *			to be sent to CPU
+ * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
+ *			detected an error in NTN
+ *
+ */
+enum ipa3_hw_2_cpu_ntn_events {
+	IPA_HW_2_CPU_EVENT_NTN_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
+};
+
+
+/**
+ * enum ipa3_hw_ntn_errors - NTN specific error types.
+ * @IPA_HW_NTN_ERROR_NONE : No error persists
+ * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa3_hw_ntn_errors {
+	IPA_HW_NTN_ERROR_NONE    = 0,
+	IPA_HW_NTN_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_states - Values that represent NTN
+ * channel state machine.
+ * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
+ *     Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ * be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_states {
+	IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_NTN_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_NTN_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_NTN_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+/**
+ * enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
+ * types. This is present in the event param
+ * @IPA_HW_NTN_CH_ERR_NONE: No error persists
+ * @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
+ *		num RE to bring
+ * @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
+ *		failed in Rx ring
+ * @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
+ *		transition
+ * @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
+ * @IPA_HW_NTN_CH_ERR_RESERVED:
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in.
+ */
+enum ipa3_hw_ntn_channel_errors {
+	IPA_HW_NTN_CH_ERR_NONE            = 0,
+	IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_NTN_TX_FSM_ERROR           = 2,
+	IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL  = 3,
+	IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
+	IPA_HW_NTN_RX_FSM_ERROR           = 5,
+	IPA_HW_NTN_RX_CACHE_NON_EMPTY     = 6,
+	IPA_HW_NTN_CH_ERR_RESERVED        = 0xFF
+};
+
+
+/**
+ * struct Ipa3HwNtnSetUpCmdData_t  - Ntn setup command data
+ * @ring_base_pa: physical address of the base of the Tx/Rx NTN
+ *  ring
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ *  buffer pool
+ * @ntn_ring_size: size of the Tx/Rx NTN ring
+ * @num_buffers: Rx/tx buffer pool size
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
+ *  Ring's tail pointer
+ * @ipa_pipe_number: IPA pipe number that has to be used for the
+ *  Tx/Rx path
+ * @dir: Tx/Rx Direction
+ * @data_buff_size: size of the each data buffer allocated in
+ *  DDR
+ */
+struct Ipa3HwNtnSetUpCmdData_t {
+	u32 ring_base_pa;
+	u32 buff_pool_base_pa;
+	u16 ntn_ring_size;
+	u16 num_buffers;
+	u32 ntn_reg_base_ptr_pa;
+	u8  ipa_pipe_number;
+	u8  dir;
+	u16 data_buff_size;
+
+} __packed;
+
+/**
+ * union Ipa3HwNtnCommonChCmdData_t - Structure holding the
+ * parameters for the NTN tear down command
+ *
+ *@ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
+ */
+union Ipa3HwNtnCommonChCmdData_t {
+	struct IpaHwNtnCommonChCmdParams_t {
+		u32  ipa_pipe_number :8;
+		u32  reserved        :24;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+
+/**
+ * union Ipa3HwNTNErrorEventData_t - Structure holding the
+ * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
+ * as immediate params in the shared memory
+ *
+ *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
+ *@ipa_pipe_number: IPA pipe number on which error has happened
+ *   Applicable only if error type indicates channel error
+ *@ntn_ch_err_type: Information about the channel error (if
+ *		available)
+ */
+union Ipa3HwNTNErrorEventData_t {
+	struct IpaHwNTNErrorEventParams_t {
+		u32  ntn_error_type  :8;
+		u32  reserved        :8;
+		u32  ipa_pipe_number :8;
+		u32  ntn_ch_err_type :8;
+	} __packed params;
+	uint32_t raw32b;
+} __packed;
+
+/**
+ * struct NTN3RxInfoData_t - Structure holding the NTN Rx channel
+ * information
+ *
+ *@max_outstanding_pkts: Number of outstanding packets in Rx
+ *		Ring
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@rx_ring_rp_value: Read pointer last advertised to the WLAN FW
+ *
+ *@rx_ind_ring_stats:
+ *@bam_stats:
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_pkts_in_dis_uninit_state:
+ *@num_bam_int_handled_while_not_in_bam: Number of Bam
+ *		Interrupts handled by FW
+ *@num_bam_int_handled_while_in_bam_state: Number of Bam
+ *   Interrupts handled by FW
+ */
+struct NTN3RxInfoData_t {
+	u32  max_outstanding_pkts;
+	u32  num_pkts_processed;
+	u32  rx_ring_rp_value;
+	struct IpaHwRingStats_t rx_ind_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32  num_bam_int_handled;
+	u32  num_db;
+	u32  num_unexpected_db;
+	u32  num_pkts_in_dis_uninit_state;
+	u32  num_bam_int_handled_while_not_in_bam;
+	u32  num_bam_int_handled_while_in_bam_state;
+} __packed;
+
+
+/**
+ * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * information. Ensure that this is always word aligned
+ *
+ *@num_pkts_processed: Number of packets processed - cumulative
+ *@tail_ptr_val: Latest value of doorbell written to copy engine
+ *@num_db_fired: Number of DB from uC FW to Copy engine
+ *
+ *@tx_comp_ring_stats:
+ *@bam_stats:
+ *@num_db: Number of times the doorbell was rung
+ *@num_unexpected_db: Number of unexpected doorbells
+ *@num_bam_int_handled: Number of Bam Interrupts handled by FW
+ *@num_bam_int_in_non_running_state: Number of Bam interrupts
+ *			while not in Running state
+ *@num_qmb_int_handled: Number of QMB interrupts handled
+ *@num_bam_int_handled_while_wait_for_bam: Number of times the
+ *		Imm Cmd is injected due to fw_desc change
+ */
+struct NTNTxInfoData_t {
+	u32  num_pkts_processed;
+	u32  tail_ptr_val;
+	u32  num_db_fired;
+	struct IpaHwRingStats_t tx_comp_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32  num_db;
+	u32  num_unexpected_db;
+	u32  num_bam_int_handled;
+	u32  num_bam_int_in_non_running_state;
+	u32  num_qmb_int_handled;
+	u32  num_bam_int_handled_while_wait_for_bam;
+	u32  num_bam_int_handled_while_not_in_bam;
+} __packed;
+
+
+/**
+ * struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx and Tx
+ * channel statistics. Ensure that this is always word aligned
+ *
+ */
+struct Ipa3HwStatsNTNInfoData_t {
+	struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
+	struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+} __packed;
+
+
+/*
+ * uC offload related data structures
+ */
+#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
+#define IPA_UC_OFFLOAD_ENABLED BIT(1)
+#define IPA_UC_OFFLOAD_RESUMED BIT(2)
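+/*
+ * uc_offload_state in struct ipa3_ep_context is a bitmask of the flags
+ * above; e.g. ipa3_tear_down_uc_offload_pipes() requires both endpoints to
+ * be exactly IPA_UC_OFFLOAD_CONNECTED.
+ */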
+
+/**
+ * enum ipa_cpu_2_hw_offload_commands -  Values that represent
+ * the offload commands from CPU
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
+ *				Offload protocol's Tx/Rx Path
+ * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
+ *				Offload protocol's Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_offload_commands {
+	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
+};
+
+
+/**
+ * enum ipa3_hw_offload_channel_states - Values that represent
+ * offload channel state machine.
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
+ *			initialized but disabled
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
+ *			Entered after SET_UP_COMMAND is processed successfully
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
+ *				be in use in operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not
+ * reflect the sub-state the state machine may be in
+ */
+enum ipa3_hw_offload_channel_states {
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING  = 2,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR    = 3,
+	IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID  = 0xFF
+};
+
+
+/**
+ * enum ipa3_hw_2_cpu_offload_cmd_resp_status - Values that represent
+ * offload related command response status to be sent to CPU.
+ */
+enum ipa3_hw_2_cpu_offload_cmd_resp_status {
+	IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
+	IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
+	IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
+	IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
+	IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
+	IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
+	IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
+	IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
+	IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
+	IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
+};
+
+/**
+ * union IpaHwSetUpCmd - Channel set up parameters, per offload protocol
+ */
+union IpaHwSetUpCmd {
+	struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
+} __packed;
+
+/**
+ * struct IpaHwOffloadSetUpCmdData_t - Parameters for
+ * IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP: the offload protocol and its
+ * channel set up parameters
+ */
+struct IpaHwOffloadSetUpCmdData_t {
+	u8 protocol;
+	union IpaHwSetUpCmd SetupCh_params;
+} __packed;
+
+/**
+ * union IpaHwCommonChCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
+ */
+union IpaHwCommonChCmd {
+	union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
+} __packed;
+
+struct IpaHwOffloadCommonChCmdData_t {
+	u8 protocol;
+	union IpaHwCommonChCmd CommonCh_params;
+} __packed;
+
+#endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
new file mode 100644
index 0000000..e1deb58
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -0,0 +1,1815 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include "ipa_i.h"
+#include <linux/dmapool.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+
+#define IPA_HOLB_TMR_DIS 0x0
+
+#define IPA_HW_INTERFACE_WDI_VERSION 0x0001
+#define IPA_HW_WDI_RX_MBOX_START_INDEX 48
+#define IPA_HW_WDI_TX_MBOX_START_INDEX 50
+#define IPA_WDI_RING_ALIGNMENT 8
+
+#define IPA_WDI_CONNECTED BIT(0)
+#define IPA_WDI_ENABLED BIT(1)
+#define IPA_WDI_RESUMED BIT(2)
+#define IPA_UC_POLL_SLEEP_USEC 100
+
+#define IPA_WDI_RX_RING_RES			0
+#define IPA_WDI_RX_RING_RP_RES		1
+#define IPA_WDI_RX_COMP_RING_RES	2
+#define IPA_WDI_RX_COMP_RING_WP_RES	3
+#define IPA_WDI_TX_RING_RES			4
+#define IPA_WDI_CE_RING_RES			5
+#define IPA_WDI_CE_DB_RES			6
+#define IPA_WDI_MAX_RES				7
+
+struct ipa_wdi_res {
+	struct ipa_wdi_buffer_info *res;
+	unsigned int nents;
+	bool valid;
+};
+
+static struct ipa_wdi_res wdi_res[IPA_WDI_MAX_RES];
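+/*
+ * wdi_res[] tracks the buffers SMMU-mapped for each WDI resource type
+ * (indexed by the IPA_WDI_*_RES defines above), presumably so the mappings
+ * can be released when the WDI pipes are disconnected.
+ */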
+
+static void ipa3_uc_wdi_loaded_handler(void);
+
+/**
+ * enum ipa_hw_2_cpu_wdi_events - Values that represent HW event to be sent to
+ * CPU.
+ * @IPA_HW_2_CPU_EVENT_WDI_ERROR : Event to specify that HW detected an error
+ * in WDI
+ */
+enum ipa_hw_2_cpu_wdi_events {
+	IPA_HW_2_CPU_EVENT_WDI_ERROR =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+};
+
+/**
+ * enum ipa_hw_wdi_channel_states - Values that represent WDI channel state
+ * machine.
+ * @IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED : Channel is initialized but
+ * disabled
+ * @IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND : Channel is enabled but in
+ * suspended state
+ * @IPA_HW_WDI_CHANNEL_STATE_RUNNING : Channel is running. Entered after
+ * SET_UP_COMMAND is processed successfully
+ * @IPA_HW_WDI_CHANNEL_STATE_ERROR : Channel is in error state
+ * @IPA_HW_WDI_CHANNEL_STATE_INVALID : Invalid state. Shall not be in use in
+ * operational scenario
+ *
+ * These states apply to both Tx and Rx paths. These do not reflect the
+ * sub-state the state machine may be in.
+ */
+enum ipa_hw_wdi_channel_states {
+	IPA_HW_WDI_CHANNEL_STATE_INITED_DISABLED = 1,
+	IPA_HW_WDI_CHANNEL_STATE_ENABLED_SUSPEND = 2,
+	IPA_HW_WDI_CHANNEL_STATE_RUNNING         = 3,
+	IPA_HW_WDI_CHANNEL_STATE_ERROR           = 4,
+	IPA_HW_WDI_CHANNEL_STATE_INVALID         = 0xFF
+};
+
+/**
+ * enum ipa_cpu_2_hw_wdi_commands - Values that represent the WDI commands from
+ * CPU
+ * @IPA_CPU_2_HW_CMD_WDI_TX_SET_UP : Command to set up WDI Tx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_SET_UP : Command to set up WDI Rx Path
+ * @IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG : Provide extended config info for Rx path
+ * @IPA_CPU_2_HW_CMD_WDI_CH_ENABLE : Command to enable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_DISABLE : Command to disable a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND : Command to suspend a channel
+ * @IPA_CPU_2_HW_CMD_WDI_CH_RESUME : Command to resume a channel
+ * @IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN : Command to tear down WDI Tx/ Rx Path
+ */
+enum ipa_cpu_2_hw_wdi_commands {
+	IPA_CPU_2_HW_CMD_WDI_TX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_CPU_2_HW_CMD_WDI_RX_SET_UP  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_CPU_2_HW_CMD_WDI_CH_ENABLE  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_CPU_2_HW_CMD_WDI_CH_DISABLE =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_CPU_2_HW_CMD_WDI_CH_RESUME  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN  =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+};
+
+/**
+ * enum ipa_hw_2_cpu_cmd_resp_status -  Values that represent WDI related
+ * command response status to be sent to CPU.
+ */
+enum ipa_hw_2_cpu_cmd_resp_status {
+	IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 0),
+	IPA_HW_2_CPU_MAX_WDI_TX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 1),
+	IPA_HW_2_CPU_WDI_CE_RING_OVERRUN_POSSIBILITY   =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 2),
+	IPA_HW_2_CPU_WDI_CE_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 3),
+	IPA_HW_2_CPU_WDI_CE_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 4),
+	IPA_HW_2_CPU_WDI_COMP_RING_OVERRUN_POSSIBILITY =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 5),
+	IPA_HW_2_CPU_WDI_COMP_RING_SET_UP_FAILURE      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 6),
+	IPA_HW_2_CPU_WDI_COMP_RING_PARAMS_UNALIGNED    =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 7),
+	IPA_HW_2_CPU_WDI_UNKNOWN_TX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 8),
+	IPA_HW_2_CPU_WDI_TX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 9),
+	IPA_HW_2_CPU_WDI_TX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 10),
+	IPA_HW_2_CPU_MAX_WDI_RX_CHANNELS               =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 11),
+	IPA_HW_2_CPU_WDI_RX_RING_PARAMS_UNALIGNED      =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 12),
+	IPA_HW_2_CPU_WDI_RX_RING_SET_UP_FAILURE        =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 13),
+	IPA_HW_2_CPU_WDI_UNKNOWN_RX_CHANNEL            =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 14),
+	IPA_HW_2_CPU_WDI_RX_INVALID_FSM_TRANSITION     =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 15),
+	IPA_HW_2_CPU_WDI_RX_FSM_TRANSITION_ERROR       =
+		FEATURE_ENUM_VAL(IPA_HW_FEATURE_WDI, 16),
+};
+
+/**
+ * enum ipa_hw_wdi_errors - WDI specific error types.
+ * @IPA_HW_WDI_ERROR_NONE : No error persists
+ * @IPA_HW_WDI_CHANNEL_ERROR : Error is specific to channel
+ */
+enum ipa_hw_wdi_errors {
+	IPA_HW_WDI_ERROR_NONE    = 0,
+	IPA_HW_WDI_CHANNEL_ERROR = 1
+};
+
+/**
+ * enum ipa_hw_wdi_ch_errors - List of WDI Channel error types. This is present
+ * in the event param.
+ * @IPA_HW_WDI_CH_ERR_NONE : No error persists
+ * @IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL : Write pointer update failed in Tx
+ * Completion ring
+ * @IPA_HW_WDI_TX_FSM_ERROR : Error in the state machine transition
+ * @IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL : Error while calculating num RE to bring
+ * @IPA_HW_WDI_CH_ERR_RESERVED : Reserved - Not available for CPU to use
+ */
+enum ipa_hw_wdi_ch_errors {
+	IPA_HW_WDI_CH_ERR_NONE                 = 0,
+	IPA_HW_WDI_TX_COMP_RING_WP_UPDATE_FAIL = 1,
+	IPA_HW_WDI_TX_FSM_ERROR                = 2,
+	IPA_HW_WDI_TX_COMP_RE_FETCH_FAIL       = 3,
+	IPA_HW_WDI_CH_ERR_RESERVED             = 0xFF
+};
+
+/**
+ * struct IpaHwSharedMemWdiMapping_t  - Structure referring to the common and
+ * WDI section of 128B shared memory located in offset zero of SW Partition in
+ * IPA SRAM.
+ *
+ * The shared memory is used for communication between IPA HW and CPU.
+ */
+struct IpaHwSharedMemWdiMapping_t {
+	struct IpaHwSharedMemCommonMapping_t common;
+	u32 reserved_2B_28;
+	u32 reserved_2F_2C;
+	u32 reserved_33_30;
+	u32 reserved_37_34;
+	u32 reserved_3B_38;
+	u32 reserved_3F_3C;
+	u16 interfaceVersionWdi;
+	u16 reserved_43_42;
+	u8  wdi_tx_ch_0_state;
+	u8  wdi_rx_ch_0_state;
+	u16 reserved_47_46;
+} __packed;
+
+/**
+ * struct IpaHwWdiTxSetUpCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TX_SET_UP command.
+ * @comp_ring_base_pa : This is the physical address of the base of the Tx
+ * completion ring
+ * @comp_ring_size : This is the size of the Tx completion ring
+ * @reserved_comp_ring : Reserved field for expansion of Completion ring params
+ * @ce_ring_base_pa : This is the physical address of the base of the Copy
+ * Engine Source Ring
+ * @ce_ring_size : Copy Engine Ring size
+ * @reserved_ce_ring : Reserved field for expansion of CE ring params
+ * @ce_ring_doorbell_pa : This is the physical address of the doorbell that the
+ * IPA uC has to write into to trigger the copy engine
+ * @num_tx_buffers : Number of pkt buffers allocated. The size of the CE ring
+ * and the Tx completion ring has to be at least (num_tx_buffers + 1)
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Tx path
+ * @reserved : Reserved field
+ *
+ * Parameters are sent as a pointer and thus should reside at an address
+ * accessible to HW
+ */
+struct IpaHwWdiTxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+
+struct IpaHwWdi2TxSetUpCmdData_t {
+	u32 comp_ring_base_pa;
+	u32 comp_ring_base_pa_hi;
+	u16 comp_ring_size;
+	u16 reserved_comp_ring;
+	u32 ce_ring_base_pa;
+	u32 ce_ring_base_pa_hi;
+	u16 ce_ring_size;
+	u16 reserved_ce_ring;
+	u32 ce_ring_doorbell_pa;
+	u32 ce_ring_doorbell_pa_hi;
+	u16 num_tx_buffers;
+	u8  ipa_pipe_number;
+	u8  reserved;
+} __packed;
+/**
+ * struct IpaHwWdiRxSetUpCmdData_t -  Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_SET_UP command.
+ * @rx_ring_base_pa : This is the physical address of the base of the Rx ring
+ * (containing Rx buffers)
+ * @rx_ring_size : This is the size of the Rx ring
+ * @rx_ring_rp_pa : This is the physical address of the location through which
+ * IPA uc is expected to communicate about the Read pointer into the Rx Ring
+ * @ipa_pipe_number : This is the IPA pipe number that has to be used for the
+ * Rx path
+ *
+ * Parameters are sent as a pointer and thus should reside at an address
+ * accessible to HW
+ */
+struct IpaHwWdiRxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u8  ipa_pipe_number;
+} __packed;
+
+struct IpaHwWdi2RxSetUpCmdData_t {
+	u32 rx_ring_base_pa;
+	u32 rx_ring_base_pa_hi;
+	u32 rx_ring_size;
+	u32 rx_ring_rp_pa;
+	u32 rx_ring_rp_pa_hi;
+	u32 rx_comp_ring_base_pa;
+	u32 rx_comp_ring_base_pa_hi;
+	u32 rx_comp_ring_size;
+	u32 rx_comp_ring_wp_pa;
+	u32 rx_comp_ring_wp_pa_hi;
+	u8  ipa_pipe_number;
+} __packed;
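+/*
+ * The IpaHwWdi2* variants extend the original set up structures with *_hi
+ * words so physical addresses wider than 32 bits can be passed; the Rx
+ * variant also carries a separate Rx completion ring.
+ */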
+/**
+ * union IpaHwWdiRxExtCfgCmdData_t - Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG command.
+ * @ipa_pipe_number : The IPA pipe number for which this config is passed
+ * @qmap_id : QMAP ID to be set in the metadata register
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiRxExtCfgCmdData_t {
+	struct IpaHwWdiRxExtCfgCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 qmap_id:8;
+		u32 reserved:16;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiCommonChCmdData_t -  Structure holding the parameters for
+ * IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+ * IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+ * IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+ * IPA_CPU_2_HW_CMD_WDI_CH_RESUME command.
+ * @ipa_pipe_number :  The IPA pipe number. This could be Tx or an Rx pipe
+ * @reserved : Reserved
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiCommonChCmdData_t {
+	struct IpaHwWdiCommonChCmdParams_t {
+		u32 ipa_pipe_number:8;
+		u32 reserved:24;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+/**
+ * union IpaHwWdiErrorEventData_t - parameters for IPA_HW_2_CPU_EVENT_WDI_ERROR
+ * event.
+ * @wdi_error_type : The type of WDI error (see enum ipa_hw_wdi_errors)
+ * @reserved : Reserved
+ * @ipa_pipe_number : IPA pipe number on which error has happened. Applicable
+ * only if error type indicates channel error
+ * @wdi_ch_err_type : Information about the channel error (if available)
+ *
+ * The parameters are passed as immediate params in the shared memory
+ */
+union IpaHwWdiErrorEventData_t {
+	struct IpaHwWdiErrorEventParams_t {
+		u32 wdi_error_type:8;
+		u32 reserved:8;
+		u32 ipa_pipe_number:8;
+		u32 wdi_ch_err_type:8;
+	} __packed params;
+	u32 raw32b;
+} __packed;
+
+static void ipa3_uc_wdi_event_log_info_handler(
+struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
+{
+	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_WDI)) == 0) {
+		IPAERR("WDI feature missing 0x%x\n",
+			uc_event_top_mmio->featureMask);
+		return;
+	}
+
+	if (uc_event_top_mmio->statsInfo.featureInfo[IPA_HW_FEATURE_WDI].
+		params.size != sizeof(struct IpaHwStatsWDIInfoData_t)) {
+		IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
+			sizeof(struct IpaHwStatsWDIInfoData_t),
+			uc_event_top_mmio->statsInfo.
+			featureInfo[IPA_HW_FEATURE_WDI].params.size);
+		return;
+	}
+
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst = uc_event_top_mmio->
+		statsInfo.baseAddrOffset + uc_event_top_mmio->statsInfo.
+		featureInfo[IPA_HW_FEATURE_WDI].params.offset;
+	IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+	if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
+		sizeof(struct IpaHwStatsWDIInfoData_t) >=
+		ipa3_ctx->ctrl->ipa_reg_base_ofst +
+		ipahal_get_reg_n_ofst(IPA_SRAM_DIRECT_ACCESS_n, 0) +
+		ipa3_ctx->smem_sz) {
+		IPAERR("uc_wdi_stats 0x%x outside SRAM\n",
+			ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
+		return;
+	}
+
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio =
+		ioremap(ipa3_ctx->ipa_wrapper_base +
+		ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst,
+		sizeof(struct IpaHwStatsWDIInfoData_t));
+	if (!ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("fail to ioremap uc wdi stats\n");
+		return;
+	}
+}
+
+static void ipa3_uc_wdi_event_handler(struct IpaHwSharedMemCommonMapping_t
+				     *uc_sram_mmio)
+{
+	union IpaHwWdiErrorEventData_t wdi_evt;
+	struct IpaHwSharedMemWdiMapping_t *wdi_sram_mmio_ext;
+
+	if (uc_sram_mmio->eventOp ==
+		IPA_HW_2_CPU_EVENT_WDI_ERROR) {
+		wdi_evt.raw32b = uc_sram_mmio->eventParams;
+		IPADBG("uC WDI evt errType=%u pipe=%d cherrType=%u\n",
+			wdi_evt.params.wdi_error_type,
+			wdi_evt.params.ipa_pipe_number,
+			wdi_evt.params.wdi_ch_err_type);
+		wdi_sram_mmio_ext =
+			(struct IpaHwSharedMemWdiMapping_t *)
+			uc_sram_mmio;
+		IPADBG("tx_ch_state=%u rx_ch_state=%u\n",
+			wdi_sram_mmio_ext->wdi_tx_ch_0_state,
+			wdi_sram_mmio_ext->wdi_rx_ch_0_state);
+	}
+}
+
+/**
+ * ipa3_get_wdi_stats() - Query WDI statistics from uc
+ * @stats:	[inout] stats blob from client populated by driver
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * @note Cannot be called from atomic context
+ *
+ */
+int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+#define TX_STATS(y) stats->tx_ch_stats.y = \
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->tx_ch_stats.y
+#define RX_STATS(y) stats->rx_ch_stats.y = \
+	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio->rx_ch_stats.y
+
+	if (!stats || !ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio) {
+		IPAERR("bad parms stats=%p wdi_stats=%p\n",
+			stats,
+			ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_mmio);
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	TX_STATS(num_pkts_processed);
+	TX_STATS(copy_engine_doorbell_value);
+	TX_STATS(num_db_fired);
+	TX_STATS(tx_comp_ring_stats.ringFull);
+	TX_STATS(tx_comp_ring_stats.ringEmpty);
+	TX_STATS(tx_comp_ring_stats.ringUsageHigh);
+	TX_STATS(tx_comp_ring_stats.ringUsageLow);
+	TX_STATS(tx_comp_ring_stats.RingUtilCount);
+	TX_STATS(bam_stats.bamFifoFull);
+	TX_STATS(bam_stats.bamFifoEmpty);
+	TX_STATS(bam_stats.bamFifoUsageHigh);
+	TX_STATS(bam_stats.bamFifoUsageLow);
+	TX_STATS(bam_stats.bamUtilCount);
+	TX_STATS(num_db);
+	TX_STATS(num_unexpected_db);
+	TX_STATS(num_bam_int_handled);
+	TX_STATS(num_bam_int_in_non_running_state);
+	TX_STATS(num_qmb_int_handled);
+	TX_STATS(num_bam_int_handled_while_wait_for_bam);
+
+	RX_STATS(max_outstanding_pkts);
+	RX_STATS(num_pkts_processed);
+	RX_STATS(rx_ring_rp_value);
+	RX_STATS(rx_ind_ring_stats.ringFull);
+	RX_STATS(rx_ind_ring_stats.ringEmpty);
+	RX_STATS(rx_ind_ring_stats.ringUsageHigh);
+	RX_STATS(rx_ind_ring_stats.ringUsageLow);
+	RX_STATS(rx_ind_ring_stats.RingUtilCount);
+	RX_STATS(bam_stats.bamFifoFull);
+	RX_STATS(bam_stats.bamFifoEmpty);
+	RX_STATS(bam_stats.bamFifoUsageHigh);
+	RX_STATS(bam_stats.bamFifoUsageLow);
+	RX_STATS(bam_stats.bamUtilCount);
+	RX_STATS(num_bam_int_handled);
+	RX_STATS(num_db);
+	RX_STATS(num_unexpected_db);
+	RX_STATS(num_pkts_in_dis_uninit_state);
+	RX_STATS(reserved1);
+	RX_STATS(reserved2);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
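+/*
+ * Illustrative sketch (not built): a minimal, hypothetical caller of
+ * ipa3_get_wdi_stats(). It only touches fields that the TX_STATS()/RX_STATS()
+ * macros above copy, and assumes those counters are 32-bit.
+ */
+#if 0
+static void example_log_wdi_stats(void)
+{
+	struct IpaHwStatsWDIInfoData_t stats;
+
+	memset(&stats, 0, sizeof(stats));
+	if (ipa3_get_wdi_stats(&stats))
+		return;
+	IPADBG("tx pkts=%u tx doorbells=%u rx pkts=%u\n",
+		stats.tx_ch_stats.num_pkts_processed,
+		stats.tx_ch_stats.num_db_fired,
+		stats.rx_ch_stats.num_pkts_processed);
+}
+#endif
+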
+int ipa3_wdi_init(void)
+{
+	struct ipa3_uc_hdlrs uc_wdi_cbs = { 0 };
+
+	uc_wdi_cbs.ipa_uc_event_hdlr = ipa3_uc_wdi_event_handler;
+	uc_wdi_cbs.ipa_uc_event_log_info_hdlr =
+		ipa3_uc_wdi_event_log_info_handler;
+	uc_wdi_cbs.ipa_uc_loaded_hdlr =
+		ipa3_uc_wdi_loaded_handler;
+
+	ipa3_uc_register_handlers(IPA_HW_FEATURE_WDI, &uc_wdi_cbs);
+
+	return 0;
+}
+
+static int ipa_create_uc_smmu_mapping_pa(phys_addr_t pa, size_t len,
+		bool device, unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	size_t true_len = roundup(len + pa - rounddown(pa, PAGE_SIZE),
+			PAGE_SIZE);
+	int ret;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	ret = ipa3_iommu_map(cb->mapping->domain, va, rounddown(pa, PAGE_SIZE),
+			true_len,
+			device ? (prot | IOMMU_DEVICE) : prot);
+	if (ret) {
+		IPAERR("iommu map failed for pa=%pa len=%zu\n", &pa, true_len);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->wdi_map_cnt++;
+	cb->next_addr = va + true_len;
+	*iova = va + pa - rounddown(pa, PAGE_SIZE);
+	return 0;
+}
+
+static int ipa_create_uc_smmu_mapping_sgt(struct sg_table *sgt,
+		unsigned long *iova)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	unsigned long va = roundup(cb->next_addr, PAGE_SIZE);
+	int prot = IOMMU_READ | IOMMU_WRITE;
+	int ret;
+	int i;
+	struct scatterlist *sg;
+	unsigned long start_iova = va;
+	phys_addr_t phys;
+	size_t len;
+	int count = 0;
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan-driver */
+		phys = sg->dma_address;
+		len = PAGE_ALIGN(sg->offset + sg->length);
+
+		ret = ipa3_iommu_map(cb->mapping->domain, va, phys, len, prot);
+		if (ret) {
+			IPAERR("iommu map failed for pa=%pa len=%zu\n",
+					&phys, len);
+			goto bad_mapping;
+		}
+		va += len;
+		ipa3_ctx->wdi_map_cnt++;
+		count++;
+	}
+	cb->next_addr = va;
+	*iova = start_iova;
+
+	return 0;
+
+bad_mapping:
+	for_each_sg(sgt->sgl, sg, count, i)
+		iommu_unmap(cb->mapping->domain, sg_dma_address(sg),
+				sg_dma_len(sg));
+	return -EINVAL;
+}
+
+static void ipa_release_uc_smmu_mappings(enum ipa_client_type client)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
+	int i;
+	int j;
+	int start;
+	int end;
+
+	if (IPA_CLIENT_IS_CONS(client)) {
+		start = IPA_WDI_TX_RING_RES;
+		end = IPA_WDI_CE_DB_RES;
+	} else {
+		start = IPA_WDI_RX_RING_RES;
+		if (ipa3_ctx->ipa_wdi2)
+			end = IPA_WDI_RX_COMP_RING_WP_RES;
+		else
+			end = IPA_WDI_RX_RING_RP_RES;
+	}
+
+	for (i = start; i <= end; i++) {
+		if (wdi_res[i].valid) {
+			for (j = 0; j < wdi_res[i].nents; j++) {
+				iommu_unmap(cb->mapping->domain,
+					wdi_res[i].res[j].iova,
+					wdi_res[i].res[j].size);
+				ipa3_ctx->wdi_map_cnt--;
+			}
+			kfree(wdi_res[i].res);
+			wdi_res[i].valid = false;
+		}
+	}
+
+	if (ipa3_ctx->wdi_map_cnt == 0)
+		cb->next_addr = cb->va_end;
+}
+
+static void ipa_save_uc_smmu_mapping_pa(int res_idx, phys_addr_t pa,
+		unsigned long iova, size_t len)
+{
+	IPADBG("--res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&pa, iova, len);
+	wdi_res[res_idx].res = kzalloc(sizeof(struct ipa_wdi_res), GFP_KERNEL);
+	if (!wdi_res[res_idx].res)
+		BUG();
+	wdi_res[res_idx].nents = 1;
+	wdi_res[res_idx].valid = true;
+	wdi_res[res_idx].res->pa = rounddown(pa, PAGE_SIZE);
+	wdi_res[res_idx].res->iova = rounddown(iova, PAGE_SIZE);
+	wdi_res[res_idx].res->size = roundup(len + pa - rounddown(pa,
+				PAGE_SIZE), PAGE_SIZE);
+	IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res->pa, wdi_res[res_idx].res->iova,
+			wdi_res[res_idx].res->size);
+}
+
+static void ipa_save_uc_smmu_mapping_sgt(int res_idx, struct sg_table *sgt,
+		unsigned long iova)
+{
+	int i;
+	struct scatterlist *sg;
+	unsigned long curr_iova = iova;
+
+	if (!sgt) {
+		IPAERR("Bad parameters, scatter / gather list is NULL\n");
+		return;
+	}
+
+	wdi_res[res_idx].res = kcalloc(sgt->nents, sizeof(struct ipa_wdi_res),
+			GFP_KERNEL);
+	if (!wdi_res[res_idx].res)
+		BUG();
+	wdi_res[res_idx].nents = sgt->nents;
+	wdi_res[res_idx].valid = true;
+	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+		/* directly get sg_tbl PA from wlan */
+		wdi_res[res_idx].res[i].pa = sg->dma_address;
+		wdi_res[res_idx].res[i].iova = curr_iova;
+		wdi_res[res_idx].res[i].size = PAGE_ALIGN(sg->offset +
+				sg->length);
+		IPADBG("res_idx=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", res_idx,
+			&wdi_res[res_idx].res[i].pa,
+			wdi_res[res_idx].res[i].iova,
+			wdi_res[res_idx].res[i].size);
+		curr_iova += wdi_res[res_idx].res[i].size;
+	}
+}
+
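+/*
+ * SMMU pairing cases handled by ipa_create_uc_smmu_mapping() below:
+ * - WLAN SMMU on,  IPA S1 bypass -> rejected (unsupported pairing)
+ * - WLAN SMMU off, IPA S1 bypass -> legacy: the PA is used as the IOVA
+ * - WLAN SMMU off, IPA S1 on     -> map the PA into the uC SMMU CB
+ * - WLAN SMMU on,  IPA S1 on     -> rings are mapped from their sg_table,
+ *                                   doorbells/ring pointers from the PA
+ */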
+static int ipa_create_uc_smmu_mapping(int res_idx, bool wlan_smmu_en,
+		phys_addr_t pa, struct sg_table *sgt, size_t len, bool device,
+		unsigned long *iova)
+{
+	/* SMMU on WLAN but no SMMU on IPA is not a supported pairing */
+	if (wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) {
+		IPAERR("Unsupported SMMU pairing\n");
+		return -EINVAL;
+	}
+
+	/* legacy: no SMMUs on either end */
+	if (!wlan_smmu_en && ipa3_ctx->smmu_s1_bypass) {
+		*iova = pa;
+		return 0;
+	}
+
+	/* no SMMU on WLAN but SMMU on IPA */
+	if (!wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) {
+		if (ipa_create_uc_smmu_mapping_pa(pa, len,
+			(res_idx == IPA_WDI_CE_DB_RES) ? true : false, iova)) {
+			IPAERR("Fail to create mapping res %d\n", res_idx);
+			return -EFAULT;
+		}
+		ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+		return 0;
+	}
+
+	/* SMMU on WLAN and SMMU on IPA */
+	if (wlan_smmu_en && !ipa3_ctx->smmu_s1_bypass) {
+		switch (res_idx) {
+		case IPA_WDI_RX_RING_RP_RES:
+		case IPA_WDI_RX_COMP_RING_WP_RES:
+		case IPA_WDI_CE_DB_RES:
+			if (ipa_create_uc_smmu_mapping_pa(pa, len,
+				(res_idx == IPA_WDI_CE_DB_RES) ? true : false,
+				iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_pa(res_idx, pa, *iova, len);
+			break;
+		case IPA_WDI_RX_RING_RES:
+		case IPA_WDI_RX_COMP_RING_RES:
+		case IPA_WDI_TX_RING_RES:
+		case IPA_WDI_CE_RING_RES:
+			if (ipa_create_uc_smmu_mapping_sgt(sgt, iova)) {
+				IPAERR("Fail to create mapping res %d\n",
+						res_idx);
+				return -EFAULT;
+			}
+			ipa_save_uc_smmu_mapping_sgt(res_idx, sgt, *iova);
+			break;
+		default:
+			BUG();
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_connect_wdi_pipe() - WDI client connect
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	int ipa_ep_idx;
+	int result = -EFAULT;
+	struct ipa3_ep_context *ep;
+	struct ipa_mem_buffer cmd;
+	struct IpaHwWdiTxSetUpCmdData_t *tx;
+	struct IpaHwWdiRxSetUpCmdData_t *rx;
+	struct IpaHwWdi2TxSetUpCmdData_t *tx_2;
+	struct IpaHwWdi2RxSetUpCmdData_t *rx_2;
+
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	unsigned long va;
+	phys_addr_t pa;
+	u32 len;
+
+	if (in == NULL || out == NULL || in->sys.client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. in=%p out=%p\n", in, out);
+		if (in)
+			IPAERR("client = %d\n", in->sys.client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (in->u.dl.comp_ring_base_pa % IPA_WDI_RING_ALIGNMENT ||
+			in->u.dl.ce_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+			IPAERR("alignment failure on TX\n");
+			return -EINVAL;
+		}
+	} else {
+		if (in->u.ul.rdy_ring_base_pa % IPA_WDI_RING_ALIGNMENT) {
+			IPAERR("alignment failure on RX\n");
+			return -EINVAL;
+		}
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(in->sys.client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("fail to alloc EP.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->valid) {
+		IPAERR("EP already allocated.\n");
+		goto fail;
+	}
+
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_INC_EP(in->sys.client);
+
+	IPADBG("client=%d ep=%d\n", in->sys.client, ipa_ep_idx);
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa3_ctx->ipa_wdi2)
+			cmd.size = sizeof(*tx_2);
+		else
+			cmd.size = sizeof(*tx);
+		IPADBG("comp_ring_base_pa=0x%pa\n",
+				&in->u.dl.comp_ring_base_pa);
+		IPADBG("comp_ring_size=%d\n", in->u.dl.comp_ring_size);
+		IPADBG("ce_ring_base_pa=0x%pa\n", &in->u.dl.ce_ring_base_pa);
+		IPADBG("ce_ring_size=%d\n", in->u.dl.ce_ring_size);
+		IPADBG("ce_ring_doorbell_pa=0x%pa\n",
+				&in->u.dl.ce_door_bell_pa);
+		IPADBG("num_tx_buffers=%d\n", in->u.dl.num_tx_buffers);
+	} else {
+		if (ipa3_ctx->ipa_wdi2)
+			cmd.size = sizeof(*rx_2);
+		else
+			cmd.size = sizeof(*rx);
+		IPADBG("rx_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_base_pa);
+		IPADBG("rx_ring_size=%d\n",
+			in->u.ul.rdy_ring_size);
+		IPADBG("rx_ring_rp_pa=0x%pa\n",
+			&in->u.ul.rdy_ring_rp_pa);
+		IPADBG("rx_comp_ring_base_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_base_pa);
+		IPADBG("rx_comp_ring_size=%d\n",
+			in->u.ul.rdy_comp_ring_size);
+		IPADBG("rx_comp_ring_wp_pa=0x%pa\n",
+			&in->u.ul.rdy_comp_ring_wp_pa);
+		ipa3_ctx->uc_ctx.rdy_ring_base_pa =
+			in->u.ul.rdy_ring_base_pa;
+		ipa3_ctx->uc_ctx.rdy_ring_rp_pa =
+			in->u.ul.rdy_ring_rp_pa;
+		ipa3_ctx->uc_ctx.rdy_ring_size =
+			in->u.ul.rdy_ring_size;
+		ipa3_ctx->uc_ctx.rdy_comp_ring_base_pa =
+			in->u.ul.rdy_comp_ring_base_pa;
+		ipa3_ctx->uc_ctx.rdy_comp_ring_wp_pa =
+			in->u.ul.rdy_comp_ring_wp_pa;
+		ipa3_ctx->uc_ctx.rdy_comp_ring_size =
+			in->u.ul.rdy_comp_ring_size;
+
+		/* make sure the ring RP/WP virtual addresses were provided */
+		if (ipa3_ctx->ipa_wdi2) {
+			if (in->smmu_enabled) {
+				if (!in->u.ul_smmu.rdy_ring_rp_va ||
+					!in->u.ul_smmu.rdy_comp_ring_wp_va)
+					goto dma_alloc_fail;
+			} else {
+				if (!in->u.ul.rdy_ring_rp_va ||
+					!in->u.ul.rdy_comp_ring_wp_va)
+					goto dma_alloc_fail;
+			}
+			IPADBG("rdy_ring_rp value =%d\n",
+				in->smmu_enabled ?
+				*in->u.ul_smmu.rdy_ring_rp_va :
+				*in->u.ul.rdy_ring_rp_va);
+			IPADBG("rx_comp_ring_wp value=%d\n",
+				in->smmu_enabled ?
+				*in->u.ul_smmu.rdy_comp_ring_wp_va :
+				*in->u.ul.rdy_comp_ring_wp_va);
+			ipa3_ctx->uc_ctx.rdy_ring_rp_va =
+				in->smmu_enabled ?
+				in->u.ul_smmu.rdy_ring_rp_va :
+				in->u.ul.rdy_ring_rp_va;
+			ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va =
+				in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_va :
+				in->u.ul.rdy_comp_ring_wp_va;
+		}
+	}
+
+	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
+			&cmd.phys_base, GFP_KERNEL);
+	if (cmd.base == NULL) {
+		IPAERR("fail to get DMA memory.\n");
+		result = -ENOMEM;
+		goto dma_alloc_fail;
+	}
+
+	if (IPA_CLIENT_IS_CONS(in->sys.client)) {
+		if (ipa3_ctx->ipa_wdi2) {
+			tx_2 = (struct IpaHwWdi2TxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.dl_smmu.comp_ring_size,
+				in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+					in->smmu_enabled,
+					in->u.dl.comp_ring_base_pa,
+					&in->u.dl_smmu.comp_ring,
+					len,
+					false,
+					&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->comp_ring_size = len;
+			IPADBG("TX_2 comp_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->comp_ring_base_pa_hi,
+					tx_2->comp_ring_base_pa);
+
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX_2 CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			/* WA: wlan passed ce_ring sg_table PA directly */
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			tx_2->ce_ring_size = len;
+			IPADBG("TX_2 ce_ring_base_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_base_pa_hi,
+					tx_2->ce_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx_2->ce_ring_doorbell_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			tx_2->ce_ring_doorbell_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("TX_2 ce_ring_doorbell_pa_hi=0x%08x :0x%08x\n",
+					tx_2->ce_ring_doorbell_pa_hi,
+					tx_2->ce_ring_doorbell_pa);
+
+			tx_2->num_tx_buffers = in->smmu_enabled ?
+				in->u.dl_smmu.num_tx_buffers :
+				in->u.dl.num_tx_buffers;
+			tx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			tx = (struct IpaHwWdiTxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.dl_smmu.comp_ring_size :
+				in->u.dl.comp_ring_size;
+			IPADBG("TX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.comp_ring_size,
+					in->u.dl.comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.comp_ring_base_pa,
+						&in->u.dl_smmu.comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping TX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->comp_ring_base_pa = va;
+			tx->comp_ring_size = len;
+			len = in->smmu_enabled ? in->u.dl_smmu.ce_ring_size :
+				in->u.dl.ce_ring_size;
+			IPADBG("TX CE ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.dl_smmu.ce_ring_size,
+					in->u.dl.ce_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
+						in->smmu_enabled,
+						in->u.dl.ce_ring_base_pa,
+						&in->u.dl_smmu.ce_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping CE ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_base_pa = va;
+			tx->ce_ring_size = len;
+			pa = in->smmu_enabled ? in->u.dl_smmu.ce_door_bell_pa :
+				in->u.dl.ce_door_bell_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						true,
+						&va)) {
+				IPAERR("fail to create uc mapping CE DB.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			tx->ce_ring_doorbell_pa = va;
+			tx->num_tx_buffers = in->u.dl.num_tx_buffers;
+			tx->ipa_pipe_number = ipa_ep_idx;
+		}
+		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+				IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+				IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+	} else {
+		if (ipa3_ctx->ipa_wdi2) {
+			rx_2 = (struct IpaHwWdi2RxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d ring_size=%d %d\n",
+				in->smmu_enabled,
+				in->u.ul_smmu.rdy_ring_size,
+				in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_ring_size = len;
+			IPADBG("RX_2 rx_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_ring_base_pa_hi,
+					rx_2->rx_ring_base_pa);
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_ring_rp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_ring_rp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_ring_rp_pa_hi=0x%08x :0x%08x\n",
+					rx_2->rx_ring_rp_pa_hi,
+					rx_2->rx_ring_rp_pa);
+			len = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_size :
+				in->u.ul.rdy_comp_ring_size;
+			IPADBG("RX_2 ring smmu_en=%d comp_ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_comp_ring_size,
+					in->u.ul.rdy_comp_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_COMP_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_comp_ring_base_pa,
+						&in->u.ul_smmu.rdy_comp_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_base_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_base_pa = (u32) (va & 0xFFFFFFFF);
+			rx_2->rx_comp_ring_size = len;
+			IPADBG("RX_2 rx_comp_ring_base_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_base_pa_hi,
+					rx_2->rx_comp_ring_base_pa);
+
+			pa = in->smmu_enabled ?
+				in->u.ul_smmu.rdy_comp_ring_wp_pa :
+				in->u.ul.rdy_comp_ring_wp_pa;
+			if (ipa_create_uc_smmu_mapping(
+						IPA_WDI_RX_COMP_RING_WP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc RX_2 comp_rng WP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx_2->rx_comp_ring_wp_pa_hi =
+				(u32) ((va & 0xFFFFFFFF00000000) >> 32);
+			rx_2->rx_comp_ring_wp_pa = (u32) (va & 0xFFFFFFFF);
+			IPADBG("RX_2 rx_comp_ring_wp_pa_hi=0x%08x:0x%08x\n",
+					rx_2->rx_comp_ring_wp_pa_hi,
+					rx_2->rx_comp_ring_wp_pa);
+			rx_2->ipa_pipe_number = ipa_ep_idx;
+		} else {
+			rx = (struct IpaHwWdiRxSetUpCmdData_t *)cmd.base;
+
+			len = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_size :
+				in->u.ul.rdy_ring_size;
+			IPADBG("RX ring smmu_en=%d ring_size=%d %d\n",
+					in->smmu_enabled,
+					in->u.ul_smmu.rdy_ring_size,
+					in->u.ul.rdy_ring_size);
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
+						in->smmu_enabled,
+						in->u.ul.rdy_ring_base_pa,
+						&in->u.ul_smmu.rdy_ring,
+						len,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX ring.\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_base_pa = va;
+			rx->rx_ring_size = len;
+
+			pa = in->smmu_enabled ? in->u.ul_smmu.rdy_ring_rp_pa :
+				in->u.ul.rdy_ring_rp_pa;
+			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
+						in->smmu_enabled,
+						pa,
+						NULL,
+						4,
+						false,
+						&va)) {
+				IPAERR("fail to create uc mapping RX rng RP\n");
+				result = -ENOMEM;
+				goto uc_timeout;
+			}
+			rx->rx_ring_rp_pa = va;
+			rx->ipa_pipe_number = ipa_ep_idx;
+		}
+		out->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+	}
+
+	ep->valid = 1;
+	ep->client = in->sys.client;
+	ep->keep_ipa_awake = in->sys.keep_ipa_awake;
+	result = ipa3_disable_data_path(ipa_ep_idx);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			ipa_ep_idx);
+		goto uc_timeout;
+	}
+	if (IPA_CLIENT_IS_PROD(in->sys.client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
+	}
+
+	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
+				IPA_CLIENT_IS_CONS(in->sys.client) ?
+				IPA_CPU_2_HW_CMD_WDI_TX_SET_UP :
+				IPA_CPU_2_HW_CMD_WDI_RX_SET_UP,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ep->skip_ep_cfg = in->sys.skip_ep_cfg;
+	ep->client_notify = in->sys.notify;
+	ep->priv = in->sys.priv;
+
+	if (!ep->skip_ep_cfg) {
+		if (ipa3_cfg_ep(ipa_ep_idx, &in->sys.ipa_ep_cfg)) {
+			IPAERR("fail to configure EP.\n");
+			goto ipa_cfg_ep_fail;
+		}
+		IPADBG("ep configuration successful\n");
+	} else {
+		IPADBG("Skipping endpoint configuration.\n");
+	}
+
+	out->clnt_hdl = ipa_ep_idx;
+
+	if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(in->sys.client))
+		ipa3_install_dflt_flt_rules(ipa_ep_idx);
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+	ep->uc_offload_state |= IPA_WDI_CONNECTED;
+	IPADBG("client %d (ep: %d) connected\n", in->sys.client, ipa_ep_idx);
+
+	return 0;
+
+ipa_cfg_ep_fail:
+	memset(&ipa3_ctx->ep[ipa_ep_idx], 0, sizeof(struct ipa3_ep_context));
+uc_timeout:
+	ipa_release_uc_smmu_mappings(in->sys.client);
+	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
+dma_alloc_fail:
+	IPA_ACTIVE_CLIENTS_DEC_EP(in->sys.client);
+fail:
+	return result;
+}
+
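+/*
+ * Illustrative sketch (not built): a hypothetical WLAN-side caller setting up
+ * a Tx (CONS) WDI 1.x pipe without SMMU. The ring addresses and sizes are
+ * placeholders, and only fields that ipa3_connect_wdi_pipe() itself reads
+ * above are populated. example_connect_wlan_tx_pipe() is not a driver API.
+ */
+#if 0
+static int example_connect_wlan_tx_pipe(phys_addr_t comp_ring_pa,
+	u32 comp_ring_size, phys_addr_t ce_ring_pa, u32 ce_ring_size,
+	phys_addr_t ce_db_pa, u32 num_tx_buffers, u32 *clnt_hdl)
+{
+	struct ipa_wdi_in_params in;
+	struct ipa_wdi_out_params out;
+	int ret;
+
+	memset(&in, 0, sizeof(in));
+	memset(&out, 0, sizeof(out));
+	in.sys.client = IPA_CLIENT_WLAN1_CONS;
+	in.smmu_enabled = false;
+	in.u.dl.comp_ring_base_pa = comp_ring_pa;
+	in.u.dl.comp_ring_size = comp_ring_size;
+	in.u.dl.ce_ring_base_pa = ce_ring_pa;
+	in.u.dl.ce_ring_size = ce_ring_size;
+	in.u.dl.ce_door_bell_pa = ce_db_pa;
+	in.u.dl.num_tx_buffers = num_tx_buffers;
+
+	ret = ipa3_connect_wdi_pipe(&in, &out);
+	if (ret)
+		return ret;
+	/* out.uc_door_bell_pa is the uC mailbox the WLAN FW rings on Tx */
+	*clnt_hdl = out.clnt_hdl;
+	return 0;
+}
+#endif
+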
+/**
+ * ipa3_disconnect_wdi_pipe() - WDI client disconnect
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t tear;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	if (!ep->keep_ipa_awake)
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	tear.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(tear.raw32b,
+				IPA_CPU_2_HW_CMD_WDI_TEAR_DOWN,
+				IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+				false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	ipa3_delete_dflt_flt_rules(clnt_hdl);
+	ipa_release_uc_smmu_mappings(ep->client);
+
+	memset(&ipa3_ctx->ep[clnt_hdl], 0, sizeof(struct ipa3_ep_context));
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) disconnected\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_enable_wdi_pipe() - WDI client enable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_enable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t enable;
+	struct ipa_ep_cfg_holb holb_cfg;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != IPA_WDI_CONNECTED) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	enable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(enable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_ENABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		memset(&holb_cfg, 0, sizeof(holb_cfg));
+		holb_cfg.en = IPA_HOLB_TMR_DIS;
+		holb_cfg.tmr_val = 0;
+		result = ipa3_cfg_ep_holb(clnt_hdl, &holb_cfg);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state |= IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) enabled\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_disable_wdi_pipe() - WDI client disable
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_disable_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t disable;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+	u32 prod_hdl;
+	int i;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	/* on WDI2.0, wait until the rdy_ring RP matches the rdy_comp_ring WP */
+	if (ipa3_ctx->ipa_wdi2) {
+		for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
+			IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
+					i,
+					*ipa3_ctx->uc_ctx.rdy_ring_rp_va,
+					*ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va);
+			if (*ipa3_ctx->uc_ctx.rdy_ring_rp_va !=
+				*ipa3_ctx->uc_ctx.rdy_comp_ring_wp_va) {
+				usleep_range(IPA_UC_WAIT_MIN_SLEEP,
+					IPA_UC_WAII_MAX_SLEEP);
+			} else {
+				break;
+			}
+		}
+		/* If the uC still hasn't processed all pending
+		 * descriptors at this point, raise a warning.
+		 */
+		if (i == IPA_UC_FINISH_MAX)
+			WARN_ON(1);
+	}
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	result = ipa3_disable_data_path(clnt_hdl);
+	if (result) {
+		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
+			clnt_hdl);
+		result = -EPERM;
+		goto uc_timeout;
+	}
+
+	/*
+	 * To avoid a data stall during continuous SAP on/off: before
+	 * setting delay on the IPA consumer pipe, remove delay and
+	 * enable HOLB on the IPA producer pipe.
+	 */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+
+		prod_hdl = ipa3_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
+		if (ipa3_ctx->ep[prod_hdl].valid == 1) {
+			result = ipa3_disable_data_path(prod_hdl);
+			if (result) {
+				IPAERR("disable data path failed\n");
+				IPAERR("res=%d clnt=%d\n",
+					result, prod_hdl);
+				result = -EPERM;
+				goto uc_timeout;
+			}
+		}
+		usleep_range(IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC,
+			IPA_UC_POLL_SLEEP_USEC * IPA_UC_POLL_SLEEP_USEC);
+	}
+
+	disable.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(disable.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_DISABLE,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	/* Set the delay after disabling IPA Producer pipe */
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_ENABLED;
+	IPADBG("client (ep: %d) disabled\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_resume_wdi_pipe() - WDI client resume
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_resume_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t resume;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	resume.params.ipa_pipe_number = clnt_hdl;
+
+	result = ipa3_uc_send_cmd(resume.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_CH_RESUME,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+	if (result)
+		IPAERR("client (ep: %d) fail un-susp/delay result=%d\n",
+				clnt_hdl, result);
+	else
+		IPADBG("client (ep: %d) un-susp/delay\n", clnt_hdl);
+
+	ep->uc_offload_state |= IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) resumed\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_suspend_wdi_pipe() - WDI client suspend
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiCommonChCmdData_t suspend;
+	struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (ep->uc_offload_state != (IPA_WDI_CONNECTED | IPA_WDI_ENABLED |
+				IPA_WDI_RESUMED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+
+	suspend.params.ipa_pipe_number = clnt_hdl;
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		IPADBG("Post suspend event first for IPA Producer\n");
+		IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
+		result = ipa3_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		ep_cfg_ctrl.ipa_ep_suspend = true;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to suspend result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+	} else {
+		ep_cfg_ctrl.ipa_ep_delay = true;
+		result = ipa3_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+		if (result)
+			IPAERR("client (ep: %d) failed to delay result=%d\n",
+					clnt_hdl, result);
+		else
+			IPADBG("client (ep: %d) delayed\n", clnt_hdl);
+	}
+
+	if (IPA_CLIENT_IS_CONS(ep->client)) {
+		result = ipa3_uc_send_cmd(suspend.raw32b,
+			IPA_CPU_2_HW_CMD_WDI_CH_SUSPEND,
+			IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+			false, 10*HZ);
+
+		if (result) {
+			result = -EFAULT;
+			goto uc_timeout;
+		}
+	}
+
+	ipa3_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	ep->uc_offload_state &= ~IPA_WDI_RESUMED;
+	IPADBG("client (ep: %d) suspended\n", clnt_hdl);
+
+uc_timeout:
+	return result;
+}
+
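+/*
+ * Illustrative sketch (not built): the expected lifecycle of a WDI pipe as
+ * enforced by the uc_offload_state checks above - connect, enable, resume
+ * while active, then suspend, disable and disconnect on teardown. Error
+ * handling is collapsed into a single bail-out path for brevity;
+ * example_wdi_pipe_lifecycle() is not a driver API.
+ */
+#if 0
+static int example_wdi_pipe_lifecycle(struct ipa_wdi_in_params *in,
+	struct ipa_wdi_out_params *out)
+{
+	int ret;
+
+	ret = ipa3_connect_wdi_pipe(in, out);
+	if (ret)
+		return ret;
+	ret = ipa3_enable_wdi_pipe(out->clnt_hdl);
+	if (ret)
+		goto out_disconnect;
+	ret = ipa3_resume_wdi_pipe(out->clnt_hdl);
+	if (ret)
+		goto out_disable;
+
+	/* ... data phase ... */
+
+	ipa3_suspend_wdi_pipe(out->clnt_hdl);
+out_disable:
+	ipa3_disable_wdi_pipe(out->clnt_hdl);
+out_disconnect:
+	ipa3_disconnect_wdi_pipe(out->clnt_hdl);
+	return ret;
+}
+#endif
+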
+int ipa3_write_qmapid_wdi_pipe(u32 clnt_hdl, u8 qmap_id)
+{
+	int result = 0;
+	struct ipa3_ep_context *ep;
+	union IpaHwWdiRxExtCfgCmdData_t qmap;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm, %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result)
+		return result;
+
+	IPADBG("ep=%d\n", clnt_hdl);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	if (!(ep->uc_offload_state & IPA_WDI_CONNECTED)) {
+		IPAERR("WDI channel bad state %d\n", ep->uc_offload_state);
+		return -EFAULT;
+	}
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+	qmap.params.ipa_pipe_number = clnt_hdl;
+	qmap.params.qmap_id = qmap_id;
+
+	result = ipa3_uc_send_cmd(qmap.raw32b,
+		IPA_CPU_2_HW_CMD_WDI_RX_EXT_CFG,
+		IPA_HW_2_CPU_WDI_CMD_STATUS_SUCCESS,
+		false, 10*HZ);
+
+	if (result) {
+		result = -EFAULT;
+		goto uc_timeout;
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("client (ep: %d) qmap_id %d updated\n", clnt_hdl, qmap_id);
+
+uc_timeout:
+	return result;
+}
+
+/**
+ * ipa3_uc_reg_rdyCB() - Register a uC ready callback if the uC is not ready
+ * @inout:	[in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *inout)
+{
+	int result = 0;
+
+	if (inout == NULL) {
+		IPAERR("bad parm. inout=%p\n", inout);
+		return -EINVAL;
+	}
+
+	result = ipa3_uc_state_check();
+	if (result) {
+		inout->is_uC_ready = false;
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb = inout->notify;
+		ipa3_ctx->uc_wdi_ctx.priv = inout->priv;
+	} else {
+		inout->is_uC_ready = true;
+	}
+
+	return 0;
+}
+
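+/*
+ * Illustrative sketch (not built): a hypothetical client registering for the
+ * uC-ready notification. If the uC is already up, is_uC_ready is set and the
+ * callback is never stored; otherwise the callback fires later from
+ * ipa3_uc_wdi_loaded_handler() below with the registered priv pointer.
+ * The example_* functions are not part of the driver.
+ */
+#if 0
+static void example_uc_ready_cb(void *priv)
+{
+	IPADBG("IPA uC is ready, priv=%p\n", priv);
+}
+
+static void example_wait_for_uc(void *client_priv)
+{
+	struct ipa_wdi_uc_ready_params param;
+
+	memset(&param, 0, sizeof(param));
+	param.notify = example_uc_ready_cb;
+	param.priv = client_priv;
+	if (ipa3_uc_reg_rdyCB(&param))
+		return;
+	if (param.is_uC_ready)
+		IPADBG("uC already ready\n");
+}
+#endif
+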
+/**
+ * ipa3_uc_dereg_rdyCB() - De-register the uC ready callback
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_dereg_rdyCB(void)
+{
+	ipa3_ctx->uc_wdi_ctx.uc_ready_cb = NULL;
+	ipa3_ctx->uc_wdi_ctx.priv = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa3_uc_wdi_get_dbpa() - Retrieve the doorbell physical address of the
+ * WLAN pipes
+ * @param:	[in/out] input/output parameters from/to client
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *param)
+{
+	if (param == NULL || param->client >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm. param=%p\n", param);
+		if (param)
+			IPAERR("client = %d\n", param->client);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(param->client)) {
+		param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_TX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_TX_MBOX_START_INDEX % 32);
+	} else {
+		param->uc_door_bell_pa = ipa3_ctx->ipa_wrapper_base +
+				ipahal_get_reg_base() +
+				ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
+					IPA_HW_WDI_RX_MBOX_START_INDEX/32,
+					IPA_HW_WDI_RX_MBOX_START_INDEX % 32);
+	}
+
+	return 0;
+}
+
+static void ipa3_uc_wdi_loaded_handler(void)
+{
+	if (!ipa3_ctx) {
+		IPAERR("IPA ctx is null\n");
+		return;
+	}
+
+	if (ipa3_ctx->uc_wdi_ctx.uc_ready_cb) {
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb(
+			ipa3_ctx->uc_wdi_ctx.priv);
+
+		ipa3_ctx->uc_wdi_ctx.uc_ready_cb =
+			NULL;
+		ipa3_ctx->uc_wdi_ctx.priv = NULL;
+	}
+}
+
+int ipa3_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+	int i;
+	int ret = 0;
+	int prot = IOMMU_READ | IOMMU_WRITE;
+
+	if (!info) {
+		IPAERR("info = %p\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = ipa3_iommu_map(cb->iommu,
+			rounddown(info[i].iova, PAGE_SIZE),
+			rounddown(info[i].pa, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE),
+			prot);
+	}
+
+	return ret;
+}
+
+int ipa3_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info)
+{
+	struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
+	int i;
+	int ret = 0;
+
+	if (!info) {
+		IPAERR("info = %p\n", info);
+		return -EINVAL;
+	}
+
+	if (!cb->valid) {
+		IPAERR("No SMMU CB setup\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_buffers; i++) {
+		IPADBG("i=%d pa=0x%pa iova=0x%lx sz=0x%zx\n", i,
+			&info[i].pa, info[i].iova, info[i].size);
+		info[i].result = iommu_unmap(cb->iommu,
+			rounddown(info[i].iova, PAGE_SIZE),
+			roundup(info[i].size + info[i].pa -
+				rounddown(info[i].pa, PAGE_SIZE), PAGE_SIZE));
+	}
+
+	return ret;
+}
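+
+/*
+ * Illustrative sketch (not built): hypothetical mapping/unmapping of a single
+ * WLAN buffer through the WLAN SMMU CB. Only the ipa_wdi_buffer_info fields
+ * referenced above (pa, iova, size, result) are used;
+ * example_map_one_buffer() is not a driver API.
+ */
+#if 0
+static int example_map_one_buffer(phys_addr_t pa, unsigned long iova,
+	size_t size)
+{
+	struct ipa_wdi_buffer_info info;
+	int ret;
+
+	memset(&info, 0, sizeof(info));
+	info.pa = pa;
+	info.iova = iova;
+	info.size = size;
+
+	ret = ipa3_create_wdi_mapping(1, &info);
+	if (ret)
+		return ret;
+	/* info.result carries the per-buffer ipa3_iommu_map() return value */
+
+	/* ... use the buffer ... */
+
+	ipa3_release_wdi_mapping(1, &info);
+	return 0;
+}
+#endif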
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
new file mode 100644
index 0000000..2564b90
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -0,0 +1,4089 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <net/ip.h>
+#include <linux/genalloc.h>	/* gen_pool_alloc() */
+#include <linux/io.h>
+#include <linux/ratelimit.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_gsi.h>
+#include <linux/elf.h>
+#include "ipa_i.h"
+#include "ipahal/ipahal.h"
+#include "ipahal/ipahal_fltrt.h"
+#include "../ipa_rm_i.h"
+
+#define IPA_V3_0_CLK_RATE_SVS (75 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_NOMINAL (150 * 1000 * 1000UL)
+#define IPA_V3_0_CLK_RATE_TURBO (200 * 1000 * 1000UL)
+#define IPA_V3_0_MAX_HOLB_TMR_VAL (4294967296 - 1)
+
+#define IPA_V3_0_BW_THRESHOLD_TURBO_MBPS (1000)
+#define IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS (600)
+
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK 0xFF0000
+#define IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT 0x10
+
+/* Max pipes + ICs for TAG process */
+#define IPA_TAG_MAX_DESC (IPA3_MAX_NUM_PIPES + 6)
+
+#define IPA_TAG_SLEEP_MIN_USEC (1000)
+#define IPA_TAG_SLEEP_MAX_USEC (2000)
+#define IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT (10 * HZ)
+#define IPA_BCR_REG_VAL_v3_0 (0x00000001)
+#define IPA_BCR_REG_VAL_v3_5 (0x0000003B)
+#define IPA_AGGR_GRAN_MIN (1)
+#define IPA_AGGR_GRAN_MAX (32)
+#define IPA_EOT_COAL_GRAN_MIN (1)
+#define IPA_EOT_COAL_GRAN_MAX (16)
+
+#define IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC (15)
+
+#define IPA_AGGR_BYTE_LIMIT (\
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_N_AGGR_BYTE_LIMIT_SHFT)
+#define IPA_AGGR_PKT_LIMIT (\
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >> \
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT)
+
+/* In IPAv3 only endpoints 0-3 can be configured to deaggregation */
+#define IPA_EP_SUPPORTS_DEAGGR(idx) ((idx) >= 0 && (idx) <= 3)
+
+/* configure IPA spare register 1 in order to have correct IPA version
+ * set bits 0,2,3 and 4. see SpareBits documentation.xlsx
+ */
+#define IPA_SPARE_REG_1_VAL (0x0000081D)
+
+
+/* HPS, DPS sequencers Types*/
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY  0x00000000
+/* DMA + DECIPHER/CIPHER */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_DEC 0x00000011
+/* Packet Processing + no decipher + uCP (for Ethernet Bridging) */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP 0x00000002
+/* Packet Processing + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_UCP 0x00000013
+/* 2 Packet Processing pass + no decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP 0x00000004
+/* 2 Packet Processing pass + decipher + uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP 0x00000015
+/* Packet Processing + no decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_NO_UCP 0x00000006
+/* Packet Processing + no decipher + no uCP */
+#define IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_DEC_NO_UCP 0x00000017
+/* COMP/DECOMP */
+#define IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP 0x00000020
+/* Invalid sequencer type */
+#define IPA_DPS_HPS_SEQ_TYPE_INVALID 0xFFFFFFFF
+
+#define IPA_DPS_HPS_SEQ_TYPE_IS_DMA(seq_type) \
+	(seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY || \
+	seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_DEC || \
+	seq_type == IPA_DPS_HPS_SEQ_TYPE_DMA_COMP_DECOMP)
+
+#define QMB_MASTER_SELECT_DDR  (0)
+#define QMB_MASTER_SELECT_PCIE (1)
+
+#define IPA_CLIENT_NOT_USED \
+	{-1, -1, false, IPA_DPS_HPS_SEQ_TYPE_INVALID, QMB_MASTER_SELECT_DDR}
+
+/* Resource Group index*/
+#define IPA_v3_0_GROUP_UL		(0)
+#define IPA_v3_0_GROUP_DL		(1)
+#define IPA_v3_0_GROUP_DPL		IPA_v3_0_GROUP_DL
+#define IPA_v3_0_GROUP_DIAG		(2)
+#define IPA_v3_0_GROUP_DMA		(3)
+#define IPA_v3_0_GROUP_IMM_CMD		IPA_v3_0_GROUP_UL
+#define IPA_v3_0_GROUP_Q6ZIP		(4)
+#define IPA_v3_0_GROUP_Q6ZIP_GENERAL	IPA_v3_0_GROUP_Q6ZIP
+#define IPA_v3_0_GROUP_UC_RX_Q		(5)
+#define IPA_v3_0_GROUP_Q6ZIP_ENGINE	IPA_v3_0_GROUP_UC_RX_Q
+#define IPA_v3_0_GROUP_MAX		(6)
+
+#define IPA_v3_5_1_GROUP_LWA_DL	(0)
+#define IPA_v3_5_1_GROUP_UL_DL		(1)
+#define IPA_v3_5_1_GROUP_DMA		(2)
+#define IPA_v3_5_1_GROUP_UC_RX_Q	(3)
+#define IPA_v3_5_1_SRC_GROUP_MAX	(4)
+#define IPA_v3_5_1_DST_GROUP_MAX	(3)
+
+#define IPA_GROUP_MAX IPA_v3_0_GROUP_MAX
+
+enum ipa_rsrc_grp_type_src {
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER,
+	IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX,
+
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS = 0,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_HPS_DMARS,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_ACK_ENTRIES,
+	IPA_v3_5_1_RSRC_GRP_TYPE_SRC_MAX
+};
+
+#define IPA_RSRC_GRP_TYPE_SRC_MAX IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX
+
+enum ipa_rsrc_grp_type_dst {
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v3_0_RSRC_GRP_TYPE_DST_MAX,
+
+	IPA_v3_5_1_RSRC_GRP_TYPE_DST_DATA_SECTORS = 0,
+	IPA_v3_5_1_RSRC_GRP_TYPE_DST_DPS_DMARS,
+	IPA_v3_5_1_RSRC_GRP_TYPE_DST_MAX,
+};
+#define IPA_RSRC_GRP_TYPE_DST_MAX IPA_v3_0_RSRC_GRP_TYPE_DST_MAX
+
+enum ipa_rsrc_grp_type_rx {
+	IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ,
+	IPA_RSRC_GRP_TYPE_RX_MAX
+};
+struct rsrc_min_max {
+	u32 min;
+	u32 max;
+};
+
+enum ipa_ver {
+	IPA_3_0,
+	IPA_3_5,
+	IPA_3_5_1,
+	IPA_VER_MAX,
+};
+
+static const struct rsrc_min_max ipa3_rsrc_src_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_SRC_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+		/*UL	DL	DIAG	DMA	Not Used	uC Rx*/
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{3, 255}, {3, 255}, {1, 255}, {1, 255}, {1, 255}, {2, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDR_SECTORS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI1_BUFFER] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5},  {0, 0}, {8, 8} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{19, 19}, {26, 26}, {3, 3}, {7, 7}, {0, 0}, {8, 8} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HDRI2_BUFFERS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {16, 16}, {5, 5}, {5, 5}, {0, 0}, {8, 8} },
+	},
+	[IPA_3_5_1] = {
+		/* LWA_DL  UL_DL    not used  UC_RX_Q, other are invalid */
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_PKT_CONTEXTS] = {
+		{1, 255}, {1, 255}, {0, 0}, {1, 255}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRS_DESCRIPTOR_LISTS] = {
+		{10, 10}, {10, 10}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_DESCRIPTOR_BUFF] = {
+		{12, 12}, {14, 14}, {0, 0}, {8, 8}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_HPS_DMARS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255},  {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_SRC_ACK_ENTRIES] = {
+		{14, 14}, {20, 20}, {0, 0}, {14, 14}, {0, 0}, {0, 0} },
+	}
+};
+
+static const struct rsrc_min_max ipa3_rsrc_dst_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_DST_MAX][IPA_GROUP_MAX] = {
+	[IPA_3_0] = {
+			/*UL	DL/DPL	DIAG	DMA  Q6zip_gen Q6zip_eng*/
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{2, 2}, {3, 3}, {0, 0}, {2, 2}, {3, 3}, {3, 3} },
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DATA_SECTOR_LISTS] = {
+		{0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255}, {0, 255} },
+		[IPA_v3_0_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{1, 1}, {1, 1}, {1, 1}, {1, 1}, {1, 1}, {0, 0} },
+	},
+	[IPA_3_5_1] = {
+			/*LWA_DL UL/DL/DPL not used, other are invalid */
+		[IPA_v3_5_1_RSRC_GRP_TYPE_DST_DATA_SECTORS] = {
+		{4, 4}, {4, 4}, {3, 3}, {0, 0}, {0, 0}, {0, 0} },
+		[IPA_v3_5_1_RSRC_GRP_TYPE_DST_DPS_DMARS] = {
+		{2, 255}, {1, 255}, {1, 2}, {0, 0}, {0, 0}, {0, 0} },
+	}
+};
+static const struct rsrc_min_max ipa3_rsrc_rx_grp_config
+	[IPA_VER_MAX][IPA_RSRC_GRP_TYPE_RX_MAX][IPA_GROUP_MAX] = {
+		[IPA_3_0] = {
+		/*UL	DL	DIAG	DMA	Unused	uC Rx*/
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{16, 16}, {24, 24}, {8, 8}, {8, 8}, {0, 0}, {8, 8} },
+		},
+		[IPA_3_5_1] = {
+		/* LWA_DL UL_DL	not used UC_RX_Q, other are invalid */
+		[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ] = {
+		{3, 3}, {7, 7}, {0, 0}, {2, 2}, {0, 0}, {0, 0} },
+		},
+};
+
+struct ipa_ep_configuration {
+	int pipe_num;
+	int group_num;
+	bool support_flt;
+	int sequencer_type;
+	u8 qmb_master_sel;
+};
+
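+/*
+ * The table below is indexed first by IPA HW version and then by logical
+ * client, e.g. ipa3_ep_mapping[IPA_3_0][IPA_CLIENT_USB_PROD].pipe_num is the
+ * physical pipe (1) behind the USB producer on IPA 3.0. A pipe_num of -1
+ * (IPA_CLIENT_NOT_USED) marks clients with no pipe on that HW version.
+ */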
+static const struct ipa_ep_configuration ipa3_ep_mapping
+					[IPA_VER_MAX][IPA_CLIENT_MAX] = {
+	[IPA_3_0][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN1_PROD]          = {
+			10, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB_PROD]            = {
+			1, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_UC_USB_PROD]         = {
+			2, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_WAN_PROD]   = {
+			14, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_APPS_CMD_PROD]
+			= {22, IPA_v3_0_GROUP_IMM_CMD, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_ODU_PROD]            = {
+			12, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_MHI_PROD]            = {
+			0, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_PROD]         = {
+			9, IPA_v3_0_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_PROD]         = {
+			5, IPA_v3_0_GROUP_DL,
+			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_CMD_PROD]
+			= {6, IPA_v3_0_GROUP_IMM_CMD, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_PROD]      = {7, IPA_v3_0_GROUP_Q6ZIP,
+			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_PROD]     = {8, IPA_v3_0_GROUP_Q6ZIP,
+			false, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD]
+			= {12, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
+			= {13, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_PCIE},
+	/* Only for test purpose */
+	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {
+			1, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST1_PROD]          = {
+			1, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST2_PROD]          = {
+			3, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST3_PROD]          = {
+			12, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST4_PROD]          = {
+			13, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+
+	[IPA_3_0][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN1_CONS]          = {
+			25, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN2_CONS]          = {
+			27, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN3_CONS]          = {
+			28, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_WLAN4_CONS]          = {
+			29, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_USB_CONS]            = {
+			26, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_USB_DPL_CONS]        = {
+			17, IPA_v3_0_GROUP_DPL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_APPS_LAN_CONS]       = {
+			15, IPA_v3_0_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_APPS_WAN_CONS]       = {
+			16, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_ODU_EMB_CONS]        = {
+			23, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_MHI_CONS]            = {
+			23, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_Q6_LAN_CONS]         = {
+			19, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_WAN_CONS]         = {
+			18, IPA_v3_0_GROUP_UL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DUN_CONS]         = {
+			30, IPA_v3_0_GROUP_DIAG,
+			false, IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP_CONS]
+			= {21, IPA_v3_0_GROUP_Q6ZIP, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_Q6_DECOMP2_CONS]
+			= {4, IPA_v3_0_GROUP_Q6ZIP, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]
+			= {28, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS]
+			= {29, IPA_v3_0_GROUP_DMA, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_PCIE},
+	[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {
+			26, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST1_CONS]          = {
+			26, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST2_CONS]          = {
+			27, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST3_CONS]          = {
+			28, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_0][IPA_CLIENT_TEST4_CONS]          = {
+			29, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+
+	/* IPA_3_5_1 */
+	[IPA_3_5_1][IPA_CLIENT_HSIC1_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN1_PROD]          = {
+			7, IPA_v3_5_1_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC2_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB2_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC3_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB3_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC4_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB4_PROD]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC5_PROD]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB_PROD]            = {
+			0, IPA_v3_5_1_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_UC_USB_PROD]         = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A5_WLAN_AMPDU_PROD]  = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_PROD]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_WAN_PROD]   = {
+			8, IPA_v3_5_1_GROUP_UL_DL,
+			true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_APPS_CMD_PROD]		= {
+			5, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_ODU_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MHI_PROD]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_PROD]         = {
+			3, IPA_v3_5_1_GROUP_UL_DL,
+			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR },
+	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_PROD]         = {
+			6, IPA_v3_5_1_GROUP_UL_DL,
+			true, IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_CMD_PROD]
+			= {4, IPA_v3_5_1_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_PROD]      = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_5_1][IPA_CLIENT_TEST_PROD]           = {
+			0, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST1_PROD]          = {
+			0, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST2_PROD]          = {
+			2, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST3_PROD]          = {
+			4, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST4_PROD]          = {
+			1, IPA_v3_5_1_GROUP_UL_DL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR},
+
+	[IPA_3_5_1][IPA_CLIENT_HSIC1_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN1_CONS]          = {
+			16, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC2_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB2_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN2_CONS]          =  {
+			18, IPA_v3_5_1_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC3_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB3_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN3_CONS]          =  {
+			19, IPA_v3_5_1_GROUP_UL_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_HSIC4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB4_CONS]           = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_WLAN4_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_HSIC5_CONS]          = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_USB_CONS]            = {
+			17, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_USB_DPL_CONS]        = {
+			11, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_A2_EMBEDDED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A2_TETHERED_CONS]    = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_A5_LAN_WAN_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_APPS_LAN_CONS]       = {
+			9, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_APPS_WAN_CONS]       = {
+			10, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_ODU_EMB_CONS]        = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_ODU_TETH_CONS]       = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MHI_CONS]            = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_LAN_CONS]         = {
+			13, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_WAN_CONS]         = {
+			12, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_Q6_DUN_CONS]         = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_CONS]		= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+	/* Only for test purpose */
+	[IPA_3_5_1][IPA_CLIENT_TEST_CONS]           = {
+			17, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST1_CONS]          = {
+			17, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST2_CONS]          = {
+			18, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST3_CONS]          = {
+			19, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+	[IPA_3_5_1][IPA_CLIENT_TEST4_CONS]          = {
+			11, IPA_v3_5_1_GROUP_UL_DL,
+			false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR},
+};
+
+enum ipa_ees {
+	IPA_EE_AP = 0,
+	IPA_EE_Q6 = 1,
+	IPA_EE_UC = 3,
+};
+
+static struct ipa_gsi_ep_config
+	ipa_gsi_ep_info[IPA_VER_MAX][IPA3_MAX_NUM_PIPES] = {
+		/* IPA_3_0 - valid also for IPAv3.1 */
+	[IPA_3_0] = {
+	/* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
+		{0, 0, 8, 16, IPA_EE_AP},
+		{1, 3, 8, 16, IPA_EE_AP},
+		{3, 5, 16, 32, IPA_EE_AP},
+		{4, 9, 4, 4, IPA_EE_Q6},
+		{5, 0, 16, 32, IPA_EE_Q6},
+		{6, 1, 18, 28, IPA_EE_Q6},
+		{7, 2, 0, 0, IPA_EE_Q6},
+		{8, 3, 0, 0, IPA_EE_Q6},
+		{9, 4, 8, 12, IPA_EE_Q6},
+		{10, 1, 8, 16, IPA_EE_UC},
+		{12, 9, 8, 16, IPA_EE_AP},
+		{13, 10, 8, 16, IPA_EE_AP},
+		{14, 11, 8, 16, IPA_EE_AP},
+		{15, 7, 8, 12, IPA_EE_AP},
+		{16, 8, 8, 12, IPA_EE_AP},
+		{17, 2, 8, 12, IPA_EE_AP},
+		{18, 5, 8, 12, IPA_EE_Q6},
+		{19, 6, 8, 12, IPA_EE_Q6},
+		{21, 8, 4, 4, IPA_EE_Q6},
+		{22, 6, 18, 28, IPA_EE_AP},
+		{23, 1, 8, 8, IPA_EE_AP},
+		{25, 4, 8, 8, IPA_EE_UC},
+		{26, 12, 8, 8, IPA_EE_AP},
+		{27, 4, 8, 8, IPA_EE_AP},
+		{28, 13, 8, 8, IPA_EE_AP},
+		{29, 14, 8, 8, IPA_EE_AP},
+		{30, 7, 4, 4, IPA_EE_Q6},
+		{-1, -1, -1, -1, -1}
+	},
+	[IPA_3_5] = {
+	/* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
+		{0, 7, 8, 16, IPA_EE_AP},
+		{1, 0, 8, 16, IPA_EE_UC},
+		{2, 3, 16, 32, IPA_EE_AP},
+		{3, 0, 16, 32, IPA_EE_Q6},
+		{4, 1, 20, 23, IPA_EE_Q6},
+		{5, 4, 20, 23, IPA_EE_AP},
+		{6, 4, 12, 30, IPA_EE_Q6},
+		{7, 1, 8, 16, IPA_EE_UC},
+		{8, 9, 8, 16, IPA_EE_AP},
+		{9, 5, 8, 12, IPA_EE_AP},
+		{10, 6, 8, 12, IPA_EE_AP},
+		{11, 2, 4, 6, IPA_EE_AP},
+		{12, 2, 8, 12, IPA_EE_Q6},
+		{13, 3, 8, 12, IPA_EE_Q6},
+		{14, 10, 4, 6, IPA_EE_AP},
+		{15, 2, 8, 8, IPA_EE_UC},
+		{16, 3, 8, 8, IPA_EE_UC},
+		{17, 11, 8, 8, IPA_EE_AP},
+		{18, 12, 8, 8, IPA_EE_AP},
+		{19, 13, 8, 8, IPA_EE_AP},
+		{-1, -1, -1, -1, -1}
+	},
+	[IPA_3_5_1] = {
+	/* {ipa_ep_num, ipa_gsi_chan_num, ipa_if_tlv, ipa_if_aos, ee} */
+		{0, 0, 8, 16, IPA_EE_AP},
+		{1, 0, 8, 16, IPA_EE_UC},
+		{2, 3, 16, 32, IPA_EE_AP},
+		{3, 0, 16, 32, IPA_EE_Q6},
+		{4, 1, 20, 23, IPA_EE_Q6},
+		{5, 4, 20, 23, IPA_EE_AP},
+		{6, 4, 12, 30, IPA_EE_Q6},
+		{7, 1, 8, 16, IPA_EE_UC},
+		{8, 7, 8, 16, IPA_EE_AP},
+		{9, 5, 8, 12, IPA_EE_AP},
+		{10, 6, 8, 12, IPA_EE_AP},
+		{11, 2, 4, 6, IPA_EE_AP},
+		{12, 2, 8, 12, IPA_EE_Q6},
+		{13, 3, 8, 12, IPA_EE_Q6},
+		{14, 5, 8, 8, IPA_EE_Q6},
+		{15, 2, 8, 8, IPA_EE_UC},
+		{16, 3, 8, 8, IPA_EE_UC},
+		{17, 8, 8, 8, IPA_EE_AP},
+		{18, 9, 8, 8, IPA_EE_AP},
+		{19, 10, 8, 8, IPA_EE_AP},
+		{-1, -1, -1, -1, -1}
+	},
+};
+
+static struct msm_bus_vectors ipa_init_vectors_v3_0[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 0,
+		.ib = 0,
+	},
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 0,
+		.ib = 0,
+	},
+};
+
+static struct msm_bus_vectors ipa_nominal_perf_vectors_v3_0[]  = {
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab = 100000000,
+		.ib = 1300000000,
+	},
+	{
+		.src = MSM_BUS_MASTER_IPA,
+		.dst = MSM_BUS_SLAVE_OCIMEM,
+		.ab = 100000000,
+		.ib = 1300000000,
+	},
+};
+
+static struct msm_bus_paths ipa_usecases_v3_0[]  = {
+	{
+		ARRAY_SIZE(ipa_init_vectors_v3_0),
+		ipa_init_vectors_v3_0,
+	},
+	{
+		ARRAY_SIZE(ipa_nominal_perf_vectors_v3_0),
+		ipa_nominal_perf_vectors_v3_0,
+	},
+};
+
+static struct msm_bus_scale_pdata ipa_bus_client_pdata_v3_0 = {
+	ipa_usecases_v3_0,
+	ARRAY_SIZE(ipa_usecases_v3_0),
+	.name = "ipa",
+};
+
+void ipa3_active_clients_lock(void)
+{
+	unsigned long flags;
+
+	mutex_lock(&ipa3_ctx->ipa3_active_clients.mutex);
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
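+	/* publish mutex ownership under the spinlock so trylock callers can see it */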
+	ipa3_ctx->ipa3_active_clients.mutex_locked = true;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+}
+
+int ipa3_active_clients_trylock(unsigned long *flags)
+{
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
+	if (ipa3_ctx->ipa3_active_clients.mutex_locked) {
+		spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock,
+					 *flags);
+		return 0;
+	}
+
+	return 1;
+}
+
+void ipa3_active_clients_trylock_unlock(unsigned long *flags)
+{
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, *flags);
+}
+
+void ipa3_active_clients_unlock(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+	ipa3_ctx->ipa3_active_clients.mutex_locked = false;
+	spin_unlock_irqrestore(&ipa3_ctx->ipa3_active_clients.spinlock, flags);
+	mutex_unlock(&ipa3_ctx->ipa3_active_clients.mutex);
+}
+
+/**
+ * ipa3_get_clients_from_rm_resource() - get IPA clients which are related to an
+ * IPA_RM resource
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ * @clients: [OUT] Empty array which will contain the list of clients. The
+ *         caller must initialize this array.
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_get_clients_from_rm_resource(
+	enum ipa_rm_resource_name resource,
+	struct ipa3_client_names *clients)
+{
+	int i = 0;
+
+	if (resource < 0 ||
+	    resource >= IPA_RM_RESOURCE_MAX ||
+	    !clients) {
+		IPAERR("Bad parameters\n");
+		return -EINVAL;
+	}
+
+	switch (resource) {
+	case IPA_RM_RESOURCE_USB_CONS:
+		clients->names[i++] = IPA_CLIENT_USB_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_DPL_CONS:
+		clients->names[i++] = IPA_CLIENT_USB_DPL_CONS;
+		break;
+	case IPA_RM_RESOURCE_HSIC_CONS:
+		clients->names[i++] = IPA_CLIENT_HSIC1_CONS;
+		break;
+	case IPA_RM_RESOURCE_WLAN_CONS:
+		clients->names[i++] = IPA_CLIENT_WLAN1_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN2_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN3_CONS;
+		clients->names[i++] = IPA_CLIENT_WLAN4_CONS;
+		break;
+	case IPA_RM_RESOURCE_MHI_CONS:
+		clients->names[i++] = IPA_CLIENT_MHI_CONS;
+		break;
+	case IPA_RM_RESOURCE_USB_PROD:
+		clients->names[i++] = IPA_CLIENT_USB_PROD;
+		break;
+	case IPA_RM_RESOURCE_HSIC_PROD:
+		clients->names[i++] = IPA_CLIENT_HSIC1_PROD;
+		break;
+	case IPA_RM_RESOURCE_MHI_PROD:
+		clients->names[i++] = IPA_CLIENT_MHI_PROD;
+		break;
+	default:
+		break;
+	}
+	clients->length = i;
+
+	return 0;
+}
+
+/**
+ * ipa3_should_pipe_be_suspended() - returns true when the client's pipe should
+ * be suspended during a power save scenario. False otherwise.
+ *
+ * @client: [IN] IPA client
+ */
+bool ipa3_should_pipe_be_suspended(enum ipa_client_type client)
+{
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		WARN_ON(1);
+		return false;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+
+	if (ep->keep_ipa_awake)
+		return false;
+
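+	/* only these consumer pipes are suspended during power save */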
+	if (client == IPA_CLIENT_USB_CONS     ||
+	    client == IPA_CLIENT_USB_DPL_CONS ||
+	    client == IPA_CLIENT_MHI_CONS     ||
+	    client == IPA_CLIENT_HSIC1_CONS   ||
+	    client == IPA_CLIENT_WLAN1_CONS   ||
+	    client == IPA_CLIENT_WLAN2_CONS   ||
+	    client == IPA_CLIENT_WLAN3_CONS   ||
+	    client == IPA_CLIENT_WLAN4_CONS)
+		return true;
+
+	return false;
+}
+
+/**
+ * ipa3_suspend_resource_sync() - suspend client endpoints related to the IPA_RM
+ * resource and decrement active clients counter, which may result in clock
+ * gating of IPA clocks.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_sync(enum ipa_rm_resource_name resource)
+{
+	struct ipa3_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+	bool pipe_suspended = false;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("Bad params.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa3_ctx->resume_on_connect[client] = false;
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+				pipe_suspended = true;
+			}
+		}
+	}
+	/* Sleep ~1 msec */
+	if (pipe_suspended)
+		usleep_range(1000, 2000);
+
+	/* run the TAG process before gating IPA clocks */
+	ipa3_ctx->tag_process_before_gating = true;
+	IPA_ACTIVE_CLIENTS_DEC_RESOURCE(ipa_rm_resource_str(resource));
+
+	return 0;
+}
+
+/**
+ * ipa3_suspend_resource_no_block() - suspend client endpoints related to the
+ * IPA_RM resource and decrement active clients counter. This function is
+ * guaranteed to avoid sleeping.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_suspend_resource_no_block(enum ipa_rm_resource_name resource)
+{
+	int res;
+	struct ipa3_client_names clients;
+	int index;
+	enum ipa_client_type client;
+	struct ipa_ep_cfg_ctrl suspend;
+	int ipa_ep_idx;
+	unsigned long flags;
+	struct ipa_active_client_logging_info log_info;
+
+	if (ipa3_active_clients_trylock(&flags) == 0)
+		return -EPERM;
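+	/* never drop the last active client from this non-blocking path */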
+	if (ipa3_ctx->ipa3_active_clients.cnt == 1) {
+		res = -EPERM;
+		goto bail;
+	}
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR(
+			"ipa3_get_clients_from_rm_resource() failed, name = %d.\n",
+			resource);
+		goto bail;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		ipa3_ctx->resume_on_connect[client] = false;
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				/* suspend endpoint */
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = true;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+	}
+
+	if (res == 0) {
+		IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info,
+				ipa_rm_resource_str(resource));
+		ipa3_active_clients_log_dec(&log_info, true);
+		ipa3_ctx->ipa3_active_clients.cnt--;
+		IPADBG("active clients = %d\n",
+		       ipa3_ctx->ipa3_active_clients.cnt);
+	}
+bail:
+	ipa3_active_clients_trylock_unlock(&flags);
+
+	return res;
+}
+
+/**
+ * ipa3_resume_resource() - resume client endpoints related to the IPA_RM
+ * resource.
+ *
+ * @resource: [IN] IPA Resource Manager resource
+ *
+ * Return codes: 0 on success, negative on failure.
+ */
+int ipa3_resume_resource(enum ipa_rm_resource_name resource)
+{
+
+	struct ipa3_client_names clients;
+	int res;
+	int index;
+	struct ipa_ep_cfg_ctrl suspend;
+	enum ipa_client_type client;
+	int ipa_ep_idx;
+
+	memset(&clients, 0, sizeof(clients));
+	res = ipa3_get_clients_from_rm_resource(resource, &clients);
+	if (res) {
+		IPAERR("ipa3_get_clients_from_rm_resource() failed.\n");
+		return res;
+	}
+
+	for (index = 0; index < clients.length; index++) {
+		client = clients.names[index];
+		ipa_ep_idx = ipa3_get_ep_mapping(client);
+		if (ipa_ep_idx == -1) {
+			IPAERR("Invalid client.\n");
+			res = -EINVAL;
+			continue;
+		}
+		/*
+		 * The related ep will be resumed on connect
+		 * when its resource is granted
+		 */
+		ipa3_ctx->resume_on_connect[client] = true;
+		IPADBG("%d will be resumed on connect.\n", client);
+		if (ipa3_ctx->ep[ipa_ep_idx].client == client &&
+		    ipa3_should_pipe_be_suspended(client)) {
+			if (ipa3_ctx->ep[ipa_ep_idx].valid) {
+				memset(&suspend, 0, sizeof(suspend));
+				suspend.ipa_ep_suspend = false;
+				ipa3_cfg_ep_ctrl(ipa_ep_idx, &suspend);
+			}
+		}
+	}
+
+	return res;
+}
+
+/**
+ * _ipa_sram_settings_read_v3_0() - Read SRAM settings from HW
+ *
+ * Returns:	None
+ */
+void _ipa_sram_settings_read_v3_0(void)
+{
+	struct ipahal_reg_shared_mem_size smem_sz;
+
+	memset(&smem_sz, 0, sizeof(smem_sz));
+
+	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem_sz);
+
+	ipa3_ctx->smem_restricted_bytes = smem_sz.shared_mem_baddr;
+	ipa3_ctx->smem_sz = smem_sz.shared_mem_sz;
+
+	/* reg fields are in 8B units */
+	ipa3_ctx->smem_restricted_bytes *= 8;
+	ipa3_ctx->smem_sz *= 8;
+	ipa3_ctx->smem_reqd_sz = IPA_MEM_PART(end_ofst);
+	ipa3_ctx->hdr_tbl_lcl = 0;
+	ipa3_ctx->hdr_proc_ctx_tbl_lcl = 1;
+
+	/*
+	 * When the proc ctx table is located in internal memory,
+	 * modem entries reside first.
+	 */
+	if (ipa3_ctx->hdr_proc_ctx_tbl_lcl) {
+		ipa3_ctx->hdr_proc_ctx_tbl.start_offset =
+			IPA_MEM_PART(modem_hdr_proc_ctx_size);
+	}
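+	/* filter and route tables are marked non-local, i.e. resident in DDR */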
+	ipa3_ctx->ip4_rt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip4_rt_tbl_nhash_lcl = 0;
+	ipa3_ctx->ip6_rt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip6_rt_tbl_nhash_lcl = 0;
+	ipa3_ctx->ip4_flt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip4_flt_tbl_nhash_lcl = 0;
+	ipa3_ctx->ip6_flt_tbl_hash_lcl = 0;
+	ipa3_ctx->ip6_flt_tbl_nhash_lcl = 0;
+}
+
+/**
+ * ipa3_cfg_route() - configure IPA route
+ * @route: IPA route
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_cfg_route(struct ipahal_reg_route *route)
+{
+
+	IPADBG("disable_route_block=%d, default_pipe=%d, default_hdr_tbl=%d\n",
+		route->route_dis,
+		route->route_def_pipe,
+		route->route_def_hdr_table);
+	IPADBG("default_hdr_ofst=%d, default_frag_pipe=%d\n",
+		route->route_def_hdr_ofst,
+		route->route_frag_def_pipe);
+
+	IPADBG("default_retain_hdr=%d\n",
+		route->route_def_retain_hdr);
+
+	if (route->route_dis) {
+		IPAERR("Route disable is not supported!\n");
+		return -EPERM;
+	}
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+
+	ipahal_write_reg_fields(IPA_ROUTE, route);
+
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_filter() - configure filter
+ * @disable: disable value
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_cfg_filter(u32 disable)
+{
+	IPAERR("Filter disable is not supported!\n");
+	return -EPERM;
+}
+
+/**
+ * ipa3_cfg_qsb() - Configure IPA QSB maximal reads and writes
+ *
+ * Returns:	None
+ */
+void ipa3_cfg_qsb(void)
+{
+	int qsb_max_writes[2] = { 8, 2 };
+	int qsb_max_reads[2] = { 8, 8 };
+
+	ipahal_write_reg_fields(IPA_QSB_MAX_WRITES, qsb_max_writes);
+	ipahal_write_reg_fields(IPA_QSB_MAX_READS, qsb_max_reads);
+}
+
+/**
+ * ipa3_init_hw() - initialize HW
+ *
+ * Return codes:
+ * 0: success
+ */
+int ipa3_init_hw(void)
+{
+	u32 ipa_version = 0;
+	u32 val;
+
+	/* Read IPA version and make sure we have access to the registers */
+	ipa_version = ipahal_read_reg(IPA_VERSION);
+	if (ipa_version == 0)
+		return -EFAULT;
+
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		val = IPA_BCR_REG_VAL_v3_0;
+		break;
+	case IPA_HW_v3_5:
+	case IPA_HW_v3_5_1:
+		val = IPA_BCR_REG_VAL_v3_5;
+		break;
+	default:
+		IPAERR("unknown HW type in dts\n");
+		return -EFAULT;
+	}
+
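+	/* program the backward-compatibility register (BCR) for this HW type */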
+	ipahal_write_reg(IPA_BCR, val);
+
+	ipa3_cfg_qsb();
+
+	return 0;
+}
+
+/**
+ * ipa3_get_hw_type_index() - Get the HW type index used as the entry index
+ *	for ep/resource-group related arrays.
+ *
+ * Return value: HW type index
+ */
+u8 ipa3_get_hw_type_index(void)
+{
+	u8 hw_type_index;
+
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_1:
+		hw_type_index = IPA_3_0;
+		break;
+	case IPA_HW_v3_5:
+		hw_type_index = IPA_3_5;
+		break;
+	case IPA_HW_v3_5_1:
+		hw_type_index = IPA_3_5_1;
+		break;
+	default:
+		IPAERR("Incorrect IPA version %d\n", ipa3_ctx->ipa_hw_type);
+		hw_type_index = IPA_3_0;
+		break;
+	}
+
+	return hw_type_index;
+}
+
+/**
+ * ipa3_get_ep_mapping() - provide endpoint mapping
+ * @client: client type
+ *
+ * Return value: endpoint mapping
+ */
+int ipa3_get_ep_mapping(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
+}
+
+/**
+ * ipa3_get_gsi_ep_info() - provide gsi ep information
+ * @ipa_ep_idx: IPA endpoint index
+ *
+ * Return value: pointer to ipa_gsi_ep_info
+ */
+struct ipa_gsi_ep_config *ipa3_get_gsi_ep_info(int ipa_ep_idx)
+{
+	int i;
+	u8 hw_index;
+
+	hw_index = ipa3_get_hw_type_index();
+
+	for (i = 0; ; i++) {
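+		/* the per-HW-type table is terminated by an ipa_ep_num of -1 */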
+		if (ipa_gsi_ep_info[hw_index][i].ipa_ep_num < 0)
+			break;
+
+		if (ipa_gsi_ep_info[hw_index][i].ipa_ep_num ==
+			ipa_ep_idx)
+			return &(ipa_gsi_ep_info[hw_index][i]);
+	}
+
+	return NULL;
+}
+
+/**
+ * ipa_get_ep_group() - provide endpoint group by client
+ * @client: client type
+ *
+ * Return value: endpoint group
+ */
+int ipa_get_ep_group(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()][client].group_num;
+}
+
+/**
+ * ipa3_get_qmb_master_sel() - provide QMB master selection for the client
+ * @client: client type
+ *
+ * Return value: QMB master index
+ */
+u8 ipa3_get_qmb_master_sel(enum ipa_client_type client)
+{
+	if (client >= IPA_CLIENT_MAX || client < 0) {
+		IPAERR("Bad client number! client =%d\n", client);
+		return -EINVAL;
+	}
+
+	return ipa3_ep_mapping[ipa3_get_hw_type_index()]
+		[client].qmb_master_sel;
+}
+
+/**
+ * ipa3_set_client() - set the IPACM client mapping for a pipe
+ * @index: pipe index
+ * @client: client type
+ * @uplink: true if the client is an uplink client
+ *
+ * Return value: none
+ */
+void ipa3_set_client(int index, enum ipacm_client_enum client, bool uplink)
+{
+	if (client >= IPACM_CLIENT_MAX || client < IPACM_CLIENT_USB) {
+		IPAERR("Bad client number! client =%d\n", client);
+	} else if (index >= IPA3_MAX_NUM_PIPES || index < 0) {
+		IPAERR("Bad pipe index! index =%d\n", index);
+	} else {
+		ipa3_ctx->ipacm_client[index].client_enum = client;
+		ipa3_ctx->ipacm_client[index].uplink = uplink;
+	}
+}
+
+/**
+ * ipa3_get_client() - provide the IPACM client mapped to a pipe
+ * @pipe_idx: pipe index
+ *
+ * Return value: client enum, or IPACM_CLIENT_MAX for a bad pipe index
+ */
+enum ipacm_client_enum ipa3_get_client(int pipe_idx)
+{
+	if (pipe_idx >= IPA3_MAX_NUM_PIPES || pipe_idx < 0) {
+		IPAERR("Bad pipe index! pipe_idx =%d\n", pipe_idx);
+		return IPACM_CLIENT_MAX;
+	} else {
+		return ipa3_ctx->ipacm_client[pipe_idx].client_enum;
+	}
+}
+
+/**
+ * ipa3_get_client_uplink() - report whether the client on a pipe is uplink
+ * @pipe_idx: pipe index
+ *
+ * Return value: true if the pipe's client was registered as uplink
+ */
+bool ipa3_get_client_uplink(int pipe_idx)
+{
+	return ipa3_ctx->ipacm_client[pipe_idx].uplink;
+}
+
+/**
+ * ipa3_get_rm_resource_from_ep() - get the IPA_RM resource which is related to
+ * the supplied pipe index.
+ *
+ * @pipe_idx: pipe index
+ *
+ * Return value: IPA_RM resource related to the pipe, negative if a resource
+ * was not found.
+ */
+enum ipa_rm_resource_name ipa3_get_rm_resource_from_ep(int pipe_idx)
+{
+	int i;
+	int j;
+	enum ipa_client_type client;
+	struct ipa3_client_names clients;
+	bool found = false;
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	client = ipa3_ctx->ep[pipe_idx].client;
+
+	for (i = 0; i < IPA_RM_RESOURCE_MAX; i++) {
+		memset(&clients, 0, sizeof(clients));
+		ipa3_get_clients_from_rm_resource(i, &clients);
+		for (j = 0; j < clients.length; j++) {
+			if (clients.names[j] == client) {
+				found = true;
+				break;
+			}
+		}
+		if (found)
+			break;
+	}
+
+	if (!found)
+		return -EFAULT;
+
+	return i;
+}
+
+/**
+ * ipa3_get_client_mapping() - provide client mapping
+ * @pipe_idx: IPA end-point number
+ *
+ * Return value: client mapping
+ */
+enum ipa_client_type ipa3_get_client_mapping(int pipe_idx)
+{
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return -EINVAL;
+	}
+
+	return ipa3_ctx->ep[pipe_idx].client;
+}
+
+/**
+ * ipa_init_ep_flt_bitmap() - Initialize the bitmap
+ * that represents the end-points that support filtering
+ */
+void ipa_init_ep_flt_bitmap(void)
+{
+	enum ipa_client_type cl;
+	u8 hw_type_idx = ipa3_get_hw_type_index();
+	u32 bitmap;
+
+	bitmap = 0;
+
+	BUG_ON(ipa3_ctx->ep_flt_bitmap);
+
+	for (cl = 0; cl < IPA_CLIENT_MAX ; cl++) {
+		if (ipa3_ep_mapping[hw_type_idx][cl].support_flt) {
+			bitmap |=
+				(1U<<ipa3_ep_mapping[hw_type_idx][cl].pipe_num);
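+			/* count a pipe only the first time its bit is set */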
+			if (bitmap != ipa3_ctx->ep_flt_bitmap) {
+				ipa3_ctx->ep_flt_bitmap = bitmap;
+				ipa3_ctx->ep_flt_num++;
+			}
+		}
+	}
+}
+
+/**
+ * ipa_is_ep_support_flt() - Given an End-point check
+ * whether it supports filtering or not.
+ *
+ * @pipe_idx: pipe index
+ *
+ * Return values:
+ * true if the end-point supports filtering, false otherwise
+ */
+bool ipa_is_ep_support_flt(int pipe_idx)
+{
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return false;
+	}
+
+	return ipa3_ctx->ep_flt_bitmap & (1U<<pipe_idx);
+}
+
+/**
+ * ipa3_cfg_ep_seq() - IPA end-point HPS/DPS sequencer type configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_seq(u32 clnt_hdl, const struct ipa_ep_cfg_seq *seq_cfg)
+{
+	int type;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad param, clnt_hdl = %d", clnt_hdl);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("SEQ does not apply to IPA consumer EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * Skip configuring the sequencer type for test clients.
+	 * It is configured dynamically in ipa3_cfg_ep_mode().
+	 */
+	if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPADBG("Skip sequencers configuration for test clients\n");
+		return 0;
+	}
+
+	if (seq_cfg->set_dynamic)
+		type = seq_cfg->seq_type;
+	else
+		type = ipa3_ep_mapping[ipa3_get_hw_type_index()]
+			[ipa3_ctx->ep[clnt_hdl].client].sequencer_type;
+
+	if (type != IPA_DPS_HPS_SEQ_TYPE_INVALID) {
+		if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA &&
+			!IPA_DPS_HPS_SEQ_TYPE_IS_DMA(type)) {
+			IPAERR("Configuring non-DMA SEQ type to DMA pipe\n");
+			BUG();
+		}
+		IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+		/* Configure the sequencer type */
+		IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+				clnt_hdl);
+		ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+
+		IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+	} else {
+		IPADBG("should not set sequencer type of ep = %d\n", clnt_hdl);
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep - IPA end-point configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * This includes nat, header, mode, aggregation and route settings and is a
+ * one-shot API to configure the IPA end-point fully
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	int result = -EINVAL;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ipa_ep_cfg == NULL) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	result = ipa3_cfg_ep_hdr(clnt_hdl, &ipa_ep_cfg->hdr);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_hdr_ext(clnt_hdl, &ipa_ep_cfg->hdr_ext);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_aggr(clnt_hdl, &ipa_ep_cfg->aggr);
+	if (result)
+		return result;
+
+	result = ipa3_cfg_ep_cfg(clnt_hdl, &ipa_ep_cfg->cfg);
+	if (result)
+		return result;
+
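+	/* producers get NAT/mode/seq/route/deaggr; consumers get the metadata mask */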
+	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+		result = ipa3_cfg_ep_nat(clnt_hdl, &ipa_ep_cfg->nat);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_mode(clnt_hdl, &ipa_ep_cfg->mode);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_seq(clnt_hdl, &ipa_ep_cfg->seq);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_route(clnt_hdl, &ipa_ep_cfg->route);
+		if (result)
+			return result;
+
+		result = ipa3_cfg_ep_deaggr(clnt_hdl, &ipa_ep_cfg->deaggr);
+		if (result)
+			return result;
+	} else {
+		result = ipa3_cfg_ep_metadata_mask(clnt_hdl,
+				&ipa_ep_cfg->metadata_mask);
+		if (result)
+			return result;
+	}
+
+	return 0;
+}
+
+const char *ipa3_get_nat_en_str(enum ipa_nat_en_type nat_en)
+{
+	switch (nat_en) {
+	case (IPA_BYPASS_NAT):
+		return "NAT disabled";
+	case (IPA_SRC_NAT):
+		return "Source NAT";
+	case (IPA_DST_NAT):
+		return "Dst NAT";
+	}
+
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_nat() - IPA end-point NAT configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ep_nat)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_nat == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("NAT does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, nat_en=%d(%s)\n",
+			clnt_hdl,
+			ep_nat->nat_en,
+			ipa3_get_nat_en_str(ep_nat->nat_en));
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.nat = *ep_nat;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_NAT_n, clnt_hdl, ep_nat);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_status() - IPA end-point status configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_status(u32 clnt_hdl,
+	const struct ipahal_reg_ep_cfg_status *ep_status)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_status == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, status_en=%d status_ep=%d status_location=%d\n",
+			clnt_hdl,
+			ep_status->status_en,
+			ep_status->status_ep,
+			ep_status->status_location);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].status = *ep_status;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, clnt_hdl, ep_status);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_cfg() - IPA end-point cfg configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *cfg)
+{
+	u8 qmb_master_sel;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || cfg == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.cfg = *cfg;
+
+	/* Override QMB master selection */
+	qmb_master_sel = ipa3_get_qmb_master_sel(ipa3_ctx->ep[clnt_hdl].client);
+	ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel = qmb_master_sel;
+	IPADBG(
+	       "pipe=%d, frag_ofld_en=%d cs_ofld_en=%d mdata_hdr_ofst=%d gen_qmb_master_sel=%d\n",
+			clnt_hdl,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.frag_offload_en,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_offload_en,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.cs_metadata_hdr_offset,
+			ipa3_ctx->ep[clnt_hdl].cfg.cfg.gen_qmb_master_sel);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CFG_n, clnt_hdl,
+				  &ipa3_ctx->ep[clnt_hdl].cfg.cfg);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata_mask() - IPA end-point meta-data mask configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask
+		*metadata_mask)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || metadata_mask == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl,
+					ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, metadata_mask=0x%x\n",
+			clnt_hdl,
+			metadata_mask->metadata_mask);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.metadata_mask = *metadata_mask;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+		clnt_hdl, metadata_mask);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr() -  IPA end-point header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ep_hdr)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+	IPADBG("pipe=%d metadata_reg_valid=%d\n",
+		clnt_hdl,
+		ep_hdr->hdr_metadata_reg_valid);
+
+	IPADBG("remove_additional=%d, a5_mux=%d, ofst_pkt_size=0x%x\n",
+		ep_hdr->hdr_remove_additional,
+		ep_hdr->hdr_a5_mux,
+		ep_hdr->hdr_ofst_pkt_size);
+
+	IPADBG("ofst_pkt_size_valid=%d, additional_const_len=0x%x\n",
+		ep_hdr->hdr_ofst_pkt_size_valid,
+		ep_hdr->hdr_additional_const_len);
+
+	IPADBG("ofst_metadata=0x%x, ofst_metadata_valid=%d, len=0x%x\n",
+		ep_hdr->hdr_ofst_metadata,
+		ep_hdr->hdr_ofst_metadata_valid,
+		ep_hdr->hdr_len);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr = *ep_hdr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl, &ep->cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_hdr_ext() -  IPA end-point extended header configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_hdr_ext:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_hdr_ext(u32 clnt_hdl,
+		       const struct ipa_ep_cfg_hdr_ext *ep_hdr_ext)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_hdr_ext == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d hdr_pad_to_alignment=%d\n",
+		clnt_hdl,
+		ep_hdr_ext->hdr_pad_to_alignment);
+
+	IPADBG("hdr_total_len_or_pad_offset=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_offset);
+
+	IPADBG("hdr_payload_len_inc_padding=%d hdr_total_len_or_pad=%d\n",
+		ep_hdr_ext->hdr_payload_len_inc_padding,
+		ep_hdr_ext->hdr_total_len_or_pad);
+
+	IPADBG("hdr_total_len_or_pad_valid=%d hdr_little_endian=%d\n",
+		ep_hdr_ext->hdr_total_len_or_pad_valid,
+		ep_hdr_ext->hdr_little_endian);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.hdr_ext = *ep_hdr_ext;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_EXT_n, clnt_hdl,
+		&ep->cfg.hdr_ext);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_ctrl() -  IPA end-point Control configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg_ctrl:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes || ep_ctrl == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d ep_suspend=%d, ep_delay=%d\n",
+		clnt_hdl,
+		ep_ctrl->ipa_ep_suspend,
+		ep_ctrl->ipa_ep_delay);
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_CTRL_n, clnt_hdl, ep_ctrl);
+
+	if (ep_ctrl->ipa_ep_suspend == true &&
+			IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client))
+		ipa3_suspend_active_aggr_wa(clnt_hdl);
+
+	return 0;
+}
+
+const char *ipa3_get_mode_type_str(enum ipa_mode_type mode)
+{
+	switch (mode) {
+	case (IPA_BASIC):
+		return "Basic";
+	case (IPA_ENABLE_FRAMING_HDLC):
+		return "HDLC framing";
+	case (IPA_ENABLE_DEFRAMING_HDLC):
+		return "HDLC de-framing";
+	case (IPA_DMA):
+		return "DMA";
+	}
+
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_mode() - IPA end-point mode configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ep_mode)
+{
+	int ep;
+	int type;
+	struct ipahal_reg_endp_init_mode init_mode;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_mode == NULL) {
+		IPAERR("bad params clnt_hdl=%d , ep_valid=%d ep_mode=%p\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid,
+				ep_mode);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("MODE does not apply to IPA out EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ep = ipa3_get_ep_mapping(ep_mode->dst);
+	if (ep == -1 && ep_mode->mode == IPA_DMA) {
+		IPAERR("dst %d does not exist in DMA mode\n", ep_mode->dst);
+		return -EINVAL;
+	}
+
+	WARN_ON(ep_mode->mode == IPA_DMA && IPA_CLIENT_IS_PROD(ep_mode->dst));
+
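+	/* if dst is not a consumer, fall back to the APPS LAN consumer pipe */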
+	if (!IPA_CLIENT_IS_CONS(ep_mode->dst))
+		ep = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+
+	IPADBG("pipe=%d mode=%d(%s), dst_client_number=%d\n",
+			clnt_hdl,
+			ep_mode->mode,
+			ipa3_get_mode_type_str(ep_mode->mode),
+			ep_mode->dst);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.mode = *ep_mode;
+	ipa3_ctx->ep[clnt_hdl].dst_pipe_index = ep;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	init_mode.dst_pipe_number = ipa3_ctx->ep[clnt_hdl].dst_pipe_index;
+	init_mode.ep_mode = *ep_mode;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_MODE_n, clnt_hdl, &init_mode);
+
+	/* Configure the sequencer type for test clients */
+	if (IPA_CLIENT_IS_TEST(ipa3_ctx->ep[clnt_hdl].client)) {
+		if (ep_mode->mode == IPA_DMA)
+			type = IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY;
+		else
+			type = IPA_DPS_HPS_SEQ_TYPE_PKT_PROCESS_NO_DEC_UCP;
+
+		IPADBG("set sequencers to sequence 0x%x, ep = %d\n", type,
+				clnt_hdl);
+		ipahal_write_reg_n(IPA_ENDP_INIT_SEQ_n, clnt_hdl, type);
+	}
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+const char *ipa3_get_aggr_enable_str(enum ipa_aggr_en_type aggr_en)
+{
+	switch (aggr_en) {
+	case (IPA_BYPASS_AGGR):
+		return "no aggregation";
+	case (IPA_ENABLE_AGGR):
+		return "aggregation enabled";
+	case (IPA_ENABLE_DEAGGR):
+		return "de-aggregation enabled";
+	}
+
+	return "undefined";
+}
+
+const char *ipa3_get_aggr_type_str(enum ipa_aggr_type aggr_type)
+{
+	switch (aggr_type) {
+		return "MBIM_16";
+	case (IPA_HDLC):
+		return "HDLC";
+	case (IPA_TLP):
+		return "TLP";
+	case (IPA_RNDIS):
+		return "RNDIS";
+	case (IPA_GENERIC):
+		return "GENERIC";
+	case (IPA_QCMAP):
+		return "QCMAP";
+			return "QCMAP";
+	}
+	return "undefined";
+}
+
+/**
+ * ipa3_cfg_ep_aggr() - IPA end-point aggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ep_aggr)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_aggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (ep_aggr->aggr_en == IPA_ENABLE_DEAGGR &&
+	    !IPA_EP_SUPPORTS_DEAGGR(clnt_hdl)) {
+		IPAERR("pipe=%d cannot be configured to DEAGGR\n", clnt_hdl);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d en=%d(%s), type=%d(%s), byte_limit=%d, time_limit=%d\n",
+			clnt_hdl,
+			ep_aggr->aggr_en,
+			ipa3_get_aggr_enable_str(ep_aggr->aggr_en),
+			ep_aggr->aggr,
+			ipa3_get_aggr_type_str(ep_aggr->aggr),
+			ep_aggr->aggr_byte_limit,
+			ep_aggr->aggr_time_limit);
+	IPADBG("hard_byte_limit_en=%d aggr_sw_eof_active=%d\n",
+		ep_aggr->aggr_hard_byte_limit_en,
+		ep_aggr->aggr_sw_eof_active);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.aggr = *ep_aggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_AGGR_n, clnt_hdl, ep_aggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_route() - IPA end-point routing configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ep_route)
+{
+	struct ipahal_reg_endp_init_route init_rt;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_route == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+			clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_CONS(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("ROUTE does not apply to IPA out EP %d\n",
+				clnt_hdl);
+		return -EINVAL;
+	}
+
+	/*
+	 * if DMA mode was configured previously for this EP, return with
+	 * success
+	 */
+	if (ipa3_ctx->ep[clnt_hdl].cfg.mode.mode == IPA_DMA) {
+		IPADBG("DMA enabled for ep %d, dst pipe is part of DMA\n",
+				clnt_hdl);
+		return 0;
+	}
+
+	if (ep_route->rt_tbl_hdl)
+		IPAERR("client specified non-zero RT TBL hdl - ignore it\n");
+
+	IPADBG("pipe=%d, rt_tbl_hdl=%d\n",
+			clnt_hdl,
+			ep_route->rt_tbl_hdl);
+
+	/* always use "default" routing table when programming EP ROUTE reg */
+	ipa3_ctx->ep[clnt_hdl].rt_tbl_idx =
+		IPA_MEM_PART(v4_apps_rt_index_lo);
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	init_rt.route_table_index = ipa3_ctx->ep[clnt_hdl].rt_tbl_idx;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_ROUTE_n, clnt_hdl, &init_rt);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb() - IPA end-point holb configuration
+ *
+ * If an IPA producer pipe is full, IPA HW by default will block
+ * indefinitely till space opens up. During this time no packets
+ * including those from unrelated pipes will be processed. Enabling
+ * HOLB means IPA HW will be allowed to drop packets as/when needed
+ * and indefinite blocking is avoided.
+ *
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ep_holb)
+{
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_holb == NULL ||
+	    ep_holb->tmr_val > ipa3_ctx->ctrl->max_holb_tmr_val ||
+	    ep_holb->en > 1) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	if (IPA_CLIENT_IS_PROD(ipa3_ctx->ep[clnt_hdl].client)) {
+		IPAERR("HOLB does not apply to IPA in EP %d\n", clnt_hdl);
+		return -EINVAL;
+	}
+
+	ipa3_ctx->ep[clnt_hdl].holb = *ep_holb;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_EN_n, clnt_hdl,
+		ep_holb);
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n, clnt_hdl,
+		ep_holb);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	IPADBG("cfg holb %u ep=%d tmr=%d\n", ep_holb->en, clnt_hdl,
+				ep_holb->tmr_val);
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_holb_by_client() - IPA end-point holb configuration
+ *
+ * Wrapper function for ipa3_cfg_ep_holb() with client name instead of
+ * client handle. This function is used for clients that do not have a
+ * client handle.
+ *
+ * @client:	[in] client name
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ep_holb)
+{
+	return ipa3_cfg_ep_holb(ipa3_get_ep_mapping(client), ep_holb);
+}
+
+/**
+ * ipa3_cfg_ep_deaggr() -  IPA end-point deaggregation configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ep_deaggr:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_deaggr(u32 clnt_hdl,
+			const struct ipa_ep_cfg_deaggr *ep_deaggr)
+{
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+	    ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_deaggr == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+				clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d deaggr_hdr_len=%d\n",
+		clnt_hdl,
+		ep_deaggr->deaggr_hdr_len);
+
+	IPADBG("packet_offset_valid=%d\n",
+		ep_deaggr->packet_offset_valid);
+
+	IPADBG("packet_offset_location=%d max_packet_len=%d\n",
+		ep_deaggr->packet_offset_location,
+		ep_deaggr->max_packet_len);
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	/* copy over EP cfg */
+	ep->cfg.deaggr = *ep_deaggr;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_DEAGGR_n, clnt_hdl,
+		&ep->cfg.deaggr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+/**
+ * ipa3_cfg_ep_metadata() - IPA end-point metadata configuration
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ * @ipa_ep_cfg:	[in] IPA end-point configuration params
+ *
+ * Returns:	0 on success, negative on failure
+ *
+ * Note:	Should not be called from atomic context
+ */
+int ipa3_cfg_ep_metadata(u32 clnt_hdl, const struct ipa_ep_cfg_metadata *ep_md)
+{
+	u32 qmap_id = 0;
+	struct ipa_ep_cfg_metadata ep_md_reg_wrt;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0 || ep_md == NULL) {
+		IPAERR("bad parm, clnt_hdl = %d , ep_valid = %d\n",
+					clnt_hdl, ipa3_ctx->ep[clnt_hdl].valid);
+		return -EINVAL;
+	}
+
+	IPADBG("pipe=%d, mux id=%d\n", clnt_hdl, ep_md->qmap_id);
+
+	/* copy over EP cfg */
+	ipa3_ctx->ep[clnt_hdl].cfg.meta = *ep_md;
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	ep_md_reg_wrt = *ep_md;
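+	/* shift and mask the QMAP mux ID into its register field position */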
+	qmap_id = (ep_md->qmap_id <<
+		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_SHFT) &
+		IPA_ENDP_INIT_HDR_METADATA_n_MUX_ID_BMASK;
+
+	ep_md_reg_wrt.qmap_id = qmap_id;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_METADATA_n, clnt_hdl,
+		&ep_md_reg_wrt);
+	ipa3_ctx->ep[clnt_hdl].cfg.hdr.hdr_metadata_reg_valid = 1;
+	ipahal_write_reg_n_fields(IPA_ENDP_INIT_HDR_n, clnt_hdl,
+		&ipa3_ctx->ep[clnt_hdl].cfg.hdr);
+
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return 0;
+}
+
+int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	struct ipa_ep_cfg_metadata meta;
+	struct ipa3_ep_context *ep;
+	int ipa_ep_idx;
+	int result = -EINVAL;
+
+	if (param_in->client  >= IPA_CLIENT_MAX) {
+		IPAERR("bad parm client:%d\n", param_in->client);
+		goto fail;
+	}
+
+	ipa_ep_idx = ipa3_get_ep_mapping(param_in->client);
+	if (ipa_ep_idx == -1) {
+		IPAERR("Invalid client.\n");
+		goto fail;
+	}
+
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (!ep->valid) {
+		IPAERR("EP not allocated.\n");
+		goto fail;
+	}
+
+	meta.qmap_id = param_in->qmap_id;
+	if (param_in->client == IPA_CLIENT_USB_PROD ||
+	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
+	    param_in->client == IPA_CLIENT_ODU_PROD) {
+		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
+	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
+		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
+		result = ipa3_write_qmapid_wdi_pipe(ipa_ep_idx, meta.qmap_id);
+		if (result)
+			IPAERR("qmap_id %d write failed on ep=%d\n",
+					meta.qmap_id, ipa_ep_idx);
+		result = 0;
+	}
+
+fail:
+	return result;
+}
+
+/**
+ * ipa3_dump_buff_internal() - dumps buffer for debug purposes
+ * @base: buffer base address
+ * @phy_base: buffer physical base address
+ * @size: size of the buffer
+ */
+void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size)
+{
+	int i;
+	u32 *cur = (u32 *)base;
+	u8 *byt;
+
+	IPADBG("system phys addr=%pa len=%u\n", &phy_base, size);
+	for (i = 0; i < size / 4; i++) {
+		byt = (u8 *)(cur + i);
+		IPADBG("%2d %08x   %02x %02x %02x %02x\n", i, *(cur + i),
+				byt[0], byt[1], byt[2], byt[3]);
+	}
+	IPADBG("END\n");
+}
+
+/**
+ * ipa3_pipe_mem_init() - initialize the pipe memory
+ * @start_ofst: start offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ * -ENOMEM: no memory
+ */
+int ipa3_pipe_mem_init(u32 start_ofst, u32 size)
+{
+	int res;
+	u32 aligned_start_ofst;
+	u32 aligned_size;
+	struct gen_pool *pool;
+
+	if (!size) {
+		IPAERR("no IPA pipe memory allocated\n");
+		goto fail;
+	}
+
+	aligned_start_ofst = IPA_PIPE_MEM_START_OFST_ALIGNMENT(start_ofst);
+	aligned_size = size - (aligned_start_ofst - start_ofst);
+
+	IPADBG("start_ofst=%u aligned_start_ofst=%u size=%u aligned_size=%u\n",
+	       start_ofst, aligned_start_ofst, size, aligned_size);
+
+	/* allocation order of 8, i.e. 256-byte granularity, global pool */
+	pool = gen_pool_create(8, -1);
+	if (!pool) {
+		IPAERR("Failed to create a new memory pool.\n");
+		goto fail;
+	}
+
+	res = gen_pool_add(pool, aligned_start_ofst, aligned_size, -1);
+	if (res) {
+		IPAERR("Failed to add memory to IPA pipe pool\n");
+		goto err_pool_add;
+	}
+
+	ipa3_ctx->pipe_mem_pool = pool;
+	return 0;
+
+err_pool_add:
+	gen_pool_destroy(pool);
+fail:
+	return -ENOMEM;
+}
+
+/**
+ * ipa3_pipe_mem_alloc() - allocate pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa3_pipe_mem_alloc(u32 *ofst, u32 size)
+{
+	u32 vaddr;
+	int res = -1;
+
+	if (!ipa3_ctx->pipe_mem_pool || !size) {
+		IPAERR("failed size=%u pipe_mem_pool=%p\n", size,
+				ipa3_ctx->pipe_mem_pool);
+		return res;
+	}
+
+	vaddr = gen_pool_alloc(ipa3_ctx->pipe_mem_pool, size);
+
+	if (vaddr) {
+		*ofst = vaddr;
+		res = 0;
+		IPADBG("size=%u ofst=%u\n", size, vaddr);
+	} else {
+		IPAERR("size=%u failed\n", size);
+	}
+
+	return res;
+}
+
+/**
+ * ipa3_pipe_mem_free() - free pipe memory
+ * @ofst: offset
+ * @size: size
+ *
+ * Return value:
+ * 0: success
+ */
+int ipa3_pipe_mem_free(u32 ofst, u32 size)
+{
+	IPADBG("size=%u ofst=%u\n", size, ofst);
+	if (ipa3_ctx->pipe_mem_pool && size)
+		gen_pool_free(ipa3_ctx->pipe_mem_pool, ofst, size);
+	return 0;
+}
+
+/**
+ * ipa3_set_aggr_mode() - Set the aggregation mode which is a global setting
+ * @mode:	[in] the desired aggregation mode, e.g. straight MBIM, QCNCM,
+ * etc.
+ *
+ * Returns:	0 on success
+ */
+int ipa3_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	struct ipahal_reg_qcncm qcncm;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
+	qcncm.mode_en = mode;
+	ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_set_qcncm_ndp_sig() - Set the NDP signature used for QCNCM aggregation
+ * mode
+ * @sig:	[in] the first 3 bytes of QCNCM NDP signature (expected to be
+ * "QND")
+ *
+ * Set the NDP signature used for QCNCM aggregation mode. The fourth byte
+ * (expected to be 'P') needs to be set using the header addition mechanism
+ *
+ * Returns:	0 on success, negative on failure
+ */
+int ipa3_set_qcncm_ndp_sig(char sig[3])
+{
+	struct ipahal_reg_qcncm qcncm;
+
+	if (sig == NULL) {
+		IPAERR("bad argument for ipa3_set_qcncm_ndp_sig\n");
+		return -EINVAL;
+	}
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_QCNCM, &qcncm);
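+	/* pack the three signature bytes into a 24-bit value, sig[0] in the MSB */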
+	qcncm.mode_val = ((sig[0] << 16) | (sig[1] << 8) | sig[2]);
+	ipahal_write_reg_fields(IPA_QCNCM, &qcncm);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_set_single_ndp_per_mbim() - Enable/disable single NDP per MBIM frame
+ * configuration
+ * @enable:	[in] true for single NDP/MBIM; false otherwise
+ *
+ * Returns:	0 on success
+ */
+int ipa3_set_single_ndp_per_mbim(bool enable)
+{
+	struct ipahal_reg_single_ndp_mode mode;
+
+	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+	ipahal_read_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+	mode.single_ndp_en = enable;
+	ipahal_write_reg_fields(IPA_SINGLE_NDP_MODE, &mode);
+	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+
+	return 0;
+}
+
+/**
+ * ipa3_straddle_boundary() - Checks whether a memory buffer straddles a
+ * boundary
+ * @start: start address of the memory buffer
+ * @end: end address of the memory buffer
+ * @boundary: boundary
+ *
+ * Return value:
+ * 1: if the interval [start, end] straddles boundary
+ * 0: otherwise
+ */
+int ipa3_straddle_boundary(u32 start, u32 end, u32 boundary)
+{
+	u32 next_start;
+	u32 prev_end;
+
+	IPADBG("start=%u end=%u boundary=%u\n", start, end, boundary);
+
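+	/*
+	 * next_start: start rounded up to the boundary;
+	 * prev_end: end rounded up to the boundary, minus one boundary
+	 */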
+	next_start = (start + (boundary - 1)) & ~(boundary - 1);
+	prev_end = ((end + (boundary - 1)) & ~(boundary - 1)) - boundary;
+
+	while (next_start < prev_end)
+		next_start += boundary;
+
+	if (next_start == prev_end)
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * ipa3_bam_reg_dump() - Dump selected BAM registers for IPA.
+ * The API is right now used only to dump IPA registers towards USB.
+ *
+ * Function is rate limited to avoid flooding kernel log buffer
+ */
+void ipa3_bam_reg_dump(void)
+{
+	static DEFINE_RATELIMIT_STATE(_rs, 500*HZ, 1);
+
+	if (__ratelimit(&_rs)) {
+		IPA_ACTIVE_CLIENTS_INC_SIMPLE();
+		pr_err("IPA BAM START\n");
+		sps_get_bam_debug_info(ipa3_ctx->bam_handle, 93,
+			(SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_CONS))
+			|
+			SPS_BAM_PIPE(ipa3_get_ep_mapping(IPA_CLIENT_USB_PROD))),
+			0, 2);
+		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
+	}
+}
+
+/**
+ * ipa3_init_mem_partition() - Reads IPA memory map from DTS, performs alignment
+ * checks and logs the fetched values.
+ *
+ * Returns:	0 on success
+ */
+int ipa3_init_mem_partition(struct device_node *node)
+{
+	int result;
+
+	IPADBG("Reading from DTS as u32 array\n");
+	result = of_property_read_u32_array(node,
+		"qcom,ipa-ram-mmap", (u32 *)&ipa3_ctx->ctrl->mem_partition,
+		sizeof(ipa3_ctx->ctrl->mem_partition) / sizeof(u32));
+
+	if (result) {
+		IPAERR("Read operation failed\n");
+		return -ENODEV;
+	}
+
+	IPADBG("NAT OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(nat_ofst),
+		IPA_MEM_PART(nat_size));
+
+	if (IPA_MEM_PART(uc_info_ofst) & 3) {
+		IPAERR("UC INFO OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(uc_info_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("UC INFO OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(uc_info_ofst), IPA_MEM_PART(uc_info_size));
+
+	IPADBG("RAM OFST 0x%x\n", IPA_MEM_PART(ofst_start));
+
+	if (IPA_MEM_PART(v4_flt_hash_ofst) & 7) {
+		IPAERR("V4 FLT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_flt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_hash_ofst),
+		IPA_MEM_PART(v4_flt_hash_size),
+		IPA_MEM_PART(v4_flt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v4_flt_nhash_ofst) & 7) {
+		IPAERR("V4 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_flt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_flt_nhash_ofst),
+		IPA_MEM_PART(v4_flt_nhash_size),
+		IPA_MEM_PART(v4_flt_nhash_size_ddr));
+
+	if (IPA_MEM_PART(v6_flt_hash_ofst) & 7) {
+		IPAERR("V6 FLT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_flt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 FLT HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_hash_ofst), IPA_MEM_PART(v6_flt_hash_size),
+		IPA_MEM_PART(v6_flt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v6_flt_nhash_ofst) & 7) {
+		IPAERR("V6 FLT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_flt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 FLT NON-HASHABLE OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_flt_nhash_ofst),
+		IPA_MEM_PART(v6_flt_nhash_size),
+		IPA_MEM_PART(v6_flt_nhash_size_ddr));
+
+	IPADBG("V4 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v4_rt_num_index));
+
+	IPADBG("V4 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_modem_rt_index_lo),
+		IPA_MEM_PART(v4_modem_rt_index_hi));
+
+	IPADBG("V4 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v4_apps_rt_index_lo),
+		IPA_MEM_PART(v4_apps_rt_index_hi));
+
+	if (IPA_MEM_PART(v4_rt_hash_ofst) & 7) {
+		IPAERR("V4 RT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_rt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v4_rt_hash_ofst));
+
+	IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_rt_hash_size),
+		IPA_MEM_PART(v4_rt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v4_rt_nhash_ofst) & 7) {
+		IPAERR("V4 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v4_rt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V4 RT NON-HASHABLE OFST 0x%x\n",
+		IPA_MEM_PART(v4_rt_nhash_ofst));
+
+	IPADBG("V4 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v4_rt_nhash_size),
+		IPA_MEM_PART(v4_rt_nhash_size_ddr));
+
+	IPADBG("V6 RT NUM INDEX 0x%x\n", IPA_MEM_PART(v6_rt_num_index));
+
+	IPADBG("V6 RT MODEM INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_modem_rt_index_lo),
+		IPA_MEM_PART(v6_modem_rt_index_hi));
+
+	IPADBG("V6 RT APPS INDEXES 0x%x - 0x%x\n",
+		IPA_MEM_PART(v6_apps_rt_index_lo),
+		IPA_MEM_PART(v6_apps_rt_index_hi));
+
+	if (IPA_MEM_PART(v6_rt_hash_ofst) & 7) {
+		IPAERR("V6 RT HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_rt_hash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 RT HASHABLE OFST 0x%x\n", IPA_MEM_PART(v6_rt_hash_ofst));
+
+	IPADBG("V6 RT HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_rt_hash_size),
+		IPA_MEM_PART(v6_rt_hash_size_ddr));
+
+	if (IPA_MEM_PART(v6_rt_nhash_ofst) & 7) {
+		IPAERR("V6 RT NON-HASHABLE OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(v6_rt_nhash_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("V6 RT NON-HASHABLE OFST 0x%x\n",
+		IPA_MEM_PART(v6_rt_nhash_ofst));
+
+	IPADBG("V6 RT NON-HASHABLE SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(v6_rt_nhash_size),
+		IPA_MEM_PART(v6_rt_nhash_size_ddr));
+
+	if (IPA_MEM_PART(modem_hdr_ofst) & 7) {
+		IPAERR("MODEM HDR OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_hdr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM HDR OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_ofst), IPA_MEM_PART(modem_hdr_size));
+
+	if (IPA_MEM_PART(apps_hdr_ofst) & 7) {
+		IPAERR("APPS HDR OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(apps_hdr_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("APPS HDR OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_ofst), IPA_MEM_PART(apps_hdr_size),
+		IPA_MEM_PART(apps_hdr_size_ddr));
+
+	if (IPA_MEM_PART(modem_hdr_proc_ctx_ofst) & 7) {
+		IPAERR("MODEM HDR PROC CTX OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_hdr_proc_ctx_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM HDR PROC CTX OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(modem_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(modem_hdr_proc_ctx_size));
+
+	if (IPA_MEM_PART(apps_hdr_proc_ctx_ofst) & 7) {
+		IPAERR("APPS HDR PROC CTX OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(apps_hdr_proc_ctx_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("APPS HDR PROC CTX OFST 0x%x SIZE 0x%x DDR SIZE 0x%x\n",
+		IPA_MEM_PART(apps_hdr_proc_ctx_ofst),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size),
+		IPA_MEM_PART(apps_hdr_proc_ctx_size_ddr));
+
+	if (IPA_MEM_PART(modem_ofst) & 7) {
+		IPAERR("MODEM OFST 0x%x is unaligned\n",
+			IPA_MEM_PART(modem_ofst));
+		return -ENODEV;
+	}
+
+	IPADBG("MODEM OFST 0x%x SIZE 0x%x\n", IPA_MEM_PART(modem_ofst),
+		IPA_MEM_PART(modem_size));
+
+	IPADBG("V4 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_hash_ofst),
+		IPA_MEM_PART(apps_v4_flt_hash_size));
+
+	IPADBG("V4 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_flt_nhash_ofst),
+		IPA_MEM_PART(apps_v4_flt_nhash_size));
+
+	IPADBG("V6 APPS HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_hash_ofst),
+		IPA_MEM_PART(apps_v6_flt_hash_size));
+
+	IPADBG("V6 APPS NON-HASHABLE FLT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_flt_nhash_ofst),
+		IPA_MEM_PART(apps_v6_flt_nhash_size));
+
+	IPADBG("RAM END OFST 0x%x\n",
+		IPA_MEM_PART(end_ofst));
+
+	IPADBG("V4 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_rt_hash_ofst),
+		IPA_MEM_PART(apps_v4_rt_hash_size));
+
+	IPADBG("V4 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v4_rt_nhash_ofst),
+		IPA_MEM_PART(apps_v4_rt_nhash_size));
+
+	IPADBG("V6 APPS HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_rt_hash_ofst),
+		IPA_MEM_PART(apps_v6_rt_hash_size));
+
+	IPADBG("V6 APPS NON-HASHABLE RT OFST 0x%x SIZE 0x%x\n",
+		IPA_MEM_PART(apps_v6_rt_nhash_ofst),
+		IPA_MEM_PART(apps_v6_rt_nhash_size));
+
+	return 0;
+}
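+
+/*
+ * Sketch of the expected DT input (illustrative values only): the
+ * qcom,ipa-ram-mmap property is read as one flat u32 array, laid out in the
+ * same order as the mem_partition struct fields, e.g.
+ *
+ *	qcom,ipa-ram-mmap = <0x280 0x0 0x0 ...>;
+ *
+ * The property must provide at least enough u32 cells to fill
+ * ipa3_ctx->ctrl->mem_partition; otherwise of_property_read_u32_array()
+ * fails and this function returns -ENODEV.
+ */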
+
+/**
+ * ipa3_controller_static_bind() - set the appropriate methods for
+ *  the IPA driver based on the HW version
+ *
+ *  @ctrl: data structure which holds the function pointers
+ *  @hw_type: the HW type in use
+ *
+ *  The runtime assignment could be avoided with C99 designated struct
+ *  initializers; the choice is a time vs. memory trade-off.
+ */
+int ipa3_controller_static_bind(struct ipa3_controller *ctrl,
+		enum ipa_hw_type hw_type)
+{
+	ctrl->ipa_init_rt4 = _ipa_init_rt4_v3;
+	ctrl->ipa_init_rt6 = _ipa_init_rt6_v3;
+	ctrl->ipa_init_flt4 = _ipa_init_flt4_v3;
+	ctrl->ipa_init_flt6 = _ipa_init_flt6_v3;
+	ctrl->ipa_clk_rate_turbo = IPA_V3_0_CLK_RATE_TURBO;
+	ctrl->ipa_clk_rate_nominal = IPA_V3_0_CLK_RATE_NOMINAL;
+	ctrl->ipa_clk_rate_svs = IPA_V3_0_CLK_RATE_SVS;
+	ctrl->ipa3_read_ep_reg = _ipa_read_ep_reg_v3_0;
+	ctrl->ipa3_commit_flt = __ipa_commit_flt_v3;
+	ctrl->ipa3_commit_rt = __ipa_commit_rt_v3;
+	ctrl->ipa3_commit_hdr = __ipa_commit_hdr_v3_0;
+	ctrl->ipa3_enable_clks = _ipa_enable_clks_v3_0;
+	ctrl->ipa3_disable_clks = _ipa_disable_clks_v3_0;
+	ctrl->msm_bus_data_ptr = &ipa_bus_client_pdata_v3_0;
+	ctrl->clock_scaling_bw_threshold_nominal =
+		IPA_V3_0_BW_THRESHOLD_NOMINAL_MBPS;
+	ctrl->clock_scaling_bw_threshold_turbo =
+		IPA_V3_0_BW_THRESHOLD_TURBO_MBPS;
+	ctrl->ipa_reg_base_ofst = ipahal_get_reg_base();
+	ctrl->ipa_init_sram = _ipa_init_sram_v3_0;
+	ctrl->ipa_sram_read_settings = _ipa_sram_settings_read_v3_0;
+
+	ctrl->ipa_init_hdr = _ipa_init_hdr_v3_0;
+
+	return 0;
+}
+
+void ipa3_skb_recycle(struct sk_buff *skb)
+{
+	struct skb_shared_info *shinfo;
+
+	shinfo = skb_shinfo(skb);
+	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
+	atomic_set(&shinfo->dataref, 1);
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	skb->data = skb->head + NET_SKB_PAD;
+	skb_reset_tail_pointer(skb);
+}
+
+int ipa3_alloc_rule_id(struct idr *rule_ids)
+{
+	/* There are two groups of rule IDs: modem ones and apps ones.
+	 * They are distinguished by the high bit: modem IDs have the
+	 * high bit set.
+	 */
+	return idr_alloc(rule_ids, NULL,
+		ipahal_get_low_rule_id(), ipahal_get_rule_id_hi_bit(),
+		GFP_KERNEL);
+}
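+
+/*
+ * Illustration (hypothetical numbers, not taken from the HAL): if
+ * ipahal_get_low_rule_id() were 1 and ipahal_get_rule_id_hi_bit() were
+ * 0x200, apps rules would receive IDs in [1, 0x200) from this idr, while
+ * modem rules use IDs with bit 9 set, i.e. 0x200 and above, so the two
+ * ranges never collide.
+ */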
+
+int ipa3_id_alloc(void *ptr)
+{
+	int id;
+
+	idr_preload(GFP_KERNEL);
+	spin_lock(&ipa3_ctx->idr_lock);
+	id = idr_alloc(&ipa3_ctx->ipa_idr, ptr, 0, 0, GFP_NOWAIT);
+	spin_unlock(&ipa3_ctx->idr_lock);
+	idr_preload_end();
+
+	return id;
+}
+
+void *ipa3_id_find(u32 id)
+{
+	void *ptr;
+
+	spin_lock(&ipa3_ctx->idr_lock);
+	ptr = idr_find(&ipa3_ctx->ipa_idr, id);
+	spin_unlock(&ipa3_ctx->idr_lock);
+
+	return ptr;
+}
+
+void ipa3_id_remove(u32 id)
+{
+	spin_lock(&ipa3_ctx->idr_lock);
+	idr_remove(&ipa3_ctx->ipa_idr, id);
+	spin_unlock(&ipa3_ctx->idr_lock);
+}
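+
+/*
+ * Typical handle lifecycle sketch (illustrative only):
+ *
+ *	id = ipa3_id_alloc(obj);	- returns >= 0 on success
+ *	obj = ipa3_id_find(id);		- translate handle back to object
+ *	ipa3_id_remove(id);		- release the handle
+ *
+ * All three helpers serialize on ipa3_ctx->idr_lock.
+ */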
+
+void ipa3_tag_destroy_imm(void *user1, int user2)
+{
+	ipahal_destroy_imm_cmd(user1);
+}
+
+static void ipa3_tag_free_skb(void *user1, int user2)
+{
+	dev_kfree_skb_any((struct sk_buff *)user1);
+}
+
+#define REQUIRED_TAG_PROCESS_DESCRIPTORS 4
+
+/**
+ * ipa3_tag_process() - Initiates a tag process. Incorporates the input
+ * descriptors
+ *
+ * @desc:	descriptors with commands for IC
+ * @descs_num:	number of descriptors in the above array
+ * @timeout:	how long to wait for the TAG response, in jiffies
+ *
+ * Note: The descriptors are copied (if there is room); the caller still
+ * needs to free its own descriptors afterwards
+ *
+ * Return: 0 or negative in case of failure
+ */
+int ipa3_tag_process(struct ipa3_desc desc[],
+	int descs_num,
+	unsigned long timeout)
+{
+	struct ipa3_sys_context *sys;
+	struct ipa3_desc *tag_desc;
+	int desc_idx = 0;
+	struct ipahal_imm_cmd_ip_packet_init pktinit_cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
+	struct ipahal_imm_cmd_ip_packet_tag_status status;
+	int i;
+	struct sk_buff *dummy_skb;
+	int res;
+	struct ipa3_tag_completion *comp;
+	int ep_idx;
+
+	/* Not enough room for the required descriptors for the tag process */
+	if (IPA_TAG_MAX_DESC - descs_num < REQUIRED_TAG_PROCESS_DESCRIPTORS) {
+		IPAERR("up to %d descriptors are allowed (received %d)\n",
+		       IPA_TAG_MAX_DESC - REQUIRED_TAG_PROCESS_DESCRIPTORS,
+		       descs_num);
+		return -ENOMEM;
+	}
+
+	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_CMD_PROD);
+	if (-1 == ep_idx) {
+		IPAERR("Client %u is not mapped\n",
+			IPA_CLIENT_APPS_CMD_PROD);
+		return -EFAULT;
+	}
+	sys = ipa3_ctx->ep[ep_idx].sys;
+
+	tag_desc = kzalloc(sizeof(*tag_desc) * IPA_TAG_MAX_DESC, GFP_KERNEL);
+	if (!tag_desc) {
+		IPAERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	/* Copy the required descriptors from the client now */
+	if (desc) {
+		memcpy(&(tag_desc[0]), desc, descs_num *
+			sizeof(tag_desc[0]));
+		desc_idx += descs_num;
+	}
+
+	/* NO-OP IC for ensuring that IPA pipeline is empty */
+	cmd_pyld = ipahal_construct_nop_imm_cmd(
+		false, IPAHAL_FULL_PIPELINE_CLEAR, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct NOP imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_tag_desc;
+	}
+	tag_desc[desc_idx].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+	tag_desc[desc_idx].pyld = cmd_pyld->data;
+	tag_desc[desc_idx].len = cmd_pyld->len;
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	desc_idx++;
+
+	/* IP_PACKET_INIT IC for tag status to be sent to apps */
+	pktinit_cmd.destination_pipe_index =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_PACKET_INIT, &pktinit_cmd, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct ip_packet_init imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	tag_desc[desc_idx].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_INIT);
+	tag_desc[desc_idx].pyld = cmd_pyld->data;
+	tag_desc[desc_idx].len = cmd_pyld->len;
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	desc_idx++;
+
+	/* status IC */
+	status.tag = IPA_COOKIE;
+	cmd_pyld = ipahal_construct_imm_cmd(
+		IPA_IMM_CMD_IP_PACKET_TAG_STATUS, &status, false);
+	if (!cmd_pyld) {
+		IPAERR("failed to construct ip_packet_tag_status imm cmd\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	tag_desc[desc_idx].opcode =
+		ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_IP_PACKET_TAG_STATUS);
+	tag_desc[desc_idx].pyld = cmd_pyld->data;
+	tag_desc[desc_idx].len = cmd_pyld->len;
+	tag_desc[desc_idx].type = IPA_IMM_CMD_DESC;
+	tag_desc[desc_idx].callback = ipa3_tag_destroy_imm;
+	tag_desc[desc_idx].user1 = cmd_pyld;
+	desc_idx++;
+
+	comp = kzalloc(sizeof(*comp), GFP_KERNEL);
+	if (!comp) {
+		IPAERR("no mem\n");
+		res = -ENOMEM;
+		goto fail_free_desc;
+	}
+	init_completion(&comp->comp);
+
+	/* completion needs to be released from both here and rx handler */
+	atomic_set(&comp->cnt, 2);
+
+	/*
+	 * dummy packet to send to IPA; its payload is a pointer to the
+	 * completion object
+	 */
+	dummy_skb = alloc_skb(sizeof(comp), GFP_KERNEL);
+	if (!dummy_skb) {
+		IPAERR("failed to allocate memory\n");
+		res = -ENOMEM;
+		goto fail_free_comp;
+	}
+
+	memcpy(skb_put(dummy_skb, sizeof(comp)), &comp, sizeof(comp));
+
+	tag_desc[desc_idx].pyld = dummy_skb->data;
+	tag_desc[desc_idx].len = dummy_skb->len;
+	tag_desc[desc_idx].type = IPA_DATA_DESC_SKB;
+	tag_desc[desc_idx].callback = ipa3_tag_free_skb;
+	tag_desc[desc_idx].user1 = dummy_skb;
+	desc_idx++;
+
+	/* send all descriptors to IPA with single EOT */
+	res = ipa3_send(sys, desc_idx, tag_desc, true);
+	if (res) {
+		IPAERR("failed to send TAG packets %d\n", res);
+		res = -ENOMEM;
+		goto fail_free_comp;
+	}
+	kfree(tag_desc);
+	tag_desc = NULL;
+
+	IPADBG("waiting for TAG response\n");
+	res = wait_for_completion_timeout(&comp->comp, timeout);
+	if (res == 0) {
+		IPAERR("timeout (%lu jiffies) on waiting for TAG response\n",
+			timeout);
+		WARN_ON(1);
+		if (atomic_dec_return(&comp->cnt) == 0)
+			kfree(comp);
+		return -ETIME;
+	}
+
+	IPADBG("TAG response arrived!\n");
+	if (atomic_dec_return(&comp->cnt) == 0)
+		kfree(comp);
+
+	/* sleep for short period to ensure IPA wrote all packets to BAM */
+	usleep_range(IPA_TAG_SLEEP_MIN_USEC, IPA_TAG_SLEEP_MAX_USEC);
+
+	return 0;
+
+fail_free_comp:
+	kfree(comp);
+fail_free_desc:
+	/*
+	 * Free only the descriptors allocated here:
+	 * [nop, pkt_init, status, dummy_skb]
+	 * The caller remains responsible for freeing its own
+	 * descriptors in case of failure.
+	 * The min is required because we may fail during
+	 * the initial allocations above.
+	 */
+	for (i = descs_num;
+		i < min(REQUIRED_TAG_PROCESS_DESCRIPTORS, desc_idx); i++)
+		if (tag_desc[i].callback)
+			tag_desc[i].callback(tag_desc[i].user1,
+				tag_desc[i].user2);
+fail_free_tag_desc:
+	kfree(tag_desc);
+	return res;
+}
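+
+/*
+ * Summary of the descriptor chain built above (illustrative): the tag
+ * process submits, in one transfer with a single EOT,
+ *	[ client descriptors | NOP register write | IP_PACKET_INIT |
+ *	  IP_PACKET_TAG_STATUS | dummy skb carrying a pointer to the
+ *	  completion object ]
+ * The rx handler that receives the tag status also decrements comp->cnt,
+ * so whichever side reaches zero last frees the completion object.
+ */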
+
+/**
+ * ipa3_tag_generate_force_close_desc() - generate descriptors for force close
+ *					 immediate command
+ *
+ * @desc: descriptors for IC
+ * @desc_size: desc array size
+ * @start_pipe: first pipe to close aggregation
+ * @end_pipe: last (non-inclusive) pipe to close aggregation
+ *
+ * Return: number of descriptors written or negative in case of failure
+ */
+static int ipa3_tag_generate_force_close_desc(struct ipa3_desc desc[],
+	int desc_size, int start_pipe, int end_pipe)
+{
+	int i;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	int desc_idx = 0;
+	int res;
+	struct ipahal_imm_cmd_register_write reg_write_agg_close;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipahal_reg_valmask valmask;
+
+	for (i = start_pipe; i < end_pipe; i++) {
+		ipahal_read_reg_n_fields(IPA_ENDP_INIT_AGGR_n, i, &ep_aggr);
+		if (!ep_aggr.aggr_en)
+			continue;
+		IPADBG("Force close ep: %d\n", i);
+		if (desc_idx + 1 > desc_size) {
+			IPAERR("Internal error - no descriptors\n");
+			res = -EFAULT;
+			goto fail_no_desc;
+		}
+
+		reg_write_agg_close.skip_pipeline_clear = false;
+		reg_write_agg_close.pipeline_clear_options =
+			IPAHAL_FULL_PIPELINE_CLEAR;
+		reg_write_agg_close.offset =
+			ipahal_get_reg_ofst(IPA_AGGR_FORCE_CLOSE);
+		ipahal_get_aggr_force_close_valmask(1<<i, &valmask);
+		reg_write_agg_close.value = valmask.val;
+		reg_write_agg_close.value_mask = valmask.mask;
+		cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+			&reg_write_agg_close, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct register_write imm cmd\n");
+			res = -ENOMEM;
+			goto fail_alloc_reg_write_agg_close;
+		}
+
+		desc[desc_idx].opcode =
+			ipahal_imm_cmd_get_opcode(IPA_IMM_CMD_REGISTER_WRITE);
+		desc[desc_idx].pyld = cmd_pyld->data;
+		desc[desc_idx].len = cmd_pyld->len;
+		desc[desc_idx].type = IPA_IMM_CMD_DESC;
+		desc[desc_idx].callback = ipa3_tag_destroy_imm;
+		desc[desc_idx].user1 = cmd_pyld;
+		desc_idx++;
+	}
+
+	return desc_idx;
+
+fail_alloc_reg_write_agg_close:
+	for (i = 0; i < desc_idx; i++)
+		if (desc[i].callback)
+			desc[i].callback(desc[i].user1,
+				desc[i].user2);
+fail_no_desc:
+	return res;
+}
+
+/**
+ * ipa3_tag_aggr_force_close() - Force close aggregation
+ *
+ * @pipe_num: pipe number or -1 for all pipes
+ */
+int ipa3_tag_aggr_force_close(int pipe_num)
+{
+	struct ipa3_desc *desc;
+	int res = -1;
+	int start_pipe;
+	int end_pipe;
+	int num_descs;
+	int num_aggr_descs;
+
+	if (pipe_num < -1 || pipe_num >= (int)ipa3_ctx->ipa_num_pipes) {
+		IPAERR("Invalid pipe number %d\n", pipe_num);
+		return -EINVAL;
+	}
+
+	if (pipe_num == -1) {
+		start_pipe = 0;
+		end_pipe = ipa3_ctx->ipa_num_pipes;
+	} else {
+		start_pipe = pipe_num;
+		end_pipe = pipe_num + 1;
+	}
+
+	num_descs = end_pipe - start_pipe;
+
+	desc = kcalloc(num_descs, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		IPAERR("no mem\n");
+		return -ENOMEM;
+	}
+
+	/* Force close aggregation on all valid pipes with aggregation */
+	num_aggr_descs = ipa3_tag_generate_force_close_desc(desc, num_descs,
+						start_pipe, end_pipe);
+	if (num_aggr_descs < 0) {
+		IPAERR("ipa3_tag_generate_force_close_desc failed %d\n",
+			num_aggr_descs);
+		goto fail_free_desc;
+	}
+
+	res = ipa3_tag_process(desc, num_aggr_descs,
+			      IPA_FORCE_CLOSE_TAG_PROCESS_TIMEOUT);
+
+fail_free_desc:
+	kfree(desc);
+
+	return res;
+}
+
+/**
+ * ipa3_is_ready() - check if IPA module was initialized
+ * successfully
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_ready(void)
+{
+	bool complete;
+
+	if (ipa3_ctx == NULL)
+		return false;
+	mutex_lock(&ipa3_ctx->lock);
+	complete = ipa3_ctx->ipa_initialization_complete;
+	mutex_unlock(&ipa3_ctx->lock);
+	return complete;
+}
+
+/**
+ * ipa3_is_client_handle_valid() - check if IPA client handle is valid handle
+ *
+ * Return value: true for yes; false for no
+ */
+bool ipa3_is_client_handle_valid(u32 clnt_hdl)
+{
+	if (clnt_hdl < ipa3_ctx->ipa_num_pipes)
+		return true;
+	return false;
+}
+
+/**
+ * ipa3_proxy_clk_unvote() - called to remove IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_unvote(void)
+{
+	if (ipa3_is_ready() && ipa3_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("PROXY_CLK_VOTE");
+		ipa3_ctx->q6_proxy_clk_vote_valid = false;
+	}
+}
+
+/**
+ * ipa3_proxy_clk_vote() - called to add IPA clock proxy vote
+ *
+ * Return value: none
+ */
+void ipa3_proxy_clk_vote(void)
+{
+	if (ipa3_is_ready() && !ipa3_ctx->q6_proxy_clk_vote_valid) {
+		IPA_ACTIVE_CLIENTS_INC_SPECIAL("PROXY_CLK_VOTE");
+		ipa3_ctx->q6_proxy_clk_vote_valid = true;
+	}
+}
+
+/**
+ * ipa3_get_smem_restr_bytes()- Return IPA smem restricted bytes
+ *
+ * Return value: u16 - number of IPA smem restricted bytes
+ */
+u16 ipa3_get_smem_restr_bytes(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->smem_restricted_bytes;
+
+	IPAERR("IPA Driver not initialized\n");
+
+	return 0;
+}
+
+/**
+ * ipa3_get_modem_cfg_emb_pipe_flt()- Return ipa3_ctx->modem_cfg_emb_pipe_flt
+ *
+ * Return value: true if modem configures embedded pipe flt, false otherwise
+ */
+bool ipa3_get_modem_cfg_emb_pipe_flt(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->modem_cfg_emb_pipe_flt;
+
+	IPAERR("IPA driver has not been initialized\n");
+
+	return false;
+}
+
+/**
+ * ipa3_get_transport_type()- Return ipa3_ctx->transport_prototype
+ *
+ * Return value: enum ipa_transport_type
+ */
+enum ipa_transport_type ipa3_get_transport_type(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->transport_prototype;
+
+	IPAERR("IPA driver has not been initialized\n");
+	return IPA_TRANSPORT_TYPE_GSI;
+}
+
+u32 ipa3_get_num_pipes(void)
+{
+	return ipahal_read_reg(IPA_ENABLED_PIPES);
+}
+
+/**
+ * ipa3_disable_apps_wan_cons_deaggr()-
+ * set ipa_ctx->ipa_client_apps_wan_cons_agg_gro
+ *
+ * Return value: 0 or negative in case of failure
+ */
+int ipa3_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count)
+{
+	int res = -1;
+	u32 limit;
+
+	/* check whether IPA HW can support the requested aggregation limits */
+	limit = ipahal_aggr_get_max_byte_limit();
+	if ((agg_size >> 10) > limit) {
+		IPAERR("IPA-AGG byte limit %d\n", limit);
+		IPAERR("exceed aggr_byte_limit\n");
+		return res;
+	}
+	limit = ipahal_aggr_get_max_pkt_limit();
+	if (agg_count > limit) {
+		IPAERR("IPA-AGG pkt limit %d\n", limit);
+		IPAERR("exceed aggr_pkt_limit\n");
+		return res;
+	}
+
+	if (ipa3_ctx) {
+		ipa3_ctx->ipa_client_apps_wan_cons_agg_gro = true;
+		return 0;
+	}
+	return res;
+}
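+
+/*
+ * Worked example (illustrative; assumes the HAL byte limit is expressed in
+ * KB, which is what the >> 10 suggests): agg_size = 32768 bytes becomes
+ * 32 KB, so the check passes only if ipahal_aggr_get_max_byte_limit()
+ * returns at least 32.
+ */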
+
+static void *ipa3_get_ipc_logbuf(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->logbuf;
+
+	return NULL;
+}
+
+static void *ipa3_get_ipc_logbuf_low(void)
+{
+	if (ipa3_ctx)
+		return ipa3_ctx->logbuf_low;
+
+	return NULL;
+}
+
+static void ipa3_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb)
+{
+	*holb = ipa3_ctx->ep[ep_idx].holb;
+}
+
+static void ipa3_set_tag_process_before_gating(bool val)
+{
+	ipa3_ctx->tag_process_before_gating = val;
+}
+
+int ipa3_bind_api_controller(enum ipa_hw_type ipa_hw_type,
+	struct ipa_api_controller *api_ctrl)
+{
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAERR("Unsupported IPA HW version %d\n", ipa_hw_type);
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	api_ctrl->ipa_connect = ipa3_connect;
+	api_ctrl->ipa_disconnect = ipa3_disconnect;
+	api_ctrl->ipa_reset_endpoint = ipa3_reset_endpoint;
+	api_ctrl->ipa_clear_endpoint_delay = ipa3_clear_endpoint_delay;
+	api_ctrl->ipa_disable_endpoint = NULL;
+	api_ctrl->ipa_cfg_ep = ipa3_cfg_ep;
+	api_ctrl->ipa_cfg_ep_nat = ipa3_cfg_ep_nat;
+	api_ctrl->ipa_cfg_ep_hdr = ipa3_cfg_ep_hdr;
+	api_ctrl->ipa_cfg_ep_hdr_ext = ipa3_cfg_ep_hdr_ext;
+	api_ctrl->ipa_cfg_ep_mode = ipa3_cfg_ep_mode;
+	api_ctrl->ipa_cfg_ep_aggr = ipa3_cfg_ep_aggr;
+	api_ctrl->ipa_cfg_ep_deaggr = ipa3_cfg_ep_deaggr;
+	api_ctrl->ipa_cfg_ep_route = ipa3_cfg_ep_route;
+	api_ctrl->ipa_cfg_ep_holb = ipa3_cfg_ep_holb;
+	api_ctrl->ipa_get_holb = ipa3_get_holb;
+	api_ctrl->ipa_set_tag_process_before_gating =
+			ipa3_set_tag_process_before_gating;
+	api_ctrl->ipa_cfg_ep_cfg = ipa3_cfg_ep_cfg;
+	api_ctrl->ipa_cfg_ep_metadata_mask = ipa3_cfg_ep_metadata_mask;
+	api_ctrl->ipa_cfg_ep_holb_by_client = ipa3_cfg_ep_holb_by_client;
+	api_ctrl->ipa_cfg_ep_ctrl = ipa3_cfg_ep_ctrl;
+	api_ctrl->ipa_add_hdr = ipa3_add_hdr;
+	api_ctrl->ipa_del_hdr = ipa3_del_hdr;
+	api_ctrl->ipa_commit_hdr = ipa3_commit_hdr;
+	api_ctrl->ipa_reset_hdr = ipa3_reset_hdr;
+	api_ctrl->ipa_get_hdr = ipa3_get_hdr;
+	api_ctrl->ipa_put_hdr = ipa3_put_hdr;
+	api_ctrl->ipa_copy_hdr = ipa3_copy_hdr;
+	api_ctrl->ipa_add_hdr_proc_ctx = ipa3_add_hdr_proc_ctx;
+	api_ctrl->ipa_del_hdr_proc_ctx = ipa3_del_hdr_proc_ctx;
+	api_ctrl->ipa_add_rt_rule = ipa3_add_rt_rule;
+	api_ctrl->ipa_del_rt_rule = ipa3_del_rt_rule;
+	api_ctrl->ipa_commit_rt = ipa3_commit_rt;
+	api_ctrl->ipa_reset_rt = ipa3_reset_rt;
+	api_ctrl->ipa_get_rt_tbl = ipa3_get_rt_tbl;
+	api_ctrl->ipa_put_rt_tbl = ipa3_put_rt_tbl;
+	api_ctrl->ipa_query_rt_index = ipa3_query_rt_index;
+	api_ctrl->ipa_mdfy_rt_rule = ipa3_mdfy_rt_rule;
+	api_ctrl->ipa_add_flt_rule = ipa3_add_flt_rule;
+	api_ctrl->ipa_del_flt_rule = ipa3_del_flt_rule;
+	api_ctrl->ipa_mdfy_flt_rule = ipa3_mdfy_flt_rule;
+	api_ctrl->ipa_commit_flt = ipa3_commit_flt;
+	api_ctrl->ipa_reset_flt = ipa3_reset_flt;
+	api_ctrl->allocate_nat_device = ipa3_allocate_nat_device;
+	api_ctrl->ipa_nat_init_cmd = ipa3_nat_init_cmd;
+	api_ctrl->ipa_nat_dma_cmd = ipa3_nat_dma_cmd;
+	api_ctrl->ipa_nat_del_cmd = ipa3_nat_del_cmd;
+	api_ctrl->ipa_send_msg = ipa3_send_msg;
+	api_ctrl->ipa_register_pull_msg = ipa3_register_pull_msg;
+	api_ctrl->ipa_deregister_pull_msg = ipa3_deregister_pull_msg;
+	api_ctrl->ipa_register_intf = ipa3_register_intf;
+	api_ctrl->ipa_register_intf_ext = ipa3_register_intf_ext;
+	api_ctrl->ipa_deregister_intf = ipa3_deregister_intf;
+	api_ctrl->ipa_set_aggr_mode = ipa3_set_aggr_mode;
+	api_ctrl->ipa_set_qcncm_ndp_sig = ipa3_set_qcncm_ndp_sig;
+	api_ctrl->ipa_set_single_ndp_per_mbim = ipa3_set_single_ndp_per_mbim;
+	api_ctrl->ipa_tx_dp = ipa3_tx_dp;
+	api_ctrl->ipa_tx_dp_mul = ipa3_tx_dp_mul;
+	api_ctrl->ipa_free_skb = ipa3_free_skb;
+	api_ctrl->ipa_setup_sys_pipe = ipa3_setup_sys_pipe;
+	api_ctrl->ipa_teardown_sys_pipe = ipa3_teardown_sys_pipe;
+	api_ctrl->ipa_sys_setup = ipa3_sys_setup;
+	api_ctrl->ipa_sys_teardown = ipa3_sys_teardown;
+	api_ctrl->ipa_sys_update_gsi_hdls = ipa3_sys_update_gsi_hdls;
+	api_ctrl->ipa_connect_wdi_pipe = ipa3_connect_wdi_pipe;
+	api_ctrl->ipa_disconnect_wdi_pipe = ipa3_disconnect_wdi_pipe;
+	api_ctrl->ipa_enable_wdi_pipe = ipa3_enable_wdi_pipe;
+	api_ctrl->ipa_disable_wdi_pipe = ipa3_disable_wdi_pipe;
+	api_ctrl->ipa_resume_wdi_pipe = ipa3_resume_wdi_pipe;
+	api_ctrl->ipa_suspend_wdi_pipe = ipa3_suspend_wdi_pipe;
+	api_ctrl->ipa_get_wdi_stats = ipa3_get_wdi_stats;
+	api_ctrl->ipa_get_smem_restr_bytes = ipa3_get_smem_restr_bytes;
+	api_ctrl->ipa_uc_wdi_get_dbpa = ipa3_uc_wdi_get_dbpa;
+	api_ctrl->ipa_uc_reg_rdyCB = ipa3_uc_reg_rdyCB;
+	api_ctrl->ipa_uc_dereg_rdyCB = ipa3_uc_dereg_rdyCB;
+	api_ctrl->teth_bridge_init = ipa3_teth_bridge_init;
+	api_ctrl->teth_bridge_disconnect = ipa3_teth_bridge_disconnect;
+	api_ctrl->teth_bridge_connect = ipa3_teth_bridge_connect;
+	api_ctrl->ipa_set_client = ipa3_set_client;
+	api_ctrl->ipa_get_client = ipa3_get_client;
+	api_ctrl->ipa_get_client_uplink = ipa3_get_client_uplink;
+	api_ctrl->ipa_dma_init = ipa3_dma_init;
+	api_ctrl->ipa_dma_enable = ipa3_dma_enable;
+	api_ctrl->ipa_dma_disable = ipa3_dma_disable;
+	api_ctrl->ipa_dma_sync_memcpy = ipa3_dma_sync_memcpy;
+	api_ctrl->ipa_dma_async_memcpy = ipa3_dma_async_memcpy;
+	api_ctrl->ipa_dma_uc_memcpy = ipa3_dma_uc_memcpy;
+	api_ctrl->ipa_dma_destroy = ipa3_dma_destroy;
+	api_ctrl->ipa_mhi_init_engine = ipa3_mhi_init_engine;
+	api_ctrl->ipa_connect_mhi_pipe = ipa3_connect_mhi_pipe;
+	api_ctrl->ipa_disconnect_mhi_pipe = ipa3_disconnect_mhi_pipe;
+	api_ctrl->ipa_mhi_stop_gsi_channel = ipa3_mhi_stop_gsi_channel;
+	api_ctrl->ipa_uc_mhi_reset_channel = ipa3_uc_mhi_reset_channel;
+	api_ctrl->ipa_qmi_enable_force_clear_datapath_send =
+			ipa3_qmi_enable_force_clear_datapath_send;
+	api_ctrl->ipa_qmi_disable_force_clear_datapath_send =
+			ipa3_qmi_disable_force_clear_datapath_send;
+	api_ctrl->ipa_mhi_reset_channel_internal =
+			ipa3_mhi_reset_channel_internal;
+	api_ctrl->ipa_mhi_start_channel_internal =
+			ipa3_mhi_start_channel_internal;
+	api_ctrl->ipa_mhi_query_ch_info = ipa3_mhi_query_ch_info;
+	api_ctrl->ipa_mhi_resume_channels_internal =
+			ipa3_mhi_resume_channels_internal;
+	api_ctrl->ipa_has_open_aggr_frame = ipa3_has_open_aggr_frame;
+	api_ctrl->ipa_mhi_destroy_channel = ipa3_mhi_destroy_channel;
+	api_ctrl->ipa_uc_mhi_send_dl_ul_sync_info =
+			ipa3_uc_mhi_send_dl_ul_sync_info;
+	api_ctrl->ipa_uc_mhi_init = ipa3_uc_mhi_init;
+	api_ctrl->ipa_uc_mhi_suspend_channel = ipa3_uc_mhi_suspend_channel;
+	api_ctrl->ipa_uc_mhi_stop_event_update_channel =
+			ipa3_uc_mhi_stop_event_update_channel;
+	api_ctrl->ipa_uc_mhi_cleanup = ipa3_uc_mhi_cleanup;
+	api_ctrl->ipa_uc_state_check = ipa3_uc_state_check;
+	api_ctrl->ipa_write_qmap_id = ipa3_write_qmap_id;
+	api_ctrl->ipa_add_interrupt_handler = ipa3_add_interrupt_handler;
+	api_ctrl->ipa_remove_interrupt_handler = ipa3_remove_interrupt_handler;
+	api_ctrl->ipa_restore_suspend_handler = ipa3_restore_suspend_handler;
+	api_ctrl->ipa_bam_reg_dump = ipa3_bam_reg_dump;
+	api_ctrl->ipa_get_ep_mapping = ipa3_get_ep_mapping;
+	api_ctrl->ipa_is_ready = ipa3_is_ready;
+	api_ctrl->ipa_proxy_clk_vote = ipa3_proxy_clk_vote;
+	api_ctrl->ipa_proxy_clk_unvote = ipa3_proxy_clk_unvote;
+	api_ctrl->ipa_is_client_handle_valid = ipa3_is_client_handle_valid;
+	api_ctrl->ipa_get_client_mapping = ipa3_get_client_mapping;
+	api_ctrl->ipa_get_rm_resource_from_ep = ipa3_get_rm_resource_from_ep;
+	api_ctrl->ipa_get_modem_cfg_emb_pipe_flt =
+		ipa3_get_modem_cfg_emb_pipe_flt;
+	api_ctrl->ipa_get_transport_type = ipa3_get_transport_type;
+	api_ctrl->ipa_ap_suspend = ipa3_ap_suspend;
+	api_ctrl->ipa_ap_resume = ipa3_ap_resume;
+	api_ctrl->ipa_get_smmu_domain = ipa3_get_smmu_domain;
+	api_ctrl->ipa_disable_apps_wan_cons_deaggr =
+		ipa3_disable_apps_wan_cons_deaggr;
+	api_ctrl->ipa_get_dma_dev = ipa3_get_dma_dev;
+	api_ctrl->ipa_release_wdi_mapping = ipa3_release_wdi_mapping;
+	api_ctrl->ipa_create_wdi_mapping = ipa3_create_wdi_mapping;
+	api_ctrl->ipa_get_gsi_ep_info = ipa3_get_gsi_ep_info;
+	api_ctrl->ipa_stop_gsi_channel = ipa3_stop_gsi_channel;
+	api_ctrl->ipa_register_ipa_ready_cb = ipa3_register_ipa_ready_cb;
+	api_ctrl->ipa_inc_client_enable_clks = ipa3_inc_client_enable_clks;
+	api_ctrl->ipa_dec_client_disable_clks = ipa3_dec_client_disable_clks;
+	api_ctrl->ipa_inc_client_enable_clks_no_block =
+		ipa3_inc_client_enable_clks_no_block;
+	api_ctrl->ipa_suspend_resource_no_block =
+		ipa3_suspend_resource_no_block;
+	api_ctrl->ipa_resume_resource = ipa3_resume_resource;
+	api_ctrl->ipa_suspend_resource_sync = ipa3_suspend_resource_sync;
+	api_ctrl->ipa_set_required_perf_profile =
+		ipa3_set_required_perf_profile;
+	api_ctrl->ipa_get_ipc_logbuf = ipa3_get_ipc_logbuf;
+	api_ctrl->ipa_get_ipc_logbuf_low = ipa3_get_ipc_logbuf_low;
+	api_ctrl->ipa_rx_poll = ipa3_rx_poll;
+	api_ctrl->ipa_recycle_wan_skb = ipa3_recycle_wan_skb;
+	api_ctrl->ipa_setup_uc_ntn_pipes = ipa3_setup_uc_ntn_pipes;
+	api_ctrl->ipa_tear_down_uc_offload_pipes =
+		ipa3_tear_down_uc_offload_pipes;
+	api_ctrl->ipa_get_pdev = ipa3_get_pdev;
+
+	return 0;
+}
+
+/**
+ * ipa_is_modem_pipe()- Checks if pipe is owned by the modem
+ *
+ * @pipe_idx: pipe number
+ * Return value: true if owned by modem, false otherwise
+ */
+bool ipa_is_modem_pipe(int pipe_idx)
+{
+	int client_idx;
+
+	if (pipe_idx >= ipa3_ctx->ipa_num_pipes || pipe_idx < 0) {
+		IPAERR("Bad pipe index!\n");
+		return false;
+	}
+
+	for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+		if (!IPA_CLIENT_IS_Q6_CONS(client_idx) &&
+			!IPA_CLIENT_IS_Q6_PROD(client_idx))
+			continue;
+		if (ipa3_get_ep_mapping(client_idx) == pipe_idx)
+			return true;
+	}
+
+	return false;
+}
+
+static void ipa3_write_rsrc_grp_type_reg(int group_index,
+			enum ipa_rsrc_grp_type_src n, bool src,
+			struct ipahal_reg_rsrc_grp_cfg *val)
+{
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	switch (hw_type_idx) {
+	case IPA_3_0:
+		if (src) {
+			switch (group_index) {
+			case IPA_v3_0_GROUP_UL:
+			case IPA_v3_0_GROUP_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_DIAG:
+			case IPA_v3_0_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_Q6ZIP:
+			case IPA_v3_0_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid source resource group, index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v3_0_GROUP_UL:
+			case IPA_v3_0_GROUP_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_DIAG:
+			case IPA_v3_0_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_0_GROUP_Q6ZIP_GENERAL:
+			case IPA_v3_0_GROUP_Q6ZIP_ENGINE:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid destination resource group, index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	case IPA_3_5_1:
+		if (src) {
+			switch (group_index) {
+			case IPA_v3_5_1_GROUP_LWA_DL:
+			case IPA_v3_5_1_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_5_1_GROUP_DMA:
+			case IPA_v3_5_1_GROUP_UC_RX_Q:
+				ipahal_write_reg_n_fields(
+					IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid source resource group, index #%d\n",
+				group_index);
+				break;
+			}
+		} else {
+			switch (group_index) {
+			case IPA_v3_5_1_GROUP_LWA_DL:
+			case IPA_v3_5_1_GROUP_UL_DL:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+					n, val);
+				break;
+			case IPA_v3_5_1_GROUP_DMA:
+				ipahal_write_reg_n_fields(
+					IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+					n, val);
+				break;
+			default:
+				IPAERR(
+				"Invalid destination resource group, index #%d\n",
+				group_index);
+				break;
+			}
+		}
+		break;
+	default:
+		IPAERR("invalid hw type\n");
+		WARN_ON(1);
+		return;
+	}
+}
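+
+/*
+ * Mapping sketch (derived from the switch above): resource groups are
+ * programmed two per register, so callers pass an even group_index together
+ * with a struct ipahal_reg_rsrc_grp_cfg whose x_* fields describe that group
+ * and whose y_* fields describe group_index + 1; groups 0/1 land in the
+ * *_RSRC_GRP_01_* register, 2/3 in *_RSRC_GRP_23_* and 4/5 in
+ * *_RSRC_GRP_45_*.
+ */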
+
+static void ipa3_configure_rx_hps_clients(int depth, bool min)
+{
+	int i;
+	struct ipahal_reg_rx_hps_clients val;
+	u8 hw_type_idx;
+
+	hw_type_idx = ipa3_get_hw_type_index();
+
+	/*
+	 * depth 0 contains the first 4 clients out of 6
+	 * depth 1 contains the last 2 clients out of 6
+	 */
+	for (i = 0 ; i < (depth ? 2 : 4) ; i++) {
+		if (min)
+			val.client_minmax[i] =
+				ipa3_rsrc_rx_grp_config
+				[hw_type_idx]
+				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+				[!depth ? i : 4 + i].min;
+		else
+			val.client_minmax[i] =
+				ipa3_rsrc_rx_grp_config
+				[hw_type_idx]
+				[IPA_RSRC_GRP_TYPE_RX_HPS_CMDQ]
+				[!depth ? i : 4 + i].max;
+	}
+	if (depth) {
+		ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_1 :
+					IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+					&val);
+	} else {
+		ipahal_write_reg_fields(min ? IPA_RX_HPS_CLIENTS_MIN_DEPTH_0 :
+					IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+					&val);
+	}
+}
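+
+/*
+ * Indexing example (illustrative only): with depth = 0 the loop runs
+ * i = 0..3 and programs clients 0-3 into the DEPTH_0 register; with
+ * depth = 1 it runs i = 0..1 and picks entries 4 + i, i.e. clients 4 and 5,
+ * for the DEPTH_1 register.
+ */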
+
+void ipa3_set_resorce_groups_min_max_limits(void)
+{
+	int i;
+	int j;
+	int src_rsrc_type_max;
+	int dst_rsrc_type_max;
+	int src_grp_idx_max;
+	int dst_grp_idx_max;
+	struct ipahal_reg_rsrc_grp_cfg val;
+	u8 hw_type_idx;
+
+	IPADBG("ENTER\n");
+	IPADBG("Assign source rsrc groups min-max limits\n");
+
+	hw_type_idx = ipa3_get_hw_type_index();
+	switch (hw_type_idx) {
+	case IPA_3_0:
+		src_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v3_0_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v3_0_GROUP_MAX;
+		dst_grp_idx_max = IPA_v3_0_GROUP_MAX;
+		break;
+	case IPA_3_5_1:
+		src_rsrc_type_max = IPA_v3_5_1_RSRC_GRP_TYPE_SRC_MAX;
+		dst_rsrc_type_max = IPA_v3_5_1_RSRC_GRP_TYPE_DST_MAX;
+		src_grp_idx_max = IPA_v3_5_1_SRC_GROUP_MAX;
+		dst_grp_idx_max = IPA_v3_5_1_DST_GROUP_MAX;
+		break;
+	default:
+		IPAERR("invalid hw type index\n");
+		WARN_ON(1);
+		return;
+	}
+
+	for (i = 0; i < src_rsrc_type_max; i++) {
+		for (j = 0; j < src_grp_idx_max; j = j + 2) {
+			val.x_min =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j].min;
+			val.x_max =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j].max;
+			val.y_min =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].min;
+			val.y_max =
+			ipa3_rsrc_src_grp_config[hw_type_idx][i][j + 1].max;
+			ipa3_write_rsrc_grp_type_reg(j, i, true, &val);
+		}
+	}
+
+	IPADBG("Assign destination rsrc groups min-max limits\n");
+
+	for (i = 0; i < dst_rsrc_type_max; i++) {
+		for (j = 0; j < dst_grp_idx_max; j = j + 2) {
+			val.x_min =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].min;
+			val.x_max =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j].max;
+			val.y_min =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].min;
+			val.y_max =
+			ipa3_rsrc_dst_grp_config[hw_type_idx][i][j + 1].max;
+			ipa3_write_rsrc_grp_type_reg(j, i, false, &val);
+		}
+	}
+
+	/* move resource group configuration from HLOS to TZ */
+	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
+		IPAERR("skip configuring ipa_rx_hps_clients from HLOS\n");
+		return;
+	}
+
+	IPADBG("Assign RX_HPS CMDQ rsrc groups min-max limits\n");
+
+	ipa3_configure_rx_hps_clients(0, true);
+	ipa3_configure_rx_hps_clients(0, false);
+
+	/* only HW types v3.0/v3.1 have 6 RX_HPS_CMDQ clients and need depth 1 */
+	if (ipa3_ctx->ipa_hw_type <= IPA_HW_v3_1) {
+		ipa3_configure_rx_hps_clients(1, true);
+		ipa3_configure_rx_hps_clients(1, false);
+	}
+
+	IPADBG("EXIT\n");
+}
+
+static void ipa3_gsi_poll_after_suspend(struct ipa3_ep_context *ep)
+{
+	bool empty;
+
+	IPADBG("switch ch %ld to poll\n", ep->gsi_chan_hdl);
+	gsi_config_channel_mode(ep->gsi_chan_hdl, GSI_CHAN_MODE_POLL);
+	gsi_is_channel_empty(ep->gsi_chan_hdl, &empty);
+	if (!empty) {
+		IPADBG("ch %ld not empty\n", ep->gsi_chan_hdl);
+		/* queue work to start polling if we don't already have one */
+		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
+		if (!atomic_read(&ep->sys->curr_polling_state)) {
+			atomic_set(&ep->sys->curr_polling_state, 1);
+			queue_work(ep->sys->wq, &ep->sys->work);
+		}
+	}
+}
+
+void ipa3_suspend_apps_pipes(bool suspend)
+{
+	struct ipa_ep_cfg_ctrl cfg;
+	int ipa_ep_idx;
+	struct ipa3_ep_context *ep;
+
+	memset(&cfg, 0, sizeof(cfg));
+	cfg.ipa_ep_suspend = suspend;
+
+	ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+			ipa_ep_idx);
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (suspend)
+			ipa3_gsi_poll_after_suspend(ep);
+		else if (!atomic_read(&ep->sys->curr_polling_state))
+			gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_CALLBACK);
+	}
+
+	ipa_ep_idx = ipa_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+	/* Considering the case for SSR. */
+	if (ipa_ep_idx == -1) {
+		IPADBG("Invalid client.\n");
+		return;
+	}
+	ep = &ipa3_ctx->ep[ipa_ep_idx];
+	if (ep->valid) {
+		IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
+			ipa_ep_idx);
+		ipa3_cfg_ep_ctrl(ipa_ep_idx, &cfg);
+		if (suspend)
+			ipa3_gsi_poll_after_suspend(ep);
+		else if (!atomic_read(&ep->sys->curr_polling_state))
+			gsi_config_channel_mode(ep->gsi_chan_hdl,
+				GSI_CHAN_MODE_CALLBACK);
+	}
+}
+
+/**
+ * ipa3_inject_dma_task_for_gsi()- Send DMA_TASK to IPA for GSI stop channel
+ *
+ * Send a 1B DMA_TASK to IPA to unblock a GSI channel stuck in STOP_IN_PROG.
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_inject_dma_task_for_gsi(void)
+{
+	static struct ipa_mem_buffer mem = {0};
+	struct ipahal_imm_cmd_dma_task_32b_addr cmd = {0};
+	static struct ipahal_imm_cmd_pyld *cmd_pyld;
+	struct ipa3_desc desc = {0};
+
+	/* allocate the memory only on the very first call */
+	if (!mem.base) {
+		IPADBG("Allocate mem\n");
+		mem.size = IPA_GSI_CHANNEL_STOP_PKT_SIZE;
+		mem.base = dma_alloc_coherent(ipa3_ctx->pdev,
+			mem.size,
+			&mem.phys_base,
+			GFP_KERNEL);
+		if (!mem.base) {
+			IPAERR("no mem\n");
+			return -EFAULT;
+		}
+	}
+	if (!cmd_pyld) {
+		cmd.flsh = 1;
+		cmd.size1 = mem.size;
+		cmd.addr1 = mem.phys_base;
+		cmd.packet_size = mem.size;
+		cmd_pyld = ipahal_construct_imm_cmd(
+			IPA_IMM_CMD_DMA_TASK_32B_ADDR, &cmd, false);
+		if (!cmd_pyld) {
+			IPAERR("failed to construct dma_task_32b_addr cmd\n");
+			return -EFAULT;
+		}
+	}
+
+	desc.opcode = ipahal_imm_cmd_get_opcode_param(
+		IPA_IMM_CMD_DMA_TASK_32B_ADDR, 1);
+	desc.pyld = cmd_pyld->data;
+	desc.len = cmd_pyld->len;
+	desc.type = IPA_IMM_CMD_DESC;
+
+	IPADBG("sending 1B packet to IPA\n");
+	if (ipa3_send_cmd_timeout(1, &desc,
+		IPA_DMA_TASK_FOR_GSI_TIMEOUT_MSEC)) {
+		IPAERR("ipa3_send_cmd failed\n");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * ipa3_stop_gsi_channel()- Stops a GSI channel in IPA
+ * @clnt_hdl: IPA client handle whose GSI channel should be stopped
+ *
+ * This function implements the sequence to stop a GSI channel
+ * in IPA. It returns once the channel is in STOP state.
+ *
+ * Return value: 0 on success, negative otherwise
+ */
+int ipa3_stop_gsi_channel(u32 clnt_hdl)
+{
+	struct ipa_mem_buffer mem;
+	int res = 0;
+	int i;
+	struct ipa3_ep_context *ep;
+
+	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
+		ipa3_ctx->ep[clnt_hdl].valid == 0) {
+		IPAERR("bad parm.\n");
+		return -EINVAL;
+	}
+
+	ep = &ipa3_ctx->ep[clnt_hdl];
+
+	IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	memset(&mem, 0, sizeof(mem));
+
+	if (IPA_CLIENT_IS_PROD(ep->client)) {
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		goto end_sequence;
+	}
+
+	for (i = 0; i < IPA_GSI_CHANNEL_STOP_MAX_RETRY; i++) {
+		IPADBG("Calling gsi_stop_channel\n");
+		res = gsi_stop_channel(ep->gsi_chan_hdl);
+		IPADBG("gsi_stop_channel returned %d\n", res);
+		if (res != -GSI_STATUS_AGAIN && res != -GSI_STATUS_TIMED_OUT)
+			goto end_sequence;
+
+		IPADBG("Inject a DMA_TASK with 1B packet to IPA and retry\n");
+		/* Send a 1B DMA_TASK packet to IPA and try again */
+		res = ipa3_inject_dma_task_for_gsi();
+		if (res) {
+			IPAERR("Failed to inject DMA TASk for GSI\n");
+			goto end_sequence;
+		}
+
+		/* sleep for short period to flush IPA */
+		usleep_range(IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC,
+			IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC);
+	}
+
+	IPAERR("Failed to stop GSI channel with retries\n");
+	res = -EFAULT;
+end_sequence:
+	IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
+
+	return res;
+}
+
+/**
+ * ipa3_load_fws() - Load the IPAv3 FWs into IPA&GSI SRAM.
+ *
+ * @firmware: Structure which contains the FW data from the user space.
+ *
+ * Return value: 0 on success, negative otherwise
+ *
+ */
+int ipa3_load_fws(const struct firmware *firmware)
+{
+	const struct elf32_hdr *ehdr;
+	const struct elf32_phdr *phdr;
+	const uint8_t *elf_phdr_ptr;
+	uint32_t *elf_data_ptr;
+	int phdr_idx, index;
+	uint32_t *fw_mem_base;
+
+	ehdr = (struct elf32_hdr *) firmware->data;
+
+	elf_phdr_ptr = firmware->data + sizeof(*ehdr);
+
+	for (phdr_idx = 0; phdr_idx < ehdr->e_phnum; phdr_idx++) {
+		/*
+		 * The ELF program header will contain the starting
+		 * address to which the firmware needs to be copied.
+		 */
+		phdr = (struct elf32_phdr *)elf_phdr_ptr;
+
+		/*
+		 * p_vaddr will contain the starting address to which the
+		 * FW needs to be loaded.
+		 * p_memsz will contain the size of the IRAM.
+		 * p_filesz will contain the size of the FW image.
+		 */
+		fw_mem_base = ioremap(phdr->p_vaddr, phdr->p_memsz);
+		if (!fw_mem_base) {
+			IPAERR("Failed to map 0x%x for the size of %u\n",
+				phdr->p_vaddr, phdr->p_memsz);
+			return -ENOMEM;
+		}
+
+		/* Set the entire region to 0s */
+		memset(fw_mem_base, 0, phdr->p_memsz);
+
+		/*
+		 * p_offset will contain an absolute offset from the beginning
+		 * of the ELF file.
+		 */
+		elf_data_ptr = (uint32_t *)
+				((uint8_t *)firmware->data + phdr->p_offset);
+
+		if (phdr->p_memsz % sizeof(uint32_t)) {
+			IPAERR("FW size %u doesn't align to 32bit\n",
+				phdr->p_memsz);
+			return -EFAULT;
+		}
+
+		/* Write the FW */
+		for (index = 0; index < phdr->p_filesz/sizeof(uint32_t);
+			index++) {
+			writel_relaxed(*elf_data_ptr, &fw_mem_base[index]);
+			elf_data_ptr++;
+		}
+
+		iounmap(fw_mem_base);
+
+		elf_phdr_ptr = elf_phdr_ptr + sizeof(*phdr);
+	}
+	IPADBG("IPA FWs (GSI FW, HPS and DPS) were loaded\n");
+	return 0;
+}
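+
+/*
+ * Usage sketch (illustrative only; the firmware file name and the device
+ * pointer are placeholders, not taken from this driver):
+ *
+ *	const struct firmware *fw;
+ *
+ *	if (!request_firmware(&fw, "ipa_fws.elf", dev)) {
+ *		ipa3_load_fws(fw);
+ *		release_firmware(fw);
+ *	}
+ */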
+
+/**
+ * ipa3_is_msm_device() - Is the running device a MSM or MDM?
+ *  Determine according to IPA version
+ *
+ * Return value: true if MSM, false if MDM
+ *
+ */
+bool ipa3_is_msm_device(void)
+{
+	switch (ipa3_ctx->ipa_hw_type) {
+	case IPA_HW_v3_0:
+	case IPA_HW_v3_5:
+		return false;
+	case IPA_HW_v3_1:
+	case IPA_HW_v3_5_1:
+		return true;
+	default:
+		IPAERR("unknown HW type %d\n", ipa3_ctx->ipa_hw_type);
+		ipa_assert();
+	}
+
+	return false;
+}
+
+/**
+ * ipa3_get_pdev() - return a pointer to IPA dev struct
+ *
+ * Return value: a pointer to IPA dev struct
+ *
+ */
+struct device *ipa3_get_pdev(void)
+{
+	if (!ipa3_ctx)
+		return NULL;
+
+	return ipa3_ctx->pdev;
+}
+
+/**
+ * ipa3_enable_dcd() - enable dynamic clock division on IPA
+ *
+ * Return value: none
+ *
+ */
+void ipa3_enable_dcd(void)
+{
+	struct ipahal_reg_idle_indication_cfg idle_indication_cfg;
+
+	/* recommended values for IPA 3.5 according to IPA HPG */
+	idle_indication_cfg.const_non_idle_enable = 0;
+	idle_indication_cfg.enter_idle_debounce_thresh = 256;
+
+	ipahal_write_reg_fields(IPA_IDLE_INDICATION_CFG,
+			&idle_indication_cfg);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
new file mode 100644
index 0000000..b945eb06
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_IPA3) += ipa_hal.o
+
+ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
new file mode 100644
index 0000000..e39874e
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.c
@@ -0,0 +1,1359 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/debugfs.h>
+#include "ipahal.h"
+#include "ipahal_i.h"
+#include "ipahal_reg_i.h"
+#include "ipahal_fltrt_i.h"
+
+struct ipahal_context *ipahal_ctx;
+
+static const char *ipahal_imm_cmd_name_to_str[IPA_IMM_CMD_MAX] = {
+	__stringify(IPA_IMM_CMD_IP_V4_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_FILTER_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_NAT_INIT),
+	__stringify(IPA_IMM_CMD_IP_V4_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_IP_V6_ROUTING_INIT),
+	__stringify(IPA_IMM_CMD_HDR_INIT_LOCAL),
+	__stringify(IPA_IMM_CMD_HDR_INIT_SYSTEM),
+	__stringify(IPA_IMM_CMD_REGISTER_WRITE),
+	__stringify(IPA_IMM_CMD_NAT_DMA),
+	__stringify(IPA_IMM_CMD_IP_PACKET_INIT),
+	__stringify(IPA_IMM_CMD_DMA_SHARED_MEM),
+	__stringify(IPA_IMM_CMD_IP_PACKET_TAG_STATUS),
+	__stringify(IPA_IMM_CMD_DMA_TASK_32B_ADDR),
+};
+
+static const char *ipahal_pkt_status_exception_to_str
+	[IPAHAL_PKT_STATUS_EXCEPTION_MAX] = {
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NONE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT),
+	__stringify(IPAHAL_PKT_STATUS_EXCEPTION_NAT),
+};
+
+#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
+		(kzalloc((__size), ((__is_atomic_ctx)?GFP_ATOMIC:GFP_KERNEL)))
+
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_task_32b_addr(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_task_32b_addr *data;
+	struct ipahal_imm_cmd_dma_task_32b_addr *dma_params =
+		(struct ipahal_imm_cmd_dma_task_32b_addr *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_task_32b_addr *)pyld->data;
+
+	if (unlikely(dma_params->size1 & ~0xFFFF)) {
+		IPAHAL_ERR("Size1 is bigger than 16bit width 0x%x\n",
+			dma_params->size1);
+		WARN_ON(1);
+	}
+	if (unlikely(dma_params->packet_size & ~0xFFFF)) {
+		IPAHAL_ERR("Pkt size is bigger than 16bit width 0x%x\n",
+			dma_params->packet_size);
+		WARN_ON(1);
+	}
+	data->cmplt = dma_params->cmplt ? 1 : 0;
+	data->eof = dma_params->eof ? 1 : 0;
+	data->flsh = dma_params->flsh ? 1 : 0;
+	data->lock = dma_params->lock ? 1 : 0;
+	data->unlock = dma_params->unlock ? 1 : 0;
+	data->size1 = dma_params->size1;
+	data->addr1 = dma_params->addr1;
+	data->packet_size = dma_params->packet_size;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_tag_status(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_packet_tag_status *data;
+	struct ipahal_imm_cmd_ip_packet_tag_status *tag_params =
+		(struct ipahal_imm_cmd_ip_packet_tag_status *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_packet_tag_status *)pyld->data;
+
+	if (unlikely(tag_params->tag & ~0xFFFFFFFFFFFF)) {
+		IPAHAL_ERR("tag is bigger than 48bit width 0x%llx\n",
+			tag_params->tag);
+		WARN_ON(1);
+	}
+	data->tag = tag_params->tag;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_dma_shared_mem(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_dma_shared_mem *data;
+	struct ipahal_imm_cmd_dma_shared_mem *mem_params =
+		(struct ipahal_imm_cmd_dma_shared_mem *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_dma_shared_mem *)pyld->data;
+
+	if (unlikely(mem_params->size & ~0xFFFF)) {
+		IPAHAL_ERR("Size is bigger than 16bit width 0x%x\n",
+			mem_params->size);
+		WARN_ON(1);
+	}
+	if (unlikely(mem_params->local_addr & ~0xFFFF)) {
+		IPAHAL_ERR("Local addr is bigger than 16bit width 0x%x\n",
+			mem_params->local_addr);
+		WARN_ON(1);
+	}
+	data->direction = mem_params->is_read ? 1 : 0;
+	data->size = mem_params->size;
+	data->local_addr = mem_params->local_addr;
+	data->system_addr = mem_params->system_addr;
+	data->skip_pipeline_clear = mem_params->skip_pipeline_clear ? 1 : 0;
+	switch (mem_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		data->pipeline_clear_options = 0;
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		data->pipeline_clear_options = 1;
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		data->pipeline_clear_options = 2;
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			mem_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_register_write(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_register_write *data;
+	struct ipahal_imm_cmd_register_write *regwrt_params =
+		(struct ipahal_imm_cmd_register_write *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_register_write *)pyld->data;
+
+	if (unlikely(regwrt_params->offset & ~0xFFFF)) {
+		IPAHAL_ERR("Offset is bigger than 16bit width 0x%x\n",
+			regwrt_params->offset);
+		WARN_ON(1);
+	}
+	data->offset = regwrt_params->offset;
+	data->value = regwrt_params->value;
+	data->value_mask = regwrt_params->value_mask;
+
+	data->skip_pipeline_clear = regwrt_params->skip_pipeline_clear ? 1 : 0;
+	switch (regwrt_params->pipeline_clear_options) {
+	case IPAHAL_HPS_CLEAR:
+		data->pipeline_clear_options = 0;
+		break;
+	case IPAHAL_SRC_GRP_CLEAR:
+		data->pipeline_clear_options = 1;
+		break;
+	case IPAHAL_FULL_PIPELINE_CLEAR:
+		data->pipeline_clear_options = 2;
+		break;
+	default:
+		IPAHAL_ERR("unsupported pipeline clear option %d\n",
+			regwrt_params->pipeline_clear_options);
+		WARN_ON(1);
+	}
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_packet_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_packet_init *data;
+	struct ipahal_imm_cmd_ip_packet_init *pktinit_params =
+		(struct ipahal_imm_cmd_ip_packet_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_packet_init *)pyld->data;
+
+	if (unlikely(pktinit_params->destination_pipe_index & ~0x1F)) {
+		IPAHAL_ERR("Dst pipe idx is bigger than 5bit width 0x%x\n",
+			pktinit_params->destination_pipe_index);
+		WARN_ON(1);
+	}
+	data->destination_pipe_index = pktinit_params->destination_pipe_index;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_nat_dma(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_nat_dma *data;
+	struct ipahal_imm_cmd_nat_dma *nat_params =
+		(struct ipahal_imm_cmd_nat_dma *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_nat_dma *)pyld->data;
+
+	data->table_index = nat_params->table_index;
+	data->base_addr = nat_params->base_addr;
+	data->offset = nat_params->offset;
+	data->data = nat_params->data;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_system(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_hdr_init_system *data;
+	struct ipahal_imm_cmd_hdr_init_system *syshdr_params =
+		(struct ipahal_imm_cmd_hdr_init_system *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_hdr_init_system *)pyld->data;
+
+	data->hdr_table_addr = syshdr_params->hdr_table_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_hdr_init_local(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_hdr_init_local *data;
+	struct ipahal_imm_cmd_hdr_init_local *lclhdr_params =
+		(struct ipahal_imm_cmd_hdr_init_local *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_hdr_init_local *)pyld->data;
+
+	if (unlikely(lclhdr_params->size_hdr_table & ~0xFFF)) {
+		IPAHAL_ERR("Hdr tble size is bigger than 12bit width 0x%x\n",
+			lclhdr_params->size_hdr_table);
+		WARN_ON(1);
+	}
+	data->hdr_table_addr = lclhdr_params->hdr_table_addr;
+	data->size_hdr_table = lclhdr_params->size_hdr_table;
+	data->hdr_addr = lclhdr_params->hdr_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_routing_init *data;
+	struct ipahal_imm_cmd_ip_v6_routing_init *rt6_params =
+		(struct ipahal_imm_cmd_ip_v6_routing_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_routing_init *)pyld->data;
+
+	data->hash_rules_addr = rt6_params->hash_rules_addr;
+	data->hash_rules_size = rt6_params->hash_rules_size;
+	data->hash_local_addr = rt6_params->hash_local_addr;
+	data->nhash_rules_addr = rt6_params->nhash_rules_addr;
+	data->nhash_rules_size = rt6_params->nhash_rules_size;
+	data->nhash_local_addr = rt6_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_routing_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_routing_init *data;
+	struct ipahal_imm_cmd_ip_v4_routing_init *rt4_params =
+		(struct ipahal_imm_cmd_ip_v4_routing_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_routing_init *)pyld->data;
+
+	data->hash_rules_addr = rt4_params->hash_rules_addr;
+	data->hash_rules_size = rt4_params->hash_rules_size;
+	data->hash_local_addr = rt4_params->hash_local_addr;
+	data->nhash_rules_addr = rt4_params->nhash_rules_addr;
+	data->nhash_rules_size = rt4_params->nhash_rules_size;
+	data->nhash_local_addr = rt4_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_nat_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_nat_init *data;
+	struct ipahal_imm_cmd_ip_v4_nat_init *nat4_params =
+		(struct ipahal_imm_cmd_ip_v4_nat_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_nat_init *)pyld->data;
+
+	data->ipv4_rules_addr = nat4_params->ipv4_rules_addr;
+	data->ipv4_expansion_rules_addr =
+		nat4_params->ipv4_expansion_rules_addr;
+	data->index_table_addr = nat4_params->index_table_addr;
+	data->index_table_expansion_addr =
+		nat4_params->index_table_expansion_addr;
+	data->table_index = nat4_params->table_index;
+	data->ipv4_rules_addr_type =
+		nat4_params->ipv4_rules_addr_shared ? 1 : 0;
+	data->ipv4_expansion_rules_addr_type =
+		nat4_params->ipv4_expansion_rules_addr_shared ? 1 : 0;
+	data->index_table_addr_type =
+		nat4_params->index_table_addr_shared ? 1 : 0;
+	data->index_table_expansion_addr_type =
+		nat4_params->index_table_expansion_addr_shared ? 1 : 0;
+	data->size_base_tables = nat4_params->size_base_tables;
+	data->size_expansion_tables = nat4_params->size_expansion_tables;
+	data->public_ip_addr = nat4_params->public_ip_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v6_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v6_filter_init *data;
+	struct ipahal_imm_cmd_ip_v6_filter_init *flt6_params =
+		(struct ipahal_imm_cmd_ip_v6_filter_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v6_filter_init *)pyld->data;
+
+	data->hash_rules_addr = flt6_params->hash_rules_addr;
+	data->hash_rules_size = flt6_params->hash_rules_size;
+	data->hash_local_addr = flt6_params->hash_local_addr;
+	data->nhash_rules_addr = flt6_params->nhash_rules_addr;
+	data->nhash_rules_size = flt6_params->nhash_rules_size;
+	data->nhash_local_addr = flt6_params->nhash_local_addr;
+
+	return pyld;
+}
+
+static struct ipahal_imm_cmd_pyld *ipa_imm_cmd_construct_ip_v4_filter_init(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_pyld *pyld;
+	struct ipa_imm_cmd_hw_ip_v4_filter_init *data;
+	struct ipahal_imm_cmd_ip_v4_filter_init *flt4_params =
+		(struct ipahal_imm_cmd_ip_v4_filter_init *)params;
+
+	pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) + sizeof(*data), is_atomic_ctx);
+	if (unlikely(!pyld)) {
+		IPAHAL_ERR("kzalloc err\n");
+		return pyld;
+	}
+	pyld->len = sizeof(*data);
+	data = (struct ipa_imm_cmd_hw_ip_v4_filter_init *)pyld->data;
+
+	data->hash_rules_addr = flt4_params->hash_rules_addr;
+	data->hash_rules_size = flt4_params->hash_rules_size;
+	data->hash_local_addr = flt4_params->hash_local_addr;
+	data->nhash_rules_addr = flt4_params->nhash_rules_addr;
+	data->nhash_rules_size = flt4_params->nhash_rules_size;
+	data->nhash_local_addr = flt4_params->nhash_local_addr;
+
+	return pyld;
+}
+
+/*
+ * struct ipahal_imm_cmd_obj - immediate command H/W information for
+ *  specific IPA version
+ * @construct - CB to construct imm command payload from abstracted structure
+ * @opcode - Immediate command OpCode
+ * @dyn_op - Does this command support a dynamic opcode?
+ *  Some commands have a dynamic opcode, where part of the opcode is
+ *  supplied as a parameter. This flag indicates whether the specific command
+ *  supports it or not.
+ */
+struct ipahal_imm_cmd_obj {
+	struct ipahal_imm_cmd_pyld *(*construct)(enum ipahal_imm_cmd_name cmd,
+		const void *params, bool is_atomic_ctx);
+	u16 opcode;
+	bool dyn_op;
+};
+
+/*
+ * This table contains the info regarding each immediate command for IPAv3
+ *  and later.
+ * Information like: opcode and construct functions.
+ * All the information on the IMM on IPAv3 is statically defined below.
+ * If information is missing regarding some IMM on some IPA version,
+ *  the init function will fill it with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If the opcode is -1, it means that the IMM is removed on the
+ *  specific version.
+ */
+static struct ipahal_imm_cmd_obj
+		ipahal_imm_cmd_objs[IPA_HW_MAX][IPA_IMM_CMD_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_filter_init,
+		3, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_FILTER_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_filter_init,
+		4, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_NAT_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_nat_init,
+		5, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V4_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v4_routing_init,
+		7, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_V6_ROUTING_INIT] = {
+		ipa_imm_cmd_construct_ip_v6_routing_init,
+		8, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_LOCAL] = {
+		ipa_imm_cmd_construct_hdr_init_local,
+		9, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_HDR_INIT_SYSTEM] = {
+		ipa_imm_cmd_construct_hdr_init_system,
+		10, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_REGISTER_WRITE] = {
+		ipa_imm_cmd_construct_register_write,
+		12, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_NAT_DMA] = {
+		ipa_imm_cmd_construct_nat_dma,
+		14, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_INIT] = {
+		ipa_imm_cmd_construct_ip_packet_init,
+		16, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_TASK_32B_ADDR] = {
+		ipa_imm_cmd_construct_dma_task_32b_addr,
+		17, true},
+	[IPA_HW_v3_0][IPA_IMM_CMD_DMA_SHARED_MEM] = {
+		ipa_imm_cmd_construct_dma_shared_mem,
+		19, false},
+	[IPA_HW_v3_0][IPA_IMM_CMD_IP_PACKET_TAG_STATUS] = {
+		ipa_imm_cmd_construct_ip_packet_tag_status,
+		20, false},
+};
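+
+/*
+ * Illustrative sketch only (the IPA version and opcode below are
+ * hypothetical): if a later IPA version changed, say, only the
+ * REGISTER_WRITE opcode, the table above would get a single overriding
+ * entry such as
+ *
+ *	[IPA_HW_v3_1][IPA_IMM_CMD_REGISTER_WRITE] = {
+ *		ipa_imm_cmd_construct_register_write,
+ *		13, false},
+ *
+ * All other entries of that version stay zeroed and are inherited from the
+ * previous version by ipahal_imm_cmd_init() below.
+ */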
+
+/*
+ * ipahal_imm_cmd_init() - Build the Immediate command information table
+ *  See ipahal_imm_cmd_objs[][] comments
+ */
+static int ipahal_imm_cmd_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_imm_cmd_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_IMM_CMD_MAX ; j++) {
+			if (!memcmp(&ipahal_imm_cmd_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_imm_cmd_obj))) {
+				memcpy(&ipahal_imm_cmd_objs[i+1][j],
+					&ipahal_imm_cmd_objs[i][j],
+					sizeof(struct ipahal_imm_cmd_obj));
+			} else {
+				/*
+				 * explicitly overridden immediate command.
+				 * Check validity
+				 */
+				if (!ipahal_imm_cmd_objs[i+1][j].opcode) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with zero opcode ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_imm_cmd_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "imm_cmd=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_imm_cmd_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_imm_cmd_name_str() - returns string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name)
+{
+	if (cmd_name < 0 || cmd_name >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("requested name of invalid imm_cmd=%d\n", cmd_name);
+		return "Invalid IMM_CMD";
+	}
+
+	return ipahal_imm_cmd_name_to_str[cmd_name];
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd)
+{
+	u32 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command imm_cmd=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	if (opcode == -1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	return opcode;
+}
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ *  that supports dynamic opcode
+ * Some commands' opcodes are not totally fixed; part of the opcode is
+ *  a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ *  is a given parameter.
+ * This API will return the composed opcode of the command given
+ *  the parameter.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param)
+{
+	u32 opcode;
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command IMM_CMD=%u\n", cmd);
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("Get opcode of IMM_CMD=%s\n",
+		ipahal_imm_cmd_name_str(cmd));
+
+	if (!ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].dyn_op) {
+		IPAHAL_ERR("IMM_CMD=%s does not support dynamic opcode\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+
+	/* Currently, dynamic opcode commands use the param to set
+	 *  the Opcode hi-byte (the lo-byte is fixed).
+	 * If this is to be changed in the future, make the opcode calculation
+	 *  a CB per command.
+	 */
+	if (param & ~0xFFFF) {
+		IPAHAL_ERR("IMM_CMD=%s opcode param is invalid\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	opcode = ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].opcode;
+	if (opcode == -1) {
+		IPAHAL_ERR("Try to get opcode of obsolete IMM_CMD=%s\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	if (opcode & ~0xFFFF) {
+		IPAHAL_ERR("IMM_CMD=%s opcode will be overridden\n",
+			ipahal_imm_cmd_name_str(cmd));
+		ipa_assert();
+		return -EFAULT;
+	}
+	return (opcode + (param<<8));
+}
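+
+/*
+ * Worked example (illustrative only): per the table above,
+ * IPA_IMM_CMD_DMA_TASK_32B_ADDR has a fixed low-byte opcode of 17 and
+ * dyn_op set, and its dynamic parameter is the number of buffers to
+ * process. For a two-buffer DMA task:
+ *
+ *	u16 op = ipahal_imm_cmd_get_opcode_param(
+ *		IPA_IMM_CMD_DMA_TASK_32B_ADDR, 2);
+ *
+ * yields op == 17 + (2 << 8) == 0x0211.
+ */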
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it.
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx)
+{
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%p\n", params);
+		ipa_assert();
+		return NULL;
+	}
+
+	if (cmd >= IPA_IMM_CMD_MAX) {
+		IPAHAL_ERR("Invalid immediate command %u\n", cmd);
+		ipa_assert();
+		return NULL;
+	}
+
+	IPAHAL_DBG_LOW("construct IMM_CMD:%s\n", ipahal_imm_cmd_name_str(cmd));
+	return ipahal_imm_cmd_objs[ipahal_ctx->hw_type][cmd].construct(
+		cmd, params, is_atomic_ctx);
+}
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * The core driver may want functionality to inject NOP commands to IPA
+ *  to ensure e.g. pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ *  ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ *  to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx)
+{
+	struct ipahal_imm_cmd_register_write cmd;
+	struct ipahal_imm_cmd_pyld *cmd_pyld;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.skip_pipeline_clear = skip_pipline_clear;
+	cmd.pipeline_clear_options = pipline_clr_opt;
+	cmd.value_mask = 0x0;
+
+	cmd_pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
+		&cmd, is_atomic_ctx);
+
+	if (!cmd_pyld)
+		IPAHAL_ERR("failed to construct register_write imm cmd\n");
+
+	return cmd_pyld;
+}
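+
+/*
+ * Minimal usage sketch (the surrounding caller code is hypothetical):
+ * construct a NOP, hand pyld->data / pyld->len to the transport layer and
+ * then release the payload:
+ *
+ *	struct ipahal_imm_cmd_pyld *nop;
+ *
+ *	nop = ipahal_construct_nop_imm_cmd(false, IPAHAL_HPS_CLEAR, false);
+ *	if (!nop)
+ *		return -ENOMEM;
+ *	... send nop->data (nop->len bytes) as an immediate command ...
+ *	ipahal_destroy_imm_cmd(nop);
+ */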
+
+
+/* IPA Packet Status Logic */
+
+#define IPA_PKT_STATUS_SET_MSK(__hw_bit_msk, __shft) \
+	(status->status_mask |= \
+		((hw_status->status_mask & (__hw_bit_msk) ? 1 : 0) << (__shft)))
+
+static void ipa_pkt_status_parse(
+	const void *unparsed_status, struct ipahal_pkt_status *status)
+{
+	enum ipahal_pkt_status_opcode opcode = 0;
+	enum ipahal_pkt_status_exception exception_type = 0;
+
+	struct ipa_pkt_status_hw *hw_status =
+		(struct ipa_pkt_status_hw *)unparsed_status;
+
+	status->pkt_len = hw_status->pkt_len;
+	status->endp_src_idx = hw_status->endp_src_idx;
+	status->endp_dest_idx = hw_status->endp_dest_idx;
+	status->metadata = hw_status->metadata;
+	status->flt_local = hw_status->flt_local;
+	status->flt_hash = hw_status->flt_hash;
+	status->flt_global = hw_status->flt_global;
+	status->flt_ret_hdr = hw_status->flt_ret_hdr;
+	status->flt_miss = ~(hw_status->flt_rule_id) ? false : true;
+	status->flt_rule_id = hw_status->flt_rule_id;
+	status->rt_local = hw_status->rt_local;
+	status->rt_hash = hw_status->rt_hash;
+	status->ucp = hw_status->ucp;
+	status->rt_tbl_idx = hw_status->rt_tbl_idx;
+	status->rt_miss = ~(hw_status->rt_rule_id) ? false : true;
+	status->rt_rule_id = hw_status->rt_rule_id;
+	status->nat_hit = hw_status->nat_hit;
+	status->nat_entry_idx = hw_status->nat_entry_idx;
+	status->tag_info = hw_status->tag_info;
+	status->seq_num = hw_status->seq_num;
+	status->time_of_day_ctr = hw_status->time_of_day_ctr;
+	status->hdr_local = hw_status->hdr_local;
+	status->hdr_offset = hw_status->hdr_offset;
+	status->frag_hit = hw_status->frag_hit;
+	status->frag_rule = hw_status->frag_rule;
+
+	switch (hw_status->status_opcode) {
+	case 0x1:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET;
+		break;
+	case 0x2:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE;
+		break;
+	case 0x4:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET;
+		break;
+	case 0x8:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET;
+		break;
+	case 0x10:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_LOG;
+		break;
+	case 0x20:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_DCMP;
+		break;
+	case 0x40:
+		opcode = IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Opcode 0x%x\n",
+			hw_status->status_opcode);
+		WARN_ON(1);
+	}
+	status->status_opcode = opcode;
+
+	switch (hw_status->nat_type) {
+	case 0:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_NONE;
+		break;
+	case 1:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_SRC;
+		break;
+	case 2:
+		status->nat_type = IPAHAL_PKT_STATUS_NAT_DST;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status NAT type 0x%x\n",
+			hw_status->nat_type);
+		WARN_ON(1);
+	}
+
+	switch (hw_status->exception) {
+	case 0:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NONE;
+		break;
+	case 1:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR;
+		break;
+	case 4:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE;
+		break;
+	case 8:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH;
+		break;
+	case 16:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS;
+		break;
+	case 32:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT;
+		break;
+	case 64:
+		exception_type = IPAHAL_PKT_STATUS_EXCEPTION_NAT;
+		break;
+	default:
+		IPAHAL_ERR("unsupported Status Exception type 0x%x\n",
+			hw_status->exception);
+		WARN_ON(1);
+	}
+	status->exception = exception_type;
+
+	IPA_PKT_STATUS_SET_MSK(0x1, IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2, IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8, IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x10, IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x20, IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x40,
+		IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x80, IPAHAL_PKT_STATUS_MASK_V4_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x100,
+		IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x200, IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x400, IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x800,
+		IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x1000, IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x2000, IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x4000, IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT);
+	IPA_PKT_STATUS_SET_MSK(0x8000, IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT);
+	status->status_mask &= 0xFFFF;
+}
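+
+/*
+ * Example of the mask composition above (illustrative only): if bit 0x4 is
+ * set in hw_status->status_mask, then
+ *
+ *	IPA_PKT_STATUS_SET_MSK(0x4, IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT);
+ *
+ * sets the NAT_PROCESS bit in the abstracted status->status_mask, which
+ * callers can later test via IPAHAL_PKT_STATUS_MASK_FLAG_VAL().
+ */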
+
+/*
+ * struct ipahal_pkt_status_obj - Packet Status H/W information for
+ *  specific IPA version
+ * @size: H/W size of the status packet
+ * @parse: CB that parses the H/W packet status into the abstracted structure
+ */
+struct ipahal_pkt_status_obj {
+	u32 size;
+	void (*parse)(const void *unparsed_status,
+		struct ipahal_pkt_status *status);
+};
+
+/*
+ * This table contains the info regarding packet status for IPAv3 and later.
+ * Information like: size of packet status and parsing function.
+ * All the information on the pkt Status on IPAv3 is statically defined below.
+ * If information is missing regarding some IPA version, the init function
+ *  will fill it with the information from the previous IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_pkt_status_obj ipahal_pkt_status_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		IPA3_0_PKT_STATUS_SIZE,
+		ipa_pkt_status_parse,
+		},
+};
+
+/*
+ * ipahal_pkt_status_init() - Build the packet status information array
+ *  for the different IPA versions
+ *  See ipahal_pkt_status_objs[] comments
+ */
+static int ipahal_pkt_status_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	struct ipahal_pkt_status_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	/*
+	 * Since structure alignment is implementation dependent,
+	 * add a test to avoid different and incompatible data layouts.
+	 *
+	 * In case new H/W has a different size or structure of status packet,
+	 * add a compile time validity check for it like below (as well as
+	 * the new defines and/or the new structure in the internal header).
+	 */
+	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+		IPA3_0_PKT_STATUS_SIZE);
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		if (!memcmp(&ipahal_pkt_status_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_pkt_status_obj))) {
+			memcpy(&ipahal_pkt_status_objs[i+1],
+				&ipahal_pkt_status_objs[i],
+				sizeof(struct ipahal_pkt_status_obj));
+		} else {
+			/*
+			 * explicitly overridden Packet Status info
+			 * Check validity
+			 */
+			if (!ipahal_pkt_status_objs[i+1].size) {
+				IPAHAL_ERR(
+				  "Packet Status with zero size ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_pkt_status_objs[i+1].parse) {
+				IPAHAL_ERR(
+				  "Packet Status without Parse func ipa_ver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void)
+{
+	return ipahal_pkt_status_objs[ipahal_ctx->hw_type].size;
+}
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status)
+{
+	if (!unparsed_status || !status) {
+		IPAHAL_ERR("Input Error: unparsed_status=%p status=%p\n",
+			unparsed_status, status);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("Parse Status Packet\n");
+	memset(status, 0, sizeof(*status));
+	ipahal_pkt_status_objs[ipahal_ctx->hw_type].parse(unparsed_status,
+		status);
+}
+
+/*
+ * ipahal_pkt_status_exception_str() - returns string representing exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception)
+{
+	if (exception < 0 || exception >= IPAHAL_PKT_STATUS_EXCEPTION_MAX) {
+		IPAHAL_ERR(
+			"requested string of invalid pkt_status exception=%d\n",
+			exception);
+		return "Invalid PKT_STATUS_EXCEPTION";
+	}
+
+	return ipahal_pkt_status_exception_to_str[exception];
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void ipahal_debugfs_init(void)
+{
+	ipahal_ctx->dent = debugfs_create_dir("ipahal", 0);
+	if (!ipahal_ctx->dent || IS_ERR(ipahal_ctx->dent)) {
+		IPAHAL_ERR("fail to create ipahal debugfs folder\n");
+		goto fail;
+	}
+
+	return;
+fail:
+	debugfs_remove_recursive(ipahal_ctx->dent);
+	ipahal_ctx->dent = NULL;
+}
+
+static void ipahal_debugfs_remove(void)
+{
+	if (!ipahal_ctx)
+		return;
+
+	if (IS_ERR(ipahal_ctx->dent)) {
+		IPAHAL_ERR("ipahal debugfs folder was not created\n");
+		return;
+	}
+
+	debugfs_remove_recursive(ipahal_ctx->dent);
+}
+#else /* CONFIG_DEBUG_FS */
+static void ipahal_debugfs_init(void) {}
+static void ipahal_debugfs_remove(void) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/*
+ * ipahal_cp_hdr_to_hw_buff_v3() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+static void ipahal_cp_hdr_to_hw_buff_v3(void *const base, u32 offset,
+		u8 *const hdr, u32 hdr_len)
+{
+	memcpy(base + offset, hdr, hdr_len);
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff_v3() - copy processing context to
+ * base address and offset given.
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+static int ipahal_cp_proc_ctx_to_hw_buff_v3(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset,
+		u32 hdr_len, bool is_hdr_proc_ctx,
+		dma_addr_t phys_base, u32 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry)
+{
+	if (type == IPA_HDR_PROC_NONE) {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	} else {
+		struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *ctx;
+
+		ctx = (struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq *)
+			(base + offset);
+		ctx->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+		ctx->hdr_add.tlv.length = 1;
+		ctx->hdr_add.tlv.value = hdr_len;
+		ctx->hdr_add.hdr_addr = is_hdr_proc_ctx ? phys_base :
+			hdr_base_addr + offset_entry->offset;
+		IPAHAL_DBG("header address 0x%x\n",
+			ctx->hdr_add.hdr_addr);
+		ctx->cmd.type = IPA_PROC_CTX_TLV_TYPE_PROC_CMD;
+		ctx->cmd.length = 0;
+		switch (type) {
+		case IPA_HDR_PROC_ETHII_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_ETHII_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_ETHII_TO_802_3;
+			break;
+		case IPA_HDR_PROC_802_3_TO_ETHII:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_ETHII;
+			break;
+		case IPA_HDR_PROC_802_3_TO_802_3:
+			ctx->cmd.value = IPA_HDR_UCP_802_3_TO_802_3;
+			break;
+		default:
+			IPAHAL_ERR("unknown ipa_hdr_proc_type %d", type);
+			WARN_ON(1);
+			return -EINVAL;
+		}
+		IPAHAL_DBG("command id %d\n", ctx->cmd.value);
+		ctx->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+		ctx->end.length = 0;
+		ctx->end.value = 0;
+	}
+
+	return 0;
+}
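+
+/*
+ * Resulting buffer layout sketch, as written by the function above at
+ * base + offset (field sizes come from the internal H/W structures):
+ *
+ *	IPA_HDR_PROC_NONE:  [HDR_ADD tlv][END tlv]
+ *	other proc types:   [HDR_ADD tlv][PROC_CMD tlv][END tlv]
+ *
+ * The HDR_ADD tlv carries hdr_len as its value and, as the header address,
+ * either phys_base (proc ctx case) or hdr_base_addr + offset_entry->offset.
+ */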
+
+/*
+ * ipahal_get_proc_ctx_needed_len_v3() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context.
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+static int ipahal_get_proc_ctx_needed_len_v3(enum ipa_hdr_proc_type type)
+{
+	return (type == IPA_HDR_PROC_NONE) ?
+			sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_seq) :
+			sizeof(struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq);
+}
+
+/*
+ * struct ipahal_hdr_funcs - headers handling functions for specific IPA
+ * version
+ * @ipahal_cp_hdr_to_hw_buff - copy function for regular headers
+ */
+struct ipahal_hdr_funcs {
+	void (*ipahal_cp_hdr_to_hw_buff)(void *const base, u32 offset,
+			u8 *const hdr, u32 hdr_len);
+
+	int (*ipahal_cp_proc_ctx_to_hw_buff)(enum ipa_hdr_proc_type type,
+			void *const base, u32 offset, u32 hdr_len,
+			bool is_hdr_proc_ctx, dma_addr_t phys_base,
+			u32 hdr_base_addr,
+			struct ipa_hdr_offset_entry *offset_entry);
+
+	int (*ipahal_get_proc_ctx_needed_len)(enum ipa_hdr_proc_type type);
+};
+
+static struct ipahal_hdr_funcs hdr_funcs;
+
+static void ipahal_hdr_init(enum ipa_hw_type ipa_hw_type)
+{
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	/*
+	 * Once the H/W changes and different handling is needed, insert a
+	 * new case for the new h/w. Always keep the default for the latest HW
+	 * and make sure all previously supported versions have their own cases.
+	 */
+	switch (ipa_hw_type) {
+	case IPA_HW_v3_0:
+	default:
+		hdr_funcs.ipahal_cp_hdr_to_hw_buff =
+				ipahal_cp_hdr_to_hw_buff_v3;
+		hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff =
+				ipahal_cp_proc_ctx_to_hw_buff_v3;
+		hdr_funcs.ipahal_get_proc_ctx_needed_len =
+				ipahal_get_proc_ctx_needed_len_v3;
+	}
+	IPAHAL_DBG("Exit\n");
+}
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *const hdr,
+		u32 hdr_len)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+	IPAHAL_DBG("base %p, offset %d, hdr %p, hdr_len %d\n", base,
+			offset, hdr, hdr_len);
+	if (!base || !hdr_len || !hdr) {
+		IPAHAL_ERR("failed on validating params");
+		return;
+	}
+
+	hdr_funcs.ipahal_cp_hdr_to_hw_buff(base, offset, hdr, hdr_len);
+
+	IPAHAL_DBG_LOW("Exit\n");
+}
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
+ * base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+		void *const base, u32 offset, u32 hdr_len,
+		bool is_hdr_proc_ctx, dma_addr_t phys_base,
+		u32 hdr_base_addr, struct ipa_hdr_offset_entry *offset_entry)
+{
+	IPAHAL_DBG(
+		"type %d, base %p, offset %d, hdr_len %d, is_hdr_proc_ctx %d, hdr_base_addr %d, offset_entry %p\n"
+			, type, base, offset, hdr_len, is_hdr_proc_ctx,
+			hdr_base_addr, offset_entry);
+
+	if (!base ||
+		!hdr_len ||
+		(is_hdr_proc_ctx && !phys_base) ||
+		(!is_hdr_proc_ctx && !offset_entry) ||
+		(!is_hdr_proc_ctx && !hdr_base_addr)) {
+		IPAHAL_ERR(
+			"invalid input: hdr_len:%u phys_base:%pad hdr_base_addr:%u is_hdr_proc_ctx:%d offset_entry:%pK\n"
+			, hdr_len, &phys_base, hdr_base_addr
+			, is_hdr_proc_ctx, offset_entry);
+		return -EINVAL;
+	}
+
+	return hdr_funcs.ipahal_cp_proc_ctx_to_hw_buff(type, base, offset,
+			hdr_len, is_hdr_proc_ctx, phys_base,
+			hdr_base_addr, offset_entry);
+}
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for
+ * addition of header processing context according to the type of processing
+ * context
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type)
+{
+	int res;
+
+	IPAHAL_DBG("entry\n");
+
+	res = hdr_funcs.ipahal_get_proc_ctx_needed_len(type);
+
+	IPAHAL_DBG("Exit\n");
+
+	return res;
+}
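+
+/*
+ * Typical caller-side pairing (sketch; 'base', 'offset' and the remaining
+ * arguments are placeholders): size the entry first, then copy the
+ * processing context into a buffer with at least that much room:
+ *
+ *	int needed = ipahal_get_proc_ctx_needed_len(type);
+ *
+ *	... ensure 'needed' bytes are free at base + offset ...
+ *	if (ipahal_cp_proc_ctx_to_hw_buff(type, base, offset, hdr_len,
+ *		false, 0, hdr_base_addr, offset_entry))
+ *		return -EINVAL;
+ */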
+
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev)
+{
+	int result;
+
+	IPAHAL_DBG("Entry - IPA HW TYPE=%d base=%p ipa_pdev=%p\n",
+		ipa_hw_type, base, ipa_pdev);
+
+	ipahal_ctx = kzalloc(sizeof(*ipahal_ctx), GFP_KERNEL);
+	if (!ipahal_ctx) {
+		IPAHAL_ERR("kzalloc err for ipahal_ctx\n");
+		result = -ENOMEM;
+		goto bail_err_exit;
+	}
+
+	if (ipa_hw_type < IPA_HW_v3_0) {
+		IPAHAL_ERR("ipahal supported on IPAv3 and later only\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (!base) {
+		IPAHAL_ERR("invalid memory io mapping addr\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	if (!ipa_pdev) {
+		IPAHAL_ERR("invalid IPA platform device\n");
+		result = -EINVAL;
+		goto bail_free_ctx;
+	}
+
+	ipahal_ctx->hw_type = ipa_hw_type;
+	ipahal_ctx->base = base;
+	ipahal_ctx->ipa_pdev = ipa_pdev;
+
+	if (ipahal_reg_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal reg\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_imm_cmd_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal imm cmd\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	if (ipahal_pkt_status_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal pkt status\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_hdr_init(ipa_hw_type);
+
+	if (ipahal_fltrt_init(ipa_hw_type)) {
+		IPAHAL_ERR("failed to init ipahal flt rt\n");
+		result = -EFAULT;
+		goto bail_free_ctx;
+	}
+
+	ipahal_debugfs_init();
+
+	return 0;
+
+bail_free_ctx:
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+bail_err_exit:
+	return result;
+}
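+
+/*
+ * Init/teardown sketch from the core driver side (the io mapping and
+ * device pointer names are placeholders):
+ *
+ *	if (ipahal_init(IPA_HW_v3_0, ipa_mmio_base, &pdev->dev))
+ *		goto fail_hal;
+ *	...
+ *	ipahal_destroy();
+ */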
+
+void ipahal_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+	ipahal_fltrt_destroy();
+	ipahal_debugfs_remove();
+	kfree(ipahal_ctx);
+	ipahal_ctx = NULL;
+}
+
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem)
+{
+	if (likely(mem)) {
+		dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+			mem->phys_base);
+		mem->size = 0;
+		mem->base = NULL;
+		mem->phys_base = 0;
+	}
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
new file mode 100644
index 0000000..6549775
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal.h
@@ -0,0 +1,642 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_H_
+#define _IPAHAL_H_
+
+#include <linux/msm_ipa.h>
+#include "../../ipa_common_i.h"
+
+/*
+ * Immediate command names
+ *
+ * NOTE: Any change to this enum needs a matching change to the
+ *	ipahal_imm_cmd_name_to_str array as well.
+ */
+enum ipahal_imm_cmd_name {
+	IPA_IMM_CMD_IP_V4_FILTER_INIT,
+	IPA_IMM_CMD_IP_V6_FILTER_INIT,
+	IPA_IMM_CMD_IP_V4_NAT_INIT,
+	IPA_IMM_CMD_IP_V4_ROUTING_INIT,
+	IPA_IMM_CMD_IP_V6_ROUTING_INIT,
+	IPA_IMM_CMD_HDR_INIT_LOCAL,
+	IPA_IMM_CMD_HDR_INIT_SYSTEM,
+	IPA_IMM_CMD_REGISTER_WRITE,
+	IPA_IMM_CMD_NAT_DMA,
+	IPA_IMM_CMD_IP_PACKET_INIT,
+	IPA_IMM_CMD_DMA_SHARED_MEM,
+	IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
+	IPA_IMM_CMD_DMA_TASK_32B_ADDR,
+	IPA_IMM_CMD_MAX,
+};
+
+/* Immediate commands abstracted structures */
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_filter_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_filter_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions, location,
+ *  cache address and other related parameters.
+ * @table_index: For future support of multiple NAT tables
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_rules_addr_shared: ipv4_rules_addr in shared mem (if not, then sys)
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ *  table starts. IPv4 NAT rules that result in NAT collision are located
+ *  in this table.
+ * @ipv4_expansion_rules_addr_shared: ipv4_expansion_rules_addr in
+ *  shared mem (if not, then sys)
+ * @index_table_addr: Addr in sys/shared mem where index table, which points
+ *  to NAT table starts
+ * @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ *  table starts
+ * @index_table_expansion_addr_shared: index_table_expansion_addr in
+ *  shared mem (if not, then sys)
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ *  idx tbl (each)
+ * @public_ip_addr: public IP address
+ */
+struct ipahal_imm_cmd_ip_v4_nat_init {
+	u8 table_index;
+	u64 ipv4_rules_addr;
+	bool ipv4_rules_addr_shared;
+	u64 ipv4_expansion_rules_addr;
+	bool ipv4_expansion_rules_addr_shared;
+	u64 index_table_addr;
+	bool index_table_addr_shared;
+	u64 index_table_expansion_addr;
+	bool index_table_expansion_addr_shared;
+	u16 size_base_tables;
+	u16 size_expansion_tables;
+	u32 public_ip_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v4_routing_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ *  be copied to
+ */
+struct ipahal_imm_cmd_ip_v6_routing_init {
+	u64 hash_rules_addr;
+	u32 hash_rules_size;
+	u32 hash_local_addr;
+	u64 nhash_rules_addr;
+	u32 nhash_rules_size;
+	u32 nhash_local_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ */
+struct ipahal_imm_cmd_hdr_init_local {
+	u64 hdr_table_addr;
+	u32 size_hdr_table;
+	u32 hdr_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipahal_imm_cmd_hdr_init_system {
+	u64 hdr_table_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_nat_dma - NAT_DMA cmd payload
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ *  different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ */
+struct ipahal_imm_cmd_nat_dma {
+	u8 table_index;
+	u8 base_addr;
+	u32 offset;
+	u16 data;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
+ * Configuration for a specific IP pkt. Shall be sent prior to an IP pkt
+ *  data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ *  is enabled, this field will overwrite the rt rule)
+ */
+struct ipahal_imm_cmd_ip_packet_init {
+	u32 destination_pipe_index;
+};
+
+/*
+ * enum ipa_pipeline_clear_option - Values for pipeline clear waiting options
+ * @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
+ *  shall not be serviced until HPS is clear of packets or immediate commands.
+ *  The high priority Rx queue / Q6ZIP group shall still be serviced normally.
+ *
+ * @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
+ *  (for no packet contexts allocated to the originating source group).
+ *  The source group / Rx queue shall not be serviced until all previously
+ *  allocated packet contexts are released. All other source groups/queues shall
+ *  be serviced normally.
+ *
+ * @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
+ *  All groups / Rx queues shall not be serviced until IPA pipeline is fully
+ *  clear. This should be used for debug only.
+ */
+enum ipahal_pipeline_clear_option {
+	IPAHAL_HPS_CLEAR,
+	IPAHAL_SRC_GRP_CLEAR,
+	IPAHAL_FULL_PIPELINE_CLEAR
+};
+
+/*
+ * struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
+ * Write value to register. Allows reg changes to be synced with data packet
+ *  and other immediate commands. Can be used to access the sram
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ */
+struct ipahal_imm_cmd_register_write {
+	u32 offset;
+	u32 value;
+	u32 value_mask;
+	bool skip_pipeline_clear;
+	enum ipahal_pipeline_clear_option pipeline_clear_options;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @is_read: Read operation from local memory? If not, then write.
+ * @skip_pipeline_clear: if to skip pipeline clear waiting (don't wait)
+ * @pipeline_clear_options: options for pipeline clear waiting
+ * @system_addr: Address in system memory
+ */
+struct ipahal_imm_cmd_dma_shared_mem {
+	u32 size;
+	u32 local_addr;
+	bool is_read;
+	bool skip_pipeline_clear;
+	enum ipahal_pipeline_clear_option pipeline_clear_options;
+	u64 system_addr;
+};
+
+/*
+ * struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ *  value that is passed back to SW inside Packet Status information.
+ *  TAG info will be provided as part of Packet Status info generated for
+ *  the next pkt transferred over the pipe.
+ *  This immediate command must be followed by a packet in the same transfer.
+ * @tag: Tag that is provided back to SW
+ */
+struct ipahal_imm_cmd_ip_packet_tag_status {
+	u64 tag;
+};
+
+/*
+ * struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ *  multiple descriptors.
+ *  The Opcode is dynamic, where it holds the number of buffers to process
+ * @cmplt: Complete flag: If true, IPA interrupts SW when the entire
+ *  DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: If true, IPA asserts EOT to the
+ *  dest client. This is used for aggr sequences
+ * @flsh: Flush flag: If true, the pkt will go through the IPA blocks but
+ *  will not be xfered to the dest client; rather it will be discarded
+ * @lock: Lock pipe flag: If true, IPA will stop processing descriptors
+ *  from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: If true, IPA will stop exclusively
+ *  servicing the current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). The first DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipahal_imm_cmd_dma_task_32b_addr {
+	bool cmplt;
+	bool eof;
+	bool flsh;
+	bool lock;
+	bool unlock;
+	u32 size1;
+	u32 addr1;
+	u32 packet_size;
+};
+
+/*
+ * struct ipahal_imm_cmd_pyld - Immediate cmd payload information
+ * @len: length of the buffer
+ * @data: buffer containing the immediate command payload. The buffer is
+ *  allocated back to back with this structure
+ */
+struct ipahal_imm_cmd_pyld {
+	u16 len;
+	u8 data[0];
+};
+
+
+/* Immediate command Function APIs */
+
+/*
+ * ipahal_imm_cmd_name_str() - returns string that represents the imm cmd
+ * @cmd_name: [in] Immediate command name
+ */
+const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
+
+/*
+ * ipahal_imm_cmd_get_opcode() - Get the fixed opcode of the immediate command
+ */
+u16 ipahal_imm_cmd_get_opcode(enum ipahal_imm_cmd_name cmd);
+
+/*
+ * ipahal_imm_cmd_get_opcode_param() - Get the opcode of an immediate command
+ *  that supports dynamic opcode
+ * Some commands' opcodes are not totally fixed; part of the opcode is
+ *  a supplied parameter. E.g. the Low-Byte is fixed and the Hi-Byte
+ *  is a given parameter.
+ * This API will return the composed opcode of the command given
+ *  the parameter.
+ * Note: Use this API only for immediate commands that support Dynamic Opcode
+ */
+u16 ipahal_imm_cmd_get_opcode_param(enum ipahal_imm_cmd_name cmd, int param);
+
+/*
+ * ipahal_construct_imm_cmd() - Construct immediate command
+ * This function builds an imm cmd payload that can be sent to IPA.
+ * The command will be allocated dynamically.
+ * When done using it, call ipahal_destroy_imm_cmd() to release it.
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
+	enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
+
+/*
+ * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
+ * The core driver may want functionality to inject NOP commands to IPA
+ *  to ensure e.g. pipeline clear before some other operation.
+ * The functionality given by this function can be reached by
+ *  ipahal_construct_imm_cmd(). This function is a helper for the core driver
+ *  to reach this NOP functionality easily.
+ * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
+ * @pipline_clr_opt: options for pipeline clear waiting
+ * @is_atomic_ctx: is called in atomic context or can sleep?
+ */
+struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
+	bool skip_pipline_clear,
+	enum ipahal_pipeline_clear_option pipline_clr_opt,
+	bool is_atomic_ctx);
+
+/*
+ * ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built
+ *  by the construction functions
+ */
+static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
+{
+	kfree(pyld);
+}
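+
+/*
+ * Typical lifecycle sketch (illustrative; 'dst_ep' is a placeholder): fill
+ * the abstracted parameters, construct the payload, use pyld->data and
+ * pyld->len, then destroy it:
+ *
+ *	struct ipahal_imm_cmd_ip_packet_init pktinit = {
+ *		.destination_pipe_index = dst_ep,
+ *	};
+ *	struct ipahal_imm_cmd_pyld *pyld;
+ *
+ *	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_IP_PACKET_INIT,
+ *		&pktinit, false);
+ *	if (!pyld)
+ *		return -ENOMEM;
+ *	... queue pyld->data (pyld->len bytes) as an immediate command ...
+ *	ipahal_destroy_imm_cmd(pyld);
+ */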
+
+
+/* IPA Status packet Structures and Function APIs */
+
+/*
+ * enum ipahal_pkt_status_opcode - Packet Status Opcode
+ * @IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of
+ *  IPA second processing pass for a packet (i.e. IPA XLAT processing for
+ *  the translated packet).
+ */
+enum ipahal_pkt_status_opcode {
+	IPAHAL_PKT_STATUS_OPCODE_PACKET = 0,
+	IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE,
+	IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET,
+	IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET,
+	IPAHAL_PKT_STATUS_OPCODE_LOG,
+	IPAHAL_PKT_STATUS_OPCODE_DCMP,
+	IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS,
+};
+
+/*
+ * enum ipahal_pkt_status_exception - Packet Status exception type
+ * @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception.
+ *
+ * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions means that
+ *  partial / no IP processing took place and corresponding Status Mask
+ *  fields should be ignored. Flt and rt info is not valid.
+ *
+ * NOTE: Any change to this enum needs a matching change to the
+ *	ipahal_pkt_status_exception_to_str array as well.
+ */
+enum ipahal_pkt_status_exception {
+	IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0,
+	IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR,
+	IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE,
+	IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH,
+	IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
+	IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
+	IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
+	IPAHAL_PKT_STATUS_EXCEPTION_NAT,
+	IPAHAL_PKT_STATUS_EXCEPTION_MAX,
+};
+
+/*
+ * enum ipahal_pkt_status_mask - Packet Status bitmask shift values of
+ *  the contained flags. This bitmask indicates flags on the properties of
+ *  the packet as well as the IPA processing it may have had.
+ * @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block?
+ *  Also means the frag info is valid unless exception or first frag
+ * @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block?
+ *  Also means that flt info is valid.
+ * @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block?
+ *  Also means that NAT info is valid, unless exception.
+ * @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block?
+ *  Also means that rt info is valid, unless exception.
+ * @TAG_VALID: Flag specifying if TAG and TAG info valid?
+ * @FRAGMENT: Flag specifying if pkt is IP fragment.
+ * @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag
+ *  info is invalid
+ * @V4: Flag specifying pkt is IPv4 or IPv6
+ * @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block?
+ *  If so, csum trailer exists
+ * @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block?
+ * @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp
+ * @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr
+ *  block?
+ * @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame
+ * @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer
+ * @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of
+ *  aggr hard-byte-limit
+ * @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit.
+ */
+enum ipahal_pkt_status_mask {
+	IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0,
+	IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT,
+	IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_V4_SHFT,
+	IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT,
+	IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT,
+	IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT,
+	IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT,
+};
+
+/*
+ * Returns a boolean value representing a property of the packet.
+ * @__flag_shft: The shift value, in the status bitmask, of the flag of
+ *  the needed property. See enum ipahal_pkt_status_mask
+ * @__status: Pointer to the abstracted status structure
+ */
+#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \
+	(((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false)
+
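+/*
+ * Example use of the macro above (illustrative): after
+ * ipahal_pkt_status_parse() has filled 'status', check whether the NAT
+ * block processed the packet:
+ *
+ *	if (IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
+ *		IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT, &status))
+ *		... NAT related fields of 'status' are meaningful ...
+ */
+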
+/*
+ * enum ipahal_pkt_status_nat_type - Type of NAT
+ */
+enum ipahal_pkt_status_nat_type {
+	IPAHAL_PKT_STATUS_NAT_NONE,
+	IPAHAL_PKT_STATUS_NAT_SRC,
+	IPAHAL_PKT_STATUS_NAT_DST,
+};
+
+/*
+ * struct ipahal_pkt_status - IPA status packet abstracted payload.
+ *  This structure describes the status packet fields for the
+ *   following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ *   IPA_STATUS_SUSPENDED_PACKET.
+ *  Other status types have a different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: The first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask for flags on several properties on the packet
+ *  and processing it may passed at IPA. See enum ipahal_pkt_status_mask
+ * @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
+ *  not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @endp_dest_idx: Destination end point index.
+ *  Not valid in case of exception
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong to
+ *  a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
+ *   In case of a miss, all flt info is to be ignored
+ * @flt_rule_id: The ID of the matching filter rule (if no miss).
+ *  This info can be combined with endp_src_idx to locate the exact rule.
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  an rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag
+ * @rt_tbl_idx: Index of the rt tbl that contains the matching rule
+ * @rt_miss: Routing miss flag: Was there a routing rule miss?
+ * @rt_rule_id: The ID of the matching rt rule. (if no miss). This info
+ *  can be combined with rt_tbl_idx to locate the exact rule.
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table that resides in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ */
+struct ipahal_pkt_status {
+	enum ipahal_pkt_status_opcode status_opcode;
+	enum ipahal_pkt_status_exception exception;
+	u32 status_mask;
+	u32 pkt_len;
+	u8 endp_src_idx;
+	u8 endp_dest_idx;
+	u32 metadata;
+	bool flt_local;
+	bool flt_hash;
+	bool flt_global;
+	bool flt_ret_hdr;
+	bool flt_miss;
+	u16 flt_rule_id;
+	bool rt_local;
+	bool rt_hash;
+	bool ucp;
+	u8 rt_tbl_idx;
+	bool rt_miss;
+	u16 rt_rule_id;
+	bool nat_hit;
+	u16 nat_entry_idx;
+	enum ipahal_pkt_status_nat_type nat_type;
+	u64 tag_info;
+	u8 seq_num;
+	u32 time_of_day_ctr;
+	bool hdr_local;
+	u16 hdr_offset;
+	bool frag_hit;
+	u8 frag_rule;
+};
+
+/*
+ * ipahal_pkt_status_get_size() - Get H/W size of packet status
+ */
+u32 ipahal_pkt_status_get_size(void);
+
+/*
+ * ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
+ * @unparsed_status: Pointer to H/W format of the packet status as read from H/W
+ * @status: Pointer to pre-allocated buffer where the parsed info will be stored
+ */
+void ipahal_pkt_status_parse(const void *unparsed_status,
+	struct ipahal_pkt_status *status);
+
+/*
+ * ipahal_pkt_status_exception_str() - returns string representing exception type
+ * @exception: [in] The exception type
+ */
+const char *ipahal_pkt_status_exception_str(
+	enum ipahal_pkt_status_exception exception);
+
+/*
+ * ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
+ * base address and offset given.
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr: the header to be copied
+ * @hdr_len: the length of the header
+ */
+void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
+
+/*
+ * ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
+ * base address and offset given.
+ * @type: type of header processing context
+ * @base: dma base address
+ * @offset: offset from base address where the data will be copied
+ * @hdr_len: the length of the header
+ * @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
+ * @phys_base: memory location in DDR
+ * @hdr_base_addr: base address in table
+ * @offset_entry: offset from hdr_base_addr in table
+ */
+int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
+		void *base, u32 offset, u32 hdr_len,
+		bool is_hdr_proc_ctx, dma_addr_t phys_base,
+		u32 hdr_base_addr,
+		struct ipa_hdr_offset_entry *offset_entry);
+
+/*
+ * ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
+ * of header processing context according to the type of processing context
+ * @type: header processing context type (no processing context,
+ *	IPA_HDR_PROC_ETHII_TO_ETHII etc.)
+ */
+int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
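+
+/*
+ * Illustrative sketch of pairing the two calls above (tbl_base, ofst,
+ * hdr_len, hdr_base_addr and offset_entry are hypothetical caller state):
+ *
+ *	int needed;
+ *
+ *	needed = ipahal_get_proc_ctx_needed_len(IPA_HDR_PROC_ETHII_TO_ETHII);
+ *	if (needed > 0)
+ *		ipahal_cp_proc_ctx_to_hw_buff(IPA_HDR_PROC_ETHII_TO_ETHII,
+ *			tbl_base, ofst, hdr_len, false, 0,
+ *			hdr_base_addr, offset_entry);
+ */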
+
+int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
+	struct device *ipa_pdev);
+void ipahal_destroy(void);
+void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
+
+#endif /* _IPAHAL_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
new file mode 100644
index 0000000..67b3cb3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.c
@@ -0,0 +1,3200 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ipc_logging.h>
+#include <linux/debugfs.h>
+#include <linux/ipa.h>
+#include "ipahal.h"
+#include "ipahal_fltrt.h"
+#include "ipahal_fltrt_i.h"
+#include "ipahal_i.h"
+#include "../../ipa_common_i.h"
+
+/*
+ * struct ipahal_fltrt_obj - Flt/Rt H/W information for specific IPA version
+ * @support_hash: Whether hashable tables are supported
+ * @tbl_width: Width of table in bytes
+ * @sysaddr_alignment: System table address alignment
+ * @lcladdr_alignment: Local table offset alignment
+ * @blk_sz_alignment: Rules block size alignment
+ * @rule_start_alignment: Rule start address alignment
+ * @tbl_hdr_width: Width of the header structure in bytes
+ * @tbl_addr_mask: Masking for Table address
+ * @rule_max_prio: Max possible priority of a rule
+ * @rule_min_prio: Min possible priority of a rule
+ * @low_rule_id: Low value of Rule ID that can be used
+ * @rule_id_bit_len: Rule ID bit length
+ * @rule_buf_size: Max size a rule may utilize
+ * @write_val_to_hdr: Write address or offset to header entry
+ * @create_flt_bitmap: Create bitmap in H/W format using given bitmap
+ * @create_tbl_addr: Given raw table address, create H/W formatted one
+ * @parse_tbl_addr: Parse the given H/W address (hdr format)
+ * @rt_generate_hw_rule: Generate RT rule in H/W format
+ * @flt_generate_hw_rule: Generate FLT rule in H/W format
+ * @flt_generate_eq: Generate flt equation attributes from rule attributes
+ * @rt_parse_hw_rule: Parse rt rule read from H/W
+ * @flt_parse_hw_rule: Parse flt rule read from H/W
+ * @eq_bitfield: Array of the bit fields of the supported equations
+ */
+struct ipahal_fltrt_obj {
+	bool support_hash;
+	u32 tbl_width;
+	u32 sysaddr_alignment;
+	u32 lcladdr_alignment;
+	u32 blk_sz_alignment;
+	u32 rule_start_alignment;
+	u32 tbl_hdr_width;
+	u32 tbl_addr_mask;
+	int rule_max_prio;
+	int rule_min_prio;
+	u32 low_rule_id;
+	u32 rule_id_bit_len;
+	u32 rule_buf_size;
+	u8* (*write_val_to_hdr)(u64 val, u8 *hdr);
+	u64 (*create_flt_bitmap)(u64 ep_bitmap);
+	u64 (*create_tbl_addr)(bool is_sys, u64 addr);
+	void (*parse_tbl_addr)(u64 hwaddr, u64 *addr, bool *is_sys);
+	int (*rt_generate_hw_rule)(struct ipahal_rt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_hw_rule)(struct ipahal_flt_rule_gen_params *params,
+		u32 *hw_len, u8 *buf);
+	int (*flt_generate_eq)(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+	int (*rt_parse_hw_rule)(u8 *addr, struct ipahal_rt_rule_entry *rule);
+	int (*flt_parse_hw_rule)(u8 *addr, struct ipahal_flt_rule_entry *rule);
+	u8 eq_bitfield[IPA_EQ_MAX];
+};
+
+
+static u64 ipa_fltrt_create_flt_bitmap(u64 ep_bitmap)
+{
+	/* At IPA3, global configuration is possible but not used */
+	return (ep_bitmap << 1) & ~0x1;
+}
+
+static u64 ipa_fltrt_create_tbl_addr(bool is_sys, u64 addr)
+{
+	if (is_sys) {
+		if (addr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+			IPAHAL_ERR(
+				"sys addr is not aligned accordingly addr=0x%pad\n",
+				&addr);
+			ipa_assert();
+			return 0;
+		}
+	} else {
+		if (addr & IPA3_0_HW_TBL_LCLADDR_ALIGNMENT) {
+			IPAHAL_ERR("addr/ofst isn't lcl addr aligned %llu\n",
+				addr);
+			ipa_assert();
+			return 0;
+		}
+		/*
+		 * For local tables (in SRAM), offsets are used as table
+		 * addresses. The offset needs to be in 8B units
+		 * (local address aligned) and left shifted to its place.
+		 * The local bit needs to be enabled.
+		 */
+		addr /= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+		addr *= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		addr += 1;
+	}
+
+	return addr;
+}
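+
+/*
+ * Worked example for the local-table encoding above, assuming the IPA3.0
+ * values of an 8B local address alignment mask (0x7) and a 0x7F table
+ * address mask: a table at SRAM offset 0x40 is encoded as
+ * (0x40 / 8) * 0x80 + 1 = 0x401, i.e. the offset in 8B units placed above
+ * the mask bits with the "local" bit set.
+ */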
+
+static void ipa_fltrt_parse_tbl_addr(u64 hwaddr, u64 *addr, bool *is_sys)
+{
+	IPAHAL_DBG_LOW("Parsing hwaddr 0x%llx\n", hwaddr);
+
+	*is_sys = !(hwaddr & 0x1);
+	hwaddr &= (~0ULL - 1);
+	if (hwaddr & IPA3_0_HW_TBL_SYSADDR_ALIGNMENT) {
+		IPAHAL_ERR(
+			"sys addr is not aligned accordingly addr=0x%pad\n",
+			&hwaddr);
+		ipa_assert();
+		return;
+	}
+
+	if (!*is_sys) {
+		hwaddr /= IPA3_0_HW_TBL_ADDR_MASK + 1;
+		hwaddr *= IPA3_0_HW_TBL_LCLADDR_ALIGNMENT + 1;
+	}
+
+	*addr = hwaddr;
+}
+
+/* Update these tables if the number of equations changes */
+static const int ipa3_0_ofst_meq32[] = { IPA_OFFSET_MEQ32_0,
+					IPA_OFFSET_MEQ32_1};
+static const int ipa3_0_ofst_meq128[] = { IPA_OFFSET_MEQ128_0,
+					IPA_OFFSET_MEQ128_1};
+static const int ipa3_0_ihl_ofst_rng16[] = { IPA_IHL_OFFSET_RANGE16_0,
+					IPA_IHL_OFFSET_RANGE16_1};
+static const int ipa3_0_ihl_ofst_meq32[] = { IPA_IHL_OFFSET_MEQ32_0,
+					IPA_IHL_OFFSET_MEQ32_1};
+
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule);
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf);
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+static int ipa_rt_parse_hw_rule(u8 *addr,
+		struct ipahal_rt_rule_entry *rule);
+static int ipa_flt_parse_hw_rule(u8 *addr,
+		struct ipahal_flt_rule_entry *rule);
+
+#define IPA_IS_RAN_OUT_OF_EQ(__eq_array, __eq_index) \
+	(ARRAY_SIZE(__eq_array) <= (__eq_index))
+
+#define IPA_GET_RULE_EQ_BIT_PTRN(__eq) \
+	(BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].eq_bitfield[(__eq)]))
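+
+/*
+ * Example: with the IPA3.0 eq_bitfield mapping defined further below,
+ * IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ) evaluates to BIT(1) and
+ * IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG) to BIT(15); OR-ing such patterns
+ * into en_rule is how the per-rule equation-enable bitmap is built.
+ */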
+
+/*
+ * ipa_fltrt_rule_generation_err_check() - check basic validity on the rule
+ *  attribs before starting to build it.
+ *  Checks that ipv4 attribs are not used for an ipv6 rule and vice-versa.
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ *
+ * Return: 0 on success, -EPERM on failure
+ */
+static int ipa_fltrt_rule_generation_err_check(
+	enum ipa_ip_type ipt, const struct ipa_rule_attrib *attrib)
+{
+	if (ipt == IPA_IP_v4) {
+		if (attrib->attrib_mask & IPA_FLT_NEXT_HDR ||
+		    attrib->attrib_mask & IPA_FLT_TC ||
+		    attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+			IPAHAL_ERR("v6 attrib's specified for v4 rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (attrib->attrib_mask & IPA_FLT_TOS ||
+		    attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+			IPAHAL_ERR("v4 attrib's specified for v6 rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_rt_gen_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)buf;
+
+	ipa_assert_on(params->dst_pipe_idx & ~0x1F);
+	rule_hdr->u.hdr.pipe_dest_idx = params->dst_pipe_idx;
+	switch (params->hdr_type) {
+	case IPAHAL_RT_RULE_HDR_PROC_CTX:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 1;
+		ipa_assert_on(params->hdr_ofst & 31);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 5;
+		break;
+	case IPAHAL_RT_RULE_HDR_RAW:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		ipa_assert_on(params->hdr_ofst & 3);
+		rule_hdr->u.hdr.hdr_offset = (params->hdr_ofst) >> 2;
+		break;
+	case IPAHAL_RT_RULE_HDR_NONE:
+		rule_hdr->u.hdr.system = !params->hdr_lcl;
+		rule_hdr->u.hdr.proc_ctx = 0;
+		rule_hdr->u.hdr.hdr_offset = 0;
+		break;
+	default:
+		IPAHAL_ERR("Invalid HDR type %d\n", params->hdr_type);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_rt_rule_hw_hdr);
+
+	if (ipa_fltrt_generate_hw_rule_bdy(params->ipt, &params->rule->attrib,
+		&buf, &en_rule)) {
+		IPAHAL_ERR("fail to generate hw rule\n");
+		return -EPERM;
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule 0x%x\n", en_rule);
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int ipa_flt_gen_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	u8 *start;
+	u16 en_rule = 0;
+
+	start = buf;
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)buf;
+
+	switch (params->rule->action) {
+	case IPA_PASS_TO_ROUTING:
+		rule_hdr->u.hdr.action = 0x0;
+		break;
+	case IPA_PASS_TO_SRC_NAT:
+		rule_hdr->u.hdr.action = 0x1;
+		break;
+	case IPA_PASS_TO_DST_NAT:
+		rule_hdr->u.hdr.action = 0x2;
+		break;
+	case IPA_PASS_TO_EXCEPTION:
+		rule_hdr->u.hdr.action = 0x3;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", params->rule->action);
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	ipa_assert_on(params->rt_tbl_idx & ~0x1F);
+	rule_hdr->u.hdr.rt_tbl_idx = params->rt_tbl_idx;
+	rule_hdr->u.hdr.retain_hdr = params->rule->retain_hdr ? 0x1 : 0x0;
+	rule_hdr->u.hdr.rsvd1 = 0;
+	rule_hdr->u.hdr.rsvd2 = 0;
+	rule_hdr->u.hdr.rsvd3 = 0;
+
+	ipa_assert_on(params->priority & ~0x3FF);
+	rule_hdr->u.hdr.priority = params->priority;
+	ipa_assert_on(params->id & ~((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	ipa_assert_on(params->id == ((1 << IPA3_0_RULE_ID_BIT_LEN) - 1));
+	rule_hdr->u.hdr.rule_id = params->id;
+
+	buf += sizeof(struct ipa3_0_flt_rule_hw_hdr);
+
+	if (params->rule->eq_attrib_type) {
+		if (ipa_fltrt_generate_hw_rule_bdy_from_eq(
+			&params->rule->eq_attrib, &buf)) {
+			IPAHAL_ERR("fail to generate hw rule from eq\n");
+			return -EPERM;
+		}
+		en_rule = params->rule->eq_attrib.rule_eq_bitmap;
+	} else {
+		if (ipa_fltrt_generate_hw_rule_bdy(params->ipt,
+			&params->rule->attrib, &buf, &en_rule)) {
+			IPAHAL_ERR("fail to generate hw rule\n");
+			return -EPERM;
+		}
+	}
+	rule_hdr->u.hdr.en_rule = en_rule;
+
+	IPAHAL_DBG_LOW("en_rule=0x%x, action=%d, rt_idx=%d, retain_hdr=%d\n",
+		en_rule,
+		rule_hdr->u.hdr.action,
+		rule_hdr->u.hdr.rt_tbl_idx,
+		rule_hdr->u.hdr.retain_hdr);
+	IPAHAL_DBG_LOW("priority=%d, rule_id=%d\n",
+		rule_hdr->u.hdr.priority,
+		rule_hdr->u.hdr.rule_id);
+
+	ipa_write_64(rule_hdr->u.word, (u8 *)rule_hdr);
+
+	if (*hw_len == 0) {
+		*hw_len = buf - start;
+	} else if (*hw_len != (buf - start)) {
+		IPAHAL_ERR("hw_len differs b/w passed=0x%x calc=%td\n",
+			*hw_len, (buf - start));
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+/*
+ * This array contains the FLT/RT info for IPAv3 and later.
+ * All the information for IPAv3 is statically defined below.
+ * If information is missing for some IPA version,
+ *  the init function will fill it with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ */
+static struct ipahal_fltrt_obj ipahal_fltrt_objs[IPA_HW_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0] = {
+		true,
+		IPA3_0_HW_TBL_WIDTH,
+		IPA3_0_HW_TBL_SYSADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_LCLADDR_ALIGNMENT,
+		IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT,
+		IPA3_0_HW_RULE_START_ALIGNMENT,
+		IPA3_0_HW_TBL_HDR_WIDTH,
+		IPA3_0_HW_TBL_ADDR_MASK,
+		IPA3_0_RULE_MAX_PRIORITY,
+		IPA3_0_RULE_MIN_PRIORITY,
+		IPA3_0_LOW_RULE_ID,
+		IPA3_0_RULE_ID_BIT_LEN,
+		IPA3_0_HW_RULE_BUF_SIZE,
+		ipa_write_64,
+		ipa_fltrt_create_flt_bitmap,
+		ipa_fltrt_create_tbl_addr,
+		ipa_fltrt_parse_tbl_addr,
+		ipa_rt_gen_hw_rule,
+		ipa_flt_gen_hw_rule,
+		ipa_flt_generate_eq,
+		ipa_rt_parse_hw_rule,
+		ipa_flt_parse_hw_rule,
+		{
+			[IPA_TOS_EQ]			= 0,
+			[IPA_PROTOCOL_EQ]		= 1,
+			[IPA_TC_EQ]			= 2,
+			[IPA_OFFSET_MEQ128_0]		= 3,
+			[IPA_OFFSET_MEQ128_1]		= 4,
+			[IPA_OFFSET_MEQ32_0]		= 5,
+			[IPA_OFFSET_MEQ32_1]		= 6,
+			[IPA_IHL_OFFSET_MEQ32_0]	= 7,
+			[IPA_IHL_OFFSET_MEQ32_1]	= 8,
+			[IPA_METADATA_COMPARE]		= 9,
+			[IPA_IHL_OFFSET_RANGE16_0]	= 10,
+			[IPA_IHL_OFFSET_RANGE16_1]	= 11,
+			[IPA_IHL_OFFSET_EQ_32]		= 12,
+			[IPA_IHL_OFFSET_EQ_16]		= 13,
+			[IPA_FL_EQ]			= 14,
+			[IPA_IS_FRAG]			= 15,
+		},
+	},
+};
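+
+/*
+ * A later entry could therefore be left all-zero, e.g. a hypothetical
+ *
+ *	[IPA_HW_v3_1] = { 0 },
+ *
+ * and the init code would back-fill it from the IPA_HW_v3_0 information
+ * per the "missing information" rule described above (IPA_HW_v3_1 is used
+ * here only as an example index).
+ */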
+
+static int ipa_flt_generate_eq(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	if (ipa_fltrt_rule_generation_err_check(ipt, attrib))
+		return -EPERM;
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_flt_generate_eq_ip4(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv4 flt eq rule\n");
+			return -EPERM;
+		}
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_flt_generate_eq_ip6(ipt, attrib, eq_atrb)) {
+			IPAHAL_ERR("failed to build ipv6 flt eq rule\n");
+			return -EPERM;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		return -EPERM;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		eq_atrb->rule_eq_bitmap = 0;
+		eq_atrb->rule_eq_bitmap |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_OFFSET_MEQ32_0);
+		eq_atrb->offset_meq_32[0].offset = 0;
+		eq_atrb->offset_meq_32[0].mask = 0;
+		eq_atrb->offset_meq_32[0].value = 0;
+	}
+
+	return 0;
+}
+
+static void ipa_fltrt_generate_mac_addr_hw_rule(u8 **extra, u8 **rest,
+	u8 hdr_mac_addr_offset,
+	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN])
+{
+	int i;
+
+	*extra = ipa_write_8(hdr_mac_addr_offset, *extra);
+
+	/* LSB MASK and ADDR */
+	*rest = ipa_write_64(0, *rest);
+	*rest = ipa_write_64(0, *rest);
+
+	/* MSB MASK and ADDR */
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr_mask[i], *rest);
+	*rest = ipa_write_16(0, *rest);
+	for (i = 5; i >= 0; i--)
+		*rest = ipa_write_8(mac_addr[i], *rest);
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip4(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		extra = ipa_write_8(attrib->u.v4.tos, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v4.protocol, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-14,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-8,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-22,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-16,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 0 => offset of TOS in v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32((attrib->tos_mask << 16), rest);
+		rest = ipa_write_32((attrib->tos_value << 16), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 12 => offset of src ip in v4 header */
+		extra = ipa_write_8(12, extra);
+		rest = ipa_write_32(attrib->u.v4.src_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.src_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* 16 => offset of dst ip in v4 header */
+		extra = ipa_write_8(16, extra);
+		rest = ipa_write_32(attrib->u.v4.dst_addr_mask, rest);
+		rest = ipa_write_32(attrib->u.v4.dst_addr, rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of type after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1  => offset of code after v4 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of SPI after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v4 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v4 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+static int ipa_fltrt_generate_hw_rule_bdy_ip6(u16 *en_rule,
+	const struct ipa_rule_attrib *attrib,
+	u8 **extra_wrds, u8 **rest_wrds)
+{
+	u8 *extra = *extra_wrds;
+	u8 *rest = *rest_wrds;
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	int rc = 0;
+
+	/* v6 code below assumes no extension headers TODO: fix this */
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		extra = ipa_write_8(attrib->u.v6.next_hdr, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ);
+		extra = ipa_write_8(attrib->u.v6.tc, extra);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 8 => offset of src ip in v6 header */
+		extra = ipa_write_8(8, extra);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.src_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 24 => offset of dst ip in v6 header */
+		extra = ipa_write_8(24, extra);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[3], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[2], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr_mask[0], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[1], rest);
+		rest = ipa_write_32(attrib->u.v6.dst_addr[0], rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* 0 => offset of TOS in v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_64(0, rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_mask << 20), rest);
+		rest = ipa_write_32(0, rest);
+		rest = ipa_write_32((attrib->tos_value << 20), rest);
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-14,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-8,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-22,
+			attrib->dst_mac_addr_mask,
+			attrib->dst_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_fltrt_generate_mac_addr_hw_rule(
+			&extra,
+			&rest,
+			-16,
+			attrib->src_mac_addr_mask,
+			attrib->src_mac_addr);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		/* -2 => offset of ether type in L2 hdr */
+		extra = ipa_write_8((u8)-2, extra);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		rest = ipa_write_16(0, rest);
+		rest = ipa_write_16(htons(attrib->ether_type), rest);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of type after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->type, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 1  => offset of code after v6 header */
+		extra = ipa_write_8(1, extra);
+		rest = ipa_write_32(0xFF, rest);
+		rest = ipa_write_32(attrib->code, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		/* 0  => offset of SPI after v6 header FIXME */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_32(0xFFFFFFFF, rest);
+		rest = ipa_write_32(attrib->spi, rest);
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE);
+		rest = ipa_write_32(attrib->meta_data_mask, rest);
+		rest = ipa_write_32(attrib->meta_data, rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port, rest);
+		rest = ipa_write_16(attrib->src_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		rest = ipa_write_16(attrib->dst_port, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 0  => offset of src port after v6 header */
+		extra = ipa_write_8(0, extra);
+		rest = ipa_write_16(attrib->src_port_hi, rest);
+		rest = ipa_write_16(attrib->src_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			goto err;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			goto err;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		/* 2  => offset of dst port after v6 header */
+		extra = ipa_write_8(2, extra);
+		rest = ipa_write_16(attrib->dst_port_hi, rest);
+		rest = ipa_write_16(attrib->dst_port_lo, rest);
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		rest = ipa_write_32(attrib->u.v6.flow_label & 0xFFFFF,
+			rest);
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT)
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+
+	goto done;
+
+err:
+	rc = -EPERM;
+done:
+	*extra_wrds = extra;
+	*rest_wrds = rest;
+	return rc;
+}
+
+static u8 *ipa_fltrt_copy_mem(u8 *src, u8 *dst, int cnt)
+{
+	while (cnt--)
+		*dst++ = *src++;
+
+	return dst;
+}
+
+/*
+ * ipa_fltrt_generate_hw_rule_bdy() - generate HW rule body (w/o header)
+ * @ipt: IP address type
+ * @attrib: IPA rule attribute
+ * @buf: output buffer; advanced past the built rule body
+ * @en_rule: returned enable-rule equation bitmap
+ *
+ * Return codes:
+ * 0: success
+ * -EPERM: wrong input
+ */
+static int ipa_fltrt_generate_hw_rule_bdy(enum ipa_ip_type ipt,
+	const struct ipa_rule_attrib *attrib, u8 **buf, u16 *en_rule)
+{
+	int sz;
+	int rc = 0;
+	u8 *extra_wrd_buf;
+	u8 *rest_wrd_buf;
+	u8 *extra_wrd_start;
+	u8 *rest_wrd_start;
+	u8 *extra_wrd_i;
+	u8 *rest_wrd_i;
+
+	sz = IPA3_0_HW_TBL_WIDTH * 2 + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!extra_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_extra_alloc;
+	}
+
+	sz = IPA3_0_HW_RULE_BUF_SIZE + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_buf = kzalloc(sz, GFP_KERNEL);
+	if (!rest_wrd_buf) {
+		IPAHAL_ERR("failed to allocate %d bytes\n", sz);
+		rc = -ENOMEM;
+		goto fail_rest_alloc;
+	}
+
+	extra_wrd_start = extra_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	extra_wrd_start = (u8 *)((long)extra_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	rest_wrd_start = rest_wrd_buf + IPA3_0_HW_RULE_START_ALIGNMENT;
+	rest_wrd_start = (u8 *)((long)rest_wrd_start &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+
+	extra_wrd_i = extra_wrd_start;
+	rest_wrd_i = rest_wrd_start;
+
+	rc = ipa_fltrt_rule_generation_err_check(ipt, attrib);
+	if (rc) {
+		IPAHAL_ERR("rule generation err check failed\n");
+		goto fail_err_check;
+	}
+
+	if (ipt == IPA_IP_v4) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip4(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv4 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+
+	} else if (ipt == IPA_IP_v6) {
+		if (ipa_fltrt_generate_hw_rule_bdy_ip6(en_rule, attrib,
+			&extra_wrd_i, &rest_wrd_i)) {
+			IPAHAL_ERR("failed to build ipv6 hw rule\n");
+			rc = -EPERM;
+			goto fail_err_check;
+		}
+	} else {
+		IPAHAL_ERR("unsupported ip %d\n", ipt);
+		rc = -EPERM;
+		goto fail_err_check;
+	}
+
+	/*
+	 * default "rule" means no attributes set -> map to
+	 * OFFSET_MEQ32_0 with mask of 0 and val of 0 and offset 0
+	 */
+	if (attrib->attrib_mask == 0) {
+		IPAHAL_DBG_LOW("building default rule\n");
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(ipa3_0_ofst_meq32[0]);
+		extra_wrd_i = ipa_write_8(0, extra_wrd_i);  /* offset */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* mask */
+		rest_wrd_i = ipa_write_32(0, rest_wrd_i);   /* val */
+	}
+
+	IPAHAL_DBG_LOW("extra_word_1 0x%llx\n", *(u64 *)extra_wrd_start);
+	IPAHAL_DBG_LOW("extra_word_2 0x%llx\n",
+		*(u64 *)(extra_wrd_start + IPA3_0_HW_TBL_WIDTH));
+
+	extra_wrd_i = ipa_pad_to_64(extra_wrd_i);
+	sz = extra_wrd_i - extra_wrd_start;
+	IPAHAL_DBG_LOW("extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(extra_wrd_start, *buf, sz);
+
+	rest_wrd_i = ipa_pad_to_64(rest_wrd_i);
+	sz = rest_wrd_i - rest_wrd_start;
+	IPAHAL_DBG_LOW("non extra words params sz %d\n", sz);
+	*buf = ipa_fltrt_copy_mem(rest_wrd_start, *buf, sz);
+
+fail_err_check:
+	kfree(rest_wrd_buf);
+fail_rest_alloc:
+	kfree(extra_wrd_buf);
+fail_extra_alloc:
+	return rc;
+}
+
+
+/**
+ * ipa_fltrt_calc_extra_wrd_bytes() - Calculate the number of extra word bytes
+ *  needed by the given equation attributes
+ * @attrib: equation attribute
+ *
+ * Return value: the number of extra bytes needed
+ */
+static int ipa_fltrt_calc_extra_wrd_bytes(
+	const struct ipa_ipfltri_rule_eq *attrib)
+{
+	int num = 0;
+
+	if (attrib->tos_eq_present)
+		num++;
+	if (attrib->protocol_eq_present)
+		num++;
+	if (attrib->tc_eq_present)
+		num++;
+	num += attrib->num_offset_meq_128;
+	num += attrib->num_offset_meq_32;
+	num += attrib->num_ihl_offset_meq_32;
+	num += attrib->num_ihl_offset_range_16;
+	if (attrib->ihl_offset_eq_32_present)
+		num++;
+	if (attrib->ihl_offset_eq_16_present)
+		num++;
+
+	IPAHAL_DBG_LOW("extra bytes number %d\n", num);
+
+	return num;
+}
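+
+/*
+ * Example: a rule with protocol_eq_present set, one offset_meq_32 and one
+ * ihl_offset_range_16 needs 1 + 1 + 1 = 3 extra offset bytes, so (assuming
+ * the 8B IPA3.0 table header width) a single extra word is enough and the
+ * rest of the rule data starts one header width after the extra word.
+ */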
+
+static int ipa_fltrt_generate_hw_rule_bdy_from_eq(
+		const struct ipa_ipfltri_rule_eq *attrib, u8 **buf)
+{
+	int num_offset_meq_32 = attrib->num_offset_meq_32;
+	int num_ihl_offset_range_16 = attrib->num_ihl_offset_range_16;
+	int num_ihl_offset_meq_32 = attrib->num_ihl_offset_meq_32;
+	int num_offset_meq_128 = attrib->num_offset_meq_128;
+	int i;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(attrib);
+	/* only 3 eqs do not have an extra word param; 13 out of 16 is the
+	 * number of equations that need an extra word param
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too much extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = *buf;
+		rest = *buf + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		extra = NULL;
+		rest = *buf;
+	}
+
+	if (attrib->tos_eq_present)
+		extra = ipa_write_8(attrib->tos_eq, extra);
+
+	if (attrib->protocol_eq_present)
+		extra = ipa_write_8(attrib->protocol_eq, extra);
+
+	if (attrib->tc_eq_present)
+		extra = ipa_write_8(attrib->tc_eq, extra);
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[0].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[0].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_128) {
+		extra = ipa_write_8(attrib->offset_meq_128[1].offset, extra);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 0; i < 8; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].mask[i],
+				rest);
+		for (i = 8; i < 16; i++)
+			rest = ipa_write_8(attrib->offset_meq_128[1].value[i],
+				rest);
+		num_offset_meq_128--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[0].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[0].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_offset_meq_32) {
+		extra = ipa_write_8(attrib->offset_meq_32[1].offset, extra);
+		rest = ipa_write_32(attrib->offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->offset_meq_32[1].value, rest);
+		num_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[0].offset,
+		extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[0].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (num_ihl_offset_meq_32) {
+		extra = ipa_write_8(attrib->ihl_offset_meq_32[1].offset,
+		extra);
+
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].mask, rest);
+		rest = ipa_write_32(attrib->ihl_offset_meq_32[1].value, rest);
+		num_ihl_offset_meq_32--;
+	}
+
+	if (attrib->metadata_meq32_present) {
+		rest = ipa_write_32(attrib->metadata_meq32.mask, rest);
+		rest = ipa_write_32(attrib->metadata_meq32.value, rest);
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[0].offset,
+		extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[0].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (num_ihl_offset_range_16) {
+		extra = ipa_write_8(attrib->ihl_offset_range_16[1].offset,
+		extra);
+
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_high,
+				rest);
+		rest = ipa_write_16(attrib->ihl_offset_range_16[1].range_low,
+				rest);
+		num_ihl_offset_range_16--;
+	}
+
+	if (attrib->ihl_offset_eq_32_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_32.offset, extra);
+		rest = ipa_write_32(attrib->ihl_offset_eq_32.value, rest);
+	}
+
+	if (attrib->ihl_offset_eq_16_present) {
+		extra = ipa_write_8(attrib->ihl_offset_eq_16.offset, extra);
+		rest = ipa_write_16(attrib->ihl_offset_eq_16.value, rest);
+		rest = ipa_write_16(0, rest);
+	}
+
+	if (attrib->fl_eq_present)
+		rest = ipa_write_32(attrib->fl_eq & 0xFFFFF, rest);
+
+	extra = ipa_pad_to_64(extra);
+	rest = ipa_pad_to_64(rest);
+	*buf = rest;
+
+	return 0;
+}
+
+static void ipa_flt_generate_mac_addr_eq(struct ipa_ipfltri_rule_eq *eq_atrb,
+	u8 hdr_mac_addr_offset,	const uint8_t mac_addr_mask[ETH_ALEN],
+	const uint8_t mac_addr[ETH_ALEN], u8 ofst_meq128)
+{
+	int i;
+
+	eq_atrb->offset_meq_128[ofst_meq128].offset = hdr_mac_addr_offset;
+
+	/* LSB MASK and ADDR */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 8);
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 8);
+
+	/* MSB MASK and ADDR */
+	memset(eq_atrb->offset_meq_128[ofst_meq128].mask + 8, 0, 2);
+	for (i = 0; i <= 5; i++)
+		eq_atrb->offset_meq_128[ofst_meq128].mask[15 - i] =
+			mac_addr_mask[i];
+
+	memset(eq_atrb->offset_meq_128[ofst_meq128].value + 8, 0, 2);
+	for (i = 0; i <= 5; i++)
+		eq_atrb->offset_meq_128[ofst_meq128].value[15 - i] =
+			mac_addr[i];
+}
+
+static int ipa_flt_generate_eq_ip4(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_TOS) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ);
+		eq_atrb->tos_eq_present = 1;
+		eq_atrb->tos_eq = attrib->u.v4.tos;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_PROTOCOL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v4.protocol;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 0;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->tos_mask << 16;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->tos_value << 16;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 12;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.src_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.src_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = 16;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			attrib->u.v4.dst_addr_mask;
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			attrib->u.v4.dst_addr;
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+static int ipa_flt_generate_eq_ip6(enum ipa_ip_type ip,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	u8 ofst_meq32 = 0;
+	u8 ihl_ofst_rng16 = 0;
+	u8 ihl_ofst_meq32 = 0;
+	u8 ofst_meq128 = 0;
+	u16 eq_bitmap = 0;
+	u16 *en_rule = &eq_bitmap;
+
+	if (attrib->attrib_mask & IPA_FLT_NEXT_HDR) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_PROTOCOL_EQ);
+		eq_atrb->protocol_eq_present = 1;
+		eq_atrb->protocol_eq = attrib->u.v6.next_hdr;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TC) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_TC_EQ);
+		eq_atrb->tc_eq_present = 1;
+		eq_atrb->tc_eq = attrib->u.v6.tc;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		/* use the same word order as in ipa v2 */
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 8;
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.src_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.src_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.src_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.src_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.src_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.src_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.src_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.src_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_ADDR) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 24;
+		/* use the same word order as in ipa v2 */
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 0)
+			= attrib->u.v6.dst_addr_mask[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 4)
+			= attrib->u.v6.dst_addr_mask[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 8)
+			= attrib->u.v6.dst_addr_mask[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->u.v6.dst_addr_mask[3];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 0)
+			= attrib->u.v6.dst_addr[0];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 4)
+			= attrib->u.v6.dst_addr[1];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value + 8)
+			= attrib->u.v6.dst_addr[2];
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->u.v6.dst_addr[3];
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TOS_MASKED) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+		eq_atrb->offset_meq_128[ofst_meq128].offset = 0;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].mask, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].mask + 12)
+			= attrib->tos_mask << 20;
+		memset(eq_atrb->offset_meq_128[ofst_meq128].value, 0, 12);
+		*(u32 *)(eq_atrb->offset_meq_128[ofst_meq128].value +
+				12) = attrib->tos_value << 20;
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -14 => offset of dst mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -14,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_ETHER_II) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -8 => offset of src mac addr in Ethernet II hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -8,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_DST_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -22 => offset of dst mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -22,
+			attrib->dst_mac_addr_mask, attrib->dst_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_SRC_ADDR_802_3) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq128, ofst_meq128)) {
+			IPAHAL_ERR("ran out of meq128 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq128[ofst_meq128]);
+
+		/* -16 => offset of src mac addr in 802.3 hdr */
+		ipa_flt_generate_mac_addr_eq(eq_atrb, -16,
+			attrib->src_mac_addr_mask, attrib->src_mac_addr,
+			ofst_meq128);
+
+		ofst_meq128++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_MAC_ETHER_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ofst_meq32, ofst_meq32)) {
+			IPAHAL_ERR("ran out of meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ofst_meq32[ofst_meq32]);
+		eq_atrb->offset_meq_32[ofst_meq32].offset = -2;
+		eq_atrb->offset_meq_32[ofst_meq32].mask =
+			htons(attrib->ether_type);
+		eq_atrb->offset_meq_32[ofst_meq32].value =
+			htons(attrib->ether_type);
+		ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_TYPE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->type;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_CODE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 1;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask = 0xFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->code;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SPI) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_meq32,
+			ihl_ofst_meq32)) {
+			IPAHAL_ERR("ran out of ihl_meq32 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_meq32[ihl_ofst_meq32]);
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].offset = 0;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].mask =
+			0xFFFFFFFF;
+		eq_atrb->ihl_offset_meq_32[ihl_ofst_meq32].value =
+			attrib->spi;
+		ihl_ofst_meq32++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_META_DATA) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_METADATA_COMPARE);
+		eq_atrb->metadata_meq32_present = 1;
+		eq_atrb->metadata_meq32.offset = 0;
+		eq_atrb->metadata_meq32.mask = attrib->meta_data_mask;
+		eq_atrb->metadata_meq32.value = attrib->meta_data;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_SRC_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->src_port_hi < attrib->src_port_lo) {
+			IPAHAL_ERR("bad src port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 0;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->src_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->src_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_DST_PORT_RANGE) {
+		if (IPA_IS_RAN_OUT_OF_EQ(ipa3_0_ihl_ofst_rng16,
+				ihl_ofst_rng16)) {
+			IPAHAL_ERR("ran out of ihl_rng16 eq\n");
+			return -EPERM;
+		}
+		if (attrib->dst_port_hi < attrib->dst_port_lo) {
+			IPAHAL_ERR("bad dst port range param\n");
+			return -EPERM;
+		}
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			ipa3_0_ihl_ofst_rng16[ihl_ofst_rng16]);
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].offset = 2;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_low
+			= attrib->dst_port_lo;
+		eq_atrb->ihl_offset_range_16[ihl_ofst_rng16].range_high
+			= attrib->dst_port_hi;
+		ihl_ofst_rng16++;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FLOW_LABEL) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ);
+		eq_atrb->fl_eq_present = 1;
+		eq_atrb->fl_eq = attrib->u.v6.flow_label;
+	}
+
+	if (attrib->attrib_mask & IPA_FLT_FRAGMENT) {
+		*en_rule |= IPA_GET_RULE_EQ_BIT_PTRN(
+			IPA_IS_FRAG);
+		eq_atrb->ipv4_frag_eq_present = 1;
+	}
+
+	eq_atrb->rule_eq_bitmap = *en_rule;
+	eq_atrb->num_offset_meq_32 = ofst_meq32;
+	eq_atrb->num_ihl_offset_range_16 = ihl_ofst_rng16;
+	eq_atrb->num_ihl_offset_meq_32 = ihl_ofst_meq32;
+	eq_atrb->num_offset_meq_128 = ofst_meq128;
+
+	return 0;
+}
+
+static int ipa_fltrt_parse_hw_rule_eq(u8 *addr, u32 hdr_sz,
+	struct ipa_ipfltri_rule_eq *atrb, u32 *rule_size)
+{
+	u16 eq_bitmap;
+	int extra_bytes;
+	u8 *extra;
+	u8 *rest;
+	int i;
+	u8 dummy_extra_wrd;
+
+	if (!addr || !atrb || !rule_size) {
+		IPAHAL_ERR("Input error: addr=%p atrb=%p rule_size=%p\n",
+			addr, atrb, rule_size);
+		return -EINVAL;
+	}
+
+	eq_bitmap = atrb->rule_eq_bitmap;
+
+	IPAHAL_DBG_LOW("eq_bitmap=0x%x\n", eq_bitmap);
+
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TOS_EQ))
+		atrb->tos_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_PROTOCOL_EQ))
+		atrb->protocol_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_TC_EQ))
+		atrb->tc_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_0))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ128_1))
+		atrb->num_offset_meq_128++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_0))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_OFFSET_MEQ32_1))
+		atrb->num_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_0))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_MEQ32_1))
+		atrb->num_ihl_offset_meq_32++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_METADATA_COMPARE))
+		atrb->metadata_meq32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_0))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_RANGE16_1))
+		atrb->num_ihl_offset_range_16++;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_32))
+		atrb->ihl_offset_eq_32_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IHL_OFFSET_EQ_16))
+		atrb->ihl_offset_eq_16_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_FL_EQ))
+		atrb->fl_eq_present = true;
+	if (eq_bitmap & IPA_GET_RULE_EQ_BIT_PTRN(IPA_IS_FRAG))
+		atrb->ipv4_frag_eq_present = true;
+
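+	/*
+	 * Rule payload layout: after the header come the "extra" words,
+	 * packing one byte per equation that needs it, followed by the
+	 * "rest" area holding the full-width operands (masks, values,
+	 * ranges).
+	 */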
+	extra_bytes = ipa_fltrt_calc_extra_wrd_bytes(atrb);
+	/* Only 3 of the 16 equations have no extra word param; the other 13
+	 * do, so extra_bytes can never legitimately exceed 13
+	 */
+	if (extra_bytes > 13) {
+		IPAHAL_ERR("too many extra bytes\n");
+		return -EPERM;
+	} else if (extra_bytes > IPA3_0_HW_TBL_HDR_WIDTH) {
+		/* two extra words */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH * 2;
+	} else if (extra_bytes > 0) {
+		/* single extra word */
+		extra = addr + hdr_sz;
+		rest = extra + IPA3_0_HW_TBL_HDR_WIDTH;
+	} else {
+		/* no extra words */
+		dummy_extra_wrd = 0;
+		extra = &dummy_extra_wrd;
+		rest = addr + hdr_sz;
+	}
+	IPAHAL_DBG_LOW("addr=0x%p extra=0x%p rest=0x%p\n", addr, extra, rest);
+
+	if (atrb->tos_eq_present)
+		atrb->tos_eq = *extra++;
+	if (atrb->protocol_eq_present)
+		atrb->protocol_eq = *extra++;
+	if (atrb->tc_eq_present)
+		atrb->tc_eq = *extra++;
+
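+	/*
+	 * For each meq128 the extra area holds the 1-byte offset, while the
+	 * rest area holds the operands interleaved: low 8 mask bytes, low 8
+	 * value bytes, then high 8 mask bytes and high 8 value bytes.
+	 */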
+	if (atrb->num_offset_meq_128 > 0) {
+		atrb->offset_meq_128[0].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[0].value[i] = *rest++;
+	}
+	if (atrb->num_offset_meq_128 > 1) {
+		atrb->offset_meq_128[1].offset = *extra++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 0; i < 8; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].mask[i] = *rest++;
+		for (i = 8; i < 16; i++)
+			atrb->offset_meq_128[1].value[i] = *rest++;
+	}
+
+	if (atrb->num_offset_meq_32 > 0) {
+		atrb->offset_meq_32[0].offset = *extra++;
+		atrb->offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_offset_meq_32 > 1) {
+		atrb->offset_meq_32[1].offset = *extra++;
+		atrb->offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_meq_32 > 0) {
+		atrb->ihl_offset_meq_32[0].offset = *extra++;
+		atrb->ihl_offset_meq_32[0].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[0].value = *((u32 *)rest);
+		rest += 4;
+	}
+	if (atrb->num_ihl_offset_meq_32 > 1) {
+		atrb->ihl_offset_meq_32[1].offset = *extra++;
+		atrb->ihl_offset_meq_32[1].mask = *((u32 *)rest);
+		rest += 4;
+		atrb->ihl_offset_meq_32[1].value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->metadata_meq32_present) {
+		atrb->metadata_meq32.mask = *((u32 *)rest);
+		rest += 4;
+		atrb->metadata_meq32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->num_ihl_offset_range_16 > 0) {
+		atrb->ihl_offset_range_16[0].offset = *extra++;
+		atrb->ihl_offset_range_16[0].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[0].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+	if (atrb->num_ihl_offset_range_16 > 1) {
+		atrb->ihl_offset_range_16[1].offset = *extra++;
+		atrb->ihl_offset_range_16[1].range_high = *((u16 *)rest);
+		rest += 2;
+		atrb->ihl_offset_range_16[1].range_low = *((u16 *)rest);
+		rest += 2;
+	}
+
+	if (atrb->ihl_offset_eq_32_present) {
+		atrb->ihl_offset_eq_32.offset = *extra++;
+		atrb->ihl_offset_eq_32.value = *((u32 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->ihl_offset_eq_16_present) {
+		atrb->ihl_offset_eq_16.offset = *extra++;
+		atrb->ihl_offset_eq_16.value = *((u16 *)rest);
+		rest += 4;
+	}
+
+	if (atrb->fl_eq_present) {
+		atrb->fl_eq = *((u32 *)rest);
+		atrb->fl_eq &= 0xfffff;
+		rest += 4;
+	}
+
+	IPAHAL_DBG_LOW("before rule alignment rest=0x%p\n", rest);
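+	/*
+	 * IPA3_0_HW_RULE_START_ALIGNMENT is a bit mask (2^n - 1); adding it
+	 * and clearing the low bits rounds 'rest' up to the next rule start
+	 * boundary.
+	 */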
+	rest = (u8 *)(((unsigned long)rest + IPA3_0_HW_RULE_START_ALIGNMENT) &
+		~IPA3_0_HW_RULE_START_ALIGNMENT);
+	IPAHAL_DBG_LOW("after rule alignment  rest=0x%p\n", rest);
+
+	*rule_size = rest - addr;
+	IPAHAL_DBG_LOW("rule_size=0x%x\n", *rule_size);
+
+	return 0;
+}
+
+static int ipa_rt_parse_hw_rule(u8 *addr, struct ipahal_rt_rule_entry *rule)
+{
+	struct ipa3_0_rt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa3_0_rt_rule_hw_hdr *)addr;
+	atrb = &rule->eq_attrib;
+
+	IPAHAL_DBG_LOW("read hdr 0x%llx\n", rule_hdr->u.word);
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
+	rule->dst_pipe_idx = rule_hdr->u.hdr.pipe_dest_idx;
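+	/*
+	 * hdr_offset is stored in H/W units: 32-byte units when it points to
+	 * a processing context entry, 4-byte units for a raw header.
+	 */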
+	if (rule_hdr->u.hdr.proc_ctx) {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_PROC_CTX;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 5;
+	} else {
+		rule->hdr_type = IPAHAL_RT_RULE_HDR_RAW;
+		rule->hdr_ofst = (rule_hdr->u.hdr.hdr_offset) << 2;
+	}
+	rule->hdr_lcl = !rule_hdr->u.hdr.system;
+
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+static int ipa_flt_parse_hw_rule(u8 *addr, struct ipahal_flt_rule_entry *rule)
+{
+	struct ipa3_0_flt_rule_hw_hdr *rule_hdr;
+	struct ipa_ipfltri_rule_eq *atrb;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	rule_hdr = (struct ipa3_0_flt_rule_hw_hdr *)addr;
+	atrb = &rule->rule.eq_attrib;
+
+	if (rule_hdr->u.word == 0) {
+		/* table terminator - empty table */
+		rule->rule_size = 0;
+		return 0;
+	}
+
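+	/* decode the 5-bit action field of the flt rule H/W header */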
+	switch (rule_hdr->u.hdr.action) {
+	case 0x0:
+		rule->rule.action = IPA_PASS_TO_ROUTING;
+		break;
+	case 0x1:
+		rule->rule.action = IPA_PASS_TO_SRC_NAT;
+		break;
+	case 0x2:
+		rule->rule.action = IPA_PASS_TO_DST_NAT;
+		break;
+	case 0x3:
+		rule->rule.action = IPA_PASS_TO_EXCEPTION;
+		break;
+	default:
+		IPAHAL_ERR("Invalid Rule Action %d\n", rule_hdr->u.hdr.action);
+		WARN_ON(1);
+		rule->rule.action = rule_hdr->u.hdr.action;
+	}
+
+	rule->rule.rt_tbl_idx = rule_hdr->u.hdr.rt_tbl_idx;
+	rule->rule.retain_hdr = rule_hdr->u.hdr.retain_hdr;
+	rule->priority = rule_hdr->u.hdr.priority;
+	rule->id = rule_hdr->u.hdr.rule_id;
+
+	atrb->rule_eq_bitmap = rule_hdr->u.hdr.en_rule;
+	rule->rule.eq_attrib_type = 1;
+	return ipa_fltrt_parse_hw_rule_eq(addr, sizeof(*rule_hdr),
+		atrb, &rule->rule_size);
+}
+
+/*
+ * ipahal_fltrt_init() - Build the FLT/RT information table
+ *  See ipahal_fltrt_objs[] comments
+ *
+ * Note: As global variables are initialized with zero, any un-overridden
+ *  FLT/RT info entry will be zero. By this we recognize them.
+ */
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type)
+{
+	struct ipahal_fltrt_obj zero_obj;
+	int i;
+	struct ipa_mem_buffer *mem;
+	int rc = -EFAULT;
+
+	IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if (ipa_hw_type >= IPA_HW_MAX) {
+		IPAHAL_ERR("Invalid H/W type\n");
+		return -EFAULT;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
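+	/*
+	 * Walk the H/W versions in order: an all-zero entry was not
+	 * explicitly overridden and inherits the previous version's FLT/RT
+	 * info; an overridden entry is sanity-checked below.
+	 */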
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		if (!memcmp(&ipahal_fltrt_objs[i+1], &zero_obj,
+			sizeof(struct ipahal_fltrt_obj))) {
+			memcpy(&ipahal_fltrt_objs[i+1],
+				&ipahal_fltrt_objs[i],
+				sizeof(struct ipahal_fltrt_obj));
+		} else {
+			/*
+			 * explicitly overridden FLT RT info
+			 * Check validity
+			 */
+			if (!ipahal_fltrt_objs[i+1].tbl_width) {
+				IPAHAL_ERR(
+				 "Zero tbl width ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].sysaddr_alignment) {
+				IPAHAL_ERR(
+				  "No tbl sysaddr alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].lcladdr_alignment) {
+				IPAHAL_ERR(
+				  "No tbl lcladdr alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].blk_sz_alignment) {
+				IPAHAL_ERR(
+				  "No blk sz alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rule_start_alignment) {
+				IPAHAL_ERR(
+				  "No rule start alignment ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].tbl_hdr_width) {
+				IPAHAL_ERR(
+				 "Zero tbl hdr width ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].tbl_addr_mask) {
+				IPAHAL_ERR(
+				 "Zero tbl addr mask ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (ipahal_fltrt_objs[i+1].rule_id_bit_len < 2) {
+				IPAHAL_ERR(
+				 "Too few bits for rule_id ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rule_buf_size) {
+				IPAHAL_ERR(
+				 "zero rule buf size ipaver=%d\n",
+				 i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].write_val_to_hdr) {
+				IPAHAL_ERR(
+				  "No write_val_to_hdr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].create_flt_bitmap) {
+				IPAHAL_ERR(
+				  "No create_flt_bitmap CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].create_tbl_addr) {
+				IPAHAL_ERR(
+				  "No create_tbl_addr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].parse_tbl_addr) {
+				IPAHAL_ERR(
+				  "No parse_tbl_addr CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rt_generate_hw_rule) {
+				IPAHAL_ERR(
+				  "No rt_generate_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_generate_hw_rule) {
+				IPAHAL_ERR(
+				  "No flt_generate_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_generate_eq) {
+				IPAHAL_ERR(
+				  "No flt_generate_eq CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].rt_parse_hw_rule) {
+				IPAHAL_ERR(
+				  "No rt_parse_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+			if (!ipahal_fltrt_objs[i+1].flt_parse_hw_rule) {
+				IPAHAL_ERR(
+				  "No flt_parse_hw_rule CB ipaver=%d\n",
+				  i+1);
+				WARN_ON(1);
+			}
+		}
+	}
+
+	mem = &ipahal_ctx->empty_fltrt_tbl;
+
+	/* Set up an empty table in system memory; this will
+	 * be used, for example, to delete an rt tbl safely
+	 */
+	mem->size = ipahal_fltrt_objs[ipa_hw_type].tbl_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("DMA buff alloc fail %d bytes for empty tbl\n",
+			mem->size);
+		return -ENOMEM;
+	}
+
+	if (mem->phys_base &
+		ipahal_fltrt_objs[ipa_hw_type].sysaddr_alignment) {
+		IPAHAL_ERR("Empty table buf is not address aligned 0x%pad\n",
+			&mem->phys_base);
+		rc = -EFAULT;
+		goto clear_empty_tbl;
+	}
+
+	memset(mem->base, 0, mem->size);
+	IPAHAL_DBG("empty table allocated in system memory\n");
+
+	return 0;
+
+clear_empty_tbl:
+	dma_free_coherent(ipahal_ctx->ipa_pdev, mem->size, mem->base,
+		mem->phys_base);
+	return rc;
+}
+
+void ipahal_fltrt_destroy(void)
+{
+	IPAHAL_DBG("Entry\n");
+
+	if (ipahal_ctx && ipahal_ctx->empty_fltrt_tbl.base)
+		dma_free_coherent(ipahal_ctx->ipa_pdev,
+			ipahal_ctx->empty_fltrt_tbl.size,
+			ipahal_ctx->empty_fltrt_tbl.base,
+			ipahal_ctx->empty_fltrt_tbl.phys_base);
+}
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].tbl_hdr_width;
+}
+
+/* Get the H/W local table (SRAM) address alignment
+ * Table headers reference local tables via offsets in SRAM.
+ * This function returns the alignment of the offset that IPA expects.
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].lcladdr_alignment;
+}
+
+/*
+ * Rule priority is used to determine rule ordering in the integrated
+ * table, which consists of hashable and non-hashable parts. A rule at
+ * max priority is used as soon as IPA scans it; IPA will not look for
+ * further rules.
+ */
+int ipahal_get_rule_max_priority(void)
+{
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_max_prio;
+}
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!prio) {
+		IPAHAL_ERR("Invalid Input\n");
+		return -EINVAL;
+	}
+
+	/* Priority logic is reverse. 0 priority is considered max priority */
+	if (*prio > obj->rule_min_prio || *prio < obj->rule_max_prio) {
+		IPAHAL_ERR("Invalid given priority %d\n", *prio);
+		return -EINVAL;
+	}
+
+	*prio += 1;
+
+	if (*prio > obj->rule_min_prio) {
+		IPAHAL_ERR("Cannot decrease priority. Already on min\n");
+		*prio -= 1;
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/* Does the given ID represent a rule miss?
+ * The rule miss ID is always the max ID possible in the bit-pattern
+ */
+bool ipahal_is_rule_miss_id(u32 id)
+{
+	return (id ==
+		((1U << ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len)
+		-1));
+}
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void)
+{
+	return BIT(ipahal_fltrt_objs[ipahal_ctx->hw_type].rule_id_bit_len - 1);
+}
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void)
+{
+	return  ipahal_fltrt_objs[ipahal_ctx->hw_type].low_rule_id;
+}
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ *  Creates routing header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem)
+{
+	int i;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	if (nhash_hdr_size < (tbls_num * obj->tbl_hdr_width)) {
+		IPAHAL_ERR("Not enough space at non-hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+	if (obj->support_hash &&
+		(hash_hdr_size < (tbls_num * obj->tbl_hdr_width))) {
+		IPAHAL_ERR("Not enough space at hash hdr blk for all tbls\n");
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
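+	/* point every table header entry at the shared empty system table */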
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+	for (i = 0; i < tbls_num; i++)
+		obj->write_val_to_hdr(addr,
+			mem->base + i * obj->tbl_hdr_width);
+
+	return 0;
+}
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ *  Creates filter header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ *  If bitmap is zero -> create tbl without bitmap entry
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem)
+{
+	int flt_spc;
+	u64 flt_bitmap;
+	int i;
+	u64 addr;
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG("Entry - ep_bitmap 0x%llx\n", ep_bitmap);
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!tbls_num || !nhash_hdr_size || !mem) {
+		IPAHAL_ERR("Input Error: tbls_num=%d nhash_hdr_sz=%d mem=%p\n",
+			tbls_num, nhash_hdr_size, mem);
+		return -EINVAL;
+	}
+	if (obj->support_hash && !hash_hdr_size) {
+		IPAHAL_ERR("Input Error: hash_hdr_sz=%d\n", hash_hdr_size);
+		return -EINVAL;
+	}
+
+	if (obj->support_hash) {
+		flt_spc = hash_hdr_size;
+		/* bitmap word */
+		if (ep_bitmap)
+			flt_spc -= obj->tbl_hdr_width;
+		flt_spc /= obj->tbl_hdr_width;
+		if (tbls_num > flt_spc)  {
+			IPAHAL_ERR("space for hash flt hdr is too small\n");
+			WARN_ON(1);
+			return -EPERM;
+		}
+	}
+
+	flt_spc = nhash_hdr_size;
+	/* bitmap word */
+	if (ep_bitmap)
+		flt_spc -= obj->tbl_hdr_width;
+	flt_spc /= obj->tbl_hdr_width;
+	if (tbls_num > flt_spc)  {
+		IPAHAL_ERR("space for non-hash flt hdr is too small\n");
+		WARN_ON(1);
+		return -EPERM;
+	}
+
+	mem->size = tbls_num * obj->tbl_hdr_width;
+	if (ep_bitmap)
+		mem->size += obj->tbl_hdr_width;
+	mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, mem->size,
+		&mem->phys_base, GFP_KERNEL);
+	if (!mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n", mem->size);
+		return -ENOMEM;
+	}
+
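+	/*
+	 * When an EP bitmap is used it occupies the first header word, so
+	 * the table entries below are written starting from index 1.
+	 */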
+	if (ep_bitmap) {
+		flt_bitmap = obj->create_flt_bitmap(ep_bitmap);
+		IPAHAL_DBG("flt bitmap 0x%llx\n", flt_bitmap);
+		obj->write_val_to_hdr(flt_bitmap, mem->base);
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
+
+	if (ep_bitmap) {
+		for (i = 1; i <= tbls_num; i++)
+			obj->write_val_to_hdr(addr,
+				mem->base + i * obj->tbl_hdr_width);
+	} else {
+		for (i = 0; i < tbls_num; i++)
+			obj->write_val_to_hdr(addr,
+				mem->base + i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+}
+
+/*
+ * ipa_fltrt_alloc_init_tbl_hdr() - allocate and initialize buffers for
+ *  flt/rt tables headers to be filled into sram. Init each table to point
+ *  to empty system table
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_init_tbl_hdr(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	u64 addr;
+	int i;
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!params) {
+		IPAHAL_ERR("Input error: params=%p\n", params);
+		return -EINVAL;
+	}
+
+	params->nhash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+	params->nhash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+		params->nhash_hdr.size,
+		&params->nhash_hdr.phys_base, GFP_KERNEL);
+	if (!params->nhash_hdr.base) {
+		IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+			params->nhash_hdr.size);
+		goto nhash_alloc_fail;
+	}
+
+	if (obj->support_hash) {
+		params->hash_hdr.size = params->tbls_num * obj->tbl_hdr_width;
+		params->hash_hdr.base = dma_alloc_coherent(ipahal_ctx->ipa_pdev,
+			params->hash_hdr.size, &params->hash_hdr.phys_base,
+			GFP_KERNEL);
+		if (!params->hash_hdr.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_hdr.size);
+			goto hash_alloc_fail;
+		}
+	}
+
+	addr = obj->create_tbl_addr(true,
+		ipahal_ctx->empty_fltrt_tbl.phys_base);
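+	/* init both the hash and non-hash header entries to point at the
+	 * empty system table
+	 */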
+	for (i = 0; i < params->tbls_num; i++) {
+		obj->write_val_to_hdr(addr,
+			params->nhash_hdr.base + i * obj->tbl_hdr_width);
+		if (obj->support_hash)
+			obj->write_val_to_hdr(addr,
+				params->hash_hdr.base +
+				i * obj->tbl_hdr_width);
+	}
+
+	return 0;
+
+hash_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+nhash_alloc_fail:
+	return -ENOMEM;
+}
+
+/*
+ * ipa_fltrt_alloc_lcl_bdy() - allocate and initialize buffers for
+ *  local flt/rt tables bodies to be filled into sram
+ * @params: Allocate IN and OUT params
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int ipa_fltrt_alloc_lcl_bdy(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* The HAL allocates larger sizes than the given effective ones
+	 * for alignments and border indications
+	 */
+	IPAHAL_DBG_LOW("lcl tbl bdy total effective sizes: hash=%u nhash=%u\n",
+		params->total_sz_lcl_hash_tbls,
+		params->total_sz_lcl_nhash_tbls);
+
+	IPAHAL_DBG_LOW("lcl tbl bdy count: hash=%u nhash=%u\n",
+		params->num_lcl_hash_tbls,
+		params->num_lcl_nhash_tbls);
+
+	/* Pad the sizes to cope with the termination word
+	 * and the H/W local table start offset alignment
+	 */
+	if (params->nhash_bdy.size) {
+		params->nhash_bdy.size = params->total_sz_lcl_nhash_tbls;
+		/* for table terminator */
+		params->nhash_bdy.size += obj->tbl_width *
+			params->num_lcl_nhash_tbls;
+		/* align the start of local rule-set */
+		params->nhash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_nhash_tbls;
+		/* SRAM block size alignment */
+		params->nhash_bdy.size += obj->blk_sz_alignment;
+		params->nhash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG_LOW("nhash lcl tbl bdy total h/w size = %u\n",
+			params->nhash_bdy.size);
+
+		params->nhash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->nhash_bdy.size,
+			&params->nhash_bdy.phys_base, GFP_KERNEL);
+		if (!params->nhash_bdy.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->nhash_bdy.size);
+			return -ENOMEM;
+		}
+		memset(params->nhash_bdy.base, 0, params->nhash_bdy.size);
+	}
+
+	if (!obj->support_hash && params->hash_bdy.size) {
+		IPAHAL_ERR("No HAL Hash tbls support - Will be ignored\n");
+		WARN_ON(1);
+	}
+
+	if (obj->support_hash && params->hash_bdy.size) {
+		params->hash_bdy.size = params->total_sz_lcl_hash_tbls;
+		/* for table terminator */
+		params->hash_bdy.size += obj->tbl_width *
+			params->num_lcl_hash_tbls;
+		/* align the start of local rule-set */
+		params->hash_bdy.size += obj->lcladdr_alignment *
+			params->num_lcl_hash_tbls;
+		/* SRAM block size alignment */
+		params->hash_bdy.size += obj->blk_sz_alignment;
+		params->hash_bdy.size &= ~(obj->blk_sz_alignment);
+
+		IPAHAL_DBG_LOW("hash lcl tbl bdy total h/w size = %u\n",
+			params->hash_bdy.size);
+
+		params->hash_bdy.base = dma_alloc_coherent(
+			ipahal_ctx->ipa_pdev, params->hash_bdy.size,
+			&params->hash_bdy.phys_base, GFP_KERNEL);
+		if (!params->hash_bdy.base) {
+			IPAHAL_ERR("fail to alloc DMA buff of size %d\n",
+				params->hash_bdy.size);
+			goto hash_bdy_fail;
+		}
+		memset(params->hash_bdy.base, 0, params->hash_bdy.size);
+	}
+
+	return 0;
+
+hash_bdy_fail:
+	if (params->nhash_bdy.size)
+		ipahal_free_dma_mem(&params->nhash_bdy);
+
+	return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ *  Usually used during commit.
+ *  Allocates header structures and inits them to point to the empty DDR table.
+ *  Allocates body structures for local body tables.
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	/* Input validation */
+	if (!params) {
+		IPAHAL_ERR("Input err: no params\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	if (ipa_fltrt_alloc_init_tbl_hdr(params)) {
+		IPAHAL_ERR("fail to alloc and init tbl hdr\n");
+		return -ENOMEM;
+	}
+
+	if (ipa_fltrt_alloc_lcl_bdy(params)) {
+		IPAHAL_ERR("fail to alloc tbl bodies\n");
+		goto bdy_alloc_fail;
+	}
+
+	return 0;
+
+bdy_alloc_fail:
+	ipahal_free_dma_mem(&params->nhash_hdr);
+	if (params->hash_hdr.size)
+		ipahal_free_dma_mem(&params->hash_hdr);
+	return -ENOMEM;
+}
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. On input, size holds the effective table size;
+ *  on output, base/phys_base point to the allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem)
+{
+	struct ipahal_fltrt_obj *obj;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!tbl_mem) {
+		IPAHAL_ERR("Input err\n");
+		return -EINVAL;
+	}
+
+	if (!tbl_mem->size) {
+		IPAHAL_ERR("Input err: zero table size\n");
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	/* add word for rule-set terminator */
+	tbl_mem->size += obj->tbl_width;
+
+	tbl_mem->base = dma_alloc_coherent(ipahal_ctx->ipa_pdev, tbl_mem->size,
+		&tbl_mem->phys_base, GFP_KERNEL);
+	if (!tbl_mem->base) {
+		IPAHAL_ERR("fail to alloc DMA buf of size %d\n",
+			tbl_mem->size);
+		return -ENOMEM;
+	}
+	if (tbl_mem->phys_base & obj->sysaddr_alignment) {
+		IPAHAL_ERR("sys rt tbl address is not aligned\n");
+		goto align_err;
+	}
+
+	memset(tbl_mem->base, 0, tbl_mem->size);
+
+	return 0;
+
+align_err:
+	ipahal_free_dma_mem(tbl_mem);
+	return -EPERM;
+}
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ *  Given table addr/offset, adapt it to IPA H/W format and write it
+ *  to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 hwaddr;
+	u8 *hdr;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base) {
+		IPAHAL_ERR("Input err: addr=0x%llx hdr_base=%p\n",
+			addr, hdr_base);
+		return -EINVAL;
+	}
+
+	hdr = (u8 *)hdr_base;
+	hdr += hdr_idx * obj->tbl_hdr_width;
+	hwaddr = obj->create_tbl_addr(is_sys, addr);
+	obj->write_val_to_hdr(hwaddr, hdr);
+
+	return 0;
+}
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ *  content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys)
+{
+	struct ipahal_fltrt_obj *obj;
+	u64 hwaddr;
+	u8 *hdr;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (!addr || !hdr_base || !is_sys) {
+		IPAHAL_ERR("Input err: addr=%p hdr_base=%p is_sys=%p\n",
+			addr, hdr_base, is_sys);
+		return -EINVAL;
+	}
+
+	hdr = (u8 *)hdr_base;
+	hdr += hdr_idx * obj->tbl_hdr_width;
+	hwaddr = *((u64 *)hdr);
+	obj->parse_tbl_addr(hwaddr, addr, is_sys);
+	return 0;
+}
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *tmp = NULL;
+	int rc;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (buf == NULL) {
+		tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!tmp) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = tmp;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buf is not rule start aligned\n");
+		return -EPERM;
+	}
+
+	rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_generate_hw_rule(
+		params, hw_len, buf);
+	if (!tmp && !rc) {
+		/* write the rule-set terminator */
+		memset(buf + *hw_len, 0, obj->tbl_width);
+	}
+
+	kfree(tmp);
+
+	return rc;
+}
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf)
+{
+	struct ipahal_fltrt_obj *obj;
+	u8 *tmp = NULL;
+	int rc;
+
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!params || !hw_len) {
+		IPAHAL_ERR("Input err: params=%p hw_len=%p\n", params, hw_len);
+		return -EINVAL;
+	}
+	if (!params->rule) {
+		IPAHAL_ERR("Input err: invalid rule\n");
+		return -EINVAL;
+	}
+	if (params->ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", params->ipt);
+		return -EINVAL;
+	}
+
+	obj = &ipahal_fltrt_objs[ipahal_ctx->hw_type];
+
+	if (buf == NULL) {
+		tmp = kzalloc(obj->rule_buf_size, GFP_KERNEL);
+		if (!tmp) {
+			IPAHAL_ERR("failed to alloc %u bytes\n",
+				obj->rule_buf_size);
+			return -ENOMEM;
+		}
+		buf = tmp;
+	} else if ((long)buf & obj->rule_start_alignment) {
+		IPAHAL_ERR("buf is not rule start aligned\n");
+		return -EPERM;
+	}
+
+	rc = ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_hw_rule(
+		params, hw_len, buf);
+	if (!tmp && !rc) {
+		/* write the rule-set terminator */
+		memset(buf + *hw_len, 0, obj->tbl_width);
+	}
+
+	kfree(tmp);
+
+	return rc;
+}
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ *  Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ *  for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (ipt >= IPA_IP_MAX) {
+		IPAHAL_ERR("Input err: Invalid ip type %d\n", ipt);
+		return -EINVAL;
+	}
+
+	if (!attrib || !eq_atrb) {
+		IPAHAL_ERR("Input err: attrib=%p eq_atrb=%p\n",
+			attrib, eq_atrb);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_generate_eq(ipt,
+		attrib, eq_atrb);
+}
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].rt_parse_hw_rule(
+		rule_addr, rule);
+}
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule)
+{
+	IPAHAL_DBG_LOW("Entry\n");
+
+	if (!rule_addr || !rule) {
+		IPAHAL_ERR("Input err: rule_addr=%p rule=%p\n",
+			rule_addr, rule);
+		return -EINVAL;
+	}
+
+	return ipahal_fltrt_objs[ipahal_ctx->hw_type].flt_parse_hw_rule(
+		rule_addr, rule);
+}
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
new file mode 100644
index 0000000..ee2704d6
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt.h
@@ -0,0 +1,288 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_H_
+#define _IPAHAL_FLTRT_H_
+
+/*
+ * struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
+ *  The allocation logic will allocate DMA memory representing the header.
+ *  If the bodies are local (SRAM), the allocation will allocate
+ *  DMA buffers that will contain the content of these local tables in raw form
+ * @ipt: IP version type
+ * @tbls_num: Number of tables to represent by the header
+ * @num_lcl_hash_tbls: Number of local (sram) hashable tables
+ * @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
+ * @total_sz_lcl_hash_tbls: Total size of local hashable tables
+ * @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
+ * @hash_hdr/nhash_hdr: OUT params for the header structures
+ * @hash_bdy/nhash_bdy: OUT params for the local body structures
+ */
+struct ipahal_fltrt_alloc_imgs_params {
+	enum ipa_ip_type ipt;
+	u32 tbls_num;
+	u32 num_lcl_hash_tbls;
+	u32 num_lcl_nhash_tbls;
+	u32 total_sz_lcl_hash_tbls;
+	u32 total_sz_lcl_nhash_tbls;
+
+	/* OUT PARAMS */
+	struct ipa_mem_buffer hash_hdr;
+	struct ipa_mem_buffer nhash_hdr;
+	struct ipa_mem_buffer hash_bdy;
+	struct ipa_mem_buffer nhash_bdy;
+};
+
+/*
+ * enum ipahal_rt_rule_hdr_type - Header type used in rt rules
+ * @IPAHAL_RT_RULE_HDR_NONE: No header is used
+ * @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
+ * @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
+ */
+enum ipahal_rt_rule_hdr_type {
+	IPAHAL_RT_RULE_HDR_NONE,
+	IPAHAL_RT_RULE_HDR_RAW,
+	IPAHAL_RT_RULE_HDR_PROC_CTX,
+};
+
+/*
+ * struct ipahal_rt_rule_gen_params - Params for generating rt rule
+ * @ipt: IP family version
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_type: Header type to be used
+ * @hdr_lcl: Is the header located in local (SRAM) or system memory?
+ * @hdr_ofst: Offset of the header in the header table
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_rt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	int dst_pipe_idx;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	u32 priority;
+	u32 id;
+	const struct ipa_rt_rule *rule;
+};
+
+/*
+ * struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
+ * @dst_pipe_idx: Destination pipe index
+ * @hdr_lcl: Is the referenced header located in sram or system mem?
+ * @hdr_ofst: Offset of the header in the header table
+ * @hdr_type: Header type to be used
+ * @priority: Rule priority
+ * @retain_hdr: to retain the removed header in header removal
+ * @id: Rule ID
+ * @eq_attrib: Equations and their params in the rule
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_rt_rule_entry {
+	int dst_pipe_idx;
+	bool hdr_lcl;
+	u32 hdr_ofst;
+	enum ipahal_rt_rule_hdr_type hdr_type;
+	u32 priority;
+	bool retain_hdr;
+	u32 id;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	u32 rule_size;
+};
+
+/*
+ * struct ipahal_flt_rule_gen_params - Params for generating flt rule
+ * @ipt: IP family version
+ * @rt_tbl_idx: Index of the routing table the rule points to
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule: Rule info
+ */
+struct ipahal_flt_rule_gen_params {
+	enum ipa_ip_type ipt;
+	u32 rt_tbl_idx;
+	u32 priority;
+	u32 id;
+	const struct ipa_flt_rule *rule;
+};
+
+/*
+ * struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
+ * @rule: Rule info
+ * @priority: Rule priority
+ * @id: Rule ID
+ * @rule_size: Rule size in memory
+ */
+struct ipahal_flt_rule_entry {
+	struct ipa_flt_rule rule;
+	u32 priority;
+	u32 id;
+	u32 rule_size;
+};
+
+/* Get the H/W table (flt/rt) header width */
+u32 ipahal_get_hw_tbl_hdr_width(void);
+
+/* Get the H/W local table (SRAM) address alignment
+ * Tables headers references to local tables via offsets in SRAM
+ * This function return the alignment of the offset that IPA expects
+ */
+u32 ipahal_get_lcl_tbl_addr_alignment(void);
+
+/*
+ * Rule priority is used to determine rule ordering in the integrated
+ * table, which consists of hashable and non-hashable parts. A rule at
+ * max priority is used as soon as IPA scans it; IPA will not look for
+ * further rules.
+ */
+int ipahal_get_rule_max_priority(void);
+
+/* Given a priority, calc and return the next lower one if it is in
+ * legal range.
+ */
+int ipahal_rule_decrease_priority(int *prio);
+
+/* Does the given ID represent a rule miss? */
+bool ipahal_is_rule_miss_id(u32 id);
+
+/* Get rule ID with high bit only asserted
+ * Used e.g. to create groups of IDs according to this bit
+ */
+u32 ipahal_get_rule_id_hi_bit(void);
+
+/* Get the low value possible to be used for rule-id */
+u32 ipahal_get_low_rule_id(void);
+
+/*
+ * ipahal_rt_generate_empty_img() - Generate empty route image
+ *  Creates routing header buffer for the given tables number.
+ * For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_flt_generate_empty_img() - Generate empty filter image
+ *  Creates filter header buffer for the given tables number.
+ *  For each table, make it point to the empty table on DDR.
+ * @tbls_num: Number of tables. For each will have an entry in the header
+ * @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
+ * @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
+ * @ep_bitmap: Bitmap representing the EP that has flt tables. The format
+ *  should be: bit0->EP0, bit1->EP1
+ * @mem: mem object that points to DMA mem representing the hdr structure
+ */
+int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
+	u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem);
+
+/*
+ * ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
+ *  Usually used during commit.
+ *  Allocates header structures and inits them to point to the empty DDR table.
+ *  Allocates body structures for local body tables.
+ * @params: Parameters for IN and OUT regard the allocation.
+ */
+int ipahal_fltrt_allocate_hw_tbl_imgs(
+	struct ipahal_fltrt_alloc_imgs_params *params);
+
+/*
+ * ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
+ * @tbl_mem: IN/OUT param. On input, size holds the effective table size;
+ *  on output, base/phys_base point to the allocated memory.
+ *
+ * The size is adapted for needed alignments/borders.
+ */
+int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
+
+/*
+ * ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
+ *  Given table addr/offset, adapt it to IPA H/W format and write it
+ *  to given header index.
+ * @addr: Address or offset to be used
+ * @hdr_base: base address of header structure to write the address
+ * @hdr_idx: index of the address in the header structure
+ * @is_sys: Is it system address or local offset
+ */
+int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
+	bool is_sys);
+
+/*
+ * ipahal_fltrt_read_addr_from_hdr() - Given sram address, read its
+ *  content (physical address or offset) and parse it.
+ * @hdr_base: base sram address of the header structure.
+ * @hdr_idx: index of the header entry line in the header structure.
+ * @addr: The parsed address - Out parameter
+ * @is_sys: Is this system or local address - Out parameter
+ */
+int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
+	bool *is_sys);
+
+/*
+ * ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
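+ *
+ * Typical usage (sketch): call once with buf == NULL to obtain the rule
+ * size in hw_len, allocate a rule-start-aligned buffer of at least that
+ * size, then call again with the real buf to emit the rule.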
+ */
+int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
+ * @params: Params for the rule creation.
+ * @hw_len: Size of the H/W rule to be returned
+ * @buf: Buffer to build the rule in. If buf is NULL, then the rule will
+ *  be built in internal temp buf. This is used e.g. to get the rule size
+ *  only.
+ */
+int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
+	u32 *hw_len, u8 *buf);
+
+/*
+ * ipahal_flt_generate_equation() - generate flt rule in equation form
+ *  Will build equation form flt rule from given info.
+ * @ipt: IP family
+ * @attrib: Rule attribute to be generated
+ * @eq_atrb: Equation form generated rule
+ * Note: Usage example: Pass the generated form to other sub-systems
+ *  for inter-subsystems rules exchange.
+ */
+int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
+		const struct ipa_rule_attrib *attrib,
+		struct ipa_ipfltri_rule_eq *eq_atrb);
+
+/*
+ * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_rt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_rt_rule_entry *rule);
+
+/*
+ * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
+ *  Given the rule address, read the rule info from H/W and parse it.
+ * @rule_addr: Rule address (virtual memory)
+ * @rule: Out parameter for parsed rule info
+ */
+int ipahal_flt_parse_hw_rule(u8 *rule_addr,
+	struct ipahal_flt_rule_entry *rule);
+
+#endif /* _IPAHAL_FLTRT_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
new file mode 100644
index 0000000..0c0637d
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_fltrt_i.h
@@ -0,0 +1,143 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_FLTRT_I_H_
+#define _IPAHAL_FLTRT_I_H_
+
+/*
+ * enum ipa_fltrt_equations - RULE equations
+ *  These are named values for the equations that can be used.
+ *  The HAL layer holds the mapping between these names and their H/W
+ *  representation.
+ */
+enum ipa_fltrt_equations {
+	IPA_TOS_EQ,
+	IPA_PROTOCOL_EQ,
+	IPA_TC_EQ,
+	IPA_OFFSET_MEQ128_0,
+	IPA_OFFSET_MEQ128_1,
+	IPA_OFFSET_MEQ32_0,
+	IPA_OFFSET_MEQ32_1,
+	IPA_IHL_OFFSET_MEQ32_0,
+	IPA_IHL_OFFSET_MEQ32_1,
+	IPA_METADATA_COMPARE,
+	IPA_IHL_OFFSET_RANGE16_0,
+	IPA_IHL_OFFSET_RANGE16_1,
+	IPA_IHL_OFFSET_EQ_32,
+	IPA_IHL_OFFSET_EQ_16,
+	IPA_FL_EQ,
+	IPA_IS_FRAG,
+	IPA_EQ_MAX,
+};
+
+/* Width and Alignment values for H/W structures.
+ * Specific for IPA version.
+ */
+#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
+#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
+#define IPA3_0_HW_TBL_WIDTH (8)
+#define IPA3_0_HW_TBL_HDR_WIDTH (8)
+#define IPA3_0_HW_TBL_ADDR_MASK (127)
+#define IPA3_0_HW_RULE_BUF_SIZE (256)
+#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
+
+
+/*
+ * Rules Priority.
+ * Needed due to rules classification to hashable and non-hashable.
+ * Higher priority is lower in number. i.e. 0 is highest priority
+ */
+#define IPA3_0_RULE_MAX_PRIORITY (0)
+#define IPA3_0_RULE_MIN_PRIORITY (1023)
+
+/*
+ * RULE ID, bit length (e.g. 10 bits).
+ */
+#define IPA3_0_RULE_ID_BIT_LEN (10)
+#define IPA3_0_LOW_RULE_ID (1)
+
+/**
+ * struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
+ * @word: routing rule header properties
+ * @en_rule: enable rule - Equation bit fields
+ * @pipe_dest_idx: destination pipe index
+ * @system: Is the referenced header in lcl or sys memory
+ * @hdr_offset: header offset
+ * @proc_ctx: whether hdr_offset points to header table or to
+ *	header processing context table
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @rsvd1: reserved bits
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd2: reserved bits
+ */
+struct ipa3_0_rt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 pipe_dest_idx:5;
+			u64 system:1;
+			u64 hdr_offset:9;
+			u64 proc_ctx:1;
+			u64 priority:10;
+			u64 rsvd1:5;
+			u64 retain_hdr:1;
+			u64 rule_id:10;
+			u64 rsvd2:6;
+		} hdr;
+	} u;
+};
+
+/**
+ * struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
+ * @word: filtering rule properties
+ * @en_rule: enable rule
+ * @action: post filtering action
+ * @rt_tbl_idx: index in routing table
+ * @retain_hdr: added to add back to the packet the header removed
+ *  as part of header removal. This will be done as part of
+ *  header insertion block.
+ * @rsvd1: reserved bits
+ * @priority: Rule priority. Added to distinguish rules order
+ *  at the integrated table consisting from hashable and
+ *  non-hashable parts
+ * @rsvd2: reserved bits
+ * @rule_id: rule ID that will be returned in the packet status
+ * @rsvd3: reserved bits
+ */
+struct ipa3_0_flt_rule_hw_hdr {
+	union {
+		u64 word;
+		struct {
+			u64 en_rule:16;
+			u64 action:5;
+			u64 rt_tbl_idx:5;
+			u64 retain_hdr:1;
+			u64 rsvd1:5;
+			u64 priority:10;
+			u64 rsvd2:6;
+			u64 rule_id:10;
+			u64 rsvd3:6;
+		} hdr;
+	} u;
+};
+
+int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
+void ipahal_fltrt_destroy(void);
+
+#endif /* _IPAHAL_FLTRT_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
new file mode 100644
index 0000000..4c4b666
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_i.h
@@ -0,0 +1,549 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_I_H_
+#define _IPAHAL_I_H_
+
+#include <linux/ipa.h>
+#include "../../ipa_common_i.h"
+
+#define IPAHAL_DRV_NAME "ipahal"
+
+#define IPAHAL_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPAHAL_ERR(fmt, args...) \
+	do { \
+		pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
+			## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+/*
+ * struct ipahal_context - HAL global context data
+ * @hw_type: IPA H/W type/version.
+ * @base: Base address to be used for accessing IPA memory. This is
+ *  an I/O memory mapped address.
+ * @dent: Debugfs folder dir entry
+ * @ipa_pdev: IPA Platform Device. Will be used for DMA memory
+ * @empty_fltrt_tbl: Empty table to be used at tables init.
+ */
+struct ipahal_context {
+	enum ipa_hw_type hw_type;
+	void __iomem *base;
+	struct dentry *dent;
+	struct device *ipa_pdev;
+	struct ipa_mem_buffer empty_fltrt_tbl;
+};
+
+extern struct ipahal_context *ipahal_ctx;
+
+
+
+/* Immediate commands H/W structures */
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_filter_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
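+
+/*
+ * Illustrative sketch (not part of the driver): populating the
+ * IP_V4_FILTER_INIT payload above. The helper and parameter names are
+ * hypothetical; in the real flow the system addresses come from a
+ * DMA-coherent table allocation and the local addresses are offsets
+ * into IPA shared memory.
+ */
+static inline void example_fill_v4_flt_init(
+	struct ipa_imm_cmd_hw_ip_v4_filter_init *cmd,
+	u64 hash_sys_addr, u32 hash_size, u32 hash_local_ofst,
+	u64 nhash_sys_addr, u32 nhash_size, u32 nhash_local_ofst)
+{
+	cmd->hash_rules_addr = hash_sys_addr;	/* source in system mem */
+	cmd->hash_rules_size = hash_size;	/* bytes to copy */
+	cmd->hash_local_addr = hash_local_ofst;	/* dest in shared mem */
+	cmd->nhash_rules_addr = nhash_sys_addr;
+	cmd->nhash_rules_size = nhash_size;
+	cmd->nhash_local_addr = nhash_local_ofst;
+	cmd->rsvd = 0;
+}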
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
+ *  in H/W format.
+ * Inits IPv6 filter block.
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_filter_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
+ *  location, cache address and other related parameters.
+ * @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
+ * @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
+ *  table starts. IPv4 NAT rules that result in NAT collision are located
+ *  in this table.
+ * @index_table_addr: Addr in sys/shared mem where the index table, which
+ *  points to the NAT table, starts
+ * @index_table_expansion_addr: Addr in sys/shared mem where expansion index
+ *  table starts
+ * @table_index: For future support of multiple NAT tables
+ * @rsvd1: reserved
+ * @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
+ * @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
+ *  sys or shared mem
+ * @index_table_addr_type: index_table_addr in sys or shared mem
+ * @index_table_expansion_addr_type: index_table_expansion_addr in
+ *  sys or shared mem
+ * @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
+ * @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
+ *  idx tbl (each)
+ * @rsvd2: reserved
+ * @public_ip_addr: public IP address
+ */
+struct ipa_imm_cmd_hw_ip_v4_nat_init {
+	u64 ipv4_rules_addr:64;
+	u64 ipv4_expansion_rules_addr:64;
+	u64 index_table_addr:64;
+	u64 index_table_expansion_addr:64;
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 ipv4_rules_addr_type:1;
+	u64 ipv4_expansion_rules_addr_type:1;
+	u64 index_table_addr_type:1;
+	u64 index_table_expansion_addr_type:1;
+	u64 size_base_tables:12;
+	u64 size_expansion_tables:10;
+	u64 rsvd2:2;
+	u64 public_ip_addr:32;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
+ *  in H/W format.
+ * Inits IPv4 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v4_routing_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
+ *  in H/W format.
+ * Inits IPv6 routing table/structure - with the rules and other related params
+ * @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules start
+ * @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
+ * @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
+ *  be copied to
+ * @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
+ * @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
+ *  be copied to
+ * @rsvd: reserved
+ * @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
+ */
+struct ipa_imm_cmd_hw_ip_v6_routing_init {
+	u64 hash_rules_addr:64;
+	u64 hash_rules_size:12;
+	u64 hash_local_addr:16;
+	u64 nhash_rules_size:12;
+	u64 nhash_local_addr:16;
+	u64 rsvd:8;
+	u64 nhash_rules_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
+ *  in H/W format.
+ * Inits hdr table within local mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in sys mem where the table starts (SRC)
+ * @size_hdr_table: Size of the above (in bytes)
+ * @hdr_addr: header address in IPA sram (used as DST for memory copy)
+ * @rsvd: reserved
+ */
+struct ipa_imm_cmd_hw_hdr_init_local {
+	u64 hdr_table_addr:64;
+	u64 size_hdr_table:12;
+	u64 hdr_addr:16;
+	u64 rsvd:4;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
+ *  in H/W format
+ * Perform DMA operation on NAT related mem addresses. Copy data into
+ *  different locations within NAT associated tbls. (For add/remove NAT rules)
+ * @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
+ * @rsvd1: reserved
+ * @base_addr: Base addr to which the DMA operation should be performed.
+ * @rsvd2: reserved
+ * @offset: offset in bytes from base addr to write 'data' to
+ * @data: data to be written
+ * @rsvd3: reserved
+ */
+struct ipa_imm_cmd_hw_nat_dma {
+	u64 table_index:3;
+	u64 rsvd1:1;
+	u64 base_addr:2;
+	u64 rsvd2:2;
+	u64 offset:32;
+	u64 data:16;
+	u64 rsvd3:8;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
+ *  in H/W format.
+ * Inits hdr table within sys mem with the hdrs and their length.
+ * @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
+ */
+struct ipa_imm_cmd_hw_hdr_init_system {
+	u64 hdr_table_addr:64;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
+ *  in H/W format.
+ * Configuration for a specific IP pkt. Shall be sent prior to the IP pkt
+ *  data. The pkt will not go through IP pkt processing.
+ * @destination_pipe_index: Destination pipe index (in case routing
+ *  is enabled, this field will overwrite the rt rule)
+ * @rsv1: reserved
+ */
+struct ipa_imm_cmd_hw_ip_packet_init {
+	u64 destination_pipe_index:5;
+	u64 rsv1:59;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
+ *  in H/W format.
+ * Write value to register. Allows reg changes to be synced with data packet
+ *  and other immediate commands. Can be used to access the sram
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
+ * @value: value to write to register
+ * @value_mask: mask specifying which value bits to write to the register
+ * @pipeline_clear_options: options for pipeline to clear
+ *	0: HPS - no pkt inside HPS (not grp specific)
+ *	1: source group - The immediate cmd src grp does not use any pkt ctxs
+ *	2: Wait until no pkt reside inside IPA pipeline
+ *	3: reserved
+ * @rsvd: reserved - should be set to zero
+ */
+struct ipa_imm_cmd_hw_register_write {
+	u64 sw_rsvd:15;
+	u64 skip_pipeline_clear:1;
+	u64 offset:16;
+	u64 value:32;
+	u64 value_mask:32;
+	u64 pipeline_clear_options:2;
+	u64 rsvd:30;
+};
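+
+/*
+ * Illustrative sketch (not part of the driver): composing a masked
+ * REGISTER_WRITE payload. The helper and its arguments are hypothetical;
+ * only the field layout comes from the struct above. Per the field docs,
+ * only the bits set in value_mask are written to the register.
+ */
+static inline void example_fill_reg_write(
+	struct ipa_imm_cmd_hw_register_write *cmd,
+	u16 reg_ofst, u32 value, u32 mask, bool wait_pipeline_clear)
+{
+	cmd->sw_rsvd = 0;
+	cmd->skip_pipeline_clear = wait_pipeline_clear ? 0 : 1;
+	cmd->offset = reg_ofst;			/* lower 16 bits of reg addr */
+	cmd->value = value;
+	cmd->value_mask = mask;			/* bits to actually write */
+	cmd->pipeline_clear_options = 0;	/* 0: HPS clear option */
+	cmd->rsvd = 0;
+}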
+
+/*
+ * struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
+ *  in H/W format.
+ * Perform mem copy into or out of the SW area of IPA local mem
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @size: Size in bytes of data to copy. Expected size is up to 2K bytes
+ * @local_addr: Address in IPA local memory
+ * @direction: Read or write?
+ *	0: IPA write, Write to local address from system address
+ *	1: IPA read, Read from local address to system address
+ * @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
+ * @pipeline_clear_options: options for pipeline to clear
+ *	0: HPS - no pkt inside HPS (not grp specific)
+ *	1: source group - The immediate cmd src grp does not use any pkt ctxs
+ *	2: Wait until no pkt reside inside IPA pipeline
+ *	3: reserved
+ * @rsvd: reserved - should be set to zero
+ * @system_addr: Address in system memory
+ */
+struct ipa_imm_cmd_hw_dma_shared_mem {
+	u64 sw_rsvd:16;
+	u64 size:16;
+	u64 local_addr:16;
+	u64 direction:1;
+	u64 skip_pipeline_clear:1;
+	u64 pipeline_clear_options:2;
+	u64 rsvd:12;
+	u64 system_addr:64;
+};
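+
+/*
+ * Illustrative sketch (not part of the driver): filling a DMA_SHARED_MEM
+ * payload for an IPA write (system memory -> local memory). The helper
+ * and parameter names are hypothetical; only the field layout and the
+ * direction encoding come from the struct docs above.
+ */
+static inline void example_fill_dma_shared_mem_write(
+	struct ipa_imm_cmd_hw_dma_shared_mem *cmd,
+	u64 sys_addr, u16 local_ofst, u16 size)
+{
+	cmd->sw_rsvd = 0;
+	cmd->size = size;		/* up to 2K bytes per the docs */
+	cmd->local_addr = local_ofst;	/* destination in IPA local mem */
+	cmd->direction = 0;		/* 0: IPA write (sys -> local) */
+	cmd->skip_pipeline_clear = 0;	/* wait for pipeline clear */
+	cmd->pipeline_clear_options = 0;
+	cmd->rsvd = 0;
+	cmd->system_addr = sys_addr;	/* source in system memory */
+}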
+
+/*
+ * struct ipa_imm_cmd_hw_ip_packet_tag_status -
+ *  IP_PACKET_TAG_STATUS command payload in H/W format.
+ * This cmd is used to allow SW to track HW processing by setting a TAG
+ *  value that is passed back to SW inside Packet Status information.
+ *  TAG info will be provided as part of Packet Status info generated for
+ *  the next pkt transferred over the pipe.
+ *  This immediate command must be followed by a packet in the same transfer.
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @tag: Tag that is provided back to SW
+ */
+struct ipa_imm_cmd_hw_ip_packet_tag_status {
+	u64 sw_rsvd:16;
+	u64 tag:48;
+};
+
+/*
+ * struct ipa_imm_cmd_hw_dma_task_32b_addr -
+ *	IPA_DMA_TASK_32B_ADDR command payload in H/W format.
+ * Used by clients using 32bit addresses. Used to perform DMA operation on
+ *  multiple descriptors.
+ *  The opcode is dynamic; it holds the number of buffers to process
+ * @sw_rsvd: Ignored by H/W. May be used by S/W
+ * @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire
+ *  DMA related data was completely xfered to its destination.
+ * @eof: End Of Frame flag: When asserted IPA will assert the EOT to the
+ *  dest client. This is used for aggr sequence
+ * @flsh: Flush flag: When asserted, the pkt will go through the IPA blocks
+ *  but will not be xfered to the dest client; it will be discarded instead
+ * @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
+ *  from other EPs in the same src grp (RX queue)
+ * @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
+ *  servicing current EP out of the src EPs of the grp (RX queue)
+ * @size1: Size of buffer1 data
+ * @addr1: Pointer to buffer1 data
+ * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
+ *  only the first one needs to have this field set. It will be ignored
+ *  in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
+ *  must contain this field (2 or more buffers) or EOT.
+ */
+struct ipa_imm_cmd_hw_dma_task_32b_addr {
+	u64 sw_rsvd:11;
+	u64 cmplt:1;
+	u64 eof:1;
+	u64 flsh:1;
+	u64 lock:1;
+	u64 unlock:1;
+	u64 size1:16;
+	u64 addr1:32;
+	u64 packet_size:16;
+};
+
+
+
+/* IPA Status packet H/W structures and info */
+
+/*
+ * struct ipa_status_pkt_hw - IPA status packet payload in H/W format.
+ *  This structure describes the status packet H/W structure for the
+ *   following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
+ *   IPA_STATUS_SUSPENDED_PACKET.
+ *  Other status types have a different status packet structure.
+ * @status_opcode: The Type of the status (Opcode).
+ * @exception: (not bitmask) - the first exception that took place.
+ *  In case of exception, src endp and pkt len are always valid.
+ * @status_mask: Bit mask specifying on which H/W blocks the pkt was processed.
+ * @pkt_len: Pkt pyld len including hdr, including retained hdr if used. Does
+ *  not include padding or checksum trailer len.
+ * @endp_src_idx: Source end point index.
+ * @rsvd1: reserved
+ * @endp_dest_idx: Destination end point index.
+ *  Not valid in case of exception
+ * @rsvd2: reserved
+ * @metadata: meta data value used by packet
+ * @flt_local: Filter table location flag: Does the matching flt rule belong
+ *  to a flt tbl that resides in lcl memory? (if not, then system mem)
+ * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
+ * @flt_global: Global filter rule flag: Does the matching flt rule belong to
+ *  the global flt tbl? (if not, then the per endp tables)
+ * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
+ *  specify to retain the header?
+ * @flt_rule_id: The ID of the matching filter rule. This info can be combined
+ *  with endp_src_idx to locate the exact rule. ID=0x3FF reserved to specify
+ *  flt miss. In case of miss, all flt info to be ignored
+ * @rt_local: Route table location flag: Does the matching rt rule belong to
+ *  a rt tbl that resides in lcl memory? (if not, then system mem)
+ * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
+ * @ucp: UC Processing flag.
+ * @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
+ * @rt_rule_id: The ID of the matching rt rule. This info can be combined
+ *  with rt_tbl_idx to locate the exact rule. ID=0x3FF reserved to specify
+ *  rt miss. In case of miss, all rt info to be ignored
+ * @nat_hit: NAT hit flag: Was there a NAT hit?
+ * @nat_entry_idx: Index of the NAT entry used for NAT processing
+ * @nat_type: Defines the type of the NAT operation:
+ *	00: No NAT
+ *	01: Source NAT
+ *	10: Destination NAT
+ *	11: Reserved
+ * @tag_info: S/W defined value provided via immediate command
+ * @seq_num: Per source endp unique packet sequence number
+ * @time_of_day_ctr: running counter from IPA clock
+ * @hdr_local: Header table location flag: In header insertion, was the header
+ *  taken from the table residing in local memory? (If not, then system mem)
+ * @hdr_offset: Offset of used header in the header table
+ * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
+ * @frag_rule: Frag rule index in H/W frag table in case of frag hit
+ * @hw_specific: H/W specific reserved value
+ */
+struct ipa_pkt_status_hw {
+	u64 status_opcode:8;
+	u64 exception:8;
+	u64 status_mask:16;
+	u64 pkt_len:16;
+	u64 endp_src_idx:5;
+	u64 rsvd1:3;
+	u64 endp_dest_idx:5;
+	u64 rsvd2:3;
+	u64 metadata:32;
+	u64 flt_local:1;
+	u64 flt_hash:1;
+	u64 flt_global:1;
+	u64 flt_ret_hdr:1;
+	u64 flt_rule_id:10;
+	u64 rt_local:1;
+	u64 rt_hash:1;
+	u64 ucp:1;
+	u64 rt_tbl_idx:5;
+	u64 rt_rule_id:10;
+	u64 nat_hit:1;
+	u64 nat_entry_idx:13;
+	u64 nat_type:2;
+	u64 tag_info:48;
+	u64 seq_num:8;
+	u64 time_of_day_ctr:24;
+	u64 hdr_local:1;
+	u64 hdr_offset:10;
+	u64 frag_hit:1;
+	u64 frag_rule:4;
+	u64 hw_specific:16;
+};
+
+/* Size of H/W Packet Status */
+#define IPA3_0_PKT_STATUS_SIZE 32
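+
+/*
+ * Illustrative sanity check (not part of the driver): the bit-fields of
+ * struct ipa_pkt_status_hw are expected to pack into exactly
+ * IPA3_0_PKT_STATUS_SIZE bytes. Assuming BUILD_BUG_ON (linux/bug.h) is
+ * in scope, a compile-time assert like this would catch layout drift:
+ */
+static inline void example_check_pkt_status_size(void)
+{
+	BUILD_BUG_ON(sizeof(struct ipa_pkt_status_hw) !=
+		IPA3_0_PKT_STATUS_SIZE);
+}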
+
+/* Headers and processing context H/W structures and definitions */
+
+/* uCP command numbers */
+#define IPA_HDR_UCP_802_3_TO_802_3 6
+#define IPA_HDR_UCP_802_3_TO_ETHII 7
+#define IPA_HDR_UCP_ETHII_TO_802_3 8
+#define IPA_HDR_UCP_ETHII_TO_ETHII 9
+
+/* Processing context TLV type */
+#define IPA_PROC_CTX_TLV_TYPE_END 0
+#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
+#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_tlv -
+ * HW structure of IPA processing context header - TLV part
+ * @type: 0 - end type
+ *        1 - header addition type
+ *        3 - processing command type
+ * @length: number of bytes after tlv
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header addition length
+ *        3 - number of 32B including type and length.
+ * @value: specific value for type
+ *        for type:
+ *        0 - needs to be 0
+ *        1 - header length
+ *        3 - command ID (see IPA_HDR_UCP_* definitions)
+ */
+struct ipa_hw_hdr_proc_ctx_tlv {
+	u32 type:8;
+	u32 length:8;
+	u32 value:16;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_hdr_add -
+ * HW structure of IPA processing context - add header tlv
+ * @tlv: IPA processing context TLV
+ * @hdr_addr: processing context header address
+ */
+struct ipa_hw_hdr_proc_ctx_hdr_add {
+	struct ipa_hw_hdr_proc_ctx_tlv tlv;
+	u32 hdr_addr;
+};
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
+ * IPA processing context header - add header sequence
+ * @hdr_add: add header command
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
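+
+/*
+ * Illustrative sketch (not part of the driver): filling the minimal
+ * "add header" sequence above. The helper and its parameters are
+ * hypothetical; the TLV type/length/value meanings follow the
+ * ipa_hw_hdr_proc_ctx_tlv documentation (type 1 = header addition,
+ * type 0 = end).
+ */
+static inline void example_build_proc_ctx_add_hdr_seq(
+	struct ipa_hw_hdr_proc_ctx_add_hdr_seq *seq,
+	u32 hdr_addr, u8 hdr_add_len, u8 hdr_len)
+{
+	seq->hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
+	seq->hdr_add.tlv.length = hdr_add_len;	/* header addition length */
+	seq->hdr_add.tlv.value = hdr_len;	/* header length */
+	seq->hdr_add.hdr_addr = hdr_addr;	/* where the header resides */
+
+	seq->end.type = IPA_PROC_CTX_TLV_TYPE_END;
+	seq->end.length = 0;			/* must be 0 for end type */
+	seq->end.value = 0;
+}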
+
+/**
+ * struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq -
+ * IPA processing context header - process command sequence
+ * @hdr_add: add header command
+ * @cmd: tlv processing command (cmd.type must be 3)
+ * @end: tlv end command (cmd.type must be 0)
+ */
+struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
+	struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
+	struct ipa_hw_hdr_proc_ctx_tlv cmd;
+	struct ipa_hw_hdr_proc_ctx_tlv end;
+};
+
+#endif /* _IPAHAL_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
new file mode 100644
index 0000000..e297dea
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -0,0 +1,1562 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/ipa.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipa.h>
+#include "ipahal_i.h"
+#include "ipahal_reg.h"
+#include "ipahal_reg_i.h"
+
+static const char *ipareg_name_to_str[IPA_REG_MAX] = {
+	__stringify(IPA_ROUTE),
+	__stringify(IPA_IRQ_STTS_EE_n),
+	__stringify(IPA_IRQ_EN_EE_n),
+	__stringify(IPA_IRQ_CLR_EE_n),
+	__stringify(IPA_IRQ_SUSPEND_INFO_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_EN_EE_n),
+	__stringify(IPA_SUSPEND_IRQ_CLR_EE_n),
+	__stringify(IPA_BCR),
+	__stringify(IPA_ENABLED_PIPES),
+	__stringify(IPA_COMP_SW_RESET),
+	__stringify(IPA_VERSION),
+	__stringify(IPA_TAG_TIMER),
+	__stringify(IPA_COMP_HW_VERSION),
+	__stringify(IPA_SPARE_REG_1),
+	__stringify(IPA_SPARE_REG_2),
+	__stringify(IPA_COMP_CFG),
+	__stringify(IPA_STATE_AGGR_ACTIVE),
+	__stringify(IPA_ENDP_INIT_HDR_n),
+	__stringify(IPA_ENDP_INIT_HDR_EXT_n),
+	__stringify(IPA_ENDP_INIT_AGGR_n),
+	__stringify(IPA_AGGR_FORCE_CLOSE),
+	__stringify(IPA_ENDP_INIT_ROUTE_n),
+	__stringify(IPA_ENDP_INIT_MODE_n),
+	__stringify(IPA_ENDP_INIT_NAT_n),
+	__stringify(IPA_ENDP_INIT_CTRL_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_EN_n),
+	__stringify(IPA_ENDP_INIT_HOL_BLOCK_TIMER_n),
+	__stringify(IPA_ENDP_INIT_DEAGGR_n),
+	__stringify(IPA_ENDP_INIT_SEQ_n),
+	__stringify(IPA_DEBUG_CNT_REG_n),
+	__stringify(IPA_ENDP_INIT_CFG_n),
+	__stringify(IPA_IRQ_EE_UC_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_MASK_n),
+	__stringify(IPA_ENDP_INIT_HDR_METADATA_n),
+	__stringify(IPA_ENDP_INIT_RSRC_GRP_n),
+	__stringify(IPA_SHARED_MEM_SIZE),
+	__stringify(IPA_SRAM_DIRECT_ACCESS_n),
+	__stringify(IPA_DEBUG_CNT_CTRL_n),
+	__stringify(IPA_UC_MAILBOX_m_n),
+	__stringify(IPA_FILT_ROUT_HASH_FLUSH),
+	__stringify(IPA_SINGLE_NDP_MODE),
+	__stringify(IPA_QCNCM),
+	__stringify(IPA_SYS_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_LOCAL_PKT_PROC_CNTXT_BASE),
+	__stringify(IPA_ENDP_STATUS_n),
+	__stringify(IPA_ENDP_FILTER_ROUTER_HSH_CFG_n),
+	__stringify(IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_01_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_23_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_45_RSRC_TYPE_n),
+	__stringify(IPA_DST_RSRC_GRP_67_RSRC_TYPE_n),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MIN_DEPTH_1),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_0),
+	__stringify(IPA_RX_HPS_CLIENTS_MAX_DEPTH_1),
+	__stringify(IPA_QSB_MAX_WRITES),
+	__stringify(IPA_QSB_MAX_READS),
+	__stringify(IPA_TX_CFG),
+};
+
+static void ipareg_construct_dummy(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	IPAHAL_ERR("No construct function for %s\n",
+		ipahal_reg_name_str(reg));
+	WARN_ON(1);
+}
+
+static void ipareg_parse_dummy(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	IPAHAL_ERR("No parse function for %s\n",
+		ipahal_reg_name_str(reg));
+	WARN_ON(1);
+}
+
+static void ipareg_construct_rx_hps_clients_depth1(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(1));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(2));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(3));
+}
+
+static void ipareg_construct_rx_hps_clients_depth0_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rx_hps_clients *clients =
+		(struct ipahal_reg_rx_hps_clients *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[0],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(0),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(0));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[1],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(1),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(1));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[2],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(2),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(2));
+
+	IPA_SETFIELD_IN_REG(*val, clients->client_minmax[3],
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(3),
+		IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(3));
+}
+
+static void ipareg_construct_rsrg_grp_xy(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK);
+}
+
+static void ipareg_construct_rsrg_grp_xy_v3_5(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_rsrc_grp_cfg *grp =
+		(struct ipahal_reg_rsrc_grp_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, grp->x_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->x_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5);
+
+	/* DST_23 register has only X fields at ipa V3_5 */
+	if (reg == IPA_DST_RSRC_GRP_23_RSRC_TYPE_n)
+		return;
+
+	IPA_SETFIELD_IN_REG(*val, grp->y_min,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5);
+	IPA_SETFIELD_IN_REG(*val, grp->y_max,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5,
+		IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5);
+}
+
+static void ipareg_construct_hash_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->flt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined1,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_id,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_ip_addr,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.src_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.dst_port,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.protocol,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->rt.meta_data,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	IPA_SETFIELD_IN_REG(*val, tuple->undefined2,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_parse_hash_cfg_n(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_fltrt_hash_tuple *tuple =
+		(struct ipahal_reg_fltrt_hash_tuple *)fields;
+
+	tuple->flt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->flt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->flt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK);
+	tuple->flt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->flt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->flt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->flt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined1 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK);
+	tuple->rt.src_id =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK);
+	tuple->rt.src_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK);
+	tuple->rt.dst_ip_addr =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK);
+	tuple->rt.src_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK);
+	tuple->rt.dst_port =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK);
+	tuple->rt.protocol =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK);
+	tuple->rt.meta_data =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK);
+	tuple->undefined2 =
+		IPA_GETFIELD_FROM_REG(val,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT,
+		IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_endp_status_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_ep_cfg_status *ep_status =
+		(struct ipahal_reg_ep_cfg_status *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_en,
+			IPA_ENDP_STATUS_n_STATUS_EN_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_ep,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_status->status_location,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT,
+			IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK);
+}
+
+static void ipareg_construct_qcncm(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_en ? 1 : 0,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->mode_val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qcncm->undefined,
+		0, IPA_QCNCM_MODE_VAL_BMSK);
+}
+
+static void ipareg_parse_qcncm(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_qcncm *qcncm =
+		(struct ipahal_reg_qcncm *)fields;
+
+	memset(qcncm, 0, sizeof(struct ipahal_reg_qcncm));
+	qcncm->mode_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_EN_SHFT,
+		IPA_QCNCM_MODE_EN_BMSK);
+	qcncm->mode_val = IPA_GETFIELD_FROM_REG(val,
+		IPA_QCNCM_MODE_VAL_SHFT,
+		IPA_QCNCM_MODE_VAL_BMSK);
+	qcncm->undefined = IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_UNDEFINED1_BMSK);
+	qcncm->undefined |= IPA_GETFIELD_FROM_REG(val,
+		0, IPA_QCNCM_MODE_UNDEFINED2_BMSK);
+}
+
+static void ipareg_construct_single_ndp_mode(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_single_ndp_mode *mode =
+		(struct ipahal_reg_single_ndp_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, mode->single_ndp_en ? 1 : 0,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, mode->undefined,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_parse_single_ndp_mode(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_single_ndp_mode *mode =
+		(struct ipahal_reg_single_ndp_mode *)fields;
+
+	memset(mode, 0, sizeof(struct ipahal_reg_single_ndp_mode));
+	mode->single_ndp_en = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT,
+		IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK);
+	mode->undefined = IPA_GETFIELD_FROM_REG(val,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT,
+		IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK);
+}
+
+static void ipareg_construct_debug_cnt_ctrl_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_debug_cnt_ctrl *dbg_cnt_ctrl =
+		(struct ipahal_reg_debug_cnt_ctrl *)fields;
+	u8 type;
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->en ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK);
+
+	switch (dbg_cnt_ctrl->type) {
+	case DBG_CNT_TYPE_IPV4_FLTR:
+		type = 0x0;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("No FLT global rules\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV4_ROUT:
+		type = 0x1;
+		break;
+	case DBG_CNT_TYPE_GENERAL:
+		type = 0x2;
+		break;
+	case DBG_CNT_TYPE_IPV6_FLTR:
+		type = 0x4;
+		if (!dbg_cnt_ctrl->rule_idx_pipe_rule) {
+			IPAHAL_ERR("No FLT global rules\n");
+			WARN_ON(1);
+		}
+		break;
+	case DBG_CNT_TYPE_IPV6_ROUT:
+		type = 0x5;
+		break;
+	default:
+		IPAHAL_ERR("Invalid dbg_cnt_ctrl type (%d) for %s\n",
+			dbg_cnt_ctrl->type, ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, type,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->product ? 1 : 0,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->src_pipe,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT,
+		IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK);
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK);
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx_pipe_rule,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK
+			);
+	} else {
+		IPA_SETFIELD_IN_REG(*val, dbg_cnt_ctrl->rule_idx,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT,
+			IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5);
+	}
+}
+
+static void ipareg_parse_shared_mem_size(
+	enum ipahal_reg_name reg, void *fields, u32 val)
+{
+	struct ipahal_reg_shared_mem_size *smem_sz =
+		(struct ipahal_reg_shared_mem_size *)fields;
+
+	memset(smem_sz, 0, sizeof(struct ipahal_reg_shared_mem_size));
+	smem_sz->shared_mem_sz = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK);
+
+	smem_sz->shared_mem_baddr = IPA_GETFIELD_FROM_REG(val,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT,
+		IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK);
+}
+
+static void ipareg_construct_endp_init_rsrc_grp_n_v3_5(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_rsrc_grp *rsrc_grp =
+		(struct ipahal_reg_endp_init_rsrc_grp *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, rsrc_grp->rsrc_grp,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5,
+		IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_metadata *metadata =
+		(struct ipa_ep_cfg_metadata *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, metadata->qmap_id,
+			IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_metadata_mask_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_metadata_mask *metadata_mask =
+		(struct ipa_ep_cfg_metadata_mask *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, metadata_mask->metadata_mask,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT,
+			IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK);
+}
+
+static void ipareg_construct_endp_init_cfg_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_cfg *cfg =
+		(struct ipa_ep_cfg_cfg *)fields;
+	u32 cs_offload_en;
+
+	switch (cfg->cs_offload_en) {
+	case IPA_DISABLE_CS_OFFLOAD:
+		cs_offload_en = 0;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_UL:
+		cs_offload_en = 1;
+		break;
+	case IPA_ENABLE_CS_OFFLOAD_DL:
+		cs_offload_en = 2;
+		break;
+	default:
+		IPAHAL_ERR("Invalid cs_offload_en value for %s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+
+	IPA_SETFIELD_IN_REG(*val, cfg->frag_offload_en ? 1 : 0,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cs_offload_en,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->cs_metadata_hdr_offset,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK);
+	IPA_SETFIELD_IN_REG(*val, cfg->gen_qmb_master_sel,
+			IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT,
+			IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK);
+}
+
+static void ipareg_construct_endp_init_deaggr_n(
+		enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_deaggr *ep_deaggr =
+		(struct ipa_ep_cfg_deaggr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->deaggr_hdr_len,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_valid,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->packet_offset_location,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_deaggr->max_packet_len,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT,
+		IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_en_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->en,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_hol_block_timer_n(
+	enum ipahal_reg_name reg, const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_holb *ep_holb =
+		(struct ipa_ep_cfg_holb *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_holb->tmr_val,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT,
+		IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK);
+}
+
+static void ipareg_construct_endp_init_ctrl_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_ctrl *ep_ctrl =
+		(struct ipa_ep_cfg_ctrl *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_suspend,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_ctrl->ipa_ep_delay,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT,
+		IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK);
+}
+
+static void ipareg_construct_endp_init_nat_n(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_nat *ep_nat =
+		(struct ipa_ep_cfg_nat *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_nat->nat_en,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT,
+		IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK);
+}
+
+static void ipareg_construct_endp_init_mode_n(enum ipahal_reg_name reg,
+		const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_mode *init_mode =
+		(struct ipahal_reg_endp_init_mode *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->ep_mode.mode,
+		IPA_ENDP_INIT_MODE_n_MODE_SHFT,
+		IPA_ENDP_INIT_MODE_n_MODE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, init_mode->dst_pipe_number,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT,
+		IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK);
+}
+
+static void ipareg_construct_endp_init_route_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_endp_init_route *ep_init_rt =
+		(struct ipahal_reg_endp_init_route *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_init_rt->route_table_index,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT,
+		IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK);
+}
+
+static void ipareg_parse_endp_init_aggr_n(enum ipahal_reg_name reg,
+	void *fields, u32 val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	memset(ep_aggr, 0, sizeof(struct ipa_ep_cfg_aggr));
+
+	ep_aggr->aggr_en =
+		(((val & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT)
+			== IPA_ENABLE_AGGR);
+	ep_aggr->aggr =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT);
+	ep_aggr->aggr_byte_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT);
+	ep_aggr->aggr_time_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT);
+	ep_aggr->aggr_pkt_limit =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT);
+	ep_aggr->aggr_sw_eof_active =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK) >>
+			IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT);
+	ep_aggr->aggr_hard_byte_limit_en =
+		((val & IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK)
+			>>
+			IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT);
+}
+
+static void ipareg_construct_endp_init_aggr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_aggr *ep_aggr =
+		(struct ipa_ep_cfg_aggr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_byte_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_time_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_pkt_limit,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_sw_eof_active,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK);
+
+	/* At IPAv3 hard_byte_limit is not supported */
+	ep_aggr->aggr_hard_byte_limit_en = 0;
+	IPA_SETFIELD_IN_REG(*val, ep_aggr->aggr_hard_byte_limit_en,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT,
+		IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_ext_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr_ext *ep_hdr_ext;
+	u8 hdr_endianness;
+
+	ep_hdr_ext = (struct ipa_ep_cfg_hdr_ext *)fields;
+	hdr_endianness = ep_hdr_ext->hdr_little_endian ? 0 : 1;
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_pad_to_alignment,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_offset,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_payload_len_inc_padding,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr_ext->hdr_total_len_or_pad_valid,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, hdr_endianness,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT,
+		IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK);
+}
+
+static void ipareg_construct_endp_init_hdr_n(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipa_ep_cfg_hdr *ep_hdr;
+
+	ep_hdr = (struct ipa_ep_cfg_hdr *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_metadata_reg_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2,
+		IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_remove_additional,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_a5_mux,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_pkt_size_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_additional_const_len,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_ofst_metadata_valid,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, ep_hdr->hdr_len,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT,
+		IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK);
+}
+
+static void ipareg_construct_route(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_route *route;
+
+	route = (struct ipahal_reg_route *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, route->route_dis,
+		IPA_ROUTE_ROUTE_DIS_SHFT,
+		IPA_ROUTE_ROUTE_DIS_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_pipe,
+		IPA_ROUTE_ROUTE_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_table,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_hdr_ofst,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT,
+		IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_frag_def_pipe,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT,
+		IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK);
+
+	IPA_SETFIELD_IN_REG(*val, route->route_def_retain_hdr,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT,
+		IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK);
+}
+
+static void ipareg_construct_qsb_max_writes(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	int *qsb_max_writes = (int *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qsb_max_writes[0],
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qsb_max_writes[1],
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT,
+			    IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK);
+}
+
+static void ipareg_construct_qsb_max_reads(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	int *qsb_max_reads = (int *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, qsb_max_reads[0],
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK);
+	IPA_SETFIELD_IN_REG(*val, qsb_max_reads[1],
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT,
+			    IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK);
+}
+
+static void ipareg_construct_tx_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_tx_cfg *tx_cfg;
+
+	tx_cfg = (struct ipahal_reg_tx_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx0_prefetch_disable,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->tx1_prefetch_disable,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5,
+		IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val, tx_cfg->prefetch_almost_empty_size,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5,
+		IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5);
+}
+
+static void ipareg_construct_idle_indication_cfg(enum ipahal_reg_name reg,
+	const void *fields, u32 *val)
+{
+	struct ipahal_reg_idle_indication_cfg *idle_indication_cfg;
+
+	idle_indication_cfg = (struct ipahal_reg_idle_indication_cfg *)fields;
+
+	IPA_SETFIELD_IN_REG(*val,
+		idle_indication_cfg->enter_idle_debounce_thresh,
+		IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5,
+		IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5);
+
+	IPA_SETFIELD_IN_REG(*val,
+		idle_indication_cfg->const_non_idle_enable,
+		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5,
+		IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5);
+}
+
+/*
+ * struct ipahal_reg_obj - Register H/W information for specific IPA version
+ * @construct - CB to construct register value from abstracted structure
+ * @parse - CB to parse register value to abstracted structure
+ * @offset - register offset relative to base address
+ * @n_ofst - N parameterized register sub-offset
+ */
+struct ipahal_reg_obj {
+	void (*construct)(enum ipahal_reg_name reg, const void *fields,
+		u32 *val);
+	void (*parse)(enum ipahal_reg_name reg, void *fields,
+		u32 val);
+	u32 offset;
+	u32 n_ofst;
+};
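+
+/*
+ * Illustrative sketch (not part of this file): how a register write path
+ * would typically use one of these objects - construct the raw value from
+ * the abstracted fields, then write it at offset + n * n_ofst relative to
+ * the mapped base. Function and variable names here are hypothetical;
+ * kept under #if 0 since it is documentation only.
+ */
+#if 0
+static void example_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+	const void *fields)
+{
+	struct ipahal_reg_obj *obj =
+		&ipahal_reg_objs[ipahal_ctx->hw_type][reg];
+	u32 val = 0;
+
+	obj->construct(reg, fields, &val);
+	iowrite32(val, ipahal_ctx->base + obj->offset + n * obj->n_ofst);
+}
+#endif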
+
+/*
+ * This table contains the info regarding each register for IPAv3 and later.
+ * Information like: offset and construct/parse functions.
+ * All the information for the IPAv3 registers is statically defined below.
+ * If information is missing regarding some register on some IPA version,
+ *  the init function will fill it with the information from the previous
+ *  IPA version.
+ * Information is considered missing if all of the fields are 0.
+ * If offset is -1, this means that the register is removed on the
+ *  specific version.
+ */
+static struct ipahal_reg_obj ipahal_reg_objs[IPA_HW_MAX][IPA_REG_MAX] = {
+	/* IPAv3 */
+	[IPA_HW_v3_0][IPA_ROUTE] = {
+		ipareg_construct_route, ipareg_parse_dummy,
+		0x00000048, 0},
+	[IPA_HW_v3_0][IPA_IRQ_STTS_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003008, 0x1000},
+	[IPA_HW_v3_0][IPA_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000300c, 0x1000},
+	[IPA_HW_v3_0][IPA_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003010, 0x1000},
+	[IPA_HW_v3_0][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003098, 0x1000},
+	[IPA_HW_v3_0][IPA_BCR] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001D0, 0},
+	[IPA_HW_v3_0][IPA_ENABLED_PIPES] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000038, 0},
+	[IPA_HW_v3_0][IPA_COMP_SW_RESET] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000040, 0},
+	[IPA_HW_v3_0][IPA_VERSION] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000034, 0},
+	[IPA_HW_v3_0][IPA_TAG_TIMER] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000060, 0 },
+	[IPA_HW_v3_0][IPA_COMP_HW_VERSION] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000030, 0},
+	[IPA_HW_v3_0][IPA_SPARE_REG_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00005090, 0},
+	[IPA_HW_v3_0][IPA_SPARE_REG_2] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00005094, 0},
+	[IPA_HW_v3_0][IPA_COMP_CFG] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000003C, 0},
+	[IPA_HW_v3_0][IPA_STATE_AGGR_ACTIVE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000010C, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_n] = {
+		ipareg_construct_endp_init_hdr_n, ipareg_parse_dummy,
+		0x00000810, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_EXT_n] = {
+		ipareg_construct_endp_init_hdr_ext_n, ipareg_parse_dummy,
+		0x00000814, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_AGGR_n] = {
+		ipareg_construct_endp_init_aggr_n,
+		ipareg_parse_endp_init_aggr_n,
+		0x00000824, 0x70},
+	[IPA_HW_v3_0][IPA_AGGR_FORCE_CLOSE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001EC, 0},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_ROUTE_n] = {
+		ipareg_construct_endp_init_route_n, ipareg_parse_dummy,
+		0x00000828, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_MODE_n] = {
+		ipareg_construct_endp_init_mode_n, ipareg_parse_dummy,
+		0x00000820, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_NAT_n] = {
+		ipareg_construct_endp_init_nat_n, ipareg_parse_dummy,
+		0x0000080C, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CTRL_n] = {
+		ipareg_construct_endp_init_ctrl_n, ipareg_parse_dummy,
+		0x00000800, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_EN_n] = {
+		ipareg_construct_endp_init_hol_block_en_n,
+		ipareg_parse_dummy,
+		0x0000082c, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HOL_BLOCK_TIMER_n] = {
+		ipareg_construct_endp_init_hol_block_timer_n,
+		ipareg_parse_dummy,
+		0x00000830, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_DEAGGR_n] = {
+		ipareg_construct_endp_init_deaggr_n,
+		ipareg_parse_dummy,
+		0x00000834, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_SEQ_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000083C, 0x70},
+	[IPA_HW_v3_0][IPA_DEBUG_CNT_REG_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000600, 0x4},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_CFG_n] = {
+		ipareg_construct_endp_init_cfg_n, ipareg_parse_dummy,
+		0x00000808, 0x70},
+	[IPA_HW_v3_0][IPA_IRQ_EE_UC_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x0000301c, 0x1000},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_MASK_n] = {
+		ipareg_construct_endp_init_hdr_metadata_mask_n,
+		ipareg_parse_dummy,
+		0x00000818, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_HDR_METADATA_n] = {
+		ipareg_construct_endp_init_hdr_metadata_n,
+		ipareg_parse_dummy,
+		0x0000081c, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n,
+		ipareg_parse_dummy,
+		0x00000838, 0x70},
+	[IPA_HW_v3_0][IPA_SHARED_MEM_SIZE] = {
+		ipareg_construct_dummy, ipareg_parse_shared_mem_size,
+		0x00000054, 0},
+	[IPA_HW_v3_0][IPA_SRAM_DIRECT_ACCESS_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00007000, 0x4},
+	[IPA_HW_v3_0][IPA_DEBUG_CNT_CTRL_n] = {
+		ipareg_construct_debug_cnt_ctrl_n, ipareg_parse_dummy,
+		0x00000640, 0x4},
+	[IPA_HW_v3_0][IPA_UC_MAILBOX_m_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00032000, 0x4},
+	[IPA_HW_v3_0][IPA_FILT_ROUT_HASH_FLUSH] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00000090, 0},
+	[IPA_HW_v3_0][IPA_SINGLE_NDP_MODE] = {
+		ipareg_construct_single_ndp_mode, ipareg_parse_single_ndp_mode,
+		0x00000068, 0},
+	[IPA_HW_v3_0][IPA_QCNCM] = {
+		ipareg_construct_qcncm, ipareg_parse_qcncm,
+		0x00000064, 0},
+	[IPA_HW_v3_0][IPA_SYS_PKT_PROC_CNTXT_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001e0, 0},
+	[IPA_HW_v3_0][IPA_LOCAL_PKT_PROC_CNTXT_BASE] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x000001e8, 0},
+	[IPA_HW_v3_0][IPA_ENDP_STATUS_n] = {
+		ipareg_construct_endp_status_n, ipareg_parse_dummy,
+		0x00000840, 0x70},
+	[IPA_HW_v3_0][IPA_ENDP_FILTER_ROUTER_HSH_CFG_n] = {
+		ipareg_construct_hash_cfg_n, ipareg_parse_hash_cfg_n,
+		0x0000085C, 0x70},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000400, 0x20},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000404, 0x20},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000408, 0x20},
+	[IPA_HW_v3_0][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x0000040C, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000500, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000504, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x00000508, 0x20},
+	[IPA_HW_v3_0][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy, ipareg_parse_dummy,
+		0x0000050c, 0x20},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+		0x000023C4, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+		0x000023C8, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0, ipareg_parse_dummy,
+		0x000023CC, 0},
+	[IPA_HW_v3_0][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+		ipareg_construct_rx_hps_clients_depth1, ipareg_parse_dummy,
+		0x000023D0, 0},
+	[IPA_HW_v3_0][IPA_QSB_MAX_WRITES] = {
+		ipareg_construct_qsb_max_writes, ipareg_parse_dummy,
+		0x00000074, 0},
+	[IPA_HW_v3_0][IPA_QSB_MAX_READS] = {
+		ipareg_construct_qsb_max_reads, ipareg_parse_dummy,
+		0x00000078, 0},
+
+
+	/* IPAv3.1 */
+	[IPA_HW_v3_1][IPA_IRQ_SUSPEND_INFO_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003030, 0x1000},
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_EN_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003034, 0x1000},
+	[IPA_HW_v3_1][IPA_SUSPEND_IRQ_CLR_EE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00003038, 0x1000},
+
+
+	/* IPAv3.5 */
+	[IPA_HW_v3_5][IPA_TX_CFG] = {
+		ipareg_construct_tx_cfg, ipareg_parse_dummy,
+		0x000001FC, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000400, 0x20},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000404, 0x20},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_01_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000500, 0x20},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_23_RSRC_TYPE_n] = {
+		ipareg_construct_rsrg_grp_xy_v3_5, ipareg_parse_dummy,
+		0x00000504, 0x20},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_45_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_DST_RSRC_GRP_67_RSRC_TYPE_n] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_ENDP_INIT_RSRC_GRP_n] = {
+		ipareg_construct_endp_init_rsrc_grp_n_v3_5,
+		ipareg_parse_dummy,
+		0x00000838, 0x70},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v3_5,
+		ipareg_parse_dummy,
+		0x000023C4, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MIN_DEPTH_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_0] = {
+		ipareg_construct_rx_hps_clients_depth0_v3_5,
+		ipareg_parse_dummy,
+		0x000023CC, 0},
+	[IPA_HW_v3_5][IPA_RX_HPS_CLIENTS_MAX_DEPTH_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		-1, 0},
+	[IPA_HW_v3_5][IPA_SPARE_REG_1] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002780, 0},
+	[IPA_HW_v3_5][IPA_SPARE_REG_2] = {
+		ipareg_construct_dummy, ipareg_parse_dummy,
+		0x00002784, 0},
+	[IPA_HW_v3_5][IPA_IDLE_INDICATION_CFG] = {
+		ipareg_construct_idle_indication_cfg, ipareg_parse_dummy,
+		0x00000220, 0},
+};
+
+/*
+ * ipahal_reg_init() - Build the registers information table
+ *  See ipahal_reg_objs[][] comments
+ *
+ * Note: Global variables are zero-initialized, so any register entry that
+ *  was not explicitly overridden stays all-zero; that is how such entries
+ *  are recognized and inherited from the previous HW version.
+ */
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type)
+{
+	int i;
+	int j;
+	struct ipahal_reg_obj zero_obj;
+
+	IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
+
+	if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
+		IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
+		return -EINVAL;
+	}
+
+	memset(&zero_obj, 0, sizeof(zero_obj));
+	for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; i++) {
+		for (j = 0; j < IPA_REG_MAX ; j++) {
+			if (!memcmp(&ipahal_reg_objs[i+1][j], &zero_obj,
+				sizeof(struct ipahal_reg_obj))) {
+				memcpy(&ipahal_reg_objs[i+1][j],
+					&ipahal_reg_objs[i][j],
+					sizeof(struct ipahal_reg_obj));
+			} else {
+				/*
+				 * explicitly overridden register.
+				 * Check validity
+				 */
+				if (!ipahal_reg_objs[i+1][j].offset) {
+					IPAHAL_ERR(
+					  "reg=%s with zero offset ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_reg_objs[i+1][j].construct) {
+					IPAHAL_ERR(
+					  "reg=%s with NULL construct func ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+				if (!ipahal_reg_objs[i+1][j].parse) {
+					IPAHAL_ERR(
+					  "reg=%s with NULL parse func ipa_ver=%d\n",
+					  ipahal_reg_name_str(j), i+1);
+					WARN_ON(1);
+				}
+			}
+		}
+	}
+
+	return 0;
+}
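+
+/*
+ * Illustrative note (not driver code): after ipahal_reg_init(IPA_HW_v3_1)
+ * returns, every register the v3.1 rows above left all-zero has inherited
+ * its v3.0 entry. Only the three SUSPEND IRQ rows are overridden for v3.1,
+ * so, for example:
+ *
+ *	ipahal_reg_objs[IPA_HW_v3_1][IPA_ENDP_INIT_HDR_n].offset == 0x810
+ *
+ * i.e. the v3.0 offset and construct/parse handlers are reused as-is.
+ */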
+
+/*
+ * ipahal_reg_name_str() - returns a string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name)
+{
+	if (reg_name < 0 || reg_name >= IPA_REG_MAX) {
+		IPAHAL_ERR("requested name of invalid reg=%d\n", reg_name);
+		return "Invalid Register";
+	}
+
+	return ipareg_name_to_str[reg_name];
+}
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("read from %s n=%u\n",
+		ipahal_reg_name_str(reg), n);
+
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	return ioread32(ipahal_ctx->base + offset);
+}
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("write to %s m=%u n=%u val=%u\n",
+		ipahal_reg_name_str(reg), m, n, val);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Write access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+	/*
+	 * Currently there is one register with m and n parameters,
+	 *	IPA_UC_MAILBOX_m_n, whose m stride is 0x80.
+	 * If more such registers are added in the future,
+	 *	the m stride can be moved into the table above.
+	 */
+	offset +=  0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	iowrite32(val, ipahal_ctx->base + offset);
+}
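+
+/*
+ * Worked example (sketch, not driver code): per the v3.0 table above,
+ * IPA_UC_MAILBOX_m_n has base offset 0x32000 and n_ofst 0x4, so
+ *
+ *	ipahal_write_reg_mn(IPA_UC_MAILBOX_m_n, 1, 2, val);
+ *
+ * writes to 0x32000 + 0x80 * 1 + 0x4 * 2 = 0x32088 relative to
+ * ipahal_ctx->base.
+ */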
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields)
+{
+	u32 val = 0;
+	u32 offset;
+
+	if (!fields) {
+		IPAHAL_ERR("Input error fields=%p\n", fields);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("read from %s n=%u and parse it\n",
+		ipahal_reg_name_str(reg), n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Read access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	val = ioread32(ipahal_ctx->base + offset);
+	ipahal_reg_objs[ipahal_ctx->hw_type][reg].parse(reg, fields, val);
+
+	return val;
+}
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+		const void *fields)
+{
+	u32 val = 0;
+	u32 offset;
+
+	if (!fields) {
+		IPAHAL_ERR("Input error fields=%p\n", fields);
+		return;
+	}
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		return;
+	}
+
+	IPAHAL_DBG_LOW("write to %s n=%u after constructing it\n",
+		ipahal_reg_name_str(reg), n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Write access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return;
+	}
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+	ipahal_reg_objs[ipahal_ctx->hw_type][reg].construct(reg, fields, &val);
+
+	iowrite32(val, ipahal_ctx->base + offset);
+}
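+
+/*
+ * Usage sketch (hypothetical caller; ep_idx and status_ep_idx are
+ * placeholder indices): the *_fields variants pair the raw access with the
+ * per-register construct/parse handler. Assuming the ENDP_STATUS construct
+ * handler consumes struct ipahal_reg_ep_cfg_status (ipahal_reg.h), enabling
+ * status on an endpoint would look like:
+ *
+ *	struct ipahal_reg_ep_cfg_status cfg = {
+ *		.status_en = true,
+ *		.status_ep = status_ep_idx,
+ *	};
+ *
+ *	ipahal_write_reg_n_fields(IPA_ENDP_STATUS_n, ep_idx, &cfg);
+ */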
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n)
+{
+	u32 offset;
+
+	if (reg >= IPA_REG_MAX) {
+		IPAHAL_ERR("Invalid register reg=%u\n", reg);
+		WARN_ON(1);
+		return -EFAULT;
+	}
+
+	IPAHAL_DBG_LOW("get offset of %s m=%u n=%u\n",
+		ipahal_reg_name_str(reg), m, n);
+	offset = ipahal_reg_objs[ipahal_ctx->hw_type][reg].offset;
+	if (offset == -1) {
+		IPAHAL_ERR("Access to obsolete reg=%s\n",
+			ipahal_reg_name_str(reg));
+		WARN_ON(1);
+		return -EFAULT;
+	}
+	/*
+	 * Currently there is one register with m and n parameters,
+	 *	IPA_UC_MAILBOX_m_n, whose m stride is 0x80.
+	 * If more such registers are added in the future,
+	 *	the m stride can be moved into the table above.
+	 */
+	offset +=  0x80 * m;
+	offset += ipahal_reg_objs[ipahal_ctx->hw_type][reg].n_ofst * n;
+
+	return offset;
+}
+
+u32 ipahal_get_reg_base(void)
+{
+	return 0x00040000;
+}
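+
+/*
+ * Example (sketch): the base above is relative to the start of the IPA
+ * MMIO region, so the full within-IPA offset combines both helpers.
+ * For IPA v3.0, IPA_ENDP_INIT_CTRL_n has offset 0x800 and n_ofst 0x70:
+ *
+ *	u32 ofst = ipahal_get_reg_base() +
+ *		ipahal_get_reg_n_ofst(IPA_ENDP_INIT_CTRL_n, 3);
+ *	// ofst == 0x40000 + 0x800 + 3 * 0x70 == 0x40950
+ */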
+
+
+/*
+ * Specific functions
+ * These functions supply register values for specific operations that
+ *  cannot be handled by the generic accessors above.
+ * E.g. to disable aggregation, specific bits of the AGGR register must be
+ *  written while the other bits are left untouched. Such an operation is
+ *  too specific to be expressed generically, so dedicated functions are
+ *  defined for it here.
+ */
+
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask)
+{
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	valmask->val = (1 & IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK) <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+	valmask->mask = IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT;
+
+	valmask->val |= ((0 & IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK) <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT);
+	valmask->mask |= ((IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK <<
+		IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT));
+}
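+
+/*
+ * Usage sketch (hypothetical caller; ep_idx is a placeholder): the val/mask
+ * pair is meant to be applied with a read-modify-write of the endpoint's
+ * AGGR register, leaving all other bits untouched:
+ *
+ *	struct ipahal_reg_valmask vm;
+ *	u32 reg;
+ *
+ *	ipahal_get_disable_aggr_valmask(&vm);
+ *	reg = ipahal_read_reg_n(IPA_ENDP_INIT_AGGR_n, ep_idx);
+ *	reg = (reg & ~vm.mask) | (vm.val & vm.mask);
+ *	ipahal_write_reg_n(IPA_ENDP_INIT_AGGR_n, ep_idx, reg);
+ */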
+
+u32 ipahal_aggr_get_max_byte_limit(void)
+{
+	return
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT;
+}
+
+u32 ipahal_aggr_get_max_pkt_limit(void)
+{
+	return
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK >>
+		IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT;
+}
+
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask)
+{
+	u32 shft;
+	u32 bmsk;
+
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	if (ipahal_ctx->hw_type <= IPA_HW_v3_1) {
+		shft = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT;
+		bmsk = IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK;
+	} else {
+		shft =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5;
+		bmsk =
+		IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5;
+	}
+
+	IPA_SETFIELD_IN_REG(valmask->val, 1 << ep_idx, shft, bmsk);
+	valmask->mask = bmsk << shft;
+}
+
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!flush || !valmask) {
+		IPAHAL_ERR("Input error: flush=%p ; valmask=%p\n",
+			flush, valmask);
+		return;
+	}
+
+	memset(valmask, 0, sizeof(struct ipahal_reg_valmask));
+
+	if (flush->v6_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT);
+	if (flush->v6_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT);
+	if (flush->v4_rt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT);
+	if (flush->v4_flt)
+		valmask->val |=
+			(1<<IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT);
+
+	valmask->mask = valmask->val;
+}
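+
+/*
+ * Usage sketch (hypothetical caller): flushing only the IPv4 filter hash
+ * cache by writing the computed value to IPA_FILT_ROUT_HASH_FLUSH:
+ *
+ *	struct ipahal_reg_fltrt_hash_flush flush = { .v4_flt = true };
+ *	struct ipahal_reg_valmask vm;
+ *
+ *	ipahal_get_fltrt_hash_flush_valmask(&flush, &vm);
+ *	ipahal_write_reg(IPA_FILT_ROUT_HASH_FLUSH, vm.val);
+ */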
+
+void ipahal_get_status_ep_valmask(int pipe_num,
+	struct ipahal_reg_valmask *valmask)
+{
+	if (!valmask) {
+		IPAHAL_ERR("Input error\n");
+		return;
+	}
+
+	valmask->val =
+		(pipe_num & IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK) <<
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+
+	valmask->mask =
+		IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK <<
+		IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
new file mode 100644
index 0000000..98894c3
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.h
@@ -0,0 +1,460 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_H_
+#define _IPAHAL_REG_H_
+
+#include <linux/ipa.h>
+
+/*
+ * Registers names
+ *
+ * NOTE: Any change to this enum requires a matching change to the
+ *	ipareg_name_to_str array as well.
+ */
+enum ipahal_reg_name {
+	IPA_ROUTE,
+	IPA_IRQ_STTS_EE_n,
+	IPA_IRQ_EN_EE_n,
+	IPA_IRQ_CLR_EE_n,
+	IPA_IRQ_SUSPEND_INFO_EE_n,
+	IPA_SUSPEND_IRQ_EN_EE_n,
+	IPA_SUSPEND_IRQ_CLR_EE_n,
+	IPA_BCR,
+	IPA_ENABLED_PIPES,
+	IPA_COMP_SW_RESET,
+	IPA_VERSION,
+	IPA_TAG_TIMER,
+	IPA_COMP_HW_VERSION,
+	IPA_SPARE_REG_1,
+	IPA_SPARE_REG_2,
+	IPA_COMP_CFG,
+	IPA_STATE_AGGR_ACTIVE,
+	IPA_ENDP_INIT_HDR_n,
+	IPA_ENDP_INIT_HDR_EXT_n,
+	IPA_ENDP_INIT_AGGR_n,
+	IPA_AGGR_FORCE_CLOSE,
+	IPA_ENDP_INIT_ROUTE_n,
+	IPA_ENDP_INIT_MODE_n,
+	IPA_ENDP_INIT_NAT_n,
+	IPA_ENDP_INIT_CTRL_n,
+	IPA_ENDP_INIT_HOL_BLOCK_EN_n,
+	IPA_ENDP_INIT_HOL_BLOCK_TIMER_n,
+	IPA_ENDP_INIT_DEAGGR_n,
+	IPA_ENDP_INIT_SEQ_n,
+	IPA_DEBUG_CNT_REG_n,
+	IPA_ENDP_INIT_CFG_n,
+	IPA_IRQ_EE_UC_n,
+	IPA_ENDP_INIT_HDR_METADATA_MASK_n,
+	IPA_ENDP_INIT_HDR_METADATA_n,
+	IPA_ENDP_INIT_RSRC_GRP_n,
+	IPA_SHARED_MEM_SIZE,
+	IPA_SRAM_DIRECT_ACCESS_n,
+	IPA_DEBUG_CNT_CTRL_n,
+	IPA_UC_MAILBOX_m_n,
+	IPA_FILT_ROUT_HASH_FLUSH,
+	IPA_SINGLE_NDP_MODE,
+	IPA_QCNCM,
+	IPA_SYS_PKT_PROC_CNTXT_BASE,
+	IPA_LOCAL_PKT_PROC_CNTXT_BASE,
+	IPA_ENDP_STATUS_n,
+	IPA_ENDP_FILTER_ROUTER_HSH_CFG_n,
+	IPA_SRC_RSRC_GRP_01_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_23_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_45_RSRC_TYPE_n,
+	IPA_SRC_RSRC_GRP_67_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_01_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_23_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_45_RSRC_TYPE_n,
+	IPA_DST_RSRC_GRP_67_RSRC_TYPE_n,
+	IPA_RX_HPS_CLIENTS_MIN_DEPTH_0,
+	IPA_RX_HPS_CLIENTS_MIN_DEPTH_1,
+	IPA_RX_HPS_CLIENTS_MAX_DEPTH_0,
+	IPA_RX_HPS_CLIENTS_MAX_DEPTH_1,
+	IPA_QSB_MAX_WRITES,
+	IPA_QSB_MAX_READS,
+	IPA_TX_CFG,
+	IPA_IDLE_INDICATION_CFG,
+	IPA_REG_MAX,
+};
+
+/*
+ * struct ipahal_reg_route - IPA route register
+ * @route_dis: route disable
+ * @route_def_pipe: route default pipe
+ * @route_def_hdr_table: route default header table
+ * @route_def_hdr_ofst: route default header offset table
+ * @route_frag_def_pipe: Default pipe to route fragmented exception
+ *    packets and frag new rule status, if the source pipe does not have
+ *    a notification status pipe defined.
+ * @route_def_retain_hdr: default value of retain header. It is used
+ *    when no rule is hit
+ */
+struct ipahal_reg_route {
+	u32 route_dis;
+	u32 route_def_pipe;
+	u32 route_def_hdr_table;
+	u32 route_def_hdr_ofst;
+	u8  route_frag_def_pipe;
+	u32 route_def_retain_hdr;
+};
+
+/*
+ * struct ipahal_reg_endp_init_route - IPA ENDP_INIT_ROUTE_n register
+ * @route_table_index: Default index of routing table (IPA Consumer).
+ */
+struct ipahal_reg_endp_init_route {
+	u32 route_table_index;
+};
+
+/*
+ * struct ipahal_reg_endp_init_rsrc_grp - IPA_ENDP_INIT_RSRC_GRP_n register
+ * @rsrc_grp: Index of group for this ENDP. If this ENDP is a source-ENDP,
+ *	index is for source-resource-group. If destination ENDP, index is
+ *	for destination-resource-group.
+ */
+struct ipahal_reg_endp_init_rsrc_grp {
+	u32 rsrc_grp;
+};
+
+/*
+ * struct ipahal_reg_endp_init_mode - IPA ENDP_INIT_MODE_n register
+ * @dst_pipe_number: This parameter specifies the destination output pipe
+ *	that packets will be routed to. Valid for DMA mode only and for Input
+ *	Pipes only (IPA Consumer)
+ */
+struct ipahal_reg_endp_init_mode {
+	u32 dst_pipe_number;
+	struct ipa_ep_cfg_mode ep_mode;
+};
+
+/*
+ * struct ipahal_reg_shared_mem_size - IPA SHARED_MEM_SIZE register
+ * @shared_mem_sz: Available size [in 8Bytes] of SW partition within
+ *	IPA shared memory.
+ * @shared_mem_baddr: Offset of SW partition within IPA
+ *	shared memory [in 8Bytes]. To get the absolute address of the SW
+ *	partition, add this offset to the IPA_SRAM_DIRECT_ACCESS_n baddr.
+ */
+struct ipahal_reg_shared_mem_size {
+	u32 shared_mem_sz;
+	u32 shared_mem_baddr;
+};
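+
+/*
+ * Example (sketch): reading these fields through the generic HAL accessor;
+ * per the descriptions above, the SW partition start is obtained by adding
+ * shared_mem_baddr (documented in 8-byte units) to the
+ * IPA_SRAM_DIRECT_ACCESS_n base address.
+ *
+ *	struct ipahal_reg_shared_mem_size smem;
+ *
+ *	ipahal_read_reg_fields(IPA_SHARED_MEM_SIZE, &smem);
+ */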
+
+/*
+ * struct ipahal_reg_ep_cfg_status - status configuration in IPA end-point
+ * @status_en: Determines if end point supports Status Indications. SW should
+ *	set this bit in order to enable Statuses. Output Pipe - send
+ *	Status indications only if bit is set. Input Pipe - forward Status
+ *	indication to STATUS_ENDP only if bit is set. Valid for Input
+ *	and Output Pipes (IPA Consumer and Producer)
+ * @status_ep: Statuses generated for this endpoint will be forwarded to the
+ *	specified Status End Point. Status endpoint needs to be
+ *	configured with STATUS_EN=1 Valid only for Input Pipes (IPA
+ *	Consumer)
+ * @status_location: Location of PKT-STATUS on destination pipe.
+ *	If set to 0 (default), PKT-STATUS will be appended before the packet
+ *	for this endpoint. If set to 1, PKT-STATUS will be appended after the
+ *	packet for this endpoint. Valid only for Output Pipes (IPA Producer)
+ */
+struct ipahal_reg_ep_cfg_status {
+	bool status_en;
+	u8 status_ep;
+	bool status_location;
+};
+
+/*
+ * struct ipahal_reg_hash_tuple - Hash tuple members for flt and rt
+ *  each field tells whether the corresponding member is to be masked or not
+ * @src_id: pipe number for flt, table index for rt
+ * @src_ip_addr: IP source address
+ * @dst_ip_addr: IP destination address
+ * @src_port: L4 source port
+ * @dst_port: L4 destination port
+ * @protocol: IP protocol field
+ * @meta_data: packet meta-data
+ *
+ */
+struct ipahal_reg_hash_tuple {
+	/* src_id: pipe in flt, tbl index in rt */
+	bool src_id;
+	bool src_ip_addr;
+	bool dst_ip_addr;
+	bool src_port;
+	bool dst_port;
+	bool protocol;
+	bool meta_data;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_tuple - IPA hash tuple register
+ * @flt: Hash tuple info for filtering
+ * @rt: Hash tuple info for routing
+ * @undefinedX: Undefined/Unused bit fields set of the register
+ */
+struct ipahal_reg_fltrt_hash_tuple {
+	struct ipahal_reg_hash_tuple flt;
+	struct ipahal_reg_hash_tuple rt;
+	u32 undefined1;
+	u32 undefined2;
+};
+
+/*
+ * enum ipahal_reg_dbg_cnt_type - Debug Counter Type
+ * DBG_CNT_TYPE_IPV4_FLTR - Count IPv4 filtering rules
+ * DBG_CNT_TYPE_IPV4_ROUT - Count IPv4 routing rules
+ * DBG_CNT_TYPE_GENERAL - General counter
+ * DBG_CNT_TYPE_IPV6_FLTR - Count IPv6 filtering rules
+ * DBG_CNT_TYPE_IPV6_ROUT - Count IPv6 routing rules
+ */
+enum ipahal_reg_dbg_cnt_type {
+	DBG_CNT_TYPE_IPV4_FLTR,
+	DBG_CNT_TYPE_IPV4_ROUT,
+	DBG_CNT_TYPE_GENERAL,
+	DBG_CNT_TYPE_IPV6_FLTR,
+	DBG_CNT_TYPE_IPV6_ROUT,
+};
+
+/*
+ * struct ipahal_reg_debug_cnt_ctrl - IPA_DEBUG_CNT_CTRL_n register
+ * @en - Enable debug counter
+ * @type - Type of debug counting
+ * @product - False: count bytes; True: count packets
+ * @src_pipe - Specific pipe to match. If 0xFF, no specific pipe is matched
+ * @rule_idx_pipe_rule - Global rule or pipe rule; if pipe rule, the pipe is
+ *	indicated by src_pipe. Starting with IPA v3.5, global rules are not
+ *	supported and this field is ignored.
+ * @rule_idx - Rule index. Irrelevant for type General
+ */
+struct ipahal_reg_debug_cnt_ctrl {
+	bool en;
+	enum ipahal_reg_dbg_cnt_type type;
+	bool product;
+	u8 src_pipe;
+	bool rule_idx_pipe_rule;
+	u16 rule_idx;
+};
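+
+/*
+ * Usage sketch (hypothetical caller): assuming the DEBUG_CNT_CTRL construct
+ * handler consumes this struct, enabling a general-purpose packet counter
+ * on counter index 0 with no specific pipe match would look like:
+ *
+ *	struct ipahal_reg_debug_cnt_ctrl dbg = {
+ *		.en = true,
+ *		.type = DBG_CNT_TYPE_GENERAL,
+ *		.product = true,	// count packets rather than bytes
+ *		.src_pipe = 0xff,	// no specific pipe match
+ *	};
+ *
+ *	ipahal_write_reg_n_fields(IPA_DEBUG_CNT_CTRL_n, 0, &dbg);
+ */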
+
+/*
+ * struct ipahal_reg_rsrc_grp_cfg - Min/Max values for two rsrc groups
+ * @x_min - first group min value
+ * @x_max - first group max value
+ * @y_min - second group min value
+ * @y_max - second group max value
+ */
+struct ipahal_reg_rsrc_grp_cfg {
+	u32 x_min;
+	u32 x_max;
+	u32 y_min;
+	u32 y_max;
+};
+
+/*
+ * struct ipahal_reg_rx_hps_clients - Min or Max values for RX HPS clients
+ * @client_minmax - Min or Max values. In case of depth 0 the 4 values
+ *	are used. In case of depth 1, only the first 2 values are used
+ */
+struct ipahal_reg_rx_hps_clients {
+	u32 client_minmax[4];
+};
+
+/*
+ * struct ipahal_reg_valmask - holding values and masking for registers
+ *	HAL users may require only a value and its mask for some
+ *	register fields.
+ * @val - The value
+ * @mask - The mask of the value
+ */
+struct ipahal_reg_valmask {
+	u32 val;
+	u32 mask;
+};
+
+/*
+ * struct ipahal_reg_fltrt_hash_flush - Flt/Rt flush configuration
+ * @v6_rt - Flush IPv6 Routing cache
+ * @v6_flt - Flush IPv6 Filtering cache
+ * @v4_rt - Flush IPv4 Routing cache
+ * @v4_flt - Flush IPv4 Filtering cache
+ */
+struct ipahal_reg_fltrt_hash_flush {
+	bool v6_rt;
+	bool v6_flt;
+	bool v4_rt;
+	bool v4_flt;
+};
+
+/*
+ * struct ipahal_reg_single_ndp_mode - IPA SINGLE_NDP_MODE register
+ * @single_ndp_en: When set to '1', IPA builds MBIM frames with up to 1
+ *	NDP-header.
+ * @undefined: undefined bits of the register
+ */
+struct ipahal_reg_single_ndp_mode {
+	bool single_ndp_en;
+	u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_qcncm - IPA QCNCM register
+ * @mode_en: When QCNCM_MODE_EN=1, IPA will use QCNCM signature.
+ * @mode_val: Used only when QCNCM_MODE_EN=1 and sets SW Signature in
+ *	the NDP header.
+ * @undefined: undefined bits of the register
+ */
+struct ipahal_reg_qcncm {
+	bool mode_en;
+	u32 mode_val;
+	u32 undefined;
+};
+
+/*
+ * struct ipahal_reg_tx_cfg - IPA TX_CFG register
+ * @tx0_prefetch_disable: Disable prefetch on TX0
+ * @tx1_prefetch_disable: Disable prefetch on TX1
+ * @prefetch_almost_empty_size: Prefetch almost empty size
+ */
+struct ipahal_reg_tx_cfg {
+	bool tx0_prefetch_disable;
+	bool tx1_prefetch_disable;
+	u16 prefetch_almost_empty_size;
+};
+
+/*
+ * struct ipahal_reg_idle_indication_cfg - IPA IDLE_INDICATION_CFG register
+ * @const_non_idle_enable: enable the asserting of the IDLE value and DCD
+ * @enter_idle_debounce_thresh:  configure the debounce threshold
+ */
+struct ipahal_reg_idle_indication_cfg {
+	u16 enter_idle_debounce_thresh;
+	bool const_non_idle_enable;
+};
+
+/*
+ * ipahal_reg_name_str() - returns a string that represents the register
+ * @reg_name: [in] register name
+ */
+const char *ipahal_reg_name_str(enum ipahal_reg_name reg_name);
+
+/*
+ * ipahal_read_reg_n() - Get the raw value of n parameterized reg
+ */
+u32 ipahal_read_reg_n(enum ipahal_reg_name reg, u32 n);
+
+/*
+ * ipahal_write_reg_mn() - Write to m/n parameterized reg a raw value
+ */
+void ipahal_write_reg_mn(enum ipahal_reg_name reg, u32 m, u32 n, u32 val);
+
+/*
+ * ipahal_write_reg_n() - Write to n parameterized reg a raw value
+ */
+static inline void ipahal_write_reg_n(enum ipahal_reg_name reg,
+	u32 n, u32 val)
+{
+	ipahal_write_reg_mn(reg, 0, n, val);
+}
+
+/*
+ * ipahal_read_reg_n_fields() - Get the parsed value of n parameterized reg
+ */
+u32 ipahal_read_reg_n_fields(enum ipahal_reg_name reg, u32 n, void *fields);
+
+/*
+ * ipahal_write_reg_n_fields() - Write to n parameterized reg a parsed value
+ */
+void ipahal_write_reg_n_fields(enum ipahal_reg_name reg, u32 n,
+	const void *fields);
+
+/*
+ * ipahal_read_reg() - Get the raw value of a reg
+ */
+static inline u32 ipahal_read_reg(enum ipahal_reg_name reg)
+{
+	return ipahal_read_reg_n(reg, 0);
+}
+
+/*
+ * ipahal_write_reg() - Write to reg a raw value
+ */
+static inline void ipahal_write_reg(enum ipahal_reg_name reg,
+	u32 val)
+{
+	ipahal_write_reg_mn(reg, 0, 0, val);
+}
+
+/*
+ * ipahal_read_reg_fields() - Get the parsed value of a reg
+ */
+static inline u32 ipahal_read_reg_fields(enum ipahal_reg_name reg, void *fields)
+{
+	return ipahal_read_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * ipahal_write_reg_fields() - Write to reg a parsed value
+ */
+static inline void ipahal_write_reg_fields(enum ipahal_reg_name reg,
+	const void *fields)
+{
+	ipahal_write_reg_n_fields(reg, 0, fields);
+}
+
+/*
+ * Get the offset of a m/n parameterized register
+ */
+u32 ipahal_get_reg_mn_ofst(enum ipahal_reg_name reg, u32 m, u32 n);
+
+/*
+ * Get the offset of a n parameterized register
+ */
+static inline u32 ipahal_get_reg_n_ofst(enum ipahal_reg_name reg, u32 n)
+{
+	return ipahal_get_reg_mn_ofst(reg, 0, n);
+}
+
+/*
+ * Get the offset of a register
+ */
+static inline u32 ipahal_get_reg_ofst(enum ipahal_reg_name reg)
+{
+	return ipahal_get_reg_mn_ofst(reg, 0, 0);
+}
+
+/*
+ * Get the register base address
+ */
+u32 ipahal_get_reg_base(void);
+
+/*
+ * Specific functions
+ * These functions supply register values for specific operations that
+ *  cannot be handled by the generic accessors above.
+ * E.g. to disable aggregation, specific bits of the AGGR register must be
+ *  written while the other bits are left untouched. Such an operation is
+ *  too specific to be expressed generically, so dedicated functions are
+ *  defined for it here.
+ */
+void ipahal_get_disable_aggr_valmask(struct ipahal_reg_valmask *valmask);
+u32 ipahal_aggr_get_max_byte_limit(void);
+u32 ipahal_aggr_get_max_pkt_limit(void);
+void ipahal_get_aggr_force_close_valmask(int ep_idx,
+	struct ipahal_reg_valmask *valmask);
+void ipahal_get_fltrt_hash_flush_valmask(
+	struct ipahal_reg_fltrt_hash_flush *flush,
+	struct ipahal_reg_valmask *valmask);
+void ipahal_get_status_ep_valmask(int pipe_num,
+	struct ipahal_reg_valmask *valmask);
+
+#endif /* _IPAHAL_REG_H_ */
+
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
new file mode 100644
index 0000000..342803f
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg_i.h
@@ -0,0 +1,321 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPAHAL_REG_I_H_
+#define _IPAHAL_REG_I_H_
+
+int ipahal_reg_init(enum ipa_hw_type ipa_hw_type);
+
+#define IPA_SETFIELD(val, shift, mask) (((val) << (shift)) & (mask))
+#define IPA_SETFIELD_IN_REG(reg, val, shift, mask) \
+			(reg |= ((val) << (shift)) & (mask))
+#define IPA_GETFIELD_FROM_REG(reg, shift, mask) \
+		(((reg) & (mask)) >> (shift))
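+
+/*
+ * Example (sketch): how a single field is packed into a raw register word
+ * using the bit definitions below, e.g. setting ENDP_SUSPEND in an
+ * IPA_ENDP_INIT_CTRL_n value:
+ *
+ *	u32 val = 0;
+ *
+ *	IPA_SETFIELD_IN_REG(val, 1,
+ *		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT,
+ *		IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK);
+ *	// IPA_GETFIELD_FROM_REG(val, shift, mask) reverses the packing
+ */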
+
+
+/* IPA_ROUTE register */
+#define IPA_ROUTE_ROUTE_DIS_SHFT 0x0
+#define IPA_ROUTE_ROUTE_DIS_BMSK 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_SHFT 0x1
+#define IPA_ROUTE_ROUTE_DEF_PIPE_BMSK 0x3e
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_SHFT 0x6
+#define IPA_ROUTE_ROUTE_DEF_HDR_TABLE_BMSK 0x40
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_SHFT 0x7
+#define IPA_ROUTE_ROUTE_DEF_HDR_OFST_BMSK 0x1ff80
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_BMSK 0x3e0000
+#define IPA_ROUTE_ROUTE_FRAG_DEF_PIPE_SHFT 0x11
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_BMSK  0x1000000
+#define IPA_ROUTE_ROUTE_DEF_RETAIN_HDR_SHFT 0x18
+
+/* IPA_ENDP_INIT_HDR_n register */
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_BMSK 0x3f
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_BMSK 0x40
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_VALID_SHFT 0x6
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_SHFT 0x7
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_METADATA_BMSK 0x1f80
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_BMSK 0x7e000
+#define IPA_ENDP_INIT_HDR_n_HDR_ADDITIONAL_CONST_LEN_SHFT 0xd
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_BMSK 0x80000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_VALID_SHFT 0x13
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_BMSK 0x3f00000
+#define IPA_ENDP_INIT_HDR_n_HDR_OFST_PKT_SIZE_SHFT 0x14
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_BMSK 0x4000000
+#define IPA_ENDP_INIT_HDR_n_HDR_A5_MUX_SHFT 0x1a
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_BMSK_v2 0x8000000
+#define IPA_ENDP_INIT_HDR_n_HDR_LEN_INC_DEAGG_HDR_SHFT_v2 0x1b
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_BMSK_v2 0x10000000
+#define IPA_ENDP_INIT_HDR_n_HDR_METADATA_REG_VALID_SHFT_v2 0x1c
+
+/* IPA_ENDP_INIT_HDR_EXT_n register */
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_BMSK 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_ENDIANNESS_SHFT 0x0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_BMSK 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_VALID_SHFT 0x1
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_BMSK 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_SHFT 0x2
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_BMSK 0x8
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAYLOAD_LEN_INC_PADDING_SHFT 0x3
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_BMSK 0x3f0
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_TOTAL_LEN_OR_PAD_OFFSET_SHFT 0x4
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_SHFT 0xa
+#define IPA_ENDP_INIT_HDR_EXT_n_HDR_PAD_TO_ALIGNMENT_BMSK_v3_0 0x3c00
+
+/* IPA_ENDP_INIT_AGGR_N register */
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_BMSK	0x1000000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_HARD_BYTE_LIMIT_ENABLE_SHFT	0x18
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_BMSK 0x400000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_FORCE_CLOSE_SHFT 0x16
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_BMSK	0x200000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_SW_EOF_ACTIVE_SHFT	0x15
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_BMSK 0x1f8000
+#define IPA_ENDP_INIT_AGGR_n_AGGR_PKT_LIMIT_SHFT 0xf
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_BMSK 0x7c00
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TIME_LIMIT_SHFT 0xa
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_BMSK 0x3e0
+#define IPA_ENDP_INIT_AGGR_n_AGGR_BYTE_LIMIT_SHFT 0x5
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_BMSK 0x1c
+#define IPA_ENDP_INIT_AGGR_n_AGGR_TYPE_SHFT 0x2
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_BMSK 0x3
+#define IPA_ENDP_INIT_AGGR_n_AGGR_EN_SHFT 0x0
+
+/* IPA_AGGR_FORCE_CLOSE register */
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK 0x3fffffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT 0
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_BMSK_V3_5 0xfffff
+#define IPA_AGGR_FORCE_CLOSE_AGGR_FORCE_CLOSE_PIPE_BITMAP_SHFT_V3_5 0
+
+/* IPA_ENDP_INIT_ROUTE_n register */
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_BMSK 0x1f
+#define IPA_ENDP_INIT_ROUTE_n_ROUTE_TABLE_INDEX_SHFT 0x0
+
+/* IPA_ENDP_INIT_MODE_n register */
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_BMSK 0x40000000
+#define IPA_ENDP_INIT_MODE_n_HDR_FTCH_DISABLE_SHFT 0x1e
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_BMSK 0x20000000
+#define IPA_ENDP_INIT_MODE_n_PAD_EN_SHFT 0x1d
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_BMSK 0x10000000
+#define IPA_ENDP_INIT_MODE_n_PIPE_REPLICATION_EN_SHFT 0x1c
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_BMSK 0xffff000
+#define IPA_ENDP_INIT_MODE_n_BYTE_THRESHOLD_SHFT 0xc
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_BMSK 0x1f0
+#define IPA_ENDP_INIT_MODE_n_DEST_PIPE_INDEX_SHFT 0x4
+#define IPA_ENDP_INIT_MODE_n_MODE_BMSK 0x7
+#define IPA_ENDP_INIT_MODE_n_MODE_SHFT 0x0
+
+/* IPA_ENDP_INIT_NAT_n register */
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_BMSK 0x3
+#define IPA_ENDP_INIT_NAT_n_NAT_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_CTRL_n register */
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_BMSK 0x1
+#define IPA_ENDP_INIT_CTRL_n_ENDP_SUSPEND_SHFT 0x0
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_BMSK 0x2
+#define IPA_ENDP_INIT_CTRL_n_ENDP_DELAY_SHFT 0x1
+
+/* IPA_ENDP_INIT_HOL_BLOCK_EN_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_RMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_MAX 19
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_BMSK 0x1
+#define IPA_ENDP_INIT_HOL_BLOCK_EN_n_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HOL_BLOCK_TIMER_n register */
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HOL_BLOCK_TIMER_n_TIMER_SHFT 0x0
+
+/* IPA_ENDP_INIT_DEAGGR_n register */
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_BMSK 0xFFFF0000
+#define IPA_ENDP_INIT_DEAGGR_n_MAX_PACKET_LEN_SHFT 0x10
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_BMSK 0x3F00
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_LOCATION_SHFT 0x8
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_BMSK  0x80
+#define IPA_ENDP_INIT_DEAGGR_n_PACKET_OFFSET_VALID_SHFT 0x7
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_BMSK 0x3F
+#define IPA_ENDP_INIT_DEAGGR_n_DEAGGR_HDR_LEN_SHFT 0x0
+
+/* IPA_ENDP_INIT_SEQ_n register */
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_BMSK 0xf000
+#define IPA_ENDP_INIT_SEQ_n_DPS_REP_SEQ_TYPE_SHFT 0xc
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_BMSK 0xf00
+#define IPA_ENDP_INIT_SEQ_n_HPS_REP_SEQ_TYPE_SHFT 0x8
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_BMSK 0xf0
+#define IPA_ENDP_INIT_SEQ_n_DPS_SEQ_TYPE_SHFT 0x4
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_BMSK 0xf
+#define IPA_ENDP_INIT_SEQ_n_HPS_SEQ_TYPE_SHFT 0x0
+
+/* IPA_DEBUG_CNT_REG_n register */
+#define IPA_DEBUG_CNT_REG_N_RMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_MAX 15
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_BMSK 0xffffffff
+#define IPA_DEBUG_CNT_REG_N_DBG_CNT_REG_SHFT 0x0
+
+/* IPA_ENDP_INIT_CFG_n register */
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_BMSK 0x100
+#define IPA_ENDP_INIT_CFG_n_CS_GEN_QMB_MASTER_SEL_SHFT 0x8
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_BMSK 0x78
+#define IPA_ENDP_INIT_CFG_n_CS_METADATA_HDR_OFFSET_SHFT 0x3
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_BMSK 0x6
+#define IPA_ENDP_INIT_CFG_n_CS_OFFLOAD_EN_SHFT 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_BMSK 0x1
+#define IPA_ENDP_INIT_CFG_n_FRAG_OFFLOAD_EN_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_MASK_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_MASK_n_METADATA_MASK_SHFT 0x0
+
+/* IPA_ENDP_INIT_HDR_METADATA_n register */
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_BMSK 0xffffffff
+#define IPA_ENDP_INIT_HDR_METADATA_n_METADATA_SHFT 0x0
+
+/* IPA_ENDP_INIT_RSRC_GRP_n register */
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK 0x7
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT 0
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_BMSK_v3_5 0x3
+#define IPA_ENDP_INIT_RSRC_GRP_n_RSRC_GRP_SHFT_v3_5 0
+
+/* IPA_SHARED_MEM_SIZE register */
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_BMSK 0xffff0000
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_BADDR_SHFT 0x10
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_BMSK  0xffff
+#define IPA_SHARED_MEM_SIZE_SHARED_MEM_SIZE_SHFT  0x0
+
+/* IPA_DEBUG_CNT_CTRL_n register */
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_BMSK 0x10000000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_PIPE_RULE_SHFT 0x1c
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK 0x0ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_BMSK_V3_5 0x1ff00000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_RULE_INDEX_SHFT 0x14
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_BMSK 0x1f000
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_SOURCE_PIPE_SHFT 0xc
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_BMSK 0x100
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_PRODUCT_SHFT 0x8
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_BMSK 0x70
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_TYPE_SHFT 0x4
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_BMSK 0x1
+#define IPA_DEBUG_CNT_CTRL_n_DBG_CNT_EN_SHFT 0x0
+
+/* IPA_FILT_ROUT_HASH_FLUSH register */
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_FILT_SHFT 12
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv4_ROUT_SHFT 8
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_FILT_SHFT 4
+#define IPA_FILT_ROUT_HASH_FLUSH_IPv6_ROUT_SHFT 0
+
+/* IPA_SINGLE_NDP_MODE register */
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_BMSK 0xfffffffe
+#define IPA_SINGLE_NDP_MODE_UNDEFINED_SHFT 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_BMSK 0x1
+#define IPA_SINGLE_NDP_MODE_SINGLE_NDP_EN_SHFT 0
+
+/* IPA_QCNCM register */
+#define IPA_QCNCM_MODE_UNDEFINED2_BMSK 0xf0000000
+#define IPA_QCNCM_MODE_UNDEFINED2_SHFT 0x1c
+#define IPA_QCNCM_MODE_VAL_BMSK 0xffffff0
+#define IPA_QCNCM_MODE_VAL_SHFT 0x4
+#define IPA_QCNCM_UNDEFINED1_BMSK 0xe
+#define IPA_QCNCM_UNDEFINED1_SHFT 0x1
+#define IPA_QCNCM_MODE_EN_BMSK 0x1
+#define IPA_QCNCM_MODE_EN_SHFT 0
+
+/* IPA_ENDP_STATUS_n register */
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_BMSK 0x100
+#define IPA_ENDP_STATUS_n_STATUS_LOCATION_SHFT 0x8
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_BMSK 0x3e
+#define IPA_ENDP_STATUS_n_STATUS_ENDP_SHFT 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_BMSK 0x1
+#define IPA_ENDP_STATUS_n_STATUS_EN_SHFT 0x0
+
+/* IPA_ENDP_FILTER_ROUTER_HSH_CFG_n register */
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_SHFT 0
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_ID_BMSK 0x1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_SHFT 1
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_IP_BMSK 0x2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_SHFT 2
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_IP_BMSK 0x4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_SHFT 3
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_SRC_PORT_BMSK 0x8
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_SHFT 4
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_DST_PORT_BMSK 0x10
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_SHFT 5
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_PROTOCOL_BMSK 0x20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_SHFT 6
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_FILTER_HASH_MSK_METADATA_BMSK 0x40
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_SHFT 7
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED1_BMSK 0xff80
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_SHFT 16
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_ID_BMSK 0x10000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_SHFT 17
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_IP_BMSK 0x20000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_SHFT 18
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_IP_BMSK 0x40000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_SHFT 19
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_SRC_PORT_BMSK 0x80000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_SHFT 20
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_DST_PORT_BMSK 0x100000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_SHFT 21
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_PROTOCOL_BMSK 0x200000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_SHFT 22
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_ROUTER_HASH_MSK_METADATA_BMSK 0x400000
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_SHFT 23
+#define IPA_ENDP_FILTER_ROUTER_HSH_CFG_n_UNDEFINED2_BMSK 0xff800000
+
+/* IPA_RSRC_GRP_XY_RSRC_TYPE_n register */
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK 0xFF000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK 0xFF0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK 0xFF00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK 0xFF
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT 0
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_BMSK_V3_5 0x3F000000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MAX_LIM_SHFT_V3_5 24
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_BMSK_V3_5 0x3F0000
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_Y_MIN_LIM_SHFT_V3_5 16
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_BMSK_V3_5 0x3F00
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MAX_LIM_SHFT_V3_5 8
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_BMSK_V3_5 0x3F
+#define IPA_RSRC_GRP_XY_RSRC_TYPE_n_X_MIN_LIM_SHFT_V3_5 0
+
+
+/* IPA_RX_HPS_CLIENTS_MIN/MAX_DEPTH_0/1 registers */
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK(n) (0x7F << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_BMSK_V3_5(n) \
+						(0xF << (8 * (n)))
+#define IPA_RX_HPS_CLIENTS_MINMAX_DEPTH_X_CLIENT_n_SHFT(n) (8 * (n))
+
+/* IPA_QSB_MAX_WRITES register */
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_BMSK (0xf)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_0_MAX_WRITES_SHFT (0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_BMSK (0xf0)
+#define IPA_QSB_MAX_WRITES_GEN_QMB_1_MAX_WRITES_SHFT (4)
+
+/* IPA_QSB_MAX_READS register */
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_BMSK (0xf)
+#define IPA_QSB_MAX_READS_GEN_QMB_0_MAX_READS_SHFT (0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_BMSK (0xf0)
+#define IPA_QSB_MAX_READS_GEN_QMB_1_MAX_READS_SHFT (4)
+
+/* IPA_TX_CFG register */
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_BMSK_V3_5 (0x1)
+#define IPA_TX_CFG_TX0_PREFETCH_DISABLE_SHFT_V3_5 (0)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_BMSK_V3_5 (0x2)
+#define IPA_TX_CFG_TX1_PREFETCH_DISABLE_SHFT_V3_5 (1)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_BMSK_V3_5 (0x1C)
+#define IPA_TX_CFG_PREFETCH_ALMOST_EMPTY_SIZE_SHFT_V3_5 (2)
+
+/* IPA_IDLE_INDICATION_CFG register */
+#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_BMSK_V3_5 (0xffff)
+#define IPA_IDLE_INDICATION_CFG_ENTER_IDLE_DEBOUNCE_THRESH_SHFT_V3_5 (0)
+#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_BMSK_V3_5 (0x10000)
+#define IPA_IDLE_INDICATION_CFG_CONST_NON_IDLE_ENABLE_SHFT_V3_5 (16)
+
+#endif /* _IPAHAL_REG_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
new file mode 100644
index 0000000..e653bcd
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -0,0 +1,2967 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * WWAN Transport Network Driver.
+ */
+
+#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <net/pkt_sched.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include "ipa_qmi_service.h"
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include <linux/ipa.h>
+#include <uapi/linux/net_map.h>
+#include <uapi/linux/msm_rmnet.h>
+#include <net/rmnet_config.h>
+
+#include "ipa_trace.h"
+
+#define WWAN_METADATA_SHFT 24
+#define WWAN_METADATA_MASK 0xFF000000
+#define WWAN_DATA_LEN 2000
+#define IPA_RM_INACTIVITY_TIMER 100 /* IPA_RM */
+#define HEADROOM_FOR_QMAP   8 /* for mux header */
+#define TAILROOM            0 /* for padding by mux layer */
+#define MAX_NUM_OF_MUX_CHANNEL  10 /* max mux channels */
+#define UL_FILTER_RULE_HANDLE_START 69
+#define DEFAULT_OUTSTANDING_HIGH_CTL 96
+#define DEFAULT_OUTSTANDING_HIGH 64
+#define DEFAULT_OUTSTANDING_LOW 32
+
+#define IPA_WWAN_DEV_NAME "rmnet_ipa%d"
+
+#define IPA_WWAN_RX_SOFTIRQ_THRESH 16
+
+#define INVALID_MUX_ID 0xFF
+#define IPA_QUOTA_REACH_ALERT_MAX_SIZE 64
+#define IPA_QUOTA_REACH_IF_NAME_MAX_SIZE 64
+#define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
+#define NAPI_WEIGHT 60
+
+#define IPA_NETDEV() \
+	((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
+	  rmnet_ipa3_ctx->wwan_priv->net : NULL)
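+
+/*
+ * Usage sketch: IPA_NETDEV() is the NULL-safe accessor for the WWAN netdev;
+ * callers check the result before touching the device, e.g.
+ *
+ *	struct net_device *dev = IPA_NETDEV();
+ *
+ *	if (dev && netif_queue_stopped(dev))
+ *		netif_wake_queue(dev);
+ */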
+
+
+static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
+static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
+static void ipa3_wwan_msg_free_cb(void*, u32, u32);
+static void ipa3_rmnet_rx_cb(void *priv);
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget);
+
+static void ipa3_wake_tx_queue(struct work_struct *work);
+static DECLARE_WORK(ipa3_tx_wakequeue_work, ipa3_wake_tx_queue);
+
+static void tethering_stats_poll_queue(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa_tether_stats_poll_wakequeue_work,
+			    tethering_stats_poll_queue);
+
+enum ipa3_wwan_device_status {
+	WWAN_DEVICE_INACTIVE = 0,
+	WWAN_DEVICE_ACTIVE   = 1
+};
+
+struct ipa3_rmnet_plat_drv_res {
+	bool ipa_rmnet_ssr;
+	bool ipa_loaduC;
+	bool ipa_advertise_sg_support;
+	bool ipa_napi_enable;
+};
+
+/**
+ * struct ipa3_wwan_private - WWAN private data
+ * @net: network interface struct implemented by this driver
+ * @stats: iface statistics
+ * @outstanding_pkts: number of packets sent to IPA without TX complete ACKed
+ * @outstanding_high: number of outstanding packets allowed
+ * @outstanding_low: number of outstanding packets below which a stopped
+ *	TX queue is woken again
+ * @ch_id: channel id
+ * @lock: spinlock for mutual exclusion
+ * @device_status: holds device status
+ *
+ * WWAN private - holds all relevant info about WWAN driver
+ */
+struct ipa3_wwan_private {
+	struct net_device *net;
+	struct net_device_stats stats;
+	atomic_t outstanding_pkts;
+	int outstanding_high_ctl;
+	int outstanding_high;
+	int outstanding_low;
+	uint32_t ch_id;
+	spinlock_t lock;
+	struct completion resource_granted_completion;
+	enum ipa3_wwan_device_status device_status;
+	struct napi_struct napi;
+};
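+
+/*
+ * Flow-control sketch (an assumption about how the thresholds above are
+ * used; see the xmit and TX-complete paths later in this file): the xmit
+ * handler stops the netdev TX queue once
+ * atomic_read(&outstanding_pkts) >= outstanding_high, and the TX-complete
+ * callback wakes it again once the count drops below outstanding_low.
+ */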
+
+struct rmnet_ipa3_context {
+	struct ipa3_wwan_private *wwan_priv;
+	struct ipa_sys_connect_params apps_to_ipa_ep_cfg;
+	struct ipa_sys_connect_params ipa_to_apps_ep_cfg;
+	u32 qmap_hdr_hdl;
+	u32 dflt_v4_wan_rt_hdl;
+	u32 dflt_v6_wan_rt_hdl;
+	struct ipa3_rmnet_mux_val mux_channel[MAX_NUM_OF_MUX_CHANNEL];
+	int num_q6_rules;
+	int old_num_q6_rules;
+	int rmnet_index;
+	bool egress_set;
+	bool a7_ul_flt_set;
+	struct workqueue_struct *rm_q6_wq;
+	atomic_t is_initialized;
+	atomic_t is_ssr;
+	void *subsys_notify_handle;
+	u32 apps_to_ipa3_hdl;
+	u32 ipa3_to_apps_hdl;
+	struct mutex ipa_to_apps_pipe_handle_guard;
+};
+
+static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
+static struct ipa3_rmnet_plat_drv_res ipa3_rmnet_res;
+
+/**
+* ipa3_setup_a7_qmap_hdr() - Setup default a7 qmap hdr
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+static int ipa3_setup_a7_qmap_hdr(void)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	u32 pyld_sz;
+	int ret;
+
+	/* install the basic exception header */
+	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!hdr) {
+		IPAWANERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	strlcpy(hdr_entry->name, IPA_A7_QMAP_HDR_NAME,
+				IPA_RESOURCE_NAME_MAX);
+	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+
+	if (ipa3_add_hdr(hdr)) {
+		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAWANERR("fail to add IPA_A7_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+	rmnet_ipa3_ctx->qmap_hdr_hdl = hdr_entry->hdr_hdl;
+
+	ret = 0;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+static void ipa3_del_a7_qmap_hdr(void)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *hdl_entry;
+	u32 pyld_sz;
+	int ret;
+
+	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+		      sizeof(struct ipa_hdr_del);
+	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!del_hdr) {
+		IPAWANERR("fail to alloc exception hdr_del\n");
+		return;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	hdl_entry = &del_hdr->hdl[0];
+	hdl_entry->hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
+
+	ret = ipa3_del_hdr(del_hdr);
+	if (ret || hdl_entry->status)
+		IPAWANERR("ipa3_del_hdr failed\n");
+	else
+		IPAWANDBG("hdrs deletion done\n");
+
+	rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
+	kfree(del_hdr);
+}
+
+static void ipa3_del_qmap_hdr(uint32_t hdr_hdl)
+{
+	struct ipa_ioc_del_hdr *del_hdr;
+	struct ipa_hdr_del *hdl_entry;
+	u32 pyld_sz;
+	int ret;
+
+	if (hdr_hdl == 0) {
+		IPAWANERR("Invalid hdr_hdl provided\n");
+		return;
+	}
+
+	pyld_sz = sizeof(struct ipa_ioc_del_hdr) + 1 *
+		sizeof(struct ipa_hdr_del);
+	del_hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!del_hdr) {
+		IPAWANERR("fail to alloc exception hdr_del\n");
+		return;
+	}
+
+	del_hdr->commit = 1;
+	del_hdr->num_hdls = 1;
+	hdl_entry = &del_hdr->hdl[0];
+	hdl_entry->hdl = hdr_hdl;
+
+	ret = ipa3_del_hdr(del_hdr);
+	if (ret || hdl_entry->status)
+		IPAWANERR("ipa3_del_hdr failed\n");
+	else
+		IPAWANDBG("header deletion done\n");
+
+	rmnet_ipa3_ctx->qmap_hdr_hdl = 0;
+	kfree(del_hdr);
+}
+
+static void ipa3_del_mux_qmap_hdrs(void)
+{
+	int index;
+
+	for (index = 0; index < rmnet_ipa3_ctx->rmnet_index; index++) {
+		ipa3_del_qmap_hdr(rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+		rmnet_ipa3_ctx->mux_channel[index].hdr_hdl = 0;
+	}
+}
+
+static int ipa3_add_qmap_hdr(uint32_t mux_id, uint32_t *hdr_hdl)
+{
+	struct ipa_ioc_add_hdr *hdr;
+	struct ipa_hdr_add *hdr_entry;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+	u32 pyld_sz;
+	int ret;
+
+	pyld_sz = sizeof(struct ipa_ioc_add_hdr) + 1 *
+		      sizeof(struct ipa_hdr_add);
+	hdr = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!hdr) {
+		IPAWANERR("fail to alloc exception hdr\n");
+		return -ENOMEM;
+	}
+	hdr->num_hdrs = 1;
+	hdr->commit = 1;
+	hdr_entry = &hdr->hdr[0];
+
+	snprintf(hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 mux_id);
+	strlcpy(hdr_entry->name, hdr_name,
+				IPA_RESOURCE_NAME_MAX);
+
+	hdr_entry->hdr_len = IPA_QMAP_HEADER_LENGTH; /* 4 bytes */
+	hdr_entry->hdr[1] = (uint8_t) mux_id;
+	IPAWANDBG("header (%s) with mux-id: (%d)\n",
+		hdr_name,
+		hdr_entry->hdr[1]);
+	if (ipa3_add_hdr(hdr)) {
+		IPAWANERR("fail to add IPA_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	if (hdr_entry->status) {
+		IPAWANERR("fail to add IPA_QMAP hdr\n");
+		ret = -EPERM;
+		goto bail;
+	}
+
+	ret = 0;
+	*hdr_hdl = hdr_entry->hdr_hdl;
+bail:
+	kfree(hdr);
+	return ret;
+}
+
+/**
+* ipa3_setup_dflt_wan_rt_tables() - Setup default wan routing tables
+*
+* Return codes:
+* 0: success
+* -ENOMEM: failed to allocate memory
+* -EPERM: failed to add the tables
+*/
+static int ipa3_setup_dflt_wan_rt_tables(void)
+{
+	struct ipa_ioc_add_rt_rule *rt_rule;
+	struct ipa_rt_rule_add *rt_rule_entry;
+
+	rt_rule =
+	   kzalloc(sizeof(struct ipa_ioc_add_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_add), GFP_KERNEL);
+	if (!rt_rule) {
+		IPAWANERR("fail to alloc mem\n");
+		return -ENOMEM;
+	}
+	/* setup a default v4 route to point to Apps */
+	rt_rule->num_rules = 1;
+	rt_rule->commit = 1;
+	rt_rule->ip = IPA_IP_v4;
+	strlcpy(rt_rule->rt_tbl_name, IPA_DFLT_WAN_RT_TBL_NAME,
+			IPA_RESOURCE_NAME_MAX);
+
+	rt_rule_entry = &rt_rule->rules[0];
+	rt_rule_entry->at_rear = 1;
+	rt_rule_entry->rule.dst = IPA_CLIENT_APPS_WAN_CONS;
+	rt_rule_entry->rule.hdr_hdl = rmnet_ipa3_ctx->qmap_hdr_hdl;
+
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAWANERR("fail to add dflt_wan v4 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+
+	IPAWANDBG("dflt v4 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+	/* setup a default v6 route to point to A5 */
+	rt_rule->ip = IPA_IP_v6;
+	if (ipa3_add_rt_rule(rt_rule)) {
+		IPAWANERR("fail to add dflt_wan v6 rule\n");
+		kfree(rt_rule);
+		return -EPERM;
+	}
+	IPAWANDBG("dflt v6 rt rule hdl=%x\n", rt_rule_entry->rt_rule_hdl);
+	rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl = rt_rule_entry->rt_rule_hdl;
+
+	kfree(rt_rule);
+	return 0;
+}
+
+static void ipa3_del_dflt_wan_rt_tables(void)
+{
+	struct ipa_ioc_del_rt_rule *rt_rule;
+	struct ipa_rt_rule_del *rt_rule_entry;
+	int len;
+
+	len = sizeof(struct ipa_ioc_del_rt_rule) + 1 *
+			   sizeof(struct ipa_rt_rule_del);
+	rt_rule = kzalloc(len, GFP_KERNEL);
+	if (!rt_rule) {
+		IPAWANERR("unable to allocate memory for del route rule\n");
+		return;
+	}
+
+	memset(rt_rule, 0, len);
+	rt_rule->commit = 1;
+	rt_rule->num_hdls = 1;
+	rt_rule->ip = IPA_IP_v4;
+
+	rt_rule_entry = &rt_rule->hdl[0];
+	rt_rule_entry->status = -1;
+	rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v4_wan_rt_hdl;
+
+	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+		rt_rule_entry->hdl, IPA_IP_v4);
+	if (ipa3_del_rt_rule(rt_rule) ||
+			(rt_rule_entry->status)) {
+		IPAWANERR("Routing rule deletion failed!\n");
+	}
+
+	rt_rule->ip = IPA_IP_v6;
+	rt_rule_entry->hdl = rmnet_ipa3_ctx->dflt_v6_wan_rt_hdl;
+	IPAWANERR("Deleting Route hdl:(0x%x) with ip type: %d\n",
+		rt_rule_entry->hdl, IPA_IP_v6);
+	if (ipa3_del_rt_rule(rt_rule) ||
+			(rt_rule_entry->status)) {
+		IPAWANERR("Routing rule deletion failed!\n");
+	}
+
+	kfree(rt_rule);
+}
+
+int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
+		*rule_req)
+{
+	int i, j;
+
+	if (rule_req->filter_spec_ex_list_valid == true) {
+		rmnet_ipa3_ctx->num_q6_rules =
+			rule_req->filter_spec_ex_list_len;
+		IPAWANDBG("Received (%d) install_flt_req\n",
+			rmnet_ipa3_ctx->num_q6_rules);
+	} else {
+		rmnet_ipa3_ctx->num_q6_rules = 0;
+		IPAWANERR("got no UL rules from modem\n");
+		return -EINVAL;
+	}
+
+	/* copy UL filter rules from the modem */
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		/* check if rules exceed the cache */
+		if (i == MAX_NUM_Q6_RULE) {
+			IPAWANERR("Reaching (%d) max cache ",
+				MAX_NUM_Q6_RULE);
+			IPAWANERR(" however total (%d)\n",
+				rmnet_ipa3_ctx->num_q6_rules);
+			goto failure;
+		}
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].ip =
+			rule_req->filter_spec_ex_list[i].ip_type;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].action =
+			rule_req->filter_spec_ex_list[i].filter_action;
+		if (rule_req->filter_spec_ex_list[i].
+			is_routing_table_index_valid == true)
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx =
+			rule_req->filter_spec_ex_list[i].route_table_index;
+		if (rule_req->filter_spec_ex_list[i].is_mux_id_valid == true)
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].mux_id =
+			rule_req->filter_spec_ex_list[i].mux_id;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id =
+			rule_req->filter_spec_ex_list[i].rule_id;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable =
+			rule_req->filter_spec_ex_list[i].is_rule_hashable;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.rule_eq_bitmap =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			rule_eq_bitmap;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq_present =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			tos_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tos_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.tos_eq;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			protocol_eq_present = rule_req->filter_spec_ex_list[i].
+			filter_rule.protocol_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.protocol_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			protocol_eq;
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_ihl_offset_range_16 =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.num_ihl_offset_range_16;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_ihl_offset_range_16; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].offset = rule_req->
+			filter_spec_ex_list[i].filter_rule.
+			ihl_offset_range_16[j].offset;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].range_low = rule_req->
+			filter_spec_ex_list[i].filter_rule.
+			ihl_offset_range_16[j].range_low;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ihl_offset_range_16[j].range_high = rule_req->
+			filter_spec_ex_list[i].filter_rule.
+			ihl_offset_range_16[j].range_high;
+		}
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.num_offset_meq_32 =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			num_offset_meq_32;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				num_offset_meq_32; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].offset =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.offset_meq_32[j].offset;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].mask =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.offset_meq_32[j].mask;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			offset_meq_32[j].value =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.offset_meq_32[j].value;
+		}
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq_present =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.tc_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.tc_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.tc_eq;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq_present =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			flow_eq_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.fl_eq =
+			rule_req->filter_spec_ex_list[i].filter_rule.flow_eq;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16_present = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_16_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16.offset = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_16.offset;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_16.value = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_16.value;
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32_present = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_32_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32.offset = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_32.offset;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		ihl_offset_eq_32.value = rule_req->filter_spec_ex_list[i].
+		filter_rule.ihl_offset_eq_32.value;
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+		num_ihl_offset_meq_32 = rule_req->filter_spec_ex_list[i].
+		filter_rule.num_ihl_offset_meq_32;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].
+			eq_attrib.num_ihl_offset_meq_32; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].offset = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				ihl_offset_meq_32[j].offset;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].mask = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				ihl_offset_meq_32[j].mask;
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				ihl_offset_meq_32[j].value = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				ihl_offset_meq_32[j].value;
+		}
+		ipa3_qmi_ctx->
+			q6_ul_filter_rule[i].eq_attrib.num_offset_meq_128 =
+			rule_req->filter_spec_ex_list[i].filter_rule.
+			num_offset_meq_128;
+		for (j = 0; j < ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			num_offset_meq_128; j++) {
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+				offset_meq_128[j].offset = rule_req->
+				filter_spec_ex_list[i].filter_rule.
+				offset_meq_128[j].offset;
+			memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+					offset_meq_128[j].mask,
+					rule_req->filter_spec_ex_list[i].
+					filter_rule.offset_meq_128[j].mask, 16);
+			memcpy(ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+					offset_meq_128[j].value, rule_req->
+					filter_spec_ex_list[i].filter_rule.
+					offset_meq_128[j].value, 16);
+		}
+
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32_present =
+				rule_req->filter_spec_ex_list[i].
+				filter_rule.metadata_meq32_present;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32.offset =
+			rule_req->filter_spec_ex_list[i].
+			filter_rule.metadata_meq32.offset;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			metadata_meq32.mask = rule_req->filter_spec_ex_list[i].
+			filter_rule.metadata_meq32.mask;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.metadata_meq32.
+			value = rule_req->filter_spec_ex_list[i].filter_rule.
+			metadata_meq32.value;
+		ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib.
+			ipv4_frag_eq_present = rule_req->filter_spec_ex_list[i].
+			filter_rule.ipv4_frag_eq_present;
+	}
+
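+	/* flag the rules referenced by the modem's XLAT filter index list */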
+	if (rule_req->xlat_filter_indices_list_valid) {
+		if (rule_req->xlat_filter_indices_list_len >
+		    rmnet_ipa3_ctx->num_q6_rules) {
+			IPAWANERR("Number of xlat indices is not valid: %d\n",
+					rule_req->xlat_filter_indices_list_len);
+			goto failure;
+		}
+		IPAWANDBG("Receive %d XLAT indices: ",
+				rule_req->xlat_filter_indices_list_len);
+		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++)
+			IPAWANDBG("%d ", rule_req->xlat_filter_indices_list[i]);
+		IPAWANDBG("\n");
+
+		for (i = 0; i < rule_req->xlat_filter_indices_list_len; i++) {
+			if (rule_req->xlat_filter_indices_list[i]
+				>= rmnet_ipa3_ctx->num_q6_rules) {
+				IPAWANERR("Xlat rule idx is wrong: %d\n",
+					rule_req->xlat_filter_indices_list[i]);
+				goto failure;
+			} else {
+				ipa3_qmi_ctx->q6_ul_filter_rule
+				[rule_req->xlat_filter_indices_list[i]]
+				.is_xlat_rule = 1;
+				IPAWANDBG("Rule %d is xlat rule\n",
+					rule_req->xlat_filter_indices_list[i]);
+			}
+		}
+	}
+	goto success;
+
+failure:
+	rmnet_ipa3_ctx->num_q6_rules = 0;
+	memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
+		sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
+	return -EINVAL;
+
+success:
+	return 0;
+}
+
+static int ipa3_wwan_add_ul_flt_rule_to_ipa(void)
+{
+	u32 pyld_sz;
+	int i, retval = 0;
+	struct ipa_ioc_add_flt_rule *param;
+	struct ipa_flt_rule_add flt_rule_entry;
+	struct ipa_fltr_installed_notif_req_msg_v01 *req;
+
+	pyld_sz = sizeof(struct ipa_ioc_add_flt_rule) +
+	   sizeof(struct ipa_flt_rule_add);
+	param = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!param)
+		return -ENOMEM;
+
+	req = kzalloc(sizeof(struct ipa_fltr_installed_notif_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		kfree(param);
+		return -ENOMEM;
+	}
+
+	param->commit = 1;
+	param->ep = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	param->global = false;
+	param->num_rules = (uint8_t)1;
+
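+	/* install each cached Q6 UL rule as an eq-attribute filter rule
+	 * on the APPS LAN/WAN producer pipe
+	 */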
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
+		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_add));
+		flt_rule_entry.at_rear = true;
+		flt_rule_entry.rule.action =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].action;
+		flt_rule_entry.rule.rt_tbl_idx
+		= ipa3_qmi_ctx->q6_ul_filter_rule[i].rt_tbl_idx;
+		flt_rule_entry.rule.retain_hdr = true;
+		flt_rule_entry.rule.hashable =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].is_rule_hashable;
+		flt_rule_entry.rule.rule_id =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
+
+		/* debug rt-hdl*/
+		IPAWANDBG("install-IPA index(%d),rt-tbl:(%d)\n",
+			i, flt_rule_entry.rule.rt_tbl_idx);
+		flt_rule_entry.rule.eq_attrib_type = true;
+		memcpy(&(flt_rule_entry.rule.eq_attrib),
+			&ipa3_qmi_ctx->q6_ul_filter_rule[i].eq_attrib,
+			sizeof(struct ipa_ipfltri_rule_eq));
+		memcpy(&(param->rules[0]), &flt_rule_entry,
+			sizeof(struct ipa_flt_rule_add));
+		if (ipa3_add_flt_rule((struct ipa_ioc_add_flt_rule *)param)) {
+			retval = -EFAULT;
+			IPAWANERR("add A7 UL filter rule(%d) failed\n", i);
+		} else {
+			/* store the rule handle */
+			ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i] =
+				param->rules[0].flt_rule_hdl;
+		}
+	}
+
+	/* send ipa_fltr_installed_notif_req_msg_v01 to Q6*/
+	req->source_pipe_index =
+		ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+	req->install_status = QMI_RESULT_SUCCESS_V01;
+	req->rule_id_valid = 1;
+	req->rule_id_len = rmnet_ipa3_ctx->num_q6_rules;
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		req->rule_id[i] =
+			ipa3_qmi_ctx->q6_ul_filter_rule[i].rule_id;
+	}
+	if (ipa3_qmi_filter_notify_send(req)) {
+		IPAWANDBG("add filter rule index on A7-RX failed\n");
+		retval = -EFAULT;
+	}
+	rmnet_ipa3_ctx->old_num_q6_rules = rmnet_ipa3_ctx->num_q6_rules;
+	IPAWANDBG("add (%d) filter rule index on A7-RX\n",
+			rmnet_ipa3_ctx->old_num_q6_rules);
+	kfree(param);
+	kfree(req);
+	return retval;
+}
+
+static int ipa3_wwan_del_ul_flt_rule_to_ipa(void)
+{
+	u32 pyld_sz;
+	int i, retval = 0;
+	struct ipa_ioc_del_flt_rule *param;
+	struct ipa_flt_rule_del flt_rule_entry;
+
+	pyld_sz = sizeof(struct ipa_ioc_del_flt_rule) +
+	   sizeof(struct ipa_flt_rule_del);
+	param = kzalloc(pyld_sz, GFP_KERNEL);
+	if (!param) {
+		IPAWANERR("kzalloc failed\n");
+		return -ENOMEM;
+	}
+
+	param->commit = 1;
+	param->num_hdls = (uint8_t) 1;
+
+	for (i = 0; i < rmnet_ipa3_ctx->old_num_q6_rules; i++) {
+		param->ip = ipa3_qmi_ctx->q6_ul_filter_rule[i].ip;
+		memset(&flt_rule_entry, 0, sizeof(struct ipa_flt_rule_del));
+		flt_rule_entry.hdl = ipa3_qmi_ctx->q6_ul_filter_rule_hdl[i];
+		/* debug rt-hdl*/
+		IPAWANDBG("delete-IPA rule index(%d)\n", i);
+		memcpy(&(param->hdl[0]), &flt_rule_entry,
+			sizeof(struct ipa_flt_rule_del));
+		if (ipa3_del_flt_rule((struct ipa_ioc_del_flt_rule *)param)) {
+			IPAWANERR("del A7 UL filter rule(%d) failed\n", i);
+			kfree(param);
+			return -EFAULT;
+		}
+	}
+
+	/* set UL filter-rule add-indication */
+	rmnet_ipa3_ctx->a7_ul_flt_set = false;
+	rmnet_ipa3_ctx->old_num_q6_rules = 0;
+
+	kfree(param);
+	return retval;
+}
+
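+/* Return the cached channel index matching @mux_id, or
+ * MAX_NUM_OF_MUX_CHANNEL if no match is found.
+ */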
+static int ipa3_find_mux_channel_index(uint32_t mux_id)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (mux_id == rmnet_ipa3_ctx->mux_channel[i].mux_id)
+			return i;
+	}
+	return MAX_NUM_OF_MUX_CHANNEL;
+}
+
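+/* Return the cached channel index matching @vchannel_name, or
+ * MAX_NUM_OF_MUX_CHANNEL if no match is found.
+ */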
+static int find_vchannel_name_index(const char *vchannel_name)
+{
+	int i;
+
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++) {
+		if (strcmp(rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+					vchannel_name) == 0)
+			return i;
+	}
+	return MAX_NUM_OF_MUX_CHANNEL;
+}
+
+static int ipa3_wwan_register_to_ipa(int index)
+{
+	struct ipa_tx_intf tx_properties = {0};
+	struct ipa_ioc_tx_intf_prop tx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_tx_intf_prop *tx_ipv4_property;
+	struct ipa_ioc_tx_intf_prop *tx_ipv6_property;
+	struct ipa_rx_intf rx_properties = {0};
+	struct ipa_ioc_rx_intf_prop rx_ioc_properties[2] = { {0}, {0} };
+	struct ipa_ioc_rx_intf_prop *rx_ipv4_property;
+	struct ipa_ioc_rx_intf_prop *rx_ipv6_property;
+	struct ipa_ext_intf ext_properties = {0};
+	struct ipa_ioc_ext_intf_prop *ext_ioc_properties;
+	u32 pyld_sz;
+	int ret = 0, i;
+
+	IPAWANDBG("index(%d) device[%s]:\n", index,
+		rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	if (!rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set) {
+		ret = ipa3_add_qmap_hdr(
+			rmnet_ipa3_ctx->mux_channel[index].mux_id,
+			&rmnet_ipa3_ctx->mux_channel[index].hdr_hdl);
+		if (ret) {
+			IPAWANERR("ipa3_add_qmap_hdr failed (%d)\n", index);
+			return ret;
+		}
+		rmnet_ipa3_ctx->mux_channel[index].mux_hdr_set = true;
+	}
+	tx_properties.prop = tx_ioc_properties;
+	tx_ipv4_property = &tx_properties.prop[0];
+	tx_ipv4_property->ip = IPA_IP_v4;
+	tx_ipv4_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	snprintf(tx_ipv4_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 rmnet_ipa3_ctx->mux_channel[index].mux_id);
+	tx_ipv6_property = &tx_properties.prop[1];
+	tx_ipv6_property->ip = IPA_IP_v6;
+	tx_ipv6_property->dst_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	/* no need to use A2_MUX_HDR_NAME_V6_PREF, same header */
+	snprintf(tx_ipv6_property->hdr_name, IPA_RESOURCE_NAME_MAX, "%s%d",
+		 A2_MUX_HDR_NAME_V4_PREF,
+		 rmnet_ipa3_ctx->mux_channel[index].mux_id);
+	tx_properties.num_props = 2;
+
+	rx_properties.prop = rx_ioc_properties;
+	rx_ipv4_property = &rx_properties.prop[0];
+	rx_ipv4_property->ip = IPA_IP_v4;
+	rx_ipv4_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv4_property->attrib.meta_data =
+		rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+	rx_ipv4_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv4_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	rx_ipv6_property = &rx_properties.prop[1];
+	rx_ipv6_property->ip = IPA_IP_v6;
+	rx_ipv6_property->attrib.attrib_mask |= IPA_FLT_META_DATA;
+	rx_ipv6_property->attrib.meta_data =
+		rmnet_ipa3_ctx->mux_channel[index].mux_id << WWAN_METADATA_SHFT;
+	rx_ipv6_property->attrib.meta_data_mask = WWAN_METADATA_MASK;
+	rx_ipv6_property->src_pipe = IPA_CLIENT_APPS_LAN_WAN_PROD;
+	rx_properties.num_props = 2;
+
+	pyld_sz = rmnet_ipa3_ctx->num_q6_rules *
+	   sizeof(struct ipa_ioc_ext_intf_prop);
+	ext_ioc_properties = kmalloc(pyld_sz, GFP_KERNEL);
+	if (!ext_ioc_properties) {
+		IPAWANERR("failed to allocate memory\n");
+		return -ENOMEM;
+	}
+
+	ext_properties.prop = ext_ioc_properties;
+	ext_properties.excp_pipe_valid = true;
+	ext_properties.excp_pipe = IPA_CLIENT_APPS_WAN_CONS;
+	ext_properties.num_props = rmnet_ipa3_ctx->num_q6_rules;
+	for (i = 0; i < rmnet_ipa3_ctx->num_q6_rules; i++) {
+		memcpy(&(ext_properties.prop[i]),
+			&(ipa3_qmi_ctx->q6_ul_filter_rule[i]),
+			sizeof(struct ipa_ioc_ext_intf_prop));
+		ext_properties.prop[i].mux_id =
+			rmnet_ipa3_ctx->mux_channel[index].mux_id;
+		IPAWANDBG("index %d ip: %d rt-tbl:%d\n", i,
+			ext_properties.prop[i].ip,
+			ext_properties.prop[i].rt_tbl_idx);
+		IPAWANDBG("action: %d mux:%d\n",
+			ext_properties.prop[i].action,
+			ext_properties.prop[i].mux_id);
+	}
+	ret = ipa3_register_intf_ext(rmnet_ipa3_ctx->mux_channel[index].
+		vchannel_name, &tx_properties,
+		&rx_properties, &ext_properties);
+	if (ret) {
+		IPAWANERR("[%s]:ipa3_register_intf failed %d\n",
+			rmnet_ipa3_ctx->mux_channel[index].vchannel_name, ret);
+		goto fail;
+	}
+	rmnet_ipa3_ctx->mux_channel[index].ul_flt_reg = true;
+fail:
+	kfree(ext_ioc_properties);
+	return ret;
+}
+
+static void ipa3_cleanup_deregister_intf(void)
+{
+	int i;
+	int ret;
+
+	for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+		if (rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg) {
+			ret = ipa3_deregister_intf(
+				rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+			if (ret < 0) {
+				IPAWANERR("de-register device %s(%d) failed\n",
+					rmnet_ipa3_ctx->mux_channel[i].
+					vchannel_name,
+					i);
+				return;
+			}
+			IPAWANDBG("de-register device %s(%d) success\n",
+				rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+				i);
+		}
+		rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = false;
+	}
+}
+
+int ipa3_wwan_update_mux_channel_prop(void)
+{
+	int ret = 0, i;
+	/* install UL filter rules */
+	if (rmnet_ipa3_ctx->egress_set) {
+		if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+			IPAWANDBG("setup UL filter rules\n");
+			if (rmnet_ipa3_ctx->a7_ul_flt_set) {
+				IPAWANDBG("del previous UL filter rules\n");
+				/* delete rule handles */
+				ret = ipa3_wwan_del_ul_flt_rule_to_ipa();
+				if (ret) {
+					IPAWANERR("failed to del old rules\n");
+					return -EINVAL;
+				}
+				IPAWANDBG("deleted old UL rules\n");
+			}
+			ret = ipa3_wwan_add_ul_flt_rule_to_ipa();
+		}
+		if (ret)
+			IPAWANERR("failed to install UL rules\n");
+		else
+			rmnet_ipa3_ctx->a7_ul_flt_set = true;
+	}
+	/* update Tx/Rx/Ext property */
+	IPAWANDBG("update Tx/Rx/Ext property in IPA\n");
+	if (rmnet_ipa3_ctx->rmnet_index == 0) {
+		IPAWANDBG("no Tx/Rx/Ext property registered in IPA\n");
+		return ret;
+	}
+
+	ipa3_cleanup_deregister_intf();
+
+	for (i = 0; i < rmnet_ipa3_ctx->rmnet_index; i++) {
+		ret = ipa3_wwan_register_to_ipa(i);
+		if (ret < 0) {
+			IPAWANERR("failed to re-register %s, mux %d, index %d\n",
+				rmnet_ipa3_ctx->mux_channel[i].vchannel_name,
+				rmnet_ipa3_ctx->mux_channel[i].mux_id,
+				i);
+			return -ENODEV;
+		}
+		IPAWANERR("dev(%s) has registered to IPA\n",
+		rmnet_ipa3_ctx->mux_channel[i].vchannel_name);
+		rmnet_ipa3_ctx->mux_channel[i].ul_flt_reg = true;
+	}
+	return ret;
+}
+
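+/* older kernels provide INIT_COMPLETION() instead of reinit_completion() */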
+#ifdef INIT_COMPLETION
+#define reinit_completion(x) INIT_COMPLETION(*(x))
+#endif /* INIT_COMPLETION */
+
+static int __ipa_wwan_open(struct net_device *dev)
+{
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+	IPAWANDBG("[%s] __wwan_open()\n", dev->name);
+	if (wwan_ptr->device_status != WWAN_DEVICE_ACTIVE)
+		reinit_completion(&wwan_ptr->resource_granted_completion);
+	wwan_ptr->device_status = WWAN_DEVICE_ACTIVE;
+
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		napi_enable(&(wwan_ptr->napi));
+	return 0;
+}
+
+/**
+ * ipa3_wwan_open() - Opens the wwan network interface. Opens logical
+ * channel on A2 MUX driver and starts the network stack queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while opening logical channel on A2 MUX driver
+ */
+static int ipa3_wwan_open(struct net_device *dev)
+{
+	int rc = 0;
+
+	IPAWANDBG("[%s] wwan_open()\n", dev->name);
+	rc = __ipa_wwan_open(dev);
+	if (rc == 0)
+		netif_start_queue(dev);
+	return rc;
+}
+
+static int __ipa_wwan_close(struct net_device *dev)
+{
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+	int rc = 0;
+
+	if (wwan_ptr->device_status == WWAN_DEVICE_ACTIVE) {
+		wwan_ptr->device_status = WWAN_DEVICE_INACTIVE;
+		/* do not close the wwan port once it is up; doing so
+		 * causes the remote side to hang if it is opened again
+		 */
+		reinit_completion(&wwan_ptr->resource_granted_completion);
+		rc = ipa3_deregister_intf(dev->name);
+		if (rc) {
+			IPAWANERR("[%s]: ipa3_deregister_intf failed %d\n",
+			       dev->name, rc);
+			return rc;
+		}
+		return rc;
+	} else {
+		return -EBADF;
+	}
+}
+
+/**
+ * ipa3_wwan_stop() - Stops the wwan network interface. Closes
+ * logical channel on A2 MUX driver and stops the network stack
+ * queue
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * 0: success
+ * -ENODEV: Error while closing logical channel on A2 MUX driver
+ */
+static int ipa3_wwan_stop(struct net_device *dev)
+{
+	IPAWANDBG("[%s] ipa3_wwan_stop()\n", dev->name);
+	__ipa_wwan_close(dev);
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int ipa3_wwan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > WWAN_DATA_LEN)
+		return -EINVAL;
+	IPAWANDBG("[%s] MTU change: old=%d new=%d\n",
+		dev->name, dev->mtu, new_mtu);
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/**
+ * ipa3_wwan_xmit() - Transmits an skb.
+ *
+ * @skb: skb to be transmitted
+ * @dev: network device
+ *
+ * Return codes:
+ * NETDEV_TX_OK: success
+ * NETDEV_TX_BUSY: Error while transmitting the skb. Try again later
+ * -EFAULT: Error while transmitting the skb
+ */
+static int ipa3_wwan_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	int ret = 0;
+	bool qmap_check;
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+	struct ipa_tx_meta meta;
+
+	if (skb->protocol != htons(ETH_P_MAP)) {
+		IPAWANDBG_LOW(
+		"SW filtering out non-QMAP packet received from %s\n",
+		current->comm);
+		dev_kfree_skb_any(skb);
+		return NETDEV_TX_OK;
+	}
+
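+	/* the QMAP command (CD) bit marks control packets, which may
+	 * bypass the flow-control checks below
+	 */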
+	qmap_check = RMNET_MAP_GET_CD_BIT(skb);
+	if (netif_queue_stopped(dev)) {
+		if (qmap_check &&
+			atomic_read(&wwan_ptr->outstanding_pkts) <
+					wwan_ptr->outstanding_high_ctl) {
+			pr_err("[%s]Queue stop, send ctrl pkts\n", dev->name);
+			goto send;
+		} else {
+			pr_err("[%s]fatal: ipa_wwan_xmit stopped\n", dev->name);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	/* checking High WM hit */
+	if (atomic_read(&wwan_ptr->outstanding_pkts) >=
+					wwan_ptr->outstanding_high) {
+		if (!qmap_check) {
+			IPAWANDBG_LOW("pending(%d)/(%d)- stop(%d)\n",
+				atomic_read(&wwan_ptr->outstanding_pkts),
+				wwan_ptr->outstanding_high,
+				netif_queue_stopped(dev));
+			IPAWANDBG_LOW("qmap_chk(%d)\n", qmap_check);
+			netif_stop_queue(dev);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+send:
+	/* IPA_RM checking start */
+	ret = ipa_rm_inactivity_timer_request_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret == -EINPROGRESS) {
+		netif_stop_queue(dev);
+		return NETDEV_TX_BUSY;
+	}
+	if (ret) {
+		pr_err("[%s] fatal: ipa rm timer request resource failed %d\n",
+		       dev->name, ret);
+		return -EFAULT;
+	}
+	/* IPA_RM checking end */
+
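+	/* QMAP command packets are diverted to Q6; data packets go out
+	 * on the LAN/WAN producer pipe
+	 */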
+	if (RMNET_MAP_GET_CD_BIT(skb)) {
+		memset(&meta, 0, sizeof(meta));
+		meta.pkt_init_dst_ep_valid = true;
+		meta.pkt_init_dst_ep_remote = true;
+		ret = ipa3_tx_dp(IPA_CLIENT_Q6_LAN_CONS, skb, &meta);
+	} else {
+		ret = ipa3_tx_dp(IPA_CLIENT_APPS_LAN_WAN_PROD, skb, NULL);
+	}
+
+	if (ret) {
+		ret = NETDEV_TX_BUSY;
+		dev->stats.tx_dropped++;
+		goto out;
+	}
+
+	atomic_inc(&wwan_ptr->outstanding_pkts);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	ret = NETDEV_TX_OK;
+out:
+	ipa_rm_inactivity_timer_release_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+	return ret;
+}
+
+static void ipa3_wwan_tx_timeout(struct net_device *dev)
+{
+	IPAWANERR("[%s] ipa3_wwan_tx_timeout(), data stall in UL\n", dev->name);
+}
+
+/**
+ * apps_ipa_tx_complete_notify() - Tx packet completion notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * Check that the packet is the one we sent and release it.
+ * This function will be called in deferred context in IPA wq.
+ */
+static void apps_ipa_tx_complete_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+	struct net_device *dev = (struct net_device *)priv;
+	struct ipa3_wwan_private *wwan_ptr;
+
+	if (dev != IPA_NETDEV()) {
+		IPAWANDBG("Received pre-SSR packet completion\n");
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	if (evt != IPA_WRITE_DONE) {
+		IPAWANERR("unsupported evt on Tx callback, Drop the packet\n");
+		dev_kfree_skb_any(skb);
+		dev->stats.tx_dropped++;
+		return;
+	}
+
+	wwan_ptr = netdev_priv(dev);
+	atomic_dec(&wwan_ptr->outstanding_pkts);
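+	/* wake the Tx queue once outstanding packets fall below the
+	 * low watermark (not during SSR)
+	 */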
+	__netif_tx_lock_bh(netdev_get_tx_queue(dev, 0));
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr) &&
+		netif_queue_stopped(wwan_ptr->net) &&
+		atomic_read(&wwan_ptr->outstanding_pkts) <
+					(wwan_ptr->outstanding_low)) {
+		IPAWANDBG_LOW("Outstanding low (%d) - waking up queue\n",
+				wwan_ptr->outstanding_low);
+		netif_wake_queue(wwan_ptr->net);
+	}
+	__netif_tx_unlock_bh(netdev_get_tx_queue(dev, 0));
+	dev_kfree_skb_any(skb);
+	ipa_rm_inactivity_timer_release_resource(
+		IPA_RM_RESOURCE_WWAN_0_PROD);
+}
+
+/**
+ * apps_ipa_packet_receive_notify() - Rx notify
+ *
+ * @priv: driver context
+ * @evt: event type
+ * @data: data provided with event
+ *
+ * IPA passes the received packet up to the Linux network stack.
+ */
+static void apps_ipa_packet_receive_notify(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)priv;
+
+	if (evt == IPA_RECEIVE) {
+		struct sk_buff *skb = (struct sk_buff *)data;
+		int result;
+		unsigned int packet_len = skb->len;
+
+		IPAWANDBG_LOW("Rx packet was received");
+		skb->dev = IPA_NETDEV();
+		skb->protocol = htons(ETH_P_MAP);
+
+		if (ipa3_rmnet_res.ipa_napi_enable) {
+			trace_rmnet_ipa_netif_rcv_skb3(dev->stats.rx_packets);
+			result = netif_receive_skb(skb);
+		} else {
+			if (dev->stats.rx_packets % IPA_WWAN_RX_SOFTIRQ_THRESH
+					== 0) {
+				trace_rmnet_ipa_netifni3(dev->stats.rx_packets);
+				result = netif_rx_ni(skb);
+			} else {
+				trace_rmnet_ipa_netifrx3(dev->stats.rx_packets);
+				result = netif_rx(skb);
+			}
+		}
+
+		if (result)	{
+			pr_err_ratelimited(DEV_NAME " %s:%d fail on netif_receive_skb\n",
+							   __func__, __LINE__);
+			dev->stats.rx_dropped++;
+		}
+		dev->stats.rx_packets++;
+		dev->stats.rx_bytes += packet_len;
+	} else if (evt == IPA_CLIENT_START_POLL)
+		ipa3_rmnet_rx_cb(priv);
+	else if (evt == IPA_CLIENT_COMP_NAPI) {
+		if (ipa3_rmnet_res.ipa_napi_enable)
+			napi_complete(&(rmnet_ipa3_ctx->wwan_priv->napi));
+	} else
+		IPAWANERR("Invalid evt %d received in wan_ipa_receive\n", evt);
+}
+
+static int handle3_ingress_format(struct net_device *dev,
+			struct rmnet_ioctl_extended_s *in)
+{
+	int ret = 0;
+	struct ipa_sys_connect_params *ipa_wan_ep_cfg;
+	struct rmnet_phys_ep_conf_s *ep_cfg;
+
+	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
+	ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM)
+		ipa_wan_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en =
+		   IPA_ENABLE_CS_OFFLOAD_DL;
+
+	if ((in->u.data) & RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA) {
+		IPAWANERR("get AGG size %d count %d\n",
+				  in->u.ingress_format.agg_size,
+				  in->u.ingress_format.agg_count);
+
+		ret = ipa_disable_apps_wan_cons_deaggr(
+			  in->u.ingress_format.agg_size,
+			  in->u.ingress_format.agg_count);
+
+		if (!ret) {
+			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
+			   in->u.ingress_format.agg_size;
+			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
+			   in->u.ingress_format.agg_count;
+
+			if (ipa_wan_ep_cfg->napi_enabled) {
+				ipa_wan_ep_cfg->recycle_enabled = true;
+				ep_cfg = (struct rmnet_phys_ep_conf_s *)
+				   rcu_dereference(dev->rx_handler_data);
+				ep_cfg->recycle = ipa_recycle_wan_skb;
+				pr_info("Wan Recycle Enabled\n");
+			}
+		}
+	}
+
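+	/* QMAP header: 4 bytes, metadata (MUX ID) at offset 1,
+	 * packet length at offset 2
+	 */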
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
+
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
+	ipa_wan_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
+	ipa_wan_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
+
+	ipa_wan_ep_cfg->client = IPA_CLIENT_APPS_WAN_CONS;
+	ipa_wan_ep_cfg->notify = apps_ipa_packet_receive_notify;
+	ipa_wan_ep_cfg->priv = dev;
+
+	ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
+	if (ipa_wan_ep_cfg->napi_enabled)
+		ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
+	else
+		ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+
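+	/* serialize ingress pipe setup against SSR-driven teardown */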
+	mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+
+	if (atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		IPAWANDBG("In SSR sequence/recovery\n");
+		mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+		return -EFAULT;
+	}
+	ret = ipa3_setup_sys_pipe(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg,
+	   &rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+
+	mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+
+	if (ret)
+		IPAWANERR("failed to configure ingress\n");
+
+	return ret;
+}
+
+/**
+ * ipa3_wwan_ioctl() - I/O control for the wwan network driver.
+ *
+ * @dev: network device
+ * @ifr: interface request, carries the ioctl payload
+ * @cmd: RMNET_IOCTL_* command to be executed
+ *
+ * Return codes:
+ * 0: success
+ * negative errno value on failure
+ */
+static int ipa3_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	int rc = 0;
+	int mru = 1000, epid = 1, mux_index, len;
+	struct ipa_msg_meta msg_meta;
+	struct ipa_wan_msg *wan_msg = NULL;
+	struct rmnet_ioctl_extended_s extend_ioctl_data;
+	struct rmnet_ioctl_data_s ioctl_data;
+	struct ipa3_rmnet_mux_val *mux_channel;
+	int rmnet_index;
+
+	IPAWANDBG("rmnet_ipa got ioctl number 0x%08x", cmd);
+	switch (cmd) {
+	/*  Set Ethernet protocol  */
+	case RMNET_IOCTL_SET_LLP_ETHERNET:
+		break;
+	/*  Set RAWIP protocol  */
+	case RMNET_IOCTL_SET_LLP_IP:
+		break;
+	/*  Get link protocol  */
+	case RMNET_IOCTL_GET_LLP:
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Set QoS header enabled  */
+	case RMNET_IOCTL_SET_QOS_ENABLE:
+		return -EINVAL;
+	/*  Set QoS header disabled  */
+	case RMNET_IOCTL_SET_QOS_DISABLE:
+		break;
+	/*  Get QoS header state  */
+	case RMNET_IOCTL_GET_QOS:
+		ioctl_data.u.operation_mode = RMNET_MODE_NONE;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Get operation mode */
+	case RMNET_IOCTL_GET_OPMODE:
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+			sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+	/*  Open transport port  */
+	case RMNET_IOCTL_OPEN:
+		break;
+	/*  Close transport port  */
+	case RMNET_IOCTL_CLOSE:
+		break;
+	/*  Flow enable  */
+	case RMNET_IOCTL_FLOW_ENABLE:
+		IPAWANDBG("Received flow enable\n");
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		ipa3_flow_control(IPA_CLIENT_USB_PROD, true,
+			ioctl_data.u.tcm_handle);
+		break;
+	/*  Flow disable  */
+	case RMNET_IOCTL_FLOW_DISABLE:
+		IPAWANDBG("Received flow disable\n");
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		ipa3_flow_control(IPA_CLIENT_USB_PROD, false,
+			ioctl_data.u.tcm_handle);
+		break;
+	/*  Set flow handle  */
+	case RMNET_IOCTL_FLOW_SET_HNDL:
+		break;
+
+	/*  Extended IOCTLs  */
+	case RMNET_IOCTL_EXTENDED:
+		IPAWANDBG("get ioctl: RMNET_IOCTL_EXTENDED\n");
+		if (copy_from_user(&extend_ioctl_data,
+			(u8 *)ifr->ifr_ifru.ifru_data,
+			sizeof(struct rmnet_ioctl_extended_s))) {
+			IPAWANERR("failed to copy extended ioctl data\n");
+			rc = -EFAULT;
+			break;
+		}
+		switch (extend_ioctl_data.extended_ioctl) {
+		/*  Get features  */
+		case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+			IPAWANDBG("get RMNET_IOCTL_GET_SUPPORTED_FEATURES\n");
+			extend_ioctl_data.u.data =
+				(RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL |
+				RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT |
+				RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Set MRU  */
+		case RMNET_IOCTL_SET_MRU:
+			mru = extend_ioctl_data.u.data;
+			IPAWANDBG("get MRU size %d\n",
+				extend_ioctl_data.u.data);
+			break;
+		/*  Get MRU  */
+		case RMNET_IOCTL_GET_MRU:
+			extend_ioctl_data.u.data = mru;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/* GET SG support */
+		case RMNET_IOCTL_GET_SG_SUPPORT:
+			extend_ioctl_data.u.data =
+				ipa3_rmnet_res.ipa_advertise_sg_support;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Get endpoint ID  */
+		case RMNET_IOCTL_GET_EPID:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EPID\n");
+			extend_ioctl_data.u.data = epid;
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&extend_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EPID return %d\n",
+					extend_ioctl_data.u.data);
+			break;
+		/*  Endpoint pair  */
+		case RMNET_IOCTL_GET_EP_PAIR:
+			IPAWANDBG("get ioctl: RMNET_IOCTL_GET_EP_PAIR\n");
+			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num =
+			ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_WAN_PROD);
+			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num =
+			ipa3_get_ep_mapping(IPA_CLIENT_APPS_WAN_CONS);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+				&extend_ioctl_data,
+				sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			if (copy_from_user(&extend_ioctl_data,
+				(u8 *)ifr->ifr_ifru.ifru_data,
+				sizeof(struct rmnet_ioctl_extended_s))) {
+				IPAWANERR("copy extended ioctl data failed\n");
+				rc = -EFAULT;
+				break;
+			}
+			IPAWANDBG("RMNET_IOCTL_GET_EP_PAIR c: %d p: %d\n",
+			extend_ioctl_data.u.ipa_ep_pair.consumer_pipe_num,
+			extend_ioctl_data.u.ipa_ep_pair.producer_pipe_num);
+			break;
+		/*  Get driver name  */
+		case RMNET_IOCTL_GET_DRIVER_NAME:
+			memcpy(&extend_ioctl_data.u.if_name,
+				IPA_NETDEV()->name, IFNAMSIZ);
+			if (copy_to_user((u8 *)ifr->ifr_ifru.ifru_data,
+					&extend_ioctl_data,
+					sizeof(struct rmnet_ioctl_extended_s)))
+				rc = -EFAULT;
+			break;
+		/*  Add MUX ID  */
+		case RMNET_IOCTL_ADD_MUX_CHANNEL:
+			mux_index = ipa3_find_mux_channel_index(
+				extend_ioctl_data.u.rmnet_mux_val.mux_id);
+			if (mux_index < MAX_NUM_OF_MUX_CHANNEL) {
+				IPAWANDBG("already setup mux(%d)\n",
+					extend_ioctl_data.u.
+					rmnet_mux_val.mux_id);
+				return rc;
+			}
+			if (rmnet_ipa3_ctx->rmnet_index
+				>= MAX_NUM_OF_MUX_CHANNEL) {
+				IPAWANERR("Exceed mux_channel limit(%d)\n",
+				rmnet_ipa3_ctx->rmnet_index);
+				return -EFAULT;
+			}
+			IPAWANDBG("ADD_MUX_CHANNEL(%d, name: %s)\n",
+			extend_ioctl_data.u.rmnet_mux_val.mux_id,
+			extend_ioctl_data.u.rmnet_mux_val.vchannel_name);
+			/* cache the mux name and id */
+			mux_channel = rmnet_ipa3_ctx->mux_channel;
+			rmnet_index = rmnet_ipa3_ctx->rmnet_index;
+
+			mux_channel[rmnet_index].mux_id =
+				extend_ioctl_data.u.rmnet_mux_val.mux_id;
+			memcpy(mux_channel[rmnet_index].vchannel_name,
+				extend_ioctl_data.u.rmnet_mux_val.vchannel_name,
+				sizeof(mux_channel[rmnet_index]
+					.vchannel_name));
+			IPAWANDBG("cache device[%s:%d] in IPA_wan[%d]\n",
+				mux_channel[rmnet_index].vchannel_name,
+				mux_channel[rmnet_index].mux_id,
+				rmnet_index);
+			/* check if UL filter rules have arrived */
+			if (rmnet_ipa3_ctx->num_q6_rules != 0) {
+				IPAWANERR("dev(%s) register to IPA\n",
+					extend_ioctl_data.u.rmnet_mux_val.
+					vchannel_name);
+				rc = ipa3_wwan_register_to_ipa(
+						rmnet_ipa3_ctx->rmnet_index);
+				if (rc < 0) {
+					IPAWANERR("device %s reg IPA failed\n",
+						extend_ioctl_data.u.
+						rmnet_mux_val.vchannel_name);
+					return -ENODEV;
+				}
+				mux_channel[rmnet_index].mux_channel_set = true;
+				mux_channel[rmnet_index].ul_flt_reg = true;
+			} else {
+				IPAWANDBG("dev(%s) haven't registered to IPA\n",
+					extend_ioctl_data.u.
+					rmnet_mux_val.vchannel_name);
+				mux_channel[rmnet_index].mux_channel_set = true;
+				mux_channel[rmnet_index].ul_flt_reg = false;
+			}
+			rmnet_ipa3_ctx->rmnet_index++;
+			break;
+		case RMNET_IOCTL_SET_EGRESS_DATA_FORMAT:
+			IPAWANDBG("get RMNET_IOCTL_SET_EGRESS_DATA_FORMAT\n");
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM) {
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.hdr.hdr_len = 8;
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.cfg.cs_offload_en =
+					IPA_ENABLE_CS_OFFLOAD_UL;
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.cfg.cs_metadata_hdr_offset
+						= 1;
+			} else {
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.hdr.hdr_len = 4;
+			}
+			if ((extend_ioctl_data.u.data) &
+					RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION)
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.aggr.aggr_en =
+						IPA_ENABLE_AGGR;
+			else
+				rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.
+					ipa_ep_cfg.aggr.aggr_en =
+						IPA_BYPASS_AGGR;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_metadata_valid = 1;
+			/* modem wants the offset at 0 */
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.hdr.
+				hdr_ofst_metadata = 0;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+				dst = IPA_CLIENT_APPS_LAN_WAN_PROD;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.ipa_ep_cfg.mode.
+				mode = IPA_BASIC;
+
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.client =
+				IPA_CLIENT_APPS_LAN_WAN_PROD;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.notify =
+				apps_ipa_tx_complete_notify;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.desc_fifo_sz =
+			IPA_SYS_TX_DATA_DESC_FIFO_SZ;
+			rmnet_ipa3_ctx->apps_to_ipa_ep_cfg.priv = dev;
+
+			rc = ipa3_setup_sys_pipe(
+				&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg,
+				&rmnet_ipa3_ctx->apps_to_ipa3_hdl);
+			if (rc)
+				IPAWANERR("failed to config egress endpoint\n");
+
+			if (rmnet_ipa3_ctx->num_q6_rules != 0) {
+				/* already got Q6 UL filter rules*/
+				if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt
+					== false)
+					rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
+				else
+					rc = 0;
+				rmnet_ipa3_ctx->egress_set = true;
+				if (rc)
+					IPAWANERR("install UL rules failed\n");
+				else
+					rmnet_ipa3_ctx->a7_ul_flt_set = true;
+			} else {
+				/* wait Q6 UL filter rules*/
+				rmnet_ipa3_ctx->egress_set = true;
+				IPAWANDBG("no UL-rules, egress_set(%d)\n",
+					rmnet_ipa3_ctx->egress_set);
+			}
+			break;
+		case RMNET_IOCTL_SET_INGRESS_DATA_FORMAT:/*  Set IDF  */
+			rc = handle3_ingress_format(dev, &extend_ioctl_data);
+			break;
+		case RMNET_IOCTL_SET_XLAT_DEV_INFO:
+			wan_msg = kzalloc(sizeof(struct ipa_wan_msg),
+						GFP_KERNEL);
+			if (!wan_msg) {
+				IPAWANERR("Failed to allocate memory.\n");
+				return -ENOMEM;
+			}
+			len = sizeof(wan_msg->upstream_ifname) >
+			sizeof(extend_ioctl_data.u.if_name) ?
+				sizeof(extend_ioctl_data.u.if_name) :
+				sizeof(wan_msg->upstream_ifname);
+			strlcpy(wan_msg->upstream_ifname,
+				extend_ioctl_data.u.if_name, len);
+			memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+			msg_meta.msg_type = WAN_XLAT_CONNECT;
+			msg_meta.msg_len = sizeof(struct ipa_wan_msg);
+			rc = ipa3_send_msg(&msg_meta, wan_msg,
+						ipa3_wwan_msg_free_cb);
+			if (rc) {
+				IPAWANERR("Failed to send XLAT_CONNECT msg\n");
+				kfree(wan_msg);
+			}
+			break;
+		/*  Get agg count  */
+		case RMNET_IOCTL_GET_AGGREGATION_COUNT:
+			break;
+		/*  Set agg count  */
+		case RMNET_IOCTL_SET_AGGREGATION_COUNT:
+			break;
+		/*  Get agg size  */
+		case RMNET_IOCTL_GET_AGGREGATION_SIZE:
+			break;
+		/*  Set agg size  */
+		case RMNET_IOCTL_SET_AGGREGATION_SIZE:
+			break;
+		/*  Do flow control  */
+		case RMNET_IOCTL_FLOW_CONTROL:
+			break;
+		/*  For legacy use  */
+		case RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL:
+			break;
+		/*  Get HW/SW map  */
+		case RMNET_IOCTL_GET_HWSW_MAP:
+			break;
+		/*  Set RX Headroom  */
+		case RMNET_IOCTL_SET_RX_HEADROOM:
+			break;
+		default:
+			IPAWANERR("[%s] unsupported extended cmd[%d]",
+				dev->name,
+				extend_ioctl_data.extended_ioctl);
+			rc = -EINVAL;
+		}
+		break;
+	default:
+		IPAWANERR("[%s] unsupported cmd[%d]",
+			dev->name, cmd);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static const struct net_device_ops ipa3_wwan_ops_ip = {
+	.ndo_open = ipa3_wwan_open,
+	.ndo_stop = ipa3_wwan_stop,
+	.ndo_start_xmit = ipa3_wwan_xmit,
+	.ndo_tx_timeout = ipa3_wwan_tx_timeout,
+	.ndo_do_ioctl = ipa3_wwan_ioctl,
+	.ndo_change_mtu = ipa3_wwan_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/**
+ * ipa3_wwan_setup() - Sets up the wwan network device.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_wwan_setup(struct net_device *dev)
+{
+	dev->netdev_ops = &ipa3_wwan_ops_ip;
+	ether_setup(dev);
+	/* set this after calling ether_setup */
+	dev->header_ops = 0;  /* No header */
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->mtu = WWAN_DATA_LEN;
+	dev->addr_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+	dev->needed_headroom = HEADROOM_FOR_QMAP;
+	dev->needed_tailroom = TAILROOM;
+	dev->watchdog_timeo = 1000;
+}
+
+/* IPA_RM related functions start*/
+static void ipa3_q6_prod_rm_request_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_request,
+		ipa3_q6_prod_rm_request_resource);
+static void ipa3_q6_prod_rm_release_resource(struct work_struct *work);
+static DECLARE_DELAYED_WORK(ipa3_q6_con_rm_release,
+		ipa3_q6_prod_rm_release_resource);
+
+static void ipa3_q6_prod_rm_request_resource(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = ipa_rm_request_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAWANERR("%s: ipa_rm_request_resource failed %d\n", __func__,
+		       ret);
+		return;
+	}
+}
+
+static int ipa3_q6_rm_request_resource(void)
+{
+	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
+	   &ipa3_q6_con_rm_request, 0);
+	return 0;
+}
+
+static void ipa3_q6_prod_rm_release_resource(struct work_struct *work)
+{
+	int ret = 0;
+
+	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0 && ret != -EINPROGRESS) {
+		IPAWANERR("%s: ipa_rm_release_resource failed %d\n", __func__,
+		      ret);
+		return;
+	}
+}
+
+
+static int ipa3_q6_rm_release_resource(void)
+{
+	queue_delayed_work(rmnet_ipa3_ctx->rm_q6_wq,
+	   &ipa3_q6_con_rm_release, 0);
+	return 0;
+}
+
+
+static void ipa3_q6_rm_notify_cb(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data)
+{
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		IPAWANDBG_LOW("%s: Q6_PROD GRANTED CB\n", __func__);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		IPAWANDBG_LOW("%s: Q6_PROD RELEASED CB\n", __func__);
+		break;
+	default:
+		return;
+	}
+}
+static int ipa3_q6_initialize_rm(void)
+{
+	struct ipa_rm_create_params create_params;
+	struct ipa_rm_perf_profile profile;
+	int result;
+
+	/* Initialize IPA_RM workqueue */
+	rmnet_ipa3_ctx->rm_q6_wq = create_singlethread_workqueue("clnt_req");
+	if (!rmnet_ipa3_ctx->rm_q6_wq)
+		return -ENOMEM;
+
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_Q6_PROD;
+	create_params.reg_params.notify_cb = &ipa3_q6_rm_notify_cb;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto create_rsrc_err1;
+	memset(&create_params, 0, sizeof(create_params));
+	create_params.name = IPA_RM_RESOURCE_Q6_CONS;
+	create_params.release_resource = &ipa3_q6_rm_release_resource;
+	create_params.request_resource = &ipa3_q6_rm_request_resource;
+	result = ipa_rm_create_resource(&create_params);
+	if (result)
+		goto create_rsrc_err2;
+	/* add dependency*/
+	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (result)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = 100;
+	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_PROD,
+			&profile);
+	if (result)
+		goto set_perf_err;
+	result = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_Q6_CONS,
+			&profile);
+	if (result)
+		goto set_perf_err;
+	return result;
+
+set_perf_err:
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+add_dpnd_err:
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+	if (result < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_CONS, result);
+create_rsrc_err2:
+	result = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (result < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, result);
+create_rsrc_err1:
+	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+	return result;
+}
+
+void ipa3_q6_deinitialize_rm(void)
+{
+	int ret;
+
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+			IPA_RM_RESOURCE_APPS_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, IPA_RM_RESOURCE_APPS_CONS,
+			ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_CONS, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_Q6_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+			IPA_RM_RESOURCE_Q6_PROD, ret);
+	destroy_workqueue(rmnet_ipa3_ctx->rm_q6_wq);
+}
+
+static void ipa3_wake_tx_queue(struct work_struct *work)
+{
+	if (IPA_NETDEV()) {
+		__netif_tx_lock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+		netif_wake_queue(IPA_NETDEV());
+		__netif_tx_unlock_bh(netdev_get_tx_queue(IPA_NETDEV(), 0));
+	}
+}
+
+/**
+ * ipa3_rm_resource_granted() - Called upon
+ * IPA_RM_RESOURCE_GRANTED event. Wakes up the queue if it was stopped.
+ *
+ * @dev: network device
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_rm_resource_granted(void *dev)
+{
+	IPAWANDBG_LOW("Resource Granted - starting queue\n");
+	schedule_work(&ipa3_tx_wakequeue_work);
+}
+
+/**
+ * ipa3_rm_notify() - Callback function for RM events. Handles
+ * IPA_RM_RESOURCE_GRANTED and IPA_RM_RESOURCE_RELEASED events.
+ * IPA_RM_RESOURCE_GRANTED is handled in the context of shared
+ * workqueue.
+ *
+ * @dev: network device
+ * @event: IPA RM event
+ * @data: Additional data provided by IPA RM
+ *
+ * Return codes:
+ * None
+ */
+static void ipa3_rm_notify(void *dev, enum ipa_rm_event event,
+			  unsigned long data)
+{
+	struct ipa3_wwan_private *wwan_ptr = netdev_priv(dev);
+
+	pr_debug("%s: event %d\n", __func__, event);
+	switch (event) {
+	case IPA_RM_RESOURCE_GRANTED:
+		if (wwan_ptr->device_status == WWAN_DEVICE_INACTIVE) {
+			complete_all(&wwan_ptr->resource_granted_completion);
+			break;
+		}
+		ipa3_rm_resource_granted(dev);
+		break;
+	case IPA_RM_RESOURCE_RELEASED:
+		break;
+	default:
+		pr_err("%s: unknown event %d\n", __func__, event);
+		break;
+	}
+}
+
+/* IPA_RM related functions end*/
+
+static int ipa3_ssr_notifier_cb(struct notifier_block *this,
+			   unsigned long code,
+			   void *data);
+
+static struct notifier_block ipa3_ssr_notifier = {
+	.notifier_call = ipa3_ssr_notifier_cb,
+};
+
+static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
+		struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
+{
+	ipa_rmnet_drv_res->ipa_rmnet_ssr =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,rmnet-ipa-ssr");
+	pr_info("IPA SSR support = %s\n",
+		ipa_rmnet_drv_res->ipa_rmnet_ssr ? "True" : "False");
+	ipa_rmnet_drv_res->ipa_loaduC =
+			of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-loaduC");
+	pr_info("IPA ipa-loaduC = %s\n",
+		ipa_rmnet_drv_res->ipa_loaduC ? "True" : "False");
+
+	ipa_rmnet_drv_res->ipa_advertise_sg_support =
+		of_property_read_bool(pdev->dev.of_node,
+		"qcom,ipa-advertise-sg-support");
+	pr_info("IPA SG support = %s\n",
+		ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
+
+	ipa_rmnet_drv_res->ipa_napi_enable =
+		of_property_read_bool(pdev->dev.of_node,
+			"qcom,ipa-napi-enable");
+	pr_info("IPA Napi Enable = %s\n",
+		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+	return 0;
+}
+
+struct ipa3_rmnet_context ipa3_rmnet_ctx;
+static int ipa3_wwan_probe(struct platform_device *pdev);
+struct platform_device *m_pdev;
+
+static void ipa3_delayed_probe(struct work_struct *work)
+{
+	(void)ipa3_wwan_probe(m_pdev);
+}
+
+static DECLARE_WORK(ipa3_scheduled_probe, ipa3_delayed_probe);
+
+static void ipa3_ready_cb(void *user_data)
+{
+	struct platform_device *pdev = (struct platform_device *)(user_data);
+
+	m_pdev = pdev;
+
+	IPAWANDBG("IPA ready callback has been triggered!\n");
+
+	schedule_work(&ipa3_scheduled_probe);
+}
+
+/**
+ * ipa3_wwan_probe() - Initializes the module and registers as a
+ * network interface to the network stack
+ *
+ * Note: In case IPA driver hasn't initialized already, the probe function
+ * will return immediately after registering a callback to be invoked when
+ * IPA driver initialization is complete.
+ *
+ * Return codes:
+ * 0: success
+ * -ENOMEM: No memory available
+ * -EFAULT: Internal error
+ */
+static int ipa3_wwan_probe(struct platform_device *pdev)
+{
+	int ret, i;
+	struct net_device *dev;
+	struct ipa_rm_create_params ipa_rm_params;	/* IPA_RM */
+	struct ipa_rm_perf_profile profile;			/* IPA_RM */
+
+	pr_info("rmnet_ipa3 started initialization\n");
+
+	if (!ipa3_is_ready()) {
+		IPAWANDBG("IPA driver not ready, registering callback\n");
+		ret = ipa_register_ipa_ready_cb(ipa3_ready_cb, (void *)pdev);
+
+		/*
+		 * If we received -EEXIST, IPA has initialized. So we need
+		 * to continue the probing process.
+		 */
+		if (ret != -EEXIST) {
+			if (ret)
+				IPAWANERR("IPA CB reg failed - %d\n", ret);
+			return ret;
+		}
+	}
+
+	ret = get_ipa_rmnet_dts_configuration(pdev, &ipa3_rmnet_res);
+	ipa3_rmnet_ctx.ipa_rmnet_ssr = ipa3_rmnet_res.ipa_rmnet_ssr;
+
+	ret = ipa3_init_q6_smem();
+	if (ret) {
+		IPAWANERR("ipa3_init_q6_smem failed!\n");
+		return ret;
+	}
+
+	/* initialize tx/rx endpoint setup */
+	memset(&rmnet_ipa3_ctx->apps_to_ipa_ep_cfg, 0,
+		sizeof(struct ipa_sys_connect_params));
+	memset(&rmnet_ipa3_ctx->ipa_to_apps_ep_cfg, 0,
+		sizeof(struct ipa_sys_connect_params));
+
+	/* initialize ex property setup */
+	rmnet_ipa3_ctx->num_q6_rules = 0;
+	rmnet_ipa3_ctx->old_num_q6_rules = 0;
+	rmnet_ipa3_ctx->rmnet_index = 0;
+	rmnet_ipa3_ctx->egress_set = false;
+	rmnet_ipa3_ctx->a7_ul_flt_set = false;
+	for (i = 0; i < MAX_NUM_OF_MUX_CHANNEL; i++)
+		memset(&rmnet_ipa3_ctx->mux_channel[i], 0,
+				sizeof(struct ipa3_rmnet_mux_val));
+
+	/* start A7 QMI service/client */
+	if (ipa3_rmnet_res.ipa_loaduC)
+		/* Android platform loads uC */
+		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01);
+	else
+		/* LE platform does not load uC */
+		ipa3_qmi_service_init(QMI_IPA_PLATFORM_TYPE_LE_V01);
+
+	/* construct default WAN RT tbl for IPACM */
+	ret = ipa3_setup_a7_qmap_hdr();
+	if (ret)
+		goto setup_a7_qmap_hdr_err;
+	ret = ipa3_setup_dflt_wan_rt_tables();
+	if (ret)
+		goto setup_dflt_wan_rt_tables_err;
+
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		/* Start transport-driver fd ioctl for ipacm for first init */
+		ret = ipa3_wan_ioctl_init();
+		if (ret)
+			goto wan_ioctl_init_err;
+	} else {
+		/* Enable sending QMI messages after SSR */
+		ipa3_wan_ioctl_enable_qmi_messages();
+	}
+
+	/* initialize wan-driver netdev */
+	dev = alloc_netdev(sizeof(struct ipa3_wwan_private),
+			   IPA_WWAN_DEV_NAME,
+			   NET_NAME_UNKNOWN,
+			   ipa3_wwan_setup);
+	if (!dev) {
+		IPAWANERR("no memory for netdev\n");
+		ret = -ENOMEM;
+		goto alloc_netdev_err;
+	}
+	rmnet_ipa3_ctx->wwan_priv = netdev_priv(dev);
+	memset(rmnet_ipa3_ctx->wwan_priv, 0,
+		sizeof(*(rmnet_ipa3_ctx->wwan_priv)));
+	IPAWANDBG("wwan_ptr (private) = %p", rmnet_ipa3_ctx->wwan_priv);
+	rmnet_ipa3_ctx->wwan_priv->net = dev;
+	rmnet_ipa3_ctx->wwan_priv->outstanding_high = DEFAULT_OUTSTANDING_HIGH;
+	rmnet_ipa3_ctx->wwan_priv->outstanding_low = DEFAULT_OUTSTANDING_LOW;
+	atomic_set(&rmnet_ipa3_ctx->wwan_priv->outstanding_pkts, 0);
+	spin_lock_init(&rmnet_ipa3_ctx->wwan_priv->lock);
+	init_completion(
+		&rmnet_ipa3_ctx->wwan_priv->resource_granted_completion);
+
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		/* IPA_RM configuration starts */
+		ret = ipa3_q6_initialize_rm();
+		if (ret) {
+			IPAWANERR("%s: ipa3_q6_initialize_rm failed, ret: %d\n",
+				__func__, ret);
+			goto q6_init_err;
+		}
+	}
+
+	memset(&ipa_rm_params, 0, sizeof(struct ipa_rm_create_params));
+	ipa_rm_params.name = IPA_RM_RESOURCE_WWAN_0_PROD;
+	ipa_rm_params.reg_params.user_data = dev;
+	ipa_rm_params.reg_params.notify_cb = ipa3_rm_notify;
+	ret = ipa_rm_create_resource(&ipa_rm_params);
+	if (ret) {
+		pr_err("%s: unable to create resource %d in IPA RM\n",
+		       __func__, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto create_rsrc_err;
+	}
+	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_WWAN_0_PROD,
+					   IPA_RM_INACTIVITY_TIMER);
+	if (ret) {
+		pr_err("%s: ipa rm timer init failed %d on resource %d\n",
+		       __func__, ret, IPA_RM_RESOURCE_WWAN_0_PROD);
+		goto timer_init_err;
+	}
+	/* add dependency */
+	ret = ipa_rm_add_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+			IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		goto add_dpnd_err;
+	/* setup Performance profile */
+	memset(&profile, 0, sizeof(profile));
+	profile.max_supported_bandwidth_mbps = IPA_APPS_MAX_BW_IN_MBPS;
+	ret = ipa_rm_set_perf_profile(IPA_RM_RESOURCE_WWAN_0_PROD,
+			&profile);
+	if (ret)
+		goto set_perf_err;
+	/* IPA_RM configuration ends */
+
+	/* Enable SG support in netdevice. */
+	if (ipa3_rmnet_res.ipa_advertise_sg_support)
+		dev->hw_features |= NETIF_F_SG;
+
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		netif_napi_add(dev, &(rmnet_ipa3_ctx->wwan_priv->napi),
+		       ipa3_rmnet_poll, NAPI_WEIGHT);
+	ret = register_netdev(dev);
+	if (ret) {
+		IPAWANERR("unable to register ipa_netdev %d rc=%d\n",
+			0, ret);
+		goto set_perf_err;
+	}
+
+	IPAWANDBG("IPA-WWAN device (%s) initialization ok\n", dev->name);
+	if (ret) {
+		IPAWANERR("default configuration failed rc=%d\n",
+				ret);
+		goto config_err;
+	}
+	atomic_set(&rmnet_ipa3_ctx->is_initialized, 1);
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr)) {
+		/* offline charging mode */
+		ipa3_proxy_clk_unvote();
+	}
+	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+	pr_info("rmnet_ipa completed initialization\n");
+	return 0;
+config_err:
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
+	unregister_netdev(dev);
+set_perf_err:
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+			ret);
+add_dpnd_err:
+	ret = ipa_rm_inactivity_timer_destroy(
+		IPA_RM_RESOURCE_WWAN_0_PROD); /* IPA_RM */
+	if (ret)
+		IPAWANERR("Error ipa_rm_inactivity_timer_destroy %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+timer_init_err:
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+create_rsrc_err:
+	ipa3_q6_deinitialize_rm();
+q6_init_err:
+	free_netdev(dev);
+	rmnet_ipa3_ctx->wwan_priv = NULL;
+alloc_netdev_err:
+	ipa3_wan_ioctl_deinit();
+wan_ioctl_init_err:
+	ipa3_del_dflt_wan_rt_tables();
+setup_dflt_wan_rt_tables_err:
+	ipa3_del_a7_qmap_hdr();
+setup_a7_qmap_hdr_err:
+	ipa3_qmi_service_exit();
+	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+	return ret;
+}
+
+static int ipa3_wwan_remove(struct platform_device *pdev)
+{
+	int ret;
+
+	pr_info("rmnet_ipa started deinitialization\n");
+	mutex_lock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	ret = ipa3_teardown_sys_pipe(rmnet_ipa3_ctx->ipa3_to_apps_hdl);
+	if (ret < 0)
+		IPAWANERR("Failed to teardown IPA->APPS pipe\n");
+	else
+		rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+	if (ipa3_rmnet_res.ipa_napi_enable)
+		netif_napi_del(&(rmnet_ipa3_ctx->wwan_priv->napi));
+	mutex_unlock(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	unregister_netdev(IPA_NETDEV());
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_WWAN_0_PROD,
+		IPA_RM_RESOURCE_Q6_CONS);
+	if (ret < 0)
+		IPAWANERR("Error deleting dependency %d->%d, ret=%d\n",
+			IPA_RM_RESOURCE_WWAN_0_PROD, IPA_RM_RESOURCE_Q6_CONS,
+			ret);
+	ret = ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR(
+		"Error ipa_rm_inactivity_timer_destroy resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	ret = ipa_rm_delete_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	if (ret < 0)
+		IPAWANERR("Error deleting resource %d, ret=%d\n",
+		IPA_RM_RESOURCE_WWAN_0_PROD, ret);
+	cancel_work_sync(&ipa3_tx_wakequeue_work);
+	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+	if (IPA_NETDEV())
+		free_netdev(IPA_NETDEV());
+	rmnet_ipa3_ctx->wwan_priv = NULL;
+	/* No need to remove wwan_ioctl during SSR */
+	if (!atomic_read(&rmnet_ipa3_ctx->is_ssr))
+		ipa3_wan_ioctl_deinit();
+	ipa3_del_dflt_wan_rt_tables();
+	ipa3_del_a7_qmap_hdr();
+	ipa3_del_mux_qmap_hdrs();
+	if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+		ipa3_wwan_del_ul_flt_rule_to_ipa();
+	ipa3_cleanup_deregister_intf();
+	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+	pr_info("rmnet_ipa completed deinitialization\n");
+	return 0;
+}
+
+/**
+* rmnet_ipa_ap_suspend() - suspend callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the PM framework when an AP suspend
+* operation is invoked, usually by pressing a suspend button.
+*
+* Returns -EAGAIN to the PM framework in case there are pending packets
+* in the Tx queue. This postpones the suspend operation until all the
+* pending packets have been transmitted.
+*
+* In case there are no packets to send, releases the WWAN0_PROD entity.
+* As an outcome, the number of IPA active clients should be decremented
+* until IPA clocks can be gated.
+*/
+static int rmnet_ipa_ap_suspend(struct device *dev)
+{
+	struct net_device *netdev = IPA_NETDEV();
+	struct ipa3_wwan_private *wwan_ptr;
+
+	IPAWANDBG_LOW("Enter...\n");
+	if (netdev == NULL) {
+		IPAWANERR("netdev is NULL.\n");
+		return 0;
+	}
+
+	wwan_ptr = netdev_priv(netdev);
+	if (wwan_ptr == NULL) {
+		IPAWANERR("wwan_ptr is NULL.\n");
+		return 0;
+	}
+
+	/* Do not allow A7 to suspend in case there are outstanding packets */
+	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
+		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
+		return -EAGAIN;
+	}
+
+	/* Make sure that there is no Tx operation ongoing */
+	netif_tx_lock_bh(netdev);
+	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
+	netif_tx_unlock_bh(netdev);
+	IPAWANDBG_LOW("Exit\n");
+
+	return 0;
+}
+
+/**
+* rmnet_ipa_ap_resume() - resume callback for runtime_pm
+* @dev: pointer to device
+*
+* This callback will be invoked by the runtime_pm framework when an AP resume
+* operation is invoked.
+*
+* Enables the network interface queue and returns success to the
+* runtime_pm framework.
+*/
+static int rmnet_ipa_ap_resume(struct device *dev)
+{
+	struct net_device *netdev = IPA_NETDEV();
+
+	IPAWANDBG_LOW("Enter...\n");
+	if (netdev)
+		netif_wake_queue(netdev);
+	IPAWANDBG_LOW("Exit\n");
+
+	return 0;
+}
+
+static void ipa_stop_polling_stats(void)
+{
+	cancel_delayed_work(&ipa_tether_stats_poll_wakequeue_work);
+	ipa3_rmnet_ctx.polling_interval = 0;
+}
+
+static const struct of_device_id rmnet_ipa_dt_match[] = {
+	{.compatible = "qcom,rmnet-ipa3"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, rmnet_ipa_dt_match);
+
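+/* Suspend/resume are registered for the noirq phase of system sleep, which
+ * runs after device interrupt handlers have been disabled.
+ */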
+static const struct dev_pm_ops rmnet_ipa_pm_ops = {
+	.suspend_noirq = rmnet_ipa_ap_suspend,
+	.resume_noirq = rmnet_ipa_ap_resume,
+};
+
+static struct platform_driver rmnet_ipa_driver = {
+	.driver = {
+		.name = "rmnet_ipa3",
+		.owner = THIS_MODULE,
+		.pm = &rmnet_ipa_pm_ops,
+		.of_match_table = rmnet_ipa_dt_match,
+	},
+	.probe = ipa3_wwan_probe,
+	.remove = ipa3_wwan_remove,
+};
+
+static int ipa3_ssr_notifier_cb(struct notifier_block *this,
+			   unsigned long code,
+			   void *data)
+{
+	if (!ipa3_rmnet_ctx.ipa_rmnet_ssr)
+		return NOTIFY_DONE;
+
+	switch (code) {
+	case SUBSYS_BEFORE_SHUTDOWN:
+		IPAWANINFO("IPA received MPSS BEFORE_SHUTDOWN\n");
+		atomic_set(&rmnet_ipa3_ctx->is_ssr, 1);
+		ipa3_q6_pre_shutdown_cleanup();
+		if (IPA_NETDEV())
+			netif_stop_queue(IPA_NETDEV());
+		ipa3_qmi_stop_workqueues();
+		ipa3_wan_ioctl_stop_qmi_messages();
+		ipa_stop_polling_stats();
+		if (atomic_read(&rmnet_ipa3_ctx->is_initialized))
+			platform_driver_unregister(&rmnet_ipa_driver);
+		IPAWANINFO("IPA BEFORE_SHUTDOWN handling is complete\n");
+		break;
+	case SUBSYS_AFTER_SHUTDOWN:
+		IPAWANINFO("IPA Received MPSS AFTER_SHUTDOWN\n");
+		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+			ipa3_q6_post_shutdown_cleanup();
+		IPAWANINFO("IPA AFTER_SHUTDOWN handling is complete\n");
+		break;
+	case SUBSYS_BEFORE_POWERUP:
+		IPAWANINFO("IPA received MPSS BEFORE_POWERUP\n");
+		if (atomic_read(&rmnet_ipa3_ctx->is_ssr))
+			/* clean up cached QMI msg/handlers */
+			ipa3_qmi_service_exit();
+		/*hold a proxy vote for the modem*/
+		ipa3_proxy_clk_vote();
+		IPAWANINFO("IPA BEFORE_POWERUP handling is complete\n");
+		break;
+	case SUBSYS_AFTER_POWERUP:
+		IPAWANINFO("%s:%d IPA received MPSS AFTER_POWERUP\n",
+			__func__, __LINE__);
+		if (!atomic_read(&rmnet_ipa3_ctx->is_initialized) &&
+		       atomic_read(&rmnet_ipa3_ctx->is_ssr))
+			platform_driver_register(&rmnet_ipa_driver);
+
+		IPAWANINFO("IPA AFTER_POWERUP handling is complete\n");
+		break;
+	default:
+		IPAWANDBG("Unsupported subsys notification, IPA received: %lu",
+			code);
+		break;
+	}
+
+	IPAWANDBG_LOW("Exit\n");
+	return NOTIFY_DONE;
+}
+
+/**
+ * rmnet_ipa_free_msg() - Free the msg sent to user space via ipa_send_msg
+ * @buff: pointer to buffer containing the message
+ * @len: message len
+ * @type: message type
+ *
+ * This function is invoked when ipa_send_msg completes (it is provided as
+ * the free-function pointer along with the message).
+ */
+static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
+{
+	if (!buff) {
+		IPAWANERR("Null buffer\n");
+		return;
+	}
+
+	if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
+		type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+		IPAWANERR("Wrong type given. buff %p type %d\n",
+			  buff, type);
+	}
+	kfree(buff);
+}
+
+/**
+ * rmnet_ipa_get_stats_and_update() - Gets pipe stats from Modem
+ *
+ * This function queries the IPA Modem driver for the pipe stats
+ * via QMI, and updates the user space IPA entity.
+ */
+static void rmnet_ipa_get_stats_and_update(void)
+{
+	struct ipa_get_data_stats_req_msg_v01 req;
+	struct ipa_get_data_stats_resp_msg_v01 *resp;
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+		       GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+	req.ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+
+	rc = ipa3_qmi_get_data_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa3_qmi_get_data_stats failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_data_stats_resp_msg_v01);
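+	/* resp is handed to ipa_send_msg with rmnet_ipa_free_msg as the free
+	 * callback; it is only freed here on failure.
+	 */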
+	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+}
+
+/**
+ * tethering_stats_poll_queue() - Stats polling function
+ * @work - Work entry
+ *
+ * This function is scheduled periodically (per the interval) in
+ * order to poll the IPA Modem driver for the pipe stats.
+ */
+static void tethering_stats_poll_queue(struct work_struct *work)
+{
+	rmnet_ipa_get_stats_and_update();
+
+	/* Schedule again only if there's an active polling interval */
+	if (ipa3_rmnet_ctx.polling_interval != 0)
+		schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work,
+			msecs_to_jiffies(ipa3_rmnet_ctx.polling_interval*1000));
+}
+
+/**
+ * rmnet_ipa_get_network_stats_and_update() - Get network stats from IPA Modem
+ *
+ * This function retrieves the data usage (used quota) from the IPA Modem
+ * driver via QMI and updates the IPA user space entity.
+ */
+static void rmnet_ipa_get_network_stats_and_update(void)
+{
+	struct ipa_get_apn_data_stats_req_msg_v01 req;
+	struct ipa_get_apn_data_stats_resp_msg_v01 *resp;
+	struct ipa_msg_meta msg_meta;
+	int rc;
+
+	resp = kzalloc(sizeof(struct ipa_get_apn_data_stats_resp_msg_v01),
+		       GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for network stats message\n");
+		return;
+	}
+
+	memset(&req, 0, sizeof(struct ipa_get_apn_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_apn_data_stats_resp_msg_v01));
+
+	req.mux_id_list_valid = true;
+	req.mux_id_list_len = 1;
+	req.mux_id_list[0] = ipa3_rmnet_ctx.metered_mux_id;
+
+	rc = ipa3_qmi_get_network_stats(&req, resp);
+	if (rc) {
+		IPAWANERR("ipa3_qmi_get_network_stats failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+
+	memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+	msg_meta.msg_type = IPA_TETHERING_STATS_UPDATE_NETWORK_STATS;
+	msg_meta.msg_len = sizeof(struct ipa_get_apn_data_stats_resp_msg_v01);
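+	/* resp is handed to ipa_send_msg with rmnet_ipa_free_msg as the free
+	 * callback; it is only freed here on failure.
+	 */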
+	rc = ipa_send_msg(&msg_meta, resp, rmnet_ipa_free_msg);
+	if (rc) {
+		IPAWANERR("ipa_send_msg failed: %d\n", rc);
+		kfree(resp);
+		return;
+	}
+}
+
+/**
+ * rmnet_ipa3_poll_tethering_stats() - Tethering stats polling IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_POLL_TETHERING_STATS.
+ * If the polling interval received is 0, polling stops (a poll already in
+ * progress is allowed to finish), the network stats are fetched one last
+ * time, and the IPA user space entity is updated.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats *data)
+{
+	ipa3_rmnet_ctx.polling_interval = data->polling_interval_secs;
+
+	cancel_delayed_work_sync(&ipa_tether_stats_poll_wakequeue_work);
+
+	if (ipa3_rmnet_ctx.polling_interval == 0) {
+		ipa3_qmi_stop_data_qouta();
+		rmnet_ipa_get_network_stats_and_update();
+		rmnet_ipa_get_stats_and_update();
+		return 0;
+	}
+
+	schedule_delayed_work(&ipa_tether_stats_poll_wakequeue_work, 0);
+	return 0;
+}
+
+/**
+ * rmnet_ipa3_set_data_quota() - Data quota setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_DATA_QUOTA.
+ * It translates the given interface name to the Modem MUX ID and
+ * sends the quota request to the IPA Modem driver via QMI.
+ *
+ * Return codes:
+ * 0: Success
+ * -EFAULT: Invalid interface name provided
+ * other: See ipa3_qmi_set_data_quota
+ */
+int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
+{
+	u32 mux_id;
+	int index;
+	struct ipa_set_data_usage_quota_req_msg_v01 req;
+
+	index = find_vchannel_name_index(data->interface_name);
+	IPAWANERR("iface name %s, quota %lu\n",
+		  data->interface_name,
+		  (unsigned long int) data->quota_mbytes);
+
+	if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%s is an invalid iface name\n",
+			  data->interface_name);
+		return -EFAULT;
+	}
+
+	mux_id = rmnet_ipa3_ctx->mux_channel[index].mux_id;
+	ipa3_rmnet_ctx.metered_mux_id = mux_id;
+
+	memset(&req, 0, sizeof(struct ipa_set_data_usage_quota_req_msg_v01));
+	req.apn_quota_list_valid = true;
+	req.apn_quota_list_len = 1;
+	req.apn_quota_list[0].mux_id = mux_id;
+	req.apn_quota_list[0].num_Mbytes = data->quota_mbytes;
+
+	return ipa3_qmi_set_data_quota(&req);
+}
+
+/**
+ * rmnet_ipa3_set_tether_client_pipe() - Tether client pipe setting IOCTL handler
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_TETHER_CLIENT_PIPE.
+ * It sets (or resets, per the reset_client flag) the tethering client
+ * associated with each of the given UL source and DL destination pipes.
+ *
+ * Return codes:
+ * 0: Success
+ */
+int rmnet_ipa3_set_tether_client_pipe(
+	struct wan_ioctl_set_tether_client_pipe *data)
+{
+	int number, i;
+
+	IPAWANDBG("client %d, UL %d, DL %d, reset %d\n",
+	data->ipa_client,
+	data->ul_src_pipe_len,
+	data->dl_dst_pipe_len,
+	data->reset_client);
+	number = data->ul_src_pipe_len;
+	for (i = 0; i < number; i++) {
+		IPAWANDBG("UL index-%d pipe %d\n", i,
+			data->ul_src_pipe_list[i]);
+		if (data->reset_client)
+			ipa3_set_client(data->ul_src_pipe_list[i],
+				0, false);
+		else
+			ipa3_set_client(data->ul_src_pipe_list[i],
+				data->ipa_client, true);
+	}
+	number = data->dl_dst_pipe_len;
+	for (i = 0; i < number; i++) {
+		IPAWANDBG("DL index-%d pipe %d\n", i,
+			data->dl_dst_pipe_list[i]);
+		if (data->reset_client)
+			ipa3_set_client(data->dl_dst_pipe_list[i],
+				0, false);
+		else
+			ipa3_set_client(data->dl_dst_pipe_list[i],
+				data->ipa_client, false);
+	}
+	return 0;
+}
+
+int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
+	bool reset)
+{
+	struct ipa_get_data_stats_req_msg_v01 *req;
+	struct ipa_get_data_stats_resp_msg_v01 *resp;
+	int pipe_len, rc;
+
+	req = kzalloc(sizeof(struct ipa_get_data_stats_req_msg_v01),
+			GFP_KERNEL);
+	if (!req) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		return -ENOMEM;
+	}
+	resp = kzalloc(sizeof(struct ipa_get_data_stats_resp_msg_v01),
+			GFP_KERNEL);
+	if (!resp) {
+		IPAWANERR("Can't allocate memory for stats message\n");
+		kfree(req);
+		return -ENOMEM;
+	}
+	memset(req, 0, sizeof(struct ipa_get_data_stats_req_msg_v01));
+	memset(resp, 0, sizeof(struct ipa_get_data_stats_resp_msg_v01));
+
+	req->ipa_stats_type = QMI_IPA_STATS_TYPE_PIPE_V01;
+	if (reset) {
+		req->reset_stats_valid = true;
+		req->reset_stats = true;
+		IPAWANERR("reset the pipe stats\n");
+	} else {
+		/* print tethered-client enum */
+		IPAWANDBG_LOW("Tethered-client enum(%d)\n", data->ipa_client);
+	}
+
+	rc = ipa3_qmi_get_data_stats(req, resp);
+	if (rc) {
+		IPAWANERR("can't get ipa_qmi_get_data_stats\n");
+		kfree(req);
+		kfree(resp);
+		return rc;
+	} else if (reset) {
+		kfree(req);
+		kfree(resp);
+		return 0;
+	}
+
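+	/* Accumulate DL (Rx) stats for pipes that belong to the queried client */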
+	if (resp->dl_dst_pipe_stats_list_valid) {
+		for (pipe_len = 0; pipe_len < resp->dl_dst_pipe_stats_list_len;
+			pipe_len++) {
+			IPAWANDBG_LOW("Check entry(%d) dl_dst_pipe(%d)\n",
+				pipe_len, resp->dl_dst_pipe_stats_list
+					[pipe_len].pipe_index);
+			IPAWANDBG_LOW("dl_p_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv4_packets,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv6_packets);
+			IPAWANDBG_LOW("dl_b_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv4_bytes,
+				(unsigned long int) resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				num_ipv6_bytes);
+			if (ipa_get_client_uplink(resp->
+				dl_dst_pipe_stats_list[pipe_len].
+				pipe_index) == false) {
+				if (data->ipa_client == ipa_get_client(resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					pipe_index)) {
+					/* update the DL stats */
+					data->ipv4_rx_packets += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv4_packets;
+					data->ipv6_rx_packets += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv6_packets;
+					data->ipv4_rx_bytes += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv4_bytes;
+					data->ipv6_rx_bytes += resp->
+					dl_dst_pipe_stats_list[pipe_len].
+					num_ipv6_bytes;
+				}
+			}
+		}
+	}
+	IPAWANDBG_LOW("v4_rx_p(%lu) v6_rx_p(%lu) v4_rx_b(%lu) v6_rx_b(%lu)\n",
+		(unsigned long int) data->ipv4_rx_packets,
+		(unsigned long int) data->ipv6_rx_packets,
+		(unsigned long int) data->ipv4_rx_bytes,
+		(unsigned long int) data->ipv6_rx_bytes);
+
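+	/* Accumulate UL (Tx) stats for pipes that belong to the queried client */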
+	if (resp->ul_src_pipe_stats_list_valid) {
+		for (pipe_len = 0; pipe_len < resp->ul_src_pipe_stats_list_len;
+			pipe_len++) {
+			IPAWANDBG_LOW("Check entry(%d) ul_dst_pipe(%d)\n",
+				pipe_len,
+				resp->ul_src_pipe_stats_list[pipe_len].
+				pipe_index);
+			IPAWANDBG_LOW("ul_p_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv4_packets,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv6_packets);
+			IPAWANDBG_LOW("ul_b_v4(%lu)v6(%lu)\n",
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv4_bytes,
+				(unsigned long int) resp->
+				ul_src_pipe_stats_list[pipe_len].
+				num_ipv6_bytes);
+			if (ipa_get_client_uplink(resp->
+				ul_src_pipe_stats_list[pipe_len].
+				pipe_index) == true) {
+				if (data->ipa_client == ipa_get_client(resp->
+				ul_src_pipe_stats_list[pipe_len].
+				pipe_index)) {
+					/* update the UL stats */
+					data->ipv4_tx_packets += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv4_packets;
+					data->ipv6_tx_packets += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv6_packets;
+					data->ipv4_tx_bytes += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv4_bytes;
+					data->ipv6_tx_bytes += resp->
+					ul_src_pipe_stats_list[pipe_len].
+					num_ipv6_bytes;
+				}
+			}
+		}
+	}
+	IPAWANDBG_LOW("tx_p_v4(%lu)v6(%lu)tx_b_v4(%lu) v6(%lu)\n",
+		(unsigned long int) data->ipv4_tx_packets,
+		(unsigned long int) data->ipv6_tx_packets,
+		(unsigned long int) data->ipv4_tx_bytes,
+		(unsigned long int) data->ipv6_tx_bytes);
+	kfree(req);
+	kfree(resp);
+	return 0;
+}
+
+/**
+ * ipa3_broadcast_quota_reach_ind() - Send Netlink broadcast on Quota
+ * @mux_id - The MUX ID on which the quota has been reached
+ *
+ * This function broadcasts a Netlink event using the kobject of the
+ * rmnet_ipa interface in order to alert the user space that the quota
+ * on the specific interface which matches the mux_id has been reached.
+ *
+ */
+void ipa3_broadcast_quota_reach_ind(u32 mux_id)
+{
+	char alert_msg[IPA_QUOTA_REACH_ALERT_MAX_SIZE];
+	char iface_name_m[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+	char iface_name_l[IPA_QUOTA_REACH_IF_NAME_MAX_SIZE];
+	char *envp[IPA_UEVENT_NUM_EVNP] = {
+		alert_msg, iface_name_l, iface_name_m, NULL };
+	int res;
+	int index;
+
+	index = ipa3_find_mux_channel_index(mux_id);
+
+	if (index == MAX_NUM_OF_MUX_CHANNEL) {
+		IPAWANERR("%u is an mux ID\n", mux_id);
+		return;
+	}
+
+	res = snprintf(alert_msg, IPA_QUOTA_REACH_ALERT_MAX_SIZE,
+			"ALERT_NAME=%s", "quotaReachedAlert");
+	if (res >= IPA_QUOTA_REACH_ALERT_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+	/* posting msg for L-release for CNE */
+	res = snprintf(iface_name_l, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+	    "UPSTREAM=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+	/* posting msg for M-release for CNE */
+	res = snprintf(iface_name_m, IPA_QUOTA_REACH_IF_NAME_MAX_SIZE,
+	    "INTERFACE=%s", rmnet_ipa3_ctx->mux_channel[index].vchannel_name);
+	if (res >= IPA_QUOTA_REACH_IF_NAME_MAX_SIZE) {
+		IPAWANERR("message too long (%d)", res);
+		return;
+	}
+
+	IPAWANERR("putting nlmsg: <%s> <%s> <%s>\n",
+		alert_msg, iface_name_l, iface_name_m);
+	kobject_uevent_env(&(IPA_NETDEV()->dev.kobj),
+		KOBJ_CHANGE, envp);
+}
+
+/**
+ * ipa3_q6_handshake_complete() - Perform operations once Q6 is up
+ * @ssr_bootup - Indicates whether this is a cold boot-up or post-SSR.
+ *
+ * This function is invoked once the handshake between the IPA AP driver
+ * and IPA Q6 driver is complete. At this point, it is possible to perform
+ * operations which can't be performed until IPA Q6 driver is up.
+ *
+ */
+void ipa3_q6_handshake_complete(bool ssr_bootup)
+{
+	/* The following handling is needed only after SSR recovery */
+	if (ssr_bootup) {
+		/*
+		 * In case the uC is required to be loaded by the Modem,
+		 * the proxy vote will be removed only when uC loading is
+		 * complete and indication is received by the AP. After SSR,
+		 * uC is already loaded. Therefore, proxy vote can be removed
+		 * once Modem init is complete.
+		 */
+		ipa3_proxy_clk_unvote();
+
+		/*
+		 * It is required to recover the network stats after
+		 * SSR recovery
+		 */
+		rmnet_ipa_get_network_stats_and_update();
+	}
+}
+
+static int __init ipa3_wwan_init(void)
+{
+	rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
+	if (!rmnet_ipa3_ctx) {
+		IPAWANERR("no memory\n");
+		return -ENOMEM;
+	}
+
+	atomic_set(&rmnet_ipa3_ctx->is_initialized, 0);
+	atomic_set(&rmnet_ipa3_ctx->is_ssr, 0);
+
+	mutex_init(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
+	/* Register for Modem SSR */
+	rmnet_ipa3_ctx->subsys_notify_handle = subsys_notif_register_notifier(
+			SUBSYS_MODEM,
+			&ipa3_ssr_notifier);
+	if (!IS_ERR(rmnet_ipa3_ctx->subsys_notify_handle))
+		return platform_driver_register(&rmnet_ipa_driver);
+	else
+		return (int)PTR_ERR(rmnet_ipa3_ctx->subsys_notify_handle);
+}
+
+static void __exit ipa3_wwan_cleanup(void)
+{
+	int ret;
+
+	mutex_destroy(&rmnet_ipa3_ctx->ipa_to_apps_pipe_handle_guard);
+	ret = subsys_notif_unregister_notifier(
+		rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
+	if (ret)
+		IPAWANERR(
+		"Error subsys_notif_unregister_notifier system %s, ret=%d\n",
+		SUBSYS_MODEM, ret);
+	platform_driver_unregister(&rmnet_ipa_driver);
+	kfree(rmnet_ipa3_ctx);
+	rmnet_ipa3_ctx = NULL;
+}
+
+static void ipa3_wwan_msg_free_cb(void *buff, u32 len, u32 type)
+{
+	if (!buff)
+		IPAWANERR("Null buffer.\n");
+	kfree(buff);
+}
+
+static void ipa3_rmnet_rx_cb(void *priv)
+{
+	IPAWANDBG_LOW("\n");
+	napi_schedule(&(rmnet_ipa3_ctx->wwan_priv->napi));
+}
+
+static int ipa3_rmnet_poll(struct napi_struct *napi, int budget)
+{
+	int rcvd_pkts = 0;
+
+	rcvd_pkts = ipa_rx_poll(rmnet_ipa3_ctx->ipa3_to_apps_hdl,
+					NAPI_WEIGHT);
+	IPAWANDBG_LOW("rcvd packets: %d\n", rcvd_pkts);
+	return rcvd_pkts;
+}
+
+late_initcall(ipa3_wwan_init);
+module_exit(ipa3_wwan_cleanup);
+MODULE_DESCRIPTION("WWAN Network Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
new file mode 100644
index 0000000..80b07ab
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -0,0 +1,391 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/rmnet_ipa_fd_ioctl.h>
+#include "ipa_qmi_service.h"
+
+#define DRIVER_NAME "wwan_ioctl"
+
+#ifdef CONFIG_COMPAT
+#define WAN_IOC_ADD_FLT_RULE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE, \
+		compat_uptr_t)
+#define WAN_IOC_ADD_FLT_RULE_INDEX32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_INDEX, \
+		compat_uptr_t)
+#define WAN_IOC_POLL_TETHERING_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_POLL_TETHERING_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_SET_DATA_QUOTA32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA, \
+		compat_uptr_t)
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_RESET_TETHER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RESET_TETHER_STATS, \
+		compat_uptr_t)
+#define WAN_IOC_QUERY_DL_FILTER_STATS32 _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+		compat_uptr_t)
+#endif
+
+static unsigned int dev_num = 1;
+static struct cdev ipa3_wan_ioctl_cdev;
+static unsigned int ipa3_process_ioctl = 1;
+static struct class *class;
+static dev_t device;
+
+static long ipa3_wan_ioctl(struct file *filp,
+		unsigned int cmd,
+		unsigned long arg)
+{
+	int retval = 0;
+	u32 pyld_sz;
+	u8 *param = NULL;
+
+	IPAWANDBG("device %s got ioctl events :>>>\n",
+		DRIVER_NAME);
+
+	if (!ipa3_process_ioctl) {
+		IPAWANDBG("modem is in SSR, ignoring ioctl\n");
+		return -EAGAIN;
+	}
+
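+	/* Each command below copies its payload from user space, forwards it
+	 * to the matching handler, and copies any result back where needed.
+	 */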
+	switch (cmd) {
+	case WAN_IOC_ADD_FLT_RULE:
+		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(struct ipa_install_fltr_rule_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_qmi_filter_request_send(
+			(struct ipa_install_fltr_rule_req_msg_v01 *)param)) {
+			IPAWANDBG("IPACM->Q6 add filter rule failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_ADD_FLT_RULE_INDEX:
+		IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(struct ipa_fltr_installed_notif_req_msg_v01);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_qmi_filter_notify_send(
+		(struct ipa_fltr_installed_notif_req_msg_v01 *)param)) {
+			IPAWANDBG("IPACM->Q6 rule index fail\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_VOTE_FOR_BW_MBPS:
+		IPAWANDBG("device %s got WAN_IOC_VOTE_FOR_BW_MBPS :>>>\n",
+		DRIVER_NAME);
+		pyld_sz = sizeof(uint32_t);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (ipa3_vote_for_bus_bw((uint32_t *)param)) {
+			IPAWANERR("Failed to vote for bus BW\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_POLL_TETHERING_STATS:
+		IPAWANDBG_LOW("got WAN_IOCTL_POLL_TETHERING_STATS :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_poll_tethering_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_poll_tethering_stats(
+		(struct wan_ioctl_poll_tethering_stats *)param)) {
+			IPAWANERR("WAN_IOCTL_POLL_TETHERING_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_DATA_QUOTA:
+		IPAWANDBG_LOW("got WAN_IOCTL_SET_DATA_QUOTA :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_set_data_quota);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_set_data_quota(
+		(struct wan_ioctl_set_data_quota *)param)) {
+			IPAWANERR("WAN_IOC_SET_DATA_QUOTA failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_SET_TETHER_CLIENT_PIPE:
+		IPAWANDBG_LOW("got WAN_IOC_SET_TETHER_CLIENT_PIPE :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_set_tether_client_pipe);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		if (rmnet_ipa3_set_tether_client_pipe(
+			(struct wan_ioctl_set_tether_client_pipe *)param)) {
+			IPAWANERR("WAN_IOC_SET_TETHER_CLIENT_PIPE failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_QUERY_TETHER_STATS:
+		IPAWANDBG_LOW("got WAN_IOC_QUERY_TETHER_STATS :>>>\n");
+		pyld_sz = sizeof(struct wan_ioctl_query_tether_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (rmnet_ipa3_query_tethering_stats(
+			(struct wan_ioctl_query_tether_stats *)param, false)) {
+			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+
+		if (copy_to_user((u8 *)arg, param, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	case WAN_IOC_RESET_TETHER_STATS:
+		IPAWANDBG_LOW("device %s got WAN_IOC_RESET_TETHER_STATS :>>>\n",
+				DRIVER_NAME);
+		pyld_sz = sizeof(struct wan_ioctl_reset_tether_stats);
+		param = kzalloc(pyld_sz, GFP_KERNEL);
+		if (!param) {
+			retval = -ENOMEM;
+			break;
+		}
+		if (copy_from_user(param, (u8 *)arg, pyld_sz)) {
+			retval = -EFAULT;
+			break;
+		}
+
+		if (rmnet_ipa3_query_tethering_stats(NULL, true)) {
+			IPAWANERR("WAN_IOC_QUERY_TETHER_STATS failed\n");
+			retval = -EFAULT;
+			break;
+		}
+		break;
+
+	default:
+		retval = -ENOTTY;
+	}
+	kfree(param);
+	return retval;
+}
+
+#ifdef CONFIG_COMPAT
+long ipa3_compat_wan_ioctl(struct file *file,
+		unsigned int cmd,
+		unsigned long arg)
+{
+	switch (cmd) {
+	case WAN_IOC_ADD_FLT_RULE32:
+		cmd = WAN_IOC_ADD_FLT_RULE;
+		break;
+	case WAN_IOC_ADD_FLT_RULE_INDEX32:
+		cmd = WAN_IOC_ADD_FLT_RULE_INDEX;
+		break;
+	case WAN_IOC_POLL_TETHERING_STATS32:
+		cmd = WAN_IOC_POLL_TETHERING_STATS;
+		break;
+	case WAN_IOC_SET_DATA_QUOTA32:
+		cmd = WAN_IOC_SET_DATA_QUOTA;
+		break;
+	case WAN_IOC_SET_TETHER_CLIENT_PIPE32:
+		cmd = WAN_IOC_SET_TETHER_CLIENT_PIPE;
+		break;
+	case WAN_IOC_QUERY_TETHER_STATS32:
+		cmd = WAN_IOC_QUERY_TETHER_STATS;
+		break;
+	case WAN_IOC_RESET_TETHER_STATS32:
+		cmd = WAN_IOC_RESET_TETHER_STATS;
+		break;
+	case WAN_IOC_QUERY_DL_FILTER_STATS32:
+		cmd = WAN_IOC_QUERY_DL_FILTER_STATS;
+		break;
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return ipa3_wan_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static int ipa3_wan_ioctl_open(struct inode *inode, struct file *filp)
+{
+	IPAWANDBG("\n IPA A7 ipa3_wan_ioctl open OK :>>>> ");
+	return 0;
+}
+
+const struct file_operations rmnet_ipa3_fops = {
+	.owner = THIS_MODULE,
+	.open = ipa3_wan_ioctl_open,
+	.read = NULL,
+	.unlocked_ioctl = ipa3_wan_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = ipa3_compat_wan_ioctl,
+#endif
+};
+
+int ipa3_wan_ioctl_init(void)
+{
+	unsigned int wan_ioctl_major = 0;
+	int ret;
+	struct device *dev;
+
+	device = MKDEV(wan_ioctl_major, 0);
+
+	ret = alloc_chrdev_region(&device, 0, dev_num, DRIVER_NAME);
+	if (ret) {
+		IPAWANERR(":device_alloc err.\n");
+		goto dev_alloc_err;
+	}
+	wan_ioctl_major = MAJOR(device);
+
+	class = class_create(THIS_MODULE, DRIVER_NAME);
+	if (IS_ERR(class)) {
+		IPAWANERR(":class_create err.\n");
+		goto class_err;
+	}
+
+	dev = device_create(class, NULL, device,
+		NULL, DRIVER_NAME);
+	if (IS_ERR(dev)) {
+		IPAWANERR(":device_create err.\n");
+		goto device_err;
+	}
+
+	cdev_init(&ipa3_wan_ioctl_cdev, &rmnet_ipa3_fops);
+	ret = cdev_add(&ipa3_wan_ioctl_cdev, device, dev_num);
+	if (ret) {
+		IPAWANERR(":cdev_add err.\n");
+		goto cdev_add_err;
+	}
+
+	ipa3_process_ioctl = 1;
+
+	IPAWANDBG("IPA %s major(%d) initial ok :>>>>\n",
+	DRIVER_NAME, wan_ioctl_major);
+	return 0;
+
+cdev_add_err:
+	device_destroy(class, device);
+device_err:
+	class_destroy(class);
+class_err:
+	unregister_chrdev_region(device, dev_num);
+dev_alloc_err:
+	return -ENODEV;
+}
+
+void ipa3_wan_ioctl_stop_qmi_messages(void)
+{
+	ipa3_process_ioctl = 0;
+}
+
+void ipa3_wan_ioctl_enable_qmi_messages(void)
+{
+	ipa3_process_ioctl = 1;
+}
+
+void ipa3_wan_ioctl_deinit(void)
+{
+	cdev_del(&ipa3_wan_ioctl_cdev);
+	device_destroy(class, device);
+	class_destroy(class);
+	unregister_chrdev_region(device, dev_num);
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
new file mode 100644
index 0000000..3ed3e44
--- /dev/null
+++ b/drivers/platform/msm/ipa/ipa_v3/teth_bridge.c
@@ -0,0 +1,253 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/if_ether.h>
+#include <linux/ioctl.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msm_ipa.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/ipa.h>
+#include <linux/netdevice.h>
+#include "ipa_i.h"
+
+#define TETH_BRIDGE_DRV_NAME "ipa_tethering_bridge"
+
+#define TETH_DBG(fmt, args...) \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, \
+		 __func__, __LINE__, ## args)
+#define TETH_DBG_FUNC_ENTRY() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d ENTRY\n", __func__, __LINE__)
+#define TETH_DBG_FUNC_EXIT() \
+	pr_debug(TETH_BRIDGE_DRV_NAME " %s:%d EXIT\n", __func__, __LINE__)
+#define TETH_ERR(fmt, args...) \
+	pr_err(TETH_BRIDGE_DRV_NAME " %s:%d " fmt, __func__, __LINE__, ## args)
+
+/**
+ * struct ipa3_teth_bridge_ctx - Tethering bridge driver context information
+ * @class: kernel class pointer
+ * @dev_num: kernel device number
+ * @dev: kernel device struct pointer
+ * @cdev: kernel character device struct
+ */
+struct ipa3_teth_bridge_ctx {
+	struct class *class;
+	dev_t dev_num;
+	struct device *dev;
+	struct cdev cdev;
+};
+static struct ipa3_teth_bridge_ctx *ipa3_teth_ctx;
+
+/**
+* teth_bridge_ipa_cb() - Callback to handle IPA data path events
+* @priv - private data
+* @evt - event type
+* @data - event specific data (usually skb)
+*
+* This callback is called by IPA driver for exception packets from USB.
+* All exception packets are handled by Q6 and should not reach this function.
+* Packets will arrive at the AP exception pipe only when they are sent
+* from USB before Q6 has set up the call.
+*/
+static void teth_bridge_ipa_cb(void *priv, enum ipa_dp_evt_type evt,
+	unsigned long data)
+{
+	struct sk_buff *skb = (struct sk_buff *)data;
+
+	TETH_DBG_FUNC_ENTRY();
+	if (evt != IPA_RECEIVE) {
+		TETH_ERR("unexpected event %d\n", evt);
+		WARN_ON(1);
+		return;
+	}
+
+	TETH_ERR("Unexpected exception packet from USB, dropping packet\n");
+	dev_kfree_skb_any(skb);
+	TETH_DBG_FUNC_EXIT();
+}
+
+/**
+* ipa3_teth_bridge_init() - Initialize the Tethering bridge driver
+* @params - in/out params for USB initialization API (please look at struct
+*  definition for more info)
+*
+* The USB driver gets a pointer to a callback function (usb_notify_cb) and
+* associated data. The USB driver installs this callback function in the
+* call to ipa3_connect().
+*
+* Builds IPA resource manager dependency graph.
+*
+* Return codes: 0: success,
+*		-EINVAL - Bad parameter
+*		Other negative value - Failure
+*/
+int ipa3_teth_bridge_init(struct teth_bridge_init_params *params)
+{
+	TETH_DBG_FUNC_ENTRY();
+
+	if (!params) {
+		TETH_ERR("Bad parameter\n");
+		TETH_DBG_FUNC_EXIT();
+		return -EINVAL;
+	}
+
+	params->usb_notify_cb = teth_bridge_ipa_cb;
+	params->private_data = NULL;
+	params->skip_ep_cfg = true;
+
+	TETH_DBG_FUNC_EXIT();
+	return 0;
+}
+
+/**
+* ipa3_teth_bridge_disconnect() - Disconnect tethering bridge module
+*/
+int ipa3_teth_bridge_disconnect(enum ipa_client_type client)
+{
+	TETH_DBG_FUNC_ENTRY();
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+				 IPA_RM_RESOURCE_Q6_CONS);
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				 IPA_RM_RESOURCE_USB_CONS);
+	TETH_DBG_FUNC_EXIT();
+
+	return 0;
+}
+
+/**
+* ipa3_teth_bridge_connect() - Connect bridge for a tethered Rmnet / MBIM call
+* @connect_params:	Connection info
+*
+* Return codes: 0: success
+*		-EINVAL: invalid parameters
+*		-EPERM: Operation not permitted as the bridge is already
+*		connected
+*/
+int ipa3_teth_bridge_connect(struct teth_bridge_connect_params *connect_params)
+{
+	int res = 0;
+
+	TETH_DBG_FUNC_ENTRY();
+
+	/* Build the dependency graph, first add_dependency call is sync
+	 * in order to make sure the IPA clocks are up before we continue
+	 * and notify the USB driver it may continue.
+	 */
+	res = ipa_rm_add_dependency_sync(IPA_RM_RESOURCE_USB_PROD,
+				    IPA_RM_RESOURCE_Q6_CONS);
+	if (res < 0) {
+		TETH_ERR("ipa_rm_add_dependency() failed.\n");
+		goto bail;
+	}
+
+	/* This add_dependency call can't be sync since it would block until
+	 * the USB status is connected (which can only happen after the
+	 * tethering bridge is connected). The clocks are already up, so the
+	 * call doesn't need to block.
+	 */
+	res = ipa_rm_add_dependency(IPA_RM_RESOURCE_Q6_PROD,
+				    IPA_RM_RESOURCE_USB_CONS);
+	if (res < 0 && res != -EINPROGRESS) {
+		ipa_rm_delete_dependency(IPA_RM_RESOURCE_USB_PROD,
+					IPA_RM_RESOURCE_Q6_CONS);
+		TETH_ERR("ipa_rm_add_dependency() failed.\n");
+		goto bail;
+	}
+
+	res = 0;
+
+bail:
+	TETH_DBG_FUNC_EXIT();
+	return res;
+}
+
+static long ipa3_teth_bridge_ioctl(struct file *filp,
+			      unsigned int cmd,
+			      unsigned long arg)
+{
+	IPAERR("No ioctls are supported!\n");
+	return -ENOIOCTLCMD;
+}
+
+static const struct file_operations ipa3_teth_bridge_drv_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = ipa3_teth_bridge_ioctl,
+};
+
+/**
+* ipa3_teth_bridge_driver_init() - Initialize tethering bridge driver
+*
+*/
+int ipa3_teth_bridge_driver_init(void)
+{
+	int res;
+
+	TETH_DBG("Tethering bridge driver init\n");
+	ipa3_teth_ctx = kzalloc(sizeof(*ipa3_teth_ctx), GFP_KERNEL);
+	if (!ipa3_teth_ctx) {
+		TETH_ERR("kzalloc err.\n");
+		return -ENOMEM;
+	}
+
+	ipa3_teth_ctx->class = class_create(THIS_MODULE, TETH_BRIDGE_DRV_NAME);
+
+	res = alloc_chrdev_region(&ipa3_teth_ctx->dev_num, 0, 1,
+				  TETH_BRIDGE_DRV_NAME);
+	if (res) {
+		TETH_ERR("alloc_chrdev_region err.\n");
+		res = -ENODEV;
+		goto fail_alloc_chrdev_region;
+	}
+
+	ipa3_teth_ctx->dev = device_create(ipa3_teth_ctx->class,
+			NULL,
+			ipa3_teth_ctx->dev_num,
+			ipa3_teth_ctx,
+			TETH_BRIDGE_DRV_NAME);
+	if (IS_ERR(ipa3_teth_ctx->dev)) {
+		TETH_ERR(":device_create err.\n");
+		res = -ENODEV;
+		goto fail_device_create;
+	}
+
+	cdev_init(&ipa3_teth_ctx->cdev, &ipa3_teth_bridge_drv_fops);
+	ipa3_teth_ctx->cdev.owner = THIS_MODULE;
+	ipa3_teth_ctx->cdev.ops = &ipa3_teth_bridge_drv_fops;
+
+	res = cdev_add(&ipa3_teth_ctx->cdev, ipa3_teth_ctx->dev_num, 1);
+	if (res) {
+		TETH_ERR(":cdev_add err=%d\n", -res);
+		res = -ENODEV;
+		goto fail_cdev_add;
+	}
+	TETH_DBG("Tethering bridge driver init OK\n");
+
+	return 0;
+fail_cdev_add:
+	device_destroy(ipa3_teth_ctx->class, ipa3_teth_ctx->dev_num);
+fail_device_create:
+	unregister_chrdev_region(ipa3_teth_ctx->dev_num, 1);
+fail_alloc_chrdev_region:
+	kfree(ipa3_teth_ctx);
+	ipa3_teth_ctx = NULL;
+
+	return res;
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Tethering bridge driver");
diff --git a/drivers/platform/msm/ipa/test/Makefile b/drivers/platform/msm/ipa/test/Makefile
new file mode 100644
index 0000000..e1686e6
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_IPA_UT) += ipa_ut_mod.o
+ipa_ut_mod-y := ipa_ut_framework.o ipa_test_example.o ipa_test_mhi.o
diff --git a/drivers/platform/msm/ipa/test/ipa_test_example.c b/drivers/platform/msm/ipa/test/ipa_test_example.c
new file mode 100644
index 0000000..0313375
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_example.c
@@ -0,0 +1,99 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ipa_ut_framework.h"
+
+/**
+ * Example IPA unit-test suite
+ * Intended as a reference for writing new suites and tests.
+ * This suite is also used as a unit test for the testing framework itself.
+ * Structure:
+ *	1- Define the setup and teardown functions
+ *	 (not mandatory; NULL may be used as well)
+ *	2- For each test, define its Run() function
+ *	3- Use IPA_UT_DEFINE_SUITE_START() to start defining the suite
+ *	4- Use IPA_UT_ADD_TEST() for adding tests within
+ *	 the suite definition block
+ *	5- IPA_UT_DEFINE_SUITE_END() closes the suite definition
+ */
+
+static int ipa_test_example_dummy;
+
+static int ipa_test_example_suite_setup(void **ppriv)
+{
+	IPA_UT_DBG("Start Setup - set 0x1234F\n");
+
+	ipa_test_example_dummy = 0x1234F;
+	*ppriv = (void *)&ipa_test_example_dummy;
+
+	return 0;
+}
+
+static int ipa_test_example_teardown(void *priv)
+{
+	IPA_UT_DBG("Start Teardown\n");
+	IPA_UT_DBG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+
+	return 0;
+}
+
+static int ipa_test_example_test1(void *priv)
+{
+	IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+	ipa_test_example_dummy++;
+
+	return 0;
+}
+
+static int ipa_test_example_test2(void *priv)
+{
+	IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+	ipa_test_example_dummy++;
+
+	return 0;
+}
+
+static int ipa_test_example_test3(void *priv)
+{
+	IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+	ipa_test_example_dummy++;
+
+	return 0;
+}
+
+static int ipa_test_example_test4(void *priv)
+{
+	IPA_UT_LOG("priv=0x%p - value=0x%x\n", priv, *((int *)priv));
+	ipa_test_example_dummy++;
+
+	IPA_UT_TEST_FAIL_REPORT("failed on test");
+
+	return -EFAULT;
+}
+
+/* Suite definition block */
+IPA_UT_DEFINE_SUITE_START(example, "Example suite",
+	ipa_test_example_suite_setup, ipa_test_example_teardown)
+{
+	IPA_UT_ADD_TEST(test1, "This is test number 1",
+		ipa_test_example_test1, false, IPA_HW_v1_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(test2, "This is test number 2",
+		ipa_test_example_test2, false, IPA_HW_v1_0, IPA_HW_MAX),
+
+	IPA_UT_ADD_TEST(test3, "This is test number 3",
+		ipa_test_example_test3, false, IPA_HW_v1_1, IPA_HW_v2_6),
+
+	IPA_UT_ADD_TEST(test4, "This is test number 4",
+		ipa_test_example_test4, false, IPA_HW_v1_1, IPA_HW_MAX),
+
+} IPA_UT_DEFINE_SUITE_END(example);
diff --git a/drivers/platform/msm/ipa/test/ipa_test_mhi.c b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
new file mode 100644
index 0000000..5a41d64
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_test_mhi.c
@@ -0,0 +1,3306 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/ipa_mhi.h>
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "../../gsi/gsi.h"
+#include "../../gsi/gsi_reg.h"
+#include "ipa_ut_framework.h"
+
+#define IPA_MHI_TEST_NUM_CHANNELS		8
+#define IPA_MHI_TEST_NUM_EVENT_RINGS		8
+#define IPA_MHI_TEST_FIRST_CHANNEL_ID		100
+#define IPA_MHI_TEST_FIRST_EVENT_RING_ID	100
+#define IPA_MHI_TEST_LAST_CHANNEL_ID \
+	(IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS - 1)
+#define IPA_MHI_TEST_LAST_EVENT_RING_ID \
+	(IPA_MHI_TEST_FIRST_EVENT_RING_ID + IPA_MHI_TEST_NUM_EVENT_RINGS - 1)
+#define IPA_MHI_TEST_MAX_DATA_BUF_SIZE		1500
+#define IPA_MHI_TEST_SEQ_TYPE_DMA		0x00000000
+
+#define IPA_MHI_TEST_LOOP_NUM			5
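+/* Run a test unit IPA_MHI_TEST_LOOP_NUM times; stop at the first failure */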
+#define IPA_MHI_RUN_TEST_UNIT_IN_LOOP(test_unit, rc, args...)		\
+	do {								\
+		int __i;						\
+		for (__i = 0; __i < IPA_MHI_TEST_LOOP_NUM; __i++) {	\
+			IPA_UT_LOG(#test_unit " START iter %d\n", __i);	\
+			rc = test_unit(args);				\
+			if (!rc)					\
+				continue;				\
+			IPA_UT_LOG(#test_unit " failed %d\n", rc);	\
+			break;						\
+		}							\
+	} while (0)
+
+/**
+ * Check for an MSI interrupt for one or both channels:
+ * the OUT channel MSI may be missed as it
+ * will be overwritten by the IN channel MSI
+ */
+#define IPA_MHI_TEST_CHECK_MSI_INTR(__both, __timeout)			\
+	do {								\
+		int i;							\
+		for (i = 0; i < 20; i++) {				\
+			if (*((u32 *)test_mhi_ctx->msi.base) ==		\
+				(0x10000000 |				\
+				(IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1))) { \
+				__timeout = false;			\
+				break;					\
+			}						\
+			if (__both && (*((u32 *)test_mhi_ctx->msi.base) == \
+				(0x10000000 |				\
+				(IPA_MHI_TEST_FIRST_EVENT_RING_ID)))) { \
+				/* sleep to be sure IN MSI is generated */ \
+				msleep(20);				\
+				__timeout = false;			\
+				break;					\
+			}						\
+			msleep(20);					\
+		}							\
+	} while (0)
+
+static DECLARE_COMPLETION(mhi_test_ready_comp);
+static DECLARE_COMPLETION(mhi_test_wakeup_comp);
+
+/**
+ * enum ipa_mhi_ring_elements_type - MHI ring elements types.
+ */
+enum ipa_mhi_ring_elements_type {
+	IPA_MHI_RING_ELEMENT_NO_OP = 1,
+	IPA_MHI_RING_ELEMENT_TRANSFER = 2
+};
+
+/**
+ * enum ipa_mhi_channel_direction - MHI channel directions
+ */
+enum ipa_mhi_channel_direction {
+	IPA_MHI_OUT_CHAHNNEL = 1,
+	IPA_MHI_IN_CHAHNNEL = 2,
+};
+
+/**
+ * struct ipa_mhi_channel_context_array - MHI Channel context array entry
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_channel_context_array {
+	u32	chstate:8;	/*0-7*/
+	u32	brsmode:2;	/*8-9*/
+	u32	pollcfg:6;	/*10-15*/
+	u32	reserved:16;	/*16-31*/
+	u32	chtype;		/*channel type (inbound/outbound)*/
+	u32	erindex;	/*event ring index*/
+	u64	rbase;		/*ring base address in the host addr spc*/
+	u64	rlen;		/*ring length in bytes*/
+	u64	rp;		/*read pointer in the host system addr spc*/
+	u64	wp;		/*write pointer in the host system addr spc*/
+} __packed;
+
+/**
+ * struct ipa_mhi_event_context_array - MHI event ring context array entry
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_event_context_array {
+	u16	intmodc;
+	u16	intmodt;/* Interrupt moderation timer (in microseconds) */
+	u32	ertype;
+	u32	msivec;	/* MSI vector for interrupt (MSI data)*/
+	u64	rbase;	/* ring base address in host address space*/
+	u64	rlen;	/* ring length in bytes*/
+	u64	rp;	/* read pointer in the host system address space*/
+	u64	wp;	/* write pointer in the host system address space*/
+} __packed;
+
+/**
+ *
+ * struct ipa_mhi_mmio_register_set - MHI configuration registers,
+ *	control registers, status registers, pointers to doorbell arrays,
+ *	pointers to channel and event context arrays.
+ *
+ * The structure is defined in mhi spec (register names are taken from there).
+ *	Only values accessed by HWP or test are documented
+ */
+struct ipa_mhi_mmio_register_set {
+	u32	mhireglen;
+	u32	reserved_08_04;
+	u32	mhiver;
+	u32	reserved_10_0c;
+	struct mhicfg {
+		u8		nch;
+		u8		reserved_15_8;
+		u8		ner;
+		u8		reserved_31_23;
+	} __packed mhicfg;
+
+	u32	reserved_18_14;
+	u32	chdboff;
+	u32	reserved_20_1C;
+	u32	erdboff;
+	u32	reserved_28_24;
+	u32	bhioff;
+	u32	reserved_30_2C;
+	u32	debugoff;
+	u32	reserved_38_34;
+
+	struct mhictrl {
+		u32 rs : 1;
+		u32 reset : 1;
+		u32 reserved_7_2 : 6;
+		u32 mhistate : 8;
+		u32 reserved_31_16 : 16;
+	} __packed mhictrl;
+
+	u64	reserved_40_3c;
+	u32	reserved_44_40;
+
+	struct mhistatus {
+		u32 ready : 1;
+		u32 reserved_3_2 : 1;
+		u32 syserr : 1;
+		u32 reserved_7_3 : 5;
+		u32 mhistate : 8;
+		u32 reserved_31_16 : 16;
+	} __packed mhistatus;
+
+	/**
+	 * Register is not accessed by HWP.
+	 * In test register carries the handle for
+	 *  the buffer of channel context array
+	 */
+	u32 reserved_50_4c;
+
+	u32 mhierror;
+
+	/**
+	 * Register is not accessed by HWP.
+	 * In test register carries the handle for
+	 * the buffer of event ring context array
+	 */
+	u32 reserved_58_54;
+
+	/**
+	 * 64-bit pointer to the channel context array in the host memory space
+	 *  host sets the pointer to the channel context array during
+	 *  initialization.
+	 */
+	u64 ccabap;
+	/**
+	 * 64-bit pointer to the event context array in the host memory space
+	 *  host sets the pointer to the event context array during
+	 *  initialization
+	 */
+	u64 ecabap;
+	/**
+	 * Register is not accessed by HWP.
+	 * In test register carries the pointer of virtual address
+	 *  for the buffer of channel context array
+	 */
+	u64 crcbap;
+	/**
+	 * Register is not accessed by HWP.
+	 * In test register carries the pointer of virtual address
+	 *  for the buffer of event ring context array
+	 */
+	u64 crdb;
+
+	u64	reserved_80_78;
+
+	struct mhiaddr {
+		/**
+		 * Base address (64-bit) of the memory region in
+		 *  the host address space where the MHI control
+		 *  data structures are allocated by the host,
+		 *  including channel context array, event context array,
+		 *  and rings.
+		 *  The device uses this information to set up its internal
+		 *   address translation tables.
+		 *  value must be aligned to 4 Kbytes.
+		 */
+		u64 mhicrtlbase;
+		/**
+		 * Upper limit address (64-bit) of the memory region in
+		 *  the host address space where the MHI control
+		 *  data structures are allocated by the host.
+		 * The device uses this information to set up its internal
+		 *  address translation tables.
+		 * The most significant 32 bits of MHICTRLBASE and
+		 * MHICTRLLIMIT registers must be equal.
+		 */
+		u64 mhictrllimit;
+		u64 reserved_18_10;
+		/**
+		 * Base address (64-bit) of the memory region in
+		 *  the host address space where the MHI data buffers
+		 *  are allocated by the host.
+		 * The device uses this information to set up its
+		 *  internal address translation tables.
+		 * value must be aligned to 4 Kbytes.
+		 */
+		u64 mhidatabase;
+		/**
+		 * Upper limit address (64-bit) of the memory region in
+		 *  the host address space where the MHI data buffers
+		 *  are allocated by the host.
+		 * The device uses this information to set up its
+		 *  internal address translation tables.
+		 * The most significant 32 bits of MHIDATABASE and
+		 *  MHIDATALIMIT registers must be equal.
+		 */
+		u64 mhidatalimit;
+		u64 reserved_30_28;
+	} __packed mhiaddr;
+
+} __packed;
+
+/**
+ * struct ipa_mhi_event_ring_element - MHI Event ring element
+ *
+ * mapping is taken from MHI spec
+ */
+struct ipa_mhi_event_ring_element {
+	/**
+	 * pointer to ring element that generated event in
+	 *  the host system address space
+	 */
+	u64	ptr;
+	union {
+		struct {
+			u32	len : 24;
+			u32	code : 8;
+		} __packed bits;
+		u32	dword;
+	} __packed dword_8;
+	u16	reserved;
+	u8		type;
+	u8		chid;
+} __packed;
+
+/**
+* struct ipa_mhi_transfer_ring_element - MHI Transfer ring element
+*
+* mapping is taken from MHI spec
+*/
+struct ipa_mhi_transfer_ring_element {
+	u64	ptr; /*pointer to buffer in the host system address space*/
+	u16	len; /*transaction length in bytes*/
+	u16	reserved0;
+	union {
+		struct {
+			u16		chain : 1;
+			u16		reserved_7_1 : 7;
+			u16		ieob : 1;
+			u16		ieot : 1;
+			u16		bei : 1;
+			u16		reserved_15_11 : 5;
+		} __packed bits;
+		u16	word;
+	} __packed word_C;
+	u8		type;
+	u8		reserved1;
+} __packed;
+
+/**
+ * struct ipa_test_mhi_context - MHI test context
+ */
+struct ipa_test_mhi_context {
+	void __iomem *gsi_mmio;
+	struct ipa_mem_buffer msi;
+	struct ipa_mem_buffer ch_ctx_array;
+	struct ipa_mem_buffer ev_ctx_array;
+	struct ipa_mem_buffer mmio_buf;
+	struct ipa_mem_buffer xfer_ring_bufs[IPA_MHI_TEST_NUM_CHANNELS];
+	struct ipa_mem_buffer ev_ring_bufs[IPA_MHI_TEST_NUM_EVENT_RINGS];
+	struct ipa_mem_buffer in_buffer;
+	struct ipa_mem_buffer out_buffer;
+	u32 prod_hdl;
+	u32 cons_hdl;
+};
+
+static struct ipa_test_mhi_context *test_mhi_ctx;
+
+static void ipa_mhi_test_cb(void *priv,
+	enum ipa_mhi_event_type event, unsigned long data)
+{
+	IPA_UT_DBG("Entry\n");
+
+	if (event == IPA_MHI_EVENT_DATA_AVAILABLE)
+		complete_all(&mhi_test_wakeup_comp);
+	else if (event == IPA_MHI_EVENT_READY)
+		complete_all(&mhi_test_ready_comp);
+	else
+		WARN_ON(1);
+}
+
+static void ipa_test_mhi_free_mmio_space(void)
+{
+	IPA_UT_DBG("Entry\n");
+
+	if (!test_mhi_ctx)
+		return;
+
+	dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->mmio_buf.size,
+		test_mhi_ctx->mmio_buf.base,
+		test_mhi_ctx->mmio_buf.phys_base);
+
+	dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ev_ctx_array.size,
+		test_mhi_ctx->ev_ctx_array.base,
+		test_mhi_ctx->ev_ctx_array.phys_base);
+
+	dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->ch_ctx_array.size,
+		test_mhi_ctx->ch_ctx_array.base,
+		test_mhi_ctx->ch_ctx_array.phys_base);
+
+	dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->msi.size,
+		test_mhi_ctx->msi.base, test_mhi_ctx->msi.phys_base);
+}
+
+static int ipa_test_mhi_alloc_mmio_space(void)
+{
+	int rc = 0;
+	struct ipa_mem_buffer *msi;
+	struct ipa_mem_buffer *ch_ctx_array;
+	struct ipa_mem_buffer *ev_ctx_array;
+	struct ipa_mem_buffer *mmio_buf;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+
+	IPA_UT_DBG("Entry\n");
+
+	msi = &test_mhi_ctx->msi;
+	ch_ctx_array = &test_mhi_ctx->ch_ctx_array;
+	ev_ctx_array = &test_mhi_ctx->ev_ctx_array;
+	mmio_buf = &test_mhi_ctx->mmio_buf;
+
+	/* Allocate MSI */
+	msi->size = 4;
+	msi->base = dma_alloc_coherent(ipa3_ctx->pdev, msi->size,
+		&msi->phys_base, GFP_KERNEL);
+	if (!msi->base) {
+		IPA_UT_ERR("no mem for msi\n");
+		return -ENOMEM;
+	}
+
+	IPA_UT_DBG("msi: base 0x%pK phys_addr 0x%pad size %d\n",
+		msi->base, &msi->phys_base, msi->size);
+
+	/* allocate buffer for channel context */
+	ch_ctx_array->size = sizeof(struct ipa_mhi_channel_context_array) *
+		IPA_MHI_TEST_NUM_CHANNELS;
+	ch_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+		ch_ctx_array->size, &ch_ctx_array->phys_base, GFP_KERNEL);
+	if (!ch_ctx_array->base) {
+		IPA_UT_ERR("no mem for ch ctx array\n");
+		rc = -ENOMEM;
+		goto fail_free_msi;
+	}
+	IPA_UT_DBG("channel ctx array: base 0x%pK phys_addr %pad size %d\n",
+		ch_ctx_array->base, &ch_ctx_array->phys_base,
+		ch_ctx_array->size);
+
+	/* allocate buffer for event context */
+	ev_ctx_array->size = sizeof(struct ipa_mhi_event_context_array) *
+		IPA_MHI_TEST_NUM_EVENT_RINGS;
+	ev_ctx_array->base = dma_alloc_coherent(ipa3_ctx->pdev,
+		ev_ctx_array->size, &ev_ctx_array->phys_base, GFP_KERNEL);
+	if (!ev_ctx_array->base) {
+		IPA_UT_ERR("no mem for ev ctx array\n");
+		rc = -ENOMEM;
+		goto fail_free_ch_ctx_arr;
+	}
+	IPA_UT_DBG("event ctx array: base 0x%pK phys_addr %pad size %d\n",
+		ev_ctx_array->base, &ev_ctx_array->phys_base,
+		ev_ctx_array->size);
+
+	/* allocate buffer for mmio */
+	mmio_buf->size = sizeof(struct ipa_mhi_mmio_register_set);
+	mmio_buf->base = dma_alloc_coherent(ipa3_ctx->pdev, mmio_buf->size,
+		&mmio_buf->phys_base, GFP_KERNEL);
+	if (!mmio_buf->base) {
+		IPA_UT_ERR("no mem for mmio buf\n");
+		rc = -ENOMEM;
+		goto fail_free_ev_ctx_arr;
+	}
+	IPA_UT_DBG("mmio buffer: base 0x%pK phys_addr %pad size %d\n",
+		mmio_buf->base, &mmio_buf->phys_base, mmio_buf->size);
+
+	/* initialize the MMIO register table */
+	p_mmio = (struct ipa_mhi_mmio_register_set *)mmio_buf->base;
+
+	/**
+	 * 64-bit pointer to the channel context array in the host memory space;
+	 * Host sets the pointer to the channel context array
+	 * during initialization.
+	 */
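+	/* Bias the base so that indexing with the absolute channel ID
+	 * (starting at IPA_MHI_TEST_FIRST_CHANNEL_ID) lands on entry 0
+	 * of the locally allocated array.
+	 */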
+	p_mmio->ccabap = (u32)ch_ctx_array->phys_base -
+		(IPA_MHI_TEST_FIRST_CHANNEL_ID *
+		sizeof(struct ipa_mhi_channel_context_array));
+	IPA_UT_DBG("pMmio->ccabap 0x%llx\n", p_mmio->ccabap);
+
+	/**
+	 * 64-bit pointer to the event context array in the host memory space;
+	 * Host sets the pointer to the event context array
+	 * during initialization
+	 */
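+	/* Same bias for the event ring context array, indexed by the
+	 * absolute event ring ID.
+	 */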
+	p_mmio->ecabap = (u32)ev_ctx_array->phys_base -
+		(IPA_MHI_TEST_FIRST_EVENT_RING_ID *
+		sizeof(struct ipa_mhi_event_context_array));
+	IPA_UT_DBG("pMmio->ecabap 0x%llx\n", p_mmio->ecabap);
+
+	/**
+	 * Register is not accessed by HWP.
+	 * In the test, this register carries the virtual address
+	 *  of the buffer holding the channel context array
+	 */
+	p_mmio->crcbap = (unsigned long)ch_ctx_array->base;
+
+	/**
+	 * Register is not accessed by HWP.
+	 * In the test, this register carries the virtual address
+	 *  of the buffer holding the event ring context array
+	 */
+	p_mmio->crdb = (unsigned long)ev_ctx_array->base;
+
+	/* test is running only on device. no need to translate addresses */
+	p_mmio->mhiaddr.mhicrtlbase = 0x04;
+	p_mmio->mhiaddr.mhictrllimit = 0xFFFFFFFF;
+	p_mmio->mhiaddr.mhidatabase = 0x04;
+	p_mmio->mhiaddr.mhidatalimit = 0xFFFFFFFF;
+
+	return rc;
+
+fail_free_ev_ctx_arr:
+	dma_free_coherent(ipa3_ctx->pdev, ev_ctx_array->size,
+		ev_ctx_array->base, ev_ctx_array->phys_base);
+	ev_ctx_array->base = NULL;
+fail_free_ch_ctx_arr:
+	dma_free_coherent(ipa3_ctx->pdev, ch_ctx_array->size,
+		ch_ctx_array->base, ch_ctx_array->phys_base);
+	ch_ctx_array->base = NULL;
+fail_free_msi:
+	dma_free_coherent(ipa3_ctx->pdev, msi->size, msi->base,
+		msi->phys_base);
+	msi->base = NULL;
+	return rc;
+}
+
+static void ipa_mhi_test_destroy_channel_context(
+	struct ipa_mem_buffer transfer_ring_bufs[],
+	struct ipa_mem_buffer event_ring_bufs[],
+	u8 channel_id,
+	u8 event_ring_id)
+{
+	u32 ev_ring_idx;
+	u32 ch_idx;
+
+	IPA_UT_DBG("Entry\n");
+
+	if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) ||
+		(channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) {
+		IPA_UT_ERR("channal_id invalid %d\n", channel_id);
+		return;
+	}
+
+	if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) ||
+		(event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) {
+		IPA_UT_ERR("event_ring_id invalid %d\n", event_ring_id);
+		return;
+	}
+
+	ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+	ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+	if (transfer_ring_bufs[ch_idx].base) {
+		dma_free_coherent(ipa3_ctx->pdev,
+			transfer_ring_bufs[ch_idx].size,
+			transfer_ring_bufs[ch_idx].base,
+			transfer_ring_bufs[ch_idx].phys_base);
+		transfer_ring_bufs[ch_idx].base = NULL;
+	}
+
+	if (event_ring_bufs[ev_ring_idx].base) {
+		dma_free_coherent(ipa3_ctx->pdev,
+			event_ring_bufs[ev_ring_idx].size,
+			event_ring_bufs[ev_ring_idx].base,
+			event_ring_bufs[ev_ring_idx].phys_base);
+		event_ring_bufs[ev_ring_idx].base = NULL;
+	}
+}
+
+static int ipa_mhi_test_config_channel_context(
+	struct ipa_mem_buffer *mmio,
+	struct ipa_mem_buffer transfer_ring_bufs[],
+	struct ipa_mem_buffer event_ring_bufs[],
+	u8 channel_id,
+	u8 event_ring_id,
+	u16 transfer_ring_size,
+	u16 event_ring_size,
+	u8 ch_type)
+{
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_channels;
+	struct ipa_mhi_event_context_array *p_events;
+	u32 ev_ring_idx;
+	u32 ch_idx;
+
+	IPA_UT_DBG("Entry\n");
+
+	if ((channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) ||
+		(channel_id > IPA_MHI_TEST_LAST_CHANNEL_ID)) {
+		IPA_UT_DBG("channal_id invalid %d\n", channel_id);
+		return -EFAULT;
+	}
+
+	if ((event_ring_id < IPA_MHI_TEST_FIRST_EVENT_RING_ID) ||
+		(event_ring_id > IPA_MHI_TEST_LAST_EVENT_RING_ID)) {
+		IPA_UT_DBG("event_ring_id invalid %d\n", event_ring_id);
+		return -EFAULT;
+	}
+
+	p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base;
+	p_channels =
+		(struct ipa_mhi_channel_context_array *)
+		((unsigned long)p_mmio->crcbap);
+	p_events = (struct ipa_mhi_event_context_array *)
+		((unsigned long)p_mmio->crdb);
+
+	IPA_UT_DBG("p_mmio: %pK p_channels: %pK p_events: %pK\n",
+		p_mmio, p_channels, p_events);
+
+	ch_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+	ev_ring_idx = event_ring_id - IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
+	IPA_UT_DBG("ch_idx: %u ev_ring_idx: %u\n", ch_idx, ev_ring_idx);
+	if (transfer_ring_bufs[ch_idx].base) {
+		IPA_UT_ERR("ChannelId %d is already allocated\n", channel_id);
+		return -EFAULT;
+	}
+
+	/* allocate and init event ring if needed */
+	if (!event_ring_bufs[ev_ring_idx].base) {
+		IPA_UT_LOG("Configuring event ring...\n");
+		event_ring_bufs[ev_ring_idx].size =
+			event_ring_size *
+				sizeof(struct ipa_mhi_event_ring_element);
+		event_ring_bufs[ev_ring_idx].base =
+			dma_alloc_coherent(ipa3_ctx->pdev,
+				event_ring_bufs[ev_ring_idx].size,
+				&event_ring_bufs[ev_ring_idx].phys_base,
+				GFP_KERNEL);
+		if (!event_ring_bufs[ev_ring_idx].base) {
+			IPA_UT_ERR("no mem for ev ring buf\n");
+			return -ENOMEM;
+		}
+		p_events[ev_ring_idx].intmodc = 1;
+		p_events[ev_ring_idx].intmodt = 0;
+		p_events[ev_ring_idx].msivec = event_ring_id;
+		p_events[ev_ring_idx].rbase =
+			(u32)event_ring_bufs[ev_ring_idx].phys_base;
+		p_events[ev_ring_idx].rlen =
+			event_ring_bufs[ev_ring_idx].size;
+		p_events[ev_ring_idx].rp =
+			(u32)event_ring_bufs[ev_ring_idx].phys_base;
+		p_events[ev_ring_idx].wp =
+			(u32)event_ring_bufs[ev_ring_idx].phys_base;
+	} else {
+		IPA_UT_LOG("Skip configuring event ring - already done\n");
+	}
+
+	transfer_ring_bufs[ch_idx].size =
+		transfer_ring_size *
+			sizeof(struct ipa_mhi_transfer_ring_element);
+	transfer_ring_bufs[ch_idx].base =
+		dma_alloc_coherent(ipa3_ctx->pdev,
+			transfer_ring_bufs[ch_idx].size,
+			&transfer_ring_bufs[ch_idx].phys_base,
+			GFP_KERNEL);
+	if (!transfer_ring_bufs[ch_idx].base) {
+		IPA_UT_ERR("no mem for xfer ring buf\n");
+		dma_free_coherent(ipa3_ctx->pdev,
+			event_ring_bufs[ev_ring_idx].size,
+			event_ring_bufs[ev_ring_idx].base,
+			event_ring_bufs[ev_ring_idx].phys_base);
+		event_ring_bufs[ev_ring_idx].base = NULL;
+		return -ENOMEM;
+	}
+
+	p_channels[ch_idx].erindex = event_ring_id;
+	p_channels[ch_idx].rbase = (u32)transfer_ring_bufs[ch_idx].phys_base;
+	p_channels[ch_idx].rlen = transfer_ring_bufs[ch_idx].size;
+	p_channels[ch_idx].rp = (u32)transfer_ring_bufs[ch_idx].phys_base;
+	p_channels[ch_idx].wp = (u32)transfer_ring_bufs[ch_idx].phys_base;
+	p_channels[ch_idx].chtype = ch_type;
+	p_channels[ch_idx].brsmode = IPA_MHI_BURST_MODE_DEFAULT;
+	p_channels[ch_idx].pollcfg = 0;
+
+	return 0;
+}
+
+static void ipa_mhi_test_destroy_data_structures(void)
+{
+	IPA_UT_DBG("Entry\n");
+
+	/* Destroy OUT data buffer */
+	if (test_mhi_ctx->out_buffer.base) {
+		dma_free_coherent(ipa3_ctx->pdev,
+			test_mhi_ctx->out_buffer.size,
+			test_mhi_ctx->out_buffer.base,
+			test_mhi_ctx->out_buffer.phys_base);
+		test_mhi_ctx->out_buffer.base = NULL;
+	}
+
+	/* Destroy IN data buffer */
+	if (test_mhi_ctx->in_buffer.base) {
+		dma_free_coherent(ipa3_ctx->pdev,
+			test_mhi_ctx->in_buffer.size,
+			test_mhi_ctx->in_buffer.base,
+			test_mhi_ctx->in_buffer.phys_base);
+		test_mhi_ctx->in_buffer.base = NULL;
+	}
+
+	/* Destroy IN channel ctx */
+	ipa_mhi_test_destroy_channel_context(
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1);
+
+	/* Destroy OUT channel ctx */
+	ipa_mhi_test_destroy_channel_context(
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID);
+}
+
+static int ipa_mhi_test_setup_data_structures(void)
+{
+	int rc = 0;
+
+	IPA_UT_DBG("Entry\n");
+
+	/* Config OUT Channel Context */
+	rc = ipa_mhi_test_config_channel_context(
+		&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+		0x100,
+		0x80,
+		IPA_MHI_OUT_CHAHNNEL);
+	if (rc) {
+		IPA_UT_ERR("Fail to config OUT ch ctx - err %d", rc);
+		return rc;
+	}
+
+	/* Config IN Channel Context */
+	rc = ipa_mhi_test_config_channel_context(
+		&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+		0x100,
+		0x80,
+		IPA_MHI_IN_CHAHNNEL);
+	if (rc) {
+		IPA_UT_ERR("Fail to config IN ch ctx - err %d", rc);
+		goto fail_destroy_out_ch_ctx;
+	}
+
+	/* allocate IN data buffer */
+	test_mhi_ctx->in_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE;
+	test_mhi_ctx->in_buffer.base = dma_alloc_coherent(
+		ipa3_ctx->pdev, test_mhi_ctx->in_buffer.size,
+		&test_mhi_ctx->in_buffer.phys_base, GFP_KERNEL);
+	if (!test_mhi_ctx->in_buffer.base) {
+		IPA_UT_ERR("no mem for In data buffer\n");
+		rc = -ENOMEM;
+		goto fail_destroy_in_ch_ctx;
+	}
+	memset(test_mhi_ctx->in_buffer.base, 0,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+	/* allocate OUT data buffer */
+	test_mhi_ctx->out_buffer.size = IPA_MHI_TEST_MAX_DATA_BUF_SIZE;
+	test_mhi_ctx->out_buffer.base = dma_alloc_coherent(
+		ipa3_ctx->pdev, test_mhi_ctx->out_buffer.size,
+		&test_mhi_ctx->out_buffer.phys_base, GFP_KERNEL);
+	if (!test_mhi_ctx->out_buffer.base) {
+		IPA_UT_ERR("no mem for Out data buffer\n");
+		rc = -ENOMEM;
+		goto fail_destroy_in_data_buf;
+	}
+	memset(test_mhi_ctx->out_buffer.base, 0,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+	return 0;
+
+fail_destroy_in_data_buf:
+	dma_free_coherent(ipa3_ctx->pdev,
+		test_mhi_ctx->in_buffer.size,
+		test_mhi_ctx->in_buffer.base,
+		test_mhi_ctx->in_buffer.phys_base);
+	test_mhi_ctx->in_buffer.base = NULL;
+fail_destroy_in_ch_ctx:
+	ipa_mhi_test_destroy_channel_context(
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1);
+fail_destroy_out_ch_ctx:
+	ipa_mhi_test_destroy_channel_context(
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID);
+	return rc;
+}
+
+/**
+ * ipa_test_mhi_suite_setup() - Suite setup function
+ */
+static int ipa_test_mhi_suite_setup(void **ppriv)
+{
+	int rc = 0;
+
+	IPA_UT_DBG("Start Setup\n");
+
+	if (!gsi_ctx) {
+		IPA_UT_ERR("No GSI ctx\n");
+		return -EINVAL;
+	}
+
+	if (!ipa3_ctx) {
+		IPA_UT_ERR("No IPA ctx\n");
+		return -EINVAL;
+	}
+
+	test_mhi_ctx = kzalloc(sizeof(struct ipa_test_mhi_context),
+		GFP_KERNEL);
+	if (!test_mhi_ctx) {
+		IPA_UT_ERR("failed allocated ctx\n");
+		return -ENOMEM;
+	}
+
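+	/*
+	 * Map the GSI register space so the test can ring channel and
+	 * event ring doorbells directly via iowrite32().
+	 */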
+	test_mhi_ctx->gsi_mmio = ioremap_nocache(gsi_ctx->per.phys_addr,
+		gsi_ctx->per.size);
+	if (!test_mhi_ctx->gsi_mmio) {
+		IPA_UT_ERR("failed to remap GSI HW size=%lu\n",
+			gsi_ctx->per.size);
+		rc = -EFAULT;
+		goto fail_free_ctx;
+	}
+
+	rc = ipa_test_mhi_alloc_mmio_space();
+	if (rc) {
+		IPA_UT_ERR("failed to alloc mmio space");
+		goto fail_iounmap;
+	}
+
+	rc = ipa_mhi_test_setup_data_structures();
+	if (rc) {
+		IPA_UT_ERR("failed to setup data structures");
+		goto fail_free_mmio_spc;
+	}
+
+	*ppriv = test_mhi_ctx;
+	return 0;
+
+fail_free_mmio_spc:
+	ipa_test_mhi_free_mmio_space();
+fail_iounmap:
+	iounmap(test_mhi_ctx->gsi_mmio);
+fail_free_ctx:
+	kfree(test_mhi_ctx);
+	test_mhi_ctx = NULL;
+	return rc;
+}
+
+/**
+ * ipa_test_mhi_suite_teardown() - Suite teardown function
+ */
+static int ipa_test_mhi_suite_teardown(void *priv)
+{
+	IPA_UT_DBG("Start Teardown\n");
+
+	if (!test_mhi_ctx)
+		return 0;
+
+	ipa_mhi_test_destroy_data_structures();
+	ipa_test_mhi_free_mmio_space();
+	iounmap(test_mhi_ctx->gsi_mmio);
+	kfree(test_mhi_ctx);
+	test_mhi_ctx = NULL;
+
+	return 0;
+}
+
+/**
+ * ipa_mhi_test_initialize_driver() - MHI init and possibly start and connect
+ *
+ * To be run during tests
+ * 1. MHI init (Ready state)
+ * 2. Conditional MHI start and connect (M0 state)
+ */
+static int ipa_mhi_test_initialize_driver(bool skip_start_and_conn)
+{
+	int rc = 0;
+	struct ipa_mhi_init_params init_params;
+	struct ipa_mhi_start_params start_params;
+	struct ipa_mhi_connect_params prod_params;
+	struct ipa_mhi_connect_params cons_params;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+	bool is_dma;
+	u64 phys_addr;
+
+	IPA_UT_LOG("Entry\n");
+
+	p_mmio = test_mhi_ctx->mmio_buf.base;
+
+	/* start IPA MHI */
+	memset(&init_params, 0, sizeof(init_params));
+	init_params.msi.addr_low = test_mhi_ctx->msi.phys_base;
+	init_params.msi.data = 0x10000000;
+	init_params.msi.mask = ~0x10000000;
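+	/*
+	 * The tests detect MSI delivery by polling the spare MSI buffer
+	 * for the value 0x10000000 | <event ring id>
+	 * (see IPA_MHI_TEST_CHECK_MSI_INTR), hence this data/mask pair.
+	 */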
+	/* MMIO not needed for GSI */
+	init_params.first_ch_idx = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+	init_params.first_er_idx = IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+	init_params.assert_bit40 = false;
+	init_params.notify = ipa_mhi_test_cb;
+	init_params.priv = NULL;
+	init_params.test_mode = true;
+
+	rc = ipa_mhi_init(&init_params);
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_init failed %d\n", rc);
+		return rc;
+	}
+
+	IPA_UT_LOG("Wait async ready event\n");
+	if (wait_for_completion_timeout(&mhi_test_ready_comp, 10 * HZ) == 0) {
+		IPA_UT_LOG("timeout waiting for READY event");
+		IPA_UT_TEST_FAIL_REPORT("failed waiting for state ready");
+		return -ETIME;
+	}
+
+	if (ipa_mhi_is_using_dma(&is_dma)) {
+		IPA_UT_LOG("is_dma checkign failed. Is MHI loaded?\n");
+		IPA_UT_TEST_FAIL_REPORT("failed checking using dma");
+		return -EPERM;
+	}
+
+	if (is_dma) {
+		IPA_UT_LOG("init ipa_dma\n");
+		rc = ipa_dma_init();
+		if (rc && rc != -EFAULT) {
+			IPA_UT_LOG("ipa_dma_init failed, %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("failed init dma");
+			return rc;
+		}
+		IPA_UT_LOG("enable ipa_dma\n");
+		rc = ipa_dma_enable();
+		if (rc && rc != -EPERM) {
+			IPA_UT_LOG("ipa_dma_enable failed, %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("failed enable dma");
+			return rc;
+		}
+	}
+
+	if (!skip_start_and_conn) {
+		memset(&start_params, 0, sizeof(start_params));
+		start_params.channel_context_array_addr = p_mmio->ccabap;
+		start_params.event_context_array_addr = p_mmio->ecabap;
+
+		IPA_UT_LOG("BEFORE mhi_start\n");
+		rc = ipa_mhi_start(&start_params);
+		if (rc) {
+			IPA_UT_LOG("mhi_start failed %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("fail start mhi");
+			return rc;
+		}
+		IPA_UT_LOG("AFTER mhi_start\n");
+
+		phys_addr = p_mmio->ccabap + (IPA_MHI_TEST_FIRST_CHANNEL_ID *
+			sizeof(struct ipa_mhi_channel_context_array));
+		p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+			(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+		IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID,
+			p_ch_ctx_array, phys_addr,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+		memset(&prod_params, 0, sizeof(prod_params));
+		prod_params.sys.client = IPA_CLIENT_MHI_PROD;
+		prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA;
+		prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+		prod_params.sys.ipa_ep_cfg.seq.seq_type =
+			IPA_MHI_TEST_SEQ_TYPE_DMA;
+		prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true;
+		prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+		IPA_UT_LOG("BEFORE connect_pipe (PROD): client:%d ch_id:%u\n",
+			prod_params.sys.client, prod_params.channel_id);
+		rc = ipa_mhi_connect_pipe(&prod_params,
+			&test_mhi_ctx->prod_hdl);
+		if (rc) {
+			IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe");
+			return rc;
+		}
+
+		if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+			IPA_UT_LOG("MHI_PROD: chstate is not RUN chstate:%s\n",
+				ipa_mhi_get_state_str(
+				p_ch_ctx_array->chstate));
+			IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run");
+			return -EFAULT;
+		}
+
+		phys_addr = p_mmio->ccabap +
+			((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+			sizeof(struct ipa_mhi_channel_context_array));
+		p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+			(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+		IPA_UT_LOG("ch: %d base: 0x%pK phys_addr 0x%llx chstate: %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+			p_ch_ctx_array, phys_addr,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+		memset(&cons_params, 0, sizeof(cons_params));
+		cons_params.sys.client = IPA_CLIENT_MHI_CONS;
+		cons_params.sys.skip_ep_cfg = true;
+		cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1;
+		IPA_UT_LOG("BEFORE connect_pipe (CONS): client:%d ch_id:%u\n",
+			cons_params.sys.client, cons_params.channel_id);
+		rc = ipa_mhi_connect_pipe(&cons_params,
+			&test_mhi_ctx->cons_hdl);
+		if (rc) {
+			IPA_UT_LOG("mhi_connect_pipe failed %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe");
+			return rc;
+		}
+
+		if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+			IPA_UT_LOG("MHI_CONS: chstate is not RUN chstate:%s\n",
+				ipa_mhi_get_state_str(
+				p_ch_ctx_array->chstate));
+			IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run");
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ * 1. MHI destroy
+ * 2. re-configure the channels
+ */
+static int ipa_mhi_test_destroy(struct ipa_test_mhi_context *ctx)
+{
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	u64 phys_addr;
+	struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+	int rc;
+
+	IPA_UT_LOG("Entry\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("Input err invalid ctx\n");
+		return -EINVAL;
+	}
+
+	p_mmio = ctx->mmio_buf.base;
+
+	phys_addr = p_mmio->ccabap +
+		((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = ctx->ch_ctx_array.base +
+		(phys_addr - ctx->ch_ctx_array.phys_base);
+	IPA_UT_LOG("channel id %d (CONS): chstate %s\n",
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+	phys_addr = p_mmio->ccabap +
+		((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = ctx->ch_ctx_array.base +
+		(phys_addr - ctx->ch_ctx_array.phys_base);
+	IPA_UT_LOG("channel id %d (PROD): chstate %s\n",
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+
+	IPA_UT_LOG("MHI Destroy\n");
+	ipa_mhi_destroy();
+	IPA_UT_LOG("Post MHI Destroy\n");
+
+	ctx->prod_hdl = 0;
+	ctx->cons_hdl = 0;
+
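+	/*
+	 * After MHI destroy, re-allocate the transfer rings and
+	 * re-configure the channel contexts for the next initialization.
+	 */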
+	dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[1].size,
+		ctx->xfer_ring_bufs[1].base, ctx->xfer_ring_bufs[1].phys_base);
+	ctx->xfer_ring_bufs[1].base = NULL;
+
+	IPA_UT_LOG("config channel context for channel %d (MHI CONS)\n",
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1);
+	rc = ipa_mhi_test_config_channel_context(
+		&ctx->mmio_buf,
+		ctx->xfer_ring_bufs,
+		ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+		0x100,
+		0x80,
+		IPA_MHI_IN_CHAHNNEL);
+	if (rc) {
+		IPA_UT_LOG("config channel context failed %d, channel %d\n",
+			rc, IPA_MHI_TEST_FIRST_CHANNEL_ID + 1);
+		IPA_UT_TEST_FAIL_REPORT("fail config CONS channel ctx");
+		return -EFAULT;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, ctx->xfer_ring_bufs[0].size,
+		ctx->xfer_ring_bufs[0].base, ctx->xfer_ring_bufs[0].phys_base);
+	ctx->xfer_ring_bufs[0].base = NULL;
+
+	IPA_UT_LOG("config channel context for channel %d (MHI PROD)\n",
+		IPA_MHI_TEST_FIRST_CHANNEL_ID);
+	rc = ipa_mhi_test_config_channel_context(
+		&ctx->mmio_buf,
+		ctx->xfer_ring_bufs,
+		ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+		0x100,
+		0x80,
+		IPA_MHI_OUT_CHAHNNEL);
+	if (rc) {
+		IPA_UT_LOG("config channel context failed %d, channel %d\n",
+			rc, IPA_MHI_TEST_FIRST_CHANNEL_ID);
+		IPA_UT_TEST_FAIL_REPORT("fail config PROD channel ctx");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ * 1. Destroy
+ * 2. Initialize (to Ready or M0 states)
+ */
+static int ipa_mhi_test_reset(struct ipa_test_mhi_context *ctx,
+	bool skip_start_and_conn)
+{
+	int rc;
+
+	IPA_UT_LOG("Entry\n");
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy fail");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(skip_start_and_conn);
+	if (rc) {
+		IPA_UT_LOG("driver init failed skip_start_and_con=%d rc=%d\n",
+			skip_start_and_conn, rc);
+		IPA_UT_TEST_FAIL_REPORT("init fail");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. disconnect cons channel
+ *	2. config cons channel
+ *	3. disconnect prod channel
+ *	4. config prod channel
+ *	5. connect prod
+ *	6. connect cons
+ */
+static int ipa_mhi_test_channel_reset(void)
+{
+	int rc;
+	struct ipa_mhi_connect_params prod_params;
+	struct ipa_mhi_connect_params cons_params;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+	u64 phys_addr;
+
+	p_mmio = test_mhi_ctx->mmio_buf.base;
+
+	IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n",
+		test_mhi_ctx->cons_hdl);
+	rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->cons_hdl);
+	if (rc) {
+		IPA_UT_LOG("disconnect_pipe failed (CONS) %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("CONS pipe disconnect fail");
+		return -EFAULT;
+	}
+	test_mhi_ctx->cons_hdl = 0;
+
+	phys_addr = p_mmio->ccabap +
+		((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+		IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+		IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not disabled");
+		return -EFAULT;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev,
+		test_mhi_ctx->xfer_ring_bufs[1].size,
+		test_mhi_ctx->xfer_ring_bufs[1].base,
+		test_mhi_ctx->xfer_ring_bufs[1].phys_base);
+	test_mhi_ctx->xfer_ring_bufs[1].base = NULL;
+	rc = ipa_mhi_test_config_channel_context(
+		&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID + 1,
+		0x100,
+		0x80,
+		IPA_MHI_IN_CHAHNNEL);
+	if (rc) {
+		IPA_UT_LOG("config_channel_context IN failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail config CONS channel context");
+		return -EFAULT;
+	}
+	IPA_UT_LOG("Before pipe disconnect (CONS) client hdl=%u=\n",
+		test_mhi_ctx->prod_hdl);
+	rc = ipa_mhi_disconnect_pipe(test_mhi_ctx->prod_hdl);
+	if (rc) {
+		IPA_UT_LOG("disconnect_pipe failed (PROD) %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("PROD pipe disconnect fail");
+		return -EFAULT;
+	}
+	test_mhi_ctx->prod_hdl = 0;
+
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_DISABLE) {
+		IPA_UT_LOG("chstate is not disabled! ch %d chstate %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+		IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled");
+		return -EFAULT;
+	}
+
+	dma_free_coherent(ipa3_ctx->pdev, test_mhi_ctx->xfer_ring_bufs[0].size,
+		test_mhi_ctx->xfer_ring_bufs[0].base,
+		test_mhi_ctx->xfer_ring_bufs[0].phys_base);
+	test_mhi_ctx->xfer_ring_bufs[0].base = NULL;
+	rc = ipa_mhi_test_config_channel_context(
+		&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID,
+		0x100,
+		0x80,
+		IPA_MHI_OUT_CHAHNNEL);
+	if (rc) {
+		IPA_UT_LOG("config_channel_context OUT failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not disabled");
+		return -EFAULT;
+	}
+
+	memset(&prod_params, 0, sizeof(prod_params));
+	prod_params.sys.client = IPA_CLIENT_MHI_PROD;
+	prod_params.sys.ipa_ep_cfg.mode.mode = IPA_DMA;
+	prod_params.sys.ipa_ep_cfg.mode.dst = IPA_CLIENT_MHI_CONS;
+	prod_params.sys.ipa_ep_cfg.seq.seq_type = IPA_MHI_TEST_SEQ_TYPE_DMA;
+	prod_params.sys.ipa_ep_cfg.seq.set_dynamic = true;
+	prod_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID;
+	IPA_UT_LOG("BEFORE connect PROD\n");
+	rc = ipa_mhi_connect_pipe(&prod_params, &test_mhi_ctx->prod_hdl);
+	if (rc) {
+		IPA_UT_LOG("connect_pipe failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail connect PROD pipe");
+		return rc;
+	}
+
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+		IPA_UT_LOG("chstate is not run! ch %d chstate %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+		IPA_UT_TEST_FAIL_REPORT("PROD pipe state is not run");
+		return -EFAULT;
+	}
+
+	memset(&cons_params, 0, sizeof(cons_params));
+	cons_params.sys.client = IPA_CLIENT_MHI_CONS;
+	cons_params.sys.skip_ep_cfg = true;
+	cons_params.channel_id = IPA_MHI_TEST_FIRST_CHANNEL_ID + 1;
+	IPA_UT_LOG("BEFORE connect CONS\n");
+	rc = ipa_mhi_connect_pipe(&cons_params, &test_mhi_ctx->cons_hdl);
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_connect_pipe failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail connect CONS pipe");
+		return rc;
+	}
+
+	phys_addr = p_mmio->ccabap +
+		((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+		IPA_UT_LOG("chstate is not run! ch %d chstate %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+		IPA_UT_TEST_FAIL_REPORT("CONS pipe state is not run");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ * Send data
+ */
+static int ipa_mhi_test_q_transfer_re(struct ipa_mem_buffer *mmio,
+	struct ipa_mem_buffer xfer_ring_bufs[],
+	struct ipa_mem_buffer ev_ring_bufs[],
+	u8 channel_id,
+	struct ipa_mem_buffer buf_array[],
+	int buf_array_size,
+	bool ieob,
+	bool ieot,
+	bool bei,
+	bool trigger_db)
+{
+	struct ipa_mhi_transfer_ring_element *curr_re;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_channels;
+	struct ipa_mhi_event_context_array *p_events;
+	u32 channel_idx;
+	u32 event_ring_index;
+	u32 wp_ofst;
+	u32 rp_ofst;
+	u32 next_wp_ofst;
+	int i;
+	u32 num_of_ed_to_queue;
+
+	IPA_UT_LOG("Entry\n");
+
+	p_mmio = (struct ipa_mhi_mmio_register_set *)mmio->base;
+	p_channels = (struct ipa_mhi_channel_context_array *)
+		((unsigned long)p_mmio->crcbap);
+	p_events = (struct ipa_mhi_event_context_array *)
+		((unsigned long)p_mmio->crdb);
+
+	if (ieob)
+		num_of_ed_to_queue = buf_array_size;
+	else
+		num_of_ed_to_queue = ieot ? 1 : 0;
+
+	if (channel_id >=
+		(IPA_MHI_TEST_FIRST_CHANNEL_ID + IPA_MHI_TEST_NUM_CHANNELS) ||
+		channel_id < IPA_MHI_TEST_FIRST_CHANNEL_ID) {
+		IPA_UT_LOG("Invalud Channel ID %d\n", channel_id);
+		return -EFAULT;
+	}
+
+	channel_idx = channel_id - IPA_MHI_TEST_FIRST_CHANNEL_ID;
+
+	if (!xfer_ring_bufs[channel_idx].base) {
+		IPA_UT_LOG("Channel is not allocated\n");
+		return -EFAULT;
+	}
+	if (p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_DEFAULT ||
+	    p_channels[channel_idx].brsmode == IPA_MHI_BURST_MODE_ENABLE)
+		num_of_ed_to_queue += 1; /* for OOB/DB mode event */
+
+	/* First queue EDs */
+	event_ring_index = p_channels[channel_idx].erindex -
+		IPA_MHI_TEST_FIRST_EVENT_RING_ID;
+
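+	/*
+	 * Advance the event ring WP by the number of event descriptors the
+	 * hardware is expected to write, and ring the event ring doorbell
+	 * before queueing the transfer ring elements.
+	 */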
+	wp_ofst = (u32)(p_events[event_ring_index].wp -
+		p_events[event_ring_index].rbase);
+
+	if (p_events[event_ring_index].rlen & 0xFFFFFFFF00000000) {
+		IPA_UT_LOG("invalid ev rlen %llu\n",
+			p_events[event_ring_index].rlen);
+		return -EFAULT;
+	}
+
+	next_wp_ofst = (wp_ofst + num_of_ed_to_queue *
+		sizeof(struct ipa_mhi_event_ring_element)) %
+		(u32)p_events[event_ring_index].rlen;
+
+	/* set next WP */
+	p_events[event_ring_index].wp =
+		(u32)p_events[event_ring_index].rbase + next_wp_ofst;
+
+	/* write value to event ring doorbell */
+	IPA_UT_LOG("DB to event 0x%llx: base %pa ofst 0x%x\n",
+		p_events[event_ring_index].wp,
+		&(gsi_ctx->per.phys_addr), GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+			event_ring_index + IPA_MHI_GSI_ER_START, 0));
+	iowrite32(p_events[event_ring_index].wp,
+		test_mhi_ctx->gsi_mmio +
+		GSI_EE_n_EV_CH_k_DOORBELL_0_OFFS(
+			event_ring_index + IPA_MHI_GSI_ER_START, 0));
+
+	for (i = 0; i < buf_array_size; i++) {
+		/* calculate virtual pointer for current WP and RP */
+		wp_ofst = (u32)(p_channels[channel_idx].wp -
+			p_channels[channel_idx].rbase);
+		rp_ofst = (u32)(p_channels[channel_idx].rp -
+			p_channels[channel_idx].rbase);
+		(void)rp_ofst;
+		curr_re = (struct ipa_mhi_transfer_ring_element *)
+			((unsigned long)xfer_ring_bufs[channel_idx].base +
+			wp_ofst);
+		if (p_channels[channel_idx].rlen & 0xFFFFFFFF00000000) {
+			IPA_UT_LOG("invalid ch rlen %llu\n",
+				p_channels[channel_idx].rlen);
+			return -EFAULT;
+		}
+		next_wp_ofst = (wp_ofst +
+			sizeof(struct ipa_mhi_transfer_ring_element)) %
+			(u32)p_channels[channel_idx].rlen;
+
+		/* write current RE */
+		curr_re->type = IPA_MHI_RING_ELEMENT_TRANSFER;
+		curr_re->len = (u16)buf_array[i].size;
+		curr_re->ptr = (u32)buf_array[i].phys_base;
+		curr_re->word_C.bits.bei = bei;
+		curr_re->word_C.bits.ieob = ieob;
+		curr_re->word_C.bits.ieot = ieot;
+
+		/* set next WP */
+		p_channels[channel_idx].wp =
+			p_channels[channel_idx].rbase + next_wp_ofst;
+
+		if (i == (buf_array_size - 1)) {
+			/* last buffer */
+			curr_re->word_C.bits.chain = 0;
+			if (trigger_db) {
+				IPA_UT_LOG(
+					"DB to channel 0x%llx: base %pa ofst 0x%x\n"
+					, p_channels[channel_idx].wp
+					, &(gsi_ctx->per.phys_addr)
+					, GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
+						channel_idx, 0));
+				iowrite32(p_channels[channel_idx].wp,
+					test_mhi_ctx->gsi_mmio +
+					GSI_EE_n_GSI_CH_k_DOORBELL_0_OFFS(
+					channel_idx, 0));
+			}
+		} else {
+			curr_re->word_C.bits.chain = 1;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ * Send data in loopback (from OUT to IN) and compare
+ */
+static int ipa_mhi_test_loopback_data_transfer(void)
+{
+	struct ipa_mem_buffer *p_mmio;
+	int i;
+	int rc;
+	static int val;
+	bool timeout = true;
+
+	IPA_UT_LOG("Entry\n");
+
+	p_mmio = &test_mhi_ctx->mmio_buf;
+
+	/* invalidate spare register value (for msi) */
+	memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+	val++;
+
+	memset(test_mhi_ctx->in_buffer.base, 0,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+		memset(test_mhi_ctx->out_buffer.base + i, (val + i) & 0xFF, 1);
+
+	/* queue RE for IN side and trigger doorbell */
+	rc = ipa_mhi_test_q_transfer_re(p_mmio,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		&test_mhi_ctx->in_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+
+	if (rc) {
+		IPA_UT_LOG("q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+		return rc;
+	}
+
+	/* queue REs for OUT side and trigger doorbell */
+	rc = ipa_mhi_test_q_transfer_re(p_mmio,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		&test_mhi_ctx->out_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+
+	if (rc) {
+		IPA_UT_LOG("q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re");
+		return rc;
+	}
+
+	IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+	if (timeout) {
+		IPA_UT_LOG("transfer timeout. MSI = 0x%x\n",
+			*((u32 *)test_mhi_ctx->msi.base));
+		IPA_UT_TEST_FAIL_REPORT("xfter timeout");
+		return -EFAULT;
+	}
+
+	/* compare the two buffers */
+	if (memcmp(test_mhi_ctx->in_buffer.base, test_mhi_ctx->out_buffer.base,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+		IPA_UT_LOG("buffer are not equal\n");
+		IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ * Do suspend and, if it is expected to succeed, verify the channel states are SUSPEND
+ */
+static int ipa_mhi_test_suspend(bool force, bool should_success)
+{
+	int rc;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+	u64 phys_addr;
+
+	IPA_UT_LOG("Entry\n");
+
+	rc = ipa_mhi_suspend(force);
+	if (should_success && rc != 0) {
+		IPA_UT_LOG("ipa_mhi_suspend failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend failed");
+		return -EFAULT;
+	}
+
+	if (!should_success && rc != -EAGAIN) {
+		IPA_UT_LOG("ipa_mhi_suspenddid not return -EAGAIN fail %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend succeeded unexpectedly");
+		return -EFAULT;
+	}
+
+	p_mmio = test_mhi_ctx->mmio_buf.base;
+
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (should_success) {
+		if (p_ch_ctx_array->chstate !=
+			IPA_HW_MHI_CHANNEL_STATE_SUSPEND) {
+			IPA_UT_LOG("chstate is not suspend. ch %d chstate %s\n",
+				IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+				ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+			IPA_UT_TEST_FAIL_REPORT("channel state not suspend");
+			return -EFAULT;
+		}
+		if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) {
+			IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n",
+				IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+				p_ch_ctx_array->rp, p_ch_ctx_array->wp);
+			IPA_UT_TEST_FAIL_REPORT("rp was not updated");
+			return -EFAULT;
+		}
+	} else {
+		if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+			IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+				IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+				ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+			IPA_UT_TEST_FAIL_REPORT("channel state not run");
+			return -EFAULT;
+		}
+	}
+
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (should_success) {
+		if (p_ch_ctx_array->chstate !=
+			IPA_HW_MHI_CHANNEL_STATE_SUSPEND) {
+			IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+				IPA_MHI_TEST_FIRST_CHANNEL_ID,
+				ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+			IPA_UT_TEST_FAIL_REPORT("channel state not suspend");
+			return -EFAULT;
+		}
+		if (!force && p_ch_ctx_array->rp != p_ch_ctx_array->wp) {
+			IPA_UT_LOG("rp not updated ch %d rp 0x%llx wp 0x%llx\n",
+				IPA_MHI_TEST_FIRST_CHANNEL_ID,
+				p_ch_ctx_array->rp, p_ch_ctx_array->wp);
+			IPA_UT_TEST_FAIL_REPORT("rp was not updated");
+			return -EFAULT;
+		}
+	} else {
+		if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+			IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+				IPA_MHI_TEST_FIRST_CHANNEL_ID,
+				ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+			IPA_UT_TEST_FAIL_REPORT("channel state not run");
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ * Do resume and check channel state to be running
+ */
+static int ipa_test_mhi_resume(void)
+{
+	int rc;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+	u64 phys_addr;
+
+	rc = ipa_mhi_resume();
+	if (rc) {
+		IPA_UT_LOG("resume failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("resume failed");
+		return -EFAULT;
+	}
+
+	p_mmio = test_mhi_ctx->mmio_buf.base;
+
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID + 1) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+		IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+		IPA_UT_TEST_FAIL_REPORT("channel state not run");
+		return -EFAULT;
+	}
+
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	if (p_ch_ctx_array->chstate != IPA_HW_MHI_CHANNEL_STATE_RUN) {
+		IPA_UT_LOG("chstate is not running! ch %d chstate %s\n",
+			IPA_MHI_TEST_FIRST_CHANNEL_ID,
+			ipa_mhi_get_state_str(p_ch_ctx_array->chstate));
+		IPA_UT_TEST_FAIL_REPORT("channel state not run");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. suspend
+ *	2. queue RE for IN and OUT and send data
+ *	3. should get MSI timeout due to suspend
+ *	4. resume
+ *	5. should get the MSIs now
+ *	6. compare the IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_resume(void)
+{
+	int rc;
+	int i;
+	bool timeout = true;
+
+	IPA_UT_LOG("Entry\n");
+
+	IPA_UT_LOG("BEFORE suspend\n");
+	rc = ipa_mhi_test_suspend(false, true);
+	if (rc) {
+		IPA_UT_LOG("suspend failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend failed");
+		return rc;
+	}
+	IPA_UT_LOG("AFTER suspend\n");
+
+	/* invalidate spare register value (for msi) */
+	memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+	memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+		memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+
+	/* queue RE for IN side and trigger doorbell */
+	rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		&test_mhi_ctx->in_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+		return rc;
+	}
+
+	/* queue REs for OUT side and trigger doorbell */
+	rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID,
+		&test_mhi_ctx->out_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail OUT q xfer re");
+		return rc;
+	}
+
+	IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+	if (!timeout) {
+		IPA_UT_LOG("Error: transfer success on suspend\n");
+		IPA_UT_TEST_FAIL_REPORT("xfer suceeded unexpectedly");
+		return -EFAULT;
+	}
+
+	IPA_UT_LOG("BEFORE resume\n");
+	rc = ipa_test_mhi_resume();
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_resume failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("resume fail");
+		return rc;
+	}
+	IPA_UT_LOG("AFTER resume\n");
+
+	IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+	if (timeout) {
+		IPA_UT_LOG("Error: transfer timeout\n");
+		IPA_UT_TEST_FAIL_REPORT("xfer timeout");
+		return -EFAULT;
+	}
+
+	/* compare the two buffers */
+	if (memcmp(test_mhi_ctx->in_buffer.base,
+		test_mhi_ctx->out_buffer.base,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+		IPA_UT_LOG("Error: buffers are not equal\n");
+		IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. enable aggregation
+ *	2. queue IN RE (ring element)
+ *	3. allocate skb with data
+ *	4. send it (this will create open aggr frame)
+ */
+static int ipa_mhi_test_create_aggr_open_frame(void)
+{
+	struct ipa_ep_cfg_aggr ep_aggr;
+	struct sk_buff *skb;
+	int rc;
+	int i;
+	u32 aggr_state_active;
+
+	IPA_UT_LOG("Entry\n");
+
+	memset(&ep_aggr, 0, sizeof(ep_aggr));
+	ep_aggr.aggr_en = IPA_ENABLE_AGGR;
+	ep_aggr.aggr = IPA_GENERIC;
+	ep_aggr.aggr_pkt_limit = 2;
+
+	rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+	if (rc) {
+		IPA_UT_LOG("failed to configure aggr");
+		IPA_UT_TEST_FAIL_REPORT("failed to configure aggr");
+		return rc;
+	}
+
+	/* invalidate spare register value (for msi) */
+	memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+	/* queue RE for IN side and trigger doorbell */
+	rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		&test_mhi_ctx->in_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+		return rc;
+	}
+
+	skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	if (!skb) {
+		IPA_UT_LOG("non mem for skb\n");
+		IPA_UT_TEST_FAIL_REPORT("fail alloc skb");
+		return -ENOMEM;
+	}
+	skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) {
+		memset(skb->data + i, i & 0xFF, 1);
+		memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+	}
+
+	rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+	if (rc) {
+		IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
+		return rc;
+	}
+
+	msleep(20);
+
+	aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+	IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE  0x%x\n", aggr_state_active);
+	if (aggr_state_active == 0) {
+		IPA_UT_LOG("No aggregation frame open!\n");
+		IPA_UT_TEST_FAIL_REPORT("No aggregation frame open");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. create open aggr by sending data
+ *	2. suspend - if force it should succeed, otherwise it fails
+ *	3. if force - wait for wakeup event - it should arrive
+ *	4. if force - resume
+ *	5. force close the aggr.
+ *	6. wait for MSI - it should arrive
+ *	7. compare IN and OUT buffers
+ *	8. disable aggr.
+ */
+static int ipa_mhi_test_suspend_aggr_open(bool force)
+{
+	int rc;
+	struct ipa_ep_cfg_aggr ep_aggr;
+	bool timeout = true;
+
+	IPA_UT_LOG("Entry\n");
+
+	rc = ipa_mhi_test_create_aggr_open_frame();
+	if (rc) {
+		IPA_UT_LOG("failed create open aggr\n");
+		IPA_UT_TEST_FAIL_REPORT("fail create open aggr");
+		return rc;
+	}
+
+	if (force)
+		reinit_completion(&mhi_test_wakeup_comp);
+
+	IPA_UT_LOG("BEFORE suspend\n");
+	/**
+	 * If suspend is forced, it should succeed.
+	 * Otherwise it should fail due to the open aggr frame.
+	 */
+	rc = ipa_mhi_test_suspend(force, force);
+	if (rc) {
+		IPA_UT_LOG("suspend failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend fail");
+		return rc;
+	}
+	IPA_UT_LOG("AFTER suspend\n");
+
+	if (force) {
+		if (!wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ)) {
+			IPA_UT_LOG("timeout waiting for wakeup event\n");
+			IPA_UT_TEST_FAIL_REPORT("timeout waitinf wakeup event");
+			return -ETIME;
+		}
+
+		IPA_UT_LOG("BEFORE resume\n");
+		rc = ipa_test_mhi_resume();
+		if (rc) {
+			IPA_UT_LOG("resume failed %d\n", rc);
+			IPA_UT_TEST_FAIL_REPORT("resume failed");
+			return rc;
+		}
+		IPA_UT_LOG("AFTER resume\n");
+	}
+
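+	/*
+	 * Force-close the open aggregation frame so the pending IN
+	 * transfer completes and the MSI below arrives.
+	 */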
+	ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << test_mhi_ctx->cons_hdl));
+
+	IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout);
+	if (timeout) {
+		IPA_UT_LOG("fail: transfer not completed\n");
+		IPA_UT_TEST_FAIL_REPORT("timeout on transferring data");
+		return -EFAULT;
+	}
+
+	/* compare the two buffers */
+	if (memcmp(test_mhi_ctx->in_buffer.base,
+		test_mhi_ctx->out_buffer.base,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+		IPA_UT_LOG("fail: buffer are not equal\n");
+		IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+		return -EFAULT;
+	}
+
+	memset(&ep_aggr, 0, sizeof(ep_aggr));
+	rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+	if (rc) {
+		IPA_UT_LOG("failed to configure aggr");
+		IPA_UT_TEST_FAIL_REPORT("fail to disable aggr");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. suspend
+ *	2. queue IN RE (ring element)
+ *	3. allocate skb with data
+ *	4. send it (this will trigger a wakeup event)
+ *	5. wait for wakeup event - it should arrive
+ *	6. resume
+ *	7. wait for MSI - it should arrive
+ *	8. compare IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_host_wakeup(void)
+{
+	int rc;
+	int i;
+	bool timeout = true;
+	struct sk_buff *skb;
+
+	reinit_completion(&mhi_test_wakeup_comp);
+
+	IPA_UT_LOG("BEFORE suspend\n");
+	rc = ipa_mhi_test_suspend(false, true);
+	if (rc) {
+		IPA_UT_LOG("suspend failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend fail");
+		return rc;
+	}
+	IPA_UT_LOG("AFTER suspend\n");
+
+	/* invalidate spare register value (for msi) */
+	memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+	memset(test_mhi_ctx->in_buffer.base, 0, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	/* queue RE for IN side and trigger doorbell*/
+	rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		&test_mhi_ctx->in_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail IN q xfer re");
+		return rc;
+	}
+
+	skb = dev_alloc_skb(IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	if (!skb) {
+		IPA_UT_LOG("non mem for skb\n");
+		IPA_UT_TEST_FAIL_REPORT("no mem for skb");
+		return -ENOMEM;
+	}
+	skb_put(skb, IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+	for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++) {
+		memset(skb->data + i, i & 0xFF, 1);
+		memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+	}
+
+	rc = ipa_tx_dp(IPA_CLIENT_MHI_CONS, skb, NULL);
+	if (rc) {
+		IPA_UT_LOG("ipa_tx_dp failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("ipa tx dp fail");
+		return rc;
+	}
+
+	if (wait_for_completion_timeout(&mhi_test_wakeup_comp, HZ) == 0) {
+		IPA_UT_LOG("timeout waiting for wakeup event\n");
+		IPA_UT_TEST_FAIL_REPORT("timeout waiting for wakeup event");
+		return -ETIME;
+	}
+
+	IPA_UT_LOG("BEFORE resume\n");
+	rc = ipa_test_mhi_resume();
+	if (rc) {
+		IPA_UT_LOG("resume failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("resume fail");
+		return rc;
+	}
+	IPA_UT_LOG("AFTER resume\n");
+
+	/* check for MSI interrupt on channels */
+	IPA_MHI_TEST_CHECK_MSI_INTR(false, timeout);
+	if (timeout) {
+		IPA_UT_LOG("fail: transfer timeout\n");
+		IPA_UT_TEST_FAIL_REPORT("timeout on xfer");
+		return -EFAULT;
+	}
+
+	/* compare the two buffers */
+	if (memcmp(test_mhi_ctx->in_buffer.base,
+		test_mhi_ctx->out_buffer.base,
+		IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+		IPA_UT_LOG("fail: buffer are not equal\n");
+		IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. queue OUT RE/buffer
+ *	2. wait for MSI on OUT
+ *	3. Repeat 1. and 2. until the MSI wait times out (ch full / HOLB)
+ */
+static int ipa_mhi_test_create_full_channel(int *submitted_packets)
+{
+	int i;
+	bool timeout = true;
+	int rc;
+
+	if (!submitted_packets) {
+		IPA_UT_LOG("Input error\n");
+		return -EINVAL;
+	}
+
+	*submitted_packets = 0;
+
+	for (i = 0; i < IPA_MHI_TEST_MAX_DATA_BUF_SIZE; i++)
+		memset(test_mhi_ctx->out_buffer.base + i, i & 0xFF, 1);
+
+	do {
+		/* invalidate spare register value (for msi) */
+		memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+		IPA_UT_LOG("submitting OUT buffer\n");
+		timeout = true;
+		/* queue REs for OUT side and trigger doorbell */
+		rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+			test_mhi_ctx->xfer_ring_bufs,
+			test_mhi_ctx->ev_ring_bufs,
+			IPA_MHI_TEST_FIRST_CHANNEL_ID,
+			&test_mhi_ctx->out_buffer,
+			1,
+			true,
+			true,
+			false,
+			true);
+		if (rc) {
+			IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n",
+				rc);
+			IPA_UT_TEST_FAIL_REPORT("fail OUT q re");
+			return rc;
+		}
+		(*submitted_packets)++;
+
+		IPA_UT_LOG("waiting for MSI\n");
+		for (i = 0; i < 10; i++) {
+			if (*((u32 *)test_mhi_ctx->msi.base) ==
+				(0x10000000 |
+				(IPA_MHI_TEST_FIRST_EVENT_RING_ID))) {
+				IPA_UT_LOG("got MSI\n");
+				timeout = false;
+				break;
+			}
+			msleep(20);
+		}
+	} while (!timeout);
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. queue OUT RE/buffer
+ *	2. wait for MSI on OUT
+ *	3. Repeat 1. and 2. until the MSI wait times out (ch full)
+ *	4. suspend - it should fail with -EAGAIN - M1 is rejected
+ *	5. foreach submitted pkt, do the next steps
+ *	6. queue IN RE/buffer
+ *	7. wait for MSI
+ *	8. compare IN and OUT buffers
+ */
+static int ipa_mhi_test_suspend_full_channel(bool force)
+{
+	int rc;
+	bool timeout;
+	int submitted_packets = 0;
+
+	rc = ipa_mhi_test_create_full_channel(&submitted_packets);
+	if (rc) {
+		IPA_UT_LOG("fail create full channel\n");
+		IPA_UT_TEST_FAIL_REPORT("fail create full channel");
+		return rc;
+	}
+
+	IPA_UT_LOG("BEFORE suspend\n");
+	rc = ipa_mhi_test_suspend(force, false);
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_suspend did not returned -EAGAIN. rc %d\n",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("test suspend fail");
+		return -EFAULT;
+	}
+	IPA_UT_LOG("AFTER suspend\n");
+
+	while (submitted_packets) {
+		memset(test_mhi_ctx->in_buffer.base, 0,
+			IPA_MHI_TEST_MAX_DATA_BUF_SIZE);
+
+		/* invalidate spare register value (for msi) */
+		memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+
+		timeout = true;
+		/* queue RE for IN side and trigger doorbell */
+		rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+			test_mhi_ctx->xfer_ring_bufs,
+			test_mhi_ctx->ev_ring_bufs,
+			IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+			&test_mhi_ctx->in_buffer,
+			1,
+			true,
+			true,
+			false,
+			true);
+		if (rc) {
+			IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n",
+				rc);
+			IPA_UT_TEST_FAIL_REPORT("fail IN q re");
+			return rc;
+		}
+
+		IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+		if (timeout) {
+			IPA_UT_LOG("transfer failed - timeout\n");
+			IPA_UT_TEST_FAIL_REPORT("timeout on xfer");
+			return -EFAULT;
+		}
+
+		/* compare the two buffers */
+		if (memcmp(test_mhi_ctx->in_buffer.base,
+			test_mhi_ctx->out_buffer.base,
+			IPA_MHI_TEST_MAX_DATA_BUF_SIZE)) {
+			IPA_UT_LOG("buffer are not equal\n");
+			IPA_UT_TEST_FAIL_REPORT("non-equal buffers after xfer");
+			return -EFAULT;
+		}
+
+		submitted_packets--;
+	}
+
+	return 0;
+}
+
+/**
+ * To be called from test
+ *	1. suspend
+ *	2. reset to M0 state
+ */
+static int ipa_mhi_test_suspend_and_reset(struct ipa_test_mhi_context *ctx)
+{
+	int rc;
+
+	IPA_UT_LOG("BEFORE suspend\n");
+	rc = ipa_mhi_test_suspend(false, true);
+	if (rc) {
+		IPA_UT_LOG("suspend failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend fail");
+		return rc;
+	}
+	IPA_UT_LOG("AFTER suspend\n");
+
+	rc = ipa_mhi_test_reset(ctx, false);
+	if (rc) {
+		IPA_UT_LOG("reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("reset fail");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. manually update wp
+ *	2. suspend - should succeed
+ *	3. restore wp value
+ */
+static int ipa_mhi_test_suspend_wp_update(void)
+{
+	int rc;
+	struct ipa_mhi_mmio_register_set *p_mmio;
+	struct ipa_mhi_channel_context_array *p_ch_ctx_array;
+	u64 old_wp;
+	u64 phys_addr;
+
+	/* simulate a write by updating the wp */
+	p_mmio = test_mhi_ctx->mmio_buf.base;
+	phys_addr = p_mmio->ccabap + ((IPA_MHI_TEST_FIRST_CHANNEL_ID) *
+		sizeof(struct ipa_mhi_channel_context_array));
+	p_ch_ctx_array = test_mhi_ctx->ch_ctx_array.base +
+		(phys_addr - test_mhi_ctx->ch_ctx_array.phys_base);
+	old_wp = p_ch_ctx_array->wp;
+	p_ch_ctx_array->wp += 16;
+
+	IPA_UT_LOG("BEFORE suspend\n");
+	rc = ipa_mhi_test_suspend(false, false);
+	if (rc) {
+		IPA_UT_LOG("suspend failed rc %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend fail");
+		p_ch_ctx_array->wp = old_wp;
+		return rc;
+	}
+	IPA_UT_LOG("AFTER suspend\n");
+
+	p_ch_ctx_array->wp = old_wp;
+
+	return 0;
+}
+
+/**
+ * To be run during test
+ *	1. create open aggr by sending data
+ *	2. channel reset (disconnect/connect)
+ *	3. validate no aggr. open after reset
+ *	4. disable aggr.
+ */
+static int ipa_mhi_test_channel_reset_aggr_open(void)
+{
+	int rc;
+	u32 aggr_state_active;
+	struct ipa_ep_cfg_aggr ep_aggr;
+
+	IPA_UT_LOG("Entry\n");
+
+	rc = ipa_mhi_test_create_aggr_open_frame();
+	if (rc) {
+		IPA_UT_LOG("failed create open aggr rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail creare open aggr frame");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_channel_reset();
+	if (rc) {
+		IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+		return rc;
+	}
+
+	aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
+	IPADBG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+	if (aggr_state_active != 0) {
+		IPA_UT_LOG("aggregation frame open after reset!\n");
+		IPA_UT_LOG("IPA_STATE_AGGR_ACTIVE 0x%x\n", aggr_state_active);
+		IPA_UT_TEST_FAIL_REPORT("open aggr after reset");
+		return -EFAULT;
+	}
+
+	memset(&ep_aggr, 0, sizeof(ep_aggr));
+	rc = ipa3_cfg_ep_aggr(test_mhi_ctx->cons_hdl, &ep_aggr);
+	if (rc) {
+		IPA_UT_LOG("failed to configure aggr");
+		IPA_UT_TEST_FAIL_REPORT("fail to disable aggr");
+		return rc;
+	}
+
+	return rc;
+}
+
+/**
+ * To be run during test
+ *	1. queue OUT RE/buffer
+ *	2. wait for MSI on OUT
+ *	3. Do 1. and 2. till got MSI wait timeout (ch full)
+ *	4. channel reset
+ *		disconnect and reconnect the prod and cons
+ *	5. queue IN RE/buffer and ring DB
+ *	6. wait for MSI - should get timeout as channels were reset
+ *	7. reset again
+ */
+static int ipa_mhi_test_channel_reset_ipa_holb(void)
+{
+	int rc;
+	int submitted_packets = 0;
+	bool timeout;
+
+	IPA_UT_LOG("Entry\n");
+
+	rc = ipa_mhi_test_create_full_channel(&submitted_packets);
+	if (rc) {
+		IPA_UT_LOG("fail create full channel rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail create full channel");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_channel_reset();
+	if (rc) {
+		IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+		return rc;
+	}
+
+	/* invalidate spare register value (for msi) */
+	memset(test_mhi_ctx->msi.base, 0xFF, test_mhi_ctx->msi.size);
+	timeout = true;
+	/* queue RE for IN side and trigger doorbell */
+	rc = ipa_mhi_test_q_transfer_re(&test_mhi_ctx->mmio_buf,
+		test_mhi_ctx->xfer_ring_bufs,
+		test_mhi_ctx->ev_ring_bufs,
+		IPA_MHI_TEST_FIRST_CHANNEL_ID + 1,
+		&test_mhi_ctx->in_buffer,
+		1,
+		true,
+		true,
+		false,
+		true);
+
+	if (rc) {
+		IPA_UT_LOG("ipa_mhi_test_q_transfer_re failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail IN q re");
+		return rc;
+	}
+	submitted_packets--;
+
+	IPA_MHI_TEST_CHECK_MSI_INTR(true, timeout);
+	if (!timeout) {
+		IPA_UT_LOG("transfer succeed although we had reset\n");
+		IPA_UT_TEST_FAIL_REPORT("xfer succeed although we had reset");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_channel_reset();
+	if (rc) {
+		IPA_UT_LOG("channel reset failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("channel reset fail");
+		return rc;
+	}
+
+	return rc;
+}
+
+
+/**
+ * TEST: mhi reset in READY state
+ *	1. init to ready state (without start and connect)
+ *	2. reset (destroy and re-init)
+ *	2. destroy
+ *	3. destroy
+static int ipa_mhi_test_reset_ready_state(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(true);
+	if (rc) {
+		IPA_UT_LOG("init to Ready state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to init to ready state");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_reset(ctx, true);
+	if (rc) {
+		IPA_UT_LOG("reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi reset in M0 state
+ *	1. init to M0 state (with start and connect)
+ *	2. reset (destroy and re-init)
+ *	3. destroy
+ */
+static int ipa_mhi_test_reset_m0_state(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT
+			("fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_reset(ctx, false);
+	if (rc) {
+		IPA_UT_LOG("reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi in-loop reset in M0 state
+ *	1. init to M0 state (with start and connect)
+ *	2. reset (destroy and re-init) in-loop
+ *	3. destroy
+ */
+static int ipa_mhi_test_inloop_reset_m0_state(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT
+			("fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_reset, rc, ctx, false);
+	if (rc) {
+		IPA_UT_LOG("in-loop reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"reset (destroy/re-init) in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data with reset
+ *	1. init to M0 state (with start and connect)
+ *	2. reset (destroy and re-init)
+ *	3. loopback data
+ *	4. reset (destroy and re-init)
+ *	5. loopback data again
+ *	6. destroy
+ */
+static int ipa_mhi_test_loopback_data_with_reset(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_reset(ctx, false);
+	if (rc) {
+		IPA_UT_LOG("reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_reset(ctx, false);
+	if (rc) {
+		IPA_UT_LOG("reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("reset (destroy/re-init) failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi reset in suspend state
+ *	1. init to M0 state (with start and connect)
+ *	2. suspend
+ *	3. reset (destroy and re-init)
+ *	4. destroy
+ */
+static int ipa_mhi_test_reset_on_suspend(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_suspend_and_reset(ctx);
+	if (rc) {
+		IPA_UT_LOG("suspend and reset failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("suspend and then reset failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed %d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi in-loop reset in suspend state
+ *	1. init to M0 state (with start and connect)
+ *	2. suspend
+ *	3. reset (destroy and re-init)
+ *	4. Do 2 and 3 in loop
+ *	5. destroy
+ */
+static int ipa_mhi_test_inloop_reset_on_suspend(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_and_reset, rc, ctx);
+	if (rc) {
+		IPA_UT_LOG("in-loop reset in suspend failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to in-loop reset while suspend");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data with reset in suspend state
+ *	1. init to M0 state (with start and connect)
+ *	2. suspend
+ *	3. reset (destroy and re-init)
+ *	4. loopback data
+ *	5. suspend
+ *	6. reset (destroy and re-init)
+ *	7. destroy
+ */
+static int ipa_mhi_test_loopback_data_with_reset_on_suspend(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_suspend_and_reset(ctx);
+	if (rc) {
+		IPA_UT_LOG("suspend and reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_suspend_and_reset(ctx);
+	if (rc) {
+		IPA_UT_LOG("suspend and reset failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("fail to suspend and then reset");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/resume
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop suspend/resume
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_resume, rc);
+	if (rc) {
+		IPA_UT_LOG("suspend resume failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT("in loop suspend/resume failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/resume with aggr open
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop suspend/resume with open aggr.
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume_aggr_open(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open,
+		rc, false);
+	if (rc) {
+		IPA_UT_LOG("suspend resume with aggr open failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop suspend/resume with open aggr failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop force suspend/resume with aggr open
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop force suspend/resume with open aggr.
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_force_suspend_resume_aggr_open(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_aggr_open,
+		rc, true);
+	if (rc) {
+		IPA_UT_LOG("force suspend resume with aggr open failed rc=%d",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop force suspend/resume with open aggr failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend/host wakeup resume
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop suspend/resume with host wakeup
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_host_wakeup(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_host_wakeup, rc);
+	if (rc) {
+		IPA_UT_LOG("suspend host wakeup resume failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop suspend/resume with host wakeup failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop rejected suspend as full channel
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop rejected suspend due to full channel
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_reject_suspend_full_channel(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel,
+		rc, false);
+	if (rc) {
+		IPA_UT_LOG("full channel rejected suspend failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop rejected suspend due to full channel failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop rejected force suspend as full channel
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop rejected force suspend due to full channel
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_reject_force_suspend_full_channel(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_full_channel,
+		rc, true);
+	if (rc) {
+		IPA_UT_LOG("full channel rejected force suspend failed rc=%d",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop force rejected suspend as full ch failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop suspend after wp manual update
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop suspend after wp update
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_suspend_resume_wp_update(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_suspend_wp_update, rc);
+	if (rc) {
+		IPA_UT_LOG("suspend after wp update failed rc=%d", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop suspend after wp update failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset (disconnect/connect)
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop channel reset (disconnect/connect)
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset, rc);
+	if (rc) {
+		IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT("in loop channel reset failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset with open aggr
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop channel reset (disconnect/connect) with open aggr
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset_aggr_open(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_aggr_open, rc);
+	if (rc) {
+		IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop channel reset with open aggr failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/**
+ * TEST: mhi loopback data after in loop channel reset with channel in HOLB
+ *	1. init to M0 state (with start and connect)
+ *	2. in loop channel reset (disconnect/connect) with channel in HOLB
+ *	3. loopback data
+ *	4. destroy
+ */
+static int ipa_mhi_test_in_loop_channel_reset_ipa_holb(void *priv)
+{
+	int rc;
+	struct ipa_test_mhi_context *ctx = (struct ipa_test_mhi_context *)priv;
+
+	IPA_UT_LOG("Test Start\n");
+
+	if (unlikely(!ctx)) {
+		IPA_UT_LOG("No context");
+		return -EFAULT;
+	}
+
+	rc = ipa_mhi_test_initialize_driver(false);
+	if (rc) {
+		IPA_UT_LOG("init to M0 state failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"fail to init to M0 state (w/ start and connect)");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_channel_reset_ipa_holb, rc);
+	if (rc) {
+		IPA_UT_LOG("channel reset (disconnect/connect) failed rc=%d",
+			rc);
+		IPA_UT_TEST_FAIL_REPORT(
+			"in loop channel reset with channel HOLB failed");
+		return rc;
+	}
+
+	IPA_MHI_RUN_TEST_UNIT_IN_LOOP(ipa_mhi_test_loopback_data_transfer, rc);
+	if (rc) {
+		IPA_UT_LOG("data loopback failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("loopback data in loop failed");
+		return rc;
+	}
+
+	rc = ipa_mhi_test_destroy(ctx);
+	if (rc) {
+		IPA_UT_LOG("destroy failed rc=%d\n", rc);
+		IPA_UT_TEST_FAIL_REPORT("destroy failed");
+		return rc;
+	}
+
+	return 0;
+}
+
+/* Suite definition block */
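+/*
+ * Each IPA_UT_ADD_TEST() entry lists: test name, description, run callback,
+ * whether it runs as part of regression, and the min/max supported IPA H/W
+ * version (see ipa_ut_framework.h).
+ */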
+IPA_UT_DEFINE_SUITE_START(mhi, "MHI for GSI",
+	ipa_test_mhi_suite_setup, ipa_test_mhi_suite_teardown)
+{
+	IPA_UT_ADD_TEST(reset_ready_state,
+		"reset test in Ready state",
+		ipa_mhi_test_reset_ready_state,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(reset_m0_state,
+		"reset test in M0 state",
+		ipa_mhi_test_reset_m0_state,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(inloop_reset_m0_state,
+		"several reset iterations in M0 state",
+		ipa_mhi_test_inloop_reset_m0_state,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(loopback_data_with_reset_on_m0,
+		"reset before and after loopback data in M0 state",
+		ipa_mhi_test_loopback_data_with_reset,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(reset_on_suspend,
+		"reset test in suspend state",
+		ipa_mhi_test_reset_on_suspend,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(inloop_reset_on_suspend,
+		"several reset iterations in suspend state",
+		ipa_mhi_test_inloop_reset_on_suspend,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(loopback_data_with_reset_on_suspend,
+		"reset before and after loopback data in suspend state",
+		ipa_mhi_test_loopback_data_with_reset_on_suspend,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(suspend_resume,
+		"several suspend/resume iterations",
+		ipa_mhi_test_in_loop_suspend_resume,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(suspend_resume_with_open_aggr,
+		"several suspend/resume iterations with open aggregation frame",
+		ipa_mhi_test_in_loop_suspend_resume_aggr_open,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(force_suspend_resume_with_open_aggr,
+		"several force suspend/resume iterations with open aggregation frame",
+		ipa_mhi_test_in_loop_force_suspend_resume_aggr_open,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(suspend_resume_with_host_wakeup,
+		"several suspend and host wakeup resume iterations",
+		ipa_mhi_test_in_loop_suspend_host_wakeup,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(reject_suspend_channel_full,
+		"several rejected suspend iterations due to full channel",
+		ipa_mhi_test_in_loop_reject_suspend_full_channel,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(reject_force_suspend_channel_full,
+		"several rejected force suspend iterations due to full channel",
+		ipa_mhi_test_in_loop_reject_force_suspend_full_channel,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(suspend_resume_manual_wp_update,
+		"several suspend/resume iterations after simulating a write by manual wp update",
+		ipa_mhi_test_in_loop_suspend_resume_wp_update,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(channel_reset,
+		"several channel reset (disconnect/connect) iterations",
+		ipa_mhi_test_in_loop_channel_reset,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(channel_reset_aggr_open,
+		"several channel reset (disconnect/connect) iterations with open aggregation frame",
+		ipa_mhi_test_in_loop_channel_reset_aggr_open,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+	IPA_UT_ADD_TEST(channel_reset_ipa_holb,
+		"several channel reset (disconnect/connect) iterations with channel in HOLB state",
+		ipa_mhi_test_in_loop_channel_reset_ipa_holb,
+		true, IPA_HW_v3_0, IPA_HW_MAX),
+} IPA_UT_DEFINE_SUITE_END(mhi);
+
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.c b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
new file mode 100644
index 0000000..3bf9ac1
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.c
@@ -0,0 +1,1017 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mutex.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/ipa.h>
+#include "../ipa_v3/ipa_i.h"
+#include "ipa_ut_framework.h"
+#include "ipa_ut_suite_list.h"
+#include "ipa_ut_i.h"
+
+
+#define IPA_UT_DEBUG_WRITE_BUF_SIZE 256
+#define IPA_UT_DEBUG_READ_BUF_SIZE 1024
+
+#define IPA_UT_READ_WRITE_DBG_FILE_MODE \
+	(S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR | S_IWGRP)
+
+/**
+ * struct ipa_ut_context - Unit-test framework context
+ * @inited: Framework was initialized - IPA is ready and the enable file exists
+ * @enabled: All tests and suite debugfs files are created
+ * @lock: Lock for mutual exclusion
+ * @ipa_dbgfs_root: IPA root debugfs folder
+ * @test_dbgfs_root: UT root debugfs folder. Sub-folder of IPA root
+ * @test_dbgfs_suites: Suites root debugfs folder. Sub-folder of UT root
+ */
+struct ipa_ut_context {
+	bool inited;
+	bool enabled;
+	struct mutex lock;
+	struct dentry *ipa_dbgfs_root;
+	struct dentry *test_dbgfs_root;
+	struct dentry *test_dbgfs_suites;
+};
+
+static ssize_t ipa_ut_dbgfs_enable_read(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_enable_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_test_read(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_test_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+static int ipa_ut_dbgfs_all_test_open(struct inode *inode,
+	struct file *filp);
+static int ipa_ut_dbgfs_regression_test_open(struct inode *inode,
+	struct file *filp);
+static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos);
+static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos);
+
+
+static const struct file_operations ipa_ut_dbgfs_enable_fops = {
+	.read = ipa_ut_dbgfs_enable_read,
+	.write = ipa_ut_dbgfs_enable_write,
+};
+static const struct file_operations ipa_ut_dbgfs_test_fops = {
+	.read = ipa_ut_dbgfs_test_read,
+	.write = ipa_ut_dbgfs_test_write,
+};
+static const struct file_operations ipa_ut_dbgfs_all_test_fops = {
+	.open = ipa_ut_dbgfs_all_test_open,
+	.read = ipa_ut_dbgfs_meta_test_read,
+	.write = ipa_ut_dbgfs_meta_test_write,
+};
+static const struct file_operations ipa_ut_dbgfs_regression_test_fops = {
+	.open = ipa_ut_dbgfs_regression_test_open,
+	.read = ipa_ut_dbgfs_meta_test_read,
+	.write = ipa_ut_dbgfs_meta_test_write,
+};
+
+static struct ipa_ut_context *ipa_ut_ctx;
+char *_IPA_UT_TEST_LOG_BUF_NAME;
+struct ipa_ut_tst_fail_report
+	_IPA_UT_TEST_FAIL_REPORT_DATA[_IPA_UT_TEST_FAIL_REPORT_SIZE];
+u32 _IPA_UT_TEST_FAIL_REPORT_IDX;
+
+/**
+ * ipa_ut_print_log_buf() - Dump given buffer via kernel error mechanism
+ * @buf: Buffer to print
+ *
+ * Tokenize the string according to new-line and then print
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_print_log_buf(char *buf)
+{
+	char *token;
+
+	if (!buf) {
+		IPA_UT_ERR("Input error - no buf\n");
+		return;
+	}
+
+	for (token = strsep(&buf, "\n"); token; token = strsep(&buf, "\n"))
+		pr_err("%s\n", token);
+}
+
+/**
+ * ipa_ut_dump_fail_report_stack() - dump the report info stack via kernel err
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_dump_fail_report_stack(void)
+{
+	int i;
+
+	IPA_UT_DBG("Entry\n");
+
+	if (_IPA_UT_TEST_FAIL_REPORT_IDX == 0) {
+		IPA_UT_DBG("no report info\n");
+		return;
+	}
+
+	for (i = 0 ; i < _IPA_UT_TEST_FAIL_REPORT_IDX; i++) {
+		if (i == 0)
+			pr_err("***** FAIL INFO STACK *****:\n");
+		else
+			pr_err("Called From:\n");
+
+		pr_err("\tFILE = %s\n\tFUNC = %s()\n\tLINE = %d\n",
+			_IPA_UT_TEST_FAIL_REPORT_DATA[i].file,
+			_IPA_UT_TEST_FAIL_REPORT_DATA[i].func,
+			_IPA_UT_TEST_FAIL_REPORT_DATA[i].line);
+		pr_err("\t%s\n", _IPA_UT_TEST_FAIL_REPORT_DATA[i].info);
+	}
+}
+
+/**
+ * ipa_ut_show_suite_exec_summary() - Show tests run summary
+ * @suite: suite to print its running summary
+ *
+ * Print list of succeeded tests, failed tests and skipped tests
+ *
+ * Note: Assumes lock acquired
+ */
+static void ipa_ut_show_suite_exec_summary(const struct ipa_ut_suite *suite)
+{
+	int i;
+
+	IPA_UT_DBG("Entry\n");
+
+	ipa_assert_on(!suite);
+
+	pr_info("\n\n");
+	pr_info("\t  Suite '%s' summary\n", suite->meta_data->name);
+	pr_info("===========================\n");
+	pr_info("Successful tests\n");
+	pr_info("----------------\n");
+	for (i = 0 ; i < suite->tests_cnt ; i++) {
+		if (suite->tests[i].res != IPA_UT_TEST_RES_SUCCESS)
+			continue;
+		pr_info("\t%s\n", suite->tests[i].name);
+	}
+	pr_info("\nFailed tests\n");
+	pr_info("------------\n");
+	for (i = 0 ; i < suite->tests_cnt ; i++) {
+		if (suite->tests[i].res != IPA_UT_TEST_RES_FAIL)
+			continue;
+		pr_info("\t%s\n", suite->tests[i].name);
+	}
+	pr_info("\nSkipped tests\n");
+	pr_info("-------------\n");
+	for (i = 0 ; i < suite->tests_cnt ; i++) {
+		if (suite->tests[i].res != IPA_UT_TEST_RES_SKIP)
+			continue;
+		pr_info("\t%s\n", suite->tests[i].name);
+	}
+	pr_info("\n");
+}
+
+/**
+ * ipa_ut_dbgfs_meta_test_write() - Debugfs write func for a meta test
+ * @params: write fops
+ *
+ * Used to run all/regression tests in a suite
+ * Creates a log buffer that the tests can use to store ongoing logs.
+ * IPA clocks need to be voted for.
+ * Runs setup() once before running the tests and teardown() once after.
+ * If such callbacks do not exist, skip them; if they fail, fail the suite.
+ * Prints test progress while running.
+ * Test log and fail report are shown only for tests that failed.
+ * Finally, shows a summary of the suite's test run.
+ *
+ * Note: If the test's supported IPA H/W version does not match, skip it.
+ *	 If a test lacks a run function, skip it.
+ *	 On a regression run, skip tests that are not marked for regression.
+ * Note: Running mode: do not stop running on failure.
+ *
+ * Return: Negative value on failure, the given character count on success
+ */
+static ssize_t ipa_ut_dbgfs_meta_test_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ipa_ut_suite *suite;
+	int i;
+	enum ipa_hw_type ipa_ver;
+	int rc = 0;
+	long meta_type;
+	bool tst_fail = false;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+	suite = file->f_inode->i_private;
+	ipa_assert_on(!suite);
+	meta_type = (long)(file->private_data);
+	IPA_UT_DBG("Meta test type %ld\n", meta_type);
+
+	_IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
+		GFP_KERNEL);
+	if (!_IPA_UT_TEST_LOG_BUF_NAME) {
+		IPA_UT_ERR("failed to allocate %d bytes\n",
+			_IPA_UT_TEST_LOG_BUF_SIZE);
+		rc = -ENOMEM;
+		goto unlock_mutex;
+	}
+
+	if (!suite->tests_cnt || !suite->tests) {
+		pr_info("No tests for suite '%s'\n", suite->meta_data->name);
+		goto free_mem;
+	}
+
+	ipa_ver = ipa_get_hw_type();
+
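+	/* vote for IPA clocks so the H/W stays active while the tests run */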
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+
+	if (suite->meta_data->setup) {
+		pr_info("*** Suite '%s': Run setup ***\n",
+			suite->meta_data->name);
+		rc = suite->meta_data->setup(&suite->meta_data->priv);
+		if (rc) {
+			IPA_UT_ERR("Setup failed for suite %s\n",
+				suite->meta_data->name);
+			rc = -EFAULT;
+			goto release_clock;
+		}
+	} else {
+		pr_info("*** Suite '%s': No Setup ***\n",
+			suite->meta_data->name);
+	}
+
+	pr_info("*** Suite '%s': Run %s tests ***\n\n",
+		suite->meta_data->name,
+		meta_type == IPA_UT_META_TEST_REGRESSION ? "regression" : "all"
+		);
+	for (i = 0 ; i < suite->tests_cnt ; i++) {
+		if (meta_type == IPA_UT_META_TEST_REGRESSION &&
+			!suite->tests[i].run_in_regression) {
+			pr_info(
+				"*** Test '%s': Skip - Not in regression ***\n\n"
+				, suite->tests[i].name);
+			suite->tests[i].res = IPA_UT_TEST_RES_SKIP;
+			continue;
+		}
+		if (suite->tests[i].min_ipa_hw_ver > ipa_ver ||
+			suite->tests[i].max_ipa_hw_ver < ipa_ver) {
+			pr_info(
+				"*** Test '%s': Skip - IPA VER mismatch ***\n\n"
+				, suite->tests[i].name);
+			suite->tests[i].res = IPA_UT_TEST_RES_SKIP;
+			continue;
+		}
+		if (!suite->tests[i].run) {
+			pr_info(
+				"*** Test '%s': Skip - No Run function ***\n\n"
+				, suite->tests[i].name);
+			suite->tests[i].res = IPA_UT_TEST_RES_SKIP;
+			continue;
+		}
+
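+		/* start with a clean log buffer and fail-report stack */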
+		_IPA_UT_TEST_LOG_BUF_NAME[0] = '\0';
+		_IPA_UT_TEST_FAIL_REPORT_IDX = 0;
+		pr_info("*** Test '%s': Running... ***\n",
+			suite->tests[i].name);
+		rc = suite->tests[i].run(suite->meta_data->priv);
+		if (rc) {
+			tst_fail = true;
+			suite->tests[i].res = IPA_UT_TEST_RES_FAIL;
+			ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME);
+		} else {
+			suite->tests[i].res = IPA_UT_TEST_RES_SUCCESS;
+		}
+
+		pr_info(">>>>>>**** TEST '%s': %s ****<<<<<<\n",
+			suite->tests[i].name,
+			suite->tests[i].res == IPA_UT_TEST_RES_FAIL ?
+			"FAIL" : "SUCCESS");
+
+		if (suite->tests[i].res == IPA_UT_TEST_RES_FAIL)
+			ipa_ut_dump_fail_report_stack();
+
+		pr_info("\n");
+	}
+
+	if (suite->meta_data->teardown) {
+		pr_info("*** Suite '%s': Run Teardown ***\n",
+			suite->meta_data->name);
+		rc = suite->meta_data->teardown(suite->meta_data->priv);
+		if (rc) {
+			IPA_UT_ERR("Teardown failed for suite %s\n",
+				suite->meta_data->name);
+			rc = -EFAULT;
+			goto release_clock;
+		}
+	} else {
+		pr_info("*** Suite '%s': No Teardown ***\n",
+			suite->meta_data->name);
+	}
+
+	ipa_ut_show_suite_exec_summary(suite);
+
+release_clock:
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
+free_mem:
+	kfree(_IPA_UT_TEST_LOG_BUF_NAME);
+	_IPA_UT_TEST_LOG_BUF_NAME = NULL;
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
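+	/* succeed only if setup/teardown and all executed tests passed */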
+	return ((!rc && !tst_fail) ? count : -EFAULT);
+}
+
+/**
+ * ipa_ut_dbgfs_meta_test_read() - Debugfs read func for a meta test
+ * @params: read fops
+ *
+ * A meta test is a test that represents another test or a group of
+ *  tests; for example, the 'all' test. Running it will run all
+ *  the tests in the suite.
+ *
+ * Shows information regarding the suite, e.g. name and description.
+ * If regression - lists the regression tests' names.
+ *
+ * Return: Amount of characters written to user space buffer
+ */
+static ssize_t ipa_ut_dbgfs_meta_test_read(struct file *file,
+	char __user *ubuf, size_t count, loff_t *ppos)
+{
+	char *buf;
+	struct ipa_ut_suite *suite;
+	int nbytes;
+	ssize_t cnt;
+	long meta_type;
+	int i;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+	suite = file->f_inode->i_private;
+	ipa_assert_on(!suite);
+	meta_type = (long)(file->private_data);
+	IPA_UT_DBG("Meta test type %ld\n", meta_type);
+
+	buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		IPA_UT_ERR("failed to allocate %d bytes\n",
+			IPA_UT_DEBUG_READ_BUF_SIZE);
+		cnt = 0;
+		goto unlock_mutex;
+	}
+
+	if (meta_type == IPA_UT_META_TEST_ALL) {
+		nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+			"\tMeta-test running all the tests in the suite:\n"
+			"\tSuite Name: %s\n"
+			"\tDescription: %s\n"
+			"\tNumber of tests in suite: %zu\n",
+			suite->meta_data->name,
+			suite->meta_data->desc ?: "",
+			suite->tests_cnt);
+	} else {
+		nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+			"\tMeta-test running regression tests in the suite:\n"
+			"\tSuite Name: %s\n"
+			"\tDescription: %s\n"
+			"\tRegression tests:\n",
+			suite->meta_data->name,
+			suite->meta_data->desc ?: "");
+		for (i = 0 ; i < suite->tests_cnt ; i++) {
+			if (!suite->tests[i].run_in_regression)
+				continue;
+			nbytes += scnprintf(buf + nbytes,
+				IPA_UT_DEBUG_READ_BUF_SIZE - nbytes,
+				"\t\t%s\n", suite->tests[i].name);
+		}
+	}
+
+	cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes);
+	kfree(buf);
+
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return cnt;
+}
+
+/**
+ * ipa_ut_dbgfs_regression_test_open() - Debugfs open function for
+ * 'regression' tests
+ * @params: open fops
+ *
+ * Mark "Regression tests" for meta-tests later operations.
+ *
+ * Return: Zero (always success).
+ */
+static int ipa_ut_dbgfs_regression_test_open(struct inode *inode,
+	struct file *filp)
+{
+	IPA_UT_DBG("Entry\n");
+
+	filp->private_data = (void *)(IPA_UT_META_TEST_REGRESSION);
+
+	return 0;
+}
+
+/**
+ * ipa_ut_dbgfs_all_test_open() - Debugfs open function for 'all' tests
+ * @params: open fops
+ *
+ * Mark "All tests" for meta-tests later operations.
+ *
+ * Return: Zero (always success).
+ */
+static int ipa_ut_dbgfs_all_test_open(struct inode *inode,
+	struct file *filp)
+{
+	IPA_UT_DBG("Entry\n");
+
+	filp->private_data = (void *)(IPA_UT_META_TEST_ALL);
+
+	return 0;
+}
+
+/**
+ * ipa_ut_dbgfs_test_write() - Debugfs write function for a test
+ * @params: write fops
+ *
+ * Used to run a test.
+ * Creates a log buffer that the test can use to store ongoing logs.
+ * IPA clocks need to be voted for.
+ * Runs setup() before the test and teardown() after it.
+ * If such callbacks do not exist, skip them; if they fail, fail the test.
+ * If everything succeeds, nothing is printed to the user.
+ * If the test failed, its logs and failure report are printed to the user.
+ *
+ * Note: The test must have a run function and its supported IPA H/W
+ * version range must match the running H/W. Otherwise the test fails.
+ *
+ * Return: Negative value on failure, the given character count on success
+ */
+static ssize_t ipa_ut_dbgfs_test_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct ipa_ut_test *test;
+	struct ipa_ut_suite *suite;
+	bool tst_fail = false;
+	int rc = 0;
+	enum ipa_hw_type ipa_ver;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+	test = file->f_inode->i_private;
+	ipa_assert_on(!test);
+
+	_IPA_UT_TEST_LOG_BUF_NAME = kzalloc(_IPA_UT_TEST_LOG_BUF_SIZE,
+		GFP_KERNEL);
+	if (!_IPA_UT_TEST_LOG_BUF_NAME) {
+		IPA_UT_ERR("failed to allocate %d bytes\n",
+			_IPA_UT_TEST_LOG_BUF_SIZE);
+		rc = -ENOMEM;
+		goto unlock_mutex;
+	}
+
+	if (!test->run) {
+		IPA_UT_ERR("*** Test %s - No run func ***\n",
+			test->name);
+		rc = -EFAULT;
+		goto free_mem;
+	}
+
+	ipa_ver = ipa_get_hw_type();
+	if (test->min_ipa_hw_ver > ipa_ver ||
+		test->max_ipa_hw_ver < ipa_ver) {
+		IPA_UT_ERR("Cannot run test %s on IPA HW Ver %s\n",
+			test->name, ipa_get_version_string(ipa_ver));
+		rc = -EFAULT;
+		goto free_mem;
+	}
+
+	suite = test->suite;
+	if (!suite || !suite->meta_data) {
+		IPA_UT_ERR("test %s with invalid suite\n", test->name);
+		rc = -EINVAL;
+		goto free_mem;
+	}
+
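+	/* keep IPA clocks voted while the test runs */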
+	IPA_ACTIVE_CLIENTS_INC_SPECIAL("IPA_UT");
+
+	if (suite->meta_data->setup) {
+		IPA_UT_DBG("*** Suite '%s': Run setup ***\n",
+			suite->meta_data->name);
+		rc = suite->meta_data->setup(&suite->meta_data->priv);
+		if (rc) {
+			IPA_UT_ERR("Setup failed for suite %s\n",
+				suite->meta_data->name);
+			rc = -EFAULT;
+			goto release_clock;
+		}
+	} else {
+		IPA_UT_DBG("*** Suite '%s': No Setup ***\n",
+			suite->meta_data->name);
+	}
+
+	IPA_UT_DBG("*** Test '%s': Running... ***\n", test->name);
+	_IPA_UT_TEST_FAIL_REPORT_IDX = 0;
+	rc = test->run(suite->meta_data->priv);
+	if (rc)
+		tst_fail = true;
+	IPA_UT_DBG("*** Test %s - ***\n", tst_fail ? "FAIL" : "SUCCESS");
+	if (tst_fail) {
+		pr_info("=================>>>>>>>>>>>\n");
+		ipa_ut_print_log_buf(_IPA_UT_TEST_LOG_BUF_NAME);
+		pr_info("**** TEST %s FAILED ****\n", test->name);
+		ipa_ut_dump_fail_report_stack();
+		pr_info("<<<<<<<<<<<=================\n");
+	}
+
+	if (suite->meta_data->teardown) {
+		IPA_UT_DBG("*** Suite '%s': Run Teardown ***\n",
+			suite->meta_data->name);
+		rc = suite->meta_data->teardown(suite->meta_data->priv);
+		if (rc) {
+			IPA_UT_ERR("Teardown failed for suite %s\n",
+				suite->meta_data->name);
+			rc = -EFAULT;
+			goto release_clock;
+		}
+	} else {
+		IPA_UT_DBG("*** Suite '%s': No Teardown ***\n",
+			suite->meta_data->name);
+	}
+
+release_clock:
+	IPA_ACTIVE_CLIENTS_DEC_SPECIAL("IPA_UT");
+free_mem:
+	kfree(_IPA_UT_TEST_LOG_BUF_NAME);
+	_IPA_UT_TEST_LOG_BUF_NAME = NULL;
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return ((!rc && !tst_fail) ? count : -EFAULT);
+}
+
+/**
+ * ipa_ut_dbgfs_test_read() - Debugfs read function for a test
+ * @params: read fops
+ *
+ * Prints information regarding the test, e.g. name and description.
+ *
+ * Return: Amount of characters written to user space buffer
+ */
+static ssize_t ipa_ut_dbgfs_test_read(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	char *buf;
+	struct ipa_ut_test *test;
+	int nbytes;
+	ssize_t cnt;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+	test = file->f_inode->i_private;
+	ipa_assert_on(!test);
+
+	buf = kmalloc(IPA_UT_DEBUG_READ_BUF_SIZE, GFP_KERNEL);
+	if (!buf) {
+		IPA_UT_ERR("failed to allocate %d bytes\n",
+			IPA_UT_DEBUG_READ_BUF_SIZE);
+		cnt = 0;
+		goto unlock_mutex;
+	}
+
+	nbytes = scnprintf(buf, IPA_UT_DEBUG_READ_BUF_SIZE,
+		"\t Test Name: %s\n"
+		"\t Description: %s\n"
+		"\t Suite Name: %s\n"
+		"\t Run In Regression: %s\n"
+		"\t Supported IPA versions: [%s -> %s]\n",
+		test->name, test->desc ?: "", test->suite->meta_data->name,
+		test->run_in_regression ? "Yes" : "No",
+		ipa_get_version_string(test->min_ipa_hw_ver),
+		test->max_ipa_hw_ver == IPA_HW_MAX ? "MAX" :
+			ipa_get_version_string(test->max_ipa_hw_ver));
+
+	if (nbytes > count)
+		IPA_UT_ERR("User buf too small - return partial info\n");
+
+	cnt = simple_read_from_buffer(ubuf, count, ppos, buf, nbytes);
+	kfree(buf);
+
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return cnt;
+}
+
+/**
+ * ipa_ut_framework_load_suites() - Load tests and expose them to user space
+ *
+ * Creates a debugfs folder for each suite and a file for each test in it.
+ * Creates a debugfs "all" file per suite for the meta-test to run all tests.
+ *
+ * Note: Assumes lock acquired
+ *
+ * Return: Zero in success, otherwise in failure
+ */
+int ipa_ut_framework_load_suites(void)
+{
+	int suite_idx;
+	int tst_idx;
+	struct ipa_ut_suite *suite;
+	struct dentry *s_dent;
+	struct dentry *f_dent;
+
+	IPA_UT_DBG("Entry\n");
+
+	for (suite_idx = IPA_UT_SUITE_FIRST_INDEX;
+		suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) {
+		suite = IPA_UT_GET_SUITE(suite_idx);
+
+		if (!suite->meta_data->name) {
+			IPA_UT_ERR("No suite name\n");
+			return -EFAULT;
+		}
+
+		s_dent = debugfs_create_dir(suite->meta_data->name,
+			ipa_ut_ctx->test_dbgfs_suites);
+
+		if (!s_dent || IS_ERR(s_dent)) {
+			IPA_UT_ERR("fail create dbg entry - suite %s\n",
+				suite->meta_data->name);
+			return -EFAULT;
+		}
+
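+		/* a debugfs file per test, under the suite's folder */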
+		for (tst_idx = 0; tst_idx < suite->tests_cnt ; tst_idx++) {
+			if (!suite->tests[tst_idx].name) {
+				IPA_UT_ERR("No test name on suite %s\n",
+					suite->meta_data->name);
+				return -EFAULT;
+			}
+			f_dent = debugfs_create_file(
+				suite->tests[tst_idx].name,
+				IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent,
+				&suite->tests[tst_idx],
+				&ipa_ut_dbgfs_test_fops);
+			if (!f_dent || IS_ERR(f_dent)) {
+				IPA_UT_ERR("fail create dbg entry - tst %s\n",
+					suite->tests[tst_idx].name);
+				return -EFAULT;
+			}
+		}
+
+		/* entry for meta-test all to run all tests in suites */
+		f_dent = debugfs_create_file(_IPA_UT_RUN_ALL_TEST_NAME,
+			IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent,
+			suite, &ipa_ut_dbgfs_all_test_fops);
+		if (!f_dent || IS_ERR(f_dent)) {
+			IPA_UT_ERR("fail to create dbg entry - %s\n",
+				_IPA_UT_RUN_ALL_TEST_NAME);
+			return -EFAULT;
+		}
+
+		/*
+		 * entry for meta-test regression to run all regression
+		 * tests in suites
+		 */
+		f_dent = debugfs_create_file(_IPA_UT_RUN_REGRESSION_TEST_NAME,
+			IPA_UT_READ_WRITE_DBG_FILE_MODE, s_dent,
+			suite, &ipa_ut_dbgfs_regression_test_fops);
+		if (!f_dent || IS_ERR(f_dent)) {
+			IPA_UT_ERR("fail to create dbg entry - %s\n",
+				_IPA_UT_RUN_REGRESSION_TEST_NAME);
+			return -EFAULT;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ipa_ut_framework_enable() - Enable the framework
+ *
+ * Creates the tests and suites debugfs entries and loads them.
+ * This will expose the tests to user space.
+ *
+ * Return: Zero in success, otherwise in failure
+ */
+static int ipa_ut_framework_enable(void)
+{
+	int ret = 0;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+
+	if (ipa_ut_ctx->enabled) {
+		IPA_UT_ERR("Already enabled\n");
+		goto unlock_mutex;
+	}
+
+	ipa_ut_ctx->test_dbgfs_suites = debugfs_create_dir("suites",
+		ipa_ut_ctx->test_dbgfs_root);
+	if (!ipa_ut_ctx->test_dbgfs_suites ||
+		IS_ERR(ipa_ut_ctx->test_dbgfs_suites)) {
+		IPA_UT_ERR("failed to create suites debugfs dir\n");
+		ret = -EFAULT;
+		goto unlock_mutex;
+	}
+
+	if (ipa_ut_framework_load_suites()) {
+		IPA_UT_ERR("failed to load the suites into debugfs\n");
+		ret = -EFAULT;
+		goto fail_clean_suites_dbgfs;
+	}
+
+	ipa_ut_ctx->enabled = true;
+	goto unlock_mutex;
+
+fail_clean_suites_dbgfs:
+	debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites);
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa_ut_framework_disable() - Disable the framework
+ *
+ * Remove the tests and suites debugfs exposure.
+ *
+ * Return: Zero in success, otherwise in failure
+ */
+static int ipa_ut_framework_disable(void)
+{
+	int ret = 0;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+
+	if (!ipa_ut_ctx->enabled) {
+		IPA_UT_ERR("Already disabled\n");
+		goto unlock_mutex;
+	}
+
+	debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_suites);
+
+	ipa_ut_ctx->enabled = false;
+
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa_ut_dbgfs_enable_write() - Debugfs enable file write fops
+ * @params: write fops
+ *
+ * Input should be a number. If 0, disable; otherwise enable.
+ *
+ * Return: negative value on failure; on success, the number of given chars
+ */
+static ssize_t ipa_ut_dbgfs_enable_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	char lcl_buf[IPA_UT_DEBUG_WRITE_BUF_SIZE];
+	s8 option = 0;
+	int ret;
+
+	IPA_UT_DBG("Entry\n");
+
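+	/* reserve room for the NUL terminator when checking the input size */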
+	if (sizeof(lcl_buf) < count + 1) {
+		IPA_UT_ERR("Not enough space\n");
+		return -E2BIG;
+	}
+
+	if (copy_from_user(lcl_buf, buf, count)) {
+		IPA_UT_ERR("fail to copy buf from user space\n");
+		return -EFAULT;
+	}
+
+	lcl_buf[count] = '\0';
+	if (kstrtos8(lcl_buf, 0, &option)) {
+		IPA_UT_ERR("fail convert str to s8\n");
+		return -EINVAL;
+	}
+
+	if (option == 0)
+		ret = ipa_ut_framework_disable();
+	else
+		ret = ipa_ut_framework_enable();
+
+	return ret ?: count;
+}
+
+/**
+ * ipa_ut_dbgfs_enable_read() - Debugfs enable file read fops
+ * @params: read fops
+ *
+ * Shows to user space whether the framework is enabled or disabled.
+ *
+ * Return: amount of characters returned to user space
+ */
+static ssize_t ipa_ut_dbgfs_enable_read(struct file *file, char __user *ubuf,
+	size_t count, loff_t *ppos)
+{
+	const char *status;
+
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+	status = ipa_ut_ctx->enabled ?
+		"Enabled - Write 0 to disable\n" :
+		"Disabled - Write 1 to enable\n";
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return simple_read_from_buffer(ubuf, count, ppos,
+		status, strlen(status));
+}
+
+/**
+ * ipa_ut_framework_init() - Unit-tests framework initialization
+ *
+ * Completes test initialization: each test needs to point to its
+ * corresponding suite.
+ * Creates the framework debugfs root directory under the IPA directory.
+ * Creates the enable debugfs file - used to enable/disable the framework.
+ *
+ * Return: Zero in success, otherwise in failure
+ */
+static int ipa_ut_framework_init(void)
+{
+	struct dentry *dfile_enable;
+	int ret;
+	int suite_idx;
+	int test_idx;
+	struct ipa_ut_suite *suite;
+
+	IPA_UT_DBG("Entry\n");
+
+	ipa_assert_on(!ipa_ut_ctx);
+
+	ipa_ut_ctx->ipa_dbgfs_root = ipa_debugfs_get_root();
+	if (!ipa_ut_ctx->ipa_dbgfs_root) {
+		IPA_UT_ERR("No IPA debugfs root entry\n");
+		return -EFAULT;
+	}
+
+	mutex_lock(&ipa_ut_ctx->lock);
+
+	/* tests need to point to their corresponding suite structures */
+	for (suite_idx = IPA_UT_SUITE_FIRST_INDEX;
+		suite_idx < IPA_UT_SUITES_COUNT; suite_idx++) {
+		suite = IPA_UT_GET_SUITE(suite_idx);
+		ipa_assert_on(!suite);
+		if (!suite->tests) {
+			IPA_UT_DBG("No tests for suite %s\n",
+				suite->meta_data->name);
+			continue;
+		}
+		for (test_idx = 0; test_idx < suite->tests_cnt; test_idx++) {
+			suite->tests[test_idx].suite = suite;
+			IPA_UT_DBG("Updating test %s info for suite %s\n",
+				suite->tests[test_idx].name,
+				suite->meta_data->name);
+		}
+	}
+
+	ipa_ut_ctx->test_dbgfs_root = debugfs_create_dir("test",
+		ipa_ut_ctx->ipa_dbgfs_root);
+	if (!ipa_ut_ctx->test_dbgfs_root ||
+		IS_ERR(ipa_ut_ctx->test_dbgfs_root)) {
+		IPA_UT_ERR("failed to create test debugfs dir\n");
+		ret = -EFAULT;
+		goto unlock_mutex;
+	}
+
+	dfile_enable = debugfs_create_file("enable",
+		IPA_UT_READ_WRITE_DBG_FILE_MODE,
+		ipa_ut_ctx->test_dbgfs_root, 0, &ipa_ut_dbgfs_enable_fops);
+	if (!dfile_enable || IS_ERR(dfile_enable)) {
+		IPA_UT_ERR("failed to create enable debugfs file\n");
+		ret = -EFAULT;
+		goto fail_clean_dbgfs;
+	}
+
+	_IPA_UT_TEST_FAIL_REPORT_IDX = 0;
+	ipa_ut_ctx->inited = true;
+	IPA_UT_DBG("Done\n");
+	ret = 0;
+	goto unlock_mutex;
+
+fail_clean_dbgfs:
+	debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root);
+unlock_mutex:
+	mutex_unlock(&ipa_ut_ctx->lock);
+	return ret;
+}
+
+/**
+ * ipa_ut_framework_destroy() - Destroy the UT framework info
+ *
+ * Disable it if enabled.
+ * Remove the debugfs entries using the root entry
+ */
+static void ipa_ut_framework_destroy(void)
+{
+	IPA_UT_DBG("Entry\n");
+
+	mutex_lock(&ipa_ut_ctx->lock);
+	if (ipa_ut_ctx->enabled)
+		ipa_ut_framework_disable();
+	if (ipa_ut_ctx->inited)
+		debugfs_remove_recursive(ipa_ut_ctx->test_dbgfs_root);
+	mutex_unlock(&ipa_ut_ctx->lock);
+}
+
+/**
+ * ipa_ut_ipa_ready_cb() - IPA ready CB
+ *
+ * Once IPA is ready, start initializing the unit-test framework
+ */
+static void ipa_ut_ipa_ready_cb(void *user_data)
+{
+	IPA_UT_DBG("Entry\n");
+	(void)ipa_ut_framework_init();
+}
+
+/**
+ * ipa_ut_module_init() - Module init
+ *
+ * Creates the framework context, waits for IPA driver readiness
+ * and initializes the framework.
+ * If the IPA driver is already ready, initialization continues immediately;
+ * if not, it waits for the IPA-ready notification from the IPA driver.
+ */
+static int __init ipa_ut_module_init(void)
+{
+	int ret;
+
+	IPA_UT_INFO("Loading IPA test module...\n");
+
+	ipa_ut_ctx = kzalloc(sizeof(struct ipa_ut_context), GFP_KERNEL);
+	if (!ipa_ut_ctx) {
+		IPA_UT_ERR("Failed to allocate ctx\n");
+		return -ENOMEM;
+	}
+	mutex_init(&ipa_ut_ctx->lock);
+
+	if (!ipa_is_ready()) {
+		IPA_UT_DBG("IPA driver not ready, registering callback\n");
+		ret = ipa_register_ipa_ready_cb(ipa_ut_ipa_ready_cb, NULL);
+
+		/*
+		 * If we received -EEXIST, IPA is already initialized, so
+		 * continue the init process immediately.
+		 */
+		if (ret != -EEXIST) {
+			if (ret) {
+				IPA_UT_ERR("IPA CB reg failed - %d\n", ret);
+				kfree(ipa_ut_ctx);
+				ipa_ut_ctx = NULL;
+			}
+			return ret;
+		}
+	}
+
+	ret = ipa_ut_framework_init();
+	if (ret) {
+		IPA_UT_ERR("framework init failed\n");
+		kfree(ipa_ut_ctx);
+		ipa_ut_ctx = NULL;
+	}
+	return ret;
+}
+
+/**
+ * ipa_ut_module_exit() - Module exit function
+ *
+ * Destroys the Framework and removes its context
+ */
+static void ipa_ut_module_exit(void)
+{
+	IPA_UT_DBG("Entry\n");
+
+	if (!ipa_ut_ctx)
+		return;
+
+	ipa_ut_framework_destroy();
+	kfree(ipa_ut_ctx);
+	ipa_ut_ctx = NULL;
+}
+
+module_init(ipa_ut_module_init);
+module_exit(ipa_ut_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("IPA Unit Test module");
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_framework.h b/drivers/platform/msm/ipa/test/ipa_ut_framework.h
new file mode 100644
index 0000000..e3884d6
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_framework.h
@@ -0,0 +1,240 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UT_FRAMEWORK_H_
+#define _IPA_UT_FRAMEWORK_H_
+
+#include <linux/kernel.h>
+#include "../ipa_common_i.h"
+#include "ipa_ut_i.h"
+
+#define IPA_UT_DRV_NAME "ipa_ut"
+
+#define IPA_UT_DBG(fmt, args...) \
+	do { \
+		pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UT_DBG_LOW(fmt, args...) \
+	do { \
+		pr_debug(IPA_UT_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UT_ERR(fmt, args...) \
+	do { \
+		pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+#define IPA_UT_INFO(fmt, args...) \
+	do { \
+		pr_info(IPA_UT_DRV_NAME " %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+		IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
+			IPA_UT_DRV_NAME " %s:%d " fmt, ## args); \
+	} while (0)
+
+/**
+ * struct ipa_ut_tst_fail_report - Information on test failure
+ * @valid: When a test posts a report, valid will be marked true
+ * @file: File name containing  the failed test.
+ * @line: Number of line in the file where the test failed.
+ * @func: Function where the test failed in.
+ * @info: Information about the failure.
+ */
+struct ipa_ut_tst_fail_report {
+	bool valid;
+	const char *file;
+	int line;
+	const char *func;
+	const char *info;
+};
+
+/**
+ * Report on test failure
+ * To be used by tests to report a point where a test failed.
+ * Failures are saved in a stack manner.
+ * Dumping the failure info will dump the fail reports
+ *  from all the functions in the call stack
+ */
+#define IPA_UT_TEST_FAIL_REPORT(__info) \
+	do { \
+		extern struct ipa_ut_tst_fail_report \
+			_IPA_UT_TEST_FAIL_REPORT_DATA \
+			[_IPA_UT_TEST_FAIL_REPORT_SIZE]; \
+		extern u32 _IPA_UT_TEST_FAIL_REPORT_IDX; \
+		struct ipa_ut_tst_fail_report *entry; \
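+		/* silently drop reports once the fixed-size stack is full */ \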
+		if (_IPA_UT_TEST_FAIL_REPORT_IDX >= \
+			_IPA_UT_TEST_FAIL_REPORT_SIZE) \
+			break; \
+		entry = &(_IPA_UT_TEST_FAIL_REPORT_DATA \
+			[_IPA_UT_TEST_FAIL_REPORT_IDX]); \
+		entry->file = __FILENAME__; \
+		entry->line = __LINE__; \
+		entry->func = __func__; \
+		if (__info) \
+			entry->info = __info; \
+		else \
+			entry->info = ""; \
+		_IPA_UT_TEST_FAIL_REPORT_IDX++; \
+	} while (0)
+
+/**
+ * To be used by tests to log progress and ongoing information
+ * Logs are not printed to user, but saved to a buffer.
+ * The framework prints the buffer on different occasions - e.g. test failure
+ */
+#define IPA_UT_LOG(fmt, args...) \
+	do { \
+		extern char *_IPA_UT_TEST_LOG_BUF_NAME; \
+		char __buf[512]; \
+		IPA_UT_DBG(fmt, ## args); \
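+		/* no test log buffer allocated - fall back to kernel log */ \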
+		if (!_IPA_UT_TEST_LOG_BUF_NAME) {\
+			pr_err(IPA_UT_DRV_NAME " %s:%d " fmt, \
+				__func__, __LINE__, ## args); \
+			break; \
+		} \
+		scnprintf(__buf, sizeof(__buf), \
+			" %s:%d " fmt, \
+			__func__, __LINE__, ## args); \
+		strlcat(_IPA_UT_TEST_LOG_BUF_NAME, __buf, \
+			_IPA_UT_TEST_LOG_BUF_SIZE); \
+	} while (0)
+
+/**
+ * struct ipa_ut_suite_meta - Suite meta-data
+ * @name: Suite unique name
+ * @desc: Suite description
+ * @setup: Setup Call-back of the suite
+ * @teardown: Teardown Call-back of the suite
+ * @priv: Private pointer of the suite
+ *
+ * Setup/Teardown will be called once for the suite when running its tests.
+ * priv field is shared between the Setup/Teardown and the tests
+ */
+struct ipa_ut_suite_meta {
+	char *name;
+	char *desc;
+	int (*setup)(void **ppriv);
+	int (*teardown)(void *priv);
+	void *priv;
+};
+
+/* Test suite data structure declaration */
+struct ipa_ut_suite;
+
+/**
+ * struct ipa_ut_test - Test information
+ * @name: Test name
+ * @desc: Test description
+ * @run: Test execution call-back
+ * @run_in_regression: Whether to run this test as part of regression
+ * @min_ipa_hw_ver: Minimum IPA H/W version on which the test is supported
+ * @max_ipa_hw_ver: Maximum IPA H/W version on which the test is supported
+ * @suite: Pointer to suite containing this test
+ * @res: Test execution result. Will be updated after running a test as part
+ * of suite tests run
+ */
+struct ipa_ut_test {
+	char *name;
+	char *desc;
+	int (*run)(void *priv);
+	bool run_in_regression;
+	int min_ipa_hw_ver;
+	int max_ipa_hw_ver;
+	struct ipa_ut_suite *suite;
+	enum ipa_ut_test_result res;
+};
+
+/**
+ * struct ipa_ut_suite - Suite information
+ * @meta_data: Pointer to meta-data structure of the suite
+ * @tests: Pointer to array of tests belongs to the suite
+ * @tests_cnt: Number of tests
+ */
+struct ipa_ut_suite {
+	struct ipa_ut_suite_meta *meta_data;
+	struct ipa_ut_test *tests;
+	size_t tests_cnt;
+};
+
+
+/**
+ * Add a test to a suite.
+ * Adds an entry to the suite's tests array and fills it with
+ * the given info.
+ */
+#define IPA_UT_ADD_TEST(__name, __desc, __run, __run_in_regression, \
+	__min_ipa_hw_ver, __max_ipa_hw_ver) \
+	{ \
+		.name = #__name, \
+		.desc = __desc, \
+		.run = __run, \
+		.run_in_regression = __run_in_regression, \
+		.min_ipa_hw_ver = __min_ipa_hw_ver, \
+		.max_ipa_hw_ver = __max_ipa_hw_ver, \
+		.suite = NULL, \
+	}
+
+/**
+ * Declare a suite
+ * Every suite needs to be declared before it is registered.
+ */
+#define IPA_UT_DECLARE_SUITE(__name) \
+	extern struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name)
+
+/**
+ * Register a suite
+ * A suite must be registered for the framework to consider it.
+ */
+#define IPA_UT_REGISTER_SUITE(__name) \
+	(&_IPA_UT_SUITE_DATA(__name))
+
+/**
+ * Start/End suite definition
+ * Creates the suite's global structures and allows adding tests to it.
+ * Use IPA_UT_ADD_TEST() with these macros to add tests when defining
+ * a suite
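+ *
+ * For illustration, a hypothetical suite definition (placeholder names)
+ * could look like:
+ *	IPA_UT_DEFINE_SUITE_START(example, "example suite",
+ *		my_setup, my_teardown)
+ *	{
+ *		IPA_UT_ADD_TEST(my_test, "my test", my_test_run, true,
+ *			IPA_HW_v3_0, IPA_HW_MAX),
+ *	} IPA_UT_DEFINE_SUITE_END(example);
+ * The suite must also be declared and registered in ipa_ut_suite_list.h.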
+ */
+#define IPA_UT_DEFINE_SUITE_START(__name, __desc, __setup, __teardown) \
+	static struct ipa_ut_suite_meta _IPA_UT_SUITE_META_DATA(__name) = \
+	{ \
+		.name = #__name, \
+		.desc = __desc, \
+		.setup = __setup, \
+		.teardown = __teardown, \
+	}; \
+	static struct ipa_ut_test _IPA_UT_SUITE_TESTS(__name)[] =
+#define IPA_UT_DEFINE_SUITE_END(__name) \
+	; \
+	struct ipa_ut_suite _IPA_UT_SUITE_DATA(__name) = \
+	{ \
+		.meta_data = &_IPA_UT_SUITE_META_DATA(__name), \
+		.tests = _IPA_UT_SUITE_TESTS(__name), \
+		.tests_cnt = ARRAY_SIZE(_IPA_UT_SUITE_TESTS(__name)), \
+	}
+
+#endif /* _IPA_UT_FRAMEWORK_H_ */
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_i.h b/drivers/platform/msm/ipa/test/ipa_ut_i.h
new file mode 100644
index 0000000..973debf
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_i.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UT_I_H_
+#define _IPA_UT_I_H_
+
+/* Suite data global structure  name */
+#define _IPA_UT_SUITE_DATA(__name) ipa_ut_ ##__name ##_data
+
+/* Suite meta-data global structure name */
+#define _IPA_UT_SUITE_META_DATA(__name) ipa_ut_ ##__name ##_meta_data
+
+/* Suite global array of tests */
+#define _IPA_UT_SUITE_TESTS(__name) ipa_ut_ ##__name ##_tests
+
+/* Global array of all suites */
+#define _IPA_UT_ALL_SUITES ipa_ut_all_suites_data
+
+/* Meta-test "all" name - test to run all tests in given suite */
+#define _IPA_UT_RUN_ALL_TEST_NAME "all"
+
+/**
+ * Meta-test "regression" name -
+ * test to run all regression tests in given suite
+ */
+#define _IPA_UT_RUN_REGRESSION_TEST_NAME "regression"
+
+
+/* Test Log buffer name and size */
+#define _IPA_UT_TEST_LOG_BUF_NAME ipa_ut_tst_log_buf
+#define _IPA_UT_TEST_LOG_BUF_SIZE 8192
+
+/* Global structure for test fail execution result information */
+#define _IPA_UT_TEST_FAIL_REPORT_DATA ipa_ut_tst_fail_report_data
+#define _IPA_UT_TEST_FAIL_REPORT_SIZE 5
+#define _IPA_UT_TEST_FAIL_REPORT_IDX ipa_ut_tst_fail_report_data_index
+
+/* Start/End definitions of the array of suites */
+#define IPA_UT_DEFINE_ALL_SUITES_START \
+	static struct ipa_ut_suite *_IPA_UT_ALL_SUITES[] =
+#define IPA_UT_DEFINE_ALL_SUITES_END
+
+/**
+ * Suites iterator - Array-like container
+ * First index, number of elements and element fetcher
+ */
+#define IPA_UT_SUITE_FIRST_INDEX 0
+#define IPA_UT_SUITES_COUNT \
+	ARRAY_SIZE(_IPA_UT_ALL_SUITES)
+#define IPA_UT_GET_SUITE(__index) \
+	_IPA_UT_ALL_SUITES[__index]
+
+/**
+ * enum ipa_ut_test_result - Test execution result
+ * @IPA_UT_TEST_RES_FAIL: Test executed and failed
+ * @IPA_UT_TEST_RES_SUCCESS: Test executed and succeeded
+ * @IPA_UT_TEST_RES_SKIP: Test was not executed.
+ *
+ * When running all tests in a suite, a specific test could
+ * be skipped and not executed. For example due to mismatch of
+ * IPA H/W version.
+ */
+enum ipa_ut_test_result {
+	IPA_UT_TEST_RES_FAIL,
+	IPA_UT_TEST_RES_SUCCESS,
+	IPA_UT_TEST_RES_SKIP,
+};
+
+/**
+ * enum ipa_ut_meta_test_type - Type of suite meta-test
+ * @IPA_UT_META_TEST_ALL: Represents all tests in suite
+ * @IPA_UT_META_TEST_REGRESSION: Represents all regression tests in suite
+ */
+enum ipa_ut_meta_test_type {
+	IPA_UT_META_TEST_ALL,
+	IPA_UT_META_TEST_REGRESSION,
+};
+
+#endif /* _IPA_UT_I_H_ */
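To make the token pasting above concrete, for the existing mhi suite the internal macros expand as follows (the same pattern applies to any other suite name):

	/* _IPA_UT_SUITE_DATA(mhi)      -> ipa_ut_mhi_data      */
	/* _IPA_UT_SUITE_META_DATA(mhi) -> ipa_ut_mhi_meta_data */
	/* _IPA_UT_SUITE_TESTS(mhi)     -> ipa_ut_mhi_tests     */

These are the global symbols that the public macros in ipa_ut_framework.h declare and define.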
diff --git a/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
new file mode 100644
index 0000000..944800f
--- /dev/null
+++ b/drivers/platform/msm/ipa/test/ipa_ut_suite_list.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UT_SUITE_LIST_H_
+#define _IPA_UT_SUITE_LIST_H_
+
+#include "ipa_ut_framework.h"
+#include "ipa_ut_i.h"
+
+/**
+ * Declare every suite here so that it can be registered below.
+ * Order is not important.
+ */
+IPA_UT_DECLARE_SUITE(mhi);
+IPA_UT_DECLARE_SUITE(example);
+
+
+/**
+ * Register every suite inside the below block.
+ * Unregistered suites will be ignored.
+ */
+IPA_UT_DEFINE_ALL_SUITES_START
+{
+	IPA_UT_REGISTER_SUITE(mhi),
+	IPA_UT_REGISTER_SUITE(example),
+} IPA_UT_DEFINE_ALL_SUITES_END;
+
+#endif /* _IPA_UT_SUITE_LIST_H_ */
diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig
index c74c3f6..1843db3 100644
--- a/drivers/power/reset/Kconfig
+++ b/drivers/power/reset/Kconfig
@@ -98,12 +98,6 @@
 	  say N here or disable in dts to make sure pm_power_off never be
 	  overwrote wrongly by this driver.
 
-config POWER_RESET_MSM
-	bool "Qualcomm MSM power-off driver"
-	depends on ARCH_QCOM
-	help
-	  Power off and restart support for Qualcomm boards.
-
 config POWER_RESET_LTC2952
 	bool "LTC2952 PowerPath power-off driver"
 	depends on OF_GPIO
@@ -111,6 +105,25 @@
 	  This driver supports an external powerdown trigger and board power
 	  down via the LTC2952. Bindings are made in the device tree.
 
+config POWER_RESET_QCOM
+	bool "Qualcomm Technologies, Inc. MSM power-off driver"
+	depends on ARCH_MSM || ARCH_QCOM
+	depends on POWER_RESET
+	help
+	  Power off and restart support for Qualcomm Technologies, Inc. boards.
+	  This driver supports board power down and restart using PMIC PS_HOLD.
+	  Say Y here if you have a Qualcomm Technologies, Inc. board and wish
+	  to enable the restart driver.
+
+config QCOM_DLOAD_MODE
+	bool "Qualcomm Technologies, Inc. download mode"
+	depends on POWER_RESET_QCOM
+	help
+	  This makes the SoC enter download mode when it resets
+	  due to a kernel panic. Note that this doesn't by itself
+	  make the kernel reboot on a kernel panic - that must be
+	  enabled via another mechanism.
+
 config POWER_RESET_QNAP
 	bool "QNAP power-off driver"
 	depends on OF_GPIO && PLAT_ORION
diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile
index 1be307c..7cc06a8 100644
--- a/drivers/power/reset/Makefile
+++ b/drivers/power/reset/Makefile
@@ -9,7 +9,7 @@
 obj-$(CONFIG_POWER_RESET_GPIO_RESTART) += gpio-restart.o
 obj-$(CONFIG_POWER_RESET_HISI) += hisi-reboot.o
 obj-$(CONFIG_POWER_RESET_IMX) += imx-snvs-poweroff.o
-obj-$(CONFIG_POWER_RESET_MSM) += msm-poweroff.o
+obj-$(CONFIG_POWER_RESET_QCOM) += msm-poweroff.o
 obj-$(CONFIG_POWER_RESET_LTC2952) += ltc2952-poweroff.o
 obj-$(CONFIG_POWER_RESET_QNAP) += qnap-poweroff.o
 obj-$(CONFIG_POWER_RESET_RESTART) += restart-poweroff.o
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 4702efd..1890a28 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -15,49 +15,457 @@
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/module.h>
 #include <linux/reboot.h>
 #include <linux/pm.h>
+#include <linux/delay.h>
+#include <linux/qpnp/power-on.h>
+#include <linux/of_address.h>
 
+#include <asm/cacheflush.h>
+#include <asm/system_misc.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/restart.h>
+#include <soc/qcom/watchdog.h>
+
+#define EMERGENCY_DLOAD_MAGIC1    0x322A4F99
+#define EMERGENCY_DLOAD_MAGIC2    0xC67E4350
+#define EMERGENCY_DLOAD_MAGIC3    0x77777777
+
+#define SCM_IO_DISABLE_PMIC_ARBITER	1
+#define SCM_IO_DEASSERT_PS_HOLD		2
+#define SCM_WDOG_DEBUG_BOOT_PART	0x9
+#define SCM_DLOAD_MODE			0x10
+#define SCM_EDLOAD_MODE			0x01
+#define SCM_DLOAD_CMD			0x10
+
+
+static int restart_mode;
+void *restart_reason;
+static bool scm_pmic_arbiter_disable_supported;
+static bool scm_deassert_ps_hold_supported;
+/* Download mode master kill-switch */
 static void __iomem *msm_ps_hold;
-static int do_msm_restart(struct notifier_block *nb, unsigned long action,
-			   void *data)
-{
-	writel(0, msm_ps_hold);
-	mdelay(10000);
+static phys_addr_t tcsr_boot_misc_detect;
 
+#ifdef CONFIG_QCOM_DLOAD_MODE
+#define EDL_MODE_PROP "qcom,msm-imem-emergency_download_mode"
+#define DL_MODE_PROP "qcom,msm-imem-download_mode"
+
+static int in_panic;
+static void *dload_mode_addr;
+static bool dload_mode_enabled;
+static void *emergency_dload_mode_addr;
+static bool scm_dload_supported;
+
+static int dload_set(const char *val, struct kernel_param *kp);
+static int download_mode = 1;
+module_param_call(download_mode, dload_set, param_get_int,
+			&download_mode, 0644);
+static int panic_prep_restart(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	in_panic = 1;
 	return NOTIFY_DONE;
 }
 
-static struct notifier_block restart_nb = {
-	.notifier_call = do_msm_restart,
-	.priority = 128,
+static struct notifier_block panic_blk = {
+	.notifier_call	= panic_prep_restart,
 };
 
+int scm_set_dload_mode(int arg1, int arg2)
+{
+	struct scm_desc desc = {
+		.args[0] = arg1,
+		.args[1] = arg2,
+		.arginfo = SCM_ARGS(2),
+	};
+
+	if (!scm_dload_supported) {
+		if (tcsr_boot_misc_detect)
+			return scm_io_write(tcsr_boot_misc_detect, arg1);
+
+		return 0;
+	}
+
+	if (!is_scm_armv8())
+		return scm_call_atomic2(SCM_SVC_BOOT, SCM_DLOAD_CMD, arg1,
+					arg2);
+
+	return scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_DLOAD_CMD),
+				&desc);
+}
+
+static void set_dload_mode(int on)
+{
+	int ret;
+
+	if (dload_mode_addr) {
+		__raw_writel(on ? 0xE47B337D : 0, dload_mode_addr);
+		__raw_writel(on ? 0xCE14091A : 0,
+		       dload_mode_addr + sizeof(unsigned int));
+		/* Make sure the download cookie is updated */
+		mb();
+	}
+
+	ret = scm_set_dload_mode(on ? SCM_DLOAD_MODE : 0, 0);
+	if (ret)
+		pr_err("Failed to set secure DLOAD mode: %d\n", ret);
+
+	dload_mode_enabled = on;
+}
+
+static bool get_dload_mode(void)
+{
+	return dload_mode_enabled;
+}
+
+static void enable_emergency_dload_mode(void)
+{
+	int ret;
+
+	if (emergency_dload_mode_addr) {
+		__raw_writel(EMERGENCY_DLOAD_MAGIC1,
+				emergency_dload_mode_addr);
+		__raw_writel(EMERGENCY_DLOAD_MAGIC2,
+				emergency_dload_mode_addr +
+				sizeof(unsigned int));
+		__raw_writel(EMERGENCY_DLOAD_MAGIC3,
+				emergency_dload_mode_addr +
+				(2 * sizeof(unsigned int)));
+
+		/* Disable the PMIC watchdog so that emergency dload mode
+		 * does not auto reset.
+		 */
+		qpnp_pon_wd_config(0);
+		/* Make sure all the cookies are flushed to memory */
+		mb();
+	}
+
+	ret = scm_set_dload_mode(SCM_EDLOAD_MODE, 0);
+	if (ret)
+		pr_err("Failed to set secure EDLOAD mode: %d\n", ret);
+}
+
+static int dload_set(const char *val, struct kernel_param *kp)
+{
+	int ret;
+
+	int old_val = download_mode;
+
+	ret = param_set_int(val, kp);
+
+	if (ret)
+		return ret;
+
+	/* Only 0 and 1 are valid values for download_mode; reject others. */
+	if (download_mode >> 1) {
+		download_mode = old_val;
+		return -EINVAL;
+	}
+
+	set_dload_mode(download_mode);
+
+	return 0;
+}
+#else
+#define set_dload_mode(x) do {} while (0)
+
+static void enable_emergency_dload_mode(void)
+{
+	pr_err("dload mode is not enabled on target\n");
+}
+
+static bool get_dload_mode(void)
+{
+	return false;
+}
+#endif
+
+void msm_set_restart_mode(int mode)
+{
+	restart_mode = mode;
+}
+EXPORT_SYMBOL(msm_set_restart_mode);
+
+/*
+ * Force the SPMI PMIC arbiter to shutdown so that no more SPMI transactions
+ * are sent from the MSM to the PMIC.  This is required in order to avoid an
+ * SPMI lockup on certain PMIC chips if PS_HOLD is lowered in the middle of
+ * an SPMI transaction.
+ */
+static void halt_spmi_pmic_arbiter(void)
+{
+	struct scm_desc desc = {
+		.args[0] = 0,
+		.arginfo = SCM_ARGS(1),
+	};
+
+	if (scm_pmic_arbiter_disable_supported) {
+		pr_crit("Calling SCM to disable SPMI PMIC arbiter\n");
+		if (!is_scm_armv8())
+			scm_call_atomic1(SCM_SVC_PWR,
+					 SCM_IO_DISABLE_PMIC_ARBITER, 0);
+		else
+			scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR,
+				  SCM_IO_DISABLE_PMIC_ARBITER), &desc);
+	}
+}
+
+static void msm_restart_prepare(const char *cmd)
+{
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	bool need_warm_reset = false;
+
+
+	/* Write download mode flags if we're panicking
+	 * Write download mode flags if restart_mode says so
+	 * Kill download mode if master-kill switch is set
+	 */
+
+	set_dload_mode(download_mode &&
+			(in_panic || restart_mode == RESTART_DLOAD));
+#endif
+
+	if (qpnp_pon_check_hard_reset_stored()) {
+		/* Set warm reset as true when device is in dload mode */
+		if (get_dload_mode() ||
+			((cmd != NULL && cmd[0] != '\0') &&
+			!strcmp(cmd, "edl")))
+			need_warm_reset = true;
+	} else {
+		need_warm_reset = (get_dload_mode() ||
+				(cmd != NULL && cmd[0] != '\0'));
+	}
+
+	/* Hard reset the PMIC unless memory contents must be maintained. */
+	if (need_warm_reset)
+		qpnp_pon_system_pwr_off(PON_POWER_OFF_WARM_RESET);
+	else
+		qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
+
+	if (cmd != NULL) {
+		if (!strncmp(cmd, "bootloader", 10)) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_BOOTLOADER);
+			__raw_writel(0x77665500, restart_reason);
+		} else if (!strncmp(cmd, "recovery", 8)) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_RECOVERY);
+			__raw_writel(0x77665502, restart_reason);
+		} else if (!strcmp(cmd, "rtc")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_RTC);
+			__raw_writel(0x77665503, restart_reason);
+		} else if (!strcmp(cmd, "dm-verity device corrupted")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_DMVERITY_CORRUPTED);
+			__raw_writel(0x77665508, restart_reason);
+		} else if (!strcmp(cmd, "dm-verity enforcing")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_DMVERITY_ENFORCE);
+			__raw_writel(0x77665509, restart_reason);
+		} else if (!strcmp(cmd, "keys clear")) {
+			qpnp_pon_set_restart_reason(
+				PON_RESTART_REASON_KEYS_CLEAR);
+			__raw_writel(0x7766550a, restart_reason);
+		} else if (!strncmp(cmd, "oem-", 4)) {
+			unsigned long code;
+			int ret;
+
+			ret = kstrtoul(cmd + 4, 16, &code);
+			if (!ret)
+				__raw_writel(0x6f656d00 | (code & 0xff),
+					     restart_reason);
+		} else if (!strncmp(cmd, "edl", 3)) {
+			enable_emergency_dload_mode();
+		} else {
+			__raw_writel(0x77665501, restart_reason);
+		}
+	}
+
+	flush_cache_all();
+
+	/* outer_flush_all is not supported by 64bit kernel */
+#ifndef CONFIG_ARM64
+	outer_flush_all();
+#endif
+
+}
+
+/*
+ * Deassert PS_HOLD to signal the PMIC that we are ready to power down or reset.
+ * Do this by calling into the secure environment, if available, or by directly
+ * writing to a hardware register.
+ *
+ * This function should never return.
+ */
+static void deassert_ps_hold(void)
+{
+	struct scm_desc desc = {
+		.args[0] = 0,
+		.arginfo = SCM_ARGS(1),
+	};
+
+	if (scm_deassert_ps_hold_supported) {
+		/* This call will be available on ARMv8 only */
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_PWR,
+				 SCM_IO_DEASSERT_PS_HOLD), &desc);
+	}
+
+	/* Fall-through to the direct write in case the scm_call "returns" */
+	__raw_writel(0, msm_ps_hold);
+}
+
+static void do_msm_restart(enum reboot_mode reboot_mode, const char *cmd)
+{
+	int ret;
+	struct scm_desc desc = {
+		.args[0] = 1,
+		.args[1] = 0,
+		.arginfo = SCM_ARGS(2),
+	};
+
+	pr_notice("Going down for restart now\n");
+
+	msm_restart_prepare(cmd);
+
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	/*
+	 * Trigger a watchdog bite here and if this fails,
+	 * device will take the usual restart path.
+	 */
+
+	if (WDOG_BITE_ON_PANIC && in_panic)
+		msm_trigger_wdog_bite();
+#endif
+
+	/* Needed to bypass debug image on some chips */
+	if (!is_scm_armv8())
+		ret = scm_call_atomic2(SCM_SVC_BOOT,
+			       SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+	else
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+			  SCM_WDOG_DEBUG_BOOT_PART), &desc);
+	if (ret)
+		pr_err("Failed to disable secure wdog debug: %d\n", ret);
+
+	halt_spmi_pmic_arbiter();
+	deassert_ps_hold();
+
+	msleep(10000);
+}
+
 static void do_msm_poweroff(void)
 {
-	/* TODO: Add poweroff capability */
-	do_msm_restart(&restart_nb, 0, NULL);
+	int ret;
+	struct scm_desc desc = {
+		.args[0] = 1,
+		.args[1] = 0,
+		.arginfo = SCM_ARGS(2),
+	};
+
+	pr_notice("Powering off the SoC\n");
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	set_dload_mode(0);
+#endif
+	qpnp_pon_system_pwr_off(PON_POWER_OFF_SHUTDOWN);
+	/* Needed to bypass debug image on some chips */
+	if (!is_scm_armv8())
+		ret = scm_call_atomic2(SCM_SVC_BOOT,
+			       SCM_WDOG_DEBUG_BOOT_PART, 1, 0);
+	else
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_BOOT,
+			  SCM_WDOG_DEBUG_BOOT_PART), &desc);
+	if (ret)
+		pr_err("Failed to disable wdog debug: %d\n", ret);
+
+	halt_spmi_pmic_arbiter();
+	deassert_ps_hold();
+
+	msleep(10000);
+	pr_err("Powering off has failed\n");
 }
 
 static int msm_restart_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct resource *mem;
+	struct device_node *np;
+	int ret = 0;
 
-	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	if (scm_is_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD) > 0)
+		scm_dload_supported = true;
+
+	atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
+	np = of_find_compatible_node(NULL, NULL, DL_MODE_PROP);
+	if (!np) {
+		pr_err("unable to find DT imem DLOAD mode node\n");
+	} else {
+		dload_mode_addr = of_iomap(np, 0);
+		if (!dload_mode_addr)
+			pr_err("unable to map imem DLOAD offset\n");
+	}
+
+	np = of_find_compatible_node(NULL, NULL, EDL_MODE_PROP);
+	if (!np) {
+		pr_err("unable to find DT imem EDLOAD mode node\n");
+	} else {
+		emergency_dload_mode_addr = of_iomap(np, 0);
+		if (!emergency_dload_mode_addr)
+			pr_err("unable to map imem EDLOAD mode offset\n");
+	}
+
+#endif
+	np = of_find_compatible_node(NULL, NULL,
+				"qcom,msm-imem-restart_reason");
+	if (!np) {
+		pr_err("unable to find DT imem restart reason node\n");
+	} else {
+		restart_reason = of_iomap(np, 0);
+		if (!restart_reason) {
+			pr_err("unable to map imem restart reason offset\n");
+			ret = -ENOMEM;
+			goto err_restart_reason;
+		}
+	}
+
+	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pshold-base");
 	msm_ps_hold = devm_ioremap_resource(dev, mem);
 	if (IS_ERR(msm_ps_hold))
 		return PTR_ERR(msm_ps_hold);
 
-	register_restart_handler(&restart_nb);
+	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "tcsr-boot-misc-detect");
+	if (mem)
+		tcsr_boot_misc_detect = mem->start;
 
 	pm_power_off = do_msm_poweroff;
+	arm_pm_restart = do_msm_restart;
+
+	if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DISABLE_PMIC_ARBITER) > 0)
+		scm_pmic_arbiter_disable_supported = true;
+
+	if (scm_is_call_available(SCM_SVC_PWR, SCM_IO_DEASSERT_PS_HOLD) > 0)
+		scm_deassert_ps_hold_supported = true;
+
+	download_mode = scm_is_secure_device();
+	set_dload_mode(download_mode);
 
 	return 0;
+
+err_restart_reason:
+#ifdef CONFIG_QCOM_DLOAD_MODE
+	iounmap(emergency_dload_mode_addr);
+	iounmap(dload_mode_addr);
+#endif
+	return ret;
 }
 
 static const struct of_device_id of_msm_restart_match[] = {
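To tie the pieces above together, a sketch of how a userspace-requested reboot reaches this driver; the call chain below is the generic ARM/ARM64 reboot path of this kernel generation and is illustrative rather than exhaustive:

	/* e.g. from the reboot(2) syscall path or another kernel caller */
	kernel_restart("bootloader");
		/*
		 * -> machine_restart("bootloader")
		 * -> arm_pm_restart(reboot_mode, "bootloader") == do_msm_restart()
		 * -> msm_restart_prepare("bootloader"), which selects a warm or
		 *    hard PMIC reset, writes the 0x77665500 cookie into the imem
		 *    restart_reason region and sets PON_RESTART_REASON_BOOTLOADER
		 *    before PS_HOLD is finally deasserted.
		 */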
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index bcde8d1..fdb824f 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -107,7 +107,10 @@
 	else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
 		return sprintf(buf, "%s\n", value.strval);
 
-	return sprintf(buf, "%d\n", value.intval);
+	if (off == POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT)
+		return sprintf(buf, "%lld\n", value.int64val);
+	else
+		return sprintf(buf, "%d\n", value.intval);
 }
 
 static ssize_t power_supply_store_property(struct device *dev,
@@ -198,6 +201,12 @@
 	POWER_SUPPLY_ATTR(scope),
 	POWER_SUPPLY_ATTR(charge_term_current),
 	POWER_SUPPLY_ATTR(calibrate),
+	/* Local extensions */
+	POWER_SUPPLY_ATTR(usb_hc),
+	POWER_SUPPLY_ATTR(usb_otg),
+	POWER_SUPPLY_ATTR(charge_enabled),
+	/* Local extensions of type int64_t */
+	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_ATTR(model_name),
 	POWER_SUPPLY_ATTR(manufacturer),
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 936f7cc..a07ce59 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -878,5 +878,16 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_STUB
+	tristate "Stub Regulator"
+	help
+	  This driver adds stub regulator support. The driver is not backed
+	  by any real hardware. It allows clients to register their
+	  regulator device constraints and use all of the standard regulator
+	  interfaces. This is useful for bringing up new platforms when the
+	  real hardware based implementation may not yet be available.
+	  Clients can use the real regulator device names with proper
+	  constraint checking while the real driver is being developed.
+
 endif
 
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 2142a5d..519cdeb 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -113,5 +113,6 @@
 obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
 
+obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
 
 ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/stub-regulator.c b/drivers/regulator/stub-regulator.c
new file mode 100644
index 0000000..3d7465d
--- /dev/null
+++ b/drivers/regulator/stub-regulator.c
@@ -0,0 +1,307 @@
+/*
+ * Copyright (c) 2012, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/stub-regulator.h>
+
+#define STUB_REGULATOR_MAX_NAME 40
+
+struct regulator_stub {
+	struct regulator_desc	rdesc;
+	struct regulator_dev	*rdev;
+	int			voltage;
+	bool			enabled;
+	int			mode;
+	int			hpm_min_load;
+	int			system_uA;
+	char			name[STUB_REGULATOR_MAX_NAME];
+};
+
+static int regulator_stub_set_voltage(struct regulator_dev *rdev, int min_uV,
+				  int max_uV, unsigned *selector)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	vreg_priv->voltage = min_uV;
+	return 0;
+}
+
+static int regulator_stub_get_voltage(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	return vreg_priv->voltage;
+}
+
+static int regulator_stub_list_voltage(struct regulator_dev *rdev,
+				    unsigned selector)
+{
+	struct regulation_constraints *constraints = rdev->constraints;
+
+	if (selector >= 2)
+		return -EINVAL;
+	else if (selector == 0)
+		return constraints->min_uV;
+	else
+		return constraints->max_uV;
+}
+
+static unsigned int regulator_stub_get_mode(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	return vreg_priv->mode;
+}
+
+static int regulator_stub_set_mode(struct regulator_dev *rdev,
+				   unsigned int mode)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	if (mode != REGULATOR_MODE_NORMAL && mode != REGULATOR_MODE_IDLE) {
+		dev_err(&rdev->dev, "%s: invalid mode requested %u\n",
+							__func__, mode);
+		return -EINVAL;
+	}
+	vreg_priv->mode = mode;
+	return 0;
+}
+
+static unsigned int regulator_stub_get_optimum_mode(struct regulator_dev *rdev,
+		int input_uV, int output_uV, int load_uA)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+	unsigned int mode;
+
+	if (load_uA + vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+		mode = REGULATOR_MODE_NORMAL;
+	else
+		mode = REGULATOR_MODE_IDLE;
+
+	return mode;
+}
+
+static int regulator_stub_enable(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	vreg_priv->enabled = true;
+	return 0;
+}
+
+static int regulator_stub_disable(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	vreg_priv->enabled = false;
+	return 0;
+}
+
+static int regulator_stub_is_enabled(struct regulator_dev *rdev)
+{
+	struct regulator_stub *vreg_priv = rdev_get_drvdata(rdev);
+
+	return vreg_priv->enabled;
+}
+
+/* Stub regulator operations. */
+static struct regulator_ops regulator_stub_ops = {
+	.enable			= regulator_stub_enable,
+	.disable		= regulator_stub_disable,
+	.is_enabled		= regulator_stub_is_enabled,
+	.set_voltage		= regulator_stub_set_voltage,
+	.get_voltage		= regulator_stub_get_voltage,
+	.list_voltage		= regulator_stub_list_voltage,
+	.set_mode		= regulator_stub_set_mode,
+	.get_mode		= regulator_stub_get_mode,
+	.get_optimum_mode	= regulator_stub_get_optimum_mode,
+};
+
+static void regulator_stub_cleanup(struct regulator_stub *vreg_priv)
+{
+	if (vreg_priv && vreg_priv->rdev)
+		regulator_unregister(vreg_priv->rdev);
+	kfree(vreg_priv);
+}
+
+static int regulator_stub_probe(struct platform_device *pdev)
+{
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data = NULL;
+	struct device *dev = &pdev->dev;
+	struct stub_regulator_pdata *vreg_pdata;
+	struct regulator_desc *rdesc;
+	struct regulator_stub *vreg_priv;
+	int rc;
+
+	vreg_priv = kzalloc(sizeof(*vreg_priv), GFP_KERNEL);
+	if (!vreg_priv)
+		return -ENOMEM;
+
+	if (dev->of_node) {
+		/* Use device tree. */
+		init_data = of_get_regulator_init_data(dev, dev->of_node,
+							&vreg_priv->rdesc);
+		if (!init_data) {
+			dev_err(dev, "%s: unable to allocate memory\n",
+					__func__);
+			rc = -ENOMEM;
+			goto err_probe;
+		}
+
+		if (init_data->constraints.name == NULL) {
+			dev_err(dev, "%s: regulator name not specified\n",
+				__func__);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+
+		if (of_get_property(dev->of_node, "parent-supply", NULL))
+			init_data->supply_regulator = "parent";
+
+		of_property_read_u32(dev->of_node, "qcom,system-load",
+					&vreg_priv->system_uA);
+		of_property_read_u32(dev->of_node, "qcom,hpm-min-load",
+					&vreg_priv->hpm_min_load);
+
+		init_data->constraints.input_uV	= init_data->constraints.max_uV;
+
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		init_data->constraints.valid_modes_mask
+			= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+	} else {
+		/* Use platform data. */
+		vreg_pdata = dev->platform_data;
+		if (!vreg_pdata) {
+			dev_err(dev, "%s: no platform data\n", __func__);
+			rc = -EINVAL;
+			goto err_probe;
+		}
+		init_data = &vreg_pdata->init_data;
+
+		vreg_priv->system_uA = vreg_pdata->system_uA;
+		vreg_priv->hpm_min_load = vreg_pdata->hpm_min_load;
+	}
+
+	dev_set_drvdata(dev, vreg_priv);
+
+	rdesc = &vreg_priv->rdesc;
+	strlcpy(vreg_priv->name, init_data->constraints.name,
+						   STUB_REGULATOR_MAX_NAME);
+	rdesc->name = vreg_priv->name;
+	rdesc->ops = &regulator_stub_ops;
+
+	/*
+	 * Ensure that voltage set points are handled correctly for regulators
+	 * which have a specified voltage constraint range, as well as those
+	 * that do not.
+	 */
+	if (init_data->constraints.min_uV == 0 &&
+	    init_data->constraints.max_uV == 0)
+		rdesc->n_voltages = 0;
+	else
+		rdesc->n_voltages = 2;
+
+	rdesc->id    = pdev->id;
+	rdesc->owner = THIS_MODULE;
+	rdesc->type  = REGULATOR_VOLTAGE;
+	vreg_priv->voltage = init_data->constraints.min_uV;
+	if (vreg_priv->system_uA >= vreg_priv->hpm_min_load)
+		vreg_priv->mode = REGULATOR_MODE_NORMAL;
+	else
+		vreg_priv->mode = REGULATOR_MODE_IDLE;
+
+	reg_config.dev = dev;
+	reg_config.init_data = init_data;
+	reg_config.driver_data = vreg_priv;
+	reg_config.of_node = dev->of_node;
+	vreg_priv->rdev = regulator_register(rdesc, &reg_config);
+
+	if (IS_ERR(vreg_priv->rdev)) {
+		rc = PTR_ERR(vreg_priv->rdev);
+		vreg_priv->rdev = NULL;
+		if (rc != -EPROBE_DEFER)
+			dev_err(dev, "%s: regulator_register failed\n",
+				__func__);
+		goto err_probe;
+	}
+
+	return 0;
+
+err_probe:
+	regulator_stub_cleanup(vreg_priv);
+	return rc;
+}
+
+static int regulator_stub_remove(struct platform_device *pdev)
+{
+	struct regulator_stub *vreg_priv = dev_get_drvdata(&pdev->dev);
+
+	regulator_stub_cleanup(vreg_priv);
+	return 0;
+}
+
+static const struct of_device_id regulator_stub_match_table[] = {
+	{ .compatible = "qcom," STUB_REGULATOR_DRIVER_NAME, },
+	{}
+};
+
+static struct platform_driver regulator_stub_driver = {
+	.probe	= regulator_stub_probe,
+	.remove	= regulator_stub_remove,
+	.driver	= {
+		.name	= STUB_REGULATOR_DRIVER_NAME,
+		.owner	= THIS_MODULE,
+		.of_match_table = regulator_stub_match_table,
+	},
+};
+
+int __init regulator_stub_init(void)
+{
+	static int registered;
+
+	if (registered)
+		return 0;
+
+	registered = 1;
+
+	return platform_driver_register(&regulator_stub_driver);
+}
+EXPORT_SYMBOL(regulator_stub_init);
+postcore_initcall(regulator_stub_init);
+
+static void __exit regulator_stub_exit(void)
+{
+	platform_driver_unregister(&regulator_stub_driver);
+}
+module_exit(regulator_stub_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("stub regulator driver");
+MODULE_ALIAS("platform: " STUB_REGULATOR_DRIVER_NAME);
diff --git a/drivers/rtc/rtc-palmas.c b/drivers/rtc/rtc-palmas.c
index 4bcfb88..34aea38 100644
--- a/drivers/rtc/rtc-palmas.c
+++ b/drivers/rtc/rtc-palmas.c
@@ -45,6 +45,42 @@
 /* Total number of RTC registers needed to set time*/
 #define PALMAS_NUM_TIME_REGS	(PALMAS_YEARS_REG - PALMAS_SECONDS_REG + 1)
 
+/*
+ * Special bin2bcd mapping to deal with bcd storage of year.
+ *
+ *   0-69                -> 0xD0
+ *  70-99  (1970 - 1999) -> 0xD0 - 0xF9 (correctly rolls to 0x00)
+ * 100-199 (2000 - 2099) -> 0x00 - 0x99 (does not roll to 0xA0 :-( )
+ * 200-229 (2100 - 2129) -> 0xA0 - 0xC9 (really for completeness)
+ * 230-                  -> 0xC9
+ *
+ * Confirmed: the only transition that does not work correctly for this rtc
+ * clock is the transition from 2099 to 2100, it proceeds to 2000. We will
+ * accept this issue since the clock retains and transitions the year correctly
+ * in all other conditions.
+ */
+static unsigned char year_bin2bcd(int val)
+{
+	if (val < 70)
+		return 0xD0;
+	if (val < 100)
+		return bin2bcd(val - 20) | 0x80; /* KISS leverage of bin2bcd */
+	if (val >= 230)
+		return 0xC9;
+	if (val >= 200)
+		return bin2bcd(val - 180) | 0x80;
+	return bin2bcd(val - 100);
+}
+
+static int year_bcd2bin(unsigned char val)
+{
+	if (val >= 0xD0)
+		return bcd2bin(val & 0x7F) + 20;
+	if (val >= 0xA0)
+		return bcd2bin(val & 0x7F) + 180;
+	return bcd2bin(val) + 100;
+}
+
 static int palmas_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
 	unsigned char rtc_data[PALMAS_NUM_TIME_REGS];
@@ -71,7 +107,7 @@
 	tm->tm_hour = bcd2bin(rtc_data[2]);
 	tm->tm_mday = bcd2bin(rtc_data[3]);
 	tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
-	tm->tm_year = bcd2bin(rtc_data[5]) + 100;
+	tm->tm_year = year_bcd2bin(rtc_data[5]);
 
 	return ret;
 }
@@ -87,7 +123,7 @@
 	rtc_data[2] = bin2bcd(tm->tm_hour);
 	rtc_data[3] = bin2bcd(tm->tm_mday);
 	rtc_data[4] = bin2bcd(tm->tm_mon + 1);
-	rtc_data[5] = bin2bcd(tm->tm_year - 100);
+	rtc_data[5] = year_bin2bcd(tm->tm_year);
 
 	/* Stop RTC while updating the RTC time registers */
 	ret = palmas_update_bits(palmas, PALMAS_RTC_BASE, PALMAS_RTC_CTRL_REG,
@@ -142,7 +178,7 @@
 	alm->time.tm_hour = bcd2bin(alarm_data[2]);
 	alm->time.tm_mday = bcd2bin(alarm_data[3]);
 	alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
-	alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;
+	alm->time.tm_year = year_bcd2bin(alarm_data[5]);
 
 	ret = palmas_read(palmas, PALMAS_RTC_BASE, PALMAS_RTC_INTERRUPTS_REG,
 			&int_val);
@@ -173,7 +209,7 @@
 	alarm_data[2] = bin2bcd(alm->time.tm_hour);
 	alarm_data[3] = bin2bcd(alm->time.tm_mday);
 	alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
-	alarm_data[5] = bin2bcd(alm->time.tm_year - 100);
+	alarm_data[5] = year_bin2bcd(alm->time.tm_year);
 
 	ret = palmas_bulk_write(palmas, PALMAS_RTC_BASE,
 		PALMAS_ALARM_SECONDS_REG, alarm_data, PALMAS_NUM_TIME_REGS);
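A few round-trip values worked out by hand from year_bin2bcd()/year_bcd2bin() above, as a sanity check of the mapping described in the comment (tm_year counts years since 1900):

	/* 2016: tm_year = 116 -> bin2bcd(116 - 100)         = 0x16; year_bcd2bin(0x16) = 116 */
	/* 1999: tm_year =  99 -> bin2bcd(99 - 20)   | 0x80  = 0xF9; year_bcd2bin(0xF9) =  99 */
	/* 2100: tm_year = 200 -> bin2bcd(200 - 180) | 0x80  = 0xA0; year_bcd2bin(0xA0) = 200 */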
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 2cca9cf..935f782 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -202,7 +202,11 @@
 	req->sense = sense;
 	req->sense_len = 0;
 	req->retries = retries;
-	req->timeout = timeout;
+	if (likely(!sdev->timeout_override))
+		req->timeout = timeout;
+	else
+		req->timeout = sdev->timeout_override;
+
 	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;
 
 	/*
@@ -2175,6 +2179,33 @@
 }
 EXPORT_SYMBOL(scsi_unblock_requests);
 
+/*
+ * Function:    scsi_set_cmd_timeout_override()
+ *
+ * Purpose:     Utility function used by low-level drivers to override
+ *		the timeout for scsi commands.
+ *
+ * Arguments:   sdev       - scsi device in question
+ *		timeout	   - timeout in jiffies
+ *
+ * Returns:     Nothing
+ *
+ * Lock status: No locks are assumed held.
+ *
+ * Notes:	Some platforms might be very slow and command completion may
+ *		take much longer than default scsi command timeouts.
+ *		SCSI Read/Write command timeout can be changed by
+ *		blk_queue_rq_timeout() but there is no option to override
+ *		the timeout for the rest of the scsi commands. This function
+ *		allows that.
+ */
+void scsi_set_cmd_timeout_override(struct scsi_device *sdev,
+				   unsigned int timeout)
+{
+	sdev->timeout_override = timeout;
+}
+EXPORT_SYMBOL(scsi_set_cmd_timeout_override);
+
 int __init scsi_init_queue(void)
 {
 	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
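A sketch of how a low-level driver might use the new override from its ->slave_configure() hook (the 60-second value is hypothetical; per the comment above the argument is in jiffies):

	static int sketch_slave_configure(struct scsi_device *sdev)
	{
		/* stretch the timeout for every command queued to this device */
		scsi_set_cmd_timeout_override(sdev, 60 * HZ);
		return 0;
	}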
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index b44c1bb..af17066 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -16,6 +16,9 @@
 
 #include "scsi_priv.h"
 
+static int do_scsi_runtime_resume(struct device *dev,
+				   const struct dev_pm_ops *pm);
+
 #ifdef CONFIG_PM_SLEEP
 
 static int do_scsi_suspend(struct device *dev, const struct dev_pm_ops *pm)
@@ -77,10 +80,22 @@
 	scsi_device_resume(to_scsi_device(dev));
 	dev_dbg(dev, "scsi resume: %d\n", err);
 
-	if (err == 0) {
+	if (err == 0 && (cb != do_scsi_runtime_resume)) {
 		pm_runtime_disable(dev);
-		pm_runtime_set_active(dev);
+		err = pm_runtime_set_active(dev);
 		pm_runtime_enable(dev);
+
+		if (!err && scsi_is_sdev_device(dev)) {
+			struct scsi_device *sdev = to_scsi_device(dev);
+
+			/*
+			 * If scsi device runtime PM is managed by block layer
+			 * then we should update request queue's runtime status
+			 * as well.
+			 */
+			if (sdev->request_queue->dev)
+				blk_post_runtime_resume(sdev->request_queue, 0);
+		}
 	}
 
 	return err;
@@ -223,12 +238,32 @@
 
 #endif /* CONFIG_PM_SLEEP */
 
+static int do_scsi_runtime_suspend(struct device *dev,
+				   const struct dev_pm_ops *pm)
+{
+	return pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : 0;
+}
+
+static int do_scsi_runtime_resume(struct device *dev,
+				   const struct dev_pm_ops *pm)
+{
+	return pm && pm->runtime_resume ? pm->runtime_resume(dev) : 0;
+}
+
 static int sdev_runtime_suspend(struct device *dev)
 {
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	struct scsi_device *sdev = to_scsi_device(dev);
 	int err = 0;
 
+	if (!sdev->request_queue->dev) {
+		err = scsi_dev_type_suspend(dev, do_scsi_runtime_suspend);
+		if (err == -EAGAIN)
+			pm_schedule_suspend(dev, jiffies_to_msecs(
+					round_jiffies_up_relative(HZ/10)));
+		return err;
+	}
+
 	err = blk_pre_runtime_suspend(sdev->request_queue);
 	if (err)
 		return err;
@@ -258,6 +293,9 @@
 	const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
 	int err = 0;
 
+	if (!sdev->request_queue->dev)
+		return scsi_dev_type_resume(dev, do_scsi_runtime_resume);
+
 	blk_pre_runtime_resume(sdev->request_queue);
 	if (pm && pm->runtime_resume)
 		err = pm->runtime_resume(dev);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6f7128f..a06069b 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -825,13 +825,8 @@
 		 * well-known logical units. Force well-known type
 		 * to enumerate them correctly.
 		 */
-		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
-			sdev_printk(KERN_WARNING, sdev,
-				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
-				__func__, sdev->type, (unsigned int)sdev->lun);
+		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN)
 			sdev->type = TYPE_WLUN;
-		}
-
 	}
 
 	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
@@ -979,6 +974,10 @@
 
 	transport_configure_device(&sdev->sdev_gendev);
 
+	/* The LLD can override auto suspend tunables in ->slave_configure() */
+	sdev->use_rpm_auto = 0;
+	sdev->autosuspend_delay = SCSI_DEFAULT_AUTOSUSPEND_DELAY;
+
 	if (sdev->host->hostt->slave_configure) {
 		ret = sdev->host->hostt->slave_configure(sdev);
 		if (ret) {
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 0734927..a72bfde 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1217,7 +1217,8 @@
 	device_enable_async_suspend(&sdev->sdev_gendev);
 	scsi_autopm_get_target(starget);
 	pm_runtime_set_active(&sdev->sdev_gendev);
-	pm_runtime_forbid(&sdev->sdev_gendev);
+	if (!sdev->use_rpm_auto)
+		pm_runtime_forbid(&sdev->sdev_gendev);
 	pm_runtime_enable(&sdev->sdev_gendev);
 	scsi_autopm_put_target(starget);
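As the scsi_scan.c comment notes, the LLD can override the auto-suspend tunables from ->slave_configure(); a sketch, assuming the sdev fields introduced by this series (use_rpm_auto, autosuspend_delay) are visible to the LLD:

	static int sketch_slave_configure(struct scsi_device *sdev)
	{
		sdev->use_rpm_auto = 1;        /* skip pm_runtime_forbid() in the hunk above */
		sdev->autosuspend_delay = 100; /* ms, used by the sd.c hunk below */
		return 0;
	}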
 
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 51e5629..b7889c7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -767,7 +767,10 @@
 	}
 
 	rq->completion_data = page;
-	rq->timeout = SD_TIMEOUT;
+	if (likely(!sdp->timeout_override))
+		rq->timeout = SD_TIMEOUT;
+	else
+		rq->timeout = sdp->timeout_override;
 
 	cmd->transfersize = len;
 	cmd->allowed = SD_MAX_RETRIES;
@@ -847,7 +850,10 @@
 	sector >>= ilog2(sdp->sector_size) - 9;
 	nr_sectors >>= ilog2(sdp->sector_size) - 9;
 
-	rq->timeout = SD_WRITE_SAME_TIMEOUT;
+	if (likely(!sdp->timeout_override))
+		rq->timeout = SD_WRITE_SAME_TIMEOUT;
+	else
+		rq->timeout = sdp->timeout_override;
 
 	if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
 		cmd->cmd_len = 16;
@@ -1394,86 +1400,6 @@
 	return 0;
 }
 
-/**
- *	sd_check_events - check media events
- *	@disk: kernel device descriptor
- *	@clearing: disk events currently being cleared
- *
- *	Returns mask of DISK_EVENT_*.
- *
- *	Note: this function is invoked from the block subsystem.
- **/
-static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
-{
-	struct scsi_disk *sdkp = scsi_disk_get(disk);
-	struct scsi_device *sdp;
-	struct scsi_sense_hdr *sshdr = NULL;
-	int retval;
-
-	if (!sdkp)
-		return 0;
-
-	sdp = sdkp->device;
-	SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_check_events\n"));
-
-	/*
-	 * If the device is offline, don't send any commands - just pretend as
-	 * if the command failed.  If the device ever comes back online, we
-	 * can deal with it then.  It is only because of unrecoverable errors
-	 * that we would ever take a device offline in the first place.
-	 */
-	if (!scsi_device_online(sdp)) {
-		set_media_not_present(sdkp);
-		goto out;
-	}
-
-	/*
-	 * Using TEST_UNIT_READY enables differentiation between drive with
-	 * no cartridge loaded - NOT READY, drive with changed cartridge -
-	 * UNIT ATTENTION, or with same cartridge - GOOD STATUS.
-	 *
-	 * Drives that auto spin down. eg iomega jaz 1G, will be started
-	 * by sd_spinup_disk() from sd_revalidate_disk(), which happens whenever
-	 * sd_revalidate() is called.
-	 */
-	retval = -ENODEV;
-
-	if (scsi_block_when_processing_errors(sdp)) {
-		sshdr  = kzalloc(sizeof(*sshdr), GFP_KERNEL);
-		retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
-					      sshdr);
-	}
-
-	/* failed to execute TUR, assume media not present */
-	if (host_byte(retval)) {
-		set_media_not_present(sdkp);
-		goto out;
-	}
-
-	if (media_not_present(sdkp, sshdr))
-		goto out;
-
-	/*
-	 * For removable scsi disk we have to recognise the presence
-	 * of a disk in the drive.
-	 */
-	if (!sdkp->media_present)
-		sdp->changed = 1;
-	sdkp->media_present = 1;
-out:
-	/*
-	 * sdp->changed is set under the following conditions:
-	 *
-	 *	Medium present state has changed in either direction.
-	 *	Device has indicated UNIT_ATTENTION.
-	 */
-	kfree(sshdr);
-	retval = sdp->changed ? DISK_EVENT_MEDIA_CHANGE : 0;
-	sdp->changed = 0;
-	scsi_disk_put(sdkp);
-	return retval;
-}
-
 static int sd_sync_cache(struct scsi_disk *sdkp)
 {
 	int retries, res;
@@ -1665,7 +1591,6 @@
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= sd_compat_ioctl,
 #endif
-	.check_events		= sd_check_events,
 	.revalidate_disk	= sd_revalidate_disk,
 	.unlock_native_capacity	= sd_unlock_native_capacity,
 	.pr_ops			= &sd_pr_ops,
@@ -2334,11 +2259,6 @@
 				sizeof(cap_str_10));
 
 		if (sdkp->first_scan || old_capacity != sdkp->capacity) {
-			sd_printk(KERN_NOTICE, sdkp,
-				  "%llu %d-byte logical blocks: (%s/%s)\n",
-				  (unsigned long long)sdkp->capacity,
-				  sector_size, cap_str_10, cap_str_2);
-
 			if (sdkp->physical_block_size != sector_size)
 				sd_printk(KERN_NOTICE, sdkp,
 					  "%u-byte physical blocks\n",
@@ -2375,7 +2295,6 @@
 	int res;
 	struct scsi_device *sdp = sdkp->device;
 	struct scsi_mode_data data;
-	int old_wp = sdkp->write_prot;
 
 	set_disk_ro(sdkp->disk, 0);
 	if (sdp->skip_ms_page_3f) {
@@ -2416,13 +2335,6 @@
 	} else {
 		sdkp->write_prot = ((data.device_specific & 0x80) != 0);
 		set_disk_ro(sdkp->disk, sdkp->write_prot);
-		if (sdkp->first_scan || old_wp != sdkp->write_prot) {
-			sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
-				  sdkp->write_prot ? "on" : "off");
-			sd_printk(KERN_DEBUG, sdkp,
-				  "Mode Sense: %02x %02x %02x %02x\n",
-				  buffer[0], buffer[1], buffer[2], buffer[3]);
-		}
 	}
 }
 
@@ -2435,16 +2347,13 @@
 {
 	int len = 0, res;
 	struct scsi_device *sdp = sdkp->device;
+	struct Scsi_Host *host = sdp->host;
 
 	int dbd;
 	int modepage;
 	int first_len;
 	struct scsi_mode_data data;
 	struct scsi_sense_hdr sshdr;
-	int old_wce = sdkp->WCE;
-	int old_rcd = sdkp->RCD;
-	int old_dpofua = sdkp->DPOFUA;
-
 
 	if (sdkp->cache_override)
 		return;
@@ -2466,7 +2375,10 @@
 		dbd = 8;
 	} else {
 		modepage = 8;
-		dbd = 0;
+		if (host->set_dbd_for_caching)
+			dbd = 8;
+		else
+			dbd = 0;
 	}
 
 	/* cautiously ask */
@@ -2566,15 +2478,6 @@
 		if (sdkp->WCE && sdkp->write_prot)
 			sdkp->WCE = 0;
 
-		if (sdkp->first_scan || old_wce != sdkp->WCE ||
-		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
-			sd_printk(KERN_NOTICE, sdkp,
-				  "Write cache: %s, read cache: %s, %s\n",
-				  sdkp->WCE ? "enabled" : "disabled",
-				  sdkp->RCD ? "disabled" : "enabled",
-				  sdkp->DPOFUA ? "supports DPO and FUA"
-				  : "doesn't support DPO or FUA");
-
 		return;
 	}
 
@@ -3001,14 +2904,15 @@
 	}
 
 	blk_pm_runtime_init(sdp->request_queue, dev);
+	if (sdp->autosuspend_delay >= 0)
+		pm_runtime_set_autosuspend_delay(dev, sdp->autosuspend_delay);
+
 	device_add_disk(dev, gd);
 	if (sdkp->capacity)
 		sd_dif_config_host(sdkp);
 
 	sd_revalidate_disk(gd);
 
-	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
-		  sdp->removable ? "removable " : "");
 	scsi_autopm_put_device(sdp);
 	put_device(&sdkp->dev);
 }
@@ -3253,7 +3157,6 @@
 		return 0;
 
 	if (sdkp->WCE && sdkp->media_present) {
-		sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
 		ret = sd_sync_cache(sdkp);
 		if (ret) {
 			/* ignore OFFLINE device */
@@ -3264,7 +3167,7 @@
 	}
 
 	if (sdkp->device->manage_start_stop) {
-		sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
+		sd_printk(KERN_DEBUG, sdkp, "Stopping disk\n");
 		/* an error is not worth aborting a system sleep */
 		ret = sd_start_stop_device(sdkp, 0);
 		if (ignore_stop_errors)
@@ -3295,7 +3198,7 @@
 	if (!sdkp->device->manage_start_stop)
 		return 0;
 
-	sd_printk(KERN_NOTICE, sdkp, "Starting disk\n");
+	sd_printk(KERN_DEBUG, sdkp, "Starting disk\n");
 	return sd_start_stop_device(sdkp, 1);
 }
 
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 070332e..f22fa96 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -796,7 +796,11 @@
 	else
 		at_head = 1;
 
-	srp->rq->timeout = timeout;
+	if (likely(!sdp->device->timeout_override))
+		srp->rq->timeout = timeout;
+	else
+		srp->rq->timeout = sdp->device->timeout_override;
+
 	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
 	blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk,
 			      srp->rq, at_head, sg_rq_end_io);
@@ -1506,9 +1510,6 @@
 	} else
 		pr_warn("%s: sg_sys Invalid\n", __func__);
 
-	sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
-		    "type %d\n", sdp->index, scsidp->type);
-
 	dev_set_drvdata(cl_dev, sdp);
 
 	return 0;
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index e27b4d4..6be274f 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -37,7 +37,6 @@
 	depends on SCSI && SCSI_DMA
 	select PM_DEVFREQ
 	select DEVFREQ_GOV_SIMPLE_ONDEMAND
-	select NLS
 	---help---
 	This selects the support for UFS devices in Linux, say Y and make
 	  sure that you know the name of your UFS host adapter (the card
@@ -80,14 +79,6 @@
 
 	  If unsure, say N.
 
-config SCSI_UFS_DWC_TC_PLATFORM
-	tristate "DesignWare platform support using a G210 Test Chip"
-	depends on SCSI_UFSHCD_PLATFORM
-	---help---
-	  Synopsys Test Chip is a PHY for prototyping purposes.
-
-	  If unsure, say N.
-
 config SCSI_UFS_QCOM
 	tristate "QCOM specific hooks to UFS controller platform driver"
 	depends on SCSI_UFSHCD_PLATFORM && ARCH_QCOM
@@ -100,3 +91,26 @@
 
 	  Select this if you have UFS controller on QCOM chipset.
 	  If unsure, say N.
+
+config SCSI_UFS_QCOM_ICE
+	bool "QCOM specific hooks to Inline Crypto Engine for UFS driver"
+	depends on SCSI_UFS_QCOM && CRYPTO_DEV_QCOM_ICE
+	help
+	  This selects the QCOM specific additions to support Inline Crypto
+	  Engine (ICE).
+	  ICE accelerates the crypto operations and maintains high UFS
+	  performance.
+
+	  Select this if you have ICE supported for UFS on QCOM chipset.
+	  If unsure, say N.
+
+
+config SCSI_UFS_TEST
+	tristate "Universal Flash Storage host controller driver unit-tests"
+	depends on SCSI_UFSHCD && IOSCHED_TEST
+	default m
+	---help---
+	This adds a UFS host controller unit-test framework.
+	The UFS unit-tests register as a block device test utility with
+	test-iosched and are initiated when test-iosched is chosen as
+	the active I/O scheduler.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 6e77cb0..ce98c09 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,7 +1,8 @@
 # UFSHCD makefile
-obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
-obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
 obj-$(CONFIG_SCSI_UFS_QCOM) += ufs-qcom.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFS_QCOM_ICE) += ufs-qcom-ice.o
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o ufs_quirks.o
 obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
 obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
+obj-$(CONFIG_SCSI_UFS_TEST) += ufs_test.o
+obj-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufs-qcom-debugfs.o
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
new file mode 100644
index 0000000..f3b4b6c
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -0,0 +1,1661 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * UFS debugfs - add debugfs interface to the ufshcd.
+ * This is currently used for statistics collection and exporting from the
+ * UFS driver.
+ * This infrastructure can be used for debugging or direct tweaking
+ * of the driver from userspace.
+ *
+ */
+
+#include <linux/random.h>
+#include "ufs-debugfs.h"
+#include "unipro.h"
+#include "ufshci.h"
+
+enum field_width {
+	BYTE	= 1,
+	WORD	= 2,
+};
+
+struct desc_field_offset {
+	char *name;
+	int offset;
+	enum field_width width_byte;
+};
+
+#define UFS_ERR_STATS_PRINT(file, error_index, string, error_seen)	\
+	do {								\
+		if (err_stats[error_index]) {				\
+			seq_printf(file, string,			\
+					err_stats[error_index]);	\
+			error_seen = true;				\
+		}							\
+	} while (0)
+
+#define DOORBELL_CLR_TOUT_US	(1000 * 1000) /* 1 sec */
+
+#ifdef CONFIG_UFS_FAULT_INJECTION
+
+#define INJECT_COMMAND_HANG (0x0)
+
+static DECLARE_FAULT_ATTR(fail_default_attr);
+static char *fail_request;
+module_param(fail_request, charp, 0);
+
+/**
+ * struct ufsdbg_err_scenario - error scenario use case
+ * @name: the name of the error scenario
+ * @err_code_arr: error codes array for this error scenario
+ * @num_err_codes: number of error codes in err_code_arr
+ * @num_err_injected: number of errors already injected for this scenario
+ */
+struct ufsdbg_err_scenario {
+	const char *name;
+	const int *err_code_arr;
+	u32 num_err_codes;
+	u32 num_err_injected;
+};
+
+/*
+ * the following static arrays aggregate the possible errors
+ * that might occur during the relevant error scenario
+ */
+static const int err_inject_intr_err_codes[] = {
+	CONTROLLER_FATAL_ERROR,
+	SYSTEM_BUS_FATAL_ERROR,
+	INJECT_COMMAND_HANG,
+};
+
+static const int err_inject_pwr_change_err_codes[] = {
+	-EIO,
+	-ETIMEDOUT,
+	-1,
+	PWR_REMOTE,
+	PWR_BUSY,
+	PWR_ERROR_CAP,
+	PWR_FATAL_ERROR,
+};
+
+static const int err_inject_uic_err_codes[] = {
+	-EIO,
+	-ETIMEDOUT,
+};
+
+static const int err_inject_dme_attr_err_codes[] = {
+	/* an invalid DME attribute for host and device */
+	0x1600,
+};
+
+static const int err_inject_query_err_codes[] = {
+	/* an invalid idn for flag/attribute/descriptor query request */
+	0xFF,
+};
+
+static struct ufsdbg_err_scenario err_scen_arr[] = {
+	{
+		"ERR_INJECT_INTR",
+		err_inject_intr_err_codes,
+		ARRAY_SIZE(err_inject_intr_err_codes),
+	},
+	{
+		"ERR_INJECT_PWR_CHANGE",
+		err_inject_pwr_change_err_codes,
+		ARRAY_SIZE(err_inject_pwr_change_err_codes),
+	},
+	{
+		"ERR_INJECT_UIC",
+		err_inject_uic_err_codes,
+		ARRAY_SIZE(err_inject_uic_err_codes),
+	},
+	{
+		"ERR_INJECT_DME_ATTR",
+		err_inject_dme_attr_err_codes,
+		ARRAY_SIZE(err_inject_dme_attr_err_codes),
+	},
+	{
+		"ERR_INJECT_QUERY",
+		err_inject_query_err_codes,
+		ARRAY_SIZE(err_inject_query_err_codes),
+	},
+};
+
+static bool inject_fatal_err_tr(struct ufs_hba *hba, u8 ocs_err)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_reqs, hba->nutrs);
+	if (tag == hba->nutrs)
+		return 0;
+
+	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
+	(&hba->lrb[tag])->utr_descriptor_ptr->header.dword_2 =
+							cpu_to_be32(ocs_err);
+
+	/* fatal error injected */
+	return 1;
+}
+
+static bool inject_fatal_err_tm(struct ufs_hba *hba, u8 ocs_err)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_tasks, hba->nutmrs);
+	if (tag == hba->nutmrs)
+		return 0;
+
+	ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
+	(&hba->utmrdl_base_addr[tag])->header.dword_2 =
+						cpu_to_be32(ocs_err);
+
+	/* fatal error injected */
+	return 1;
+}
+
+static bool inject_cmd_hang_tr(struct ufs_hba *hba)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_reqs, hba->nutrs);
+	if (tag == hba->nutrs)
+		return 0;
+
+	__clear_bit(tag, &hba->outstanding_reqs);
+	hba->lrb[tag].cmd = NULL;
+	__clear_bit(tag, &hba->lrb_in_use);
+
+	/* command hang injected */
+	return 1;
+}
+
+static int inject_cmd_hang_tm(struct ufs_hba *hba)
+{
+	int tag;
+
+	tag = find_first_bit(&hba->outstanding_tasks, hba->nutmrs);
+	if (tag == hba->nutmrs)
+		return 0;
+
+	__clear_bit(tag, &hba->outstanding_tasks);
+	__clear_bit(tag, &hba->tm_slots_in_use);
+
+	/* command hang injected */
+	return 1;
+}
+
+static void
+ufsdbg_intr_fail_request(struct ufs_hba *hba, u32 *intr_status)
+{
+	u8 ocs_err;
+
+	dev_info(hba->dev, "%s: fault-inject error: 0x%x\n",
+			__func__, *intr_status);
+
+	switch (*intr_status) {
+	case CONTROLLER_FATAL_ERROR: /* fall through */
+		ocs_err = OCS_FATAL_ERROR;
+		goto set_ocs;
+	case SYSTEM_BUS_FATAL_ERROR:
+		ocs_err = OCS_INVALID_CMD_TABLE_ATTR;
+set_ocs:
+		if (!inject_fatal_err_tr(hba, ocs_err))
+			if (!inject_fatal_err_tm(hba, ocs_err))
+				goto out;
+		break;
+	case INJECT_COMMAND_HANG:
+		if (!inject_cmd_hang_tr(hba))
+			inject_cmd_hang_tm(hba);
+		break;
+	default:
+		BUG();
+		/* some configurations ignore panics caused by BUG() */
+		break;
+	}
+out:
+	return;
+}
+
+static bool
+ufsdbg_find_err_code(enum ufsdbg_err_inject_scenario usecase,
+		     int *ret, u32 *index)
+{
+	struct ufsdbg_err_scenario *err_scen = &err_scen_arr[usecase];
+	u32 err_code_index;
+
+	if (!err_scen->num_err_codes)
+		return false;
+
+	err_code_index = prandom_u32() % err_scen->num_err_codes;
+
+	*index = err_code_index;
+	*ret = err_scen->err_code_arr[err_code_index];
+	return true;
+}
+
+void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+			enum ufsdbg_err_inject_scenario usecase,
+			int success_value, int *ret_value)
+{
+	int opt_ret = 0;
+	u32 err_code_index = 0;
+
+	/* sanity check and verify error scenario bit */
+	if ((unlikely(!hba || !ret_value)) ||
+	    (likely(!(hba->debugfs_files.err_inj_scenario_mask &
+						BIT(usecase)))))
+		goto out;
+
+	if (usecase < 0 || usecase >= ERR_INJECT_MAX_ERR_SCENARIOS) {
+		dev_err(hba->dev, "%s: invalid usecase value (%d)\n",
+			__func__, usecase);
+		goto out;
+	}
+
+	if (!ufsdbg_find_err_code(usecase, &opt_ret, &err_code_index))
+		goto out;
+
+	if (!should_fail(&hba->debugfs_files.fail_attr, 1))
+		goto out;
+
+	/* if an error already occurred/injected */
+	if (*ret_value != success_value)
+		goto out;
+
+	switch (usecase) {
+	case ERR_INJECT_INTR:
+		/* an error already occurred */
+		if (*ret_value & UFSHCD_ERROR_MASK)
+			goto out;
+
+		ufsdbg_intr_fail_request(hba, (u32 *)&opt_ret);
+		/* fall through */
+	case ERR_INJECT_PWR_CHANGE:
+	case ERR_INJECT_UIC:
+	case ERR_INJECT_DME_ATTR:
+	case ERR_INJECT_QUERY:
+		goto should_fail;
+	default:
+		dev_err(hba->dev, "%s: unsupported error scenario\n",
+				__func__);
+		goto out;
+	}
+
+should_fail:
+	*ret_value = opt_ret;
+	err_scen_arr[usecase].num_err_injected++;
+	pr_debug("%s: error code index [%d], error code %d (0x%x) is injected for scenario \"%s\"\n",
+		 __func__, err_code_index, *ret_value, *ret_value,
+		 err_scen_arr[usecase].name);
+out:
+	/*
+	 * At this point ret_value is guaranteed to hold the correct value,
+	 * whether it was assigned a new (injected) value or kept its
+	 * original incoming value.
+	 */
+	return;
+}
+
+static int ufsdbg_err_inj_scenario_read(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	enum ufsdbg_err_inject_scenario err_case;
+
+	if (!hba)
+		return -EINVAL;
+
+	seq_printf(file, "%-40s %-17s %-15s\n",
+		   "Error Scenario:", "Bit[#]", "STATUS");
+
+	for (err_case = ERR_INJECT_INTR;
+		err_case < ERR_INJECT_MAX_ERR_SCENARIOS; err_case++) {
+		seq_printf(file, "%-40s 0x%-15lx %-15s\n",
+			   err_scen_arr[err_case].name,
+			   UFS_BIT(err_case),
+			   hba->debugfs_files.err_inj_scenario_mask &
+				UFS_BIT(err_case) ? "ENABLE" : "DISABLE");
+	}
+
+	seq_printf(file, "bitwise of error scenario is 0x%x\n\n",
+		   hba->debugfs_files.err_inj_scenario_mask);
+
+	seq_puts(file, "usage example:\n");
+	seq_puts(file, "echo 0x4 > /sys/kernel/debug/.../err_inj_scenario\n");
+	seq_puts(file, "in order to enable ERR_INJECT_INTR\n");
+
+	return 0;
+}
+
+static
+int ufsdbg_err_inj_scenario_open(struct inode *inode, struct file *file)
+{
+	return single_open(file,
+			ufsdbg_err_inj_scenario_read, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_inj_scenario_write(struct file *file,
+				     const char __user *ubuf, size_t cnt,
+				     loff_t *ppos)
+{
+	struct ufs_hba *hba = file->f_mapping->host->i_private;
+	int ret;
+	int err_scen = 0;
+
+	if (!hba)
+		return -EINVAL;
+
+	ret = kstrtoint_from_user(ubuf, cnt, 0, &err_scen);
+	if (ret) {
+		dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+		return ret;
+	}
+
+	hba->debugfs_files.err_inj_scenario_mask = err_scen;
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_err_inj_scenario_ops = {
+	.open		= ufsdbg_err_inj_scenario_open,
+	.read		= seq_read,
+	.write		= ufsdbg_err_inj_scenario_write,
+};
+
+static int ufsdbg_err_inj_stats_read(struct seq_file *file, void *data)
+{
+	enum ufsdbg_err_inject_scenario err;
+
+	seq_printf(file, "%-40s %-20s\n",
+		   "Error Scenario:", "Num of Errors Injected");
+
+	for (err = 0; err < ERR_INJECT_MAX_ERR_SCENARIOS; err++) {
+		seq_printf(file, "%-40s %-20d\n",
+			err_scen_arr[err].name,
+			err_scen_arr[err].num_err_injected);
+	}
+
+	return 0;
+}
+
+static
+int ufsdbg_err_inj_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file,
+			ufsdbg_err_inj_stats_read, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_inj_stats_write(struct file *file,
+				     const char __user *ubuf, size_t cnt,
+				     loff_t *ppos)
+{
+	enum ufsdbg_err_inject_scenario err;
+
+	for (err = 0; err < ERR_INJECT_MAX_ERR_SCENARIOS; err++)
+		err_scen_arr[err].num_err_injected = 0;
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_err_inj_stats_ops = {
+	.open		= ufsdbg_err_inj_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_err_inj_stats_write,
+};
+
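+/*
+ * Illustrative setup sequence (the debugfs paths and the host instance
+ * number are examples only and may differ on a given target):
+ *
+ *   # enable the interrupt error scenario (bit 0)
+ *   echo 0x1 > /sys/kernel/debug/ufshcd0/err_inj_scenario
+ *   # let the generic fault-injection attributes fire on every attempt
+ *   echo 100 > /sys/kernel/debug/ufshcd0/inject_fault/probability
+ *   echo -1 > /sys/kernel/debug/ufshcd0/inject_fault/times
+ *
+ * should_fail() then decides, per dispatcher call, whether an error is
+ * actually injected for the enabled scenarios.
+ */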
+static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
+{
+	struct dentry *fault_dir;
+
+	hba->debugfs_files.fail_attr = fail_default_attr;
+
+	if (fail_request)
+		setup_fault_attr(&hba->debugfs_files.fail_attr, fail_request);
+
+	/* suppress dump stack every time failure is injected */
+	hba->debugfs_files.fail_attr.verbose = 0;
+
+	fault_dir = fault_create_debugfs_attr("inject_fault",
+					hba->debugfs_files.debugfs_root,
+					&hba->debugfs_files.fail_attr);
+
+	if (IS_ERR(fault_dir)) {
+		dev_err(hba->dev, "%s: failed to create debugfs entry for fault injection\n",
+			__func__);
+		return;
+	}
+
+	hba->debugfs_files.err_inj_scenario =
+		debugfs_create_file("err_inj_scenario",
+				   S_IRUSR | S_IWUSR,
+				   hba->debugfs_files.debugfs_root, hba,
+				   &ufsdbg_err_inj_scenario_ops);
+
+	if (!hba->debugfs_files.err_inj_scenario) {
+		dev_err(hba->dev,
+			"%s: Could not create debugfs entry for err_scenario",
+				__func__);
+		goto fail_err_inj_scenario;
+	}
+
+	hba->debugfs_files.err_inj_stats =
+		debugfs_create_file("err_inj_stats", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_err_inj_stats_ops);
+	if (!hba->debugfs_files.err_inj_stats) {
+		dev_err(hba->dev,
+			"%s:  failed create err_inj_stats debugfs entry\n",
+			__func__);
+		goto fail_err_inj_stats;
+	}
+
+	return;
+
+fail_err_inj_stats:
+	debugfs_remove(hba->debugfs_files.err_inj_scenario);
+fail_err_inj_scenario:
+	debugfs_remove_recursive(fault_dir);
+}
+#else
+static void ufsdbg_setup_fault_injection(struct ufs_hba *hba)
+{
+}
+#endif /* CONFIG_UFS_FAULT_INJECTION */
+
+#define BUFF_LINE_SIZE 16 /* Must be a multiple of sizeof(u32) */
+#define TAB_CHARS 8
+
+static int ufsdbg_tag_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	struct ufs_stats *ufs_stats;
+	int i, j;
+	int max_depth;
+	bool is_tag_empty = true;
+	unsigned long flags;
+	char *sep = " | * | ";
+
+	if (!hba)
+		goto exit;
+
+	ufs_stats = &hba->ufs_stats;
+
+	if (!ufs_stats->enabled) {
+		pr_debug("%s: ufs statistics are disabled\n", __func__);
+		seq_puts(file, "ufs statistics are disabled");
+		goto exit;
+	}
+
+	max_depth = hba->nutrs;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	/* Header */
+	seq_printf(file, " Tag Stat\t\t%s Number of pending reqs upon issue (Q fullness)\n",
+		sep);
+	for (i = 0; i < TAB_CHARS * (TS_NUM_STATS + 4); i++) {
+		seq_puts(file, "-");
+		if (i == (TAB_CHARS * 3 - 1))
+			seq_puts(file, sep);
+	}
+	seq_printf(file,
+		"\n #\tnum uses\t%s\t #\tAll\tRead\tWrite\tUrg.R\tUrg.W\tFlush\n",
+		sep);
+
+	/* values */
+	for (i = 0; i < max_depth; i++) {
+		if (ufs_stats->tag_stats[i][TS_TAG] <= 0 &&
+				ufs_stats->tag_stats[i][TS_READ] <= 0 &&
+				ufs_stats->tag_stats[i][TS_WRITE] <= 0 &&
+				ufs_stats->tag_stats[i][TS_URGENT_READ] <= 0 &&
+				ufs_stats->tag_stats[i][TS_URGENT_WRITE] <= 0 &&
+				ufs_stats->tag_stats[i][TS_FLUSH] <= 0)
+			continue;
+
+		is_tag_empty = false;
+		seq_printf(file, " %d\t ", i);
+		for (j = 0; j < TS_NUM_STATS; j++) {
+			seq_printf(file, "%llu\t", ufs_stats->tag_stats[i][j]);
+			if (j != 0)
+				continue;
+			seq_printf(file, "\t%s\t %d\t%llu\t", sep, i,
+				ufs_stats->tag_stats[i][TS_READ] +
+				ufs_stats->tag_stats[i][TS_WRITE] +
+				ufs_stats->tag_stats[i][TS_URGENT_READ] +
+				ufs_stats->tag_stats[i][TS_URGENT_WRITE] +
+				ufs_stats->tag_stats[i][TS_FLUSH]);
+		}
+		seq_puts(file, "\n");
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (is_tag_empty)
+		pr_debug("%s: All tag statistics are empty", __func__);
+
+exit:
+	return 0;
+}
+
+static int ufsdbg_tag_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_tag_stats_show, inode->i_private);
+}
+
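+/*
+ * Writing a non-zero value enables tag statistics and clears any previously
+ * collected counters; writing 0 disables collection. For example (path is
+ * illustrative):
+ *
+ *   echo 1 > /sys/kernel/debug/ufshcd0/stats/tag_stats
+ *   cat /sys/kernel/debug/ufshcd0/stats/tag_stats
+ */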
+static ssize_t ufsdbg_tag_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	struct ufs_stats *ufs_stats;
+	int val = 0;
+	int ret, bit = 0;
+	unsigned long flags;
+
+	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+	if (ret) {
+		dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+		return ret;
+	}
+
+	ufs_stats = &hba->ufs_stats;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	if (!val) {
+		ufs_stats->enabled = false;
+		pr_debug("%s: Disabling UFS tag statistics", __func__);
+	} else {
+		ufs_stats->enabled = true;
+		pr_debug("%s: Enabling & Resetting UFS tag statistics",
+			 __func__);
+		memset(hba->ufs_stats.tag_stats[0], 0,
+			sizeof(**hba->ufs_stats.tag_stats) *
+			TS_NUM_STATS * hba->nutrs);
+
+		/* initialize current queue depth */
+		ufs_stats->q_depth = 0;
+		for_each_set_bit_from(bit, &hba->outstanding_reqs, hba->nutrs)
+			ufs_stats->q_depth++;
+		pr_debug("%s: Enabled UFS tag statistics", __func__);
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_tag_stats_fops = {
+	.open		= ufsdbg_tag_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_tag_stats_write,
+};
+
+static int ufsdbg_query_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	struct ufs_stats *ufs_stats = &hba->ufs_stats;
+	int i, j;
+	static const char *opcode_name[UPIU_QUERY_OPCODE_MAX] = {
+		"QUERY_OPCODE_NOP:",
+		"QUERY_OPCODE_READ_DESC:",
+		"QUERY_OPCODE_WRITE_DESC:",
+		"QUERY_OPCODE_READ_ATTR:",
+		"QUERY_OPCODE_WRITE_ATTR:",
+		"QUERY_OPCODE_READ_FLAG:",
+		"QUERY_OPCODE_SET_FLAG:",
+		"QUERY_OPCODE_CLEAR_FLAG:",
+		"QUERY_OPCODE_TOGGLE_FLAG:",
+	};
+
+	seq_puts(file, "\n");
+	seq_puts(file, "The following table shows how many TIMES each IDN was sent to device for each QUERY OPCODE:\n");
+	seq_puts(file, "\n");
+
+	for (i = 0; i < UPIU_QUERY_OPCODE_MAX; i++) {
+		seq_printf(file, "%-30s", opcode_name[i]);
+
+		for (j = 0; j < MAX_QUERY_IDN; j++) {
+			/*
+			 * Print only the non-zero entries, i.e. IDNs that
+			 * were actually sent to the device for this opcode;
+			 * the output is not meant to preserve a fixed table
+			 * layout.
+			 */
+			if (ufs_stats->query_stats_arr[i][j])
+				seq_printf(file, "IDN 0x%02X: %d,\t", j,
+					   ufs_stats->query_stats_arr[i][j]);
+		}
+		seq_puts(file, "\n");
+	}
+
+	return 0;
+}
+
+static int ufsdbg_query_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_query_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_query_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	struct ufs_stats *ufs_stats = &hba->ufs_stats;
+	int i, j;
+
+	mutex_lock(&hba->dev_cmd.lock);
+
+	for (i = 0; i < UPIU_QUERY_OPCODE_MAX; i++)
+		for (j = 0; j < MAX_QUERY_IDN; j++)
+			ufs_stats->query_stats_arr[i][j] = 0;
+
+	mutex_unlock(&hba->dev_cmd.lock);
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_query_stats_fops = {
+	.open		= ufsdbg_query_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_query_stats_write,
+};
+
+static int ufsdbg_err_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	int *err_stats;
+	unsigned long flags;
+	bool error_seen = false;
+
+	if (!hba)
+		goto exit;
+
+	err_stats = hba->ufs_stats.err_stats;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	seq_puts(file, "\n==UFS errors that caused controller reset==\n");
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_HIBERN8_EXIT,
+			"controller reset due to hibern8 exit error:\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_VOPS_SUSPEND,
+			"controller reset due to vops suspend error:\t\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_EH,
+			"controller reset due to error handling:\t\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_CLEAR_PEND_XFER_TM,
+			"controller reset due to clear xfer/tm regs:\t\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_INT_FATAL_ERRORS,
+			"controller reset due to fatal interrupt:\t %d\n",
+			error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_INT_UIC_ERROR,
+			"controller reset due to uic interrupt error:\t %d\n",
+			error_seen);
+
+	if (error_seen)
+		error_seen = false;
+	else
+		seq_puts(file,
+			"so far, no errors that caused controller reset\n\n");
+
+	seq_puts(file, "\n\n==UFS other errors==\n");
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_HIBERN8_ENTER,
+			"hibern8 enter:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_RESUME,
+			"resume error:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_SUSPEND,
+			"suspend error:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_LINKSTARTUP,
+			"linkstartup error:\t\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_POWER_MODE_CHANGE,
+			"power change error:\t %d\n", error_seen);
+
+	UFS_ERR_STATS_PRINT(file, UFS_ERR_TASK_ABORT,
+			"abort callback:\t\t %d\n\n", error_seen);
+
+	if (!error_seen)
+		seq_puts(file,
+		"so far, no other UFS related errors\n\n");
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+exit:
+	return 0;
+}
+
+static int ufsdbg_err_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_err_stats_show, inode->i_private);
+}
+
+static ssize_t ufsdbg_err_stats_write(struct file *filp,
+				      const char __user *ubuf, size_t cnt,
+				       loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	struct ufs_stats *ufs_stats;
+	unsigned long flags;
+
+	ufs_stats = &hba->ufs_stats;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	pr_debug("%s: Resetting UFS error statistics", __func__);
+	memset(ufs_stats->err_stats, 0, sizeof(hba->ufs_stats.err_stats));
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_err_stats_fops = {
+	.open		= ufsdbg_err_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_err_stats_write,
+};
+
+static int ufshcd_init_statistics(struct ufs_hba *hba)
+{
+	struct ufs_stats *stats = &hba->ufs_stats;
+	int ret = 0;
+	int i;
+
+	stats->enabled = false;
+	stats->tag_stats = kzalloc(sizeof(*stats->tag_stats) * hba->nutrs,
+			GFP_KERNEL);
+	if (!hba->ufs_stats.tag_stats)
+		goto no_mem;
+
+	stats->tag_stats[0] = kzalloc(sizeof(**stats->tag_stats) *
+			TS_NUM_STATS * hba->nutrs, GFP_KERNEL);
+	if (!stats->tag_stats[0])
+		goto no_mem;
+
+	for (i = 1; i < hba->nutrs; i++)
+		stats->tag_stats[i] = &stats->tag_stats[0][i * TS_NUM_STATS];
+
+	memset(stats->err_stats, 0, sizeof(hba->ufs_stats.err_stats));
+
+	goto exit;
+
+no_mem:
+	dev_err(hba->dev, "%s: Unable to allocate UFS tag_stats", __func__);
+	ret = -ENOMEM;
+exit:
+	return ret;
+}
+
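+/**
+ * ufsdbg_pr_buf_to_std - dump a range of host controller registers to a
+ * seq_file in hex
+ * @hba: pointer to the UFS host controller instance
+ * @offset: byte offset of the first register, relative to hba->mmio_base
+ * @num_regs: number of 32-bit registers to dump
+ * @str: prefix string printed before each output line
+ * @priv: the struct seq_file to print into
+ *
+ * Registers are printed BUFF_LINE_SIZE bytes per line using
+ * hex_dump_to_buffer().
+ */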
+void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv)
+{
+	int i;
+	char linebuf[38];
+	int size = num_regs * sizeof(u32);
+	int lines = size / BUFF_LINE_SIZE +
+			(size % BUFF_LINE_SIZE ? 1 : 0);
+	struct seq_file *file = priv;
+
+	if (!hba || !file) {
+		pr_err("%s called with NULL pointer\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < lines; i++) {
+		hex_dump_to_buffer(hba->mmio_base + offset + i * BUFF_LINE_SIZE,
+				min(BUFF_LINE_SIZE, size), BUFF_LINE_SIZE, 4,
+				linebuf, sizeof(linebuf), false);
+		seq_printf(file, "%s [%x]: %s\n", str, i * BUFF_LINE_SIZE,
+				linebuf);
+		size -= BUFF_LINE_SIZE; /* remaining size is tracked in bytes */
+	}
+}
+
+static int ufsdbg_host_regs_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
+	ufsdbg_pr_buf_to_std(hba, 0, UFSHCI_REG_SPACE_SIZE / sizeof(u32),
+				"host regs", file);
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+	return 0;
+}
+
+static int ufsdbg_host_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_host_regs_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_host_regs_fops = {
+	.open		= ufsdbg_host_regs_open,
+	.read		= seq_read,
+};
+
+static int ufsdbg_dump_device_desc_show(struct seq_file *file, void *data)
+{
+	int err = 0;
+	int buff_len = QUERY_DESC_DEVICE_MAX_SIZE;
+	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+	struct desc_field_offset device_desc_field_name[] = {
+		{"bLength",		0x00, BYTE},
+		{"bDescriptorType",	0x01, BYTE},
+		{"bDevice",		0x02, BYTE},
+		{"bDeviceClass",	0x03, BYTE},
+		{"bDeviceSubClass",	0x04, BYTE},
+		{"bProtocol",		0x05, BYTE},
+		{"bNumberLU",		0x06, BYTE},
+		{"bNumberWLU",		0x07, BYTE},
+		{"bBootEnable",		0x08, BYTE},
+		{"bDescrAccessEn",	0x09, BYTE},
+		{"bInitPowerMode",	0x0A, BYTE},
+		{"bHighPriorityLUN",	0x0B, BYTE},
+		{"bSecureRemovalType",	0x0C, BYTE},
+		{"bSecurityLU",		0x0D, BYTE},
+		{"Reserved",		0x0E, BYTE},
+		{"bInitActiveICCLevel",	0x0F, BYTE},
+		{"wSpecVersion",	0x10, WORD},
+		{"wManufactureDate",	0x12, WORD},
+		{"iManufactureName",	0x14, BYTE},
+		{"iProductName",	0x15, BYTE},
+		{"iSerialNumber",	0x16, BYTE},
+		{"iOemID",		0x17, BYTE},
+		{"wManufactureID",	0x18, WORD},
+		{"bUD0BaseOffset",	0x1A, BYTE},
+		{"bUDConfigPLength",	0x1B, BYTE},
+		{"bDeviceRTTCap",	0x1C, BYTE},
+		{"wPeriodicRTCUpdate",	0x1D, WORD}
+	};
+
+	pm_runtime_get_sync(hba->dev);
+	err = ufshcd_read_device_desc(hba, desc_buf, buff_len);
+	pm_runtime_put_sync(hba->dev);
+
+	if (!err) {
+		int i;
+		struct desc_field_offset *tmp;
+		for (i = 0; i < ARRAY_SIZE(device_desc_field_name); ++i) {
+			tmp = &device_desc_field_name[i];
+
+			if (tmp->width_byte == BYTE) {
+				seq_printf(file,
+					   "Device Descriptor[Byte offset 0x%x]: %s = 0x%x\n",
+					   tmp->offset,
+					   tmp->name,
+					   (u8)desc_buf[tmp->offset]);
+			} else if (tmp->width_byte == WORD) {
+				seq_printf(file,
+					   "Device Descriptor[Byte offset 0x%x]: %s = 0x%x\n",
+					   tmp->offset,
+					   tmp->name,
+					   *(u16 *)&desc_buf[tmp->offset]);
+			} else {
+				seq_printf(file,
+				"Device Descriptor[offset 0x%x]: %s. Wrong Width = %d\n",
+				tmp->offset, tmp->name, tmp->width_byte);
+			}
+		}
+	} else {
+		seq_printf(file, "Reading Device Descriptor failed. err = %d\n",
+			   err);
+	}
+
+	return err;
+}
+
+static int ufsdbg_show_hba_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+
+	seq_printf(file, "hba->outstanding_tasks = 0x%x\n",
+			(u32)hba->outstanding_tasks);
+	seq_printf(file, "hba->outstanding_reqs = 0x%x\n",
+			(u32)hba->outstanding_reqs);
+
+	seq_printf(file, "hba->capabilities = 0x%x\n", hba->capabilities);
+	seq_printf(file, "hba->nutrs = %d\n", hba->nutrs);
+	seq_printf(file, "hba->nutmrs = %d\n", hba->nutmrs);
+	seq_printf(file, "hba->ufs_version = 0x%x\n", hba->ufs_version);
+	seq_printf(file, "hba->irq = 0x%x\n", hba->irq);
+	seq_printf(file, "hba->auto_bkops_enabled = %d\n",
+			hba->auto_bkops_enabled);
+
+	seq_printf(file, "hba->ufshcd_state = 0x%x\n", hba->ufshcd_state);
+	seq_printf(file, "hba->clk_gating.state = 0x%x\n",
+			hba->clk_gating.state);
+	seq_printf(file, "hba->eh_flags = 0x%x\n", hba->eh_flags);
+	seq_printf(file, "hba->intr_mask = 0x%x\n", hba->intr_mask);
+	seq_printf(file, "hba->ee_ctrl_mask = 0x%x\n", hba->ee_ctrl_mask);
+
+	/* HBA Errors */
+	seq_printf(file, "hba->errors = 0x%x\n", hba->errors);
+	seq_printf(file, "hba->uic_error = 0x%x\n", hba->uic_error);
+	seq_printf(file, "hba->saved_err = 0x%x\n", hba->saved_err);
+	seq_printf(file, "hba->saved_uic_err = 0x%x\n", hba->saved_uic_err);
+
+	return 0;
+}
+
+static int ufsdbg_show_hba_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_show_hba_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_show_hba_fops = {
+	.open		= ufsdbg_show_hba_open,
+	.read		= seq_read,
+};
+
+static int ufsdbg_dump_device_desc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file,
+			   ufsdbg_dump_device_desc_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_dump_device_desc = {
+	.open		= ufsdbg_dump_device_desc_open,
+	.read		= seq_read,
+};
+
+static int ufsdbg_power_mode_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	char *names[] = {
+		"INVALID MODE",
+		"FAST MODE",
+		"SLOW MODE",
+		"INVALID MODE",
+		"FASTAUTO MODE",
+		"SLOWAUTO MODE",
+		"INVALID MODE",
+	};
+
+	/* Print current status */
+	seq_puts(file, "UFS current power mode [RX, TX]:");
+	seq_printf(file, "gear=[%d,%d], lane=[%d,%d], pwr=[%s,%s], rate = %c",
+		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+		 names[hba->pwr_info.pwr_rx],
+		 names[hba->pwr_info.pwr_tx],
+		 hba->pwr_info.hs_rate == PA_HS_MODE_B ? 'B' : 'A');
+	seq_puts(file, "\n\n");
+
+	/* Print usage */
+	seq_puts(file,
+		"To change power mode write 'GGLLMM' where:\n"
+		"G - selected gear\n"
+		"L - number of lanes\n"
+		"M - power mode:\n"
+		"\t1 = fast mode\n"
+		"\t2 = slow mode\n"
+		"\t4 = fast-auto mode\n"
+		"\t5 = slow-auto mode\n"
+		"first letter is for RX, second letter is for TX.\n\n");
+
+	return 0;
+}
+
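+/*
+ * Example of the 'GGLLMM' encoding accepted by the power_mode write handler
+ * (the value and path below are illustrative):
+ *
+ *   echo 332211 > /sys/kernel/debug/ufshcd0/power_mode
+ *
+ * requests gear 3 on both RX and TX, 2 lanes in each direction, and fast
+ * mode (1) for both RX and TX.
+ */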
+static bool ufsdbg_power_mode_validate(struct ufs_pa_layer_attr *pwr_mode)
+{
+	if (pwr_mode->gear_rx < UFS_PWM_G1 || pwr_mode->gear_rx > UFS_PWM_G7 ||
+	    pwr_mode->gear_tx < UFS_PWM_G1 || pwr_mode->gear_tx > UFS_PWM_G7 ||
+	    pwr_mode->lane_rx < 1 || pwr_mode->lane_rx > 2 ||
+	    pwr_mode->lane_tx < 1 || pwr_mode->lane_tx > 2 ||
+	    (pwr_mode->pwr_rx != FAST_MODE && pwr_mode->pwr_rx != SLOW_MODE &&
+	     pwr_mode->pwr_rx != FASTAUTO_MODE &&
+	     pwr_mode->pwr_rx != SLOWAUTO_MODE) ||
+	    (pwr_mode->pwr_tx != FAST_MODE && pwr_mode->pwr_tx != SLOW_MODE &&
+	     pwr_mode->pwr_tx != FASTAUTO_MODE &&
+	     pwr_mode->pwr_tx != SLOWAUTO_MODE)) {
+		pr_err("%s: power parameters are not valid\n", __func__);
+		return false;
+	}
+
+	return true;
+}
+
+static int ufsdbg_cfg_pwr_param(struct ufs_hba *hba,
+				struct ufs_pa_layer_attr *new_pwr,
+				struct ufs_pa_layer_attr *final_pwr)
+{
+	int ret = 0;
+	bool is_dev_sup_hs = false;
+	bool is_new_pwr_hs = false;
+	int dev_pwm_max_rx_gear;
+	int dev_pwm_max_tx_gear;
+
+	if (!hba->max_pwr_info.is_valid) {
+		dev_err(hba->dev, "%s: device max power is not valid. can't configure power\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	if (hba->max_pwr_info.info.pwr_rx == FAST_MODE)
+		is_dev_sup_hs = true;
+
+	if (new_pwr->pwr_rx == FAST_MODE || new_pwr->pwr_rx == FASTAUTO_MODE)
+		is_new_pwr_hs = true;
+
+	final_pwr->lane_rx = hba->max_pwr_info.info.lane_rx;
+	final_pwr->lane_tx = hba->max_pwr_info.info.lane_tx;
+
+	/* device doesn't support HS but requested power is HS */
+	if (!is_dev_sup_hs && is_new_pwr_hs) {
+		pr_err("%s: device doesn't support HS. requested power is HS\n",
+			__func__);
+		return -ENOTSUPP;
+	} else if ((is_dev_sup_hs && is_new_pwr_hs) ||
+		   (!is_dev_sup_hs && !is_new_pwr_hs)) {
+		/*
+		 * If the device and the requested power mode are both HS or
+		 * both PWM, the device's maximum gears can be assigned
+		 * directly to final_pwr->gear_xx.
+		 */
+		final_pwr->gear_rx = hba->max_pwr_info.info.gear_rx;
+		final_pwr->gear_tx = hba->max_pwr_info.info.gear_tx;
+	} else if (is_dev_sup_hs && !is_new_pwr_hs) {
+		/*
+		 * If device supports HS but requested power is PWM, then we
+		 * need to find out what is the max gear in PWM the device
+		 * supports
+		 */
+
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+			       &dev_pwm_max_rx_gear);
+
+		if (!dev_pwm_max_rx_gear) {
+			pr_err("%s: couldn't get device max pwm rx gear\n",
+				__func__);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				    &dev_pwm_max_tx_gear);
+
+		if (!dev_pwm_max_tx_gear) {
+			pr_err("%s: couldn't get device max pwm tx gear\n",
+				__func__);
+			ret = -EINVAL;
+			goto out;
+		}
+
+		final_pwr->gear_rx = dev_pwm_max_rx_gear;
+		final_pwr->gear_tx = dev_pwm_max_tx_gear;
+	}
+
+	if ((new_pwr->gear_rx > final_pwr->gear_rx) ||
+	    (new_pwr->gear_tx > final_pwr->gear_tx) ||
+	    (new_pwr->lane_rx > final_pwr->lane_rx) ||
+	    (new_pwr->lane_tx > final_pwr->lane_tx)) {
+		pr_err("%s: (RX,TX) GG,LL: in PWM/HS new pwr [%d%d,%d%d] exceeds device limitation [%d%d,%d%d]\n",
+			__func__,
+			new_pwr->gear_rx, new_pwr->gear_tx,
+			new_pwr->lane_rx, new_pwr->lane_tx,
+			final_pwr->gear_rx, final_pwr->gear_tx,
+			final_pwr->lane_rx, final_pwr->lane_tx);
+		return -ENOTSUPP;
+	}
+
+	final_pwr->gear_rx = new_pwr->gear_rx;
+	final_pwr->gear_tx = new_pwr->gear_tx;
+	final_pwr->lane_rx = new_pwr->lane_rx;
+	final_pwr->lane_tx = new_pwr->lane_tx;
+	final_pwr->pwr_rx = new_pwr->pwr_rx;
+	final_pwr->pwr_tx = new_pwr->pwr_tx;
+	final_pwr->hs_rate = new_pwr->hs_rate;
+
+out:
+	return ret;
+}
+
+static int ufsdbg_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+	int ret;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
+	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
+	if (!ret)
+		ret = ufshcd_change_power_mode(hba, desired_pwr_mode);
+	ufshcd_scsi_unblock_requests(hba);
+	pm_runtime_put_sync(hba->dev);
+
+	return ret;
+}
+
+static ssize_t ufsdbg_power_mode_write(struct file *file,
+				const char __user *ubuf, size_t cnt,
+				loff_t *ppos)
+{
+	struct ufs_hba *hba = file->f_mapping->host->i_private;
+	struct ufs_pa_layer_attr pwr_mode;
+	struct ufs_pa_layer_attr final_pwr_mode;
+	char pwr_mode_str[BUFF_LINE_SIZE] = {0};
+	loff_t buff_pos = 0;
+	int ret;
+	int idx = 0;
+
+	ret = simple_write_to_buffer(pwr_mode_str, BUFF_LINE_SIZE,
+		&buff_pos, ubuf, cnt);
+	if (ret < 0)
+		return ret;
+
+	pwr_mode.gear_rx = pwr_mode_str[idx++] - '0';
+	pwr_mode.gear_tx = pwr_mode_str[idx++] - '0';
+	pwr_mode.lane_rx = pwr_mode_str[idx++] - '0';
+	pwr_mode.lane_tx = pwr_mode_str[idx++] - '0';
+	pwr_mode.pwr_rx = pwr_mode_str[idx++] - '0';
+	pwr_mode.pwr_tx = pwr_mode_str[idx++] - '0';
+
+	/*
+	 * Switching between rates is not currently supported so use the
+	 * current rate.
+	 * TODO: add rate switching if and when it is supported in the future
+	 */
+	pwr_mode.hs_rate = hba->pwr_info.hs_rate;
+
+	/* Validate user input */
+	if (!ufsdbg_power_mode_validate(&pwr_mode))
+		return -EINVAL;
+
+	pr_debug("%s: new power mode requested [RX,TX]: Gear=[%d,%d], Lane=[%d,%d], Mode=[%d,%d]\n",
+		__func__,
+		pwr_mode.gear_rx, pwr_mode.gear_tx, pwr_mode.lane_rx,
+		pwr_mode.lane_tx, pwr_mode.pwr_rx, pwr_mode.pwr_tx);
+
+	ret = ufsdbg_cfg_pwr_param(hba, &pwr_mode, &final_pwr_mode);
+	if (ret) {
+		dev_err(hba->dev,
+			"%s: failed to configure new power parameters, ret = %d\n",
+			__func__, ret);
+		return cnt;
+	}
+
+	ret = ufsdbg_config_pwr_mode(hba, &final_pwr_mode);
+	if (ret == -EBUSY)
+		dev_err(hba->dev,
+			"%s: ufshcd_config_pwr_mode failed: system is busy, try again\n",
+			__func__);
+	else if (ret)
+		dev_err(hba->dev,
+			"%s: ufshcd_config_pwr_mode failed, ret=%d\n",
+			__func__, ret);
+
+	return cnt;
+}
+
+static int ufsdbg_power_mode_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_power_mode_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_power_mode_desc = {
+	.open		= ufsdbg_power_mode_open,
+	.read		= seq_read,
+	.write		= ufsdbg_power_mode_write,
+};
+
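+/*
+ * dme_local_read and dme_peer_read follow a write-then-read pattern: the
+ * value written selects the DME attribute ID, and a subsequent read returns
+ * that attribute's current value. Illustrative sequence (the attribute ID
+ * and path are examples only):
+ *
+ *   echo 0x15A8 > /sys/kernel/debug/ufshcd0/dme_local_read
+ *   cat /sys/kernel/debug/ufshcd0/dme_local_read
+ */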
+static int ufsdbg_dme_read(void *data, u64 *attr_val, bool peer)
+{
+	int ret;
+	struct ufs_hba *hba = data;
+	u32 attr_id, read_val = 0;
+	int (*read_func)(struct ufs_hba *, u32, u32 *);
+	u32 attr_sel;
+
+	if (!hba)
+		return -EINVAL;
+
+	read_func = peer ? ufshcd_dme_peer_get : ufshcd_dme_get;
+	attr_id = peer ? hba->debugfs_files.dme_peer_attr_id :
+			 hba->debugfs_files.dme_local_attr_id;
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
+	ret = ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US);
+	if (!ret) {
+		if ((attr_id >= MPHY_RX_ATTR_ADDR_START)
+		    && (attr_id <= MPHY_RX_ATTR_ADDR_END))
+			attr_sel = UIC_ARG_MIB_SEL(attr_id,
+					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0));
+		else
+			attr_sel = UIC_ARG_MIB(attr_id);
+
+		ret = read_func(hba, attr_sel, &read_val);
+	}
+	ufshcd_scsi_unblock_requests(hba);
+	pm_runtime_put_sync(hba->dev);
+
+	if (!ret)
+		*attr_val = (u64)read_val;
+
+	return ret;
+}
+
+static int ufsdbg_dme_local_set_attr_id(void *data, u64 attr_id)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	hba->debugfs_files.dme_local_attr_id = (u32)attr_id;
+
+	return 0;
+}
+
+static int ufsdbg_dme_local_read(void *data, u64 *attr_val)
+{
+	return ufsdbg_dme_read(data, attr_val, false);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dme_local_read_ops,
+			ufsdbg_dme_local_read,
+			ufsdbg_dme_local_set_attr_id,
+			"%llu\n");
+
+static int ufsdbg_dme_peer_read(void *data, u64 *attr_val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+	else
+		return ufsdbg_dme_read(data, attr_val, true);
+}
+
+static int ufsdbg_dme_peer_set_attr_id(void *data, u64 attr_id)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	hba->debugfs_files.dme_peer_attr_id = (u32)attr_id;
+
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dme_peer_read_ops,
+			ufsdbg_dme_peer_read,
+			ufsdbg_dme_peer_set_attr_id,
+			"%llu\n");
+
+static int ufsdbg_dbg_print_en_read(void *data, u64 *attr_val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	*attr_val = (u64)hba->ufshcd_dbg_print;
+	return 0;
+}
+
+static int ufsdbg_dbg_print_en_set(void *data, u64 attr_id)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	if (attr_id & ~UFSHCD_DBG_PRINT_ALL)
+		return -EINVAL;
+
+	hba->ufshcd_dbg_print = (u32)attr_id;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_dbg_print_en_ops,
+			ufsdbg_dbg_print_en_read,
+			ufsdbg_dbg_print_en_set,
+			"%llu\n");
+
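+/*
+ * Writing any integer to req_stats re-initializes the per-request-type
+ * statistics; reading shows the min/max/average/count values collected for
+ * each request type. Example (path is illustrative):
+ *
+ *   echo 0 > /sys/kernel/debug/ufshcd0/stats/req_stats
+ *   cat /sys/kernel/debug/ufshcd0/stats/req_stats
+ */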
+static ssize_t ufsdbg_req_stats_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	int val;
+	int ret;
+	unsigned long flags;
+
+	ret = kstrtoint_from_user(ubuf, cnt, 0, &val);
+	if (ret) {
+		dev_err(hba->dev, "%s: Invalid argument\n", __func__);
+		return ret;
+	}
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	ufshcd_init_req_stats(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return cnt;
+}
+
+static int ufsdbg_req_stats_show(struct seq_file *file, void *data)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)file->private;
+	int i;
+	unsigned long flags;
+
+	/* Header */
+	seq_printf(file, "\t%-10s %-10s %-10s %-10s %-10s %-10s",
+		"All", "Read", "Write", "Read(urg)", "Write(urg)", "Flush");
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	seq_printf(file, "\n%s:\t", "Min");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].min);
+	seq_printf(file, "\n%s:\t", "Max");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].max);
+	seq_printf(file, "\n%s:\t", "Avg.");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ",
+			div64_u64(hba->ufs_stats.req_stats[i].sum,
+				hba->ufs_stats.req_stats[i].count));
+	seq_printf(file, "\n%s:\t", "Count");
+	for (i = 0; i < TS_NUM_STATS; i++)
+		seq_printf(file, "%-10llu ", hba->ufs_stats.req_stats[i].count);
+	seq_puts(file, "\n");
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return 0;
+}
+
+static int ufsdbg_req_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_req_stats_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_req_stats_desc = {
+	.open		= ufsdbg_req_stats_open,
+	.read		= seq_read,
+	.write		= ufsdbg_req_stats_write,
+};
+
+
+static int ufsdbg_reset_controller_show(struct seq_file *file, void *data)
+{
+	seq_puts(file, "echo 1 > /sys/kernel/debug/.../reset_controller\n");
+	seq_puts(file, "resets the UFS controller and restores its operational state\n\n");
+
+	return 0;
+}
+
+static int ufsdbg_reset_controller_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufsdbg_reset_controller_show,
+						inode->i_private);
+}
+
+static ssize_t ufsdbg_reset_controller_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct ufs_hba *hba = filp->f_mapping->host->i_private;
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	/*
+	 * simulating a dummy error in order to "convince"
+	 * eh_work to actually reset the controller
+	 */
+	hba->saved_err |= INT_FATAL_ERRORS;
+	hba->silence_err_logs = true;
+	schedule_work(&hba->eh_work);
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return cnt;
+}
+
+static const struct file_operations ufsdbg_reset_controller = {
+	.open		= ufsdbg_reset_controller_open,
+	.read		= seq_read,
+	.write		= ufsdbg_reset_controller_write,
+};
+
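+/*
+ * err_state reads back 1 if ufsdbg_set_err_state() was called since the
+ * last clear (i.e. a UFS error occurred), 0 otherwise. Writing any value
+ * clears the flag.
+ */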
+static int ufsdbg_clear_err_state(void *data, u64 val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	/* clear the error state on any write attempt */
+	hba->debugfs_files.err_occurred = false;
+
+	return 0;
+}
+
+static int ufsdbg_read_err_state(void *data, u64 *val)
+{
+	struct ufs_hba *hba = data;
+
+	if (!hba)
+		return -EINVAL;
+
+	*val = hba->debugfs_files.err_occurred ? 1 : 0;
+
+	return 0;
+}
+
+void ufsdbg_set_err_state(struct ufs_hba *hba)
+{
+	hba->debugfs_files.err_occurred = true;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufsdbg_err_state,
+			ufsdbg_read_err_state,
+			ufsdbg_clear_err_state,
+			"%llu\n");
+
+void ufsdbg_add_debugfs(struct ufs_hba *hba)
+{
+	char root_name[sizeof("ufshcd00")];
+
+	if (!hba) {
+		pr_err("%s: NULL hba, exiting\n", __func__);
+		return;
+	}
+
+	snprintf(root_name, ARRAY_SIZE(root_name), "%s%d", UFSHCD,
+		hba->host->host_no);
+
+	hba->debugfs_files.debugfs_root = debugfs_create_dir(root_name, NULL);
+	if (IS_ERR(hba->debugfs_files.debugfs_root))
+		/* Don't complain -- debugfs just isn't enabled */
+		goto err_no_root;
+	if (!hba->debugfs_files.debugfs_root) {
+		/*
+		 * Complain -- debugfs is enabled, but it failed to
+		 * create the directory
+		 */
+		dev_err(hba->dev,
+			"%s: NULL debugfs root directory, exiting", __func__);
+		goto err_no_root;
+	}
+
+	hba->debugfs_files.stats_folder = debugfs_create_dir("stats",
+					hba->debugfs_files.debugfs_root);
+	if (!hba->debugfs_files.stats_folder) {
+		dev_err(hba->dev,
+			"%s: NULL stats_folder, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.tag_stats =
+		debugfs_create_file("tag_stats", S_IRUSR | S_IWUSR,
+					   hba->debugfs_files.stats_folder, hba,
+					   &ufsdbg_tag_stats_fops);
+	if (!hba->debugfs_files.tag_stats) {
+		dev_err(hba->dev, "%s:  NULL tag_stats file, exiting",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.query_stats =
+		debugfs_create_file("query_stats", S_IRUSR | S_IWUSR,
+					   hba->debugfs_files.stats_folder, hba,
+					   &ufsdbg_query_stats_fops);
+	if (!hba->debugfs_files.query_stats) {
+		dev_err(hba->dev, "%s:  NULL query_stats file, exiting",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.err_stats =
+		debugfs_create_file("err_stats", S_IRUSR | S_IWUSR,
+					   hba->debugfs_files.stats_folder, hba,
+					   &ufsdbg_err_stats_fops);
+	if (!hba->debugfs_files.err_stats) {
+		dev_err(hba->dev, "%s:  NULL err_stats file, exiting",
+			__func__);
+		goto err;
+	}
+
+	if (ufshcd_init_statistics(hba)) {
+		dev_err(hba->dev, "%s: Error initializing statistics",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.host_regs = debugfs_create_file("host_regs", S_IRUSR,
+				hba->debugfs_files.debugfs_root, hba,
+				&ufsdbg_host_regs_fops);
+	if (!hba->debugfs_files.host_regs) {
+		dev_err(hba->dev, "%s:  NULL hcd regs file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.show_hba = debugfs_create_file("show_hba", S_IRUSR,
+				hba->debugfs_files.debugfs_root, hba,
+				&ufsdbg_show_hba_fops);
+	if (!hba->debugfs_files.show_hba) {
+		dev_err(hba->dev, "%s:  NULL hba file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dump_dev_desc =
+		debugfs_create_file("dump_device_desc", S_IRUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dump_device_desc);
+	if (!hba->debugfs_files.dump_dev_desc) {
+		dev_err(hba->dev,
+			"%s:  NULL dump_device_desc file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.power_mode =
+		debugfs_create_file("power_mode", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_power_mode_desc);
+	if (!hba->debugfs_files.power_mode) {
+		dev_err(hba->dev,
+			"%s:  NULL power_mode_desc file, exiting", __func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dme_local_read =
+		debugfs_create_file("dme_local_read", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dme_local_read_ops);
+	if (!hba->debugfs_files.dme_local_read) {
+		dev_err(hba->dev,
+			"%s:  failed create dme_local_read debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dme_peer_read =
+		debugfs_create_file("dme_peer_read", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dme_peer_read_ops);
+	if (!hba->debugfs_files.dme_peer_read) {
+		dev_err(hba->dev,
+			"%s:  failed create dme_peer_read debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.dbg_print_en =
+		debugfs_create_file("dbg_print_en", S_IRUSR | S_IWUSR,
+				    hba->debugfs_files.debugfs_root, hba,
+				    &ufsdbg_dbg_print_en_ops);
+	if (!hba->debugfs_files.dbg_print_en) {
+		dev_err(hba->dev,
+			"%s:  failed create dbg_print_en debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.req_stats =
+		debugfs_create_file("req_stats", S_IRUSR | S_IWUSR,
+			hba->debugfs_files.stats_folder, hba,
+			&ufsdbg_req_stats_desc);
+	if (!hba->debugfs_files.req_stats) {
+		dev_err(hba->dev,
+			"%s:  failed create req_stats debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.reset_controller =
+		debugfs_create_file("reset_controller", S_IRUSR | S_IWUSR,
+			hba->debugfs_files.debugfs_root, hba,
+			&ufsdbg_reset_controller);
+	if (!hba->debugfs_files.reset_controller) {
+		dev_err(hba->dev,
+			"%s: failed create reset_controller debugfs entry",
+				__func__);
+		goto err;
+	}
+
+	hba->debugfs_files.err_state =
+		debugfs_create_file("err_state", S_IRUSR | S_IWUSR,
+			hba->debugfs_files.debugfs_root, hba,
+			&ufsdbg_err_state);
+	if (!hba->debugfs_files.err_state) {
+		dev_err(hba->dev,
+		     "%s: failed create err_state debugfs entry", __func__);
+		goto err;
+	}
+
+	ufsdbg_setup_fault_injection(hba);
+
+	ufshcd_vops_add_debugfs(hba, hba->debugfs_files.debugfs_root);
+
+	return;
+
+err:
+	debugfs_remove_recursive(hba->debugfs_files.debugfs_root);
+	hba->debugfs_files.debugfs_root = NULL;
+err_no_root:
+	dev_err(hba->dev, "%s: failed to initialize debugfs\n", __func__);
+}
+
+void ufsdbg_remove_debugfs(struct ufs_hba *hba)
+{
+	ufshcd_vops_remove_debugfs(hba);
+	debugfs_remove_recursive(hba->debugfs_files.debugfs_root);
+	kfree(hba->ufs_stats.tag_stats);
+}
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
new file mode 100644
index 0000000..13848e8
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -0,0 +1,69 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * UFS debugfs - add debugfs interface to the ufshcd.
+ * This is currently used for statistics collection and exporting from the
+ * UFS driver.
+ * This infrastructure can be used for debugging or direct tweaking
+ * of the driver from userspace.
+ *
+ */
+
+#ifndef _UFS_DEBUGFS_H
+#define _UFS_DEBUGFS_H
+
+#include <linux/debugfs.h>
+#include "ufshcd.h"
+
+enum ufsdbg_err_inject_scenario {
+	ERR_INJECT_INTR,
+	ERR_INJECT_PWR_CHANGE,
+	ERR_INJECT_UIC,
+	ERR_INJECT_DME_ATTR,
+	ERR_INJECT_QUERY,
+	ERR_INJECT_MAX_ERR_SCENARIOS,
+};
+
+#ifdef CONFIG_DEBUG_FS
+void ufsdbg_add_debugfs(struct ufs_hba *hba);
+void ufsdbg_remove_debugfs(struct ufs_hba *hba);
+void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv);
+void ufsdbg_set_err_state(struct ufs_hba *hba);
+#else
+static inline void ufsdbg_add_debugfs(struct ufs_hba *hba)
+{
+}
+static inline void ufsdbg_remove_debugfs(struct ufs_hba *hba)
+{
+}
+static inline void ufsdbg_pr_buf_to_std(struct ufs_hba *hba, int offset,
+	int num_regs, char *str, void *priv)
+{
+}
+static inline void ufsdbg_set_err_state(struct ufs_hba *hba)
+{
+}
+#endif
+
+#ifdef CONFIG_UFS_FAULT_INJECTION
+void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+			enum ufsdbg_err_inject_scenario err_scenario,
+			int success_value, int *ret_value);
+#else
+static inline void ufsdbg_error_inject_dispatcher(struct ufs_hba *hba,
+			enum ufsdbg_err_inject_scenario err_scenario,
+			int success_value, int *ret_value)
+{
+}
+#endif
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.c b/drivers/scsi/ufs/ufs-qcom-debugfs.c
new file mode 100644
index 0000000..8532439
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include "ufs-qcom.h"
+#include "ufs-qcom-debugfs.h"
+#include "ufs-debugfs.h"
+
+#define TESTBUS_CFG_BUFF_LINE_SIZE	sizeof("0xXY, 0xXY")
+
+static void ufs_qcom_dbg_remove_debugfs(struct ufs_qcom_host *host);
+
+static int ufs_qcom_dbg_print_en_read(void *data, u64 *attr_val)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	*attr_val = (u64)host->dbg_print_en;
+	return 0;
+}
+
+static int ufs_qcom_dbg_print_en_set(void *data, u64 attr_id)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	if (attr_id & ~UFS_QCOM_DBG_PRINT_ALL)
+		return -EINVAL;
+
+	host->dbg_print_en = (u32)attr_id;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_print_en_ops,
+			ufs_qcom_dbg_print_en_read,
+			ufs_qcom_dbg_print_en_set,
+			"%llu\n");
+
+static int ufs_qcom_dbg_testbus_en_read(void *data, u64 *attr_val)
+{
+	struct ufs_qcom_host *host = data;
+	bool enabled;
+
+	if (!host)
+		return -EINVAL;
+
+	enabled = !!(host->dbg_print_en & UFS_QCOM_DBG_PRINT_TEST_BUS_EN);
+	*attr_val = (u64)enabled;
+	return 0;
+}
+
+static int ufs_qcom_dbg_testbus_en_set(void *data, u64 attr_id)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	if (!!attr_id)
+		host->dbg_print_en |= UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
+	else
+		host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_TEST_BUS_EN;
+
+	return ufs_qcom_testbus_config(host);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_en_ops,
+			ufs_qcom_dbg_testbus_en_read,
+			ufs_qcom_dbg_testbus_en_set,
+			"%llu\n");
+
+static int ufs_qcom_dbg_testbus_cfg_show(struct seq_file *file, void *data)
+{
+	struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+
+	seq_printf(file, "Current configuration: major=%d, minor=%d\n\n",
+			host->testbus.select_major, host->testbus.select_minor);
+
+	/* Print usage */
+	seq_puts(file,
+		"To change the test-bus configuration, write 'MAJ,MIN' where:\n"
+		"MAJ - major select\n"
+		"MIN - minor select\n\n");
+	return 0;
+}
+
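+/*
+ * Illustrative test-bus configuration write (the major/minor selects and
+ * the debugfs path are examples only; valid selects depend on the SoC):
+ *
+ *   echo 0x1,0x14 > /sys/kernel/debug/ufshcd0/qcom/testbus/configuration
+ */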
+static ssize_t ufs_qcom_dbg_testbus_cfg_write(struct file *file,
+				const char __user *ubuf, size_t cnt,
+				loff_t *ppos)
+{
+	struct ufs_qcom_host *host = file->f_mapping->host->i_private;
+	char configuration[TESTBUS_CFG_BUFF_LINE_SIZE] = {0};
+	loff_t buff_pos = 0;
+	char *comma;
+	int ret = 0;
+	int major;
+	int minor;
+
+	ret = simple_write_to_buffer(configuration, TESTBUS_CFG_BUFF_LINE_SIZE,
+		&buff_pos, ubuf, cnt);
+	if (ret < 0) {
+		dev_err(host->hba->dev, "%s: failed to read user data\n",
+			__func__);
+		goto out;
+	}
+
+	comma = strnchr(configuration, TESTBUS_CFG_BUFF_LINE_SIZE, ',');
+	if (!comma || comma == configuration) {
+		dev_err(host->hba->dev,
+			"%s: error in configuration of testbus\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (sscanf(configuration, "%i,%i", &major, &minor) != 2) {
+		dev_err(host->hba->dev,
+			"%s: couldn't parse input to 2 numeric values\n",
+			__func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	host->testbus.select_major = (u8)major;
+	host->testbus.select_minor = (u8)minor;
+
+	/*
+	 * Sanity check of the {major, minor} tuple is done in the
+	 * config function
+	 */
+	ret = ufs_qcom_testbus_config(host);
+	if (!ret)
+		dev_dbg(host->hba->dev,
+				"%s: New configuration: major=%d, minor=%d\n",
+				__func__, host->testbus.select_major,
+				host->testbus.select_minor);
+
+out:
+	return ret ? ret : cnt;
+}
+
+static int ufs_qcom_dbg_testbus_cfg_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ufs_qcom_dbg_testbus_cfg_show,
+				inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_testbus_cfg_desc = {
+	.open		= ufs_qcom_dbg_testbus_cfg_open,
+	.read		= seq_read,
+	.write		= ufs_qcom_dbg_testbus_cfg_write,
+};
+
+static int ufs_qcom_dbg_testbus_bus_read(void *data, u64 *attr_val)
+{
+	struct ufs_qcom_host *host = data;
+
+	if (!host)
+		return -EINVAL;
+
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+	*attr_val = (u64)ufshcd_readl(host->hba, UFS_TEST_BUS);
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(ufs_qcom_dbg_testbus_bus_ops,
+			ufs_qcom_dbg_testbus_bus_read,
+			NULL,
+			"%llu\n");
+
+static int ufs_qcom_dbg_dbg_regs_show(struct seq_file *file, void *data)
+{
+	struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+	bool dbg_print_reg = !!(host->dbg_print_en &
+				UFS_QCOM_DBG_PRINT_REGS_EN);
+
+	pm_runtime_get_sync(host->hba->dev);
+	ufshcd_hold(host->hba, false);
+
+	/* Temporarily override the debug print enable */
+	host->dbg_print_en |= UFS_QCOM_DBG_PRINT_REGS_EN;
+	ufs_qcom_print_hw_debug_reg_all(host->hba, file, ufsdbg_pr_buf_to_std);
+	/* Restore previous debug print enable value */
+	if (!dbg_print_reg)
+		host->dbg_print_en &= ~UFS_QCOM_DBG_PRINT_REGS_EN;
+
+	ufshcd_release(host->hba, false);
+	pm_runtime_put_sync(host->hba->dev);
+
+	return 0;
+}
+
+static int ufs_qcom_dbg_dbg_regs_open(struct inode *inode,
+					      struct file *file)
+{
+	return single_open(file, ufs_qcom_dbg_dbg_regs_show,
+				inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = {
+	.open		= ufs_qcom_dbg_dbg_regs_open,
+	.read		= seq_read,
+};
+
+static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data)
+{
+	struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+	seq_printf(file, "enabled: %d\n", host->pm_qos.is_enabled);
+	for (i = 0; i < host->pm_qos.num_groups && host->pm_qos.groups; i++)
+		seq_printf(file,
+			"CPU Group #%d(mask=0x%lx): active_reqs=%d, state=%d, latency=%d\n",
+			i, host->pm_qos.groups[i].mask.bits[0],
+			host->pm_qos.groups[i].active_reqs,
+			host->pm_qos.groups[i].state,
+			host->pm_qos.groups[i].latency_us);
+
+	spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+	return 0;
+}
+
+static int ufs_qcom_dbg_pm_qos_open(struct inode *inode,
+					      struct file *file)
+{
+	return single_open(file, ufs_qcom_dbg_pm_qos_show, inode->i_private);
+}
+
+static const struct file_operations ufs_qcom_dbg_pm_qos_desc = {
+	.open		= ufs_qcom_dbg_pm_qos_open,
+	.read		= seq_read,
+};
+
+void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
+{
+	struct ufs_qcom_host *host;
+
+	if (!hba || !hba->priv) {
+		pr_err("%s: NULL host, exiting\n", __func__);
+		return;
+	}
+
+	host = hba->priv;
+	host->debugfs_files.debugfs_root = debugfs_create_dir("qcom", root);
+	if (IS_ERR(host->debugfs_files.debugfs_root))
+		/* Don't complain -- debugfs just isn't enabled */
+		goto err_no_root;
+	if (!host->debugfs_files.debugfs_root) {
+		/*
+		 * Complain -- debugfs is enabled, but it failed to
+		 * create the directory
+		 */
+		dev_err(host->hba->dev,
+			"%s: NULL debugfs root directory, exiting", __func__);
+		goto err_no_root;
+	}
+
+	host->debugfs_files.dbg_print_en =
+		debugfs_create_file("dbg_print_en", S_IRUSR | S_IWUSR,
+				    host->debugfs_files.debugfs_root, host,
+				    &ufs_qcom_dbg_print_en_ops);
+	if (!host->debugfs_files.dbg_print_en) {
+		dev_err(host->hba->dev,
+			"%s: failed to create dbg_print_en debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus = debugfs_create_dir("testbus",
+					host->debugfs_files.debugfs_root);
+	if (!host->debugfs_files.testbus) {
+		dev_err(host->hba->dev,
+			"%s: failed create testbus directory\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus_en =
+		debugfs_create_file("enable", S_IRUSR | S_IWUSR,
+				    host->debugfs_files.testbus, host,
+				    &ufs_qcom_dbg_testbus_en_ops);
+	if (!host->debugfs_files.testbus_en) {
+		dev_err(host->hba->dev,
+			"%s: failed create testbus_en debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus_cfg =
+		debugfs_create_file("configuration", S_IRUSR | S_IWUSR,
+				    host->debugfs_files.testbus, host,
+				    &ufs_qcom_dbg_testbus_cfg_desc);
+	if (!host->debugfs_files.testbus_cfg) {
+		dev_err(host->hba->dev,
+			"%s: failed create testbus_cfg debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.testbus_bus =
+		debugfs_create_file("TEST_BUS", S_IRUSR,
+				    host->debugfs_files.testbus, host,
+				    &ufs_qcom_dbg_testbus_bus_ops);
+	if (!host->debugfs_files.testbus_bus) {
+		dev_err(host->hba->dev,
+			"%s: failed create testbus_bus debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.dbg_regs =
+		debugfs_create_file("debug-regs", S_IRUSR,
+				    host->debugfs_files.debugfs_root, host,
+				    &ufs_qcom_dbg_dbg_regs_desc);
+	if (!host->debugfs_files.dbg_regs) {
+		dev_err(host->hba->dev,
+			"%s: failed create dbg_regs debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	host->debugfs_files.pm_qos =
+		debugfs_create_file("pm_qos", S_IRUSR,
+				host->debugfs_files.debugfs_root, host,
+				&ufs_qcom_dbg_pm_qos_desc);
+	if (!host->debugfs_files.pm_qos) {
+		dev_err(host->hba->dev,
+			"%s: failed to create pm_qos debugfs entry\n",
+			__func__);
+		goto err;
+	}
+
+	return;
+
+err:
+	ufs_qcom_dbg_remove_debugfs(host);
+err_no_root:
+	dev_err(host->hba->dev, "%s: failed to initialize debugfs\n", __func__);
+}
+
+static void ufs_qcom_dbg_remove_debugfs(struct ufs_qcom_host *host)
+{
+	debugfs_remove_recursive(host->debugfs_files.debugfs_root);
+	host->debugfs_files.debugfs_root = NULL;
+}
diff --git a/drivers/scsi/ufs/ufs-qcom-debugfs.h b/drivers/scsi/ufs/ufs-qcom-debugfs.h
new file mode 100644
index 0000000..b693bfa
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-debugfs.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2015, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef QCOM_DEBUGFS_H_
+#define QCOM_DEBUGFS_H_
+
+#include "ufshcd.h"
+
+#ifdef CONFIG_DEBUG_FS
+void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root);
+#endif
+
+#endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
new file mode 100644
index 0000000..1ba4f2b
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -0,0 +1,655 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/blkdev.h>
+#include <crypto/ice.h>
+
+#include "ufs-qcom-ice.h"
+#include "ufs-qcom-debugfs.h"
+#include "ufshcd.h"
+
+#define UFS_QCOM_CRYPTO_LABEL "ufs-qcom-crypto"
+/* Timeout waiting for ICE initialization, which requires TZ access */
+#define UFS_QCOM_ICE_COMPLETION_TIMEOUT_MS 500
+
+#define UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN	0
+
+static void ufs_qcom_ice_dump_regs(struct ufs_qcom_host *qcom_host, int offset,
+					int len, char *prefix)
+{
+	print_hex_dump(KERN_ERR, prefix,
+			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
+			16, 4, qcom_host->hba->mmio_base + offset, len * 4,
+			false);
+}
+
+void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
+{
+	int i;
+
+	if (!(qcom_host->dbg_print_en & UFS_QCOM_DBG_PRINT_ICE_REGS_EN))
+		return;
+
+	ufs_qcom_ice_dump_regs(qcom_host, REG_UFS_QCOM_ICE_CFG, 1,
+			"REG_UFS_QCOM_ICE_CFG ");
+	for (i = 0; i < NUM_QCOM_ICE_CTRL_INFO_n_REGS; i++) {
+		pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_1_%d = 0x%08X\n", i,
+			ufshcd_readl(qcom_host->hba,
+				(REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * i)));
+
+		pr_err("REG_UFS_QCOM_ICE_CTRL_INFO_2_%d = 0x%08X\n", i,
+			ufshcd_readl(qcom_host->hba,
+				(REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * i)));
+	}
+
+	if (qcom_host->ice.pdev && qcom_host->ice.vops &&
+	    qcom_host->ice.vops->debug)
+		qcom_host->ice.vops->debug(qcom_host->ice.pdev);
+}
+
+static void ufs_qcom_ice_error_cb(void *host_ctrl, u32 error)
+{
+	struct ufs_qcom_host *qcom_host = (struct ufs_qcom_host *)host_ctrl;
+
+	dev_err(qcom_host->hba->dev, "%s: Error in ice operation 0x%x",
+		__func__, error);
+
+	if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE)
+		qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
+}
+
+static struct platform_device *ufs_qcom_ice_get_pdevice(struct device *ufs_dev)
+{
+	struct device_node *node;
+	struct platform_device *ice_pdev = NULL;
+
+	node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
+
+	if (!node) {
+		dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
+			__func__);
+		goto out;
+	}
+
+	ice_pdev = qcom_ice_get_pdevice(node);
+out:
+	return ice_pdev;
+}
+
+static
+struct qcom_ice_variant_ops *ufs_qcom_ice_get_vops(struct device *ufs_dev)
+{
+	struct qcom_ice_variant_ops *ice_vops = NULL;
+	struct device_node *node;
+
+	node = of_parse_phandle(ufs_dev->of_node, UFS_QCOM_CRYPTO_LABEL, 0);
+
+	if (!node) {
+		dev_err(ufs_dev, "%s: ufs-qcom-crypto property not specified\n",
+			__func__);
+		goto out;
+	}
+
+	ice_vops = qcom_ice_get_variant_ops(node);
+
+	if (!ice_vops)
+		dev_err(ufs_dev, "%s: invalid ice_vops\n", __func__);
+
+	of_node_put(node);
+out:
+	return ice_vops;
+}
+
+/**
+ * ufs_qcom_ice_get_dev() - sets pointers to ICE data structs in UFS QCom host
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *
+ * Sets ICE platform device pointer and ICE vops structure
+ * corresponding to the current UFS device.
+ *
+ * Return: -EINVAL in-case of invalid input parameters:
+ *  qcom_host, qcom_host->hba or qcom_host->hba->dev
+ *         -ENODEV in-case ICE device is not required
+ *         -EPROBE_DEFER in-case ICE is required and hasn't been probed yet
+ *         0 otherwise
+ */
+int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
+{
+	struct device *ufs_dev;
+	int err = 0;
+
+	if (!qcom_host || !qcom_host->hba || !qcom_host->hba->dev) {
+		pr_err("%s: invalid qcom_host %p or qcom_host->hba or qcom_host->hba->dev\n",
+			__func__, qcom_host);
+		err = -EINVAL;
+		goto out;
+	}
+
+	ufs_dev = qcom_host->hba->dev;
+
+	qcom_host->ice.vops  = ufs_qcom_ice_get_vops(ufs_dev);
+	qcom_host->ice.pdev = ufs_qcom_ice_get_pdevice(ufs_dev);
+
+	if (qcom_host->ice.pdev == ERR_PTR(-EPROBE_DEFER)) {
+		dev_err(ufs_dev, "%s: ICE device not probed yet\n",
+			__func__);
+		qcom_host->ice.pdev = NULL;
+		qcom_host->ice.vops = NULL;
+		err = -EPROBE_DEFER;
+		goto out;
+	}
+
+	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
+		dev_err(ufs_dev, "%s: invalid platform device %p or vops %p\n",
+			__func__, qcom_host->ice.pdev, qcom_host->ice.vops);
+		qcom_host->ice.pdev = NULL;
+		qcom_host->ice.vops = NULL;
+		err = -ENODEV;
+		goto out;
+	}
+
+	qcom_host->ice.state = UFS_QCOM_ICE_STATE_DISABLED;
+
+out:
+	return err;
+}
+
+static void ufs_qcom_ice_cfg_work(struct work_struct *work)
+{
+	struct ice_data_setting ice_set;
+	struct ufs_qcom_host *qcom_host =
+		container_of(work, struct ufs_qcom_host, ice_cfg_work);
+
+	if (!qcom_host->ice.vops->config_start || !qcom_host->req_pending)
+		return;
+
+	/*
+	 * config_start is called again as previous attempt returned -EAGAIN,
+	 * this call shall now take care of the necessary key setup.
+	 * 'ice_set' will not actually be used, instead the next call to
+	 * config_start() for this request, in the normal call flow, will
+	 * succeed as the key has now been setup.
+	 */
+	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
+		qcom_host->req_pending, &ice_set, false);
+
+	/*
+	 * Resume with requests processing. We assume config_start has been
+	 * successful, but even if it wasn't we still must resume in order to
+	 * allow for the request to be retried.
+	 */
+	ufshcd_scsi_unblock_requests(qcom_host->hba);
+}
+
+/**
+ * ufs_qcom_ice_init() - initializes the ICE-UFS interface and ICE device
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ *
+ * Return: -EINVAL in-case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
+{
+	struct device *ufs_dev = qcom_host->hba->dev;
+	int err;
+
+	err = qcom_host->ice.vops->init(qcom_host->ice.pdev,
+				qcom_host,
+				ufs_qcom_ice_error_cb);
+	if (err) {
+		dev_err(ufs_dev, "%s: ice init failed. err = %d\n",
+			__func__, err);
+		goto out;
+	} else {
+		qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
+	}
+
+	qcom_host->dbg_print_en |= UFS_QCOM_ICE_DEFAULT_DBG_PRINT_EN;
+	INIT_WORK(&qcom_host->ice_cfg_work, ufs_qcom_ice_cfg_work);
+
+out:
+	return err;
+}
+
+static inline bool ufs_qcom_is_data_cmd(char cmd_op, bool is_write)
+{
+	if (is_write) {
+		if (cmd_op == WRITE_6 || cmd_op == WRITE_10 ||
+		    cmd_op == WRITE_16)
+			return true;
+	} else {
+		if (cmd_op == READ_6 || cmd_op == READ_10 ||
+		    cmd_op == READ_16)
+			return true;
+	}
+
+	return false;
+}
+
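+/*
+ * ufs_qcom_ice_req_setup() - queries ICE for the crypto configuration of a
+ * SCSI command. On success, *enable reflects whether inline encryption or
+ * decryption should be applied for data commands and *cc_index carries the
+ * selected key index. A -EAGAIN from config_start() defers the key setup to
+ * the ice_cfg_work worker and is propagated so the request is retried.
+ */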
+int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
+		struct scsi_cmnd *cmd, u8 *cc_index, bool *enable)
+{
+	struct ice_data_setting ice_set;
+	char cmd_op = cmd->cmnd[0];
+	int err;
+
+	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
+		dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
+			__func__);
+		return 0;
+	}
+
+	if (qcom_host->ice.vops->config_start) {
+		memset(&ice_set, 0, sizeof(ice_set));
+		err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
+			cmd->request, &ice_set, true);
+		if (err) {
+			/*
+			 * config_start() returns -EAGAIN when a key slot is
+			 * available but not yet configured. Since the
+			 * configuration requires a non-atomic context, the
+			 * function is called again from the worker thread to
+			 * complete it. For this request the error propagates,
+			 * so the request is re-queued, and further request
+			 * processing is blocked until the configuration is
+			 * completed.
+			 */
+			if (err == -EAGAIN) {
+				dev_dbg(qcom_host->hba->dev,
+					"%s: scheduling task for ice setup\n",
+					__func__);
+				qcom_host->req_pending = cmd->request;
+				if (schedule_work(&qcom_host->ice_cfg_work))
+					ufshcd_scsi_block_requests(
+						qcom_host->hba);
+			} else {
+				dev_err(qcom_host->hba->dev,
+					"%s: error in ice_vops->config %d\n",
+					__func__, err);
+			}
+
+			return err;
+		}
+
+		if (ufs_qcom_is_data_cmd(cmd_op, true))
+			*enable = !ice_set.encr_bypass;
+		else if (ufs_qcom_is_data_cmd(cmd_op, false))
+			*enable = !ice_set.decr_bypass;
+
+		if (ice_set.crypto_data.key_index >= 0)
+			*cc_index = (u8)ice_set.crypto_data.key_index;
+	}
+	return 0;
+}
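
The -EAGAIN branch above and ufs_qcom_ice_cfg_work() form a small handshake between the atomic request path and a sleepable worker. A condensed sketch of the two sides, trimmed from the code in this file (error paths and locking omitted), shown only to summarize the flow:

	/* request path (atomic context): key slot exists but is not set up yet */
	err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
						cmd->request, &ice_set, true);
	if (err == -EAGAIN) {
		qcom_host->req_pending = cmd->request;	/* remember the request */
		if (schedule_work(&qcom_host->ice_cfg_work))
			ufshcd_scsi_block_requests(qcom_host->hba);
	}

	/* worker (sleepable context): program the key, then let SCSI retry */
	qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
					  qcom_host->req_pending, &ice_set, false);
	ufshcd_scsi_unblock_requests(qcom_host->hba);
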
+
+/**
+ * ufs_qcom_ice_cfg_start() - starts configuring UFS's ICE registers
+ *			      for an ICE transaction
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ * @cmd:	Pointer to a valid scsi command. cmd->request should also be
+ *		a valid pointer.
+ *
+ * Return: -EINVAL in case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+		struct scsi_cmnd *cmd)
+{
+	struct device *dev = qcom_host->hba->dev;
+	int err = 0;
+	struct ice_data_setting ice_set;
+	unsigned int slot = 0;
+	sector_t lba = 0;
+	unsigned int ctrl_info_val = 0;
+	unsigned int bypass = 0;
+	struct request *req;
+	char cmd_op;
+
+	if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
+		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+		goto out;
+	}
+
+	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
+		dev_err(dev, "%s: ice state (%d) is not active\n",
+			__func__, qcom_host->ice.state);
+		return -EINVAL;
+	}
+
+	if (qcom_host->hw_ver.major == 0x3) {
+		/* nothing to do here for version 0x3, exit silently */
+		return 0;
+	}
+
+	req = cmd->request;
+	if (req->bio)
+		lba = req->bio->bi_iter.bi_sector;
+
+	slot = req->tag;
+	if (slot >= qcom_host->hba->nutrs) {
+		dev_err(dev, "%s: slot (%d) is out of boundaries (0...%d)\n",
+			__func__, slot, qcom_host->hba->nutrs);
+		return -EINVAL;
+	}
+
+	memset(&ice_set, 0, sizeof(ice_set));
+	if (qcom_host->ice.vops->config_start) {
+		err = qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
+							req, &ice_set, true);
+		if (err) {
+			/*
+			 * config_start() returns -EAGAIN when a key slot is
+			 * available but not yet configured. Since the
+			 * configuration requires a non-atomic context, the
+			 * function is called again from the worker thread to
+			 * complete it. For this request the error propagates,
+			 * so the request is re-queued, and further request
+			 * processing is blocked until the configuration is
+			 * completed.
+			 */
+			if (err == -EAGAIN) {
+				qcom_host->req_pending = req;
+				if (schedule_work(&qcom_host->ice_cfg_work))
+					ufshcd_scsi_block_requests(
+							qcom_host->hba);
+			}
+			goto out;
+		}
+	}
+
+	cmd_op = cmd->cmnd[0];
+
+#define UFS_QCOM_DIR_WRITE	true
+#define UFS_QCOM_DIR_READ	false
+	/* if non data command, bypass shall be enabled */
+	if (!ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE) &&
+	    !ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
+		bypass = UFS_QCOM_ICE_ENABLE_BYPASS;
+	/* if writing data command */
+	else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_WRITE))
+		bypass = ice_set.encr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
+						UFS_QCOM_ICE_DISABLE_BYPASS;
+	/* if reading data command */
+	else if (ufs_qcom_is_data_cmd(cmd_op, UFS_QCOM_DIR_READ))
+		bypass = ice_set.decr_bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
+						UFS_QCOM_ICE_DISABLE_BYPASS;
+
+	/* Configure ICE index */
+	ctrl_info_val =
+		(ice_set.crypto_data.key_index &
+		 MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX)
+		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX;
+
+	/* Configure data unit size of transfer request */
+	ctrl_info_val |=
+		(UFS_QCOM_ICE_TR_DATA_UNIT_4_KB &
+		 MASK_UFS_QCOM_ICE_CTRL_INFO_CDU)
+		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
+
+	/* Configure ICE bypass mode */
+	ctrl_info_val |=
+		(bypass & MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS)
+		 << OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS;
+
+	if (qcom_host->hw_ver.major == 0x1) {
+		ufshcd_writel(qcom_host->hba, lba,
+			     (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 8 * slot));
+
+		ufshcd_writel(qcom_host->hba, ctrl_info_val,
+			     (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot));
+	}
+	if (qcom_host->hw_ver.major == 0x2) {
+		ufshcd_writel(qcom_host->hba, (lba & 0xFFFFFFFF),
+			     (REG_UFS_QCOM_ICE_CTRL_INFO_1_n + 16 * slot));
+
+		ufshcd_writel(qcom_host->hba, ((lba >> 32) & 0xFFFFFFFF),
+			     (REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 16 * slot));
+
+		ufshcd_writel(qcom_host->hba, ctrl_info_val,
+			     (REG_UFS_QCOM_ICE_CTRL_INFO_3_n + 16 * slot));
+	}
+
+	/*
+	 * Ensure the UFS-ICE registers are configured before the next
+	 * operation, otherwise the UFS host controller might raise errors.
+	 */
+	mb();
+out:
+	return err;
+}
+
+/**
+ * ufs_qcom_ice_cfg_end() - finishes configuring UFS's ICE registers
+ *			    for an ICE transaction
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ * @req:	Pointer to a valid block layer request.
+ *
+ * Return: -EINVAL in case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host, struct request *req)
+{
+	int err = 0;
+	struct device *dev = qcom_host->hba->dev;
+
+	if (qcom_host->ice.vops->config_end) {
+		err = qcom_host->ice.vops->config_end(req);
+		if (err) {
+			dev_err(dev, "%s: error in ice_vops->config_end %d\n",
+				__func__, err);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * ufs_qcom_ice_reset() - resets UFS-ICE interface and ICE device
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ *
+ * Return: -EINVAL in case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
+{
+	struct device *dev = qcom_host->hba->dev;
+	int err = 0;
+
+	if (!qcom_host->ice.pdev) {
+		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+		goto out;
+	}
+
+	if (!qcom_host->ice.vops) {
+		dev_err(dev, "%s: invalid ice_vops\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE)
+		goto out;
+
+	if (qcom_host->ice.vops->reset) {
+		err = qcom_host->ice.vops->reset(qcom_host->ice.pdev);
+		if (err) {
+			dev_err(dev, "%s: ice_vops->reset failed. err %d\n",
+				__func__, err);
+			goto out;
+		}
+	}
+
+	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
+		dev_err(qcom_host->hba->dev,
+			"%s: error. ice.state (%d) is not in active state\n",
+			__func__, qcom_host->ice.state);
+		err = -EINVAL;
+	}
+
+out:
+	return err;
+}
+
+/**
+ * ufs_qcom_ice_resume() - resumes UFS-ICE interface and ICE device from power
+ * collapse
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ *
+ * Return: -EINVAL in case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
+{
+	struct device *dev = qcom_host->hba->dev;
+	int err = 0;
+
+	if (!qcom_host->ice.pdev) {
+		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+		goto out;
+	}
+
+	if (qcom_host->ice.state !=
+			UFS_QCOM_ICE_STATE_SUSPENDED) {
+		goto out;
+	}
+
+	if (!qcom_host->ice.vops) {
+		dev_err(dev, "%s: invalid ice_vops\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcom_host->ice.vops->resume) {
+		err = qcom_host->ice.vops->resume(qcom_host->ice.pdev);
+		if (err) {
+			dev_err(dev, "%s: ice_vops->resume failed. err %d\n",
+				__func__, err);
+			return err;
+		}
+	}
+	qcom_host->ice.state = UFS_QCOM_ICE_STATE_ACTIVE;
+out:
+	return err;
+}
+
+/**
+ * ufs_qcom_ice_suspend() - suspends UFS-ICE interface and ICE device
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ *
+ * Return: -EINVAL in case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
+{
+	struct device *dev = qcom_host->hba->dev;
+	int err = 0;
+
+	if (!qcom_host->ice.pdev) {
+		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+		goto out;
+	}
+
+	if (qcom_host->ice.vops->suspend) {
+		err = qcom_host->ice.vops->suspend(qcom_host->ice.pdev);
+		if (err) {
+			dev_err(qcom_host->hba->dev,
+				"%s: ice_vops->suspend failed. err %d\n",
+				__func__, err);
+			return -EINVAL;
+		}
+	}
+
+	if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_ACTIVE) {
+		qcom_host->ice.state = UFS_QCOM_ICE_STATE_SUSPENDED;
+	} else if (qcom_host->ice.state == UFS_QCOM_ICE_STATE_DISABLED) {
+		dev_err(qcom_host->hba->dev,
+				"%s: ice state is invalid: disabled\n",
+				__func__);
+		err = -EINVAL;
+	}
+
+out:
+	return err;
+}
+
+/**
+ * ufs_qcom_ice_get_status() - returns the status of an ICE transaction
+ * @qcom_host:	Pointer to a UFS QCom internal host structure.
+ *		qcom_host, qcom_host->hba and qcom_host->hba->dev should all
+ *		be valid pointers.
+ * @ice_status:	Pointer to a valid output parameter.
+ *		< 0 in case of ICE transaction failure.
+ *		0 otherwise.
+ *
+ * Return: -EINVAL in case of an error
+ *         0 otherwise
+ */
+int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status)
+{
+	struct device *dev = NULL;
+	int err = 0;
+	int stat = -EINVAL;
+
+	*ice_status = 0;
+
+	dev = qcom_host->hba->dev;
+	if (!dev) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!qcom_host->ice.pdev) {
+		dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
+		goto out;
+	}
+
+	if (qcom_host->ice.state != UFS_QCOM_ICE_STATE_ACTIVE) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (!qcom_host->ice.vops) {
+		dev_err(dev, "%s: invalid ice_vops\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcom_host->ice.vops->status) {
+		stat = qcom_host->ice.vops->status(qcom_host->ice.pdev);
+		if (stat < 0) {
+			dev_err(dev, "%s: ice_vops->status failed. stat %d\n",
+				__func__, stat);
+			err = -EINVAL;
+			goto out;
+		}
+
+		*ice_status = stat;
+	}
+
+out:
+	return err;
+}
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.h b/drivers/scsi/ufs/ufs-qcom-ice.h
new file mode 100644
index 0000000..eb02916
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-qcom-ice.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UFS_QCOM_ICE_H_
+#define _UFS_QCOM_ICE_H_
+
+#include <scsi/scsi_cmnd.h>
+
+#include "ufs-qcom.h"
+
+/*
+ * UFS host controller ICE registers. There are 32 instances
+ * (n = 0..31) of each of these registers.
+ */
+enum {
+	REG_UFS_QCOM_ICE_CFG		         = 0x2200,
+	REG_UFS_QCOM_ICE_CTRL_INFO_1_n           = 0x2204,
+	REG_UFS_QCOM_ICE_CTRL_INFO_2_n           = 0x2208,
+	REG_UFS_QCOM_ICE_CTRL_INFO_3_n           = 0x220C,
+};
+#define NUM_QCOM_ICE_CTRL_INFO_n_REGS		32
+
+/* UFS QCOM ICE CTRL Info register offset */
+enum {
+	OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS     = 0,
+	OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX  = 0x1,
+	OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU        = 0x6,
+};
+
+/* UFS QCOM ICE CTRL Info register masks */
+enum {
+	MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS     = 0x1,
+	MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX  = 0x1F,
+	MASK_UFS_QCOM_ICE_CTRL_INFO_CDU        = 0x8,
+};
+
+/* UFS QCOM ICE encryption/decryption bypass state */
+enum {
+	UFS_QCOM_ICE_DISABLE_BYPASS  = 0,
+	UFS_QCOM_ICE_ENABLE_BYPASS = 1,
+};
+
+/* UFS QCOM ICE Crypto Data Unit of target DUN of Transfer Request */
+enum {
+	UFS_QCOM_ICE_TR_DATA_UNIT_512_B          = 0,
+	UFS_QCOM_ICE_TR_DATA_UNIT_1_KB           = 1,
+	UFS_QCOM_ICE_TR_DATA_UNIT_2_KB           = 2,
+	UFS_QCOM_ICE_TR_DATA_UNIT_4_KB           = 3,
+	UFS_QCOM_ICE_TR_DATA_UNIT_8_KB           = 4,
+	UFS_QCOM_ICE_TR_DATA_UNIT_16_KB          = 5,
+	UFS_QCOM_ICE_TR_DATA_UNIT_32_KB          = 6,
+};
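
The data-unit encoding above is simply log2(size / 512): 512 B maps to 0, 1 KB to 1, and so on up to 32 KB mapping to 6. A minimal helper sketch that derives the encoding from a byte count; the helper name is hypothetical and assumes the size is a power of two in the supported range:

	#include <linux/log2.h>

	/* e.g. ufs_qcom_ice_du_encode(4096) == UFS_QCOM_ICE_TR_DATA_UNIT_4_KB */
	static inline u32 ufs_qcom_ice_du_encode(u32 bytes)
	{
		return ilog2(bytes) - ilog2(512);
	}
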
+
+/* UFS QCOM ICE internal state */
+enum {
+	UFS_QCOM_ICE_STATE_DISABLED   = 0,
+	UFS_QCOM_ICE_STATE_ACTIVE     = 1,
+	UFS_QCOM_ICE_STATE_SUSPENDED  = 2,
+};
+
+#ifdef CONFIG_SCSI_UFS_QCOM_ICE
+int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
+			   struct scsi_cmnd *cmd, u8 *cc_index, bool *enable);
+int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+		struct scsi_cmnd *cmd);
+int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
+		struct request *req);
+int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host);
+int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host, int *ice_status);
+void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host);
+#else
+static inline int ufs_qcom_ice_get_dev(struct ufs_qcom_host *qcom_host)
+{
+	if (qcom_host) {
+		qcom_host->ice.pdev = NULL;
+		qcom_host->ice.vops = NULL;
+	}
+	return -ENODEV;
+}
+static inline int ufs_qcom_ice_init(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
+					 struct scsi_cmnd *cmd)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_cfg_end(struct ufs_qcom_host *qcom_host,
+				       struct request *req)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_reset(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_resume(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_suspend(struct ufs_qcom_host *qcom_host)
+{
+	return 0;
+}
+static inline int ufs_qcom_ice_get_status(struct ufs_qcom_host *qcom_host,
+					  int *ice_status)
+{
+	return 0;
+}
+static inline void ufs_qcom_ice_print_regs(struct ufs_qcom_host *qcom_host)
+{
+}
+#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
+
+#endif /* _UFS_QCOM_ICE_H_ */
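
For reference, the CTRL_INFO word programmed per request slot in ufs_qcom_ice_cfg_start() is built from the masks and offsets defined above. A minimal sketch of that packing, mirroring the driver code; the helper name is hypothetical:

	static inline u32 ufs_qcom_ice_pack_ctrl_info(u32 key_index, u32 data_unit,
						      bool bypass)
	{
		u32 val;

		/* key slot index used by ICE for this transaction */
		val = (key_index & MASK_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX)
			<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_KEY_INDEX;
		/* crypto data unit, e.g. UFS_QCOM_ICE_TR_DATA_UNIT_4_KB */
		val |= (data_unit & MASK_UFS_QCOM_ICE_CTRL_INFO_CDU)
			<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_CDU;
		/* bypass encryption/decryption, e.g. for non-data commands */
		val |= ((bypass ? UFS_QCOM_ICE_ENABLE_BYPASS :
				  UFS_QCOM_ICE_DISABLE_BYPASS) &
			MASK_UFS_QCOM_ICE_CTRL_INFO_BYPASS)
			<< OFFSET_UFS_QCOM_ICE_CTRL_INFO_BYPASS;

		return val;
	}

The resulting value is written to REG_UFS_QCOM_ICE_CTRL_INFO_2_n + 8 * slot on v1 controllers and to REG_UFS_QCOM_ICE_CTRL_INFO_3_n + 16 * slot on v2 controllers, alongside the request LBA that is used as the DUN.
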
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 3aedf73..dfa2ef2 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -14,7 +14,13 @@
 
 #include <linux/time.h>
 #include <linux/of.h>
+#include <linux/iopoll.h>
 #include <linux/platform_device.h>
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+#include <linux/msm-bus.h>
+#endif
+
 #include <linux/phy/phy.h>
 #include <linux/phy/phy-qcom-ufs.h>
 
@@ -23,6 +29,13 @@
 #include "unipro.h"
 #include "ufs-qcom.h"
 #include "ufshci.h"
+#include "ufs_quirks.h"
+#include "ufs-qcom-ice.h"
+#include "ufs-qcom-debugfs.h"
+
+/* TODO: further tuning for this parameter may be required */
+#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US	(10000) /* microseconds */
+
 #define UFS_QCOM_DEFAULT_DBG_PRINT_EN	\
 	(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
@@ -44,18 +57,18 @@
 
 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];
 
-static int ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote);
+static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg);
 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
 						       u32 clk_cycles);
+static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
 
 static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
 		char *prefix)
 {
 	print_hex_dump(KERN_ERR, prefix,
 			len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
-			16, 4, (void __force *)hba->mmio_base + offset,
-			len * 4, false);
+			16, 4, hba->mmio_base + offset, len * 4, false);
 }
 
 static void ufs_qcom_dump_regs_wrapper(struct ufs_hba *hba, int offset, int len,
@@ -112,10 +125,10 @@
 	if (!host->is_lane_clks_enabled)
 		return;
 
-	if (host->hba->lanes_per_direction > 1)
+	if (host->tx_l1_sync_clk)
 		clk_disable_unprepare(host->tx_l1_sync_clk);
 	clk_disable_unprepare(host->tx_l0_sync_clk);
-	if (host->hba->lanes_per_direction > 1)
+	if (host->rx_l1_sync_clk)
 		clk_disable_unprepare(host->rx_l1_sync_clk);
 	clk_disable_unprepare(host->rx_l0_sync_clk);
 
@@ -146,18 +159,14 @@
 		if (err)
 			goto disable_tx_l0;
 
-		err = ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
-			host->tx_l1_sync_clk);
-		if (err)
-			goto disable_rx_l1;
+		/* The tx lane1 clk could be muxed, hence keep this optional */
+		if (host->tx_l1_sync_clk)
+			ufs_qcom_host_clk_enable(dev, "tx_lane1_sync_clk",
+						 host->tx_l1_sync_clk);
 	}
-
 	host->is_lane_clks_enabled = true;
 	goto out;
 
-disable_rx_l1:
-	if (host->hba->lanes_per_direction > 1)
-		clk_disable_unprepare(host->rx_l1_sync_clk);
 disable_tx_l0:
 	clk_disable_unprepare(host->tx_l0_sync_clk);
 disable_rx_l0:
@@ -188,33 +197,14 @@
 		if (err)
 			goto out;
 
-		err = ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
-			&host->tx_l1_sync_clk);
+		/* The tx lane1 clk could be muxed, hence keep this optional */
+		ufs_qcom_host_clk_get(dev, "tx_lane1_sync_clk",
+					&host->tx_l1_sync_clk);
 	}
 out:
 	return err;
 }
 
-static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
-{
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct phy *phy = host->generic_phy;
-	u32 tx_lanes;
-	int err = 0;
-
-	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
-	if (err)
-		goto out;
-
-	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
-	if (err)
-		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
-			__func__);
-
-out:
-	return err;
-}
-
 static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 {
 	int err;
@@ -302,8 +292,7 @@
 
 	ret = ufs_qcom_phy_is_pcs_ready(phy);
 	if (ret)
-		dev_err(hba->dev,
-			"%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
+		dev_err(hba->dev, "%s: is_physical_coding_sublayer_ready() failed, ret = %d\n",
 			__func__, ret);
 
 	ufs_qcom_select_unipro_mode(host);
@@ -319,15 +308,43 @@
  * in a specific operation, UTP controller CGCs are by default disabled and
  * this function enables them (after every UFS link startup) to save some power
  * leakage.
+ *
+ * UFS host controller v3.0.0 onwards has an internal clock gating mechanism
+ * in Qunipro; enable it to save additional power.
  */
-static void ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
+static int ufs_qcom_enable_hw_clk_gating(struct ufs_hba *hba)
 {
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
+
+	/* Enable UTP internal clock gating */
 	ufshcd_writel(hba,
 		ufshcd_readl(hba, REG_UFS_CFG2) | REG_UFS_CFG2_CGC_EN_ALL,
 		REG_UFS_CFG2);
 
 	/* Ensure that HW clock gating is enabled before next operations */
 	mb();
+
+	/* Enable Qunipro internal clock gating if supported */
+	if (!ufs_qcom_cap_qunipro_clk_gating(host))
+		goto out;
+
+	/* Enable all the mask bits */
+	err = ufshcd_dme_rmw(hba, DL_VS_CLK_CFG_MASK,
+				DL_VS_CLK_CFG_MASK, DL_VS_CLK_CFG);
+	if (err)
+		goto out;
+
+	err = ufshcd_dme_rmw(hba, PA_VS_CLK_CFG_REG_MASK,
+				PA_VS_CLK_CFG_REG_MASK, PA_VS_CLK_CFG_REG);
+	if (err)
+		goto out;
+
+	err = ufshcd_dme_rmw(hba, DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+				DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN,
+				DME_VS_CORE_CLK_CTRL);
+out:
+	return err;
 }
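
The Qunipro clock-gating path above relies on ufshcd_dme_rmw(), a read-modify-write helper over DME attributes which this patch expects the core UFS driver to provide (its definition is not shown in this hunk). A minimal sketch of what such a helper could look like, built only from the existing ufshcd_dme_get()/ufshcd_dme_set() accessors; the argument order follows the calls above and is otherwise an assumption:

	static int ufs_qcom_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr)
	{
		u32 cur;
		int err;

		err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cur);
		if (err)
			return err;

		cur &= ~mask;		/* clear the field */
		cur |= (val & mask);	/* set the requested bits */

		return ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cur);
	}
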
 
 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
@@ -345,12 +362,19 @@
 		 * is initialized.
 		 */
 		err = ufs_qcom_enable_lane_clks(host);
+		if (!err && host->ice.pdev) {
+			err = ufs_qcom_ice_init(host);
+			if (err) {
+				dev_err(hba->dev, "%s: ICE init failed (%d)\n",
+					__func__, err);
+				err = -EINVAL;
+			}
+		}
+
 		break;
 	case POST_CHANGE:
 		/* check if UFS PHY moved from DISABLED to HIBERN8 */
 		err = ufs_qcom_check_hibern8(hba);
-		ufs_qcom_enable_hw_clk_gating(hba);
-
 		break;
 	default:
 		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
@@ -398,9 +422,11 @@
 	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
 	 * UFS_REG_PA_LINK_STARTUP_TIMER
 	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
-	 * Aggregation logic.
+	 * Aggregation / Auto hibern8 logic.
 	*/
-	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+	if (ufs_qcom_cap_qunipro(host) &&
+	    (!(ufshcd_is_intr_aggr_allowed(hba) ||
+	       ufshcd_is_auto_hibern8_supported(hba))))
 		goto out;
 
 	if (gear == 0) {
@@ -507,51 +533,136 @@
 	return ret;
 }
 
+static int ufs_qcom_link_startup_pre_change(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+	u32 unipro_ver;
+	int err = 0;
+
+	if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE, 0, true)) {
+		dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
+			__func__);
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* make sure RX LineCfg is enabled before link startup */
+	err = ufs_qcom_phy_ctrl_rx_linecfg(phy, true);
+	if (err)
+		goto out;
+
+	if (ufs_qcom_cap_qunipro(host)) {
+		/*
+		 * set unipro core clock cycles to 150 & clear clock divider
+		 */
+		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+		if (err)
+			goto out;
+	}
+
+	err = ufs_qcom_enable_hw_clk_gating(hba);
+	if (err)
+		goto out;
+
+	/*
+	 * Some UFS devices (and possibly the host) have issues if LCC is
+	 * enabled, so set PA_Local_TX_LCC_Enable to 0 before link startup.
+	 * This makes sure that both host and device TX LCC are disabled
+	 * once link startup is completed.
+	 */
+	unipro_ver = ufshcd_get_local_unipro_ver(hba);
+	if (unipro_ver != UFS_UNIPRO_VER_1_41)
+		err = ufshcd_dme_set(hba,
+				     UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
+				     0);
+	if (err)
+		goto out;
+
+	if (!ufs_qcom_cap_qunipro_clk_gating(host))
+		goto out;
+
+	/* Enable all the mask bits */
+	err = ufshcd_dme_rmw(hba, SAVECONFIGTIME_MODE_MASK,
+				SAVECONFIGTIME_MODE_MASK,
+				PA_VS_CONFIG_REG1);
+out:
+	return err;
+}
+
+static int ufs_qcom_link_startup_post_change(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+	u32 tx_lanes;
+	int err = 0;
+
+	err = ufs_qcom_get_connected_tx_lanes(hba, &tx_lanes);
+	if (err)
+		goto out;
+
+	err = ufs_qcom_phy_set_tx_lane_enable(phy, tx_lanes);
+	if (err) {
+		dev_err(hba->dev, "%s: ufs_qcom_phy_set_tx_lane_enable failed\n",
+			__func__);
+		goto out;
+	}
+
+	/*
+	 * Some UFS devices send incorrect LineCfg data as part of the power
+	 * mode change sequence, which may cause the host PHY to go into a bad
+	 * state. Disabling Rx LineCfg of the host PHY should help avoid this.
+	 */
+	if (ufshcd_get_local_unipro_ver(hba) == UFS_UNIPRO_VER_1_41)
+		err = ufs_qcom_phy_ctrl_rx_linecfg(phy, false);
+	if (err) {
+		dev_err(hba->dev, "%s: ufs_qcom_phy_ctrl_rx_linecfg failed\n",
+			__func__);
+		goto out;
+	}
+
+	/*
+	 * The UFS controller has a *clk_req output to GCC for each one of the
+	 * clocks entering it. When the *clk_req for a specific clock is
+	 * de-asserted, the corresponding clock from GCC is stopped. The UFS
+	 * controller de-asserts its *clk_req outputs in the Auto Hibernate
+	 * state only if the Clock request feature is enabled.
+	 * Enable the Clock request feature:
+	 * - Enable HW clock control for UFS clocks in GCC (handled by the
+	 *   clock driver as part of clk_prepare_enable).
+	 * - Set the AH8_CFG.*CLK_REQ register bits to 1.
+	 */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		ufshcd_writel(hba, ufshcd_readl(hba, UFS_AH8_CFG) |
+				   UFS_HW_CLK_CTRL_EN,
+				   UFS_AH8_CFG);
+	/*
+	 * Make sure clock request feature gets enabled for HW clk gating
+	 * before further operations.
+	 */
+	mb();
+
+out:
+	return err;
+}
+
 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 					enum ufs_notify_change_status status)
 {
 	int err = 0;
-	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
 	switch (status) {
 	case PRE_CHANGE:
-		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
-					0, true)) {
-			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
-				__func__);
-			err = -EINVAL;
-			goto out;
-		}
-
-		if (ufs_qcom_cap_qunipro(host))
-			/*
-			 * set unipro core clock cycles to 150 & clear clock
-			 * divider
-			 */
-			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
-									  150);
-
-		/*
-		 * Some UFS devices (and may be host) have issues if LCC is
-		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
-		 * before link startup which will make sure that both host
-		 * and device TX LCC are disabled once link startup is
-		 * completed.
-		 */
-		if (ufshcd_get_local_unipro_ver(hba) != UFS_UNIPRO_VER_1_41)
-			err = ufshcd_dme_set(hba,
-					UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE),
-					0);
-
+		err = ufs_qcom_link_startup_pre_change(hba);
 		break;
 	case POST_CHANGE:
-		ufs_qcom_link_startup_post_change(hba);
+		err = ufs_qcom_link_startup_post_change(hba);
 		break;
 	default:
 		break;
 	}
 
-out:
 	return err;
 }
 
@@ -569,6 +680,10 @@
 		 */
 		ufs_qcom_disable_lane_clks(host);
 		phy_power_off(phy);
+		ret = ufs_qcom_ice_suspend(host);
+		if (ret)
+			dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
+					__func__, ret);
 
 		/* Assert PHY soft reset */
 		ufs_qcom_assert_reset(hba);
@@ -582,8 +697,12 @@
 	if (!ufs_qcom_is_link_active(hba)) {
 		ufs_qcom_disable_lane_clks(host);
 		phy_power_off(phy);
+		ufs_qcom_ice_suspend(host);
 	}
 
+	/* Unvote PM QoS */
+	ufs_qcom_pm_qos_suspend(host);
+
 out:
 	return ret;
 }
@@ -605,12 +724,108 @@
 	if (err)
 		goto out;
 
+	err = ufs_qcom_ice_resume(host);
+	if (err) {
+		dev_err(hba->dev, "%s: ufs_qcom_ice_resume failed, err = %d\n",
+			__func__, err);
+		goto out;
+	}
+
 	hba->is_sys_suspended = false;
 
 out:
 	return err;
 }
 
+static int ufs_qcom_full_reset(struct ufs_hba *hba)
+{
+	return -ENOTSUPP;
+}
+
+#ifdef CONFIG_SCSI_UFS_QCOM_ICE
+static int ufs_qcom_crypto_req_setup(struct ufs_hba *hba,
+	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct request *req;
+	int ret;
+
+	if (lrbp->cmd && lrbp->cmd->request)
+		req = lrbp->cmd->request;
+	else
+		return 0;
+
+	/* Use request LBA as the DUN value */
+	if (req->bio)
+		*dun = req->bio->bi_iter.bi_sector;
+
+	ret = ufs_qcom_ice_req_setup(host, lrbp->cmd, cc_index, enable);
+
+	return ret;
+}
+
+static
+int ufs_qcom_crytpo_engine_cfg_start(struct ufs_hba *hba, unsigned int task_tag)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufshcd_lrb *lrbp = &hba->lrb[task_tag];
+	int err = 0;
+
+	if (!host->ice.pdev ||
+	    !lrbp->cmd || lrbp->command_type != UTP_CMD_TYPE_SCSI)
+		goto out;
+
+	err = ufs_qcom_ice_cfg_start(host, lrbp->cmd);
+out:
+	return err;
+}
+
+static
+int ufs_qcom_crytpo_engine_cfg_end(struct ufs_hba *hba,
+		struct ufshcd_lrb *lrbp, struct request *req)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
+
+	if (!host->ice.pdev || lrbp->command_type != UTP_CMD_TYPE_SCSI)
+		goto out;
+
+	err = ufs_qcom_ice_cfg_end(host, req);
+out:
+	return err;
+}
+
+static
+int ufs_qcom_crytpo_engine_reset(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
+
+	if (!host->ice.pdev)
+		goto out;
+
+	err = ufs_qcom_ice_reset(host);
+out:
+	return err;
+}
+
+static int ufs_qcom_crypto_engine_get_status(struct ufs_hba *hba, u32 *status)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (!status)
+		return -EINVAL;
+
+	return ufs_qcom_ice_get_status(host, status);
+}
+#else /* !CONFIG_SCSI_UFS_QCOM_ICE */
+#define ufs_qcom_crypto_req_setup		NULL
+#define ufs_qcom_crytpo_engine_cfg_start	NULL
+#define ufs_qcom_crytpo_engine_cfg_end		NULL
+#define ufs_qcom_crytpo_engine_reset		NULL
+#define ufs_qcom_crypto_engine_get_status	NULL
+#endif /* CONFIG_SCSI_UFS_QCOM_ICE */
+
 struct ufs_qcom_dev_params {
 	u32 pwm_rx_gear;	/* pwm rx gear to work in */
 	u32 pwm_tx_gear;	/* pwm tx gear to work in */
@@ -709,7 +924,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_MSM_BUS_SCALING
+#ifdef CONFIG_QCOM_BUS_SCALING
 static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
 		const char *speed_mode)
 {
@@ -875,7 +1090,7 @@
 out:
 	return err;
 }
-#else /* CONFIG_MSM_BUS_SCALING */
+#else /* CONFIG_QCOM_BUS_SCALING */
 static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
 {
 	return 0;
@@ -890,7 +1105,10 @@
 {
 	return 0;
 }
-#endif /* CONFIG_MSM_BUS_SCALING */
+static inline void msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+#endif /* CONFIG_QCOM_BUS_SCALING */
 
 static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
 {
@@ -1031,6 +1249,34 @@
 	return ret;
 }
 
+static int ufs_qcom_quirk_host_pa_saveconfigtime(struct ufs_hba *hba)
+{
+	int err;
+	u32 pa_vs_config_reg1;
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+			     &pa_vs_config_reg1);
+	if (err)
+		goto out;
+
+	/* Allow extension of MSB bits of PA_SaveConfigTime attribute */
+	err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CONFIG_REG1),
+			    (pa_vs_config_reg1 | (1 << 12)));
+
+out:
+	return err;
+}
+
+static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
+		err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
+
+	return err;
+}
+
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1054,18 +1300,18 @@
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	if (host->hw_ver.major == 0x01) {
-		hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-			    | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
-			    | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
+	if (host->hw_ver.major == 0x1) {
+		hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
+			      | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
+			      | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE);
 
-		if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
+		if (host->hw_ver.minor == 0x001 && host->hw_ver.step == 0x0001)
 			hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
 
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
 	}
 
-	if (host->hw_ver.major >= 0x2) {
+	if (host->hw_ver.major == 0x2) {
 		hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
 		if (!ufs_qcom_cap_qunipro(host))
@@ -1074,30 +1320,56 @@
 				| UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
 				| UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
 	}
+
+	if (host->disable_lpm)
+		hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
 }
 
 static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-	hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
-	hba->caps |= UFSHCD_CAP_CLK_SCALING;
+	if (!host->disable_lpm) {
+		hba->caps |= UFSHCD_CAP_CLK_GATING;
+		hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+		hba->caps |= UFSHCD_CAP_CLK_SCALING;
+	}
 	hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 
 	if (host->hw_ver.major >= 0x2) {
+		if (!host->disable_lpm)
+			hba->caps |= UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8;
 		host->caps = UFS_QCOM_CAP_QUNIPRO |
 			     UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE;
 	}
+	if (host->hw_ver.major >= 0x3) {
+		host->caps |= UFS_QCOM_CAP_QUNIPRO_CLK_GATING;
+		/*
+		 * The UFS PHY attached to the v3.0.0 controller supports
+		 * entering the deeper SVS2 low power state, which lets the
+		 * controller run at much lower clock frequencies to save
+		 * power. We assume this and any future revisions of the
+		 * controller support this capability; revisit this assumption
+		 * if a future platform with this core doesn't support it, as
+		 * there would be no benefit in running at lower frequencies
+		 * then.
+		 */
+		host->caps |= UFS_QCOM_CAP_SVS2;
+	}
 }
 
 /**
  * ufs_qcom_setup_clocks - enables/disable clocks
  * @hba: host controller instance
  * @on: If true, enable clocks else disable them.
+ * @is_gating_context: If true, this function is called from the aggressive
+ * clock gating context and only the important clocks may need to be gated
+ * off. If false, make sure to gate off all clocks.
  *
  * Returns 0 on success, non-zero on failure.
  */
-static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
+				 bool is_gating_context)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	int err;
@@ -1130,14 +1402,23 @@
 		if (vote == host->bus_vote.min_bw_vote)
 			ufs_qcom_update_bus_bw_vote(host);
 
+		err = ufs_qcom_ice_resume(host);
+		if (err)
+			goto out;
 	} else {
+		err = ufs_qcom_ice_suspend(host);
+		if (err)
+			goto out;
 
 		/* M-PHY RMMI interface clocks can be turned off */
 		ufs_qcom_phy_disable_iface_clk(host->generic_phy);
-		if (!ufs_qcom_is_link_active(hba))
+		if (!ufs_qcom_is_link_active(hba)) {
+			if (!is_gating_context)
+				/* turn off UFS local PHY ref_clk */
+				ufs_qcom_phy_disable_ref_clk(host->generic_phy);
 			/* disable device ref_clk */
 			ufs_qcom_dev_ref_clk_ctrl(host, false);
-
+		}
 		vote = host->bus_vote.min_bw_vote;
 	}
 
@@ -1150,6 +1431,395 @@
 	return err;
 }
 
+#ifdef CONFIG_SMP /* CONFIG_SMP */
+static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
+{
+	int i;
+
+	if (cpu >= 0 && cpu < num_possible_cpus())
+		for (i = 0; i < host->pm_qos.num_groups; i++)
+			if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
+				return i;
+
+	return host->pm_qos.default_cpu;
+}
+
+static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
+{
+	unsigned long flags;
+	struct ufs_qcom_host *host;
+	struct ufs_qcom_pm_qos_cpu_group *group;
+
+	if (!hba || !req)
+		return;
+
+	host = ufshcd_get_variant(hba);
+	if (!host->pm_qos.groups)
+		return;
+
+	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!host->pm_qos.is_enabled)
+		goto out;
+
+	group->active_reqs++;
+	if (group->state != PM_QOS_REQ_VOTE &&
+			group->state != PM_QOS_VOTED) {
+		group->state = PM_QOS_REQ_VOTE;
+		queue_work(host->pm_qos.workq, &group->vote_work);
+	}
+out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+/* hba->host->host_lock is assumed to be held by caller */
+static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
+{
+	struct ufs_qcom_pm_qos_cpu_group *group;
+
+	if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
+		return;
+
+	group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
+
+	if (--group->active_reqs)
+		return;
+	group->state = PM_QOS_REQ_UNVOTE;
+	queue_work(host->pm_qos.workq, &group->unvote_work);
+}
+
+static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
+	bool should_lock)
+{
+	unsigned long flags = 0;
+
+	if (!hba || !req)
+		return;
+
+	if (should_lock)
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
+	if (should_lock)
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
+{
+	struct ufs_qcom_pm_qos_cpu_group *group =
+		container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
+	struct ufs_qcom_host *host = group->host;
+	unsigned long flags;
+
+	spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+	if (!host->pm_qos.is_enabled || !group->active_reqs) {
+		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+		return;
+	}
+
+	group->state = PM_QOS_VOTED;
+	spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+	pm_qos_update_request(&group->req, group->latency_us);
+}
+
+static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
+{
+	struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
+		struct ufs_qcom_pm_qos_cpu_group, unvote_work);
+	struct ufs_qcom_host *host = group->host;
+	unsigned long flags;
+
+	/*
+	 * Check if new requests were submitted in the meantime and do not
+	 * unvote if so.
+	 */
+	spin_lock_irqsave(host->hba->host->host_lock, flags);
+
+	if (!host->pm_qos.is_enabled || group->active_reqs) {
+		spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+		return;
+	}
+
+	group->state = PM_QOS_UNVOTED;
+	spin_unlock_irqrestore(host->hba->host->host_lock, flags);
+
+	pm_qos_update_request_timeout(&group->req,
+		group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
+}
+
+static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
+}
+
+static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	unsigned long value;
+	unsigned long flags;
+	bool enable;
+	int i;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	enable = !!value;
+
+	/*
+	 * Must take the spinlock and save irqs before changing the enabled
+	 * flag in order to keep correctness of PM QoS release.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (enable == host->pm_qos.is_enabled) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return count;
+	}
+	host->pm_qos.is_enabled = enable;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (!enable)
+		for (i = 0; i < host->pm_qos.num_groups; i++) {
+			cancel_work_sync(&host->pm_qos.groups[i].vote_work);
+			cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
+			host->pm_qos.groups[i].active_reqs = 0;
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			pm_qos_update_request(&host->pm_qos.groups[i].req,
+				PM_QOS_DEFAULT_VALUE);
+		}
+
+	return count;
+}
+
+static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int ret;
+	int i;
+	int offset = 0;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++) {
+		ret = snprintf(&buf[offset], PAGE_SIZE - offset,
+			"cpu group #%d(mask=0x%lx): %d\n", i,
+			host->pm_qos.groups[i].mask.bits[0],
+			host->pm_qos.groups[i].latency_us);
+		if (ret > 0)
+			offset += ret;
+		else
+			break;
+	}
+
+	return offset;
+}
+
+static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev->parent);
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	unsigned long value;
+	unsigned long flags;
+	char *strbuf;
+	char *strbuf_copy;
+	char *token;
+	int i;
+	int ret;
+
+	/* reserve one byte for null termination */
+	strbuf = kmalloc(count + 1, GFP_KERNEL);
+	if (!strbuf)
+		return -ENOMEM;
+	strbuf_copy = strbuf;
+	strlcpy(strbuf, buf, count + 1);
+
+	for (i = 0; i < host->pm_qos.num_groups; i++) {
+		token = strsep(&strbuf, ",");
+		if (!token)
+			break;
+
+		ret = kstrtoul(token, 0, &value);
+		if (ret)
+			break;
+
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		host->pm_qos.groups[i].latency_us = value;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+
+	kfree(strbuf_copy);
+	return count;
+}
+
+static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
+{
+	struct device_node *node = host->hba->dev->of_node;
+	struct device_attribute *attr;
+	int ret = 0;
+	int num_groups;
+	int num_values;
+	char wq_name[sizeof("ufs_pm_qos_00")];
+	int i;
+
+	num_groups = of_property_count_u32_elems(node,
+		"qcom,pm-qos-cpu-groups");
+	if (num_groups <= 0)
+		goto no_pm_qos;
+
+	num_values = of_property_count_u32_elems(node,
+		"qcom,pm-qos-cpu-group-latency-us");
+	if (num_values <= 0)
+		goto no_pm_qos;
+
+	if (num_values != num_groups || num_groups > num_possible_cpus()) {
+		dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
+			__func__, num_groups, num_values, num_possible_cpus());
+		goto no_pm_qos;
+	}
+
+	host->pm_qos.num_groups = num_groups;
+	host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
+			sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
+	if (!host->pm_qos.groups)
+		return -ENOMEM;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++) {
+		u32 mask;
+
+		ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
+			i, &mask);
+		if (ret)
+			goto free_groups;
+		host->pm_qos.groups[i].mask.bits[0] = mask;
+		if (!cpumask_subset(&host->pm_qos.groups[i].mask,
+			cpu_possible_mask)) {
+			dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
+				__func__, mask);
+			goto free_groups;
+		}
+
+		ret = of_property_read_u32_index(node,
+			"qcom,pm-qos-cpu-group-latency-us", i,
+			&host->pm_qos.groups[i].latency_us);
+		if (ret)
+			goto free_groups;
+
+		host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
+		host->pm_qos.groups[i].req.cpus_affine =
+			host->pm_qos.groups[i].mask;
+		host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
+		host->pm_qos.groups[i].active_reqs = 0;
+		host->pm_qos.groups[i].host = host;
+
+		INIT_WORK(&host->pm_qos.groups[i].vote_work,
+			ufs_qcom_pm_qos_vote_work);
+		INIT_WORK(&host->pm_qos.groups[i].unvote_work,
+			ufs_qcom_pm_qos_unvote_work);
+	}
+
+	ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
+		&host->pm_qos.default_cpu);
+	if (ret || host->pm_qos.default_cpu > num_possible_cpus())
+		host->pm_qos.default_cpu = 0;
+
+	/*
+	 * Use a single-threaded workqueue to ensure that work submitted to
+	 * the queue is performed in order. Consider the following 2 possible
+	 * cases:
+	 *
+	 * 1. A new request arrives and voting work is scheduled for it. Before
+	 *    the voting work is performed the request is finished and unvote
+	 *    work is also scheduled.
+	 * 2. A request is finished and unvote work is scheduled. Before the
+	 *    work is performed a new request arrives and voting work is also
+	 *    scheduled.
+	 *
+	 * In both cases a vote work and unvote work wait to be performed.
+	 * If ordering is not guaranteed, then the end state might be the
+	 * opposite of the desired state.
+	 */
+	snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
+		host->hba->host->host_no);
+	host->pm_qos.workq = create_singlethread_workqueue(wq_name);
+	if (!host->pm_qos.workq) {
+		dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
+				__func__);
+		ret = -ENOMEM;
+		goto free_groups;
+	}
+
+	/* Initialization was ok, add all PM QoS requests */
+	for (i = 0; i < host->pm_qos.num_groups; i++)
+		pm_qos_add_request(&host->pm_qos.groups[i].req,
+			PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
+
+	/* PM QoS latency sys-fs attribute */
+	attr = &host->pm_qos.latency_attr;
+	attr->show = ufs_qcom_pm_qos_latency_show;
+	attr->store = ufs_qcom_pm_qos_latency_store;
+	sysfs_attr_init(&attr->attr);
+	attr->attr.name = "pm_qos_latency_us";
+	attr->attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(host->hba->var->dev, attr))
+		dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
+
+	/* PM QoS enable sys-fs attribute */
+	attr = &host->pm_qos.enable_attr;
+	attr->show = ufs_qcom_pm_qos_enable_show;
+	attr->store = ufs_qcom_pm_qos_enable_store;
+	sysfs_attr_init(&attr->attr);
+	attr->attr.name = "pm_qos_enable";
+	attr->attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(host->hba->var->dev, attr))
+		dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
+
+	host->pm_qos.is_enabled = true;
+
+	return 0;
+
+free_groups:
+	kfree(host->pm_qos.groups);
+no_pm_qos:
+	host->pm_qos.groups = NULL;
+	return ret ? ret : -ENOTSUPP;
+}
+
+static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
+{
+	int i;
+
+	if (!host->pm_qos.groups)
+		return;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++)
+		flush_work(&host->pm_qos.groups[i].unvote_work);
+}
+
+static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
+{
+	int i;
+
+	if (!host->pm_qos.groups)
+		return;
+
+	for (i = 0; i < host->pm_qos.num_groups; i++)
+		pm_qos_remove_request(&host->pm_qos.groups[i].req);
+	destroy_workqueue(host->pm_qos.workq);
+
+	kfree(host->pm_qos.groups);
+	host->pm_qos.groups = NULL;
+}
+#endif /* CONFIG_SMP */
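
The per-group voting above is a four-state machine driven by request arrival/completion and by the two work handlers. A compact model of the transitions, condensed from ufs_qcom_pm_qos_req_start(), __ufs_qcom_pm_qos_req_end() and the vote/unvote work functions; the event enum and the function are hypothetical and deliberately ignore the locking and the active-request counter kept by the real code:

	enum pm_qos_event {
		REQ_STARTED,		/* a request arrived for the group */
		LAST_REQ_FINISHED,	/* active_reqs dropped to zero */
		VOTE_WORK_RAN,		/* vote work ran with requests pending */
		UNVOTE_WORK_RAN,	/* unvote work ran with the group idle */
	};

	static enum ufs_qcom_pm_qos_state
	pm_qos_next_state(enum ufs_qcom_pm_qos_state cur, enum pm_qos_event ev)
	{
		switch (ev) {
		case REQ_STARTED:
			return cur == PM_QOS_VOTED ? PM_QOS_VOTED : PM_QOS_REQ_VOTE;
		case LAST_REQ_FINISHED:
			return PM_QOS_REQ_UNVOTE;
		case VOTE_WORK_RAN:
			return PM_QOS_VOTED;
		case UNVOTE_WORK_RAN:
			return PM_QOS_UNVOTED;
		}
		return cur;
	}

Because the workqueue is single threaded and ordered, vote and unvote work items run in the order they were queued, so each group always ends up in the state requested last.
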
+
 #define	ANDROID_BOOT_DEV_MAX	30
 static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
 
@@ -1162,6 +1832,18 @@
 __setup("androidboot.bootdevice=", get_android_boot_dev);
 #endif
 
+/*
+ * ufs_qcom_parse_lpm - read from DTS whether LPM modes should be disabled.
+ */
+static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
+{
+	struct device_node *node = host->hba->dev->of_node;
+
+	host->disable_lpm = of_property_read_bool(node, "qcom,disable-lpm");
+	if (host->disable_lpm)
+		pr_info("%s: will disable all LPM modes\n", __func__);
+}
+
 /**
  * ufs_qcom_init - bind phy with controller
  * @hba: host controller instance
@@ -1199,14 +1881,56 @@
 	 * skip devoting it during aggressive clock gating. This clock
 	 * will still be gated off during runtime suspend.
 	 */
+	hba->no_ref_clk_gating = true;
+
+	err = ufs_qcom_ice_get_dev(host);
+	if (err == -EPROBE_DEFER) {
+		/*
+		 * The UFS driver might be probed before the ICE driver is.
+		 * In that case return -EPROBE_DEFER in order to delay the
+		 * UFS probe.
+		 */
+		dev_err(dev, "%s: required ICE device not probed yet err = %d\n",
+			__func__, err);
+		goto out_host_free;
+
+	} else if (err == -ENODEV) {
+		/*
+		 * ICE device is not enabled in DTS file. No need for further
+		 * initialization of ICE driver.
+		 */
+		dev_warn(dev, "%s: ICE device is not enabled",
+			__func__);
+	} else if (err) {
+		dev_err(dev, "%s: ufs_qcom_ice_get_dev failed %d\n",
+			__func__, err);
+		goto out_host_free;
+	}
+
 	host->generic_phy = devm_phy_get(dev, "ufsphy");
 
-	if (IS_ERR(host->generic_phy)) {
+	if (host->generic_phy == ERR_PTR(-EPROBE_DEFER)) {
+		/*
+		 * The UFS driver might be probed before the phy driver is.
+		 * In that case return -EPROBE_DEFER.
+		 */
+		err = -EPROBE_DEFER;
+		dev_warn(dev, "%s: required phy device. hasn't probed yet. err = %d\n",
+			__func__, err);
+		goto out_host_free;
+	} else if (IS_ERR(host->generic_phy)) {
 		err = PTR_ERR(host->generic_phy);
 		dev_err(dev, "%s: PHY get failed %d\n", __func__, err);
 		goto out;
 	}
 
+	err = ufs_qcom_pm_qos_init(host);
+	if (err)
+		dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
+
+	/* restore the secure configuration */
+	ufs_qcom_update_sec_cfg(hba, true);
+
 	err = ufs_qcom_bus_register(host);
 	if (err)
 		goto out_host_free;
@@ -1251,10 +1975,13 @@
 	if (err)
 		goto out_disable_phy;
 
+	ufs_qcom_parse_lpm(host);
+	if (host->disable_lpm)
+		pm_runtime_forbid(host->hba->dev);
 	ufs_qcom_set_caps(hba);
 	ufs_qcom_advertise_quirks(hba);
 
-	ufs_qcom_setup_clocks(hba, true);
+	ufs_qcom_setup_clocks(hba, true, false);
 
 	if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
 		ufs_qcom_hosts[hba->dev->id] = host;
@@ -1274,6 +2001,7 @@
 	phy_power_off(host->generic_phy);
 out_unregister_bus:
 	phy_exit(host->generic_phy);
+	msm_bus_scale_unregister_client(host->bus_vote.client_handle);
 out_host_free:
 	devm_kfree(dev, host);
 	ufshcd_set_variant(hba, NULL);
@@ -1285,8 +2013,10 @@
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
+	msm_bus_scale_unregister_client(host->bus_vote.client_handle);
 	ufs_qcom_disable_lane_clks(host);
 	phy_power_off(host->generic_phy);
+	ufs_qcom_pm_qos_remove(host);
 }
 
 static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
@@ -1317,10 +2047,41 @@
 	return err;
 }
 
+static inline int ufs_qcom_configure_lpm(struct ufs_hba *hba, bool enable)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct phy *phy = host->generic_phy;
+	int err = 0;
+
+	/* The default low power mode configuration is SVS2 */
+	if (!ufs_qcom_cap_svs2(host))
+		goto out;
+
+	/*
+	 * The link should be put in hibern8 state before
+	 * configuring the PHY to enter/exit SVS2 mode.
+	 */
+	err = ufshcd_uic_hibern8_enter(hba);
+	if (err)
+		goto out;
+
+	err = ufs_qcom_phy_configure_lpm(phy, enable);
+	if (err)
+		goto out;
+
+	err = ufshcd_uic_hibern8_exit(hba);
+out:
+	return err;
+}
+
 static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 {
-	/* nothing to do as of now */
-	return 0;
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (!ufs_qcom_cap_qunipro(host))
+		return 0;
+
+	return ufs_qcom_configure_lpm(hba, false);
 }
 
 static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
@@ -1337,11 +2098,15 @@
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	int err;
 	u32 core_clk_ctrl_reg;
+	int err = 0;
 
 	if (!ufs_qcom_cap_qunipro(host))
-		return 0;
+		goto out;
+
+	err = ufs_qcom_configure_lpm(hba, true);
+	if (err)
+		goto out;
 
 	err = ufshcd_dme_get(hba,
 			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
@@ -1355,19 +2120,32 @@
 				    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
 				    core_clk_ctrl_reg);
 	}
-
+out:
 	return err;
 }
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	int err = 0;
 
 	if (!ufs_qcom_cap_qunipro(host))
 		return 0;
 
-	/* set unipro core clock cycles to 75 and clear clock divider */
-	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+	if (ufs_qcom_cap_svs2(host))
+		/*
+		 * For SVS2 set unipro core clock cycles to 37 and
+		 * clear clock divider
+		 */
+		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 37);
+	else
+		/*
+		 * For SVS set unipro core clock cycles to 75 and
+		 * clear clock divider
+		 */
+		err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+
+	return err;
 }
 
 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
@@ -1377,12 +2155,14 @@
 	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
 	int err = 0;
 
-	if (status == PRE_CHANGE) {
+	switch (status) {
+	case PRE_CHANGE:
 		if (scale_up)
 			err = ufs_qcom_clk_scale_up_pre_change(hba);
 		else
 			err = ufs_qcom_clk_scale_down_pre_change(hba);
-	} else {
+		break;
+	case POST_CHANGE:
 		if (scale_up)
 			err = ufs_qcom_clk_scale_up_post_change(hba);
 		else
@@ -1397,15 +2177,44 @@
 				    dev_req_params->hs_rate,
 				    false);
 		ufs_qcom_update_bus_bw_vote(host);
+		break;
+	default:
+		dev_err(hba->dev, "%s: invalid status %d\n", __func__, status);
+		err = -EINVAL;
+		break;
 	}
 
 out:
 	return err;
 }
 
-static void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba,
-		void *priv, void (*print_fn)(struct ufs_hba *hba,
-		int offset, int num_regs, char *str, void *priv))
+/*
+ * This function should be called to restore the security configuration of UFS
+ * register space after coming out of UFS host core power collapse.
+ *
+ * @hba: host controller instance
+ * @restore_sec_cfg: Set "true" if secure configuration needs to be restored
+ * and set "false" when secure configuration is lost.
+ */
+static int ufs_qcom_update_sec_cfg(struct ufs_hba *hba, bool restore_sec_cfg)
+{
+	return 0;
+}
+
+
+static inline u32 ufs_qcom_get_scale_down_gear(struct ufs_hba *hba)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+	if (ufs_qcom_cap_svs2(host))
+		return UFS_HS_G1;
+	/* Default SVS support @ HS G2 frequencies */
+	return UFS_HS_G2;
+}
+
+void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
+		void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv))
 {
 	u32 reg;
 	struct ufs_qcom_host *host;
@@ -1439,7 +2248,8 @@
 	reg = ufs_qcom_get_debug_reg_offset(host, UFS_UFS_DBG_RD_PRDT_RAM);
 	print_fn(hba, reg, 64, "UFS_UFS_DBG_RD_PRDT_RAM ", priv);
 
-	ufshcd_writel(hba, (reg & ~UFS_BIT(17)), REG_UFS_CFG1);
+	/* clear bit 17 - UTP_DBG_RAMS_EN */
+	ufshcd_rmwl(hba, UFS_BIT(17), 0, REG_UFS_CFG1);
 
 	reg = ufs_qcom_get_debug_reg_offset(host, UFS_DBG_RD_REG_UAWM);
 	print_fn(hba, reg, 4, "UFS_DBG_RD_REG_UAWM ", priv);
@@ -1580,7 +2390,7 @@
 		    (u32)host->testbus.select_minor << offset,
 		    reg);
 	ufs_qcom_enable_test_bus(host);
-	ufshcd_release(host->hba);
+	ufshcd_release(host->hba, false);
 	pm_runtime_put_sync(host->hba->dev);
 
 	return 0;
@@ -1593,11 +2403,14 @@
 
 static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
 {
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
 	ufs_qcom_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16,
 			"HCI Vendor Specific Registers ");
 
 	ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
 	ufs_qcom_testbus_read(hba);
+	ufs_qcom_ice_print_regs(host);
 }
 
 /**
@@ -1607,7 +2420,6 @@
  * handshake during initialization.
  */
 static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
-	.name                   = "qcom",
 	.init                   = ufs_qcom_init,
 	.exit                   = ufs_qcom_exit,
 	.get_ufs_hci_version	= ufs_qcom_get_ufs_hci_version,
@@ -1616,9 +2428,36 @@
 	.hce_enable_notify      = ufs_qcom_hce_enable_notify,
 	.link_startup_notify    = ufs_qcom_link_startup_notify,
 	.pwr_change_notify	= ufs_qcom_pwr_change_notify,
+	.apply_dev_quirks	= ufs_qcom_apply_dev_quirks,
 	.suspend		= ufs_qcom_suspend,
 	.resume			= ufs_qcom_resume,
+	.full_reset		= ufs_qcom_full_reset,
+	.update_sec_cfg		= ufs_qcom_update_sec_cfg,
+	.get_scale_down_gear	= ufs_qcom_get_scale_down_gear,
 	.dbg_register_dump	= ufs_qcom_dump_dbg_regs,
+#ifdef CONFIG_DEBUG_FS
+	.add_debugfs		= ufs_qcom_dbg_add_debugfs,
+#endif
+};
+
+static struct ufs_hba_crypto_variant_ops ufs_hba_crypto_variant_ops = {
+	.crypto_req_setup	= ufs_qcom_crypto_req_setup,
+	.crypto_engine_cfg_start	= ufs_qcom_crytpo_engine_cfg_start,
+	.crypto_engine_cfg_end	= ufs_qcom_crytpo_engine_cfg_end,
+	.crypto_engine_reset	  = ufs_qcom_crytpo_engine_reset,
+	.crypto_engine_get_status = ufs_qcom_crypto_engine_get_status,
+};
+
+static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
+	.req_start	= ufs_qcom_pm_qos_req_start,
+	.req_end	= ufs_qcom_pm_qos_req_end,
+};
+
+static struct ufs_hba_variant ufs_hba_qcom_variant = {
+	.name		= "qcom",
+	.vops		= &ufs_hba_qcom_vops,
+	.crypto_vops	= &ufs_hba_crypto_variant_ops,
+	.pm_qos_vops	= &ufs_hba_pm_qos_variant_ops,
 };
 
 /**
@@ -1633,7 +2472,7 @@
 	struct device *dev = &pdev->dev;
 
 	/* Perform generic probe */
-	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_vops);
+	err = ufshcd_pltfrm_init(pdev, &ufs_hba_qcom_variant);
 	if (err)
 		dev_err(dev, "ufshcd_pltfrm_init() failed %d\n", err);
 
@@ -1644,7 +2483,7 @@
  * ufs_qcom_remove - set driver_data of the device to NULL
  * @pdev: pointer to platform device handle
  *
- * Always returns 0
+ * Always return 0
  */
 static int ufs_qcom_remove(struct platform_device *pdev)
 {
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index a19307a..ba36d98 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,10 @@
 #ifndef UFS_QCOM_H_
 #define UFS_QCOM_H_
 
+#include <linux/phy/phy.h>
+#include <linux/pm_qos.h>
+#include "ufshcd.h"
+
 #define MAX_UFS_QCOM_HOSTS	1
 #define MAX_U32                 (~(u32)0)
 #define MPHY_TX_FSM_STATE       0x41
@@ -71,6 +75,7 @@
 	UFS_AH8_CFG				= 0xFC,
 };
 
+
 /* QCOM UFS host controller vendor specific debug registers */
 enum {
 	UFS_DBG_RD_REG_UAWM			= 0x100,
@@ -114,6 +119,17 @@
 				 DFC_HW_CGC_EN | TRLUT_HW_CGC_EN |\
 				 TMRLUT_HW_CGC_EN | OCSC_HW_CGC_EN)
 
+/* bit definitions for UFS_AH8_CFG register */
+#define CC_UFS_HCLK_REQ_EN		BIT(1)
+#define CC_UFS_SYS_CLK_REQ_EN		BIT(2)
+#define CC_UFS_ICE_CORE_CLK_REQ_EN	BIT(3)
+#define CC_UFS_UNIPRO_CORE_CLK_REQ_EN	BIT(4)
+#define CC_UFS_AUXCLK_REQ_EN		BIT(5)
+
+#define UFS_HW_CLK_CTRL_EN	(CC_UFS_SYS_CLK_REQ_EN |\
+				 CC_UFS_ICE_CORE_CLK_REQ_EN |\
+				 CC_UFS_UNIPRO_CORE_CLK_REQ_EN |\
+				 CC_UFS_AUXCLK_REQ_EN)
 /* bit offset */
 enum {
 	OFFSET_UFS_PHY_SOFT_RESET           = 1,
@@ -142,10 +158,20 @@
 	 UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
 
 /* QUniPro Vendor specific attributes */
+#define PA_VS_CONFIG_REG1		0x9000
+#define SAVECONFIGTIME_MODE_MASK	0x6000
+
+#define PA_VS_CLK_CFG_REG	0x9004
+#define PA_VS_CLK_CFG_REG_MASK	0x1FF
+
+#define DL_VS_CLK_CFG		0xA00B
+#define DL_VS_CLK_CFG_MASK	0x3FF
+
 #define DME_VS_CORE_CLK_CTRL	0xD002
 /* bit and mask definitions for DME_VS_CORE_CLK_CTRL attribute */
-#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT		BIT(8)
 #define DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK	0xFF
+#define DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT		BIT(8)
+#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN			BIT(9)
 
 static inline void
 ufs_qcom_get_controller_revision(struct ufs_hba *hba,
@@ -192,6 +218,26 @@
 	struct device_attribute max_bus_bw;
 };
 
+/**
+ * struct ufs_qcom_ice_data - ICE related information
+ * @vops:	pointer to variant operations of ICE
+ * @async_done:	completion for supporting ICE's driver asynchronous nature
+ * @pdev:	pointer to the proper ICE platform device
+ * @state:	UFS-ICE interface's internal state (see
+ *		ufs-qcom-ice.h for possible internal states)
+ * @quirks:     UFS-ICE interface related quirks
+ * @crypto_engine_err: crypto engine errors
+ */
+struct ufs_qcom_ice_data {
+	struct qcom_ice_variant_ops *vops;
+	struct platform_device *pdev;
+	int state;
+
+	u16 quirks;
+
+	bool crypto_engine_err;
+};
+
 /* Host controller hardware version: major.minor.step */
 struct ufs_hw_version {
 	u16 step;
@@ -199,11 +245,76 @@
 	u8 major;
 };
 
+#ifdef CONFIG_DEBUG_FS
+struct qcom_debugfs_files {
+	struct dentry *debugfs_root;
+	struct dentry *dbg_print_en;
+	struct dentry *testbus;
+	struct dentry *testbus_en;
+	struct dentry *testbus_cfg;
+	struct dentry *testbus_bus;
+	struct dentry *dbg_regs;
+	struct dentry *pm_qos;
+};
+#endif
+
 struct ufs_qcom_testbus {
 	u8 select_major;
 	u8 select_minor;
 };
 
+/* PM QoS voting state  */
+enum ufs_qcom_pm_qos_state {
+	PM_QOS_UNVOTED,
+	PM_QOS_VOTED,
+	PM_QOS_REQ_VOTE,
+	PM_QOS_REQ_UNVOTE,
+};
+
+/**
+ * struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting
+ *	logic
+ * @req: request object for PM QoS
+ * @vote_work: work object for voting procedure
+ * @unvote_work: work object for un-voting procedure
+ * @host: back pointer to the main structure
+ * @state: voting state machine current state
+ * @latency_us: requested latency value used for cluster voting, in
+ *	microseconds
+ * @mask: cpu mask defined for this cluster
+ * @active_reqs: number of active requests on this cluster
+ */
+struct ufs_qcom_pm_qos_cpu_group {
+	struct pm_qos_request req;
+	struct work_struct vote_work;
+	struct work_struct unvote_work;
+	struct ufs_qcom_host *host;
+	enum ufs_qcom_pm_qos_state state;
+	s32 latency_us;
+	cpumask_t mask;
+	int active_reqs;
+};
+
+/**
+ * struct ufs_qcom_pm_qos - data related to PM QoS voting logic
+ * @groups: PM QoS cpu group state array
+ * @enable_attr: sysfs attribute to enable/disable PM QoS voting logic
+ * @latency_attr: sysfs attribute to set latency value
+ * @workq: single threaded workqueue to run PM QoS voting/unvoting
+ * @num_groups: number of cpu groups defined
+ * @default_cpu: cpu to use for voting for requests not specifying a cpu
+ * @is_enabled: flag specifying whether voting logic is enabled
+ */
+struct ufs_qcom_pm_qos {
+	struct ufs_qcom_pm_qos_cpu_group *groups;
+	struct device_attribute enable_attr;
+	struct device_attribute latency_attr;
+	struct workqueue_struct *workq;
+	int num_groups;
+	int default_cpu;
+	bool is_enabled;
+};
+
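/*
 * A minimal illustrative sketch (not part of the patch) of how the voting
 * state machine documented above could be driven when a request arrives.
 * The helper name and the exact locking are assumptions of this sketch; only
 * the states, the per-group cpumask lookup and the vote_work/unvote_work
 * split come from the structures defined here.
 */
static void ufs_qcom_pm_qos_vote_sketch(struct ufs_qcom_pm_qos *qos, int cpu)
{
	struct ufs_qcom_pm_qos_cpu_group *grp;
	int i;

	if (!qos->is_enabled)
		return;
	if (cpu < 0)
		cpu = qos->default_cpu;	/* request did not specify a cpu */

	for (i = 0; i < qos->num_groups; i++) {
		grp = &qos->groups[i];
		if (!cpumask_test_cpu(cpu, &grp->mask))
			continue;
		grp->active_reqs++;
		/* the first active request on an unvoted group kicks a vote */
		if (grp->state == PM_QOS_UNVOTED ||
		    grp->state == PM_QOS_REQ_UNVOTE) {
			grp->state = PM_QOS_REQ_VOTE;
			queue_work(qos->workq, &grp->vote_work);
		}
		break;
	}
}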
 struct ufs_qcom_host {
 	/*
 	 * Set this capability if host controller supports the QUniPro mode
@@ -218,6 +329,17 @@
 	 * configuration even after UFS controller core power collapse.
 	 */
 	#define UFS_QCOM_CAP_RETAIN_SEC_CFG_AFTER_PWR_COLLAPSE	UFS_BIT(1)
+
+	/*
+	 * Set this capability if host controller supports Qunipro internal
+	 * clock gating.
+	 */
+	#define UFS_QCOM_CAP_QUNIPRO_CLK_GATING		UFS_BIT(2)
+
+	/*
+	 * Set this capability if host controller supports SVS2 frequencies.
+	 */
+	#define UFS_QCOM_CAP_SVS2	UFS_BIT(3)
 	u32 caps;
 
 	struct phy *generic_phy;
@@ -228,17 +350,27 @@
 	struct clk *tx_l0_sync_clk;
 	struct clk *rx_l1_sync_clk;
 	struct clk *tx_l1_sync_clk;
-	bool is_lane_clks_enabled;
 
+	/* PM Quality-of-Service (QoS) data */
+	struct ufs_qcom_pm_qos pm_qos;
+
+	bool disable_lpm;
+	bool is_lane_clks_enabled;
+	bool sec_cfg_updated;
+	struct ufs_qcom_ice_data ice;
 	void __iomem *dev_ref_clk_ctrl_mmio;
 	bool is_dev_ref_clk_enabled;
 	struct ufs_hw_version hw_ver;
-
 	u32 dev_ref_clk_en_mask;
-
+#ifdef CONFIG_DEBUG_FS
+	struct qcom_debugfs_files debugfs_files;
+#endif
 	/* Bitmask for enabling debug prints */
 	u32 dbg_print_en;
 	struct ufs_qcom_testbus testbus;
+
+	struct work_struct ice_cfg_work;
+	struct request *req_pending;
 };
 
 static inline u32
@@ -255,6 +387,9 @@
 #define ufs_qcom_is_link_hibern8(hba) ufshcd_is_link_hibern8(hba)
 
 int ufs_qcom_testbus_config(struct ufs_qcom_host *host);
+void ufs_qcom_print_hw_debug_reg_all(struct ufs_hba *hba, void *priv,
+		void (*print_fn)(struct ufs_hba *hba, int offset, int num_regs,
+				char *str, void *priv));
 
 static inline bool ufs_qcom_cap_qunipro(struct ufs_qcom_host *host)
 {
@@ -264,4 +399,14 @@
 		return false;
 }
 
+static inline bool ufs_qcom_cap_qunipro_clk_gating(struct ufs_qcom_host *host)
+{
+	return !!(host->caps & UFS_QCOM_CAP_QUNIPRO_CLK_GATING);
+}
+
+static inline bool ufs_qcom_cap_svs2(struct ufs_qcom_host *host)
+{
+	return !!(host->caps & UFS_QCOM_CAP_SVS2);
+}
+
 #endif /* UFS_QCOM_H_ */
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 845b874..9afc0f4 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -38,6 +38,7 @@
 
 #include <linux/mutex.h>
 #include <linux/types.h>
+#include <scsi/ufs/ufs.h>
 
 #define MAX_CDB_SIZE	16
 #define GENERAL_UPIU_REQUEST_SIZE 32
@@ -46,6 +47,7 @@
 #define QUERY_DESC_HDR_SIZE       2
 #define QUERY_OSF_SIZE            (GENERAL_UPIU_REQUEST_SIZE - \
 					(sizeof(struct utp_upiu_header)))
+#define RESPONSE_UPIU_SENSE_DATA_LENGTH	18
 
 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
 			cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
@@ -72,6 +74,16 @@
 	UFS_UPIU_RPMB_WLUN		= 0xC4,
 };
 
+/**
+ * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor
+ * @lun: LU number to check
+ * @return: true if the lun has a matching unit descriptor, false otherwise
+ */
+static inline bool ufs_is_valid_unit_desc_lun(u8 lun)
+{
+	return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN);
+}
+
 /*
  * UFS Protocol Information Unit related definitions
  */
@@ -127,42 +139,13 @@
 	UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST          = 0x81,
 };
 
-/* Flag idn for Query Requests*/
-enum flag_idn {
-	QUERY_FLAG_IDN_FDEVICEINIT      = 0x01,
-	QUERY_FLAG_IDN_PWR_ON_WPE	= 0x03,
-	QUERY_FLAG_IDN_BKOPS_EN         = 0x04,
-};
-
-/* Attribute idn for Query requests */
-enum attr_idn {
-	QUERY_ATTR_IDN_ACTIVE_ICC_LVL	= 0x03,
-	QUERY_ATTR_IDN_BKOPS_STATUS	= 0x05,
-	QUERY_ATTR_IDN_EE_CONTROL	= 0x0D,
-	QUERY_ATTR_IDN_EE_STATUS	= 0x0E,
-};
-
-/* Descriptor idn for Query requests */
-enum desc_idn {
-	QUERY_DESC_IDN_DEVICE		= 0x0,
-	QUERY_DESC_IDN_CONFIGURAION	= 0x1,
-	QUERY_DESC_IDN_UNIT		= 0x2,
-	QUERY_DESC_IDN_RFU_0		= 0x3,
-	QUERY_DESC_IDN_INTERCONNECT	= 0x4,
-	QUERY_DESC_IDN_STRING		= 0x5,
-	QUERY_DESC_IDN_RFU_1		= 0x6,
-	QUERY_DESC_IDN_GEOMETRY		= 0x7,
-	QUERY_DESC_IDN_POWER		= 0x8,
-	QUERY_DESC_IDN_MAX,
-};
-
 enum desc_header_offset {
 	QUERY_DESC_LENGTH_OFFSET	= 0x00,
 	QUERY_DESC_DESC_TYPE_OFFSET	= 0x01,
 };
 
 enum ufs_desc_max_size {
-	QUERY_DESC_DEVICE_MAX_SIZE		= 0x1F,
+	QUERY_DESC_DEVICE_MAX_SIZE		= 0x40,
 	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
 	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
 	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
@@ -171,7 +154,7 @@
 	 * of descriptor header.
 	 */
 	QUERY_DESC_STRING_MAX_SIZE		= 0xFE,
-	QUERY_DESC_GEOMETRY_MAX_SIZE		= 0x44,
+	QUERY_DESC_GEOMETRY_MAZ_SIZE		= 0x44,
 	QUERY_DESC_POWER_MAX_SIZE		= 0x62,
 	QUERY_DESC_RFU_MAX_SIZE			= 0x00,
 };
@@ -226,7 +209,6 @@
 	DEVICE_DESC_PARAM_RTT_CAP		= 0x1C,
 	DEVICE_DESC_PARAM_FRQ_RTC		= 0x1D,
 };
-
 /*
  * Logical Unit Write Protect
  * 00h: LU not write protected
@@ -279,19 +261,6 @@
 	BKOPS_STATUS_MAX		 = BKOPS_STATUS_CRITICAL,
 };
 
-/* UTP QUERY Transaction Specific Fields OpCode */
-enum query_opcode {
-	UPIU_QUERY_OPCODE_NOP		= 0x0,
-	UPIU_QUERY_OPCODE_READ_DESC	= 0x1,
-	UPIU_QUERY_OPCODE_WRITE_DESC	= 0x2,
-	UPIU_QUERY_OPCODE_READ_ATTR	= 0x3,
-	UPIU_QUERY_OPCODE_WRITE_ATTR	= 0x4,
-	UPIU_QUERY_OPCODE_READ_FLAG	= 0x5,
-	UPIU_QUERY_OPCODE_SET_FLAG	= 0x6,
-	UPIU_QUERY_OPCODE_CLEAR_FLAG	= 0x7,
-	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
-};
-
 /* Query response result code */
 enum {
 	QUERY_RESULT_SUCCESS                    = 0x00,
@@ -416,7 +385,7 @@
 	__be32 residual_transfer_count;
 	__be32 reserved[4];
 	__be16 sense_data_len;
-	u8 sense_data[18];
+	u8 sense_data[RESPONSE_UPIU_SENSE_DATA_LENGTH];
 };
 
 /**
diff --git a/drivers/scsi/ufs/ufs_quirks.c b/drivers/scsi/ufs/ufs_quirks.c
new file mode 100644
index 0000000..3d33d1b
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_quirks.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include "ufshcd.h"
+#include "ufs_quirks.h"
+
+static struct ufs_card_fix ufs_fixups[] = {
+	/* UFS cards deviations table */
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_NO_FASTAUTO),
+	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
+		UFS_DEVICE_QUIRK_PA_TACTIVATE),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL,
+		UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME),
+	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+
+	END_FIX
+};
+
+static int ufs_get_device_info(struct ufs_hba *hba,
+				struct ufs_card_info *card_data)
+{
+	int err;
+	u8 model_index;
+	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1];
+	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+
+	err = ufshcd_read_device_desc(hba, desc_buf,
+					QUERY_DESC_DEVICE_MAX_SIZE);
+	if (err)
+		goto out;
+
+	/*
+	 * getting vendor (manufacturerID) and Bank Index in big endian
+	 * format
+	 */
+	card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
+				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
+
+	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
+
+	memset(str_desc_buf, 0, QUERY_DESC_STRING_MAX_SIZE);
+	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
+					QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+	if (err)
+		goto out;
+
+	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+	strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
+		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
+		      MAX_MODEL_LEN));
+	/* Null terminate the model string */
+	card_data->model[MAX_MODEL_LEN] = '\0';
+
+out:
+	return err;
+}
+
+void ufs_advertise_fixup_device(struct ufs_hba *hba)
+{
+	int err;
+	struct ufs_card_fix *f;
+	struct ufs_card_info card_data;
+
+	card_data.wmanufacturerid = 0;
+	card_data.model = kmalloc(MAX_MODEL_LEN + 1, GFP_KERNEL);
+	if (!card_data.model)
+		goto out;
+
+	/* get device data */
+	err = ufs_get_device_info(hba, &card_data);
+	if (err) {
+		dev_err(hba->dev, "%s: Failed getting device info\n", __func__);
+		goto out;
+	}
+
+	for (f = ufs_fixups; f->quirk; f++) {
+		/* if same wmanufacturerid */
+		if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
+		     (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
+		    /* and same model */
+		    (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
+		     !strcmp(f->card.model, UFS_ANY_MODEL)))
+			/* update quirks */
+			hba->dev_quirks |= f->quirk;
+	}
+out:
+	kfree(card_data.model);
+}
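/*
 * Hedged usage illustration (not part of the patch): how an entry in the
 * ufs_fixups[] table above matches a device. The sample card values are made
 * up for the example; the matching rules are the ones used in
 * ufs_advertise_fixup_device().
 */
static void ufs_quirk_match_example(struct ufs_hba *hba)
{
	struct ufs_card_info card = {
		.wmanufacturerid = UFS_VENDOR_TOSHIBA,
		.model = "THGLF2G9C8KBADG",	/* hypothetical reported model */
	};
	struct ufs_card_fix *f;

	for (f = ufs_fixups; f->quirk; f++) {
		/* vendor must match, or the entry must accept any vendor */
		if ((f->card.wmanufacturerid == card.wmanufacturerid ||
		     f->card.wmanufacturerid == UFS_ANY_VENDOR) &&
		    /* table model must be a prefix, or accept any model */
		    (STR_PRFX_EQUAL(f->card.model, card.model) ||
		     !strcmp(f->card.model, UFS_ANY_MODEL)))
			hba->dev_quirks |= f->quirk;
	}
	/* this sample card would pick up UFS_DEVICE_QUIRK_PA_TACTIVATE */
}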
diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
index 22f881e..cfb2db5 100644
--- a/drivers/scsi/ufs/ufs_quirks.h
+++ b/drivers/scsi/ufs/ufs_quirks.h
@@ -1,5 +1,4 @@
-/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -18,7 +17,7 @@
 /* return true if s1 is a prefix of s2 */
 #define STR_PRFX_EQUAL(s1, s2) !strncmp(s1, s2, strlen(s1))
 
-#define UFS_ANY_VENDOR 0xFFFF
+#define UFS_ANY_VENDOR -1
 #define UFS_ANY_MODEL  "ANY_MODEL"
 
 #define MAX_MODEL_LEN 16
@@ -27,34 +26,38 @@
 #define UFS_VENDOR_SAMSUNG     0x1CE
 #define UFS_VENDOR_SKHYNIX     0x1AD
 
+/* UFS TOSHIBA MODELS */
+#define UFS_MODEL_TOSHIBA_32GB "THGLF2G8D4KBADR"
+#define UFS_MODEL_TOSHIBA_64GB "THGLF2G9D8KBADG"
+
 /**
- * ufs_device_info - ufs device details
+ * ufs_card_info - ufs device details
  * @wmanufacturerid: card details
  * @model: card model
  */
-struct ufs_device_info {
+struct ufs_card_info {
 	u16 wmanufacturerid;
-	char model[MAX_MODEL_LEN + 1];
+	char *model;
 };
 
 /**
- * ufs_dev_fix - ufs device quirk info
+ * ufs_card_fix - ufs device quirk info
  * @card: ufs card details
  * @quirk: device quirk
  */
-struct ufs_dev_fix {
-	struct ufs_device_info card;
+struct ufs_card_fix {
+	struct ufs_card_info card;
 	unsigned int quirk;
 };
 
-#define END_FIX { { 0 }, 0 }
+#define END_FIX { { 0 } , 0 }
 
 /* add specific device quirk */
 #define UFS_FIX(_vendor, _model, _quirk) \
-		{					  \
-			.card.wmanufacturerid = (_vendor),\
-			.card.model = (_model),		  \
-			.quirk = (_quirk),		  \
+		{						  \
+				.card.wmanufacturerid = (_vendor),\
+				.card.model = (_model),		  \
+				.quirk = (_quirk),		  \
 		}
 
 /*
@@ -121,33 +124,22 @@
 #define UFS_DEVICE_NO_FASTAUTO		(1 << 5)
 
 /*
- * It seems some UFS devices may keep drawing more than sleep current
- * (atleast for 500us) from UFS rails (especially from VCCQ rail).
- * To avoid this situation, add 2ms delay before putting these UFS
- * rails in LPM mode.
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; enabling this quirk ensures this.
  */
-#define UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM	(1 << 6)
+#define UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE	(1 << 6)
+
+/*
+ * The maximum value of PA_SaveConfigTime is 250 (10 us), but this is not
+ * enough for some vendors.
+ * A gear switch from PWM to HS may fail even with this maximum
+ * PA_SaveConfigTime. A gear switch can be issued by the host controller as
+ * error recovery, and no software delay will help in this case, so we need to
+ * increase PA_SaveConfigTime to more than 32 us, as per vendor recommendation.
+ */
+#define UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME	(1 << 7)
 
 struct ufs_hba;
 void ufs_advertise_fixup_device(struct ufs_hba *hba);
 
-static struct ufs_dev_fix ufs_fixups[] = {
-	/* UFS cards deviations table */
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS),
-	UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
-		UFS_DEVICE_NO_FASTAUTO),
-	UFS_FIX(UFS_VENDOR_TOSHIBA, UFS_ANY_MODEL,
-		UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
-	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9C8KBADG",
-		UFS_DEVICE_QUIRK_PA_TACTIVATE),
-	UFS_FIX(UFS_VENDOR_TOSHIBA, "THGLF2G9D8KBADG",
-		UFS_DEVICE_QUIRK_PA_TACTIVATE),
-	UFS_FIX(UFS_VENDOR_SKHYNIX, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
-
-	END_FIX
-};
 #endif /* UFS_QUIRKS_H_ */
diff --git a/drivers/scsi/ufs/ufs_test.c b/drivers/scsi/ufs/ufs_test.c
new file mode 100644
index 0000000..8953722e8
--- /dev/null
+++ b/drivers/scsi/ufs/ufs_test.c
@@ -0,0 +1,1536 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt"\n"
+
+#include <linux/async.h>
+#include <linux/atomic.h>
+#include <linux/blkdev.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/test-iosched.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <linux/delay.h>
+#include "ufshcd.h"
+#include "ufs.h"
+
+#define MODULE_NAME "ufs_test"
+#define UFS_TEST_BLK_DEV_TYPE_PREFIX "sd"
+
+#define TEST_MAX_BIOS_PER_REQ		128
+#define TEST_DEFAULT_SECTOR_RANGE		(1024*1024) /* 512MB */
+#define LARGE_PRIME_1	1103515367
+#define LARGE_PRIME_2	35757
+#define MAGIC_SEED	7
+#define DEFAULT_NUM_OF_BIOS		2
+#define LONG_SEQUENTIAL_MIXED_TIMOUT_MS 100000
+#define THREADS_COMPLETION_TIMOUT msecs_to_jiffies(10000) /* 10 sec */
+#define MAX_PARALLEL_QUERIES 33
+#define RANDOM_REQUEST_THREADS 4
+#define LUN_DEPTH_TEST_SIZE 9
+#define SECTOR_SIZE	512
+#define NUM_UNLUCKY_RETRIES	10
+
+/*
+ * this defines the density of random requests in the address space, and
+ * it represents the ratio between accessed sectors and non-accessed sectors
+ */
+#define LONG_RAND_TEST_REQ_RATIO	64
+/* request queue limitation is 128 requests, and we leave 10 spare requests */
+#define QUEUE_MAX_REQUESTS 118
+#define MB_MSEC_RATIO_APPROXIMATION ((1024 * 1024) / 1000)
+/* actual number of MiB in test multiplied by 10, for single digit precision */
+#define BYTE_TO_MB_x_10(x) ((x * 10) / (1024 * 1024))
+/* extract integer value */
+#define LONG_TEST_SIZE_INTEGER(x) (BYTE_TO_MB_x_10(x) / 10)
+/* and calculate the MiB value fraction */
+#define LONG_TEST_SIZE_FRACTION(x) (BYTE_TO_MB_x_10(x) - \
+		(LONG_TEST_SIZE_INTEGER(x) * 10))
+/* translation mask from sectors to block */
+#define SECTOR_TO_BLOCK_MASK 0x7
+
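/*
 * Worked example for the macros above (values chosen for illustration):
 * for a test that moved 1.5 MiB, i.e. byte_count = 1572864,
 *   BYTE_TO_MB_x_10(byte_count)         = (1572864 * 10) / (1024 * 1024) = 15
 *   LONG_TEST_SIZE_INTEGER(byte_count)  = 15 / 10 = 1
 *   LONG_TEST_SIZE_FRACTION(byte_count) = 15 - 1 * 10 = 5
 * which is reported as "1.5 MiB" with one digit of precision.
 */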
+#define TEST_OPS(test_name, upper_case_name)				\
+static int ufs_test_ ## test_name ## _show(struct seq_file *file,	\
+		void *data)						\
+{ return ufs_test_show(file, UFS_TEST_ ## upper_case_name); }		\
+static int ufs_test_ ## test_name ## _open(struct inode *inode,		\
+		struct file *file)					\
+{ return single_open(file, ufs_test_ ## test_name ## _show,		\
+		inode->i_private); }					\
+static ssize_t ufs_test_ ## test_name ## _write(struct file *file,	\
+		const char __user *buf, size_t count, loff_t *ppos)	\
+{ return ufs_test_write(file, buf, count, ppos,				\
+			UFS_TEST_ ## upper_case_name); }		\
+static const struct file_operations ufs_test_ ## test_name ## _ops = {	\
+	.open = ufs_test_ ## test_name ## _open,			\
+	.read = seq_read,						\
+	.write = ufs_test_ ## test_name ## _write,			\
+};
+
+#define add_test(utd, test_name, upper_case_name)			\
+ufs_test_add_test(utd, UFS_TEST_ ## upper_case_name, "ufs_test_"#test_name,\
+				&(ufs_test_ ## test_name ## _ops));	\
+
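/*
 * For reference, TEST_OPS(write_read_test, WRITE_READ_TEST) used later in this
 * file expands to roughly the following; this is a sketch of the generated
 * code, not an addition to the driver.
 */
#if 0	/* expansion sketch only */
static int ufs_test_write_read_test_show(struct seq_file *file, void *data)
{ return ufs_test_show(file, UFS_TEST_WRITE_READ_TEST); }
static int ufs_test_write_read_test_open(struct inode *inode, struct file *file)
{ return single_open(file, ufs_test_write_read_test_show, inode->i_private); }
static ssize_t ufs_test_write_read_test_write(struct file *file,
		const char __user *buf, size_t count, loff_t *ppos)
{ return ufs_test_write(file, buf, count, ppos, UFS_TEST_WRITE_READ_TEST); }
static const struct file_operations ufs_test_write_read_test_ops = {
	.open = ufs_test_write_read_test_open,
	.read = seq_read,
	.write = ufs_test_write_read_test_write,
};
#endif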
+enum ufs_test_testcases {
+	UFS_TEST_WRITE_READ_TEST,
+	UFS_TEST_MULTI_QUERY,
+	UFS_TEST_DATA_INTEGRITY,
+
+	UFS_TEST_LONG_SEQUENTIAL_READ,
+	UFS_TEST_LONG_SEQUENTIAL_WRITE,
+	UFS_TEST_LONG_SEQUENTIAL_MIXED,
+
+	UFS_TEST_LONG_RANDOM_READ,
+	UFS_TEST_LONG_RANDOM_WRITE,
+
+	UFS_TEST_PARALLEL_READ_AND_WRITE,
+	UFS_TEST_LUN_DEPTH,
+
+	NUM_TESTS,
+};
+
+enum ufs_test_stage {
+	DEFAULT,
+	UFS_TEST_ERROR,
+
+	UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE1,
+	UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2,
+
+	UFS_TEST_LUN_DEPTH_TEST_RUNNING,
+	UFS_TEST_LUN_DEPTH_DONE_ISSUING_REQ,
+};
+
+/* device test */
+static struct blk_dev_test_type *ufs_bdt;
+
+struct ufs_test_data {
+	/* Data structure for debugfs dentries */
+	struct dentry **test_list;
+	/*
+	 * Data structure containing individual test information, including
+	 * self-defined specific data
+	 */
+	struct test_info test_info;
+
+	/* A wait queue for OPs to complete */
+	wait_queue_head_t wait_q;
+	/* a flag for write completion */
+	bool queue_complete;
+	/*
+	 * To determine the number of r/w bios. When seed = 0, random is
+	 * disabled and 2 BIOs are written.
+	 */
+	unsigned int random_test_seed;
+	struct dentry *random_test_seed_dentry;
+
+	/* A counter for the number of test requests completed */
+	unsigned int completed_req_count;
+	/* Test stage */
+	enum ufs_test_stage test_stage;
+
+	/* Parameters for maintaining multiple threads */
+	int fail_threads;
+	atomic_t outstanding_threads;
+	struct completion outstanding_complete;
+
+	/* user-defined size of address space in which to perform I/O */
+	u32 sector_range;
+	/* total number of requests to be submitted in long test */
+	u32 long_test_num_reqs;
+
+	struct test_iosched *test_iosched;
+};
+
+static int ufs_test_add_test(struct ufs_test_data *utd,
+		enum ufs_test_testcases test_id, char *test_str,
+		const struct file_operations *test_fops)
+{
+	int ret = 0;
+	struct dentry *tests_root;
+
+	if (test_id >= NUM_TESTS)
+		return -EINVAL;
+
+	tests_root = utd->test_iosched->debug.debug_tests_root;
+	if (!tests_root) {
+		pr_err("%s: Failed to create debugfs root.", __func__);
+		return -EINVAL;
+	}
+
+	utd->test_list[test_id] = debugfs_create_file(test_str,
+						S_IRUGO | S_IWUGO, tests_root,
+						utd, test_fops);
+	if (!utd->test_list[test_id]) {
+		pr_err("%s: Could not create the test %s", test_str,
+				__func__);
+		ret = -ENOMEM;
+	}
+	return ret;
+}
+
+/**
+ * struct test_scenario - keeps scenario data that creates a unique pattern
+ * @test_iosched: per test reference
+ * @direction: pattern initial direction
+ * @toggle_direction: every toggle_direction requests, switch direction for one
+ *			request
+ * @total_req: number of requests to issue
+ * @rnd_req: true if requests should be issued to random LBAs with random sizes
+ * @run_q: the maximum number of requests to hold in queue (before run_queue())
+ */
+struct test_scenario {
+	struct test_iosched *test_iosched;
+	int direction;
+	int toggle_direction;
+	int total_req;
+	bool rnd_req;
+	int run_q;
+};
+
+enum scenario_id {
+	/* scenarios for parallel read and write test */
+	SCEN_RANDOM_READ_50,
+	SCEN_RANDOM_WRITE_50,
+
+	SCEN_RANDOM_READ_32_NO_FLUSH,
+	SCEN_RANDOM_WRITE_32_NO_FLUSH,
+
+	SCEN_RANDOM_MAX,
+};
+
+static struct test_scenario test_scenario[SCEN_RANDOM_MAX] = {
+		{NULL, READ, 0, 50, true, 5}, /* SCEN_RANDOM_READ_50 */
+		{NULL, WRITE, 0, 50, true, 5}, /* SCEN_RANDOM_WRITE_50 */
+
+		/* SCEN_RANDOM_READ_32_NO_FLUSH */
+		{NULL, READ, 0, 32, true, 64},
+		/* SCEN_RANDOM_WRITE_32_NO_FLUSH */
+		{NULL, WRITE, 0, 32, true, 64},
+};
+
+static
+struct test_scenario *get_scenario(struct test_iosched *test_iosched,
+	enum scenario_id id)
+{
+	struct test_scenario *ret = &test_scenario[id];
+
+	ret->test_iosched = test_iosched;
+	return ret;
+}
+
+static char *ufs_test_get_test_case_str(int testcase)
+{
+	switch (testcase) {
+	case UFS_TEST_WRITE_READ_TEST:
+		return "UFS write read test";
+	case UFS_TEST_MULTI_QUERY:
+		return "Test multiple queries at the same time";
+	case UFS_TEST_LONG_RANDOM_READ:
+		return "UFS long random read test";
+	case UFS_TEST_LONG_RANDOM_WRITE:
+		return "UFS long random write test";
+	case UFS_TEST_DATA_INTEGRITY:
+		return "UFS random data integrity test";
+	case UFS_TEST_LONG_SEQUENTIAL_READ:
+		return "UFS long sequential read test";
+	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+		return "UFS long sequential write test";
+	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+		return "UFS long sequential mixed test";
+	case UFS_TEST_PARALLEL_READ_AND_WRITE:
+		return "UFS parallel read and write test";
+	case UFS_TEST_LUN_DEPTH:
+		return "UFS LUN depth test";
+	}
+	return "Unknown test";
+}
+
+static unsigned int ufs_test_pseudo_random_seed(unsigned int *seed_number,
+		unsigned int min_val, unsigned int max_val)
+{
+	int ret = 0;
+
+	if (!seed_number)
+		return 0;
+
+	*seed_number = ((unsigned int) (((unsigned long) *seed_number
+			* (unsigned long) LARGE_PRIME_1) + LARGE_PRIME_2));
+	ret = (unsigned int) ((*seed_number) % max_val);
+
+	return (ret > min_val ? ret : min_val);
+}
+
+/**
+ * pseudo_rnd_sector_and_size - provides random sector and size for test request
+ * @utd: test data providing the random seed and the minimum start sector
+ *	(taken from the test-iosched start sector and sector range)
+ * @start_sector: pointer for output start sector
+ * @num_of_bios: pointer for output number of bios
+ *
+ * Note that for UFS the sector number has to be aligned with the block size,
+ * since SCSI will send the block number as the LBA.
+ */
+static void pseudo_rnd_sector_and_size(struct ufs_test_data *utd,
+					unsigned int *start_sector,
+					unsigned int *num_of_bios)
+{
+	struct test_iosched *tios = utd->test_iosched;
+	u32 min_start_sector = tios->start_sector;
+	unsigned int max_sec = min_start_sector + utd->sector_range;
+
+	do {
+		*start_sector = ufs_test_pseudo_random_seed(
+			&utd->random_test_seed, 1, max_sec);
+		*num_of_bios = ufs_test_pseudo_random_seed(
+			&utd->random_test_seed, 1, TEST_MAX_BIOS_PER_REQ);
+		if (!(*num_of_bios))
+			*num_of_bios = 1;
+	} while ((*start_sector < min_start_sector) ||
+		 (*start_sector + (*num_of_bios * TEST_BIO_SIZE)) > max_sec);
+	/*
+	 * The test-iosched API works with 512B sectors, while a UFS LBA
+	 * refers to a 4096B block. Thus the last 3 bits have to be cleared.
+	 */
+	*start_sector &= ~SECTOR_TO_BLOCK_MASK;
+}
+
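/*
 * Illustration of the draw-and-align step above (example numbers only):
 * each ufs_test_pseudo_random_seed() call advances the seed as
 *   seed = seed * LARGE_PRIME_1 + LARGE_PRIME_2
 * and returns (seed % max_val), clamped to at least min_val. If such a draw
 * produced *start_sector = 10245, masking with ~SECTOR_TO_BLOCK_MASK (~0x7)
 * clears the low 3 bits and yields 10240, a 512B-sector address that lies on
 * a 4096B block boundary, matching the UFS LBA granularity.
 */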
+static void ufs_test_pseudo_rnd_size(unsigned int *seed,
+				unsigned int *num_of_bios)
+{
+	*num_of_bios = ufs_test_pseudo_random_seed(seed, 1,
+						TEST_MAX_BIOS_PER_REQ);
+	if (!(*num_of_bios))
+		*num_of_bios = DEFAULT_NUM_OF_BIOS;
+}
+
+static inline int ufs_test_pm_runtime_cfg_sync(struct test_iosched *tios,
+	bool enable)
+{
+	struct scsi_device *sdev;
+	struct ufs_hba *hba;
+	int ret;
+
+	BUG_ON(!tios || !tios->req_q || !tios->req_q->queuedata);
+	sdev = (struct scsi_device *)tios->req_q->queuedata;
+	BUG_ON(!sdev->host);
+	hba = shost_priv(sdev->host);
+	BUG_ON(!hba);
+
+	if (enable) {
+		ret = pm_runtime_get_sync(hba->dev);
+		/* Positive non-zero return values are not errors */
+		if (ret < 0) {
+			pr_err("%s: pm_runtime_get_sync failed, ret=%d\n",
+				__func__, ret);
+			return ret;
+		}
+		return 0;
+	}
+	pm_runtime_put_sync(hba->dev);
+	return 0;
+}
+
+static int ufs_test_show(struct seq_file *file, int test_case)
+{
+	char *test_description;
+
+	switch (test_case) {
+	case UFS_TEST_WRITE_READ_TEST:
+		test_description = "\nufs_write_read_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test write once a random block and than reads it to "
+		 "verify its content. Used to debug first time transactions.\n";
+		break;
+	case UFS_TEST_MULTI_QUERY:
+		test_description = "Test multiple queries at the same time.\n";
+		break;
+	case UFS_TEST_DATA_INTEGRITY:
+		test_description = "\nufs_data_integrity_test\n"
+		"=========\n"
+		 "Description:\n"
+		 "This test writes 118 requests of size 4KB to randomly chosen LBAs.\n"
+		 "The test then reads from these LBAs and checks that the\n"
+		 "correct buffer has been read.\n";
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_READ:
+		test_description = "\nufs_long_sequential_read_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test runs the following scenarios\n"
+		 "- Long Sequential Read Test: this test measures read "
+		 "throughput at the driver level by sequentially reading many "
+		 "large requests.\n";
+		break;
+	case UFS_TEST_LONG_RANDOM_READ:
+		test_description = "\nufs_long_random_read_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test runs the following scenarios\n"
+		 "- Long Random Read Test: this test measures read "
+		 "IOPS at the driver level by reading many 4KB requests"
+		 "with random LBAs\n";
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+		test_description =  "\nufs_long_sequential_write_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test runs the following scenarios\n"
+		 "- Long Sequential Write Test: this test measures write "
+		 "throughput at the driver level by sequentially writing many "
+		 "large requests\n";
+		break;
+	case UFS_TEST_LONG_RANDOM_WRITE:
+		test_description = "\nufs_long_random_write_test\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test runs the following scenarios\n"
+		 "- Long Random Write Test: this test measures write "
+		 "IOPS at the driver level by writing many 4KB requests"
+		 "with random LBAs\n";
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+		test_description = "\nufs_long_sequential_mixed_test_read\n"
+		 "=========\n"
+		 "Description:\n"
+		 "The test will verify correctness of sequential data pattern "
+		 "written to the device while new data (with same pattern) is "
+		 "written simultaneously.\n"
+		 "First this test will run a long sequential write scenario."
+		 "This first stage will write the pattern that will be read "
+		 "later. Second, sequential read requests will read and "
+		 "compare the same data. The second stage reads, will issue in "
+		 "Parallel to write requests with the same LBA and size.\n"
+		 "NOTE: The test requires a long timeout.\n";
+		break;
+	case UFS_TEST_PARALLEL_READ_AND_WRITE:
+		test_description = "\nufs_test_parallel_read_and_write\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test initiate two threads. Each thread is issuing "
+		 "multiple random requests. One thread will issue only read "
+		 "requests, while the other will only issue write requests.\n";
+		break;
+	case UFS_TEST_LUN_DEPTH:
+		test_description = "\nufs_test_lun_depth\n"
+		 "=========\n"
+		 "Description:\n"
+		 "This test is trying to stress the edge cases of the UFS "
+		 "device queue. This queue has two such edges, the total queue "
+		 "depth and the command per LU. To test those edges properly, "
+		 "two deviations from the edge in addition to the edge are "
+		 "tested as well. One deviation will be fixed (1), and the "
+		 "second will be picked randomly.\n"
+		 "The test will fill a request queue with random read "
+		 "requests. The amount of request will vary each iteration and "
+		 "will be either the one of the edges or the sum of this edge "
+		 "with one deviations.\n"
+		 "The test will test for each iteration once only reads and "
+		 "once only writes.\n";
+		break;
+	default:
+		test_description = "Unknown test";
+	}
+
+	seq_puts(file, test_description);
+	return 0;
+}
+
+static struct gendisk *ufs_test_get_rq_disk(struct test_iosched *test_iosched)
+{
+	struct request_queue *req_q = test_iosched->req_q;
+	struct scsi_device *sd;
+
+	if (!req_q) {
+		pr_info("%s: Could not fetch request_queue", __func__);
+		goto exit;
+	}
+
+	sd = (struct scsi_device *)req_q->queuedata;
+	if (!sd) {
+		pr_info("%s: req_q is missing required queuedata", __func__);
+		goto exit;
+	}
+
+	return scsi_gendisk_get_from_dev(&sd->sdev_gendev);
+
+exit:
+	return NULL;
+}
+
+static int ufs_test_put_gendisk(struct test_iosched *test_iosched)
+{
+	struct request_queue *req_q = test_iosched->req_q;
+	struct scsi_device *sd;
+	int ret = 0;
+
+	if (!req_q) {
+		pr_info("%s: Could not fetch request_queue", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	sd = (struct scsi_device *)req_q->queuedata;
+	if (!sd) {
+		pr_info("%s: req_q is missing required queuedata", __func__);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	scsi_gendisk_put(&sd->sdev_gendev);
+
+exit:
+	return ret;
+}
+
+static int ufs_test_prepare(struct test_iosched *tios)
+{
+	return ufs_test_pm_runtime_cfg_sync(tios, true);
+}
+
+static int ufs_test_post(struct test_iosched *tios)
+{
+	int ret;
+
+	ret = ufs_test_pm_runtime_cfg_sync(tios, false);
+	if (!ret)
+		ret = ufs_test_put_gendisk(tios);
+
+	return ret;
+}
+
+static int ufs_test_check_result(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	if (utd->test_stage == UFS_TEST_ERROR) {
+		pr_err("%s: An error occurred during the test.", __func__);
+		return TEST_FAILED;
+	}
+
+	if (utd->fail_threads != 0) {
+		pr_err("%s: About %d threads failed during execution.",
+				__func__, utd->fail_threads);
+		return utd->fail_threads;
+	}
+
+	return 0;
+}
+
+static bool ufs_write_read_completion(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	if (!utd->queue_complete) {
+		utd->queue_complete = true;
+		wake_up(&utd->wait_q);
+		return false;
+	}
+	return true;
+}
+
+static int ufs_test_run_write_read_test(struct test_iosched *test_iosched)
+{
+	int ret = 0;
+	unsigned int start_sec;
+	unsigned int num_bios;
+	struct request_queue *q = test_iosched->req_q;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	start_sec = test_iosched->start_sector + sizeof(int) * BIO_U32_SIZE
+			* test_iosched->num_of_write_bios;
+	if (utd->random_test_seed != 0)
+		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
+	else
+		num_bios = DEFAULT_NUM_OF_BIOS;
+
+	/* Adding a write request */
+	pr_info("%s: Adding a write request with %d bios to Q, req_id=%d",
+		__func__, num_bios, test_iosched->wr_rd_next_req_id);
+
+	utd->queue_complete = false;
+	ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE, start_sec,
+		num_bios, TEST_PATTERN_5A, NULL);
+	if (ret) {
+		pr_err("%s: failed to add a write request", __func__);
+		return ret;
+	}
+
+	/* waiting for the write request to finish */
+	blk_post_runtime_resume(q, 0);
+	wait_event(utd->wait_q, utd->queue_complete);
+
+	/* Adding a read request*/
+	pr_info("%s: Adding a read request to Q", __func__);
+
+	ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ, start_sec,
+			num_bios, TEST_PATTERN_5A, NULL);
+	if (ret) {
+		pr_err("%s: failed to add a read request", __func__);
+		return ret;
+	}
+
+	blk_post_runtime_resume(q, 0);
+	return ret;
+}
+
+static void ufs_test_thread_complete(struct ufs_test_data *utd, int result)
+{
+	if (result)
+		utd->fail_threads++;
+	atomic_dec(&utd->outstanding_threads);
+	if (!atomic_read(&utd->outstanding_threads))
+		complete(&utd->outstanding_complete);
+}
+
+static void ufs_test_random_async_query(void *data, async_cookie_t cookie)
+{
+	int op;
+	struct test_iosched *test_iosched = data;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+	struct scsi_device *sdev;
+	struct ufs_hba *hba;
+	int buff_len = QUERY_DESC_UNIT_MAX_SIZE;
+	u8 desc_buf[QUERY_DESC_UNIT_MAX_SIZE];
+	bool flag;
+	u32 att;
+	int ret = 0;
+
+	sdev = (struct scsi_device *)test_iosched->req_q->queuedata;
+	BUG_ON(!sdev->host);
+	hba = shost_priv(sdev->host);
+	BUG_ON(!hba);
+
+	op = ufs_test_pseudo_random_seed(&utd->random_test_seed, 1, 8);
+	/*
+	 * When write data (descriptor/attribute/flag) queries are issued,
+	 * regular work and functionality must be kept. The data is read
+	 * first to make sure the original state is restored.
+	 */
+	switch (op) {
+	case UPIU_QUERY_OPCODE_READ_DESC:
+	case UPIU_QUERY_OPCODE_WRITE_DESC:
+		ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+				QUERY_DESC_IDN_UNIT, 0, 0, desc_buf, &buff_len);
+		break;
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
+				QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &att);
+		if (ret || op == UPIU_QUERY_OPCODE_READ_ATTR)
+			break;
+
+		ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+				QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &att);
+		break;
+	case UPIU_QUERY_OPCODE_READ_FLAG:
+	case UPIU_QUERY_OPCODE_SET_FLAG:
+	case UPIU_QUERY_OPCODE_CLEAR_FLAG:
+	case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
+		/* We read the QUERY_FLAG_IDN_BKOPS_EN and restore it later */
+		ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+				QUERY_FLAG_IDN_BKOPS_EN, &flag);
+		if (ret || op == UPIU_QUERY_OPCODE_READ_FLAG)
+			break;
+
+		/* After changing the flag we have to change it back */
+		ret = ufshcd_query_flag(hba, op, QUERY_FLAG_IDN_BKOPS_EN, NULL);
+		if ((op == UPIU_QUERY_OPCODE_SET_FLAG && flag) ||
+				(op == UPIU_QUERY_OPCODE_CLEAR_FLAG && !flag))
+			/* No need to change it back */
+			break;
+
+		if (flag)
+			ret |= ufshcd_query_flag(hba,
+				UPIU_QUERY_OPCODE_SET_FLAG,
+				QUERY_FLAG_IDN_BKOPS_EN, NULL);
+		else
+			ret |= ufshcd_query_flag(hba,
+				UPIU_QUERY_OPCODE_CLEAR_FLAG,
+				QUERY_FLAG_IDN_BKOPS_EN, NULL);
+		break;
+	default:
+		pr_err("%s: Random error unknown op %d", __func__, op);
+	}
+
+	if (ret)
+		pr_err("%s: Query thread with op %d, failed with err %d.",
+			__func__, op, ret);
+
+	ufs_test_thread_complete(utd, ret);
+}
+
+static void scenario_free_end_io_fn(struct request *rq, int err)
+{
+	struct test_request *test_rq;
+	struct test_iosched *test_iosched = rq->q->elevator->elevator_data;
+	unsigned long flags;
+
+	BUG_ON(!rq);
+	test_rq = (struct test_request *)rq->elv.priv[0];
+	BUG_ON(!test_rq);
+
+	spin_lock_irqsave(&test_iosched->lock, flags);
+	test_iosched->dispatched_count--;
+	list_del_init(&test_rq->queuelist);
+	__blk_put_request(test_iosched->req_q, test_rq->rq);
+	spin_unlock_irqrestore(&test_iosched->lock, flags);
+
+	if (err)
+		pr_err("%s: request %d completed, err=%d", __func__,
+			test_rq->req_id, err);
+
+	test_iosched_free_test_req_data_buffer(test_rq);
+	kfree(test_rq);
+
+	check_test_completion(test_iosched);
+}
+
+static bool ufs_test_multi_thread_completion(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+	return atomic_read(&utd->outstanding_threads) <= 0 &&
+			utd->test_stage != UFS_TEST_LUN_DEPTH_TEST_RUNNING;
+}
+
+static bool long_rand_test_check_completion(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	if (utd->completed_req_count > utd->long_test_num_reqs) {
+		pr_err("%s: Error: Completed more requests than total test requests.\nTerminating test."
+		       , __func__);
+		return true;
+	}
+	return utd->completed_req_count == utd->long_test_num_reqs;
+}
+
+static bool long_seq_test_check_completion(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	if (utd->completed_req_count > utd->long_test_num_reqs) {
+		pr_err("%s: Error: Completed more requests than total test requests"
+		       , __func__);
+		pr_err("%s: Terminating test.", __func__);
+		return true;
+	}
+	return utd->completed_req_count == utd->long_test_num_reqs;
+}
+
+/**
+ * ufs_test_toggle_direction() - decides whether toggling is
+ * needed. Toggle factor zero means no toggling.
+ *
+ * @toggle_factor: how often to toggle, i.e. the toggling frequency
+ * @iteration: the current request iteration
+ *
+ * Returns nonzero if toggling is needed, and 0 when toggling is
+ * not needed.
+ */
+static inline int ufs_test_toggle_direction(int toggle_factor, int iteration)
+{
+	if (!toggle_factor)
+		return 0;
+
+	return !(iteration % toggle_factor);
+}
+
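/*
 * Example (illustrative values): with toggle_factor = 4 the helper above
 * returns nonzero for iterations 0, 4, 8, ..., so ufs_test_run_scenario()
 * issues every fourth request (including the first) in the direction
 * opposite to ts->direction.
 */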
+static void ufs_test_run_scenario(void *data, async_cookie_t cookie)
+{
+	struct test_scenario *ts = (struct test_scenario *)data;
+	struct test_iosched *test_iosched = ts->test_iosched;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+	int start_sec;
+	int i;
+	int ret = 0;
+
+	BUG_ON(!ts);
+	start_sec = ts->test_iosched->start_sector;
+
+	for (i = 0; i < ts->total_req; i++) {
+		int num_bios = DEFAULT_NUM_OF_BIOS;
+		int direction;
+
+		if (ufs_test_toggle_direction(ts->toggle_direction, i))
+			direction = (ts->direction == WRITE) ? READ : WRITE;
+		else
+			direction = ts->direction;
+
+		/* use randomly generated requests */
+		if (ts->rnd_req && utd->random_test_seed != 0)
+			pseudo_rnd_sector_and_size(utd, &start_sec, &num_bios);
+
+		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
+			direction, start_sec, num_bios, TEST_PATTERN_5A,
+			scenario_free_end_io_fn);
+		if (ret) {
+			pr_err("%s: failed to create request" , __func__);
+			break;
+		}
+
+		/*
+		 * We want to run the queue every run_q requests, or,
+		 * when the requests pool is exhausted
+		 */
+
+		if (test_iosched->dispatched_count >= QUEUE_MAX_REQUESTS ||
+				(ts->run_q && !(i % ts->run_q)))
+			blk_post_runtime_resume(test_iosched->req_q, 0);
+	}
+
+	blk_post_runtime_resume(test_iosched->req_q, 0);
+	ufs_test_thread_complete(utd, ret);
+}
+
+static int ufs_test_run_multi_query_test(struct test_iosched *test_iosched)
+{
+	int i;
+	struct ufs_test_data *utd;
+	struct scsi_device *sdev;
+	struct ufs_hba *hba;
+
+	BUG_ON(!test_iosched || !test_iosched->req_q ||
+		!test_iosched->req_q->queuedata);
+	sdev = (struct scsi_device *)test_iosched->req_q->queuedata;
+	BUG_ON(!sdev->host);
+	hba = shost_priv(sdev->host);
+	BUG_ON(!hba);
+
+	utd = test_iosched->blk_dev_test_data;
+	atomic_set(&utd->outstanding_threads, 0);
+	utd->fail_threads = 0;
+	init_completion(&utd->outstanding_complete);
+	for (i = 0; i < MAX_PARALLEL_QUERIES; ++i) {
+		atomic_inc(&utd->outstanding_threads);
+		async_schedule(ufs_test_random_async_query, test_iosched);
+	}
+
+	if (!wait_for_completion_timeout(&utd->outstanding_complete,
+			THREADS_COMPLETION_TIMOUT)) {
+		pr_err("%s: Multi-query test timed-out %d threads left",
+			__func__, atomic_read(&utd->outstanding_threads));
+	}
+	test_iosched_mark_test_completion(test_iosched);
+	return 0;
+}
+
+static int ufs_test_run_parallel_read_and_write_test(
+	struct test_iosched *test_iosched)
+{
+	struct test_scenario *read_data, *write_data;
+	int i;
+	bool changed_seed = false;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	read_data = get_scenario(test_iosched, SCEN_RANDOM_READ_50);
+	write_data = get_scenario(test_iosched, SCEN_RANDOM_WRITE_50);
+
+	/* allow randomness even if user forgot */
+	if (utd->random_test_seed <= 0) {
+		changed_seed = true;
+		utd->random_test_seed = 1;
+	}
+
+	atomic_set(&utd->outstanding_threads, 0);
+	utd->fail_threads = 0;
+	init_completion(&utd->outstanding_complete);
+
+	for (i = 0; i < (RANDOM_REQUEST_THREADS / 2); i++) {
+		async_schedule(ufs_test_run_scenario, read_data);
+		async_schedule(ufs_test_run_scenario, write_data);
+		atomic_add(2, &utd->outstanding_threads);
+	}
+
+	if (!wait_for_completion_timeout(&utd->outstanding_complete,
+				THREADS_COMPLETION_TIMOUT)) {
+		pr_err("%s: Multi-thread test timed-out %d threads left",
+			__func__, atomic_read(&utd->outstanding_threads));
+	}
+	check_test_completion(test_iosched);
+
+	/* clear random seed if changed */
+	if (changed_seed)
+		utd->random_test_seed = 0;
+
+	return 0;
+}
+
+static void ufs_test_run_synchronous_scenario(struct test_scenario *read_data)
+{
+	struct ufs_test_data *utd = read_data->test_iosched->blk_dev_test_data;
+	init_completion(&utd->outstanding_complete);
+	atomic_set(&utd->outstanding_threads, 1);
+	async_schedule(ufs_test_run_scenario, read_data);
+	if (!wait_for_completion_timeout(&utd->outstanding_complete,
+			THREADS_COMPLETION_TIMOUT)) {
+		pr_err("%s: Multi-thread test timed-out %d threads left",
+			__func__, atomic_read(&utd->outstanding_threads));
+	}
+}
+
+static int ufs_test_run_lun_depth_test(struct test_iosched *test_iosched)
+{
+	struct test_scenario *read_data, *write_data;
+	struct scsi_device *sdev;
+	bool changed_seed = false;
+	int i = 0, num_req[LUN_DEPTH_TEST_SIZE];
+	int lun_qdepth, nutrs, num_scenarios;
+	struct ufs_test_data *utd;
+
+	BUG_ON(!test_iosched || !test_iosched->req_q ||
+		!test_iosched->req_q->queuedata);
+	sdev = (struct scsi_device *)test_iosched->req_q->queuedata;
+	lun_qdepth = sdev->max_queue_depth;
+	nutrs = sdev->host->can_queue;
+	utd = test_iosched->blk_dev_test_data;
+
+	/* allow randomness even if user forgot */
+	if (utd->random_test_seed <= 0) {
+		changed_seed = true;
+		utd->random_test_seed = 1;
+	}
+
+	/* initialize the number of requests for each iteration */
+	num_req[i++] = ufs_test_pseudo_random_seed(
+			&utd->random_test_seed, 1, lun_qdepth - 2);
+	num_req[i++] = lun_qdepth - 1;
+	num_req[i++] = lun_qdepth;
+	num_req[i++] = lun_qdepth + 1;
+	/* if (nutrs-lun_qdepth-2 <= 0), do not run this scenario */
+	if (nutrs - lun_qdepth - 2 > 0)
+		num_req[i++] = lun_qdepth + 1 + ufs_test_pseudo_random_seed(
+			&utd->random_test_seed, 1, nutrs - lun_qdepth - 2);
+
+	/* if nutrs == lun_qdepth, do not run these three scenarios */
+	if (nutrs != lun_qdepth) {
+		num_req[i++] = nutrs - 1;
+		num_req[i++] = nutrs;
+		num_req[i++] = nutrs + 1;
+	}
+
+	/* a random number up to 10, not to cause overflow or timeout */
+	num_req[i++] = nutrs + 1 + ufs_test_pseudo_random_seed(
+			&utd->random_test_seed, 1, 10);
+
+	num_scenarios = i;
+	utd->test_stage = UFS_TEST_LUN_DEPTH_TEST_RUNNING;
+	utd->fail_threads = 0;
+	read_data = get_scenario(test_iosched, SCEN_RANDOM_READ_32_NO_FLUSH);
+	write_data = get_scenario(test_iosched, SCEN_RANDOM_WRITE_32_NO_FLUSH);
+
+	for (i = 0; i < num_scenarios; i++) {
+		int reqs = num_req[i];
+
+		read_data->total_req = reqs;
+		write_data->total_req = reqs;
+
+		ufs_test_run_synchronous_scenario(read_data);
+		ufs_test_run_synchronous_scenario(write_data);
+	}
+
+	utd->test_stage = UFS_TEST_LUN_DEPTH_DONE_ISSUING_REQ;
+	check_test_completion(test_iosched);
+
+	/* clear random seed if changed */
+	if (changed_seed)
+		utd->random_test_seed = 0;
+
+	return 0;
+}
+
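/*
 * Worked example for the per-iteration request counts built above, with
 * hypothetical lun_qdepth = 16 and nutrs = 32: the test issues, once with
 * reads only and once with writes only,
 *   a random count below lun_qdepth - 1,
 *   15 (lun_qdepth - 1), 16 (lun_qdepth), 17 (lun_qdepth + 1),
 *   17 plus a random count (since nutrs - lun_qdepth - 2 > 0),
 *   31 (nutrs - 1), 32 (nutrs), 33 (nutrs + 1),
 *   and 33 plus a random count of up to 10,
 * so both the per-LU depth and the total queue depth are crossed by a fixed
 * and a random deviation.
 */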
+static void long_test_free_end_io_fn(struct request *rq, int err)
+{
+	struct test_request *test_rq;
+	struct test_iosched *test_iosched = rq->q->elevator->elevator_data;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+	unsigned long flags;
+
+	if (!rq) {
+		pr_err("%s: error: NULL request", __func__);
+		return;
+	}
+
+	test_rq = (struct test_request *)rq->elv.priv[0];
+
+	BUG_ON(!test_rq);
+
+	spin_lock_irqsave(&test_iosched->lock, flags);
+	test_iosched->dispatched_count--;
+	list_del_init(&test_rq->queuelist);
+	__blk_put_request(test_iosched->req_q, test_rq->rq);
+	spin_unlock_irqrestore(&test_iosched->lock, flags);
+
+	if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2 &&
+			rq_data_dir(rq) == READ &&
+			compare_buffer_to_pattern(test_rq)) {
+		/* if the pattern does not match */
+		pr_err("%s: read pattern not as expected", __func__);
+		utd->test_stage = UFS_TEST_ERROR;
+		check_test_completion(test_iosched);
+		return;
+	}
+
+	if (err)
+		pr_err("%s: request %d completed, err=%d", __func__,
+			test_rq->req_id, err);
+
+	test_iosched_free_test_req_data_buffer(test_rq);
+	kfree(test_rq);
+	utd->completed_req_count++;
+
+	check_test_completion(test_iosched);
+}
+
+/**
+ * run_long_test - main function for the long sequential and random tests
+ * @test_iosched: test specific data
+ *
+ * This function is used to fill up (and keep full) the test queue with
+ * requests. There are two scenarios this function works with:
+ * 1. Only read/write (STAGE_1 or no stage)
+ * 2. Simultaneous read and write to the same LBAs (STAGE_2)
+ */
+static int run_long_test(struct test_iosched *test_iosched)
+{
+	int ret = 0;
+	int direction, num_bios_per_request;
+	static unsigned int inserted_requests;
+	u32 sector, seed, num_bios, seq_sector_delta;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	BUG_ON(!test_iosched);
+	sector = test_iosched->start_sector;
+	if (test_iosched->sector_range)
+		utd->sector_range = test_iosched->sector_range;
+	else
+		utd->sector_range = TEST_DEFAULT_SECTOR_RANGE;
+
+	if (utd->test_stage != UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
+		test_iosched->test_count = 0;
+		utd->completed_req_count = 0;
+		inserted_requests = 0;
+	}
+
+	/* Set test parameters */
+	switch (test_iosched->test_info.testcase) {
+	case  UFS_TEST_LONG_RANDOM_READ:
+		num_bios_per_request = 1;
+		utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+			(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
+					num_bios_per_request);
+		direction = READ;
+		break;
+	case  UFS_TEST_LONG_RANDOM_WRITE:
+		num_bios_per_request = 1;
+		utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+			(LONG_RAND_TEST_REQ_RATIO * TEST_BIO_SIZE *
+					num_bios_per_request);
+		direction = WRITE;
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_READ:
+		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
+		utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+			(num_bios_per_request * TEST_BIO_SIZE);
+		direction = READ;
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+		num_bios_per_request = TEST_MAX_BIOS_PER_REQ;
+		utd->long_test_num_reqs = (utd->sector_range * SECTOR_SIZE) /
+			(num_bios_per_request * TEST_BIO_SIZE);
+	default:
+		direction = WRITE;
+	}
+
+	seq_sector_delta = num_bios_per_request * (TEST_BIO_SIZE / SECTOR_SIZE);
+
+	seed = utd->random_test_seed ? utd->random_test_seed : MAGIC_SEED;
+
+	pr_info("%s: Adding %d requests, first req_id=%d", __func__,
+	     utd->long_test_num_reqs, test_iosched->wr_rd_next_req_id);
+
+	do {
+		/*
+		 * since our requests come from a pool containing 128
+		 * requests, we don't want to exhaust this quantity,
+		 * therefore we add up to QUEUE_MAX_REQUESTS (which
+		 * includes a safety margin) and then call the block layer
+		 * to fetch them
+		 */
+		if (test_iosched->test_count >= QUEUE_MAX_REQUESTS) {
+			blk_post_runtime_resume(test_iosched->req_q, 0);
+			continue;
+		}
+
+		switch (test_iosched->test_info.testcase) {
+		case UFS_TEST_LONG_SEQUENTIAL_READ:
+		case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+		case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+			/* don't need to increment on the first iteration */
+			if (inserted_requests)
+				sector += seq_sector_delta;
+			break;
+		case  UFS_TEST_LONG_RANDOM_READ:
+		case  UFS_TEST_LONG_RANDOM_WRITE:
+			pseudo_rnd_sector_and_size(utd, &sector, &num_bios);
+		default:
+			break;
+		}
+
+		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
+			direction, sector, num_bios_per_request,
+			TEST_PATTERN_5A, long_test_free_end_io_fn);
+		if (ret) {
+			pr_err("%s: failed to create request" , __func__);
+			break;
+		}
+		inserted_requests++;
+		if (utd->test_stage == UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2) {
+			ret = test_iosched_add_wr_rd_test_req(test_iosched, 0,
+				READ, sector, num_bios_per_request,
+				TEST_PATTERN_5A, long_test_free_end_io_fn);
+			if (ret) {
+				pr_err("%s: failed to create request" ,
+						__func__);
+				break;
+			}
+			inserted_requests++;
+		}
+
+	} while (inserted_requests < utd->long_test_num_reqs);
+
+	/* in this case the queue will not run in the above loop */
+	if (utd->long_test_num_reqs < QUEUE_MAX_REQUESTS)
+		blk_post_runtime_resume(test_iosched->req_q, 0);
+
+	return ret;
+}
+
+static int run_mixed_long_seq_test(struct test_iosched *test_iosched)
+{
+	int ret;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	utd->test_stage = UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE1;
+	ret = run_long_test(test_iosched);
+	if (ret)
+		goto out;
+
+	pr_info("%s: First write iteration completed.", __func__);
+	pr_info("%s: Starting mixed write and reads sequence.", __func__);
+	utd->test_stage = UFS_TEST_LONG_SEQUENTIAL_MIXED_STAGE2;
+	ret = run_long_test(test_iosched);
+out:
+	return ret;
+}
+
+static int long_rand_test_calc_iops(struct test_iosched *test_iosched)
+{
+	unsigned long mtime, num_ios, iops;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	mtime = ktime_to_ms(utd->test_info.test_duration);
+	num_ios = utd->completed_req_count;
+
+	pr_info("%s: time is %lu msec, IOS count is %lu", __func__, mtime,
+				num_ios);
+
+	/* preserve some precision */
+	num_ios *= 1000;
+	/* calculate those iops */
+	iops = num_ios / mtime;
+
+	pr_info("%s: IOPS: %lu IOP/sec\n", __func__, iops);
+
+	return ufs_test_post(test_iosched);
+}
+
+static int long_seq_test_calc_throughput(struct test_iosched *test_iosched)
+{
+	unsigned long fraction, integer;
+	unsigned long mtime, byte_count;
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	mtime = ktime_to_ms(utd->test_info.test_duration);
+	byte_count = utd->test_info.test_byte_count;
+
+	pr_info("%s: time is %lu msec, size is %lu.%lu MiB", __func__, mtime,
+				LONG_TEST_SIZE_INTEGER(byte_count),
+				LONG_TEST_SIZE_FRACTION(byte_count));
+
+	/* we first multiply in order not to lose precision */
+	mtime *= MB_MSEC_RATIO_APPROXIMATION;
+	/* divide values to get a MiB/sec integer value with one
+	 * digit of precision
+	 */
+	fraction = integer = (byte_count * 10) / mtime;
+	integer /= 10;
+	/* and calculate the MiB value fraction */
+	fraction -= integer * 10;
+
+	pr_info("%s: Throughput: %lu.%lu MiB/sec\n", __func__, integer,
+				fraction);
+
+	return ufs_test_post(test_iosched);
+}
+
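/*
 * Worked example for the throughput arithmetic above (illustrative numbers):
 * for byte_count = 104857600 (100 MiB) and mtime = 2000 msec,
 *   mtime *= MB_MSEC_RATIO_APPROXIMATION  ->  2000 * 1048 = 2096000
 *   fraction = integer = (byte_count * 10) / mtime  = 500
 *   integer /= 10                                   = 50
 *   fraction -= integer * 10                        = 0
 * so the test reports "Throughput: 50.0 MiB/sec".
 */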
+static bool ufs_data_integrity_completion(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+	bool ret = false;
+
+	if (!test_iosched->dispatched_count) {
+		/* q is empty in this case */
+		if (!utd->queue_complete) {
+			utd->queue_complete = true;
+			wake_up(&utd->wait_q);
+		} else {
+			/* declare completion only on second time q is empty */
+			ret = true;
+		}
+	}
+
+	return ret;
+}
+
+static int ufs_test_run_data_integrity_test(struct test_iosched *test_iosched)
+{
+	int ret = 0;
+	int i, j;
+	unsigned int start_sec, num_bios, retries = NUM_UNLUCKY_RETRIES;
+	struct request_queue *q = test_iosched->req_q;
+	int sectors[QUEUE_MAX_REQUESTS] = {0};
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	start_sec = test_iosched->start_sector;
+	utd->queue_complete = false;
+
+	if (utd->random_test_seed != 0) {
+		ufs_test_pseudo_rnd_size(&utd->random_test_seed, &num_bios);
+	} else {
+		num_bios = DEFAULT_NUM_OF_BIOS;
+		utd->random_test_seed = MAGIC_SEED;
+	}
+
+	/* Adding write requests */
+	pr_info("%s: Adding %d write requests, first req_id=%d", __func__,
+		     QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);
+
+	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
+		/* make sure that we didn't draw the same start_sector twice */
+		while (retries--) {
+			pseudo_rnd_sector_and_size(utd, &start_sec, &num_bios);
+			sectors[i] = start_sec;
+			for (j = 0; (j < i) && (sectors[i] != sectors[j]); j++)
+				/* just increment j */;
+			if (j == i)
+				break;
+		}
+		if (!retries) {
+			pr_err("%s: too many unlucky start_sector draw retries",
+			       __func__);
+			ret = -EINVAL;
+			return ret;
+		}
+		retries = NUM_UNLUCKY_RETRIES;
+
+		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, WRITE,
+			start_sec, 1, i, long_test_free_end_io_fn);
+
+		if (ret) {
+			pr_err("%s: failed to add a write request", __func__);
+			return ret;
+		}
+	}
+
+	/* waiting for the write request to finish */
+	blk_post_runtime_resume(q, 0);
+	wait_event(utd->wait_q, utd->queue_complete);
+
+	/* Adding read requests */
+	pr_info("%s: Adding %d read requests, first req_id=%d", __func__,
+		     QUEUE_MAX_REQUESTS, test_iosched->wr_rd_next_req_id);
+
+	for (i = 0; i < QUEUE_MAX_REQUESTS; i++) {
+		ret = test_iosched_add_wr_rd_test_req(test_iosched, 0, READ,
+			sectors[i], 1, i, long_test_free_end_io_fn);
+
+		if (ret) {
+			pr_err("%s: failed to add a read request", __func__);
+			return ret;
+		}
+	}
+
+	blk_post_runtime_resume(q, 0);
+	return ret;
+}
+
+static ssize_t ufs_test_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos, int test_case)
+{
+	int ret = 0;
+	int i;
+	int number;
+	struct seq_file *seq_f = file->private_data;
+	struct ufs_test_data *utd = seq_f->private;
+
+	ret = kstrtoint_from_user(buf, count, 0, &number);
+	if (ret < 0) {
+		pr_err("%s: Error while reading test parameter value %d",
+				__func__, ret);
+		return ret;
+	}
+
+	if (number <= 0)
+		number = 1;
+
+	pr_info("%s:the test will run for %d iterations.", __func__, number);
+	memset(&utd->test_info, 0, sizeof(struct test_info));
+
+	/* Initializing test */
+	utd->test_info.data = utd;
+	utd->test_info.get_test_case_str_fn = ufs_test_get_test_case_str;
+	utd->test_info.testcase = test_case;
+	utd->test_info.get_rq_disk_fn = ufs_test_get_rq_disk;
+	utd->test_info.check_test_result_fn = ufs_test_check_result;
+	utd->test_info.post_test_fn = ufs_test_post;
+	utd->test_info.prepare_test_fn = ufs_test_prepare;
+	utd->test_stage = DEFAULT;
+
+	switch (test_case) {
+	case UFS_TEST_WRITE_READ_TEST:
+		utd->test_info.run_test_fn = ufs_test_run_write_read_test;
+		utd->test_info.check_test_completion_fn =
+				ufs_write_read_completion;
+		break;
+	case UFS_TEST_MULTI_QUERY:
+		utd->test_info.run_test_fn = ufs_test_run_multi_query_test;
+		utd->test_info.check_test_result_fn = ufs_test_check_result;
+		break;
+	case UFS_TEST_DATA_INTEGRITY:
+		utd->test_info.run_test_fn = ufs_test_run_data_integrity_test;
+		utd->test_info.check_test_completion_fn =
+			ufs_data_integrity_completion;
+		break;
+	case UFS_TEST_LONG_RANDOM_READ:
+	case UFS_TEST_LONG_RANDOM_WRITE:
+		utd->test_info.run_test_fn = run_long_test;
+		utd->test_info.post_test_fn = long_rand_test_calc_iops;
+		utd->test_info.check_test_result_fn = ufs_test_check_result;
+		utd->test_info.check_test_completion_fn =
+			long_rand_test_check_completion;
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_READ:
+	case UFS_TEST_LONG_SEQUENTIAL_WRITE:
+		utd->test_info.run_test_fn = run_long_test;
+		utd->test_info.post_test_fn = long_seq_test_calc_throughput;
+		utd->test_info.check_test_result_fn = ufs_test_check_result;
+		utd->test_info.check_test_completion_fn =
+			long_seq_test_check_completion;
+		break;
+	case UFS_TEST_LONG_SEQUENTIAL_MIXED:
+		utd->test_info.timeout_msec = LONG_SEQUENTIAL_MIXED_TIMOUT_MS;
+		utd->test_info.run_test_fn = run_mixed_long_seq_test;
+		utd->test_info.post_test_fn = long_seq_test_calc_throughput;
+		utd->test_info.check_test_result_fn = ufs_test_check_result;
+		break;
+	case UFS_TEST_PARALLEL_READ_AND_WRITE:
+		utd->test_info.run_test_fn =
+				ufs_test_run_parallel_read_and_write_test;
+		utd->test_info.check_test_completion_fn =
+				ufs_test_multi_thread_completion;
+		break;
+	case UFS_TEST_LUN_DEPTH:
+		utd->test_info.run_test_fn = ufs_test_run_lun_depth_test;
+		break;
+	default:
+		pr_err("%s: Unknown test-case: %d", __func__, test_case);
+		WARN_ON(true);
+		return -EINVAL;
+	}
+
+	/* Running the test multiple times */
+	for (i = 0; i < number; ++i) {
+		pr_info("%s: Cycle # %d / %d", __func__, i+1, number);
+		pr_info("%s: ====================", __func__);
+
+		utd->test_info.test_byte_count = 0;
+		ret = test_iosched_start_test(utd->test_iosched,
+			&utd->test_info);
+		if (ret) {
+			pr_err("%s: Test failed, err=%d.", __func__, ret);
+			return ret;
+		}
+
+		/* Allow FS requests to be dispatched */
+		msleep(1000);
+	}
+
+	pr_info("%s: Completed all the ufs test iterations.", __func__);
+
+	return count;
+}
+
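+/*
+ * TEST_OPS() expands to the per-test debugfs callbacks that funnel writes
+ * into ufs_test_write() with the corresponding UFS_TEST_* case.
+ */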
+TEST_OPS(write_read_test, WRITE_READ_TEST);
+TEST_OPS(multi_query, MULTI_QUERY);
+TEST_OPS(data_integrity, DATA_INTEGRITY);
+TEST_OPS(long_random_read, LONG_RANDOM_READ);
+TEST_OPS(long_random_write, LONG_RANDOM_WRITE);
+TEST_OPS(long_sequential_read, LONG_SEQUENTIAL_READ);
+TEST_OPS(long_sequential_write, LONG_SEQUENTIAL_WRITE);
+TEST_OPS(long_sequential_mixed, LONG_SEQUENTIAL_MIXED);
+TEST_OPS(parallel_read_and_write, PARALLEL_READ_AND_WRITE);
+TEST_OPS(lun_depth, LUN_DEPTH);
+
+static void ufs_test_debugfs_cleanup(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+	debugfs_remove_recursive(test_iosched->debug.debug_root);
+	kfree(utd->test_list);
+}
+
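+/*
+ * Create the UFS-specific debugfs entries: the shared random_test_seed knob
+ * under the utils directory plus the per-test nodes registered via
+ * add_test().
+ */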
+static int ufs_test_debugfs_init(struct ufs_test_data *utd)
+{
+	struct dentry *utils_root, *tests_root;
+	int ret = 0;
+	struct test_iosched *ts = utd->test_iosched;
+
+	utils_root = ts->debug.debug_utils_root;
+	tests_root = ts->debug.debug_tests_root;
+
+	utd->test_list = kmalloc(sizeof(struct dentry *) * NUM_TESTS,
+			GFP_KERNEL);
+	if (!utd->test_list) {
+		pr_err("%s: failed to allocate tests dentries", __func__);
+		return -ENOMEM;
+	}
+
+	if (!utils_root || !tests_root) {
+		pr_err("%s: Failed to create debugfs root.", __func__);
+		ret = -EINVAL;
+		goto exit_err;
+	}
+
+	utd->random_test_seed_dentry = debugfs_create_u32("random_test_seed",
+			S_IRUGO | S_IWUGO, utils_root, &utd->random_test_seed);
+
+	if (!utd->random_test_seed_dentry) {
+		pr_err("%s: Could not create debugfs random_test_seed.",
+				__func__);
+		ret = -ENOMEM;
+		goto exit_err;
+	}
+
+	ret = add_test(utd, write_read_test, WRITE_READ_TEST);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, data_integrity, DATA_INTEGRITY);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, long_random_read, LONG_RANDOM_READ);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, long_random_write, LONG_RANDOM_WRITE);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, long_sequential_read, LONG_SEQUENTIAL_READ);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, long_sequential_write, LONG_SEQUENTIAL_WRITE);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, long_sequential_mixed, LONG_SEQUENTIAL_MIXED);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, multi_query, MULTI_QUERY);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, parallel_read_and_write, PARALLEL_READ_AND_WRITE);
+	if (ret)
+		goto exit_err;
+	ret = add_test(utd, lun_depth, LUN_DEPTH);
+	if (ret)
+		goto exit_err;
+
+	goto exit;
+
+exit_err:
+	ufs_test_debugfs_cleanup(ts);
+exit:
+	return ret;
+}
+
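+/*
+ * ufs_test_probe() - allocate the per-device test context, attach it to the
+ * test-iosched instance and create the debugfs entries.
+ */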
+static int ufs_test_probe(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd;
+	int ret;
+
+	utd = kzalloc(sizeof(*utd), GFP_KERNEL);
+	if (!utd) {
+		pr_err("%s: failed to allocate ufs test data\n", __func__);
+		return -ENOMEM;
+	}
+
+	init_waitqueue_head(&utd->wait_q);
+	utd->test_iosched = test_iosched;
+	test_iosched->blk_dev_test_data = utd;
+
+	ret = ufs_test_debugfs_init(utd);
+	if (ret) {
+		pr_err("%s: failed to init debug-fs entries, ret=%d\n",
+			__func__, ret);
+		test_iosched->blk_dev_test_data = NULL;
+		kfree(utd);
+	}
+
+	return ret;
+}
+
+static void ufs_test_remove(struct test_iosched *test_iosched)
+{
+	struct ufs_test_data *utd = test_iosched->blk_dev_test_data;
+
+	ufs_test_debugfs_cleanup(test_iosched);
+	test_iosched->blk_dev_test_data = NULL;
+	kfree(utd);
+}
+
+static int __init ufs_test_init(void)
+{
+	ufs_bdt = kzalloc(sizeof(*ufs_bdt), GFP_KERNEL);
+	if (!ufs_bdt)
+		return -ENOMEM;
+
+	ufs_bdt->type_prefix = UFS_TEST_BLK_DEV_TYPE_PREFIX;
+	ufs_bdt->init_fn = ufs_test_probe;
+	ufs_bdt->exit_fn = ufs_test_remove;
+	INIT_LIST_HEAD(&ufs_bdt->list);
+
+	test_iosched_register(ufs_bdt);
+
+	return 0;
+}
+
+static void __exit ufs_test_exit(void)
+{
+	test_iosched_unregister(ufs_bdt);
+	kfree(ufs_bdt);
+}
+module_init(ufs_test_init);
+module_exit(ufs_test_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("UFS test");
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index d15eaa4..52b546f 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -104,6 +104,7 @@
 	pm_runtime_forbid(&pdev->dev);
 	pm_runtime_get_noresume(&pdev->dev);
 	ufshcd_remove(hba);
+	ufshcd_dealloc_host(hba);
 }
 
 /**
@@ -147,6 +148,7 @@
 	err = ufshcd_init(hba, mmio_base, pdev->irq);
 	if (err) {
 		dev_err(&pdev->dev, "Initialization failed\n");
+		ufshcd_dealloc_host(hba);
 		return err;
 	}
 
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index db53f38d..de0d2f4 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -40,8 +40,6 @@
 #include "ufshcd.h"
 #include "ufshcd-pltfrm.h"
 
-#define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2
-
 static int ufshcd_parse_clock_info(struct ufs_hba *hba)
 {
 	int ret = 0;
@@ -163,7 +161,7 @@
 	if (ret) {
 		dev_err(dev, "%s: unable to find %s err %d\n",
 				__func__, prop_name, ret);
-		goto out_free;
+		goto out;
 	}
 
 	vreg->min_uA = 0;
@@ -185,9 +183,6 @@
 
 	goto out;
 
-out_free:
-	devm_kfree(dev, vreg);
-	vreg = NULL;
 out:
 	if (!ret)
 		*out_vreg = vreg;
@@ -226,7 +221,65 @@
 	return err;
 }
 
-#ifdef CONFIG_PM
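+/*
+ * The device-tree properties parsed below are optional: missing PM levels
+ * and gear limits are recorded as -1, and a missing scsi-cmd-timeout as 0.
+ */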
+static void ufshcd_parse_pm_levels(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+
+	if (np) {
+		if (of_property_read_u32(np, "rpm-level", &hba->rpm_lvl))
+			hba->rpm_lvl = -1;
+		if (of_property_read_u32(np, "spm-level", &hba->spm_lvl))
+			hba->spm_lvl = -1;
+	}
+}
+
+static void ufshcd_parse_gear_limits(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	if (!np)
+		return;
+
+	ret = of_property_read_u32(np, "limit-tx-hs-gear",
+		&hba->limit_tx_hs_gear);
+	if (ret)
+		hba->limit_tx_hs_gear = -1;
+
+	ret = of_property_read_u32(np, "limit-rx-hs-gear",
+		&hba->limit_rx_hs_gear);
+	if (ret)
+		hba->limit_rx_hs_gear = -1;
+
+	ret = of_property_read_u32(np, "limit-tx-pwm-gear",
+		&hba->limit_tx_pwm_gear);
+	if (ret)
+		hba->limit_tx_pwm_gear = -1;
+
+	ret = of_property_read_u32(np, "limit-rx-pwm-gear",
+		&hba->limit_rx_pwm_gear);
+	if (ret)
+		hba->limit_rx_pwm_gear = -1;
+}
+
+static void ufshcd_parse_cmd_timeout(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+	int ret;
+
+	if (!np)
+		return;
+
+	ret = of_property_read_u32(np, "scsi-cmd-timeout",
+		&hba->scsi_cmd_timeout);
+	if (ret)
+		hba->scsi_cmd_timeout = 0;
+}
+
+#ifdef CONFIG_PM
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
  * @dev: pointer to device handle
@@ -279,30 +332,15 @@
 }
 EXPORT_SYMBOL_GPL(ufshcd_pltfrm_shutdown);
 
-static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
-{
-	struct device *dev = hba->dev;
-	int ret;
-
-	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
-		&hba->lanes_per_direction);
-	if (ret) {
-		dev_dbg(hba->dev,
-			"%s: failed to read lanes-per-direction, ret=%d\n",
-			__func__, ret);
-		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
-	}
-}
-
 /**
  * ufshcd_pltfrm_init - probe routine of the driver
  * @pdev: pointer to Platform device handle
- * @vops: pointer to variant ops
+ * @var: pointer to variant specific data
  *
  * Returns 0 on success, non-zero value on failure
  */
 int ufshcd_pltfrm_init(struct platform_device *pdev,
-		       struct ufs_hba_variant_ops *vops)
+		       struct ufs_hba_variant *var)
 {
 	struct ufs_hba *hba;
 	void __iomem *mmio_base;
@@ -330,7 +368,7 @@
 		goto out;
 	}
 
-	hba->vops = vops;
+	hba->var = var;
 
 	err = ufshcd_parse_clock_info(hba);
 	if (err) {
@@ -345,24 +383,25 @@
 		goto dealloc_host;
 	}
 
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
+	ufshcd_parse_pm_levels(hba);
+	ufshcd_parse_gear_limits(hba);
+	ufshcd_parse_cmd_timeout(hba);
 
-	ufshcd_init_lanes_per_dir(hba);
+	if (!dev->dma_mask)
+		dev->dma_mask = &dev->coherent_dma_mask;
 
 	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
-		dev_err(dev, "Initialization failed\n");
-		goto out_disable_rpm;
+		dev_err(dev, "Initialization failed\n");
+		goto dealloc_host;
 	}
 
 	platform_set_drvdata(pdev, hba);
 
-	return 0;
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
 
-out_disable_rpm:
-	pm_runtime_disable(&pdev->dev);
-	pm_runtime_set_suspended(&pdev->dev);
+	return 0;
 dealloc_host:
 	ufshcd_dealloc_host(hba);
 out:
@@ -372,6 +411,6 @@
 
 MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
 MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
+MODULE_DESCRIPTION("UFS host controller Platform bus based glue driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.h b/drivers/scsi/ufs/ufshcd-pltfrm.h
index df64c41..6d8330b 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.h
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.h
@@ -17,7 +17,7 @@
 #include "ufshcd.h"
 
 int ufshcd_pltfrm_init(struct platform_device *pdev,
-		       struct ufs_hba_variant_ops *vops);
+		       struct ufs_hba_variant *var);
 void ufshcd_pltfrm_shutdown(struct platform_device *pdev);
 
 #ifdef CONFIG_PM
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 05c7456..f15c607 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -38,12 +38,140 @@
  */
 
 #include <linux/async.h>
+#include <scsi/ufs/ioctl.h>
 #include <linux/devfreq.h>
 #include <linux/nls.h>
 #include <linux/of.h>
+#include <linux/blkdev.h>
 #include "ufshcd.h"
+#include "ufshci.h"
 #include "ufs_quirks.h"
-#include "unipro.h"
+#include "ufs-debugfs.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/ufs.h>
+
+#ifdef CONFIG_DEBUG_FS
+
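+/*
+ * Per-tag, per-request-type and error statistics exposed through debugfs.
+ * The empty inline stubs in the #else branch below keep this bookkeeping out
+ * of the I/O path when CONFIG_DEBUG_FS is disabled.
+ */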
+static int ufshcd_tag_req_type(struct request *rq)
+{
+	int rq_type = TS_WRITE;
+
+	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+		rq_type = TS_NOT_SUPPORTED;
+	else if (rq->cmd_flags & REQ_PREFLUSH)
+		rq_type = TS_FLUSH;
+	else if (rq_data_dir(rq) == READ)
+		rq_type = (rq->cmd_flags & REQ_URGENT) ?
+			TS_URGENT_READ : TS_READ;
+	else if (rq->cmd_flags & REQ_URGENT)
+		rq_type = TS_URGENT_WRITE;
+
+	return rq_type;
+}
+
+static void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+	ufsdbg_set_err_state(hba);
+	if (type < UFS_ERR_MAX)
+		hba->ufs_stats.err_stats[type]++;
+}
+
+static void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+	struct request *rq =
+		hba->lrb[tag].cmd ? hba->lrb[tag].cmd->request : NULL;
+	u64 **tag_stats = hba->ufs_stats.tag_stats;
+	int rq_type;
+
+	if (!hba->ufs_stats.enabled)
+		return;
+
+	tag_stats[tag][TS_TAG]++;
+	if (!rq || !(rq->cmd_type & REQ_TYPE_FS))
+		return;
+
+	WARN_ON(hba->ufs_stats.q_depth > hba->nutrs);
+	rq_type = ufshcd_tag_req_type(rq);
+	if (!(rq_type < 0 || rq_type > TS_NUM_STATS))
+		tag_stats[hba->ufs_stats.q_depth++][rq_type]++;
+}
+
+static void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+		struct scsi_cmnd *cmd)
+{
+	struct request *rq = cmd ? cmd->request : NULL;
+
+	if (rq && rq->cmd_type & REQ_TYPE_FS)
+		hba->ufs_stats.q_depth--;
+}
+
+static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+	int rq_type;
+	struct request *rq = lrbp->cmd ? lrbp->cmd->request : NULL;
+	s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
+		lrbp->issue_time_stamp);
+
+	/* update general request statistics */
+	if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
+		hba->ufs_stats.req_stats[TS_TAG].min = delta;
+	hba->ufs_stats.req_stats[TS_TAG].count++;
+	hba->ufs_stats.req_stats[TS_TAG].sum += delta;
+	if (delta > hba->ufs_stats.req_stats[TS_TAG].max)
+		hba->ufs_stats.req_stats[TS_TAG].max = delta;
+	if (delta < hba->ufs_stats.req_stats[TS_TAG].min)
+		hba->ufs_stats.req_stats[TS_TAG].min = delta;
+
+	rq_type = ufshcd_tag_req_type(rq);
+	if (rq_type == TS_NOT_SUPPORTED)
+		return;
+
+	/* update request type specific statistics */
+	if (hba->ufs_stats.req_stats[rq_type].count == 0)
+		hba->ufs_stats.req_stats[rq_type].min = delta;
+	hba->ufs_stats.req_stats[rq_type].count++;
+	hba->ufs_stats.req_stats[rq_type].sum += delta;
+	if (delta > hba->ufs_stats.req_stats[rq_type].max)
+		hba->ufs_stats.req_stats[rq_type].max = delta;
+	if (delta < hba->ufs_stats.req_stats[rq_type].min)
+		hba->ufs_stats.req_stats[rq_type].min = delta;
+}
+
+static void
+ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
+{
+	if (opcode < UPIU_QUERY_OPCODE_MAX && idn < MAX_QUERY_IDN)
+		hba->ufs_stats.query_stats_arr[opcode][idn]++;
+}
+
+#else
+static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
+{
+}
+
+static inline void ufshcd_update_tag_stats_completion(struct ufs_hba *hba,
+		struct scsi_cmnd *cmd)
+{
+}
+
+static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
+{
+}
+
+static inline
+void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+{
+}
+
+static inline
+void ufshcd_update_query_stats(struct ufs_hba *hba,
+			       enum query_opcode opcode, u8 idn)
+{
+}
+#endif
+
+#define UFSHCD_REQ_SENSE_SIZE	18
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
@@ -57,15 +185,9 @@
 #define NOP_OUT_TIMEOUT    30 /* msecs */
 
 /* Query request retries */
-#define QUERY_REQ_RETRIES 10
+#define QUERY_REQ_RETRIES 3
 /* Query request timeout */
-#define QUERY_REQ_TIMEOUT 30 /* msec */
-/*
- * Query request timeout for fDeviceInit flag
- * fDeviceInit query response time for some devices is too large that default
- * QUERY_REQ_TIMEOUT may not be enough for such devices.
- */
-#define QUERY_FDEVICEINIT_REQ_TIMEOUT 600 /* msec */
+#define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */
 
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
@@ -88,6 +210,17 @@
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO	0x02
 
+/* default value of auto suspend is 3 seconds */
+#define UFSHCD_AUTO_SUSPEND_DELAY_MS 3000 /* millisecs */
+
+#define UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE	10
+#define UFSHCD_CLK_GATING_DELAY_MS_PERF		50
+
+/* IOCTL opcode for command - ufs set device read only */
+#define UFS_IOCTL_BLKROSET      BLKROSET
+
+#define UFSHCD_DEFAULT_LANES_PER_DIRECTION		2
+
 #define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
 	({                                                              \
 		int _ret;                                               \
@@ -98,6 +231,9 @@
 		_ret;                                                   \
 	})
 
+#define ufshcd_hex_dump(prefix_str, buf, len) \
+print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
+
 static u32 ufs_query_desc_max_size[] = {
 	QUERY_DESC_DEVICE_MAX_SIZE,
 	QUERY_DESC_CONFIGURAION_MAX_SIZE,
@@ -106,7 +242,7 @@
 	QUERY_DESC_INTERCONNECT_MAX_SIZE,
 	QUERY_DESC_STRING_MAX_SIZE,
 	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_GEOMETRY_MAX_SIZE,
+	QUERY_DESC_GEOMETRY_MAZ_SIZE,
 	QUERY_DESC_POWER_MAX_SIZE,
 	QUERY_DESC_RFU_MAX_SIZE,
 };
@@ -147,6 +283,8 @@
 	UFSHCD_INT_CLEAR,
 };
 
+#define DEFAULT_UFSHCD_DBG_PRINT_EN	UFSHCD_DBG_PRINT_ALL
+
 #define ufshcd_set_eh_in_progress(h) \
 	(h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
 #define ufshcd_eh_in_progress(h) \
@@ -188,54 +326,105 @@
 	return ufs_pm_lvl_states[lvl].link_state;
 }
 
+static inline enum ufs_pm_level
+ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+					enum uic_link_state link_state)
+{
+	enum ufs_pm_level lvl;
+
+	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++) {
+		if ((ufs_pm_lvl_states[lvl].dev_state == dev_state) &&
+			(ufs_pm_lvl_states[lvl].link_state == link_state))
+			return lvl;
+	}
+
+	/* if no match found, return the level 0 */
+	return UFS_PM_LVL_0;
+}
+
+static inline bool ufshcd_is_valid_pm_lvl(int lvl)
+{
+	if (lvl >= 0 && lvl < ARRAY_SIZE(ufs_pm_lvl_states))
+		return true;
+	else
+		return false;
+}
+
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
+static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba);
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
-				 bool skip_ref_clk);
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_enable_clocks(struct ufs_hba *hba);
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+				 bool is_gating_context);
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+					      bool is_gating_context);
 static int ufshcd_set_vccq_rail_unused(struct ufs_hba *hba, bool unused);
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
 static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(struct ufs_hba *hba);
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
-static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
-		struct ufs_pa_layer_attr *desired_pwr_mode);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
-			     struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba);
+static void ufshcd_release_all(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
+
 static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag)
 {
 	return tag >= 0 && tag < hba->nutrs;
 }
 
-static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+static inline void ufshcd_enable_irq(struct ufs_hba *hba)
 {
-	int ret = 0;
-
 	if (!hba->is_irq_enabled) {
-		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
-				hba);
-		if (ret)
-			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
-				__func__, ret);
+		enable_irq(hba->irq);
 		hba->is_irq_enabled = true;
 	}
-
-	return ret;
 }
 
 static inline void ufshcd_disable_irq(struct ufs_hba *hba)
 {
 	if (hba->is_irq_enabled) {
-		free_irq(hba->irq, hba);
+		disable_irq(hba->irq);
 		hba->is_irq_enabled = false;
 	}
 }
 
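+/*
+ * scsi_block_requests()/scsi_unblock_requests() calls are reference counted
+ * through scsi_block_reqs_cnt so the host is only unblocked once the last
+ * blocker releases it.
+ */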
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool unblock = false;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->scsi_block_reqs_cnt--;
+	unblock = !hba->scsi_block_reqs_cnt;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (unblock)
+		scsi_unblock_requests(hba->host);
+}
+EXPORT_SYMBOL(ufshcd_scsi_unblock_requests);
+
+static inline void __ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	if (!hba->scsi_block_reqs_cnt++)
+		scsi_block_requests(hba->host);
+}
+
+void ufshcd_scsi_block_requests(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_scsi_block_requests(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+EXPORT_SYMBOL(ufshcd_scsi_block_requests);
+
 /* replace non-printable or non-ASCII characters with spaces */
 static inline void ufshcd_remove_non_printable(char *val)
 {
@@ -246,6 +435,241 @@
 		*val = ' ';
 }
 
+#ifdef CONFIG_TRACEPOINTS
+static void ufshcd_add_command_trace(struct ufs_hba *hba,
+		unsigned int tag, const char *str)
+{
+	sector_t lba = -1;
+	u8 opcode = 0;
+	u32 intr, doorbell;
+	struct ufshcd_lrb *lrbp;
+	int transfer_len = -1;
+
+	lrbp = &hba->lrb[tag];
+
+	if (lrbp->cmd) { /* data phase exists */
+		opcode = (u8)(*lrbp->cmd->cmnd);
+		if ((opcode == READ_10) || (opcode == WRITE_10)) {
+			/*
+			 * Currently we only fully trace read(10) and write(10)
+			 * commands
+			 */
+			if (lrbp->cmd->request && lrbp->cmd->request->bio)
+				lba =
+				  lrbp->cmd->request->bio->bi_iter.bi_sector;
+			transfer_len = be32_to_cpu(
+				lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+		}
+	}
+
+	intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+	doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	trace_ufshcd_command(dev_name(hba->dev), str, tag,
+				doorbell, transfer_len, intr, lba, opcode);
+}
+
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+					unsigned int tag, const char *str)
+{
+	if (trace_ufshcd_command_enabled())
+		ufshcd_add_command_trace(hba, tag, str);
+}
+#else
+static inline void ufshcd_cond_add_cmd_trace(struct ufs_hba *hba,
+					unsigned int tag, const char *str)
+{
+}
+#endif
+
+static void ufshcd_print_clk_freqs(struct ufs_hba *hba)
+{
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_CLK_FREQ_EN))
+		return;
+
+	if (!head || list_empty(head))
+		return;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk) && clki->min_freq &&
+				clki->max_freq)
+			dev_err(hba->dev, "clk: %s, rate: %u\n",
+					clki->name, clki->curr_freq);
+	}
+}
+
+static void ufshcd_print_uic_err_hist(struct ufs_hba *hba,
+		struct ufs_uic_err_reg_hist *err_hist, char *err_name)
+{
+	int i;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN))
+		return;
+
+	for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) {
+		int p = (i + err_hist->pos - 1) % UIC_ERR_REG_HIST_LENGTH;
+
+		if (err_hist->reg[p] == 0)
+			continue;
+		dev_err(hba->dev, "%s[%d] = 0x%x at %lld us", err_name, i,
+			err_hist->reg[p], ktime_to_us(err_hist->tstamp[p]));
+	}
+}
+
+static void ufshcd_print_host_regs(struct ufs_hba *hba)
+{
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN))
+		return;
+
+	/*
+	 * hex_dump reads its data without the readl macro. This might
+	 * cause inconsistency issues on some platform, as the printed
+	 * cause inconsistency issues on some platforms, as the printed
+	 * To know whether you are looking at an un-cached version verify
+	 * that IORESOURCE_MEM flag is on when xxx_get_resource() is invoked
+	 * during platform/pci probe function.
+	 */
+	ufshcd_hex_dump("host regs: ", hba->mmio_base, UFSHCI_REG_SPACE_SIZE);
+	dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x",
+		hba->ufs_version, hba->capabilities);
+	dev_err(hba->dev,
+		"hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x",
+		(u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
+	dev_err(hba->dev,
+		"last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d",
+		ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+		hba->ufs_stats.hibern8_exit_cnt);
+
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err");
+	ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err");
+
+	ufshcd_print_clk_freqs(hba);
+
+	ufshcd_vops_dbg_register_dump(hba);
+}
+
+static
+void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
+{
+	struct ufshcd_lrb *lrbp;
+	int prdt_length;
+	int tag;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TRS_EN))
+		return;
+
+	for_each_set_bit(tag, &bitmap, hba->nutrs) {
+		lrbp = &hba->lrb[tag];
+
+		dev_err(hba->dev, "UPIU[%d] - issue time %lld us",
+				tag, ktime_to_us(lrbp->issue_time_stamp));
+		dev_err(hba->dev,
+			"UPIU[%d] - Transfer Request Descriptor phys@0x%llx",
+			tag, (u64)lrbp->utrd_dma_addr);
+		ufshcd_hex_dump("UPIU TRD: ", lrbp->utr_descriptor_ptr,
+				sizeof(struct utp_transfer_req_desc));
+		dev_err(hba->dev, "UPIU[%d] - Request UPIU phys@0x%llx", tag,
+			(u64)lrbp->ucd_req_dma_addr);
+		ufshcd_hex_dump("UPIU REQ: ", lrbp->ucd_req_ptr,
+				sizeof(struct utp_upiu_req));
+		dev_err(hba->dev, "UPIU[%d] - Response UPIU phys@0x%llx", tag,
+			(u64)lrbp->ucd_rsp_dma_addr);
+		ufshcd_hex_dump("UPIU RSP: ", lrbp->ucd_rsp_ptr,
+				sizeof(struct utp_upiu_rsp));
+		prdt_length =
+			le16_to_cpu(lrbp->utr_descriptor_ptr->prd_table_length);
+		dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries  phys@0x%llx",
+			tag, prdt_length, (u64)lrbp->ucd_prdt_dma_addr);
+		if (pr_prdt)
+			ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr,
+				sizeof(struct ufshcd_sg_entry) * prdt_length);
+	}
+}
+
+static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
+{
+	struct utp_task_req_desc *tmrdp;
+	int tag;
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_TMRS_EN))
+		return;
+
+	for_each_set_bit(tag, &bitmap, hba->nutmrs) {
+		tmrdp = &hba->utmrdl_base_addr[tag];
+		dev_err(hba->dev, "TM[%d] - Task Management Header", tag);
+		ufshcd_hex_dump("TM TRD: ", &tmrdp->header,
+				sizeof(struct request_desc_header));
+		dev_err(hba->dev, "TM[%d] - Task Management Request UPIU",
+				tag);
+		ufshcd_hex_dump("TM REQ: ", tmrdp->task_req_upiu,
+				sizeof(struct utp_upiu_req));
+		dev_err(hba->dev, "TM[%d] - Task Management Response UPIU",
+				tag);
+		ufshcd_hex_dump("TM RSP: ", tmrdp->task_rsp_upiu,
+				sizeof(struct utp_task_req_desc));
+	}
+}
+
+static void ufshcd_print_host_state(struct ufs_hba *hba)
+{
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_STATE_EN))
+		return;
+
+	dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
+	dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n",
+		hba->lrb_in_use, hba->outstanding_tasks, hba->outstanding_reqs);
+	dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x, saved_ce_err=0x%x\n",
+		hba->saved_err, hba->saved_uic_err, hba->saved_ce_err);
+	dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n",
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
+	dev_err(hba->dev, "PM in progress=%d, sys. suspended=%d\n",
+		hba->pm_op_in_progress, hba->is_sys_suspended);
+	dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
+		hba->auto_bkops_enabled, hba->host->host_self_blocked);
+	dev_err(hba->dev, "Clk gate=%d, hibern8 on idle=%d\n",
+		hba->clk_gating.state, hba->hibern8_on_idle.state);
+	dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
+		hba->eh_flags, hba->req_abort_count);
+	dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
+		hba->capabilities, hba->caps);
+	dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
+		hba->dev_quirks);
+}
+
+/**
+ * ufshcd_print_pwr_info - print power params as saved in hba
+ * power info
+ * @hba: per-adapter instance
+ */
+static void ufshcd_print_pwr_info(struct ufs_hba *hba)
+{
+	char *names[] = {
+		"INVALID MODE",
+		"FAST MODE",
+		"SLOW_MODE",
+		"INVALID MODE",
+		"FASTAUTO_MODE",
+		"SLOWAUTO_MODE",
+		"INVALID MODE",
+	};
+
+	if (!(hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_PWR_EN))
+		return;
+
+	dev_err(hba->dev, "%s:[RX, TX]: gear=[%d, %d], lane[%d, %d], pwr[%s, %s], rate = %d\n",
+		 __func__,
+		 hba->pwr_info.gear_rx, hba->pwr_info.gear_tx,
+		 hba->pwr_info.lane_rx, hba->pwr_info.lane_tx,
+		 names[hba->pwr_info.pwr_rx],
+		 names[hba->pwr_info.pwr_tx],
+		 hba->pwr_info.hs_rate);
+}
+
 /*
  * ufshcd_wait_for_register - wait for register value to change
  * @hba - per-adapter interface
@@ -255,7 +679,6 @@
  * @interval_us - polling interval in microsecs
  * @timeout_ms - timeout in millisecs
  * @can_sleep - perform sleep or just spin
- *
  * Returns -ETIMEDOUT on error, zero on success
  */
 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
@@ -291,10 +714,27 @@
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-	if (hba->ufs_version == UFSHCI_VERSION_10)
-		return INTERRUPT_MASK_ALL_VER_10;
-	else
-		return INTERRUPT_MASK_ALL_VER_11;
+	u32 intr_mask = 0;
+
+	switch (hba->ufs_version) {
+	case UFSHCI_VERSION_10:
+		intr_mask = INTERRUPT_MASK_ALL_VER_10;
+		break;
+	/* allow fall through */
+	case UFSHCI_VERSION_11:
+	case UFSHCI_VERSION_20:
+		intr_mask = INTERRUPT_MASK_ALL_VER_11;
+		break;
+	/* allow fall through */
+	case UFSHCI_VERSION_21:
+	default:
+		intr_mask = INTERRUPT_MASK_ALL_VER_21;
+	}
+
+	if (!ufshcd_is_crypto_supported(hba))
+		intr_mask &= ~CRYPTO_ENGINE_FATAL_ERROR;
+
+	return intr_mask;
 }
 
 /**
@@ -556,7 +996,11 @@
  */
 static inline void ufshcd_hba_start(struct ufs_hba *hba)
 {
-	ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
+	u32 val = CONTROLLER_ENABLE;
+
+	if (ufshcd_is_crypto_supported(hba))
+		val |= CRYPTO_GENERAL_ENABLE;
+	ufshcd_writel(hba, val, REG_CONTROLLER_ENABLE);
 }
 
 /**
@@ -570,6 +1014,28 @@
 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static const char *ufschd_uic_link_state_to_string(
+			enum uic_link_state state)
+{
+	switch (state) {
+	case UIC_LINK_OFF_STATE:	return "OFF";
+	case UIC_LINK_ACTIVE_STATE:	return "ACTIVE";
+	case UIC_LINK_HIBERN8_STATE:	return "HIBERN8";
+	default:			return "UNKNOWN";
+	}
+}
+
+static const char *ufschd_ufs_dev_pwr_mode_to_string(
+			enum ufs_dev_pwr_mode state)
+{
+	switch (state) {
+	case UFS_ACTIVE_PWR_MODE:	return "ACTIVE";
+	case UFS_SLEEP_PWR_MODE:	return "SLEEP";
+	case UFS_POWERDOWN_PWR_MODE:	return "POWERDOWN";
+	default:			return "UNKNOWN";
+	}
+}
+
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
 {
 	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
@@ -598,6 +1064,97 @@
 		return false;
 }
 
+/**
+ * ufshcd_set_clk_freq - set UFS controller clock frequencies
+ * @hba: per adapter instance
+ * @scale_up: if true, set max possible frequency, otherwise set low frequency
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_set_clk_freq(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (!head || list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->max_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->max_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled up", clki->name,
+						clki->curr_freq,
+						clki->max_freq);
+				clki->curr_freq = clki->max_freq;
+
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+
+				ret = clk_set_rate(clki->clk, clki->min_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->min_freq, ret);
+					break;
+				}
+				trace_ufshcd_clk_scaling(dev_name(hba->dev),
+						"scaled down", clki->name,
+						clki->curr_freq,
+						clki->min_freq);
+				clki->curr_freq = clki->min_freq;
+			}
+		}
+		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * ufshcd_scale_clks - scale up or scale down UFS controller clocks
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns 0 if successful
+ * Returns < 0 for any other errors
+ */
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_set_clk_freq(hba, scale_up);
+	if (ret)
+		return ret;
+
+	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+	if (ret) {
+		ufshcd_set_clk_freq(hba, !scale_up);
+		return ret;
+	}
+
+	return ret;
+}
+
 static void ufshcd_ungate_work(struct work_struct *work)
 {
 	int ret;
@@ -614,7 +1171,8 @@
 	}
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ufshcd_setup_clocks(hba, true);
+	ufshcd_hba_vreg_set_hpm(hba);
+	ufshcd_enable_clocks(hba);
 
 	/* Exit from hibern8 */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
@@ -631,9 +1189,7 @@
 		hba->clk_gating.is_suspended = false;
 	}
 unblock_reqs:
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
-	scsi_unblock_requests(hba->host);
+	ufshcd_scsi_unblock_requests(hba);
 }
 
 /**
@@ -660,10 +1216,27 @@
 start:
 	switch (hba->clk_gating.state) {
 	case CLKS_ON:
+		/*
+		 * Wait for the ungate work to complete if in progress.
+		 * Though the clocks may be in ON state, the link could
+		 * still be in hibern8 state if hibern8 is allowed
+		 * during clock gating.
+		 * Make sure we exit hibern8 state also in addition to
+		 * clocks being ON.
+		 */
+		if (ufshcd_can_hibern8_during_gating(hba) &&
+		    ufshcd_is_link_hibern8(hba)) {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_work(&hba->clk_gating.ungate_work);
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
 		break;
 	case REQ_CLKS_OFF:
 		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
 			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+				hba->clk_gating.state);
 			break;
 		}
 		/*
@@ -672,8 +1245,10 @@
 		 * work and to enable clocks.
 		 */
 	case CLKS_OFF:
-		scsi_block_requests(hba->host);
+		__ufshcd_scsi_block_requests(hba);
 		hba->clk_gating.state = REQ_CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
 		schedule_work(&hba->clk_gating.ungate_work);
 		/*
 		 * fall through to check if we should wait for this
@@ -711,6 +1286,8 @@
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->clk_gating.is_suspended) {
 		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
 		goto rel_lock;
 	}
 
@@ -722,25 +1299,33 @@
 
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	if (ufshcd_is_hibern8_on_idle_allowed(hba) &&
+	    hba->hibern8_on_idle.is_enabled)
+		/*
+		 * Hibern8 enter work (on Idle) needs clocks to be ON hence
+		 * make sure that it is flushed before turning off the clocks.
+		 */
+		flush_delayed_work(&hba->hibern8_on_idle.enter_work);
+
 	/* put the link into hibern8 mode before turning off clocks */
 	if (ufshcd_can_hibern8_during_gating(hba)) {
 		if (ufshcd_uic_hibern8_enter(hba)) {
 			hba->clk_gating.state = CLKS_ON;
+			trace_ufshcd_clk_gating(dev_name(hba->dev),
+				hba->clk_gating.state);
 			goto out;
 		}
 		ufshcd_set_link_hibern8(hba);
 	}
 
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
-
-	if (!ufshcd_is_link_active(hba))
-		ufshcd_setup_clocks(hba, false);
+	if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
+		ufshcd_disable_clocks(hba, true);
 	else
 		/* If link is active, device ref_clk can't be switched off */
-		__ufshcd_setup_clocks(hba, false, true);
+		ufshcd_disable_clocks_skip_ref_clk(hba, true);
+
+	/* Put the host controller in low power mode if possible */
+	ufshcd_hba_vreg_set_lpm(hba);
 
 	/*
 	 * In case you are here to cancel this work the gating state
@@ -752,9 +1337,11 @@
 	 * new requests arriving before the current cancel work is done.
 	 */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	if (hba->clk_gating.state == REQ_CLKS_OFF)
+	if (hba->clk_gating.state == REQ_CLKS_OFF) {
 		hba->clk_gating.state = CLKS_OFF;
-
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
+	}
 rel_lock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
@@ -762,7 +1349,7 @@
 }
 
 /* host lock must be held before calling this variant */
-static void __ufshcd_release(struct ufs_hba *hba)
+static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
 {
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
@@ -773,20 +1360,22 @@
 		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
 		|| hba->lrb_in_use || hba->outstanding_tasks
 		|| hba->active_uic_cmd || hba->uic_async_done
-		|| ufshcd_eh_in_progress(hba))
+		|| ufshcd_eh_in_progress(hba) || no_sched)
 		return;
 
 	hba->clk_gating.state = REQ_CLKS_OFF;
+	trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+
 	schedule_delayed_work(&hba->clk_gating.gate_work,
-			msecs_to_jiffies(hba->clk_gating.delay_ms));
+			      msecs_to_jiffies(hba->clk_gating.delay_ms));
 }
 
-void ufshcd_release(struct ufs_hba *hba)
+void ufshcd_release(struct ufs_hba *hba, bool no_sched)
 {
 	unsigned long flags;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	__ufshcd_release(hba);
+	__ufshcd_release(hba, no_sched);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ufshcd_release);
@@ -814,15 +1403,154 @@
 	return count;
 }
 
+static ssize_t ufshcd_clkgate_delay_pwr_save_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n",
+			hba->clk_gating.delay_ms_pwr_save);
+}
+
+static ssize_t ufshcd_clkgate_delay_pwr_save_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	hba->clk_gating.delay_ms_pwr_save = value;
+	if (ufshcd_is_clkscaling_supported(hba) &&
+	    !hba->clk_scaling.is_scaled_up)
+		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_pwr_save;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static ssize_t ufshcd_clkgate_delay_perf_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms_perf);
+}
+
+static ssize_t ufshcd_clkgate_delay_perf_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	hba->clk_gating.delay_ms_perf = value;
+	if (ufshcd_is_clkscaling_supported(hba) &&
+	    hba->clk_scaling.is_scaled_up)
+		hba->clk_gating.delay_ms = hba->clk_gating.delay_ms_perf;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
+}
+
+static ssize_t ufshcd_clkgate_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags;
+	u32 value;
+
+	if (kstrtou32(buf, 0, &value))
+		return -EINVAL;
+
+	value = !!value;
+	if (value == hba->clk_gating.is_enabled)
+		goto out;
+
+	if (value) {
+		ufshcd_release(hba, false);
+	} else {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->clk_gating.active_reqs++;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+
+	hba->clk_gating.is_enabled = value;
+out:
+	return count;
+}
+
 static void ufshcd_init_clk_gating(struct ufs_hba *hba)
 {
+	struct ufs_clk_gating *gating = &hba->clk_gating;
+
+	hba->clk_gating.state = CLKS_ON;
+
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
 
-	hba->clk_gating.delay_ms = 150;
-	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
-	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+	INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
+	INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
 
+	gating->is_enabled = true;
+
+	/*
+	 * Scheduling the delayed work after 1 jiffy would let it run any time
+	 * from 0 ms to 1000/HZ ms, which is not desirable for the hibern8
+	 * enter work as it may impact performance if it gets scheduled almost
+	 * immediately. Hence make sure the hibern8 enter work gets scheduled
+	 * at least 2 jiffies out (any time between 1000/HZ ms and 2000/HZ ms).
+	 */
+	gating->delay_ms_pwr_save = jiffies_to_msecs(
+		max_t(unsigned long,
+		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
+		      2));
+	gating->delay_ms_perf = jiffies_to_msecs(
+		max_t(unsigned long,
+		      msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
+		      2));
+
+	/* start with performance mode */
+	gating->delay_ms = gating->delay_ms_perf;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		goto scaling_not_supported;
+
+	gating->delay_pwr_save_attr.show = ufshcd_clkgate_delay_pwr_save_show;
+	gating->delay_pwr_save_attr.store = ufshcd_clkgate_delay_pwr_save_store;
+	sysfs_attr_init(&gating->delay_pwr_save_attr.attr);
+	gating->delay_pwr_save_attr.attr.name = "clkgate_delay_ms_pwr_save";
+	gating->delay_pwr_save_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->delay_pwr_save_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_pwr_save\n");
+
+	gating->delay_perf_attr.show = ufshcd_clkgate_delay_perf_show;
+	gating->delay_perf_attr.store = ufshcd_clkgate_delay_perf_store;
+	sysfs_attr_init(&gating->delay_perf_attr.attr);
+	gating->delay_perf_attr.attr.name = "clkgate_delay_ms_perf";
+	gating->delay_perf_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->delay_perf_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay_ms_perf\n");
+
+	goto add_clkgate_enable;
+
+scaling_not_supported:
 	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
 	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
 	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
@@ -830,23 +1558,423 @@
 	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
 	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
 		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+
+add_clkgate_enable:
+	gating->enable_attr.show = ufshcd_clkgate_enable_show;
+	gating->enable_attr.store = ufshcd_clkgate_enable_store;
+	sysfs_attr_init(&gating->enable_attr.attr);
+	gating->enable_attr.attr.name = "clkgate_enable";
+	gating->enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &gating->enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
 }
 
 static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
 {
 	if (!ufshcd_is_clkgating_allowed(hba))
 		return;
-	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		device_remove_file(hba->dev,
+				   &hba->clk_gating.delay_pwr_save_attr);
+		device_remove_file(hba->dev, &hba->clk_gating.delay_perf_attr);
+	} else {
+		device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+	}
+	device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
 	cancel_work_sync(&hba->clk_gating.ungate_work);
 	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
 }
 
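+/*
+ * Program the controller's auto-hibern8 idle timer; @delay is in
+ * milliseconds (1 ms timer scale).
+ */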
+static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
+{
+	ufshcd_rmwl(hba, AUTO_HIBERN8_TIMER_SCALE_MASK |
+			 AUTO_HIBERN8_IDLE_TIMER_MASK,
+			AUTO_HIBERN8_TIMER_SCALE_1_MS | delay,
+			REG_AUTO_HIBERN8_IDLE_TIMER);
+	/* Make sure the timer gets applied before further operations */
+	mb();
+}
+
+/**
+ * ufshcd_hibern8_hold - Make sure that link is not in hibern8.
+ *
+ * @hba: per adapter instance
+ * @async: This indicates whether caller wants to exit hibern8 asynchronously.
+ *
+ * Exit from hibern8 mode and set the link as active.
+ *
+ * Return 0 on success, non-zero on failure.
+ */
+static int ufshcd_hibern8_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+		goto out;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->hibern8_on_idle.active_reqs++;
+
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		return 0;
+	}
+
+start:
+	switch (hba->hibern8_on_idle.state) {
+	case HIBERN8_EXITED:
+		break;
+	case REQ_HIBERN8_ENTER:
+		if (cancel_delayed_work(&hba->hibern8_on_idle.enter_work)) {
+			hba->hibern8_on_idle.state = HIBERN8_EXITED;
+			trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+				hba->hibern8_on_idle.state);
+			break;
+		}
+		/*
+		 * If we are here, it means the hibern8 enter work is either done or
+		 * currently running. Hence, fall through to cancel hibern8
+		 * work and exit hibern8.
+		 */
+	case HIBERN8_ENTERED:
+		__ufshcd_scsi_block_requests(hba);
+		hba->hibern8_on_idle.state = REQ_HIBERN8_EXIT;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+		schedule_work(&hba->hibern8_on_idle.exit_work);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+	case REQ_HIBERN8_EXIT:
+		if (async) {
+			rc = -EAGAIN;
+			hba->hibern8_on_idle.active_reqs--;
+			break;
+		} else {
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+			flush_work(&hba->hibern8_on_idle.exit_work);
+			/* Make sure state is HIBERN8_EXITED before returning */
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			goto start;
+		}
+	default:
+		dev_err(hba->dev, "%s: H8 is in invalid state %d\n",
+				__func__, hba->hibern8_on_idle.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+	unsigned long delay_in_jiffies;
+
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba))
+		return;
+
+	hba->hibern8_on_idle.active_reqs--;
+	BUG_ON(hba->hibern8_on_idle.active_reqs < 0);
+
+	if (hba->hibern8_on_idle.active_reqs
+		|| hba->hibern8_on_idle.is_suspended
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done
+		|| ufshcd_eh_in_progress(hba) || no_sched)
+		return;
+
+	hba->hibern8_on_idle.state = REQ_HIBERN8_ENTER;
+	trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+		hba->hibern8_on_idle.state);
+	/*
+	 * Scheduling the delayed work after 1 jiffy would let it run any time
+	 * from 0 ms to 1000/HZ ms, which is not desirable for the hibern8
+	 * enter work as it may impact performance if it gets scheduled almost
+	 * immediately. Hence make sure the hibern8 enter work gets scheduled
+	 * at least 2 jiffies out (any time between 1000/HZ ms and 2000/HZ ms).
+	 */
+	delay_in_jiffies = msecs_to_jiffies(hba->hibern8_on_idle.delay_ms);
+	if (delay_in_jiffies == 1)
+		delay_in_jiffies++;
+
+	schedule_delayed_work(&hba->hibern8_on_idle.enter_work,
+			      delay_in_jiffies);
+}
+
+static void ufshcd_hibern8_release(struct ufs_hba *hba, bool no_sched)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_hibern8_release(hba, no_sched);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_hibern8_enter_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   hibern8_on_idle.enter_work.work);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.is_suspended) {
+		hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+		goto rel_lock;
+	}
+
+	if (hba->hibern8_on_idle.active_reqs
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (ufshcd_is_link_active(hba) && ufshcd_uic_hibern8_enter(hba)) {
+		/* Enter failed */
+		hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+		goto out;
+	}
+	ufshcd_set_link_hibern8(hba);
+
+	/*
+	 * In case you are here to cancel this work the hibern8_on_idle.state
+	 * would be marked as REQ_HIBERN8_EXIT. In this case keep the state
+	 * as REQ_HIBERN8_EXIT which would anyway imply that we are in hibern8
+	 * and a request to exit from it is pending. By doing this way,
+	 * we keep the state machine intact and this would ultimately
+	 * prevent from doing cancel work multiple times when there are
+	 * new requests arriving before the current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->hibern8_on_idle.state == REQ_HIBERN8_ENTER) {
+		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+		trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+			hba->hibern8_on_idle.state);
+	}
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+static void ufshcd_hibern8_exit_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   hibern8_on_idle.exit_work);
+
+	cancel_delayed_work_sync(&hba->hibern8_on_idle.enter_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if ((hba->hibern8_on_idle.state == HIBERN8_EXITED)
+	     || ufshcd_is_link_active(hba)) {
+		hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		goto unblock_reqs;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Exit from hibern8 */
+	if (ufshcd_is_link_hibern8(hba)) {
+		ufshcd_hold(hba, false);
+		ret = ufshcd_uic_hibern8_exit(hba);
+		ufshcd_release(hba, false);
+		if (!ret) {
+			spin_lock_irqsave(hba->host->host_lock, flags);
+			ufshcd_set_link_active(hba);
+			hba->hibern8_on_idle.state = HIBERN8_EXITED;
+			trace_ufshcd_hibern8_on_idle(dev_name(hba->dev),
+				hba->hibern8_on_idle.state);
+			spin_unlock_irqrestore(hba->host->host_lock, flags);
+		}
+	}
+unblock_reqs:
+	ufshcd_scsi_unblock_requests(hba);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->hibern8_on_idle.delay_ms);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->hibern8_on_idle.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* Update auto hibern8 timer value if supported */
+	if (ufshcd_is_auto_hibern8_supported(hba) &&
+	    hba->hibern8_on_idle.is_enabled)
+		ufshcd_set_auto_hibern8_timer(hba,
+					      hba->hibern8_on_idle.delay_ms);
+
+	return count;
+}
+
+static ssize_t ufshcd_hibern8_on_idle_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			hba->hibern8_on_idle.is_enabled);
+}
+
+static ssize_t ufshcd_hibern8_on_idle_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags;
+	u32 value;
+
+	if (kstrtou32(buf, 0, &value))
+		return -EINVAL;
+
+	value = !!value;
+	if (value == hba->hibern8_on_idle.is_enabled)
+		goto out;
+
+	/* Update auto hibern8 timer value if supported */
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		ufshcd_set_auto_hibern8_timer(hba,
+			value ? hba->hibern8_on_idle.delay_ms : value);
+		goto update;
+	}
+
+	if (value) {
+		/*
+		 * As clock gating work would wait for the hibern8 enter work
+		 * to finish, clocks would remain on during hibern8 enter work.
+		 */
+		ufshcd_hold(hba, false);
+		ufshcd_release_all(hba);
+	} else {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->hibern8_on_idle.active_reqs++;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+
+update:
+	hba->hibern8_on_idle.is_enabled = value;
+out:
+	return count;
+}
+
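+/*
+ * When the controller supports auto-hibern8, rely on its hardware idle timer
+ * and disable the software hibern8-on-idle path; otherwise set up the
+ * delayed enter work and the exit work used by the software path.
+ */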
+static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
+{
+	/* initialize the state variable here */
+	hba->hibern8_on_idle.state = HIBERN8_EXITED;
+
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+	    !ufshcd_is_auto_hibern8_supported(hba))
+		return;
+
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		hba->hibern8_on_idle.state = AUTO_HIBERN8;
+		/*
+		 * Disable SW hibern8 enter on idle in case
+		 * auto hibern8 is supported
+		 */
+		hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+	} else {
+		INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
+				  ufshcd_hibern8_enter_work);
+		INIT_WORK(&hba->hibern8_on_idle.exit_work,
+			  ufshcd_hibern8_exit_work);
+	}
+
+	hba->hibern8_on_idle.delay_ms = 10;
+	hba->hibern8_on_idle.is_enabled = true;
+
+	hba->hibern8_on_idle.delay_attr.show =
+					ufshcd_hibern8_on_idle_delay_show;
+	hba->hibern8_on_idle.delay_attr.store =
+					ufshcd_hibern8_on_idle_delay_store;
+	sysfs_attr_init(&hba->hibern8_on_idle.delay_attr.attr);
+	hba->hibern8_on_idle.delay_attr.attr.name = "hibern8_on_idle_delay_ms";
+	hba->hibern8_on_idle.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->hibern8_on_idle.delay_attr))
+		dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_delay\n");
+
+	hba->hibern8_on_idle.enable_attr.show =
+					ufshcd_hibern8_on_idle_enable_show;
+	hba->hibern8_on_idle.enable_attr.store =
+					ufshcd_hibern8_on_idle_enable_store;
+	sysfs_attr_init(&hba->hibern8_on_idle.enable_attr.attr);
+	hba->hibern8_on_idle.enable_attr.attr.name = "hibern8_on_idle_enable";
+	hba->hibern8_on_idle.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->hibern8_on_idle.enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for hibern8_on_idle_enable\n");
+}
+
+static void ufshcd_exit_hibern8_on_idle(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_hibern8_on_idle_allowed(hba) &&
+	    !ufshcd_is_auto_hibern8_supported(hba))
+		return;
+	device_remove_file(hba->dev, &hba->hibern8_on_idle.delay_attr);
+	device_remove_file(hba->dev, &hba->hibern8_on_idle.enable_attr);
+}
+
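+/*
+ * Helpers that take/release both the clock gating and the hibern8-on-idle
+ * references so callers keep the clocks on and the link out of hibern8 for
+ * the duration of an operation.
+ */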
+static void ufshcd_hold_all(struct ufs_hba *hba)
+{
+	ufshcd_hold(hba, false);
+	ufshcd_hibern8_hold(hba, false);
+}
+
+static void ufshcd_release_all(struct ufs_hba *hba)
+{
+	ufshcd_hibern8_release(hba, false);
+	ufshcd_release(hba, false);
+}
+
 /* Must be called with host lock acquired */
 static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
 {
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	bool queue_resume_work = false;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
+	if (!hba->clk_scaling.active_reqs++)
+		queue_resume_work = true;
+
+	if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+		return;
+
+	if (queue_resume_work)
+		queue_work(hba->clk_scaling.workq,
+			   &hba->clk_scaling.resume_work);
+
+	if (!hba->clk_scaling.window_start_t) {
+		hba->clk_scaling.window_start_t = jiffies;
+		hba->clk_scaling.tot_busy_t = 0;
+		hba->clk_scaling.is_busy_started = false;
+	}
+
 	if (!hba->clk_scaling.is_busy_started) {
 		hba->clk_scaling.busy_start_t = ktime_get();
 		hba->clk_scaling.is_busy_started = true;
@@ -857,7 +1985,7 @@
 {
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return;
 
 	if (!hba->outstanding_reqs && scaling->is_busy_started) {
@@ -867,17 +1995,27 @@
 		scaling->is_busy_started = false;
 	}
 }
+
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
  * @task_tag: Task tag of the command
  */
 static inline
-void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+	int ret = 0;
+
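+	/* Record issue time and clear completion time for latency accounting */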
+	hba->lrb[task_tag].issue_time_stamp = ktime_get();
+	hba->lrb[task_tag].complete_time_stamp = ktime_set(0, 0);
 	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
+	ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
+	ufshcd_update_tag_stats(hba, task_tag);
+	return ret;
 }
 
 /**
@@ -889,10 +2027,14 @@
 	int len;
 	if (lrbp->sense_buffer &&
 	    ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
+		int len_to_copy;
+
 		len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
+		len_to_copy = min_t(int, RESPONSE_UPIU_SENSE_DATA_LENGTH, len);
+
 		memcpy(lrbp->sense_buffer,
 			lrbp->ucd_rsp_ptr->sr.sense_data,
-			min_t(int, len, SCSI_SENSE_BUFFERSIZE));
+			min_t(int, len_to_copy, UFSHCD_REQ_SENSE_SIZE));
 	}
 }
 
@@ -1018,6 +2160,9 @@
 	else
 		ret = -ETIMEDOUT;
 
+	if (ret)
+		ufsdbg_set_err_state(hba);
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1066,7 +2211,7 @@
 	int ret;
 	unsigned long flags;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	mutex_lock(&hba->uic_cmd_mutex);
 	ufshcd_add_delay_before_dme_cmd(hba);
 
@@ -1076,9 +2221,13 @@
 	if (!ret)
 		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
 
+	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	mutex_unlock(&hba->uic_cmd_mutex);
+	ufshcd_release_all(hba);
 
-	ufshcd_release(hba);
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_UIC, 0, &ret);
+
 	return ret;
 }
 
@@ -1165,15 +2314,55 @@
 	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
 }
 
+static int ufshcd_prepare_crypto_utrd(struct ufs_hba *hba,
+		struct ufshcd_lrb *lrbp)
+{
+	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
+	u8 cc_index = 0;
+	bool enable = false;
+	u64 dun = 0;
+	int ret;
+
+	/*
+	 * Call vendor specific code to get crypto info for this request:
+	 * enable, crypto configuration index and DUN.
+	 * If crypto is not enabled for this request, the other fields are
+	 * left untouched.
+	 */
+	ret = ufshcd_vops_crypto_req_setup(hba, lrbp, &cc_index, &enable, &dun);
+	if (ret) {
+		if (ret != -EAGAIN) {
+			dev_err(hba->dev,
+				"%s: failed to setup crypto request (%d)\n",
+				__func__, ret);
+		}
+
+		return ret;
+	}
+
+	if (!enable)
+		goto out;
+
+	req_desc->header.dword_0 |= cc_index | UTRD_CRYPTO_ENABLE;
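+	/* Use the request's start sector as the DUN when a bio is attached */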
+	if (lrbp->cmd->request && lrbp->cmd->request->bio)
+		dun = lrbp->cmd->request->bio->bi_iter.bi_sector;
+
+	req_desc->header.dword_1 = (u32)(dun & 0xFFFFFFFF);
+	req_desc->header.dword_3 = (u32)((dun >> 32) & 0xFFFFFFFF);
+out:
+	return 0;
+}
+
 /**
  * ufshcd_prepare_req_desc_hdr() - Fills the requests header
  * descriptor according to request
+ * @hba: per adapter instance
  * @lrbp: pointer to local reference block
  * @upiu_flags: flags required in the header
  * @cmd_dir: requests data direction
  */
-static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
-			u32 *upiu_flags, enum dma_data_direction cmd_dir)
+static int ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba,
+	struct ufshcd_lrb *lrbp, u32 *upiu_flags,
+	enum dma_data_direction cmd_dir)
 {
 	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 	u32 data_direction;
@@ -1210,6 +2399,11 @@
 	req_desc->header.dword_3 = 0;
 
 	req_desc->prd_table_length = 0;
+
+	if (ufshcd_is_crypto_supported(hba))
+		return ufshcd_prepare_crypto_utrd(hba, lrbp);
+
+	return 0;
 }
 
 /**
@@ -1238,9 +2432,10 @@
 		cpu_to_be32(lrbp->cmd->sdb.length);
 
 	cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE);
-	memset(ucd_req_ptr->sc.cdb, 0, MAX_CDB_SIZE);
 	memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len);
-
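+	/* Zero any unused tail of the CDB so stale bytes are never sent */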
+	if (cdb_len < MAX_CDB_SIZE)
+		memset(ucd_req_ptr->sc.cdb + cdb_len, 0,
+		       (MAX_CDB_SIZE - cdb_len));
 	memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp));
 }
 
@@ -1302,55 +2497,48 @@
 }
 
 /**
- * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
- *			     for Device Management Purposes
+ * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
  * @hba - per adapter instance
  * @lrb - pointer to local reference block
  */
-static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 {
 	u32 upiu_flags;
 	int ret = 0;
 
-	if (hba->ufs_version == UFSHCI_VERSION_20)
-		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-	else
-		lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
-
-	ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
-	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
-		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
-	else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
-		ufshcd_prepare_utp_nop_upiu(lrbp);
-	else
-		ret = -EINVAL;
-
-	return ret;
-}
-
-/**
- * ufshcd_comp_scsi_upiu - UFS Protocol Information Unit(UPIU)
- *			   for SCSI Purposes
- * @hba - per adapter instance
- * @lrb - pointer to local reference block
- */
-static int ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
-{
-	u32 upiu_flags;
-	int ret = 0;
-
-	if (hba->ufs_version == UFSHCI_VERSION_20)
-		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
-	else
-		lrbp->command_type = UTP_CMD_TYPE_SCSI;
-
-	if (likely(lrbp->cmd)) {
-		ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
-						lrbp->cmd->sc_data_direction);
-		ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
-	} else {
-		ret = -EINVAL;
-	}
+	switch (lrbp->command_type) {
+	case UTP_CMD_TYPE_SCSI:
+		if (likely(lrbp->cmd)) {
+			ret = ufshcd_prepare_req_desc_hdr(hba, lrbp,
+				&upiu_flags, lrbp->cmd->sc_data_direction);
+			ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
+		} else {
+			ret = -EINVAL;
+		}
+		break;
+	case UTP_CMD_TYPE_DEV_MANAGE:
+		ret = ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
+			DMA_NONE);
+		if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
+			ufshcd_prepare_utp_query_req_upiu(
+					hba, lrbp, upiu_flags);
+		else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
+			ufshcd_prepare_utp_nop_upiu(lrbp);
+		else
+			ret = -EINVAL;
+		break;
+	case UTP_CMD_TYPE_UFS:
+		/* For UFS native command implementation */
+		ret = -ENOTSUPP;
+		dev_err(hba->dev, "%s: UFS native command are not supported\n",
+			__func__);
+		break;
+	default:
+		ret = -ENOTSUPP;
+		dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
+				__func__, lrbp->command_type);
+		break;
+	} /* end of switch */
 
 	return ret;
 }
@@ -1406,6 +2594,9 @@
 		BUG();
 	}
 
+	if (!down_read_trylock(&hba->clk_scaling_lock))
+		return SCSI_MLQUEUE_HOST_BUSY;
+
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	switch (hba->ufshcd_state) {
 	case UFSHCD_STATE_OPERATIONAL:
@@ -1433,6 +2624,8 @@
 	}
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	hba->req_abort_count = 0;
+
 	/* acquire the tag to make sure device cmds don't use it */
 	if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
 		/*
@@ -1451,33 +2644,111 @@
 		clear_bit_unlock(tag, &hba->lrb_in_use);
 		goto out;
 	}
+
+	if (ufshcd_is_clkgating_allowed(hba))
+		WARN_ON(hba->clk_gating.state != CLKS_ON);
+
+	err = ufshcd_hibern8_hold(hba, true);
+	if (err) {
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		ufshcd_release(hba, true);
+		goto out;
+	}
+	if (ufshcd_is_hibern8_on_idle_allowed(hba))
+		WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
+
+	/* Vote PM QoS for the request */
+	ufshcd_vops_pm_qos_req_start(hba, cmd->request);
+
+	/* IO svc time latency histogram */
+	if (hba != NULL && cmd->request != NULL) {
+		if (hba->latency_hist_enabled &&
+		    (cmd->request->cmd_type == REQ_TYPE_FS)) {
+			cmd->request->lat_hist_io_start = ktime_get();
+			cmd->request->lat_hist_enabled = 1;
+		} else
+			cmd->request->lat_hist_enabled = 0;
+	}
+
 	WARN_ON(hba->clk_gating.state != CLKS_ON);
 
 	lrbp = &hba->lrb[tag];
 
 	WARN_ON(lrbp->cmd);
 	lrbp->cmd = cmd;
-	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
+	lrbp->sense_bufflen = UFSHCD_REQ_SENSE_SIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
 	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
 	lrbp->intr_cmd = !ufshcd_is_intr_aggr_allowed(hba) ? true : false;
+	lrbp->command_type = UTP_CMD_TYPE_SCSI;
+	lrbp->req_abort_skip = false;
 
-	ufshcd_comp_scsi_upiu(hba, lrbp);
+	/* form UPIU before issuing the command */
+	err = ufshcd_compose_upiu(hba, lrbp);
+	if (err) {
+		if (err != -EAGAIN)
+			dev_err(hba->dev,
+				"%s: failed to compose upiu %d\n",
+				__func__, err);
+
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+		goto out;
+	}
 
 	err = ufshcd_map_sg(lrbp);
 	if (err) {
 		lrbp->cmd = NULL;
 		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
 		goto out;
 	}
 
+	err = ufshcd_vops_crypto_engine_cfg_start(hba, tag);
+	if (err) {
+		if (err != -EAGAIN)
+			dev_err(hba->dev,
+				"%s: failed to configure crypto engine %d\n",
+				__func__, err);
+
+		scsi_dma_unmap(lrbp->cmd);
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+
+		goto out;
+	}
+
+	/* Make sure descriptors are ready before ringing the doorbell */
+	wmb();
 	/* issue command to the controller */
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_send_command(hba, tag);
+
+	err = ufshcd_send_command(hba, tag);
+	if (err) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		scsi_dma_unmap(lrbp->cmd);
+		lrbp->cmd = NULL;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		ufshcd_release_all(hba);
+		ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+		ufshcd_vops_crypto_engine_cfg_end(hba, lrbp, cmd->request);
+		dev_err(hba->dev, "%s: failed sending command, %d\n",
+							__func__, err);
+		err = DID_ERROR;
+		goto out;
+	}
+
 out_unlock:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 out:
+	up_read(&hba->clk_scaling_lock);
 	return err;
 }
 
@@ -1489,10 +2760,11 @@
 	lrbp->sense_buffer = NULL;
 	lrbp->task_tag = tag;
 	lrbp->lun = 0; /* device management cmd is not specific to any LUN */
+	lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
 	lrbp->intr_cmd = true; /* No interrupt aggregation */
 	hba->dev_cmd.type = cmd_type;
 
-	return ufshcd_comp_devman_upiu(hba, lrbp);
+	return ufshcd_compose_upiu(hba, lrbp);
 }
 
 static int
@@ -1540,6 +2812,7 @@
 	int resp;
 	int err = 0;
 
+	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
 	resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
 
 	switch (resp) {
@@ -1605,6 +2878,9 @@
 		ufshcd_outstanding_req_clear(hba, lrbp->task_tag);
 	}
 
+	if (err)
+		ufsdbg_set_err_state(hba);
+
 	return err;
 }
 
@@ -1664,6 +2940,8 @@
 	struct completion wait;
 	unsigned long flags;
 
+	down_read(&hba->clk_scaling_lock);
+
 	/*
 	 * Get free slot, sleep if slots are unavailable.
 	 * Even though we use wait_event() which sleeps indefinitely,
@@ -1683,14 +2961,19 @@
 	/* Make sure descriptors are ready before ringing the doorbell */
 	wmb();
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	ufshcd_send_command(hba, tag);
+	err = ufshcd_send_command(hba, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
+	if (err) {
+		dev_err(hba->dev, "%s: failed sending command, %d\n",
+							__func__, err);
+		goto out_put_tag;
+	}
 	err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
 
 out_put_tag:
 	ufshcd_put_dev_cmd_tag(hba, tag);
 	wake_up(&hba->dev_cmd.tag_wq);
+	up_read(&hba->clk_scaling_lock);
 	return err;
 }
 
@@ -1708,6 +2991,12 @@
 		struct ufs_query_req **request, struct ufs_query_res **response,
 		enum query_opcode opcode, u8 idn, u8 index, u8 selector)
 {
+	int idn_t = (int)idn;
+
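+	/* Give the debugfs error-injection hook a chance to override the IDN */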
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_QUERY, idn_t, (int *)&idn_t);
+	idn = idn_t;
+
 	*request = &hba->dev_cmd.query.request;
 	*response = &hba->dev_cmd.query.response;
 	memset(*request, 0, sizeof(struct ufs_query_req));
@@ -1716,6 +3005,8 @@
 	(*request)->upiu_req.idn = idn;
 	(*request)->upiu_req.index = index;
 	(*request)->upiu_req.selector = selector;
+
+	ufshcd_update_query_stats(hba, opcode, idn);
 }
 
 static int ufshcd_query_flag_retry(struct ufs_hba *hba,
@@ -1760,7 +3051,7 @@
 
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 			selector);
@@ -1789,15 +3080,12 @@
 		goto out_unlock;
 	}
 
-	if (idn == QUERY_FLAG_IDN_FDEVICEINIT)
-		timeout = QUERY_FDEVICEINIT_REQ_TIMEOUT;
-
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, timeout);
 
 	if (err) {
 		dev_err(hba->dev,
 			"%s: Sending flag query for idn %d failed, err = %d\n",
-			__func__, idn, err);
+			__func__, request->upiu_req.idn, err);
 		goto out_unlock;
 	}
 
@@ -1807,9 +3095,10 @@
 
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
+EXPORT_SYMBOL(ufshcd_query_flag);
 
 /**
  * ufshcd_query_attr - API function for sending attribute requests
@@ -1822,7 +3111,7 @@
  *
  * Returns 0 for success, non-zero in case of failure
 */
-static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
 			enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
 {
 	struct ufs_query_req *request = NULL;
@@ -1831,7 +3120,7 @@
 
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	if (!attr_val) {
 		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1861,8 +3150,9 @@
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode,
+				request->upiu_req.idn, index, err);
 		goto out_unlock;
 	}
 
@@ -1871,9 +3161,10 @@
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
+EXPORT_SYMBOL(ufshcd_query_attr);
 
 /**
  * ufshcd_query_attr_retry() - API function for sending query
@@ -1908,7 +3199,7 @@
 	if (ret)
 		dev_err(hba->dev,
 			"%s: query attribute, idn %d, failed with error %d after %d retires\n",
-			__func__, idn, ret, QUERY_REQ_RETRIES);
+			__func__, idn, ret, retries);
 	return ret;
 }
 
@@ -1922,7 +3213,7 @@
 
 	BUG_ON(!hba);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	if (!desc_buf) {
 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1961,8 +3252,9 @@
 	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
 
 	if (err) {
-		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
-				__func__, opcode, idn, err);
+		dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, index %d, err = %d\n",
+				__func__, opcode,
+				request->upiu_req.idn, index, err);
 		goto out_unlock;
 	}
 
@@ -1972,13 +3264,12 @@
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
 
 /**
- * ufshcd_query_descriptor_retry - API function for sending descriptor
- * requests
+ * ufshcd_query_descriptor - API function for sending descriptor requests
  * hba: per-adapter instance
  * opcode: attribute opcode
  * idn: attribute idn to access
@@ -1991,7 +3282,7 @@
  * The buf_len parameter will contain, on return, the length parameter
  * received on the response.
  */
-int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
+int ufshcd_query_descriptor(struct ufs_hba *hba,
 			enum query_opcode opcode, enum desc_idn idn, u8 index,
 			u8 selector, u8 *desc_buf, int *buf_len)
 {
@@ -2007,7 +3298,7 @@
 
 	return err;
 }
-EXPORT_SYMBOL(ufshcd_query_descriptor_retry);
+EXPORT_SYMBOL(ufshcd_query_descriptor);
 
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
@@ -2051,22 +3342,45 @@
 			return -ENOMEM;
 	}
 
-	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
-					desc_id, desc_index, 0, desc_buf,
-					&buff_len);
+	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+				      desc_id, desc_index, 0, desc_buf,
+				      &buff_len);
 
-	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
-	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
-	     ufs_query_desc_max_size[desc_id])
-	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
-		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
-			__func__, desc_id, param_offset, buff_len, ret);
-		if (!ret)
-			ret = -EINVAL;
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
+			__func__, desc_id, desc_index, param_offset, ret);
 
 		goto out;
 	}
 
+	/* Sanity check */
+	if (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id) {
+		dev_err(hba->dev, "%s: invalid desc_id %d in descriptor header",
+			__func__, desc_buf[QUERY_DESC_DESC_TYPE_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * While reading variable size descriptors (like string descriptor),
+	 * some UFS devices may report the "LENGTH" (field in "Transaction
+	 * Specific fields" of Query Response UPIU) same as what was requested
+	 * in Query Request UPIU instead of reporting the actual size of the
+	 * variable size descriptor.
+	 * It is safe to ignore the "LENGTH" field for variable size
+	 * descriptors, as we can always derive their length from the
+	 * descriptor header fields. Hence the length match check is
+	 * imposed only for fixed size descriptors (for which we always
+	 * request the correct size as part of Query Request UPIU).
+	 */
+	if ((desc_id != QUERY_DESC_IDN_STRING) &&
+	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
+		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
+			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
+		ret = -EINVAL;
+		goto out;
+	}
+
 	if (is_kmalloc)
 		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
 out:
@@ -2095,7 +3409,6 @@
 {
 	return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size);
 }
-EXPORT_SYMBOL(ufshcd_read_device_desc);
 
 /**
  * ufshcd_read_string_desc - read string descriptor
@@ -2139,8 +3452,10 @@
 
 		buff_ascii = kmalloc(ascii_len, GFP_KERNEL);
 		if (!buff_ascii) {
+			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+					__func__, ascii_len);
 			err = -ENOMEM;
-			goto out;
+			goto out_free_buff;
 		}
 
 		/*
@@ -2159,12 +3474,12 @@
 				size - QUERY_DESC_HDR_SIZE);
 		memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len);
 		buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE;
+out_free_buff:
 		kfree(buff_ascii);
 	}
 out:
 	return err;
 }
-EXPORT_SYMBOL(ufshcd_read_string_desc);
 
 /**
  * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
@@ -2186,7 +3501,7 @@
 	 * Unit descriptors are only available for general purpose LUs (LUN id
 	 * from 0 to 7) and RPMB Well known LU.
 	 */
-	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+	if (!ufs_is_valid_unit_desc_lun(lun))
 		return -EOPNOTSUPP;
 
 	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -2328,12 +3643,19 @@
 				cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
 
 		hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
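+		/* Also keep the DMA address of each descriptor for debug dumps */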
+		hba->lrb[i].utrd_dma_addr = hba->utrdl_dma_addr +
+				(i * sizeof(struct utp_transfer_req_desc));
 		hba->lrb[i].ucd_req_ptr =
 			(struct utp_upiu_req *)(cmd_descp + i);
+		hba->lrb[i].ucd_req_dma_addr = cmd_desc_element_addr;
 		hba->lrb[i].ucd_rsp_ptr =
 			(struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
+		hba->lrb[i].ucd_rsp_dma_addr = cmd_desc_element_addr +
+				response_offset;
 		hba->lrb[i].ucd_prdt_ptr =
 			(struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
+		hba->lrb[i].ucd_prdt_dma_addr = cmd_desc_element_addr +
+				prdt_offset;
 	}
 }
 
@@ -2357,7 +3679,7 @@
 
 	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
 	if (ret)
-		dev_err(hba->dev,
+		dev_dbg(hba->dev,
 			"dme-link-startup: error code %d\n", ret);
 	return ret;
 }
@@ -2393,6 +3715,13 @@
 	usleep_range(min_sleep_time_us, min_sleep_time_us + 50);
 }
 
+static inline void ufshcd_save_tstamp_of_last_dme_cmd(
+			struct ufs_hba *hba)
+{
+	if (hba->quirks & UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS)
+		hba->last_dme_cmd_tstamp = ktime_get();
+}
+
 /**
  * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  * @hba: per adapter instance
@@ -2415,6 +3744,9 @@
 	int ret;
 	int retries = UFS_UIC_COMMAND_RETRIES;
 
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
+
 	uic_cmd.command = peer ?
 		UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
 	uic_cmd.argument1 = attr_sel;
@@ -2429,10 +3761,10 @@
 				set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
 	} while (ret && peer && --retries);
 
-	if (!retries)
+	if (ret)
 		dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x failed %d retries\n",
-				set, UIC_GET_ATTR_ID(attr_sel), mib_val,
-				retries);
+			set, UIC_GET_ATTR_ID(attr_sel), mib_val,
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	return ret;
 }
@@ -2486,6 +3818,10 @@
 
 	uic_cmd.command = peer ?
 		UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
+
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_DME_ATTR, attr_sel, &attr_sel);
+
 	uic_cmd.argument1 = attr_sel;
 
 	do {
@@ -2496,9 +3832,10 @@
 				get, UIC_GET_ATTR_ID(attr_sel), ret);
 	} while (ret && peer && --retries);
 
-	if (!retries)
+	if (ret)
 		dev_err(hba->dev, "%s: attr-id 0x%x failed %d retries\n",
-				get, UIC_GET_ATTR_ID(attr_sel), retries);
+			get, UIC_GET_ATTR_ID(attr_sel),
+			UFS_UIC_COMMAND_RETRIES - retries);
 
 	if (mib_val && !ret)
 		*mib_val = uic_cmd.argument3;
@@ -2576,6 +3913,10 @@
 		ret = (status != PWR_OK) ? status : -1;
 	}
 out:
+	if (ret)
+		ufsdbg_set_err_state(hba);
+
+	ufshcd_save_tstamp_of_last_dme_cmd(hba);
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	hba->active_uic_cmd = NULL;
 	hba->uic_async_done = NULL;
@@ -2583,7 +3924,64 @@
 		ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	mutex_unlock(&hba->uic_cmd_mutex);
+	return ret;
+}
 
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us)
+{
+	unsigned long flags;
+	int ret = 0;
+	u32 tm_doorbell;
+	u32 tr_doorbell;
+	bool timeout = false, do_last_check = false;
+	ktime_t start;
+
+	ufshcd_hold_all(hba);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	/*
+	 * Wait for all the outstanding tasks/transfer requests.
+	 * Verify by checking the doorbell registers are clear.
+	 */
+	start = ktime_get();
+	do {
+		tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
+		tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+		if (!tm_doorbell && !tr_doorbell) {
+			timeout = false;
+			break;
+		} else if (do_last_check) {
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		schedule();
+		if (ktime_to_us(ktime_sub(ktime_get(), start)) >
+		    wait_timeout_us) {
+			timeout = true;
+			/*
+			 * We might have been scheduled out for a long time,
+			 * so do one last check of whether the doorbells have
+			 * cleared by now.
+			 */
+			do_last_check = true;
+		}
+		spin_lock_irqsave(hba->host->host_lock, flags);
+	} while (tm_doorbell || tr_doorbell);
+
+	if (timeout) {
+		dev_err(hba->dev,
+			"%s: timedout waiting for doorbell to clear (tm=0x%x, tr=0x%x)\n",
+			__func__, tm_doorbell, tr_doorbell);
+		ret = -EBUSY;
+	}
+out:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_release_all(hba);
 	return ret;
 }
 
@@ -2613,10 +4011,9 @@
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
-	ufshcd_release(hba);
-
+	ufshcd_release_all(hba);
 out:
 	return ret;
 }
@@ -2631,6 +4028,12 @@
 	ufshcd_set_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	ret = ufshcd_vops_full_reset(hba);
+	if (ret)
+		dev_warn(hba->dev,
+			"full reset returned %d, trying to recover the link\n",
+			ret);
+
 	ret = ufshcd_host_reset_and_restore(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2650,49 +4053,71 @@
 {
 	int ret;
 	struct uic_command uic_cmd = {0};
+	ktime_t start = ktime_get();
 
 	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
+			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 
 	if (ret) {
-		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
+		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
+		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 			__func__, ret);
-
 		/*
 		 * If link recovery fails then return error so that caller
 		 * don't retry the hibern8 enter again.
 		 */
 		if (ufshcd_link_recovery(hba))
 			ret = -ENOLINK;
+	} else {
+		dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
+			ktime_to_us(ktime_get()));
 	}
 
 	return ret;
 }
 
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
 {
 	int ret = 0, retries;
 
 	for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
 		ret = __ufshcd_uic_hibern8_enter(hba);
-		if (!ret || ret == -ENOLINK)
+		if (!ret)
 			goto out;
+		/* Unable to recover the link, so no point proceeding */
+		if (ret == -ENOLINK)
+			BUG();
 	}
 out:
 	return ret;
 }
 
-static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
 {
 	struct uic_command uic_cmd = {0};
 	int ret;
+	ktime_t start = ktime_get();
 
 	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
 	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	trace_ufshcd_profile_hibern8(dev_name(hba->dev), "exit",
+			     ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
 	if (ret) {
-		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
+		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_EXIT);
+		dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d",
 			__func__, ret);
 		ret = ufshcd_link_recovery(hba);
+		/* Unable to recover the link, so no point proceeding */
+		if (ret)
+			BUG();
+	} else {
+		dev_dbg(hba->dev, "%s: Hibern8 Exit at %lld us", __func__,
+			ktime_to_us(ktime_get()));
+		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_get();
+		hba->ufs_stats.hibern8_exit_cnt++;
 	}
 
 	return ret;
@@ -2725,8 +4150,8 @@
 	if (hba->max_pwr_info.is_valid)
 		return 0;
 
-	pwr_info->pwr_tx = FASTAUTO_MODE;
-	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->pwr_tx = FAST_MODE;
+	pwr_info->pwr_rx = FAST_MODE;
 	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
@@ -2756,8 +4181,16 @@
 			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
 				__func__, pwr_info->gear_rx);
 			return -EINVAL;
+		} else {
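+			/* Honor any platform-imposed limit on the RX PWM gear */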
+			if (hba->limit_rx_pwm_gear > 0 &&
+			    (hba->limit_rx_pwm_gear < pwr_info->gear_rx))
+				pwr_info->gear_rx = hba->limit_rx_pwm_gear;
 		}
-		pwr_info->pwr_rx = SLOWAUTO_MODE;
+		pwr_info->pwr_rx = SLOW_MODE;
+	} else {
+		if (hba->limit_rx_hs_gear > 0 &&
+		    (hba->limit_rx_hs_gear < pwr_info->gear_rx))
+			pwr_info->gear_rx = hba->limit_rx_hs_gear;
 	}
 
 	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
@@ -2769,18 +4202,26 @@
 			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
 				__func__, pwr_info->gear_tx);
 			return -EINVAL;
+		} else {
+			if (hba->limit_tx_pwm_gear > 0 &&
+			    (hba->limit_tx_pwm_gear < pwr_info->gear_tx))
+				pwr_info->gear_tx = hba->limit_tx_pwm_gear;
 		}
-		pwr_info->pwr_tx = SLOWAUTO_MODE;
+		pwr_info->pwr_tx = SLOW_MODE;
+	} else {
+		if (hba->limit_tx_hs_gear > 0 &&
+		    (hba->limit_tx_hs_gear < pwr_info->gear_tx))
+			pwr_info->gear_tx = hba->limit_tx_hs_gear;
 	}
 
 	hba->max_pwr_info.is_valid = true;
 	return 0;
 }
 
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
+int ufshcd_change_power_mode(struct ufs_hba *hba,
 			     struct ufs_pa_layer_attr *pwr_mode)
 {
-	int ret;
+	int ret = 0;
 
 	/* if already configured to the requested pwr_mode */
 	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
@@ -2794,6 +4235,10 @@
 		return 0;
 	}
 
+	ufsdbg_error_inject_dispatcher(hba, ERR_INJECT_PWR_CHANGE, 0, &ret);
+	if (ret)
+		return ret;
+
 	/*
 	 * Configure attributes for power mode change with below.
 	 * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
@@ -2825,10 +4270,25 @@
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
 						pwr_mode->hs_rate);
 
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+			DL_FC0ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+			DL_TC0ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+			DL_AFC0ReqTimeOutVal_Default);
+
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+			DL_FC0ProtectionTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+			DL_TC0ReplayTimeOutVal_Default);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+			DL_AFC0ReqTimeOutVal_Default);
+
 	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
 			| pwr_mode->pwr_tx);
 
 	if (ret) {
+		ufshcd_update_error_stats(hba, UFS_ERR_POWER_MODE_CHANGE);
 		dev_err(hba->dev,
 			"%s: power mode change failed %d\n", __func__, ret);
 	} else {
@@ -2860,6 +4320,8 @@
 		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
 
 	ret = ufshcd_change_power_mode(hba, &final_params);
+	if (!ret)
+		ufshcd_print_pwr_info(hba);
 
 	return ret;
 }
@@ -3075,6 +4537,11 @@
 	return err;
 }
 
+static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
+{
+	return ufshcd_disable_tx_lcc(hba, false);
+}
+
 static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba)
 {
 	return ufshcd_disable_tx_lcc(hba, true);
@@ -3090,14 +4557,26 @@
 {
 	int ret;
 	int retries = DME_LINKSTARTUP_RETRIES;
+	bool link_startup_again = false;
 
+	/*
+	 * If the UFS device isn't active then we will have to issue link
+	 * startup twice to make sure the device state moves to active.
+	 */
+	if (!ufshcd_is_ufs_dev_active(hba))
+		link_startup_again = true;
+
+link_startup:
 	do {
 		ufshcd_vops_link_startup_notify(hba, PRE_CHANGE);
 
 		ret = ufshcd_dme_link_startup(hba);
+		if (ret)
+			ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 
 		/* check if device is detected by inter-connect layer */
 		if (!ret && !ufshcd_is_device_present(hba)) {
+			ufshcd_update_error_stats(hba, UFS_ERR_LINKSTARTUP);
 			dev_err(hba->dev, "%s: Device not present\n", __func__);
 			ret = -ENXIO;
 			goto out;
@@ -3116,12 +4595,28 @@
 		/* failed to get the link up... retire */
 		goto out;
 
+	if (link_startup_again) {
+		link_startup_again = false;
+		retries = DME_LINKSTARTUP_RETRIES;
+		goto link_startup;
+	}
+
+	/* Mark that link is up in PWM-G1, 1-lane, SLOW-AUTO mode */
+	ufshcd_init_pwr_info(hba);
+	ufshcd_print_pwr_info(hba);
+
 	if (hba->quirks & UFSHCD_QUIRK_BROKEN_LCC) {
 		ret = ufshcd_disable_device_tx_lcc(hba);
 		if (ret)
 			goto out;
 	}
 
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_BROKEN_LCC) {
+		ret = ufshcd_disable_host_tx_lcc(hba);
+		if (ret)
+			goto out;
+	}
+
 	/* Include any host controller configuration via UIC commands */
 	ret = ufshcd_vops_link_startup_notify(hba, POST_CHANGE);
 	if (ret)
@@ -3149,7 +4644,7 @@
 	int err = 0;
 	int retries;
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -3161,7 +4656,7 @@
 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 	}
 	mutex_unlock(&hba->dev_cmd.lock);
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 
 	if (err)
 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -3187,10 +4682,10 @@
 
 	lun_qdepth = hba->nutrs;
 	ret = ufshcd_read_unit_desc_param(hba,
-					  ufshcd_scsi_to_upiu_lun(sdev->lun),
-					  UNIT_DESC_PARAM_LU_Q_DEPTH,
-					  &lun_qdepth,
-					  sizeof(lun_qdepth));
+			  ufshcd_scsi_to_upiu_lun(sdev->lun),
+			  UNIT_DESC_PARAM_LU_Q_DEPTH,
+			  &lun_qdepth,
+			  sizeof(lun_qdepth));
 
 	/* Some WLUN doesn't support unit descriptor */
 	if (ret == -EOPNOTSUPP)
@@ -3283,6 +4778,8 @@
 	/* REPORT SUPPORTED OPERATION CODES is not supported */
 	sdev->no_report_opcodes = 1;
 
+	/* WRITE_SAME command is not supported*/
+	sdev->no_write_same = 1;
 
 	ufshcd_set_queue_depth(sdev);
 
@@ -3314,10 +4811,19 @@
 static int ufshcd_slave_configure(struct scsi_device *sdev)
 {
 	struct request_queue *q = sdev->request_queue;
+	struct ufs_hba *hba = shost_priv(sdev->host);
 
 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
 	blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
 
+	if (hba->scsi_cmd_timeout) {
+		blk_queue_rq_timeout(q, hba->scsi_cmd_timeout * HZ);
+		scsi_set_cmd_timeout_override(sdev, hba->scsi_cmd_timeout * HZ);
+	}
+
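+	/* Enable runtime auto-suspend for this LUN with a default idle delay */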
+	sdev->autosuspend_delay = UFSHCD_AUTO_SUSPEND_DELAY_MS;
+	sdev->use_rpm_auto = 1;
+
 	return 0;
 }
 
@@ -3427,6 +4933,7 @@
 	int result = 0;
 	int scsi_status;
 	int ocs;
+	bool print_prdt;
 
 	/* overall command status of utrd */
 	ocs = ufshcd_get_tr_ocs(lrbp);
@@ -3434,7 +4941,7 @@
 	switch (ocs) {
 	case OCS_SUCCESS:
 		result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
-
+		hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
 		switch (result) {
 		case UPIU_TRANSACTION_RESPONSE:
 			/*
@@ -3492,13 +4999,29 @@
 	case OCS_MISMATCH_RESP_UPIU_SIZE:
 	case OCS_PEER_COMM_FAILURE:
 	case OCS_FATAL_ERROR:
+	case OCS_DEVICE_FATAL_ERROR:
+	case OCS_INVALID_CRYPTO_CONFIG:
+	case OCS_GENERAL_CRYPTO_ERROR:
 	default:
 		result |= DID_ERROR << 16;
 		dev_err(hba->dev,
-		"OCS error from controller = %x\n", ocs);
+				"OCS error from controller = %x for tag %d\n",
+				ocs, lrbp->task_tag);
+		ufshcd_print_host_regs(hba);
+		ufshcd_print_host_state(hba);
 		break;
 	} /* end of switch */
 
+	if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) {
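+		/*
+		 * Dump the failed transfer request; include the PRDT only
+		 * when the OCS error relates to it.
+		 */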
+		print_prdt = (ocs == OCS_INVALID_PRDT_ATTR ||
+			ocs == OCS_MISMATCH_DATA_BUF_SIZE);
+		ufshcd_print_trs(hba, 1 << lrbp->task_tag, print_prdt);
+	}
+
+	if ((host_byte(result) == DID_ERROR) ||
+	    (host_byte(result) == DID_ABORT))
+		ufsdbg_set_err_state(hba);
+
 	return result;
 }
 
@@ -3522,6 +5045,64 @@
 }
 
 /**
+ * ufshcd_abort_outstanding_transfer_requests - abort all outstanding transfer requests.
+ * @hba: per adapter instance
+ * @result: error result to inform scsi layer about
+ */
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
+{
+	u8 index;
+	struct ufshcd_lrb *lrbp;
+	struct scsi_cmnd *cmd;
+
+	if (!hba->outstanding_reqs)
+		return;
+
+	for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) {
+		lrbp = &hba->lrb[index];
+		cmd = lrbp->cmd;
+		if (cmd) {
+			ufshcd_cond_add_cmd_trace(hba, index, "failed");
+			ufshcd_update_error_stats(hba,
+					UFS_ERR_INT_FATAL_ERRORS);
+			scsi_dma_unmap(cmd);
+			cmd->result = result;
+			/* Clear pending transfer requests */
+			ufshcd_clear_cmd(hba, index);
+			ufshcd_outstanding_req_clear(hba, index);
+			clear_bit_unlock(index, &hba->lrb_in_use);
+			lrbp->complete_time_stamp = ktime_get();
+			update_req_stats(hba, lrbp);
+			/* Mark completed command as NULL in LRB */
+			lrbp->cmd = NULL;
+			ufshcd_release_all(hba);
+			if (cmd->request) {
+				/*
+				 * As we are accessing the "request" structure,
+				 * this must be called before calling
+				 * ->scsi_done() callback.
+				 */
+				ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+					true);
+				ufshcd_vops_crypto_engine_cfg_end(hba,
+						lrbp, cmd->request);
+			}
+			/* Do not touch lrbp after scsi done */
+			cmd->scsi_done(cmd);
+		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+			if (hba->dev_cmd.complete) {
+				ufshcd_cond_add_cmd_trace(hba, index,
+							"dev_failed");
+				ufshcd_outstanding_req_clear(hba, index);
+				complete(hba->dev_cmd.complete);
+			}
+		}
+		if (ufshcd_is_clkscaling_supported(hba))
+			hba->clk_scaling.active_reqs--;
+	}
+}
+
+/**
  * __ufshcd_transfer_req_compl - handle SCSI and query command completion
  * @hba: per adapter instance
  * @completed_reqs: requests to complete
@@ -3533,25 +5114,63 @@
 	struct scsi_cmnd *cmd;
 	int result;
 	int index;
+	struct request *req;
 
 	for_each_set_bit(index, &completed_reqs, hba->nutrs) {
 		lrbp = &hba->lrb[index];
 		cmd = lrbp->cmd;
 		if (cmd) {
+			ufshcd_cond_add_cmd_trace(hba, index, "complete");
+			ufshcd_update_tag_stats_completion(hba, cmd);
 			result = ufshcd_transfer_rsp_status(hba, lrbp);
 			scsi_dma_unmap(cmd);
 			cmd->result = result;
+			clear_bit_unlock(index, &hba->lrb_in_use);
+			lrbp->complete_time_stamp = ktime_get();
+			update_req_stats(hba, lrbp);
 			/* Mark completed command as NULL in LRB */
 			lrbp->cmd = NULL;
-			clear_bit_unlock(index, &hba->lrb_in_use);
+			__ufshcd_release(hba, false);
+			__ufshcd_hibern8_release(hba, false);
+			if (cmd->request) {
+				/*
+				 * As we are accessing the "request" structure,
+				 * this must be called before calling
+				 * ->scsi_done() callback.
+				 */
+				ufshcd_vops_pm_qos_req_end(hba, cmd->request,
+					false);
+				ufshcd_vops_crypto_engine_cfg_end(hba,
+					lrbp, cmd->request);
+			}
+
+			req = cmd->request;
+			if (req) {
+				/* Update IO svc time latency histogram */
+				if (req->lat_hist_enabled) {
+					ktime_t completion;
+					u_int64_t delta_us;
+
+					completion = ktime_get();
+					delta_us = ktime_us_delta(completion,
+						  req->lat_hist_io_start);
+					/* rq_data_dir() => true if WRITE */
+					blk_update_latency_hist(&hba->io_lat_s,
+						(rq_data_dir(req) == READ),
+						delta_us);
+				}
+			}
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
-			__ufshcd_release(hba);
-		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
-			lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
-			if (hba->dev_cmd.complete)
+		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
+			if (hba->dev_cmd.complete) {
+				ufshcd_cond_add_cmd_trace(hba, index,
+						"dev_complete");
 				complete(hba->dev_cmd.complete);
+			}
 		}
+		if (ufshcd_is_clkscaling_supported(hba))
+			hba->clk_scaling.active_reqs--;
 	}
 
 	/* clear corresponding bits of completed commands */
@@ -3671,6 +5290,7 @@
 	}
 
 	hba->auto_bkops_enabled = true;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 1);
 
 	/* No need of URGENT_BKOPS exception from the device */
 	err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
@@ -3721,23 +5341,31 @@
 	}
 
 	hba->auto_bkops_enabled = false;
+	trace_ufshcd_auto_bkops_state(dev_name(hba->dev), 0);
 out:
 	return err;
 }
 
 /**
- * ufshcd_force_reset_auto_bkops - force enable of auto bkops
+ * ufshcd_force_reset_auto_bkops - force reset auto bkops state
  * @hba: per adapter instance
  *
  * After a device reset the device may toggle the BKOPS_EN flag
  * to default value. The s/w tracking variables should be updated
- * as well. Do this by forcing enable of auto bkops.
+ * as well. This function changes the auto-bkops state based on
+ * UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND.
  */
-static void  ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
+static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
 {
-	hba->auto_bkops_enabled = false;
-	hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
-	ufshcd_enable_auto_bkops(hba);
+	if (ufshcd_keep_autobkops_enabled_except_suspend(hba)) {
+		hba->auto_bkops_enabled = false;
+		hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
+		ufshcd_enable_auto_bkops(hba);
+	} else {
+		hba->auto_bkops_enabled = true;
+		hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS;
+		ufshcd_disable_auto_bkops(hba);
+	}
 }
 
 static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
@@ -3861,6 +5489,7 @@
 	hba = container_of(work, struct ufs_hba, eeh_work);
 
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_scsi_block_requests(hba);
 	err = ufshcd_get_ee_status(hba, &status);
 	if (err) {
 		dev_err(hba->dev, "%s: failed to get exception status %d\n",
@@ -3874,6 +5503,7 @@
 		ufshcd_bkops_exception_event_handler(hba);
 
 out:
+	ufshcd_scsi_unblock_requests(hba);
 	pm_runtime_put_sync(hba->dev);
 	return;
 }
@@ -3907,8 +5537,14 @@
 
 	if ((hba->saved_err & DEVICE_FATAL_ERROR) ||
 	    ((hba->saved_err & UIC_ERROR) &&
-	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR)))
+	     (hba->saved_uic_err & UFSHCD_UIC_DL_TCx_REPLAY_ERROR))) {
+		/*
+		 * We have to do error recovery, but at least silence the
+		 * error logs.
+		 */
+		hba->silence_err_logs = true;
 		goto out;
+	}
 
 	if ((hba->saved_err & UIC_ERROR) &&
 	    (hba->saved_uic_err & UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)) {
@@ -3926,8 +5562,13 @@
 		 */
 		if ((hba->saved_err & INT_FATAL_ERRORS) ||
 		    ((hba->saved_err & UIC_ERROR) &&
-		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)))
+		    (hba->saved_uic_err & ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))) {
+			if (((hba->saved_err & INT_FATAL_ERRORS) ==
+				DEVICE_FATAL_ERROR) || (hba->saved_uic_err &
+					~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR))
+				hba->silence_err_logs = true;
 			goto out;
+		}
 
 		/*
 		 * As DL NAC is the only error received so far, send out NOP
@@ -3936,12 +5577,17 @@
 		 *   - If we get response then clear the DL NAC error bit.
 		 */
 
+		/* silence the error logs from NOP command */
+		hba->silence_err_logs = true;
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
 		err = ufshcd_verify_dev_init(hba);
 		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->silence_err_logs = false;
 
-		if (err)
+		if (err) {
+			hba->silence_err_logs = true;
 			goto out;
+		}
 
 		/* Link seems to be alive hence ignore the DL NAC errors */
 		if (hba->saved_uic_err == UFSHCD_UIC_DL_NAC_RECEIVED_ERROR)
@@ -3952,6 +5598,11 @@
 			err_handling = false;
 			goto out;
 		}
+		/*
+		 * There seem to be errors other than NAC, so do error
+		 * recovery.
+		 */
+		hba->silence_err_logs = true;
 	}
 out:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -3966,16 +5617,16 @@
 {
 	struct ufs_hba *hba;
 	unsigned long flags;
-	u32 err_xfer = 0;
-	u32 err_tm = 0;
+	bool err_xfer = false, err_tm = false;
 	int err = 0;
 	int tag;
 	bool needs_reset = false;
 
 	hba = container_of(work, struct ufs_hba, eh_work);
 
+	ufsdbg_set_err_state(hba);
 	pm_runtime_get_sync(hba->dev);
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->ufshcd_state == UFSHCD_STATE_RESET)
@@ -3997,7 +5648,23 @@
 		if (!ret)
 			goto skip_err_handling;
 	}
-	if ((hba->saved_err & INT_FATAL_ERRORS) ||
+
+	/*
+	 * Dump controller state before resetting. The transfer request
+	 * state will be dumped as part of request completion.
+	 */
+	if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
+		dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x",
+			__func__, hba->saved_err, hba->saved_uic_err);
+		if (!hba->silence_err_logs) {
+			ufshcd_print_host_regs(hba);
+			ufshcd_print_host_state(hba);
+			ufshcd_print_pwr_info(hba);
+			ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+		}
+	}
+
+	if ((hba->saved_err & INT_FATAL_ERRORS) || hba->saved_ce_err ||
 	    ((hba->saved_err & UIC_ERROR) &&
 	    (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
 				   UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
@@ -4044,6 +5711,20 @@
 	if (needs_reset) {
 		unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
 
+		if (hba->saved_err & INT_FATAL_ERRORS)
+			ufshcd_update_error_stats(hba,
+						  UFS_ERR_INT_FATAL_ERRORS);
+		if (hba->saved_ce_err)
+			ufshcd_update_error_stats(hba, UFS_ERR_CRYPTO_ENGINE);
+
+		if (hba->saved_err & UIC_ERROR)
+			ufshcd_update_error_stats(hba,
+						  UFS_ERR_INT_UIC_ERROR);
+
+		if (err_xfer || err_tm)
+			ufshcd_update_error_stats(hba,
+						  UFS_ERR_CLEAR_PEND_XFER_TM);
+
 		/*
 		 * ufshcd_reset_and_restore() does the link reinitialization
 		 * which will need atleast one empty doorbell slot to send the
@@ -4070,6 +5751,7 @@
 		scsi_report_bus_reset(hba->host, 0);
 		hba->saved_err = 0;
 		hba->saved_uic_err = 0;
+		hba->saved_ce_err = 0;
 	}
 
 skip_err_handling:
@@ -4080,15 +5762,23 @@
 			    __func__, hba->saved_err, hba->saved_uic_err);
 	}
 
+	hba->silence_err_logs = false;
 	ufshcd_clear_eh_in_progress(hba);
-
 out:
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	scsi_unblock_requests(hba->host);
-	ufshcd_release(hba);
+	ufshcd_scsi_unblock_requests(hba);
+	ufshcd_release_all(hba);
 	pm_runtime_put_sync(hba->dev);
 }
 
+static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
+		u32 reg)
+{
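+	/* Record the error in a fixed-size ring of values and timestamps */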
+	reg_hist->reg[reg_hist->pos] = reg;
+	reg_hist->tstamp[reg_hist->pos] = ktime_get();
+	reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
+}
+
 /**
  * ufshcd_update_uic_error - check and set fatal UIC error flags.
  * @hba: per-adapter instance
@@ -4097,11 +5787,28 @@
 {
 	u32 reg;
 
+	/* PHY layer lane error */
+	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
+	/* Ignore LINERESET indication, as this is not an error */
+	if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
+			(reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+		/*
+		 * To know whether this error is fatal or not, DB timeout
+		 * must be checked but this error is handled separately.
+		 */
+		dev_dbg(hba->dev, "%s: UIC Lane error reported, reg 0x%x\n",
+				__func__, reg);
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg);
+	}
+
 	/* PA_INIT_ERROR is fatal and needs UIC reset */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
-	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
+	if (reg)
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg);
+
+	if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) {
 		hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
-	else if (hba->dev_quirks &
+	} else if (hba->dev_quirks &
 		   UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) {
 		if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED)
 			hba->uic_error |=
@@ -4112,16 +5819,22 @@
 
 	/* UIC NL/TL/DME errors needs software retry */
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
-	if (reg)
+	if (reg) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_NL_ERROR;
+	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
-	if (reg)
+	if (reg) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg);
 		hba->uic_error |= UFSHCD_UIC_TL_ERROR;
+	}
 
 	reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
-	if (reg)
+	if (reg) {
+		ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg);
 		hba->uic_error |= UFSHCD_UIC_DME_ERROR;
+	}
 
 	dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
 			__func__, hba->uic_error);
@@ -4135,7 +5848,7 @@
 {
 	bool queue_eh_work = false;
 
-	if (hba->errors & INT_FATAL_ERRORS)
+	if (hba->errors & INT_FATAL_ERRORS || hba->ce_error)
 		queue_eh_work = true;
 
 	if (hba->errors & UIC_ERROR) {
@@ -4152,11 +5865,12 @@
 		 */
 		hba->saved_err |= hba->errors;
 		hba->saved_uic_err |= hba->uic_error;
+		hba->saved_ce_err |= hba->ce_error;
 
 		/* handle fatal errors only when link is functional */
 		if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
 			/* block commands from scsi mid-layer */
-			scsi_block_requests(hba->host);
+			__ufshcd_scsi_block_requests(hba);
 
 			hba->ufshcd_state = UFSHCD_STATE_ERROR;
 			schedule_work(&hba->eh_work);
@@ -4190,8 +5904,13 @@
  */
 static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
 {
+	ufsdbg_error_inject_dispatcher(hba,
+		ERR_INJECT_INTR, intr_status, &intr_status);
+
+	ufshcd_vops_crypto_engine_get_status(hba, &hba->ce_error);
+
 	hba->errors = UFSHCD_ERROR_MASK & intr_status;
-	if (hba->errors)
+	if (hba->errors || hba->ce_error)
 		ufshcd_check_errors(hba);
 
 	if (intr_status & UFSHCD_UIC_MASK)
@@ -4284,7 +6003,7 @@
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 
 	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
@@ -4318,6 +6037,8 @@
 	wmb();
 
 	ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
+	/* Make sure that doorbell is committed immediately */
+	wmb();
 
 	spin_unlock_irqrestore(host->host_lock, flags);
 
@@ -4340,7 +6061,7 @@
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
 
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
 
@@ -4385,7 +6106,9 @@
 	spin_lock_irqsave(host->host_lock, flags);
 	ufshcd_transfer_req_compl(hba);
 	spin_unlock_irqrestore(host->host_lock, flags);
+
 out:
+	hba->req_abort_count = 0;
 	if (!err) {
 		err = SUCCESS;
 	} else {
@@ -4395,6 +6118,17 @@
 	return err;
 }
 
+static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
+{
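+	/*
+	 * Mark every request in @bitmap so that further abort attempts on
+	 * it are skipped and reported as failed right away.
+	 */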
+	struct ufshcd_lrb *lrbp;
+	int tag;
+
+	for_each_set_bit(tag, &bitmap, hba->nutrs) {
+		lrbp = &hba->lrb[tag];
+		lrbp->req_abort_skip = true;
+	}
+}
+
 /**
  * ufshcd_abort - abort a specific command
  * @cmd: SCSI command pointer
@@ -4429,7 +6163,21 @@
 		BUG();
 	}
 
-	ufshcd_hold(hba, false);
+	lrbp = &hba->lrb[tag];
+
+	ufshcd_update_error_stats(hba, UFS_ERR_TASK_ABORT);
+
+	/*
+	 * Task abort to the device W-LUN is illegal. When this command
+	 * fails due to that spec violation, the next step of SCSI error
+	 * handling would be to send a LU reset which, again, is a spec
+	 * violation. To avoid these unnecessary/illegal steps we skip to
+	 * the last error handling stage: reset and restore.
+	 */
+	if (lrbp->lun == UFS_UPIU_UFS_DEVICE_WLUN)
+		return ufshcd_eh_host_reset_handler(cmd);
+
+	ufshcd_hold_all(hba);
 	reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 	/* If command is already aborted/completed, return SUCCESS */
 	if (!(test_bit(tag, &hba->outstanding_reqs))) {
@@ -4445,18 +6193,49 @@
 		__func__, tag);
 	}
 
-	lrbp = &hba->lrb[tag];
+	/* Print Transfer Request of aborted task */
+	dev_err(hba->dev, "%s: Device abort task at tag %d", __func__, tag);
+
+	/*
+	 * Print detailed info about aborted request.
+	 * As more than one request might get aborted at the same time,
+	 * print full information only for the first aborted request in order
+	 * to reduce repeated printouts. For other aborted requests only print
+	 * basic details.
+	 */
+	scsi_print_command(cmd);
+	if (!hba->req_abort_count) {
+		ufshcd_print_host_regs(hba);
+		ufshcd_print_host_state(hba);
+		ufshcd_print_pwr_info(hba);
+		ufshcd_print_trs(hba, 1 << tag, true);
+	} else {
+		ufshcd_print_trs(hba, 1 << tag, false);
+	}
+	hba->req_abort_count++;
+
+	/* Skip task abort in case previous aborts failed and report failure */
+	if (lrbp->req_abort_skip) {
+		err = -EIO;
+		goto out;
+	}
+
 	for (poll_cnt = 100; poll_cnt; poll_cnt--) {
 		err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
 				UFS_QUERY_TASK, &resp);
 		if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
 			/* cmd pending in the device */
+			dev_err(hba->dev, "%s: cmd pending in the device. tag = %d",
+				__func__, tag);
 			break;
 		} else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
 			/*
 			 * cmd not pending in the device, check if it is
 			 * in transition.
 			 */
+			dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.",
+				__func__, tag);
 			reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 			if (reg & (1 << tag)) {
 				/* sleep for max. 200us to stabilize */
@@ -4464,8 +6243,13 @@
 				continue;
 			}
 			/* command completed already */
+			dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.",
+				__func__, tag);
 			goto out;
 		} else {
+			dev_err(hba->dev,
+				"%s: no response from device. tag = %d, err %d",
+				__func__, tag, err);
 			if (!err)
 				err = resp; /* service response error */
 			goto out;
@@ -4480,14 +6264,20 @@
 	err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
 			UFS_ABORT_TASK, &resp);
 	if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
-		if (!err)
+		if (!err) {
 			err = resp; /* service response error */
+			dev_err(hba->dev, "%s: ABORT TASK failed. tag = %d, err %d",
+				__func__, tag, err);
+		}
 		goto out;
 	}
 
 	err = ufshcd_clear_cmd(hba, tag);
-	if (err)
+	if (err) {
+		dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d",
+			__func__, tag, err);
 		goto out;
+	}
 
 	scsi_dma_unmap(cmd);
 
@@ -4504,14 +6294,15 @@
 		err = SUCCESS;
 	} else {
 		dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
+		ufshcd_set_req_abort_skip(hba, hba->outstanding_reqs);
 		err = FAILED;
 	}
 
 	/*
-	 * This ufshcd_release() corresponds to the original scsi cmd that got
-	 * aborted here (as we won't get any IRQ for it).
+	 * This ufshcd_release_all() corresponds to the original scsi cmd that
+	 * got aborted here (as we won't get any IRQ for it).
 	 */
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
 
@@ -4535,6 +6326,9 @@
 	ufshcd_hba_stop(hba, false);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	/* scale up clocks to max frequency before full reinitialization */
+	ufshcd_set_clk_freq(hba, true);
+
 	err = ufshcd_hba_enable(hba);
 	if (err)
 		goto out;
@@ -4542,8 +6336,21 @@
 	/* Establish the link again and restore the device */
 	err = ufshcd_probe_hba(hba);
 
-	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
+	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) {
 		err = -EIO;
+		goto out;
+	}
+
+	if (!err) {
+		err = ufshcd_vops_crypto_engine_reset(hba);
+		if (err) {
+			dev_err(hba->dev,
+				"%s: failed to reset crypto engine %d\n",
+				__func__, err);
+			goto out;
+		}
+	}
+
 out:
 	if (err)
 		dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -4571,6 +6378,12 @@
 	} while (err && --retries);
 
 	/*
+	 * There is no point in proceeding if we have failed to recover
+	 * after multiple retries.
+	 */
+	if (err)
+		BUG();
+	/*
 	 * After reset the door-bell might be cleared, complete
 	 * outstanding requests in s/w here.
 	 */
@@ -4596,7 +6409,7 @@
 
 	hba = shost_priv(cmd->device->host);
 
-	ufshcd_hold(hba, false);
+	ufshcd_hold_all(hba);
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -4617,6 +6430,7 @@
 	ufshcd_set_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	ufshcd_update_error_stats(hba, UFS_ERR_EH);
 	err = ufshcd_reset_and_restore(hba);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4630,7 +6444,7 @@
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	return err;
 }
 
@@ -4819,75 +6633,6 @@
 	return ret;
 }
 
-static int ufs_get_device_info(struct ufs_hba *hba,
-				struct ufs_device_info *card_data)
-{
-	int err;
-	u8 model_index;
-	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
-	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
-
-	err = ufshcd_read_device_desc(hba, desc_buf,
-					QUERY_DESC_DEVICE_MAX_SIZE);
-	if (err) {
-		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	/*
-	 * getting vendor (manufacturerID) and Bank Index in big endian
-	 * format
-	 */
-	card_data->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 |
-				     desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1];
-
-	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
-
-	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
-					QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
-	if (err) {
-		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
-			__func__, err);
-		goto out;
-	}
-
-	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
-	strlcpy(card_data->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
-		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
-		      MAX_MODEL_LEN));
-
-	/* Null terminate the model string */
-	card_data->model[MAX_MODEL_LEN] = '\0';
-
-out:
-	return err;
-}
-
-void ufs_advertise_fixup_device(struct ufs_hba *hba)
-{
-	int err;
-	struct ufs_dev_fix *f;
-	struct ufs_device_info card_data;
-
-	card_data.wmanufacturerid = 0;
-
-	err = ufs_get_device_info(hba, &card_data);
-	if (err) {
-		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
-			__func__, err);
-		return;
-	}
-
-	for (f = ufs_fixups; f->quirk; f++) {
-		if (((f->card.wmanufacturerid == card_data.wmanufacturerid) ||
-		    (f->card.wmanufacturerid == UFS_ANY_VENDOR)) &&
-		    (STR_PRFX_EQUAL(f->card.model, card_data.model) ||
-		     !strcmp(f->card.model, UFS_ANY_MODEL)))
-			hba->dev_quirks |= f->quirk;
-	}
-}
-
 /**
  * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
  * @hba: per-adapter instance
@@ -4904,6 +6649,9 @@
 	int ret = 0;
 	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
 
+	if (!ufshcd_is_unipro_pa_params_tuning_req(hba))
+		return 0;
+
 	ret = ufshcd_dme_peer_get(hba,
 				  UIC_ARG_MIB_SEL(
 					RX_MIN_ACTIVATETIME_CAPABILITY,
@@ -4965,6 +6713,76 @@
 	return ret;
 }
 
+/**
+ * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
+ * less than device PA_TACTIVATE time.
+ * @hba: per-adapter instance
+ *
+ * Some UFS devices require host PA_TACTIVATE to be lower than device
+ * PA_TACTIVATE; the UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE quirk needs to be
+ * enabled for such devices.
+ *
+ * Returns zero on success, non-zero error value on failure.
+ */
+static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
+{
+	int ret = 0;
+	u32 granularity, peer_granularity;
+	u32 pa_tactivate, peer_pa_tactivate;
+	u32 pa_tactivate_us, peer_pa_tactivate_us;
+	u8 gran_to_us_table[] = {1, 4, 8, 16, 32, 100};
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &granularity);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
+				  &peer_granularity);
+	if (ret)
+		goto out;
+
+	if ((granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid host PA_GRANULARITY %d",
+			__func__, granularity);
+		return -EINVAL;
+	}
+
+	if ((peer_granularity < PA_GRANULARITY_MIN_VAL) ||
+	    (peer_granularity > PA_GRANULARITY_MAX_VAL)) {
+		dev_err(hba->dev, "%s: invalid device PA_GRANULARITY %d",
+			__func__, peer_granularity);
+		return -EINVAL;
+	}
+
+	ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
+				  &peer_pa_tactivate);
+	if (ret)
+		goto out;
+
+	pa_tactivate_us = pa_tactivate * gran_to_us_table[granularity - 1];
+	peer_pa_tactivate_us = peer_pa_tactivate *
+			     gran_to_us_table[peer_granularity - 1];
+
+	if (pa_tactivate_us > peer_pa_tactivate_us) {
+		u32 new_peer_pa_tactivate;
+
+		new_peer_pa_tactivate = pa_tactivate_us /
+				      gran_to_us_table[peer_granularity - 1];
+		new_peer_pa_tactivate++;
+		ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
+					  new_peer_pa_tactivate);
+	}
+
+out:
+	return ret;
+}
+
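For reference, the quirk above compares PA_TACTIVATE in microseconds, since each side of the link expresses PA_TACTIVATE in its own PA_GRANULARITY units. A minimal standalone sketch of that conversion and of the adjustment applied when the host value exceeds the device value (the step table is copied from the hunk above; the helper name and the sample numbers are illustrative only):

#include <stdio.h>

/*
 * Granularity step sizes in microseconds, indexed by PA_GRANULARITY 1..6
 * (same table as gran_to_us_table in the quirk above).
 */
static const unsigned int gran_to_us_table[] = {1, 4, 8, 16, 32, 100};

/* Illustrative helper: PA_TACTIVATE in granularity units -> microseconds. */
static unsigned int pa_tactivate_to_us(unsigned int pa_tactivate,
				       unsigned int granularity)
{
	return pa_tactivate * gran_to_us_table[granularity - 1];
}

int main(void)
{
	/* Sample values: host granularity 3 (8 us steps), device granularity 5 (32 us steps). */
	unsigned int host_us = pa_tactivate_to_us(10, 3);	/* 80 us */
	unsigned int peer_us = pa_tactivate_to_us(2, 5);	/* 64 us */

	if (host_us > peer_us)
		/* Same adjustment as the quirk: integer-divide back to units, then add one. */
		printf("raise device PA_TACTIVATE to %u units\n",
		       host_us / gran_to_us_table[5 - 1] + 1);
	return 0;
}

With these sample values the device side is raised from 2 units (64 us) to 3 units (96 us), the first granularity step strictly above the host's 80 us.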
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
 	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
@@ -4975,6 +6793,51 @@
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)
 		/* set 1ms timeout for PA_TACTIVATE */
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 10);
+
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE)
+		ufshcd_quirk_tune_host_pa_tactivate(hba);
+
+	ufshcd_vops_apply_dev_quirks(hba);
+}
+
+static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
+{
+	int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist);
+
+	hba->ufs_stats.hibern8_exit_cnt = 0;
+	hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0);
+
+	memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size);
+	memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size);
+
+	hba->req_abort_count = 0;
+}
+
+static void ufshcd_apply_pm_quirks(struct ufs_hba *hba)
+{
+	if (hba->dev_quirks & UFS_DEVICE_QUIRK_NO_LINK_OFF) {
+		if (ufs_get_pm_lvl_to_link_pwr_state(hba->rpm_lvl) ==
+		    UIC_LINK_OFF_STATE) {
+			hba->rpm_lvl =
+				ufs_get_desired_pm_lvl_for_dev_link_state(
+						UFS_SLEEP_PWR_MODE,
+						UIC_LINK_HIBERN8_STATE);
+			dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed rpm_lvl to %d\n",
+				hba->rpm_lvl);
+		}
+		if (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+		    UIC_LINK_OFF_STATE) {
+			hba->spm_lvl =
+				ufs_get_desired_pm_lvl_for_dev_link_state(
+						UFS_SLEEP_PWR_MODE,
+						UIC_LINK_HIBERN8_STATE);
+			dev_info(hba->dev, "UFS_DEVICE_QUIRK_NO_LINK_OFF enabled, changed spm_lvl to %d\n",
+				hba->spm_lvl);
+		}
+	}
 }
 
 /**
@@ -4986,13 +6849,19 @@
 static int ufshcd_probe_hba(struct ufs_hba *hba)
 {
 	int ret;
+	ktime_t start = ktime_get();
 
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
 
-	ufshcd_init_pwr_info(hba);
+	/* Enable auto hibern8 if supported */
+	if (ufshcd_is_auto_hibern8_supported(hba))
+		ufshcd_set_auto_hibern8_timer(hba,
+					      hba->hibern8_on_idle.delay_ms);
 
+	/* Debug counters initialization */
+	ufshcd_clear_dbg_ufs_stats(hba);
 	/* set the default level for urgent bkops */
 	hba->urgent_bkops_lvl = BKOPS_STATUS_PERF_IMPACT;
 	hba->is_urgent_bkops_lvl_checked = false;
@@ -5011,6 +6880,7 @@
 	ufs_advertise_fixup_device(hba);
 	ufshcd_tune_unipro_params(hba);
 
+	ufshcd_apply_pm_quirks(hba);
 	ret = ufshcd_set_vccq_rail_unused(hba,
 		(hba->dev_quirks & UFS_DEVICE_NO_VCCQ) ? true : false);
 	if (ret)
@@ -5027,9 +6897,11 @@
 			__func__);
 	} else {
 		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
-		if (ret)
+		if (ret) {
 			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
 					__func__, ret);
+			goto out;
+		}
 	}
 
 	/* set the state as operational after switching to desired gear */
@@ -5058,13 +6930,19 @@
 		pm_runtime_put_sync(hba->dev);
 	}
 
+	/* Resume devfreq after UFS device is detected */
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		memcpy(&hba->clk_scaling.saved_pwr_info.info, &hba->pwr_info,
+		       sizeof(struct ufs_pa_layer_attr));
+		hba->clk_scaling.saved_pwr_info.is_valid = true;
+		hba->clk_scaling.is_scaled_up = true;
+		ufshcd_resume_clkscaling(hba);
+		hba->clk_scaling.is_allowed = true;
+	}
+
 	if (!hba->is_init_prefetch)
 		hba->is_init_prefetch = true;
 
-	/* Resume devfreq after UFS device is detected */
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
-
 out:
 	/*
 	 * If we failed to initialize the device or the device is not
@@ -5075,6 +6953,9 @@
 		ufshcd_hba_exit(hba);
 	}
 
+	trace_ufshcd_init(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	return ret;
 }
 
@@ -5087,7 +6968,261 @@
 {
 	struct ufs_hba *hba = (struct ufs_hba *)data;
 
+	/*
+	 * Don't allow clock gating and hibern8 entry so that device
+	 * detection is faster.
+	 */
+	ufshcd_hold_all(hba);
 	ufshcd_probe_hba(hba);
+	ufshcd_release_all(hba);
+}
+
+/**
+ * ufshcd_query_ioctl - perform user read queries
+ * @hba: per-adapter instance
+ * @lun: used for lun specific queries
+ * @buffer: user space buffer for reading and submitting query data and params
+ * @return: 0 for success, negative error code otherwise
+ *
+ * Expected/Submitted buffer structure is struct ufs_ioctl_query_data.
+ * It reads the opcode, idn and buf_size parameters and puts the
+ * response in the buffer field while updating the used size in buf_size.
+ */
+static int ufshcd_query_ioctl(struct ufs_hba *hba, u8 lun, void __user *buffer)
+{
+	struct ufs_ioctl_query_data *ioctl_data;
+	int err = 0;
+	int length = 0;
+	void *data_ptr;
+	bool flag;
+	u32 att;
+	u8 index;
+	u8 *desc = NULL;
+
+	ioctl_data = kzalloc(sizeof(struct ufs_ioctl_query_data), GFP_KERNEL);
+	if (!ioctl_data) {
+		dev_err(hba->dev, "%s: Failed allocating %zu bytes\n", __func__,
+				sizeof(struct ufs_ioctl_query_data));
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* extract params from user buffer */
+	err = copy_from_user(ioctl_data, buffer,
+			sizeof(struct ufs_ioctl_query_data));
+	if (err) {
+		dev_err(hba->dev,
+			"%s: Failed copying buffer from user, err %d\n",
+			__func__, err);
+		goto out_release_mem;
+	}
+
+	/* verify legal parameters & send query */
+	switch (ioctl_data->opcode) {
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		switch (ioctl_data->idn) {
+		case QUERY_DESC_IDN_DEVICE:
+		case QUERY_DESC_IDN_CONFIGURAION:
+		case QUERY_DESC_IDN_INTERCONNECT:
+		case QUERY_DESC_IDN_GEOMETRY:
+		case QUERY_DESC_IDN_POWER:
+			index = 0;
+			break;
+		case QUERY_DESC_IDN_UNIT:
+			if (!ufs_is_valid_unit_desc_lun(lun)) {
+				dev_err(hba->dev,
+					"%s: No unit descriptor for lun 0x%x\n",
+					__func__, lun);
+				err = -EINVAL;
+				goto out_release_mem;
+			}
+			index = lun;
+			break;
+		default:
+			goto out_einval;
+		}
+		length = min_t(int, QUERY_DESC_MAX_SIZE,
+				ioctl_data->buf_size);
+		desc = kzalloc(length, GFP_KERNEL);
+		if (!desc) {
+			dev_err(hba->dev, "%s: Failed allocating %d bytes\n",
+					__func__, length);
+			err = -ENOMEM;
+			goto out_release_mem;
+		}
+		err = ufshcd_query_descriptor(hba, ioctl_data->opcode,
+				ioctl_data->idn, index, 0, desc, &length);
+		break;
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		switch (ioctl_data->idn) {
+		case QUERY_ATTR_IDN_BOOT_LU_EN:
+		case QUERY_ATTR_IDN_POWER_MODE:
+		case QUERY_ATTR_IDN_ACTIVE_ICC_LVL:
+		case QUERY_ATTR_IDN_OOO_DATA_EN:
+		case QUERY_ATTR_IDN_BKOPS_STATUS:
+		case QUERY_ATTR_IDN_PURGE_STATUS:
+		case QUERY_ATTR_IDN_MAX_DATA_IN:
+		case QUERY_ATTR_IDN_MAX_DATA_OUT:
+		case QUERY_ATTR_IDN_REF_CLK_FREQ:
+		case QUERY_ATTR_IDN_CONF_DESC_LOCK:
+		case QUERY_ATTR_IDN_MAX_NUM_OF_RTT:
+		case QUERY_ATTR_IDN_EE_CONTROL:
+		case QUERY_ATTR_IDN_EE_STATUS:
+		case QUERY_ATTR_IDN_SECONDS_PASSED:
+			index = 0;
+			break;
+		case QUERY_ATTR_IDN_DYN_CAP_NEEDED:
+		case QUERY_ATTR_IDN_CORR_PRG_BLK_NUM:
+			index = lun;
+			break;
+		default:
+			goto out_einval;
+		}
+		err = ufshcd_query_attr(hba, ioctl_data->opcode, ioctl_data->idn,
+					index, 0, &att);
+		break;
+
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+		err = copy_from_user(&att,
+				buffer + sizeof(struct ufs_ioctl_query_data),
+				sizeof(u32));
+		if (err) {
+			dev_err(hba->dev,
+				"%s: Failed copying buffer from user, err %d\n",
+				__func__, err);
+			goto out_release_mem;
+		}
+
+		switch (ioctl_data->idn) {
+		case QUERY_ATTR_IDN_BOOT_LU_EN:
+			index = 0;
+			if (att > QUERY_ATTR_IDN_BOOT_LU_EN_MAX) {
+				dev_err(hba->dev,
+					"%s: Illegal ufs query ioctl data, opcode 0x%x, idn 0x%x, att 0x%x\n",
+					__func__, ioctl_data->opcode,
+					(unsigned int)ioctl_data->idn, att);
+				err = -EINVAL;
+				goto out_release_mem;
+			}
+			break;
+		default:
+			goto out_einval;
+		}
+		err = ufshcd_query_attr(hba, ioctl_data->opcode,
+					ioctl_data->idn, index, 0, &att);
+		break;
+
+	case UPIU_QUERY_OPCODE_READ_FLAG:
+		switch (ioctl_data->idn) {
+		case QUERY_FLAG_IDN_FDEVICEINIT:
+		case QUERY_FLAG_IDN_PERMANENT_WPE:
+		case QUERY_FLAG_IDN_PWR_ON_WPE:
+		case QUERY_FLAG_IDN_BKOPS_EN:
+		case QUERY_FLAG_IDN_PURGE_ENABLE:
+		case QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL:
+		case QUERY_FLAG_IDN_BUSY_RTC:
+			break;
+		default:
+			goto out_einval;
+		}
+		err = ufshcd_query_flag_retry(hba, ioctl_data->opcode,
+				ioctl_data->idn, &flag);
+		break;
+	default:
+		goto out_einval;
+	}
+
+	if (err) {
+		dev_err(hba->dev, "%s: Query for idn %d failed\n", __func__,
+				ioctl_data->idn);
+		goto out_release_mem;
+	}
+
+	/*
+	 * Copy the response data. We might end up reading less data than
+	 * what is specified in "ioctl_data->buf_size", so update
+	 * "ioctl_data->buf_size" to the size actually read.
+	 */
+	switch (ioctl_data->opcode) {
+	case UPIU_QUERY_OPCODE_READ_DESC:
+		ioctl_data->buf_size = min_t(int, ioctl_data->buf_size, length);
+		data_ptr = desc;
+		break;
+	case UPIU_QUERY_OPCODE_READ_ATTR:
+		ioctl_data->buf_size = sizeof(u32);
+		data_ptr = &att;
+		break;
+	case UPIU_QUERY_OPCODE_READ_FLAG:
+		ioctl_data->buf_size = 1;
+		data_ptr = &flag;
+		break;
+	case UPIU_QUERY_OPCODE_WRITE_ATTR:
+		goto out_release_mem;
+	default:
+		goto out_einval;
+	}
+
+	/* copy to user */
+	err = copy_to_user(buffer, ioctl_data,
+			sizeof(struct ufs_ioctl_query_data));
+	if (err)
+		dev_err(hba->dev, "%s: Failed copying back to user.\n",
+			__func__);
+	err = copy_to_user(buffer + sizeof(struct ufs_ioctl_query_data),
+			data_ptr, ioctl_data->buf_size);
+	if (err)
+		dev_err(hba->dev, "%s: err %d copying back to user.\n",
+				__func__, err);
+	goto out_release_mem;
+
+out_einval:
+	dev_err(hba->dev,
+		"%s: illegal ufs query ioctl data, opcode 0x%x, idn 0x%x\n",
+		__func__, ioctl_data->opcode, (unsigned int)ioctl_data->idn);
+	err = -EINVAL;
+out_release_mem:
+	kfree(ioctl_data);
+	kfree(desc);
+out:
+	return err;
+}
+
+/**
+ * ufshcd_ioctl - ufs ioctl callback registered in scsi_host
+ * @dev: scsi device required for per LUN queries
+ * @cmd: command opcode
+ * @buffer: user space buffer for transferring data
+ *
+ * Supported commands:
+ * UFS_IOCTL_QUERY
+ */
+static int ufshcd_ioctl(struct scsi_device *dev, int cmd, void __user *buffer)
+{
+	struct ufs_hba *hba = shost_priv(dev->host);
+	int err = 0;
+
+	BUG_ON(!hba);
+	if (!buffer) {
+		dev_err(hba->dev, "%s: User buffer is NULL!\n", __func__);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case UFS_IOCTL_QUERY:
+		pm_runtime_get_sync(hba->dev);
+		err = ufshcd_query_ioctl(hba, ufshcd_scsi_to_upiu_lun(dev->lun),
+				buffer);
+		pm_runtime_put_sync(hba->dev);
+		break;
+	default:
+		err = -ENOIOCTLCMD;
+		dev_dbg(hba->dev, "%s: Unsupported ioctl cmd %d\n", __func__,
+			cmd);
+		break;
+	}
+
+	return err;
 }
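A hedged sketch of how user space might drive the UFS_IOCTL_QUERY path added above. Only the field names the handler dereferences (opcode, idn, buf_size) and the two-part copy-back are taken from the code; the struct layout, the numeric opcode/IDN values, the ioctl number and the device node below are placeholders rather than definitions from this patch, and the authoritative ones live in the driver's ioctl header.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/*
 * Placeholder layout: it mirrors the fields ufshcd_query_ioctl()
 * dereferences (opcode, idn, buf_size) with the payload copied back
 * right after the header. The real struct ufs_ioctl_query_data and
 * UFS_IOCTL_QUERY number come from the driver's ioctl header.
 */
struct ufs_ioctl_query_data {
	uint32_t opcode;	/* UPIU_QUERY_OPCODE_* */
	uint32_t idn;		/* QUERY_{DESC,ATTR,FLAG}_IDN_* */
	uint32_t buf_size;	/* in: payload size, out: bytes returned */
};

#define UFS_IOCTL_QUERY_PLACEHOLDER	0x5388	/* assumed value, not from this patch */

int main(void)
{
	uint32_t raw[4];	/* 12-byte header plus one u32 of payload, aligned */
	struct ufs_ioctl_query_data *q = (struct ufs_ioctl_query_data *)raw;
	uint8_t *payload = (uint8_t *)(q + 1);
	int fd = open("/dev/block/sda", O_RDONLY);	/* any LUN of the UFS host */

	if (fd < 0)
		return 1;

	memset(raw, 0, sizeof(raw));
	q->opcode = 3;		/* assumed numeric value of UPIU_QUERY_OPCODE_READ_ATTR */
	q->idn = 5;		/* assumed numeric value of QUERY_ATTR_IDN_BKOPS_STATUS */
	q->buf_size = sizeof(uint32_t);

	if (ioctl(fd, UFS_IOCTL_QUERY_PLACEHOLDER, raw) == 0) {
		uint32_t attr;

		memcpy(&attr, payload, sizeof(attr));
		printf("attribute: %u (%u payload bytes returned)\n",
		       attr, q->buf_size);
	}

	close(fd);
	return 0;
}

The handler copies the header and the payload back in two separate copy_to_user() calls, so the payload always starts at sizeof(struct ufs_ioctl_query_data) into the user buffer and the updated buf_size reports how many payload bytes were actually written.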
 
 static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd)
@@ -5138,6 +7273,10 @@
 	.eh_device_reset_handler = ufshcd_eh_device_reset_handler,
 	.eh_host_reset_handler   = ufshcd_eh_host_reset_handler,
 	.eh_timed_out		= ufshcd_eh_timed_out,
+	.ioctl			= ufshcd_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= ufshcd_ioctl,
+#endif
 	.this_id		= -1,
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
@@ -5292,11 +7431,16 @@
 static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
 {
 	struct ufs_vreg_info *info = &hba->vreg_info;
+	int ret = 0;
 
-	if (info)
-		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+	if (info->vdd_hba) {
+		ret = ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
 
-	return 0;
+		if (!ret)
+			ufshcd_vops_update_sec_cfg(hba, on);
+	}
+
+	return ret;
 }
 
 static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
@@ -5378,22 +7522,36 @@
 	return ret;
 }
 
-static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
-					bool skip_ref_clk)
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+			       bool skip_ref_clk, bool is_gating_context)
 {
 	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
 	unsigned long flags;
+	ktime_t start = ktime_get();
+	bool clk_state_changed = false;
 
 	if (!head || list_empty(head))
 		goto out;
 
+	/*
+	 * The vendor-specific setup_clocks ops may depend on the clocks
+	 * managed by this standard driver, so call the vendor-specific
+	 * setup_clocks before disabling the clocks managed here.
+	 */
+	if (!on) {
+		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+		if (ret)
+			return ret;
+	}
+
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
 				continue;
 
+			clk_state_changed = on ^ clki->enabled;
 			if (on && !clki->enabled) {
 				ret = clk_prepare_enable(clki->clk);
 				if (ret) {
@@ -5410,24 +7568,52 @@
 		}
 	}
 
-	ret = ufshcd_vops_setup_clocks(hba, on);
+	/*
+	 * The vendor-specific setup_clocks ops may depend on the clocks
+	 * managed by this standard driver, so call the vendor-specific
+	 * setup_clocks after enabling the clocks managed here.
+	 */
+	if (on)
+		ret = ufshcd_vops_setup_clocks(hba, on, is_gating_context);
+
 out:
 	if (ret) {
 		list_for_each_entry(clki, head, list) {
 			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
 				clk_disable_unprepare(clki->clk);
 		}
-	} else if (on) {
+	} else if (!ret && on) {
 		spin_lock_irqsave(hba->host->host_lock, flags);
 		hba->clk_gating.state = CLKS_ON;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+			hba->clk_gating.state);
 		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		/* restore the secure configuration as clocks are enabled */
+		ufshcd_vops_update_sec_cfg(hba, true);
 	}
+
+	if (clk_state_changed)
+		trace_ufshcd_profile_clk_gating(dev_name(hba->dev),
+			(on ? "on" : "off"),
+			ktime_to_us(ktime_sub(ktime_get(), start)), ret);
 	return ret;
 }
 
-static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+static int ufshcd_enable_clocks(struct ufs_hba *hba)
 {
-	return  __ufshcd_setup_clocks(hba, on, false);
+	return  ufshcd_setup_clocks(hba, true, false, false);
+}
+
+static int ufshcd_disable_clocks(struct ufs_hba *hba,
+				 bool is_gating_context)
+{
+	return  ufshcd_setup_clocks(hba, false, false, is_gating_context);
+}
+
+static int ufshcd_disable_clocks_skip_ref_clk(struct ufs_hba *hba,
+					      bool is_gating_context)
+{
+	return  ufshcd_setup_clocks(hba, false, true, is_gating_context);
 }
 
 static int ufshcd_init_clocks(struct ufs_hba *hba)
@@ -5473,7 +7659,7 @@
 {
 	int err = 0;
 
-	if (!hba->vops)
+	if (!hba->var || !hba->var->vops)
 		goto out;
 
 	err = ufshcd_vops_init(hba);
@@ -5497,11 +7683,9 @@
 
 static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
 {
-	if (!hba->vops)
+	if (!hba->var || !hba->var->vops)
 		return;
 
-	ufshcd_vops_setup_clocks(hba, false);
-
 	ufshcd_vops_setup_regulators(hba, false);
 
 	ufshcd_vops_exit(hba);
@@ -5530,7 +7714,7 @@
 	if (err)
 		goto out_disable_hba_vreg;
 
-	err = ufshcd_setup_clocks(hba, true);
+	err = ufshcd_enable_clocks(hba);
 	if (err)
 		goto out_disable_hba_vreg;
 
@@ -5552,7 +7736,7 @@
 out_disable_vreg:
 	ufshcd_setup_vreg(hba, false);
 out_disable_clks:
-	ufshcd_setup_clocks(hba, false);
+	ufshcd_disable_clocks(hba, false);
 out_disable_hba_vreg:
 	ufshcd_setup_hba_vreg(hba, false);
 out:
@@ -5564,7 +7748,11 @@
 	if (hba->is_powered) {
 		ufshcd_variant_hba_exit(hba);
 		ufshcd_setup_vreg(hba, false);
-		ufshcd_setup_clocks(hba, false);
+		if (ufshcd_is_clkscaling_supported(hba)) {
+			ufshcd_suspend_clkscaling(hba);
+			destroy_workqueue(hba->clk_scaling.workq);
+		}
+		ufshcd_disable_clocks(hba, false);
 		ufshcd_setup_hba_vreg(hba, false);
 		hba->is_powered = false;
 	}
@@ -5577,19 +7765,19 @@
 				0,
 				0,
 				0,
-				SCSI_SENSE_BUFFERSIZE,
+				UFSHCD_REQ_SENSE_SIZE,
 				0};
 	char *buffer;
 	int ret;
 
-	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	buffer = kzalloc(UFSHCD_REQ_SENSE_SIZE, GFP_KERNEL);
 	if (!buffer) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
 	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
-				SCSI_SENSE_BUFFERSIZE, NULL,
+				UFSHCD_REQ_SENSE_SIZE, NULL,
 				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
 	if (ret)
 		pr_err("%s: failed with err %d\n", __func__, ret);
@@ -5730,8 +7918,7 @@
 	 * To avoid this situation, add 2ms delay before putting these UFS
 	 * rails in LPM mode.
 	 */
-	if (!ufshcd_is_link_active(hba) &&
-	    hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM)
+	if (!ufshcd_is_link_active(hba))
 		usleep_range(2000, 2100);
 
 	/*
@@ -5766,7 +7953,6 @@
 	    !hba->dev_info.is_lu_power_on_wp) {
 		ret = ufshcd_setup_vreg(hba, true);
 	} else if (!ufshcd_is_ufs_dev_active(hba)) {
-		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 		if (!ret && !ufshcd_is_link_active(hba)) {
 			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
 			if (ret)
@@ -5775,6 +7961,7 @@
 			if (ret)
 				goto vccq_lpm;
 		}
+		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
 	}
 	goto out;
 
@@ -5788,13 +7975,17 @@
 
 static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
 {
-	if (ufshcd_is_link_off(hba))
+	if (ufshcd_is_link_off(hba) ||
+	    (ufshcd_is_link_hibern8(hba)
+	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
 		ufshcd_setup_hba_vreg(hba, false);
 }
 
 static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
 {
-	if (ufshcd_is_link_off(hba))
+	if (ufshcd_is_link_off(hba) ||
+	    (ufshcd_is_link_hibern8(hba)
+	     && ufshcd_is_power_collapse_during_hibern8_allowed(hba)))
 		ufshcd_setup_hba_vreg(hba, true);
 }
 
@@ -5836,8 +8027,17 @@
 	 * If we can't transition into any of the low power modes
 	 * just gate the clocks.
 	 */
-	ufshcd_hold(hba, false);
+	WARN_ON(hba->hibern8_on_idle.is_enabled &&
+		hba->hibern8_on_idle.active_reqs);
+	ufshcd_hold_all(hba);
 	hba->clk_gating.is_suspended = true;
+	hba->hibern8_on_idle.is_suspended = true;
+
+	if (hba->clk_scaling.is_allowed) {
+		cancel_work_sync(&hba->clk_scaling.suspend_work);
+		cancel_work_sync(&hba->clk_scaling.resume_work);
+		ufshcd_suspend_clkscaling(hba);
+	}
 
 	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
 			req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -5846,12 +8046,12 @@
 
 	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
 	    (req_link_state == hba->uic_link_state))
-		goto out;
+		goto enable_gating;
 
 	/* UFS device & link must be active before we enter in this function */
 	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
 		ret = -EINVAL;
-		goto out;
+		goto enable_gating;
 	}
 
 	if (ufshcd_is_runtime_pm(pm_op)) {
@@ -5884,19 +8084,14 @@
 	if (ret)
 		goto set_dev_active;
 
+	if (ufshcd_is_link_hibern8(hba) &&
+	    ufshcd_is_hibern8_on_idle_allowed(hba))
+		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
+
 	ufshcd_vreg_set_lpm(hba);
 
 disable_clks:
 	/*
-	 * The clock scaling needs access to controller registers. Hence, Wait
-	 * for pending clock scaling work to be done before clocks are
-	 * turned off.
-	 */
-	if (ufshcd_is_clkscaling_enabled(hba)) {
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
-	}
-	/*
 	 * Call vendor specific suspend callback. As these callbacks may access
 	 * vendor specific host controller register space call them before the
 	 * host clocks are ON.
@@ -5905,17 +8100,19 @@
 	if (ret)
 		goto set_link_active;
 
-	ret = ufshcd_vops_setup_clocks(hba, false);
-	if (ret)
-		goto vops_resume;
-
 	if (!ufshcd_is_link_active(hba))
-		ufshcd_setup_clocks(hba, false);
+		ret = ufshcd_disable_clocks(hba, false);
 	else
 		/* If link is active, device ref_clk can't be switched off */
-		__ufshcd_setup_clocks(hba, false, true);
+		ret = ufshcd_disable_clocks_skip_ref_clk(hba, false);
+	if (ret)
+		goto set_link_active;
 
-	hba->clk_gating.state = CLKS_OFF;
+	if (ufshcd_is_clkgating_allowed(hba)) {
+		hba->clk_gating.state = CLKS_OFF;
+		trace_ufshcd_clk_gating(dev_name(hba->dev),
+					hba->clk_gating.state);
+	}
 	/*
 	 * Disable the host irq as there won't be any host controller
 	 * transaction expected till resume.
@@ -5925,22 +8122,31 @@
 	ufshcd_hba_vreg_set_lpm(hba);
 	goto out;
 
-vops_resume:
-	ufshcd_vops_resume(hba, pm_op);
 set_link_active:
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_resume_clkscaling(hba);
 	ufshcd_vreg_set_hpm(hba);
-	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba)) {
 		ufshcd_set_link_active(hba);
-	else if (ufshcd_is_link_off(hba))
+	} else if (ufshcd_is_link_off(hba)) {
+		ufshcd_update_error_stats(hba, UFS_ERR_VOPS_SUSPEND);
 		ufshcd_host_reset_and_restore(hba);
+	}
 set_dev_active:
 	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
 		ufshcd_disable_auto_bkops(hba);
 enable_gating:
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_resume_clkscaling(hba);
+	hba->hibern8_on_idle.is_suspended = false;
 	hba->clk_gating.is_suspended = false;
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 out:
 	hba->pm_op_in_progress = 0;
+
+	if (ret)
+		ufshcd_update_error_stats(hba, UFS_ERR_SUSPEND);
+
 	return ret;
 }
 
@@ -5964,14 +8170,12 @@
 
 	ufshcd_hba_vreg_set_hpm(hba);
 	/* Make sure clocks are enabled before accessing controller */
-	ret = ufshcd_setup_clocks(hba, true);
+	ret = ufshcd_enable_clocks(hba);
 	if (ret)
 		goto out;
 
 	/* enable the host irq as host controller would be active soon */
-	ret = ufshcd_enable_irq(hba);
-	if (ret)
-		goto disable_irq_and_vops_clks;
+	ufshcd_enable_irq(hba);
 
 	ret = ufshcd_vreg_set_hpm(hba);
 	if (ret)
@@ -5988,10 +8192,13 @@
 
 	if (ufshcd_is_link_hibern8(hba)) {
 		ret = ufshcd_uic_hibern8_exit(hba);
-		if (!ret)
+		if (!ret) {
 			ufshcd_set_link_active(hba);
-		else
+			if (ufshcd_is_hibern8_on_idle_allowed(hba))
+				hba->hibern8_on_idle.state = HIBERN8_EXITED;
+		} else {
 			goto vendor_suspend;
+		}
 	} else if (ufshcd_is_link_off(hba)) {
 		ret = ufshcd_host_reset_and_restore(hba);
 		/*
@@ -6000,6 +8207,9 @@
 		 */
 		if (ret || !ufshcd_is_link_active(hba))
 			goto vendor_suspend;
+		/* mark link state as hibern8 exited */
+		if (ufshcd_is_hibern8_on_idle_allowed(hba))
+			hba->hibern8_on_idle.state = HIBERN8_EXITED;
 	}
 
 	if (!ufshcd_is_ufs_dev_active(hba)) {
@@ -6008,31 +8218,47 @@
 			goto set_old_link_state;
 	}
 
-	/*
-	 * If BKOPs operations are urgently needed at this moment then
-	 * keep auto-bkops enabled or else disable it.
-	 */
-	ufshcd_urgent_bkops(hba);
-	hba->clk_gating.is_suspended = false;
+	if (ufshcd_keep_autobkops_enabled_except_suspend(hba))
+		ufshcd_enable_auto_bkops(hba);
+	else
+		/*
+		 * If BKOPs operations are urgently needed at this moment then
+		 * keep auto-bkops enabled or else disable it.
+		 */
+		ufshcd_urgent_bkops(hba);
 
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_resume_device(hba->devfreq);
+	hba->clk_gating.is_suspended = false;
+	hba->hibern8_on_idle.is_suspended = false;
+
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_resume_clkscaling(hba);
 
 	/* Schedule clock gating in case of no access to UFS device yet */
-	ufshcd_release(hba);
+	ufshcd_release_all(hba);
 	goto out;
 
 set_old_link_state:
 	ufshcd_link_state_transition(hba, old_link_state, 0);
+	if (ufshcd_is_link_hibern8(hba) &&
+	    ufshcd_is_hibern8_on_idle_allowed(hba))
+		hba->hibern8_on_idle.state = HIBERN8_ENTERED;
 vendor_suspend:
 	ufshcd_vops_suspend(hba, pm_op);
 disable_vreg:
 	ufshcd_vreg_set_lpm(hba);
 disable_irq_and_vops_clks:
 	ufshcd_disable_irq(hba);
-	ufshcd_setup_clocks(hba, false);
+	if (hba->clk_scaling.is_allowed)
+		ufshcd_suspend_clkscaling(hba);
+	ufshcd_disable_clocks(hba, false);
+	if (ufshcd_is_clkgating_allowed(hba))
+		hba->clk_gating.state = CLKS_OFF;
 out:
 	hba->pm_op_in_progress = 0;
+
+	if (ret)
+		ufshcd_update_error_stats(hba, UFS_ERR_RESUME);
+
 	return ret;
 }
 
@@ -6048,20 +8274,18 @@
 int ufshcd_system_suspend(struct ufs_hba *hba)
 {
 	int ret = 0;
+	ktime_t start = ktime_get();
 
 	if (!hba || !hba->is_powered)
 		return 0;
 
-	if (pm_runtime_suspended(hba->dev)) {
-		if (hba->rpm_lvl == hba->spm_lvl)
-			/*
-			 * There is possibility that device may still be in
-			 * active state during the runtime suspend.
-			 */
-			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
-			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
-				goto out;
+	if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+	     hba->curr_dev_pwr_mode) &&
+	    (ufs_get_pm_lvl_to_link_pwr_state(hba->spm_lvl) ==
+	     hba->uic_link_state))
+		goto out;
 
+	if (pm_runtime_suspended(hba->dev)) {
 		/*
 		 * UFS device and/or UFS link low power states during runtime
 		 * suspend seems to be different than what is expected during
@@ -6077,6 +8301,9 @@
 
 	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
 out:
+	trace_ufshcd_system_suspend(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
 	if (!ret)
 		hba->is_sys_suspended = true;
 	return ret;
@@ -6092,14 +8319,25 @@
 
 int ufshcd_system_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered || pm_runtime_suspended(hba->dev))
 		/*
 		 * Let the runtime resume take care of resuming
 		 * if runtime suspended.
 		 */
-		return 0;
-
-	return ufshcd_resume(hba, UFS_SYSTEM_PM);
+		goto out;
+	else
+		ret = ufshcd_resume(hba, UFS_SYSTEM_PM);
+out:
+	trace_ufshcd_system_resume(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode, hba->uic_link_state);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_system_resume);
 
@@ -6113,10 +8351,23 @@
  */
 int ufshcd_runtime_suspend(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
-		return 0;
+	int ret = 0;
+	ktime_t start = ktime_get();
 
-	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
+		goto out;
+	else
+		ret = ufshcd_suspend(hba, UFS_RUNTIME_PM);
+out:
+	trace_ufshcd_runtime_suspend(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode,
+		hba->uic_link_state);
+	return ret;
+
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
@@ -6143,10 +8394,22 @@
  */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-	if (!hba || !hba->is_powered)
-		return 0;
+	int ret = 0;
+	ktime_t start = ktime_get();
+
+	if (!hba)
+		return -EINVAL;
+
+	if (!hba->is_powered)
+		goto out;
 	else
-		return ufshcd_resume(hba, UFS_RUNTIME_PM);
+		ret = ufshcd_resume(hba, UFS_RUNTIME_PM);
+out:
+	trace_ufshcd_runtime_resume(dev_name(hba->dev), ret,
+		ktime_to_us(ktime_sub(ktime_get(), start)),
+		hba->curr_dev_pwr_mode,
+		hba->uic_link_state);
+	return ret;
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -6156,6 +8419,128 @@
 }
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
+static inline ssize_t ufshcd_pm_lvl_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t count,
+					   bool rpm)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	if (value >= UFS_PM_LVL_MAX)
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (rpm)
+		hba->rpm_lvl = value;
+	else
+		hba->spm_lvl = value;
+	ufshcd_apply_pm_quirks(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static ssize_t ufshcd_rpm_lvl_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int curr_len;
+	u8 lvl;
+
+	curr_len = snprintf(buf, PAGE_SIZE,
+			    "\nCurrent Runtime PM level [%d] => dev_state [%s] link_state [%s]\n",
+			    hba->rpm_lvl,
+			    ufschd_ufs_dev_pwr_mode_to_string(
+				ufs_pm_lvl_states[hba->rpm_lvl].dev_state),
+			    ufschd_uic_link_state_to_string(
+				ufs_pm_lvl_states[hba->rpm_lvl].link_state));
+
+	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+			     "\nAll available Runtime PM levels info:\n");
+	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+				     "\tRuntime PM level [%d] => dev_state [%s] link_state [%s]\n",
+				    lvl,
+				    ufschd_ufs_dev_pwr_mode_to_string(
+					ufs_pm_lvl_states[lvl].dev_state),
+				    ufschd_uic_link_state_to_string(
+					ufs_pm_lvl_states[lvl].link_state));
+
+	return curr_len;
+}
+
+static ssize_t ufshcd_rpm_lvl_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return ufshcd_pm_lvl_store(dev, attr, buf, count, true);
+}
+
+static void ufshcd_add_rpm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+	hba->rpm_lvl_attr.show = ufshcd_rpm_lvl_show;
+	hba->rpm_lvl_attr.store = ufshcd_rpm_lvl_store;
+	sysfs_attr_init(&hba->rpm_lvl_attr.attr);
+	hba->rpm_lvl_attr.attr.name = "rpm_lvl";
+	hba->rpm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->rpm_lvl_attr))
+		dev_err(hba->dev, "Failed to create sysfs for rpm_lvl\n");
+}
+
+static ssize_t ufshcd_spm_lvl_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int curr_len;
+	u8 lvl;
+
+	curr_len = snprintf(buf, PAGE_SIZE,
+			    "\nCurrent System PM level [%d] => dev_state [%s] link_state [%s]\n",
+			    hba->spm_lvl,
+			    ufschd_ufs_dev_pwr_mode_to_string(
+				ufs_pm_lvl_states[hba->spm_lvl].dev_state),
+			    ufschd_uic_link_state_to_string(
+				ufs_pm_lvl_states[hba->spm_lvl].link_state));
+
+	curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+			     "\nAll available System PM levels info:\n");
+	for (lvl = UFS_PM_LVL_0; lvl < UFS_PM_LVL_MAX; lvl++)
+		curr_len += snprintf((buf + curr_len), (PAGE_SIZE - curr_len),
+				     "\tSystem PM level [%d] => dev_state [%s] link_state [%s]\n",
+				    lvl,
+				    ufschd_ufs_dev_pwr_mode_to_string(
+					ufs_pm_lvl_states[lvl].dev_state),
+				    ufschd_uic_link_state_to_string(
+					ufs_pm_lvl_states[lvl].link_state));
+
+	return curr_len;
+}
+
+static ssize_t ufshcd_spm_lvl_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	return ufshcd_pm_lvl_store(dev, attr, buf, count, false);
+}
+
+static void ufshcd_add_spm_lvl_sysfs_nodes(struct ufs_hba *hba)
+{
+	hba->spm_lvl_attr.show = ufshcd_spm_lvl_show;
+	hba->spm_lvl_attr.store = ufshcd_spm_lvl_store;
+	sysfs_attr_init(&hba->spm_lvl_attr.attr);
+	hba->spm_lvl_attr.attr.name = "spm_lvl";
+	hba->spm_lvl_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->spm_lvl_attr))
+		dev_err(hba->dev, "Failed to create sysfs for spm_lvl\n");
+}
+
+static inline void ufshcd_add_sysfs_nodes(struct ufs_hba *hba)
+{
+	ufshcd_add_rpm_lvl_sysfs_nodes(hba);
+	ufshcd_add_spm_lvl_sysfs_nodes(hba);
+}
+
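The rpm_lvl and spm_lvl attributes registered above take a decimal level below UFS_PM_LVL_MAX. A small hedged user-space helper for writing such a node (the exact sysfs path depends on the platform device name, so it is taken as an argument instead of being assumed):

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

/*
 * Write a small decimal value to a sysfs attribute such as the rpm_lvl or
 * spm_lvl node created above. Returns 0 on success, -1 otherwise.
 */
static int sysfs_write_uint(const char *path, unsigned int value)
{
	char buf[16];
	int len, fd, ret = -1;

	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;

	len = snprintf(buf, sizeof(buf), "%u\n", value);
	if (write(fd, buf, len) == len)
		ret = 0;

	close(fd);
	return ret;
}

int main(int argc, char **argv)
{
	/* Usage: set_pm_lvl <path-to-rpm_lvl-or-spm_lvl> <level> */
	if (argc != 3)
		return 1;

	return sysfs_write_uint(argv[1], (unsigned int)atoi(argv[2])) ? 1 : 0;
}

Out-of-range or non-numeric input is rejected with -EINVAL by ufshcd_pm_lvl_store(), and the store path re-runs ufshcd_apply_pm_quirks() so a device with UFS_DEVICE_QUIRK_NO_LINK_OFF is never left on a link-off level.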
 /**
  * ufshcd_shutdown - shutdown routine
  * @hba: per adapter instance
@@ -6166,26 +8551,63 @@
  */
 int ufshcd_shutdown(struct ufs_hba *hba)
 {
-	int ret = 0;
-
-	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
-		goto out;
-
-	if (pm_runtime_suspended(hba->dev)) {
-		ret = ufshcd_runtime_resume(hba);
-		if (ret)
-			goto out;
-	}
-
-	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
-out:
-	if (ret)
-		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
-	/* allow force shutdown even in case of errors */
+	/*
+	 * TODO: This function should send the power down notification to
+	 * UFS device and then power off the UFS link. But we need to be sure
+	 * that there will not be any new UFS requests issued after this.
+	 */
 	return 0;
 }
 EXPORT_SYMBOL(ufshcd_shutdown);
 
+/*
+ * Values permitted 0, 1, 2.
+ * 0 -> Disable IO latency histograms (default)
+ * 1 -> Enable IO latency histograms
+ * 2 -> Zero out IO latency histograms
+ */
+static ssize_t
+latency_hist_store(struct device *dev, struct device_attribute *attr,
+		   const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	long value;
+
+	if (kstrtol(buf, 0, &value))
+		return -EINVAL;
+	if (value == BLK_IO_LAT_HIST_ZERO)
+		blk_zero_latency_hist(&hba->io_lat_s);
+	else if (value == BLK_IO_LAT_HIST_ENABLE ||
+		 value == BLK_IO_LAT_HIST_DISABLE)
+		hba->latency_hist_enabled = value;
+	return count;
+}
+
+ssize_t
+latency_hist_show(struct device *dev, struct device_attribute *attr,
+		  char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return blk_latency_hist_show(&hba->io_lat_s, buf);
+}
+
+static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
+		   latency_hist_show, latency_hist_store);
+
+static void
+ufshcd_init_latency_hist(struct ufs_hba *hba)
+{
+	if (device_create_file(hba->dev, &dev_attr_latency_hist))
+		dev_err(hba->dev, "Failed to create latency_hist sysfs entry\n");
+}
+
+static void
+ufshcd_exit_latency_hist(struct ufs_hba *hba)
+{
+	device_remove_file(hba->dev, &dev_attr_latency_hist);
+}
+
 /**
  * ufshcd_remove - de-allocate SCSI host and host memory space
  *		data structure memory
@@ -6198,12 +8620,16 @@
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba, true);
 
-	scsi_host_put(hba->host);
-
 	ufshcd_exit_clk_gating(hba);
-	if (ufshcd_is_clkscaling_enabled(hba))
-		devfreq_remove_device(hba->devfreq);
+	ufshcd_exit_hibern8_on_idle(hba);
+	ufshcd_exit_latency_hist(hba);
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+		if (hba->devfreq)
+			devfreq_remove_device(hba->devfreq);
+	}
 	ufshcd_hba_exit(hba);
+	ufsdbg_remove_debugfs(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -6269,71 +8695,353 @@
 }
 EXPORT_SYMBOL(ufshcd_alloc_host);
 
-static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+/**
+ * ufshcd_is_devfreq_scaling_required - check if scaling is required or not
+ * @hba: per adapter instance
+ * @scale_up: True if scaling up and false if scaling down
+ *
+ * Returns true if scaling is required, false otherwise.
+ */
+static bool ufshcd_is_devfreq_scaling_required(struct ufs_hba *hba,
+					       bool scale_up)
 {
-	int ret = 0;
 	struct ufs_clk_info *clki;
 	struct list_head *head = &hba->clk_list_head;
 
 	if (!head || list_empty(head))
-		goto out;
-
-	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, PRE_CHANGE);
-	if (ret)
-		return ret;
+		return false;
 
 	list_for_each_entry(clki, head, list) {
 		if (!IS_ERR_OR_NULL(clki->clk)) {
 			if (scale_up && clki->max_freq) {
 				if (clki->curr_freq == clki->max_freq)
 					continue;
-				ret = clk_set_rate(clki->clk, clki->max_freq);
-				if (ret) {
-					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-						__func__, clki->name,
-						clki->max_freq, ret);
-					break;
-				}
-				clki->curr_freq = clki->max_freq;
-
+				return true;
 			} else if (!scale_up && clki->min_freq) {
 				if (clki->curr_freq == clki->min_freq)
 					continue;
-				ret = clk_set_rate(clki->clk, clki->min_freq);
-				if (ret) {
-					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
-						__func__, clki->name,
-						clki->min_freq, ret);
-					break;
-				}
-				clki->curr_freq = clki->min_freq;
+				return true;
 			}
 		}
-		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
-				clki->name, clk_get_rate(clki->clk));
 	}
 
-	ret = ufshcd_vops_clk_scale_notify(hba, scale_up, POST_CHANGE);
+	return false;
+}
 
-out:
+/**
+ * ufshcd_scale_gear - scale up/down UFS gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up gear and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_scale_gear(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_pa_layer_attr new_pwr_info;
+	u32 scale_down_gear = ufshcd_vops_get_scale_down_gear(hba);
+
+	BUG_ON(!hba->clk_scaling.saved_pwr_info.is_valid);
+
+	if (scale_up) {
+		memcpy(&new_pwr_info, &hba->clk_scaling.saved_pwr_info.info,
+		       sizeof(struct ufs_pa_layer_attr));
+	} else {
+		memcpy(&new_pwr_info, &hba->pwr_info,
+		       sizeof(struct ufs_pa_layer_attr));
+
+		if (hba->pwr_info.gear_tx > scale_down_gear
+		    || hba->pwr_info.gear_rx > scale_down_gear) {
+			/* save the current power mode */
+			memcpy(&hba->clk_scaling.saved_pwr_info.info,
+				&hba->pwr_info,
+				sizeof(struct ufs_pa_layer_attr));
+
+			/* scale down gear */
+			new_pwr_info.gear_tx = scale_down_gear;
+			new_pwr_info.gear_rx = scale_down_gear;
+			if (!(hba->dev_quirks & UFS_DEVICE_NO_FASTAUTO)) {
+				new_pwr_info.pwr_tx = FASTAUTO_MODE;
+				new_pwr_info.pwr_rx = FASTAUTO_MODE;
+			}
+		}
+	}
+
+	ret = ufshcd_change_power_mode(hba, &new_pwr_info);
+
+	if (ret)
+		dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d), scale_up = %d",
+			__func__, ret,
+			hba->pwr_info.gear_tx, hba->pwr_info.gear_rx,
+			new_pwr_info.gear_tx, new_pwr_info.gear_rx,
+			scale_up);
+
 	return ret;
 }
 
+static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
+{
+	#define DOORBELL_CLR_TOUT_US		(1000 * 1000) /* 1 sec */
+	int ret = 0;
+	/*
+	 * make sure that there are no outstanding requests when
+	 * clock scaling is in progress
+	 */
+	ufshcd_scsi_block_requests(hba);
+	down_write(&hba->clk_scaling_lock);
+	if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+		ret = -EBUSY;
+		up_write(&hba->clk_scaling_lock);
+		ufshcd_scsi_unblock_requests(hba);
+	}
+
+	return ret;
+}
+
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+{
+	up_write(&hba->clk_scaling_lock);
+	ufshcd_scsi_unblock_requests(hba);
+}
+
+/**
+ * ufshcd_devfreq_scale - scale up/down UFS clocks and gear
+ * @hba: per adapter instance
+ * @scale_up: True for scaling up and false for scaling down
+ *
+ * Returns 0 for success,
+ * Returns -EBUSY if scaling can't happen at this time
+ * Returns non-zero for any other errors
+ */
+static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+
+	/* let's not get into low power until clock scaling is completed */
+	ufshcd_hold_all(hba);
+
+	ret = ufshcd_clock_scaling_prepare(hba);
+	if (ret)
+		goto out;
+
+	/* scale down the gear before scaling down clocks */
+	if (!scale_up) {
+		ret = ufshcd_scale_gear(hba, false);
+		if (ret)
+			goto clk_scaling_unprepare;
+	}
+
+	ret = ufshcd_scale_clks(hba, scale_up);
+	if (ret)
+		goto scale_up_gear;
+
+	/* scale up the gear after scaling up clocks */
+	if (scale_up) {
+		ret = ufshcd_scale_gear(hba, true);
+		if (ret) {
+			ufshcd_scale_clks(hba, false);
+			goto clk_scaling_unprepare;
+		}
+	}
+
+	if (!ret) {
+		hba->clk_scaling.is_scaled_up = scale_up;
+		if (scale_up)
+			hba->clk_gating.delay_ms =
+				hba->clk_gating.delay_ms_perf;
+		else
+			hba->clk_gating.delay_ms =
+				hba->clk_gating.delay_ms_pwr_save;
+	}
+
+	goto clk_scaling_unprepare;
+
+scale_up_gear:
+	if (!scale_up)
+		ufshcd_scale_gear(hba, true);
+clk_scaling_unprepare:
+	ufshcd_clock_scaling_unprepare(hba);
+out:
+	ufshcd_release_all(hba);
+	return ret;
+}
+
+static void __ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	devfreq_suspend_device(hba->devfreq);
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_scaling.window_start_t = 0;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
+
+static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool suspend = false;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!hba->clk_scaling.is_suspended) {
+		suspend = true;
+		hba->clk_scaling.is_suspended = true;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (suspend)
+		__ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	bool resume = false;
+
+	if (!ufshcd_is_clkscaling_supported(hba))
+		return;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_scaling.is_suspended) {
+		resume = true;
+		hba->clk_scaling.is_suspended = false;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	if (resume)
+		devfreq_resume_device(hba->devfreq);
+}
+
+static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+}
+
+static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	u32 value;
+	int err;
+
+	if (kstrtou32(buf, 0, &value))
+		return -EINVAL;
+
+	value = !!value;
+	if (value == hba->clk_scaling.is_allowed)
+		goto out;
+
+	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
+
+	cancel_work_sync(&hba->clk_scaling.suspend_work);
+	cancel_work_sync(&hba->clk_scaling.resume_work);
+
+	hba->clk_scaling.is_allowed = value;
+
+	if (value) {
+		ufshcd_resume_clkscaling(hba);
+	} else {
+		ufshcd_suspend_clkscaling(hba);
+		err = ufshcd_devfreq_scale(hba, true);
+		if (err)
+			dev_err(hba->dev, "%s: failed to scale clocks up %d\n",
+					__func__, err);
+	}
+
+	ufshcd_release(hba, false);
+	pm_runtime_put_sync(hba->dev);
+out:
+	return count;
+}
+
+static void ufshcd_clk_scaling_suspend_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_scaling.suspend_work);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (hba->clk_scaling.active_reqs || hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return;
+	}
+	hba->clk_scaling.is_suspended = true;
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	__ufshcd_suspend_clkscaling(hba);
+}
+
+static void ufshcd_clk_scaling_resume_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+					   clk_scaling.resume_work);
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (!hba->clk_scaling.is_suspended) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return;
+	}
+	hba->clk_scaling.is_suspended = false;
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	devfreq_resume_device(hba->devfreq);
+}
+
 static int ufshcd_devfreq_target(struct device *dev,
 				unsigned long *freq, u32 flags)
 {
-	int err = 0;
+	int ret = 0;
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long irq_flags;
+	ktime_t start;
+	bool scale_up, sched_clk_scaling_suspend_work = false;
 
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
 
-	if (*freq == UINT_MAX)
-		err = ufshcd_scale_clks(hba, true);
-	else if (*freq == 0)
-		err = ufshcd_scale_clks(hba, false);
+	if ((*freq > 0) && (*freq < UINT_MAX)) {
+		dev_err(hba->dev, "%s: invalid freq = %lu\n", __func__, *freq);
+		return -EINVAL;
+	}
 
-	return err;
+	spin_lock_irqsave(hba->host->host_lock, irq_flags);
+	if (ufshcd_eh_in_progress(hba)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		return 0;
+	}
+
+	if (!hba->clk_scaling.active_reqs)
+		sched_clk_scaling_suspend_work = true;
+
+	scale_up = (*freq == UINT_MAX) ? true : false;
+	if (!ufshcd_is_devfreq_scaling_required(hba, scale_up)) {
+		spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+		ret = 0;
+		goto out; /* no state change required */
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
+
+	start = ktime_get();
+	ret = ufshcd_devfreq_scale(hba, scale_up);
+	trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
+		(scale_up ? "up" : "down"),
+		ktime_to_us(ktime_sub(ktime_get(), start)), ret);
+
+out:
+	if (sched_clk_scaling_suspend_work)
+		queue_work(hba->clk_scaling.workq,
+			   &hba->clk_scaling.suspend_work);
+
+	return ret;
 }
 
 static int ufshcd_devfreq_get_dev_status(struct device *dev,
@@ -6343,7 +9051,7 @@
 	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
 	unsigned long flags;
 
-	if (!ufshcd_is_clkscaling_enabled(hba))
+	if (!ufshcd_is_clkscaling_supported(hba))
 		return -EINVAL;
 
 	memset(stat, 0, sizeof(*stat));
@@ -6374,12 +9082,48 @@
 	return 0;
 }
 
+#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
+static struct devfreq_simple_ondemand_data ufshcd_ondemand_data = {
+	.upthreshold = 35,
+	.downdifferential = 30,
+	.simple_scaling = 1,
+};
+
+static void *gov_data = &ufshcd_ondemand_data;
+#else
+static void *gov_data;
+#endif
+
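With the thresholds above, and assuming the stock simple_ondemand rule (scale up when the measured load exceeds upthreshold, scale down when it falls below upthreshold minus downdifferential), the governor's decision reduces to a two-point check around 35% and 5% load; the simple_scaling field is not part of the upstream governor and is assumed here to restrict the choice to min/max frequency only. A sketch of that decision, for illustration only:

#include <stdio.h>

/* Thresholds from ufshcd_ondemand_data above. */
#define UFS_UPTHRESHOLD		35	/* % busy above which we scale up */
#define UFS_DOWNDIFFERENTIAL	30	/* hysteresis below the upthreshold */

enum scale_decision { SCALE_UP, SCALE_DOWN, SCALE_KEEP };

/*
 * Sketch of the assumed decision: more than 35% busy scales up, less than
 * 5% busy scales down, and anything in between keeps the current level.
 * busy_time and total_time correspond to the devfreq_dev_status fields
 * reported by ufshcd_devfreq_get_dev_status().
 */
static enum scale_decision ufs_scale_decision(unsigned long busy_time,
					      unsigned long total_time)
{
	if (!total_time)
		return SCALE_KEEP;
	if (busy_time * 100 > total_time * UFS_UPTHRESHOLD)
		return SCALE_UP;
	if (busy_time * 100 <
	    total_time * (UFS_UPTHRESHOLD - UFS_DOWNDIFFERENTIAL))
		return SCALE_DOWN;
	return SCALE_KEEP;
}

int main(void)
{
	/* 40 ms busy in a 100 ms window scales up; 3 ms busy scales down. */
	printf("%d %d\n", ufs_scale_decision(40, 100),
	       ufs_scale_decision(3, 100));
	return 0;
}

The 40 ms polling interval set in ufs_devfreq_profile below determines how often this check is evaluated.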
 static struct devfreq_dev_profile ufs_devfreq_profile = {
-	.polling_ms	= 100,
+	.polling_ms	= 40,
 	.target		= ufshcd_devfreq_target,
 	.get_dev_status	= ufshcd_devfreq_get_dev_status,
 };
+static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+{
+	hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
+	hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
+	sysfs_attr_init(&hba->clk_scaling.enable_attr.attr);
+	hba->clk_scaling.enable_attr.attr.name = "clkscale_enable";
+	hba->clk_scaling.enable_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->clk_scaling.enable_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
+}
 
+static void ufshcd_init_lanes_per_dir(struct ufs_hba *hba)
+{
+	struct device *dev = hba->dev;
+	int ret;
+
+	ret = of_property_read_u32(dev->of_node, "lanes-per-direction",
+		&hba->lanes_per_direction);
+	if (ret) {
+		dev_dbg(hba->dev,
+			"%s: failed to read lanes-per-direction, ret=%d\n",
+			__func__, ret);
+		hba->lanes_per_direction = UFSHCD_DEFAULT_LANES_PER_DIRECTION;
+	}
+}
 /**
  * ufshcd_init - Driver initialization routine
  * @hba: per-adapter instance
@@ -6403,6 +9147,8 @@
 	hba->mmio_base = mmio_base;
 	hba->irq = irq;
 
+	ufshcd_init_lanes_per_dir(hba);
+
 	err = ufshcd_hba_init(hba);
 	if (err)
 		goto out_error;
@@ -6413,9 +9159,20 @@
 	/* Get UFS version supported by the controller */
 	hba->ufs_version = ufshcd_get_ufs_version(hba);
 
+	/* print error message if ufs_version is not valid */
+	if ((hba->ufs_version != UFSHCI_VERSION_10) &&
+	    (hba->ufs_version != UFSHCI_VERSION_11) &&
+	    (hba->ufs_version != UFSHCI_VERSION_20) &&
+	    (hba->ufs_version != UFSHCI_VERSION_21))
+		dev_err(hba->dev, "invalid UFS version 0x%x\n",
+			hba->ufs_version);
+
 	/* Get Interrupt bit mask per version */
 	hba->intr_mask = ufshcd_get_intr_mask(hba);
 
+	/* Enable debug prints */
+	hba->ufshcd_dbg_print = DEFAULT_UFSHCD_DBG_PRINT_EN;
+
 	err = ufshcd_set_dma_mask(hba);
 	if (err) {
 		dev_err(hba->dev, "set dma mask failed\n");
@@ -6439,6 +9196,7 @@
 	host->max_channel = UFSHCD_MAX_CHANNEL;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = MAX_CDB_SIZE;
+	host->set_dbd_for_caching = 1;
 
 	hba->max_pwr_info.is_valid = false;
 
@@ -6456,10 +9214,13 @@
 	/* Initialize mutex for device management commands */
 	mutex_init(&hba->dev_cmd.lock);
 
+	init_rwsem(&hba->clk_scaling_lock);
+
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
 	ufshcd_init_clk_gating(hba);
+	ufshcd_init_hibern8_on_idle(hba);
 
 	/*
 	 * In order to avoid any spurious interrupt immediately after
@@ -6494,43 +9255,81 @@
 	err = ufshcd_hba_enable(hba);
 	if (err) {
 		dev_err(hba->dev, "Host controller enable failed\n");
+		ufshcd_print_host_regs(hba);
+		ufshcd_print_host_state(hba);
 		goto out_remove_scsi_host;
 	}
 
-	if (ufshcd_is_clkscaling_enabled(hba)) {
+	if (ufshcd_is_clkscaling_supported(hba)) {
+		char wq_name[sizeof("ufs_clkscaling_00")];
+
 		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
-						   "simple_ondemand", NULL);
+						   "simple_ondemand", gov_data);
 		if (IS_ERR(hba->devfreq)) {
 			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
 					PTR_ERR(hba->devfreq));
 			err = PTR_ERR(hba->devfreq);
 			goto out_remove_scsi_host;
 		}
+		hba->clk_scaling.is_suspended = false;
+
+		INIT_WORK(&hba->clk_scaling.suspend_work,
+			  ufshcd_clk_scaling_suspend_work);
+		INIT_WORK(&hba->clk_scaling.resume_work,
+			  ufshcd_clk_scaling_resume_work);
+
+		snprintf(wq_name, ARRAY_SIZE(wq_name), "ufs_clkscaling_%d",
+			 host->host_no);
+		hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
 		/* Suspend devfreq until the UFS device is detected */
-		devfreq_suspend_device(hba->devfreq);
-		hba->clk_scaling.window_start_t = 0;
+		ufshcd_suspend_clkscaling(hba);
+		ufshcd_clkscaling_init_sysfs(hba);
 	}
 
+	/*
+	 * If rpm_lvl and spm_lvl are not already set to valid levels, set the
+	 * set the default power management level for UFS runtime and system
+	 * suspend. Default power saving mode selected is keeping UFS link in
+	 * Hibern8 state and UFS device in sleep.
+	 */
+	if (!ufshcd_is_valid_pm_lvl(hba->rpm_lvl))
+		hba->rpm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+							UFS_SLEEP_PWR_MODE,
+							UIC_LINK_HIBERN8_STATE);
+	if (!ufshcd_is_valid_pm_lvl(hba->spm_lvl))
+		hba->spm_lvl = ufs_get_desired_pm_lvl_for_dev_link_state(
+							UFS_SLEEP_PWR_MODE,
+							UIC_LINK_HIBERN8_STATE);
+
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
 
+	ufshcd_init_latency_hist(hba);
+
 	/*
-	 * The device-initialize-sequence hasn't been invoked yet.
-	 * Set the device to power-off state
+	 * We are assuming that device wasn't put in sleep/power-down
+	 * state exclusively during the boot stage before kernel.
+	 * This assumption helps avoid doing link startup twice during
+	 * ufshcd_probe_hba().
 	 */
-	ufshcd_set_ufs_dev_poweroff(hba);
+	ufshcd_set_ufs_dev_active(hba);
 
 	async_schedule(ufshcd_async_scan, hba);
 
+	ufsdbg_add_debugfs(hba);
+
+	ufshcd_add_sysfs_nodes(hba);
+
 	return 0;
 
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
 exit_gating:
 	ufshcd_exit_clk_gating(hba);
+	ufshcd_exit_latency_hist(hba);
 out_disable:
 	hba->is_irq_enabled = false;
-	scsi_host_put(host);
 	ufshcd_hba_exit(hba);
 out_error:
 	return err;
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 430bef1..3dd5559 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -45,6 +45,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/workqueue.h>
 #include <linux/errno.h>
 #include <linux/types.h>
@@ -65,11 +66,15 @@
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_eh.h>
 
+#include <linux/fault-inject.h>
 #include "ufs.h"
 #include "ufshci.h"
 
 #define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.2"
+#define UFSHCD_DRIVER_VERSION "0.3"
+
+#define UFS_BIT(x)	BIT(x)
+#define UFS_MASK(x, y)	(x << ((y) % BITS_PER_LONG))
 
 struct ufs_hba;
 
@@ -127,6 +132,26 @@
 #define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
 				    UIC_LINK_HIBERN8_STATE)
 
+enum {
+	/* errors which require the host controller reset for recovery */
+	UFS_ERR_HIBERN8_EXIT,
+	UFS_ERR_VOPS_SUSPEND,
+	UFS_ERR_EH,
+	UFS_ERR_CLEAR_PEND_XFER_TM,
+	UFS_ERR_INT_FATAL_ERRORS,
+	UFS_ERR_INT_UIC_ERROR,
+	UFS_ERR_CRYPTO_ENGINE,
+
+	/* other errors */
+	UFS_ERR_HIBERN8_ENTER,
+	UFS_ERR_RESUME,
+	UFS_ERR_SUSPEND,
+	UFS_ERR_LINKSTARTUP,
+	UFS_ERR_POWER_MODE_CHANGE,
+	UFS_ERR_TASK_ABORT,
+	UFS_ERR_MAX,
+};
+
 /*
  * UFS Power management levels.
  * Each level is in increasing order of power savings.
@@ -152,6 +177,10 @@
  * @ucd_req_ptr: UCD address of the command
  * @ucd_rsp_ptr: Response UPIU address for this command
  * @ucd_prdt_ptr: PRDT address of the command
+ * @utrd_dma_addr: UTRD dma address for debug
+ * @ucd_prdt_dma_addr: PRDT dma address for debug
+ * @ucd_rsp_dma_addr: UPIU response dma address for debug
+ * @ucd_req_dma_addr: UPIU request dma address for debug
  * @cmd: pointer to SCSI command
  * @sense_buffer: pointer to sense buffer address of the SCSI command
  * @sense_bufflen: Length of the sense buffer
@@ -160,6 +189,9 @@
  * @task_tag: Task tag of the command
  * @lun: LUN of the command
  * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
+ * @issue_time_stamp: time stamp for debug purposes
+ * @complete_time_stamp: time stamp for statistics
+ * @req_abort_skip: skip request abort task flag
  */
 struct ufshcd_lrb {
 	struct utp_transfer_req_desc *utr_descriptor_ptr;
@@ -167,6 +199,11 @@
 	struct utp_upiu_rsp *ucd_rsp_ptr;
 	struct ufshcd_sg_entry *ucd_prdt_ptr;
 
+	dma_addr_t utrd_dma_addr;
+	dma_addr_t ucd_req_dma_addr;
+	dma_addr_t ucd_rsp_dma_addr;
+	dma_addr_t ucd_prdt_dma_addr;
+
 	struct scsi_cmnd *cmd;
 	u8 *sense_buffer;
 	unsigned int sense_bufflen;
@@ -176,10 +213,14 @@
 	int task_tag;
 	u8 lun; /* UPIU LUN id field is only 8-bit wide */
 	bool intr_cmd;
+	ktime_t issue_time_stamp;
+	ktime_t complete_time_stamp;
+
+	bool req_abort_skip;
 };
 
 /**
- * struct ufs_query - holds relevant data structures for query request
+ * struct ufs_query - holds relevant data structures for query request
  * @request: request upiu and function
  * @descriptor: buffer for sending/receiving descriptor
  * @response: response upiu and response
@@ -247,7 +288,6 @@
 
 /**
  * struct ufs_hba_variant_ops - variant specific callbacks
- * @name: variant name
  * @init: called when the driver is initialized
  * @exit: called to cleanup everything done in init
  * @get_ufs_hci_version: called to get UFS HCI version
@@ -263,18 +303,23 @@
  *			to be set.
  * @suspend: called during host controller PM callback
  * @resume: called during host controller PM callback
+ * @apply_dev_quirks: called to apply device specific quirks
+ * @full_reset: called during link recovery for handling variant specific
+ *		implementations of resetting the HCI
  * @dbg_register_dump: used to dump controller debug information
- * @phy_initialization: used to initialize phys
+ * @update_sec_cfg: called to restore host controller secure configuration
+ * @get_scale_down_gear: called to get the minimum supported gear to
+ *			 scale down
+ * @add_debugfs: used to add debugfs entries
+ * @remove_debugfs: used to remove debugfs entries
  */
 struct ufs_hba_variant_ops {
-	const char *name;
 	int	(*init)(struct ufs_hba *);
-	void    (*exit)(struct ufs_hba *);
+	void	(*exit)(struct ufs_hba *);
 	u32	(*get_ufs_hci_version)(struct ufs_hba *);
 	int	(*clk_scale_notify)(struct ufs_hba *, bool,
 				    enum ufs_notify_change_status);
-	int	(*setup_clocks)(struct ufs_hba *, bool);
-	int     (*setup_regulators)(struct ufs_hba *, bool);
+	int	(*setup_clocks)(struct ufs_hba *, bool, bool);
+	int	(*setup_regulators)(struct ufs_hba *, bool);
 	int	(*hce_enable_notify)(struct ufs_hba *,
 				     enum ufs_notify_change_status);
 	int	(*link_startup_notify)(struct ufs_hba *,
@@ -283,10 +328,59 @@
 					enum ufs_notify_change_status status,
 					struct ufs_pa_layer_attr *,
 					struct ufs_pa_layer_attr *);
-	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
-	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
+	int	(*apply_dev_quirks)(struct ufs_hba *);
+	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
+	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
+	int	(*full_reset)(struct ufs_hba *);
 	void	(*dbg_register_dump)(struct ufs_hba *hba);
-	int	(*phy_initialization)(struct ufs_hba *);
+	int	(*update_sec_cfg)(struct ufs_hba *hba, bool restore_sec_cfg);
+	u32	(*get_scale_down_gear)(struct ufs_hba *);
+#ifdef CONFIG_DEBUG_FS
+	void	(*add_debugfs)(struct ufs_hba *hba, struct dentry *root);
+	void	(*remove_debugfs)(struct ufs_hba *hba);
+#endif
+};
+
+/**
+ * struct ufs_hba_crypto_variant_ops - variant specific crypto callbacks
+ * @crypto_req_setup:	retrieve the necessary cryptographic arguments to set up
+ *			a request's transfer descriptor.
+ * @crypto_engine_cfg_start: start configuring the cryptographic engine
+ *			     according to the tag parameter
+ * @crypto_engine_cfg_end: end configuring the cryptographic engine
+ *			   according to the tag parameter
+ * @crypto_engine_reset: reset the cryptographic engine
+ * @crypto_engine_get_status: get the error status of the cryptographic engine
+ */
+struct ufs_hba_crypto_variant_ops {
+	int	(*crypto_req_setup)(struct ufs_hba *, struct ufshcd_lrb *lrbp,
+				    u8 *cc_index, bool *enable, u64 *dun);
+	int	(*crypto_engine_cfg_start)(struct ufs_hba *, unsigned int);
+	int	(*crypto_engine_cfg_end)(struct ufs_hba *, struct ufshcd_lrb *,
+			struct request *);
+	int	(*crypto_engine_reset)(struct ufs_hba *);
+	int	(*crypto_engine_get_status)(struct ufs_hba *, u32 *);
+};
+
+/**
+ * struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks
+ * @req_start: notify the variant that a request has been issued
+ * @req_end: notify the variant that a request has completed
+ */
+struct ufs_hba_pm_qos_variant_ops {
+	void		(*req_start)(struct ufs_hba *, struct request *);
+	void		(*req_end)(struct ufs_hba *, struct request *, bool);
+};
+
+/**
+ * struct ufs_hba_variant - variant specific parameters
+ * @dev: pointer to the device of this variant
+ * @name: variant name
+ * @vops: variant specific operations
+ * @crypto_vops: variant specific crypto operations
+ * @pm_qos_vops: variant specific PM QoS operations
+ */
+struct ufs_hba_variant {
+	struct device				*dev;
+	const char				*name;
+	struct ufs_hba_variant_ops		*vops;
+	struct ufs_hba_crypto_variant_ops	*crypto_vops;
+	struct ufs_hba_pm_qos_variant_ops	*pm_qos_vops;
 };
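To illustrate how the new ufs_hba_variant split might be populated by a platform driver, a minimal sketch (all example_* identifiers are placeholders, not symbols from this patch):

	static int example_hba_init(struct ufs_hba *hba)
	{
		return 0;	/* platform specific setup would go here */
	}

	static struct ufs_hba_variant_ops example_vops = {
		.init = example_hba_init,
	};

	static struct ufs_hba_variant example_variant = {
		.name = "example-ufs",
		.vops = &example_vops,
	};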
 
 /* clock gating state  */
@@ -304,10 +398,16 @@
  * @ungate_work: worker to turn on clocks that will be used in case of
  * interrupt context
  * @state: the current clocks state
- * @delay_ms: gating delay in ms
+ * @delay_ms: current gating delay in ms
+ * @delay_ms_pwr_save: gating delay (in ms) in power save mode
+ * @delay_ms_perf: gating delay (in ms) in performance mode
  * @is_suspended: clk gating is suspended when set to 1 which can be used
  * during suspend/resume
- * @delay_attr: sysfs attribute to control delay_attr
+ * @delay_attr: sysfs attribute to control delay_ms if clock scaling is disabled
+ * @delay_pwr_save_attr: sysfs attribute to control delay_ms_pwr_save
+ * @delay_perf_attr: sysfs attribute to control delay_ms_perf
+ * @enable_attr: sysfs attribute to enable/disable clock gating
+ * @is_enabled: Indicates the current status of clock gating
  * @active_reqs: number of requests that are pending and should be waited for
  * completion before gating clocks.
  */
@@ -316,16 +416,91 @@
 	struct work_struct ungate_work;
 	enum clk_gating_state state;
 	unsigned long delay_ms;
+	unsigned long delay_ms_pwr_save;
+	unsigned long delay_ms_perf;
 	bool is_suspended;
 	struct device_attribute delay_attr;
+	struct device_attribute delay_pwr_save_attr;
+	struct device_attribute delay_perf_attr;
+	struct device_attribute enable_attr;
+	bool is_enabled;
 	int active_reqs;
 };
 
+/* Hibern8 state  */
+enum ufshcd_hibern8_on_idle_state {
+	HIBERN8_ENTERED,
+	HIBERN8_EXITED,
+	REQ_HIBERN8_ENTER,
+	REQ_HIBERN8_EXIT,
+	AUTO_HIBERN8,
+};
+
+/**
+ * struct ufs_hibern8_on_idle - UFS Hibern8 on idle related data
+ * @enter_work: worker to put UFS link in hibern8 after some delay as
+ * specified in delay_ms
+ * @exit_work: worker to bring UFS link out of hibern8
+ * @state: the current hibern8 state
+ * @delay_ms: hibern8 enter delay in ms
+ * @is_suspended: hibern8 enter is suspended when set to 1 which can be used
+ * during suspend/resume
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before scheduling delayed "enter_work".
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @enable_attr: sysfs attribute to enable/disable hibern8 on idle
+ * @is_enabled: Indicates the current status of hibern8 on idle
+ */
+struct ufs_hibern8_on_idle {
+	struct delayed_work enter_work;
+	struct work_struct exit_work;
+	enum ufshcd_hibern8_on_idle_state state;
+	unsigned long delay_ms;
+	bool is_suspended;
+	int active_reqs;
+	struct device_attribute delay_attr;
+	struct device_attribute enable_attr;
+	bool is_enabled;
+};
+
+struct ufs_saved_pwr_info {
+	struct ufs_pa_layer_attr info;
+	bool is_valid;
+};
+
+/**
+ * struct ufs_clk_scaling - UFS clock scaling related data
+ * @active_reqs: number of requests that are pending. If this is zero when
+ * devfreq ->target() function is called then schedule "suspend_work" to
+ * suspend devfreq.
+ * @tot_busy_t: Total busy time in current polling window
+ * @window_start_t: Start time (in jiffies) of the current polling window
+ * @busy_start_t: Start time of current busy period
+ * @enable_attr: sysfs attribute to enable/disable clock scaling
+ * @saved_pwr_info: UFS power mode may also be changed during scaling and this
+ * one keeps track of previous power mode.
+ * @workq: workqueue to schedule devfreq suspend/resume work
+ * @suspend_work: worker to suspend devfreq
+ * @resume_work: worker to resume devfreq
+ * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_busy_started: tracks if busy period has started or not
+ * @is_suspended: tracks if devfreq is suspended or not
+ * @is_scaled_up: tracks if we are currently scaled up or scaled down
+ */
 struct ufs_clk_scaling {
-	ktime_t  busy_start_t;
-	bool is_busy_started;
-	unsigned long  tot_busy_t;
+	int active_reqs;
+	unsigned long tot_busy_t;
 	unsigned long window_start_t;
+	ktime_t busy_start_t;
+	struct device_attribute enable_attr;
+	struct ufs_saved_pwr_info saved_pwr_info;
+	struct workqueue_struct *workq;
+	struct work_struct suspend_work;
+	struct work_struct resume_work;
+	bool is_allowed;
+	bool is_busy_started;
+	bool is_suspended;
+	bool is_scaled_up;
 };
 
 /**
@@ -337,6 +512,128 @@
 	u32 icc_level;
 };
 
+#define UIC_ERR_REG_HIST_LENGTH 8
+/**
+ * struct ufs_uic_err_reg_hist - keeps history of uic errors
+ * @pos: index to indicate cyclic buffer position
+ * @reg: cyclic buffer for registers value
+ * @tstamp: cyclic buffer for time stamp
+ */
+struct ufs_uic_err_reg_hist {
+	int pos;
+	u32 reg[UIC_ERR_REG_HIST_LENGTH];
+	ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH];
+};
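The error-history ring would be advanced roughly as in the sketch below (the helper name is hypothetical; the logic follows directly from the struct layout):

	static void example_update_reg_hist(struct ufs_uic_err_reg_hist *hist, u32 reg)
	{
		hist->reg[hist->pos] = reg;
		hist->tstamp[hist->pos] = ktime_get();
		hist->pos = (hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
	}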
+
+#ifdef CONFIG_DEBUG_FS
+struct debugfs_files {
+	struct dentry *debugfs_root;
+	struct dentry *stats_folder;
+	struct dentry *tag_stats;
+	struct dentry *err_stats;
+	struct dentry *show_hba;
+	struct dentry *host_regs;
+	struct dentry *dump_dev_desc;
+	struct dentry *power_mode;
+	struct dentry *dme_local_read;
+	struct dentry *dme_peer_read;
+	struct dentry *dbg_print_en;
+	struct dentry *req_stats;
+	struct dentry *query_stats;
+	u32 dme_local_attr_id;
+	u32 dme_peer_attr_id;
+	struct dentry *reset_controller;
+	struct dentry *err_state;
+	bool err_occurred;
+#ifdef CONFIG_UFS_FAULT_INJECTION
+	struct dentry *err_inj_scenario;
+	struct dentry *err_inj_stats;
+	u32 err_inj_scenario_mask;
+	struct fault_attr fail_attr;
+#endif
+	bool is_sys_suspended;
+};
+
+/* tag stats statistics types */
+enum ts_types {
+	TS_NOT_SUPPORTED	= -1,
+	TS_TAG			= 0,
+	TS_READ			= 1,
+	TS_WRITE		= 2,
+	TS_URGENT_READ		= 3,
+	TS_URGENT_WRITE		= 4,
+	TS_FLUSH		= 5,
+	TS_NUM_STATS		= 6,
+};
+
+/**
+ * struct ufshcd_req_stat - statistics for request handling times (in usec)
+ * @min: shortest time measured
+ * @max: longest time measured
+ * @sum: sum of all the handling times measured (used for average calculation)
+ * @count: number of measurements taken
+ */
+struct ufshcd_req_stat {
+	u64 min;
+	u64 max;
+	u64 sum;
+	u64 count;
+};
+#endif
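Given the @sum/@count pair, an average handling time per request type can be derived along these lines (sketch; div64_u64() is the standard 64-bit division helper from linux/math64.h):

	static inline u64 example_req_stat_avg_us(const struct ufshcd_req_stat *stat)
	{
		return stat->count ? div64_u64(stat->sum, stat->count) : 0;
	}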
+
+/**
+ * struct ufs_stats - keeps usage/err statistics
+ * @enabled: enable tag stats for debugfs
+ * @tag_stats: pointer to tag statistic counters
+ * @q_depth: current amount of busy slots
+ * @err_stats: counters to keep track of various errors
+ * @req_stats: request handling time statistics per request type
+ * @query_stats_arr: array that holds query statistics
+ * @hibern8_exit_cnt: Counter to keep track of the number of hibern8 exits;
+ *		reset after link-startup.
+ * @last_hibern8_exit_tstamp: Time stamp of the last hibern8 exit;
+ *		cleared after the first successful command completion.
+ * @pa_err: tracks pa-uic errors
+ * @dl_err: tracks dl-uic errors
+ * @nl_err: tracks nl-uic errors
+ * @tl_err: tracks tl-uic errors
+ * @dme_err: tracks dme errors
+ */
+struct ufs_stats {
+#ifdef CONFIG_DEBUG_FS
+	bool enabled;
+	u64 **tag_stats;
+	int q_depth;
+	int err_stats[UFS_ERR_MAX];
+	struct ufshcd_req_stat req_stats[TS_NUM_STATS];
+	int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
+
+#endif
+	u32 hibern8_exit_cnt;
+	ktime_t last_hibern8_exit_tstamp;
+	struct ufs_uic_err_reg_hist pa_err;
+	struct ufs_uic_err_reg_hist dl_err;
+	struct ufs_uic_err_reg_hist nl_err;
+	struct ufs_uic_err_reg_hist tl_err;
+	struct ufs_uic_err_reg_hist dme_err;
+};
+
+/* UFS Host Controller debug print bitmask */
+#define UFSHCD_DBG_PRINT_CLK_FREQ_EN		UFS_BIT(0)
+#define UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN	UFS_BIT(1)
+#define UFSHCD_DBG_PRINT_HOST_REGS_EN		UFS_BIT(2)
+#define UFSHCD_DBG_PRINT_TRS_EN			UFS_BIT(3)
+#define UFSHCD_DBG_PRINT_TMRS_EN		UFS_BIT(4)
+#define UFSHCD_DBG_PRINT_PWR_EN			UFS_BIT(5)
+#define UFSHCD_DBG_PRINT_HOST_STATE_EN		UFS_BIT(6)
+
+#define UFSHCD_DBG_PRINT_ALL						   \
+		(UFSHCD_DBG_PRINT_CLK_FREQ_EN		|		   \
+		 UFSHCD_DBG_PRINT_UIC_ERR_HIST_EN	|		   \
+		 UFSHCD_DBG_PRINT_HOST_REGS_EN | UFSHCD_DBG_PRINT_TRS_EN | \
+		 UFSHCD_DBG_PRINT_TMRS_EN | UFSHCD_DBG_PRINT_PWR_EN |	   \
+		 UFSHCD_DBG_PRINT_HOST_STATE_EN)
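These bits gate the driver's diagnostic dumps; an illustrative check (the surrounding call is one plausible consumer, not mandated by this patch):

	if (hba->ufshcd_dbg_print & UFSHCD_DBG_PRINT_HOST_REGS_EN)
		ufshcd_vops_dbg_register_dump(hba);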
+
 /**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
@@ -356,7 +653,7 @@
  * @nutrs: Transfer Request Queue depth supported by controller
  * @nutmrs: Task Management Queue depth supported by controller
  * @ufs_version: UFS Version to which controller complies
- * @vops: pointer to variant specific operations
+ * @var: pointer to variant specific data
  * @priv: pointer to variant specific private data
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
@@ -382,13 +679,18 @@
  * @dev_cmd: ufs device management command information
  * @last_dme_cmd_tstamp: time stamp of the last completed DME command
  * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @ufs_stats: ufshcd statistics to be used via debugfs
+ * @debugfs_files: debugfs files associated with the ufs stats
+ * @ufshcd_dbg_print: Bitmask for enabling debug prints
  * @vreg_info: UFS device voltage regulator information
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @hibern8_on_idle: UFS Hibern8 on idle related data
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
+ * @scsi_block_reqs_cnt: reference counting for scsi block requests
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -414,9 +716,11 @@
 	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
 	enum uic_link_state uic_link_state;
 	/* Desired UFS power management level during runtime PM */
-	enum ufs_pm_level rpm_lvl;
+	int rpm_lvl;
 	/* Desired UFS power management level during system PM */
-	enum ufs_pm_level spm_lvl;
+	int spm_lvl;
+	struct device_attribute rpm_lvl_attr;
+	struct device_attribute spm_lvl_attr;
 	int pm_op_in_progress;
 
 	struct ufshcd_lrb *lrb;
@@ -429,7 +733,7 @@
 	int nutrs;
 	int nutmrs;
 	u32 ufs_version;
-	struct ufs_hba_variant_ops *vops;
+	struct ufs_hba_variant *var;
 	void *priv;
 	unsigned int irq;
 	bool is_irq_enabled;
@@ -474,6 +778,9 @@
 	 */
 	#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		UFS_BIT(5)
 
+	/* Auto hibern8 support is broken */
+	#define UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8		UFS_BIT(6)
+
 	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */
 
 	/* Device deviations from standard UFS device spec. */
@@ -503,8 +810,11 @@
 	/* HBA Errors */
 	u32 errors;
 	u32 uic_error;
+	u32 ce_error;	/* crypto engine errors */
 	u32 saved_err;
 	u32 saved_uic_err;
+	u32 saved_ce_err;
+	bool silence_err_logs;
 
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
@@ -513,17 +823,40 @@
 	/* Keeps information of the UFS device connected to this host */
 	struct ufs_dev_info dev_info;
 	bool auto_bkops_enabled;
+
+	struct ufs_stats ufs_stats;
+#ifdef CONFIG_DEBUG_FS
+	struct debugfs_files debugfs_files;
+#endif
+
 	struct ufs_vreg_info vreg_info;
 	struct list_head clk_list_head;
 
 	bool wlun_dev_clr_ua;
 
+	/* Number of requests aborts */
+	int req_abort_count;
+
 	/* Number of lanes available (1 or 2) for Rx/Tx */
 	u32 lanes_per_direction;
+
+	/* Gear limits */
+	u32 limit_tx_hs_gear;
+	u32 limit_rx_hs_gear;
+	u32 limit_tx_pwm_gear;
+	u32 limit_rx_pwm_gear;
+
+	u32 scsi_cmd_timeout;
+
+	/* Bitmask for enabling debug prints */
+	u32 ufshcd_dbg_print;
+
 	struct ufs_pa_layer_attr pwr_info;
 	struct ufs_pwr_mode_info max_pwr_info;
 
 	struct ufs_clk_gating clk_gating;
+	struct ufs_hibern8_on_idle hibern8_on_idle;
+
 	/* Control to enable/disable host capabilities */
 	u32 caps;
 	/* Allow dynamic clk gating */
@@ -540,6 +873,21 @@
 	 * CAUTION: Enabling this might reduce overall UFS throughput.
 	 */
 #define UFSHCD_CAP_INTR_AGGR (1 << 4)
+	/* Allow standalone Hibern8 enter on idle */
+#define UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE (1 << 5)
+	/*
+	 * This capability allows the device auto-bkops to be always enabled
+	 * except during suspend (both runtime and system suspend).
+	 * Enabling this capability means that the device will always be
+	 * allowed to do background operations when it's active, but this
+	 * might degrade the performance of ongoing read/write operations.
+	 */
+#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 6)
+	/*
+	 * If host controller hardware can be power collapsed when UFS link is
+	 * in hibern8 then enable this cap.
+	 */
+#define UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8 (1 << 7)
 
 	struct devfreq *devfreq;
 	struct ufs_clk_scaling clk_scaling;
@@ -547,6 +895,15 @@
 
 	enum bkops_status urgent_bkops_lvl;
 	bool is_urgent_bkops_lvl_checked;
+
+	struct rw_semaphore clk_scaling_lock;
+
+	/* If set, don't gate device ref_clk during clock gating */
+	bool no_ref_clk_gating;
+
+	int scsi_block_reqs_cnt;
+	int latency_hist_enabled;
+	struct io_latency_state io_lat_s;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -558,7 +915,7 @@
 {
 	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
 }
-static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
 {
 	return hba->caps & UFSHCD_CAP_CLK_SCALING;
 }
@@ -566,25 +923,47 @@
 {
 	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
 }
+static inline bool ufshcd_is_hibern8_on_idle_allowed(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
+}
+
+static inline bool ufshcd_is_power_collapse_during_hibern8_allowed(
+						struct ufs_hba *hba)
+{
+	return !!(hba->caps & UFSHCD_CAP_POWER_COLLAPSE_DURING_HIBERN8);
+}
+
+static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
+							struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
+}
 
 static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
 {
-/* DWC UFS Core has the Interrupt aggregation feature but is not detectable*/
-#ifndef CONFIG_SCSI_UFS_DWC
 	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
 	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
 		return true;
 	else
 		return false;
-#else
-return true;
-#endif
+}
+
+static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
+{
+	return !!((hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT) &&
+		!(hba->quirks & UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8));
+}
+
+static inline bool ufshcd_is_crypto_supported(struct ufs_hba *hba)
+{
+	return !!(hba->capabilities & MASK_CRYPTO_SUPPORT);
 }
 
 #define ufshcd_writel(hba, val, reg)	\
-	writel((val), (hba)->mmio_base + (reg))
+	writel_relaxed((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg)	\
-	readl((hba)->mmio_base + (reg))
+	readl_relaxed((hba)->mmio_base + (reg))
 
 /**
  * ufshcd_rmwl - read modify write into a register
@@ -595,7 +974,7 @@
  */
 static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
 {
 	u32 tmp;
 
 	tmp = ufshcd_readl(hba, reg);
 	tmp &= ~mask;
@@ -610,12 +989,8 @@
 int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
 				u32 val, unsigned long interval_us,
 				unsigned long timeout_ms, bool can_sleep);
-
-static inline void check_upiu_size(void)
-{
-	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
-		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
-}
+int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
 
 /**
  * ufshcd_set_variant - set variant specific data to the hba
@@ -695,6 +1070,32 @@
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+/**
+ * ufshcd_dme_rmw - get modify set a dme attribute
+ * @hba - per adapter instance
+ * @mask - mask to apply on read value
+ * @val - actual value to write
+ * @attr - dme attribute
+ */
+static inline int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask,
+				 u32 val, u32 attr)
+{
+	u32 cfg = 0;
+	int err = 0;
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(attr), &cfg);
+	if (err)
+		goto out;
+
+	cfg &= ~mask;
+	cfg |= (val & mask);
+
+	err = ufshcd_dme_set(hba, UIC_ARG_MIB(attr), cfg);
+
+out:
+	return err;
+}
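Typical usage of the read-modify-write helper, with hba in scope (the attribute and field values are illustrative, not taken from this patch):

	/* Clear bits 7:0 of PA_HIBERN8TIME and set them to 0x10 (illustrative). */
	int err = ufshcd_dme_rmw(hba, 0xFF, 0x10, PA_HIBERN8TIME);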
+
 int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size);
 
 static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
@@ -705,85 +1106,103 @@
 		pwr_info->pwr_tx == FASTAUTO_MODE);
 }
 
-#define ASCII_STD true
+#ifdef CONFIG_DEBUG_FS
+static inline void ufshcd_init_req_stats(struct ufs_hba *hba)
+{
+	memset(hba->ufs_stats.req_stats, 0, sizeof(hba->ufs_stats.req_stats));
+}
+#else
+static inline void ufshcd_init_req_stats(struct ufs_hba *hba) {}
+#endif
 
+#define ASCII_STD true
+#define UTF16_STD false
 int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, u8 *buf,
 				u32 size, bool ascii);
 
 /* Expose Query-Request API */
 int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
 	enum flag_idn idn, bool *flag_res);
+int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
+	enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
+int ufshcd_query_descriptor(struct ufs_hba *hba, enum query_opcode opcode,
+	enum desc_idn idn, u8 index, u8 selector, u8 *desc_buf, int *buf_len);
+
 int ufshcd_hold(struct ufs_hba *hba, bool async);
-void ufshcd_release(struct ufs_hba *hba);
+void ufshcd_release(struct ufs_hba *hba, bool no_sched);
+int ufshcd_wait_for_doorbell_clr(struct ufs_hba *hba, u64 wait_timeout_us);
+int ufshcd_change_power_mode(struct ufs_hba *hba,
+			     struct ufs_pa_layer_attr *pwr_mode);
+void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba,
+		int result);
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
+void ufshcd_scsi_block_requests(struct ufs_hba *hba);
+void ufshcd_scsi_unblock_requests(struct ufs_hba *hba);
+
 /* Wrapper functions for safely calling variant operations */
 static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
 {
-	if (hba->vops)
-		return hba->vops->name;
+	if (hba->var && hba->var->name)
+		return hba->var->name;
 	return "";
 }
 
 static inline int ufshcd_vops_init(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->init)
-		return hba->vops->init(hba);
-
+	if (hba->var && hba->var->vops && hba->var->vops->init)
+		return hba->var->vops->init(hba);
 	return 0;
 }
 
 static inline void ufshcd_vops_exit(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->exit)
-		return hba->vops->exit(hba);
+	if (hba->var && hba->var->vops && hba->var->vops->exit)
+		hba->var->vops->exit(hba);
 }
 
 static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->get_ufs_hci_version)
-		return hba->vops->get_ufs_hci_version(hba);
-
+	if (hba->var && hba->var->vops && hba->var->vops->get_ufs_hci_version)
+		return hba->var->vops->get_ufs_hci_version(hba);
 	return ufshcd_readl(hba, REG_UFS_VERSION);
 }
 
 static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
 			bool up, enum ufs_notify_change_status status)
 {
-	if (hba->vops && hba->vops->clk_scale_notify)
-		return hba->vops->clk_scale_notify(hba, up, status);
+	if (hba->var && hba->var->vops && hba->var->vops->clk_scale_notify)
+		return hba->var->vops->clk_scale_notify(hba, up, status);
 	return 0;
 }
 
-static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on)
+static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
+					   bool is_gating_context)
 {
-	if (hba->vops && hba->vops->setup_clocks)
-		return hba->vops->setup_clocks(hba, on);
+	if (hba->var && hba->var->vops && hba->var->vops->setup_clocks)
+		return hba->var->vops->setup_clocks(hba, on, is_gating_context);
 	return 0;
 }
 
 static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
 {
-	if (hba->vops && hba->vops->setup_regulators)
-		return hba->vops->setup_regulators(hba, status);
-
+	if (hba->var && hba->var->vops && hba->var->vops->setup_regulators)
+		return hba->var->vops->setup_regulators(hba, status);
 	return 0;
 }
 
 static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
 						bool status)
 {
-	if (hba->vops && hba->vops->hce_enable_notify)
-		return hba->vops->hce_enable_notify(hba, status);
-
+	if (hba->var && hba->var->vops && hba->var->vops->hce_enable_notify)
+		return hba->var->vops->hce_enable_notify(hba, status);
 	return 0;
 }
 static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
 						bool status)
 {
-	if (hba->vops && hba->vops->link_startup_notify)
-		return hba->vops->link_startup_notify(hba, status);
-
+	if (hba->var && hba->var->vops && hba->var->vops->link_startup_notify)
+		return hba->var->vops->link_startup_notify(hba, status);
 	return 0;
 }
 
@@ -792,33 +1211,140 @@
 				  struct ufs_pa_layer_attr *dev_max_params,
 				  struct ufs_pa_layer_attr *dev_req_params)
 {
-	if (hba->vops && hba->vops->pwr_change_notify)
-		return hba->vops->pwr_change_notify(hba, status,
+	if (hba->var && hba->var->vops && hba->var->vops->pwr_change_notify)
+		return hba->var->vops->pwr_change_notify(hba, status,
 					dev_max_params, dev_req_params);
-
 	return -ENOTSUPP;
 }
 
+static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->apply_dev_quirks)
+		return hba->var->vops->apply_dev_quirks(hba);
+	return 0;
+}
+
 static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-	if (hba->vops && hba->vops->suspend)
-		return hba->vops->suspend(hba, op);
-
+	if (hba->var && hba->var->vops && hba->var->vops->suspend)
+		return hba->var->vops->suspend(hba, op);
 	return 0;
 }
 
 static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
 {
-	if (hba->vops && hba->vops->resume)
-		return hba->vops->resume(hba, op);
-
+	if (hba->var && hba->var->vops && hba->var->vops->resume)
+		return hba->var->vops->resume(hba, op);
 	return 0;
 }
 
+static inline int ufshcd_vops_full_reset(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->full_reset)
+		return hba->var->vops->full_reset(hba);
+	return 0;
+}
+
 static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
 {
-	if (hba->vops && hba->vops->dbg_register_dump)
-		hba->vops->dbg_register_dump(hba);
+	if (hba->var && hba->var->vops && hba->var->vops->dbg_register_dump)
+		hba->var->vops->dbg_register_dump(hba);
+}
+
+static inline int ufshcd_vops_update_sec_cfg(struct ufs_hba *hba,
+						bool restore_sec_cfg)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->update_sec_cfg)
+		return hba->var->vops->update_sec_cfg(hba, restore_sec_cfg);
+	return 0;
+}
+
+static inline u32 ufshcd_vops_get_scale_down_gear(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->get_scale_down_gear)
+		return hba->var->vops->get_scale_down_gear(hba);
+	/* Default to lowest high speed gear */
+	return UFS_HS_G1;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static inline void ufshcd_vops_add_debugfs(struct ufs_hba *hba,
+						struct dentry *root)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->add_debugfs)
+		hba->var->vops->add_debugfs(hba, root);
+}
+
+static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->vops && hba->var->vops->remove_debugfs)
+		hba->var->vops->remove_debugfs(hba);
+}
+#endif
+
+static inline int ufshcd_vops_crypto_req_setup(struct ufs_hba *hba,
+	struct ufshcd_lrb *lrbp, u8 *cc_index, bool *enable, u64 *dun)
+{
+	if (hba->var && hba->var->crypto_vops &&
+		hba->var->crypto_vops->crypto_req_setup)
+		return hba->var->crypto_vops->crypto_req_setup(hba, lrbp,
+			cc_index, enable, dun);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_cfg_start(struct ufs_hba *hba,
+						unsigned int task_tag)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_cfg_start)
+		return hba->var->crypto_vops->crypto_engine_cfg_start
+				(hba, task_tag);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_cfg_end(struct ufs_hba *hba,
+						struct ufshcd_lrb *lrbp,
+						struct request *req)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_cfg_end)
+		return hba->var->crypto_vops->crypto_engine_cfg_end
+				(hba, lrbp, req);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_reset(struct ufs_hba *hba)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_reset)
+		return hba->var->crypto_vops->crypto_engine_reset(hba);
+	return 0;
+}
+
+static inline int ufshcd_vops_crypto_engine_get_status(struct ufs_hba *hba,
+		u32 *status)
+{
+	if (hba->var && hba->var->crypto_vops &&
+	    hba->var->crypto_vops->crypto_engine_get_status)
+		return hba->var->crypto_vops->crypto_engine_get_status(hba,
+			status);
+	return 0;
+}
+
+static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba,
+		struct request *req)
+{
+	if (hba->var && hba->var->pm_qos_vops &&
+		hba->var->pm_qos_vops->req_start)
+		hba->var->pm_qos_vops->req_start(hba, req);
+}
+
+static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
+		struct request *req, bool lock)
+{
+	if (hba->var && hba->var->pm_qos_vops && hba->var->pm_qos_vops->req_end)
+		hba->var->pm_qos_vops->req_end(hba, req, lock);
 }
 
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 9599741..d65dad0 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -48,6 +48,7 @@
 	REG_UFS_VERSION				= 0x08,
 	REG_CONTROLLER_DEV_ID			= 0x10,
 	REG_CONTROLLER_PROD_ID			= 0x14,
+	REG_AUTO_HIBERN8_IDLE_TIMER		= 0x18,
 	REG_INTERRUPT_STATUS			= 0x20,
 	REG_INTERRUPT_ENABLE			= 0x24,
 	REG_CONTROLLER_STATUS			= 0x30,
@@ -72,15 +73,24 @@
 	REG_UIC_COMMAND_ARG_1			= 0x94,
 	REG_UIC_COMMAND_ARG_2			= 0x98,
 	REG_UIC_COMMAND_ARG_3			= 0x9C,
+
+	UFSHCI_REG_SPACE_SIZE			= 0xA0,
+
+	REG_UFS_CCAP				= 0x100,
+	REG_UFS_CRYPTOCAP			= 0x104,
+
+	UFSHCI_CRYPTO_REG_SPACE_SIZE		= 0x400,
 };
 
 /* Controller capability masks */
 enum {
 	MASK_TRANSFER_REQUESTS_SLOTS		= 0x0000001F,
 	MASK_TASK_MANAGEMENT_REQUEST_SLOTS	= 0x00070000,
+	MASK_AUTO_HIBERN8_SUPPORT		= 0x00800000,
 	MASK_64_ADDRESSING_SUPPORT		= 0x01000000,
 	MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT	= 0x02000000,
 	MASK_UIC_DME_TEST_MODE_SUPPORT		= 0x04000000,
+	MASK_CRYPTO_SUPPORT			= 0x10000000,
 };
 
 /* UFS Version 08h */
@@ -109,8 +119,19 @@
 #define MANUFACTURE_ID_MASK	UFS_MASK(0xFFFF, 0)
 #define PRODUCT_ID_MASK		UFS_MASK(0xFFFF, 16)
 
-#define UFS_BIT(x)	(1L << (x))
+/*
+ * AHIT - Auto-Hibernate Idle Timer  18h
+ */
+#define AUTO_HIBERN8_IDLE_TIMER_MASK		UFS_MASK(0x3FF, 0)
+#define AUTO_HIBERN8_TIMER_SCALE_MASK		UFS_MASK(0x7, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_1_US		UFS_MASK(0x0, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_10_US		UFS_MASK(0x1, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_100_US		UFS_MASK(0x2, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_1_MS		UFS_MASK(0x3, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_10_MS		UFS_MASK(0x4, 10)
+#define AUTO_HIBERN8_TIMER_SCALE_100_MS		UFS_MASK(0x5, 10)
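The idle-timer value (bits 9:0) and scale (bits 12:10) combine into a single AHIT register value; for example, a 150 ms timeout could be encoded as (sketch):

	u32 ahit = AUTO_HIBERN8_TIMER_SCALE_10_MS |
		   (15 & AUTO_HIBERN8_IDLE_TIMER_MASK);	/* 15 * 10 ms = 150 ms */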
 
+/* IS - Interrupt status (20h) / IE - Interrupt enable (24h) */
 #define UTP_TRANSFER_REQ_COMPL			UFS_BIT(0)
 #define UIC_DME_END_PT_RESET			UFS_BIT(1)
 #define UIC_ERROR				UFS_BIT(2)
@@ -125,6 +146,7 @@
 #define DEVICE_FATAL_ERROR			UFS_BIT(11)
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
+#define CRYPTO_ENGINE_FATAL_ERROR		UFS_BIT(18)
 
 #define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
 				UIC_HIBERNATE_EXIT |\
@@ -135,11 +157,13 @@
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR)
+				SYSTEM_BUS_FATAL_ERROR |\
+				CRYPTO_ENGINE_FATAL_ERROR)
 
 #define INT_FATAL_ERRORS	(DEVICE_FATAL_ERROR |\
 				CONTROLLER_FATAL_ERROR |\
-				SYSTEM_BUS_FATAL_ERROR)
+				SYSTEM_BUS_FATAL_ERROR |\
+				CRYPTO_ENGINE_FATAL_ERROR)
 
 /* HCS - Host Controller Status 30h */
 #define DEVICE_PRESENT				UFS_BIT(0)
@@ -161,11 +185,13 @@
 
 /* HCE - Host Controller Enable 34h */
 #define CONTROLLER_ENABLE	UFS_BIT(0)
+#define CRYPTO_GENERAL_ENABLE	UFS_BIT(1)
 #define CONTROLLER_DISABLE	0x0
 
 /* UECPA - Host UIC Error Code PHY Adapter Layer 38h */
 #define UIC_PHY_ADAPTER_LAYER_ERROR			UFS_BIT(31)
 #define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK		0x1F
+#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK		0xF
 
 /* UECDL - Host UIC Error Code Data Link Layer 3Ch */
 #define UIC_DATA_LINK_LAYER_ERROR		UFS_BIT(31)
@@ -220,12 +246,6 @@
 #define UIC_ARG_ATTR_TYPE(t)		(((t) & 0xFF) << 16)
 #define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)
 
-/* Link Status*/
-enum link_status {
-	UFSHCD_LINK_IS_DOWN	= 1,
-	UFSHCD_LINK_IS_UP	= 2,
-};
-
 /* UIC Commands */
 enum uic_cmd_dme {
 	UIC_CMD_DME_GET			= 0x01,
@@ -272,6 +292,9 @@
 
 	/* Interrupt disable mask for UFSHCI v1.1 */
 	INTERRUPT_MASK_ALL_VER_11	= 0x31FFF,
+
+	/* Interrupt disable mask for UFSHCI v2.1 */
+	INTERRUPT_MASK_ALL_VER_21	= 0x71FFF,
 };
 
 /*
@@ -285,11 +308,6 @@
 	UTP_CMD_TYPE_DEV_MANAGE		= 0x2,
 };
 
-/* To accommodate UFS2.0 required Command type */
-enum {
-	UTP_CMD_TYPE_UFS_STORAGE	= 0x1,
-};
-
 enum {
 	UTP_SCSI_COMMAND		= 0x00000000,
 	UTP_NATIVE_UFS_COMMAND		= 0x10000000,
@@ -314,6 +332,9 @@
 	OCS_PEER_COMM_FAILURE		= 0x5,
 	OCS_ABORTED			= 0x6,
 	OCS_FATAL_ERROR			= 0x7,
+	OCS_DEVICE_FATAL_ERROR		= 0x8,
+	OCS_INVALID_CRYPTO_CONFIG	= 0x9,
+	OCS_GENERAL_CRYPTO_ERROR	= 0xA,
 	OCS_INVALID_COMMAND_STATUS	= 0x0F,
 	MASK_OCS			= 0x0F,
 };
@@ -349,6 +370,8 @@
 	struct ufshcd_sg_entry    prd_table[SG_ALL];
 };
 
+#define UTRD_CRYPTO_ENABLE	UFS_BIT(23)
+
 /**
  * struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
  * @dword0: Descriptor Header DW0
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index eff8b56..602e196 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -1,6 +1,4 @@
 /*
- * drivers/scsi/ufs/unipro.h
- *
  * Copyright (C) 2013 Samsung Electronics Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
@@ -36,10 +34,6 @@
 #define TX_LCC_SEQUENCER			0x0032
 #define TX_MIN_ACTIVATETIME			0x0033
 #define TX_PWM_G6_G7_SYNC_LENGTH		0x0034
-#define TX_REFCLKFREQ				0x00EB
-#define TX_CFGCLKFREQVAL			0x00EC
-#define	CFGEXTRATTR				0x00F0
-#define DITHERCTRL2				0x00F1
 
 /*
  * M-RX Configuration Attributes
@@ -55,41 +49,15 @@
 #define RX_TERMINATION_FORCE_ENABLE		0x0089
 #define RX_MIN_ACTIVATETIME_CAPABILITY		0x008F
 #define RX_HIBERN8TIME_CAPABILITY		0x0092
-#define RX_REFCLKFREQ				0x00EB
-#define	RX_CFGCLKFREQVAL			0x00EC
-#define CFGWIDEINLN				0x00F0
-#define CFGRXCDR8				0x00BA
-#define ENARXDIRECTCFG4				0x00F2
-#define CFGRXOVR8				0x00BD
-#define RXDIRECTCTRL2				0x00C7
-#define ENARXDIRECTCFG3				0x00F3
-#define RXCALCTRL				0x00B4
-#define ENARXDIRECTCFG2				0x00F4
-#define CFGRXOVR4				0x00E9
-#define RXSQCTRL				0x00B5
-#define CFGRXOVR6				0x00BF
+
+#define MPHY_RX_ATTR_ADDR_START			0x81
+#define MPHY_RX_ATTR_ADDR_END			0xC1
 
 #define is_mphy_tx_attr(attr)			(attr < RX_MODE)
 #define RX_MIN_ACTIVATETIME_UNIT_US		100
 #define HIBERN8TIME_UNIT_US			100
 
 /*
- * Common Block Attributes
- */
-#define TX_GLOBALHIBERNATE			UNIPRO_CB_OFFSET(0x002B)
-#define REFCLKMODE				UNIPRO_CB_OFFSET(0x00BF)
-#define DIRECTCTRL19				UNIPRO_CB_OFFSET(0x00CD)
-#define DIRECTCTRL10				UNIPRO_CB_OFFSET(0x00E6)
-#define CDIRECTCTRL6				UNIPRO_CB_OFFSET(0x00EA)
-#define RTOBSERVESELECT				UNIPRO_CB_OFFSET(0x00F0)
-#define CBDIVFACTOR				UNIPRO_CB_OFFSET(0x00F1)
-#define CBDCOCTRL5				UNIPRO_CB_OFFSET(0x00F3)
-#define CBPRGPLL2				UNIPRO_CB_OFFSET(0x00F8)
-#define CBPRGTUNING				UNIPRO_CB_OFFSET(0x00FB)
-
-#define UNIPRO_CB_OFFSET(x)			(0x8000 | x)
-
-/*
  * PHY Adpater attributes
  */
 #define PA_ACTIVETXDATALANES	0x1560
@@ -123,6 +91,7 @@
 #define PA_MAXRXHSGEAR		0x1587
 #define PA_RXHSUNTERMCAP	0x15A5
 #define PA_RXLSTERMCAP		0x15A6
+#define PA_GRANULARITY		0x15AA
 #define PA_PACPREQTIMEOUT	0x1590
 #define PA_PACPREQEOBTIMEOUT	0x1591
 #define PA_HIBERN8TIME		0x15A7
@@ -153,14 +122,20 @@
 #define PA_TACTIVATE_TIME_UNIT_US	10
 #define PA_HIBERN8_TIME_UNIT_US		100
 
-/*Other attributes*/
-#define VS_MPHYCFGUPDT		0xD085
-#define VS_DEBUGOMC		0xD09E
-#define VS_POWERSTATE		0xD083
+#define PA_GRANULARITY_MIN_VAL	1
+#define PA_GRANULARITY_MAX_VAL	6
 
 /* PHY Adapter Protocol Constants */
 #define PA_MAXDATALANES	4
 
+#define DL_FC0ProtectionTimeOutVal_Default	8191
+#define DL_TC0ReplayTimeOutVal_Default		65535
+#define DL_AFC0ReqTimeOutVal_Default		32767
+
+#define DME_LocalFC0ProtectionTimeOutVal	0xD041
+#define DME_LocalTC0ReplayTimeOutVal		0xD042
+#define DME_LocalAFC0ReqTimeOutVal		0xD043
+
 /* PA power modes */
 enum {
 	FAST_MODE	= 1,
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 461b387..6f6c013 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -1,6 +1,23 @@
 #
 # QCOM Soc drivers
 #
+config QCOM_CPUSS_DUMP
+	bool "CPU Subsystem Dumping support"
+	help
+	  Add support to dump various hardware entities, such as the
+	  instruction and data TLBs as well as the unified TLB, which are
+	  part of the CPU subsystem, to an allocated buffer. This allows the
+	  entities to be analyzed if corruption is suspected.
+	  If unsure, say N.
+
+config QCOM_RUN_QUEUE_STATS
+	bool "Enable collection and exporting of QTI Run Queue stats to userspace"
+	help
+	  This option enables the driver to periodically collect kernel run
+	  queue statistics and calculate the load of the system. This
+	  information is exported to userspace via sysfs entries, and userspace
+	  algorithms use it to decide when to turn CPU cores on or off.
+
 config QCOM_GSBI
         tristate "QCOM General Serial Bus Interface"
         depends on ARCH_QCOM
@@ -10,6 +27,40 @@
           functions for connecting the underlying serial UART, SPI, and I2C
           devices to the output pins.
 
+config QCOM_LLCC
+	tristate "Qualcomm Technologies, Inc. LLCC driver"
+	depends on ARCH_QCOM
+	help
+	  Qualcomm Technologies, Inc. platform specific LLCC driver for Last
+	  Level Cache. This provides interfaces to clients that use the LLCC.
+	  Say yes here to enable the LLCC slice driver.
+
+config QCOM_MSMSKUNK_LLCC
+	tristate "Qualcomm Technologies, Inc. MSMSKUNK LLCC driver"
+	depends on QCOM_LLCC
+	help
+	  Say yes here to enable the LLCC driver for MSMSKUNK. This provides
+	  the data required to configure the LLCC so that clients can start
+	  using the LLCC slices.
+
+config QCOM_LLCC_AMON
+	tristate "Qualcomm Technologies, Inc. LLCC Activity Monitor(AMON) driver"
+	depends on QCOM_LLCC
+	help
+	  This option enables an activity monitor driver for the last level
+	  cache controller. This driver configures the activity monitor as a
+	  deadlock detector and dumps the AMON registers upon detection of a
+	  deadlock.
+
+config QCOM_LLCC_AMON_PANIC
+	tristate "Panic on detecting LLCC Activity Monitor(AMON) error"
+	depends on QCOM_LLCC_AMON
+	help
+	  This option enables a panic upon detection of LLCC Activity Monitor
+	  (AMON) errors. Say yes here to enable the deadlock detection mode of
+	  AMON. In deadlock detection mode, AMON will trigger an interrupt if
+	  some LLCC request ages out.
+
 config QCOM_PM
 	bool "Qualcomm Power Management"
 	depends on ARCH_QCOM && !ARM64
@@ -51,6 +102,10 @@
 	  Say M here if you want to include support for the Qualcomm RPM as a
 	  module. This will build a module called "qcom-smd-rpm".
 
+config QCOM_SCM
+	bool "Secure Channel Manager (SCM) support"
+	default n
+
 config QCOM_SMEM_STATE
 	bool
 
@@ -76,3 +131,305 @@
 	help
 	  Client driver for the WCNSS_CTRL SMD channel, used to download nv
 	  firmware to a newly booted WCNSS chip.
+
+config MSM_BOOT_STATS
+	bool "Use MSM boot stats reporting"
+	help
+	  Use this to report MSM boot stats such as bootloader throughput,
+	  display init, and total boot time.
+	  These figures are reported in MPM sleep clock cycles and have a
+	  resolution of 31 bits, as 1 bit is used as an overflow check.
+
+config MSM_CORE_HANG_DETECT
+       tristate "MSM Core Hang Detection Support"
+       help
+         This enables the core hang detection module. It causes an SoC
+         reset when a core hang is detected and collects the core context
+         for the hang.
+
+config MSM_GLADIATOR_HANG_DETECT
+       tristate "MSM Gladiator Hang Detection Support"
+       help
+         This enables the gladiator hang detection module.
+         If the configured threshold is reached, it causes SoC reset on
+         gladiator hang detection and collects the context for the
+         gladiator hang.
+
+config MSM_GLADIATOR_ERP_V2
+       tristate "GLADIATOR coherency interconnect error reporting driver v2"
+       help
+               Support dumping debug information for the GLADIATOR
+               cache interconnect in the error interrupt handler.
+               Meant to be used for debug scenarios only.
+
+               If unsure, say N.
+
+config PANIC_ON_GLADIATOR_ERROR_V2
+       depends on MSM_GLADIATOR_ERP_V2
+       bool "Panic on GLADIATOR error report v2"
+       help
+               Panic upon detection of a Gladiator coherency interconnect error
+               in order to support dumping debug information.
+               Meant to be used for debug scenarios only.
+
+               If unsure, say N.
+
+config QCOM_EUD
+	tristate "QTI Embedded USB Debugger (EUD)"
+	depends on ARCH_QCOM
+	select SERIAL_CORE
+	help
+	  The EUD (Embedded USB Debugger) is a mini-USB hub implemented
+	  on chip to support the USB-based debug and trace capabilities.
+	  This module enables support for Qualcomm Technologies, Inc.
+	  Embedded USB Debugger (EUD).
+
+	  If unsure, say N.
+
+config QCOM_WATCHDOG_V2
+	bool "Qualcomm Watchdog Support"
+	depends on ARCH_QCOM
+	help
+	  This enables the watchdog module. It causes kernel panic if the
+	  watchdog times out. It allows for detection of cpu hangs and
+	  deadlocks. It does not run during the bootup process, so it will
+	  not catch any early lockups.
+
+config QCOM_MEMORY_DUMP_V2
+	bool "QCOM Memory Dump V2 Support"
+	help
+	  This enables memory dump feature. It allows various client
+	  subsystems to register respective dump regions. At the time
+	  of deadlocks or cpu hangs these dump regions are captured to
+	  give a snapshot of the system at the time of the crash.
+
+config QCOM_BUS_SCALING
+	bool "Bus scaling driver"
+	help
+	  This option enables bus scaling on MSM devices. Bus scaling
+	  allows devices to request that clocks be set to rates sufficient
+	  for the active devices' needs without keeping the clocks at max
+	  frequency when a slower speed is sufficient.
+
+config QCOM_BUS_CONFIG_RPMH
+	bool "RPMH Bus scaling driver"
+	depends on QCOM_BUS_SCALING
+	help
+	  This option enables bus scaling using QCOM specific hardware
+	  accelerators. It enables the translation of bandwidth requests
+	  from logical nodes to hardware nodes controlled by the BCM (Bus
+	  Clock Manager).
+
+config QCOM_SECURE_BUFFER
+	bool "Helper functions for securing buffers through TZ"
+	help
+	  Say 'Y' here for targets that need to call into TZ to secure
+	  memory buffers. This ensures that only the correct clients can
+	  use this memory and that no unauthorized access is made to the
+	  buffer.
+
+config MSM_SMEM
+	depends on ARCH_QCOM
+	depends on REMOTE_SPINLOCK_MSM
+	bool "MSM Shared Memory (SMEM)"
+	help
+	  Support for the shared memory interface between the various
+	  processors in the System on a Chip (SoC) which allows basic
+	  inter-processor communication.
+
+config MSM_GLINK
+	bool "Generic Link (G-Link)"
+	help
+	  G-Link is a generic link transport that replaces SMD.  It is used
+	  within a System-on-Chip (SoC) for communication between both internal
+	  processors and external peripherals.  The actual physical transport
+	  is handled by transport plug-ins that can be individually enabled and
+	  configured separately.
+
+config MSM_GLINK_LOOPBACK_SERVER
+	bool "Generic Link (G-Link) Loopback Server"
+	help
+	  G-Link Loopback Server that enables the loopback test framework to
+	  test and validate the G-Link protocol stack. It allows both local
+	  and remote clients to configure the loopback server and echoes back
+	  the data received from the clients.
+
+config MSM_GLINK_SMEM_NATIVE_XPRT
+	depends on MSM_SMEM
+	depends on MSM_GLINK
+	bool "Generic Link (G-Link) SMEM Native Transport"
+	help
+	  G-Link SMEM Native Transport is a G-Link Transport plug-in.  It allows
+	  G-Link communication to remote entities through a shared memory
+	  physical transport.  The nature of shared memory limits this G-Link
+	  transport to only connecting with entities internal to the
+	  System-on-Chip.
+
+config MSM_GLINK_SPI_XPRT
+	depends on MSM_GLINK
+	tristate "Generic Link (G-Link) SPI Transport"
+	help
+	  G-Link SPI Transport is a Transport plug-in developed over SPI
+	  bus. This transport plug-in performs marshaling of G-Link
+	  commands & data to the appropriate SPI bus wire format and
+	  allows for G-Link communication with remote subsystems that are
+	  external to the System-on-Chip.
+
+config TRACER_PKT
+	bool "Tracer Packet"
+	help
+	  Tracer Packet helps in profiling the performance of inter-
+	  processor communication protocols. The profiling information
+	  can be logged into the tracer packet itself.
+
+config QTI_RPMH_API
+	bool "QTI RPMH (h/w accelerators) Communication API"
+	select MAILBOX
+	select QTI_RPMH_MBOX
+	select QTI_SYSTEM_PM
+	help
+	  This option enables RPMH hardware communication for making shared
+	  resource requests on Qualcomm Technologies Inc SoCs.
+
+config QTI_SYSTEM_PM
+	bool
+
+config MSM_SMP2P
+	bool "SMSM Point-to-Point (SMP2P)"
+	depends on MSM_SMEM
+	help
+	  Provide point-to-point remote signaling support.
+	  SMP2P enables transferring 32-bit values between
+	  the local and a remote system using shared
+	  memory and interrupts. A client can open multiple
+	  32-bit values by specifying a unique string and
+	  remote processor ID.
+
+config MSM_SMP2P_TEST
+	bool "SMSM Point-to-Point Test"
+	depends on MSM_SMP2P
+	help
+	  Enables loopback and unit testing support for
+	  SMP2P. Loopback support is used by other
+	  processors to do unit testing. Unit tests
+	  are used to verify the local and remote
+	  implementations.
+
+config MSM_IPC_ROUTER_SMD_XPRT
+	depends on MSM_SMD
+	depends on IPC_ROUTER
+	bool "MSM SMD XPRT Layer"
+	help
+	  SMD Transport Layer that enables IPC Router communication within
+	  a System-on-Chip (SoC). When the SMD channels become available,
+	  this layer registers a transport with IPC Router and enables
+	  message exchange.
+
+config MSM_IPC_ROUTER_HSIC_XPRT
+	depends on USB_QCOM_IPC_BRIDGE
+	depends on IPC_ROUTER
+	bool "MSM HSIC XPRT Layer"
+	help
+	  HSIC Transport Layer that enables off-chip communication of
+	  IPC Router. When the HSIC endpoint becomes available, this layer
+	  registers the transport with IPC Router and enables message
+	  exchange.
+
+config MSM_IPC_ROUTER_MHI_XPRT
+	depends on MSM_MHI
+	depends on IPC_ROUTER
+	bool "MSM MHI XPRT Layer"
+	help
+	  MHI Transport Layer that enables off-chip communication of
+	  IPC Router. When the MHI endpoint becomes available, this layer
+	  registers the transport with IPC Router and enables message
+	  exchange.
+
+config MSM_IPC_ROUTER_GLINK_XPRT
+	depends on MSM_GLINK
+	depends on IPC_ROUTER
+	bool "MSM GLINK XPRT Layer"
+	help
+	  GLINK Transport Layer that enables IPC Router communication within
+	  a System-on-Chip (SoC). When the GLINK channels become available,
+	  this layer registers a transport with IPC Router and enables
+	  message exchange.
+
+config MSM_QMI_INTERFACE
+	depends on IPC_ROUTER
+	depends on QMI_ENCDEC
+	bool "MSM QMI Interface Library"
+	help
+	  Library to send and receive QMI messages over IPC Router.
+	  This library provides interface functions to the kernel drivers
+	  to perform QMI message marshaling and transport them over IPC
+	  Router.
+
+config MSM_GLINK_PKT
+	bool "Enable device interface for GLINK packet channels"
+	depends on MSM_GLINK
+	help
+	  G-link packet driver provides the interface for the userspace
+	  clients to communicate over G-Link via device nodes.
+	  This enables the userspace clients to read from and write to
+	  G-Link packet channels.
+
+config MSM_SYSTEM_HEALTH_MONITOR
+	bool "System Health Monitor"
+	depends on MSM_QMI_INTERFACE && MSM_SUBSYSTEM_RESTART
+	help
+	  System Health Monitor (SHM) passively monitors the health of the
+	  peripherals connected to the application processor. Software
+	  components in the application processor that experience
+	  communication failure can request the SHM to perform a system-wide
+	  health check. If any failures are detected during the health-check,
+	  then a subsystem restart will be triggered for the failed subsystem.
+
+config MSM_SUBSYSTEM_RESTART
+       bool "MSM Subsystem Restart"
+       help
+         This option enables the MSM subsystem restart framework.
+
+         The MSM subsystem restart framework provides support to boot,
+         shutdown, and restart subsystems with a reference counted API.
+         It also notifies userspace of transitions between these states via
+         sysfs.
+
+config MSM_PIL
+       bool "Peripheral image loading"
+       select FW_LOADER
+       default n
+       help
+         Some peripherals need to be loaded into memory before they can be
+         brought out of reset.
+
+         Say yes to support these devices.
+
+config MSM_SYSMON_GLINK_COMM
+       bool "MSM System Monitor communication support using GLINK transport"
+       depends on MSM_GLINK && MSM_SUBSYSTEM_RESTART
+       help
+         This option adds support for MSM System Monitor APIs using the GLINK
+         transport layer. The APIs provided may be used for notifying
+         subsystems within the SoC about other subsystems' power-up/down
+         state-changes.
+
+config MSM_PIL_SSR_GENERIC
+       tristate "MSM Subsystem Boot Support"
+       depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+       help
+         Support for booting and shutting down MSM Subsystem processors.
+         This driver also monitors the SMSM status bits and the watchdog
+         interrupt for the subsystem and restarts it on a watchdog bite
+         or a fatal error. Subsystems include LPASS, Venus, VPU, WCNSS and
+         BCSS.
+
+config MSM_PIL_MSS_QDSP6V5
+       tristate "MSS QDSP6v5 (Hexagon) Boot Support"
+       depends on MSM_PIL && MSM_SUBSYSTEM_RESTART
+       help
+         Support for booting and shutting down QDSP6v5 (Hexagon) processors
+         in modem subsystems. If you would like to make or receive phone
+         calls then say Y here.
+
+         If unsure, say N.
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index fdd664e..25ed482 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -1,4 +1,8 @@
+obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
 obj-$(CONFIG_QCOM_GSBI)	+=	qcom_gsbi.o
+obj-$(CONFIG_QCOM_LLCC) += llcc-core.o llcc-slice.o
+obj-$(CONFIG_QCOM_MSMSKUNK_LLCC) += llcc-msmskunk.o
+obj-$(CONFIG_QCOM_LLCC_AMON) += llcc-amon.o
 obj-$(CONFIG_QCOM_PM)	+=	spm.o
 obj-$(CONFIG_QCOM_SMD) +=	smd.o
 obj-$(CONFIG_QCOM_SMD_RPM)	+= smd-rpm.o
@@ -7,3 +11,45 @@
 obj-$(CONFIG_QCOM_SMP2P)	+= smp2p.o
 obj-$(CONFIG_QCOM_SMSM)	+= smsm.o
 obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
+CFLAGS_scm.o :=$(call as-instr,.arch_extension sec,-DREQUIRES_SEC=1)
+obj-$(CONFIG_QCOM_SCM)  +=      scm.o scm-boot.o
+obj-$(CONFIG_SOC_BUS) += socinfo.o
+obj-$(CONFIG_MSM_BOOT_STATS) += boot_stats.o
+obj-$(CONFIG_MSM_CORE_HANG_DETECT) += core_hang_detect.o
+obj-$(CONFIG_MSM_GLADIATOR_HANG_DETECT) += gladiator_hang_detect.o
+obj-$(CONFIG_MSM_GLADIATOR_ERP_V2) += gladiator_erp_v2.o
+obj-$(CONFIG_QCOM_EUD) += eud.o
+obj-$(CONFIG_QCOM_WATCHDOG_V2) += watchdog_v2.o
+obj-$(CONFIG_QCOM_MEMORY_DUMP_V2) += memory_dump_v2.o
+obj-$(CONFIG_QCOM_RUN_QUEUE_STATS) += rq_stats.o
+obj-$(CONFIG_QCOM_SECURE_BUFFER) += secure_buffer.o
+obj-$(CONFIG_MSM_SMEM) += msm_smem.o smem_debug.o
+obj-$(CONFIG_MSM_GLINK) += glink.o glink_debugfs.o glink_ssr.o
+obj-$(CONFIG_MSM_GLINK_LOOPBACK_SERVER) += glink_loopback_server.o
+obj-$(CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT) += glink_smem_native_xprt.o
+obj-$(CONFIG_MSM_GLINK_SPI_XPRT) += glink_spi_xprt.o
+obj-$(CONFIG_TRACER_PKT) += tracer_pkt.o
+obj-$(CONFIG_QCOM_BUS_SCALING) += msm_bus/
+obj-$(CONFIG_QTI_RPMH_API) += rpmh.o
+obj-$(CONFIG_QTI_SYSTEM_PM) += system_pm.o
+obj-$(CONFIG_MSM_SMP2P) += msm_smp2p.o smp2p_debug.o smp2p_sleepstate.o
+obj-$(CONFIG_MSM_SMP2P_TEST) += smp2p_loopback.o smp2p_test.o smp2p_spinlock_test.o
+obj-$(CONFIG_MSM_IPC_ROUTER_SMD_XPRT) += ipc_router_smd_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_HSIC_XPRT) += ipc_router_hsic_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_MHI_XPRT) += ipc_router_mhi_xprt.o
+obj-$(CONFIG_MSM_IPC_ROUTER_GLINK_XPRT) += ipc_router_glink_xprt.o
+obj-$(CONFIG_MSM_QMI_INTERFACE) += qmi_interface.o
+obj-$(CONFIG_MSM_GLINK_PKT) += msm_glink_pkt.o
+obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor_v01.o
+obj-$(CONFIG_MSM_SYSTEM_HEALTH_MONITOR)	+=	system_health_monitor.o
+obj-$(CONFIG_MSM_SYSMON_GLINK_COMM) += sysmon-glink.o sysmon-qmi.o
+
+obj-$(CONFIG_MSM_PIL_SSR_GENERIC) += subsys-pil-tz.o
+obj-$(CONFIG_MSM_PIL_MSS_QDSP6V5) += pil-q6v5.o pil-msa.o pil-q6v5-mss.o
+obj-$(CONFIG_MSM_PIL)   +=      peripheral-loader.o
+
+ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+       obj-y += subsystem_notif.o
+       obj-y += subsystem_restart.o
+       obj-y += ramdump.o
+endif
diff --git a/drivers/soc/qcom/boot_stats.c b/drivers/soc/qcom/boot_stats.c
new file mode 100644
index 0000000..2fc9cbf
--- /dev/null
+++ b/drivers/soc/qcom/boot_stats.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/sched.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+struct boot_stats {
+	uint32_t bootloader_start;
+	uint32_t bootloader_end;
+	uint32_t bootloader_display;
+	uint32_t bootloader_load_kernel;
+};
+
+static void __iomem *mpm_counter_base;
+static uint32_t mpm_counter_freq;
+static struct boot_stats __iomem *boot_stats;
+
+static int mpm_parse_dt(void)
+{
+	struct device_node *np;
+	u32 freq;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-boot_stats");
+	if (!np) {
+		pr_err("can't find qcom,msm-imem node\n");
+		return -ENODEV;
+	}
+	boot_stats = of_iomap(np, 0);
+	if (!boot_stats) {
+		pr_err("boot_stats: Can't map imem\n");
+		return -ENODEV;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,mpm2-sleep-counter");
+	if (!np) {
+		pr_err("mpm_counter: can't find DT node\n");
+		return -ENODEV;
+	}
+
+	if (!of_property_read_u32(np, "clock-frequency", &freq))
+		mpm_counter_freq = freq;
+	else
+		return -ENODEV;
+
+	if (of_get_address(np, 0, NULL, NULL)) {
+		mpm_counter_base = of_iomap(np, 0);
+		if (!mpm_counter_base) {
+			pr_err("mpm_counter: can't map counter base\n");
+			return -ENODEV;
+		}
+	}
+
+	return 0;
+}
+
+static void print_boot_stats(void)
+{
+	pr_info("KPI: Bootloader start count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_start));
+	pr_info("KPI: Bootloader end count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_end));
+	pr_info("KPI: Bootloader display count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_display));
+	pr_info("KPI: Bootloader load kernel count = %u\n",
+		readl_relaxed(&boot_stats->bootloader_load_kernel));
+	pr_info("KPI: Kernel MPM timestamp = %u\n",
+		readl_relaxed(mpm_counter_base));
+	pr_info("KPI: Kernel MPM Clock frequency = %u\n",
+		mpm_counter_freq);
+}
+
+int boot_stats_init(void)
+{
+	int ret;
+
+	ret = mpm_parse_dt();
+	if (ret < 0)
+		return -ENODEV;
+
+	print_boot_stats();
+
+	iounmap(boot_stats);
+	iounmap(mpm_counter_base);
+
+	return 0;
+}
+
diff --git a/drivers/soc/qcom/core_hang_detect.c b/drivers/soc/qcom/core_hang_detect.c
new file mode 100644
index 0000000..f3a8b93
--- /dev/null
+++ b/drivers/soc/qcom/core_hang_detect.c
@@ -0,0 +1,355 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <soc/qcom/scm.h>
+#include <linux/platform_device.h>
+
+/* pmu event min and max value */
+#define PMU_EVENT_MIN			0
+#define PMU_EVENT_MAX			0x1F
+
+#define PMU_MUX_OFFSET			4
+#define PMU_MUX_MASK_BITS		0xF
+#define ENABLE_OFFSET			1
+#define ENABLE_MASK_BITS		0x1
+
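+/*
+ * Register field helpers: _WRITE(x, y, z) clears the z field (given by
+ * z##_OFFSET and z##_MASK_BITS) in the current value y and inserts x.
+ */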
+#define _VAL(z)			(z##_MASK_BITS << z##_OFFSET)
+#define _VALUE(_val, z)		(_val<<(z##_OFFSET))
+#define _WRITE(x, y, z)		(((~(_VAL(z))) & y) | _VALUE(x, z))
+
+#define MODULE_NAME	"msm_hang_detect"
+#define MAX_SYSFS_LEN 12
+
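+/*
+ * Per-instance state: physical addresses of the per-CPU threshold and
+ * config registers (accessed via scm_io_read/scm_io_write) and the last
+ * values written through sysfs.
+ */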
+struct hang_detect {
+	phys_addr_t threshold[NR_CPUS];
+	phys_addr_t config[NR_CPUS];
+	uint32_t enabled;
+	uint32_t pmu_event_sel;
+	uint32_t threshold_val;
+	struct kobject kobj;
+};
+
+/* interface for exporting attributes */
+struct core_hang_attribute {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+
+#define CORE_HANG_ATTR(_name, _mode, _show, _store)	\
+	struct core_hang_attribute hang_attr_##_name =	\
+			__ATTR(_name, _mode, _show, _store)
+
+#define to_core_hang_dev(kobj) \
+	container_of(kobj, struct hang_detect, kobj)
+
+#define to_core_attr(_attr) \
+	container_of(_attr, struct core_hang_attribute, attr)
+
+/*
+ * On the kernel command line specify core_hang_detect.enable=1
+ * to enable the core hang detect module.
+ * By default core hang detect is turned on
+ */
+static int enable = 1;
+module_param(enable, int, 0444);
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct core_hang_attribute *core_attr = to_core_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (core_attr->show)
+		ret = core_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct core_hang_attribute *core_attr = to_core_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (core_attr->store)
+		ret = core_attr->store(kobj, attr, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops core_sysfs_ops = {
+	.show	= attr_show,
+	.store	= attr_store,
+};
+
+static struct kobj_type core_ktype = {
+	.sysfs_ops	= &core_sysfs_ops,
+};
+
+static ssize_t show_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct hang_detect *device =  to_core_hang_dev(kobj);
+
+	return snprintf(buf, MAX_SYSFS_LEN, "0x%x\n", device->threshold_val);
+}
+
+static ssize_t store_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct hang_detect *hang_dev = to_core_hang_dev(kobj);
+	uint32_t threshold_val;
+	int ret, cpu;
+
+	ret = kstrtouint(buf, 0, &threshold_val);
+	if (ret < 0)
+		return ret;
+
+	if (threshold_val <= 0)
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (!hang_dev->threshold[cpu])
+			continue;
+
+		if (scm_io_write(hang_dev->threshold[cpu], threshold_val)) {
+			pr_err("%s: Failed to set threshold for core%d\n",
+					__func__, cpu);
+			return -EIO;
+		}
+	}
+
+	hang_dev->threshold_val = threshold_val;
+	return count;
+}
+CORE_HANG_ATTR(threshold, 0644, show_threshold, store_threshold);
+
+static ssize_t show_pmu_event_sel(struct kobject *kobj, struct attribute *attr,
+			char *buf)
+{
+	struct hang_detect *hang_device = to_core_hang_dev(kobj);
+
+	return snprintf(buf, MAX_SYSFS_LEN, "0x%x\n",
+			hang_device->pmu_event_sel);
+}
+
+static ssize_t store_pmu_event_sel(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count)
+{
+	int  cpu, ret;
+	uint32_t pmu_event_sel, reg_value;
+	struct hang_detect *hang_dev = to_core_hang_dev(kobj);
+
+	ret = kstrtouint(buf, 0, &pmu_event_sel);
+	if (ret < 0)
+		return ret;
+
+	if (pmu_event_sel < PMU_EVENT_MIN || pmu_event_sel > PMU_EVENT_MAX)
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (!hang_dev->config[cpu])
+			continue;
+
+		reg_value = scm_io_read(hang_dev->config[cpu]);
+		if (scm_io_write(hang_dev->config[cpu],
+			_WRITE(pmu_event_sel, reg_value, PMU_MUX))) {
+			pr_err("%s: Failed to set pmu event for core%d\n",
+					__func__, cpu);
+			return -EIO;
+		}
+	}
+
+	hang_dev->pmu_event_sel = pmu_event_sel;
+	return count;
+}
+CORE_HANG_ATTR(pmu_event_sel, 0644, show_pmu_event_sel, store_pmu_event_sel);
+
+static ssize_t show_enable(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct hang_detect *hang_device = to_core_hang_dev(kobj);
+
+	return snprintf(buf, MAX_SYSFS_LEN, "%u\n", hang_device->enabled);
+}
+
+static ssize_t store_enable(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct hang_detect *hang_dev = to_core_hang_dev(kobj);
+	uint32_t enabled, reg_value;
+	int cpu, ret;
+
+	ret = kstrtouint(buf, 0, &enabled);
+	if (ret < 0)
+		return ret;
+
+	if (!(enabled == 0 || enabled == 1))
+		return -EINVAL;
+
+	for_each_possible_cpu(cpu) {
+		if (!hang_dev->config[cpu])
+			continue;
+
+		reg_value = scm_io_read(hang_dev->config[cpu]);
+		if (scm_io_write(hang_dev->config[cpu],
+			_WRITE(enabled, reg_value, ENABLE))) {
+			pr_err("%s: Failed to set enable for core%d\n",
+					__func__, cpu);
+			return -EIO;
+		}
+	}
+
+	hang_dev->enabled = enabled;
+	return count;
+}
+CORE_HANG_ATTR(enable, 0644, show_enable, store_enable);
+
+static struct attribute *hang_attrs[] = {
+	&hang_attr_threshold.attr,
+	&hang_attr_pmu_event_sel.attr,
+	&hang_attr_enable.attr,
+	NULL
+};
+
+static struct attribute_group hang_attr_group = {
+	.attrs = hang_attrs,
+};
+
+static const struct of_device_id msm_hang_detect_table[] = {
+	{ .compatible = "qcom,core-hang-detect" },
+	{}
+};
+
+static int msm_hang_detect_probe(struct platform_device *pdev)
+{
+	struct device_node *cpu_node;
+	struct device_node *node = pdev->dev.of_node;
+	struct hang_detect *hang_det = NULL;
+	int cpu, ret, cpu_count = 0;
+	const char *name;
+	u32 treg[NR_CPUS] = {0}, creg[NR_CPUS] = {0};
+	int num_reg = 0;
+
+	if (!pdev->dev.of_node || !enable)
+		return -ENODEV;
+
+	hang_det = devm_kzalloc(&pdev->dev,
+			sizeof(struct hang_detect), GFP_KERNEL);
+
+	if (!hang_det) {
+		pr_err("Can't allocate hang_detect memory\n");
+		return -ENOMEM;
+	}
+
+	name = of_get_property(node, "label", NULL);
+	if (!name) {
+		pr_err("Can't get label property\n");
+		return -EINVAL;
+	}
+
+	num_reg = of_property_count_u32_elems(node,
+			"qcom,threshold-arr");
+	if (num_reg < 0) {
+		pr_err("Can't get threshold-arr property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(node, "qcom,threshold-arr",
+				treg, num_reg);
+	if (ret) {
+		pr_err("Can't get threshold-arr property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32_array(node, "qcom,config-arr",
+				creg, num_reg);
+	if (ret) {
+		pr_err("Can't get config-arr property\n");
+		return -EINVAL;
+	}
+
+	for_each_possible_cpu(cpu) {
+		cpu_node = of_get_cpu_node(cpu, NULL);
+		if (!cpu_node)
+			continue;
+
+		hang_det->threshold[cpu] = treg[cpu];
+		hang_det->config[cpu] = creg[cpu];
+		cpu_count++;
+		of_node_put(cpu_node);
+	}
+
+	if (cpu_count == 0) {
+		pr_err("%s:core-hang-arr prop is missing %d\n", __func__, ret);
+		return -EINVAL;
+	}
+
+	ret = kobject_init_and_add(&hang_det->kobj, &core_ktype,
+			&cpu_subsys.dev_root->kobj, "%s_%s",
+			"hang_detect", name);
+	if (ret) {
+		pr_err("%s:Error in creation kobject_add\n", __func__);
+		goto out_put_kobj;
+	}
+
+	ret = sysfs_create_group(&hang_det->kobj, &hang_attr_group);
+	if (ret) {
+		pr_err("%s:Error in creation sysfs_create_group\n", __func__);
+		goto out_del_kobj;
+	}
+
+	platform_set_drvdata(pdev, hang_det);
+	return 0;
+
+out_del_kobj:
+	kobject_del(&hang_det->kobj);
+out_put_kobj:
+	kobject_put(&hang_det->kobj);
+
+	return ret;
+}
+
+static int msm_hang_detect_remove(struct platform_device *pdev)
+{
+	struct hang_detect *hang_det = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	sysfs_remove_group(&hang_det->kobj, &hang_attr_group);
+	kobject_del(&hang_det->kobj);
+	kobject_put(&hang_det->kobj);
+	return 0;
+}
+
+static struct platform_driver msm_hang_detect_driver = {
+	.probe = msm_hang_detect_probe,
+	.remove = msm_hang_detect_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_hang_detect_table,
+	},
+};
+
+module_platform_driver(msm_hang_detect_driver);
+
+MODULE_DESCRIPTION("MSM Core Hang Detect Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/cpuss_dump.c b/drivers/soc/qcom/cpuss_dump.c
new file mode 100644
index 0000000..886a32f
--- /dev/null
+++ b/drivers/soc/qcom/cpuss_dump.c
@@ -0,0 +1,124 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <soc/qcom/memory_dump.h>
+
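+/*
+ * For every child node, allocate a DMA-coherent buffer of the requested
+ * size and register it with the APPS dump table so the memory dump
+ * infrastructure can capture it.
+ */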
+static int cpuss_dump_probe(struct platform_device *pdev)
+{
+	struct device_node *child_node, *dump_node;
+	const struct device_node *node = pdev->dev.of_node;
+	dma_addr_t dump_addr;
+	void *dump_vaddr;
+	struct msm_dump_data *dump_data;
+	struct msm_dump_entry dump_entry;
+	int ret;
+	u32 size, id;
+
+	for_each_available_child_of_node(node, child_node) {
+		dump_node = of_parse_phandle(child_node, "qcom,dump-node", 0);
+
+		if (!dump_node) {
+			dev_err(&pdev->dev, "Unable to find node for %s\n",
+				child_node->name);
+			continue;
+		}
+
+		ret = of_property_read_u32(dump_node, "qcom,dump-size", &size);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find size for %s\n",
+					dump_node->name);
+			continue;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,dump-id", &id);
+		if (ret) {
+			dev_err(&pdev->dev, "Unable to find id for %s\n",
+					child_node->name);
+			continue;
+		}
+
+		dump_vaddr = (void *) dma_alloc_coherent(&pdev->dev, size,
+						&dump_addr, GFP_KERNEL);
+
+		if (!dump_vaddr) {
+			dev_err(&pdev->dev, "Couldn't get memory for dumping\n");
+			continue;
+		}
+
+		memset(dump_vaddr, 0x0, size);
+
+		dump_data = devm_kzalloc(&pdev->dev,
+				sizeof(struct msm_dump_data), GFP_KERNEL);
+		if (!dump_data) {
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			continue;
+		}
+
+		dump_data->addr = dump_addr;
+		dump_data->len = size;
+		dump_entry.id = id;
+		dump_entry.addr = virt_to_phys(dump_data);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS, &dump_entry);
+		if (ret) {
+			dev_err(&pdev->dev, "Data dump setup failed, id = %d\n",
+				id);
+			dma_free_coherent(&pdev->dev, size, dump_vaddr,
+					dump_addr);
+			devm_kfree(&pdev->dev, dump_data);
+		}
+
+	}
+	return 0;
+}
+
+static int cpuss_dump_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id cpuss_dump_match_table[] = {
+	{	.compatible = "qcom,cpuss-dump",	},
+	{}
+};
+
+static struct platform_driver cpuss_dump_driver = {
+	.probe = cpuss_dump_probe,
+	.remove = cpuss_dump_remove,
+	.driver = {
+		.name = "msm_cpuss_dump",
+		.owner = THIS_MODULE,
+		.of_match_table = cpuss_dump_match_table,
+	},
+};
+
+static int __init cpuss_dump_init(void)
+{
+	return platform_driver_register(&cpuss_dump_driver);
+}
+
+static void __exit cpuss_dump_exit(void)
+{
+	platform_driver_unregister(&cpuss_dump_driver);
+}
+
+subsys_initcall(cpuss_dump_init);
+module_exit(cpuss_dump_exit);
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
new file mode 100644
index 0000000..acdeb9d
--- /dev/null
+++ b/drivers/soc/qcom/eud.c
@@ -0,0 +1,538 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/extcon.h>
+#include <linux/delay.h>
+#include <linux/sysfs.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+
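+/*
+ * The EUD block signals USB VBUS attach/detach and charger events through
+ * an extcon device and exposes a small FIFO-based serial port (ttyEUD).
+ * A single IRQ is demultiplexed below into the RX/TX, VBUS, charger and
+ * safe-mode paths.
+ */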
+#define EUD_ENABLE_CMD 1
+#define EUD_DISABLE_CMD 0
+
+#define EUD_REG_COM_TX_ID	0x0000
+#define EUD_REG_COM_TX_LEN	0x0004
+#define EUD_REG_COM_TX_DAT	0x0008
+#define EUD_REG_COM_RX_ID	0x000C
+#define EUD_REG_COM_RX_LEN	0x0010
+#define EUD_REG_COM_RX_DAT	0x0014
+#define EUD_REG_INT1_EN_MASK	0x0024
+#define EUD_REG_INT_STATUS_1	0x0044
+#define EUD_REG_CTL_OUT_1	0x0074
+#define EUD_REG_VBUS_INT_CLR	0x0080
+#define EUD_REG_CHGR_INT_CLR	0x0084
+#define EUD_REG_CSR_EUD_EN	0x1014
+#define EUD_REG_SW_ATTACH_DET	0x1018
+
+#define EUD_INT_RX		BIT(0)
+#define EUD_INT_TX		BIT(1)
+#define EUD_INT_VBUS		BIT(2)
+#define EUD_INT_CHGR		BIT(3)
+#define EUD_INT_SAFE_MODE	BIT(4)
+#define EUD_INT_ALL		(EUD_INT_RX | EUD_INT_TX | \
+				EUD_INT_VBUS | EUD_INT_CHGR | \
+				EUD_INT_SAFE_MODE)
+
+#define EUD_NR			1
+#define EUD_CONSOLE		NULL
+#define UART_ID			0x90
+#define MAX_FIFO_SIZE		14
+
+struct eud_chip {
+	struct device			*dev;
+	int				eud_irq;
+	bool				usb_attach;
+	bool				chgr_enable;
+	void __iomem			*eud_reg_base;
+	struct extcon_dev		*extcon;
+	struct uart_port		port;
+};
+
+static const unsigned int eud_extcon_cable[] = {
+	EXTCON_USB,
+	EXTCON_CHG_USB_SDP,
+	EXTCON_NONE,
+};
+
+/*
+ * On the kernel command line specify eud.enable=1 to enable EUD.
+ * EUD is disabled by default.
+ */
+static int enable;
+static struct platform_device *eud_private;
+
+static void enable_eud(struct platform_device *pdev)
+{
+	struct eud_chip *priv = platform_get_drvdata(pdev);
+
+	/* write into CSR to enable EUD */
+	writel_relaxed(BIT(0), priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
+	/* Ensure Register Writes Complete */
+	wmb();
+	dev_dbg(&pdev->dev, "%s: EUD Enabled!\n", __func__);
+}
+
+static void disable_eud(struct platform_device *pdev)
+{
+	struct eud_chip *priv = platform_get_drvdata(pdev);
+
+	/* write into CSR to disable EUD */
+	writel_relaxed(0, priv->eud_reg_base + EUD_REG_CSR_EUD_EN);
+	dev_dbg(&pdev->dev, "%s: EUD Disabled!\n", __func__);
+}
+
+static int param_eud_set(const char *val, const struct kernel_param *kp)
+{
+	int cmd = 0;
+	int ret;
+
+	ret = kstrtoint(val, 0, &cmd);
+	if (ret)
+		return ret;
+
+	if (cmd != EUD_ENABLE_CMD && cmd != EUD_DISABLE_CMD)
+		return -EINVAL;
+
+	/*
+	 * The driver may not have probed yet (e.g. eud.enable=1 on the
+	 * kernel command line); cache the request and let probe apply it.
+	 */
+	if (!eud_private) {
+		*((int *)kp->arg) = cmd;
+		return 0;
+	}
+
+	if (cmd == EUD_ENABLE_CMD) {
+		pr_debug("%s: Enabling EUD\n", __func__);
+		enable_eud(eud_private);
+	} else {
+		pr_debug("%s: Disabling EUD\n", __func__);
+		disable_eud(eud_private);
+	}
+
+	*((int *)kp->arg) = cmd;
+	return 0;
+}
+
+static const struct kernel_param_ops eud_param_ops = {
+	.set = param_eud_set,
+	.get = param_get_int,
+};
+
+module_param_cb(enable, &eud_param_ops, &enable, 0644);
+
+static void usb_attach_detach(struct eud_chip *chip)
+{
+	u32 reg;
+
+	/* read ctl_out_1[4] to find USB attach or detach event */
+	reg = readl_relaxed(chip->eud_reg_base + EUD_REG_CTL_OUT_1);
+	if (reg & BIT(4))
+		chip->usb_attach = true;
+	else
+		chip->usb_attach = false;
+
+	extcon_set_cable_state_(chip->extcon, EXTCON_USB, chip->usb_attach);
+
+	/* set and clear vbus_int_clr[0] to clear interrupt */
+	writel_relaxed(BIT(0), chip->eud_reg_base + EUD_REG_VBUS_INT_CLR);
+	/* Ensure Register Writes Complete */
+	wmb();
+	writel_relaxed(0, chip->eud_reg_base + EUD_REG_VBUS_INT_CLR);
+}
+
+static void chgr_enable_disable(struct eud_chip *chip)
+{
+	u32 reg;
+
+	/* read ctl_out_1[6] to find charger enable or disable event */
+	reg = readl_relaxed(chip->eud_reg_base + EUD_REG_CTL_OUT_1);
+	if (reg & BIT(6))
+		chip->chgr_enable = true;
+	else
+		chip->chgr_enable = false;
+
+	extcon_set_cable_state_(chip->extcon, EXTCON_CHG_USB_SDP,
+						chip->chgr_enable);
+
+	/* set and clear chgr_int_clr[0] to clear interrupt */
+	writel_relaxed(BIT(0), chip->eud_reg_base + EUD_REG_CHGR_INT_CLR);
+	/* Ensure Register Writes Complete */
+	wmb();
+	writel_relaxed(0, chip->eud_reg_base + EUD_REG_CHGR_INT_CLR);
+}
+
+static void pet_eud(struct eud_chip *chip)
+{
+	u32 reg;
+
+	/* read sw_attach_det[0] to find attach/detach event */
+	reg = readl_relaxed(chip->eud_reg_base + EUD_REG_SW_ATTACH_DET);
+	if (reg & BIT(0)) {
+		/* Detach & Attach pet for EUD */
+		writel_relaxed(0, chip->eud_reg_base + EUD_REG_SW_ATTACH_DET);
+		/* Ensure Register Writes Complete */
+		wmb();
+		/* Delay to make sure detach pet is done before attach pet */
+		udelay(100);
+		writel_relaxed(BIT(0), chip->eud_reg_base +
+					EUD_REG_SW_ATTACH_DET);
+		/* Ensure Register Writes Complete */
+		wmb();
+	} else {
+		/* Attach pet for EUD */
+		writel_relaxed(BIT(0), chip->eud_reg_base +
+					EUD_REG_SW_ATTACH_DET);
+		/* Ensure Register Writes Complete */
+		wmb();
+	}
+}
+
+static unsigned int eud_tx_empty(struct uart_port *port)
+{
+	u32 reg;
+
+	/* read status register and cross check for Tx interrupt */
+	reg = readl_relaxed(port->membase + EUD_REG_INT_STATUS_1);
+	if (reg & EUD_INT_TX)
+		return TIOCSER_TEMT;
+	else
+		return 0;
+}
+
+static void eud_stop_tx(struct uart_port *port)
+{
+	/* Disable Tx interrupt */
+	writel_relaxed(~EUD_INT_TX, port->membase + EUD_REG_INT_STATUS_1);
+	/* Ensure Register Writes Complete */
+	wmb();
+}
+
+static void eud_start_tx(struct uart_port *port)
+{
+	/* Enable Tx interrupt */
+	writel_relaxed(EUD_INT_TX, port->membase + EUD_REG_INT_STATUS_1);
+	/* Ensure Register Writes Complete */
+	wmb();
+}
+
+static void eud_stop_rx(struct uart_port *port)
+{
+	/* Disable Rx interrupt */
+	writel_relaxed(~EUD_INT_RX, port->membase + EUD_REG_INT_STATUS_1);
+	/* Ensure Register Writes Complete */
+	wmb();
+}
+
+static int eud_startup(struct uart_port *port)
+{
+	/* Enable Rx interrupt */
+	writel_relaxed(EUD_INT_RX, port->membase + EUD_REG_INT_STATUS_1);
+	/* Ensure Register Writes Complete */
+	wmb();
+	return 0;
+}
+
+static void eud_shutdown(struct uart_port *port)
+{
+	/* Disable both Tx & Rx interrupts */
+	writel_relaxed(~(EUD_INT_TX | EUD_INT_RX),
+			port->membase + EUD_REG_INT_STATUS_1);
+	/* Ensure Register Writes Complete */
+	wmb();
+}
+
+static const char *eud_type(struct uart_port *port)
+{
+	return (port->type == PORT_EUD_UART) ? "EUD UART" : NULL;
+}
+
+static int eud_request_port(struct uart_port *port)
+{
+	/* Nothing to request */
+	return 0;
+}
+
+static void eud_release_port(struct uart_port *port)
+{
+	/* Nothing to release */
+}
+
+static void eud_config_port(struct uart_port *port, int flags)
+{
+	/* set port type, clear Tx and Rx interrupts */
+	port->type = PORT_EUD_UART;
+	writel_relaxed(~(EUD_INT_TX | EUD_INT_RX),
+			port->membase + EUD_REG_INT_STATUS_1);
+	/* Ensure Register Writes Complete */
+	wmb();
+}
+
+static int eud_verify_port(struct uart_port *port,
+				       struct serial_struct *ser)
+{
+	if (ser->type != PORT_UNKNOWN && ser->type != PORT_EUD_UART)
+		return -EINVAL;
+	return 0;
+}
+
+/* serial functions supported */
+static const struct uart_ops eud_uart_ops = {
+	.tx_empty	= eud_tx_empty,
+	.stop_tx	= eud_stop_tx,
+	.start_tx	= eud_start_tx,
+	.stop_rx	= eud_stop_rx,
+	.startup	= eud_startup,
+	.shutdown	= eud_shutdown,
+	.type		= eud_type,
+	.release_port	= eud_release_port,
+	.request_port	= eud_request_port,
+	.config_port	= eud_config_port,
+	.verify_port	= eud_verify_port,
+};
+
+static struct uart_driver eud_uart_driver = {
+	.owner		= THIS_MODULE,
+	.driver_name	= "msm-eud",
+	.dev_name	= "ttyEUD",
+	.nr		= EUD_NR,
+	.cons		= EUD_CONSOLE,
+};
+
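+/* Drain the EUD RX FIFO into the TTY layer; called from the IRQ handler. */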
+static void eud_uart_rx(struct eud_chip *chip)
+{
+	struct uart_port *port = &chip->port;
+	u32 reg;
+	unsigned int len;
+	unsigned char ch, flag;
+	int i;
+
+	reg = readl_relaxed(chip->eud_reg_base + EUD_REG_COM_RX_ID);
+	if (reg != UART_ID) {
+		dev_err(chip->dev, "Rx isn't for us!\n");
+		return;
+	}
+	/* Read Rx Len & Data registers */
+	spin_lock(&port->lock);
+	len = readl_relaxed(chip->eud_reg_base + EUD_REG_COM_RX_LEN);
+	for (i = 0; i < len; i++) {
+		ch = readl_relaxed(chip->eud_reg_base + EUD_REG_COM_RX_DAT);
+		flag = TTY_NORMAL;
+		port->icount.rx++;
+
+		if (uart_handle_sysrq_char(port, ch))
+			continue;
+		uart_insert_char(port, 0, 0, ch, flag);
+	}
+
+	spin_unlock(&port->lock);
+	tty_flip_buffer_push(&port->state->port);
+}
+
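+/* Push up to one FIFO's worth of pending characters from the circ buffer. */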
+static void eud_uart_tx(struct eud_chip *chip)
+{
+	struct uart_port *port = &chip->port;
+	struct circ_buf *xmit = &port->state->xmit;
+	unsigned int len;
+	u32 reg;
+
+	writel_relaxed(UART_ID, chip->eud_reg_base + EUD_REG_COM_TX_ID);
+	reg = readl_relaxed(chip->eud_reg_base + EUD_REG_COM_TX_ID);
+	if (reg != UART_ID) {
+		dev_err(chip->dev, "Tx isn't for us!\n");
+		return;
+	}
+	/* Write to Tx Len & Data registers */
+	spin_lock(&port->lock);
+	len = uart_circ_chars_pending(xmit);
+	if (len > 0) {
+		if (len > port->fifosize)
+			len = port->fifosize;
+		while (len--) {
+			writel_relaxed(xmit->buf[xmit->tail],
+			       port->membase + EUD_REG_COM_TX_DAT);
+			xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+			port->icount.tx++;
+		}
+	}
+	spin_unlock(&port->lock);
+}
+
+static irqreturn_t handle_eud_irq(int irq, void *data)
+{
+	struct eud_chip *chip = data;
+	u32 reg;
+
+	/* read status register and find out which interrupt triggered */
+	reg = readl_relaxed(chip->eud_reg_base + EUD_REG_INT_STATUS_1);
+	if (reg & EUD_INT_RX) {
+		dev_dbg(chip->dev, "EUD RX Interrupt!\n");
+		eud_uart_rx(chip);
+	} else if (reg & EUD_INT_TX) {
+		dev_dbg(chip->dev, "EUD TX Interrupt!\n");
+		eud_uart_tx(chip);
+	} else if (reg & EUD_INT_VBUS) {
+		dev_dbg(chip->dev, "EUD VBUS Interrupt!\n");
+		usb_attach_detach(chip);
+	} else if (reg & EUD_INT_CHGR) {
+		dev_dbg(chip->dev, "EUD CHGR Interrupt!\n");
+		chgr_enable_disable(chip);
+	} else if (reg & EUD_INT_SAFE_MODE) {
+		dev_dbg(chip->dev, "EUD SAFE MODE Interrupt!\n");
+		pet_eud(chip);
+	} else {
+		dev_dbg(chip->dev, "Unknown/spurious EUD Interrupt!\n");
+		return IRQ_NONE;
+	}
+
+	return IRQ_HANDLED;
+}
+
+static int msm_eud_probe(struct platform_device *pdev)
+{
+	struct eud_chip *chip;
+	struct uart_port *port;
+	struct resource *res;
+	int ret;
+
+	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
+	if (!chip) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	chip->dev = &pdev->dev;
+	platform_set_drvdata(pdev, chip);
+
+	chip->extcon = devm_extcon_dev_allocate(&pdev->dev, eud_extcon_cable);
+	if (IS_ERR(chip->extcon)) {
+		dev_err(chip->dev, "failed to allocate extcon device\n");
+		return PTR_ERR(chip->extcon);
+	}
+
+	ret = devm_extcon_dev_register(&pdev->dev, chip->extcon);
+	if (ret) {
+		dev_err(chip->dev, "failed to register extcon device\n");
+		return ret;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "eud_base");
+	if (!res) {
+		dev_err(chip->dev, "%s: failed to get resource eud_base\n",
+					__func__);
+		return -ENODEV;
+	}
+
+	chip->eud_reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(chip->eud_reg_base))
+		return PTR_ERR(chip->eud_reg_base);
+
+	chip->eud_irq = platform_get_irq_byname(pdev, "eud_irq");
+	if (chip->eud_irq < 0) {
+		dev_err(chip->dev, "failed to get eud_irq\n");
+		return chip->eud_irq;
+	}
+
+	ret = devm_request_irq(&pdev->dev, chip->eud_irq, handle_eud_irq,
+				IRQF_TRIGGER_HIGH, "eud_irq", chip);
+	if (ret) {
+		dev_err(chip->dev, "request failed for eud irq\n");
+		return ret;
+	}
+
+	device_init_wakeup(&pdev->dev, true);
+	enable_irq_wake(chip->eud_irq);
+
+	port = &chip->port;
+	port->line = pdev->id;
+	port->type = PORT_EUD_UART;
+	port->dev = chip->dev;
+	port->fifosize = MAX_FIFO_SIZE;
+	port->iotype = SERIAL_IO_MEM;
+	port->flags = UPF_BOOT_AUTOCONF;
+	port->membase = chip->eud_reg_base;
+	port->irq = chip->eud_irq;
+	port->ops = &eud_uart_ops;
+
+	ret = uart_add_one_port(&eud_uart_driver, port);
+	if (ret) {
+		dev_err(chip->dev, "failed to add uart port!\n");
+		return ret;
+	}
+
+	/* Enable vbus, chgr & safe mode warning interrupts */
+	writel_relaxed(EUD_INT_VBUS | EUD_INT_CHGR | EUD_INT_SAFE_MODE,
+			chip->eud_reg_base + EUD_REG_INT1_EN_MASK);
+
+	eud_private = pdev;
+
+	/* Enable EUD */
+	if (enable)
+		enable_eud(pdev);
+
+	return 0;
+}
+
+static int msm_eud_remove(struct platform_device *pdev)
+{
+	struct eud_chip *chip = platform_get_drvdata(pdev);
+	struct uart_port *port = &chip->port;
+
+	uart_remove_one_port(&eud_uart_driver, port);
+	device_init_wakeup(chip->dev, false);
+
+	return 0;
+}
+
+static const struct of_device_id msm_eud_dt_match[] = {
+	{.compatible = "qcom,msm-eud"},
+	{},
+};
+MODULE_DEVICE_TABLE(of, msm_eud_dt_match);
+
+static struct platform_driver msm_eud_driver = {
+	.probe		= msm_eud_probe,
+	.remove		= msm_eud_remove,
+	.driver		= {
+		.name		= "msm-eud",
+		.owner		= THIS_MODULE,
+		.of_match_table = msm_eud_dt_match,
+	},
+};
+
+static int __init msm_eud_init(void)
+{
+	int ret;
+
+	ret = uart_register_driver(&eud_uart_driver);
+	if (ret) {
+		pr_err("%s: Failed to register EUD UART driver\n",
+			__func__);
+		return ret;
+	}
+
+	ret = platform_driver_register(&msm_eud_driver);
+	if (ret) {
+		pr_err("%s: Failed to register EUD driver\n",
+			__func__);
+		uart_unregister_driver(&eud_uart_driver);
+		return ret;
+	}
+
+	return 0;
+}
+module_init(msm_eud_init);
+
+static void __exit msm_eud_exit(void)
+{
+	platform_driver_unregister(&msm_eud_driver);
+	uart_unregister_driver(&eud_uart_driver);
+}
+module_exit(msm_eud_exit);
+
+MODULE_DESCRIPTION("QTI EUD driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/gladiator_erp_v2.c b/drivers/soc/qcom/gladiator_erp_v2.c
new file mode 100644
index 0000000..25c7bd7
--- /dev/null
+++ b/drivers/soc/qcom/gladiator_erp_v2.c
@@ -0,0 +1,851 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/scm.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+
+#define MODULE_NAME "gladiator-v2_error_reporting"
+
+/* Register Offsets */
+#define GLADIATOR_ID_COREID	0x0
+#define GLADIATOR_ID_REVISIONID	0x4
+#define GLADIATOR_FAULTEN	0x1010
+#define GLADIATOR_ERRVLD	0x1014
+#define GLADIATOR_ERRCLR	0x1018
+#define GLADIATOR_ERRLOG0	0x101C
+#define GLADIATOR_ERRLOG1	0x1020
+#define GLADIATOR_ERRLOG2	0x1024
+#define GLADIATOR_ERRLOG3	0x1028
+#define GLADIATOR_ERRLOG4	0x102C
+#define GLADIATOR_ERRLOG5	0x1030
+#define GLADIATOR_ERRLOG6	0x1034
+#define GLADIATOR_ERRLOG7	0x1038
+#define GLADIATOR_ERRLOG8	0x103C
+#define OBSERVER_0_ID_COREID	0x8000
+#define OBSERVER_0_ID_REVISIONID	0x8004
+#define OBSERVER_0_FAULTEN	0x8008
+#define OBSERVER_0_ERRVLD	0x800C
+#define OBSERVER_0_ERRCLR	0x8010
+#define OBSERVER_0_ERRLOG0	0x8014
+#define OBSERVER_0_ERRLOG1	0x8018
+#define OBSERVER_0_ERRLOG2	0x801C
+#define OBSERVER_0_ERRLOG3	0x8020
+#define OBSERVER_0_ERRLOG4	0x8024
+#define OBSERVER_0_ERRLOG5	0x8028
+#define OBSERVER_0_ERRLOG6	0x802C
+#define OBSERVER_0_ERRLOG7	0x8030
+#define OBSERVER_0_ERRLOG8	0x8034
+#define OBSERVER_0_STALLEN	0x8038
+
+#define GLD_TRANS_OPCODE_MASK			0xE
+#define GLD_TRANS_OPCODE_SHIFT			1
+#define GLD_ERROR_TYPE_MASK				0x700
+#define GLD_ERROR_TYPE_SHIFT			8
+#define GLD_LEN1_MASK					0xFFF0000
+#define GLD_LEN1_SHIFT					16
+#define	GLD_TRANS_SOURCEID_MASK			0x7
+#define	GLD_TRANS_SOURCEID_SHIFT		0
+#define	GLD_TRANS_TARGETID_MASK			0x7
+#define	GLD_TRANS_TARGETID_SHIFT		0
+#define	GLD_ERRLOG_ERROR				0x7
+#define GLD_ERRLOG5_ERROR_TYPE_MASK		0xFF000000
+#define GLD_ERRLOG5_ERROR_TYPE_SHIFT	24
+#define GLD_ACE_PORT_PARITY_MASK		0xc000
+#define GLD_ACE_PORT_PARITY_SHIFT		14
+#define GLD_ACE_PORT_DISCONNECT_MASK	0xf0000
+#define GLD_ACE_PORT_DISCONNECT_SHIFT	16
+#define GLD_ACE_PORT_DIRECTORY_MASK		0xf00000
+#define GLD_ACE_PORT_DIRECTORY_SHIFT	20
+#define GLD_INDEX_PARITY_MASK			0x1FFF
+#define GLD_INDEX_PARITY_SHIFT			0
+#define OBS_TRANS_OPCODE_MASK			0x1E
+#define OBS_TRANS_OPCODE_SHIFT			1
+#define OBS_ERROR_TYPE_MASK				0x700
+#define OBS_ERROR_TYPE_SHIFT			8
+#define OBS_LEN1_MASK					0x7F0000
+#define OBS_LEN1_SHIFT					16
+
+struct msm_gladiator_data {
+	void __iomem *gladiator_virt_base;
+	int erp_irq;
+	struct notifier_block pm_notifier_block;
+	struct clk *qdss_clk;
+};
+
+static int enable_panic_on_error;
+module_param(enable_panic_on_error, int, 0);
+
+enum gld_trans_opcode {
+	GLD_RD,
+	GLD_RDX,
+	GLD_RDL,
+	GLD_RESERVED,
+	GLD_WR,
+	GLD_WRC,
+	GLD_PRE,
+};
+
+enum obs_trans_opcode {
+	OBS_RD,
+	OBS_RDW,
+	OBS_RDL,
+	OBS_RDX,
+	OBS_WR,
+	OBS_WRW,
+	OBS_WRC,
+	OBS_RESERVED,
+	OBS_PRE,
+	OBS_URG,
+};
+
+enum obs_err_code {
+	OBS_SLV,
+	OBS_DEC,
+	OBS_UNS,
+	OBS_DISC,
+	OBS_SEC,
+	OBS_HIDE,
+	OBS_TMO,
+	OBS_RSV,
+};
+
+enum err_log {
+	ID_COREID,
+	ID_REVISIONID,
+	FAULTEN,
+	ERRVLD,
+	ERRCLR,
+	ERR_LOG0,
+	ERR_LOG1,
+	ERR_LOG2,
+	ERR_LOG3,
+	ERR_LOG4,
+	ERR_LOG5,
+	ERR_LOG6,
+	ERR_LOG7,
+	ERR_LOG8,
+	STALLEN,
+	MAX_NUM,
+};
+
+enum type_logger_error {
+	DATA_TRANSFER_ERROR,
+	DVM_ERROR,
+	TX_ERROR,
+	TXR_ERROR,
+	DISCONNECT_ERROR,
+	DIRECTORY_ERROR,
+	PARITY_ERROR,
+};
+
+static void clear_gladiator_error(void __iomem *gladiator_virt_base)
+{
+	writel_relaxed(1, gladiator_virt_base + GLADIATOR_ERRCLR);
+	writel_relaxed(1, gladiator_virt_base + OBSERVER_0_ERRCLR);
+}
+
+static inline void print_gld_transaction(unsigned int opc)
+{
+	switch (opc) {
+	case GLD_RD:
+		pr_alert("Transaction type: READ\n");
+		break;
+	case GLD_RDX:
+		pr_alert("Transaction type: EXCLUSIVE READ\n");
+		break;
+	case GLD_RDL:
+		pr_alert("Transaction type: LINKED READ\n");
+		break;
+	case GLD_WR:
+		pr_alert("Transaction type: WRITE\n");
+		break;
+	case GLD_WRC:
+		pr_alert("Transaction type: CONDITIONAL WRITE\n");
+		break;
+	case GLD_PRE:
+		pr_alert("Transaction: Preamble packet of linked sequence\n");
+		break;
+	default:
+		pr_alert("Transaction type: Unknown; value:%u\n", opc);
+	}
+}
+
+static inline void print_gld_errtype(unsigned int errtype)
+{
+	if (errtype == 0)
+		pr_alert("Error type: Snoop data transfer\n");
+	else if (errtype == 1)
+		pr_alert("Error type: DVM error\n");
+	else if (errtype == 3)
+		pr_alert("Error type: Disconnect, directory, or parity error\n");
+	else
+		pr_alert("Error type: Unknown; value:%u\n", errtype);
+}
+
+static void decode_gld_errlog0(u32 err_reg)
+{
+	unsigned int opc, errtype, len1;
+
+	opc = (err_reg & GLD_TRANS_OPCODE_MASK) >> GLD_TRANS_OPCODE_SHIFT;
+	errtype = (err_reg & GLD_ERROR_TYPE_MASK) >> GLD_ERROR_TYPE_SHIFT;
+	len1 = (err_reg & GLD_LEN1_MASK) >> GLD_LEN1_SHIFT;
+
+	print_gld_transaction(opc);
+	print_gld_errtype(errtype);
+	pr_alert("number of payload bytes: %d\n", len1 + 1);
+}
+
+static void decode_gld_errlog1(u32 err_reg)
+{
+	if ((err_reg & GLD_ERRLOG_ERROR) == GLD_ERRLOG_ERROR)
+		pr_alert("Transaction issued on IO target generic interface\n");
+	else
+		pr_alert("Transaction source ID: %d\n",
+				(err_reg & GLD_TRANS_SOURCEID_MASK)
+				>> GLD_TRANS_SOURCEID_SHIFT);
+}
+
+static void decode_gld_errlog2(u32 err_reg)
+{
+	if ((err_reg & GLD_ERRLOG_ERROR) == GLD_ERRLOG_ERROR)
+		pr_alert("Error response coming from: external DVM network\n");
+	else
+		pr_alert("Error response coming from: Target ID: %d\n",
+				(err_reg & GLD_TRANS_TARGETID_MASK)
+				>> GLD_TRANS_TARGETID_SHIFT);
+}
+
+static void decode_ace_port_index(u32 type, u32 error)
+{
+	unsigned int port;
+
+	switch (type) {
+	case DISCONNECT_ERROR:
+		port = (error & GLD_ACE_PORT_DISCONNECT_MASK)
+			>> GLD_ACE_PORT_DISCONNECT_SHIFT;
+		pr_alert("ACE port index: %d\n", port);
+		break;
+	case DIRECTORY_ERROR:
+		port = (error & GLD_ACE_PORT_DIRECTORY_MASK)
+			>> GLD_ACE_PORT_DIRECTORY_SHIFT;
+		pr_alert("ACE port index: %d\n", port);
+		break;
+	case PARITY_ERROR:
+		port = (error & GLD_ACE_PORT_PARITY_MASK)
+			>> GLD_ACE_PORT_PARITY_SHIFT;
+		pr_alert("ACE port index: %d\n", port);
+	}
+}
+
+static void decode_index_parity(u32 error)
+{
+	pr_alert("Index: %d\n",
+			(error & GLD_INDEX_PARITY_MASK)
+			>> GLD_INDEX_PARITY_SHIFT);
+}
+
+static void decode_gld_logged_error(u32 err_reg5)
+{
+	unsigned int log_err_type, i, value;
+
+	log_err_type = (err_reg5 & GLD_ERRLOG5_ERROR_TYPE_MASK)
+		>> GLD_ERRLOG5_ERROR_TYPE_SHIFT;
+	for (i = 0 ; i <= 6 ; i++) {
+		value = log_err_type & 0x1;
+		switch (i) {
+		case DATA_TRANSFER_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Data transfer error\n");
+			break;
+		case DVM_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: DVM error\n");
+			break;
+		case TX_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Tx error\n");
+			break;
+		case TXR_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: TxR error\n");
+			break;
+		case DISCONNECT_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Disconnect error\n");
+			decode_ace_port_index(
+					DISCONNECT_ERROR,
+					err_reg5);
+			break;
+		case DIRECTORY_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Directory error\n");
+			decode_ace_port_index(
+					DIRECTORY_ERROR,
+					err_reg5);
+			break;
+		case PARITY_ERROR:
+			if (value == 0)
+				continue;
+			pr_alert("Error type: Parity error\n");
+			decode_ace_port_index(PARITY_ERROR, err_reg5);
+			decode_index_parity(err_reg5);
+			break;
+		}
+		log_err_type = log_err_type >> 1;
+	}
+}
+
+static void decode_gld_errlog(u32 err_reg, unsigned int err_log)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_gld_errlog0(err_reg);
+		break;
+	case ERR_LOG1:
+		decode_gld_errlog1(err_reg);
+		break;
+	case ERR_LOG2:
+		decode_gld_errlog2(err_reg);
+		break;
+	case ERR_LOG3:
+		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("Upper 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("Lower 32-bits of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		pr_alert("Mid 32-bits(63-32) of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG7:
+		break;
+	case ERR_LOG8:
+		pr_alert("Upper 32-bits(95-64) of user: %08x\n", err_reg);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
+
+static inline void print_obs_transaction(unsigned int opc)
+{
+	switch (opc) {
+	case OBS_RD:
+		pr_alert("Transaction type: READ\n");
+		break;
+	case OBS_RDW:
+		pr_alert("Transaction type: WRAPPED READ\n");
+		break;
+	case OBS_RDL:
+		pr_alert("Transaction type: LINKED READ\n");
+		break;
+	case OBS_RDX:
+		pr_alert("Transaction type: EXCLUSIVE READ\n");
+		break;
+	case OBS_WR:
+		pr_alert("Transaction type: WRITE\n");
+		break;
+	case OBS_WRW:
+		pr_alert("Transaction type: WRAPPED WRITE\n");
+		break;
+	case OBS_WRC:
+		pr_alert("Transaction type: CONDITIONAL WRITE\n");
+		break;
+	case OBS_PRE:
+		pr_alert("Transaction: Preamble packet of linked sequence\n");
+		break;
+	case OBS_URG:
+		pr_alert("Transaction type: Urgency Packet\n");
+		break;
+	default:
+		pr_alert("Transaction type: Unknown; value:%u\n", opc);
+	}
+}
+
+static inline void print_obs_errcode(unsigned int errcode)
+{
+	switch (errcode) {
+	case OBS_SLV:
+		pr_alert("Error code: Target error detected by slave\n");
+		pr_alert("Source: Target\n");
+		break;
+	case OBS_DEC:
+		pr_alert("Error code: Address decode error\n");
+		pr_alert("Source: Initiator NIU\n");
+		break;
+	case OBS_UNS:
+		pr_alert("Error code: Unsupported request\n");
+		pr_alert("Source: Target NIU\n");
+		break;
+	case OBS_DISC:
+		pr_alert("Error code: Disconnected target or domain\n");
+		pr_alert("Source: Power Disconnect\n");
+		break;
+	case OBS_SEC:
+		pr_alert("Error code: Security violation\n");
+		pr_alert("Source: Initiator NIU or Firewall\n");
+		break;
+	case OBS_HIDE:
+		pr_alert("Error :Hidden security violation, reported as OK\n");
+		pr_alert("Source: Firewall\n");
+		break;
+	case OBS_TMO:
+		pr_alert("Error code: Time-out\n");
+		pr_alert("Source: Target NIU\n");
+		break;
+	default:
+		pr_alert("Error code: Unknown; code:%u\n", errcode);
+	}
+}
+
+static void decode_obs_errlog0(u32 err_reg)
+{
+	unsigned int opc, errcode, len1;
+
+	opc = (err_reg & OBS_TRANS_OPCODE_MASK) >> OBS_TRANS_OPCODE_SHIFT;
+	errcode = (err_reg & OBS_ERROR_TYPE_MASK) >> OBS_ERROR_TYPE_SHIFT;
+	len1 = (err_reg & OBS_LEN1_MASK) >> OBS_LEN1_SHIFT;
+
+	print_obs_transaction(opc);
+	print_obs_errcode(errcode);
+	pr_alert("number of payload bytes: %d\n", len1 + 1);
+}
+
+static void decode_obs_errlog(u32 err_reg, unsigned int err_log)
+{
+	switch (err_log) {
+	case ERR_LOG0:
+		decode_obs_errlog0(err_reg);
+		break;
+	case ERR_LOG1:
+		pr_alert("RouteId of the error: %08x\n", err_reg);
+		break;
+	case ERR_LOG2:
+		/* reserved error log register */
+		break;
+	case ERR_LOG3:
+		pr_alert("Lower 32-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG4:
+		pr_alert("Upper 12-bits of error address: %08x\n", err_reg);
+		break;
+	case ERR_LOG5:
+		pr_alert("Lower 13-bits of user: %08x\n", err_reg);
+		break;
+	case ERR_LOG6:
+		/* reserved error log register */
+		break;
+	case ERR_LOG7:
+		pr_alert("Security filed of the logged error: %08x\n", err_reg);
+		break;
+	case ERR_LOG8:
+		/* reserved error log register */
+		break;
+	case STALLEN:
+		pr_alert("stall mode of the error logger: %08x\n",
+				err_reg & 0x1);
+		break;
+	default:
+		pr_alert("Invalid error register; reg num:%u\n", err_log);
+	}
+}
+
+static u32 get_gld_offset(unsigned int err_log)
+{
+	u32 offset = 0;
+
+	switch (err_log) {
+	case FAULTEN:
+		offset = GLADIATOR_FAULTEN;
+		break;
+	case ERRVLD:
+		offset = GLADIATOR_ERRVLD;
+		break;
+	case ERRCLR:
+		offset = GLADIATOR_ERRCLR;
+		break;
+	case ERR_LOG0:
+		offset = GLADIATOR_ERRLOG0;
+		break;
+	case ERR_LOG1:
+		offset = GLADIATOR_ERRLOG1;
+		break;
+	case ERR_LOG2:
+		offset = GLADIATOR_ERRLOG2;
+		break;
+	case ERR_LOG3:
+		offset = GLADIATOR_ERRLOG3;
+		break;
+	case ERR_LOG4:
+		offset = GLADIATOR_ERRLOG4;
+		break;
+	case ERR_LOG5:
+		offset = GLADIATOR_ERRLOG5;
+		break;
+	case ERR_LOG6:
+		offset = GLADIATOR_ERRLOG6;
+		break;
+	case ERR_LOG7:
+		offset = GLADIATOR_ERRLOG7;
+		break;
+	case ERR_LOG8:
+		offset = GLADIATOR_ERRLOG8;
+		break;
+	default:
+		pr_alert("Invalid gladiator error register; reg num:%u\n",
+				err_log);
+	}
+	return offset;
+}
+
+static u32 get_obs_offset(unsigned int err_log)
+{
+	u32 offset = 0;
+
+	switch (err_log) {
+	case ID_COREID:
+		offset = OBSERVER_0_ID_COREID;
+		break;
+	case ID_REVISIONID:
+		offset = OBSERVER_0_ID_REVISIONID;
+		break;
+	case FAULTEN:
+		offset = OBSERVER_0_FAULTEN;
+		break;
+	case ERRVLD:
+		offset = OBSERVER_0_ERRVLD;
+		break;
+	case ERRCLR:
+		offset = OBSERVER_0_ERRCLR;
+		break;
+	case ERR_LOG0:
+		offset = OBSERVER_0_ERRLOG0;
+		break;
+	case ERR_LOG1:
+		offset = OBSERVER_0_ERRLOG1;
+		break;
+	case ERR_LOG2:
+		offset = OBSERVER_0_ERRLOG2;
+		break;
+	case ERR_LOG3:
+		offset = OBSERVER_0_ERRLOG3;
+		break;
+	case ERR_LOG4:
+		offset = OBSERVER_0_ERRLOG4;
+		break;
+	case ERR_LOG5:
+		offset = OBSERVER_0_ERRLOG5;
+		break;
+	case ERR_LOG6:
+		offset = OBSERVER_0_ERRLOG6;
+		break;
+	case ERR_LOG7:
+		offset = OBSERVER_0_ERRLOG7;
+		break;
+	case ERR_LOG8:
+		offset = OBSERVER_0_ERRLOG8;
+		break;
+	case STALLEN:
+		offset = OBSERVER_0_STALLEN;
+		break;
+	default:
+		pr_alert("Invalid observer error register; reg num:%u\n",
+				err_log);
+	}
+	return offset;
+}
+
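+/* ERRLOG5 is decoded based on the error type reported in ERRLOG0. */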
+static void decode_gld_errlog5(struct msm_gladiator_data *msm_gld_data)
+{
+	unsigned int errtype;
+	u32 err_reg0, err_reg5;
+
+	err_reg0 = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			get_gld_offset(ERR_LOG0));
+	err_reg5 = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			get_gld_offset(ERR_LOG5));
+
+	errtype = (err_reg0 & GLD_ERROR_TYPE_MASK) >> GLD_ERROR_TYPE_SHIFT;
+	if (errtype == 3)
+		decode_gld_logged_error(err_reg5);
+	else if (errtype == 0 || errtype == 1)
+		pr_alert("Lower 32-bits of user: %08x\n", err_reg5);
+	else
+		pr_alert("Error type: Unknown; value:%u\n", errtype);
+}
+
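+/*
+ * Error interrupt handler: dump the raw main and observer error logs when
+ * their ERRVLD bits are set, decode the individual registers, clear the
+ * logged error, then panic or WARN depending on enable_panic_on_error.
+ */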
+static irqreturn_t msm_gladiator_isr(int irq, void *dev_id)
+{
+	u32 err_reg;
+	unsigned int err_log, err_buf[MAX_NUM];
+
+	struct msm_gladiator_data *msm_gld_data = dev_id;
+
+	/* Check validity */
+	bool gld_err_valid = readl_relaxed(msm_gld_data->gladiator_virt_base +
+			GLADIATOR_ERRVLD);
+
+	bool obsrv_err_valid = readl_relaxed(
+			msm_gld_data->gladiator_virt_base + OBSERVER_0_ERRVLD);
+
+	if (!gld_err_valid && !obsrv_err_valid) {
+		pr_err("%s Invalid Gladiator error reported, clear it\n",
+				__func__);
+		/* Clear IRQ */
+		clear_gladiator_error(msm_gld_data->gladiator_virt_base);
+		return IRQ_HANDLED;
+	}
+	pr_alert("Gladiator Error Detected:\n");
+	if (gld_err_valid) {
+		for (err_log = FAULTEN; err_log <= ERR_LOG8; err_log++) {
+			err_buf[err_log] = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_gld_offset(err_log));
+		}
+		pr_alert("Main log register data:\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n%08x %08x %08x\n",
+			err_buf[0], err_buf[1], err_buf[2], err_buf[3], err_buf[4], err_buf[5], err_buf[6],
+			err_buf[7], err_buf[8], err_buf[9], err_buf[10]);
+	}
+
+	if (obsrv_err_valid) {
+		for (err_log = ID_COREID; err_log <= STALLEN; err_log++) {
+			err_buf[err_log] = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_obs_offset(err_log));
+		}
+		pr_alert("Observer log register data:\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n%08x\n",
+			err_buf[0], err_buf[1], err_buf[2], err_buf[3], err_buf[4], err_buf[5], err_buf[6], err_buf[7],
+			err_buf[8], err_buf[9], err_buf[10], err_buf[11], err_buf[12]);
+	}
+
+	if (gld_err_valid) {
+		pr_alert("Main error log register data:\n");
+		for (err_log = ERR_LOG0; err_log <= ERR_LOG8; err_log++) {
+			/* skip log register 7 as it's reserved */
+			if (err_log == ERR_LOG7)
+				continue;
+			if (err_log == ERR_LOG5) {
+				decode_gld_errlog5(msm_gld_data);
+				continue;
+			}
+			err_reg = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_gld_offset(err_log));
+			decode_gld_errlog(err_reg, err_log);
+		}
+	}
+	if (obsrv_err_valid) {
+		pr_alert("Observor error log register data:\n");
+		for (err_log = ERR_LOG0; err_log <= STALLEN; err_log++)	{
+			/* skip log registers 2, 6 and 8 as they are reserved */
+			if ((err_log == ERR_LOG2) || (err_log == ERR_LOG6)
+					|| (err_log == ERR_LOG8))
+				continue;
+			err_reg = readl_relaxed(
+					msm_gld_data->gladiator_virt_base +
+					get_obs_offset(err_log));
+			decode_obs_errlog(err_reg, err_log);
+		}
+	}
+	/* Clear IRQ */
+	clear_gladiator_error(msm_gld_data->gladiator_virt_base);
+	if (enable_panic_on_error)
+		panic("Gladiator Cache Interconnect Error Detected!\n");
+	else
+		WARN(1, "Gladiator Cache Interconnect Error Detected\n");
+
+	return IRQ_HANDLED;
+}
+
+static const struct of_device_id gladiator_erp_v2_match_table[] = {
+	{ .compatible = "qcom,msm-gladiator-v2" },
+	{},
+};
+
+static int parse_dt_node(struct platform_device *pdev,
+		struct msm_gladiator_data *msm_gld_data)
+{
+	int ret = 0;
+	struct resource *res;
+
+	res = platform_get_resource_byname(pdev,
+			IORESOURCE_MEM, "gladiator_base");
+	if (!res)
+		return -ENODEV;
+	if (!devm_request_mem_region(&pdev->dev, res->start,
+				resource_size(res),
+				"msm-gladiator-erp")) {
+
+		dev_err(&pdev->dev, "%s cannot reserve gladiator erp region\n",
+				__func__);
+		return -ENXIO;
+	}
+	msm_gld_data->gladiator_virt_base  = devm_ioremap(&pdev->dev,
+			res->start, resource_size(res));
+	if (!msm_gld_data->gladiator_virt_base) {
+		dev_err(&pdev->dev, "%s cannot map gladiator register space\n",
+				__func__);
+		return -ENXIO;
+	}
+	msm_gld_data->erp_irq = platform_get_irq(pdev, 0);
+	if (msm_gld_data->erp_irq < 0)
+		return msm_gld_data->erp_irq;
+
+	/* clear existing errors before enabling the interrupt */
+	clear_gladiator_error(msm_gld_data->gladiator_virt_base);
+	ret = devm_request_irq(&pdev->dev, msm_gld_data->erp_irq,
+			msm_gladiator_isr, IRQF_TRIGGER_HIGH,
+			"gladiator-error", msm_gld_data);
+	if (ret)
+		dev_err(&pdev->dev, "Failed to register irq handler\n");
+
+	return ret;
+}
+
+static inline void gladiator_irq_init(void __iomem *gladiator_virt_base)
+{
+	writel_relaxed(1, gladiator_virt_base + GLADIATOR_FAULTEN);
+	writel_relaxed(1, gladiator_virt_base + OBSERVER_0_FAULTEN);
+}
+
+#define CCI_LEVEL 2
+static int gladiator_erp_pm_callback(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	unsigned int level = (unsigned long) data;
+	struct msm_gladiator_data *msm_gld_data = container_of(nb,
+			struct msm_gladiator_data, pm_notifier_block);
+
+	if (level != CCI_LEVEL)
+		return NOTIFY_DONE;
+
+	switch (val) {
+	case CPU_CLUSTER_PM_EXIT:
+		gladiator_irq_init(msm_gld_data->gladiator_virt_base);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int gladiator_erp_v2_probe(struct platform_device *pdev)
+{
+	int ret = -EINVAL;
+	struct msm_gladiator_data *msm_gld_data;
+
+	msm_gld_data = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_gladiator_data), GFP_KERNEL);
+	if (!msm_gld_data) {
+		ret = -ENOMEM;
+		goto bail;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "atb_clk") >= 0) {
+		msm_gld_data->qdss_clk = devm_clk_get(&pdev->dev, "atb_clk");
+		if (IS_ERR(msm_gld_data->qdss_clk)) {
+			dev_err(&pdev->dev, "Failed to get QDSS ATB clock\n");
+			goto bail;
+		}
+	} else {
+		dev_err(&pdev->dev, "No matching string of QDSS ATB clock\n");
+		goto bail;
+	}
+
+	ret = clk_prepare_enable(msm_gld_data->qdss_clk);
+	if (ret)
+		goto bail;
+
+	ret = parse_dt_node(pdev, msm_gld_data);
+	if (ret)
+		goto err_atb_clk;
+	msm_gld_data->pm_notifier_block.notifier_call =
+		gladiator_erp_pm_callback;
+
+	gladiator_irq_init(msm_gld_data->gladiator_virt_base);
+	platform_set_drvdata(pdev, msm_gld_data);
+	cpu_pm_register_notifier(&msm_gld_data->pm_notifier_block);
+#ifdef CONFIG_PANIC_ON_GLADIATOR_ERROR_V2
+	enable_panic_on_error = 1;
+#endif
+	dev_info(&pdev->dev, "MSM Gladiator Error Reporting V2 Initialized\n");
+	return ret;
+
+err_atb_clk:
+	clk_disable_unprepare(msm_gld_data->qdss_clk);
+
+bail:
+	dev_err(&pdev->dev, "Probe failed bailing out\n");
+	return ret;
+}
+
+static int gladiator_erp_v2_remove(struct platform_device *pdev)
+{
+	struct msm_gladiator_data *msm_gld_data = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	cpu_pm_unregister_notifier(&msm_gld_data->pm_notifier_block);
+	clk_disable_unprepare(msm_gld_data->qdss_clk);
+	return 0;
+}
+
+static struct platform_driver gladiator_erp_driver = {
+	.probe = gladiator_erp_v2_probe,
+	.remove = gladiator_erp_v2_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = gladiator_erp_v2_match_table,
+	},
+};
+
+static int __init init_gladiator_erp_v2(void)
+{
+	int ret;
+
+	ret = scm_is_secure_device();
+	if (ret == 0) {
+		pr_info("Gladiator Error Reporting not available\n");
+		return -ENODEV;
+	}
+
+	return platform_driver_register(&gladiator_erp_driver);
+}
+module_init(init_gladiator_erp_v2);
+
+static void __exit exit_gladiator_erp_v2(void)
+{
+	platform_driver_unregister(&gladiator_erp_driver);
+}
+module_exit(exit_gladiator_erp_v2);
+
+MODULE_DESCRIPTION("Gladiator Error Reporting V2");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/gladiator_hang_detect.c b/drivers/soc/qcom/gladiator_hang_detect.c
new file mode 100644
index 0000000..33b0fa4
--- /dev/null
+++ b/drivers/soc/qcom/gladiator_hang_detect.c
@@ -0,0 +1,561 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/stat.h>
+#include <soc/qcom/scm.h>
+#include <linux/platform_device.h>
+
+#define ACE_OFFSET	0
+#define IO_OFFSET	2
+#define M1_OFFSET	3
+#define M2_OFFSET	4
+#define PCIO_OFFSET	5
+#define ENABLE_MASK_BITS	0x1
+
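+/*
+ * Register field helpers: _WRITE(x, y, z) clears the enable bit of port z
+ * (at z##_OFFSET) in the current config value y and inserts x.
+ */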
+#define _VAL(z)			(ENABLE_MASK_BITS << z##_OFFSET)
+#define _VALUE(_val, z)		(_val<<(z##_OFFSET))
+#define _WRITE(x, y, z)		(((~(_VAL(z))) & y) | _VALUE(x, z))
+
+#define NR_GLA_REG 6
+#define MODULE_NAME	"gladiator_hang_detect"
+#define MAX_THRES	0xFFFFFFFF
+#define MAX_LEN_SYSFS 12
+
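+/*
+ * Per-instance state: physical addresses of the per-port threshold
+ * registers and the shared config register (accessed via SCM io calls),
+ * plus cached threshold/enable values exposed through sysfs.
+ */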
+struct hang_detect {
+	phys_addr_t threshold[NR_GLA_REG];
+	phys_addr_t config;
+	int ACE_enable, IO_enable, M1_enable, M2_enable, PCIO_enable;
+	uint32_t ACE_threshold, IO_threshold, M1_threshold, M2_threshold,
+			 PCIO_threshold;
+	struct kobject kobj;
+	struct mutex lock;
+};
+
+/* interface for exporting attributes */
+struct gladiator_hang_attr {
+	struct attribute        attr;
+	ssize_t (*show)(struct kobject *kobj, struct attribute *attr,
+			char *buf);
+	ssize_t (*store)(struct kobject *kobj, struct attribute *attr,
+			const char *buf, size_t count);
+};
+
+#define GLADIATOR_HANG_ATTR(_name, _mode, _show, _store)	\
+	struct gladiator_hang_attr hang_attr_##_name =	\
+			__ATTR(_name, _mode, _show, _store)
+
+#define to_gladiator_hang_dev(kobj) \
+	container_of(kobj, struct hang_detect, kobj)
+
+#define to_gladiator_attr(_attr) \
+	container_of(_attr, struct gladiator_hang_attr, attr)
+
+static void set_threshold(int offset, struct hang_detect *hang_dev,
+		int32_t threshold_val)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		hang_dev->ACE_threshold = threshold_val;
+		break;
+	case IO_OFFSET:
+		hang_dev->IO_threshold = threshold_val;
+		break;
+	case M1_OFFSET:
+		hang_dev->M1_threshold = threshold_val;
+		break;
+	case M2_OFFSET:
+		hang_dev->M2_threshold = threshold_val;
+		break;
+	case PCIO_OFFSET:
+		hang_dev->PCIO_threshold = threshold_val;
+		break;
+	}
+}
+
+static void get_threshold(int offset, struct hang_detect *hang_dev,
+		uint32_t *reg_value)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		*reg_value = hang_dev->ACE_threshold;
+	break;
+	case IO_OFFSET:
+		*reg_value = hang_dev->IO_threshold;
+		break;
+	case M1_OFFSET:
+		*reg_value = hang_dev->M1_threshold;
+		break;
+	case M2_OFFSET:
+		*reg_value = hang_dev->M2_threshold;
+		break;
+	case PCIO_OFFSET:
+		*reg_value = hang_dev->PCIO_threshold;
+		break;
+	}
+}
+
+static void set_enable(int offset, struct hang_detect *hang_dev,
+		int enabled)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		hang_dev->ACE_enable = enabled;
+		break;
+	case IO_OFFSET:
+		hang_dev->IO_enable = enabled;
+		break;
+	case M1_OFFSET:
+		hang_dev->M1_enable = enabled;
+		break;
+	case M2_OFFSET:
+		hang_dev->M2_enable = enabled;
+		break;
+	case PCIO_OFFSET:
+		hang_dev->PCIO_enable = enabled;
+		break;
+	}
+}
+
+static void get_enable(int offset, struct hang_detect *hang_dev,
+		uint32_t *reg_value)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		*reg_value = hang_dev->ACE_enable;
+		break;
+	case IO_OFFSET:
+		*reg_value = hang_dev->IO_enable;
+		break;
+	case M1_OFFSET:
+		*reg_value = hang_dev->M1_enable;
+		break;
+	case M2_OFFSET:
+		*reg_value = hang_dev->M2_enable;
+		break;
+	case PCIO_OFFSET:
+		*reg_value = hang_dev->PCIO_enable;
+		break;
+	}
+}
+
+static void scm_enable_write(int offset, struct hang_detect *hang_dev,
+		int enabled, uint32_t reg_value, int *ret)
+{
+	switch (offset) {
+	case ACE_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+			_WRITE(enabled, reg_value, ACE));
+		break;
+	case IO_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, IO));
+		break;
+	case M1_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, M1));
+		break;
+	case M2_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, M2));
+		break;
+	case PCIO_OFFSET:
+		*ret = scm_io_write(hang_dev->config,
+				_WRITE(enabled, reg_value, PCIO));
+		break;
+	}
+}
+
+static int enable_check(const char *buf, int *enabled_pt)
+{
+	int ret;
+
+	ret = kstrtoint(buf, 0, enabled_pt);
+	if (ret < 0)
+		return ret;
+	if (!(*enabled_pt == 0 || *enabled_pt == 1))
+		return -EINVAL;
+	return ret;
+}
+
+
+static inline ssize_t generic_enable_show(struct kobject *kobj,
+		struct attribute *attr, char *buf, int offset)
+{
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+	uint32_t reg_value;
+
+	get_enable(offset, hang_dev, &reg_value);
+	return snprintf(buf, MAX_LEN_SYSFS, "%u\n", reg_value);
+}
+
+static inline ssize_t generic_threshold_show(struct kobject *kobj,
+		struct attribute *attr, char *buf, int offset)
+{
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+	uint32_t reg_value;
+
+	get_threshold(offset, hang_dev, &reg_value);
+	return snprintf(buf, MAX_LEN_SYSFS, "0x%x\n", reg_value);
+}
+
+static inline ssize_t generic_threshold_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t count,
+		int offset)
+{
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+	uint32_t threshold_val;
+	int ret;
+
+	ret = kstrtouint(buf, 0, &threshold_val);
+	if (ret < 0)
+		return ret;
+	if (threshold_val == 0)
+		return -EINVAL;
+	if (scm_io_write(hang_dev->threshold[offset],
+				threshold_val)){
+		pr_err("%s: Failed to set threshold for gladiator port",
+				__func__);
+		return -EIO;
+	}
+	set_threshold(offset, hang_dev, threshold_val);
+	return count;
+}
+
+static inline ssize_t generic_enable_store(struct kobject *kobj,
+		struct attribute *attr, const char *buf, size_t count,
+		int offset)
+{
+	int  ret, enabled;
+	uint32_t reg_value;
+	struct hang_detect *hang_dev = to_gladiator_hang_dev(kobj);
+
+	ret = enable_check(buf, &enabled);
+	if (ret < 0)
+		return ret;
+	get_threshold(offset, hang_dev, &reg_value);
+	if (reg_value <= 0)
+		return -EPERM;
+	mutex_lock(&hang_dev->lock);
+	reg_value = scm_io_read(hang_dev->config);
+
+	scm_enable_write(offset, hang_dev, enabled, reg_value, &ret);
+
+	if (ret) {
+		pr_err("%s: Gladiator failed to set enable for port %s\n",
+				__func__, "#_name");
+		mutex_unlock(&hang_dev->lock);
+		return -EIO;
+	}
+	mutex_unlock(&hang_dev->lock);
+	set_enable(offset, hang_dev, enabled);
+	return count;
+}
+
+
+static ssize_t attr_show(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	struct gladiator_hang_attr *gladiator_attr = to_gladiator_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (gladiator_attr->show)
+		ret = gladiator_attr->show(kobj, attr, buf);
+
+	return ret;
+}
+
+static ssize_t attr_store(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	struct gladiator_hang_attr *gladiator_attr = to_gladiator_attr(attr);
+	ssize_t ret = -EIO;
+
+	if (gladiator_attr->store)
+		ret = gladiator_attr->store(kobj, attr, buf, count);
+
+	return ret;
+}
+
+static const struct sysfs_ops gladiator_sysfs_ops = {
+	.show	= attr_show,
+	.store	= attr_store,
+};
+
+static struct kobj_type gladiator_ktype = {
+	.sysfs_ops	= &gladiator_sysfs_ops,
+};
+
+static ssize_t show_ace_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, ACE_OFFSET);
+}
+
+static ssize_t store_ace_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, ACE_OFFSET);
+}
+GLADIATOR_HANG_ATTR(ace_threshold, S_IRUGO|S_IWUSR, show_ace_threshold,
+					store_ace_threshold);
+
+static ssize_t show_io_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, IO_OFFSET);
+}
+
+static ssize_t store_io_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, IO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(io_threshold, S_IRUGO|S_IWUSR, show_io_threshold,
+					store_io_threshold);
+
+static ssize_t show_m1_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, M1_OFFSET);
+}
+
+static ssize_t store_m1_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, M1_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m1_threshold, S_IRUGO|S_IWUSR, show_m1_threshold,
+					store_m1_threshold);
+
+static ssize_t show_m2_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, M2_OFFSET);
+}
+
+static ssize_t store_m2_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, M2_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m2_threshold, S_IRUGO|S_IWUSR, show_m2_threshold,
+					store_m2_threshold);
+
+static ssize_t show_pcio_threshold(struct kobject *kobj, struct attribute *attr,
+				char *buf)
+{
+	return generic_threshold_show(kobj, attr, buf, PCIO_OFFSET);
+}
+
+static ssize_t store_pcio_threshold(struct kobject *kobj, struct attribute *attr,
+				const char *buf, size_t count)
+{
+	return generic_threshold_store(kobj, attr, buf, count, PCIO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(pcio_threshold, S_IRUGO|S_IWUSR, show_pcio_threshold,
+					store_pcio_threshold);
+
+static ssize_t show_ace_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, ACE_OFFSET);
+}
+
+static ssize_t store_ace_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, ACE_OFFSET);
+}
+GLADIATOR_HANG_ATTR(ace_enable, S_IRUGO|S_IWUSR, show_ace_enable,
+		store_ace_enable);
+
+static ssize_t show_io_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, IO_OFFSET);
+}
+
+static ssize_t store_io_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, IO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(io_enable, S_IRUGO|S_IWUSR,
+		show_io_enable, store_io_enable);
+
+static ssize_t show_m1_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, M1_OFFSET);
+}
+
+static ssize_t store_m1_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, M1_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m1_enable, S_IRUGO|S_IWUSR,
+		show_m1_enable, store_m1_enable);
+
+static ssize_t show_m2_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, M2_OFFSET);
+}
+
+static ssize_t store_m2_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, M2_OFFSET);
+}
+GLADIATOR_HANG_ATTR(m2_enable, S_IRUGO|S_IWUSR,
+		show_m2_enable, store_m2_enable);
+
+static ssize_t show_pcio_enable(struct kobject *kobj,
+			struct attribute *attr, char *buf)
+{
+	return generic_enable_show(kobj, attr, buf, PCIO_OFFSET);
+}
+
+static ssize_t store_pcio_enable(struct kobject *kobj,
+			struct attribute *attr, const char *buf, size_t count)
+{
+	return generic_enable_store(kobj, attr, buf, count, PCIO_OFFSET);
+}
+GLADIATOR_HANG_ATTR(pcio_enable, S_IRUGO|S_IWUSR,
+		show_pcio_enable, store_pcio_enable);
+
+static struct attribute *hang_attrs[] = {
+	&hang_attr_ace_threshold.attr,
+	&hang_attr_io_threshold.attr,
+	&hang_attr_m1_threshold.attr,
+	&hang_attr_m2_threshold.attr,
+	&hang_attr_pcio_threshold.attr,
+	&hang_attr_ace_enable.attr,
+	&hang_attr_io_enable.attr,
+	&hang_attr_m1_enable.attr,
+	&hang_attr_m2_enable.attr,
+	&hang_attr_pcio_enable.attr,
+	NULL
+};
+
+static struct attribute_group hang_attr_group = {
+	.attrs = hang_attrs,
+};
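+
+/*
+ * Illustrative sysfs usage (paths and values are examples only): the probe
+ * below adds the "gladiator_hang_detect" kobject under the cpu subsystem,
+ * so these attributes are expected to appear under
+ * /sys/devices/system/cpu/gladiator_hang_detect/.  The enable handler
+ * refuses to enable a port whose threshold is still zero, so the threshold
+ * is written first (the value shown is a placeholder and must not exceed
+ * MAX_THRES):
+ *
+ *   echo 0xffff > /sys/devices/system/cpu/gladiator_hang_detect/ace_threshold
+ *   echo 1 > /sys/devices/system/cpu/gladiator_hang_detect/ace_enable
+ *   cat /sys/devices/system/cpu/gladiator_hang_detect/ace_enable
+ */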
+
+static const struct of_device_id msm_gladiator_hang_detect_table[] = {
+	{ .compatible = "qcom,gladiator-hang-detect" },
+	{}
+};
+
+static int msm_gladiator_hang_detect_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct hang_detect *hang_det = NULL;
+	int i = 0, ret;
+	u32 treg[NR_GLA_REG], creg;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	hang_det = devm_kzalloc(&pdev->dev,
+			sizeof(struct hang_detect), GFP_KERNEL);
+
+	if (!hang_det) {
+		pr_err("Can't allocate hang_detect memory\n");
+		return -ENOMEM;
+	}
+
+	ret = of_property_read_u32_array(node, "qcom,threshold-arr",
+			treg, NR_GLA_REG);
+	if (ret) {
+		pr_err("Can't get threshold-arr property\n");
+		return -EINVAL;
+	}
+
+	ret = of_property_read_u32(node, "qcom,config-reg", &creg);
+	if (ret) {
+		pr_err("Can't get config-reg property\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < NR_GLA_REG; i++)
+		hang_det->threshold[i] = treg[i];
+
+	hang_det->config = creg;
+
+	ret = kobject_init_and_add(&hang_det->kobj, &gladiator_ktype,
+		&cpu_subsys.dev_root->kobj, "%s", "gladiator_hang_detect");
+	if (ret) {
+		pr_err("%s: kobject_init_and_add failed\n", __func__);
+		goto out_put_kobj;
+	}
+
+	ret = sysfs_create_group(&hang_det->kobj, &hang_attr_group);
+	if (ret) {
+		pr_err("%s: sysfs_create_group failed\n", __func__);
+		goto out_del_kobj;
+	}
+	mutex_init(&hang_det->lock);
+	platform_set_drvdata(pdev, hang_det);
+	return 0;
+
+out_del_kobj:
+	kobject_del(&hang_det->kobj);
+out_put_kobj:
+	kobject_put(&hang_det->kobj);
+
+	return ret;
+}
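+
+/*
+ * Illustrative device-tree node matched by the table above (the node name
+ * and register addresses are placeholders; qcom,threshold-arr must carry
+ * exactly NR_GLA_REG entries, one per monitored port):
+ *
+ *	gladiator_hang_detect {
+ *		compatible = "qcom,gladiator-hang-detect";
+ *		qcom,threshold-arr = <0x0 0x0 0x0 0x0 0x0>;
+ *		qcom,config-reg = <0x0>;
+ *	};
+ */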
+
+static int msm_gladiator_hang_detect_remove(struct platform_device *pdev)
+{
+	struct hang_detect *hang_det = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+	sysfs_remove_group(&hang_det->kobj, &hang_attr_group);
+	kobject_del(&hang_det->kobj);
+	kobject_put(&hang_det->kobj);
+	mutex_destroy(&hang_det->lock);
+	return 0;
+}
+
+static struct platform_driver msm_gladiator_hang_detect_driver = {
+	.probe = msm_gladiator_hang_detect_probe,
+	.remove = msm_gladiator_hang_detect_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_gladiator_hang_detect_table,
+	},
+};
+
+static int __init init_gladiator_hang_detect(void)
+{
+	return platform_driver_register(&msm_gladiator_hang_detect_driver);
+}
+module_init(init_gladiator_hang_detect);
+
+static void __exit exit_gladiator_hang_detect(void)
+{
+	platform_driver_unregister(&msm_gladiator_hang_detect_driver);
+}
+module_exit(exit_gladiator_hang_detect);
+
+MODULE_DESCRIPTION("MSM Gladiator Hang Detect Driver");
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
new file mode 100644
index 0000000..49a0173
--- /dev/null
+++ b/drivers/soc/qcom/glink.c
@@ -0,0 +1,6036 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <linux/rwsem.h>
+#include <linux/pm_qos.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+/* Number of internal IPC Logging log pages */
+#define NUM_LOG_PAGES	10
+#define GLINK_PM_QOS_HOLDOFF_MS		10
+#define GLINK_QOS_DEF_NUM_TOKENS	10
+#define GLINK_QOS_DEF_NUM_PRIORITY	1
+#define GLINK_QOS_DEF_MTU		2048
+
+#define GLINK_KTHREAD_PRIO 1
+/**
+ * struct glink_qos_priority_bin - Packet Scheduler's priority bucket
+ * @max_rate_kBps:	Maximum rate supported by the priority bucket.
+ * @power_state:	Transport power state for this priority bin.
+ * @tx_ready:		List of channels ready for tx in the priority bucket.
+ * @active_ch_cnt:	Active channels of this priority.
+ */
+struct glink_qos_priority_bin {
+	unsigned long max_rate_kBps;
+	uint32_t power_state;
+	struct list_head tx_ready;
+	uint32_t active_ch_cnt;
+};
+
+/**
+ * struct glink_core_xprt_ctx - transport representation structure
+ * @xprt_state_lhb0:		controls read/write access to transport state
+ * @list_node:			used to chain this transport in a global
+ *				transport list
+ * @name:			name of this transport
+ * @edge:			what this transport connects to
+ * @id:				the id to use for channel migration
+ * @versions:			array of transport versions this implementation
+ *				supports
+ * @versions_entries:		number of entries in @versions
+ * @local_version_idx:		local version index into @versions this
+ *				transport is currently running
+ * @remote_version_idx:		remote version index into @versions this
+ *				transport is currently running
+ * @l_features:			Features negotiated by the local side
+ * @capabilities:		Capabilities of underlying transport
+ * @ops:			transport defined implementation of common
+ *				operations
+ * @local_state:		value from local_channel_state_e representing
+ *				the local state of this transport
+ * @remote_neg_completed:	is the version negotiation with the remote end
+ *				completed
+ * @xprt_ctx_lock_lhb1:		lock to protect @next_lcid and @channels
+ * @next_lcid:			logical channel identifier to assign to the next
+ *				created channel
+ * @max_cid:			maximum number of channel identifiers supported
+ * @max_iid:			maximum number of intent identifiers supported
+ * @tx_kwork:			kthread work item to process @tx_ready
+ * @tx_wq:			kthread worker that runs @tx_kwork
+ * @tx_task:		handle to the running kthread
+ * @channels:			list of all existing channels on this transport
+ * @dummy_in_use:		True when channels are being migrated to dummy.
+ * @notified:			list holds channels during dummy xprt cleanup.
+ * @mtu:			MTU supported by this transport.
+ * @token_count:		Number of tokens to be assigned per assignment.
+ * @curr_qos_rate_kBps:		Aggregate of currently supported QoS requests.
+ * @threshold_rate_kBps:	Maximum Rate allocated for QoS traffic.
+ * @num_priority:		Number of priority buckets in the transport.
+ * @tx_ready_lock_lhb3:	lock to protect @tx_ready
+ * @active_high_prio:		Highest priority of active channels.
+ * @prio_bin:			Pointer to priority buckets.
+ * @pm_qos_req:			power management QoS request for TX path
+ * @qos_req_active:		a vote is active with the PM QoS system
+ * @tx_path_activity:		transmit activity has occurred
+ * @pm_qos_work:		removes PM QoS vote due to inactivity
+ * @xprt_dbgfs_lock_lhb4:	debugfs channel structure lock
+ * @log_ctx:			IPC logging context for this transport.
+ */
+struct glink_core_xprt_ctx {
+	struct rwref_lock xprt_state_lhb0;
+	struct list_head list_node;
+	char name[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	uint16_t id;
+	const struct glink_core_version *versions;
+	size_t versions_entries;
+	uint32_t local_version_idx;
+	uint32_t remote_version_idx;
+	uint32_t l_features;
+	uint32_t capabilities;
+	struct glink_transport_if *ops;
+	enum transport_state_e local_state;
+	bool remote_neg_completed;
+
+	spinlock_t xprt_ctx_lock_lhb1;
+	struct list_head channels;
+	uint32_t next_lcid;
+	struct list_head free_lcid_list;
+	struct list_head notified;
+	bool dummy_in_use;
+
+	uint32_t max_cid;
+	uint32_t max_iid;
+	struct kthread_work tx_kwork;
+	struct kthread_worker tx_wq;
+	struct task_struct *tx_task;
+
+	size_t mtu;
+	uint32_t token_count;
+	unsigned long curr_qos_rate_kBps;
+	unsigned long threshold_rate_kBps;
+	uint32_t num_priority;
+	spinlock_t tx_ready_lock_lhb3;
+	uint32_t active_high_prio;
+	struct glink_qos_priority_bin *prio_bin;
+
+	struct pm_qos_request pm_qos_req;
+	bool qos_req_active;
+	bool tx_path_activity;
+	struct delayed_work pm_qos_work;
+	struct glink_core_edge_ctx *edge_ctx;
+
+	struct mutex xprt_dbgfs_lock_lhb4;
+	void *log_ctx;
+};
+
+/**
+ * struct glink_core_edge_ctx - edge context
+ * @list_node:			edge list node used by the edge list
+ * @name:			name of the edge
+ * @edge_migration_lock_lhd2:	mutex lock for migration over the edge
+ * @edge_ref_lock_lhd1:		lock for the edge reference count
+ */
+struct glink_core_edge_ctx {
+	struct list_head list_node;
+	char name[GLINK_NAME_SIZE];
+	struct mutex edge_migration_lock_lhd2;
+	struct rwref_lock edge_ref_lock_lhd1;
+};
+
+static LIST_HEAD(edge_list);
+static DEFINE_MUTEX(edge_list_lock_lhd0);
+/**
+ * struct channel_ctx - channel context
+ * @ch_state_lhb2:	controls read/write access to channel state
+ * @port_list_node:	channel list node used by transport "channels" list
+ * @tx_ready_list_node:	channels that have data ready to transmit
+ * @name:		name of the channel
+ *
+ * @user_priv:		user opaque data type passed into glink_open()
+ * @notify_rx:		RX notification function
+ * @notify_tx_done:	TX-done notification function (remote side is done)
+ * @notify_state:	Channel state (connected / disconnected) notifications
+ * @notify_rx_intent_req: Request from remote side for an intent
+ * @notify_rxv:		RX notification function (for io buffer chain)
+ * @notify_rx_sigs:	RX signal change notification
+ * @notify_rx_abort:	Channel close RX Intent aborted
+ * @notify_tx_abort:	Channel close TX aborted
+ * @notify_rx_tracer_pkt:	Receive notification for tracer packet
+ * @notify_remote_rx_intent:	Receive notification for remote-queued RX intent
+ *
+ * @transport_ptr:		Transport this channel uses
+ * @lcid:			Local channel ID
+ * @rcid:			Remote channel ID
+ * @local_open_state:		Local channel state
+ * @remote_opened:		Remote channel state (opened or closed)
+ * @int_req_ack:		Remote side intent request ACK state
+ * @int_req_ack_complete:	Intent tracking completion - received remote ACK
+ * @int_req_complete:		Intent tracking completion - received intent
+ * @rx_intent_req_timeout_jiffies:	Timeout for requesting an RX intent, in
+ *			jiffies; if set to 0, timeout is infinite
+ *
+ * @local_rx_intent_lst_lock_lhc1:	RX intent list lock
+ * @local_rx_intent_list:		Active RX Intents queued by client
+ * @local_rx_intent_ntfy_list:		Client notified, waiting for rx_done()
+ * @local_rx_intent_free_list:		Available intent container structure
+ *
+ * @rmt_rx_intent_lst_lock_lhc2:	Remote RX intent list lock
+ * @rmt_rx_intent_list:			Remote RX intent list
+ *
+ * @max_used_liid:			Maximum Local Intent ID used
+ * @dummy_riid:				Dummy remote intent ID
+ *
+ * @tx_lists_lock_lhc3:		TX list lock
+ * @tx_active:				Ready to transmit
+ *
+ * @tx_pending_rmt_done_lock_lhc4:	Remote-done list lock
+ * @tx_pending_remote_done:		Transmitted, waiting for remote done
+ * @lsigs:				Local signals
+ * @rsigs:				Remote signals
+ * @pending_delete:			waiting for channel to be deleted
+ * @no_migrate:				The local client does not want to
+ *					migrate transports
+ * @local_xprt_req:			The transport the local side requested
+ * @local_xprt_resp:			The response to @local_xprt_req
+ * @remote_xprt_req:			The transport the remote side requested
+ * @remote_xprt_resp:			The response to @remote_xprt_req
+ * @curr_priority:			Channel's current priority.
+ * @initial_priority:			Channel's initial priority.
+ * @token_count:			Tokens for consumption by packet.
+ * @txd_len:				Transmitted data size in the current
+ *					token assignment cycle.
+ * @token_start_time:			Time at which tokens are assigned.
+ * @req_rate_kBps:			Current QoS request by the channel.
+ * @tx_intent_cnt:			Intent count to transmit soon in future.
+ * @tx_cnt:				Packets to be picked by tx scheduler.
+ */
+struct channel_ctx {
+	struct rwref_lock ch_state_lhb2;
+	struct list_head port_list_node;
+	struct list_head tx_ready_list_node;
+	char name[GLINK_NAME_SIZE];
+
+	/* user info */
+	void *user_priv;
+	void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv,
+			const void *ptr, size_t size);
+	void (*notify_tx_done)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr);
+	void (*notify_state)(void *handle, const void *priv,
+			unsigned int event);
+	bool (*notify_rx_intent_req)(void *handle, const void *priv,
+			size_t req_size);
+	void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+			   void *iovec, size_t size,
+			   void * (*vbuf_provider)(void *iovec, size_t offset,
+						  size_t *size),
+			   void * (*pbuf_provider)(void *iovec, size_t offset,
+						  size_t *size));
+	void (*notify_rx_sigs)(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs);
+	void (*notify_rx_abort)(void *handle, const void *priv,
+				const void *pkt_priv);
+	void (*notify_tx_abort)(void *handle, const void *priv,
+				const void *pkt_priv);
+	void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr, size_t size);
+	void (*notify_remote_rx_intent)(void *handle, const void *priv,
+					size_t size);
+
+	/* internal port state */
+	struct glink_core_xprt_ctx *transport_ptr;
+	uint32_t lcid;
+	uint32_t rcid;
+	enum local_channel_state_e local_open_state;
+	bool remote_opened;
+	bool int_req_ack;
+	struct completion int_req_ack_complete;
+	struct completion int_req_complete;
+	unsigned long rx_intent_req_timeout_jiffies;
+
+	spinlock_t local_rx_intent_lst_lock_lhc1;
+	struct list_head local_rx_intent_list;
+	struct list_head local_rx_intent_ntfy_list;
+	struct list_head local_rx_intent_free_list;
+
+	spinlock_t rmt_rx_intent_lst_lock_lhc2;
+	struct list_head rmt_rx_intent_list;
+
+	uint32_t max_used_liid;
+	uint32_t dummy_riid;
+
+	spinlock_t tx_lists_lock_lhc3;
+	struct list_head tx_active;
+
+	spinlock_t tx_pending_rmt_done_lock_lhc4;
+	struct list_head tx_pending_remote_done;
+
+	uint32_t lsigs;
+	uint32_t rsigs;
+	bool pending_delete;
+
+	bool no_migrate;
+	uint16_t local_xprt_req;
+	uint16_t local_xprt_resp;
+	uint16_t remote_xprt_req;
+	uint16_t remote_xprt_resp;
+
+	uint32_t curr_priority;
+	uint32_t initial_priority;
+	uint32_t token_count;
+	size_t txd_len;
+	unsigned long token_start_time;
+	unsigned long req_rate_kBps;
+	uint32_t tx_intent_cnt;
+	uint32_t tx_cnt;
+};
+
+static struct glink_core_if core_impl;
+static void *log_ctx;
+static unsigned int glink_debug_mask = QCOM_GLINK_INFO;
+module_param_named(debug_mask, glink_debug_mask,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static unsigned int glink_pm_qos;
+module_param_named(pm_qos_enable, glink_pm_qos,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+static LIST_HEAD(transport_list);
+
+/*
+ * Used while notifying the clients about link state events. Since the clients
+ * need to store the callback information temporarily and since all the
+ * existing accesses to transport list are in non-IRQ context, defining the
+ * transport_list_lock as a mutex.
+ */
+static DEFINE_MUTEX(transport_list_lock_lha0);
+
+struct link_state_notifier_info {
+	struct list_head list;
+	char transport[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	void (*glink_link_state_notif_cb)(
+			struct glink_link_state_cb_info *cb_info, void *priv);
+	void *priv;
+};
+static LIST_HEAD(link_state_notifier_list);
+static DEFINE_MUTEX(link_state_notifier_lock_lha1);
+
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+						       const char *name,
+						       bool initial_xprt,
+						       uint16_t *best_id);
+
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt);
+
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t lcid);
+
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t rcid);
+
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info);
+
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info);
+
+static void tx_func(struct kthread_work *work);
+
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					const char *name);
+
+static void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+					uint32_t riid, void *cookie);
+
+static int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+			uint32_t *riid_ptr, size_t *intent_size, void **cookie);
+
+static struct glink_core_rx_intent *ch_push_local_rx_intent(
+		struct channel_ctx *ctx, const void *pkt_priv, size_t size);
+
+static void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent(
+		struct channel_ctx *ctx, uint32_t liid);
+
+static void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+				struct glink_core_rx_intent *intent_ptr);
+
+static struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+		struct channel_ctx *ctx, const void *ptr);
+
+static void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+			struct glink_core_rx_intent *liid_ptr, bool reuse);
+
+static struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+		struct channel_ctx *ctx);
+
+static void ch_purge_intent_lists(struct channel_ctx *ctx);
+
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+			struct channel_ctx *ctx,
+			uint32_t rcid);
+
+static bool ch_is_fully_opened(struct channel_ctx *ctx);
+static bool ch_is_fully_closed(struct channel_ctx *ctx);
+
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(struct channel_ctx *ctx,
+							uint32_t riid);
+
+static void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+					struct glink_core_tx_pkt *tx_pkt);
+
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+					*if_ptr, uint32_t rcid, bool granted);
+
+static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe);
+
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+					   enum glink_link_state link_state);
+
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr);
+static void glink_pm_qos_cancel_worker(struct work_struct *work);
+static bool ch_update_local_state(struct channel_ctx *ctx,
+			enum local_channel_state_e lstate);
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate);
+static void glink_core_deinit_xprt_qos_cfg(
+			struct glink_core_xprt_ctx *xprt_ptr);
+
+#define glink_prio_to_power_state(xprt_ctx, priority) \
+		((xprt_ctx)->prio_bin[priority].power_state)
+
+#define GLINK_GET_CH_TX_STATE(ctx) \
+		((ctx)->tx_intent_cnt || (ctx)->tx_cnt)
+
+/**
+ * glink_ssr() - Clean up locally for SSR by simulating remote close
+ * @subsystem:	The name of the subsystem being restarted
+ *
+ * Call into the transport using the ssr(if_ptr) function to allow it to
+ * clean up any necessary structures, then simulate a remote close from
+ * subsystem for all channels on that edge.
+ *
+ * Return: 0 on success; -ENODEV if no fully-opened transport matches
+ * @subsystem.
+ */
+int glink_ssr(const char *subsystem)
+{
+	int ret = 0;
+	bool transport_found = false;
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+	struct channel_ctx *ch_ctx, *temp_ch_ctx;
+	uint32_t i;
+	unsigned long flags;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt_ctx, &transport_list, list_node) {
+		if (!strcmp(subsystem, xprt_ctx->edge) &&
+				xprt_is_fully_opened(xprt_ctx)) {
+			GLINK_INFO_XPRT(xprt_ctx, "%s: SSR\n", __func__);
+			spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3,
+					  flags);
+			for (i = 0; i < xprt_ctx->num_priority; i++)
+				list_for_each_entry_safe(ch_ctx, temp_ch_ctx,
+						&xprt_ctx->prio_bin[i].tx_ready,
+						tx_ready_list_node)
+					list_del_init(
+						&ch_ctx->tx_ready_list_node);
+			spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3,
+						flags);
+
+			xprt_ctx->ops->ssr(xprt_ctx->ops);
+			transport_found = true;
+		}
+	}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	if (!transport_found)
+		ret = -ENODEV;
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_ssr);
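+
+/*
+ * Usage sketch (the subsystem name here is only an example): a subsystem
+ * restart handler would call glink_ssr("modem") to flush the per-priority
+ * tx_ready lists and let each matching transport clean up via its ssr()
+ * callback, simulating a remote close for the affected edge.
+ */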
+
+/**
+ * glink_core_ch_close_ack_common() - handles the common operations during
+ *                                    close ack.
+ * @ctx:	Pointer to channel instance.
+ * @safe:	True if called while holding the ctx lock
+ *
+ * Return: True if the channel is fully closed after the state change,
+ *	false otherwise.
+ */
+static bool glink_core_ch_close_ack_common(struct channel_ctx *ctx, bool safe)
+{
+	bool is_fully_closed;
+
+	if (ctx == NULL)
+		return false;
+
+	if (safe) {
+		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+		is_fully_closed = ch_is_fully_closed(ctx);
+	} else {
+		is_fully_closed = ch_update_local_state(ctx,
+							GLINK_CHANNEL_CLOSED);
+	}
+
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:GLINK_CHANNEL_CLOSING->GLINK_CHANNEL_CLOSED\n",
+		__func__);
+
+	if (ctx->notify_state) {
+		ctx->notify_state(ctx, ctx->user_priv,
+			GLINK_LOCAL_DISCONNECTED);
+		ch_purge_intent_lists(ctx);
+		GLINK_INFO_PERF_CH(ctx,
+		"%s: notify state: GLINK_LOCAL_DISCONNECTED\n",
+		__func__);
+	}
+
+	return is_fully_closed;
+}
+
+/**
+ * glink_core_remote_close_common() - Handles the common operations during
+ *                                    a remote close.
+ * @ctx:	Pointer to channel instance.
+ * @safe:       True if called with the ctx rwref lock already held.
+ * Return: True if the channel is fully closed after the state change,
+ *	false otherwise.
+ */
+static bool glink_core_remote_close_common(struct channel_ctx *ctx, bool safe)
+{
+	bool is_fully_closed;
+
+	if (ctx == NULL)
+		return false;
+
+	if (safe) {
+		ctx->remote_opened = false;
+		is_fully_closed = ch_is_fully_closed(ctx);
+	} else {
+		is_fully_closed = ch_update_rmt_state(ctx, false);
+	}
+	ctx->rcid = 0;
+
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED &&
+		ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+		if (ctx->notify_state)
+			ctx->notify_state(ctx, ctx->user_priv,
+				GLINK_REMOTE_DISCONNECTED);
+		GLINK_INFO_CH(ctx,
+				"%s: %s: GLINK_REMOTE_DISCONNECTED\n",
+				__func__, "notify state");
+	}
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		GLINK_INFO_CH(ctx,
+			"%s: %s, %s\n", __func__,
+			"Did not send GLINK_REMOTE_DISCONNECTED",
+			"local state is already CLOSED");
+
+	ctx->int_req_ack = false;
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+	ch_purge_intent_lists(ctx);
+
+	return is_fully_closed;
+}
+
+/**
+ * glink_qos_calc_rate_kBps() - Calculate the transmit rate in kBps
+ * @pkt_size:		Worst case packet size per transmission.
+ * @interval_us:	Packet transmit interval in us.
+ *
+ * This function is used to calculate the transmission rate of a channel
+ * in kBps.
+ *
+ * Return: Transmission rate in kBps.
+ */
+static unsigned long glink_qos_calc_rate_kBps(size_t pkt_size,
+				       unsigned long interval_us)
+{
+	unsigned long rate_kBps, rem;
+
+	rate_kBps = pkt_size * USEC_PER_SEC;
+	rem = do_div(rate_kBps, (interval_us * 1024));
+	return rate_kBps;
+}
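+
+/*
+ * Worked example for the rate calculation above (numbers are illustrative):
+ * a channel sending a worst-case 2048-byte packet every 1000 us yields
+ * 2048 * 1000000 / (1000 * 1024) = 2000 kBps.
+ */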
+
+/**
+ * glink_qos_check_feasibility() - Feasibility test on a QoS Request
+ * @xprt_ctx:		Transport in which the QoS request is made.
+ * @req_rate_kBps:	QoS Request.
+ *
+ * This function is used to perform the schedulability test on a QoS request
+ * over a specific transport.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_check_feasibility(struct glink_core_xprt_ctx *xprt_ctx,
+				       unsigned long req_rate_kBps)
+{
+	unsigned long new_rate_kBps;
+
+	if (xprt_ctx->num_priority == GLINK_QOS_DEF_NUM_PRIORITY)
+		return -EOPNOTSUPP;
+
+	new_rate_kBps = xprt_ctx->curr_qos_rate_kBps + req_rate_kBps;
+	if (new_rate_kBps > xprt_ctx->threshold_rate_kBps) {
+		GLINK_ERR_XPRT(xprt_ctx,
+			"New_rate(%lu + %lu) > threshold_rate(%lu)\n",
+			xprt_ctx->curr_qos_rate_kBps, req_rate_kBps,
+			xprt_ctx->threshold_rate_kBps);
+		return -EBUSY;
+	}
+	return 0;
+}
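+
+/*
+ * Example of the feasibility check above (illustrative numbers): with
+ * curr_qos_rate_kBps = 3000 and threshold_rate_kBps = 4000, a new request
+ * of 1500 kBps gives 4500 > 4000 and is rejected with -EBUSY, while a
+ * request of 500 kBps (3500 <= 4000) is accepted.
+ */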
+
+/**
+ * glink_qos_update_ch_prio() - Update the channel priority
+ * @ctx:		Channel context whose priority is updated.
+ * @new_priority:	New priority of the channel.
+ *
+ * This function is called to update the channel priority during QoS request,
+ * QoS Cancel or Priority evaluation by packet scheduler. This function must
+ * be called with transport's tx_ready_lock_lhb3 lock and channel's
+ * tx_lists_lock_lhc3 locked.
+ */
+static void glink_qos_update_ch_prio(struct channel_ctx *ctx,
+				     uint32_t new_priority)
+{
+	uint32_t old_priority;
+
+	if (unlikely(!ctx))
+		return;
+
+	old_priority = ctx->curr_priority;
+	if (!list_empty(&ctx->tx_ready_list_node)) {
+		ctx->transport_ptr->prio_bin[old_priority].active_ch_cnt--;
+		list_move(&ctx->tx_ready_list_node,
+			  &ctx->transport_ptr->prio_bin[new_priority].tx_ready);
+		ctx->transport_ptr->prio_bin[new_priority].active_ch_cnt++;
+	}
+	ctx->curr_priority = new_priority;
+}
+
+/**
+ * glink_qos_assign_priority() - Assign priority to a channel
+ * @ctx:		Channel for which the priority has to be assigned.
+ * @req_rate_kBps:	QoS request by the channel.
+ *
+ * This function is used to assign a priority to the channel depending on its
+ * QoS Request.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_assign_priority(struct channel_ctx *ctx,
+				     unsigned long req_rate_kBps)
+{
+	int ret;
+	uint32_t i;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	if (ctx->req_rate_kBps) {
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
+					flags);
+		GLINK_ERR_CH(ctx, "%s: QoS Request already exists\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = glink_qos_check_feasibility(ctx->transport_ptr, req_rate_kBps);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
+					flags);
+		return ret;
+	}
+
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	i = ctx->transport_ptr->num_priority - 1;
+	while (i > 0 &&
+	       ctx->transport_ptr->prio_bin[i-1].max_rate_kBps >= req_rate_kBps)
+		i--;
+
+	ctx->initial_priority = i;
+	glink_qos_update_ch_prio(ctx, i);
+	ctx->req_rate_kBps = req_rate_kBps;
+	if (i > 0) {
+		ctx->transport_ptr->curr_qos_rate_kBps += req_rate_kBps;
+		ctx->token_count = ctx->transport_ptr->token_count;
+		ctx->txd_len = 0;
+		ctx->token_start_time = arch_counter_get_cntvct();
+	}
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	return 0;
+}
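+
+/*
+ * Example of the bin selection above (bin rates are illustrative): with
+ * num_priority = 3 and prio_bin[].max_rate_kBps = {100, 1000, 10000}, a
+ * request of 500 kBps walks down from bin 2 and settles on bin 1 (bin 0's
+ * 100 kBps cannot cover it), while a request of 50 kBps ends up in bin 0.
+ */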
+
+/**
+ * glink_qos_reset_priority() - Reset the channel priority
+ * @ctx:	Channel for which the priority is reset.
+ *
+ * This function is used to reset the channel priority when the QoS request
+ * is cancelled by the channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_reset_priority(struct channel_ctx *ctx)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	if (ctx->initial_priority > 0) {
+		ctx->initial_priority = 0;
+		glink_qos_update_ch_prio(ctx, 0);
+		ctx->transport_ptr->curr_qos_rate_kBps -= ctx->req_rate_kBps;
+		ctx->txd_len = 0;
+		ctx->req_rate_kBps = 0;
+	}
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	return 0;
+}
+
+/**
+ * glink_qos_ch_vote_xprt() - Vote the transport that channel is active
+ * @ctx:	Channel context which is active.
+ *
+ * This function is called to vote for the transport either when the channel
+ * is transmitting or when it shows an intention to transmit sooner. This
+ * function must be called with transport's tx_ready_lock_lhb3 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_vote_xprt(struct channel_ctx *ctx)
+{
+	uint32_t prio;
+
+	if (unlikely(!ctx || !ctx->transport_ptr))
+		return -EINVAL;
+
+	prio = ctx->curr_priority;
+	ctx->transport_ptr->prio_bin[prio].active_ch_cnt++;
+
+	if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt == 1 &&
+	    ctx->transport_ptr->active_high_prio < prio) {
+		/*
+		 * One active channel in this priority and this is the
+		 * highest active priority bucket
+		 */
+		ctx->transport_ptr->active_high_prio = prio;
+		return ctx->transport_ptr->ops->power_vote(
+				ctx->transport_ptr->ops,
+				glink_prio_to_power_state(ctx->transport_ptr,
+							  prio));
+	}
+	return 0;
+}
+
+/**
+ * glink_qos_ch_unvote_xprt() - Unvote the transport when channel is inactive
+ * @ctx:	Channel context which is inactive.
+ *
+ * This function is called to unvote for the transport when all the packets
+ * queued by the channel have been transmitted by the scheduler. This
+ * function must be called with transport's tx_ready_lock_lhb3 lock and
+ * channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_ch_unvote_xprt(struct channel_ctx *ctx)
+{
+	uint32_t prio;
+
+	if (unlikely(!ctx || !ctx->transport_ptr))
+		return -EINVAL;
+
+	prio = ctx->curr_priority;
+	ctx->transport_ptr->prio_bin[prio].active_ch_cnt--;
+
+	if (ctx->transport_ptr->prio_bin[prio].active_ch_cnt ||
+	    ctx->transport_ptr->active_high_prio > prio)
+		return 0;
+
+	/*
+	 * No active channel in this priority and this is the
+	 * highest active priority bucket
+	 */
+	while (prio > 0) {
+		prio--;
+		if (!ctx->transport_ptr->prio_bin[prio].active_ch_cnt)
+			continue;
+
+		ctx->transport_ptr->active_high_prio = prio;
+		return ctx->transport_ptr->ops->power_vote(
+				ctx->transport_ptr->ops,
+				glink_prio_to_power_state(ctx->transport_ptr,
+							  prio));
+	}
+	return ctx->transport_ptr->ops->power_unvote(ctx->transport_ptr->ops);
+}
+
+/**
+ * glink_qos_add_ch_tx_intent() - Add the channel's intention to transmit soon
+ * @ctx:	Channel context which is going to be active.
+ *
+ * This function is called to update the channel state when it is intending to
+ * transmit sooner. This function must be called with transport's
+ * tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_add_ch_tx_intent(struct channel_ctx *ctx)
+{
+	bool active_tx;
+
+	if (unlikely(!ctx))
+		return -EINVAL;
+
+	active_tx = GLINK_GET_CH_TX_STATE(ctx);
+	ctx->tx_intent_cnt++;
+	if (!active_tx)
+		glink_qos_ch_vote_xprt(ctx);
+	return 0;
+}
+
+/**
+ * glink_qos_do_ch_tx() - Update the channel's state that it is transmitting
+ * @ctx:	Channel context which is transmitting.
+ *
+ * This function is called to update the channel state when it is queueing a
+ * packet to transmit. This function must be called with transport's
+ * tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3 locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_do_ch_tx(struct channel_ctx *ctx)
+{
+	bool active_tx;
+
+	if (unlikely(!ctx))
+		return -EINVAL;
+
+	active_tx = GLINK_GET_CH_TX_STATE(ctx);
+	ctx->tx_cnt++;
+	if (ctx->tx_intent_cnt)
+		ctx->tx_intent_cnt--;
+	if (!active_tx)
+		glink_qos_ch_vote_xprt(ctx);
+	return 0;
+}
+
+/**
+ * glink_qos_done_ch_tx() - Update the channel's state when transmission is done
+ * @ctx:	Channel context for which all packets are transmitted.
+ *
+ * This function is called to update the channel state when all packets in its
+ * transmit queue are successfully transmitted. This function must be called
+ * with transport's tx_ready_lock_lhb3 lock and channel's tx_lists_lock_lhc3
+ * locked.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_qos_done_ch_tx(struct channel_ctx *ctx)
+{
+	bool active_tx;
+
+	if (unlikely(!ctx))
+		return -EINVAL;
+
+	WARN_ON(ctx->tx_cnt == 0);
+	ctx->tx_cnt = 0;
+	active_tx = GLINK_GET_CH_TX_STATE(ctx);
+	if (!active_tx)
+		glink_qos_ch_unvote_xprt(ctx);
+	return 0;
+}
+
+/**
+ * tx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec:	Pointer to the beginning of the linear buffer.
+ * @offset:	Offset into the buffer whose address is needed.
+ * @size:	Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is transmitted.
+ *
+ * Return: Address of the buffer which is at offset "offset" from the beginning
+ *         of the buffer.
+ */
+static void *tx_linear_vbuf_provider(void *iovec, size_t offset, size_t *size)
+{
+	struct glink_core_tx_pkt *tx_info = (struct glink_core_tx_pkt *)iovec;
+
+	if (unlikely(!iovec || !size))
+		return NULL;
+
+	if (offset >= tx_info->size)
+		return NULL;
+
+	if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, tx_info->data, offset)))
+		return NULL;
+
+	*size = tx_info->size - offset;
+
+	return (void *)tx_info->data + offset;
+}
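+
+/*
+ * Example of the provider contract above (sizes are illustrative): for a
+ * 500-byte linear packet, a call with offset = 100 returns data + 100 and
+ * sets *size to 400, i.e. the remaining contiguous bytes.
+ */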
+
+/**
+ * linearize_vector() - Linearize the vector buffer
+ * @iovec:	Pointer to the vector buffer.
+ * @size:	Size of data in the vector buffer.
+ * @vbuf_provider:	Virtual address-space buffer provider for the vector.
+ * @pbuf_provider:	Physical address-space buffer provider for the vector.
+ *
+ * This function is used to linearize the vector buffer provided by the
+ * transport when the client has registered to receive only a linear
+ * buffer.
+ *
+ * Return: address of the linear buffer on success, an ERR_PTR on allocation
+ * failure, or NULL on any other failure.
+ */
+static void *linearize_vector(void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *buf_size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *buf_size))
+{
+	void *bounce_buf;
+	void *pdata;
+	void *vdata;
+	size_t data_size;
+	size_t offset = 0;
+
+	bounce_buf = kmalloc(size, GFP_KERNEL);
+	if (!bounce_buf)
+		return ERR_PTR(-ENOMEM);
+
+	do {
+		if (vbuf_provider) {
+			vdata = vbuf_provider(iovec, offset, &data_size);
+		} else {
+			pdata = pbuf_provider(iovec, offset, &data_size);
+			vdata = phys_to_virt((unsigned long)pdata);
+		}
+
+		if (!vdata)
+			break;
+
+		if (OVERFLOW_ADD_UNSIGNED(size_t, data_size, offset)) {
+			GLINK_ERR("%s: overflow data_size %zu + offset %zu\n",
+				  __func__, data_size, offset);
+			goto err;
+		}
+
+		memcpy(bounce_buf + offset, vdata, data_size);
+		offset += data_size;
+	} while (offset < size);
+
+	if (offset != size) {
+		GLINK_ERR("%s: Error size_copied %zu != total_size %zu\n",
+			  __func__, offset, size);
+		goto err;
+	}
+	return bounce_buf;
+
+err:
+	kfree(bounce_buf);
+	return NULL;
+}
+
+/**
+ * glink_core_migration_edge_lock() - gain a reference count for the edge and
+ *					take the migration mutex lock
+ * @xprt_ctx:	transport of the edge
+ */
+static void glink_core_migration_edge_lock(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;
+
+	rwref_get(&edge_ctx->edge_ref_lock_lhd1);
+	mutex_lock(&edge_ctx->edge_migration_lock_lhd2);
+}
+
+/**
+ * glink_core_migration_edge_unlock() - release the edge reference count and
+ *					release the migration mutex lock
+ * @xprt_ctx:	transport of the edge
+ */
+static void glink_core_migration_edge_unlock(
+					struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_core_edge_ctx *edge_ctx = xprt_ctx->edge_ctx;
+
+	mutex_unlock(&edge_ctx->edge_migration_lock_lhd2);
+	rwref_put(&edge_ctx->edge_ref_lock_lhd1);
+}
+
+/**
+ * glink_edge_ctx_release - Free the edge context
+ * @ch_st_lock:	handle to the rwref_lock associated with the edge
+ *
+ * This should only be called when the reference count associated with the
+ * edge goes to zero.
+ */
+static void glink_edge_ctx_release(struct rwref_lock *ch_st_lock)
+{
+	struct glink_core_edge_ctx *ctx = container_of(ch_st_lock,
+					struct glink_core_edge_ctx,
+						edge_ref_lock_lhd1);
+
+	mutex_lock(&edge_list_lock_lhd0);
+	list_del(&ctx->list_node);
+	mutex_unlock(&edge_list_lock_lhd0);
+	kfree(ctx);
+}
+
+/**
+ * edge_name_to_ctx_create() - look up an edge by name, creating the edge ctx
+ *                              if it is not found.
+ * @xprt_ctx:	Transport to search for a matching edge.
+ *
+ * Return: The edge ctx corresponding to edge of @xprt_ctx.
+ */
+static struct glink_core_edge_ctx *edge_name_to_ctx_create(
+				struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_core_edge_ctx *edge_ctx;
+
+	mutex_lock(&edge_list_lock_lhd0);
+	list_for_each_entry(edge_ctx, &edge_list, list_node) {
+		if (!strcmp(edge_ctx->name, xprt_ctx->edge)) {
+			rwref_get(&edge_ctx->edge_ref_lock_lhd1);
+			mutex_unlock(&edge_list_lock_lhd0);
+			return edge_ctx;
+		}
+	}
+	edge_ctx = kzalloc(sizeof(struct glink_core_edge_ctx), GFP_KERNEL);
+	if (!edge_ctx) {
+		mutex_unlock(&edge_list_lock_lhd0);
+		return NULL;
+	}
+	strlcpy(edge_ctx->name, xprt_ctx->edge, GLINK_NAME_SIZE);
+	rwref_lock_init(&edge_ctx->edge_ref_lock_lhd1, glink_edge_ctx_release);
+	mutex_init(&edge_ctx->edge_migration_lock_lhd2);
+	INIT_LIST_HEAD(&edge_ctx->list_node);
+	list_add_tail(&edge_ctx->list_node, &edge_list);
+	mutex_unlock(&edge_list_lock_lhd0);
+	return edge_ctx;
+}
+
+/**
+ * xprt_lcid_to_ch_ctx_get() - lookup a channel by local id
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @lcid:	Local channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @lcid or NULL if a matching channel
+ *	is not found.
+ */
+static struct channel_ctx *xprt_lcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t lcid)
+{
+	struct channel_ctx *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+		if (entry->lcid == lcid) {
+			rwref_get(&entry->ch_state_lhb2);
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			return entry;
+		}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+	return NULL;
+}
+
+/**
+ * xprt_rcid_to_ch_ctx_get() - lookup a channel by remote id
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @rcid:	Remote channel identifier corresponding to the desired channel.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The channel corresponding to @rcid or NULL if a matching channel
+ *	is not found.
+ */
+static struct channel_ctx *xprt_rcid_to_ch_ctx_get(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					uint32_t rcid)
+{
+	struct channel_ctx *entry;
+	unsigned long flags;
+
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry(entry, &xprt_ctx->channels, port_list_node)
+		if (entry->rcid == rcid) {
+			rwref_get(&entry->ch_state_lhb2);
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			return entry;
+		}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+
+	return NULL;
+}
+
+/**
+ * ch_check_duplicate_riid() - Checks for duplicate riid
+ * @ctx:	Local channel context
+ * @riid:	Remote intent ID
+ *
+ * This function checks whether the riid is already present in the remote
+ * RX intent list.
+ *
+ * Return: true if the riid is found, false otherwise.
+ */
+bool ch_check_duplicate_riid(struct channel_ctx *ctx, int riid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_for_each_entry(intent, &ctx->rmt_rx_intent_list, list) {
+		if (riid == intent->id) {
+			spin_unlock_irqrestore(
+				&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	return false;
+}
+
+/**
+ * ch_pop_remote_rx_intent() - Finds a matching RX intent
+ * @ctx:	Local channel context
+ * @size:	Size of Intent
+ * @riid_ptr:	Pointer to return value of remote intent ID
+ * @intent_size: Pointer to return the size of the matched intent
+ * @cookie:	Transport-specific cookie to return
+ *
+ * This function searches for an RX intent that is >= the requested size.
+ */
+int ch_pop_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+	uint32_t *riid_ptr, size_t *intent_size, void **cookie)
+{
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent_tmp;
+	struct glink_core_rx_intent *best_intent = NULL;
+	unsigned long flags;
+
+	if (size >= GLINK_MAX_PKT_SIZE) {
+		GLINK_ERR_CH(ctx, "%s: R[]:%zu Invalid size.\n", __func__,
+				size);
+		return -EINVAL;
+	}
+
+	if (riid_ptr == NULL)
+		return -EINVAL;
+
+	*riid_ptr = 0;
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS) {
+		*riid_ptr = ++ctx->dummy_riid;
+		spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2,
+					flags);
+		return 0;
+	}
+	list_for_each_entry_safe(intent, intent_tmp, &ctx->rmt_rx_intent_list,
+			list) {
+		if (intent->intent_size >= size) {
+			if (!best_intent)
+				best_intent = intent;
+			else if (best_intent->intent_size > intent->intent_size)
+				best_intent = intent;
+			if (best_intent->intent_size == size)
+				break;
+		}
+	}
+	if (best_intent) {
+		list_del(&best_intent->list);
+		GLINK_DBG_CH(ctx,
+				"%s: R[%u]:%zu Removed remote intent\n",
+				__func__,
+				best_intent->id,
+				best_intent->intent_size);
+		*riid_ptr = best_intent->id;
+		*intent_size = best_intent->intent_size;
+		*cookie = best_intent->cookie;
+		kfree(best_intent);
+		spin_unlock_irqrestore(
+			&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	return -EAGAIN;
+}
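+
+/*
+ * Best-fit example for the search above (sizes are illustrative): with
+ * queued remote intents of 512, 2048 and 1024 bytes, a request for 600
+ * bytes skips the 512-byte intent and settles on the 1024-byte one, the
+ * smallest intent that still fits; an exact-size match ends the scan early.
+ */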
+
+/**
+ * ch_push_remote_rx_intent() - Registers a remote RX intent
+ * @ctx:	Local channel context
+ * @size:	Size of Intent
+ * @riid:	Remote intent ID
+ * @cookie:	Transport-specific cookie to cache
+ *
+ * This function adds a remote RX intent to the remote RX intent list.
+ */
+void ch_push_remote_rx_intent(struct channel_ctx *ctx, size_t size,
+		uint32_t riid, void *cookie)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	gfp_t gfp_flag;
+
+	if (size >= GLINK_MAX_PKT_SIZE) {
+		GLINK_ERR_CH(ctx, "%s: R[%u]:%zu Invalid size.\n", __func__,
+				riid, size);
+		return;
+	}
+
+	if (ch_check_duplicate_riid(ctx, riid)) {
+		GLINK_ERR_CH(ctx, "%s: R[%d]:%zu Duplicate RIID found\n",
+				__func__, riid, size);
+		return;
+	}
+
+	gfp_flag = (ctx->transport_ptr->capabilities & GCAP_AUTO_QUEUE_RX_INT) ?
+							GFP_ATOMIC : GFP_KERNEL;
+	intent = kzalloc(sizeof(struct glink_core_rx_intent), gfp_flag);
+	if (!intent) {
+		GLINK_ERR_CH(ctx,
+			"%s: R[%u]:%zu Memory allocation for intent failed\n",
+			__func__, riid, size);
+		return;
+	}
+	intent->id = riid;
+	intent->intent_size = size;
+	intent->cookie = cookie;
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_add_tail(&intent->list, &ctx->rmt_rx_intent_list);
+
+	complete_all(&ctx->int_req_complete);
+	if (ctx->notify_remote_rx_intent)
+		ctx->notify_remote_rx_intent(ctx, ctx->user_priv, size);
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+
+	GLINK_DBG_CH(ctx, "%s: R[%u]:%zu Pushed remote intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+}
+
+/**
+ * ch_push_local_rx_intent() - Create an rx_intent
+ * @ctx:	Local channel context
+ * @pkt_priv:	Opaque private pointer provided by client to be returned later
+ * @size:	Size of intent
+ *
+ * This function creates a local intent and adds it to the local
+ * intent list.
+ */
+struct glink_core_rx_intent *ch_push_local_rx_intent(struct channel_ctx *ctx,
+		const void *pkt_priv, size_t size)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+	int ret;
+
+	if (size >= GLINK_MAX_PKT_SIZE) {
+		GLINK_ERR_CH(ctx,
+			"%s: L[]:%zu Invalid size\n", __func__, size);
+		return NULL;
+	}
+
+	intent = ch_get_free_local_rx_intent(ctx);
+	if (!intent) {
+		if (ctx->max_used_liid >= ctx->transport_ptr->max_iid) {
+			GLINK_ERR_CH(ctx,
+				"%s: All intents are in USE max_iid[%d]",
+				__func__, ctx->transport_ptr->max_iid);
+			return NULL;
+		}
+
+		intent = kzalloc(sizeof(struct glink_core_rx_intent),
+								GFP_KERNEL);
+		if (!intent) {
+			GLINK_ERR_CH(ctx,
+			"%s: Memory Allocation for local rx_intent failed",
+				__func__);
+			return NULL;
+		}
+		intent->id = ++ctx->max_used_liid;
+	}
+
+	/* transport is responsible for allocating/reserving for the intent */
+	ret = ctx->transport_ptr->ops->allocate_rx_intent(
+					ctx->transport_ptr->ops, size, intent);
+	if (ret < 0) {
+		/* intent data allocation failure */
+		GLINK_ERR_CH(ctx, "%s: unable to allocate intent sz[%zu] %d",
+			__func__, size, ret);
+		spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+		list_add_tail(&intent->list,
+				&ctx->local_rx_intent_free_list);
+		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+				flags);
+		return NULL;
+	}
+
+	intent->pkt_priv = pkt_priv;
+	intent->intent_size = size;
+	intent->write_offset = 0;
+	intent->pkt_size = 0;
+	intent->bounce_buf = NULL;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+	return intent;
+}
+
+/**
+ * ch_remove_local_rx_intent() - Find and remove RX Intent from list
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, the intent
+ * is deleted from the list.
+ */
+void ch_remove_local_rx_intent(struct channel_ctx *ctx, uint32_t liid)
+{
+	struct glink_core_rx_intent *intent, *tmp_intent;
+	unsigned long flags;
+
+	if (ctx->transport_ptr->max_iid < liid) {
+		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+				liid);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+									list) {
+		if (liid == intent->id) {
+			list_del(&intent->list);
+			list_add_tail(&intent->list,
+					&ctx->local_rx_intent_free_list);
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			GLINK_DBG_CH(ctx,
+			"%s: L[%u]:%zu moved intent to Free/unused list\n",
+				__func__,
+				intent->id,
+				intent->intent_size);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid);
+}
+
+/**
+ * ch_get_dummy_rx_intent() - Get a dummy rx_intent
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel and
+ * returns either a matching intent or allocates a dummy one if no matching
+ * intents can be found.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_dummy_rx_intent(struct channel_ctx *ctx,
+		uint32_t liid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	if (!list_empty(&ctx->local_rx_intent_list)) {
+		intent = list_first_entry(&ctx->local_rx_intent_list,
+					  struct glink_core_rx_intent, list);
+		spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+		return intent;
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	intent = ch_get_free_local_rx_intent(ctx);
+	if (!intent) {
+		intent = kzalloc(sizeof(struct glink_core_rx_intent),
+								GFP_ATOMIC);
+		if (!intent) {
+			GLINK_ERR_CH(ctx,
+			"%s: Memory Allocation for local rx_intent failed",
+				__func__);
+			return NULL;
+		}
+		intent->id = ++ctx->max_used_liid;
+	}
+	intent->intent_size = 0;
+	intent->write_offset = 0;
+	intent->pkt_size = 0;
+	intent->bounce_buf = NULL;
+	intent->pkt_priv = NULL;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_add_tail(&intent->list, &ctx->local_rx_intent_list);
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu Pushed intent\n", __func__,
+			intent->id,
+			intent->intent_size);
+	return intent;
+}
+
+/**
+ * ch_get_local_rx_intent() - Search for an rx_intent
+ * @ctx:	Local channel context
+ * @liid:	Local channel Intent ID
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent using the intent ID. If found, pointer to
+ * the intent is returned.
+ *
+ * Return: Pointer to the intent if intent is found else NULL
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent(struct channel_ctx *ctx,
+		uint32_t liid)
+{
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	if (ctx->transport_ptr->max_iid < liid) {
+		GLINK_ERR_CH(ctx, "%s: L[%u] Invalid ID.\n", __func__,
+				liid);
+		return NULL;
+	}
+
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+		return ch_get_dummy_rx_intent(ctx, liid);
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (liid == intent->id) {
+			spin_unlock_irqrestore(
+				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			return intent;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid);
+	return NULL;
+}
+
+/**
+ * ch_set_local_rx_intent_notified() - Add a rx intent to local intent
+ *					notified list
+ * @ctx:	Local channel context
+ * @intent_ptr:	Pointer to the local intent
+ *
+ * This function parses the local intent list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from local_rx_intent list and adds it to local_rx_intent_notified list.
+ */
+void ch_set_local_rx_intent_notified(struct channel_ctx *ctx,
+		struct glink_core_rx_intent *intent_ptr)
+{
+	struct glink_core_rx_intent *tmp_intent, *intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(intent, tmp_intent, &ctx->local_rx_intent_list,
+									list) {
+		if (intent == intent_ptr) {
+			list_del(&intent->list);
+			list_add_tail(&intent->list,
+				&ctx->local_rx_intent_ntfy_list);
+			GLINK_DBG_CH(ctx,
+				"%s: L[%u]:%zu Moved intent %s",
+				__func__,
+				intent_ptr->id,
+				intent_ptr->intent_size,
+				"from local to notify list\n");
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			intent_ptr->id);
+}
+
+/**
+ * ch_get_local_rx_intent_notified() - Find rx intent in local notified list
+ * @ctx:	Local channel context
+ * @ptr:	Pointer to the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent.
+ *
+ * Return: Pointer to the intent if intent is found else NULL.
+ */
+struct glink_core_rx_intent *ch_get_local_rx_intent_notified(
+	struct channel_ctx *ctx, const void *ptr)
+{
+	struct glink_core_rx_intent *ptr_intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(ptr_intent, &ctx->local_rx_intent_ntfy_list,
+								list) {
+		if (ptr_intent->data == ptr || ptr_intent->iovec == ptr ||
+		    ptr_intent->bounce_buf == ptr) {
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return ptr_intent;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: Local intent not found\n", __func__);
+	return NULL;
+}
+
+/**
+ * ch_remove_local_rx_intent_notified() - Remove an rx intent from the local
+ *					intent notified list
+ * @ctx:	Local channel context
+ * @liid_ptr:	Pointer to the rx intent
+ * @reuse:	Reuse the rx intent
+ *
+ * This function parses the local intent notify list for a specific channel
+ * and checks for the intent. If found, the function deletes the intent
+ * from the local_rx_intent_notified list and adds it to the
+ * local_rx_intent_free list, or back to the local_rx_intent_list when
+ * @reuse is true.
+ */
+void ch_remove_local_rx_intent_notified(struct channel_ctx *ctx,
+	struct glink_core_rx_intent *liid_ptr, bool reuse)
+{
+	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_ntfy_list, list) {
+		if (ptr_intent == liid_ptr) {
+			list_del(&ptr_intent->list);
+			GLINK_DBG_CH(ctx,
+				"%s: L[%u]:%zu Removed intent from notify list\n",
+				__func__,
+				ptr_intent->id,
+				ptr_intent->intent_size);
+			kfree(ptr_intent->bounce_buf);
+			ptr_intent->bounce_buf = NULL;
+			ptr_intent->write_offset = 0;
+			ptr_intent->pkt_size = 0;
+			if (reuse)
+				list_add_tail(&ptr_intent->list,
+					&ctx->local_rx_intent_list);
+			else
+				list_add_tail(&ptr_intent->list,
+					&ctx->local_rx_intent_free_list);
+			spin_unlock_irqrestore(
+					&ctx->local_rx_intent_lst_lock_lhc1,
+					flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	GLINK_ERR_CH(ctx, "%s: L[%u] Intent not found.\n", __func__,
+			liid_ptr->id);
+}
+
+/**
+ * ch_get_free_local_rx_intent() - Return an rx intent from the local intent
+ *					free list
+ * @ctx:	Local channel context
+ *
+ * This function parses the local_rx_intent_free list for a specific channel
+ * and checks for a free, unused intent.
+ *
+ * Return: Pointer to the free intent if one is found, else NULL.
+ */
+struct glink_core_rx_intent *ch_get_free_local_rx_intent(
+	struct channel_ctx *ctx)
+{
+	struct glink_core_rx_intent *ptr_intent = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	if (!list_empty(&ctx->local_rx_intent_free_list)) {
+		ptr_intent = list_first_entry(&ctx->local_rx_intent_free_list,
+				struct glink_core_rx_intent,
+				list);
+		list_del(&ptr_intent->list);
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	return ptr_intent;
+}
+
+/**
+ * ch_purge_intent_lists() - Remove all intents for a channel
+ *
+ * @ctx:	Local channel context
+ *
+ * This function parses the local intent lists for a specific channel and
+ * removes and frees all intents.
+ */
+void ch_purge_intent_lists(struct channel_ctx *ctx)
+{
+	struct glink_core_rx_intent *ptr_intent, *tmp_intent;
+	struct glink_core_tx_pkt *tx_info, *tx_info_temp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	list_for_each_entry_safe(tx_info, tx_info_temp, &ctx->tx_active,
+			list_node) {
+		ctx->notify_tx_abort(ctx, ctx->user_priv,
+				tx_info->pkt_priv);
+		rwref_put(&tx_info->pkt_ref);
+	}
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_list, list) {
+		ctx->notify_rx_abort(ctx, ctx->user_priv,
+				ptr_intent->pkt_priv);
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+
+	if (!list_empty(&ctx->local_rx_intent_ntfy_list))
+		/*
+		 * The client is still processing an rx_notify() call and has
+		 * not yet called glink_rx_done() to return the pointer to us.
+		 * glink_rx_done() will do the appropriate cleanup when this
+		 * call occurs, but log a message here just for internal state
+		 * tracking.
+		 */
+		GLINK_INFO_CH(ctx, "%s: waiting on glink_rx_done()\n",
+				__func__);
+
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+				&ctx->local_rx_intent_free_list, list) {
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+	ctx->max_used_liid = 0;
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	spin_lock_irqsave(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+	list_for_each_entry_safe(ptr_intent, tmp_intent,
+			&ctx->rmt_rx_intent_list, list) {
+		list_del(&ptr_intent->list);
+		kfree(ptr_intent);
+	}
+	spin_unlock_irqrestore(&ctx->rmt_rx_intent_lst_lock_lhc2, flags);
+}
+
+/**
+ * ch_get_tx_pending_remote_done() - Look up a packet that is waiting for
+ *                                   the remote-done notification.
+ * @ctx:	Pointer to the channel context
+ * @riid:	riid of transmit packet
+ *
+ * This function searches the tx_pending_remote_done list for the packet
+ * matching @riid.
+ *
+ * The tx_lists_lock_lhc3 lock needs to be held while calling this function.
+ *
+ * Return: Pointer to the tx packet
+ */
+struct glink_core_tx_pkt *ch_get_tx_pending_remote_done(
+	struct channel_ctx *ctx, uint32_t riid)
+{
+	struct glink_core_tx_pkt *tx_pkt;
+	unsigned long flags;
+
+	if (!ctx) {
+		GLINK_ERR("%s: Invalid context pointer", __func__);
+		return NULL;
+	}
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry(tx_pkt, &ctx->tx_pending_remote_done, list_done) {
+		if (tx_pkt->riid == riid) {
+			if (tx_pkt->size_remaining) {
+				GLINK_ERR_CH(ctx, "%s: R[%u] TX not complete",
+						__func__, riid);
+				tx_pkt = NULL;
+			}
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return tx_pkt;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found.\n",
+			__func__, riid);
+	return NULL;
+}
+
+/**
+ * ch_remove_tx_pending_remote_done() - Removes a packet transmit context for a
+ *                     packet that is waiting for the remote-done notification
+ * @ctx:	Pointer to the channel context
+ * @tx_pkt:	Pointer to the transmit packet
+ *
+ * This function parses the tx_pending_remote_done list and removes the
+ * packet that matches @tx_pkt.
+ */
+void ch_remove_tx_pending_remote_done(struct channel_ctx *ctx,
+	struct glink_core_tx_pkt *tx_pkt)
+{
+	struct glink_core_tx_pkt *local_tx_pkt, *tmp_tx_pkt;
+	unsigned long flags;
+
+	if (!ctx || !tx_pkt) {
+		GLINK_ERR("%s: Invalid input", __func__);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+	list_for_each_entry_safe(local_tx_pkt, tmp_tx_pkt,
+			&ctx->tx_pending_remote_done, list_done) {
+		if (tx_pkt == local_tx_pkt) {
+			list_del_init(&tx_pkt->list_done);
+			GLINK_DBG_CH(ctx,
+				"%s: R[%u] Removed Tx packet for intent\n",
+				__func__,
+				tx_pkt->riid);
+			rwref_put(&tx_pkt->pkt_ref);
+			spin_unlock_irqrestore(
+				&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+			return;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->tx_pending_rmt_done_lock_lhc4, flags);
+
+	GLINK_ERR_CH(ctx, "%s: R[%u] Tx packet for intent not found", __func__,
+			tx_pkt->riid);
+}
+
+/**
+ * glink_add_free_lcid_list() - adds the lcid of a channel being deleted to
+ *				the available lcid list
+ * @ctx:	Pointer to channel context.
+ */
+static void glink_add_free_lcid_list(struct channel_ctx *ctx)
+{
+	struct channel_lcid *free_lcid;
+	unsigned long flags;
+
+	free_lcid = kzalloc(sizeof(*free_lcid), GFP_KERNEL);
+	if (!free_lcid) {
+		GLINK_ERR(
+			"%s: allocation failed on xprt:edge [%s:%s] for lcid [%d]\n",
+			__func__, ctx->transport_ptr->name,
+			ctx->transport_ptr->edge, ctx->lcid);
+		return;
+	}
+	free_lcid->lcid = ctx->lcid;
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_add_tail(&free_lcid->list_node,
+			&ctx->transport_ptr->free_lcid_list);
+	spin_unlock_irqrestore(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+					flags);
+}
+
+/**
+ * glink_ch_ctx_release() - Free the channel context
+ * @ch_st_lock:	handle to the rwref_lock associated with the channel
+ *
+ * This should only be called when the reference count associated with the
+ * channel goes to zero.
+ */
+static void glink_ch_ctx_release(struct rwref_lock *ch_st_lock)
+{
+	struct channel_ctx *ctx = container_of(ch_st_lock, struct channel_ctx,
+						ch_state_lhb2);
+	ctx->transport_ptr = NULL;
+	kfree(ctx);
+	GLINK_INFO("%s: freed the channel ctx in pid [%d]\n", __func__,
+			current->pid);
+	ctx = NULL;
+}
+
+/**
+ * ch_name_to_ch_ctx_create() - lookup a channel by name, create the channel if
+ *                              it is not found.
+ * @xprt_ctx:	Transport to search for a matching channel.
+ * @name:	Name of the desired channel.
+ *
+ * Return: The channel corresponding to @name, NULL if a matching channel was
+ *         not found AND a new channel could not be created.
+ */
+static struct channel_ctx *ch_name_to_ch_ctx_create(
+					struct glink_core_xprt_ctx *xprt_ctx,
+					const char *name)
+{
+	struct channel_ctx *entry;
+	struct channel_ctx *ctx;
+	struct channel_ctx *temp;
+	unsigned long flags;
+	struct channel_lcid *flcid;
+
+	ctx = kzalloc(sizeof(struct channel_ctx), GFP_KERNEL);
+	if (!ctx) {
+		GLINK_ERR_XPRT(xprt_ctx, "%s: Failed to allocate ctx, %s",
+			__func__,
+			"checking if there is one existing\n");
+		goto check_ctx;
+	}
+
+	ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+	strlcpy(ctx->name, name, GLINK_NAME_SIZE);
+	rwref_lock_init(&ctx->ch_state_lhb2, glink_ch_ctx_release);
+	INIT_LIST_HEAD(&ctx->tx_ready_list_node);
+	init_completion(&ctx->int_req_ack_complete);
+	init_completion(&ctx->int_req_complete);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_list);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_ntfy_list);
+	INIT_LIST_HEAD(&ctx->local_rx_intent_free_list);
+	spin_lock_init(&ctx->local_rx_intent_lst_lock_lhc1);
+	INIT_LIST_HEAD(&ctx->rmt_rx_intent_list);
+	spin_lock_init(&ctx->rmt_rx_intent_lst_lock_lhc2);
+	INIT_LIST_HEAD(&ctx->tx_active);
+	spin_lock_init(&ctx->tx_pending_rmt_done_lock_lhc4);
+	INIT_LIST_HEAD(&ctx->tx_pending_remote_done);
+	spin_lock_init(&ctx->tx_lists_lock_lhc3);
+
+check_ctx:
+	rwref_write_get(&xprt_ctx->xprt_state_lhb0);
+	if (xprt_ctx->local_state != GLINK_XPRT_OPENED) {
+		kfree(ctx);
+		rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+		return NULL;
+	}
+	spin_lock_irqsave(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	list_for_each_entry_safe(entry, temp, &xprt_ctx->channels,
+		    port_list_node)
+		if (!strcmp(entry->name, name) && !entry->pending_delete) {
+			spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1,
+					flags);
+			kfree(ctx);
+			rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+			return entry;
+		}
+
+	if (ctx) {
+		if (list_empty(&xprt_ctx->free_lcid_list)) {
+			if (xprt_ctx->next_lcid > xprt_ctx->max_cid) {
+				/* no more channels available */
+				GLINK_ERR_XPRT(xprt_ctx,
+					"%s: unable to exceed %u channels\n",
+					__func__, xprt_ctx->max_cid);
+				spin_unlock_irqrestore(
+						&xprt_ctx->xprt_ctx_lock_lhb1,
+						flags);
+				kfree(ctx);
+				rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+				return NULL;
+			}
+			ctx->lcid = xprt_ctx->next_lcid++;
+		} else {
+			flcid = list_first_entry(&xprt_ctx->free_lcid_list,
+						struct channel_lcid, list_node);
+			ctx->lcid = flcid->lcid;
+			list_del(&flcid->list_node);
+			kfree(flcid);
+		}
+
+		list_add_tail(&ctx->port_list_node, &xprt_ctx->channels);
+
+		GLINK_INFO_PERF_CH_XPRT(ctx, xprt_ctx,
+			"%s: local:GLINK_CHANNEL_CLOSED\n",
+			__func__);
+	}
+	spin_unlock_irqrestore(&xprt_ctx->xprt_ctx_lock_lhb1, flags);
+	rwref_write_put(&xprt_ctx->xprt_state_lhb0);
+	mutex_lock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
+	if (ctx != NULL)
+		glink_debugfs_add_channel(ctx, xprt_ctx);
+	mutex_unlock(&xprt_ctx->xprt_dbgfs_lock_lhb4);
+	return ctx;
+}
+
+/**
+ * ch_add_rcid() - add a remote channel identifier to an existing channel
+ * @xprt_ctx:	Transport the channel resides on.
+ * @ctx:	Channel receiving the identifier.
+ * @rcid:	The remote channel identifier.
+ */
+static void ch_add_rcid(struct glink_core_xprt_ctx *xprt_ctx,
+			struct channel_ctx *ctx,
+			uint32_t rcid)
+{
+	ctx->rcid = rcid;
+}
+
+/**
+ * ch_update_local_state() - Update the local channel state
+ * @ctx:	Pointer to channel context.
+ * @lstate:	Local channel state.
+ *
+ * Return: True if the channel is fully closed as a result of this update,
+ *	false otherwise.
+ */
+static bool ch_update_local_state(struct channel_ctx *ctx,
+					enum local_channel_state_e lstate)
+{
+	bool is_fully_closed;
+
+	rwref_write_get(&ctx->ch_state_lhb2);
+	ctx->local_open_state = lstate;
+	is_fully_closed = ch_is_fully_closed(ctx);
+	rwref_write_put(&ctx->ch_state_lhb2);
+
+	return is_fully_closed;
+}
+
+/**
+ * ch_update_rmt_state() - Update the remote channel state
+ * @ctx:	Pointer to channel context.
+ * @rstate:	Remote channel state.
+ *
+ * Return: True if the channel is fully closed as result of this update,
+ *	false otherwise.
+ */
+static bool ch_update_rmt_state(struct channel_ctx *ctx, bool rstate)
+{
+	bool is_fully_closed;
+
+	rwref_write_get(&ctx->ch_state_lhb2);
+	ctx->remote_opened = rstate;
+	is_fully_closed = ch_is_fully_closed(ctx);
+	rwref_write_put(&ctx->ch_state_lhb2);
+
+	return is_fully_closed;
+}
+
+/**
+ * ch_is_fully_opened() - Verify if a channel is fully open
+ * @ctx:	Pointer to channel context
+ *
+ * Return: True if open, else false
+ */
+static bool ch_is_fully_opened(struct channel_ctx *ctx)
+{
+	if (ctx->remote_opened && ctx->local_open_state == GLINK_CHANNEL_OPENED)
+		return true;
+
+	return false;
+}
+
+/**
+ * ch_is_fully_closed() - Verify if a channel is closed on both sides
+ * @ctx: Pointer to channel context
+ *
+ * Return: True if fully closed, else false
+ */
+static bool ch_is_fully_closed(struct channel_ctx *ctx)
+{
+	if (!ctx->remote_opened &&
+			ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		return true;
+
+	return false;
+}
+
+/**
+ * find_open_transport() - find a specific open transport
+ * @edge:		Edge the transport is on.
+ * @name:		Name of the transport (or NULL if no preference)
+ * @initial_xprt:	The specified transport is the start for migration
+ * @best_id:		The best transport found for this connection
+ *
+ * Find an open transport corresponding to the specified @name and @edge.  @edge
+ * is expected to be valid.  @name is expected to be NULL (unspecified) or
+ * valid.  If @name is not specified, then the best transport found on the
+ * specified edge will be returned.
+ *
+ * Return: Transport with the specified name on the specified edge, if open.
+ *	NULL if the transport exists, but is not fully open.  ENODEV if no such
+ *	transport exists.
+ */
+static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
+						       const char *name,
+						       bool initial_xprt,
+						       uint16_t *best_id)
+{
+	struct glink_core_xprt_ctx *xprt = NULL;
+	struct glink_core_xprt_ctx *best_xprt = NULL;
+	struct glink_core_xprt_ctx *ret = NULL;
+	bool first = true;
+
+	ret = (struct glink_core_xprt_ctx *)ERR_PTR(-ENODEV);
+	*best_id = USHRT_MAX;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node) {
+		if (strcmp(edge, xprt->edge))
+			continue;
+		if (first) {
+			first = false;
+			ret = NULL;
+		}
+		if (!xprt_is_fully_opened(xprt))
+			continue;
+
+		if (xprt->id < *best_id) {
+			*best_id = xprt->id;
+			best_xprt = xprt;
+		}
+
+		/*
+		 * Braces are required in this instance because the else will
+		 * attach to the wrong if otherwise.
+		 */
+		if (name) {
+			if (!strcmp(name, xprt->name))
+				ret = xprt;
+		} else {
+			ret = best_xprt;
+		}
+	}
+
+	mutex_unlock(&transport_list_lock_lha0);
+
+	if (IS_ERR_OR_NULL(ret))
+		return ret;
+	if (!initial_xprt)
+		*best_id = ret->id;
+
+	return ret;
+}
+
+/**
+ * xprt_is_fully_opened() - check the open status of a transport
+ * @xprt:	Transport being checked.
+ *
+ * Return: True if the transport is fully opened, false otherwise.
+ */
+static bool xprt_is_fully_opened(struct glink_core_xprt_ctx *xprt)
+{
+	if (xprt->remote_neg_completed &&
+					xprt->local_state == GLINK_XPRT_OPENED)
+		return true;
+
+	return false;
+}
+
+/**
+ * glink_dummy_notify_rx_intent_req() - Dummy RX Request
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @req_size:	Requested size (ignored)
+ *
+ * Dummy RX intent request if client does not implement the optional callback
+ * function.
+ *
+ * Return:  False
+ */
+static bool glink_dummy_notify_rx_intent_req(void *handle, const void *priv,
+	size_t req_size)
+{
+	return false;
+}
+
+/**
+ * glink_dummy_notify_rx_sigs() - Dummy signal callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @old_sigs:	Old signal values (ignored)
+ * @new_sigs:	New signal values (ignored)
+ *
+ * Dummy signal callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_sigs(void *handle, const void *priv,
+				uint32_t old_sigs, uint32_t new_sigs)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_rx_abort() - Dummy rx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy rx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_rx_abort(void *handle, const void *priv,
+				const void *pkt_priv)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * glink_dummy_notify_tx_abort() - Dummy tx abort callback
+ *
+ * @handle:	Channel handle (ignored)
+ * @priv:	Private data pointer (ignored)
+ * @pkt_priv:	Private intent data pointer (ignored)
+ *
+ * Dummy tx abort callback if client does not implement the optional callback
+ * function.
+ */
+static void glink_dummy_notify_tx_abort(void *handle, const void *priv,
+				const void *pkt_priv)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_poll() - a dummy poll() for transports that don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The channel to poll.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_reuse_rx_intent() - a dummy reuse_rx_intent() for transports that
+ *			     don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @intent:	The intent to reuse.
+ *
+ * Return: Success.
+ */
+static int dummy_reuse_rx_intent(struct glink_transport_if *if_ptr,
+				 struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_mask_rx_irq() - a dummy mask_rx_irq() for transports that don't define
+ *			 one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The local channel id for this channel.
+ * @mask:	True to mask the irq, false to unmask.
+ * @pstruct:	Platform defined structure with data necessary for masking.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+			     bool mask, void *pstruct)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_wait_link_down() - a dummy wait_link_down() for transports that don't
+ *			define one
+ * @if_ptr:	The transport interface handle for this transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_wait_link_down(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_allocate_rx_intent() - a dummy RX intent allocation function that does
+ *				not allocate anything
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	Size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return:	Success.
+ */
+static int dummy_allocate_rx_intent(struct glink_transport_if *if_ptr,
+			size_t size, struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_tracer_pkt() - a dummy tracer packet tx cmd for transports
+ *                             that don't define one
+ * @if_ptr:	The transport interface handle for this transport.
+ * @lcid:	The channel in which the tracer packet is transmitted.
+ * @pctx:	Context of the packet to be transmitted.
+ *
+ * Return: 0.
+ */
+static int dummy_tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr,
+		uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	pctx->size_remaining = 0;
+	return 0;
+}
+
+/**
+ * dummy_deallocate_rx_intent() - a dummy rx intent deallocation function that
+ *				does not deallocate anything
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return:	Success.
+ */
+static int dummy_deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_intent() - dummy local rx intent request
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size, uint32_t liid)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_local_rx_done() - dummy rx done command
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void dummy_tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				uint32_t lcid, uint32_t liid, bool reuse)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_tx() - dummy tx() that does not send anything
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written i.e. zero.
+ */
+static int dummy_tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+				struct glink_core_tx_pkt *pctx)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_rx_intent_req() - dummy rx intent request function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_remote_rx_intent_req_ack() - dummy rx intent request ack
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_remote_rx_intent_req_ack(
+					struct glink_transport_if *if_ptr,
+					uint32_t lcid, bool granted)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_set_sigs() - dummy signals ack transmit function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_set_sigs(struct glink_transport_if *if_ptr,
+				uint32_t lcid, uint32_t sigs)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_close() - dummy channel close transmit function
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return:	Success.
+ */
+static int dummy_tx_cmd_ch_close(struct glink_transport_if *if_ptr,
+				uint32_t lcid)
+{
+	return 0;
+}
+
+/**
+ * dummy_tx_cmd_ch_remote_close_ack() - dummy channel close ack sending function
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void dummy_tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	/* intentionally left blank */
+}
+
+/**
+ * dummy_get_power_vote_ramp_time() - Dummy Power vote ramp time
+ * @if_ptr:	The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static unsigned long dummy_get_power_vote_ramp_time(
+		struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return (unsigned long)-EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_vote() - Dummy Power vote operation
+ * @if_ptr:	The transport to transmit on.
+ * @state:	The power state being requested from the transport.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_vote(struct glink_transport_if *if_ptr,
+			    uint32_t state)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * dummy_power_unvote() - Dummy Power unvote operation
+ * @if_ptr:	The transport to transmit on.
+ *
+ * Return: An error to indicate that this operation is unsupported.
+ */
+static int dummy_power_unvote(struct glink_transport_if *if_ptr)
+{
+	return -EOPNOTSUPP;
+}
+
+/**
+ * notif_if_up_all_xprts() - Check and notify existing transport state if up
+ * @notif_info:	Data structure containing transport information to be notified.
+ *
+ * This function is called when the client registers a notifier to know about
+ * the state of a transport. This function matches the existing transports with
+ * the transport in the "notif_info" parameter. When a matching transport is
+ * found, the callback function in the "notif_info" parameter is called with
+ * the state of the matching transport.
+ *
+ * If an edge or transport is not defined, then all edges and/or transports
+ * will be matched and will receive up notifications.
+ */
+static void notif_if_up_all_xprts(
+		struct link_state_notifier_info *notif_info)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	struct glink_link_state_cb_info cb_info;
+
+	cb_info.link_state = GLINK_LINK_STATE_UP;
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt_ptr, &transport_list, list_node) {
+		if (strlen(notif_info->edge) &&
+		    strcmp(notif_info->edge, xprt_ptr->edge))
+			continue;
+
+		if (strlen(notif_info->transport) &&
+		    strcmp(notif_info->transport, xprt_ptr->name))
+			continue;
+
+		if (!xprt_is_fully_opened(xprt_ptr))
+			continue;
+
+		cb_info.transport = xprt_ptr->name;
+		cb_info.edge = xprt_ptr->edge;
+		notif_info->glink_link_state_notif_cb(&cb_info,
+						notif_info->priv);
+	}
+	mutex_unlock(&transport_list_lock_lha0);
+}
+
+/**
+ * check_link_notifier_and_notify() - Check and notify clients about link state
+ * @xprt_ptr:	Transport whose state to be notified.
+ * @link_state:	State of the transport to be notified.
+ *
+ * This function is called when the state of the transport changes. This
+ * function matches the transport with the clients that have registered to
+ * be notified about the state changes. When a matching client notifier is
+ * found, the callback function in the client notifier is called with the
+ * new state of the transport.
+ */
+static void check_link_notifier_and_notify(struct glink_core_xprt_ctx *xprt_ptr,
+					   enum glink_link_state link_state)
+{
+	struct link_state_notifier_info *notif_info;
+	struct glink_link_state_cb_info cb_info;
+
+	cb_info.link_state = link_state;
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_for_each_entry(notif_info, &link_state_notifier_list, list) {
+		if (strlen(notif_info->edge) &&
+		    strcmp(notif_info->edge, xprt_ptr->edge))
+			continue;
+
+		if (strlen(notif_info->transport) &&
+		    strcmp(notif_info->transport, xprt_ptr->name))
+			continue;
+
+		cb_info.transport = xprt_ptr->name;
+		cb_info.edge = xprt_ptr->edge;
+		notif_info->glink_link_state_notif_cb(&cb_info,
+						notif_info->priv);
+	}
+	mutex_unlock(&link_state_notifier_lock_lha1);
+}
+
+/**
+ * glink_open() - Open a GLINK channel
+ *
+ * @cfg:	Open configuration structure (the structure is copied before
+ *		glink_open returns).  All unused fields should be zero-filled.
+ *
+ * This should not be called from link state callback context by clients.
+ * It is recommended that clients invoke this function from their own
+ * thread.
+ *
+ * Return:  Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg)
+{
+	struct channel_ctx *ctx = NULL;
+	struct glink_core_xprt_ctx *transport_ptr;
+	size_t len;
+	int ret;
+	uint16_t best_id;
+
+	if (!cfg->edge || !cfg->name) {
+		GLINK_ERR("%s: !cfg->edge || !cfg->name\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = strlen(cfg->edge);
+	if (len == 0 || len >= GLINK_NAME_SIZE) {
+		GLINK_ERR("%s: [EDGE] len == 0 || len >= GLINK_NAME_SIZE\n",
+				__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	len = strlen(cfg->name);
+	if (len == 0 || len >= GLINK_NAME_SIZE) {
+		GLINK_ERR("%s: [NAME] len == 0 || len >= GLINK_NAME_SIZE\n",
+				__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (cfg->transport) {
+		len = strlen(cfg->transport);
+		if (len == 0 || len >= GLINK_NAME_SIZE) {
+			GLINK_ERR("%s: [TRANSPORT] len == 0 || %s\n",
+				__func__,
+				"len >= GLINK_NAME_SIZE");
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
+	/* confirm required notification parameters */
+	if (!(cfg->notify_rx || cfg->notify_rxv) || !cfg->notify_tx_done
+		|| !cfg->notify_state
+		|| ((cfg->options & GLINK_OPT_RX_INTENT_NOTIF)
+			&& !cfg->notify_remote_rx_intent)) {
+		GLINK_ERR("%s: Incorrect notification parameters\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* find transport */
+	transport_ptr = find_open_transport(cfg->edge, cfg->transport,
+					cfg->options & GLINK_OPT_INITIAL_XPORT,
+					&best_id);
+	if (IS_ERR_OR_NULL(transport_ptr)) {
+		GLINK_ERR("%s:%s %s: Error %d - unable to find transport\n",
+				cfg->transport, cfg->edge, __func__,
+				(unsigned int)PTR_ERR(transport_ptr));
+		return ERR_PTR(-ENODEV);
+	}
+
+	/*
+	 * look for an existing port structure which can occur in
+	 * reopen and remote-open-first cases
+	 */
+	ctx = ch_name_to_ch_ctx_create(transport_ptr, cfg->name);
+	if (ctx == NULL) {
+		GLINK_ERR("%s:%s %s: Error - unable to allocate new channel\n",
+				cfg->transport, cfg->edge, __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* port already exists */
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSED) {
+		/* not ready to be re-opened */
+		GLINK_INFO_CH_XPRT(ctx, transport_ptr,
+		"%s: Channel not ready to be re-opened. State: %u\n",
+		__func__, ctx->local_open_state);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/* initialize port structure */
+	ctx->user_priv = cfg->priv;
+	ctx->rx_intent_req_timeout_jiffies =
+		msecs_to_jiffies(cfg->rx_intent_req_timeout_ms);
+	ctx->notify_rx = cfg->notify_rx;
+	ctx->notify_tx_done = cfg->notify_tx_done;
+	ctx->notify_state = cfg->notify_state;
+	ctx->notify_rx_intent_req = cfg->notify_rx_intent_req;
+	ctx->notify_rxv = cfg->notify_rxv;
+	ctx->notify_rx_sigs = cfg->notify_rx_sigs;
+	ctx->notify_rx_abort = cfg->notify_rx_abort;
+	ctx->notify_tx_abort = cfg->notify_tx_abort;
+	ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
+	ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+
+	if (!ctx->notify_rx_intent_req)
+		ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
+	if (!ctx->notify_rx_sigs)
+		ctx->notify_rx_sigs = glink_dummy_notify_rx_sigs;
+	if (!ctx->notify_rx_abort)
+		ctx->notify_rx_abort = glink_dummy_notify_rx_abort;
+	if (!ctx->notify_tx_abort)
+		ctx->notify_tx_abort = glink_dummy_notify_tx_abort;
+
+	if (!ctx->rx_intent_req_timeout_jiffies)
+		ctx->rx_intent_req_timeout_jiffies = MAX_SCHEDULE_TIMEOUT;
+
+	ctx->local_xprt_req = best_id;
+	ctx->no_migrate = cfg->transport &&
+				!(cfg->options & GLINK_OPT_INITIAL_XPORT);
+	ctx->transport_ptr = transport_ptr;
+	ctx->local_open_state = GLINK_CHANNEL_OPENING;
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:GLINK_CHANNEL_CLOSED->GLINK_CHANNEL_OPENING\n",
+		__func__);
+
+	/* start local-open sequence */
+	ret = ctx->transport_ptr->ops->tx_cmd_ch_open(ctx->transport_ptr->ops,
+		ctx->lcid, cfg->name, best_id);
+	if (ret) {
+		/* failure to send open command (transport failure) */
+		ctx->local_open_state = GLINK_CHANNEL_CLOSED;
+		GLINK_ERR_CH(ctx, "%s: Unable to send open command %d\n",
+			__func__, ret);
+		return ERR_PTR(ret);
+	}
+
+	GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
+			__func__, ctx);
+
+	return ctx;
+}
+EXPORT_SYMBOL(glink_open);
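+
+/*
+ * Minimal usage sketch for glink_open() (illustrative only, not part of this
+ * patch). The edge/channel names and callback functions below are
+ * hypothetical, and the callback signatures are assumed to follow the
+ * client API declared in the G-Link header.
+ *
+ *	struct glink_open_config cfg = {
+ *		.edge = "example_edge",
+ *		.name = "example_channel",
+ *		.notify_rx = example_notify_rx,
+ *		.notify_tx_done = example_notify_tx_done,
+ *		.notify_state = example_notify_state,
+ *	};
+ *	void *handle = glink_open(&cfg);
+ *
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return PTR_ERR(handle);
+ *
+ * Clients should wait for their notify_state() callback to report the
+ * connected state before transmitting on the handle.
+ */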
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (ctx == NULL)
+		return -EINVAL;
+
+	return ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_channel_id_for_handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (ctx == NULL)
+		return NULL;
+
+	return ctx->name;
+}
+EXPORT_SYMBOL(glink_get_channel_name_for_handle);
+
+/**
+ * glink_delete_ch_from_list() -  delete the channel from the list
+ * @ctx:	Pointer to channel context.
+ * @add_flcid:	Boolean value to decide whether the lcid should be added or not.
+ *
+ * This function deletes the channel from the list along with the debugfs
+ * information associated with it. It also adds the channel lcid to the free
+ * lcid list, except when the channel is deleted as part of an SSR/unregister
+ * sequence. It can only be called when the channel is fully closed.
+ *
+ * Return: true when transport_ptr->channels is empty.
+ */
+static bool glink_delete_ch_from_list(struct channel_ctx *ctx, bool add_flcid)
+{
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+				flags);
+	if (!list_empty(&ctx->port_list_node))
+		list_del_init(&ctx->port_list_node);
+	if (list_empty(&ctx->transport_ptr->channels) &&
+			list_empty(&ctx->transport_ptr->notified))
+		ret = true;
+	spin_unlock_irqrestore(
+			&ctx->transport_ptr->xprt_ctx_lock_lhb1,
+			flags);
+	if (add_flcid)
+		glink_add_free_lcid_list(ctx);
+	mutex_lock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+	glink_debugfs_remove_channel(ctx, ctx->transport_ptr);
+	mutex_unlock(&ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+	rwref_put(&ctx->ch_state_lhb2);
+	return ret;
+}
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle:	handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return:  0 on success; -EINVAL for invalid handle; -EBUSY if close is
+ * already in progress; standard Linux error code otherwise.
+ */
+int glink_close(void *handle)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret = 0;
+	unsigned long flags;
+	bool is_empty = false;
+
+	if (!ctx)
+		return -EINVAL;
+
+	GLINK_INFO_CH(ctx, "%s: Closing channel, ctx: %p\n", __func__, ctx);
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSED)
+		return 0;
+
+	if (ctx->local_open_state == GLINK_CHANNEL_CLOSING) {
+		/* close already pending */
+		return -EBUSY;
+	}
+
+	rwref_get(&ctx->ch_state_lhb2);
+relock: xprt_ctx = ctx->transport_ptr;
+	rwref_read_get(&xprt_ctx->xprt_state_lhb0);
+	rwref_write_get(&ctx->ch_state_lhb2);
+	if (xprt_ctx != ctx->transport_ptr) {
+		rwref_write_put(&ctx->ch_state_lhb2);
+		rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+		goto relock;
+	}
+
+	/* Set the channel state before removing it from xprt's list(s) */
+	GLINK_INFO_PERF_CH(ctx,
+		"%s: local:%u->GLINK_CHANNEL_CLOSING\n",
+		__func__, ctx->local_open_state);
+	ctx->local_open_state = GLINK_CHANNEL_CLOSING;
+
+	ctx->pending_delete = true;
+	ctx->int_req_ack = false;
+
+	spin_lock_irqsave(&xprt_ctx->tx_ready_lock_lhb3, flags);
+	if (!list_empty(&ctx->tx_ready_list_node))
+		list_del_init(&ctx->tx_ready_list_node);
+	spin_unlock_irqrestore(&xprt_ctx->tx_ready_lock_lhb3, flags);
+
+	if (xprt_ctx->local_state != GLINK_XPRT_DOWN) {
+		glink_qos_reset_priority(ctx);
+		ret = xprt_ctx->ops->tx_cmd_ch_close(xprt_ctx->ops, ctx->lcid);
+		rwref_write_put(&ctx->ch_state_lhb2);
+	} else if (!strcmp(xprt_ctx->name, "dummy")) {
+		/*
+		 * This check will avoid any race condition when clients call
+		 * glink_close before the dummy xprt swapping happens in link
+		 * down scenario.
+		 */
+		ret = 0;
+		rwref_write_put(&ctx->ch_state_lhb2);
+		glink_core_ch_close_ack_common(ctx, false);
+		if (ch_is_fully_closed(ctx)) {
+			is_empty = glink_delete_ch_from_list(ctx, false);
+			rwref_put(&xprt_ctx->xprt_state_lhb0);
+			if (is_empty && !xprt_ctx->dummy_in_use)
+				/* For the xprt reference */
+				rwref_put(&xprt_ctx->xprt_state_lhb0);
+		} else {
+			GLINK_ERR_CH(ctx,
+			"channel Not closed yet local state [%d] remote_state [%d]\n",
+			ctx->local_open_state, ctx->remote_opened);
+		}
+	} else {
+		/*
+		 * This case handles the scenario where glink_core_link_down
+		 * changes the local_state to GLINK_XPRT_DOWN but glink_close
+		 * gets the channel write lock before glink_core_channel_cleanup
+		 */
+		rwref_write_put(&ctx->ch_state_lhb2);
+	}
+	complete_all(&ctx->int_req_ack_complete);
+	complete_all(&ctx->int_req_complete);
+
+	rwref_put(&ctx->ch_state_lhb2);
+	rwref_read_put(&xprt_ctx->xprt_state_lhb0);
+	return ret;
+}
+EXPORT_SYMBOL(glink_close);
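+
+/*
+ * Usage note (illustrative only): glink_close() may return before the close
+ * handshake finishes. As described above, clients should wait for the
+ * GLINK_LOCAL_DISCONNECTED state event before freeing per-channel resources
+ * or reopening the channel, e.g.:
+ *
+ *	ret = glink_close(handle);
+ *	if (ret && ret != -EBUSY)
+ *		handle the error
+ */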
+
+/**
+ * glink_tx_pkt_release() - Release a packet's transmit information
+ * @tx_pkt_ref:	Packet information which needs to be released.
+ *
+ * This function is called when all the references to a packet information
+ * is dropped.
+ */
+static void glink_tx_pkt_release(struct rwref_lock *tx_pkt_ref)
+{
+	struct glink_core_tx_pkt *tx_info = container_of(tx_pkt_ref,
+						struct glink_core_tx_pkt,
+						pkt_ref);
+	if (!list_empty(&tx_info->list_done))
+		list_del_init(&tx_info->list_done);
+	if (!list_empty(&tx_info->list_node))
+		list_del_init(&tx_info->list_node);
+	kfree(tx_info);
+}
+
+/**
+ * glink_tx_common() - Common TX implementation
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data (linear buffer), or NULL when @iovec is used
+ * @iovec:	pointer to the vector, or NULL for linear data
+ * @size:	size of data
+ * @vbuf_provider: Virtual Address-space Buffer Provider for the tx buffer.
+ * @pbuf_provider: Physical Address-space Buffer Provider for the tx buffer.
+ * @tx_flags:	Flags to indicate transmit options
+ *
+ * Return:	-EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *		transmit operation (not fully opened); -EAGAIN if remote side
+ *		has not provided a receive intent that is big enough.
+ */
+static int glink_tx_common(void *handle, void *pkt_priv,
+	void *data, void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	uint32_t riid;
+	int ret = 0;
+	struct glink_core_tx_pkt *tx_info;
+	size_t intent_size;
+	bool is_atomic =
+		tx_flags & (GLINK_TX_SINGLE_THREADED | GLINK_TX_ATOMIC);
+	unsigned long flags;
+	void *cookie = NULL;
+
+	if (!size)
+		return -EINVAL;
+
+	if (!ctx)
+		return -EINVAL;
+
+	rwref_read_get_atomic(&ctx->ch_state_lhb2, is_atomic);
+	if (!(vbuf_provider || pbuf_provider)) {
+		rwref_read_put(&ctx->ch_state_lhb2);
+		return -EINVAL;
+	}
+
+	if (!ch_is_fully_opened(ctx)) {
+		rwref_read_put(&ctx->ch_state_lhb2);
+		return -EBUSY;
+	}
+
+	if (size > GLINK_MAX_PKT_SIZE) {
+		rwref_read_put(&ctx->ch_state_lhb2);
+		return -EINVAL;
+	}
+
+	if (unlikely(tx_flags & GLINK_TX_TRACER_PKT)) {
+		if (!(ctx->transport_ptr->capabilities & GCAP_TRACER_PKT)) {
+			rwref_read_put(&ctx->ch_state_lhb2);
+			return -EOPNOTSUPP;
+		}
+		tracer_pkt_log_event(data, GLINK_CORE_TX);
+	}
+
+	/* find matching rx intent (first-fit algorithm for now) */
+	if (ch_pop_remote_rx_intent(ctx, size, &riid, &intent_size, &cookie)) {
+		if (!(tx_flags & GLINK_TX_REQ_INTENT)) {
+			/* no rx intent available */
+			GLINK_ERR_CH(ctx,
+				"%s: R[%u]:%zu Intent not present for lcid\n",
+				__func__, riid, size);
+			rwref_read_put(&ctx->ch_state_lhb2);
+			return -EAGAIN;
+		}
+		if (is_atomic && !(ctx->transport_ptr->capabilities &
+					  GCAP_AUTO_QUEUE_RX_INT)) {
+			GLINK_ERR_CH(ctx,
+				"%s: Cannot request intent in atomic context\n",
+				__func__);
+			rwref_read_put(&ctx->ch_state_lhb2);
+			return -EINVAL;
+		}
+
+		/* request intent of correct size */
+		reinit_completion(&ctx->int_req_ack_complete);
+		ret = ctx->transport_ptr->ops->tx_cmd_rx_intent_req(
+				ctx->transport_ptr->ops, ctx->lcid, size);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Request intent failed %d\n",
+					__func__, ret);
+			rwref_read_put(&ctx->ch_state_lhb2);
+			return ret;
+		}
+
+		while (ch_pop_remote_rx_intent(ctx, size, &riid,
+						&intent_size, &cookie)) {
+			rwref_get(&ctx->ch_state_lhb2);
+			rwref_read_put(&ctx->ch_state_lhb2);
+			if (is_atomic) {
+				GLINK_ERR_CH(ctx,
+				    "%s Intent of size %zu not ready\n",
+				    __func__, size);
+				rwref_put(&ctx->ch_state_lhb2);
+				return -EAGAIN;
+			}
+
+			if (ctx->transport_ptr->local_state == GLINK_XPRT_DOWN
+			    || !ch_is_fully_opened(ctx)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Channel closed while waiting for intent\n",
+					__func__);
+				rwref_put(&ctx->ch_state_lhb2);
+				return -EBUSY;
+			}
+
+			/* wait for the remote intent req ack */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_ack_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent request ack with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhb2);
+				return -ETIMEDOUT;
+			}
+
+			if (!ctx->int_req_ack) {
+				GLINK_ERR_CH(ctx,
+				    "%s: Intent Request with size: %zu %s",
+				    __func__, size,
+				    "not granted for lcid\n");
+				rwref_put(&ctx->ch_state_lhb2);
+				return -EAGAIN;
+			}
+
+			/* wait for the rx_intent from remote side */
+			if (!wait_for_completion_timeout(
+					&ctx->int_req_complete,
+					ctx->rx_intent_req_timeout_jiffies)) {
+				GLINK_ERR_CH(ctx,
+					"%s: Intent request with size: %zu not granted for lcid\n",
+					__func__, size);
+				rwref_put(&ctx->ch_state_lhb2);
+				return -ETIMEDOUT;
+			}
+
+			reinit_completion(&ctx->int_req_complete);
+			rwref_read_get(&ctx->ch_state_lhb2);
+			rwref_put(&ctx->ch_state_lhb2);
+		}
+	}
+
+	if (!is_atomic) {
+		spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3,
+				  flags);
+		glink_pm_qos_vote(ctx->transport_ptr);
+		spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3,
+					flags);
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: R[%u]:%zu data[%p], size[%zu]. TID %u\n",
+			__func__, riid, intent_size,
+			data ? data : iovec, size, current->pid);
+	tx_info = kzalloc(sizeof(struct glink_core_tx_pkt),
+				is_atomic ? GFP_ATOMIC : GFP_KERNEL);
+	if (!tx_info) {
+		GLINK_ERR_CH(ctx, "%s: No memory for allocation\n", __func__);
+		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
+		rwref_read_put(&ctx->ch_state_lhb2);
+		return -ENOMEM;
+	}
+	rwref_lock_init(&tx_info->pkt_ref, glink_tx_pkt_release);
+	INIT_LIST_HEAD(&tx_info->list_done);
+	INIT_LIST_HEAD(&tx_info->list_node);
+	tx_info->pkt_priv = pkt_priv;
+	tx_info->data = data;
+	tx_info->riid = riid;
+	tx_info->rcid = ctx->rcid;
+	tx_info->size = size;
+	tx_info->size_remaining = size;
+	tx_info->tracer_pkt = tx_flags & GLINK_TX_TRACER_PKT ? true : false;
+	tx_info->iovec = iovec ? iovec : (void *)tx_info;
+	tx_info->vprovider = vbuf_provider;
+	tx_info->pprovider = pbuf_provider;
+	tx_info->intent_size = intent_size;
+	tx_info->cookie = cookie;
+
+	/* schedule packet for transmit */
+	if ((tx_flags & GLINK_TX_SINGLE_THREADED) &&
+	    (ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		ret = xprt_single_threaded_tx(ctx->transport_ptr,
+					       ctx, tx_info);
+	else
+		xprt_schedule_tx(ctx->transport_ptr, ctx, tx_info);
+
+	rwref_read_put(&ctx->ch_state_lhb2);
+	return ret;
+}
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return:	-EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *		transmit operation (not fully opened); -EAGAIN if remote side
+ *		has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+							uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, data, NULL, size,
+			       tx_linear_vbuf_provider, NULL, tx_flags);
+}
+EXPORT_SYMBOL(glink_tx);
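+
+/*
+ * Example (illustrative only; the buffer, length and pkt_priv are
+ * hypothetical): transmit a linear buffer once the channel is fully open,
+ * asking the remote side to queue an rx intent if none large enough exists.
+ *
+ *	ret = glink_tx(handle, pkt_priv, buf, len, GLINK_TX_REQ_INTENT);
+ *	if (ret == -EAGAIN)
+ *		retry later - no suitably sized remote rx intent yet
+ */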
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data type that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent_ptr;
+	int ret = 0;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		/* Can only queue rx intents if channel is fully opened */
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	intent_ptr = ch_push_local_rx_intent(ctx, pkt_priv, size);
+	if (!intent_ptr) {
+		GLINK_ERR_CH(ctx,
+			"%s: Intent pointer allocation failed size[%zu]\n",
+			__func__, size);
+		return -ENOMEM;
+	}
+	GLINK_DBG_CH(ctx, "%s: L[%u]:%zu\n", __func__, intent_ptr->id,
+			intent_ptr->intent_size);
+
+	if (ctx->transport_ptr->capabilities & GCAP_INTENTLESS)
+		return ret;
+
+	/* notify remote side of rx intent */
+	ret = ctx->transport_ptr->ops->tx_cmd_local_rx_intent(
+		ctx->transport_ptr->ops, ctx->lcid, size, intent_ptr->id);
+	if (ret)
+		/* unable to transmit, dequeue intent */
+		ch_remove_local_rx_intent(ctx, intent_ptr->id);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_queue_rx_intent);
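+
+/*
+ * Example (illustrative only; the size is hypothetical): queue an rx intent
+ * so the remote side can transmit up to the given size. The pkt_priv value
+ * is returned to the client with the matching rx notification.
+ *
+ *	ret = glink_queue_rx_intent(handle, pkt_priv, 4096);
+ */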
+
+/**
+ * glink_rx_intent_exists() - Check if an intent exists.
+ *
+ * @handle:	handle returned by glink_open()
+ * @size:	size of an intent to check or 0 for any intent
+ *
+ * Return:	True if an intent exists with a size greater than or equal to
+ *		@size, else false
+ */
+bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *intent;
+	unsigned long flags;
+
+	if (!ctx || !ch_is_fully_opened(ctx))
+		return false;
+
+	spin_lock_irqsave(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+	list_for_each_entry(intent, &ctx->local_rx_intent_list, list) {
+		if (size <= intent->intent_size) {
+			spin_unlock_irqrestore(
+				&ctx->local_rx_intent_lst_lock_lhc1, flags);
+			return true;
+		}
+	}
+	spin_unlock_irqrestore(&ctx->local_rx_intent_lst_lock_lhc1, flags);
+
+	return false;
+}
+EXPORT_SYMBOL(glink_rx_intent_exists);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle:	handle returned by glink_open()
+ * @ptr:	data pointer provided in the notify_rx() call
+ * @reuse:	if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	struct glink_core_rx_intent *liid_ptr;
+	uint32_t id;
+	int ret = 0;
+
+	liid_ptr = ch_get_local_rx_intent_notified(ctx, ptr);
+
+	if (IS_ERR_OR_NULL(liid_ptr)) {
+		/* invalid pointer */
+		GLINK_ERR_CH(ctx, "%s: Invalid pointer %p\n", __func__, ptr);
+		return -EINVAL;
+	}
+
+	GLINK_INFO_PERF_CH(ctx, "%s: L[%u]: data[%p]. TID %u\n",
+			__func__, liid_ptr->id, ptr, current->pid);
+	id = liid_ptr->id;
+	if (reuse) {
+		ret = ctx->transport_ptr->ops->reuse_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+		if (ret) {
+			GLINK_ERR_CH(ctx, "%s: Intent reuse err %d for %p\n",
+					__func__, ret, ptr);
+			ret = -ENOBUFS;
+			reuse = false;
+			ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+		}
+	} else {
+		ctx->transport_ptr->ops->deallocate_rx_intent(
+					ctx->transport_ptr->ops, liid_ptr);
+	}
+	ch_remove_local_rx_intent_notified(ctx, liid_ptr, reuse);
+	/* send rx done */
+	ctx->transport_ptr->ops->tx_cmd_local_rx_done(ctx->transport_ptr->ops,
+			ctx->lcid, id, reuse);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_rx_done);
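+
+/*
+ * Example (illustrative only): from the client's rx notification path,
+ * return the received buffer to G-Link and request that the intent be
+ * reused for future packets.
+ *
+ *	glink_rx_done(handle, ptr, true);
+ */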
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @iovec:	pointer to the vector (must remain valid until notify_tx_done
+ *		notification)
+ * @size:	size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *		in virtual address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *		in physical address space
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *           transmit operation (not fully opened); -EAGAIN if remote side has
+ *           not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+	void *iovec, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	uint32_t tx_flags)
+{
+	return glink_tx_common(handle, pkt_priv, NULL, iovec, size,
+			vbuf_provider, pbuf_provider, tx_flags);
+}
+EXPORT_SYMBOL(glink_txv);
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	ctx->lsigs = sigs;
+
+	ret = ctx->transport_ptr->ops->tx_cmd_set_sigs(ctx->transport_ptr->ops,
+			ctx->lcid, ctx->lsigs);
+	GLINK_INFO_CH(ctx, "%s: Sent SIGNAL SET command\n", __func__);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_sigs_set);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx || !sigs)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->lsigs;
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_local_get);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx || !sigs)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	*sigs = ctx->rsigs;
+	return 0;
+}
+EXPORT_SYMBOL(glink_sigs_remote_get);
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info:	Data structure containing the link identification and callback.
+ * @priv:	Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive the updates about a
+ * link's/transport's state. This notifier needs to be registered before
+ * attempting to open a channel.
+ *
+ * Return: a reference to the notifier handle.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+				   void *priv)
+{
+	struct link_state_notifier_info *notif_info;
+
+	if (!link_info || !link_info->glink_link_state_notif_cb)
+		return ERR_PTR(-EINVAL);
+
+	notif_info = kzalloc(sizeof(*notif_info), GFP_KERNEL);
+	if (!notif_info) {
+		GLINK_ERR("%s: Error allocating link state notifier info\n",
+			  __func__);
+		return ERR_PTR(-ENOMEM);
+	}
+	if (link_info->transport)
+		strlcpy(notif_info->transport, link_info->transport,
+			GLINK_NAME_SIZE);
+
+	if (link_info->edge)
+		strlcpy(notif_info->edge, link_info->edge, GLINK_NAME_SIZE);
+	notif_info->priv = priv;
+	notif_info->glink_link_state_notif_cb =
+				link_info->glink_link_state_notif_cb;
+
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_add_tail(&notif_info->list, &link_state_notifier_list);
+	mutex_unlock(&link_state_notifier_lock_lha1);
+
+	notif_if_up_all_xprts(notif_info);
+	return notif_info;
+}
+EXPORT_SYMBOL(glink_register_link_state_cb);
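+
+/*
+ * Example (illustrative only; the edge name and callback are hypothetical):
+ * register for link state notifications before attempting glink_open().
+ * Leaving .transport unset matches any transport on the edge.
+ *
+ *	struct glink_link_info link_info = {
+ *		.edge = "example_edge",
+ *		.glink_link_state_notif_cb = example_link_state_cb,
+ *	};
+ *	void *notif_handle = glink_register_link_state_cb(&link_info, priv);
+ *
+ *	if (IS_ERR(notif_handle))
+ *		return PTR_ERR(notif_handle);
+ */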
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * @notif_handle:	Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving the updates
+ * about a link's/transport's state.
+ */
+void glink_unregister_link_state_cb(void *notif_handle)
+{
+	struct link_state_notifier_info *notif_info, *tmp_notif_info;
+
+	if (IS_ERR_OR_NULL(notif_handle))
+		return;
+
+	mutex_lock(&link_state_notifier_lock_lha1);
+	list_for_each_entry_safe(notif_info, tmp_notif_info,
+				 &link_state_notifier_list, list) {
+		if (notif_info == notif_handle) {
+			list_del(&notif_info->list);
+			mutex_unlock(&link_state_notifier_lock_lha1);
+			kfree(notif_info);
+			return;
+		}
+	}
+	mutex_unlock(&link_state_notifier_lock_lha1);
+}
+EXPORT_SYMBOL(glink_unregister_link_state_cb);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle:	Channel handle in which the latency is required.
+ * @latency_us:	Latency requirement in units of micro-seconds.
+ * @pkt_size:	Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+	unsigned long req_rate_kBps;
+
+	if (!ctx || !latency_us || !pkt_size)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	req_rate_kBps = glink_qos_calc_rate_kBps(pkt_size, latency_us);
+
+	ret = glink_qos_assign_priority(ctx, req_rate_kBps);
+	if (ret < 0)
+		GLINK_ERR_CH(ctx, "%s: QoS %lu:%zu cannot be met\n",
+			     __func__, latency_us, pkt_size);
+
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_latency);
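+
+/*
+ * Example (illustrative only; the numbers are hypothetical): request that a
+ * worst-case 1024-byte packet be delivered within 500 microseconds.
+ *
+ *	ret = glink_qos_latency(handle, 500, 1024);
+ *	if (ret < 0)
+ *		the requested QoS cannot be met
+ */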
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle:	Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	ret = glink_qos_reset_priority(ctx);
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_cancel);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle:	Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by clients to notify G-Link of the start of a
+ * transmission that requires a certain QoS. Clients must account for the
+ * QoS ramp time to ensure the QoS is met.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+	int ret;
+	unsigned long flags;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	spin_lock_irqsave(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	spin_lock(&ctx->tx_lists_lock_lhc3);
+	ret = glink_qos_add_ch_tx_intent(ctx);
+	spin_unlock(&ctx->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&ctx->transport_ptr->tx_ready_lock_lhb3, flags);
+	return ret;
+}
+EXPORT_SYMBOL(glink_qos_start);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle:	Channel handle for which the QoS ramp time is required.
+ * @pkt_size:	Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time is returned in units of micro-seconds on success,
+ *	   standard Linux error codes cast to unsigned long on error.
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return (unsigned long)-EINVAL;
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		return (unsigned long)-EBUSY;
+	}
+
+	return ctx->transport_ptr->ops->get_power_vote_ramp_time(
+			ctx->transport_ptr->ops,
+			glink_prio_to_power_state(ctx->transport_ptr,
+						ctx->initial_priority));
+}
+EXPORT_SYMBOL(glink_qos_get_ramp_time);
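+
+/*
+ * Illustrative sketch (hypothetical caller and packet size): budget for the
+ * power ramp before the first QoS-sensitive transmit. Errors are encoded as
+ * negative error codes cast to unsigned long, as documented above.
+ *
+ *	unsigned long ramp_us = glink_qos_get_ramp_time(handle, SZ_1K);
+ *
+ *	if (!IS_ERR_VALUE(ramp_us))
+ *		usleep_range(ramp_us, ramp_us + 10);
+ */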
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a return value > 0 is returned indicating that some events
+ * were processed, clients should only use the notification functions passed
+ * into glink_open() to determine if an entire packet has been received since
+ * some events may be internal details that are not visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	if (!ctx->transport_ptr ||
+	    !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->poll(ctx->transport_ptr->ops,
+					     ctx->lcid);
+}
+EXPORT_SYMBOL(glink_rpm_rx_poll);
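+
+/*
+ * Illustrative polling sketch (hypothetical caller): with the RPM rx
+ * interrupt masked, drain pending events until none remain. Complete
+ * packets are still delivered through the glink_open() callbacks.
+ *
+ *	while (glink_rpm_rx_poll(handle) > 0)
+ *		;
+ */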
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle:	Channel handle in which this operation is performed.
+ * @mask:	Flag to mask or unmask the interrupt.
+ * @pstruct:	Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * "mask" set to true indicates masking the interrupt and when set to false
+ * indicates unmasking the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+
+	if (!ch_is_fully_opened(ctx))
+		return -EBUSY;
+
+	if (!ctx->transport_ptr ||
+	    !(ctx->transport_ptr->capabilities & GCAP_INTENTLESS))
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->mask_rx_irq(ctx->transport_ptr->ops,
+						    ctx->lcid, mask, pstruct);
+
+}
+EXPORT_SYMBOL(glink_rpm_mask_rx_interrupt);
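+
+/*
+ * Illustrative sketch (hypothetical caller; passing NULL for pstruct is an
+ * assumption): mask the RPM rx interrupt, service the channel by polling,
+ * then unmask the interrupt again.
+ *
+ *	if (!glink_rpm_mask_rx_interrupt(handle, true, NULL)) {
+ *		while (glink_rpm_rx_poll(handle) > 0)
+ *			;
+ *		glink_rpm_mask_rx_interrupt(handle, false, NULL);
+ *	}
+ */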
+
+/**
+ * glink_wait_link_down() - Get status of link
+ * @handle:	Channel handle in which this operation is performed
+ *
+ * This function will query the transport for its status, to allow clients to
+ * proceed in cleanup operations.
+ */
+int glink_wait_link_down(void *handle)
+{
+	struct channel_ctx *ctx = (struct channel_ctx *)handle;
+
+	if (!ctx)
+		return -EINVAL;
+	if (!ctx->transport_ptr)
+		return -EOPNOTSUPP;
+
+	return ctx->transport_ptr->ops->wait_link_down(ctx->transport_ptr->ops);
+}
+EXPORT_SYMBOL(glink_wait_link_down);
+
+/**
+ * glink_xprt_ctx_release - Free the transport context
+ * @xprt_st_lock:	Handle to the rwref_lock associated with the transport.
+ *
+ * This should only be called when the reference count associated with the
+ * transport goes to zero.
+ */
+void glink_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_dbgfs xprt_rm_dbgfs;
+	struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+				struct glink_core_xprt_ctx, xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
+				xprt_ctx->name,
+				xprt_ctx->edge);
+	xprt_rm_dbgfs.curr_name = xprt_ctx->name;
+	xprt_rm_dbgfs.par_name = "xprt";
+	glink_debugfs_remove_recur(&xprt_rm_dbgfs);
+	GLINK_INFO("%s: xprt debugfs removec\n", __func__);
+	rwref_put(&xprt_ctx->edge_ctx->edge_ref_lock_lhd1);
+	kthread_stop(xprt_ctx->tx_task);
+	xprt_ctx->tx_task = NULL;
+	glink_core_deinit_xprt_qos_cfg(xprt_ctx);
+	kfree(xprt_ctx);
+	xprt_ctx = NULL;
+}
+
+/**
+ * glink_dummy_xprt_ctx_release - free the dummy transport context
+ * @xprt_st_lock:	Handle to the rwref_lock associated with the transport.
+ *
+ * The release function is called when all the channels on this dummy
+ * transport are closed and the reference count goes to zero.
+ */
+static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = container_of(xprt_st_lock,
+				struct glink_core_xprt_ctx, xprt_state_lhb0);
+	GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
+				xprt_ctx->name,
+				xprt_ctx->edge);
+	kfree(xprt_ctx);
+}
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name:	Name of the transport.
+ * @id:		Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+	if (!strcmp(name, "smem")) {
+		*id = SMEM_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mailbox")) {
+		*id = SMEM_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "spi")) {
+		*id = SPIV2_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "smd_trans")) {
+		*id = SMD_TRANS_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "lloop")) {
+		*id = LLOOP_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mock")) {
+		*id = MOCK_XPRT_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mock_low")) {
+		*id = MOCK_XPRT_LOW_ID;
+		return 0;
+	}
+	if (!strcmp(name, "mock_high")) {
+		*id = MOCK_XPRT_HIGH_ID;
+		return 0;
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL(glink_xprt_name_to_id);
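+
+/*
+ * Illustrative sketch: resolve a transport name to its numeric id, e.g.
+ * before comparing it against a channel's requested transport.
+ *
+ *	uint16_t id;
+ *
+ *	if (!glink_xprt_name_to_id("smem", &id))
+ *		pr_debug("glink: smem transport id %u\n", id);
+ */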
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle:	The handle to the qos related node in DT.
+ * @cfg:	The transport configuration to be filled.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+				struct glink_core_transport_cfg *cfg)
+{
+	int rc, i;
+	char *key;
+	uint32_t num_flows;
+	uint32_t *arr32;
+
+	if (!phandle) {
+		GLINK_ERR("%s: phandle is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	key = "qcom,mtu-size";
+	rc = of_property_read_u32(phandle, key, (uint32_t *)&cfg->mtu);
+	if (rc) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		return -ENODEV;
+	}
+
+	key = "qcom,tput-stats-cycle";
+	rc = of_property_read_u32(phandle, key, &cfg->token_count);
+	if (rc) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	key = "qcom,flow-info";
+	if (!of_find_property(phandle, key, &num_flows)) {
+		GLINK_ERR("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto error;
+	}
+
+	num_flows /= sizeof(uint32_t);
+	if (num_flows % 2) {
+		GLINK_ERR("%s: Invalid flow info length\n", __func__);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	num_flows /= 2;
+	cfg->num_flows = num_flows;
+
+	cfg->flow_info = kmalloc_array(num_flows, sizeof(*(cfg->flow_info)),
+					GFP_KERNEL);
+	if (!cfg->flow_info) {
+		GLINK_ERR("%s: Memory allocation for flow info failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto error;
+	}
+	arr32 = kmalloc_array(num_flows * 2, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32) {
+		GLINK_ERR("%s: Memory allocation for temporary array failed\n",
+				__func__);
+		rc = -ENOMEM;
+		goto temp_mem_alloc_fail;
+	}
+
+	of_property_read_u32_array(phandle, key, arr32, num_flows * 2);
+
+	for (i = 0; i < num_flows; i++) {
+		cfg->flow_info[i].mtu_tx_time_us = arr32[2 * i];
+		cfg->flow_info[i].power_state = arr32[2 * i + 1];
+	}
+
+	kfree(arr32);
+	of_node_put(phandle);
+	return 0;
+
+temp_mem_alloc_fail:
+	kfree(cfg->flow_info);
+error:
+	cfg->mtu = 0;
+	cfg->token_count = 0;
+	cfg->num_flows = 0;
+	cfg->flow_info = NULL;
+	return rc;
+}
+EXPORT_SYMBOL(of_get_glink_core_qos_cfg);
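+
+/*
+ * Illustrative device-tree fragment (node name and values are assumptions
+ * for illustration only) matching what of_get_glink_core_qos_cfg() parses:
+ * qcom,mtu-size and qcom,tput-stats-cycle are single cells and
+ * qcom,flow-info is a list of <mtu-tx-time-us power-state> pairs, one pair
+ * per flow/priority bin.
+ *
+ *	glink-qos-config {
+ *		qcom,mtu-size = <0x800>;
+ *		qcom,tput-stats-cycle = <10>;
+ *		qcom,flow-info = <0x0 0x0>,
+ *				 <0x7d0 0x1>;
+ *	};
+ */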
+
+/**
+ * glink_core_init_xprt_qos_cfg() - Initialize a transport's QoS configuration
+ * @xprt_ptr:	Transport to be initialized with QoS configuration.
+ * @cfg:	Data structure containing QoS configuration.
+ *
+ * This function is used during the transport registration to initialize it
+ * with QoS configuration.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_core_init_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr,
+					 struct glink_core_transport_cfg *cfg)
+{
+	int i;
+	struct sched_param param = { .sched_priority = GLINK_KTHREAD_PRIO };
+
+	xprt_ptr->mtu = cfg->mtu ? cfg->mtu : GLINK_QOS_DEF_MTU;
+	xprt_ptr->num_priority = cfg->num_flows ? cfg->num_flows :
+					GLINK_QOS_DEF_NUM_PRIORITY;
+	xprt_ptr->token_count = cfg->token_count ? cfg->token_count :
+					GLINK_QOS_DEF_NUM_TOKENS;
+
+	xprt_ptr->prio_bin = kcalloc(xprt_ptr->num_priority,
+				sizeof(struct glink_qos_priority_bin),
+				GFP_KERNEL);
+	if (!xprt_ptr->prio_bin) {
+		GLINK_ERR("%s: unable to allocate priority bins\n", __func__);
+		return -ENOMEM;
+	}
+	if (xprt_ptr->num_priority > 1)
+		sched_setscheduler(xprt_ptr->tx_task, SCHED_FIFO, &param);
+	for (i = 1; i < xprt_ptr->num_priority; i++) {
+		xprt_ptr->prio_bin[i].max_rate_kBps =
+			glink_qos_calc_rate_kBps(xprt_ptr->mtu,
+				cfg->flow_info[i].mtu_tx_time_us);
+		xprt_ptr->prio_bin[i].power_state =
+				cfg->flow_info[i].power_state;
+		INIT_LIST_HEAD(&xprt_ptr->prio_bin[i].tx_ready);
+	}
+	xprt_ptr->prio_bin[0].max_rate_kBps = 0;
+	if (cfg->flow_info)
+		xprt_ptr->prio_bin[0].power_state =
+						cfg->flow_info[0].power_state;
+	INIT_LIST_HEAD(&xprt_ptr->prio_bin[0].tx_ready);
+	xprt_ptr->threshold_rate_kBps =
+		xprt_ptr->prio_bin[xprt_ptr->num_priority - 1].max_rate_kBps;
+
+	return 0;
+}
+
+/**
+ * glink_core_deinit_xprt_qos_cfg() - Reset a transport's QoS configuration
+ * @xprt_ptr: Transport to be deinitialized.
+ *
+ * This function is used during the time of transport unregistration to
+ * de-initialize the QoS configuration from a transport.
+ */
+static void glink_core_deinit_xprt_qos_cfg(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	kfree(xprt_ptr->prio_bin);
+	xprt_ptr->prio_bin = NULL;
+	xprt_ptr->mtu = 0;
+	xprt_ptr->num_priority = 0;
+	xprt_ptr->token_count = 0;
+	xprt_ptr->threshold_rate_kBps = 0;
+}
+
+/**
+ * glink_core_register_transport() - register a new transport
+ * @if_ptr:	The interface to the transport.
+ * @cfg:	Description and configuration of the transport.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+				  struct glink_core_transport_cfg *cfg)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	size_t len;
+	uint16_t id;
+	int ret;
+	char log_name[GLINK_NAME_SIZE*2+2] = {0};
+
+	if (!if_ptr || !cfg || !cfg->name || !cfg->edge)
+		return -EINVAL;
+
+	len = strlen(cfg->name);
+	if (len == 0 || len >= GLINK_NAME_SIZE)
+		return -EINVAL;
+
+	len = strlen(cfg->edge);
+	if (len == 0 || len >= GLINK_NAME_SIZE)
+		return -EINVAL;
+
+	if (cfg->versions_entries < 1)
+		return -EINVAL;
+
+	ret = glink_xprt_name_to_id(cfg->name, &id);
+	if (ret)
+		return ret;
+
+	xprt_ptr = kzalloc(sizeof(struct glink_core_xprt_ctx), GFP_KERNEL);
+	if (xprt_ptr == NULL)
+		return -ENOMEM;
+
+	xprt_ptr->id = id;
+	rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+			glink_xprt_ctx_release);
+	strlcpy(xprt_ptr->name, cfg->name, GLINK_NAME_SIZE);
+	strlcpy(xprt_ptr->edge, cfg->edge, GLINK_NAME_SIZE);
+	xprt_ptr->versions = cfg->versions;
+	xprt_ptr->versions_entries = cfg->versions_entries;
+	xprt_ptr->local_version_idx = cfg->versions_entries - 1;
+	xprt_ptr->remote_version_idx = cfg->versions_entries - 1;
+	xprt_ptr->edge_ctx = edge_name_to_ctx_create(xprt_ptr);
+	xprt_ptr->l_features =
+			cfg->versions[cfg->versions_entries - 1].features;
+	if (!if_ptr->poll)
+		if_ptr->poll = dummy_poll;
+	if (!if_ptr->mask_rx_irq)
+		if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+	if (!if_ptr->reuse_rx_intent)
+		if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+	if (!if_ptr->wait_link_down)
+		if_ptr->wait_link_down = dummy_wait_link_down;
+	if (!if_ptr->tx_cmd_tracer_pkt)
+		if_ptr->tx_cmd_tracer_pkt = dummy_tx_cmd_tracer_pkt;
+	if (!if_ptr->get_power_vote_ramp_time)
+		if_ptr->get_power_vote_ramp_time =
+					dummy_get_power_vote_ramp_time;
+	if (!if_ptr->power_vote)
+		if_ptr->power_vote = dummy_power_vote;
+	if (!if_ptr->power_unvote)
+		if_ptr->power_unvote = dummy_power_unvote;
+	xprt_ptr->capabilities = 0;
+	xprt_ptr->ops = if_ptr;
+	spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+	xprt_ptr->next_lcid = 1; /* 0 reserved for default unconfigured */
+	INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+	xprt_ptr->max_cid = cfg->max_cid;
+	xprt_ptr->max_iid = cfg->max_iid;
+	xprt_ptr->local_state = GLINK_XPRT_DOWN;
+	xprt_ptr->remote_neg_completed = false;
+	INIT_LIST_HEAD(&xprt_ptr->channels);
+	INIT_LIST_HEAD(&xprt_ptr->notified);
+	spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
+	mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
+	kthread_init_work(&xprt_ptr->tx_kwork, tx_func);
+	kthread_init_worker(&xprt_ptr->tx_wq);
+	xprt_ptr->tx_task = kthread_run(kthread_worker_fn,
+			&xprt_ptr->tx_wq, "%s_%s_glink_tx",
+			xprt_ptr->edge, xprt_ptr->name);
+	if (IS_ERR_OR_NULL(xprt_ptr->tx_task)) {
+		GLINK_ERR("%s: unable to run thread\n", __func__);
+		glink_core_deinit_xprt_qos_cfg(xprt_ptr);
+		kfree(xprt_ptr);
+		return -ENOMEM;
+	}
+	ret = glink_core_init_xprt_qos_cfg(xprt_ptr, cfg);
+	if (ret < 0) {
+		kthread_stop(xprt_ptr->tx_task);
+		kfree(xprt_ptr);
+		return ret;
+	}
+	INIT_DELAYED_WORK(&xprt_ptr->pm_qos_work, glink_pm_qos_cancel_worker);
+	pm_qos_add_request(&xprt_ptr->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
+			PM_QOS_DEFAULT_VALUE);
+
+	if_ptr->glink_core_priv = xprt_ptr;
+	if_ptr->glink_core_if_ptr = &core_impl;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_add_tail(&xprt_ptr->list_node, &transport_list);
+	mutex_unlock(&transport_list_lock_lha0);
+	glink_debugfs_add_xprt(xprt_ptr);
+	snprintf(log_name, sizeof(log_name), "%s_%s",
+			xprt_ptr->edge, xprt_ptr->name);
+	xprt_ptr->log_ctx = ipc_log_context_create(NUM_LOG_PAGES, log_name, 0);
+	if (!xprt_ptr->log_ctx)
+		GLINK_ERR("%s: unable to create log context for [%s:%s]\n",
+				__func__, xprt_ptr->edge, xprt_ptr->name);
+
+	return 0;
+}
+EXPORT_SYMBOL(glink_core_register_transport);
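+
+/*
+ * Illustrative registration sketch (field values and the my_if/my_versions
+ * names are assumptions for illustration): a transport driver fills in its
+ * glink_transport_if callbacks and describes itself through
+ * glink_core_transport_cfg before registering with the core.
+ *
+ *	struct glink_core_transport_cfg cfg = {
+ *		.name = "smem",
+ *		.edge = "mpss",
+ *		.versions = my_versions,
+ *		.versions_entries = ARRAY_SIZE(my_versions),
+ *		.max_cid = 0xffff,
+ *		.max_iid = 0xffffffff,
+ *	};
+ *
+ *	ret = glink_core_register_transport(&my_if, &cfg);
+ */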
+
+/**
+ * glink_core_unregister_transport() - unregister a transport
+ *
+ * @if_ptr:	The interface to the transport.
+ */
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	GLINK_DBG_XPRT(xprt_ptr, "%s: destroying transport\n", __func__);
+	if (xprt_ptr->local_state != GLINK_XPRT_DOWN) {
+		GLINK_ERR_XPRT(xprt_ptr,
+		"%s: link_down should have been called before this\n",
+		__func__);
+		return;
+	}
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_del(&xprt_ptr->list_node);
+	mutex_unlock(&transport_list_lock_lha0);
+	flush_delayed_work(&xprt_ptr->pm_qos_work);
+	pm_qos_remove_request(&xprt_ptr->pm_qos_req);
+	ipc_log_context_destroy(xprt_ptr->log_ctx);
+	xprt_ptr->log_ctx = NULL;
+	rwref_put(&xprt_ptr->xprt_state_lhb0);
+}
+EXPORT_SYMBOL(glink_core_unregister_transport);
+
+/**
+ * glink_core_link_up() - transport link-up notification
+ *
+ * @if_ptr:	pointer to transport interface
+ */
+static void glink_core_link_up(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	/* start local negotiation */
+	xprt_ptr->local_state = GLINK_XPRT_NEGOTIATING;
+	xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+	xprt_ptr->l_features =
+		xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+	if_ptr->tx_cmd_version(if_ptr,
+		    xprt_ptr->versions[xprt_ptr->local_version_idx].version,
+		    xprt_ptr->versions[xprt_ptr->local_version_idx].features);
+
+}
+
+/**
+ * glink_core_link_down() - transport link-down notification
+ *
+ * @if_ptr:	pointer to transport interface
+ */
+static void glink_core_link_down(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	rwref_write_get(&xprt_ptr->xprt_state_lhb0);
+	xprt_ptr->next_lcid = 1;
+	xprt_ptr->local_state = GLINK_XPRT_DOWN;
+	xprt_ptr->local_version_idx = xprt_ptr->versions_entries - 1;
+	xprt_ptr->remote_version_idx = xprt_ptr->versions_entries - 1;
+	xprt_ptr->l_features =
+		xprt_ptr->versions[xprt_ptr->local_version_idx].features;
+	xprt_ptr->remote_neg_completed = false;
+	rwref_write_put(&xprt_ptr->xprt_state_lhb0);
+	GLINK_DBG_XPRT(xprt_ptr,
+		"%s: Flushing work from tx_wq. Thread: %u\n", __func__,
+		current->pid);
+	kthread_flush_worker(&xprt_ptr->tx_wq);
+	glink_core_channel_cleanup(xprt_ptr);
+	check_link_notifier_and_notify(xprt_ptr, GLINK_LINK_STATE_DOWN);
+}
+
+/**
+ * glink_create_dummy_xprt_ctx() - create a dummy transport that replaces all
+ *				the transport interface functions with a dummy
+ * @orig_xprt_ctx:	Pointer to the original transport context.
+ *
+ * The dummy transport is used only when it is swapped in for the actual
+ * transport pointer in the SSR/unregister case.
+ *
+ * Return:	Pointer to dummy transport context.
+ */
+static struct glink_core_xprt_ctx *glink_create_dummy_xprt_ctx(
+				struct glink_core_xprt_ctx *orig_xprt_ctx)
+{
+
+	struct glink_core_xprt_ctx *xprt_ptr;
+	struct glink_transport_if *if_ptr;
+
+	xprt_ptr = kzalloc(sizeof(*xprt_ptr), GFP_KERNEL);
+	if (!xprt_ptr)
+		return ERR_PTR(-ENOMEM);
+	if_ptr = kmalloc(sizeof(*if_ptr), GFP_KERNEL);
+	if (!if_ptr) {
+		kfree(xprt_ptr);
+		return ERR_PTR(-ENOMEM);
+	}
+	rwref_lock_init(&xprt_ptr->xprt_state_lhb0,
+			glink_dummy_xprt_ctx_release);
+
+	strlcpy(xprt_ptr->name, "dummy", GLINK_NAME_SIZE);
+	strlcpy(xprt_ptr->edge, orig_xprt_ctx->edge, GLINK_NAME_SIZE);
+	if_ptr->poll = dummy_poll;
+	if_ptr->mask_rx_irq = dummy_mask_rx_irq;
+	if_ptr->reuse_rx_intent = dummy_reuse_rx_intent;
+	if_ptr->wait_link_down = dummy_wait_link_down;
+	if_ptr->allocate_rx_intent = dummy_allocate_rx_intent;
+	if_ptr->deallocate_rx_intent = dummy_deallocate_rx_intent;
+	if_ptr->tx_cmd_local_rx_intent = dummy_tx_cmd_local_rx_intent;
+	if_ptr->tx_cmd_local_rx_done = dummy_tx_cmd_local_rx_done;
+	if_ptr->tx = dummy_tx;
+	if_ptr->tx_cmd_rx_intent_req = dummy_tx_cmd_rx_intent_req;
+	if_ptr->tx_cmd_remote_rx_intent_req_ack =
+				dummy_tx_cmd_remote_rx_intent_req_ack;
+	if_ptr->tx_cmd_set_sigs = dummy_tx_cmd_set_sigs;
+	if_ptr->tx_cmd_ch_close = dummy_tx_cmd_ch_close;
+	if_ptr->tx_cmd_ch_remote_close_ack = dummy_tx_cmd_ch_remote_close_ack;
+
+	xprt_ptr->ops = if_ptr;
+	xprt_ptr->log_ctx = log_ctx;
+	spin_lock_init(&xprt_ptr->xprt_ctx_lock_lhb1);
+	INIT_LIST_HEAD(&xprt_ptr->free_lcid_list);
+	xprt_ptr->local_state = GLINK_XPRT_DOWN;
+	xprt_ptr->remote_neg_completed = false;
+	INIT_LIST_HEAD(&xprt_ptr->channels);
+	xprt_ptr->dummy_in_use = true;
+	INIT_LIST_HEAD(&xprt_ptr->notified);
+	spin_lock_init(&xprt_ptr->tx_ready_lock_lhb3);
+	mutex_init(&xprt_ptr->xprt_dbgfs_lock_lhb4);
+	return xprt_ptr;
+}
+
+/**
+ * glink_core_channel_cleanup() - cleanup all channels for the transport
+ *
+ * @xprt_ptr:	pointer to transport context
+ *
+ * This function should be called either from link_down or ssr
+ */
+static void glink_core_channel_cleanup(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	unsigned long flags, d_flags;
+	struct channel_ctx *ctx, *tmp_ctx;
+	struct channel_lcid *temp_lcid, *temp_lcid1;
+	struct glink_core_xprt_ctx *dummy_xprt_ctx;
+
+	dummy_xprt_ctx = glink_create_dummy_xprt_ctx(xprt_ptr);
+	if (IS_ERR_OR_NULL(dummy_xprt_ctx)) {
+		GLINK_ERR("%s: Dummy Transport creation failed\n", __func__);
+		return;
+	}
+
+	rwref_read_get(&dummy_xprt_ctx->xprt_state_lhb0);
+	rwref_read_get(&xprt_ptr->xprt_state_lhb0);
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+
+	list_for_each_entry_safe(ctx, tmp_ctx, &xprt_ptr->channels,
+						port_list_node) {
+		rwref_write_get_atomic(&ctx->ch_state_lhb2, true);
+		if (ctx->local_open_state == GLINK_CHANNEL_OPENED ||
+			ctx->local_open_state == GLINK_CHANNEL_OPENING) {
+			rwref_get(&dummy_xprt_ctx->xprt_state_lhb0);
+			list_move_tail(&ctx->port_list_node,
+					&dummy_xprt_ctx->channels);
+			ctx->transport_ptr = dummy_xprt_ctx;
+			rwref_write_put(&ctx->ch_state_lhb2);
+		} else {
+			/* local state is in either CLOSED or CLOSING */
+			spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1,
+							flags);
+			spin_unlock_irqrestore(
+					&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+					d_flags);
+			glink_core_remote_close_common(ctx, true);
+			if (ctx->local_open_state == GLINK_CHANNEL_CLOSING)
+				glink_core_ch_close_ack_common(ctx, true);
+			/* Channel should be fully closed now. Delete here */
+			if (ch_is_fully_closed(ctx))
+				glink_delete_ch_from_list(ctx, false);
+			rwref_write_put(&ctx->ch_state_lhb2);
+			spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+						d_flags);
+			spin_lock_irqsave(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+		}
+	}
+	list_for_each_entry_safe(temp_lcid, temp_lcid1,
+			&xprt_ptr->free_lcid_list, list_node) {
+		list_del(&temp_lcid->list_node);
+		kfree(temp_lcid);
+	}
+	dummy_xprt_ctx->dummy_in_use = false;
+	spin_unlock_irqrestore(&xprt_ptr->xprt_ctx_lock_lhb1, flags);
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	rwref_read_put(&xprt_ptr->xprt_state_lhb0);
+
+	spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	while (!list_empty(&dummy_xprt_ctx->channels)) {
+		ctx = list_first_entry(&dummy_xprt_ctx->channels,
+					struct channel_ctx, port_list_node);
+		list_move_tail(&ctx->port_list_node,
+					&dummy_xprt_ctx->notified);
+
+		rwref_get(&ctx->ch_state_lhb2);
+		spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+				d_flags);
+		glink_core_remote_close_common(ctx, false);
+		spin_lock_irqsave(&dummy_xprt_ctx->xprt_ctx_lock_lhb1,
+				d_flags);
+		rwref_put(&ctx->ch_state_lhb2);
+	}
+	spin_unlock_irqrestore(&dummy_xprt_ctx->xprt_ctx_lock_lhb1, d_flags);
+	rwref_read_put(&dummy_xprt_ctx->xprt_state_lhb0);
+}
+
+/**
+ * glink_core_rx_cmd_version() - receive version/features from remote system
+ *
+ * @if_ptr:	pointer to transport interface
+ * @r_version:	remote version
+ * @r_features:	remote features
+ *
+ * This function is called in response to a remote-initiated version/feature
+ * negotiation sequence.
+ */
+static void glink_core_rx_cmd_version(struct glink_transport_if *if_ptr,
+	uint32_t r_version, uint32_t r_features)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+	const struct glink_core_version *versions = xprt_ptr->versions;
+	bool neg_complete = false;
+	uint32_t l_version;
+
+	if (xprt_is_fully_opened(xprt_ptr)) {
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: Negotiation already complete\n", __func__);
+		return;
+	}
+
+	l_version = versions[xprt_ptr->remote_version_idx].version;
+
+	GLINK_INFO_XPRT(xprt_ptr,
+		"%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+		l_version, xprt_ptr->l_features, r_version, r_features);
+
+	if (l_version > r_version) {
+		/* Find matching version */
+		while (true) {
+			uint32_t rver_idx;
+
+			if (xprt_ptr->remote_version_idx == 0) {
+				/* version negotiation failed */
+				GLINK_ERR_XPRT(xprt_ptr,
+					"%s: Transport negotiation failed\n",
+					__func__);
+				l_version = 0;
+				xprt_ptr->l_features = 0;
+				break;
+			}
+			--xprt_ptr->remote_version_idx;
+			rver_idx = xprt_ptr->remote_version_idx;
+
+			if (versions[rver_idx].version <= r_version) {
+				/* found a potential match */
+				l_version = versions[rver_idx].version;
+				xprt_ptr->l_features =
+					versions[rver_idx].features;
+				break;
+			}
+		}
+	}
+
+	if (l_version == r_version) {
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Remote and local version are matched %x:%08x\n",
+			__func__, r_version, r_features);
+		if (xprt_ptr->l_features != r_features) {
+			uint32_t rver_idx = xprt_ptr->remote_version_idx;
+
+			xprt_ptr->l_features = versions[rver_idx]
+						.negotiate_features(if_ptr,
+					&xprt_ptr->versions[rver_idx],
+					r_features);
+			GLINK_INFO_XPRT(xprt_ptr,
+				"%s: negotiate features %x:%08x\n",
+				__func__, l_version, xprt_ptr->l_features);
+		}
+		neg_complete = true;
+	}
+	if_ptr->tx_cmd_version_ack(if_ptr, l_version, xprt_ptr->l_features);
+
+	if (neg_complete) {
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Remote negotiation complete %x:%08x\n", __func__,
+			l_version, xprt_ptr->l_features);
+
+		if (xprt_ptr->local_state == GLINK_XPRT_OPENED) {
+			xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+							l_version,
+							xprt_ptr->l_features);
+		}
+		if_ptr->glink_core_priv->remote_neg_completed = true;
+		if (xprt_is_fully_opened(xprt_ptr))
+			check_link_notifier_and_notify(xprt_ptr,
+						       GLINK_LINK_STATE_UP);
+	}
+}
+
+/**
+ * glink_core_rx_cmd_version_ack() - receive negotiation ack from remote system
+ *
+ * @if_ptr:	pointer to transport interface
+ * @r_version:	remote version response
+ * @r_features:	remote features response
+ *
+ * This function is called in response to a local-initiated version/feature
+ * negotiation sequence and is the counter-offer from the remote side based
+ * upon the initial version and feature set requested.
+ */
+static void glink_core_rx_cmd_version_ack(struct glink_transport_if *if_ptr,
+	uint32_t r_version, uint32_t r_features)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+	const struct glink_core_version *versions = xprt_ptr->versions;
+	uint32_t l_version;
+	bool neg_complete = false;
+
+	if (xprt_is_fully_opened(xprt_ptr)) {
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: Negotiation already complete\n", __func__);
+		return;
+	}
+
+	l_version = versions[xprt_ptr->local_version_idx].version;
+
+	GLINK_INFO_XPRT(xprt_ptr,
+		"%s: [local]%x:%08x [remote]%x:%08x\n", __func__,
+		 l_version, xprt_ptr->l_features, r_version, r_features);
+
+	if (l_version > r_version) {
+		/* find matching version */
+		while (true) {
+			uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+			if (xprt_ptr->local_version_idx == 0) {
+				/* version negotiation failed */
+				xprt_ptr->local_state = GLINK_XPRT_FAILED;
+				GLINK_ERR_XPRT(xprt_ptr,
+					"%s: Transport negotiation failed\n",
+					__func__);
+				l_version = 0;
+				xprt_ptr->l_features = 0;
+				break;
+			}
+			--xprt_ptr->local_version_idx;
+			lver_idx = xprt_ptr->local_version_idx;
+
+			if (versions[lver_idx].version <= r_version) {
+				/* found a potential match */
+				l_version = versions[lver_idx].version;
+				xprt_ptr->l_features =
+					versions[lver_idx].features;
+				break;
+			}
+		}
+	} else if (l_version == r_version) {
+		if (xprt_ptr->l_features != r_features) {
+			/* version matches, negotiate features */
+			uint32_t lver_idx = xprt_ptr->local_version_idx;
+
+			xprt_ptr->l_features = versions[lver_idx]
+						.negotiate_features(if_ptr,
+							&versions[lver_idx],
+							r_features);
+			GLINK_INFO_XPRT(xprt_ptr,
+				"%s: negotiation features %x:%08x\n",
+				__func__, l_version, xprt_ptr->l_features);
+		} else {
+			neg_complete = true;
+		}
+	} else {
+		/*
+		 * r_version > l_version
+		 *
+		 * Remote responded with a version greater than what we
+		 * requested, which is invalid and is treated as a failure
+		 * of the negotiation algorithm.
+		 */
+		GLINK_ERR_XPRT(xprt_ptr,
+			"%s: [local]%x:%08x [remote]%x:%08x neg failure\n",
+			__func__, l_version, xprt_ptr->l_features, r_version,
+			r_features);
+		xprt_ptr->local_state = GLINK_XPRT_FAILED;
+		l_version = 0;
+		xprt_ptr->l_features = 0;
+	}
+
+	if (neg_complete) {
+		/* negotiation complete */
+		GLINK_INFO_XPRT(xprt_ptr,
+			"%s: Local negotiation complete %x:%08x\n",
+			__func__, l_version, xprt_ptr->l_features);
+
+		if (xprt_ptr->remote_neg_completed) {
+			xprt_ptr->capabilities = if_ptr->set_version(if_ptr,
+							l_version,
+							xprt_ptr->l_features);
+		}
+
+		xprt_ptr->local_state = GLINK_XPRT_OPENED;
+		if (xprt_is_fully_opened(xprt_ptr))
+			check_link_notifier_and_notify(xprt_ptr,
+						       GLINK_LINK_STATE_UP);
+	} else {
+		if_ptr->tx_cmd_version(if_ptr, l_version, xprt_ptr->l_features);
+	}
+}
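+
+/*
+ * Worked negotiation example (illustrative): the local side first offers the
+ * highest entry in its version table, e.g. version 3. If the remote acks
+ * with version 2, l_version > r_version and local_version_idx is walked down
+ * until versions[idx].version <= 2; that version and its feature set are
+ * then re-offered via tx_cmd_version(). If the remote instead acks version 3
+ * but with fewer features, negotiate_features() trims l_features and another
+ * offer is sent. Negotiation completes once both sides agree on the version
+ * and feature set, at which point set_version() is applied and link-up is
+ * notified.
+ */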
+
+/**
+ * find_l_ctx_get() - find a local channel context based on a remote one
+ * @r_ctx:	The remote channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding local ctx, or NULL if not found.
+ */
+static struct channel_ctx *find_l_ctx_get(struct channel_ctx *r_ctx)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct channel_ctx *ctx;
+	unsigned long flags;
+	struct channel_ctx *l_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(r_ctx->transport_ptr->edge, xprt->edge)) {
+			rwref_write_get(&xprt->xprt_state_lhb0);
+			if (xprt->local_state != GLINK_XPRT_OPENED) {
+				rwref_write_put(&xprt->xprt_state_lhb0);
+				continue;
+			}
+			spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+			list_for_each_entry(ctx, &xprt->channels,
+							port_list_node)
+				if (!strcmp(ctx->name, r_ctx->name) &&
+							ctx->local_xprt_req &&
+							ctx->local_xprt_resp) {
+					l_ctx = ctx;
+					rwref_get(&l_ctx->ch_state_lhb2);
+				}
+			spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+									flags);
+			rwref_write_put(&xprt->xprt_state_lhb0);
+		}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	return l_ctx;
+}
+
+/**
+ * find_r_ctx_get() - find a remote channel context based on a local one
+ * @l_ctx:	The local channel to use as a lookup key.
+ *
+ * If the channel is found, the reference count is incremented to ensure the
+ * lifetime of the channel context.  The caller must call rwref_put() when done.
+ *
+ * Return: The corresponding remote ctx, or NULL if not found.
+ */
+static struct channel_ctx *find_r_ctx_get(struct channel_ctx *l_ctx)
+{
+	struct glink_core_xprt_ctx *xprt;
+	struct channel_ctx *ctx;
+	unsigned long flags;
+	struct channel_ctx *r_ctx = NULL;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge)) {
+			rwref_write_get(&xprt->xprt_state_lhb0);
+			if (xprt->local_state != GLINK_XPRT_OPENED) {
+				rwref_write_put(&xprt->xprt_state_lhb0);
+				continue;
+			}
+			spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+			list_for_each_entry(ctx, &xprt->channels,
+							port_list_node)
+				if (!strcmp(ctx->name, l_ctx->name) &&
+							ctx->remote_xprt_req &&
+							ctx->remote_xprt_resp) {
+					r_ctx = ctx;
+					rwref_get(&r_ctx->ch_state_lhb2);
+				}
+			spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+									flags);
+			rwref_write_put(&xprt->xprt_state_lhb0);
+		}
+	mutex_unlock(&transport_list_lock_lha0);
+
+	return r_ctx;
+}
+
+/**
+ * will_migrate() - will a channel migrate to a different transport
+ * @l_ctx:	The local channel to migrate.
+ * @r_ctx:	The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration will occur.
+ */
+static bool will_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+	uint16_t new_xprt;
+	bool migrate = false;
+
+	if (!r_ctx)
+		r_ctx = find_r_ctx_get(l_ctx);
+	else
+		rwref_get(&r_ctx->ch_state_lhb2);
+	if (!r_ctx)
+		return migrate;
+
+	if (!l_ctx)
+		l_ctx = find_l_ctx_get(r_ctx);
+	else
+		rwref_get(&l_ctx->ch_state_lhb2);
+	if (!l_ctx)
+		goto exit;
+
+	if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+			l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+		goto exit;
+	if (l_ctx->no_migrate)
+		goto exit;
+
+	if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+		l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+	if (ch_is_fully_opened(l_ctx) &&
+		(l_ctx->transport_ptr->id == l_ctx->local_xprt_req))
+		goto exit;
+
+	new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+	if (new_xprt == l_ctx->transport_ptr->id)
+		goto exit;
+
+	migrate = true;
+exit:
+	if (l_ctx)
+		rwref_put(&l_ctx->ch_state_lhb2);
+	if (r_ctx)
+		rwref_put(&r_ctx->ch_state_lhb2);
+
+	return migrate;
+}
+
+/**
+ * ch_migrate() - migrate a channel to a different transport
+ * @l_ctx:	The local channel to migrate.
+ * @r_ctx:	The remote channel to migrate.
+ *
+ * One of the channel contexts can be NULL if not known, but at least one ctx
+ * must be provided.
+ *
+ * Return: Bool indicating if migration occurred.
+ */
+static bool ch_migrate(struct channel_ctx *l_ctx, struct channel_ctx *r_ctx)
+{
+	uint16_t new_xprt;
+	struct glink_core_xprt_ctx *xprt;
+	unsigned long flags;
+	struct channel_lcid *flcid;
+	uint16_t best_xprt = USHRT_MAX;
+	struct channel_ctx *ctx_clone;
+	bool migrated = false;
+
+	if (!r_ctx)
+		r_ctx = find_r_ctx_get(l_ctx);
+	else
+		rwref_get(&r_ctx->ch_state_lhb2);
+	if (!r_ctx)
+		return migrated;
+
+	if (!l_ctx)
+		l_ctx = find_l_ctx_get(r_ctx);
+	else
+		rwref_get(&l_ctx->ch_state_lhb2);
+	if (!l_ctx) {
+		rwref_put(&r_ctx->ch_state_lhb2);
+		return migrated;
+	}
+	if (ch_is_fully_opened(l_ctx) &&
+		(l_ctx->transport_ptr->id == l_ctx->local_xprt_req)) {
+		rwref_put(&l_ctx->ch_state_lhb2);
+		rwref_put(&r_ctx->ch_state_lhb2);
+		return migrated;
+	}
+
+	if (l_ctx->local_xprt_req == r_ctx->remote_xprt_req &&
+			l_ctx->local_xprt_req == l_ctx->transport_ptr->id)
+		goto exit;
+	if (l_ctx->no_migrate)
+		goto exit;
+
+	if (l_ctx->local_xprt_req > r_ctx->transport_ptr->id)
+		l_ctx->local_xprt_req = r_ctx->transport_ptr->id;
+
+	new_xprt = max(l_ctx->local_xprt_req, r_ctx->remote_xprt_req);
+
+	if (new_xprt == l_ctx->transport_ptr->id)
+		goto exit;
+
+	ctx_clone = kmalloc(sizeof(*ctx_clone), GFP_KERNEL);
+	if (!ctx_clone)
+		goto exit;
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+			if (xprt->id == new_xprt)
+				break;
+	mutex_unlock(&transport_list_lock_lha0);
+
+	spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_del_init(&l_ctx->port_list_node);
+	spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+									flags);
+	mutex_lock(&l_ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+	glink_debugfs_remove_channel(l_ctx, l_ctx->transport_ptr);
+	mutex_unlock(&l_ctx->transport_ptr->xprt_dbgfs_lock_lhb4);
+
+	memcpy(ctx_clone, l_ctx, sizeof(*ctx_clone));
+	ctx_clone->local_xprt_req = 0;
+	ctx_clone->local_xprt_resp = 0;
+	ctx_clone->remote_xprt_req = 0;
+	ctx_clone->remote_xprt_resp = 0;
+	ctx_clone->notify_state = NULL;
+	ctx_clone->local_open_state = GLINK_CHANNEL_CLOSING;
+	rwref_lock_init(&ctx_clone->ch_state_lhb2, glink_ch_ctx_release);
+	init_completion(&ctx_clone->int_req_ack_complete);
+	init_completion(&ctx_clone->int_req_complete);
+	spin_lock_init(&ctx_clone->local_rx_intent_lst_lock_lhc1);
+	spin_lock_init(&ctx_clone->rmt_rx_intent_lst_lock_lhc2);
+	INIT_LIST_HEAD(&ctx_clone->tx_ready_list_node);
+	INIT_LIST_HEAD(&ctx_clone->local_rx_intent_list);
+	INIT_LIST_HEAD(&ctx_clone->local_rx_intent_ntfy_list);
+	INIT_LIST_HEAD(&ctx_clone->local_rx_intent_free_list);
+	INIT_LIST_HEAD(&ctx_clone->rmt_rx_intent_list);
+	INIT_LIST_HEAD(&ctx_clone->tx_active);
+	spin_lock_init(&ctx_clone->tx_pending_rmt_done_lock_lhc4);
+	INIT_LIST_HEAD(&ctx_clone->tx_pending_remote_done);
+	spin_lock_init(&ctx_clone->tx_lists_lock_lhc3);
+	spin_lock_irqsave(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1, flags);
+	list_add_tail(&ctx_clone->port_list_node,
+					&l_ctx->transport_ptr->channels);
+	spin_unlock_irqrestore(&l_ctx->transport_ptr->xprt_ctx_lock_lhb1,
+									flags);
+
+	l_ctx->transport_ptr->ops->tx_cmd_ch_close(l_ctx->transport_ptr->ops,
+								l_ctx->lcid);
+
+	l_ctx->transport_ptr = xprt;
+	l_ctx->local_xprt_req = 0;
+	l_ctx->local_xprt_resp = 0;
+	if (new_xprt != r_ctx->transport_ptr->id || l_ctx == r_ctx) {
+		if (new_xprt != r_ctx->transport_ptr->id) {
+			r_ctx->local_xprt_req = 0;
+			r_ctx->local_xprt_resp = 0;
+			r_ctx->remote_xprt_req = 0;
+			r_ctx->remote_xprt_resp = 0;
+		}
+
+		l_ctx->remote_xprt_req = 0;
+		l_ctx->remote_xprt_resp = 0;
+		l_ctx->remote_opened = false;
+
+		rwref_write_get(&xprt->xprt_state_lhb0);
+		spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+		if (list_empty(&xprt->free_lcid_list)) {
+			l_ctx->lcid = xprt->next_lcid++;
+		} else {
+			flcid = list_first_entry(&xprt->free_lcid_list,
+						struct channel_lcid, list_node);
+			l_ctx->lcid = flcid->lcid;
+			list_del(&flcid->list_node);
+			kfree(flcid);
+		}
+		list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+		spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+		rwref_write_put(&xprt->xprt_state_lhb0);
+	} else {
+		l_ctx->lcid = r_ctx->lcid;
+		l_ctx->rcid = r_ctx->rcid;
+		l_ctx->remote_opened = r_ctx->remote_opened;
+		l_ctx->remote_xprt_req = r_ctx->remote_xprt_req;
+		l_ctx->remote_xprt_resp = r_ctx->remote_xprt_resp;
+		glink_delete_ch_from_list(r_ctx, false);
+
+		spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+		list_add_tail(&l_ctx->port_list_node, &xprt->channels);
+		spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1, flags);
+	}
+
+	mutex_lock(&xprt->xprt_dbgfs_lock_lhb4);
+	glink_debugfs_add_channel(l_ctx, xprt);
+	mutex_unlock(&xprt->xprt_dbgfs_lock_lhb4);
+
+	mutex_lock(&transport_list_lock_lha0);
+	list_for_each_entry(xprt, &transport_list, list_node)
+		if (!strcmp(l_ctx->transport_ptr->edge, xprt->edge))
+			if (xprt->id < best_xprt)
+				best_xprt = xprt->id;
+	mutex_unlock(&transport_list_lock_lha0);
+	l_ctx->local_open_state = GLINK_CHANNEL_OPENING;
+	l_ctx->local_xprt_req = best_xprt;
+	l_ctx->transport_ptr->ops->tx_cmd_ch_open(l_ctx->transport_ptr->ops,
+					l_ctx->lcid, l_ctx->name, best_xprt);
+
+	migrated = true;
+exit:
+	rwref_put(&l_ctx->ch_state_lhb2);
+	rwref_put(&r_ctx->ch_state_lhb2);
+
+	return migrated;
+}
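+
+/*
+ * Illustrative migration example: if the local side requested transport id 2
+ * and the remote side requested id 1, new_xprt = max(2, 1) = 2. When the
+ * channel currently lives on a transport with a different id, it is closed
+ * on the old transport, re-homed onto the same-edge transport whose id
+ * equals new_xprt, and re-opened there with the best (lowest) id available
+ * on the edge as the new local request.
+ */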
+
+/**
+ * calculate_xprt_resp() - calculate the response to a remote xprt request
+ * @r_ctx:	The channel the remote xprt request is for.
+ *
+ * Return: The calculated response.
+ */
+static uint16_t calculate_xprt_resp(struct channel_ctx *r_ctx)
+{
+	struct channel_ctx *l_ctx;
+
+	l_ctx = find_l_ctx_get(r_ctx);
+	if (!l_ctx) {
+		r_ctx->remote_xprt_resp = r_ctx->transport_ptr->id;
+	} else if (r_ctx->remote_xprt_req == r_ctx->transport_ptr->id) {
+		r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+	} else {
+		if (!l_ctx->local_xprt_req)
+			r_ctx->remote_xprt_resp = r_ctx->remote_xprt_req;
+		else if (l_ctx->no_migrate)
+			r_ctx->remote_xprt_resp = l_ctx->local_xprt_req;
+		else
+			r_ctx->remote_xprt_resp = max(l_ctx->local_xprt_req,
+							r_ctx->remote_xprt_req);
+	}
+
+	if (l_ctx)
+		rwref_put(&l_ctx->ch_state_lhb2);
+
+	return r_ctx->remote_xprt_resp;
+}
+
+/**
+ * glink_core_rx_cmd_ch_remote_open() - Remote-initiated open command
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ * @name:	Channel name
+ * @req_xprt:	Requested transport to migrate to
+ */
+static void glink_core_rx_cmd_ch_remote_open(struct glink_transport_if *if_ptr,
+	uint32_t rcid, const char *name, uint16_t req_xprt)
+{
+	struct channel_ctx *ctx;
+	uint16_t xprt_resp;
+	bool do_migrate;
+
+	glink_core_migration_edge_lock(if_ptr->glink_core_priv);
+	ctx = ch_name_to_ch_ctx_create(if_ptr->glink_core_priv, name);
+	if (ctx == NULL) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+		       "%s: invalid rcid %u received, name '%s'\n",
+		       __func__, rcid, name);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	/* port already exists */
+	if (ctx->remote_opened) {
+		GLINK_ERR_CH(ctx,
+		       "%s: Duplicate remote open for rcid %u, name '%s'\n",
+		       __func__, rcid, name);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	ctx->remote_opened = true;
+	ch_add_rcid(if_ptr->glink_core_priv, ctx, rcid);
+	ctx->transport_ptr = if_ptr->glink_core_priv;
+
+	ctx->remote_xprt_req = req_xprt;
+	xprt_resp = calculate_xprt_resp(ctx);
+
+	do_migrate = will_migrate(NULL, ctx);
+	GLINK_INFO_CH(ctx, "%s: remote: CLOSED->OPENED ; xprt req:resp %u:%u\n",
+			__func__, req_xprt, xprt_resp);
+
+	if_ptr->tx_cmd_ch_remote_open_ack(if_ptr, rcid, xprt_resp);
+	if (!do_migrate && ch_is_fully_opened(ctx))
+		ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+
+
+	if (do_migrate)
+		ch_migrate(NULL, ctx);
+	glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+}
+
+/**
+ * glink_core_rx_cmd_ch_open_ack() - Receive ack to previously sent open request
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @lcid:	Local Channel ID
+ * @xprt_resp:	Response to the transport migration request
+ */
+static void glink_core_rx_cmd_ch_open_ack(struct glink_transport_if *if_ptr,
+	uint32_t lcid, uint16_t xprt_resp)
+{
+	struct channel_ctx *ctx;
+
+	glink_core_migration_edge_lock(if_ptr->glink_core_priv);
+	ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid lcid %u received\n", __func__,
+				(unsigned int)lcid);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	if (ctx->local_open_state != GLINK_CHANNEL_OPENING) {
+		GLINK_ERR_CH(ctx,
+			"%s: unexpected open ack receive for lcid. Current state: %u. Thread: %u\n",
+				__func__, ctx->local_open_state, current->pid);
+		rwref_put(&ctx->ch_state_lhb2);
+		glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+		return;
+	}
+
+	ctx->local_xprt_resp = xprt_resp;
+	if (!ch_migrate(ctx, NULL)) {
+		ctx->local_open_state = GLINK_CHANNEL_OPENED;
+		GLINK_INFO_PERF_CH(ctx,
+			"%s: local:GLINK_CHANNEL_OPENING_WAIT->GLINK_CHANNEL_OPENED\n",
+			__func__);
+
+		if (ch_is_fully_opened(ctx)) {
+			ctx->notify_state(ctx, ctx->user_priv, GLINK_CONNECTED);
+			GLINK_INFO_PERF_CH(ctx,
+					"%s: notify state: GLINK_CONNECTED\n",
+					__func__);
+		}
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+	glink_core_migration_edge_unlock(if_ptr->glink_core_priv);
+}
+
+/**
+ * glink_core_rx_cmd_ch_remote_close() - Receive remote close command
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ */
+static void glink_core_rx_cmd_ch_remote_close(
+		struct glink_transport_if *if_ptr, uint32_t rcid)
+{
+	struct channel_ctx *ctx;
+	bool is_ch_fully_closed;
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				(unsigned int)rcid);
+		return;
+	}
+
+	if (!ctx->remote_opened) {
+		GLINK_ERR_CH(ctx,
+			"%s: unexpected remote close receive for rcid %u\n",
+			__func__, (unsigned int)rcid);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+	GLINK_INFO_CH(ctx, "%s: remote: OPENED->CLOSED\n", __func__);
+
+	is_ch_fully_closed = glink_core_remote_close_common(ctx, false);
+
+	ctx->pending_delete = true;
+	if_ptr->tx_cmd_ch_remote_close_ack(if_ptr, rcid);
+
+	if (is_ch_fully_closed) {
+		glink_delete_ch_from_list(ctx, true);
+		kthread_flush_worker(&xprt_ptr->tx_wq);
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_ch_close_ack() - Receive ack for a locally-requested close
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @lcid:	Local Channel ID
+ */
+static void glink_core_rx_cmd_ch_close_ack(struct glink_transport_if *if_ptr,
+	uint32_t lcid)
+{
+	struct channel_ctx *ctx;
+	bool is_ch_fully_closed;
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	ctx = xprt_lcid_to_ch_ctx_get(if_ptr->glink_core_priv, lcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid lcid %u received\n", __func__,
+				(unsigned int)lcid);
+		return;
+	}
+
+	if (ctx->local_open_state != GLINK_CHANNEL_CLOSING) {
+		GLINK_ERR_CH(ctx,
+			"%s: unexpected close ack receive for lcid %u\n",
+			__func__, (unsigned int)lcid);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	is_ch_fully_closed = glink_core_ch_close_ack_common(ctx, false);
+	if (is_ch_fully_closed) {
+		glink_delete_ch_from_list(ctx, true);
+		kthread_flush_worker(&xprt_ptr->tx_wq);
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_remote_rx_intent_put() - Receive a remote RX intent
+ *
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ * @riid:	Remote Intent ID
+ * @size:	Size of the remote intent
+ */
+static void glink_core_remote_rx_intent_put(struct glink_transport_if *if_ptr,
+		uint32_t rcid, uint32_t riid, size_t size)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown rcid received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned int)rcid);
+		return;
+	}
+
+	ch_push_remote_rx_intent(ctx, size, riid, NULL);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_remote_rx_intent_put_cookie() - Receive a remote RX intent
+ *
+ * @if_ptr:    Pointer to transport instance
+ * @rcid:      Remote Channel ID
+ * @riid:      Remote Intent ID
+ * @size:      Size of the remote intent
+ * @cookie:    Transport-specific cookie to cache
+ */
+static void glink_core_remote_rx_intent_put_cookie(
+		struct glink_transport_if *if_ptr,
+		uint32_t rcid, uint32_t riid, size_t size, void *cookie)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown rcid received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned int)rcid);
+		return;
+	}
+
+	ch_push_remote_rx_intent(ctx, size, riid, cookie);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_remote_rx_intent_req() - Receive a request for rx_intent
+ *                                            from remote side
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @size:	Size of the intent
+ *
+ * The function searches for the local channel to which the request for
+ * rx_intent has arrived and informs this request to the local channel through
+ * notify_rx_intent_req callback registered by the local channel.
+ */
+static void glink_core_rx_cmd_remote_rx_intent_req(
+	struct glink_transport_if *if_ptr, uint32_t rcid, size_t size)
+{
+	struct channel_ctx *ctx;
+	bool cb_ret;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned int)rcid);
+		return;
+	}
+	if (!ctx->notify_rx_intent_req) {
+		GLINK_ERR_CH(ctx,
+			"%s: Notify function not defined for local channel",
+			__func__);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	cb_ret = ctx->notify_rx_intent_req(ctx, ctx->user_priv, size);
+	if_ptr->tx_cmd_remote_rx_intent_req_ack(if_ptr, ctx->lcid, cb_ret);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_rx_intent_req_ack() - Receive ack from remote side
+ *					    for a local rx_intent request
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @granted:	True if the remote side granted the rx_intent request
+ *
+ * This function receives the ack from the remote side for a locally
+ * initiated rx_intent request.
+ */
+static void glink_core_rx_cmd_rx_intent_req_ack(struct glink_transport_if
+					*if_ptr, uint32_t rcid, bool granted)
+{
+	struct channel_ctx *ctx;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Invalid rcid received %u\n", __func__,
+				(unsigned int)rcid);
+		return;
+	}
+	ctx->int_req_ack = granted;
+	complete_all(&ctx->int_req_ack_complete);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_get_pkt_ctx() - lookup RX intent structure
+ *
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @liid:	Local RX Intent ID
+ *
+ * Note that this function is designed to always be followed by a call to
+ * glink_core_rx_put_pkt_ctx() to complete an RX operation by the transport.
+ *
+ * Return: Pointer to RX intent structure (or NULL if none found)
+ */
+static struct glink_core_rx_intent *glink_core_rx_get_pkt_ctx(
+		struct glink_transport_if *if_ptr, uint32_t rcid, uint32_t liid)
+{
+	struct channel_ctx *ctx;
+	struct glink_core_rx_intent *intent_ptr;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid received %u\n", __func__,
+				(unsigned int)rcid);
+		return NULL;
+	}
+
+	/* match pending intent */
+	intent_ptr = ch_get_local_rx_intent(ctx, liid);
+	if (intent_ptr == NULL) {
+		GLINK_ERR_CH(ctx,
+			"%s: L[%u]: No matching rx intent\n",
+			__func__, liid);
+		rwref_put(&ctx->ch_state_lhb2);
+		return NULL;
+	}
+
+	rwref_put(&ctx->ch_state_lhb2);
+	return intent_ptr;
+}
+
+/**
+ * glink_core_rx_put_pkt_ctx() - Return an RX intent after receiving data
+ *
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @intent_ptr:	Pointer to the RX intent
+ * @complete:	Packet has been completely received
+ *
+ * Note that this function should always be preceded by a call to
+ * glink_core_rx_get_pkt_ctx().
+ */
+void glink_core_rx_put_pkt_ctx(struct glink_transport_if *if_ptr,
+	uint32_t rcid, struct glink_core_rx_intent *intent_ptr, bool complete)
+{
+	struct channel_ctx *ctx;
+
+	if (!complete) {
+		GLINK_DBG_XPRT(if_ptr->glink_core_priv,
+			"%s: rcid[%u] liid[%u] pkt_size[%zu] write_offset[%zu] Fragment received\n",
+				__func__, rcid, intent_ptr->id,
+				intent_ptr->pkt_size,
+				intent_ptr->write_offset);
+		return;
+	}
+
+	/* packet complete */
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+			       "%s: invalid rcid received %u\n", __func__,
+			       (unsigned int)rcid);
+		return;
+	}
+
+	if (unlikely(intent_ptr->tracer_pkt)) {
+		tracer_pkt_log_event(intent_ptr->data, GLINK_CORE_RX);
+		ch_set_local_rx_intent_notified(ctx, intent_ptr);
+		if (ctx->notify_rx_tracer_pkt)
+			ctx->notify_rx_tracer_pkt(ctx, ctx->user_priv,
+				intent_ptr->pkt_priv, intent_ptr->data,
+				intent_ptr->pkt_size);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	GLINK_PERF_CH(ctx, "%s: L[%u]: data[%p] size[%zu]\n",
+		__func__, intent_ptr->id,
+		intent_ptr->data ? intent_ptr->data : intent_ptr->iovec,
+		intent_ptr->write_offset);
+	if (!intent_ptr->data && !ctx->notify_rxv) {
+		/* Received a vector, but client can't handle a vector */
+		intent_ptr->bounce_buf = linearize_vector(intent_ptr->iovec,
+						intent_ptr->pkt_size,
+						intent_ptr->vprovider,
+						intent_ptr->pprovider);
+		if (IS_ERR_OR_NULL(intent_ptr->bounce_buf)) {
+			GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Error %ld linearizing vector\n", __func__,
+				PTR_ERR(intent_ptr->bounce_buf));
+			WARN(1, "Failed to linearize vector\n");
+			rwref_put(&ctx->ch_state_lhb2);
+			return;
+		}
+	}
+
+	ch_set_local_rx_intent_notified(ctx, intent_ptr);
+	if (ctx->notify_rx && (intent_ptr->data || intent_ptr->bounce_buf)) {
+		ctx->notify_rx(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+			       intent_ptr->data ?
+				intent_ptr->data : intent_ptr->bounce_buf,
+			       intent_ptr->pkt_size);
+	} else if (ctx->notify_rxv) {
+		ctx->notify_rxv(ctx, ctx->user_priv, intent_ptr->pkt_priv,
+				intent_ptr->iovec, intent_ptr->pkt_size,
+				intent_ptr->vprovider, intent_ptr->pprovider);
+	} else {
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: Unable to process rx data\n", __func__);
+		WARN(1, "Failed to process rx data\n");
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * glink_core_rx_cmd_tx_done() - Receive Transmit Done Command
+ * @if_ptr:	Pointer to the transport interface
+ * @rcid:	Remote channel ID
+ * @riid:	Remote intent ID
+ * @reuse:	Reuse the consumed intent
+ */
+void glink_core_rx_cmd_tx_done(struct glink_transport_if *if_ptr,
+			       uint32_t rcid, uint32_t riid, bool reuse)
+{
+	struct channel_ctx *ctx;
+	struct glink_core_tx_pkt *tx_pkt;
+	unsigned long flags;
+	size_t intent_size;
+	void *cookie;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown RCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				rcid);
+		return;
+	}
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	tx_pkt = ch_get_tx_pending_remote_done(ctx, riid);
+	if (IS_ERR_OR_NULL(tx_pkt)) {
+		/*
+		 * FUTURE - in the case of a zero-copy transport, this is a
+		 * fatal protocol failure since memory corruption could occur
+		 * in this case.  Prevent this by adding code in glink_close()
+		 * to recall any buffers in flight / wait for them to be
+		 * returned.
+		 */
+		GLINK_ERR_CH(ctx, "%s: R[%u]: No matching tx\n",
+				__func__,
+				(unsigned int)riid);
+		spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	/* notify client */
+	ctx->notify_tx_done(ctx, ctx->user_priv, tx_pkt->pkt_priv,
+			    tx_pkt->data ? tx_pkt->data : tx_pkt->iovec);
+	intent_size = tx_pkt->intent_size;
+	cookie = tx_pkt->cookie;
+	ch_remove_tx_pending_remote_done(ctx, tx_pkt);
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	if (reuse)
+		ch_push_remote_rx_intent(ctx, intent_size, riid, cookie);
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+/**
+ * xprt_schedule_tx() - Schedules packet for transmit.
+ * @xprt_ptr:	Transport to send packet on.
+ * @ch_ptr:	Channel to send packet on.
+ * @tx_info:	Packet to transmit.
+ */
+static void xprt_schedule_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info)
+{
+	unsigned long flags;
+
+	if (unlikely(xprt_ptr->local_state == GLINK_XPRT_DOWN)) {
+		GLINK_ERR_CH(ch_ptr, "%s: Error XPRT is down\n", __func__);
+		kfree(tx_info);
+		return;
+	}
+
+	spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+	if (unlikely(!ch_is_fully_opened(ch_ptr))) {
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		GLINK_ERR_CH(ch_ptr, "%s: Channel closed before tx\n",
+			     __func__);
+		kfree(tx_info);
+		return;
+	}
+	if (list_empty(&ch_ptr->tx_ready_list_node))
+		list_add_tail(&ch_ptr->tx_ready_list_node,
+			&xprt_ptr->prio_bin[ch_ptr->curr_priority].tx_ready);
+
+	spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+	list_add_tail(&tx_info->list_node, &ch_ptr->tx_active);
+	glink_qos_do_ch_tx(ch_ptr);
+	if (unlikely(tx_info->tracer_pkt))
+		tracer_pkt_log_event((void *)(tx_info->data),
+				     GLINK_QUEUE_TO_SCHEDULER);
+
+	spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+	spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+
+	kthread_queue_work(&xprt_ptr->tx_wq, &xprt_ptr->tx_kwork);
+}
+
+/**
+ * xprt_single_threaded_tx() - Transmit in the context of sender.
+ * @xprt_ptr:	Transport to send packet on.
+ * @ch_ptr:	Channel to send packet on.
+ * @tx_info:	Packet to transmit.
+ */
+static int xprt_single_threaded_tx(struct glink_core_xprt_ctx *xprt_ptr,
+			     struct channel_ctx *ch_ptr,
+			     struct glink_core_tx_pkt *tx_info)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+	do {
+		ret = xprt_ptr->ops->tx(ch_ptr->transport_ptr->ops,
+					ch_ptr->lcid, tx_info);
+	} while (ret == -EAGAIN);
+	if (ret < 0 || tx_info->size_remaining) {
+		GLINK_ERR_CH(ch_ptr, "%s: Error %d writing data\n",
+			     __func__, ret);
+		kfree(tx_info);
+	} else {
+		list_add_tail(&tx_info->list_done,
+			      &ch_ptr->tx_pending_remote_done);
+		ret = 0;
+	}
+	spin_unlock_irqrestore(&ch_ptr->tx_pending_rmt_done_lock_lhc4, flags);
+	return ret;
+}
+
+/**
+ * glink_scheduler_eval_prio() - Evaluate the channel priority
+ * @ctx:	Channel whose priority is evaluated.
+ * @xprt_ctx:	Transport in which the channel is part of.
+ *
+ * This function is called by the packet scheduler to measure the traffic
+ * rate observed in the channel and compare it against the traffic rate
+ * requested by the channel. The comparison result is used to evaluate the
+ * priority of the channel.
+ */
+static void glink_scheduler_eval_prio(struct channel_ctx *ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long token_end_time;
+	unsigned long token_consume_time, rem;
+	unsigned long obs_rate_kBps;
+
+	if (ctx->initial_priority == 0)
+		return;
+
+	if (ctx->token_count)
+		return;
+
+	token_end_time = arch_counter_get_cntvct();
+
+	token_consume_time = NSEC_PER_SEC;
+	rem = do_div(token_consume_time, arch_timer_get_rate());
+	token_consume_time = (token_end_time - ctx->token_start_time) *
+				token_consume_time;
+	rem = do_div(token_consume_time, 1000);
+	obs_rate_kBps = glink_qos_calc_rate_kBps(ctx->txd_len,
+				token_consume_time);
+	if (obs_rate_kBps > ctx->req_rate_kBps) {
+		GLINK_INFO_CH(ctx, "%s: Obs. Rate (%lu) > Req. Rate (%lu)\n",
+			__func__, obs_rate_kBps, ctx->req_rate_kBps);
+		glink_qos_update_ch_prio(ctx, 0);
+	} else {
+		glink_qos_update_ch_prio(ctx, ctx->initial_priority);
+	}
+
+	ctx->token_count = xprt_ctx->token_count;
+	ctx->txd_len = 0;
+	ctx->token_start_time = arch_counter_get_cntvct();
+}
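+
+/*
+ * Worked example for the rate evaluation above (illustrative only; the
+ * 19.2 MHz timer rate and the byte counts are assumptions, not values taken
+ * from any particular target):
+ *
+ *	ns per tick	= NSEC_PER_SEC / arch_timer_get_rate()
+ *			= 1,000,000,000 / 19,200,000 ~= 52 ns
+ *	elapsed time	= (token_end_time - token_start_time) * 52 / 1000
+ *			= 192,000 ticks * 52 / 1000 ~= 9,984 us (~10 ms)
+ *	observed rate	= glink_qos_calc_rate_kBps(txd_len, elapsed time)
+ *			~= 20,480 bytes over ~10 ms, i.e. roughly 2,000 kB/s
+ *
+ * If the observed rate exceeds req_rate_kBps, the channel is dropped to
+ * priority 0 for the next token window; otherwise it keeps (or regains) its
+ * initial priority.
+ */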
+
+/**
+ * glink_scheduler_tx() - Transmit operation by the scheduler
+ * @ctx:	Channel which is scheduled for transmission.
+ * @xprt_ctx:	Transport context in which the transmission is performed.
+ *
+ * This function is called by the scheduler after scheduling a channel for
+ * transmission over the transport.
+ *
+ * Return: return value as returned by the transport on success,
+ *         standard Linux error codes on failure.
+ */
+static int glink_scheduler_tx(struct channel_ctx *ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+	unsigned long flags;
+	struct glink_core_tx_pkt *tx_info;
+	size_t txd_len = 0;
+	size_t tx_len = 0;
+	uint32_t num_pkts = 0;
+	int ret = 0;
+
+	spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+	while (txd_len < xprt_ctx->mtu &&
+		!list_empty(&ctx->tx_active)) {
+		tx_info = list_first_entry(&ctx->tx_active,
+				struct glink_core_tx_pkt, list_node);
+		rwref_get(&tx_info->pkt_ref);
+
+		spin_lock(&ctx->tx_pending_rmt_done_lock_lhc4);
+		if (list_empty(&tx_info->list_done))
+			list_add(&tx_info->list_done,
+				 &ctx->tx_pending_remote_done);
+		spin_unlock(&ctx->tx_pending_rmt_done_lock_lhc4);
+		spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+		if (unlikely(tx_info->tracer_pkt)) {
+			tracer_pkt_log_event((void *)(tx_info->data),
+					      GLINK_SCHEDULER_TX);
+			ret = xprt_ctx->ops->tx_cmd_tracer_pkt(xprt_ctx->ops,
+						ctx->lcid, tx_info);
+		} else {
+			tx_len = tx_info->size_remaining <
+				 (xprt_ctx->mtu - txd_len) ?
+				 tx_info->size_remaining :
+				 (xprt_ctx->mtu - txd_len);
+			tx_info->tx_len = tx_len;
+			ret = xprt_ctx->ops->tx(xprt_ctx->ops,
+						ctx->lcid, tx_info);
+		}
+		spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
+		if (ret == -EAGAIN) {
+			/*
+			 * transport unable to send at the moment and will call
+			 * tx_resume() when it can send again.
+			 */
+			rwref_put(&tx_info->pkt_ref);
+			break;
+		} else if (ret < 0) {
+			/*
+			 * General failure code that indicates that the
+			 * transport is unable to recover.  In this case, the
+			 * communication failure will be detected at a higher
+			 * level and a subsystem restart of the affected system
+			 * will be triggered.
+			 */
+			GLINK_ERR_XPRT(xprt_ctx,
+					"%s: unrecoverable xprt failure %d\n",
+					__func__, ret);
+			rwref_put(&tx_info->pkt_ref);
+			break;
+		} else if (!ret && tx_info->size_remaining) {
+			/*
+			 * Transport unable to send any data on this channel.
+			 * Break out of the loop so that the scheduler can
+			 * continue with the next channel.
+			 */
+			break;
+		}
+
+		txd_len += tx_len;
+		if (!tx_info->size_remaining) {
+			num_pkts++;
+			list_del_init(&tx_info->list_node);
+			rwref_put(&tx_info->pkt_ref);
+		}
+	}
+
+	ctx->txd_len += txd_len;
+	if (txd_len) {
+		if (num_pkts >= ctx->token_count)
+			ctx->token_count = 0;
+		else if (num_pkts)
+			ctx->token_count -= num_pkts;
+		else
+			ctx->token_count--;
+	}
+	spin_unlock_irqrestore(&ctx->tx_lists_lock_lhc3, flags);
+
+	return ret;
+}
+
+/**
+ * tx_func() - Transmit kthread worker
+ * @work:	Linux kthread work structure
+ */
+static void tx_func(struct kthread_work *work)
+{
+	struct channel_ctx *ch_ptr;
+	uint32_t prio;
+	uint32_t tx_ready_head_prio;
+	int ret;
+	struct channel_ctx *tx_ready_head = NULL;
+	bool transmitted_successfully = true;
+	unsigned long flags;
+	struct glink_core_xprt_ctx *xprt_ptr = container_of(work,
+			struct glink_core_xprt_ctx, tx_kwork);
+
+	GLINK_PERF("%s: worker starting\n", __func__);
+
+	while (1) {
+		prio = xprt_ptr->num_priority - 1;
+		spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		while (list_empty(&xprt_ptr->prio_bin[prio].tx_ready)) {
+			if (prio == 0) {
+				spin_unlock_irqrestore(
+					&xprt_ptr->tx_ready_lock_lhb3, flags);
+				return;
+			}
+			prio--;
+		}
+		glink_pm_qos_vote(xprt_ptr);
+		ch_ptr = list_first_entry(&xprt_ptr->prio_bin[prio].tx_ready,
+				struct channel_ctx, tx_ready_list_node);
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+
+		if (tx_ready_head == NULL || tx_ready_head_prio < prio) {
+			tx_ready_head = ch_ptr;
+			tx_ready_head_prio = prio;
+		}
+
+		if (ch_ptr == tx_ready_head && !transmitted_successfully) {
+			GLINK_ERR_XPRT(xprt_ptr,
+				"%s: Unable to send data on this transport.\n",
+				__func__);
+			break;
+		}
+		transmitted_successfully = false;
+
+		ret = glink_scheduler_tx(ch_ptr, xprt_ptr);
+		if (ret == -EAGAIN) {
+			/*
+			 * transport unable to send at the moment and will call
+			 * tx_resume() when it can send again.
+			 */
+			break;
+		} else if (ret < 0) {
+			/*
+			 * General failure code that indicates that the
+			 * transport is unable to recover.  In this case, the
+			 * communication failure will be detected at a higher
+			 * level and a subsystem restart of the affected system
+			 * will be triggered.
+			 */
+			GLINK_ERR_XPRT(xprt_ptr,
+					"%s: unrecoverable xprt failure %d\n",
+					__func__, ret);
+			break;
+		} else if (!ret) {
+			/*
+			 * Transport unable to send any data on this channel,
+			 * but didn't return an error. Move to the next channel
+			 * and continue.
+			 */
+			spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+			list_rotate_left(&xprt_ptr->prio_bin[prio].tx_ready);
+			spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3,
+						flags);
+			continue;
+		}
+
+		spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+		spin_lock(&ch_ptr->tx_lists_lock_lhc3);
+
+		glink_scheduler_eval_prio(ch_ptr, xprt_ptr);
+		if (list_empty(&ch_ptr->tx_active)) {
+			list_del_init(&ch_ptr->tx_ready_list_node);
+			glink_qos_done_ch_tx(ch_ptr);
+		}
+
+		spin_unlock(&ch_ptr->tx_lists_lock_lhc3);
+		spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+
+		tx_ready_head = NULL;
+		transmitted_successfully = true;
+	}
+	glink_pm_qos_unvote(xprt_ptr);
+	GLINK_PERF("%s: worker exiting\n", __func__);
+}
+
+static void glink_core_tx_resume(struct glink_transport_if *if_ptr)
+{
+	struct glink_core_xprt_ctx *xprt_ptr = if_ptr->glink_core_priv;
+
+	kthread_queue_work(&xprt_ptr->tx_wq, &xprt_ptr->tx_kwork);
+}
+
+/**
+ * glink_pm_qos_vote() - Add Power Management QoS Vote
+ * @xprt_ptr:	Transport for power vote
+ *
+ * Note - must be called with tx_ready_lock_lhb3 locked.
+ */
+static void glink_pm_qos_vote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	if (glink_pm_qos && !xprt_ptr->qos_req_active) {
+		GLINK_PERF("%s: qos vote %u us\n", __func__, glink_pm_qos);
+		pm_qos_update_request(&xprt_ptr->pm_qos_req, glink_pm_qos);
+		xprt_ptr->qos_req_active = true;
+	}
+	xprt_ptr->tx_path_activity = true;
+}
+
+/**
+ * glink_pm_qos_unvote() - Schedule Power Management QoS Vote Removal
+ * @xprt_ptr:	Transport for power vote removal
+ *
+ * Note - must be called with tx_ready_lock_lhb3 locked.
+ */
+static void glink_pm_qos_unvote(struct glink_core_xprt_ctx *xprt_ptr)
+{
+	xprt_ptr->tx_path_activity = false;
+	if (xprt_ptr->qos_req_active) {
+		GLINK_PERF("%s: qos unvote\n", __func__);
+		schedule_delayed_work(&xprt_ptr->pm_qos_work,
+				msecs_to_jiffies(GLINK_PM_QOS_HOLDOFF_MS));
+	}
+}
+
+/**
+ * glink_pm_qos_cancel_worker() - Remove Power Management QoS Vote
+ * @work:	Delayed work structure
+ *
+ * Removes PM QoS vote if no additional transmit activity has occurred between
+ * the unvote and when this worker runs.
+ */
+static void glink_pm_qos_cancel_worker(struct work_struct *work)
+{
+	struct glink_core_xprt_ctx *xprt_ptr;
+	unsigned long flags;
+
+	xprt_ptr = container_of(to_delayed_work(work),
+			struct glink_core_xprt_ctx, pm_qos_work);
+
+	spin_lock_irqsave(&xprt_ptr->tx_ready_lock_lhb3, flags);
+	if (!xprt_ptr->tx_path_activity) {
+		/* no more tx activity */
+		GLINK_PERF("%s: qos off\n", __func__);
+		pm_qos_update_request(&xprt_ptr->pm_qos_req,
+				PM_QOS_DEFAULT_VALUE);
+		xprt_ptr->qos_req_active = false;
+	}
+	xprt_ptr->tx_path_activity = false;
+	spin_unlock_irqrestore(&xprt_ptr->tx_ready_lock_lhb3, flags);
+}
+
+/**
+ * glink_core_rx_cmd_remote_sigs() - Receive remote channel signal command
+ * @if_ptr:	Pointer to transport instance
+ * @rcid:	Remote Channel ID
+ * @sigs:	Remote signal state
+ */
+static void glink_core_rx_cmd_remote_sigs(struct glink_transport_if *if_ptr,
+					uint32_t rcid, uint32_t sigs)
+{
+	struct channel_ctx *ctx;
+	uint32_t old_sigs;
+
+	ctx = xprt_rcid_to_ch_ctx_get(if_ptr->glink_core_priv, rcid);
+	if (!ctx) {
+		/* unknown LCID received - this shouldn't happen */
+		GLINK_ERR_XPRT(if_ptr->glink_core_priv,
+				"%s: invalid rcid %u received\n", __func__,
+				(unsigned int)rcid);
+		return;
+	}
+
+	if (!ch_is_fully_opened(ctx)) {
+		GLINK_ERR_CH(ctx, "%s: Channel is not fully opened\n",
+			__func__);
+		rwref_put(&ctx->ch_state_lhb2);
+		return;
+	}
+
+	old_sigs = ctx->rsigs;
+	ctx->rsigs = sigs;
+	if (ctx->notify_rx_sigs) {
+		ctx->notify_rx_sigs(ctx, ctx->user_priv, old_sigs, ctx->rsigs);
+		GLINK_INFO_CH(ctx, "%s: notify rx sigs old:0x%x new:0x%x\n",
+				__func__, old_sigs, ctx->rsigs);
+	}
+	rwref_put(&ctx->ch_state_lhb2);
+}
+
+static struct glink_core_if core_impl = {
+	.link_up = glink_core_link_up,
+	.link_down = glink_core_link_down,
+	.rx_cmd_version = glink_core_rx_cmd_version,
+	.rx_cmd_version_ack = glink_core_rx_cmd_version_ack,
+	.rx_cmd_ch_remote_open = glink_core_rx_cmd_ch_remote_open,
+	.rx_cmd_ch_open_ack = glink_core_rx_cmd_ch_open_ack,
+	.rx_cmd_ch_remote_close = glink_core_rx_cmd_ch_remote_close,
+	.rx_cmd_ch_close_ack = glink_core_rx_cmd_ch_close_ack,
+	.rx_get_pkt_ctx = glink_core_rx_get_pkt_ctx,
+	.rx_put_pkt_ctx = glink_core_rx_put_pkt_ctx,
+	.rx_cmd_remote_rx_intent_put = glink_core_remote_rx_intent_put,
+	.rx_cmd_remote_rx_intent_put_cookie =
+					glink_core_remote_rx_intent_put_cookie,
+	.rx_cmd_remote_rx_intent_req = glink_core_rx_cmd_remote_rx_intent_req,
+	.rx_cmd_rx_intent_req_ack = glink_core_rx_cmd_rx_intent_req_ack,
+	.rx_cmd_tx_done = glink_core_rx_cmd_tx_done,
+	.tx_resume = glink_core_tx_resume,
+	.rx_cmd_remote_sigs = glink_core_rx_cmd_remote_sigs,
+};
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list
+ *					iterator
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i)
+{
+	if (xprt_i == NULL)
+		return;
+
+	mutex_lock(&transport_list_lock_lha0);
+	xprt_i->xprt_list = &transport_list;
+	xprt_i->i_curr = list_entry(&transport_list,
+			struct glink_core_xprt_ctx, list_node);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_init);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i:	pointer to the transport context iterator.
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i)
+{
+	if (xprt_i == NULL)
+		return;
+
+	xprt_i->xprt_list = NULL;
+	xprt_i->i_curr = NULL;
+	mutex_unlock(&transport_list_lock_lha0);
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_end);
+
+/**
+ * glink_xprt_ctx_iterator_next() - iterates element by element in transport
+ *					context list
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: pointer to the transport context structure
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+			struct xprt_ctx_iterator *xprt_i)
+{
+	struct glink_core_xprt_ctx *xprt_ctx = NULL;
+
+	if (xprt_i == NULL)
+		return xprt_ctx;
+
+	if (list_empty(xprt_i->xprt_list))
+		return xprt_ctx;
+
+	list_for_each_entry_continue(xprt_i->i_curr,
+			xprt_i->xprt_list, list_node) {
+		xprt_ctx = xprt_i->i_curr;
+		break;
+	}
+	return xprt_ctx;
+}
+EXPORT_SYMBOL(glink_xprt_ctx_iterator_next);
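+
+/*
+ * Usage sketch for the transport iterator API above (illustrative only; the
+ * my_dump_xprts() wrapper is hypothetical and not part of this driver):
+ *
+ *	static void my_dump_xprts(void)
+ *	{
+ *		struct xprt_ctx_iterator it;
+ *		struct glink_core_xprt_ctx *xprt;
+ *
+ *		glink_xprt_ctx_iterator_init(&it);
+ *		for (xprt = glink_xprt_ctx_iterator_next(&it); xprt;
+ *		     xprt = glink_xprt_ctx_iterator_next(&it))
+ *			pr_info("xprt %s on edge %s\n",
+ *				glink_get_xprt_name(xprt),
+ *				glink_get_xprt_edge_name(xprt));
+ *		glink_xprt_ctx_iterator_end(&it);
+ *	}
+ *
+ * transport_list_lock_lha0 is held between the init and end calls, so the
+ * loop body must not call into G-Link APIs that take the same lock.
+ */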
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	if (xprt_ctx == NULL)
+		return NULL;
+
+	return xprt_ctx->name;
+}
+EXPORT_SYMBOL(glink_get_xprt_name);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ *				of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: Name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	if (xprt_ctx == NULL)
+		return NULL;
+	return xprt_ctx->edge;
+}
+EXPORT_SYMBOL(glink_get_xprt_edge_name);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: Name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	if (xprt_ctx == NULL)
+		return NULL;
+
+	return glink_get_xprt_state_string(xprt_ctx->local_state);
+}
+EXPORT_SYMBOL(glink_get_xprt_state);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ *					of local transport in glink
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+		struct glink_core_xprt_ctx *xprt_ctx)
+{
+	const struct glink_core_version *ver = NULL;
+
+	if (xprt_ctx == NULL)
+		return ver;
+
+	ver = &xprt_ctx->versions[xprt_ctx->local_version_idx];
+	return ver;
+}
+EXPORT_SYMBOL(glink_get_xprt_version_features);
+
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:	pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+		struct glink_core_xprt_ctx *xprt)
+{
+	unsigned long flags;
+
+	if (ch_iter == NULL || xprt == NULL)
+		return;
+
+	spin_lock_irqsave(&xprt->xprt_ctx_lock_lhb1, flags);
+	ch_iter->ch_list = &(xprt->channels);
+	ch_iter->i_curr = list_entry(&(xprt->channels),
+				struct channel_ctx, port_list_node);
+	ch_iter->ch_list_flags = flags;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_init);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:	pointer to the transport context that holds the channel list.
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+				struct glink_core_xprt_ctx *xprt)
+{
+	if (ch_iter == NULL || xprt == NULL)
+		return;
+
+	spin_unlock_irqrestore(&xprt->xprt_ctx_lock_lhb1,
+			ch_iter->ch_list_flags);
+	ch_iter->ch_list = NULL;
+	ch_iter->i_curr = NULL;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_end);
+
+/**
+ * glink_ch_ctx_iterator_next() - iterates element by element in channel
+ *					context list
+ * @c_i:	pointer to the channel context iterator.
+ *
+ * Return: pointer to the channel context structure
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *c_i)
+{
+	struct channel_ctx *ch_ctx = NULL;
+
+	if (c_i == NULL)
+		return ch_ctx;
+
+	if (list_empty(c_i->ch_list))
+		return ch_ctx;
+
+	list_for_each_entry_continue(c_i->i_curr,
+			c_i->ch_list, port_list_node) {
+		ch_ctx = c_i->i_curr;
+		break;
+	}
+	return ch_ctx;
+}
+EXPORT_SYMBOL(glink_ch_ctx_iterator_next);
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return ch_ctx->name;
+}
+EXPORT_SYMBOL(glink_get_ch_name);
+
+/**
+ * glink_get_ch_edge_name() - get the edge on which channel is created
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the edge, NULL in case of invalid input
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return ch_ctx->transport_ptr->edge;
+}
+EXPORT_SYMBOL(glink_get_ch_edge_name);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	return ch_ctx->lcid;
+}
+EXPORT_SYMBOL(glink_get_ch_lcid);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	return ch_ctx->rcid;
+}
+EXPORT_SYMBOL(glink_get_ch_rcid);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: Name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return glink_get_ch_state_string(ch_ctx->local_open_state);
+}
+EXPORT_SYMBOL(glink_get_ch_lstate);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: true if the remote side is opened, false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return false;
+
+	return ch_ctx->remote_opened;
+}
+EXPORT_SYMBOL(glink_get_ch_rstate);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ *				the channel belongs
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return NULL;
+
+	return ch_ctx->transport_ptr->name;
+}
+EXPORT_SYMBOL(glink_get_ch_xprt_name);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ *				through this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	/* FUTURE: packet stats not yet implemented */
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(glink_get_ch_tx_pkt_count);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ *				received at this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets received, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx)
+{
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	/* FUTURE: packet stats not yet implemented */
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(glink_get_ch_rx_pkt_count);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ *				at local side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx)
+{
+	struct glink_core_rx_intent *intent;
+	int ilrx_count = 0;
+
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	list_for_each_entry(intent, &ch_ctx->local_rx_intent_list, list)
+		ilrx_count++;
+
+	return ilrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_lintents_queued);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ *				from remote side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx)
+{
+	struct glink_core_rx_intent *intent;
+	int irrx_count = 0;
+
+	if (ch_ctx == NULL)
+		return -EINVAL;
+
+	list_for_each_entry(intent, &ch_ctx->rmt_rx_intent_list, list)
+		irrx_count++;
+
+	return irrx_count;
+}
+EXPORT_SYMBOL(glink_get_ch_rintents_queued);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx:	pointer to the channel context.
+ * @ch_ctx_i:	pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+			struct glink_ch_intent_info *ch_ctx_i)
+{
+	if (ch_ctx == NULL || ch_ctx_i == NULL)
+		return;
+
+	ch_ctx_i->li_lst_lock = &ch_ctx->local_rx_intent_lst_lock_lhc1;
+	ch_ctx_i->li_avail_list = &ch_ctx->local_rx_intent_list;
+	ch_ctx_i->li_used_list = &ch_ctx->local_rx_intent_ntfy_list;
+	ch_ctx_i->ri_lst_lock = &ch_ctx->rmt_rx_intent_lst_lock_lhc2;
+	ch_ctx_i->ri_list = &ch_ctx->rmt_rx_intent_list;
+}
+EXPORT_SYMBOL(glink_get_ch_intent_info);
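+
+/*
+ * Usage sketch for glink_get_ch_intent_info() (illustrative only; the
+ * my_count_avail_intents() helper is hypothetical). The caller takes the
+ * returned locks before walking the lists, as the debugfs code in
+ * glink_debugfs.c does:
+ *
+ *	static int my_count_avail_intents(struct channel_ctx *ch_ctx)
+ *	{
+ *		struct glink_ch_intent_info info;
+ *		struct glink_core_rx_intent *intent;
+ *		unsigned long flags;
+ *		int count = 0;
+ *
+ *		glink_get_ch_intent_info(ch_ctx, &info);
+ *		spin_lock_irqsave(info.li_lst_lock, flags);
+ *		list_for_each_entry(intent, info.li_avail_list, list)
+ *			count++;
+ *		spin_unlock_irqrestore(info.li_lst_lock, flags);
+ *		return count;
+ *	}
+ */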
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned int glink_get_debug_mask(void)
+{
+	return glink_debug_mask;
+}
+EXPORT_SYMBOL(glink_get_debug_mask);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void)
+{
+	return log_ctx;
+}
+EXPORT_SYMBOL(glink_get_log_ctx);
+
+/**
+ * glink_get_xprt_log_ctx() - Return log context for GLINK xprts.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt)
+{
+	if (xprt)
+		return xprt->log_ctx;
+	else
+		return NULL;
+}
+EXPORT_SYMBOL(glink_get_xprt_log_ctx);
+
+static int glink_init(void)
+{
+	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "glink", 0);
+	if (!log_ctx)
+		GLINK_ERR("%s: unable to create log context\n", __func__);
+	glink_debugfs_init();
+
+	return 0;
+}
+arch_initcall(glink_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_core_if.h b/drivers/soc/qcom/glink_core_if.h
new file mode 100644
index 0000000..1411330
--- /dev/null
+++ b/drivers/soc/qcom/glink_core_if.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_CORE_IF_H_
+#define _SOC_QCOM_GLINK_CORE_IF_H_
+
+#include <linux/of.h>
+#include <linux/types.h>
+#include "glink_private.h"
+
+/* Local Channel state */
+enum local_channel_state_e {
+	GLINK_CHANNEL_CLOSED = 0,
+	GLINK_CHANNEL_OPENING,
+	GLINK_CHANNEL_OPENED,
+	GLINK_CHANNEL_CLOSING,
+};
+
+/* Transport Negotiation State */
+enum transport_state_e {
+	GLINK_XPRT_DOWN,
+	GLINK_XPRT_NEGOTIATING,
+	GLINK_XPRT_OPENED,
+	GLINK_XPRT_FAILED,
+};
+
+struct channel_ctx;
+struct glink_core_xprt_ctx;
+struct glink_transport_if;
+struct glink_core_version;
+
+/**
+ * struct glink_core_version - Individual version element
+ *
+ * @version:	supported version
+ * @features:	all supported features for version
+ * @negotiate_features:	used to negotiate a common feature set for @version
+ */
+struct glink_core_version {
+	uint32_t version;
+	uint32_t features;
+
+	uint32_t (*negotiate_features)(struct glink_transport_if *if_ptr,
+			const struct glink_core_version *version_ptr,
+			uint32_t features);
+};
+
+/**
+ * struct glink_core_rx_intent - RX intent
+ *
+ * @data:	pointer to the data (may be NULL for zero-copy)
+ * @id:		remote or local intent ID
+ * @pkt_size:	total size of packet
+ * @write_offset: next write offset (initially 0)
+ * @intent_size:	size of the original intent (do not modify)
+ * @tracer_pkt:	Flag to indicate if the data is a tracer packet
+ * @iovec:	Pointer to vector buffer if the transport passes a vector buffer
+ * @vprovider:	Virtual address-space buffer provider for a vector buffer
+ * @pprovider:	Physical address-space buffer provider for a vector buffer
+ * @cookie:	Private transport specific cookie
+ * @pkt_priv:	G-Link core owned packet-private data
+ * @list:	G-Link core owned list node
+ * @bounce_buf:	Pointer to the temporary/internal bounce buffer
+ */
+struct glink_core_rx_intent {
+	void *data;
+	uint32_t id;
+	size_t pkt_size;
+	size_t write_offset;
+	size_t intent_size;
+	bool tracer_pkt;
+	void *iovec;
+	void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+	void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+	void *cookie;
+
+	/* G-Link-Core-owned elements - please ignore */
+	struct list_head list;
+	const void *pkt_priv;
+	void *bounce_buf;
+};
+
+/**
+ * struct glink_core_flow_info - Flow specific Information
+ * @mtu_tx_time_us:	Time to transmit an MTU in microseconds.
+ * @power_state:	Power state associated with the traffic flow.
+ */
+struct glink_core_flow_info {
+	unsigned long mtu_tx_time_us;
+	uint32_t power_state;
+};
+
+/**
+ * struct glink_core_transport_cfg - configuration of a new transport
+ * @name:		Name of the transport.
+ * @edge:		Subsystem the transport connects to.
+ * @versions:		Array of transport versions supported.
+ * @versions_entries:	Number of entries in @versions.
+ * @max_cid:		Maximum number of channel identifiers supported.
+ * @max_iid:		Maximum number of intent identifiers supported.
+ * @mtu:		MTU supported by this transport.
+ * @num_flows:		Number of traffic flows/priority buckets.
+ * @flow_info:		Information about each flow/priority.
+ * @token_count:	Number of tokens per assignment.
+ */
+struct glink_core_transport_cfg {
+	const char *name;
+	const char *edge;
+	const struct glink_core_version *versions;
+	size_t versions_entries;
+	uint32_t max_cid;
+	uint32_t max_iid;
+
+	size_t mtu;
+	uint32_t num_flows;
+	struct glink_core_flow_info *flow_info;
+	uint32_t token_count;
+};
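+
+/*
+ * Example of how a transport might populate this configuration (illustrative
+ * sketch only; the "my_*" names, version/feature values and sizes are
+ * hypothetical and not taken from any real transport):
+ *
+ *	static uint32_t my_negotiate(struct glink_transport_if *if_ptr,
+ *			const struct glink_core_version *v, uint32_t features)
+ *	{
+ *		return features & v->features;
+ *	}
+ *
+ *	static const struct glink_core_version my_versions[] = {
+ *		{ .version = 1, .features = 0,
+ *		  .negotiate_features = my_negotiate },
+ *	};
+ *
+ *	struct glink_core_transport_cfg cfg = {
+ *		.name = "my_xprt",
+ *		.edge = "my_remote",
+ *		.versions = my_versions,
+ *		.versions_entries = ARRAY_SIZE(my_versions),
+ *		.max_cid = 0xFF,
+ *		.max_iid = 0xFF,
+ *		.mtu = 4096,
+ *	};
+ *
+ * The populated cfg is then passed to glink_core_register_transport(),
+ * declared later in this header.
+ */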
+
+struct glink_core_if {
+	/* Negotiation */
+	void (*link_up)(struct glink_transport_if *if_ptr);
+	void (*link_down)(struct glink_transport_if *if_ptr);
+	void (*rx_cmd_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	void (*rx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+
+	/* channel management */
+	void (*rx_cmd_ch_remote_open)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, const char *name, uint16_t req_xprt);
+	void (*rx_cmd_ch_open_ack)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint16_t xprt_resp);
+	void (*rx_cmd_ch_remote_close)(struct glink_transport_if *if_ptr,
+			uint32_t rcid);
+	void (*rx_cmd_ch_close_ack)(struct glink_transport_if *if_ptr,
+			uint32_t lcid);
+
+	/* channel data */
+	struct glink_core_rx_intent * (*rx_get_pkt_ctx)(
+			struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t liid);
+	void (*rx_put_pkt_ctx)(struct glink_transport_if *if_ptr, uint32_t rcid,
+			struct glink_core_rx_intent *intent_ptr, bool complete);
+	void (*rx_cmd_remote_rx_intent_put)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t riid, size_t size);
+	void (*rx_cmd_remote_rx_intent_put_cookie)(
+			struct glink_transport_if *if_ptr, uint32_t rcid,
+			uint32_t riid, size_t size, void *cookie);
+	void (*rx_cmd_tx_done)(struct glink_transport_if *if_ptr, uint32_t rcid,
+			uint32_t riid, bool reuse);
+	void (*rx_cmd_remote_rx_intent_req)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, size_t size);
+	void (*rx_cmd_rx_intent_req_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, bool granted);
+	void (*rx_cmd_remote_sigs)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint32_t sigs);
+
+	/* channel scheduling */
+	void (*tx_resume)(struct glink_transport_if *if_ptr);
+};
+
+int glink_core_register_transport(struct glink_transport_if *if_ptr,
+		struct glink_core_transport_cfg *cfg);
+
+void glink_core_unregister_transport(struct glink_transport_if *if_ptr);
+
+/**
+ * of_get_glink_core_qos_cfg() - Parse the qos related dt entries
+ * @phandle:	The handle to the qos related node in DT.
+ * @cfg:	The transport configuration to be filled.
+ *
+ * Return: 0 on Success, standard Linux error otherwise.
+ */
+int of_get_glink_core_qos_cfg(struct device_node *phandle,
+				struct glink_core_transport_cfg *cfg);
+
+/**
+ * rx_linear_vbuf_provider() - Virtual Buffer Provider for linear buffers
+ * @iovec:	Pointer to the beginning of the linear buffer.
+ * @offset:	Offset into the buffer whose address is needed.
+ * @size:	Pointer to hold the length of the contiguous buffer space.
+ *
+ * This function is used when a linear buffer is received while the client has
+ * registered to receive vector buffers.
+ *
+ * Return: Address of the buffer which is at offset "offset" from the beginning
+ *         of the buffer.
+ */
+static inline void *rx_linear_vbuf_provider(void *iovec, size_t offset,
+					    size_t *size)
+{
+	struct glink_core_rx_intent *rx_info =
+		(struct glink_core_rx_intent *)iovec;
+
+	if (unlikely(!iovec || !size))
+		return NULL;
+
+	if (unlikely(offset >= rx_info->pkt_size))
+		return NULL;
+
+	if (unlikely(OVERFLOW_ADD_UNSIGNED(void *, rx_info->data, offset)))
+		return NULL;
+
+	*size = rx_info->pkt_size - offset;
+	return rx_info->data + offset;
+}
+
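+/*
+ * Usage sketch (illustrative only): a client that registered for vector
+ * buffers can walk a linear RX intent through this provider, where "intent"
+ * is the struct glink_core_rx_intent pointer passed in as the iovec and
+ * my_consume() is a hypothetical consumer:
+ *
+ *	size_t offset = 0, chunk;
+ *	void *buf;
+ *
+ *	while ((buf = rx_linear_vbuf_provider(intent, offset, &chunk))) {
+ *		my_consume(buf, chunk);
+ *		offset += chunk;
+ *	}
+ *
+ * For a linear buffer the first call returns the whole remaining payload, so
+ * the loop finishes after a single iteration.
+ */
+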
+#endif /* _SOC_QCOM_GLINK_CORE_IF_H_ */
diff --git a/drivers/soc/qcom/glink_debugfs.c b/drivers/soc/qcom/glink_debugfs.c
new file mode 100644
index 0000000..0a66cd5
--- /dev/null
+++ b/drivers/soc/qcom/glink_debugfs.c
@@ -0,0 +1,788 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <soc/qcom/glink.h>
+#include "glink_private.h"
+#include "glink_core_if.h"
+
+
+static const char * const ss_string[] = {
+	[GLINK_DBGFS_MPSS] = "mpss",
+	[GLINK_DBGFS_APSS] = "apss",
+	[GLINK_DBGFS_LPASS] = "lpass",
+	[GLINK_DBGFS_DSPS] = "dsps",
+	[GLINK_DBGFS_RPM] = "rpm",
+	[GLINK_DBGFS_WCNSS] = "wcnss",
+	[GLINK_DBGFS_LLOOP] = "lloop",
+	[GLINK_DBGFS_MOCK] = "mock"
+};
+
+static const char * const xprt_string[] = {
+	[GLINK_DBGFS_SMEM] = "smem",
+	[GLINK_DBGFS_SMD] = "smd",
+	[GLINK_DBGFS_XLLOOP] = "lloop",
+	[GLINK_DBGFS_XMOCK] = "mock",
+	[GLINK_DBGFS_XMOCK_LOW] = "mock_low",
+	[GLINK_DBGFS_XMOCK_HIGH] = "mock_high"
+};
+
+static const char * const ch_st_string[] = {
+	[GLINK_CHANNEL_CLOSED] = "CLOSED",
+	[GLINK_CHANNEL_OPENING] = "OPENING",
+	[GLINK_CHANNEL_OPENED] = "OPENED",
+	[GLINK_CHANNEL_CLOSING] = "CLOSING",
+};
+
+static const char * const xprt_st_string[] = {
+	[GLINK_XPRT_DOWN] = "DOWN",
+	[GLINK_XPRT_NEGOTIATING] = "NEGOT",
+	[GLINK_XPRT_OPENED] = "OPENED",
+	[GLINK_XPRT_FAILED] = "FAILED"
+};
+
+#if defined(CONFIG_DEBUG_FS)
+#define GLINK_DBGFS_NAME_SIZE (2 * GLINK_NAME_SIZE + 1)
+
+struct glink_dbgfs_dent {
+	struct list_head list_node;
+	char par_name[GLINK_DBGFS_NAME_SIZE];
+	char self_name[GLINK_DBGFS_NAME_SIZE];
+	struct dentry *parent;
+	struct dentry *self;
+	spinlock_t file_list_lock_lhb0;
+	struct list_head file_list;
+};
+
+static struct dentry *dent;
+static LIST_HEAD(dent_list);
+static DEFINE_MUTEX(dent_list_lock_lha0);
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	struct glink_dbgfs_data *dfs_d;
+
+	dfs_d = s->private;
+	dfs_d->o_func(s);
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+#endif
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum
+ *				value
+ * @enum_id:	enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(ss_string))
+		return NULL;
+
+	return ss_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ss_enum_string);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum
+ *					value
+ * @enum_id:	enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(xprt_string))
+		return NULL;
+	return xprt_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_enum_string);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based
+ *					on enum value
+ * @enum_id:	enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(
+				enum transport_state_e enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(xprt_st_string))
+		return NULL;
+
+	return xprt_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_xprt_state_string);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on
+ *					enum value
+ * @enum_id:	enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(
+				enum local_channel_state_e enum_id)
+{
+	if (enum_id >= ARRAY_SIZE(ch_st_string))
+		return NULL;
+
+	return ch_st_string[enum_id];
+}
+EXPORT_SYMBOL(glink_get_ch_state_string);
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * glink_dfs_create_file() - create the debugfs file
+ * @name:	debugfs file name
+ * @parent:	pointer to the parent dentry structure
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ *
+ * Return:	pointer to the allocated glink_dbgfs_data structure or
+ *		NULL in case of an error.
+ *
+ * This function actually creates a debugfs file under the parent directory.
+ */
+static struct glink_dbgfs_data *glink_dfs_create_file(const char *name,
+		struct dentry *parent, void (*show)(struct seq_file *s),
+		void *dbgfs_data, bool b_free_req)
+{
+	struct dentry *file;
+	struct glink_dbgfs_data *dfs_d;
+
+	dfs_d = kzalloc(sizeof(struct glink_dbgfs_data), GFP_KERNEL);
+	if (dfs_d == NULL)
+		return NULL;
+
+	dfs_d->o_func = show;
+	if (dbgfs_data != NULL) {
+		dfs_d->priv_data = dbgfs_data;
+		dfs_d->b_priv_free_req = b_free_req;
+	}
+	file = debugfs_create_file(name, 0400, parent, dfs_d, &debug_ops);
+	if (!file)
+		GLINK_DBG("%s: unable to create file '%s'\n", __func__,
+				name);
+	dfs_d->dent = file;
+	return dfs_d;
+}
+
+/**
+ * write_ch_intent() - write channel intent details
+ * @s:		pointer to the sequential file
+ * @intent:	pointer glink core intent structure
+ * @i_type:	type of intent
+ * @count:	serial number of the intent.
+ *
+ * This function is a helper function of glink_dfs_update_ch_intents()
+ * that prints out details of any specific intent.
+ */
+static void write_ch_intent(struct seq_file *s,
+			struct glink_core_rx_intent *intent,
+			char *i_type, unsigned int count)
+{
+	char *intent_type;
+	/*
+	 * Formatted, human readable channel intent output, e.g.:
+	 * TYPE       |SN  |ID |PKT_SIZE|W_OFFSET|INT_SIZE|
+	 * --------------------------------------------------------------
+	 * LOCAL_LIST|#2  |1   |0       |0       |8       |
+	 */
+	if (count == 1) {
+		intent_type = i_type;
+		seq_puts(s,
+		"\n--------------------------------------------------------\n");
+	} else {
+		intent_type = "";
+	}
+	seq_printf(s, "%-20s|#%-5d|%-6u|%-10zu|%-10zu|%-10zu|\n",
+			intent_type,
+			count,
+			intent->id,
+			intent->pkt_size,
+			intent->write_offset,
+			intent->intent_size);
+}
+
+/**
+ * glink_dfs_update_ch_intent() - writes the intent details of a specific
+ *				  channel to the corresponding debugfs file
+ * @s:		pointer to the sequential file
+ *
+ * This function extracts the intent details of a channel & prints them to
+ * the corresponding debugfs file of that channel.
+ */
+static void glink_dfs_update_ch_intent(struct seq_file *s)
+{
+	struct glink_dbgfs_data *dfs_d;
+	struct channel_ctx *ch_ctx;
+	struct glink_core_rx_intent *intent;
+	struct glink_core_rx_intent *intent_temp;
+	struct glink_ch_intent_info ch_intent_info;
+	unsigned long flags;
+	unsigned int count = 0;
+
+	dfs_d = s->private;
+	ch_ctx = dfs_d->priv_data;
+	if (ch_ctx != NULL) {
+		glink_get_ch_intent_info(ch_ctx, &ch_intent_info);
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+		seq_printf(s, "%-20s|%-6s|%-6s|%-10s|%-10s|%-10s|\n",
+					"INTENT TYPE",
+					"SN",
+					"ID",
+					"PKT_SIZE",
+					"W_OFFSET",
+					"INT_SIZE");
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+		spin_lock_irqsave(ch_intent_info.li_lst_lock, flags);
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.li_avail_list, list) {
+			count++;
+			write_ch_intent(s, intent, "LOCAL_AVAIL_LIST", count);
+		}
+
+		count = 0;
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.li_used_list, list) {
+			count++;
+			write_ch_intent(s, intent, "LOCAL_USED_LIST", count);
+		}
+		spin_unlock_irqrestore(ch_intent_info.li_lst_lock, flags);
+
+		count = 0;
+		spin_lock_irqsave(ch_intent_info.ri_lst_lock, flags);
+		list_for_each_entry_safe(intent, intent_temp,
+				ch_intent_info.ri_list, list) {
+			count++;
+			write_ch_intent(s, intent, "REMOTE_LIST", count);
+		}
+		spin_unlock_irqrestore(ch_intent_info.ri_lst_lock,
+					flags);
+		seq_puts(s,
+		"---------------------------------------------------------------\n");
+	}
+}
+
+/**
+ * glink_dfs_update_ch_stats() - writes statistics of a specific
+ *				 channel to the corresponding debugfs file
+ * @s:		pointer to the sequential file
+ *
+ * This function extracts other statistics of a channel & prints them to
+ * the corresponding debugfs file of that channel.
+ */
+static void glink_dfs_update_ch_stats(struct seq_file *s)
+{
+	/* FUTURE:  add channel statistics */
+	seq_puts(s, "not yet implemented\n");
+}
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specific files & folder
+ *					in debugfs when channel is fully closed
+ * @ch_ctx:		pointer to the channel context
+ * @xprt_ctx:		pointer to the transport context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs ch_rm_dbgfs;
+	char *edge_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+	char *xprt_name;
+
+	ch_rm_dbgfs.curr_name = glink_get_ch_name(ch_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: Invalid xprt_name  or edge_name for ch '%s'\n",
+				__func__, ch_rm_dbgfs.curr_name);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+					edge_name, xprt_name);
+	ch_rm_dbgfs.par_name = curr_dir_name;
+	glink_debugfs_remove_recur(&ch_rm_dbgfs);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+/**
+ * glink_debugfs_add_channel() - create channel specific files & folder in
+ *				 debugfs when channel is added
+ * @ch_ctx:		pointer to the channel context
+ * @xprt_ctx:		pointer to the transport context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs ch_dbgfs;
+	char *ch_name;
+	char *edge_name;
+	char *xprt_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (ch_ctx == NULL) {
+		GLINK_ERR("%s: Channel Context is NULL\n", __func__);
+		return;
+	}
+
+	ch_name = glink_get_ch_name(ch_ctx);
+	edge_name =  glink_get_xprt_edge_name(xprt_ctx);
+	xprt_name =  glink_get_xprt_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: Invalid xprt_name  or edge_name for ch '%s'\n",
+				__func__, ch_name);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+					edge_name, xprt_name);
+
+	ch_dbgfs.curr_name = curr_dir_name;
+	ch_dbgfs.par_name = "channel";
+	ch_dbgfs.b_dir_create = true;
+	glink_debugfs_create(ch_name, NULL, &ch_dbgfs, NULL, false);
+
+	ch_dbgfs.par_name = ch_dbgfs.curr_name;
+	ch_dbgfs.curr_name = ch_name;
+	ch_dbgfs.b_dir_create = false;
+	glink_debugfs_create("stats", glink_dfs_update_ch_stats,
+				&ch_dbgfs, (void *)ch_ctx, false);
+	glink_debugfs_create("intents", glink_dfs_update_ch_intent,
+			&ch_dbgfs, (void *)ch_ctx, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specific files & folder in
+ *			      debugfs when new transport is registered
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx)
+{
+	struct glink_dbgfs xprt_dbgfs;
+	char *xprt_name;
+	char *edge_name;
+	char curr_dir_name[GLINK_DBGFS_NAME_SIZE];
+
+	if (xprt_ctx == NULL) {
+		GLINK_ERR("%s: Transport Context is NULL\n", __func__);
+		return;
+	}
+	xprt_name = glink_get_xprt_name(xprt_ctx);
+	edge_name = glink_get_xprt_edge_name(xprt_ctx);
+	if (!xprt_name || !edge_name) {
+		GLINK_ERR("%s: xprt name or edge name is NULL\n", __func__);
+		return;
+	}
+	snprintf(curr_dir_name, sizeof(curr_dir_name), "%s_%s",
+					edge_name, xprt_name);
+	xprt_dbgfs.par_name = "glink";
+	xprt_dbgfs.curr_name = "xprt";
+	xprt_dbgfs.b_dir_create = true;
+	glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+	xprt_dbgfs.curr_name = "channel";
+	glink_debugfs_create(curr_dir_name, NULL, &xprt_dbgfs, NULL, false);
+}
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+/**
+ * glink_dfs_create_channel_list() - create & update the channel details
+ * @s:	pointer to seq_file
+ *
+ * This function updates channel details in debugfs
+ * file present in /glink/channel/channels
+ */
+static void glink_dfs_create_channel_list(struct seq_file *s)
+{
+	struct xprt_ctx_iterator xprt_iter;
+	struct ch_ctx_iterator ch_iter;
+
+	struct glink_core_xprt_ctx *xprt_ctx;
+	struct channel_ctx *ch_ctx;
+	int count = 0;
+	/*
+	 * Formatted, human readable channel state output, e.g.:
+	 * NAME               |LCID|RCID|XPRT|EDGE|LSTATE |RSTATE|LINT-Q|RINT-Q|
+	 * --------------------------------------------------------------------
+	 * LOCAL_LOOPBACK_CLNT|2   |1  |lloop|local|OPENED|OPENED|5     |6    |
+	 * N.B. TX & RX packet counts are not printed because the corresponding
+	 * statistics are not implemented yet.
+	 */
+	seq_printf(s, "%-20s|%-4s|%-4s|%-10s|%-6s|%-7s|%-7s|%-5s|%-5s|\n",
+								"NAME",
+								"LCID",
+								"RCID",
+								"XPRT",
+								"EDGE",
+								"LSTATE",
+								"RSTATE",
+								"LINTQ",
+								"RINTQ");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	glink_xprt_ctx_iterator_init(&xprt_iter);
+	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	while (xprt_ctx != NULL) {
+		glink_ch_ctx_iterator_init(&ch_iter, xprt_ctx);
+		ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+		while (ch_ctx != NULL) {
+			count++;
+			seq_printf(s, "%-20s|%-4i|%-4i|%-10s|%-6s|%-7s|",
+					glink_get_ch_name(ch_ctx),
+					glink_get_ch_lcid(ch_ctx),
+					glink_get_ch_rcid(ch_ctx),
+					glink_get_ch_xprt_name(ch_ctx),
+					glink_get_ch_edge_name(ch_ctx),
+					glink_get_ch_lstate(ch_ctx));
+			seq_printf(s, "%-7s|%-5i|%-5i|\n",
+			(glink_get_ch_rstate(ch_ctx) ? "OPENED" : "CLOSED"),
+			glink_get_ch_lintents_queued(ch_ctx),
+			glink_get_ch_rintents_queued(ch_ctx));
+
+			ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+		}
+		glink_ch_ctx_iterator_end(&ch_iter, xprt_ctx);
+		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+	}
+
+	glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_create_xprt_list() - create & update the transport details
+ * @s:	pointer to seq_file
+ *
+ * This function updates transport details in the debugfs file present
+ * in /glink/xprt/xprts
+ */
+static void glink_dfs_create_xprt_list(struct seq_file *s)
+{
+	struct xprt_ctx_iterator xprt_iter;
+	struct glink_core_xprt_ctx *xprt_ctx;
+	const struct glink_core_version  *gver;
+	uint32_t version;
+	uint32_t features;
+	int count = 0;
+	/*
+	 * Formatted, human readable transport state output, e.g.:
+	 * XPRT_NAME|REMOTE    |STATE|VERSION |FEATURES|
+	 * ---------------------------------------------
+	 * smd_trans|lpass     |2    |0       |1       |
+	 * smem     |mpss      |0    |0       |0       |
+	 */
+	seq_printf(s, "%-20s|%-20s|%-6s|%-8s|%-8s|\n",
+							"XPRT_NAME",
+							"REMOTE",
+							"STATE",
+							"VERSION",
+							"FEATURES");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	glink_xprt_ctx_iterator_init(&xprt_iter);
+	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	while (xprt_ctx != NULL) {
+		count++;
+		seq_printf(s, "%-20s|%-20s|",
+					glink_get_xprt_name(xprt_ctx),
+					glink_get_xprt_edge_name(xprt_ctx));
+		gver = glink_get_xprt_version_features(xprt_ctx);
+		if (gver != NULL) {
+			version = gver->version;
+			features = gver->features;
+			seq_printf(s, "%-6s|%-8i|%-8i|\n",
+					glink_get_xprt_state(xprt_ctx),
+					version,
+					features);
+		} else {
+			seq_printf(s, "%-6s|%-8i|%-8i|\n",
+					glink_get_xprt_state(xprt_ctx),
+					-ENODATA,
+					-ENODATA);
+		}
+		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+
+	}
+
+	glink_xprt_ctx_iterator_end(&xprt_iter);
+}
+
+/**
+ * glink_dfs_update_list() - update the internally maintained dentry linked list
+ * @curr_dent:	pointer to the current dentry object
+ * @parent:	pointer to the parent dentry object
+ * @curr:	current directory name
+ * @par_dir:	parent directory name
+ */
+void glink_dfs_update_list(struct dentry *curr_dent, struct dentry *parent,
+			const char *curr, const char *par_dir)
+{
+	struct glink_dbgfs_dent *dbgfs_dent_s;
+
+	if (curr_dent != NULL) {
+		dbgfs_dent_s = kzalloc(sizeof(struct glink_dbgfs_dent),
+				GFP_KERNEL);
+		if (dbgfs_dent_s != NULL) {
+			INIT_LIST_HEAD(&dbgfs_dent_s->file_list);
+			spin_lock_init(&dbgfs_dent_s->file_list_lock_lhb0);
+			dbgfs_dent_s->parent = parent;
+			dbgfs_dent_s->self = curr_dent;
+			strlcpy(dbgfs_dent_s->self_name, curr,
+				sizeof(dbgfs_dent_s->self_name));
+			strlcpy(dbgfs_dent_s->par_name, par_dir,
+				sizeof(dbgfs_dent_s->par_name));
+			mutex_lock(&dent_list_lock_lha0);
+			list_add_tail(&dbgfs_dent_s->list_node, &dent_list);
+			mutex_unlock(&dent_list_lock_lha0);
+		}
+	} else {
+		GLINK_DBG("%s:create directory failed for par:curr [%s:%s]\n",
+				__func__, par_dir, curr);
+	}
+}
+
+/**
+ * glink_remove_dfs_entry() - remove an entry from dent_list
+ * @entry:	pointer to the glink_dbgfs_dent structure
+ *
+ * This function removes the entry from the internally maintained
+ * linked list of dentries. It also deletes the file list and associated memory
+ * if present.
+ */
+void glink_remove_dfs_entry(struct glink_dbgfs_dent *entry)
+{
+	struct glink_dbgfs_data *fentry, *fentry_temp;
+	unsigned long flags;
+
+	if (entry == NULL)
+		return;
+	if (!list_empty(&entry->file_list)) {
+		spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+		list_for_each_entry_safe(fentry, fentry_temp,
+				&entry->file_list, flist) {
+			if (fentry->b_priv_free_req)
+				kfree(fentry->priv_data);
+			list_del(&fentry->flist);
+			kfree(fentry);
+			fentry = NULL;
+		}
+		spin_unlock_irqrestore(&entry->file_list_lock_lhb0, flags);
+	}
+	list_del(&entry->list_node);
+	kfree(entry);
+	entry = NULL;
+}
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @rm_dfs:	pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories below the given directory.
+ * This also takes care of freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *rm_dfs)
+{
+	const char *c_dir_name;
+	const char *p_dir_name;
+	struct glink_dbgfs_dent *entry, *entry_temp;
+	struct dentry *par_dent = NULL;
+
+	if (rm_dfs == NULL)
+		return;
+
+	c_dir_name = rm_dfs->curr_name;
+	p_dir_name = rm_dfs->par_name;
+
+	mutex_lock(&dent_list_lock_lha0);
+	list_for_each_entry_safe(entry, entry_temp, &dent_list, list_node) {
+		if (!strcmp(entry->par_name, c_dir_name)) {
+			glink_remove_dfs_entry(entry);
+		} else if (!strcmp(entry->self_name, c_dir_name)
+				&& !strcmp(entry->par_name, p_dir_name)) {
+			par_dent = entry->self;
+			glink_remove_dfs_entry(entry);
+		}
+	}
+	mutex_unlock(&dent_list_lock_lha0);
+	if (par_dent != NULL)
+		debugfs_remove_recursive(par_dent);
+}
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name:	debugfs file name
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dir:	pointer to a structure debugfs_dir
+ * @dbgfs_data:	pointer to any private data that needs to be associated
+ *		with the debugfs file
+ * @b_free_req:	boolean value deciding whether to free the memory
+ *		associated with
+ *		@dbgfs_data during deletion of the file
+ *
+ * Return:	pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs file
+ * and calls glink_dfs_create_file(). Anyone who allocates memory for
+ * dbgfs_data and needs it to be freed on deletion of the file must set
+ * b_free_req to true. Otherwise, there will be a memory leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+		void (*show)(struct seq_file *),
+		struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req)
+{
+	struct dentry *parent =  NULL;
+	struct dentry *dent = NULL;
+	struct glink_dbgfs_dent *entry;
+	struct glink_dbgfs_data *file_data;
+	const char *c_dir_name;
+	const char *p_dir_name;
+	unsigned long flags;
+
+	if (dir == NULL) {
+		GLINK_ERR("%s: debugfs_dir structure is null\n", __func__);
+		return NULL;
+	}
+	c_dir_name = dir->curr_name;
+	p_dir_name = dir->par_name;
+
+	mutex_lock(&dent_list_lock_lha0);
+	list_for_each_entry(entry, &dent_list, list_node)
+		if (!strcmp(entry->par_name, p_dir_name)
+				&& !strcmp(entry->self_name, c_dir_name)) {
+			parent = entry->self;
+			break;
+		}
+	mutex_unlock(&dent_list_lock_lha0);
+	p_dir_name = c_dir_name;
+	c_dir_name = name;
+	if (parent != NULL) {
+		if (dir->b_dir_create) {
+			dent = debugfs_create_dir(name, parent);
+			if (dent != NULL)
+				glink_dfs_update_list(dent, parent,
+							c_dir_name, p_dir_name);
+		} else {
+			file_data = glink_dfs_create_file(name, parent, show,
+							dbgfs_data, b_free_req);
+			spin_lock_irqsave(&entry->file_list_lock_lhb0, flags);
+			if (file_data != NULL)
+				list_add_tail(&file_data->flist,
+						&entry->file_list);
+			spin_unlock_irqrestore(&entry->file_list_lock_lhb0,
+						flags);
+		}
+	} else {
+		GLINK_DBG("%s: parent dentry is null for [%s]\n",
+				__func__, name);
+	}
+	return dent;
+}
+EXPORT_SYMBOL(glink_debugfs_create);
+
+/**
+ * glink_debugfs_init() - initialize the glink debugfs directory structure
+ *
+ * Return:	0 in success otherwise appropriate error code
+ *
+ * This function initializes the debugfs directory for glink
+ */
+int glink_debugfs_init(void)
+{
+	struct glink_dbgfs dbgfs;
+
+	/* fake parent name */
+	dent = debugfs_create_dir("glink", NULL);
+	if (IS_ERR_OR_NULL(dent))
+		return PTR_ERR(dent);
+
+	glink_dfs_update_list(dent, NULL, "glink", "root");
+
+	dbgfs.b_dir_create = true;
+	dbgfs.curr_name = "glink";
+	dbgfs.par_name = "root";
+	glink_debugfs_create("xprt", NULL, &dbgfs, NULL, false);
+	glink_debugfs_create("channel", NULL, &dbgfs, NULL, false);
+
+	dbgfs.curr_name = "channel";
+	dbgfs.par_name = "glink";
+	dbgfs.b_dir_create = false;
+	glink_debugfs_create("channels", glink_dfs_create_channel_list,
+				&dbgfs, NULL, false);
+	dbgfs.curr_name = "xprt";
+	glink_debugfs_create("xprts", glink_dfs_create_xprt_list,
+				&dbgfs, NULL, false);
+
+	return 0;
+}
+EXPORT_SYMBOL(glink_debugfs_init);
+
+/**
+ * glink_debugfs_exit() - removes the glink debugfs directory
+ *
+ * This function recursively remove all the debugfs directories
+ * starting from dent
+ */
+void glink_debugfs_exit(void)
+{
+	if (dent != NULL)
+		debugfs_remove_recursive(dent);
+}
+EXPORT_SYMBOL(glink_debugfs_exit);
+#else
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs) { }
+EXPORT_SYMBOL(glink_debugfs_remove_recur);
+
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_remove_channel);
+
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_channel);
+
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx) { }
+EXPORT_SYMBOL(glink_debugfs_add_xprt);
+
+int glink_debugfs_init(void) { return 0; }
+EXPORT_SYMBOL(glink_debugfs_init);
+
+void glink_debugfs_exit(void) { }
+EXPORT_SYMBOL(glink_debugfs_exit);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/soc/qcom/glink_loopback_commands.h b/drivers/soc/qcom/glink_loopback_commands.h
new file mode 100644
index 0000000..39620c9
--- /dev/null
+++ b/drivers/soc/qcom/glink_loopback_commands.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _GLINK_LOOPBACK_COMMANDS_H_
+#define _GLINK_LOOPBACK_COMMANDS_H_
+
+#define MAX_NAME_LEN 32
+
+enum request_type {
+	OPEN = 1,
+	CLOSE,
+	QUEUE_RX_INTENT_CONFIG,
+	TX_CONFIG,
+	RX_DONE_CONFIG,
+};
+
+struct req_hdr {
+	uint32_t req_id;
+	uint32_t req_type;
+	uint32_t req_size;
+};
+
+struct open_req {
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+struct close_req {
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+struct queue_rx_intent_config_req {
+	uint32_t num_intents;
+	uint32_t intent_size;
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+enum transform_type {
+	NO_TRANSFORM = 0,
+	PACKET_COUNT,
+	CHECKSUM,
+};
+
+struct tx_config_req {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t echo_count;
+	uint32_t transform_type;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+struct rx_done_config_req {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t name_len;
+	char ch_name[MAX_NAME_LEN];
+};
+
+union req_payload {
+	struct open_req open;
+	struct close_req close;
+	struct queue_rx_intent_config_req q_rx_int_conf;
+	struct tx_config_req tx_conf;
+	struct rx_done_config_req rx_done_conf;
+};
+
+struct req {
+	struct req_hdr hdr;
+	union req_payload payload;
+};
+
+struct resp {
+	uint32_t req_id;
+	uint32_t req_type;
+	uint32_t response;
+};
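+
+/*
+ * Example of how a loopback client might build an OPEN request from these
+ * structures (illustrative sketch; the request id and channel name are
+ * arbitrary, and req_size is assumed here to be the payload size):
+ *
+ *	struct req pkt;
+ *
+ *	memset(&pkt, 0, sizeof(pkt));
+ *	pkt.hdr.req_id = 1;
+ *	pkt.hdr.req_type = OPEN;
+ *	pkt.hdr.req_size = sizeof(struct open_req);
+ *	pkt.payload.open.delay_ms = 0;
+ *	pkt.payload.open.name_len = strlen("LOOPBACK_CH");
+ *	strlcpy(pkt.payload.open.ch_name, "LOOPBACK_CH", MAX_NAME_LEN);
+ *
+ * The server is expected to answer with a struct resp echoing req_id and
+ * req_type along with a response code.
+ */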
+
+/*
+ * Tracer Packet Event IDs for Loopback Client/Server.
+ * This being a client of G-Link, the tracer packet events start
+ * from 256.
+ */
+enum loopback_tracer_pkt_events {
+	LOOPBACK_SRV_TX = 256,
+	LOOPBACK_SRV_RX = 257,
+	LOOPBACK_CLNT_TX = 258,
+	LOOPBACK_CLNT_RX = 259,
+};
+#endif
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
new file mode 100644
index 0000000..0aeb0e8
--- /dev/null
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -0,0 +1,1297 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/uio.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_loopback_commands.h"
+
+
+/* Number of internal IPC Logging log pages */
+#define GLINK_LBSRV_NUM_LOG_PAGES	3
+
+static void *glink_lbsrv_log_ctx;
+
+#define GLINK_LBSRV_IPC_LOG_STR(x...) do { \
+	if (glink_lbsrv_log_ctx) \
+		ipc_log_string(glink_lbsrv_log_ctx, x); \
+} while (0)
+
+#define LBSRV_INFO(x...) GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x)
+
+#define LBSRV_ERR(x...) do {                              \
+	pr_err("<LBSRV> " x); \
+	GLINK_LBSRV_IPC_LOG_STR("<LBSRV> " x);  \
+} while (0)
+
+enum ch_type {
+	CTL,
+	DATA,
+};
+
+enum buf_type {
+	LINEAR,
+	VECTOR,
+};
+
+struct tx_config_info {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	uint32_t echo_count;
+	uint32_t transform_type;
+};
+
+struct rx_done_config_info {
+	uint32_t random_delay;
+	uint32_t delay_ms;
+};
+
+struct rmt_rx_intent_req_work_info {
+	size_t req_intent_size;
+	struct delayed_work work;
+	struct ch_info *work_ch_info;
+};
+
+struct queue_rx_intent_work_info {
+	uint32_t req_id;
+	bool deferred;
+	struct ch_info *req_ch_info;
+	uint32_t num_intents;
+	uint32_t intent_size;
+	uint32_t random_delay;
+	uint32_t delay_ms;
+	struct delayed_work work;
+	struct ch_info *work_ch_info;
+};
+
+struct lbsrv_vec {
+	uint32_t num_bufs;
+	struct kvec vec[0];
+};
+
+struct tx_work_info {
+	struct tx_config_info tx_config;
+	struct delayed_work work;
+	struct ch_info *tx_ch_info;
+	void *data;
+	bool tracer_pkt;
+	uint32_t buf_type;
+	size_t size;
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+};
+
+struct rx_done_work_info {
+	struct delayed_work work;
+	struct ch_info *rx_done_ch_info;
+	void *ptr;
+};
+
+struct rx_work_info {
+	struct ch_info *rx_ch_info;
+	void *pkt_priv;
+	void *ptr;
+	bool tracer_pkt;
+	uint32_t buf_type;
+	size_t size;
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+	struct delayed_work work;
+};
+
+struct ch_info {
+	struct list_head list;
+	struct mutex ch_info_lock;
+	char name[MAX_NAME_LEN];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	void *handle;
+	bool fully_opened;
+	uint32_t type;
+	struct delayed_work open_work;
+	struct delayed_work close_work;
+	struct tx_config_info tx_config;
+	struct rx_done_config_info rx_done_config;
+	struct queue_rx_intent_work_info *queue_rx_intent_work_info;
+};
+
+struct ctl_ch_info {
+	char name[MAX_NAME_LEN];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+};
+
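+/*
+ * Control channels that the loopback server opens at init and whenever the
+ * corresponding link comes up.
+ */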
+static struct ctl_ch_info ctl_ch_tbl[] = {
+	{"LOCAL_LOOPBACK_SRV", "local", "lloop"},
+	{"LOOPBACK_CTL_APSS", "mpss", "smem"},
+	{"LOOPBACK_CTL_APSS", "lpass", "smem"},
+	{"LOOPBACK_CTL_APSS", "dsps", "smem"},
+	{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
+};
+
+static DEFINE_MUTEX(ctl_ch_list_lock);
+static LIST_HEAD(ctl_ch_list);
+static DEFINE_MUTEX(data_ch_list_lock);
+static LIST_HEAD(data_ch_list);
+
+struct workqueue_struct *glink_lbsrv_wq;
+
+/**
+ * link_state_work_info - Information about work handling link state updates
+ * edge:	Remote subsystem name in the link.
+ * transport:	Name of the transport/link.
+ * link_state:	State of the transport/link.
+ * work:	Reference to the work item.
+ */
+struct link_state_work_info {
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	enum glink_link_state link_state;
+	struct delayed_work work;
+};
+
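+/*
+ * The loopback server registers for link state updates from every transport
+ * in the system; the updates are handled in glink_lbsrv_link_state_worker().
+ */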
+static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv);
+static struct glink_link_info glink_lbsrv_link_info = {
+			NULL, NULL, glink_lbsrv_link_state_cb};
+static void *glink_lbsrv_link_state_notif_handle;
+
+static void glink_lbsrv_open_worker(struct work_struct *work);
+static void glink_lbsrv_close_worker(struct work_struct *work);
+static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work);
+static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work);
+static void glink_lbsrv_rx_worker(struct work_struct *work);
+static void glink_lbsrv_rx_done_worker(struct work_struct *work);
+static void glink_lbsrv_tx_worker(struct work_struct *work);
+
+int glink_lbsrv_send_response(void *handle, uint32_t req_id, uint32_t req_type,
+		uint32_t response)
+{
+	struct resp *resp_pkt = kzalloc(sizeof(struct resp), GFP_KERNEL);
+
+	if (!resp_pkt) {
+		LBSRV_ERR("%s: Error allocating response packet\n", __func__);
+		return -ENOMEM;
+	}
+
+	resp_pkt->req_id = req_id;
+	resp_pkt->req_type = req_type;
+	resp_pkt->response = response;
+
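+	/*
+	 * pkt_priv carries the buffer type (LINEAR) so that
+	 * glink_lpbsrv_notify_tx_done() knows how to free resp_pkt once the
+	 * transmit completes.
+	 */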
+	return glink_tx(handle, (void *)LINEAR, (void *)resp_pkt,
+			sizeof(struct resp), 0);
+}
+
+static uint32_t calc_delay_ms(uint32_t random_delay, uint32_t delay_ms)
+{
+	uint32_t tmp_delay_ms;
+
+	if (random_delay && delay_ms)
+		tmp_delay_ms = prandom_u32() % delay_ms;
+	else if (random_delay)
+		tmp_delay_ms = prandom_u32();
+	else
+		tmp_delay_ms = delay_ms;
+
+	return tmp_delay_ms;
+}
+
+static int create_ch_info(char *name, char *edge, char *transport,
+			  uint32_t type, struct ch_info **ret_ch_info)
+{
+	struct ch_info *tmp_ch_info;
+
+	tmp_ch_info = kzalloc(sizeof(struct ch_info), GFP_KERNEL);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s: Error allocating ch_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&tmp_ch_info->list);
+	mutex_init(&tmp_ch_info->ch_info_lock);
+	strlcpy(tmp_ch_info->name, name, MAX_NAME_LEN);
+	strlcpy(tmp_ch_info->edge, edge, GLINK_NAME_SIZE);
+	strlcpy(tmp_ch_info->transport, transport, GLINK_NAME_SIZE);
+	tmp_ch_info->type = type;
+	INIT_DELAYED_WORK(&tmp_ch_info->open_work,
+			  glink_lbsrv_open_worker);
+	INIT_DELAYED_WORK(&tmp_ch_info->close_work,
+			  glink_lbsrv_close_worker);
+	tmp_ch_info->tx_config.echo_count = 1;
+
+	if (type == CTL) {
+		mutex_lock(&ctl_ch_list_lock);
+		list_add_tail(&tmp_ch_info->list, &ctl_ch_list);
+		mutex_unlock(&ctl_ch_list_lock);
+	} else if (type == DATA) {
+		mutex_lock(&data_ch_list_lock);
+		list_add_tail(&tmp_ch_info->list, &data_ch_list);
+		mutex_unlock(&data_ch_list_lock);
+	} else {
+		LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
+				edge, name, __func__, type);
+		kfree(tmp_ch_info);
+		return -EINVAL;
+	}
+	*ret_ch_info = tmp_ch_info;
+	return 0;
+}
+
+struct ch_info *lookup_ch_list(char *name, char *edge, char *transport,
+			       uint32_t type)
+{
+	struct list_head *ch_list;
+	struct mutex *lock;
+	struct ch_info *tmp_ch_info;
+
+	if (type == DATA) {
+		ch_list = &data_ch_list;
+		lock = &data_ch_list_lock;
+	} else if (type == CTL) {
+		ch_list = &ctl_ch_list;
+		lock = &ctl_ch_list_lock;
+	} else {
+		LBSRV_ERR("%s:%s:%s %s: Invalid ch type %d\n", transport,
+			    edge, name, __func__, type);
+		return NULL;
+	}
+
+	mutex_lock(lock);
+	list_for_each_entry(tmp_ch_info, ch_list, list) {
+		if (!strcmp(name, tmp_ch_info->name) &&
+		    !strcmp(edge, tmp_ch_info->edge) &&
+		    !strcmp(transport, tmp_ch_info->transport)) {
+			mutex_unlock(lock);
+			return tmp_ch_info;
+		}
+	}
+	mutex_unlock(lock);
+	return NULL;
+}
+
+int glink_lbsrv_handle_open_req(struct ch_info *rx_ch_info,
+				struct open_req req)
+{
+	struct ch_info *tmp_ch_info;
+	int ret;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
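+	/*
+	 * On the local loopback transport the requested channel name carries a
+	 * _CLNT suffix; the server opens the matching _SRV channel instead.
+	 */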
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge,
+		   name, __func__, req.delay_ms);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (tmp_ch_info)
+		goto queue_open_work;
+
+	ret = create_ch_info(name, rx_ch_info->edge, rx_ch_info->transport,
+			     DATA, &tmp_ch_info);
+	if (ret)
+		return ret;
+queue_open_work:
+	queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->open_work,
+			   msecs_to_jiffies(req.delay_ms));
+	return 0;
+}
+
+int glink_lbsrv_handle_close_req(struct ch_info *rx_ch_info,
+				 struct close_req req)
+{
+	struct ch_info *tmp_ch_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d]\n",
+		    rx_ch_info->transport, rx_ch_info->edge,
+		    name, __func__, req.delay_ms);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (tmp_ch_info)
+		queue_delayed_work(glink_lbsrv_wq, &tmp_ch_info->close_work,
+				   msecs_to_jiffies(req.delay_ms));
+	return 0;
+}
+
+int glink_lbsrv_handle_queue_rx_intent_config_req(struct ch_info *rx_ch_info,
+			struct queue_rx_intent_config_req req, uint32_t req_id)
+{
+	struct ch_info *tmp_ch_info;
+	struct queue_rx_intent_work_info *tmp_work_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+	uint32_t delay_ms;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: num_intents[%d] size[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, name, __func__,
+		   req.num_intents, req.intent_size);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				name, __func__);
+		return -EINVAL;
+	}
+
+	tmp_work_info = kzalloc(sizeof(struct queue_rx_intent_work_info),
+				GFP_KERNEL);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s: Error allocating work_info\n", __func__);
+		return -ENOMEM;
+	}
+
+	tmp_work_info->req_id = req_id;
+	tmp_work_info->req_ch_info = rx_ch_info;
+	tmp_work_info->num_intents = req.num_intents;
+	tmp_work_info->intent_size = req.intent_size;
+	tmp_work_info->random_delay = req.random_delay;
+	tmp_work_info->delay_ms = req.delay_ms;
+	INIT_DELAYED_WORK(&tmp_work_info->work,
+			  glink_lbsrv_queue_rx_intent_worker);
+	tmp_work_info->work_ch_info = tmp_ch_info;
+
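+	/*
+	 * Queue the intent work now if the channel is fully opened; otherwise
+	 * defer it until the GLINK_CONNECTED state callback. The response is
+	 * sent up front when the work is deferred or delayed; the immediate
+	 * case responds from the worker after all intents are queued.
+	 */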
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (tmp_ch_info->fully_opened) {
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		delay_ms = calc_delay_ms(tmp_work_info->random_delay,
+					 tmp_work_info->delay_ms);
+		queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+				   msecs_to_jiffies(delay_ms));
+
+		if (tmp_work_info->random_delay || tmp_work_info->delay_ms)
+			glink_lbsrv_send_response(rx_ch_info->handle, req_id,
+					QUEUE_RX_INTENT_CONFIG, 0);
+	} else {
+		tmp_work_info->deferred = true;
+		tmp_ch_info->queue_rx_intent_work_info = tmp_work_info;
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+
+		glink_lbsrv_send_response(rx_ch_info->handle, req_id,
+				QUEUE_RX_INTENT_CONFIG, 0);
+	}
+
+	return 0;
+}
+
+int glink_lbsrv_handle_tx_config_req(struct ch_info *rx_ch_info,
+				     struct tx_config_req req)
+{
+	struct ch_info *tmp_ch_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: echo_count[%d] transform[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, name, __func__,
+		   req.echo_count, req.transform_type);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				name, __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	tmp_ch_info->tx_config.random_delay = req.random_delay;
+	tmp_ch_info->tx_config.delay_ms = req.delay_ms;
+	tmp_ch_info->tx_config.echo_count = req.echo_count;
+	tmp_ch_info->tx_config.transform_type = req.transform_type;
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	return 0;
+}
+
+int glink_lbsrv_handle_rx_done_config_req(struct ch_info *rx_ch_info,
+					  struct rx_done_config_req req)
+{
+	struct ch_info *tmp_ch_info;
+	char name[MAX_NAME_LEN];
+	char *temp;
+
+	strlcpy(name, req.ch_name, MAX_NAME_LEN);
+	if (!strcmp(rx_ch_info->transport, "lloop")) {
+		temp = strnstr(name, "_CLNT", MAX_NAME_LEN);
+		if (temp)
+			*temp = '\0';
+		strlcat(name, "_SRV", MAX_NAME_LEN);
+	}
+	LBSRV_INFO("%s:%s:%s %s: delay_ms[%d] random_delay[%d]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, name,
+		   __func__, req.delay_ms, req.random_delay);
+	tmp_ch_info = lookup_ch_list(name, rx_ch_info->edge,
+				     rx_ch_info->transport, DATA);
+	if (!tmp_ch_info) {
+		LBSRV_ERR("%s:%s:%s %s: Channel info not found\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				name, __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	tmp_ch_info->rx_done_config.random_delay = req.random_delay;
+	tmp_ch_info->rx_done_config.delay_ms = req.delay_ms;
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	return 0;
+}
+
+/**
+ * glink_lbsrv_handle_req() - Handle the request commands received from clients
+ *
+ * rx_ch_info:	Channel info on which the request is received
+ * pkt:	Request structure received from the client
+ *
+ * This function handles all supported request types received from the client
+ * and sends the response back to the client.
+ */
+void glink_lbsrv_handle_req(struct ch_info *rx_ch_info, struct req pkt)
+{
+	int ret;
+
+	LBSRV_INFO("%s:%s:%s %s: Request packet type[%d]:id[%d]\n",
+			rx_ch_info->transport, rx_ch_info->edge,
+			rx_ch_info->name, __func__, pkt.hdr.req_type,
+			pkt.hdr.req_id);
+	switch (pkt.hdr.req_type) {
+	case OPEN:
+		ret = glink_lbsrv_handle_open_req(rx_ch_info,
+						  pkt.payload.open);
+		break;
+	case CLOSE:
+		ret = glink_lbsrv_handle_close_req(rx_ch_info,
+						   pkt.payload.close);
+		break;
+	case QUEUE_RX_INTENT_CONFIG:
+		ret = glink_lbsrv_handle_queue_rx_intent_config_req(
+			rx_ch_info, pkt.payload.q_rx_int_conf, pkt.hdr.req_id);
+		break;
+	case TX_CONFIG:
+		ret = glink_lbsrv_handle_tx_config_req(rx_ch_info,
+						       pkt.payload.tx_conf);
+		break;
+	case RX_DONE_CONFIG:
+		ret = glink_lbsrv_handle_rx_done_config_req(rx_ch_info,
+						pkt.payload.rx_done_conf);
+		break;
+	default:
+		LBSRV_ERR("%s:%s:%s %s: Invalid Request type [%d]\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__, pkt.hdr.req_type);
+		ret = -1;
+		break;
+	}
+
+	if (pkt.hdr.req_type != QUEUE_RX_INTENT_CONFIG)
+		glink_lbsrv_send_response(rx_ch_info->handle, pkt.hdr.req_id,
+				pkt.hdr.req_type, ret);
+}
+
+static void *glink_lbsrv_vbuf_provider(void *iovec, size_t offset,
+				       size_t *buf_size)
+{
+	struct lbsrv_vec *tmp_vec_info = (struct lbsrv_vec *)iovec;
+	uint32_t i;
+	size_t temp_size = 0;
+
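+	/*
+	 * Walk the vector until the cumulative length passes the requested
+	 * offset, then return the remaining portion of that buffer.
+	 */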
+	for (i = 0; i < tmp_vec_info->num_bufs; i++) {
+		temp_size += tmp_vec_info->vec[i].iov_len;
+		if (offset >= temp_size)
+			continue;
+		*buf_size = temp_size - offset;
+		return (void *)tmp_vec_info->vec[i].iov_base +
+			tmp_vec_info->vec[i].iov_len - *buf_size;
+	}
+	*buf_size = 0;
+	return NULL;
+}
+
+static void glink_lbsrv_free_data(void *data, uint32_t buf_type)
+{
+	struct lbsrv_vec *tmp_vec_info;
+	uint32_t i;
+
+	if (buf_type == LINEAR) {
+		kfree(data);
+	} else {
+		tmp_vec_info = (struct lbsrv_vec *)data;
+		for (i = 0; i < tmp_vec_info->num_bufs; i++) {
+			kfree(tmp_vec_info->vec[i].iov_base);
+			tmp_vec_info->vec[i].iov_base = NULL;
+		}
+		kfree(tmp_vec_info);
+	}
+}
+
+static void *copy_linear_data(struct rx_work_info *tmp_rx_work_info)
+{
+	char *data;
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+
+	data = kmalloc(tmp_rx_work_info->size, GFP_KERNEL);
+	if (data)
+		memcpy(data, tmp_rx_work_info->ptr, tmp_rx_work_info->size);
+	else
+		LBSRV_ERR("%s:%s:%s %s: Error allocating the data\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+	return data;
+}
+
+static void *copy_vector_data(struct rx_work_info *tmp_rx_work_info)
+{
+	uint32_t num_bufs = 0;
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+	struct lbsrv_vec *tmp_vec_info;
+	void *buf, *pbuf, *dest_buf;
+	size_t offset = 0;
+	size_t buf_size;
+	uint32_t i;
+
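+	/* First pass: count how many buffers make up the source vector. */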
+	do {
+		if (tmp_rx_work_info->vbuf_provider)
+			buf = tmp_rx_work_info->vbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+		else
+			buf = tmp_rx_work_info->pbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+		if (!buf)
+			break;
+		offset += buf_size;
+		num_bufs++;
+	} while (buf);
+
+	tmp_vec_info = kzalloc(sizeof(*tmp_vec_info) +
+			       num_bufs * sizeof(struct kvec), GFP_KERNEL);
+	if (!tmp_vec_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating vector info\n",
+			  rx_ch_info->transport, rx_ch_info->edge,
+			  rx_ch_info->name, __func__);
+		return NULL;
+	}
+	tmp_vec_info->num_bufs = num_bufs;
+
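+	/*
+	 * Second pass: copy each source buffer into a newly allocated kvec
+	 * entry, translating physical addresses to virtual where needed.
+	 */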
+	offset = 0;
+	for (i = 0; i < num_bufs; i++) {
+		if (tmp_rx_work_info->vbuf_provider) {
+			buf = tmp_rx_work_info->vbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+		} else {
+			pbuf = tmp_rx_work_info->pbuf_provider(
+				tmp_rx_work_info->ptr, offset, &buf_size);
+			buf = phys_to_virt((unsigned long)pbuf);
+		}
+		dest_buf = kmalloc(buf_size, GFP_KERNEL);
+		if (!dest_buf) {
+			LBSRV_ERR("%s:%s:%s %s: Error allocating data\n",
+				  rx_ch_info->transport, rx_ch_info->edge,
+				  rx_ch_info->name, __func__);
+			goto out_copy_vector_data;
+		}
+		memcpy(dest_buf, buf, buf_size);
+		tmp_vec_info->vec[i].iov_base = dest_buf;
+		tmp_vec_info->vec[i].iov_len = buf_size;
+		offset += buf_size;
+	}
+	return tmp_vec_info;
+out_copy_vector_data:
+	glink_lbsrv_free_data((void *)tmp_vec_info, VECTOR);
+	return NULL;
+}
+
+static void *glink_lbsrv_copy_data(struct rx_work_info *tmp_rx_work_info)
+{
+	if (tmp_rx_work_info->buf_type == LINEAR)
+		return copy_linear_data(tmp_rx_work_info);
+	else
+		return copy_vector_data(tmp_rx_work_info);
+}
+
+static int glink_lbsrv_handle_data(struct rx_work_info *tmp_rx_work_info)
+{
+	void *data;
+	int ret;
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+	struct tx_work_info *tmp_tx_work_info;
+	struct rx_done_work_info *tmp_rx_done_work_info;
+	uint32_t delay_ms;
+
+	data = glink_lbsrv_copy_data(tmp_rx_work_info);
+	if (!data) {
+		ret = -ENOMEM;
+		goto out_handle_data;
+	}
+
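+	/*
+	 * The data has been copied: schedule rx_done for the original buffer
+	 * after the configured delay, then queue TX work to echo the copy back
+	 * to the client.
+	 */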
+	tmp_rx_done_work_info = kmalloc(sizeof(struct rx_done_work_info),
+					GFP_KERNEL);
+	if (!tmp_rx_done_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_done_work_info\n",
+			  rx_ch_info->transport, rx_ch_info->edge,
+			  rx_ch_info->name, __func__);
+		glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
+		ret = -ENOMEM;
+		goto out_handle_data;
+	}
+	INIT_DELAYED_WORK(&tmp_rx_done_work_info->work,
+			  glink_lbsrv_rx_done_worker);
+	tmp_rx_done_work_info->rx_done_ch_info = rx_ch_info;
+	tmp_rx_done_work_info->ptr = tmp_rx_work_info->ptr;
+	delay_ms = calc_delay_ms(rx_ch_info->rx_done_config.random_delay,
+				 rx_ch_info->rx_done_config.delay_ms);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_rx_done_work_info->work,
+			   msecs_to_jiffies(delay_ms));
+
+	tmp_tx_work_info = kmalloc(sizeof(struct tx_work_info), GFP_KERNEL);
+	if (!tmp_tx_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating tx_work_info\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		glink_lbsrv_free_data(data, tmp_rx_work_info->buf_type);
+		return -ENOMEM;
+	}
+	mutex_lock(&rx_ch_info->ch_info_lock);
+	tmp_tx_work_info->tx_config.random_delay =
+					rx_ch_info->tx_config.random_delay;
+	tmp_tx_work_info->tx_config.delay_ms = rx_ch_info->tx_config.delay_ms;
+	tmp_tx_work_info->tx_config.echo_count =
+					rx_ch_info->tx_config.echo_count;
+	tmp_tx_work_info->tx_config.transform_type =
+					rx_ch_info->tx_config.transform_type;
+	mutex_unlock(&rx_ch_info->ch_info_lock);
+	INIT_DELAYED_WORK(&tmp_tx_work_info->work, glink_lbsrv_tx_worker);
+	tmp_tx_work_info->tx_ch_info = rx_ch_info;
+	tmp_tx_work_info->data = data;
+	tmp_tx_work_info->tracer_pkt = tmp_rx_work_info->tracer_pkt;
+	tmp_tx_work_info->buf_type = tmp_rx_work_info->buf_type;
+	tmp_tx_work_info->size = tmp_rx_work_info->size;
+	if (tmp_tx_work_info->buf_type == VECTOR)
+		tmp_tx_work_info->vbuf_provider = glink_lbsrv_vbuf_provider;
+	else
+		tmp_tx_work_info->vbuf_provider = NULL;
+	tmp_tx_work_info->pbuf_provider = NULL;
+	delay_ms = calc_delay_ms(tmp_tx_work_info->tx_config.random_delay,
+				 tmp_tx_work_info->tx_config.delay_ms);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_tx_work_info->work,
+			   msecs_to_jiffies(delay_ms));
+	return 0;
+out_handle_data:
+	glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
+	return ret;
+}
+
+void glink_lpbsrv_notify_rx(void *handle, const void *priv,
+			    const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct rx_work_info *tmp_work_info;
+	struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO(
+		"%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
+		rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+		__func__, pkt_priv, (char *)ptr, size);
+	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		return;
+	}
+
+	tmp_work_info->rx_ch_info = rx_ch_info;
+	tmp_work_info->pkt_priv = (void *)pkt_priv;
+	tmp_work_info->ptr = (void *)ptr;
+	tmp_work_info->buf_type = LINEAR;
+	tmp_work_info->size = size;
+	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_rxv(void *handle, const void *priv,
+	const void *pkt_priv, void *ptr, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size))
+{
+	struct rx_work_info *tmp_work_info;
+	struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO("%s:%s:%s %s: priv[%p] data[%p] size[%zu]\n",
+		   rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+		   __func__, pkt_priv, (char *)ptr, size);
+	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		return;
+	}
+
+	tmp_work_info->rx_ch_info = rx_ch_info;
+	tmp_work_info->pkt_priv = (void *)pkt_priv;
+	tmp_work_info->ptr = (void *)ptr;
+	tmp_work_info->buf_type = VECTOR;
+	tmp_work_info->size = size;
+	tmp_work_info->vbuf_provider = vbuf_provider;
+	tmp_work_info->pbuf_provider = pbuf_provider;
+	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_rx_tp(void *handle, const void *priv,
+			    const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct rx_work_info *tmp_work_info;
+	struct ch_info *rx_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO(
+		"%s:%s:%s %s: end (Success) RX priv[%p] data[%p] size[%zu]\n",
+		rx_ch_info->transport, rx_ch_info->edge, rx_ch_info->name,
+		__func__, pkt_priv, (char *)ptr, size);
+	tracer_pkt_log_event((void *)ptr, LOOPBACK_SRV_RX);
+	tmp_work_info = kzalloc(sizeof(struct rx_work_info), GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__);
+		return;
+	}
+
+	tmp_work_info->rx_ch_info = rx_ch_info;
+	tmp_work_info->pkt_priv = (void *)pkt_priv;
+	tmp_work_info->ptr = (void *)ptr;
+	tmp_work_info->tracer_pkt = true;
+	tmp_work_info->buf_type = LINEAR;
+	tmp_work_info->size = size;
+	INIT_DELAYED_WORK(&tmp_work_info->work, glink_lbsrv_rx_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+}
+
+void glink_lpbsrv_notify_tx_done(void *handle, const void *priv,
+				 const void *pkt_priv, const void *ptr)
+{
+	struct ch_info *tx_done_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO("%s:%s:%s %s: end (Success) TX_DONE ptr[%p]\n",
+			tx_done_ch_info->transport, tx_done_ch_info->edge,
+			tx_done_ch_info->name, __func__, ptr);
+
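+	/*
+	 * A pkt_priv of 0xFFFFFFFF marks a packet that is still being echoed
+	 * (echo_count > 1) and must not be freed yet; otherwise pkt_priv holds
+	 * the buffer type used to free the data.
+	 */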
+	if (pkt_priv != (const void *)0xFFFFFFFF)
+		glink_lbsrv_free_data((void *)ptr,
+				(uint32_t)(uintptr_t)pkt_priv);
+}
+
+void glink_lpbsrv_notify_state(void *handle, const void *priv,
+				unsigned int event)
+{
+	int ret;
+	uint32_t delay_ms;
+	struct ch_info *tmp_ch_info = (struct ch_info *)priv;
+	struct queue_rx_intent_work_info *tmp_work_info = NULL;
+
+	LBSRV_INFO("%s:%s:%s %s: event[%d]\n",
+			tmp_ch_info->transport, tmp_ch_info->edge,
+			tmp_ch_info->name, __func__, event);
+	if (tmp_ch_info->type == CTL) {
+		if (event == GLINK_CONNECTED) {
+			ret = glink_queue_rx_intent(handle,
+					priv, sizeof(struct req));
+			LBSRV_INFO(
+				"%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+				tmp_ch_info->transport,
+				tmp_ch_info->edge,
+				tmp_ch_info->name,
+				__func__, sizeof(struct req), ret);
+		} else if (event == GLINK_LOCAL_DISCONNECTED) {
+			queue_delayed_work(glink_lbsrv_wq,
+					&tmp_ch_info->open_work,
+					msecs_to_jiffies(0));
+		} else if (event == GLINK_REMOTE_DISCONNECTED)
+			if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+				queue_delayed_work(glink_lbsrv_wq,
+					&tmp_ch_info->close_work, 0);
+	} else if (tmp_ch_info->type == DATA) {
+		if (event == GLINK_CONNECTED) {
+			mutex_lock(&tmp_ch_info->ch_info_lock);
+			tmp_ch_info->fully_opened = true;
+			tmp_work_info = tmp_ch_info->queue_rx_intent_work_info;
+			tmp_ch_info->queue_rx_intent_work_info = NULL;
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+
+			if (tmp_work_info) {
+				delay_ms = calc_delay_ms(
+						tmp_work_info->random_delay,
+						tmp_work_info->delay_ms);
+				queue_delayed_work(glink_lbsrv_wq,
+						&tmp_work_info->work,
+						msecs_to_jiffies(delay_ms));
+			}
+		} else if (event == GLINK_LOCAL_DISCONNECTED ||
+			event == GLINK_REMOTE_DISCONNECTED) {
+			mutex_lock(&tmp_ch_info->ch_info_lock);
+			tmp_ch_info->fully_opened = false;
+			/*
+			 * If the state has changed to LOCAL_DISCONNECTED,
+			 * the channel has been fully closed and can now be
+			 * re-opened. If the handle value is -EBUSY, an earlier
+			 * open request failed because the channel was in the
+			 * process of closing. Requeue the work from the open
+			 * request.
+			 */
+			if (event == GLINK_LOCAL_DISCONNECTED &&
+				tmp_ch_info->handle == ERR_PTR(-EBUSY)) {
+				queue_delayed_work(glink_lbsrv_wq,
+				&tmp_ch_info->open_work,
+				msecs_to_jiffies(0));
+			}
+			if (event == GLINK_REMOTE_DISCONNECTED)
+				if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+					queue_delayed_work(
+					glink_lbsrv_wq,
+					&tmp_ch_info->close_work, 0);
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+		}
+	}
+}
+
+bool glink_lpbsrv_rmt_rx_intent_req_cb(void *handle, const void *priv,
+				       size_t sz)
+{
+	struct rmt_rx_intent_req_work_info *tmp_work_info;
+	struct ch_info *tmp_ch_info = (struct ch_info *)priv;
+
+	LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT to receive size[%zu]\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+		   __func__, sz);
+
+	tmp_work_info = kmalloc(sizeof(struct rmt_rx_intent_req_work_info),
+				GFP_ATOMIC);
+	if (!tmp_work_info) {
+		LBSRV_ERR("%s:%s:%s %s: Error allocating rx_work\n",
+				tmp_ch_info->transport, tmp_ch_info->edge,
+				tmp_ch_info->name, __func__);
+		return false;
+	}
+	tmp_work_info->req_intent_size = sz;
+	tmp_work_info->work_ch_info = tmp_ch_info;
+
+	INIT_DELAYED_WORK(&tmp_work_info->work,
+			  glink_lbsrv_rmt_rx_intent_req_worker);
+	queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work, 0);
+	return true;
+}
+
+void glink_lpbsrv_notify_rx_sigs(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs)
+{
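+	/* Loop the received signal change back to the remote side. */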
+	LBSRV_INFO("%s: old_sigs[0x%x] new_sigs[0x%x]\n",
+				__func__, old_sigs, new_sigs);
+	glink_sigs_set(handle, new_sigs);
+}
+
+static void glink_lbsrv_rx_worker(struct work_struct *work)
+{
+	struct delayed_work *rx_work = to_delayed_work(work);
+	struct rx_work_info *tmp_rx_work_info =
+		container_of(rx_work, struct rx_work_info, work);
+	struct ch_info *rx_ch_info = tmp_rx_work_info->rx_ch_info;
+	struct req request_pkt;
+	int ret;
+
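+	/*
+	 * Control channels carry request packets: copy the request, release
+	 * the rx buffer, replace the rx intent, and process the command. Data
+	 * channel packets are echoed back via glink_lbsrv_handle_data().
+	 */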
+	if (rx_ch_info->type == CTL) {
+		request_pkt = *((struct req *)tmp_rx_work_info->ptr);
+		glink_rx_done(rx_ch_info->handle, tmp_rx_work_info->ptr, false);
+		ret = glink_queue_rx_intent(rx_ch_info->handle, rx_ch_info,
+					    sizeof(struct req));
+		LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+				rx_ch_info->transport, rx_ch_info->edge,
+				rx_ch_info->name, __func__,
+				sizeof(struct req), ret);
+		glink_lbsrv_handle_req(rx_ch_info, request_pkt);
+	} else {
+		ret = glink_lbsrv_handle_data(tmp_rx_work_info);
+	}
+	kfree(tmp_rx_work_info);
+}
+
+static void glink_lbsrv_open_worker(struct work_struct *work)
+{
+	struct delayed_work *open_work = to_delayed_work(work);
+	struct ch_info *tmp_ch_info =
+		container_of(open_work, struct ch_info, open_work);
+	struct glink_open_config open_cfg;
+
+	LBSRV_INFO("%s: Open worker started\n", __func__);
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		return;
+	}
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.transport = tmp_ch_info->transport;
+	open_cfg.edge = tmp_ch_info->edge;
+	open_cfg.name = tmp_ch_info->name;
+
+	open_cfg.notify_rx = glink_lpbsrv_notify_rx;
+	if (tmp_ch_info->type == DATA)
+		open_cfg.notify_rxv = glink_lpbsrv_notify_rxv;
+	open_cfg.notify_tx_done = glink_lpbsrv_notify_tx_done;
+	open_cfg.notify_state = glink_lpbsrv_notify_state;
+	open_cfg.notify_rx_intent_req = glink_lpbsrv_rmt_rx_intent_req_cb;
+	open_cfg.notify_rx_sigs = glink_lpbsrv_notify_rx_sigs;
+	open_cfg.notify_rx_abort = NULL;
+	open_cfg.notify_tx_abort = NULL;
+	open_cfg.notify_rx_tracer_pkt = glink_lpbsrv_notify_rx_tp;
+	open_cfg.priv = tmp_ch_info;
+
+	tmp_ch_info->handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		LBSRV_ERR("%s:%s:%s %s: unable to open channel\n",
+			  open_cfg.transport, open_cfg.edge, open_cfg.name,
+			  __func__);
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		return;
+	}
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	LBSRV_INFO("%s:%s:%s %s: Open complete\n", open_cfg.transport,
+			open_cfg.edge, open_cfg.name, __func__);
+}
+
+static void glink_lbsrv_close_worker(struct work_struct *work)
+{
+	struct delayed_work *close_work = to_delayed_work(work);
+	struct ch_info *tmp_ch_info =
+		container_of(close_work, struct ch_info, close_work);
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (!IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		glink_close(tmp_ch_info->handle);
+		tmp_ch_info->handle = NULL;
+	}
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	LBSRV_INFO("%s:%s:%s %s: Close complete\n", tmp_ch_info->transport,
+			tmp_ch_info->edge, tmp_ch_info->name, __func__);
+}
+
+static void glink_lbsrv_rmt_rx_intent_req_worker(struct work_struct *work)
+{
+	struct delayed_work *rmt_rx_intent_req_work = to_delayed_work(work);
+	struct rmt_rx_intent_req_work_info *tmp_work_info =
+		container_of(rmt_rx_intent_req_work,
+			struct rmt_rx_intent_req_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
+	int ret;
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		LBSRV_ERR("%s:%s:%s %s: Invalid CH handle\n",
+				  tmp_ch_info->transport,
+				  tmp_ch_info->edge,
+				  tmp_ch_info->name, __func__);
+		kfree(tmp_work_info);
+		return;
+	}
+	ret = glink_queue_rx_intent(tmp_ch_info->handle,
+			(void *)tmp_ch_info, tmp_work_info->req_intent_size);
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	LBSRV_INFO("%s:%s:%s %s: QUEUE RX INTENT size[%zu] ret[%d]\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge,
+		   tmp_ch_info->name, __func__, tmp_work_info->req_intent_size,
+		   ret);
+	if (ret < 0) {
+		LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %zu\n",
+			  tmp_ch_info->transport, tmp_ch_info->edge,
+			  tmp_ch_info->name, __func__, ret,
+			  tmp_work_info->req_intent_size);
+	}
+	kfree(tmp_work_info);
+}
+
+static void glink_lbsrv_queue_rx_intent_worker(struct work_struct *work)
+{
+	struct delayed_work *queue_rx_intent_work = to_delayed_work(work);
+	struct queue_rx_intent_work_info *tmp_work_info =
+		container_of(queue_rx_intent_work,
+			struct queue_rx_intent_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->work_ch_info;
+	int ret;
+	uint32_t delay_ms;
+
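+	/*
+	 * Queue the requested intents one at a time. If a delay is configured,
+	 * requeue the work item instead of sleeping in the worker.
+	 */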
+	while (1) {
+		mutex_lock(&tmp_ch_info->ch_info_lock);
+		if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+			return;
+		}
+
+		ret = glink_queue_rx_intent(tmp_ch_info->handle,
+			(void *)tmp_ch_info, tmp_work_info->intent_size);
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		if (ret < 0) {
+			LBSRV_ERR("%s:%s:%s %s: Err %d q'ing intent size %d\n",
+				  tmp_ch_info->transport, tmp_ch_info->edge,
+				  tmp_ch_info->name, __func__, ret,
+				  tmp_work_info->intent_size);
+			kfree(tmp_work_info);
+			return;
+		}
+		LBSRV_INFO("%s:%s:%s %s: Queued rx intent of size %d\n",
+			   tmp_ch_info->transport, tmp_ch_info->edge,
+			   tmp_ch_info->name, __func__,
+			   tmp_work_info->intent_size);
+		tmp_work_info->num_intents--;
+		if (!tmp_work_info->num_intents)
+			break;
+
+		delay_ms = calc_delay_ms(tmp_work_info->random_delay,
+					 tmp_work_info->delay_ms);
+		if (delay_ms) {
+			queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+					   msecs_to_jiffies(delay_ms));
+			return;
+		}
+	}
+	LBSRV_INFO("%s:%s:%s %s: Queued all intents. size:%d\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+		   __func__, tmp_work_info->intent_size);
+
+	if (!tmp_work_info->deferred && !tmp_work_info->random_delay &&
+			!tmp_work_info->delay_ms)
+		glink_lbsrv_send_response(tmp_work_info->req_ch_info->handle,
+				tmp_work_info->req_id, QUEUE_RX_INTENT_CONFIG,
+				0);
+	kfree(tmp_work_info);
+}
+
+static void glink_lbsrv_rx_done_worker(struct work_struct *work)
+{
+	struct delayed_work *rx_done_work = to_delayed_work(work);
+	struct rx_done_work_info *tmp_work_info =
+		container_of(rx_done_work, struct rx_done_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->rx_done_ch_info;
+
+	mutex_lock(&tmp_ch_info->ch_info_lock);
+	if (!IS_ERR_OR_NULL(tmp_ch_info->handle))
+		glink_rx_done(tmp_ch_info->handle, tmp_work_info->ptr, false);
+	mutex_unlock(&tmp_ch_info->ch_info_lock);
+	kfree(tmp_work_info);
+}
+
+static void glink_lbsrv_tx_worker(struct work_struct *work)
+{
+	struct delayed_work *tx_work = to_delayed_work(work);
+	struct tx_work_info *tmp_work_info =
+		container_of(tx_work, struct tx_work_info, work);
+	struct ch_info *tmp_ch_info = tmp_work_info->tx_ch_info;
+	int ret;
+	uint32_t delay_ms;
+	uint32_t flags;
+
+	LBSRV_INFO("%s:%s:%s %s: start TX data[%p] size[%zu]\n",
+		   tmp_ch_info->transport, tmp_ch_info->edge, tmp_ch_info->name,
+		   __func__, tmp_work_info->data, tmp_work_info->size);
+	while (1) {
+		mutex_lock(&tmp_ch_info->ch_info_lock);
+		if (IS_ERR_OR_NULL(tmp_ch_info->handle)) {
+			mutex_unlock(&tmp_ch_info->ch_info_lock);
+			return;
+		}
+
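+		/*
+		 * While more echoes remain, 0xFFFFFFFF is passed as pkt_priv
+		 * so the tx_done callback does not free the buffer; the final
+		 * transmit passes the buffer type so it can be freed.
+		 */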
+		flags = 0;
+		if (tmp_work_info->tracer_pkt) {
+			flags |= GLINK_TX_TRACER_PKT;
+			tracer_pkt_log_event(tmp_work_info->data,
+					     LOOPBACK_SRV_TX);
+		}
+		if (tmp_work_info->buf_type == LINEAR)
+			ret = glink_tx(tmp_ch_info->handle,
+			       (tmp_work_info->tx_config.echo_count > 1 ?
+					(void *)0xFFFFFFFF :
+					(void *)(uintptr_t)
+						tmp_work_info->buf_type),
+			       (void *)tmp_work_info->data,
+			       tmp_work_info->size, flags);
+		else
+			ret = glink_txv(tmp_ch_info->handle,
+				(tmp_work_info->tx_config.echo_count > 1 ?
+					(void *)0xFFFFFFFF :
+					(void *)(uintptr_t)
+						tmp_work_info->buf_type),
+				(void *)tmp_work_info->data,
+				tmp_work_info->size,
+				tmp_work_info->vbuf_provider,
+				tmp_work_info->pbuf_provider,
+				flags);
+		mutex_unlock(&tmp_ch_info->ch_info_lock);
+		if (ret < 0 && ret != -EAGAIN) {
+			LBSRV_ERR("%s:%s:%s %s: TX Error %d\n",
+					tmp_ch_info->transport,
+					tmp_ch_info->edge,
+					tmp_ch_info->name, __func__, ret);
+			glink_lbsrv_free_data(tmp_work_info->data,
+					      tmp_work_info->buf_type);
+			kfree(tmp_work_info);
+			return;
+		}
+		if (ret != -EAGAIN)
+			tmp_work_info->tx_config.echo_count--;
+		if (!tmp_work_info->tx_config.echo_count)
+			break;
+
+		delay_ms = calc_delay_ms(tmp_work_info->tx_config.random_delay,
+					 tmp_work_info->tx_config.delay_ms);
+		if (delay_ms) {
+			queue_delayed_work(glink_lbsrv_wq, &tmp_work_info->work,
+					   msecs_to_jiffies(delay_ms));
+			return;
+		}
+	}
+	kfree(tmp_work_info);
+}
+
+/**
+ * glink_lbsrv_link_state_worker() - Function to handle link state updates
+ * work:	Pointer to the work item in the link_state_work_info.
+ *
+ * This worker function is scheduled when there is a link state update. Since
+ * the loopback server registers for all transports, it receives all link state
+ * updates about all transports that get registered in the system.
+ */
+static void glink_lbsrv_link_state_worker(struct work_struct *work)
+{
+	struct delayed_work *ls_work = to_delayed_work(work);
+	struct link_state_work_info *ls_info =
+		container_of(ls_work, struct link_state_work_info, work);
+	struct ch_info *tmp_ch_info;
+
+	if (ls_info->link_state == GLINK_LINK_STATE_UP) {
+		LBSRV_INFO("%s: LINK_STATE_UP %s:%s\n",
+			  __func__, ls_info->edge, ls_info->transport);
+		mutex_lock(&ctl_ch_list_lock);
+		list_for_each_entry(tmp_ch_info, &ctl_ch_list, list) {
+			if (strcmp(tmp_ch_info->edge, ls_info->edge) ||
+			    strcmp(tmp_ch_info->transport, ls_info->transport))
+				continue;
+			queue_delayed_work(glink_lbsrv_wq,
+					   &tmp_ch_info->open_work, 0);
+		}
+		mutex_unlock(&ctl_ch_list_lock);
+	} else if (ls_info->link_state == GLINK_LINK_STATE_DOWN) {
+		LBSRV_INFO("%s: LINK_STATE_DOWN %s:%s\n",
+			  __func__, ls_info->edge, ls_info->transport);
+
+	}
+	kfree(ls_info);
+}
+
+/**
+ * glink_lbsrv_link_state_cb() - Callback to receive link state updates
+ * cb_info:	Information containing link & its state.
+ * priv:	Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the loopback server
+ * regarding the link state updates. This function is registered with the
+ * GLINK core by the loopback server during glink_register_link_state_cb().
+ */
+static void glink_lbsrv_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct link_state_work_info *ls_info;
+
+	if (!cb_info)
+		return;
+
+	LBSRV_INFO("%s: %s:%s\n", __func__, cb_info->edge, cb_info->transport);
+	ls_info = kmalloc(sizeof(*ls_info), GFP_KERNEL);
+	if (!ls_info) {
+		LBSRV_ERR("%s: Error allocating link state info\n", __func__);
+		return;
+	}
+
+	strlcpy(ls_info->edge, cb_info->edge, GLINK_NAME_SIZE);
+	strlcpy(ls_info->transport, cb_info->transport, GLINK_NAME_SIZE);
+	ls_info->link_state = cb_info->link_state;
+	INIT_DELAYED_WORK(&ls_info->work, glink_lbsrv_link_state_worker);
+	queue_delayed_work(glink_lbsrv_wq, &ls_info->work, 0);
+}
+
+static int glink_loopback_server_init(void)
+{
+	int i;
+	int ret;
+	struct ch_info *tmp_ch_info;
+
+	glink_lbsrv_log_ctx = ipc_log_context_create(GLINK_LBSRV_NUM_LOG_PAGES,
+							"glink_lbsrv", 0);
+	if (!glink_lbsrv_log_ctx)
+		pr_err("%s: unable to create log context\n", __func__);
+
+	glink_lbsrv_wq = create_singlethread_workqueue("glink_lbsrv");
+	if (!glink_lbsrv_wq) {
+		LBSRV_ERR("%s: Error creating glink_lbsrv_wq\n", __func__);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(ctl_ch_tbl); i++) {
+		ret = create_ch_info(ctl_ch_tbl[i].name, ctl_ch_tbl[i].edge,
+				     ctl_ch_tbl[i].transport, CTL,
+				     &tmp_ch_info);
+		if (ret < 0) {
+			LBSRV_ERR("%s: Error creating ctl ch index %d\n",
+				__func__, i);
+			continue;
+		}
+	}
+	glink_lbsrv_link_state_notif_handle = glink_register_link_state_cb(
+						&glink_lbsrv_link_info, NULL);
+	return 0;
+}
+
+module_init(glink_loopback_server_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) Loopback Server");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
new file mode 100644
index 0000000..c837bd8
--- /dev/null
+++ b/drivers/soc/qcom/glink_private.h
@@ -0,0 +1,1099 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_PRIVATE_H_
+#define _SOC_QCOM_GLINK_PRIVATE_H_
+
+#include <linux/bug.h>
+#include <linux/completion.h>
+#include <linux/dcache.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/ratelimit.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <soc/qcom/glink.h>
+
+struct glink_core_xprt_ctx;
+struct channel_ctx;
+enum transport_state_e;
+enum local_channel_state_e;
+
+/* Logging Macros */
+enum {
+	QCOM_GLINK_INFO = 1U << 0,
+	QCOM_GLINK_DEBUG = 1U << 1,
+	QCOM_GLINK_GPIO = 1U << 2,
+	QCOM_GLINK_PERF = 1U << 3,
+};
+
+enum glink_dbgfs_ss {
+	GLINK_DBGFS_MPSS,
+	GLINK_DBGFS_APSS,
+	GLINK_DBGFS_LPASS,
+	GLINK_DBGFS_DSPS,
+	GLINK_DBGFS_RPM,
+	GLINK_DBGFS_WCNSS,
+	GLINK_DBGFS_LLOOP,
+	GLINK_DBGFS_MOCK,
+	GLINK_DBGFS_MAX_NUM_SUBS
+};
+
+enum glink_dbgfs_xprt {
+	GLINK_DBGFS_SMEM,
+	GLINK_DBGFS_SMD,
+	GLINK_DBGFS_XLLOOP,
+	GLINK_DBGFS_XMOCK,
+	GLINK_DBGFS_XMOCK_LOW,
+	GLINK_DBGFS_XMOCK_HIGH,
+	GLINK_DBGFS_MAX_NUM_XPRTS
+};
+
+struct glink_dbgfs {
+	const char *curr_name;
+	const char *par_name;
+	bool b_dir_create;
+};
+
+struct glink_dbgfs_data {
+	struct list_head flist;
+	struct dentry *dent;
+	void (*o_func)(struct seq_file *s);
+	void *priv_data;
+	bool b_priv_free_req;
+};
+
+struct xprt_ctx_iterator {
+	struct list_head *xprt_list;
+	struct glink_core_xprt_ctx *i_curr;
+	unsigned long xprt_list_flags;
+};
+
+struct ch_ctx_iterator {
+	struct list_head *ch_list;
+	struct channel_ctx *i_curr;
+	unsigned long ch_list_flags;
+};
+
+struct glink_ch_intent_info {
+	spinlock_t *li_lst_lock;
+	struct list_head *li_avail_list;
+	struct list_head *li_used_list;
+	spinlock_t *ri_lst_lock;
+	struct list_head *ri_list;
+};
+
+/* Tracer Packet Event IDs for G-Link */
+enum glink_tracer_pkt_events {
+	GLINK_CORE_TX = 1,
+	GLINK_QUEUE_TO_SCHEDULER = 2,
+	GLINK_SCHEDULER_TX = 3,
+	GLINK_XPRT_TX = 4,
+	GLINK_XPRT_RX = 5,
+	GLINK_CORE_RX = 6,
+};
+
+/**
+ * glink_get_ss_enum_string() - get the name of the subsystem based on enum
+ *				value
+ * @enum_id:	enum id of a specific subsystem.
+ *
+ * Return: name of the subsystem, NULL in case of invalid input
+ */
+const char *glink_get_ss_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_enum_string() - get the name of the transport based on enum
+ *					value
+ * @enum_id:	enum id of a specific transport.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+const char *glink_get_xprt_enum_string(unsigned int enum_id);
+
+/**
+ * glink_get_xprt_state_string() - get the name of the transport state based on
+ *					enum value
+ * @enum_id:	enum id of the state of the transport.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state_string(enum transport_state_e enum_id);
+
+/**
+ * glink_get_ch_state_string() - get the name of the channel state based on
+ *					enum value
+ * @enum_id:	enum id of a specific state of the channel.
+ *
+ * Return: name of the channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_state_string(enum local_channel_state_e enum_id);
+
+#define GLINK_IPC_LOG_STR(x...) do { \
+	if (glink_get_log_ctx()) \
+		ipc_log_string(glink_get_log_ctx(), x); \
+} while (0)
+
+#define GLINK_DBG(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_IPC_LOG_STR(x);  \
+} while (0)
+
+#define GLINK_INFO(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_IPC_LOG_STR(x);  \
+} while (0)
+
+#define GLINK_INFO_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+		GLINK_IPC_LOG_STR(x);  \
+} while (0)
+
+#define GLINK_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+		GLINK_IPC_LOG_STR("<PERF> " x);  \
+} while (0)
+
+#define GLINK_UT_ERR(x...) do {                              \
+	if (!(glink_get_debug_mask() & QCOM_GLINK_PERF)) \
+		pr_err("<UT> " x); \
+	GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_DBG(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_INFO(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_INFO_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+		GLINK_IPC_LOG_STR("<UT> " x);  \
+} while (0)
+
+#define GLINK_UT_PERF(x...) do {                              \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+		GLINK_IPC_LOG_STR("<PERF> " x);  \
+} while (0)
+
+#define GLINK_XPRT_IPC_LOG_STR(xprt, x...) do { \
+	if (glink_get_xprt_log_ctx(xprt)) \
+		ipc_log_string(glink_get_xprt_log_ctx(xprt), x); \
+} while (0)
+
+#define GLINK_XPRT_IF_INFO(xprt_if, x...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_XPRT_IF_DBG(xprt_if, x...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_XPRT_IF_ERR(xprt_if, x...) do { \
+	pr_err("<XPRT> " x); \
+	GLINK_XPRT_IPC_LOG_STR(xprt_if.glink_core_priv, "<XPRT> " x); \
+} while (0)
+
+#define GLINK_PERF_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, "<PERF> %s:%s " fmt, \
+					xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_PERF_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+		GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+				"<PERF> %s:%s:%s[%u:%u] " fmt, \
+				ctx->transport_ptr->name, \
+				ctx->transport_ptr->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_PERF) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, \
+				"<PERF> %s:%s:%s[%u:%u] " fmt, \
+				xprt->name, \
+				xprt->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_PERF_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+				xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_INFO_PERF_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+		GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+				"<CORE> %s:%s:%s[%u:%u] " fmt, \
+				ctx->transport_ptr->name, \
+				ctx->transport_ptr->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_PERF_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & (QCOM_GLINK_INFO | QCOM_GLINK_PERF)) \
+		GLINK_XPRT_IPC_LOG_STR(xprt,\
+				"<CORE> %s:%s:%s[%u:%u] " fmt, \
+				xprt->name, \
+				xprt->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+				xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_INFO_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+				"<CORE> %s:%s:%s[%u:%u] " fmt, \
+				ctx->transport_ptr->name, \
+				ctx->transport_ptr->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_INFO) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, \
+				"<CORE> %s:%s:%s[%u:%u] " fmt, \
+				xprt->name, \
+				xprt->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_DBG_XPRT(xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, "<CORE> %s:%s " fmt, \
+				xprt->name, xprt->edge, args);  \
+} while (0)
+
+#define GLINK_DBG_CH(ctx, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_XPRT_IPC_LOG_STR(ctx->transport_ptr, \
+				"<CORE> %s:%s:%s[%u:%u] " fmt, \
+				ctx->transport_ptr->name, \
+				ctx->transport_ptr->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_DBG_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	if (glink_get_debug_mask() & QCOM_GLINK_DEBUG) \
+		GLINK_XPRT_IPC_LOG_STR(xprt, \
+				"<CORE> %s:%s:%s[%u:%u] " fmt, \
+				xprt->name, \
+				xprt->edge, \
+				ctx->name, \
+				ctx->lcid, \
+				ctx->rcid, args);  \
+} while (0)
+
+#define GLINK_ERR(x...) do {                              \
+	pr_err_ratelimited("<CORE> " x); \
+	GLINK_IPC_LOG_STR("<CORE> " x);  \
+} while (0)
+
+#define GLINK_ERR_XPRT(xprt, fmt, args...) do { \
+	pr_err_ratelimited("<CORE> %s:%s " fmt, \
+		xprt->name, xprt->edge, args);  \
+	GLINK_INFO_XPRT(xprt, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH(ctx, fmt, args...) do { \
+	pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+		ctx->transport_ptr->name, \
+		ctx->transport_ptr->edge, \
+		ctx->name, \
+		ctx->lcid, \
+		ctx->rcid, args);  \
+	GLINK_INFO_CH(ctx, fmt, args); \
+} while (0)
+
+#define GLINK_ERR_CH_XPRT(ctx, xprt, fmt, args...) do { \
+	pr_err_ratelimited("<CORE> %s:%s:%s[%u:%u] " fmt, \
+		xprt->name, \
+		xprt->edge, \
+		ctx->name, \
+		ctx->lcid, \
+		ctx->rcid, args);  \
+	GLINK_INFO_CH_XPRT(ctx, xprt, fmt, args); \
+} while (0)
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type:	type to check for overflow
+ * @a:	left value to use
+ * @b:	right value to use
+ *
+ * Return: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
+
+/**
+ * glink_get_debug_mask() - Return debug mask attribute
+ *
+ * Return: debug mask attribute
+ */
+unsigned int glink_get_debug_mask(void);
+
+/**
+ * glink_get_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_log_ctx(void);
+
+/**
+ * glink_get_xprt_log_ctx() - Return log context for other GLINK modules.
+ *
+ * Return: Log context or NULL if none.
+ */
+void *glink_get_xprt_log_ctx(struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_get_channel_id_for_handle() - Get logical channel ID
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Logical Channel ID or standard Linux error code
+ */
+int glink_get_channel_id_for_handle(void *handle);
+
+/**
+ * glink_get_channel_name_for_handle() - return channel name
+ *
+ * @handle:	handle of channel
+ *
+ * Used internally by G-Link debugfs.
+ *
+ * Return:  Channel name or NULL
+ */
+char *glink_get_channel_name_for_handle(void *handle);
+
+/**
+ * glink_debugfs_init() - initialize glink debugfs directory
+ *
+ * Return: error code or success.
+ */
+int glink_debugfs_init(void);
+
+/**
+ * glink_debugfs_exit() - removes glink debugfs directory
+ */
+void glink_debugfs_exit(void);
+
+/**
+ * glink_debugfs_create() - create the debugfs file
+ * @name:	debugfs file name
+ * @show:	pointer to the actual function which will be invoked upon
+ *		opening this file.
+ * @dir:	pointer to a structure debugfs_dir
+ * @dbgfs_data: pointer to any private data need to be associated with debugfs
+ * @b_free_req: boolean value to decide to free the memory associated with
+ *		@dbgfs_data during deletion of the file
+ *
+ * Return:	pointer to the file/directory created, NULL in case of error
+ *
+ * This function checks which directory will be used to create the debugfs file
+ * and calls glink_dfs_create_file. Anybody who intends to allocate memory
+ * for dbgfs_data and requires it to be freed when the file is deleted needs
+ * to set b_free_req to true. Otherwise, there will be a memory leak.
+ */
+struct dentry *glink_debugfs_create(const char *name,
+		void (*show)(struct seq_file *),
+		struct glink_dbgfs *dir, void *dbgfs_data, bool b_free_req);
+
+/**
+ * glink_debugfs_remove_recur() - remove the directory & files recursively
+ * @dfs:	pointer to the structure glink_dbgfs
+ *
+ * This function removes the files & directories. This also takes care of
+ * freeing any memory associated with the debugfs file.
+ */
+void glink_debugfs_remove_recur(struct glink_dbgfs *dfs);
+
+/**
+ * glink_debugfs_remove_channel() - remove all channel specific files & folder
+ *					in debugfs when channel is fully closed
+ * @ch_ctx:		pointer to the channel_context
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when any channel is fully closed. It removes the
+ * folders & other files in debugfs for that channel.
+ */
+void glink_debugfs_remove_channel(struct channel_ctx *ch_ctx,
+			struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_channel() - create channel specific files & folder in
+ *				 debugfs when channel is added
+ * @ch_ctx:		pointer to the channel_context
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new channel is created. It creates the
+ * folders & other files in debugfs for that channel
+ */
+void glink_debugfs_add_channel(struct channel_ctx *ch_ctx,
+		struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_debugfs_add_xprt() - create transport specific files & folder in
+ *			      debugfs when new transport is registered
+ * @xprt_ctx:		pointer to the transport_context
+ *
+ * This function is invoked when a new transport is registered. It creates the
+ * folders & other files in debugfs for that transport
+ */
+void glink_debugfs_add_xprt(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_xprt_ctx_iterator_init() - Initializes the transport context list
+ *					iterator
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: None
+ *
+ * This function acquires the transport context lock which must then be
+ * released by glink_xprt_ctx_iterator_end()
+ */
+void glink_xprt_ctx_iterator_init(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_end() - Ends the transport context list iteration
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: None
+ */
+void glink_xprt_ctx_iterator_end(struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_xprt_ctx_iterator_next() - iterates element by element in transport
+ *					context list
+ * @xprt_i:	pointer to the transport context iterator.
+ *
+ * Return: pointer to the transport context structure
+ */
+struct glink_core_xprt_ctx *glink_xprt_ctx_iterator_next(
+			struct xprt_ctx_iterator *xprt_i);
+
+/**
+ * glink_get_xprt_name() - get the transport name
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the transport
+ */
+char  *glink_get_xprt_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_edge_name() - get the name of the remote processor/edge
+ *				of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_xprt_edge_name(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_state() - get the state of the transport
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: name of the transport state, NULL in case of invalid input
+ */
+const char *glink_get_xprt_state(struct glink_core_xprt_ctx *xprt_ctx);
+
+/**
+ * glink_get_xprt_version_features() - get the version and feature set
+ *					of local transport in glink
+ * @xprt_ctx:	pointer to the transport context.
+ *
+ * Return: pointer to the glink_core_version
+ */
+const struct glink_core_version *glink_get_xprt_version_features(
+			struct glink_core_xprt_ctx *xprt_ctx);
+
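+/*
+ * Example (illustrative sketch only): walking the list of registered
+ * transports.  Per the kernel-doc above, glink_xprt_ctx_iterator_init()
+ * acquires the transport context lock, so the walk must be bracketed by
+ * the _init()/_end() pair.
+ *
+ *	struct xprt_ctx_iterator xprt_iter;
+ *	struct glink_core_xprt_ctx *xprt_ctx;
+ *
+ *	glink_xprt_ctx_iterator_init(&xprt_iter);
+ *	xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+ *	while (xprt_ctx != NULL) {
+ *		pr_info("xprt %s edge %s state %s\n",
+ *			glink_get_xprt_name(xprt_ctx),
+ *			glink_get_xprt_edge_name(xprt_ctx),
+ *			glink_get_xprt_state(xprt_ctx));
+ *		xprt_ctx = glink_xprt_ctx_iterator_next(&xprt_iter);
+ *	}
+ *	glink_xprt_ctx_iterator_end(&xprt_iter);
+ */
+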
+/**
+ * glink_ch_ctx_iterator_init() - Initializes the channel context list iterator
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:       pointer to the transport context that holds the channel list
+ *
+ * This function acquires the channel context lock which must then be
+ * released by glink_ch_ctx_iterator_end()
+ */
+void  glink_ch_ctx_iterator_init(struct ch_ctx_iterator *ch_iter,
+			struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_end() - Ends the channel context list iteration
+ * @ch_iter:	pointer to the channel context iterator.
+ * @xprt:	pointer to the transport context that holds the channel list
+ */
+void glink_ch_ctx_iterator_end(struct ch_ctx_iterator *ch_iter,
+				struct glink_core_xprt_ctx *xprt);
+
+/**
+ * glink_ch_ctx_iterator_next() - iterates element by element in channel
+ *					context list
+ * @ch_iter:	pointer to the channel context iterator.
+ *
+ * Return: pointer to the channel context structure
+ */
+struct channel_ctx *glink_ch_ctx_iterator_next(struct ch_ctx_iterator *ch_iter);
+
+/**
+ * glink_get_ch_name() - get the channel name
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the channel, NULL in case of invalid input
+ */
+char *glink_get_ch_name(struct channel_ctx *ch_ctx);
+
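+/*
+ * Example (illustrative sketch only): walking the channels of a single
+ * transport, where xprt_ctx is assumed to come from the transport iterator
+ * above.  Note that glink_ch_ctx_iterator_end() takes both the iterator and
+ * the transport context, matching its declaration.
+ *
+ *	struct ch_ctx_iterator ch_iter;
+ *	struct channel_ctx *ch_ctx;
+ *
+ *	glink_ch_ctx_iterator_init(&ch_iter, xprt_ctx);
+ *	ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+ *	while (ch_ctx != NULL) {
+ *		pr_info("channel %s\n", glink_get_ch_name(ch_ctx));
+ *		ch_ctx = glink_ch_ctx_iterator_next(&ch_iter);
+ *	}
+ *	glink_ch_ctx_iterator_end(&ch_iter, xprt_ctx);
+ */
+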
+/**
+ * glink_get_ch_edge_name() - get the name of the remote processor/edge
+ *				of the channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the remote processor/edge
+ */
+char *glink_get_ch_edge_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lcid() - get the local channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: local channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rcid() - get the remote channel ID
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: remote channel id, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rcid(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lstate() - get the local channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the local channel state, NULL in case of invalid input
+ */
+const char *glink_get_ch_lstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rstate() - get the remote channel state
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: true if the remote side is opened, false otherwise
+ */
+bool glink_get_ch_rstate(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_xprt_name() - get the name of the transport to which
+ *				the channel belongs
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: name of the transport, NULL in case of invalid input
+ */
+char *glink_get_ch_xprt_name(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_tx_pkt_count() - get the total number of packets sent
+ *				through this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets transmitted, -EINVAL in case of invalid input
+ */
+int glink_get_ch_tx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rx_pkt_count() - get the total number of packets
+ *				received at this channel
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of packets received, -EINVAL in case of invalid input
+ */
+int glink_get_ch_rx_pkt_count(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_lintents_queued() - get the total number of intents queued
+ *				at local side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued, -EINVAL in case of invalid input
+ */
+int glink_get_ch_lintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_rintents_queued() - get the total number of intents queued
+ *				from remote side
+ * @ch_ctx:	pointer to the channel context.
+ *
+ * Return: number of intents queued
+ */
+int glink_get_ch_rintents_queued(struct channel_ctx *ch_ctx);
+
+/**
+ * glink_get_ch_intent_info() - get the intent details of a channel
+ * @ch_ctx:	pointer to the channel context.
+ * @ch_ctx_i:   pointer to a structure that will contain intent details
+ *
+ * This function is used to get all the channel intent details including locks.
+ */
+void glink_get_ch_intent_info(struct channel_ctx *ch_ctx,
+			struct glink_ch_intent_info *ch_ctx_i);
+
+/**
+ * enum ssr_command - G-Link SSR protocol commands
+ * @GLINK_SSR_DO_CLEANUP:	Request that the remote side clean up its state
+ *				for the subsystem being restarted
+ * @GLINK_SSR_CLEANUP_DONE:	Response indicating that the requested cleanup
+ *				is complete
+ */
+enum ssr_command {
+	GLINK_SSR_DO_CLEANUP,
+	GLINK_SSR_CLEANUP_DONE,
+};
+
+/**
+ * struct ssr_notify_data - Contains private data used for client notifications
+ *                          from G-Link.
+ * @tx_done:		Indicates whether or not the tx_done notification has
+ *			been received.
+ * @event:		The state notification event received.
+ * @responded:		Indicates whether or not a cleanup_done response was
+ *			received.
+ * @edge:		The G-Link edge name for the channel associated with
+ *			this callback data.
+ * @do_cleanup_data:	Structure containing the G-Link SSR do_cleanup message.
+ */
+struct ssr_notify_data {
+	bool tx_done;
+	unsigned int event;
+	bool responded;
+	const char *edge;
+	struct do_cleanup_msg *do_cleanup_data;
+};
+
+/**
+ * struct subsys_info - Subsystem info structure
+ * @ssr_name:		name of the subsystem recognized by the SSR framework
+ * @edge:		name of the G-Link edge
+ * @xprt:		name of the G-Link transport
+ * @handle:		glink_ssr channel used for this subsystem
+ * @link_state_handle:	link state handle for this edge, used to unregister
+ *			from receiving link state callbacks
+ * @link_info:		Transport info used in link state callback registration
+ * @cb_data:		Private callback data structure for notification
+ *			functions
+ * @subsystem_list_node: used to chain this structure in a list of subsystem
+ *			info structures
+ * @notify_list:	list of subsys_info_leaf structures, containing the
+ *			subsystems to notify if this subsystem undergoes SSR
+ * @notify_list_len:	length of notify_list
+ * @link_up:		Flag indicating whether the transport is up or not
+ * @link_up_lock:	Lock for protecting the link_up flag
+ */
+struct subsys_info {
+	const char *ssr_name;
+	const char *edge;
+	const char *xprt;
+	void *handle;
+	void *link_state_handle;
+	struct glink_link_info *link_info;
+	struct ssr_notify_data *cb_data;
+	struct list_head subsystem_list_node;
+	struct list_head notify_list;
+	int notify_list_len;
+	bool link_up;
+	spinlock_t link_up_lock;
+};
+
+/**
+ * struct subsys_info_leaf - Subsystem info leaf structure (a subsystem on the
+ *                           notify list of a subsys_info structure)
+ * @ssr_name:	Name of the subsystem recognized by the SSR framework
+ * @edge:	Name of the G-Link edge
+ * @xprt:	Name of the G-Link transport
+ * @restarted:	Indicates whether a restart has been triggered for this edge
+ * @cb_data:	Private callback data structure for notification functions
+ * @notify_list_node:	used to chain this structure in the notify list
+ */
+struct subsys_info_leaf {
+	const char *ssr_name;
+	const char *edge;
+	const char *xprt;
+	bool restarted;
+	struct ssr_notify_data *cb_data;
+	struct list_head notify_list_node;
+};
+
+/**
+ * struct do_cleanup_msg - The data structure for an SSR do_cleanup message
+ * @version:	The G-Link SSR protocol version
+ * @command:	The G-Link SSR command - do_cleanup
+ * @seq_num:	Sequence number
+ * @name_len:	Length of the name of the subsystem being restarted
+ * @name:	G-Link edge name of the subsystem being restarted
+ */
+struct do_cleanup_msg {
+	uint32_t version;
+	uint32_t command;
+	uint32_t seq_num;
+	uint32_t name_len;
+	char name[32];
+};
+
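+/*
+ * Example (illustrative sketch only): populating a do_cleanup message for a
+ * hypothetical edge named "lpass".  The protocol version value of 0, the use
+ * of glink_ssr_get_seq_num() (declared later in this header) for the sequence
+ * number, and whether name_len counts the terminating NUL are assumptions for
+ * illustration.
+ *
+ *	struct do_cleanup_msg msg;
+ *
+ *	memset(&msg, 0, sizeof(msg));
+ *	msg.version = 0;
+ *	msg.command = GLINK_SSR_DO_CLEANUP;
+ *	msg.seq_num = glink_ssr_get_seq_num();
+ *	msg.name_len = strlen("lpass") + 1;
+ *	strlcpy(msg.name, "lpass", sizeof(msg.name));
+ */
+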
+/**
+ * struct cleanup_done_msg - The data structure for an SSR cleanup_done message
+ * @version:	The G-Link SSR protocol version
+ * @response:	The G-Link SSR response to a do_cleanup command, cleanup_done
+ * @seq_num:	Sequence number
+ */
+struct cleanup_done_msg {
+	uint32_t version;
+	uint32_t response;
+	uint32_t seq_num;
+};
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ *                            global subsystem_info_list
+ * @subsystem:	The name of the subsystem recognized by the SSR
+ *		framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ *                       global subsystem_info_list
+ * @edge:	The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void);
+
+/**
+ * glink_ssr() - SSR cleanup function.
+ * @subsystem:	The name of the subsystem recognized by the SSR framework
+ *
+ * Return: Standard error code.
+ */
+int glink_ssr(const char *subsystem);
+
+/**
+ * notify_for_subsystem() - Notify other subsystems that a subsystem is being
+ *                          restarted
+ * @ss_info:	Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info);
+
+/**
+ * glink_ssr_wait_cleanup_done() - Get the value of the
+ *                                 notifications_successful flag in glink_ssr.
+ * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned int ssr_timeout_multiplier);
+
+struct channel_lcid {
+	struct list_head list_node;
+	uint32_t lcid;
+};
+
+/**
+ * struct rwref_lock - Read/Write Reference Lock
+ *
+ * @kref:	reference count
+ * @read_count:	number of readers that own the lock
+ * @write_count: number of writers (max 1) that own the lock
+ * @lock:	spinlock protecting the counts
+ * @count_zero:	waitqueue used to signal blocked acquirers when the counts
+ *		drop to zero
+ * @release:	function called when the reference count reaches zero
+ *
+ * A Read/Write Reference Lock is a combination of a read/write spinlock and a
+ * reference count.  The main difference is that no locks are held in the
+ * critical section and the lifetime of the object is guaranteed.
+ *
+ * Read Locking
+ * Multiple readers may access the lock at any given time and a read lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_read_get()
+ *     use resource in "critical section" (no locks are held)
+ * rwref_read_put()
+ *
+ * Write Locking
+ * A single writer may access the lock at any given time and a write lock will
+ * also ensure that the object exists for the life of the lock.
+ *
+ * rwref_write_get()
+ *     use resource in "critical section" (no locks are held)
+ * rwref_write_put()
+ *
+ * Reference Lock
+ * To ensure the lifetime of the lock (and not affect the read or write lock),
+ * a simple reference can be done.  By default, rwref_lock_init() will set the
+ * reference count to 1.
+ *
+ * rwref_lock_init()  Reference count is 1
+ * rwref_get()        Reference count is 2
+ * rwref_put()        Reference count is 1
+ * rwref_put()        Reference count goes to 0 and object is destroyed
+ */
+struct rwref_lock {
+	struct kref kref;
+	unsigned int read_count;
+	unsigned int write_count;
+	spinlock_t lock;
+	wait_queue_head_t count_zero;
+
+	void (*release)(struct rwref_lock *);
+};
+
+/**
+ * rwref_lock_release() - kref release callback for a rwref_lock
+ * @kref_ptr:	pointer to the kref embedded in the lock structure
+ */
+static inline void rwref_lock_release(struct kref *kref_ptr)
+{
+	struct rwref_lock *lock_ptr;
+
+	if (WARN_ON(kref_ptr == NULL))
+		return;
+
+	lock_ptr = container_of(kref_ptr, struct rwref_lock, kref);
+	if (lock_ptr->release)
+		lock_ptr->release(lock_ptr);
+}
+
+/**
+ * rwref_lock_init() - Initialize rwref_lock
+ * @lock_ptr:	pointer to lock structure
+ * @release:	release function called when reference count goes to 0
+ */
+static inline void rwref_lock_init(struct rwref_lock *lock_ptr,
+		void (*release)(struct rwref_lock *))
+{
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	kref_init(&lock_ptr->kref);
+	lock_ptr->read_count = 0;
+	lock_ptr->write_count = 0;
+	spin_lock_init(&lock_ptr->lock);
+	init_waitqueue_head(&lock_ptr->count_zero);
+	lock_ptr->release = release;
+}
+
+/**
+ * rwref_get() - gains a reference count for the object
+ * @lock_ptr:	pointer to lock structure
+ */
+static inline void rwref_get(struct rwref_lock *lock_ptr)
+{
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	kref_get(&lock_ptr->kref);
+}
+
+/**
+ * rwref_put() - puts a reference count for the object
+ * @lock_ptr:	pointer to lock structure
+ *
+ * If the reference count goes to zero, the release function is called.
+ */
+static inline void rwref_put(struct rwref_lock *lock_ptr)
+{
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_read_get_atomic() - gains a reference count for a read operation
+ * @lock_ptr:	pointer to lock structure
+ * @is_atomic:	if true, do not wait when acquiring lock
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get_atomic(struct rwref_lock *lock_ptr,
+			bool is_atomic)
+{
+	unsigned long flags;
+
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	kref_get(&lock_ptr->kref);
+	while (1) {
+		spin_lock_irqsave(&lock_ptr->lock, flags);
+		if (lock_ptr->write_count == 0) {
+			lock_ptr->read_count++;
+			spin_unlock_irqrestore(&lock_ptr->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&lock_ptr->lock, flags);
+		if (!is_atomic) {
+			wait_event(lock_ptr->count_zero,
+					lock_ptr->write_count == 0);
+		}
+	}
+}
+
+/**
+ * rwref_read_get() - gains a reference count for a read operation
+ * @lock_ptr:	pointer to lock structure
+ *
+ * Multiple readers may acquire the lock as long as the write count is zero.
+ */
+static inline void rwref_read_get(struct rwref_lock *lock_ptr)
+{
+	rwref_read_get_atomic(lock_ptr, false);
+}
+
+/**
+ * rwref_read_put() - returns a reference count for a read operation
+ * @lock_ptr:	pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_read_get().
+ */
+static inline void rwref_read_put(struct rwref_lock *lock_ptr)
+{
+	unsigned long flags;
+
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	spin_lock_irqsave(&lock_ptr->lock, flags);
+	if (WARN_ON(lock_ptr->read_count == 0)) {
+		spin_unlock_irqrestore(&lock_ptr->lock, flags);
+		return;
+	}
+	if (--lock_ptr->read_count == 0)
+		wake_up(&lock_ptr->count_zero);
+	spin_unlock_irqrestore(&lock_ptr->lock, flags);
+	kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
+/**
+ * rwref_write_get_atomic() - gains a reference count for a write operation
+ * @lock_ptr:	pointer to lock structure
+ * @is_atomic:	if true, do not wait when acquiring lock
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get_atomic(struct rwref_lock *lock_ptr,
+			bool is_atomic)
+{
+	unsigned long flags;
+
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	kref_get(&lock_ptr->kref);
+	while (1) {
+		spin_lock_irqsave(&lock_ptr->lock, flags);
+		if (lock_ptr->read_count == 0 && lock_ptr->write_count == 0) {
+			lock_ptr->write_count++;
+			spin_unlock_irqrestore(&lock_ptr->lock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&lock_ptr->lock, flags);
+		if (!is_atomic) {
+			wait_event(lock_ptr->count_zero,
+					(lock_ptr->read_count == 0 &&
+					lock_ptr->write_count == 0));
+		}
+	}
+}
+
+/**
+ * rwref_write_get() - gains a reference count for a write operation
+ * @lock_ptr:	pointer to lock structure
+ *
+ * Only one writer may acquire the lock as long as the reader count is zero.
+ */
+static inline void rwref_write_get(struct rwref_lock *lock_ptr)
+{
+	rwref_write_get_atomic(lock_ptr, false);
+}
+
+/**
+ * rwref_write_put() - returns a reference count for a write operation
+ * @lock_ptr:	pointer to lock structure
+ *
+ * Must be preceded by a call to rwref_write_get().
+ */
+static inline void rwref_write_put(struct rwref_lock *lock_ptr)
+{
+	unsigned long flags;
+
+	if (WARN_ON(lock_ptr == NULL))
+		return;
+
+	spin_lock_irqsave(&lock_ptr->lock, flags);
+	if (WARN_ON(lock_ptr->write_count != 1)) {
+		spin_unlock_irqrestore(&lock_ptr->lock, flags);
+		return;
+	}
+	if (--lock_ptr->write_count == 0)
+		wake_up(&lock_ptr->count_zero);
+	spin_unlock_irqrestore(&lock_ptr->lock, flags);
+	kref_put(&lock_ptr->kref, rwref_lock_release);
+}
+
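+/*
+ * Example (illustrative sketch only): tying an object's lifetime to a
+ * rwref_lock using the helpers above.  The struct example_obj and
+ * example_release() names are hypothetical.
+ *
+ *	struct example_obj {
+ *		struct rwref_lock rwref;
+ *		int value;
+ *	};
+ *
+ *	static void example_release(struct rwref_lock *lock)
+ *	{
+ *		struct example_obj *obj =
+ *			container_of(lock, struct example_obj, rwref);
+ *
+ *		kfree(obj);
+ *	}
+ *
+ *	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ *	rwref_lock_init(&obj->rwref, example_release);  reference count is 1
+ *
+ *	rwref_read_get(&obj->rwref);     read lock plus reference
+ *	... use obj->value without holding any spinlock ...
+ *	rwref_read_put(&obj->rwref);
+ *
+ *	rwref_put(&obj->rwref);  count drops to 0, example_release() is called
+ */
+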
+#endif /* _SOC_QCOM_GLINK_PRIVATE_H_ */
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
new file mode 100644
index 0000000..ef886b2
--- /dev/null
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -0,0 +1,3079 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ipc_logging.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sizes.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/tracer_pkt.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "smem"
+#define FIFO_FULL_RESERVE 8
+#define FIFO_ALIGNMENT 8
+#define TX_BLOCKED_CMD_RESERVE 8 /* size of struct read_notif_request */
+#define SMEM_CH_DESC_SIZE 32
+#define RPM_TOC_ID 0x67727430
+#define RPM_TX_FIFO_ID 0x61703272
+#define RPM_RX_FIFO_ID 0x72326170
+#define RPM_TOC_SIZE 256
+#define RPM_MAX_TOC_ENTRIES 20
+#define RPM_FIFO_ADDR_ALIGN_BYTES 3
+#define TRACER_PKT_FEATURE BIT(2)
+
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD:		Version and feature set supported
+ * @VERSION_ACK_CMD:		Response for @VERSION_CMD
+ * @OPEN_CMD:			Open a channel
+ * @CLOSE_CMD:			Close a channel
+ * @OPEN_ACK_CMD:		Response to @OPEN_CMD
+ * @RX_INTENT_CMD:		RX intent for a channel was queued
+ * @RX_DONE_CMD:		Use of RX intent for a channel is complete
+ * @RX_INTENT_REQ_CMD:		Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD:	Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD:		Start of a data transfer
+ * @ZERO_COPY_TX_DATA_CMD:	Start of a data transfer with zero copy
+ * @CLOSE_ACK_CMD:		Response for @CLOSE_CMD
+ * @TX_DATA_CONT_CMD:		Continuation or end of a data transfer
+ * @READ_NOTIF_CMD:		Request for a notification when this cmd is read
+ * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE but also reuse the used intent
+ * @SIGNALS_CMD:		Sideband signals
+ * @TRACER_PKT_CMD:		Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD:	Continuation or end of a Tracer Packet Command
+ */
+enum command_types {
+	VERSION_CMD,
+	VERSION_ACK_CMD,
+	OPEN_CMD,
+	CLOSE_CMD,
+	OPEN_ACK_CMD,
+	RX_INTENT_CMD,
+	RX_DONE_CMD,
+	RX_INTENT_REQ_CMD,
+	RX_INTENT_REQ_ACK_CMD,
+	TX_DATA_CMD,
+	ZERO_COPY_TX_DATA_CMD,
+	CLOSE_ACK_CMD,
+	TX_DATA_CONT_CMD,
+	READ_NOTIF_CMD,
+	RX_DONE_W_REUSE_CMD,
+	SIGNALS_CMD,
+	TRACER_PKT_CMD,
+	TRACER_PKT_CONT_CMD,
+};
+
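+/*
+ * Descriptive sketch of the wire framing, not an additional definition: each
+ * command above is carried in the fifo as an 8-byte header matching the local
+ * "struct command" definitions used throughout this file:
+ *
+ *	struct command {
+ *		uint16_t id;		a command_types value
+ *		uint16_t param1;	e.g. lcid/rcid or version
+ *		uint32_t param2;	e.g. features, length or intent count
+ *	};
+ *
+ * Commands with a variable-length payload (for example the channel name that
+ * follows OPEN_CMD) pad the payload to FIFO_ALIGNMENT, as done in
+ * tx_cmd_ch_open() below.
+ */
+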
+/**
+ * struct channel_desc - description of a channel fifo with a remote entity
+ * @read_index:		The read index for the fifo where data should be
+ *			consumed from.
+ * @write_index:	The write index for the fifo where data should be
+ *			produced to.
+ *
+ * This structure resides in SMEM and contains the control information for the
+ * fifo data pipes of the channel.  There is one physical channel between us
+ * and a remote entity.
+ */
+struct channel_desc {
+	uint32_t read_index;
+	uint32_t write_index;
+};
+
+/**
+ * struct mailbox_config_info - description of a mailbox transport channel
+ * @tx_read_index:	Offset into the tx fifo where data should be read from.
+ * @tx_write_index:	Offset into the tx fifo where new data will be placed.
+ * @tx_size:		Size of the transmit fifo in bytes.
+ * @rx_read_index:	Offset into the rx fifo where data should be read from.
+ * @rx_write_index:	Offset into the rx fifo where new data will be placed.
+ * @rx_size:		Size of the receive fifo in bytes.
+ * @fifo:		The fifos for the channel.
+ */
+struct mailbox_config_info {
+	uint32_t tx_read_index;
+	uint32_t tx_write_index;
+	uint32_t tx_size;
+	uint32_t rx_read_index;
+	uint32_t rx_write_index;
+	uint32_t rx_size;
+	char fifo[]; /* tx fifo, then rx fifo */
+};
+
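+/*
+ * The flexible fifo[] array holds the transmit fifo immediately followed by
+ * the receive fifo, so the two regions are derived as sketched below (this
+ * matches how get_rx_fifo() later in this file indexes the array; treating
+ * the tx fifo as starting at offset 0 is an assumption based on the member
+ * comment above):
+ *
+ *	tx_fifo = &mailbox->fifo[0];                    tx_size bytes
+ *	rx_fifo = &mailbox->fifo[mailbox->tx_size];     rx_size bytes
+ */
+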
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if:			The transport interface registered with the
+ *				glink core associated with this edge.
+ * @xprt_cfg:			The transport configuration for the glink core
+ *				associated with this edge.
+ * @intentless:			True if this edge runs in intentless mode.
+ * @irq_disabled:		Flag indicating whether the interrupt is enabled
+ *				or disabled.
+ * @remote_proc_id:		The SMEM processor id for the remote side.
+ * @rx_reset_reg:		Reference to the register to reset the rx irq
+ *				line, if applicable.
+ * @out_irq_reg:		Reference to the register to send an irq to the
+ *				remote side.
+ * @out_irq_mask:		Mask written to @out_irq_reg to trigger the
+ *				correct irq.
+ * @irq_line:			The incoming interrupt line.
+ * @tx_irq_count:		Number of interrupts triggered.
+ * @rx_irq_count:		Number of interrupts received.
+ * @tx_ch_desc:			Reference to the channel description structure
+ *				for tx in SMEM for this edge.
+ * @rx_ch_desc:			Reference to the channel description structure
+ *				for rx in SMEM for this edge.
+ * @tx_fifo:			Reference to the transmit fifo in SMEM.
+ * @rx_fifo:			Reference to the receive fifo in SMEM.
+ * @tx_fifo_size:		Total size of @tx_fifo.
+ * @rx_fifo_size:		Total size of @rx_fifo.
+ * @read_from_fifo:		Memcpy variant used to read from this edge's
+ *				fifo memory.
+ * @write_to_fifo:		Memcpy variant used to write to this edge's
+ *				fifo memory.
+ * @write_lock:			Lock to serialize access to @tx_fifo.
+ * @tx_blocked_queue:		Queue of entities waiting for the remote side to
+ *				signal @tx_fifo has flushed and is now empty.
+ * @tx_resume_needed:		A tx resume signal needs to be sent to the glink
+ *				core once the remote side indicates @tx_fifo has
+ *				flushed.
+ * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
+ *				been sent, and a response is pending from the
+ *				remote side.  Protected by @write_lock.
+ * @kwork:			Work to be executed when an irq is received.
+ * @kworker:			Handle to the entity processing deferred
+ *				commands.
+ * @tasklet:			Handle to the tasklet that processes incoming
+ *				data packets in an atomic manner.
+ * @task:			Handle to the task context used to run @kworker.
+ * @use_ref:			Active uses of this transport use this to grab
+ *				a reference.  Used for ssr synchronization.
+ * @in_ssr:			Signals if this transport is in ssr.
+ * @rx_lock:			Used to serialize concurrent instances of rx
+ *				processing.
+ * @deferred_cmds:		List of deferred commands that need to be
+ *				processed in process context.
+ * @num_pw_states:		Size of @ramp_time_us.
+ * @ramp_time_us:		Array of ramp times in microseconds where array
+ *				index position represents a power state.
+ * @mailbox:			Mailbox transport channel description reference.
+ */
+struct edge_info {
+	struct glink_transport_if xprt_if;
+	struct glink_core_transport_cfg xprt_cfg;
+	bool intentless;
+	bool irq_disabled;
+	uint32_t remote_proc_id;
+	void __iomem *rx_reset_reg;
+	void __iomem *out_irq_reg;
+	uint32_t out_irq_mask;
+	uint32_t irq_line;
+	uint32_t tx_irq_count;
+	uint32_t rx_irq_count;
+	struct channel_desc *tx_ch_desc;
+	struct channel_desc *rx_ch_desc;
+	void __iomem *tx_fifo;
+	void __iomem *rx_fifo;
+	uint32_t tx_fifo_size;
+	uint32_t rx_fifo_size;
+	void * (*read_from_fifo)(void *dest, const void *src, size_t num_bytes);
+	void * (*write_to_fifo)(void *dest, const void *src, size_t num_bytes);
+	spinlock_t write_lock;
+	wait_queue_head_t tx_blocked_queue;
+	bool tx_resume_needed;
+	bool tx_blocked_signal_sent;
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct tasklet_struct tasklet;
+	struct srcu_struct use_ref;
+	bool in_ssr;
+	spinlock_t rx_lock;
+	struct list_head deferred_cmds;
+	uint32_t num_pw_states;
+	unsigned long *ramp_time_us;
+	struct mailbox_config_info *mailbox;
+};
+
+/**
+ * struct deferred_cmd - description of a command to be processed later
+ * @list_node:	Used to put this command on a list in the edge.
+ * @id:		ID of the command.
+ * @param1:	Parameter one of the command.
+ * @param2:	Parameter two of the command.
+ * @data:	Extra data associated with the command, if applicable.
+ *
+ * This structure stores the relevant information of a command that was removed
+ * from the fifo but needs to be processed at a later time.
+ */
+struct deferred_cmd {
+	struct list_head list_node;
+	uint16_t id;
+	uint16_t param1;
+	uint32_t param2;
+	void *data;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features);
+static void register_debugfs_info(struct edge_info *einfo);
+
+static struct edge_info *edge_infos[NUM_SMEM_SUBSYSTEMS];
+static DEFINE_MUTEX(probe_lock);
+static struct glink_core_version versions[] = {
+	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * send_irq() - send an irq to a remote entity as an event signal
+ * @einfo:	The edge whose remote entity should receive the irq.
+ */
+static void send_irq(struct edge_info *einfo)
+{
+	/*
+	 * Any data associated with this event must be visible to the remote
+	 * side before the interrupt is triggered.
+	 */
+	wmb();
+	writel_relaxed(einfo->out_irq_mask, einfo->out_irq_reg);
+	if (einfo->remote_proc_id != SMEM_SPSS)
+		writel_relaxed(0, einfo->out_irq_reg);
+	einfo->tx_irq_count++;
+}
+
+/**
+ * read_from_fifo() - memcpy from fifo memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *read_from_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	memcpy_fromio(dest, src, num_bytes);
+	return dest;
+}
+
+/**
+ * write_to_fifo() - memcpy to fifo memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *write_to_fifo(void *dest, const void *src, size_t num_bytes)
+{
+	memcpy_toio(dest, src, num_bytes);
+	return dest;
+}
+
+/**
+ * memcpy32_toio() - memcpy to word access only memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_toio(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	uint32_t *src_local = (uint32_t *)src;
+
+	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(!dest_local ||
+			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(!src_local ||
+			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
+		return ERR_PTR(-EINVAL);
+	num_bytes /= sizeof(uint32_t);
+
+	while (num_bytes--)
+		__raw_writel_no_log(*src_local++, dest_local++);
+
+	return dest;
+}
+
+/**
+ * memcpy32_fromio() - memcpy from word access only memory
+ * @dest:	Destination address.
+ * @src:	Source address.
+ * @num_bytes:	Number of bytes to copy.
+ *
+ * Return: Destination address.
+ */
+static void *memcpy32_fromio(void *dest, const void *src, size_t num_bytes)
+{
+	uint32_t *dest_local = (uint32_t *)dest;
+	uint32_t *src_local = (uint32_t *)src;
+
+	if (WARN_ON(num_bytes & RPM_FIFO_ADDR_ALIGN_BYTES))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(!dest_local ||
+			((uintptr_t)dest_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
+		return ERR_PTR(-EINVAL);
+	if (WARN_ON(!src_local ||
+			((uintptr_t)src_local & RPM_FIFO_ADDR_ALIGN_BYTES)))
+		return ERR_PTR(-EINVAL);
+	num_bytes /= sizeof(uint32_t);
+
+	while (num_bytes--)
+		*dest_local++ = __raw_readl_no_log(src_local++);
+
+	return dest;
+}
+
+/**
+ * fifo_read_avail() - how many bytes are available to be read from an edge
+ * @einfo:	The concerned edge to query.
+ *
+ * Return: The number of bytes available to be read from edge.
+ */
+static uint32_t fifo_read_avail(struct edge_info *einfo)
+{
+	uint32_t read_index = einfo->rx_ch_desc->read_index;
+	uint32_t write_index = einfo->rx_ch_desc->write_index;
+	uint32_t fifo_size = einfo->rx_fifo_size;
+	uint32_t bytes_avail;
+
+	bytes_avail = write_index - read_index;
+	if (write_index < read_index)
+		/*
+		 * Case:  W < R - Write has wrapped
+		 * --------------------------------
+		 * In this case, the write operation has wrapped past the end
+		 * of the FIFO which means that now calculating the amount of
+		 * data in the FIFO results in a negative number.  This can be
+		 * easily fixed by adding the fifo_size to the value.  Even
+		 * though the values are unsigned, subtraction is always done
+		 * using 2's complement which means that the result will still
+		 * be correct once the FIFO size has been added to the negative
+		 * result.
+		 *
+		 * Example:
+		 *     '-' = data in fifo
+		 *     '.' = empty
+		 *
+		 *      0         1
+		 *      0123456789012345
+		 *     |-----w.....r----|
+		 *      0               N
+		 *
+		 *     write = 5 = 101b
+		 *     read = 11 = 1011b
+		 *     Data in FIFO
+		 *       (write - read) + fifo_size = (101b - 1011b) + 10000b
+		 *                          = 11111010b + 10000b = 1010b = 10
+		 */
+		bytes_avail += fifo_size;
+
+	return bytes_avail;
+}
+
+/**
+ * fifo_write_avail() - how many bytes can be written to the edge
+ * @einfo:	The concerned edge to query.
+ *
+ * Calculates the number of bytes that can be transmitted at this time.
+ * Automatically reserves some space to maintain alignment when the fifo is
+ * completely full, and reserves space so that the flush command can always be
+ * transmitted when needed.
+ *
+ * Return: The number of bytes available to be written to the edge.
+ */
+static uint32_t fifo_write_avail(struct edge_info *einfo)
+{
+	uint32_t read_index = einfo->tx_ch_desc->read_index;
+	uint32_t write_index = einfo->tx_ch_desc->write_index;
+	uint32_t fifo_size = einfo->tx_fifo_size;
+	uint32_t bytes_avail = read_index - write_index;
+
+	if (read_index <= write_index)
+		bytes_avail += fifo_size;
+	if (bytes_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+		bytes_avail = 0;
+	else
+		bytes_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+	return bytes_avail;
+}
+
+/**
+ * fifo_read() - read data from an edge
+ * @einfo:	The concerned edge to read from.
+ * @_data:	Buffer to copy the read data into.
+ * @len:	The amount of data to read in bytes.
+ *
+ * Return: The number of bytes read.
+ */
+static int fifo_read(struct edge_info *einfo, void *_data, int len)
+{
+	void *ptr;
+	void *ret;
+	void *data = _data;
+	int orig_len = len;
+	uint32_t read_index = einfo->rx_ch_desc->read_index;
+	uint32_t write_index = einfo->rx_ch_desc->write_index;
+	uint32_t fifo_size = einfo->rx_fifo_size;
+	uint32_t n;
+
+	while (len) {
+		ptr = einfo->rx_fifo + read_index;
+		if (read_index <= write_index)
+			n = write_index - read_index;
+		else
+			n = fifo_size - read_index;
+
+		if (n == 0)
+			break;
+		if (n > len)
+			n = len;
+
+		ret = einfo->read_from_fifo(data, ptr, n);
+		if (IS_ERR(ret))
+			return PTR_ERR(ret);
+
+		data += n;
+		len -= n;
+		read_index += n;
+		if (read_index >= fifo_size)
+			read_index -= fifo_size;
+	}
+	einfo->rx_ch_desc->read_index = read_index;
+
+	return orig_len - len;
+}
+
+/**
+ * fifo_write_body() - Copy transmit data into an edge
+ * @einfo:		The concerned edge to copy into.
+ * @_data:		Buffer of data to copy from.
+ * @len:		Size of data to copy in bytes.
+ * @write_index:	Index into the channel where the data should be copied.
+ *
+ * Return: Number of bytes remaining to be copied into the edge.
+ */
+static int fifo_write_body(struct edge_info *einfo, const void *_data,
+				int len, uint32_t *write_index)
+{
+	void *ptr;
+	void *ret;
+	const void *data = _data;
+	uint32_t read_index = einfo->tx_ch_desc->read_index;
+	uint32_t fifo_size = einfo->tx_fifo_size;
+	uint32_t n;
+
+	while (len) {
+		ptr = einfo->tx_fifo + *write_index;
+		if (*write_index < read_index) {
+			n = read_index - *write_index - FIFO_FULL_RESERVE;
+		} else {
+			if (read_index < FIFO_FULL_RESERVE)
+				n = fifo_size + read_index - *write_index -
+							FIFO_FULL_RESERVE;
+			else
+				n = fifo_size - *write_index;
+		}
+
+		if (n == 0)
+			break;
+		if (n > len)
+			n = len;
+
+		ret = einfo->write_to_fifo(ptr, data, n);
+		if (IS_ERR(ret))
+			return PTR_ERR(ret);
+
+		data += n;
+		len -= n;
+		*write_index += n;
+		if (*write_index >= fifo_size)
+			*write_index -= fifo_size;
+	}
+	return len;
+}
+
+/**
+ * fifo_write() - Write data into an edge
+ * @einfo:	The concerned edge to write to.
+ * @data:	Buffer of data to write.
+ * @len:	Length of data to write, in bytes.
+ *
+ * Wrapper around fifo_write_body() to manage additional details that are
+ * necessary for a complete write event.  Does not manage concurrency.  Clients
+ * should use fifo_write_avail() to check if there is sufficient space before
+ * calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write(struct edge_info *einfo, const void *data, int len)
+{
+	int orig_len = len;
+	uint32_t write_index = einfo->tx_ch_desc->write_index;
+
+	len = fifo_write_body(einfo, data, len, &write_index);
+	if (unlikely(len < 0))
+		return len;
+	einfo->tx_ch_desc->write_index = write_index;
+	send_irq(einfo);
+
+	return orig_len - len;
+}
+
+/**
+ * fifo_write_complex() - writes a transaction of multiple buffers to an edge
+ * @einfo:	The concerned edge to write to.
+ * @data1:	The first buffer of data to write.
+ * @len1:	The length of the first buffer in bytes.
+ * @data2:	The second buffer of data to write.
+ * @len2:	The length of the second buffer in bytes.
+ * @data3:	The third buffer of data to write.
+ * @len3:	The length of the third buffer in bytes.
+ *
+ * A variant of fifo_write() which optimizes the usecase found in tx().  The
+ * remote side expects all or none of the transmitted data to be available.
+ * This prevents the tx() usecase from calling fifo_write() multiple times.  The
+ * alternative would be an allocation and additional memcpy to create a buffer
+ * to copy all the data segments into one location before calling fifo_write().
+ *
+ * Return: Number of bytes written to the edge.
+ */
+static int fifo_write_complex(struct edge_info *einfo,
+			      const void *data1, int len1,
+			      const void *data2, int len2,
+			      const void *data3, int len3)
+{
+	int orig_len = len1 + len2 + len3;
+	uint32_t write_index = einfo->tx_ch_desc->write_index;
+
+	len1 = fifo_write_body(einfo, data1, len1, &write_index);
+	if (unlikely(len1 < 0))
+		return len1;
+	len2 = fifo_write_body(einfo, data2, len2, &write_index);
+	if (unlikely(len2 < 0))
+		return len2;
+	len3 = fifo_write_body(einfo, data3, len3, &write_index);
+	if (unlikely(len3 < 0))
+		return len3;
+
+	einfo->tx_ch_desc->write_index = write_index;
+	send_irq(einfo);
+
+	return orig_len - len1 - len2 - len3;
+}
+
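+/*
+ * Example (illustrative sketch only): transmitting a command header plus two
+ * payload segments as a single fifo transaction.  The caller is assumed to
+ * hold einfo->write_lock and to have confirmed space with fifo_write_avail(),
+ * mirroring how fifo_write() is normally driven; the cmd/hdr/data names are
+ * placeholders.
+ *
+ *	if (fifo_write_avail(einfo) >= sizeof(cmd) + hdr_len + data_len)
+ *		fifo_write_complex(einfo, &cmd, sizeof(cmd),
+ *				   hdr, hdr_len, data, data_len);
+ */
+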
+/**
+ * send_tx_blocked_signal() - send the flush command as we are blocked from tx
+ * @einfo:	The concerned edge which is blocked.
+ *
+ * Used to send a signal to the remote side that we have no more space to
+ * transmit data and therefore need the remote side to signal us when they have
+ * cleared some space by reading some data.  This function relies upon the
+ * assumption that fifo_write_avail() will reserve some space so that the flush
+ * signal command can always be put into the transmit fifo, even when "everyone"
+ * else thinks that the transmit fifo is truly full.  This function assumes
+ * that it is called with the write_lock already locked.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+	struct read_notif_request {
+		uint16_t cmd;
+		uint16_t reserved;
+		uint32_t reserved2;
+	};
+	struct read_notif_request read_notif_req;
+
+	read_notif_req.cmd = READ_NOTIF_CMD;
+	read_notif_req.reserved = 0;
+	read_notif_req.reserved2 = 0;
+
+	if (!einfo->tx_blocked_signal_sent) {
+		einfo->tx_blocked_signal_sent = true;
+		fifo_write(einfo, &read_notif_req, sizeof(read_notif_req));
+	}
+}
+
+/**
+ * fifo_tx() - transmit data on an edge
+ * @einfo:	The concerned edge to transmit on.
+ * @data:	Buffer of data to transmit.
+ * @len:	Length of data to transmit in bytes.
+ *
+ * This helper function is the preferred interface to fifo_write() and should
+ * be used in the normal case for transmitting entities.  fifo_tx() will block
+ * until there is sufficient room to transmit the requested amount of data.
+ * fifo_tx() will manage any concurrency between multiple transmitters on a
+ * channel.
+ *
+ * Return: Number of bytes transmitted.
+ */
+static int fifo_tx(struct edge_info *einfo, const void *data, int len)
+{
+	unsigned long flags;
+	int ret;
+
+	DEFINE_WAIT(wait);
+
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	while (fifo_write_avail(einfo) < len) {
+		send_tx_blocked_signal(einfo);
+		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+							TASK_UNINTERRUPTIBLE);
+		if (fifo_write_avail(einfo) < len && !einfo->in_ssr) {
+			spin_unlock_irqrestore(&einfo->write_lock, flags);
+			schedule();
+			spin_lock_irqsave(&einfo->write_lock, flags);
+		}
+		finish_wait(&einfo->tx_blocked_queue, &wait);
+		if (einfo->in_ssr) {
+			spin_unlock_irqrestore(&einfo->write_lock, flags);
+			return -EFAULT;
+		}
+	}
+	ret = fifo_write(einfo, data, len);
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+	return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo:	The edge the data was received on.
+ * @cmd_id:	ID to specify the type of data.
+ * @rcid:	The remote channel id associated with the data.
+ * @intent_id:	The intent the data should be put in.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+			    uint32_t rcid, uint32_t intent_id)
+{
+	struct command {
+		uint32_t frag_size;
+		uint32_t size_remaining;
+	};
+	struct command cmd;
+	struct glink_core_rx_intent *intent;
+	char trash[FIFO_ALIGNMENT];
+	int alignment;
+	bool err = false;
+
+	fifo_read(einfo, &cmd, sizeof(cmd));
+
+	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+					&einfo->xprt_if, rcid, intent_id);
+	if (intent == NULL) {
+		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+								intent_id);
+		err = true;
+	} else if (intent->data == NULL) {
+		if (einfo->intentless) {
+			intent->data = kmalloc(cmd.frag_size, GFP_ATOMIC);
+			if (!intent->data)
+				err = true;
+			else
+				intent->intent_size = cmd.frag_size;
+		} else {
+			GLINK_ERR(
+				"%s: intent for ch %d liid %d has no data buff\n",
+						__func__, rcid, intent_id);
+			err = true;
+		}
+	}
+
+	if (!err &&
+	    (intent->intent_size - intent->write_offset < cmd.frag_size ||
+	    intent->write_offset + cmd.size_remaining > intent->intent_size)) {
+		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+							__func__,
+							cmd.frag_size,
+							cmd.size_remaining,
+							"will overflow ch",
+							rcid,
+							"intent",
+							intent_id);
+		err = true;
+	}
+
+	if (err) {
+		alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+		alignment -= cmd.frag_size;
+		while (cmd.frag_size) {
+			if (cmd.frag_size > FIFO_ALIGNMENT) {
+				fifo_read(einfo, trash, FIFO_ALIGNMENT);
+				cmd.frag_size -= FIFO_ALIGNMENT;
+			} else {
+				fifo_read(einfo, trash, cmd.frag_size);
+				cmd.frag_size = 0;
+			}
+		}
+		if (alignment)
+			fifo_read(einfo, trash, alignment);
+		return;
+	}
+	fifo_read(einfo, intent->data + intent->write_offset, cmd.frag_size);
+	intent->write_offset += cmd.frag_size;
+	intent->pkt_size += cmd.frag_size;
+
+	alignment = ALIGN(cmd.frag_size, FIFO_ALIGNMENT);
+	alignment -= cmd.frag_size;
+	if (alignment)
+		fifo_read(einfo, trash, alignment);
+
+	if (unlikely((cmd_id == TRACER_PKT_CMD ||
+		      cmd_id == TRACER_PKT_CONT_CMD) && !cmd.size_remaining)) {
+		tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+		intent->tracer_pkt = true;
+	}
+
+	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+							rcid,
+							intent,
+							cmd.size_remaining ?
+								false : true);
+}
+
+/**
+ * queue_cmd() - queue a deferred command for later processing
+ * @einfo:	Edge to queue commands on.
+ * @cmd:	Command to queue.
+ * @data:	Command specific data to queue with the command.
+ *
+ * Return: True if queuing was successful, false otherwise.
+ */
+static bool queue_cmd(struct edge_info *einfo, void *cmd, void *data)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+	};
+	struct command *_cmd = cmd;
+	struct deferred_cmd *d_cmd;
+
+	d_cmd = kmalloc(sizeof(*d_cmd), GFP_ATOMIC);
+	if (!d_cmd) {
+		GLINK_ERR("%s: Discarding cmd %d\n", __func__, _cmd->id);
+		return false;
+	}
+	d_cmd->id = _cmd->id;
+	d_cmd->param1 = _cmd->param1;
+	d_cmd->param2 = _cmd->param2;
+	d_cmd->data = data;
+	list_add_tail(&d_cmd->list_node, &einfo->deferred_cmds);
+	kthread_queue_work(&einfo->kworker, &einfo->kwork);
+	return true;
+}
+
+/**
+ * get_rx_fifo() - Find the rx fifo for an edge
+ * @einfo:	Edge to find the fifo for.
+ *
+ * Return: True if fifo was found, false otherwise.
+ */
+static bool get_rx_fifo(struct edge_info *einfo)
+{
+	if (einfo->mailbox) {
+		einfo->rx_fifo = &einfo->mailbox->fifo[einfo->mailbox->tx_size];
+		einfo->rx_fifo_size = einfo->mailbox->rx_size;
+	} else {
+		einfo->rx_fifo = smem_get_entry(SMEM_GLINK_NATIVE_XPRT_FIFO_1,
+							&einfo->rx_fifo_size,
+							einfo->remote_proc_id,
+							SMEM_ITEM_CACHED_FLAG);
+		if (!einfo->rx_fifo)
+			return false;
+	}
+
+	return true;
+}
+
+/**
+ * __rx_worker() - process received commands on a specific edge
+ * @einfo:	Edge to process commands on.
+ * @atomic_ctx:	Indicates if the caller is in atomic context and requires any
+ *		non-atomic operations to be deferred.
+ */
+static void __rx_worker(struct edge_info *einfo, bool atomic_ctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+	};
+	struct intent_desc {
+		uint32_t size;
+		uint32_t id;
+	};
+	struct command cmd;
+	struct intent_desc intent;
+	struct intent_desc *intents;
+	int i;
+	bool granted;
+	unsigned long flags;
+	int rcu_id;
+	uint16_t rcid;
+	uint32_t name_len;
+	uint32_t len;
+	char *name;
+	char trash[FIFO_ALIGNMENT];
+	struct deferred_cmd *d_cmd;
+	void *cmd_data;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+
+	if (unlikely(!einfo->rx_fifo)) {
+		if (!get_rx_fifo(einfo)) {
+			srcu_read_unlock(&einfo->use_ref, rcu_id);
+			return;
+		}
+		einfo->in_ssr = false;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+	if (!atomic_ctx) {
+		if (einfo->tx_resume_needed && fifo_write_avail(einfo)) {
+			einfo->tx_resume_needed = false;
+			einfo->xprt_if.glink_core_if_ptr->tx_resume(
+							&einfo->xprt_if);
+		}
+		spin_lock_irqsave(&einfo->write_lock, flags);
+		if (einfo->tx_blocked_signal_sent) {
+			wake_up_all(&einfo->tx_blocked_queue);
+			einfo->tx_blocked_signal_sent = false;
+		}
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+	}
+
+
+	/*
+	 * Access to the fifo needs to be synchronized, however only the calls
+	 * into the core from process_rx_data() are compatible with an atomic
+	 * processing context.  For everything else, we need to do all the fifo
+	 * processing, then unlock the lock for the call into the core.  Data
+	 * in the fifo is allowed to be processed immediately instead of being
+	 * ordered with the commands because the channel open process prevents
+	 * intents from being queued (which prevents data from being sent) until
+	 * all the channel open commands are processed by the core, thus
+	 * eliminating a race.
+	 */
+	spin_lock_irqsave(&einfo->rx_lock, flags);
+	while (fifo_read_avail(einfo) ||
+			(!atomic_ctx && !list_empty(&einfo->deferred_cmds))) {
+		if (einfo->in_ssr)
+			break;
+
+		if (!atomic_ctx && !list_empty(&einfo->deferred_cmds)) {
+			d_cmd = list_first_entry(&einfo->deferred_cmds,
+						struct deferred_cmd, list_node);
+			list_del(&d_cmd->list_node);
+			cmd.id = d_cmd->id;
+			cmd.param1 = d_cmd->param1;
+			cmd.param2 = d_cmd->param2;
+			cmd_data = d_cmd->data;
+			kfree(d_cmd);
+		} else {
+			fifo_read(einfo, &cmd, sizeof(cmd));
+			cmd_data = NULL;
+		}
+
+		switch (cmd.id) {
+		case VERSION_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case VERSION_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case OPEN_CMD:
+			rcid = cmd.param1;
+			name_len = cmd.param2;
+
+			if (cmd_data) {
+				name = cmd_data;
+			} else {
+				len = ALIGN(name_len, FIFO_ALIGNMENT);
+				name = kmalloc(len, GFP_ATOMIC);
+				if (!name) {
+					pr_err("No memory available to rx ch open cmd name.  Discarding cmd.\n");
+					while (len) {
+						fifo_read(einfo, trash,
+								FIFO_ALIGNMENT);
+						len -= FIFO_ALIGNMENT;
+					}
+					break;
+				}
+				fifo_read(einfo, name, len);
+			}
+			if (atomic_ctx) {
+				if (!queue_cmd(einfo, &cmd, name))
+					kfree(name);
+				break;
+			}
+
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+								&einfo->xprt_if,
+								rcid,
+								name,
+								SMEM_XPRT_ID);
+			kfree(name);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case CLOSE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->
+							rx_cmd_ch_remote_close(
+								&einfo->xprt_if,
+								cmd.param1);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case OPEN_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								SMEM_XPRT_ID);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_CMD:
+			/*
+			 * One intent listed with this command.  This is the
+			 * expected case and can be optimized over the general
+			 * case of an array of intents.
+			 */
+			if (cmd.param2 == 1) {
+				if (cmd_data) {
+					intent.id = ((struct intent_desc *)
+								cmd_data)->id;
+					intent.size = ((struct intent_desc *)
+								cmd_data)->size;
+					kfree(cmd_data);
+				} else {
+					fifo_read(einfo, &intent,
+								sizeof(intent));
+				}
+				if (atomic_ctx) {
+					cmd_data = kmalloc(sizeof(intent),
+								GFP_ATOMIC);
+					if (!cmd_data) {
+						GLINK_ERR(
+							"%s: dropping cmd %d\n",
+							__func__, cmd.id);
+						break;
+					}
+					((struct intent_desc *)cmd_data)->id =
+								intent.id;
+					((struct intent_desc *)cmd_data)->size =
+								intent.size;
+					if (!queue_cmd(einfo, &cmd, cmd_data))
+						kfree(cmd_data);
+					break;
+				}
+				spin_unlock_irqrestore(&einfo->rx_lock, flags);
+				einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_put(
+								&einfo->xprt_if,
+								cmd.param1,
+								intent.id,
+								intent.size);
+				spin_lock_irqsave(&einfo->rx_lock, flags);
+				break;
+			}
+
+			/* Array of intents to process */
+			if (cmd_data) {
+				intents = cmd_data;
+			} else {
+				intents = kmalloc_array(cmd.param2,
+						sizeof(*intents), GFP_ATOMIC);
+				if (!intents) {
+					for (i = 0; i < cmd.param2; ++i)
+						fifo_read(einfo, &intent,
+								sizeof(intent));
+					break;
+				}
+				fifo_read(einfo, intents,
+					sizeof(*intents) * cmd.param2);
+			}
+			if (atomic_ctx) {
+				if (!queue_cmd(einfo, &cmd, intents))
+					kfree(intents);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			for (i = 0; i < cmd.param2; ++i) {
+				einfo->xprt_if.glink_core_if_ptr->
+					rx_cmd_remote_rx_intent_put(
+							&einfo->xprt_if,
+							cmd.param1,
+							intents[i].id,
+							intents[i].size);
+			}
+			kfree(intents);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_DONE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2,
+								false);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_REQ_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_remote_rx_intent_req(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_INTENT_REQ_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			granted = false;
+			if (cmd.param2 == 1)
+				granted = true;
+			einfo->xprt_if.glink_core_if_ptr->
+						rx_cmd_rx_intent_req_ack(
+								&einfo->xprt_if,
+								cmd.param1,
+								granted);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case TX_DATA_CMD:
+		case TX_DATA_CONT_CMD:
+		case TRACER_PKT_CMD:
+		case TRACER_PKT_CONT_CMD:
+			process_rx_data(einfo, cmd.id, cmd.param1, cmd.param2);
+			break;
+		case CLOSE_ACK_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+								&einfo->xprt_if,
+								cmd.param1);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case READ_NOTIF_CMD:
+			send_irq(einfo);
+			break;
+		case SIGNALS_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		case RX_DONE_W_REUSE_CMD:
+			if (atomic_ctx) {
+				queue_cmd(einfo, &cmd, NULL);
+				break;
+			}
+			spin_unlock_irqrestore(&einfo->rx_lock, flags);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+								&einfo->xprt_if,
+								cmd.param1,
+								cmd.param2,
+								true);
+			spin_lock_irqsave(&einfo->rx_lock, flags);
+			break;
+		default:
+			pr_err("Unrecognized command: %d\n", cmd.id);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&einfo->rx_lock, flags);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * rx_worker_atomic() - worker function to process received command in atomic
+ *			context.
+ * @param:	The parameter passed during initialization of the tasklet
+ *		(the edge_info pointer).
+ */
+static void rx_worker_atomic(unsigned long param)
+{
+	struct edge_info *einfo = (struct edge_info *)param;
+
+	__rx_worker(einfo, true);
+}
+
+/**
+ * rx_worker() - worker function to process received commands
+ * @work:	kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(work, struct edge_info, kwork);
+	__rx_worker(einfo, false);
+}
+
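+/**
+ * irq_handler() - handler for the incoming interrupt from the remote side
+ * @irq:	The interrupt line that fired.
+ * @priv:	The edge_info for the edge associated with the interrupt.
+ *
+ * Resets the rx irq line if applicable and schedules the tasklet to process
+ * the incoming commands in atomic context.
+ *
+ * Return: IRQ_HANDLED.
+ */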
+irqreturn_t irq_handler(int irq, void *priv)
+{
+	struct edge_info *einfo = (struct edge_info *)priv;
+
+	if (einfo->rx_reset_reg)
+		writel_relaxed(einfo->out_irq_mask, einfo->rx_reset_reg);
+
+	tasklet_hi_schedule(&einfo->tasklet);
+	einfo->rx_irq_count++;
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * tx_cmd_version() - convert a version cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+			   uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - convert a version ack cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+			       uint32_t version,
+			       uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_ACK_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - activate a negotiated version and feature set
+ * @if_ptr:	The transport to configure.
+ * @version:	The version to use.
+ * @features:	The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+			uint32_t features)
+{
+	struct edge_info *einfo;
+	uint32_t ret;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return 0;
+	}
+
+	ret = einfo->intentless ?
+				GCAP_INTENTLESS | GCAP_SIGNALS : GCAP_SIGNALS;
+
+	if (features & TRACER_PKT_FEATURE)
+		ret |= GCAP_TRACER_PKT;
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - convert a channel open cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @name:	The channel name to encode.
+ * @req_xprt:	The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+			  const char *name, uint16_t req_xprt)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t length;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t buf_size;
+	void *buf;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = OPEN_CMD;
+	cmd.lcid = lcid;
+	cmd.length = strlen(name) + 1;
+
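+	/*
+	 * The open command is followed by the NUL-terminated channel name,
+	 * with the whole message padded up to the FIFO alignment.
+	 */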
+	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		GLINK_ERR("%s: malloc fail for %d size buf\n",
+				__func__, buf_size);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENOMEM;
+	}
+
+	memcpy(buf, &cmd, sizeof(cmd));
+	memcpy(buf + sizeof(cmd), name, cmd.length);
+
+	fifo_tx(einfo, buf, buf_size);
+
+	kfree(buf);
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - convert a channel close cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = CLOSE_CMD;
+	cmd.lcid = lcid;
+	cmd.reserved = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - convert a channel open ack cmd to wire format
+ *				 and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ * @xprt_resp:	The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+				     uint32_t rcid, uint16_t xprt_resp)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = OPEN_ACK_CMD;
+	cmd.rcid = rcid;
+	cmd.reserved = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - convert a channel close ack cmd to wire format
+ *				  and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = CLOSE_ACK_CMD;
+	cmd.rcid = rcid;
+	cmd.reserved = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * ssr() - process a subsystem restart notification of a transport
+ * @if_ptr:	The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+	struct deferred_cmd *cmd;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	BUG_ON(einfo->remote_proc_id == SMEM_RPM);
+
+	einfo->in_ssr = true;
+	wake_up_all(&einfo->tx_blocked_queue);
+
+	synchronize_srcu(&einfo->use_ref);
+
+	while (!list_empty(&einfo->deferred_cmds)) {
+		cmd = list_first_entry(&einfo->deferred_cmds,
+						struct deferred_cmd, list_node);
+		list_del(&cmd->list_node);
+		kfree(cmd->data);
+		kfree(cmd);
+	}
+
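+	/*
+	 * Reset the local transport bookkeeping; link_down() below informs
+	 * the core that this edge is unusable until the remote restarts.
+	 */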
+	einfo->tx_resume_needed = false;
+	einfo->tx_blocked_signal_sent = false;
+	einfo->rx_fifo = NULL;
+	einfo->rx_fifo_size = 0;
+	einfo->tx_ch_desc->write_index = 0;
+	einfo->rx_ch_desc->read_index = 0;
+	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+	return 0;
+}
+
+/**
+ * wait_link_down() - Check status of read/write indices
+ * @if_ptr:	The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+int wait_link_down(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->tx_ch_desc->write_index == 0 &&
+		einfo->tx_ch_desc->read_index == 0 &&
+		einfo->rx_ch_desc->write_index == 0 &&
+		einfo->rx_ch_desc->read_index == 0)
+		return 1;
+	else
+		return 0;
+}
+
+/**
+ * allocate_rx_intent() - allocate/reserve space for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Since the transport creates a linear buffer, "data" is assigned the
+ * allocated buffer and "iovec" points to the "intent" itself, so that the
+ * data can also be passed to a client that only accepts vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+			      struct glink_core_rx_intent *intent)
+{
+	void *t;
+
+	t = kmalloc(size, GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	intent->data = t;
+	intent->iovec = (void *)intent;
+	intent->vprovider = rx_linear_vbuf_provider;
+	intent->pprovider = NULL;
+	return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	if (!intent || !intent->data)
+		return -EINVAL;
+
+	kfree(intent->data);
+	intent->data = NULL;
+	intent->iovec = NULL;
+	intent->vprovider = NULL;
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - convert an rx intent cmd to wire format and
+ *			      transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				  uint32_t lcid, size_t size, uint32_t liid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t count;
+		uint32_t size;
+		uint32_t liid;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return -EOPNOTSUPP;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_CMD;
+	cmd.lcid = lcid;
+	cmd.count = 1;
+	cmd.size = size;
+	cmd.liid = liid;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - convert an rx done cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				 uint32_t lcid, uint32_t liid, bool reuse)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t liid;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+	cmd.lcid = lcid;
+	cmd.liid = liid;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - convert an rx intent request cmd to wire format and
+ *			    transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t size;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return -EOPNOTSUPP;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_CMD;
+	cmd.lcid = lcid;
+	cmd.size = size;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - convert an rx intent request ack cmd
+ *					to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+					   uint32_t lcid, bool granted)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t response;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (einfo->intentless)
+		return -EOPNOTSUPP;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_ACK_CMD;
+	cmd.lcid = lcid;
+	if (granted)
+		cmd.response = 1;
+	else
+		cmd.response = 0;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - convert a signals cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+			   uint32_t sigs)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t sigs;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = SIGNALS_CMD;
+	cmd.lcid = lcid;
+	cmd.sigs = sigs;
+
+	fifo_tx(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * poll() - poll for data on a channel
+ * @if_ptr:	The transport the channel exists on.
+ * @lcid:	The local channel id.
+ *
+ * Return: 1 if data is available, 0 if not, or standard Linux error code.
+ */
+static int poll(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct edge_info *einfo;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	if (fifo_read_avail(einfo)) {
+		__rx_worker(einfo, true);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return 1;
+	}
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * mask_rx_irq() - mask the receive irq for a channel
+ * @if_ptr:	The transport the channel exists on.
+ * @lcid:	The local channel id for the channel.
+ * @mask:	True to mask the irq, false to unmask.
+ * @pstruct:	Platform defined structure for handling the masking.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int mask_rx_irq(struct glink_transport_if *if_ptr, uint32_t lcid,
+		       bool mask, void *pstruct)
+{
+	struct edge_info *einfo;
+	struct irq_chip *irq_chip;
+	struct irq_data *irq_data;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	irq_chip = irq_get_chip(einfo->irq_line);
+	if (!irq_chip) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENODEV;
+	}
+
+	irq_data = irq_get_irq_data(einfo->irq_line);
+	if (!irq_data) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENODEV;
+	}
+
+	if (mask) {
+		irq_chip->irq_mask(irq_data);
+		einfo->irq_disabled = true;
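+		/* pstruct, when supplied, is treated as a cpumask for IRQ affinity */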
+		if (pstruct)
+			irq_set_affinity(einfo->irq_line, pstruct);
+	} else {
+		irq_chip->irq_unmask(irq_data);
+		einfo->irq_disabled = false;
+	}
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @cmd_id:	The command ID to transmit.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+		   uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t riid;
+		uint32_t size;
+		uint32_t size_left;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t size;
+	uint32_t zeros_size;
+	const void *data_start;
+	char zeros[FIFO_ALIGNMENT] = { 0 };
+	unsigned long flags;
+	size_t tx_size = 0;
+	int rcu_id;
+	int ret;
+
+	if (pctx->size < pctx->size_remaining) {
+		GLINK_ERR("%s: size remaining exceeds size.  Resetting.\n",
+								__func__);
+		pctx->size_remaining = pctx->size;
+	}
+	if (!pctx->size_remaining)
+		return 0;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	if (einfo->intentless &&
+	    (pctx->size_remaining != pctx->size || cmd_id == TRACER_PKT_CMD)) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
+
+	if (cmd_id == TX_DATA_CMD) {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TX_DATA_CMD;
+		else
+			cmd.id = TX_DATA_CONT_CMD;
+	} else {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TRACER_PKT_CMD;
+		else
+			cmd.id = TRACER_PKT_CONT_CMD;
+	}
+	cmd.lcid = lcid;
+	cmd.riid = pctx->riid;
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (!data_start) {
+		GLINK_ERR("%s: invalid data_start\n", __func__);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&einfo->write_lock, flags);
+	size = fifo_write_avail(einfo);
+
+	/* Intentless clients expect a complete commit or instant failure */
+	if (einfo->intentless && size < sizeof(cmd) + pctx->size) {
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENOSPC;
+	}
+
+	/* Need enough space to write the command and some data */
+	if (size <= sizeof(cmd)) {
+		einfo->tx_resume_needed = true;
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EAGAIN;
+	}
+	size -= sizeof(cmd);
+	if (size > tx_size)
+		size = tx_size;
+
+	cmd.size = size;
+	pctx->size_remaining -= size;
+	cmd.size_left = pctx->size_remaining;
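+	/* Zero-pad the payload so the write ends on a FIFO_ALIGNMENT boundary */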
+	zeros_size = ALIGN(size, FIFO_ALIGNMENT) - cmd.size;
+	if (cmd.id == TRACER_PKT_CMD)
+		tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+	ret = fifo_write_complex(einfo, &cmd, sizeof(cmd), data_start, size,
+							zeros, zeros_size);
+	if (ret < 0) {
+		spin_unlock_irqrestore(&einfo->write_lock, flags);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return ret;
+	}
+
+	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+		"<SMEM>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+		cmd.size_left);
+	spin_unlock_irqrestore(&einfo->write_lock, flags);
+
+	/* Fake tx_done for intentless since it's not supported over the wire */
+	if (einfo->intentless) {
+		spin_lock_irqsave(&einfo->rx_lock, flags);
+		cmd.id = RX_DONE_CMD;
+		cmd.lcid = pctx->rcid;
+		queue_cmd(einfo, &cmd, NULL);
+		spin_unlock_irqrestore(&einfo->rx_lock, flags);
+	}
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @pctx:	The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ *				votes to be applied
+ * @if_ptr:	The transport interface on which power voting is requested.
+ * @state:	The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+				struct glink_transport_if *if_ptr,
+				uint32_t state)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	if (state >= einfo->num_pw_states || !(einfo->ramp_time_us))
+		return (unsigned long)ERR_PTR(-EINVAL);
+
+	return einfo->ramp_time_us[state];
+}
+
+/**
+ * power_vote() - Update the power votes to meet qos requirement
+ * @if_ptr:	The transport interface on which power voting is requested.
+ * @state:	The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr:	The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+	return 0;
+}
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are negotiated for.
+ * @version:	The version negotiated.
+ * @features:	The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features)
+{
+	return features & version->features;
+}
+
+/**
+ * init_xprt_if() - initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+	einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+	einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+	einfo->xprt_if.set_version = set_version;
+	einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+	einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+	einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+	einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+	einfo->xprt_if.ssr = ssr;
+	einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+	einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+	einfo->xprt_if.tx = tx;
+	einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+	einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+						tx_cmd_remote_rx_intent_req_ack;
+	einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+	einfo->xprt_if.poll = poll;
+	einfo->xprt_if.mask_rx_irq = mask_rx_irq;
+	einfo->xprt_if.wait_link_down = wait_link_down;
+	einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+	einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+	einfo->xprt_if.power_vote = power_vote;
+	einfo->xprt_if.power_unvote = power_unvote;
+}
+
+/**
+ * init_xprt_cfg() - initialize the xprt_cfg for an edge
+ * @einfo:	The edge to initialize.
+ * @name:	The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+	einfo->xprt_cfg.name = XPRT_NAME;
+	einfo->xprt_cfg.edge = name;
+	einfo->xprt_cfg.versions = versions;
+	einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+	einfo->xprt_cfg.max_cid = SZ_64K;
+	einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node:	Device tree node for a specific edge.
+ * @einfo:	Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+				struct edge_info *einfo)
+{
+	int rc;
+	int i;
+	char *key;
+	uint32_t *arr32;
+	uint32_t num_states;
+
+	key = "qcom,ramp-time";
+	if (!of_find_property(node, key, &num_states))
+		return -ENODEV;
+
+	num_states /= sizeof(uint32_t);
+
+	einfo->num_pw_states = num_states;
+
+	arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32)
+		return -ENOMEM;
+
+	einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!einfo->ramp_time_us) {
+		rc = -ENOMEM;
+		goto mem_alloc_fail;
+	}
+
+	rc = of_property_read_u32_array(node, key, arr32, num_states);
+	if (rc) {
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	for (i = 0; i < num_states; i++)
+		einfo->ramp_time_us[i] = arr32[i];
+
+	rc = 0;
+	return rc;
+
+invalid_key:
+	kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+	kfree(arr32);
+	return rc;
+}
+
+/**
+ * subsys_name_to_id() - translate a subsystem name to a processor id
+ * @name:	The subsystem name to look up.
+ *
+ * Return: The processor id corresponding to @name or standard Linux error code.
+ */
+static int subsys_name_to_id(const char *name)
+{
+	if (!name)
+		return -ENODEV;
+
+	if (!strcmp(name, "apss"))
+		return SMEM_APPS;
+	if (!strcmp(name, "dsps"))
+		return SMEM_DSPS;
+	if (!strcmp(name, "lpass"))
+		return SMEM_Q6;
+	if (!strcmp(name, "mpss"))
+		return SMEM_MODEM;
+	if (!strcmp(name, "rpm"))
+		return SMEM_RPM;
+	if (!strcmp(name, "wcnss"))
+		return SMEM_WCNSS;
+	if (!strcmp(name, "spss"))
+		return SMEM_SPSS;
+	if (!strcmp(name, "cdsp"))
+		return SMEM_CDSP;
+	return -ENODEV;
+}
+
+static int glink_smem_native_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct device_node *phandle_node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	uint32_t irq_line;
+	uint32_t irq_mask;
+	struct resource *r;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	if (subsys_name_to_id(subsys_name) == -ENODEV) {
+		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+	init_xprt_cfg(einfo, subsys_name);
+	init_xprt_if(einfo);
+	spin_lock_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	kthread_init_work(&einfo->kwork, rx_worker);
+	kthread_init_worker(&einfo->kworker);
+	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
+	einfo->read_from_fifo = read_from_fifo;
+	einfo->write_to_fifo = write_to_fifo;
+	init_srcu_struct(&einfo->use_ref);
+	spin_lock_init(&einfo->rx_lock);
+	INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+	mutex_lock(&probe_lock);
+	if (edge_infos[einfo->remote_proc_id]) {
+		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+								subsys_name);
+		rc = -ENODEV;
+		mutex_unlock(&probe_lock);
+		goto invalid_key;
+	}
+	edge_infos[einfo->remote_proc_id] = einfo;
+	mutex_unlock(&probe_lock);
+
+	einfo->out_irq_mask = irq_mask;
+	einfo->out_irq_reg = ioremap_nocache(r->start, resource_size(r));
+	if (!einfo->out_irq_reg) {
+		pr_err("%s: unable to map irq reg\n", __func__);
+		rc = -ENOMEM;
+		goto ioremap_fail;
+	}
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+						"smem_native_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread_run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	einfo->tx_ch_desc = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
+							SMEM_CH_DESC_SIZE,
+							einfo->remote_proc_id,
+							0);
+	if (PTR_ERR(einfo->tx_ch_desc) == -EPROBE_DEFER) {
+		rc = -EPROBE_DEFER;
+		goto smem_alloc_fail;
+	}
+	if (!einfo->tx_ch_desc) {
+		pr_err("%s: smem alloc of ch descriptor failed\n", __func__);
+		rc = -ENOMEM;
+		goto smem_alloc_fail;
+	}
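+	/*
+	 * The single SMEM descriptor item holds both channel descriptors;
+	 * the RX descriptor immediately follows the TX descriptor.
+	 */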
+	einfo->rx_ch_desc = einfo->tx_ch_desc + 1;
+
+	einfo->tx_fifo_size = SZ_16K;
+	einfo->tx_fifo = smem_alloc(SMEM_GLINK_NATIVE_XPRT_FIFO_0,
+							einfo->tx_fifo_size,
+							einfo->remote_proc_id,
+							SMEM_ITEM_CACHED_FLAG);
+	if (!einfo->tx_fifo) {
+		pr_err("%s: smem alloc of tx fifo failed\n", __func__);
+		rc = -ENOMEM;
+		goto smem_alloc_fail;
+	}
+
+	key = "qcom,qos-config";
+	phandle_node = of_parse_phandle(node, key, 0);
+	if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+							&einfo->xprt_cfg)))
+		parse_qos_dt_params(node, einfo);
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+								__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	einfo->irq_line = irq_line;
+	rc = request_irq(irq_line, irq_handler,
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name, einfo);
+	if (rc < 0) {
+		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+									rc);
+		goto request_irq_fail;
+	}
+	rc = enable_irq_wake(irq_line);
+	if (rc < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+								irq_line);
+
+	register_debugfs_info(einfo);
+	/* fake an interrupt on this edge to see if the remote side is up */
+	irq_handler(0, einfo);
+	return 0;
+
+request_irq_fail:
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+smem_alloc_fail:
+	kthread_flush_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+	tasklet_kill(&einfo->tasklet);
+kthread_fail:
+	iounmap(einfo->out_irq_reg);
+ioremap_fail:
+	mutex_lock(&probe_lock);
+	edge_infos[einfo->remote_proc_id] = NULL;
+	mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+static int glink_rpm_native_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	uint32_t irq_line;
+	uint32_t irq_mask;
+	struct resource *irq_r;
+	struct resource *msgram_r;
+	void __iomem *msgram;
+	char toc[RPM_TOC_SIZE];
+	uint32_t *tocp;
+	uint32_t num_toc_entries;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	subsys_name = "rpm";
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-reg-base";
+	irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!irq_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "msgram";
+	msgram_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!msgram_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	if (subsys_name_to_id(subsys_name) == -ENODEV) {
+		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+	init_xprt_cfg(einfo, subsys_name);
+	init_xprt_if(einfo);
+	spin_lock_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	kthread_init_work(&einfo->kwork, rx_worker);
+	kthread_init_worker(&einfo->kworker);
+	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
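+	/*
+	 * The RPM edge is intentless: each packet must be committed in a
+	 * single FIFO write (see the intentless checks in tx_data()).
+	 */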
+	einfo->intentless = true;
+	einfo->read_from_fifo = memcpy32_fromio;
+	einfo->write_to_fifo = memcpy32_toio;
+	init_srcu_struct(&einfo->use_ref);
+	spin_lock_init(&einfo->rx_lock);
+	INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+	mutex_lock(&probe_lock);
+	if (edge_infos[einfo->remote_proc_id]) {
+		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+								subsys_name);
+		rc = -ENODEV;
+		mutex_unlock(&probe_lock);
+		goto invalid_key;
+	}
+	edge_infos[einfo->remote_proc_id] = einfo;
+	mutex_unlock(&probe_lock);
+
+	einfo->out_irq_mask = irq_mask;
+	einfo->out_irq_reg = ioremap_nocache(irq_r->start,
+							resource_size(irq_r));
+	if (!einfo->out_irq_reg) {
+		pr_err("%s: unable to map irq reg\n", __func__);
+		rc = -ENOMEM;
+		goto irq_ioremap_fail;
+	}
+
+	msgram = ioremap_nocache(msgram_r->start, resource_size(msgram_r));
+	if (!msgram) {
+		pr_err("%s: unable to map msgram\n", __func__);
+		rc = -ENOMEM;
+		goto msgram_ioremap_fail;
+	}
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+						"smem_native_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread_run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	memcpy32_fromio(toc, msgram + resource_size(msgram_r) - RPM_TOC_SIZE,
+								RPM_TOC_SIZE);
+	tocp = (uint32_t *)toc;
+	if (*tocp != RPM_TOC_ID) {
+		rc = -ENODEV;
+		pr_err("%s: TOC id %d is not valid\n", __func__, *tocp);
+		goto toc_init_fail;
+	}
+	++tocp;
+	num_toc_entries = *tocp;
+	if (num_toc_entries > RPM_MAX_TOC_ENTRIES) {
+		rc = -ENODEV;
+		pr_err("%s: %d is too many toc entries\n", __func__,
+							num_toc_entries);
+		goto toc_init_fail;
+	}
+	++tocp;
+
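+	/*
+	 * Each TOC entry is three 32-bit words: a FIFO id, the FIFO's offset
+	 * into msgram, and its size.
+	 */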
+	for (rc = 0; rc < num_toc_entries; ++rc) {
+		if (*tocp != RPM_TX_FIFO_ID) {
+			tocp += 3;
+			continue;
+		}
+		++tocp;
+		einfo->tx_ch_desc = msgram + *tocp;
+		einfo->tx_fifo = einfo->tx_ch_desc + 1;
+		if ((uintptr_t)einfo->tx_fifo >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid tx fifo address\n", __func__);
+			einfo->tx_fifo = NULL;
+			break;
+		}
+		++tocp;
+		einfo->tx_fifo_size = *tocp;
+		if (einfo->tx_fifo_size > resource_size(msgram_r) ||
+			(uintptr_t)(einfo->tx_fifo + einfo->tx_fifo_size) >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid tx fifo size\n", __func__);
+			einfo->tx_fifo = NULL;
+			break;
+		}
+		break;
+	}
+	if (!einfo->tx_fifo) {
+		rc = -ENODEV;
+		pr_err("%s: tx fifo not found\n", __func__);
+		goto toc_init_fail;
+	}
+
+	tocp = (uint32_t *)toc;
+	tocp += 2;
+	for (rc = 0; rc < num_toc_entries; ++rc) {
+		if (*tocp != RPM_RX_FIFO_ID) {
+			tocp += 3;
+			continue;
+		}
+		++tocp;
+		einfo->rx_ch_desc = msgram + *tocp;
+		einfo->rx_fifo = einfo->rx_ch_desc + 1;
+		if ((uintptr_t)einfo->rx_fifo >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid rx fifo address\n", __func__);
+			einfo->rx_fifo = NULL;
+			break;
+		}
+		++tocp;
+		einfo->rx_fifo_size = *tocp;
+		if (einfo->rx_fifo_size > resource_size(msgram_r) ||
+			(uintptr_t)(einfo->rx_fifo + einfo->rx_fifo_size) >
+				(uintptr_t)(msgram + resource_size(msgram_r))) {
+			pr_err("%s: invalid rx fifo size\n", __func__);
+			einfo->rx_fifo = NULL;
+			break;
+		}
+		break;
+	}
+	if (!einfo->rx_fifo) {
+		rc = -ENODEV;
+		pr_err("%s: rx fifo not found\n", __func__);
+		goto toc_init_fail;
+	}
+
+	einfo->tx_ch_desc->write_index = 0;
+	einfo->rx_ch_desc->read_index = 0;
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+								__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	einfo->irq_line = irq_line;
+	rc = request_irq(irq_line, irq_handler,
+			IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name, einfo);
+	if (rc < 0) {
+		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+									rc);
+		goto request_irq_fail;
+	}
+	rc = enable_irq_wake(irq_line);
+	if (rc < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+								irq_line);
+
+	register_debugfs_info(einfo);
+	einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	return 0;
+
+request_irq_fail:
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+toc_init_fail:
+	kthread_flush_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+	tasklet_kill(&einfo->tasklet);
+kthread_fail:
+	iounmap(msgram);
+msgram_ioremap_fail:
+	iounmap(einfo->out_irq_reg);
+irq_ioremap_fail:
+	mutex_lock(&probe_lock);
+	edge_infos[einfo->remote_proc_id] = NULL;
+	mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+static int glink_mailbox_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	uint32_t irq_line;
+	uint32_t irq_mask;
+	struct resource *irq_r;
+	struct resource *mbox_loc_r;
+	struct resource *mbox_size_r;
+	struct resource *rx_reset_r;
+	void *mbox_loc;
+	void *mbox_size;
+	struct mailbox_config_info *mbox_cfg;
+	uint32_t mbox_cfg_size;
+	phys_addr_t cfg_p_addr;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "interrupts";
+	irq_line = irq_of_parse_and_map(node, 0);
+	if (!irq_line) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,irq-mask";
+	rc = of_property_read_u32(node, key, &irq_mask);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-reg-base";
+	irq_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!irq_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "mbox-loc-addr";
+	mbox_loc_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!mbox_loc_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "mbox-loc-size";
+	mbox_size_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!mbox_size_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "irq-rx-reset";
+	rx_reset_r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!rx_reset_r) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,tx-ring-size";
+	rc = of_property_read_u32(node, key, &einfo->tx_fifo_size);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	key = "qcom,rx-ring-size";
+	rc = of_property_read_u32(node, key, &einfo->rx_fifo_size);
+	if (rc) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+
+	if (subsys_name_to_id(subsys_name) == -ENODEV) {
+		pr_err("%s: unknown subsystem: %s\n", __func__, subsys_name);
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	einfo->remote_proc_id = subsys_name_to_id(subsys_name);
+
+	init_xprt_cfg(einfo, subsys_name);
+	einfo->xprt_cfg.name = "mailbox";
+	init_xprt_if(einfo);
+	spin_lock_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	kthread_init_work(&einfo->kwork, rx_worker);
+	kthread_init_worker(&einfo->kworker);
+	tasklet_init(&einfo->tasklet, rx_worker_atomic, (unsigned long)einfo);
+	einfo->read_from_fifo = read_from_fifo;
+	einfo->write_to_fifo = write_to_fifo;
+	init_srcu_struct(&einfo->use_ref);
+	spin_lock_init(&einfo->rx_lock);
+	INIT_LIST_HEAD(&einfo->deferred_cmds);
+
+	mutex_lock(&probe_lock);
+	if (edge_infos[einfo->remote_proc_id]) {
+		pr_err("%s: duplicate subsys %s is not valid\n", __func__,
+								subsys_name);
+		rc = -ENODEV;
+		mutex_unlock(&probe_lock);
+		goto invalid_key;
+	}
+	edge_infos[einfo->remote_proc_id] = einfo;
+	mutex_unlock(&probe_lock);
+
+	einfo->out_irq_mask = irq_mask;
+	einfo->out_irq_reg = ioremap_nocache(irq_r->start,
+							resource_size(irq_r));
+	if (!einfo->out_irq_reg) {
+		pr_err("%s: unable to map irq reg\n", __func__);
+		rc = -ENOMEM;
+		goto irq_ioremap_fail;
+	}
+
+	mbox_loc = ioremap_nocache(mbox_loc_r->start,
+						resource_size(mbox_loc_r));
+	if (!mbox_loc) {
+		pr_err("%s: unable to map mailbox location reg\n", __func__);
+		rc = -ENOMEM;
+		goto mbox_loc_ioremap_fail;
+	}
+
+	mbox_size = ioremap_nocache(mbox_size_r->start,
+						resource_size(mbox_size_r));
+	if (!mbox_size) {
+		pr_err("%s: unable to map mailbox size reg\n", __func__);
+		rc = -ENOMEM;
+		goto mbox_size_ioremap_fail;
+	}
+
+	einfo->rx_reset_reg = ioremap_nocache(rx_reset_r->start,
+						resource_size(rx_reset_r));
+	if (!einfo->rx_reset_reg) {
+		pr_err("%s: unable to map rx reset reg\n", __func__);
+		rc = -ENOMEM;
+		goto rx_reset_ioremap_fail;
+	}
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+						"smem_native_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread_run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	mbox_cfg_size = sizeof(*mbox_cfg) + einfo->tx_fifo_size +
+							einfo->rx_fifo_size;
+	mbox_cfg = smem_alloc(SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR,
+							mbox_cfg_size,
+							einfo->remote_proc_id,
+							0);
+	if (PTR_ERR(mbox_cfg) == -EPROBE_DEFER) {
+		rc = -EPROBE_DEFER;
+		goto smem_alloc_fail;
+	}
+	if (!mbox_cfg) {
+		pr_err("%s: smem alloc of mailbox struct failed\n", __func__);
+		rc = -ENOMEM;
+		goto smem_alloc_fail;
+	}
+	einfo->mailbox = mbox_cfg;
+	einfo->tx_ch_desc = (struct channel_desc *)(&mbox_cfg->tx_read_index);
+	einfo->rx_ch_desc = (struct channel_desc *)(&mbox_cfg->rx_read_index);
+	mbox_cfg->tx_size = einfo->tx_fifo_size;
+	mbox_cfg->rx_size = einfo->rx_fifo_size;
+	einfo->tx_fifo = &mbox_cfg->fifo[0];
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+								__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	einfo->irq_line = irq_line;
+	rc = request_irq(irq_line, irq_handler,
+			IRQF_TRIGGER_HIGH | IRQF_NO_SUSPEND | IRQF_SHARED,
+			node->name, einfo);
+	if (rc < 0) {
+		pr_err("%s: request_irq on %d failed: %d\n", __func__, irq_line,
+									rc);
+		goto request_irq_fail;
+	}
+	rc = enable_irq_wake(irq_line);
+	if (rc < 0)
+		pr_err("%s: enable_irq_wake() failed on %d\n", __func__,
+								irq_line);
+
+	register_debugfs_info(einfo);
+
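+	/*
+	 * Hand the mailbox configuration to the remote: publish its size and
+	 * 64-bit physical address (split across two 32-bit writes), then
+	 * raise an interrupt so the remote can pick it up.
+	 */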
+	writel_relaxed(mbox_cfg_size, mbox_size);
+	cfg_p_addr = smem_virt_to_phys(mbox_cfg);
+	writel_relaxed(lower_32_bits(cfg_p_addr), mbox_loc);
+	writel_relaxed(upper_32_bits(cfg_p_addr), mbox_loc + 4);
+	send_irq(einfo);
+	iounmap(mbox_size);
+	iounmap(mbox_loc);
+	return 0;
+
+request_irq_fail:
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+smem_alloc_fail:
+	kthread_flush_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+	tasklet_kill(&einfo->tasklet);
+kthread_fail:
+	iounmap(einfo->rx_reset_reg);
+rx_reset_ioremap_fail:
+	iounmap(mbox_size);
+mbox_size_ioremap_fail:
+	iounmap(mbox_loc);
+mbox_loc_ioremap_fail:
+	iounmap(einfo->out_irq_reg);
+irq_ioremap_fail:
+	mutex_lock(&probe_lock);
+	edge_infos[einfo->remote_proc_id] = NULL;
+	mutex_unlock(&probe_lock);
+invalid_key:
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+/**
+ * debug_edge() - generates formatted text output displaying current edge state
+ * @s:	File to send the output to.
+ */
+static void debug_edge(struct seq_file *s)
+{
+	struct edge_info *einfo;
+	struct glink_dbgfs_data *dfs_d;
+
+	dfs_d = s->private;
+	einfo = dfs_d->priv_data;
+
+/*
+ * Formatted, human-readable edge state output, i.e.:
+ * TX/RX fifo information:
+ID|EDGE      |TX READ   |TX WRITE  |TX SIZE   |RX READ   |RX WRITE  |RX SIZE
+-------------------------------------------------------------------------------
+01|mpss      |0x00000128|0x00000128|0x00000800|0x00000256|0x00000256|0x00001000
+ *
+ * Interrupt information:
+ * EDGE      |TX INT    |RX INT
+ * --------------------------------
+ * mpss      |0x00000006|0x00000008
+ */
+	seq_puts(s, "TX/RX fifo information:\n");
+	seq_printf(s, "%2s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s|%-10s\n",
+								"ID",
+								"EDGE",
+								"TX READ",
+								"TX WRITE",
+								"TX SIZE",
+								"RX READ",
+								"RX WRITE",
+								"RX SIZE");
+	seq_puts(s,
+		"-------------------------------------------------------------------------------\n");
+	if (!einfo)
+		return;
+
+	seq_printf(s, "%02i|%-10s|", einfo->remote_proc_id,
+					einfo->xprt_cfg.edge);
+	if (!einfo->rx_fifo)
+		seq_puts(s, "Link Not Up\n");
+	else
+		seq_printf(s, "0x%08X|0x%08X|0x%08X|0x%08X|0x%08X|0x%08X\n",
+						einfo->tx_ch_desc->read_index,
+						einfo->tx_ch_desc->write_index,
+						einfo->tx_fifo_size,
+						einfo->rx_ch_desc->read_index,
+						einfo->rx_ch_desc->write_index,
+						einfo->rx_fifo_size);
+
+	seq_puts(s, "\nInterrupt information:\n");
+	seq_printf(s, "%-10s|%-10s|%-10s\n", "EDGE", "TX INT", "RX INT");
+	seq_puts(s, "--------------------------------\n");
+	seq_printf(s, "%-10s|0x%08X|0x%08X\n", einfo->xprt_cfg.edge,
+						einfo->tx_irq_count,
+						einfo->rx_irq_count);
+}
+
+/**
+ * register_debugfs_info() - initialize debugfs device entries
+ * @einfo:	Pointer to specific edge_info for which register is called.
+ */
+static void register_debugfs_info(struct edge_info *einfo)
+{
+	struct glink_dbgfs dfs;
+	char *curr_dir_name;
+	int dir_name_len;
+
+	dir_name_len = strlen(einfo->xprt_cfg.edge) +
+				strlen(einfo->xprt_cfg.name) + 2;
+	curr_dir_name = kmalloc(dir_name_len, GFP_KERNEL);
+	if (!curr_dir_name) {
+		GLINK_ERR("%s: Memory allocation failed\n", __func__);
+		return;
+	}
+
+	snprintf(curr_dir_name, dir_name_len, "%s_%s",
+				einfo->xprt_cfg.edge, einfo->xprt_cfg.name);
+	dfs.curr_name = curr_dir_name;
+	dfs.par_name = "xprt";
+	dfs.b_dir_create = false;
+	glink_debugfs_create("XPRT_INFO", debug_edge,
+					&dfs, einfo, false);
+	kfree(curr_dir_name);
+}
+
+#else
+static void register_debugfs_info(struct edge_info *einfo)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static const struct of_device_id smem_match_table[] = {
+	{ .compatible = "qcom,glink-smem-native-xprt" },
+	{},
+};
+
+static struct platform_driver glink_smem_native_driver = {
+	.probe = glink_smem_native_probe,
+	.driver = {
+		.name = "msm_glink_smem_native_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = smem_match_table,
+	},
+};
+
+static const struct of_device_id rpm_match_table[] = {
+	{ .compatible = "qcom,glink-rpm-native-xprt" },
+	{},
+};
+
+static struct platform_driver glink_rpm_native_driver = {
+	.probe = glink_rpm_native_probe,
+	.driver = {
+		.name = "msm_glink_rpm_native_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = rpm_match_table,
+	},
+};
+
+static const struct of_device_id mailbox_match_table[] = {
+	{ .compatible = "qcom,glink-mailbox-xprt" },
+	{},
+};
+
+static struct platform_driver glink_mailbox_driver = {
+	.probe = glink_mailbox_probe,
+	.driver = {
+		.name = "msm_glink_mailbox_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = mailbox_match_table,
+	},
+};
+
+static int __init glink_smem_native_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&glink_smem_native_driver);
+	if (rc) {
+		pr_err("%s: glink_smem_native_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	rc = platform_driver_register(&glink_rpm_native_driver);
+	if (rc) {
+		pr_err("%s: glink_rpm_native_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	rc = platform_driver_register(&glink_mailbox_driver);
+	if (rc) {
+		pr_err("%s: glink_mailbox_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+arch_initcall(glink_smem_native_xprt_init);
+
+MODULE_DESCRIPTION("MSM G-Link SMEM Native Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
new file mode 100644
index 0000000..66caa6e
--- /dev/null
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -0,0 +1,2194 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/wait.h>
+#include <linux/component.h>
+#include <soc/qcom/tracer_pkt.h>
+#include <sound/wcd-dsp-mgr.h>
+#include <sound/wcd-spi.h>
+#include "glink_core_if.h"
+#include "glink_private.h"
+#include "glink_xprt_if.h"
+
+#define XPRT_NAME "spi"
+#define FIFO_ALIGNMENT 16
+#define FIFO_FULL_RESERVE 8
+#define TX_BLOCKED_CMD_RESERVE 16
+#define TRACER_PKT_FEATURE BIT(2)
+#define DEFAULT_FIFO_SIZE 1024
+#define SHORT_PKT_SIZE 16
+#define XPRT_ALIGNMENT 4
+
+#define MAX_INACTIVE_CYCLES 50
+#define POLL_INTERVAL_US 500
+
+#define ACTIVE_TX BIT(0)
+#define ACTIVE_RX BIT(1)
+
+#define ID_MASK 0xFFFFFF
+/**
+ * enum command_types - definition of the types of commands sent/received
+ * @VERSION_CMD:		Version and feature set supported
+ * @VERSION_ACK_CMD:		Response for @VERSION_CMD
+ * @OPEN_CMD:			Open a channel
+ * @CLOSE_CMD:			Close a channel
+ * @OPEN_ACK_CMD:		Response to @OPEN_CMD
+ * @CLOSE_ACK_CMD:		Response for @CLOSE_CMD
+ * @RX_INTENT_CMD:		RX intent for a channel is queued
+ * @RX_DONE_CMD:		Use of RX intent for a channel is complete
+ * @RX_DONE_W_REUSE_CMD:	Same as @RX_DONE_CMD but also reuse the used intent
+ * @RX_INTENT_REQ_CMD:		Request to have RX intent queued
+ * @RX_INTENT_REQ_ACK_CMD:	Response for @RX_INTENT_REQ_CMD
+ * @TX_DATA_CMD:		Start of a data transfer
+ * @TX_DATA_CONT_CMD:		Continuation or end of a data transfer
+ * @READ_NOTIF_CMD:		Request for a notification when this cmd is read
+ * @SIGNALS_CMD:		Sideband signals
+ * @TRACER_PKT_CMD:		Start of a Tracer Packet Command
+ * @TRACER_PKT_CONT_CMD:	Continuation or end of a Tracer Packet Command
+ * @TX_SHORT_DATA_CMD:		Transmit short packets
+ */
+enum command_types {
+	VERSION_CMD,
+	VERSION_ACK_CMD,
+	OPEN_CMD,
+	CLOSE_CMD,
+	OPEN_ACK_CMD,
+	CLOSE_ACK_CMD,
+	RX_INTENT_CMD,
+	RX_DONE_CMD,
+	RX_DONE_W_REUSE_CMD,
+	RX_INTENT_REQ_CMD,
+	RX_INTENT_REQ_ACK_CMD,
+	TX_DATA_CMD,
+	TX_DATA_CONT_CMD,
+	READ_NOTIF_CMD,
+	SIGNALS_CMD,
+	TRACER_PKT_CMD,
+	TRACER_PKT_CONT_CMD,
+	TX_SHORT_DATA_CMD,
+};
+
+/**
+ * struct glink_cmpnt - Component to cache WDSP component and its operations
+ * @master_dev:	Device structure corresponding to WDSP device.
+ * @master_ops:	Operations supported by the WDSP device.
+ */
+struct glink_cmpnt {
+	struct device *master_dev;
+	struct wdsp_mgr_ops *master_ops;
+};
+
+/**
+ * struct edge_info - local information for managing a single complete edge
+ * @xprt_if:			The transport interface registered with the
+ *				glink core associated with this edge.
+ * @xprt_cfg:			The transport configuration for the glink core
+ *				associated with this edge.
+ * @subsys_name:		Name of the remote subsystem in the edge.
+ * @spi_dev:			Pointer to the connecting SPI device.
+ * @fifo_size:			Size of the FIFO at the remote end.
+ * @tx_fifo_start:		Base Address of the TX FIFO.
+ * @tx_fifo_end:		End Address of the TX FIFO.
+ * @rx_fifo_start:		Base Address of the RX FIFO.
+ * @rx_fifo_end:		End Address of the RX FIFO.
+ * @tx_fifo_read_reg_addr:	Address of the TX FIFO Read Index Register.
+ * @tx_fifo_write_reg_addr:	Address of the TX FIFO Write Index Register.
+ * @rx_fifo_read_reg_addr:	Address of the RX FIFO Read Index Register.
+ * @rx_fifo_write_reg_addr:	Address of the RX FIFO Write Index Register.
+ * @kwork:			Work to be executed when receiving data.
+ * @kworker:			Handle to the entity processing @kwork.
+ * @task:			Handle to the task context that runs @kworker.
+ * @use_ref:			Active users of this transport grab a
+ *				reference. Used for SSR synchronization.
+ * @in_ssr:			Signals if this transport is in ssr.
+ * @write_lock:			Lock to serialize write/tx operation.
+ * @tx_blocked_queue:		Queue of entities waiting for the remote side to
+ *				signal the resumption of TX.
+ * @tx_resume_needed:		A tx resume signal needs to be sent to the glink
+ *				core.
+ * @tx_blocked_signal_sent:	Flag to indicate the flush signal has already
+ *				been sent, and a response is pending from the
+ *				remote side.  Protected by @write_lock.
+ * @num_pw_states:		Size of @ramp_time_us.
+ * @ramp_time_us:		Array of ramp times in microseconds where array
+ *				index position represents a power state.
+ * @activity_flag:		Flag indicating active TX and RX.
+ * @activity_lock:		Lock to synchronize access to activity flag.
+ * @cmpnt:			Component to interface with the remote device.
+ */
+struct edge_info {
+	struct list_head list;
+	struct glink_transport_if xprt_if;
+	struct glink_core_transport_cfg xprt_cfg;
+	char subsys_name[GLINK_NAME_SIZE];
+	struct spi_device *spi_dev;
+
+	uint32_t fifo_size;
+	uint32_t tx_fifo_start;
+	uint32_t tx_fifo_end;
+	uint32_t rx_fifo_start;
+	uint32_t rx_fifo_end;
+	unsigned int tx_fifo_read_reg_addr;
+	unsigned int tx_fifo_write_reg_addr;
+	unsigned int rx_fifo_read_reg_addr;
+	unsigned int rx_fifo_write_reg_addr;
+
+	struct kthread_work kwork;
+	struct kthread_worker kworker;
+	struct task_struct *task;
+	struct srcu_struct use_ref;
+	bool in_ssr;
+	struct mutex write_lock;
+	wait_queue_head_t tx_blocked_queue;
+	bool tx_resume_needed;
+	bool tx_blocked_signal_sent;
+
+	uint32_t num_pw_states;
+	unsigned long *ramp_time_us;
+
+	uint32_t activity_flag;
+	spinlock_t activity_lock;
+
+	struct glink_cmpnt cmpnt;
+};
+
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features);
+static DEFINE_SPINLOCK(edge_infos_lock);
+static LIST_HEAD(edge_infos);
+static struct glink_core_version versions[] = {
+	{1, TRACER_PKT_FEATURE, negotiate_features_v1},
+};
+
+/**
+ * negotiate_features_v1() - determine what features of a version can be used
+ * @if_ptr:	The transport for which features are negotiated for.
+ * @version:	The version negotiated.
+ * @features:	The set of requested features.
+ *
+ * Return: What set of the requested features can be supported.
+ */
+static uint32_t negotiate_features_v1(struct glink_transport_if *if_ptr,
+				      const struct glink_core_version *version,
+				      uint32_t features)
+{
+	return features & version->features;
+}
+
+/**
+ * wdsp_suspend() - Vote for the WDSP device suspend
+ * @cmpnt:	Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_suspend(struct glink_cmpnt *cmpnt)
+{
+	int rc = 0;
+
+	if (cmpnt && cmpnt->master_dev &&
+	    cmpnt->master_ops && cmpnt->master_ops->suspend)
+		rc = cmpnt->master_ops->suspend(cmpnt->master_dev);
+	return rc;
+}
+
+/**
+ * wdsp_resume() - Vote for the WDSP device resume
+ * @cmpnt:	Component to identify the WDSP device.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int wdsp_resume(struct glink_cmpnt *cmpnt)
+{
+	int rc = 0;
+
+	if (cmpnt && cmpnt->master_dev &&
+	    cmpnt->master_ops && cmpnt->master_ops->resume)
+		rc = cmpnt->master_ops->resume(cmpnt->master_dev);
+	return rc;
+}
+
+/**
+ * glink_spi_xprt_set_poll_mode() - Set the transport to polling mode
+ * @einfo:	Edge information corresponding to the transport.
+ *
+ * This helper function indicates the start of RX polling. It prevents
+ * the system from suspending and keeps polling for RX for a pre-defined
+ * duration.
+ */
+static void glink_spi_xprt_set_poll_mode(struct edge_info *einfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag |= ACTIVE_RX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		wdsp_resume(&einfo->cmpnt);
+}
+
+/**
+ * glink_spi_xprt_set_irq_mode() - Set the transport to IRQ mode
+ * @einfo:	Edge information corresponding to the transport.
+ *
+ * This helper indicates the end of RX polling. It allows the system to
+ * suspend; new RX data is then handled only through an IRQ.
+ */
+static void glink_spi_xprt_set_irq_mode(struct edge_info *einfo)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag &= ~ACTIVE_RX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+}
+
+/**
+ * glink_spi_xprt_rx_data() - Receive data over SPI bus
+ * @einfo:	Edge from which the data has to be received.
+ * @src:	Source Address of the RX data.
+ * @dst:	Address of the destination RX buffer.
+ * @size:	Size of the RX data.
+ *
+ * This function is used to receive data or command as a byte stream from
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_rx_data(struct edge_info *einfo, void *src,
+				  void *dst, uint32_t size)
+{
+	struct wcd_spi_msg spi_msg;
+
+	memset(&spi_msg, 0, sizeof(spi_msg));
+	spi_msg.data = dst;
+	spi_msg.remote_addr = (uint32_t)(size_t)src;
+	spi_msg.len = (size_t)size;
+	return wcd_spi_data_read(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_tx_data() - Transmit data over SPI bus
+ * @einfo:	Edge to which the data has to be transmitted.
+ * @src:	Address of the TX buffer.
+ * @dst:	Destination address of the TX data.
+ * @size:	Size of the TX data.
+ *
+ * This function is used to transmit data or command as a byte stream to
+ * the remote subsystem over the SPI bus.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_tx_data(struct edge_info *einfo, void *src,
+				  void *dst, uint32_t size)
+{
+	struct wcd_spi_msg spi_msg;
+
+	memset(&spi_msg, 0, sizeof(spi_msg));
+	spi_msg.data = src;
+	spi_msg.remote_addr = (uint32_t)(size_t)dst;
+	spi_msg.len = (size_t)size;
+	return wcd_spi_data_write(einfo->spi_dev, &spi_msg);
+}
+
+/**
+ * glink_spi_xprt_reg_read() - Read the TX/RX FIFO Read/Write Index registers
+ * @einfo:	Edge from which the registers have to be read.
+ * @reg_addr:	Address of the register to be read.
+ * @data:	Buffer into which the register data has to be read.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_read(struct edge_info *einfo, u32 reg_addr,
+				   uint32_t *data)
+{
+	int rc;
+
+	rc = glink_spi_xprt_rx_data(einfo, (void *)(unsigned long)reg_addr,
+				    data, sizeof(*data));
+	if (!rc)
+		*data = *data & ID_MASK;
+	return rc;
+}
+
+/**
+ * glink_spi_xprt_reg_write() - Write the TX/RX FIFO Read/Write Index registers
+ * @einfo:	Edge to which the registers have to be written.
+ * @reg_addr:	Address of the registers to be written.
+ * @data:	Data to be written to the registers.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int glink_spi_xprt_reg_write(struct edge_info *einfo, u32 reg_addr,
+					uint32_t data)
+{
+	return glink_spi_xprt_tx_data(einfo, &data,
+				(void *)(unsigned long)reg_addr, sizeof(data));
+}
+
+/**
+ * glink_spi_xprt_write_avail() - Available Write Space in the remote side
+ * @einfo:	Edge information corresponding to the remote side.
+ *
+ * This function reads the TX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available write space.
+ *
+ * Return: 0 on error, available write space on success.
+ */
+static int glink_spi_xprt_write_avail(struct edge_info *einfo)
+{
+	uint32_t read_id;
+	uint32_t write_id;
+	int write_avail;
+	int ret;
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_read_reg_addr,
+				   &read_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s tx_fifo_read_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->tx_fifo_read_reg_addr);
+		return 0;
+	}
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_write_reg_addr,
+				&write_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->tx_fifo_write_reg_addr);
+		return 0;
+	}
+
+	if (!read_id || !write_id)
+		return 0;
+
+	if (unlikely(!einfo->tx_fifo_start))
+		einfo->tx_fifo_start = write_id;
+
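+	/* read_id/write_id are addresses into a circular TX FIFO */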
+	if (read_id > write_id)
+		write_avail = read_id - write_id;
+	else
+		write_avail = einfo->fifo_size - (write_id - read_id);
+
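+	/* Reserve space so a tx-blocked signal can always be sent */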
+	if (write_avail < FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE)
+		write_avail = 0;
+	else
+		write_avail -= FIFO_FULL_RESERVE + TX_BLOCKED_CMD_RESERVE;
+
+	return write_avail;
+}
+
+/**
+ * glink_spi_xprt_read_avail() - Available Read Data from the remote side
+ * @einfo:	Edge information corresponding to the remote side.
+ *
+ * This function reads the RX FIFO Read & Write Index registers from the
+ * remote subsystem and calculates the available read data size.
+ *
+ * Return: 0 on error, available read data on success.
+ */
+static int glink_spi_xprt_read_avail(struct edge_info *einfo)
+{
+	uint32_t read_id;
+	uint32_t write_id;
+	int read_avail;
+	int ret;
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_read_reg_addr,
+				   &read_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->rx_fifo_read_reg_addr);
+		return 0;
+	}
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_write_reg_addr,
+				&write_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s rx_fifo_write_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->rx_fifo_write_reg_addr);
+		return 0;
+	}
+
+	if (!read_id || !write_id)
+		return 0;
+
+	if (unlikely(!einfo->rx_fifo_start))
+		einfo->rx_fifo_start = read_id;
+
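+	/* Handle wrap-around of the circular RX FIFO indices */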
+	if (read_id <= write_id)
+		read_avail = write_id - read_id;
+	else
+		read_avail = einfo->fifo_size - (read_id - write_id);
+	return read_avail;
+}
+
+/**
+ * glink_spi_xprt_rx_cmd() - Receive G-Link commands
+ * @einfo:	Edge information corresponding to the remote side.
+ * @dst:	Destination buffer into which the commands are read.
+ * @size:	Size of the data to be read.
+ *
+ * This function is used to receive the commands from the RX FIFO. This
+ * function updates the RX FIFO Read Index after reading the data.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_rx_cmd(struct edge_info *einfo, void *dst,
+				 uint32_t size)
+{
+	uint32_t read_id;
+	uint32_t size_to_read = size;
+	uint32_t offset = 0;
+	int ret;
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->rx_fifo_read_reg_addr,
+				   &read_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s rx_fifo_read_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->rx_fifo_read_reg_addr);
+		return ret;
+	}
+
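+	/* Read in at most two chunks, wrapping at the end of the FIFO */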
+	do {
+		if ((read_id + size_to_read) >=
+		    (einfo->rx_fifo_start + einfo->fifo_size))
+			size_to_read = einfo->rx_fifo_start + einfo->fifo_size
+					- read_id;
+		ret = glink_spi_xprt_rx_data(einfo, (void *)(size_t)read_id,
+					     dst + offset, size_to_read);
+		if (ret < 0) {
+			pr_err("%s: Error %d reading data\n", __func__, ret);
+			return ret;
+		}
+		read_id += size_to_read;
+		offset += size_to_read;
+		if (read_id >= (einfo->rx_fifo_start + einfo->fifo_size))
+			read_id = einfo->rx_fifo_start;
+		size_to_read = size - offset;
+	} while (size_to_read);
+
+	ret = glink_spi_xprt_reg_write(einfo, einfo->rx_fifo_read_reg_addr,
+				read_id);
+	if (ret < 0)
+		pr_err("%s: Error %d writing %s rx_fifo_read_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->rx_fifo_read_reg_addr);
+	return ret;
+}
+
+/**
+ * glink_spi_xprt_tx_cmd_safe() - Transmit G-Link commands
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ * @src:	Source buffer containing the G-Link command.
+ * @size:	Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * must be called with einfo->write_lock locked.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd_safe(struct edge_info *einfo, void *src,
+				      uint32_t size)
+{
+	uint32_t write_id;
+	uint32_t size_to_write = size;
+	uint32_t offset = 0;
+	int ret;
+
+	ret = glink_spi_xprt_reg_read(einfo, einfo->tx_fifo_write_reg_addr,
+				&write_id);
+	if (ret < 0) {
+		pr_err("%s: Error %d reading %s tx_fifo_write_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->tx_fifo_write_reg_addr);
+		return ret;
+	}
+
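+	/* Write in at most two chunks, wrapping at the end of the FIFO */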
+	do {
+		if ((write_id + size_to_write) >=
+		    (einfo->tx_fifo_start + einfo->fifo_size))
+			size_to_write = einfo->tx_fifo_start + einfo->fifo_size
+					- write_id;
+		ret = glink_spi_xprt_tx_data(einfo, src + offset,
+				(void *)(size_t)write_id, size_to_write);
+		if (ret < 0) {
+			pr_err("%s: Error %d writing data\n", __func__, ret);
+			return ret;
+		}
+		write_id += size_to_write;
+		offset += size_to_write;
+		if (write_id >= (einfo->tx_fifo_start + einfo->fifo_size))
+			write_id = einfo->tx_fifo_start;
+		size_to_write = size - offset;
+	} while (size_to_write);
+
+	ret = glink_spi_xprt_reg_write(einfo, einfo->tx_fifo_write_reg_addr,
+				write_id);
+	if (ret < 0)
+		pr_err("%s: Error %d writing %s tx_fifo_write_reg_addr %d\n",
+			__func__, ret, einfo->xprt_cfg.edge,
+			einfo->tx_fifo_write_reg_addr);
+	return ret;
+}
+
+/**
+ * send_tx_blocked_signal() - Send flow control request message
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ *
+ * This function is used to send a message to the remote subsystem indicating
+ * that the local subsystem is waiting for the write space. The remote
+ * subsystem on receiving this message will send a resume tx message.
+ */
+static void send_tx_blocked_signal(struct edge_info *einfo)
+{
+	struct read_notif_request {
+		uint16_t cmd;
+		uint16_t reserved;
+		uint32_t reserved2;
+		uint64_t reserved3;
+	};
+	struct read_notif_request read_notif_req = {0};
+
+	read_notif_req.cmd = READ_NOTIF_CMD;
+
+	if (!einfo->tx_blocked_signal_sent) {
+		einfo->tx_blocked_signal_sent = true;
+		glink_spi_xprt_tx_cmd_safe(einfo, &read_notif_req,
+					    sizeof(read_notif_req));
+	}
+}
+
+/**
+ * glink_spi_xprt_tx_cmd() - Transmit G-Link commands
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ * @src:	Source buffer containing the G-Link command.
+ * @size:	Size of the command to transmit.
+ *
+ * This function is used to transmit the G-Link commands. This function
+ * might sleep if the space is not available to transmit the command.
+ *
+ * Return: 0 on success, standard Linux error codes on error.
+ */
+static int glink_spi_xprt_tx_cmd(struct edge_info *einfo, void *src,
+				 uint32_t size)
+{
+	int ret;
+	DEFINE_WAIT(wait);
+
+	mutex_lock(&einfo->write_lock);
+	while (glink_spi_xprt_write_avail(einfo) < size) {
+		send_tx_blocked_signal(einfo);
+		prepare_to_wait(&einfo->tx_blocked_queue, &wait,
+				TASK_UNINTERRUPTIBLE);
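+		/* Re-check after arming the wait to avoid missing a wake-up */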
+		if (glink_spi_xprt_write_avail(einfo) < size &&
+		    !einfo->in_ssr) {
+			mutex_unlock(&einfo->write_lock);
+			schedule();
+			mutex_lock(&einfo->write_lock);
+		}
+		finish_wait(&einfo->tx_blocked_queue, &wait);
+		if (einfo->in_ssr) {
+			mutex_unlock(&einfo->write_lock);
+			return -EFAULT;
+		}
+	}
+	ret = glink_spi_xprt_tx_cmd_safe(einfo, src, size);
+	mutex_unlock(&einfo->write_lock);
+	return ret;
+}
+
+/**
+ * process_rx_data() - process received data from an edge
+ * @einfo:		The edge the data is received on.
+ * @cmd_id:		ID to specify the type of data.
+ * @rcid:		The remote channel id associated with the data.
+ * @intent_id:		The intent the data should be put in.
+ * @src:		Address of the source buffer from which the data
+ *			is read.
+ * @frag_size:		Size of the data fragment to read.
+ * @size_remaining:	Size of data left to be read in this packet.
+ */
+static void process_rx_data(struct edge_info *einfo, uint16_t cmd_id,
+			    uint32_t rcid, uint32_t intent_id, void *src,
+			    uint32_t frag_size, uint32_t size_remaining)
+{
+	struct glink_core_rx_intent *intent;
+	int rc = 0;
+
+	intent = einfo->xprt_if.glink_core_if_ptr->rx_get_pkt_ctx(
+				&einfo->xprt_if, rcid, intent_id);
+	if (intent == NULL) {
+		GLINK_ERR("%s: no intent for ch %d liid %d\n", __func__, rcid,
+			  intent_id);
+		return;
+	} else if (intent->data == NULL) {
+		GLINK_ERR("%s: intent for ch %d liid %d has no data buff\n",
+			  __func__, rcid, intent_id);
+		return;
+	} else if (intent->intent_size - intent->write_offset < frag_size ||
+		 intent->write_offset + size_remaining > intent->intent_size) {
+		GLINK_ERR("%s: rx data size:%d and remaining:%d %s %d %s:%d\n",
+			  __func__, frag_size, size_remaining,
+			  "will overflow ch", rcid, "intent", intent_id);
+		return;
+	}
+
+	if (cmd_id == TX_SHORT_DATA_CMD)
+		memcpy(intent->data + intent->write_offset, src, frag_size);
+	else
+		rc = glink_spi_xprt_rx_data(einfo, src,
+				intent->data + intent->write_offset, frag_size);
+	if (rc < 0) {
+		GLINK_ERR("%s: Error %d receiving data %d:%d:%d:%d\n",
+			  __func__, rc, rcid, intent_id, frag_size,
+			  size_remaining);
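+		/* Put the fragment back so the packet is not marked complete */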
+		size_remaining += frag_size;
+	} else {
+		intent->write_offset += frag_size;
+		intent->pkt_size += frag_size;
+
+		if (unlikely((cmd_id == TRACER_PKT_CMD ||
+			cmd_id == TRACER_PKT_CONT_CMD) && !size_remaining)) {
+			tracer_pkt_log_event(intent->data, GLINK_XPRT_RX);
+			intent->tracer_pkt = true;
+		}
+	}
+	einfo->xprt_if.glink_core_if_ptr->rx_put_pkt_ctx(&einfo->xprt_if,
+				rcid, intent, size_remaining ? false : true);
+}
+
+/**
+ * process_rx_cmd() - Process incoming G-Link commands
+ * @einfo:	Edge information corresponding to the remote subsystem.
+ * @rx_data:	Buffer which contains the G-Link commands to be processed.
+ * @rx_size:	Size of the buffer containing the series of G-Link commands.
+ *
+ * This function is used to parse and process a series of G-Link commands
+ * received in a buffer.
+ */
+static void process_rx_cmd(struct edge_info *einfo,
+			   void *rx_data, int rx_size)
+{
+	struct command {
+		uint16_t id;
+		uint16_t param1;
+		uint32_t param2;
+		uint32_t param3;
+		uint32_t param4;
+	};
+	struct intent_desc {
+		uint32_t size;
+		uint32_t id;
+		uint64_t addr;
+	};
+	struct rx_desc {
+		uint32_t size;
+		uint32_t size_left;
+		uint64_t addr;
+	};
+	struct rx_short_data_desc {
+		unsigned char data[SHORT_PKT_SIZE];
+	};
+	struct command *cmd;
+	struct intent_desc *intents;
+	struct rx_desc *rx_descp;
+	struct rx_short_data_desc *rx_sd_descp;
+	int offset = 0;
+	int rcu_id;
+	uint16_t rcid;
+	uint16_t name_len;
+	uint16_t prio;
+	char *name;
+	bool granted;
+	int i;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
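+	/*
+	 * The RX buffer holds a packed stream of command headers, each
+	 * optionally followed by a variable-size payload descriptor.
+	 */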
+	while (offset < rx_size) {
+		cmd = (struct command *)(rx_data + offset);
+		offset += sizeof(*cmd);
+		switch (cmd->id) {
+		case VERSION_CMD:
+			if (cmd->param3)
+				einfo->fifo_size = cmd->param3;
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version(
+				&einfo->xprt_if, cmd->param1, cmd->param2);
+			break;
+
+		case VERSION_ACK_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_version_ack(
+				&einfo->xprt_if, cmd->param1, cmd->param2);
+			break;
+
+		case OPEN_CMD:
+			rcid = cmd->param1;
+			name_len = (uint16_t)(cmd->param2 & 0xFFFF);
+			prio = (uint16_t)((cmd->param2 & 0xFFFF0000) >> 16);
+			name = (char *)(rx_data + offset);
+			offset += ALIGN(name_len, FIFO_ALIGNMENT);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_remote_open(
+				&einfo->xprt_if, rcid, name, prio);
+			break;
+
+		case CLOSE_CMD:
+			einfo->xprt_if.glink_core_if_ptr->
+					rx_cmd_ch_remote_close(
+						&einfo->xprt_if, cmd->param1);
+			break;
+
+		case OPEN_ACK_CMD:
+			prio = (uint16_t)(cmd->param2 & 0xFFFF);
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_open_ack(
+				&einfo->xprt_if, cmd->param1, prio);
+			break;
+
+		case CLOSE_ACK_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_ch_close_ack(
+					&einfo->xprt_if, cmd->param1);
+			break;
+
+		case RX_INTENT_CMD:
+			for (i = 0; i < cmd->param2; i++) {
+				intents = (struct intent_desc *)
+						(rx_data + offset);
+				offset += sizeof(*intents);
+				einfo->xprt_if.glink_core_if_ptr->
+					rx_cmd_remote_rx_intent_put_cookie(
+						&einfo->xprt_if, cmd->param1,
+						intents->id, intents->size,
+						(void *)(intents->addr));
+			}
+			break;
+
+		case RX_DONE_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+				&einfo->xprt_if, cmd->param1, cmd->param2,
+				false);
+			break;
+
+		case RX_INTENT_REQ_CMD:
+			einfo->xprt_if.glink_core_if_ptr->
+				rx_cmd_remote_rx_intent_req(
+					&einfo->xprt_if, cmd->param1,
+					cmd->param2);
+			break;
+
+		case RX_INTENT_REQ_ACK_CMD:
+			granted = cmd->param2 == 1 ? true : false;
+			einfo->xprt_if.glink_core_if_ptr->
+				rx_cmd_rx_intent_req_ack(&einfo->xprt_if,
+						cmd->param1, granted);
+			break;
+
+		case TX_DATA_CMD:
+		case TX_DATA_CONT_CMD:
+		case TRACER_PKT_CMD:
+		case TRACER_PKT_CONT_CMD:
+			rx_descp = (struct rx_desc *)(rx_data + offset);
+			offset += sizeof(*rx_descp);
+			process_rx_data(einfo, cmd->id,	cmd->param1,
+					cmd->param2, (void *)rx_descp->addr,
+					rx_descp->size,	rx_descp->size_left);
+			break;
+
+		case TX_SHORT_DATA_CMD:
+			rx_sd_descp = (struct rx_short_data_desc *)
+							(rx_data + offset);
+			offset += sizeof(*rx_sd_descp);
+			process_rx_data(einfo, cmd->id, cmd->param1,
+					cmd->param2, (void *)rx_sd_descp->data,
+					cmd->param3, cmd->param4);
+			break;
+
+		case READ_NOTIF_CMD:
+			break;
+
+		case SIGNALS_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_remote_sigs(
+				&einfo->xprt_if, cmd->param1, cmd->param2);
+			break;
+
+		case RX_DONE_W_REUSE_CMD:
+			einfo->xprt_if.glink_core_if_ptr->rx_cmd_tx_done(
+				&einfo->xprt_if, cmd->param1,
+				cmd->param2, true);
+			break;
+
+		default:
+			pr_err("Unrecognized command: %d\n", cmd->id);
+			break;
+		}
+	}
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * __rx_worker() - Receive commands on a specific edge
+ * @einfo:      Edge to process commands on.
+ *
+ * This function checks the size of data to be received, allocates the
+ * buffer for that data and reads the data from the remote subsystem
+ * into that buffer. This function then calls the process_rx_cmd() to
+ * parse the received G-Link command sequence. This function will also
+ * poll for the data for a predefined duration for performance reasons.
+ */
+static void __rx_worker(struct edge_info *einfo)
+{
+	uint32_t inactive_cycles = 0;
+	int rx_avail, rc;
+	void *rx_data;
+	int rcu_id;
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
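+	/* The first RX activity after probe/SSR marks the link as up */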
+	if (unlikely(!einfo->rx_fifo_start)) {
+		rx_avail = glink_spi_xprt_read_avail(einfo);
+		if (!rx_avail) {
+			srcu_read_unlock(&einfo->use_ref, rcu_id);
+			return;
+		}
+		einfo->in_ssr = false;
+		einfo->xprt_if.glink_core_if_ptr->link_up(&einfo->xprt_if);
+	}
+
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	glink_spi_xprt_set_poll_mode(einfo);
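+	/* Poll for RX data until MAX_INACTIVE_CYCLES pass without activity */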
+	while (inactive_cycles < MAX_INACTIVE_CYCLES) {
+		if (einfo->tx_resume_needed &&
+		    glink_spi_xprt_write_avail(einfo)) {
+			einfo->tx_resume_needed = false;
+			einfo->xprt_if.glink_core_if_ptr->tx_resume(
+							&einfo->xprt_if);
+		}
+		mutex_lock(&einfo->write_lock);
+		if (einfo->tx_blocked_signal_sent) {
+			wake_up_all(&einfo->tx_blocked_queue);
+			einfo->tx_blocked_signal_sent = false;
+		}
+		mutex_unlock(&einfo->write_lock);
+
+		rx_avail = glink_spi_xprt_read_avail(einfo);
+		if (!rx_avail) {
+			usleep_range(POLL_INTERVAL_US, POLL_INTERVAL_US + 50);
+			inactive_cycles++;
+			continue;
+		}
+		inactive_cycles = 0;
+
+		rx_data = kzalloc(rx_avail, GFP_KERNEL);
+		if (!rx_data)
+			break;
+
+		rc = glink_spi_xprt_rx_cmd(einfo, rx_data, rx_avail);
+		if (rc < 0) {
+			GLINK_ERR("%s: Error %d receiving data\n",
+				  __func__, rc);
+			kfree(rx_data);
+			break;
+		}
+		process_rx_cmd(einfo, rx_data, rx_avail);
+		kfree(rx_data);
+	}
+	glink_spi_xprt_set_irq_mode(einfo);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * rx_worker() - Worker function to process received commands
+ * @work:       kwork associated with the edge to process commands on.
+ */
+static void rx_worker(struct kthread_work *work)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(work, struct edge_info, kwork);
+	__rx_worker(einfo);
+}
+
+/**
+ * tx_cmd_version() - Convert a version cmd to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @version:    The version number to encode.
+ * @features:   The features information to encode.
+ */
+static void tx_cmd_version(struct glink_transport_if *if_ptr, uint32_t version,
+			   uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+		uint32_t fifo_size;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_version_ack() - Convert a version ack cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @version:	The version number to encode.
+ * @features:	The features information to encode.
+ */
+static void tx_cmd_version_ack(struct glink_transport_if *if_ptr,
+			       uint32_t version,
+			       uint32_t features)
+{
+	struct command {
+		uint16_t id;
+		uint16_t version;
+		uint32_t features;
+		uint32_t fifo_size;
+		uint32_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = VERSION_ACK_CMD;
+	cmd.version = version;
+	cmd.features = features;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * set_version() - Activate a negotiated version and feature set
+ * @if_ptr:	The transport to configure.
+ * @version:	The version to use.
+ * @features:	The features to use.
+ *
+ * Return: The supported capabilities of the transport.
+ */
+static uint32_t set_version(struct glink_transport_if *if_ptr, uint32_t version,
+			uint32_t features)
+{
+	struct edge_info *einfo;
+	uint32_t ret;
+	int rcu_id;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return 0;
+	}
+
+	ret = GCAP_SIGNALS;
+	if (features & TRACER_PKT_FEATURE)
+		ret |= GCAP_TRACER_PKT;
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return ret;
+}
+
+/**
+ * tx_cmd_ch_open() - Convert a channel open cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @name:	The channel name to encode.
+ * @req_xprt:	The transport the core would like to migrate this channel to.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_open(struct glink_transport_if *if_ptr, uint32_t lcid,
+			  const char *name, uint16_t req_xprt)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint16_t length;
+		uint16_t req_xprt;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t buf_size;
+	void *buf;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = OPEN_CMD;
+	cmd.lcid = lcid;
+	cmd.length = (uint16_t)(strlen(name) + 1);
+	cmd.req_xprt = req_xprt;
+
+	buf_size = ALIGN(sizeof(cmd) + cmd.length, FIFO_ALIGNMENT);
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -ENOMEM;
+	}
+
+	memcpy(buf, &cmd, sizeof(cmd));
+	memcpy(buf + sizeof(cmd), name, cmd.length);
+
+	glink_spi_xprt_tx_cmd(einfo, buf, buf_size);
+
+	kfree(buf);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_close() - Convert a channel close cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_ch_close(struct glink_transport_if *if_ptr, uint32_t lcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t reserved1;
+		uint64_t reserved2;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = CLOSE_CMD;
+	cmd.lcid = lcid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_ch_remote_open_ack() - Convert a channel open ack cmd to wire format
+ *				 and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ * @xprt_resp:	The response to a transport migration request.
+ */
+static void tx_cmd_ch_remote_open_ack(struct glink_transport_if *if_ptr,
+				     uint32_t rcid, uint16_t xprt_resp)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint16_t reserved1;
+		uint16_t xprt_resp;
+		uint64_t reserved2;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = OPEN_ACK_CMD;
+	cmd.rcid = rcid;
+	cmd.xprt_resp = xprt_resp;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_ch_remote_close_ack() - Convert a channel close ack cmd to wire format
+ *				  and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @rcid:	The remote channel id to encode.
+ */
+static void tx_cmd_ch_remote_close_ack(struct glink_transport_if *if_ptr,
+				       uint32_t rcid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t rcid;
+		uint32_t reserved1;
+		uint64_t reserved2;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = CLOSE_ACK_CMD;
+	cmd.rcid = rcid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * ssr() - Process a subsystem restart notification of a transport
+ * @if_ptr:	The transport to restart
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int ssr(struct glink_transport_if *if_ptr)
+{
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	einfo->in_ssr = true;
+	wake_up_all(&einfo->tx_blocked_queue);
+
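+	/* Drain all in-flight users of this edge before resetting its state */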
+	synchronize_srcu(&einfo->use_ref);
+	einfo->tx_resume_needed = false;
+	einfo->tx_blocked_signal_sent = false;
+	einfo->tx_fifo_start = 0;
+	einfo->rx_fifo_start = 0;
+	einfo->fifo_size = DEFAULT_FIFO_SIZE;
+	einfo->xprt_if.glink_core_if_ptr->link_down(&einfo->xprt_if);
+
+	return 0;
+}
+
+/**
+ * allocate_rx_intent() - Allocate/reserve space for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @size:	size of intent.
+ * @intent:	Pointer to the intent structure.
+ *
+ * "data" is assigned the linear buffer created by the transport and
+ * "iovec" the "intent" itself, so that the data can also be passed to a
+ * client that receives only vector buffers.
+ * Note that returning NULL for the pointer is valid (it means that space has
+ * been reserved, but the actual pointer will be provided later).
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int allocate_rx_intent(struct glink_transport_if *if_ptr, size_t size,
+			      struct glink_core_rx_intent *intent)
+{
+	void *t;
+
+	t = kzalloc(size, GFP_KERNEL);
+	if (!t)
+		return -ENOMEM;
+
+	intent->data = t;
+	intent->iovec = (void *)intent;
+	intent->vprovider = rx_linear_vbuf_provider;
+	intent->pprovider = NULL;
+	return 0;
+}
+
+/**
+ * deallocate_rx_intent() - Deallocate space created for RX Intent
+ * @if_ptr:	The transport the intent is associated with.
+ * @intent:	Pointer to the intent structure.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int deallocate_rx_intent(struct glink_transport_if *if_ptr,
+				struct glink_core_rx_intent *intent)
+{
+	if (!intent || !intent->data)
+		return -EINVAL;
+
+	kfree(intent->data);
+	intent->data = NULL;
+	intent->iovec = NULL;
+	intent->vprovider = NULL;
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_intent() - Convert an rx intent cmd to wire format and
+ *			      transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The intent size to encode.
+ * @liid:	The local intent id to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_local_rx_intent(struct glink_transport_if *if_ptr,
+				  uint32_t lcid, size_t size, uint32_t liid)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t count;
+		uint64_t reserved;
+		uint32_t size;
+		uint32_t liid;
+		uint64_t addr;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_CMD;
+	cmd.lcid = lcid;
+	cmd.count = 1;
+	cmd.size = size;
+	cmd.liid = liid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_local_rx_done() - Convert an rx done cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @liid:	The local intent id to encode.
+ * @reuse:	Reuse the consumed intent.
+ */
+static void tx_cmd_local_rx_done(struct glink_transport_if *if_ptr,
+				 uint32_t lcid, uint32_t liid, bool reuse)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t liid;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return;
+	}
+
+	cmd.id = reuse ? RX_DONE_W_REUSE_CMD : RX_DONE_CMD;
+	cmd.lcid = lcid;
+	cmd.liid = liid;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+}
+
+/**
+ * tx_cmd_rx_intent_req() - Convert an rx intent request cmd to wire format and
+ *			    transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @size:	The requested intent size to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_rx_intent_req(struct glink_transport_if *if_ptr,
+				uint32_t lcid, size_t size)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t size;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	if (size > UINT_MAX) {
+		pr_err("%s: size %zu is too large to encode\n", __func__, size);
+		return -EMSGSIZE;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_CMD;
+	cmd.lcid = lcid;
+	cmd.size = size;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_remote_rx_intent_req_ack() - Convert an rx intent request ack cmd
+ *				to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @granted:	The request response to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_remote_rx_intent_req_ack(struct glink_transport_if *if_ptr,
+					   uint32_t lcid, bool granted)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t response;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = RX_INTENT_REQ_ACK_CMD;
+	cmd.lcid = lcid;
+	if (granted)
+		cmd.response = 1;
+	else
+		cmd.response = 0;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_cmd_set_sigs() - Convert a signals cmd to wire format and transmit
+ * @if_ptr:	The transport to transmit on.
+ * @lcid:	The local channel id to encode.
+ * @sigs:	The signals to encode.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+static int tx_cmd_set_sigs(struct glink_transport_if *if_ptr, uint32_t lcid,
+			   uint32_t sigs)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t sigs;
+		uint64_t reserved;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	int rcu_id;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = SIGNALS_CMD;
+	cmd.lcid = lcid;
+	cmd.sigs = sigs;
+
+	glink_spi_xprt_tx_cmd(einfo, &cmd, sizeof(cmd));
+
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return 0;
+}
+
+/**
+ * tx_data() - convert a data/tracer_pkt to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @cmd_id:     The command ID to transmit.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_data(struct glink_transport_if *if_ptr, uint16_t cmd_id,
+		   uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t riid;
+		uint64_t reserved;
+		uint32_t size;
+		uint32_t size_left;
+		uint64_t addr;
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t size;
+	void *data_start, *dst = NULL;
+	size_t tx_size = 0;
+	int rcu_id;
+
+	if (pctx->size < pctx->size_remaining) {
+		GLINK_ERR("%s: size remaining exceeds size.  Resetting.\n",
+			  __func__);
+		pctx->size_remaining = pctx->size;
+	}
+	if (!pctx->size_remaining)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	if (cmd_id == TX_DATA_CMD) {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TX_DATA_CMD;
+		else
+			cmd.id = TX_DATA_CONT_CMD;
+	} else {
+		if (pctx->size_remaining == pctx->size)
+			cmd.id = TRACER_PKT_CMD;
+		else
+			cmd.id = TRACER_PKT_CONT_CMD;
+	}
+	cmd.lcid = lcid;
+	cmd.riid = pctx->riid;
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (unlikely(!data_start)) {
+		GLINK_ERR("%s: invalid data_start\n", __func__);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
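+	/* Keep the transfer aligned; the tail goes out in a later fragment */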
+	if (tx_size & (XPRT_ALIGNMENT - 1))
+		tx_size = ALIGN(tx_size - SHORT_PKT_SIZE, XPRT_ALIGNMENT);
+	if (likely(pctx->cookie))
+		dst = pctx->cookie + (pctx->size - pctx->size_remaining);
+
+	mutex_lock(&einfo->write_lock);
+	size = glink_spi_xprt_write_avail(einfo);
+	/* Need enough space to write the command */
+	if (size <= sizeof(cmd)) {
+		einfo->tx_resume_needed = true;
+		mutex_unlock(&einfo->write_lock);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EAGAIN;
+	}
+	cmd.addr = 0;
+	cmd.size = tx_size;
+	pctx->size_remaining -= tx_size;
+	cmd.size_left = pctx->size_remaining;
+	if (cmd.id == TRACER_PKT_CMD)
+		tracer_pkt_log_event((void *)(pctx->data), GLINK_XPRT_TX);
+
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		wdsp_resume(&einfo->cmpnt);
+	glink_spi_xprt_tx_data(einfo, data_start, dst, tx_size);
+	glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+		  "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+		  cmd.size_left);
+	mutex_unlock(&einfo->write_lock);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return cmd.size;
+}
+
+/**
+ * tx_short_data() - Transmit a short packet in-band along with the command
+ * @if_ptr:     The transport to transmit on.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_short_data(struct glink_transport_if *if_ptr,
+			 uint32_t lcid, struct glink_core_tx_pkt *pctx)
+{
+	struct command {
+		uint16_t id;
+		uint16_t lcid;
+		uint32_t riid;
+		uint32_t size;
+		uint32_t size_left;
+		unsigned char data[SHORT_PKT_SIZE];
+	};
+	struct command cmd;
+	struct edge_info *einfo;
+	uint32_t size;
+	void *data_start;
+	size_t tx_size = 0;
+	int rcu_id;
+
+	if (pctx->size < pctx->size_remaining) {
+		GLINK_ERR("%s: size remaining exceeds size.  Resetting.\n",
+			  __func__);
+		pctx->size_remaining = pctx->size;
+	}
+	if (!pctx->size_remaining)
+		return 0;
+
+	memset(&cmd, 0, sizeof(cmd));
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+
+	rcu_id = srcu_read_lock(&einfo->use_ref);
+	if (einfo->in_ssr) {
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EFAULT;
+	}
+
+	cmd.id = TX_SHORT_DATA_CMD;
+	cmd.lcid = lcid;
+	cmd.riid = pctx->riid;
+	data_start = get_tx_vaddr(pctx, pctx->size - pctx->size_remaining,
+				  &tx_size);
+	if (unlikely(!data_start || tx_size > SHORT_PKT_SIZE)) {
+		GLINK_ERR("%s: invalid data_start %p or tx_size %zu\n",
+			  __func__, data_start, tx_size);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EINVAL;
+	}
+
+	mutex_lock(&einfo->write_lock);
+	size = glink_spi_xprt_write_avail(einfo);
+	/* Need enough space to write the command */
+	if (size <= sizeof(cmd)) {
+		einfo->tx_resume_needed = true;
+		mutex_unlock(&einfo->write_lock);
+		srcu_read_unlock(&einfo->use_ref, rcu_id);
+		return -EAGAIN;
+	}
+	cmd.size = tx_size;
+	pctx->size_remaining -= tx_size;
+	cmd.size_left = pctx->size_remaining;
+	memcpy(cmd.data, data_start, tx_size);
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		wdsp_resume(&einfo->cmpnt);
+	glink_spi_xprt_tx_cmd_safe(einfo, &cmd, sizeof(cmd));
+	GLINK_DBG("%s %s: lcid[%u] riid[%u] cmd[%d], size[%d], size_left[%d]\n",
+		  "<SPI>", __func__, cmd.lcid, cmd.riid, cmd.id, cmd.size,
+		  cmd.size_left);
+	mutex_unlock(&einfo->write_lock);
+	srcu_read_unlock(&einfo->use_ref, rcu_id);
+	return cmd.size;
+}
+
+/**
+ * tx() - convert a data transmit cmd to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx(struct glink_transport_if *if_ptr, uint32_t lcid,
+	      struct glink_core_tx_pkt *pctx)
+{
+	if (pctx->size_remaining <= SHORT_PKT_SIZE)
+		return tx_short_data(if_ptr, lcid, pctx);
+	return tx_data(if_ptr, TX_DATA_CMD, lcid, pctx);
+}
+
+/**
+ * tx_cmd_tracer_pkt() - convert a tracer packet cmd to wire format and transmit
+ * @if_ptr:     The transport to transmit on.
+ * @lcid:       The local channel id to encode.
+ * @pctx:       The data to encode.
+ *
+ * Return: Number of bytes written or standard Linux error code.
+ */
+static int tx_cmd_tracer_pkt(struct glink_transport_if *if_ptr, uint32_t lcid,
+			     struct glink_core_tx_pkt *pctx)
+{
+	return tx_data(if_ptr, TRACER_PKT_CMD, lcid, pctx);
+}
+
+/**
+ * wait_link_down() - Check status of read/write indices
+ * @if_ptr:     The transport to check
+ *
+ * Return: 1 if indices are all zero, 0 otherwise
+ */
+static int wait_link_down(struct glink_transport_if *if_ptr)
+{
+	return 0;
+}
+
+/**
+ * get_power_vote_ramp_time() - Get the ramp time required for the power
+ *                              votes to be applied
+ * @if_ptr:     The transport interface on which power voting is requested.
+ * @state:      The power state for which ramp time is required.
+ *
+ * Return: The ramp time specific to the power state, standard error otherwise.
+ */
+static unsigned long get_power_vote_ramp_time(
+		struct glink_transport_if *if_ptr, uint32_t state)
+{
+	return 0;
+}
+
+/**
+ * power_vote() - Update the power votes to meet the QoS requirement
+ * @if_ptr:     The transport interface on which power voting is requested.
+ * @state:      The power state for which the voting should be done.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_vote(struct glink_transport_if *if_ptr, uint32_t state)
+{
+	unsigned long flags;
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag |= ACTIVE_TX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	return 0;
+}
+
+/**
+ * power_unvote() - Remove all the power votes
+ * @if_ptr:     The transport interface on which power voting is requested.
+ *
+ * Return: 0 on Success, standard error otherwise.
+ */
+static int power_unvote(struct glink_transport_if *if_ptr)
+{
+	unsigned long flags;
+	struct edge_info *einfo;
+
+	einfo = container_of(if_ptr, struct edge_info, xprt_if);
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	einfo->activity_flag &= ~ACTIVE_TX;
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	return 0;
+}
+
+static int glink_wdsp_cmpnt_init(struct device *dev, void *priv_data)
+{
+	return 0;
+}
+
+static int glink_wdsp_cmpnt_deinit(struct device *dev, void *priv_data)
+{
+	return 0;
+}
+
+static int glink_wdsp_cmpnt_event_handler(struct device *dev,
+		void *priv_data, enum wdsp_event_type event, void *data)
+{
+	struct edge_info *einfo = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+	struct device *sdev;
+	struct spi_device *spi_dev;
+
+	switch (event) {
+	case WDSP_EVENT_PRE_BOOTUP:
+		if (cmpnt && cmpnt->master_dev &&
+		    cmpnt->master_ops &&
+		    cmpnt->master_ops->get_dev_for_cmpnt)
+			sdev = cmpnt->master_ops->get_dev_for_cmpnt(
+				cmpnt->master_dev, WDSP_CMPNT_TRANSPORT);
+		else
+			sdev = NULL;
+
+		if (!sdev) {
+			dev_err(dev, "%s: Failed to get transport device\n",
+				__func__);
+			break;
+		}
+
+		spi_dev = to_spi_device(sdev);
+		einfo->spi_dev = spi_dev;
+		break;
+	case WDSP_EVENT_IPC1_INTR:
+		queue_kthread_work(&einfo->kworker, &einfo->kwork);
+		break;
+	default:
+		pr_debug("%s: unhandled event %d\n", __func__, event);
+		break;
+	}
+
+	return 0;
+}
+
+/* glink_wdsp_cmpnt_ops - Callback operations registered with WDSP framework */
+static struct wdsp_cmpnt_ops glink_wdsp_cmpnt_ops = {
+	.init = glink_wdsp_cmpnt_init,
+	.deinit = glink_wdsp_cmpnt_deinit,
+	.event_handler = glink_wdsp_cmpnt_event_handler,
+};
+
+static int glink_component_bind(struct device *dev, struct device *master,
+				void *data)
+{
+	struct edge_info *einfo = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+	int ret = 0;
+
+	cmpnt->master_dev = master;
+	cmpnt->master_ops = data;
+
+	if (cmpnt->master_ops && cmpnt->master_ops->register_cmpnt_ops)
+		ret = cmpnt->master_ops->register_cmpnt_ops(master, dev, einfo,
+							&glink_wdsp_cmpnt_ops);
+	else
+		ret = -EINVAL;
+
+	if (ret)
+		dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
+			__func__, ret);
+	return ret;
+}
+
+static void glink_component_unbind(struct device *dev, struct device *master,
+				   void *data)
+{
+	struct edge_info *einfo = dev_get_drvdata(dev);
+	struct glink_cmpnt *cmpnt = &einfo->cmpnt;
+
+	cmpnt->master_dev = NULL;
+	cmpnt->master_ops = NULL;
+}
+
+static const struct component_ops glink_component_ops = {
+	.bind = glink_component_bind,
+	.unbind = glink_component_unbind,
+};
+
+/**
+ * init_xprt_if() - Initialize the xprt_if for an edge
+ * @einfo:	The edge to initialize.
+ */
+static void init_xprt_if(struct edge_info *einfo)
+{
+	einfo->xprt_if.tx_cmd_version = tx_cmd_version;
+	einfo->xprt_if.tx_cmd_version_ack = tx_cmd_version_ack;
+	einfo->xprt_if.set_version = set_version;
+	einfo->xprt_if.tx_cmd_ch_open = tx_cmd_ch_open;
+	einfo->xprt_if.tx_cmd_ch_close = tx_cmd_ch_close;
+	einfo->xprt_if.tx_cmd_ch_remote_open_ack = tx_cmd_ch_remote_open_ack;
+	einfo->xprt_if.tx_cmd_ch_remote_close_ack = tx_cmd_ch_remote_close_ack;
+	einfo->xprt_if.ssr = ssr;
+	einfo->xprt_if.allocate_rx_intent = allocate_rx_intent;
+	einfo->xprt_if.deallocate_rx_intent = deallocate_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_intent = tx_cmd_local_rx_intent;
+	einfo->xprt_if.tx_cmd_local_rx_done = tx_cmd_local_rx_done;
+	einfo->xprt_if.tx = tx;
+	einfo->xprt_if.tx_cmd_rx_intent_req = tx_cmd_rx_intent_req;
+	einfo->xprt_if.tx_cmd_remote_rx_intent_req_ack =
+						tx_cmd_remote_rx_intent_req_ack;
+	einfo->xprt_if.tx_cmd_set_sigs = tx_cmd_set_sigs;
+	einfo->xprt_if.wait_link_down = wait_link_down;
+	einfo->xprt_if.tx_cmd_tracer_pkt = tx_cmd_tracer_pkt;
+	einfo->xprt_if.get_power_vote_ramp_time = get_power_vote_ramp_time;
+	einfo->xprt_if.power_vote = power_vote;
+	einfo->xprt_if.power_unvote = power_unvote;
+}
+
+/**
+ * init_xprt_cfg() - Initialize the xprt_cfg for an edge
+ * @einfo:	The edge to initialize.
+ * @name:	The name of the remote side this edge communicates to.
+ */
+static void init_xprt_cfg(struct edge_info *einfo, const char *name)
+{
+	einfo->xprt_cfg.name = XPRT_NAME;
+	einfo->xprt_cfg.edge = name;
+	einfo->xprt_cfg.versions = versions;
+	einfo->xprt_cfg.versions_entries = ARRAY_SIZE(versions);
+	einfo->xprt_cfg.max_cid = SZ_64K;
+	einfo->xprt_cfg.max_iid = SZ_2G;
+}
+
+/**
+ * parse_qos_dt_params() - Parse the power states from DT
+ * @node:	Reference to the device tree node for a specific edge.
+ * @einfo:	Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_qos_dt_params(struct device_node *node,
+				struct edge_info *einfo)
+{
+	int rc;
+	int i;
+	char *key;
+	uint32_t *arr32;
+	uint32_t num_states;
+
+	key = "qcom,ramp-time";
+	if (!of_find_property(node, key, &num_states))
+		return -ENODEV;
+
+	num_states /= sizeof(uint32_t);
+
+	einfo->num_pw_states = num_states;
+
+	arr32 = kmalloc_array(num_states, sizeof(uint32_t), GFP_KERNEL);
+	if (!arr32)
+		return -ENOMEM;
+
+	einfo->ramp_time_us = kmalloc_array(num_states, sizeof(unsigned long),
+					GFP_KERNEL);
+	if (!einfo->ramp_time_us) {
+		rc = -ENOMEM;
+		goto mem_alloc_fail;
+	}
+
+	rc = of_property_read_u32_array(node, key, arr32, num_states);
+	if (rc) {
+		rc = -ENODEV;
+		goto invalid_key;
+	}
+	for (i = 0; i < num_states; i++)
+		einfo->ramp_time_us[i] = arr32[i];
+
+	kfree(arr32);
+	return 0;
+
+invalid_key:
+	kfree(einfo->ramp_time_us);
+mem_alloc_fail:
+	kfree(arr32);
+	return rc;
+}
+
+/**
+ * parse_remote_fifo_cfg() - Parse any remote FIFO configuration
+ * @node:	Reference to the device tree node holding the FIFO configuration.
+ * @einfo:	Edge information for the edge whose probe function is called.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+static int parse_remote_fifo_cfg(struct device_node *node,
+				 struct edge_info *einfo)
+{
+	int rc;
+	char *key;
+
+	key = "qcom,out-read-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->tx_fifo_read_reg_addr);
+	if (rc)
+		goto key_error;
+
+	key = "qcom,out-write-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->tx_fifo_write_reg_addr);
+	if (rc)
+		goto key_error;
+
+	key = "qcom,in-read-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->rx_fifo_read_reg_addr);
+	if (rc)
+		goto key_error;
+
+	key = "qcom,in-write-idx-reg";
+	rc = of_property_read_u32(node, key, &einfo->rx_fifo_write_reg_addr);
+	if (rc)
+		goto key_error;
+	return 0;
+
+key_error:
+	pr_err("%s: Error %d parsing key %s\n", __func__, rc, key);
+	return rc;
+}
+
+static int glink_spi_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct device_node *phandle_node;
+	struct edge_info *einfo;
+	int rc;
+	char *key;
+	const char *subsys_name;
+	unsigned long flags;
+
+	node = pdev->dev.of_node;
+
+	einfo = kzalloc(sizeof(*einfo), GFP_KERNEL);
+	if (!einfo) {
+		rc = -ENOMEM;
+		goto edge_info_alloc_fail;
+	}
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		pr_err("%s: missing key %s\n", __func__, key);
+		rc = -ENODEV;
+		goto missing_key;
+	}
+	strlcpy(einfo->subsys_name, subsys_name, sizeof(einfo->subsys_name));
+
+	init_xprt_cfg(einfo, subsys_name);
+	init_xprt_if(einfo);
+
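+	/* Treated as in SSR until the remote shows activity in __rx_worker() */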
+	einfo->in_ssr = true;
+	einfo->fifo_size = DEFAULT_FIFO_SIZE;
+	init_kthread_work(&einfo->kwork, rx_worker);
+	init_kthread_worker(&einfo->kworker);
+	init_srcu_struct(&einfo->use_ref);
+	mutex_init(&einfo->write_lock);
+	init_waitqueue_head(&einfo->tx_blocked_queue);
+	spin_lock_init(&einfo->activity_lock);
+
+	spin_lock_irqsave(&edge_infos_lock, flags);
+	list_add_tail(&einfo->list, &edge_infos);
+	spin_unlock_irqrestore(&edge_infos_lock, flags);
+
+	einfo->task = kthread_run(kthread_worker_fn, &einfo->kworker,
+				  "spi_%s", subsys_name);
+	if (IS_ERR(einfo->task)) {
+		rc = PTR_ERR(einfo->task);
+		pr_err("%s: kthread run failed %d\n", __func__, rc);
+		goto kthread_fail;
+	}
+
+	key = "qcom,remote-fifo-config";
+	phandle_node = of_parse_phandle(node, key, 0);
+	if (phandle_node)
+		parse_remote_fifo_cfg(phandle_node, einfo);
+
+	key = "qcom,qos-config";
+	phandle_node = of_parse_phandle(node, key, 0);
+	if (phandle_node && !(of_get_glink_core_qos_cfg(phandle_node,
+							&einfo->xprt_cfg)))
+		parse_qos_dt_params(node, einfo);
+
+	rc = glink_core_register_transport(&einfo->xprt_if, &einfo->xprt_cfg);
+	if (rc == -EPROBE_DEFER)
+		goto reg_xprt_fail;
+	if (rc) {
+		pr_err("%s: glink core register transport failed: %d\n",
+			__func__, rc);
+		goto reg_xprt_fail;
+	}
+
+	dev_set_drvdata(&pdev->dev, einfo);
+	if (!strcmp(einfo->xprt_cfg.edge, "wdsp")) {
+		rc = component_add(&pdev->dev, &glink_component_ops);
+		if (rc) {
+			pr_err("%s: component_add failed, err = %d\n",
+				__func__, rc);
+			rc = -ENODEV;
+			goto reg_cmpnt_fail;
+		}
+	}
+	return 0;
+
+reg_cmpnt_fail:
+	dev_set_drvdata(&pdev->dev, NULL);
+	glink_core_unregister_transport(&einfo->xprt_if);
+reg_xprt_fail:
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+kthread_fail:
+	spin_lock_irqsave(&edge_infos_lock, flags);
+	list_del(&einfo->list);
+	spin_unlock_irqrestore(&edge_infos_lock, flags);
+missing_key:
+	kfree(einfo);
+edge_info_alloc_fail:
+	return rc;
+}
+
+static int glink_spi_remove(struct platform_device *pdev)
+{
+	struct edge_info *einfo;
+	unsigned long flags;
+
+	einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+	glink_core_unregister_transport(&einfo->xprt_if);
+	flush_kthread_worker(&einfo->kworker);
+	kthread_stop(einfo->task);
+	einfo->task = NULL;
+	spin_lock_irqsave(&edge_infos_lock, flags);
+	list_del(&einfo->list);
+	spin_unlock_irqrestore(&edge_infos_lock, flags);
+	kfree(einfo);
+	return 0;
+}
+
+static int glink_spi_resume(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int glink_spi_suspend(struct platform_device *pdev,
+				   pm_message_t state)
+{
+	unsigned long flags;
+	struct edge_info *einfo;
+	bool suspend;
+	int rc = -EBUSY;
+
+	einfo = (struct edge_info *)dev_get_drvdata(&pdev->dev);
+	if (strcmp(einfo->xprt_cfg.edge, "wdsp"))
+		return 0;
+
+	spin_lock_irqsave(&einfo->activity_lock, flags);
+	suspend = !(einfo->activity_flag);
+	spin_unlock_irqrestore(&einfo->activity_lock, flags);
+	if (suspend)
+		rc = wdsp_suspend(&einfo->cmpnt);
+	if (rc < 0)
+		pr_err("%s: Could not suspend activity_flag %d, rc %d\n",
+			__func__, einfo->activity_flag, rc);
+	return rc;
+}
+
+static const struct of_device_id spi_match_table[] = {
+	{ .compatible = "qcom,glink-spi-xprt" },
+	{},
+};
+
+static struct platform_driver glink_spi_driver = {
+	.probe = glink_spi_probe,
+	.remove = glink_spi_remove,
+	.resume = glink_spi_resume,
+	.suspend = glink_spi_suspend,
+	.driver = {
+		.name = "msm_glink_spi_xprt",
+		.owner = THIS_MODULE,
+		.of_match_table = spi_match_table,
+	},
+};
+
+static int __init glink_spi_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&glink_spi_driver);
+	if (rc)
+		pr_err("%s: glink_spi register failed %d\n", __func__, rc);
+
+	return rc;
+}
+module_init(glink_spi_xprt_init);
+
+static void __exit glink_spi_xprt_exit(void)
+{
+	platform_driver_unregister(&glink_spi_driver);
+}
+module_exit(glink_spi_xprt_exit);
+
+MODULE_DESCRIPTION("MSM G-Link SPI Transport");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_ssr.c b/drivers/soc/qcom/glink_ssr.c
new file mode 100644
index 0000000..6d04b61
--- /dev/null
+++ b/drivers/soc/qcom/glink_ssr.c
@@ -0,0 +1,978 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/err.h>
+#include <linux/ipc_logging.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/random.h>
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include "glink_private.h"
+
+#define GLINK_SSR_REPLY_TIMEOUT	HZ
+#define GLINK_SSR_INTENT_REQ_TIMEOUT_MS 500
+#define GLINK_SSR_EVENT_INIT ~0
+#define NUM_LOG_PAGES 3
+
+#define GLINK_SSR_LOG(x...) do { \
+	if (glink_ssr_log_ctx) \
+		ipc_log_string(glink_ssr_log_ctx, x); \
+} while (0)
+
+#define GLINK_SSR_ERR(x...) do { \
+	pr_err(x); \
+	GLINK_SSR_LOG(x); \
+} while (0)
+
+static void *glink_ssr_log_ctx;
+
+/* Global restart counter */
+static uint32_t sequence_number;
+
+/* Flag indicating if responses were received for all SSR notifications */
+static bool notifications_successful;
+
+/* Completion for setting notifications_successful */
+struct completion notifications_successful_complete;
+
+/**
+ * struct restart_notifier_block - restart notifier wrapper structure
+ * subsystem:	the name of the subsystem as recognized by the SSR framework
+ * nb:		notifier block structure used by the SSR framework
+ */
+struct restart_notifier_block {
+	const char *subsystem;
+	struct notifier_block nb;
+};
+
+/**
+ * struct configure_and_open_ch_work - Work structure used for opening
+ *				glink_ssr channels
+ * edge:	The G-Link edge obtained from the link state callback
+ * transport:	The G-Link transport obtained from the link state callback
+ * link_state:	The link state obtained from the link state callback
+ * ss_info:	Subsystem information structure containing the info for this
+ *		callback
+ * work:	Work structure
+ */
+struct configure_and_open_ch_work {
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	enum glink_link_state link_state;
+	struct subsys_info *ss_info;
+	struct work_struct work;
+};
+
+/**
+ * struct close_ch_work - Work structure used for closing glink_ssr channels
+ * edge:	The G-Link edge name for the channel being closed
+ * handle:	G-Link channel handle to be closed
+ * work:	Work structure
+ */
+struct close_ch_work {
+	char edge[GLINK_NAME_SIZE];
+	void *handle;
+	struct work_struct work;
+};
+
+static int glink_ssr_restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data);
+static void delete_ss_info_notify_list(struct subsys_info *ss_info);
+static int configure_and_open_channel(struct subsys_info *ss_info);
+static struct workqueue_struct *glink_ssr_wq;
+
+static LIST_HEAD(subsystem_list);
+static atomic_t responses_remaining = ATOMIC_INIT(0);
+static wait_queue_head_t waitqueue;
+
+static void link_state_cb_worker(struct work_struct *work)
+{
+	unsigned long flags;
+	struct configure_and_open_ch_work *ch_open_work =
+		container_of(work, struct configure_and_open_ch_work, work);
+	struct subsys_info *ss_info = ch_open_work->ss_info;
+
+	GLINK_SSR_LOG("<SSR> %s: LINK STATE[%d] %s:%s\n", __func__,
+			ch_open_work->link_state, ch_open_work->edge,
+			ch_open_work->transport);
+
+	if (ss_info && ch_open_work->link_state == GLINK_LINK_STATE_UP) {
+		spin_lock_irqsave(&ss_info->link_up_lock, flags);
+		if (!ss_info->link_up) {
+			ss_info->link_up = true;
+			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+			if (!configure_and_open_channel(ss_info)) {
+				glink_unregister_link_state_cb(
+						ss_info->link_state_handle);
+				ss_info->link_state_handle = NULL;
+			}
+			kfree(ch_open_work);
+			return;
+		}
+		spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+	} else {
+		if (ss_info) {
+			spin_lock_irqsave(&ss_info->link_up_lock, flags);
+			ss_info->link_up = false;
+			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+			ss_info->handle = NULL;
+		} else {
+			GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+		}
+	}
+
+	kfree(ch_open_work);
+}
+
+/**
+ * glink_ssr_link_state_cb() - Callback to receive link state updates
+ * @cb_info:	Information containing link & its state.
+ * @priv:	Private data passed during the link state registration.
+ *
+ * This function is called by the G-Link core to notify the glink_ssr module
+ * of link state updates. It is registered with the G-Link core through
+ * glink_register_link_state_cb().
+ */
+static void glink_ssr_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct subsys_info *ss_info;
+	struct configure_and_open_ch_work *open_ch_work;
+
+	if (!cb_info) {
+		GLINK_SSR_ERR("<SSR> %s: Missing cb_data\n", __func__);
+		return;
+	}
+
+	ss_info = get_info_for_edge(cb_info->edge);
+
+	open_ch_work = kmalloc(sizeof(*open_ch_work), GFP_KERNEL);
+	if (!open_ch_work) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate open_ch_work\n",
+				__func__);
+		return;
+	}
+
+	strlcpy(open_ch_work->edge, cb_info->edge, GLINK_NAME_SIZE);
+	strlcpy(open_ch_work->transport, cb_info->transport, GLINK_NAME_SIZE);
+	open_ch_work->link_state = cb_info->link_state;
+	open_ch_work->ss_info = ss_info;
+
+	INIT_WORK(&open_ch_work->work, link_state_cb_worker);
+	queue_work(glink_ssr_wq, &open_ch_work->work);
+}
+
+/**
+ * glink_ssr_notify_rx() - RX Notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @pkt_priv:	Private packet data
+ * @ptr:	Pointer to the data received
+ * @size:	Size of the data received
+ *
+ * This function is a notification callback from the G-Link core that data
+ * has been received from the remote side. This data is validated to make
+ * sure it is a cleanup_done message and is processed accordingly if it is.
+ */
+void glink_ssr_notify_rx(void *handle, const void *priv, const void *pkt_priv,
+		const void *ptr, size_t size)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+	struct cleanup_done_msg *resp = (struct cleanup_done_msg *)ptr;
+
+	if (unlikely(!cb_data))
+		goto missing_cb_data;
+	if (unlikely(!cb_data->do_cleanup_data))
+		goto missing_do_cleanup_data;
+	if (unlikely(!resp))
+		goto missing_response;
+	if (unlikely(resp->version != cb_data->do_cleanup_data->version))
+		goto version_mismatch;
+	if (unlikely(resp->seq_num != cb_data->do_cleanup_data->seq_num))
+		goto invalid_seq_number;
+	if (unlikely(resp->response != GLINK_SSR_CLEANUP_DONE))
+		goto wrong_response;
+
+	cb_data->responded = true;
+	atomic_dec(&responses_remaining);
+
+	GLINK_SSR_LOG(
+		"<SSR> %s: Response from %s resp[%d] version[%d] seq_num[%d] restarted[%s]\n",
+			__func__, cb_data->edge, resp->response,
+			resp->version, resp->seq_num,
+			cb_data->do_cleanup_data->name);
+
+	kfree(cb_data->do_cleanup_data);
+	cb_data->do_cleanup_data = NULL;
+	wake_up(&waitqueue);
+	return;
+
+missing_cb_data:
+	panic("%s: Missing cb_data!\n", __func__);
+	return;
+missing_do_cleanup_data:
+	panic("%s: Missing do_cleanup data!\n", __func__);
+	return;
+missing_response:
+	GLINK_SSR_ERR("<SSR> %s: Missing response data\n", __func__);
+	return;
+version_mismatch:
+	GLINK_SSR_ERR("<SSR> %s: Version mismatch. %s[%d], %s[%d]\n", __func__,
+			"do_cleanup version", cb_data->do_cleanup_data->version,
+			"cleanup_done version", resp->version);
+	return;
+invalid_seq_number:
+	GLINK_SSR_ERR("<SSR> %s: Invalid seq. number. %s[%d], %s[%d]\n",
+			__func__, "do_cleanup seq num",
+			cb_data->do_cleanup_data->seq_num,
+			"cleanup_done seq_num", resp->seq_num);
+	return;
+wrong_response:
+	GLINK_SSR_ERR("<SSR> %s: Not a cleaup_done message. %s[%d]\n", __func__,
+			"cleanup_done response", resp->response);
+}
+
+/**
+ * glink_ssr_notify_tx_done() - Transmit finished notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @pkt_priv:	Private packet data
+ * @ptr:	Pointer to the data received
+ *
+ * This function is a notification callback from the G-Link core that data
+ * we sent has finished transmitting.
+ */
+void glink_ssr_notify_tx_done(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+
+	if (unlikely(!cb_data)) {
+		panic("%s: cb_data is NULL!\n", __func__);
+		return;
+	}
+
+	GLINK_SSR_LOG("<SSR> %s: Notified %s of restart\n",
+		__func__, cb_data->edge);
+
+	cb_data->tx_done = true;
+}
+
+void close_ch_worker(struct work_struct *work)
+{
+	unsigned long flags;
+	void *link_state_handle;
+	struct subsys_info *ss_info;
+	struct close_ch_work *close_work =
+		container_of(work, struct close_ch_work, work);
+
+	glink_close(close_work->handle);
+
+	ss_info = get_info_for_edge(close_work->edge);
+	if (WARN_ON(!ss_info))
+		return;
+
+	spin_lock_irqsave(&ss_info->link_up_lock, flags);
+	ss_info->link_up = false;
+	spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+
+	if (WARN_ON(ss_info->link_state_handle != NULL))
+		return;
+	link_state_handle = glink_register_link_state_cb(ss_info->link_info,
+			NULL);
+
+	if (IS_ERR_OR_NULL(link_state_handle))
+		GLINK_SSR_ERR("<SSR> %s: %s, ret[%d]\n", __func__,
+				"Couldn't register link state cb",
+				(int)PTR_ERR(link_state_handle));
+	else
+		ss_info->link_state_handle = link_state_handle;
+
+	if (!WARN_ON(!ss_info->cb_data)) {
+		kfree(ss_info->cb_data);
+		ss_info->cb_data = NULL;
+	}
+	kfree(close_work);
+}
+
+/**
+ * glink_ssr_notify_state() - Channel state notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @event:	The state that has been transitioned to
+ *
+ * This function is a notification callback from the G-Link core that the
+ * channel state has changed.
+ */
+void glink_ssr_notify_state(void *handle, const void *priv, unsigned int event)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+	struct close_ch_work *close_work;
+
+	if (!cb_data) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate data for cb_data\n",
+				__func__);
+	} else {
+		GLINK_SSR_LOG("<SSR> %s: event[%d]\n",
+				__func__, event);
+		cb_data->event = event;
+		if (event == GLINK_REMOTE_DISCONNECTED) {
+			close_work =
+				kmalloc(sizeof(struct close_ch_work),
+						GFP_KERNEL);
+			if (!close_work) {
+				GLINK_SSR_ERR(
+					"<SSR> %s: Could not allocate %s\n",
+						__func__, "close work");
+				return;
+			}
+
+			strlcpy(close_work->edge, cb_data->edge,
+					sizeof(close_work->edge));
+			close_work->handle = handle;
+			INIT_WORK(&close_work->work, close_ch_worker);
+			queue_work(glink_ssr_wq, &close_work->work);
+		}
+	}
+}
+
+/**
+ * glink_ssr_notify_rx_intent_req() - RX intent request notification callback
+ * @handle:	G-Link channel handle
+ * @priv:	Private callback data
+ * @req_size:	The size of the requested intent
+ *
+ * This function is a notification callback from the G-Link core of the remote
+ * side's request for an RX intent to be queued.
+ *
+ * Return: Boolean indicating whether or not the request was successfully
+ *         received
+ */
+bool glink_ssr_notify_rx_intent_req(void *handle, const void *priv,
+		size_t req_size)
+{
+	struct ssr_notify_data *cb_data = (struct ssr_notify_data *)priv;
+
+	if (!cb_data) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate data for cb_data\n",
+				__func__);
+		return false;
+	}
+	GLINK_SSR_LOG("<SSR> %s: rx_intent_req of size %zu\n",
+			__func__, req_size);
+	return true;
+}
+
+/**
+ * glink_ssr_restart_notifier_cb() - SSR restart notifier callback function
+ * @this:	Notifier block used by the SSR framework
+ * @code:	The SSR code for which stage of restart is occurring
+ * @data:	Structure containing private data - not used here.
+ *
+ * This function is a callback for the SSR framework. From here we initiate
+ * our handling of SSR.
+ *
+ * Return: Status of SSR handling
+ */
+static int glink_ssr_restart_notifier_cb(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	int ret = 0;
+	struct subsys_info *ss_info = NULL;
+	struct restart_notifier_block *notifier =
+		container_of(this, struct restart_notifier_block, nb);
+
+	if (code == SUBSYS_AFTER_SHUTDOWN) {
+		GLINK_SSR_LOG("<SSR> %s: %s: subsystem restart for %s\n",
+				__func__, "SUBSYS_AFTER_SHUTDOWN",
+				notifier->subsystem);
+		ss_info = get_info_for_subsystem(notifier->subsystem);
+		if (ss_info == NULL) {
+			GLINK_SSR_ERR("<SSR> %s: ss_info is NULL\n", __func__);
+			return -EINVAL;
+		}
+
+		glink_ssr(ss_info->edge);
+		ret = notify_for_subsystem(ss_info);
+
+		if (ret) {
+			GLINK_SSR_ERR("<SSR>: %s: %s, ret[%d]\n", __func__,
+					"Subsystem notification failed", ret);
+			return ret;
+		}
+	}
+	return NOTIFY_DONE;
+}
+
+/**
+ * notify_for_subsystem() - Notify other subsystems that a subsystem is being
+ *                          restarted
+ * @ss_info:	Subsystem info structure for the subsystem being restarted
+ *
+ * This function sends notifications to affected subsystems that the subsystem
+ * in ss_info is being restarted, and waits for the cleanup done response from
+ * all of those subsystems. It also initiates any local cleanup that is
+ * necessary.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+int notify_for_subsystem(struct subsys_info *ss_info)
+{
+	struct subsys_info *ss_info_channel;
+	struct subsys_info_leaf *ss_leaf_entry;
+	struct do_cleanup_msg *do_cleanup_data;
+	void *handle;
+	int wait_ret;
+	int ret;
+	unsigned long flags;
+
+	if (!ss_info) {
+		GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * No locking is needed here because ss_info->notify_list_len is
+	 * only modified during setup.
+	 */
+	atomic_set(&responses_remaining, ss_info->notify_list_len);
+	init_waitqueue_head(&waitqueue);
+	notifications_successful = true;
+
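+	/*
+	 * Send a do_cleanup message to every edge on the notify list; edges
+	 * that are not connected, or for which the send fails, are removed
+	 * from responses_remaining immediately so the wait below cannot
+	 * block on them.
+	 */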
+	list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
+			notify_list_node) {
+		GLINK_SSR_LOG(
+			"<SSR> %s: Notifying: %s:%s of %s restart, seq_num[%d]\n",
+				__func__, ss_leaf_entry->edge,
+				ss_leaf_entry->xprt, ss_info->edge,
+				sequence_number);
+
+		ss_info_channel =
+			get_info_for_subsystem(ss_leaf_entry->ssr_name);
+		if (ss_info_channel == NULL) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: unable to find subsystem name\n",
+					__func__);
+			return -ENODEV;
+		}
+		handle = ss_info_channel->handle;
+		ss_leaf_entry->cb_data = ss_info_channel->cb_data;
+
+		spin_lock_irqsave(&ss_info->link_up_lock, flags);
+		if (IS_ERR_OR_NULL(ss_info_channel->handle) ||
+				!ss_info_channel->cb_data ||
+				!ss_info_channel->link_up ||
+				ss_info_channel->cb_data->event
+						!= GLINK_CONNECTED) {
+
+			GLINK_SSR_LOG(
+				"<SSR> %s: %s:%s %s[%d], %s[%p], %s[%d]\n",
+				__func__, ss_leaf_entry->edge, "Not connected",
+				"resp. remaining",
+				atomic_read(&responses_remaining), "handle",
+				ss_info_channel->handle, "link_up",
+				ss_info_channel->link_up);
+
+			spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+			atomic_dec(&responses_remaining);
+			continue;
+		}
+		spin_unlock_irqrestore(&ss_info->link_up_lock, flags);
+
+		do_cleanup_data = kmalloc(sizeof(struct do_cleanup_msg),
+				GFP_KERNEL);
+		if (!do_cleanup_data) {
+			GLINK_SSR_ERR(
+				"%s %s: Could not allocate do_cleanup_msg\n",
+				"<SSR>", __func__);
+			return -ENOMEM;
+		}
+
+		do_cleanup_data->version = 0;
+		do_cleanup_data->command = GLINK_SSR_DO_CLEANUP;
+		do_cleanup_data->seq_num = sequence_number;
+		do_cleanup_data->name_len = strlen(ss_info->edge);
+		strlcpy(do_cleanup_data->name, ss_info->edge,
+				sizeof(do_cleanup_data->name));
+		ss_leaf_entry->cb_data->do_cleanup_data = do_cleanup_data;
+
+		ret = glink_queue_rx_intent(handle,
+				(void *)ss_leaf_entry->cb_data,
+				sizeof(struct cleanup_done_msg));
+		if (ret) {
+			GLINK_SSR_ERR(
+				"%s %s: %s, ret[%d], resp. remaining[%d]\n",
+				"<SSR>", __func__,
+				"queue_rx_intent failed", ret,
+				atomic_read(&responses_remaining));
+			kfree(do_cleanup_data);
+			ss_leaf_entry->cb_data->do_cleanup_data = NULL;
+
+			if (strcmp(ss_leaf_entry->ssr_name, "rpm")) {
+				subsystem_restart(ss_leaf_entry->ssr_name);
+				ss_leaf_entry->restarted = true;
+			} else {
+				panic("%s: Could not queue intent for RPM!\n",
+						__func__);
+			}
+			atomic_dec(&responses_remaining);
+			continue;
+		}
+
+		if (strcmp(ss_leaf_entry->ssr_name, "rpm"))
+			ret = glink_tx(handle, ss_leaf_entry->cb_data,
+					do_cleanup_data,
+					sizeof(*do_cleanup_data),
+					GLINK_TX_REQ_INTENT);
+		else
+			ret = glink_tx(handle, ss_leaf_entry->cb_data,
+					do_cleanup_data,
+					sizeof(*do_cleanup_data),
+					GLINK_TX_SINGLE_THREADED);
+
+		if (ret) {
+			GLINK_SSR_ERR("<SSR> %s: tx failed, ret[%d], %s[%d]\n",
+					__func__, ret, "resp. remaining",
+					atomic_read(&responses_remaining));
+			kfree(do_cleanup_data);
+			ss_leaf_entry->cb_data->do_cleanup_data = NULL;
+
+			if (strcmp(ss_leaf_entry->ssr_name, "rpm")) {
+				subsystem_restart(ss_leaf_entry->ssr_name);
+				ss_leaf_entry->restarted = true;
+			} else {
+				panic("%s: glink_tx() to RPM failed!\n",
+						__func__);
+			}
+			atomic_dec(&responses_remaining);
+			continue;
+		}
+
+		sequence_number++;
+	}
+
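+	/*
+	 * Wait for the outstanding cleanup_done responses. Any subsystem
+	 * that has not responded when the timeout expires is restarted
+	 * (except the RPM, which cannot be restarted and causes a panic).
+	 */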
+	wait_ret = wait_event_timeout(waitqueue,
+			atomic_read(&responses_remaining) == 0,
+			GLINK_SSR_REPLY_TIMEOUT);
+
+	list_for_each_entry(ss_leaf_entry, &ss_info->notify_list,
+			notify_list_node) {
+		if (!wait_ret && !IS_ERR_OR_NULL(ss_leaf_entry->cb_data)
+				&& !ss_leaf_entry->cb_data->responded) {
+			GLINK_SSR_ERR("%s %s: Subsystem %s %s\n",
+				"<SSR>", __func__, ss_leaf_entry->edge,
+				"failed to respond. Restarting.");
+
+			notifications_successful = false;
+
+			/* Check for RPM, as it can't be restarted */
+			if (!strcmp(ss_leaf_entry->ssr_name, "rpm"))
+				panic("%s: RPM failed to respond!\n", __func__);
+			else if (!ss_leaf_entry->restarted)
+				subsystem_restart(ss_leaf_entry->ssr_name);
+		}
+		ss_leaf_entry->restarted = false;
+
+		if (!IS_ERR_OR_NULL(ss_leaf_entry->cb_data))
+			ss_leaf_entry->cb_data->responded = false;
+	}
+	complete(&notifications_successful_complete);
+	return 0;
+}
+EXPORT_SYMBOL(notify_for_subsystem);
+
+/**
+ * configure_and_open_channel() - configure and open a G-Link channel for
+ *                                the given subsystem
+ * @ss_info:	The subsys_info structure where the channel will be stored
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+static int configure_and_open_channel(struct subsys_info *ss_info)
+{
+	struct glink_open_config open_cfg;
+	struct ssr_notify_data *cb_data = NULL;
+	void *handle = NULL;
+
+	if (!ss_info) {
+		GLINK_SSR_ERR("<SSR> %s: ss_info structure invalid\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	cb_data = kzalloc(sizeof(struct ssr_notify_data), GFP_KERNEL);
+	if (!cb_data) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate cb_data\n",
+				__func__);
+		return -ENOMEM;
+	}
+	cb_data->responded = false;
+	cb_data->event = GLINK_SSR_EVENT_INIT;
+	cb_data->edge = ss_info->edge;
+	ss_info->cb_data = cb_data;
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+
+	if (ss_info->xprt) {
+		open_cfg.transport = ss_info->xprt;
+	} else {
+		open_cfg.transport = NULL;
+		open_cfg.options = GLINK_OPT_INITIAL_XPORT;
+	}
+	open_cfg.edge = ss_info->edge;
+	open_cfg.name = "glink_ssr";
+	open_cfg.notify_rx = glink_ssr_notify_rx;
+	open_cfg.notify_tx_done = glink_ssr_notify_tx_done;
+	open_cfg.notify_state = glink_ssr_notify_state;
+	open_cfg.notify_rx_intent_req = glink_ssr_notify_rx_intent_req;
+	open_cfg.priv = ss_info->cb_data;
+	open_cfg.rx_intent_req_timeout_ms = GLINK_SSR_INTENT_REQ_TIMEOUT_MS;
+
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		GLINK_SSR_ERR(
+			"<SSR> %s:%s %s: unable to open channel, ret[%d]\n",
+				 open_cfg.edge, open_cfg.name, __func__,
+				 (int)PTR_ERR(handle));
+		kfree(cb_data);
+		cb_data = NULL;
+		ss_info->cb_data = NULL;
+		return PTR_ERR(handle);
+	}
+	ss_info->handle = handle;
+	return 0;
+}
+
+/**
+ * get_info_for_subsystem() - Retrieve information about a subsystem from the
+ *                            global subsystem_info_list
+ * @subsystem:	The name of the subsystem recognized by the SSR
+ *		framework
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_subsystem(const char *subsystem)
+{
+	struct subsys_info *ss_info_entry;
+
+	list_for_each_entry(ss_info_entry, &subsystem_list,
+			subsystem_list_node) {
+		if (!strcmp(subsystem, ss_info_entry->ssr_name))
+			return ss_info_entry;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(get_info_for_subsystem);
+
+/**
+ * get_info_for_edge() - Retrieve information about a subsystem from the
+ *                       global subsystem_info_list
+ * @edge:	The name of the edge recognized by G-Link
+ *
+ * Return: subsys_info structure containing info for the requested subsystem;
+ *         NULL if no structure can be found for the requested subsystem
+ */
+struct subsys_info *get_info_for_edge(const char *edge)
+{
+	struct subsys_info *ss_info_entry;
+
+	list_for_each_entry(ss_info_entry, &subsystem_list,
+			subsystem_list_node) {
+		if (!strcmp(edge, ss_info_entry->edge))
+			return ss_info_entry;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(get_info_for_edge);
+
+/**
+ * glink_ssr_get_seq_num() - Get the current SSR sequence number
+ *
+ * Return: The current SSR sequence number
+ */
+uint32_t glink_ssr_get_seq_num(void)
+{
+	return sequence_number;
+}
+EXPORT_SYMBOL(glink_ssr_get_seq_num);
+
+/**
+ * delete_ss_info_notify_list() - Delete the notify list for a subsystem
+ * @ss_info:	The subsystem info structure
+ */
+static void delete_ss_info_notify_list(struct subsys_info *ss_info)
+{
+	struct subsys_info_leaf *leaf, *temp;
+
+	list_for_each_entry_safe(leaf, temp, &ss_info->notify_list,
+			notify_list_node) {
+		list_del(&leaf->notify_list_node);
+		kfree(leaf);
+	}
+}
+
+/**
+ * glink_ssr_wait_cleanup_done() - Wait for the cleanup_done response from all
+ *                                 notified processors
+ * @ssr_timeout_multiplier: timeout multiplier for waiting on all processors
+ *
+ * Return: True if cleanup_done received from all processors, false otherwise
+ */
+bool glink_ssr_wait_cleanup_done(unsigned int ssr_timeout_multiplier)
+{
+	int wait_ret =
+		wait_for_completion_timeout(&notifications_successful_complete,
+			ssr_timeout_multiplier * GLINK_SSR_REPLY_TIMEOUT);
+	reinit_completion(&notifications_successful_complete);
+
+	if (!notifications_successful || !wait_ret)
+		return false;
+	else
+		return true;
+}
+EXPORT_SYMBOL(glink_ssr_wait_cleanup_done);
+
+/**
+ * glink_ssr_probe() - G-Link SSR platform device probe function
+ * @pdev:	Pointer to the platform device structure
+ *
+ * This function parses DT for information on which subsystems should be
+ * notified when each subsystem undergoes SSR. The global subsystem information
+ * list is built from this information. In addition, SSR notifier callback
+ * functions are registered here for the necessary subsystems.
+ *
+ * Return: 0 on success, standard error codes otherwise
+ */
+static int glink_ssr_probe(struct platform_device *pdev)
+{
+	struct device_node *node;
+	struct device_node *phandle_node;
+	struct restart_notifier_block *nb;
+	struct subsys_info *ss_info;
+	struct subsys_info_leaf *ss_info_leaf;
+	struct glink_link_info *link_info;
+	char *key;
+	const char *edge;
+	const char *subsys_name;
+	const char *xprt;
+	void *handle;
+	void *link_state_handle;
+	int phandle_index = 0;
+	int ret = 0;
+
+	if (!pdev) {
+		GLINK_SSR_ERR("<SSR> %s: pdev is NULL\n", __func__);
+		ret = -EINVAL;
+		goto pdev_null_or_ss_info_alloc_failed;
+	}
+
+	node = pdev->dev.of_node;
+
+	ss_info = kmalloc(sizeof(*ss_info), GFP_KERNEL);
+	if (!ss_info) {
+		GLINK_SSR_ERR("<SSR> %s: %s\n", __func__,
+			"Could not allocate subsystem info structure\n");
+		ret = -ENOMEM;
+		goto pdev_null_or_ss_info_alloc_failed;
+	}
+	INIT_LIST_HEAD(&ss_info->notify_list);
+
+	link_info = kmalloc(sizeof(struct glink_link_info),
+			GFP_KERNEL);
+	if (!link_info) {
+		GLINK_SSR_ERR("<SSR> %s: %s\n", __func__,
+			"Could not allocate link info structure\n");
+		ret = -ENOMEM;
+		goto link_info_alloc_failed;
+	}
+	ss_info->link_info = link_info;
+
+	key = "label";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name) {
+		GLINK_SSR_ERR("<SSR> %s: missing key %s\n", __func__, key);
+		ret = -ENODEV;
+		goto label_or_edge_missing;
+	}
+
+	key = "qcom,edge";
+	edge = of_get_property(node, key, NULL);
+	if (!edge) {
+		GLINK_SSR_ERR("<SSR> %s: missing key %s\n", __func__, key);
+		ret = -ENODEV;
+		goto label_or_edge_missing;
+	}
+
+	key = "qcom,xprt";
+	xprt = of_get_property(node, key, NULL);
+	if (!xprt)
+		GLINK_SSR_LOG(
+			"%s %s: no transport present for subys/edge %s/%s\n",
+			"<SSR>", __func__, subsys_name, edge);
+
+	ss_info->ssr_name = subsys_name;
+	ss_info->edge = edge;
+	ss_info->xprt = xprt;
+	ss_info->notify_list_len = 0;
+	ss_info->link_info->transport = xprt;
+	ss_info->link_info->edge = edge;
+	ss_info->link_info->glink_link_state_notif_cb = glink_ssr_link_state_cb;
+	ss_info->link_up = false;
+	ss_info->handle = NULL;
+	ss_info->link_state_handle = NULL;
+	ss_info->cb_data = NULL;
+	spin_lock_init(&ss_info->link_up_lock);
+
+	nb = kmalloc(sizeof(struct restart_notifier_block), GFP_KERNEL);
+	if (!nb) {
+		GLINK_SSR_ERR("<SSR> %s: Could not allocate notifier block\n",
+				__func__);
+		ret = -ENOMEM;
+		goto label_or_edge_missing;
+	}
+
+	nb->subsystem = subsys_name;
+	nb->nb.notifier_call = glink_ssr_restart_notifier_cb;
+
+	handle = subsys_notif_register_notifier(nb->subsystem, &nb->nb);
+	if (IS_ERR_OR_NULL(handle)) {
+		GLINK_SSR_ERR("<SSR> %s: Could not register SSR notifier cb\n",
+				__func__);
+		ret = -EINVAL;
+		goto nb_registration_fail;
+	}
+
+	key = "qcom,notify-edges";
+	while (true) {
+		phandle_node = of_parse_phandle(node, key, phandle_index++);
+		if (!phandle_node && phandle_index == 1) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: qcom,notify-edges is not present",
+				__func__);
+			ret = -ENODEV;
+			goto notify_edges_not_present;
+		}
+
+		if (!phandle_node)
+			break;
+
+		ss_info_leaf = kmalloc(sizeof(struct subsys_info_leaf),
+				GFP_KERNEL);
+		if (!ss_info_leaf) {
+			GLINK_SSR_ERR(
+				"<SSR> %s: Could not allocate subsys_info_leaf\n",
+				__func__);
+			ret = -ENOMEM;
+			goto notify_edges_not_present;
+		}
+
+		subsys_name = of_get_property(phandle_node, "label", NULL);
+		edge = of_get_property(phandle_node, "qcom,edge", NULL);
+		xprt = of_get_property(phandle_node, "qcom,xprt", NULL);
+
+		of_node_put(phandle_node);
+
+		if (!subsys_name || !edge) {
+			GLINK_SSR_ERR(
+				"%s, %s: Found DT node with invalid data!\n",
+				"<SSR>", __func__);
+			ret = -EINVAL;
+			goto invalid_dt_node;
+		}
+
+		ss_info_leaf->ssr_name = subsys_name;
+		ss_info_leaf->edge = edge;
+		ss_info_leaf->xprt = xprt;
+		ss_info_leaf->restarted = false;
+		list_add_tail(&ss_info_leaf->notify_list_node,
+				&ss_info->notify_list);
+		ss_info->notify_list_len++;
+	}
+
+	list_add_tail(&ss_info->subsystem_list_node, &subsystem_list);
+
+	link_state_handle = glink_register_link_state_cb(ss_info->link_info,
+			NULL);
+	if (IS_ERR_OR_NULL(link_state_handle)) {
+		GLINK_SSR_ERR("<SSR> %s: Could not register link state cb\n",
+				__func__);
+		ret = PTR_ERR(link_state_handle);
+		goto link_state_register_fail;
+	}
+	ss_info->link_state_handle = link_state_handle;
+
+	return 0;
+
+link_state_register_fail:
+	list_del(&ss_info->subsystem_list_node);
+invalid_dt_node:
+	kfree(ss_info_leaf);
+notify_edges_not_present:
+	subsys_notif_unregister_notifier(handle, &nb->nb);
+	delete_ss_info_notify_list(ss_info);
+nb_registration_fail:
+	kfree(nb);
+label_or_edge_missing:
+	kfree(link_info);
+link_info_alloc_failed:
+	kfree(ss_info);
+pdev_null_or_ss_info_alloc_failed:
+	return ret;
+}
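+
+/*
+ * Illustrative device tree node for this driver. The property names are the
+ * ones parsed above; the label, edge and phandle values are examples only:
+ *
+ *	qcom,glink-ssr-modem {
+ *		compatible = "qcom,glink_ssr";
+ *		label = "modem";
+ *		qcom,edge = "mpss";
+ *		qcom,notify-edges = <&glink_ssr_adsp>, <&glink_ssr_rpm>;
+ *		qcom,xprt = "smem";
+ *	};
+ */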
+
+static const struct of_device_id match_table[] = {
+	{ .compatible = "qcom,glink_ssr" },
+	{},
+};
+
+static struct platform_driver glink_ssr_driver = {
+	.probe = glink_ssr_probe,
+	.driver = {
+		.name = "msm_glink_ssr",
+		.owner = THIS_MODULE,
+		.of_match_table = match_table,
+	},
+};
+
+static int __init glink_ssr_init(void)
+{
+	int ret;
+
+	glink_ssr_log_ctx =
+		ipc_log_context_create(NUM_LOG_PAGES, "glink_ssr", 0);
+	glink_ssr_wq = create_singlethread_workqueue("glink_ssr_wq");
+	ret = platform_driver_register(&glink_ssr_driver);
+	if (ret)
+		GLINK_SSR_ERR("<SSR> %s: %s ret: %d\n", __func__,
+				"glink_ssr driver registration failed", ret);
+
+	notifications_successful = false;
+	init_completion(&notifications_successful_complete);
+	return 0;
+}
+
+module_init(glink_ssr_init);
+
+MODULE_DESCRIPTION("MSM Generic Link (G-Link) SSR Module");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/glink_xprt_if.h b/drivers/soc/qcom/glink_xprt_if.h
new file mode 100644
index 0000000..f4d5a3b
--- /dev/null
+++ b/drivers/soc/qcom/glink_xprt_if.h
@@ -0,0 +1,204 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_XPRT_IF_H_
+#define _SOC_QCOM_GLINK_XPRT_IF_H_
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct glink_core_xprt_ctx;
+struct glink_core_if;
+struct channel_ctx;
+struct glink_core_rx_intent;
+
+enum buf_type {
+	LINEAR = 0,
+	VECTOR,
+};
+
+enum xprt_ids {
+	SMEM_XPRT_ID = 100,
+	SPIV2_XPRT_ID = SMEM_XPRT_ID,
+	SMD_TRANS_XPRT_ID = 200,
+	LLOOP_XPRT_ID = 300,
+	MOCK_XPRT_HIGH_ID = 390,
+	MOCK_XPRT_ID = 400,
+	MOCK_XPRT_LOW_ID = 410,
+};
+
+#define GCAP_SIGNALS		BIT(0)
+#define GCAP_INTENTLESS		BIT(1)
+#define GCAP_TRACER_PKT		BIT(2)
+#define GCAP_AUTO_QUEUE_RX_INT	BIT(3)
+
+/**
+ * struct glink_core_tx_pkt - Transmit Packet information
+ * @list_node:		Node in the channel's transmit queue.
+ * @list_done:		Node in the channel's acknowledgment queue.
+ * @pkt_priv:		Private information specific to the packet.
+ * @data:		Pointer to the buffer containing the data.
+ * @riid:		Remote receive intent used to transmit the packet.
+ * @rcid:		Remote channel receiving the packet.
+ * @size:		Total size of the data in the packet.
+ * @tx_len:		Data length to transmit in the current transmit slot.
+ * @size_remaining:	Remaining size of the data in the packet.
+ * @intent_size:	Receive intent size queued by the remote side.
+ * @tracer_pkt:		Flag to indicate if the packet is a tracer packet.
+ * @iovec:		Pointer to the vector buffer packet.
+ * @vprovider:		Packet-specific virtual buffer provider function.
+ * @pprovider:		Packet-specific physical buffer provider function.
+ * @cookie:		Transport-specific cookie
+ * @pkt_ref:		Active references to the packet.
+ */
+struct glink_core_tx_pkt {
+	struct list_head list_node;
+	struct list_head list_done;
+	const void *pkt_priv;
+	const void *data;
+	uint32_t riid;
+	uint32_t rcid;
+	uint32_t size;
+	uint32_t tx_len;
+	uint32_t size_remaining;
+	size_t intent_size;
+	bool tracer_pkt;
+	void *iovec;
+	void * (*vprovider)(void *iovec, size_t offset, size_t *size);
+	void * (*pprovider)(void *iovec, size_t offset, size_t *size);
+	void *cookie;
+	struct rwref_lock pkt_ref;
+};
+
+/**
+ * struct glink_transport_if - transport interface registered with the
+ *			       G-Link core
+ *
+ * Note - each call to register the interface must pass a unique
+ * instance of this data.
+ */
+struct glink_transport_if {
+	/* Negotiation */
+	void (*tx_cmd_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	void (*tx_cmd_version_ack)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+	uint32_t (*set_version)(struct glink_transport_if *if_ptr,
+			uint32_t version,
+			uint32_t features);
+
+	/* channel state */
+	int (*tx_cmd_ch_open)(struct glink_transport_if *if_ptr, uint32_t lcid,
+			const char *name, uint16_t req_xprt);
+	int (*tx_cmd_ch_close)(struct glink_transport_if *if_ptr,
+			uint32_t lcid);
+	void (*tx_cmd_ch_remote_open_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid, uint16_t xprt_resp);
+	void (*tx_cmd_ch_remote_close_ack)(struct glink_transport_if *if_ptr,
+			uint32_t rcid);
+	int (*ssr)(struct glink_transport_if *if_ptr);
+
+	/* channel data */
+	int (*allocate_rx_intent)(struct glink_transport_if *if_ptr,
+				  size_t size,
+				  struct glink_core_rx_intent *intent);
+	int (*deallocate_rx_intent)(struct glink_transport_if *if_ptr,
+				    struct glink_core_rx_intent *intent);
+	/* Optional */
+	int (*reuse_rx_intent)(struct glink_transport_if *if_ptr,
+			       struct glink_core_rx_intent *intent);
+
+	int (*tx_cmd_local_rx_intent)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, size_t size, uint32_t liid);
+	void (*tx_cmd_local_rx_done)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint32_t liid, bool reuse);
+	int (*tx)(struct glink_transport_if *if_ptr, uint32_t lcid,
+			struct glink_core_tx_pkt *pctx);
+	int (*tx_cmd_rx_intent_req)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, size_t size);
+	int (*tx_cmd_remote_rx_intent_req_ack)(
+			struct glink_transport_if *if_ptr,
+			uint32_t lcid, bool granted);
+	int (*tx_cmd_set_sigs)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, uint32_t sigs);
+
+	/* Optional.  If NULL at xprt registration, dummies will be used */
+	int (*poll)(struct glink_transport_if *if_ptr, uint32_t lcid);
+	int (*mask_rx_irq)(struct glink_transport_if *if_ptr, uint32_t lcid,
+			bool mask, void *pstruct);
+	int (*wait_link_down)(struct glink_transport_if *if_ptr);
+	int (*tx_cmd_tracer_pkt)(struct glink_transport_if *if_ptr,
+			uint32_t lcid, struct glink_core_tx_pkt *pctx);
+	unsigned long (*get_power_vote_ramp_time)(
+			struct glink_transport_if *if_ptr, uint32_t state);
+	int (*power_vote)(struct glink_transport_if *if_ptr, uint32_t state);
+	int (*power_unvote)(struct glink_transport_if *if_ptr);
+	/*
+	 * Keep data pointers at the end of the structure after all function
+	 * pointer to allow for in-place initialization.
+	 */
+
+	/* private pointer for core */
+	struct glink_core_xprt_ctx *glink_core_priv;
+
+	/* core pointer (set during transport registration) */
+	struct glink_core_if *glink_core_if_ptr;
+};
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * get_tx_vaddr() - Get the virtual address from which the tx has to be done
+ * @pctx:	transmit packet context.
+ * @offset:	offset into the packet.
+ * @tx_size:	pointer to hold the length of the contiguous buffer
+ *              space.
+ *
+ * Return:	Address from which the tx has to be done.
+ */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+				 size_t *tx_size)
+{
+	void *pdata;
+
+	if (pctx->vprovider) {
+		return pctx->vprovider((void *)pctx->iovec, offset, tx_size);
+	} else if (pctx->pprovider) {
+		pdata = pctx->pprovider((void *)pctx->iovec, offset, tx_size);
+		return phys_to_virt((unsigned long)pdata);
+	}
+	return NULL;
+}
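+
+/*
+ * Illustrative use of get_tx_vaddr() in a transport's tx path (a sketch,
+ * not taken from any particular transport):
+ *
+ *	size_t offset = pctx->size - pctx->size_remaining;
+ *	size_t tx_size = 0;
+ *	void *vaddr = get_tx_vaddr(pctx, offset, &tx_size);
+ *
+ * On return, vaddr points at the data for 'offset' and tx_size holds the
+ * number of contiguous bytes available from that point.
+ */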
+
+/**
+ * glink_xprt_name_to_id() - convert transport name to id
+ * @name:	Name of the transport.
+ * @id:		Assigned id.
+ *
+ * Return: 0 on success or standard Linux error code.
+ */
+int glink_xprt_name_to_id(const char *name, uint16_t *id);
+
+#else /* CONFIG_MSM_GLINK */
+static inline void *get_tx_vaddr(struct glink_core_tx_pkt *pctx, size_t offset,
+				 size_t *tx_size)
+{
+	return NULL;
+}
+
+static inline int glink_xprt_name_to_id(const char *name, uint16_t *id)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_XPRT_IF_H_ */
diff --git a/drivers/soc/qcom/ipc_router_glink_xprt.c b/drivers/soc/qcom/ipc_router_glink_xprt.c
new file mode 100644
index 0000000..9a9d73b
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_glink_xprt.c
@@ -0,0 +1,871 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER GLINK XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/glink.h>
+#include <soc/qcom/subsystem_restart.h>
+
+static int ipc_router_glink_xprt_debug_mask;
+module_param_named(debug_mask, ipc_router_glink_xprt_debug_mask,
+		   int, 0664);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (ipc_router_glink_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
+#define IPC_RTR_XPRT_NAME_LEN (2 * GLINK_NAME_SIZE)
+#define PIL_SUBSYSTEM_NAME_LEN 32
+#define DEFAULT_NUM_INTENTS 5
+#define DEFAULT_RX_INTENT_SIZE 2048
+
+/**
+ * ipc_router_glink_xprt - IPC Router's GLINK XPRT structure
+ * @list: IPC router's GLINK XPRT list.
+ * @ch_name: GLink Channel Name.
+ * @edge: Edge between the local node and the remote node.
+ * @transport: Physical Transport Name as identified by Glink.
+ * @pil_edge: Edge name understood by PIL.
+ * @ipc_rtr_xprt_name: XPRT Name to be registered with IPC Router.
+ * @xprt: IPC Router XPRT structure to contain XPRT specific info.
+ * @ch_hndl: Opaque Channel handle returned by GLink.
+ * @xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @ss_reset_rwlock: Read-Write lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @pil: pil handle to the remote subsystem
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ */
+struct ipc_router_glink_xprt {
+	struct list_head list;
+	char ch_name[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	char pil_edge[PIL_SUBSYSTEM_NAME_LEN];
+	char ipc_rtr_xprt_name[IPC_RTR_XPRT_NAME_LEN];
+	struct msm_ipc_router_xprt xprt;
+	void *ch_hndl;
+	struct workqueue_struct *xprt_wq;
+	struct rw_semaphore ss_reset_rwlock;
+	int ss_reset;
+	void *pil;
+	struct completion sft_close_complete;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+	bool disable_pil_loading;
+};
+
+struct ipc_router_glink_xprt_work {
+	struct ipc_router_glink_xprt *glink_xprtp;
+	struct work_struct work;
+};
+
+struct queue_rx_intent_work {
+	struct ipc_router_glink_xprt *glink_xprtp;
+	size_t intent_size;
+	struct work_struct work;
+};
+
+struct read_work {
+	struct ipc_router_glink_xprt *glink_xprtp;
+	void *iovec;
+	size_t iovec_size;
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size);
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size);
+	struct work_struct work;
+};
+
+static void glink_xprt_read_data(struct work_struct *work);
+static void glink_xprt_open_event(struct work_struct *work);
+static void glink_xprt_close_event(struct work_struct *work);
+
+/**
+ * ipc_router_glink_xprt_config - Config. Info. of each GLINK XPRT
+ * @ch_name:		Name of the GLINK endpoint exported by GLINK driver.
+ * @edge:		Edge between the local node and remote node.
+ * @transport:		Physical Transport Name as identified by GLINK.
+ * @pil_edge:		Edge name understood by PIL.
+ * @ipc_rtr_xprt_name:	XPRT Name to be registered with IPC Router.
+ * @link_id:		Network Cluster ID to which this XPRT belongs to.
+ * @xprt_version:	IPC Router header version supported by this XPRT.
+ * @xprt_option:	XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ */
+struct ipc_router_glink_xprt_config {
+	char ch_name[GLINK_NAME_SIZE];
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	char ipc_rtr_xprt_name[IPC_RTR_XPRT_NAME_LEN];
+	char pil_edge[PIL_SUBSYSTEM_NAME_LEN];
+	uint32_t link_id;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+	bool disable_pil_loading;
+};
+
+#define MODULE_NAME "ipc_router_glink_xprt"
+static DEFINE_MUTEX(glink_xprt_list_lock_lha1);
+static LIST_HEAD(glink_xprt_list);
+
+static struct workqueue_struct *glink_xprt_wq;
+
+static void glink_xprt_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				     void *priv);
+static struct glink_link_info glink_xprt_link_info = {
+			NULL, NULL, glink_xprt_link_state_cb};
+static void *glink_xprt_link_state_notif_handle;
+
+struct xprt_state_work_info {
+	char edge[GLINK_NAME_SIZE];
+	char transport[GLINK_NAME_SIZE];
+	uint32_t link_state;
+	struct work_struct work;
+};
+
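+/* Evaluates to true if (a) + (b) would overflow the given unsigned type */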
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
+
+static void *glink_xprt_vbuf_provider(void *iovec, size_t offset,
+				      size_t *buf_size)
+{
+	struct rr_packet *pkt = (struct rr_packet *)iovec;
+	struct sk_buff *skb;
+	size_t temp_size = 0;
+
+	if (unlikely(!pkt || !buf_size))
+		return NULL;
+
+	*buf_size = 0;
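+	/*
+	 * Walk the packet's fragment queue until the skb containing 'offset'
+	 * is found, then return a pointer into that skb along with the
+	 * number of contiguous bytes available from 'offset'.
+	 */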
+	skb_queue_walk(pkt->pkt_fragment_q, skb) {
+		if (unlikely(OVERFLOW_ADD_UNSIGNED(size_t, temp_size,
+						   skb->len)))
+			break;
+
+		temp_size += skb->len;
+		if (offset >= temp_size)
+			continue;
+
+		*buf_size = temp_size - offset;
+		return (void *)skb->data + skb->len - *buf_size;
+	}
+	return NULL;
+}
+
+/**
+ * ipc_router_glink_xprt_set_version() - Set the IPC Router version in transport
+ * @xprt:	Reference to the transport structure.
+ * @version:	The version to be set in transport.
+ */
+static void ipc_router_glink_xprt_set_version(
+	struct msm_ipc_router_xprt *xprt, unsigned int version)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+
+	if (!xprt)
+		return;
+	glink_xprtp = container_of(xprt, struct ipc_router_glink_xprt, xprt);
+	glink_xprtp->xprt_version = version;
+}
+
+static int ipc_router_glink_xprt_get_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	glink_xprtp = container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	return (int)glink_xprtp->xprt_version;
+}
+
+static int ipc_router_glink_xprt_get_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	glink_xprtp = container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	return (int)glink_xprtp->xprt_option;
+}
+
+static int ipc_router_glink_xprt_write(void *data, uint32_t len,
+				       struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct rr_packet *temp_pkt;
+	int ret;
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	temp_pkt = clone_pkt(pkt);
+	if (!temp_pkt) {
+		IPC_RTR_ERR("%s: Error cloning packet while tx\n", __func__);
+		return -ENOMEM;
+	}
+
+	down_read(&glink_xprtp->ss_reset_rwlock);
+	if (glink_xprtp->ss_reset) {
+		release_pkt(temp_pkt);
+		IPC_RTR_ERR("%s: %s chnl reset\n", __func__, xprt->name);
+		ret = -ENETRESET;
+		goto out_write_data;
+	}
+
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	ret = glink_txv(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+			(void *)temp_pkt, len, glink_xprt_vbuf_provider,
+			NULL, true);
+	if (ret < 0) {
+		release_pkt(temp_pkt);
+		IPC_RTR_ERR("%s: Error %d while tx\n", __func__, ret);
+		goto out_write_data;
+	}
+	ret = len;
+	D("%s:%s: TX Complete for %d bytes @ %p\n", __func__,
+	  glink_xprtp->ipc_rtr_xprt_name, len, temp_pkt);
+
+out_write_data:
+	up_read(&glink_xprtp->ss_reset_rwlock);
+	return ret;
+}
+
+static int ipc_router_glink_xprt_close(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	down_write(&glink_xprtp->ss_reset_rwlock);
+	glink_xprtp->ss_reset = 1;
+	up_write(&glink_xprtp->ss_reset_rwlock);
+	return glink_close(glink_xprtp->ch_hndl);
+}
+
+static void glink_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		container_of(xprt, struct ipc_router_glink_xprt, xprt);
+
+	complete_all(&glink_xprtp->sft_close_complete);
+}
+
+static struct rr_packet *glink_xprt_copy_data(struct read_work *rx_work)
+{
+	void *buf, *pbuf, *dest_buf;
+	size_t buf_size;
+	struct rr_packet *pkt;
+	struct sk_buff *skb;
+
+	pkt = create_pkt(NULL);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: Couldn't alloc rr_packet\n", __func__);
+		return NULL;
+	}
+
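+	/*
+	 * Repeatedly pull contiguous chunks out of the received iovec (via
+	 * the virtual buffer provider if available, otherwise the physical
+	 * one) and copy each chunk into an skb queued on the rr_packet,
+	 * until the provider reports no more data.
+	 */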
+	do {
+		buf_size = 0;
+		if (rx_work->vbuf_provider) {
+			buf = rx_work->vbuf_provider(rx_work->iovec,
+						pkt->length, &buf_size);
+		} else {
+			pbuf = rx_work->pbuf_provider(rx_work->iovec,
+						pkt->length, &buf_size);
+			buf = phys_to_virt((unsigned long)pbuf);
+		}
+		if (!buf_size || !buf)
+			break;
+
+		skb = alloc_skb(buf_size, GFP_KERNEL);
+		if (!skb) {
+			IPC_RTR_ERR("%s: Couldn't alloc skb of size %zu\n",
+				    __func__, buf_size);
+			release_pkt(pkt);
+			return NULL;
+		}
+		dest_buf = skb_put(skb, buf_size);
+		memcpy(dest_buf, buf, buf_size);
+		skb_queue_tail(pkt->pkt_fragment_q, skb);
+		pkt->length += buf_size;
+	} while (buf && buf_size);
+	return pkt;
+}
+
+static void glink_xprt_read_data(struct work_struct *work)
+{
+	struct rr_packet *pkt;
+	struct read_work *rx_work =
+		container_of(work, struct read_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp = rx_work->glink_xprtp;
+	bool reuse_intent = false;
+
+	down_read(&glink_xprtp->ss_reset_rwlock);
+	if (glink_xprtp->ss_reset) {
+		IPC_RTR_ERR("%s: %s channel reset\n",
+			__func__, glink_xprtp->xprt.name);
+		goto out_read_data;
+	}
+
+	D("%s %zu bytes @ %p\n", __func__, rx_work->iovec_size, rx_work->iovec);
+	if (rx_work->iovec_size <= DEFAULT_RX_INTENT_SIZE)
+		reuse_intent = true;
+
+	pkt = glink_xprt_copy_data(rx_work);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: Error copying data\n", __func__);
+		goto out_read_data;
+	}
+
+	msm_ipc_router_xprt_notify(&glink_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_DATA, pkt);
+	release_pkt(pkt);
+out_read_data:
+	glink_rx_done(glink_xprtp->ch_hndl, rx_work->iovec, reuse_intent);
+	kfree(rx_work);
+	up_read(&glink_xprtp->ss_reset_rwlock);
+}
+
+static void glink_xprt_open_event(struct work_struct *work)
+{
+	struct ipc_router_glink_xprt_work *xprt_work =
+		container_of(work, struct ipc_router_glink_xprt_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp = xprt_work->glink_xprtp;
+	int i;
+
+	msm_ipc_router_xprt_notify(&glink_xprtp->xprt,
+				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	  __func__, glink_xprtp->xprt.name);
+	for (i = 0; i < DEFAULT_NUM_INTENTS; i++)
+		glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+				      DEFAULT_RX_INTENT_SIZE);
+	kfree(xprt_work);
+}
+
+static void glink_xprt_close_event(struct work_struct *work)
+{
+	struct ipc_router_glink_xprt_work *xprt_work =
+		container_of(work, struct ipc_router_glink_xprt_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp = xprt_work->glink_xprtp;
+
+	init_completion(&glink_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(&glink_xprtp->xprt,
+				IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	   __func__, glink_xprtp->xprt.name);
+	wait_for_completion(&glink_xprtp->sft_close_complete);
+	kfree(xprt_work);
+}
+
+static void glink_xprt_qrx_intent_worker(struct work_struct *work)
+{
+	struct queue_rx_intent_work *qrx_intent_work =
+		container_of(work, struct queue_rx_intent_work, work);
+	struct ipc_router_glink_xprt *glink_xprtp =
+					qrx_intent_work->glink_xprtp;
+
+	glink_queue_rx_intent(glink_xprtp->ch_hndl, (void *)glink_xprtp,
+			      qrx_intent_work->intent_size);
+	kfree(qrx_intent_work);
+}
+
+static void msm_ipc_unload_subsystem(struct ipc_router_glink_xprt *glink_xprtp)
+{
+	if (glink_xprtp->pil) {
+		subsystem_put(glink_xprtp->pil);
+		glink_xprtp->pil = NULL;
+	}
+}
+
+static void *msm_ipc_load_subsystem(struct ipc_router_glink_xprt *glink_xprtp)
+{
+	void *pil = NULL;
+
+	if (!glink_xprtp->disable_pil_loading) {
+		pil = subsystem_get(glink_xprtp->pil_edge);
+		if (IS_ERR(pil)) {
+			pr_err("%s: Failed to load %s err = [%ld]\n",
+				__func__, glink_xprtp->pil_edge, PTR_ERR(pil));
+			pil = NULL;
+		}
+	}
+	return pil;
+}
+
+static void glink_xprt_notify_rxv(void *handle, const void *priv,
+	const void *pkt_priv, void *ptr, size_t size,
+	void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size))
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+	struct read_work *rx_work;
+
+	rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
+	if (!rx_work) {
+		IPC_RTR_ERR("%s: couldn't allocate read_work\n", __func__);
+		glink_rx_done(glink_xprtp->ch_hndl, ptr, true);
+		return;
+	}
+
+	rx_work->glink_xprtp = glink_xprtp;
+	rx_work->iovec = ptr;
+	rx_work->iovec_size = size;
+	rx_work->vbuf_provider = vbuf_provider;
+	rx_work->pbuf_provider = pbuf_provider;
+	INIT_WORK(&rx_work->work, glink_xprt_read_data);
+	queue_work(glink_xprtp->xprt_wq, &rx_work->work);
+}
+
+static void glink_xprt_notify_tx_done(void *handle, const void *priv,
+				      const void *pkt_priv, const void *ptr)
+{
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+	struct rr_packet *temp_pkt = (struct rr_packet *)ptr;
+
+	D("%s:%s: @ %p\n", __func__, glink_xprtp->ipc_rtr_xprt_name, ptr);
+	release_pkt(temp_pkt);
+}
+
+static bool glink_xprt_notify_rx_intent_req(void *handle, const void *priv,
+					    size_t sz)
+{
+	struct queue_rx_intent_work *qrx_intent_work;
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+
+	if (sz <= DEFAULT_RX_INTENT_SIZE)
+		return true;
+
+	qrx_intent_work = kmalloc(sizeof(struct queue_rx_intent_work),
+				  GFP_ATOMIC);
+	if (!qrx_intent_work) {
+		IPC_RTR_ERR("%s: Couldn't queue rx_intent of %zu bytes\n",
+			    __func__, sz);
+		return false;
+	}
+	qrx_intent_work->glink_xprtp = glink_xprtp;
+	qrx_intent_work->intent_size = sz;
+	INIT_WORK(&qrx_intent_work->work, glink_xprt_qrx_intent_worker);
+	queue_work(glink_xprtp->xprt_wq, &qrx_intent_work->work);
+	return true;
+}
+
+static void glink_xprt_notify_state(void *handle, const void *priv,
+				    unsigned int event)
+{
+	struct ipc_router_glink_xprt_work *xprt_work;
+	struct ipc_router_glink_xprt *glink_xprtp =
+		(struct ipc_router_glink_xprt *)priv;
+
+	D("%s: %s:%s - State %d\n",
+	  __func__, glink_xprtp->edge, glink_xprtp->transport, event);
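+	/*
+	 * This callback may be invoked from atomic context (hence the
+	 * GFP_ATOMIC allocations below), so the OPEN/CLOSE notifications to
+	 * IPC Router are deferred to the XPRT workqueue.
+	 */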
+	switch (event) {
+	case GLINK_CONNECTED:
+		if (IS_ERR_OR_NULL(glink_xprtp->ch_hndl))
+			glink_xprtp->ch_hndl = handle;
+		down_write(&glink_xprtp->ss_reset_rwlock);
+		glink_xprtp->ss_reset = 0;
+		up_write(&glink_xprtp->ss_reset_rwlock);
+		xprt_work = kmalloc(sizeof(struct ipc_router_glink_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->glink_xprtp = glink_xprtp;
+		INIT_WORK(&xprt_work->work, glink_xprt_open_event);
+		queue_work(glink_xprtp->xprt_wq, &xprt_work->work);
+		break;
+
+	case GLINK_LOCAL_DISCONNECTED:
+	case GLINK_REMOTE_DISCONNECTED:
+		down_write(&glink_xprtp->ss_reset_rwlock);
+		if (glink_xprtp->ss_reset) {
+			up_write(&glink_xprtp->ss_reset_rwlock);
+			break;
+		}
+		glink_xprtp->ss_reset = 1;
+		up_write(&glink_xprtp->ss_reset_rwlock);
+		xprt_work = kmalloc(sizeof(struct ipc_router_glink_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->glink_xprtp = glink_xprtp;
+		INIT_WORK(&xprt_work->work, glink_xprt_close_event);
+		queue_work(glink_xprtp->xprt_wq, &xprt_work->work);
+		break;
+	}
+}
+
+static void glink_xprt_ch_open(struct ipc_router_glink_xprt *glink_xprtp)
+{
+	struct glink_open_config open_cfg = {0};
+
+	if (!IS_ERR_OR_NULL(glink_xprtp->ch_hndl))
+		return;
+
+	open_cfg.transport = glink_xprtp->transport;
+	open_cfg.options |= GLINK_OPT_INITIAL_XPORT;
+	open_cfg.edge = glink_xprtp->edge;
+	open_cfg.name = glink_xprtp->ch_name;
+	open_cfg.notify_rx = NULL;
+	open_cfg.notify_rxv = glink_xprt_notify_rxv;
+	open_cfg.notify_tx_done = glink_xprt_notify_tx_done;
+	open_cfg.notify_state = glink_xprt_notify_state;
+	open_cfg.notify_rx_intent_req = glink_xprt_notify_rx_intent_req;
+	open_cfg.priv = glink_xprtp;
+
+	glink_xprtp->pil = msm_ipc_load_subsystem(glink_xprtp);
+	glink_xprtp->ch_hndl = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(glink_xprtp->ch_hndl)) {
+		IPC_RTR_ERR("%s:%s:%s %s: unable to open channel\n",
+			    open_cfg.transport, open_cfg.edge,
+			    open_cfg.name, __func__);
+		msm_ipc_unload_subsystem(glink_xprtp);
+	}
+}
+
+/**
+ * glink_xprt_link_state_worker() - Function to handle link state updates
+ * @work: Pointer to the work item in the link_state_work_info.
+ *
+ * This worker function is scheduled when there is a link state update. Since
+ * this XPRT registers for link state updates without specifying a transport,
+ * it receives updates for all transports on the edges it has registered for.
+ */
+static void glink_xprt_link_state_worker(struct work_struct *work)
+{
+	struct xprt_state_work_info *xs_info =
+		container_of(work, struct xprt_state_work_info, work);
+	struct ipc_router_glink_xprt *glink_xprtp;
+
+	if (xs_info->link_state == GLINK_LINK_STATE_UP) {
+		D("%s: LINK_STATE_UP %s:%s\n",
+		  __func__, xs_info->edge, xs_info->transport);
+		mutex_lock(&glink_xprt_list_lock_lha1);
+		list_for_each_entry(glink_xprtp, &glink_xprt_list, list) {
+			if (strcmp(glink_xprtp->edge, xs_info->edge) ||
+			    strcmp(glink_xprtp->transport, xs_info->transport))
+				continue;
+			glink_xprt_ch_open(glink_xprtp);
+		}
+		mutex_unlock(&glink_xprt_list_lock_lha1);
+	} else if (xs_info->link_state == GLINK_LINK_STATE_DOWN) {
+		D("%s: LINK_STATE_DOWN %s:%s\n",
+		  __func__, xs_info->edge, xs_info->transport);
+		mutex_lock(&glink_xprt_list_lock_lha1);
+		list_for_each_entry(glink_xprtp, &glink_xprt_list, list) {
+			if (strcmp(glink_xprtp->edge, xs_info->edge) ||
+			    strcmp(glink_xprtp->transport, xs_info->transport)
+			    || IS_ERR_OR_NULL(glink_xprtp->ch_hndl))
+				continue;
+			glink_close(glink_xprtp->ch_hndl);
+			glink_xprtp->ch_hndl = NULL;
+			msm_ipc_unload_subsystem(glink_xprtp);
+		}
+		mutex_unlock(&glink_xprt_list_lock_lha1);
+
+	}
+	kfree(xs_info);
+}
+
+/**
+ * glink_xprt_link_state_cb() - Callback to receive link state updates
+ * @cb_info: Information containing link & its state.
+ * @priv: Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the IPC Router
+ * regarding the link state updates. This function is registered with the
+ * GLINK core by IPC Router during glink_register_link_state_cb().
+ */
+static void glink_xprt_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct xprt_state_work_info *xs_info;
+
+	if (!cb_info)
+		return;
+
+	D("%s: %s:%s\n", __func__, cb_info->edge, cb_info->transport);
+	xs_info = kmalloc(sizeof(*xs_info), GFP_KERNEL);
+	if (!xs_info) {
+		IPC_RTR_ERR("%s: Error allocating xprt state info\n", __func__);
+		return;
+	}
+
+	strlcpy(xs_info->edge, cb_info->edge, GLINK_NAME_SIZE);
+	strlcpy(xs_info->transport, cb_info->transport, GLINK_NAME_SIZE);
+	xs_info->link_state = cb_info->link_state;
+	INIT_WORK(&xs_info->work, glink_xprt_link_state_worker);
+	queue_work(glink_xprt_wq, &xs_info->work);
+}
+
+/**
+ * ipc_router_glink_config_init() - init GLINK xprt configs
+ *
+ * @glink_xprt_config: pointer to GLINK Channel configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the GLINK XPRT pointer with
+ * the GLINK XPRT configurations either from device tree or static arrays.
+ */
+static int ipc_router_glink_config_init(
+		struct ipc_router_glink_xprt_config *glink_xprt_config)
+{
+	struct ipc_router_glink_xprt *glink_xprtp;
+	char xprt_wq_name[GLINK_NAME_SIZE];
+
+	glink_xprtp = kzalloc(sizeof(struct ipc_router_glink_xprt), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(glink_xprtp)) {
+		IPC_RTR_ERR("%s:%s:%s:%s glink_xprtp alloc failed\n",
+			    __func__, glink_xprt_config->ch_name,
+			    glink_xprt_config->edge,
+			    glink_xprt_config->transport);
+		return -ENOMEM;
+	}
+
+	glink_xprtp->xprt.link_id = glink_xprt_config->link_id;
+	glink_xprtp->xprt_version = glink_xprt_config->xprt_version;
+	glink_xprtp->xprt_option = glink_xprt_config->xprt_option;
+	glink_xprtp->disable_pil_loading =
+				glink_xprt_config->disable_pil_loading;
+
+	if (!glink_xprtp->disable_pil_loading)
+		strlcpy(glink_xprtp->pil_edge, glink_xprt_config->pil_edge,
+				PIL_SUBSYSTEM_NAME_LEN);
+	strlcpy(glink_xprtp->ch_name, glink_xprt_config->ch_name,
+		GLINK_NAME_SIZE);
+	strlcpy(glink_xprtp->edge, glink_xprt_config->edge, GLINK_NAME_SIZE);
+	strlcpy(glink_xprtp->transport,
+		glink_xprt_config->transport, GLINK_NAME_SIZE);
+	strlcpy(glink_xprtp->ipc_rtr_xprt_name,
+		glink_xprt_config->ipc_rtr_xprt_name, IPC_RTR_XPRT_NAME_LEN);
+	glink_xprtp->xprt.name = glink_xprtp->ipc_rtr_xprt_name;
+
+	glink_xprtp->xprt.get_version =	ipc_router_glink_xprt_get_version;
+	glink_xprtp->xprt.set_version =	ipc_router_glink_xprt_set_version;
+	glink_xprtp->xprt.get_option = ipc_router_glink_xprt_get_option;
+	glink_xprtp->xprt.read_avail = NULL;
+	glink_xprtp->xprt.read = NULL;
+	glink_xprtp->xprt.write_avail = NULL;
+	glink_xprtp->xprt.write = ipc_router_glink_xprt_write;
+	glink_xprtp->xprt.close = ipc_router_glink_xprt_close;
+	glink_xprtp->xprt.sft_close_done = glink_xprt_sft_close_done;
+	glink_xprtp->xprt.priv = NULL;
+
+	init_rwsem(&glink_xprtp->ss_reset_rwlock);
+	glink_xprtp->ss_reset = 0;
+
+	scnprintf(xprt_wq_name, GLINK_NAME_SIZE, "%s_%s_%s",
+			glink_xprtp->ch_name, glink_xprtp->edge,
+			glink_xprtp->transport);
+	glink_xprtp->xprt_wq = create_singlethread_workqueue(xprt_wq_name);
+	if (IS_ERR_OR_NULL(glink_xprtp->xprt_wq)) {
+		IPC_RTR_ERR("%s:%s:%s:%s wq alloc failed\n",
+			    __func__, glink_xprt_config->ch_name,
+			    glink_xprt_config->edge,
+			    glink_xprt_config->transport);
+		kfree(glink_xprtp);
+		return -EFAULT;
+	}
+
+	mutex_lock(&glink_xprt_list_lock_lha1);
+	list_add(&glink_xprtp->list, &glink_xprt_list);
+	mutex_unlock(&glink_xprt_list_lock_lha1);
+
+	glink_xprt_link_info.edge = glink_xprtp->edge;
+	glink_xprt_link_state_notif_handle = glink_register_link_state_cb(
+						&glink_xprt_link_info, NULL);
+	return 0;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @glink_xprt_config: pointer to GLINK XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct ipc_router_glink_xprt_config *glink_xprt_config)
+{
+	int ret;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *edge;
+	const char *transport;
+	const char *pil_edge;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(glink_xprt_config->ch_name, ch_name, GLINK_NAME_SIZE);
+
+	key = "qcom,xprt-remote";
+	edge = of_get_property(node, key, NULL);
+	if (!edge)
+		goto error;
+	strlcpy(glink_xprt_config->edge, edge, GLINK_NAME_SIZE);
+
+	key = "qcom,glink-xprt";
+	transport = of_get_property(node, key, NULL);
+	if (!transport)
+		goto error;
+	strlcpy(glink_xprt_config->transport, transport,
+		GLINK_NAME_SIZE);
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	glink_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	glink_xprt_config->xprt_version = version;
+
+	key = "qcom,fragmented-data";
+	glink_xprt_config->xprt_option = of_property_read_bool(node, key);
+
+	key = "qcom,pil-label";
+	pil_edge = of_get_property(node, key, NULL);
+	if (pil_edge) {
+		strlcpy(glink_xprt_config->pil_edge,
+				pil_edge, PIL_SUBSYSTEM_NAME_LEN);
+		glink_xprt_config->disable_pil_loading = false;
+	} else {
+		glink_xprt_config->disable_pil_loading = true;
+	}
+	scnprintf(glink_xprt_config->ipc_rtr_xprt_name, IPC_RTR_XPRT_NAME_LEN,
+		  "%s_%s", edge, ch_name);
+
+	return 0;
+
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
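+
+/*
+ * Illustrative device tree node for this binding. This is a sketch only:
+ * the property names mirror the keys parsed above, while the node name and
+ * all values are placeholders rather than a shipping configuration.
+ *
+ *	qcom,ipc_router_glink_example_xprt {
+ *		compatible = "qcom,ipc_router_glink_xprt";
+ *		qcom,ch-name = "IPCRTR";
+ *		qcom,xprt-remote = "example-edge";
+ *		qcom,glink-xprt = "smem";
+ *		qcom,xprt-linkid = <1>;
+ *		qcom,xprt-version = <1>;
+ *		qcom,fragmented-data;
+ *		qcom,pil-label = "example";
+ *	};
+ */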
+
+/**
+ * ipc_router_glink_xprt_probe() - Probe a GLINK xprt
+ *
+ * @pdev: Platform device corresponding to GLINK xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a GLINK transport.
+ */
+static int ipc_router_glink_xprt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct ipc_router_glink_xprt_config glink_xprt_config;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			ret = parse_devicetree(pdev->dev.of_node,
+							&glink_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s: Failed to parse device tree\n",
+					    __func__);
+				return ret;
+			}
+
+			ret = ipc_router_glink_config_init(&glink_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s init failed\n", __func__);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id ipc_router_glink_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_glink_xprt" },
+	{},
+};
+
+static struct platform_driver ipc_router_glink_xprt_driver = {
+	.probe = ipc_router_glink_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipc_router_glink_xprt_match_table,
+	 },
+};
+
+static int __init ipc_router_glink_xprt_init(void)
+{
+	int rc;
+
+	glink_xprt_wq = create_singlethread_workqueue("glink_xprt_wq");
+	if (IS_ERR_OR_NULL(glink_xprt_wq)) {
+		pr_err("%s: create_singlethread_workqueue failed\n", __func__);
+		return -EFAULT;
+	}
+
+	rc = platform_driver_register(&ipc_router_glink_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR(
+		"%s: ipc_router_glink_xprt_driver register failed %d\n",
+		__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+module_init(ipc_router_glink_xprt_init);
+MODULE_DESCRIPTION("IPC Router GLINK XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ipc_router_hsic_xprt.c b/drivers/soc/qcom/ipc_router_hsic_xprt.c
new file mode 100644
index 0000000..937c9f7
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_hsic_xprt.c
@@ -0,0 +1,784 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER HSIC XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <mach/ipc_bridge.h>
+
+static int msm_ipc_router_hsic_xprt_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_hsic_xprt_debug_mask,
+		   int, 0664);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (msm_ipc_router_hsic_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define NUM_HSIC_XPRTS 1
+#define XPRT_NAME_LEN 32
+
+/**
+ * msm_ipc_router_hsic_xprt - IPC Router's HSIC XPRT structure
+ * @list: IPC router's HSIC XPRTs list.
+ * @ch_name: Name of the HSIC endpoint exported by ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain HSIC XPRT specific info.
+ * @pdev: Platform device registered by IPC Bridge function driver.
+ * @hsic_xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @read_work: Read Work to perform read operation from HSIC's ipc_bridge.
+ * @in_pkt: Pointer to any partially read packet.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ */
+struct msm_ipc_router_hsic_xprt {
+	struct list_head list;
+	char ch_name[XPRT_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	struct platform_driver driver;
+	struct msm_ipc_router_xprt xprt;
+	struct platform_device *pdev;
+	struct workqueue_struct *hsic_xprt_wq;
+	struct delayed_work read_work;
+	struct rr_packet *in_pkt;
+	struct mutex ss_reset_lock;
+	int ss_reset;
+	struct completion sft_close_complete;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+};
+
+struct msm_ipc_router_hsic_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void hsic_xprt_read_data(struct work_struct *work);
+
+/**
+ * msm_ipc_router_hsic_xprt_config - Config. Info. of each HSIC XPRT
+ * @ch_name: Name of the HSIC endpoint exported by ipc_bridge driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @hsic_pdev_id: ID to differentiate among multiple ipc_bridge endpoints.
+ * @link_id: Network Cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ */
+struct msm_ipc_router_hsic_xprt_config {
+	char ch_name[XPRT_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	int hsic_pdev_id;
+	uint32_t link_id;
+	unsigned int xprt_version;
+};
+
+struct msm_ipc_router_hsic_xprt_config hsic_xprt_cfg[] = {
+	{"ipc_bridge", "ipc_rtr_ipc_bridge1", 1, 1, 3},
+};
+
+#define MODULE_NAME "ipc_router_hsic_xprt"
+#define IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_hsic_xprt_probe_done;
+static struct delayed_work ipc_router_hsic_xprt_probe_work;
+static DEFINE_MUTEX(hsic_remote_xprt_list_lock_lha1);
+static LIST_HEAD(hsic_remote_xprt_list);
+
+/**
+ * find_hsic_xprt_list() - Find xprt item specific to an HSIC endpoint
+ * @name: Name of the platform device to find in list
+ *
+ * @return: pointer to msm_ipc_router_hsic_xprt if matching endpoint is found,
+ *		else NULL.
+ *
+ * This function is used to find specific xprt item from the global xprt list
+ */
+static struct msm_ipc_router_hsic_xprt *
+		find_hsic_xprt_list(const char *name)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	list_for_each_entry(hsic_xprtp, &hsic_remote_xprt_list, list) {
+		if (!strcmp(name, hsic_xprtp->ch_name)) {
+			mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+			return hsic_xprtp;
+		}
+	}
+	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+	return NULL;
+}
+
+/**
+ * ipc_router_hsic_set_xprt_version() - Set IPC Router header version
+ *                                          in the transport
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void ipc_router_hsic_set_xprt_version(
+	struct msm_ipc_router_xprt *xprt, unsigned int version)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	if (!xprt)
+		return;
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+	hsic_xprtp->xprt_version = version;
+}
+
+/**
+ * msm_ipc_router_hsic_get_xprt_version() - Get IPC Router header version
+ *                                          supported by the XPRT
+ * @xprt: XPRT for which the version information is required.
+ *
+ * @return: IPC Router header version supported by the XPRT.
+ */
+static int msm_ipc_router_hsic_get_xprt_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	return (int)hsic_xprtp->xprt_version;
+}
+
+/**
+ * msm_ipc_router_hsic_get_xprt_option() - Get XPRT options
+ * @xprt: XPRT for which the option information is required.
+ *
+ * @return: Options supported by the XPRT.
+ */
+static int msm_ipc_router_hsic_get_xprt_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	return (int)hsic_xprtp->xprt_option;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_write_avail() - Get available write space
+ * @xprt: XPRT for which the available write space info. is required.
+ *
+ * @return: Write space in bytes on success, 0 on SSR.
+ */
+static int msm_ipc_router_hsic_remote_write_avail(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_bridge_platform_data *pdata;
+	int write_avail;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
+		container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	if (hsic_xprtp->ss_reset || !hsic_xprtp->pdev) {
+		write_avail = 0;
+	} else {
+		pdata = hsic_xprtp->pdev->dev.platform_data;
+		write_avail = pdata->max_write_size;
+	}
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	return write_avail;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_write() - Write to XPRT
+ * @data: Data to be written to the XPRT.
+ * @len: Length of the data to be written.
+ * @xprt: XPRT to which the data has to be written.
+ *
+ * @return: Data Length on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_hsic_remote_write(void *data,
+		uint32_t len, struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *skb;
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+	int ret;
+	uint32_t bytes_written = 0;
+	uint32_t bytes_to_write;
+	unsigned char *tx_data;
+
+	if (!pkt || pkt->length != len || !xprt) {
+		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	if (hsic_xprtp->ss_reset) {
+		IPC_RTR_ERR("%s: Trying to write on a reset link\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -ENETRESET;
+	}
+
+	if (!hsic_xprtp->pdev) {
+		IPC_RTR_ERR("%s: Trying to write on a closed link\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -ENODEV;
+	}
+
+	pdata = hsic_xprtp->pdev->dev.platform_data;
+	if (!pdata || !pdata->write) {
+		IPC_RTR_ERR("%s on an uninitialized link\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -EFAULT;
+	}
+
+	skb = skb_peek(pkt->pkt_fragment_q);
+	if (!skb) {
+		IPC_RTR_ERR("%s SKB is NULL\n", __func__);
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		return -EINVAL;
+	}
+	D("%s: About to write %d bytes\n", __func__, len);
+
+	while (bytes_written < len) {
+		bytes_to_write = min_t(uint32_t, (skb->len - bytes_written),
+				       pdata->max_write_size);
+		tx_data = skb->data + bytes_written;
+		ret = pdata->write(hsic_xprtp->pdev, tx_data, bytes_to_write);
+		if (ret < 0) {
+			IPC_RTR_ERR("%s: Error writing data %d\n",
+				    __func__, ret);
+			break;
+		}
+		if (ret != bytes_to_write)
+			IPC_RTR_ERR("%s: Partial write %d < %d\n",
+				    __func__, ret, bytes_to_write);
+		bytes_written += bytes_to_write;
+	}
+	if (bytes_written == len) {
+		ret = bytes_written;
+	} else if (ret > 0 && bytes_written != len) {
+		IPC_RTR_ERR("%s: Fault writing data %d != %d\n",
+			    __func__, bytes_written, len);
+		ret = -EFAULT;
+	}
+	D("%s: Finished writing %d bytes\n", __func__, len);
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	return ret;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_close() - Close the XPRT
+ * @xprt: XPRT which needs to be closed.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int msm_ipc_router_hsic_remote_close(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+	struct ipc_bridge_platform_data *pdata;
+
+	if (!xprt)
+		return -EINVAL;
+	hsic_xprtp = container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 1;
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	flush_workqueue(hsic_xprtp->hsic_xprt_wq);
+	destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
+	pdata = hsic_xprtp->pdev->dev.platform_data;
+	if (pdata && pdata->close)
+		pdata->close(hsic_xprtp->pdev);
+	hsic_xprtp->pdev = NULL;
+	return 0;
+}
+
+/**
+ * hsic_xprt_read_data() - Read work to read from the XPRT
+ * @work: Read work to be executed.
+ *
+ * This function is a read work item queued on a XPRT specific workqueue.
+ * The work parameter contains information regarding the XPRT on which this
+ * read work has to be performed. The work item keeps reading from the HSIC
+ * endpoint, until the endpoint returns an error.
+ */
+static void hsic_xprt_read_data(struct work_struct *work)
+{
+	int bytes_to_read;
+	int bytes_read;
+	int skb_size;
+	struct sk_buff *skb = NULL;
+	struct ipc_bridge_platform_data *pdata;
+	struct delayed_work *rwork = to_delayed_work(work);
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
+		container_of(rwork, struct msm_ipc_router_hsic_xprt, read_work);
+
+	while (1) {
+		mutex_lock(&hsic_xprtp->ss_reset_lock);
+		if (hsic_xprtp->ss_reset) {
+			mutex_unlock(&hsic_xprtp->ss_reset_lock);
+			break;
+		}
+		pdata = hsic_xprtp->pdev->dev.platform_data;
+		mutex_unlock(&hsic_xprtp->ss_reset_lock);
+		while (!hsic_xprtp->in_pkt) {
+			hsic_xprtp->in_pkt = create_pkt(NULL);
+			if (hsic_xprtp->in_pkt)
+				break;
+			IPC_RTR_ERR("%s: packet allocation failure\n",
+								__func__);
+			msleep(100);
+		}
+		D("%s: Allocated rr_packet\n", __func__);
+
+		bytes_to_read = 0;
+		skb_size = pdata->max_read_size;
+		do {
+			do {
+				skb = alloc_skb(skb_size, GFP_KERNEL);
+				if (skb)
+					break;
+				IPC_RTR_ERR("%s: Couldn't alloc SKB\n",
+					    __func__);
+				msleep(100);
+			} while (!skb);
+			bytes_read = pdata->read(hsic_xprtp->pdev, skb->data,
+						 pdata->max_read_size);
+			if (bytes_read < 0) {
+				IPC_RTR_ERR("%s: Error %d @ read operation\n",
+					    __func__, bytes_read);
+				kfree_skb(skb);
+				goto out_read_data;
+			}
+			if (!bytes_to_read) {
+				bytes_to_read = ipc_router_peek_pkt_size(
+						skb->data);
+				if (bytes_to_read < 0) {
+					IPC_RTR_ERR("%s: Invalid size %d\n",
+						__func__, bytes_to_read);
+					kfree_skb(skb);
+					goto out_read_data;
+				}
+			}
+			bytes_to_read -= bytes_read;
+			skb_put(skb, bytes_read);
+			skb_queue_tail(hsic_xprtp->in_pkt->pkt_fragment_q, skb);
+			hsic_xprtp->in_pkt->length += bytes_read;
+			skb_size = min_t(uint32_t, pdata->max_read_size,
+					 (uint32_t)bytes_to_read);
+		} while (bytes_to_read > 0);
+
+		D("%s: Packet size read %d\n",
+		  __func__, hsic_xprtp->in_pkt->length);
+		msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
+			IPC_ROUTER_XPRT_EVENT_DATA, (void *)hsic_xprtp->in_pkt);
+		release_pkt(hsic_xprtp->in_pkt);
+		hsic_xprtp->in_pkt = NULL;
+	}
+out_read_data:
+	if (hsic_xprtp->in_pkt)
+		release_pkt(hsic_xprtp->in_pkt);
+	hsic_xprtp->in_pkt = NULL;
+}
+
+/**
+ * hsic_xprt_sft_close_done() - Completion of XPRT reset
+ * @xprt: XPRT on which the reset operation is complete.
+ *
+ * This function is used by IPC Router to signal this HSIC XPRT Abstraction
+ * Layer (XAL) that the reset of XPRT is completely handled by IPC Router.
+ */
+static void hsic_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp =
+		container_of(xprt, struct msm_ipc_router_hsic_xprt, xprt);
+
+	complete_all(&hsic_xprtp->sft_close_complete);
+}
+
+/**
+ * msm_ipc_router_hsic_remote_remove() - Remove an HSIC endpoint
+ * @pdev: Platform device corresponding to HSIC endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver unregisters
+ * a platform device, mapped to an HSIC endpoint, during SSR.
+ */
+static int msm_ipc_router_hsic_remote_remove(struct platform_device *pdev)
+{
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	hsic_xprtp = find_hsic_xprt_list(pdev->name);
+	if (!hsic_xprtp) {
+		IPC_RTR_ERR("%s No device with name %s\n",
+					__func__, pdev->name);
+		return -ENODEV;
+	}
+
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 1;
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	flush_workqueue(hsic_xprtp->hsic_xprt_wq);
+	destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
+	init_completion(&hsic_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	  __func__, hsic_xprtp->xprt.name);
+	wait_for_completion(&hsic_xprtp->sft_close_complete);
+	hsic_xprtp->pdev = NULL;
+	pdata = pdev->dev.platform_data;
+	if (pdata && pdata->close)
+		pdata->close(pdev);
+	return 0;
+}
+
+/**
+ * msm_ipc_router_hsic_remote_probe() - Probe an HSIC endpoint
+ * @pdev: Platform device corresponding to HSIC endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying ipc_bridge driver registers
+ * a platform device, mapped to an HSIC endpoint.
+ */
+static int msm_ipc_router_hsic_remote_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ipc_bridge_platform_data *pdata;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata || !pdata->open || !pdata->read ||
+	    !pdata->write || !pdata->close) {
+		IPC_RTR_ERR("%s: pdata or pdata->operations is NULL\n",
+								__func__);
+		return -EINVAL;
+	}
+
+	hsic_xprtp = find_hsic_xprt_list(pdev->name);
+	if (!hsic_xprtp) {
+		IPC_RTR_ERR("%s No device with name %s\n",
+						__func__, pdev->name);
+		return -ENODEV;
+	}
+
+	hsic_xprtp->hsic_xprt_wq =
+		create_singlethread_workqueue(pdev->name);
+	if (!hsic_xprtp->hsic_xprt_wq) {
+		IPC_RTR_ERR("%s: WQ creation failed for %s\n",
+			__func__, pdev->name);
+		return -EFAULT;
+	}
+
+	rc = pdata->open(pdev);
+	if (rc < 0) {
+		IPC_RTR_ERR("%s: Channel open failed for %s.%d\n",
+			__func__, pdev->name, pdev->id);
+		destroy_workqueue(hsic_xprtp->hsic_xprt_wq);
+		return rc;
+	}
+	hsic_xprtp->pdev = pdev;
+	mutex_lock(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 0;
+	mutex_unlock(&hsic_xprtp->ss_reset_lock);
+	msm_ipc_router_xprt_notify(&hsic_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	  __func__, hsic_xprtp->xprt.name);
+	queue_delayed_work(hsic_xprtp->hsic_xprt_wq,
+			   &hsic_xprtp->read_work, 0);
+	return 0;
+}
+
+/**
+ * msm_ipc_router_hsic_driver_register() - register HSIC XPRT drivers
+ *
+ * @hsic_xprtp: pointer to IPC router hsic xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added to register platform
+ * drivers for new XPRT.
+ */
+static int msm_ipc_router_hsic_driver_register(
+			struct msm_ipc_router_hsic_xprt *hsic_xprtp)
+{
+	int ret;
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp_item;
+
+	hsic_xprtp_item = find_hsic_xprt_list(hsic_xprtp->ch_name);
+
+	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	list_add(&hsic_xprtp->list, &hsic_remote_xprt_list);
+	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+
+	if (!hsic_xprtp_item) {
+		hsic_xprtp->driver.driver.name = hsic_xprtp->ch_name;
+		hsic_xprtp->driver.driver.owner = THIS_MODULE;
+		hsic_xprtp->driver.probe = msm_ipc_router_hsic_remote_probe;
+		hsic_xprtp->driver.remove = msm_ipc_router_hsic_remote_remove;
+
+		ret = platform_driver_register(&hsic_xprtp->driver);
+		if (ret) {
+			IPC_RTR_ERR(
+			"%s: Failed to register platform driver[%s]\n",
+					__func__, hsic_xprtp->ch_name);
+			return ret;
+		}
+	} else {
+		IPC_RTR_ERR("%s Already driver registered %s\n",
+					__func__, hsic_xprtp->ch_name);
+	}
+
+	return 0;
+}
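+
+/*
+ * Note: this XPRT only registers a platform driver named after @ch_name; the
+ * matching platform device is expected to be registered by the ipc_bridge
+ * function driver once the HSIC link is up. A minimal sketch of that side,
+ * assuming hypothetical ipc_bridge_* callbacks (not part of this patch):
+ *
+ *	static struct ipc_bridge_platform_data ipc_bridge_pdata = {
+ *		.max_read_size = 2048,
+ *		.max_write_size = 2048,
+ *		.open = ipc_bridge_open,
+ *		.read = ipc_bridge_read,
+ *		.write = ipc_bridge_write,
+ *		.close = ipc_bridge_close,
+ *	};
+ *
+ *	pdev = platform_device_register_data(NULL, "ipc_bridge", 1,
+ *					     &ipc_bridge_pdata,
+ *					     sizeof(ipc_bridge_pdata));
+ */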
+
+/**
+ * msm_ipc_router_hsic_config_init() - init HSIC xprt configs
+ *
+ * @hsic_xprt_config: pointer to HSIC xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the HSIC XPRT pointer with
+ * the HSIC XPRT configurations either from device tree or static arrays.
+ */
+static int msm_ipc_router_hsic_config_init(
+		struct msm_ipc_router_hsic_xprt_config *hsic_xprt_config)
+{
+	struct msm_ipc_router_hsic_xprt *hsic_xprtp;
+
+	hsic_xprtp = kzalloc(sizeof(struct msm_ipc_router_hsic_xprt),
+							GFP_KERNEL);
+	if (IS_ERR_OR_NULL(hsic_xprtp)) {
+		IPC_RTR_ERR("%s: kzalloc() failed for hsic_xprtp id:%s\n",
+				__func__, hsic_xprt_config->ch_name);
+		return -ENOMEM;
+	}
+
+	hsic_xprtp->xprt.link_id = hsic_xprt_config->link_id;
+	hsic_xprtp->xprt_version = hsic_xprt_config->xprt_version;
+
+	strlcpy(hsic_xprtp->ch_name, hsic_xprt_config->ch_name,
+					XPRT_NAME_LEN);
+
+	strlcpy(hsic_xprtp->xprt_name, hsic_xprt_config->xprt_name,
+						XPRT_NAME_LEN);
+	hsic_xprtp->xprt.name = hsic_xprtp->xprt_name;
+
+	hsic_xprtp->xprt.set_version =
+		ipc_router_hsic_set_xprt_version;
+	hsic_xprtp->xprt.get_version =
+		msm_ipc_router_hsic_get_xprt_version;
+	hsic_xprtp->xprt.get_option =
+		 msm_ipc_router_hsic_get_xprt_option;
+	hsic_xprtp->xprt.read_avail = NULL;
+	hsic_xprtp->xprt.read = NULL;
+	hsic_xprtp->xprt.write_avail =
+		msm_ipc_router_hsic_remote_write_avail;
+	hsic_xprtp->xprt.write = msm_ipc_router_hsic_remote_write;
+	hsic_xprtp->xprt.close = msm_ipc_router_hsic_remote_close;
+	hsic_xprtp->xprt.sft_close_done = hsic_xprt_sft_close_done;
+	hsic_xprtp->xprt.priv = NULL;
+
+	hsic_xprtp->in_pkt = NULL;
+	INIT_DELAYED_WORK(&hsic_xprtp->read_work, hsic_xprt_read_data);
+	mutex_init(&hsic_xprtp->ss_reset_lock);
+	hsic_xprtp->ss_reset = 0;
+	hsic_xprtp->xprt_option = 0;
+
+	msm_ipc_router_hsic_driver_register(hsic_xprtp);
+	return 0;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @hsic_xprt_config: pointer to HSIC XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct msm_ipc_router_hsic_xprt_config *hsic_xprt_config)
+{
+	int ret;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *remote_ss;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(hsic_xprt_config->ch_name, ch_name, XPRT_NAME_LEN);
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	hsic_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	hsic_xprt_config->xprt_version = version;
+
+	scnprintf(hsic_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+			remote_ss, hsic_xprt_config->ch_name);
+
+	return 0;
+
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
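+
+/*
+ * Illustrative device tree node for this binding. Property names mirror the
+ * keys parsed above; the node name and values are placeholders only.
+ *
+ *	qcom,ipc_router_hsic_example_xprt {
+ *		compatible = "qcom,ipc_router_hsic_xprt";
+ *		qcom,ch-name = "ipc_bridge";
+ *		qcom,xprt-remote = "example-modem";
+ *		qcom,xprt-linkid = <1>;
+ *		qcom,xprt-version = <3>;
+ *	};
+ */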
+
+/**
+ * msm_ipc_router_hsic_xprt_probe() - Probe an HSIC xprt
+ * @pdev: Platform device corresponding to HSIC xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an HSIC transport.
+ */
+static int msm_ipc_router_hsic_xprt_probe(
+				struct platform_device *pdev)
+{
+	int ret;
+	struct msm_ipc_router_hsic_xprt_config hsic_xprt_config;
+
+	if (pdev && pdev->dev.of_node) {
+		mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+		ipc_router_hsic_xprt_probe_done = 1;
+		mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+
+		ret = parse_devicetree(pdev->dev.of_node,
+						&hsic_xprt_config);
+		if (ret) {
+			IPC_RTR_ERR("%s: Failed to parse device tree\n",
+								__func__);
+			return ret;
+		}
+
+		ret = msm_ipc_router_hsic_config_init(
+						&hsic_xprt_config);
+		if (ret) {
+			IPC_RTR_ERR(" %s init failed\n", __func__);
+			return ret;
+		}
+	}
+	return 0;
+}
+
+/**
+ * ipc_router_hsic_xprt_probe_worker() - probe worker for non DT configurations
+ *
+ * @work: work item to process
+ *
+ * This function is scheduled through schedule_delayed_work() with a 3 second
+ * delay and checks whether the device tree probe has completed. If the device
+ * tree probe has not run, the default configurations are read from the static
+ * array.
+ */
+static void ipc_router_hsic_xprt_probe_worker(struct work_struct *work)
+{
+	int i, ret;
+
+	if (WARN_ON(ARRAY_SIZE(hsic_xprt_cfg) != NUM_HSIC_XPRTS))
+		return;
+
+	mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	if (!ipc_router_hsic_xprt_probe_done) {
+		mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+		for (i = 0; i < ARRAY_SIZE(hsic_xprt_cfg); i++) {
+			ret = msm_ipc_router_hsic_config_init(
+							&hsic_xprt_cfg[i]);
+			if (ret)
+				IPC_RTR_ERR(" %s init failed config idx %d\n",
+								__func__, i);
+		}
+		mutex_lock(&hsic_remote_xprt_list_lock_lha1);
+	}
+	mutex_unlock(&hsic_remote_xprt_list_lock_lha1);
+}
+
+static const struct of_device_id msm_ipc_router_hsic_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_hsic_xprt" },
+	{},
+};
+
+static struct platform_driver msm_ipc_router_hsic_xprt_driver = {
+	.probe = msm_ipc_router_hsic_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ipc_router_hsic_xprt_match_table,
+	 },
+};
+
+static int __init msm_ipc_router_hsic_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ipc_router_hsic_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR(
+		"%s: msm_ipc_router_hsic_xprt_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	INIT_DELAYED_WORK(&ipc_router_hsic_xprt_probe_work,
+					ipc_router_hsic_xprt_probe_worker);
+	schedule_delayed_work(&ipc_router_hsic_xprt_probe_work,
+			msecs_to_jiffies(IPC_ROUTER_HSIC_XPRT_WAIT_TIMEOUT));
+	return 0;
+}
+
+module_init(msm_ipc_router_hsic_xprt_init);
+MODULE_DESCRIPTION("IPC Router HSIC XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ipc_router_mhi_xprt.c b/drivers/soc/qcom/ipc_router_mhi_xprt.c
new file mode 100644
index 0000000..68849f7
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_mhi_xprt.c
@@ -0,0 +1,1011 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER MHI XPRT module.
+ */
+#include <linux/delay.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/module.h>
+#include <linux/msm_mhi.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+
+
+static int ipc_router_mhi_xprt_debug_mask;
+module_param_named(debug_mask, ipc_router_mhi_xprt_debug_mask,
+		   int, 0664);
+
+#define D(x...) do { \
+if (ipc_router_mhi_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
+
+#define NUM_MHI_XPRTS 1
+#define XPRT_NAME_LEN 32
+#define IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE 0x1000
+#define IPC_ROUTER_MHI_XPRT_NUM_TRBS 10
+
+/**
+ * ipc_router_mhi_addr_map - Struct for virtual address to IPC Router
+ *				packet mapping.
+ * @list_node: Address mapping list node used by mhi transport map list.
+ * @virt_addr: The virtual address in mapping.
+ * @pkt: The IPC Router packet for the virtual address
+ */
+struct ipc_router_mhi_addr_map {
+	struct list_head list_node;
+	void *virt_addr;
+	struct rr_packet *pkt;
+};
+
+/**
+ * ipc_router_mhi_channel - MHI Channel related information
+ * @out_chan_id: Out channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @out_handle: MHI Output channel handle.
+ * @out_clnt_info: IPC Router callbacks/info to be passed to the MHI driver.
+ * @in_chan_id: In channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @in_handle: MHI Input channel handle.
+ * @in_clnt_info: IPC Router callbacks/info to be passed to the MHI driver.
+ * @state_lock: Lock to protect access to the state information.
+ * @out_chan_enabled: State of the outgoing channel.
+ * @in_chan_enabled: State of the incoming channel.
+ * @bytes_to_rx: Remaining bytes to be received in a packet.
+ * @in_skbq_lock: Lock to protect access to the input skbs queue.
+ * @in_skbq: Queue containing the input buffers.
+ * @max_packet_size: Possible maximum packet size.
+ * @num_trbs: Number of TRBs.
+ * @mhi_xprtp: Pointer to IPC Router MHI XPRT.
+ */
+struct ipc_router_mhi_channel {
+	enum MHI_CLIENT_CHANNEL out_chan_id;
+	struct mhi_client_handle *out_handle;
+	struct mhi_client_info_t out_clnt_info;
+
+	enum MHI_CLIENT_CHANNEL in_chan_id;
+	struct mhi_client_handle *in_handle;
+	struct mhi_client_info_t in_clnt_info;
+
+	struct mutex state_lock;
+	bool out_chan_enabled;
+	bool in_chan_enabled;
+	int bytes_to_rx;
+
+	struct mutex in_skbq_lock;
+	struct sk_buff_head in_skbq;
+	size_t max_packet_size;
+	uint32_t num_trbs;
+	void *mhi_xprtp;
+};
+
+/**
+ * ipc_router_mhi_xprt - IPC Router's MHI XPRT structure
+ * @list: IPC router's MHI XPRTs list.
+ * @ch_hndl: Data Structure to hold MHI Channel information.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @xprt: IPC Router XPRT structure to contain MHI XPRT specific info.
+ * @wq: Workqueue to queue read & other XPRT related works.
+ * @read_work: Read Work to perform read operation from MHI Driver.
+ * @in_pkt: Pointer to any partially read packet.
+ * @write_wait_q: Wait Queue to handle the write events.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *			by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @tx_addr_map_list_lock: The lock to protect the address mapping list for TX
+ *			operations.
+ * @tx_addr_map_list: Virtual address mapping list for TX operations.
+ * @rx_addr_map_list_lock: The lock to protect the address mapping list for RX
+ *			operations.
+ * @rx_addr_map_list: Virtual address mapping list for RX operations.
+ */
+struct ipc_router_mhi_xprt {
+	struct list_head list;
+	struct ipc_router_mhi_channel ch_hndl;
+	char xprt_name[XPRT_NAME_LEN];
+	struct msm_ipc_router_xprt xprt;
+	struct workqueue_struct *wq;
+	struct work_struct read_work;
+	struct rr_packet *in_pkt;
+	wait_queue_head_t write_wait_q;
+	struct completion sft_close_complete;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+	struct mutex tx_addr_map_list_lock;
+	struct list_head tx_addr_map_list;
+	struct mutex rx_addr_map_list_lock;
+	struct list_head rx_addr_map_list;
+};
+
+struct ipc_router_mhi_xprt_work {
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+	enum MHI_CLIENT_CHANNEL chan_id;
+	struct work_struct work;
+};
+
+static void mhi_xprt_read_data(struct work_struct *work);
+static void mhi_xprt_enable_event(struct work_struct *work);
+static void mhi_xprt_disable_event(struct work_struct *work);
+
+/**
+ * ipc_router_mhi_xprt_config - Config. Info. of each MHI XPRT
+ * @out_chan_id: Out channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @in_chan_id: In channel ID for use by IPC ROUTER enumerated in MHI driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @link_id: Network Cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ */
+struct ipc_router_mhi_xprt_config {
+	enum MHI_CLIENT_CHANNEL out_chan_id;
+	enum MHI_CLIENT_CHANNEL in_chan_id;
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t link_id;
+	uint32_t xprt_version;
+};
+
+#define MODULE_NAME "ipc_router_mhi_xprt"
+static DEFINE_MUTEX(mhi_xprt_list_lock_lha1);
+static LIST_HEAD(mhi_xprt_list);
+
+/*
+ * ipc_router_mhi_release_pkt() - Release a cloned IPC Router packet
+ * @ref: Reference to the kref object in the IPC Router packet.
+ */
+void ipc_router_mhi_release_pkt(struct kref *ref)
+{
+	struct rr_packet *pkt = container_of(ref, struct rr_packet, ref);
+
+	release_pkt(pkt);
+}
+
+/*
+ * ipc_router_mhi_xprt_find_addr_map() - Search the mapped virtual address
+ * @addr_map_list: The list of address mappings.
+ * @addr_map_list_lock: Reference to the lock that protects the @addr_map_list.
+ * @addr: The virtual address that needs to be found.
+ *
+ * Return: The mapped virtual Address if found, NULL otherwise.
+ */
+void *ipc_router_mhi_xprt_find_addr_map(struct list_head *addr_map_list,
+				struct mutex *addr_map_list_lock,
+				void *addr)
+{
+	struct ipc_router_mhi_addr_map *addr_mapping;
+	struct ipc_router_mhi_addr_map *tmp_addr_mapping;
+	void *virt_addr;
+
+	if (!addr_map_list || !addr_map_list_lock)
+		return NULL;
+	mutex_lock(addr_map_list_lock);
+	list_for_each_entry_safe(addr_mapping, tmp_addr_mapping,
+				addr_map_list, list_node) {
+		if (addr_mapping->virt_addr == addr) {
+			virt_addr = addr_mapping->virt_addr;
+			list_del(&addr_mapping->list_node);
+			if (addr_mapping->pkt)
+				kref_put(&addr_mapping->pkt->ref,
+					ipc_router_mhi_release_pkt);
+			kfree(addr_mapping);
+			mutex_unlock(addr_map_list_lock);
+			return virt_addr;
+		}
+	}
+	mutex_unlock(addr_map_list_lock);
+	IPC_RTR_ERR(
+		"%s: Virtual address mapping [%p] not found\n",
+		__func__, (void *)addr);
+	return NULL;
+}
+
+/*
+ * ipc_router_mhi_xprt_add_addr_map() - Add a virtual address mapping structure
+ * @addr_map_list: The list of address mappings.
+ * @addr_map_list_lock: Reference to the lock that protects the @addr_map_list.
+ * @pkt: The IPC Router packet that contains the virtual address in skbs.
+ * @virt_addr: The virtual address which needs to be added.
+ *
+ * Return: 0 on success, standard Linux error code otherwise.
+ */
+int ipc_router_mhi_xprt_add_addr_map(struct list_head *addr_map_list,
+				struct mutex *addr_map_list_lock,
+				struct rr_packet *pkt, void *virt_addr)
+{
+	struct ipc_router_mhi_addr_map *addr_mapping;
+
+	if (!addr_map_list || !addr_map_list_lock)
+		return -EINVAL;
+	addr_mapping = kmalloc(sizeof(*addr_mapping), GFP_KERNEL);
+	if (!addr_mapping)
+		return -ENOMEM;
+	addr_mapping->virt_addr = virt_addr;
+	addr_mapping->pkt = pkt;
+	mutex_lock(addr_map_list_lock);
+	if (addr_mapping->pkt)
+		kref_get(&addr_mapping->pkt->ref);
+	list_add_tail(&addr_mapping->list_node, addr_map_list);
+	mutex_unlock(addr_map_list_lock);
+	return 0;
+}
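+
+/*
+ * Usage note (derived from the call sites below): every buffer handed to
+ * mhi_queue_xfer() is first recorded with ipc_router_mhi_xprt_add_addr_map()
+ * and later reclaimed with ipc_router_mhi_xprt_find_addr_map() once the
+ * transfer completes. On the TX path the mapping pins the cloned rr_packet
+ * via its kref until the XFER callback runs; on the RX path the mapping is
+ * added with a NULL packet when TRBs are queued and looked up again in the
+ * read work to match poll results back to their SKBs.
+ */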
+
+/*
+ * mhi_xprt_queue_in_buffers() - Queue input buffers
+ * @mhi_xprtp: MHI XPRT in which the input buffer has to be queued.
+ * @num_trbs: Number of buffers to be queued.
+ *
+ * @return: number of buffers queued.
+ */
+int mhi_xprt_queue_in_buffers(struct ipc_router_mhi_xprt *mhi_xprtp,
+			      uint32_t num_trbs)
+{
+	int i;
+	struct sk_buff *skb;
+	uint32_t buf_size = mhi_xprtp->ch_hndl.max_packet_size;
+	int rc_val = 0;
+
+	for (i = 0; i < num_trbs; i++) {
+		skb = alloc_skb(buf_size, GFP_KERNEL);
+		if (!skb) {
+			IPC_RTR_ERR("%s: Could not allocate %d SKB(s)\n",
+				    __func__, (i + 1));
+			break;
+		}
+		if (ipc_router_mhi_xprt_add_addr_map(
+					&mhi_xprtp->rx_addr_map_list,
+					&mhi_xprtp->rx_addr_map_list_lock, NULL,
+					skb->data) < 0) {
+			IPC_RTR_ERR("%s: Could not map %d SKB address\n",
+					__func__, (i + 1));
+			break;
+		}
+		mutex_lock(&mhi_xprtp->ch_hndl.in_skbq_lock);
+		rc_val = mhi_queue_xfer(mhi_xprtp->ch_hndl.in_handle,
+					skb->data, buf_size, MHI_EOT);
+		if (rc_val) {
+			mutex_unlock(&mhi_xprtp->ch_hndl.in_skbq_lock);
+			IPC_RTR_ERR("%s: Failed to queue TRB # %d into MHI\n",
+				    __func__, (i + 1));
+			kfree_skb(skb);
+			break;
+		}
+		skb_queue_tail(&mhi_xprtp->ch_hndl.in_skbq, skb);
+		mutex_unlock(&mhi_xprtp->ch_hndl.in_skbq_lock);
+	}
+	return i;
+}
+
+/**
+ * ipc_router_mhi_set_xprt_version() - Set the IPC Router version in transport
+ * @xprt:      Reference to the transport structure.
+ * @version:   The version to be set in transport.
+ */
+static void ipc_router_mhi_set_xprt_version(struct msm_ipc_router_xprt *xprt,
+					   unsigned int version)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+	mhi_xprtp->xprt_version = version;
+}
+
+/**
+ * ipc_router_mhi_get_xprt_version() - Get IPC Router header version
+ *				       supported by the XPRT
+ * @xprt: XPRT for which the version information is required.
+ *
+ * @return: IPC Router header version supported by the XPRT.
+ */
+static int ipc_router_mhi_get_xprt_version(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+
+	return (int)mhi_xprtp->xprt_version;
+}
+
+/**
+ * ipc_router_mhi_get_xprt_option() - Get XPRT options
+ * @xprt: XPRT for which the option information is required.
+ *
+ * @return: Options supported by the XPRT.
+ */
+static int ipc_router_mhi_get_xprt_option(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+
+	return (int)mhi_xprtp->xprt_option;
+}
+
+/**
+ * ipc_router_mhi_write_avail() - Get available write space
+ * @xprt: XPRT for which the available write space info. is required.
+ *
+ * @return: Write space in bytes on success, 0 on SSR.
+ */
+static int ipc_router_mhi_write_avail(struct msm_ipc_router_xprt *xprt)
+{
+	int write_avail;
+	struct ipc_router_mhi_xprt *mhi_xprtp =
+		container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	if (!mhi_xprtp->ch_hndl.out_chan_enabled)
+		write_avail = 0;
+	else
+		write_avail = mhi_get_free_desc(mhi_xprtp->ch_hndl.out_handle) *
+					mhi_xprtp->ch_hndl.max_packet_size;
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	return write_avail;
+}
+
+/**
+ * ipc_router_mhi_write_skb() - Write a single SKB onto the XPRT
+ * @mhi_xprtp: XPRT in which the SKB has to be written.
+ * @skb: SKB to be written.
+ *
+ * @return: return number of bytes written on success,
+ *          standard Linux error codes on failure.
+ */
+static int ipc_router_mhi_write_skb(struct ipc_router_mhi_xprt *mhi_xprtp,
+				    struct sk_buff *skb, struct rr_packet *pkt)
+{
+	size_t sz_to_write = 0;
+	size_t offset = 0;
+	int rc;
+
+	while (offset < skb->len) {
+		wait_event(mhi_xprtp->write_wait_q,
+			   mhi_get_free_desc(mhi_xprtp->ch_hndl.out_handle) ||
+			   !mhi_xprtp->ch_hndl.out_chan_enabled);
+		mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+		if (!mhi_xprtp->ch_hndl.out_chan_enabled) {
+			mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+			IPC_RTR_ERR("%s: %s chnl reset\n",
+				    __func__, mhi_xprtp->xprt_name);
+			return -ENETRESET;
+		}
+
+		sz_to_write = min((size_t)(skb->len - offset),
+				(size_t)IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE);
+		if (ipc_router_mhi_xprt_add_addr_map(
+					&mhi_xprtp->tx_addr_map_list,
+					&mhi_xprtp->tx_addr_map_list_lock, pkt,
+					skb->data + offset) < 0) {
+			IPC_RTR_ERR("%s: Could not map SKB address\n",
+					__func__);
+			break;
+		}
+
+		rc = mhi_queue_xfer(mhi_xprtp->ch_hndl.out_handle,
+				    skb->data + offset, sz_to_write,
+				    MHI_EOT | MHI_EOB);
+		if (rc) {
+			mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+			IPC_RTR_ERR("%s: Error queueing mhi_xfer 0x%zx\n",
+				    __func__, sz_to_write);
+			return -EFAULT;
+		}
+		offset += sz_to_write;
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	}
+	return skb->len;
+}
+
+/**
+ * ipc_router_mhi_write() - Write to XPRT
+ * @data: Data to be written to the XPRT.
+ * @len: Length of the data to be written.
+ * @xprt: XPRT to which the data has to be written.
+ *
+ * @return: Data Length on success, standard Linux error codes on failure.
+ */
+static int ipc_router_mhi_write(void *data,
+		uint32_t len, struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *ipc_rtr_pkt;
+	struct rr_packet *cloned_pkt;
+	int rc = 0;
+	struct ipc_router_mhi_xprt *mhi_xprtp =
+		container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	cloned_pkt = clone_pkt(pkt);
+	if (!cloned_pkt) {
+		pr_err("%s: Error in cloning packet while tx\n", __func__);
+		return -ENOMEM;
+	}
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	skb_queue_walk(cloned_pkt->pkt_fragment_q, ipc_rtr_pkt) {
+		rc = ipc_router_mhi_write_skb(mhi_xprtp, ipc_rtr_pkt,
+						cloned_pkt);
+		if (rc < 0) {
+			IPC_RTR_ERR("%s: Error writing SKB %d\n",
+				    __func__, rc);
+			break;
+		}
+	}
+
+	kref_put(&cloned_pkt->ref, ipc_router_mhi_release_pkt);
+	if (rc < 0)
+		return rc;
+	else
+		return len;
+}
+
+/**
+ * mhi_xprt_read_data() - Read work to read from the XPRT
+ * @work: Read work to be executed.
+ *
+ * This function is a read work item queued on a XPRT specific workqueue.
+ * The work parameter contains information regarding the XPRT on which this
+ * read work has to be performed. The work item keeps reading from the MHI
+ * endpoint, until the endpoint returns an error.
+ */
+static void mhi_xprt_read_data(struct work_struct *work)
+{
+	void *data_addr;
+	ssize_t data_sz;
+	void *skb_data;
+	struct sk_buff *skb;
+	struct ipc_router_mhi_xprt *mhi_xprtp =
+		container_of(work, struct ipc_router_mhi_xprt, read_work);
+	struct mhi_result result;
+	int rc;
+
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	if (!mhi_xprtp->ch_hndl.in_chan_enabled) {
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+		if (mhi_xprtp->in_pkt)
+			release_pkt(mhi_xprtp->in_pkt);
+		mhi_xprtp->in_pkt = NULL;
+		mhi_xprtp->ch_hndl.bytes_to_rx = 0;
+		IPC_RTR_ERR("%s: %s channel reset\n",
+			    __func__, mhi_xprtp->xprt.name);
+		return;
+	}
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+
+	while (1) {
+		rc = mhi_poll_inbound(mhi_xprtp->ch_hndl.in_handle, &result);
+		if (rc || !result.buf_addr || !result.bytes_xferd) {
+			if (rc != -ENODATA)
+				IPC_RTR_ERR("%s: Poll failed %s:%d:%p:%u\n",
+					__func__, mhi_xprtp->xprt_name, rc,
+					result.buf_addr,
+					(unsigned int) result.bytes_xferd);
+			break;
+		}
+		data_addr = result.buf_addr;
+		data_sz = result.bytes_xferd;
+
+		/* Create a new rr_packet, if first fragment */
+		if (!mhi_xprtp->ch_hndl.bytes_to_rx) {
+			mhi_xprtp->in_pkt = create_pkt(NULL);
+			if (!mhi_xprtp->in_pkt) {
+				IPC_RTR_ERR("%s: Couldn't alloc rr_packet\n",
+					    __func__);
+				return;
+			}
+			D("%s: Allocated rr_packet\n", __func__);
+		}
+
+		skb_data = ipc_router_mhi_xprt_find_addr_map(
+					&mhi_xprtp->rx_addr_map_list,
+					&mhi_xprtp->rx_addr_map_list_lock,
+					data_addr);
+
+		if (!skb_data)
+			continue;
+		mutex_lock(&mhi_xprtp->ch_hndl.in_skbq_lock);
+		skb_queue_walk(&mhi_xprtp->ch_hndl.in_skbq, skb) {
+			if (skb->data == skb_data) {
+				skb_unlink(skb, &mhi_xprtp->ch_hndl.in_skbq);
+				break;
+			}
+		}
+		mutex_unlock(&mhi_xprtp->ch_hndl.in_skbq_lock);
+		skb_put(skb, data_sz);
+		skb_queue_tail(mhi_xprtp->in_pkt->pkt_fragment_q, skb);
+		mhi_xprtp->in_pkt->length += data_sz;
+		if (!mhi_xprtp->ch_hndl.bytes_to_rx)
+			mhi_xprtp->ch_hndl.bytes_to_rx =
+				ipc_router_peek_pkt_size(skb_data) - data_sz;
+		else
+			mhi_xprtp->ch_hndl.bytes_to_rx -= data_sz;
+		/* Packet is completely read, so notify to router */
+		if (!mhi_xprtp->ch_hndl.bytes_to_rx) {
+			D("%s: Packet size read %d\n",
+			  __func__, mhi_xprtp->in_pkt->length);
+			msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
+						IPC_ROUTER_XPRT_EVENT_DATA,
+						(void *)mhi_xprtp->in_pkt);
+			release_pkt(mhi_xprtp->in_pkt);
+			mhi_xprtp->in_pkt = NULL;
+		}
+
+		while (mhi_xprt_queue_in_buffers(mhi_xprtp, 1) != 1 &&
+		       mhi_xprtp->ch_hndl.in_chan_enabled)
+			msleep(100);
+	}
+}
+
+/**
+ * ipc_router_mhi_close() - Close the XPRT
+ * @xprt: XPRT which needs to be closed.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int ipc_router_mhi_close(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	mhi_xprtp = container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	mhi_xprtp->ch_hndl.out_chan_enabled = false;
+	mhi_xprtp->ch_hndl.in_chan_enabled = false;
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	flush_workqueue(mhi_xprtp->wq);
+	mhi_close_channel(mhi_xprtp->ch_hndl.in_handle);
+	mhi_close_channel(mhi_xprtp->ch_hndl.out_handle);
+	return 0;
+}
+
+/**
+ * mhi_xprt_sft_close_done() - Completion of XPRT reset
+ * @xprt: XPRT on which the reset operation is complete.
+ *
+ * This function is used by IPC Router to signal this MHI XPRT Abstraction
+ * Layer (XAL) that the reset of XPRT is completely handled by IPC Router.
+ */
+static void mhi_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp =
+		container_of(xprt, struct ipc_router_mhi_xprt, xprt);
+
+	complete_all(&mhi_xprtp->sft_close_complete);
+}
+
+/**
+ * mhi_xprt_enable_event() - Enable the MHI link for communication
+ * @work: Work containing some reference to the link to be enabled.
+ *
+ * This work is scheduled when the MHI link to the peripheral is up.
+ */
+static void mhi_xprt_enable_event(struct work_struct *work)
+{
+	struct ipc_router_mhi_xprt_work *xprt_work =
+		container_of(work, struct ipc_router_mhi_xprt_work, work);
+	struct ipc_router_mhi_xprt *mhi_xprtp = xprt_work->mhi_xprtp;
+	int rc;
+	bool notify = false;
+
+	if (xprt_work->chan_id == mhi_xprtp->ch_hndl.out_chan_id) {
+		rc = mhi_open_channel(mhi_xprtp->ch_hndl.out_handle);
+		if (rc) {
+			IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n",
+				__func__, mhi_xprtp->ch_hndl.out_chan_id, rc);
+			goto out_enable_event;
+		}
+		mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+		mhi_xprtp->ch_hndl.out_chan_enabled = true;
+		notify = mhi_xprtp->ch_hndl.out_chan_enabled &&
+				mhi_xprtp->ch_hndl.in_chan_enabled;
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	} else if (xprt_work->chan_id == mhi_xprtp->ch_hndl.in_chan_id) {
+		rc = mhi_open_channel(mhi_xprtp->ch_hndl.in_handle);
+		if (rc) {
+			IPC_RTR_ERR("%s Failed to open chan 0x%x, rc %d\n",
+				__func__, mhi_xprtp->ch_hndl.in_chan_id, rc);
+			goto out_enable_event;
+		}
+		mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+		mhi_xprtp->ch_hndl.in_chan_enabled = true;
+		notify = mhi_xprtp->ch_hndl.out_chan_enabled &&
+				mhi_xprtp->ch_hndl.in_chan_enabled;
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	}
+
+	/* Register the XPRT before receiving any data */
+	if (notify) {
+		msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+		D("%s: Notified IPC Router of %s OPEN\n",
+		  __func__, mhi_xprtp->xprt.name);
+	}
+
+	if (xprt_work->chan_id != mhi_xprtp->ch_hndl.in_chan_id)
+		goto out_enable_event;
+
+	rc = mhi_xprt_queue_in_buffers(mhi_xprtp, mhi_xprtp->ch_hndl.num_trbs);
+	if (rc > 0)
+		goto out_enable_event;
+
+	IPC_RTR_ERR("%s: Could not queue at least one TRB\n", __func__);
+	mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+	mhi_xprtp->ch_hndl.in_chan_enabled = false;
+	mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	if (notify)
+		msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	mhi_close_channel(mhi_xprtp->ch_hndl.in_handle);
+out_enable_event:
+	kfree(xprt_work);
+}
+
+/**
+ * mhi_xprt_disable_event() - Disable the MHI link for communication
+ * @work: Work containing some reference to the link to be disabled.
+ *
+ * This work is scheduled when the MHI link to the peripheral is down.
+ */
+static void mhi_xprt_disable_event(struct work_struct *work)
+{
+	struct ipc_router_mhi_xprt_work *xprt_work =
+		container_of(work, struct ipc_router_mhi_xprt_work, work);
+	struct ipc_router_mhi_xprt *mhi_xprtp = xprt_work->mhi_xprtp;
+	bool notify = false;
+
+	if (xprt_work->chan_id == mhi_xprtp->ch_hndl.out_chan_id) {
+		mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+		notify = mhi_xprtp->ch_hndl.out_chan_enabled &&
+				mhi_xprtp->ch_hndl.in_chan_enabled;
+		mhi_xprtp->ch_hndl.out_chan_enabled = false;
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+		wake_up(&mhi_xprtp->write_wait_q);
+		mhi_close_channel(mhi_xprtp->ch_hndl.out_handle);
+	} else if (xprt_work->chan_id == mhi_xprtp->ch_hndl.in_chan_id) {
+		mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+		notify = mhi_xprtp->ch_hndl.out_chan_enabled &&
+				mhi_xprtp->ch_hndl.in_chan_enabled;
+		mhi_xprtp->ch_hndl.in_chan_enabled = false;
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+		/* Queue a read work to remove any partially read packets */
+		queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work);
+		flush_workqueue(mhi_xprtp->wq);
+		mhi_close_channel(mhi_xprtp->ch_hndl.in_handle);
+	}
+
+	if (notify) {
+		init_completion(&mhi_xprtp->sft_close_complete);
+		msm_ipc_router_xprt_notify(&mhi_xprtp->xprt,
+				   IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+		D("%s: Notified IPC Router of %s CLOSE\n",
+		  __func__, mhi_xprtp->xprt.name);
+		wait_for_completion(&mhi_xprtp->sft_close_complete);
+	}
+	kfree(xprt_work);
+}
+
+/**
+ * mhi_xprt_xfer_event() - Function to handle MHI XFER Callbacks
+ * @cb_info: Information containing xfer callback details.
+ *
+ * This function is called when the MHI generates a XFER event to the
+ * IPC Router. This function is used to handle events like tx/rx.
+ */
+static void mhi_xprt_xfer_event(struct mhi_cb_info *cb_info)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+	void *out_addr;
+
+	mhi_xprtp = (struct ipc_router_mhi_xprt *)(cb_info->result->user_data);
+	if (cb_info->chan == mhi_xprtp->ch_hndl.out_chan_id) {
+		out_addr = cb_info->result->buf_addr;
+		mutex_lock(&mhi_xprtp->ch_hndl.state_lock);
+		ipc_router_mhi_xprt_find_addr_map(&mhi_xprtp->tx_addr_map_list,
+					&mhi_xprtp->tx_addr_map_list_lock,
+					out_addr);
+		wake_up(&mhi_xprtp->write_wait_q);
+		mutex_unlock(&mhi_xprtp->ch_hndl.state_lock);
+	} else if (cb_info->chan == mhi_xprtp->ch_hndl.in_chan_id) {
+		queue_work(mhi_xprtp->wq, &mhi_xprtp->read_work);
+	} else {
+		IPC_RTR_ERR("%s: chan_id %d not part of %s\n",
+			    __func__, cb_info->chan, mhi_xprtp->xprt_name);
+	}
+}
+
+/**
+ * ipc_router_mhi_xprt_cb() - Callback to notify events on a channel
+ * @cb_info: Information containing the details of callback.
+ *
+ * This function is called by the MHI driver to notify different events
+ * like successful tx/rx, SSR events etc.
+ */
+static void ipc_router_mhi_xprt_cb(struct mhi_cb_info *cb_info)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+	struct ipc_router_mhi_xprt_work *xprt_work;
+
+	if (cb_info->result == NULL) {
+		IPC_RTR_ERR("%s: Result not available in cb_info\n", __func__);
+		return;
+	}
+
+	mhi_xprtp = (struct ipc_router_mhi_xprt *)(cb_info->result->user_data);
+	switch (cb_info->cb_reason) {
+	case MHI_CB_MHI_ENABLED:
+	case MHI_CB_MHI_DISABLED:
+		xprt_work = kmalloc(sizeof(*xprt_work), GFP_KERNEL);
+		if (!xprt_work) {
+			IPC_RTR_ERR("%s: Couldn't handle %d event on %s\n",
+				__func__, cb_info->cb_reason,
+				mhi_xprtp->xprt_name);
+			return;
+		}
+		xprt_work->mhi_xprtp = mhi_xprtp;
+		xprt_work->chan_id = cb_info->chan;
+		if (cb_info->cb_reason == MHI_CB_MHI_ENABLED)
+			INIT_WORK(&xprt_work->work, mhi_xprt_enable_event);
+		else
+			INIT_WORK(&xprt_work->work, mhi_xprt_disable_event);
+		queue_work(mhi_xprtp->wq, &xprt_work->work);
+		break;
+	case MHI_CB_XFER:
+		mhi_xprt_xfer_event(cb_info);
+		break;
+	default:
+		IPC_RTR_ERR("%s: Invalid cb reason %x\n",
+			    __func__, cb_info->cb_reason);
+	}
+}
+
+/**
+ * ipc_router_mhi_driver_register() - register for MHI channels
+ *
+ * @mhi_xprtp: pointer to IPC router mhi xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added.
+ */
+static int ipc_router_mhi_driver_register(
+		struct ipc_router_mhi_xprt *mhi_xprtp)
+{
+	int rc_status;
+
+	rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.out_handle,
+				mhi_xprtp->ch_hndl.out_chan_id, 0,
+				&mhi_xprtp->ch_hndl.out_clnt_info,
+				(void *)mhi_xprtp);
+	if (rc_status) {
+		IPC_RTR_ERR("%s: Error %d registering out_chan for %s\n",
+			    __func__, rc_status, mhi_xprtp->xprt_name);
+		return -EFAULT;
+	}
+
+	rc_status = mhi_register_channel(&mhi_xprtp->ch_hndl.in_handle,
+				mhi_xprtp->ch_hndl.in_chan_id, 0,
+				&mhi_xprtp->ch_hndl.in_clnt_info,
+				(void *)mhi_xprtp);
+	if (rc_status) {
+		mhi_deregister_channel(mhi_xprtp->ch_hndl.out_handle);
+		IPC_RTR_ERR("%s: Error %d registering in_chan for %s\n",
+			    __func__, rc_status, mhi_xprtp->xprt_name);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/**
+ * ipc_router_mhi_config_init() - init MHI xprt configs
+ *
+ * @mhi_xprt_config: pointer to MHI xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the MHI XPRT pointer with
+ * the MHI XPRT configurations from device tree.
+ */
+static int ipc_router_mhi_config_init(
+	struct ipc_router_mhi_xprt_config *mhi_xprt_config)
+{
+	struct ipc_router_mhi_xprt *mhi_xprtp;
+	char wq_name[XPRT_NAME_LEN];
+	int rc;
+
+	mhi_xprtp = kzalloc(sizeof(struct ipc_router_mhi_xprt), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(mhi_xprtp)) {
+		IPC_RTR_ERR("%s: kzalloc() failed for mhi_xprtp:%s\n",
+			__func__, mhi_xprt_config->xprt_name);
+		return -ENOMEM;
+	}
+
+	scnprintf(wq_name, XPRT_NAME_LEN, "MHI_XPRT%x:%x",
+		  mhi_xprt_config->out_chan_id, mhi_xprt_config->in_chan_id);
+	mhi_xprtp->wq = create_singlethread_workqueue(wq_name);
+	if (!mhi_xprtp->wq) {
+		IPC_RTR_ERR("%s: %s create WQ failed\n",
+			__func__, mhi_xprt_config->xprt_name);
+		kfree(mhi_xprtp);
+		return -EFAULT;
+	}
+
+	INIT_WORK(&mhi_xprtp->read_work, mhi_xprt_read_data);
+	init_waitqueue_head(&mhi_xprtp->write_wait_q);
+	mhi_xprtp->xprt_version = mhi_xprt_config->xprt_version;
+	strlcpy(mhi_xprtp->xprt_name, mhi_xprt_config->xprt_name,
+		XPRT_NAME_LEN);
+
+	/* Initialize XPRT operations and parameters registered with IPC RTR */
+	mhi_xprtp->xprt.link_id = mhi_xprt_config->link_id;
+	mhi_xprtp->xprt.name = mhi_xprtp->xprt_name;
+	mhi_xprtp->xprt.get_version = ipc_router_mhi_get_xprt_version;
+	mhi_xprtp->xprt.set_version = ipc_router_mhi_set_xprt_version;
+	mhi_xprtp->xprt.get_option = ipc_router_mhi_get_xprt_option;
+	mhi_xprtp->xprt.read_avail = NULL;
+	mhi_xprtp->xprt.read = NULL;
+	mhi_xprtp->xprt.write_avail = ipc_router_mhi_write_avail;
+	mhi_xprtp->xprt.write = ipc_router_mhi_write;
+	mhi_xprtp->xprt.close = ipc_router_mhi_close;
+	mhi_xprtp->xprt.sft_close_done = mhi_xprt_sft_close_done;
+	mhi_xprtp->xprt.priv = NULL;
+
+	/* Initialize channel handle parameters */
+	mhi_xprtp->ch_hndl.out_chan_id = mhi_xprt_config->out_chan_id;
+	mhi_xprtp->ch_hndl.in_chan_id = mhi_xprt_config->in_chan_id;
+	mhi_xprtp->ch_hndl.out_clnt_info.mhi_client_cb = ipc_router_mhi_xprt_cb;
+	mhi_xprtp->ch_hndl.in_clnt_info.mhi_client_cb = ipc_router_mhi_xprt_cb;
+	mutex_init(&mhi_xprtp->ch_hndl.state_lock);
+	mutex_init(&mhi_xprtp->ch_hndl.in_skbq_lock);
+	skb_queue_head_init(&mhi_xprtp->ch_hndl.in_skbq);
+	mhi_xprtp->ch_hndl.max_packet_size = IPC_ROUTER_MHI_XPRT_MAX_PKT_SIZE;
+	mhi_xprtp->ch_hndl.num_trbs = IPC_ROUTER_MHI_XPRT_NUM_TRBS;
+	mhi_xprtp->ch_hndl.mhi_xprtp = mhi_xprtp;
+	INIT_LIST_HEAD(&mhi_xprtp->tx_addr_map_list);
+	mutex_init(&mhi_xprtp->tx_addr_map_list_lock);
+	INIT_LIST_HEAD(&mhi_xprtp->rx_addr_map_list);
+	mutex_init(&mhi_xprtp->rx_addr_map_list_lock);
+
+	rc = ipc_router_mhi_driver_register(mhi_xprtp);
+	if (rc) {
+		destroy_workqueue(mhi_xprtp->wq);
+		kfree(mhi_xprtp);
+	}
+	return rc;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @mhi_xprt_config: pointer to MHI XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct ipc_router_mhi_xprt_config *mhi_xprt_config)
+{
+	int rc;
+	uint32_t out_chan_id;
+	uint32_t in_chan_id;
+	const char *remote_ss;
+	uint32_t link_id;
+	uint32_t version;
+	char *key;
+
+	key = "qcom,out-chan-id";
+	rc = of_property_read_u32(node, key, &out_chan_id);
+	if (rc)
+		goto error;
+	mhi_xprt_config->out_chan_id = out_chan_id;
+
+	key = "qcom,in-chan-id";
+	rc = of_property_read_u32(node, key, &in_chan_id);
+	if (rc)
+		goto error;
+	mhi_xprt_config->in_chan_id = in_chan_id;
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+
+	key = "qcom,xprt-linkid";
+	rc = of_property_read_u32(node, key, &link_id);
+	if (rc)
+		goto error;
+	mhi_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	rc = of_property_read_u32(node, key, &version);
+	if (rc)
+		goto error;
+	mhi_xprt_config->xprt_version = version;
+
+	scnprintf(mhi_xprt_config->xprt_name, XPRT_NAME_LEN,
+		  "IPCRTR_MHI%x:%x_%s",
+		  out_chan_id, in_chan_id, remote_ss);
+
+	return 0;
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
+
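+/*
+ * An illustrative device tree node for this binding, built only from the
+ * keys read above; the node name and all property values are example
+ * assumptions, not taken from a real dtsi:
+ *
+ *	ipc_router_mhi_xprt {
+ *		compatible = "qcom,ipc_router_mhi_xprt";
+ *		qcom,out-chan-id = <34>;
+ *		qcom,in-chan-id = <35>;
+ *		qcom,xprt-remote = "external-modem";
+ *		qcom,xprt-linkid = <2>;
+ *		qcom,xprt-version = <3>;
+ *	};
+ */
+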
+/**
+ * ipc_router_mhi_xprt_probe() - Probe an MHI xprt
+ * @pdev: Platform device corresponding to MHI xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an MHI transport.
+ */
+static int ipc_router_mhi_xprt_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct ipc_router_mhi_xprt_config mhi_xprt_config;
+
+	if (pdev && pdev->dev.of_node) {
+		rc = parse_devicetree(pdev->dev.of_node, &mhi_xprt_config);
+		if (rc) {
+			IPC_RTR_ERR("%s: failed to parse device tree\n",
+				    __func__);
+			return rc;
+		}
+
+		rc = ipc_router_mhi_config_init(&mhi_xprt_config);
+		if (rc) {
+			IPC_RTR_ERR("%s: init failed\n", __func__);
+			return rc;
+		}
+	}
+	return 0;
+}
+
+static const struct of_device_id ipc_router_mhi_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_mhi_xprt" },
+	{},
+};
+
+static struct platform_driver ipc_router_mhi_xprt_driver = {
+	.probe = ipc_router_mhi_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipc_router_mhi_xprt_match_table,
+	},
+};
+
+static int __init ipc_router_mhi_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&ipc_router_mhi_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR("%s: ipc_router_mhi_xprt_driver reg. failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+	return 0;
+}
+
+module_init(ipc_router_mhi_xprt_init);
+MODULE_DESCRIPTION("IPC Router MHI XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/ipc_router_smd_xprt.c b/drivers/soc/qcom/ipc_router_smd_xprt.c
new file mode 100644
index 0000000..513689a
--- /dev/null
+++ b/drivers/soc/qcom/ipc_router_smd_xprt.c
@@ -0,0 +1,867 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * IPC ROUTER SMD XPRT module.
+ */
+#define DEBUG
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <soc/qcom/smd.h>
+#include <soc/qcom/smsm.h>
+#include <soc/qcom/subsystem_restart.h>
+
+static int msm_ipc_router_smd_xprt_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_smd_xprt_debug_mask,
+		   int, 0664);
+
+#if defined(DEBUG)
+#define D(x...) do { \
+if (msm_ipc_router_smd_xprt_debug_mask) \
+	pr_info(x); \
+} while (0)
+#else
+#define D(x...) do { } while (0)
+#endif
+
+#define MIN_FRAG_SZ (IPC_ROUTER_HDR_SIZE + sizeof(union rr_control_msg))
+
+#define NUM_SMD_XPRTS 4
+#define XPRT_NAME_LEN (SMD_MAX_CH_NAME_LEN + 12)
+
+/**
+ * msm_ipc_router_smd_xprt - IPC Router's SMD XPRT structure
+ * @list: IPC router's SMD XPRTs list.
+ * @ch_name: Name of the SMD channel exported by the SMD driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @edge: SMD channel edge.
+ * @driver: Platform driver registered by this XPRT.
+ * @xprt: IPC Router XPRT structure to contain XPRT specific info.
+ * @channel: SMD channel specific info.
+ * @smd_xprt_wq: Workqueue to queue read & other XPRT related works.
+ * @write_avail_wait_q: wait queue for writer thread.
+ * @in_pkt: Pointer to any partially read packet.
+ * @is_partial_in_pkt: Flag indicating whether a partial packet is being read.
+ * @read_work: Read Work to perform read operation from SMD.
+ * @ss_reset_lock: Lock to protect access to the ss_reset flag.
+ * @ss_reset: flag used to check SSR state.
+ * @pil: handle to the remote subsystem.
+ * @sft_close_complete: Variable to indicate completion of SSR handling
+ *                      by IPC Router.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ */
+struct msm_ipc_router_smd_xprt {
+	struct list_head list;
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t edge;
+	struct platform_driver driver;
+	struct msm_ipc_router_xprt xprt;
+	smd_channel_t *channel;
+	struct workqueue_struct *smd_xprt_wq;
+	wait_queue_head_t write_avail_wait_q;
+	struct rr_packet *in_pkt;
+	int is_partial_in_pkt;
+	struct delayed_work read_work;
+	spinlock_t ss_reset_lock;	/*Subsystem reset lock*/
+	int ss_reset;
+	void *pil;
+	struct completion sft_close_complete;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+	bool disable_pil_loading;
+};
+
+struct msm_ipc_router_smd_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void smd_xprt_read_data(struct work_struct *work);
+static void smd_xprt_open_event(struct work_struct *work);
+static void smd_xprt_close_event(struct work_struct *work);
+
+/**
+ * msm_ipc_router_smd_xprt_config - Configuration info of each SMD XPRT
+ * @ch_name: Name of the SMD endpoint exported by SMD driver.
+ * @xprt_name: Name of the XPRT to be registered with IPC Router.
+ * @edge: ID to differentiate among multiple SMD endpoints.
+ * @link_id: Network Cluster ID to which this XPRT belongs.
+ * @xprt_version: IPC Router header version supported by this XPRT.
+ * @xprt_option: XPRT specific options to be handled by IPC Router.
+ * @disable_pil_loading: Disable PIL Loading of the subsystem.
+ */
+struct msm_ipc_router_smd_xprt_config {
+	char ch_name[SMD_MAX_CH_NAME_LEN];
+	char xprt_name[XPRT_NAME_LEN];
+	uint32_t edge;
+	uint32_t link_id;
+	unsigned int xprt_version;
+	unsigned int xprt_option;
+	bool disable_pil_loading;
+};
+
+struct msm_ipc_router_smd_xprt_config smd_xprt_cfg[] = {
+	{"RPCRPY_CNTL", "ipc_rtr_smd_rpcrpy_cntl", SMD_APPS_MODEM, 1, 1},
+	{"IPCRTR", "ipc_rtr_smd_ipcrtr", SMD_APPS_MODEM, 1, 1},
+	{"IPCRTR", "ipc_rtr_q6_ipcrtr", SMD_APPS_QDSP, 1, 1},
+	{"IPCRTR", "ipc_rtr_wcnss_ipcrtr", SMD_APPS_WCNSS, 1, 1},
+};
+
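+/*
+ * Each row above fills ch_name, xprt_name, edge, link_id and xprt_version
+ * positionally; xprt_option and disable_pil_loading are left zero, i.e. no
+ * XPRT options are set and PIL loading of the subsystem stays enabled.
+ */
+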
+#define MODULE_NAME "ipc_router_smd_xprt"
+#define IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT 3000
+static int ipc_router_smd_xprt_probe_done;
+static struct delayed_work ipc_router_smd_xprt_probe_work;
+static DEFINE_MUTEX(smd_remote_xprt_list_lock_lha1);
+static LIST_HEAD(smd_remote_xprt_list);
+
+static bool is_pil_loading_disabled(uint32_t edge);
+
+/**
+ * ipc_router_smd_set_xprt_version() - Set IPC Router header version
+ *                                          in the transport
+ * @xprt: Reference to the transport structure.
+ * @version: The version to be set in transport.
+ */
+static void ipc_router_smd_set_xprt_version(
+	struct msm_ipc_router_xprt *xprt, unsigned int version)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	if (!xprt)
+		return;
+	smd_xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+	smd_xprtp->xprt_version = version;
+}
+
+static int msm_ipc_router_smd_get_xprt_version(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	smd_xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	return (int)smd_xprtp->xprt_version;
+}
+
+static int msm_ipc_router_smd_get_xprt_option(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	if (!xprt)
+		return -EINVAL;
+	smd_xprtp = container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	return (int)smd_xprtp->xprt_option;
+}
+
+static int msm_ipc_router_smd_remote_write_avail(
+	struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	return smd_write_avail(smd_xprtp->channel);
+}
+
+static int msm_ipc_router_smd_remote_write(void *data,
+					   uint32_t len,
+					   struct msm_ipc_router_xprt *xprt)
+{
+	struct rr_packet *pkt = (struct rr_packet *)data;
+	struct sk_buff *ipc_rtr_pkt;
+	int offset, sz_written = 0;
+	int ret, num_retries = 0;
+	unsigned long flags;
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	if (!pkt)
+		return -EINVAL;
+
+	if (!len || pkt->length != len)
+		return -EINVAL;
+
+	do {
+		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+		if (smd_xprtp->ss_reset) {
+			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
+						flags);
+			IPC_RTR_ERR("%s: %s chnl reset\n",
+					__func__, xprt->name);
+			return -ENETRESET;
+		}
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		ret = smd_write_start(smd_xprtp->channel, len);
+		if (ret < 0 && num_retries >= 5) {
+			IPC_RTR_ERR("%s: Error %d @smd_write_start for %s\n",
+				__func__, ret, xprt->name);
+			return ret;
+		} else if (ret < 0) {
+			msleep(50);
+			num_retries++;
+		}
+	} while (ret < 0);
+
+	D("%s: Ready to write %d bytes\n", __func__, len);
+	skb_queue_walk(pkt->pkt_fragment_q, ipc_rtr_pkt) {
+		offset = 0;
+		while (offset < ipc_rtr_pkt->len) {
+			if (!smd_write_segment_avail(smd_xprtp->channel))
+				smd_enable_read_intr(smd_xprtp->channel);
+
+			wait_event(smd_xprtp->write_avail_wait_q,
+				(smd_write_segment_avail(smd_xprtp->channel) ||
+				smd_xprtp->ss_reset));
+			smd_disable_read_intr(smd_xprtp->channel);
+			spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+			if (smd_xprtp->ss_reset) {
+				spin_unlock_irqrestore(
+					&smd_xprtp->ss_reset_lock, flags);
+				IPC_RTR_ERR("%s: %s chnl reset\n",
+					__func__, xprt->name);
+				return -ENETRESET;
+			}
+			spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock,
+						flags);
+
+			sz_written = smd_write_segment(smd_xprtp->channel,
+					ipc_rtr_pkt->data + offset,
+					(ipc_rtr_pkt->len - offset));
+			offset += sz_written;
+			sz_written = 0;
+		}
+		D("%s: Wrote %d bytes over %s\n",
+		  __func__, offset, xprt->name);
+	}
+
+	if (!smd_write_end(smd_xprtp->channel))
+		D("%s: Finished writing\n", __func__);
+	return len;
+}
+
+static int msm_ipc_router_smd_remote_close(struct msm_ipc_router_xprt *xprt)
+{
+	int rc;
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	rc = smd_close(smd_xprtp->channel);
+	if (smd_xprtp->pil) {
+		subsystem_put(smd_xprtp->pil);
+		smd_xprtp->pil = NULL;
+	}
+	return rc;
+}
+
+static void smd_xprt_sft_close_done(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt, struct msm_ipc_router_smd_xprt, xprt);
+
+	complete_all(&smd_xprtp->sft_close_complete);
+}
+
+static void smd_xprt_read_data(struct work_struct *work)
+{
+	int pkt_size, sz_read, sz;
+	struct sk_buff *ipc_rtr_pkt;
+	void *data;
+	unsigned long flags;
+	struct delayed_work *rwork = to_delayed_work(work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(rwork, struct msm_ipc_router_smd_xprt, read_work);
+
+	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+	if (smd_xprtp->ss_reset) {
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		if (smd_xprtp->in_pkt)
+			release_pkt(smd_xprtp->in_pkt);
+		smd_xprtp->is_partial_in_pkt = 0;
+		IPC_RTR_ERR("%s: %s channel reset\n",
+			__func__, smd_xprtp->xprt.name);
+		return;
+	}
+	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+
+	D("%s pkt_size: %d, read_avail: %d\n", __func__,
+		smd_cur_packet_size(smd_xprtp->channel),
+		smd_read_avail(smd_xprtp->channel));
+	while ((pkt_size = smd_cur_packet_size(smd_xprtp->channel)) &&
+		smd_read_avail(smd_xprtp->channel)) {
+		if (!smd_xprtp->is_partial_in_pkt) {
+			smd_xprtp->in_pkt = create_pkt(NULL);
+			if (!smd_xprtp->in_pkt) {
+				IPC_RTR_ERR("%s: Couldn't alloc rr_packet\n",
+					__func__);
+				return;
+			}
+			smd_xprtp->is_partial_in_pkt = 1;
+			D("%s: Allocated rr_packet\n", __func__);
+		}
+
+		if (((pkt_size >= MIN_FRAG_SZ) &&
+		     (smd_read_avail(smd_xprtp->channel) < MIN_FRAG_SZ)) ||
+		    ((pkt_size < MIN_FRAG_SZ) &&
+		     (smd_read_avail(smd_xprtp->channel) < pkt_size)))
+			return;
+
+		sz = smd_read_avail(smd_xprtp->channel);
+		do {
+			ipc_rtr_pkt = alloc_skb(sz, GFP_KERNEL);
+			if (!ipc_rtr_pkt) {
+				if (sz <= (PAGE_SIZE/2)) {
+					queue_delayed_work(
+						smd_xprtp->smd_xprt_wq,
+						&smd_xprtp->read_work,
+						msecs_to_jiffies(100));
+					return;
+				}
+				sz = sz / 2;
+			}
+		} while (!ipc_rtr_pkt);
+
+		D("%s: Allocated the sk_buff of size %d\n", __func__, sz);
+		data = skb_put(ipc_rtr_pkt, sz);
+		sz_read = smd_read(smd_xprtp->channel, data, sz);
+		if (sz_read != sz) {
+			IPC_RTR_ERR("%s: Couldn't read %s completely\n",
+				__func__, smd_xprtp->xprt.name);
+			kfree_skb(ipc_rtr_pkt);
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->is_partial_in_pkt = 0;
+			return;
+		}
+		skb_queue_tail(smd_xprtp->in_pkt->pkt_fragment_q, ipc_rtr_pkt);
+		smd_xprtp->in_pkt->length += sz_read;
+		if (sz_read != pkt_size)
+			smd_xprtp->is_partial_in_pkt = 1;
+		else
+			smd_xprtp->is_partial_in_pkt = 0;
+
+		if (!smd_xprtp->is_partial_in_pkt) {
+			D("%s: Packet size read %d\n",
+			  __func__, smd_xprtp->in_pkt->length);
+			msm_ipc_router_xprt_notify(&smd_xprtp->xprt,
+						IPC_ROUTER_XPRT_EVENT_DATA,
+						(void *)smd_xprtp->in_pkt);
+			release_pkt(smd_xprtp->in_pkt);
+			smd_xprtp->in_pkt = NULL;
+		}
+	}
+}
+
+static void smd_xprt_open_event(struct work_struct *work)
+{
+	struct msm_ipc_router_smd_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_smd_xprt_work, work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt_work->xprt,
+			     struct msm_ipc_router_smd_xprt, xprt);
+	unsigned long flags;
+
+	spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+	smd_xprtp->ss_reset = 0;
+	spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+	msm_ipc_router_xprt_notify(xprt_work->xprt,
+				IPC_ROUTER_XPRT_EVENT_OPEN, NULL);
+	D("%s: Notified IPC Router of %s OPEN\n",
+	   __func__, xprt_work->xprt->name);
+	kfree(xprt_work);
+}
+
+static void smd_xprt_close_event(struct work_struct *work)
+{
+	struct msm_ipc_router_smd_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_smd_xprt_work, work);
+	struct msm_ipc_router_smd_xprt *smd_xprtp =
+		container_of(xprt_work->xprt,
+			     struct msm_ipc_router_smd_xprt, xprt);
+
+	if (smd_xprtp->in_pkt) {
+		release_pkt(smd_xprtp->in_pkt);
+		smd_xprtp->in_pkt = NULL;
+	}
+	smd_xprtp->is_partial_in_pkt = 0;
+	init_completion(&smd_xprtp->sft_close_complete);
+	msm_ipc_router_xprt_notify(xprt_work->xprt,
+				IPC_ROUTER_XPRT_EVENT_CLOSE, NULL);
+	D("%s: Notified IPC Router of %s CLOSE\n",
+	   __func__, xprt_work->xprt->name);
+	wait_for_completion(&smd_xprtp->sft_close_complete);
+	kfree(xprt_work);
+}
+
+static void msm_ipc_router_smd_remote_notify(void *_dev, unsigned int event)
+{
+	unsigned long flags;
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+	struct msm_ipc_router_smd_xprt_work *xprt_work;
+
+	smd_xprtp = (struct msm_ipc_router_smd_xprt *)_dev;
+	if (!smd_xprtp)
+		return;
+
+	switch (event) {
+	case SMD_EVENT_DATA:
+		if (smd_read_avail(smd_xprtp->channel))
+			queue_delayed_work(smd_xprtp->smd_xprt_wq,
+					   &smd_xprtp->read_work, 0);
+		if (smd_write_segment_avail(smd_xprtp->channel))
+			wake_up(&smd_xprtp->write_avail_wait_q);
+		break;
+
+	case SMD_EVENT_OPEN:
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->xprt = &smd_xprtp->xprt;
+		INIT_WORK(&xprt_work->work, smd_xprt_open_event);
+		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
+		break;
+
+	case SMD_EVENT_CLOSE:
+		spin_lock_irqsave(&smd_xprtp->ss_reset_lock, flags);
+		smd_xprtp->ss_reset = 1;
+		spin_unlock_irqrestore(&smd_xprtp->ss_reset_lock, flags);
+		wake_up(&smd_xprtp->write_avail_wait_q);
+		xprt_work = kmalloc(sizeof(struct msm_ipc_router_smd_xprt_work),
+				    GFP_ATOMIC);
+		if (!xprt_work) {
+			IPC_RTR_ERR(
+			"%s: Couldn't notify %d event to IPC Router\n",
+				__func__, event);
+			return;
+		}
+		xprt_work->xprt = &smd_xprtp->xprt;
+		INIT_WORK(&xprt_work->work, smd_xprt_close_event);
+		queue_work(smd_xprtp->smd_xprt_wq, &xprt_work->work);
+		break;
+	}
+}
+
+static void *msm_ipc_load_subsystem(uint32_t edge)
+{
+	void *pil = NULL;
+	const char *peripheral;
+	bool loading_disabled;
+
+	loading_disabled = is_pil_loading_disabled(edge);
+	peripheral = smd_edge_to_pil_str(edge);
+	if (!IS_ERR_OR_NULL(peripheral) && !loading_disabled) {
+		pil = subsystem_get(peripheral);
+		if (IS_ERR(pil)) {
+			IPC_RTR_ERR("%s: Failed to load %s\n",
+				__func__, peripheral);
+			pil = NULL;
+		}
+	}
+	return pil;
+}
+
+/**
+ * find_smd_xprt_list() - Find the xprt item specific to an SMD endpoint
+ * @pdev: Platform device registered by the SMD driver
+ *
+ * @return: pointer to msm_ipc_router_smd_xprt if a matching endpoint is found,
+ *		else NULL.
+ *
+ * This function is used to find a specific xprt item from the global xprt list.
+ */
+static struct msm_ipc_router_smd_xprt *
+		find_smd_xprt_list(struct platform_device *pdev)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(smd_xprtp, &smd_remote_xprt_list, list) {
+		if (!strcmp(pdev->name, smd_xprtp->ch_name)
+				&& (pdev->id == smd_xprtp->edge)) {
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+			return smd_xprtp;
+		}
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+	return NULL;
+}
+
+/**
+ * is_pil_loading_disabled() - Check if pil loading a subsystem is disabled
+ * @edge: Edge that points to the remote subsystem.
+ *
+ * @return: true if disabled, false if enabled.
+ */
+static bool is_pil_loading_disabled(uint32_t edge)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(smd_xprtp, &smd_remote_xprt_list, list) {
+		if (smd_xprtp->edge == edge) {
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+			return smd_xprtp->disable_pil_loading;
+		}
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+	return true;
+}
+
+/**
+ * msm_ipc_router_smd_remote_probe() - Probe an SMD endpoint
+ *
+ * @pdev: Platform device corresponding to SMD endpoint.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying SMD driver registers
+ * a platform device, mapped to an SMD endpoint.
+ */
+static int msm_ipc_router_smd_remote_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	smd_xprtp = find_smd_xprt_list(pdev);
+	if (!smd_xprtp) {
+		IPC_RTR_ERR("%s No device with name %s\n",
+					__func__, pdev->name);
+		return -EPROBE_DEFER;
+	}
+	if (strcmp(pdev->name, smd_xprtp->ch_name)
+			|| (pdev->id != smd_xprtp->edge)) {
+		IPC_RTR_ERR("%s wrong item name:%s edge:%d\n",
+				__func__, smd_xprtp->ch_name, smd_xprtp->edge);
+		return -ENODEV;
+	}
+	smd_xprtp->smd_xprt_wq =
+		create_singlethread_workqueue(pdev->name);
+	if (!smd_xprtp->smd_xprt_wq) {
+		IPC_RTR_ERR("%s: WQ creation failed for %s\n",
+			__func__, pdev->name);
+		return -EFAULT;
+	}
+
+	smd_xprtp->pil = msm_ipc_load_subsystem(
+					smd_xprtp->edge);
+	rc = smd_named_open_on_edge(smd_xprtp->ch_name,
+				    smd_xprtp->edge,
+				    &smd_xprtp->channel,
+				    smd_xprtp,
+				    msm_ipc_router_smd_remote_notify);
+	if (rc < 0) {
+		IPC_RTR_ERR("%s: Channel open failed for %s\n",
+			__func__, smd_xprtp->ch_name);
+		if (smd_xprtp->pil) {
+			subsystem_put(smd_xprtp->pil);
+			smd_xprtp->pil = NULL;
+		}
+		destroy_workqueue(smd_xprtp->smd_xprt_wq);
+		return rc;
+	}
+
+	smd_disable_read_intr(smd_xprtp->channel);
+
+	smsm_change_state(SMSM_APPS_STATE, 0, SMSM_RPCINIT);
+
+	return 0;
+}
+
+/**
+ * msm_ipc_router_smd_driver_register() - register SMD XPRT drivers
+ *
+ * @smd_xprtp: pointer to Ipc router smd xprt structure.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when a new XPRT is added, in order to register a
+ * platform driver for the new XPRT.
+ */
+static int msm_ipc_router_smd_driver_register(
+			struct msm_ipc_router_smd_xprt *smd_xprtp)
+{
+	int ret;
+	struct msm_ipc_router_smd_xprt *item;
+	unsigned int already_registered = 0;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	list_for_each_entry(item, &smd_remote_xprt_list, list) {
+		if (!strcmp(smd_xprtp->ch_name, item->ch_name))
+			already_registered = 1;
+	}
+	list_add(&smd_xprtp->list, &smd_remote_xprt_list);
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+
+	if (!already_registered) {
+		smd_xprtp->driver.driver.name = smd_xprtp->ch_name;
+		smd_xprtp->driver.driver.owner = THIS_MODULE;
+		smd_xprtp->driver.probe = msm_ipc_router_smd_remote_probe;
+
+		ret = platform_driver_register(&smd_xprtp->driver);
+		if (ret) {
+			IPC_RTR_ERR(
+			"%s: Failed to register platform driver [%s]\n",
+						__func__, smd_xprtp->ch_name);
+			return ret;
+		}
+	} else {
+		IPC_RTR_ERR("%s Already driver registered %s\n",
+					__func__, smd_xprtp->ch_name);
+	}
+	return 0;
+}
+
+/**
+ * msm_ipc_router_smd_config_init() - init SMD xprt configs
+ *
+ * @smd_xprt_config: pointer to SMD xprt configurations.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called to initialize the SMD XPRT pointer with
+ * the SMD XPRT configurations either from device tree or static arrays.
+ */
+static int msm_ipc_router_smd_config_init(
+		struct msm_ipc_router_smd_xprt_config *smd_xprt_config)
+{
+	struct msm_ipc_router_smd_xprt *smd_xprtp;
+
+	smd_xprtp = kzalloc(sizeof(struct msm_ipc_router_smd_xprt), GFP_KERNEL);
+	if (!smd_xprtp) {
+		IPC_RTR_ERR("%s: kzalloc() failed for smd_xprtp id:%s\n",
+				__func__, smd_xprt_config->ch_name);
+		return -ENOMEM;
+	}
+
+	smd_xprtp->xprt.link_id = smd_xprt_config->link_id;
+	smd_xprtp->xprt_version = smd_xprt_config->xprt_version;
+	smd_xprtp->edge = smd_xprt_config->edge;
+	smd_xprtp->xprt_option = smd_xprt_config->xprt_option;
+	smd_xprtp->disable_pil_loading = smd_xprt_config->disable_pil_loading;
+
+	strlcpy(smd_xprtp->ch_name, smd_xprt_config->ch_name,
+						SMD_MAX_CH_NAME_LEN);
+
+	strlcpy(smd_xprtp->xprt_name, smd_xprt_config->xprt_name,
+						XPRT_NAME_LEN);
+	smd_xprtp->xprt.name = smd_xprtp->xprt_name;
+
+	smd_xprtp->xprt.set_version =
+		ipc_router_smd_set_xprt_version;
+	smd_xprtp->xprt.get_version =
+		msm_ipc_router_smd_get_xprt_version;
+	smd_xprtp->xprt.get_option =
+		msm_ipc_router_smd_get_xprt_option;
+	smd_xprtp->xprt.read_avail = NULL;
+	smd_xprtp->xprt.read = NULL;
+	smd_xprtp->xprt.write_avail =
+		msm_ipc_router_smd_remote_write_avail;
+	smd_xprtp->xprt.write = msm_ipc_router_smd_remote_write;
+	smd_xprtp->xprt.close = msm_ipc_router_smd_remote_close;
+	smd_xprtp->xprt.sft_close_done = smd_xprt_sft_close_done;
+	smd_xprtp->xprt.priv = NULL;
+
+	init_waitqueue_head(&smd_xprtp->write_avail_wait_q);
+	smd_xprtp->in_pkt = NULL;
+	smd_xprtp->is_partial_in_pkt = 0;
+	INIT_DELAYED_WORK(&smd_xprtp->read_work, smd_xprt_read_data);
+	spin_lock_init(&smd_xprtp->ss_reset_lock);
+	smd_xprtp->ss_reset = 0;
+
+	msm_ipc_router_smd_driver_register(smd_xprtp);
+
+	return 0;
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ * @smd_xprt_config: pointer to SMD XPRT configurations
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+		struct msm_ipc_router_smd_xprt_config *smd_xprt_config)
+{
+	int ret;
+	int edge;
+	int link_id;
+	int version;
+	char *key;
+	const char *ch_name;
+	const char *remote_ss;
+
+	key = "qcom,ch-name";
+	ch_name = of_get_property(node, key, NULL);
+	if (!ch_name)
+		goto error;
+	strlcpy(smd_xprt_config->ch_name, ch_name, SMD_MAX_CH_NAME_LEN);
+
+	key = "qcom,xprt-remote";
+	remote_ss = of_get_property(node, key, NULL);
+	if (!remote_ss)
+		goto error;
+	edge = smd_remote_ss_to_edge(remote_ss);
+	if (edge < 0)
+		goto error;
+	smd_xprt_config->edge = edge;
+
+	key = "qcom,xprt-linkid";
+	ret = of_property_read_u32(node, key, &link_id);
+	if (ret)
+		goto error;
+	smd_xprt_config->link_id = link_id;
+
+	key = "qcom,xprt-version";
+	ret = of_property_read_u32(node, key, &version);
+	if (ret)
+		goto error;
+	smd_xprt_config->xprt_version = version;
+
+	key = "qcom,fragmented-data";
+	smd_xprt_config->xprt_option = of_property_read_bool(node, key);
+
+	key = "qcom,disable-pil-loading";
+	smd_xprt_config->disable_pil_loading = of_property_read_bool(node, key);
+
+	scnprintf(smd_xprt_config->xprt_name, XPRT_NAME_LEN, "%s_%s",
+			remote_ss, smd_xprt_config->ch_name);
+
+	return 0;
+
+error:
+	IPC_RTR_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
+
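+/*
+ * An illustrative device tree node for this binding; the node name and all
+ * property values are example assumptions, not taken from a real dtsi (the
+ * two boolean properties are optional):
+ *
+ *	ipc_router_smd_xprt_modem {
+ *		compatible = "qcom,ipc_router_smd_xprt";
+ *		qcom,ch-name = "IPCRTR";
+ *		qcom,xprt-remote = "modem";
+ *		qcom,xprt-linkid = <1>;
+ *		qcom,xprt-version = <1>;
+ *		qcom,fragmented-data;
+ *		qcom,disable-pil-loading;
+ *	};
+ */
+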
+/**
+ * msm_ipc_router_smd_xprt_probe() - Probe an SMD xprt
+ *
+ * @pdev: Platform device corresponding to SMD xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to an SMD transport.
+ */
+static int msm_ipc_router_smd_xprt_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_ipc_router_smd_xprt_config smd_xprt_config;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			mutex_lock(&smd_remote_xprt_list_lock_lha1);
+			ipc_router_smd_xprt_probe_done = 1;
+			mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+
+			ret = parse_devicetree(pdev->dev.of_node,
+							&smd_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s: Failed to parse device tree\n",
+								__func__);
+				return ret;
+			}
+
+			ret = msm_ipc_router_smd_config_init(&smd_xprt_config);
+			if (ret) {
+				IPC_RTR_ERR("%s init failed\n", __func__);
+				return ret;
+			}
+		}
+	}
+	return 0;
+}
+
+/**
+ * ipc_router_smd_xprt_probe_worker() - probe worker for non-DT configurations
+ *
+ * @work: work item to process
+ *
+ * This function is scheduled via schedule_delayed_work() after 3 seconds and
+ * checks whether the device tree probe has completed. If the device tree
+ * probe has not run, the default configurations are read from the static
+ * array.
+ */
+static void ipc_router_smd_xprt_probe_worker(struct work_struct *work)
+{
+	int i, ret;
+
+	if (WARN_ON(ARRAY_SIZE(smd_xprt_cfg) != NUM_SMD_XPRTS))
+		return;
+
+	mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	if (!ipc_router_smd_xprt_probe_done) {
+		mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+		for (i = 0; i < ARRAY_SIZE(smd_xprt_cfg); i++) {
+			ret = msm_ipc_router_smd_config_init(&smd_xprt_cfg[i]);
+			if (ret)
+				IPC_RTR_ERR(" %s init failed config idx %d\n",
+							__func__, i);
+		}
+		mutex_lock(&smd_remote_xprt_list_lock_lha1);
+	}
+	mutex_unlock(&smd_remote_xprt_list_lock_lha1);
+}
+
+static const struct of_device_id msm_ipc_router_smd_xprt_match_table[] = {
+	{ .compatible = "qcom,ipc_router_smd_xprt" },
+	{},
+};
+
+static struct platform_driver msm_ipc_router_smd_xprt_driver = {
+	.probe = msm_ipc_router_smd_xprt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_ipc_router_smd_xprt_match_table,
+	 },
+};
+
+static int __init msm_ipc_router_smd_xprt_init(void)
+{
+	int rc;
+
+	rc = platform_driver_register(&msm_ipc_router_smd_xprt_driver);
+	if (rc) {
+		IPC_RTR_ERR(
+		"%s: msm_ipc_router_smd_xprt_driver register failed %d\n",
+								__func__, rc);
+		return rc;
+	}
+
+	INIT_DELAYED_WORK(&ipc_router_smd_xprt_probe_work,
+					ipc_router_smd_xprt_probe_worker);
+	schedule_delayed_work(&ipc_router_smd_xprt_probe_work,
+			msecs_to_jiffies(IPC_ROUTER_SMD_XPRT_WAIT_TIMEOUT));
+	return 0;
+}
+
+module_init(msm_ipc_router_smd_xprt_init);
+MODULE_DESCRIPTION("IPC Router SMD XPRT");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-amon.c b/drivers/soc/qcom/llcc-amon.c
new file mode 100644
index 0000000..f814f2b
--- /dev/null
+++ b/drivers/soc/qcom/llcc-amon.c
@@ -0,0 +1,536 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+
+#define MODULE_NAME "LLCC AMON deadlock detector"
+static bool qcom_llcc_amon_panic = IS_ENABLED(CONFIG_QCOM_LLCC_AMON_PANIC);
+module_param(qcom_llcc_amon_panic, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(qcom_llcc_amon_panic,
+		"Enables deadlock detection by AMON");
+
+static int amon_interrupt_mode;
+module_param(amon_interrupt_mode, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(amon_interrupt_mode,
+		"Controls whether to use interrupt or poll mode");
+
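+/*
+ * Both parameters are writable at runtime through the standard module
+ * parameter interface under /sys/module/<module>/parameters/ (the exact
+ * module directory name depends on how the object is named in the Makefile).
+ * Note that amon_interrupt_mode is only consulted at probe time, so changing
+ * it later has no effect on an already probed device.
+ */
+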
+enum channel_type {
+	AMON_READ,
+	AMON_WRITE,
+	AMON_EVICT,
+};
+
+#define LLCC_AMON_FG_CNT_MASK			0x70
+#define LLCC_AMON_CG_CNT_MASK			0x380
+#define LLCC_AMON_FG_CNT_SHIFT			0x4
+#define LLCC_AMON_CG_CNT_SHIFT			0x7
+#define LLCC_COMMON_IRQ_AMON			0x8
+#define LLCC_AMON_STATUS0_MASK			0x3ff
+#define LLCC_AMON_CHAN_SHIFT			0x4
+#define LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(x) (0x1 << x)
+#define LLCC_AMON_CFG0_DLDM_SHIFT		0x2
+
+/* AMON register offsets */
+#define LLCC_AMON_START			0x3d000
+#define LLCC_AMON_CFG0			0x3d000
+#define LLCC_AMON_EVICT_REGS		0x3d020
+#define LLCC_AMON_WRITE_REGS		0x3d060
+#define LLCC_AMON_READ_REGS		0x3d100
+#define LLCC_AMON_IRQ_STATUS		0x3d200
+#define LLCC_AMON_IRQ_CLEAR		0x3d208
+#define LLCC_AMON_IRQ_ENABLE		0x3d20c
+#define LLCC_AMON_STATUS_0		0x3d210
+
+/* AMON configuration register bits */
+#define LLCC_AMON_CFG0_ENABLE	0x0 /* Enable AMON */
+#define LLCC_AMON_CFG0_DLDM	0x2 /* AMON deadlock detection mode */
+#define LLCC_AMON_CFG0_SCRM	0x3 /* Enable SCID based deadlock detection */
+
+/* Number of entries maintained by AMON */
+#define NR_READ_ENTRIES		40
+#define NR_WRITE_ENTRIES	16
+#define NR_EVICT_ENTRIES	8
+#define AMON_IRQ_BIT		0x0
+#define CHAN_OFF(x)		(x << 2)
+
+/**
+ * struct llcc_amon_data - AMON driver data
+ *
+ * @llcc_amon_regmap: map of AMON register block
+ * @dev: LLCC activity monitor device
+ * @amon_irq: LLCC activity monitor irq number
+ * @amon_fg_cnt: LLCC activity monitor fine grained counter overflow bit
+ * @amon_work: LLCC activity monitor work item to poll and
+ *             dump AMON CSRs
+ * @amon_wq: LLCC activity monitor workqueue to poll and
+ *           dump AMON CSRs
+ * @amon_lock: lock to access and change AMON driver data
+ */
+struct llcc_amon_data {
+	struct regmap *llcc_amon_regmap;
+	struct device *dev;
+	u32 amon_irq;
+	u32 amon_fg_cnt;
+	struct work_struct amon_work;
+	struct workqueue_struct *amon_wq;
+	struct mutex amon_lock;
+};
+
+static void amon_dump_channel_status(struct device *dev, u32 chnum, u32 type)
+{
+	u32 llcc_amon_csr;
+	struct llcc_amon_data *amon_data = dev_get_drvdata(dev);
+
+	switch (type) {
+	case AMON_READ:
+		regmap_read(amon_data->llcc_amon_regmap,
+			(LLCC_AMON_READ_REGS + CHAN_OFF(chnum)),
+			&llcc_amon_csr);
+		dev_err(dev, "READ entry %u : %8x\n",
+					chnum, llcc_amon_csr);
+		break;
+	case AMON_WRITE:
+		regmap_read(amon_data->llcc_amon_regmap,
+			(LLCC_AMON_WRITE_REGS + CHAN_OFF(chnum)),
+			&llcc_amon_csr);
+		dev_err(dev, "WRITE entry %u : %8x\n",
+					chnum, llcc_amon_csr);
+		break;
+	case AMON_EVICT:
+		regmap_read(amon_data->llcc_amon_regmap,
+			(LLCC_AMON_EVICT_REGS + CHAN_OFF(chnum)),
+			&llcc_amon_csr);
+		dev_err(dev, "EVICT entry %u : %8x\n",
+					chnum, llcc_amon_csr);
+		break;
+	}
+}
+
+static void amon_dump_read_channel_status(struct device *dev, u32 chnum)
+{
+	amon_dump_channel_status(dev, chnum, AMON_READ);
+}
+
+static void amon_dump_write_channel_status(struct device *dev, u32 chnum)
+{
+	amon_dump_channel_status(dev, chnum, AMON_WRITE);
+}
+
+static void amon_dump_evict_channel_status(struct device *dev, u32 chnum)
+{
+	amon_dump_channel_status(dev, chnum, AMON_EVICT);
+}
+
+static ssize_t amon_fg_count_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t n)
+{
+	struct llcc_amon_data *amon_data;
+	int ret;
+	u32 count;
+
+	amon_data = dev_get_drvdata(dev);
+	if (!amon_data)
+		return -ENODEV;
+
+	mutex_lock(&amon_data->amon_lock);
+	ret = kstrtouint(buf, 0, &count);
+	if (ret) {
+		dev_err(amon_data->dev, "invalid user input\n");
+		mutex_unlock(&amon_data->amon_lock);
+		return ret;
+	}
+
+	/* Set fine grained counter */
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		LLCC_AMON_FG_CNT_MASK, (count << LLCC_AMON_FG_CNT_SHIFT));
+	mutex_unlock(&amon_data->amon_lock);
+	return n;
+}
+
+static ssize_t amon_fg_count_show(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct llcc_amon_data *amon_data;
+	int ret;
+	u32 count, llcc_amon_cfg0;
+
+	amon_data = dev_get_drvdata(dev);
+	if (!amon_data)
+		return -ENODEV;
+
+	mutex_lock(&amon_data->amon_lock);
+	/* Get fine grained counter */
+	regmap_read(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		&llcc_amon_cfg0);
+	count = (llcc_amon_cfg0 & LLCC_AMON_FG_CNT_MASK)
+					>> LLCC_AMON_FG_CNT_SHIFT;
+	ret = snprintf(buf, PAGE_SIZE, "%u\n", count);
+	mutex_unlock(&amon_data->amon_lock);
+	return ret;
+}
+
+static ssize_t amon_deadlock_mode_store(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf, size_t n)
+{
+	struct llcc_amon_data *amon_data;
+	int ret;
+	u32 mode;
+
+	amon_data = dev_get_drvdata(dev);
+	if (!amon_data)
+		return -ENODEV;
+
+	mutex_lock(&amon_data->amon_lock);
+	ret = kstrtouint(buf, 0, &mode);
+	if (ret) {
+		dev_err(amon_data->dev, "invalid user input\n");
+		mutex_unlock(&amon_data->amon_lock);
+		return ret;
+	}
+
+	/* Set deadlock detection mode */
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		BIT(LLCC_AMON_CFG0_DLDM), mode);
+	mutex_unlock(&amon_data->amon_lock);
+	return n;
+}
+
+static ssize_t amon_deadlock_mode_show(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	struct llcc_amon_data *amon_data;
+	int ret;
+	u32 val, llcc_amon_cfg0;
+
+	amon_data = dev_get_drvdata(dev);
+	if (!amon_data)
+		return -ENODEV;
+
+	mutex_lock(&amon_data->amon_lock);
+
+	/* Get deadlock detection mode */
+	regmap_read(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		&llcc_amon_cfg0);
+	val = llcc_amon_cfg0 & BIT(LLCC_AMON_CFG0_DLDM);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+			val >> LLCC_AMON_CFG0_DLDM_SHIFT);
+
+	mutex_unlock(&amon_data->amon_lock);
+	return ret;
+}
+
+
+static DEVICE_ATTR(amon_fg_count, S_IRUGO | S_IWUSR,
+				amon_fg_count_show, amon_fg_count_store);
+static DEVICE_ATTR(amon_deadlock_mode, S_IRUGO | S_IWUSR,
+			amon_deadlock_mode_show, amon_deadlock_mode_store);
+
+static const struct device_attribute *llcc_amon_attrs[] = {
+	&dev_attr_amon_fg_count,
+	&dev_attr_amon_deadlock_mode,
+	NULL,
+};
+
+static int amon_create_sysfs_files(struct device *dev,
+			const struct device_attribute **attrs)
+{
+	int ret = 0, i;
+
+	for (i = 0; attrs[i] != NULL; i++) {
+		ret = device_create_file(dev, attrs[i]);
+		if (ret) {
+			dev_err(dev, "AMON: Couldn't create sysfs entry: %s!\n",
+				attrs[i]->attr.name);
+			break;
+		}
+	}
+	return ret;
+}
+
+static int amon_remove_sysfs_files(struct device *dev,
+			const struct device_attribute **attrs)
+{
+	int ret = 0, i;
+
+	for (i = 0; attrs[i] != NULL; i++)
+		device_remove_file(dev, attrs[i]);
+
+	return ret;
+}
+
+static void enable_qcom_amon_interrupt(struct llcc_amon_data *amon_data)
+{
+
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_IRQ_ENABLE,
+		BIT(AMON_IRQ_BIT), BIT(AMON_IRQ_BIT));
+}
+
+static void disable_qcom_amon_interrupt(struct llcc_amon_data *amon_data)
+{
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_IRQ_ENABLE,
+		BIT(AMON_IRQ_BIT), 0);
+}
+
+static void clear_qcom_amon_interrupt(struct llcc_amon_data *amon_data)
+{
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_IRQ_CLEAR,
+		BIT(AMON_IRQ_BIT), BIT(AMON_IRQ_BIT));
+}
+
+static void amon_poll_work(struct work_struct *work)
+{
+	u32 llcc_amon_status0, llcc_amon_irq_status, chnum;
+	struct llcc_amon_data *amon_data = container_of(work,
+				struct llcc_amon_data, amon_work);
+
+	/* Check for deadlock */
+	regmap_read(amon_data->llcc_amon_regmap, LLCC_AMON_IRQ_STATUS,
+		&llcc_amon_irq_status);
+	if (!(llcc_amon_irq_status & BIT(AMON_IRQ_BIT)))
+		/* No deadlock interrupt */
+		return;
+
+	regmap_read(amon_data->llcc_amon_regmap, LLCC_AMON_STATUS_0,
+		&llcc_amon_status0);
+	if (!llcc_amon_status0)
+		return;
+
+	chnum = (llcc_amon_status0 & LLCC_AMON_STATUS0_MASK) >>
+				 LLCC_AMON_CHAN_SHIFT;
+	if (llcc_amon_status0 &
+		LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(AMON_READ)) {
+		/* Read channel error */
+		amon_dump_read_channel_status(amon_data->dev, chnum);
+	} else if (llcc_amon_status0 &
+		LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(AMON_WRITE)) {
+		/* Write channel error */
+		amon_dump_write_channel_status(amon_data->dev, chnum);
+	} else if (llcc_amon_status0 &
+		LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(AMON_EVICT)) {
+		/* Evict channel error */
+		amon_dump_evict_channel_status(amon_data->dev, chnum);
+	}
+
+	clear_qcom_amon_interrupt(amon_data);
+
+	if (qcom_llcc_amon_panic)
+		panic("AMON deadlock detected");
+}
+
+static irqreturn_t llcc_amon_irq_handler(int irq, void *dev_data)
+{
+	u32 llcc_amon_status0, llcc_amon_irq_status;
+	int chnum;
+	struct llcc_amon_data *amon_data = dev_data;
+
+	regmap_read(amon_data->llcc_amon_regmap, LLCC_AMON_IRQ_STATUS,
+		&llcc_amon_irq_status);
+	if (!(llcc_amon_irq_status & BIT(AMON_IRQ_BIT)))
+		/* No deadlock interrupt */
+		return IRQ_NONE;
+
+	regmap_read(amon_data->llcc_amon_regmap, LLCC_AMON_STATUS_0,
+		&llcc_amon_status0);
+	if (unlikely(llcc_amon_status0 == 0))
+		return IRQ_NONE;
+
+	/*
+	 * Check type of interrupt and channel number.
+	 * Call corresponding handler with channel number.
+	 */
+
+	chnum = (llcc_amon_status0 & LLCC_AMON_STATUS0_MASK) >>
+			 LLCC_AMON_CHAN_SHIFT;
+	if (llcc_amon_status0 &
+		LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(AMON_READ)) {
+		/* Read channel error */
+		amon_dump_read_channel_status(amon_data->dev, chnum);
+	} else if (llcc_amon_status0 &
+		LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(AMON_WRITE)) {
+		/* Write channel error */
+		amon_dump_write_channel_status(amon_data->dev, chnum);
+	} else if (llcc_amon_status0 &
+		LLCC_AMON_CNTR_SATURATED_SOURCE_TYPE(AMON_EVICT)) {
+		/* Evict channel error */
+		amon_dump_evict_channel_status(amon_data->dev, chnum);
+	}
+
+	clear_qcom_amon_interrupt(amon_data);
+
+	if (qcom_llcc_amon_panic)
+		panic("AMON deadlock detected");
+
+	return IRQ_HANDLED;
+}
+
+
+static int qcom_llcc_amon_dt_to_pdata(struct platform_device *pdev,
+					struct llcc_amon_data *pdata)
+{
+	struct device_node *node = pdev->dev.of_node;
+
+	pdata->dev = &pdev->dev;
+
+	pdata->llcc_amon_regmap = syscon_node_to_regmap(
+				pdata->dev->parent->of_node);
+	if (IS_ERR(pdata->llcc_amon_regmap)) {
+		dev_err(pdata->dev, "No regmap for syscon amon parent\n");
+		return -ENOMEM;
+	}
+
+	pdata->amon_irq = platform_get_irq(pdev, 0);
+	if ((int)pdata->amon_irq <= 0)
+		return -ENODEV;
+
+	of_property_read_u32(node, "qcom,fg-cnt",
+				&pdata->amon_fg_cnt);
+	return 0;
+}
+
+static int qcom_llcc_amon_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct llcc_amon_data *amon_data;
+	u32 cnt;
+
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
+	amon_data = devm_kzalloc(&pdev->dev, sizeof(struct llcc_amon_data),
+				GFP_KERNEL);
+	if (!amon_data)
+		return -ENOMEM;
+
+	ret = qcom_llcc_amon_dt_to_pdata(pdev, amon_data);
+	if (ret) {
+		dev_err(amon_data->dev, "failed to get amon data\n");
+		return ret;
+	}
+
+	amon_data->dev = &pdev->dev;
+
+
+	platform_set_drvdata(pdev, amon_data);
+
+	/* Enable Activity Monitor */
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		BIT(LLCC_AMON_CFG0_ENABLE), 0x1);
+
+	/* Enable Activity Monitor (AMON) as deadlock detector */
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		BIT(LLCC_AMON_CFG0_DLDM), BIT(LLCC_AMON_CFG0_DLDM));
+
+	/* Set fine grained counter */
+	cnt = amon_data->amon_fg_cnt << LLCC_AMON_FG_CNT_SHIFT;
+	if (cnt)
+		regmap_update_bits(amon_data->llcc_amon_regmap,
+			LLCC_AMON_CFG0,	LLCC_AMON_FG_CNT_MASK, cnt);
+
+	mutex_init(&amon_data->amon_lock);
+	ret = amon_create_sysfs_files(&pdev->dev, llcc_amon_attrs);
+	if (ret) {
+		dev_err(amon_data->dev,
+			"failed to create sysfs entries\n");
+		platform_set_drvdata(pdev, NULL);
+		return ret;
+	}
+
+	if (amon_interrupt_mode) { /* Interrupt mode */
+		ret = devm_request_irq(amon_data->dev, amon_data->amon_irq,
+				llcc_amon_irq_handler, IRQF_TRIGGER_RISING,
+				"amon_deadlock", amon_data);
+		if (ret) {
+			dev_err(amon_data->dev,
+				"failed to request amon deadlock irq\n");
+			platform_set_drvdata(pdev, NULL);
+			return ret;
+		}
+		enable_qcom_amon_interrupt(amon_data);
+	} else { /* Polling mode */
+		amon_data->amon_wq = create_singlethread_workqueue(
+						"amon_deadlock_detector");
+		if (!amon_data->amon_wq) {
+			dev_err(amon_data->dev,
+				"failed to create polling work queue\n");
+			platform_set_drvdata(pdev, NULL);
+			return -ENOMEM;
+		}
+		INIT_WORK(&amon_data->amon_work, amon_poll_work);
+		queue_work(amon_data->amon_wq, &amon_data->amon_work);
+	}
+
+	return 0;
+}
+
+static int qcom_llcc_amon_remove(struct platform_device *pdev)
+{
+	struct llcc_amon_data *amon_data = platform_get_drvdata(pdev);
+
+	disable_qcom_amon_interrupt(amon_data);
+	clear_qcom_amon_interrupt(amon_data);
+	/* Disable Activity Monitor (AMON) as deadlock detector */
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		BIT(LLCC_AMON_CFG0_DLDM), 0x0);
+	/* Disable Activity Monitor */
+	regmap_update_bits(amon_data->llcc_amon_regmap, LLCC_AMON_CFG0,
+		BIT(LLCC_AMON_CFG0_ENABLE), 0x0);
+
+	amon_remove_sysfs_files(&pdev->dev, llcc_amon_attrs);
+	if (amon_data->amon_wq)
+		destroy_workqueue(amon_data->amon_wq);
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+static const struct of_device_id qcom_llcc_amon_match_table[] = {
+	{ .compatible = "qcom,llcc-amon" },
+	{},
+};
+
+static struct platform_driver qcom_llcc_amon_driver = {
+	.probe = qcom_llcc_amon_probe,
+	.remove = qcom_llcc_amon_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = qcom_llcc_amon_match_table,
+	},
+};
+
+static int __init init_qcom_llcc_amon(void)
+{
+	return platform_driver_register(&qcom_llcc_amon_driver);
+}
+module_init(init_qcom_llcc_amon);
+
+static void __exit exit_qcom_llcc_amon(void)
+{
+	return platform_driver_unregister(&qcom_llcc_amon_driver);
+}
+module_exit(exit_qcom_llcc_amon);
+
+MODULE_DESCRIPTION("QTI LLCC Activity Monitor Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-core.c b/drivers/soc/qcom/llcc-core.c
new file mode 100644
index 0000000..e4d5acc
--- /dev/null
+++ b/drivers/soc/qcom/llcc-core.c
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+/* Config register offsets */
+#define COMMON_CFG0		0x00030004
+#define DRP_ECC_ERROR_CFG	0x00040000
+#define TRP_MISC_CFG		0x00022300
+
+/* TRP, DRP interrupt register offsets */
+#define CMN_INTERRUPT_0_ENABLE		0x0003001C
+#define CMN_INTERRUPT_2_ENABLE		0x0003003C
+#define TRP_INTERRUPT_0_ENABLE		0x00020488
+#define DRP_INTERRUPT_ENABLE		0x0004100C
+
+#define DATA_RAM_ECC_ENABLE	0x1
+#define SB_ERROR_THRESHOLD	0x1
+#define SB_ERROR_THRESHOLD_SHIFT	24
+#define TAG_RAM_ECC_DISABLE	0x1
+#define TAG_RAM_ECC_DISABLE_SHIFT	0x1
+#define SB_DB_TRP_INTERRUPT_ENABLE	0x3
+#define TRP0_INTERRUPT_ENABLE	0x1
+#define DRP0_INTERRUPT_ENABLE	BIT(6)
+#define COMMON_INTERRUPT_0_AMON BIT(8)
+#define SB_DB_DRP_INTERRUPT_ENABLE	0x3
+
+static void qcom_llcc_core_setup(struct regmap *llcc_regmap)
+{
+	u32 trp_misc_val;
+	u32 sb_err_threshold;
+
+	/* Enable Tag RAM ECC */
+	trp_misc_val = (TAG_RAM_ECC_DISABLE << TAG_RAM_ECC_DISABLE_SHIFT);
+	regmap_update_bits(llcc_regmap, TRP_MISC_CFG,
+			   ~trp_misc_val, trp_misc_val);
+
+	/* Enable TRP in instance 2 of common interrupt enable register */
+	regmap_update_bits(llcc_regmap, CMN_INTERRUPT_2_ENABLE,
+			   TRP0_INTERRUPT_ENABLE, TRP0_INTERRUPT_ENABLE);
+
+	/* Enable ECC interrupts on Tag Ram */
+	regmap_update_bits(llcc_regmap, TRP_INTERRUPT_0_ENABLE,
+		SB_DB_TRP_INTERRUPT_ENABLE, SB_DB_TRP_INTERRUPT_ENABLE);
+
+	/* Enable ECC for data RAM */
+	regmap_update_bits(llcc_regmap, COMMON_CFG0,
+				DATA_RAM_ECC_ENABLE, DATA_RAM_ECC_ENABLE);
+
+	/* Enable SB error for Data RAM */
+	sb_err_threshold = (SB_ERROR_THRESHOLD << SB_ERROR_THRESHOLD_SHIFT);
+	regmap_write(llcc_regmap, DRP_ECC_ERROR_CFG, sb_err_threshold);
+
+	/* Enable DRP in instance 2 of common interrupt enable register */
+	regmap_update_bits(llcc_regmap, CMN_INTERRUPT_2_ENABLE,
+			   DRP0_INTERRUPT_ENABLE, DRP0_INTERRUPT_ENABLE);
+
+	/* Enable ECC interrupts on Data Ram */
+	regmap_write(llcc_regmap, DRP_INTERRUPT_ENABLE,
+		     SB_DB_DRP_INTERRUPT_ENABLE);
+
+	/* Enable AMON interrupt in the common interrupt register */
+	regmap_update_bits(llcc_regmap, CMN_INTERRUPT_0_ENABLE,
+			COMMON_INTERRUPT_0_AMON, COMMON_INTERRUPT_0_AMON);
+}
+
+static int qcom_llcc_core_probe(struct platform_device *pdev)
+{
+	struct regmap *llcc_regmap;
+	struct device *dev = &pdev->dev;
+
+	llcc_regmap = syscon_node_to_regmap(dev->of_node);
+
+	if (IS_ERR(llcc_regmap)) {
+		dev_err(&pdev->dev, "Cannot find regmap for llcc\n");
+		return PTR_ERR(llcc_regmap);
+	}
+
+	qcom_llcc_core_setup(llcc_regmap);
+
+	return 0;
+}
+
+static int qcom_llcc_core_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id qcom_llcc_core_match_table[] = {
+	{ .compatible = "qcom,llcc-core" },
+	{ },
+};
+
+static struct platform_driver qcom_llcc_core_driver = {
+	.probe = qcom_llcc_core_probe,
+	.remove = qcom_llcc_core_remove,
+	.driver = {
+		.name = "qcom_llcc_core",
+		.owner = THIS_MODULE,
+		.of_match_table = qcom_llcc_core_match_table,
+	},
+};
+
+static int __init qcom_llcc_core_init(void)
+{
+	return platform_driver_register(&qcom_llcc_core_driver);
+}
+module_init(qcom_llcc_core_init);
+
+static void __exit qcom_llcc_core_exit(void)
+{
+	platform_driver_unregister(&qcom_llcc_core_driver);
+}
+module_exit(qcom_llcc_core_exit);
+
+MODULE_DESCRIPTION("QCOM LLCC Core Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-msmskunk.c b/drivers/soc/qcom/llcc-msmskunk.c
new file mode 100644
index 0000000..65c79f7
--- /dev/null
+++ b/drivers/soc/qcom/llcc-msmskunk.c
@@ -0,0 +1,114 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+/*
+ * An SCT entry consists of the following parameters:
+ * name: Name of the client's use case for which the llcc slice is used
+ * uid: Unique id for the client's use case
+ * slice_id: llcc slice id for each client
+ * max_cap: The maximum capacity of the cache slice provided in KB
+ * priority: Priority of the client used to select victim line for replacement
+ * fixed_size: Determines whether the slice has a fixed capacity
+ * bonus_ways: Bonus ways to be used by any slice, a bonus way is used only if
+ *             it's not a reserved way.
+ * res_ways: Reserved ways for the cache slice, the reserved ways cannot be used
+ *           by any other client than the one it's assigned to.
+ * cache_mode: Each slice operates as a cache, this controls the mode of the
+ *             slice: normal or TCM
+ * probe_target_ways: Determines what ways to probe for access hit. When
+ *                    configured to 1 only bonus and reserved ways are probed.
+ *                    When configured to 0 all ways in llcc are probed.
+ * dis_cap_alloc: Disable capacity based allocation for a client
+ * retain_on_pc: If this bit is set and the client has maintained an active
+ *               vote then the ways assigned to this client are not flushed
+ *               on power collapse.
+ * activate_on_init: Activate the slice immediately after the SCT is programmed
+ */
+#define SCT_ENTRY(n, uid, sid, mc, p, fs, bway, rway, cmod, ptw, dca, rp, a) \
+	{					\
+		.name = n,			\
+		.usecase_id = uid,		\
+		.slice_id = sid,		\
+		.max_cap = mc,			\
+		.priority = p,			\
+		.fixed_size = fs,		\
+		.bonus_ways = bway,		\
+		.res_ways = rway,		\
+		.cache_mode = cmod,		\
+		.probe_target_ways = ptw,	\
+		.dis_cap_alloc = dca,		\
+		.retain_on_pc = rp,		\
+		.activate_on_init = a,		\
+	}
+
+static struct llcc_slice_config msmskunk_data[] =  {
+	SCT_ENTRY("cpuss",       1, 1, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 1),
+	SCT_ENTRY("vidsc0",      2, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("vidsc1",      3, 3, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("rotator",     4, 4, 800, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("voice",       5, 5, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("audio",       6, 6, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("modemhp",     7, 7, 1024, 2, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("modem",       8, 8, 1024, 0, 1, 0xF,  0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("modemhw",     9, 9, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("compute",     10, 10, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("gpuhtw",      11, 11, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("gpu",         12, 12, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("mmuhwt",      13, 13, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1),
+	SCT_ENTRY("sensor",      14, 14, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("compute_dma", 15, 15, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("display",     16, 16, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("videofw",     17, 17, 3072, 0, 0, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+	SCT_ENTRY("camerafw",    18, 18, 224, 0, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0),
+};
+
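+/*
+ * Reading one row of the table against the SCT_ENTRY() fields documented
+ * above, e.g. the "cpuss" entry: usecase_id 1, slice_id 1, max_cap 3072 KB,
+ * priority 0, not a fixed size slice, bonus_ways 0xFFF, no reserved ways,
+ * cache_mode 0, all ways probed, capacity based allocation not disabled,
+ * ways retained on power collapse, and the slice is activated at init.
+ */
+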
+static int msmskunk_qcom_llcc_probe(struct platform_device *pdev)
+{
+	return qcom_llcc_probe(pdev, msmskunk_data,
+				 ARRAY_SIZE(msmskunk_data));
+}
+
+static const struct of_device_id msmskunk_qcom_llcc_of_match[] = {
+	{ .compatible = "qcom,msmskunk-llcc", },
+	{ },
+};
+
+static struct platform_driver msmskunk_qcom_llcc_driver = {
+	.driver = {
+		.name = "msmskunk-llcc",
+		.owner = THIS_MODULE,
+		.of_match_table = msmskunk_qcom_llcc_of_match,
+	},
+	.probe = msmskunk_qcom_llcc_probe,
+	.remove = qcom_llcc_remove,
+};
+
+static int __init msmskunk_init_qcom_llcc_init(void)
+{
+	return platform_driver_register(&msmskunk_qcom_llcc_driver);
+}
+module_init(msmskunk_init_qcom_llcc_init);
+
+static void __exit msmskunk_exit_qcom_llcc_exit(void)
+{
+	platform_driver_unregister(&msmskunk_qcom_llcc_driver);
+}
+module_exit(msmskunk_exit_qcom_llcc_exit);
+
+MODULE_DESCRIPTION("QTI msmskunk LLCC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/llcc-slice.c b/drivers/soc/qcom/llcc-slice.c
new file mode 100644
index 0000000..3a9c7aa
--- /dev/null
+++ b/drivers/soc/qcom/llcc-slice.c
@@ -0,0 +1,440 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt)  "%s:" fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/soc/qcom/llcc-qcom.h>
+
+#define ACTIVATE                      0x1
+#define DEACTIVATE                    0x2
+#define ACT_CTRL_OPCODE_ACTIVATE      0x1
+#define ACT_CTRL_OPCODE_DEACTIVATE    0x2
+#define ACT_CTRL_ACT_TRIG             0x1
+#define ACT_CTRL_OPCODE_SHIFT         0x1
+#define ATTR1_PROBE_TARGET_WAYS_SHIFT 0x2
+#define ATTR1_FIXED_SIZE_SHIFT        0x3
+#define ATTR1_PRIORITY_SHIFT          0x4
+#define ATTR1_MAX_CAP_SHIFT           0x10
+#define ATTR0_RES_WAYS_MASK           0x00000fff
+#define ATR0_BONUS_WAYS_MASK          0x0fff0000
+#define ATR0_BONUS_WAYS_SHIFT         0x10
+#define LLCC_STATUS_READ_DELAY 100
+
+#define CACHE_LINE_SIZE_SHIFT 6
+#define SIZE_PER_LLCC_SHIFT   2
+#define MAX_CAP_TO_BYTES(n) ((n) * 1024)
+#define LLCC_TRP_ACT_CTRLn(n) ((n) * 0x1000)
+#define LLCC_TRP_STATUSn(n)   (4 + (n) * 0x1000)
+#define LLCC_TRP_ATTR0_CFGn(n) (0x21000 + 0x8 * (n))
+#define LLCC_TRP_ATTR1_CFGn(n) (0x21004 + 0x8 * (n))
+#define LLCC_TRP_PCB_ACT       0x23204
+#define LLCC_TRP_SCID_DIS_CAP_ALLOC 0x23200
+
+/**
+ * struct llcc_drv_data - Driver data for llcc
+ * @llcc_map: regmap handle for the llcc controller
+ * @slice_data: pointer to llcc slice config data
+ * @slice_mutex: mutex protecting the slice bitmap
+ * @llcc_config_data_sz: size of the config data table
+ * @max_slices: maximum number of slices supported
+ * @llcc_slice_map: bit map to track the active slice ids
+ */
+struct llcc_drv_data {
+	struct regmap *llcc_map;
+	const struct llcc_slice_config *slice_data;
+	struct mutex slice_mutex;
+	u32 llcc_config_data_sz;
+	u32 max_slices;
+	unsigned long *llcc_slice_map;
+};
+
+/* Get the slice entry by index */
+static struct llcc_slice_desc *llcc_slice_get_entry(struct device *dev, int n)
+{
+	int id;
+	struct of_phandle_args phargs;
+	struct llcc_drv_data *drv;
+	const struct llcc_slice_config *llcc_data_ptr;
+	struct llcc_slice_desc *desc;
+	struct platform_device *pdev;
+
+	if (of_parse_phandle_with_args(dev->of_node, "cache-slices",
+				       "#cache-cells", n, &phargs)) {
+		pr_err("can't parse \"cache-slices\" property\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	pdev = of_find_device_by_node(phargs.np);
+	if (!pdev) {
+		pr_err("Cannot find platform device from phandle\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	drv = platform_get_drvdata(pdev);
+	if (!drv) {
+		pr_err("cannot find platform driver data\n");
+		return ERR_PTR(-EFAULT);
+	}
+
+	llcc_data_ptr = drv->slice_data;
+
+	for (id = 0; id < drv->llcc_config_data_sz; id++) {
+		if (llcc_data_ptr->usecase_id == phargs.args[0])
+			break;
+		llcc_data_ptr++;
+	}
+
+	if (id == drv->llcc_config_data_sz) {
+		pr_err("can't find usecase id %d\n", phargs.args[0]);
+		return ERR_PTR(-ENODEV);
+	}
+
+	desc = kzalloc(sizeof(struct llcc_slice_desc), GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	desc->llcc_slice_id = llcc_data_ptr->slice_id;
+	desc->llcc_slice_size = llcc_data_ptr->max_cap;
+	desc->dev = &pdev->dev;
+
+	return desc;
+}
+
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @dev: Device pointer of the client
+ * @name: Name of the use case
+ *
+ * A pointer to the llcc slice descriptor is returned on success and
+ * an error pointer is returned on failure
+ */
+struct llcc_slice_desc *llcc_slice_getd(struct device *dev, const char *name)
+{
+	struct device_node *np = dev->of_node;
+	int index = 0;
+	const char *slice_name;
+	struct property *prop;
+
+	if (!np) {
+		dev_err(dev, "%s() currently only supports DT\n", __func__);
+		return ERR_PTR(-ENOENT);
+	}
+
+	if (!of_get_property(np, "cache-slice-names", NULL)) {
+		dev_err(dev,
+			"%s() requires a \"cache-slice-names\" property\n",
+			__func__);
+		return ERR_PTR(-ENOENT);
+	}
+
+	of_property_for_each_string(np, "cache-slice-names", prop, slice_name) {
+		if (!strcmp(name, slice_name))
+			break;
+		index++;
+	}
+
+	return llcc_slice_get_entry(dev, index);
+}
+EXPORT_SYMBOL(llcc_slice_getd);
+
+/**
+ * llcc_slice_putd - release the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+	kfree(desc);
+}
+EXPORT_SYMBOL(llcc_slice_putd);
+
+static int llcc_update_act_ctrl(struct llcc_drv_data *drv, u32 sid,
+				u32 act_ctrl_reg_val, u32 status)
+{
+	u32 act_ctrl_reg;
+	u32 status_reg;
+	u32 slice_status;
+	unsigned long timeout;
+
+	act_ctrl_reg = LLCC_TRP_ACT_CTRLn(sid);
+	status_reg = LLCC_TRP_STATUSn(sid);
+
+	regmap_write(drv->llcc_map, act_ctrl_reg, act_ctrl_reg_val);
+
+	/* Make sure the activate trigger is applied before clearing it */
+	mb();
+
+	/* Clear the ACTIVE trigger */
+	act_ctrl_reg_val &= ~ACT_CTRL_ACT_TRIG;
+	regmap_write(drv->llcc_map, act_ctrl_reg, act_ctrl_reg_val);
+
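+	/* Poll the status register until the slice reports the requested state */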
+	timeout = jiffies + usecs_to_jiffies(LLCC_STATUS_READ_DELAY);
+	while (time_before(jiffies, timeout)) {
+		regmap_read(drv->llcc_map, status_reg, &slice_status);
+		if (slice_status & status)
+			return 0;
+	}
+
+	return -ETIMEDOUT;
+}
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+	int rc = -EINVAL;
+	u32 act_ctrl_val;
+	struct llcc_drv_data *drv;
+
+	if (desc == NULL) {
+		pr_err("Input descriptor supplied is invalid\n");
+		return rc;
+	}
+
+	drv = dev_get_drvdata(desc->dev);
+	if (!drv) {
+		pr_err("Invalid device pointer in the desc\n");
+		return rc;
+	}
+
+	mutex_lock(&drv->slice_mutex);
+	if (test_bit(desc->llcc_slice_id, drv->llcc_slice_map)) {
+		mutex_unlock(&drv->slice_mutex);
+		return 0;
+	}
+
+	act_ctrl_val = ACT_CTRL_OPCODE_ACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+	act_ctrl_val |= ACT_CTRL_ACT_TRIG;
+
+	rc = llcc_update_act_ctrl(drv, desc->llcc_slice_id, act_ctrl_val,
+				  ACTIVATE);
+
+	__set_bit(desc->llcc_slice_id, drv->llcc_slice_map);
+	mutex_unlock(&drv->slice_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(llcc_slice_activate);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A value zero will be returned on success and a negative errno will
+ * be returned in error cases
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+	u32 act_ctrl_val;
+	int rc = -EINVAL;
+	struct llcc_drv_data *drv;
+
+	if (desc == NULL) {
+		pr_err("Input descriptor supplied is invalid\n");
+		return rc;
+	}
+
+	drv = dev_get_drvdata(desc->dev);
+	if (!drv) {
+		pr_err("Invalid device pointer in the desc\n");
+		return rc;
+	}
+
+	mutex_lock(&drv->slice_mutex);
+	if (!test_bit(desc->llcc_slice_id, drv->llcc_slice_map)) {
+		mutex_unlock(&drv->slice_mutex);
+		return 0;
+	}
+	act_ctrl_val = ACT_CTRL_OPCODE_DEACTIVATE << ACT_CTRL_OPCODE_SHIFT;
+	act_ctrl_val |= ACT_CTRL_ACT_TRIG;
+
+	rc = llcc_update_act_ctrl(drv, desc->llcc_slice_id, act_ctrl_val,
+				  DEACTIVATE);
+
+	__clear_bit(desc->llcc_slice_id, drv->llcc_slice_map);
+	mutex_unlock(&drv->slice_mutex);
+
+	return rc;
+}
+EXPORT_SYMBOL(llcc_slice_deactivate);
+
+/**
+ * llcc_get_slice_id - return the slice id
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A positive value will be returned on success and a negative errno will
+ * be returned on error
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+	if (!desc)
+		return -EINVAL;
+
+	return desc->llcc_slice_id;
+}
+EXPORT_SYMBOL(llcc_get_slice_id);
+
+/**
+ * llcc_get_slice_size - return the slice size
+ * @desc: Pointer to llcc slice descriptor
+ *
+ * A positive value will be returned on success and zero will be
+ * returned on error
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+	if (!desc)
+		return 0;
+
+	return desc->llcc_slice_size;
+}
+EXPORT_SYMBOL(llcc_get_slice_size);
+
+static void qcom_llcc_cfg_program(struct platform_device *pdev)
+{
+	int i;
+	u32 attr1_cfg;
+	u32 attr0_cfg;
+	u32 attr1_val;
+	u32 attr0_val;
+	u32 pcb = 0;
+	u32 cad = 0;
+	u32 max_cap_cacheline;
+	u32 sz;
+	const struct llcc_slice_config *llcc_table;
+	struct llcc_drv_data *drv = platform_get_drvdata(pdev);
+	struct llcc_slice_desc desc;
+
+	sz = drv->llcc_config_data_sz;
+	llcc_table = drv->slice_data;
+
+	for (i = 0; i < sz; i++) {
+		attr1_cfg = LLCC_TRP_ATTR1_CFGn(llcc_table[i].slice_id);
+		attr0_cfg = LLCC_TRP_ATTR0_CFGn(llcc_table[i].slice_id);
+
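+		/* ATTR1 packs the cache mode, probe target ways, fixed size,
+		 * priority and max capacity (in cache lines per LLCC instance)
+		 */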
+		attr1_val = llcc_table[i].cache_mode;
+		attr1_val |= (llcc_table[i].probe_target_ways <<
+				ATTR1_PROBE_TARGET_WAYS_SHIFT);
+		attr1_val |= (llcc_table[i].fixed_size <<
+				ATTR1_FIXED_SIZE_SHIFT);
+		attr1_val |= (llcc_table[i].priority << ATTR1_PRIORITY_SHIFT);
+
+		max_cap_cacheline = MAX_CAP_TO_BYTES(llcc_table[i].max_cap);
+		max_cap_cacheline >>= CACHE_LINE_SIZE_SHIFT;
+		/* There are four llcc instances, llcc0..llcc3. The SW writes
+		 * to the broadcast register, which gets propagated to each
+		 * llcc. Since the memory is divided equally amongst the four
+		 * llcc instances, the max cap must be divided by 4.
+		 */
+		max_cap_cacheline >>= SIZE_PER_LLCC_SHIFT;
+		attr1_val |= (max_cap_cacheline << ATTR1_MAX_CAP_SHIFT);
+
+		attr0_val = llcc_table[i].res_ways & ATTR0_RES_WAYS_MASK;
+		attr0_val |= llcc_table[i].bonus_ways << ATR0_BONUS_WAYS_SHIFT;
+
+		regmap_write(drv->llcc_map, attr1_cfg, attr1_val);
+		regmap_write(drv->llcc_map, attr0_cfg, attr0_val);
+
+		/* Write the retain on power collapse bit for each scid */
+		pcb |= llcc_table[i].retain_on_pc << llcc_table[i].slice_id;
+		regmap_write(drv->llcc_map, LLCC_TRP_PCB_ACT, pcb);
+
+		/* Disable capacity alloc */
+		cad |= llcc_table[i].dis_cap_alloc << llcc_table[i].slice_id;
+		regmap_write(drv->llcc_map, LLCC_TRP_SCID_DIS_CAP_ALLOC, cad);
+
+		/* Make sure that the SCT is programmed before activating */
+		mb();
+
+		if (llcc_table[i].activate_on_init) {
+			desc.llcc_slice_id = llcc_table[i].slice_id;
+			desc.dev = &pdev->dev;
+			if (llcc_slice_activate(&desc)) {
+				pr_err("activate slice id: %d timed out\n",
+						desc.llcc_slice_id);
+			}
+		}
+	}
+}
+
+int qcom_llcc_probe(struct platform_device *pdev,
+		      const struct llcc_slice_config *llcc_cfg, u32 sz)
+{
+	int rc = 0;
+	struct device *dev = &pdev->dev;
+	struct llcc_drv_data *drv_data;
+
+	drv_data = devm_kzalloc(dev, sizeof(*drv_data), GFP_KERNEL);
+	if (!drv_data)
+		return -ENOMEM;
+
+	drv_data->llcc_map = syscon_node_to_regmap(dev->parent->of_node);
+	if (IS_ERR(drv_data->llcc_map))
+		return PTR_ERR(drv_data->llcc_map);
+
+	rc = of_property_read_u32(pdev->dev.of_node, "max-slices",
+				  &drv_data->max_slices);
+	if (rc) {
+		dev_err(&pdev->dev, "Invalid max-slices dt entry\n");
+		devm_kfree(&pdev->dev, drv_data);
+		return rc;
+	}
+
+	drv_data->llcc_slice_map = kcalloc(BITS_TO_LONGS(drv_data->max_slices),
+				   sizeof(unsigned long), GFP_KERNEL);
+
+	if (!drv_data->llcc_slice_map) {
+		devm_kfree(&pdev->dev, drv_data);
+		return -ENOMEM;
+	}
+
+	bitmap_zero(drv_data->llcc_slice_map, drv_data->max_slices);
+	drv_data->slice_data = llcc_cfg;
+	drv_data->llcc_config_data_sz = sz;
+	mutex_init(&drv_data->slice_mutex);
+	platform_set_drvdata(pdev, drv_data);
+
+	qcom_llcc_cfg_program(pdev);
+
+	return rc;
+}
+EXPORT_SYMBOL(qcom_llcc_probe);
+
+int qcom_llcc_remove(struct platform_device *pdev)
+{
+	struct llcc_drv_data *drv_data;
+
+	drv_data = platform_get_drvdata(pdev);
+
+	mutex_destroy(&drv_data->slice_mutex);
+	kfree(drv_data->llcc_slice_map);
+	devm_kfree(&pdev->dev, drv_data);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+EXPORT_SYMBOL(qcom_llcc_remove);
diff --git a/drivers/soc/qcom/memory_dump_v2.c b/drivers/soc/qcom/memory_dump_v2.c
new file mode 100644
index 0000000..b9ce417
--- /dev/null
+++ b/drivers/soc/qcom/memory_dump_v2.c
@@ -0,0 +1,197 @@
+/* Copyright (c) 2014-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/cacheflush.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/scm.h>
+
+#define MSM_DUMP_TABLE_VERSION		MSM_DUMP_MAKE_VERSION(2, 0)
+
+#define SCM_CMD_DEBUG_LAR_UNLOCK	0x4
+
+struct msm_dump_table {
+	uint32_t version;
+	uint32_t num_entries;
+	struct msm_dump_entry entries[MAX_NUM_ENTRIES];
+};
+
+struct msm_memory_dump {
+	uint64_t table_phys;
+	struct msm_dump_table *table;
+};
+
+static struct msm_memory_dump memdump;
+
+uint32_t msm_dump_table_version(void)
+{
+	return MSM_DUMP_TABLE_VERSION;
+}
+EXPORT_SYMBOL(msm_dump_table_version);
+
+static int msm_dump_table_register(struct msm_dump_entry *entry)
+{
+	struct msm_dump_entry *e;
+	struct msm_dump_table *table = memdump.table;
+
+	if (!table || table->num_entries >= MAX_NUM_ENTRIES)
+		return -EINVAL;
+
+	e = &table->entries[table->num_entries];
+	e->id = entry->id;
+	e->type = MSM_DUMP_TYPE_TABLE;
+	e->addr = entry->addr;
+	table->num_entries++;
+
+	dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
+	return 0;
+}
+
+static struct msm_dump_table *msm_dump_get_table(enum msm_dump_table_ids id)
+{
+	struct msm_dump_table *table = memdump.table;
+	int i;
+
+	if (!table) {
+		pr_err("mem dump base table does not exist\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	for (i = 0; i < MAX_NUM_ENTRIES; i++) {
+		if (table->entries[i].id == id)
+			break;
+	}
+	if (i == MAX_NUM_ENTRIES || !table->entries[i].addr) {
+		pr_err("mem dump base table entry %d invalid\n", id);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Get the apps table pointer */
+	table = phys_to_virt(table->entries[i].addr);
+
+	return table;
+}
+
+int msm_dump_data_register(enum msm_dump_table_ids id,
+			   struct msm_dump_entry *entry)
+{
+	struct msm_dump_entry *e;
+	struct msm_dump_table *table;
+
+	table = msm_dump_get_table(id);
+	if (IS_ERR(table))
+		return PTR_ERR(table);
+
+	if (!table || table->num_entries >= MAX_NUM_ENTRIES)
+		return -EINVAL;
+
+	e = &table->entries[table->num_entries];
+	e->id = entry->id;
+	e->type = MSM_DUMP_TYPE_DATA;
+	e->addr = entry->addr;
+	table->num_entries++;
+
+	dmac_flush_range(table, (void *)table + sizeof(struct msm_dump_table));
+	return 0;
+}
+EXPORT_SYMBOL(msm_dump_data_register);
+
+static int __init init_memory_dump(void)
+{
+	struct msm_dump_table *table;
+	struct msm_dump_entry entry;
+	struct device_node *np;
+	void __iomem *imem_base;
+	int ret;
+
+	np = of_find_compatible_node(NULL, NULL,
+				     "qcom,msm-imem-mem_dump_table");
+	if (!np) {
+		pr_err("mem dump base table DT node does not exist\n");
+		return -ENODEV;
+	}
+
+	imem_base = of_iomap(np, 0);
+	if (!imem_base) {
+		pr_err("mem dump base table imem offset mapping failed\n");
+		return -ENOMEM;
+	}
+
+	memdump.table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
+	if (!memdump.table) {
+		ret = -ENOMEM;
+		goto err0;
+	}
+	memdump.table->version = MSM_DUMP_TABLE_VERSION;
+	memdump.table_phys = virt_to_phys(memdump.table);
+	memcpy_toio(imem_base, &memdump.table_phys, sizeof(memdump.table_phys));
+	/* Ensure write to imem_base is complete before unmapping */
+	mb();
+	pr_info("MSM Memory Dump base table set up\n");
+
+	iounmap(imem_base);
+
+	table = kzalloc(sizeof(struct msm_dump_table), GFP_KERNEL);
+	if (!table) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	table->version = MSM_DUMP_TABLE_VERSION;
+
+	entry.id = MSM_DUMP_TABLE_APPS;
+	entry.addr = virt_to_phys(table);
+	ret = msm_dump_table_register(&entry);
+	if (ret) {
+		pr_info("mem dump apps data table register failed\n");
+		goto err2;
+	}
+	pr_info("MSM Memory Dump apps data table set up\n");
+
+	return 0;
+err2:
+	kfree(table);
+err1:
+	kfree(memdump.table);
+	return ret;
+err0:
+	iounmap(imem_base);
+	return ret;
+}
+early_initcall(init_memory_dump);
+
+#ifdef CONFIG_MSM_DEBUG_LAR_UNLOCK
+static int __init init_debug_lar_unlock(void)
+{
+	int ret;
+	uint32_t argument = 0;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8())
+		ret = scm_call(SCM_SVC_TZ, SCM_CMD_DEBUG_LAR_UNLOCK, &argument,
+			       sizeof(argument), NULL, 0);
+	else
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_TZ,
+				SCM_CMD_DEBUG_LAR_UNLOCK), &desc);
+	if (ret)
+		pr_err("Core Debug Lock unlock failed, ret: %d\n", ret);
+	else
+		pr_info("Core Debug Lock unlocked\n");
+
+	return ret;
+}
+early_initcall(init_debug_lar_unlock);
+#endif
diff --git a/drivers/soc/qcom/msm_bus/Makefile b/drivers/soc/qcom/msm_bus/Makefile
new file mode 100644
index 0000000..1103360
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for msm-bus driver specific files
+#
+obj-y +=  msm_bus_core.o msm_bus_client_api.o
+obj-$(CONFIG_OF) += msm_bus_of.o
+obj-$(CONFIG_MSM_RPM_SMD) += msm_bus_rpm_smd.o
+
+ifdef CONFIG_QCOM_BUS_CONFIG_RPMH
+	obj-y += msm_bus_fabric_rpmh.o msm_bus_arb_rpmh.o msm_bus_rules.o \
+		msm_bus_bimc_rpmh.o msm_bus_noc_rpmh.o
+	obj-$(CONFIG_OF) += msm_bus_of_rpmh.o
+else
+	obj-y += msm_bus_fabric_adhoc.o msm_bus_arb_adhoc.o \
+		msm_bus_bimc_adhoc.o msm_bus_noc_adhoc.o
+	obj-$(CONFIG_OF) += msm_bus_of_adhoc.o
+endif
+
+obj-$(CONFIG_DEBUG_BUS_VOTER) += msm_bus_dbg_voter.o
+
+obj-$(CONFIG_DEBUG_FS) += msm_bus_dbg.o
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
new file mode 100644
index 0000000..2a2c52e
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_adhoc.h
@@ -0,0 +1,174 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_node_device_type;
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+};
+
+/* New types introduced for adhoc topology */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t max_ib;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+};
+
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+struct qos_params_type {
+	int mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int reg_prio1;
+	unsigned int reg_prio0;
+	unsigned int gp;
+	unsigned int thmp;
+	unsigned int ws;
+	u64 bw_buffer;
+};
+
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct qos_params_type qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *black_listed_connections;
+	struct device **dev_connections;
+	struct device **black_connections;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	struct node_agg_params_type agg_params;
+};
+
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct list_head link;
+	unsigned int ap_owned;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+};
+
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+				int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
new file mode 100644
index 0000000..4d6483c
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_adhoc.c
@@ -0,0 +1,1433 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+#define NUM_CL_HANDLES	50
+#define NUM_LNODES	3
+#define MAX_STR_CL	50
+
+struct bus_search_type {
+	struct list_head link;
+	struct list_head node_list;
+};
+
+struct handle_type {
+	int num_entries;
+	struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+static LIST_HEAD(input_list);
+static LIST_HEAD(apply_list);
+static LIST_HEAD(commit_list);
+
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	list_for_each_entry(bus_node, black_list, link) {
+		if (bus_node->node_info->id == id)
+			return true;
+	}
+	return false;
+}
+
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+	*traverse_list, struct list_head *route_list)
+{
+	struct bus_search_type *search_node;
+
+	if (list_empty(edge_list) && list_empty(traverse_list))
+		return;
+
+	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	INIT_LIST_HEAD(&search_node->node_list);
+	list_splice_init(edge_list, traverse_list);
+	list_splice_init(traverse_list, &search_node->node_list);
+	list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. TODO: move these common
+ * functions/macros into a shared "util" file.
+ */
+uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
+{
+	uint64_t *b = &bw;
+
+	if ((bw > 0) && (bw < w))
+		return 1;
+
+	switch (w) {
+	case 0:
+		WARN(1, "AXI: Divide by 0 attempted\n");
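+		/* fall through */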
+	case 1: return bw;
+	case 2: return (bw >> 1);
+	case 4: return (bw >> 2);
+	case 8: return (bw >> 3);
+	case 16: return (bw >> 4);
+	case 32: return (bw >> 5);
+	}
+
+	do_div(*b, w);
+	return *b;
+}
+
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+	if (bnode)
+		ret = (bnode->node_info->id == *(unsigned int *)id);
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static int gen_lnode(struct device *dev,
+			int next_hop, int prev_idx, const char *cl_name)
+{
+	struct link_node *lnode;
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	int lnode_idx = -1;
+
+	if (!dev)
+		goto exit_gen_lnode;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_gen_lnode;
+	}
+
+	if (!cur_dev->num_lnodes) {
+		cur_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * NUM_LNODES,
+								GFP_KERNEL);
+		if (!cur_dev->lnode_list)
+			goto exit_gen_lnode;
+
+		lnode = cur_dev->lnode_list;
+		cur_dev->num_lnodes = NUM_LNODES;
+		lnode_idx = 0;
+	} else {
+		int i;
+
+		for (i = 0; i < cur_dev->num_lnodes; i++) {
+			if (!cur_dev->lnode_list[i].in_use)
+				break;
+		}
+
+		if (i < cur_dev->num_lnodes) {
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		} else {
+			struct link_node *realloc_list;
+			size_t cur_size = sizeof(struct link_node) *
+					cur_dev->num_lnodes;
+
+			cur_dev->num_lnodes += NUM_LNODES;
+			realloc_list = msm_bus_realloc_devmem(
+					dev,
+					cur_dev->lnode_list,
+					cur_size,
+					sizeof(struct link_node) *
+					cur_dev->num_lnodes, GFP_KERNEL);
+
+			if (!realloc_list)
+				goto exit_gen_lnode;
+
+			cur_dev->lnode_list = realloc_list;
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		}
+	}
+
+	lnode->in_use = 1;
+	lnode->cl_name = cl_name;
+	if (next_hop == cur_dev->node_info->id) {
+		lnode->next = -1;
+		lnode->next_dev = NULL;
+	} else {
+		lnode->next = prev_idx;
+		lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &next_hop,
+					msm_bus_device_match_adhoc);
+	}
+
+	memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+	memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+	return lnode_idx;
+}
+
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+				int lnode_idx)
+{
+	int ret = 0;
+
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		ret = -ENODEV;
+		goto exit_remove_lnode;
+	}
+
+	if (lnode_idx != -1) {
+		if (!cur_dev->num_lnodes ||
+				(lnode_idx > (cur_dev->num_lnodes - 1))) {
+			MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+				__func__, lnode_idx, cur_dev->num_lnodes);
+			ret = -ENODEV;
+			goto exit_remove_lnode;
+		}
+
+		cur_dev->lnode_list[lnode_idx].next = -1;
+		cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+		cur_dev->lnode_list[lnode_idx].in_use = 0;
+		cur_dev->lnode_list[lnode_idx].cl_name = NULL;
+	}
+
+exit_remove_lnode:
+	return ret;
+}
+
+static int prune_path(struct list_head *route_list, int dest, int src,
+				struct list_head *black_list, int found,
+				const char *cl_name)
+{
+	struct bus_search_type *search_node, *temp_search_node;
+	struct msm_bus_node_device_type *bus_node;
+	struct list_head *bl_list;
+	struct list_head *temp_bl_list;
+	int search_dev_id = dest;
+	struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &dest,
+					msm_bus_device_match_adhoc);
+	int lnode_hop = -1;
+
+	if (!found)
+		goto reset_links;
+
+	if (!dest_dev) {
+		MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+		goto exit_prune_path;
+	}
+
+	lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+
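+	/*
+	 * Walk the route list from the destination back towards the source,
+	 * generating a link node for every hop on the discovered path.
+	 */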
+	list_for_each_entry_reverse(search_node, route_list, link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link) {
+			unsigned int i;
+
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				if (bus_node->node_info->connections[i] ==
+								search_dev_id) {
+					dest_dev = bus_find_device(
+						&msm_bus_type,
+						NULL,
+						(void *)
+						&bus_node->node_info->
+						id,
+						msm_bus_device_match_adhoc);
+
+					if (!dest_dev) {
+						lnode_hop = -1;
+						goto reset_links;
+					}
+
+					lnode_hop = gen_lnode(dest_dev,
+							search_dev_id,
+							lnode_hop, cl_name);
+					search_dev_id =
+						bus_node->node_info->id;
+					break;
+				}
+			}
+		}
+	}
+reset_links:
+	list_for_each_entry_safe(search_node, temp_search_node, route_list,
+									link) {
+		list_for_each_entry(bus_node, &search_node->node_list,
+								link)
+			bus_node->node_info->is_traversed = false;
+
+		list_del(&search_node->link);
+		kfree(search_node);
+	}
+
+	list_for_each_safe(bl_list, temp_bl_list, black_list)
+		list_del(bl_list);
+
+exit_prune_path:
+	return lnode_hop;
+}
+
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+				struct list_head *black_list)
+{
+	unsigned int i;
+
+	for (i = 0; i < node->node_info->num_blist; i++) {
+		struct msm_bus_node_device_type *bdev;
+
+		bdev = to_msm_bus_node(node->node_info->black_connections[i]);
+		list_add_tail(&bdev->link, black_list);
+	}
+}
+
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+	struct list_head traverse_list;
+	struct list_head edge_list;
+	struct list_head route_list;
+	struct list_head black_list;
+	struct msm_bus_node_device_type *src_node;
+	struct bus_search_type *search_node;
+	int found = 0;
+	int depth_index = 0;
+	int first_hop = -1;
+	int src;
+
+	INIT_LIST_HEAD(&traverse_list);
+	INIT_LIST_HEAD(&edge_list);
+	INIT_LIST_HEAD(&route_list);
+	INIT_LIST_HEAD(&black_list);
+
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+		goto exit_getpath;
+	}
+
+	src_node = to_msm_bus_node(src_dev);
+	if (!src_node) {
+		MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+		goto exit_getpath;
+	}
+	src = src_node->node_info->id;
+	list_add_tail(&src_node->link, &traverse_list);
+
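+	/*
+	 * Breadth-first search of the bus topology: expand one level of
+	 * connections per iteration until the destination node is found.
+	 */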
+	while ((!found && !list_empty(&traverse_list))) {
+		struct msm_bus_node_device_type *bus_node = NULL;
+		/* Locate dest_id in the traverse list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			if (bus_node->node_info->id == dest) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			unsigned int i;
+			/* Setup the new edge list */
+			list_for_each_entry(bus_node, &traverse_list, link) {
+				/* Setup list of black-listed nodes */
+				setup_bl_list(bus_node, &black_list);
+
+				for (i = 0; i < bus_node->node_info->
+						num_connections; i++) {
+					bool skip;
+					struct msm_bus_node_device_type
+							*node_conn;
+					node_conn =
+					to_msm_bus_node(bus_node->node_info->
+						dev_connections[i]);
+					if (node_conn->node_info->
+							is_traversed) {
+						MSM_BUS_ERR("Circ Path %d\n",
+						node_conn->node_info->id);
+						goto reset_traversed;
+					}
+					skip = chk_bl_list(&black_list,
+							bus_node->node_info->
+							connections[i]);
+					if (!skip) {
+						list_add_tail(&node_conn->link,
+							&edge_list);
+						node_conn->node_info->
+							is_traversed = true;
+					}
+				}
+			}
+
+			/* Keep tabs of the previous search list */
+			search_node = kzalloc(sizeof(struct bus_search_type),
+					 GFP_KERNEL);
+			INIT_LIST_HEAD(&search_node->node_list);
+			list_splice_init(&traverse_list,
+					 &search_node->node_list);
+			/* Add the previous search list to a route list */
+			list_add_tail(&search_node->link, &route_list);
+			/* Advancing the list depth */
+			depth_index++;
+			list_splice_init(&edge_list, &traverse_list);
+		}
+	}
+reset_traversed:
+	copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+	first_hop = prune_path(&route_list, dest, src, &black_list, found,
+								cl_name);
+
+exit_getpath:
+	return first_hop;
+}
+
+static uint64_t scheme1_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+			struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+	uint64_t max_ib;
+	uint64_t sum_ab;
+	uint64_t bw_max_hz;
+	uint32_t util_fact = 0;
+	uint32_t vrail_comp = 0;
+	struct node_util_levels_type *utils;
+	int i;
+	int num_util_levels;
+
+	/*
+	 *  Account for Util factor and vrail comp.
+	 *  Util factor is picked according to the current sum(AB) for this
+	 *  node and for this context.
+	 *  Vrail comp is fixed for the entire performance range.
+	 *  They default to 100 if absent.
+	 *
+	 *  The aggregated clock is computed as:
+	 *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 *				/ bus-width
+	 */
+	if (bus_dev->node_info->agg_params.num_util_levels) {
+		utils = bus_dev->node_info->agg_params.util_levels;
+		num_util_levels =
+			bus_dev->node_info->agg_params.num_util_levels;
+	} else {
+		utils = fab_dev->node_info->agg_params.util_levels;
+		num_util_levels =
+			fab_dev->node_info->agg_params.num_util_levels;
+	}
+
+	sum_ab = bus_dev->node_bw[ctx].sum_ab;
+	max_ib = bus_dev->node_bw[ctx].max_ib;
+
+	for (i = 0; i < num_util_levels; i++) {
+		if (sum_ab < utils[i].threshold) {
+			util_fact = utils[i].util_fact;
+			break;
+		}
+	}
+	if (i == num_util_levels)
+		util_fact = utils[(num_util_levels - 1)].util_fact;
+
+	vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+			bus_dev->node_info->agg_params.vrail_comp :
+			fab_dev->node_info->agg_params.vrail_comp;
+
+	bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+	bus_dev->node_bw[ctx].util_used = util_fact;
+
+	if (util_fact && (util_fact != 100)) {
+		sum_ab *= util_fact;
+		sum_ab = msm_bus_div64(100, sum_ab);
+	}
+
+	if (vrail_comp && (vrail_comp != 100)) {
+		max_ib *= 100;
+		max_ib = msm_bus_div64(vrail_comp, max_ib);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->agg_params.num_aggports > 1)
+		sum_ab = msm_bus_div64(
+				bus_dev->node_info->agg_params.num_aggports,
+					sum_ab);
+
+	if (!bus_dev->node_info->agg_params.buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->agg_params.buswidth = 8;
+	}
+
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth,
+					bw_max_hz);
+
+	return bw_max_hz;
+}
+
+static uint64_t legacy_agg_scheme(struct msm_bus_node_device_type *bus_dev,
+			struct msm_bus_node_device_type *fab_dev, int ctx)
+{
+	uint64_t max_ib;
+	uint64_t sum_ab;
+	uint64_t bw_max_hz;
+	uint32_t util_fact = 0;
+	uint32_t vrail_comp = 0;
+
+	/*
+	 *  Util_fact and vrail comp are obtained from fabric/Node's dts
+	 *  properties and are fixed for the entire performance range.
+	 *  They default to 100 if absent.
+	 *
+	 *  The clock frequency is computed as:
+	 *  Freq_hz = max((sum(ab) * util_fact)/num_chan, max(ib)/vrail_comp)
+	 *				/ bus-width
+	 */
+	util_fact = fab_dev->node_info->agg_params.util_levels[0].util_fact;
+	vrail_comp = fab_dev->node_info->agg_params.vrail_comp;
+
+	if (bus_dev->node_info->agg_params.num_util_levels)
+		util_fact =
+		bus_dev->node_info->agg_params.util_levels[0].util_fact ?
+		bus_dev->node_info->agg_params.util_levels[0].util_fact :
+		util_fact;
+
+	vrail_comp = bus_dev->node_info->agg_params.vrail_comp ?
+			bus_dev->node_info->agg_params.vrail_comp :
+			vrail_comp;
+
+	bus_dev->node_bw[ctx].vrail_used = vrail_comp;
+	bus_dev->node_bw[ctx].util_used = util_fact;
+	sum_ab = bus_dev->node_bw[ctx].sum_ab;
+	max_ib = bus_dev->node_bw[ctx].max_ib;
+
+	if (util_fact && (util_fact != 100)) {
+		sum_ab *= util_fact;
+		sum_ab = msm_bus_div64(100, sum_ab);
+	}
+
+	if (vrail_comp && (vrail_comp != 100)) {
+		max_ib *= 100;
+		max_ib = msm_bus_div64(vrail_comp, max_ib);
+	}
+
+	/* Account for multiple channels if any */
+	if (bus_dev->node_info->agg_params.num_aggports > 1)
+		sum_ab = msm_bus_div64(
+				bus_dev->node_info->agg_params.num_aggports,
+					sum_ab);
+
+	if (!bus_dev->node_info->agg_params.buswidth) {
+		MSM_BUS_WARN("No bus width found for %d. Using default\n",
+					bus_dev->node_info->id);
+		bus_dev->node_info->agg_params.buswidth = 8;
+	}
+
+	bw_max_hz = max(max_ib, sum_ab);
+	bw_max_hz = msm_bus_div64(bus_dev->node_info->agg_params.buswidth,
+					bw_max_hz);
+
+	return bw_max_hz;
+}
+
+static uint64_t aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+									int ctx)
+{
+	uint64_t bw_hz = 0;
+	int i;
+	struct msm_bus_node_device_type *fab_dev = NULL;
+	uint32_t agg_scheme;
+	uint64_t max_ib = 0;
+	uint64_t sum_ab = 0;
+
+	if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_agg_bus_req;
+	}
+
+	fab_dev = to_msm_bus_node(bus_dev->node_info->bus_device);
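+	/* Aggregate the votes of every link node attached to this bus node */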
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+		sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+	}
+
+	bus_dev->node_bw[ctx].sum_ab = sum_ab;
+	bus_dev->node_bw[ctx].max_ib = max_ib;
+
+	if (bus_dev->node_info->agg_params.agg_scheme != AGG_SCHEME_NONE)
+		agg_scheme = bus_dev->node_info->agg_params.agg_scheme;
+	else
+		agg_scheme = fab_dev->node_info->agg_params.agg_scheme;
+
+	switch (agg_scheme) {
+	case AGG_SCHEME_1:
+		bw_hz = scheme1_agg_scheme(bus_dev, fab_dev, ctx);
+		break;
+	case AGG_SCHEME_LEG:
+		bw_hz = legacy_agg_scheme(bus_dev, fab_dev, ctx);
+		break;
+	}
+
+exit_agg_bus_req:
+	return bw_hz;
+}
+
+
+static void del_inp_list(struct list_head *list)
+{
+	struct rule_update_path_info *rule_node;
+	struct rule_update_path_info *rule_node_tmp;
+
+	list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
+		list_del(&rule_node->link);
+		rule_node->added = false;
+	}
+}
+
+static void del_op_list(struct list_head *list)
+{
+	struct rule_apply_rcm_info *rule;
+	struct rule_apply_rcm_info *rule_tmp;
+
+	list_for_each_entry_safe(rule, rule_tmp, list, link)
+		list_del(&rule->link);
+}
+
+static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
+{
+	struct rule_apply_rcm_info *rule;
+	struct device *dev = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+
+	list_for_each_entry(rule, list, link) {
+		if (!rule)
+			continue;
+
+		if (rule && (rule->after_clk_commit != after_clk_commit))
+			continue;
+
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &rule->id,
+				msm_bus_device_match_adhoc);
+
+		if (!dev) {
+			MSM_BUS_ERR("Can't find dev node for %d", rule->id);
+			continue;
+		}
+		dev_info = to_msm_bus_node(dev);
+
+		ret = msm_bus_enable_limiter(dev_info, rule->throttle,
+							rule->lim_bw);
+		if (ret)
+			MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
+	}
+
+	return ret;
+}
+
+static void commit_data(void)
+{
+	bool rules_registered = msm_rule_are_rules_registered();
+
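+	/* Apply limiter rules that must precede the clock commit, commit the
+	 * bandwidth votes, then apply the remaining post-commit rules.
+	 */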
+	if (rules_registered) {
+		msm_rules_update_path(&input_list, &apply_list);
+		msm_bus_apply_rules(&apply_list, false);
+	}
+
+	msm_bus_commit_data(&commit_list);
+
+	if (rules_registered) {
+		msm_bus_apply_rules(&apply_list, true);
+		del_inp_list(&input_list);
+		del_op_list(&apply_list);
+	}
+	INIT_LIST_HEAD(&input_list);
+	INIT_LIST_HEAD(&apply_list);
+	INIT_LIST_HEAD(&commit_list);
+}
+
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *node_parent =
+			to_msm_bus_node(node->node_info->bus_device);
+
+	if (!node->dirty) {
+		list_add_tail(&node->link, &commit_list);
+		node->dirty = true;
+	}
+
+	if (!node_parent->dirty) {
+		list_add_tail(&node_parent->link, &commit_list);
+		node_parent->dirty = true;
+	}
+}
+
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+			uint64_t act_req_bw, uint64_t slp_req_ib,
+			uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+			int src_idx, int ctx)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx;
+	int ret = 0;
+	struct rule_update_path_info *rule_node;
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (IS_ERR_OR_NULL(src_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_update_path;
+	}
+
+	next_dev = src_dev;
+
+	if (src_idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+		ret = -ENXIO;
+		goto exit_update_path;
+	}
+	curr_idx = src_idx;
+
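+	/* Walk each hop from the source towards the destination, updating the
+	 * per-context votes and queueing every node for commit.
+	 */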
+	while (next_dev) {
+		int i;
+
+		dev_info = to_msm_bus_node(next_dev);
+
+		if (curr_idx >= dev_info->num_lnodes) {
+			MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+			 __func__, curr_idx, dev_info->num_lnodes);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+
+		lnode = &dev_info->lnode_list[curr_idx];
+		if (!lnode) {
+			MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+				 __func__, curr_idx);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+		lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+		lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+		lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+		lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+		for (i = 0; i < NUM_CTX; i++)
+			dev_info->node_bw[i].cur_clk_hz =
+					aggregate_bus_req(dev_info, i);
+
+		add_node_to_clist(dev_info);
+
+		if (rules_registered) {
+			rule_node = &dev_info->node_info->rule;
+			rule_node->id = dev_info->node_info->id;
+			rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
+			rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
+			rule_node->clk =
+				dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
+			if (!rule_node->added) {
+				list_add_tail(&rule_node->link, &input_list);
+				rule_node->added = true;
+			}
+		}
+
+		next_dev = lnode->next_dev;
+		curr_idx = lnode->next;
+	}
+
+exit_update_path:
+	return ret;
+}
+
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+			uint64_t cur_ab, int src_idx, int active_only)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+	int cur_idx = src_idx;
+	int next_idx;
+
+	/* Update the current path to zero out all requests from
+	 * this client on all paths
+	 */
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Can't find source device", __func__);
+		ret = -ENODEV;
+		goto exit_remove_path;
+	}
+
+	ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+							active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+					__func__, ACTIVE_CTX);
+		goto exit_remove_path;
+	}
+
+	next_dev = src_dev;
+
+	while (next_dev) {
+		dev_info = to_msm_bus_node(next_dev);
+		lnode = &dev_info->lnode_list[cur_idx];
+		next_idx = lnode->next;
+		next_dev = lnode->next_dev;
+		remove_lnode(dev_info, cur_idx);
+		cur_idx = next_idx;
+	}
+
+exit_remove_path:
+	return ret;
+}
+
+static void getpath_debug(int src, int curr, int active_only)
+{
+	struct device *dev_node;
+	struct device *dev_it;
+	unsigned int hop = 1;
+	int idx;
+	struct msm_bus_node_device_type *devinfo;
+	int i;
+
+	dev_node = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+
+	if (!dev_node) {
+		MSM_BUS_ERR("SRC NOT FOUND %d", src);
+		return;
+	}
+
+	idx = curr;
+	devinfo = to_msm_bus_node(dev_node);
+	dev_it = dev_node;
+
+	MSM_BUS_ERR("Route list Src %d", src);
+	while (dev_it) {
+		struct msm_bus_node_device_type *busdev =
+			to_msm_bus_node(devinfo->node_info->bus_device);
+
+		MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+					devinfo->node_info->id, active_only);
+
+		for (i = 0; i < NUM_CTX; i++) {
+			MSM_BUS_ERR("dev info sel ib %llu",
+						devinfo->node_bw[i].cur_clk_hz);
+			MSM_BUS_ERR("dev info sel ab %llu",
+						devinfo->node_bw[i].sum_ab);
+		}
+
+		dev_it = devinfo->lnode_list[idx].next_dev;
+		idx = devinfo->lnode_list[idx].next;
+		if (dev_it)
+			devinfo = to_msm_bus_node(dev_it);
+
+		MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+		MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+		if (idx < 0)
+			break;
+		hop++;
+	}
+}
+
+static void unregister_client_adhoc(uint32_t cl)
+{
+	int i;
+	struct msm_bus_scale_pdata *pdata;
+	int lnode, src, curr, dest;
+	uint64_t  cur_clk, cur_bw;
+	struct msm_bus_client *client;
+	struct device *src_dev;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+	client = handle_list.cl_list[cl];
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	curr = client->curr;
+	if (curr >= pdata->num_usecases) {
+		MSM_BUS_ERR("Invalid index Defaulting curr to 0");
+		curr = 0;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = client->pdata->usecase[curr].vectors[i].src;
+		dest = client->pdata->usecase[curr].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+		cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+		remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+						pdata->active_only);
+	}
+	commit_data();
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+	kfree(client->src_pnode);
+	kfree(client->src_devs);
+	kfree(client);
+	handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static int alloc_handle_lst(int size)
+{
+	int ret = 0;
+	struct msm_bus_client **t_cl_list;
+
+	if (!handle_list.num_entries) {
+		t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+			* NUM_CL_HANDLES, GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+		handle_list.cl_list = t_cl_list;
+		handle_list.num_entries += NUM_CL_HANDLES;
+	} else {
+		t_cl_list = krealloc(handle_list.cl_list,
+				sizeof(struct msm_bus_client *) *
+				(handle_list.num_entries + NUM_CL_HANDLES),
+				GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+
+		handle_list.cl_list = t_cl_list;
+		memset(&handle_list.cl_list[handle_list.num_entries], 0,
+			NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+		handle_list.num_entries += NUM_CL_HANDLES;
+	}
+exit_alloc_handle_lst:
+	return ret;
+}
+
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+	uint32_t handle = 0;
+	int i;
+	int ret = 0;
+
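+	/* Handle 0 is reserved as invalid; look for the first free slot */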
+	for (i = 0; i < handle_list.num_entries; i++) {
+		if (i && !handle_list.cl_list[i]) {
+			handle = i;
+			break;
+		}
+	}
+
+	if (!handle) {
+		ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to allocate handle list",
+							__func__);
+			goto exit_gen_handle;
+		}
+		handle = i + 1;
+	}
+	handle_list.cl_list[handle] = client;
+exit_gen_handle:
+	return handle;
+}
+
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+	int src, dest;
+	int i;
+	struct msm_bus_client *client = NULL;
+	int *lnode;
+	struct device *dev;
+	uint32_t handle = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register_client;
+	}
+	client->pdata = pdata;
+
+	lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(lnode)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_lnode_malloc_fail;
+	}
+	client->src_pnode = lnode;
+
+	client->src_devs = kzalloc(pdata->usecase->num_paths *
+					sizeof(struct device *), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(client->src_devs)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_src_dev_malloc_fail;
+	}
+	client->curr = -1;
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase->vectors[i].src;
+		dest = pdata->usecase->vectors[i].dst;
+
+		if ((src < 0) || (dest < 0)) {
+			MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+		if (IS_ERR_OR_NULL(dev)) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		client->src_devs[i] = dev;
+
+		lnode[i] = getpath(dev, dest, client->pdata->name);
+		if (lnode[i] < 0) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+	}
+
+	handle = gen_handle(client);
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+					handle);
+	MSM_BUS_DBG("%s:Client handle %d %s", __func__, handle,
+						client->pdata->name);
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+exit_invalid_data:
+	kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+	kfree(lnode);
+exit_lnode_malloc_fail:
+	kfree(client);
+exit_register_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+}
+
+static int update_client_paths(struct msm_bus_client *client, bool log_trns,
+							unsigned int idx)
+{
+	int lnode, src, dest, cur_idx;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle  Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		req_clk = client->pdata->usecase[idx].vectors[i].ib;
+		req_bw = client->pdata->usecase[idx].vectors[i].ab;
+		if (cur_idx < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk =
+				client->pdata->usecase[cur_idx].vectors[i].ib;
+			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+					curr_bw, curr_clk);
+		}
+
+		if (pdata->active_only) {
+			slp_clk = 0;
+			slp_bw = 0;
+		} else {
+			slp_clk = req_clk;
+			slp_bw = req_bw;
+		}
+
+		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
+			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_update_client_paths;
+		}
+
+		if (log_trns)
+			getpath_debug(src, lnode, pdata->active_only);
+	}
+	commit_data();
+exit_update_client_paths:
+	return ret;
+}
+
+static int update_context(uint32_t cl, bool active_only,
+					unsigned int ctx_idx)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+	if (pdata->active_only == active_only) {
+		MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+					pdata->active_only, active_only);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	if (ctx_idx >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, ctx_idx);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata->active_only = active_only;
+
+	msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+	ret = update_client_paths(client, false, ctx_idx);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_context;
+	}
+
+	trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, index);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (client->curr == index) {
+		MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+				__func__, index);
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+	msm_bus_dbg_client_data(client->pdata, index, cl);
+	ret = update_client_paths(client, log_transaction, index);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_request;
+	}
+
+	trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+	if (cl) {
+		kfree(cl->name);
+		kfree(cl);
+		cl = NULL;
+	}
+}
+
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	int ret = 0;
+	char *test_cl = "test-client";
+	bool log_transaction = false;
+	u64 slp_ib, slp_ab;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, cl->name))
+		log_transaction = true;
+
+	msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+	if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+		MSM_BUS_DBG("%s:no change in request", cl->name);
+		goto exit_update_request;
+	}
+
+	if (cl->active_only) {
+		slp_ib = 0;
+		slp_ab = 0;
+	} else {
+		slp_ib = ib;
+		slp_ab = ab;
+	}
+
+	ret = update_path(cl->mas_dev, cl->slv, ib, ab, slp_ib, slp_ab,
+		cl->cur_act_ib, cl->cur_act_ab, cl->first_hop, cl->active_only);
+
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_update_request;
+	}
+
+	commit_data();
+	cl->cur_act_ib = ib;
+	cl->cur_act_ab = ab;
+	cl->cur_slp_ib = slp_ib;
+	cl->cur_slp_ab = slp_ab;
+
+	if (log_transaction)
+		getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+	trace_bus_update_request_end(cl->name);
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+	return ret;
+}
+
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	int ret = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("Invalid client handle %p", cl);
+		ret = -ENXIO;
+		goto exit_change_context;
+	}
+
+	if ((cl->cur_act_ib == act_ib) &&
+		(cl->cur_act_ab == act_ab) &&
+		(cl->cur_slp_ib == slp_ib) &&
+		(cl->cur_slp_ab == slp_ab)) {
+		MSM_BUS_ERR("No change in vote");
+		goto exit_change_context;
+	}
+
+	if (!slp_ab && !slp_ib)
+		cl->active_only = true;
+	msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_slp_ib);
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, slp_ab,
+				cl->cur_act_ib, cl->cur_act_ab, cl->first_hop,
+				cl->active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_change_context;
+	}
+	commit_data();
+	cl->cur_act_ib = act_ib;
+	cl->cur_act_ab = act_ab;
+	cl->cur_slp_ib = slp_ib;
+	cl->cur_slp_ab = slp_ab;
+	trace_bus_update_request_end(cl->name);
+exit_change_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+	remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	commit_data();
+	msm_bus_dbg_remove_client(cl);
+	kfree(cl);
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	struct msm_bus_client_handle *client = NULL;
+	int len = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!(mas && slv && name)) {
+		pr_err("%s: Error: src, dst and name are required",
+								 __func__);
+		goto exit_register;
+	}
+
+	client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register;
+	}
+
+	len = strnlen(name, MAX_STR_CL);
+	client->name = kzalloc((len + 1), GFP_KERNEL);
+	if (!client->name) {
+		MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+	strlcpy(client->name, name, MAX_STR_CL);
+	client->active_only = active_only;
+
+	client->mas = mas;
+	client->slv = slv;
+
+	client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &mas,
+					msm_bus_device_match_adhoc);
+	if (IS_ERR_OR_NULL(client->mas_dev)) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+	if (client->first_hop < 0) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+						client->name);
+	msm_bus_dbg_add_client(client);
+exit_register:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return client;
+}
+/**
+ *  msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops
+ *  @ arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+	arb_ops->register_client = register_client_adhoc;
+	arb_ops->update_request = update_request_adhoc;
+	arb_ops->unregister_client = unregister_client_adhoc;
+	arb_ops->update_context = update_context;
+
+	arb_ops->register_cl = register_adhoc;
+	arb_ops->unregister = unregister_adhoc;
+	arb_ops->update_bw = update_bw_adhoc;
+	arb_ops->update_bw_context = update_bw_context;
+}
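+
+/*
+ * Typical consumer flow (sketch; the client API wrappers named below are
+ * assumed to be the ones declared in include/linux/msm-bus.h):
+ *
+ *	u32 cl = msm_bus_scale_register_client(pdata);
+ *	msm_bus_scale_client_update_request(cl, usecase_idx);
+ *	...
+ *	msm_bus_scale_unregister_client(cl);
+ *
+ * These land in register_client_adhoc(), update_request_adhoc() and
+ * unregister_client_adhoc() through the ops installed above.
+ */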
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
new file mode 100644
index 0000000..9c86403
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_arb_rpmh.c
@@ -0,0 +1,1451 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/rtmutex.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define NUM_CL_HANDLES	50
+#define NUM_LNODES	3
+#define MAX_STR_CL	50
+
+struct bus_search_type {
+	struct list_head link;
+	struct list_head node_list;
+};
+
+struct handle_type {
+	int num_entries;
+	struct msm_bus_client **cl_list;
+};
+
+static struct handle_type handle_list;
+static LIST_HEAD(input_list);
+static LIST_HEAD(apply_list);
+static LIST_HEAD(commit_list);
+
+DEFINE_RT_MUTEX(msm_bus_adhoc_lock);
+
+static bool chk_bl_list(struct list_head *black_list, unsigned int id)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	list_for_each_entry(bus_node, black_list, link) {
+		if (bus_node->node_info->id == id)
+			return true;
+	}
+	return false;
+}
+
+static void copy_remaining_nodes(struct list_head *edge_list, struct list_head
+	*traverse_list, struct list_head *route_list)
+{
+	struct bus_search_type *search_node;
+
+	if (list_empty(edge_list) && list_empty(traverse_list))
+		return;
+
+	search_node = kzalloc(sizeof(struct bus_search_type), GFP_KERNEL);
+	if (!search_node)
+		return;
+	INIT_LIST_HEAD(&search_node->node_list);
+	list_splice_init(edge_list, traverse_list);
+	list_splice_init(traverse_list, &search_node->node_list);
+	list_add_tail(&search_node->link, route_list);
+}
+
+/*
+ * Duplicate instantiation from msm_bus_arb.c. TODO: there needs to be a
+ * "util" file for these common func/macros.
+ *
+ */
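+/*
+ * Illustrative example: msm_bus_div64(16, 6400000000ULL) returns
+ * 400000000, i.e. a 6.4 GB/s vote scaled down by a 16-byte bus width.
+ */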
+uint64_t msm_bus_div64(unsigned int w, uint64_t bw)
+{
+	uint64_t *b = &bw;
+
+	if ((bw > 0) && (bw < w))
+		return 1;
+
+	switch (w) {
+	case 0:
+		WARN(1, "AXI: Divide by 0 attempted\n");
+		/* fall through - return bw unscaled rather than divide */
+	case 1: return bw;
+	case 2: return (bw >> 1);
+	case 4: return (bw >> 2);
+	case 8: return (bw >> 3);
+	case 16: return (bw >> 4);
+	case 32: return (bw >> 5);
+	}
+
+	do_div(*b, w);
+	return *b;
+}
+
+int msm_bus_device_match_adhoc(struct device *dev, void *id)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bnode = to_msm_bus_node(dev);
+
+	if (bnode)
+		ret = (bnode->node_info->id == *(unsigned int *)id);
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static void bcm_add_bus_req(struct device *dev)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	struct link_node *lnode;
+	int lnode_idx = -1;
+	int max_num_lnodes = 0;
+	int i;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_bcm_add_bus_req;
+	}
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_add_bus_req;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+		max_num_lnodes = bcm_dev->bcmdev->num_bus_devs;
+
+		if (!bcm_dev->num_lnodes) {
+			bcm_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * max_num_lnodes,
+								GFP_KERNEL);
+			if (!bcm_dev->lnode_list)
+				goto exit_bcm_add_bus_req;
+
+			lnode = bcm_dev->lnode_list;
+			bcm_dev->num_lnodes = max_num_lnodes;
+			lnode_idx = 0;
+		} else {
+			int i;
+
+			for (i = 0; i < bcm_dev->num_lnodes; i++) {
+				if (!bcm_dev->lnode_list[i].in_use)
+					break;
+			}
+
+			if (i < bcm_dev->num_lnodes) {
+				lnode = &bcm_dev->lnode_list[i];
+				lnode_idx = i;
+			} else {
+				struct link_node *realloc_list;
+				size_t cur_size = sizeof(struct link_node) *
+						bcm_dev->num_lnodes;
+
+				bcm_dev->num_lnodes += NUM_LNODES;
+				realloc_list = msm_bus_realloc_devmem(
+						dev,
+						bcm_dev->lnode_list,
+						cur_size,
+						sizeof(struct link_node) *
+						bcm_dev->num_lnodes,
+								GFP_KERNEL);
+
+				if (!realloc_list)
+					goto exit_bcm_add_bus_req;
+
+				bcm_dev->lnode_list = realloc_list;
+				lnode = &bcm_dev->lnode_list[i];
+				lnode_idx = i;
+			}
+		}
+
+		lnode->in_use = 1;
+		lnode->bus_dev_id = cur_dev->node_info->id;
+		cur_dev->node_info->bcm_req_idx = lnode_idx;
+		memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+		memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+		MSM_BUS_DBG("%s: Added %d entry to bcm %d @ %d\n", __func__,
+			lnode->bus_dev_id, bcm_dev->node_info->id, lnode_idx);
+	}
+
+exit_bcm_add_bus_req:
+	return;
+}
+
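+/*
+ * Allocate a link node on @dev for one client path. The per-device
+ * lnode_list starts with NUM_LNODES entries and grows by NUM_LNODES via
+ * msm_bus_realloc_devmem() when no free slot remains; the returned index
+ * is chained through lnode->next to record the route towards the slave.
+ */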
+static int gen_lnode(struct device *dev,
+			int next_hop, int prev_idx, const char *cl_name)
+{
+	struct link_node *lnode;
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	int lnode_idx = -1;
+
+	if (!dev)
+		goto exit_gen_lnode;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_gen_lnode;
+	}
+
+	if (!cur_dev->num_lnodes) {
+		cur_dev->lnode_list = devm_kzalloc(dev,
+				sizeof(struct link_node) * NUM_LNODES,
+								GFP_KERNEL);
+		if (!cur_dev->lnode_list)
+			goto exit_gen_lnode;
+
+		lnode = cur_dev->lnode_list;
+		cur_dev->num_lnodes = NUM_LNODES;
+		lnode_idx = 0;
+	} else {
+		int i;
+
+		for (i = 0; i < cur_dev->num_lnodes; i++) {
+			if (!cur_dev->lnode_list[i].in_use)
+				break;
+		}
+
+		if (i < cur_dev->num_lnodes) {
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		} else {
+			struct link_node *realloc_list;
+			size_t cur_size = sizeof(struct link_node) *
+					cur_dev->num_lnodes;
+
+			cur_dev->num_lnodes += NUM_LNODES;
+			realloc_list = msm_bus_realloc_devmem(
+					dev,
+					cur_dev->lnode_list,
+					cur_size,
+					sizeof(struct link_node) *
+					cur_dev->num_lnodes, GFP_KERNEL);
+
+			if (!realloc_list)
+				goto exit_gen_lnode;
+
+			cur_dev->lnode_list = realloc_list;
+			lnode = &cur_dev->lnode_list[i];
+			lnode_idx = i;
+		}
+	}
+
+	lnode->in_use = 1;
+	lnode->cl_name = cl_name;
+	if (next_hop == cur_dev->node_info->id) {
+		lnode->next = -1;
+		lnode->next_dev = NULL;
+	} else {
+		lnode->next = prev_idx;
+		lnode->next_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &next_hop,
+					msm_bus_device_match_adhoc);
+	}
+
+	memset(lnode->lnode_ib, 0, sizeof(uint64_t) * NUM_CTX);
+	memset(lnode->lnode_ab, 0, sizeof(uint64_t) * NUM_CTX);
+
+exit_gen_lnode:
+	return lnode_idx;
+}
+
+static int remove_lnode(struct msm_bus_node_device_type *cur_dev,
+				int lnode_idx)
+{
+	int ret = 0;
+
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		ret = -ENODEV;
+		goto exit_remove_lnode;
+	}
+
+	if (lnode_idx != -1) {
+		if (!cur_dev->num_lnodes ||
+				(lnode_idx > (cur_dev->num_lnodes - 1))) {
+			MSM_BUS_ERR("%s: Invalid Idx %d, num_lnodes %d",
+				__func__, lnode_idx, cur_dev->num_lnodes);
+			ret = -ENODEV;
+			goto exit_remove_lnode;
+		}
+
+		cur_dev->lnode_list[lnode_idx].next = -1;
+		cur_dev->lnode_list[lnode_idx].next_dev = NULL;
+		cur_dev->lnode_list[lnode_idx].in_use = 0;
+		cur_dev->lnode_list[lnode_idx].cl_name = NULL;
+	}
+
+exit_remove_lnode:
+	return ret;
+}
+
+static int prune_path(struct list_head *route_list, int dest, int src,
+				struct list_head *black_list, int found,
+				const char *cl_name)
+{
+	struct bus_search_type *search_node, *temp_search_node;
+	struct msm_bus_node_device_type *bus_node;
+	struct list_head *bl_list;
+	struct list_head *temp_bl_list;
+	int search_dev_id = dest;
+	struct device *dest_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &dest,
+					msm_bus_device_match_adhoc);
+	int lnode_hop = -1;
+
+	if (!found)
+		goto reset_links;
+
+	if (!dest_dev) {
+		MSM_BUS_ERR("%s: Can't find dest dev %d", __func__, dest);
+		goto exit_prune_path;
+	}
+	MSM_BUS_DBG("%s: dest dev %d", __func__, dest);
+
+	lnode_hop = gen_lnode(dest_dev, search_dev_id, lnode_hop, cl_name);
+	bcm_add_bus_req(dest_dev);
+
+	list_for_each_entry_reverse(search_node, route_list, link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link) {
+			unsigned int i;
+
+			for (i = 0; i < bus_node->node_info->num_connections;
+									i++) {
+				if (bus_node->node_info->connections[i] ==
+								search_dev_id) {
+					dest_dev = bus_find_device(
+						&msm_bus_type,
+						NULL,
+						(void *)
+						&bus_node->node_info->
+						id,
+						msm_bus_device_match_adhoc);
+
+					if (!dest_dev) {
+						lnode_hop = -1;
+						goto reset_links;
+					}
+
+					lnode_hop = gen_lnode(dest_dev,
+							search_dev_id,
+							lnode_hop, cl_name);
+					bcm_add_bus_req(dest_dev);
+					search_dev_id =
+						bus_node->node_info->id;
+					break;
+				}
+			}
+		}
+	}
+reset_links:
+	list_for_each_entry_safe(search_node, temp_search_node, route_list,
+									link) {
+		list_for_each_entry(bus_node, &search_node->node_list, link)
+			bus_node->node_info->is_traversed = false;
+
+		list_del(&search_node->link);
+		kfree(search_node);
+	}
+
+	list_for_each_safe(bl_list, temp_bl_list, black_list)
+		list_del(bl_list);
+
+exit_prune_path:
+	return lnode_hop;
+}
+
+static void setup_bl_list(struct msm_bus_node_device_type *node,
+				struct list_head *black_list)
+{
+	unsigned int i;
+
+	for (i = 0; i < node->node_info->num_blist; i++) {
+		struct msm_bus_node_device_type *bdev;
+
+		bdev = to_msm_bus_node(node->node_info->black_connections[i]);
+		list_add_tail(&bdev->link, black_list);
+	}
+}
+
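+/*
+ * Breadth-first search from src_dev towards dest: each pass moves the
+ * current frontier (traverse_list) onto route_list and expands it into
+ * edge_list through non-blacklisted connections until dest is found or
+ * the frontier empties. prune_path() then walks route_list backwards,
+ * generating the lnode chain, and returns the first-hop lnode index.
+ */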
+static int getpath(struct device *src_dev, int dest, const char *cl_name)
+{
+	struct list_head traverse_list;
+	struct list_head edge_list;
+	struct list_head route_list;
+	struct list_head black_list;
+	struct msm_bus_node_device_type *src_node;
+	struct bus_search_type *search_node;
+	int found = 0;
+	int depth_index = 0;
+	int first_hop = -1;
+	int src;
+
+	INIT_LIST_HEAD(&traverse_list);
+	INIT_LIST_HEAD(&edge_list);
+	INIT_LIST_HEAD(&route_list);
+	INIT_LIST_HEAD(&black_list);
+
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Cannot locate src dev ", __func__);
+		goto exit_getpath;
+	}
+
+	src_node = to_msm_bus_node(src_dev);
+	if (!src_node) {
+		MSM_BUS_ERR("%s:Fatal, Source node not found", __func__);
+		goto exit_getpath;
+	}
+	src = src_node->node_info->id;
+	list_add_tail(&src_node->link, &traverse_list);
+
+	while ((!found && !list_empty(&traverse_list))) {
+		struct msm_bus_node_device_type *bus_node = NULL;
+		/* Locate dest_id in the traverse list */
+		list_for_each_entry(bus_node, &traverse_list, link) {
+			if (bus_node->node_info->id == dest) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			unsigned int i;
+			/* Setup the new edge list */
+			list_for_each_entry(bus_node, &traverse_list, link) {
+				/* Setup list of black-listed nodes */
+				setup_bl_list(bus_node, &black_list);
+
+				for (i = 0; i < bus_node->node_info->
+						num_connections; i++) {
+					bool skip;
+					struct msm_bus_node_device_type
+							*node_conn;
+					node_conn =
+					to_msm_bus_node(bus_node->node_info->
+						dev_connections[i]);
+					if (node_conn->node_info->
+							is_traversed) {
+						MSM_BUS_ERR("Circ Path %d\n",
+						node_conn->node_info->id);
+						goto reset_traversed;
+					}
+					skip = chk_bl_list(&black_list,
+							bus_node->node_info->
+							connections[i]);
+					if (!skip) {
+						list_add_tail(&node_conn->link,
+							&edge_list);
+						node_conn->node_info->
+							is_traversed = true;
+					}
+				}
+			}
+
+			/* Keep tabs of the previous search list */
+			search_node = kzalloc(sizeof(struct bus_search_type),
+					 GFP_KERNEL);
+			INIT_LIST_HEAD(&search_node->node_list);
+			list_splice_init(&traverse_list,
+					 &search_node->node_list);
+			/* Add the previous search list to a route list */
+			list_add_tail(&search_node->link, &route_list);
+			/* Advancing the list depth */
+			depth_index++;
+			list_splice_init(&edge_list, &traverse_list);
+		}
+	}
+reset_traversed:
+	copy_remaining_nodes(&edge_list, &traverse_list, &route_list);
+	first_hop = prune_path(&route_list, dest, src, &black_list, found,
+								cl_name);
+
+exit_getpath:
+	return first_hop;
+}
+
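+/*
+ * Refresh the BCM votes that cover @dev for the given context: each BCM
+ * link node holds the node vote scaled by buswidth and BCM width. For
+ * the active context the BCM aggregate is the max over its link nodes
+ * of each node's active/dual ib and of its combined active+dual ab.
+ */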
+static void bcm_update_bus_req(struct device *dev, int ctx)
+{
+	struct msm_bus_node_device_type *cur_dev = NULL;
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	int i, j;
+	uint64_t max_ib = 0;
+	uint64_t max_ab = 0;
+	int lnode_idx = 0;
+
+	cur_dev = to_msm_bus_node(dev);
+	if (!cur_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_bcm_update_bus_req;
+	}
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_update_bus_req;
+
+	for (i = 0; i < cur_dev->node_info->num_bcm_devs; i++) {
+		bcm_dev = to_msm_bus_node(cur_dev->node_info->bcm_devs[i]);
+
+		if (!bcm_dev)
+			goto exit_bcm_update_bus_req;
+
+		lnode_idx = cur_dev->node_info->bcm_req_idx;
+		bcm_dev->lnode_list[lnode_idx].lnode_ib[ctx] =
+			msm_bus_div64(cur_dev->node_info->agg_params.buswidth,
+					cur_dev->node_bw[ctx].max_ib *
+					(uint64_t)bcm_dev->bcmdev->width);
+
+		bcm_dev->lnode_list[lnode_idx].lnode_ab[ctx] =
+			msm_bus_div64(cur_dev->node_info->agg_params.buswidth,
+					cur_dev->node_bw[ctx].max_ab *
+					(uint64_t)bcm_dev->bcmdev->width);
+
+		for (j = 0; j < bcm_dev->num_lnodes; j++) {
+			if (ctx == ACTIVE_CTX) {
+				max_ib = max(max_ib,
+				max(bcm_dev->lnode_list[j].lnode_ib[ACTIVE_CTX],
+				bcm_dev->lnode_list[j].lnode_ib[DUAL_CTX]));
+
+				max_ab = max(max_ab,
+				bcm_dev->lnode_list[j].lnode_ab[ACTIVE_CTX] +
+				bcm_dev->lnode_list[j].lnode_ab[DUAL_CTX]);
+			} else {
+				max_ib = max(max_ib,
+					bcm_dev->lnode_list[j].lnode_ib[ctx]);
+				max_ab = max(max_ab,
+					bcm_dev->lnode_list[j].lnode_ab[ctx]);
+			}
+		}
+
+		bcm_dev->node_bw[ctx].max_ab = max_ab;
+		bcm_dev->node_bw[ctx].max_ib = max_ib;
+	}
+exit_bcm_update_bus_req:
+	return;
+}
+
+int bcm_remove_handoff_req(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bcm_dev = NULL;
+	int i;
+	uint64_t max_ib = 0;
+	uint64_t max_ab = 0;
+	int ret = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	bcm_dev = to_msm_bus_node(dev);
+	if (!bcm_dev) {
+		MSM_BUS_ERR("%s: Null device ptr", __func__);
+		goto exit_bcm_remove_handoff_req;
+	}
+
+	if (!bcm_dev->node_info->is_bcm_dev)
+		goto exit_bcm_remove_handoff_req;
+
+	bcm_dev->bcmdev->init_ab = 0;
+	bcm_dev->bcmdev->init_ib = 0;
+
+	for (i = 0; i < bcm_dev->num_lnodes; i++) {
+		max_ib = max(max_ib,
+				bcm_dev->lnode_list[i].lnode_ib[0]);
+		max_ab = max(max_ab,
+				bcm_dev->lnode_list[i].lnode_ab[0]);
+	}
+
+	bcm_dev->node_bw[0].max_ab = max_ab;
+	bcm_dev->node_bw[0].max_ib = max_ib;
+
+exit_bcm_remove_handoff_req:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+
+
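+/*
+ * Aggregate all client votes on one node for a context: instantaneous
+ * bandwidth is the max ib across the node's link nodes, while average
+ * bandwidth is the sum of every ab vote.
+ */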
+static void aggregate_bus_req(struct msm_bus_node_device_type *bus_dev,
+									int ctx)
+{
+	int i;
+	uint64_t max_ib = 0;
+	uint64_t sum_ab = 0;
+
+	if (!bus_dev || !to_msm_bus_node(bus_dev->node_info->bus_device)) {
+		MSM_BUS_ERR("Bus node pointer is Invalid");
+		goto exit_agg_bus_req;
+	}
+
+	for (i = 0; i < bus_dev->num_lnodes; i++) {
+		max_ib = max(max_ib, bus_dev->lnode_list[i].lnode_ib[ctx]);
+		sum_ab += bus_dev->lnode_list[i].lnode_ab[ctx];
+	}
+
+	bus_dev->node_bw[ctx].sum_ab = sum_ab;
+	bus_dev->node_bw[ctx].max_ib = max_ib;
+
+exit_agg_bus_req:
+	return;
+}
+
+
+static void del_inp_list(struct list_head *list)
+{
+	struct rule_update_path_info *rule_node;
+	struct rule_update_path_info *rule_node_tmp;
+
+	list_for_each_entry_safe(rule_node, rule_node_tmp, list, link) {
+		list_del(&rule_node->link);
+		rule_node->added = false;
+	}
+}
+
+static void del_op_list(struct list_head *list)
+{
+	struct rule_apply_rcm_info *rule;
+	struct rule_apply_rcm_info *rule_tmp;
+
+	list_for_each_entry_safe(rule, rule_tmp, list, link)
+		list_del(&rule->link);
+}
+
+static int msm_bus_apply_rules(struct list_head *list, bool after_clk_commit)
+{
+	struct rule_apply_rcm_info *rule;
+	struct device *dev = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+
+	list_for_each_entry(rule, list, link) {
+		if (!rule)
+			continue;
+
+		if (rule && (rule->after_clk_commit != after_clk_commit))
+			continue;
+
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &rule->id,
+				msm_bus_device_match_adhoc);
+
+		if (!dev) {
+			MSM_BUS_ERR("Can't find dev node for %d", rule->id);
+			continue;
+		}
+		dev_info = to_msm_bus_node(dev);
+
+		ret = msm_bus_enable_limiter(dev_info, rule->throttle,
+							rule->lim_bw);
+		if (ret)
+			MSM_BUS_ERR("Failed to set limiter for %d", rule->id);
+	}
+
+	return ret;
+}
+
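+/*
+ * Apply any registered limiter rules that must run before the clock
+ * commit, hand commit_list to the HW commit layer, then apply the
+ * post-commit rules and reset the three scratch lists for the next
+ * transaction.
+ */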
+static void commit_data(void)
+{
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (rules_registered) {
+		msm_rules_update_path(&input_list, &apply_list);
+		msm_bus_apply_rules(&apply_list, false);
+	}
+
+	msm_bus_commit_data(&commit_list);
+
+	if (rules_registered) {
+		msm_bus_apply_rules(&apply_list, true);
+		del_inp_list(&input_list);
+		del_op_list(&apply_list);
+	}
+	INIT_LIST_HEAD(&input_list);
+	INIT_LIST_HEAD(&apply_list);
+	INIT_LIST_HEAD(&commit_list);
+}
+
+static void add_node_to_clist(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *node_parent =
+			to_msm_bus_node(node->node_info->bus_device);
+
+	if (!node->dirty) {
+		list_add_tail(&node->link, &commit_list);
+		node->dirty = true;
+	}
+
+	if (!node_parent->dirty) {
+		list_add_tail(&node_parent->link, &commit_list);
+		node_parent->dirty = true;
+	}
+}
+
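+/*
+ * Walk the lnode chain starting at src_idx on src_dev: store the new
+ * active and sleep ib/ab votes in every hop's link node, re-aggregate
+ * each node, refresh its BCMs, and queue the touched nodes (and their
+ * parent bus devices) on commit_list for the next commit_data().
+ */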
+static int update_path(struct device *src_dev, int dest, uint64_t act_req_ib,
+			uint64_t act_req_bw, uint64_t slp_req_ib,
+			uint64_t slp_req_bw, uint64_t cur_ib, uint64_t cur_bw,
+			int src_idx, int ctx)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int curr_idx;
+	int ret = 0;
+	struct rule_update_path_info *rule_node;
+	bool rules_registered = msm_rule_are_rules_registered();
+
+	if (IS_ERR_OR_NULL(src_dev)) {
+		MSM_BUS_ERR("%s: No source device", __func__);
+		ret = -ENODEV;
+		goto exit_update_path;
+	}
+
+	next_dev = src_dev;
+
+	if (src_idx < 0) {
+		MSM_BUS_ERR("%s: Invalid lnode idx %d", __func__, src_idx);
+		ret = -ENXIO;
+		goto exit_update_path;
+	}
+	curr_idx = src_idx;
+
+	while (next_dev) {
+		int i;
+
+		dev_info = to_msm_bus_node(next_dev);
+
+		if (curr_idx >= dev_info->num_lnodes) {
+			MSM_BUS_ERR("%s: Invalid lnode Idx %d num lnodes %d",
+			 __func__, curr_idx, dev_info->num_lnodes);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+
+		lnode = &dev_info->lnode_list[curr_idx];
+		if (!lnode) {
+			MSM_BUS_ERR("%s: Invalid lnode ptr lnode %d",
+				 __func__, curr_idx);
+			ret = -ENXIO;
+			goto exit_update_path;
+		}
+		lnode->lnode_ib[ACTIVE_CTX] = act_req_ib;
+		lnode->lnode_ab[ACTIVE_CTX] = act_req_bw;
+		lnode->lnode_ib[DUAL_CTX] = slp_req_ib;
+		lnode->lnode_ab[DUAL_CTX] = slp_req_bw;
+
+		for (i = 0; i < NUM_CTX; i++) {
+			aggregate_bus_req(dev_info, i);
+			bcm_update_bus_req(next_dev, i);
+		}
+
+		add_node_to_clist(dev_info);
+
+		if (rules_registered) {
+			rule_node = &dev_info->node_info->rule;
+			rule_node->id = dev_info->node_info->id;
+			rule_node->ib = dev_info->node_bw[ACTIVE_CTX].max_ib;
+			rule_node->ab = dev_info->node_bw[ACTIVE_CTX].sum_ab;
+			rule_node->clk =
+				dev_info->node_bw[ACTIVE_CTX].cur_clk_hz;
+			if (!rule_node->added) {
+				list_add_tail(&rule_node->link, &input_list);
+				rule_node->added = true;
+			}
+		}
+
+		next_dev = lnode->next_dev;
+		curr_idx = lnode->next;
+	}
+
+exit_update_path:
+	return ret;
+}
+
+static int remove_path(struct device *src_dev, int dst, uint64_t cur_ib,
+			uint64_t cur_ab, int src_idx, int active_only)
+{
+	struct device *next_dev = NULL;
+	struct link_node *lnode = NULL;
+	struct msm_bus_node_device_type *dev_info = NULL;
+	int ret = 0;
+	int cur_idx = src_idx;
+	int next_idx;
+
+	/* Update the current path to zero out all requests from
+	 * this client on all paths.
+	 */
+	if (!src_dev) {
+		MSM_BUS_ERR("%s: Can't find source device", __func__);
+		ret = -ENODEV;
+		goto exit_remove_path;
+	}
+
+	ret = update_path(src_dev, dst, 0, 0, 0, 0, cur_ib, cur_ab, src_idx,
+							active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error zeroing out path ctx %d",
+					__func__, ACTIVE_CTX);
+		goto exit_remove_path;
+	}
+
+	next_dev = src_dev;
+
+	while (next_dev) {
+		dev_info = to_msm_bus_node(next_dev);
+		lnode = &dev_info->lnode_list[cur_idx];
+		next_idx = lnode->next;
+		next_dev = lnode->next_dev;
+		remove_lnode(dev_info, cur_idx);
+		cur_idx = next_idx;
+	}
+
+exit_remove_path:
+	return ret;
+}
+
+static void getpath_debug(int src, int curr, int active_only)
+{
+	struct device *dev_node;
+	struct device *dev_it;
+	unsigned int hop = 1;
+	int idx;
+	struct msm_bus_node_device_type *devinfo;
+	int i;
+
+	dev_node = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+
+	if (!dev_node) {
+		MSM_BUS_ERR("SRC NOT FOUND %d", src);
+		return;
+	}
+
+	idx = curr;
+	devinfo = to_msm_bus_node(dev_node);
+	dev_it = dev_node;
+
+	MSM_BUS_ERR("Route list Src %d", src);
+	while (dev_it) {
+		struct msm_bus_node_device_type *busdev =
+			to_msm_bus_node(devinfo->node_info->bus_device);
+
+		MSM_BUS_ERR("Hop[%d] at Device %d ctx %d", hop,
+					devinfo->node_info->id, active_only);
+
+		for (i = 0; i < NUM_CTX; i++) {
+			MSM_BUS_ERR("dev info sel ib %llu",
+						devinfo->node_bw[i].cur_clk_hz);
+			MSM_BUS_ERR("dev info sel ab %llu",
+						devinfo->node_bw[i].sum_ab);
+		}
+
+		dev_it = devinfo->lnode_list[idx].next_dev;
+		idx = devinfo->lnode_list[idx].next;
+		if (dev_it)
+			devinfo = to_msm_bus_node(dev_it);
+
+		MSM_BUS_ERR("Bus Device %d", busdev->node_info->id);
+		MSM_BUS_ERR("Bus Clock %llu", busdev->clk[active_only].rate);
+
+		if (idx < 0)
+			break;
+		hop++;
+	}
+}
+
+static void unregister_client_adhoc(uint32_t cl)
+{
+	int i;
+	struct msm_bus_scale_pdata *pdata;
+	int lnode, src, curr, dest;
+	uint64_t  cur_clk, cur_bw;
+	struct msm_bus_client *client;
+	struct device *src_dev;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+	client = handle_list.cl_list[cl];
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Null pdata passed to unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	curr = client->curr;
+	if (curr >= pdata->num_usecases) {
+		MSM_BUS_ERR("Invalid index, defaulting curr to 0");
+		curr = 0;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, client);
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = client->pdata->usecase[curr].vectors[i].src;
+		dest = client->pdata->usecase[curr].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		cur_clk = client->pdata->usecase[curr].vectors[i].ib;
+		cur_bw = client->pdata->usecase[curr].vectors[i].ab;
+		remove_path(src_dev, dest, cur_clk, cur_bw, lnode,
+						pdata->active_only);
+	}
+	commit_data();
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_UNREGISTER, cl);
+	kfree(client->src_pnode);
+	kfree(client->src_devs);
+	kfree(client);
+	handle_list.cl_list[cl] = NULL;
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static int alloc_handle_lst(int size)
+{
+	int ret = 0;
+	struct msm_bus_client **t_cl_list;
+
+	if (!handle_list.num_entries) {
+		t_cl_list = kzalloc(sizeof(struct msm_bus_client *)
+			* NUM_CL_HANDLES, GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+		handle_list.cl_list = t_cl_list;
+		handle_list.num_entries += NUM_CL_HANDLES;
+	} else {
+		t_cl_list = krealloc(handle_list.cl_list,
+				sizeof(struct msm_bus_client *) *
+				(handle_list.num_entries + NUM_CL_HANDLES),
+				GFP_KERNEL);
+		if (ZERO_OR_NULL_PTR(t_cl_list)) {
+			ret = -ENOMEM;
+			MSM_BUS_ERR("%s: Failed to allocate handles list",
+								__func__);
+			goto exit_alloc_handle_lst;
+		}
+
+		handle_list.cl_list = t_cl_list;
+		memset(&handle_list.cl_list[handle_list.num_entries], 0,
+			NUM_CL_HANDLES * sizeof(struct msm_bus_client *));
+		handle_list.num_entries += NUM_CL_HANDLES;
+	}
+exit_alloc_handle_lst:
+	return ret;
+}
+
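+/*
+ * Hand out the first free non-zero slot in handle_list; slot 0 stays
+ * reserved so that a zero return can signal registration failure. The
+ * table grows in NUM_CL_HANDLES chunks once it runs out of free slots.
+ */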
+static uint32_t gen_handle(struct msm_bus_client *client)
+{
+	uint32_t handle = 0;
+	int i;
+	int ret = 0;
+
+	for (i = 0; i < handle_list.num_entries; i++) {
+		if (i && !handle_list.cl_list[i]) {
+			handle = i;
+			break;
+		}
+	}
+
+	if (!handle) {
+		ret = alloc_handle_lst(NUM_CL_HANDLES);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to allocate handle list",
+							__func__);
+			goto exit_gen_handle;
+		}
+		handle = i + 1;
+	}
+	handle_list.cl_list[handle] = client;
+exit_gen_handle:
+	return handle;
+}
+
+static uint32_t register_client_adhoc(struct msm_bus_scale_pdata *pdata)
+{
+	int src, dest;
+	int i;
+	struct msm_bus_client *client = NULL;
+	int *lnode;
+	struct device *dev;
+	uint32_t handle = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	client = kzalloc(sizeof(struct msm_bus_client), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register_client;
+	}
+	client->pdata = pdata;
+
+	lnode = kcalloc(pdata->usecase->num_paths, sizeof(int), GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(lnode)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_lnode_malloc_fail;
+	}
+	client->src_pnode = lnode;
+
+	client->src_devs = kcalloc(pdata->usecase->num_paths,
+					sizeof(struct device *), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(client->src_devs)) {
+		MSM_BUS_ERR("%s: Error allocating pathnode ptr!", __func__);
+		goto exit_src_dev_malloc_fail;
+	}
+	client->curr = -1;
+
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase->vectors[i].src;
+		dest = pdata->usecase->vectors[i].dst;
+
+		if ((src < 0) || (dest < 0)) {
+			MSM_BUS_ERR("%s:Invalid src/dst.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		dev = bus_find_device(&msm_bus_type, NULL,
+				(void *) &src,
+				msm_bus_device_match_adhoc);
+		if (IS_ERR_OR_NULL(dev)) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+		client->src_devs[i] = dev;
+
+		MSM_BUS_DBG("%s: finding path. src %d dest %d",
+				__func__, src, dest);
+
+		lnode[i] = getpath(dev, dest, client->pdata->name);
+		if (lnode[i] < 0) {
+			MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+				__func__, src, dest);
+			goto exit_invalid_data;
+		}
+	}
+
+	handle = gen_handle(client);
+	msm_bus_dbg_client_data(client->pdata, MSM_BUS_DBG_REGISTER,
+					handle);
+	MSM_BUS_DBG("%s: Client handle %d %s", __func__, handle,
+						client->pdata->name);
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+exit_invalid_data:
+	kfree(client->src_devs);
+exit_src_dev_malloc_fail:
+	kfree(lnode);
+exit_lnode_malloc_fail:
+	kfree(client);
+exit_register_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return handle;
+}
+
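+/*
+ * Re-vote every src/dst vector of the client for usecase @idx: active
+ * votes come straight from the vector, sleep votes mirror them unless
+ * the client is active-only, and the whole set is committed in one go.
+ */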
+static int update_client_paths(struct msm_bus_client *client, bool log_trns,
+							unsigned int idx)
+{
+	int lnode, src, dest, cur_idx;
+	uint64_t req_clk, req_bw, curr_clk, curr_bw, slp_clk, slp_bw;
+	int i, ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct device *src_dev;
+
+	if (!client) {
+		MSM_BUS_ERR("Client handle is NULL");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("Client pdata Null");
+		ret = -ENXIO;
+		goto exit_update_client_paths;
+	}
+
+	cur_idx = client->curr;
+	client->curr = idx;
+	for (i = 0; i < pdata->usecase->num_paths; i++) {
+		src = pdata->usecase[idx].vectors[i].src;
+		dest = pdata->usecase[idx].vectors[i].dst;
+
+		lnode = client->src_pnode[i];
+		src_dev = client->src_devs[i];
+		req_clk = client->pdata->usecase[idx].vectors[i].ib;
+		req_bw = client->pdata->usecase[idx].vectors[i].ab;
+		if (cur_idx < 0) {
+			curr_clk = 0;
+			curr_bw = 0;
+		} else {
+			curr_clk =
+				client->pdata->usecase[cur_idx].vectors[i].ib;
+			curr_bw = client->pdata->usecase[cur_idx].vectors[i].ab;
+			MSM_BUS_DBG("%s:ab: %llu ib: %llu\n", __func__,
+					curr_bw, curr_clk);
+		}
+
+		if (pdata->active_only) {
+			slp_clk = 0;
+			slp_bw = 0;
+		} else {
+			slp_clk = req_clk;
+			slp_bw = req_bw;
+		}
+
+		ret = update_path(src_dev, dest, req_clk, req_bw, slp_clk,
+			slp_bw, curr_clk, curr_bw, lnode, pdata->active_only);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Update path failed! %d ctx %d\n",
+					__func__, ret, pdata->active_only);
+			goto exit_update_client_paths;
+		}
+
+		if (log_trns)
+			getpath_debug(src, lnode, pdata->active_only);
+	}
+	commit_data();
+exit_update_client_paths:
+	return ret;
+}
+
+static int update_context(uint32_t cl, bool active_only,
+					unsigned int ctx_idx)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+	if (pdata->active_only == active_only) {
+		MSM_BUS_ERR("No change in context(%d==%d), skip\n",
+					pdata->active_only, active_only);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	if (ctx_idx >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, ctx_idx);
+		ret = -ENXIO;
+		goto exit_update_context;
+	}
+
+	pdata->active_only = active_only;
+
+	msm_bus_dbg_client_data(client->pdata, ctx_idx, cl);
+	ret = update_client_paths(client, false, ctx_idx);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_context;
+	}
+
+//	trace_bus_update_request_end(pdata->name);
+
+exit_update_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static int update_request_adhoc(uint32_t cl, unsigned int index)
+{
+	int ret = 0;
+	struct msm_bus_scale_pdata *pdata;
+	struct msm_bus_client *client;
+	const char *test_cl = "Null";
+	bool log_transaction = false;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %d", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	client = handle_list.cl_list[cl];
+	if (!client) {
+		MSM_BUS_ERR("%s: Invalid client pointer ", __func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	pdata = client->pdata;
+	if (!pdata) {
+		MSM_BUS_ERR("%s: Client data Null.[client didn't register]",
+				__func__);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (index >= pdata->num_usecases) {
+		MSM_BUS_ERR("Client %u passed invalid index: %d\n",
+			cl, index);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (client->curr == index) {
+		MSM_BUS_DBG("%s: Not updating client request idx %d unchanged",
+				__func__, index);
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, pdata->name))
+		log_transaction = true;
+
+	MSM_BUS_DBG("%s: cl: %u index: %d curr: %d num_paths: %d\n", __func__,
+		cl, index, client->curr, client->pdata->usecase->num_paths);
+	msm_bus_dbg_client_data(client->pdata, index, cl);
+	ret = update_client_paths(client, log_transaction, index);
+	if (ret) {
+		pr_err("%s: Err updating path\n", __func__);
+		goto exit_update_request;
+	}
+
+//	trace_bus_update_request_end(pdata->name);
+
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void free_cl_mem(struct msm_bus_client_handle *cl)
+{
+	if (cl) {
+		kfree(cl->name);
+		kfree(cl);
+		cl = NULL;
+	}
+}
+
+static int update_bw_adhoc(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	int ret = 0;
+	const char *test_cl = "test-client";
+	bool log_transaction = false;
+	u64 slp_ib, slp_ab;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!cl) {
+		MSM_BUS_ERR("%s: Invalid client handle %p", __func__, cl);
+		ret = -ENXIO;
+		goto exit_update_request;
+	}
+
+	if (!strcmp(test_cl, cl->name))
+		log_transaction = true;
+
+	msm_bus_dbg_rec_transaction(cl, ab, ib);
+
+	if ((cl->cur_act_ib == ib) && (cl->cur_act_ab == ab)) {
+		MSM_BUS_DBG("%s:no change in request", cl->name);
+		goto exit_update_request;
+	}
+
+	if (cl->active_only) {
+		slp_ib = 0;
+		slp_ab = 0;
+	} else {
+		slp_ib = ib;
+		slp_ab = ab;
+	}
+
+	ret = update_path(cl->mas_dev, cl->slv, ib, ab, slp_ib, slp_ab,
+		cl->cur_act_ib, cl->cur_act_ab, cl->first_hop, cl->active_only);
+
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_update_request;
+	}
+
+	commit_data();
+	cl->cur_act_ib = ib;
+	cl->cur_act_ab = ab;
+	cl->cur_slp_ib = slp_ib;
+	cl->cur_slp_ab = slp_ab;
+
+	if (log_transaction)
+		getpath_debug(cl->mas, cl->first_hop, cl->active_only);
+//	trace_bus_update_request_end(cl->name);
+exit_update_request:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+
+	return ret;
+}
+
+static int update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	int ret = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("Invalid client handle %p", cl);
+		ret = -ENXIO;
+		goto exit_change_context;
+	}
+
+	if ((cl->cur_act_ib == act_ib) &&
+		(cl->cur_act_ab == act_ab) &&
+		(cl->cur_slp_ib == slp_ib) &&
+		(cl->cur_slp_ab == slp_ab)) {
+		MSM_BUS_ERR("No change in vote");
+		goto exit_change_context;
+	}
+
+	if (!slp_ab && !slp_ib)
+		cl->active_only = true;
+	msm_bus_dbg_rec_transaction(cl, cl->cur_act_ab, cl->cur_slp_ib);
+	ret = update_path(cl->mas_dev, cl->slv, act_ib, act_ab, slp_ib, slp_ab,
+				cl->cur_act_ab, cl->cur_act_ab,  cl->first_hop,
+				cl->active_only);
+	if (ret) {
+		MSM_BUS_ERR("%s: Update path failed! %d active_only %d\n",
+				__func__, ret, cl->active_only);
+		goto exit_change_context;
+	}
+	commit_data();
+	cl->cur_act_ib = act_ib;
+	cl->cur_act_ab = act_ab;
+	cl->cur_slp_ib = slp_ib;
+	cl->cur_slp_ab = slp_ab;
+//	trace_bus_update_request_end(cl->name);
+exit_change_context:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return ret;
+}
+
+static void unregister_adhoc(struct msm_bus_client_handle *cl)
+{
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+	if (!cl) {
+		MSM_BUS_ERR("%s: Null cl handle passed unregister\n",
+				__func__);
+		goto exit_unregister_client;
+	}
+
+	MSM_BUS_DBG("%s: Unregistering client %p", __func__, cl);
+
+	remove_path(cl->mas_dev, cl->slv, cl->cur_act_ib, cl->cur_act_ab,
+				cl->first_hop, cl->active_only);
+	commit_data();
+	msm_bus_dbg_remove_client(cl);
+	kfree(cl);
+exit_unregister_client:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+}
+
+static struct msm_bus_client_handle*
+register_adhoc(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	struct msm_bus_client_handle *client = NULL;
+	int len = 0;
+
+	rt_mutex_lock(&msm_bus_adhoc_lock);
+
+	if (!(mas && slv && name)) {
+		pr_err("%s: Error: src, dst and name are required",
+								 __func__);
+		goto exit_register;
+	}
+
+	client = kzalloc(sizeof(struct msm_bus_client_handle), GFP_KERNEL);
+	if (!client) {
+		MSM_BUS_ERR("%s: Error allocating client data", __func__);
+		goto exit_register;
+	}
+
+	len = strnlen(name, MAX_STR_CL);
+	client->name = kzalloc((len + 1), GFP_KERNEL);
+	if (!client->name) {
+		MSM_BUS_ERR("%s: Error allocating client name buf", __func__);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+	strlcpy(client->name, name, MAX_STR_CL);
+	client->active_only = active_only;
+
+	client->mas = mas;
+	client->slv = slv;
+
+	client->mas_dev = bus_find_device(&msm_bus_type, NULL,
+					(void *) &mas,
+					msm_bus_device_match_adhoc);
+	if (IS_ERR_OR_NULL(client->mas_dev)) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	client->first_hop = getpath(client->mas_dev, client->slv, client->name);
+	if (client->first_hop < 0) {
+		MSM_BUS_ERR("%s:Failed to find path.src %d dest %d",
+			__func__, client->mas, client->slv);
+		free_cl_mem(client);
+		goto exit_register;
+	}
+
+	MSM_BUS_DBG("%s:Client handle %p %s", __func__, client,
+						client->name);
+	msm_bus_dbg_add_client(client);
+exit_register:
+	rt_mutex_unlock(&msm_bus_adhoc_lock);
+	return client;
+}
+/**
+ *  msm_bus_arb_setops_adhoc() : Setup the bus arbitration ops
+ *  @ arb_ops: pointer to the arb ops.
+ */
+void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops)
+{
+	arb_ops->register_client = register_client_adhoc;
+	arb_ops->update_request = update_request_adhoc;
+	arb_ops->unregister_client = unregister_client_adhoc;
+	arb_ops->update_context = update_context;
+
+	arb_ops->register_cl = register_adhoc;
+	arb_ops->unregister = unregister_adhoc;
+	arb_ops->update_bw = update_bw_adhoc;
+	arb_ops->update_bw_context = update_bw_context;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc.h b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
new file mode 100644
index 0000000..ca7b112
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc.h
@@ -0,0 +1,127 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+#define _ARCH_ARM_MACH_MSM_BUS_BIMC_H
+
+struct msm_bus_bimc_params {
+	uint32_t bus_id;
+	uint32_t addr_width;
+	uint32_t data_width;
+	uint32_t nmasters;
+	uint32_t nslaves;
+};
+
+struct msm_bus_bimc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_bimc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t qos_freq;
+	struct msm_bus_bimc_params params;
+	struct msm_bus_bimc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_bimc_node {
+	uint32_t conn_mask;
+	uint32_t data_width;
+	uint8_t slv_arb_mode;
+};
+
+enum msm_bus_bimc_arb_mode {
+	BIMC_ARB_MODE_RR = 0,
+	BIMC_ARB_MODE_PRIORITY_RR,
+	BIMC_ARB_MODE_TIERED_RR,
+};
+
+
+enum msm_bus_bimc_interleave {
+	BIMC_INTERLEAVE_NONE = 0,
+	BIMC_INTERLEAVE_ODD,
+	BIMC_INTERLEAVE_EVEN,
+};
+
+struct msm_bus_bimc_slave_seg {
+	bool enable;
+	uint64_t start_addr;
+	uint64_t seg_size;
+	uint8_t interleave;
+};
+
+enum msm_bus_bimc_qos_mode_type {
+	BIMC_QOS_MODE_FIXED = 0,
+	BIMC_QOS_MODE_LIMITER,
+	BIMC_QOS_MODE_BYPASS,
+	BIMC_QOS_MODE_REGULATOR,
+};
+
+struct msm_bus_bimc_qos_health {
+	bool limit_commands;
+	uint32_t areq_prio;
+	uint32_t prio_level;
+};
+
+struct msm_bus_bimc_mode_fixed {
+	uint32_t prio_level;
+	uint32_t areq_prio_rd;
+	uint32_t areq_prio_wr;
+};
+
+struct msm_bus_bimc_mode_rl {
+	uint8_t qhealthnum;
+	struct msm_bus_bimc_qos_health qhealth[4];
+};
+
+struct msm_bus_bimc_qos_mode {
+	uint8_t mode;
+	struct msm_bus_bimc_mode_fixed fixed;
+	struct msm_bus_bimc_mode_rl rl;
+};
+
+struct msm_bus_bimc_qos_bw {
+	uint64_t bw;	/* bw is in Bytes/sec */
+	uint32_t ws;	/* Window size in nano seconds*/
+	int64_t thh;	/* Threshold high, bytes per second */
+	int64_t thm;	/* Threshold medium, bytes per second */
+	int64_t thl;	/* Threshold low, bytes per second */
+	u32 gp;	/* Grant Period in micro seconds */
+	u32 thmp; /* Threshold medium in percentage */
+};
+
+struct msm_bus_bimc_clk_gate {
+	bool core_clk_gate_en;
+	bool arb_clk_gate_en;	/* For arbiter */
+	bool port_clk_gate_en;	/* For regs on BIMC core clock */
+};
+
+void msm_bus_bimc_set_slave_seg(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, uint32_t seg_index,
+	struct msm_bus_bimc_slave_seg *bsseg);
+void msm_bus_bimc_set_slave_clk_gate(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, struct msm_bus_bimc_clk_gate *bgate);
+void msm_bus_bimc_set_mas_clk_gate(struct msm_bus_bimc_info *binfo,
+	uint32_t mas_index, struct msm_bus_bimc_clk_gate *bgate);
+void msm_bus_bimc_arb_en(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, bool en);
+void msm_bus_bimc_get_params(struct msm_bus_bimc_info *binfo,
+	struct msm_bus_bimc_params *params);
+void msm_bus_bimc_get_mas_params(struct msm_bus_bimc_info *binfo,
+	uint32_t mas_index, struct msm_bus_bimc_node *mparams);
+void msm_bus_bimc_get_slv_params(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index, struct msm_bus_bimc_node *sparams);
+bool msm_bus_bimc_get_arb_en(struct msm_bus_bimc_info *binfo,
+	uint32_t slv_index);
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_BIMC_H*/
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
new file mode 100644
index 0000000..95f61aa
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_adhoc.c
@@ -0,0 +1,609 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+#include <trace/events/trace_msm_bus.h>
+
+/* M_Generic */
+
+enum bke_sw {
+	BKE_OFF = 0,
+	BKE_ON = 1,
+};
+
+#define M_REG_BASE(b)		((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+		(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+	M_MODE_RMSK				= 0xf0000011,
+	M_MODE_WR_GATHER_BEATS_BMSK		= 0xf0000000,
+	M_MODE_WR_GATHER_BEATS_SHFT		= 0x1c,
+	M_MODE_NARROW_WR_BMSK			= 0x10,
+	M_MODE_NARROW_WR_SHFT			= 0x4,
+	M_MODE_ORDERING_MODEL_BMSK		= 0x1,
+	M_MODE_ORDERING_MODEL_SHFT		= 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
+	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
+	M_PRIOLVL_OVERRIDE_SHFT			= 0x8,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK	= 0x1,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT	= 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK		= 0x1000,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT		= 0xc,
+	M_RD_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_RD_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_RD_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_RD_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_RD_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_RD_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK		= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT		= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
+	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_WR_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_WR_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+	M_BKE_EN_RMSK			= 0x1,
+	M_BKE_EN_EN_BMSK		= 0x1,
+	M_BKE_EN_EN_SHFT		= 0x0,
+};
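+
+/*
+ * Per-master QoS registers sit in 16 KB strides above the 32 KB
+ * M_Generic window, e.g. M_BKE_EN_ADDR(base, 1) expands to
+ * base + 0x8000 + 0x4000 + 0x300 (illustrative arithmetic only).
+ */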
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+	M_BKE_GP_RMSK		= 0x3ff,
+	M_BKE_GP_GP_BMSK	= 0x3ff,
+	M_BKE_GP_GP_SHFT	= 0x0,
+};
+
+/* Grant count register.
+ * The Grant count register represents a signed 16 bit
+ * value, range 0-0x7fff
+ */
+#define M_BKE_GC_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+	M_BKE_GC_RMSK			= 0xffff,
+	M_BKE_GC_GC_BMSK		= 0xffff,
+	M_BKE_GC_GC_SHFT		= 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+	M_BKE_THH_RMSK		= 0xffff,
+	M_BKE_THH_THRESH_BMSK	= 0xffff,
+	M_BKE_THH_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+	M_BKE_THM_RMSK		= 0xffff,
+	M_BKE_THM_THRESH_BMSK	= 0xffff,
+	M_BKE_THM_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+	M_BKE_THL_RMSK			= 0xffff,
+	M_BKE_THL_THRESH_BMSK		= 0xffff,
+	M_BKE_THL_THRESH_SHFT		= 0x0,
+};
+
+#define NUM_HEALTH_LEVEL	(4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x303,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x300,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT	= 0x8,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
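+
+/*
+ * Worked example (illustrative): BKE_HEALTH_VAL(1, 2, 3) packs
+ * limit_commands into bit 31, areq_prio into bits [9:8] and prio_level
+ * into bits [1:0], giving 0x80000203.
+ */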
+
+#define MAX_GRANT_PERIOD \
+	(M_BKE_GP_GP_BMSK >> \
+	M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+	(M_BKE_GC_GC_BMSK >> \
+	(M_BKE_GC_GC_SHFT + 1))
+
+static int bimc_div(int64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+static void set_bke_en(void __iomem *addr, uint32_t index,
+		bool req)
+{
+	uint32_t old_val, new_val;
+
+	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
+	new_val = req << M_BKE_EN_EN_SHFT;
+	if ((old_val & M_BKE_EN_RMSK) == (new_val))
+		return;
+	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
+				M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
+	/* Ensure that BKE register is programmed set before returning */
+	wmb();
+}
+
+static void set_health_reg(void __iomem *addr, uint32_t rmsk,
+	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
+{
+	uint32_t reg_val, val0, val;
+
+	/* Note, addr is already passed with right mas_index */
+	reg_val = readl_relaxed(addr) & rmsk;
+	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
+		qmode->rl.qhealth[index].areq_prio,
+		qmode->rl.qhealth[index].prio_level);
+	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
+	writel_relaxed(val, addr);
+	/*
+	 * Ensure that priority for regulator/limiter modes are
+	 * set before returning
+	 */
+	wmb();
+}
+
+static void msm_bus_bimc_set_qos_prio(void __iomem *base,
+	uint32_t mas_index, uint8_t qmode_sel,
+	struct msm_bus_bimc_qos_mode *qmode)
+{
+
+	switch (qmode_sel) {
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+	case BIMC_QOS_MODE_LIMITER:
+		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
+		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
+		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
+		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
+		set_bke_en(base, mas_index, true);
+		break;
+	case BIMC_QOS_MODE_BYPASS:
+		set_bke_en(base, mas_index, false);
+		break;
+	default:
+		break;
+	}
+}
+
+static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
+	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
+	uint32_t gc)
+{
+	int32_t reg_val, val;
+	int32_t bke_reg_val;
+	int16_t val2;
+
+	/* Disable BKE before writing to registers as per spec */
+	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
+	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
+		M_BKE_EN_ADDR(baddr, mas_index));
+
+	/* Write values of registers calculated */
+	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
+		& M_BKE_GP_RMSK;
+	val =  gp << M_BKE_GP_GP_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
+		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
+		M_BKE_GC_RMSK;
+	val =  gc << M_BKE_GC_GC_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
+		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
+		M_BKE_THH_RMSK;
+	val =  th << M_BKE_THH_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
+		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
+		M_BKE_THM_RMSK;
+	val2 =	tm << M_BKE_THM_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
+		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
+		M_BKE_THL_RMSK;
+	val2 =	tl << M_BKE_THL_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
+		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
+		mas_index));
+
+	/* Ensure that all bandwidth register writes have completed
+	 * before returning
+	 */
+	wmb();
+}
+
+static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
+	int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+	int32_t bw_mbps, thh = 0, thm, thl, gc;
+	int32_t gp;
+	u64 temp;
+
+	if (qos_freq == 0) {
+		MSM_BUS_DBG("No QoS Frequency.\n");
+		return;
+	}
+
+	if (!(qbw->bw && qbw->gp)) {
+		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+		return;
+	}
+
+	/* Convert bandwidth to MBPS */
+	temp = qbw->bw;
+	bimc_div(&temp, 1000000);
+	bw_mbps = temp;
+
+	/* Grant period in clock cycles
+	 * Grant period from bandwidth structure
+	 * is in nano seconds, QoS freq is in KHz.
+	 * Divide by 1000 to get clock cycles.
+	 */
+	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+	/* Grant count = BW in MBps * Grant period
+	 * in microseconds
+	 */
+	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+	gc = min(gc, MAX_GC);
+
+	/* Medium threshold = -((Medium Threshold percentage *
+	 * Grant count) / 100)
+	 */
+	thm = -((qbw->thmp * gc) / 100);
+	qbw->thm = thm;
+
+	/* Low threshold = -(Grant count) */
+	thl = -gc;
+	qbw->thl = thl;
+
+	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+			__func__, gp, gc, thm, thl, thh);
+
+	trace_bus_bke_params(gc, gp, thl, thm, thh);
+	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
+}
+
+static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int mode;
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to limit\n");
+		return 0;
+	}
+
+	if ((enable_lim == THROTTLE_ON) && lim_bw) {
+		mode =  BIMC_QOS_MODE_LIMITER;
+
+		qmode.rl.qhealth[0].limit_commands = 1;
+		qmode.rl.qhealth[1].limit_commands = 0;
+		qmode.rl.qhealth[2].limit_commands = 0;
+		qmode.rl.qhealth[3].limit_commands = 0;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			struct msm_bus_bimc_qos_bw qbw;
+			/* Update bandwidth if the limit has changed */
+			if (info->node_info->lim_bw != lim_bw) {
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = lim_bw;
+				qbw.gp = info->node_info->qos_params.gp;
+				qbw.thmp = info->node_info->qos_params.thmp;
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->node_info->qport[i], &qbw);
+			}
+		}
+		info->node_info->lim_bw = lim_bw;
+	} else {
+		mode = info->node_info->qos_params.mode;
+		if (mode != BIMC_QOS_MODE_BYPASS) {
+			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+				qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+				qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+			}
+		}
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+				mode, &qmode);
+	return 0;
+}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == BIMC_QOS_MODE_LIMITER)
+		|| (mode == BIMC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		return 0;
+	}
+
+	switch (info->node_info->qos_params.mode) {
+		/* For now, Fixed and Regulator are handled the same way. */
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	case BIMC_QOS_MODE_LIMITER:
+		qmode.rl.qhealth[0].limit_commands = 1;
+		qmode.rl.qhealth[1].limit_commands = 0;
+		qmode.rl.qhealth[2].limit_commands = 0;
+		qmode.rl.qhealth[3].limit_commands = 0;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	default:
+		break;
+	}
+
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+			info->node_info->qos_params.mode, &qmode);
+
+	return 0;
+}
+
+static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_bw qbw;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+	int64_t bw = 0;
+	int ret = 0;
+	struct msm_bus_node_info_type *info = dev->node_info;
+	int mode;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+				info->id, bw);
+
+		if (!info->qport) {
+			MSM_BUS_DBG("No qos ports to update!\n");
+			goto exit_set_bw;
+		}
+
+		qbw.bw = bw + info->qos_params.bw_buffer;
+		trace_bus_bimc_config_limiter(info->id, bw);
+
+		/* Default to gp of 5us */
+		qbw.gp = (info->qos_params.gp ?
+				info->qos_params.gp : 5000);
+		/* Default to thmp of 50% */
+		qbw.thmp = (info->qos_params.thmp ?
+				info->qos_params.thmp : 50);
+		/*
+		 * If the BW vote is 0 then set the QoS mode to
+		 * Fixed/0/0.
+		 */
+		if (bw) {
+			qmode.rl.qhealth[0].limit_commands = 1;
+			qmode.rl.qhealth[1].limit_commands = 0;
+			qmode.rl.qhealth[2].limit_commands = 0;
+			qmode.rl.qhealth[3].limit_commands = 0;
+			mode = info->qos_params.mode;
+		} else {
+			mode =	BIMC_QOS_MODE_FIXED;
+		}
+
+		for (i = 0; i < info->num_qports; i++) {
+			msm_bus_bimc_set_qos_prio(qos_base,
+				info->qport[i], mode, &qmode);
+			if (bw)
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->qport[i], &qbw);
+		}
+	}
+exit_set_bw:
+	return ret;
+}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg =
+					msm_bus_bimc_update_bw_reg;
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
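For reference, a minimal userspace sketch of the grant-period/grant-count arithmetic performed by bimc_set_static_qos_bw() above. The input values (19.2 MHz QoS clock, 5 us window, 400 MB/s vote, 50% medium threshold) are illustrative assumptions only:

/*
 * Illustrative-only sketch of the BKE parameter arithmetic in
 * bimc_set_static_qos_bw(); the input values are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC	1000
#define MAX_GC		0x7fff	/* M_BKE_GC_GC_BMSK >> (M_BKE_GC_GC_SHFT + 1) */

int main(void)
{
	uint64_t bw = 400000000ULL;	/* requested bandwidth, bytes/s (assumed) */
	uint32_t qos_freq = 19200;	/* QoS clock, kHz (assumed) */
	uint32_t gp_ns = 5000;		/* grant period window, ns (assumed) */
	uint32_t thmp = 50;		/* medium threshold, percent (assumed) */

	int32_t bw_mbps = bw / 1000000;				/* 400 MB/s */
	int32_t gp = (qos_freq * gp_ns) / (1000 * NSEC_PER_USEC); /* clock cycles */
	int32_t gc = bw_mbps * (gp_ns / NSEC_PER_USEC);		/* bytes per window */

	if (gc > MAX_GC)
		gc = MAX_GC;

	int32_t thm = -((int32_t)(thmp * gc) / 100);
	int32_t thl = -gc;

	printf("gp=%d cycles, gc=%d, thm=%d, thl=%d, thh=0\n", gp, gc, thm, thl);
	return 0;
}

With these inputs the sketch prints gp=96 cycles, gc=2000, thm=-1000, thl=-2000, which are the values set_qos_bw_regs() would then program into the GP, GC and threshold registers.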
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
new file mode 100644
index 0000000..c1e8feb
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_bimc_rpmh.c
@@ -0,0 +1,606 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: BIMC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_bimc.h"
+#include "msm_bus_adhoc.h"
+
+/* M_Generic */
+
+enum bke_sw {
+	BKE_OFF = 0,
+	BKE_ON = 1,
+};
+
+#define M_REG_BASE(b)		((b) + 0x00008000)
+
+#define M_MODE_ADDR(b, n) \
+		(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000210)
+enum bimc_m_mode {
+	M_MODE_RMSK				= 0xf0000011,
+	M_MODE_WR_GATHER_BEATS_BMSK		= 0xf0000000,
+	M_MODE_WR_GATHER_BEATS_SHFT		= 0x1c,
+	M_MODE_NARROW_WR_BMSK			= 0x10,
+	M_MODE_NARROW_WR_SHFT			= 0x4,
+	M_MODE_ORDERING_MODEL_BMSK		= 0x1,
+	M_MODE_ORDERING_MODEL_SHFT		= 0x0,
+};
+
+#define M_PRIOLVL_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000230)
+enum bimc_m_priolvl_override {
+	M_PRIOLVL_OVERRIDE_RMSK			= 0x301,
+	M_PRIOLVL_OVERRIDE_BMSK			= 0x300,
+	M_PRIOLVL_OVERRIDE_SHFT			= 0x8,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_BMSK	= 0x1,
+	M_PRIOLVL_OVERRIDE_OVERRIDE_PRIOLVL_SHFT	= 0x0,
+};
+
+#define M_RD_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000240)
+enum bimc_m_read_command_override {
+	M_RD_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_RD_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_RD_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_RD_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_BMSK		= 0x1000,
+	M_RD_CMD_OVERRIDE_ATRANSIENT_SHFT		= 0xc,
+	M_RD_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_RD_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_RD_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_RD_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_RD_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_RD_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_RD_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_RD_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK		= 0x2,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT		= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_RD_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_WR_CMD_OVERRIDE_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000250)
+enum bimc_m_write_command_override {
+	M_WR_CMD_OVERRIDE_RMSK			= 0x3071f7f,
+	M_WR_CMD_OVERRIDE_AREQPRIO_BMSK		= 0x3000000,
+	M_WR_CMD_OVERRIDE_AREQPRIO_SHFT		= 0x18,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_BMSK		= 0x70000,
+	M_WR_CMD_OVERRIDE_AMEMTYPE_SHFT		= 0x10,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_BMSK	= 0x1000,
+	M_WR_CMD_OVERRIDE_ATRANSIENT_SHFT	= 0xc,
+	M_WR_CMD_OVERRIDE_ASHARED_BMSK		= 0x800,
+	M_WR_CMD_OVERRIDE_ASHARED_SHFT		= 0xb,
+	M_WR_CMD_OVERRIDE_AREDIRECT_BMSK		= 0x400,
+	M_WR_CMD_OVERRIDE_AREDIRECT_SHFT		= 0xa,
+	M_WR_CMD_OVERRIDE_AOOO_BMSK			= 0x200,
+	M_WR_CMD_OVERRIDE_AOOO_SHFT			= 0x9,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_BMSK		= 0x100,
+	M_WR_CMD_OVERRIDE_AINNERSHARED_SHFT		= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_BMSK	= 0x40,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREQPRIO_SHFT	= 0x6,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_BMSK	= 0x20,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ATRANSIENT_SHFT	= 0x5,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_BMSK	= 0x10,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AMEMTYPE_SHFT	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_BMSK	= 0x8,
+	M_WR_CMD_OVERRIDE_OVERRIDE_ASHARED_SHFT	= 0x3,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_BMSK	= 0x4,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AREDIRECT_SHFT	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_BMSK	= 0x2,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AOOO_SHFT	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_BMSK	= 0x1,
+	M_WR_CMD_OVERRIDE_OVERRIDE_AINNERSHARED_SHFT	= 0x0,
+};
+
+#define M_BKE_EN_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000300)
+enum bimc_m_bke_en {
+	M_BKE_EN_RMSK			= 0x1,
+	M_BKE_EN_EN_BMSK		= 0x1,
+	M_BKE_EN_EN_SHFT		= 0x0,
+};
+
+/* Grant Period registers */
+#define M_BKE_GP_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000304)
+enum bimc_m_bke_grant_period {
+	M_BKE_GP_RMSK		= 0x3ff,
+	M_BKE_GP_GP_BMSK	= 0x3ff,
+	M_BKE_GP_GP_SHFT	= 0x0,
+};
+
+/* Grant count register.
+ * The Grant count register represents a signed 16-bit
+ * value; the usable range here is 0-0x7fff.
+ */
+#define M_BKE_GC_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000308)
+enum bimc_m_bke_grant_count {
+	M_BKE_GC_RMSK			= 0xffff,
+	M_BKE_GC_GC_BMSK		= 0xffff,
+	M_BKE_GC_GC_SHFT		= 0x0,
+};
+
+/* Threshold High Registers */
+#define M_BKE_THH_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000320)
+enum bimc_m_bke_thresh_high {
+	M_BKE_THH_RMSK		= 0xffff,
+	M_BKE_THH_THRESH_BMSK	= 0xffff,
+	M_BKE_THH_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Medium Registers */
+#define M_BKE_THM_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000324)
+enum bimc_m_bke_thresh_medium {
+	M_BKE_THM_RMSK		= 0xffff,
+	M_BKE_THM_THRESH_BMSK	= 0xffff,
+	M_BKE_THM_THRESH_SHFT	= 0x0,
+};
+
+/* Threshold Low Registers */
+#define M_BKE_THL_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000328)
+enum bimc_m_bke_thresh_low {
+	M_BKE_THL_RMSK			= 0xffff,
+	M_BKE_THL_THRESH_BMSK		= 0xffff,
+	M_BKE_THL_THRESH_SHFT		= 0x0,
+};
+
+#define NUM_HEALTH_LEVEL	(4)
+#define M_BKE_HEALTH_0_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000340)
+enum bimc_m_bke_health_0 {
+	M_BKE_HEALTH_0_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_1_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000344)
+enum bimc_m_bke_health_1 {
+	M_BKE_HEALTH_1_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_1_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_1_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_1_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_2_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x00000348)
+enum bimc_m_bke_health_2 {
+	M_BKE_HEALTH_2_CONFIG_RMSK			= 0x80000303,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_BMSK		= 0x80000000,
+	M_BKE_HEALTH_2_CONFIG_LIMIT_CMDS_SHFT		= 0x1f,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_BMSK		= 0x300,
+	M_BKE_HEALTH_2_CONFIG_AREQPRIO_SHFT		= 0x8,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_2_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define M_BKE_HEALTH_3_CONFIG_ADDR(b, n) \
+	(M_REG_BASE(b) + (0x4000 * (n)) + 0x0000034c)
+enum bimc_m_bke_health_3 {
+	M_BKE_HEALTH_3_CONFIG_RMSK			= 0x303,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_BMSK	= 0x300,
+	M_BKE_HEALTH_3_CONFIG_AREQPRIO_SHFT	= 0x8,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_BMSK		= 0x3,
+	M_BKE_HEALTH_3_CONFIG_PRIOLVL_SHFT		= 0x0,
+};
+
+#define BKE_HEALTH_MASK \
+	(M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK |\
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK)
+
+#define BKE_HEALTH_VAL(limit, areq, plvl) \
+	((((limit) << M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_LIMIT_CMDS_BMSK) | \
+	(((areq) << M_BKE_HEALTH_0_CONFIG_AREQPRIO_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_AREQPRIO_BMSK) | \
+	(((plvl) << M_BKE_HEALTH_0_CONFIG_PRIOLVL_SHFT) & \
+	M_BKE_HEALTH_0_CONFIG_PRIOLVL_BMSK))
+
+#define MAX_GRANT_PERIOD \
+	(M_BKE_GP_GP_BMSK >> \
+	M_BKE_GP_GP_SHFT)
+
+#define MAX_GC \
+	(M_BKE_GC_GC_BMSK >> \
+	(M_BKE_GC_GC_SHFT + 1))
+
+static int bimc_div(int64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+static void set_bke_en(void __iomem *addr, uint32_t index,
+		bool req)
+{
+	uint32_t old_val, new_val;
+
+	old_val = readl_relaxed(M_BKE_EN_ADDR(addr, index));
+	new_val = req << M_BKE_EN_EN_SHFT;
+	if ((old_val & M_BKE_EN_RMSK) == (new_val))
+		return;
+	writel_relaxed(((old_val & ~(M_BKE_EN_EN_BMSK)) | (new_val &
+				M_BKE_EN_EN_BMSK)), M_BKE_EN_ADDR(addr, index));
+	/* Ensure that the BKE enable bit is programmed before returning */
+	wmb();
+}
+
+static void set_health_reg(void __iomem *addr, uint32_t rmsk,
+	uint8_t index, struct msm_bus_bimc_qos_mode *qmode)
+{
+	uint32_t reg_val, val0, val;
+
+	/* Note: addr has already been computed for the right mas_index */
+	reg_val = readl_relaxed(addr) & rmsk;
+	val0 = BKE_HEALTH_VAL(qmode->rl.qhealth[index].limit_commands,
+		qmode->rl.qhealth[index].areq_prio,
+		qmode->rl.qhealth[index].prio_level);
+	val = ((reg_val & (~(BKE_HEALTH_MASK))) | (val0 & BKE_HEALTH_MASK));
+	writel_relaxed(val, addr);
+	/*
+	 * Ensure that the priorities for regulator/limiter modes
+	 * are set before returning
+	 */
+	wmb();
+}
+
+static void msm_bus_bimc_set_qos_prio(void __iomem *base,
+	uint32_t mas_index, uint8_t qmode_sel,
+	struct msm_bus_bimc_qos_mode *qmode)
+{
+
+	switch (qmode_sel) {
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+	case BIMC_QOS_MODE_LIMITER:
+		set_health_reg(M_BKE_HEALTH_3_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_3_CONFIG_RMSK, 3, qmode);
+		set_health_reg(M_BKE_HEALTH_2_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_2_CONFIG_RMSK, 2, qmode);
+		set_health_reg(M_BKE_HEALTH_1_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_1_CONFIG_RMSK, 1, qmode);
+		set_health_reg(M_BKE_HEALTH_0_CONFIG_ADDR(base,
+			mas_index), M_BKE_HEALTH_0_CONFIG_RMSK, 0, qmode);
+		set_bke_en(base, mas_index, true);
+		break;
+	case BIMC_QOS_MODE_BYPASS:
+		set_bke_en(base, mas_index, false);
+		break;
+	default:
+		break;
+	}
+}
+
+static void set_qos_bw_regs(void __iomem *baddr, uint32_t mas_index,
+	int32_t th, int32_t tm, int32_t tl, uint32_t gp,
+	uint32_t gc)
+{
+	int32_t reg_val, val;
+	int32_t bke_reg_val;
+	int16_t val2;
+
+	/* Disable BKE before writing to registers as per spec */
+	bke_reg_val = readl_relaxed(M_BKE_EN_ADDR(baddr, mas_index));
+	writel_relaxed((bke_reg_val & ~(M_BKE_EN_EN_BMSK)),
+		M_BKE_EN_ADDR(baddr, mas_index));
+
+	/* Write the calculated register values */
+	reg_val = readl_relaxed(M_BKE_GP_ADDR(baddr, mas_index))
+		& M_BKE_GP_RMSK;
+	val =  gp << M_BKE_GP_GP_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GP_GP_BMSK)) | (val &
+		M_BKE_GP_GP_BMSK)), M_BKE_GP_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_GC_ADDR(baddr, mas_index)) &
+		M_BKE_GC_RMSK;
+	val =  gc << M_BKE_GC_GC_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_GC_GC_BMSK)) | (val &
+		M_BKE_GC_GC_BMSK)), M_BKE_GC_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THH_ADDR(baddr, mas_index)) &
+		M_BKE_THH_RMSK;
+	val =  th << M_BKE_THH_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THH_THRESH_BMSK)) | (val &
+		M_BKE_THH_THRESH_BMSK)), M_BKE_THH_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THM_ADDR(baddr, mas_index)) &
+		M_BKE_THM_RMSK;
+	val2 =	tm << M_BKE_THM_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THM_THRESH_BMSK)) | (val2 &
+		M_BKE_THM_THRESH_BMSK)), M_BKE_THM_ADDR(baddr, mas_index));
+
+	reg_val = readl_relaxed(M_BKE_THL_ADDR(baddr, mas_index)) &
+		M_BKE_THL_RMSK;
+	val2 =	tl << M_BKE_THL_THRESH_SHFT;
+	writel_relaxed(((reg_val & ~(M_BKE_THL_THRESH_BMSK)) |
+		(val2 & M_BKE_THL_THRESH_BMSK)), M_BKE_THL_ADDR(baddr,
+		mas_index));
+
+	/* Ensure that all bandwidth register writes have completed
+	 * before returning
+	 */
+	wmb();
+}
+
+static void bimc_set_static_qos_bw(void __iomem *base, unsigned int qos_freq,
+	int mport, struct msm_bus_bimc_qos_bw *qbw)
+{
+	int32_t bw_mbps, thh = 0, thm, thl, gc;
+	int32_t gp;
+	u64 temp;
+
+	if (qos_freq == 0) {
+		MSM_BUS_DBG("No QoS Frequency.\n");
+		return;
+	}
+
+	if (!(qbw->bw && qbw->gp)) {
+		MSM_BUS_DBG("No QoS Bandwidth or Window size\n");
+		return;
+	}
+
+	/* Convert bandwidth to MBPS */
+	temp = qbw->bw;
+	bimc_div(&temp, 1000000);
+	bw_mbps = temp;
+
+	/* Grant period in clock cycles.
+	 * The grant period from the bandwidth structure is in
+	 * nanoseconds and the QoS frequency is in kHz, so divide
+	 * by 1000 * NSEC_PER_USEC to get clock cycles.
+	 */
+	gp = (qos_freq * qbw->gp) / (1000 * NSEC_PER_USEC);
+
+	/* Grant count = BW in MBps * Grant period
+	 * in microseconds
+	 */
+	gc = bw_mbps * (qbw->gp / NSEC_PER_USEC);
+	gc = min(gc, MAX_GC);
+
+	/* Medium threshold = -((Medium Threshold percentage *
+	 * Grant count) / 100)
+	 */
+	thm = -((qbw->thmp * gc) / 100);
+	qbw->thm = thm;
+
+	/* Low threshold = -(Grant count) */
+	thl = -gc;
+	qbw->thl = thl;
+
+	MSM_BUS_DBG("%s: BKE parameters: gp %d, gc %d, thm %d thl %d thh %d",
+			__func__, gp, gc, thm, thl, thh);
+
+	set_qos_bw_regs(base, mport, thh, thm, thl, gp, gc);
+}
+
+static int msm_bus_bimc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int mode;
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to limit\n");
+		return 0;
+	}
+
+	if ((enable_lim == THROTTLE_ON) && lim_bw) {
+		mode =  BIMC_QOS_MODE_LIMITER;
+
+		qmode.rl.qhealth[0].limit_commands = 1;
+		qmode.rl.qhealth[1].limit_commands = 0;
+		qmode.rl.qhealth[2].limit_commands = 0;
+		qmode.rl.qhealth[3].limit_commands = 0;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			struct msm_bus_bimc_qos_bw qbw;
+			/* Update bandwidth if the limit has changed */
+			if (info->node_info->lim_bw != lim_bw) {
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = lim_bw;
+				qbw.gp = info->node_info->qos_params.gp;
+				qbw.thmp = info->node_info->qos_params.thmp;
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->node_info->qport[i], &qbw);
+			}
+		}
+		info->node_info->lim_bw = lim_bw;
+	} else {
+		mode = info->node_info->qos_params.mode;
+		if (mode != BIMC_QOS_MODE_BYPASS) {
+			for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+				qmode.rl.qhealth[i].prio_level =
+					info->node_info->qos_params.prio_lvl;
+				qmode.rl.qhealth[i].areq_prio =
+					info->node_info->qos_params.prio_rd;
+			}
+		}
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+				mode, &qmode);
+	return 0;
+}
+
+static bool msm_bus_bimc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == BIMC_QOS_MODE_LIMITER)
+		|| (mode == BIMC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_bimc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int i;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+
+	if (ZERO_OR_NULL_PTR(info->node_info->qport)) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		return 0;
+	}
+
+	switch (info->node_info->qos_params.mode) {
+		/* For now, Fixed and Regulator are handled the same way. */
+	case BIMC_QOS_MODE_FIXED:
+	case BIMC_QOS_MODE_REGULATOR:
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	case BIMC_QOS_MODE_LIMITER:
+		qmode.rl.qhealth[0].limit_commands = 1;
+		qmode.rl.qhealth[1].limit_commands = 0;
+		qmode.rl.qhealth[2].limit_commands = 0;
+		qmode.rl.qhealth[3].limit_commands = 0;
+		for (i = 0; i < NUM_HEALTH_LEVEL; i++) {
+			qmode.rl.qhealth[i].prio_level =
+				info->node_info->qos_params.prio_lvl;
+			qmode.rl.qhealth[i].areq_prio =
+				info->node_info->qos_params.prio_rd;
+		}
+		break;
+	default:
+		break;
+	}
+
+
+	for (i = 0; i < info->node_info->num_qports; i++)
+		msm_bus_bimc_set_qos_prio(qos_base, info->node_info->qport[i],
+			info->node_info->qos_params.mode, &qmode);
+
+	return 0;
+}
+
+static int msm_bus_bimc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq)
+{
+	struct msm_bus_bimc_qos_bw qbw;
+	struct msm_bus_bimc_qos_mode qmode = {0};
+	int i;
+	int64_t bw = 0;
+	int ret = 0;
+	struct msm_bus_node_info_type *info = dev->node_info;
+	int mode;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == BIMC_QOS_MODE_LIMITER))) {
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		MSM_BUS_DBG("BIMC: Update mas_bw for ID: %d -> %llu\n",
+				info->id, bw);
+
+		if (!info->qport) {
+			MSM_BUS_DBG("No qos ports to update!\n");
+			goto exit_set_bw;
+		}
+
+		qbw.bw = bw + info->qos_params.bw_buffer;
+
+		/* Default to gp of 5us */
+		qbw.gp = (info->qos_params.gp ?
+				info->qos_params.gp : 5000);
+		/* Default to thmp of 50% */
+		qbw.thmp = (info->qos_params.thmp ?
+				info->qos_params.thmp : 50);
+		/*
+		 * If the BW vote is 0 then set the QoS mode to
+		 * Fixed/0/0.
+		 */
+		if (bw) {
+			qmode.rl.qhealth[0].limit_commands = 1;
+			qmode.rl.qhealth[1].limit_commands = 0;
+			qmode.rl.qhealth[2].limit_commands = 0;
+			qmode.rl.qhealth[3].limit_commands = 0;
+			mode = info->qos_params.mode;
+		} else {
+			mode =	BIMC_QOS_MODE_FIXED;
+		}
+
+		for (i = 0; i < info->num_qports; i++) {
+			msm_bus_bimc_set_qos_prio(qos_base,
+				info->qport[i], mode, &qmode);
+			if (bw)
+				bimc_set_static_qos_bw(qos_base, qos_freq,
+					info->qport[i], &qbw);
+		}
+	}
+exit_set_bw:
+	return ret;
+}
+
+int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_bimc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_bimc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_bimc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg =
+					msm_bus_bimc_update_bw_reg;
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_bimc_set_ops);
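The health registers are programmed with a read-modify-write that touches only the LIMIT_CMDS/AREQPRIO/PRIOLVL fields (see set_health_reg() and BKE_HEALTH_VAL() above). A standalone sketch of that field packing, using the HEALTH_0 masks and made-up register contents:

/*
 * Illustrative-only sketch of the BKE_HEALTH_VAL()/BKE_HEALTH_MASK
 * read-modify-write; the starting register value is invented.
 */
#include <stdio.h>
#include <stdint.h>

#define LIMIT_CMDS_BMSK	0x80000000u
#define LIMIT_CMDS_SHFT	0x1f
#define AREQPRIO_BMSK	0x300u
#define AREQPRIO_SHFT	0x8
#define PRIOLVL_BMSK	0x3u
#define PRIOLVL_SHFT	0x0

#define HEALTH_MASK	(LIMIT_CMDS_BMSK | AREQPRIO_BMSK | PRIOLVL_BMSK)
#define HEALTH_VAL(limit, areq, plvl) \
	((((limit) << LIMIT_CMDS_SHFT) & LIMIT_CMDS_BMSK) | \
	 (((areq) << AREQPRIO_SHFT) & AREQPRIO_BMSK) | \
	 (((plvl) << PRIOLVL_SHFT) & PRIOLVL_BMSK))

int main(void)
{
	uint32_t reg = 0x00000102u;	/* pretend current HEALTH_n contents */
	/* limit_commands = 1, areq_prio = 2, prio_level = 1 */
	uint32_t val = (reg & ~HEALTH_MASK) |
		       (HEALTH_VAL(1u, 2u, 1u) & HEALTH_MASK);

	printf("0x%08x -> 0x%08x\n", reg, val);	/* 0x00000102 -> 0x80000201 */
	return 0;
}

Bits outside BKE_HEALTH_MASK are preserved, which is why the driver can reprogram priorities without disturbing the rest of the register.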
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_client_api.c b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
new file mode 100644
index 0000000..0cbc417
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_client_api.c
@@ -0,0 +1,175 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_arb_ops arb_ops;
+
+/**
+ * msm_bus_scale_register_client() - Register a client with the msm bus
+ * driver
+ * @pdata: Platform data of the client, containing src, dest, ab, ib.
+ * Return non-zero value in case of success, 0 in case of failure.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+	if (arb_ops.register_client)
+		return arb_ops.register_client(pdata);
+	pr_err("%s: Bus driver not ready.",
+			__func__);
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_scale_register_client);
+
+/**
+ * msm_bus_scale_client_update_request() - Update the request for bandwidth
+ * from a particular client
+ *
+ * @cl: Handle to the client
+ * @index: Index into the usecase vector whose bandwidth and clock values
+ * are to be applied
+ */
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+	if (arb_ops.update_request)
+		return arb_ops.update_request(cl, index);
+	pr_err("%s: Bus driver not ready.",
+			__func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_request);
+
+/**
+ * msm_bus_scale_client_update_context() - Update the context for a client
+ * @cl: Handle to the client
+ * @active_only: Bool to indicate dual context or active-only context.
+ * @ctx_idx: Voting index to be used when switching contexts.
+ */
+int msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx)
+{
+	if (arb_ops.update_context)
+		return arb_ops.update_context(cl, active_only, ctx_idx);
+
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_client_update_context);
+
+/**
+ * msm_bus_scale_unregister_client() - Unregister the client from the bus driver
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister_client(uint32_t cl)
+{
+	if (arb_ops.unregister_client)
+		arb_ops.unregister_client(cl);
+	else {
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+	}
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister_client);
+
+/**
+ * msm_bus_scale_register() - Register a client with the msm bus
+ * driver
+ *
+ * @mas: Master ID
+ * @slv: Slave ID
+ * @name: descriptive name for this client
+ * @active_only: Whether or not this bandwidth vote should only be
+ *               effective while the application processor is active.
+ *
+ * Client data contains the vectors specifying arbitrated bandwidth (ab)
+ * and instantaneous bandwidth (ib) requested between a particular
+ * src and dest.
+ */
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name, bool active_only)
+{
+	if (arb_ops.register_cl)
+		return arb_ops.register_cl(mas, slv, name, active_only);
+	pr_err("%s: Bus driver not ready.",
+			__func__);
+	return ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(msm_bus_scale_register);
+
+/**
+ * msm_bus_scale_update_bw() - Update the request for bandwidth
+ * from a particular client
+ *
+ * @cl: Handle to the client
+ * @ab: Arbitrated bandwidth being requested
+ * @ib: Instantaneous bandwidth being requested
+ */
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	if (arb_ops.update_bw)
+		return arb_ops.update_bw(cl, ab, ib);
+	pr_err("%s: Bus driver not ready.", __func__);
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw);
+
+/**
+ * msm_bus_scale_update_bw_context() - Update the bandwidth votes of a client
+ * in both the active and dual contexts
+ * @cl: Handle to the client
+ * @act_ab: The average bandwidth (AB) in Bytes/s to be used in active
+ *          context.
+ * @act_ib: The instantaneous bandwidth (IB) in Bytes/s to be used in active
+ *          context.
+ * @slp_ib: The instantaneous bandwidth (IB) in Bytes/s to be used in dual
+ *          context.
+ * @slp_ab: The average bandwidth (AB) in Bytes/s to be used in dual
+ *          context.
+ */
+int
+msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+{
+	if (arb_ops.update_bw_context)
+		return arb_ops.update_bw_context(cl, act_ab, act_ib,
+							slp_ab, slp_ib);
+
+	return -EPROBE_DEFER;
+}
+EXPORT_SYMBOL(msm_bus_scale_update_bw_context);
+
+/**
+ * msm_bus_scale_unregister() - Unregister a client handle obtained from
+ * msm_bus_scale_register()
+ *
+ * @cl: Handle to the client
+ */
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+	if (arb_ops.unregister)
+		arb_ops.unregister(cl);
+	else
+		pr_err("%s: Bus driver not ready.",
+				__func__);
+}
+EXPORT_SYMBOL(msm_bus_scale_unregister);
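Every wrapper in msm_bus_client_api.c only forwards to the arb_ops table, which a backend fills in when it probes; until then the wrappers return -EPROBE_DEFER or log "Bus driver not ready". A minimal userspace sketch of that indirection, with purely illustrative names:

/*
 * Illustrative-only sketch of the arb_ops forwarding pattern; names and
 * the EPROBE_DEFER value are redefined here just for the sketch.
 */
#include <stdio.h>

#define EPROBE_DEFER	517	/* kernel value, redefined for this sketch */

struct arb_ops_sketch {
	int (*update_request)(unsigned int cl, unsigned int index);
};

static struct arb_ops_sketch ops;	/* empty until a backend registers */

static int client_update_request(unsigned int cl, unsigned int index)
{
	if (ops.update_request)
		return ops.update_request(cl, index);
	return -EPROBE_DEFER;		/* bus driver not ready yet */
}

static int backend_update_request(unsigned int cl, unsigned int index)
{
	printf("backend: cl=%u index=%u\n", cl, index);
	return 0;
}

int main(void)
{
	printf("before probe: %d\n", client_update_request(1, 0));
	ops.update_request = backend_update_request;	/* backend probes */
	printf("after probe:  %d\n", client_update_request(1, 0));
	return 0;
}

The backend that actually populates arb_ops is registered elsewhere in the driver and is not part of this excerpt.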
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.c b/drivers/soc/qcom/msm_bus/msm_bus_core.c
new file mode 100644
index 0000000..0a9d4d3
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.c
@@ -0,0 +1,127 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/radix-tree.h>
+#include <linux/clk.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include "msm_bus_core.h"
+
+static atomic_t num_fab = ATOMIC_INIT(0);
+
+int msm_bus_get_num_fab(void)
+{
+	return atomic_read(&num_fab);
+}
+
+int msm_bus_device_match(struct device *dev, void *id)
+{
+	struct msm_bus_fabric_device *fabdev = to_msm_bus_fabric_device(dev);
+
+	if (!fabdev) {
+		MSM_BUS_WARN("Fabric %p returning 0\n", fabdev);
+		return 0;
+	}
+	return fabdev->id == *(int *)id;
+}
+
+static void msm_bus_release(struct device *device)
+{
+}
+
+struct bus_type msm_bus_type = {
+	.name      = "msm-bus-type",
+};
+EXPORT_SYMBOL(msm_bus_type);
+
+/**
+ * msm_bus_get_fabric_device() - Search for the fabric device with the
+ * given fabric id on the msm bus
+ * @fabid: Fabric id
+ * Return: Pointer to the fabric device, or NULL if it is not found
+ */
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid)
+{
+	struct device *dev;
+	struct msm_bus_fabric_device *fabric;
+
+	dev = bus_find_device(&msm_bus_type, NULL, (void *)&fabid,
+		msm_bus_device_match);
+	if (!dev)
+		return NULL;
+	fabric = to_msm_bus_fabric_device(dev);
+	return fabric;
+}
+
+/**
+ * msm_bus_fabric_device_register() - Registers a fabric on msm bus
+ * @fabdev: Fabric device to be registered
+ */
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabdev)
+{
+	int ret = 0;
+
+	fabdev->dev.bus = &msm_bus_type;
+	fabdev->dev.release = msm_bus_release;
+	ret = dev_set_name(&fabdev->dev, "%s", fabdev->name);
+	if (ret) {
+		MSM_BUS_ERR("error setting dev name\n");
+		goto err;
+	}
+
+	ret = device_register(&fabdev->dev);
+	if (ret < 0) {
+		MSM_BUS_ERR("error registering device%d %s\n",
+				ret, fabdev->name);
+		goto err;
+	}
+	atomic_inc(&num_fab);
+err:
+	return ret;
+}
+
+/**
+ * msm_bus_fabric_device_unregister() - Unregister a fabric device
+ * from the msm bus
+ * @fabdev: Fabric device to be unregistered
+ */
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabdev)
+{
+	device_unregister(&fabdev->dev);
+	atomic_dec(&num_fab);
+}
+
+static void __exit msm_bus_exit(void)
+{
+	bus_unregister(&msm_bus_type);
+}
+
+static int __init msm_bus_init(void)
+{
+	int retval = 0;
+
+	retval = bus_register(&msm_bus_type);
+	if (retval)
+		MSM_BUS_ERR("bus_register error! %d\n",
+			retval);
+	return retval;
+}
+postcore_initcall(msm_bus_init);
+module_exit(msm_bus_exit);
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:msm_bus");
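msm_bus_get_fabric_device() relies on bus_find_device() walking every device registered on msm_bus_type and applying msm_bus_device_match() until the fabric id matches. A plain-C sketch of that lookup pattern, with hypothetical fabric entries:

/*
 * Illustrative-only sketch of an id-based lookup with a match callback;
 * the entries and names below are invented.
 */
#include <stdio.h>
#include <stddef.h>

struct fab_sketch {
	int id;
	const char *name;
};

static struct fab_sketch fabrics[] = {
	{ 0, "fab-apps" }, { 1, "fab-sys" }, { 2, "fab-mm" },
};

static int match_by_id(const struct fab_sketch *fab, const void *data)
{
	return fab->id == *(const int *)data;
}

static struct fab_sketch *find_fabric(int fabid)
{
	for (size_t i = 0; i < sizeof(fabrics) / sizeof(fabrics[0]); i++)
		if (match_by_id(&fabrics[i], &fabid))
			return &fabrics[i];
	return NULL;
}

int main(void)
{
	struct fab_sketch *fab = find_fabric(1);

	printf("%s\n", fab ? fab->name : "not found");	/* prints fab-sys */
	return 0;
}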
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_core.h b/drivers/soc/qcom/msm_bus/msm_bus_core.h
new file mode 100644
index 0000000..24881fd
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_core.h
@@ -0,0 +1,416 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_CORE_H
+#define _ARCH_ARM_MACH_MSM_BUS_CORE_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/radix-tree.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+
+#define MSM_BUS_DBG(msg, ...) \
+	pr_debug(msg, ## __VA_ARGS__)
+#define MSM_BUS_ERR(msg, ...) \
+	pr_err(msg, ## __VA_ARGS__)
+#define MSM_BUS_WARN(msg, ...) \
+	pr_warn(msg, ## __VA_ARGS__)
+#define MSM_FAB_ERR(msg, ...) \
+	dev_err(&fabric->fabdev.dev, msg, ## __VA_ARGS__)
+
+#define IS_MASTER_VALID(mas) \
+	(((mas >= MSM_BUS_MASTER_FIRST) && (mas <= MSM_BUS_MASTER_LAST)) \
+	 ? 1 : 0)
+#define IS_SLAVE_VALID(slv) \
+	(((slv >= MSM_BUS_SLAVE_FIRST) && (slv <= MSM_BUS_SLAVE_LAST)) ? 1 : 0)
+
+#define INTERLEAVED_BW(fab_pdata, bw, ports) \
+	((fab_pdata->il_flag) ? ((bw < 0) \
+	? -msm_bus_div64((ports), (-bw)) : msm_bus_div64((ports), (bw))) : (bw))
+#define INTERLEAVED_VAL(fab_pdata, n) \
+	((fab_pdata->il_flag) ? (n) : 1)
+#define KBTOB(a) ((a) * 1000ULL)
+#define MAX_REG_NAME	(50)
+
+enum msm_bus_dbg_op_type {
+	MSM_BUS_DBG_UNREGISTER = -2,
+	MSM_BUS_DBG_REGISTER,
+	MSM_BUS_DBG_OP = 1,
+};
+
+enum msm_bus_hw_sel {
+	MSM_BUS_RPM = 0,
+	MSM_BUS_NOC,
+	MSM_BUS_BIMC,
+};
+
+struct msm_bus_arb_ops {
+	uint32_t (*register_client)(struct msm_bus_scale_pdata *pdata);
+	int (*update_request)(uint32_t cl, unsigned int index);
+	int (*update_context)(uint32_t cl, bool active_only,
+						unsigned int ctx_idx);
+	void (*unregister_client)(uint32_t cl);
+	struct msm_bus_client_handle*
+		(*register_cl)(uint32_t mas, uint32_t slv, char *name,
+						bool active_only);
+	int (*update_bw)(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+	void (*unregister)(struct msm_bus_client_handle *cl);
+	int (*update_bw_context)(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab);
+};
+
+enum {
+	SLAVE_NODE,
+	MASTER_NODE,
+	CLK_NODE,
+	NR_LIM_NODE,
+};
+
+
+extern struct bus_type msm_bus_type;
+extern struct msm_bus_arb_ops arb_ops;
+extern void msm_bus_arb_setops_legacy(struct msm_bus_arb_ops *arb_ops);
+
+struct msm_bus_node_info {
+	unsigned int id;
+	unsigned int priv_id;
+	unsigned int mas_hw_id;
+	unsigned int slv_hw_id;
+	int gateway;
+	int *masterp;
+	int *qport;
+	int num_mports;
+	int *slavep;
+	int num_sports;
+	int *tier;
+	int num_tiers;
+	int ahb;
+	int hw_sel;
+	const char *slaveclk[NUM_CTX];
+	const char *memclk[NUM_CTX];
+	const char *iface_clk_node;
+	unsigned int buswidth;
+	unsigned int ws;
+	unsigned int mode;
+	unsigned int perm_mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int num_thresh;
+	u64 *th;
+	u64 cur_lim_bw;
+	unsigned int mode_thresh;
+	bool dual_conf;
+	u64 *bimc_bw;
+	bool nr_lim;
+	u32 ff;
+	bool rt_mas;
+	u32 bimc_gp;
+	u32 bimc_thmp;
+	u64 floor_bw;
+	const char *name;
+};
+
+struct path_node {
+	uint64_t clk[NUM_CTX];
+	uint64_t bw[NUM_CTX];
+	uint64_t *sel_clk;
+	uint64_t *sel_bw;
+	int next;
+};
+
+struct msm_bus_link_info {
+	uint64_t clk[NUM_CTX];
+	uint64_t *sel_clk;
+	uint64_t memclk;
+	int64_t bw[NUM_CTX];
+	int64_t *sel_bw;
+	int *tier;
+	int num_tiers;
+};
+
+struct nodeclk {
+	struct clk *clk;
+	struct regulator *reg;
+	uint64_t rate;
+	bool dirty;
+	bool enable_only_clk;
+	bool setrate_only_clk;
+	bool enable;
+	char reg_name[MAX_REG_NAME];
+};
+
+struct msm_bus_inode_info {
+	struct msm_bus_node_info *node_info;
+	uint64_t max_bw;
+	uint64_t max_clk;
+	uint64_t cur_lim_bw;
+	uint64_t cur_prg_bw;
+	struct msm_bus_link_info link_info;
+	int num_pnodes;
+	struct path_node *pnode;
+	int commit_index;
+	struct nodeclk nodeclk[NUM_CTX];
+	struct nodeclk memclk[NUM_CTX];
+	struct nodeclk iface_clk;
+	void *hw_data;
+};
+
+struct msm_bus_node_hw_info {
+	bool dirty;
+	unsigned int hw_id;
+	uint64_t bw;
+};
+
+struct msm_bus_hw_algorithm {
+	int (*allocate_commit_data)(struct msm_bus_fabric_registration
+		*fab_pdata, void **cdata, int ctx);
+	void *(*allocate_hw_data)(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *fab_pdata);
+	void (*node_init)(void *hw_data, struct msm_bus_inode_info *info);
+	void (*free_commit_data)(void *cdata);
+	void (*update_bw)(struct msm_bus_inode_info *hop,
+		struct msm_bus_inode_info *info,
+		struct msm_bus_fabric_registration *fab_pdata,
+		void *sel_cdata, int *master_tiers,
+		int64_t add_bw);
+	void (*fill_cdata_buffer)(int *curr, char *buf, const int max_size,
+		void *cdata, int nmasters, int nslaves, int ntslaves);
+	int (*commit)(struct msm_bus_fabric_registration
+		*fab_pdata, void *hw_data, void **cdata);
+	int (*port_unhalt)(uint32_t haltid, uint8_t mport);
+	int (*port_halt)(uint32_t haltid, uint8_t mport);
+	void (*config_master)(struct msm_bus_fabric_registration *fab_pdata,
+		struct msm_bus_inode_info *info,
+		uint64_t req_clk, uint64_t req_bw);
+	void (*config_limiter)(struct msm_bus_fabric_registration *fab_pdata,
+		struct msm_bus_inode_info *info);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct msm_bus_fabric_device {
+	int id;
+	const char *name;
+	struct device dev;
+	const struct msm_bus_fab_algorithm *algo;
+	const struct msm_bus_board_algorithm *board_algo;
+	struct msm_bus_hw_algorithm hw_algo;
+	int visited;
+	int num_nr_lim;
+	u64 nr_lim_thresh;
+	u32 eff_fact;
+};
+#define to_msm_bus_fabric_device(d) container_of(d, \
+		struct msm_bus_fabric_device, dev)
+
+struct msm_bus_fabric {
+	struct msm_bus_fabric_device fabdev;
+	int ahb;
+	void *cdata[NUM_CTX];
+	bool arb_dirty;
+	bool clk_dirty;
+	struct radix_tree_root fab_tree;
+	int num_nodes;
+	struct list_head gateways;
+	struct msm_bus_inode_info info;
+	struct msm_bus_fabric_registration *pdata;
+	void *hw_data;
+};
+#define to_msm_bus_fabric(d) container_of(d, \
+	struct msm_bus_fabric, fabdev)
+
+
+struct msm_bus_fab_algorithm {
+	int (*update_clks)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *pme, int index,
+		uint64_t curr_clk, uint64_t req_clk,
+		uint64_t bwsum, int flag, int ctx,
+		unsigned int cl_active_flag);
+	int (*port_halt)(struct msm_bus_fabric_device *fabdev, int portid);
+	int (*port_unhalt)(struct msm_bus_fabric_device *fabdev, int portid);
+	int (*commit)(struct msm_bus_fabric_device *fabdev);
+	struct msm_bus_inode_info *(*find_node)(struct msm_bus_fabric_device
+		*fabdev, int id);
+	struct msm_bus_inode_info *(*find_gw_node)(struct msm_bus_fabric_device
+		*fabdev, int id);
+	struct list_head *(*get_gw_list)(struct msm_bus_fabric_device *fabdev);
+	void (*update_bw)(struct msm_bus_fabric_device *fabdev, struct
+		msm_bus_inode_info * hop, struct msm_bus_inode_info *info,
+		int64_t add_bw, int *master_tiers, int ctx);
+	void (*config_master)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *info, uint64_t req_clk,
+		uint64_t req_bw);
+	void (*config_limiter)(struct msm_bus_fabric_device *fabdev,
+		struct msm_bus_inode_info *info);
+};
+
+struct msm_bus_board_algorithm {
+	int board_nfab;
+	void (*assign_iids)(struct msm_bus_fabric_registration *fabreg,
+		int fabid);
+	int (*get_iid)(int id);
+};
+
+/**
+ * Used to store the list of fabrics and other info to be
+ * maintained outside the fabric structure.
+ * Used while calculating paths, and to find fabric pointers
+ */
+struct msm_bus_fabnodeinfo {
+	struct list_head list;
+	struct msm_bus_inode_info *info;
+};
+
+struct msm_bus_client {
+	int id;
+	struct msm_bus_scale_pdata *pdata;
+	int *src_pnode;
+	int curr;
+	struct device **src_devs;
+};
+
+uint64_t msm_bus_div64(unsigned int width, uint64_t bw);
+int msm_bus_fabric_device_register(struct msm_bus_fabric_device *fabric);
+void msm_bus_fabric_device_unregister(struct msm_bus_fabric_device *fabric);
+struct msm_bus_fabric_device *msm_bus_get_fabric_device(int fabid);
+int msm_bus_get_num_fab(void);
+
+
+int msm_bus_hw_fab_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata);
+#if defined(CONFIG_MSM_RPM_SMD)
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata);
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves);
+#else
+static inline int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	return 0;
+}
+static inline int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+	return 0;
+}
+static inline void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf,
+	const int max_size, void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+}
+#endif
+
+int msm_bus_noc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+int msm_bus_bimc_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo);
+#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_QCOM_BUS_SCALING)
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t cl);
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op);
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata);
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib);
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata);
+
+#else
+static inline void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t cl)
+{
+}
+static inline void msm_bus_dbg_commit_data(const char *fabname,
+	void *cdata, int nmasters, int nslaves, int ntslaves,
+	int op)
+{
+}
+static inline void msm_bus_dbg_remove_client
+		(const struct msm_bus_client_handle *pdata)
+{
+}
+
+static inline int
+msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_CORESIGHT
+int msmbus_coresight_init(struct platform_device *pdev);
+void msmbus_coresight_remove(struct platform_device *pdev);
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node);
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev);
+#else
+static inline int msmbus_coresight_init(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static inline void msmbus_coresight_remove(struct platform_device *pdev)
+{
+}
+
+static inline int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node)
+{
+	return 0;
+}
+
+static inline void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+}
+#endif
+
+
+#ifdef CONFIG_OF
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata);
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev);
+static inline void msm_bus_board_set_nfab(struct msm_bus_fabric_registration
+		*pdata,	int nfab)
+{
+}
+#else
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+	int nfab);
+static inline void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+}
+
+static inline struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	return NULL;
+}
+#endif
+
+#endif /*_ARCH_ARM_MACH_MSM_BUS_CORE_H*/
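Most feature hooks in this header compile down to empty static inline stubs when the corresponding config option is off, so callers never need #ifdefs of their own. A tiny sketch of the pattern, with a hypothetical FEATURE_DBG switch standing in for options such as CONFIG_DEBUG_FS:

/*
 * Illustrative-only sketch of the config-stub pattern; FEATURE_DBG and
 * dbg_add_client() are invented names.
 */
#include <stdio.h>

/* #define FEATURE_DBG 1 */

#ifdef FEATURE_DBG
static int dbg_add_client(int id)
{
	printf("dbg: added client %d\n", id);
	return 0;
}
#else
static inline int dbg_add_client(int id)
{
	return 0;			/* compiled out: no-op, same prototype */
}
#endif

int main(void)
{
	return dbg_add_client(42);	/* the call site is identical either way */
}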
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
new file mode 100644
index 0000000..5908122
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg.c
@@ -0,0 +1,947 @@
+/* Copyright (c) 2010-2012, 2014-2016, The Linux Foundation. All rights
+ * reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/hrtimer.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/trace_msm_bus.h>
+
+#define MAX_BUFF_SIZE 4096
+#define FILL_LIMIT 128
+
+static struct dentry *clients;
+static struct dentry *dir;
+static DEFINE_MUTEX(msm_bus_dbg_fablist_lock);
+struct msm_bus_dbg_state {
+	uint32_t cl;
+	uint8_t enable;
+	uint8_t current_index;
+} clstate;
+
+struct msm_bus_cldata {
+	const struct msm_bus_scale_pdata *pdata;
+	const struct msm_bus_client_handle *handle;
+	int index;
+	uint32_t clid;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+struct msm_bus_fab_list {
+	const char *name;
+	int size;
+	struct dentry *file;
+	struct list_head list;
+	char buffer[MAX_BUFF_SIZE];
+};
+
+static char *rules_buf;
+
+LIST_HEAD(fabdata_list);
+LIST_HEAD(cl_list);
+
+/**
+ * The following structures and functions are used for
+ * the test-client which can be created at run-time.
+ */
+
+static struct msm_bus_vectors init_vectors[1];
+static struct msm_bus_vectors current_vectors[1];
+static struct msm_bus_vectors requested_vectors[1];
+
+static struct msm_bus_paths shell_client_usecases[] = {
+	{
+		.num_paths = ARRAY_SIZE(init_vectors),
+		.vectors = init_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(current_vectors),
+		.vectors = current_vectors,
+	},
+	{
+		.num_paths = ARRAY_SIZE(requested_vectors),
+		.vectors = requested_vectors,
+	},
+};
+
+static struct msm_bus_scale_pdata shell_client = {
+	.usecase = shell_client_usecases,
+	.num_usecases = ARRAY_SIZE(shell_client_usecases),
+	.name = "test-client",
+};
+
+static void msm_bus_dbg_init_vectors(void)
+{
+	init_vectors[0].src = -1;
+	init_vectors[0].dst = -1;
+	init_vectors[0].ab = 0;
+	init_vectors[0].ib = 0;
+	current_vectors[0].src = -1;
+	current_vectors[0].dst = -1;
+	current_vectors[0].ab = 0;
+	current_vectors[0].ib = 0;
+	requested_vectors[0].src = -1;
+	requested_vectors[0].dst = -1;
+	requested_vectors[0].ab = 0;
+	requested_vectors[0].ib = 0;
+	clstate.enable = 0;
+	clstate.current_index = 0;
+}
+
+static int msm_bus_dbg_update_cl_request(uint32_t cl)
+{
+	int ret = 0;
+
+	if (clstate.current_index < 2)
+		clstate.current_index = 2;
+	else {
+		clstate.current_index = 1;
+		current_vectors[0].ab = requested_vectors[0].ab;
+		current_vectors[0].ib = requested_vectors[0].ib;
+	}
+
+	if (clstate.enable) {
+		MSM_BUS_DBG("Updating request for shell client, index: %d\n",
+			clstate.current_index);
+		ret = msm_bus_scale_client_update_request(clstate.cl,
+			clstate.current_index);
+	} else
+		MSM_BUS_DBG("Enable bit not set. Skipping update request\n");
+
+	return ret;
+}
+
+static void msm_bus_dbg_unregister_client(uint32_t cl)
+{
+	MSM_BUS_DBG("Unregistering shell client\n");
+	msm_bus_scale_unregister_client(clstate.cl);
+	clstate.cl = 0;
+}
+
+static uint32_t msm_bus_dbg_register_client(void)
+{
+	int ret = 0;
+
+	if (init_vectors[0].src != requested_vectors[0].src) {
+		MSM_BUS_DBG("Shell client master changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+	if (init_vectors[0].dst != requested_vectors[0].dst) {
+		MSM_BUS_DBG("Shell client slave changed. Unregistering\n");
+		msm_bus_dbg_unregister_client(clstate.cl);
+	}
+
+	current_vectors[0].src = init_vectors[0].src;
+	requested_vectors[0].src = init_vectors[0].src;
+	current_vectors[0].dst = init_vectors[0].dst;
+	requested_vectors[0].dst = init_vectors[0].dst;
+
+	if (!clstate.enable) {
+		MSM_BUS_DBG("Enable bit not set, skipping registration: cl %d\n"
+			, clstate.cl);
+		return 0;
+	}
+
+	if (clstate.cl) {
+		MSM_BUS_DBG("Client already registered, skipping registration\n");
+		return clstate.cl;
+	}
+
+	MSM_BUS_DBG("Registering shell client\n");
+	ret = msm_bus_scale_register_client(&shell_client);
+	return ret;
+}
+
+static int msm_bus_dbg_mas_get(void  *data, u64 *val)
+{
+	*val = init_vectors[0].src;
+	MSM_BUS_DBG("Get master: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_mas_set(void  *data, u64 val)
+{
+	init_vectors[0].src = val;
+	MSM_BUS_DBG("Set master: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_mas_fops, msm_bus_dbg_mas_get,
+	msm_bus_dbg_mas_set, "%llu\n");
+
+static int msm_bus_dbg_slv_get(void  *data, u64 *val)
+{
+	*val = init_vectors[0].dst;
+	MSM_BUS_DBG("Get slave: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_slv_set(void  *data, u64 val)
+{
+	init_vectors[0].dst = val;
+	MSM_BUS_DBG("Set slave: %llu\n", val);
+	clstate.cl = msm_bus_dbg_register_client();
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_slv_fops, msm_bus_dbg_slv_get,
+	msm_bus_dbg_slv_set, "%llu\n");
+
+static int msm_bus_dbg_ab_get(void  *data, u64 *val)
+{
+	*val = requested_vectors[0].ab;
+	MSM_BUS_DBG("Get ab: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ab_set(void  *data, u64 val)
+{
+	requested_vectors[0].ab = val;
+	MSM_BUS_DBG("Set ab: %llu\n", val);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_ab_fops, msm_bus_dbg_ab_get,
+	msm_bus_dbg_ab_set, "%llu\n");
+
+static int msm_bus_dbg_ib_get(void  *data, u64 *val)
+{
+	*val = requested_vectors[0].ib;
+	MSM_BUS_DBG("Get ib: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_ib_set(void  *data, u64 val)
+{
+	requested_vectors[0].ib = val;
+	MSM_BUS_DBG("Set ib: %llu\n", val);
+	return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_ib_fops, msm_bus_dbg_ib_get,
+	msm_bus_dbg_ib_set, "%llu\n");
+
+static int msm_bus_dbg_en_get(void  *data, u64 *val)
+{
+	*val = clstate.enable;
+	MSM_BUS_DBG("Get enable: %llu\n", *val);
+	return 0;
+}
+
+static int msm_bus_dbg_en_set(void  *data, u64 val)
+{
+	int ret = 0;
+
+	clstate.enable = val;
+	if (clstate.enable) {
+		if (!clstate.cl) {
+			MSM_BUS_DBG("client: %u\n", clstate.cl);
+			clstate.cl = msm_bus_dbg_register_client();
+			if (clstate.cl)
+				ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		} else {
+			MSM_BUS_DBG("update request for cl: %u\n", clstate.cl);
+			ret = msm_bus_dbg_update_cl_request(clstate.cl);
+		}
+	}
+
+	MSM_BUS_DBG("Set enable: %llu\n", val);
+	return ret;
+}
+DEFINE_SIMPLE_ATTRIBUTE(shell_client_en_fops, msm_bus_dbg_en_get,
+	msm_bus_dbg_en_set, "%llu\n");
+
+/**
+ * The following functions are used for viewing the client data
+ * and changing the client request at run-time
+ */
+
+static ssize_t client_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	int bsize = 0;
+	uint32_t cl = (uint32_t)(uintptr_t)file->private_data;
+	struct msm_bus_cldata *cldata = NULL;
+	const struct msm_bus_client_handle *handle = file->private_data;
+	int found = 0;
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if ((cldata->clid == cl) ||
+			(cldata->handle && (cldata->handle == handle))) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+
+	bsize = cldata->size;
+	return simple_read_from_buffer(buf, count, ppos,
+		cldata->buffer, bsize);
+}
+
+static int client_data_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations client_data_fops = {
+	.open		= client_data_open,
+	.read		= client_data_read,
+};
+
+struct dentry *msm_bus_dbg_create(const char *name, mode_t mode,
+	struct dentry *dent, uint32_t clid)
+{
+	if (dent == NULL) {
+		MSM_BUS_DBG("debugfs not ready yet\n");
+		return NULL;
+	}
+	return debugfs_create_file(name, mode, dent, (void *)(uintptr_t)clid,
+		&client_data_fops);
+}
+
+int msm_bus_dbg_add_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->handle = pdata;
+	list_add_tail(&cldata->list, &cl_list);
+	return 0;
+}
+
+int msm_bus_dbg_rec_transaction(const struct msm_bus_client_handle *pdata,
+						u64 ab, u64 ib)
+{
+	struct msm_bus_cldata *cldata;
+	int i;
+	struct timespec ts;
+	bool found = false;
+	char *buf = NULL;
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		cldata->file = debugfs_create_file(pdata->name, S_IRUGO,
+				clients, (void *)pdata, &client_data_fops);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "master: ");
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->mas);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslave : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ", pdata->slv);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ab);
+
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ", ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	cldata->size = i;
+
+	trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name, pdata->mas, pdata->slv, ab, ib);
+
+	return i;
+}
+
+void msm_bus_dbg_remove_client(const struct msm_bus_client_handle *pdata)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->handle == pdata) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+}
+
+static int msm_bus_dbg_record_client(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid, struct dentry *file)
+{
+	struct msm_bus_cldata *cldata;
+
+	cldata = kzalloc(sizeof(struct msm_bus_cldata), GFP_KERNEL);
+	if (!cldata) {
+		MSM_BUS_DBG("Failed to allocate memory for client data\n");
+		return -ENOMEM;
+	}
+	cldata->pdata = pdata;
+	cldata->index = index;
+	cldata->clid = clid;
+	cldata->file = file;
+	cldata->size = 0;
+	list_add_tail(&cldata->list, &cl_list);
+	return 0;
+}
+
+static void msm_bus_dbg_free_client(uint32_t clid)
+{
+	struct msm_bus_cldata *cldata = NULL;
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			debugfs_remove(cldata->file);
+			list_del(&cldata->list);
+			kfree(cldata);
+			break;
+		}
+	}
+}
+
+static int msm_bus_dbg_fill_cl_buffer(const struct msm_bus_scale_pdata *pdata,
+	int index, uint32_t clid)
+{
+	int i = 0, j;
+	char *buf = NULL;
+	struct msm_bus_cldata *cldata = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->clid == clid) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return -ENOENT;
+
+	if (cldata->file == NULL) {
+		if (pdata->name == NULL) {
+			MSM_BUS_DBG("Client doesn't have a name\n");
+			return -EINVAL;
+		}
+		cldata->file = msm_bus_dbg_create(pdata->name, S_IRUGO,
+			clients, clid);
+	}
+
+	if (cldata->size < (MAX_BUFF_SIZE - FILL_LIMIT))
+		i = cldata->size;
+	else {
+		i = 0;
+		cldata->size = 0;
+	}
+	buf = cldata->buffer;
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "curr   : %d\n", index);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "masters: ");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].src);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nslaves : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%d  ",
+			pdata->usecase[index].vectors[j].dst);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nab     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ab);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\nib     : ");
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "%llu  ",
+			pdata->usecase[index].vectors[j].ib);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+
+	for (j = 0; j < pdata->usecase->num_paths; j++)
+		trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
+		pdata->name,
+		pdata->usecase[index].vectors[j].src,
+		pdata->usecase[index].vectors[j].dst,
+		pdata->usecase[index].vectors[j].ab,
+		pdata->usecase[index].vectors[j].ib);
+
+	cldata->index = index;
+	cldata->size = i;
+	return i;
+}
+
+static int msm_bus_dbg_update_request(struct msm_bus_cldata *cldata, int index)
+{
+	int ret = 0;
+
+	if ((index < 0) || (index >= cldata->pdata->num_usecases)) {
+		MSM_BUS_DBG("Invalid index!\n");
+		return -EINVAL;
+	}
+	ret = msm_bus_scale_client_update_request(cldata->clid, index);
+	return ret;
+}
+
+static ssize_t  msm_bus_dbg_update_request_write(struct file *file,
+	const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct msm_bus_cldata *cldata;
+	unsigned long index = 0;
+	int ret = 0;
+	char *chid;
+	char *buf = kmalloc(cnt + 1, GFP_KERNEL);
+	int found = 0;
+
+	if (!buf) {
+		MSM_BUS_ERR("Memory allocation for buffer failed\n");
+		return -ENOMEM;
+	}
+	if (cnt == 0) {
+		kfree(buf);
+		return 0;
+	}
+	if (copy_from_user(buf, ubuf, cnt)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+	buf[cnt] = '\0';
+	chid = buf;
+	MSM_BUS_DBG("buffer: %s\n size: %zu\n", buf, cnt);
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (IS_ERR_OR_NULL(cldata->pdata) ||
+				IS_ERR_OR_NULL(cldata->pdata->name))
+			continue;
+		if (strnstr(chid, cldata->pdata->name, cnt)) {
+			found = 1;
+			strsep(&chid, " ");
+			if (chid) {
+				ret = kstrtoul(chid, 10, &index);
+				if (ret) {
+					MSM_BUS_DBG("Index conversion failed\n");
+					kfree(buf);
+					return -EFAULT;
+				}
+			} else {
+				MSM_BUS_DBG("Error parsing input. Index not found\n");
+				found = 0;
+			}
+			break;
+		}
+	}
+
+	if (found)
+		msm_bus_dbg_update_request(cldata, index);
+	kfree(buf);
+	return cnt;
+}
+
+/**
+ * The following functions are used for viewing the commit data
+ * for each fabric
+ */
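+/*
+ * Per-fabric commit data is exported read-only via
+ * msm-bus-dbg/commit-data/<fabric-name>; the files are created in
+ * msm_bus_debugfs_init() below, assuming debugfs is mounted.
+ */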
+static ssize_t fabric_data_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+	int bsize = 0;
+	ssize_t ret;
+	const char *name = file->private_data;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, name) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+	bsize = fablist->size;
+	ret = simple_read_from_buffer(buf, count, ppos,
+		fablist->buffer, bsize);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static const struct file_operations fabric_data_fops = {
+	.open		= client_data_open,
+	.read		= fabric_data_read,
+};
+
+static ssize_t rules_dbg_read(struct file *file, char __user *buf,
+	size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+
+	memset(rules_buf, 0, MAX_BUFF_SIZE);
+	print_rules_buf(rules_buf, MAX_BUFF_SIZE);
+	ret = simple_read_from_buffer(buf, count, ppos,
+		rules_buf, MAX_BUFF_SIZE);
+	return ret;
+}
+
+static int rules_dbg_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations rules_dbg_fops = {
+	.open		= rules_dbg_open,
+	.read		= rules_dbg_read,
+};
+
+static int msm_bus_dbg_record_fabric(const char *fabname, struct dentry *file)
+{
+	struct msm_bus_fab_list *fablist;
+	int ret = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist = kzalloc(sizeof(struct msm_bus_fab_list), GFP_KERNEL);
+	if (!fablist) {
+		MSM_BUS_DBG("Failed to allocate memory for commit data\n");
+		ret =  -ENOMEM;
+		goto err;
+	}
+
+	fablist->name = fabname;
+	fablist->size = 0;
+	list_add_tail(&fablist->list, &fabdata_list);
+err:
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return ret;
+}
+
+static void msm_bus_dbg_free_fabric(const char *fabname)
+{
+	struct msm_bus_fab_list *fablist = NULL;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			debugfs_remove(fablist->file);
+			list_del(&fablist->list);
+			kfree(fablist);
+			break;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+
+static int msm_bus_dbg_fill_fab_buffer(const char *fabname,
+	void *cdata, int nmasters, int nslaves,
+	int ntslaves)
+{
+	int i;
+	char *buf = NULL;
+	struct msm_bus_fab_list *fablist = NULL;
+	struct timespec ts;
+	int found = 0;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		if (strcmp(fablist->name, fabname) == 0) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -ENOENT;
+	}
+
+	if (fablist->file == NULL) {
+		MSM_BUS_DBG("Fabric dbg entry does not exist\n");
+		mutex_unlock(&msm_bus_dbg_fablist_lock);
+		return -EFAULT;
+	}
+
+	if (fablist->size < MAX_BUFF_SIZE - 256)
+		i = fablist->size;
+	else {
+		i = 0;
+		fablist->size = 0;
+	}
+	buf = fablist->buffer;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	ts = ktime_to_timespec(ktime_get());
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n%ld.%09lu\n",
+		ts.tv_sec, ts.tv_nsec);
+
+	msm_bus_rpm_fill_cdata_buffer(&i, buf, MAX_BUFF_SIZE, cdata,
+		nmasters, nslaves, ntslaves);
+	i += scnprintf(buf + i, MAX_BUFF_SIZE - i, "\n");
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	fablist->size = i;
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+	return 0;
+}
+
+static const struct file_operations msm_bus_dbg_update_request_fops = {
+	.open = client_data_open,
+	.write = msm_bus_dbg_update_request_write,
+};
+
+static int msm_bus_dbg_dump_clients_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t msm_bus_dbg_dump_clients_read(struct file *file,
+	char __user *buf, size_t count, loff_t *ppos)
+{
+	int j, cnt;
+	char msg[50];
+	struct msm_bus_cldata *cldata = NULL;
+
+	cnt = scnprintf(msg, 50,
+		"\nDumping curent client votes to trace log\n");
+	if (*ppos)
+		goto exit_dump_clients_read;
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (IS_ERR_OR_NULL(cldata->pdata))
+			continue;
+		for (j = 0; j < cldata->pdata->usecase->num_paths; j++) {
+			if (cldata->index == -1)
+				continue;
+			trace_bus_client_status(
+			cldata->pdata->name,
+			cldata->pdata->usecase[cldata->index].vectors[j].src,
+			cldata->pdata->usecase[cldata->index].vectors[j].dst,
+			cldata->pdata->usecase[cldata->index].vectors[j].ab,
+			cldata->pdata->usecase[cldata->index].vectors[j].ib,
+			cldata->pdata->active_only);
+		}
+	}
+exit_dump_clients_read:
+	return simple_read_from_buffer(buf, count, ppos, msg, cnt);
+}
+
+static const struct file_operations msm_bus_dbg_dump_clients_fops = {
+	.open		= msm_bus_dbg_dump_clients_open,
+	.read		= msm_bus_dbg_dump_clients_read,
+};
+
+/**
+ * msm_bus_dbg_client_data() - Add debug data for clients
+ * @pdata: Platform data of the client
+ * @index: The current index or operation to be performed
+ * @clid: Client handle obtained during registration
+ */
+void msm_bus_dbg_client_data(struct msm_bus_scale_pdata *pdata, int index,
+	uint32_t clid)
+{
+	struct dentry *file = NULL;
+
+	if (index == MSM_BUS_DBG_REGISTER) {
+		msm_bus_dbg_record_client(pdata, index, clid, file);
+		if (!pdata->name) {
+			MSM_BUS_DBG("Cannot create debugfs entry. Null name\n");
+			return;
+		}
+	} else if (index == MSM_BUS_DBG_UNREGISTER) {
+		msm_bus_dbg_free_client(clid);
+		MSM_BUS_DBG("Client %d unregistered\n", clid);
+	} else
+		msm_bus_dbg_fill_cl_buffer(pdata, index, clid);
+}
+EXPORT_SYMBOL(msm_bus_dbg_client_data);
+
+/**
+ * msm_bus_dbg_commit_data() - Add commit data from fabrics
+ * @fabname: Fabric name specified in platform data
+ * @cdata: Commit Data
+ * @nmasters: Number of masters attached to fabric
+ * @nslaves: Number of slaves attached to fabric
+ * @ntslaves: Number of tiered slaves attached to fabric
+ * @op: Operation to be performed
+ */
+void msm_bus_dbg_commit_data(const char *fabname, void *cdata,
+	int nmasters, int nslaves, int ntslaves, int op)
+{
+	struct dentry *file = NULL;
+
+	if (op == MSM_BUS_DBG_REGISTER)
+		msm_bus_dbg_record_fabric(fabname, file);
+	else if (op == MSM_BUS_DBG_UNREGISTER)
+		msm_bus_dbg_free_fabric(fabname);
+	else
+		msm_bus_dbg_fill_fab_buffer(fabname, cdata, nmasters,
+			nslaves, ntslaves);
+}
+EXPORT_SYMBOL(msm_bus_dbg_commit_data);
+
+static int __init msm_bus_debugfs_init(void)
+{
+	struct dentry *commit, *shell_client, *rules_dbg;
+	struct msm_bus_fab_list *fablist;
+	struct msm_bus_cldata *cldata = NULL;
+	uint64_t val = 0;
+
+	dir = debugfs_create_dir("msm-bus-dbg", NULL);
+	if ((!dir) || IS_ERR(dir)) {
+		MSM_BUS_ERR("Couldn't create msm-bus-dbg\n");
+		goto err;
+	}
+
+	clients = debugfs_create_dir("client-data", dir);
+	if ((!clients) || IS_ERR(clients)) {
+		MSM_BUS_ERR("Couldn't create clients\n");
+		goto err;
+	}
+
+	shell_client = debugfs_create_dir("shell-client", dir);
+	if ((!shell_client) || IS_ERR(shell_client)) {
+		MSM_BUS_ERR("Couldn't create shell-client\n");
+		goto err;
+	}
+
+	commit = debugfs_create_dir("commit-data", dir);
+	if ((!commit) || IS_ERR(commit)) {
+		MSM_BUS_ERR("Couldn't create commit\n");
+		goto err;
+	}
+
+	rules_dbg = debugfs_create_dir("rules-dbg", dir);
+	if ((!rules_dbg) || IS_ERR(rules_dbg)) {
+		MSM_BUS_ERR("Couldn't create rules-dbg\n");
+		goto err;
+	}
+
+	if (debugfs_create_file("print_rules", S_IRUGO | S_IWUSR,
+		rules_dbg, &val, &rules_dbg_fops) == NULL)
+		goto err;
+
+	if (debugfs_create_file("update_request", S_IRUGO | S_IWUSR,
+		shell_client, &val, &shell_client_en_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ib", S_IRUGO | S_IWUSR, shell_client, &val,
+		&shell_client_ib_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("ab", S_IRUGO | S_IWUSR, shell_client, &val,
+		&shell_client_ab_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("slv", S_IRUGO | S_IWUSR, shell_client,
+		&val, &shell_client_slv_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("mas", S_IRUGO | S_IWUSR, shell_client,
+		&val, &shell_client_mas_fops) == NULL)
+		goto err;
+	if (debugfs_create_file("update-request", S_IRUGO | S_IWUSR,
+		clients, NULL, &msm_bus_dbg_update_request_fops) == NULL)
+		goto err;
+
+	rules_buf = kzalloc(MAX_BUFF_SIZE, GFP_KERNEL);
+	if (!rules_buf) {
+		MSM_BUS_ERR("Failed to alloc rules_buf");
+		goto err;
+	}
+
+	list_for_each_entry(cldata, &cl_list, list) {
+		if (cldata->pdata) {
+			if (cldata->pdata->name == NULL) {
+				MSM_BUS_DBG("Client name not found\n");
+				continue;
+			}
+			cldata->file = msm_bus_dbg_create(cldata->pdata->name,
+					S_IRUGO, clients, cldata->clid);
+		} else if (cldata->handle) {
+			if (cldata->handle->name == NULL) {
+				MSM_BUS_DBG("Client doesn't have a name\n");
+				continue;
+			}
+			cldata->file = debugfs_create_file(cldata->handle->name,
+							S_IRUGO, clients,
+							(void *)cldata->handle,
+							&client_data_fops);
+		}
+	}
+
+	if (debugfs_create_file("dump_clients", S_IRUGO | S_IWUSR,
+		clients, NULL, &msm_bus_dbg_dump_clients_fops) == NULL)
+		goto err;
+
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry(fablist, &fabdata_list, list) {
+		fablist->file = debugfs_create_file(fablist->name, S_IRUGO,
+			commit, (void *)fablist->name, &fabric_data_fops);
+		if (fablist->file == NULL) {
+			MSM_BUS_DBG("Cannot create files for commit data\n");
+			kfree(rules_buf);
+			mutex_unlock(&msm_bus_dbg_fablist_lock);
+			goto err;
+		}
+	}
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+
+	msm_bus_dbg_init_vectors();
+	return 0;
+err:
+	debugfs_remove_recursive(dir);
+	return -ENODEV;
+}
+late_initcall(msm_bus_debugfs_init);
+
+static void __exit msm_bus_dbg_teardown(void)
+{
+	struct msm_bus_fab_list *fablist = NULL, *fablist_temp;
+	struct msm_bus_cldata *cldata = NULL, *cldata_temp;
+
+	debugfs_remove_recursive(dir);
+	list_for_each_entry_safe(cldata, cldata_temp, &cl_list, list) {
+		list_del(&cldata->list);
+		kfree(cldata);
+	}
+	mutex_lock(&msm_bus_dbg_fablist_lock);
+	list_for_each_entry_safe(fablist, fablist_temp, &fabdata_list, list) {
+		list_del(&fablist->list);
+		kfree(fablist);
+	}
+	kfree(rules_buf);
+	mutex_unlock(&msm_bus_dbg_fablist_lock);
+}
+module_exit(msm_bus_dbg_teardown);
+MODULE_DESCRIPTION("Debugfs for msm bus scaling client");
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
new file mode 100644
index 0000000..3f8b52c
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -0,0 +1,544 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_adhoc.h"
+
+struct msm_bus_floor_client_type {
+	int mas_id;
+	int slv_id;
+	struct msm_bus_client_handle *vote_handle;
+	struct device *dev;
+	u64 cur_vote_hz;
+	int active_only;
+};
+
+static struct class *bus_floor_class;
+#define MAX_VOTER_NAME	(50)
+#define DEFAULT_NODE_WIDTH	(8)
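+/*
+ * DBG_NAME() returns the part of a node name following the first '-'
+ * found within its first 7 characters (e.g. a name such as "fab-snoc"
+ * would yield "snoc"); the result is only meaningful when such a '-'
+ * is actually present.
+ */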
+#define DBG_NAME(s)	(strnstr(s, "-", 7) + 1)
+
+static int get_id(void)
+{
+	static int dev_id = MSM_BUS_INT_TEST_ID;
+	int id = dev_id;
+
+	if (id >= MSM_BUS_INT_TEST_LAST)
+		id = -EINVAL;
+	else
+		dev_id++;
+
+	return id;
+}
+
+static ssize_t bus_floor_active_only_show(struct device *dev,
+			struct device_attribute *dev_attr, char *buf)
+{
+	struct msm_bus_floor_client_type *cl;
+
+	cl = dev_get_drvdata(dev);
+
+	if (!cl) {
+		pr_err("%s: Can't find cl", __func__);
+		return 0;
+	}
+	return snprintf(buf, PAGE_SIZE, "%d", cl->active_only);
+}
+
+static ssize_t bus_floor_active_only_store(struct device *dev,
+			struct device_attribute *dev_attr, const char *buf,
+			size_t n)
+{
+	struct msm_bus_floor_client_type *cl;
+
+	cl = dev_get_drvdata(dev);
+
+	if (!cl) {
+		pr_err("%s: Can't find cl", __func__);
+		return 0;
+	}
+
+	if (kstrtoint(buf, 10, &cl->active_only) != 0) {
+		pr_err("%s:return error", __func__);
+		return -EINVAL;
+	}
+
+	return n;
+}
+
+static ssize_t bus_floor_vote_show(struct device *dev,
+			struct device_attribute *dev_attr, char *buf)
+{
+	struct msm_bus_floor_client_type *cl;
+
+	cl = dev_get_drvdata(dev);
+
+	if (!cl) {
+		pr_err("%s: Can't find cl", __func__);
+		return 0;
+	}
+	return snprintf(buf, PAGE_SIZE, "%llu", cl->cur_vote_hz);
+}
+
+static ssize_t bus_floor_vote_store(struct device *dev,
+			struct device_attribute *dev_attr, const char *buf,
+			size_t n)
+{
+	struct msm_bus_floor_client_type *cl;
+	int ret = 0;
+
+	cl = dev_get_drvdata(dev);
+
+	if (!cl) {
+		pr_err("%s: Can't find cl", __func__);
+		return 0;
+	}
+
+	if (kstrtoull(buf, 10, &cl->cur_vote_hz) != 0) {
+		pr_err("%s:return error", __func__);
+		return -EINVAL;
+	}
+
+	ret = msm_bus_floor_vote_context(dev_name(dev), cl->cur_vote_hz,
+					cl->active_only);
+	return n;
+}
+
+static ssize_t bus_floor_vote_store_api(struct device *dev,
+			struct device_attribute *dev_attr, const char *buf,
+			size_t n)
+{
+	struct msm_bus_floor_client_type *cl;
+	int ret = 0;
+	char name[10];
+	u64 vote_khz = 0;
+
+	cl = dev_get_drvdata(dev);
+
+	if (!cl) {
+		pr_err("%s: Can't find cl", __func__);
+		return 0;
+	}
+
+	if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) {
+		pr_err("%s:return error", __func__);
+		return -EINVAL;
+	}
+
+	pr_info("%s: name %s vote %llu\n",
+			__func__, name, vote_khz);
+
+	ret = msm_bus_floor_vote(name, vote_khz);
+	return n;
+}
+
+static DEVICE_ATTR(floor_vote, S_IRUGO | S_IWUSR,
+		bus_floor_vote_show, bus_floor_vote_store);
+
+static DEVICE_ATTR(floor_vote_api, S_IRUGO | S_IWUSR,
+		bus_floor_vote_show, bus_floor_vote_store_api);
+
+static DEVICE_ATTR(floor_active_only, S_IRUGO | S_IWUSR,
+		bus_floor_active_only_show, bus_floor_active_only_store);
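+
+/*
+ * These attributes are attached to the per-bus voter devices created in
+ * msm_bus_floor_setup_floor_dev() under the "bus-voter" class, so with a
+ * standard sysfs mount they show up as
+ * /sys/class/bus-voter/<voter-name>/floor_vote, floor_vote_api and
+ * floor_active_only. Writing a clock value (in Hz) to floor_vote places
+ * a floor request for that bus.
+ */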
+
+static struct msm_bus_node_device_type *msm_bus_floor_init_dev(
+				struct device *fab_dev, bool is_master)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_device_type *fab_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct device *dev = NULL;
+	int ret = 0;
+
+	if (!fab_dev) {
+		bus_node = ERR_PTR(-ENXIO);
+		goto exit_init_bus_dev;
+	}
+
+	fab_node = to_msm_bus_node(fab_dev);
+
+	if (!fab_node) {
+		pr_info("\n%s: Can't create device", __func__);
+		bus_node = ERR_PTR(-ENXIO);
+		goto exit_init_bus_dev;
+	}
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		bus_node = ERR_PTR(-ENOMEM);
+		goto exit_init_bus_dev;
+	}
+	dev = &bus_node->dev;
+	device_initialize(dev);
+
+	node_info = devm_kzalloc(dev,
+		sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+
+	if (!node_info) {
+		kfree(bus_node);
+		bus_node = ERR_PTR(-ENOMEM);
+		goto exit_init_bus_dev;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = true;
+	bus_node->node_info->bus_device = fab_dev;
+	bus_node->node_info->agg_params.buswidth = 8;
+	dev->bus = &msm_bus_type;
+	list_add_tail(&bus_node->dev_link, &fab_node->devlist);
+
+	bus_node->node_info->id = get_id();
+	if (bus_node->node_info->id < 0) {
+		pr_err("%s: Failed to get id for dev. Bus:%s is_master:%d",
+			__func__, fab_node->node_info->name, is_master);
+		bus_node = ERR_PTR(-ENXIO);
+		goto exit_init_bus_dev;
+	}
+
+	dev_set_name(dev, "testnode-%s-%s", (is_master ? "mas" : "slv"),
+					fab_node->node_info->name);
+
+	ret = device_add(dev);
+	if (ret < 0) {
+		pr_err("%s: Failed to add %s", __func__, dev_name(dev));
+		bus_node = ERR_PTR(ret);
+		goto exit_init_bus_dev;
+	}
+
+exit_init_bus_dev:
+	return bus_node;
+}
+
+static int msm_bus_floor_show_info(struct device *dev, void *data)
+{
+	if (dev)
+		pr_err(" %s\n", dev_name(dev));
+	return 0;
+}
+
+static void msm_bus_floor_pr_usage(void)
+{
+	pr_err("msm_bus_floor_vote: Supported buses\n");
+	class_for_each_device(bus_floor_class, NULL, NULL,
+					msm_bus_floor_show_info);
+}
+
+static int msm_bus_floor_match(struct device *dev, const void *data)
+{
+	int ret = 0;
+
+	if (!(dev && data))
+		return ret;
+
+	if (strnstr(dev_name(dev), data, MAX_VOTER_NAME))
+		ret = 1;
+
+	return ret;
+}
+
+int msm_bus_floor_vote(const char *name, u64 floor_hz)
+{
+	int ret = -EINVAL;
+	struct msm_bus_floor_client_type *cl;
+	bool found = false;
+	struct device *dbg_voter = NULL;
+
+	if (!name) {
+		pr_err("%s: NULL name", __func__);
+		return -EINVAL;
+	}
+
+	dbg_voter = class_find_device(bus_floor_class, NULL,
+						name, msm_bus_floor_match);
+	if (dbg_voter) {
+		found = true;
+		cl = dev_get_drvdata(dbg_voter);
+
+		if (!cl) {
+			pr_err("%s: Can't find cl", __func__);
+			goto exit_bus_floor_vote;
+		}
+
+		if (!cl->vote_handle) {
+			char cl_name[MAX_VOTER_NAME];
+
+			snprintf(cl_name, MAX_VOTER_NAME, "%s-floor-voter",
+						dev_name(cl->dev));
+			cl->vote_handle = msm_bus_scale_register(cl->mas_id,
+					cl->slv_id, cl_name, false);
+			if (!cl->vote_handle) {
+				ret = -ENXIO;
+				goto exit_bus_floor_vote;
+			}
+		}
+
+		cl->cur_vote_hz = floor_hz;
+		ret = msm_bus_scale_update_bw(cl->vote_handle, 0,
+					(floor_hz * DEFAULT_NODE_WIDTH));
+		if (ret) {
+			pr_err("%s: Failed to update %s", __func__,
+								name);
+			goto exit_bus_floor_vote;
+		}
+	} else {
+		pr_err("\n%s:No matching voting device found for %s", __func__,
+									name);
+		msm_bus_floor_pr_usage();
+	}
+
+exit_bus_floor_vote:
+	if (dbg_voter)
+		put_device(dbg_voter);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_bus_floor_vote);
+
+int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+						bool active_only)
+{
+	int ret = -EINVAL;
+	struct msm_bus_floor_client_type *cl;
+	bool found = false;
+	struct device *dbg_voter = NULL;
+
+	if (!name) {
+		pr_err("%s: NULL name", __func__);
+		return -EINVAL;
+	}
+
+	dbg_voter = class_find_device(bus_floor_class, NULL,
+						name, msm_bus_floor_match);
+	if (dbg_voter) {
+		found = true;
+		cl = dev_get_drvdata(dbg_voter);
+
+		if (!cl) {
+			pr_err("%s: Can't find cl", __func__);
+			goto exit_bus_floor_vote_context;
+		}
+
+		if (!(cl->vote_handle &&
+			(cl->vote_handle->active_only == active_only))) {
+			char cl_name[MAX_VOTER_NAME];
+
+			if (cl->vote_handle)
+				msm_bus_scale_unregister(cl->vote_handle);
+
+			snprintf(cl_name, MAX_VOTER_NAME, "%s-floor-voter",
+						dev_name(cl->dev));
+			cl->vote_handle = msm_bus_scale_register(cl->mas_id,
+					cl->slv_id, (char *)dev_name(cl->dev),
+								active_only);
+			if (!cl->vote_handle) {
+				ret = -ENXIO;
+				goto exit_bus_floor_vote_context;
+			}
+		}
+
+		cl->cur_vote_hz = floor_hz;
+		ret = msm_bus_scale_update_bw(cl->vote_handle, 0,
+					(floor_hz * DEFAULT_NODE_WIDTH));
+		if (ret) {
+			pr_err("%s: Failed to update %s", __func__,
+								name);
+			goto exit_bus_floor_vote_context;
+		}
+	} else {
+		pr_err("\n%s:No matching voting device found for %s", __func__,
+									name);
+		msm_bus_floor_pr_usage();
+	}
+
+exit_bus_floor_vote_context:
+	if (dbg_voter)
+		put_device(dbg_voter);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_bus_floor_vote_context);
+
+static int msm_bus_floor_setup_dev_conn(
+		struct msm_bus_node_device_type *mas_node,
+		struct msm_bus_node_device_type *slv_node)
+{
+	int ret = 0;
+	int slv_id = 0;
+
+	if (!(mas_node && slv_node)) {
+		pr_err("\n%s: Invalid master/slave device", __func__);
+		ret = -ENXIO;
+		goto exit_setup_dev_conn;
+	}
+
+	slv_id = slv_node->node_info->id;
+	mas_node->node_info->num_connections = 1;
+	mas_node->node_info->connections = devm_kzalloc(&mas_node->dev,
+			(sizeof(int) * mas_node->node_info->num_connections),
+			GFP_KERNEL);
+
+	if (!mas_node->node_info->connections) {
+		pr_err("%s:Bus node connections info alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_setup_dev_conn;
+	}
+
+	mas_node->node_info->dev_connections = devm_kzalloc(&mas_node->dev,
+			(sizeof(struct device *) *
+				mas_node->node_info->num_connections),
+			GFP_KERNEL);
+
+	if (!mas_node->node_info->dev_connections) {
+		pr_err("%s:Bus node dev connections info alloc failed\n",
+								__func__);
+		ret = -ENOMEM;
+		goto exit_setup_dev_conn;
+	}
+	mas_node->node_info->connections[0] = slv_id;
+	mas_node->node_info->dev_connections[0] = &slv_node->dev;
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+static int msm_bus_floor_setup_floor_dev(
+			struct msm_bus_node_device_type *mas_node,
+			struct msm_bus_node_device_type *slv_node,
+			struct msm_bus_node_device_type *bus_node)
+{
+	struct msm_bus_floor_client_type *cl_ptr = NULL;
+	int ret = 0;
+	char *name = NULL;
+
+	cl_ptr = kzalloc(sizeof(struct msm_bus_floor_client_type), GFP_KERNEL);
+	if (!cl_ptr) {
+		ret = -ENOMEM;
+		goto err_setup_floor_dev;
+	}
+
+	if (!bus_floor_class) {
+		bus_floor_class = class_create(THIS_MODULE, "bus-voter");
+		if (IS_ERR(bus_floor_class)) {
+			ret = -ENXIO;
+			pr_err("%s: Error creating dev class", __func__);
+			goto err_setup_floor_dev;
+		}
+	}
+
+	name = DBG_NAME(bus_node->node_info->name);
+	if (!name) {
+		pr_err("%s: Invalid name derived for %s", __func__,
+						bus_node->node_info->name);
+		ret = -EINVAL;
+		goto err_setup_floor_dev;
+	}
+
+	cl_ptr->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+	if (!cl_ptr->dev) {
+		ret = -ENOMEM;
+		goto err_setup_floor_dev;
+	}
+
+	device_initialize(cl_ptr->dev);
+	cl_ptr->dev->class = bus_floor_class;
+	dev_set_name(cl_ptr->dev, "%s", name);
+	dev_set_drvdata(cl_ptr->dev, cl_ptr);
+	ret = device_add(cl_ptr->dev);
+
+	if (ret < 0) {
+		pr_err("%s: Failed to add device bus %d", __func__,
+			bus_node->node_info->id);
+		goto err_setup_floor_dev;
+	}
+
+	cl_ptr->mas_id = mas_node->node_info->id;
+	cl_ptr->slv_id = slv_node->node_info->id;
+
+	ret = device_create_file(cl_ptr->dev, &dev_attr_floor_vote);
+	if (ret < 0)
+		goto err_setup_floor_dev;
+
+	ret = device_create_file(cl_ptr->dev, &dev_attr_floor_vote_api);
+	if (ret < 0)
+		goto err_setup_floor_dev;
+
+	ret = device_create_file(cl_ptr->dev, &dev_attr_floor_active_only);
+	if (ret < 0)
+		goto err_setup_floor_dev;
+
+	return ret;
+
+err_setup_floor_dev:
+	kfree(cl_ptr);
+	return ret;
+}
+
+int msm_bus_floor_init(struct device *dev)
+{
+	struct msm_bus_node_device_type *mas_node = NULL;
+	struct msm_bus_node_device_type *slv_node = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int ret = 0;
+
+	if (!dev) {
+		pr_info("\n%s: Can't create voting client", __func__);
+		ret = -ENXIO;
+		goto exit_floor_init;
+	}
+
+	bus_node = to_msm_bus_node(dev);
+	if (!(bus_node && bus_node->node_info->is_fab_dev)) {
+		pr_info("\n%s: Can't create voting client, not a fab device",
+								__func__);
+		ret = -ENXIO;
+		goto exit_floor_init;
+	}
+
+	mas_node = msm_bus_floor_init_dev(dev, true);
+	if (IS_ERR_OR_NULL(mas_node)) {
+		pr_err("\n%s: Error setting up master dev, bus %d",
+					__func__, bus_node->node_info->id);
+		goto exit_floor_init;
+	}
+
+	slv_node = msm_bus_floor_init_dev(dev, false);
+	if (IS_ERR_OR_NULL(slv_node)) {
+		pr_err("\n%s: Error setting up slave dev, bus %d",
+					__func__, bus_node->node_info->id);
+		goto exit_floor_init;
+	}
+
+	ret = msm_bus_floor_setup_dev_conn(mas_node, slv_node);
+	if (ret) {
+		pr_err("\n%s: Error setting up connections bus %d",
+					__func__, bus_node->node_info->id);
+		goto err_floor_init;
+	}
+
+	ret = msm_bus_floor_setup_floor_dev(mas_node, slv_node, bus_node);
+	if (ret) {
+		pr_err("\n%s: Error getting mas/slv nodes bus %d",
+					__func__, bus_node->node_info->id);
+		goto err_floor_init;
+	}
+
+exit_floor_init:
+	return ret;
+err_floor_init:
+	kfree(mas_node);
+	kfree(slv_node);
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
new file mode 100644
index 0000000..269d09a
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_adhoc.c
@@ -0,0 +1,1291 @@
+/* Copyright (c) 2014-2016, Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/rpm-smd.h>
+#include <trace/events/trace_msm_bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int off = 0;
+
+	bus_node = to_msm_bus_node(dev);
+	if (!bus_node)
+		return -EINVAL;
+
+	node_info = bus_node->node_info;
+
+	for (i = 0; i < bus_node->num_lnodes; i++) {
+		if (!bus_node->lnode_list[i].in_use)
+			continue;
+		off += scnprintf((buf + off), PAGE_SIZE - off,
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+		trace_printk(
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+	}
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	trace_printk(
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	trace_printk(
+	"Max_Slp_IB %llu Sum_Slp_AB %lluSlp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	return off;
+}
+
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	return count;
+}
+
+DEVICE_ATTR(bw, 0600, bw_show, bw_store);
+
+struct static_rules_type {
+	int num_rules;
+	struct bus_rule_type *rules;
+};
+
+static struct static_rules_type static_rules;
+
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev;
+
+	if (!(dev && nclk))
+		return -ENXIO;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!strlen(nclk->reg_name)) {
+		dev_dbg(dev, "No regulator exist for node %d\n",
+						node_dev->node_info->id);
+		goto exit_of_get_reg;
+	} else {
+		if (!(IS_ERR_OR_NULL(nclk->reg)))
+			goto exit_of_get_reg;
+
+		nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+		if (IS_ERR_OR_NULL(nclk->reg)) {
+			ret =
+			(IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+			dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+							nclk->reg_name, ret);
+		} else {
+			dev_dbg(dev, "Successfully got regulator for %d\n",
+				node_dev->node_info->id);
+		}
+	}
+
+exit_of_get_reg:
+	return ret;
+}
+
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	ret = regulator_enable(nclk->reg);
+	if (ret) {
+		MSM_BUS_ERR("Failed to enable regulator for %s\n",
+							nclk->reg_name);
+		goto exit_bus_enable_reg;
+	}
+	pr_debug("%s: Enabled Reg\n", __func__);
+exit_bus_enable_reg:
+	return ret;
+}
+
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	regulator_disable(nclk->reg);
+	pr_debug("%s: Disabled Reg\n", __func__);
+exit_bus_disable_reg:
+	return ret;
+}
+
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+
+	if (!nclk->enable && !nclk->setrate_only_clk) {
+		if (dev && strlen(nclk->reg_name)) {
+			if (IS_ERR_OR_NULL(nclk->reg)) {
+				ret = bus_get_reg(nclk, dev);
+				if (ret) {
+					dev_dbg(dev,
+						"Failed to get reg.Err %d\n",
+									ret);
+					goto exit_enable_nodeclk;
+				}
+			}
+
+			ret = bus_enable_reg(nclk);
+			if (ret) {
+				dev_dbg(dev, "Failed to enable reg. Err %d\n",
+									ret);
+				goto exit_enable_nodeclk;
+			}
+		}
+		ret = clk_prepare_enable(nclk->clk);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+			nclk->enable = false;
+		} else
+			nclk->enable = true;
+	}
+exit_enable_nodeclk:
+	return ret;
+}
+
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (nclk->enable && !nclk->setrate_only_clk) {
+		clk_disable_unprepare(nclk->clk);
+		nclk->enable = false;
+		bus_disable_reg(nclk);
+	}
+	return ret;
+}
+
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+	int ret = 0;
+
+	if (!nclk->enable_only_clk)
+		ret = clk_set_rate(nclk->clk, rate);
+
+	if (ret)
+		MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+	return ret;
+}
+
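+/*
+ * Send a node's aggregated AB bandwidth to the RPM: once for its master
+ * port and once for its slave port when the corresponding RPM id is
+ * valid. DUAL_CTX requests go to the RPM sleep set, all others to the
+ * active set.
+ */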
+static int send_rpm_msg(struct msm_bus_node_device_type *ndev, int ctx)
+{
+	int ret = 0;
+	int rsc_type;
+	struct msm_rpm_kvp rpm_kvp;
+	int rpm_ctx;
+
+	if (!ndev) {
+		MSM_BUS_ERR("%s: Error getting node info.", __func__);
+		ret = -ENODEV;
+		goto exit_send_rpm_msg;
+	}
+
+	rpm_kvp.length = sizeof(uint64_t);
+	rpm_kvp.key = RPM_MASTER_FIELD_BW;
+
+	if (ctx == DUAL_CTX)
+		rpm_ctx = MSM_RPM_CTX_SLEEP_SET;
+	else
+		rpm_ctx = MSM_RPM_CTX_ACTIVE_SET;
+
+	rpm_kvp.data = (uint8_t *)&ndev->node_bw[ctx].sum_ab;
+
+	if (ndev->node_info->mas_rpm_id != -1) {
+		rsc_type = RPM_BUS_MASTER_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->mas_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+					__func__);
+			MSM_BUS_ERR("%s:Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				 ndev->node_info->mas_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->mas_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+
+	if (ndev->node_info->slv_rpm_id != -1) {
+		rsc_type = RPM_BUS_SLAVE_REQ;
+		ret = msm_rpm_send_message(rpm_ctx, rsc_type,
+			ndev->node_info->slv_rpm_id, &rpm_kvp, 1);
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to send RPM message:",
+						__func__);
+			MSM_BUS_ERR("%s: Node Id %d RPM id %d",
+			__func__, ndev->node_info->id,
+				ndev->node_info->slv_rpm_id);
+			goto exit_send_rpm_msg;
+		}
+		trace_bus_agg_bw(ndev->node_info->id,
+			ndev->node_info->slv_rpm_id, rpm_ctx,
+			ndev->node_bw[ctx].sum_ab);
+	}
+exit_send_rpm_msg:
+	return ret;
+}
+
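+/*
+ * Flush a node's aggregated bandwidth only when it changed since the last
+ * commit: AP-owned ports are programmed through the fabric's NoC ops
+ * (active context only), all other nodes are voted through the RPM.
+ */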
+static int flush_bw_data(struct msm_bus_node_device_type *node_info, int ctx)
+{
+	int ret = 0;
+
+	if (!node_info) {
+		MSM_BUS_ERR("%s: Unable to find bus device for device",
+			__func__);
+		ret = -ENODEV;
+		goto exit_flush_bw_data;
+	}
+
+	if (node_info->node_bw[ctx].last_sum_ab !=
+				node_info->node_bw[ctx].sum_ab) {
+		if (node_info->ap_owned) {
+			struct msm_bus_node_device_type *bus_device =
+			to_msm_bus_node(node_info->node_info->bus_device);
+			struct msm_bus_fab_device_type *fabdev =
+							bus_device->fabdev;
+
+			/*
+			 * For AP owned ports, only care about the Active
+			 * context bandwidth.
+			 */
+			if (fabdev && (ctx == ACTIVE_CTX) &&
+				fabdev->noc_ops.update_bw_reg &&
+				fabdev->noc_ops.update_bw_reg
+					(node_info->node_info->qos_params.mode))
+				ret = fabdev->noc_ops.set_bw(node_info,
+							fabdev->qos_base,
+							fabdev->base_offset,
+							fabdev->qos_off,
+							fabdev->qos_freq);
+		} else {
+			ret = send_rpm_msg(node_info, ctx);
+
+			if (ret)
+				MSM_BUS_ERR("%s: Failed to send RPM msg for%d",
+				__func__, node_info->node_info->id);
+		}
+		node_info->node_bw[ctx].last_sum_ab =
+					node_info->node_bw[ctx].sum_ab;
+	}
+
+exit_flush_bw_data:
+	return ret;
+
+}
+
+static int flush_clk_data(struct msm_bus_node_device_type *node, int ctx)
+{
+	struct nodeclk *nodeclk = NULL;
+	int ret = 0;
+
+	if (!node) {
+		MSM_BUS_ERR("Unable to find bus device");
+		ret = -ENODEV;
+		goto exit_flush_clk_data;
+	}
+
+	nodeclk = &node->clk[ctx];
+
+	if (IS_ERR_OR_NULL(nodeclk) || IS_ERR_OR_NULL(nodeclk->clk))
+		goto exit_flush_clk_data;
+
+	if (nodeclk->rate != node->node_bw[ctx].cur_clk_hz) {
+		long rounded_rate;
+
+		nodeclk->rate = node->node_bw[ctx].cur_clk_hz;
+		nodeclk->dirty = true;
+
+		if (nodeclk->rate) {
+			rounded_rate = clk_round_rate(nodeclk->clk,
+							nodeclk->rate);
+			ret = setrate_nodeclk(nodeclk, rounded_rate);
+
+			if (ret) {
+				MSM_BUS_ERR("%s: Failed to set_rate %lu for %d",
+					__func__, rounded_rate,
+						node->node_info->id);
+				ret = -ENODEV;
+				goto exit_flush_clk_data;
+			}
+
+			ret = enable_nodeclk(nodeclk, &node->dev);
+
+			if ((node->node_info->is_fab_dev) &&
+				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
+				ret = enable_nodeclk(&node->bus_qos_clk,
+							&node->dev);
+		} else {
+			if ((node->node_info->is_fab_dev) &&
+				!IS_ERR_OR_NULL(node->bus_qos_clk.clk))
+				ret = disable_nodeclk(&node->bus_qos_clk);
+
+			ret = disable_nodeclk(nodeclk);
+		}
+
+		if (ret) {
+			MSM_BUS_ERR("%s: Failed to enable for %d", __func__,
+						node->node_info->id);
+			ret = -ENODEV;
+			goto exit_flush_clk_data;
+		}
+		MSM_BUS_DBG("%s: Updated %d clk to %llu", __func__,
+				node->node_info->id, nodeclk->rate);
+	}
+exit_flush_clk_data:
+	/* Reset the aggregated clock rate for fab devices*/
+	if (node && node->node_info->is_fab_dev)
+		node->node_bw[ctx].cur_clk_hz = 0;
+
+	if (nodeclk)
+		nodeclk->dirty = 0;
+	return ret;
+}
+
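+/*
+ * A fabric's requested clock rate per context is the maximum of the
+ * rates requested by the nodes hanging off it.
+ */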
+static int msm_bus_agg_fab_clks(struct msm_bus_node_device_type *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node;
+	int ctx;
+
+	list_for_each_entry(node, &bus_dev->devlist, dev_link) {
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			if (node->node_bw[ctx].cur_clk_hz >=
+					bus_dev->node_bw[ctx].cur_clk_hz)
+				bus_dev->node_bw[ctx].cur_clk_hz =
+						node->node_bw[ctx].cur_clk_hz;
+		}
+	}
+	return ret;
+}
+
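+/*
+ * Commit a list of dirty nodes: fabric clock requests are aggregated
+ * first, then each node's clocks and bandwidth are flushed for both
+ * contexts and the node is removed from the commit list.
+ */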
+int msm_bus_commit_data(struct list_head *clist)
+{
+	int ret = 0;
+	int ctx;
+	struct msm_bus_node_device_type *node;
+	struct msm_bus_node_device_type *node_tmp;
+
+	list_for_each_entry(node, clist, link) {
+		/* Aggregate the bus clocks */
+		if (node->node_info->is_fab_dev)
+			msm_bus_agg_fab_clks(node);
+	}
+
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		if (unlikely(node->node_info->defer_qos))
+			msm_bus_dev_init_qos(&node->dev, NULL);
+
+		for (ctx = 0; ctx < NUM_CTX; ctx++) {
+			ret = flush_clk_data(node, ctx);
+			if (ret)
+				MSM_BUS_ERR("%s: Err flushing clk data for:%d",
+						__func__, node->node_info->id);
+			ret = flush_bw_data(node, ctx);
+			if (ret)
+				MSM_BUS_ERR("%s: Error flushing bw data for %d",
+					__func__, node->node_info->id);
+		}
+		node->dirty = false;
+		list_del_init(&node->link);
+	}
+	return ret;
+}
+
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t copy_size = old_size;
+
+	if (!new_size) {
+		devm_kfree(dev, p);
+		return ZERO_SIZE_PTR;
+	}
+
+	if (new_size < old_size)
+		copy_size = new_size;
+
+	ret = devm_kzalloc(dev, new_size, flags);
+	if (!ret)
+		goto exit_realloc_devmem;
+
+	memcpy(ret, p, copy_size);
+	devm_kfree(dev, p);
+exit_realloc_devmem:
+	return ret;
+}
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	switch (bus_dev->fabdev->bus_type) {
+	case MSM_BUS_NOC:
+		msm_bus_noc_set_ops(bus_dev);
+		break;
+	case MSM_BUS_BIMC:
+		msm_bus_bimc_set_ops(bus_dev);
+		break;
+	default:
+		MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+	}
+}
+
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_disable_node_qos_clk;
+	}
+
+	for (i = 0; i < node->num_node_qos_clks; i++)
+		ret = disable_nodeclk(&node->node_qos_clks[i]);
+
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++)
+		ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+	return ret;
+}
+
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+	long rounded_rate;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_enable_node_qos_clk;
+	}
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < node->num_node_qos_clks; i++) {
+		if (!node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++) {
+		if (!bus_node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					bus_node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&bus_node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&bus_node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+exit_enable_node_qos_clk:
+	return ret;
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+				int enable, uint64_t lim_bw)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node_dev;
+
+	if (!node_dev) {
+		MSM_BUS_ERR("No device specified");
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	if (!node_dev->ap_owned) {
+		MSM_BUS_ERR("Device is not AP owned %d",
+						node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+	if (!bus_node_dev) {
+		MSM_BUS_ERR("Unable to get bus device infofor %d",
+			node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+	if (bus_node_dev->fabdev &&
+		bus_node_dev->fabdev->noc_ops.limit_mport) {
+		if (ret < 0) {
+			MSM_BUS_ERR("Can't Enable QoS clk %d",
+				node_dev->node_info->id);
+			goto exit_enable_limiter;
+		}
+		bus_node_dev->fabdev->noc_ops.limit_mport(
+				node_dev,
+				bus_node_dev->fabdev->qos_base,
+				bus_node_dev->fabdev->base_offset,
+				bus_node_dev->fabdev->qos_off,
+				bus_node_dev->fabdev->qos_freq,
+				enable, lim_bw);
+	}
+
+exit_enable_limiter:
+	return ret;
+}
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev = NULL;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+		ret = -ENXIO;
+		goto exit_init_qos;
+	}
+
+	MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+	if (node_dev->ap_owned) {
+		struct msm_bus_node_device_type *bus_node_info;
+
+		bus_node_info =
+			to_msm_bus_node(node_dev->node_info->bus_device);
+
+		if (!bus_node_info) {
+			MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+				__func__,
+				node_dev->node_info->id);
+			ret = -ENXIO;
+			goto exit_init_qos;
+		}
+
+		if (bus_node_info->fabdev &&
+			bus_node_info->fabdev->noc_ops.qos_init) {
+			int ret = 0;
+
+			if (node_dev->ap_owned &&
+				(node_dev->node_info->qos_params.mode) != -1) {
+
+				if (bus_node_info->fabdev->bypass_qos_prg)
+					goto exit_init_qos;
+
+				ret = msm_bus_enable_node_qos_clk(node_dev);
+				if (ret < 0) {
+					MSM_BUS_DBG("Can't Enable QoS clk %d\n",
+					node_dev->node_info->id);
+					node_dev->node_info->defer_qos = true;
+					goto exit_init_qos;
+				}
+
+				bus_node_info->fabdev->noc_ops.qos_init(
+					node_dev,
+					bus_node_info->fabdev->qos_base,
+					bus_node_info->fabdev->base_offset,
+					bus_node_info->fabdev->qos_off,
+					bus_node_info->fabdev->qos_freq);
+				ret = msm_bus_disable_node_qos_clk(node_dev);
+				node_dev->node_info->defer_qos = false;
+			}
+		} else
+			MSM_BUS_ERR("%s: Skipping QOS init for %d",
+				__func__, node_dev->node_info->id);
+	}
+exit_init_qos:
+	return ret;
+}
+
+static int msm_bus_fabric_init(struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
+		ret = -ENXIO;
+		goto exit_fabric_init;
+	}
+
+	if (node_dev->node_info->virt_dev) {
+		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
+						node_dev->node_info->id);
+		goto exit_fabric_init;
+	}
+
+	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
+								GFP_KERNEL);
+	if (!fabdev) {
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+	node_dev->fabdev = fabdev;
+	fabdev->pqos_base = pdata->fabdev->pqos_base;
+	fabdev->qos_range = pdata->fabdev->qos_range;
+	fabdev->base_offset = pdata->fabdev->base_offset;
+	fabdev->qos_off = pdata->fabdev->qos_off;
+	fabdev->qos_freq = pdata->fabdev->qos_freq;
+	fabdev->bus_type = pdata->fabdev->bus_type;
+	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
+	msm_bus_fab_init_noc_ops(node_dev);
+
+	fabdev->qos_base = devm_ioremap(dev,
+				fabdev->pqos_base, fabdev->qos_range);
+	if (!fabdev->qos_base) {
+		MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d",
+			__func__,
+			 (size_t)fabdev->pqos_base, node_dev->node_info->id);
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+exit_fabric_init:
+	return ret;
+}
+
+static int msm_bus_init_clk(struct device *bus_dev,
+				struct msm_bus_node_device_type *pdata)
+{
+	unsigned int ctx;
+	struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
+	int i;
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+			node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+			node_dev->clk[ctx].enable_only_clk =
+					pdata->clk[ctx].enable_only_clk;
+			node_dev->clk[ctx].setrate_only_clk =
+					pdata->clk[ctx].setrate_only_clk;
+			node_dev->clk[ctx].enable = false;
+			node_dev->clk[ctx].dirty = false;
+			strlcpy(node_dev->clk[ctx].reg_name,
+				pdata->clk[ctx].reg_name, MAX_REG_NAME);
+			node_dev->clk[ctx].reg = NULL;
+			bus_get_reg(&node_dev->clk[ctx], bus_dev);
+			MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
+				__func__, node_dev->node_info->id, ctx);
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
+		node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
+		node_dev->bus_qos_clk.enable_only_clk =
+					pdata->bus_qos_clk.enable_only_clk;
+		node_dev->bus_qos_clk.setrate_only_clk =
+					pdata->bus_qos_clk.setrate_only_clk;
+		node_dev->bus_qos_clk.enable = false;
+		strlcpy(node_dev->bus_qos_clk.reg_name,
+			pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
+		node_dev->bus_qos_clk.reg = NULL;
+		MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
+						node_dev->node_info->id);
+	}
+
+	if (pdata->num_node_qos_clks) {
+		node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
+		node_dev->node_qos_clks = devm_kzalloc(bus_dev,
+			(node_dev->num_node_qos_clks * sizeof(struct nodeclk)),
+			GFP_KERNEL);
+		if (!node_dev->node_qos_clks) {
+			dev_err(bus_dev, "Failed to alloc memory for qos clk");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < pdata->num_node_qos_clks; i++) {
+			node_dev->node_qos_clks[i].clk =
+					pdata->node_qos_clks[i].clk;
+			node_dev->node_qos_clks[i].enable_only_clk =
+					pdata->node_qos_clks[i].enable_only_clk;
+			node_dev->node_qos_clks[i].setrate_only_clk =
+				pdata->node_qos_clks[i].setrate_only_clk;
+			node_dev->node_qos_clks[i].enable = false;
+			strlcpy(node_dev->node_qos_clks[i].reg_name,
+				pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
+			node_dev->node_qos_clks[i].reg = NULL;
+			MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg%s\n",
+					__func__, i,
+					node_dev->node_info->id,
+					node_dev->num_node_qos_clks,
+					node_dev->node_qos_clks[i].reg_name);
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
+				struct device *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_info_type *pdata_node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+
+	if (!bus_node || !pdata) {
+		ret = -ENXIO;
+		MSM_BUS_ERR("%s: NULL pointers for pdata or bus_node",
+			__func__);
+		goto exit_copy_node_info;
+	}
+
+	node_info = bus_node->node_info;
+	pdata_node_info = pdata->node_info;
+
+	node_info->name = pdata_node_info->name;
+	node_info->id =  pdata_node_info->id;
+	node_info->bus_device_id = pdata_node_info->bus_device_id;
+	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
+	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
+	node_info->num_connections = pdata_node_info->num_connections;
+	node_info->num_blist = pdata_node_info->num_blist;
+	node_info->num_qports = pdata_node_info->num_qports;
+	node_info->virt_dev = pdata_node_info->virt_dev;
+	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
+	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
+	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
+	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
+	node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1;
+	node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0;
+	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
+	node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
+	node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
+	node_info->qos_params.gp = pdata_node_info->qos_params.gp;
+	node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
+	node_info->qos_params.ws = pdata_node_info->qos_params.ws;
+	node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;
+	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
+	node_info->agg_params.agg_scheme =
+					pdata_node_info->agg_params.agg_scheme;
+	node_info->agg_params.vrail_comp =
+					pdata_node_info->agg_params.vrail_comp;
+	node_info->agg_params.num_aggports =
+				pdata_node_info->agg_params.num_aggports;
+	node_info->agg_params.num_util_levels =
+				pdata_node_info->agg_params.num_util_levels;
+	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
+			sizeof(struct node_util_levels_type) *
+			node_info->agg_params.num_util_levels,
+			GFP_KERNEL);
+	if (!node_info->agg_params.util_levels) {
+		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+	memcpy(node_info->agg_params.util_levels,
+		pdata_node_info->agg_params.util_levels,
+		sizeof(struct node_util_levels_type) *
+			pdata_node_info->agg_params.num_util_levels);
+
+	node_info->dev_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->dev_connections) {
+		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->connections = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->connections) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->connections,
+		pdata_node_info->connections,
+		sizeof(int) * pdata_node_info->num_connections);
+
+	node_info->black_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_blist,
+			GFP_KERNEL);
+	if (!node_info->black_connections) {
+		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->black_listed_connections = devm_kzalloc(bus_dev,
+			pdata_node_info->num_blist * sizeof(int),
+			GFP_KERNEL);
+	if (!node_info->black_listed_connections) {
+		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
+					__func__);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->black_listed_connections,
+		pdata_node_info->black_listed_connections,
+		sizeof(int) * pdata_node_info->num_blist);
+
+	node_info->qport = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_qports,
+			GFP_KERNEL);
+	if (!node_info->qport) {
+		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->black_listed_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->qport,
+		pdata_node_info->qport,
+		sizeof(int) * pdata_node_info->num_qports);
+
+exit_copy_node_info:
+	return ret;
+}
+
+static struct device *msm_bus_device_init(
+			struct msm_bus_node_device_type *pdata)
+{
+	struct device *bus_dev = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	int ret = 0;
+
+	/*
+	 * Init here so we can use devm calls
+	 */
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+	bus_dev = &bus_node->dev;
+	device_initialize(bus_dev);
+
+	node_info = devm_kzalloc(bus_dev,
+			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+	if (!node_info) {
+		devm_kfree(bus_dev, bus_node);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = pdata->ap_owned;
+	bus_dev->of_node = pdata->of_node;
+
+	if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
+		devm_kfree(bus_dev, bus_node);
+		devm_kfree(bus_dev, node_info);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+
+	bus_dev->bus = &msm_bus_type;
+	dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+	ret = device_add(bus_dev);
+	if (ret < 0) {
+		MSM_BUS_ERR("%s: Error registering device %d",
+				__func__, pdata->node_info->id);
+		devm_kfree(bus_dev, bus_node);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->black_listed_connections);
+		devm_kfree(bus_dev, node_info);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+	device_create_file(bus_dev, &dev_attr_bw);
+	INIT_LIST_HEAD(&bus_node->devlist);
+
+exit_device_init:
+	return bus_dev;
+}
+
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int ret = 0;
+	int j;
+	struct msm_bus_node_device_type *fab;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_setup_dev_conn;
+	}
+
+	/* Setup parent bus device for this node */
+	if (!bus_node->node_info->is_fab_dev) {
+		struct device *bus_parent_device =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bus_device_id,
+				msm_bus_device_match_adhoc);
+
+		if (!bus_parent_device) {
+			MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
+				__func__,
+				bus_node->node_info->id,
+				bus_node->node_info->bus_device_id);
+			ret = -ENXIO;
+			goto exit_setup_dev_conn;
+		}
+		bus_node->node_info->bus_device = bus_parent_device;
+		fab = to_msm_bus_node(bus_parent_device);
+		list_add_tail(&bus_node->dev_link, &fab->devlist);
+	}
+
+	bus_node->node_info->is_traversed = false;
+
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		bus_node->node_info->dev_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->dev_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->connections[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_blist; j++) {
+		bus_node->node_info->black_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->
+				black_listed_connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->black_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+				__func__, bus_node->node_info->
+				black_listed_connections[j],
+				bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+	int j;
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_node_debug;
+	}
+
+	MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+				bus_node->node_info->agg_params.buswidth);
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		struct msm_bus_node_device_type *bdev =
+		to_msm_bus_node(bus_node->node_info->dev_connections[j]);
+		MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+	}
+
+	if (bus_node->node_info->is_fab_dev)
+		msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+	return ret;
+}
+
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(dev);
+
+	if (bus_node)
+		MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+						bus_node->node_info->id);
+	device_unregister(dev);
+	kfree(bus_node);
+	return 0;
+}
+
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+	return 0;
+}
+
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	int ret = 0;
+	struct msm_bus_device_node_registration *pdata;
+
+	/* If possible, get pdata from device-tree */
+	if (pdev->dev.of_node)
+		pdata = msm_bus_of_to_pdata(pdev);
+	else {
+		pdata = (struct msm_bus_device_node_registration *)pdev->
+			dev.platform_data;
+	}
+
+	if (IS_ERR_OR_NULL(pdata)) {
+		MSM_BUS_ERR("No platform data found");
+		ret = -ENODATA;
+		goto exit_device_probe;
+	}
+
+	for (i = 0; i < pdata->num_devices; i++) {
+		struct device *node_dev = NULL;
+
+		node_dev = msm_bus_device_init(&pdata->info[i]);
+
+		if (!node_dev) {
+			MSM_BUS_ERR("%s: Error during dev init for %d",
+				__func__, pdata->info[i].node_info->id);
+			ret = -ENXIO;
+			goto exit_device_probe;
+		}
+
+		ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+		if (ret) {
+			MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+			msm_bus_device_remove(pdev);
+			goto exit_device_probe;
+		}
+		/*Is this a fabric device ?*/
+		if (pdata->info[i].node_info->is_fab_dev) {
+			MSM_BUS_DBG("%s: %d is a fab", __func__,
+						pdata->info[i].node_info->id);
+			ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing fab %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_setup_dev_conn);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+		goto exit_device_probe;
+	}
+
+	/*
+	 * Setup the QoS for the nodes, don't check the error codes as we
+	 * defer QoS programming to the first transaction in cases of failure
+	 * and we want to continue the probe.
+	 */
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+	/* Register the arb layer ops */
+	msm_bus_arb_setops_adhoc(&arb_ops);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+	devm_kfree(&pdev->dev, pdata->info);
+	devm_kfree(&pdev->dev, pdata);
+exit_device_probe:
+	return ret;
+}
+
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+	struct bus_rule_type *rule_data = NULL;
+	int num_rules = 0;
+
+	num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+	if (!rule_data)
+		goto exit_rules_probe;
+
+	msm_rule_register(num_rules, rule_data, NULL);
+	static_rules.num_rules = num_rules;
+	static_rules.rules = rule_data;
+	pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+	return 0;
+}
+
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+	struct static_rules_type *static_rules = NULL;
+
+	static_rules = pdev->dev.platform_data;
+	if (static_rules)
+		msm_rule_unregister(static_rules->num_rules,
+					static_rules->rules, NULL);
+	return 0;
+}
+
+
+static const struct of_device_id rules_match[] = {
+	{.compatible = "qcom,msm-bus-static-bw-rules"},
+	{}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+	.probe = msm_bus_device_rules_probe,
+	.remove = msm_bus_device_rules_remove,
+	.driver = {
+		.name = "msm_bus_rules_device",
+		.owner = THIS_MODULE,
+		.of_match_table = rules_match,
+	},
+};
+
+static const struct of_device_id fabric_match[] = {
+	{.compatible = "qcom,msm-bus-device"},
+	{}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+	.probe = msm_bus_device_probe,
+	.remove = msm_bus_device_remove,
+	.driver = {
+		.name = "msm_bus_device",
+		.owner = THIS_MODULE,
+		.of_match_table = fabric_match,
+	},
+};
+
+int __init msm_bus_device_init_driver(void)
+{
+	int rc;
+
+	MSM_BUS_ERR("msm_bus_fabric_init_driver\n");
+	rc =  platform_driver_register(&msm_bus_device_driver);
+
+	if (rc) {
+		MSM_BUS_ERR("Failed to register bus device driver");
+		return rc;
+	}
+	return platform_driver_register(&msm_bus_rules_driver);
+}
+subsys_initcall(msm_bus_device_init_driver);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
new file mode 100644
index 0000000..ee957b6
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_fabric_rpmh.c
@@ -0,0 +1,1319 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+//#include <soc/qcom/rpm-smd.h>
+#include <trace/events/trace_msm_bus.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+#define BCM_TCS_CMD_COMMIT_SHFT		30
+#define BCM_TCS_CMD_VALID_SHFT		29
+#define BCM_TCS_CMD_VOTE_X_SHFT		14
+#define BCM_TCS_CMD_VOTE_Y_SHFT		0
+#define BCM_TCS_CMD_VOTE_MASK		0x3FFF
+
+#define BCM_VCD_MAX_CNT			10
+
+#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \
+	(((commit & 0x1) << BCM_TCS_CMD_COMMIT_SHFT) |\
+	((valid & 0x1) << BCM_TCS_CMD_VALID_SHFT) |\
+	((vote_x & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) |\
+	((vote_y & BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT))
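+
+/*
+ * Worked example of the encoding above (values chosen for illustration):
+ * BCM_TCS_CMD(1, 1, 0x100, 0x200)
+ *	= (1 << 30) | (1 << 29) | (0x100 << 14) | 0x200
+ *	= 0x40000000 | 0x20000000 | 0x00400000 | 0x00000200
+ *	= 0x60400200
+ * i.e. a committed, valid vote of 0x100 vote_x units and 0x200 vote_y
+ * units, each field masked to the 14-bit BCM_TCS_CMD_VOTE_MASK.
+ */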
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data);
+
+struct list_head bcm_clist_inorder[BCM_VCD_MAX_CNT];
+
+struct tcs_cmd {
+	u32 addr;	/* slv_id:18:16 | offset:0:15 */
+	u32 data;	/* data for resource (or read response) */
+	bool complete;	/* wait for completion before sending next */
+};
+
+ssize_t bw_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int off = 0;
+
+	bus_node = to_msm_bus_node(dev);
+	if (!bus_node)
+		return -EINVAL;
+
+	node_info = bus_node->node_info;
+
+	for (i = 0; i < bus_node->num_lnodes; i++) {
+		if (!bus_node->lnode_list[i].in_use)
+			continue;
+		off += scnprintf((buf + off), PAGE_SIZE - off,
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+		trace_printk(
+		"[%d]:%s:Act_IB %llu Act_AB %llu Slp_IB %llu Slp_AB %llu\n",
+			i, bus_node->lnode_list[i].cl_name,
+			bus_node->lnode_list[i].lnode_ib[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ab[ACTIVE_CTX],
+			bus_node->lnode_list[i].lnode_ib[DUAL_CTX],
+			bus_node->lnode_list[i].lnode_ab[DUAL_CTX]);
+	}
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	off += scnprintf((buf + off), PAGE_SIZE - off,
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	trace_printk(
+	"Max_Act_IB %llu Sum_Act_AB %llu Act_Util_fact %d Act_Vrail_comp %d\n",
+		bus_node->node_bw[ACTIVE_CTX].max_ib,
+		bus_node->node_bw[ACTIVE_CTX].sum_ab,
+		bus_node->node_bw[ACTIVE_CTX].util_used,
+		bus_node->node_bw[ACTIVE_CTX].vrail_used);
+	trace_printk(
+	"Max_Slp_IB %llu Sum_Slp_AB %llu Slp_Util_fact %d Slp_Vrail_comp %d\n",
+		bus_node->node_bw[DUAL_CTX].max_ib,
+		bus_node->node_bw[DUAL_CTX].sum_ab,
+		bus_node->node_bw[DUAL_CTX].util_used,
+		bus_node->node_bw[DUAL_CTX].vrail_used);
+	return off;
+}
+
+ssize_t bw_store(struct device *dev, struct device_attribute *attr,
+			   const char *buf, size_t count)
+{
+	return count;
+}
+
+DEVICE_ATTR(bw, 0600, bw_show, bw_store);
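+
+/*
+ * The bw attribute is hooked up per node via device_create_file() in
+ * msm_bus_device_init(). Reading it from user space (through the node's
+ * sysfs directory under the msm_bus bus; the exact path depends on the
+ * name passed to dev_set_name()) runs bw_show() above, dumping each
+ * client's active/sleep IB/AB votes followed by the aggregated values.
+ */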
+
+struct static_rules_type {
+	int num_rules;
+	struct bus_rule_type *rules;
+};
+
+static struct static_rules_type static_rules;
+
+static int bus_get_reg(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev;
+
+	if (!(dev && nclk))
+		return -ENXIO;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!strlen(nclk->reg_name)) {
+		dev_dbg(dev, "No regulator exist for node %d\n",
+						node_dev->node_info->id);
+		goto exit_of_get_reg;
+	} else {
+		if (!(IS_ERR_OR_NULL(nclk->reg)))
+			goto exit_of_get_reg;
+
+		nclk->reg = devm_regulator_get(dev, nclk->reg_name);
+		if (IS_ERR_OR_NULL(nclk->reg)) {
+			ret =
+			(IS_ERR(nclk->reg) ? PTR_ERR(nclk->reg) : -ENXIO);
+			dev_err(dev, "Error: Failed to get regulator %s:%d\n",
+							nclk->reg_name, ret);
+		} else {
+			dev_dbg(dev, "Successfully got regulator for %d\n",
+				node_dev->node_info->id);
+		}
+	}
+
+exit_of_get_reg:
+	return ret;
+}
+
+static int bus_enable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_enable_reg;
+	}
+
+	ret = regulator_enable(nclk->reg);
+	if (ret) {
+		MSM_BUS_ERR("Failed to enable regulator for %s\n",
+							nclk->reg_name);
+		goto exit_bus_enable_reg;
+	}
+	pr_debug("%s: Enabled Reg\n", __func__);
+exit_bus_enable_reg:
+	return ret;
+}
+
+static int bus_disable_reg(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (!nclk) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	if ((IS_ERR_OR_NULL(nclk->reg))) {
+		ret = -ENXIO;
+		goto exit_bus_disable_reg;
+	}
+
+	regulator_disable(nclk->reg);
+	pr_debug("%s: Disabled Reg\n", __func__);
+exit_bus_disable_reg:
+	return ret;
+}
+
+static int enable_nodeclk(struct nodeclk *nclk, struct device *dev)
+{
+	int ret = 0;
+
+	if (!nclk->enable && !nclk->setrate_only_clk) {
+		if (dev && strlen(nclk->reg_name)) {
+			if (IS_ERR_OR_NULL(nclk->reg)) {
+				ret = bus_get_reg(nclk, dev);
+				if (ret) {
+					dev_dbg(dev,
+						"Failed to get reg.Err %d\n",
+									ret);
+					goto exit_enable_nodeclk;
+				}
+			}
+
+			ret = bus_enable_reg(nclk);
+			if (ret) {
+				dev_dbg(dev, "Failed to enable reg. Err %d\n",
+									ret);
+				goto exit_enable_nodeclk;
+			}
+		}
+		ret = clk_prepare_enable(nclk->clk);
+
+		if (ret) {
+			MSM_BUS_ERR("%s: failed to enable clk ", __func__);
+			nclk->enable = false;
+		} else
+			nclk->enable = true;
+	}
+exit_enable_nodeclk:
+	return ret;
+}
+
+static int disable_nodeclk(struct nodeclk *nclk)
+{
+	int ret = 0;
+
+	if (nclk->enable && !nclk->setrate_only_clk) {
+		clk_disable_unprepare(nclk->clk);
+		nclk->enable = false;
+		bus_disable_reg(nclk);
+	}
+	return ret;
+}
+
+static int setrate_nodeclk(struct nodeclk *nclk, long rate)
+{
+	int ret = 0;
+
+	if (!nclk->enable_only_clk)
+		ret = clk_set_rate(nclk->clk, rate);
+
+	if (ret)
+		MSM_BUS_ERR("%s: failed to setrate clk", __func__);
+	return ret;
+}
+
+static int tcs_cmd_gen(struct msm_bus_node_device_type *cur_bcm,
+				struct tcs_cmd *cmd, uint64_t ib,
+					uint64_t ab, bool commit)
+{
+	int ret = 0;
+	uint32_t tcs_cmd_addr = 0;
+	uint32_t tcs_cmd_data = 0;
+	bool valid = true;
+
+	if (ib == 0 && ab == 0) {
+		valid = false;
+	} else {
+		do_div(ib, cur_bcm->bcmdev->unit_size);
+		do_div(ab, cur_bcm->bcmdev->unit_size);
+	}
+
+	if (ib > BCM_TCS_CMD_VOTE_MASK)
+		ib = BCM_TCS_CMD_VOTE_MASK;
+
+	if (ab > BCM_TCS_CMD_VOTE_MASK)
+		ab = BCM_TCS_CMD_VOTE_MASK;
+
+	tcs_cmd_addr = cur_bcm->bcmdev->addr;
+	tcs_cmd_data = BCM_TCS_CMD(commit, valid, ib, ab);
+
+	/* Fill in the TCS command for this BCM */
+	cmd->addr = tcs_cmd_addr;
+	cmd->data = tcs_cmd_data;
+
+	return ret;
+}
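+
+/*
+ * Example of the scaling in tcs_cmd_gen(), assuming the default
+ * bcmdev->unit_size of 1000000 set in msm_bus_bcm_init(): an ib of
+ * 7500000000 bytes/s becomes a vote of 7500 units, well within the
+ * 14-bit BCM_TCS_CMD_VOTE_MASK (16383); ib == 0 && ab == 0 skips the
+ * scaling and clears the valid bit in the encoded command instead.
+ */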
+
+static int tcs_cmd_list_gen(uint32_t vcd_cnt, uint32_t **n,
+				struct tcs_cmd **active_cmdlist,
+				struct tcs_cmd **awake_cmdlist,
+				struct tcs_cmd **sleep_cmdlist)
+{
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	uint32_t bcm_cnt_amc = 0;
+	uint32_t bcm_cnt_tcs = 0;
+	int vcd_idx = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	bool commit;
+
+	/* First pass: count commands needed per context and count VCDs */
+	for (i = 0; i < BCM_VCD_MAX_CNT; i++) {
+		if (list_empty(&bcm_clist_inorder[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
+			bcm_cnt_amc++;
+			if ((cur_bcm->node_bw[DUAL_CTX].max_ab !=
+				cur_bcm->node_bw[ACTIVE_CTX].max_ab) ||
+				(cur_bcm->node_bw[DUAL_CTX].max_ib !=
+				cur_bcm->node_bw[ACTIVE_CTX].max_ib))
+				bcm_cnt_tcs++;
+		}
+		vcd_cnt++;
+	}
+
+	*n = kcalloc(vcd_cnt, sizeof(uint32_t), GFP_KERNEL);
+	*active_cmdlist = kcalloc(bcm_cnt_amc, sizeof(struct tcs_cmd),
+								GFP_KERNEL);
+	*awake_cmdlist = kcalloc(bcm_cnt_tcs, sizeof(struct tcs_cmd),
+								GFP_KERNEL);
+	*sleep_cmdlist = kcalloc(bcm_cnt_tcs, sizeof(struct tcs_cmd),
+								GFP_KERNEL);
+	if (!*n || !*active_cmdlist || !*awake_cmdlist || !*sleep_cmdlist)
+		return -ENOMEM;
+
+	/*
+	 * Second pass: generate the commands, marking the last BCM in each
+	 * VCD with the commit flag.
+	 */
+	for (i = 0; i < BCM_VCD_MAX_CNT; i++) {
+		if (list_empty(&bcm_clist_inorder[i]))
+			continue;
+		list_for_each_entry(cur_bcm, &bcm_clist_inorder[i], link) {
+			(*n)[vcd_idx]++;
+			commit = list_is_last(&cur_bcm->link,
+						&bcm_clist_inorder[i]);
+			tcs_cmd_gen(cur_bcm, &(*active_cmdlist)[j],
+				cur_bcm->node_bw[ACTIVE_CTX].max_ab,
+				cur_bcm->node_bw[ACTIVE_CTX].max_ib, commit);
+			j++;
+			if ((cur_bcm->node_bw[DUAL_CTX].max_ab ==
+				cur_bcm->node_bw[ACTIVE_CTX].max_ab) &&
+				(cur_bcm->node_bw[DUAL_CTX].max_ib ==
+				cur_bcm->node_bw[ACTIVE_CTX].max_ib))
+				continue;
+
+			tcs_cmd_gen(cur_bcm, &(*awake_cmdlist)[k],
+				cur_bcm->node_bw[DUAL_CTX].max_ab,
+				cur_bcm->node_bw[DUAL_CTX].max_ib, commit);
+
+			tcs_cmd_gen(cur_bcm, &(*sleep_cmdlist)[k],
+				cur_bcm->node_bw[DUAL_CTX].max_ab,
+				cur_bcm->node_bw[DUAL_CTX].max_ib, commit);
+			k++;
+		}
+		vcd_idx++;
+	}
+	return bcm_cnt_amc;
+}
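+
+/*
+ * Example (following the command generation above): two populated VCDs
+ * holding 3 and 2 dirty BCMs produce n = {3, 2}, a 5 entry AMC/active
+ * list with the commit flag set on entries 2 and 4 (the last BCM of
+ * each VCD), and awake/sleep lists containing only those BCMs whose
+ * DUAL_CTX vote differs from their ACTIVE_CTX vote.
+ */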
+
+static int bcm_clist_add(struct msm_bus_node_device_type *cur_dev)
+{
+	int ret = 0;
+	int cur_vcd = 0;
+	struct msm_bus_node_device_type *cur_bcm = NULL;
+	struct msm_bus_node_device_type *tmp_bcm = NULL;
+
+	if (!cur_dev->node_info->num_bcm_devs)
+		goto exit_bcm_clist_add;
+
+	cur_bcm = to_msm_bus_node(cur_dev->node_info->bcm_devs[0]);
+	cur_vcd = cur_bcm->bcmdev->clk_domain;
+
+	if (!cur_bcm->dirty) {
+		list_add_tail(&cur_bcm->link, &bcm_clist_inorder[cur_vcd]);
+		cur_bcm->dirty = true;
+	}
+
+exit_bcm_clist_add:
+	return ret;
+}
+
+int msm_bus_commit_data(struct list_head *clist)
+{
+	int ret = 0;
+	int bcm_cnt;
+	struct msm_bus_node_device_type *node;
+	struct msm_bus_node_device_type *node_tmp;
+	struct tcs_cmd *active_cmdlist = NULL;
+	struct tcs_cmd *awake_cmdlist = NULL;
+	struct tcs_cmd *sleep_cmdlist = NULL;
+	uint32_t vcd_cnt = 0;
+	uint32_t *n = NULL;
+
+	list_for_each_entry_safe(node, node_tmp, clist, link) {
+		if (unlikely(node->node_info->defer_qos))
+			msm_bus_dev_init_qos(&node->dev, NULL);
+
+		bcm_clist_add(node);
+
+		node->dirty = false;
+		list_del_init(&node->link);
+	}
+	bcm_cnt = tcs_cmd_list_gen(vcd_cnt, &n, &active_cmdlist,
+					&awake_cmdlist, &sleep_cmdlist);
+	// Insert calls to rpmh driver;
+	kfree(active_cmdlist);
+	kfree(awake_cmdlist);
+	kfree(sleep_cmdlist);
+	kfree(n);
+	return ret;
+}
+
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t copy_size = old_size;
+
+	if (!new_size) {
+		devm_kfree(dev, p);
+		return ZERO_SIZE_PTR;
+	}
+
+	if (new_size < old_size)
+		copy_size = new_size;
+
+	ret = devm_kzalloc(dev, new_size, flags);
+	if (!ret) {
+		MSM_BUS_ERR("%s: Error Reallocating memory", __func__);
+		goto exit_realloc_devmem;
+	}
+
+	memcpy(ret, p, copy_size);
+	devm_kfree(dev, p);
+exit_realloc_devmem:
+	return ret;
+}
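+
+/*
+ * Illustrative use of msm_bus_realloc_devmem(), e.g. growing a
+ * hypothetical qport array from 2 to 4 entries:
+ *
+ *	qports = msm_bus_realloc_devmem(dev, qports,
+ *			2 * sizeof(int), 4 * sizeof(int), GFP_KERNEL);
+ *
+ * The first two entries are preserved by the memcpy(), the new tail is
+ * zero filled by devm_kzalloc(), and the old buffer is devm_kfree()d.
+ */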
+
+static void msm_bus_fab_init_noc_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	switch (bus_dev->fabdev->bus_type) {
+	case MSM_BUS_NOC:
+		msm_bus_noc_set_ops(bus_dev);
+		break;
+	case MSM_BUS_BIMC:
+		msm_bus_bimc_set_ops(bus_dev);
+		break;
+	default:
+		MSM_BUS_ERR("%s: Invalid Bus type", __func__);
+	}
+}
+
+static int msm_bus_disable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_disable_node_qos_clk;
+	}
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++)
+		ret = disable_nodeclk(&bus_node->node_qos_clks[i]);
+
+exit_disable_node_qos_clk:
+	return ret;
+}
+
+static int msm_bus_enable_node_qos_clk(struct msm_bus_node_device_type *node)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	int i;
+	int ret = 0;
+	long rounded_rate;
+
+	if (!node || (!to_msm_bus_node(node->node_info->bus_device))) {
+		ret = -ENXIO;
+		goto exit_enable_node_qos_clk;
+	}
+	bus_node = to_msm_bus_node(node->node_info->bus_device);
+
+	for (i = 0; i < bus_node->num_node_qos_clks; i++) {
+		if (!bus_node->node_qos_clks[i].enable_only_clk) {
+			rounded_rate =
+				clk_round_rate(
+					bus_node->node_qos_clks[i].clk, 1);
+			ret = setrate_nodeclk(&bus_node->node_qos_clks[i],
+								rounded_rate);
+			if (ret)
+				MSM_BUS_DBG("%s: Failed set rate clk,node %d\n",
+					__func__, node->node_info->id);
+		}
+		ret = enable_nodeclk(&bus_node->node_qos_clks[i],
+					node->node_info->bus_device);
+		if (ret) {
+			MSM_BUS_DBG("%s: Failed to set Qos Clks ret %d\n",
+				__func__, ret);
+			msm_bus_disable_node_qos_clk(node);
+			goto exit_enable_node_qos_clk;
+		}
+
+	}
+exit_enable_node_qos_clk:
+	return ret;
+}
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *node_dev,
+				int enable, uint64_t lim_bw)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node_dev;
+
+	if (!node_dev) {
+		MSM_BUS_ERR("No device specified");
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	if (!node_dev->ap_owned) {
+		MSM_BUS_ERR("Device is not AP owned %d",
+						node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+
+	bus_node_dev = to_msm_bus_node(node_dev->node_info->bus_device);
+	if (!bus_node_dev) {
+		MSM_BUS_ERR("Unable to get bus device info for %d",
+			node_dev->node_info->id);
+		ret = -ENXIO;
+		goto exit_enable_limiter;
+	}
+	if (bus_node_dev->fabdev &&
+		bus_node_dev->fabdev->noc_ops.limit_mport) {
+		if (ret < 0) {
+			MSM_BUS_ERR("Can't Enable QoS clk %d",
+				node_dev->node_info->id);
+			goto exit_enable_limiter;
+		}
+		bus_node_dev->fabdev->noc_ops.limit_mport(
+				node_dev,
+				bus_node_dev->fabdev->qos_base,
+				bus_node_dev->fabdev->base_offset,
+				bus_node_dev->fabdev->qos_off,
+				bus_node_dev->fabdev->qos_freq,
+				enable, lim_bw);
+	}
+
+exit_enable_limiter:
+	return ret;
+}
+
+static int msm_bus_dev_init_qos(struct device *dev, void *data)
+{
+	int ret = 0;
+	struct msm_bus_node_device_type *node_dev = NULL;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get node device info", __func__);
+		ret = -ENXIO;
+		goto exit_init_qos;
+	}
+
+	MSM_BUS_DBG("Device = %d", node_dev->node_info->id);
+
+	if (node_dev->ap_owned) {
+		struct msm_bus_node_device_type *bus_node_info;
+
+		bus_node_info =
+			to_msm_bus_node(node_dev->node_info->bus_device);
+
+		if (!bus_node_info) {
+			MSM_BUS_ERR("%s: Unable to get bus device info for %d",
+				__func__,
+				node_dev->node_info->id);
+			ret = -ENXIO;
+			goto exit_init_qos;
+		}
+
+		if (bus_node_info->fabdev &&
+			bus_node_info->fabdev->noc_ops.qos_init) {
+			int ret = 0;
+
+			if (node_dev->ap_owned &&
+				(node_dev->node_info->qos_params.mode) != -1) {
+
+				if (bus_node_info->fabdev->bypass_qos_prg)
+					goto exit_init_qos;
+
+				ret = msm_bus_enable_node_qos_clk(node_dev);
+				if (ret < 0) {
+					MSM_BUS_DBG("Can't Enable QoS clk %d\n",
+					node_dev->node_info->id);
+					node_dev->node_info->defer_qos = true;
+					goto exit_init_qos;
+				}
+
+				bus_node_info->fabdev->noc_ops.qos_init(
+					node_dev,
+					bus_node_info->fabdev->qos_base,
+					bus_node_info->fabdev->base_offset,
+					bus_node_info->fabdev->qos_off,
+					bus_node_info->fabdev->qos_freq);
+				ret = msm_bus_disable_node_qos_clk(node_dev);
+				node_dev->node_info->defer_qos = false;
+			}
+		} else
+			MSM_BUS_ERR("%s: Skipping QOS init for %d",
+				__func__, node_dev->node_info->id);
+	}
+exit_init_qos:
+	return ret;
+}
+
+static int msm_bus_fabric_init(struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		MSM_BUS_ERR("%s: Unable to get bus device info", __func__);
+		ret = -ENXIO;
+		goto exit_fabric_init;
+	}
+
+	if (node_dev->node_info->virt_dev) {
+		MSM_BUS_ERR("%s: Skip Fab init for virtual device %d", __func__,
+						node_dev->node_info->id);
+		goto exit_fabric_init;
+	}
+
+	fabdev = devm_kzalloc(dev, sizeof(struct msm_bus_fab_device_type),
+								GFP_KERNEL);
+	if (!fabdev) {
+		MSM_BUS_ERR("Fabric alloc failed\n");
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+	node_dev->fabdev = fabdev;
+	fabdev->pqos_base = pdata->fabdev->pqos_base;
+	fabdev->qos_range = pdata->fabdev->qos_range;
+	fabdev->base_offset = pdata->fabdev->base_offset;
+	fabdev->qos_off = pdata->fabdev->qos_off;
+	fabdev->qos_freq = pdata->fabdev->qos_freq;
+	fabdev->bus_type = pdata->fabdev->bus_type;
+	fabdev->bypass_qos_prg = pdata->fabdev->bypass_qos_prg;
+	msm_bus_fab_init_noc_ops(node_dev);
+
+	fabdev->qos_base = devm_ioremap(dev,
+				fabdev->pqos_base, fabdev->qos_range);
+	if (!fabdev->qos_base) {
+		MSM_BUS_ERR("%s: Error remapping address 0x%zx :bus device %d",
+			__func__,
+			 (size_t)fabdev->pqos_base, node_dev->node_info->id);
+		ret = -ENOMEM;
+		goto exit_fabric_init;
+	}
+
+exit_fabric_init:
+	return ret;
+}
+
+static int msm_bus_bcm_init(struct device *dev,
+			struct msm_bus_node_device_type *pdata)
+{
+	struct msm_bus_bcm_device_type *bcmdev;
+	struct msm_bus_node_device_type *node_dev = NULL;
+	int ret = 0;
+	int i;
+
+	node_dev = to_msm_bus_node(dev);
+	if (!node_dev) {
+		ret = -ENXIO;
+		goto exit_bcm_init;
+	}
+
+	bcmdev = devm_kzalloc(dev, sizeof(struct msm_bus_bcm_device_type),
+								GFP_KERNEL);
+	if (!bcmdev) {
+		ret = -ENOMEM;
+		goto exit_bcm_init;
+	}
+
+	node_dev->bcmdev = bcmdev;
+
+	//pending cmddb APIs
+	bcmdev->width = 1;
+	bcmdev->clk_domain = 0;
+	bcmdev->type = 0;
+	bcmdev->unit_size = 1000000;
+	bcmdev->num_bus_devs = 0;
+
+	// Add way to count # of VCDs, initialize LL
+	for (i = 0; i < BCM_VCD_MAX_CNT; i++)
+		INIT_LIST_HEAD(&bcm_clist_inorder[i]);
+
+exit_bcm_init:
+	return ret;
+}
+
+
+
+static int msm_bus_init_clk(struct device *bus_dev,
+				struct msm_bus_node_device_type *pdata)
+{
+	unsigned int ctx;
+	struct msm_bus_node_device_type *node_dev = to_msm_bus_node(bus_dev);
+	int i;
+
+	for (ctx = 0; ctx < NUM_CTX; ctx++) {
+		if (!IS_ERR_OR_NULL(pdata->clk[ctx].clk)) {
+			node_dev->clk[ctx].clk = pdata->clk[ctx].clk;
+			node_dev->clk[ctx].enable_only_clk =
+					pdata->clk[ctx].enable_only_clk;
+			node_dev->clk[ctx].setrate_only_clk =
+					pdata->clk[ctx].setrate_only_clk;
+			node_dev->clk[ctx].enable = false;
+			node_dev->clk[ctx].dirty = false;
+			strlcpy(node_dev->clk[ctx].reg_name,
+				pdata->clk[ctx].reg_name, MAX_REG_NAME);
+			node_dev->clk[ctx].reg = NULL;
+			bus_get_reg(&node_dev->clk[ctx], bus_dev);
+			MSM_BUS_DBG("%s: Valid node clk node %d ctx %d\n",
+				__func__, node_dev->node_info->id, ctx);
+		}
+	}
+
+	if (!IS_ERR_OR_NULL(pdata->bus_qos_clk.clk)) {
+		node_dev->bus_qos_clk.clk = pdata->bus_qos_clk.clk;
+		node_dev->bus_qos_clk.enable_only_clk =
+					pdata->bus_qos_clk.enable_only_clk;
+		node_dev->bus_qos_clk.setrate_only_clk =
+					pdata->bus_qos_clk.setrate_only_clk;
+		node_dev->bus_qos_clk.enable = false;
+		strlcpy(node_dev->bus_qos_clk.reg_name,
+			pdata->bus_qos_clk.reg_name, MAX_REG_NAME);
+		node_dev->bus_qos_clk.reg = NULL;
+		MSM_BUS_DBG("%s: Valid bus qos clk node %d\n", __func__,
+						node_dev->node_info->id);
+	}
+
+	if (pdata->num_node_qos_clks) {
+		node_dev->num_node_qos_clks = pdata->num_node_qos_clks;
+		node_dev->node_qos_clks = devm_kzalloc(bus_dev,
+			(node_dev->num_node_qos_clks * sizeof(struct nodeclk)),
+			GFP_KERNEL);
+		if (!node_dev->node_qos_clks) {
+			dev_err(bus_dev, "Failed to alloc memory for qos clk");
+			return -ENOMEM;
+		}
+
+		for (i = 0; i < pdata->num_node_qos_clks; i++) {
+			node_dev->node_qos_clks[i].clk =
+					pdata->node_qos_clks[i].clk;
+			node_dev->node_qos_clks[i].enable_only_clk =
+					pdata->node_qos_clks[i].enable_only_clk;
+			node_dev->node_qos_clks[i].setrate_only_clk =
+				pdata->node_qos_clks[i].setrate_only_clk;
+			node_dev->node_qos_clks[i].enable = false;
+			strlcpy(node_dev->node_qos_clks[i].reg_name,
+				pdata->node_qos_clks[i].reg_name, MAX_REG_NAME);
+			node_dev->node_qos_clks[i].reg = NULL;
+			MSM_BUS_DBG("%s: Valid qos clk[%d] node %d %d Reg%s\n",
+					__func__, i,
+					node_dev->node_info->id,
+					node_dev->num_node_qos_clks,
+					node_dev->node_qos_clks[i].reg_name);
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_copy_node_info(struct msm_bus_node_device_type *pdata,
+				struct device *bus_dev)
+{
+	int ret = 0;
+	struct msm_bus_node_info_type *node_info = NULL;
+	struct msm_bus_node_info_type *pdata_node_info = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+
+	if (!bus_node || !pdata) {
+		ret = -ENXIO;
+		MSM_BUS_ERR("%s: Invalid pointers pdata %p, bus_node %p",
+			__func__, pdata, bus_node);
+		goto exit_copy_node_info;
+	}
+
+	node_info = bus_node->node_info;
+	pdata_node_info = pdata->node_info;
+
+	node_info->name = pdata_node_info->name;
+	node_info->id =  pdata_node_info->id;
+	node_info->bus_device_id = pdata_node_info->bus_device_id;
+	node_info->mas_rpm_id = pdata_node_info->mas_rpm_id;
+	node_info->slv_rpm_id = pdata_node_info->slv_rpm_id;
+	node_info->num_connections = pdata_node_info->num_connections;
+	node_info->num_blist = pdata_node_info->num_blist;
+	node_info->num_bcm_devs = pdata_node_info->num_bcm_devs;
+	node_info->num_qports = pdata_node_info->num_qports;
+	node_info->virt_dev = pdata_node_info->virt_dev;
+	node_info->is_fab_dev = pdata_node_info->is_fab_dev;
+	node_info->is_bcm_dev = pdata_node_info->is_bcm_dev;
+	node_info->qos_params.mode = pdata_node_info->qos_params.mode;
+	node_info->qos_params.prio1 = pdata_node_info->qos_params.prio1;
+	node_info->qos_params.prio0 = pdata_node_info->qos_params.prio0;
+	node_info->qos_params.reg_prio1 = pdata_node_info->qos_params.reg_prio1;
+	node_info->qos_params.reg_prio0 = pdata_node_info->qos_params.reg_prio0;
+	node_info->qos_params.prio_lvl = pdata_node_info->qos_params.prio_lvl;
+	node_info->qos_params.prio_rd = pdata_node_info->qos_params.prio_rd;
+	node_info->qos_params.prio_wr = pdata_node_info->qos_params.prio_wr;
+	node_info->qos_params.gp = pdata_node_info->qos_params.gp;
+	node_info->qos_params.thmp = pdata_node_info->qos_params.thmp;
+	node_info->qos_params.ws = pdata_node_info->qos_params.ws;
+	node_info->qos_params.bw_buffer = pdata_node_info->qos_params.bw_buffer;
+	node_info->agg_params.buswidth = pdata_node_info->agg_params.buswidth;
+	node_info->agg_params.agg_scheme =
+					pdata_node_info->agg_params.agg_scheme;
+	node_info->agg_params.vrail_comp =
+					pdata_node_info->agg_params.vrail_comp;
+	node_info->agg_params.num_aggports =
+				pdata_node_info->agg_params.num_aggports;
+	node_info->agg_params.num_util_levels =
+				pdata_node_info->agg_params.num_util_levels;
+	node_info->agg_params.util_levels = devm_kzalloc(bus_dev,
+			sizeof(struct node_util_levels_type) *
+			node_info->agg_params.num_util_levels,
+			GFP_KERNEL);
+	if (!node_info->agg_params.util_levels) {
+		MSM_BUS_ERR("%s: Agg util level alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+	memcpy(node_info->agg_params.util_levels,
+		pdata_node_info->agg_params.util_levels,
+		sizeof(struct node_util_levels_type) *
+			pdata_node_info->agg_params.num_util_levels);
+
+	node_info->dev_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->dev_connections) {
+		MSM_BUS_ERR("%s:Bus dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->connections = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_connections,
+			GFP_KERNEL);
+	if (!node_info->connections) {
+		MSM_BUS_ERR("%s:Bus connections alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->connections,
+		pdata_node_info->connections,
+		sizeof(int) * pdata_node_info->num_connections);
+
+	node_info->black_connections = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_blist,
+			GFP_KERNEL);
+	if (!node_info->black_connections) {
+		MSM_BUS_ERR("%s: Bus black connections alloc failed\n",
+			__func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->black_listed_connections = devm_kzalloc(bus_dev,
+			pdata_node_info->num_blist * sizeof(int),
+			GFP_KERNEL);
+	if (!node_info->black_listed_connections) {
+		MSM_BUS_ERR("%s:Bus black list connections alloc failed\n",
+					__func__);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->black_listed_connections,
+		pdata_node_info->black_listed_connections,
+		sizeof(int) * pdata_node_info->num_blist);
+
+	node_info->bcm_devs = devm_kzalloc(bus_dev,
+			sizeof(struct device *) *
+				pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_devs) {
+		MSM_BUS_ERR("%s:Bcm dev connections alloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	node_info->bcm_dev_ids = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_bcm_devs,
+			GFP_KERNEL);
+	if (!node_info->bcm_dev_ids) {
+		MSM_BUS_ERR("%s:Bcm dev ids alloc failed\n", __func__);
+		devm_kfree(bus_dev, node_info->bcm_devs);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->bcm_dev_ids,
+		pdata_node_info->bcm_dev_ids,
+		sizeof(int) * pdata_node_info->num_bcm_devs);
+
+	node_info->qport = devm_kzalloc(bus_dev,
+			sizeof(int) * pdata_node_info->num_qports,
+			GFP_KERNEL);
+	if (!node_info->qport) {
+		MSM_BUS_ERR("%s:Bus qport allocation failed\n", __func__);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->black_listed_connections);
+		ret = -ENOMEM;
+		goto exit_copy_node_info;
+	}
+
+	memcpy(node_info->qport,
+		pdata_node_info->qport,
+		sizeof(int) * pdata_node_info->num_qports);
+
+exit_copy_node_info:
+	return ret;
+}
+
+static struct device *msm_bus_device_init(
+			struct msm_bus_node_device_type *pdata)
+{
+	struct device *bus_dev = NULL;
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_info_type *node_info = NULL;
+	int ret = 0;
+
+	/*
+	 * Init here so we can use devm calls
+	 */
+
+	bus_node = kzalloc(sizeof(struct msm_bus_node_device_type), GFP_KERNEL);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s:Bus node alloc failed\n", __func__);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+	bus_dev = &bus_node->dev;
+	device_initialize(bus_dev);
+
+	node_info = devm_kzalloc(bus_dev,
+			sizeof(struct msm_bus_node_info_type), GFP_KERNEL);
+	if (!node_info) {
+		MSM_BUS_ERR("%s:Bus node info alloc failed\n", __func__);
+		devm_kfree(bus_dev, bus_node);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+
+	bus_node->node_info = node_info;
+	bus_node->ap_owned = pdata->ap_owned;
+	bus_dev->of_node = pdata->of_node;
+
+	if (msm_bus_copy_node_info(pdata, bus_dev) < 0) {
+		devm_kfree(bus_dev, bus_node);
+		devm_kfree(bus_dev, node_info);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+
+	bus_dev->bus = &msm_bus_type;
+	dev_set_name(bus_dev, "%s", bus_node->node_info->name);
+
+	ret = device_add(bus_dev);
+	if (ret < 0) {
+		MSM_BUS_ERR("%s: Error registering device %d",
+				__func__, pdata->node_info->id);
+		devm_kfree(bus_dev, bus_node);
+		devm_kfree(bus_dev, node_info->dev_connections);
+		devm_kfree(bus_dev, node_info->connections);
+		devm_kfree(bus_dev, node_info->black_connections);
+		devm_kfree(bus_dev, node_info->black_listed_connections);
+		devm_kfree(bus_dev, node_info);
+		kfree(bus_dev);
+		bus_dev = NULL;
+		goto exit_device_init;
+	}
+	device_create_file(bus_dev, &dev_attr_bw);
+	INIT_LIST_HEAD(&bus_node->devlist);
+
+exit_device_init:
+	return bus_dev;
+}
+
+static int msm_bus_setup_dev_conn(struct device *bus_dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+	struct msm_bus_node_device_type *bcm_node = NULL;
+	int ret = 0;
+	int j;
+	struct msm_bus_node_device_type *fab;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_setup_dev_conn;
+	}
+
+	/* Setup parent bus device for this node */
+	if (!bus_node->node_info->is_fab_dev) {
+		struct device *bus_parent_device =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bus_device_id,
+				msm_bus_device_match_adhoc);
+
+		if (!bus_parent_device) {
+			MSM_BUS_ERR("%s: Error finding parentdev %d parent %d",
+				__func__,
+				bus_node->node_info->id,
+				bus_node->node_info->bus_device_id);
+			ret = -ENXIO;
+			goto exit_setup_dev_conn;
+		}
+		bus_node->node_info->bus_device = bus_parent_device;
+		fab = to_msm_bus_node(bus_parent_device);
+		list_add_tail(&bus_node->dev_link, &fab->devlist);
+	}
+
+	bus_node->node_info->is_traversed = false;
+
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		bus_node->node_info->dev_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->dev_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->connections[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_blist; j++) {
+		bus_node->node_info->black_connections[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->
+				black_listed_connections[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->black_connections[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d\n",
+				__func__, bus_node->node_info->
+				black_listed_connections[j],
+				bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+	}
+
+	for (j = 0; j < bus_node->node_info->num_bcm_devs; j++) {
+		bus_node->node_info->bcm_devs[j] =
+			bus_find_device(&msm_bus_type, NULL,
+				(void *)&bus_node->node_info->bcm_dev_ids[j],
+				msm_bus_device_match_adhoc);
+
+		if (!bus_node->node_info->bcm_devs[j]) {
+			MSM_BUS_ERR("%s: Error finding conn %d for device %d",
+				__func__, bus_node->node_info->bcm_dev_ids[j],
+				 bus_node->node_info->id);
+			ret = -ENODEV;
+			goto exit_setup_dev_conn;
+		}
+		bcm_node = to_bcm_bus_node(bus_node->node_info->bcm_devs[j]);
+		bcm_node->bcmdev->num_bus_devs++;
+	}
+
+exit_setup_dev_conn:
+	return ret;
+}
+
+static int msm_bus_node_debug(struct device *bus_dev, void *data)
+{
+	int j;
+	int ret = 0;
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(bus_dev);
+	if (!bus_node) {
+		MSM_BUS_ERR("%s: Can't get device info", __func__);
+		ret = -ENODEV;
+		goto exit_node_debug;
+	}
+
+	MSM_BUS_DBG("Device = %d buswidth %u", bus_node->node_info->id,
+				bus_node->node_info->agg_params.buswidth);
+	for (j = 0; j < bus_node->node_info->num_connections; j++) {
+		struct msm_bus_node_device_type *bdev =
+		to_msm_bus_node(bus_node->node_info->dev_connections[j]);
+		MSM_BUS_DBG("\n\t Connection[%d] %d", j, bdev->node_info->id);
+	}
+
+	if (bus_node->node_info->is_fab_dev)
+		msm_bus_floor_init(bus_dev);
+
+exit_node_debug:
+	return ret;
+}
+
+static int msm_bus_free_dev(struct device *dev, void *data)
+{
+	struct msm_bus_node_device_type *bus_node = NULL;
+
+	bus_node = to_msm_bus_node(dev);
+
+	if (bus_node)
+		MSM_BUS_ERR("\n%s: Removing device %d", __func__,
+						bus_node->node_info->id);
+	device_unregister(dev);
+	kfree(bus_node);
+	return 0;
+}
+
+int msm_bus_device_remove(struct platform_device *pdev)
+{
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_free_dev);
+	return 0;
+}
+
+static int msm_bus_device_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	int ret = 0;
+	struct msm_bus_device_node_registration *pdata;
+
+	MSM_BUS_ERR("msm_bus: Probe started");
+	/* If possible, get pdata from device-tree */
+	if (pdev->dev.of_node)
+		pdata = msm_bus_of_to_pdata(pdev);
+	else {
+		pdata = (struct msm_bus_device_node_registration *)pdev->
+			dev.platform_data;
+	}
+
+	MSM_BUS_ERR("msm_bus: DT Parsing complete");
+
+	if (IS_ERR_OR_NULL(pdata)) {
+		MSM_BUS_ERR("No platform data found");
+		ret = -ENODATA;
+		goto exit_device_probe;
+	}
+
+	for (i = 0; i < pdata->num_devices; i++) {
+		struct device *node_dev = NULL;
+
+		node_dev = msm_bus_device_init(&pdata->info[i]);
+
+		if (!node_dev) {
+			MSM_BUS_ERR("%s: Error during dev init for %d",
+				__func__, pdata->info[i].node_info->id);
+			ret = -ENXIO;
+			goto exit_device_probe;
+		}
+
+		ret = msm_bus_init_clk(node_dev, &pdata->info[i]);
+		if (ret) {
+			MSM_BUS_ERR("\n Failed to init bus clk. ret %d", ret);
+			msm_bus_device_remove(pdev);
+			goto exit_device_probe;
+		}
+		/*Is this a fabric device ?*/
+		if (pdata->info[i].node_info->is_fab_dev) {
+			MSM_BUS_DBG("%s: %d is a fab", __func__,
+						pdata->info[i].node_info->id);
+			ret = msm_bus_fabric_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing fab %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+		if (pdata->info[i].node_info->is_bcm_dev) {
+			ret = msm_bus_bcm_init(node_dev, &pdata->info[i]);
+			if (ret) {
+				MSM_BUS_ERR("%s: Error initializing bcm %d",
+					__func__, pdata->info[i].node_info->id);
+				goto exit_device_probe;
+			}
+		}
+	}
+
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						msm_bus_setup_dev_conn);
+	if (ret) {
+		MSM_BUS_ERR("%s: Error setting up dev connections", __func__);
+		goto exit_device_probe;
+	}
+
+	/*
+	 * Setup the QoS for the nodes, don't check the error codes as we
+	 * defer QoS programming to the first transaction in cases of failure
+	 * and we want to continue the probe.
+	 */
+	ret = bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_dev_init_qos);
+
+	/* Register the arb layer ops */
+	msm_bus_arb_setops_adhoc(&arb_ops);
+	bus_for_each_dev(&msm_bus_type, NULL, NULL, msm_bus_node_debug);
+
+	devm_kfree(&pdev->dev, pdata->info);
+	devm_kfree(&pdev->dev, pdata);
+exit_device_probe:
+	return ret;
+}
+
+static int msm_bus_device_rules_probe(struct platform_device *pdev)
+{
+	struct bus_rule_type *rule_data = NULL;
+	int num_rules = 0;
+
+	num_rules = msm_bus_of_get_static_rules(pdev, &rule_data);
+
+	if (!rule_data)
+		goto exit_rules_probe;
+
+	msm_rule_register(num_rules, rule_data, NULL);
+	static_rules.num_rules = num_rules;
+	static_rules.rules = rule_data;
+	pdev->dev.platform_data = &static_rules;
+
+exit_rules_probe:
+	return 0;
+}
+
+int msm_bus_device_rules_remove(struct platform_device *pdev)
+{
+	struct static_rules_type *static_rules = NULL;
+
+	static_rules = pdev->dev.platform_data;
+	if (static_rules)
+		msm_rule_unregister(static_rules->num_rules,
+					static_rules->rules, NULL);
+	return 0;
+}
+
+
+static const struct of_device_id rules_match[] = {
+	{.compatible = "qcom,msm-bus-static-bw-rules"},
+	{}
+};
+
+static struct platform_driver msm_bus_rules_driver = {
+	.probe = msm_bus_device_rules_probe,
+	.remove = msm_bus_device_rules_remove,
+	.driver = {
+		.name = "msm_bus_rules_device",
+		.owner = THIS_MODULE,
+		.of_match_table = rules_match,
+	},
+};
+
+static const struct of_device_id fabric_match[] = {
+	{.compatible = "qcom,msm-bus-device"},
+	{}
+};
+
+static struct platform_driver msm_bus_device_driver = {
+	.probe = msm_bus_device_probe,
+	.remove = msm_bus_device_remove,
+	.driver = {
+		.name = "msm_bus_device",
+		.owner = THIS_MODULE,
+		.of_match_table = fabric_match,
+	},
+};
+
+int __init msm_bus_device_init_driver(void)
+{
+	int rc;
+
+	MSM_BUS_ERR("msm_bus_fabric_rpmh_init_driver\n");
+	rc =  platform_driver_register(&msm_bus_device_driver);
+
+	if (rc) {
+		MSM_BUS_ERR("Failed to register bus device driver");
+		return rc;
+	}
+	return platform_driver_register(&msm_bus_rules_driver);
+}
+
+int __init msm_bus_device_late_init(void)
+{
+	int rc;
+
+	MSM_BUS_ERR("msm_bus_late_init: Remove handoff bw requests\n");
+	rc = bus_for_each_dev(&msm_bus_type, NULL, NULL,
+						bcm_remove_handoff_req);
+
+	return rc;
+}
+subsys_initcall(msm_bus_device_init_driver);
+late_initcall_sync(msm_bus_device_late_init);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_id.c b/drivers/soc/qcom/msm_bus/msm_bus_id.c
new file mode 100644
index 0000000..33b5657
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_id.c
@@ -0,0 +1,101 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/socinfo.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_bimc.h"
+
+static uint32_t master_iids[MSM_BUS_MASTER_LAST];
+static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY];
+
+static void msm_bus_assign_iids(struct msm_bus_fabric_registration
+	*fabreg, int fabid)
+{
+	int i;
+
+	for (i = 0; i < fabreg->len; i++) {
+		if (!fabreg->info[i].gateway) {
+			fabreg->info[i].priv_id = fabid + fabreg->info[i].id;
+			if (fabreg->info[i].id < SLAVE_ID_KEY) {
+				if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) {
+					WARN(1, "id %d exceeds array size!\n",
+						fabreg->info[i].id);
+					continue;
+				}
+
+				master_iids[fabreg->info[i].id] =
+					fabreg->info[i].priv_id;
+			} else {
+				if ((fabreg->info[i].id - SLAVE_ID_KEY) >=
+					(MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) {
+					WARN(1, "id %d exceeds array size!\n",
+						fabreg->info[i].id);
+					continue;
+				}
+
+				slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)]
+					= fabreg->info[i].priv_id;
+			}
+		} else {
+			fabreg->info[i].priv_id = fabreg->info[i].id;
+		}
+	}
+}
+
+static int msm_bus_get_iid(int id)
+{
+	if ((id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) ||
+		id >= MSM_BUS_SLAVE_LAST) {
+		MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id);
+		return -EINVAL;
+	}
+
+	return CHECK_ID(((id < SLAVE_ID_KEY) ? master_iids[id] :
+		slave_iids[id - SLAVE_ID_KEY]), id);
+}
+
+static struct msm_bus_board_algorithm msm_bus_id_algo = {
+	.get_iid = msm_bus_get_iid,
+	.assign_iids = msm_bus_assign_iids,
+};
+
+int msm_bus_board_rpm_get_il_ids(uint16_t *id)
+{
+	return -ENXIO;
+}
+
+void msm_bus_board_init(struct msm_bus_fabric_registration *pdata)
+{
+	if (machine_is_msm8226())
+		msm_bus_id_algo.board_nfab = NFAB_MSM8226;
+	else if (machine_is_msm8610())
+		msm_bus_id_algo.board_nfab = NFAB_MSM8610;
+
+	pdata->board_algo = &msm_bus_id_algo;
+}
+
+void msm_bus_board_set_nfab(struct msm_bus_fabric_registration *pdata,
+	int nfab)
+{
+	if (nfab <= 0)
+		return;
+
+	msm_bus_id_algo.board_nfab = nfab;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc.h b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
new file mode 100644
index 0000000..8735edb
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_NOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_NOC_H
+
+enum msm_bus_noc_qos_mode_type {
+	NOC_QOS_MODE_FIXED = 0,
+	NOC_QOS_MODE_LIMITER,
+	NOC_QOS_MODE_BYPASS,
+	NOC_QOS_MODE_REGULATOR,
+	NOC_QOS_MODE_MAX,
+};
+
+enum msm_bus_noc_qos_mode_perm {
+	NOC_QOS_PERM_MODE_FIXED = (1 << NOC_QOS_MODE_FIXED),
+	NOC_QOS_PERM_MODE_LIMITER = (1 << NOC_QOS_MODE_LIMITER),
+	NOC_QOS_PERM_MODE_BYPASS = (1 << NOC_QOS_MODE_BYPASS),
+	NOC_QOS_PERM_MODE_REGULATOR = (1 << NOC_QOS_MODE_REGULATOR),
+};
+
+#define NOC_QOS_MODES_ALL_PERM (NOC_QOS_PERM_MODE_FIXED | \
+	NOC_QOS_PERM_MODE_LIMITER | NOC_QOS_PERM_MODE_BYPASS | \
+	NOC_QOS_PERM_MODE_REGULATOR)
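+
+/*
+ * perm_mode arguments are bitmasks of the NOC_QOS_PERM_MODE_* values
+ * above; mode changes are gated on ((1 << mode) & perm_mode). For
+ * example, with perm_mode = NOC_QOS_PERM_MODE_FIXED |
+ * NOC_QOS_PERM_MODE_BYPASS (0x5), a request for NOC_QOS_MODE_LIMITER
+ * (1 << 1 == 0x2) is silently ignored.
+ */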
+
+struct msm_bus_noc_commit {
+	struct msm_bus_node_hw_info *mas;
+	struct msm_bus_node_hw_info *slv;
+};
+
+struct msm_bus_noc_info {
+	void __iomem *base;
+	uint32_t base_addr;
+	uint32_t nmasters;
+	uint32_t nqos_masters;
+	uint32_t nslaves;
+	uint32_t qos_freq; /* QOS Clock in KHz */
+	uint32_t qos_baseoffset;
+	uint32_t qos_delta;
+	uint32_t *mas_modes;
+	struct msm_bus_noc_commit cdata[NUM_CTX];
+};
+
+struct msm_bus_noc_qos_priority {
+	uint32_t high_prio;
+	uint32_t low_prio;
+	uint32_t read_prio;
+	uint32_t write_prio;
+	uint32_t p1;
+	uint32_t p0;
+};
+
+struct msm_bus_noc_qos_bw {
+	uint64_t bw; /* Bandwidth in bytes per second */
+	uint32_t ws; /* Window size in nano seconds */
+};
+
+void msm_bus_noc_init(struct msm_bus_noc_info *ninfo);
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode);
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *qprio);
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+	uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw);
+#endif /*_ARCH_ARM_MACH_MSM_BUS_NOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
new file mode 100644
index 0000000..f51939f
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_adhoc.c
@@ -0,0 +1,590 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_adhoc.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16	/* 16 bytes minimum for saturation */
+#define BW_SCALE  256	/* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET		0x00003000
+#define QOS_DEFAULT_DELTA		0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD	1
+#define MIN_BW_FIELD	1
+
+#define NOC_QOS_REG_BASE(b, o)		((b) + (o))
+
+#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_priorityn {
+	NOC_QOS_PRIORITYn_RMSK		= 0x0000000f,
+	NOC_QOS_PRIORITYn_MAXn		= 32,
+	NOC_QOS_PRIORITYn_P1_BMSK	= 0xc,
+	NOC_QOS_PRIORITYn_P1_SHFT	= 0x2,
+	NOC_QOS_PRIORITYn_P0_BMSK	= 0x3,
+	NOC_QOS_PRIORITYn_P0_SHFT	= 0x0,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+	NOC_QOS_MODEn_RMSK		= 0x00000003,
+	NOC_QOS_MODEn_MAXn		= 32,
+	NOC_QOS_MODEn_MODE_BMSK		= 0x3,
+	NOC_QOS_MODEn_MODE_SHFT		= 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+	NOC_QOS_BWn_RMSK		= 0x0000ffff,
+	NOC_QOS_BWn_MAXn		= 32,
+	NOC_QOS_BWn_BW_BMSK		= 0xffff,
+	NOC_QOS_BWn_BW_SHFT		= 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+	NOC_QOS_SATn_RMSK		= 0x000003ff,
+	NOC_QOS_SATn_MAXn		= 32,
+	NOC_QOS_SATn_SAT_BMSK		= 0x3ff,
+	NOC_QOS_SATn_SAT_SHFT		= 0x0,
+};
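+
+/*
+ * Example register layout using the default offsets above
+ * (QOS_DEFAULT_BASEOFFSET = 0x3000, QOS_DEFAULT_DELTA = 0x80) for QoS
+ * port n = 2:
+ *	PRIORITYn: base + 0x3000 + 0x08 + 2 * 0x80 = base + 0x3108
+ *	MODEn:     base + 0x3000 + 0x0c + 2 * 0x80 = base + 0x310c
+ *	BWn:       base + 0x3000 + 0x10 + 2 * 0x80 = base + 0x3110
+ *	SATn:      base + 0x3000 + 0x14 + 2 * 0x80 = base + 0x3114
+ */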
+
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+/**
+ * Calculates bw hardware is using from register values
+ * bw returned is in bytes/sec
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t rem, scale;
+
+	res = 2 * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	rem = noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
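+
+/*
+ * Worked example, assuming a 19.2 MHz QoS time base (qos_freq = 19200
+ * in KHz) and bw_field = 256: res = 2 * 19200 * 256 = 9830400, divided
+ * by BW_SCALE * 1000 = 256000 gives 38 (38.4 truncated), so noc_bw()
+ * returns 38000000 bytes/s, i.e. roughly 38 MB/s.
+ */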
+
+/**
+ * Calculate the max BW in Bytes/s for a given time-base.
+ */
+static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq_khz)
+{
+	uint64_t bw_temp = 2 * qos_freq_khz * bw_field;
+	uint32_t scale = 1000 * BW_SCALE;
+
+	noc_div(&bw_temp, scale);
+	return bw_temp * 1000000;
+}
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/**
+ * Calculates ws hardware is using from register values
+ * ws returned is in nanoseconds
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
+
+/* Calculate bandwidth field value for requested bandwidth  */
+static uint32_t noc_bw_field(uint64_t bw_bps, uint32_t qos_freq_khz)
+{
+	uint32_t bw_field = 0;
+
+	if (bw_bps) {
+		uint32_t rem;
+		uint64_t bw_capped = min_t(uint64_t, bw_bps,
+						MAX_BW(qos_freq_khz));
+		uint64_t bwc = bw_capped * BW_SCALE;
+		uint64_t qf = 2 * qos_freq_khz * 1000;
+
+		rem = noc_div(&bwc, qf);
+		bw_field = (uint32_t)max_t(unsigned long, bwc, MIN_BW_FIELD);
+		bw_field = (uint32_t)min_t(unsigned long, bw_field,
+								MAX_BW_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
+	return bw_field;
+}
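+
+/*
+ * Worked example of the inverse conversion, again assuming a 19.2 MHz
+ * time base: bw_bps = 400000000 (400 MB/s) gives bwc = 400000000 * 256
+ * and qf = 2 * 19200 * 1000 = 38400000, so bw_field = 102400000000 /
+ * 38400000 = 2666 after clamping to [MIN_BW_FIELD, MAX_BW_FIELD].
+ * Feeding 2666 back into noc_bw() yields ~399 MB/s, i.e. the requested
+ * bandwidth within integer rounding.
+ */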
+
+static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
+{
+	uint32_t sat_field = 0;
+
+	if (bw) {
+		/* Limit to max bw and scale bw to 100 KB increments */
+		uint64_t tbw, tscale;
+		uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
+		uint32_t rem = noc_div(&bw_scaled, 100000);
+
+		/*
+		 *	SATURATION =
+		 *	(BW [MBps] * integration window [us] *
+		 *		time base frequency [MHz]) / (256 * 16)
+		 */
+		tbw = bw_scaled * ws * qos_freq;
+		tscale = BW_SCALE * SAT_SCALE * 1000000LL;
+		rem = noc_div(&tbw, tscale);
+		sat_field = (uint32_t)max_t(unsigned long, tbw, MIN_SAT_FIELD);
+		sat_field = (uint32_t)min_t(unsigned long, sat_field,
+							MAX_SAT_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
+	return sat_field;
+}
+
+static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint8_t mode,
+		uint8_t perm_mode)
+{
+	if (mode < NOC_QOS_MODE_MAX &&
+		((1 << mode) & perm_mode)) {
+		uint32_t reg_val;
+
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
+		writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
+			(mode & NOC_QOS_MODEn_MODE_BMSK)),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+	}
+	/* Ensure qos mode is set before exiting */
+	wmb();
+}
+
+static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_qos_priority *priority)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
+	val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
+		(val & NOC_QOS_PRIORITYn_P1_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+								qos_delta))
+		& NOC_QOS_PRIORITYn_RMSK;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
+		(priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
+		uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+		uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
+{
+	uint32_t reg_val, val, mode;
+
+	if (!qos_freq) {
+		MSM_BUS_DBG("Zero QoS Freq\n");
+		return;
+	}
+
+	/* BW registers apply only when Limiter or Regulator mode is permitted */
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
+		uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
+			qos_freq);
+
+		MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
+			perm_mode, bw_val, sat_val);
+		/*
+		 * If currently in Limiter/Regulator mode, switch to Fixed
+		 * mode first to clear the QoS accumulator.
+		 */
+		mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+		if (mode == NOC_QOS_MODE_REGULATOR || mode ==
+			NOC_QOS_MODE_LIMITER) {
+			reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
+				base, qos_off, mport, qos_delta));
+			val = NOC_QOS_MODE_FIXED;
+			writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+				| (val & NOC_QOS_MODEn_MODE_BMSK),
+				NOC_QOS_MODEn_ADDR(base, qos_off, mport,
+								qos_delta));
+		}
+
+		reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
+								qos_delta));
+		val = bw_val << NOC_QOS_BWn_BW_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
+			(val & NOC_QOS_BWn_BW_BMSK)),
+			NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_BWn_BW_BMSK)) | (val &
+			NOC_QOS_BWn_BW_BMSK)));
+
+		reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
+			mport, qos_delta));
+		val = sat_val << NOC_QOS_SATn_SAT_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
+			(val & NOC_QOS_SATn_SAT_BMSK)),
+			NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_SATn_SAT_BMSK)) | (val &
+			NOC_QOS_SATn_SAT_BMSK)));
+
+		/* Set mode back to what it was initially */
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta));
+		writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+			| (mode & NOC_QOS_MODEn_MODE_BMSK),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+		/* Ensure that all writes for bandwidth registers have
+		 * completed before returning
+		 */
+		wmb();
+	}
+}
+
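+/*
+ * Return the QoS mode currently programmed in hardware when all modes are
+ * permitted; otherwise return the highest defined mode bit set in @mode.
+ */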
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
+{
+	if (perm_mode == NOC_QOS_MODES_ALL_PERM)
+		return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+	else
+		return 31 - __CLZ(mode &
+			NOC_QOS_MODES_ALL_PERM);
+}
+
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *priority)
+{
+	priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
+		NOC_QOS_PRIORITYn_P1_SHFT;
+
+	priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
+		NOC_QOS_PRIORITYn_P0_SHFT;
+}
+
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+static bool msm_bus_noc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == NOC_QOS_MODE_LIMITER) ||
+			(mode == NOC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
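+/*
+ * Program QoS for every master port of a node: priority for any non-bypass
+ * mode, bandwidth/window for Limiter/Regulator modes, then the mode itself.
+ */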
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_priority prio;
+	int ret = 0;
+	int i;
+
+	prio.p1 = info->node_info->qos_params.prio1;
+	prio.p0 = info->node_info->qos_params.prio0;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i], qos_delta,
+					&prio);
+
+			if (info->node_info->qos_params.mode !=
+							NOC_QOS_MODE_FIXED) {
+				struct msm_bus_noc_qos_bw qbw;
+
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = 0;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i],
+					qos_delta,
+					info->node_info->qos_params.mode,
+					&qbw);
+			}
+		}
+
+		noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
+				qos_delta, info->node_info->qos_params.mode,
+				(1 << info->node_info->qos_params.mode));
+	}
+err_qos_init:
+	return ret;
+}
+
+static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int ret = 0;
+	uint64_t bw = 0;
+	int i;
+	struct msm_bus_node_info_type *info = dev->node_info;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
+		(info->qos_params.mode ==
+			NOC_QOS_MODE_LIMITER))) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		for (i = 0; i < info->num_qports; i++) {
+			if (!info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+
+			qos_bw.bw = bw;
+			qos_bw.ws = info->qos_params.ws;
+			msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
+				info->qport[i], qos_delta,
+				(1 << info->qos_params.mode), &qos_bw);
+			MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+				qos_bw.ws);
+		}
+	}
+	return ret;
+}
+
+static int msm_bus_noc_set_lim_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base,
+					qos_off, qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_LIMITER), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_LIMITER,
+					(1 << NOC_QOS_MODE_LIMITER));
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_noc_set_reg_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_priority prio;
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			prio.p1 =
+				info->node_info->qos_params.reg_prio1;
+			prio.p0 =
+				info->node_info->qos_params.reg_prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&prio);
+		}
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_REGULATOR), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_REGULATOR,
+					(1 << NOC_QOS_MODE_REGULATOR));
+		}
+	}
+	return 0;
+}
+
+static int msm_bus_noc_set_def_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode ==
+						NOC_QOS_MODE_FIXED) {
+			struct msm_bus_noc_qos_priority prio;
+
+			prio.p1 =
+				info->node_info->qos_params.prio1;
+			prio.p0 =
+				info->node_info->qos_params.prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta, &prio);
+		}
+		noc_set_qos_mode(qos_base, qos_off,
+			info->node_info->qport[i],
+			qos_delta,
+			info->node_info->qos_params.mode,
+			(1 << info->node_info->qos_params.mode));
+	}
+	return 0;
+}
+
+static int msm_bus_noc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int ret = 0;
+
+	if (!(info && info->node_info->num_qports)) {
+		MSM_BUS_ERR("Invalid Node info or no Qports to program");
+		ret = -ENXIO;
+		goto exit_limit_mport;
+	}
+
+	if (lim_bw) {
+		switch (enable_lim) {
+		case THROTTLE_REG:
+			msm_bus_noc_set_reg_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		case THROTTLE_ON:
+			msm_bus_noc_set_lim_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		default:
+			msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		}
+	} else {
+		msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+					qos_delta, qos_freq, lim_bw);
+	}
+
+exit_limit_mport:
+	return ret;
+}
+
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_noc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg = msm_bus_noc_update_bw_reg;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
new file mode 100644
index 0000000..c501e80
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_noc_rpmh.c
@@ -0,0 +1,590 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: NOC: %s(): " fmt, __func__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+#include "msm_bus_noc.h"
+#include "msm_bus_rpmh.h"
+
+/* NOC_QOS generic */
+#define __CLZ(x) ((8 * sizeof(uint32_t)) - 1 - __fls(x))
+#define SAT_SCALE 16	/* 16 bytes minimum for saturation */
+#define BW_SCALE  256	/* 1/256 byte per cycle unit */
+#define QOS_DEFAULT_BASEOFFSET		0x00003000
+#define QOS_DEFAULT_DELTA		0x80
+#define MAX_BW_FIELD (NOC_QOS_BWn_BW_BMSK >> NOC_QOS_BWn_BW_SHFT)
+#define MAX_SAT_FIELD (NOC_QOS_SATn_SAT_BMSK >> NOC_QOS_SATn_SAT_SHFT)
+#define MIN_SAT_FIELD	1
+#define MIN_BW_FIELD	1
+
+#define NOC_QOS_REG_BASE(b, o)		((b) + (o))
+
+#define NOC_QOS_PRIORITYn_ADDR(b, o, n, d)	\
+	(NOC_QOS_REG_BASE(b, o) + 0x8 + (d) * (n))
+enum noc_qos_id_priorityn {
+	NOC_QOS_PRIORITYn_RMSK		= 0x0000000f,
+	NOC_QOS_PRIORITYn_MAXn		= 32,
+	NOC_QOS_PRIORITYn_P1_BMSK	= 0xc,
+	NOC_QOS_PRIORITYn_P1_SHFT	= 0x2,
+	NOC_QOS_PRIORITYn_P0_BMSK	= 0x3,
+	NOC_QOS_PRIORITYn_P0_SHFT	= 0x0,
+};
+
+#define NOC_QOS_MODEn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0xC + (d) * (n))
+enum noc_qos_id_moden_rmsk {
+	NOC_QOS_MODEn_RMSK		= 0x00000003,
+	NOC_QOS_MODEn_MAXn		= 32,
+	NOC_QOS_MODEn_MODE_BMSK		= 0x3,
+	NOC_QOS_MODEn_MODE_SHFT		= 0x0,
+};
+
+#define NOC_QOS_BWn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x10 + (d) * (n))
+enum noc_qos_id_bwn {
+	NOC_QOS_BWn_RMSK		= 0x0000ffff,
+	NOC_QOS_BWn_MAXn		= 32,
+	NOC_QOS_BWn_BW_BMSK		= 0xffff,
+	NOC_QOS_BWn_BW_SHFT		= 0x0,
+};
+
+/* QOS Saturation registers */
+#define NOC_QOS_SATn_ADDR(b, o, n, d) \
+	(NOC_QOS_REG_BASE(b, o) + 0x14 + (d) * (n))
+enum noc_qos_id_saturationn {
+	NOC_QOS_SATn_RMSK		= 0x000003ff,
+	NOC_QOS_SATn_MAXn		= 32,
+	NOC_QOS_SATn_SAT_BMSK		= 0x3ff,
+	NOC_QOS_SATn_SAT_SHFT		= 0x0,
+};
+
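+/*
+ * In-place divide helper: divides *a by b (quotient left in *a) and returns
+ * the remainder. A dividend smaller than the divisor is truncated to zero,
+ * with 1 returned as a non-zero remainder indicator.
+ */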
+static int noc_div(uint64_t *a, uint32_t b)
+{
+	if ((*a > 0) && (*a < b)) {
+		*a = 0;
+		return 1;
+	} else {
+		return do_div(*a, b);
+	}
+}
+
+/**
+ * Calculate the bandwidth the hardware is using from the register value.
+ * The returned bandwidth is in bytes/sec.
+ */
+static uint64_t noc_bw(uint32_t bw_field, uint32_t qos_freq)
+{
+	uint64_t res;
+	uint32_t rem, scale;
+
+	res = 2 * qos_freq * bw_field;
+	scale = BW_SCALE * 1000;
+	rem = noc_div(&res, scale);
+	MSM_BUS_DBG("NOC: Calculated bw: %llu\n", res * 1000000ULL);
+	return res * 1000000ULL;
+}
+
+/**
+ * Calculate the max BW in Bytes/s for a given time-base.
+ */
+static uint32_t noc_bw_ceil(long int bw_field, uint32_t qos_freq_khz)
+{
+	uint64_t bw_temp = 2 * qos_freq_khz * bw_field;
+	uint32_t scale = 1000 * BW_SCALE;
+
+	noc_div(&bw_temp, scale);
+	return bw_temp * 1000000;
+}
+#define MAX_BW(timebase) noc_bw_ceil(MAX_BW_FIELD, (timebase))
+
+/**
+ * Calculate the window size the hardware is using from the register values.
+ * The returned window size is in nanoseconds.
+ */
+static uint32_t noc_ws(uint64_t bw, uint32_t sat, uint32_t qos_freq)
+{
+	if (bw && qos_freq) {
+		uint32_t bwf = bw * qos_freq;
+		uint64_t scale = 1000000000000LL * BW_SCALE *
+			SAT_SCALE * sat;
+		noc_div(&scale, bwf);
+		MSM_BUS_DBG("NOC: Calculated ws: %llu\n", scale);
+		return scale;
+	}
+
+	return 0;
+}
+#define MAX_WS(bw, timebase) noc_ws((bw), MAX_SAT_FIELD, (timebase))
+
+/* Calculate bandwidth field value for requested bandwidth  */
+static uint32_t noc_bw_field(uint64_t bw_bps, uint32_t qos_freq_khz)
+{
+	uint32_t bw_field = 0;
+
+	if (bw_bps) {
+		uint32_t rem;
+		uint64_t bw_capped = min_t(uint64_t, bw_bps,
+						MAX_BW(qos_freq_khz));
+		uint64_t bwc = bw_capped * BW_SCALE;
+		uint64_t qf = 2 * qos_freq_khz * 1000;
+
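+		/*
+		 * Field value is bw_bps * BW_SCALE / (2 * qos_freq in Hz),
+		 * clamped to the [MIN_BW_FIELD, MAX_BW_FIELD] range.
+		 */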
+		rem = noc_div(&bwc, qf);
+		bw_field = (uint32_t)max_t(unsigned long, bwc, MIN_BW_FIELD);
+		bw_field = (uint32_t)min_t(unsigned long, bw_field,
+								MAX_BW_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: bw_field: %u\n", bw_field);
+	return bw_field;
+}
+
+static uint32_t noc_sat_field(uint64_t bw, uint32_t ws, uint32_t qos_freq)
+{
+	uint32_t sat_field = 0;
+
+	if (bw) {
+		/* Limit to max bw and scale bw to 100 KB increments */
+		uint64_t tbw, tscale;
+		uint64_t bw_scaled = min_t(uint64_t, bw, MAX_BW(qos_freq));
+		uint32_t rem = noc_div(&bw_scaled, 100000);
+
+		/*
+		 *	SATURATION =
+		 *	(BW [MBps] * integration window [us] *
+		 *		time base frequency [MHz]) / (256 * 16)
+		 */
+		tbw = bw_scaled * ws * qos_freq;
+		tscale = BW_SCALE * SAT_SCALE * 1000000LL;
+		rem = noc_div(&tbw, tscale);
+		sat_field = (uint32_t)max_t(unsigned long, tbw, MIN_SAT_FIELD);
+		sat_field = (uint32_t)min_t(unsigned long, sat_field,
+							MAX_SAT_FIELD);
+	}
+
+	MSM_BUS_DBG("NOC: sat_field: %d\n", sat_field);
+	return sat_field;
+}
+
+static void noc_set_qos_mode(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta, uint8_t mode,
+		uint8_t perm_mode)
+{
+	if (mode < NOC_QOS_MODE_MAX &&
+		((1 << mode) & perm_mode)) {
+		uint32_t reg_val;
+
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_RMSK;
+		writel_relaxed(((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK))) |
+			(mode & NOC_QOS_MODEn_MODE_BMSK)),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+	}
+	/* Ensure qos mode is set before exiting */
+	wmb();
+}
+
+static void noc_set_qos_priority(void __iomem *base, uint32_t qos_off,
+		uint32_t mport, uint32_t qos_delta,
+		struct msm_bus_noc_qos_priority *priority)
+{
+	uint32_t reg_val, val;
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+		qos_delta)) & NOC_QOS_PRIORITYn_RMSK;
+	val = priority->p1 << NOC_QOS_PRIORITYn_P1_SHFT;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P1_BMSK))) |
+		(val & NOC_QOS_PRIORITYn_P1_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+
+	reg_val = readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport,
+								qos_delta))
+		& NOC_QOS_PRIORITYn_RMSK;
+	writel_relaxed(((reg_val & (~(NOC_QOS_PRIORITYn_P0_BMSK))) |
+		(priority->p0 & NOC_QOS_PRIORITYn_P0_BMSK)),
+		NOC_QOS_PRIORITYn_ADDR(base, qos_off, mport, qos_delta));
+	/* Ensure qos priority is set before exiting */
+	wmb();
+}
+
+static void msm_bus_noc_set_qos_bw(void __iomem *base, uint32_t qos_off,
+		uint32_t qos_freq, uint32_t mport, uint32_t qos_delta,
+		uint8_t perm_mode, struct msm_bus_noc_qos_bw *qbw)
+{
+	uint32_t reg_val, val, mode;
+
+	if (!qos_freq) {
+		MSM_BUS_DBG("Zero QoS Freq\n");
+		return;
+	}
+
+	/* BW registers apply only when Limiter or Regulator mode is permitted */
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = noc_bw_field(qbw->bw, qos_freq);
+		uint32_t sat_val = noc_sat_field(qbw->bw, qbw->ws,
+			qos_freq);
+
+		MSM_BUS_DBG("NOC: BW: perm_mode: %d bw_val: %d, sat_val: %d\n",
+			perm_mode, bw_val, sat_val);
+		/*
+		 * If currently in Limiter/Regulator mode, switch to Fixed
+		 * mode first to clear the QoS accumulator.
+		 */
+		mode = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+		if (mode == NOC_QOS_MODE_REGULATOR || mode ==
+			NOC_QOS_MODE_LIMITER) {
+			reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(
+				base, qos_off, mport, qos_delta));
+			val = NOC_QOS_MODE_FIXED;
+			writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+				| (val & NOC_QOS_MODEn_MODE_BMSK),
+				NOC_QOS_MODEn_ADDR(base, qos_off, mport,
+								qos_delta));
+		}
+
+		reg_val = readl_relaxed(NOC_QOS_BWn_ADDR(base, qos_off, mport,
+								qos_delta));
+		val = bw_val << NOC_QOS_BWn_BW_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_BWn_BW_BMSK))) |
+			(val & NOC_QOS_BWn_BW_BMSK)),
+			NOC_QOS_BWn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: BW: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_BWn_BW_BMSK)) | (val &
+			NOC_QOS_BWn_BW_BMSK)));
+
+		reg_val = readl_relaxed(NOC_QOS_SATn_ADDR(base, qos_off,
+			mport, qos_delta));
+		val = sat_val << NOC_QOS_SATn_SAT_SHFT;
+		writel_relaxed(((reg_val & (~(NOC_QOS_SATn_SAT_BMSK))) |
+			(val & NOC_QOS_SATn_SAT_BMSK)),
+			NOC_QOS_SATn_ADDR(base, qos_off, mport, qos_delta));
+
+		MSM_BUS_DBG("NOC: SAT: Wrote value: 0x%x\n", ((reg_val &
+			(~NOC_QOS_SATn_SAT_BMSK)) | (val &
+			NOC_QOS_SATn_SAT_BMSK)));
+
+		/* Set mode back to what it was initially */
+		reg_val = readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta));
+		writel_relaxed((reg_val & (~(NOC_QOS_MODEn_MODE_BMSK)))
+			| (mode & NOC_QOS_MODEn_MODE_BMSK),
+			NOC_QOS_MODEn_ADDR(base, qos_off, mport, qos_delta));
+		/* Ensure that all writes for bandwidth registers have
+		 * completed before returning
+		 */
+		wmb();
+	}
+}
+
+uint8_t msm_bus_noc_get_qos_mode(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta, uint32_t mode, uint32_t perm_mode)
+{
+	if (perm_mode == NOC_QOS_MODES_ALL_PERM)
+		return readl_relaxed(NOC_QOS_MODEn_ADDR(base, qos_off,
+			mport, qos_delta)) & NOC_QOS_MODEn_MODE_BMSK;
+	else
+		return 31 - __CLZ(mode &
+			NOC_QOS_MODES_ALL_PERM);
+}
+
+void msm_bus_noc_get_qos_priority(void __iomem *base, uint32_t qos_off,
+	uint32_t mport, uint32_t qos_delta,
+	struct msm_bus_noc_qos_priority *priority)
+{
+	priority->p1 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P1_BMSK) >>
+		NOC_QOS_PRIORITYn_P1_SHFT;
+
+	priority->p0 = (readl_relaxed(NOC_QOS_PRIORITYn_ADDR(base, qos_off,
+		mport, qos_delta)) & NOC_QOS_PRIORITYn_P0_BMSK) >>
+		NOC_QOS_PRIORITYn_P0_SHFT;
+}
+
+void msm_bus_noc_get_qos_bw(void __iomem *base, uint32_t qos_off,
+	uint32_t qos_freq,
+	uint32_t mport, uint32_t qos_delta, uint8_t perm_mode,
+	struct msm_bus_noc_qos_bw *qbw)
+{
+	if (perm_mode & (NOC_QOS_PERM_MODE_LIMITER |
+		NOC_QOS_PERM_MODE_REGULATOR)) {
+		uint32_t bw_val = readl_relaxed(NOC_QOS_BWn_ADDR(
+			base, qos_off, mport, qos_delta)) & NOC_QOS_BWn_BW_BMSK;
+		uint32_t sat = readl_relaxed(NOC_QOS_SATn_ADDR(
+			base, qos_off, mport, qos_delta))
+						& NOC_QOS_SATn_SAT_BMSK;
+
+		qbw->bw = noc_bw(bw_val, qos_freq);
+		qbw->ws = noc_ws(qbw->bw, sat, qos_freq);
+	} else {
+		qbw->bw = 0;
+		qbw->ws = 0;
+	}
+}
+
+static bool msm_bus_noc_update_bw_reg(int mode)
+{
+	bool ret = false;
+
+	if ((mode == NOC_QOS_MODE_LIMITER) ||
+			(mode == NOC_QOS_MODE_REGULATOR))
+		ret = true;
+
+	return ret;
+}
+
+static int msm_bus_noc_qos_init(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	struct msm_bus_noc_qos_priority prio;
+	int ret = 0;
+	int i;
+
+	prio.p1 = info->node_info->qos_params.prio1;
+	prio.p0 = info->node_info->qos_params.prio0;
+
+	if (!info->node_info->qport) {
+		MSM_BUS_DBG("No QoS Ports to init\n");
+		ret = 0;
+		goto err_qos_init;
+	}
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode != NOC_QOS_MODE_BYPASS) {
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i], qos_delta,
+					&prio);
+
+			if (info->node_info->qos_params.mode !=
+							NOC_QOS_MODE_FIXED) {
+				struct msm_bus_noc_qos_bw qbw;
+
+				qbw.ws = info->node_info->qos_params.ws;
+				qbw.bw = 0;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i],
+					qos_delta,
+					info->node_info->qos_params.mode,
+					&qbw);
+			}
+		}
+
+		noc_set_qos_mode(qos_base, qos_off, info->node_info->qport[i],
+				qos_delta, info->node_info->qos_params.mode,
+				(1 << info->node_info->qos_params.mode));
+	}
+err_qos_init:
+	return ret;
+}
+
+static int msm_bus_noc_set_bw(struct msm_bus_node_device_type *dev,
+				void __iomem *qos_base,
+				uint32_t qos_off, uint32_t qos_delta,
+				uint32_t qos_freq)
+{
+	int ret = 0;
+	uint64_t bw = 0;
+	int i;
+	struct msm_bus_node_info_type *info = dev->node_info;
+
+	if (info && info->num_qports &&
+		((info->qos_params.mode == NOC_QOS_MODE_REGULATOR) ||
+		(info->qos_params.mode ==
+			NOC_QOS_MODE_LIMITER))) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		bw = msm_bus_div64(info->num_qports,
+				dev->node_bw[ACTIVE_CTX].sum_ab);
+
+		for (i = 0; i < info->num_qports; i++) {
+			if (!info->qport) {
+				MSM_BUS_DBG("No qos ports to update!\n");
+				break;
+			}
+
+			qos_bw.bw = bw;
+			qos_bw.ws = info->qos_params.ws;
+			msm_bus_noc_set_qos_bw(qos_base, qos_off, qos_freq,
+				info->qport[i], qos_delta,
+				(1 << info->qos_params.mode), &qos_bw);
+			MSM_BUS_DBG("NOC: QoS: Update mas_bw: ws: %u\n",
+				qos_bw.ws);
+		}
+	}
+	return ret;
+}
+
+static int msm_bus_noc_set_lim_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base,
+					qos_off, qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_LIMITER), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_LIMITER,
+					(1 << NOC_QOS_MODE_LIMITER));
+		}
+	}
+
+	return 0;
+}
+
+static int msm_bus_noc_set_reg_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	if (info && info->node_info->num_qports) {
+		struct msm_bus_noc_qos_priority prio;
+		struct msm_bus_noc_qos_bw qos_bw;
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			prio.p1 =
+				info->node_info->qos_params.reg_prio1;
+			prio.p0 =
+				info->node_info->qos_params.reg_prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					&prio);
+		}
+
+		if (lim_bw != info->node_info->lim_bw) {
+			for (i = 0; i < info->node_info->num_qports; i++) {
+				qos_bw.bw = lim_bw;
+				qos_bw.ws = info->node_info->qos_params.ws;
+				msm_bus_noc_set_qos_bw(qos_base, qos_off,
+					qos_freq,
+					info->node_info->qport[i], qos_delta,
+					(1 << NOC_QOS_MODE_REGULATOR), &qos_bw);
+			}
+			info->node_info->lim_bw = lim_bw;
+		}
+
+		for (i = 0; i < info->node_info->num_qports; i++) {
+			noc_set_qos_mode(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta,
+					NOC_QOS_MODE_REGULATOR,
+					(1 << NOC_QOS_MODE_REGULATOR));
+		}
+	}
+	return 0;
+}
+
+static int msm_bus_noc_set_def_mode(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				u64 lim_bw)
+{
+	int i;
+
+	for (i = 0; i < info->node_info->num_qports; i++) {
+		if (info->node_info->qos_params.mode ==
+						NOC_QOS_MODE_FIXED) {
+			struct msm_bus_noc_qos_priority prio;
+
+			prio.p1 =
+				info->node_info->qos_params.prio1;
+			prio.p0 =
+				info->node_info->qos_params.prio0;
+			noc_set_qos_priority(qos_base, qos_off,
+					info->node_info->qport[i],
+					qos_delta, &prio);
+		}
+		noc_set_qos_mode(qos_base, qos_off,
+			info->node_info->qport[i],
+			qos_delta,
+			info->node_info->qos_params.mode,
+			(1 << info->node_info->qos_params.mode));
+	}
+	return 0;
+}
+
+static int msm_bus_noc_limit_mport(struct msm_bus_node_device_type *info,
+				void __iomem *qos_base, uint32_t qos_off,
+				uint32_t qos_delta, uint32_t qos_freq,
+				int enable_lim, u64 lim_bw)
+{
+	int ret = 0;
+
+	if (!(info && info->node_info->num_qports)) {
+		MSM_BUS_ERR("Invalid Node info or no Qports to program");
+		ret = -ENXIO;
+		goto exit_limit_mport;
+	}
+
+	if (lim_bw) {
+		switch (enable_lim) {
+		case THROTTLE_REG:
+			msm_bus_noc_set_reg_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		case THROTTLE_ON:
+			msm_bus_noc_set_lim_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		default:
+			msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+						qos_delta, qos_freq, lim_bw);
+			break;
+		}
+	} else {
+		msm_bus_noc_set_def_mode(info, qos_base, qos_off,
+					qos_delta, qos_freq, lim_bw);
+	}
+
+exit_limit_mport:
+	return ret;
+}
+
+int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev)
+{
+	if (!bus_dev)
+		return -ENODEV;
+
+	bus_dev->fabdev->noc_ops.qos_init = msm_bus_noc_qos_init;
+	bus_dev->fabdev->noc_ops.set_bw = msm_bus_noc_set_bw;
+	bus_dev->fabdev->noc_ops.limit_mport = msm_bus_noc_limit_mport;
+	bus_dev->fabdev->noc_ops.update_bw_reg = msm_bus_noc_update_bw_reg;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_bus_noc_set_ops);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of.c b/drivers/soc/qcom/msm_bus/msm_bus_of.c
new file mode 100644
index 0000000..856dcce
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of.c
@@ -0,0 +1,701 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include "msm_bus_core.h"
+
+static const char * const hw_sel_name[] = {"RPM", "NoC", "BIMC", NULL};
+static const char * const mode_sel_name[] = {"Fixed", "Limiter", "Bypass",
+						"Regulator", NULL};
+
+static int get_num(const char *const str[], const char *name)
+{
+	int i = 0;
+
+	do {
+		if (!strcmp(name, str[i]))
+			return i;
+
+		i++;
+	} while (str[i] != NULL);
+
+	pr_err("Error: string %s not found\n", name);
+	return -EINVAL;
+}
+
+static struct msm_bus_scale_pdata *get_pdata(struct platform_device *pdev,
+	struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+	struct msm_bus_paths *usecase = NULL;
+	int i = 0, j, ret, num_usecases = 0, num_paths, len;
+	const uint32_t *vec_arr = NULL;
+	bool mem_err = false;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	pdata = devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_scale_pdata),
+		GFP_KERNEL);
+	if (!pdata) {
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_string(of_node, "qcom,msm-bus,name",
+		&pdata->name);
+	if (ret) {
+		pr_err("Error: Client name not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-cases",
+		&num_usecases);
+	if (ret) {
+		pr_err("Error: num-usecases not found\n");
+		goto err;
+	}
+
+	pdata->num_usecases = num_usecases;
+
+	if (of_property_read_bool(of_node, "qcom,msm-bus,active-only"))
+		pdata->active_only = 1;
+	else {
+		pr_debug("active_only flag absent.\n");
+		pr_debug("Using dual context by default\n");
+	}
+
+	usecase = devm_kzalloc(&pdev->dev, (sizeof(struct msm_bus_paths) *
+		pdata->num_usecases), GFP_KERNEL);
+	if (!usecase) {
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,msm-bus,num-paths",
+		&num_paths);
+	if (ret) {
+		pr_err("Error: num_paths not found\n");
+		goto err;
+	}
+
+	vec_arr = of_get_property(of_node, "qcom,msm-bus,vectors-KBps", &len);
+	if (vec_arr == NULL) {
+		pr_err("Error: Vector array not found\n");
+		goto err;
+	}
+
+	if (len != num_usecases * num_paths * sizeof(uint32_t) * 4) {
+		pr_err("Error: Length-error on getting vectors\n");
+		goto err;
+	}
+
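+	/*
+	 * Each vector is four u32 cells: <src dst ab ib>, with ab/ib
+	 * specified in KBps in the device tree.
+	 */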
+	for (i = 0; i < num_usecases; i++) {
+		usecase[i].num_paths = num_paths;
+		usecase[i].vectors = devm_kzalloc(&pdev->dev, num_paths *
+			sizeof(struct msm_bus_vectors), GFP_KERNEL);
+		if (!usecase[i].vectors) {
+			mem_err = true;
+			pr_err("Error: Mem alloc failure in vectors\n");
+			goto err;
+		}
+
+		for (j = 0; j < num_paths; j++) {
+			int index = ((i * num_paths) + j) * 4;
+
+			usecase[i].vectors[j].src = be32_to_cpu(vec_arr[index]);
+			usecase[i].vectors[j].dst =
+				be32_to_cpu(vec_arr[index + 1]);
+			usecase[i].vectors[j].ab = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 2]));
+			usecase[i].vectors[j].ib = (uint64_t)
+				KBTOB(be32_to_cpu(vec_arr[index + 3]));
+		}
+	}
+
+	pdata->usecase = usecase;
+	return pdata;
+err:
+	if (mem_err) {
+		for (; i > 0; i--)
+			devm_kfree(&pdev->dev, usecase[i-1].vectors);
+
+		if (usecase)
+			devm_kfree(&pdev->dev, usecase);
+		if (pdata)
+			devm_kfree(&pdev->dev, pdata);
+	}
+
+	return NULL;
+}
+
+/**
+ * msm_bus_cl_get_pdata() - Generate bus client data from device tree
+ * provided by clients.
+ *
+ * @pdev: Platform device whose device-tree node holds the bus scaling data
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = get_pdata(pdev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_cl_get_pdata);
+
+/**
+ * msm_bus_pdata_from_node() - Generate bus client data from a device tree
+ * node provided by clients. This function should be used when a client
+ * driver needs to register multiple bus-clients from a single device-tree
+ * node associated with the platform-device.
+ *
+ * @of_node: The subnode containing information about the bus scaling
+ * data
+ *
+ * @pdev: Platform device associated with the device-tree node
+ *
+ * The function returns a valid pointer to the allocated bus-scale-pdata
+ * if the vectors were correctly read from the client's device node.
+ * Any error in reading or parsing the device node will return NULL
+ * to the caller.
+ */
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node)
+{
+	struct msm_bus_scale_pdata *pdata = NULL;
+
+	if (!pdev) {
+		pr_err("Error: Null Platform device\n");
+		return NULL;
+	}
+
+	if (!of_node) {
+		pr_err("Error: Null of_node passed to bus driver\n");
+		return NULL;
+	}
+
+	pdata = get_pdata(pdev, of_node);
+	if (!pdata) {
+		pr_err("client has to provide missing entry for successful registration\n");
+		return NULL;
+	}
+
+	return pdata;
+}
+EXPORT_SYMBOL(msm_bus_pdata_from_node);
+
+/**
+ * msm_bus_cl_clear_pdata() - Clear pdata allocated from device-tree
+ * @pdata: Bus scale platform data to be released
+ */
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+	int i;
+
+	for (i = 0; i < pdata->num_usecases; i++)
+		kfree(pdata->usecase[i].vectors);
+
+	kfree(pdata->usecase);
+	kfree(pdata);
+}
+EXPORT_SYMBOL(msm_bus_cl_clear_pdata);
+
+static int *get_arr(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(arr)) {
+		pr_err("Error: Failed to alloc mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	return arr;
+err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
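+/*
+ * Read an array of KBps values from the given DT property, convert each
+ * entry to bytes with KBTOB() and return the resulting u64 array; the
+ * element count is stored in *nports.
+ */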
+static u64 *get_th_params(struct platform_device *pdev,
+		const struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	u64 *ret_arr = NULL;
+	int *arr = NULL;
+	int i;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		pr_debug("Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	if (!size) {
+		*nports = 0;
+		return NULL;
+	}
+
+	ret_arr = devm_kzalloc(&pdev->dev, (*nports * sizeof(u64)),
+							GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(ret_arr)) {
+		pr_err("Error: Failed to alloc mem for ret arr %s\n", prop);
+		return NULL;
+	}
+
+	arr = kzalloc(size, GFP_KERNEL);
+	if ((ZERO_OR_NULL_PTR(arr))) {
+		pr_err("Error: Failed to alloc temp mem for %s\n", prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		pr_err("Error in reading property: %s\n", prop);
+		goto err;
+	}
+
+	for (i = 0; i < *nports; i++)
+		ret_arr[i] = (uint64_t)KBTOB(arr[i]);
+
+	MSM_BUS_DBG("%s: num entries %d prop %s", __func__, *nports, prop);
+
+	for (i = 0; i < *nports; i++)
+		MSM_BUS_DBG("Th %d val %llu", i, ret_arr[i]);
+
+	kfree(arr);
+	return ret_arr;
+err:
+	kfree(arr);
+	devm_kfree(&pdev->dev, ret_arr);
+	return NULL;
+}
+
+static struct msm_bus_node_info *get_nodes(struct device_node *of_node,
+	struct platform_device *pdev,
+	struct msm_bus_fabric_registration *pdata)
+{
+	struct msm_bus_node_info *info;
+	struct device_node *child_node = NULL;
+	int i = 0, ret;
+	int num_bw = 0;
+	u32 temp;
+
+	for_each_child_of_node(of_node, child_node) {
+		i++;
+	}
+
+	pdata->len = i;
+	info = (struct msm_bus_node_info *)
+		devm_kzalloc(&pdev->dev, sizeof(struct msm_bus_node_info) *
+			pdata->len, GFP_KERNEL);
+	if (ZERO_OR_NULL_PTR(info)) {
+		pr_err("Failed to alloc memory for nodes: %d\n", pdata->len);
+		goto err;
+	}
+
+	i = 0;
+	child_node = NULL;
+	for_each_child_of_node(of_node, child_node) {
+		const char *sel_str;
+
+		ret = of_property_read_string(child_node, "label",
+			&info[i].name);
+		if (ret)
+			pr_err("Error reading node label\n");
+
+		ret = of_property_read_u32(child_node, "cell-id", &info[i].id);
+		if (ret) {
+			pr_err("Error reading node id\n");
+			goto err;
+		}
+
+		if (of_property_read_bool(child_node, "qcom,gateway"))
+			info[i].gateway = 1;
+
+		of_property_read_u32(child_node, "qcom,mas-hw-id",
+			&info[i].mas_hw_id);
+
+		of_property_read_u32(child_node, "qcom,slv-hw-id",
+			&info[i].slv_hw_id);
+		info[i].masterp = get_arr(pdev, child_node,
+					"qcom,masterp", &info[i].num_mports);
+		/* No need to store number of qports */
+		info[i].qport = get_arr(pdev, child_node,
+					"qcom,qport", &ret);
+		pdata->nmasters += info[i].num_mports;
+
+
+		info[i].slavep = get_arr(pdev, child_node,
+					"qcom,slavep", &info[i].num_sports);
+		pdata->nslaves += info[i].num_sports;
+
+
+		info[i].tier = get_arr(pdev, child_node,
+					"qcom,tier", &info[i].num_tiers);
+
+		if (of_property_read_bool(child_node, "qcom,ahb"))
+			info[i].ahb = 1;
+
+		ret = of_property_read_string(child_node, "qcom,hw-sel",
+			&sel_str);
+		if (ret)
+			info[i].hw_sel = 0;
+		else {
+			ret =  get_num(hw_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Invalid hw-sel\n");
+				goto err;
+			}
+
+			info[i].hw_sel = ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,buswidth",
+			&info[i].buswidth);
+		of_property_read_u32(child_node, "qcom,ws", &info[i].ws);
+
+		info[i].dual_conf =
+			of_property_read_bool(child_node, "qcom,dual-conf");
+
+
+		info[i].th = get_th_params(pdev, child_node, "qcom,thresh",
+						&info[i].num_thresh);
+
+		info[i].bimc_bw = get_th_params(pdev, child_node,
+						"qcom,bimc,bw", &num_bw);
+
+		if (num_bw != info[i].num_thresh) {
+			pr_err("%s: num_bw %d must equal num_thresh %d\n",
+				__func__, num_bw, info[i].num_thresh);
+			pr_err("%s: error setting up dual conf for %s\n",
+				__func__, info[i].name);
+			goto err;
+		}
+
+		of_property_read_u32(child_node, "qcom,bimc,gp",
+			&info[i].bimc_gp);
+		of_property_read_u32(child_node, "qcom,bimc,thmp",
+			&info[i].bimc_thmp);
+
+		ret = of_property_read_string(child_node, "qcom,mode-thresh",
+			&sel_str);
+		if (ret)
+			info[i].mode_thresh = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode_thresh = ret;
+			MSM_BUS_DBG("AXI: THreshold mode set: %d\n",
+					info[i].mode_thresh);
+		}
+
+		ret = of_property_read_string(child_node, "qcom,mode",
+				&sel_str);
+
+		if (ret)
+			info[i].mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0) {
+				pr_err("Unknown mode :%s\n", sel_str);
+				goto err;
+			}
+
+			info[i].mode = ret;
+		}
+
+		info[i].nr_lim =
+			of_property_read_bool(child_node, "qcom,nr-lim");
+
+		ret = of_property_read_u32(child_node, "qcom,ff",
+							&info[i].ff);
+		if (ret) {
+			pr_debug("fudge factor not present, node %d\n",
+				info[i].id);
+			info[i].ff = 0;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,floor-bw",
+						&temp);
+		if (ret) {
+			pr_debug("fabdev floor bw not present, node %d\n",
+				info[i].id);
+			info[i].floor_bw = 0;
+		} else {
+			info[i].floor_bw = KBTOB(temp);
+		}
+
+		info[i].rt_mas =
+			of_property_read_bool(child_node, "qcom,rt-mas");
+
+		ret = of_property_read_string(child_node, "qcom,perm-mode",
+			&sel_str);
+		if (ret)
+			info[i].perm_mode = 0;
+		else {
+			ret = get_num(mode_sel_name, sel_str);
+			if (ret < 0)
+				goto err;
+
+			info[i].perm_mode = 1 << ret;
+		}
+
+		of_property_read_u32(child_node, "qcom,prio-lvl",
+			&info[i].prio_lvl);
+		of_property_read_u32(child_node, "qcom,prio-rd",
+			&info[i].prio_rd);
+		of_property_read_u32(child_node, "qcom,prio-wr",
+			&info[i].prio_wr);
+		of_property_read_u32(child_node, "qcom,prio0", &info[i].prio0);
+		of_property_read_u32(child_node, "qcom,prio1", &info[i].prio1);
+		ret = of_property_read_string(child_node, "qcom,slaveclk-dual",
+			&info[i].slaveclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_dual: %s\n",
+				info[i].slaveclk[DUAL_CTX]);
+		else
+			info[i].slaveclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node,
+			"qcom,slaveclk-active", &info[i].slaveclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got slaveclk_active\n");
+		else
+			info[i].slaveclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-dual",
+			&info[i].memclk[DUAL_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_dual\n");
+		else
+			info[i].memclk[DUAL_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,memclk-active",
+			&info[i].memclk[ACTIVE_CTX]);
+		if (!ret)
+			pr_debug("Got memclk_active\n");
+		else
+			info[i].memclk[ACTIVE_CTX] = NULL;
+
+		ret = of_property_read_string(child_node, "qcom,iface-clk-node",
+			&info[i].iface_clk_node);
+		if (!ret)
+			pr_debug("Got iface_clk_node\n");
+		else
+			info[i].iface_clk_node = NULL;
+
+		pr_debug("Node name: %s\n", info[i].name);
+		i++;
+	}
+
+	pr_debug("Bus %d added: %d masters\n", pdata->id, pdata->nmasters);
+	pr_debug("Bus %d added: %d slaves\n", pdata->id, pdata->nslaves);
+	return info;
+err:
+	return NULL;
+}
+
+void msm_bus_of_get_nfab(struct platform_device *pdev,
+		struct msm_bus_fabric_registration *pdata)
+{
+	struct device_node *of_node;
+	int ret, nfab = 0;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return;
+	}
+
+	of_node = pdev->dev.of_node;
+	ret = of_property_read_u32(of_node, "qcom,nfab",
+		&nfab);
+	if (!ret)
+		pr_debug("Fab_of: Read number of buses: %u\n", nfab);
+
+	msm_bus_board_set_nfab(pdata, nfab);
+}
+
+struct msm_bus_fabric_registration
+	*msm_bus_of_get_fab_data(struct platform_device *pdev)
+{
+	struct device_node *of_node;
+	struct msm_bus_fabric_registration *pdata;
+	bool mem_err = false;
+	int ret = 0;
+	const char *sel_str;
+	u32 temp;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fabric_registration), GFP_KERNEL);
+	if (!pdata) {
+		mem_err = true;
+		goto err;
+	}
+
+	ret = of_property_read_string(of_node, "label", &pdata->name);
+	if (ret) {
+		pr_err("Error: label not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read name: %s\n", pdata->name);
+
+	ret = of_property_read_u32(of_node, "cell-id",
+		&pdata->id);
+	if (ret) {
+		pr_err("Error: cell-id not found\n");
+		goto err;
+	}
+	pr_debug("Fab_of: Read id: %u\n", pdata->id);
+
+	if (of_property_read_bool(of_node, "qcom,ahb"))
+		pdata->ahb = 1;
+
+	ret = of_property_read_string(of_node, "qcom,fabclk-dual",
+		&pdata->fabclk[DUAL_CTX]);
+	if (ret) {
+		pr_debug("fabclk_dual not available\n");
+		pdata->fabclk[DUAL_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk dual ctx: %s\n",
+			pdata->fabclk[DUAL_CTX]);
+	ret = of_property_read_string(of_node, "qcom,fabclk-active",
+		&pdata->fabclk[ACTIVE_CTX]);
+	if (ret) {
+		pr_debug("fabclk_active not available\n");
+		pdata->fabclk[ACTIVE_CTX] = NULL;
+	} else
+		pr_debug("Fab_of: Read clk act ctx: %s\n",
+			pdata->fabclk[ACTIVE_CTX]);
+
+	ret = of_property_read_u32(of_node, "qcom,ntieredslaves",
+		&pdata->ntieredslaves);
+	if (ret) {
+		pr_err("Error: ntieredslaves not found\n");
+		goto err;
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,qos-freq", &pdata->qos_freq);
+	if (ret)
+		pr_debug("qos_freq not available\n");
+
+	ret = of_property_read_string(of_node, "qcom,hw-sel", &sel_str);
+	if (ret) {
+		pr_err("Error: hw_sel not found\n");
+		goto err;
+	} else {
+		ret = get_num(hw_sel_name, sel_str);
+		if (ret < 0)
+			goto err;
+
+		pdata->hw_sel = ret;
+	}
+
+	if (of_property_read_bool(of_node, "qcom,virt"))
+		pdata->virt = true;
+
+	ret = of_property_read_u32(of_node, "qcom,qos-baseoffset",
+						&pdata->qos_baseoffset);
+	if (ret)
+		pr_debug("%s:qos_baseoffset not available\n", __func__);
+
+	ret = of_property_read_u32(of_node, "qcom,qos-delta",
+						&pdata->qos_delta);
+	if (ret)
+		pr_debug("%s:qos_delta not available\n", __func__);
+
+	if (of_property_read_bool(of_node, "qcom,rpm-en"))
+		pdata->rpm_enabled = 1;
+
+	ret = of_property_read_u32(of_node, "qcom,nr-lim-thresh",
+						&temp);
+
+	if (ret) {
+		pr_err("nr-lim threshold not specified\n");
+		pdata->nr_lim_thresh = 0;
+	} else {
+		pdata->nr_lim_thresh = KBTOB(temp);
+	}
+
+	ret = of_property_read_u32(of_node, "qcom,eff-fact",
+						&pdata->eff_fact);
+	if (ret) {
+		pr_err("Fab eff-factor not present\n");
+		pdata->eff_fact = 0;
+	}
+
+	pdata->info = get_nodes(of_node, pdev, pdata);
+	return pdata;
+err:
+	return NULL;
+}
+EXPORT_SYMBOL(msm_bus_of_get_fab_data);
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
new file mode 100644
index 0000000..d0c0e51
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_adhoc.c
@@ -0,0 +1,898 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_adhoc.h"
+
+#define DEFAULT_QOS_FREQ	19200
+#define DEFAULT_UTIL_FACT	100
+#define DEFAULT_VRAIL_COMP	100
+#define DEFAULT_AGG_SCHEME	AGG_SCHEME_LEG
+
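+/* Map the qcom,qos-mode string onto its index in qos_names; -1 on no match */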
+static int get_qos_mode(struct platform_device *pdev,
+			struct device_node *node, const char *qos_mode)
+{
+	static char const *qos_names[] = {"fixed", "limiter",
+						"bypass", "regulator"};
+	int i = 0;
+	int ret = -1;
+
+	if (!qos_mode)
+		goto exit_get_qos_mode;
+
+	for (i = 0; i < ARRAY_SIZE(qos_names); i++) {
+		if (!strcmp(qos_mode, qos_names[i]))
+			break;
+	}
+	if (i == ARRAY_SIZE(qos_names))
+		dev_err(&pdev->dev,
+			"Cannot match QoS mode '%s', using Bypass\n",
+			qos_mode);
+	else
+		ret = i;
+
+exit_get_qos_mode:
+	return ret;
+}
+
+static int *get_arr(struct platform_device *pdev,
+		struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if ((size > 0) && ZERO_OR_NULL_PTR(arr))
+		return NULL;
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		goto arr_err;
+	}
+
+	return arr;
+arr_err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	int ret;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fab_device_type),
+			GFP_KERNEL);
+	if (!fab_dev)
+		return NULL;
+
+	ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: Unable to get base address name\n");
+		goto fab_dev_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		dev_err(&pdev->dev, "Error getting qos base addr %s\n",
+								base_name);
+		goto fab_dev_err;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	ret = of_property_read_u32(dev_node, "qcom,base-offset",
+			&fab_dev->base_offset);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-off",
+			&fab_dev->qos_off);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+
+	ret = of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+		goto fab_dev_err;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+	return fab_dev;
+
+fab_dev_err:
+	devm_kfree(&pdev->dev, fab_dev);
+	fab_dev = NULL;
+	return NULL;
+}
+
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const char *qos_mode = NULL;
+	int ret;
+	unsigned int temp;
+
+	ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
+
+	if (ret)
+		node_info->qos_params.mode = -1;
+	else
+		node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
+								qos_mode);
+
+	of_property_read_u32(dev_node, "qcom,prio-lvl",
+					&node_info->qos_params.prio_lvl);
+
+	of_property_read_u32(dev_node, "qcom,prio1",
+						&node_info->qos_params.prio1);
+
+	of_property_read_u32(dev_node, "qcom,prio0",
+						&node_info->qos_params.prio0);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio1",
+					&node_info->qos_params.reg_prio1);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio0",
+					&node_info->qos_params.reg_prio0);
+
+	of_property_read_u32(dev_node, "qcom,prio-rd",
+					&node_info->qos_params.prio_rd);
+
+	of_property_read_u32(dev_node, "qcom,prio-wr",
+						&node_info->qos_params.prio_wr);
+
+	of_property_read_u32(dev_node, "qcom,gp",
+						&node_info->qos_params.gp);
+
+	of_property_read_u32(dev_node, "qcom,thmp",
+						&node_info->qos_params.thmp);
+
+	of_property_read_u32(dev_node, "qcom,ws",
+						&node_info->qos_params.ws);
+
+	ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
+
+	if (ret)
+		node_info->qos_params.bw_buffer = 0;
+	else
+		node_info->qos_params.bw_buffer = KBTOB(temp);
+
+}
+
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+			struct device_node *gdsc_node,
+			struct platform_device *pdev, struct nodeclk **clk_arr,
+			int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0) {
+		dev_err(&pdev->dev, "No qos clks node %d\n", id);
+		ret = clks;
+		goto exit_of_parse_clk_array;
+	}
+
+	*num_clks = clks;
+	*clk_arr = devm_kzalloc(&pdev->dev,
+			(clks * sizeof(struct nodeclk)), GFP_KERNEL);
+
+	if (!(*clk_arr)) {
+		ret = -ENOMEM;
+		*num_clks = 0;
+		goto exit_of_parse_clk_array;
+	}
+
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			dev_err(&pdev->dev,
+				"Failed to get clk %s for bus %d\n", clk_name,
+									id);
+			continue;
+		}
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
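+		/*
+		 * Record the clock name as the regulator name when the gdsc
+		 * node exposes a matching "<clk_name>-supply" property;
+		 * otherwise leave the regulator name empty.
+		 */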
+		scnprintf(gdsc_string, MAX_REG_NAME, "%s-supply", clk_name);
+
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			scnprintf((*clk_arr)[idx].reg_name,
+				MAX_REG_NAME, "%s", clk_name);
+		else
+			scnprintf((*clk_arr)[idx].reg_name,
+					MAX_REG_NAME, "%c", '\0');
+
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
+
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	int ret;
+
+
+	ret = of_property_read_u32(dev_node, "qcom,buswidth",
+					&node_info->agg_params.buswidth);
+	if (ret) {
+		dev_dbg(&pdev->dev,
+			"Using default buswidth of 8 bytes, node %d\n",
+			node_info->id);
+		node_info->agg_params.buswidth = 8;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-ports",
+				   &node_info->agg_params.num_aggports);
+	if (ret)
+		node_info->agg_params.num_aggports = node_info->num_qports;
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-scheme",
+					&node_info->agg_params.agg_scheme);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+		else
+			node_info->agg_params.agg_scheme = AGG_SCHEME_NONE;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
+					&node_info->agg_params.vrail_comp);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.vrail_comp = DEFAULT_VRAIL_COMP;
+		else
+			node_info->agg_params.vrail_comp = 0;
+	}
+
+	if (node_info->agg_params.agg_scheme == AGG_SCHEME_1) {
+		uint32_t len = 0;
+		const uint32_t *util_levels;
+		int i, index = 0;
+
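+		/*
+		 * qcom,util-levels is a flat list of
+		 * <threshold-KBps util-factor> pairs.
+		 */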
+		util_levels =
+			of_get_property(dev_node, "qcom,util-levels", &len);
+		if (!util_levels)
+			goto err_get_agg_params;
+
+		node_info->agg_params.num_util_levels =
+					len / (sizeof(uint32_t) * 2);
+		node_info->agg_params.util_levels = devm_kzalloc(&pdev->dev,
+			(node_info->agg_params.num_util_levels *
+			sizeof(struct node_util_levels_type)), GFP_KERNEL);
+
+		if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+			goto err_get_agg_params;
+
+		for (i = 0; i < node_info->agg_params.num_util_levels; i++) {
+			node_info->agg_params.util_levels[i].threshold =
+				KBTOB(be32_to_cpu(util_levels[index++]));
+			node_info->agg_params.util_levels[i].util_fact =
+					be32_to_cpu(util_levels[index++]);
+			dev_dbg(&pdev->dev, "[%d]:Thresh:%llu util_fact:%d\n",
+				i,
+				node_info->agg_params.util_levels[i].threshold,
+				node_info->agg_params.util_levels[i].util_fact);
+		}
+	} else {
+		uint32_t util_fact;
+
+		ret = of_property_read_u32(dev_node, "qcom,util-fact",
+								&util_fact);
+		if (ret) {
+			if (node_info->is_fab_dev)
+				util_fact = DEFAULT_UTIL_FACT;
+			else
+				util_fact = 0;
+		}
+
+		if (util_fact) {
+			node_info->agg_params.num_util_levels = 1;
+			node_info->agg_params.util_levels =
+			devm_kzalloc(&pdev->dev,
+				(node_info->agg_params.num_util_levels *
+				sizeof(struct node_util_levels_type)),
+				GFP_KERNEL);
+			if (IS_ERR_OR_NULL(node_info->agg_params.util_levels))
+				goto err_get_agg_params;
+			node_info->agg_params.util_levels[0].util_fact =
+								util_fact;
+		}
+
+	}
+
+	return;
+err_get_agg_params:
+	node_info->agg_params.num_util_levels = 0;
+	node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+}
+
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	unsigned int ret;
+	int size;
+	int i;
+	struct device_node *con_node;
+	struct device_node *bus_dev;
+
+	node_info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_info_type),
+			GFP_KERNEL);
+	if (!node_info) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for node_info\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
+		goto node_info_err;
+	}
+	ret = of_property_read_string(dev_node, "label", &node_info->name);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing name\n");
+		goto node_info_err;
+	}
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	if (of_get_property(dev_node, "qcom,connections", &size)) {
+		node_info->num_connections = size / sizeof(int);
+		node_info->connections = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+	} else {
+		node_info->num_connections = 0;
+		node_info->connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_connections; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,connections", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	if (of_get_property(dev_node, "qcom,blacklist", &size)) {
+		node_info->num_blist = size/sizeof(u32);
+		node_info->black_listed_connections = devm_kzalloc(&pdev->dev,
+		size, GFP_KERNEL);
+	} else {
+		node_info->num_blist = 0;
+		node_info->black_listed_connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_blist; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->black_listed_connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		if (of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id)) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d",
+					node_info->id);
+			goto node_info_err;
+		}
+
+		of_node_put(bus_dev);
+	} else {
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+	}
+
+	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+	ret = of_property_read_u32(dev_node, "qcom,mas-rpm-id",
+						&node_info->mas_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "mas rpm id is missing\n");
+		node_info->mas_rpm_id = -1;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,slv-rpm-id",
+						&node_info->slv_rpm_id);
+	if (ret) {
+		dev_dbg(&pdev->dev, "slv rpm id is missing\n");
+		node_info->slv_rpm_id = -1;
+	}
+
+	get_agg_params(dev_node, pdev, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+
+node_info_err:
+	devm_kfree(&pdev->dev, node_info);
+	node_info = 0;
+	return NULL;
+}
+
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+	struct device_node *qos_clk_node;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_fab_dev) {
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (IS_ERR_OR_NULL(node_device->fabdev)) {
+				dev_err(&pdev->dev,
+					"Error: Fabric device info missing\n");
+				devm_kfree(&pdev->dev, node_device->node_info);
+				return -ENODATA;
+			}
+		}
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->clk[ACTIVE_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk)) {
+			int ret;
+
+			dev_err(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+			ret = (IS_ERR(node_device->clk[DUAL_CTX].clk) ?
+			PTR_ERR(node_device->clk[DUAL_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->clk[ACTIVE_CTX].clk = of_clk_get_by_name(dev_node,
+							"bus_a_clk");
+		if (IS_ERR_OR_NULL(node_device->clk[ACTIVE_CTX].clk)) {
+			int ret;
+
+			dev_err(&pdev->dev,
+				"Failed to get bus clk for bus%d ctx%d",
+				 node_device->node_info->id, ACTIVE_CTX);
+			ret = (IS_ERR(node_device->clk[ACTIVE_CTX].clk) ?
+			PTR_ERR(node_device->clk[ACTIVE_CTX].clk) : -ENXIO);
+			return ret;
+		}
+
+		if (of_find_property(dev_node, "bus-a-gdsc-supply", NULL))
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%s", "bus-a-gdsc");
+		else
+			scnprintf(node_device->clk[ACTIVE_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk)) {
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for %d",
+				__func__, node_device->node_info->id);
+			scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		} else {
+			if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+								NULL))
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%s", "bus-qos-gdsc");
+			else
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev, "Bypass QoS programming");
+				node_device->fabdev->bypass_qos_prg = true;
+			}
+			of_node_put(qos_clk_node);
+		}
+
+		if (msmbus_coresight_init_adhoc(pdev, dev_node))
+			dev_warn(&pdev->dev,
+				 "Coresight support absent for bus: %d\n",
+				  node_device->node_info->id);
+	} else {
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for mas%d",
+				__func__, node_device->node_info->id);
+
+		if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+									NULL))
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%s", "bus-qos-gdsc");
+		else
+			scnprintf(node_device->bus_qos_clk.reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+		node_device->bus_qos_clk.enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev, "Bypass QoS programming");
+				if (node_device->fabdev)
+					node_device->fabdev->bypass_qos_prg =
+								true;
+			}
+			of_node_put(qos_clk_node);
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "node-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+	}
+	return 0;
+}
+
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	unsigned int ret;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_device_node_registration),
+			GFP_KERNEL);
+	if (!pdata)
+		return NULL;
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_device_type) *
+			pdata->num_devices, GFP_KERNEL);
+
+	if (!pdata->info)
+		goto node_reg_err;
+
+	ret = 0;
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
+			goto node_reg_err_1;
+		}
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
+				pdata->info[i].node_info->id,
+				pdata->info[i].node_info->num_qports,
+				pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
+				pdata->info[i].node_info->bus_device_id,
+				pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->
+				black_listed_connections[j]);
+		}
+		if (pdata->info[i].fabdev)
+			dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
+					(size_t)pdata->info[i].
+						fabdev->pqos_base,
+					pdata->info[i].fabdev->bus_type);
+	}
+	return pdata;
+
+node_reg_err_1:
+	devm_kfree(&pdev->dev, pdata->info);
+node_reg_err:
+	devm_kfree(&pdev->dev, pdata);
+	pdata = NULL;
+	return NULL;
+}
+
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int ret = 0;
+	int size, i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	if (of_get_property(dev_node, prop_name, &size)) {
+		*num_ids = size / sizeof(int);
+		ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	} else {
+		dev_err(&pdev->dev, "No rule nodes, skipping node");
+		ret = -ENXIO;
+		goto exit_get_ids;
+	}
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i])) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+		of_node_put(rule_node);
+	}
+exit_get_ids:
+	return ret;
+err_get_ids:
+	devm_kfree(&pdev->dev, ids);
+	of_node_put(rule_node);
+	ids = NULL;
+	return ret;
+}
+
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kzalloc(&pdev->dev,
+				sizeof(struct bus_rule_type) * num_rules,
+				GFP_KERNEL);
+
+	if (IS_ERR_OR_NULL(local_rule)) {
+		ret = -ENOMEM;
+		goto exit_static_rules;
+	}
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].src_id,
+			&local_rule[rule_idx].num_src,
+			"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].dst_node,
+			&local_rule[rule_idx].num_dst,
+			"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		} else
+			local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+								&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	ret = rule_idx;
+exit_static_rules:
+	return ret;
+err_static_rules:
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(local_rule)) {
+			if (!IS_ERR_OR_NULL(local_rule[i].src_id))
+				devm_kfree(&pdev->dev,
+						local_rule[i].src_id);
+			if (!IS_ERR_OR_NULL(local_rule[i].dst_node))
+				devm_kfree(&pdev->dev,
+						local_rule[i].dst_node);
+			devm_kfree(&pdev->dev, local_rule);
+		}
+	}
+	*static_rules = NULL;
+	return ret;
+}
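+
+/*
+ * Illustrative static-rule child node as parsed above (all values and
+ * labels are placeholders):
+ *
+ *	rule@0 {
+ *		qcom,src-nodes = <&mas_example>;
+ *		qcom,src-field = <FLD_IB>;
+ *		qcom,src-op = <OP_GT>;
+ *		qcom,mode = <THROTTLE_ON>;
+ *		qcom,thresh = <1600000>;
+ *		qcom,dest-node = <&slv_example>;
+ *		qcom,dest-bw = <600000>;
+ *	};
+ *
+ * qcom,thresh and qcom,dest-bw are specified in KB and converted with
+ * KBTOB().
+ */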
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
new file mode 100644
index 0000000..4048595
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_of_rpmh.c
@@ -0,0 +1,802 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+#include "msm_bus_rpmh.h"
+
+#define DEFAULT_QOS_FREQ	19200
+#define DEFAULT_UTIL_FACT	100
+#define DEFAULT_VRAIL_COMP	100
+#define DEFAULT_AGG_SCHEME	AGG_SCHEME_LEG
+
+static int get_qos_mode(struct platform_device *pdev,
+			struct device_node *node, const char *qos_mode)
+{
+	static char const *qos_names[] = {"fixed", "limiter",
+						"bypass", "regulator"};
+	int i = 0;
+	int ret = -1;
+
+	if (!qos_mode)
+		goto exit_get_qos_mode;
+
+	for (i = 0; i < ARRAY_SIZE(qos_names); i++) {
+		if (!strcmp(qos_mode, qos_names[i]))
+			break;
+	}
+	if (i == ARRAY_SIZE(qos_names))
+		dev_err(&pdev->dev, "Cannot match mode qos %s using Bypass",
+				qos_mode);
+	else
+		ret = i;
+
+exit_get_qos_mode:
+	return ret;
+}
+
+static int *get_arr(struct platform_device *pdev,
+		struct device_node *node, const char *prop,
+		int *nports)
+{
+	int size = 0, ret;
+	int *arr = NULL;
+
+	if (of_get_property(node, prop, &size)) {
+		*nports = size / sizeof(int);
+	} else {
+		dev_dbg(&pdev->dev, "Property %s not available\n", prop);
+		*nports = 0;
+		return NULL;
+	}
+
+	arr = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	if ((size > 0) && ZERO_OR_NULL_PTR(arr)) {
+		dev_err(&pdev->dev, "Error: Failed to alloc mem for %s\n",
+				prop);
+		return NULL;
+	}
+
+	ret = of_property_read_u32_array(node, prop, (u32 *)arr, *nports);
+	if (ret) {
+		dev_err(&pdev->dev, "Error in reading property: %s\n", prop);
+		goto arr_err;
+	}
+
+	return arr;
+arr_err:
+	devm_kfree(&pdev->dev, arr);
+	return NULL;
+}
+
+static struct msm_bus_bcm_device_type *get_bcm_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_bcm_device_type *bcm_dev;
+
+	bcm_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_bcm_device_type),
+			GFP_KERNEL);
+	if (!bcm_dev) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for bcm_dev\n");
+		goto bcm_dev_err;
+	}
+
+	return bcm_dev;
+
+bcm_dev_err:
+	devm_kfree(&pdev->dev, bcm_dev);
+	bcm_dev = 0;
+	return NULL;
+}
+
+static struct msm_bus_fab_device_type *get_fab_device_info(
+		struct device_node *dev_node,
+		struct platform_device *pdev)
+{
+	struct msm_bus_fab_device_type *fab_dev;
+	unsigned int ret;
+	struct resource *res;
+	const char *base_name;
+
+	fab_dev = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_fab_device_type),
+			GFP_KERNEL);
+	if (!fab_dev) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for fab_dev\n");
+		return NULL;
+	}
+
+	ret = of_property_read_string(dev_node, "qcom,base-name", &base_name);
+	if (ret) {
+		dev_err(&pdev->dev, "Error: Unable to get base address name\n");
+		goto fab_dev_err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, base_name);
+	if (!res) {
+		dev_err(&pdev->dev, "Error getting qos base addr %s\n",
+								base_name);
+		goto fab_dev_err;
+	}
+	fab_dev->pqos_base = res->start;
+	fab_dev->qos_range = resource_size(res);
+	fab_dev->bypass_qos_prg = of_property_read_bool(dev_node,
+						"qcom,bypass-qos-prg");
+
+	ret = of_property_read_u32(dev_node, "qcom,base-offset",
+			&fab_dev->base_offset);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus base offset is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-off",
+			&fab_dev->qos_off);
+	if (ret)
+		dev_dbg(&pdev->dev, "Bus qos off is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,bus-type",
+						&fab_dev->bus_type);
+	if (ret)
+		dev_warn(&pdev->dev, "Bus type is missing\n");
+
+	ret = of_property_read_u32(dev_node, "qcom,qos-freq",
+						&fab_dev->qos_freq);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Bus qos freq is missing\n");
+		fab_dev->qos_freq = DEFAULT_QOS_FREQ;
+	}
+
+	return fab_dev;
+
+fab_dev_err:
+	devm_kfree(&pdev->dev, fab_dev);
+	fab_dev = 0;
+	return NULL;
+}
+
+static void get_qos_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	const char *qos_mode = NULL;
+	unsigned int ret;
+	unsigned int temp;
+
+	ret = of_property_read_string(dev_node, "qcom,qos-mode", &qos_mode);
+
+	if (ret)
+		node_info->qos_params.mode = -1;
+	else
+		node_info->qos_params.mode = get_qos_mode(pdev, dev_node,
+								qos_mode);
+
+	of_property_read_u32(dev_node, "qcom,prio-lvl",
+					&node_info->qos_params.prio_lvl);
+
+	of_property_read_u32(dev_node, "qcom,prio1",
+						&node_info->qos_params.prio1);
+
+	of_property_read_u32(dev_node, "qcom,prio0",
+						&node_info->qos_params.prio0);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio1",
+					&node_info->qos_params.reg_prio1);
+
+	of_property_read_u32(dev_node, "qcom,reg-prio0",
+					&node_info->qos_params.reg_prio0);
+
+	of_property_read_u32(dev_node, "qcom,prio-rd",
+					&node_info->qos_params.prio_rd);
+
+	of_property_read_u32(dev_node, "qcom,prio-wr",
+						&node_info->qos_params.prio_wr);
+
+	of_property_read_u32(dev_node, "qcom,gp",
+						&node_info->qos_params.gp);
+
+	of_property_read_u32(dev_node, "qcom,thmp",
+						&node_info->qos_params.thmp);
+
+	of_property_read_u32(dev_node, "qcom,ws",
+						&node_info->qos_params.ws);
+
+	ret = of_property_read_u32(dev_node, "qcom,bw_buffer", &temp);
+
+	if (ret)
+		node_info->qos_params.bw_buffer = 0;
+	else
+		node_info->qos_params.bw_buffer = KBTOB(temp);
+
+}
+
+static int msm_bus_of_parse_clk_array(struct device_node *dev_node,
+			struct device_node *gdsc_node,
+			struct platform_device *pdev, struct nodeclk **clk_arr,
+			int *num_clks, int id)
+{
+	int ret = 0;
+	int idx = 0;
+	struct property *prop;
+	const char *clk_name;
+	int clks = 0;
+
+	clks = of_property_count_strings(dev_node, "clock-names");
+	if (clks < 0) {
+		dev_err(&pdev->dev, "No qos clks node %d\n", id);
+		ret = clks;
+		goto exit_of_parse_clk_array;
+	}
+
+	*num_clks = clks;
+	*clk_arr = devm_kzalloc(&pdev->dev,
+			(clks * sizeof(struct nodeclk)), GFP_KERNEL);
+
+	if (!(*clk_arr)) {
+		dev_err(&pdev->dev, "Error allocating clk nodes for %d\n", id);
+		ret = -ENOMEM;
+		*num_clks = 0;
+		goto exit_of_parse_clk_array;
+	}
+
+	of_property_for_each_string(dev_node, "clock-names", prop, clk_name) {
+		char gdsc_string[MAX_REG_NAME];
+
+		(*clk_arr)[idx].clk = of_clk_get_by_name(dev_node, clk_name);
+
+		if (IS_ERR_OR_NULL((*clk_arr)[idx].clk)) {
+			dev_err(&pdev->dev,
+				"Failed to get clk %s for bus%d ", clk_name,
+									id);
+			continue;
+		}
+		if (strnstr(clk_name, "no-rate", strlen(clk_name)))
+			(*clk_arr)[idx].enable_only_clk = true;
+
+		scnprintf(gdsc_string, MAX_REG_NAME, "%s-supply", clk_name);
+
+		if (of_find_property(gdsc_node, gdsc_string, NULL))
+			scnprintf((*clk_arr)[idx].reg_name,
+				MAX_REG_NAME, "%s", clk_name);
+		else
+			scnprintf((*clk_arr)[idx].reg_name,
+					MAX_REG_NAME, "%c", '\0');
+
+		idx++;
+	}
+exit_of_parse_clk_array:
+	return ret;
+}
+
+static void get_agg_params(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_info_type *node_info)
+{
+	int ret;
+
+	ret = of_property_read_u32(dev_node, "qcom,buswidth",
+					&node_info->agg_params.buswidth);
+	if (ret) {
+		dev_dbg(&pdev->dev, "Using default 8 bytes %d", node_info->id);
+		node_info->agg_params.buswidth = 8;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-ports",
+				   &node_info->agg_params.num_aggports);
+	if (ret)
+		node_info->agg_params.num_aggports = node_info->num_qports;
+
+	ret = of_property_read_u32(dev_node, "qcom,agg-scheme",
+					&node_info->agg_params.agg_scheme);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.agg_scheme = DEFAULT_AGG_SCHEME;
+		else
+			node_info->agg_params.agg_scheme = AGG_SCHEME_NONE;
+	}
+
+	ret = of_property_read_u32(dev_node, "qcom,vrail-comp",
+					&node_info->agg_params.vrail_comp);
+	if (ret) {
+		if (node_info->is_fab_dev)
+			node_info->agg_params.vrail_comp = DEFAULT_VRAIL_COMP;
+		else
+			node_info->agg_params.vrail_comp = 0;
+	}
+}
+
+static struct msm_bus_node_info_type *get_node_info_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev)
+{
+	struct msm_bus_node_info_type *node_info;
+	unsigned int ret;
+	int size;
+	int i;
+	struct device_node *con_node;
+	struct device_node *bus_dev;
+	struct device_node *bcm_dev;
+
+	node_info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_info_type),
+			GFP_KERNEL);
+	if (!node_info) {
+		dev_err(&pdev->dev,
+			"Error: Unable to allocate memory for node_info\n");
+		return NULL;
+	}
+
+	ret = of_property_read_u32(dev_node, "cell-id", &node_info->id);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing cell-id\n");
+		goto node_info_err;
+	}
+	ret = of_property_read_string(dev_node, "label", &node_info->name);
+	if (ret) {
+		dev_warn(&pdev->dev, "Bus node is missing name\n");
+		goto node_info_err;
+	}
+	node_info->qport = get_arr(pdev, dev_node, "qcom,qport",
+			&node_info->num_qports);
+
+	if (of_get_property(dev_node, "qcom,connections", &size)) {
+		node_info->num_connections = size / sizeof(int);
+		node_info->connections = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+	} else {
+		node_info->num_connections = 0;
+		node_info->connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_connections; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,connections", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	if (of_get_property(dev_node, "qcom,blacklist", &size)) {
+		node_info->num_blist = size/sizeof(u32);
+		node_info->black_listed_connections = devm_kzalloc(&pdev->dev,
+		size, GFP_KERNEL);
+	} else {
+		node_info->num_blist = 0;
+		node_info->black_listed_connections = 0;
+	}
+
+	for (i = 0; i < node_info->num_blist; i++) {
+		con_node = of_parse_phandle(dev_node, "qcom,blacklist", i);
+		if (IS_ERR_OR_NULL(con_node))
+			goto node_info_err;
+
+		if (of_property_read_u32(con_node, "cell-id",
+				&node_info->black_listed_connections[i]))
+			goto node_info_err;
+		of_node_put(con_node);
+	}
+
+	bus_dev = of_parse_phandle(dev_node, "qcom,bus-dev", 0);
+	if (!IS_ERR_OR_NULL(bus_dev)) {
+		if (of_property_read_u32(bus_dev, "cell-id",
+			&node_info->bus_device_id)) {
+			dev_err(&pdev->dev, "Can't find bus device. Node %d",
+					node_info->id);
+			goto node_info_err;
+		}
+
+		of_node_put(bus_dev);
+	} else
+		dev_dbg(&pdev->dev, "Can't find bdev phandle for %d",
+					node_info->id);
+
+	if (of_get_property(dev_node, "qcom,bcms", &size)) {
+		node_info->num_bcm_devs = size / sizeof(int);
+		node_info->bcm_dev_ids = devm_kzalloc(&pdev->dev, size,
+				GFP_KERNEL);
+	} else {
+		node_info->num_bcm_devs = 0;
+		node_info->bcm_devs = 0;
+	}
+
+	for (i = 0; i < node_info->num_bcm_devs; i++) {
+		bcm_dev = of_parse_phandle(dev_node, "qcom,bcms", i);
+		if (IS_ERR_OR_NULL(bcm_dev))
+			goto node_info_err;
+
+		if (of_property_read_u32(bcm_dev, "cell-id",
+				&node_info->bcm_dev_ids[i])){
+			dev_err(&pdev->dev, "Can't find bcm device. Node %d",
+					node_info->id);
+			goto node_info_err;
+		}
+		dev_err(&pdev->dev, "found bcm device. Node %d BCM:%d\n",
+				node_info->id, node_info->bcm_dev_ids[0]);
+
+		of_node_put(bcm_dev);
+	}
+
+	node_info->is_bcm_dev = of_property_read_bool(dev_node, "qcom,bcm-dev");
+	node_info->is_fab_dev = of_property_read_bool(dev_node, "qcom,fab-dev");
+	node_info->virt_dev = of_property_read_bool(dev_node, "qcom,virt-dev");
+
+	get_agg_params(dev_node, pdev, node_info);
+	get_qos_params(dev_node, pdev, node_info);
+
+	return node_info;
+
+node_info_err:
+	devm_kfree(&pdev->dev, node_info);
+	node_info = 0;
+	return NULL;
+}
+
+static int get_bus_node_device_data(
+		struct device_node * const dev_node,
+		struct platform_device * const pdev,
+		struct msm_bus_node_device_type * const node_device)
+{
+	bool enable_only;
+	bool setrate_only;
+
+	node_device->node_info = get_node_info_data(dev_node, pdev);
+	if (IS_ERR_OR_NULL(node_device->node_info)) {
+		dev_err(&pdev->dev, "Error: Node info missing\n");
+		return -ENODATA;
+	}
+	node_device->ap_owned = of_property_read_bool(dev_node,
+							"qcom,ap-owned");
+
+	if (node_device->node_info->is_bcm_dev) {
+
+		node_device->bcmdev = get_bcm_device_info(dev_node, pdev);
+
+		if (IS_ERR_OR_NULL(node_device->bcmdev)) {
+			dev_err(&pdev->dev,
+				"Error: BCM device info missing\n");
+			devm_kfree(&pdev->dev, node_device->node_info);
+			return -ENODATA;
+		}
+	}
+
+	if (node_device->node_info->is_fab_dev) {
+		struct device_node *qos_clk_node;
+
+		dev_dbg(&pdev->dev, "Dev %d\n", node_device->node_info->id);
+
+		if (!node_device->node_info->virt_dev) {
+			node_device->fabdev =
+				get_fab_device_info(dev_node, pdev);
+			if (IS_ERR_OR_NULL(node_device->fabdev)) {
+				dev_err(&pdev->dev,
+					"Error: Fabric device info missing\n");
+				devm_kfree(&pdev->dev, node_device->node_info);
+				return -ENODATA;
+			}
+		}
+
+		node_device->bus_qos_clk.clk = of_clk_get_by_name(dev_node,
+							"bus_qos_clk");
+
+		if (IS_ERR_OR_NULL(node_device->bus_qos_clk.clk)) {
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus qos clk for %d",
+				__func__, node_device->node_info->id);
+			scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		} else {
+			if (of_find_property(dev_node, "bus-qos-gdsc-supply",
+								NULL))
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%s", "bus-qos-gdsc");
+			else
+				scnprintf(node_device->bus_qos_clk.reg_name,
+					MAX_REG_NAME, "%c", '\0');
+		}
+
+		qos_clk_node = of_get_child_by_name(dev_node,
+						"qcom,node-qos-clks");
+
+		if (qos_clk_node) {
+			if (msm_bus_of_parse_clk_array(qos_clk_node, dev_node,
+						pdev,
+						&node_device->node_qos_clks,
+						&node_device->num_node_qos_clks,
+						node_device->node_info->id)) {
+				dev_info(&pdev->dev, "Bypass QoS programming");
+				node_device->fabdev->bypass_qos_prg = true;
+			}
+			of_node_put(qos_clk_node);
+		}
+	} else {
+		enable_only = of_property_read_bool(dev_node,
+							"qcom,enable-only-clk");
+		node_device->clk[DUAL_CTX].enable_only_clk = enable_only;
+
+		/*
+		 * Doesn't make sense to have a clk handle you can't enable or
+		 * set rate on.
+		 */
+		if (!enable_only) {
+			setrate_only = of_property_read_bool(dev_node,
+						"qcom,setrate-only-clk");
+			node_device->clk[DUAL_CTX].setrate_only_clk =
+								setrate_only;
+			node_device->clk[ACTIVE_CTX].setrate_only_clk =
+								setrate_only;
+		}
+
+		node_device->clk[DUAL_CTX].clk = of_clk_get_by_name(dev_node,
+							"node_clk");
+
+		if (IS_ERR_OR_NULL(node_device->clk[DUAL_CTX].clk))
+			dev_dbg(&pdev->dev,
+				"%s:Failed to get bus clk for bus%d ctx%d",
+				__func__, node_device->node_info->id,
+								DUAL_CTX);
+
+		if (of_find_property(dev_node, "node-gdsc-supply", NULL))
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%s", "node-gdsc");
+		else
+			scnprintf(node_device->clk[DUAL_CTX].reg_name,
+				MAX_REG_NAME, "%c", '\0');
+
+	}
+	return 0;
+}
+
+struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev)
+{
+	struct device_node *of_node, *child_node;
+	struct msm_bus_device_node_registration *pdata;
+	unsigned int i = 0, j;
+	unsigned int ret;
+
+	if (!pdev) {
+		pr_err("Error: Null platform device\n");
+		return NULL;
+	}
+
+	of_node = pdev->dev.of_node;
+
+	pdata = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_device_node_registration),
+			GFP_KERNEL);
+	if (!pdata) {
+		dev_err(&pdev->dev,
+				"Error: Memory allocation for pdata failed\n");
+		return NULL;
+	}
+
+	pdata->num_devices = of_get_child_count(of_node);
+
+	pdata->info = devm_kzalloc(&pdev->dev,
+			sizeof(struct msm_bus_node_device_type) *
+			pdata->num_devices, GFP_KERNEL);
+
+	if (!pdata->info) {
+		dev_err(&pdev->dev,
+			"Error: Memory allocation for pdata->info failed\n");
+		goto node_reg_err;
+	}
+
+	ret = 0;
+	for_each_child_of_node(of_node, child_node) {
+		ret = get_bus_node_device_data(child_node, pdev,
+				&pdata->info[i]);
+		if (ret) {
+			dev_err(&pdev->dev, "Error: unable to initialize bus nodes\n");
+			goto node_reg_err_1;
+		}
+		pdata->info[i].of_node = child_node;
+		i++;
+	}
+
+	dev_dbg(&pdev->dev, "bus topology:\n");
+	for (i = 0; i < pdata->num_devices; i++) {
+		dev_dbg(&pdev->dev, "id %d\nnum_qports %d\nnum_connections %d",
+				pdata->info[i].node_info->id,
+				pdata->info[i].node_info->num_qports,
+				pdata->info[i].node_info->num_connections);
+		dev_dbg(&pdev->dev, "\nbus_device_id %d\n buswidth %d\n",
+				pdata->info[i].node_info->bus_device_id,
+				pdata->info[i].node_info->agg_params.buswidth);
+		for (j = 0; j < pdata->info[i].node_info->num_connections;
+									j++) {
+			dev_dbg(&pdev->dev, "connection[%d]: %d\n", j,
+				pdata->info[i].node_info->connections[j]);
+		}
+		for (j = 0; j < pdata->info[i].node_info->num_blist;
+									 j++) {
+			dev_dbg(&pdev->dev, "black_listed_node[%d]: %d\n", j,
+				pdata->info[i].node_info->
+				black_listed_connections[j]);
+		}
+		if (pdata->info[i].fabdev)
+			dev_dbg(&pdev->dev, "base_addr %zu\nbus_type %d\n",
+					(size_t)pdata->info[i].
+						fabdev->pqos_base,
+					pdata->info[i].fabdev->bus_type);
+	}
+	return pdata;
+
+node_reg_err_1:
+	devm_kfree(&pdev->dev, pdata->info);
+node_reg_err:
+	devm_kfree(&pdev->dev, pdata);
+	pdata = NULL;
+	return NULL;
+}
+
+static int msm_bus_of_get_ids(struct platform_device *pdev,
+			struct device_node *dev_node, int **dev_ids,
+			int *num_ids, char *prop_name)
+{
+	int ret = 0;
+	int size, i;
+	struct device_node *rule_node;
+	int *ids = NULL;
+
+	if (of_get_property(dev_node, prop_name, &size)) {
+		*num_ids = size / sizeof(int);
+		ids = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
+	} else {
+		dev_err(&pdev->dev, "No rule nodes, skipping node");
+		ret = -ENXIO;
+		goto exit_get_ids;
+	}
+
+	*dev_ids = ids;
+	for (i = 0; i < *num_ids; i++) {
+		rule_node = of_parse_phandle(dev_node, prop_name, i);
+		if (IS_ERR_OR_NULL(rule_node)) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+
+		if (of_property_read_u32(rule_node, "cell-id",
+				&ids[i])) {
+			dev_err(&pdev->dev, "Can't get rule node id");
+			ret = -ENXIO;
+			goto err_get_ids;
+		}
+		of_node_put(rule_node);
+	}
+exit_get_ids:
+	return ret;
+err_get_ids:
+	devm_kfree(&pdev->dev, ids);
+	of_node_put(rule_node);
+	ids = NULL;
+	return ret;
+}
+
+int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rules)
+{
+	int ret = 0;
+	struct device_node *of_node, *child_node;
+	int num_rules = 0;
+	int rule_idx = 0;
+	int bw_fld = 0;
+	int i;
+	struct bus_rule_type *local_rule = NULL;
+
+	of_node = pdev->dev.of_node;
+	num_rules = of_get_child_count(of_node);
+	local_rule = devm_kzalloc(&pdev->dev,
+				sizeof(struct bus_rule_type) * num_rules,
+				GFP_KERNEL);
+
+	if (IS_ERR_OR_NULL(local_rule)) {
+		ret = -ENOMEM;
+		goto exit_static_rules;
+	}
+
+	*static_rules = local_rule;
+	for_each_child_of_node(of_node, child_node) {
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].src_id,
+			&local_rule[rule_idx].num_src,
+			"qcom,src-nodes");
+
+		ret = msm_bus_of_get_ids(pdev, child_node,
+			&local_rule[rule_idx].dst_node,
+			&local_rule[rule_idx].num_dst,
+			"qcom,dest-node");
+
+		ret = of_property_read_u32(child_node, "qcom,src-field",
+				&local_rule[rule_idx].src_field);
+		if (ret) {
+			dev_err(&pdev->dev, "src-field missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,src-op",
+				&local_rule[rule_idx].op);
+		if (ret) {
+			dev_err(&pdev->dev, "src-op missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,mode",
+				&local_rule[rule_idx].mode);
+		if (ret) {
+			dev_err(&pdev->dev, "mode missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		}
+
+		ret = of_property_read_u32(child_node, "qcom,thresh", &bw_fld);
+		if (ret) {
+			dev_err(&pdev->dev, "thresh missing");
+			ret = -ENXIO;
+			goto err_static_rules;
+		} else
+			local_rule[rule_idx].thresh = KBTOB(bw_fld);
+
+		ret = of_property_read_u32(child_node, "qcom,dest-bw",
+								&bw_fld);
+		if (ret)
+			local_rule[rule_idx].dst_bw = 0;
+		else
+			local_rule[rule_idx].dst_bw = KBTOB(bw_fld);
+
+		rule_idx++;
+	}
+	ret = rule_idx;
+exit_static_rules:
+	return ret;
+err_static_rules:
+	for (i = 0; i < num_rules; i++) {
+		if (!IS_ERR_OR_NULL(local_rule)) {
+			if (!IS_ERR_OR_NULL(local_rule[i].src_id))
+				devm_kfree(&pdev->dev,
+						local_rule[i].src_id);
+			if (!IS_ERR_OR_NULL(local_rule[i].dst_node))
+				devm_kfree(&pdev->dev,
+						local_rule[i].dst_node);
+			devm_kfree(&pdev->dev, local_rule);
+		}
+	}
+	*static_rules = NULL;
+	return ret;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
new file mode 100644
index 0000000..63fc336
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpm_smd.c
@@ -0,0 +1,240 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "AXI: %s(): " fmt, __func__
+
+#include "msm_bus_core.h"
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/rpm-smd.h>
+
+/* Stubs for backward compatibility */
+void msm_bus_rpm_set_mt_mask(void)
+{
+}
+
+bool msm_bus_rpm_is_mem_interleaved(void)
+{
+	return true;
+}
+
+struct commit_data {
+	struct msm_bus_node_hw_info *mas_arb;
+	struct msm_bus_node_hw_info *slv_arb;
+};
+
+#ifdef CONFIG_DEBUG_FS
+void msm_bus_rpm_fill_cdata_buffer(int *curr, char *buf, const int max_size,
+	void *cdata, int nmasters, int nslaves, int ntslaves)
+{
+	int c;
+	struct commit_data *cd = (struct commit_data *)cdata;
+
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nMas BW:\n");
+	for (c = 0; c < nmasters; c++)
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+			"%d: %llu\t", cd->mas_arb[c].hw_id,
+			cd->mas_arb[c].bw);
+	*curr += scnprintf(buf + *curr, max_size - *curr, "\nSlave BW:\n");
+	for (c = 0; c < nslaves; c++) {
+		*curr += scnprintf(buf + *curr, max_size - *curr,
+		"%d: %llu\t", cd->slv_arb[c].hw_id,
+		cd->slv_arb[c].bw);
+	}
+}
+#endif
+
+static int msm_bus_rpm_compare_cdata(
+	struct msm_bus_fabric_registration *fab_pdata,
+	struct commit_data *cd1, struct commit_data *cd2)
+{
+	size_t n;
+	int ret;
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nmasters * 2;
+	ret = memcmp(cd1->mas_arb, cd2->mas_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	n = sizeof(struct msm_bus_node_hw_info) * fab_pdata->nslaves * 2;
+	ret = memcmp(cd1->slv_arb, cd2->slv_arb, n);
+	if (ret) {
+		MSM_BUS_DBG("Master Arb Data not equal\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int msm_bus_rpm_req(int ctx, uint32_t rsc_type, uint32_t key,
+	struct msm_bus_node_hw_info *hw_info, bool valid)
+{
+	struct msm_rpm_request *rpm_req;
+	int ret = 0, msg_id;
+
+	if (ctx == ACTIVE_CTX)
+		ctx = MSM_RPM_CTX_ACTIVE_SET;
+	else if (ctx == DUAL_CTX)
+		ctx = MSM_RPM_CTX_SLEEP_SET;
+
+	rpm_req = msm_rpm_create_request(ctx, rsc_type, hw_info->hw_id, 1);
+	if (rpm_req == NULL) {
+		MSM_BUS_WARN("RPM: Couldn't create RPM Request\n");
+		return -ENXIO;
+	}
+
+	if (valid) {
+		ret = msm_rpm_add_kvp_data(rpm_req, key, (const uint8_t *)
+			&hw_info->bw, (int)(sizeof(uint64_t)));
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+
+		MSM_BUS_DBG("Added Key: %d, Val: %llu, size: %zu\n", key,
+			hw_info->bw, sizeof(uint64_t));
+	} else {
+		/* Invalidate RPM requests */
+		ret = msm_rpm_add_kvp_data(rpm_req, 0, NULL, 0);
+		if (ret) {
+			MSM_BUS_WARN("RPM: Add KVP failed for RPM Req:%u\n",
+				rsc_type);
+			goto free_rpm_request;
+		}
+	}
+
+	msg_id = msm_rpm_send_request(rpm_req);
+	if (!msg_id) {
+		MSM_BUS_WARN("RPM: No message ID for req\n");
+		ret = -ENXIO;
+		goto free_rpm_request;
+	}
+
+	ret = msm_rpm_wait_for_ack(msg_id);
+	if (ret) {
+		MSM_BUS_WARN("RPM: Ack failed\n");
+		goto free_rpm_request;
+	}
+
+free_rpm_request:
+	msm_rpm_free_request(rpm_req);
+
+	return ret;
+}
+
+static int msm_bus_rpm_commit_arb(struct msm_bus_fabric_registration
+	*fab_pdata, int ctx, void *rpm_data,
+	struct commit_data *cd, bool valid)
+{
+	int i, status = 0, rsc_type, key;
+
+	MSM_BUS_DBG("Context: %d\n", ctx);
+	rsc_type = RPM_BUS_MASTER_REQ;
+	key = RPM_MASTER_FIELD_BW;
+	for (i = 0; i < fab_pdata->nmasters; i++) {
+		if (!cd->mas_arb[i].dirty)
+			continue;
+
+		MSM_BUS_DBG("MAS HWID: %d, BW: %llu DIRTY: %d\n",
+			cd->mas_arb[i].hw_id,
+			cd->mas_arb[i].bw,
+			cd->mas_arb[i].dirty);
+		status = msm_bus_rpm_req(ctx, rsc_type, key,
+			&cd->mas_arb[i], valid);
+		if (status) {
+			MSM_BUS_ERR("RPM: Req fail: mas:%d, bw:%llu\n",
+				cd->mas_arb[i].hw_id,
+				cd->mas_arb[i].bw);
+			break;
+		}
+		cd->mas_arb[i].dirty = false;
+	}
+
+	rsc_type = RPM_BUS_SLAVE_REQ;
+	key = RPM_SLAVE_FIELD_BW;
+	for (i = 0; i < fab_pdata->nslaves; i++) {
+		if (!cd->slv_arb[i].dirty)
+			continue;
+
+		MSM_BUS_DBG("SLV HWID: %d, BW: %llu DIRTY: %d\n",
+			cd->slv_arb[i].hw_id,
+			cd->slv_arb[i].bw,
+			cd->slv_arb[i].dirty);
+		status = msm_bus_rpm_req(ctx, rsc_type, key,
+			&cd->slv_arb[i], valid);
+		if (status) {
+			MSM_BUS_ERR("RPM: Req fail: slv:%d, bw:%llu\n",
+				cd->slv_arb[i].hw_id,
+				cd->slv_arb[i].bw);
+			break;
+		}
+		cd->slv_arb[i].dirty = false;
+	}
+
+	return status;
+}
+
+/**
+ * msm_bus_remote_hw_commit() - Commit the arbitration data to RPM
+ * @fab_pdata: Fabric for which the data should be committed
+ * @hw_data: Hardware-specific data passed through to the commit
+ * @cdata: Per-context (dual/active) commit data for the fabric
+ */
+int msm_bus_remote_hw_commit(struct msm_bus_fabric_registration
+	*fab_pdata, void *hw_data, void **cdata)
+{
+
+	int ret;
+	bool valid;
+	struct commit_data *dual_cd, *act_cd;
+	void *rpm_data = hw_data;
+
+	MSM_BUS_DBG("\nReached RPM Commit\n");
+	dual_cd = (struct commit_data *)cdata[DUAL_CTX];
+	act_cd = (struct commit_data *)cdata[ACTIVE_CTX];
+
+	/*
+	 * If the arb data for active set and sleep set is
+	 * different, commit both sets.
+	 * If the arb data for active set and sleep set is
+	 * the same, invalidate the sleep set.
+	 */
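+	/*
+	 * Example: when both contexts carry identical votes for every master
+	 * and slave, the compare below returns 0 and the sleep set is sent
+	 * down with an empty KVP to invalidate it.
+	 */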
+	ret = msm_bus_rpm_compare_cdata(fab_pdata, act_cd, dual_cd);
+	if (!ret)
+		/* Invalidate sleep set.*/
+		valid = false;
+	else
+		valid = true;
+
+	ret = msm_bus_rpm_commit_arb(fab_pdata, DUAL_CTX, rpm_data,
+		dual_cd, valid);
+	if (ret)
+		MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n",
+			fab_pdata->id, DUAL_CTX);
+
+	valid = true;
+	ret = msm_bus_rpm_commit_arb(fab_pdata, ACTIVE_CTX, rpm_data, act_cd,
+		valid);
+	if (ret)
+		MSM_BUS_ERR("Error comiting fabric:%d in %d ctx\n",
+			fab_pdata->id, ACTIVE_CTX);
+
+	return ret;
+}
+
+int msm_bus_rpm_hw_init(struct msm_bus_fabric_registration *pdata,
+	struct msm_bus_hw_algorithm *hw_algo)
+{
+	if (!pdata->ahb)
+		pdata->rpm_enabled = 1;
+	return 0;
+}
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
new file mode 100644
index 0000000..a7f265c
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rpmh.h
@@ -0,0 +1,194 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+#define _ARCH_ARM_MACH_MSM_BUS_ADHOC_H
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/msm_bus_rules.h>
+#include "msm_bus_core.h"
+
+struct msm_bus_node_device_type;
+
+struct link_node {
+	uint64_t lnode_ib[NUM_CTX];
+	uint64_t lnode_ab[NUM_CTX];
+	int next;
+	struct device *next_dev;
+	struct list_head link;
+	uint32_t in_use;
+	const char *cl_name;
+	unsigned int bus_dev_id;
+};
+
+/* New types introduced for adhoc topology */
+struct msm_bus_noc_ops {
+	int (*qos_init)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*set_bw)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq);
+	int (*limit_mport)(struct msm_bus_node_device_type *dev,
+			void __iomem *qos_base, uint32_t qos_off,
+			uint32_t qos_delta, uint32_t qos_freq, int enable_lim,
+			uint64_t lim_bw);
+	bool (*update_bw_reg)(int mode);
+};
+
+struct nodebw {
+	uint64_t sum_ab;
+	uint64_t last_sum_ab;
+	uint64_t last_max_ib;
+	uint64_t max_ib;
+	uint64_t max_ab;
+	uint64_t cur_clk_hz;
+	uint32_t util_used;
+	uint32_t vrail_used;
+};
+
+struct msm_bus_bcm_device_type {
+	uint32_t width;
+	uint32_t clk_domain;
+	uint32_t type;
+	uint32_t unit_size;
+	uint32_t addr;
+	int num_bus_devs;
+};
+
+struct msm_bus_fab_device_type {
+	void __iomem *qos_base;
+	phys_addr_t pqos_base;
+	size_t qos_range;
+	uint32_t base_offset;
+	uint32_t qos_freq;
+	uint32_t qos_off;
+	struct msm_bus_noc_ops noc_ops;
+	enum msm_bus_hw_sel bus_type;
+	bool bypass_qos_prg;
+};
+
+struct qos_params_type {
+	int mode;
+	unsigned int prio_lvl;
+	unsigned int prio_rd;
+	unsigned int prio_wr;
+	unsigned int prio1;
+	unsigned int prio0;
+	unsigned int reg_prio1;
+	unsigned int reg_prio0;
+	unsigned int gp;
+	unsigned int thmp;
+	unsigned int ws;
+	u64 bw_buffer;
+};
+
+struct node_util_levels_type {
+	uint64_t threshold;
+	uint32_t util_fact;
+};
+
+struct node_agg_params_type {
+	uint32_t agg_scheme;
+	uint32_t num_aggports;
+	unsigned int buswidth;
+	uint32_t vrail_comp;
+	uint32_t num_util_levels;
+	struct node_util_levels_type *util_levels;
+};
+
+struct msm_bus_node_info_type {
+	const char *name;
+	unsigned int id;
+	int mas_rpm_id;
+	int slv_rpm_id;
+	int num_ports;
+	int num_qports;
+	int *qport;
+	struct qos_params_type qos_params;
+	unsigned int num_connections;
+	unsigned int num_blist;
+	unsigned int num_bcm_devs;
+	bool is_fab_dev;
+	bool virt_dev;
+	bool is_bcm_dev;
+	bool is_traversed;
+	unsigned int *connections;
+	unsigned int *black_listed_connections;
+	unsigned int *bcm_dev_ids;
+	struct device **dev_connections;
+	struct device **black_connections;
+	struct device **bcm_devs;
+	int bcm_req_idx;
+	unsigned int bus_device_id;
+	struct device *bus_device;
+	struct rule_update_path_info rule;
+	uint64_t lim_bw;
+	bool defer_qos;
+	struct node_agg_params_type agg_params;
+};
+
+struct msm_bus_node_device_type {
+	struct msm_bus_node_info_type *node_info;
+	struct msm_bus_fab_device_type *fabdev;
+	struct msm_bus_bcm_device_type *bcmdev;
+	int num_lnodes;
+	struct link_node *lnode_list;
+	struct nodebw node_bw[NUM_CTX];
+	struct list_head link;
+	struct nodeclk clk[NUM_CTX];
+	struct nodeclk bus_qos_clk;
+	uint32_t num_node_qos_clks;
+	struct nodeclk *node_qos_clks;
+	unsigned int ap_owned;
+	struct device_node *of_node;
+	struct device dev;
+	bool dirty;
+	struct list_head dev_link;
+	struct list_head devlist;
+};
+
+static inline struct msm_bus_node_device_type *to_msm_bus_node(struct device *d)
+{
+	return container_of(d, struct msm_bus_node_device_type, dev);
+}
+
+
+int msm_bus_enable_limiter(struct msm_bus_node_device_type *nodedev,
+				int throttle_en, uint64_t lim_bw);
+int msm_bus_commit_data(struct list_head *clist);
+int bcm_remove_handoff_req(struct device *dev, void *data);
+void *msm_bus_realloc_devmem(struct device *dev, void *p, size_t old_size,
+					size_t new_size, gfp_t flags);
+
+extern struct msm_bus_device_node_registration
+	*msm_bus_of_to_pdata(struct platform_device *pdev);
+extern void msm_bus_arb_setops_adhoc(struct msm_bus_arb_ops *arb_ops);
+extern int msm_bus_bimc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_noc_set_ops(struct msm_bus_node_device_type *bus_dev);
+extern int msm_bus_of_get_static_rules(struct platform_device *pdev,
+					struct bus_rule_type **static_rule);
+extern int msm_rules_update_path(struct list_head *input_list,
+				struct list_head *output_list);
+extern void print_all_rules(void);
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_init(struct device *dev);
+#else
+static inline int msm_bus_floor_init(struct device *dev)
+{
+	return 0;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /* _ARCH_ARM_MACH_MSM_BUS_ADHOC_H */
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_rules.c b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
new file mode 100644
index 0000000..5b5159d
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_bus_rules.c
@@ -0,0 +1,726 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/list_sort.h>
+#include <linux/msm-bus-board.h>
+#include <linux/msm_bus_rules.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/msm-bus.h>
+#include <trace/events/trace_msm_bus.h>
+
+struct node_vote_info {
+	int id;
+	u64 ib;
+	u64 ab;
+	u64 clk;
+};
+
+struct rules_def {
+	int rule_id;
+	int num_src;
+	int state;
+	struct node_vote_info *src_info;
+	struct bus_rule_type rule_ops;
+	bool state_change;
+	struct list_head link;
+};
+
+struct rule_node_info {
+	int id;
+	void *data;
+	struct raw_notifier_head rule_notify_list;
+	struct rules_def *cur_rule;
+	int num_rules;
+	struct list_head node_rules;
+	struct list_head link;
+	struct rule_apply_rcm_info apply;
+};
+
+DEFINE_MUTEX(msm_bus_rules_lock);
+static LIST_HEAD(node_list);
+static struct rule_node_info *get_node(u32 id, void *data);
+static int node_rules_compare(void *priv, struct list_head *a,
+					struct list_head *b);
+
+#define LE(op1, op2)	(op1 <= op2)
+#define LT(op1, op2)	(op1 < op2)
+#define GE(op1, op2)	(op1 >= op2)
+#define GT(op1, op2)	(op1 > op2)
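+/* Pseudo node id under which notifier-block rule clients are tracked. */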
+#define NB_ID		(0x201)
+
+static struct rule_node_info *get_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			if (id == NB_ID) {
+				if (node_it->data == data) {
+					node_match = node_it;
+					break;
+				}
+			} else {
+				node_match = node_it;
+				break;
+			}
+		}
+	}
+	return node_match;
+}
+
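+/* Like get_node(), but allocates and registers a new entry if none exists. */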
+static struct rule_node_info *gen_node(u32 id, void *data)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rule_node_info *node_match = NULL;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		if (node_it->id == id) {
+			node_match = node_it;
+			break;
+		}
+	}
+
+	if (!node_match) {
+		node_match = kzalloc(sizeof(struct rule_node_info), GFP_KERNEL);
+		if (!node_match)
+			goto exit_node_match;
+
+		node_match->id = id;
+		node_match->cur_rule = NULL;
+		node_match->num_rules = 0;
+		node_match->data = data;
+		list_add_tail(&node_match->link, &node_list);
+		INIT_LIST_HEAD(&node_match->node_rules);
+		RAW_INIT_NOTIFIER_HEAD(&node_match->rule_notify_list);
+		pr_debug("Added new node %d to list\n", id);
+	}
+exit_node_match:
+	return node_match;
+}
+
+static bool do_compare_op(u64 op1, u64 op2, int op)
+{
+	bool ret = false;
+
+	switch (op) {
+	case OP_LE:
+		ret = LE(op1, op2);
+		break;
+	case OP_LT:
+		ret = LT(op1, op2);
+		break;
+	case OP_GT:
+		ret = GT(op1, op2);
+		break;
+	case OP_GE:
+		ret = GE(op1, op2);
+		break;
+	case OP_NOOP:
+		ret = true;
+		break;
+	default:
+		pr_info("Invalid OP %d", op);
+		break;
+	}
+	return ret;
+}
+
+static void update_src_id_vote(struct rule_update_path_info *inp_node,
+				struct rule_node_info *rule_node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &rule_node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id == inp_node->id) {
+				rule->src_info[i].ib = inp_node->ib;
+				rule->src_info[i].ab = inp_node->ab;
+				rule->src_info[i].clk = inp_node->clk;
+			}
+		}
+	}
+}
+
+static u64 get_field(struct rules_def *rule, int src_id)
+{
+	u64 field = 0;
+	int i;
+
+	for (i = 0; i < rule->num_src; i++) {
+		switch (rule->rule_ops.src_field) {
+		case FLD_IB:
+			field += rule->src_info[i].ib;
+			break;
+		case FLD_AB:
+			field += rule->src_info[i].ab;
+			break;
+		case FLD_CLK:
+			field += rule->src_info[i].clk;
+			break;
+		}
+	}
+
+	return field;
+}
+
+static bool check_rule(struct rules_def *rule,
+			struct rule_update_path_info *inp)
+{
+	bool ret = false;
+
+	if (!rule)
+		return ret;
+
+	switch (rule->rule_ops.op) {
+	case OP_LE:
+	case OP_LT:
+	case OP_GT:
+	case OP_GE:
+	{
+		u64 src_field = get_field(rule, inp->id);
+
+		ret = do_compare_op(src_field, rule->rule_ops.thresh,
+							rule->rule_ops.op);
+		break;
+	}
+	default:
+		pr_err("Unsupported op %d", rule->rule_ops.op);
+		break;
+	}
+	return ret;
+}
+
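+/*
+ * Re-evaluate every rule of @node that lists @inp_node as a source, moving
+ * it between RULE_STATE_APPLIED and RULE_STATE_NOT_APPLIED and remembering
+ * whether the state changed.
+ */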
+static void match_rule(struct rule_update_path_info *inp_node,
+			struct rule_node_info *node)
+{
+	struct rules_def *rule;
+	int i;
+
+	list_for_each_entry(rule, &node->node_rules, link) {
+		for (i = 0; i < rule->num_src; i++) {
+			if (rule->src_info[i].id != inp_node->id)
+				continue;
+
+			if (check_rule(rule, inp_node)) {
+				trace_bus_rules_matches(
+				(node->cur_rule ?
+					node->cur_rule->rule_id : -1),
+				inp_node->id, inp_node->ab,
+				inp_node->ib, inp_node->clk);
+				if (rule->state ==
+					RULE_STATE_NOT_APPLIED)
+					rule->state_change = true;
+				rule->state = RULE_STATE_APPLIED;
+			} else {
+				if (rule->state ==
+					RULE_STATE_APPLIED)
+					rule->state_change = true;
+				rule->state = RULE_STATE_NOT_APPLIED;
+			}
+		}
+	}
+}
+
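+/*
+ * Make the first rule (in sorted list order) that is in RULE_STATE_APPLIED
+ * the node's current rule. Notifier-block clients (NB_ID) are notified on
+ * every state change; for other nodes a rule_apply_rcm_info entry is queued
+ * on @output_list when the current rule changes, and it is flagged to be
+ * applied after the clock commit when the new rule sorts after the old one.
+ */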
+static void apply_rule(struct rule_node_info *node,
+			struct list_head *output_list)
+{
+	struct rules_def *rule;
+	struct rules_def *last_rule;
+
+	last_rule = node->cur_rule;
+	node->cur_rule = NULL;
+	list_for_each_entry(rule, &node->node_rules, link) {
+		if ((rule->state == RULE_STATE_APPLIED) &&
+						!node->cur_rule)
+			node->cur_rule = rule;
+
+		if (node->id == NB_ID) {
+			if (rule->state_change) {
+				rule->state_change = false;
+				raw_notifier_call_chain(&node->rule_notify_list,
+					rule->state, (void *)&rule->rule_ops);
+			}
+		} else {
+			if ((rule->state == RULE_STATE_APPLIED) &&
+			     (node->cur_rule &&
+				(node->cur_rule->rule_id == rule->rule_id))) {
+				node->apply.id = rule->rule_ops.dst_node[0];
+				node->apply.throttle = rule->rule_ops.mode;
+				node->apply.lim_bw = rule->rule_ops.dst_bw;
+				node->apply.after_clk_commit = false;
+				if (last_rule != node->cur_rule)
+					list_add_tail(&node->apply.link,
+								output_list);
+				if (last_rule) {
+					if (node_rules_compare(NULL,
+						&last_rule->link,
+						&node->cur_rule->link) == -1)
+						node->apply.after_clk_commit =
+									true;
+				}
+			}
+			rule->state_change = false;
+		}
+	}
+
+}
+
+int msm_rules_update_path(struct list_head *input_list,
+			struct list_head *output_list)
+{
+	int ret = 0;
+	struct rule_update_path_info  *inp_node;
+	struct rule_node_info *node_it = NULL;
+
+	mutex_lock(&msm_bus_rules_lock);
+	list_for_each_entry(inp_node, input_list, link) {
+		list_for_each_entry(node_it, &node_list, link) {
+			update_src_id_vote(inp_node, node_it);
+			match_rule(inp_node, node_it);
+		}
+	}
+
+	list_for_each_entry(node_it, &node_list, link)
+		apply_rule(node_it, output_list);
+	mutex_unlock(&msm_bus_rules_lock);
+	return ret;
+}
+
+static bool ops_equal(int op1, int op2)
+{
+	bool ret = false;
+
+	switch (op1) {
+	case OP_GT:
+	case OP_GE:
+	case OP_LT:
+	case OP_LE:
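+		/*
+		 * Ops whose enum values differ by at most one are treated as
+		 * equivalent (e.g. GT vs. GE); this assumes related OP_*
+		 * values are laid out adjacently in the enum.
+		 */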
+		if (abs(op1 - op2) <= 1)
+			ret = true;
+		break;
+	default:
+		ret = (op1 == op2);
+	}
+
+	return ret;
+}
+
+static bool is_throttle_rule(int mode)
+{
+	bool ret = true;
+
+	if (mode == THROTTLE_OFF)
+		ret = false;
+
+	return ret;
+}
+
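+/*
+ * node_rules_compare() - list_sort() comparator for a node's rules.
+ *
+ * Throttling rules sort ahead of THROTTLE_OFF rules; among rules with
+ * the same mode and comparable ops, the more restrictive threshold
+ * (lower for LT/LE, higher for GT/GE) sorts first.
+ */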
+static int node_rules_compare(void *priv, struct list_head *a,
+					struct list_head *b)
+{
+	struct rules_def *ra = container_of(a, struct rules_def, link);
+	struct rules_def *rb = container_of(b, struct rules_def, link);
+	int ret = -1;
+	int64_t th_diff = 0;
+
+
+	if (ra->rule_ops.mode == rb->rule_ops.mode) {
+		if (ops_equal(ra->rule_ops.op, rb->rule_ops.op)) {
+			if ((ra->rule_ops.op == OP_LT) ||
+				(ra->rule_ops.op == OP_LE)) {
+				th_diff = ra->rule_ops.thresh -
+						rb->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			} else if ((ra->rule_ops.op == OP_GT) ||
+					(ra->rule_ops.op == OP_GE)) {
+				th_diff = rb->rule_ops.thresh -
+							ra->rule_ops.thresh;
+				if (th_diff > 0)
+					ret = 1;
+				else
+					ret = -1;
+			}
+		} else {
+			ret = ra->rule_ops.op - rb->rule_ops.op;
+		}
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+				is_throttle_rule(rb->rule_ops.mode)) {
+		if (ra->rule_ops.mode == THROTTLE_ON)
+			ret = -1;
+		else
+			ret = 1;
+	} else if ((ra->rule_ops.mode == THROTTLE_OFF) &&
+		is_throttle_rule(rb->rule_ops.mode)) {
+		ret = 1;
+	} else if (is_throttle_rule(ra->rule_ops.mode) &&
+		(rb->rule_ops.mode == THROTTLE_OFF)) {
+		ret = -1;
+	}
+
+	return ret;
+}
+
+static void print_rules(struct rule_node_info *node_it)
+{
+	struct rules_def *node_rule = NULL;
+	int i;
+
+	if (!node_it) {
+		pr_err("%s: no node found", __func__);
+		return;
+	}
+
+	pr_info("\n Now printing rules for Node %d  cur rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+	list_for_each_entry(node_rule, &node_it->node_rules, link) {
+		pr_info("\n num Rules %d  rule Id %d\n",
+				node_it->num_rules, node_rule->rule_id);
+		pr_info("Rule: src_field %d\n", node_rule->rule_ops.src_field);
+		for (i = 0; i < node_rule->rule_ops.num_src; i++)
+			pr_info("Rule: src %d\n",
+					node_rule->rule_ops.src_id[i]);
+		for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+			pr_info("Rule: dst %d dst_bw %llu\n",
+						node_rule->rule_ops.dst_node[i],
+						node_rule->rule_ops.dst_bw);
+		pr_info("Rule: thresh %llu op %d mode %d State %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode,
+					node_rule->state);
+	}
+}
+
+void print_all_rules(void)
+{
+	struct rule_node_info *node_it = NULL;
+
+	list_for_each_entry(node_it, &node_list, link)
+		print_rules(node_it);
+}
+
+void print_rules_buf(char *buf, int max_buf)
+{
+	struct rule_node_info *node_it = NULL;
+	struct rules_def *node_rule = NULL;
+	int i;
+	int cnt = 0;
+
+	list_for_each_entry(node_it, &node_list, link) {
+		cnt += scnprintf(buf + cnt, max_buf - cnt,
+			"\n Now printing rules for Node %d cur_rule %d\n",
+			node_it->id,
+			(node_it->cur_rule ? node_it->cur_rule->rule_id : -1));
+		list_for_each_entry(node_rule, &node_it->node_rules, link) {
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"\nNum Rules:%d ruleId %d STATE:%d change:%d\n",
+				node_it->num_rules, node_rule->rule_id,
+				node_rule->state, node_rule->state_change);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+				"Src_field %d\n",
+				node_rule->rule_ops.src_field);
+			for (i = 0; i < node_rule->rule_ops.num_src; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Src %d Cur Ib %llu Ab %llu\n",
+					node_rule->rule_ops.src_id[i],
+					node_rule->src_info[i].ib,
+					node_rule->src_info[i].ab);
+			for (i = 0; i < node_rule->rule_ops.num_dst; i++)
+				cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Dst %d dst_bw %llu\n",
+					node_rule->rule_ops.dst_node[0],
+					node_rule->rule_ops.dst_bw);
+			cnt += scnprintf(buf + cnt, max_buf - cnt,
+					"Thresh %llu op %d mode %d\n",
+					node_rule->rule_ops.thresh,
+					node_rule->rule_ops.op,
+					node_rule->rule_ops.mode);
+		}
+	}
+}
+
+static int copy_rule(struct bus_rule_type *src, struct rules_def *node_rule,
+			struct notifier_block *nb)
+{
+	int i;
+	int ret = 0;
+
+	memcpy(&node_rule->rule_ops, src,
+				sizeof(struct bus_rule_type));
+	node_rule->rule_ops.src_id = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_src),
+							GFP_KERNEL);
+	if (!node_rule->rule_ops.src_id) {
+		pr_err("%s: Failed to allocate src_id",
+					__func__);
+		return -ENOMEM;
+	}
+	memcpy(node_rule->rule_ops.src_id, src->src_id,
+				sizeof(int) * src->num_src);
+
+
+	if (!nb) {
+		node_rule->rule_ops.dst_node = kzalloc(
+			(sizeof(int) * node_rule->rule_ops.num_dst),
+						GFP_KERNEL);
+		if (!node_rule->rule_ops.dst_node)
+			return -ENOMEM;
+		memcpy(node_rule->rule_ops.dst_node, src->dst_node,
+						sizeof(int) * src->num_dst);
+	}
+
+	node_rule->num_src = src->num_src;
+	node_rule->src_info = kzalloc(
+		(sizeof(struct node_vote_info) * node_rule->rule_ops.num_src),
+							GFP_KERNEL);
+	if (!node_rule->src_info) {
+		pr_err("%s: Failed to allocate src_info",
+						__func__);
+		return -ENOMEM;
+	}
+	for (i = 0; i < src->num_src; i++)
+		node_rule->src_info[i].id = src->src_id[i];
+
+	return ret;
+}
+
+static bool __rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	struct rule_node_info *node = NULL;
+	int i, j;
+	struct rules_def *node_rule = NULL;
+	int num_dst = 0;
+	bool reg_success = true;
+
+	if (num_rules <= 0)
+		return false;
+
+	for (i = 0; i < num_rules; i++) {
+		if (nb)
+			num_dst = 1;
+		else
+			num_dst = rule[i].num_dst;
+
+		for (j = 0; j < num_dst; j++) {
+			int id = 0;
+
+			if (nb)
+				id = NB_ID;
+			else
+				id = rule[i].dst_node[j];
+
+			node = gen_node(id, nb);
+			if (!node) {
+				pr_err("Error getting rule node");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+			node_rule = kzalloc(sizeof(struct rules_def),
+						GFP_KERNEL);
+			if (!node_rule) {
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			if (copy_rule(&rule[i], node_rule, nb)) {
+				pr_err("Error copying rule");
+				reg_success = false;
+				goto exit_rule_register;
+			}
+
+			node_rule->rule_id = node->num_rules++;
+			if (nb)
+				node->data = nb;
+
+			list_add_tail(&node_rule->link, &node->node_rules);
+		}
+	}
+	list_sort(NULL, &node->node_rules, node_rules_compare);
+	if (nb && nb != node->rule_notify_list.head)
+		raw_notifier_chain_register(&node->rule_notify_list, nb);
+exit_rule_register:
+	return reg_success;
+}
+
+static int comp_rules(struct bus_rule_type *rulea, struct bus_rule_type *ruleb)
+{
+	int ret = 1;
+
+	if (rulea->num_src == ruleb->num_src)
+		ret = memcmp(rulea->src_id, ruleb->src_id,
+				(sizeof(int) * rulea->num_src));
+	if (!ret && (rulea->num_dst == ruleb->num_dst))
+		ret = memcmp(rulea->dst_node, ruleb->dst_node,
+				(sizeof(int) * rulea->num_dst));
+	if (ret || (rulea->dst_bw != ruleb->dst_bw) ||
+		(rulea->op != ruleb->op) || (rulea->thresh != ruleb->thresh))
+		ret = 1;
+	return ret;
+}
+
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	if (!rule || num_rules <= 0)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_register(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+static bool __rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	int i;
+	struct rule_node_info *node = NULL;
+	struct rule_node_info *node_tmp = NULL;
+	struct rules_def *node_rule;
+	struct rules_def *node_rule_tmp;
+	bool match_found = false;
+
+	if (num_rules <= 0)
+		return false;
+
+	if (nb) {
+		node = get_node(NB_ID, nb);
+		if (!node) {
+			pr_err("%s: Can't find node", __func__);
+			goto exit_unregister_rule;
+		}
+		for (i = 0; i < num_rules; i++) {
+			list_for_each_entry_safe(node_rule, node_rule_tmp,
+						&node->node_rules, link) {
+				if (comp_rules(&node_rule->rule_ops,
+						&rule[i]) != 0)
+					continue;
+				list_del(&node_rule->link);
+				kfree(node_rule);
+				match_found = true;
+				node->num_rules--;
+				list_sort(NULL,
+					&node->node_rules,
+					node_rules_compare);
+				break;
+			}
+		}
+		if (!node->num_rules)
+			raw_notifier_chain_unregister(
+					&node->rule_notify_list, nb);
+	} else {
+		for (i = 0; i < num_rules; i++) {
+			match_found = false;
+
+			list_for_each_entry(node, &node_list, link) {
+				list_for_each_entry_safe(node_rule,
+				node_rule_tmp, &node->node_rules, link) {
+					if (comp_rules(&node_rule->rule_ops,
+						&rule[i]) != 0)
+						continue;
+					list_del(&node_rule->link);
+					kfree(node_rule);
+					match_found = true;
+					node->num_rules--;
+					list_sort(NULL,
+						&node->node_rules,
+						node_rules_compare);
+					break;
+				}
+			}
+		}
+	}
+
+	list_for_each_entry_safe(node, node_tmp,
+					&node_list, link) {
+		if (!node->num_rules) {
+			pr_debug("Deleting Rule node %d", node->id);
+			list_del(&node->link);
+			kfree(node);
+		}
+	}
+exit_unregister_rule:
+	return match_found;
+}
+
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+					struct notifier_block *nb)
+{
+	if (!rule || num_rules <= 0)
+		return;
+
+	mutex_lock(&msm_bus_rules_lock);
+	__rule_unregister(num_rules, rule, nb);
+	mutex_unlock(&msm_bus_rules_lock);
+}
+
+bool msm_rule_update(struct bus_rule_type *old_rule,
+			struct bus_rule_type *new_rule,
+			struct notifier_block *nb)
+{
+	bool rc = true;
+
+	if (!old_rule || !new_rule) {
+		pr_err("%s: invalid rule pointers\n", __func__);
+		return false;
+	}
+	mutex_lock(&msm_bus_rules_lock);
+	if (!__rule_unregister(1, old_rule, nb)) {
+		pr_err("%s:msm_rule_update: failed to unregister old rule\n",
+				__func__);
+		rc = false;
+		goto exit_rule_update;
+	}
+
+	if (!__rule_register(1, new_rule, nb)) {
+		/*
+		 * Registering new rule has failed for some reason, attempt
+		 * to re-register the old rule and return error.
+		 */
+		pr_err("%s:msm_rule_update: failed to register new rule\n",
+				__func__);
+		__rule_register(1, old_rule, nb);
+		rc = false;
+	}
+exit_rule_update:
+	mutex_unlock(&msm_bus_rules_lock);
+	return rc;
+}
+
+void msm_rule_evaluate_rules(int node)
+{
+	struct msm_bus_client_handle *handle;
+
+	handle = msm_bus_scale_register(node, node, "tmp-rm", false);
+	if (!handle)
+		return;
+	msm_bus_scale_update_bw(handle, 0, 0);
+	msm_bus_scale_unregister(handle);
+}
+
+bool msm_rule_are_rules_registered(void)
+{
+	bool ret = false;
+
+	if (list_empty(&node_list))
+		ret = false;
+	else
+		ret = true;
+
+	return ret;
+}
+
diff --git a/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
new file mode 100644
index 0000000..fe5d153
--- /dev/null
+++ b/drivers/soc/qcom/msm_bus/msm_buspm_coresight_adhoc.c
@@ -0,0 +1,183 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/coresight.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/list.h>
+
+struct msmbus_coresight_adhoc_clock_drvdata {
+	const char			*csdev_name;
+	struct clk			*clk;
+	struct list_head		 list;
+};
+
+struct msmbus_coresight_adhoc_drvdata {
+	struct device			*dev;
+	struct coresight_device		*csdev;
+	struct coresight_desc		*desc;
+	struct list_head		 clocks;
+};
+
+static int msmbus_coresight_enable_adhoc(struct coresight_device *csdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+	long rate;
+
+	list_for_each_entry(clk, &drvdata->clocks, list) {
+		if (!strcmp(dev_name(&csdev->dev), clk->csdev_name)) {
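+			/*
+			 * Rounding a 1 Hz request typically yields the
+			 * lowest rate the clock supports.
+			 */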
+			rate = clk_round_rate(clk->clk, 1L);
+			clk_set_rate(clk->clk, rate);
+			return clk_prepare_enable(clk->clk);
+		}
+	}
+
+	return -ENOENT;
+}
+
+static void msmbus_coresight_disable_adhoc(struct coresight_device *csdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		dev_get_drvdata(csdev->dev.parent);
+
+	list_for_each_entry(clk, &drvdata->clocks, list) {
+		if (!strcmp(dev_name(&csdev->dev), clk->csdev_name))
+			clk_disable_unprepare(clk->clk);
+	}
+}
+
+static const struct coresight_ops_source msmbus_coresight_adhoc_source_ops = {
+	.enable		= msmbus_coresight_enable_adhoc,
+	.disable	= msmbus_coresight_disable_adhoc,
+};
+
+static const struct coresight_ops msmbus_coresight_cs_ops = {
+	.source_ops	= &msmbus_coresight_adhoc_source_ops,
+};
+
+void msmbus_coresight_remove_adhoc(struct platform_device *pdev)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk, *next_clk;
+	struct msmbus_coresight_adhoc_drvdata *drvdata =
+		platform_get_drvdata(pdev);
+
+	msmbus_coresight_disable_adhoc(drvdata->csdev);
+	coresight_unregister(drvdata->csdev);
+	list_for_each_entry_safe(clk, next_clk, &drvdata->clocks, list) {
+		list_del(&clk->list);
+		devm_kfree(&pdev->dev, clk);
+	}
+	devm_kfree(&pdev->dev, drvdata->desc);
+	devm_kfree(&pdev->dev, drvdata);
+	platform_set_drvdata(pdev, NULL);
+}
+EXPORT_SYMBOL(msmbus_coresight_remove_adhoc);
+
+static int buspm_of_get_clk_adhoc(struct device_node *of_node,
+	struct msmbus_coresight_adhoc_drvdata *drvdata, const char *name)
+{
+	struct msmbus_coresight_adhoc_clock_drvdata *clk;
+
+	clk = devm_kzalloc(drvdata->dev, sizeof(*clk), GFP_KERNEL);
+
+	if (!clk)
+		return -ENOMEM;
+
+	clk->csdev_name = name;
+
+	clk->clk = of_clk_get_by_name(of_node, "bus_clk");
+	if (IS_ERR(clk->clk)) {
+		pr_err("Error: unable to get clock for coresight node %s\n",
+			name);
+		goto err;
+	}
+
+	list_add(&clk->list, &drvdata->clocks);
+	return 0;
+
+err:
+	devm_kfree(drvdata->dev, clk);
+	return -EINVAL;
+}
+
+int msmbus_coresight_init_adhoc(struct platform_device *pdev,
+		struct device_node *of_node)
+{
+	int ret;
+	struct device *dev = &pdev->dev;
+	struct coresight_platform_data *pdata;
+	struct msmbus_coresight_adhoc_drvdata *drvdata;
+	struct coresight_desc *desc;
+
+	pdata = of_get_coresight_platform_data(dev, of_node);
+	if (IS_ERR(pdata))
+		return PTR_ERR(pdata);
+
+	drvdata = platform_get_drvdata(pdev);
+	if (IS_ERR_OR_NULL(drvdata)) {
+		drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+		if (!drvdata)
+			return -ENOMEM;
+		INIT_LIST_HEAD(&drvdata->clocks);
+		drvdata->dev = &pdev->dev;
+		platform_set_drvdata(pdev, drvdata);
+	}
+	ret = buspm_of_get_clk_adhoc(of_node, drvdata, pdata->name);
+	if (ret) {
+		pr_err("Error getting clocks\n");
+		ret = -ENXIO;
+		goto err1;
+	}
+
+	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_BUS;
+	desc->ops = &msmbus_coresight_cs_ops;
+	desc->pdata = pdata;
+	desc->dev = &pdev->dev;
+	drvdata->desc = desc;
+	drvdata->csdev = coresight_register(desc);
+	if (IS_ERR(drvdata->csdev)) {
+		pr_err("coresight: Coresight register failed\n");
+		ret = PTR_ERR(drvdata->csdev);
+		goto err0;
+	}
+
+	return 0;
+err0:
+	devm_kfree(dev, desc);
+err1:
+	devm_kfree(dev, drvdata);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+EXPORT_SYMBOL(msmbus_coresight_init_adhoc);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM BusPM Adhoc CoreSight Driver");
diff --git a/drivers/soc/qcom/msm_glink_pkt.c b/drivers/soc/qcom/msm_glink_pkt.c
new file mode 100644
index 0000000..38d29e4
--- /dev/null
+++ b/drivers/soc/qcom/msm_glink_pkt.c
@@ -0,0 +1,1458 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * G-link Packet Driver -- Provides a binary G-link non-muxed packet port
+ *                       interface.
+ */
+
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <asm/ioctls.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/ipc_logging.h>
+#include <linux/termios.h>
+
+#include <soc/qcom/glink.h>
+
+#define MODULE_NAME "msm_glinkpkt"
+#define DEVICE_NAME "glinkpkt"
+#define WAKEUPSOURCE_TIMEOUT (2000) /* two seconds */
+#define CLOSE_WAIT_TIMEOUT 1000 /* one second */
+
+#define GLINK_PKT_IOCTL_MAGIC (0xC3)
+
+#define GLINK_PKT_IOCTL_QUEUE_RX_INTENT \
+	_IOW(GLINK_PKT_IOCTL_MAGIC, 0, unsigned int)
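+
+/*
+ * Illustrative userspace usage of the queue-Rx-intent ioctl (the device
+ * node name below is only an example):
+ *
+ *   int fd = open("/dev/glinkpkt_example", O_RDWR);
+ *   uint32_t intent_size = 1024;
+ *   ioctl(fd, GLINK_PKT_IOCTL_QUEUE_RX_INTENT, &intent_size);
+ */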
+
+#define SMD_DTR_SIG BIT(31)
+#define SMD_CTS_SIG BIT(30)
+#define SMD_CD_SIG BIT(29)
+#define SMD_RI_SIG BIT(28)
+
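+/*
+ * Translate between the TIOCM_* modem-control bits used by userspace and
+ * the SMD-style signal bits carried over G-Link (kept, presumably, for
+ * compatibility with legacy SMD clients).
+ */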
+#define map_to_smd_trans_signal(sigs) \
+	do { \
+		sigs &= 0x0fff; \
+		if (sigs & TIOCM_DTR) \
+			sigs |= SMD_DTR_SIG; \
+		if (sigs & TIOCM_RTS) \
+			sigs |= SMD_CTS_SIG; \
+		if (sigs & TIOCM_CD) \
+			sigs |= SMD_CD_SIG; \
+		if (sigs & TIOCM_RI) \
+			sigs |= SMD_RI_SIG; \
+	} while (0)
+
+#define map_from_smd_trans_signal(sigs) \
+	do { \
+		if (sigs & SMD_DTR_SIG) \
+			sigs |= TIOCM_DTR; \
+		if (sigs & SMD_CTS_SIG) \
+			sigs |= TIOCM_RTS; \
+		if (sigs & SMD_CD_SIG) \
+			sigs |= TIOCM_CD; \
+		if (sigs & SMD_RI_SIG) \
+			sigs |= TIOCM_RI; \
+		sigs &= 0x0fff; \
+	} while (0)
+
+/**
+ * glink_pkt_dev - G-Link packet device structure
+ * dev_list:	G-Link packets device list.
+ * open_cfg:	Transport configuration used to open Logical channel.
+ * dev_name:	Device node name used by the clients.
+ * handle:	Opaque Channel handle returned by G-Link.
+ * ch_lock:	Per channel lock for synchronization.
+ * ch_state:	current channel state.
+ * cdev:	structure to the internal character device.
+ * devicep:	Pointer to the G-Link pkt class device structure.
+ * i:		Index to this character device.
+ * ref_cnt:	number of references to this device.
+ * poll_mode:	flag to check polling mode.
+ * ch_read_wait_queue:	reader thread wait queue.
+ * ch_opened_wait_queue: open thread wait queue.
+ * ch_closed_wait_queue: close thread wait queue.
+ * pkt_list:	The pending Rx packets list.
+ * pkt_list_lock: Lock to protect @pkt_list.
+ * pa_ws:	Packet arrival Wakeup source.
+ * packet_arrival_work:	Hold the wakeup source worker info.
+ * pa_spinlock:	Packet arrival spinlock.
+ * ws_locked:	flag to check wakeup source state.
+ * sigs_updated: flag to check signal update.
+ * open_time_wait: wait time for channel to fully open.
+ * in_reset:	flag to check SSR state.
+ * link_info:	structure to hold link information.
+ * link_state_handle: handle to get link state events.
+ * link_up:	flag to check link is up or not.
+ */
+struct glink_pkt_dev {
+	struct list_head dev_list;
+	struct glink_open_config open_cfg;
+	const char *dev_name;
+	void *handle;
+	struct mutex ch_lock;
+	unsigned int ch_state;
+
+	struct cdev cdev;
+	struct device *devicep;
+
+	int i;
+	int ref_cnt;
+	int poll_mode;
+
+	wait_queue_head_t ch_read_wait_queue;
+	wait_queue_head_t ch_opened_wait_queue;
+	wait_queue_head_t ch_closed_wait_queue;
+	struct list_head pkt_list;
+	spinlock_t pkt_list_lock;
+
+	struct wakeup_source pa_ws;	/* Packet Arrival Wakeup Source */
+	struct work_struct packet_arrival_work;
+	spinlock_t pa_spinlock;
+	int ws_locked;
+	int sigs_updated;
+	int open_time_wait;
+	int in_reset;
+
+	struct glink_link_info link_info;
+	void *link_state_handle;
+	bool link_up;
+};
+
+/**
+ * glink_rx_pkt - Rx packet structure
+ * list:	Chain the Rx packets into list.
+ * data:	pointer to the Rx data.
+ * pkt_priv:	private pointer to the Rx packet.
+ * size:	The size of received data.
+ */
+struct glink_rx_pkt {
+	struct list_head list;
+	const void *data;
+	const void *pkt_priv;
+	size_t size;
+};
+
+/**
+ * queue_rx_intent_work - Work item to Queue Rx intent.
+ * intent_size:	The size of the intent to be queued.
+ * devp:	Pointer to the device structure.
+ * work:	Hold the worker function information.
+ */
+struct queue_rx_intent_work {
+	size_t intent_size;
+	struct glink_pkt_dev *devp;
+	struct work_struct work;
+};
+
+/**
+ * notify_state_work - Work item to notify channel state.
+ * state:	Channel new state.
+ * devp:	Pointer to the device structure.
+ * handle:	Opaque channel handle associated with the state event.
+ * work:	Hold the worker function information.
+ */
+struct notify_state_work {
+	unsigned int state;
+	struct glink_pkt_dev *devp;
+	void *handle;
+	struct work_struct work;
+};
+
+static DEFINE_MUTEX(glink_pkt_dev_lock_lha1);
+static LIST_HEAD(glink_pkt_dev_list);
+static DEFINE_MUTEX(glink_pkt_driver_lock_lha1);
+static LIST_HEAD(glink_pkt_driver_list);
+
+struct class *glink_pkt_classp;
+static dev_t glink_pkt_number;
+struct workqueue_struct *glink_pkt_wq;
+
+static int num_glink_pkt_ports;
+
+#define GLINK_PKT_IPC_LOG_PAGE_CNT 2
+static void *glink_pkt_ilctxt;
+
+enum {
+	GLINK_PKT_STATUS = 1U << 0,
+};
+
+static int msm_glink_pkt_debug_mask;
+module_param_named(debug_mask, msm_glink_pkt_debug_mask,
+		int, 0664);
+
+static void glink_pkt_queue_rx_intent_worker(struct work_struct *work);
+static void glink_pkt_notify_state_worker(struct work_struct *work);
+static bool glink_pkt_read_avail(struct glink_pkt_dev *devp);
+
+#define DEBUG
+
+#ifdef DEBUG
+
+#define GLINK_PKT_LOG_STRING(x...) \
+do { \
+	if (glink_pkt_ilctxt) \
+		ipc_log_string(glink_pkt_ilctxt, "<GLINK_PKT>: "x); \
+} while (0)
+
+#define GLINK_PKT_INFO(x...) \
+do { \
+	if (msm_glink_pkt_debug_mask & GLINK_PKT_STATUS) \
+		pr_info("Status: "x); \
+	GLINK_PKT_LOG_STRING(x); \
+} while (0)
+
+#define GLINK_PKT_ERR(x...) \
+do { \
+	pr_err_ratelimited("<GLINK_PKT> err: "x); \
+	GLINK_PKT_LOG_STRING(x); \
+} while (0)
+
+#else
+#define GLINK_PKT_INFO(x...) do {} while (0)
+#define GLINK_PKT_ERR(x...) do {} while (0)
+#endif
+
+static ssize_t open_timeout_store(struct device *d,
+				  struct device_attribute *attr,
+				  const char *buf,
+				  size_t n)
+{
+	struct glink_pkt_dev *devp;
+	long tmp;
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_for_each_entry(devp, &glink_pkt_dev_list, dev_list) {
+		if (devp->devicep == d) {
+			if (!kstrtol(buf, 0, &tmp)) {
+				devp->open_time_wait = tmp;
+				mutex_unlock(&glink_pkt_dev_lock_lha1);
+				return n;
+			}
+			mutex_unlock(&glink_pkt_dev_lock_lha1);
+			pr_err("%s: unable to convert: %s to an int\n",
+				__func__, buf);
+			return -EINVAL;
+		}
+	}
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+	GLINK_PKT_ERR("%s: unable to match device to valid port\n", __func__);
+	return -EINVAL;
+}
+
+static ssize_t open_timeout_show(struct device *d,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct glink_pkt_dev *devp;
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_for_each_entry(devp, &glink_pkt_dev_list, dev_list) {
+		if (devp->devicep == d) {
+			mutex_unlock(&glink_pkt_dev_lock_lha1);
+			return snprintf(buf, PAGE_SIZE, "%d\n",
+					devp->open_time_wait);
+		}
+	}
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+	GLINK_PKT_ERR("%s: unable to match device to valid port\n", __func__);
+	return -EINVAL;
+
+}
+
+static DEVICE_ATTR(open_timeout, 0664, open_timeout_show, open_timeout_store);
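+
+/*
+ * The open timeout (in seconds) is tunable per device via this sysfs
+ * attribute; a negative value makes open() wait indefinitely. Example
+ * (the sysfs path is illustrative):
+ *
+ *   echo 30 > /sys/class/glinkpkt/<glinkpkt-dev-name>/open_timeout
+ */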
+
+/**
+ * packet_arrival_worker() - wakeup source timeout worker fn
+ * work:	Work struct queued
+ *
+ * This function is used to keep the system awake long enough to allow
+ * the userspace client to read the received packet.
+ */
+static void packet_arrival_worker(struct work_struct *work)
+{
+	struct glink_pkt_dev *devp;
+	unsigned long flags;
+
+	devp = container_of(work, struct glink_pkt_dev,
+				    packet_arrival_work);
+	mutex_lock(&devp->ch_lock);
+	spin_lock_irqsave(&devp->pa_spinlock, flags);
+	if (devp->ws_locked) {
+		GLINK_PKT_INFO("%s locking glink_pkt_dev id:%d wakeup source\n",
+			__func__, devp->i);
+		/*
+		 * Keep system awake long enough to allow userspace client
+		 * to process the packet.
+		 */
+		__pm_wakeup_event(&devp->pa_ws, WAKEUPSOURCE_TIMEOUT);
+	}
+	spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+	mutex_unlock(&devp->ch_lock);
+}
+
+/**
+ * glink_pkt_link_state_cb() - Callback to receive link state updates
+ * @cb_info: Information containing link & its state.
+ * @priv: Private data passed during the link state registration.
+ *
+ * This function is called by the GLINK core to notify the Glink Pkt drivers
+ * regarding the link state updates. This function is registered with the
+ * GLINK core by Glink pkt drivers with glink_register_link_state_cb().
+ */
+static void glink_pkt_link_state_cb(struct glink_link_state_cb_info *cb_info,
+				      void *priv)
+{
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+
+	if (!cb_info)
+		return;
+	if (!devp)
+		return;
+
+	if (cb_info->link_state == GLINK_LINK_STATE_UP) {
+		devp->link_up = true;
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+	} else if (cb_info->link_state == GLINK_LINK_STATE_DOWN) {
+		devp->link_up = false;
+	}
+}
+
+/**
+ * glink_pkt_notify_rx() - Rx data Callback from G-Link core layer
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * pkt_priv:	private pointer to the packet.
+ * ptr:	Pointer to the Rx data.
+ * size:	Size of the Rx data.
+ *
+ * This callback function is notified on receiving the data from
+ * remote channel.
+ */
+void glink_pkt_notify_rx(void *handle, const void *priv,
+				const void *pkt_priv,
+				const void *ptr, size_t size)
+{
+	struct glink_rx_pkt *pkt = NULL;
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+	unsigned long flags;
+
+	GLINK_PKT_INFO("%s(): priv[%p] data[%p] size[%zu]\n",
+		   __func__, pkt_priv, (char *)ptr, size);
+
+	pkt = kzalloc(sizeof(*pkt), GFP_ATOMIC);
+	if (!pkt) {
+		GLINK_PKT_ERR("%s: memory allocation failed\n", __func__);
+		return;
+	}
+
+	pkt->data = ptr;
+	pkt->pkt_priv = pkt_priv;
+	pkt->size = size;
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	list_add_tail(&pkt->list, &devp->pkt_list);
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+
+	spin_lock_irqsave(&devp->pa_spinlock, flags);
+	__pm_stay_awake(&devp->pa_ws);
+	devp->ws_locked = 1;
+	spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+	wake_up(&devp->ch_read_wait_queue);
+	schedule_work(&devp->packet_arrival_work);
+}
+
+/**
+ * glink_pkt_notify_tx_done() - Tx done callback function
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * pkt_priv:	private pointer to the packet.
+ * ptr:	Pointer to the Tx data.
+ *
+ * This callback function is notified when the remote core
+ * signals Rx done to the local core.
+ */
+void glink_pkt_notify_tx_done(void *handle, const void *priv,
+				const void *pkt_priv, const void *ptr)
+{
+	GLINK_PKT_INFO("%s(): priv[%p] pkt_priv[%p] ptr[%p]\n",
+					__func__, priv, pkt_priv, ptr);
+	/* Free Tx buffer allocated in glink_pkt_write */
+	kfree(ptr);
+}
+
+/**
+ * glink_pkt_notify_state() - state notification callback function
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * event:	channel state
+ *
+ * This callback function is notified when the remote side alters the
+ * channel state and the event is sent to the local G-Link core.
+ */
+void glink_pkt_notify_state(void *handle, const void *priv, unsigned int event)
+{
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+	struct notify_state_work *work_item;
+
+	if ((devp->handle != NULL) && (devp->handle != handle)) {
+		GLINK_PKT_ERR("%s() event[%d] on incorrect channel [%s]\n",
+				__func__, event, devp->open_cfg.name);
+		return;
+	}
+	GLINK_PKT_INFO("%s(): event[%d] on [%s]\n", __func__, event,
+						devp->open_cfg.name);
+
+	work_item = kzalloc(sizeof(*work_item), GFP_ATOMIC);
+	if (!work_item) {
+		GLINK_PKT_ERR("%s() failed allocate work_item\n", __func__);
+		return;
+	}
+
+	work_item->state = event;
+	work_item->devp = devp;
+	work_item->handle = handle;
+	INIT_WORK(&work_item->work, glink_pkt_notify_state_worker);
+	queue_work(glink_pkt_wq, &work_item->work);
+}
+
+/**
+ * glink_pkt_rmt_rx_intent_req_cb() - Remote Rx intent request callback
+ * handle:	Opaque Channel handle returned by GLink.
+ * priv:	private pointer to the channel.
+ * sz:	the size of the requested Rx intent
+ *
+ * This callback function is notified when the remote client
+ * requests an Rx intent from the local client.
+ */
+bool glink_pkt_rmt_rx_intent_req_cb(void *handle, const void *priv, size_t sz)
+{
+	struct queue_rx_intent_work *work_item;
+
+	GLINK_PKT_INFO("%s(): QUEUE RX INTENT to receive size[%zu]\n",
+		   __func__, sz);
+
+	work_item = kzalloc(sizeof(*work_item), GFP_ATOMIC);
+	if (!work_item) {
+		GLINK_PKT_ERR("%s failed allocate work_item\n", __func__);
+		return false;
+	}
+
+	work_item->intent_size = sz;
+	work_item->devp = (struct glink_pkt_dev *)priv;
+	INIT_WORK(&work_item->work, glink_pkt_queue_rx_intent_worker);
+	queue_work(glink_pkt_wq, &work_item->work);
+
+	return true;
+}
+
+/**
+ * glink_pkt_notify_rx_sigs() - signals callback
+ * handle:      Opaque Channel handle returned by GLink.
+ * priv:        private pointer to the channel.
+ * old_sigs:    signal before modification
+ * new_sigs:    signal after modification
+ *
+ * This callback function is notified when the remote client
+ * updates the signals.
+ */
+void glink_pkt_notify_rx_sigs(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs)
+{
+	struct glink_pkt_dev *devp = (struct glink_pkt_dev *)priv;
+
+	GLINK_PKT_INFO("%s(): sigs old[%x] new[%x]\n",
+				__func__, old_sigs, new_sigs);
+	mutex_lock(&devp->ch_lock);
+	devp->sigs_updated = true;
+	mutex_unlock(&devp->ch_lock);
+	wake_up(&devp->ch_read_wait_queue);
+}
+
+/**
+ * glink_pkt_queue_rx_intent_worker() - Queue Rx worker function
+ *
+ * work:	Pointer to the work struct
+ *
+ * This function queues the Rx intent from worker context, since
+ * allocating larger buffers may sleep.
+ */
+static void glink_pkt_queue_rx_intent_worker(struct work_struct *work)
+{
+	int ret;
+	struct queue_rx_intent_work *work_item =
+				container_of(work,
+				struct queue_rx_intent_work, work);
+	struct glink_pkt_dev *devp = work_item->devp;
+
+	if (!devp || !devp->handle) {
+		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		kfree(work_item);
+		return;
+	}
+
+	ret = glink_queue_rx_intent(devp->handle, devp, work_item->intent_size);
+	GLINK_PKT_INFO("%s: Triggered with size[%zu] ret[%d]\n",
+				__func__, work_item->intent_size, ret);
+	if (ret)
+		GLINK_PKT_ERR("%s queue_rx_intent failed\n", __func__);
+	kfree(work_item);
+}
+
+/**
+ * glink_pkt_notify_state_worker() - Notify state worker function
+ *
+ * work:	Pointer to the work struct
+ *
+ * This function is used to notify the channel state and update the
+ * internal data structure.
+ */
+static void glink_pkt_notify_state_worker(struct work_struct *work)
+{
+	struct notify_state_work *work_item =
+				container_of(work,
+				struct notify_state_work, work);
+	struct glink_pkt_dev *devp = work_item->devp;
+	unsigned int event = work_item->state;
+	void *handle = work_item->handle;
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s: Invalid device Handle\n", __func__);
+		kfree(work_item);
+		return;
+	}
+
+	GLINK_PKT_INFO("%s(): event[%d] on [%s]\n", __func__,
+				event, devp->open_cfg.name);
+	mutex_lock(&devp->ch_lock);
+	devp->ch_state = event;
+	if (event == GLINK_CONNECTED) {
+		if (!devp->handle)
+			devp->handle = handle;
+		devp->in_reset = 0;
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+	} else if (event == GLINK_REMOTE_DISCONNECTED) {
+		devp->in_reset = 1;
+		wake_up(&devp->ch_read_wait_queue);
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+	} else if (event == GLINK_LOCAL_DISCONNECTED) {
+		if (devp->handle == handle)
+			devp->handle = NULL;
+		wake_up_interruptible(&devp->ch_closed_wait_queue);
+	}
+	mutex_unlock(&devp->ch_lock);
+	kfree(work_item);
+}
+
+/**
+ * glink_pkt_read_avail() - check any pending packets to read
+ * devp:	pointer to G-Link packet device.
+ *
+ * This function checks whether any pending data packets are
+ * available to read.
+ */
+static bool glink_pkt_read_avail(struct glink_pkt_dev *devp)
+{
+	bool list_is_empty;
+	unsigned long flags;
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	list_is_empty = list_empty(&devp->pkt_list);
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+	return !list_is_empty;
+}
+
+/**
+ * glink_pkt_read() - read() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * buf:	Pointer to the userspace buffer.
+ * count:	Number of bytes to read from the file.
+ * ppos:	Pointer to the offset within the file.
+ *
+ * This function reads data from the glink pkt device when a
+ * userspace client does a read() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+ssize_t glink_pkt_read(struct file *file,
+		       char __user *buf,
+		       size_t count,
+		       loff_t *ppos)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp;
+	struct glink_rx_pkt *pkt = NULL;
+	unsigned long flags;
+
+	devp = file->private_data;
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s on NULL glink_pkt_dev\n", __func__);
+		return -EINVAL;
+	}
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s on a closed glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -EINVAL;
+	}
+	if (devp->in_reset) {
+		GLINK_PKT_ERR("%s: notifying reset for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -ENETRESET;
+	}
+
+	if (!glink_rx_intent_exists(devp->handle, count)) {
+		ret  = glink_queue_rx_intent(devp->handle, devp, count);
+		if (ret) {
+			GLINK_PKT_ERR("%s: failed to queue_rx_intent ret[%d]\n",
+					__func__, ret);
+			return ret;
+		}
+	}
+
+	GLINK_PKT_INFO("Begin %s on glink_pkt_dev id:%d buffer_size %zu\n",
+		__func__, devp->i, count);
+
+	ret = wait_event_interruptible(devp->ch_read_wait_queue,
+				     !devp->handle || devp->in_reset ||
+				     glink_pkt_read_avail(devp));
+	if (devp->in_reset) {
+		GLINK_PKT_ERR("%s: notifying reset for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -ENETRESET;
+	}
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s on a closed glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -EINVAL;
+	}
+	if (ret < 0) {
+		/* qualify error message */
+		if (ret != -ERESTARTSYS) {
+			/* we get this anytime a signal comes in */
+			GLINK_PKT_ERR("%s: wait on dev id:%d ret %i\n",
+					__func__, devp->i, ret);
+		}
+		return ret;
+	}
+
+	spin_lock_irqsave(&devp->pkt_list_lock, flags);
+	pkt = list_first_entry(&devp->pkt_list, struct glink_rx_pkt, list);
+	if (pkt->size > count) {
+		GLINK_PKT_ERR("%s: Small Buff on dev Id:%d-[%zu > %zu]\n",
+				__func__, devp->i, pkt->size, count);
+		spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+		return -ETOOSMALL;
+	}
+	list_del(&pkt->list);
+	spin_unlock_irqrestore(&devp->pkt_list_lock, flags);
+
+	ret = copy_to_user(buf, pkt->data, pkt->size);
+	if (WARN_ON(ret != 0)) {
+		glink_rx_done(devp->handle, pkt->data, false);
+		kfree(pkt);
+		return -EFAULT;
+	}
+
+	ret = pkt->size;
+	glink_rx_done(devp->handle, pkt->data, false);
+	kfree(pkt);
+
+	mutex_lock(&devp->ch_lock);
+	spin_lock_irqsave(&devp->pa_spinlock, flags);
+	if (devp->poll_mode && !glink_pkt_read_avail(devp)) {
+		__pm_relax(&devp->pa_ws);
+		devp->ws_locked = 0;
+		devp->poll_mode = 0;
+		GLINK_PKT_INFO("%s unlocked pkt_dev id:%d wakeup_source\n",
+			__func__, devp->i);
+	}
+	spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+	mutex_unlock(&devp->ch_lock);
+
+	GLINK_PKT_INFO("End %s on glink_pkt_dev id:%d ret[%d]\n",
+				__func__, devp->i, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_write() - write() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * buf:	Pointer to the userspace buffer.
+ * count:	Number of bytes to write to the file.
+ * ppos:	Pointer to the offset within the file.
+ *
+ * This function writes data to the glink pkt device when a
+ * userspace client does a write() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+ssize_t glink_pkt_write(struct file *file,
+		       const char __user *buf,
+		       size_t count,
+		       loff_t *ppos)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp;
+	void *data;
+
+	devp = file->private_data;
+
+	if (!count) {
+		GLINK_PKT_ERR("%s: data count is zero\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!devp) {
+		GLINK_PKT_ERR("%s on NULL glink_pkt_dev\n", __func__);
+		return -EINVAL;
+	}
+	if (!devp->handle) {
+		GLINK_PKT_ERR("%s on a closed glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -EINVAL;
+	}
+	if (devp->in_reset) {
+		GLINK_PKT_ERR("%s: notifying reset for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+		return -ENETRESET;
+	}
+
+	GLINK_PKT_INFO("Begin %s on glink_pkt_dev id:%d buffer_size %zu\n",
+		__func__, devp->i, count);
+	data = kzalloc(count, GFP_KERNEL);
+	if (!data) {
+		GLINK_PKT_ERR("%s buffer allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = copy_from_user(data, buf, count);
+	if (WARN_ON(ret != 0)) {
+		kfree(data);
+		return -EFAULT;
+	}
+
+	ret = glink_tx(devp->handle, data, data, count, GLINK_TX_REQ_INTENT);
+	if (ret) {
+		GLINK_PKT_ERR("%s glink_tx failed ret[%d]\n", __func__, ret);
+		kfree(data);
+		return ret;
+	}
+
+	GLINK_PKT_INFO("Finished %s on glink_pkt_dev id:%d buffer_size %zu\n",
+		__func__, devp->i, count);
+
+	return count;
+}
+
+/**
+ * glink_pkt_poll() - poll() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * wait:	pointer to Poll table.
+ *
+ * This function polls on the glink pkt device when a
+ * userspace client does a poll() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static unsigned int glink_pkt_poll(struct file *file, poll_table *wait)
+{
+	struct glink_pkt_dev *devp;
+	unsigned int mask = 0;
+
+	devp = file->private_data;
+	if (!devp || !devp->handle) {
+		GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+		return POLLERR;
+	}
+	if (devp->in_reset)
+		return POLLHUP;
+
+	devp->poll_mode = 1;
+	poll_wait(file, &devp->ch_read_wait_queue, wait);
+	mutex_lock(&devp->ch_lock);
+	if (!devp->handle) {
+		mutex_unlock(&devp->ch_lock);
+		return POLLERR;
+	}
+	if (devp->in_reset) {
+		mutex_unlock(&devp->ch_lock);
+		return POLLHUP;
+	}
+
+	if (glink_pkt_read_avail(devp)) {
+		mask |= POLLIN | POLLRDNORM;
+		GLINK_PKT_INFO("%s sets POLLIN for glink_pkt_dev id: %d\n",
+			__func__, devp->i);
+	}
+
+	if (devp->sigs_updated) {
+		mask |= POLLPRI;
+		GLINK_PKT_INFO("%s sets POLLPRI for glink_pkt_dev id: %d\n",
+			__func__, devp->i);
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	return mask;
+}
+
+/**
+ * glink_pkt_tiocmset() - set the signals for glink_pkt device
+ * devp:	Pointer to the glink_pkt device structure.
+ * cmd:		IOCTL command.
+ * arg:		Arguments to the ioctl call.
+ *
+ * This function sets the signals on the glink pkt device
+ * when a userspace client does an ioctl() system call with TIOCMBIS,
+ * TIOCMBIC or TIOCMSET.
+ */
+static int glink_pkt_tiocmset(struct glink_pkt_dev *devp, unsigned int cmd,
+							unsigned long arg)
+{
+	int ret;
+	uint32_t sigs;
+	uint32_t val;
+
+	ret = get_user(val, (uint32_t *)arg);
+	if (ret)
+		return ret;
+	map_to_smd_trans_signal(val);
+	ret = glink_sigs_local_get(devp->handle, &sigs);
+	if (ret < 0) {
+		GLINK_PKT_ERR("%s: Get signals failed[%d]\n", __func__, ret);
+		return ret;
+	}
+	switch (cmd) {
+	case TIOCMBIS:
+		sigs |= val;
+		break;
+	case TIOCMBIC:
+		sigs &= ~val;
+		break;
+	case TIOCMSET:
+		sigs = val;
+		break;
+	}
+	ret = glink_sigs_set(devp->handle, sigs);
+	GLINK_PKT_INFO("%s: sigs[0x%x] ret[%d]\n", __func__, sigs, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_ioctl() - ioctl() syscall for the glink_pkt device
+ * file:	Pointer to the file structure.
+ * cmd:		IOCTL command.
+ * arg:		Arguments to the ioctl call.
+ *
+ * This function handles ioctl() system calls issued by userspace
+ * clients on the glink pkt device. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+static long glink_pkt_ioctl(struct file *file, unsigned int cmd,
+					     unsigned long arg)
+{
+	int ret;
+	struct glink_pkt_dev *devp;
+	uint32_t size = 0;
+	uint32_t sigs = 0;
+
+	devp = file->private_data;
+	if (!devp || !devp->handle) {
+		GLINK_PKT_ERR("%s: Invalid device handle\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&devp->ch_lock);
+	switch (cmd) {
+	case TIOCMGET:
+		devp->sigs_updated = false;
+		ret = glink_sigs_remote_get(devp->handle, &sigs);
+		GLINK_PKT_INFO("%s: TIOCMGET ret[%d] sigs[0x%x]\n",
+					__func__, ret, sigs);
+		map_from_smd_trans_signal(sigs);
+		if (!ret)
+			ret = put_user(sigs, (uint32_t *)arg);
+		break;
+	case TIOCMSET:
+	case TIOCMBIS:
+	case TIOCMBIC:
+		ret = glink_pkt_tiocmset(devp, cmd, arg);
+		break;
+
+	case GLINK_PKT_IOCTL_QUEUE_RX_INTENT:
+		ret = get_user(size, (uint32_t *)arg);
+		if (ret)
+			break;
+		GLINK_PKT_INFO("%s: intent size[%d]\n", __func__, size);
+		ret = glink_queue_rx_intent(devp->handle, devp, size);
+		if (ret) {
+			GLINK_PKT_ERR("%s: failed to QUEUE_RX_INTENT ret[%d]\n",
+					__func__, ret);
+		}
+		break;
+	default:
+		GLINK_PKT_ERR("%s: Unrecognized ioctl command 0x%x\n",
+					__func__, cmd);
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	return ret;
+}
+
+/**
+ * glink_pkt_open() - open() syscall for the glink_pkt device
+ * inode:	Pointer to the inode structure.
+ * file:	Pointer to the file structure.
+ *
+ * This function opens the glink pkt device when a
+ * userspace client does an open() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+int glink_pkt_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp = NULL;
+	int wait_time_msecs;
+
+	devp = container_of(inode->i_cdev, struct glink_pkt_dev, cdev);
+	if (!devp) {
+		GLINK_PKT_ERR("%s on NULL device\n", __func__);
+		return -EINVAL;
+	}
+	GLINK_PKT_INFO("Begin %s() on dev id:%d open_time_wait[%d] by [%s]\n",
+		__func__, devp->i, devp->open_time_wait, current->comm);
+	file->private_data = devp;
+	wait_time_msecs = devp->open_time_wait * 1000;
+
+	mutex_lock(&devp->ch_lock);
+	/* waiting for previous close to complete */
+	if (devp->handle && devp->ref_cnt == 0) {
+		mutex_unlock(&devp->ch_lock);
+		if (wait_time_msecs < 0) {
+			ret = wait_event_interruptible(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_LOCAL_DISCONNECTED);
+		} else {
+			ret = wait_event_interruptible_timeout(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_LOCAL_DISCONNECTED,
+				msecs_to_jiffies(wait_time_msecs));
+			if (ret >= 0)
+				wait_time_msecs = jiffies_to_msecs(ret);
+			if (ret == 0)
+				ret = -ETIMEDOUT;
+		}
+		if (ret < 0) {
+			GLINK_PKT_ERR(
+			"%s:failed for prev close on dev id:%d rc:%d\n",
+			__func__, devp->i, ret);
+			return ret;
+		}
+		mutex_lock(&devp->ch_lock);
+	}
+
+	if (!devp->handle) {
+		mutex_unlock(&devp->ch_lock);
+		/*
+		 * Wait for the link to come fully up so we know the
+		 * remote side is ready.
+		 */
+		if (wait_time_msecs < 0) {
+			ret = wait_event_interruptible(
+				devp->ch_opened_wait_queue,
+				devp->link_up == true);
+		} else {
+			ret = wait_event_interruptible_timeout(
+				devp->ch_opened_wait_queue,
+				devp->link_up == true,
+				msecs_to_jiffies(wait_time_msecs));
+			if (ret >= 0)
+				wait_time_msecs = jiffies_to_msecs(ret);
+			if (ret == 0)
+				ret = -ETIMEDOUT;
+		}
+		mutex_lock(&devp->ch_lock);
+		if (ret < 0) {
+			GLINK_PKT_ERR(
+				"%s: Link not up edge[%s] name[%s] rc:%d\n",
+				__func__, devp->open_cfg.edge,
+				devp->open_cfg.name, ret);
+			devp->handle = NULL;
+			goto error;
+		}
+
+		devp->handle = glink_open(&devp->open_cfg);
+		if (IS_ERR_OR_NULL(devp->handle)) {
+			GLINK_PKT_ERR(
+				"%s: open failed xprt[%s] edge[%s] name[%s]\n",
+				__func__, devp->open_cfg.transport,
+				devp->open_cfg.edge, devp->open_cfg.name);
+			ret = -ENODEV;
+			devp->handle = NULL;
+			goto error;
+		}
+
+		mutex_unlock(&devp->ch_lock);
+		/*
+		 * Wait for the channel to reach the fully open state so we
+		 * know the remote client is ready.
+		 */
+		if (wait_time_msecs < 0) {
+			ret = wait_event_interruptible(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_CONNECTED);
+		} else {
+			ret = wait_event_interruptible_timeout(
+				devp->ch_opened_wait_queue,
+				devp->ch_state == GLINK_CONNECTED,
+				msecs_to_jiffies(wait_time_msecs));
+			if (ret == 0)
+				ret = -ETIMEDOUT;
+		}
+		mutex_lock(&devp->ch_lock);
+		if (ret < 0) {
+			GLINK_PKT_ERR("%s: open failed on dev id:%d rc:%d\n",
+					__func__, devp->i, ret);
+			glink_close(devp->handle);
+			devp->handle = NULL;
+			goto error;
+		}
+	}
+	ret = 0;
+	devp->ref_cnt++;
+
+error:
+	mutex_unlock(&devp->ch_lock);
+	GLINK_PKT_INFO("END %s() on dev id:%d ref_cnt[%d] ret[%d]\n",
+			__func__, devp->i, devp->ref_cnt, ret);
+	return ret;
+}
+
+/**
+ * glink_pkt_release() - release operation on glink_pkt device
+ * inode:	Pointer to the inode structure.
+ * file:	Pointer to the file structure.
+ *
+ * This function releases the glink pkt device when a
+ * userspace client does a close() system call. All input arguments are
+ * validated by the virtual file system before calling this function.
+ */
+int glink_pkt_release(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct glink_pkt_dev *devp = file->private_data;
+	unsigned long flags;
+
+	GLINK_PKT_INFO("%s() on dev id:%d by [%s] ref_cnt[%d]\n",
+			__func__, devp->i, current->comm, devp->ref_cnt);
+	mutex_lock(&devp->ch_lock);
+	if (devp->ref_cnt > 0)
+		devp->ref_cnt--;
+
+	if (devp->handle && devp->ref_cnt == 0) {
+		wake_up(&devp->ch_read_wait_queue);
+		wake_up_interruptible(&devp->ch_opened_wait_queue);
+		ret = glink_close(devp->handle);
+		if (ret)  {
+			GLINK_PKT_ERR("%s: close failed ret[%d]\n",
+						__func__, ret);
+		} else {
+			mutex_unlock(&devp->ch_lock);
+			ret = wait_event_interruptible_timeout(
+				devp->ch_closed_wait_queue,
+				devp->ch_state == GLINK_LOCAL_DISCONNECTED,
+				msecs_to_jiffies(CLOSE_WAIT_TIMEOUT));
+			if (ret == 0)
+				GLINK_PKT_ERR(
+				"%s(): close TIMEOUT on dev_id[%d]\n",
+				__func__, devp->i);
+			mutex_lock(&devp->ch_lock);
+		}
+		devp->poll_mode = 0;
+		spin_lock_irqsave(&devp->pa_spinlock, flags);
+		if (devp->ws_locked) {
+			__pm_relax(&devp->pa_ws);
+			devp->ws_locked = 0;
+		}
+		spin_unlock_irqrestore(&devp->pa_spinlock, flags);
+		devp->sigs_updated = false;
+		devp->in_reset = 0;
+	}
+	mutex_unlock(&devp->ch_lock);
+
+	if (flush_work(&devp->packet_arrival_work))
+		GLINK_PKT_INFO("%s: Flushed work for glink_pkt_dev id:%d\n",
+			__func__, devp->i);
+	return ret;
+}
+
+static const struct file_operations glink_pkt_fops = {
+	.owner = THIS_MODULE,
+	.open = glink_pkt_open,
+	.release = glink_pkt_release,
+	.read = glink_pkt_read,
+	.write = glink_pkt_write,
+	.poll = glink_pkt_poll,
+	.unlocked_ioctl = glink_pkt_ioctl,
+	.compat_ioctl = glink_pkt_ioctl,
+};
+
+/**
+ * glink_pkt_init_add_device() - Initialize G-Link packet device and add cdev
+ * devp:	pointer to G-Link packet device.
+ * i:		index of the G-Link packet device.
+ *
+ * return:	0 for success, Standard Linux errors
+ */
+static int glink_pkt_init_add_device(struct glink_pkt_dev *devp, int i)
+{
+	int ret = 0;
+
+	devp->open_cfg.notify_rx = glink_pkt_notify_rx;
+	devp->open_cfg.notify_tx_done = glink_pkt_notify_tx_done;
+	devp->open_cfg.notify_state = glink_pkt_notify_state;
+	devp->open_cfg.notify_rx_intent_req = glink_pkt_rmt_rx_intent_req_cb;
+	devp->open_cfg.notify_rx_sigs = glink_pkt_notify_rx_sigs;
+	devp->open_cfg.options |= GLINK_OPT_INITIAL_XPORT;
+	devp->open_cfg.priv = devp;
+
+	devp->link_up = false;
+	devp->link_info.edge = devp->open_cfg.edge;
+	devp->link_info.transport = devp->open_cfg.transport;
+	devp->link_info.glink_link_state_notif_cb =
+				glink_pkt_link_state_cb;
+	devp->i = i;
+	devp->poll_mode = 0;
+	devp->ws_locked = 0;
+	devp->ch_state = GLINK_LOCAL_DISCONNECTED;
+	/* Default timeout for open wait is 120sec */
+	devp->open_time_wait = 120;
+	mutex_init(&devp->ch_lock);
+	init_waitqueue_head(&devp->ch_read_wait_queue);
+	init_waitqueue_head(&devp->ch_opened_wait_queue);
+	init_waitqueue_head(&devp->ch_closed_wait_queue);
+	spin_lock_init(&devp->pa_spinlock);
+	INIT_LIST_HEAD(&devp->pkt_list);
+	spin_lock_init(&devp->pkt_list_lock);
+	wakeup_source_init(&devp->pa_ws, devp->dev_name);
+	INIT_WORK(&devp->packet_arrival_work, packet_arrival_worker);
+
+	devp->link_state_handle =
+		glink_register_link_state_cb(&devp->link_info, devp);
+	if (IS_ERR_OR_NULL(devp->link_state_handle)) {
+		GLINK_PKT_ERR(
+			"%s: link state cb reg. failed edge[%s] name[%s]\n",
+			__func__, devp->open_cfg.edge, devp->open_cfg.name);
+		ret = PTR_ERR(devp->link_state_handle);
+		return ret;
+	}
+	cdev_init(&devp->cdev, &glink_pkt_fops);
+	devp->cdev.owner = THIS_MODULE;
+
+	ret = cdev_add(&devp->cdev, (glink_pkt_number + i), 1);
+	if (ret) {
+		GLINK_PKT_ERR("%s: cdev_add() failed for dev id:%d ret:%i\n",
+			__func__, i, ret);
+		wakeup_source_trash(&devp->pa_ws);
+		return ret;
+	}
+
+	devp->devicep = device_create(glink_pkt_classp,
+			      NULL,
+			      (glink_pkt_number + i),
+			      NULL,
+			      devp->dev_name);
+
+	if (IS_ERR_OR_NULL(devp->devicep)) {
+		GLINK_PKT_ERR("%s: device_create() failed for dev id:%d\n",
+			__func__, i);
+		ret = -ENOMEM;
+		cdev_del(&devp->cdev);
+		wakeup_source_trash(&devp->pa_ws);
+		return ret;
+	}
+
+	if (device_create_file(devp->devicep, &dev_attr_open_timeout))
+		GLINK_PKT_ERR("%s: device_create_file() failed for id:%d\n",
+			__func__, i);
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_add(&devp->dev_list, &glink_pkt_dev_list);
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+
+	return ret;
+}
+
+/**
+ * glink_pkt_core_deinit() - De-initialization for this module
+ *
+ * This function frees all G-Link packet devices and unregisters
+ * the char device region.
+ */
+static void glink_pkt_core_deinit(void)
+{
+	struct glink_pkt_dev *glink_pkt_devp;
+	struct glink_pkt_dev *index;
+
+	mutex_lock(&glink_pkt_dev_lock_lha1);
+	list_for_each_entry_safe(glink_pkt_devp, index, &glink_pkt_dev_list,
+							dev_list) {
+		if (glink_pkt_devp->link_state_handle)
+			glink_unregister_link_state_cb(
+				glink_pkt_devp->link_state_handle);
+		cdev_del(&glink_pkt_devp->cdev);
+		list_del(&glink_pkt_devp->dev_list);
+		device_destroy(glink_pkt_classp,
+			       MKDEV(MAJOR(glink_pkt_number),
+			       glink_pkt_devp->i));
+		kfree(glink_pkt_devp);
+	}
+	mutex_unlock(&glink_pkt_dev_lock_lha1);
+
+	if (!IS_ERR_OR_NULL(glink_pkt_classp))
+		class_destroy(glink_pkt_classp);
+
+	unregister_chrdev_region(MAJOR(glink_pkt_number), num_glink_pkt_ports);
+}
+
+/**
+ * glink_pkt_alloc_chrdev_region() - allocate the char device region
+ *
+ * This function allocates the G-Link packet character device region and
+ * creates the device class.
+ */
+static int glink_pkt_alloc_chrdev_region(void)
+{
+	int ret;
+
+	ret = alloc_chrdev_region(&glink_pkt_number,
+			       0,
+			       num_glink_pkt_ports,
+			       DEVICE_NAME);
+	if (ret) {
+		GLINK_PKT_ERR("%s: alloc_chrdev_region() failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	glink_pkt_classp = class_create(THIS_MODULE, DEVICE_NAME);
+	if (IS_ERR(glink_pkt_classp)) {
+		GLINK_PKT_ERR("%s: class_create() failed ENOMEM\n", __func__);
+		ret = -ENOMEM;
+		unregister_chrdev_region(MAJOR(glink_pkt_number),
+						num_glink_pkt_ports);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * parse_glinkpkt_devicetree() - parse device tree binding
+ *
+ * node:	pointer to device tree node
+ * glink_pkt_devp: pointer to GLINK PACKET device
+ *
+ * Return:	0 on success, -ENODEV on failure.
+ */
+static int parse_glinkpkt_devicetree(struct device_node *node,
+					struct glink_pkt_dev *glink_pkt_devp)
+{
+	char *key;
+
+	key = "qcom,glinkpkt-transport";
+	glink_pkt_devp->open_cfg.transport = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->open_cfg.transport)
+		goto error;
+	GLINK_PKT_INFO("%s transport = %s\n", __func__,
+			glink_pkt_devp->open_cfg.transport);
+
+	key = "qcom,glinkpkt-edge";
+	glink_pkt_devp->open_cfg.edge = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->open_cfg.edge)
+		goto error;
+	GLINK_PKT_INFO("%s edge = %s\n", __func__,
+			glink_pkt_devp->open_cfg.edge);
+
+	key = "qcom,glinkpkt-ch-name";
+	glink_pkt_devp->open_cfg.name = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->open_cfg.name)
+		goto error;
+	GLINK_PKT_INFO("%s ch_name = %s\n", __func__,
+			glink_pkt_devp->open_cfg.name);
+
+	key = "qcom,glinkpkt-dev-name";
+	glink_pkt_devp->dev_name = of_get_property(node, key, NULL);
+	if (!glink_pkt_devp->dev_name)
+		goto error;
+	GLINK_PKT_INFO("%s dev_name = %s\n", __func__,
+			glink_pkt_devp->dev_name);
+	return 0;
+
+error:
+	GLINK_PKT_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+
+}
+
+/**
+ * glink_pkt_devicetree_init() - Initialize and add char devices from device tree
+ *
+ * pdev:	Pointer to device tree data.
+ *
+ * return:	0 on success, -ENODEV on failure.
+ */
+static int glink_pkt_devicetree_init(struct platform_device *pdev)
+{
+	int ret;
+	int i = 0;
+	struct device_node *node;
+	struct glink_pkt_dev *glink_pkt_devp;
+	int subnode_num = 0;
+
+	for_each_child_of_node(pdev->dev.of_node, node)
+		++subnode_num;
+	if (!subnode_num) {
+		GLINK_PKT_ERR("%s subnode_num = %d\n", __func__, subnode_num);
+		return 0;
+	}
+
+	num_glink_pkt_ports = subnode_num;
+
+	ret = glink_pkt_alloc_chrdev_region();
+	if (ret) {
+		GLINK_PKT_ERR("%s: chrdev_region allocation failed ret:%i\n",
+			__func__, ret);
+		return ret;
+	}
+
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		glink_pkt_devp = kzalloc(sizeof(*glink_pkt_devp),
+						GFP_KERNEL);
+		if (IS_ERR_OR_NULL(glink_pkt_devp)) {
+			GLINK_PKT_ERR("%s: allocation failed id:%d\n",
+						__func__, i);
+			ret = -ENOMEM;
+			goto error_destroy;
+		}
+
+		ret = parse_glinkpkt_devicetree(node, glink_pkt_devp);
+		if (ret) {
+			GLINK_PKT_ERR("%s: failed to parse devicetree %d\n",
+						__func__, i);
+			kfree(glink_pkt_devp);
+			goto error_destroy;
+		}
+
+		ret = glink_pkt_init_add_device(glink_pkt_devp, i);
+		if (ret < 0) {
+			GLINK_PKT_ERR("%s: add device failed idx:%d ret=%d\n",
+					__func__, i, ret);
+			kfree(glink_pkt_devp);
+			goto error_destroy;
+		}
+		i++;
+	}
+
+	GLINK_PKT_INFO("G-Link Packet Port Driver Initialized.\n");
+	return 0;
+
+error_destroy:
+	glink_pkt_core_deinit();
+	return ret;
+}
+
+/**
+ * msm_glink_pkt_probe() - Probe a G-Link packet device
+ *
+ * pdev:	Pointer to device tree data.
+ *
+ * return:	0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a G-Link packet device.
+ */
+static int msm_glink_pkt_probe(struct platform_device *pdev)
+{
+	int ret;
+
+	if (pdev) {
+		if (pdev->dev.of_node) {
+			GLINK_PKT_INFO("%s device tree implementation\n",
+							__func__);
+			ret = glink_pkt_devicetree_init(pdev);
+			if (ret)
+				GLINK_PKT_ERR("%s: device tree init failed\n",
+					__func__);
+		}
+	}
+
+	return 0;
+}
+
+static const struct of_device_id msm_glink_pkt_match_table[] = {
+	{ .compatible = "qcom,glinkpkt" },
+	{},
+};
+
+static struct platform_driver msm_glink_pkt_driver = {
+	.probe = msm_glink_pkt_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = msm_glink_pkt_match_table,
+	 },
+};
+
+/**
+ * glink_pkt_init() - Initialization function for this module
+ *
+ * Return:	0 on success, standard Linux error code otherwise.
+ */
+static int __init glink_pkt_init(void)
+{
+	int ret;
+
+	INIT_LIST_HEAD(&glink_pkt_dev_list);
+	INIT_LIST_HEAD(&glink_pkt_driver_list);
+	ret = platform_driver_register(&msm_glink_pkt_driver);
+	if (ret) {
+		GLINK_PKT_ERR("%s: msm_glink_driver register failed %d\n",
+			 __func__, ret);
+		return ret;
+	}
+
+	glink_pkt_ilctxt = ipc_log_context_create(GLINK_PKT_IPC_LOG_PAGE_CNT,
+						"glink_pkt", 0);
+	glink_pkt_wq = create_singlethread_workqueue("glink_pkt_wq");
+	if (!glink_pkt_wq) {
+		GLINK_PKT_ERR("%s: Error creating glink_pkt_wq\n", __func__);
+		platform_driver_unregister(&msm_glink_pkt_driver);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+/**
+ * glink_pkt_cleanup() - Exit function for this module
+ *
+ * This function cleans up the module state when the module is unloaded.
+ */
+static void __exit glink_pkt_cleanup(void)
+{
+	glink_pkt_core_deinit();
+}
+
+module_init(glink_pkt_init);
+module_exit(glink_pkt_cleanup);
+
+MODULE_DESCRIPTION("MSM G-Link Packet Port");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
new file mode 100644
index 0000000..bf2929e
--- /dev/null
+++ b/drivers/soc/qcom/msm_smem.c
@@ -0,0 +1,1512 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ipc_logging.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+
+#include <soc/qcom/smem.h>
+
+
+#include "smem_private.h"
+
+#define MODEM_SBL_VERSION_INDEX 7
+#define SMEM_VERSION_INFO_SIZE (32 * 4)
+#define SMEM_VERSION 0x000B
+
+enum {
+	MSM_SMEM_DEBUG = 1U << 0,
+	MSM_SMEM_INFO = 1U << 1,
+};
+
+static int msm_smem_debug_mask = MSM_SMEM_INFO;
+module_param_named(debug_mask, msm_smem_debug_mask,
+			int, S_IRUGO | S_IWUSR | S_IWGRP);
+static void *smem_ipc_log_ctx;
+#define NUM_LOG_PAGES 4
+
+#define IPC_LOG(x...) do {                                   \
+		if (smem_ipc_log_ctx)                        \
+			ipc_log_string(smem_ipc_log_ctx, x); \
+	} while (0)
+
+
+#define LOG_ERR(x...) do {  \
+		pr_err(x);  \
+		IPC_LOG(x); \
+	} while (0)
+#define SMEM_DBG(x...) do {                               \
+		if (msm_smem_debug_mask & MSM_SMEM_DEBUG) \
+			IPC_LOG(x);                       \
+	} while (0)
+#define SMEM_INFO(x...) do {                             \
+		if (msm_smem_debug_mask & MSM_SMEM_INFO) \
+			IPC_LOG(x);                      \
+	} while (0)
+
+#define SMEM_SPINLOCK_SMEM_ALLOC       "S:3"
+
+static void *smem_ram_base;
+static resource_size_t smem_ram_size;
+static phys_addr_t smem_ram_phys;
+static remote_spinlock_t remote_spinlock;
+static uint32_t num_smem_areas;
+static struct smem_area *smem_areas;
+static struct ramdump_segment *smem_ramdump_segments;
+static int spinlocks_initialized;
+static void *smem_ramdump_dev;
+static DEFINE_MUTEX(spinlock_init_lock);
+static DEFINE_SPINLOCK(smem_init_check_lock);
+static int smem_module_inited;
+static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
+static DEFINE_MUTEX(smem_module_init_notifier_lock);
+static bool probe_done;
+
+/* smem security feature components */
+#define SMEM_TOC_IDENTIFIER 0x434f5424 /* "$TOC" */
+#define SMEM_TOC_MAX_EXCLUSIONS 4
+#define SMEM_PART_HDR_IDENTIFIER 0x54525024 /* "$PRT" */
+#define SMEM_ALLOCATION_CANARY 0xa5a5
+
+struct smem_toc_entry {
+	uint32_t offset;
+	uint32_t size;
+	uint32_t flags;
+	uint16_t host0;
+	uint16_t host1;
+	uint32_t size_cacheline;
+	uint32_t reserved[3];
+	uint32_t exclusion_sizes[SMEM_TOC_MAX_EXCLUSIONS];
+};
+
+struct smem_toc {
+	/* Identifier is a constant, set to SMEM_TOC_IDENTIFIER. */
+	uint32_t identifier;
+	uint32_t version;
+	uint32_t num_entries;
+	uint32_t reserved[5];
+	struct smem_toc_entry entry[];
+};
+
+struct smem_partition_header {
+	/* Identifier is a constant, set to SMEM_PART_HDR_IDENTIFIER. */
+	uint32_t identifier;
+	uint16_t host0;
+	uint16_t host1;
+	uint32_t size;
+	uint32_t offset_free_uncached;
+	uint32_t offset_free_cached;
+	uint32_t reserved[3];
+};
+
+struct smem_partition_allocation_header {
+	/* Canary is a constant, set to SMEM_ALLOCATION_CANARY */
+	uint16_t canary;
+	uint16_t smem_type;
+	uint32_t size; /* includes padding bytes */
+	uint16_t padding_data;
+	uint16_t padding_hdr;
+	uint32_t reserved[1];
+};
+
+struct smem_partition_info {
+	uint32_t partition_num;
+	uint32_t offset;
+	uint32_t size_cacheline;
+};
+
+static struct smem_partition_info partitions[NUM_SMEM_SUBSYSTEMS];
+/* end smem security feature components */
+
+/* Identifier for the SMEM target info struct. */
+#define SMEM_TARG_INFO_IDENTIFIER 0x49494953 /* "SIII" in little-endian. */
+
+struct smem_targ_info_type {
+	/* Identifier is a constant, set to SMEM_TARG_INFO_IDENTIFIER. */
+	uint32_t identifier;
+	uint32_t size;
+	phys_addr_t phys_base_addr;
+};
+
+struct restart_notifier_block {
+	unsigned int processor;
+	char *name;
+	struct notifier_block nb;
+};
+
+static int restart_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *data);
+
+static struct restart_notifier_block restart_notifiers[] = {
+	{SMEM_MODEM, "modem", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_Q6, "lpass", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_WCNSS, "wcnss", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_DSPS, "dsps", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_MODEM, "gss", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_Q6, "adsp", .nb.notifier_call = restart_notifier_cb},
+	{SMEM_DSPS, "slpi", .nb.notifier_call = restart_notifier_cb},
+};
+
+static int init_smem_remote_spinlock(void);
+
+/**
+ * is_probe_done() - Did the probe function successfully complete
+ *
+ * @return - true if probe successfully completed, false otherwise
+ *
+ * Helper function for EPROBE_DEFER support.  If this function returns false,
+ * the calling function should immediately return -EPROBE_DEFER.
+ */
+static bool is_probe_done(void)
+{
+	return probe_done;
+}
+
+/**
+ * smem_phys_to_virt() - Convert a physical base and offset to virtual address
+ *
+ * @base: physical base address to check
+ * @offset: offset from the base to get the final address
+ * @returns: virtual SMEM address; NULL for failure
+ *
+ * Takes a physical address and an offset and checks if the resulting physical
+ * address would fit into one of the smem regions.  If so, returns the
+ * corresponding virtual address.  Otherwise returns NULL.
+ */
+static void *smem_phys_to_virt(phys_addr_t base, unsigned int offset)
+{
+	int i;
+	phys_addr_t phys_addr;
+	resource_size_t size;
+
+	if (OVERFLOW_ADD_UNSIGNED(phys_addr_t, base, offset))
+		return NULL;
+
+	if (!smem_areas) {
+		/*
+		 * Early boot - no area configuration yet, so default
+		 * to using the main memory region.
+		 *
+		 * To remove the MSM_SHARED_RAM_BASE and the static
+		 * mapping of SMEM in the future, add dump_stack()
+		 * to identify the early callers of smem_get_entry()
+		 * (which calls this function) and replace those calls
+		 * with a new function that knows how to lookup the
+		 * SMEM base address before SMEM has been probed.
+		 */
+		phys_addr = smem_ram_phys;
+		size = smem_ram_size;
+
+		if (base >= phys_addr && base + offset < phys_addr + size) {
+			if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_ram_base, offset)) {
+				SMEM_INFO("%s: overflow %p %x\n", __func__,
+					smem_ram_base, offset);
+				return NULL;
+			}
+
+			return smem_ram_base + offset;
+		} else {
+			return NULL;
+		}
+	}
+	for (i = 0; i < num_smem_areas; ++i) {
+		phys_addr = smem_areas[i].phys_addr;
+		size = smem_areas[i].size;
+
+		if (base < phys_addr || base + offset >= phys_addr + size)
+			continue;
+
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas[i].virt_addr, offset)) {
+			SMEM_INFO("%s: overflow %p %x\n", __func__,
+				smem_areas[i].virt_addr, offset);
+			return NULL;
+		}
+
+		return smem_areas[i].virt_addr + offset;
+	}
+
+	return NULL;
+}
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Address of SMEM item (returned by smem_alloc(), etc)
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.  This function does not return -EPROBE_DEFER when the
+ * driver is not ready, since the caller should obtain @smem_address from one
+ * of the other public APIs and would have received -EPROBE_DEFER there, if
+ * applicable.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+	phys_addr_t phys_addr = 0;
+	int i;
+	void *vend;
+
+	if (!smem_areas)
+		return phys_addr;
+
+	for (i = 0; i < num_smem_areas; ++i) {
+		vend = (void *)(smem_areas[i].virt_addr + smem_areas[i].size);
+
+		if (smem_address >= smem_areas[i].virt_addr &&
+				smem_address < vend) {
+			phys_addr = smem_address - smem_areas[i].virt_addr;
+			phys_addr +=  smem_areas[i].phys_addr;
+			break;
+		}
+	}
+
+	return phys_addr;
+}
+EXPORT_SYMBOL(smem_virt_to_phys);
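+
+/*
+ * Sketch of the intended smem_virt_to_phys() usage (illustrative only; the
+ * item id MY_SMEM_ID and size are hypothetical placeholders):
+ *
+ *	void *va = smem_alloc(MY_SMEM_ID, size, SMEM_MODEM, 0);
+ *	phys_addr_t pa;
+ *
+ *	if (!IS_ERR_OR_NULL(va)) {
+ *		pa = smem_virt_to_phys(va);
+ *		if (pa)
+ *			... program the DMA engine with pa ...
+ *	}
+ */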
+
+/**
+ * __smem_get_entry_nonsecure - Get pointer and size of existing SMEM item
+ *
+ * @id:              ID of SMEM item
+ * @size:            Pointer to size variable for storing the result
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock:   True to use the remote spinlock
+ * @returns:         Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_nonsecure(unsigned int id, unsigned int *size,
+		bool skip_init_check, bool use_rspinlock)
+{
+	struct smem_shared *shared = smem_ram_base;
+	struct smem_heap_entry *toc = shared->heap_toc;
+	int use_spinlocks = spinlocks_initialized && use_rspinlock;
+	void *ret = NULL;
+	unsigned long flags = 0;
+	int rc;
+
+	if (!skip_init_check && !smem_initialized_check())
+		return ret;
+
+	if (id >= SMEM_NUM_ITEMS)
+		return ret;
+
+	if (use_spinlocks) {
+		do {
+			rc = remote_spin_trylock_irqsave(&remote_spinlock,
+				flags);
+		} while (!rc);
+	}
+	/* toc is in device memory and cannot be speculatively accessed */
+	if (toc[id].allocated) {
+		phys_addr_t phys_base;
+
+		*size = toc[id].size;
+		barrier();
+
+		phys_base = toc[id].reserved & BASE_ADDR_MASK;
+		if (!phys_base)
+			phys_base = smem_ram_phys;
+		ret = smem_phys_to_virt(phys_base, toc[id].offset);
+	} else {
+		*size = 0;
+	}
+	if (use_spinlocks)
+		remote_spin_unlock_irqrestore(&remote_spinlock, flags);
+
+	return ret;
+}
+
+/**
+ * __smem_get_entry_secure - Get pointer and size of existing SMEM item with
+ *                   security support
+ *
+ * @id:              ID of SMEM item
+ * @size:            Pointer to size variable for storing the result
+ * @to_proc:         SMEM host that shares the item with apps
+ * @flags:           Item attribute flags
+ * @skip_init_check: True means do not verify that SMEM has been initialized
+ * @use_rspinlock:   True to use the remote spinlock
+ * @returns:         Pointer to SMEM item or NULL if it doesn't exist
+ */
+static void *__smem_get_entry_secure(unsigned int id,
+					unsigned int *size,
+					unsigned int to_proc,
+					unsigned int flags,
+					bool skip_init_check,
+					bool use_rspinlock)
+{
+	struct smem_partition_header *hdr;
+	unsigned long lflags = 0;
+	void *item = NULL;
+	struct smem_partition_allocation_header *alloc_hdr;
+	uint32_t partition_num;
+	uint32_t a_hdr_size;
+	int rc;
+
+	SMEM_DBG("%s(%u, %u, %u, %u, %d, %d)\n", __func__, id, *size, to_proc,
+					flags, skip_init_check, use_rspinlock);
+
+	if (!skip_init_check && !smem_initialized_check())
+		return NULL;
+
+	if (id >= SMEM_NUM_ITEMS) {
+		SMEM_INFO("%s: invalid id %d\n", __func__, id);
+		return NULL;
+	}
+
+	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+		SMEM_INFO("%s: id %u invalid to_proc %d\n", __func__, id,
+								to_proc);
+		return NULL;
+	}
+
+	if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
+		return __smem_get_entry_nonsecure(id, size, skip_init_check,
+								use_rspinlock);
+
+	partition_num = partitions[to_proc].partition_num;
+	hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+	if (unlikely(!spinlocks_initialized)) {
+		rc = init_smem_remote_spinlock();
+		if (unlikely(rc)) {
+			SMEM_INFO(
+				"%s: id:%u remote spinlock init failed %d\n",
+						__func__, id, rc);
+			return NULL;
+		}
+	}
+	if (use_rspinlock) {
+		do {
+			rc = remote_spin_trylock_irqsave(&remote_spinlock,
+				lflags);
+		} while (!rc);
+	}
+	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+		LOG_ERR(
+			"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								hdr);
+		BUG();
+	}
+
+	if (flags & SMEM_ITEM_CACHED_FLAG) {
+		a_hdr_size = ALIGN(sizeof(*alloc_hdr),
+				partitions[to_proc].size_cacheline);
+		for (alloc_hdr = (void *)(hdr) + hdr->size - a_hdr_size;
+				(void *)(alloc_hdr) > (void *)(hdr) +
+					hdr->offset_free_cached;
+				alloc_hdr = (void *)(alloc_hdr) -
+						alloc_hdr->size - a_hdr_size) {
+			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+				LOG_ERR(
+					"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								alloc_hdr);
+				BUG();
+
+			}
+			if (alloc_hdr->smem_type == id) {
+				/* 8 byte alignment to match legacy */
+				*size = ALIGN(alloc_hdr->size -
+						alloc_hdr->padding_data, 8);
+				item = (void *)(alloc_hdr) - alloc_hdr->size;
+				break;
+			}
+		}
+	} else {
+		for (alloc_hdr = (void *)(hdr) + sizeof(*hdr);
+				(void *)(alloc_hdr) < (void *)(hdr) +
+					hdr->offset_free_uncached;
+				alloc_hdr = (void *)(alloc_hdr) +
+						sizeof(*alloc_hdr) +
+						alloc_hdr->padding_hdr +
+						alloc_hdr->size) {
+			if (alloc_hdr->canary != SMEM_ALLOCATION_CANARY) {
+				LOG_ERR(
+					"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								alloc_hdr);
+				BUG();
+
+			}
+			if (alloc_hdr->smem_type == id) {
+				/* 8 byte alignment to match legacy */
+				*size = ALIGN(alloc_hdr->size -
+						alloc_hdr->padding_data, 8);
+				item = (void *)(alloc_hdr) +
+						sizeof(*alloc_hdr) +
+						alloc_hdr->padding_hdr;
+				break;
+			}
+		}
+	}
+	if (use_rspinlock)
+		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+
+	return item;
+}
+
+static void *__smem_find(unsigned int id, unsigned int size_in,
+							bool skip_init_check)
+{
+	unsigned int size;
+	void *ptr;
+
+	ptr = __smem_get_entry_nonsecure(id, &size, skip_init_check, true);
+	if (!ptr)
+		return NULL;
+
+	size_in = ALIGN(size_in, 8);
+	if (size_in != size) {
+		SMEM_INFO("smem_find(%u, %u): wrong size %u\n",
+			id, size_in, size);
+		return NULL;
+	}
+
+	return ptr;
+}
+
+/**
+ * smem_find - Find existing item with security support
+ *
+ * @id:       ID of SMEM item
+ * @size_in:  Size of the SMEM item
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *	if the driver is not ready
+ */
+void *smem_find(unsigned int id, unsigned int size_in, unsigned int to_proc,
+							unsigned int flags)
+{
+	unsigned int size;
+	void *ptr;
+
+	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+									flags);
+
+	/*
+	 * Handle the circular dependency between SMEM and software-implemented
+	 * remote spinlocks.  SMEM must initialize the remote spinlocks in
+	 * probe() before probe completes.  EPROBE_DEFER handling cannot resolve
+	 * this code path, so the spinlock item must be treated as a special
+	 * case.
+	 */
+	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	ptr = smem_get_entry(id, &size, to_proc, flags);
+	if (!ptr)
+		return NULL;
+
+	size_in = ALIGN(size_in, 8);
+	if (size_in != size) {
+		SMEM_INFO("smem_find(%u, %u, %u, %u): wrong size %u\n",
+			id, size_in, to_proc, flags, size);
+		return NULL;
+	}
+
+	return ptr;
+}
+EXPORT_SYMBOL(smem_find);
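+
+/*
+ * Typical caller pattern for smem_find() (a hedged sketch; the item id and
+ * size below are hypothetical placeholders):
+ *
+ *	void *item = smem_find(MY_SMEM_ID, MY_ITEM_SIZE, SMEM_MODEM, 0);
+ *
+ *	if (IS_ERR(item))
+ *		return PTR_ERR(item);
+ *	if (!item)
+ *		return -ENODEV;
+ *
+ * IS_ERR() catches -EPROBE_DEFER before this driver has probed; a NULL
+ * return means the item does not exist or has an unexpected size.
+ */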
+
+/**
+ * alloc_item_nonsecure - Allocate an SMEM item in the nonsecure partition
+ *
+ * @id:              ID of SMEM item
+ * @size_in:         Size to allocate
+ * @returns:         Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and does not already exist.  Assumes
+ * size_in is already adjusted for alignment, if necessary.  Requires the
+ * remote spinlock to already be locked.
+ */
+static void *alloc_item_nonsecure(unsigned int id, unsigned int size_in)
+{
+	void *smem_base = smem_ram_base;
+	struct smem_shared *shared = smem_base;
+	struct smem_heap_entry *toc = shared->heap_toc;
+	void *ret = NULL;
+
+	if (shared->heap_info.heap_remaining >= size_in) {
+		toc[id].offset = shared->heap_info.free_offset;
+		toc[id].size = size_in;
+		/*
+		 * wmb() is necessary to ensure the allocation data is
+		 * consistent before setting the allocated flag to prevent race
+		 * conditions with remote processors
+		 */
+		wmb();
+		toc[id].allocated = 1;
+
+		shared->heap_info.free_offset += size_in;
+		shared->heap_info.heap_remaining -= size_in;
+		ret = smem_base + toc[id].offset;
+		/*
+		 * wmb() is necessary to ensure the heap data is consistent
+		 * before continuing to prevent race conditions with remote
+		 * processors
+		 */
+		wmb();
+	} else {
+		SMEM_INFO("%s: id %u not enough memory %u (required %u)\n",
+			__func__, id, shared->heap_info.heap_remaining,
+			size_in);
+	}
+
+	return ret;
+}
+
+/**
+ * alloc_item_secure - Allocate an SMEM item in a secure partition
+ *
+ * @id:              ID of SMEM item
+ * @size_in:         Size to allocate
+ * @to_proc:         SMEM host that shares the item with apps
+ * @flags:           Item attribute flags
+ * @returns:         Pointer to SMEM item or NULL for error
+ *
+ * Assumes the id parameter is valid and does not already exist.  Assumes
+ * size_in is the raw size requested by the client.  Assumes to_proc is a valid
+ * host, and a valid partition to that host exists.  Requires the remote
+ * spinlock to already be locked.
+ */
+static void *alloc_item_secure(unsigned int id, unsigned int size_in,
+				unsigned int to_proc, unsigned int flags)
+{
+	void *smem_base = smem_ram_base;
+	struct smem_partition_header *hdr;
+	struct smem_partition_allocation_header *alloc_hdr;
+	uint32_t a_hdr_size;
+	uint32_t a_data_size;
+	uint32_t size_cacheline;
+	uint32_t free_space;
+	uint32_t partition_num;
+	void *ret = NULL;
+
+	hdr = smem_base + partitions[to_proc].offset;
+	partition_num = partitions[to_proc].partition_num;
+
+	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+		LOG_ERR(
+			"%s: SMEM corruption detected.  Partition %d to %d at %p\n",
+								__func__,
+								partition_num,
+								to_proc,
+								hdr);
+		BUG();
+	}
+
+	size_cacheline = partitions[to_proc].size_cacheline;
+	free_space = hdr->offset_free_cached -
+					hdr->offset_free_uncached;
+
+	if (flags & SMEM_ITEM_CACHED_FLAG) {
+		a_hdr_size = ALIGN(sizeof(*alloc_hdr), size_cacheline);
+		a_data_size = ALIGN(size_in, size_cacheline);
+		if (free_space < a_hdr_size + a_data_size) {
+			SMEM_INFO(
+				"%s: id %u not enough memory %u (required %u)\n",
+						__func__, id, free_space,
+						a_hdr_size + a_data_size);
+			return ret;
+		}
+		alloc_hdr = (void *)(hdr) + hdr->offset_free_cached -
+								a_hdr_size;
+		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+		alloc_hdr->smem_type = id;
+		alloc_hdr->size = a_data_size;
+		alloc_hdr->padding_data = a_data_size - size_in;
+		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+		hdr->offset_free_cached = hdr->offset_free_cached -
+						a_hdr_size - a_data_size;
+		ret = (void *)(alloc_hdr) - a_data_size;
+		/*
+		 * The SMEM protocol currently does not support cacheable
+		 * areas within the smem region, but if it ever does in the
+		 * future, then cache management needs to be done here.
+		 * The area of memory this item is allocated from will need to
+ * be dynamically made cacheable, and a cache flush of the
+		 * allocation header using __cpuc_flush_dcache_area and
+		 * outer_flush_area will need to be done.
+		 */
+	} else {
+		a_hdr_size = sizeof(*alloc_hdr);
+		a_data_size = ALIGN(size_in, 8);
+		if (free_space < a_hdr_size + a_data_size) {
+			SMEM_INFO(
+				"%s: id %u not enough memory %u (required %u)\n",
+						__func__, id, free_space,
+						a_hdr_size + a_data_size);
+			return ret;
+		}
+		alloc_hdr = (void *)(hdr) + hdr->offset_free_uncached;
+		alloc_hdr->canary = SMEM_ALLOCATION_CANARY;
+		alloc_hdr->smem_type = id;
+		alloc_hdr->size = a_data_size;
+		alloc_hdr->padding_data = a_data_size - size_in;
+		alloc_hdr->padding_hdr = a_hdr_size - sizeof(*alloc_hdr);
+		hdr->offset_free_uncached = hdr->offset_free_uncached +
+						a_hdr_size + a_data_size;
+		ret = alloc_hdr + 1;
+	}
+	/*
+	 * wmb() is necessary to ensure the heap and allocation data is
+	 * consistent before continuing to prevent race conditions with remote
+	 * processors
+	 */
+	wmb();
+
+	return ret;
+}
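+
+/*
+ * Rough picture of a secured partition as implied by the allocators above
+ * (offsets are relative to the start of the partition):
+ *
+ *	0                     .. struct smem_partition_header
+ *	uncached region       .. [alloc_hdr][padding][data] entries growing
+ *	                         toward higher offsets, up to
+ *	                         hdr->offset_free_uncached
+ *	free space            .. offset_free_cached - offset_free_uncached
+ *	cached region         .. [data][alloc_hdr] entries growing from
+ *	                         hdr->size toward lower offsets, down to
+ *	                         hdr->offset_free_cached
+ */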
+
+/**
+ * smem_alloc - Find an existing item, otherwise allocate it with security
+ *		support
+ *
+ * @id:       ID of SMEM item
+ * @size_in:  Size of the SMEM item
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it couldn't be found/allocated,
+ *	or -EPROBE_DEFER if the driver is not ready
+ */
+void *smem_alloc(unsigned int id, unsigned int size_in, unsigned int to_proc,
+							unsigned int flags)
+{
+	unsigned long lflags;
+	void *ret = NULL;
+	int rc;
+	unsigned int size_out;
+	unsigned int a_size_in;
+
+	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, size_in, to_proc,
+									flags);
+
+	if (!is_probe_done())
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (!smem_initialized_check())
+		return NULL;
+
+	if (id >= SMEM_NUM_ITEMS) {
+		SMEM_INFO("%s: invalid id %u\n", __func__, id);
+		return NULL;
+	}
+
+	if (!(flags & SMEM_ANY_HOST_FLAG) && to_proc >= NUM_SMEM_SUBSYSTEMS) {
+		SMEM_INFO("%s: invalid to_proc %u for id %u\n", __func__,
+								to_proc, id);
+		return NULL;
+	}
+
+	if (unlikely(!spinlocks_initialized)) {
+		rc = init_smem_remote_spinlock();
+		if (unlikely(rc)) {
+			SMEM_INFO("%s: id:%u remote spinlock init failed %d\n",
+							__func__, id, rc);
+			return NULL;
+		}
+	}
+
+	a_size_in = ALIGN(size_in, 8);
+	do {
+		rc = remote_spin_trylock_irqsave(&remote_spinlock, lflags);
+	} while (!rc);
+
+	ret = __smem_get_entry_secure(id, &size_out, to_proc, flags, true,
+									false);
+	if (ret) {
+		SMEM_INFO("%s: %u already allocated\n", __func__, id);
+		if (a_size_in == size_out) {
+			remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+			return ret;
+		}
+		remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+		SMEM_INFO("%s: id %u wrong size %u (expected %u)\n",
+			__func__, id, size_out, a_size_in);
+		return NULL;
+	}
+
+	if (id > SMEM_FIXED_ITEM_LAST) {
+		SMEM_INFO("%s: allocating %u size %u to_proc %u flags %u\n",
+					__func__, id, size_in, to_proc, flags);
+		if (flags & SMEM_ANY_HOST_FLAG || !partitions[to_proc].offset)
+			ret = alloc_item_nonsecure(id, a_size_in);
+		else
+			ret = alloc_item_secure(id, size_in, to_proc, flags);
+
+	} else {
+		SMEM_INFO("%s: attempted to allocate non-dynamic item %u\n",
+								__func__, id);
+	}
+
+	remote_spin_unlock_irqrestore(&remote_spinlock, lflags);
+	return ret;
+}
+EXPORT_SYMBOL(smem_alloc);
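+
+/*
+ * Minimal smem_alloc() sketch (illustrative; MY_SMEM_ID and the size are
+ * placeholders, not values required by this driver):
+ *
+ *	void *buf = smem_alloc(MY_SMEM_ID, size, 0, SMEM_ANY_HOST_FLAG);
+ *
+ *	if (IS_ERR(buf))
+ *		return PTR_ERR(buf);
+ *	if (!buf)
+ *		return -ENOMEM;
+ *
+ * SMEM_ANY_HOST_FLAG allocates from the default (nonsecure) heap; passing a
+ * specific to_proc with flags of 0 targets that host's secured partition,
+ * if one exists.
+ */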
+
+/**
+ * smem_get_entry - Get existing item with security support
+ *
+ * @id:       ID of SMEM item
+ * @size:     Pointer to size variable for storing the result
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *	if the driver isn't ready
+ */
+void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
+							unsigned int flags)
+{
+	SMEM_DBG("%s(%u, %u, %u, %u)\n", __func__, id, *size, to_proc, flags);
+
+	/*
+	 * Handle the circular dependency between SMEM and software-implemented
+	 * remote spinlocks.  SMEM must initialize the remote spinlocks in
+	 * probe() before probe completes.  EPROBE_DEFER handling cannot resolve
+	 * this code path, so the spinlock item must be treated as a special
+	 * case.
+	 */
+	if (!is_probe_done() && id != SMEM_SPINLOCK_ARRAY)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return __smem_get_entry_secure(id, size, to_proc, flags, false, true);
+}
+EXPORT_SYMBOL(smem_get_entry);
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id:       ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item, NULL if it doesn't exist, or -EPROBE_DEFER
+ *	if the driver isn't ready
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned int id, unsigned int *size_out,
+				unsigned int to_proc, unsigned int flags)
+{
+	if (!is_probe_done())
+		return ERR_PTR(-EPROBE_DEFER);
+
+	return __smem_get_entry_secure(id, size_out, to_proc, flags, false,
+									false);
+}
+EXPORT_SYMBOL(smem_get_entry_no_rlock);
+
+/**
+ * smem_get_remote_spinlock - Remote spinlock pointer for unit testing.
+ *
+ * @returns: pointer to SMEM remote spinlock
+ */
+remote_spinlock_t *smem_get_remote_spinlock(void)
+{
+	if (unlikely(!spinlocks_initialized))
+		init_smem_remote_spinlock();
+	return &remote_spinlock;
+}
+EXPORT_SYMBOL(smem_get_remote_spinlock);
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ *				partition
+ *
+ * @to_proc: remote SMEM host.  Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive.  Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created.  SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned int smem_get_free_space(unsigned int to_proc)
+{
+	struct smem_partition_header *hdr;
+	struct smem_shared *shared;
+
+	if (to_proc >= NUM_SMEM_SUBSYSTEMS) {
+		pr_err("%s: invalid to_proc:%d\n", __func__, to_proc);
+		return UINT_MAX;
+	}
+
+	if (partitions[to_proc].offset) {
+		if (unlikely(OVERFLOW_ADD_UNSIGNED(uintptr_t,
+					(uintptr_t)smem_areas[0].virt_addr,
+					partitions[to_proc].offset))) {
+			pr_err("%s: unexpected overflow detected\n", __func__);
+			return UINT_MAX;
+		}
+		hdr = smem_areas[0].virt_addr + partitions[to_proc].offset;
+		return hdr->offset_free_cached - hdr->offset_free_uncached;
+	}
+	shared = smem_ram_base;
+	return shared->heap_info.heap_remaining;
+}
+EXPORT_SYMBOL(smem_get_free_space);
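+
+/*
+ * Illustrative use of the free-space hint described above (a sketch, not
+ * SMD's actual code):
+ *
+ *	unsigned int now = smem_get_free_space(remote_pid);
+ *
+ *	if (now != last_free_space) {
+ *		last_free_space = now;
+ *		... rescan the channel allocation table ...
+ *	}
+ */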
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number on success, otherwise zero.
+ */
+unsigned int smem_get_version(unsigned int idx)
+{
+	int *version_array;
+
+	if (idx >= 32) {
+		pr_err("%s: invalid idx:%d\n", __func__, idx);
+		return 0;
+	}
+
+	version_array = __smem_find(SMEM_VERSION_INFO, SMEM_VERSION_INFO_SIZE,
+							true);
+	if (version_array == NULL)
+		return 0;
+
+	return version_array[idx];
+}
+EXPORT_SYMBOL(smem_get_version);
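+
+/*
+ * Example, mirroring the SBL check performed in smem_initialized_check()
+ * below:
+ *
+ *	if (smem_get_version(MODEM_SBL_VERSION_INDEX) != SMEM_VERSION << 16)
+ *		... SMEM was not pre-initialized by the master SBL ...
+ */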
+
+/**
+ * init_smem_remote_spinlock - Reentrant remote spinlock initialization
+ *
+ * @returns: 0 on success or an error code on failure
+ */
+static int init_smem_remote_spinlock(void)
+{
+	int rc = 0;
+
+	/*
+	 * Optimistic locking.  Init only needs to be done once by the first
+	 * caller.  After that, serializing inits between different callers
+	 * is unnecessary.  The second check after the lock ensures init
+	 * wasn't previously completed by someone else before the lock could
+	 * be grabbed.
+	 */
+	if (!spinlocks_initialized) {
+		mutex_lock(&spinlock_init_lock);
+		if (!spinlocks_initialized) {
+			rc = remote_spin_lock_init(&remote_spinlock,
+						SMEM_SPINLOCK_SMEM_ALLOC);
+			if (!rc)
+				spinlocks_initialized = 1;
+		}
+		mutex_unlock(&spinlock_init_lock);
+	}
+	return rc;
+}
+
+/**
+ * smem_initialized_check - Reentrant check that smem has been initialized
+ *
+ * @returns: true if initialized, false if not.
+ */
+bool smem_initialized_check(void)
+{
+	static int checked;
+	static int is_inited;
+	unsigned long flags;
+	struct smem_shared *smem;
+
+	if (likely(checked)) {
+		if (unlikely(!is_inited))
+			LOG_ERR("%s: smem not initialized\n", __func__);
+		return is_inited;
+	}
+
+	spin_lock_irqsave(&smem_init_check_lock, flags);
+	if (checked) {
+		spin_unlock_irqrestore(&smem_init_check_lock, flags);
+		if (unlikely(!is_inited))
+			LOG_ERR("%s: smem not initialized\n", __func__);
+		return is_inited;
+	}
+
+	smem = smem_ram_base;
+
+	if (smem->heap_info.initialized != 1)
+		goto failed;
+	if (smem->heap_info.reserved != 0)
+		goto failed;
+
+	/*
+	 * The Modem SBL is now the Master SBL version and is required to
+	 * pre-initialize SMEM and fill in any necessary configuration
+	 * structures.  Without the extra configuration data, the SMEM driver
+	 * cannot be properly initialized.
+	 */
+	if (smem_get_version(MODEM_SBL_VERSION_INDEX) != SMEM_VERSION << 16) {
+		pr_err("%s: SBL version not correct\n", __func__);
+		goto failed;
+	}
+
+	is_inited = 1;
+	checked = 1;
+	spin_unlock_irqrestore(&smem_init_check_lock, flags);
+	return is_inited;
+
+failed:
+	is_inited = 0;
+	checked = 1;
+	spin_unlock_irqrestore(&smem_init_check_lock, flags);
+	LOG_ERR(
+		"%s: shared memory needs to be initialized by SBL before booting\n",
+								__func__);
+	return is_inited;
+}
+EXPORT_SYMBOL(smem_initialized_check);
+
+static int restart_notifier_cb(struct notifier_block *this,
+				unsigned long code,
+				void *data)
+{
+	struct restart_notifier_block *notifier;
+	struct notif_data *notifdata = data;
+	int ret;
+
+	switch (code) {
+
+	case SUBSYS_AFTER_SHUTDOWN:
+		notifier = container_of(this,
+					struct restart_notifier_block, nb);
+		SMEM_INFO("%s: ssrestart for processor %d ('%s')\n",
+				__func__, notifier->processor,
+				notifier->name);
+		remote_spin_release(&remote_spinlock, notifier->processor);
+		remote_spin_release_all(notifier->processor);
+		break;
+	case SUBSYS_SOC_RESET:
+		if (!(smem_ramdump_dev && notifdata->enable_mini_ramdumps))
+			break;
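+		/* fall through and save the (mini) ramdump */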
+	case SUBSYS_RAMDUMP_NOTIFICATION:
+		if (!(smem_ramdump_dev && (notifdata->enable_mini_ramdumps
+						|| notifdata->enable_ramdump)))
+			break;
+		SMEM_DBG("%s: saving ramdump\n", __func__);
+		/*
+		 * XPU protection does not currently allow the
+		 * auxiliary memory regions to be dumped.  If this
+		 * changes, then num_smem_areas + 1 should be passed
+		 * into do_elf_ramdump() to dump all regions.
+		 */
+		ret = do_elf_ramdump(smem_ramdump_dev,
+				smem_ramdump_segments, 1);
+		if (ret < 0)
+			LOG_ERR("%s: unable to dump smem %d\n", __func__, ret);
+		break;
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static __init int modem_restart_late_init(void)
+{
+	int i;
+	void *handle;
+	struct restart_notifier_block *nb;
+
+	smem_ramdump_dev = create_ramdump_device("smem", NULL);
+	if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
+		LOG_ERR("%s: Unable to create smem ramdump device.\n",
+			__func__);
+		smem_ramdump_dev = NULL;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(restart_notifiers); i++) {
+		nb = &restart_notifiers[i];
+		handle = subsys_notif_register_notifier(nb->name, &nb->nb);
+		SMEM_DBG("%s: registering notif for '%s', handle=%p\n",
+				__func__, nb->name, handle);
+	}
+
+	return 0;
+}
+late_initcall(modem_restart_late_init);
+
+int smem_module_init_notifier_register(struct notifier_block *nb)
+{
+	int ret;
+
+	if (!nb)
+		return -EINVAL;
+	mutex_lock(&smem_module_init_notifier_lock);
+	ret = raw_notifier_chain_register(&smem_module_init_notifier_list, nb);
+	if (smem_module_inited)
+		nb->notifier_call(nb, 0, NULL);
+	mutex_unlock(&smem_module_init_notifier_lock);
+	return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_register);
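+
+/*
+ * Sketch of a client registering for the module-init notification (the
+ * callback and notifier_block names are hypothetical):
+ *
+ *	static int my_smem_ready_cb(struct notifier_block *nb,
+ *				    unsigned long action, void *data)
+ *	{
+ *		... start using the smem_*() APIs ...
+ *		return NOTIFY_OK;
+ *	}
+ *	static struct notifier_block my_smem_nb = {
+ *		.notifier_call = my_smem_ready_cb,
+ *	};
+ *
+ *	smem_module_init_notifier_register(&my_smem_nb);
+ *
+ * If SMEM has already initialized, the callback is invoked immediately from
+ * within the register call, as implemented above.
+ */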
+
+int smem_module_init_notifier_unregister(struct notifier_block *nb)
+{
+	int ret;
+
+	if (!nb)
+		return -EINVAL;
+	mutex_lock(&smem_module_init_notifier_lock);
+	ret = raw_notifier_chain_unregister(&smem_module_init_notifier_list,
+						nb);
+	mutex_unlock(&smem_module_init_notifier_lock);
+	return ret;
+}
+EXPORT_SYMBOL(smem_module_init_notifier_unregister);
+
+static void smem_module_init_notify(uint32_t state, void *data)
+{
+	mutex_lock(&smem_module_init_notifier_lock);
+	smem_module_inited = 1;
+	raw_notifier_call_chain(&smem_module_init_notifier_list,
+					state, data);
+	mutex_unlock(&smem_module_init_notifier_lock);
+}
+
+/**
+ * smem_init_security_partition - Init local structures for a secured smem
+ *                   partition that has apps as one of the hosts
+ *
+ * @entry:           Entry in the security TOC for the partition to init
+ * @num:             Partition ID
+ *
+ * Initialize local data structures to point to a secured smem partition
+ * that is accessible by apps and another processor.  Assumes that one of the
+ * listed hosts is apps.  Verifies that the partition is valid; otherwise it is
+ * skipped.  Checks for memory corruption and will BUG() if detected.  Assumes
+ * smem_areas is already initialized and that smem_areas[0] corresponds to the
+ * smem region with the secured partitions.
+ */
+static void smem_init_security_partition(struct smem_toc_entry *entry,
+								uint32_t num)
+{
+	uint16_t remote_host;
+	struct smem_partition_header *hdr;
+
+	if (!entry->offset) {
+		SMEM_INFO("Skipping smem partition %d - bad offset\n", num);
+		return;
+	}
+	if (!entry->size) {
+		SMEM_INFO("Skipping smem partition %d - bad size\n", num);
+		return;
+	}
+	if (!entry->size_cacheline) {
+		SMEM_INFO("Skipping smem partition %d - bad cacheline\n", num);
+		return;
+	}
+
+	if (entry->host0 == SMEM_APPS)
+		remote_host = entry->host1;
+	else
+		remote_host = entry->host0;
+
+	if (remote_host >= NUM_SMEM_SUBSYSTEMS) {
+		SMEM_INFO("Skipping smem partition %d - bad remote:%d\n", num,
+								remote_host);
+		return;
+	}
+	if (partitions[remote_host].offset) {
+		SMEM_INFO("Skipping smem partition %d - duplicate of %d\n", num,
+					partitions[remote_host].partition_num);
+		return;
+	}
+
+	hdr = smem_areas[0].virt_addr + entry->offset;
+
+	if (entry->host0 != SMEM_APPS && entry->host1 != SMEM_APPS) {
+		SMEM_INFO(
+			"Non-APSS Partition %d offset:%x host0:%d host1:%d\n",
+			num, entry->offset, entry->host0, entry->host1);
+		return;
+	}
+
+	if (hdr->identifier != SMEM_PART_HDR_IDENTIFIER) {
+		LOG_ERR("Smem partition %d hdr magic is bad\n", num);
+		BUG();
+	}
+	if (!hdr->size) {
+		LOG_ERR("Smem partition %d size is 0\n", num);
+		BUG();
+	}
+	if (hdr->offset_free_uncached > hdr->size) {
+		LOG_ERR("Smem partition %d uncached heap exceeds size\n", num);
+		BUG();
+	}
+	if (hdr->offset_free_cached > hdr->size) {
+		LOG_ERR("Smem partition %d cached heap exceeds size\n", num);
+		BUG();
+	}
+	if (hdr->host0 != SMEM_APPS && hdr->host1 != SMEM_APPS) {
+		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+		BUG();
+	}
+	if (hdr->host0 != remote_host && hdr->host1 != remote_host) {
+		LOG_ERR("Smem partition %d hosts don't match TOC\n", num);
+		BUG();
+	}
+
+	partitions[remote_host].partition_num = num;
+	partitions[remote_host].offset = entry->offset;
+	partitions[remote_host].size_cacheline = entry->size_cacheline;
+	SMEM_INFO("Partition %d offset:%x remote:%d\n", num, entry->offset,
+								remote_host);
+}
+
+/**
+ * smem_init_security - Init local support for secured smem
+ *
+ * Looks for a valid security TOC, and if one is found, parses it looking for
+ * partitions that apps can access.  If any such partitions are found, do the
+ * required local initialization to support them.  Assumes smem_areas is
+ * initialized and smem_areas[0] corresponds to the smem region with the TOC.
+ */
+static void smem_init_security(void)
+{
+	struct smem_toc *toc;
+	uint32_t i;
+
+	SMEM_DBG("%s\n", __func__);
+
+	toc = smem_areas[0].virt_addr + smem_areas[0].size - 4 * 1024;
+
+	if (toc->identifier != SMEM_TOC_IDENTIFIER) {
+		LOG_ERR("%s failed: invalid TOC magic\n", __func__);
+		return;
+	}
+
+	for (i = 0; i < toc->num_entries; ++i) {
+		SMEM_DBG("Partition %d host0:%d host1:%d\n", i,
+							toc->entry[i].host0,
+							toc->entry[i].host1);
+		smem_init_security_partition(&toc->entry[i], i);
+	}
+
+	SMEM_DBG("%s done\n", __func__);
+}
+
+/**
+ * smem_init_target_info - Init smem target information
+ *
+ * @info_addr : smem target info physical address.
+ * @size : size of the smem target info structure.
+ *
+ * This function is used to initialize the smem_targ_info structure and checks
+ * for valid identifier, if identifier is valid initialize smem variables.
+ */
+static int smem_init_target_info(phys_addr_t info_addr, resource_size_t size)
+{
+	struct smem_targ_info_type *smem_targ_info;
+	void *smem_targ_info_addr;
+
+	smem_targ_info_addr = ioremap_nocache(info_addr, size);
+	if (!smem_targ_info_addr) {
+		LOG_ERR("%s: failed ioremap_nocache() of addr:%pa size:%pa\n",
+				__func__, &info_addr, &size);
+		return -ENODEV;
+	}
+	smem_targ_info =
+		(struct smem_targ_info_type __iomem *)smem_targ_info_addr;
+
+	if (smem_targ_info->identifier != SMEM_TARG_INFO_IDENTIFIER) {
+		LOG_ERR("%s failed: invalid TARGET INFO magic\n", __func__);
+		iounmap(smem_targ_info_addr);
+		return -ENODEV;
+	}
+	smem_ram_phys = smem_targ_info->phys_base_addr;
+	smem_ram_size = smem_targ_info->size;
+	iounmap(smem_targ_info_addr);
+	return 0;
+}
+
+static int msm_smem_probe(struct platform_device *pdev)
+{
+	char *key;
+	struct resource *r;
+	phys_addr_t aux_mem_base;
+	resource_size_t aux_mem_size;
+	int temp_string_size = 11; /* max 3 digit count */
+	char temp_string[temp_string_size];
+	int ret;
+	struct ramdump_segment *ramdump_segments_tmp = NULL;
+	struct smem_area *smem_areas_tmp = NULL;
+	int smem_idx = 0;
+	bool security_enabled;
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"smem_targ_info_imem");
+	if (r) {
+		if (smem_init_target_info(r->start, resource_size(r)))
+			goto smem_targ_info_legacy;
+		goto smem_targ_info_done;
+	}
+
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"smem_targ_info_reg");
+	if (r) {
+		void *reg_base_addr;
+		uint64_t base_addr;
+
+		reg_base_addr = ioremap_nocache(r->start, resource_size(r));
+		if (!reg_base_addr)
+			goto smem_targ_info_legacy;
+		base_addr = (uint32_t)readl_relaxed(reg_base_addr);
+		base_addr |=
+			((uint64_t)readl_relaxed(reg_base_addr + 0x4) << 32);
+		iounmap(reg_base_addr);
+		if ((base_addr == 0) || ((base_addr >> 32) != 0)) {
+			SMEM_INFO("%s: Invalid SMEM address\n", __func__);
+			goto smem_targ_info_legacy;
+		}
+		if (smem_init_target_info(base_addr,
+				sizeof(struct smem_targ_info_type)))
+			goto smem_targ_info_legacy;
+		goto smem_targ_info_done;
+	}
+
+smem_targ_info_legacy:
+	SMEM_INFO("%s: reading dt-specified SMEM address\n", __func__);
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "smem");
+	if (r) {
+		smem_ram_size = resource_size(r);
+		smem_ram_phys = r->start;
+	}
+
+smem_targ_info_done:
+	if (!smem_ram_phys || !smem_ram_size) {
+		LOG_ERR("%s: Missing SMEM TARGET INFO\n", __func__);
+		return -ENODEV;
+	}
+
+	smem_ram_base = ioremap_nocache(smem_ram_phys, smem_ram_size);
+
+	if (!smem_ram_base) {
+		LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+				__func__,
+				&smem_ram_phys, &smem_ram_size);
+		return -ENODEV;
+	}
+
+	if (!smem_initialized_check())
+		return -ENODEV;
+
+	/*
+	 * The software implementation of the remote spinlocks requires
+	 * smem_find(), which needs smem_ram_base to be initialized.  The
+	 * remote spinlock item is guaranteed to be allocated by the
+	 * bootloader, so this is the safest and earliest place to init the
+	 * spinlock.
+	 */
+	ret = init_smem_remote_spinlock();
+	if (ret) {
+		LOG_ERR("%s: remote spinlock init failed %d\n", __func__, ret);
+		return ret;
+	}
+
+	key = "irq-reg-base";
+	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, key);
+	if (!r) {
+		LOG_ERR("%s: missing '%s'\n", __func__, key);
+		return -ENODEV;
+	}
+
+	num_smem_areas = 1;
+	while (1) {
+		scnprintf(temp_string, temp_string_size, "aux-mem%d",
+				num_smem_areas);
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								temp_string);
+		if (!r)
+			break;
+
+		++num_smem_areas;
+		if (num_smem_areas > 999) {
+			LOG_ERR("%s: max num aux mem regions reached\n",
+								__func__);
+			break;
+		}
+	}
+	/* Initialize main SMEM region and SSR ramdump region */
+	smem_areas_tmp = kmalloc_array(num_smem_areas, sizeof(struct smem_area),
+				GFP_KERNEL);
+	if (!smem_areas_tmp) {
+		LOG_ERR("%s: smem areas kmalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto free_smem_areas;
+	}
+
+	ramdump_segments_tmp = kcalloc(num_smem_areas,
+			sizeof(struct ramdump_segment), GFP_KERNEL);
+	if (!ramdump_segments_tmp) {
+		LOG_ERR("%s: ramdump segment kmalloc failed\n", __func__);
+		ret = -ENOMEM;
+		goto free_smem_areas;
+	}
+	smem_areas_tmp[smem_idx].phys_addr =  smem_ram_phys;
+	smem_areas_tmp[smem_idx].size = smem_ram_size;
+	smem_areas_tmp[smem_idx].virt_addr = smem_ram_base;
+
+	ramdump_segments_tmp[smem_idx].address = smem_ram_phys;
+	ramdump_segments_tmp[smem_idx].size = smem_ram_size;
+	++smem_idx;
+
+	/* Configure auxiliary SMEM regions */
+	while (1) {
+		scnprintf(temp_string, temp_string_size, "aux-mem%d",
+								smem_idx);
+		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							temp_string);
+		if (!r)
+			break;
+		aux_mem_base = r->start;
+		aux_mem_size = resource_size(r);
+
+		ramdump_segments_tmp[smem_idx].address = aux_mem_base;
+		ramdump_segments_tmp[smem_idx].size = aux_mem_size;
+
+		smem_areas_tmp[smem_idx].phys_addr = aux_mem_base;
+		smem_areas_tmp[smem_idx].size = aux_mem_size;
+		smem_areas_tmp[smem_idx].virt_addr = ioremap_nocache(
+			(unsigned long)(smem_areas_tmp[smem_idx].phys_addr),
+			smem_areas_tmp[smem_idx].size);
+		SMEM_DBG("%s: %s = %pa %pa -> %p", __func__, temp_string,
+				&aux_mem_base, &aux_mem_size,
+				smem_areas_tmp[smem_idx].virt_addr);
+
+		if (!smem_areas_tmp[smem_idx].virt_addr) {
+			LOG_ERR("%s: ioremap_nocache() of addr:%pa size: %pa\n",
+				__func__,
+				&smem_areas_tmp[smem_idx].phys_addr,
+				&smem_areas_tmp[smem_idx].size);
+			ret = -ENOMEM;
+			goto free_smem_areas;
+		}
+
+		if (OVERFLOW_ADD_UNSIGNED(uintptr_t,
+				(uintptr_t)smem_areas_tmp[smem_idx].virt_addr,
+				smem_areas_tmp[smem_idx].size)) {
+			LOG_ERR(
+				"%s: invalid virtual address block %i: %p:%pa\n",
+					__func__, smem_idx,
+					smem_areas_tmp[smem_idx].virt_addr,
+					&smem_areas_tmp[smem_idx].size);
+			++smem_idx;
+			ret = -EINVAL;
+			goto free_smem_areas;
+		}
+
+		++smem_idx;
+		if (smem_idx > 999) {
+			LOG_ERR("%s: max num aux mem regions reached\n",
+							__func__);
+			break;
+		}
+	}
+
+	smem_areas = smem_areas_tmp;
+	smem_ramdump_segments = ramdump_segments_tmp;
+
+	key = "qcom,mpu-enabled";
+	security_enabled = of_property_read_bool(pdev->dev.of_node, key);
+	if (security_enabled) {
+		SMEM_INFO("smem security enabled\n");
+		smem_init_security();
+	}
+
+	probe_done = true;
+
+	ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	if (ret)
+		LOG_ERR("%s: of_platform_populate failed %d\n", __func__, ret);
+
+	return 0;
+
+free_smem_areas:
+	for (smem_idx = smem_idx - 1; smem_idx >= 1; --smem_idx)
+		iounmap(smem_areas_tmp[smem_idx].virt_addr);
+
+	num_smem_areas = 0;
+	kfree(ramdump_segments_tmp);
+	kfree(smem_areas_tmp);
+	return ret;
+}
+
+static const struct of_device_id msm_smem_match_table[] = {
+	{ .compatible = "qcom,smem" },
+	{},
+};
+
+static struct platform_driver msm_smem_driver = {
+	.probe = msm_smem_probe,
+	.driver = {
+		.name = "msm_smem",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smem_match_table,
+	},
+};
+
+int __init msm_smem_init(void)
+{
+	static bool registered;
+	int rc;
+
+	if (registered)
+		return 0;
+
+	registered = true;
+
+	smem_ipc_log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smem", 0);
+	if (!smem_ipc_log_ctx) {
+		pr_err("%s: unable to create logging context\n", __func__);
+		msm_smem_debug_mask = 0;
+	}
+
+	rc = platform_driver_register(&msm_smem_driver);
+	if (rc) {
+		LOG_ERR("%s: msm_smem_driver register failed %d\n",
+							__func__, rc);
+		return rc;
+	}
+
+	smem_module_init_notify(0, NULL);
+
+	return 0;
+}
+
+arch_initcall(msm_smem_init);
diff --git a/drivers/soc/qcom/msm_smp2p.c b/drivers/soc/qcom/msm_smp2p.c
new file mode 100644
index 0000000..35d836d
--- /dev/null
+++ b/drivers/soc/qcom/msm_smp2p.c
@@ -0,0 +1,1956 @@
+/* drivers/soc/qcom/msm_smp2p.c
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ipc_logging.h>
+#include <linux/err.h>
+#include <soc/qcom/smem.h>
+#include "smp2p_private_api.h"
+#include "smp2p_private.h"
+
+#define NUM_LOG_PAGES 3
+
+/**
+ * struct msm_smp2p_out - This structure represents the outbound SMP2P entry.
+ *
+ * @remote_pid: Outbound processor ID.
+ * @name: Entry name.
+ * @out_edge_list: Adds this structure into smp2p_out_list_item::list.
+ * @msm_smp2p_notifier_list: Notifier block head used to notify for open event.
+ * @open_nb: Notifier block used to notify for open event.
+ * @l_smp2p_entry: Pointer to the actual entry in the SMEM item.
+ */
+struct msm_smp2p_out {
+	int remote_pid;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	struct list_head out_edge_list;
+	struct raw_notifier_head msm_smp2p_notifier_list;
+	struct notifier_block *open_nb;
+	uint32_t __iomem *l_smp2p_entry;
+};
+
+/**
+ * struct smp2p_out_list_item - Maintains the state of outbound edge.
+ *
+ * @out_item_lock_lha1: Lock protecting all elements of the structure.
+ * @list: list of outbound entries (struct msm_smp2p_out).
+ * @smem_edge_out: Pointer to outbound smem item.
+ * @smem_edge_state: State of the outbound edge.
+ * @ops_ptr: Pointer to internal version-specific SMEM item access functions.
+ *
+ * @feature_ssr_ack_enabled: SSR ACK Support Enabled
+ * @restart_ack: Current cached state of the local ack bit
+ */
+struct smp2p_out_list_item {
+	spinlock_t out_item_lock_lha1;
+
+	struct list_head list;
+	struct smp2p_smem __iomem *smem_edge_out;
+	enum msm_smp2p_edge_state smem_edge_state;
+	struct smp2p_version_if *ops_ptr;
+
+	bool feature_ssr_ack_enabled;
+	bool restart_ack;
+};
+static struct smp2p_out_list_item out_list[SMP2P_NUM_PROCS];
+
+static void *log_ctx;
+static int smp2p_debug_mask = MSM_SMP2P_INFO | MSM_SMP2P_DEBUG;
+module_param_named(debug_mask, smp2p_debug_mask,
+		   int, 0664);
+
+/**
+ * struct smp2p_in - Represents the entry on remote processor.
+ *
+ * @name: Name of the entry.
+ * @remote_pid: Remote processor ID.
+ * @in_edge_list: Adds this structure into smp2p_in_list_item::list.
+ * @in_notifier_list: List for notifier block for entry opening/updates.
+ * @prev_entry_val: Previous value of the entry.
+ * @entry_ptr: Points to the current value in smem item.
+ * @notifier_count: Counts the number of notifier registered per pid,entry.
+ */
+struct smp2p_in {
+	int remote_pid;
+	char name[SMP2P_MAX_ENTRY_NAME];
+	struct list_head in_edge_list;
+	struct raw_notifier_head in_notifier_list;
+	uint32_t prev_entry_val;
+	uint32_t __iomem *entry_ptr;
+	uint32_t notifier_count;
+};
+
+/**
+ * struct smp2p_in_list_item - Maintains the inbound edge state.
+ *
+ * @in_item_lock_lhb1: Lock protecting all elements of the structure.
+ * @list: List head for the entries on remote processor.
+ * @smem_edge_in: Pointer to the remote smem item.
+ * @item_size: Size of the remote smem item in bytes.
+ * @safe_total_entries: Validated number of entries in the remote smem item.
+ */
+struct smp2p_in_list_item {
+	spinlock_t in_item_lock_lhb1;
+	struct list_head list;
+	struct smp2p_smem __iomem *smem_edge_in;
+	uint32_t item_size;
+	uint32_t safe_total_entries;
+};
+static struct smp2p_in_list_item in_list[SMP2P_NUM_PROCS];
+
+/**
+ * struct smp2p_version_if - SMEM item access function interface.
+ *
+ * This interface is used to help isolate the implementation of
+ * the functionality from any changes in the shared data structures
+ * that may happen as versions are changed.
+ *
+ * @is_supported: True if this version is supported by SMP2P
+ * @negotiate_features: Returns (sub)set of supported features
+ * @negotiation_complete:  Called when negotiation has been completed
+ * @find_entry: Finds existing / next empty entry
+ * @create_entry: Creates a new entry
+ * @read_entry: Reads the value of an entry
+ * @write_entry: Writes a new value to an entry
+ * @modify_entry: Does a read/modify/write of an entry
+ * @validate_size: Verifies the size of the remote SMEM item to ensure that
+ *                an invalid item size doesn't result in an out-of-bounds
+ *                memory access.
+ */
+struct smp2p_version_if {
+	/* common functions */
+	bool is_supported;
+	uint32_t (*negotiate_features)(uint32_t features);
+	void (*negotiation_complete)(struct smp2p_out_list_item *);
+	void (*find_entry)(struct smp2p_smem __iomem *item,
+			uint32_t entries_total,	char *name,
+			uint32_t **entry_ptr, int *empty_spot);
+
+	/* outbound entry functions */
+	int (*create_entry)(struct msm_smp2p_out *);
+	int (*read_entry)(struct msm_smp2p_out *, uint32_t *);
+	int (*write_entry)(struct msm_smp2p_out *, uint32_t);
+	int (*modify_entry)(struct msm_smp2p_out *, uint32_t, uint32_t, bool);
+
+	/* inbound entry functions */
+	struct smp2p_smem __iomem *(*validate_size)(int remote_pid,
+			struct smp2p_smem __iomem *, uint32_t);
+};
+
+static int smp2p_do_negotiation(int remote_pid, struct smp2p_out_list_item *p);
+static void smp2p_send_interrupt(int remote_pid);
+
+/* v0 (uninitialized SMEM item) interface functions */
+static uint32_t smp2p_negotiate_features_v0(uint32_t features);
+static void smp2p_negotiation_complete_v0(struct smp2p_out_list_item *out_item);
+static void smp2p_find_entry_v0(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot);
+static int smp2p_out_create_v0(struct msm_smp2p_out *);
+static int smp2p_out_read_v0(struct msm_smp2p_out *, uint32_t *);
+static int smp2p_out_write_v0(struct msm_smp2p_out *, uint32_t);
+static int smp2p_out_modify_v0(struct msm_smp2p_out *,
+					uint32_t, uint32_t, bool);
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v0(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size);
+
+/* v1 interface functions */
+static uint32_t smp2p_negotiate_features_v1(uint32_t features);
+static void smp2p_negotiation_complete_v1(struct smp2p_out_list_item *out_item);
+static void smp2p_find_entry_v1(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot);
+static int smp2p_out_create_v1(struct msm_smp2p_out *);
+static int smp2p_out_read_v1(struct msm_smp2p_out *, uint32_t *);
+static int smp2p_out_write_v1(struct msm_smp2p_out *, uint32_t);
+static int smp2p_out_modify_v1(struct msm_smp2p_out *,
+					uint32_t, uint32_t, bool);
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v1(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size);
+
+/* Version interface functions */
+static struct smp2p_version_if version_if[] = {
+	[0] = {
+		.negotiate_features = smp2p_negotiate_features_v0,
+		.negotiation_complete = smp2p_negotiation_complete_v0,
+		.find_entry = smp2p_find_entry_v0,
+		.create_entry = smp2p_out_create_v0,
+		.read_entry = smp2p_out_read_v0,
+		.write_entry = smp2p_out_write_v0,
+		.modify_entry = smp2p_out_modify_v0,
+		.validate_size = smp2p_in_validate_size_v0,
+	},
+	[1] = {
+		.is_supported = true,
+		.negotiate_features = smp2p_negotiate_features_v1,
+		.negotiation_complete = smp2p_negotiation_complete_v1,
+		.find_entry = smp2p_find_entry_v1,
+		.create_entry = smp2p_out_create_v1,
+		.read_entry = smp2p_out_read_v1,
+		.write_entry = smp2p_out_write_v1,
+		.modify_entry = smp2p_out_modify_v1,
+		.validate_size = smp2p_in_validate_size_v1,
+	},
+};
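+
+/*
+ * The negotiated protocol version is used to index the table above, e.g.
+ * (an illustrative sketch; the negotiation itself is handled by
+ * smp2p_do_negotiation(), declared earlier but not shown in this hunk):
+ *
+ *	out_item->ops_ptr = &version_if[negotiated_version];
+ *	rc = out_item->ops_ptr->create_entry(out_entry);
+ *
+ * so callers never need version-specific branches.
+ */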
+
+/* interrupt configuration (filled by device tree) */
+static struct smp2p_interrupt_config smp2p_int_cfgs[SMP2P_NUM_PROCS] = {
+	[SMP2P_MODEM_PROC].name = "modem",
+	[SMP2P_AUDIO_PROC].name = "lpass",
+	[SMP2P_SENSOR_PROC].name = "dsps",
+	[SMP2P_WIRELESS_PROC].name = "wcnss",
+	[SMP2P_CDSP_PROC].name = "cdsp",
+	[SMP2P_TZ_PROC].name = "tz",
+	[SMP2P_REMOTE_MOCK_PROC].name = "mock",
+};
+
+/**
+ * smp2p_get_log_ctx - Return log context for other SMP2P modules.
+ *
+ * @returns: Log context or NULL if none.
+ */
+void *smp2p_get_log_ctx(void)
+{
+	return log_ctx;
+}
+
+/**
+ * smp2p_get_debug_mask - Return debug mask.
+ *
+ * @returns: Current debug mask.
+ */
+int smp2p_get_debug_mask(void)
+{
+	return smp2p_debug_mask;
+}
+
+/**
+ * smp2p_get_interrupt_config - Return interrupt configuration.
+ *
+ * @returns: Interrupt configuration array for usage by debugfs.
+ */
+struct smp2p_interrupt_config *smp2p_get_interrupt_config(void)
+{
+	return smp2p_int_cfgs;
+}
+
+/**
+ * smp2p_pid_to_name - Look up the name for a remote pid.
+ *
+ * @remote_pid: Processor ID of the remote system.
+ * @returns:    Name of the remote system (may be NULL).
+ */
+const char *smp2p_pid_to_name(int remote_pid)
+{
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	return smp2p_int_cfgs[remote_pid].name;
+}
+
+/**
+ * smp2p_get_in_item - Return pointer to remote smem item.
+ *
+ * @remote_pid: Processor ID of the remote system.
+ * @returns:    Pointer to inbound SMEM item
+ *
+ * This is used by debugfs to print the smem items.
+ */
+struct smp2p_smem __iomem *smp2p_get_in_item(int remote_pid)
+{
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	spin_lock_irqsave(&in_list[remote_pid].in_item_lock_lhb1, flags);
+	ret = in_list[remote_pid].smem_edge_in;
+	spin_unlock_irqrestore(&in_list[remote_pid].in_item_lock_lhb1,
+								flags);
+
+	return ret;
+}
+
+/**
+ * smp2p_get_out_item - Return pointer to outbound SMEM item.
+ *
+ * @remote_pid: Processor ID of remote system.
+ * @state:      Edge state of the outbound SMEM item.
+ * @returns:    Pointer to outbound (remote) SMEM item.
+ */
+struct smp2p_smem __iomem *smp2p_get_out_item(int remote_pid, int *state)
+{
+	void *ret = NULL;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return NULL;
+
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	ret = out_list[remote_pid].smem_edge_out;
+	if (state)
+		*state = out_list[remote_pid].smem_edge_state;
+	spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1, flags);
+
+	return ret;
+}
+
+/**
+ * smp2p_get_smem_item_id - Return the proper SMEM item ID.
+ *
+ * @write_pid:	Processor that will write to the item.
+ * @read_pid:	Processor that will read from the item.
+ * @returns:	SMEM item ID, or -EINVAL for an unknown writer.
+ */
+static int smp2p_get_smem_item_id(int write_pid, int read_pid)
+{
+	int ret = -EINVAL;
+
+	switch (write_pid) {
+	case SMP2P_APPS_PROC:
+		ret = SMEM_SMP2P_APPS_BASE + read_pid;
+		break;
+	case SMP2P_MODEM_PROC:
+		ret = SMEM_SMP2P_MODEM_BASE + read_pid;
+		break;
+	case SMP2P_AUDIO_PROC:
+		ret = SMEM_SMP2P_AUDIO_BASE + read_pid;
+		break;
+	case SMP2P_SENSOR_PROC:
+		ret = SMEM_SMP2P_SENSOR_BASE + read_pid;
+		break;
+	case SMP2P_WIRELESS_PROC:
+		ret = SMEM_SMP2P_WIRLESS_BASE + read_pid;
+		break;
+	case SMP2P_CDSP_PROC:
+		ret = SMEM_SMP2P_CDSP_BASE + read_pid;
+		break;
+	case SMP2P_POWER_PROC:
+		ret = SMEM_SMP2P_POWER_BASE + read_pid;
+		break;
+	case SMP2P_TZ_PROC:
+		ret = SMEM_SMP2P_TZ_BASE + read_pid;
+		break;
+	}
+
+	return ret;
+}
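+
+/*
+ * For illustration only (the values follow directly from the switch above):
+ * the item the local Apps processor writes toward the modem is
+ *
+ *	smp2p_get_smem_item_id(SMP2P_APPS_PROC, SMP2P_MODEM_PROC)
+ *		== SMEM_SMP2P_APPS_BASE + SMP2P_MODEM_PROC
+ *
+ * while the item the modem writes toward Apps is
+ * SMEM_SMP2P_MODEM_BASE + SMP2P_APPS_PROC, so each direction of an edge
+ * gets its own SMEM item.
+ */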
+
+/**
+ * smp2p_get_local_smem_item - Return SMEM item owned by the local processor.
+ *
+ * @remote_pid: Remote processor ID
+ * @returns:    NULL for failure; otherwise pointer to SMEM item
+ *
+ * Must be called with out_item_lock_lha1 locked for mock proc.
+ */
+static void *smp2p_get_local_smem_item(int remote_pid)
+{
+	struct smp2p_smem __iomem *item_ptr = NULL;
+
+	if (remote_pid < SMP2P_REMOTE_MOCK_PROC) {
+		unsigned int size;
+		int smem_id;
+
+		/* lookup or allocate SMEM item */
+		smem_id = smp2p_get_smem_item_id(SMP2P_APPS_PROC, remote_pid);
+		if (smem_id >= 0) {
+			item_ptr = smem_get_entry(smem_id, &size,
+								remote_pid, 0);
+
+			if (!item_ptr) {
+				size = sizeof(struct smp2p_smem_item);
+				item_ptr = smem_alloc(smem_id, size,
+								remote_pid, 0);
+			}
+		}
+	} else if (remote_pid == SMP2P_REMOTE_MOCK_PROC) {
+		/*
+		 * This path is only used during unit testing so
+		 * the GFP_ATOMIC allocation should not be a
+		 * concern.
+		 */
+		if (!out_list[SMP2P_REMOTE_MOCK_PROC].smem_edge_out)
+			item_ptr = kzalloc(
+					sizeof(struct smp2p_smem_item),
+					GFP_ATOMIC);
+	}
+	return item_ptr;
+}
+
+/**
+ * smp2p_get_remote_smem_item - Return remote SMEM item.
+ *
+ * @remote_pid: Remote processor ID
+ * @out_item:   Pointer to the output item structure
+ * @returns:    NULL for failure; otherwise pointer to SMEM item
+ *
+ * Return pointer to SMEM item owned by the remote processor.
+ *
+ * Note that this function does an SMEM lookup which uses a remote spinlock,
+ * so this function should not be called more than necessary.
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static void *smp2p_get_remote_smem_item(int remote_pid,
+	struct smp2p_out_list_item *out_item)
+{
+	void *item_ptr = NULL;
+	unsigned int size = 0;
+
+	if (!out_item)
+		return item_ptr;
+
+	if (remote_pid < SMP2P_REMOTE_MOCK_PROC) {
+		int smem_id;
+
+		smem_id = smp2p_get_smem_item_id(remote_pid, SMP2P_APPS_PROC);
+		if (smem_id >= 0)
+			item_ptr = smem_get_entry(smem_id, &size,
+								remote_pid, 0);
+	} else if (remote_pid == SMP2P_REMOTE_MOCK_PROC) {
+		item_ptr = msm_smp2p_get_remote_mock_smem_item(&size);
+	}
+	item_ptr = out_item->ops_ptr->validate_size(remote_pid, item_ptr, size);
+
+	return item_ptr;
+}
+
+/**
+ * smp2p_ssr_ack_needed - Returns true if SSR ACK required
+ *
+ * @rpid: Remote processor ID
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static bool smp2p_ssr_ack_needed(uint32_t rpid)
+{
+	bool ssr_done;
+
+	if (!out_list[rpid].feature_ssr_ack_enabled)
+		return false;
+
+	ssr_done = SMP2P_GET_RESTART_DONE(in_list[rpid].smem_edge_in->flags);
+	if (ssr_done != out_list[rpid].restart_ack)
+		return true;
+
+	return false;
+}
+
+/**
+ * smp2p_do_ssr_ack - Handles SSR ACK
+ *
+ * @rpid: Remote processor ID
+ *
+ * Must be called with out_item_lock_lha1 and in_item_lock_lhb1 locked.
+ */
+static void smp2p_do_ssr_ack(uint32_t rpid)
+{
+	bool ack;
+
+	if (!smp2p_ssr_ack_needed(rpid))
+		return;
+
+	ack = !out_list[rpid].restart_ack;
+	SMP2P_INFO("%s: ssr ack pid %d: %d -> %d\n", __func__, rpid,
+			out_list[rpid].restart_ack, ack);
+	out_list[rpid].restart_ack = ack;
+	SMP2P_SET_RESTART_ACK(out_list[rpid].smem_edge_out->flags, ack);
+	smp2p_send_interrupt(rpid);
+}
+
+/**
+ * smp2p_negotiate_features_v1 - Initial feature negotiation.
+ *
+ * @features: Inbound feature set.
+ * @returns: Supported features (the same as or a subset of @features).
+ */
+static uint32_t smp2p_negotiate_features_v1(uint32_t features)
+{
+	return SMP2P_FEATURE_SSR_ACK;
+}
+
+/**
+ * smp2p_negotiation_complete_v1 - Negotiation completed
+ *
+ * @out_item:   Pointer to the output item structure
+ *
+ * Can be used to do final configuration based upon the negotiated feature set.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static void smp2p_negotiation_complete_v1(struct smp2p_out_list_item *out_item)
+{
+	uint32_t features;
+
+	features = SMP2P_GET_FEATURES(out_item->smem_edge_out->feature_version);
+
+	if (features & SMP2P_FEATURE_SSR_ACK)
+		out_item->feature_ssr_ack_enabled = true;
+}
+
+/**
+ * smp2p_find_entry_v1 - Search for an entry in SMEM item.
+ *
+ * @item: Pointer to the smem item.
+ * @entries_total: Total number of entries in @item.
+ * @name: Name of the entry.
+ * @entry_ptr: Set to pointer of entry if found, NULL otherwise.
+ * @empty_spot: If non-null, set to the value of the next empty entry.
+ *
+ * Searches for entry @name in the SMEM item.  If found, @entry_ptr is set
+ * to point to the entry; otherwise the index of the first empty slot is
+ * returned in @empty_spot.
+ */
+static void smp2p_find_entry_v1(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot)
+{
+	int i;
+	struct smp2p_entry_v1 *pos;
+	char entry_name[SMP2P_MAX_ENTRY_NAME];
+
+	if (!item || !name || !entry_ptr) {
+		SMP2P_ERR("%s: invalid arguments %d %d %d\n",
+				__func__, !item, !name, !entry_ptr);
+		return;
+	}
+
+	*entry_ptr = NULL;
+	if (empty_spot)
+		*empty_spot = -1;
+
+	pos = (struct smp2p_entry_v1 *)(char *)(item + 1);
+	for (i = 0; i < entries_total; i++, ++pos) {
+		memcpy_fromio(entry_name, pos->name, SMP2P_MAX_ENTRY_NAME);
+		if (entry_name[0]) {
+			if (!strcmp(entry_name, name)) {
+				*entry_ptr = &pos->entry;
+				break;
+			}
+		} else if (empty_spot && *empty_spot < 0) {
+			*empty_spot = i;
+		}
+	}
+}
+
+/**
+ * smp2p_out_create_v1 - Creates an outbound SMP2P entry.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_create_v1(struct msm_smp2p_out *out_entry)
+{
+	struct smp2p_smem __iomem *smp2p_h_ptr;
+	struct smp2p_out_list_item *p_list;
+	uint32_t *state_entry_ptr;
+	uint32_t empty_spot;
+	uint32_t entries_total;
+	uint32_t entries_valid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	p_list = &out_list[out_entry->remote_pid];
+	if (p_list->smem_edge_state != SMP2P_EDGE_STATE_OPENED) {
+		SMP2P_ERR("%s: item '%s':%d not open - wrong create called\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		return -ENODEV;
+	}
+
+	smp2p_h_ptr = p_list->smem_edge_out;
+	entries_total = SMP2P_GET_ENT_TOTAL(smp2p_h_ptr->valid_total_ent);
+	entries_valid = SMP2P_GET_ENT_VALID(smp2p_h_ptr->valid_total_ent);
+
+	p_list->ops_ptr->find_entry(smp2p_h_ptr, entries_total,
+			out_entry->name, &state_entry_ptr, &empty_spot);
+	if (state_entry_ptr) {
+		/* re-use existing entry */
+		out_entry->l_smp2p_entry = state_entry_ptr;
+
+		SMP2P_DBG("%s: item '%s':%d reused\n", __func__,
+				out_entry->name, out_entry->remote_pid);
+	} else if (entries_valid >= entries_total) {
+		/* need a new entry, but there is no more space */
+		SMP2P_ERR("%s: no space for item '%s':%d\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		return -ENOMEM;
+	} else {
+		/* allocate a new entry */
+		struct smp2p_entry_v1 *entry_ptr;
+
+		entry_ptr = (struct smp2p_entry_v1 *)((char *)(smp2p_h_ptr + 1)
+			+ empty_spot * sizeof(struct smp2p_entry_v1));
+		memcpy_toio(entry_ptr->name, out_entry->name,
+						sizeof(entry_ptr->name));
+		out_entry->l_smp2p_entry = &entry_ptr->entry;
+		++entries_valid;
+		SMP2P_DBG("%s: item '%s':%d fully created as entry %d of %d\n",
+				__func__, out_entry->name,
+				out_entry->remote_pid,
+				entries_valid, entries_total);
+		SMP2P_SET_ENT_VALID(smp2p_h_ptr->valid_total_ent,
+				entries_valid);
+		smp2p_send_interrupt(out_entry->remote_pid);
+	}
+	raw_notifier_call_chain(&out_entry->msm_smp2p_notifier_list,
+		  SMP2P_OPEN, 0);
+
+	return 0;
+}
+
+/**
+ * smp2p_out_read_v1 - Read the data from an outbound entry.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: Out pointer, the data is available in this argument on success.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_read_v1(struct msm_smp2p_out *out_entry, uint32_t *data)
+{
+	struct smp2p_smem __iomem  *smp2p_h_ptr;
+	uint32_t remote_pid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
+	remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+
+	if (remote_pid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (out_entry->l_smp2p_entry) {
+		*data = readl_relaxed(out_entry->l_smp2p_entry);
+	} else {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, remote_pid);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/**
+ * smp2p_out_write_v1 - Writes an outbound entry value.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: The data to be written.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_write_v1(struct msm_smp2p_out *out_entry, uint32_t data)
+{
+	struct smp2p_smem __iomem  *smp2p_h_ptr;
+	uint32_t remote_pid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
+	remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+
+	if (remote_pid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (out_entry->l_smp2p_entry) {
+		writel_relaxed(data, out_entry->l_smp2p_entry);
+		smp2p_send_interrupt(remote_pid);
+	} else {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, remote_pid);
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_out_modify_v1 - Modifies an outbound entry value.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @set_mask:  Mask containing the bits that need to be set.
+ * @clear_mask: Mask containing the bits that need to be cleared.
+ * @send_irq: Flag to send interrupt to remote processor.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The clear mask is applied first, so if a bit is set in both the clear and
+ * the set mask, the result is that the bit ends up set.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_modify_v1(struct msm_smp2p_out *out_entry,
+		uint32_t set_mask, uint32_t clear_mask, bool send_irq)
+{
+	struct smp2p_smem __iomem  *smp2p_h_ptr;
+	uint32_t remote_pid;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
+	remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+
+	if (remote_pid != out_entry->remote_pid)
+		return -EINVAL;
+
+	if (out_entry->l_smp2p_entry) {
+		uint32_t curr_value;
+
+		curr_value = readl_relaxed(out_entry->l_smp2p_entry);
+		writel_relaxed((curr_value & ~clear_mask) | set_mask,
+			out_entry->l_smp2p_entry);
+	} else {
+		SMP2P_ERR("%s: '%s':%d not yet OPEN\n", __func__,
+				out_entry->name, remote_pid);
+		return -ENODEV;
+	}
+
+	if (send_irq)
+		smp2p_send_interrupt(remote_pid);
+	return 0;
+}
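+
+/*
+ * Worked example of the clear-then-set semantics above (the values are
+ * illustrative only): with a current entry value of 0xF0, clear_mask 0xF0
+ * and set_mask 0x0C, the new value is (0xF0 & ~0xF0) | 0x0C == 0x0C.  A bit
+ * present in both masks ends up set because the set mask is applied last.
+ */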
+
+/**
+ * smp2p_in_validate_size_v1 - Size validation for version 1.
+ *
+ * @remote_pid: Remote processor ID.
+ * @smem_item:  Pointer to the inbound SMEM item.
+ * @size:       Size of the SMEM item.
+ * @returns:    Validated smem_item pointer, or NULL for an invalid pid/item.
+ *
+ * Validates that we don't end up with out-of-bounds array access due to an
+ * invalid SMEM item size.  If the item is too small for the number of
+ * entries it claims, an error message is printed and the number of usable
+ * entries is clamped to what actually fits in the item.
+ *
+ * Must be called with in_item_lock_lhb1 locked.
+ */
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v1(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size)
+{
+	uint32_t total_entries;
+	unsigned int expected_size;
+	struct smp2p_smem __iomem *item_ptr;
+	struct smp2p_in_list_item *in_item;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !smem_item)
+		return NULL;
+
+	in_item = &in_list[remote_pid];
+	item_ptr = (struct smp2p_smem __iomem *)smem_item;
+
+	total_entries = SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent);
+	if (total_entries > 0) {
+		in_item->safe_total_entries = total_entries;
+		in_item->item_size = size;
+
+		expected_size =	sizeof(struct smp2p_smem) +
+			(total_entries * sizeof(struct smp2p_entry_v1));
+
+		if (size < expected_size) {
+			unsigned int new_size;
+
+			new_size = size;
+			new_size -= sizeof(struct smp2p_smem);
+			new_size /= sizeof(struct smp2p_entry_v1);
+			in_item->safe_total_entries = new_size;
+
+			SMP2P_ERR(
+				"%s pid %d item too small for %d entries; expected: %d actual: %d; reduced to %d entries\n",
+				__func__, remote_pid, total_entries,
+				expected_size, size, new_size);
+		}
+	} else {
+		/*
+		 * Total entries is 0, so the entry is still being initialized
+		 * or is invalid.  Either way, treat it as if the item does
+		 * not exist yet.
+		 */
+		in_item->safe_total_entries = 0;
+		in_item->item_size = 0;
+	}
+	return item_ptr;
+}
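+
+/*
+ * Clamping sketch (the entry counts are illustrative, not taken from any
+ * target): if the header claims 16 entries but the item only provides
+ * sizeof(struct smp2p_smem) + 10 * sizeof(struct smp2p_entry_v1) bytes,
+ * safe_total_entries is reduced to
+ * (size - sizeof(struct smp2p_smem)) / sizeof(struct smp2p_entry_v1) == 10,
+ * so inbound scans never read past the end of the SMEM item.
+ */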
+
+/**
+ * smp2p_negotiate_features_v0 - Initial feature negotiation.
+ *
+ * @features: Inbound feature set.
+ * @returns: 0 (no features supported for v0).
+ */
+static uint32_t smp2p_negotiate_features_v0(uint32_t features)
+{
+	/* no supported features */
+	return 0;
+}
+
+/**
+ * smp2p_negotiation_complete_v0 - Negotiation completed
+ *
+ * @out_item:   Pointer to the output item structure
+ *
+ * Should never be called for v0, since negotiation cannot complete on a v0
+ * item; it only logs an error.
+ */
+static void smp2p_negotiation_complete_v0(struct smp2p_out_list_item *out_item)
+{
+	SMP2P_ERR("%s: invalid negotiation complete for v0 pid %d\n",
+		__func__,
+		SMP2P_GET_REMOTE_PID(out_item->smem_edge_out->rem_loc_proc_id));
+}
+
+/**
+ * smp2p_find_entry_v0 - Stub function.
+ *
+ * @item: Pointer to the smem item.
+ * @entries_total: Total number of entries in @item.
+ * @name: Name of the entry.
+ * @entry_ptr: Set to pointer of entry if found, NULL otherwise.
+ * @empty_spot: If non-null, set to the value of the next empty entry.
+ *
+ * Entries cannot be searched for until item negotiation has been completed.
+ */
+static void smp2p_find_entry_v0(struct smp2p_smem __iomem *item,
+		uint32_t entries_total, char *name, uint32_t **entry_ptr,
+		int *empty_spot)
+{
+	if (entry_ptr)
+		*entry_ptr = NULL;
+
+	if (empty_spot)
+		*empty_spot = -1;
+
+	SMP2P_ERR("%s: invalid - item negotiation incomplete\n", __func__);
+}
+
+/**
+ * smp2p_out_create_v0 - Initial creation function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * If the outbound SMEM item negotiation is not complete, then
+ * this function is called to start the negotiation process.
+ * Eventually when the negotiation process is complete, this
+ * function pointer is switched with the appropriate function
+ * for the version of SMP2P being created.
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static int smp2p_out_create_v0(struct msm_smp2p_out *out_entry)
+{
+	int edge_state;
+	struct smp2p_out_list_item *item_ptr;
+
+	if (!out_entry)
+		return -EINVAL;
+
+	edge_state = out_list[out_entry->remote_pid].smem_edge_state;
+
+	switch (edge_state) {
+	case SMP2P_EDGE_STATE_CLOSED:
+		/* start negotiation */
+		item_ptr = &out_list[out_entry->remote_pid];
+		edge_state = smp2p_do_negotiation(out_entry->remote_pid,
+				item_ptr);
+		break;
+
+	case SMP2P_EDGE_STATE_OPENING:
+		/* still negotiating */
+		break;
+
+	case SMP2P_EDGE_STATE_OPENED:
+		SMP2P_ERR("%s: item '%s':%d opened - wrong create called\n",
+			__func__, out_entry->name, out_entry->remote_pid);
+		break;
+
+	default:
+		SMP2P_ERR("%s: item '%s':%d invalid SMEM item state %d\n",
+			__func__, out_entry->name, out_entry->remote_pid,
+			edge_state);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * smp2p_out_read_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: Out pointer, the data is available in this argument on success.
+ * @returns: -ENODEV
+ */
+static int smp2p_out_read_v0(struct msm_smp2p_out *out_entry, uint32_t *data)
+{
+	SMP2P_ERR("%s: item '%s':%d not OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_out_write_v0 - Stub function.
+ *
+ * @out_entry: Pointer to the SMP2P entry structure.
+ * @data: The data to be written.
+ * @returns: -ENODEV
+ */
+static int smp2p_out_write_v0(struct msm_smp2p_out *out_entry, uint32_t data)
+{
+	SMP2P_ERR("%s: item '%s':%d not yet OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_out_modify_v0 - Stub function.
+ *
+ * @set_mask:  Mask containing the bits that needs to be set.
+ * @clear_mask: Mask containing the bits that needs to be cleared.
+ * @send_irq: Flag to send interrupt to remote processor.
+ * @returns: -ENODEV
+ */
+static int smp2p_out_modify_v0(struct msm_smp2p_out *out_entry,
+		uint32_t set_mask, uint32_t clear_mask, bool send_irq)
+{
+	SMP2P_ERR("%s: item '%s':%d not yet OPEN\n",
+		__func__, out_entry->name, out_entry->remote_pid);
+
+	return -ENODEV;
+}
+
+/**
+ * smp2p_in_validate_size_v0 - Size validation for version 0.
+ *
+ * @remote_pid: Remote processor ID.
+ * @smem_item:  Pointer to the inbound SMEM item.
+ * @size:       Size of the SMEM item.
+ * @returns:    Validated smem_item pointer (or NULL if size is too small).
+ *
+ * Validates we don't end up with out-of-bounds array access due to invalid
+ * smem item size.  If out-of-bounds array access can't be avoided, then an
+ * error message is printed and NULL is returned to prevent usage of the
+ * item.
+ *
+ * Must be called with in_item_lock_lhb1 locked.
+ */
+static struct smp2p_smem __iomem *smp2p_in_validate_size_v0(int remote_pid,
+		struct smp2p_smem __iomem *smem_item, uint32_t size)
+{
+	struct smp2p_in_list_item *in_item;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !smem_item)
+		return NULL;
+
+	in_item = &in_list[remote_pid];
+
+	if (size < sizeof(struct smp2p_smem)) {
+		SMP2P_ERR(
+			"%s pid %d item size too small; expected: %zu actual: %d\n",
+			__func__, remote_pid,
+			sizeof(struct smp2p_smem), size);
+		smem_item = NULL;
+		in_item->item_size = 0;
+	} else {
+		in_item->item_size = size;
+	}
+	return smem_item;
+}
+
+/**
+ * smp2p_init_header - Initializes the header of the smem item.
+ *
+ * @header_ptr: Pointer to the smp2p header.
+ * @local_pid: Local processor ID.
+ * @remote_pid: Remote processor ID.
+ * @features: Features of smp2p implementation.
+ * @version: Version of smp2p implementation.
+ *
+ * Initializes the header as defined in the protocol specification.
+ */
+void smp2p_init_header(struct smp2p_smem __iomem *header_ptr,
+		int local_pid, int remote_pid,
+		uint32_t features, uint32_t version)
+{
+	header_ptr->magic = SMP2P_MAGIC;
+	SMP2P_SET_LOCAL_PID(header_ptr->rem_loc_proc_id, local_pid);
+	SMP2P_SET_REMOTE_PID(header_ptr->rem_loc_proc_id, remote_pid);
+	SMP2P_SET_FEATURES(header_ptr->feature_version, features);
+	SMP2P_SET_ENT_TOTAL(header_ptr->valid_total_ent, SMP2P_MAX_ENTRY);
+	SMP2P_SET_ENT_VALID(header_ptr->valid_total_ent, 0);
+	header_ptr->flags = 0;
+
+	/* ensure that all fields are valid before version is written */
+	wmb();
+	SMP2P_SET_VERSION(header_ptr->feature_version, version);
+}
+
+/**
+ * smp2p_do_negotiation - Implements negotiation algorithm.
+ *
+ * @remote_pid: Remote processor ID.
+ * @out_item: Pointer to the outbound list item.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Must be called with out_item_lock_lha1 locked.  Will internally lock
+ * in_item_lock_lhb1.
+ */
+static int smp2p_do_negotiation(int remote_pid,
+		struct smp2p_out_list_item *out_item)
+{
+	struct smp2p_smem __iomem *r_smem_ptr;
+	struct smp2p_smem __iomem *l_smem_ptr;
+	uint32_t r_version;
+	uint32_t r_feature;
+	uint32_t l_version, l_feature;
+	int prev_state;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !out_item)
+		return -EINVAL;
+	if (out_item->smem_edge_state == SMP2P_EDGE_STATE_FAILED)
+		return -EPERM;
+
+	prev_state = out_item->smem_edge_state;
+
+	/* create local item */
+	if (!out_item->smem_edge_out) {
+		out_item->smem_edge_out = smp2p_get_local_smem_item(remote_pid);
+		if (!out_item->smem_edge_out) {
+			SMP2P_ERR(
+				"%s unable to allocate SMEM item for pid %d\n",
+				__func__, remote_pid);
+			return -ENODEV;
+		}
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_OPENING;
+	}
+	l_smem_ptr = out_item->smem_edge_out;
+
+	/* retrieve remote side and version */
+	spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+	r_smem_ptr = smp2p_get_remote_smem_item(remote_pid, out_item);
+	spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+
+	r_version = 0;
+	if (r_smem_ptr) {
+		r_version = SMP2P_GET_VERSION(r_smem_ptr->feature_version);
+		r_feature = SMP2P_GET_FEATURES(r_smem_ptr->feature_version);
+	}
+
+	if (r_version == 0) {
+		/*
+		 * Either remote side doesn't exist, or is in the
+		 * process of being initialized (the version is set last).
+		 *
+		 * In either case, treat as if the other side doesn't exist
+		 * and write out our maximum supported version.
+		 */
+		r_smem_ptr = NULL;
+		r_version = ARRAY_SIZE(version_if) - 1;
+		r_feature = ~0U;
+	}
+
+	/* find maximum supported version and feature set */
+	l_version = min(r_version, (uint32_t)ARRAY_SIZE(version_if) - 1);
+	for (; l_version > 0; --l_version) {
+		if (!version_if[l_version].is_supported)
+			continue;
+
+		/* found valid version */
+		l_feature = version_if[l_version].negotiate_features(~0U);
+		if (l_version == r_version)
+			l_feature &= r_feature;
+		break;
+	}
+
+	if (l_version == 0) {
+		SMP2P_ERR(
+			"%s: negotiation failure pid %d: RV %d RF %x\n",
+			__func__, remote_pid, r_version, r_feature
+			);
+		SMP2P_SET_VERSION(l_smem_ptr->feature_version,
+			SMP2P_EDGE_STATE_FAILED);
+		smp2p_send_interrupt(remote_pid);
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_FAILED;
+		return -EPERM;
+	}
+
+	/* update header and notify remote side */
+	smp2p_init_header(l_smem_ptr, SMP2P_APPS_PROC, remote_pid,
+		l_feature, l_version);
+	smp2p_send_interrupt(remote_pid);
+
+	/* handle internal state changes */
+	if (r_smem_ptr && l_version == r_version &&
+			l_feature == r_feature) {
+		struct msm_smp2p_out *pos;
+
+		/* negotiation complete */
+		out_item->ops_ptr = &version_if[l_version];
+		out_item->ops_ptr->negotiation_complete(out_item);
+		out_item->smem_edge_state = SMP2P_EDGE_STATE_OPENED;
+		SMP2P_INFO(
+			"%s: negotiation complete pid %d: State %d->%d F0x%08x\n",
+			__func__, remote_pid, prev_state,
+			out_item->smem_edge_state, l_feature);
+
+		/* create any pending outbound entries */
+		list_for_each_entry(pos, &out_item->list, out_edge_list) {
+			out_item->ops_ptr->create_entry(pos);
+		}
+
+		/* update inbound edge */
+		spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+		(void)out_item->ops_ptr->validate_size(remote_pid, r_smem_ptr,
+				in_list[remote_pid].item_size);
+		in_list[remote_pid].smem_edge_in = r_smem_ptr;
+		spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+	} else {
+		SMP2P_INFO("%s: negotiation pid %d: State %d->%d F0x%08x\n",
+			__func__, remote_pid, prev_state,
+			out_item->smem_edge_state, l_feature);
+	}
+	return 0;
+}
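+
+/*
+ * Negotiation walk-through (derived from version_if[] and the code above):
+ * on the first create for an edge, the local side writes its highest
+ * supported version (1) with the features returned by
+ * negotiate_features(~0U) and interrupts the remote.  Once the remote item
+ * shows the same version and feature set, the edge moves to
+ * SMP2P_EDGE_STATE_OPENED, any pending outbound entries are created and the
+ * inbound item is size-validated and attached.
+ */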
+
+/**
+ * msm_smp2p_out_open - Opens an outbound entry.
+ *
+ * @remote_pid: Outbound processor ID.
+ * @name: Name of the entry.
+ * @open_notifier: Notifier block for the open notification.
+ * @handle: Handle to the smem entry structure.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Opens an outbound entry with the name specified by @name, from the
+ * local processor to the remote processor (@remote_pid). If @name,
+ * @remote_pid and @open_notifier are valid, then @handle will be set and
+ * zero will be returned. The SMEM item that holds this entry will be
+ * created, if it has not been already, according to the version
+ * negotiation algorithm.
+ * The open_notifier will be used to notify the clients about the
+ * availability of the entry.
+ */
+int msm_smp2p_out_open(int remote_pid, const char *name,
+				   struct notifier_block *open_notifier,
+				   struct msm_smp2p_out **handle)
+{
+	struct msm_smp2p_out *out_entry;
+	struct msm_smp2p_out *pos;
+	int ret = 0;
+	unsigned long flags;
+
+	if (handle)
+		*handle = NULL;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !open_notifier || !handle)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	/* Allocate the smp2p object and node */
+	out_entry = kzalloc(sizeof(*out_entry), GFP_KERNEL);
+	if (!out_entry)
+		return -ENOMEM;
+
+	/* Handle duplicate registration */
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	list_for_each_entry(pos, &out_list[remote_pid].list,
+			out_edge_list) {
+		if (!strcmp(pos->name, name)) {
+			spin_unlock_irqrestore(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+			kfree(out_entry);
+			SMP2P_ERR("%s: duplicate registration '%s':%d\n",
+				__func__, name, remote_pid);
+			return -EBUSY;
+		}
+	}
+
+	out_entry->remote_pid = remote_pid;
+	RAW_INIT_NOTIFIER_HEAD(&out_entry->msm_smp2p_notifier_list);
+	strlcpy(out_entry->name, name, SMP2P_MAX_ENTRY_NAME);
+	out_entry->open_nb = open_notifier;
+	raw_notifier_chain_register(&out_entry->msm_smp2p_notifier_list,
+		  out_entry->open_nb);
+	list_add(&out_entry->out_edge_list, &out_list[remote_pid].list);
+
+	ret = out_list[remote_pid].ops_ptr->create_entry(out_entry);
+	if (ret) {
+		list_del(&out_entry->out_edge_list);
+		raw_notifier_chain_unregister(
+			&out_entry->msm_smp2p_notifier_list,
+			out_entry->open_nb);
+		spin_unlock_irqrestore(
+			&out_list[remote_pid].out_item_lock_lha1, flags);
+		kfree(out_entry);
+		SMP2P_ERR("%s: unable to open '%s':%d error %d\n",
+				__func__, name, remote_pid, ret);
+		return ret;
+	}
+	spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+	*handle = out_entry;
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_out_open);
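+
+/*
+ * Minimal usage sketch.  The notifier, handle and entry name below are
+ * hypothetical and only illustrate the calling convention:
+ *
+ *	static int my_open_nb(struct notifier_block *nb, unsigned long event,
+ *			      void *data)
+ *	{
+ *		if (event == SMP2P_OPEN)
+ *			pr_debug("outbound entry is ready\n");
+ *		return NOTIFY_OK;
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_open_nb };
+ *	static struct msm_smp2p_out *my_handle;
+ *
+ *	ret = msm_smp2p_out_open(SMP2P_MODEM_PROC, "my-entry", &my_nb,
+ *				 &my_handle);
+ *
+ * A return value of -EPROBE_DEFER means the edge has not been probed yet
+ * and the call should be retried later.
+ */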
+
+/**
+ * msm_smp2p_out_close - Closes the handle to an outbound entry.
+ *
+ * @handle: Pointer to smp2p out entry handle.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The actual entry will not be deleted and can be re-opened at a later
+ * time.  The handle will be set to NULL.
+ */
+int msm_smp2p_out_close(struct msm_smp2p_out **handle)
+{
+	unsigned long flags;
+	struct msm_smp2p_out *out_entry;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle || !*handle)
+		return -EINVAL;
+
+	out_entry = *handle;
+	*handle = NULL;
+
+	if ((out_entry->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[out_entry->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, out_entry->remote_pid, out_entry->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[out_entry->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	list_del(&out_entry->out_edge_list);
+	raw_notifier_chain_unregister(&out_entry->msm_smp2p_notifier_list,
+		out_entry->open_nb);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	kfree(out_entry);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_out_close);
+
+/**
+ * msm_smp2p_out_read - Allows reading the entry.
+ *
+ * @handle: Handle to the smem entry structure.
+ * @data: Out pointer that holds the read data.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Allows reading of the outbound entry for read-modify-write
+ * operation.
+ */
+int msm_smp2p_out_read(struct msm_smp2p_out *handle, uint32_t *data)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle || !data)
+		return ret;
+
+	if ((handle->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	ret = out_item->ops_ptr->read_entry(handle, data);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_out_read);
+
+/**
+ * msm_smp2p_out_write - Allows writing to the entry.
+ *
+ * @handle: Handle to smem entry structure.
+ * @data: Data that has to be written.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Writes a new value to the outbound entry. Multiple back-to-back writes
+ * may overwrite previous writes before the remote processor gets a chance
+ * to see them, leading to an ABA race condition. Clients must implement
+ * their own synchronization mechanism (such as an echo mechanism) if this
+ * is not acceptable.
+ */
+int msm_smp2p_out_write(struct msm_smp2p_out *handle, uint32_t data)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle)
+		return ret;
+
+	if ((handle->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	ret = out_item->ops_ptr->write_entry(handle, data);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_out_write);
+
+/**
+ * msm_smp2p_out_modify - Modifies the entry.
+ *
+ * @handle: Handle to the smem entry structure.
+ * @set_mask: Specifies the bits that need to be set.
+ * @clear_mask: Specifies the bits that need to be cleared.
+ * @send_irq: Flag to send interrupt to remote processor.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * The modification is done by ANDing with the complement of @clear_mask and
+ * then ORing in @set_mask. The clear mask is applied first, so if a bit is
+ * set in both the clear mask and the set mask, the result is a set bit.
+ * Multiple back-to-back modifications may overwrite previous values before
+ * the remote processor gets a chance to see them, leading to an ABA race
+ * condition. Clients must implement their own synchronization mechanism
+ * (such as an echo mechanism) if this is not acceptable.
+ */
+int msm_smp2p_out_modify(struct msm_smp2p_out *handle, uint32_t set_mask,
+					uint32_t clear_mask, bool send_irq)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+
+	if (!handle)
+		return ret;
+
+	if ((handle->remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[handle->remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+			__func__, handle->remote_pid, handle->name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[handle->remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	ret = out_item->ops_ptr->modify_entry(handle, set_mask,
+						clear_mask, send_irq);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_out_modify);
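+
+/*
+ * Example (the handle and masks are illustrative): clear bit 0, set bit 1
+ * and interrupt the remote side in a single read-modify-write:
+ *
+ *	ret = msm_smp2p_out_modify(my_handle, BIT(1), BIT(0), true);
+ */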
+
+/**
+ * msm_smp2p_in_read - Read an entry on a remote processor.
+ *
+ * @remote_pid: Processor ID of the remote processor.
+ * @name: Name of the entry that is to be read.
+ * @data: Output pointer, the value will be placed here if successful.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+int msm_smp2p_in_read(int remote_pid, const char *name, uint32_t *data)
+{
+	unsigned long flags;
+	struct smp2p_out_list_item *out_item;
+	uint32_t *entry_ptr = NULL;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	out_item = &out_list[remote_pid];
+	spin_lock_irqsave(&out_item->out_item_lock_lha1, flags);
+	spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+
+	if (in_list[remote_pid].smem_edge_in)
+		out_item->ops_ptr->find_entry(
+			in_list[remote_pid].smem_edge_in,
+			in_list[remote_pid].safe_total_entries,
+			(char *)name, &entry_ptr, NULL);
+
+	spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_item->out_item_lock_lha1, flags);
+
+	if (!entry_ptr)
+		return -ENODEV;
+
+	*data = readl_relaxed(entry_ptr);
+	return 0;
+}
+EXPORT_SYMBOL(msm_smp2p_in_read);
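+
+/*
+ * Example (the entry name is hypothetical):
+ *
+ *	uint32_t val;
+ *
+ *	ret = msm_smp2p_in_read(SMP2P_MODEM_PROC, "my-entry", &val);
+ *	if (!ret)
+ *		pr_debug("remote entry value 0x%x\n", val);
+ */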
+
+/**
+ * msm_smp2p_in_register - Register for change notifications on a remote entry.
+ *
+ * @pid: Remote processor ID.
+ * @name: Name of the entry.
+ * @in_notifier: Notifier block used to notify about the event.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Register for change notifications for a remote entry. If the remote entry
+ * does not exist yet, then the registration request will be held until the
+ * remote side opens. Once the entry is open, then the SMP2P_OPEN notification
+ * will be sent. Any changes to the entry will trigger a call to the notifier
+ * block with an SMP2P_ENTRY_UPDATE event and the data field will point to an
+ * msm_smp2p_update_notif structure containing the current and previous value.
+ */
+int msm_smp2p_in_register(int pid, const char *name,
+	struct notifier_block *in_notifier)
+{
+	struct smp2p_in *pos;
+	struct smp2p_in *in = NULL;
+	int ret;
+	unsigned long flags;
+	struct msm_smp2p_update_notif data;
+	uint32_t *entry_ptr;
+
+	if (pid >= SMP2P_NUM_PROCS || !name || !in_notifier)
+		return -EINVAL;
+
+	if ((pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	/* Pre-allocate before taking the spinlock since we will likely need it */
+	in = kzalloc(sizeof(*in), GFP_KERNEL);
+	if (!in)
+		return -ENOMEM;
+
+	/* Search for existing entry */
+	spin_lock_irqsave(&out_list[pid].out_item_lock_lha1, flags);
+	spin_lock(&in_list[pid].in_item_lock_lhb1);
+
+	list_for_each_entry(pos, &in_list[pid].list, in_edge_list) {
+		if (!strcmp(pos->name, name)) {
+			kfree(in);
+			in = pos;
+			break;
+		}
+	}
+
+	/* Create and add it to the list */
+	if (!in->notifier_count) {
+		in->remote_pid = pid;
+		strlcpy(in->name, name, SMP2P_MAX_ENTRY_NAME);
+		RAW_INIT_NOTIFIER_HEAD(&in->in_notifier_list);
+		list_add(&in->in_edge_list, &in_list[pid].list);
+	}
+
+	ret = raw_notifier_chain_register(&in->in_notifier_list,
+			in_notifier);
+	if (ret) {
+		if (!in->notifier_count) {
+			list_del(&in->in_edge_list);
+			kfree(in);
+		}
+		SMP2P_DBG("%s: '%s':%d failed %d\n", __func__, name, pid, ret);
+		goto bail;
+	}
+	in->notifier_count++;
+
+	if (out_list[pid].smem_edge_state == SMP2P_EDGE_STATE_OPENED) {
+		out_list[pid].ops_ptr->find_entry(
+				in_list[pid].smem_edge_in,
+				in_list[pid].safe_total_entries, (char *)name,
+				&entry_ptr, NULL);
+		if (entry_ptr) {
+			in->entry_ptr = entry_ptr;
+			in->prev_entry_val = readl_relaxed(entry_ptr);
+
+			data.previous_value = in->prev_entry_val;
+			data.current_value = in->prev_entry_val;
+			in_notifier->notifier_call(in_notifier, SMP2P_OPEN,
+					(void *)&data);
+		}
+	}
+	SMP2P_DBG("%s: '%s':%d registered\n", __func__, name, pid);
+
+bail:
+	spin_unlock(&in_list[pid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_list[pid].out_item_lock_lha1, flags);
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_in_register);
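+
+/*
+ * Usage sketch (callback, notifier block and entry name are hypothetical):
+ *
+ *	static int my_in_nb(struct notifier_block *nb, unsigned long event,
+ *			    void *data)
+ *	{
+ *		struct msm_smp2p_update_notif *notif = data;
+ *
+ *		if (event == SMP2P_ENTRY_UPDATE)
+ *			pr_debug("0x%x -> 0x%x\n", notif->previous_value,
+ *				 notif->current_value);
+ *		return NOTIFY_OK;
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_in_nb };
+ *
+ *	ret = msm_smp2p_in_register(SMP2P_MODEM_PROC, "my-entry", &my_nb);
+ */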
+
+/**
+ * msm_smp2p_in_unregister - Unregister the notifier for remote entry.
+ *
+ * @remote_pid: Processor ID of the remote processor.
+ * @name: The name of the entry.
+ * @in_notifier: Notifier block passed during registration.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+int msm_smp2p_in_unregister(int remote_pid, const char *name,
+				struct notifier_block *in_notifier)
+{
+	struct smp2p_in *pos;
+	struct smp2p_in *in = NULL;
+	int ret = -ENODEV;
+	unsigned long flags;
+
+	if (remote_pid >= SMP2P_NUM_PROCS || !name || !in_notifier)
+		return -EINVAL;
+
+	if ((remote_pid != SMP2P_REMOTE_MOCK_PROC) &&
+			!smp2p_int_cfgs[remote_pid].is_configured) {
+		SMP2P_INFO("%s before msm_smp2p_init(): pid[%d] name[%s]\n",
+						__func__, remote_pid, name);
+		return -EPROBE_DEFER;
+	}
+
+	spin_lock_irqsave(&in_list[remote_pid].in_item_lock_lhb1, flags);
+	list_for_each_entry(pos, &in_list[remote_pid].list,
+			in_edge_list) {
+		if (!strcmp(pos->name, name)) {
+			in = pos;
+			break;
+		}
+	}
+	if (!in)
+		goto fail;
+
+	ret = raw_notifier_chain_unregister(&pos->in_notifier_list,
+			in_notifier);
+	if (ret == 0) {
+		pos->notifier_count--;
+		if (!pos->notifier_count) {
+			list_del(&pos->in_edge_list);
+			kfree(pos);
+			ret = 0;
+		}
+	} else {
+		SMP2P_ERR("%s: unregister failure '%s':%d\n", __func__,
+			name, remote_pid);
+		ret = -ENODEV;
+	}
+
+fail:
+	spin_unlock_irqrestore(&in_list[remote_pid].in_item_lock_lhb1, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_in_unregister);
+
+/**
+ * smp2p_send_interrupt - Send interrupt to remote system.
+ *
+ * @remote_pid:  Processor ID of the remote system
+ *
+ * Must be called with out_item_lock_lha1 locked.
+ */
+static void smp2p_send_interrupt(int remote_pid)
+{
+	if (smp2p_int_cfgs[remote_pid].name)
+		SMP2P_DBG("SMP2P Int Apps->%s(%d)\n",
+			smp2p_int_cfgs[remote_pid].name, remote_pid);
+
+	++smp2p_int_cfgs[remote_pid].out_interrupt_count;
+	if (remote_pid != SMP2P_REMOTE_MOCK_PROC &&
+			smp2p_int_cfgs[remote_pid].out_int_mask) {
+		/* flush any pending writes before triggering interrupt */
+		wmb();
+		writel_relaxed(smp2p_int_cfgs[remote_pid].out_int_mask,
+			smp2p_int_cfgs[remote_pid].out_int_ptr);
+		writel_relaxed(0,
+			smp2p_int_cfgs[remote_pid].out_int_ptr);
+	} else {
+		smp2p_remote_mock_rx_interrupt();
+	}
+}
+
+/**
+ * smp2p_in_edge_notify - Notifies the entry changed on remote processor.
+ *
+ * @pid: Processor ID of the remote processor.
+ *
+ * This function is invoked on an incoming interrupt.  It scans the list of
+ * clients registered for entries on the remote processor and notifies them
+ * if the data has changed.
+ *
+ * Note:  Edge state must be OPENED to avoid a race condition with
+ *        out_list[pid].ops_ptr->find_entry.
+ */
+static void smp2p_in_edge_notify(int pid)
+{
+	struct smp2p_in *pos;
+	uint32_t *entry_ptr;
+	unsigned long flags;
+	struct smp2p_smem __iomem *smem_h_ptr;
+	uint32_t curr_data;
+	struct  msm_smp2p_update_notif data;
+
+	spin_lock_irqsave(&in_list[pid].in_item_lock_lhb1, flags);
+	smem_h_ptr = in_list[pid].smem_edge_in;
+	if (!smem_h_ptr) {
+		SMP2P_DBG("%s: No remote SMEM item for pid %d\n",
+			__func__, pid);
+		spin_unlock_irqrestore(&in_list[pid].in_item_lock_lhb1, flags);
+		return;
+	}
+
+	list_for_each_entry(pos, &in_list[pid].list, in_edge_list) {
+		if (pos->entry_ptr == NULL) {
+			/* entry not open - try to open it */
+			out_list[pid].ops_ptr->find_entry(smem_h_ptr,
+				in_list[pid].safe_total_entries, pos->name,
+				&entry_ptr, NULL);
+
+			if (entry_ptr) {
+				pos->entry_ptr = entry_ptr;
+				pos->prev_entry_val = 0;
+				data.previous_value = 0;
+				data.current_value = readl_relaxed(entry_ptr);
+				raw_notifier_call_chain(
+					    &pos->in_notifier_list,
+					    SMP2P_OPEN, (void *)&data);
+			}
+		}
+
+		if (pos->entry_ptr != NULL) {
+			/* send update notification */
+			curr_data = readl_relaxed(pos->entry_ptr);
+			if (curr_data != pos->prev_entry_val) {
+				data.previous_value = pos->prev_entry_val;
+				data.current_value = curr_data;
+				pos->prev_entry_val = curr_data;
+				raw_notifier_call_chain(
+					&pos->in_notifier_list,
+					SMP2P_ENTRY_UPDATE, (void *)&data);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&in_list[pid].in_item_lock_lhb1, flags);
+}
+
+/**
+ * smp2p_interrupt_handler - Incoming interrupt handler.
+ *
+ * @irq: Interrupt ID
+ * @data: Edge (remote processor ID cast to a pointer)
+ * @returns: IRQ_HANDLED or IRQ_NONE for invalid interrupt
+ */
+static irqreturn_t smp2p_interrupt_handler(int irq, void *data)
+{
+	unsigned long flags;
+	uint32_t remote_pid = (uint32_t)(uintptr_t)data;
+
+	if (remote_pid >= SMP2P_NUM_PROCS) {
+		SMP2P_ERR("%s: invalid interrupt pid %d\n",
+			__func__, remote_pid);
+		return IRQ_NONE;
+	}
+
+	if (smp2p_int_cfgs[remote_pid].name)
+		SMP2P_DBG("SMP2P Int %s(%d)->Apps\n",
+			smp2p_int_cfgs[remote_pid].name, remote_pid);
+
+	spin_lock_irqsave(&out_list[remote_pid].out_item_lock_lha1, flags);
+	++smp2p_int_cfgs[remote_pid].in_interrupt_count;
+
+	if (out_list[remote_pid].smem_edge_state != SMP2P_EDGE_STATE_OPENED)
+		smp2p_do_negotiation(remote_pid, &out_list[remote_pid]);
+
+	if (out_list[remote_pid].smem_edge_state == SMP2P_EDGE_STATE_OPENED) {
+		bool do_restart_ack;
+
+		/*
+		 * Follow double-check pattern for restart ack since:
+		 * 1) we must notify clients of the X->0 transition
+		 *    that is part of the restart
+		 * 2) lock cannot be held during the
+		 *    smp2p_in_edge_notify() call because clients may do
+		 *    re-entrant calls into our APIs.
+		 *
+		 * smp2p_do_ssr_ack() will only do the ack if it is
+		 * necessary to handle the race condition exposed by
+		 * unlocking the spinlocks.
+		 */
+		spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+		do_restart_ack = smp2p_ssr_ack_needed(remote_pid);
+		spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+		spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+
+		smp2p_in_edge_notify(remote_pid);
+
+		if (do_restart_ack) {
+			spin_lock_irqsave(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+			spin_lock(&in_list[remote_pid].in_item_lock_lhb1);
+
+			smp2p_do_ssr_ack(remote_pid);
+
+			spin_unlock(&in_list[remote_pid].in_item_lock_lhb1);
+			spin_unlock_irqrestore(
+				&out_list[remote_pid].out_item_lock_lha1,
+				flags);
+		}
+	} else {
+		spin_unlock_irqrestore(&out_list[remote_pid].out_item_lock_lha1,
+			flags);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * smp2p_reset_mock_edge - Reinitializes the mock edge.
+ *
+ * @returns: 0 on success, -EAGAIN to retry later.
+ *
+ * Reinitializes the mock edge to initial power-up state values.
+ */
+int smp2p_reset_mock_edge(void)
+{
+	const int rpid = SMP2P_REMOTE_MOCK_PROC;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&out_list[rpid].out_item_lock_lha1, flags);
+	spin_lock(&in_list[rpid].in_item_lock_lhb1);
+
+	if (!list_empty(&out_list[rpid].list) ||
+			!list_empty(&in_list[rpid].list)) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	kfree(out_list[rpid].smem_edge_out);
+	out_list[rpid].smem_edge_out = NULL;
+	out_list[rpid].ops_ptr = &version_if[0];
+	out_list[rpid].smem_edge_state = SMP2P_EDGE_STATE_CLOSED;
+	out_list[rpid].feature_ssr_ack_enabled = false;
+	out_list[rpid].restart_ack = false;
+
+	in_list[rpid].smem_edge_in = NULL;
+	in_list[rpid].item_size = 0;
+	in_list[rpid].safe_total_entries = 0;
+
+fail:
+	spin_unlock(&in_list[rpid].in_item_lock_lhb1);
+	spin_unlock_irqrestore(&out_list[rpid].out_item_lock_lha1, flags);
+
+	return ret;
+}
+
+/**
+ * msm_smp2p_interrupt_handler - Triggers incoming interrupt.
+ *
+ * @remote_pid: Remote processor ID
+ *
+ * This function is used by the remote mock infrastructure for testing.
+ * It simulates the triggering of an incoming interrupt in a test
+ * environment.
+ */
+void msm_smp2p_interrupt_handler(int remote_pid)
+{
+	smp2p_interrupt_handler(0, (void *)(uintptr_t)remote_pid);
+}
+
+/**
+ * msm_smp2p_probe - Device tree probe function.
+ *
+ * @pdev: Pointer to device tree data.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int msm_smp2p_probe(struct platform_device *pdev)
+{
+	struct resource *r;
+	void *irq_out_ptr = NULL;
+	char *key;
+	uint32_t edge;
+	int ret;
+	struct device_node *node;
+	uint32_t irq_bitmask;
+	uint32_t irq_line;
+	void *temp_p;
+	unsigned int temp_sz;
+
+	node = pdev->dev.of_node;
+
+	key = "qcom,remote-pid";
+	ret = of_property_read_u32(node, key, &edge);
+	if (ret) {
+		SMP2P_ERR("%s: missing edge '%s'\n", __func__, key);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!r) {
+		SMP2P_ERR("%s: failed gathering irq-reg resource for edge %d\n"
+				, __func__, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+	irq_out_ptr = ioremap_nocache(r->start, resource_size(r));
+	if (!irq_out_ptr) {
+		SMP2P_ERR("%s: failed remap from phys to virt for edge %d\n",
+				__func__, edge);
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	key = "qcom,irq-bitmask";
+	ret = of_property_read_u32(node, key, &irq_bitmask);
+	if (ret)
+		goto missing_key;
+
+	key = "interrupts";
+	irq_line = platform_get_irq(pdev, 0);
+	if (irq_line == -ENXIO)
+		goto missing_key;
+
+	/*
+	 * We depend on the SMEM driver, so do a test access to see if SMEM is
+	 * ready.  We don't want any side effects at this time (so no alloc)
+	 * and the return doesn't matter, so long as it is not -EPROBE_DEFER.
+	 */
+	temp_p = smem_get_entry(
+		smp2p_get_smem_item_id(SMP2P_APPS_PROC, SMP2P_MODEM_PROC),
+		&temp_sz,
+		0,
+		SMEM_ANY_HOST_FLAG);
+	if (PTR_ERR(temp_p) == -EPROBE_DEFER) {
+		SMP2P_INFO("%s: edge:%d probe before smem ready\n", __func__,
+									edge);
+		ret = -EPROBE_DEFER;
+		goto fail;
+	}
+
+	ret = request_irq(irq_line, smp2p_interrupt_handler,
+			IRQF_TRIGGER_RISING, "smp2p", (void *)(uintptr_t)edge);
+	if (ret < 0) {
+		SMP2P_ERR("%s: request_irq() failed on %d (edge %d)\n",
+				__func__, irq_line, edge);
+		ret = -ENODEV;
+		goto fail;
+	}
+
+	ret = enable_irq_wake(irq_line);
+	if (ret < 0)
+		SMP2P_ERR("%s: enable_irq_wake() failed on %d (edge %d)\n",
+				__func__, irq_line, edge);
+
+	/*
+	 * Set entry (keep is_configured last to prevent usage before
+	 * initialization).
+	 */
+	smp2p_int_cfgs[edge].in_int_id = irq_line;
+	smp2p_int_cfgs[edge].out_int_mask = irq_bitmask;
+	smp2p_int_cfgs[edge].out_int_ptr = irq_out_ptr;
+	smp2p_int_cfgs[edge].is_configured = true;
+	return 0;
+
+missing_key:
+	SMP2P_ERR("%s: missing '%s' for edge %d\n", __func__, key, edge);
+	ret = -ENODEV;
+fail:
+	if (irq_out_ptr)
+		iounmap(irq_out_ptr);
+	return ret;
+}
+
+static const struct of_device_id msm_smp2p_match_table[] = {
+	{ .compatible = "qcom,smp2p" },
+	{},
+};
+
+static struct platform_driver msm_smp2p_driver = {
+	.probe = msm_smp2p_probe,
+	.driver = {
+		.name = "msm_smp2p",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_match_table,
+	},
+};
+
+/**
+ * msm_smp2p_init -  Initialization function for the module.
+ *
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int __init msm_smp2p_init(void)
+{
+	int i;
+	int rc;
+
+	for (i = 0; i < SMP2P_NUM_PROCS; i++) {
+		spin_lock_init(&out_list[i].out_item_lock_lha1);
+		INIT_LIST_HEAD(&out_list[i].list);
+		out_list[i].smem_edge_out = NULL;
+		out_list[i].smem_edge_state = SMP2P_EDGE_STATE_CLOSED;
+		out_list[i].ops_ptr = &version_if[0];
+		out_list[i].feature_ssr_ack_enabled = false;
+		out_list[i].restart_ack = false;
+
+		spin_lock_init(&in_list[i].in_item_lock_lhb1);
+		INIT_LIST_HEAD(&in_list[i].list);
+		in_list[i].smem_edge_in = NULL;
+	}
+
+	log_ctx = ipc_log_context_create(NUM_LOG_PAGES, "smp2p", 0);
+	if (!log_ctx)
+		SMP2P_ERR("%s: unable to create log context\n", __func__);
+
+	rc = platform_driver_register(&msm_smp2p_driver);
+	if (rc) {
+		SMP2P_ERR("%s: msm_smp2p_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	return 0;
+}
+module_init(msm_smp2p_init);
+
+MODULE_DESCRIPTION("MSM Shared Memory Point to Point");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
new file mode 100644
index 0000000..1e593e0
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -0,0 +1,1112 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/elf.h>
+#include <linux/mutex.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/rwsem.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/list_sort.h>
+#include <linux/idr.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include <linux/uaccess.h>
+#include <asm/setup.h>
+
+#include "peripheral-loader.h"
+
+#define pil_err(desc, fmt, ...)						\
+	dev_err(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+#define pil_info(desc, fmt, ...)					\
+	dev_info(desc->dev, "%s: " fmt, desc->name, ##__VA_ARGS__)
+
+#if defined(CONFIG_ARM)
+#define pil_memset_io(d, c, count) memset(d, c, count)
+#else
+#define pil_memset_io(d, c, count) memset_io(d, c, count)
+#endif
+
+#define PIL_NUM_DESC		10
+static void __iomem *pil_info_base;
+
+/**
+ * proxy_timeout_ms - Override for proxy vote timeouts
+ * -1: Use driver-specified timeout
+ *  0: Hold proxy votes until shutdown
+ * >0: Specify a custom timeout in ms
+ */
+static int proxy_timeout_ms = -1;
+module_param(proxy_timeout_ms, int, 0644);
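+
+/*
+ * For example (assuming this file builds with KBUILD_MODNAME
+ * "peripheral_loader"), proxy votes can be held until shutdown by booting
+ * with peripheral_loader.proxy_timeout_ms=0, or the timeout can be changed
+ * at runtime through
+ * /sys/module/peripheral_loader/parameters/proxy_timeout_ms.
+ */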
+
+static bool disable_timeouts;
+/**
+ * struct pil_mdt - Representation of <name>.mdt file in memory
+ * @hdr: ELF32 header
+ * @phdr: ELF32 program headers
+ */
+struct pil_mdt {
+	struct elf32_hdr hdr;
+	struct elf32_phdr phdr[];
+};
+
+/**
+ * struct pil_seg - memory map representing one segment
+ * @list: links this segment into the descriptor's list of segments
+ * @paddr: physical start address of segment
+ * @sz: size of segment
+ * @filesz: size of segment on disk
+ * @num: segment number
+ * @relocated: true if segment is relocated, false otherwise
+ *
+ * Loosely based on an elf program header. Contains all necessary information
+ * to load and initialize a segment of the image in memory.
+ */
+struct pil_seg {
+	phys_addr_t paddr;
+	unsigned long sz;
+	unsigned long filesz;
+	int num;
+	struct list_head list;
+	bool relocated;
+};
+
+/**
+ * struct pil_priv - Private state for a pil_desc
+ * @proxy: work item used to run the proxy unvoting routine
+ * @ws: wakeup source to prevent suspend during pil_boot
+ * @wname: name of @ws
+ * @desc: pointer to pil_desc this is private data for
+ * @segs: list of segments sorted by physical address
+ * @entry_addr: physical address where processor starts booting at
+ * @base_addr: smallest start address among all segments that are relocatable
+ * @region_start: address where relocatable region starts or lowest address
+ * for non-relocatable images
+ * @region_end: address where relocatable region ends or highest address for
+ * non-relocatable images
+ * @region: region allocated for relocatable images
+ * @unvoted_flag: flag to keep track of whether we have unvoted or not.
+ *
+ * This struct contains data for a pil_desc that should not be exposed outside
+ * of this file. This structure points to the descriptor and the descriptor
+ * points to this structure so that PIL drivers can't access the private
+ * data of a descriptor but this file can access both.
+ */
+struct pil_priv {
+	struct delayed_work proxy;
+	struct wakeup_source ws;
+	char wname[32];
+	struct pil_desc *desc;
+	struct list_head segs;
+	phys_addr_t entry_addr;
+	phys_addr_t base_addr;
+	phys_addr_t region_start;
+	phys_addr_t region_end;
+	void *region;
+	struct pil_image_info __iomem *info;
+	int id;
+	int unvoted_flag;
+	size_t region_size;
+};
+
+/**
+ * pil_do_ramdump() - Ramdump an image
+ * @desc: descriptor from pil_desc_init()
+ * @ramdump_dev: ramdump device returned from create_ramdump_device()
+ *
+ * Calls the ramdump API with a list of segments generated from the addresses
+ * that the descriptor corresponds to.
+ */
+int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+	int count = 0, ret;
+	struct ramdump_segment *ramdump_segs, *s;
+
+	list_for_each_entry(seg, &priv->segs, list)
+		count++;
+
+	ramdump_segs = kcalloc(count, sizeof(*ramdump_segs), GFP_KERNEL);
+	if (!ramdump_segs)
+		return -ENOMEM;
+
+	if (desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+
+	s = ramdump_segs;
+	list_for_each_entry(seg, &priv->segs, list) {
+		s->address = seg->paddr;
+		s->size = seg->sz;
+		s++;
+	}
+
+	ret = do_elf_ramdump(ramdump_dev, ramdump_segs, count);
+	kfree(ramdump_segs);
+
+	if (!ret && desc->subsys_vmid > 0)
+		ret = pil_assign_mem_to_subsys(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_do_ramdump);
+
+int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+							size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[1] = {desc->subsys_vmid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys);
+
+int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+							size_t size)
+{
+	int ret;
+	int srcVM[1] = {desc->subsys_vmid};
+	int destVM[1] = {VMID_HLOS};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE | PERM_EXEC};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 1);
+	if (ret)
+		panic("%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_linux);
+
+int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	int ret;
+	int srcVM[1] = {VMID_HLOS};
+	int destVM[2] = {VMID_HLOS, desc->subsys_vmid};
+	int destVMperm[2] = {PERM_READ | PERM_WRITE, PERM_READ | PERM_WRITE};
+
+	ret = hyp_assign_phys(addr, size, srcVM, 1, destVM, destVMperm, 2);
+	if (ret)
+		pil_err(desc, "%s: failed for %pa address of size %zx - subsys VMid %d rc:%d\n",
+				__func__, &addr, size, desc->subsys_vmid, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_assign_mem_to_subsys_and_linux);
+
+int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid)
+{
+	int ret;
+	int srcVM[2] = {VMID_HLOS, desc->subsys_vmid};
+	int destVM[1] = {VMid};
+	int destVMperm[1] = {PERM_READ | PERM_WRITE};
+
+	if (VMid == VMID_HLOS)
+		destVMperm[0] = PERM_READ | PERM_WRITE | PERM_EXEC;
+
+	ret = hyp_assign_phys(addr, size, srcVM, 2, destVM, destVMperm, 1);
+	if (ret)
+		panic("%s: failed for %pa address of size %zx - subsys VMid %d. Fatal error.\n",
+				__func__, &addr, size, desc->subsys_vmid);
+
+	return ret;
+}
+EXPORT_SYMBOL(pil_reclaim_mem);
+
+/**
+ * pil_get_entry_addr() - Retrieve the entry address of a peripheral image
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns the physical address where the image boots at or 0 if unknown.
+ */
+phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return desc->priv ? desc->priv->entry_addr : 0;
+}
+EXPORT_SYMBOL(pil_get_entry_addr);
+
+static void __pil_proxy_unvote(struct pil_priv *priv)
+{
+	struct pil_desc *desc = priv->desc;
+
+	desc->ops->proxy_unvote(desc);
+	notify_proxy_unvote(desc->dev);
+	__pm_relax(&priv->ws);
+	module_put(desc->owner);
+}
+
+static void pil_proxy_unvote_work(struct work_struct *work)
+{
+	struct delayed_work *delayed = to_delayed_work(work);
+	struct pil_priv *priv = container_of(delayed, struct pil_priv, proxy);
+
+	__pil_proxy_unvote(priv);
+}
+
+static int pil_proxy_vote(struct pil_desc *desc)
+{
+	int ret = 0;
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->proxy_vote) {
+		__pm_stay_awake(&priv->ws);
+		ret = desc->ops->proxy_vote(desc);
+		if (ret)
+			__pm_relax(&priv->ws);
+	}
+
+	if (desc->proxy_unvote_irq)
+		enable_irq(desc->proxy_unvote_irq);
+	notify_proxy_vote(desc->dev);
+
+	return ret;
+}
+
+static void pil_proxy_unvote(struct pil_desc *desc, int immediate)
+{
+	struct pil_priv *priv = desc->priv;
+	unsigned long timeout;
+
+	if (proxy_timeout_ms == 0 && !immediate)
+		return;
+	else if (proxy_timeout_ms > 0)
+		timeout = proxy_timeout_ms;
+	else
+		timeout = desc->proxy_timeout;
+
+	if (desc->ops->proxy_unvote) {
+		if (WARN_ON(!try_module_get(desc->owner)))
+			return;
+
+		if (immediate)
+			timeout = 0;
+
+		if (!desc->proxy_unvote_irq || immediate)
+			schedule_delayed_work(&priv->proxy,
+					      msecs_to_jiffies(timeout));
+	}
+}
+
+static irqreturn_t proxy_unvote_intr_handler(int irq, void *dev_id)
+{
+	struct pil_desc *desc = dev_id;
+	struct pil_priv *priv = desc->priv;
+
+	pil_info(desc, "Power/Clock ready interrupt received\n");
+	if (!desc->priv->unvoted_flag) {
+		desc->priv->unvoted_flag = 1;
+		__pil_proxy_unvote(priv);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static bool segment_is_relocatable(const struct elf32_phdr *p)
+{
+	return !!(p->p_flags & BIT(27));
+}
+
+static phys_addr_t pil_reloc(const struct pil_priv *priv, phys_addr_t addr)
+{
+	return addr - priv->base_addr + priv->region_start;
+}
+
+static struct pil_seg *pil_init_seg(const struct pil_desc *desc,
+				  const struct elf32_phdr *phdr, int num)
+{
+	bool reloc = segment_is_relocatable(phdr);
+	const struct pil_priv *priv = desc->priv;
+	struct pil_seg *seg;
+
+	if (!reloc && memblock_overlaps_memory(phdr->p_paddr, phdr->p_memsz)) {
+		pil_err(desc, "Segment not relocatable, kernel memory would be overwritten [%#08lx, %#08lx)\n",
+			(unsigned long)phdr->p_paddr,
+			(unsigned long)(phdr->p_paddr + phdr->p_memsz));
+		return ERR_PTR(-EPERM);
+	}
+
+	if (phdr->p_filesz > phdr->p_memsz) {
+		pil_err(desc, "Segment %d: file size (%u) is greater than mem size (%u).\n",
+			num, phdr->p_filesz, phdr->p_memsz);
+		return ERR_PTR(-EINVAL);
+	}
+
+	seg = kmalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		return ERR_PTR(-ENOMEM);
+	seg->num = num;
+	seg->paddr = reloc ? pil_reloc(priv, phdr->p_paddr) : phdr->p_paddr;
+	seg->filesz = phdr->p_filesz;
+	seg->sz = phdr->p_memsz;
+	seg->relocated = reloc;
+	INIT_LIST_HEAD(&seg->list);
+
+	return seg;
+}
+
+#define segment_is_hash(flag) (((flag) & (0x7 << 24)) == (0x2 << 24))
+
+static int segment_is_loadable(const struct elf32_phdr *p)
+{
+	return (p->p_type == PT_LOAD) && !segment_is_hash(p->p_flags) &&
+		p->p_memsz;
+}
+
+static void pil_dump_segs(const struct pil_priv *priv)
+{
+	struct pil_seg *seg;
+	phys_addr_t seg_h_paddr;
+
+	list_for_each_entry(seg, &priv->segs, list) {
+		seg_h_paddr = seg->paddr + seg->sz;
+		pil_info(priv->desc, "%d: %pa %pa\n", seg->num,
+				&seg->paddr, &seg_h_paddr);
+	}
+}
+
+/*
+ * Ensure the entry address lies within the image limits and if the image is
+ * relocatable ensure it lies within a relocatable segment.
+ */
+static int pil_init_entry_addr(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	struct pil_seg *seg;
+	phys_addr_t entry = mdt->hdr.e_entry;
+	bool image_relocated = priv->region;
+
+	if (image_relocated)
+		entry = pil_reloc(priv, entry);
+	priv->entry_addr = entry;
+
+	if (priv->desc->flags & PIL_SKIP_ENTRY_CHECK)
+		return 0;
+
+	list_for_each_entry(seg, &priv->segs, list) {
+		if (entry >= seg->paddr && entry < seg->paddr + seg->sz) {
+			if (!image_relocated)
+				return 0;
+			else if (seg->relocated)
+				return 0;
+		}
+	}
+	pil_err(priv->desc, "entry address %pa not within range\n", &entry);
+	pil_dump_segs(priv);
+	return -EADDRNOTAVAIL;
+}
+
+static int pil_alloc_region(struct pil_priv *priv, phys_addr_t min_addr,
+				phys_addr_t max_addr, size_t align)
+{
+	void *region;
+	size_t size = max_addr - min_addr;
+	size_t aligned_size;
+
+	/* Don't reallocate due to fragmentation concerns, just sanity check */
+	if (priv->region) {
+		if (WARN(priv->region_end - priv->region_start < size,
+			"Can't reuse PIL memory, too small\n"))
+			return -ENOMEM;
+		return 0;
+	}
+
+	if (align > SZ_4M)
+		aligned_size = ALIGN(size, SZ_4M);
+	else
+		aligned_size = ALIGN(size, SZ_1M);
+
+	priv->desc->attrs = 0;
+	priv->desc->attrs |= DMA_ATTR_SKIP_ZEROING | DMA_ATTR_NO_KERNEL_MAPPING;
+
+	region = dma_alloc_attrs(priv->desc->dev, aligned_size,
+				&priv->region_start, GFP_KERNEL,
+				priv->desc->attrs);
+
+	if (region == NULL) {
+		pil_err(priv->desc, "Failed to allocate relocatable region of size %zx\n",
+					size);
+		return -ENOMEM;
+	}
+
+	priv->region = region;
+	priv->region_end = priv->region_start + size;
+	priv->base_addr = min_addr;
+	priv->region_size = aligned_size;
+
+	return 0;
+}
+
+static int pil_setup_region(struct pil_priv *priv, const struct pil_mdt *mdt)
+{
+	const struct elf32_phdr *phdr;
+	phys_addr_t min_addr_r, min_addr_n, max_addr_r, max_addr_n, start, end;
+	size_t align = 0;
+	int i, ret = 0;
+	bool relocatable = false;
+
+	min_addr_n = min_addr_r = (phys_addr_t)ULLONG_MAX;
+	max_addr_n = max_addr_r = 0;
+
+	/* Find the image limits */
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		start = phdr->p_paddr;
+		end = start + phdr->p_memsz;
+
+		if (segment_is_relocatable(phdr)) {
+			min_addr_r = min(min_addr_r, start);
+			max_addr_r = max(max_addr_r, end);
+			/*
+			 * Lowest relocatable segment dictates alignment of
+			 * relocatable region
+			 */
+			if (min_addr_r == start)
+				align = phdr->p_align;
+			relocatable = true;
+		} else {
+			min_addr_n = min(min_addr_n, start);
+			max_addr_n = max(max_addr_n, end);
+		}
+
+	}
+
+	/*
+	 * Align the max address to the next 4K boundary to satisfy iommus and
+	 * XPUs that operate on 4K chunks.
+	 */
+	max_addr_n = ALIGN(max_addr_n, SZ_4K);
+	max_addr_r = ALIGN(max_addr_r, SZ_4K);
+
+	if (relocatable) {
+		ret = pil_alloc_region(priv, min_addr_r, max_addr_r, align);
+	} else {
+		priv->region_start = min_addr_n;
+		priv->region_end = max_addr_n;
+		priv->base_addr = min_addr_n;
+	}
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &priv->region_start,
+					sizeof(priv->region_start) / 4);
+		writel_relaxed(priv->region_end - priv->region_start,
+				&priv->info->size);
+	}
+
+	return ret;
+}
+
+static int pil_cmp_seg(void *priv, struct list_head *a, struct list_head *b)
+{
+	int ret = 0;
+	struct pil_seg *seg_a = list_entry(a, struct pil_seg, list);
+	struct pil_seg *seg_b = list_entry(b, struct pil_seg, list);
+
+	if (seg_a->paddr < seg_b->paddr)
+		ret = -1;
+	else if (seg_a->paddr > seg_b->paddr)
+		ret = 1;
+
+	return ret;
+}
+
+static int pil_init_mmap(struct pil_desc *desc, const struct pil_mdt *mdt)
+{
+	struct pil_priv *priv = desc->priv;
+	const struct elf32_phdr *phdr;
+	struct pil_seg *seg;
+	int i, ret;
+
+	ret = pil_setup_region(priv, mdt);
+	if (ret)
+		return ret;
+
+	pil_info(desc, "loading from %pa to %pa\n", &priv->region_start,
+							&priv->region_end);
+
+	for (i = 0; i < mdt->hdr.e_phnum; i++) {
+		phdr = &mdt->phdr[i];
+		if (!segment_is_loadable(phdr))
+			continue;
+
+		seg = pil_init_seg(desc, phdr, i);
+		if (IS_ERR(seg))
+			return PTR_ERR(seg);
+
+		list_add_tail(&seg->list, &priv->segs);
+	}
+	list_sort(NULL, &priv->segs, pil_cmp_seg);
+
+	return pil_init_entry_addr(priv, mdt);
+}
+
+static void pil_release_mmap(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+	struct pil_seg *p, *tmp;
+	u64 zero = 0ULL;
+
+	if (priv->info) {
+		__iowrite32_copy(&priv->info->start, &zero,
+					sizeof(zero) / 4);
+		writel_relaxed(0, &priv->info->size);
+	}
+
+	list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+		list_del(&p->list);
+		kfree(p);
+	}
+}
+
+#define IOMAP_SIZE SZ_1M
+
+struct pil_map_fw_info {
+	void *region;
+	unsigned long attrs;
+	phys_addr_t base_addr;
+	struct device *dev;
+};
+
+static void *map_fw_mem(phys_addr_t paddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	return dma_remap(info->dev, info->region, paddr, size,
+					info->attrs);
+}
+
+static void unmap_fw_mem(void *vaddr, size_t size, void *data)
+{
+	struct pil_map_fw_info *info = data;
+
+	dma_unremap(info->dev, vaddr, size);
+}
+
+static int pil_load_seg(struct pil_desc *desc, struct pil_seg *seg)
+{
+	int ret = 0, count;
+	phys_addr_t paddr;
+	char fw_name[30];
+	int num = seg->num;
+	const struct firmware *fw = NULL;
+	struct pil_map_fw_info map_fw_info = {
+		.attrs = desc->attrs,
+		.region = desc->priv->region,
+		.base_addr = desc->priv->region_start,
+		.dev = desc->dev,
+	};
+	void *map_data = desc->map_data ? desc->map_data : &map_fw_info;
+
+	if (seg->filesz) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d",
+				desc->fw_name, num);
+		ret = request_firmware_into_buf(&fw, fw_name, desc->dev,
+						map_data, seg->filesz);
+		if (ret < 0) {
+			pil_err(desc, "Failed to locate blob %s or blob is too big(rc:%d)\n",
+				fw_name, ret);
+			return ret;
+		}
+
+		if (ret != seg->filesz) {
+			pil_err(desc, "Blob size %u doesn't match %lu\n",
+					ret, seg->filesz);
+			return -EPERM;
+		}
+		ret = 0;
+	}
+
+	/* Zero out trailing memory */
+	paddr = seg->paddr + seg->filesz;
+	count = seg->sz - seg->filesz;
+	while (count > 0) {
+		int size;
+		u8 __iomem *buf;
+
+		size = min_t(size_t, IOMAP_SIZE, count);
+		buf = desc->map_fw_mem(paddr, size, map_data);
+		if (!buf) {
+			pil_err(desc, "Failed to map memory\n");
+			return -ENOMEM;
+		}
+		pil_memset_io(buf, 0, size);
+
+		desc->unmap_fw_mem(buf, size, map_data);
+
+		count -= size;
+		paddr += size;
+	}
+
+	if (desc->ops->verify_blob) {
+		ret = desc->ops->verify_blob(desc, seg->paddr, seg->sz);
+		if (ret)
+			pil_err(desc, "Blob%u failed verification(rc:%d)\n",
+								num, ret);
+	}
+
+	return ret;
+}
+
+static int pil_parse_devicetree(struct pil_desc *desc)
+{
+	struct device_node *ofnode = desc->dev->of_node;
+	int clk_ready = 0;
+
+	if (!ofnode)
+		return -EINVAL;
+
+	if (of_property_read_u32(ofnode, "qcom,mem-protect-id",
+					&desc->subsys_vmid))
+		pr_debug("Unable to read the mem-protect-id for %s\n",
+					desc->name);
+
+	if (desc->ops->proxy_unvote && of_find_property(ofnode,
+					"qcom,gpio-proxy-unvote",
+					NULL)) {
+		clk_ready = of_get_named_gpio(ofnode,
+				"qcom,gpio-proxy-unvote", 0);
+
+		if (clk_ready < 0) {
+			dev_dbg(desc->dev,
+				"[%s]: Error getting proxy unvoting gpio\n",
+				desc->name);
+			return clk_ready;
+		}
+
+		clk_ready = gpio_to_irq(clk_ready);
+		if (clk_ready < 0) {
+			dev_err(desc->dev,
+				"[%s]: Error getting proxy unvote IRQ\n",
+				desc->name);
+			return clk_ready;
+		}
+	}
+	desc->proxy_unvote_irq = clk_ready;
+	return 0;
+}
+
+/* Synchronize request_firmware() with suspend */
+static DECLARE_RWSEM(pil_pm_rwsem);
+
+/**
+ * pil_boot() - Load a peripheral image into memory and boot it
+ * @desc: descriptor from pil_desc_init()
+ *
+ * Returns 0 on success or -ERROR on failure.
+ */
+int pil_boot(struct pil_desc *desc)
+{
+	int ret;
+	char fw_name[30];
+	const struct pil_mdt *mdt;
+	const struct elf32_hdr *ehdr;
+	struct pil_seg *seg;
+	const struct firmware *fw;
+	struct pil_priv *priv = desc->priv;
+	bool mem_protect = false;
+	bool hyp_assign = false;
+
+	if (desc->shutdown_fail)
+		pil_err(desc, "Subsystem shutdown failed previously!\n");
+
+	/* Reinitialize for new image */
+	pil_release_mmap(desc);
+
+	down_read(&pil_pm_rwsem);
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", desc->fw_name);
+	ret = request_firmware(&fw, fw_name, desc->dev);
+	if (ret) {
+		pil_err(desc, "Failed to locate %s(rc:%d)\n", fw_name, ret);
+		goto out;
+	}
+
+	if (fw->size < sizeof(*ehdr)) {
+		pil_err(desc, "Not big enough to be an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	mdt = (const struct pil_mdt *)fw->data;
+	ehdr = &mdt->hdr;
+
+	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+		pil_err(desc, "Not an elf header\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	if (ehdr->e_phnum == 0) {
+		pil_err(desc, "No loadable segments\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+	if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+	    sizeof(struct elf32_hdr) > fw->size) {
+		pil_err(desc, "Program headers not within mdt\n");
+		ret = -EIO;
+		goto release_fw;
+	}
+
+	ret = pil_init_mmap(desc, mdt);
+	if (ret)
+		goto release_fw;
+
+	desc->priv->unvoted_flag = 0;
+	ret = pil_proxy_vote(desc);
+	if (ret) {
+		pil_err(desc, "Failed to proxy vote(rc:%d)\n", ret);
+		goto release_fw;
+	}
+
+	if (desc->ops->init_image)
+		ret = desc->ops->init_image(desc, fw->data, fw->size);
+	if (ret) {
+		pil_err(desc, "Initializing image failed(rc:%d)\n", ret);
+		goto err_boot;
+	}
+
+	if (desc->ops->mem_setup)
+		ret = desc->ops->mem_setup(desc, priv->region_start,
+				priv->region_end - priv->region_start);
+	if (ret) {
+		pil_err(desc, "Memory setup error(rc:%d)\n", ret);
+		goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		/*
+		 * In case of modem SSR, we need to assign memory back to
+		 * Linux. This is not true after a cold boot since Linux
+		 * already owns it. Also, for secure boot devices, modem
+		 * memory has to be released after the MBA is booted.
+		 */
+		if (desc->modem_ssr) {
+			ret = pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+			if (ret)
+				pil_err(desc, "Failed to assign to linux, ret- %d\n",
+								ret);
+		}
+		ret = pil_assign_mem_to_subsys_and_linux(desc,
+				priv->region_start,
+				(priv->region_end - priv->region_start));
+		if (ret) {
+			pil_err(desc, "Failed to assign memory, ret - %d\n",
+								ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = true;
+	}
+
+	list_for_each_entry(seg, &desc->priv->segs, list) {
+		ret = pil_load_seg(desc, seg);
+		if (ret)
+			goto err_deinit_image;
+	}
+
+	if (desc->subsys_vmid > 0) {
+		ret =  pil_reclaim_mem(desc, priv->region_start,
+				(priv->region_end - priv->region_start),
+				desc->subsys_vmid);
+		if (ret) {
+			pil_err(desc, "Failed to assign %s memory, ret - %d\n",
+							desc->name, ret);
+			goto err_deinit_image;
+		}
+		hyp_assign = false;
+	}
+
+	ret = desc->ops->auth_and_reset(desc);
+	if (ret) {
+		pil_err(desc, "Failed to bring out of reset(rc:%d)\n", ret);
+		goto err_auth_and_reset;
+	}
+	pil_info(desc, "Brought out of reset\n");
+	desc->modem_ssr = false;
+err_auth_and_reset:
+	if (ret && desc->subsys_vmid > 0) {
+		pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		mem_protect = true;
+	}
+err_deinit_image:
+	if (ret && desc->ops->deinit_image)
+		desc->ops->deinit_image(desc);
+err_boot:
+	if (ret && desc->proxy_unvote_irq)
+		disable_irq(desc->proxy_unvote_irq);
+	pil_proxy_unvote(desc, ret);
+release_fw:
+	release_firmware(fw);
+out:
+	up_read(&pil_pm_rwsem);
+	if (ret) {
+		if (priv->region) {
+			if (desc->subsys_vmid > 0 && !mem_protect &&
+					hyp_assign) {
+				pil_reclaim_mem(desc, priv->region_start,
+					(priv->region_end -
+						priv->region_start),
+					VMID_HLOS);
+			}
+			dma_free_attrs(desc->dev, priv->region_size,
+					priv->region, priv->region_start,
+					desc->attrs);
+			priv->region = NULL;
+		}
+		pil_release_mmap(desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(pil_boot);
+
+/**
+ * pil_shutdown() - Shutdown a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_shutdown(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (desc->ops->shutdown) {
+		if (desc->ops->shutdown(desc))
+			desc->shutdown_fail = true;
+		else
+			desc->shutdown_fail = false;
+	}
+
+	if (desc->proxy_unvote_irq) {
+		disable_irq(desc->proxy_unvote_irq);
+		if (!desc->priv->unvoted_flag)
+			pil_proxy_unvote(desc, 1);
+	} else if (!proxy_timeout_ms)
+		pil_proxy_unvote(desc, 1);
+	else
+		flush_delayed_work(&priv->proxy);
+	desc->modem_ssr = true;
+}
+EXPORT_SYMBOL(pil_shutdown);
+
+/**
+ * pil_free_memory() - Free memory resources associated with a peripheral
+ * @desc: descriptor from pil_desc_init()
+ */
+void pil_free_memory(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv->region) {
+		if (desc->subsys_vmid > 0)
+			pil_assign_mem_to_linux(desc, priv->region_start,
+				(priv->region_end - priv->region_start));
+		dma_free_attrs(desc->dev, priv->region_size,
+				priv->region, priv->region_start, desc->attrs);
+		priv->region = NULL;
+	}
+}
+EXPORT_SYMBOL(pil_free_memory);
+
+static DEFINE_IDA(pil_ida);
+
+bool is_timeout_disabled(void)
+{
+	return disable_timeouts;
+}
+/**
+ * pil_desc_init() - Initialize a pil descriptor
+ * @desc: descriptor to initialize
+ *
+ * Initialize a pil descriptor for use by other pil functions. This function
+ * must be called before calling pil_boot() or pil_shutdown().
+ *
+ * Returns 0 for success and -ERROR on failure.
+ */
+int pil_desc_init(struct pil_desc *desc)
+{
+	struct pil_priv *priv;
+	int ret;
+	void __iomem *addr;
+	char buf[sizeof(priv->info->name)];
+
+	if (WARN(desc->ops->proxy_unvote && !desc->ops->proxy_vote,
+				"Invalid proxy voting. Ignoring\n"))
+		((struct pil_reset_ops *)desc->ops)->proxy_unvote = NULL;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	desc->priv = priv;
+	priv->desc = desc;
+
+	priv->id = ret = ida_simple_get(&pil_ida, 0, PIL_NUM_DESC, GFP_KERNEL);
+	if (priv->id < 0)
+		goto err;
+
+	if (pil_info_base) {
+		addr = pil_info_base + sizeof(struct pil_image_info) * priv->id;
+		priv->info = (struct pil_image_info __iomem *)addr;
+
+		strlcpy(buf, desc->name, sizeof(buf));
+		__iowrite32_copy(priv->info->name, buf, sizeof(buf) / 4);
+	}
+
+	ret = pil_parse_devicetree(desc);
+	if (ret)
+		goto err_parse_dt;
+
+	/* Ignore users who don't make any sense */
+	WARN(desc->ops->proxy_unvote && desc->proxy_unvote_irq == 0
+		 && !desc->proxy_timeout,
+		 "Invalid proxy unvote callback or a proxy timeout of 0 was specified or no proxy unvote IRQ was specified.\n");
+
+	if (desc->proxy_unvote_irq) {
+		ret = request_threaded_irq(desc->proxy_unvote_irq,
+				  NULL,
+				  proxy_unvote_intr_handler,
+				  IRQF_ONESHOT | IRQF_TRIGGER_RISING,
+				  desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"Unable to request proxy unvote IRQ: %d\n",
+				ret);
+			goto err_parse_dt;
+		}
+		disable_irq(desc->proxy_unvote_irq);
+	}
+
+	snprintf(priv->wname, sizeof(priv->wname), "pil-%s", desc->name);
+	wakeup_source_init(&priv->ws, priv->wname);
+	INIT_DELAYED_WORK(&priv->proxy, pil_proxy_unvote_work);
+	INIT_LIST_HEAD(&priv->segs);
+
+	/* Make sure mapping functions are set. */
+	if (!desc->map_fw_mem)
+		desc->map_fw_mem = map_fw_mem;
+
+	if (!desc->unmap_fw_mem)
+		desc->unmap_fw_mem = unmap_fw_mem;
+
+	return 0;
+err_parse_dt:
+	ida_simple_remove(&pil_ida, priv->id);
+err:
+	kfree(priv);
+	return ret;
+}
+EXPORT_SYMBOL(pil_desc_init);
+
+/**
+ * pil_desc_release() - Release a pil descriptor
+ * @desc: descriptor to free
+ */
+void pil_desc_release(struct pil_desc *desc)
+{
+	struct pil_priv *priv = desc->priv;
+
+	if (priv) {
+		ida_simple_remove(&pil_ida, priv->id);
+		flush_delayed_work(&priv->proxy);
+		wakeup_source_trash(&priv->ws);
+	}
+	desc->priv = NULL;
+	kfree(priv);
+}
+EXPORT_SYMBOL(pil_desc_release);
+
+static int pil_pm_notify(struct notifier_block *b, unsigned long event, void *p)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		down_write(&pil_pm_rwsem);
+		break;
+	case PM_POST_SUSPEND:
+		up_write(&pil_pm_rwsem);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pil_pm_notifier = {
+	.notifier_call = pil_pm_notify,
+};
+
+static int __init msm_pil_init(void)
+{
+	struct device_node *np;
+	struct resource res;
+	int i;
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-pil");
+	if (!np) {
+		pr_warn("pil: failed to find qcom,msm-imem-pil node\n");
+		goto out;
+	}
+	if (of_address_to_resource(np, 0, &res)) {
+		pr_warn("pil: address to resource on imem region failed\n");
+		goto out;
+	}
+	pil_info_base = ioremap(res.start, resource_size(&res));
+	if (!pil_info_base) {
+		pr_warn("pil: could not map imem region\n");
+		goto out;
+	}
+	if (__raw_readl(pil_info_base) == 0x53444247) {
+		pr_info("pil: pil-imem set to disable pil timeouts\n");
+		disable_timeouts = true;
+	}
+	for (i = 0; i < resource_size(&res)/sizeof(u32); i++)
+		writel_relaxed(0, pil_info_base + (i * sizeof(u32)));
+
+out:
+	return register_pm_notifier(&pil_pm_notifier);
+}
+device_initcall(msm_pil_init);
+
+static void __exit msm_pil_exit(void)
+{
+	unregister_pm_notifier(&pil_pm_notifier);
+	if (pil_info_base)
+		iounmap(pil_info_base);
+}
+module_exit(msm_pil_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Load peripheral images and bring peripherals out of reset");
diff --git a/drivers/soc/qcom/peripheral-loader.h b/drivers/soc/qcom/peripheral-loader.h
new file mode 100644
index 0000000..752a6ce
--- /dev/null
+++ b/drivers/soc/qcom/peripheral-loader.h
@@ -0,0 +1,151 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PERIPHERAL_LOADER_H
+#define __MSM_PERIPHERAL_LOADER_H
+
+struct device;
+struct module;
+struct pil_priv;
+
+/**
+ * struct pil_desc - PIL descriptor
+ * @name: string used for pil_get()
+ * @fw_name: firmware name
+ * @dev: parent device
+ * @ops: callback functions
+ * @owner: module the descriptor belongs to
+ * @proxy_timeout: delay in ms until proxy vote is removed
+ * @flags: bitfield for image flags
+ * @priv: DON'T USE - internal only
+ * @attrs: DMA attributes to be used during dma allocation.
+ * @proxy_unvote_irq: IRQ to trigger a proxy unvote. proxy_timeout
+ * is ignored if this is set.
+ * @map_fw_mem: Custom function used to map physical address space to virtual.
+ * If not specified, the loader's internal dma_remap() based helper is used.
+ * @unmap_fw_mem: Custom function used to undo a mapping made by map_fw_mem.
+ * If not specified, the loader's internal dma_unremap() based helper is used.
+ * @map_data: Private data passed to map_fw_mem and unmap_fw_mem (optional).
+ * @shutdown_fail: Set if PIL op for shutting down subsystem fails.
+ * @modem_ssr: true if modem is restarting, false if booting for first time.
+ * @subsys_vmid: memprot id for the subsystem.
+ */
+struct pil_desc {
+	const char *name;
+	const char *fw_name;
+	struct device *dev;
+	const struct pil_reset_ops *ops;
+	struct module *owner;
+	unsigned long proxy_timeout;
+	unsigned long flags;
+#define PIL_SKIP_ENTRY_CHECK	BIT(0)
+	struct pil_priv *priv;
+	unsigned long attrs;
+	unsigned int proxy_unvote_irq;
+	void * (*map_fw_mem)(phys_addr_t phys, size_t size, void *data);
+	void (*unmap_fw_mem)(void *virt, size_t size, void *data);
+	void *map_data;
+	bool shutdown_fail;
+	bool modem_ssr;
+	u32 subsys_vmid;
+};
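+
+/*
+ * Illustrative sketch (not part of this interface): a client that wants
+ * plain ioremap-based mappings instead of the loader's default helpers
+ * could wire up the map_fw_mem/unmap_fw_mem hooks roughly as below. The
+ * names my_map and my_unmap are hypothetical, and whether ioremap is
+ * appropriate depends on how the image region is allocated.
+ *
+ *	static void *my_map(phys_addr_t paddr, size_t size, void *data)
+ *	{
+ *		return ioremap_wc(paddr, size);
+ *	}
+ *
+ *	static void my_unmap(void *vaddr, size_t size, void *data)
+ *	{
+ *		iounmap(vaddr);
+ *	}
+ *
+ *	desc->map_fw_mem = my_map;
+ *	desc->unmap_fw_mem = my_unmap;
+ */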
+
+/**
+ * struct pil_image_info - info in IMEM about image and where it is loaded
+ * @name: name of image (may or may not be NULL terminated)
+ * @start: indicates physical address where image starts (little endian)
+ * @size: size of image (little endian)
+ */
+struct pil_image_info {
+	char name[8];
+	__le64 start;
+	__le32 size;
+} __attribute__((__packed__));
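+
+/*
+ * Illustrative sketch (not part of this driver): a debug helper that reads
+ * one of these records back from IMEM could interpret the fields roughly as
+ * below. Here "base" (a struct pil_image_info __iomem pointer to the record
+ * array) and "i" are hypothetical; %.8s is used because name may not be
+ * NULL terminated.
+ *
+ *	struct pil_image_info rec;
+ *
+ *	memcpy_fromio(&rec, &base[i], sizeof(rec));
+ *	pr_info("%.8s: start %llx size %u\n", rec.name,
+ *		(unsigned long long)le64_to_cpu(rec.start),
+ *		le32_to_cpu(rec.size));
+ */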
+
+/**
+ * struct pil_reset_ops - PIL operations
+ * @init_image: prepare an image for authentication
+ * @mem_setup: prepare the image memory region
+ * @verify_blob: authenticate a program segment, called once for each loadable
+ *		 program segment (optional)
+ * @proxy_vote: make proxy votes before auth_and_reset (optional)
+ * @auth_and_reset: boot the processor
+ * @proxy_unvote: remove any proxy votes (optional)
+ * @deinit_image: undo the actions performed in init_image, if necessary
+ * @shutdown: shutdown the processor
+ */
+struct pil_reset_ops {
+	int (*init_image)(struct pil_desc *pil, const u8 *metadata,
+			  size_t size);
+	int (*mem_setup)(struct pil_desc *pil, phys_addr_t addr, size_t size);
+	int (*verify_blob)(struct pil_desc *pil, phys_addr_t phy_addr,
+			   size_t size);
+	int (*proxy_vote)(struct pil_desc *pil);
+	int (*auth_and_reset)(struct pil_desc *pil);
+	void (*proxy_unvote)(struct pil_desc *pil);
+	int (*deinit_image)(struct pil_desc *pil);
+	int (*shutdown)(struct pil_desc *pil);
+};
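+
+/*
+ * Illustrative usage sketch (not part of this interface): a hypothetical
+ * client driver, probed from a DT-backed platform device, would typically
+ * provide a pil_reset_ops table, fill in a pil_desc, and drive the image
+ * through the functions declared below. All names here (my_ops,
+ * my_auth_and_reset, my_shutdown, drv) are made up for illustration only.
+ *
+ *	static struct pil_reset_ops my_ops = {
+ *		.auth_and_reset = my_auth_and_reset,
+ *		.shutdown = my_shutdown,
+ *	};
+ *
+ *	drv->desc.name = "my-subsys";
+ *	drv->desc.fw_name = "my-subsys";
+ *	drv->desc.dev = &pdev->dev;
+ *	drv->desc.ops = &my_ops;
+ *	drv->desc.owner = THIS_MODULE;
+ *	ret = pil_desc_init(&drv->desc);
+ *
+ *	ret = pil_boot(&drv->desc);		(load *.mdt/*.bNN, boot image)
+ *	...
+ *	pil_shutdown(&drv->desc);		(stop the peripheral again)
+ *	pil_desc_release(&drv->desc);		(on driver removal)
+ */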
+
+#ifdef CONFIG_MSM_PIL
+extern int pil_desc_init(struct pil_desc *desc);
+extern int pil_boot(struct pil_desc *desc);
+extern void pil_shutdown(struct pil_desc *desc);
+extern void pil_free_memory(struct pil_desc *desc);
+extern void pil_desc_release(struct pil_desc *desc);
+extern phys_addr_t pil_get_entry_addr(struct pil_desc *desc);
+extern int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev);
+extern int pil_assign_mem_to_subsys(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_linux(struct pil_desc *desc, phys_addr_t addr,
+						size_t size);
+extern int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size);
+extern int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr, size_t size,
+						int VMid);
+extern bool is_timeout_disabled(void);
+#else
+static inline int pil_desc_init(struct pil_desc *desc) { return 0; }
+static inline int pil_boot(struct pil_desc *desc) { return 0; }
+static inline void pil_shutdown(struct pil_desc *desc) { }
+static inline void pil_free_memory(struct pil_desc *desc) { }
+static inline void pil_desc_release(struct pil_desc *desc) { }
+static inline phys_addr_t pil_get_entry_addr(struct pil_desc *desc)
+{
+	return 0;
+}
+static inline int pil_do_ramdump(struct pil_desc *desc, void *ramdump_dev)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_assign_mem_to_subsys_and_linux(struct pil_desc *desc,
+						phys_addr_t addr, size_t size)
+{
+	return 0;
+}
+static inline int pil_reclaim_mem(struct pil_desc *desc, phys_addr_t addr,
+					size_t size, int VMid)
+{
+	return 0;
+}
+static inline bool is_timeout_disabled(void) { return false; }
+#endif
+
+#endif
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
new file mode 100644
index 0000000..ffe72e6
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.c
@@ -0,0 +1,847 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+/* Q6 Register Offsets */
+#define QDSP6SS_RST_EVB			0x010
+#define QDSP6SS_DBG_CFG			0x018
+
+/* AXI Halting Registers */
+#define MSS_Q6_HALT_BASE		0x180
+#define MSS_MODEM_HALT_BASE		0x200
+#define MSS_NC_HALT_BASE		0x280
+
+/* RMB Status Register Values */
+#define STATUS_PBL_SUCCESS		0x1
+#define STATUS_XPU_UNLOCKED		0x1
+#define STATUS_XPU_UNLOCKED_SCRIBBLED	0x2
+
+/* PBL/MBA interface registers */
+#define RMB_MBA_IMAGE			0x00
+#define RMB_PBL_STATUS			0x04
+#define RMB_MBA_COMMAND			0x08
+#define RMB_MBA_STATUS			0x0C
+#define RMB_PMI_META_DATA		0x10
+#define RMB_PMI_CODE_START		0x14
+#define RMB_PMI_CODE_LENGTH		0x18
+#define RMB_PROTOCOL_VERSION		0x1C
+#define RMB_MBA_DEBUG_INFORMATION	0x20
+
+#define POLL_INTERVAL_US		50
+
+#define CMD_META_DATA_READY		0x1
+#define CMD_LOAD_READY			0x2
+#define CMD_PILFAIL_NFY_MBA		0xffffdead
+
+#define STATUS_META_DATA_AUTH_SUCCESS	0x3
+#define STATUS_AUTH_COMPLETE		0x4
+#define STATUS_MBA_UNLOCKED		0x6
+
+/* External BHS */
+#define EXTERNAL_BHS_ON			BIT(0)
+#define EXTERNAL_BHS_STATUS		BIT(4)
+#define BHS_TIMEOUT_US			50
+
+#define MSS_RESTART_PARAM_ID		0x2
+#define MSS_RESTART_ID			0xA
+
+#define MSS_MAGIC			0XAABADEAD
+enum scm_cmd {
+	PAS_MEM_SETUP_CMD = 2,
+};
+
+static int pbl_mba_boot_timeout_ms = 1000;
+module_param(pbl_mba_boot_timeout_ms, int, 0644);
+
+static int modem_auth_timeout_ms = 10000;
+module_param(modem_auth_timeout_ms, int, 0644);
+
+/* If set to 0xAABADEAD, MBA failures trigger a kernel panic */
+static uint modem_trigger_panic;
+module_param(modem_trigger_panic, uint, 0644);
+
+/* To set the modem debug cookie in DBG_CFG register for debugging */
+static uint modem_dbg_cfg;
+module_param(modem_dbg_cfg, uint, 0644);
+
+static void modem_log_rmb_regs(void __iomem *base)
+{
+	pr_err("RMB_MBA_IMAGE: %08x\n", readl_relaxed(base + RMB_MBA_IMAGE));
+	pr_err("RMB_PBL_STATUS: %08x\n", readl_relaxed(base + RMB_PBL_STATUS));
+	pr_err("RMB_MBA_COMMAND: %08x\n",
+				readl_relaxed(base + RMB_MBA_COMMAND));
+	pr_err("RMB_MBA_STATUS: %08x\n", readl_relaxed(base + RMB_MBA_STATUS));
+	pr_err("RMB_PMI_META_DATA: %08x\n",
+				readl_relaxed(base + RMB_PMI_META_DATA));
+	pr_err("RMB_PMI_CODE_START: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_START));
+	pr_err("RMB_PMI_CODE_LENGTH: %08x\n",
+				readl_relaxed(base + RMB_PMI_CODE_LENGTH));
+	pr_err("RMB_PROTOCOL_VERSION: %08x\n",
+				readl_relaxed(base + RMB_PROTOCOL_VERSION));
+	pr_err("RMB_MBA_DEBUG_INFORMATION: %08x\n",
+			readl_relaxed(base + RMB_MBA_DEBUG_INFORMATION));
+
+	if (modem_trigger_panic == MSS_MAGIC)
+		panic("%s: System ramdump is needed!!!\n", __func__);
+}
+
+static int pil_mss_power_up(struct q6v5_data *drv)
+{
+	int ret = 0;
+	u32 regval;
+
+	if (drv->vreg) {
+		ret = regulator_enable(drv->vreg);
+		if (ret)
+			dev_err(drv->desc.dev, "Failed to enable modem regulator(rc:%d)\n",
+									ret);
+	}
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval |= EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+
+		ret = readl_poll_timeout(drv->cxrail_bhs, regval,
+			regval & EXTERNAL_BHS_STATUS, 1, BHS_TIMEOUT_US);
+	}
+
+	return ret;
+}
+
+static int pil_mss_power_down(struct q6v5_data *drv)
+{
+	u32 regval;
+
+	if (drv->cxrail_bhs) {
+		regval = readl_relaxed(drv->cxrail_bhs);
+		regval &= ~EXTERNAL_BHS_ON;
+		writel_relaxed(regval, drv->cxrail_bhs);
+	}
+
+	if (drv->vreg)
+		return regulator_disable(drv->vreg);
+
+	return 0;
+}
+
+static int pil_mss_enable_clks(struct q6v5_data *drv)
+{
+	int ret;
+
+	ret = clk_prepare_enable(drv->ahb_clk);
+	if (ret)
+		goto err_ahb_clk;
+	ret = clk_prepare_enable(drv->axi_clk);
+	if (ret)
+		goto err_axi_clk;
+	ret = clk_prepare_enable(drv->rom_clk);
+	if (ret)
+		goto err_rom_clk;
+	ret = clk_prepare_enable(drv->gpll0_mss_clk);
+	if (ret)
+		goto err_gpll0_mss_clk;
+	ret = clk_prepare_enable(drv->snoc_axi_clk);
+	if (ret)
+		goto err_snoc_axi_clk;
+	ret = clk_prepare_enable(drv->mnoc_axi_clk);
+	if (ret)
+		goto err_mnoc_axi_clk;
+	return 0;
+err_mnoc_axi_clk:
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+err_snoc_axi_clk:
+	clk_disable_unprepare(drv->snoc_axi_clk);
+err_gpll0_mss_clk:
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+err_rom_clk:
+	clk_disable_unprepare(drv->rom_clk);
+err_axi_clk:
+	clk_disable_unprepare(drv->axi_clk);
+err_ahb_clk:
+	clk_disable_unprepare(drv->ahb_clk);
+	return ret;
+}
+
+static void pil_mss_disable_clks(struct q6v5_data *drv)
+{
+	clk_disable_unprepare(drv->mnoc_axi_clk);
+	clk_disable_unprepare(drv->snoc_axi_clk);
+	clk_disable_unprepare(drv->gpll0_mss_clk);
+	clk_disable_unprepare(drv->rom_clk);
+	clk_disable_unprepare(drv->axi_clk);
+	if (!drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+}
+
+static int pil_mss_restart_reg(struct q6v5_data *drv, u32 mss_restart)
+{
+	int ret = 0;
+	int scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = mss_restart;
+	desc.args[1] = 0;
+	desc.arginfo = SCM_ARGS(2);
+
+	if (drv->restart_reg && !drv->restart_reg_sec) {
+		writel_relaxed(mss_restart, drv->restart_reg);
+		/* Ensure the restart register write completes before delaying */
+		mb();
+		udelay(2);
+	} else if (drv->restart_reg_sec) {
+		if (!is_scm_armv8()) {
+			ret = scm_call(SCM_SVC_PIL, MSS_RESTART_ID,
+					&mss_restart, sizeof(mss_restart),
+					&scm_ret, sizeof(scm_ret));
+		} else {
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+						MSS_RESTART_ID), &desc);
+			scm_ret = desc.ret[0];
+		}
+		if (ret || scm_ret)
+			pr_err("Secure MSS restart failed\n");
+	}
+
+	return ret;
+}
+
+static int pil_msa_wait_for_mba_ready(struct q6v5_data *drv)
+{
+	struct device *dev = drv->desc.dev;
+	int ret;
+	u32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	/* Wait for PBL completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_PBL_STATUS, status,
+				 status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "PBL boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_PBL_SUCCESS) {
+		dev_err(dev, "PBL returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	/* Wait for MBA completion. */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status != 0, POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(dev, "MBA boot timed out (rc:%d)\n", ret);
+		return ret;
+	}
+	if (status != STATUS_XPU_UNLOCKED &&
+	    status != STATUS_XPU_UNLOCKED_SCRIBBLED) {
+		dev_err(dev, "MBA returned unexpected status %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int pil_mss_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+
+	if (drv->axi_halt_base) {
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_Q6_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_MODEM_HALT_BASE);
+		pil_q6v5_halt_axi_port(pil,
+			drv->axi_halt_base + MSS_NC_HALT_BASE);
+	}
+
+	if (drv->axi_halt_q6)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_q6);
+	if (drv->axi_halt_mss)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_mss);
+	if (drv->axi_halt_nc)
+		pil_q6v5_halt_axi_port(pil, drv->axi_halt_nc);
+
+	/*
+	 * Software workaround to avoid high MX current during LPASS/MSS
+	 * restart.
+	 */
+	if (drv->mx_spike_wa && drv->ahb_clk_vote) {
+		ret = clk_prepare_enable(drv->ahb_clk);
+		if (!ret)
+			assert_clamps(pil);
+		else
+			dev_err(pil->dev, "error turning ON AHB clock(rc:%d)\n",
+									ret);
+	}
+
+	ret = pil_mss_restart_reg(drv, 1);
+
+	if (drv->is_booted) {
+		pil_mss_disable_clks(drv);
+		pil_mss_power_down(drv);
+		drv->is_booted = false;
+	}
+
+	return ret;
+}
+
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret = 0;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
+
+	if (err_path) {
+		writel_relaxed(CMD_PILFAIL_NFY_MBA,
+				drv->rmb_base + RMB_MBA_COMMAND);
+		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+				status == STATUS_MBA_UNLOCKED || status < 0,
+				POLL_INTERVAL_US, val);
+		if (ret)
+			dev_err(pil->dev, "MBA region unlock timed out(rc:%d)\n",
+									ret);
+		else if (status < 0)
+			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
+						status);
+	}
+
+	ret = pil_mss_shutdown(pil);
+
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	/*
+	 * In case of any failure where reclaiming MBA and DP memory
+	 * could not happen, free the memory here.
+	 */
+	if (drv->q6->mba_dp_virt) {
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+
+	return ret;
+}
+
+int pil_mss_deinit_image(struct pil_desc *pil)
+{
+	return __pil_mss_deinit_image(pil, true);
+}
+
+int pil_mss_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_mx-uV", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_mx-uV property(rc:%d)\n", ret);
+		return ret;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_mx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vreg_mx voltage(rc:%d)\n",
+									ret);
+		return ret;
+	}
+
+	ret = regulator_enable(drv->vreg_mx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to enable vreg_mx(rc:%d)\n", ret);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+		return ret;
+	}
+
+	ret = pil_q6v5_make_proxy_votes(pil);
+	if (ret) {
+		regulator_disable(drv->vreg_mx);
+		regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+	}
+
+	return ret;
+}
+
+void pil_mss_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	pil_q6v5_remove_proxy_votes(pil);
+	regulator_disable(drv->vreg_mx);
+	regulator_set_voltage(drv->vreg_mx, 0, INT_MAX);
+}
+
+static int pil_mss_mem_setup(struct pil_desc *pil,
+					phys_addr_t addr, size_t size)
+{
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!md->subsys_desc.pil_mss_memsetup)
+		return 0;
+
+	request.proc = md->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = md->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mss_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	phys_addr_t start_addr = pil_get_entry_addr(pil);
+	int ret;
+
+	if (drv->mba_dp_phys)
+		start_addr = drv->mba_dp_phys;
+
+	/*
+	 * Bring subsystem out of reset and enable required
+	 * regulators and clocks.
+	 */
+	ret = pil_mss_power_up(drv);
+	if (ret)
+		goto err_power;
+
+	/* Deassert reset to subsystem and wait for propagation */
+	ret = pil_mss_restart_reg(drv, 0);
+	if (ret)
+		goto err_restart;
+
+	ret = pil_mss_enable_clks(drv);
+	if (ret)
+		goto err_clks;
+
+	if (modem_dbg_cfg)
+		writel_relaxed(modem_dbg_cfg, drv->reg_base + QDSP6SS_DBG_CFG);
+
+	/* Program Image Address */
+	if (drv->self_auth) {
+		writel_relaxed(start_addr, drv->rmb_base + RMB_MBA_IMAGE);
+		/*
+		 * Ensure write to RMB base occurs before reset
+		 * is released.
+		 */
+		mb();
+	} else {
+		writel_relaxed((start_addr >> 4) & 0x0FFFFFF0,
+				drv->reg_base + QDSP6SS_RST_EVB);
+	}
+
+	/* Program DP Address */
+	if (drv->dp_size) {
+		writel_relaxed(start_addr + SZ_1M, drv->rmb_base +
+			       RMB_PMI_CODE_START);
+		writel_relaxed(drv->dp_size, drv->rmb_base +
+			       RMB_PMI_CODE_LENGTH);
+	} else {
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+	}
+	/* Make sure RMB regs are written before bringing modem out of reset */
+	mb();
+
+	ret = pil_q6v5_reset(pil);
+	if (ret)
+		goto err_q6v5_reset;
+
+	/* Wait for MBA to start. Check for PBL and MBA errors while waiting. */
+	if (drv->self_auth) {
+		ret = pil_msa_wait_for_mba_ready(drv);
+		if (ret)
+			goto err_q6v5_reset;
+	}
+
+	dev_info(pil->dev, "MBA boot done\n");
+	drv->is_booted = true;
+
+	return 0;
+
+err_q6v5_reset:
+	modem_log_rmb_regs(drv->rmb_base);
+	pil_mss_disable_clks(drv);
+	if (drv->ahb_clk_vote)
+		clk_disable_unprepare(drv->ahb_clk);
+err_clks:
+	pil_mss_restart_reg(drv, 1);
+err_restart:
+	pil_mss_power_down(drv);
+err_power:
+	return ret;
+}
+
+int pil_mss_reset_load_mba(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	struct modem_data *md = dev_get_drvdata(pil->dev);
+	const struct firmware *fw, *dp_fw = NULL;
+	char fw_name_legacy[10] = "mba.b00";
+	char fw_name[10] = "mba.mbn";
+	char *dp_name = "msadp";
+	char *fw_name_p;
+	void *mba_dp_virt;
+	dma_addr_t mba_dp_phys, mba_dp_phys_end;
+	int ret, count;
+	const u8 *data;
+
+	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
+	ret = request_firmware(&fw, fw_name_p, pil->dev);
+	if (ret) {
+		dev_err(pil->dev, "Failed to locate %s (rc:%d)\n",
+						fw_name_p, ret);
+		return ret;
+	}
+
+	data = fw ? fw->data : NULL;
+	if (!data) {
+		dev_err(pil->dev, "MBA data is NULL\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	drv->mba_dp_size = SZ_1M;
+
+	arch_setup_dma_ops(&md->mba_mem_dev, 0, 0, NULL, 0);
+
+	md->mba_mem_dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	md->attrs_dma = 0;
+	md->attrs_dma |= DMA_ATTR_STRONGLY_ORDERED;
+
+	ret = request_firmware(&dp_fw, dp_name, pil->dev);
+	if (ret) {
+		dev_warn(pil->dev, "Debug policy not present - %s. Continue.\n",
+						dp_name);
+	} else {
+		if (!dp_fw || !dp_fw->data) {
+			dev_err(pil->dev, "Invalid DP firmware\n");
+			ret = -ENOMEM;
+			goto err_invalid_fw;
+		}
+		drv->dp_size = dp_fw->size;
+		drv->mba_dp_size += drv->dp_size;
+	}
+
+	mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
+			&mba_dp_phys, GFP_KERNEL, md->attrs_dma);
+	if (!mba_dp_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto err_invalid_fw;
+	}
+
+	/* Make sure there are no mappings in PKMAP and fixmap */
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	drv->mba_dp_phys = mba_dp_phys;
+	drv->mba_dp_virt = mba_dp_virt;
+	mba_dp_phys_end = mba_dp_phys + drv->mba_dp_size;
+
+	dev_info(pil->dev, "Loading MBA and DP (if present) from %pa to %pa\n",
+					&mba_dp_phys, &mba_dp_phys_end);
+
+	/* Load the MBA image into memory */
+	count = fw->size;
+	memcpy(mba_dp_virt, data, count);
+	/* Ensure memcpy of the MBA memory is done before loading the DP */
+	wmb();
+
+	/* Load the DP image into memory */
+	if (drv->mba_dp_size > SZ_1M) {
+		memcpy(mba_dp_virt + SZ_1M, dp_fw->data, dp_fw->size);
+		/* Ensure memcpy is done before powering up modem */
+		wmb();
+	}
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+		if (ret) {
+			pr_err("scm_call to unprotect MBA and DP mem failed(rc:%d)\n",
+									ret);
+			goto err_mba_data;
+		}
+	}
+
+	ret = pil_mss_reset(pil);
+	if (ret) {
+		dev_err(pil->dev, "MBA boot failed(rc:%d)\n", ret);
+		goto err_mss_reset;
+	}
+
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+
+	return 0;
+
+err_mss_reset:
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
+							drv->mba_dp_size);
+err_mba_data:
+	dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+				drv->mba_dp_phys, md->attrs_dma);
+err_invalid_fw:
+	if (dp_fw)
+		release_firmware(dp_fw);
+	release_firmware(fw);
+	drv->mba_dp_virt = NULL;
+	return ret;
+}
+
+static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
+					size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	void *mdata_virt;
+	dma_addr_t mdata_phys;
+	s32 status;
+	int ret;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+	unsigned long attrs = 0;
+
+	drv->mba_mem_dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	/* Make metadata physically contiguous and 4K aligned. */
+	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
+					GFP_KERNEL, attrs);
+	if (!mdata_virt) {
+		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memcpy(mdata_virt, metadata, size);
+	/* wmb() ensures copy completes prior to starting authentication. */
+	wmb();
+
+	if (pil->subsys_vmid > 0) {
+		ret = pil_assign_mem_to_subsys(pil, mdata_phys,
+							ALIGN(size, SZ_4K));
+		if (ret) {
+			pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
+									ret);
+			dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
+							mdata_phys, attrs);
+			goto fail;
+		}
+	}
+
+	/* Initialize length counter to 0 */
+	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Pass address of meta-data to the MBA and perform authentication */
+	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
+	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+			status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
+			POLL_INTERVAL_US, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of headers timed out(rc:%d)\n",
+								ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for headers\n",
+				status);
+		ret = -EINVAL;
+	}
+
+	if (pil->subsys_vmid > 0)
+		pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
+
+	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, attrs);
+
+	if (!ret)
+		return ret;
+
+fail:
+	modem_log_rmb_regs(drv->rmb_base);
+	if (drv->q6) {
+		pil_mss_shutdown(pil);
+		if (pil->subsys_vmid > 0)
+			pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
+						drv->q6->mba_dp_size);
+		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+				drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+				drv->attrs_dma);
+		drv->q6->mba_dp_virt = NULL;
+	}
+	return ret;
+}
+
+static int pil_msa_mss_reset_mba_load_auth_mdt(struct pil_desc *pil,
+				  const u8 *metadata, size_t size)
+{
+	int ret;
+
+	ret = pil_mss_reset_load_mba(pil);
+	if (ret)
+		return ret;
+
+	return pil_msa_auth_modem_mdt(pil, metadata, size);
+}
+
+static int pil_msa_mba_verify_blob(struct pil_desc *pil, phys_addr_t phy_addr,
+				   size_t size)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	s32 status;
+	u32 img_length = readl_relaxed(drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	/* Begin image authentication */
+	if (img_length == 0) {
+		writel_relaxed(phy_addr, drv->rmb_base + RMB_PMI_CODE_START);
+		writel_relaxed(CMD_LOAD_READY, drv->rmb_base + RMB_MBA_COMMAND);
+	}
+	/* Increment length counter */
+	img_length += size;
+	writel_relaxed(img_length, drv->rmb_base + RMB_PMI_CODE_LENGTH);
+
+	status = readl_relaxed(drv->rmb_base + RMB_MBA_STATUS);
+	if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d\n", status);
+		modem_log_rmb_regs(drv->rmb_base);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int pil_msa_mba_auth(struct pil_desc *pil)
+{
+	struct modem_data *drv = dev_get_drvdata(pil->dev);
+	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
+	int ret;
+	s32 status;
+	u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+
+	/* Wait for all segments to be authenticated or an error to occur */
+	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
+		status == STATUS_AUTH_COMPLETE || status < 0, 50, val);
+	if (ret) {
+		dev_err(pil->dev, "MBA authentication of image timed out(rc:%d)\n",
+									ret);
+	} else if (status < 0) {
+		dev_err(pil->dev, "MBA returned error %d for image\n", status);
+		ret = -EINVAL;
+	}
+
+	if (drv->q6) {
+		if (drv->q6->mba_dp_virt) {
+			/* Reclaim MBA and DP (if allocated) memory. */
+			if (pil->subsys_vmid > 0)
+				pil_assign_mem_to_linux(pil,
+					drv->q6->mba_dp_phys,
+					drv->q6->mba_dp_size);
+			dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+					drv->q6->mba_dp_virt,
+					drv->q6->mba_dp_phys, drv->attrs_dma);
+
+			drv->q6->mba_dp_virt = NULL;
+		}
+	}
+	if (ret)
+		modem_log_rmb_regs(drv->rmb_base);
+	if (q6_drv->ahb_clk_vote)
+		clk_disable_unprepare(q6_drv->ahb_clk);
+
+	return ret;
+}
+
+/*
+ * To be used only if self-auth is disabled, or if the
+ * MBA image is loaded as segments and not in init_image.
+ */
+struct pil_reset_ops pil_msa_mss_ops = {
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.auth_and_reset = pil_mss_reset,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if self-auth is enabled and the MBA is to be loaded
+ * in init_image and the modem headers are also to be authenticated
+ * in init_image. Modem segments authenticated in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_mss_ops_selfauth = {
+	.init_image = pil_msa_mss_reset_mba_load_auth_mdt,
+	.proxy_vote = pil_mss_make_proxy_votes,
+	.proxy_unvote = pil_mss_remove_proxy_votes,
+	.mem_setup = pil_mss_mem_setup,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+	.deinit_image = pil_mss_deinit_image,
+	.shutdown = pil_mss_shutdown,
+};
+
+/*
+ * To be used if the modem headers are to be authenticated
+ * in init_image, and the modem segments in auth_and_reset.
+ */
+struct pil_reset_ops pil_msa_femto_mba_ops = {
+	.init_image = pil_msa_auth_modem_mdt,
+	.verify_blob = pil_msa_mba_verify_blob,
+	.auth_and_reset = pil_msa_mba_auth,
+};
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
new file mode 100644
index 0000000..3af6368
--- /dev/null
+++ b/drivers/soc/qcom/pil-msa.h
@@ -0,0 +1,48 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_PIL_MSA_H
+#define __MSM_PIL_MSA_H
+
+#include <soc/qcom/subsystem_restart.h>
+
+#include "peripheral-loader.h"
+
+#define VDD_MSS_UV	1000000
+
+struct modem_data {
+	struct q6v5_data *q6;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void *ramdump_dev;
+	bool crash_shutdown;
+	u32 pas_id;
+	bool ignore_errors;
+	struct completion stop_ack;
+	void __iomem *rmb_base;
+	struct clk *xo;
+	struct pil_desc desc;
+	struct device mba_mem_dev;
+	unsigned long attrs_dma;
+};
+
+extern struct pil_reset_ops pil_msa_mss_ops;
+extern struct pil_reset_ops pil_msa_mss_ops_selfauth;
+extern struct pil_reset_ops pil_msa_femto_mba_ops;
+
+int pil_mss_reset_load_mba(struct pil_desc *pil);
+int pil_mss_make_proxy_votes(struct pil_desc *pil);
+void pil_mss_remove_proxy_votes(struct pil_desc *pil);
+int pil_mss_shutdown(struct pil_desc *pil);
+int pil_mss_deinit_image(struct pil_desc *pil);
+int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path);
+#endif
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
new file mode 100644
index 0000000..9308b8d
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -0,0 +1,425 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_gpio.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+#include "pil-msa.h"
+
+#define MAX_VDD_MSS_UV		1150000
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	81U
+#define STOP_ACK_TIMEOUT_MS	1000
+
+#define subsys_to_drv(d) container_of(d, struct modem_data, subsys_desc)
+
+static void log_modem_sfr(void)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+
+	smem_reason = smem_get_entry_no_rlock(SMEM_SSR_REASON_MSS0, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("modem subsystem failure reason: (unknown, smem_get_entry_no_rlock failed).\n");
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("modem subsystem failure reason: (unknown, empty string found).\n");
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("modem subsystem failure reason: %s.\n", reason);
+
+	smem_reason[0] = '\0';
+	/* Ensure the cleared reason string is visible in SMEM */
+	wmb();
+}
+
+static void restart_modem(struct modem_data *drv)
+{
+	log_modem_sfr();
+	drv->ignore_errors = true;
+	subsystem_restart_dev(drv->subsys);
+}
+
+static irqreturn_t modem_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	/* Ignore if we're the one that set the force stop GPIO */
+	if (drv->crash_shutdown)
+		return IRQ_HANDLED;
+
+	pr_err("Fatal error on the modem.\n");
+	subsys_set_crash_status(drv->subsys, true);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t modem_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	pr_info("Received stop ack interrupt from modem\n");
+	complete(&drv->stop_ack);
+	return IRQ_HANDLED;
+}
+
+static int modem_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	unsigned long ret;
+
+	if (subsys->is_not_loadable)
+		return 0;
+
+	if (!subsys_get_crash_status(drv->subsys) && force_stop &&
+	    subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&drv->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from modem.\n");
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	if (drv->subsys_desc.ramdump_disable_gpio) {
+		drv->subsys_desc.ramdump_disable = gpio_get_value(
+					drv->subsys_desc.ramdump_disable_gpio);
+		pr_warn("Ramdump disable gpio value is %d\n",
+			drv->subsys_desc.ramdump_disable);
+	}
+
+	pil_shutdown(&drv->q6->desc);
+
+	return 0;
+}
+
+static int modem_powerup(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	if (subsys->is_not_loadable)
+		return 0;
+	/*
+	 * At this time, the modem is shutdown. Therefore this function cannot
+	 * run concurrently with the watchdog bite error handler, making it safe
+	 * to unset the flag below.
+	 */
+	reinit_completion(&drv->stop_ack);
+	drv->subsys_desc.ramdump_disable = 0;
+	drv->ignore_errors = false;
+	drv->q6->desc.fw_name = subsys->fw_name;
+	return pil_boot(&drv->q6->desc);
+}
+
+static void modem_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+
+	drv->crash_shutdown = true;
+	if (!subsys_get_crash_status(drv->subsys) &&
+		subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(STOP_ACK_TIMEOUT_MS);
+	}
+}
+
+static int modem_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct modem_data *drv = subsys_to_drv(subsys);
+	int ret;
+
+	if (!enable)
+		return 0;
+
+	ret = pil_mss_make_proxy_votes(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_mss_reset_load_mba(&drv->q6->desc);
+	if (ret)
+		return ret;
+
+	ret = pil_do_ramdump(&drv->q6->desc, drv->ramdump_dev);
+	if (ret < 0)
+		pr_err("Unable to dump modem fw memory (rc = %d).\n", ret);
+
+	ret = __pil_mss_deinit_image(&drv->q6->desc, false);
+	if (ret < 0)
+		pr_err("Unable to free up resources (rc = %d).\n", ret);
+
+	pil_mss_remove_proxy_votes(&drv->q6->desc);
+	return ret;
+}
+
+static irqreturn_t modem_wdog_bite_intr_handler(int irq, void *dev_id)
+{
+	struct modem_data *drv = subsys_to_drv(dev_id);
+
+	if (drv->ignore_errors)
+		return IRQ_HANDLED;
+
+	pr_err("Watchdog bite received from modem software!\n");
+	if (drv->subsys_desc.system_debug &&
+			!gpio_get_value(drv->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(drv->subsys, true);
+	restart_modem(drv);
+	return IRQ_HANDLED;
+}
+
+static int pil_subsys_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	int ret;
+
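+	/* Hook the modem into the subsystem-restart framework */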
+	drv->subsys_desc.name = "modem";
+	drv->subsys_desc.dev = &pdev->dev;
+	drv->subsys_desc.owner = THIS_MODULE;
+	drv->subsys_desc.shutdown = modem_shutdown;
+	drv->subsys_desc.powerup = modem_powerup;
+	drv->subsys_desc.ramdump = modem_ramdump;
+	drv->subsys_desc.crash_shutdown = modem_crash_shutdown;
+	drv->subsys_desc.err_fatal_handler = modem_err_fatal_intr_handler;
+	drv->subsys_desc.stop_ack_handler = modem_stop_ack_intr_handler;
+	drv->subsys_desc.wdog_bite_handler = modem_wdog_bite_intr_handler;
+
+	drv->q6->desc.modem_ssr = false;
+	drv->subsys = subsys_register(&drv->subsys_desc);
+	if (IS_ERR(drv->subsys)) {
+		ret = PTR_ERR(drv->subsys);
+		goto err_subsys;
+	}
+
+	drv->ramdump_dev = create_ramdump_device("modem", &pdev->dev);
+	if (!drv->ramdump_dev) {
+		pr_err("%s: Unable to create a modem ramdump device.\n",
+			__func__);
+		ret = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	return 0;
+
+err_ramdump:
+	subsys_unregister(drv->subsys);
+err_subsys:
+	return ret;
+}
+
+static int pil_mss_loadable_init(struct modem_data *drv,
+					struct platform_device *pdev)
+{
+	struct q6v5_data *q6;
+	struct pil_desc *q6_desc;
+	struct resource *res;
+	struct property *prop;
+	int ret;
+
+	q6 = pil_q6v5_init(pdev);
+	if (IS_ERR_OR_NULL(q6))
+		return PTR_ERR(q6);
+	drv->q6 = q6;
+	drv->xo = q6->xo;
+
+	q6_desc = &q6->desc;
+	q6_desc->owner = THIS_MODULE;
+	q6_desc->proxy_timeout = PROXY_TIMEOUT_MS;
+
+	q6_desc->ops = &pil_msa_mss_ops;
+
+	q6->self_auth = of_property_read_bool(pdev->dev.of_node,
+							"qcom,pil-self-auth");
+	if (q6->self_auth) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						    "rmb_base");
+		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
+		drv->rmb_base = q6->rmb_base;
+		q6_desc->ops = &pil_msa_mss_ops_selfauth;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "restart_reg");
+	if (!res) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"restart_reg_sec");
+		q6->restart_reg_sec = true;
+	}
+
+	q6->restart_reg = devm_ioremap(&pdev->dev,
+						res->start, resource_size(res));
+	if (!q6->restart_reg)
+		return -ENOMEM;
+
+	q6->vreg = NULL;
+
+	prop = of_find_property(pdev->dev.of_node, "vdd_mss-supply", NULL);
+	if (prop) {
+		q6->vreg = devm_regulator_get(&pdev->dev, "vdd_mss");
+		if (IS_ERR(q6->vreg))
+			return PTR_ERR(q6->vreg);
+
+		ret = regulator_set_voltage(q6->vreg, VDD_MSS_UV,
+						MAX_VDD_MSS_UV);
+		if (ret)
+			dev_err(&pdev->dev, "Failed to set vreg voltage(rc:%d)\n",
+									ret);
+
+		ret = regulator_set_load(q6->vreg, 100000);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to set vreg mode(rc:%d)\n",
+									ret);
+			return ret;
+		}
+	}
+
+	q6->vreg_mx = devm_regulator_get(&pdev->dev, "vdd_mx");
+	if (IS_ERR(q6->vreg_mx))
+		return PTR_ERR(q6->vreg_mx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_mx-uV", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_mx-uV property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"cxrail_bhs_reg");
+	if (res)
+		q6->cxrail_bhs = devm_ioremap(&pdev->dev, res->start,
+					  resource_size(res));
+
+	q6->ahb_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(q6->ahb_clk))
+		return PTR_ERR(q6->ahb_clk);
+
+	q6->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(q6->axi_clk))
+		return PTR_ERR(q6->axi_clk);
+
+	q6->rom_clk = devm_clk_get(&pdev->dev, "mem_clk");
+	if (IS_ERR(q6->rom_clk))
+		return PTR_ERR(q6->rom_clk);
+
+	ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,pas-id", &drv->pas_id);
+	if (ret)
+		dev_info(&pdev->dev, "No pas_id found.\n");
+
+	drv->subsys_desc.pil_mss_memsetup =
+		of_property_read_bool(pdev->dev.of_node, "qcom,pil-mss-memsetup");
+
+	/* Optional. */
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "gpll0_mss_clk") >= 0)
+		q6->gpll0_mss_clk = devm_clk_get(&pdev->dev, "gpll0_mss_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "snoc_axi_clk") >= 0)
+		q6->snoc_axi_clk = devm_clk_get(&pdev->dev, "snoc_axi_clk");
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,active-clock-names", "mnoc_axi_clk") >= 0)
+		q6->mnoc_axi_clk = devm_clk_get(&pdev->dev, "mnoc_axi_clk");
+
+	ret = pil_desc_init(q6_desc);
+
+	return ret;
+}
+
+static int pil_mss_driver_probe(struct platform_device *pdev)
+{
+	struct modem_data *drv;
+	int ret, is_not_loadable;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, drv);
+
+	is_not_loadable = of_property_read_bool(pdev->dev.of_node,
+							"qcom,is-not-loadable");
+	if (is_not_loadable) {
+		drv->subsys_desc.is_not_loadable = 1;
+	} else {
+		ret = pil_mss_loadable_init(drv, pdev);
+		if (ret)
+			return ret;
+	}
+	init_completion(&drv->stop_ack);
+
+	return pil_subsys_init(drv, pdev);
+}
+
+static int pil_mss_driver_exit(struct platform_device *pdev)
+{
+	struct modem_data *drv = platform_get_drvdata(pdev);
+
+	subsys_unregister(drv->subsys);
+	destroy_ramdump_device(drv->ramdump_dev);
+	pil_desc_release(&drv->q6->desc);
+	return 0;
+}
+
+static const struct of_device_id mss_match_table[] = {
+	{ .compatible = "qcom,pil-q6v5-mss" },
+	{ .compatible = "qcom,pil-q6v55-mss" },
+	{ .compatible = "qcom,pil-q6v56-mss" },
+	{}
+};
+
+static struct platform_driver pil_mss_driver = {
+	.probe = pil_mss_driver_probe,
+	.remove = pil_mss_driver_exit,
+	.driver = {
+		.name = "pil-q6v5-mss",
+		.of_match_table = mss_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_mss_init(void)
+{
+	return platform_driver_register(&pil_mss_driver);
+}
+module_init(pil_mss_init);
+
+static void __exit pil_mss_exit(void)
+{
+	platform_driver_unregister(&pil_mss_driver);
+}
+module_exit(pil_mss_exit);
+
+MODULE_DESCRIPTION("Support for booting modem subsystems with QDSP6v5 Hexagon processors");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
new file mode 100644
index 0000000..ee528f8
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include "peripheral-loader.h"
+#include "pil-q6v5.h"
+
+/* QDSP6SS Register Offsets */
+#define QDSP6SS_RESET			0x014
+#define QDSP6SS_GFMUX_CTL		0x020
+#define QDSP6SS_PWR_CTL			0x030
+#define QDSP6V6SS_MEM_PWR_CTL		0x034
+#define QDSP6SS_BHS_STATUS		0x078
+#define QDSP6SS_MEM_PWR_CTL		0x0B0
+#define QDSP6SS_STRAP_ACC		0x110
+#define QDSP6V62SS_BHS_STATUS		0x0C4
+
+/* AXI Halt Register Offsets */
+#define AXI_HALTREQ			0x0
+#define AXI_HALTACK			0x4
+#define AXI_IDLE			0x8
+
+#define HALT_ACK_TIMEOUT_US		100000
+
+/* QDSP6SS_RESET */
+#define Q6SS_STOP_CORE			BIT(0)
+#define Q6SS_CORE_ARES			BIT(1)
+#define Q6SS_BUS_ARES_ENA		BIT(2)
+
+/* QDSP6SS_GFMUX_CTL */
+#define Q6SS_CLK_ENA			BIT(1)
+#define Q6SS_CLK_SRC_SEL_C		BIT(3)
+#define Q6SS_CLK_SRC_SEL_FIELD		0xC
+#define Q6SS_CLK_SRC_SWITCH_CLK_OVR	BIT(8)
+
+/* QDSP6SS_PWR_CTL */
+#define Q6SS_L2DATA_SLP_NRET_N_0	BIT(0)
+#define Q6SS_L2DATA_SLP_NRET_N_1	BIT(1)
+#define Q6SS_L2DATA_SLP_NRET_N_2	BIT(2)
+#define Q6SS_L2TAG_SLP_NRET_N		BIT(16)
+#define Q6SS_ETB_SLP_NRET_N		BIT(17)
+#define Q6SS_L2DATA_STBY_N		BIT(18)
+#define Q6SS_SLP_RET_N			BIT(19)
+#define Q6SS_CLAMP_IO			BIT(20)
+#define QDSS_BHS_ON			BIT(21)
+#define QDSS_LDO_BYP			BIT(22)
+
+/* QDSP6v55 parameters */
+#define QDSP6v55_LDO_ON                 BIT(26)
+#define QDSP6v55_LDO_BYP                BIT(25)
+#define QDSP6v55_BHS_ON                 BIT(24)
+#define QDSP6v55_CLAMP_WL               BIT(21)
+#define QDSP6v55_CLAMP_QMC_MEM          BIT(22)
+#define L1IU_SLP_NRET_N                 BIT(15)
+#define L1DU_SLP_NRET_N                 BIT(14)
+#define L2PLRU_SLP_NRET_N               BIT(13)
+#define QDSP6v55_BHS_EN_REST_ACK        BIT(0)
+
+#define HALT_CHECK_MAX_LOOPS            (200)
+#define BHS_CHECK_MAX_LOOPS             (200)
+#define QDSP6SS_XO_CBCR                 (0x0038)
+
+/* QDSP6v65 parameters */
+#define QDSP6SS_BOOT_CORE_START		(0x400)
+#define QDSP6SS_BOOT_CMD		(0x404)
+#define QDSP6SS_BOOT_STATUS		(0x408)
+#define QDSP6SS_SLEEP			(0x3C)
+#define SLEEP_CHECK_MAX_LOOPS		(200)
+#define BOOT_FSM_TIMEOUT		(10)
+
+#define QDSP6SS_ACC_OVERRIDE_VAL	0x20
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil)
+{
+	int ret;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+								ret);
+		return ret;
+	}
+
+	ret = clk_prepare_enable(drv->xo);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for XO(rc:%d)\n", ret);
+		goto out;
+	}
+
+	ret = clk_prepare_enable(drv->pnoc_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for pnoc(rc:%d)\n", ret);
+		goto err_pnoc_vote;
+	}
+
+	ret = clk_prepare_enable(drv->qdss_clk);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for qdss(rc:%d)\n", ret);
+		goto err_qdss_vote;
+	}
+
+	ret = regulator_set_voltage(drv->vreg_cx, uv, INT_MAX);
+	if (ret) {
+		dev_err(pil->dev, "Failed to request vdd_cx voltage(rc:%d)\n",
+								ret);
+		goto err_cx_voltage;
+	}
+
+	ret = regulator_set_load(drv->vreg_cx, 100000);
+	if (ret < 0) {
+		dev_err(pil->dev, "Failed to set vdd_cx mode(rc:%d)\n", ret);
+		goto err_cx_mode;
+	}
+
+	ret = regulator_enable(drv->vreg_cx);
+	if (ret) {
+		dev_err(pil->dev, "Failed to vote for vdd_cx(rc:%d)\n", ret);
+		goto err_cx_enable;
+	}
+
+	if (drv->vreg_pll) {
+		ret = regulator_enable(drv->vreg_pll);
+		if (ret) {
+			dev_err(pil->dev, "Failed to vote for vdd_pll(rc:%d)\n",
+									ret);
+			goto err_vreg_pll;
+		}
+	}
+
+	return 0;
+
+err_vreg_pll:
+	regulator_disable(drv->vreg_cx);
+err_cx_enable:
+	regulator_set_load(drv->vreg_cx, 0);
+err_cx_mode:
+	regulator_set_voltage(drv->vreg_cx, 0, uv);
+err_cx_voltage:
+	clk_disable_unprepare(drv->qdss_clk);
+err_qdss_vote:
+	clk_disable_unprepare(drv->pnoc_clk);
+err_pnoc_vote:
+	clk_disable_unprepare(drv->xo);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(pil_q6v5_make_proxy_votes);
+
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	int uv, ret = 0;
+
+	ret = of_property_read_u32(pil->dev->of_node, "vdd_cx-voltage", &uv);
+	if (ret) {
+		dev_err(pil->dev, "missing vdd_cx-voltage property(rc:%d)\n",
+									ret);
+		return;
+	}
+
+	if (drv->vreg_pll) {
+		regulator_disable(drv->vreg_pll);
+		regulator_set_load(drv->vreg_pll, 0);
+	}
+	regulator_disable(drv->vreg_cx);
+	regulator_set_load(drv->vreg_cx, 0);
+	regulator_set_voltage(drv->vreg_cx, 0, uv);
+	clk_disable_unprepare(drv->xo);
+	clk_disable_unprepare(drv->pnoc_clk);
+	clk_disable_unprepare(drv->qdss_clk);
+}
+EXPORT_SYMBOL(pil_q6v5_remove_proxy_votes);
+
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base)
+{
+	int ret;
+	u32 status;
+
+	/* Assert halt request */
+	writel_relaxed(1, halt_base + AXI_HALTREQ);
+
+	/* Wait for halt */
+	ret = readl_poll_timeout(halt_base + AXI_HALTACK,
+		status, status != 0, 50, HALT_ACK_TIMEOUT_US);
+	if (ret)
+		dev_warn(pil->dev, "Port %p halt timeout\n", halt_base);
+	else if (!readl_relaxed(halt_base + AXI_IDLE))
+		dev_warn(pil->dev, "Port %p halt failed\n", halt_base);
+
+	/* Clear halt request (port will remain halted until reset) */
+	writel_relaxed(0, halt_base + AXI_HALTREQ);
+}
+EXPORT_SYMBOL(pil_q6v5_halt_axi_port);
+
+void assert_clamps(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/*
+	 * Assert QDSP6 I/O clamp, memory wordline clamp, and compiler memory
+	 * clamp as a software workaround to avoid high MX current during
+	 * LPASS/MSS restart.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= (Q6SS_CLAMP_IO | QDSP6v55_CLAMP_WL |
+			QDSP6v55_CLAMP_QMC_MEM);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	/* Make sure the clamps are asserted before the MSS restart */
+	mb();
+}
+
+static void __pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	u32 val;
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	/* Turn off core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val &= ~Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Clamp IO */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Turn off Q6 memories */
+	val &= ~(Q6SS_L2DATA_SLP_NRET_N_0 | Q6SS_L2DATA_SLP_NRET_N_1 |
+		 Q6SS_L2DATA_SLP_NRET_N_2 | Q6SS_SLP_RET_N |
+		 Q6SS_L2TAG_SLP_NRET_N | Q6SS_ETB_SLP_NRET_N |
+		 Q6SS_L2DATA_STBY_N);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Assert Q6 resets */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Kill power at block headswitch */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSS_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+}
+
+void pil_q6v5_shutdown(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v55) {
+		/* Subsystem driver expected to halt bus and assert reset */
+		return;
+	}
+	__pil_q6v5_shutdown(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_shutdown);
+
+static int __pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	/*
+	 * Turn on memories. L2 banks should be done individually
+	 * to minimize inrush current.
+	 */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
+	       Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_2;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= Q6SS_L2DATA_SLP_NRET_N_0;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_CORE_ARES;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+
+	/* Need a different clock source for v5.2.0 */
+	if (drv->qdsp6v5_2_0) {
+		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
+		val |= Q6SS_CLK_SRC_SEL_C;
+	}
+
+	/* force clock on during source switch */
+	if (drv->qdsp6v56)
+		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;
+
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	/* Start core execution */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~Q6SS_STOP_CORE;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	return 0;
+}
+
+static int q6v55_branch_clk_enable(struct q6v5_data *drv)
+{
+	u32 val, count;
+	void __iomem *cbcr_reg = drv->reg_base + QDSP6SS_XO_CBCR;
+
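+	/* Enable the branch clock and poll until CLK_OFF (bit 31) clears */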
+	val = readl_relaxed(cbcr_reg);
+	val |= 0x1;
+	writel_relaxed(val, cbcr_reg);
+
+	for (count = HALT_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(cbcr_reg);
+		if (!(val & BIT(31)))
+			return 0;
+		udelay(1);
+	}
+
+	dev_err(drv->desc.dev, "Failed to enable xo branch clock.\n");
+	return -EINVAL;
+}
+
+static int __pil_q6v65_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val, count;
+	unsigned long timeout;
+
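+	/* Enable the QDSP6 sleep clock and wait for it to turn on */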
+	val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+	val |= 0x1;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_SLEEP);
+	for (count = SLEEP_CHECK_MAX_LOOPS; count > 0; count--) {
+		val = readl_relaxed(drv->reg_base + QDSP6SS_SLEEP);
+		if (!(val & BIT(31)))
+			break;
+		udelay(1);
+	}
+
+	if (!count) {
+		dev_err(drv->desc.dev, "Sleep clock did not come on in time\n");
+		return -ETIMEDOUT;
+	}
+
+	/* De-assert QDSP6 stop core */
+	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CORE_START);
+	/* De-assert stop core before starting boot FSM */
+	mb();
+	/* Trigger boot FSM */
+	writel_relaxed(1, drv->reg_base + QDSP6SS_BOOT_CMD);
+
+	/* Wait for boot FSM to complete */
+	timeout = jiffies + usecs_to_jiffies(BOOT_FSM_TIMEOUT);
+	while (time_before(jiffies, timeout)) {
+		val = readl_relaxed(drv->reg_base + QDSP6SS_BOOT_STATUS);
+		if (val & BIT(0))
+			return 0;
+	}
+
+	dev_err(drv->desc.dev, "Boot FSM failed to complete.\n");
+	return -ETIMEDOUT;
+}
+
+static int __pil_q6v55_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+	u32 val;
+	int i;
+
+	/* Override the ACC value if required */
+	if (drv->override_acc)
+		writel_relaxed(QDSP6SS_ACC_OVERRIDE_VAL,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Override the ACC value with input value */
+	if (!of_property_read_u32(pil->dev->of_node, "qcom,override-acc-1",
+				&drv->override_acc_1))
+		writel_relaxed(drv->override_acc_1,
+				drv->reg_base + QDSP6SS_STRAP_ACC);
+
+	/* Assert resets, stop core */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* The BHS requires the XO CBCR clock to be enabled */
+	i = q6v55_branch_clk_enable(drv);
+	if (i)
+		return i;
+
+	/* Enable power block headswitch, and wait for it to stabilize */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val |= QDSP6v55_BHS_ON;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+	mb();
+	udelay(1);
+
+	if (drv->qdsp6v62_1_2 || drv->qdsp6v62_1_5) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6V62SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	if (drv->qdsp6v61_1_1) {
+		for (i = BHS_CHECK_MAX_LOOPS; i > 0; i--) {
+			if (readl_relaxed(drv->reg_base + QDSP6SS_BHS_STATUS)
+			    & QDSP6v55_BHS_EN_REST_ACK)
+				break;
+			udelay(1);
+		}
+		if (!i) {
+			pr_err("%s: BHS_EN_REST_ACK not set!\n", __func__);
+			return -ETIMEDOUT;
+		}
+	}
+
+	/* Put LDO in bypass mode */
+	val |= QDSP6v55_LDO_BYP;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	if (drv->qdsp6v56_1_3) {
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2 and ETB memories 1 at a time */
+		for (i = 17; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_5 || drv->qdsp6v56_1_8
+					|| drv->qdsp6v56_1_10) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			val |= readl_relaxed(drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v56_1_8_inrush_current) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_MEM_PWR_CTL);
+		for (i = 19; i >= 6; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+
+		for (i = 0; i <= 5; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+						QDSP6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else if (drv->qdsp6v61_1_1 || drv->qdsp6v62_1_2 ||
+						drv->qdsp6v62_1_5) {
+		/* Deassert QDSP6 compiler memory clamp */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val &= ~QDSP6v55_CLAMP_QMC_MEM;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Deassert memory peripheral sleep and L2 memory standby */
+		val |= (Q6SS_L2DATA_STBY_N | Q6SS_SLP_RET_N);
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L1, L2, ETB and JU memories 1 at a time */
+		val = readl_relaxed(drv->reg_base +
+				QDSP6V6SS_MEM_PWR_CTL);
+
+		if (drv->qdsp6v62_1_5)
+			i = 29;
+		else
+			i = 28;
+
+		for ( ; i >= 0; i--) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base +
+					QDSP6V6SS_MEM_PWR_CTL);
+			/*
+			 * Wait for 1us for both memory peripheral and
+			 * data array to turn on.
+			 */
+			udelay(1);
+		}
+	} else {
+		/* Turn on memories. */
+		val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+		val |= 0xFFF00;
+		writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+		/* Turn on L2 banks 1 at a time */
+		for (i = 0; i <= 7; i++) {
+			val |= BIT(i);
+			writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+		}
+	}
+
+	/* Remove word line clamp */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_PWR_CTL);
+	val &= ~QDSP6v55_CLAMP_WL;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Remove IO clamp */
+	val &= ~Q6SS_CLAMP_IO;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_PWR_CTL);
+
+	/* Bring core out of reset */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_RESET);
+	val &= ~(Q6SS_CORE_ARES | Q6SS_STOP_CORE);
+	writel_relaxed(val, drv->reg_base + QDSP6SS_RESET);
+
+	/* Turn on core clock */
+	val = readl_relaxed(drv->reg_base + QDSP6SS_GFMUX_CTL);
+	val |= Q6SS_CLK_ENA;
+	writel_relaxed(val, drv->reg_base + QDSP6SS_GFMUX_CTL);
+
+	return 0;
+}
+
+int pil_q6v5_reset(struct pil_desc *pil)
+{
+	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
+
+	if (drv->qdsp6v65_1_0)
+		return __pil_q6v65_reset(pil);
+	else if (drv->qdsp6v55)
+		return __pil_q6v55_reset(pil);
+	else
+		return __pil_q6v5_reset(pil);
+}
+EXPORT_SYMBOL(pil_q6v5_reset);
+
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev)
+{
+	struct q6v5_data *drv;
+	struct resource *res;
+	struct pil_desc *desc;
+	struct property *prop;
+	int ret, vdd_pll;
+
+	drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+	if (!drv)
+		return ERR_PTR(-ENOMEM);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
+	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(drv->reg_base))
+		return ERR_CAST(drv->reg_base);
+
+	desc = &drv->desc;
+	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &desc->name);
+	if (ret)
+		return ERR_PTR(ret);
+
+	desc->dev = &pdev->dev;
+
+	drv->qdsp6v5_2_0 = of_device_is_compatible(pdev->dev.of_node,
+						   "qcom,pil-femto-modem");
+
+	if (drv->qdsp6v5_2_0)
+		return drv;
+
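+	/*
+	 * Map the AXI halt registers: either a single combined halt_base or
+	 * separate halt_q6/halt_modem/halt_nc regions.
+	 */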
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "halt_base");
+	if (res) {
+		drv->axi_halt_base = devm_ioremap(&pdev->dev, res->start,
+							resource_size(res));
+		if (!drv->axi_halt_base) {
+			dev_err(&pdev->dev, "Failed to map axi_halt_base.\n");
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	if (!drv->axi_halt_base) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_q6");
+		if (res) {
+			drv->axi_halt_q6 = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_q6) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_q6.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_modem");
+		if (res) {
+			drv->axi_halt_mss = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_mss) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_mss.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+								"halt_nc");
+		if (res) {
+			drv->axi_halt_nc = devm_ioremap(&pdev->dev,
+					res->start, resource_size(res));
+			if (!drv->axi_halt_nc) {
+				dev_err(&pdev->dev, "Failed to map axi_halt_nc.\n");
+				return ERR_PTR(-ENOMEM);
+			}
+		}
+	}
+
+	if (!(drv->axi_halt_base || (drv->axi_halt_q6 && drv->axi_halt_mss
+					&& drv->axi_halt_nc))) {
+		dev_err(&pdev->dev, "halt bases for Q6 are not defined.\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	drv->qdsp6v55 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v55-mss");
+	drv->qdsp6v56 = of_device_is_compatible(pdev->dev.of_node,
+						"qcom,pil-q6v56-mss");
+
+	drv->qdsp6v56_1_3 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-3");
+	drv->qdsp6v56_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-5");
+
+	drv->qdsp6v56_1_8 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8");
+	drv->qdsp6v56_1_10 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v56-1-10");
+
+	drv->qdsp6v56_1_8_inrush_current = of_property_read_bool(
+						pdev->dev.of_node,
+						"qcom,qdsp6v56-1-8-inrush-current");
+
+	drv->qdsp6v61_1_1 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v61-1-1");
+
+	drv->qdsp6v62_1_2 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-2");
+
+	drv->qdsp6v62_1_5 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v62-1-5");
+
+	drv->qdsp6v65_1_0 = of_property_read_bool(pdev->dev.of_node,
+						"qcom,qdsp6v65-1-0");
+
+	drv->non_elf_image = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mba-image-is-not-elf");
+
+	drv->override_acc = of_property_read_bool(pdev->dev.of_node,
+						"qcom,override-acc");
+
+	drv->ahb_clk_vote = of_property_read_bool(pdev->dev.of_node,
+						"qcom,ahb-clk-vote");
+	drv->mx_spike_wa = of_property_read_bool(pdev->dev.of_node,
+						"qcom,mx-spike-wa");
+
+	drv->xo = devm_clk_get(&pdev->dev, "xo");
+	if (IS_ERR(drv->xo))
+		return ERR_CAST(drv->xo);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pnoc-clk-vote")) {
+		drv->pnoc_clk = devm_clk_get(&pdev->dev, "pnoc_clk");
+		if (IS_ERR(drv->pnoc_clk))
+			return ERR_CAST(drv->pnoc_clk);
+	} else {
+		drv->pnoc_clk = NULL;
+	}
+
+	if (of_property_match_string(pdev->dev.of_node,
+			"qcom,proxy-clock-names", "qdss_clk") >= 0) {
+		drv->qdss_clk = devm_clk_get(&pdev->dev, "qdss_clk");
+		if (IS_ERR(drv->qdss_clk))
+			return ERR_CAST(drv->qdss_clk);
+	} else {
+		drv->qdss_clk = NULL;
+	}
+
+	drv->vreg_cx = devm_regulator_get(&pdev->dev, "vdd_cx");
+	if (IS_ERR(drv->vreg_cx))
+		return ERR_CAST(drv->vreg_cx);
+	prop = of_find_property(pdev->dev.of_node, "vdd_cx-voltage", NULL);
+	if (!prop) {
+		dev_err(&pdev->dev, "Missing vdd_cx-voltage property\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = of_property_read_u32(pdev->dev.of_node, "qcom,vdd_pll",
+		&vdd_pll);
+	if (!ret) {
+		drv->vreg_pll = devm_regulator_get(&pdev->dev, "vdd_pll");
+		if (!IS_ERR_OR_NULL(drv->vreg_pll)) {
+			ret = regulator_set_voltage(drv->vreg_pll, vdd_pll,
+							vdd_pll);
+			if (ret) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll voltage(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+
+			ret = regulator_set_load(drv->vreg_pll, 10000);
+			if (ret < 0) {
+				dev_err(&pdev->dev, "Failed to set vdd_pll mode(rc:%d)\n",
+									ret);
+				return ERR_PTR(ret);
+			}
+		} else {
+			drv->vreg_pll = NULL;
+		}
+	}
+
+	return drv;
+}
+EXPORT_SYMBOL(pil_q6v5_init);
diff --git a/drivers/soc/qcom/pil-q6v5.h b/drivers/soc/qcom/pil-q6v5.h
new file mode 100644
index 0000000..7f7bb97
--- /dev/null
+++ b/drivers/soc/qcom/pil-q6v5.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_PIL_Q6V5_H
+#define __MSM_PIL_Q6V5_H
+
+#include "peripheral-loader.h"
+
+struct regulator;
+struct clk;
+struct pil_device;
+struct platform_device;
+
+struct q6v5_data {
+	void __iomem *reg_base;
+	void __iomem *rmb_base;
+	void __iomem *cxrail_bhs;  /* External BHS register */
+	struct clk *xo;		   /* XO clock source */
+	struct clk *pnoc_clk;	   /* PNOC bus clock source */
+	struct clk *ahb_clk;	   /* PIL access to registers */
+	struct clk *axi_clk;	   /* CPU access to memory */
+	struct clk *core_clk;	   /* CPU core */
+	struct clk *reg_clk;	   /* CPU access registers */
+	struct clk *gpll0_mss_clk; /* GPLL0 to MSS connection */
+	struct clk *rom_clk;	   /* Boot ROM */
+	struct clk *snoc_axi_clk;
+	struct clk *mnoc_axi_clk;
+	struct clk *qdss_clk;
+	void __iomem *axi_halt_base; /* Halt base of q6, mss,
+				      * nc are in same 4K page
+				      */
+	void __iomem *axi_halt_q6;
+	void __iomem *axi_halt_mss;
+	void __iomem *axi_halt_nc;
+	void __iomem *restart_reg;
+	struct regulator *vreg;
+	struct regulator *vreg_cx;
+	struct regulator *vreg_mx;
+	struct regulator *vreg_pll;
+	bool is_booted;
+	struct pil_desc desc;
+	bool self_auth;
+	phys_addr_t mba_dp_phys;
+	void *mba_dp_virt;
+	size_t mba_dp_size;
+	size_t dp_size;
+	bool qdsp6v55;
+	bool qdsp6v5_2_0;
+	bool qdsp6v56;
+	bool qdsp6v56_1_3;
+	bool qdsp6v56_1_5;
+	bool qdsp6v56_1_8;
+	bool qdsp6v56_1_8_inrush_current;
+	bool qdsp6v56_1_10;
+	bool qdsp6v61_1_1;
+	bool qdsp6v62_1_2;
+	bool qdsp6v62_1_5;
+	bool qdsp6v65_1_0;
+	bool non_elf_image;
+	bool restart_reg_sec;
+	bool override_acc;
+	int override_acc_1;
+	bool ahb_clk_vote;
+	bool mx_spike_wa;
+};
+
+int pil_q6v5_make_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_remove_proxy_votes(struct pil_desc *pil);
+void pil_q6v5_halt_axi_port(struct pil_desc *pil, void __iomem *halt_base);
+void pil_q6v5_shutdown(struct pil_desc *pil);
+int pil_q6v5_reset(struct pil_desc *pil);
+void assert_clamps(struct pil_desc *pil);
+struct q6v5_data *pil_q6v5_init(struct platform_device *pdev);
+
+#endif
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
new file mode 100644
index 0000000..9c3f9431
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -0,0 +1,2232 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/qmi_encdec.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+#include <linux/hashtable.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_logging.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "qmi_interface_priv.h"
+
+#define BUILD_INSTANCE_ID(vers, ins) (((vers) & 0xFF) | (((ins) & 0xFF) << 8))
+#define LOOKUP_MASK 0xFFFFFFFF
+#define MAX_WQ_NAME_LEN 20
+#define QMI_REQ_RESP_LOG_PAGES 3
+#define QMI_IND_LOG_PAGES 2
+#define QMI_REQ_RESP_LOG(buf...) \
+do { \
+	if (qmi_req_resp_log_ctx) { \
+		ipc_log_string(qmi_req_resp_log_ctx, buf); \
+	} \
+} while (0)
+
+#define QMI_IND_LOG(buf...) \
+do { \
+	if (qmi_ind_log_ctx) { \
+		ipc_log_string(qmi_ind_log_ctx, buf); \
+	} \
+} while (0)
+
+static LIST_HEAD(svc_event_nb_list);
+static DEFINE_MUTEX(svc_event_nb_list_lock);
+
+struct qmi_notify_event_work {
+	unsigned int event;
+	void *oob_data;
+	size_t oob_data_len;
+	void *priv;
+	struct work_struct work;
+};
+static void qmi_notify_event_worker(struct work_struct *work);
+
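+/*
+ * All handles are hashed under key 0 (see qmi_handle_create()), so lookups
+ * effectively walk a single chain of registered handles.
+ */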
+#define HANDLE_HASH_TBL_SZ 1
+static DEFINE_HASHTABLE(handle_hash_tbl, HANDLE_HASH_TBL_SZ);
+static DEFINE_MUTEX(handle_hash_tbl_lock);
+
+struct elem_info qmi_response_type_v01_ei[] = {
+	{
+		.data_type	= QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len	= 1,
+		.elem_size	= sizeof(uint16_t),
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= offsetof(struct qmi_response_type_v01,
+					   result),
+		.ei_array	= NULL,
+	},
+	{
+		.data_type      = QMI_SIGNED_2_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint16_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+		.offset         = offsetof(struct qmi_response_type_v01,
+					   error),
+		.ei_array       = NULL,
+	},
+	{
+		.data_type	= QMI_EOTI,
+		.elem_len	= 0,
+		.elem_size	= 0,
+		.is_array	= NO_ARRAY,
+		.tlv_type	= QMI_COMMON_TLV_TYPE,
+		.offset		= 0,
+		.ei_array	= NULL,
+	},
+};
+
+struct elem_info qmi_error_resp_type_v01_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = 0,
+		.ei_array  = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type = QMI_EOTI,
+		.elem_len  = 0,
+		.elem_size = 0,
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x00,
+		.offset    = 0,
+		.ei_array  = NULL,
+	},
+};
+
+struct msg_desc err_resp_desc = {
+	.max_msg_len = 7,
+	.msg_id = 0,
+	.ei_array = qmi_error_resp_type_v01_ei,
+};
+
+static DEFINE_MUTEX(qmi_svc_event_notifier_lock);
+static struct msm_ipc_port *qmi_svc_event_notifier_port;
+static struct workqueue_struct *qmi_svc_event_notifier_wq;
+static void qmi_svc_event_notifier_init(void);
+static void qmi_svc_event_worker(struct work_struct *work);
+static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
+					      uint32_t instance_id);
+DECLARE_WORK(qmi_svc_event_work, qmi_svc_event_worker);
+static void svc_resume_tx_worker(struct work_struct *work);
+static void clean_txn_info(struct qmi_handle *handle);
+static void *qmi_req_resp_log_ctx;
+static void *qmi_ind_log_ctx;
+
+/**
+ * qmi_log() - Pass log data to IPC logging framework
+ * @handle:	The pointer to the qmi_handle
+ * @cntl_flag:	Indicates the type (request/response/indication) of the message
+ * @txn_id:	Transaction ID of the message.
+ * @msg_id:	Message ID of the incoming/outgoing message.
+ * @msg_len:	Total size of the message.
+ *
+ * This function builds the data that is passed on to the IPC logging
+ * framework: the information exchanged between the IPC Router and kernel
+ * modules during request/response/indication transactions.
+ */
+static void qmi_log(struct qmi_handle *handle,
+			unsigned char cntl_flag, uint16_t txn_id,
+			uint16_t msg_id, uint16_t msg_len)
+{
+	uint32_t service_id = 0;
+	const char *ops_type = NULL;
+
+	if (handle->handle_type == QMI_CLIENT_HANDLE) {
+		service_id = handle->dest_service_id;
+		if (cntl_flag == QMI_REQUEST_CONTROL_FLAG)
+			ops_type = "TX";
+		else if (cntl_flag == QMI_INDICATION_CONTROL_FLAG ||
+			cntl_flag == QMI_RESPONSE_CONTROL_FLAG)
+			ops_type = "RX";
+	} else if (handle->handle_type == QMI_SERVICE_HANDLE) {
+		service_id = handle->svc_ops_options->service_id;
+		if (cntl_flag == QMI_REQUEST_CONTROL_FLAG)
+			ops_type = "RX";
+		else if (cntl_flag == QMI_INDICATION_CONTROL_FLAG ||
+			cntl_flag == QMI_RESPONSE_CONTROL_FLAG)
+			ops_type = "TX";
+	}
+
+	/*
+	 * The IPC logging format is:
+	 * <Type of module> (CLNT or SERV)	:
+	 * <Operation Type> (TX or RX)		:
+	 * <Control Flag> (Req/Resp/Ind)	:
+	 * <Transaction ID>			:
+	 * <Message ID>				:
+	 * <Message Length>			:
+	 * <Service ID>				:
+	 */
+	if (qmi_req_resp_log_ctx &&
+		((cntl_flag == QMI_REQUEST_CONTROL_FLAG) ||
+		(cntl_flag == QMI_RESPONSE_CONTROL_FLAG))) {
+		QMI_REQ_RESP_LOG("%s %s CF:%x TI:%x MI:%x ML:%x SvcId: %x",
+		(handle->handle_type == QMI_CLIENT_HANDLE ? "QCCI" : "QCSI"),
+		ops_type, cntl_flag, txn_id, msg_id, msg_len, service_id);
+	} else if (qmi_ind_log_ctx &&
+		(cntl_flag == QMI_INDICATION_CONTROL_FLAG)) {
+		QMI_IND_LOG("%s %s CF:%x TI:%x MI:%x ML:%x SvcId: %x",
+		(handle->handle_type == QMI_CLIENT_HANDLE ? "QCCI" : "QCSI"),
+		ops_type, cntl_flag, txn_id, msg_id, msg_len, service_id);
+	}
+}
+
+/**
+ * add_req_handle() - Create and add a request handle to the connection
+ * @conn_h: Connection handle over which the request has arrived.
+ * @msg_id: Message ID of the request.
+ * @txn_id: Transaction ID of the request.
+ *
+ * @return: Pointer to request handle on success, NULL on error.
+ *
+ * This function creates a request handle to track the request that arrives
+ * on a connection. This function then adds it to the connection's request
+ * handle list.
+ */
+static struct req_handle *add_req_handle(struct qmi_svc_clnt_conn *conn_h,
+					 uint16_t msg_id, uint16_t txn_id)
+{
+	struct req_handle *req_h;
+
+	req_h = kmalloc(sizeof(struct req_handle), GFP_KERNEL);
+	if (!req_h)
+		return NULL;
+
+	req_h->conn_h = conn_h;
+	req_h->msg_id = msg_id;
+	req_h->txn_id = txn_id;
+	list_add_tail(&req_h->list, &conn_h->req_handle_list);
+	return req_h;
+}
+
+/**
+ * verify_req_handle() - Verify the validity of a request handle
+ * @conn_h: Connection handle over which the request has arrived.
+ * @req_h: Request handle to be verified.
+ *
+ * @return: true on success, false on failure.
+ *
+ * This function is used to check if the request handle is present in
+ * the connection handle.
+ */
+static bool verify_req_handle(struct qmi_svc_clnt_conn *conn_h,
+			      struct req_handle *req_h)
+{
+	struct req_handle *temp_req_h;
+
+	list_for_each_entry(temp_req_h, &conn_h->req_handle_list, list) {
+		if (temp_req_h == req_h)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * rmv_req_handle() - Remove and destroy the request handle
+ * @req_h: Request handle to be removed and destroyed.
+ *
+ * @return: 0.
+ */
+static int rmv_req_handle(struct req_handle *req_h)
+{
+	list_del(&req_h->list);
+	kfree(req_h);
+	return 0;
+}
+
+/**
+ * add_svc_clnt_conn() - Create and add a connection handle to a service
+ * @handle: QMI handle in which the service is hosted.
+ * @clnt_addr: Address of the client connecting with the service.
+ * @clnt_addr_len: Length of the client address.
+ *
+ * @return: Pointer to connection handle on success, NULL on error.
+ *
+ * This function is used to create a connection handle that binds the service
+ * with a client. This function is called on a service's QMI handle when a
+ * client sends its first message to the service.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static struct qmi_svc_clnt_conn *add_svc_clnt_conn(
+	struct qmi_handle *handle, void *clnt_addr, size_t clnt_addr_len)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+
+	conn_h = kmalloc(sizeof(struct qmi_svc_clnt_conn), GFP_KERNEL);
+	if (!conn_h)
+		return NULL;
+
+	conn_h->clnt_addr = kmalloc(clnt_addr_len, GFP_KERNEL);
+	if (!conn_h->clnt_addr) {
+		kfree(conn_h);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&conn_h->list);
+	conn_h->svc_handle = handle;
+	memcpy(conn_h->clnt_addr, clnt_addr, clnt_addr_len);
+	conn_h->clnt_addr_len = clnt_addr_len;
+	INIT_LIST_HEAD(&conn_h->req_handle_list);
+	INIT_DELAYED_WORK(&conn_h->resume_tx_work, svc_resume_tx_worker);
+	INIT_LIST_HEAD(&conn_h->pending_txn_list);
+	mutex_init(&conn_h->pending_txn_lock);
+	list_add_tail(&conn_h->list, &handle->conn_list);
+	return conn_h;
+}
+
+/**
+ * find_svc_clnt_conn() - Find the existence of a client<->service connection
+ * @handle: Service's QMI handle.
+ * @clnt_addr: Address of the client to be present in the connection.
+ * @clnt_addr_len: Length of the client address.
+ *
+ * @return: Pointer to connection handle if the matching connection is found,
+ *          NULL if the connection is not found.
+ *
+ * This function is used to find the existence of a client<->service connection
+ * handle in a service's QMI handle. This function tries to match the client
+ * address in the existing connections.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static struct qmi_svc_clnt_conn *find_svc_clnt_conn(
+	struct qmi_handle *handle, void *clnt_addr, size_t clnt_addr_len)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+
+	list_for_each_entry(conn_h, &handle->conn_list, list) {
+		if (!memcmp(conn_h->clnt_addr, clnt_addr, clnt_addr_len))
+			return conn_h;
+	}
+	return NULL;
+}
+
+/**
+ * verify_svc_clnt_conn() - Verify the existence of a connection handle
+ * @handle: Service's QMI handle.
+ * @conn_h: Connection handle to be verified.
+ *
+ * @return: true on success, false on failure.
+ *
+ * This function is used to verify the existence of a connection in the
+ * connection list maintained by the service.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static bool verify_svc_clnt_conn(struct qmi_handle *handle,
+				 struct qmi_svc_clnt_conn *conn_h)
+{
+	struct qmi_svc_clnt_conn *temp_conn_h;
+
+	list_for_each_entry(temp_conn_h, &handle->conn_list, list) {
+		if (temp_conn_h == conn_h)
+			return true;
+	}
+	return false;
+}
+
+/**
+ * rmv_svc_clnt_conn() - Remove the connection handle info from the service
+ * @conn_h: Connection handle to be removed.
+ *
+ * This function removes a connection handle from a service's QMI handle.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static void rmv_svc_clnt_conn(struct qmi_svc_clnt_conn *conn_h)
+{
+	struct req_handle *req_h, *temp_req_h;
+	struct qmi_txn *txn_h, *temp_txn_h;
+
+	list_del(&conn_h->list);
+	list_for_each_entry_safe(req_h, temp_req_h,
+				 &conn_h->req_handle_list, list)
+		rmv_req_handle(req_h);
+
+	mutex_lock(&conn_h->pending_txn_lock);
+	list_for_each_entry_safe(txn_h, temp_txn_h,
+				 &conn_h->pending_txn_list, list) {
+		list_del(&txn_h->list);
+		kfree(txn_h->enc_data);
+		kfree(txn_h);
+	}
+	mutex_unlock(&conn_h->pending_txn_lock);
+	flush_delayed_work(&conn_h->resume_tx_work);
+	kfree(conn_h->clnt_addr);
+	kfree(conn_h);
+}
+
+/**
+ * qmi_event_notify() - Notification function to QMI client/service interface
+ * @event: Type of event that gets notified.
+ * @oob_data: Any out-of-band data associated with event.
+ * @oob_data_len: Length of the out-of-band data, if any.
+ * @priv: Private data.
+ *
+ * This function is called by the underlying transport to notify the QMI
+ * interface regarding any incoming event. This function is registered by
+ * QMI interface when it opens a port/handle with the underlying transport.
+ */
+static void qmi_event_notify(unsigned int event, void *oob_data,
+			     size_t oob_data_len, void *priv)
+{
+	struct qmi_notify_event_work *notify_work;
+	struct qmi_handle *handle;
+	uint32_t key = 0;
+
+	notify_work = kmalloc(sizeof(struct qmi_notify_event_work),
+			      GFP_KERNEL);
+	if (!notify_work)
+		return;
+
+	notify_work->event = event;
+	if (oob_data) {
+		notify_work->oob_data = kmalloc(oob_data_len, GFP_KERNEL);
+		if (!notify_work->oob_data) {
+			pr_err("%s: Couldn't allocate oob_data @ %d to %p\n",
+				__func__, event, priv);
+			kfree(notify_work);
+			return;
+		}
+		memcpy(notify_work->oob_data, oob_data, oob_data_len);
+	} else {
+		notify_work->oob_data = NULL;
+	}
+	notify_work->oob_data_len = oob_data_len;
+	notify_work->priv = priv;
+	INIT_WORK(&notify_work->work, qmi_notify_event_worker);
+
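+	/* Queue the work only if priv still refers to a registered handle */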
+	mutex_lock(&handle_hash_tbl_lock);
+	hash_for_each_possible(handle_hash_tbl, handle, handle_hash, key) {
+		if (handle == (struct qmi_handle *)priv) {
+			queue_work(handle->handle_wq,
+				   &notify_work->work);
+			mutex_unlock(&handle_hash_tbl_lock);
+			return;
+		}
+	}
+	mutex_unlock(&handle_hash_tbl_lock);
+	kfree(notify_work->oob_data);
+	kfree(notify_work);
+}
+
+static void qmi_notify_event_worker(struct work_struct *work)
+{
+	struct qmi_notify_event_work *notify_work =
+		container_of(work, struct qmi_notify_event_work, work);
+	struct qmi_handle *handle = (struct qmi_handle *)notify_work->priv;
+	unsigned long flags;
+
+	if (!handle)
+		return;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		kfree(notify_work->oob_data);
+		kfree(notify_work);
+		return;
+	}
+
+	switch (notify_work->event) {
+	case IPC_ROUTER_CTRL_CMD_DATA:
+		spin_lock_irqsave(&handle->notify_lock, flags);
+		handle->notify(handle, QMI_RECV_MSG, handle->notify_priv);
+		spin_unlock_irqrestore(&handle->notify_lock, flags);
+		break;
+
+	case IPC_ROUTER_CTRL_CMD_RESUME_TX:
+		if (handle->handle_type == QMI_CLIENT_HANDLE) {
+			queue_delayed_work(handle->handle_wq,
+					   &handle->resume_tx_work,
+					   msecs_to_jiffies(0));
+		} else if (handle->handle_type == QMI_SERVICE_HANDLE) {
+			struct msm_ipc_addr rtx_addr = {0};
+			struct qmi_svc_clnt_conn *conn_h;
+			union rr_control_msg *msg;
+
+			msg = (union rr_control_msg *)notify_work->oob_data;
+			rtx_addr.addrtype = MSM_IPC_ADDR_ID;
+			rtx_addr.addr.port_addr.node_id = msg->cli.node_id;
+			rtx_addr.addr.port_addr.port_id = msg->cli.port_id;
+			conn_h = find_svc_clnt_conn(handle, &rtx_addr,
+						    sizeof(rtx_addr));
+			if (conn_h)
+				queue_delayed_work(handle->handle_wq,
+						   &conn_h->resume_tx_work,
+						   msecs_to_jiffies(0));
+		}
+		break;
+
+	case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
+	case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
+	case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
+		queue_delayed_work(handle->handle_wq,
+				   &handle->ctl_work, msecs_to_jiffies(0));
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&handle->handle_lock);
+	kfree(notify_work->oob_data);
+	kfree(notify_work);
+}
+
+/**
+ * clnt_resume_tx_worker() - Handle the Resume_Tx event
+ * @work: Pointer to the work structure.
+ *
+ * This function handles the resume_tx event for any QMI client that
+ * exists in the kernel space. This function parses the pending_txn_list of
+ * the handle and attempts a send for each transaction in that list.
+ */
+static void clnt_resume_tx_worker(struct work_struct *work)
+{
+	struct delayed_work *rtx_work = to_delayed_work(work);
+	struct qmi_handle *handle =
+		container_of(rtx_work, struct qmi_handle, resume_tx_work);
+	struct qmi_txn *pend_txn, *temp_txn;
+	int ret;
+	uint16_t msg_id;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset)
+		goto out_clnt_handle_rtx;
+
+	list_for_each_entry_safe(pend_txn, temp_txn,
+				&handle->pending_txn_list, list) {
+		ret = msm_ipc_router_send_msg(
+				(struct msm_ipc_port *)handle->src_port,
+				(struct msm_ipc_addr *)handle->dest_info,
+				pend_txn->enc_data, pend_txn->enc_data_len);
+
+		if (ret == -EAGAIN)
+			break;
+		msg_id = ((struct qmi_header *)pend_txn->enc_data)->msg_id;
+		kfree(pend_txn->enc_data);
+		if (ret < 0) {
+			pr_err("%s: Sending transaction %d from port %d failed",
+				__func__, pend_txn->txn_id,
+				((struct msm_ipc_port *)handle->src_port)->
+							this_port.port_id);
+			if (pend_txn->type == QMI_ASYNC_TXN) {
+				pend_txn->resp_cb(pend_txn->handle,
+						msg_id, pend_txn->resp,
+						pend_txn->resp_cb_data,
+						ret);
+				list_del(&pend_txn->list);
+				kfree(pend_txn);
+			} else if (pend_txn->type == QMI_SYNC_TXN) {
+				pend_txn->send_stat = ret;
+				wake_up(&pend_txn->wait_q);
+			}
+		} else {
+			list_del(&pend_txn->list);
+			list_add_tail(&pend_txn->list, &handle->txn_list);
+		}
+	}
+out_clnt_handle_rtx:
+	mutex_unlock(&handle->handle_lock);
+}
+
+/**
+ * svc_resume_tx_worker() - Handle the Resume_Tx event
+ * @work: Pointer to the work structure.
+ *
+ * This function handles the resume_tx event for any QMI service that
+ * exists in the kernel space. This function parses the pending_txn_list of
+ * the connection handle and attempts a send for each transaction in that list.
+ */
+static void svc_resume_tx_worker(struct work_struct *work)
+{
+	struct delayed_work *rtx_work = to_delayed_work(work);
+	struct qmi_svc_clnt_conn *conn_h =
+		container_of(rtx_work, struct qmi_svc_clnt_conn,
+			     resume_tx_work);
+	struct qmi_handle *handle = (struct qmi_handle *)conn_h->svc_handle;
+	struct qmi_txn *pend_txn, *temp_txn;
+	int ret;
+
+	mutex_lock(&conn_h->pending_txn_lock);
+	if (handle->handle_reset)
+		goto out_svc_handle_rtx;
+
+	list_for_each_entry_safe(pend_txn, temp_txn,
+				&conn_h->pending_txn_list, list) {
+		ret = msm_ipc_router_send_msg(
+				(struct msm_ipc_port *)handle->src_port,
+				(struct msm_ipc_addr *)conn_h->clnt_addr,
+				pend_txn->enc_data, pend_txn->enc_data_len);
+
+		if (ret == -EAGAIN)
+			break;
+		if (ret < 0)
+			pr_err("%s: Sending transaction %d from port %d failed",
+				__func__, pend_txn->txn_id,
+				((struct msm_ipc_port *)handle->src_port)->
+							this_port.port_id);
+		list_del(&pend_txn->list);
+		kfree(pend_txn->enc_data);
+		kfree(pend_txn);
+	}
+out_svc_handle_rtx:
+	mutex_unlock(&conn_h->pending_txn_lock);
+}
+
+/**
+ * handle_rmv_server() - Handle the server exit event
+ * @handle: Client handle on which the server exit event is received.
+ * @ctl_msg: Information about the server that is exiting.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static int handle_rmv_server(struct qmi_handle *handle,
+			     union rr_control_msg *ctl_msg)
+{
+	struct msm_ipc_addr *svc_addr;
+	unsigned long flags;
+
+	if (unlikely(!handle->dest_info))
+		return 0;
+
+	svc_addr = (struct msm_ipc_addr *)(handle->dest_info);
+	if (svc_addr->addr.port_addr.node_id == ctl_msg->srv.node_id &&
+	    svc_addr->addr.port_addr.port_id == ctl_msg->srv.port_id) {
+		/* Wakeup any threads waiting for the response */
+		handle->handle_reset = 1;
+		clean_txn_info(handle);
+
+		spin_lock_irqsave(&handle->notify_lock, flags);
+		handle->notify(handle, QMI_SERVER_EXIT, handle->notify_priv);
+		spin_unlock_irqrestore(&handle->notify_lock, flags);
+	}
+	return 0;
+}
+
+/**
+ * handle_rmv_client() - Handle the client exit event
+ * @handle: Service handle on which the client exit event is received.
+ * @ctl_msg: Information about the client that is exiting.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static int handle_rmv_client(struct qmi_handle *handle,
+			     union rr_control_msg *ctl_msg)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+	struct msm_ipc_addr clnt_addr = {0};
+	unsigned long flags;
+
+	clnt_addr.addrtype = MSM_IPC_ADDR_ID;
+	clnt_addr.addr.port_addr.node_id = ctl_msg->cli.node_id;
+	clnt_addr.addr.port_addr.port_id = ctl_msg->cli.port_id;
+	conn_h = find_svc_clnt_conn(handle, &clnt_addr, sizeof(clnt_addr));
+	if (conn_h) {
+		spin_lock_irqsave(&handle->notify_lock, flags);
+		handle->svc_ops_options->disconnect_cb(handle, conn_h);
+		spin_unlock_irqrestore(&handle->notify_lock, flags);
+		rmv_svc_clnt_conn(conn_h);
+	}
+	return 0;
+}
+
+/**
+ * handle_ctl_msg() - Worker function to handle the control events
+ * @work: Work item to map the QMI handle.
+ *
+ * This worker handles incoming control events such as REMOVE_SERVER and
+ * REMOVE_CLIENT. The work item is unique to a handle, so each invocation
+ * handles the control events for that specific handle.
+ */
+static void handle_ctl_msg(struct work_struct *work)
+{
+	struct delayed_work *ctl_work = to_delayed_work(work);
+	struct qmi_handle *handle =
+		container_of(ctl_work, struct qmi_handle, ctl_work);
+	unsigned int ctl_msg_len;
+	union rr_control_msg *ctl_msg = NULL;
+	struct msm_ipc_addr src_addr;
+	int rc;
+
+	mutex_lock(&handle->handle_lock);
+	while (1) {
+		if (handle->handle_reset)
+			break;
+
+		/* Read the messages */
+		rc = msm_ipc_router_read_msg(
+			(struct msm_ipc_port *)(handle->ctl_port),
+			&src_addr, (unsigned char **)&ctl_msg, &ctl_msg_len);
+		if (rc == -ENOMSG)
+			break;
+		if (rc < 0) {
+			pr_err("%s: Read failed %d\n", __func__, rc);
+			break;
+		}
+		if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER &&
+		    handle->handle_type == QMI_CLIENT_HANDLE)
+			handle_rmv_server(handle, ctl_msg);
+		else if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT &&
+			 handle->handle_type == QMI_SERVICE_HANDLE)
+			handle_rmv_client(handle, ctl_msg);
+		kfree(ctl_msg);
+	}
+	mutex_unlock(&handle->handle_lock);
+}
+
+struct qmi_handle *qmi_handle_create(
+	void (*notify)(struct qmi_handle *handle,
+		       enum qmi_event_type event, void *notify_priv),
+	void *notify_priv)
+{
+	struct qmi_handle *temp_handle;
+	struct msm_ipc_port *port_ptr, *ctl_port_ptr;
+	static uint32_t handle_count;
+	char wq_name[MAX_WQ_NAME_LEN];
+
+	temp_handle = kzalloc(sizeof(struct qmi_handle), GFP_KERNEL);
+	if (!temp_handle)
+		return NULL;
+	mutex_lock(&handle_hash_tbl_lock);
+	handle_count++;
+	scnprintf(wq_name, MAX_WQ_NAME_LEN, "qmi_hndl%08x", handle_count);
+	hash_add(handle_hash_tbl, &temp_handle->handle_hash, 0);
+	temp_handle->handle_wq = create_singlethread_workqueue(wq_name);
+	mutex_unlock(&handle_hash_tbl_lock);
+	if (!temp_handle->handle_wq) {
+		pr_err("%s: Couldn't create workqueue for handle\n", __func__);
+		goto handle_create_err1;
+	}
+
+	/* Initialize common elements */
+	temp_handle->handle_type = QMI_CLIENT_HANDLE;
+	temp_handle->next_txn_id = 1;
+	mutex_init(&temp_handle->handle_lock);
+	spin_lock_init(&temp_handle->notify_lock);
+	temp_handle->notify = notify;
+	temp_handle->notify_priv = notify_priv;
+	init_waitqueue_head(&temp_handle->reset_waitq);
+	INIT_DELAYED_WORK(&temp_handle->resume_tx_work, clnt_resume_tx_worker);
+	INIT_DELAYED_WORK(&temp_handle->ctl_work, handle_ctl_msg);
+
+	/* Initialize client specific elements */
+	INIT_LIST_HEAD(&temp_handle->txn_list);
+	INIT_LIST_HEAD(&temp_handle->pending_txn_list);
+
+	/* Initialize service specific elements */
+	INIT_LIST_HEAD(&temp_handle->conn_list);
+
+	port_ptr = msm_ipc_router_create_port(qmi_event_notify,
+					      (void *)temp_handle);
+	if (!port_ptr) {
+		pr_err("%s: IPC router port creation failed\n", __func__);
+		goto handle_create_err2;
+	}
+
+	ctl_port_ptr = msm_ipc_router_create_port(qmi_event_notify,
+						  (void *)temp_handle);
+	if (!ctl_port_ptr) {
+		pr_err("%s: IPC router ctl port creation failed\n", __func__);
+		goto handle_create_err3;
+	}
+	msm_ipc_router_bind_control_port(ctl_port_ptr);
+
+	temp_handle->src_port = port_ptr;
+	temp_handle->ctl_port = ctl_port_ptr;
+	return temp_handle;
+
+handle_create_err3:
+	msm_ipc_router_close_port(port_ptr);
+handle_create_err2:
+	destroy_workqueue(temp_handle->handle_wq);
+handle_create_err1:
+	mutex_lock(&handle_hash_tbl_lock);
+	hash_del(&temp_handle->handle_hash);
+	mutex_unlock(&handle_hash_tbl_lock);
+	kfree(temp_handle);
+	return NULL;
+}
+EXPORT_SYMBOL(qmi_handle_create);
+
+static void clean_txn_info(struct qmi_handle *handle)
+{
+	struct qmi_txn *txn_handle, *temp_txn_handle, *pend_txn;
+
+	list_for_each_entry_safe(pend_txn, temp_txn_handle,
+				&handle->pending_txn_list, list) {
+		if (pend_txn->type == QMI_ASYNC_TXN) {
+			list_del(&pend_txn->list);
+			pend_txn->resp_cb(pend_txn->handle,
+					((struct qmi_header *)
+					pend_txn->enc_data)->msg_id,
+					pend_txn->resp, pend_txn->resp_cb_data,
+					-ENETRESET);
+			kfree(pend_txn->enc_data);
+			kfree(pend_txn);
+		} else if (pend_txn->type == QMI_SYNC_TXN) {
+			kfree(pend_txn->enc_data);
+			wake_up(&pend_txn->wait_q);
+		}
+	}
+	list_for_each_entry_safe(txn_handle, temp_txn_handle,
+				 &handle->txn_list, list) {
+		if (txn_handle->type == QMI_ASYNC_TXN) {
+			list_del(&txn_handle->list);
+			kfree(txn_handle);
+		} else if (txn_handle->type == QMI_SYNC_TXN) {
+			wake_up(&txn_handle->wait_q);
+		}
+	}
+}
+
+int qmi_handle_destroy(struct qmi_handle *handle)
+{
+	DEFINE_WAIT(wait);
+
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle_hash_tbl_lock);
+	hash_del(&handle->handle_hash);
+	mutex_unlock(&handle_hash_tbl_lock);
+
+	mutex_lock(&handle->handle_lock);
+	handle->handle_reset = 1;
+	clean_txn_info(handle);
+	msm_ipc_router_close_port((struct msm_ipc_port *)(handle->ctl_port));
+	msm_ipc_router_close_port((struct msm_ipc_port *)(handle->src_port));
+	mutex_unlock(&handle->handle_lock);
+	flush_workqueue(handle->handle_wq);
+	destroy_workqueue(handle->handle_wq);
+
+	mutex_lock(&handle->handle_lock);
+	while (!list_empty(&handle->txn_list) ||
+		    !list_empty(&handle->pending_txn_list)) {
+		prepare_to_wait(&handle->reset_waitq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		mutex_unlock(&handle->handle_lock);
+		schedule();
+		mutex_lock(&handle->handle_lock);
+		finish_wait(&handle->reset_waitq, &wait);
+	}
+	mutex_unlock(&handle->handle_lock);
+	kfree(handle->dest_info);
+	kfree(handle);
+	return 0;
+}
+EXPORT_SYMBOL(qmi_handle_destroy);
+
+int qmi_register_ind_cb(struct qmi_handle *handle,
+	void (*ind_cb)(struct qmi_handle *handle,
+		       unsigned int msg_id, void *msg,
+		       unsigned int msg_len, void *ind_cb_priv),
+	void *ind_cb_priv)
+{
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENETRESET;
+	}
+
+	handle->ind_cb = ind_cb;
+	handle->ind_cb_priv = ind_cb_priv;
+	mutex_unlock(&handle->handle_lock);
+	return 0;
+}
+EXPORT_SYMBOL(qmi_register_ind_cb);
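+
+/*
+ * Example (illustrative sketch): minimal client bring-up using the handle
+ * APIs above. The service id/version/instance values and the callbacks are
+ * placeholders; the notify callback normally just schedules deferred work
+ * that drains messages with qmi_recv_msg().
+ *
+ *	clnt = qmi_handle_create(example_clnt_notify, NULL);
+ *	if (!clnt)
+ *		return -ENOMEM;
+ *	rc = qmi_connect_to_service(clnt, EXAMPLE_SVC_ID, 1, 0);
+ *	if (rc < 0)
+ *		goto release_handle;
+ *	rc = qmi_register_ind_cb(clnt, example_ind_cb, NULL);
+ *	if (rc < 0)
+ *		goto release_handle;
+ *	...
+ * release_handle:
+ *	qmi_handle_destroy(clnt);
+ */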
+
+static int qmi_encode_and_send_req(struct qmi_txn **ret_txn_handle,
+	struct qmi_handle *handle, enum txn_type type,
+	struct msg_desc *req_desc, void *req, unsigned int req_len,
+	struct msg_desc *resp_desc, void *resp, unsigned int resp_len,
+	void (*resp_cb)(struct qmi_handle *handle,
+			unsigned int msg_id, void *msg,
+			void *resp_cb_data, int stat),
+	void *resp_cb_data)
+{
+	struct qmi_txn *txn_handle;
+	int rc, encoded_req_len;
+	void *encoded_req;
+
+	if (!handle || !handle->dest_info ||
+	    !req_desc || !resp_desc || !resp)
+		return -EINVAL;
+
+	if ((!req && req_len) || (!req_len && req))
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENETRESET;
+	}
+
+	/* Allocate Transaction Info */
+	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
+	if (!txn_handle) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENOMEM;
+	}
+	txn_handle->type = type;
+	INIT_LIST_HEAD(&txn_handle->list);
+	init_waitqueue_head(&txn_handle->wait_q);
+
+	/* Cache the parameters passed along with the transaction */
+	txn_handle->handle = handle;
+	txn_handle->resp_desc = resp_desc;
+	txn_handle->resp = resp;
+	txn_handle->resp_len = resp_len;
+	txn_handle->resp_received = 0;
+	txn_handle->resp_cb = resp_cb;
+	txn_handle->resp_cb_data = resp_cb_data;
+	txn_handle->enc_data = NULL;
+	txn_handle->enc_data_len = 0;
+
+	/* Encode the request msg */
+	encoded_req_len = req_desc->max_msg_len + QMI_HEADER_SIZE;
+	encoded_req = kmalloc(encoded_req_len, GFP_KERNEL);
+	if (!encoded_req) {
+		rc = -ENOMEM;
+		goto encode_and_send_req_err1;
+	}
+	rc = qmi_kernel_encode(req_desc,
+		(void *)(encoded_req + QMI_HEADER_SIZE),
+		req_desc->max_msg_len, req);
+	if (rc < 0) {
+		pr_err("%s: Encode Failure %d\n", __func__, rc);
+		goto encode_and_send_req_err2;
+	}
+	encoded_req_len = rc;
+
+	/* Encode the header & Add to the txn_list */
+	if (!handle->next_txn_id)
+		handle->next_txn_id++;
+	txn_handle->txn_id = handle->next_txn_id++;
+	encode_qmi_header(encoded_req, QMI_REQUEST_CONTROL_FLAG,
+			  txn_handle->txn_id, req_desc->msg_id,
+			  encoded_req_len);
+	encoded_req_len += QMI_HEADER_SIZE;
+
+	/*
+	 * Check if this port has transactions queued to its pending list
+	 * and if there are any pending transactions then add the current
+	 * transaction to the pending list rather than sending it. This avoids
+	 * out-of-order message transfers.
+	 */
+	if (!list_empty(&handle->pending_txn_list)) {
+		rc = -EAGAIN;
+		goto append_pend_txn;
+	}
+
+	list_add_tail(&txn_handle->list, &handle->txn_list);
+	qmi_log(handle, QMI_REQUEST_CONTROL_FLAG, txn_handle->txn_id,
+			req_desc->msg_id, encoded_req_len);
+	/* Send the request */
+	rc = msm_ipc_router_send_msg((struct msm_ipc_port *)(handle->src_port),
+		(struct msm_ipc_addr *)handle->dest_info,
+		encoded_req, encoded_req_len);
+append_pend_txn:
+	if (rc == -EAGAIN) {
+		txn_handle->enc_data = encoded_req;
+		txn_handle->enc_data_len = encoded_req_len;
+		if (list_empty(&handle->pending_txn_list))
+			list_del(&txn_handle->list);
+		list_add_tail(&txn_handle->list, &handle->pending_txn_list);
+		if (ret_txn_handle)
+			*ret_txn_handle = txn_handle;
+		mutex_unlock(&handle->handle_lock);
+		return 0;
+	}
+	if (rc < 0) {
+		pr_err("%s: send_msg failed %d\n", __func__, rc);
+		goto encode_and_send_req_err3;
+	}
+	mutex_unlock(&handle->handle_lock);
+
+	kfree(encoded_req);
+	if (ret_txn_handle)
+		*ret_txn_handle = txn_handle;
+	return 0;
+
+encode_and_send_req_err3:
+	list_del(&txn_handle->list);
+encode_and_send_req_err2:
+	kfree(encoded_req);
+encode_and_send_req_err1:
+	kfree(txn_handle);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+
+int qmi_send_req_wait(struct qmi_handle *handle,
+		      struct msg_desc *req_desc,
+		      void *req, unsigned int req_len,
+		      struct msg_desc *resp_desc,
+		      void *resp, unsigned int resp_len,
+		      unsigned long timeout_ms)
+{
+	struct qmi_txn *txn_handle = NULL;
+	int rc;
+
+	/* Encode and send the request */
+	rc = qmi_encode_and_send_req(&txn_handle, handle, QMI_SYNC_TXN,
+				     req_desc, req, req_len,
+				     resp_desc, resp, resp_len,
+				     NULL, NULL);
+	if (rc < 0) {
+		pr_err("%s: Error encode & send req: %d\n", __func__, rc);
+		return rc;
+	}
+
+	/* Wait for the response */
+	if (!timeout_ms) {
+		wait_event(txn_handle->wait_q,
+			   (txn_handle->resp_received ||
+			    handle->handle_reset ||
+			   (txn_handle->send_stat < 0)));
+	} else {
+		rc = wait_event_timeout(txn_handle->wait_q,
+				(txn_handle->resp_received ||
+				handle->handle_reset ||
+				(txn_handle->send_stat < 0)),
+				msecs_to_jiffies(timeout_ms));
+		if (rc == 0)
+			rc = -ETIMEDOUT;
+	}
+
+	mutex_lock(&handle->handle_lock);
+	if (!txn_handle->resp_received) {
+		pr_err("%s: Response Wait Error %d\n", __func__, rc);
+		if (handle->handle_reset)
+			rc = -ENETRESET;
+		if (rc >= 0)
+			rc = -EFAULT;
+		if (txn_handle->send_stat < 0)
+			rc = txn_handle->send_stat;
+		goto send_req_wait_err;
+	}
+	rc = 0;
+
+send_req_wait_err:
+	list_del(&txn_handle->list);
+	kfree(txn_handle);
+	wake_up(&handle->reset_waitq);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_req_wait);
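+
+/*
+ * Example (illustrative sketch): issuing a synchronous request on a
+ * connected client handle. The request/response structures and the
+ * example_req_desc/example_resp_desc descriptors are placeholders that
+ * would normally come from the service's generated encode/decode tables.
+ *
+ *	struct example_req req = { 0 };
+ *	struct example_resp resp = { 0 };
+ *	int rc;
+ *
+ *	rc = qmi_send_req_wait(clnt, &example_req_desc, &req, sizeof(req),
+ *			       &example_resp_desc, &resp, sizeof(resp),
+ *			       5000);
+ *	if (rc < 0)
+ *		pr_err("example: sync request failed %d\n", rc);
+ */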
+
+int qmi_send_req_nowait(struct qmi_handle *handle,
+			struct msg_desc *req_desc,
+			void *req, unsigned int req_len,
+			struct msg_desc *resp_desc,
+			void *resp, unsigned int resp_len,
+			void (*resp_cb)(struct qmi_handle *handle,
+					unsigned int msg_id, void *msg,
+					void *resp_cb_data, int stat),
+			void *resp_cb_data)
+{
+	return qmi_encode_and_send_req(NULL, handle, QMI_ASYNC_TXN,
+				       req_desc, req, req_len,
+				       resp_desc, resp, resp_len,
+				       resp_cb, resp_cb_data);
+}
+EXPORT_SYMBOL(qmi_send_req_nowait);
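+
+/*
+ * Example (illustrative sketch): issuing an asynchronous request. The
+ * response buffer must remain valid until the callback runs with the final
+ * status; structures, descriptors and names are placeholders.
+ *
+ *	static void example_resp_cb(struct qmi_handle *handle,
+ *				    unsigned int msg_id, void *resp,
+ *				    void *resp_cb_data, int stat)
+ *	{
+ *		if (stat < 0)
+ *			pr_err("example: async request failed %d\n", stat);
+ *	}
+ *
+ *	rc = qmi_send_req_nowait(clnt, &example_req_desc, &req, sizeof(req),
+ *				 &example_resp_desc, &resp, sizeof(resp),
+ *				 example_resp_cb, NULL);
+ */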
+
+/**
+ * qmi_encode_and_send_resp() - Encode and send QMI response
+ * @handle: QMI service handle sending the response.
+ * @conn_h: Connection handle to which the response is sent.
+ * @req_h: Request handle for which the response is sent.
+ * @resp_desc: Message Descriptor describing the response structure.
+ * @resp: Response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function encodes and sends a response message from a service to
+ * a client identified by the connection handle. The request for which
+ * the response is sent is identified by the request handle; if @req_h is
+ * NULL, the message is sent as an indication.
+ *
+ * This function must be called with handle->handle_lock locked.
+ */
+static int qmi_encode_and_send_resp(struct qmi_handle *handle,
+	struct qmi_svc_clnt_conn *conn_h, struct req_handle *req_h,
+	struct msg_desc *resp_desc, void *resp, unsigned int resp_len)
+{
+	struct qmi_txn *txn_handle;
+	uint16_t cntl_flag;
+	int rc;
+	int encoded_resp_len;
+	void *encoded_resp;
+
+	if (handle->handle_reset) {
+		rc = -ENETRESET;
+		goto encode_and_send_resp_err0;
+	}
+
+	if (handle->handle_type != QMI_SERVICE_HANDLE ||
+	    !verify_svc_clnt_conn(handle, conn_h) ||
+	    (req_h && !verify_req_handle(conn_h, req_h))) {
+		rc = -EINVAL;
+		goto encode_and_send_resp_err0;
+	}
+
+	/* Allocate Transaction Info */
+	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
+	if (!txn_handle) {
+		rc = -ENOMEM;
+		goto encode_and_send_resp_err0;
+	}
+	INIT_LIST_HEAD(&txn_handle->list);
+	init_waitqueue_head(&txn_handle->wait_q);
+	txn_handle->handle = handle;
+	txn_handle->enc_data = NULL;
+	txn_handle->enc_data_len = 0;
+
+	/* Encode the response msg */
+	encoded_resp_len = resp_desc->max_msg_len + QMI_HEADER_SIZE;
+	encoded_resp = kmalloc(encoded_resp_len, GFP_KERNEL);
+	if (!encoded_resp) {
+		rc = -ENOMEM;
+		goto encode_and_send_resp_err1;
+	}
+	rc = qmi_kernel_encode(resp_desc,
+		(void *)(encoded_resp + QMI_HEADER_SIZE),
+		resp_desc->max_msg_len, resp);
+	if (rc < 0) {
+		pr_err("%s: Encode Failure %d\n", __func__, rc);
+		goto encode_and_send_resp_err2;
+	}
+	encoded_resp_len = rc;
+
+	/* Encode the header & Add to the txn_list */
+	if (req_h) {
+		txn_handle->txn_id = req_h->txn_id;
+		cntl_flag = QMI_RESPONSE_CONTROL_FLAG;
+	} else {
+		if (!handle->next_txn_id)
+			handle->next_txn_id++;
+		txn_handle->txn_id = handle->next_txn_id++;
+		cntl_flag = QMI_INDICATION_CONTROL_FLAG;
+	}
+	encode_qmi_header(encoded_resp, cntl_flag,
+			  txn_handle->txn_id, resp_desc->msg_id,
+			  encoded_resp_len);
+	encoded_resp_len += QMI_HEADER_SIZE;
+
+	qmi_log(handle, cntl_flag, txn_handle->txn_id,
+			resp_desc->msg_id, encoded_resp_len);
+	/*
+	 * Check if this svc_clnt has transactions queued to its pending list
+	 * and if there are any pending transactions then add the current
+	 * transaction to the pending list rather than sending it. This avoids
+	 * out-of-order message transfers.
+	 */
+	mutex_lock(&conn_h->pending_txn_lock);
+	if (list_empty(&conn_h->pending_txn_list))
+		rc = msm_ipc_router_send_msg(
+			(struct msm_ipc_port *)(handle->src_port),
+			(struct msm_ipc_addr *)conn_h->clnt_addr,
+			encoded_resp, encoded_resp_len);
+	else
+		rc = -EAGAIN;
+
+	if (req_h)
+		rmv_req_handle(req_h);
+	if (rc == -EAGAIN) {
+		txn_handle->enc_data = encoded_resp;
+		txn_handle->enc_data_len = encoded_resp_len;
+		list_add_tail(&txn_handle->list, &conn_h->pending_txn_list);
+		mutex_unlock(&conn_h->pending_txn_lock);
+		return 0;
+	}
+	mutex_unlock(&conn_h->pending_txn_lock);
+	if (rc < 0)
+		pr_err("%s: send_msg failed %d\n", __func__, rc);
+encode_and_send_resp_err2:
+	kfree(encoded_resp);
+encode_and_send_resp_err1:
+	kfree(txn_handle);
+encode_and_send_resp_err0:
+	return rc;
+}
+
+/**
+ * qmi_send_resp() - Send response to a request
+ * @handle: QMI handle from which the response is sent.
+ * @conn_handle: Connection handle of the client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp(struct qmi_handle *handle, void *conn_handle,
+		  void *req_handle, struct msg_desc *resp_desc,
+		  void *resp, unsigned int resp_len)
+{
+	int rc;
+	struct qmi_svc_clnt_conn *conn_h;
+	struct req_handle *req_h;
+
+	if (!handle || !conn_handle || !req_handle ||
+	    !resp_desc || !resp || !resp_len)
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	req_h = (struct req_handle *)req_handle;
+	mutex_lock(&handle->handle_lock);
+	rc = qmi_encode_and_send_resp(handle, conn_h, req_h,
+				      resp_desc, resp, resp_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending response\n", __func__);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_resp);
+
+/**
+ * qmi_send_resp_from_cb() - Send response to a request from request_cb
+ * @handle: QMI handle from which the response is sent.
+ * @conn_handle: Connection handle of the client to which the response is sent.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp_from_cb(struct qmi_handle *handle, void *conn_handle,
+			  void *req_handle, struct msg_desc *resp_desc,
+			  void *resp, unsigned int resp_len)
+{
+	int rc;
+	struct qmi_svc_clnt_conn *conn_h;
+	struct req_handle *req_h;
+
+	if (!handle || !conn_handle || !req_handle ||
+	    !resp_desc || !resp || !resp_len)
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	req_h = (struct req_handle *)req_handle;
+	rc = qmi_encode_and_send_resp(handle, conn_h, req_h,
+				      resp_desc, resp, resp_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending response\n", __func__);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_resp_from_cb);
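+
+/*
+ * Example (illustrative sketch): answering a request directly from the
+ * service's req_cb, where handle_lock is already held, so the _from_cb
+ * variant is used. The response structure and descriptor are placeholders,
+ * and the callback prototype shown simply mirrors the call site in
+ * handle_qmi_request().
+ *
+ *	static int example_req_cb(struct qmi_handle *handle, void *conn_h,
+ *				  void *req_h, unsigned int msg_id, void *req)
+ *	{
+ *		struct example_resp resp = { 0 };
+ *
+ *		return qmi_send_resp_from_cb(handle, conn_h, req_h,
+ *					     &example_resp_desc,
+ *					     &resp, sizeof(resp));
+ *	}
+ */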
+
+/**
+ * qmi_send_ind() - Send unsolicited event/indication to a client
+ * @handle: QMI handle from which the indication is sent.
+ * @conn_handle: Connection handle of the client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind(struct qmi_handle *handle, void *conn_handle,
+		 struct msg_desc *ind_desc, void *ind, unsigned int ind_len)
+{
+	int rc = 0;
+	struct qmi_svc_clnt_conn *conn_h;
+
+	if (!handle || !conn_handle || !ind_desc)
+		return -EINVAL;
+
+	if ((!ind && ind_len) || (ind && !ind_len))
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	mutex_lock(&handle->handle_lock);
+	rc = qmi_encode_and_send_resp(handle, conn_h, NULL,
+				      ind_desc, ind, ind_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending ind.\n", __func__);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_ind);
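+
+/*
+ * Example (illustrative sketch): pushing an unsolicited indication to a
+ * client whose connection handle was saved from the service's connect_cb.
+ * The indication structure and descriptor are placeholders.
+ *
+ *	struct example_ind ind = { 0 };
+ *
+ *	rc = qmi_send_ind(svc_handle, saved_conn_h, &example_ind_desc,
+ *			  &ind, sizeof(ind));
+ *	if (rc < 0)
+ *		pr_err("example: indication failed %d\n", rc);
+ */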
+
+/**
+ * qmi_send_ind_from_cb() - Send indication to a client from registration_cb
+ * @handle: QMI handle from which the indication is sent.
+ * @conn_handle: Connection handle of the client to which the indication is sent.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind_from_cb(struct qmi_handle *handle, void *conn_handle,
+		struct msg_desc *ind_desc, void *ind, unsigned int ind_len)
+{
+	int rc = 0;
+	struct qmi_svc_clnt_conn *conn_h;
+
+	if (!handle || !conn_handle || !ind_desc)
+		return -EINVAL;
+
+	if ((!ind && ind_len) || (ind && !ind_len))
+		return -EINVAL;
+
+	conn_h = (struct qmi_svc_clnt_conn *)conn_handle;
+	rc = qmi_encode_and_send_resp(handle, conn_h, NULL,
+				      ind_desc, ind, ind_len);
+	if (rc < 0)
+		pr_err("%s: Error encoding and sending ind.\n", __func__);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_send_ind_from_cb);
+
+/**
+ * translate_err_code() - Translate Linux error codes into QMI error codes
+ * @err: Standard Linux error codes to be translated.
+ *
+ * @return: Return QMI error code.
+ */
+static int translate_err_code(int err)
+{
+	int rc;
+
+	switch (err) {
+	case -ECONNREFUSED:
+		rc = QMI_ERR_CLIENT_IDS_EXHAUSTED_V01;
+		break;
+	case -EBADMSG:
+		rc = QMI_ERR_ENCODING_V01;
+		break;
+	case -ENOMEM:
+		rc = QMI_ERR_NO_MEMORY_V01;
+		break;
+	case -EOPNOTSUPP:
+		rc = QMI_ERR_MALFORMED_MSG_V01;
+		break;
+	case -ENOTSUPP:
+		rc = QMI_ERR_NOT_SUPPORTED_V01;
+		break;
+	default:
+		rc = QMI_ERR_INTERNAL_V01;
+		break;
+	}
+	return rc;
+}
+
+/**
+ * send_err_resp() - Send the error response
+ * @handle: Service handle from which the response is sent.
+ * @conn_h: Client<->Service connection on which the response is sent.
+ * @addr: Client address to which the error response is sent.
+ * @msg_id: Request message id for which the error response is sent.
+ * @txn_id: Request Transaction ID for which the error response is sent.
+ * @err: Error code to be sent.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function is used to send an error response from within the QMI
+ * service interface. This function is called when the service returns
+ * an error to the QMI interface while handling a request.
+ */
+static int send_err_resp(struct qmi_handle *handle,
+			 struct qmi_svc_clnt_conn *conn_h, void *addr,
+			 uint16_t msg_id, uint16_t txn_id, int err)
+{
+	struct qmi_response_type_v01 err_resp;
+	struct qmi_txn *txn_handle;
+	struct msm_ipc_addr *dest_addr;
+	int rc;
+	int encoded_resp_len;
+	void *encoded_resp;
+
+	if (handle->handle_reset)
+		return -ENETRESET;
+
+	err_resp.result = QMI_RESULT_FAILURE_V01;
+	err_resp.error = translate_err_code(err);
+
+	/* Allocate Transaction Info */
+	txn_handle = kzalloc(sizeof(struct qmi_txn), GFP_KERNEL);
+	if (!txn_handle)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&txn_handle->list);
+	init_waitqueue_head(&txn_handle->wait_q);
+	txn_handle->handle = handle;
+	txn_handle->enc_data = NULL;
+	txn_handle->enc_data_len = 0;
+
+	/* Encode the response msg */
+	encoded_resp_len = err_resp_desc.max_msg_len + QMI_HEADER_SIZE;
+	encoded_resp = kmalloc(encoded_resp_len, GFP_KERNEL);
+	if (!encoded_resp) {
+		rc = -ENOMEM;
+		goto encode_and_send_err_resp_err0;
+	}
+	rc = qmi_kernel_encode(&err_resp_desc,
+		(void *)(encoded_resp + QMI_HEADER_SIZE),
+		err_resp_desc.max_msg_len, &err_resp);
+	if (rc < 0) {
+		pr_err("%s: Encode Failure %d\n", __func__, rc);
+		goto encode_and_send_err_resp_err1;
+	}
+	encoded_resp_len = rc;
+
+	/* Encode the header & Add to the txn_list */
+	txn_handle->txn_id = txn_id;
+	encode_qmi_header(encoded_resp, QMI_RESPONSE_CONTROL_FLAG,
+			  txn_handle->txn_id, msg_id,
+			  encoded_resp_len);
+	encoded_resp_len += QMI_HEADER_SIZE;
+
+	qmi_log(handle, QMI_RESPONSE_CONTROL_FLAG, txn_id,
+			msg_id, encoded_resp_len);
+	/*
+	 * Check if this svc_clnt has transactions queued to its pending list
+	 * and if there are any pending transactions then add the current
+	 * transaction to the pending list rather than sending it. This avoids
+	 * out-of-order message transfers.
+	 */
+	if (!conn_h) {
+		dest_addr = (struct msm_ipc_addr *)addr;
+		goto tx_err_resp;
+	}
+
+	mutex_lock(&conn_h->pending_txn_lock);
+	dest_addr = (struct msm_ipc_addr *)conn_h->clnt_addr;
+	if (!list_empty(&conn_h->pending_txn_list)) {
+		rc = -EAGAIN;
+		goto queue_err_resp;
+	}
+tx_err_resp:
+	rc = msm_ipc_router_send_msg(
+			(struct msm_ipc_port *)(handle->src_port),
+			dest_addr, encoded_resp, encoded_resp_len);
+queue_err_resp:
+	if (rc == -EAGAIN && conn_h) {
+		txn_handle->enc_data = encoded_resp;
+		txn_handle->enc_data_len = encoded_resp_len;
+		list_add_tail(&txn_handle->list, &conn_h->pending_txn_list);
+		mutex_unlock(&conn_h->pending_txn_lock);
+		return 0;
+	}
+	if (conn_h)
+		mutex_unlock(&conn_h->pending_txn_lock);
+	if (rc < 0)
+		pr_err("%s: send_msg failed %d\n", __func__, rc);
+encode_and_send_err_resp_err1:
+	kfree(encoded_resp);
+encode_and_send_err_resp_err0:
+	kfree(txn_handle);
+	return rc;
+}
+
+/**
+ * handle_qmi_request() - Handle the QMI request
+ * @handle: QMI service handle on which the request has arrived.
+ * @req_msg: Request message to be handled.
+ * @txn_id: Transaction ID of the request message.
+ * @msg_id: Message ID of the request message.
+ * @msg_len: Message Length of the request message.
+ * @src_addr: Address of the source which sent the request.
+ * @src_addr_len: Length of the source address.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int handle_qmi_request(struct qmi_handle *handle,
+			      unsigned char *req_msg, uint16_t txn_id,
+			      uint16_t msg_id, uint16_t msg_len,
+			      void *src_addr, size_t src_addr_len)
+{
+	struct qmi_svc_clnt_conn *conn_h;
+	struct msg_desc *req_desc = NULL;
+	void *req_struct = NULL;
+	unsigned int req_struct_len = 0;
+	struct req_handle *req_h = NULL;
+	int rc = 0;
+
+	if (handle->handle_type != QMI_SERVICE_HANDLE)
+		return -EOPNOTSUPP;
+
+	conn_h = find_svc_clnt_conn(handle, src_addr, src_addr_len);
+	if (conn_h)
+		goto decode_req;
+
+	/* New client, establish a connection */
+	conn_h = add_svc_clnt_conn(handle, src_addr, src_addr_len);
+	if (!conn_h) {
+		pr_err("%s: Error adding a new conn_h\n", __func__);
+		rc = -ENOMEM;
+		goto out_handle_req;
+	}
+	rc = handle->svc_ops_options->connect_cb(handle, conn_h);
+	if (rc < 0) {
+		pr_err("%s: Error accepting new client\n", __func__);
+		rmv_svc_clnt_conn(conn_h);
+		conn_h = NULL;
+		goto out_handle_req;
+	}
+
+decode_req:
+	if (!msg_len)
+		goto process_req;
+
+	req_struct_len = handle->svc_ops_options->req_desc_cb(msg_id,
+							      &req_desc);
+	if (!req_desc || req_struct_len <= 0) {
+		pr_err("%s: Error getting req_desc for msg_id %d\n",
+			__func__, msg_id);
+		rc = -ENOTSUPP;
+		goto out_handle_req;
+	}
+
+	req_struct = kzalloc(req_struct_len, GFP_KERNEL);
+	if (!req_struct) {
+		rc = -ENOMEM;
+		goto out_handle_req;
+	}
+
+	rc = qmi_kernel_decode(req_desc, req_struct,
+				(void *)(req_msg + QMI_HEADER_SIZE), msg_len);
+	if (rc < 0) {
+		pr_err("%s: Error decoding msg_id %d\n", __func__, msg_id);
+		rc = -EBADMSG;
+		goto out_handle_req;
+	}
+
+process_req:
+	req_h = add_req_handle(conn_h, msg_id, txn_id);
+	if (!req_h) {
+		pr_err("%s: Error adding new request handle\n", __func__);
+		rc = -ENOMEM;
+		goto out_handle_req;
+	}
+	rc = handle->svc_ops_options->req_cb(handle, conn_h, req_h,
+					      msg_id, req_struct);
+	if (rc < 0) {
+		pr_err("%s: Error in req_cb\n", __func__);
+		/* Check if the error is before or after sending a response */
+		if (verify_req_handle(conn_h, req_h))
+			rmv_req_handle(req_h);
+		else
+			rc = 0;
+	}
+
+out_handle_req:
+	kfree(req_struct);
+	if (rc < 0)
+		send_err_resp(handle, conn_h, src_addr, msg_id, txn_id, rc);
+	return rc;
+}
+
+static struct qmi_txn *find_txn_handle(struct qmi_handle *handle,
+				       uint16_t txn_id)
+{
+	struct qmi_txn *txn_handle;
+
+	list_for_each_entry(txn_handle, &handle->txn_list, list) {
+		if (txn_handle->txn_id == txn_id)
+			return txn_handle;
+	}
+	return NULL;
+}
+
+static int handle_qmi_response(struct qmi_handle *handle,
+			       unsigned char *resp_msg, uint16_t txn_id,
+			       uint16_t msg_id, uint16_t msg_len)
+{
+	struct qmi_txn *txn_handle;
+	int rc;
+
+	/* Find the transaction handle */
+	txn_handle = find_txn_handle(handle, txn_id);
+	if (!txn_handle) {
+		pr_err("%s: Response received for non-existent txn_id %d\n",
+			__func__, txn_id);
+		return 0;
+	}
+
+	/* Decode the message */
+	rc = qmi_kernel_decode(txn_handle->resp_desc, txn_handle->resp,
+			       (void *)(resp_msg + QMI_HEADER_SIZE), msg_len);
+	if (rc < 0) {
+		pr_err("%s: Response Decode Failure <%d: %d: %d> rc: %d\n",
+			__func__, txn_id, msg_id, msg_len, rc);
+		wake_up(&txn_handle->wait_q);
+		if (txn_handle->type == QMI_ASYNC_TXN) {
+			list_del(&txn_handle->list);
+			kfree(txn_handle);
+		}
+		return rc;
+	}
+
+	/* Handle async or sync resp */
+	switch (txn_handle->type) {
+	case QMI_SYNC_TXN:
+		txn_handle->resp_received = 1;
+		wake_up(&txn_handle->wait_q);
+		rc = 0;
+		break;
+
+	case QMI_ASYNC_TXN:
+		if (txn_handle->resp_cb)
+			txn_handle->resp_cb(txn_handle->handle, msg_id,
+					    txn_handle->resp,
+					    txn_handle->resp_cb_data, 0);
+		list_del(&txn_handle->list);
+		kfree(txn_handle);
+		rc = 0;
+		break;
+
+	default:
+		pr_err("%s: Unrecognized transaction type\n", __func__);
+		return -EFAULT;
+	}
+	return rc;
+}
+
+static int handle_qmi_indication(struct qmi_handle *handle, void *msg,
+				 unsigned int msg_id, unsigned int msg_len)
+{
+	if (handle->ind_cb)
+		handle->ind_cb(handle, msg_id, msg + QMI_HEADER_SIZE,
+				msg_len, handle->ind_cb_priv);
+	return 0;
+}
+
+int qmi_recv_msg(struct qmi_handle *handle)
+{
+	unsigned int recv_msg_len;
+	unsigned char *recv_msg = NULL;
+	struct msm_ipc_addr src_addr = {0};
+	unsigned char cntl_flag;
+	uint16_t txn_id, msg_id, msg_len;
+	int rc;
+
+	if (!handle)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		return -ENETRESET;
+	}
+
+	/* Read the messages */
+	rc = msm_ipc_router_read_msg((struct msm_ipc_port *)(handle->src_port),
+				     &src_addr, &recv_msg, &recv_msg_len);
+	if (rc == -ENOMSG) {
+		mutex_unlock(&handle->handle_lock);
+		return rc;
+	}
+
+	if (rc < 0) {
+		pr_err("%s: Read failed %d\n", __func__, rc);
+		mutex_unlock(&handle->handle_lock);
+		return rc;
+	}
+
+	/* Decode the header & Handle the req, resp, indication message */
+	decode_qmi_header(recv_msg, &cntl_flag, &txn_id, &msg_id, &msg_len);
+
+	qmi_log(handle, cntl_flag, txn_id, msg_id, msg_len);
+	switch (cntl_flag) {
+	case QMI_REQUEST_CONTROL_FLAG:
+		rc = handle_qmi_request(handle, recv_msg, txn_id, msg_id,
+					msg_len, &src_addr, sizeof(src_addr));
+		break;
+
+	case QMI_RESPONSE_CONTROL_FLAG:
+		rc = handle_qmi_response(handle, recv_msg,
+					 txn_id, msg_id, msg_len);
+		break;
+
+	case QMI_INDICATION_CONTROL_FLAG:
+		rc = handle_qmi_indication(handle, recv_msg, msg_id, msg_len);
+		break;
+
+	default:
+		rc = -EFAULT;
+		pr_err("%s: Unsupported message type %d\n",
+			__func__, cntl_flag);
+		break;
+	}
+	kfree(recv_msg);
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_recv_msg);
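+
+/*
+ * Example (illustrative sketch): draining the receive queue from the
+ * deferred work scheduled by the handle's notify callback. qmi_recv_msg()
+ * returns -ENOMSG once no more messages are queued, so the worker simply
+ * loops while it keeps returning 0. example_ctx is a placeholder for the
+ * driver's own state structure.
+ *
+ *	static void example_recv_work(struct work_struct *work)
+ *	{
+ *		struct example_ctx *ctx = container_of(work,
+ *					struct example_ctx, recv_work);
+ *		int rc;
+ *
+ *		do {
+ *			rc = qmi_recv_msg(ctx->clnt);
+ *		} while (rc == 0);
+ *	}
+ */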
+
+int qmi_connect_to_service(struct qmi_handle *handle,
+			   uint32_t service_id,
+			   uint32_t service_vers,
+			   uint32_t service_ins)
+{
+	struct msm_ipc_port_name svc_name;
+	struct msm_ipc_server_info svc_info;
+	struct msm_ipc_addr *svc_dest_addr;
+	int rc;
+	uint32_t instance_id;
+
+	if (!handle)
+		return -EINVAL;
+
+	svc_dest_addr = kzalloc(sizeof(struct msm_ipc_addr),
+				GFP_KERNEL);
+	if (!svc_dest_addr)
+		return -ENOMEM;
+
+	instance_id = BUILD_INSTANCE_ID(service_vers, service_ins);
+	svc_name.service = service_id;
+	svc_name.instance = instance_id;
+
+	rc = msm_ipc_router_lookup_server_name(&svc_name, &svc_info,
+						1, LOOKUP_MASK);
+	if (rc <= 0) {
+		pr_err("%s: Server %08x:%08x not found\n",
+			__func__, service_id, instance_id);
+		kfree(svc_dest_addr);
+		return -ENODEV;
+	}
+	svc_dest_addr->addrtype = MSM_IPC_ADDR_ID;
+	svc_dest_addr->addr.port_addr.node_id = svc_info.node_id;
+	svc_dest_addr->addr.port_addr.port_id = svc_info.port_id;
+	mutex_lock(&handle->handle_lock);
+	if (handle->handle_reset) {
+		mutex_unlock(&handle->handle_lock);
+		kfree(svc_dest_addr);
+		return -ENETRESET;
+	}
+	handle->dest_info = svc_dest_addr;
+	handle->dest_service_id = service_id;
+	mutex_unlock(&handle->handle_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(qmi_connect_to_service);
+
+/**
+ * svc_event_add_svc_addr() - Add a specific service address to the list
+ * @event_nb:	Reference to the service event structure.
+ * @node_id:	Node id of the service address.
+ * @port_id:	Port id of the service address.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ *
+ * This function should be called with svc_addr_list_lock locked.
+ */
+static int svc_event_add_svc_addr(struct svc_event_nb *event_nb,
+				uint32_t node_id, uint32_t port_id)
+{
+	struct svc_addr *addr;
+
+	if (!event_nb)
+		return -EINVAL;
+	addr = kmalloc(sizeof(*addr), GFP_KERNEL);
+	if (!addr) {
+		pr_err("%s: Memory allocation failed for address list\n",
+			__func__);
+		return -ENOMEM;
+	}
+	addr->port_addr.node_id = node_id;
+	addr->port_addr.port_id = port_id;
+	list_add_tail(&addr->list_node, &event_nb->svc_addr_list);
+	return 0;
+}
+
+/**
+ * qmi_notify_svc_event_arrive() - Notify the clients about service arrival
+ * @service:	Service id for the specific service.
+ * @instance:	Instance id for the specific service.
+ * @node_id:	Node id of the processor where the service is hosted.
+ * @port_id:	Port id of the service port created by IPC Router.
+ *
+ * Return:	0 on Success or standard error code.
+ */
+static int qmi_notify_svc_event_arrive(uint32_t service,
+					uint32_t instance,
+					uint32_t node_id,
+					uint32_t port_id)
+{
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	struct svc_addr *addr;
+	bool already_notified = false;
+
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_svc_event_nb(service, instance);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	mutex_lock(&temp->svc_addr_list_lock);
+	list_for_each_entry(addr, &temp->svc_addr_list, list_node)
+		if (addr->port_addr.node_id == node_id &&
+		    addr->port_addr.port_id == port_id)
+			already_notified = true;
+	if (!already_notified) {
+		/*
+		 * Notify only if the clients have not already been notified
+		 * about this service instance.
+		 */
+		svc_event_add_svc_addr(temp, node_id, port_id);
+		spin_lock_irqsave(&temp->nb_lock, flags);
+		raw_notifier_call_chain(&temp->svc_event_rcvr_list,
+				QMI_SERVER_ARRIVE, NULL);
+		spin_unlock_irqrestore(&temp->nb_lock, flags);
+	}
+	mutex_unlock(&temp->svc_addr_list_lock);
+
+	return 0;
+}
+
+/**
+ * qmi_notify_svc_event_exit() - Notify the clients about service exit
+ * @service:	Service id for the specific service.
+ * @instance:	Instance id for the specific service.
+ * @node_id:	Node id of the processor where the service is hosted.
+ * @port_id:	Port id of the service port created by IPC Router.
+ *
+ * Return:	0 on Success or standard error code.
+ */
+static int qmi_notify_svc_event_exit(uint32_t service,
+					uint32_t instance,
+					uint32_t node_id,
+					uint32_t port_id)
+{
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	struct svc_addr *addr;
+	struct svc_addr *temp_addr;
+
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_svc_event_nb(service, instance);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EINVAL;
+	}
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	mutex_lock(&temp->svc_addr_list_lock);
+	list_for_each_entry_safe(addr, temp_addr, &temp->svc_addr_list,
+					list_node) {
+		if (addr->port_addr.node_id == node_id &&
+			addr->port_addr.port_id == port_id) {
+			/*
+			 * Notify only if an already notified service has
+			 * gone down.
+			 */
+			spin_lock_irqsave(&temp->nb_lock, flags);
+			raw_notifier_call_chain(&temp->svc_event_rcvr_list,
+						QMI_SERVER_EXIT, NULL);
+			spin_unlock_irqrestore(&temp->nb_lock, flags);
+			list_del(&addr->list_node);
+			kfree(addr);
+		}
+	}
+
+	mutex_unlock(&temp->svc_addr_list_lock);
+
+	return 0;
+}
+
+static struct svc_event_nb *find_svc_event_nb(uint32_t service_id,
+					      uint32_t instance_id)
+{
+	struct svc_event_nb *temp;
+
+	list_for_each_entry(temp, &svc_event_nb_list, list) {
+		if (temp->service_id == service_id &&
+		    temp->instance_id == instance_id)
+			return temp;
+	}
+	return NULL;
+}
+
+/**
+ * find_and_add_svc_event_nb() - Find/Add a notifier block for specific service
+ * @service_id:	Service Id of the service
+ * @instance_id:Instance Id of the service
+ *
+ * Return:	Pointer to svc_event_nb structure for the specified service
+ *
+ * This function should only be called after acquiring svc_event_nb_list_lock.
+ */
+static struct svc_event_nb *find_and_add_svc_event_nb(uint32_t service_id,
+						      uint32_t instance_id)
+{
+	struct svc_event_nb *temp;
+
+	temp = find_svc_event_nb(service_id, instance_id);
+	if (temp)
+		return temp;
+
+	temp = kzalloc(sizeof(struct svc_event_nb), GFP_KERNEL);
+	if (!temp)
+		return temp;
+
+	spin_lock_init(&temp->nb_lock);
+	temp->service_id = service_id;
+	temp->instance_id = instance_id;
+	INIT_LIST_HEAD(&temp->list);
+	INIT_LIST_HEAD(&temp->svc_addr_list);
+	RAW_INIT_NOTIFIER_HEAD(&temp->svc_event_rcvr_list);
+	mutex_init(&temp->svc_addr_list_lock);
+	list_add_tail(&temp->list, &svc_event_nb_list);
+
+	return temp;
+}
+
+int qmi_svc_event_notifier_register(uint32_t service_id,
+				    uint32_t service_vers,
+				    uint32_t service_ins,
+				    struct notifier_block *nb)
+{
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	int ret;
+	int i;
+	int num_servers;
+	uint32_t instance_id;
+	struct msm_ipc_port_name svc_name;
+	struct msm_ipc_server_info *svc_info_arr = NULL;
+
+	mutex_lock(&qmi_svc_event_notifier_lock);
+	if (!qmi_svc_event_notifier_port && !qmi_svc_event_notifier_wq)
+		qmi_svc_event_notifier_init();
+	mutex_unlock(&qmi_svc_event_notifier_lock);
+
+	instance_id = BUILD_INSTANCE_ID(service_vers, service_ins);
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_and_add_svc_event_nb(service_id, instance_id);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EFAULT;
+	}
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	mutex_lock(&temp->svc_addr_list_lock);
+	spin_lock_irqsave(&temp->nb_lock, flags);
+	ret = raw_notifier_chain_register(&temp->svc_event_rcvr_list, nb);
+	spin_unlock_irqrestore(&temp->nb_lock, flags);
+	if (!list_empty(&temp->svc_addr_list)) {
+		/* Notify this client only if some services already exist. */
+		spin_lock_irqsave(&temp->nb_lock, flags);
+		nb->notifier_call(nb, QMI_SERVER_ARRIVE, NULL);
+		spin_unlock_irqrestore(&temp->nb_lock, flags);
+	} else {
+		/*
+		 * Check if we have missed a new server event that happened
+		 * earlier.
+		 */
+		svc_name.service = service_id;
+		svc_name.instance = instance_id;
+		num_servers = msm_ipc_router_lookup_server_name(&svc_name,
+								NULL,
+								0, LOOKUP_MASK);
+		if (num_servers > 0) {
+			svc_info_arr = kmalloc_array(num_servers,
+						sizeof(*svc_info_arr),
+						GFP_KERNEL);
+			if (!svc_info_arr) {
+				mutex_unlock(&temp->svc_addr_list_lock);
+				return -ENOMEM;
+			}
+			num_servers = msm_ipc_router_lookup_server_name(
+								&svc_name,
+								svc_info_arr,
+								num_servers,
+								LOOKUP_MASK);
+			for (i = 0; i < num_servers; i++)
+				svc_event_add_svc_addr(temp,
+						svc_info_arr[i].node_id,
+						svc_info_arr[i].port_id);
+			kfree(svc_info_arr);
+
+			spin_lock_irqsave(&temp->nb_lock, flags);
+			raw_notifier_call_chain(&temp->svc_event_rcvr_list,
+						QMI_SERVER_ARRIVE, NULL);
+			spin_unlock_irqrestore(&temp->nb_lock, flags);
+		}
+	}
+	mutex_unlock(&temp->svc_addr_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_svc_event_notifier_register);
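+
+/*
+ * Example (illustrative sketch): watching a service instance. The notifier
+ * is invoked with nb_lock (a spinlock) held, so the callback must not
+ * sleep; it typically just schedules work. The service id/version/instance
+ * values and the work items are placeholders.
+ *
+ *	static int example_svc_event(struct notifier_block *nb,
+ *				     unsigned long event, void *unused)
+ *	{
+ *		if (event == QMI_SERVER_ARRIVE)
+ *			schedule_work(&example_connect_work);
+ *		else if (event == QMI_SERVER_EXIT)
+ *			schedule_work(&example_teardown_work);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block example_nb = {
+ *		.notifier_call = example_svc_event,
+ *	};
+ *
+ *	rc = qmi_svc_event_notifier_register(EXAMPLE_SVC_ID, 1, 0,
+ *					     &example_nb);
+ */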
+
+int qmi_svc_event_notifier_unregister(uint32_t service_id,
+				      uint32_t service_vers,
+				      uint32_t service_ins,
+				      struct notifier_block *nb)
+{
+	int ret;
+	struct svc_event_nb *temp;
+	unsigned long flags;
+	uint32_t instance_id;
+
+	instance_id = BUILD_INSTANCE_ID(service_vers, service_ins);
+	mutex_lock(&svc_event_nb_list_lock);
+	temp = find_svc_event_nb(service_id, instance_id);
+	if (!temp) {
+		mutex_unlock(&svc_event_nb_list_lock);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&temp->nb_lock, flags);
+	ret = raw_notifier_chain_unregister(&temp->svc_event_rcvr_list, nb);
+	spin_unlock_irqrestore(&temp->nb_lock, flags);
+	mutex_unlock(&svc_event_nb_list_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(qmi_svc_event_notifier_unregister);
+
+/**
+ * qmi_svc_event_worker() - Read control messages over service event port
+ * @work:	Reference to the work structure queued.
+ *
+ */
+static void qmi_svc_event_worker(struct work_struct *work)
+{
+	union rr_control_msg *ctl_msg = NULL;
+	unsigned int ctl_msg_len;
+	struct msm_ipc_addr src_addr;
+	int ret;
+
+	while (1) {
+		ret = msm_ipc_router_read_msg(qmi_svc_event_notifier_port,
+			&src_addr, (unsigned char **)&ctl_msg, &ctl_msg_len);
+		if (ret == -ENOMSG)
+			break;
+		if (ret < 0) {
+			pr_err("%s: Error receiving control message\n",
+					__func__);
+			break;
+		}
+		if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER)
+			qmi_notify_svc_event_arrive(ctl_msg->srv.service,
+							ctl_msg->srv.instance,
+							ctl_msg->srv.node_id,
+							ctl_msg->srv.port_id);
+		else if (ctl_msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+			qmi_notify_svc_event_exit(ctl_msg->srv.service,
+							ctl_msg->srv.instance,
+							ctl_msg->srv.node_id,
+							ctl_msg->srv.port_id);
+		kfree(ctl_msg);
+	}
+}
+
+/**
+ * qmi_svc_event_notify() - Callback for any service event posted on the
+ *			    control port
+ * @event:	The event posted on the control port.
+ * @data:	Any out-of-band data associated with event.
+ * @odata_len:	Length of the out-of-band data, if any.
+ * @priv:	Private Data.
+ *
+ * This function is called by the underlying transport to notify the QMI
+ * interface regarding any incoming service related events. It is registered
+ * during service event control port creation.
+ */
+static void qmi_svc_event_notify(unsigned int event, void *data,
+				size_t odata_len, void *priv)
+{
+	if (event == IPC_ROUTER_CTRL_CMD_NEW_SERVER
+		|| event == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT
+		|| event == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+		queue_work(qmi_svc_event_notifier_wq, &qmi_svc_event_work);
+}
+
+/**
+ * qmi_svc_event_notifier_init() - Create a control port to get service events
+ *
+ * This function is called during first service notifier registration. It
+ * creates a control port to get notification about server events so that
+ * respective clients can be notified about the events.
+ */
+static void qmi_svc_event_notifier_init(void)
+{
+	qmi_svc_event_notifier_wq = create_singlethread_workqueue(
+					"qmi_svc_event_wq");
+	if (!qmi_svc_event_notifier_wq) {
+		pr_err("%s: ctrl workqueue allocation failed\n", __func__);
+		return;
+	}
+	qmi_svc_event_notifier_port = msm_ipc_router_create_port(
+				qmi_svc_event_notify, NULL);
+	if (!qmi_svc_event_notifier_port) {
+		destroy_workqueue(qmi_svc_event_notifier_wq);
+		pr_err("%s: IPC Router Port creation failed\n", __func__);
+		return;
+	}
+	msm_ipc_router_bind_control_port(qmi_svc_event_notifier_port);
+}
+
+/**
+ * qmi_log_init() - Init function for IPC Logging
+ *
+ * Initialize log contexts for QMI request/response/indications.
+ */
+void qmi_log_init(void)
+{
+	qmi_req_resp_log_ctx =
+		ipc_log_context_create(QMI_REQ_RESP_LOG_PAGES,
+			"kqmi_req_resp", 0);
+	if (!qmi_req_resp_log_ctx)
+		pr_err("%s: Unable to create QMI IPC logging for Req/Resp\n",
+			__func__);
+	qmi_ind_log_ctx =
+		ipc_log_context_create(QMI_IND_LOG_PAGES, "kqmi_ind", 0);
+	if (!qmi_ind_log_ctx)
+		pr_err("%s: Unable to create QMI IPC logging for Indications\n",
+			__func__);
+}
+
+/**
+ * qmi_svc_register() - Register a QMI service with a QMI handle
+ * @handle: QMI handle on which the service has to be registered.
+ * @ops_options: Service specific operations and options.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_register(struct qmi_handle *handle, void *ops_options)
+{
+	struct qmi_svc_ops_options *svc_ops_options;
+	struct msm_ipc_addr svc_name;
+	int rc;
+	uint32_t instance_id;
+
+	svc_ops_options = (struct qmi_svc_ops_options *)ops_options;
+	if (!handle || !svc_ops_options)
+		return -EINVAL;
+
+	/* Check if the required elements of opts_options are filled */
+	if (!svc_ops_options->service_id || !svc_ops_options->service_vers ||
+	    !svc_ops_options->connect_cb || !svc_ops_options->disconnect_cb ||
+	    !svc_ops_options->req_desc_cb || !svc_ops_options->req_cb)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	/* Check if another service/client is registered in that handle */
+	if (handle->handle_type == QMI_SERVICE_HANDLE || handle->dest_info) {
+		mutex_unlock(&handle->handle_lock);
+		return -EBUSY;
+	}
+	INIT_LIST_HEAD(&handle->conn_list);
+	mutex_unlock(&handle->handle_lock);
+
+	/*
+	 * handle_lock is released here because the NEW_SERVER message will
+	 * end up in this handle's control port, whose handler takes the same
+	 * mutex. It is also safe to call register_server unlocked.
+	 */
+	/* Register the service */
+	instance_id = ((svc_ops_options->service_vers & 0xFF) |
+		       ((svc_ops_options->service_ins & 0xFF) << 8));
+	svc_name.addrtype = MSM_IPC_ADDR_NAME;
+	svc_name.addr.port_name.service = svc_ops_options->service_id;
+	svc_name.addr.port_name.instance = instance_id;
+	rc = msm_ipc_router_register_server(
+		(struct msm_ipc_port *)handle->src_port, &svc_name);
+	if (rc < 0) {
+		pr_err("%s: Error %d registering QMI service %08x:%08x\n",
+			__func__, rc, svc_ops_options->service_id,
+			instance_id);
+		return rc;
+	}
+	mutex_lock(&handle->handle_lock);
+	handle->svc_ops_options = svc_ops_options;
+	handle->handle_type = QMI_SERVICE_HANDLE;
+	mutex_unlock(&handle->handle_lock);
+	return rc;
+}
+EXPORT_SYMBOL(qmi_svc_register);
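+
+/*
+ * Example (illustrative sketch): registering a service on a freshly
+ * created handle. The callbacks and the service id/version are
+ * placeholders; service_id, service_vers and the four callbacks below are
+ * mandatory, otherwise the call fails with -EINVAL.
+ *
+ *	static struct qmi_svc_ops_options example_svc_ops = {
+ *		.service_id = EXAMPLE_SVC_ID,
+ *		.service_vers = 1,
+ *		.connect_cb = example_connect_cb,
+ *		.disconnect_cb = example_disconnect_cb,
+ *		.req_desc_cb = example_req_desc_cb,
+ *		.req_cb = example_req_cb,
+ *	};
+ *
+ *	svc = qmi_handle_create(example_svc_notify, NULL);
+ *	if (!svc)
+ *		return -ENOMEM;
+ *	rc = qmi_svc_register(svc, &example_svc_ops);
+ */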
+
+
+/**
+ * qmi_svc_unregister() - Unregister the service from a QMI handle
+ * @handle: QMI handle from which the service has to be unregistered.
+ *
+ * return: 0 on success, < 0 on error.
+ */
+int qmi_svc_unregister(struct qmi_handle *handle)
+{
+	struct qmi_svc_clnt_conn *conn_h, *temp_conn_h;
+
+	if (!handle || handle->handle_type != QMI_SERVICE_HANDLE)
+		return -EINVAL;
+
+	mutex_lock(&handle->handle_lock);
+	handle->handle_type = QMI_CLIENT_HANDLE;
+	mutex_unlock(&handle->handle_lock);
+	/*
+	 * handle_lock is released here because the REMOVE_SERVER message will
+	 * end up in this handle's control port, whose handler takes the same
+	 * mutex. It is also safe to call unregister_server unlocked.
+	 */
+	msm_ipc_router_unregister_server(
+		(struct msm_ipc_port *)handle->src_port);
+
+	mutex_lock(&handle->handle_lock);
+	list_for_each_entry_safe(conn_h, temp_conn_h,
+				 &handle->conn_list, list)
+		rmv_svc_clnt_conn(conn_h);
+	mutex_unlock(&handle->handle_lock);
+	return 0;
+}
+EXPORT_SYMBOL(qmi_svc_unregister);
+
+static int __init qmi_interface_init(void)
+{
+	qmi_log_init();
+	return 0;
+}
+module_init(qmi_interface_init);
+
+MODULE_DESCRIPTION("MSM QMI Interface");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/qmi_interface_priv.h b/drivers/soc/qcom/qmi_interface_priv.h
new file mode 100644
index 0000000..ef3e692
--- /dev/null
+++ b/drivers/soc/qcom/qmi_interface_priv.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_INTERFACE_PRIV_H_
+#define _QMI_INTERFACE_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/platform_device.h>
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+enum txn_type {
+	QMI_SYNC_TXN = 1,
+	QMI_ASYNC_TXN,
+};
+
+/**
+ * handle_type - Enum to identify QMI handle type
+ */
+enum handle_type {
+	QMI_CLIENT_HANDLE = 1,
+	QMI_SERVICE_HANDLE,
+};
+
+struct qmi_txn {
+	struct list_head list;
+	uint16_t txn_id;
+	enum txn_type type;
+	struct qmi_handle *handle;
+	void *enc_data;
+	unsigned int enc_data_len;
+	struct msg_desc *resp_desc;
+	void *resp;
+	unsigned int resp_len;
+	int resp_received;
+	int send_stat;
+	void (*resp_cb)(struct qmi_handle *handle, unsigned int msg_id,
+			void *msg, void *resp_cb_data, int stat);
+	void *resp_cb_data;
+	wait_queue_head_t wait_q;
+};
+
+/**
+ * svc_addr - Data structure to maintain a list of service addresses.
+ * @list_node: Service address list node used by "svc_addr_list"
+ * @port_addr: Service address in <node_id:port_id>.
+ */
+struct svc_addr {
+	struct list_head list_node;
+	struct msm_ipc_port_addr port_addr;
+};
+
+/**
+ * svc_event_nb - Service event notification structure.
+ * @nb_lock: Spinlock for the notifier block lists.
+ * @service_id: Service id for which list of notifier blocks are maintained.
+ * @instance_id: Instance id for which list of notifier blocks are maintained.
+ * @svc_event_rcvr_list: List of notifier blocks which clients have registered.
+ * @list: Used to chain this structure in a global list.
+ * @svc_addr_list_lock: Lock to protect @svc_addr_list.
+ * @svc_addr_list: List maintaining all the addresses for a specific
+ *			<service_id:instance_id>.
+ */
+struct svc_event_nb {
+	spinlock_t nb_lock;
+	uint32_t service_id;
+	uint32_t instance_id;
+	struct raw_notifier_head svc_event_rcvr_list;
+	struct list_head list;
+	struct mutex svc_addr_list_lock;
+	struct list_head svc_addr_list;
+};
+
+/**
+ * req_handle - Data structure to store request information
+ * @list: Points to req_handle_list maintained per connection.
+ * @conn_h: Connection handle on which the concerned request is received.
+ * @msg_id: Message ID of the request.
+ * @txn_id: Transaction ID of the request.
+ */
+struct req_handle {
+	struct list_head list;
+	struct qmi_svc_clnt_conn *conn_h;
+	uint16_t msg_id;
+	uint16_t txn_id;
+};
+
+/**
+ * qmi_svc_clnt_conn - Data structure to identify client service connection
+ * @list: List to chain up the client connection to the connection list.
+ * @svc_handle: Service side information of the connection.
+ * @clnt_addr: Client side information of the connection.
+ * @clnt_addr_len: Length of the client address.
+ * @req_handle_list: Pending requests in this connection.
+ * @resume_tx_work: Work item to resume transmission once flow control lifts.
+ * @pending_txn_list: Pending responses/indications awaiting flow control.
+ * @pending_txn_lock: Lock to protect @pending_txn_list.
+ */
+struct qmi_svc_clnt_conn {
+	struct list_head list;
+	void *svc_handle;
+	void *clnt_addr;
+	size_t clnt_addr_len;
+	struct list_head req_handle_list;
+	struct delayed_work resume_tx_work;
+	struct list_head pending_txn_list;
+	struct mutex pending_txn_lock;
+};
+
+#endif
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
new file mode 100644
index 0000000..f917ea9
--- /dev/null
+++ b/drivers/soc/qcom/ramdump.c
@@ -0,0 +1,411 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/elf.h>
+#include <linux/wait.h>
+#include <soc/qcom/ramdump.h>
+#include <linux/dma-mapping.h>
+#include <linux/of.h>
+
+#define RAMDUMP_WAIT_MSECS	120000
+
+struct ramdump_device {
+	char name[256];
+
+	unsigned int data_ready;
+	unsigned int consumer_present;
+	int ramdump_status;
+
+	struct completion ramdump_complete;
+	struct miscdevice device;
+
+	wait_queue_head_t dump_wait_q;
+	int nsegments;
+	struct ramdump_segment *segments;
+	size_t elfcore_size;
+	char *elfcore_buf;
+	unsigned long attrs;
+	bool complete_ramdump;
+};
+
+static int ramdump_open(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	rd_dev->consumer_present = 1;
+	rd_dev->ramdump_status = 0;
+	return 0;
+}
+
+static int ramdump_release(struct inode *inode, struct file *filep)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	rd_dev->consumer_present = 0;
+	rd_dev->data_ready = 0;
+	complete(&rd_dev->ramdump_complete);
+	return 0;
+}
+
+static unsigned long offset_translate(loff_t user_offset,
+		struct ramdump_device *rd_dev, unsigned long *data_left,
+		void **vaddr)
+{
+	int i = 0;
+	*vaddr = NULL;
+
+	for (i = 0; i < rd_dev->nsegments; i++)
+		if (user_offset >= rd_dev->segments[i].size)
+			user_offset -= rd_dev->segments[i].size;
+		else
+			break;
+
+	if (i == rd_dev->nsegments) {
+		pr_debug("Ramdump(%s): offset_translate returning zero\n",
+				rd_dev->name);
+		*data_left = 0;
+		return 0;
+	}
+
+	*data_left = rd_dev->segments[i].size - user_offset;
+
+	pr_debug("Ramdump(%s): Returning address: %llx, data_left = %ld\n",
+		rd_dev->name, rd_dev->segments[i].address + user_offset,
+		*data_left);
+
+	if (rd_dev->segments[i].v_address)
+		*vaddr = rd_dev->segments[i].v_address + user_offset;
+
+	return rd_dev->segments[i].address + user_offset;
+}
+
+#define MAX_IOREMAP_SIZE SZ_1M
+
+static ssize_t ramdump_read(struct file *filep, char __user *buf, size_t count,
+			loff_t *pos)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	void *device_mem = NULL, *origdevice_mem = NULL, *vaddr = NULL;
+	unsigned long data_left = 0, bytes_before, bytes_after;
+	unsigned long addr = 0;
+	size_t copy_size = 0, alignsize;
+	unsigned char *alignbuf = NULL, *finalbuf = NULL;
+	int ret = 0;
+	loff_t orig_pos = *pos;
+
+	if ((filep->f_flags & O_NONBLOCK) && !rd_dev->data_ready)
+		return -EAGAIN;
+
+	ret = wait_event_interruptible(rd_dev->dump_wait_q, rd_dev->data_ready);
+	if (ret)
+		return ret;
+
+	if (*pos < rd_dev->elfcore_size) {
+		copy_size = rd_dev->elfcore_size - *pos;
+		copy_size = min(copy_size, count);
+
+		if (copy_to_user(buf, rd_dev->elfcore_buf + *pos, copy_size)) {
+			ret = -EFAULT;
+			goto ramdump_done;
+		}
+		*pos += copy_size;
+		count -= copy_size;
+		buf += copy_size;
+		if (count == 0)
+			return copy_size;
+	}
+
+	addr = offset_translate(*pos - rd_dev->elfcore_size, rd_dev,
+				&data_left, &vaddr);
+
+	/* EOF check */
+	if (data_left == 0) {
+		pr_debug("Ramdump(%s): Ramdump complete. %lld bytes read.",
+			rd_dev->name, *pos);
+		rd_dev->ramdump_status = 0;
+		ret = 0;
+		goto ramdump_done;
+	}
+
+	copy_size = min_t(size_t, count, (size_t)MAX_IOREMAP_SIZE);
+	copy_size = min_t(unsigned long, (unsigned long)copy_size, data_left);
+
+	rd_dev->attrs = 0;
+	rd_dev->attrs |= DMA_ATTR_SKIP_ZEROING;
+	device_mem = vaddr ?: dma_remap(rd_dev->device.parent, NULL, addr,
+						copy_size, rd_dev->attrs);
+	origdevice_mem = device_mem;
+
+	if (device_mem == NULL) {
+		pr_err("Ramdump(%s): Unable to ioremap: addr %lx, size %zd\n",
+			rd_dev->name, addr, copy_size);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	alignbuf = kzalloc(copy_size, GFP_KERNEL);
+	if (!alignbuf) {
+		pr_err("Ramdump(%s): Unable to alloc mem for aligned buf\n",
+				rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -ENOMEM;
+		goto ramdump_done;
+	}
+
+	finalbuf = alignbuf;
+	alignsize = copy_size;
+
+	if ((unsigned long)device_mem & 0x7) {
+		bytes_before = 8 - ((unsigned long)device_mem & 0x7);
+		memcpy_fromio(alignbuf, device_mem, bytes_before);
+		device_mem += bytes_before;
+		alignbuf += bytes_before;
+		alignsize -= bytes_before;
+	}
+
+	if (alignsize & 0x7) {
+		bytes_after = alignsize & 0x7;
+		memcpy(alignbuf, device_mem, alignsize - bytes_after);
+		device_mem += alignsize - bytes_after;
+		alignbuf += (alignsize - bytes_after);
+		alignsize = bytes_after;
+		memcpy_fromio(alignbuf, device_mem, alignsize);
+	} else
+		memcpy(alignbuf, device_mem, alignsize);
+
+	if (copy_to_user(buf, finalbuf, copy_size)) {
+		pr_err("Ramdump(%s): Couldn't copy all data to user.",
+			rd_dev->name);
+		rd_dev->ramdump_status = -1;
+		ret = -EFAULT;
+		goto ramdump_done;
+	}
+
+	kfree(finalbuf);
+	if (!vaddr && origdevice_mem)
+		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	*pos += copy_size;
+
+	pr_debug("Ramdump(%s): Read %zd bytes from address %lx.",
+			rd_dev->name, copy_size, addr);
+
+	return *pos - orig_pos;
+
+ramdump_done:
+	if (!vaddr && origdevice_mem)
+		dma_unremap(rd_dev->device.parent, origdevice_mem, copy_size);
+
+	kfree(finalbuf);
+	rd_dev->data_ready = 0;
+	*pos = 0;
+	complete(&rd_dev->ramdump_complete);
+	return ret;
+}
+
+static unsigned int ramdump_poll(struct file *filep,
+					struct poll_table_struct *wait)
+{
+	struct ramdump_device *rd_dev = container_of(filep->private_data,
+				struct ramdump_device, device);
+	unsigned int mask = 0;
+
+	if (rd_dev->data_ready)
+		mask |= (POLLIN | POLLRDNORM);
+
+	poll_wait(filep, &rd_dev->dump_wait_q, wait);
+	return mask;
+}
+
+static const struct file_operations ramdump_file_ops = {
+	.open = ramdump_open,
+	.release = ramdump_release,
+	.read = ramdump_read,
+	.poll = ramdump_poll
+};
+
+void *create_ramdump_device(const char *dev_name, struct device *parent)
+{
+	int ret;
+	struct ramdump_device *rd_dev;
+
+	if (!dev_name) {
+		pr_err("%s: Invalid device name.\n", __func__);
+		return NULL;
+	}
+
+	rd_dev = kzalloc(sizeof(struct ramdump_device), GFP_KERNEL);
+
+	if (!rd_dev) {
+		pr_err("%s: Couldn't alloc space for ramdump device!",
+			__func__);
+		return NULL;
+	}
+
+	snprintf(rd_dev->name, ARRAY_SIZE(rd_dev->name), "ramdump_%s",
+		 dev_name);
+
+	init_completion(&rd_dev->ramdump_complete);
+
+	rd_dev->device.minor = MISC_DYNAMIC_MINOR;
+	rd_dev->device.name = rd_dev->name;
+	rd_dev->device.fops = &ramdump_file_ops;
+	rd_dev->device.parent = parent;
+	if (parent) {
+		rd_dev->complete_ramdump = of_property_read_bool(
+				parent->of_node, "qcom,complete-ramdump");
+		if (!rd_dev->complete_ramdump)
+			dev_info(parent,
+				 "for %s, only the specified segments will be dumped.",
+				 dev_name);
+	}
+
+	init_waitqueue_head(&rd_dev->dump_wait_q);
+
+	ret = misc_register(&rd_dev->device);
+
+	if (ret) {
+		pr_err("%s: misc_register failed for %s (%d)", __func__,
+				dev_name, ret);
+		kfree(rd_dev);
+		return NULL;
+	}
+
+	return (void *)rd_dev;
+}
+EXPORT_SYMBOL(create_ramdump_device);
+
+void destroy_ramdump_device(void *dev)
+{
+	struct ramdump_device *rd_dev = dev;
+
+	if (IS_ERR_OR_NULL(rd_dev))
+		return;
+
+	misc_deregister(&rd_dev->device);
+	kfree(rd_dev);
+}
+EXPORT_SYMBOL(destroy_ramdump_device);
+
+static int _do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments, bool use_elf)
+{
+	int ret, i;
+	struct ramdump_device *rd_dev = (struct ramdump_device *)handle;
+	Elf32_Phdr *phdr;
+	Elf32_Ehdr *ehdr;
+	unsigned long offset;
+
+	if (!rd_dev->consumer_present) {
+		pr_err("Ramdump(%s): No consumers. Aborting..\n", rd_dev->name);
+		return -EPIPE;
+	}
+
+	if (rd_dev->complete_ramdump) {
+		for (i = 0; i < nsegments-1; i++)
+			segments[i].size =
+			PAGE_ALIGN(segments[i+1].address - segments[i].address);
+
+		segments[nsegments-1].size =
+			PAGE_ALIGN(segments[nsegments-1].size);
+	} else {
+		for (i = 0; i < nsegments; i++)
+			segments[i].size = PAGE_ALIGN(segments[i].size);
+	}
+
+	rd_dev->segments = segments;
+	rd_dev->nsegments = nsegments;
+
+	if (use_elf) {
+		rd_dev->elfcore_size = sizeof(*ehdr) +
+				       sizeof(*phdr) * nsegments;
+		ehdr = kzalloc(rd_dev->elfcore_size, GFP_KERNEL);
+		rd_dev->elfcore_buf = (char *)ehdr;
+		if (!rd_dev->elfcore_buf)
+			return -ENOMEM;
+
+		memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
+		ehdr->e_ident[EI_CLASS] = ELFCLASS32;
+		ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
+		ehdr->e_ident[EI_VERSION] = EV_CURRENT;
+		ehdr->e_ident[EI_OSABI] = ELFOSABI_NONE;
+		ehdr->e_type = ET_CORE;
+		ehdr->e_version = EV_CURRENT;
+		ehdr->e_phoff = sizeof(*ehdr);
+		ehdr->e_ehsize = sizeof(*ehdr);
+		ehdr->e_phentsize = sizeof(*phdr);
+		ehdr->e_phnum = nsegments;
+
+		offset = rd_dev->elfcore_size;
+		phdr = (Elf32_Phdr *)(ehdr + 1);
+		for (i = 0; i < nsegments; i++, phdr++) {
+			phdr->p_type = PT_LOAD;
+			phdr->p_offset = offset;
+			phdr->p_vaddr = phdr->p_paddr = segments[i].address;
+			phdr->p_filesz = phdr->p_memsz = segments[i].size;
+			phdr->p_flags = PF_R | PF_W | PF_X;
+			offset += phdr->p_filesz;
+		}
+	}
+
+	rd_dev->data_ready = 1;
+	rd_dev->ramdump_status = -1;
+
+	reinit_completion(&rd_dev->ramdump_complete);
+
+	/* Tell userspace that the data is ready */
+	wake_up(&rd_dev->dump_wait_q);
+
+	/* Wait (with a timeout) to let the ramdump complete */
+	ret = wait_for_completion_timeout(&rd_dev->ramdump_complete,
+			msecs_to_jiffies(RAMDUMP_WAIT_MSECS));
+
+	if (!ret) {
+		pr_err("Ramdump(%s): Timed out waiting for userspace.\n",
+			rd_dev->name);
+		ret = -EPIPE;
+	} else
+		ret = (rd_dev->ramdump_status == 0) ? 0 : -EPIPE;
+
+	rd_dev->data_ready = 0;
+	rd_dev->elfcore_size = 0;
+	kfree(rd_dev->elfcore_buf);
+	rd_dev->elfcore_buf = NULL;
+	return ret;
+}
+
+int do_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, false);
+}
+EXPORT_SYMBOL(do_ramdump);
+
+int
+do_elf_ramdump(void *handle, struct ramdump_segment *segments, int nsegments)
+{
+	return _do_ramdump(handle, segments, nsegments, true);
+}
+EXPORT_SYMBOL(do_elf_ramdump);
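+
+/*
+ * Illustrative usage sketch; the device name, parent device and segment
+ * values are hypothetical and would come from the calling subsystem driver:
+ *
+ *	void *rd = create_ramdump_device("my-subsys", &pdev->dev);
+ *	struct ramdump_segment seg = { .address = base_pa, .size = len };
+ *
+ *	if (rd && do_ramdump(rd, &seg, 1))
+ *		pr_err("my-subsys ramdump failed\n");
+ *	destroy_ramdump_device(rd);
+ */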
diff --git a/drivers/soc/qcom/rpmh.c b/drivers/soc/qcom/rpmh.c
new file mode 100644
index 0000000..d4b90f40
--- /dev/null
+++ b/drivers/soc/qcom/rpmh.c
@@ -0,0 +1,837 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mailbox_client.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+#include <soc/qcom/rpmh.h>
+#include <soc/qcom/tcs.h>
+
+#define RPMH_MAX_MBOXES			2
+#define RPMH_MAX_FAST_RES		32
+#define RPMH_MAX_REQ_IN_BATCH		10
+
+#define DEFINE_RPMH_MSG_ONSTACK(rc, s, q, c, name)	\
+	struct rpmh_msg name = {		\
+		.msg = { 0 },			\
+		.msg.state = s,			\
+		.msg.is_complete = true,	\
+		.msg.payload = &name.cmd,	\
+		.msg.num_payload = 1,		\
+		.cmd = { 0 },			\
+		.waitq = q,			\
+		.wait_count = c,		\
+		.rc = rc,			\
+		.bit = -1,			\
+		.free_cmd = NULL,		\
+	}
+
+struct rpmh_req {
+	u32 addr;
+	u32 sleep_val;
+	u32 wake_val;
+	struct list_head list;
+};
+
+struct rpmh_msg {
+	struct tcs_mbox_msg msg;
+	/* A single command for our use here */
+	struct tcs_cmd cmd;
+	wait_queue_head_t *waitq;
+	atomic_t *wait_count;
+	struct rpmh_client *rc;
+	int bit;
+	void *free_cmd;
+	int err; /* relay error from mbox for sync calls */
+};
+
+struct rpmh_mbox {
+	struct device_node *mbox_dn;
+	struct list_head resources;
+	spinlock_t lock;
+	struct rpmh_msg *msg_pool;
+	DECLARE_BITMAP(fast_req, RPMH_MAX_FAST_RES);
+	bool dirty;
+};
+
+struct rpmh_client {
+	struct device *dev;
+	struct mbox_client client;
+	struct mbox_chan *chan;
+	struct rpmh_mbox *rpmh;
+};
+
+static struct rpmh_mbox mbox_ctrlr[RPMH_MAX_MBOXES];
+DEFINE_MUTEX(rpmh_mbox_mutex);
+
+static struct rpmh_msg *get_msg_from_pool(struct rpmh_client *rc)
+{
+	struct rpmh_mbox *rpm = rc->rpmh;
+	struct rpmh_msg *msg = NULL;
+	int pos;
+
+	spin_lock(&rpm->lock);
+	pos = find_first_zero_bit(rpm->fast_req, RPMH_MAX_FAST_RES);
+	if (pos != RPMH_MAX_FAST_RES) {
+		bitmap_set(rpm->fast_req, pos, 1);
+		msg = &rpm->msg_pool[pos];
+		memset(msg, 0, sizeof(*msg));
+		msg->bit = pos;
+		msg->rc = rc;
+	}
+	spin_unlock(&rpm->lock);
+
+	return msg;
+}
+
+static inline int is_sleep_nonempty(struct rpmh_req *req)
+{
+	return (req->sleep_val != UINT_MAX);
+}
+
+static void rpmh_rx_cb(struct mbox_client *cl, void *msg)
+{
+	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
+
+	atomic_dec(rpm_msg->wait_count);
+	wake_up_interruptible(rpm_msg->waitq);
+}
+
+static void rpmh_tx_done(struct mbox_client *cl, void *msg, int r)
+{
+	struct rpmh_msg *rpm_msg = container_of(msg, struct rpmh_msg, msg);
+	struct rpmh_mbox *rpm = rpm_msg->rc->rpmh;
+	atomic_t *wc = rpm_msg->wait_count;
+	wait_queue_head_t *waitq = rpm_msg->waitq;
+	void *free = rpm_msg->free_cmd;
+
+	rpm_msg->err = r;
+
+	if (r) {
+		dev_err(rpm_msg->rc->dev,
+			"RPMH TX fail in msg addr 0x%x, err=%d\n",
+			rpm_msg->msg.payload[0].addr, r);
+		/*
+		 * If we fail TX for a read call, then we won't get
+		 * an rx_callback. Force one here.
+		 */
+		if (rpm_msg->msg.is_read)
+			rpmh_rx_cb(cl, msg);
+	}
+
+	/*
+	 * Copy the child object pointers before freeing up the parent.
+	 * This way, even if the parent (rpm_msg) object gets reused, we
+	 * can free up the child objects (free_cmd and wq/wc) in parallel.
+	 * If we released the children before the parent, we would run
+	 * into an issue where the stack-allocated parent object may be
+	 * invalid before we can check the ->bit value.
+	 */
+
+	/* If we allocated the pool, set it as available */
+	if (rpm_msg->bit >= 0 && rpm_msg->bit != RPMH_MAX_FAST_RES) {
+		spin_lock(&rpm->lock);
+		bitmap_clear(rpm->fast_req, rpm_msg->bit, 1);
+		spin_unlock(&rpm->lock);
+	}
+
+	/* Nobody should be needing the request anymore */
+	kfree(free);
+
+	/* Signal the blocking thread we are done */
+	if (waitq) {
+		atomic_dec(wc);
+		wake_up_interruptible(waitq);
+	}
+}
+
+static struct rpmh_req *__find_req(struct rpmh_client *rc, u32 addr)
+{
+	struct rpmh_req *p, *req = NULL;
+
+	list_for_each_entry(p, &rc->rpmh->resources, list) {
+		if (p->addr == addr) {
+			req = p;
+			break;
+		}
+	}
+
+	return req;
+}
+
+static struct rpmh_req *cache_rpm_request(struct rpmh_client *rc,
+			enum rpmh_state state, struct tcs_cmd *cmd)
+{
+	struct rpmh_req *req;
+	struct rpmh_mbox *rpm = rc->rpmh;
+
+	spin_lock(&rpm->lock);
+	req = __find_req(rc, cmd->addr);
+	if (req)
+		goto existing;
+
+	req = kzalloc(sizeof(*req), GFP_ATOMIC);
+	if (!req) {
+		req = ERR_PTR(-ENOMEM);
+		goto unlock;
+	}
+
+	req->addr = cmd->addr;
+	req->sleep_val = req->wake_val = UINT_MAX;
+	INIT_LIST_HEAD(&req->list);
+	list_add_tail(&req->list, &rpm->resources);
+
+existing:
+	switch (state) {
+	case RPMH_ACTIVE_ONLY_STATE:
+	case RPMH_AWAKE_STATE:
+		if (req->sleep_val != UINT_MAX)
+			req->wake_val = cmd->data;
+		break;
+	case RPMH_WAKE_ONLY_STATE:
+		req->wake_val = cmd->data;
+		break;
+	case RPMH_SLEEP_STATE:
+		req->sleep_val = cmd->data;
+		break;
+	default:
+		break;
+	}
+
+unlock:
+	rpm->dirty = true;
+	spin_unlock(&rpm->lock);
+
+	return req;
+}
+
+/**
+ * __rpmh_write: Cache and send the RPMH request
+ *
+ * @rc: The RPMH client
+ * @state: Active/Sleep request type
+ * @rpm_msg: The data that needs to be sent (payload).
+ *
+ * Cache the RPMH request and send it if the state is ACTIVE_ONLY or AWAKE.
+ * SLEEP/WAKE_ONLY requests are not sent to the controller at
+ * this time. Use rpmh_flush() to send them to the controller.
+ */
+int __rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
+			struct rpmh_msg *rpm_msg)
+{
+	struct rpmh_req *req;
+	int ret = 0;
+	int i;
+
+	/*
+	 * We cannot wait for completion of a sleep set; it's done
+	 * outside the processor.
+	 */
+	if (rpm_msg->msg.is_complete &&
+		(state == RPMH_SLEEP_STATE || state == RPMH_WAKE_ONLY_STATE)) {
+		pr_err("Mismatch: sleep/wake set with completion.\n");
+		return -EINVAL;
+	}
+
+	/* Cache the request in our store and link the payload */
+	for (i = 0; i < rpm_msg->msg.num_payload; i++) {
+		req = cache_rpm_request(rc, state, &rpm_msg->msg.payload[i]);
+		if (IS_ERR(req))
+			return PTR_ERR(req);
+	}
+
+	rpm_msg->msg.state = state;
+
+	/* Send to mailbox only if active or awake */
+	if (state == RPMH_ACTIVE_ONLY_STATE || state == RPMH_AWAKE_STATE) {
+		ret = mbox_send_message(rc->chan, &rpm_msg->msg);
+		if (ret > 0)
+			ret = 0;
+	}
+
+	return ret;
+}
+
+/**
+ * rpmh_write_single_async: Write a single RPMH command
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @state: Active/sleep set
+ * @addr: The ePCB address
+ * @data: The data
+ *
+ * Write a single value in fast-path. Fire and forget.
+ * May be called from atomic contexts.
+ */
+int rpmh_write_single_async(struct rpmh_client *rc, enum rpmh_state state,
+			u32 addr, u32 data)
+{
+	struct rpmh_msg *rpm_msg;
+
+	if (IS_ERR_OR_NULL(rc))
+		return -EINVAL;
+
+	rpm_msg = get_msg_from_pool(rc);
+	if (!rpm_msg)
+		return -ENOMEM;
+
+	rpm_msg->cmd.addr = addr;
+	rpm_msg->cmd.data = data;
+
+	rpm_msg->msg.payload = &rpm_msg->cmd;
+	rpm_msg->msg.num_payload = 1;
+
+	return __rpmh_write(rc, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_single_async);
+
+/**
+ * rpmh_write_single: Write a single RPMH command and
+ * wait for completion of the command.
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @state: Active/sleep set
+ * @addr: The ePCB address
+ * @data: The data
+ *
+ * Write a single value in slow-path and wait for the request to be
+ * complete. Blocks until the request is completed on the accelerator.
+ * Do not call from atomic contexts.
+ */
+int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
+			u32 addr, u32 data)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	atomic_t wait_count = ATOMIC_INIT(1);
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+	int ret;
+
+	if (IS_ERR_OR_NULL(rc))
+		return -EINVAL;
+
+	might_sleep();
+
+	rpm_msg.cmd.addr = addr;
+	rpm_msg.cmd.data = data;
+
+	ret = __rpmh_write(rc, state, &rpm_msg);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
+	if (ret)
+		return ret;
+
+	return rpm_msg.err;
+}
+EXPORT_SYMBOL(rpmh_write_single);
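+
+/*
+ * Illustrative usage sketch; the resource address and data values are
+ * hypothetical and are normally platform-specific:
+ *
+ *	ret = rpmh_write_single(rc, RPMH_ACTIVE_ONLY_STATE, res_addr, res_val);
+ *	if (!ret)
+ *		ret = rpmh_write_single_async(rc, RPMH_SLEEP_STATE,
+ *					      res_addr, sleep_val);
+ */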
+
+struct rpmh_msg *__get_rpmh_msg_async(struct rpmh_client *rc,
+	enum rpmh_state state, struct tcs_cmd *cmd, int n, bool fast)
+{
+	struct rpmh_msg *rpm_msg;
+	struct tcs_cmd *tcs_cmd;
+
+	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
+		return ERR_PTR(-EINVAL);
+
+	tcs_cmd = kcalloc(n, sizeof(*cmd), fast ? GFP_ATOMIC : GFP_KERNEL);
+	if (!tcs_cmd)
+		return ERR_PTR(-ENOMEM);
+	memcpy(tcs_cmd, cmd, n * sizeof(*tcs_cmd));
+
+	rpm_msg = get_msg_from_pool(rc);
+	if (!rpm_msg) {
+		kfree(tcs_cmd);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rpm_msg->msg.state = state;
+	rpm_msg->msg.payload = tcs_cmd;
+	rpm_msg->msg.num_payload = n;
+	rpm_msg->free_cmd = tcs_cmd;
+
+	return rpm_msg;
+}
+
+/**
+ * rpmh_write_async: Write a batch of RPMH commands
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a batch of RPMH commands. The order of commands is maintained and
+ * they are sent as a single shot. By default the entire set of commands is
+ * considered active only (i.e., it will not be cached in the wake set unless
+ * all of the commands have corresponding sleep requests).
+ */
+int rpmh_write_async(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int n)
+{
+	struct rpmh_msg *rpm_msg = __get_rpmh_msg_async(rc, state, cmd, n,
+							true);
+
+	if (IS_ERR(rpm_msg))
+		return PTR_ERR(rpm_msg);
+
+	return __rpmh_write(rc, state, rpm_msg);
+}
+EXPORT_SYMBOL(rpmh_write_async);
+
+/**
+ * rpmh_write: Write a batch of RPMH commands
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write a batch of RPMH commands. The order of commands is maintained and
+ * they are sent as a single shot. By default the entire set of commands is
+ * considered active only (i.e., it will not be cached in the wake set unless
+ * all of the commands have corresponding sleep requests). All requests are
+ * sent as slow-path requests.
+ *
+ * May sleep. Do not call from atomic contexts.
+ */
+int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int n)
+{
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	atomic_t wait_count = ATOMIC_INIT(1);
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, &waitq, &wait_count, rpm_msg);
+	int ret;
+
+	if (IS_ERR_OR_NULL(rc) || !cmd || n <= 0 || n > MAX_RPMH_PAYLOAD)
+		return -EINVAL;
+
+	might_sleep();
+
+	rpm_msg.msg.payload = cmd;
+	rpm_msg.msg.num_payload = n;
+
+	ret = __rpmh_write(rc, state, &rpm_msg);
+	if (ret < 0)
+		return ret;
+
+	ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
+	if (ret)
+		return ret;
+
+	return rpm_msg.err;
+}
+EXPORT_SYMBOL(rpmh_write);
+
+/**
+ * rpmh_write_passthru: Write multiple batches of RPMH commands without caching
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @state: Active/sleep set
+ * @cmd: The payload data
+ * @n: The array of counts of elements in each batch, zero-terminated.
+ *
+ * Write a request to the mailbox controller without caching. If the request
+ * state is ACTIVE_ONLY, then the requests are treated as completion requests
+ * and sent to the controller immediately. The function waits until all the
+ * commands are complete. If the request is for SLEEP or WAKE_ONLY, then the
+ * request is sent as fire-and-forget and no ack is expected.
+ *
+ * May sleep. Do not call from atomic contexts for ACTIVE_ONLY requests.
+ */
+int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int *n)
+{
+	struct rpmh_msg *rpm_msg[RPMH_MAX_REQ_IN_BATCH];
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	atomic_t wait_count = ATOMIC_INIT(0); /* overwritten */
+	int count = 0;
+	int ret, i = 0;
+
+	while (n[count++])
+		;
+	count--;
+	if (count >= RPMH_MAX_REQ_IN_BATCH)
+		return -EINVAL;
+
+	for (i = 0; i < count; i++) {
+		rpm_msg[i] = __get_rpmh_msg_async(rc, state, cmd, n[i], false);
+		if (IS_ERR_OR_NULL(rpm_msg[i]))
+			return PTR_ERR(rpm_msg[i]);
+		rpm_msg[i]->waitq = &waitq;
+		rpm_msg[i]->wait_count = &wait_count;
+		cmd += n[i];
+	}
+
+	if (state == RPMH_ACTIVE_ONLY_STATE) {
+		might_sleep();
+		atomic_set(&wait_count, count);
+		for (i = 0; i < count; i++) {
+			rpm_msg[i]->msg.is_complete = true;
+			/* Bypass caching and write to mailbox directly */
+			ret = mbox_send_message(rc->chan, &rpm_msg[i]->msg);
+			if (ret < 0)
+				return ret;
+		}
+		return wait_event_interruptible(waitq,
+					atomic_read(&wait_count) == 0);
+	} else {
+		for (i = 0; i < count; i++) {
+			ret = mbox_send_controller_data(rc->chan,
+						&rpm_msg[i]->msg);
+			/* Clean up our call by spoofing tx_done */
+			rpmh_tx_done(&rc->client, &rpm_msg[i]->msg, ret);
+		}
+		return 0;
+	}
+}
+EXPORT_SYMBOL(rpmh_write_passthru);
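+
+/*
+ * Illustrative usage sketch: two batches of 2 and 3 commands packed into a
+ * single contiguous @cmd array, with the batch sizes in a zero-terminated
+ * @n array (the commands themselves are hypothetical):
+ *
+ *	struct tcs_cmd cmd[5] = { ... };
+ *	int n[] = { 2, 3, 0 };
+ *
+ *	ret = rpmh_write_passthru(rc, RPMH_ACTIVE_ONLY_STATE, cmd, n);
+ */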
+
+/**
+ * rpmh_write_control: Write async control commands to the controller
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @cmd: The payload data
+ * @n: The number of elements in payload
+ *
+ * Write control commands to the controller. The messages are always sent
+ * async.
+ *
+ * May be called from atomic contexts.
+ */
+int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n)
+{
+	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
+
+	if (IS_ERR_OR_NULL(rc))
+		return -EINVAL;
+
+	rpm_msg.msg.payload = cmd;
+	rpm_msg.msg.num_payload = n;
+	rpm_msg.msg.is_control = true;
+
+	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
+}
+EXPORT_SYMBOL(rpmh_write_control);
+
+/**
+ * rpmh_invalidate: Invalidate all sleep and active sets.
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ *
+ * Invalidate the sleep and active values in the TCS blocks.
+ */
+int rpmh_invalidate(struct rpmh_client *rc)
+{
+	DEFINE_RPMH_MSG_ONSTACK(rc, 0, NULL, NULL, rpm_msg);
+	struct rpmh_mbox *rpm = rc->rpmh;
+
+	if (IS_ERR_OR_NULL(rc))
+		return -EINVAL;
+
+	rpm_msg.msg.invalidate = true;
+
+	spin_lock(&rpm->lock);
+	rpm->dirty = true;
+	spin_unlock(&rpm->lock);
+
+	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
+}
+EXPORT_SYMBOL(rpmh_invalidate);
+
+/**
+ * rpmh_read: Read a resource value
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ * @addr: The ePCB address
+ * @resp: The store for the response received from RPMH
+ *
+ * Read a resource value from RPMH.
+ */
+int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp)
+{
+	int ret;
+	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
+	atomic_t wait_count = ATOMIC_INIT(2); /* wait for rx_cb and tx_done */
+	DEFINE_RPMH_MSG_ONSTACK(rc, RPMH_ACTIVE_ONLY_STATE,
+				&waitq, &wait_count, rpm_msg);
+
+	if (IS_ERR_OR_NULL(rc) || !resp)
+		return -EINVAL;
+
+	might_sleep();
+
+	rpm_msg.cmd.addr = addr;
+	rpm_msg.cmd.data = 0;
+
+	rpm_msg.msg.is_read = true;
+
+	ret = mbox_send_message(rc->chan, &rpm_msg.msg);
+	if (ret < 0)
+		return ret;
+
+	/* Wait until the response is received from RPMH */
+	ret = wait_event_interruptible(waitq, atomic_read(&wait_count) == 0);
+	if (ret)
+		return ret;
+
+	/* Read the data back from the tcs_mbox_msg structure */
+	*resp = rpm_msg.cmd.data;
+
+	return rpm_msg.err;
+}
+EXPORT_SYMBOL(rpmh_read);
+
+int send_single(struct rpmh_client *rc, enum rpmh_state state, u32 addr,
+				u32 data)
+{
+	DEFINE_RPMH_MSG_ONSTACK(rc, state, NULL, NULL, rpm_msg);
+
+	rpm_msg.msg.is_complete = false;
+	rpm_msg.cmd.addr = addr;
+	rpm_msg.cmd.data = data;
+
+	return mbox_send_controller_data(rc->chan, &rpm_msg.msg);
+}
+
+/**
+ * rpmh_flush: Flushes the buffered active and sleep sets to TCS
+ *
+ * @rc: The RPMh handle obtained from rpmh_get_dev_channel
+ *
+ * This function is generally called from the sleep code on the last CPU
+ * that is powering down the entire system.
+ *
+ * Returns -EBUSY if the controller is busy, probably waiting on a response
+ * to an RPMH request sent earlier.
+ */
+int rpmh_flush(struct rpmh_client *rc)
+{
+	struct rpmh_req *p;
+	struct rpmh_mbox *rpm = rc->rpmh;
+	int ret;
+
+	if (IS_ERR_OR_NULL(rc))
+		return -EINVAL;
+
+	if (!mbox_controller_is_idle(rc->chan))
+		return -EBUSY;
+
+	spin_lock(&rpm->lock);
+	if (!rpm->dirty) {
+		pr_info("Skipping flush, TCS has latest data.\n");
+		spin_unlock(&rpm->lock);
+		return 0;
+	}
+	spin_unlock(&rpm->lock);
+
+	/*
+	 * Nobody other than the sleep code should be calling this function,
+	 * hence we can run without locks.
+	 */
+	list_for_each_entry(p, &rc->rpmh->resources, list) {
+		if (p->sleep_val == UINT_MAX || p->wake_val == UINT_MAX)
+			continue;
+		ret = send_single(rc, RPMH_SLEEP_STATE, p->addr, p->sleep_val);
+		if (ret)
+			return ret;
+		ret = send_single(rc, RPMH_WAKE_ONLY_STATE, p->addr,
+						p->wake_val);
+		if (ret)
+			return ret;
+	}
+
+	spin_lock(&rpm->lock);
+	rpm->dirty = false;
+	spin_unlock(&rpm->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(rpmh_flush);
+
+/**
+ * get_mbox: Get the MBOX controller
+ * @pdev: the platform device
+ * @name: the MBOX name as specified in DT for the device.
+ * @index: the index in the mboxes property if name is not provided.
+ *
+ * Get the MBOX Device node. We will use that to know which
+ * MBOX controller this platform device is intending to talk
+ * to.
+ */
+static struct rpmh_mbox *get_mbox(struct platform_device *pdev,
+			const char *name, int index)
+{
+	int i;
+	struct property *prop;
+	struct of_phandle_args spec;
+	const char *mbox_name;
+	struct rpmh_mbox *rpmh;
+
+	if (index < 0) {
+		if (!name || !name[0])
+			return ERR_PTR(-EINVAL);
+		index = 0;
+		of_property_for_each_string(pdev->dev.of_node,
+				"mbox-names", prop, mbox_name) {
+			if (!strcmp(name, mbox_name))
+				break;
+			index++;
+		}
+	}
+
+	if (of_parse_phandle_with_args(pdev->dev.of_node, "mboxes",
+					"#mbox-cells", index, &spec)) {
+		dev_dbg(&pdev->dev, "%s: can't parse mboxes property\n",
+					__func__);
+		return ERR_PTR(-ENODEV);
+	}
+
+	for (i = 0; i < RPMH_MAX_MBOXES; i++)
+		if (mbox_ctrlr[i].mbox_dn == spec.np) {
+			rpmh = &mbox_ctrlr[i];
+			goto found;
+		}
+
+	/* A new MBOX */
+	for (i = 0; i < RPMH_MAX_MBOXES; i++)
+		if (!mbox_ctrlr[i].mbox_dn)
+			break;
+
+	/* More controllers than expected - not recoverable */
+	WARN_ON(i == RPMH_MAX_MBOXES);
+
+	rpmh = &mbox_ctrlr[i];
+
+	rpmh->msg_pool = kzalloc(sizeof(struct rpmh_msg) *
+				RPMH_MAX_FAST_RES, GFP_KERNEL);
+	if (!rpmh->msg_pool)
+		return ERR_PTR(-ENOMEM);
+
+	rpmh->mbox_dn = spec.np;
+	INIT_LIST_HEAD(&rpmh->resources);
+	spin_lock_init(&rpmh->lock);
+
+found:
+	of_node_put(spec.np);
+
+	return rpmh;
+}
+
+static struct rpmh_client *get_rpmh_client(struct platform_device *pdev,
+				const char *name, int index)
+{
+	struct rpmh_client *rc;
+	int ret = 0;
+
+	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
+	if (!rc)
+		return ERR_PTR(-ENOMEM);
+
+	rc->client.rx_callback = rpmh_rx_cb;
+	rc->client.tx_prepare = NULL;
+	rc->client.tx_done = rpmh_tx_done;
+	rc->client.tx_block = false;
+	rc->client.knows_txdone = false;
+	rc->client.dev = &pdev->dev;
+	rc->dev = &pdev->dev;
+
+	rc->chan = ERR_PTR(-EINVAL);
+
+	/* Initialize by index or name, whichever is present */
+	if (index >= 0)
+		rc->chan = mbox_request_channel(&rc->client, index);
+	else if (name)
+		rc->chan = mbox_request_channel_byname(&rc->client, name);
+
+	if (IS_ERR_OR_NULL(rc->chan)) {
+		ret = PTR_ERR(rc->chan);
+		goto cleanup;
+	}
+
+	mutex_lock(&rpmh_mbox_mutex);
+	rc->rpmh = get_mbox(pdev, name, index);
+	mutex_unlock(&rpmh_mbox_mutex);
+
+	if (IS_ERR(rc->rpmh)) {
+		ret = PTR_ERR(rc->rpmh);
+		mbox_free_channel(rc->chan);
+		goto cleanup;
+	}
+
+	return rc;
+
+cleanup:
+	kfree(rc);
+	return ERR_PTR(ret);
+}
+
+/**
+ * rpmh_get_byname: Get the RPMh handle by mbox name
+ *
+ * @pdev: the platform device which needs to communicate with RPM
+ * accelerators
+ * @name: The mbox-name assigned to the client's mailbox handle
+ *
+ * May sleep.
+ */
+struct rpmh_client *rpmh_get_byname(struct platform_device *pdev,
+				const char *name)
+{
+	return get_rpmh_client(pdev, name, -1);
+}
+EXPORT_SYMBOL(rpmh_get_byname);
+
+/**
+ * rpmh_get_byindex: Get the RPMh handle by mbox index
+ *
+ * @pdev: the platform device which needs to communicate with RPM
+ * accelerators
+ * @index : The index of the mbox tuple as specified in order in DT
+ *
+ * May sleep.
+ */
+struct rpmh_client *rpmh_get_byindex(struct platform_device *pdev,
+				int index)
+{
+	return get_rpmh_client(pdev, NULL, index);
+}
+EXPORT_SYMBOL(rpmh_get_byindex);
+
+/**
+ * rpmh_release: Release the RPMH client
+ *
+ * @rc: The RPMh handle to be freed.
+ */
+void rpmh_release(struct rpmh_client *rc)
+{
+	if (rc && !IS_ERR_OR_NULL(rc->chan))
+		mbox_free_channel(rc->chan);
+
+	kfree(rc);
+}
+EXPORT_SYMBOL(rpmh_release);
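+
+/*
+ * Illustrative client lifecycle sketch; "my-mbox" is a hypothetical entry in
+ * the client's mbox-names devicetree property:
+ *
+ *	struct rpmh_client *rc = rpmh_get_byname(pdev, "my-mbox");
+ *
+ *	if (!IS_ERR_OR_NULL(rc)) {
+ *		rpmh_write(rc, RPMH_ACTIVE_ONLY_STATE, cmds, num_cmds);
+ *		rpmh_release(rc);
+ *	}
+ */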
diff --git a/drivers/soc/qcom/rq_stats.c b/drivers/soc/qcom/rq_stats.c
new file mode 100644
index 0000000..5850c46
--- /dev/null
+++ b/drivers/soc/qcom/rq_stats.c
@@ -0,0 +1,396 @@
+/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/hrtimer.h>
+#include <linux/cpu.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/notifier.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/rq_stats.h>
+#include <linux/cpufreq.h>
+#include <linux/kernel_stat.h>
+#include <linux/tick.h>
+#include <asm/smp_plat.h>
+#include <linux/suspend.h>
+
+#define MAX_LONG_SIZE 24
+#define DEFAULT_RQ_POLL_JIFFIES 1
+#define DEFAULT_DEF_TIMER_JIFFIES 5
+
+struct notifier_block freq_transition;
+struct notifier_block cpu_hotplug;
+
+struct cpu_load_data {
+	cputime64_t prev_cpu_idle;
+	cputime64_t prev_cpu_wall;
+	unsigned int avg_load_maxfreq;
+	unsigned int samples;
+	unsigned int window_size;
+	unsigned int cur_freq;
+	unsigned int policy_max;
+	cpumask_var_t related_cpus;
+	struct mutex cpu_load_mutex;
+};
+
+static DEFINE_PER_CPU(struct cpu_load_data, cpuload);
+
+
+static int update_average_load(unsigned int freq, unsigned int cpu)
+{
+
+	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
+	cputime64_t cur_wall_time, cur_idle_time;
+	unsigned int idle_time, wall_time;
+	unsigned int cur_load, load_at_max_freq;
+
+	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
+
+	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
+	pcpu->prev_cpu_wall = cur_wall_time;
+
+	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
+	pcpu->prev_cpu_idle = cur_idle_time;
+
+
+	if (unlikely(wall_time <= 0 || wall_time < idle_time))
+		return 0;
+
+	cur_load = 100 * (wall_time - idle_time) / wall_time;
+
+	/* Calculate the scaled load across CPU */
+	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
+
+	if (!pcpu->avg_load_maxfreq) {
+		/* This is the first sample in this window */
+		pcpu->avg_load_maxfreq = load_at_max_freq;
+		pcpu->window_size = wall_time;
+	} else {
+		/*
+		 * There is already a sample available in this window.
+		 * Compute a weighted average with the previous entry so that
+		 * we get the precise weighted load.
+		 */
+		pcpu->avg_load_maxfreq =
+			((pcpu->avg_load_maxfreq * pcpu->window_size) +
+			(load_at_max_freq * wall_time)) /
+			(wall_time + pcpu->window_size);
+
+		pcpu->window_size += wall_time;
+	}
+
+	return 0;
+}
+
+static unsigned int report_load_at_max_freq(void)
+{
+	int cpu;
+	struct cpu_load_data *pcpu;
+	unsigned int total_load = 0;
+
+	for_each_online_cpu(cpu) {
+		pcpu = &per_cpu(cpuload, cpu);
+		mutex_lock(&pcpu->cpu_load_mutex);
+		update_average_load(pcpu->cur_freq, cpu);
+		total_load += pcpu->avg_load_maxfreq;
+		pcpu->avg_load_maxfreq = 0;
+		mutex_unlock(&pcpu->cpu_load_mutex);
+	}
+	return total_load;
+}
+
+static int cpufreq_transition_handler(struct notifier_block *nb,
+			unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs = data;
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
+	int j;
+
+	switch (val) {
+	case CPUFREQ_POSTCHANGE:
+		for_each_cpu(j, this_cpu->related_cpus) {
+			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);
+
+			mutex_lock(&pcpu->cpu_load_mutex);
+			update_average_load(freqs->old, j);
+			pcpu->cur_freq = freqs->new;
+			mutex_unlock(&pcpu->cpu_load_mutex);
+		}
+		break;
+	}
+	return 0;
+}
+
+static void update_related_cpus(void)
+{
+	unsigned int cpu;
+
+	for_each_cpu(cpu, cpu_online_mask) {
+		struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+		struct cpufreq_policy cpu_policy;
+
+		cpufreq_get_policy(&cpu_policy, cpu);
+		cpumask_copy(this_cpu->related_cpus, cpu_policy.cpus);
+	}
+}
+
+static int cpu_hotplug_handler(struct notifier_block *nb,
+			unsigned long val, void *data)
+{
+	unsigned int cpu = (unsigned long)data;
+	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
+
+	switch (val) {
+	case CPU_ONLINE:
+		if (!this_cpu->cur_freq)
+			this_cpu->cur_freq = cpufreq_quick_get(cpu);
+		update_related_cpus();
+		/* fall through */
+	case CPU_ONLINE_FROZEN:
+		this_cpu->avg_load_maxfreq = 0;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int system_suspend_handler(struct notifier_block *nb,
+				unsigned long val, void *data)
+{
+	switch (val) {
+	case PM_POST_HIBERNATION:
+	case PM_POST_SUSPEND:
+	case PM_POST_RESTORE:
+		rq_info.hotplug_disabled = 0;
+		break;
+	case PM_HIBERNATION_PREPARE:
+	case PM_SUSPEND_PREPARE:
+		rq_info.hotplug_disabled = 1;
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+
+static ssize_t hotplug_disable_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int val = rq_info.hotplug_disabled;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
+}
+
+static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);
+
+static void def_work_fn(struct work_struct *work)
+{
+	/* Notify polling threads on change of value */
+	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
+}
+
+static ssize_t run_queue_avg_show(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	/* rq avg currently available only on one core */
+	val = rq_info.rq_avg;
+	rq_info.rq_avg = 0;
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
+}
+
+static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
+
+static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
+				      struct kobj_attribute *attr, char *buf)
+{
+	int ret = 0;
+	unsigned long flags = 0;
+
+	spin_lock_irqsave(&rq_lock, flags);
+	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
+		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	return ret;
+}
+
+static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
+				       struct kobj_attribute *attr,
+				       const char *buf, size_t count)
+{
+	unsigned int val = 0;
+	unsigned long flags = 0;
+	static DEFINE_MUTEX(lock_poll_ms);
+
+	mutex_lock(&lock_poll_ms);
+
+	spin_lock_irqsave(&rq_lock, flags);
+	if (kstrtouint(buf, 0, &val))
+		count = -EINVAL;
+	else
+		rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
+	spin_unlock_irqrestore(&rq_lock, flags);
+
+	mutex_unlock(&lock_poll_ms);
+
+	return count;
+}
+
+static struct kobj_attribute run_queue_poll_ms_attr =
+	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
+			store_run_queue_poll_ms);
+
+static ssize_t show_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	int64_t diff;
+	unsigned int udiff;
+
+	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
+	do_div(diff, 1000 * 1000);
+	udiff = (unsigned int) diff;
+
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", udiff);
+}
+
+static ssize_t store_def_timer_ms(struct kobject *kobj,
+		struct kobj_attribute *attr, const char *buf, size_t count)
+{
+	unsigned int val = 0;
+
+	if (kstrtouint(buf, 0, &val))
+		return -EINVAL;
+
+	rq_info.def_timer_jiffies = msecs_to_jiffies(val);
+
+	rq_info.def_start_time = ktime_to_ns(ktime_get());
+	return count;
+}
+
+static struct kobj_attribute def_timer_ms_attr =
+	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
+			store_def_timer_ms);
+
+static ssize_t show_cpu_normalized_load(struct kobject *kobj,
+		struct kobj_attribute *attr, char *buf)
+{
+	return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
+}
+
+static struct kobj_attribute cpu_normalized_load_attr =
+	__ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
+			NULL);
+
+static struct attribute *rq_attrs[] = {
+	&cpu_normalized_load_attr.attr,
+	&def_timer_ms_attr.attr,
+	&run_queue_avg_attr.attr,
+	&run_queue_poll_ms_attr.attr,
+	&hotplug_disabled_attr.attr,
+	NULL,
+};
+
+static struct attribute_group rq_attr_group = {
+	.attrs = rq_attrs,
+};
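+
+/*
+ * The attributes above are exposed under
+ * /sys/devices/system/cpu/cpu0/rq-stats/ (see init_rq_attribs() below):
+ * run_queue_avg, run_queue_poll_ms, def_timer_ms, cpu_normalized_load and
+ * hotplug_disable.
+ */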
+
+static int init_rq_attribs(void)
+{
+	int err;
+
+	rq_info.rq_avg = 0;
+	rq_info.attr_group = &rq_attr_group;
+
+	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
+	rq_info.kobj = kobject_create_and_add("rq-stats",
+			&get_cpu_device(0)->kobj);
+	if (!rq_info.kobj)
+		return -ENOMEM;
+
+	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
+	if (err)
+		kobject_put(rq_info.kobj);
+	else
+		kobject_uevent(rq_info.kobj, KOBJ_ADD);
+
+	return err;
+}
+
+static int __init msm_rq_stats_init(void)
+{
+	int ret;
+	int i;
+	struct cpufreq_policy cpu_policy;
+
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -EPERM;
+#endif
+
+	rq_wq = create_singlethread_workqueue("rq_stats");
+	BUG_ON(!rq_wq);
+	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
+	spin_lock_init(&rq_lock);
+	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
+	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
+	rq_info.rq_poll_last_jiffy = 0;
+	rq_info.def_timer_last_jiffy = 0;
+	rq_info.hotplug_disabled = 0;
+	ret = init_rq_attribs();
+
+	rq_info.init = 1;
+
+	for_each_possible_cpu(i) {
+		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
+
+		mutex_init(&pcpu->cpu_load_mutex);
+		cpufreq_get_policy(&cpu_policy, i);
+		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
+		if (cpu_online(i))
+			pcpu->cur_freq = cpufreq_quick_get(i);
+		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
+	}
+	freq_transition.notifier_call = cpufreq_transition_handler;
+	cpu_hotplug.notifier_call = cpu_hotplug_handler;
+	cpufreq_register_notifier(&freq_transition,
+					CPUFREQ_TRANSITION_NOTIFIER);
+	register_hotcpu_notifier(&cpu_hotplug);
+
+	return ret;
+}
+late_initcall(msm_rq_stats_init);
+
+static int __init msm_rq_stats_early_init(void)
+{
+#ifndef CONFIG_SMP
+	/* Bail out if this is not an SMP Target */
+	rq_info.init = 0;
+	return -EPERM;
+#endif
+
+	pm_notifier(system_suspend_handler, 0);
+	return 0;
+}
+core_initcall(msm_rq_stats_early_init);
diff --git a/drivers/soc/qcom/scm-boot.c b/drivers/soc/qcom/scm-boot.c
new file mode 100644
index 0000000..369fb27
--- /dev/null
+++ b/drivers/soc/qcom/scm-boot.c
@@ -0,0 +1,111 @@
+/* Copyright (c) 2010, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/scm-boot.h>
+
+/*
+ * Set the cold/warm boot address for one of the CPU cores.
+ */
+int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
+{
+	struct {
+		u32 flags;
+		u32 addr;
+	} cmd;
+
+	cmd.addr = addr;
+	cmd.flags = flags;
+	return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR,
+			&cmd, sizeof(cmd), NULL, 0);
+}
+EXPORT_SYMBOL(scm_set_boot_addr);
+
+/**
+ *	scm_set_boot_addr_mc - Set entry physical address for cpus
+ *	@addr:	32bit physical address
+ *	@aff0:	Collective bitmask of the affinity-level-0 of the mpidr
+ *		1<<aff0_CPU0 | 1<<aff0_CPU1 | ... | 1<<aff0_CPU31
+ *		Supports a maximum of 32 cpus under any affinity level.
+ *	@aff1:	Collective bitmask of the affinity-level-1 of the mpidr
+ *	@aff2:	Collective bitmask of the affinity-level-2 of the mpidr
+ *	@flags:	Flag to differentiate between coldboot vs warmboot
+ */
+int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+		u32 aff1, u32 aff2, u32 flags)
+{
+	struct {
+		u32 addr;
+		u32 aff0;
+		u32 aff1;
+		u32 aff2;
+		u32 reserved;
+		u32 flags;
+	} cmd;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		cmd.addr = addr;
+		cmd.aff0 = aff0;
+		cmd.aff1 = aff1;
+		cmd.aff2 = aff2;
+		/*
+		 * Reserved for future chips with affinity level 3 effectively
+		 * 1 << 0
+		 */
+		cmd.reserved = ~0U;
+		cmd.flags = flags | SCM_FLAG_HLOS;
+		return scm_call(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC,
+				&cmd, sizeof(cmd), NULL, 0);
+	}
+
+	flags = flags | SCM_FLAG_HLOS;
+	desc.args[0] = addr;
+	desc.args[1] = aff0;
+	desc.args[2] = aff1;
+	desc.args[3] = aff2;
+	desc.args[4] = ~0ULL;
+	desc.args[5] = flags;
+	desc.arginfo = SCM_ARGS(6);
+
+	return scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC), &desc);
+}
+EXPORT_SYMBOL(scm_set_boot_addr_mc);
+
+/**
+ *	scm_set_warm_boot_addr_mc_for_all -
+ *	Set entry physical address for __all__ possible cpus.
+ *	This API passes an all-set mask to secure-os and relies on
+ *	secure-os to set the boot address appropriately on the
+ *	current system.
+ *	@addr:	32bit physical address
+ */
+int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr)
+{
+	return scm_set_boot_addr_mc(addr, ~0U, ~0U, ~0U,
+			SCM_FLAG_WARMBOOT_MC);
+}
+EXPORT_SYMBOL(scm_set_warm_boot_addr_mc_for_all);
+
+/**
+ *	scm_is_mc_boot_available -
+ *	Checks if TZ supports the boot API for multi-cluster configuration.
+ *	Returns true if available and false otherwise.
+ */
+int scm_is_mc_boot_available(void)
+{
+	return scm_is_call_available(SCM_SVC_BOOT, SCM_BOOT_ADDR_MC);
+}
+EXPORT_SYMBOL(scm_is_mc_boot_available);
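+
+/*
+ * Illustrative usage sketch: prefer the multi-cluster call when TZ supports
+ * it and fall back to the legacy interface otherwise (addr and flags are
+ * supplied by the caller):
+ *
+ *	if (scm_is_mc_boot_available())
+ *		ret = scm_set_warm_boot_addr_mc_for_all(addr);
+ *	else
+ *		ret = scm_set_boot_addr(addr, flags);
+ */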
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
new file mode 100644
index 0000000..714c848
--- /dev/null
+++ b/drivers/soc/qcom/scm.c
@@ -0,0 +1,1235 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/cacheflush.h>
+#include <asm/compiler.h>
+
+#include <soc/qcom/scm.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/scm.h>
+
+#define SCM_ENOMEM		-5
+#define SCM_EOPNOTSUPP		-4
+#define SCM_EINVAL_ADDR		-3
+#define SCM_EINVAL_ARG		-2
+#define SCM_ERROR		-1
+#define SCM_INTERRUPTED		1
+#define SCM_EBUSY		-55
+#define SCM_V2_EBUSY		-12
+
+static DEFINE_MUTEX(scm_lock);
+
+/*
+ * MSM8996 V2 requires a lock to protect against
+ * concurrent accesses between the limits management
+ * driver and the clock controller
+ */
+DEFINE_MUTEX(scm_lmh_lock);
+
+#define SCM_EBUSY_WAIT_MS 30
+#define SCM_EBUSY_MAX_RETRY 67
+
+#define N_EXT_SCM_ARGS 7
+#define FIRST_EXT_ARG_IDX 3
+#define SMC_ATOMIC_SYSCALL 31
+#define N_REGISTER_ARGS (MAX_SCM_ARGS - N_EXT_SCM_ARGS + 1)
+#define SMC64_MASK 0x40000000
+#define SMC_ATOMIC_MASK 0x80000000
+#define IS_CALL_AVAIL_CMD 1
+
+#define SCM_BUF_LEN(__cmd_size, __resp_size)	\
+	(sizeof(struct scm_command) + sizeof(struct scm_response) + \
+		__cmd_size + __resp_size)
+/**
+ * struct scm_command - one SCM command buffer
+ * @len: total available memory for command and response
+ * @buf_offset: start of command buffer
+ * @resp_hdr_offset: start of response buffer
+ * @id: command to be executed
+ * @buf: buffer returned from scm_get_command_buffer()
+ *
+ * An SCM command is laid out in memory as follows:
+ *
+ *	------------------- <--- struct scm_command
+ *	| command header  |
+ *	------------------- <--- scm_get_command_buffer()
+ *	| command buffer  |
+ *	------------------- <--- struct scm_response and
+ *	| response header |      scm_command_to_response()
+ *	------------------- <--- scm_get_response_buffer()
+ *	| response buffer |
+ *	-------------------
+ *
+ * There can be arbitrary padding between the headers and buffers so
+ * you should always use the appropriate scm_get_*_buffer() routines
+ * to access the buffers in a safe manner.
+ */
+struct scm_command {
+	u32	len;
+	u32	buf_offset;
+	u32	resp_hdr_offset;
+	u32	id;
+	u32	buf[0];
+};
+
+/**
+ * struct scm_response - one SCM response buffer
+ * @len: total available memory for response
+ * @buf_offset: start of response data relative to start of scm_response
+ * @is_complete: indicates if the command has finished processing
+ */
+struct scm_response {
+	u32	len;
+	u32	buf_offset;
+	u32	is_complete;
+};
+
+#ifdef CONFIG_ARM64
+
+#define R0_STR "x0"
+#define R1_STR "x1"
+#define R2_STR "x2"
+#define R3_STR "x3"
+#define R4_STR "x4"
+#define R5_STR "x5"
+#define R6_STR "x6"
+
+/* Outer caches unsupported on ARM64 platforms */
+#define outer_inv_range(x, y)
+#define outer_flush_range(x, y)
+
+#define __cpuc_flush_dcache_area __flush_dcache_area
+
+#else
+
+#define R0_STR "r0"
+#define R1_STR "r1"
+#define R2_STR "r2"
+#define R3_STR "r3"
+#define R4_STR "r4"
+#define R5_STR "r5"
+
+#endif
+
+/**
+ * scm_command_to_response() - Get a pointer to a scm_response
+ * @cmd: command
+ *
+ * Returns a pointer to a response for a command.
+ */
+static inline struct scm_response *scm_command_to_response(
+		const struct scm_command *cmd)
+{
+	return (void *)cmd + cmd->resp_hdr_offset;
+}
+
+/**
+ * scm_get_command_buffer() - Get a pointer to a command buffer
+ * @cmd: command
+ *
+ * Returns a pointer to the command buffer of a command.
+ */
+static inline void *scm_get_command_buffer(const struct scm_command *cmd)
+{
+	return (void *)cmd->buf;
+}
+
+/**
+ * scm_get_response_buffer() - Get a pointer to a response buffer
+ * @rsp: response
+ *
+ * Returns a pointer to a response buffer of a response.
+ */
+static inline void *scm_get_response_buffer(const struct scm_response *rsp)
+{
+	return (void *)rsp + rsp->buf_offset;
+}
+
+static int scm_remap_error(int err)
+{
+	switch (err) {
+	case SCM_ERROR:
+		return -EIO;
+	case SCM_EINVAL_ADDR:
+	case SCM_EINVAL_ARG:
+		return -EINVAL;
+	case SCM_EOPNOTSUPP:
+		return -EOPNOTSUPP;
+	case SCM_ENOMEM:
+		return -ENOMEM;
+	case SCM_EBUSY:
+		return SCM_EBUSY;
+	case SCM_V2_EBUSY:
+		return SCM_V2_EBUSY;
+	}
+	return -EINVAL;
+}
+
+static u32 smc(u32 cmd_addr)
+{
+	int context_id;
+	register u32 r0 asm("r0") = 1;
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = cmd_addr;
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R0_STR)
+			__asmeq("%2", R1_STR)
+			__asmeq("%3", R2_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0)
+			: "r" (r0), "r" (r1), "r" (r2)
+			: "r3");
+	} while (r0 == SCM_INTERRUPTED);
+
+	return r0;
+}
+
+static int __scm_call(const struct scm_command *cmd)
+{
+	int ret;
+	u32 cmd_addr = virt_to_phys(cmd);
+
+	/*
+	 * Flush the command buffer so that the secure world sees
+	 * the correct data.
+	 */
+	__cpuc_flush_dcache_area((void *)cmd, cmd->len);
+	outer_flush_range(cmd_addr, cmd_addr + cmd->len);
+
+	ret = smc(cmd_addr);
+	if (ret < 0) {
+		if (ret != SCM_EBUSY)
+			pr_err("scm_call failed with error code %d\n", ret);
+		ret = scm_remap_error(ret);
+	}
+	return ret;
+}
+
+#ifndef CONFIG_ARM64
+static void scm_inv_range(unsigned long start, unsigned long end)
+{
+	u32 cacheline_size, ctr;
+
+	asm volatile("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));
+	cacheline_size = 4 << ((ctr >> 16) & 0xf);
+
+	start = round_down(start, cacheline_size);
+	end = round_up(end, cacheline_size);
+	outer_inv_range(start, end);
+	while (start < end) {
+		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (start)
+		     : "memory");
+		start += cacheline_size;
+	}
+	dsb();
+	isb();
+}
+#else
+
+static void scm_inv_range(unsigned long start, unsigned long end)
+{
+	dmac_inv_range((void *)start, (void *)end);
+}
+#endif
+
+/**
+ * scm_call_common() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ * @scm_buf: internal scm structure used for passing data
+ * @scm_buf_len: length of the internal scm structure
+ *
+ * Core function for making an scm call. Initializes the given cmd structure
+ * with appropriate values and makes the actual scm call. Validation of the
+ * cmd pointer and length must occur in the calling function.
+ *
+ * Returns the appropriate error code from the scm call
+ */
+static int scm_call_common(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+				size_t cmd_len, void *resp_buf, size_t resp_len,
+				struct scm_command *scm_buf,
+				size_t scm_buf_length)
+{
+	int ret;
+	struct scm_response *rsp;
+	unsigned long start, end;
+
+	scm_buf->len = scm_buf_length;
+	scm_buf->buf_offset = offsetof(struct scm_command, buf);
+	scm_buf->resp_hdr_offset = scm_buf->buf_offset + cmd_len;
+	scm_buf->id = (svc_id << 10) | cmd_id;
+
+	if (cmd_buf)
+		memcpy(scm_get_command_buffer(scm_buf), cmd_buf, cmd_len);
+
+	mutex_lock(&scm_lock);
+	ret = __scm_call(scm_buf);
+	mutex_unlock(&scm_lock);
+	if (ret)
+		return ret;
+
+	rsp = scm_command_to_response(scm_buf);
+	start = (unsigned long)rsp;
+
+	do {
+		scm_inv_range(start, start + sizeof(*rsp));
+	} while (!rsp->is_complete);
+
+	end = (unsigned long)scm_get_response_buffer(rsp) + resp_len;
+	scm_inv_range(start, end);
+
+	if (resp_buf)
+		memcpy(resp_buf, scm_get_response_buffer(rsp), resp_len);
+
+	return ret;
+}
+
+/*
+ * Sometimes the secure world may be busy waiting for a particular resource.
+ * In those situations, it is expected that the secure world returns a special
+ * error code (SCM_EBUSY). Retry any scm_call that fails with this error code,
+ * but with a timeout in place. Also, don't move this into scm_call_common,
+ * since we want the first attempt to be the "fastpath".
+ */
+static int _scm_call_retry(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+				size_t cmd_len, void *resp_buf, size_t resp_len,
+				struct scm_command *cmd,
+				size_t len)
+{
+	int ret, retry_count = 0;
+
+	do {
+		ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len,
+					resp_buf, resp_len, cmd, len);
+		if (ret == SCM_EBUSY)
+			msleep(SCM_EBUSY_WAIT_MS);
+		if (retry_count == 33)
+			pr_warn("scm: secure world has been busy for 1 second!\n");
+	} while (ret == SCM_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+
+	if (ret == SCM_EBUSY)
+		pr_err("scm: secure world busy (rc = SCM_EBUSY)\n");
+
+	return ret;
+}
+
+/**
+ * scm_call_noalloc - Send an SCM command
+ *
+ * Same as scm_call except clients pass in a buffer (@scm_buf) to be used for
+ * scm internal structures. The buffer should be allocated with
+ * DEFINE_SCM_BUFFER to account for the proper alignment and size.
+ */
+int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len,
+		void *scm_buf, size_t scm_buf_len)
+{
+	int ret;
+	size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+	if (cmd_len > scm_buf_len || resp_len > scm_buf_len ||
+	    len > scm_buf_len)
+		return -EINVAL;
+
+	if (!IS_ALIGNED((unsigned long)scm_buf, PAGE_SIZE))
+		return -EINVAL;
+
+	memset(scm_buf, 0, scm_buf_len);
+
+	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+				resp_len, scm_buf, len);
+	return ret;
+}
+
+#ifdef CONFIG_ARM64
+
+static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u64 r0 asm("r0") = x0;
+	register u64 r1 asm("r1") = x1;
+	register u64 r2 asm("r2") = x2;
+	register u64 r3 asm("r3") = x3;
+	register u64 r4 asm("r4") = x4;
+	register u64 r5 asm("r5") = x5;
+	register u64 r6 asm("r6") = 0;
+
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R2_STR)
+			__asmeq("%3", R3_STR)
+			__asmeq("%4", R0_STR)
+			__asmeq("%5", R1_STR)
+			__asmeq("%6", R2_STR)
+			__asmeq("%7", R3_STR)
+			__asmeq("%8", R4_STR)
+			__asmeq("%9", R5_STR)
+			__asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+			  "r" (r5), "r" (r6)
+			: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+			  "x14", "x15", "x16", "x17");
+	} while (r0 == SCM_INTERRUPTED);
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+
+	return r0;
+}
+
+static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u32 r0 asm("r0") = w0;
+	register u32 r1 asm("r1") = w1;
+	register u32 r2 asm("r2") = w2;
+	register u32 r3 asm("r3") = w3;
+	register u32 r4 asm("r4") = w4;
+	register u32 r5 asm("r5") = w5;
+	register u32 r6 asm("r6") = 0;
+
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R2_STR)
+			__asmeq("%3", R3_STR)
+			__asmeq("%4", R0_STR)
+			__asmeq("%5", R1_STR)
+			__asmeq("%6", R2_STR)
+			__asmeq("%7", R3_STR)
+			__asmeq("%8", R4_STR)
+			__asmeq("%9", R5_STR)
+			__asmeq("%10", R6_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+			  "r" (r5), "r" (r6)
+			: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
+			"x14", "x15", "x16", "x17");
+
+	} while (r0 == SCM_INTERRUPTED);
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+
+	return r0;
+}
+
+#else
+
+static int __scm_call_armv8_32(u32 w0, u32 w1, u32 w2, u32 w3, u32 w4, u32 w5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	register u32 r0 asm("r0") = w0;
+	register u32 r1 asm("r1") = w1;
+	register u32 r2 asm("r2") = w2;
+	register u32 r3 asm("r3") = w3;
+	register u32 r4 asm("r4") = w4;
+	register u32 r5 asm("r5") = w5;
+
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R2_STR)
+			__asmeq("%3", R3_STR)
+			__asmeq("%4", R0_STR)
+			__asmeq("%5", R1_STR)
+			__asmeq("%6", R2_STR)
+			__asmeq("%7", R3_STR)
+			__asmeq("%8", R4_STR)
+			__asmeq("%9", R5_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+			: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
+			 "r" (r5));
+
+	} while (r0 == SCM_INTERRUPTED);
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+
+	return r0;
+}
+
+static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+				u64 *ret1, u64 *ret2, u64 *ret3)
+{
+	return 0;
+}
+#endif
+
+struct scm_extra_arg {
+	union {
+		u32 args32[N_EXT_SCM_ARGS];
+		u64 args64[N_EXT_SCM_ARGS];
+	};
+};
+
+static enum scm_interface_version {
+	SCM_UNKNOWN,
+	SCM_LEGACY,
+	SCM_ARMV8_32,
+	SCM_ARMV8_64,
+} scm_version = SCM_UNKNOWN;
+
+/* This will be set to specify SMC32 or SMC64 */
+static u32 scm_version_mask;
+
+bool is_scm_armv8(void)
+{
+	int ret;
+	u64 ret1, x0;
+
+	if (likely(scm_version != SCM_UNKNOWN))
+		return (scm_version == SCM_ARMV8_32) ||
+			(scm_version == SCM_ARMV8_64);
+	/*
+	 * This is a one time check that runs on the first ever
+	 * invocation of is_scm_armv8. We might be called in atomic
+	 * context so no mutexes etc. Also, we can't use the scm_call2
+	 * or scm_call2_APIs directly since they depend on this init.
+	 */
+
+	/* First try a SMC64 call */
+	scm_version = SCM_ARMV8_64;
+	ret1 = 0;
+	x0 = SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD) | SMC_ATOMIC_MASK;
+	ret = __scm_call_armv8_64(x0 | SMC64_MASK, SCM_ARGS(1), x0, 0, 0, 0,
+				  &ret1, NULL, NULL);
+	if (ret || !ret1) {
+		/* Try SMC32 call */
+		ret1 = 0;
+		ret = __scm_call_armv8_32(x0, SCM_ARGS(1), x0, 0, 0, 0,
+					  &ret1, NULL, NULL);
+		if (ret || !ret1)
+			scm_version = SCM_LEGACY;
+		else
+			scm_version = SCM_ARMV8_32;
+	} else
+		scm_version_mask = SMC64_MASK;
+
+	pr_debug("scm_call: scm version is %x, mask is %x\n", scm_version,
+		  scm_version_mask);
+
+	return (scm_version == SCM_ARMV8_32) ||
+			(scm_version == SCM_ARMV8_64);
+}
+EXPORT_SYMBOL(is_scm_armv8);
+
+/*
+ * If there are more than N_REGISTER_ARGS, allocate a buffer and place
+ * the additional arguments in it. The extra argument buffer will be
+ * pointed to by X5.
+ */
+static int allocate_extra_arg_buffer(struct scm_desc *desc, gfp_t flags)
+{
+	int i, j;
+	struct scm_extra_arg *argbuf;
+	int arglen = desc->arginfo & 0xf;
+	size_t argbuflen = PAGE_ALIGN(sizeof(struct scm_extra_arg));
+
+	desc->x5 = desc->args[FIRST_EXT_ARG_IDX];
+
+	if (likely(arglen <= N_REGISTER_ARGS)) {
+		desc->extra_arg_buf = NULL;
+		return 0;
+	}
+
+	argbuf = kzalloc(argbuflen, flags);
+	if (!argbuf) {
+		pr_err("scm_call: failed to alloc mem for extended argument buffer\n");
+		return -ENOMEM;
+	}
+
+	desc->extra_arg_buf = argbuf;
+
+	j = FIRST_EXT_ARG_IDX;
+	if (scm_version == SCM_ARMV8_64)
+		for (i = 0; i < N_EXT_SCM_ARGS; i++)
+			argbuf->args64[i] = desc->args[j++];
+	else
+		for (i = 0; i < N_EXT_SCM_ARGS; i++)
+			argbuf->args32[i] = desc->args[j++];
+	desc->x5 = virt_to_phys(argbuf);
+	__cpuc_flush_dcache_area(argbuf, argbuflen);
+	outer_flush_range(virt_to_phys(argbuf),
+			  virt_to_phys(argbuf) + argbuflen);
+
+	return 0;
+}
+
+/**
+ * scm_call2() - Invoke a syscall in the secure world
+ * @fn_id: The function ID for this syscall
+ * @desc: Descriptor structure containing arguments and return values
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ * This should *only* be called in pre-emptible context.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. An important point that must be noted
+ * is that on ARMV8 architectures, invalidation actually also causes a dirty
+ * cache line to be cleaned (flushed + unset-dirty-bit). Therefore it is of
+ * paramount importance that the buffer be flushed before invoking scm_call2,
+ * even if you don't care about the contents of that buffer.
+ *
+ * Note that cache maintenance on the argument buffer (desc->args) is taken care
+ * of by scm_call2; however, callers are responsible for any other cached
+ * buffers passed over to the secure world.
+ */
+int scm_call2(u32 fn_id, struct scm_desc *desc)
+{
+	int arglen = desc->arginfo & 0xf;
+	int ret, retry_count = 0;
+	u64 x0;
+
+	if (unlikely(!is_scm_armv8()))
+		return -ENODEV;
+
+	ret = allocate_extra_arg_buffer(desc, GFP_KERNEL);
+	if (ret)
+		return ret;
+
+	x0 = fn_id | scm_version_mask;
+
+	do {
+		mutex_lock(&scm_lock);
+
+		if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+			mutex_lock(&scm_lmh_lock);
+
+		desc->ret[0] = desc->ret[1] = desc->ret[2] = 0;
+
+		pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+			x0, desc->arginfo, desc->args[0], desc->args[1],
+			desc->args[2], desc->x5);
+
+		trace_scm_call_start(x0, desc);
+
+		if (scm_version == SCM_ARMV8_64)
+			ret = __scm_call_armv8_64(x0, desc->arginfo,
+						  desc->args[0], desc->args[1],
+						  desc->args[2], desc->x5,
+						  &desc->ret[0], &desc->ret[1],
+						  &desc->ret[2]);
+		else
+			ret = __scm_call_armv8_32(x0, desc->arginfo,
+						  desc->args[0], desc->args[1],
+						  desc->args[2], desc->x5,
+						  &desc->ret[0], &desc->ret[1],
+						  &desc->ret[2]);
+
+		trace_scm_call_end(desc);
+
+		if (SCM_SVC_ID(fn_id) == SCM_SVC_LMH)
+			mutex_unlock(&scm_lmh_lock);
+
+		mutex_unlock(&scm_lock);
+
+		if (ret == SCM_V2_EBUSY)
+			msleep(SCM_EBUSY_WAIT_MS);
+		if (retry_count == 33)
+			pr_warn("scm: secure world has been busy for 1 second!\n");
+	} while (ret == SCM_V2_EBUSY && (retry_count++ < SCM_EBUSY_MAX_RETRY));
+
+	if (ret < 0)
+		pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, desc->arginfo, desc->args[0], desc->args[1],
+			desc->args[2], desc->x5, ret, desc->ret[0],
+			desc->ret[1], desc->ret[2]);
+
+	if (arglen > N_REGISTER_ARGS)
+		kfree(desc->extra_arg_buf);
+	if (ret < 0)
+		return scm_remap_error(ret);
+	return 0;
+}
+EXPORT_SYMBOL(scm_call2);
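+
+/*
+ * Illustrative usage sketch (mirrors scm_set_boot_addr_mc() in scm-boot.c);
+ * the service/command IDs and the argument value are placeholders:
+ *
+ *	struct scm_desc desc = {0};
+ *
+ *	desc.args[0] = arg0;
+ *	desc.arginfo = SCM_ARGS(1);
+ *	ret = scm_call2(SCM_SIP_FNID(svc_id, cmd_id), &desc);
+ *	if (!ret)
+ *		val = desc.ret[0];
+ */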
+
+/**
+ * scm_call2_atomic() - Invoke a syscall in the secure world
+ *
+ * Similar to scm_call2 except that this can be invoked in atomic context.
+ * There is also no retry mechanism implemented. Please ensure that the
+ * secure world syscall can be executed in such a context and can complete
+ * in a timely manner.
+ */
+int scm_call2_atomic(u32 fn_id, struct scm_desc *desc)
+{
+	int arglen = desc->arginfo & 0xf;
+	int ret;
+	u64 x0;
+
+	if (unlikely(!is_scm_armv8()))
+		return -ENODEV;
+
+	ret = allocate_extra_arg_buffer(desc, GFP_ATOMIC);
+	if (ret)
+		return ret;
+
+	x0 = fn_id | BIT(SMC_ATOMIC_SYSCALL) | scm_version_mask;
+
+	pr_debug("scm_call: func id %#llx, args: %#x, %#llx, %#llx, %#llx, %#llx\n",
+		x0, desc->arginfo, desc->args[0], desc->args[1],
+		desc->args[2], desc->x5);
+
+	if (scm_version == SCM_ARMV8_64)
+		ret = __scm_call_armv8_64(x0, desc->arginfo, desc->args[0],
+					  desc->args[1], desc->args[2],
+					  desc->x5, &desc->ret[0],
+					  &desc->ret[1], &desc->ret[2]);
+	else
+		ret = __scm_call_armv8_32(x0, desc->arginfo, desc->args[0],
+					  desc->args[1], desc->args[2],
+					  desc->x5, &desc->ret[0],
+					  &desc->ret[1], &desc->ret[2]);
+	if (ret < 0)
+		pr_err("scm_call failed: func id %#llx, arginfo: %#x, args: %#llx, %#llx, %#llx, %#llx, ret: %d, syscall returns: %#llx, %#llx, %#llx\n",
+			x0, desc->arginfo, desc->args[0], desc->args[1],
+			desc->args[2], desc->x5, ret, desc->ret[0],
+			desc->ret[1], desc->ret[2]);
+
+	if (arglen > N_REGISTER_ARGS)
+		kfree(desc->extra_arg_buf);
+	if (ret < 0)
+		return scm_remap_error(ret);
+	return ret;
+}
+
+/**
+ * scm_call() - Send an SCM command
+ * @svc_id: service identifier
+ * @cmd_id: command identifier
+ * @cmd_buf: command buffer
+ * @cmd_len: length of the command buffer
+ * @resp_buf: response buffer
+ * @resp_len: length of the response buffer
+ *
+ * Sends a command to the SCM and waits for the command to finish processing.
+ *
+ * A note on cache maintenance:
+ * Note that any buffers that are expected to be accessed by the secure world
+ * must be flushed before invoking scm_call and invalidated in the cache
+ * immediately after scm_call returns. Cache maintenance on the command and
+ * response buffers is taken care of by scm_call; however, callers are
+ * responsible for any other cached buffers passed over to the secure world.
+ */
+int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len)
+{
+	struct scm_command *cmd;
+	int ret;
+	size_t len = SCM_BUF_LEN(cmd_len, resp_len);
+
+	if (cmd_len > len || resp_len > len)
+		return -EINVAL;
+
+	cmd = kzalloc(PAGE_ALIGN(len), GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	ret = scm_call_common(svc_id, cmd_id, cmd_buf, cmd_len, resp_buf,
+				resp_len, cmd, len);
+	if (unlikely(ret == SCM_EBUSY))
+		ret = _scm_call_retry(svc_id, cmd_id, cmd_buf, cmd_len,
+				      resp_buf, resp_len, cmd, PAGE_ALIGN(len));
+	kfree(cmd);
+	return ret;
+}
+EXPORT_SYMBOL(scm_call);
+
+#define SCM_CLASS_REGISTER	(0x2 << 8)
+#define SCM_MASK_IRQS		BIT(5)
+#define SCM_ATOMIC(svc, cmd, n) (((((svc) << 10)|((cmd) & 0x3ff)) << 12) | \
+				SCM_CLASS_REGISTER | \
+				SCM_MASK_IRQS | \
+				(n & 0xf))
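+
+/*
+ * Worked example of the legacy atomic command encoding (assuming, for
+ * illustration only, SCM_SVC_IO == 0x5 and SCM_IO_READ == 0x1):
+ *
+ *	SCM_ATOMIC(SCM_SVC_IO, SCM_IO_READ, 1)
+ *	  = ((((0x5 << 10) | 0x1) << 12) | (0x2 << 8) | BIT(5) | 1)
+ *	  = (0x1401 << 12) | 0x200 | 0x20 | 0x1
+ *	  = 0x01401221
+ */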
+
+/**
+ * scm_call_atomic1() - Send an atomic SCM command with one argument
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R0_STR)
+		__asmeq("%2", R1_STR)
+		__asmeq("%3", R2_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2)
+		: "r3");
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic1);
+
+/**
+ * scm_call_atomic1_1() - SCM command with one argument and one return value
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @ret1: first return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 1);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R1_STR)
+		__asmeq("%2", R0_STR)
+		__asmeq("%3", R1_STR)
+		__asmeq("%4", R2_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0), "=r" (r1)
+		: "r" (r0), "r" (r1), "r" (r2)
+		: "r3");
+	if (ret1)
+		*ret1 = r1;
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic1_1);
+
+/**
+ * scm_call_atomic2() - Send an atomic SCM command with two arguments
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 2);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R0_STR)
+		__asmeq("%2", R1_STR)
+		__asmeq("%3", R2_STR)
+		__asmeq("%4", R3_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3));
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic2);
+
+/**
+ * scm_call_atomic3() - Send an atomic SCM command with three arguments
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3)
+{
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 3);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R0_STR)
+		__asmeq("%2", R1_STR)
+		__asmeq("%3", R2_STR)
+		__asmeq("%4", R3_STR)
+		__asmeq("%5", R4_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4));
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic3);
+
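+/**
+ * scm_call_atomic4_3() - SCM command with four arguments and two return values
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ * @arg4: fourth argument
+ * @ret1: first return value
+ * @ret2: second return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */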
+s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+		u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+	int ret;
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 4);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+	register u32 r5 asm("r5") = arg4;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R1_STR)
+		__asmeq("%2", R2_STR)
+		__asmeq("%3", R0_STR)
+		__asmeq("%4", R1_STR)
+		__asmeq("%5", R2_STR)
+		__asmeq("%6", R3_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0), "=r" (r1), "=r" (r2)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5));
+	ret = r0;
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic4_3);
+
+/**
+ * scm_call_atomic5_3() - SCM command with five arguments and three return values
+ * @svc: service identifier
+ * @cmd: command identifier
+ * @arg1: first argument
+ * @arg2: second argument
+ * @arg3: third argument
+ * @arg4: fourth argument
+ * @arg5: fifth argument
+ * @ret1: first return value
+ * @ret2: second return value
+ * @ret3: third return value
+ *
+ * This shall only be used with commands that are guaranteed to be
+ * uninterruptible, atomic and SMP safe.
+ */
+s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+{
+	int ret;
+	int context_id;
+	register u32 r0 asm("r0") = SCM_ATOMIC(svc, cmd, 5);
+	register u32 r1 asm("r1") = (uintptr_t)&context_id;
+	register u32 r2 asm("r2") = arg1;
+	register u32 r3 asm("r3") = arg2;
+	register u32 r4 asm("r4") = arg3;
+	register u32 r5 asm("r5") = arg4;
+	register u32 r6 asm("r6") = arg5;
+
+	asm volatile(
+		__asmeq("%0", R0_STR)
+		__asmeq("%1", R1_STR)
+		__asmeq("%2", R2_STR)
+		__asmeq("%3", R3_STR)
+		__asmeq("%4", R0_STR)
+		__asmeq("%5", R1_STR)
+		__asmeq("%6", R2_STR)
+		__asmeq("%7", R3_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+		"smc	#0\n"
+		: "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+		: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4), "r" (r5),
+		 "r" (r6));
+	ret = r0;
+
+	if (ret1)
+		*ret1 = r1;
+	if (ret2)
+		*ret2 = r2;
+	if (ret3)
+		*ret3 = r3;
+	return r0;
+}
+EXPORT_SYMBOL(scm_call_atomic5_3);
+
+u32 scm_get_version(void)
+{
+	int context_id;
+	static u32 version = -1;
+	register u32 r0 asm("r0");
+	register u32 r1 asm("r1");
+
+	if (version != -1)
+		return version;
+
+	mutex_lock(&scm_lock);
+
+	r0 = 0x1 << 8;
+	r1 = (uintptr_t)&context_id;
+	do {
+		asm volatile(
+			__asmeq("%0", R0_STR)
+			__asmeq("%1", R1_STR)
+			__asmeq("%2", R0_STR)
+			__asmeq("%3", R1_STR)
+#ifdef REQUIRES_SEC
+			".arch_extension sec\n"
+#endif
+			"smc	#0\n"
+			: "=r" (r0), "=r" (r1)
+			: "r" (r0), "r" (r1)
+			: "r2", "r3");
+	} while (r0 == SCM_INTERRUPTED);
+
+	version = r1;
+	mutex_unlock(&scm_lock);
+
+	return version;
+}
+EXPORT_SYMBOL(scm_get_version);
+
+#define SCM_IO_READ	0x1
+#define SCM_IO_WRITE	0x2
+
+u32 scm_io_read(phys_addr_t address)
+{
+	if (!is_scm_armv8()) {
+		return scm_call_atomic1(SCM_SVC_IO, SCM_IO_READ, address);
+	} else {
+		struct scm_desc desc = {
+			.args[0] = address,
+			.arginfo = SCM_ARGS(1),
+		};
+		scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_READ), &desc);
+		return desc.ret[0];
+	}
+}
+EXPORT_SYMBOL(scm_io_read);
+
+int scm_io_write(phys_addr_t address, u32 val)
+{
+	int ret;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call_atomic2(SCM_SVC_IO, SCM_IO_WRITE, address, val);
+	} else {
+		struct scm_desc desc = {
+			.args[0] = address,
+			.args[1] = val,
+			.arginfo = SCM_ARGS(2),
+		};
+		ret = scm_call2_atomic(SCM_SIP_FNID(SCM_SVC_IO, SCM_IO_WRITE),
+				       &desc);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(scm_io_write);
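+
+/*
+ * Illustrative read-modify-write of a secure register through the IO service
+ * (reg_phys_addr is an arbitrary placeholder, not a real register address):
+ *
+ *	u32 val = scm_io_read(reg_phys_addr);
+ *	scm_io_write(reg_phys_addr, val | BIT(0));
+ */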
+
+int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (!is_scm_armv8()) {
+		u32 ret_val = 0;
+		u32 svc_cmd = (svc_id << 10) | cmd_id;
+
+		ret = scm_call(SCM_SVC_INFO, IS_CALL_AVAIL_CMD, &svc_cmd,
+			sizeof(svc_cmd), &ret_val, sizeof(ret_val));
+		if (ret)
+			return ret;
+
+		return ret_val;
+	}
+	desc.arginfo = SCM_ARGS(1);
+	desc.args[0] = SCM_SIP_FNID(svc_id, cmd_id);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, IS_CALL_AVAIL_CMD), &desc);
+	if (ret)
+		return ret;
+
+	return desc.ret[0];
+}
+EXPORT_SYMBOL(scm_is_call_available);
+
+#define GET_FEAT_VERSION_CMD	3
+int scm_get_feat_version(u32 feat)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	if (!is_scm_armv8()) {
+		if (scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD)) {
+			u32 version;
+			if (!scm_call(SCM_SVC_INFO, GET_FEAT_VERSION_CMD, &feat,
+				      sizeof(feat), &version, sizeof(version)))
+				return version;
+		}
+		return 0;
+	}
+
+	ret = scm_is_call_available(SCM_SVC_INFO, GET_FEAT_VERSION_CMD);
+	if (ret <= 0)
+		return 0;
+
+	desc.args[0] = feat;
+	desc.arginfo = SCM_ARGS(1);
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO, GET_FEAT_VERSION_CMD),
+			&desc);
+	if (!ret)
+		return desc.ret[0];
+
+	return 0;
+}
+EXPORT_SYMBOL(scm_get_feat_version);
+
+#define RESTORE_SEC_CFG    2
+int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+{
+	struct scm_desc desc = {0};
+	int ret;
+	struct restore_sec_cfg {
+		u32 device_id;
+		u32 spare;
+	} cfg;
+
+	cfg.device_id = device_id;
+	cfg.spare = spare;
+
+	if (IS_ERR_OR_NULL(scm_ret))
+		return -EINVAL;
+
+	if (!is_scm_armv8())
+		return scm_call(SCM_SVC_MP, RESTORE_SEC_CFG, &cfg, sizeof(cfg),
+				scm_ret, sizeof(*scm_ret));
+
+	desc.args[0] = device_id;
+	desc.args[1] = spare;
+	desc.arginfo = SCM_ARGS(2);
+
+	ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP, RESTORE_SEC_CFG), &desc);
+	if (ret)
+		return ret;
+
+	*scm_ret = desc.ret[0];
+	return 0;
+}
+EXPORT_SYMBOL(scm_restore_sec_cfg);
+
+/*
+ * SCM call command ID to check secure mode
+ * Returns zero for a secure device.
+ * Returns one for a non-secure device, or for a secure
+ * device with debug enabled.
+ */
+#define TZ_INFO_GET_SECURE_STATE	0x4
+bool scm_is_secure_device(void)
+{
+	struct scm_desc desc = {0};
+	int ret = 0, resp;
+
+	desc.args[0] = 0;
+	desc.arginfo = 0;
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_INFO, TZ_INFO_GET_SECURE_STATE, NULL,
+			0, &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_INFO,
+				TZ_INFO_GET_SECURE_STATE),
+				&desc);
+		resp = desc.ret[0];
+	}
+
+	if (ret) {
+		pr_err("%s: SCM call failed\n", __func__);
+		return false;
+	}
+
+	if ((resp & BIT(0)) || (resp & BIT(2)))
+		return true;
+	else
+		return false;
+}
+EXPORT_SYMBOL(scm_is_secure_device);
diff --git a/drivers/soc/qcom/secure_buffer.c b/drivers/soc/qcom/secure_buffer.c
new file mode 100644
index 0000000..deb12a5
--- /dev/null
+++ b/drivers/soc/qcom/secure_buffer.c
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2011 Google, Inc
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/secure_buffer.h>
+
+DEFINE_MUTEX(secure_buffer_mutex);
+
+struct cp2_mem_chunks {
+	u32 chunk_list;
+	u32 chunk_list_size;
+	u32 chunk_size;
+} __attribute__ ((__packed__));
+
+struct cp2_lock_req {
+	struct cp2_mem_chunks chunks;
+	u32 mem_usage;
+	u32 lock;
+} __attribute__ ((__packed__));
+
+
+struct mem_prot_info {
+	phys_addr_t addr;
+	u64 size;
+};
+
+#define MEM_PROT_ASSIGN_ID		0x16
+#define MEM_PROTECT_LOCK_ID2		0x0A
+#define MEM_PROTECT_LOCK_ID2_FLAT	0x11
+#define V2_CHUNK_SIZE		SZ_1M
+#define FEATURE_ID_CP 12
+
+struct dest_vm_and_perm_info {
+	u32 vm;
+	u32 perm;
+	u64 ctx;
+	u32 ctx_size;
+};
+
+static void *qcom_secure_mem;
+#define QCOM_SECURE_MEM_SIZE (512*1024)
+
+static int secure_buffer_change_chunk(u32 chunks,
+				u32 nchunks,
+				u32 chunk_size,
+				int lock)
+{
+	struct cp2_lock_req request;
+	u32 resp;
+	int ret;
+	struct scm_desc desc = {0};
+
+	desc.args[0] = request.chunks.chunk_list = chunks;
+	desc.args[1] = request.chunks.chunk_list_size = nchunks;
+	desc.args[2] = request.chunks.chunk_size = chunk_size;
+	/* Usage is now always 0 */
+	desc.args[3] = request.mem_usage = 0;
+	desc.args[4] = request.lock = lock;
+	desc.args[5] = 0;
+	desc.arginfo = SCM_ARGS(6, SCM_RW, SCM_VAL, SCM_VAL, SCM_VAL, SCM_VAL,
+				SCM_VAL);
+
+	kmap_flush_unused();
+	kmap_atomic_flush_unused();
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_MP, MEM_PROTECT_LOCK_ID2,
+				&request, sizeof(request), &resp, sizeof(resp));
+	} else {
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROTECT_LOCK_ID2_FLAT), &desc);
+		resp = desc.ret[0];
+	}
+
+	return ret;
+}
+
+
+
+static int secure_buffer_change_table(struct sg_table *table, int lock)
+{
+	int i, j;
+	int ret = -EINVAL;
+	u32 *chunk_list;
+	struct scatterlist *sg;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		int nchunks;
+		int size = sg->length;
+		int chunk_list_len;
+		phys_addr_t chunk_list_phys;
+
+		/*
+		 * This should theoretically be a phys_addr_t but the protocol
+		 * indicates this should be a u32.
+		 */
+		u32 base;
+		u64 tmp = sg_dma_address(sg);
+
+		WARN((tmp >> 32) & 0xffffffff,
+			"%s: there are ones in the upper 32 bits of the sg at %p! They will be truncated! Address: 0x%llx\n",
+			__func__, sg, tmp);
+		if (unlikely(!size || (size % V2_CHUNK_SIZE))) {
+			WARN(1,
+				"%s: chunk %d has invalid size: 0x%x. Must be a multiple of 0x%x\n",
+				__func__, i, size, V2_CHUNK_SIZE);
+			return -EINVAL;
+		}
+
+		base = (u32)tmp;
+
+		nchunks = size / V2_CHUNK_SIZE;
+		chunk_list_len = sizeof(u32)*nchunks;
+
+		chunk_list = kzalloc(chunk_list_len, GFP_KERNEL);
+
+		if (!chunk_list)
+			return -ENOMEM;
+
+		chunk_list_phys = virt_to_phys(chunk_list);
+		for (j = 0; j < nchunks; j++)
+			chunk_list[j] = base + j * V2_CHUNK_SIZE;
+
+		/*
+		 * Flush the chunk list before sending the memory to the
+		 * secure environment to ensure the data is actually present
+		 * in RAM
+		 */
+		dmac_flush_range(chunk_list, chunk_list + chunk_list_len);
+
+		ret = secure_buffer_change_chunk(virt_to_phys(chunk_list),
+				nchunks, V2_CHUNK_SIZE, lock);
+
+		if (!ret) {
+			/*
+			 * Set or clear the private page flag to communicate the
+			 * status of the chunk to other entities
+			 */
+			if (lock)
+				SetPagePrivate(sg_page(sg));
+			else
+				ClearPagePrivate(sg_page(sg));
+		}
+
+		kfree(chunk_list);
+	}
+
+	return ret;
+}
+
+int msm_secure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 1);
+	mutex_unlock(&secure_buffer_mutex);
+
+	return ret;
+
+}
+
+int msm_unsecure_table(struct sg_table *table)
+{
+	int ret;
+
+	mutex_lock(&secure_buffer_mutex);
+	ret = secure_buffer_change_table(table, 0);
+	mutex_unlock(&secure_buffer_mutex);
+	return ret;
+
+}
+
+static struct dest_vm_and_perm_info *
+populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
+		   size_t *size_in_bytes)
+{
+	struct dest_vm_and_perm_info *dest_info;
+	int i;
+	size_t size;
+
+	/* Reject sizes above one page to avoid costly higher-order allocations */
+	size = nelements * sizeof(*dest_info);
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	dest_info = kzalloc(size, GFP_KERNEL);
+	if (!dest_info)
+		return NULL;
+
+	for (i = 0; i < nelements; i++) {
+		dest_info[i].vm = dest_vmids[i];
+		dest_info[i].perm = dest_perms[i];
+		dest_info[i].ctx = 0x0;
+		dest_info[i].ctx_size = 0;
+	}
+
+	*size_in_bytes = size;
+	return dest_info;
+}
+
+/* Must hold secure_buffer_mutex while allocated buffer is in use */
+static struct mem_prot_info *get_info_list_from_table(struct sg_table *table,
+						      size_t *size_in_bytes)
+{
+	int i;
+	struct scatterlist *sg;
+	struct mem_prot_info *info;
+	size_t size;
+
+	size = table->nents * sizeof(*info);
+
+	if (size >= QCOM_SECURE_MEM_SIZE) {
+		pr_err("%s: Not enough memory allocated. Required size %zu\n",
+				__func__, size);
+		return NULL;
+	}
+
+	if (!qcom_secure_mem) {
+		pr_err("%s is not functional as qcom_secure_mem is not allocated.\n",
+				__func__);
+		return NULL;
+	}
+
+	/* "Allocate" it */
+	info = qcom_secure_mem;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		info[i].addr = page_to_phys(sg_page(sg));
+		info[i].size = sg->length;
+	}
+
+	*size_in_bytes = size;
+	return info;
+}
+
+#define BATCH_MAX_SIZE SZ_2M
+#define BATCH_MAX_SECTIONS 32
+
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	int ret;
+	struct scm_desc desc = {0};
+	u32 *source_vm_copy;
+	size_t source_vm_copy_size;
+	struct dest_vm_and_perm_info *dest_vm_copy;
+	size_t dest_vm_copy_size;
+	struct mem_prot_info *sg_table_copy;
+	size_t sg_table_copy_size;
+
+	int batch_start, batch_end;
+	u64 batch_size;
+
+	/*
+	 * We can only pass cache-aligned sizes to hypervisor, so we need
+	 * to kmalloc and memcpy the source_vm_list here.
+	 */
+	source_vm_copy_size = sizeof(*source_vm_copy) * source_nelems;
+	source_vm_copy = kzalloc(source_vm_copy_size, GFP_KERNEL);
+	if (!source_vm_copy)
+		return -ENOMEM;
+
+	memcpy(source_vm_copy, source_vm_list, source_vm_copy_size);
+
+
+	dest_vm_copy = populate_dest_info(dest_vmids, dest_nelems, dest_perms,
+					  &dest_vm_copy_size);
+	if (!dest_vm_copy) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	mutex_lock(&secure_buffer_mutex);
+
+	sg_table_copy = get_info_list_from_table(table, &sg_table_copy_size);
+	if (!sg_table_copy) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+
+	desc.args[0] = virt_to_phys(sg_table_copy);
+	desc.args[1] = sg_table_copy_size;
+	desc.args[2] = virt_to_phys(source_vm_copy);
+	desc.args[3] = source_vm_copy_size;
+	desc.args[4] = virt_to_phys(dest_vm_copy);
+	desc.args[5] = dest_vm_copy_size;
+	desc.args[6] = 0;
+
+	desc.arginfo = SCM_ARGS(7, SCM_RO, SCM_VAL, SCM_RO, SCM_VAL, SCM_RO,
+				SCM_VAL, SCM_VAL);
+
+	dmac_flush_range(source_vm_copy,
+			 (void *)source_vm_copy + source_vm_copy_size);
+	dmac_flush_range(sg_table_copy,
+			 (void *)sg_table_copy + sg_table_copy_size);
+	dmac_flush_range(dest_vm_copy,
+			 (void *)dest_vm_copy + dest_vm_copy_size);
+
+	batch_start = 0;
+	while (batch_start < table->nents) {
+		/* Ensure no size zero batches */
+		batch_size = sg_table_copy[batch_start].size;
+		batch_end = batch_start + 1;
+		while (1) {
+			u64 size;
+
+			if (batch_end >= table->nents)
+				break;
+			if (batch_end - batch_start >= BATCH_MAX_SECTIONS)
+				break;
+
+			size = sg_table_copy[batch_end].size;
+			if (size + batch_size >= BATCH_MAX_SIZE)
+				break;
+
+			batch_size += size;
+			batch_end++;
+		}
+
+		desc.args[0] = virt_to_phys(&sg_table_copy[batch_start]);
+		desc.args[1] = (batch_end - batch_start) *
+				sizeof(sg_table_copy[0]);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+				MEM_PROT_ASSIGN_ID), &desc);
+		if (ret) {
+			pr_err("%s: Failed to assign memory protection, ret = %d\n",
+				__func__, ret);
+			break;
+		}
+		batch_start = batch_end;
+	}
+
+out_unlock:
+	mutex_unlock(&secure_buffer_mutex);
+	kfree(dest_vm_copy);
+out_free:
+	kfree(source_vm_copy);
+	return ret;
+}
+
+int hyp_assign_phys(phys_addr_t addr, u64 size, u32 *source_vm_list,
+			int source_nelems, int *dest_vmids,
+			int *dest_perms, int dest_nelems)
+{
+	struct sg_table *table;
+	int ret;
+
+	table = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret)
+		goto err1;
+
+	sg_set_page(table->sgl, phys_to_page(addr), size, 0);
+
+	ret = hyp_assign_table(table, source_vm_list, source_nelems, dest_vmids,
+						dest_perms, dest_nelems);
+
+	/*
+	 * The local sg_table is only needed for the duration of the call, so
+	 * free it on both success and failure to avoid leaking it.
+	 */
+	sg_free_table(table);
+err1:
+	kfree(table);
+	return ret;
+}
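+
+/*
+ * Illustrative usage sketch for hyp_assign_phys(): reassign one 1MB region
+ * from HLOS to a content-protection VM. buf_phys is a caller-provided
+ * physical address, and the PERM_READ/PERM_WRITE macros are assumed to come
+ * from secure_buffer.h for the purposes of this sketch:
+ *
+ *	u32 src[] = {VMID_HLOS};
+ *	int dst[] = {VMID_CP_PIXEL};
+ *	int perm[] = {PERM_READ | PERM_WRITE};
+ *
+ *	ret = hyp_assign_phys(buf_phys, SZ_1M, src, 1, dst, perm, 1);
+ */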
+
+const char *msm_secure_vmid_to_string(int secure_vmid)
+{
+	switch (secure_vmid) {
+	case VMID_HLOS:
+		return "VMID_HLOS";
+	case VMID_CP_TOUCH:
+		return "VMID_CP_TOUCH";
+	case VMID_CP_BITSTREAM:
+		return "VMID_CP_BITSTREAM";
+	case VMID_CP_PIXEL:
+		return "VMID_CP_PIXEL";
+	case VMID_CP_NON_PIXEL:
+		return "VMID_CP_NON_PIXEL";
+	case VMID_CP_CAMERA:
+		return "VMID_CP_CAMERA";
+	case VMID_HLOS_FREE:
+		return "VMID_HLOS_FREE";
+	case VMID_MSS_MSA:
+		return "VMID_MSS_MSA";
+	case VMID_MSS_NONMSA:
+		return "VMID_MSS_NONMSA";
+	case VMID_CP_SEC_DISPLAY:
+		return "VMID_CP_SEC_DISPLAY";
+	case VMID_CP_APP:
+		return "VMID_CP_APP";
+	case VMID_WLAN:
+		return "VMID_WLAN";
+	case VMID_WLAN_CE:
+		return "VMID_WLAN_CE";
+	case VMID_INVAL:
+		return "VMID_INVAL";
+	default:
+		return "Unknown VMID";
+	}
+}
+
+#define MAKE_CP_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
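+
+/*
+ * Worked example: MAKE_CP_VERSION(1, 1, 0)
+ *	= ((1 & 0x3FF) << 22) | ((1 & 0x3FF) << 12) | (0 & 0xFFF)
+ *	= 0x00400000 | 0x00001000
+ *	= 0x00401000
+ */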
+
+bool msm_secure_v2_is_supported(void)
+{
+	int version = scm_get_feat_version(FEATURE_ID_CP);
+
+	/*
+	 * if the version is < 1.1.0 then dynamic buffer allocation is
+	 * not supported
+	 */
+	return version >= MAKE_CP_VERSION(1, 1, 0);
+}
+
+static int __init alloc_secure_shared_memory(void)
+{
+	int ret = 0;
+	dma_addr_t dma_handle;
+
+	qcom_secure_mem = kzalloc(QCOM_SECURE_MEM_SIZE, GFP_KERNEL);
+	if (!qcom_secure_mem) {
+		/* Fallback to CMA-DMA memory */
+		qcom_secure_mem = dma_alloc_coherent(NULL, QCOM_SECURE_MEM_SIZE,
+						&dma_handle, GFP_KERNEL);
+		if (!qcom_secure_mem) {
+			pr_err("Couldn't allocate memory for secure use-cases. hyp_assign_table will not work\n");
+			return -ENOMEM;
+		}
+	}
+
+	return ret;
+}
+pure_initcall(alloc_secure_shared_memory);
diff --git a/drivers/soc/qcom/smem_debug.c b/drivers/soc/qcom/smem_debug.c
new file mode 100644
index 0000000..51a483b
--- /dev/null
+++ b/drivers/soc/qcom/smem_debug.c
@@ -0,0 +1,139 @@
+/* drivers/soc/qcom/smem_debug.c
+ *
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2009-2013,2016 The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+
+#include <soc/qcom/smem.h>
+
+#include "smem_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+#define SZ_SMEM_ALLOCATION_TABLE 8192
+
+static void debug_read_mem(struct seq_file *s)
+{
+	unsigned int n;
+	struct smem_heap_info *heap_info;
+	struct smem_heap_entry *toc;
+
+	heap_info = smem_find(SMEM_HEAP_INFO, sizeof(struct smem_heap_info),
+						0,
+						SMEM_ANY_HOST_FLAG);
+	if (!heap_info) {
+		seq_puts(s, "SMEM_HEAP_INFO is NULL\n");
+		return;
+	}
+	toc = smem_find(SMEM_ALLOCATION_TABLE, SZ_SMEM_ALLOCATION_TABLE,
+							0, SMEM_ANY_HOST_FLAG);
+	if (!toc) {
+		seq_puts(s, "SMEM_ALLOCATION_TABLE is NULL\n");
+		return;
+	}
+
+	seq_printf(s, "heap: init=%d free=%d remain=%d\n",
+		       heap_info->initialized,
+		       heap_info->free_offset,
+		       heap_info->heap_remaining);
+
+	for (n = 0; n < SMEM_NUM_ITEMS; n++) {
+		if (toc[n].allocated == 0)
+			continue;
+		seq_printf(s, "%04d: offset %08x size %08x\n",
+			       n, toc[n].offset, toc[n].size);
+	}
+}
+
+static void debug_read_smem_version(struct seq_file *s)
+{
+	uint32_t n, version;
+
+	for (n = 0; n < 32; n++) {
+		version = smem_get_version(n);
+		seq_printf(s, "entry %d: smem = %d  proc_comm = %d\n", n,
+			       version >> 16,
+			       version & 0xffff);
+	}
+}
+
+static void debug_read_build_id(struct seq_file *s)
+{
+	unsigned int size;
+	void *data;
+
+	data = smem_get_entry(SMEM_HW_SW_BUILD_ID, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!data)
+		return;
+
+	seq_write(s, data, size);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, umode_t mode,
+			 struct dentry *dent,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, mode, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smem_debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("smem", 0);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	debug_create("mem", 0444, dent, debug_read_mem);
+	debug_create("version", 0444, dent, debug_read_smem_version);
+
+	/* NNV: this is google only stuff */
+	debug_create("build", 0444, dent, debug_read_build_id);
+
+	return 0;
+}
+
+late_initcall(smem_debugfs_init);
+#endif
diff --git a/drivers/soc/qcom/smem_private.h b/drivers/soc/qcom/smem_private.h
new file mode 100644
index 0000000..12decd4
--- /dev/null
+++ b/drivers/soc/qcom/smem_private.h
@@ -0,0 +1,104 @@
+/* Copyright (c) 2013,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_
+
+#include <linux/remote_spinlock.h>
+#include <soc/qcom/ramdump.h>
+
+
+#define SMD_HEAP_SIZE 512
+
+struct smem_heap_info {
+	unsigned int initialized;
+	unsigned int free_offset;
+	unsigned int heap_remaining;
+	unsigned int reserved;
+};
+
+struct smem_heap_entry {
+	unsigned int allocated;
+	unsigned int offset;
+	unsigned int size;
+	unsigned int reserved; /* [1:0] reserved, [31:2] aux smem base addr */
+};
+#define BASE_ADDR_MASK 0xfffffffc
+
+struct smem_proc_comm {
+	unsigned int command;
+	unsigned int status;
+	unsigned int data1;
+	unsigned int data2;
+};
+
+struct smem_shared {
+	struct smem_proc_comm proc_comm[4];
+	unsigned int version[32];
+	struct smem_heap_info heap_info;
+	struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
+};
+
+struct smem_area {
+	phys_addr_t phys_addr;
+	resource_size_t size;
+	void __iomem *virt_addr;
+};
+
+/* used for unit testing spinlocks */
+remote_spinlock_t *smem_get_remote_spinlock(void);
+
+bool smem_initialized_check(void);
+
+/**
+ * smem_module_init_notifier_register() - Register a smem module
+ *                                       init notifier block
+ * @nb: Notifier block to be registered
+ *
+ * In order to mark the dependency on SMEM Driver module initialization
+ * register a notifier using this API. Once the smem module_init is
+ * done, notification will be passed to the registered module.
+ */
+int smem_module_init_notifier_register(struct notifier_block *nb);
+
+/**
+ * smem_module_init_notifier_unregister() - Unregister a smem module
+ *                                       init notifier block
+ * @nb: Notifier block to be unregistered
+ */
+int smem_module_init_notifier_unregister(struct notifier_block *nb);
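+
+/*
+ * Minimal registration sketch (the callback and notifier names below are
+ * illustrative only):
+ *
+ *	static int smem_ready_cb(struct notifier_block *nb,
+ *				 unsigned long action, void *data)
+ *	{
+ *		// SMEM is now initialized; items may be allocated/looked up
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block smem_ready_nb = {
+ *		.notifier_call = smem_ready_cb,
+ *	};
+ *
+ *	smem_module_init_notifier_register(&smem_ready_nb);
+ */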
+
+/**
+ * smem_get_free_space() - Get the available allocation free space for a
+ *				partition
+ *
+ * @to_proc: remote SMEM host.  Determines the applicable partition
+ * @returns: size in bytes available to allocate
+ *
+ * Helper function for SMD so that SMD only scans the channel allocation
+ * table for a partition when it is reasonably certain that a channel has
+ * actually been created, because scanning can be expensive.  Creating a channel
+ * will consume some of the free space in a partition, so SMD can compare the
+ * last free space size against the current free space size to determine if
+ * a channel may have been created.  SMD can't do this directly, because the
+ * necessary partition internals are restricted to just SMEM.
+ */
+unsigned int smem_get_free_space(unsigned int to_proc);
+
+/**
+ * smem_get_version() - Get the smem user version number
+ *
+ * @idx: SMEM user idx in SMEM_VERSION_INFO table.
+ * @returns: smem version number if success otherwise zero.
+ */
+unsigned int smem_get_version(unsigned int idx);
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_PRIVATE_H_ */
diff --git a/drivers/soc/qcom/smp2p_debug.c b/drivers/soc/qcom/smp2p_debug.c
new file mode 100644
index 0000000..8d98d07
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_debug.c
@@ -0,0 +1,335 @@
+/* drivers/soc/qcom/smp2p_debug.c
+ *
+ * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include "smp2p_private.h"
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * Dump interrupt statistics.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_int_stats(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *int_cfg;
+	int pid;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg)
+		return;
+
+	seq_puts(s, "| Processor | Incoming Id | Incoming # |");
+	seq_puts(s, " Outgoing # | Base Ptr |   Mask   |\n");
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+		if (!int_cfg[pid].is_configured &&
+				pid != SMP2P_REMOTE_MOCK_PROC)
+			continue;
+
+		seq_printf(s, "| %5s (%d) | %11u | %10u | %10u | %pK | %08x |\n",
+			int_cfg[pid].name,
+			pid, int_cfg[pid].in_int_id,
+			int_cfg[pid].in_interrupt_count,
+			int_cfg[pid].out_interrupt_count,
+			int_cfg[pid].out_int_ptr,
+			int_cfg[pid].out_int_mask);
+	}
+}
+
+/**
+ * Dump item header line 1.
+ *
+ * @buf:      output buffer
+ * @max:      length of output buffer
+ * @item_ptr: SMEM item pointer
+ * @state:    item state
+ * @returns: Number of bytes written to output buffer
+ */
+static int smp2p_item_header1(char *buf, int max, struct smp2p_smem *item_ptr,
+	enum msm_smp2p_edge_state state)
+{
+	int i = 0;
+	const char *state_text;
+
+	if (!item_ptr) {
+		i += scnprintf(buf + i, max - i, "None");
+		return i;
+	}
+
+	switch (state) {
+	case SMP2P_EDGE_STATE_CLOSED:
+		state_text = "State: Closed";
+		break;
+	case SMP2P_EDGE_STATE_OPENING:
+		state_text = "State: Opening";
+		break;
+	case SMP2P_EDGE_STATE_OPENED:
+		state_text = "State: Opened";
+		break;
+	default:
+		state_text = "";
+		break;
+	}
+
+	i += scnprintf(buf + i, max - i,
+		"%-14s LPID %d RPID %d",
+		state_text,
+		SMP2P_GET_LOCAL_PID(item_ptr->rem_loc_proc_id),
+		SMP2P_GET_REMOTE_PID(item_ptr->rem_loc_proc_id)
+		);
+
+	return i;
+}
+
+/**
+ * Dump item header line 2.
+ *
+ * @buf:      output buffer
+ * @max:      length of output buffer
+ * @item_ptr: SMEM item pointer
+ * @returns: Number of bytes written to output buffer
+ */
+static int smp2p_item_header2(char *buf, int max, struct smp2p_smem *item_ptr)
+{
+	int i = 0;
+
+	if (!item_ptr) {
+		i += scnprintf(buf + i, max - i, "None");
+		return i;
+	}
+
+	i += scnprintf(buf + i, max - i,
+		"Version: %08x Features: %08x",
+		SMP2P_GET_VERSION(item_ptr->feature_version),
+		SMP2P_GET_FEATURES(item_ptr->feature_version)
+		);
+
+	return i;
+}
+
+/**
+ * Dump item header line 3.
+ *
+ * @buf:      output buffer
+ * @max:      length of output buffer
+ * @item_ptr: SMEM item pointer
+ * @returns: Number of bytes written to output buffer
+ */
+static int smp2p_item_header3(char *buf, int max, struct smp2p_smem *item_ptr)
+{
+	int i = 0;
+
+	if (!item_ptr) {
+		i += scnprintf(buf + i, max - i, "None");
+		return i;
+	}
+
+	i += scnprintf(buf + i, max - i,
+		"Entries #/Max: %d/%d Flags: %c%c",
+		SMP2P_GET_ENT_VALID(item_ptr->valid_total_ent),
+		SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent),
+		item_ptr->flags & SMP2P_FLAGS_RESTART_ACK_MASK ? 'A' : 'a',
+		item_ptr->flags & SMP2P_FLAGS_RESTART_DONE_MASK ? 'D' : 'd'
+		);
+
+	return i;
+}
+
+/**
+ * Dump individual input/output item pair.
+ *
+ * @s:          pointer to output file
+ * @remote_pid: remote processor ID
+ */
+static void smp2p_item(struct seq_file *s, int remote_pid)
+{
+	struct smp2p_smem *out_ptr;
+	struct smp2p_smem *in_ptr;
+	struct smp2p_interrupt_config *int_cfg;
+	char tmp_buff[64];
+	int state;
+	int entry;
+	struct smp2p_entry_v1 *out_entries = NULL;
+	struct smp2p_entry_v1 *in_entries = NULL;
+	int out_valid = 0;
+	int in_valid = 0;
+	char entry_name[SMP2P_MAX_ENTRY_NAME];
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg)
+		return;
+	if (!int_cfg[remote_pid].is_configured &&
+			remote_pid != SMP2P_REMOTE_MOCK_PROC)
+		return;
+
+	out_ptr = smp2p_get_out_item(remote_pid, &state);
+	in_ptr = smp2p_get_in_item(remote_pid);
+
+	if (!out_ptr && !in_ptr)
+		return;
+
+	/* print item headers */
+	seq_printf(s, "%s%s\n",
+		" ====================================== ",
+		"======================================");
+	scnprintf(tmp_buff, sizeof(tmp_buff),
+		"Apps(%d)->%s(%d)",
+		SMP2P_APPS_PROC, int_cfg[remote_pid].name, remote_pid);
+	seq_printf(s, "| %-37s", tmp_buff);
+
+	scnprintf(tmp_buff, sizeof(tmp_buff),
+		"%s(%d)->Apps(%d)",
+		int_cfg[remote_pid].name, remote_pid, SMP2P_APPS_PROC);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+	seq_printf(s, "%s%s\n",
+		" ====================================== ",
+		"======================================");
+
+	smp2p_item_header1(tmp_buff, sizeof(tmp_buff), out_ptr, state);
+	seq_printf(s, "| %-37s", tmp_buff);
+	smp2p_item_header1(tmp_buff, sizeof(tmp_buff), in_ptr, -1);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+
+	smp2p_item_header2(tmp_buff, sizeof(tmp_buff), out_ptr);
+	seq_printf(s, "| %-37s", tmp_buff);
+	smp2p_item_header2(tmp_buff, sizeof(tmp_buff), in_ptr);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+
+	smp2p_item_header3(tmp_buff, sizeof(tmp_buff), out_ptr);
+	seq_printf(s, "| %-37s", tmp_buff);
+	smp2p_item_header3(tmp_buff, sizeof(tmp_buff), in_ptr);
+	seq_printf(s, "| %-37s|\n", tmp_buff);
+
+	seq_printf(s, " %s%s\n",
+		"-------------------------------------- ",
+		"--------------------------------------");
+	seq_printf(s, "| %-37s",
+		"Entry Name       Value");
+	seq_printf(s, "| %-37s|\n",
+		"Entry Name       Value");
+	seq_printf(s, " %s%s\n",
+		"-------------------------------------- ",
+		"--------------------------------------");
+
+	/* print entries */
+	if (out_ptr) {
+		out_entries = (struct smp2p_entry_v1 *)((void *)out_ptr +
+				sizeof(struct smp2p_smem));
+		out_valid = SMP2P_GET_ENT_VALID(out_ptr->valid_total_ent);
+	}
+
+	if (in_ptr) {
+		in_entries = (struct smp2p_entry_v1 *)((void *)in_ptr +
+				sizeof(struct smp2p_smem));
+		in_valid = SMP2P_GET_ENT_VALID(in_ptr->valid_total_ent);
+	}
+
+	for (entry = 0; out_entries || in_entries; ++entry) {
+		if (out_entries && entry < out_valid) {
+			memcpy_fromio(entry_name, out_entries->name,
+							SMP2P_MAX_ENTRY_NAME);
+			scnprintf(tmp_buff, sizeof(tmp_buff),
+					"%-16s 0x%08x",
+					entry_name,
+					out_entries->entry);
+			++out_entries;
+		} else {
+			out_entries = NULL;
+			scnprintf(tmp_buff, sizeof(tmp_buff), "None");
+		}
+		seq_printf(s, "| %-37s", tmp_buff);
+
+		if (in_entries && entry < in_valid) {
+			memcpy_fromio(entry_name, in_entries->name,
+							SMP2P_MAX_ENTRY_NAME);
+			scnprintf(tmp_buff, sizeof(tmp_buff),
+					"%-16s 0x%08x",
+					entry_name,
+					in_entries->entry);
+			++in_entries;
+		} else {
+			in_entries = NULL;
+			scnprintf(tmp_buff, sizeof(tmp_buff), "None");
+		}
+		seq_printf(s, "| %-37s|\n", tmp_buff);
+	}
+	seq_printf(s, " %s%s\n\n",
+		"-------------------------------------- ",
+		"--------------------------------------");
+}
+
+/**
+ * Dump item state.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_items(struct seq_file *s)
+{
+	int pid;
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
+		smp2p_item(s, pid);
+}
+
+static struct dentry *dent;
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+void debug_create(const char *name,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smp2p_debugfs_init(void)
+{
+	dent = debugfs_create_dir("smp2p", 0);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	debug_create("int_stats", smp2p_int_stats);
+	debug_create("items", smp2p_items);
+
+	return 0;
+}
+
+late_initcall(smp2p_debugfs_init);
+#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/soc/qcom/smp2p_loopback.c b/drivers/soc/qcom/smp2p_loopback.c
new file mode 100644
index 0000000..0086381
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_loopback.c
@@ -0,0 +1,449 @@
+/* drivers/soc/qcom/smp2p_loopback.c
+ *
+ * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/termios.h>
+#include <linux/module.h>
+#include <linux/remote_spinlock.h>
+#include "smem_private.h"
+#include "smp2p_private.h"
+
+/**
+ * struct smp2p_loopback_ctx - Representation of remote loopback object.
+ *
+ * @proc_id: Processor id of the processor that sends the loopback commands.
+ * @out: Handle to the  smem entry structure for providing the response.
+ * @out_nb: Notifies the opening of local entry.
+ * @out_is_active: Outbound entry events should be processed.
+ * @in_nb: Notifies changes in the remote entry.
+ * @in_is_active: Inbound entry events should be processed.
+ * @rmt_lpb_work: Work item that handles the incoming loopback commands.
+ * @rmt_cmd: Structure that holds the current and previous value of the entry.
+ */
+struct smp2p_loopback_ctx {
+	int proc_id;
+	struct msm_smp2p_out *out;
+	struct notifier_block out_nb;
+	bool out_is_active;
+	struct notifier_block in_nb;
+	bool in_is_active;
+	struct work_struct  rmt_lpb_work;
+	struct msm_smp2p_update_notif rmt_cmd;
+};
+
+static struct smp2p_loopback_ctx  remote_loopback[SMP2P_NUM_PROCS];
+static struct msm_smp2p_remote_mock remote_mock;
+
+/**
+ * remote_spinlock_test - Handles remote spinlock test.
+ *
+ * @ctx: Loopback context
+ */
+static void remote_spinlock_test(struct smp2p_loopback_ctx *ctx)
+{
+	uint32_t test_request;
+	uint32_t test_response;
+	unsigned long flags;
+	int n;
+	unsigned int lock_count = 0;
+	remote_spinlock_t *smem_spinlock;
+
+	test_request = 0x0;
+	SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
+	smem_spinlock = smem_get_remote_spinlock();
+	if (!smem_spinlock) {
+		pr_err("%s: unable to get remote spinlock\n", __func__);
+		return;
+	}
+
+	for (;;) {
+		remote_spin_lock_irqsave(smem_spinlock, flags);
+		++lock_count;
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_LOCKED);
+		(void)msm_smp2p_out_write(ctx->out, test_request);
+
+		for (n = 0; n < 10000; ++n) {
+			(void)msm_smp2p_in_read(ctx->proc_id,
+					"smp2p", &test_response);
+			test_response = SMP2P_GET_RMT_CMD(test_response);
+
+			if (test_response == SMP2P_LB_CMD_RSPIN_END)
+				break;
+
+			if (test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED)
+				SMP2P_ERR("%s: invalid spinlock command %x\n",
+					__func__, test_response);
+		}
+
+		if (test_response == SMP2P_LB_CMD_RSPIN_END) {
+			SMP2P_SET_RMT_CMD_TYPE_RESP(test_request);
+			SMP2P_SET_RMT_CMD(test_request,
+					SMP2P_LB_CMD_RSPIN_END);
+			SMP2P_SET_RMT_DATA(test_request, lock_count);
+			(void)msm_smp2p_out_write(ctx->out, test_request);
+			break;
+		}
+
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_UNLOCKED);
+		(void)msm_smp2p_out_write(ctx->out, test_request);
+		remote_spin_unlock_irqrestore(smem_spinlock, flags);
+	}
+	remote_spin_unlock_irqrestore(smem_spinlock, flags);
+}
+
+/**
+ * smp2p_rmt_lpb_worker - Handles incoming remote loopback commands.
+ *
+ * @work: Work Item scheduled to handle the incoming commands.
+ */
+static void smp2p_rmt_lpb_worker(struct work_struct *work)
+{
+	struct smp2p_loopback_ctx *ctx;
+	int lpb_cmd;
+	int lpb_cmd_type;
+	int lpb_data;
+
+	ctx = container_of(work, struct smp2p_loopback_ctx, rmt_lpb_work);
+
+	if (!ctx->in_is_active || !ctx->out_is_active)
+		return;
+
+	if (ctx->rmt_cmd.previous_value == ctx->rmt_cmd.current_value)
+		return;
+
+	lpb_cmd_type =  SMP2P_GET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value);
+	lpb_cmd = SMP2P_GET_RMT_CMD(ctx->rmt_cmd.current_value);
+	lpb_data = SMP2P_GET_RMT_DATA(ctx->rmt_cmd.current_value);
+
+	if (lpb_cmd & SMP2P_RLPB_IGNORE)
+		return;
+
+	switch (lpb_cmd) {
+	case SMP2P_LB_CMD_NOOP:
+		/* Do nothing */
+		break;
+
+	case SMP2P_LB_CMD_ECHO:
+		SMP2P_SET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value, 0);
+		SMP2P_SET_RMT_DATA(ctx->rmt_cmd.current_value,
+							lpb_data);
+		(void)msm_smp2p_out_write(ctx->out,
+					ctx->rmt_cmd.current_value);
+		break;
+
+	case SMP2P_LB_CMD_CLEARALL:
+		ctx->rmt_cmd.current_value = 0;
+		(void)msm_smp2p_out_write(ctx->out,
+					ctx->rmt_cmd.current_value);
+		break;
+
+	case SMP2P_LB_CMD_PINGPONG:
+		SMP2P_SET_RMT_CMD_TYPE(ctx->rmt_cmd.current_value, 0);
+		if (lpb_data) {
+			lpb_data--;
+			SMP2P_SET_RMT_DATA(ctx->rmt_cmd.current_value,
+					lpb_data);
+			(void)msm_smp2p_out_write(ctx->out,
+					ctx->rmt_cmd.current_value);
+		}
+		break;
+
+	case SMP2P_LB_CMD_RSPIN_START:
+		remote_spinlock_test(ctx);
+		break;
+
+	case SMP2P_LB_CMD_RSPIN_LOCKED:
+	case SMP2P_LB_CMD_RSPIN_UNLOCKED:
+	case SMP2P_LB_CMD_RSPIN_END:
+		/* not used for remote spinlock test */
+		break;
+
+	default:
+		SMP2P_DBG("%s: Unknown loopback command %x\n",
+				__func__, lpb_cmd);
+		break;
+	}
+}
+
+/**
+ * smp2p_rmt_in_edge_notify -  Schedules a work item to handle the commands.
+ *
+ * @nb: Notifier block, this is called when the value in remote entry changes.
+ * @event: Takes value SMP2P_ENTRY_UPDATE or SMP2P_OPEN based on the event.
+ * @data: Consists of previous and current value in case of entry update.
+ * @returns: 0 for success (return value required for notifier chains).
+ */
+static int smp2p_rmt_in_edge_notify(struct notifier_block *nb,
+				unsigned long event, void *data)
+{
+	struct smp2p_loopback_ctx *ctx;
+
+	if (!(event == SMP2P_ENTRY_UPDATE || event == SMP2P_OPEN))
+		return 0;
+
+	ctx = container_of(nb, struct smp2p_loopback_ctx, in_nb);
+	if (data && ctx->in_is_active) {
+		ctx->rmt_cmd = *(struct msm_smp2p_update_notif *)data;
+		schedule_work(&ctx->rmt_lpb_work);
+	}
+
+	return 0;
+}
+
+/**
+ * smp2p_rmt_out_edge_notify - Notifies on the opening of the outbound entry.
+ *
+ * @nb: Notifier block, this is called when the local entry is open.
+ * @event: Takes on value SMP2P_OPEN when the local entry is open.
+ * @data: Consist of current value of the remote entry, if entry is open.
+ * @returns: 0 for success (return value required for notifier chains).
+ */
+static int smp2p_rmt_out_edge_notify(struct notifier_block  *nb,
+				unsigned long event, void *data)
+{
+	struct smp2p_loopback_ctx *ctx;
+
+	ctx = container_of(nb, struct smp2p_loopback_ctx, out_nb);
+	if (event == SMP2P_OPEN)
+		SMP2P_DBG("%s: 'smp2p':%d opened\n", __func__,
+				ctx->proc_id);
+
+	return 0;
+}
+
+/**
+ * msm_smp2p_init_rmt_lpb -  Initializes the remote loopback object.
+ *
+ * @ctx: Pointer to remote loopback object that needs to be initialized.
+ * @pid: Processor id  of the processor that is sending the commands.
+ * @entry: Name of the entry that needs to be opened locally.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ */
+static int msm_smp2p_init_rmt_lpb(struct  smp2p_loopback_ctx *ctx,
+			int pid, const char *entry)
+{
+	int ret = 0;
+	int tmp;
+
+	if (!ctx || !entry || pid >= SMP2P_NUM_PROCS)
+		return -EINVAL;
+
+	ctx->in_nb.notifier_call = smp2p_rmt_in_edge_notify;
+	ctx->out_nb.notifier_call = smp2p_rmt_out_edge_notify;
+	ctx->proc_id = pid;
+	ctx->in_is_active = true;
+	ctx->out_is_active = true;
+	tmp = msm_smp2p_out_open(pid, entry, &ctx->out_nb,
+						&ctx->out);
+	if (tmp) {
+		SMP2P_ERR("%s: open failed outbound entry '%s':%d - ret %d\n",
+				__func__, entry, pid, tmp);
+		ret = tmp;
+	}
+
+	tmp = msm_smp2p_in_register(ctx->proc_id,
+				SMP2P_RLPB_ENTRY_NAME,
+				&ctx->in_nb);
+	if (tmp) {
+		SMP2P_ERR("%s: unable to open inbound entry '%s':%d - ret %d\n",
+				__func__, entry, pid, tmp);
+		ret = tmp;
+	}
+
+	return ret;
+}
+
+/**
+ * msm_smp2p_init_rmt_lpb_proc - Wrapper over msm_smp2p_init_rmt_lpb
+ *
+ * @remote_pid: Processor ID of the processor that sends loopback command.
+ * @returns: Pointer to outbound entry handle.
+ */
+void *msm_smp2p_init_rmt_lpb_proc(int remote_pid)
+{
+	int tmp;
+	void *ret = NULL;
+
+	tmp = msm_smp2p_init_rmt_lpb(&remote_loopback[remote_pid],
+			remote_pid, SMP2P_RLPB_ENTRY_NAME);
+	if (!tmp)
+		ret = remote_loopback[remote_pid].out;
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_init_rmt_lpb_proc);
+
+/**
+ * msm_smp2p_deinit_rmt_lpb_proc - Unregister support for remote processor.
+ *
+ * @remote_pid:  Processor ID of the remote system.
+ * @returns: 0 on success, standard Linux error code otherwise.
+ *
+ * Unregister loopback support for remote processor.
+ */
+int msm_smp2p_deinit_rmt_lpb_proc(int remote_pid)
+{
+	int ret = 0;
+	int tmp;
+	struct smp2p_loopback_ctx *ctx;
+
+	if (remote_pid >= SMP2P_NUM_PROCS)
+		return -EINVAL;
+
+	ctx = &remote_loopback[remote_pid];
+
+	/* abort any pending notifications */
+	remote_loopback[remote_pid].out_is_active = false;
+	remote_loopback[remote_pid].in_is_active = false;
+	flush_work(&ctx->rmt_lpb_work);
+
+	/* unregister entries */
+	tmp = msm_smp2p_out_close(&remote_loopback[remote_pid].out);
+	remote_loopback[remote_pid].out = NULL;
+	if (tmp) {
+		SMP2P_ERR("%s: outbound 'smp2p':%d close failed %d\n",
+				__func__, remote_pid, tmp);
+		ret = tmp;
+	}
+
+	tmp = msm_smp2p_in_unregister(remote_pid,
+		SMP2P_RLPB_ENTRY_NAME, &remote_loopback[remote_pid].in_nb);
+	if (tmp) {
+		SMP2P_ERR("%s: inbound 'smp2p':%d close failed %d\n",
+				__func__, remote_pid, tmp);
+		ret = tmp;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(msm_smp2p_deinit_rmt_lpb_proc);
+
+/**
+ * msm_smp2p_set_remote_mock_exists - Sets the remote mock configuration.
+ *
+ * @item_exists: true = Remote mock SMEM item exists
+ *
+ * This is used in the testing environment to simulate the existence of the
+ * remote smem item in order to test the negotiation algorithm.
+ */
+void msm_smp2p_set_remote_mock_exists(bool item_exists)
+{
+	remote_mock.item_exists = item_exists;
+}
+EXPORT_SYMBOL(msm_smp2p_set_remote_mock_exists);
+
+/**
+ * msm_smp2p_get_remote_mock - Get remote mock object.
+ *
+ * @returns: Pointer to the remote mock object.
+ */
+void *msm_smp2p_get_remote_mock(void)
+{
+	return &remote_mock;
+}
+EXPORT_SYMBOL(msm_smp2p_get_remote_mock);
+
+/**
+ * msm_smp2p_get_remote_mock_smem_item - Returns a pointer to remote item.
+ *
+ * @size:    Size of item.
+ * @returns: Pointer to mock remote smem item.
+ */
+void *msm_smp2p_get_remote_mock_smem_item(uint32_t *size)
+{
+	void *ptr = NULL;
+
+	if (remote_mock.item_exists) {
+		*size = sizeof(remote_mock.remote_item);
+		ptr = &(remote_mock.remote_item);
+	}
+
+	return ptr;
+}
+EXPORT_SYMBOL(msm_smp2p_get_remote_mock_smem_item);
+
+/**
+ * smp2p_remote_mock_rx_interrupt - Triggers receive interrupt for mock proc.
+ *
+ * @returns: 0 for success
+ *
+ * This function simulates the receiving of interrupt by the mock remote
+ * processor in a testing environment.
+ */
+int smp2p_remote_mock_rx_interrupt(void)
+{
+	remote_mock.rx_interrupt_count++;
+	if (remote_mock.initialized)
+		complete(&remote_mock.cb_completion);
+	return 0;
+}
+EXPORT_SYMBOL(smp2p_remote_mock_rx_interrupt);
+
+/**
+ * smp2p_remote_mock_tx_interrupt - Calls the SMP2P interrupt handler.
+ *
+ * This function calls the interrupt handler of the Apps processor to simulate
+ * receiving interrupts from a remote processor.
+ */
+static void smp2p_remote_mock_tx_interrupt(void)
+{
+	msm_smp2p_interrupt_handler(SMP2P_REMOTE_MOCK_PROC);
+}
+
+/**
+ * smp2p_remote_mock_init - Initialize the remote mock and loopback objects.
+ *
+ * @returns: 0 for success
+ */
+static int __init smp2p_remote_mock_init(void)
+{
+	int i;
+	struct smp2p_interrupt_config *int_cfg;
+
+	smp2p_init_header(&remote_mock.remote_item.header,
+			SMP2P_REMOTE_MOCK_PROC, SMP2P_APPS_PROC,
+			0, 0);
+	remote_mock.rx_interrupt_count = 0;
+	remote_mock.rx_interrupt = smp2p_remote_mock_rx_interrupt;
+	remote_mock.tx_interrupt = smp2p_remote_mock_tx_interrupt;
+	remote_mock.item_exists = false;
+	init_completion(&remote_mock.cb_completion);
+	remote_mock.initialized = true;
+
+	for (i = 0; i < SMP2P_NUM_PROCS; i++) {
+		INIT_WORK(&(remote_loopback[i].rmt_lpb_work),
+				smp2p_rmt_lpb_worker);
+		if (i == SMP2P_REMOTE_MOCK_PROC)
+			/* do not register loopback for remote mock proc */
+			continue;
+
+		int_cfg = smp2p_get_interrupt_config();
+		if (!int_cfg) {
+			SMP2P_ERR("Remote processor config unavailable\n");
+			return 0;
+		}
+		if (!int_cfg[i].is_configured)
+			continue;
+
+		msm_smp2p_init_rmt_lpb(&remote_loopback[i],
+			i, SMP2P_RLPB_ENTRY_NAME);
+	}
+	return 0;
+}
+module_init(smp2p_remote_mock_init);
diff --git a/drivers/soc/qcom/smp2p_private.h b/drivers/soc/qcom/smp2p_private.h
new file mode 100644
index 0000000..b332f7b
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_private.h
@@ -0,0 +1,253 @@
+/* drivers/soc/qcom/smp2p_private.h
+ *
+ * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_MSM_SMP2P_PRIVATE_H_
+#define _ARCH_ARM_MACH_MSM_MSM_SMP2P_PRIVATE_H_
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/ipc_logging.h>
+#include "smp2p_private_api.h"
+
+#define SMP2P_MAX_ENTRY 16
+#define SMP2P_FEATURE_SSR_ACK 0x1
+
+/* SMEM Item Header Macros */
+#define SMP2P_MAGIC 0x504D5324
+#define SMP2P_LOCAL_PID_MASK 0x0000ffff
+#define SMP2P_LOCAL_PID_BIT 0
+#define SMP2P_REMOTE_PID_MASK 0xffff0000
+#define SMP2P_REMOTE_PID_BIT 16
+#define SMP2P_VERSION_MASK 0x000000ff
+#define SMP2P_VERSION_BIT 0
+#define SMP2P_FEATURE_MASK 0xffffff00
+#define SMP2P_FEATURE_BIT 8
+#define SMP2P_ENT_TOTAL_MASK 0x0000ffff
+#define SMP2P_ENT_TOTAL_BIT 0
+#define SMP2P_ENT_VALID_MASK 0xffff0000
+#define SMP2P_ENT_VALID_BIT 16
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_DONE_MASK 0x1
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
+#define SMP2P_FLAGS_RESTART_ACK_MASK 0x2
+#define SMP2P_GPIO_NO_INT BIT(1)
+
+#define SMP2P_GET_BITS(hdr_val, mask, bit) \
+	(((hdr_val) & (mask)) >> (bit))
+#define SMP2P_SET_BITS(hdr_val, mask, bit, new_value) \
+	{\
+		hdr_val = (hdr_val & ~(mask)) \
+		| (((new_value) << (bit)) & (mask)); \
+	}
+
+#define SMP2P_GET_LOCAL_PID(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT)
+#define SMP2P_SET_LOCAL_PID(hdr, pid) \
+	SMP2P_SET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT, pid)
+
+#define SMP2P_GET_REMOTE_PID(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT)
+#define SMP2P_SET_REMOTE_PID(hdr, pid) \
+	SMP2P_SET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT, pid)
+
+#define SMP2P_GET_VERSION(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT)
+#define SMP2P_SET_VERSION(hdr, version) \
+	SMP2P_SET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT, version)
+
+#define SMP2P_GET_FEATURES(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT)
+#define SMP2P_SET_FEATURES(hdr, features) \
+	SMP2P_SET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT, features)
+
+#define SMP2P_GET_ENT_TOTAL(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT)
+#define SMP2P_SET_ENT_TOTAL(hdr, entries) \
+	SMP2P_SET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT, entries)
+
+#define SMP2P_GET_ENT_VALID(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT)
+#define SMP2P_SET_ENT_VALID(hdr, entries) \
+	SMP2P_SET_BITS(hdr,  SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT,\
+		entries)
+
+#define SMP2P_GET_RESTART_DONE(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+			SMP2P_FLAGS_RESTART_DONE_BIT)
+#define SMP2P_SET_RESTART_DONE(hdr, value) \
+	SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+			SMP2P_FLAGS_RESTART_DONE_BIT, value)
+
+#define SMP2P_GET_RESTART_ACK(hdr) \
+	SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+			SMP2P_FLAGS_RESTART_ACK_BIT)
+#define SMP2P_SET_RESTART_ACK(hdr, value) \
+	SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+			SMP2P_FLAGS_RESTART_ACK_BIT, value)
+
+/* Loopback Command Macros */
+#define SMP2P_RMT_CMD_TYPE_MASK 0x80000000
+#define SMP2P_RMT_CMD_TYPE_BIT 31
+#define SMP2P_RMT_IGNORE_MASK 0x40000000
+#define SMP2P_RMT_IGNORE_BIT 30
+#define SMP2P_RMT_CMD_MASK 0x3f000000
+#define SMP2P_RMT_CMD_BIT 24
+#define SMP2P_RMT_DATA_MASK 0x00ffffff
+#define SMP2P_RMT_DATA_BIT 0
+
+#define SMP2P_GET_RMT_CMD_TYPE(val) \
+	SMP2P_GET_BITS(val, SMP2P_RMT_CMD_TYPE_MASK, SMP2P_RMT_CMD_TYPE_BIT)
+#define SMP2P_GET_RMT_CMD(val) \
+	SMP2P_GET_BITS(val, SMP2P_RMT_CMD_MASK, SMP2P_RMT_CMD_BIT)
+
+#define SMP2P_GET_RMT_DATA(val) \
+	SMP2P_GET_BITS(val, SMP2P_RMT_DATA_MASK, SMP2P_RMT_DATA_BIT)
+
+#define SMP2P_SET_RMT_CMD_TYPE(val, cmd_type) \
+	SMP2P_SET_BITS(val, SMP2P_RMT_CMD_TYPE_MASK, SMP2P_RMT_CMD_TYPE_BIT, \
+		cmd_type)
+#define SMP2P_SET_RMT_CMD_TYPE_REQ(val) \
+	SMP2P_SET_RMT_CMD_TYPE(val, 1)
+#define SMP2P_SET_RMT_CMD_TYPE_RESP(val) \
+	SMP2P_SET_RMT_CMD_TYPE(val, 0)
+
+#define SMP2P_SET_RMT_CMD(val, cmd) \
+	SMP2P_SET_BITS(val, SMP2P_RMT_CMD_MASK, SMP2P_RMT_CMD_BIT, \
+		cmd)
+#define SMP2P_SET_RMT_DATA(val, data) \
+	SMP2P_SET_BITS(val, SMP2P_RMT_DATA_MASK, SMP2P_RMT_DATA_BIT, data)
+
+enum {
+	SMP2P_LB_CMD_NOOP = 0x0,
+	SMP2P_LB_CMD_ECHO,
+	SMP2P_LB_CMD_CLEARALL,
+	SMP2P_LB_CMD_PINGPONG,
+	SMP2P_LB_CMD_RSPIN_START,
+	SMP2P_LB_CMD_RSPIN_LOCKED,
+	SMP2P_LB_CMD_RSPIN_UNLOCKED,
+	SMP2P_LB_CMD_RSPIN_END,
+};
+#define SMP2P_RLPB_IGNORE 0x40
+#define SMP2P_RLPB_ENTRY_NAME "smp2p"
+
+/* Debug Logging Macros */
+enum {
+	MSM_SMP2P_INFO = 1U << 0,
+	MSM_SMP2P_DEBUG = 1U << 1,
+	MSM_SMP2P_GPIO = 1U << 2,
+};
+
+#define SMP2P_IPC_LOG_STR(x...) do { \
+	if (smp2p_get_log_ctx()) \
+		ipc_log_string(smp2p_get_log_ctx(), x); \
+} while (0)
+
+#define SMP2P_DBG(x...) do {                              \
+	if (smp2p_get_debug_mask() & MSM_SMP2P_DEBUG) \
+		SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+#define SMP2P_INFO(x...) do {                              \
+	if (smp2p_get_debug_mask() & MSM_SMP2P_INFO) \
+		SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+#define SMP2P_ERR(x...) do {                              \
+	pr_err(x); \
+	SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+#define SMP2P_GPIO(x...) do {                              \
+	if (smp2p_get_debug_mask() & MSM_SMP2P_GPIO) \
+		SMP2P_IPC_LOG_STR(x);  \
+} while (0)
+
+
+enum msm_smp2p_edge_state {
+	SMP2P_EDGE_STATE_CLOSED,
+	SMP2P_EDGE_STATE_OPENING,
+	SMP2P_EDGE_STATE_OPENED,
+	SMP2P_EDGE_STATE_FAILED = 0xff,
+};
+
+/**
+ * struct smp2p_smem - SMP2P SMEM Item Header
+ *
+ * @magic:  Set to "$SMP" -- used for identification / debug purposes
+ * @feature_version:  Feature and version fields
+ * @rem_loc_proc_id:  Remote (31:16) and Local (15:0) processor IDs
+ * @valid_total_ent:  Valid (31:16) and total (15:0) entries
+ * @flags:  Flags (bits 31:2 reserved)
+ */
+struct smp2p_smem {
+	uint32_t magic;
+	uint32_t feature_version;
+	uint32_t rem_loc_proc_id;
+	uint32_t valid_total_ent;
+	uint32_t flags;
+};
+
+struct smp2p_entry_v1 {
+	char name[SMP2P_MAX_ENTRY_NAME];
+	uint32_t entry;
+};
+
+struct smp2p_smem_item {
+	struct smp2p_smem header;
+	struct smp2p_entry_v1 entries[SMP2P_MAX_ENTRY];
+};
+
+/* Mock object for internal loopback testing. */
+struct msm_smp2p_remote_mock {
+	struct smp2p_smem_item remote_item;
+	int rx_interrupt_count;
+	int (*rx_interrupt)(void);
+	void (*tx_interrupt)(void);
+
+	bool item_exists;
+	bool initialized;
+	struct completion cb_completion;
+};
+
+void smp2p_init_header(struct smp2p_smem *header_ptr, int local_pid,
+		int remote_pid, uint32_t features, uint32_t version);
+void *msm_smp2p_get_remote_mock(void);
+int smp2p_remote_mock_rx_interrupt(void);
+int smp2p_reset_mock_edge(void);
+void msm_smp2p_interrupt_handler(int remote_pid);
+void msm_smp2p_set_remote_mock_exists(bool item_exists);
+void *msm_smp2p_get_remote_mock_smem_item(uint32_t *size);
+void *msm_smp2p_init_rmt_lpb_proc(int remote_pid);
+int msm_smp2p_deinit_rmt_lpb_proc(int remote_pid);
+void *smp2p_get_log_ctx(void);
+int smp2p_get_debug_mask(void);
+
+/* Inbound / outbound Interrupt configuration. */
+struct smp2p_interrupt_config {
+	bool is_configured;
+	uint32_t *out_int_ptr;
+	uint32_t out_int_mask;
+	int in_int_id;
+	const char *name;
+
+	/* interrupt stats */
+	unsigned int in_interrupt_count;
+	unsigned int out_interrupt_count;
+};
+
+struct smp2p_interrupt_config *smp2p_get_interrupt_config(void);
+const char *smp2p_pid_to_name(int remote_pid);
+struct smp2p_smem *smp2p_get_in_item(int remote_pid);
+struct smp2p_smem *smp2p_get_out_item(int remote_pid, int *state);
+void smp2p_gpio_open_test_entry(const char *name, int remote_pid, bool do_open);
+#endif
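
For reference, the accessor macros above are how the shared header words are
packed and unpacked. A minimal illustrative sketch (not part of the patch;
`hdr` is a hypothetical local variable) of filling in an SMEM item header:

	/* Illustrative only: populate a header via the smp2p_private.h macros. */
	struct smp2p_smem hdr = { .magic = SMP2P_MAGIC };

	SMP2P_SET_LOCAL_PID(hdr.rem_loc_proc_id, SMP2P_APPS_PROC);
	SMP2P_SET_REMOTE_PID(hdr.rem_loc_proc_id, SMP2P_MODEM_PROC);
	SMP2P_SET_VERSION(hdr.feature_version, 1);
	SMP2P_SET_FEATURES(hdr.feature_version, 0);
	SMP2P_SET_ENT_TOTAL(hdr.valid_total_ent, SMP2P_MAX_ENTRY);
	SMP2P_SET_ENT_VALID(hdr.valid_total_ent, 0);

	/* SMP2P_GET_REMOTE_PID(hdr.rem_loc_proc_id) now yields SMP2P_MODEM_PROC
	 * and SMP2P_GET_VERSION(hdr.feature_version) yields 1.
	 */
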
diff --git a/drivers/soc/qcom/smp2p_private_api.h b/drivers/soc/qcom/smp2p_private_api.h
new file mode 100644
index 0000000..5bff32f
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_private_api.h
@@ -0,0 +1,80 @@
+/* drivers/soc/qcom/smp2p_private_api.h
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_
+#define _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_
+
+#include <linux/notifier.h>
+
+struct msm_smp2p_out;
+
+/* Maximum size of the entry name and trailing null. */
+#define SMP2P_MAX_ENTRY_NAME 16
+
+/* Bits per entry */
+#define SMP2P_BITS_PER_ENTRY 32
+
+/* Processor ID's */
+enum {
+	SMP2P_APPS_PROC       = 0,
+	SMP2P_MODEM_PROC      = 1,
+	SMP2P_AUDIO_PROC      = 2,
+	SMP2P_SENSOR_PROC     = 3,
+	SMP2P_WIRELESS_PROC   = 4,
+	SMP2P_CDSP_PROC       = 5,
+	SMP2P_POWER_PROC      = 6,
+	SMP2P_TZ_PROC         = 7,
+	/* add new processors here */
+
+	SMP2P_REMOTE_MOCK_PROC = 15,
+	SMP2P_NUM_PROCS,
+};
+
+/**
+ * Notification events that are passed to the notifier for incoming and
+ * outgoing entries.
+ *
+ * For SMP2P_ENTRY_UPDATE events, the data argument passed to the notifier
+ * points to the associated struct msm_smp2p_update_notif.
+ */
+enum msm_smp2p_events {
+	SMP2P_OPEN,         /* data is NULL */
+	SMP2P_ENTRY_UPDATE, /* data => struct msm_smp2p_update_notif */
+};
+
+/**
+ * Passed in response to a SMP2P_ENTRY_UPDATE event.
+ *
+ * @previous_value:  previous value of the entry
+ * @current_value:   latest value of the entry
+ */
+struct msm_smp2p_update_notif {
+	uint32_t previous_value;
+	uint32_t current_value;
+};
+
+int msm_smp2p_out_open(int remote_pid, const char *entry,
+	struct notifier_block *open_notifier,
+	struct msm_smp2p_out **handle);
+int msm_smp2p_out_close(struct msm_smp2p_out **handle);
+int msm_smp2p_out_read(struct msm_smp2p_out *handle, uint32_t *data);
+int msm_smp2p_out_write(struct msm_smp2p_out *handle, uint32_t data);
+int msm_smp2p_out_modify(struct msm_smp2p_out *handle, uint32_t set_mask,
+	uint32_t clear_mask, bool send_irq);
+int msm_smp2p_in_read(int remote_pid, const char *entry, uint32_t *data);
+int msm_smp2p_in_register(int remote_pid, const char *entry,
+	struct notifier_block *in_notifier);
+int msm_smp2p_in_unregister(int remote_pid, const char *entry,
+	struct notifier_block *in_notifier);
+
+#endif /* _ARCH_ARM_MACH_MSM_SMP2P_PRIVATE_API_H_ */
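
The outbound half of this API is exercised extensively by the tests later in
this series. As a standalone sketch (illustrative only; the entry name
"example", the notifier, and the helper function are assumptions, not part of
the patch), a client opens an entry with a notifier block and writes to it:

	#include <linux/notifier.h>
	#include <linux/printk.h>
	#include "smp2p_private_api.h"

	/* Log the two events defined in enum msm_smp2p_events. */
	static int example_smp2p_notify(struct notifier_block *nb,
					unsigned long event, void *data)
	{
		if (event == SMP2P_OPEN)
			pr_info("example: outbound entry negotiated\n");
		else if (event == SMP2P_ENTRY_UPDATE)
			pr_info("example: entry value changed\n");
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_smp2p_notify,
	};

	static struct msm_smp2p_out *example_handle;

	static int example_open_and_write(void)
	{
		int ret;

		ret = msm_smp2p_out_open(SMP2P_MODEM_PROC, "example",
					 &example_nb, &example_handle);
		if (ret)
			return ret;

		/* Writes return -ENODEV until the SMP2P_OPEN event fires. */
		ret = msm_smp2p_out_write(example_handle, 0x1);
		if (ret)
			pr_info("example: write deferred until open (%d)\n", ret);
		return 0;
	}
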
diff --git a/drivers/soc/qcom/smp2p_sleepstate.c b/drivers/soc/qcom/smp2p_sleepstate.c
new file mode 100644
index 0000000..44192ff
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_sleepstate.c
@@ -0,0 +1,106 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include "smp2p_private.h"
+
+#define SET_DELAY (2 * HZ)
+#define PROC_AWAKE_ID 12 /* 12th bit */
+static int slst_gpio_base_id;
+
+/**
+ * sleepstate_pm_notifier() - PM notifier callback function.
+ * @nb:		Pointer to the notifier block.
+ * @event:	Suspend state event from PM module.
+ * @unused:	Null pointer from PM module.
+ *
+ * This function is registered as a callback to receive notifications
+ * from the PM module about the system suspend state.
+ */
+static int sleepstate_pm_notifier(struct notifier_block *nb,
+				unsigned long event, void *unused)
+{
+	switch (event) {
+	case PM_SUSPEND_PREPARE:
+		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 0);
+		break;
+
+	case PM_POST_SUSPEND:
+		gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block sleepstate_pm_nb = {
+	.notifier_call = sleepstate_pm_notifier,
+};
+
+static int smp2p_sleepstate_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct device_node *node = pdev->dev.of_node;
+
+	slst_gpio_base_id = of_get_gpio(node, 0);
+	if (slst_gpio_base_id == -EPROBE_DEFER) {
+		return slst_gpio_base_id;
+	} else if (slst_gpio_base_id < 0) {
+		SMP2P_ERR("%s: Error to get gpio %d\n",
+				__func__, slst_gpio_base_id);
+		return slst_gpio_base_id;
+	}
+
+	gpio_set_value(slst_gpio_base_id + PROC_AWAKE_ID, 1);
+
+	ret = register_pm_notifier(&sleepstate_pm_nb);
+	if (ret)
+		SMP2P_ERR("%s: power state notif error %d\n", __func__, ret);
+
+	return 0;
+}
+
+static const struct of_device_id msm_smp2p_slst_match_table[] = {
+	{.compatible = "qcom,smp2pgpio_sleepstate_3_out"},
+	{.compatible = "qcom,smp2pgpio-sleepstate-out"},
+	{},
+};
+
+static struct platform_driver smp2p_sleepstate_driver = {
+	.probe = smp2p_sleepstate_probe,
+	.driver = {
+		.name = "smp2p_sleepstate",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_smp2p_slst_match_table,
+	},
+};
+
+static int __init smp2p_sleepstate_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&smp2p_sleepstate_driver);
+	if (ret) {
+		SMP2P_ERR("%s: smp2p_sleepstate_driver register failed %d\n",
+			 __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+module_init(smp2p_sleepstate_init);
+MODULE_DESCRIPTION("SMP2P SLEEP STATE");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/smp2p_spinlock_test.c b/drivers/soc/qcom/smp2p_spinlock_test.c
new file mode 100644
index 0000000..56a0af6
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_spinlock_test.c
@@ -0,0 +1,820 @@
+/* drivers/soc/qcom/smp2p_spinlock_test.c
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <linux/remote_spinlock.h>
+#include <soc/qcom/smem.h>
+#include "smem_private.h"
+#include "smp2p_private.h"
+#include "smp2p_test_common.h"
+
+#define RS_END_THIEF_PID_BIT 20
+#define RS_END_THIEF_MASK 0x00f00000
+
+/* Spinlock commands used for testing Apps<->RPM spinlocks. */
+enum RPM_SPINLOCK_CMDS {
+	RPM_CMD_INVALID,
+	RPM_CMD_START,
+	RPM_CMD_LOCKED,
+	RPM_CMD_UNLOCKED,
+	RPM_CMD_END,
+};
+
+/* Shared structure for testing Apps<->RPM spinlocks. */
+struct rpm_spinlock_test {
+	uint32_t apps_cmd;
+	uint32_t apps_lock_count;
+	uint32_t rpm_cmd;
+	uint32_t rpm_lock_count;
+};
+
+static uint32_t ut_remote_spinlock_run_time = 1;
+
+/**
+ * smp2p_ut_remote_spinlock_core - Verify remote spinlock.
+ *
+ * @s:           Pointer to output file
+ * @remote_pid:  Remote processor to test
+ * @use_trylock: Use trylock to prevent an Apps deadlock if the
+ *               remote spinlock fails.
+ */
+static void smp2p_ut_remote_spinlock_core(struct seq_file *s, int remote_pid,
+		bool use_trylock)
+{
+	int failed = 0;
+	unsigned int lock_count = 0;
+	struct msm_smp2p_out *handle = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response;
+	struct mock_cb_data cb_out;
+	struct mock_cb_data cb_in;
+	unsigned long flags;
+	unsigned int n;
+	bool have_lock;
+	bool timeout;
+	int failed_tmp;
+	int spinlock_owner;
+	remote_spinlock_t *smem_spinlock;
+	unsigned long end;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+
+	cb_out.initialized = false;
+	cb_in.initialized = false;
+	mock_cb_data_init(&cb_out);
+	mock_cb_data_init(&cb_in);
+	do {
+		smem_spinlock = smem_get_remote_spinlock();
+		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
+
+		/* Open output entry */
+		ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
+			&cb_out.nb, &handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_out.cb_completion, HZ * 2),
+			>, 0);
+		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_out.event_open, ==, 1);
+
+		/* Open inbound entry */
+		ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
+				&cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ * 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_open, ==, 1);
+
+		/* Send start */
+		mock_cb_data_reset(&cb_in);
+		mock_cb_data_reset(&cb_out);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
+		SMP2P_SET_RMT_DATA(test_request, 0x0);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ * 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
+				&test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		test_response = SMP2P_GET_RMT_CMD(test_response);
+		if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
+				test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
+			/* invalid response from remote - abort test */
+			test_request = 0x0;
+			SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
+			SMP2P_SET_RMT_DATA(test_request, 0x0);
+			ret = msm_smp2p_out_write(handle, test_request);
+			UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
+					test_response);
+		}
+
+		/* Run spinlock test */
+		if (use_trylock)
+			seq_puts(s, "\tUsing remote_spin_trylock\n");
+		else
+			seq_puts(s, "\tUsing remote_spin_lock\n");
+
+		flags = 0;
+		have_lock = false;
+		timeout = false;
+		spinlock_owner = 0;
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
+		end = jiffies + (ut_remote_spinlock_run_time * HZ);
+		if (ut_remote_spinlock_run_time < 300) {
+			seq_printf(s, "\tRunning test for %u seconds; ",
+				ut_remote_spinlock_run_time);
+			seq_puts(s,
+				"on physical hardware please run >= 300 seconds by doing 'echo 300 >  ut_remote_spinlock_time'\n");
+		}
+		while (time_is_after_jiffies(end)) {
+			/* try to acquire spinlock */
+			if (use_trylock) {
+				unsigned long j_start = jiffies;
+
+				while (!remote_spin_trylock_irqsave(
+						smem_spinlock, flags)) {
+					if (jiffies_to_msecs(jiffies - j_start)
+							> 1000) {
+						seq_puts(s,
+							"\tFail: Timeout trying to get the lock\n");
+						timeout = true;
+						break;
+					}
+				}
+				if (timeout)
+					break;
+			} else {
+				remote_spin_lock_irqsave(smem_spinlock, flags);
+			}
+			have_lock = true;
+			++lock_count;
+
+			/* tell the remote side that we have the lock */
+			SMP2P_SET_RMT_DATA(test_request, lock_count);
+			SMP2P_SET_RMT_CMD(test_request,
+					SMP2P_LB_CMD_RSPIN_LOCKED);
+			ret = msm_smp2p_out_write(handle, test_request);
+			UT_ASSERT_INT(ret, ==, 0);
+
+			/* verify the other side doesn't say it has the lock */
+			for (n = 0; n < 1000; ++n) {
+				spinlock_owner =
+					remote_spin_owner(smem_spinlock);
+				if (spinlock_owner != SMEM_APPS) {
+					/* lock stolen by remote side */
+					seq_puts(s, "\tFail: Remote side: ");
+					seq_printf(s, "%d stole lock pid: %d\n",
+						remote_pid, spinlock_owner);
+					failed = true;
+					break;
+				}
+				spinlock_owner = 0;
+
+				ret = msm_smp2p_in_read(remote_pid,
+					SMP2P_RLPB_ENTRY_NAME, &test_response);
+				UT_ASSERT_INT(ret, ==, 0);
+				test_response =
+					SMP2P_GET_RMT_CMD(test_response);
+				UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_UNLOCKED, ==,
+					test_response);
+			}
+			if (failed)
+				break;
+
+			/* tell remote side we are unlocked and release lock */
+			SMP2P_SET_RMT_CMD(test_request,
+					SMP2P_LB_CMD_RSPIN_UNLOCKED);
+			(void)msm_smp2p_out_write(handle, test_request);
+			have_lock = false;
+			remote_spin_unlock_irqrestore(smem_spinlock, flags);
+		}
+		if (have_lock)
+			remote_spin_unlock_irqrestore(smem_spinlock, flags);
+
+		/* End test */
+		mock_cb_data_reset(&cb_in);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
+		SMP2P_SET_RMT_DATA(test_request, lock_count |
+				(spinlock_owner << RS_END_THIEF_PID_BIT));
+		(void)msm_smp2p_out_write(handle, test_request);
+
+		failed_tmp = failed;
+		failed = false;
+		do {
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ * 2),
+				>, 0);
+			reinit_completion(&cb_in.cb_completion);
+			ret = msm_smp2p_in_read(remote_pid,
+					SMP2P_RLPB_ENTRY_NAME, &test_response);
+			UT_ASSERT_INT(ret, ==, 0);
+		} while (!failed &&
+			SMP2P_GET_RMT_CMD(test_response) !=
+			SMP2P_LB_CMD_RSPIN_END);
+		if (failed)
+			break;
+		failed = failed_tmp;
+
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		seq_puts(s, "\tLocked spinlock ");
+		seq_printf(s, "local %u times; remote %u times",
+			lock_count,
+			test_response & ((1 << RS_END_THIEF_PID_BIT) - 1)
+			);
+		if (test_response & RS_END_THIEF_MASK) {
+			seq_puts(s, "Remote side reporting lock stolen by ");
+			seq_printf(s, "pid %d.\n",
+				SMP2P_GET_BITS(test_response,
+					RS_END_THIEF_MASK,
+					RS_END_THIEF_PID_BIT));
+			failed = 1;
+		}
+		seq_puts(s, "\n");
+
+		/* Cleanup */
+		ret = msm_smp2p_out_close(&handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(handle, ==, NULL);
+		ret = msm_smp2p_in_unregister(remote_pid,
+				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		if (!failed && !timeout)
+			seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		if (handle) {
+			/* send end command */
+			test_request = 0;
+			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
+			SMP2P_SET_RMT_DATA(test_request, lock_count);
+			(void)msm_smp2p_out_write(handle, test_request);
+			(void)msm_smp2p_out_close(&handle);
+		}
+		(void)msm_smp2p_in_unregister(remote_pid,
+				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
+
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * smp2p_ut_remote_spinlock_pid - Verify remote spinlock for a processor.
+ *
+ * @s:           Pointer to output file
+ * @pid:         Processor to test
+ * @use_trylock: Use trylock to prevent an Apps deadlock if the
+ *               remote spinlock fails.
+ */
+static void smp2p_ut_remote_spinlock_pid(struct seq_file *s, int pid,
+		bool use_trylock)
+{
+	struct smp2p_interrupt_config *int_cfg;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
+		return;
+
+	msm_smp2p_deinit_rmt_lpb_proc(pid);
+	smp2p_ut_remote_spinlock_core(s, pid, use_trylock);
+	msm_smp2p_init_rmt_lpb_proc(pid);
+}
+
+/**
+ * smp2p_ut_remote_spinlock - Verify remote spinlock for all processors.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_ut_remote_spinlock(struct seq_file *s)
+{
+	int pid;
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
+		smp2p_ut_remote_spinlock_pid(s, pid, false);
+}
+
+/**
+ * smp2p_ut_remote_spin_trylock - Verify remote trylock for all processors.
+ *
+ * @s:   Pointer to output file
+ */
+static void smp2p_ut_remote_spin_trylock(struct seq_file *s)
+{
+	int pid;
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid)
+		smp2p_ut_remote_spinlock_pid(s, pid, true);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_modem - Verify remote spinlock for the modem.
+ *
+ * @s:   pointer to output file
+ *
+ * This test verifies the remote spinlock for the modem processor.
+ */
+static void smp2p_ut_remote_spinlock_modem(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_MODEM_PROC, false);
+}
+
+static void smp2p_ut_remote_spinlock_adsp(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_AUDIO_PROC, false);
+}
+
+static void smp2p_ut_remote_spinlock_dsps(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_SENSOR_PROC, false);
+}
+
+static void smp2p_ut_remote_spinlock_wcnss(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_WIRELESS_PROC, false);
+}
+
+static void smp2p_ut_remote_spinlock_cdsp(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_CDSP_PROC, false);
+}
+
+static void smp2p_ut_remote_spinlock_tz(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_pid(s, SMP2P_TZ_PROC, false);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_rpm - Verify the Apps<->RPM remote spinlock.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_ut_remote_spinlock_rpm(struct seq_file *s)
+{
+	int failed = 0;
+	unsigned long flags;
+	unsigned int n;
+	unsigned int test_num;
+	struct rpm_spinlock_test *data_ptr;
+	remote_spinlock_t *smem_spinlock;
+	bool have_lock;
+
+	seq_printf(s, "Running %s for Apps<->RPM Test\n",
+		   __func__);
+	do {
+		smem_spinlock = smem_get_remote_spinlock();
+		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
+
+		data_ptr = smem_alloc(SMEM_ID_VENDOR0,
+				sizeof(struct rpm_spinlock_test), 0,
+				SMEM_ANY_HOST_FLAG);
+		UT_ASSERT_PTR(0, !=, data_ptr);
+
+		/* Send start */
+		writel_relaxed(0, &data_ptr->apps_lock_count);
+		writel_relaxed(RPM_CMD_START, &data_ptr->apps_cmd);
+
+		seq_puts(s, "\tWaiting for RPM to start test\n");
+		for (n = 0; n < 1000; ++n) {
+			if (readl_relaxed(&data_ptr->rpm_cmd) !=
+					RPM_CMD_INVALID)
+				break;
+			usleep_range(1000, 1200);
+		}
+		if (readl_relaxed(&data_ptr->rpm_cmd) == RPM_CMD_INVALID) {
+			/* timeout waiting for RPM */
+			writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
+			UT_ASSERT_INT(RPM_CMD_LOCKED, !=, RPM_CMD_INVALID);
+		}
+
+		/* Run spinlock test */
+		flags = 0;
+		have_lock = false;
+		for (test_num = 0; !failed && test_num < 10000; ++test_num) {
+			/* acquire spinlock */
+			remote_spin_lock_irqsave(smem_spinlock, flags);
+			have_lock = true;
+			data_ptr->apps_lock_count++;
+			writel_relaxed(data_ptr->apps_lock_count,
+				&data_ptr->apps_lock_count);
+			writel_relaxed(RPM_CMD_LOCKED, &data_ptr->apps_cmd);
+			/*
+			 * Ensure that the remote side sees our lock has
+			 * been acquired before we start polling their status.
+			 */
+			wmb();
+
+			/* verify the other side doesn't say it has the lock */
+			for (n = 0; n < 1000; ++n) {
+				UT_ASSERT_HEX(RPM_CMD_UNLOCKED, ==,
+					readl_relaxed(&data_ptr->rpm_cmd));
+			}
+			if (failed)
+				break;
+
+			/* release spinlock */
+			have_lock = false;
+			writel_relaxed(RPM_CMD_UNLOCKED, &data_ptr->apps_cmd);
+			/*
+			 * Ensure that our status-update write was committed
+			 * before we unlock the spinlock.
+			 */
+			wmb();
+			remote_spin_unlock_irqrestore(smem_spinlock, flags);
+		}
+		if (have_lock)
+			remote_spin_unlock_irqrestore(smem_spinlock, flags);
+
+		/* End test */
+		writel_relaxed(RPM_CMD_INVALID, &data_ptr->apps_cmd);
+		seq_printf(s, "\tLocked spinlock local %u remote %u\n",
+				readl_relaxed(&data_ptr->apps_lock_count),
+				readl_relaxed(&data_ptr->rpm_lock_count));
+
+		if (!failed)
+			seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+struct rmt_spinlock_work_item {
+	struct work_struct work;
+	struct completion try_lock;
+	struct completion locked;
+	bool has_locked;
+};
+
+static void ut_remote_spinlock_ssr_worker(struct work_struct *work)
+{
+	remote_spinlock_t *smem_spinlock;
+	unsigned long flags;
+	struct rmt_spinlock_work_item *work_item =
+		container_of(work, struct rmt_spinlock_work_item, work);
+
+	work_item->has_locked = false;
+	complete(&work_item->try_lock);
+	smem_spinlock = smem_get_remote_spinlock();
+	if (!smem_spinlock) {
+		pr_err("%s Failed\n", __func__);
+		return;
+	}
+
+	remote_spin_lock_irqsave(smem_spinlock, flags);
+	remote_spin_unlock_irqrestore(smem_spinlock, flags);
+	work_item->has_locked = true;
+	complete(&work_item->locked);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_ssr - Verify spinlock release during subsystem restart.
+ *
+ * @s:   pointer to output file
+ */
+static void smp2p_ut_remote_spinlock_ssr(struct seq_file *s)
+{
+	int failed = 0;
+	unsigned long flags;
+	remote_spinlock_t *smem_spinlock;
+	int spinlock_owner = 0;
+
+	struct workqueue_struct *ws = NULL;
+	struct rmt_spinlock_work_item work_item;
+
+	seq_printf(s, " Running %s Test\n",
+		   __func__);
+	do {
+		smem_spinlock = smem_get_remote_spinlock();
+		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
+
+		ws = create_singlethread_workqueue("ut_remote_spinlock_ssr");
+		UT_ASSERT_PTR(ws, !=, NULL);
+		INIT_WORK(&work_item.work, ut_remote_spinlock_ssr_worker);
+		init_completion(&work_item.try_lock);
+		init_completion(&work_item.locked);
+
+		remote_spin_lock_irqsave(smem_spinlock, flags);
+		/* Unlock local spin lock and hold HW spinlock */
+		spin_unlock_irqrestore(&((smem_spinlock)->local), flags);
+
+		queue_work(ws, &work_item.work);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&work_item.try_lock, HZ * 2), >, 0);
+		UT_ASSERT_INT((int)work_item.has_locked, ==, 0);
+		spinlock_owner = remote_spin_owner(smem_spinlock);
+		UT_ASSERT_INT(spinlock_owner, ==, SMEM_APPS);
+		remote_spin_release_all(SMEM_APPS);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&work_item.locked, HZ * 2), >, 0);
+
+		if (!failed)
+			seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * smp2p_ut_remote_spinlock_track_core - Verify remote spinlock.
+ *
+ * @s:           Pointer to output file
+ * @remote_pid:  Remote processor to test
+ *
+ * This test has the remote subsystem grab the lock, and then has the local
+ * subsystem attempt to grab the lock using the trylock() API. It then verifies
+ * that the ID in the hw_spinlocks array matches the owner of the lock.
+ */
+static void smp2p_ut_remote_spinlock_track_core(struct seq_file *s,
+		int remote_pid)
+{
+	int failed = 0;
+	struct msm_smp2p_out *handle = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response;
+	struct mock_cb_data cb_out;
+	struct mock_cb_data cb_in;
+	unsigned long flags;
+	int stored_value;
+	remote_spinlock_t *smem_spinlock;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+
+	cb_out.initialized = false;
+	cb_in.initialized = false;
+	mock_cb_data_init(&cb_out);
+	mock_cb_data_init(&cb_in);
+	do {
+		smem_spinlock = smem_get_remote_spinlock();
+		UT_ASSERT_PTR(smem_spinlock, !=, NULL);
+
+		/* Open output entry */
+		ret = msm_smp2p_out_open(remote_pid, SMP2P_RLPB_ENTRY_NAME,
+			&cb_out.nb, &handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_out.cb_completion, HZ * 2),
+			>, 0);
+		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_out.event_open, ==, 1);
+
+		/* Open inbound entry */
+		ret = msm_smp2p_in_register(remote_pid, SMP2P_RLPB_ENTRY_NAME,
+				&cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ * 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_open, ==, 1);
+
+		/* Send start */
+		mock_cb_data_reset(&cb_in);
+		mock_cb_data_reset(&cb_out);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
+		SMP2P_SET_RMT_DATA(test_request, 0x0);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ * 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		ret = msm_smp2p_in_read(remote_pid, SMP2P_RLPB_ENTRY_NAME,
+				&test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		test_response = SMP2P_GET_RMT_CMD(test_response);
+		if (test_response != SMP2P_LB_CMD_RSPIN_LOCKED &&
+				test_response != SMP2P_LB_CMD_RSPIN_UNLOCKED) {
+			/* invalid response from remote - abort test */
+			test_request = 0x0;
+			SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
+			SMP2P_SET_RMT_DATA(test_request, 0x0);
+			ret = msm_smp2p_out_write(handle, test_request);
+			UT_ASSERT_HEX(SMP2P_LB_CMD_RSPIN_LOCKED, ==,
+					test_response);
+		}
+
+		/* Run spinlock test */
+		flags = 0;
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
+
+		/* try to acquire spinlock */
+		remote_spin_trylock_irqsave(smem_spinlock, flags);
+		/*
+		 * Need to check against the locking token (PID + 1)
+		 * because the remote_spin_owner() API only returns the
+		 * PID.
+		 */
+		stored_value = remote_spin_get_hw_spinlocks_element(
+				smem_spinlock);
+		UT_ASSERT_INT(stored_value, ==,
+			remote_spin_owner(smem_spinlock) + 1);
+		UT_ASSERT_INT(stored_value, ==, remote_pid + 1);
+
+		/* End test */
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
+		SMP2P_SET_RMT_DATA(test_request, 0x0);
+		(void)msm_smp2p_out_write(handle, test_request);
+
+		/* Cleanup */
+		ret = msm_smp2p_out_close(&handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(handle, ==, NULL);
+		ret = msm_smp2p_in_unregister(remote_pid,
+				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		if (!failed)
+			seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		if (handle) {
+			/* send end command */
+			test_request = 0x0;
+			SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_END);
+			SMP2P_SET_RMT_DATA(test_request, 0x0);
+			(void)msm_smp2p_out_write(handle, test_request);
+			(void)msm_smp2p_out_close(&handle);
+		}
+		(void)msm_smp2p_in_unregister(remote_pid,
+				SMP2P_RLPB_ENTRY_NAME, &cb_in.nb);
+
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * smp2p_ut_remote_spinlock_track - Verify PID tracking for a processor.
+ *
+ * @s:	Pointer to output file
+ * @pid:		The processor to test
+ */
+static void smp2p_ut_remote_spinlock_track(struct seq_file *s, int pid)
+{
+	struct smp2p_interrupt_config *int_cfg;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	if (pid >= SMP2P_NUM_PROCS || !int_cfg[pid].is_configured)
+		return;
+
+	msm_smp2p_deinit_rmt_lpb_proc(pid);
+	smp2p_ut_remote_spinlock_track_core(s, pid);
+	msm_smp2p_init_rmt_lpb_proc(pid);
+}
+
+/**
+ * smp2p_ut_remote_spinlock_track_modem - Verify PID tracking for the modem.
+ *
+ * @s:	Pointer to output file
+ *
+ * This test verifies PID tracking for the modem processor.
+ */
+static void smp2p_ut_remote_spinlock_track_modem(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_track(s, SMP2P_MODEM_PROC);
+}
+
+static void smp2p_ut_remote_spinlock_track_adsp(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_track(s, SMP2P_AUDIO_PROC);
+}
+
+static void smp2p_ut_remote_spinlock_track_dsps(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_track(s, SMP2P_SENSOR_PROC);
+}
+
+static void smp2p_ut_remote_spinlock_track_wcnss(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_track(s, SMP2P_WIRELESS_PROC);
+}
+
+static void smp2p_ut_remote_spinlock_track_cdsp(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_track(s, SMP2P_CDSP_PROC);
+}
+
+static void smp2p_ut_remote_spinlock_track_tz(struct seq_file *s)
+{
+	smp2p_ut_remote_spinlock_track(s, SMP2P_TZ_PROC);
+}
+
+static int __init smp2p_debugfs_init(void)
+{
+	/*
+	 * Add Unit Test entries.
+	 *
+	 * The idea with unit tests is that you can run all of them
+	 * from ADB shell by doing:
+	 *  adb shell
+	 *  cat ut*
+	 *
+	 * If particular tests fail, you can then rerun just the failing
+	 * tests while you debug and resolve the failure.
+	 */
+	smp2p_debug_create("ut_remote_spinlock",
+		smp2p_ut_remote_spinlock);
+	smp2p_debug_create("ut_remote_spin_trylock",
+		smp2p_ut_remote_spin_trylock);
+	smp2p_debug_create("ut_remote_spinlock_modem",
+		smp2p_ut_remote_spinlock_modem);
+	smp2p_debug_create("ut_remote_spinlock_adsp",
+		smp2p_ut_remote_spinlock_adsp);
+	smp2p_debug_create("ut_remote_spinlock_dsps",
+		smp2p_ut_remote_spinlock_dsps);
+	smp2p_debug_create("ut_remote_spinlock_wcnss",
+		smp2p_ut_remote_spinlock_wcnss);
+	smp2p_debug_create("ut_remote_spinlock_cdsp",
+		smp2p_ut_remote_spinlock_cdsp);
+	smp2p_debug_create("ut_remote_spinlock_tz",
+		smp2p_ut_remote_spinlock_tz);
+	smp2p_debug_create("ut_remote_spinlock_rpm",
+		smp2p_ut_remote_spinlock_rpm);
+	smp2p_debug_create_u32("ut_remote_spinlock_time",
+		&ut_remote_spinlock_run_time);
+	smp2p_debug_create("ut_remote_spinlock_ssr",
+		&smp2p_ut_remote_spinlock_ssr);
+	smp2p_debug_create("ut_remote_spinlock_track_modem",
+		&smp2p_ut_remote_spinlock_track_modem);
+	smp2p_debug_create("ut_remote_spinlock_track_adsp",
+		&smp2p_ut_remote_spinlock_track_adsp);
+	smp2p_debug_create("ut_remote_spinlock_track_dsps",
+		&smp2p_ut_remote_spinlock_track_dsps);
+	smp2p_debug_create("ut_remote_spinlock_track_wcnss",
+		&smp2p_ut_remote_spinlock_track_wcnss);
+	smp2p_debug_create("ut_remote_spinlock_track_cdsp",
+		&smp2p_ut_remote_spinlock_track_cdsp);
+	smp2p_debug_create("ut_remote_spinlock_track_tz",
+		&smp2p_ut_remote_spinlock_track_tz);
+	return 0;
+}
+module_init(smp2p_debugfs_init);
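
All of the spinlock tests above drive the remote loopback server with the same
request-word encoding; distilled into a standalone sketch (the command and
values are chosen only for illustration):

	uint32_t test_request = 0;
	uint32_t cmd, data;

	/* Build a loopback request: request type, RSPIN_START command,
	 * zeroed data field.
	 */
	SMP2P_SET_RMT_CMD_TYPE_REQ(test_request);
	SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_RSPIN_START);
	SMP2P_SET_RMT_DATA(test_request, 0x0);

	/* A reply read back with msm_smp2p_in_read() is decoded the same way. */
	cmd = SMP2P_GET_RMT_CMD(test_request);
	data = SMP2P_GET_RMT_DATA(test_request);
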
diff --git a/drivers/soc/qcom/smp2p_test.c b/drivers/soc/qcom/smp2p_test.c
new file mode 100644
index 0000000..aa8d0c8
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_test.c
@@ -0,0 +1,1324 @@
+/* drivers/soc/qcom/smp2p_test.c
+ *
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/debugfs.h>
+#include <linux/ctype.h>
+#include <linux/jiffies.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/module.h>
+#include <soc/qcom/subsystem_restart.h>
+#include "smp2p_private.h"
+#include "smp2p_test_common.h"
+
+/**
+ * smp2p_ut_local_basic - Basic sanity test using local loopback.
+ *
+ * @s: pointer to output file
+ *
+ * This test simulates a simple write and read
+ * when the remote processor does not exist.
+ */
+static void smp2p_ut_local_basic(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_out *smp2p_obj;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_data;
+
+	seq_printf(s, "Running %s\n", __func__);
+	mock_cb_data_init(&cb_data);
+	do {
+		/* initialize mock edge and start opening */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+
+		msm_smp2p_set_remote_mock_exists(false);
+
+		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
+			&cb_data.nb, &smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+		rmp->rx_interrupt_count = 0;
+
+		/* simulate response from remote side */
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+					SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+					SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+		rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+		rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+		rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+		rmp->tx_interrupt();
+
+		/* verify port was opened */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ / 2), >, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_open, ==, 1);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* do write (test outbound entries) */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0xC0DE;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* do read (test inbound entries) */
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(test_request, ==, test_response);
+
+		ret = msm_smp2p_out_close(&smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(smp2p_obj, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		(void)msm_smp2p_out_close(&smp2p_obj);
+	}
+}
+
+/**
+ * smp2p_ut_local_late_open - Verify post-negotiation opening.
+ *
+ * @s: pointer to output file
+ *
+ * Verify entry creation for opening entries after negotiation is complete.
+ */
+static void smp2p_ut_local_late_open(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_out *smp2p_obj;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_data;
+
+	seq_printf(s, "Running %s\n", __func__);
+	mock_cb_data_init(&cb_data);
+	do {
+		/* initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+						SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+						SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+			rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+			rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+			rmp->remote_item.header.valid_total_ent,
+			SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+		rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+
+		msm_smp2p_set_remote_mock_exists(true);
+
+		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
+			&cb_data.nb, &smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* verify port was opened */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_open, ==, 1);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* do write (test outbound entries) */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0xC0DE;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* do read (test inbound entries) */
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(test_request, ==, test_response);
+
+		ret = msm_smp2p_out_close(&smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(smp2p_obj, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		(void)msm_smp2p_out_close(&smp2p_obj);
+	}
+}
+
+/**
+ * smp2p_ut_local_early_open - Verify pre-negotiation opening.
+ *
+ * @s: pointer to output file
+ *
+ * Verify entry creation for opening entries before negotiation is complete.
+ */
+static void smp2p_ut_local_early_open(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_out *smp2p_obj;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	struct smp2p_smem *outbound_item;
+	int negotiation_state;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_data;
+
+	seq_printf(s, "Running %s\n", __func__);
+	mock_cb_data_init(&cb_data);
+	do {
+		/* initialize mock edge, but don't enable, yet */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+						SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+						SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+		rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+		rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+		rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+
+		msm_smp2p_set_remote_mock_exists(false);
+		UT_ASSERT_PTR(NULL, ==,
+				smp2p_get_in_item(SMP2P_REMOTE_MOCK_PROC));
+
+		/* initiate open, but verify it doesn't complete */
+		ret = msm_smp2p_out_open(SMP2P_REMOTE_MOCK_PROC, "smp2p",
+			&cb_data.nb, &smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ / 8),
+			==, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 0);
+		UT_ASSERT_INT(cb_data.event_open, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		outbound_item = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
+				&negotiation_state);
+		UT_ASSERT_PTR(outbound_item, !=, NULL);
+		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENING);
+		UT_ASSERT_INT(0, ==,
+			SMP2P_GET_ENT_VALID(outbound_item->valid_total_ent));
+
+		/* verify that read/write don't work yet */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0x0;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, -ENODEV);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
+
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, -ENODEV);
+
+		/* allocate remote entry and verify open */
+		msm_smp2p_set_remote_mock_exists(true);
+		rmp->tx_interrupt();
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_data.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_data.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_data.event_open, ==, 1);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* do write (test outbound entries) */
+		rmp->rx_interrupt_count = 0;
+		test_request = 0xC0DE;
+		ret = msm_smp2p_out_write(smp2p_obj, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* do read (test inbound entries) */
+		ret = msm_smp2p_out_read(smp2p_obj, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(test_request, ==, test_response);
+
+		ret = msm_smp2p_out_close(&smp2p_obj);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(smp2p_obj, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		(void)msm_smp2p_out_close(&smp2p_obj);
+	}
+}
+
+/**
+ * smp2p_ut_mock_loopback - Exercise the remote loopback using remote mock.
+ *
+ * @s: pointer to output file
+ *
+ * This test exercises the remote loopback code using
+ * remote mock object. The remote mock object simulates the remote
+ * processor sending remote loopback commands to the local processor.
+ */
+static void smp2p_ut_mock_loopback(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	uint32_t test_request = 0;
+	uint32_t test_response = 0;
+	struct msm_smp2p_out  *local;
+
+	seq_printf(s, "Running %s\n", __func__);
+	do {
+		/* Initialize the mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+						SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(
+		rmp->remote_item.header.rem_loc_proc_id,
+						SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(
+		rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(
+		rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(
+		rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(
+		rmp->remote_item.header.valid_total_ent, 1);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+
+		/* Create test entry and attach loopback server */
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		strlcpy(rmp->remote_item.entries[0].name, "smp2p",
+							SMP2P_MAX_ENTRY_NAME);
+		rmp->remote_item.entries[0].entry = 0;
+		rmp->tx_interrupt();
+
+		local = msm_smp2p_init_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&rmp->cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 2);
+
+		/* Send Echo Command */
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
+		SMP2P_SET_RMT_DATA(test_request, 10);
+		rmp->remote_item.entries[0].entry = test_request;
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&rmp->cb_completion, HZ / 2),
+			>, 0);
+
+		/* Verify Echo Response */
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		ret = msm_smp2p_out_read(local,
+							&test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		UT_ASSERT_INT(test_response, ==, 10);
+
+		/* Send PINGPONG command */
+		test_request = 0;
+		test_response = 0;
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
+		SMP2P_SET_RMT_DATA(test_request, 10);
+		rmp->remote_item.entries[0].entry = test_request;
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&rmp->cb_completion, HZ / 2),
+			>, 0);
+
+		/* Verify PINGPONG Response */
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		ret = msm_smp2p_out_read(local, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		UT_ASSERT_INT(test_response, ==, 9);
+
+		/* Send CLEARALL command */
+		test_request = 0;
+		test_response = 0;
+		rmp->rx_interrupt_count = 0;
+		reinit_completion(&rmp->cb_completion);
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
+		SMP2P_SET_RMT_DATA(test_request, 10);
+		rmp->remote_item.entries[0].entry = test_request;
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&rmp->cb_completion, HZ / 2),
+			>, 0);
+
+		/* Verify CLEARALL response */
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+		ret = msm_smp2p_out_read(local, &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		test_response = SMP2P_GET_RMT_DATA(test_response);
+		UT_ASSERT_INT(test_response, ==, 0);
+
+		ret = msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
+		UT_ASSERT_INT(ret, ==, 0);
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+		msm_smp2p_deinit_rmt_lpb_proc(SMP2P_REMOTE_MOCK_PROC);
+	}
+}
+
+/**
+ * smp2p_ut_remote_inout_core - Verify inbound/outbound functionality.
+ *
+ * @s: pointer to output file
+ * @remote_pid:  Remote processor to test
+ *
+ * This test verifies inbound/outbound functionality for the remote processor.
+ */
+static void smp2p_ut_remote_inout_core(struct seq_file *s, int remote_pid)
+{
+	int failed = 0;
+	struct msm_smp2p_out *handle;
+	int ret;
+	uint32_t test_request;
+	uint32_t test_response = 0;
+	static struct mock_cb_data cb_out;
+	static struct mock_cb_data cb_in;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+	mock_cb_data_init(&cb_out);
+	mock_cb_data_init(&cb_in);
+	do {
+		/* Open output entry */
+		ret = msm_smp2p_out_open(remote_pid, "smp2p",
+			&cb_out.nb, &handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_out.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_out.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_out.event_open, ==, 1);
+
+		/* Open inbound entry */
+		ret = msm_smp2p_in_register(remote_pid, "smp2p",
+				&cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_open, ==, 1);
+
+		/* Write an echo request */
+		mock_cb_data_reset(&cb_out);
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_ECHO);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* Verify inbound reply */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
+			    cb_in.entry_data.current_value), ==, 0xAA55);
+
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
+		UT_ASSERT_INT(SMP2P_LB_CMD_ECHO, ==,
+				SMP2P_GET_RMT_CMD(test_response));
+		UT_ASSERT_INT(0xAA55, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Write a clear all request */
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_CLEARALL);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* Verify inbound reply */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
+			    cb_in.entry_data.current_value), ==, 0x0000);
+
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
+		UT_ASSERT_INT(0x0000, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Write a decrement request */
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_LB_CMD_PINGPONG);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		/* Verify inbound reply */
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ / 2),
+			>, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 1);
+		UT_ASSERT_INT(SMP2P_GET_RMT_DATA(
+			    cb_in.entry_data.current_value), ==, 0xAA54);
+
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RMT_CMD_TYPE(test_response));
+		UT_ASSERT_INT(SMP2P_LB_CMD_PINGPONG, ==,
+				SMP2P_GET_RMT_CMD(test_response));
+		UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Test the ignore flag */
+		mock_cb_data_reset(&cb_in);
+		test_request = 0x0;
+		SMP2P_SET_RMT_CMD_TYPE(test_request, 1);
+		SMP2P_SET_RMT_CMD(test_request, SMP2P_RLPB_IGNORE);
+		SMP2P_SET_RMT_DATA(test_request, 0xAA55);
+		ret = msm_smp2p_out_write(handle, test_request);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		UT_ASSERT_INT(
+			(int)wait_for_completion_timeout(
+					&cb_in.cb_completion, HZ / 2),
+			==, 0);
+		UT_ASSERT_INT(cb_in.cb_count, ==, 0);
+		UT_ASSERT_INT(cb_in.event_entry_update, ==, 0);
+		ret = msm_smp2p_in_read(remote_pid, "smp2p", &test_response);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(0xAA54, ==, SMP2P_GET_RMT_DATA(test_response));
+
+		/* Cleanup */
+		ret = msm_smp2p_out_close(&handle);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_PTR(handle, ==, 0);
+		ret = msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		if (handle)
+			(void)msm_smp2p_out_close(&handle);
+		(void)msm_smp2p_in_unregister(remote_pid, "smp2p", &cb_in.nb);
+
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * smp2p_ut_remote_inout - Verify inbound/outbound functionality for all.
+ *
+ * @s: pointer to output file
+ *
+ * This test verifies inbound and outbound functionality for all
+ * configured remote processors.
+ */
+static void smp2p_ut_remote_inout(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *int_cfg;
+	int pid;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+		if (!int_cfg[pid].is_configured)
+			continue;
+
+		msm_smp2p_deinit_rmt_lpb_proc(pid);
+		smp2p_ut_remote_inout_core(s, pid);
+		msm_smp2p_init_rmt_lpb_proc(pid);
+	}
+}
+
+/**
+ * smp2p_ut_remote_out_max_entries_core - Verify open functionality.
+ *
+ * @s: pointer to output file
+ * @remote_pid:  Remote processor for which the test is executed.
+ *
+ * This test verifies open functionality by creating the maximum number
+ * of outbound entries.
+ */
+static void smp2p_ut_remote_out_max_entries_core(struct seq_file *s,
+	int remote_pid)
+{
+	int j = 0;
+	int failed = 0;
+	struct msm_smp2p_out *handle[SMP2P_MAX_ENTRY];
+	int ret;
+	static struct mock_cb_data cb_out[SMP2P_MAX_ENTRY];
+	char entry_name[SMP2P_MAX_ENTRY_NAME];
+	int num_created;
+
+	seq_printf(s, "Running %s for '%s' remote pid %d\n",
+		   __func__, smp2p_pid_to_name(remote_pid), remote_pid);
+
+	for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+		handle[j] = NULL;
+		mock_cb_data_init(&cb_out[j]);
+	}
+
+	do {
+		num_created = 0;
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			/* Open as many output entries as possible */
+			scnprintf(entry_name, SMP2P_MAX_ENTRY_NAME,
+				"smp2p%d", j);
+			ret = msm_smp2p_out_open(remote_pid, entry_name,
+				&cb_out[j].nb, &handle[j]);
+			if (ret == -ENOMEM)
+				/* hit max number */
+				break;
+			UT_ASSERT_INT(ret, ==, 0);
+			++num_created;
+		}
+		if (failed)
+			break;
+
+		/* verify at least one entry was created, up to the max */
+		UT_ASSERT_INT(num_created, <=, SMP2P_MAX_ENTRY);
+		UT_ASSERT_INT(num_created, >, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+
+	/* cleanup */
+	for (j = 0; j < SMP2P_MAX_ENTRY; j++)
+		ret = msm_smp2p_out_close(&handle[j]);
+}
+
+/**
+ * smp2p_ut_remote_out_max_entries - Verify open for all configured processors.
+ *
+ * @s: pointer to output file
+ *
+ * This test verifies creating the maximum number of entries for
+ * all configured remote processors.
+ */
+static void smp2p_ut_remote_out_max_entries(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *int_cfg;
+	int pid;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+		if (!int_cfg[pid].is_configured)
+			continue;
+
+		smp2p_ut_remote_out_max_entries_core(s, pid);
+	}
+}
+
+/**
+ * smp2p_ut_local_in_max_entries - Verify registering and unregistering.
+ *
+ * @s: pointer to output file
+ *
+ * This test verifies registering and unregistering for inbound entries using
+ * the remote mock processor.
+ */
+static void smp2p_ut_local_in_max_entries(struct seq_file *s)
+{
+	int j = 0;
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	static struct mock_cb_data cb_in[SMP2P_MAX_ENTRY];
+	static struct mock_cb_data cb_out;
+
+	seq_printf(s, "Running %s\n", __func__);
+
+	for (j = 0; j < SMP2P_MAX_ENTRY; j++)
+		mock_cb_data_init(&cb_in[j]);
+
+	mock_cb_data_init(&cb_out);
+
+	do {
+		/* Initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(rmp->remote_item.header.rem_loc_proc_id,
+				SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(rmp->remote_item.header.rem_loc_proc_id,
+				SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(rmp->remote_item.header.valid_total_ent,
+				SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+
+		/* Create Max Entries in the remote mock object */
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			scnprintf(rmp->remote_item.entries[j].name,
+				SMP2P_MAX_ENTRY_NAME, "smp2p%d", j);
+			rmp->remote_item.entries[j].entry = 0;
+			rmp->tx_interrupt();
+		}
+
+		/* Register for in entries */
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[j].name,
+				&(cb_in[j].nb));
+			UT_ASSERT_INT(ret, ==, 0);
+			UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+					&(cb_in[j].cb_completion), HZ / 2),
+				>, 0);
+			UT_ASSERT_INT(cb_in[j].cb_count, ==, 1);
+			UT_ASSERT_INT(cb_in[j].event_entry_update, ==, 0);
+		}
+		UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
+
+		/* Unregister */
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++) {
+			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[j].name,
+				&(cb_in[j].nb));
+			UT_ASSERT_INT(ret, ==, 0);
+		}
+		UT_ASSERT_INT(j, ==, SMP2P_MAX_ENTRY);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+
+		for (j = 0; j < SMP2P_MAX_ENTRY; j++)
+			ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[j].name,
+				&(cb_in[j].nb));
+	}
+}
+
+/**
+ * smp2p_ut_local_in_multiple - Verify Multiple Inbound Registration.
+ *
+ * @s: pointer to output file
+ *
+ * This test verifies multiple clients registering for the same inbound
+ * entry using the remote mock processor.
+ */
+static void smp2p_ut_local_in_multiple(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+	static struct mock_cb_data cb_in_1;
+	static struct mock_cb_data cb_in_2;
+	static struct mock_cb_data cb_out;
+
+	seq_printf(s, "Running %s\n", __func__);
+
+	mock_cb_data_init(&cb_in_1);
+	mock_cb_data_init(&cb_in_2);
+	mock_cb_data_init(&cb_out);
+
+	do {
+		/* Initialize mock edge */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0,
+			sizeof(struct smp2p_smem_item));
+		rmp->remote_item.header.magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(rmp->remote_item.header.rem_loc_proc_id,
+				SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(rmp->remote_item.header.rem_loc_proc_id,
+				SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(rmp->remote_item.header.feature_version, 1);
+		SMP2P_SET_FEATURES(rmp->remote_item.header.feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(rmp->remote_item.header.valid_total_ent, 1);
+		SMP2P_SET_ENT_VALID(rmp->remote_item.header.valid_total_ent, 0);
+		rmp->remote_item.header.flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+
+		/* Create an Entry in the remote mock object */
+		scnprintf(rmp->remote_item.entries[0].name,
+				SMP2P_MAX_ENTRY_NAME, "smp2p%d", 1);
+		rmp->remote_item.entries[0].entry = 0;
+		rmp->tx_interrupt();
+
+		/* Register multiple clients for the inbound entry */
+		ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&cb_in_1.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+				&(cb_in_1.cb_completion), HZ / 2),
+				>, 0);
+		UT_ASSERT_INT(cb_in_1.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in_1.event_entry_update, ==, 0);
+
+		ret = msm_smp2p_in_register(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&cb_in_2.nb);
+		UT_ASSERT_INT(ret, ==, 0);
+		UT_ASSERT_INT(
+				(int)wait_for_completion_timeout(
+				&(cb_in_2.cb_completion), HZ / 2),
+				>, 0);
+		UT_ASSERT_INT(cb_in_2.cb_count, ==, 1);
+		UT_ASSERT_INT(cb_in_2.event_entry_update, ==, 0);
+
+		/* Unregister the clients */
+		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&(cb_in_1.nb));
+		UT_ASSERT_INT(ret, ==, 0);
+
+		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&(cb_in_2.nb));
+		UT_ASSERT_INT(ret, ==, 0);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+
+		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&(cb_in_1.nb));
+
+		ret = msm_smp2p_in_unregister(SMP2P_REMOTE_MOCK_PROC,
+				rmp->remote_item.entries[0].name,
+				&(cb_in_2.nb));
+	}
+}
+
+/**
+ * smp2p_ut_local_ssr_ack - Verify SSR Done/ACK Feature
+ *
+ * @s: pointer to output file
+ */
+static void smp2p_ut_local_ssr_ack(struct seq_file *s)
+{
+	int failed = 0;
+	struct msm_smp2p_remote_mock *rmp = NULL;
+	int ret;
+
+	seq_printf(s, "Running %s\n", __func__);
+	do {
+		struct smp2p_smem *rhdr;
+		struct smp2p_smem *lhdr;
+		int negotiation_state;
+
+		/* initialize v1 without SMP2P_FEATURE_SSR_ACK enabled */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+		rhdr = &rmp->remote_item.header;
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
+		rhdr->magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
+				SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(rhdr->feature_version, 1);
+		SMP2P_SET_FEATURES(rhdr->feature_version, 0);
+		SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
+		rhdr->flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+		rmp->tx_interrupt();
+
+		/* verify edge is open */
+		lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
+					&negotiation_state);
+		UT_ASSERT_PTR(NULL, !=, lhdr);
+		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* verify no response to ack feature */
+		rmp->rx_interrupt_count = 0;
+		SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
+
+		/* initialize v1 with SMP2P_FEATURE_SSR_ACK enabled */
+		ret = smp2p_reset_mock_edge();
+		UT_ASSERT_INT(ret, ==, 0);
+		rmp = msm_smp2p_get_remote_mock();
+		UT_ASSERT_PTR(rmp, !=, NULL);
+		rhdr = &rmp->remote_item.header;
+
+		rmp->rx_interrupt_count = 0;
+		memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
+		rhdr->magic = SMP2P_MAGIC;
+		SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
+				SMP2P_REMOTE_MOCK_PROC);
+		SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
+		SMP2P_SET_VERSION(rhdr->feature_version, 1);
+		SMP2P_SET_FEATURES(rhdr->feature_version,
+				SMP2P_FEATURE_SSR_ACK);
+		SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
+		SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
+		rmp->rx_interrupt_count = 0;
+		rhdr->flags = 0x0;
+		msm_smp2p_set_remote_mock_exists(true);
+		rmp->tx_interrupt();
+
+		/* verify edge is open */
+		lhdr = smp2p_get_out_item(SMP2P_REMOTE_MOCK_PROC,
+					&negotiation_state);
+		UT_ASSERT_PTR(NULL, !=, lhdr);
+		UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENED);
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		/* verify response to ack feature */
+		rmp->rx_interrupt_count = 0;
+		SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
+		UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		rmp->rx_interrupt_count = 0;
+		SMP2P_SET_RESTART_DONE(rhdr->flags, 0);
+		rmp->tx_interrupt();
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
+		UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+		UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * get_ssr_name_for_proc - Retrieve an SSR name from the provided list
+ *
+ * @names:	List of possible processor names
+ * @name_len:	Number of entries in @names
+ * @index:	Index into @names
+ *
+ * Return: Pointer to the processor name at @index, or NULL on error
+ */
+static char *get_ssr_name_for_proc(char * const names[], size_t name_len,
+				   int index)
+{
+	if (index >= name_len) {
+		pr_err("%s: SSR failed; check subsys name table\n",
+				__func__);
+		return NULL;
+	}
+
+	return names[index];
+}
+
+/**
+ * smp2p_ut_remotesubsys_ssr_ack - Verify SSR Done/ACK Feature for one subsystem
+ *
+ * @s: pointer to output file
+ * @rpid: Remote processor ID
+ * @int_cfg: Interrupt config
+ */
+static void smp2p_ut_remotesubsys_ssr_ack(struct seq_file *s, uint32_t rpid,
+		struct smp2p_interrupt_config *int_cfg)
+{
+	int failed = 0;
+
+	seq_printf(s, "Running %s\n", __func__);
+	do {
+		struct smp2p_smem *rhdr;
+		struct smp2p_smem *lhdr;
+		int negotiation_state;
+		int name_index;
+		int ret;
+		uint32_t ssr_done_start;
+		bool ssr_ack_enabled = false;
+		bool ssr_success = false;
+		char *name = NULL;
+
+		static char * const mpss_names[] = {"modem", "mpss"};
+		static char * const lpass_names[] = {"adsp", "lpass"};
+		static char * const sensor_names[] = {"slpi", "dsps"};
+		static char * const wcnss_names[] = {"wcnss"};
+
+		lhdr = smp2p_get_out_item(rpid, &negotiation_state);
+		UT_ASSERT_PTR(NULL, !=, lhdr);
+		UT_ASSERT_INT(SMP2P_EDGE_STATE_OPENED, ==, negotiation_state);
+
+		rhdr = smp2p_get_in_item(rpid);
+		UT_ASSERT_PTR(NULL, !=, rhdr);
+
+		/* get initial state of SSR flags */
+		if (SMP2P_GET_FEATURES(rhdr->feature_version)
+				& SMP2P_FEATURE_SSR_ACK)
+			ssr_ack_enabled = true;
+		else
+			ssr_ack_enabled = false;
+
+		ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr->flags);
+		UT_ASSERT_INT(ssr_done_start, ==,
+				SMP2P_GET_RESTART_ACK(lhdr->flags));
+
+		/* trigger restart */
+		name_index = 0;
+		while (!ssr_success) {
+			switch (rpid) {
+			case SMP2P_MODEM_PROC:
+				name = get_ssr_name_for_proc(mpss_names,
+						ARRAY_SIZE(mpss_names),
+						name_index);
+				break;
+			case SMP2P_AUDIO_PROC:
+				name = get_ssr_name_for_proc(lpass_names,
+						ARRAY_SIZE(lpass_names),
+						name_index);
+				break;
+			case SMP2P_SENSOR_PROC:
+				name = get_ssr_name_for_proc(sensor_names,
+						ARRAY_SIZE(sensor_names),
+						name_index);
+				break;
+			case SMP2P_WIRELESS_PROC:
+				name = get_ssr_name_for_proc(wcnss_names,
+						ARRAY_SIZE(wcnss_names),
+						name_index);
+				break;
+			default:
+				pr_err("%s: Invalid proc ID %d given for ssr\n",
+						__func__, rpid);
+			}
+
+			if (!name) {
+				seq_puts(s, "\tSSR failed; check subsys name table\n");
+				failed = true;
+				break;
+			}
+
+			seq_printf(s, "Restarting '%s'\n", name);
+			ret = subsystem_restart(name);
+			if (ret == -ENODEV) {
+				seq_puts(s, "\tSSR call failed\n");
+				++name_index;
+				continue;
+			}
+			ssr_success = true;
+		}
+		if (failed)
+			break;
+
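+		/* give the subsystem time to restart and renegotiate SMP2P */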
+		msleep(10 * 1000);
+
+		/* verify ack signaling */
+		if (ssr_ack_enabled) {
+			ssr_done_start ^= 1;
+			UT_ASSERT_INT(ssr_done_start, ==,
+					SMP2P_GET_RESTART_ACK(lhdr->flags));
+			UT_ASSERT_INT(ssr_done_start, ==,
+					SMP2P_GET_RESTART_DONE(rhdr->flags));
+			UT_ASSERT_INT(0, ==,
+					SMP2P_GET_RESTART_DONE(lhdr->flags));
+			seq_puts(s, "\tSSR ACK Enabled and Toggled\n");
+		} else {
+			UT_ASSERT_INT(0, ==,
+					SMP2P_GET_RESTART_DONE(lhdr->flags));
+			UT_ASSERT_INT(0, ==,
+					SMP2P_GET_RESTART_ACK(lhdr->flags));
+
+			UT_ASSERT_INT(0, ==,
+					SMP2P_GET_RESTART_DONE(rhdr->flags));
+			UT_ASSERT_INT(0, ==,
+					SMP2P_GET_RESTART_ACK(rhdr->flags));
+			seq_puts(s, "\tSSR ACK Disabled\n");
+		}
+
+		seq_puts(s, "\tOK\n");
+	} while (0);
+
+	if (failed) {
+		pr_err("%s: Failed\n", __func__);
+		seq_puts(s, "\tFailed\n");
+	}
+}
+
+/**
+ * smp2p_ut_remote_ssr_ack - Verify SSR Done/ACK Feature
+ *
+ * @s: pointer to output file
+ *
+ * Triggers SSR for each subsystem.
+ */
+static void smp2p_ut_remote_ssr_ack(struct seq_file *s)
+{
+	struct smp2p_interrupt_config *int_cfg;
+	int pid;
+
+	int_cfg = smp2p_get_interrupt_config();
+	if (!int_cfg) {
+		seq_puts(s, "Remote processor config unavailable\n");
+		return;
+	}
+
+	for (pid = 0; pid < SMP2P_NUM_PROCS; ++pid) {
+		if (!int_cfg[pid].is_configured)
+			continue;
+
+		msm_smp2p_deinit_rmt_lpb_proc(pid);
+		smp2p_ut_remotesubsys_ssr_ack(s, pid, &int_cfg[pid]);
+		msm_smp2p_init_rmt_lpb_proc(pid);
+	}
+}
+
+static struct dentry *dent;
+
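+/*
+ * The show() callback registered via smp2p_debug_create() is stored in the
+ * file's inode->i_private and is handed back here through s->private.
+ */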
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+void smp2p_debug_create(const char *name,
+			 void (*show)(struct seq_file *))
+{
+	struct dentry *file;
+
+	file = debugfs_create_file(name, 0444, dent, show, &debug_ops);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+void smp2p_debug_create_u32(const char *name, uint32_t *value)
+{
+	struct dentry *file;
+
+	file = debugfs_create_u32(name, 0644, dent, value);
+	if (!file)
+		pr_err("%s: unable to create file '%s'\n", __func__, name);
+}
+
+static int __init smp2p_debugfs_init(void)
+{
+	dent = debugfs_create_dir("smp2p_test", NULL);
+	if (IS_ERR(dent))
+		return PTR_ERR(dent);
+
+	/*
+	 * Add Unit Test entries.
+	 *
+	 * The idea with unit tests is that you can run all of them
+	 * from ADB shell by doing:
+	 *  adb shell
+	 *  cat ut*
+	 *
+	 * And if particular tests fail, you can then repeatedly run the
+	 * failing tests as you debug and resolve the failing test.
+	 */
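+	/*
+	 * For example, assuming debugfs is mounted at /sys/kernel/debug:
+	 *
+	 *  cd /sys/kernel/debug/smp2p_test
+	 *  cat ut_local_basic       # run one test
+	 *  cat ut_*                 # run every test
+	 */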
+	smp2p_debug_create("ut_local_basic",
+			smp2p_ut_local_basic);
+	smp2p_debug_create("ut_local_late_open",
+			smp2p_ut_local_late_open);
+	smp2p_debug_create("ut_local_early_open",
+			smp2p_ut_local_early_open);
+	smp2p_debug_create("ut_mock_loopback",
+			smp2p_ut_mock_loopback);
+	smp2p_debug_create("ut_remote_inout",
+			smp2p_ut_remote_inout);
+	smp2p_debug_create("ut_local_in_max_entries",
+		smp2p_ut_local_in_max_entries);
+	smp2p_debug_create("ut_remote_out_max_entries",
+			smp2p_ut_remote_out_max_entries);
+	smp2p_debug_create("ut_local_in_multiple",
+			smp2p_ut_local_in_multiple);
+	smp2p_debug_create("ut_local_ssr_ack",
+			smp2p_ut_local_ssr_ack);
+	smp2p_debug_create("ut_remote_ssr_ack",
+			smp2p_ut_remote_ssr_ack);
+
+	return 0;
+}
+module_init(smp2p_debugfs_init);
diff --git a/drivers/soc/qcom/smp2p_test_common.h b/drivers/soc/qcom/smp2p_test_common.h
new file mode 100644
index 0000000..0d22fec
--- /dev/null
+++ b/drivers/soc/qcom/smp2p_test_common.h
@@ -0,0 +1,214 @@
+/* drivers/soc/qcom/smp2p_test_common.h
+ *
+ * Copyright (c) 2013-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SMP2P_TEST_COMMON_H_
+#define _SMP2P_TEST_COMMON_H_
+
+#include <linux/debugfs.h>
+
+/**
+ * Unit test assertion for logging test cases.
+ *
+ * @a lval
+ * @b rval
+ * @cmp comparison operator
+ *
+ * Assertion fails if (@a cmp @b) is not true which then
+ * logs the function and line number where the error occurred
+ * along with the values of @a and @b.
+ *
+ * Assumes that the following local variables exist:
+ * @s - sequential output file pointer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT(a, cmp, b) \
+	{ \
+	int a_tmp = (a); \
+	int b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
+		seq_printf(s, "%s:%d Fail: " #a "(%d) " #cmp " " #b "(%d)\n", \
+				__func__, __LINE__, \
+				a_tmp, b_tmp); \
+		failed = 1; \
+		break; \
+	} \
+	}
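+
+/*
+ * Usage sketch (illustrative only): the assertions assume they run inside a
+ * do { ... } while (0) block in a function that declares 's' and 'failed':
+ *
+ *	do {
+ *		ret = msm_smp2p_out_write(handle, test_request);
+ *		UT_ASSERT_INT(ret, ==, 0);
+ *		seq_puts(s, "\tOK\n");
+ *	} while (0);
+ *	if (failed)
+ *		seq_puts(s, "\tFailed\n");
+ */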
+
+#define UT_ASSERT_PTR(a, cmp, b) \
+	{ \
+	void *a_tmp = (a); \
+	void *b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
+		seq_printf(s, "%s:%d Fail: " #a "(%pK) " #cmp \
+				" " #b "(%pK)\n", \
+				__func__, __LINE__, \
+				a_tmp, b_tmp); \
+		failed = 1; \
+		break; \
+	} \
+	}
+
+#define UT_ASSERT_UINT(a, cmp, b) \
+	{ \
+	unsigned int a_tmp = (a); \
+	unsigned int b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
+		seq_printf(s, "%s:%d Fail: " #a "(%u) " #cmp " " #b "(%u)\n", \
+				__func__, __LINE__, \
+				a_tmp, b_tmp); \
+		failed = 1; \
+		break; \
+	} \
+	}
+
+#define UT_ASSERT_HEX(a, cmp, b) \
+	{ \
+	unsigned int a_tmp = (a); \
+	unsigned int b_tmp = (b); \
+	if (!((a_tmp)cmp(b_tmp))) { \
+		seq_printf(s, "%s:%d Fail: " #a "(%x) " #cmp " " #b "(%x)\n", \
+				__func__, __LINE__, \
+				a_tmp, b_tmp); \
+		failed = 1; \
+		break; \
+	} \
+	}
+
+/**
+ * In-range unit test assertion for test cases.
+ *
+ * @a lval
+ * @minv Minimum value
+ * @maxv Maximum value
+ *
+ * Assertion fails if @a is not in the inclusive range [@minv, @maxv],
+ * i.e. if (@a < @minv) or (@a > @maxv).  In the failure case, the macro
+ * logs the function and line number where the error occurred along
+ * with the values of @a and @minv, @maxv.
+ *
+ * Assumes that the following local variables exist:
+ * @s - sequential output file pointer
+ * @failed - set to true if test fails
+ */
+#define UT_ASSERT_INT_IN_RANGE(a, minv, maxv) \
+	{ \
+	int a_tmp = (a); \
+	int minv_tmp = (minv); \
+	int maxv_tmp = (maxv); \
+	if (((a_tmp) < (minv_tmp)) || ((a_tmp) > (maxv_tmp))) { \
+		seq_printf(s, "%s:%d Fail: " #a "(%d) < " #minv "(%d) or " \
+				 #a "(%d) > " #maxv "(%d)\n", \
+				__func__, __LINE__, \
+				a_tmp, minv_tmp, a_tmp, maxv_tmp); \
+		failed = 1; \
+		break; \
+	} \
+	}
+
+/* Structure to track state changes for the notifier callback. */
+struct mock_cb_data {
+	bool initialized;
+	spinlock_t lock;
+	struct notifier_block nb;
+
+	/* events */
+	struct completion cb_completion;
+	int cb_count;
+	int event_open;
+	int event_entry_update;
+	struct msm_smp2p_update_notif entry_data;
+};
+
+void smp2p_debug_create(const char *name, void (*show)(struct seq_file *));
+void smp2p_debug_create_u32(const char *name, uint32_t *value);
+static inline int smp2p_test_notify(struct notifier_block *self,
+	unsigned long event, void *data);
+
+/**
+ * Reset mock callback data to default values.
+ *
+ * @cb:  Mock callback data
+ */
+static inline void mock_cb_data_reset(struct mock_cb_data *cb)
+{
+	reinit_completion(&cb->cb_completion);
+	cb->cb_count = 0;
+	cb->event_open = 0;
+	cb->event_entry_update = 0;
+	memset(&cb->entry_data, 0,
+		sizeof(struct msm_smp2p_update_notif));
+}
+
+/**
+ * Initialize mock callback data.
+ *
+ * @cb:  Mock callback data
+ */
+static inline void mock_cb_data_init(struct mock_cb_data *cb)
+{
+	if (!cb->initialized) {
+		init_completion(&cb->cb_completion);
+		spin_lock_init(&cb->lock);
+		cb->initialized = true;
+		cb->nb.notifier_call = smp2p_test_notify;
+		memset(&cb->entry_data, 0,
+			sizeof(struct msm_smp2p_update_notif));
+	}
+	mock_cb_data_reset(cb);
+}
+
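+/*
+ * Typical flow (sketch): a test declares a static struct mock_cb_data,
+ * calls mock_cb_data_init() once, passes &cb->nb to msm_smp2p_in_register()
+ * or msm_smp2p_out_open(), and then waits on cb->cb_completion for the
+ * notifier below to fire.
+ */
+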
+/**
+ * Notifier function passed into SMP2P for testing.
+ *
+ * @self:       Pointer to calling notifier block
+ * @event:	    Event
+ * @data:       Event-specific data
+ * @returns:    0
+ */
+static inline int smp2p_test_notify(struct notifier_block *self,
+		unsigned long event, void *data)
+{
+	struct mock_cb_data *cb_data_ptr;
+	unsigned long flags;
+
+	cb_data_ptr = container_of(self, struct mock_cb_data, nb);
+
+	spin_lock_irqsave(&cb_data_ptr->lock, flags);
+
+	switch (event) {
+	case SMP2P_OPEN:
+		++cb_data_ptr->event_open;
+		if (data) {
+			cb_data_ptr->entry_data =
+			*(struct msm_smp2p_update_notif *)(data);
+		}
+		break;
+	case SMP2P_ENTRY_UPDATE:
+		++cb_data_ptr->event_entry_update;
+		if (data) {
+			cb_data_ptr->entry_data =
+			*(struct msm_smp2p_update_notif *)(data);
+		}
+		break;
+	default:
+		pr_err("%s Unknown event\n", __func__);
+		break;
+	}
+
+	++cb_data_ptr->cb_count;
+	complete(&cb_data_ptr->cb_completion);
+	spin_unlock_irqrestore(&cb_data_ptr->lock, flags);
+	return 0;
+}
+#endif /* _SMP2P_TEST_COMMON_H_ */
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
new file mode 100644
index 0000000..cca38aa
--- /dev/null
+++ b/drivers/soc/qcom/socinfo.c
@@ -0,0 +1,1521 @@
+/*
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+/*
+ * SOC Info Routines
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/export.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/sys_soc.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <asm/system_misc.h>
+
+#include <soc/qcom/socinfo.h>
+#include <soc/qcom/smem.h>
+#include <soc/qcom/boot_stats.h>
+
+#define BUILD_ID_LENGTH 32
+#define SMEM_IMAGE_VERSION_BLOCKS_COUNT 32
+#define SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE 128
+#define SMEM_IMAGE_VERSION_SIZE 4096
+#define SMEM_IMAGE_VERSION_NAME_SIZE 75
+#define SMEM_IMAGE_VERSION_VARIANT_SIZE 20
+#define SMEM_IMAGE_VERSION_VARIANT_OFFSET 75
+#define SMEM_IMAGE_VERSION_OEM_SIZE 32
+#define SMEM_IMAGE_VERSION_OEM_OFFSET 96
+#define SMEM_IMAGE_VERSION_PARTITION_APPS 10
+
+enum {
+	HW_PLATFORM_UNKNOWN = 0,
+	HW_PLATFORM_SURF    = 1,
+	HW_PLATFORM_FFA     = 2,
+	HW_PLATFORM_FLUID   = 3,
+	HW_PLATFORM_SVLTE_FFA	= 4,
+	HW_PLATFORM_SVLTE_SURF	= 5,
+	HW_PLATFORM_MTP_MDM = 7,
+	HW_PLATFORM_MTP  = 8,
+	HW_PLATFORM_LIQUID  = 9,
+	/* Dragonboard platform id is assigned as 10 in CDT */
+	HW_PLATFORM_DRAGON	= 10,
+	HW_PLATFORM_QRD	= 11,
+	HW_PLATFORM_HRD	= 13,
+	HW_PLATFORM_DTV	= 14,
+	HW_PLATFORM_RCM	= 21,
+	HW_PLATFORM_STP = 23,
+	HW_PLATFORM_SBC = 24,
+	HW_PLATFORM_INVALID
+};
+
+const char *hw_platform[] = {
+	[HW_PLATFORM_UNKNOWN] = "Unknown",
+	[HW_PLATFORM_SURF] = "Surf",
+	[HW_PLATFORM_FFA] = "FFA",
+	[HW_PLATFORM_FLUID] = "Fluid",
+	[HW_PLATFORM_SVLTE_FFA] = "SVLTE_FFA",
+	[HW_PLATFORM_SVLTE_SURF] = "SLVTE_SURF",
+	[HW_PLATFORM_MTP_MDM] = "MDM_MTP_NO_DISPLAY",
+	[HW_PLATFORM_MTP] = "MTP",
+	[HW_PLATFORM_RCM] = "RCM",
+	[HW_PLATFORM_LIQUID] = "Liquid",
+	[HW_PLATFORM_DRAGON] = "Dragon",
+	[HW_PLATFORM_QRD] = "QRD",
+	[HW_PLATFORM_HRD] = "HRD",
+	[HW_PLATFORM_DTV] = "DTV",
+	[HW_PLATFORM_STP] = "STP",
+	[HW_PLATFORM_SBC] = "SBC",
+};
+
+enum {
+	ACCESSORY_CHIP_UNKNOWN = 0,
+	ACCESSORY_CHIP_CHARM = 58,
+};
+
+enum {
+	PLATFORM_SUBTYPE_QRD = 0x0,
+	PLATFORM_SUBTYPE_SKUAA = 0x1,
+	PLATFORM_SUBTYPE_SKUF = 0x2,
+	PLATFORM_SUBTYPE_SKUAB = 0x3,
+	PLATFORM_SUBTYPE_SKUG = 0x5,
+	PLATFORM_SUBTYPE_QRD_INVALID,
+};
+
+const char *qrd_hw_platform_subtype[] = {
+	[PLATFORM_SUBTYPE_QRD] = "QRD",
+	[PLATFORM_SUBTYPE_SKUAA] = "SKUAA",
+	[PLATFORM_SUBTYPE_SKUF] = "SKUF",
+	[PLATFORM_SUBTYPE_SKUAB] = "SKUAB",
+	[PLATFORM_SUBTYPE_SKUG] = "SKUG",
+	[PLATFORM_SUBTYPE_QRD_INVALID] = "INVALID",
+};
+
+enum {
+	PLATFORM_SUBTYPE_UNKNOWN = 0x0,
+	PLATFORM_SUBTYPE_CHARM = 0x1,
+	PLATFORM_SUBTYPE_STRANGE = 0x2,
+	PLATFORM_SUBTYPE_STRANGE_2A = 0x3,
+	PLATFORM_SUBTYPE_INVALID,
+};
+
+const char *hw_platform_subtype[] = {
+	[PLATFORM_SUBTYPE_UNKNOWN] = "Unknown",
+	[PLATFORM_SUBTYPE_CHARM] = "charm",
+	[PLATFORM_SUBTYPE_STRANGE] = "strange",
+	[PLATFORM_SUBTYPE_STRANGE_2A] = "strange_2a",
+	[PLATFORM_SUBTYPE_INVALID] = "Invalid",
+};
+
+/* Used to parse shared memory.  Must match the modem. */
+struct socinfo_v0_1 {
+	uint32_t format;
+	uint32_t id;
+	uint32_t version;
+	char build_id[BUILD_ID_LENGTH];
+};
+
+struct socinfo_v0_2 {
+	struct socinfo_v0_1 v0_1;
+	uint32_t raw_id;
+	uint32_t raw_version;
+};
+
+struct socinfo_v0_3 {
+	struct socinfo_v0_2 v0_2;
+	uint32_t hw_platform;
+};
+
+struct socinfo_v0_4 {
+	struct socinfo_v0_3 v0_3;
+	uint32_t platform_version;
+};
+
+struct socinfo_v0_5 {
+	struct socinfo_v0_4 v0_4;
+	uint32_t accessory_chip;
+};
+
+struct socinfo_v0_6 {
+	struct socinfo_v0_5 v0_5;
+	uint32_t hw_platform_subtype;
+};
+
+struct socinfo_v0_7 {
+	struct socinfo_v0_6 v0_6;
+	uint32_t pmic_model;
+	uint32_t pmic_die_revision;
+};
+
+struct socinfo_v0_8 {
+	struct socinfo_v0_7 v0_7;
+	uint32_t pmic_model_1;
+	uint32_t pmic_die_revision_1;
+	uint32_t pmic_model_2;
+	uint32_t pmic_die_revision_2;
+};
+
+struct socinfo_v0_9 {
+	struct socinfo_v0_8 v0_8;
+	uint32_t foundry_id;
+};
+
+struct socinfo_v0_10 {
+	struct socinfo_v0_9 v0_9;
+	uint32_t serial_number;
+};
+
+struct socinfo_v0_11 {
+	struct socinfo_v0_10 v0_10;
+	uint32_t num_pmics;
+	uint32_t pmic_array_offset;
+};
+
+struct socinfo_v0_12 {
+	struct socinfo_v0_11 v0_11;
+	uint32_t chip_family;
+	uint32_t raw_device_family;
+	uint32_t raw_device_number;
+};
+
+static union {
+	struct socinfo_v0_1 v0_1;
+	struct socinfo_v0_2 v0_2;
+	struct socinfo_v0_3 v0_3;
+	struct socinfo_v0_4 v0_4;
+	struct socinfo_v0_5 v0_5;
+	struct socinfo_v0_6 v0_6;
+	struct socinfo_v0_7 v0_7;
+	struct socinfo_v0_8 v0_8;
+	struct socinfo_v0_9 v0_9;
+	struct socinfo_v0_10 v0_10;
+	struct socinfo_v0_11 v0_11;
+	struct socinfo_v0_12 v0_12;
+} *socinfo;
+
+/* max socinfo format version supported */
+#define MAX_SOCINFO_FORMAT SOCINFO_VERSION(0, 12)
+
+static struct msm_soc_info cpu_of_id[] = {
+
+	/* 7x01 IDs */
+	[0]  = {MSM_CPU_UNKNOWN, "Unknown CPU"},
+	[1]  = {MSM_CPU_7X01, "MSM7X01"},
+	[16] = {MSM_CPU_7X01, "MSM7X01"},
+	[17] = {MSM_CPU_7X01, "MSM7X01"},
+	[18] = {MSM_CPU_7X01, "MSM7X01"},
+	[19] = {MSM_CPU_7X01, "MSM7X01"},
+	[23] = {MSM_CPU_7X01, "MSM7X01"},
+	[25] = {MSM_CPU_7X01, "MSM7X01"},
+	[26] = {MSM_CPU_7X01, "MSM7X01"},
+	[32] = {MSM_CPU_7X01, "MSM7X01"},
+	[33] = {MSM_CPU_7X01, "MSM7X01"},
+	[34] = {MSM_CPU_7X01, "MSM7X01"},
+	[35] = {MSM_CPU_7X01, "MSM7X01"},
+
+	/* 7x25 IDs */
+	[20] = {MSM_CPU_7X25, "MSM7X25"},
+	[21] = {MSM_CPU_7X25, "MSM7X25"},
+	[24] = {MSM_CPU_7X25, "MSM7X25"},
+	[27] = {MSM_CPU_7X25, "MSM7X25"},
+	[39] = {MSM_CPU_7X25, "MSM7X25"},
+	[40] = {MSM_CPU_7X25, "MSM7X25"},
+	[41] = {MSM_CPU_7X25, "MSM7X25"},
+	[42] = {MSM_CPU_7X25, "MSM7X25"},
+	[62] = {MSM_CPU_7X25, "MSM7X25"},
+	[63] = {MSM_CPU_7X25, "MSM7X25"},
+	[66] = {MSM_CPU_7X25, "MSM7X25"},
+
+	/* 7x27 IDs */
+	[43] = {MSM_CPU_7X27, "MSM7X27"},
+	[44] = {MSM_CPU_7X27, "MSM7X27"},
+	[61] = {MSM_CPU_7X27, "MSM7X27"},
+	[67] = {MSM_CPU_7X27, "MSM7X27"},
+	[68] = {MSM_CPU_7X27, "MSM7X27"},
+	[69] = {MSM_CPU_7X27, "MSM7X27"},
+
+	/* 8x50 IDs */
+	[30] = {MSM_CPU_8X50, "MSM8X50"},
+	[36] = {MSM_CPU_8X50, "MSM8X50"},
+	[37] = {MSM_CPU_8X50, "MSM8X50"},
+	[38] = {MSM_CPU_8X50, "MSM8X50"},
+
+	/* 7x30 IDs */
+	[59] = {MSM_CPU_7X30, "MSM7X30"},
+	[60] = {MSM_CPU_7X30, "MSM7X30"},
+
+	/* 8x55 IDs */
+	[74] = {MSM_CPU_8X55, "MSM8X55"},
+	[75] = {MSM_CPU_8X55, "MSM8X55"},
+	[85] = {MSM_CPU_8X55, "MSM8X55"},
+
+	/* 8x60 IDs */
+	[70] = {MSM_CPU_8X60, "MSM8X60"},
+	[71] = {MSM_CPU_8X60, "MSM8X60"},
+	[86] = {MSM_CPU_8X60, "MSM8X60"},
+
+	/* 8960 IDs */
+	[87] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 7x25A IDs */
+	[88] = {MSM_CPU_7X25A, "MSM7X25A"},
+	[89] = {MSM_CPU_7X25A, "MSM7X25A"},
+	[96] = {MSM_CPU_7X25A, "MSM7X25A"},
+
+	/* 7x27A IDs */
+	[90] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[91] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[92] = {MSM_CPU_7X27A, "MSM7X27A"},
+	[97] = {MSM_CPU_7X27A, "MSM7X27A"},
+
+	/* FSM9xxx ID */
+	[94] = {FSM_CPU_9XXX, "FSM9XXX"},
+	[95] = {FSM_CPU_9XXX, "FSM9XXX"},
+
+	/*  7x25AA ID */
+	[98] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+	[99] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+	[100] = {MSM_CPU_7X25AA, "MSM7X25AA"},
+
+	/*  7x27AA ID */
+	[101] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[102] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[103] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+	[136] = {MSM_CPU_7X27AA, "MSM7X27AA"},
+
+	/* 9x15 ID */
+	[104] = {MSM_CPU_9615, "MSM9615"},
+	[105] = {MSM_CPU_9615, "MSM9615"},
+	[106] = {MSM_CPU_9615, "MSM9615"},
+	[107] = {MSM_CPU_9615, "MSM9615"},
+	[171] = {MSM_CPU_9615, "MSM9615"},
+
+	/* 8064 IDs */
+	[109] = {MSM_CPU_8064, "APQ8064"},
+
+	/* 8930 IDs */
+	[116] = {MSM_CPU_8930, "MSM8930"},
+	[117] = {MSM_CPU_8930, "MSM8930"},
+	[118] = {MSM_CPU_8930, "MSM8930"},
+	[119] = {MSM_CPU_8930, "MSM8930"},
+	[179] = {MSM_CPU_8930, "MSM8930"},
+
+	/* 8627 IDs */
+	[120] = {MSM_CPU_8627, "MSM8627"},
+	[121] = {MSM_CPU_8627, "MSM8627"},
+
+	/* 8660A ID */
+	[122] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 8260A ID */
+	[123] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 8060A ID */
+	[124] = {MSM_CPU_8960, "MSM8960"},
+
+	/* 8974 IDs */
+	[126] = {MSM_CPU_8974, "MSM8974"},
+	[184] = {MSM_CPU_8974, "MSM8974"},
+	[185] = {MSM_CPU_8974, "MSM8974"},
+	[186] = {MSM_CPU_8974, "MSM8974"},
+
+	/* 8974AA IDs */
+	[208] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[211] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[214] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+	[217] = {MSM_CPU_8974PRO_AA, "MSM8974PRO-AA"},
+
+	/* 8974AB IDs */
+	[209] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[212] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[215] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+	[218] = {MSM_CPU_8974PRO_AB, "MSM8974PRO-AB"},
+
+	/* 8974AC IDs */
+	[194] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[210] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[213] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+	[216] = {MSM_CPU_8974PRO_AC, "MSM8974PRO-AC"},
+
+	/* 8625 IDs */
+	[127] = {MSM_CPU_8625, "MSM8625"},
+	[128] = {MSM_CPU_8625, "MSM8625"},
+	[129] = {MSM_CPU_8625, "MSM8625"},
+	[137] = {MSM_CPU_8625, "MSM8625"},
+	[167] = {MSM_CPU_8625, "MSM8625"},
+
+	/* 8064 MPQ ID */
+	[130] = {MSM_CPU_8064, "APQ8064"},
+
+	/* 7x25AB IDs */
+	[131] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[132] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[133] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+	[135] = {MSM_CPU_7X25AB, "MSM7X25AB"},
+
+	/* 9625 IDs */
+	[134] = {MSM_CPU_9625, "MSM9625"},
+	[148] = {MSM_CPU_9625, "MSM9625"},
+	[149] = {MSM_CPU_9625, "MSM9625"},
+	[150] = {MSM_CPU_9625, "MSM9625"},
+	[151] = {MSM_CPU_9625, "MSM9625"},
+	[152] = {MSM_CPU_9625, "MSM9625"},
+	[173] = {MSM_CPU_9625, "MSM9625"},
+	[174] = {MSM_CPU_9625, "MSM9625"},
+	[175] = {MSM_CPU_9625, "MSM9625"},
+
+	/* 8960AB IDs */
+	[138] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[139] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[140] = {MSM_CPU_8960AB, "MSM8960AB"},
+	[141] = {MSM_CPU_8960AB, "MSM8960AB"},
+
+	/* 8930AA IDs */
+	[142] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[143] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[144] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[160] = {MSM_CPU_8930AA, "MSM8930AA"},
+	[180] = {MSM_CPU_8930AA, "MSM8930AA"},
+
+	/* 8226 IDs */
+	[145] = {MSM_CPU_8226, "MSM8626"},
+	[158] = {MSM_CPU_8226, "MSM8226"},
+	[159] = {MSM_CPU_8226, "MSM8526"},
+	[198] = {MSM_CPU_8226, "MSM8126"},
+	[199] = {MSM_CPU_8226, "APQ8026"},
+	[200] = {MSM_CPU_8226, "MSM8926"},
+	[205] = {MSM_CPU_8226, "MSM8326"},
+	[219] = {MSM_CPU_8226, "APQ8028"},
+	[220] = {MSM_CPU_8226, "MSM8128"},
+	[221] = {MSM_CPU_8226, "MSM8228"},
+	[222] = {MSM_CPU_8226, "MSM8528"},
+	[223] = {MSM_CPU_8226, "MSM8628"},
+	[224] = {MSM_CPU_8226, "MSM8928"},
+
+	/* 8610 IDs */
+	[147] = {MSM_CPU_8610, "MSM8610"},
+	[161] = {MSM_CPU_8610, "MSM8110"},
+	[162] = {MSM_CPU_8610, "MSM8210"},
+	[163] = {MSM_CPU_8610, "MSM8810"},
+	[164] = {MSM_CPU_8610, "MSM8212"},
+	[165] = {MSM_CPU_8610, "MSM8612"},
+	[166] = {MSM_CPU_8610, "MSM8112"},
+	[225] = {MSM_CPU_8610, "MSM8510"},
+	[226] = {MSM_CPU_8610, "MSM8512"},
+
+	/* 8064AB IDs */
+	[153] = {MSM_CPU_8064AB, "APQ8064AB"},
+
+	/* 8930AB IDs */
+	[154] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[155] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[156] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[157] = {MSM_CPU_8930AB, "MSM8930AB"},
+	[181] = {MSM_CPU_8930AB, "MSM8930AB"},
+
+	/* 8625Q IDs */
+	[168] = {MSM_CPU_8625Q, "MSM8225Q"},
+	[169] = {MSM_CPU_8625Q, "MSM8625Q"},
+	[170] = {MSM_CPU_8625Q, "MSM8125Q"},
+
+	/* 8064AA IDs */
+	[172] = {MSM_CPU_8064AA, "APQ8064AA"},
+
+	/* 8084 IDs */
+	[178] = {MSM_CPU_8084, "APQ8084"},
+
+	/* 9630 IDs */
+	[187] = {MSM_CPU_9630, "MDM9630"},
+	[227] = {MSM_CPU_9630, "MDM9630"},
+	[228] = {MSM_CPU_9630, "MDM9630"},
+	[229] = {MSM_CPU_9630, "MDM9630"},
+	[230] = {MSM_CPU_9630, "MDM9630"},
+	[231] = {MSM_CPU_9630, "MDM9630"},
+
+	/* FSM9900 ID */
+	[188] = {FSM_CPU_9900, "FSM9900"},
+	[189] = {FSM_CPU_9900, "FSM9900"},
+	[190] = {FSM_CPU_9900, "FSM9900"},
+	[191] = {FSM_CPU_9900, "FSM9900"},
+	[192] = {FSM_CPU_9900, "FSM9900"},
+	[193] = {FSM_CPU_9900, "FSM9900"},
+
+	/* 8916 IDs */
+	[206] = {MSM_CPU_8916, "MSM8916"},
+	[247] = {MSM_CPU_8916, "APQ8016"},
+	[248] = {MSM_CPU_8916, "MSM8216"},
+	[249] = {MSM_CPU_8916, "MSM8116"},
+	[250] = {MSM_CPU_8916, "MSM8616"},
+
+	/* 8936 IDs */
+	[233] = {MSM_CPU_8936, "MSM8936"},
+	[240] = {MSM_CPU_8936, "APQ8036"},
+	[242] = {MSM_CPU_8936, "MSM8236"},
+
+	/* 8939 IDs */
+	[239] = {MSM_CPU_8939, "MSM8939"},
+	[241] = {MSM_CPU_8939, "APQ8039"},
+	[263] = {MSM_CPU_8939, "MSM8239"},
+
+	/* 8909 IDs */
+	[245] = {MSM_CPU_8909, "MSM8909"},
+	[258] = {MSM_CPU_8909, "MSM8209"},
+	[259] = {MSM_CPU_8909, "MSM8208"},
+	[265] = {MSM_CPU_8909, "APQ8009"},
+	[260] = {MSM_CPU_8909, "MDMFERRUM"},
+	[261] = {MSM_CPU_8909, "MDMFERRUM"},
+	[262] = {MSM_CPU_8909, "MDMFERRUM"},
+	[300] = {MSM_CPU_8909, "MSM8909W"},
+	[301] = {MSM_CPU_8909, "APQ8009W"},
+
+	/* ZIRC IDs */
+	[234] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[235] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[236] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[237] = {MSM_CPU_ZIRC, "MSMZIRC"},
+	[238] = {MSM_CPU_ZIRC, "MSMZIRC"},
+
+	/* 8994 ID */
+	[207] = {MSM_CPU_8994, "MSM8994"},
+	[253] = {MSM_CPU_8994, "APQ8094"},
+
+	/* 8992 ID */
+	[251] = {MSM_CPU_8992, "MSM8992"},
+
+	/* FSM9010 ID */
+	[254] = {FSM_CPU_9010, "FSM9010"},
+	[255] = {FSM_CPU_9010, "FSM9010"},
+	[256] = {FSM_CPU_9010, "FSM9010"},
+	[257] = {FSM_CPU_9010, "FSM9010"},
+
+	/* Tellurium ID */
+	[264] = {MSM_CPU_TELLURIUM, "MSMTELLURIUM"},
+
+	/* 8996 IDs */
+	[246] = {MSM_CPU_8996, "MSM8996"},
+	[310] = {MSM_CPU_8996, "MSM8996"},
+	[311] = {MSM_CPU_8996, "APQ8096"},
+	[291] = {MSM_CPU_8996, "APQ8096"},
+	[305] = {MSM_CPU_8996, "MSM8996pro"},
+	[312] = {MSM_CPU_8996, "APQ8096pro"},
+
+	/* 8976 ID */
+	[266] = {MSM_CPU_8976, "MSM8976"},
+
+	/* 8929 IDs */
+	[268] = {MSM_CPU_8929, "MSM8929"},
+	[269] = {MSM_CPU_8929, "MSM8629"},
+	[270] = {MSM_CPU_8929, "MSM8229"},
+	[271] = {MSM_CPU_8929, "APQ8029"},
+
+	/* Cobalt IDs */
+	[292] = {MSM_CPU_COBALT, "MSMCOBALT"},
+	[319] = {MSM_CPU_COBALT, "APQCOBALT"},
+
+	/* Hamster ID */
+	[306] = {MSM_CPU_HAMSTER, "MSMHAMSTER"},
+
+	/* falcon ID */
+	[317] = {MSM_CPU_FALCON, "MSMFALCON"},
+
+	/* Skunk ID */
+	[321] = {MSM_CPU_SKUNK, "MSMSKUNK"},
+
+	/* Uninitialized IDs are not known to run Linux.
+	 * MSM_CPU_UNKNOWN is set to 0 to ensure these IDs are
+	 * considered as unknown CPU.
+	 */
+};
+
+static enum msm_cpu cur_cpu;
+static int current_image;
+static uint32_t socinfo_format;
+
+static struct socinfo_v0_1 dummy_socinfo = {
+	.format = SOCINFO_VERSION(0, 1),
+	.version = 1,
+};
+
+uint32_t socinfo_get_id(void)
+{
+	return (socinfo) ? socinfo->v0_1.id : 0;
+}
+EXPORT_SYMBOL_GPL(socinfo_get_id);
+
+static char *socinfo_get_id_string(void)
+{
+	return (socinfo) ? cpu_of_id[socinfo->v0_1.id].soc_id_string : NULL;
+}
+
+uint32_t socinfo_get_version(void)
+{
+	return (socinfo) ? socinfo->v0_1.version : 0;
+}
+
+char *socinfo_get_build_id(void)
+{
+	return (socinfo) ? socinfo->v0_1.build_id : NULL;
+}
+
+static char *msm_read_hardware_id(void)
+{
+	static char msm_soc_str[256] = "Qualcomm Technologies, Inc ";
+	static bool string_generated;
+	int ret = 0;
+
+	if (string_generated)
+		return msm_soc_str;
+	if (!socinfo)
+		goto err_path;
+	if (!cpu_of_id[socinfo->v0_1.id].soc_id_string)
+		goto err_path;
+
+	ret = strlcat(msm_soc_str, cpu_of_id[socinfo->v0_1.id].soc_id_string,
+			sizeof(msm_soc_str));
+	if (ret > sizeof(msm_soc_str))
+		goto err_path;
+
+	string_generated = true;
+	return msm_soc_str;
+err_path:
+	return "UNKNOWN SOC TYPE";
+}
+
+uint32_t socinfo_get_raw_id(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 2) ?
+			socinfo->v0_2.raw_id : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_raw_version(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 2) ?
+			socinfo->v0_2.raw_version : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_platform_type(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 3) ?
+			socinfo->v0_3.hw_platform : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_platform_version(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 4) ?
+			socinfo->v0_4.platform_version : 0)
+		: 0;
+}
+
+/*
+ * This information is directly encoded by the machine id;
+ * thus no external callers rely on it at the moment.
+ */
+static uint32_t socinfo_get_accessory_chip(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 5) ?
+			socinfo->v0_5.accessory_chip : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_platform_subtype(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 6) ?
+			socinfo->v0_6.hw_platform_subtype : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_foundry_id(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 9) ?
+			socinfo->v0_9.foundry_id : 0)
+		: 0;
+}
+
+uint32_t socinfo_get_serial_number(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 10) ?
+			socinfo->v0_10.serial_number : 0)
+		: 0;
+}
+EXPORT_SYMBOL(socinfo_get_serial_number);
+
+static uint32_t socinfo_get_chip_family(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 12) ?
+			socinfo->v0_12.chip_family : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_raw_device_family(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 12) ?
+			socinfo->v0_12.raw_device_family : 0)
+		: 0;
+}
+
+static uint32_t socinfo_get_raw_device_number(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 12) ?
+			socinfo->v0_12.raw_device_number : 0)
+		: 0;
+}
+
+enum pmic_model socinfo_get_pmic_model(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 7) ?
+			socinfo->v0_7.pmic_model : PMIC_MODEL_UNKNOWN)
+		: PMIC_MODEL_UNKNOWN;
+}
+
+uint32_t socinfo_get_pmic_die_revision(void)
+{
+	return socinfo ?
+		(socinfo_format >= SOCINFO_VERSION(0, 7) ?
+			socinfo->v0_7.pmic_die_revision : 0)
+		: 0;
+}
+
+static char *socinfo_get_image_version_base_address(void)
+{
+	return smem_find(SMEM_IMAGE_VERSION_TABLE,
+				SMEM_IMAGE_VERSION_SIZE, 0, SMEM_ANY_HOST_FLAG);
+}
+
+enum msm_cpu socinfo_get_msm_cpu(void)
+{
+	return cur_cpu;
+}
+EXPORT_SYMBOL_GPL(socinfo_get_msm_cpu);
+
+static ssize_t
+msm_get_vendor(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "Qualcomm\n");
+}
+
+static ssize_t
+msm_get_raw_id(struct device *dev,
+		struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_raw_id());
+}
+
+static ssize_t
+msm_get_raw_version(struct device *dev,
+		     struct device_attribute *attr,
+		     char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_raw_version());
+}
+
+static ssize_t
+msm_get_build_id(struct device *dev,
+		   struct device_attribute *attr,
+		   char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			socinfo_get_build_id());
+}
+
+static ssize_t
+msm_get_hw_platform(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_type;
+
+	hw_type = socinfo_get_platform_type();
+
+	return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			hw_platform[hw_type]);
+}
+
+static ssize_t
+msm_get_platform_version(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_platform_version());
+}
+
+static ssize_t
+msm_get_accessory_chip(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_accessory_chip());
+}
+
+static ssize_t
+msm_get_platform_subtype(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype;
+
+	hw_subtype = socinfo_get_platform_subtype();
+	if (socinfo_get_platform_type() == HW_PLATFORM_QRD) {
+		if (hw_subtype >= PLATFORM_SUBTYPE_QRD_INVALID) {
+			pr_err("Invalid hardware platform sub type for qrd found\n");
+			hw_subtype = PLATFORM_SUBTYPE_QRD_INVALID;
+		}
+		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+					qrd_hw_platform_subtype[hw_subtype]);
+	} else {
+		if (hw_subtype >= PLATFORM_SUBTYPE_INVALID) {
+			pr_err("Invalid hardware platform subtype\n");
+			hw_subtype = PLATFORM_SUBTYPE_INVALID;
+		}
+		return snprintf(buf, PAGE_SIZE, "%-.32s\n",
+			hw_platform_subtype[hw_subtype]);
+	}
+}
+
+static ssize_t
+msm_get_platform_subtype_id(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	uint32_t hw_subtype;
+
+	hw_subtype = socinfo_get_platform_subtype();
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		hw_subtype);
+}
+
+static ssize_t
+msm_get_foundry_id(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_foundry_id());
+}
+
+static ssize_t
+msm_get_serial_number(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_serial_number());
+}
+
+static ssize_t
+msm_get_chip_family(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_chip_family());
+}
+
+static ssize_t
+msm_get_raw_device_family(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_raw_device_family());
+}
+
+static ssize_t
+msm_get_raw_device_number(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "0x%x\n",
+		socinfo_get_raw_device_number());
+}
+
+static ssize_t
+msm_get_pmic_model(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+		socinfo_get_pmic_model());
+}
+
+static ssize_t
+msm_get_pmic_die_revision(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			 socinfo_get_pmic_die_revision());
+}
+
+static ssize_t
+msm_get_image_version(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	char *string_address;
+
+	string_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(string_address)) {
+		pr_err("Failed to get image version base address\n");
+		return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "Unknown");
+	}
+	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	return snprintf(buf, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s\n",
+			string_address);
+}
+
+static ssize_t
+msm_set_image_version(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf,
+			size_t count)
+{
+	char *store_address;
+
+	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+		return count;
+	store_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(store_address)) {
+		pr_err("Failed to get image version base address\n");
+		return count;
+	}
+	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	snprintf(store_address, SMEM_IMAGE_VERSION_NAME_SIZE, "%-.75s", buf);
+	return count;
+}
+
+static ssize_t
+msm_get_image_variant(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	char *string_address;
+
+	string_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(string_address)) {
+		pr_err("Failed to get image version base address\n");
+		return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE,
+		"Unknown");
+	}
+	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	string_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
+	return snprintf(buf, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s\n",
+			string_address);
+}
+
+static ssize_t
+msm_set_image_variant(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf,
+			size_t count)
+{
+	char *store_address;
+
+	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+		return count;
+	store_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(store_address)) {
+		pr_err("Failed to get image version base address\n");
+		return count;
+	}
+	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	store_address += SMEM_IMAGE_VERSION_VARIANT_OFFSET;
+	snprintf(store_address, SMEM_IMAGE_VERSION_VARIANT_SIZE, "%-.20s", buf);
+	return count;
+}
+
+static ssize_t
+msm_get_image_crm_version(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	char *string_address;
+
+	string_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(string_address)) {
+		pr_err("Failed to get image version base address\n");
+		return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "Unknown");
+	}
+	string_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	string_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
+	return snprintf(buf, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s\n",
+			string_address);
+}
+
+static ssize_t
+msm_set_image_crm_version(struct device *dev,
+			struct device_attribute *attr,
+			const char *buf,
+			size_t count)
+{
+	char *store_address;
+
+	if (current_image != SMEM_IMAGE_VERSION_PARTITION_APPS)
+		return count;
+	store_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(store_address)) {
+		pr_err("Failed to get image version base address\n");
+		return count;
+	}
+	store_address += current_image * SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	store_address += SMEM_IMAGE_VERSION_OEM_OFFSET;
+	snprintf(store_address, SMEM_IMAGE_VERSION_OEM_SIZE, "%-.32s", buf);
+	return count;
+}
+
+static ssize_t
+msm_get_image_number(struct device *dev,
+			struct device_attribute *attr,
+			char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+			current_image);
+}
+
+static ssize_t
+msm_select_image(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t count)
+{
+	int ret, digit;
+
+	ret = kstrtoint(buf, 10, &digit);
+	if (ret)
+		return ret;
+	if (digit >= 0 && digit < SMEM_IMAGE_VERSION_BLOCKS_COUNT)
+		current_image = digit;
+	else
+		current_image = 0;
+	return count;
+}
+
+static ssize_t
+msm_get_images(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int pos = 0;
+	int image;
+	char *image_address;
+
+	image_address = socinfo_get_image_version_base_address();
+	if (IS_ERR_OR_NULL(image_address))
+		return snprintf(buf, PAGE_SIZE, "Unavailable\n");
+
+	*buf = '\0';
+	for (image = 0; image < SMEM_IMAGE_VERSION_BLOCKS_COUNT; image++) {
+		if (*image_address == '\0') {
+			image_address += SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+			continue;
+		}
+
+		pos += snprintf(buf + pos, PAGE_SIZE - pos, "%d:\n",
+				image);
+		pos += snprintf(buf + pos, PAGE_SIZE - pos,
+				"\tCRM:\t\t%-.75s\n", image_address);
+		pos += snprintf(buf + pos, PAGE_SIZE - pos, "\tVariant:\t%-.20s\n",
+				image_address + SMEM_IMAGE_VERSION_VARIANT_OFFSET);
+		pos += snprintf(buf + pos, PAGE_SIZE - pos, "\tVersion:\t%-.32s\n\n",
+				image_address + SMEM_IMAGE_VERSION_OEM_OFFSET);
+
+		image_address += SMEM_IMAGE_VERSION_SINGLE_BLOCK_SIZE;
+	}
+
+	return pos;
+}
+
+static struct device_attribute msm_soc_attr_raw_version =
+	__ATTR(raw_version, S_IRUGO, msm_get_raw_version,  NULL);
+
+static struct device_attribute msm_soc_attr_raw_id =
+	__ATTR(raw_id, S_IRUGO, msm_get_raw_id,  NULL);
+
+static struct device_attribute msm_soc_attr_vendor =
+	__ATTR(vendor, S_IRUGO, msm_get_vendor,  NULL);
+
+static struct device_attribute msm_soc_attr_build_id =
+	__ATTR(build_id, S_IRUGO, msm_get_build_id, NULL);
+
+static struct device_attribute msm_soc_attr_hw_platform =
+	__ATTR(hw_platform, S_IRUGO, msm_get_hw_platform, NULL);
+
+static struct device_attribute msm_soc_attr_platform_version =
+	__ATTR(platform_version, S_IRUGO,
+			msm_get_platform_version, NULL);
+
+static struct device_attribute msm_soc_attr_accessory_chip =
+	__ATTR(accessory_chip, S_IRUGO,
+			msm_get_accessory_chip, NULL);
+
+static struct device_attribute msm_soc_attr_platform_subtype =
+	__ATTR(platform_subtype, S_IRUGO,
+			msm_get_platform_subtype, NULL);
+
+/* Platform Subtype String is being deprecated. Use Platform
+ * Subtype ID instead.
+ */
+static struct device_attribute msm_soc_attr_platform_subtype_id =
+	__ATTR(platform_subtype_id, S_IRUGO,
+			msm_get_platform_subtype_id, NULL);
+
+static struct device_attribute msm_soc_attr_foundry_id =
+	__ATTR(foundry_id, S_IRUGO,
+			msm_get_foundry_id, NULL);
+
+static struct device_attribute msm_soc_attr_serial_number =
+	__ATTR(serial_number, S_IRUGO,
+			msm_get_serial_number, NULL);
+
+static struct device_attribute msm_soc_attr_chip_family =
+	__ATTR(chip_family, S_IRUGO,
+			msm_get_chip_family, NULL);
+
+static struct device_attribute msm_soc_attr_raw_device_family =
+	__ATTR(raw_device_family, S_IRUGO,
+			msm_get_raw_device_family, NULL);
+
+static struct device_attribute msm_soc_attr_raw_device_number =
+	__ATTR(raw_device_number, S_IRUGO,
+			msm_get_raw_device_number, NULL);
+
+static struct device_attribute msm_soc_attr_pmic_model =
+	__ATTR(pmic_model, S_IRUGO,
+			msm_get_pmic_model, NULL);
+
+static struct device_attribute msm_soc_attr_pmic_die_revision =
+	__ATTR(pmic_die_revision, S_IRUGO,
+			msm_get_pmic_die_revision, NULL);
+
+static struct device_attribute image_version =
+	__ATTR(image_version, S_IRUGO | S_IWUSR,
+			msm_get_image_version, msm_set_image_version);
+
+static struct device_attribute image_variant =
+	__ATTR(image_variant, S_IRUGO | S_IWUSR,
+			msm_get_image_variant, msm_set_image_variant);
+
+static struct device_attribute image_crm_version =
+	__ATTR(image_crm_version, S_IRUGO | S_IWUSR,
+			msm_get_image_crm_version, msm_set_image_crm_version);
+
+static struct device_attribute select_image =
+	__ATTR(select_image, S_IRUGO | S_IWUSR,
+			msm_get_image_number, msm_select_image);
+
+static struct device_attribute images =
+	__ATTR(images, S_IRUGO, msm_get_images, NULL);
+
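+/*
+ * These attributes are created on the soc device registered in
+ * socinfo_init_sysfs() below, so they typically appear under
+ * /sys/devices/soc0/ (for example /sys/devices/soc0/build_id).
+ */
+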
+static void * __init setup_dummy_socinfo(void)
+{
+	if (early_machine_is_apq8084()) {
+		dummy_socinfo.id = 178;
+		strlcpy(dummy_socinfo.build_id, "apq8084 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_mdm9630()) {
+		dummy_socinfo.id = 187;
+		strlcpy(dummy_socinfo.build_id, "mdm9630 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8909()) {
+		dummy_socinfo.id = 245;
+		strlcpy(dummy_socinfo.build_id, "msm8909 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8916()) {
+		dummy_socinfo.id = 206;
+		strlcpy(dummy_socinfo.build_id, "msm8916 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8939()) {
+		dummy_socinfo.id = 239;
+		strlcpy(dummy_socinfo.build_id, "msm8939 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8936()) {
+		dummy_socinfo.id = 233;
+		strlcpy(dummy_socinfo.build_id, "msm8936 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmzirc()) {
+		dummy_socinfo.id = 238;
+		strlcpy(dummy_socinfo.build_id, "msmzirc - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8994()) {
+		dummy_socinfo.id = 207;
+		strlcpy(dummy_socinfo.build_id, "msm8994 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8992()) {
+		dummy_socinfo.id = 251;
+		strlcpy(dummy_socinfo.build_id, "msm8992 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8976()) {
+		dummy_socinfo.id = 266;
+		strlcpy(dummy_socinfo.build_id, "msm8976 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmtellurium()) {
+		dummy_socinfo.id = 264;
+		strlcpy(dummy_socinfo.build_id, "msmtellurium - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8996()) {
+		dummy_socinfo.id = 246;
+		strlcpy(dummy_socinfo.build_id, "msm8996 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8996_auto()) {
+		dummy_socinfo.id = 310;
+		strlcpy(dummy_socinfo.build_id, "msm8996-auto - ",
+		sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msm8929()) {
+		dummy_socinfo.id = 268;
+		strlcpy(dummy_socinfo.build_id, "msm8929 - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmcobalt()) {
+		dummy_socinfo.id = 292;
+		strlcpy(dummy_socinfo.build_id, "msmcobalt - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmhamster()) {
+		dummy_socinfo.id = 306;
+		strlcpy(dummy_socinfo.build_id, "msmhamster - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmfalcon()) {
+		dummy_socinfo.id = 317;
+		strlcpy(dummy_socinfo.build_id, "msmfalcon - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_apqcobalt()) {
+		dummy_socinfo.id = 319;
+		strlcpy(dummy_socinfo.build_id, "apqcobalt - ",
+			sizeof(dummy_socinfo.build_id));
+	} else if (early_machine_is_msmskunk()) {
+		dummy_socinfo.id = 321;
+		strlcpy(dummy_socinfo.build_id, "msmskunk - ",
+			sizeof(dummy_socinfo.build_id));
+	}
+
+	strlcat(dummy_socinfo.build_id, "Dummy socinfo",
+		sizeof(dummy_socinfo.build_id));
+	return (void *) &dummy_socinfo;
+}
+
+static void __init populate_soc_sysfs_files(struct device *msm_soc_device)
+{
+	device_create_file(msm_soc_device, &msm_soc_attr_vendor);
+	device_create_file(msm_soc_device, &image_version);
+	device_create_file(msm_soc_device, &image_variant);
+	device_create_file(msm_soc_device, &image_crm_version);
+	device_create_file(msm_soc_device, &select_image);
+	device_create_file(msm_soc_device, &images);
+
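+	/*
+	 * Each case intentionally falls through: a newer socinfo format
+	 * exposes every attribute of the older formats plus its own.
+	 */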
+	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 12):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_chip_family);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_device_family);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_device_number);
+	case SOCINFO_VERSION(0, 11):
+	case SOCINFO_VERSION(0, 10):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_serial_number);
+	case SOCINFO_VERSION(0, 9):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_foundry_id);
+	case SOCINFO_VERSION(0, 8):
+	case SOCINFO_VERSION(0, 7):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_pmic_model);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_pmic_die_revision);
+	case SOCINFO_VERSION(0, 6):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_subtype);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_subtype_id);
+	case SOCINFO_VERSION(0, 5):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_accessory_chip);
+	case SOCINFO_VERSION(0, 4):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_platform_version);
+	case SOCINFO_VERSION(0, 3):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_hw_platform);
+	case SOCINFO_VERSION(0, 2):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_id);
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_raw_version);
+	case SOCINFO_VERSION(0, 1):
+		device_create_file(msm_soc_device,
+					&msm_soc_attr_build_id);
+		break;
+	default:
+		pr_err("Unknown socinfo format: v%u.%u\n",
+				SOCINFO_VERSION_MAJOR(socinfo_format),
+				SOCINFO_VERSION_MINOR(socinfo_format));
+		break;
+	}
+
+}
+
+static void __init soc_info_populate(struct soc_device_attribute *soc_dev_attr)
+{
+	uint32_t soc_version = socinfo_get_version();
+
+	soc_dev_attr->soc_id = kasprintf(GFP_KERNEL, "%d", socinfo_get_id());
+	soc_dev_attr->family = "Snapdragon";
+	soc_dev_attr->machine = socinfo_get_id_string();
+	soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%u.%u",
+			SOCINFO_VERSION_MAJOR(soc_version),
+			SOCINFO_VERSION_MINOR(soc_version));
+}
+
+static int __init socinfo_init_sysfs(void)
+{
+	struct device *msm_soc_device;
+	struct soc_device *soc_dev;
+	struct soc_device_attribute *soc_dev_attr;
+
+	if (!socinfo) {
+		pr_err("No socinfo found!\n");
+		return -ENODEV;
+	}
+
+	soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
+	if (!soc_dev_attr) {
+		pr_err("Soc Device alloc failed!\n");
+		return -ENOMEM;
+	}
+
+	soc_info_populate(soc_dev_attr);
+	soc_dev = soc_device_register(soc_dev_attr);
+	if (IS_ERR_OR_NULL(soc_dev)) {
+		kfree(soc_dev_attr);
+		pr_err("Soc device register failed\n");
+		return -EIO;
+	}
+
+	msm_soc_device = soc_device_to_device(soc_dev);
+	populate_soc_sysfs_files(msm_soc_device);
+	return 0;
+}
+
+late_initcall(socinfo_init_sysfs);
+
+static void socinfo_print(void)
+{
+	uint32_t f_maj = SOCINFO_VERSION_MAJOR(socinfo_format);
+	uint32_t f_min = SOCINFO_VERSION_MINOR(socinfo_format);
+	uint32_t v_maj = SOCINFO_VERSION_MAJOR(socinfo->v0_1.version);
+	uint32_t v_min = SOCINFO_VERSION_MINOR(socinfo->v0_1.version);
+
+	switch (socinfo_format) {
+	case SOCINFO_VERSION(0, 1):
+		pr_info("v%u.%u, id=%u, ver=%u.%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min);
+		break;
+	case SOCINFO_VERSION(0, 2):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, "
+			 "raw_id=%u, raw_ver=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version);
+		break;
+	case SOCINFO_VERSION(0, 3):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, "
+			 "raw_id=%u, raw_ver=%u, hw_plat=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform);
+		break;
+	case SOCINFO_VERSION(0, 4):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, "
+			 "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version);
+		break;
+	case SOCINFO_VERSION(0, 5):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, "
+			 "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n"
+			" accessory_chip=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip);
+		break;
+	case SOCINFO_VERSION(0, 6):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, "
+			 "raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n"
+			" accessory_chip=%u hw_plat_subtype=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype);
+		break;
+	case SOCINFO_VERSION(0, 7):
+	case SOCINFO_VERSION(0, 8):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision);
+		break;
+	case SOCINFO_VERSION(0, 9):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id);
+		break;
+	case SOCINFO_VERSION(0, 10):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number);
+		break;
+	case SOCINFO_VERSION(0, 11):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics);
+		break;
+	case SOCINFO_VERSION(0, 12):
+		pr_info("v%u.%u, id=%u, ver=%u.%u, raw_id=%u, raw_ver=%u, hw_plat=%u, hw_plat_ver=%u\n accessory_chip=%u, hw_plat_subtype=%u, pmic_model=%u, pmic_die_revision=%u foundry_id=%u serial_number=%u num_pmics=%u chip_family=0x%x raw_device_family=0x%x raw_device_number=0x%x\n",
+			f_maj, f_min, socinfo->v0_1.id, v_maj, v_min,
+			socinfo->v0_2.raw_id, socinfo->v0_2.raw_version,
+			socinfo->v0_3.hw_platform,
+			socinfo->v0_4.platform_version,
+			socinfo->v0_5.accessory_chip,
+			socinfo->v0_6.hw_platform_subtype,
+			socinfo->v0_7.pmic_model,
+			socinfo->v0_7.pmic_die_revision,
+			socinfo->v0_9.foundry_id,
+			socinfo->v0_10.serial_number,
+			socinfo->v0_11.num_pmics,
+			socinfo->v0_12.chip_family,
+			socinfo->v0_12.raw_device_family,
+			socinfo->v0_12.raw_device_number);
+		break;
+
+	default:
+		pr_err("Unknown format found: v%u.%u\n", f_maj, f_min);
+		break;
+	}
+}
+
+static void socinfo_select_format(void)
+{
+	uint32_t f_maj = SOCINFO_VERSION_MAJOR(socinfo->v0_1.format);
+	uint32_t f_min = SOCINFO_VERSION_MINOR(socinfo->v0_1.format);
+
+	if (f_maj != 0) {
+		pr_err("Unsupported format v%u.%u. Falling back to dummy values.\n",
+			f_maj, f_min);
+		socinfo = setup_dummy_socinfo();
+	}
+
+	if (socinfo->v0_1.format > MAX_SOCINFO_FORMAT) {
+		pr_warn("Unsupported format v%u.%u. Falling back to v%u.%u.\n",
+			f_maj, f_min, SOCINFO_VERSION_MAJOR(MAX_SOCINFO_FORMAT),
+			SOCINFO_VERSION_MINOR(MAX_SOCINFO_FORMAT));
+		socinfo_format = MAX_SOCINFO_FORMAT;
+	} else {
+		socinfo_format = socinfo->v0_1.format;
+	}
+}
+
+int __init socinfo_init(void)
+{
+	static bool socinfo_init_done;
+	unsigned int size;
+
+	if (socinfo_init_done)
+		return 0;
+
+	socinfo = smem_get_entry(SMEM_HW_SW_BUILD_ID, &size, 0,
+				 SMEM_ANY_HOST_FLAG);
+	if (IS_ERR_OR_NULL(socinfo)) {
+		pr_warn("Can't find SMEM_HW_SW_BUILD_ID; falling back on dummy values.\n");
+		socinfo = setup_dummy_socinfo();
+	}
+
+	socinfo_select_format();
+
+	WARN(!socinfo_get_id(), "Unknown SOC ID!\n");
+
+	if (socinfo_get_id() >= ARRAY_SIZE(cpu_of_id))
+		panic("New IDs added! ID => CPU mapping needs an update.\n");
+	else
+		cur_cpu = cpu_of_id[socinfo->v0_1.id].generic_soc_type;
+
+	boot_stats_init();
+	socinfo_print();
+	arch_read_hardware_id = msm_read_hardware_id;
+	socinfo_init_done = true;
+
+	return 0;
+}
+subsys_initcall(socinfo_init);
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
new file mode 100644
index 0000000..a7d5d37
--- /dev/null
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -0,0 +1,1160 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/delay.h>
+
+#include <linux/msm-bus-board.h>
+#include <linux/msm-bus.h>
+#include <linux/dma-mapping.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/ramdump.h>
+#include <soc/qcom/scm.h>
+
+#include <soc/qcom/smem.h>
+
+#include "peripheral-loader.h"
+
+#define XO_FREQ			19200000
+#define PROXY_TIMEOUT_MS	10000
+#define MAX_SSR_REASON_LEN	81U
+#define STOP_ACK_TIMEOUT_MS	1000
+#define CRASH_STOP_ACK_TO_MS	200
+
+#define ERR_READY	0
+#define PBL_DONE	1
+
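+/* Recover the containing pil_tz_data from an embedded pil_desc/subsys_desc */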
+#define desc_to_data(d) container_of(d, struct pil_tz_data, desc)
+#define subsys_to_data(d) container_of(d, struct pil_tz_data, subsys_desc)
+
+/**
+ * struct reg_info - regulator info
+ * @reg: regulator handle
+ * @uV: voltage in uV
+ * @uA: current in uA
+ */
+struct reg_info {
+	struct regulator *reg;
+	int uV;
+	int uA;
+};
+
+/**
+ * struct pil_tz_data
+ * @regs: regulators that should be always on when the subsystem is
+ *	   brought out of reset
+ * @proxy_regs: regulators that should be on during pil proxy voting
+ * @clks: clocks that should be always on when the subsystem is
+ *	  brought out of reset
+ * @proxy_clks: clocks that should be on during pil proxy voting
+ * @reg_count: the number of always on regulators
+ * @proxy_reg_count: the number of proxy voting regulators
+ * @clk_count: the number of always on clocks
+ * @proxy_clk_count: the number of proxy voting clocks
+ * @smem_id: the smem id used to read the subsystem crash reason
+ * @ramdump_dev: ramdump device pointer
+ * @pas_id: the PAS id for tz
+ * @bus_client: bus client id
+ * @enable_bus_scaling: set to true if PIL needs to vote for
+ *			bus bandwidth
+ * @keep_proxy_regs_on: If set, during proxy unvoting, PIL removes the
+ *			voltage/current vote for proxy regulators but leaves
+ *			them enabled.
+ * @stop_ack: state of completion of stop ack
+ * @desc: PIL descriptor
+ * @subsys: subsystem device pointer
+ * @subsys_desc: subsystem descriptor
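+ * @irq_status: SP2SOC interrupt status register (iomapped)
+ * @irq_clear: SP2SOC interrupt clear register (iomapped)
+ * @irq_mask: SP2SOC interrupt mask register (iomapped)
+ * @err_status: RMB error status register (iomapped)
+ * @err_status_spare: RMB spare error status register (iomapped)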
+ * @bits_arr: array of bit positions in SCSR registers
+ */
+struct pil_tz_data {
+	struct reg_info *regs;
+	struct reg_info *proxy_regs;
+	struct clk **clks;
+	struct clk **proxy_clks;
+	int reg_count;
+	int proxy_reg_count;
+	int clk_count;
+	int proxy_clk_count;
+	int smem_id;
+	void *ramdump_dev;
+	u32 pas_id;
+	u32 bus_client;
+	bool enable_bus_scaling;
+	bool keep_proxy_regs_on;
+	struct completion stop_ack;
+	struct pil_desc desc;
+	struct subsys_device *subsys;
+	struct subsys_desc subsys_desc;
+	void __iomem *irq_status;
+	void __iomem *irq_clear;
+	void __iomem *irq_mask;
+	void __iomem *err_status;
+	void __iomem *err_status_spare;
+	u32 bits_arr[2];
+};
+
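+/* PAS command IDs understood by the secure environment (TZ) */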
+enum scm_cmd {
+	PAS_INIT_IMAGE_CMD = 1,
+	PAS_MEM_SETUP_CMD,
+	PAS_AUTH_AND_RESET_CMD = 5,
+	PAS_SHUTDOWN_CMD,
+};
+
+enum pas_id {
+	PAS_MODEM,
+	PAS_Q6,
+	PAS_DSPS,
+	PAS_TZAPPS,
+	PAS_MODEM_SW,
+	PAS_MODEM_FW,
+	PAS_WCNSS,
+	PAS_SECAPP,
+	PAS_GSS,
+	PAS_VIDC,
+	PAS_VPU,
+	PAS_BCSS,
+};
+
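+/*
+ * Two bus usecases: index 0 drops the bandwidth vote, index 1 votes for the
+ * bandwidth needed while the secure environment processes PAS image commands.
+ */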
+static struct msm_bus_paths scm_pas_bw_tbl[] = {
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+			},
+		},
+		.num_paths = 1,
+	},
+	{
+		.vectors = (struct msm_bus_vectors[]){
+			{
+				.src = MSM_BUS_MASTER_SPS,
+				.dst = MSM_BUS_SLAVE_EBI_CH0,
+				.ib = 492 * 8 * 1000000UL,
+				.ab = 492 * 8 *  100000UL,
+			},
+		},
+		.num_paths = 1,
+	},
+};
+
+static struct msm_bus_scale_pdata scm_pas_bus_pdata = {
+	.usecase = scm_pas_bw_tbl,
+	.num_usecases = ARRAY_SIZE(scm_pas_bw_tbl),
+	.name = "scm_pas",
+};
+
+static uint32_t scm_perf_client;
+static int scm_pas_bw_count;
+static DEFINE_MUTEX(scm_pas_bw_mutex);
+
+static int scm_pas_enable_bw(void)
+{
+	int ret = 0;
+
+	if (!scm_perf_client)
+		return -EINVAL;
+
+	mutex_lock(&scm_pas_bw_mutex);
+	if (!scm_pas_bw_count) {
+		ret = msm_bus_scale_client_update_request(scm_perf_client, 1);
+		if (ret)
+			goto err_bus;
+		scm_pas_bw_count++;
+	}
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+
+err_bus:
+	pr_err("scm-pas: Bandwidth request failed (%d)\n", ret);
+	msm_bus_scale_client_update_request(scm_perf_client, 0);
+
+	mutex_unlock(&scm_pas_bw_mutex);
+	return ret;
+}
+
+static void scm_pas_disable_bw(void)
+{
+	mutex_lock(&scm_pas_bw_mutex);
+	if (scm_pas_bw_count-- == 1)
+		msm_bus_scale_client_update_request(scm_perf_client, 0);
+	mutex_unlock(&scm_pas_bw_mutex);
+}
+
+static void scm_pas_init(int id)
+{
+	static int is_inited;
+
+	if (is_inited)
+		return;
+
+	scm_pas_bw_tbl[0].vectors[0].src = id;
+	scm_pas_bw_tbl[1].vectors[0].src = id;
+
+	scm_perf_client = msm_bus_scale_register_client(&scm_pas_bus_pdata);
+	if (!scm_perf_client)
+		pr_warn("scm-pas: Unable to register bus client\n");
+
+	is_inited = 1;
+}
+
+static int of_read_clocks(struct device *dev, struct clk ***clks_ref,
+			  const char *propname)
+{
+	long clk_count;
+	int i, len;
+	struct clk **clks;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	clk_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(clk_count)) {
+		dev_err(dev, "Failed to get clock names\n");
+		return -EINVAL;
+	}
+
+	clks = devm_kzalloc(dev, sizeof(struct clk *) * clk_count,
+				GFP_KERNEL);
+	if (!clks)
+		return -ENOMEM;
+
+	for (i = 0; i < clk_count; i++) {
+		const char *clock_name;
+		char clock_freq_name[50];
+		u32 clock_rate = XO_FREQ;
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &clock_name);
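+		/* Optional per-clock rate override: "qcom,<clock-name>-freq" */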
+		snprintf(clock_freq_name, ARRAY_SIZE(clock_freq_name),
+						"qcom,%s-freq", clock_name);
+		if (of_find_property(dev->of_node, clock_freq_name, &len))
+			if (of_property_read_u32(dev->of_node, clock_freq_name,
+								&clock_rate)) {
+				dev_err(dev, "Failed to read %s clock's freq\n",
+							clock_freq_name);
+				return -EINVAL;
+			}
+
+		clks[i] = devm_clk_get(dev, clock_name);
+		if (IS_ERR(clks[i])) {
+			int rc = PTR_ERR(clks[i]);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s clock\n",
+								clock_name);
+			return rc;
+		}
+
+		/* Make sure rate-settable clocks' rates are set */
+		if (clk_get_rate(clks[i]) == 0)
+			clk_set_rate(clks[i], clk_round_rate(clks[i],
+								clock_rate));
+	}
+
+	*clks_ref = clks;
+	return clk_count;
+}
+
+static int of_read_regs(struct device *dev, struct reg_info **regs_ref,
+			const char *propname)
+{
+	long reg_count;
+	int i, len, rc;
+	struct reg_info *regs;
+
+	if (!of_find_property(dev->of_node, propname, &len))
+		return 0;
+
+	reg_count = of_property_count_strings(dev->of_node, propname);
+	if (IS_ERR_VALUE(reg_count)) {
+		dev_err(dev, "Failed to get regulator names\n");
+		return -EINVAL;
+	}
+
+	regs = devm_kzalloc(dev, sizeof(struct reg_info) * reg_count,
+				GFP_KERNEL);
+	if (!regs)
+		return -ENOMEM;
+
+	for (i = 0; i < reg_count; i++) {
+		const char *reg_name;
+		char reg_uV_uA_name[50];
+		u32 vdd_uV_uA[2];
+
+		of_property_read_string_index(dev->of_node,
+					      propname, i,
+					      &reg_name);
+
+		regs[i].reg = devm_regulator_get(dev, reg_name);
+		if (IS_ERR(regs[i].reg)) {
+			int rc = PTR_ERR(regs[i].reg);
+
+			if (rc != -EPROBE_DEFER)
+				dev_err(dev, "Failed to get %s regulator\n",
+								reg_name);
+			return rc;
+		}
+
+		/*
+		 * Read the voltage and current values for the corresponding
+		 * regulator. The device tree property name is "qcom," +
+		 *  "regulator_name" + "-uV-uA".
+		 */
+		rc = snprintf(reg_uV_uA_name, ARRAY_SIZE(reg_uV_uA_name),
+			 "qcom,%s-uV-uA", reg_name);
+		if (rc < strlen(reg_name) + 6) {
+			dev_err(dev, "Failed to hold reg_uV_uA_name\n");
+			return -EINVAL;
+		}
+
+		if (!of_find_property(dev->of_node, reg_uV_uA_name, &len))
+			continue;
+
+		len /= sizeof(vdd_uV_uA[0]);
+
+		/* There should be two entries: one for uV and one for uA */
+		if (len != 2) {
+			dev_err(dev, "Missing uV/uA value\n");
+			return -EINVAL;
+		}
+
+		rc = of_property_read_u32_array(dev->of_node, reg_uV_uA_name,
+					vdd_uV_uA, len);
+		if (rc) {
+			dev_err(dev, "Failed to read uV/uA values(rc:%d)\n",
+									rc);
+			return rc;
+		}
+
+		regs[i].uV = vdd_uV_uA[0];
+		regs[i].uA = vdd_uV_uA[1];
+	}
+
+	*regs_ref = regs;
+	return reg_count;
+}
+
+static int of_read_bus_pdata(struct platform_device *pdev,
+			     struct pil_tz_data *d)
+{
+	struct msm_bus_scale_pdata *pdata;
+
+	pdata = msm_bus_cl_get_pdata(pdev);
+
+	if (!pdata)
+		return -EINVAL;
+
+	d->bus_client = msm_bus_scale_register_client(pdata);
+	if (!d->bus_client)
+		pr_warn("%s: Unable to register bus client\n", __func__);
+
+	return 0;
+}
+
+static int piltz_resc_init(struct platform_device *pdev, struct pil_tz_data *d)
+{
+	int len, count, rc;
+	struct device *dev = &pdev->dev;
+
+	count = of_read_clocks(dev, &d->clks, "qcom,active-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup clocks.\n");
+		return count;
+	}
+	d->clk_count = count;
+
+	count = of_read_clocks(dev, &d->proxy_clks, "qcom,proxy-clock-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy clocks.\n");
+		return count;
+	}
+	d->proxy_clk_count = count;
+
+	count = of_read_regs(dev, &d->regs, "qcom,active-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup regulators.\n");
+		return count;
+	}
+	d->reg_count = count;
+
+	count = of_read_regs(dev, &d->proxy_regs, "qcom,proxy-reg-names");
+	if (count < 0) {
+		dev_err(dev, "Failed to setup proxy regulators.\n");
+		return count;
+	}
+	d->proxy_reg_count = count;
+
+	if (of_find_property(dev->of_node, "qcom,msm-bus,name", &len)) {
+		d->enable_bus_scaling = true;
+		rc = of_read_bus_pdata(pdev, d);
+		if (rc) {
+			dev_err(dev, "Failed to setup bus scaling client.\n");
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+static int enable_regulators(struct pil_tz_data *d, struct device *dev,
+				struct reg_info *regs, int reg_count,
+				bool reg_no_enable)
+{
+	int i, rc = 0;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0) {
+			rc = regulator_set_voltage(regs[i].reg,
+					regs[i].uV, INT_MAX);
+			if (rc) {
+				dev_err(dev, "Failed to request voltage(rc:%d)\n",
+									rc);
+				goto err_voltage;
+			}
+		}
+
+		if (regs[i].uA > 0) {
+			rc = regulator_set_load(regs[i].reg,
+						regs[i].uA);
+			if (rc < 0) {
+				dev_err(dev, "Failed to set regulator mode(rc:%d)\n",
+									rc);
+				goto err_mode;
+			}
+		}
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+
+		rc = regulator_enable(regs[i].reg);
+		if (rc) {
+			dev_err(dev, "Regulator enable failed(rc:%d)\n", rc);
+			goto err_enable;
+		}
+	}
+
+	return 0;
+err_enable:
+	if (regs[i].uA > 0) {
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+		regulator_set_load(regs[i].reg, 0);
+	}
+err_mode:
+	if (regs[i].uV > 0)
+		regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+err_voltage:
+	for (i--; i >= 0; i--) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_enable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+
+	return rc;
+}
+
+static void disable_regulators(struct pil_tz_data *d, struct reg_info *regs,
+					int reg_count, bool reg_no_disable)
+{
+	int i;
+
+	for (i = 0; i < reg_count; i++) {
+		if (regs[i].uV > 0)
+			regulator_set_voltage(regs[i].reg, 0, INT_MAX);
+
+		if (regs[i].uA > 0)
+			regulator_set_load(regs[i].reg, 0);
+
+		if (d->keep_proxy_regs_on && reg_no_disable)
+			continue;
+		regulator_disable(regs[i].reg);
+	}
+}
+
+static int prepare_enable_clocks(struct device *dev, struct clk **clks,
+								int clk_count)
+{
+	int rc = 0;
+	int i;
+
+	for (i = 0; i < clk_count; i++) {
+		rc = clk_prepare_enable(clks[i]);
+		if (rc) {
+			dev_err(dev, "Clock enable failed(rc:%d)\n", rc);
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	for (i--; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+
+	return rc;
+}
+
+static void disable_unprepare_clocks(struct clk **clks, int clk_count)
+{
+	int i;
+
+	for (i = --clk_count; i >= 0; i--)
+		clk_disable_unprepare(clks[i]);
+}
+
+static int pil_make_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+							d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (d->bus_client) {
+		rc = msm_bus_scale_client_update_request(d->bus_client, 1);
+		if (rc) {
+			dev_err(pil->dev, "bandwidth request failed(rc:%d)\n",
+									rc);
+			goto err_bw;
+		}
+	} else {
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+	}
+
+	return 0;
+err_bw:
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	return rc;
+}
+
+static void pil_remove_proxy_vote(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+
+	if (d->subsys_desc.no_auth)
+		return;
+
+	if (d->bus_client)
+		msm_bus_scale_client_update_request(d->bus_client, 0);
+	else
+		WARN(d->enable_bus_scaling, "Bus scaling not set up for %s!\n",
+					d->subsys_desc.name);
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, true);
+}
+
+static int pil_init_image_trusted(struct pil_desc *pil,
+		const u8 *metadata, size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	image_addr;
+	} request;
+	u32 scm_ret = 0;
+	void *mdata_buf;
+	dma_addr_t mdata_phys;
+	int ret;
+	unsigned long attrs = 0;
+	struct device dev = {0};
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	ret = scm_pas_enable_bw();
+	if (ret)
+		return ret;
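+
+	/*
+	 * A throwaway struct device is populated locally so that
+	 * dma_alloc_attrs() can hand back a strongly-ordered buffer for
+	 * staging the image metadata passed to TZ.
+	 */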
+	arch_setup_dma_ops(&dev, 0, 0, NULL, 0);
+
+	dev.coherent_dma_mask =
+		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+	attrs |= DMA_ATTR_STRONGLY_ORDERED;
+	mdata_buf = dma_alloc_attrs(&dev, size, &mdata_phys, GFP_KERNEL,
+					attrs);
+	if (!mdata_buf) {
+		pr_err("scm-pas: Allocation for metadata failed.\n");
+		scm_pas_disable_bw();
+		return -ENOMEM;
+	}
+
+	memcpy(mdata_buf, metadata, size);
+
+	request.proc = d->pas_id;
+	request.image_addr = mdata_phys;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = mdata_phys;
+		desc.arginfo = SCM_ARGS(2, SCM_VAL, SCM_RW);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+
+	dma_free_attrs(&dev, size, mdata_buf, mdata_phys, attrs);
+	scm_pas_disable_bw();
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_mem_setup_trusted(struct pil_desc *pil, phys_addr_t addr,
+			       size_t size)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	struct pas_init_image_req {
+		u32	proc;
+		u32	start_addr;
+		u32	len;
+	} request;
+	u32 scm_ret = 0;
+	int ret;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	request.proc = d->pas_id;
+	request.start_addr = addr;
+	request.len = size;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_PIL, PAS_MEM_SETUP_CMD, &request,
+				sizeof(request), &scm_ret, sizeof(scm_ret));
+	} else {
+		desc.args[0] = d->pas_id;
+		desc.args[1] = addr;
+		desc.args[2] = size;
+		desc.arginfo = SCM_ARGS(3);
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_MEM_SETUP_CMD),
+				&desc);
+		scm_ret = desc.ret[0];
+	}
+	if (ret)
+		return ret;
+	return scm_ret;
+}
+
+static int pil_auth_and_reset(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	int rc;
+	u32 proc, scm_ret = 0;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->regs, d->reg_count, false);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->clks, d->clk_count);
+	if (rc)
+		goto err_clks;
+
+	rc = scm_pas_enable_bw();
+	if (rc)
+		goto err_reset;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_AUTH_AND_RESET_CMD, &proc,
+				sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL,
+			       PAS_AUTH_AND_RESET_CMD), &desc);
+		scm_ret = desc.ret[0];
+	}
+	scm_pas_disable_bw();
+	if (rc)
+		goto err_reset;
+
+	return scm_ret;
+err_reset:
+	disable_unprepare_clocks(d->clks, d->clk_count);
+err_clks:
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return rc;
+}
+
+static int pil_shutdown_trusted(struct pil_desc *pil)
+{
+	struct pil_tz_data *d = desc_to_data(pil);
+	u32 proc, scm_ret = 0;
+	int rc;
+	struct scm_desc desc = {0};
+
+	if (d->subsys_desc.no_auth)
+		return 0;
+
+	desc.args[0] = proc = d->pas_id;
+	desc.arginfo = SCM_ARGS(1);
+
+	rc = enable_regulators(d, pil->dev, d->proxy_regs,
+					d->proxy_reg_count, true);
+	if (rc)
+		return rc;
+
+	rc = prepare_enable_clocks(pil->dev, d->proxy_clks,
+						d->proxy_clk_count);
+	if (rc)
+		goto err_clks;
+
+	if (!is_scm_armv8()) {
+		rc = scm_call(SCM_SVC_PIL, PAS_SHUTDOWN_CMD, &proc,
+			      sizeof(proc), &scm_ret, sizeof(scm_ret));
+	} else {
+		rc = scm_call2(SCM_SIP_FNID(SCM_SVC_PIL, PAS_SHUTDOWN_CMD),
+			       &desc);
+		scm_ret = desc.ret[0];
+	}
+
+	disable_unprepare_clocks(d->proxy_clks, d->proxy_clk_count);
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+
+	if (rc)
+		return rc;
+
+	disable_unprepare_clocks(d->clks, d->clk_count);
+	disable_regulators(d, d->regs, d->reg_count, false);
+
+	return scm_ret;
+err_clks:
+	disable_regulators(d, d->proxy_regs, d->proxy_reg_count, false);
+	return rc;
+}
+
+static struct pil_reset_ops pil_ops_trusted = {
+	.init_image = pil_init_image_trusted,
+	.mem_setup = pil_mem_setup_trusted,
+	.auth_and_reset = pil_auth_and_reset,
+	.shutdown = pil_shutdown_trusted,
+	.proxy_vote = pil_make_proxy_vote,
+	.proxy_unvote = pil_remove_proxy_vote,
+};
+
+static void log_failure_reason(const struct pil_tz_data *d)
+{
+	u32 size;
+	char *smem_reason, reason[MAX_SSR_REASON_LEN];
+	const char *name = d->subsys_desc.name;
+
+	if (d->smem_id == -1)
+		return;
+
+	smem_reason = smem_get_entry_no_rlock(d->smem_id, &size, 0,
+							SMEM_ANY_HOST_FLAG);
+	if (!smem_reason || !size) {
+		pr_err("%s SFR: (unknown, smem_get_entry_no_rlock failed).\n",
+									name);
+		return;
+	}
+	if (!smem_reason[0]) {
+		pr_err("%s SFR: (unknown, empty string found).\n", name);
+		return;
+	}
+
+	strlcpy(reason, smem_reason, min(size, MAX_SSR_REASON_LEN));
+	pr_err("%s subsystem failure reason: %s.\n", name, reason);
+
+	smem_reason[0] = '\0';
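+	/* Make sure the cleared reason string is committed to SMEM before we return */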
+	wmb();
+}
+
+static int subsys_shutdown(const struct subsys_desc *subsys, bool force_stop)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret;
+
+	if (!subsys_get_crash_status(d->subsys) && force_stop &&
+						subsys->force_stop_gpio) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		ret = wait_for_completion_timeout(&d->stop_ack,
+				msecs_to_jiffies(STOP_ACK_TIMEOUT_MS));
+		if (!ret)
+			pr_warn("Timed out on stop ack from %s.\n",
+							subsys->name);
+		gpio_set_value(subsys->force_stop_gpio, 0);
+	}
+
+	pil_shutdown(&d->desc);
+	return 0;
+}
+
+static int subsys_powerup(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+	int ret = 0;
+
+	if (subsys->stop_ack_irq)
+		reinit_completion(&d->stop_ack);
+
+	d->desc.fw_name = subsys->fw_name;
+	ret = pil_boot(&d->desc);
+
+	return ret;
+}
+
+static int subsys_ramdump(int enable, const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (!enable)
+		return 0;
+
+	return pil_do_ramdump(&d->desc, d->ramdump_dev);
+}
+
+static void subsys_free_memory(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	pil_free_memory(&d->desc);
+}
+
+static void subsys_crash_shutdown(const struct subsys_desc *subsys)
+{
+	struct pil_tz_data *d = subsys_to_data(subsys);
+
+	if (subsys->force_stop_gpio > 0 &&
+				!subsys_get_crash_status(d->subsys)) {
+		gpio_set_value(subsys->force_stop_gpio, 1);
+		mdelay(CRASH_STOP_ACK_TO_MS);
+	}
+}
+
+static irqreturn_t subsys_err_fatal_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_err("Fatal error on %s!\n", d->subsys_desc.name);
+	if (subsys_get_crash_status(d->subsys)) {
+		pr_err("%s: Ignoring error fatal, restart in progress\n",
+							d->subsys_desc.name);
+		return IRQ_HANDLED;
+	}
+	subsys_set_crash_status(d->subsys, true);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t subsys_wdog_bite_irq_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	if (subsys_get_crash_status(d->subsys))
+		return IRQ_HANDLED;
+	pr_err("Watchdog bite received from %s!\n", d->subsys_desc.name);
+
+	if (d->subsys_desc.system_debug &&
+			!gpio_get_value(d->subsys_desc.err_fatal_gpio))
+		panic("%s: System ramdump requested. Triggering device restart!\n",
+							__func__);
+	subsys_set_crash_status(d->subsys, true);
+	log_failure_reason(d);
+	subsystem_restart_dev(d->subsys);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t subsys_stop_ack_intr_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+
+	pr_info("Received stop ack interrupt from %s\n", d->subsys_desc.name);
+	complete(&d->stop_ack);
+	return IRQ_HANDLED;
+}
+
+static void check_pbl_done(struct pil_tz_data *d)
+{
+	uint32_t err_value;
+
+	err_value = __raw_readl(d->err_status);
+	pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
+	if (err_value) {
+		uint32_t rmb_err_spare0;
+		uint32_t rmb_err_spare1;
+		uint32_t rmb_err_spare2;
+
+		rmb_err_spare2 = __raw_readl(d->err_status_spare);
+		rmb_err_spare1 = __raw_readl(d->err_status_spare - 4);
+		rmb_err_spare0 = __raw_readl(d->err_status_spare - 8);
+
+		pr_err("PBL error status register: 0x%08x\n", err_value);
+
+		pr_err("PBL error status spare0 register: 0x%08x\n",
+			rmb_err_spare0);
+		pr_err("PBL error status spare1 register: 0x%08x\n",
+			rmb_err_spare1);
+		pr_err("PBL error status spare2 register: 0x%08x\n",
+			rmb_err_spare2);
+	}
+	__raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
+}
+
+static void check_err_ready(struct pil_tz_data *d)
+{
+	uint32_t err_value;
+
+	err_value = __raw_readl(d->err_status_spare);
+	if (!err_value) {
+		pr_debug("Subsystem error services up received from %s!\n",
+							d->subsys_desc.name);
+		__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
+		complete_err_ready(d->subsys);
+	} else if (err_value == 0x44554d50) {
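+		/* 0x44554d50 is ASCII "DUMP": the subsystem signalled a wdog bite */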
+		pr_err("wdog bite received from %s!\n", d->subsys_desc.name);
+		__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
+		subsys_set_crash_status(d->subsys, true);
+		log_failure_reason(d);
+		subsystem_restart_dev(d->subsys);
+	}
+}
+
+static irqreturn_t subsys_generic_handler(int irq, void *dev_id)
+{
+	struct pil_tz_data *d = subsys_to_data(dev_id);
+	uint32_t status_val;
+
+	if (subsys_get_crash_status(d->subsys))
+		return IRQ_HANDLED;
+
+	status_val = __raw_readl(d->irq_status);
+
+	if (status_val & BIT(d->bits_arr[ERR_READY]))
+		check_err_ready(d);
+	else if (status_val & BIT(d->bits_arr[PBL_DONE]))
+		check_pbl_done(d);
+	return IRQ_HANDLED;
+}
+
+static void mask_scsr_irqs(struct pil_tz_data *d)
+{
+	uint32_t mask_val;
+	/* Masking all interrupts not handled by HLOS */
+	mask_val = ~0;
+	__raw_writel(mask_val & ~BIT(d->bits_arr[ERR_READY]) &
+			~BIT(d->bits_arr[PBL_DONE]), d->irq_mask);
+}
+
+static int pil_tz_driver_probe(struct platform_device *pdev)
+{
+	struct pil_tz_data *d;
+	struct resource *res;
+	u32 proxy_timeout;
+	int len, rc;
+
+	d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
+	if (!d)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, d);
+
+	if (of_property_read_bool(pdev->dev.of_node, "qcom,pil-no-auth"))
+		d->subsys_desc.no_auth = true;
+
+	d->keep_proxy_regs_on = of_property_read_bool(pdev->dev.of_node,
+						"qcom,keep-proxy-regs-on");
+
+	rc = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
+				      &d->desc.name);
+	if (rc)
+		return rc;
+
+	/* Defaulting smem_id to be not present */
+	d->smem_id = -1;
+
+	if (of_find_property(pdev->dev.of_node, "qcom,smem-id", &len)) {
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,smem-id",
+						&d->smem_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to get the smem_id(rc:%d)\n",
+									rc);
+			return rc;
+		}
+	}
+
+	d->desc.dev = &pdev->dev;
+	d->desc.owner = THIS_MODULE;
+	d->desc.ops = &pil_ops_trusted;
+
+	d->desc.proxy_timeout = PROXY_TIMEOUT_MS;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,proxy-timeout-ms",
+					&proxy_timeout);
+	if (!rc)
+		d->desc.proxy_timeout = proxy_timeout;
+
+	if (!d->subsys_desc.no_auth) {
+		rc = piltz_resc_init(pdev, d);
+		if (rc)
+			return rc;
+
+		rc = of_property_read_u32(pdev->dev.of_node, "qcom,pas-id",
+								&d->pas_id);
+		if (rc) {
+			dev_err(&pdev->dev, "Failed to find the pas_id(rc:%d)\n",
+									rc);
+			return rc;
+		}
+		scm_pas_init(MSM_BUS_MASTER_CRYPTO_CORE0);
+	}
+
+	rc = pil_desc_init(&d->desc);
+	if (rc)
+		return rc;
+
+	init_completion(&d->stop_ack);
+
+	d->subsys_desc.name = d->desc.name;
+	d->subsys_desc.owner = THIS_MODULE;
+	d->subsys_desc.dev = &pdev->dev;
+	d->subsys_desc.shutdown = subsys_shutdown;
+	d->subsys_desc.powerup = subsys_powerup;
+	d->subsys_desc.ramdump = subsys_ramdump;
+	d->subsys_desc.free_memory = subsys_free_memory;
+	d->subsys_desc.crash_shutdown = subsys_crash_shutdown;
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,pil-generic-irq-handler")) {
+		d->subsys_desc.generic_handler = subsys_generic_handler;
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_status");
+		d->irq_status = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_clr");
+		d->irq_clear = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"sp2soc_irq_mask");
+		d->irq_mask = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_err");
+		d->err_status = devm_ioremap_resource(&pdev->dev, res);
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"rmb_err_spare2");
+		d->err_status_spare = devm_ioremap_resource(&pdev->dev, res);
+		rc = of_property_read_u32_array(pdev->dev.of_node,
+				"qcom,spss-scsr-bits", d->bits_arr,
+				ARRAY_SIZE(d->bits_arr));
+		if (rc)
+			dev_err(&pdev->dev,
+				"Failed to read qcom,spss-scsr-bits\n");
+		mask_scsr_irqs(d);
+
+	} else {
+		d->subsys_desc.err_fatal_handler =
+						subsys_err_fatal_intr_handler;
+		d->subsys_desc.wdog_bite_handler = subsys_wdog_bite_irq_handler;
+		d->subsys_desc.stop_ack_handler = subsys_stop_ack_intr_handler;
+	}
+	d->ramdump_dev = create_ramdump_device(d->subsys_desc.name,
+								&pdev->dev);
+	if (!d->ramdump_dev) {
+		rc = -ENOMEM;
+		goto err_ramdump;
+	}
+
+	d->subsys = subsys_register(&d->subsys_desc);
+	if (IS_ERR(d->subsys)) {
+		rc = PTR_ERR(d->subsys);
+		goto err_subsys;
+	}
+
+	return 0;
+err_subsys:
+	destroy_ramdump_device(d->ramdump_dev);
+err_ramdump:
+	pil_desc_release(&d->desc);
+
+	return rc;
+}
+
+static int pil_tz_driver_exit(struct platform_device *pdev)
+{
+	struct pil_tz_data *d = platform_get_drvdata(pdev);
+
+	subsys_unregister(d->subsys);
+	destroy_ramdump_device(d->ramdump_dev);
+	pil_desc_release(&d->desc);
+
+	return 0;
+}
+
+static const struct of_device_id pil_tz_match_table[] = {
+	{.compatible = "qcom,pil-tz-generic"},
+	{}
+};
+
+static struct platform_driver pil_tz_driver = {
+	.probe = pil_tz_driver_probe,
+	.remove = pil_tz_driver_exit,
+	.driver = {
+		.name = "subsys-pil-tz",
+		.of_match_table = pil_tz_match_table,
+		.owner = THIS_MODULE,
+	},
+};
+
+static int __init pil_tz_init(void)
+{
+	return platform_driver_register(&pil_tz_driver);
+}
+module_init(pil_tz_init);
+
+static void __exit pil_tz_exit(void)
+{
+	platform_driver_unregister(&pil_tz_driver);
+}
+module_exit(pil_tz_exit);
+
+MODULE_DESCRIPTION("Support for booting subsystems");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
new file mode 100644
index 0000000..f099dd5
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -0,0 +1,221 @@
+/* Copyright (c) 2011, 2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem Notifier -- Provides notifications
+ * of subsys events.
+ *
+ * Use subsys_notif_register_notifier to register for notifications
+ * and subsys_notif_queue_notification to send notifications.
+ *
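+ * A minimal registration sketch (the callback and subsystem name below are
+ * illustrative only):
+ *
+ *	static int my_ssr_cb(struct notifier_block *nb, unsigned long code,
+ *			     void *data)
+ *	{
+ *		return NOTIFY_DONE;
+ *	}
+ *	static struct notifier_block my_nb = { .notifier_call = my_ssr_cb };
+ *	...
+ *	void *handle = subsys_notif_register_notifier("modem", &my_nb);
+ *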
+ */
+
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/stringify.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <soc/qcom/subsystem_notif.h>
+
+
+struct subsys_notif_info {
+	char name[50];
+	struct srcu_notifier_head subsys_notif_rcvr_list;
+	struct list_head list;
+};
+
+static LIST_HEAD(subsystem_list);
+static DEFINE_MUTEX(notif_lock);
+static DEFINE_MUTEX(notif_add_lock);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static void subsys_notif_reg_test_notifier(const char *);
+#endif
+
+static struct subsys_notif_info *_notif_find_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys;
+
+	mutex_lock(&notif_lock);
+	list_for_each_entry(subsys, &subsystem_list, list)
+		if (!strcmp(subsys->name, subsys_name)) {
+			mutex_unlock(&notif_lock);
+			return subsys;
+		}
+	mutex_unlock(&notif_lock);
+
+	return NULL;
+}
+
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys = _notif_find_subsys(subsys_name);
+
+	if (!subsys) {
+
+		/* Possible first time reference to this subsystem. Add it. */
+		subsys = (struct subsys_notif_info *)
+				subsys_notif_add_subsys(subsys_name);
+
+		if (!subsys)
+			return ERR_PTR(-EINVAL);
+	}
+
+	ret = srcu_notifier_chain_register(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	if (ret < 0)
+		return ERR_PTR(ret);
+
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_register_notifier);
+
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb)
+{
+	int ret;
+	struct subsys_notif_info *subsys =
+			(struct subsys_notif_info *)subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	ret = srcu_notifier_chain_unregister(
+		&subsys->subsys_notif_rcvr_list, nb);
+
+	return ret;
+}
+EXPORT_SYMBOL(subsys_notif_unregister_notifier);
+
+void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys = NULL;
+
+	if (!subsys_name)
+		goto done;
+
+	mutex_lock(&notif_add_lock);
+
+	subsys = _notif_find_subsys(subsys_name);
+
+	if (subsys) {
+		mutex_unlock(&notif_add_lock);
+		goto done;
+	}
+
+	subsys = kmalloc(sizeof(struct subsys_notif_info), GFP_KERNEL);
+
+	if (!subsys) {
+		mutex_unlock(&notif_add_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	strlcpy(subsys->name, subsys_name, ARRAY_SIZE(subsys->name));
+
+	srcu_init_notifier_head(&subsys->subsys_notif_rcvr_list);
+
+	INIT_LIST_HEAD(&subsys->list);
+
+	mutex_lock(&notif_lock);
+	list_add_tail(&subsys->list, &subsystem_list);
+	mutex_unlock(&notif_lock);
+
+	#if defined(SUBSYS_RESTART_DEBUG)
+	subsys_notif_reg_test_notifier(subsys->name);
+	#endif
+
+	mutex_unlock(&notif_add_lock);
+
+done:
+	return subsys;
+}
+EXPORT_SYMBOL(subsys_notif_add_subsys);
+
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	int ret = 0;
+	struct subsys_notif_info *subsys =
+		(struct subsys_notif_info *) subsys_handle;
+
+	if (!subsys)
+		return -EINVAL;
+
+	if (notif_type < 0 || notif_type >= SUBSYS_NOTIF_TYPE_COUNT)
+		return -EINVAL;
+
+	ret = srcu_notifier_call_chain(
+		&subsys->subsys_notif_rcvr_list, notif_type,
+		data);
+	return ret;
+}
+EXPORT_SYMBOL(subsys_notif_queue_notification);
+
+#if defined(SUBSYS_RESTART_DEBUG)
+static const char *notif_to_string(enum subsys_notif_type notif_type)
+{
+	switch (notif_type) {
+
+	case	SUBSYS_BEFORE_SHUTDOWN:
+		return __stringify(SUBSYS_BEFORE_SHUTDOWN);
+
+	case	SUBSYS_AFTER_SHUTDOWN:
+		return __stringify(SUBSYS_AFTER_SHUTDOWN);
+
+	case	SUBSYS_BEFORE_POWERUP:
+		return __stringify(SUBSYS_BEFORE_POWERUP);
+
+	case	SUBSYS_AFTER_POWERUP:
+		return __stringify(SUBSYS_AFTER_POWERUP);
+
+	default:
+		return "unknown";
+	}
+}
+
+static int subsys_notifier_test_call(struct notifier_block *this,
+				  unsigned long code,
+				  void *data)
+{
+	switch (code) {
+
+	default:
+		printk(KERN_WARNING "%s: Notification %s from subsystem %p\n",
+			__func__, notif_to_string(code), data);
+	break;
+
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nb = {
+	.notifier_call = subsys_notifier_test_call,
+};
+
+static void subsys_notif_reg_test_notifier(const char *subsys_name)
+{
+	void *handle = subsys_notif_register_notifier(subsys_name, &nb);
+
+	printk(KERN_WARNING "%s: Registered test notifier, handle=%p",
+			__func__, handle);
+}
+#endif
+
+MODULE_DESCRIPTION("Subsystem Restart Notifier");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
new file mode 100644
index 0000000..c6e288e
--- /dev/null
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -0,0 +1,1861 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "subsys-restart: %s(): " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/io.h>
+#include <linux/kthread.h>
+#include <linux/time.h>
+#include <linux/suspend.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/of_gpio.h>
+#include <linux/cdev.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/sysmon.h>
+
+#include <asm/current.h>
+
+#include "peripheral-loader.h"
+
+#define DISABLE_SSR 0x9889deed
+/* If set to DISABLE_SSR (0x9889deed), calls to subsystem_restart_dev() return immediately */
+static uint disable_restart_work;
+module_param(disable_restart_work, uint, 0644);
+
+static int enable_debug;
+module_param(enable_debug, int, 0644);
+
+/* The maximum shutdown timeout is the product of MAX_LOOPS and DELAY_MS. */
+#define SHUTDOWN_ACK_MAX_LOOPS	100
+#define SHUTDOWN_ACK_DELAY_MS	100
+
+/**
+ * enum p_subsys_state - state of a subsystem (private)
+ * @SUBSYS_NORMAL: subsystem is operating normally
+ * @SUBSYS_CRASHED: subsystem has crashed and hasn't been shutdown
+ * @SUBSYS_RESTARTING: subsystem has been shutdown and is now restarting
+ *
+ * The 'private' side of the subsystem state used to determine where in the
+ * restart process the subsystem is.
+ */
+enum p_subsys_state {
+	SUBSYS_NORMAL,
+	SUBSYS_CRASHED,
+	SUBSYS_RESTARTING,
+};
+
+/**
+ * enum subsys_state - state of a subsystem (public)
+ * @SUBSYS_OFFLINING: subsystem is offlining
+ * @SUBSYS_OFFLINE: subsystem is offline
+ * @SUBSYS_ONLINE: subsystem is online
+ *
+ * The 'public' side of the subsystem state, exposed to userspace.
+ */
+enum subsys_state {
+	SUBSYS_OFFLINING,
+	SUBSYS_OFFLINE,
+	SUBSYS_ONLINE,
+};
+
+static const char * const subsys_states[] = {
+	[SUBSYS_OFFLINING] = "OFFLINING",
+	[SUBSYS_OFFLINE] = "OFFLINE",
+	[SUBSYS_ONLINE] = "ONLINE",
+};
+
+static const char * const restart_levels[] = {
+	[RESET_SOC] = "SYSTEM",
+	[RESET_SUBSYS_COUPLED] = "RELATED",
+};
+
+/**
+ * struct subsys_tracking - track state of a subsystem or restart order
+ * @p_state: private state of subsystem/order
+ * @state: public state of subsystem/order
+ * @s_lock: protects p_state
+ * @lock: protects subsystem/order callbacks and state
+ *
+ * Tracks the state of a subsystem or a set of subsystems (restart order).
+ * Doing this avoids the need to grab each subsystem's lock and update
+ * each subsystems state when restarting an order.
+ */
+struct subsys_tracking {
+	enum p_subsys_state p_state;
+	spinlock_t s_lock;
+	enum subsys_state state;
+	struct mutex lock;
+};
+
+/**
+ * struct subsys_soc_restart_order - subsystem restart order
+ * @device_ptrs: device_node pointers of the subsystems in this restart order
+ * @count: number of subsystems in order
+ * @track: state tracking and locking
+ * @subsys_ptrs: pointers to subsystems in this restart order
+ * @list: entry in the global list of restart orders
+ */
+struct subsys_soc_restart_order {
+	struct device_node **device_ptrs;
+	int count;
+
+	struct subsys_tracking track;
+	struct subsys_device **subsys_ptrs;
+	struct list_head list;
+};
+
+struct restart_log {
+	struct timeval time;
+	struct subsys_device *dev;
+	struct list_head list;
+};
+
+/**
+ * struct subsys_device - subsystem device
+ * @desc: subsystem descriptor
+ * @work: context for subsystem_restart_wq_func() for this device
+ * @ssr_wlock: prevents suspend during subsystem_restart()
+ * @wlname: name of wakeup source
+ * @device_restart_work: work struct for device restart
+ * @track: state tracking and locking
+ * @notify: subsys notify handle
+ * @dev: device
+ * @owner: module that provides @desc
+ * @count: reference count of subsystem_get()/subsystem_put()
+ * @id: ida
+ * @restart_level: restart level (0 - panic, 1 - related, 2 - independent, etc.)
+ * @restart_order: order of other devices this devices restarts with
+ * @crash_count: number of times the device has crashed
+ * @dentry: debugfs directory for this device
+ * @do_ramdump_on_put: ramdump on subsystem_put() if true
+ * @err_ready: completion variable to record error ready from subsystem
+ * @crashed: indicates if subsystem has crashed
+ * @notif_state: current state of subsystem in terms of subsys notifications
+ */
+struct subsys_device {
+	struct subsys_desc *desc;
+	struct work_struct work;
+	struct wakeup_source ssr_wlock;
+	char wlname[64];
+	struct work_struct device_restart_work;
+	struct subsys_tracking track;
+
+	void *notify;
+	struct device dev;
+	struct module *owner;
+	int count;
+	int id;
+	int restart_level;
+	int crash_count;
+	struct subsys_soc_restart_order *restart_order;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *dentry;
+#endif
+	bool do_ramdump_on_put;
+	struct cdev char_dev;
+	dev_t dev_no;
+	struct completion err_ready;
+	bool crashed;
+	int notif_state;
+	struct list_head list;
+};
+
+static struct subsys_device *to_subsys(struct device *d)
+{
+	return container_of(d, struct subsys_device, dev);
+}
+
+void complete_err_ready(struct subsys_device *subsys)
+{
+	complete(&subsys->err_ready);
+}
+
+static struct subsys_tracking *subsys_get_track(struct subsys_device *subsys)
+{
+	struct subsys_soc_restart_order *order = subsys->restart_order;
+
+	if (order)
+		return &order->track;
+	else
+		return &subsys->track;
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->name);
+}
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	enum subsys_state state = to_subsys(dev)->track.state;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", subsys_states[state]);
+}
+
+static ssize_t crash_count_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%d\n", to_subsys(dev)->crash_count);
+}
+
+static ssize_t
+restart_level_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	int level = to_subsys(dev)->restart_level;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", restart_levels[level]);
+}
+
+static ssize_t restart_level_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int i, orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	for (i = 0; i < ARRAY_SIZE(restart_levels); i++)
+		if (!strncasecmp(buf, restart_levels[i], count)) {
+			subsys->restart_level = i;
+			return orig_count;
+		}
+	return -EPERM;
+}
+
+static ssize_t firmware_name_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%s\n", to_subsys(dev)->desc->fw_name);
+}
+
+static ssize_t firmware_name_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	struct subsys_tracking *track = subsys_get_track(subsys);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	pr_info("Changing subsys fw_name to %s\n", buf);
+	mutex_lock(&track->lock);
+	strlcpy(subsys->desc->fw_name, buf,
+			min(count + 1, sizeof(subsys->desc->fw_name)));
+	mutex_unlock(&track->lock);
+	return orig_count;
+}
+
+static ssize_t system_debug_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	char p[6] = "set";
+
+	if (!subsys->desc->system_debug)
+		strlcpy(p, "reset", sizeof(p));
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", p);
+}
+
+static ssize_t system_debug_store(struct device *dev,
+				struct device_attribute *attr, const char *buf,
+				size_t count)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+	const char *p;
+	int orig_count = count;
+
+	p = memchr(buf, '\n', count);
+	if (p)
+		count = p - buf;
+
+	if (!strncasecmp(buf, "set", count))
+		subsys->desc->system_debug = true;
+	else if (!strncasecmp(buf, "reset", count))
+		subsys->desc->system_debug = false;
+	else
+		return -EPERM;
+	return orig_count;
+}
+
+int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return dev->restart_level;
+}
+EXPORT_SYMBOL(subsys_get_restart_level);
+
+static void subsys_set_state(struct subsys_device *subsys,
+			     enum subsys_state state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&subsys->track.s_lock, flags);
+	if (subsys->track.state != state) {
+		subsys->track.state = state;
+		spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+		sysfs_notify(&subsys->dev.kobj, NULL, "state");
+		return;
+	}
+	spin_unlock_irqrestore(&subsys->track.s_lock, flags);
+}
+
+/**
+ * subsys_default_online() - Mark a subsystem as online by default
+ * @dev: subsystem to mark as online
+ *
+ * Marks a subsystem as "online" without increasing the reference count
+ * on the subsystem. This is typically used by subsystems that are already
+ * online when the kernel boots up.
+ */
+void subsys_default_online(struct subsys_device *dev)
+{
+	subsys_set_state(dev, SUBSYS_ONLINE);
+}
+EXPORT_SYMBOL(subsys_default_online);
+
+static struct device_attribute subsys_attrs[] = {
+	__ATTR_RO(name),
+	__ATTR_RO(state),
+	__ATTR_RO(crash_count),
+	__ATTR(restart_level, 0644, restart_level_show, restart_level_store),
+	__ATTR(firmware_name, 0644, firmware_name_show, firmware_name_store),
+	__ATTR(system_debug, 0644, system_debug_show, system_debug_store),
+	__ATTR_NULL,
+};
+
+static struct bus_type subsys_bus_type = {
+	.name		= "msm_subsys",
+	.dev_attrs	= subsys_attrs,
+};
+
+static DEFINE_IDA(subsys_ida);
+
+static int enable_ramdumps;
+module_param(enable_ramdumps, int, 0644);
+
+static int enable_mini_ramdumps;
+module_param(enable_mini_ramdumps, int, 0644);
+
+struct workqueue_struct *ssr_wq;
+static struct class *char_class;
+
+static LIST_HEAD(restart_log_list);
+static LIST_HEAD(subsys_list);
+static LIST_HEAD(ssr_order_list);
+static DEFINE_MUTEX(soc_order_reg_lock);
+static DEFINE_MUTEX(restart_log_mutex);
+static DEFINE_MUTEX(subsys_list_lock);
+static DEFINE_MUTEX(char_device_lock);
+static DEFINE_MUTEX(ssr_order_mutex);
+
+static struct subsys_soc_restart_order *
+update_restart_order(struct subsys_device *dev)
+{
+	int i;
+	struct subsys_soc_restart_order *order;
+	struct device_node *device = dev->desc->dev->of_node;
+
+	mutex_lock(&soc_order_reg_lock);
+	list_for_each_entry(order, &ssr_order_list, list) {
+		for (i = 0; i < order->count; i++) {
+			if (order->device_ptrs[i] == device) {
+				order->subsys_ptrs[i] = dev;
+				goto found;
+			}
+		}
+	}
+	order = NULL;
+found:
+	mutex_unlock(&soc_order_reg_lock);
+
+	return order;
+}
+
+static int max_restarts;
+module_param(max_restarts, int, 0644);
+
+static long max_history_time = 3600;
+module_param(max_history_time, long, 0644);
+
+static void do_epoch_check(struct subsys_device *dev)
+{
+	int n = 0;
+	struct timeval *time_first = NULL, *curr_time;
+	struct restart_log *r_log, *temp;
+	static int max_restarts_check;
+	static long max_history_time_check;
+
+	mutex_lock(&restart_log_mutex);
+
+	max_restarts_check = max_restarts;
+	max_history_time_check = max_history_time;
+
+	/* Check if epoch checking is enabled */
+	if (!max_restarts_check)
+		goto out;
+
+	r_log = kmalloc(sizeof(struct restart_log), GFP_KERNEL);
+	if (!r_log)
+		goto out;
+	r_log->dev = dev;
+	do_gettimeofday(&r_log->time);
+	curr_time = &r_log->time;
+	INIT_LIST_HEAD(&r_log->list);
+
+	list_add_tail(&r_log->list, &restart_log_list);
+
+	list_for_each_entry_safe(r_log, temp, &restart_log_list, list) {
+
+		if ((curr_time->tv_sec - r_log->time.tv_sec) >
+				max_history_time_check) {
+
+			pr_debug("Deleted node with restart_time = %ld\n",
+					r_log->time.tv_sec);
+			list_del(&r_log->list);
+			kfree(r_log);
+			continue;
+		}
+		if (!n) {
+			time_first = &r_log->time;
+			pr_debug("Time_first: %ld\n", time_first->tv_sec);
+		}
+		n++;
+		pr_debug("Restart_time: %ld\n", r_log->time.tv_sec);
+	}
+
+	if (time_first && n >= max_restarts_check) {
+		if ((curr_time->tv_sec - time_first->tv_sec) <
+				max_history_time_check)
+			panic("Subsystems have crashed %d times in less than %ld seconds!",
+				max_restarts_check, max_history_time_check);
+	}
+
+out:
+	mutex_unlock(&restart_log_mutex);
+}
+
+static int is_ramdump_enabled(struct subsys_device *dev)
+{
+	if (dev->desc->ramdump_disable_gpio)
+		return !dev->desc->ramdump_disable;
+
+	return enable_ramdumps;
+}
+
+static void send_sysmon_notif(struct subsys_device *dev)
+{
+	struct subsys_device *subsys;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(subsys, &subsys_list, list)
+		if ((subsys->notif_state > 0) && (subsys != dev))
+			sysmon_send_event(dev->desc, subsys->desc,
+						subsys->notif_state);
+	mutex_unlock(&subsys_list_lock);
+}
+
+static void for_each_subsys_device(struct subsys_device **list,
+		unsigned int count, void *data,
+		void (*fn)(struct subsys_device *, void *))
+{
+	while (count--) {
+		struct subsys_device *dev = *list++;
+
+		if (!dev)
+			continue;
+		fn(dev, data);
+	}
+}
+
+static void notify_each_subsys_device(struct subsys_device **list,
+		unsigned int count,
+		enum subsys_notif_type notif, void *data)
+{
+	struct subsys_device *subsys;
+
+	while (count--) {
+		struct subsys_device *dev = *list++;
+		struct notif_data notif_data;
+		struct platform_device *pdev;
+
+		if (!dev)
+			continue;
+
+		pdev = container_of(dev->desc->dev, struct platform_device,
+									dev);
+		dev->notif_state = notif;
+
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry(subsys, &subsys_list, list)
+			if (dev != subsys &&
+				subsys->track.state == SUBSYS_ONLINE)
+				sysmon_send_event(subsys->desc, dev->desc,
+								notif);
+		mutex_unlock(&subsys_list_lock);
+
+		if (notif == SUBSYS_AFTER_POWERUP &&
+				dev->track.state == SUBSYS_ONLINE)
+			send_sysmon_notif(dev);
+
+		notif_data.crashed = subsys_get_crash_status(dev);
+		notif_data.enable_ramdump = is_ramdump_enabled(dev);
+		notif_data.enable_mini_ramdumps = enable_mini_ramdumps;
+		notif_data.no_auth = dev->desc->no_auth;
+		notif_data.pdev = pdev;
+
+		subsys_notif_queue_notification(dev->notify, notif,
+								&notif_data);
+	}
+}
+
+static void enable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		enable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		enable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 1);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		enable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		enable_irq(dev->desc->stop_ack_irq);
+	if (dev->desc->generic_irq && dev->desc->generic_handler) {
+		enable_irq(dev->desc->generic_irq);
+		irq_set_irq_wake(dev->desc->generic_irq, 1);
+	}
+}
+
+static void disable_all_irqs(struct subsys_device *dev)
+{
+	if (dev->desc->err_ready_irq)
+		disable_irq(dev->desc->err_ready_irq);
+	if (dev->desc->wdog_bite_irq && dev->desc->wdog_bite_handler) {
+		disable_irq(dev->desc->wdog_bite_irq);
+		irq_set_irq_wake(dev->desc->wdog_bite_irq, 0);
+	}
+	if (dev->desc->err_fatal_irq && dev->desc->err_fatal_handler)
+		disable_irq(dev->desc->err_fatal_irq);
+	if (dev->desc->stop_ack_irq && dev->desc->stop_ack_handler)
+		disable_irq(dev->desc->stop_ack_irq);
+	if (dev->desc->generic_irq && dev->desc->generic_handler) {
+		disable_irq(dev->desc->generic_irq);
+		irq_set_irq_wake(dev->desc->generic_irq, 0);
+	}
+}
+
+static int wait_for_err_ready(struct subsys_device *subsys)
+{
+	int ret;
+
+	/*
+	 * If the subsystem uses generic_irq, err_ready_irq will be 0; in that
+	 * case do not bail out early, still wait for error readiness below.
+	 */
+	if ((subsys->desc->generic_irq <= 0 && !subsys->desc->err_ready_irq) ||
+				enable_debug == 1 || is_timeout_disabled())
+		return 0;
+
+	ret = wait_for_completion_timeout(&subsys->err_ready,
+					  msecs_to_jiffies(10000));
+	if (!ret) {
+		pr_err("[%s]: Error ready timed out\n", subsys->desc->name);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+static void subsystem_shutdown(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	pr_info("[%p]: Shutting down %s\n", current, name);
+	if (dev->desc->shutdown(dev->desc, true) < 0)
+		panic("subsys-restart: [%p]: Failed to shutdown %s!",
+			current, name);
+	dev->crash_count++;
+	subsys_set_state(dev, SUBSYS_OFFLINE);
+	disable_all_irqs(dev);
+}
+
+static void subsystem_ramdump(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+
+	if (dev->desc->ramdump)
+		if (dev->desc->ramdump(is_ramdump_enabled(dev), dev->desc) < 0)
+			pr_warn("%s[%p]: Ramdump failed.\n", name, current);
+	dev->do_ramdump_on_put = false;
+}
+
+static void subsystem_free_memory(struct subsys_device *dev, void *data)
+{
+	if (dev->desc->free_memory)
+		dev->desc->free_memory(dev->desc);
+}
+
+static void subsystem_powerup(struct subsys_device *dev, void *data)
+{
+	const char *name = dev->desc->name;
+	int ret;
+
+	pr_info("[%p]: Powering up %s\n", current, name);
+	init_completion(&dev->err_ready);
+
+	if (dev->desc->powerup(dev->desc) < 0) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		panic("[%p]: Powerup error: %s!", current, name);
+	}
+	enable_all_irqs(dev);
+
+	ret = wait_for_err_ready(dev);
+	if (ret) {
+		notify_each_subsys_device(&dev, 1, SUBSYS_POWERUP_FAILURE,
+								NULL);
+		panic("[%p]: Timed out waiting for error ready: %s!",
+			current, name);
+	}
+	subsys_set_state(dev, SUBSYS_ONLINE);
+	subsys_set_crash_status(dev, false);
+}
+
+static int __find_subsys(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	return !strcmp(subsys->desc->name, data);
+}
+
+static struct subsys_device *find_subsys(const char *str)
+{
+	struct device *dev;
+
+	if (!str)
+		return NULL;
+
+	dev = bus_find_device(&subsys_bus_type, NULL, (void *)str,
+			__find_subsys);
+	return dev ? to_subsys(dev) : NULL;
+}
+
+static int subsys_start(struct subsys_device *subsys)
+{
+	int ret;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_POWERUP,
+								NULL);
+
+	init_completion(&subsys->err_ready);
+	ret = subsys->desc->powerup(subsys->desc);
+	if (ret) {
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		return ret;
+	}
+	enable_all_irqs(subsys);
+
+	if (subsys->desc->is_not_loadable) {
+		subsys_set_state(subsys, SUBSYS_ONLINE);
+		return 0;
+	}
+
+	ret = wait_for_err_ready(subsys);
+	if (ret) {
+		/*
+		 * PIL boot succeeded, but we need to shut down the device
+		 * because the error-ready wait timed out.
+		 */
+		notify_each_subsys_device(&subsys, 1, SUBSYS_POWERUP_FAILURE,
+									NULL);
+		subsys->desc->shutdown(subsys->desc, false);
+		disable_all_irqs(subsys);
+		return ret;
+	}
+	subsys_set_state(subsys, SUBSYS_ONLINE);
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_POWERUP,
+								NULL);
+	return ret;
+}
+
+static void subsys_stop(struct subsys_device *subsys)
+{
+	const char *name = subsys->desc->name;
+
+	notify_each_subsys_device(&subsys, 1, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	if (!of_property_read_bool(subsys->desc->dev->of_node,
+					"qcom,pil-force-shutdown")) {
+		subsys_set_state(subsys, SUBSYS_OFFLINING);
+		subsys->desc->sysmon_shutdown_ret =
+				sysmon_send_shutdown(subsys->desc);
+		if (subsys->desc->sysmon_shutdown_ret)
+			pr_debug("Graceful shutdown failed for %s\n", name);
+	}
+
+	subsys->desc->shutdown(subsys->desc, false);
+	subsys_set_state(subsys, SUBSYS_OFFLINE);
+	disable_all_irqs(subsys);
+	notify_each_subsys_device(&subsys, 1, SUBSYS_AFTER_SHUTDOWN, NULL);
+}
+
+int subsystem_set_fwname(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+
+	if (!name)
+		return -EINVAL;
+
+	if (!fw_name)
+		return -EINVAL;
+
+	subsys = find_subsys(name);
+	if (!subsys)
+		return -EINVAL;
+
+	pr_debug("Changing subsys [%s] fw_name to [%s]\n", name, fw_name);
+	strlcpy(subsys->desc->fw_name, fw_name,
+		sizeof(subsys->desc->fw_name));
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_set_fwname);
+
+int wait_for_shutdown_ack(struct subsys_desc *desc)
+{
+	int count;
+	struct subsys_device *dev;
+
+	if (!desc || !desc->shutdown_ack_gpio)
+		return 0;
+
+	dev = find_subsys(desc->name);
+	if (!dev)
+		return 0;
+
+	for (count = SHUTDOWN_ACK_MAX_LOOPS; count > 0; count--) {
+		if (gpio_get_value(desc->shutdown_ack_gpio))
+			return count;
+		else if (subsys_get_crash_status(dev))
+			break;
+		msleep(SHUTDOWN_ACK_DELAY_MS);
+	}
+
+	pr_err("[%s]: Timed out waiting for shutdown ack\n", desc->name);
+	return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(wait_for_shutdown_ack);
+
+void *__subsystem_get(const char *name, const char *fw_name)
+{
+	struct subsys_device *subsys;
+	struct subsys_device *subsys_d;
+	int ret;
+	void *retval;
+	struct subsys_tracking *track;
+
+	if (!name)
+		return NULL;
+
+	subsys = retval = find_subsys(name);
+	if (!subsys)
+		return ERR_PTR(-ENODEV);
+	if (!try_module_get(subsys->owner)) {
+		retval = ERR_PTR(-ENODEV);
+		goto err_module;
+	}
+
+	subsys_d = subsystem_get(subsys->desc->depends_on);
+	if (IS_ERR(subsys_d)) {
+		retval = subsys_d;
+		goto err_depends;
+	}
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (!subsys->count) {
+		if (fw_name) {
+			pr_info("Changing subsys fw_name to %s\n", fw_name);
+			strlcpy(subsys->desc->fw_name, fw_name,
+				sizeof(subsys->desc->fw_name));
+		}
+		ret = subsys_start(subsys);
+		if (ret) {
+			retval = ERR_PTR(ret);
+			goto err_start;
+		}
+	}
+	subsys->count++;
+	mutex_unlock(&track->lock);
+	return retval;
+err_start:
+	mutex_unlock(&track->lock);
+	subsystem_put(subsys_d);
+err_depends:
+	module_put(subsys->owner);
+err_module:
+	put_device(&subsys->dev);
+	return retval;
+}
+
+/**
+ * subsystem_get() - Boot a subsystem
+ * @name: pointer to a string containing the name of the subsystem to boot
+ *
+ * This function returns a pointer on success. If an error occurs, an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, %NULL is returned.
+ */
+void *subsystem_get(const char *name)
+{
+	return __subsystem_get(name, NULL);
+}
+EXPORT_SYMBOL(subsystem_get);
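+
+/*
+ * Usage sketch (illustration only; "modem" is a hypothetical subsystem
+ * name): boot a subsystem with subsystem_get() and keep the returned
+ * handle for the matching subsystem_put(). An IS_ERR_OR_NULL() check
+ * covers both boot failures and the feature being disabled.
+ *
+ *	void *handle = subsystem_get("modem");
+ *
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return;
+ *	...
+ *	subsystem_put(handle);
+ */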
+
+/**
+ * subsystem_get_with_fwname() - Boot a subsystem using the firmware name passed
+ * @name: pointer to a string containing the name of the subsystem to boot
+ * @fw_name: pointer to a string containing the subsystem firmware image name
+ *
+ * This function returns a pointer on success. If an error occurs, an
+ * ERR_PTR is returned.
+ *
+ * If this feature is disabled, %NULL is returned.
+ */
+void *subsystem_get_with_fwname(const char *name, const char *fw_name)
+{
+	return __subsystem_get(name, fw_name);
+}
+EXPORT_SYMBOL(subsystem_get_with_fwname);
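+
+/*
+ * Usage sketch (illustration only; "modem" and "modem_test" are assumed
+ * example names): same as subsystem_get(), but boot with an alternate
+ * firmware image.
+ *
+ *	void *handle = subsystem_get_with_fwname("modem", "modem_test");
+ *
+ *	if (IS_ERR_OR_NULL(handle))
+ *		return;
+ */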
+
+/**
+ * subsystem_put() - Shutdown a subsystem
+ * @subsystem: pointer returned by a previous call to subsystem_get()
+ *
+ * The subsystem is not actually shut down until all callers of
+ * subsystem_get() have called subsystem_put().
+ */
+void subsystem_put(void *subsystem)
+{
+	struct subsys_device *subsys_d, *subsys = subsystem;
+	struct subsys_tracking *track;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	track = subsys_get_track(subsys);
+	mutex_lock(&track->lock);
+	if (WARN(!subsys->count, "%s: %s: Reference count mismatch\n",
+			subsys->desc->name, __func__))
+		goto err_out;
+	if (!--subsys->count) {
+		subsys_stop(subsys);
+		if (subsys->do_ramdump_on_put)
+			subsystem_ramdump(subsys, NULL);
+		subsystem_free_memory(subsys, NULL);
+	}
+	mutex_unlock(&track->lock);
+
+	subsys_d = find_subsys(subsys->desc->depends_on);
+	if (subsys_d) {
+		subsystem_put(subsys_d);
+		put_device(&subsys_d->dev);
+	}
+	module_put(subsys->owner);
+	put_device(&subsys->dev);
+	return;
+err_out:
+	mutex_unlock(&track->lock);
+}
+EXPORT_SYMBOL(subsystem_put);
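+
+/*
+ * Reference counting sketch (hypothetical callers, for illustration only):
+ * the subsystem is shut down only when the last subsystem_get() reference
+ * is dropped.
+ *
+ *	void *a = subsystem_get("adsp");
+ *	void *b = subsystem_get("adsp");
+ *
+ *	subsystem_put(a);	(still running, one reference left)
+ *	subsystem_put(b);	(last put: the subsystem is shut down)
+ */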
+
+static void subsystem_restart_wq_func(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work,
+						struct subsys_device, work);
+	struct subsys_device **list;
+	struct subsys_desc *desc = dev->desc;
+	struct subsys_soc_restart_order *order = dev->restart_order;
+	struct subsys_tracking *track;
+	unsigned int count;
+	unsigned long flags;
+
+	/*
+	 * It's OK to not take the registration lock at this point.
+	 * This is because the subsystem list inside the relevant
+	 * restart order is not being traversed.
+	 */
+	if (order) {
+		list = order->subsys_ptrs;
+		count = order->count;
+		track = &order->track;
+	} else {
+		list = &dev;
+		count = 1;
+		track = &dev->track;
+	}
+
+	/*
+	 * If a system reboot/shutdown is under way, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		WARN(1, "SSR aborted: %s, system reboot/shutdown is under way\n",
+			desc->name);
+		return;
+	}
+
+	mutex_lock(&track->lock);
+	do_epoch_check(dev);
+
+	if (dev->track.state == SUBSYS_OFFLINE) {
+		mutex_unlock(&track->lock);
+		WARN(1, "SSR aborted: %s subsystem not online\n", desc->name);
+		return;
+	}
+
+	/*
+	 * It's necessary to take the registration lock because the subsystem
+	 * list in the SoC restart order will be traversed and it shouldn't be
+	 * changed until _this_ restart sequence completes.
+	 */
+	mutex_lock(&soc_order_reg_lock);
+
+	pr_debug("[%p]: Starting restart sequence for %s\n", current,
+			desc->name);
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_SHUTDOWN, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_shutdown);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_SHUTDOWN, NULL);
+
+	notify_each_subsys_device(list, count, SUBSYS_RAMDUMP_NOTIFICATION,
+									NULL);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_RESTARTING;
+	spin_unlock_irqrestore(&track->s_lock, flags);
+
+	/* Collect ram dumps for all subsystems in order here */
+	for_each_subsys_device(list, count, NULL, subsystem_ramdump);
+
+	for_each_subsys_device(list, count, NULL, subsystem_free_memory);
+
+	notify_each_subsys_device(list, count, SUBSYS_BEFORE_POWERUP, NULL);
+	for_each_subsys_device(list, count, NULL, subsystem_powerup);
+	notify_each_subsys_device(list, count, SUBSYS_AFTER_POWERUP, NULL);
+
+	pr_info("[%p]: Restart sequence for %s completed.\n",
+			current, desc->name);
+
+	mutex_unlock(&soc_order_reg_lock);
+	mutex_unlock(&track->lock);
+
+	spin_lock_irqsave(&track->s_lock, flags);
+	track->p_state = SUBSYS_NORMAL;
+	__pm_relax(&dev->ssr_wlock);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+static void __subsystem_restart_dev(struct subsys_device *dev)
+{
+	struct subsys_desc *desc = dev->desc;
+	const char *name = dev->desc->name;
+	struct subsys_tracking *track;
+	unsigned long flags;
+
+	pr_debug("Restarting %s [level=%s]!\n", desc->name,
+			restart_levels[dev->restart_level]);
+
+	track = subsys_get_track(dev);
+	/*
+	 * Allow drivers to call subsystem_restart{_dev}() as many times as
+	 * they want up until the point where the subsystem is shutdown.
+	 */
+	spin_lock_irqsave(&track->s_lock, flags);
+	if (track->p_state != SUBSYS_CRASHED &&
+					dev->track.state == SUBSYS_ONLINE) {
+		if (track->p_state != SUBSYS_RESTARTING) {
+			track->p_state = SUBSYS_CRASHED;
+			__pm_stay_awake(&dev->ssr_wlock);
+			queue_work(ssr_wq, &dev->work);
+		} else {
+			panic("Subsystem %s crashed during SSR!", name);
+		}
+	} else
+		WARN(dev->track.state == SUBSYS_OFFLINE,
+			"SSR aborted: %s subsystem not online\n", name);
+	spin_unlock_irqrestore(&track->s_lock, flags);
+}
+
+static void device_restart_work_hdlr(struct work_struct *work)
+{
+	struct subsys_device *dev = container_of(work, struct subsys_device,
+							device_restart_work);
+
+	notify_each_subsys_device(&dev, 1, SUBSYS_SOC_RESET, NULL);
+	/*
+	 * Temporary workaround until the ramdump userspace application calls
+	 * sync() and fclose() when taking the dump.
+	 */
+	msleep(100);
+	panic("subsys-restart: Resetting the SoC - %s crashed.",
+							dev->desc->name);
+}
+
+int subsystem_restart_dev(struct subsys_device *dev)
+{
+	const char *name;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	if (!try_module_get(dev->owner)) {
+		put_device(&dev->dev);
+		return -ENODEV;
+	}
+
+	name = dev->desc->name;
+
+	/*
+	 * If a system reboot/shutdown is underway, ignore subsystem errors.
+	 * However, print a message so that we know that a subsystem behaved
+	 * unexpectedly here.
+	 */
+	if (system_state == SYSTEM_RESTART
+		|| system_state == SYSTEM_POWER_OFF) {
+		pr_err("%s crashed during a system poweroff/shutdown.\n", name);
+		/* Drop the references taken above before bailing out */
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return -EBUSY;
+	}
+
+	pr_info("Restart sequence requested for %s, restart_level = %s.\n",
+		name, restart_levels[dev->restart_level]);
+
+	if (WARN(disable_restart_work == DISABLE_SSR,
+		"subsys-restart: Ignoring restart request for %s.\n", name)) {
+		module_put(dev->owner);
+		put_device(&dev->dev);
+		return 0;
+	}
+
+	switch (dev->restart_level) {
+
+	case RESET_SUBSYS_COUPLED:
+		__subsystem_restart_dev(dev);
+		break;
+	case RESET_SOC:
+		__pm_stay_awake(&dev->ssr_wlock);
+		schedule_work(&dev->device_restart_work);
+		return 0;
+	default:
+		panic("subsys-restart: Unknown restart level!\n");
+		break;
+	}
+	module_put(dev->owner);
+	put_device(&dev->dev);
+
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_restart_dev);
+
+int subsystem_restart(const char *name)
+{
+	int ret;
+	struct subsys_device *dev = find_subsys(name);
+
+	if (!dev)
+		return -ENODEV;
+
+	ret = subsystem_restart_dev(dev);
+	put_device(&dev->dev);
+	return ret;
+}
+EXPORT_SYMBOL(subsystem_restart);
+
+int subsystem_crashed(const char *name)
+{
+	struct subsys_device *dev = find_subsys(name);
+	struct subsys_tracking *track;
+
+	if (!dev)
+		return -ENODEV;
+
+	if (!get_device(&dev->dev))
+		return -ENODEV;
+
+	track = subsys_get_track(dev);
+
+	mutex_lock(&track->lock);
+	dev->do_ramdump_on_put = true;
+	/*
+	 * TODO: Make this work with multiple consumers where one is calling
+	 * subsystem_restart() and another is calling this function. To do
+	 * so would require updating private state, etc.
+	 */
+	mutex_unlock(&track->lock);
+
+	put_device(&dev->dev);
+	return 0;
+}
+EXPORT_SYMBOL(subsystem_crashed);
+
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed)
+{
+	dev->crashed = crashed;
+}
+
+bool subsys_get_crash_status(struct subsys_device *dev)
+{
+	return dev->crashed;
+}
+
+static struct subsys_device *desc_to_subsys(struct device *d)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (device->desc->dev == d)
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+	return subsys_dev;
+}
+
+void notify_proxy_vote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_VOTE, NULL);
+}
+
+void notify_proxy_unvote(struct device *device)
+{
+	struct subsys_device *dev = desc_to_subsys(device);
+
+	if (dev)
+		notify_each_subsys_device(&dev, 1, SUBSYS_PROXY_UNVOTE, NULL);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t subsys_debugfs_read(struct file *filp, char __user *ubuf,
+		size_t cnt, loff_t *ppos)
+{
+	int r;
+	char buf[40];
+	struct subsys_device *subsys = filp->private_data;
+
+	r = snprintf(buf, sizeof(buf), "%d\n", subsys->count);
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t subsys_debugfs_write(struct file *filp,
+		const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+	struct subsys_device *subsys = filp->private_data;
+	char buf[10];
+	char *cmp;
+
+	cnt = min(cnt, sizeof(buf) - 1);
+	if (copy_from_user(&buf, ubuf, cnt))
+		return -EFAULT;
+	buf[cnt] = '\0';
+	cmp = strstrip(buf);
+
+	if (!strcmp(cmp, "restart")) {
+		if (subsystem_restart_dev(subsys))
+			return -EIO;
+	} else if (!strcmp(cmp, "get")) {
+		if (subsystem_get(subsys->desc->name))
+			return -EIO;
+	} else if (!strcmp(cmp, "put")) {
+		subsystem_put(subsys);
+	} else {
+		return -EINVAL;
+	}
+
+	return cnt;
+}
+
+static const struct file_operations subsys_debugfs_fops = {
+	.open	= simple_open,
+	.read	= subsys_debugfs_read,
+	.write	= subsys_debugfs_write,
+};
+
+static struct dentry *subsys_base_dir;
+
+static int __init subsys_debugfs_init(void)
+{
+	subsys_base_dir = debugfs_create_dir("msm_subsys", NULL);
+	return !subsys_base_dir ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_exit(void)
+{
+	debugfs_remove_recursive(subsys_base_dir);
+}
+
+static int subsys_debugfs_add(struct subsys_device *subsys)
+{
+	if (!subsys_base_dir)
+		return -ENOMEM;
+
+	subsys->dentry = debugfs_create_file(subsys->desc->name,
+				0644, subsys_base_dir,
+				subsys, &subsys_debugfs_fops);
+	return !subsys->dentry ? -ENOMEM : 0;
+}
+
+static void subsys_debugfs_remove(struct subsys_device *subsys)
+{
+	debugfs_remove(subsys->dentry);
+}
+#else
+static int __init subsys_debugfs_init(void) { return 0; }
+static void subsys_debugfs_exit(void) { }
+static int subsys_debugfs_add(struct subsys_device *subsys) { return 0; }
+static void subsys_debugfs_remove(struct subsys_device *subsys) { }
+#endif
+
+static int subsys_device_open(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+	void *retval;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	retval = subsystem_get_with_fwname(subsys_dev->desc->name,
+					subsys_dev->desc->fw_name);
+	if (IS_ERR(retval))
+		return PTR_ERR(retval);
+
+	return 0;
+}
+
+static int subsys_device_close(struct inode *inode, struct file *file)
+{
+	struct subsys_device *device, *subsys_dev = NULL;
+
+	mutex_lock(&subsys_list_lock);
+	list_for_each_entry(device, &subsys_list, list)
+		if (MINOR(device->dev_no) == iminor(inode))
+			subsys_dev = device;
+	mutex_unlock(&subsys_list_lock);
+
+	if (!subsys_dev)
+		return -EINVAL;
+
+	subsystem_put(subsys_dev);
+	return 0;
+}
+
+static const struct file_operations subsys_device_fops = {
+	.owner = THIS_MODULE,
+	.open = subsys_device_open,
+	.release = subsys_device_close,
+};
+
+static void subsys_device_release(struct device *dev)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	wakeup_source_trash(&subsys->ssr_wlock);
+	mutex_destroy(&subsys->track.lock);
+	ida_simple_remove(&subsys_ida, subsys->id);
+	kfree(subsys);
+}
+static irqreturn_t subsys_err_ready_intr_handler(int irq, void *subsys)
+{
+	struct subsys_device *subsys_dev = subsys;
+
+	dev_info(subsys_dev->desc->dev,
+		"Subsystem error monitoring/handling services are up\n");
+
+	if (subsys_dev->desc->is_not_loadable)
+		return IRQ_HANDLED;
+
+	complete(&subsys_dev->err_ready);
+	return IRQ_HANDLED;
+}
+
+static int subsys_char_device_add(struct subsys_device *subsys_dev)
+{
+	int ret = 0;
+	static int major, minor;
+	dev_t dev_no;
+
+	mutex_lock(&char_device_lock);
+	if (!major) {
+		ret = alloc_chrdev_region(&dev_no, 0, 4, "subsys");
+		if (ret < 0) {
+			pr_err("Failed to alloc subsys_dev region, err %d\n",
+									ret);
+			goto fail;
+		}
+		major = MAJOR(dev_no);
+		minor = MINOR(dev_no);
+	} else
+		dev_no = MKDEV(major, minor);
+
+	/* device_create() returns an ERR_PTR() on failure, never NULL */
+	if (IS_ERR(device_create(char_class, subsys_dev->desc->dev, dev_no,
+			NULL, "subsys_%s", subsys_dev->desc->name))) {
+		pr_err("Failed to create subsys_%s device\n",
+						subsys_dev->desc->name);
+		ret = -ENOMEM;
+		goto fail_unregister_cdev_region;
+	}
+
+	cdev_init(&subsys_dev->char_dev, &subsys_device_fops);
+	subsys_dev->char_dev.owner = THIS_MODULE;
+	ret = cdev_add(&subsys_dev->char_dev, dev_no, 1);
+	if (ret < 0)
+		goto fail_destroy_device;
+
+	subsys_dev->dev_no = dev_no;
+	minor++;
+	mutex_unlock(&char_device_lock);
+
+	return 0;
+
+fail_destroy_device:
+	device_destroy(char_class, dev_no);
+fail_unregister_cdev_region:
+	unregister_chrdev_region(dev_no, 1);
+fail:
+	mutex_unlock(&char_device_lock);
+	return ret;
+}
+
+static void subsys_char_device_remove(struct subsys_device *subsys_dev)
+{
+	cdev_del(&subsys_dev->char_dev);
+	device_destroy(char_class, subsys_dev->dev_no);
+	unregister_chrdev_region(subsys_dev->dev_no, 1);
+}
+
+static void subsys_remove_restart_order(struct device_node *device)
+{
+	struct subsys_soc_restart_order *order;
+	int i;
+
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(order, &ssr_order_list, list)
+		for (i = 0; i < order->count; i++)
+			if (order->device_ptrs[i] == device)
+				order->subsys_ptrs[i] = NULL;
+	mutex_unlock(&ssr_order_mutex);
+}
+
+static struct subsys_soc_restart_order *
+ssr_parse_restart_orders(struct subsys_desc *desc)
+{
+	int i, j, count, num = 0;
+	struct subsys_soc_restart_order *order, *tmp;
+	struct device *dev = desc->dev;
+	struct device_node *ssr_node;
+	uint32_t len;
+
+	if (!of_get_property(dev->of_node, "qcom,restart-group", &len))
+		return NULL;
+
+	count = len/sizeof(uint32_t);
+
+	order = devm_kzalloc(dev, sizeof(*order), GFP_KERNEL);
+	if (!order)
+		return ERR_PTR(-ENOMEM);
+
+	order->subsys_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct subsys_device *),
+				GFP_KERNEL);
+	if (!order->subsys_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	order->device_ptrs = devm_kzalloc(dev,
+				count * sizeof(struct device_node *),
+				GFP_KERNEL);
+	if (!order->device_ptrs)
+		return ERR_PTR(-ENOMEM);
+
+	for (i = 0; i < count; i++) {
+		ssr_node = of_parse_phandle(dev->of_node,
+						"qcom,restart-group", i);
+		if (!ssr_node)
+			return ERR_PTR(-ENXIO);
+		pr_info("%s device has been added to %s's restart group\n",
+						ssr_node->name, desc->name);
+		order->device_ptrs[i] = ssr_node;
+		/* Only drop our reference once the node name has been used */
+		of_node_put(ssr_node);
+	}
+
+	/*
+	 * Check for similar restart groups. If found, return
+	 * without adding the new group to the ssr_order_list.
+	 */
+	mutex_lock(&ssr_order_mutex);
+	list_for_each_entry(tmp, &ssr_order_list, list) {
+		for (i = 0; i < count; i++) {
+			for (j = 0; j < count; j++) {
+				if (order->device_ptrs[j] !=
+					tmp->device_ptrs[i])
+					continue;
+				else
+					num++;
+			}
+		}
+
+		if (num == count && tmp->count == count)
+			goto err;
+		else if (num) {
+			tmp = ERR_PTR(-EINVAL);
+			goto err;
+		}
+	}
+
+	order->count = count;
+	mutex_init(&order->track.lock);
+	spin_lock_init(&order->track.s_lock);
+
+	INIT_LIST_HEAD(&order->list);
+	list_add_tail(&order->list, &ssr_order_list);
+	mutex_unlock(&ssr_order_mutex);
+
+	return order;
+err:
+	mutex_unlock(&ssr_order_mutex);
+	return tmp;
+}
+
+static int __get_gpio(struct subsys_desc *desc, const char *prop,
+		int *gpio)
+{
+	struct device_node *dnode = desc->dev->of_node;
+	int ret = -ENOENT;
+
+	if (of_find_property(dnode, prop, NULL)) {
+		*gpio = of_get_named_gpio(dnode, prop, 0);
+		ret = *gpio < 0 ? *gpio : 0;
+	}
+
+	return ret;
+}
+
+static int __get_irq(struct subsys_desc *desc, const char *prop,
+		unsigned int *irq, int *gpio)
+{
+	int ret, gpiol, irql;
+
+	ret = __get_gpio(desc, prop, &gpiol);
+	if (ret)
+		return ret;
+
+	irql = gpio_to_irq(gpiol);
+
+	if (irql == -ENOENT)
+		irql = -ENXIO;
+
+	if (irql < 0) {
+		pr_err("[%s]: Error getting IRQ \"%s\"\n", desc->name,
+				prop);
+		return irql;
+	}
+
+	if (gpio)
+		*gpio = gpiol;
+	*irq = irql;
+
+	return 0;
+}
+
+static int subsys_parse_devicetree(struct subsys_desc *desc)
+{
+	struct subsys_soc_restart_order *order;
+	int ret;
+
+	struct platform_device *pdev = container_of(desc->dev,
+					struct platform_device, dev);
+
+	ret = __get_irq(desc, "qcom,gpio-err-fatal", &desc->err_fatal_irq,
+							&desc->err_fatal_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-err-ready", &desc->err_ready_irq,
+							NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_irq(desc, "qcom,gpio-stop-ack", &desc->stop_ack_irq, NULL);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-force-stop", &desc->force_stop_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-ramdump-disable",
+			&desc->ramdump_disable_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = __get_gpio(desc, "qcom,gpio-shutdown-ack",
+			&desc->shutdown_ack_gpio);
+	if (ret && ret != -ENOENT)
+		return ret;
+
+	ret = platform_get_irq(pdev, 0);
+	if (ret > 0)
+		desc->wdog_bite_irq = ret;
+
+	if (of_property_read_bool(pdev->dev.of_node,
+					"qcom,pil-generic-irq-handler")) {
+		ret = platform_get_irq(pdev, 0);
+		if (ret > 0)
+			desc->generic_irq = ret;
+	}
+
+	order = ssr_parse_restart_orders(desc);
+	if (IS_ERR(order)) {
+		pr_err("Could not initialize SSR restart order, err = %ld\n",
+							PTR_ERR(order));
+		return PTR_ERR(order);
+	}
+
+	return 0;
+}
+
+static int subsys_setup_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+	int ret;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler) {
+		ret = devm_request_irq(desc->dev, desc->err_fatal_irq,
+				desc->err_fatal_handler,
+				IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register error fatal IRQ handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->err_fatal_irq);
+	}
+
+	if (desc->stop_ack_irq && desc->stop_ack_handler) {
+		ret = devm_request_irq(desc->dev, desc->stop_ack_irq,
+			desc->stop_ack_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register stop ack handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->stop_ack_irq);
+	}
+
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler) {
+		ret = devm_request_irq(desc->dev, desc->wdog_bite_irq,
+			desc->wdog_bite_handler,
+			IRQF_TRIGGER_RISING, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register wdog bite handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->wdog_bite_irq);
+	}
+
+	if (desc->generic_irq && desc->generic_handler) {
+		ret = devm_request_irq(desc->dev, desc->generic_irq,
+			desc->generic_handler,
+			IRQF_TRIGGER_HIGH, desc->name, desc);
+		if (ret < 0) {
+			dev_err(desc->dev, "[%s]: Unable to register generic irq handler!: %d\n",
+				desc->name, ret);
+			return ret;
+		}
+		disable_irq(desc->generic_irq);
+	}
+
+	if (desc->err_ready_irq) {
+		ret = devm_request_irq(desc->dev,
+					desc->err_ready_irq,
+					subsys_err_ready_intr_handler,
+					IRQF_TRIGGER_RISING,
+					"error_ready_interrupt", subsys);
+		if (ret < 0) {
+			dev_err(desc->dev,
+				"[%s]: Unable to register err ready handler\n",
+				desc->name);
+			return ret;
+		}
+		disable_irq(desc->err_ready_irq);
+	}
+
+	return 0;
+}
+
+static void subsys_free_irqs(struct subsys_device *subsys)
+{
+	struct subsys_desc *desc = subsys->desc;
+
+	if (desc->err_fatal_irq && desc->err_fatal_handler)
+		devm_free_irq(desc->dev, desc->err_fatal_irq, desc);
+	if (desc->stop_ack_irq && desc->stop_ack_handler)
+		devm_free_irq(desc->dev, desc->stop_ack_irq, desc);
+	if (desc->wdog_bite_irq && desc->wdog_bite_handler)
+		devm_free_irq(desc->dev, desc->wdog_bite_irq, desc);
+	if (desc->err_ready_irq)
+		devm_free_irq(desc->dev, desc->err_ready_irq, subsys);
+}
+
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	struct subsys_device *subsys;
+	struct device_node *ofnode = desc->dev->of_node;
+	int ret;
+
+	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
+	if (!subsys)
+		return ERR_PTR(-ENOMEM);
+
+	subsys->desc = desc;
+	subsys->owner = desc->owner;
+	subsys->dev.parent = desc->dev;
+	subsys->dev.bus = &subsys_bus_type;
+	subsys->dev.release = subsys_device_release;
+	subsys->notif_state = -1;
+	subsys->desc->sysmon_pid = -1;
+	strlcpy(subsys->desc->fw_name, desc->name,
+			sizeof(subsys->desc->fw_name));
+
+	subsys->notify = subsys_notif_add_subsys(desc->name);
+
+	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
+	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
+	INIT_WORK(&subsys->work, subsystem_restart_wq_func);
+	INIT_WORK(&subsys->device_restart_work, device_restart_work_hdlr);
+	spin_lock_init(&subsys->track.s_lock);
+
+	subsys->id = ida_simple_get(&subsys_ida, 0, 0, GFP_KERNEL);
+	if (subsys->id < 0) {
+		wakeup_source_trash(&subsys->ssr_wlock);
+		ret = subsys->id;
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	dev_set_name(&subsys->dev, "subsys%d", subsys->id);
+
+	mutex_init(&subsys->track.lock);
+
+	ret = subsys_debugfs_add(subsys);
+	if (ret) {
+		ida_simple_remove(&subsys_ida, subsys->id);
+		wakeup_source_trash(&subsys->ssr_wlock);
+		kfree(subsys);
+		return ERR_PTR(ret);
+	}
+
+	ret = device_register(&subsys->dev);
+	if (ret) {
+		subsys_debugfs_remove(subsys);
+		/* put_device() invokes subsys_device_release(), which frees subsys */
+		put_device(&subsys->dev);
+		return ERR_PTR(ret);
+	}
+
+	ret = subsys_char_device_add(subsys);
+	if (ret)
+		goto err_register;
+
+	if (ofnode) {
+		ret = subsys_parse_devicetree(desc);
+		if (ret)
+			goto err_register;
+
+		subsys->restart_order = update_restart_order(subsys);
+
+		ret = subsys_setup_irqs(subsys);
+		if (ret < 0)
+			goto err_setup_irqs;
+
+		if (of_property_read_u32(ofnode, "qcom,ssctl-instance-id",
+					&desc->ssctl_instance_id))
+			pr_debug("Reading instance-id for %s failed\n",
+								desc->name);
+
+		if (of_property_read_u32(ofnode, "qcom,sysmon-id",
+					&subsys->desc->sysmon_pid))
+			pr_debug("Reading sysmon-id for %s failed\n",
+								desc->name);
+
+		subsys->desc->edge = of_get_property(ofnode, "qcom,edge",
+									NULL);
+		if (!subsys->desc->edge)
+			pr_debug("Reading qcom,edge for %s failed\n",
+								desc->name);
+	}
+
+	ret = sysmon_notifier_register(desc);
+	if (ret < 0)
+		goto err_sysmon_notifier;
+
+	if (subsys->desc->edge) {
+		ret = sysmon_glink_register(desc);
+		if (ret < 0)
+			goto err_sysmon_glink_register;
+	}
+	mutex_lock(&subsys_list_lock);
+	INIT_LIST_HEAD(&subsys->list);
+	list_add_tail(&subsys->list, &subsys_list);
+	mutex_unlock(&subsys_list_lock);
+
+	return subsys;
+err_sysmon_glink_register:
+	sysmon_notifier_unregister(subsys->desc);
+err_sysmon_notifier:
+	if (ofnode)
+		subsys_free_irqs(subsys);
+err_setup_irqs:
+	if (ofnode)
+		subsys_remove_restart_order(ofnode);
+err_register:
+	subsys_debugfs_remove(subsys);
+	/*
+	 * device_unregister() drops the last reference, and
+	 * subsys_device_release() frees subsys; no explicit kfree() here.
+	 */
+	device_unregister(&subsys->dev);
+	return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(subsys_register);
+
+void subsys_unregister(struct subsys_device *subsys)
+{
+	struct subsys_device *subsys_dev, *tmp;
+	struct device_node *device = subsys->desc->dev->of_node;
+
+	if (IS_ERR_OR_NULL(subsys))
+		return;
+
+	if (get_device(&subsys->dev)) {
+		mutex_lock(&subsys_list_lock);
+		list_for_each_entry_safe(subsys_dev, tmp, &subsys_list, list)
+			if (subsys_dev == subsys)
+				list_del(&subsys->list);
+		mutex_unlock(&subsys_list_lock);
+
+		if (device) {
+			subsys_free_irqs(subsys);
+			subsys_remove_restart_order(device);
+		}
+		mutex_lock(&subsys->track.lock);
+		WARN_ON(subsys->count);
+		device_unregister(&subsys->dev);
+		mutex_unlock(&subsys->track.lock);
+		subsys_debugfs_remove(subsys);
+		subsys_char_device_remove(subsys);
+		sysmon_notifier_unregister(subsys->desc);
+		if (subsys->desc->edge)
+			sysmon_glink_unregister(subsys->desc);
+		put_device(&subsys->dev);
+	}
+}
+EXPORT_SYMBOL(subsys_unregister);
+
+static int subsys_panic(struct device *dev, void *data)
+{
+	struct subsys_device *subsys = to_subsys(dev);
+
+	if (subsys->desc->crash_shutdown)
+		subsys->desc->crash_shutdown(subsys->desc);
+	return 0;
+}
+
+static int ssr_panic_handler(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	bus_for_each_dev(&subsys_bus_type, NULL, NULL, subsys_panic);
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block panic_nb = {
+	.notifier_call  = ssr_panic_handler,
+};
+
+static int __init subsys_restart_init(void)
+{
+	int ret;
+
+	ssr_wq = alloc_workqueue("ssr_wq", WQ_CPU_INTENSIVE, 0);
+	BUG_ON(!ssr_wq);
+
+	ret = bus_register(&subsys_bus_type);
+	if (ret)
+		goto err_bus;
+	ret = subsys_debugfs_init();
+	if (ret)
+		goto err_debugfs;
+
+	char_class = class_create(THIS_MODULE, "subsys");
+	if (IS_ERR(char_class)) {
+		ret = -ENOMEM;
+		pr_err("Failed to create subsys_dev class\n");
+		goto err_class;
+	}
+
+	ret = atomic_notifier_chain_register(&panic_notifier_list,
+			&panic_nb);
+	if (ret)
+		goto err_soc;
+
+	return 0;
+
+err_soc:
+	class_destroy(char_class);
+err_class:
+	subsys_debugfs_exit();
+err_debugfs:
+	bus_unregister(&subsys_bus_type);
+err_bus:
+	destroy_workqueue(ssr_wq);
+	return ret;
+}
+arch_initcall(subsys_restart_init);
+
+MODULE_DESCRIPTION("Subsystem Restart Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/sysmon-glink.c b/drivers/soc/qcom/sysmon-glink.c
new file mode 100644
index 0000000..27d9b7f
--- /dev/null
+++ b/drivers/soc/qcom/sysmon-glink.c
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/err.h>
+#include <linux/workqueue.h>
+
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/glink.h>
+
+#define TX_BUF_SIZE	50
+#define RX_BUF_SIZE	500
+#define TIMEOUT_MS	500
+
+/**
+ * struct sysmon_subsys - sysmon state for one subsystem
+ * @name:	subsys_desc name
+ * @edge:	name of the G-Link edge
+ * @handle:	G-Link "sys_mon" channel used for this subsystem
+ * @link_info:	link state callback registration info
+ * @rx_buf:	buffer used to store the received message
+ * @chan_open:	set when GLINK_CONNECTED, cleared otherwise
+ * @event:	last stored G-Link state event
+ * @glink_handle:	link state notifier handle reference
+ * @intent_count:	number of outstanding RX intents queued
+ * @resp_ready:	completion signalled when a response arrives
+ * @lock:	serializes channel state and message exchange
+ * @glink_event_wq:	workqueue for link up/down handling
+ * @work:	work item queued on @glink_event_wq
+ * @list:	node in the global sysmon_glink_list
+ */
+struct sysmon_subsys {
+	const char		*name;
+	const char		*edge;
+	void			*handle;
+	struct glink_link_info	*link_info;
+	char			rx_buf[RX_BUF_SIZE];
+	bool			chan_open;
+	unsigned int	event;
+	void			*glink_handle;
+	int			intent_count;
+	struct completion	resp_ready;
+	struct mutex		lock;
+	struct workqueue_struct *glink_event_wq;
+	struct work_struct	work;
+	struct list_head	list;
+};
+
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN]  = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP]  = "before_powerup",
+	[SUBSYS_AFTER_POWERUP]   = "after_powerup",
+};
+
+static LIST_HEAD(sysmon_glink_list);
+static DEFINE_MUTEX(sysmon_glink_list_lock);
+
+static struct sysmon_subsys *_find_subsys(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss;
+
+	if (desc == NULL)
+		return NULL;
+
+	mutex_lock(&sysmon_glink_list_lock);
+	list_for_each_entry(ss, &sysmon_glink_list, list) {
+		if (!strcmp(ss->name, desc->name)) {
+			mutex_unlock(&sysmon_glink_list_lock);
+			return ss;
+		}
+	}
+	mutex_unlock(&sysmon_glink_list_lock);
+
+	return NULL;
+}
+
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+	void *handle;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	if (!ss->handle)
+		return -EINVAL;
+
+	init_completion(&ss->resp_ready);
+	handle = ss->handle;
+
+	/* Register an intent to receive data */
+	if (!ss->intent_count) {
+		ret = glink_queue_rx_intent(handle, (void *)ss,
+						sizeof(ss->rx_buf));
+		if (ret) {
+			pr_err("Failed to register receive intent\n");
+			return ret;
+		}
+		ss->intent_count++;
+	}
+
+	pr_debug("Sending sysmon message: %s\n", tx_buf);
+	ret = glink_tx(handle, (void *)ss, (void *)tx_buf, len,
+						GLINK_TX_REQ_INTENT);
+	if (ret) {
+		pr_err("Failed to send sysmon message!\n");
+		return ret;
+	}
+
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				  msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret) {
+		pr_err("Timed out waiting for response\n");
+		return -ETIMEDOUT;
+	}
+	pr_debug("Received response: %s\n", ss->rx_buf);
+	return ret;
+}
+
+/**
+ * sysmon_send_event_no_qmi() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds, but with something other than an acknowledgment.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+
+	char tx_buf[TX_BUF_SIZE];
+	int ret;
+	struct sysmon_subsys *ss = NULL;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (event_desc == NULL || notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT
+			|| notif_name[notif] == NULL)
+		return -EINVAL;
+
+	snprintf(tx_buf, sizeof(tx_buf), "ssr:%s:%s", event_desc->name,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (ret < 0) {
+		mutex_unlock(&ss->lock);
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, "ssr:ack")) {
+		mutex_unlock(&ss->lock);
+		pr_debug("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	mutex_unlock(&ss->lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
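+
+/*
+ * Usage sketch (illustration only; adsp_desc and modem_desc are hypothetical
+ * descriptors): tell one subsystem over the G-Link sysmon channel that
+ * another is about to be shut down.
+ *
+ *	ret = sysmon_send_event_no_qmi(adsp_desc, modem_desc,
+ *				       SUBSYS_BEFORE_SHUTDOWN);
+ *	if (ret < 0)
+ *		pr_err("adsp did not ack the notification: %d\n", ret);
+ */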
+
+/**
+ * sysmon_send_shutdown_no_qmi() - send shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the G-Link transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	struct sysmon_subsys *ss = NULL;
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	int ret;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, sizeof(tx_buf));
+	if (ret < 0) {
+		mutex_unlock(&ss->lock);
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, expect)) {
+		mutex_unlock(&ss->lock);
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	mutex_unlock(&ss->lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown_no_qmi);
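+
+/*
+ * Usage sketch (illustration only; "desc" is a hypothetical descriptor):
+ * request a graceful shutdown before forcing the subsystem down.
+ *
+ *	if (sysmon_send_shutdown_no_qmi(desc) < 0)
+ *		pr_warn("graceful shutdown request failed\n");
+ */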
+
+/**
+ * sysmon_get_reason_no_qmi() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NULL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the G-Link transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_GLINK_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len)
+{
+	struct sysmon_subsys *ss = NULL;
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	int ret;
+
+	ss = _find_subsys(dest_desc);
+	if (ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, sizeof(tx_buf));
+	if (ret < 0) {
+		mutex_unlock(&ss->lock);
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		mutex_unlock(&ss->lock);
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+	mutex_unlock(&ss->lock);
+out:
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason_no_qmi);
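+
+/*
+ * Usage sketch (illustration only; "desc" is a hypothetical descriptor):
+ * fetch the failure reason string into a caller-owned buffer. A negative
+ * return indicates failure.
+ *
+ *	char reason[RX_BUF_SIZE];
+ *
+ *	if (sysmon_get_reason_no_qmi(desc, reason, sizeof(reason)) >= 0)
+ *		pr_err("subsystem failure reason: %s\n", reason);
+ */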
+
+static void glink_notify_rx(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr, size_t size)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!ss) {
+		pr_err("sysmon_subsys mapping failed\n");
+		return;
+	}
+
+	memset(ss->rx_buf, 0, sizeof(ss->rx_buf));
+	ss->intent_count--;
+	if (sizeof(ss->rx_buf) > size)
+		strlcpy(ss->rx_buf, ptr, size);
+	else
+		pr_warn("Invalid recv message size\n");
+	glink_rx_done(ss->handle, ptr, false);
+	complete(&ss->resp_ready);
+}
+
+static void glink_notify_tx_done(void *handle, const void *priv,
+		const void *pkt_priv, const void *ptr)
+{
+	struct sysmon_subsys *cb_data = (struct sysmon_subsys *)priv;
+
+	if (!cb_data)
+		pr_err("sysmon_subsys mapping failed\n");
+	else
+		pr_debug("tx_done notification!\n");
+}
+
+static void glink_notify_state(void *handle, const void *priv,
+		unsigned int event)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!ss) {
+		pr_err("sysmon_subsys mapping failed\n");
+		return;
+	}
+
+	mutex_lock(&ss->lock);
+	ss->event = event;
+	switch (event) {
+	case GLINK_CONNECTED:
+		ss->chan_open = true;
+		break;
+	case GLINK_REMOTE_DISCONNECTED:
+		ss->chan_open = false;
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&ss->lock);
+}
+
+static void glink_state_up_work_hdlr(struct work_struct *work)
+{
+	struct glink_open_config open_cfg;
+	struct sysmon_subsys *ss = container_of(work, struct sysmon_subsys,
+							work);
+	void *handle = NULL;
+
+	if (!ss) {
+		pr_err("Invalid sysmon_subsys struct parameter\n");
+		return;
+	}
+
+	memset(&open_cfg, 0, sizeof(struct glink_open_config));
+	open_cfg.priv = (void *)ss;
+	open_cfg.notify_rx = glink_notify_rx;
+	open_cfg.notify_tx_done = glink_notify_tx_done;
+	open_cfg.notify_state = glink_notify_state;
+	open_cfg.edge = ss->edge;
+	open_cfg.transport = "smd_trans";
+	open_cfg.name = "sys_mon";
+
+	handle = glink_open(&open_cfg);
+	if (IS_ERR_OR_NULL(handle)) {
+		pr_err("%s: %s: unable to open channel\n",
+					open_cfg.edge, open_cfg.name);
+		return;
+	}
+	ss->handle = handle;
+}
+
+static void glink_state_down_work_hdlr(struct work_struct *work)
+{
+	struct sysmon_subsys *ss = container_of(work, struct sysmon_subsys,
+							work);
+
+	if (ss->handle)
+		glink_close(ss->handle);
+	ss->handle = NULL;
+}
+
+static void sysmon_glink_cb(struct glink_link_state_cb_info *cb_info,
+					void *priv)
+{
+	struct sysmon_subsys *ss = (struct sysmon_subsys *)priv;
+
+	if (!cb_info || !ss) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	mutex_lock(&ss->lock);
+	switch (cb_info->link_state) {
+	case GLINK_LINK_STATE_UP:
+		pr_debug("LINK UP %s\n", ss->edge);
+		INIT_WORK(&ss->work, glink_state_up_work_hdlr);
+		queue_work(ss->glink_event_wq, &ss->work);
+		break;
+	case GLINK_LINK_STATE_DOWN:
+		pr_debug("LINK DOWN %s\n", ss->edge);
+		INIT_WORK(&ss->work, glink_state_down_work_hdlr);
+		queue_work(ss->glink_event_wq, &ss->work);
+		break;
+	default:
+		pr_warn("Invalid event notification\n");
+		break;
+	}
+	mutex_unlock(&ss->lock);
+}
+
+int sysmon_glink_register(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss;
+	struct glink_link_info *link_info;
+	int ret;
+
+	if (!desc)
+		return -EINVAL;
+
+	ss = kzalloc(sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	link_info = kzalloc(sizeof(struct glink_link_info), GFP_KERNEL);
+	if (!link_info) {
+		pr_err("Could not allocate link info structure\n");
+		kfree(ss);
+		return -ENOMEM;
+	}
+
+	ss->glink_event_wq = create_singlethread_workqueue(desc->name);
+	if (ss->glink_event_wq == NULL) {
+		ret = -ENOMEM;
+		goto err_wq;
+	}
+	mutex_init(&ss->lock);
+
+	ss->name = desc->name;
+	ss->handle = NULL;
+	ss->intent_count = 0;
+	ss->link_info = link_info;
+	ss->link_info->edge = ss->edge = desc->edge;
+	ss->link_info->transport = "smd_trans";
+	ss->link_info->glink_link_state_notif_cb = sysmon_glink_cb;
+
+	ss->glink_handle = glink_register_link_state_cb(ss->link_info,
+								(void *)ss);
+	if (IS_ERR_OR_NULL(ss->glink_handle)) {
+		pr_err("Could not register link state cb\n");
+		/* PTR_ERR(NULL) is 0, so map a NULL handle to an error code */
+		ret = ss->glink_handle ? PTR_ERR(ss->glink_handle) : -ENODEV;
+		goto err;
+	}
+
+	mutex_lock(&sysmon_glink_list_lock);
+	INIT_LIST_HEAD(&ss->list);
+	list_add_tail(&ss->list, &sysmon_glink_list);
+	mutex_unlock(&sysmon_glink_list_lock);
+	return 0;
+err:
+	destroy_workqueue(ss->glink_event_wq);
+err_wq:
+	kfree(link_info);
+	kfree(ss);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_glink_register);
+
+void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_subsys *ss = NULL;
+
+	if (!desc)
+		return;
+
+	ss = _find_subsys(desc);
+	if (ss == NULL)
+		return;
+
+	mutex_lock(&sysmon_glink_list_lock);
+	list_del(&ss->list);
+	mutex_unlock(&sysmon_glink_list_lock);
+	if (ss->handle)
+		glink_close(ss->handle);
+	destroy_workqueue(ss->glink_event_wq);
+	glink_unregister_link_state_cb(ss->glink_handle);
+	kfree(ss->link_info);
+	kfree(ss);
+}
+EXPORT_SYMBOL(sysmon_glink_unregister);
diff --git a/drivers/soc/qcom/sysmon-qmi.c b/drivers/soc/qcom/sysmon-qmi.c
new file mode 100644
index 0000000..a087ad6
--- /dev/null
+++ b/drivers/soc/qcom/sysmon-qmi.c
@@ -0,0 +1,732 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "sysmon-qmi: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/sysmon.h>
+
+#define QMI_RESP_BIT_SHIFT(x)			(x << 16)
+
+#define QMI_SSCTL_RESTART_REQ_V02		0x0020
+#define QMI_SSCTL_RESTART_RESP_V02		0x0020
+#define QMI_SSCTL_RESTART_READY_IND_V02		0x0020
+#define QMI_SSCTL_SHUTDOWN_REQ_V02		0x0021
+#define QMI_SSCTL_SHUTDOWN_RESP_V02		0x0021
+#define QMI_SSCTL_SHUTDOWN_READY_IND_V02	0x0021
+#define QMI_SSCTL_GET_FAILURE_REASON_REQ_V02	0x0022
+#define QMI_SSCTL_GET_FAILURE_REASON_RESP_V02	0x0022
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_V02		0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_RESP_V02		0x0023
+#define QMI_SSCTL_SUBSYS_EVENT_READY_IND_V02	0x0023
+
+#define QMI_SSCTL_ERROR_MSG_LENGTH		90
+#define QMI_SSCTL_SUBSYS_NAME_LENGTH		15
+#define QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH	40
+#define QMI_SSCTL_RESP_MSG_LENGTH		7
+#define QMI_SSCTL_EMPTY_MSG_LENGTH		0
+
+#define SSCTL_SERVICE_ID			0x2B
+#define SSCTL_VER_2				2
+#define SERVER_TIMEOUT				500
+#define SHUTDOWN_TIMEOUT			10000
+
+#define QMI_EOTI_DATA_TYPE	\
+{				\
+	.data_type = QMI_EOTI,	\
+	.elem_len  = 0,		\
+	.elem_size = 0,		\
+	.is_array  = NO_ARRAY,	\
+	.tlv_type  = 0x00,	\
+	.offset    = 0,		\
+	.ei_array  = NULL,	\
+},
+
+struct sysmon_qmi_data {
+	const char *name;
+	int instance_id;
+	struct work_struct svc_arrive;
+	struct work_struct svc_exit;
+	struct work_struct svc_rcv_msg;
+	struct qmi_handle *clnt_handle;
+	struct notifier_block notifier;
+	void *notif_handle;
+	bool legacy_version;
+	struct completion server_connect;
+	struct completion ind_recv;
+	struct list_head list;
+};
+
+static struct workqueue_struct *sysmon_wq;
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);
+static DEFINE_MUTEX(sysmon_lock);
+
+static void sysmon_clnt_recv_msg(struct work_struct *work);
+static void sysmon_clnt_svc_arrive(struct work_struct *work);
+static void sysmon_clnt_svc_exit(struct work_struct *work);
+
+static const int notif_map[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_POWERUP] = SSCTL_SSR_EVENT_BEFORE_POWERUP,
+	[SUBSYS_AFTER_POWERUP] = SSCTL_SSR_EVENT_AFTER_POWERUP,
+	[SUBSYS_BEFORE_SHUTDOWN] = SSCTL_SSR_EVENT_BEFORE_SHUTDOWN,
+	[SUBSYS_AFTER_SHUTDOWN] = SSCTL_SSR_EVENT_AFTER_SHUTDOWN,
+};
+
+static void sysmon_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
+			void *msg, unsigned int msg_len, void *ind_cb_priv)
+{
+	struct sysmon_qmi_data *data = NULL, *temp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, (char *)ind_cb_priv))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return;
+
+	pr_debug("%s: Indication received from subsystem\n", data->name);
+	complete(&data->ind_recv);
+}
+
+static int sysmon_svc_event_notify(struct notifier_block *this,
+				      unsigned long code,
+				      void *_cmd)
+{
+	struct sysmon_qmi_data *data = container_of(this,
+					struct sysmon_qmi_data, notifier);
+
+	switch (code) {
+	case QMI_SERVER_ARRIVE:
+		queue_work(sysmon_wq, &data->svc_arrive);
+		break;
+	case QMI_SERVER_EXIT:
+		queue_work(sysmon_wq, &data->svc_exit);
+		break;
+	default:
+		break;
+	}
+	return 0;
+}
+
+static void sysmon_clnt_notify(struct qmi_handle *handle,
+			     enum qmi_event_type event, void *notify_priv)
+{
+	struct sysmon_qmi_data *data = container_of(notify_priv,
+					struct sysmon_qmi_data, svc_arrive);
+
+	switch (event) {
+	case QMI_RECV_MSG:
+		schedule_work(&data->svc_rcv_msg);
+		break;
+	default:
+		break;
+	}
+}
+
+static void sysmon_clnt_svc_arrive(struct work_struct *work)
+{
+	int rc;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_arrive);
+
+	/* Create a Local client port for QMI communication */
+	data->clnt_handle = qmi_handle_create(sysmon_clnt_notify, work);
+	if (!data->clnt_handle) {
+		pr_err("QMI client handle alloc failed for %s\n", data->name);
+		return;
+	}
+
+	rc = qmi_connect_to_service(data->clnt_handle, SSCTL_SERVICE_ID,
+					SSCTL_VER_2, data->instance_id);
+	if (rc < 0) {
+		pr_err("%s: Could not connect handle to service\n",
+								data->name);
+		qmi_handle_destroy(data->clnt_handle);
+		data->clnt_handle = NULL;
+		return;
+	}
+	pr_info("Connection established between QMI handle and %s's SSCTL service\n"
+								, data->name);
+
+	rc = qmi_register_ind_cb(data->clnt_handle, sysmon_ind_cb,
+							(void *)data->name);
+	if (rc < 0)
+		pr_warn("%s: Could not register the indication callback\n",
+								data->name);
+}
+
+static void sysmon_clnt_svc_exit(struct work_struct *work)
+{
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_exit);
+
+	qmi_handle_destroy(data->clnt_handle);
+	data->clnt_handle = NULL;
+}
+
+static void sysmon_clnt_recv_msg(struct work_struct *work)
+{
+	int ret;
+	struct sysmon_qmi_data *data = container_of(work,
+					struct sysmon_qmi_data, svc_rcv_msg);
+
+	do {
+		pr_debug("%s: Notified about a Receive event\n", data->name);
+	} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
+
+	if (ret != -ENOMSG)
+		pr_err("%s: Error receiving message\n", data->name);
+}
+
+struct qmi_ssctl_subsys_event_req_msg {
+	uint8_t subsys_name_len;
+	char subsys_name[QMI_SSCTL_SUBSYS_NAME_LENGTH];
+	enum ssctl_ssr_event_enum_type event;
+	uint8_t evt_driven_valid;
+	enum ssctl_ssr_event_driven_enum_type evt_driven;
+};
+
+struct qmi_ssctl_subsys_event_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_subsys_event_req_msg_ei[] = {
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      subsys_name_len),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len  = QMI_SSCTL_SUBSYS_NAME_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array  = VAR_LEN_ARRAY,
+		.tlv_type  = 0x01,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      subsys_name),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      event),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      evt_driven_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint32_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_req_msg,
+				      evt_driven),
+		.ei_array  = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_subsys_event_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_subsys_event_resp_msg,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_event() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_event_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds, but with something other than an acknowledgment.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	struct qmi_ssctl_subsys_event_req_msg req;
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_subsys_event_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *event_ss = event_desc->name;
+	const char *dest_ss = dest_desc->name;
+	int ret;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL
+		|| dest_ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_event_no_qmi(dest_desc, event_desc, notif);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	snprintf(req.subsys_name, ARRAY_SIZE(req.subsys_name), "%s", event_ss);
+	req.subsys_name_len = strlen(req.subsys_name);
+	req.event = notif_map[notif];
+	req.evt_driven_valid = 1;
+	req.evt_driven = SSCTL_SSR_EVENT_FORCED;
+
+	req_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_SUBSYS_EVENT_REQ_LENGTH;
+	req_desc.ei_array = qmi_ssctl_subsys_event_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SUBSYS_EVENT_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_subsys_event_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_debug("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event);
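+
+/*
+ * Example (illustrative sketch, not part of the original code): a caller
+ * holding both subsystem descriptors could forward a shutdown notification
+ * as below. "adsp_desc" and "modem_desc" are hypothetical subsys_desc
+ * pointers assumed to have been obtained elsewhere.
+ *
+ *	int ret = sysmon_send_event(adsp_desc, modem_desc,
+ *				    SUBSYS_BEFORE_SHUTDOWN);
+ *	if (ret)
+ *		pr_err("Failed to notify ADSP of modem shutdown: %d\n", ret);
+ */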
+
+struct qmi_ssctl_shutdown_req_msg {
+};
+
+struct qmi_ssctl_shutdown_resp_msg {
+	struct qmi_response_type_v01 resp;
+};
+
+static struct elem_info qmi_ssctl_shutdown_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_shutdown_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(struct qmi_ssctl_shutdown_resp_msg,
+				      resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_send_shutdown() - Send a shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Reverts to using legacy sysmon API (sysmon_send_shutdown_no_qmi()) if
+ * client handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_shutdown_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	char req = 0;
+	int ret, shutdown_ack_ret;
+
+	if (dest_ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_send_shutdown_no_qmi(dest_desc);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_SHUTDOWN_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_shutdown_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_SHUTDOWN_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_RESP_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_shutdown_resp_msg_ei;
+
+	reinit_completion(&data->ind_recv);
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	shutdown_ack_ret = wait_for_shutdown_ack(dest_desc);
+	if (shutdown_ack_ret < 0) {
+		pr_err("shutdown_ack SMP2P bit for %s not set\n", data->name);
+		if (!completion_done(&data->ind_recv)) {
+			pr_err("QMI shutdown indication not received\n");
+			ret = shutdown_ack_ret;
+		}
+		goto out;
+	} else if (shutdown_ack_ret > 0)
+		goto out;
+
+	if (!wait_for_completion_timeout(&data->ind_recv,
+					msecs_to_jiffies(SHUTDOWN_TIMEOUT))) {
+		pr_err("Timed out waiting for shutdown indication from %s\n",
+							data->name);
+		ret = -ETIMEDOUT;
+	}
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown);
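+
+/*
+ * Example (sketch only): requesting a graceful shutdown before pulling a
+ * subsystem down; "modem_desc" is a hypothetical subsys_desc pointer.
+ *
+ *	ret = sysmon_send_shutdown(modem_desc);
+ *	if (ret == -ETIMEDOUT)
+ *		pr_warn("modem did not acknowledge shutdown in time\n");
+ */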
+
+struct qmi_ssctl_get_failure_reason_req_msg {
+};
+
+struct qmi_ssctl_get_failure_reason_resp_msg {
+	struct qmi_response_type_v01 resp;
+	uint8_t error_message_valid;
+	uint32_t error_message_len;
+	char error_message[QMI_SSCTL_ERROR_MSG_LENGTH];
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_req_msg_ei[] = {
+	QMI_EOTI_DATA_TYPE
+};
+
+static struct elem_info qmi_ssctl_get_failure_reason_resp_msg_ei[] = {
+	{
+		.data_type = QMI_STRUCT,
+		.elem_len  = 1,
+		.elem_size = sizeof(struct qmi_response_type_v01),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x02,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+							resp),
+		.ei_array  = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type = QMI_OPT_FLAG,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message_valid),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_DATA_LEN,
+		.elem_len  = 1,
+		.elem_size = sizeof(uint8_t),
+		.is_array  = NO_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message_len),
+		.ei_array  = NULL,
+	},
+	{
+		.data_type = QMI_UNSIGNED_1_BYTE,
+		.elem_len  = QMI_SSCTL_ERROR_MSG_LENGTH,
+		.elem_size = sizeof(char),
+		.is_array  = VAR_LEN_ARRAY,
+		.tlv_type  = 0x10,
+		.offset    = offsetof(
+			struct qmi_ssctl_get_failure_reason_resp_msg,
+						error_message),
+		.ei_array  = NULL,
+	},
+	QMI_EOTI_DATA_TYPE
+};
+
+/**
+ * sysmon_get_reason() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Reverts to using legacy sysmon API (sysmon_get_reason_no_qmi()) if client
+ * handle is not set.
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf, size_t len)
+{
+	struct msg_desc req_desc, resp_desc;
+	struct qmi_ssctl_get_failure_reason_resp_msg resp = { { 0, 0 } };
+	struct sysmon_qmi_data *data = NULL, *temp;
+	const char *dest_ss = dest_desc->name;
+	const char expect[] = "ssr:return:";
+	char req = 0;
+	int ret;
+
+	if (dest_ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(temp, &sysmon_list, list)
+		if (!strcmp(temp->name, dest_desc->name))
+			data = temp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (!data)
+		return -EINVAL;
+
+	if (!data->clnt_handle) {
+		pr_debug("No SSCTL_V2 support for %s. Revert to SSCTL_V0\n",
+								dest_ss);
+		ret = sysmon_get_reason_no_qmi(dest_desc, buf, len);
+		if (ret)
+			pr_debug("SSCTL_V0 implementation failed - %d\n", ret);
+
+		return ret;
+	}
+
+	req_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_REQ_V02;
+	req_desc.max_msg_len = QMI_SSCTL_EMPTY_MSG_LENGTH;
+	req_desc.ei_array = qmi_ssctl_get_failure_reason_req_msg_ei;
+
+	resp_desc.msg_id = QMI_SSCTL_GET_FAILURE_REASON_RESP_V02;
+	resp_desc.max_msg_len = QMI_SSCTL_ERROR_MSG_LENGTH;
+	resp_desc.ei_array = qmi_ssctl_get_failure_reason_resp_msg_ei;
+
+	mutex_lock(&sysmon_lock);
+	ret = qmi_send_req_wait(data->clnt_handle, &req_desc, &req,
+		sizeof(req), &resp_desc, &resp, sizeof(resp), SERVER_TIMEOUT);
+	if (ret < 0) {
+		pr_err("QMI send req to %s failed, ret - %d\n", dest_ss, ret);
+		goto out;
+	}
+
+	/* Check the response */
+	if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01) {
+		pr_err("QMI request failed 0x%x\n",
+					QMI_RESP_BIT_SHIFT(resp.resp.error));
+		ret = -EREMOTEIO;
+		goto out;
+	}
+
+	if (!strcmp(resp.error_message, expect)) {
+		pr_err("Unexpected response %s\n", resp.error_message);
+		ret = -EPROTO;
+		goto out;
+	}
+	/* Bound the copy by the caller's buffer length as well */
+	strlcpy(buf, resp.error_message,
+		min_t(size_t, len, resp.error_message_len + 1));
+out:
+	mutex_unlock(&sysmon_lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason);
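+
+/*
+ * Example (sketch only): retrieving the failure reason into a local buffer
+ * after a crash notification; "modem_desc" is a hypothetical subsys_desc
+ * pointer and the buffer size is arbitrary.
+ *
+ *	char reason[256];
+ *
+ *	if (!sysmon_get_reason(modem_desc, reason, sizeof(reason)))
+ *		pr_info("modem crash reason: %s\n", reason);
+ */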
+
+/**
+ * sysmon_notifier_register() - Initialize sysmon data for a subsystem.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * Adds an entry for the subsystem to the sysmon_list. If the subsystem does
+ * not support SSCTL v2, 0 is returned once the entry has been added. If
+ * SSCTL v2 is supported, a notifier block is also registered to receive
+ * events from the subsystem's SSCTL service, and the result of that
+ * registration is returned.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data;
+	int rc = 0;
+
+	data = kmalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	data->name = desc->name;
+	data->instance_id = desc->ssctl_instance_id;
+	data->clnt_handle = NULL;
+	data->legacy_version = false;
+
+	mutex_lock(&sysmon_list_lock);
+	if (data->instance_id <= 0) {
+		pr_debug("SSCTL instance id not defined\n");
+		goto add_list;
+	}
+
+	if (sysmon_wq)
+		goto notif_register;
+
+	sysmon_wq = create_singlethread_workqueue("sysmon_wq");
+	if (!sysmon_wq) {
+		mutex_unlock(&sysmon_list_lock);
+		pr_err("Could not create workqueue\n");
+		kfree(data);
+		return -ENOMEM;
+	}
+
+notif_register:
+	data->notifier.notifier_call = sysmon_svc_event_notify;
+	init_completion(&data->ind_recv);
+
+	INIT_WORK(&data->svc_arrive, sysmon_clnt_svc_arrive);
+	INIT_WORK(&data->svc_exit, sysmon_clnt_svc_exit);
+	INIT_WORK(&data->svc_rcv_msg, sysmon_clnt_recv_msg);
+
+	rc = qmi_svc_event_notifier_register(SSCTL_SERVICE_ID, SSCTL_VER_2,
+					data->instance_id, &data->notifier);
+	if (rc < 0)
+		pr_err("Notifier register failed for %s\n", data->name);
+add_list:
+	INIT_LIST_HEAD(&data->list);
+	list_add_tail(&data->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+
+	return rc;
+}
+EXPORT_SYMBOL(sysmon_notifier_register);
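+
+/*
+ * Example (sketch only): registering a descriptor with sysmon. "desc" is an
+ * assumed, already-populated subsys_desc and the instance id value is
+ * hypothetical.
+ *
+ *	desc->ssctl_instance_id = 0x12;
+ *	ret = sysmon_notifier_register(desc);
+ *	if (ret)
+ *		pr_err("sysmon registration for %s failed %d\n",
+ *							desc->name, ret);
+ */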
+
+/**
+ * sysmon_notifier_unregister() - Cleanup the subsystem's sysmon data.
+ * @desc:	Subsystem descriptor of the subsystem
+ *
+ * Removes the subsystem's entry from the sysmon_list. If the subsystem
+ * supports SSCTL v2, the notifier block registered to receive events from
+ * its SSCTL service is also unregistered.
+ */
+void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+	struct sysmon_qmi_data *data = NULL, *sysmon_data, *tmp;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon_data, tmp, &sysmon_list, list)
+		if (!strcmp(sysmon_data->name, desc->name)) {
+			data = sysmon_data;
+			list_del(&data->list);
+		}
+
+	if (data == NULL)
+		goto exit;
+
+	if (data->instance_id > 0)
+		qmi_svc_event_notifier_unregister(SSCTL_SERVICE_ID,
+			SSCTL_VER_2, data->instance_id, &data->notifier);
+
+	if (sysmon_wq && list_empty(&sysmon_list))
+		destroy_workqueue(sysmon_wq);
+exit:
+	mutex_unlock(&sysmon_list_lock);
+	kfree(data);
+}
+EXPORT_SYMBOL(sysmon_notifier_unregister);
diff --git a/drivers/soc/qcom/sysmon.c b/drivers/soc/qcom/sysmon.c
new file mode 100644
index 0000000..9810c3f
--- /dev/null
+++ b/drivers/soc/qcom/sysmon.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (c) 2011-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <soc/qcom/hsic_sysmon.h>
+#include <soc/qcom/sysmon.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/smd.h>
+
+#define TX_BUF_SIZE	50
+#define RX_BUF_SIZE	500
+#define TIMEOUT_MS	500
+
+enum transports {
+	TRANSPORT_SMD,
+	TRANSPORT_HSIC,
+};
+
+struct sysmon_subsys {
+	struct mutex		lock;
+	struct smd_channel	*chan;
+	bool			chan_open;
+	struct completion	resp_ready;
+	char			rx_buf[RX_BUF_SIZE];
+	enum transports		transport;
+	struct device		*dev;
+	u32			pid;
+	struct list_head	list;
+};
+
+static const char *notif_name[SUBSYS_NOTIF_TYPE_COUNT] = {
+	[SUBSYS_BEFORE_SHUTDOWN] = "before_shutdown",
+	[SUBSYS_AFTER_SHUTDOWN]  = "after_shutdown",
+	[SUBSYS_BEFORE_POWERUP]  = "before_powerup",
+	[SUBSYS_AFTER_POWERUP]   = "after_powerup",
+};
+
+static LIST_HEAD(sysmon_list);
+static DEFINE_MUTEX(sysmon_list_lock);
+
+static int sysmon_send_smd(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+
+	if (!ss->chan_open)
+		return -ENODEV;
+
+	init_completion(&ss->resp_ready);
+	pr_debug("Sending SMD message: %s\n", tx_buf);
+	smd_write(ss->chan, tx_buf, len);
+	ret = wait_for_completion_timeout(&ss->resp_ready,
+				  msecs_to_jiffies(TIMEOUT_MS));
+	if (!ret)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int sysmon_send_hsic(struct sysmon_subsys *ss, const char *tx_buf,
+			    size_t len)
+{
+	int ret;
+	size_t actual_len;
+
+	pr_debug("Sending HSIC message: %s\n", tx_buf);
+	ret = hsic_sysmon_write(HSIC_SYSMON_DEV_EXT_MODEM,
+				tx_buf, len, TIMEOUT_MS);
+	if (ret)
+		return ret;
+	ret = hsic_sysmon_read(HSIC_SYSMON_DEV_EXT_MODEM, ss->rx_buf,
+			       ARRAY_SIZE(ss->rx_buf), &actual_len, TIMEOUT_MS);
+	return ret;
+}
+
+static int sysmon_send_msg(struct sysmon_subsys *ss, const char *tx_buf,
+			   size_t len)
+{
+	int ret;
+
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		ret = sysmon_send_smd(ss, tx_buf, len);
+		break;
+	case TRANSPORT_HSIC:
+		ret = sysmon_send_hsic(ss, tx_buf, len);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (!ret)
+		pr_debug("Received response: %s\n", ss->rx_buf);
+
+	return ret;
+}
+
+/**
+ * sysmon_send_event_no_qmi() - Notify a subsystem of another's state change
+ * @dest_desc:	Subsystem descriptor of the subsystem the notification
+ * should be sent to
+ * @event_desc:	Subsystem descriptor of the subsystem that generated the
+ * notification
+ * @notif:	ID of the notification type (ex. SUBSYS_BEFORE_SHUTDOWN)
+ *
+ * Returns 0 for success, -EINVAL for invalid destination or notification IDs,
+ * -ENODEV if the transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds, but with something other than an acknowledgment.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif)
+{
+	char tx_buf[TX_BUF_SIZE];
+	int ret;
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char *event_ss = event_desc->name;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	if (notif < 0 || notif >= SUBSYS_NOTIF_TYPE_COUNT || event_ss == NULL ||
+						notif_name[notif] == NULL)
+		return -EINVAL;
+
+	snprintf(tx_buf, ARRAY_SIZE(tx_buf), "ssr:%s:%s", event_ss,
+		 notif_name[notif]);
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, strlen(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, "ssr:ack")) {
+		pr_debug("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_event_no_qmi);
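+
+/*
+ * For reference, the on-the-wire format implied by the code above is a plain
+ * string of the form
+ *
+ *	"ssr:<event subsystem name>:<notification>"
+ *
+ * e.g. "ssr:modem:before_shutdown", and the remote side is expected to reply
+ * with the literal string "ssr:ack". The shutdown and failure-reason helpers
+ * below use "system:shutdown"/"system:ack" and "ssr:retrieve:sfr"/
+ * "ssr:return:<reason>" in the same way.
+ */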
+
+/**
+ * sysmon_send_shutdown_no_qmi() - send shutdown command to a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to send to
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char tx_buf[] = "system:shutdown";
+	const char expect[] = "system:ack";
+	int ret;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strcmp(ss->rx_buf, expect)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+	}
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_send_shutdown_no_qmi);
+
+/**
+ * sysmon_get_reason_no_qmi() - Retrieve failure reason from a subsystem.
+ * @dest_desc:	Subsystem descriptor of the subsystem to query
+ * @buf:	Caller-allocated buffer for the returned NUL-terminated reason
+ * @len:	Length of @buf
+ *
+ * Returns 0 for success, -EINVAL for an invalid destination, -ENODEV if
+ * the SMD transport channel is not open, -ETIMEDOUT if the destination
+ * subsystem does not respond, and -EPROTO if the destination subsystem
+ * responds with something unexpected.
+ *
+ * If CONFIG_MSM_SYSMON_COMM is not defined, always return success (0).
+ */
+int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len)
+{
+	struct sysmon_subsys *tmp, *ss = NULL;
+	const char tx_buf[] = "ssr:retrieve:sfr";
+	const char expect[] = "ssr:return:";
+	size_t prefix_len = ARRAY_SIZE(expect) - 1;
+	int ret;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry(tmp, &sysmon_list, list)
+		if (tmp->pid == dest_desc->sysmon_pid)
+			ss = tmp;
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL || buf == NULL || len == 0)
+		return -EINVAL;
+
+	if (ss->dev == NULL)
+		return -ENODEV;
+
+	mutex_lock(&ss->lock);
+	ret = sysmon_send_msg(ss, tx_buf, ARRAY_SIZE(tx_buf));
+	if (ret) {
+		pr_err("Message sending failed %d\n", ret);
+		goto out;
+	}
+
+	if (strncmp(ss->rx_buf, expect, prefix_len)) {
+		pr_err("Unexpected response %s\n", ss->rx_buf);
+		ret = -EPROTO;
+		goto out;
+	}
+	strlcpy(buf, ss->rx_buf + prefix_len, len);
+out:
+	mutex_unlock(&ss->lock);
+	return ret;
+}
+EXPORT_SYMBOL(sysmon_get_reason_no_qmi);
+
+static void sysmon_smd_notify(void *priv, unsigned int smd_event)
+{
+	struct sysmon_subsys *ss = priv;
+
+	switch (smd_event) {
+	case SMD_EVENT_DATA: {
+		if (smd_read_avail(ss->chan) > 0) {
+			smd_read_from_cb(ss->chan, ss->rx_buf,
+					 ARRAY_SIZE(ss->rx_buf));
+			complete(&ss->resp_ready);
+		}
+		break;
+	}
+	case SMD_EVENT_OPEN:
+		ss->chan_open = true;
+		break;
+	case SMD_EVENT_CLOSE:
+		ss->chan_open = false;
+		break;
+	}
+}
+
+static int sysmon_probe(struct platform_device *pdev)
+{
+	struct sysmon_subsys *ss;
+	int ret;
+
+	if (pdev->id < 0 || pdev->id >= SYSMON_NUM_SS)
+		return -ENODEV;
+
+	ss = devm_kzalloc(&pdev->dev, sizeof(*ss), GFP_KERNEL);
+	if (!ss)
+		return -ENOMEM;
+
+	mutex_init(&ss->lock);
+	if (pdev->id == SYSMON_SS_EXT_MODEM) {
+		ss->transport = TRANSPORT_HSIC;
+		ret = hsic_sysmon_open(HSIC_SYSMON_DEV_EXT_MODEM);
+		if (ret) {
+			pr_err("HSIC open failed\n");
+			return ret;
+		}
+	} else if (pdev->id < SMD_NUM_TYPE) {
+		ss->transport = TRANSPORT_SMD;
+		ret = smd_named_open_on_edge("sys_mon", pdev->id, &ss->chan,
+						ss, sysmon_smd_notify);
+		if (ret) {
+			pr_err("SMD open failed\n");
+			return ret;
+		}
+		smd_disable_read_intr(ss->chan);
+	} else
+		return -EINVAL;
+
+	ss->dev = &pdev->dev;
+	ss->pid = pdev->id;
+
+	mutex_lock(&sysmon_list_lock);
+	INIT_LIST_HEAD(&ss->list);
+	list_add_tail(&ss->list, &sysmon_list);
+	mutex_unlock(&sysmon_list_lock);
+	return 0;
+}
+
+static int sysmon_remove(struct platform_device *pdev)
+{
+	struct sysmon_subsys *sysmon, *tmp, *ss = NULL;
+
+	mutex_lock(&sysmon_list_lock);
+	list_for_each_entry_safe(sysmon, tmp, &sysmon_list, list) {
+		if (sysmon->pid == pdev->id) {
+			ss = sysmon;
+			list_del(&ss->list);
+		}
+	}
+	mutex_unlock(&sysmon_list_lock);
+
+	if (ss == NULL)
+		return -EINVAL;
+
+	mutex_lock(&ss->lock);
+	switch (ss->transport) {
+	case TRANSPORT_SMD:
+		smd_close(ss->chan);
+		break;
+	case TRANSPORT_HSIC:
+		hsic_sysmon_close(HSIC_SYSMON_DEV_EXT_MODEM);
+		break;
+	}
+	mutex_unlock(&ss->lock);
+
+	return 0;
+}
+
+static struct platform_driver sysmon_driver = {
+	.probe		= sysmon_probe,
+	.remove		= sysmon_remove,
+	.driver		= {
+		.name		= "sys_mon",
+		.owner		= THIS_MODULE,
+	},
+};
+
+static int __init sysmon_init(void)
+{
+	return platform_driver_register(&sysmon_driver);
+}
+subsys_initcall(sysmon_init);
+
+static void __exit sysmon_exit(void)
+{
+	platform_driver_unregister(&sysmon_driver);
+}
+module_exit(sysmon_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("system monitor communication library");
+MODULE_ALIAS("platform:sys_mon");
diff --git a/drivers/soc/qcom/system_health_monitor.c b/drivers/soc/qcom/system_health_monitor.c
new file mode 100644
index 0000000..fcffa51
--- /dev/null
+++ b/drivers/soc/qcom/system_health_monitor.c
@@ -0,0 +1,965 @@
+/* Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/cdev.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/ipc_logging.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/qmi_encdec.h>
+#include <linux/ratelimit.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/srcu.h>
+#include <linux/thread_info.h>
+#include <linux/uaccess.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include "system_health_monitor_v01.h"
+
+#define MODULE_NAME "system_health_monitor"
+
+#define SUBSYS_NAME_LEN 256
+#define SSRESTART_STRLEN 256
+
+enum {
+	SHM_INFO_FLAG = 0x1,
+	SHM_DEBUG_FLAG = 0x2,
+};
+static int shm_debug_mask = SHM_INFO_FLAG;
+module_param_named(debug_mask, shm_debug_mask,
+		   int, 0664);
+static int shm_default_timeout_ms = 2000;
+module_param_named(default_timeout_ms, shm_default_timeout_ms,
+		   int, 0664);
+
+#define DEFAULT_SHM_RATELIMIT_INTERVAL (HZ / 5)
+#define DEFAULT_SHM_RATELIMIT_BURST 2
+
+#define SHM_ILCTXT_NUM_PAGES 2
+static void *shm_ilctxt;
+#define SHM_INFO_LOG(x...) do { \
+	if ((shm_debug_mask & SHM_INFO_FLAG) && shm_ilctxt) \
+		ipc_log_string(shm_ilctxt, x); \
+} while (0)
+
+#define SHM_DEBUG(x...) do { \
+	if ((shm_debug_mask & SHM_DEBUG_FLAG) && shm_ilctxt) \
+		ipc_log_string(shm_ilctxt, x); \
+} while (0)
+
+#define SHM_ERR(x...) do { \
+	if (shm_ilctxt) \
+		ipc_log_string(shm_ilctxt, x); \
+	pr_err(x); \
+} while (0)
+
+struct class *system_health_monitor_classp;
+static dev_t system_health_monitor_dev;
+static struct cdev system_health_monitor_cdev;
+static struct device *system_health_monitor_devp;
+
+#define SYSTEM_HEALTH_MONITOR_IOCTL_MAGIC (0xC3)
+
+#define CHECK_SYSTEM_HEALTH_IOCTL \
+	_IOR(SYSTEM_HEALTH_MONITOR_IOCTL_MAGIC, 0, unsigned int)
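+
+/*
+ * Example (illustrative sketch, not part of the original code): a user-space
+ * client could trigger a health check through this ioctl roughly as follows.
+ * The device node name matches the device_create() call below; the ioctl
+ * argument is ignored by the handler.
+ *
+ *	int fd = open("/dev/system_health_monitor", O_RDONLY);
+ *
+ *	if (fd >= 0) {
+ *		ioctl(fd, CHECK_SYSTEM_HEALTH_IOCTL, 0);
+ *		close(fd);
+ *	}
+ */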
+
+static struct workqueue_struct *shm_svc_workqueue;
+static void shm_svc_recv_msg(struct work_struct *work);
+static DECLARE_DELAYED_WORK(work_recv_msg, shm_svc_recv_msg);
+static struct qmi_handle *shm_svc_handle;
+
+struct disconnect_work {
+	struct work_struct work;
+	void *conn_h;
+};
+static void shm_svc_disconnect_worker(struct work_struct *work);
+
+struct req_work {
+	struct work_struct work;
+	void *conn_h;
+	void *req_h;
+	unsigned int msg_id;
+	void *req;
+};
+static void shm_svc_req_worker(struct work_struct *work);
+
+/**
+ * struct hma_info - Information about a Health Monitor Agent (HMA)
+ * @list:		List to chain up the hma to the hma_list.
+ * @subsys_name:	Name of the remote subsystem that hosts this HMA.
+ * @ssrestart_string:	String to restart the subsystem that hosts this HMA.
+ * @conn_h:		Opaque connection handle to the HMA.
+ * @timeout:		Timeout as registered by the HMA.
+ * @check_count:	Count of the health check attempts.
+ * @report_count:	Count of the health reports handled.
+ * @reset_srcu:		Sleepable RCU to protect the reset state.
+ * @is_in_reset:	Flag to identify if the remote subsystem is in reset.
+ * @restart_nb:		Notifier block to receive subsystem restart events.
+ * @restart_nb_h:	Handle to subsystem restart notifier block.
+ * @rs:			Rate-limit the health check.
+ */
+struct hma_info {
+	struct list_head list;
+	char subsys_name[SUBSYS_NAME_LEN];
+	char ssrestart_string[SSRESTART_STRLEN];
+	void *conn_h;
+	uint32_t timeout;
+	atomic_t check_count;
+	atomic_t report_count;
+	struct srcu_struct reset_srcu;
+	atomic_t is_in_reset;
+	struct notifier_block restart_nb;
+	void *restart_nb_h;
+	struct ratelimit_state rs;
+};
+
+struct restart_work {
+	struct delayed_work dwork;
+	struct hma_info *hmap;
+	void *conn_h;
+	int check_count;
+};
+static void shm_svc_restart_worker(struct work_struct *work);
+
+static DEFINE_MUTEX(hma_info_list_lock);
+static LIST_HEAD(hma_info_list);
+
+static struct msg_desc shm_svc_register_req_desc = {
+	.max_msg_len = HMON_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_HEALTH_MON_REG_REQ_V01,
+	.ei_array = hmon_register_req_msg_v01_ei,
+};
+
+static struct msg_desc shm_svc_register_resp_desc = {
+	.max_msg_len = HMON_REGISTER_RESP_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_HEALTH_MON_REG_RESP_V01,
+	.ei_array = hmon_register_resp_msg_v01_ei,
+};
+
+static struct msg_desc shm_svc_health_check_ind_desc = {
+	.max_msg_len = HMON_HEALTH_CHECK_IND_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_HEALTH_MON_HEALTH_CHECK_IND_V01,
+	.ei_array = hmon_health_check_ind_msg_v01_ei,
+};
+
+static struct msg_desc shm_svc_health_check_complete_req_desc = {
+	.max_msg_len = HMON_HEALTH_CHECK_COMPLETE_REQ_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_REQ_V01,
+	.ei_array = hmon_health_check_complete_req_msg_v01_ei,
+};
+
+static struct msg_desc shm_svc_health_check_complete_resp_desc = {
+	.max_msg_len = HMON_HEALTH_CHECK_COMPLETE_RESP_MSG_V01_MAX_MSG_LEN,
+	.msg_id = QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_RESP_V01,
+	.ei_array = hmon_health_check_complete_resp_msg_v01_ei,
+};
+
+/**
+ * restart_notifier_cb() - Callback to handle SSR events
+ * @this:	Reference to the notifier block.
+ * @code:	Type of SSR event.
+ * @data:	Data that needs to be handled as part of SSR event.
+ *
+ * This function is used to identify if a subsystem which hosts an HMA
+ * is already in reset, so that a duplicate subsystem restart is not
+ * triggered.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int restart_notifier_cb(struct notifier_block *this,
+			       unsigned long code, void *data)
+{
+	struct hma_info *tmp_hma_info =
+		container_of(this, struct hma_info, restart_nb);
+
+	if (code == SUBSYS_BEFORE_SHUTDOWN) {
+		atomic_set(&tmp_hma_info->is_in_reset, 1);
+		synchronize_srcu(&tmp_hma_info->reset_srcu);
+		SHM_INFO_LOG("%s: %s going to shutdown\n",
+			 __func__, tmp_hma_info->ssrestart_string);
+	} else if (code == SUBSYS_AFTER_POWERUP) {
+		atomic_set(&tmp_hma_info->is_in_reset, 0);
+		SHM_INFO_LOG("%s: %s powered up\n",
+			 __func__, tmp_hma_info->ssrestart_string);
+	}
+	return 0;
+}
+
+/**
+ * shm_svc_restart_worker() - Worker to restart a subsystem
+ * @work:	Reference to the work item being handled.
+ *
+ * This function restarts the subsystem which hosts an HMA. This function
+ * checks the following before triggering a restart:
+ * 1) Health check report is not received.
+ * 2) The subsystem has not undergone a reset.
+ * 3) The subsystem is not undergoing a reset.
+ */
+static void shm_svc_restart_worker(struct work_struct *work)
+{
+	int rc;
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct restart_work *rwp =
+		container_of(dwork, struct restart_work, dwork);
+	struct hma_info *tmp_hma_info = rwp->hmap;
+	int rcu_id;
+
+	if (rwp->check_count <= atomic_read(&tmp_hma_info->report_count)) {
+		SHM_INFO_LOG("%s: No Action on Health Check Attempt %d to %s\n",
+			 __func__, rwp->check_count,
+			 tmp_hma_info->subsys_name);
+		kfree(rwp);
+		return;
+	}
+
+	if (!tmp_hma_info->conn_h || rwp->conn_h != tmp_hma_info->conn_h) {
+		SHM_INFO_LOG(
+			"%s: Connection to %s is reset. No further action\n",
+			 __func__, tmp_hma_info->subsys_name);
+		kfree(rwp);
+		return;
+	}
+
+	rcu_id = srcu_read_lock(&tmp_hma_info->reset_srcu);
+	if (atomic_read(&tmp_hma_info->is_in_reset)) {
+		SHM_INFO_LOG(
+			"%s: %s is going thru restart. No further action\n",
+			 __func__, tmp_hma_info->subsys_name);
+		srcu_read_unlock(&tmp_hma_info->reset_srcu, rcu_id);
+		kfree(rwp);
+		return;
+	}
+
+	SHM_ERR("%s: HMA in %s failed to respond in time. Restarting %s...\n",
+		__func__, tmp_hma_info->subsys_name,
+		tmp_hma_info->ssrestart_string);
+	rc = subsystem_restart(tmp_hma_info->ssrestart_string);
+	if (rc < 0)
+		SHM_ERR("%s: Error %d restarting %s\n",
+			__func__, rc, tmp_hma_info->ssrestart_string);
+	srcu_read_unlock(&tmp_hma_info->reset_srcu, rcu_id);
+	kfree(rwp);
+}
+
+/**
+ * shm_send_health_check_ind() - Initiate a subsystem health check
+ * @tmp_hma_info:	Info about an HMA which resides in a subsystem.
+ *
+ * This function initiates a health check of the subsystem that hosts the
+ * HMA by sending a health check QMI indication message.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int shm_send_health_check_ind(struct hma_info *tmp_hma_info)
+{
+	int rc;
+	struct restart_work *rwp;
+
+	if (!tmp_hma_info->conn_h)
+		return 0;
+
+	/* Rate limit the health check as configured by the subsystem */
+	if (!__ratelimit(&tmp_hma_info->rs))
+		return 0;
+
+	rwp = kzalloc(sizeof(*rwp), GFP_KERNEL);
+	if (!rwp) {
+		SHM_ERR("%s: Error allocating restart work\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_DELAYED_WORK(&rwp->dwork, shm_svc_restart_worker);
+	rwp->hmap = tmp_hma_info;
+	rwp->conn_h = tmp_hma_info->conn_h;
+
+	rc = qmi_send_ind(shm_svc_handle, tmp_hma_info->conn_h,
+			  &shm_svc_health_check_ind_desc, NULL, 0);
+	if (rc < 0) {
+		SHM_ERR("%s: Send Error %d to %s\n",
+			__func__, rc, tmp_hma_info->subsys_name);
+		kfree(rwp);
+		return rc;
+	}
+
+	rwp->check_count = atomic_inc_return(&tmp_hma_info->check_count);
+	queue_delayed_work(shm_svc_workqueue, &rwp->dwork,
+			   msecs_to_jiffies(tmp_hma_info->timeout));
+	return 0;
+}
+
+/**
+ * kern_check_system_health() - Check the system health
+ *
+ * This function is used by the kernel drivers to initiate the
+ * system health check. This function in turn triggers SHM to send
+ * QMI message to all the HMAs connected to it.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int kern_check_system_health(void)
+{
+	int rc;
+	int final_rc = 0;
+	struct hma_info *tmp_hma_info;
+
+	mutex_lock(&hma_info_list_lock);
+	list_for_each_entry(tmp_hma_info, &hma_info_list, list) {
+		rc = shm_send_health_check_ind(tmp_hma_info);
+		if (rc < 0) {
+			SHM_ERR("%s by %s failed for %s - rc %d\n", __func__,
+				current->comm, tmp_hma_info->subsys_name, rc);
+			final_rc = rc;
+		}
+	}
+	mutex_unlock(&hma_info_list_lock);
+	return final_rc;
+}
+EXPORT_SYMBOL(kern_check_system_health);
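+
+/*
+ * Example (sketch only): a watchdog or error-recovery driver could kick off
+ * a health check of all registered HMAs from kernel context:
+ *
+ *	int err = kern_check_system_health();
+ *
+ *	if (err)
+ *		pr_err("health check could not be initiated: %d\n", err);
+ */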
+
+/**
+ * shm_svc_connect_cb() - Callback to handle connect event from an HMA
+ * @handle:	QMI Service handle in which a connect event is received.
+ * @conn_h:	Opaque reference to the connection handle.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int shm_svc_connect_cb(struct qmi_handle *handle, void *conn_h)
+{
+	SHM_DEBUG("%s: conn_h %p\n", __func__, conn_h);
+	return 0;
+}
+
+/**
+ * shm_svc_disconnect_worker() - Worker to handle disconnect event from an HMA
+ * @work:	Reference to the work item.
+ *
+ * This function handles the disconnect event from an HMA in a deferred manner.
+ */
+static void shm_svc_disconnect_worker(struct work_struct *work)
+{
+	struct hma_info *tmp_hma_info;
+	struct disconnect_work *dwp =
+		container_of(work, struct disconnect_work, work);
+
+	mutex_lock(&hma_info_list_lock);
+	list_for_each_entry(tmp_hma_info, &hma_info_list, list) {
+		if (dwp->conn_h == tmp_hma_info->conn_h) {
+			SHM_INFO_LOG("%s: conn_h %p to HMA in %s exited\n",
+				 __func__, dwp->conn_h,
+				 tmp_hma_info->subsys_name);
+			tmp_hma_info->conn_h = NULL;
+			atomic_set(&tmp_hma_info->report_count,
+				   atomic_read(&tmp_hma_info->check_count));
+			break;
+		}
+	}
+	mutex_unlock(&hma_info_list_lock);
+	kfree(dwp);
+}
+
+/**
+ * shm_svc_disconnect_cb() - Callback to handle disconnect event from an HMA
+ * @handle:	QMI Service handle in which a disconnect event is received.
+ * @conn_h:	Opaque reference to the connection handle.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int shm_svc_disconnect_cb(struct qmi_handle *handle, void *conn_h)
+{
+	struct disconnect_work *dwp;
+
+	dwp = kzalloc(sizeof(*dwp), GFP_ATOMIC);
+	if (!dwp) {
+		SHM_ERR("%s: Error allocating work item\n", __func__);
+		return -ENOMEM;
+	}
+
+	INIT_WORK(&dwp->work, shm_svc_disconnect_worker);
+	dwp->conn_h = conn_h;
+	queue_work(shm_svc_workqueue, &dwp->work);
+	return 0;
+}
+
+/**
+ * shm_svc_req_desc_cb() - Callback to identify the request descriptor
+ * @msg_id:	Message ID of the QMI request.
+ * @req_desc:	Request Descriptor of the QMI request.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int shm_svc_req_desc_cb(unsigned int msg_id,
+			       struct msg_desc **req_desc)
+{
+	int rc;
+
+	SHM_DEBUG("%s: called for msg_id %d\n", __func__, msg_id);
+	switch (msg_id) {
+	case QMI_HEALTH_MON_REG_REQ_V01:
+		*req_desc = &shm_svc_register_req_desc;
+		rc = sizeof(struct hmon_register_req_msg_v01);
+		break;
+
+	case QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_REQ_V01:
+		*req_desc = &shm_svc_health_check_complete_req_desc;
+		rc = sizeof(struct hmon_health_check_complete_req_msg_v01);
+		break;
+
+	default:
+		SHM_ERR("%s: Invalid msg_id %d\n", __func__, msg_id);
+		rc = -ENOTSUPP;
+	}
+	return rc;
+}
+
+/**
+ * handle_health_mon_reg_req() - Handle the HMA register QMI request
+ * @conn_h:	Opaque reference to the connection handle to an HMA.
+ * @req_h:	Opaque reference to the request handle.
+ * @buf:	Pointer to the QMI request structure.
+ *
+ * This function handles the register request from an HMA. The request
+ * contains the subsystem name which hosts the HMA and health check
+ * timeout for the HMA.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int handle_health_mon_reg_req(void *conn_h, void *req_h, void *buf)
+{
+	int rc;
+	struct hma_info *tmp_hma_info;
+	struct hmon_register_req_msg_v01 *req =
+		(struct hmon_register_req_msg_v01 *)buf;
+	struct hmon_register_resp_msg_v01 resp;
+	bool hma_info_found = false;
+
+	if (!req->name_valid) {
+		SHM_ERR("%s: host name invalid\n", __func__);
+		goto send_reg_resp;
+	}
+
+	mutex_lock(&hma_info_list_lock);
+	list_for_each_entry(tmp_hma_info, &hma_info_list, list) {
+		if (!strcmp(tmp_hma_info->subsys_name, req->name) &&
+		    !tmp_hma_info->conn_h) {
+			tmp_hma_info->conn_h = conn_h;
+			if (req->timeout_valid)
+				tmp_hma_info->timeout = req->timeout;
+			else
+				tmp_hma_info->timeout = shm_default_timeout_ms;
+			ratelimit_state_init(&tmp_hma_info->rs,
+					     DEFAULT_SHM_RATELIMIT_INTERVAL,
+					     DEFAULT_SHM_RATELIMIT_BURST);
+			SHM_INFO_LOG("%s: from %s timeout_ms %d\n",
+				 __func__, req->name, tmp_hma_info->timeout);
+			hma_info_found = true;
+		} else if (!strcmp(tmp_hma_info->subsys_name, req->name)) {
+			SHM_ERR("%s: Duplicate HMA from %s - cur %p, new %p\n",
+				__func__, req->name, tmp_hma_info->conn_h,
+				conn_h);
+		}
+	}
+	mutex_unlock(&hma_info_list_lock);
+
+send_reg_resp:
+	if (hma_info_found) {
+		memset(&resp, 0, sizeof(resp));
+	} else {
+		resp.resp.result = QMI_RESULT_FAILURE_V01;
+		resp.resp.error = QMI_ERR_INVALID_ID_V01;
+	}
+	rc = qmi_send_resp(shm_svc_handle, conn_h, req_h,
+			   &shm_svc_register_resp_desc, &resp, sizeof(resp));
+	if (rc < 0)
+		SHM_ERR("%s: send_resp failed to %s - rc %d\n",
+			__func__, req->name, rc);
+	return rc;
+}
+
+/**
+ * handle_health_mon_health_check_complete_req() - Handle the HMA health report
+ * @conn_h:	Opaque reference to the connection handle to an HMA.
+ * @req_h:	Opaque reference to the request handle.
+ * @buf:	Pointer to the QMI request structure.
+ *
+ * This function handles health reports from an HMA. The health report is sent
+ * in response to a health check QMI indication sent by SHM.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int handle_health_mon_health_check_complete_req(void *conn_h,
+						void *req_h, void *buf)
+{
+	int rc;
+	struct hma_info *tmp_hma_info;
+	struct hmon_health_check_complete_req_msg_v01 *req =
+		(struct hmon_health_check_complete_req_msg_v01 *)buf;
+	struct hmon_health_check_complete_resp_msg_v01 resp;
+	bool hma_info_found = false;
+
+	if (!req->result_valid) {
+		SHM_ERR("%s: Invalid result\n", __func__);
+		goto send_resp;
+	}
+
+	mutex_lock(&hma_info_list_lock);
+	list_for_each_entry(tmp_hma_info, &hma_info_list, list) {
+		if (tmp_hma_info->conn_h != conn_h)
+			continue;
+		hma_info_found = true;
+		if (req->result == HEALTH_MONITOR_CHECK_SUCCESS_V01) {
+			atomic_inc(&tmp_hma_info->report_count);
+			SHM_INFO_LOG("%s: %s Health Check Success\n",
+				 __func__, tmp_hma_info->subsys_name);
+		} else {
+			SHM_INFO_LOG("%s: %s Health Check Failure\n",
+				 __func__, tmp_hma_info->subsys_name);
+		}
+	}
+	mutex_unlock(&hma_info_list_lock);
+
+send_resp:
+	if (hma_info_found) {
+		memset(&resp, 0, sizeof(resp));
+	} else {
+		resp.resp.result = QMI_RESULT_FAILURE_V01;
+		resp.resp.error = QMI_ERR_INVALID_ID_V01;
+	}
+	rc = qmi_send_resp(shm_svc_handle, conn_h, req_h,
+			   &shm_svc_health_check_complete_resp_desc,
+			   &resp, sizeof(resp));
+	if (rc < 0)
+		SHM_ERR("%s: send_resp failed - rc %d\n",
+			__func__, rc);
+	return rc;
+}
+
+/**
+ * shm_svc_req_worker() - Worker to handle QMI requests
+ * @work:	Reference to the work item.
+ *
+ * This function handles QMI requests from HMAs in a deferred manner.
+ */
+static void shm_svc_req_worker(struct work_struct *work)
+{
+	struct req_work *rwp =
+		container_of(work, struct req_work, work);
+
+	switch (rwp->msg_id) {
+	case QMI_HEALTH_MON_REG_REQ_V01:
+		handle_health_mon_reg_req(rwp->conn_h, rwp->req_h, rwp->req);
+		break;
+
+	case QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_REQ_V01:
+		handle_health_mon_health_check_complete_req(rwp->conn_h,
+						rwp->req_h, rwp->req);
+		break;
+	default:
+		SHM_ERR("%s: Invalid msg_id %d\n", __func__, rwp->msg_id);
+	}
+	kfree(rwp->req);
+	kfree(rwp);
+}
+
+/**
+ * shm_svc_req_cb() - Callback to notify about QMI requests from HMA
+ * @handle:	QMI Service handle in which the request is received.
+ * @conn_h:	Opaque reference to the connection handle to an HMA.
+ * @req_h:	Opaque reference to the request handle.
+ * @msg_id:	Message ID of the request.
+ * @req:	Pointer to the request structure.
+ *
+ * This function is called by kernel QMI Service Interface to notify the
+ * incoming QMI request on the SHM service handle.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int shm_svc_req_cb(struct qmi_handle *handle, void *conn_h,
+			  void *req_h, unsigned int msg_id, void *req)
+{
+	struct req_work *rwp;
+	void *req_buf;
+	uint32_t req_sz = 0;
+
+	rwp = kzalloc(sizeof(*rwp), GFP_KERNEL);
+	if (!rwp) {
+		SHM_ERR("%s: Error allocating work item\n", __func__);
+		return -ENOMEM;
+	}
+
+	switch (msg_id) {
+	case QMI_HEALTH_MON_REG_REQ_V01:
+		req_sz = sizeof(struct hmon_register_req_msg_v01);
+		break;
+
+	case QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_REQ_V01:
+		req_sz = sizeof(struct hmon_health_check_complete_req_msg_v01);
+		break;
+
+	default:
+		SHM_ERR("%s: Invalid msg_id %d\n", __func__, msg_id);
+		kfree(rwp);
+		return -ENOTSUPP;
+	}
+
+	req_buf = kzalloc(req_sz, GFP_KERNEL);
+	if (!req_buf) {
+		SHM_ERR("%s: Error allocating request buffer\n", __func__);
+		kfree(rwp);
+		return -ENOMEM;
+	}
+	memcpy(req_buf, req, req_sz);
+
+	INIT_WORK(&rwp->work, shm_svc_req_worker);
+	rwp->conn_h = conn_h;
+	rwp->req_h = req_h;
+	rwp->msg_id = msg_id;
+	rwp->req = req_buf;
+	queue_work(shm_svc_workqueue, &rwp->work);
+	return 0;
+}
+
+/**
+ * shm_svc_recv_msg() - Worker to receive a QMI message
+ * @work:	Reference to the work item.
+ *
+ * This function handles any incoming QMI messages to the SHM QMI service.
+ */
+static void shm_svc_recv_msg(struct work_struct *work)
+{
+	int rc;
+
+	do {
+		SHM_DEBUG("%s: Notified about a receive event\n", __func__);
+	} while ((rc = qmi_recv_msg(shm_svc_handle)) == 0);
+
+	if (rc != -ENOMSG)
+		SHM_ERR("%s: Error %d receiving message\n", __func__, rc);
+}
+
+/**
+ * shm_svc_notify() - Callback function to receive SHM QMI service events
+ * @handle:	QMI handle in which the event is received.
+ * @event:	Type of the QMI event.
+ * @priv:	Opaque reference to the private data as registered by the
+ *		service.
+ */
+static void shm_svc_notify(struct qmi_handle *handle,
+			   enum qmi_event_type event, void *priv)
+{
+	switch (event) {
+	case QMI_RECV_MSG:
+		queue_delayed_work(shm_svc_workqueue, &work_recv_msg, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static struct qmi_svc_ops_options shm_svc_ops_options = {
+	.version = 1,
+	.service_id = HMON_SERVICE_ID_V01,
+	.service_vers = HMON_SERVICE_VERS_V01,
+	.service_ins = 0,
+	.connect_cb = shm_svc_connect_cb,
+	.disconnect_cb = shm_svc_disconnect_cb,
+	.req_desc_cb = shm_svc_req_desc_cb,
+	.req_cb = shm_svc_req_cb,
+};
+
+static int system_health_monitor_open(struct inode *inode, struct file *file)
+{
+	SHM_DEBUG("%s by %s\n", __func__, current->comm);
+	return 0;
+}
+
+static int system_health_monitor_release(struct inode *inode,
+					  struct file *file)
+{
+	SHM_DEBUG("%s by %s\n", __func__, current->comm);
+	return 0;
+}
+
+static ssize_t system_health_monitor_write(struct file *file,
+	const char __user *buf, size_t count, loff_t *ppos)
+{
+	SHM_ERR("%s by %s\n", __func__, current->comm);
+	return -ENOTSUPP;
+}
+
+static ssize_t system_health_monitor_read(struct file *file, char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	SHM_ERR("%s by %s\n", __func__, current->comm);
+	return -ENOTSUPP;
+}
+
+static long system_health_monitor_ioctl(struct file *file, unsigned int cmd,
+			  unsigned long arg)
+{
+	int rc;
+
+	switch (cmd) {
+	case CHECK_SYSTEM_HEALTH_IOCTL:
+		SHM_INFO_LOG("%s by %s\n", __func__, current->comm);
+		rc = kern_check_system_health();
+		break;
+	default:
+		SHM_ERR("%s: Invalid cmd %d by %s\n",
+			__func__, cmd, current->comm);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static const struct file_operations system_health_monitor_fops = {
+	.owner = THIS_MODULE,
+	.open = system_health_monitor_open,
+	.release = system_health_monitor_release,
+	.read = system_health_monitor_read,
+	.write = system_health_monitor_write,
+	.unlocked_ioctl = system_health_monitor_ioctl,
+	.compat_ioctl = system_health_monitor_ioctl,
+};
+
+/**
+ * start_system_health_monitor_service() - Start the SHM QMI service
+ *
+ * This function registers the SHM QMI service, if it is not already
+ * registered.
+ */
+static int start_system_health_monitor_service(void)
+{
+	int rc;
+
+	shm_svc_workqueue = create_singlethread_workqueue("shm_svc");
+	if (!shm_svc_workqueue) {
+		SHM_ERR("%s: Error creating workqueue\n", __func__);
+		return -EFAULT;
+	}
+
+	shm_svc_handle = qmi_handle_create(shm_svc_notify, NULL);
+	if (!shm_svc_handle) {
+		SHM_ERR("%s: Creating shm_svc_handle failed\n", __func__);
+		rc = -ENOMEM;
+		goto start_svc_error1;
+	}
+
+	rc = qmi_svc_register(shm_svc_handle, &shm_svc_ops_options);
+	if (rc < 0) {
+		SHM_ERR("%s: Registering shm svc failed - %d\n", __func__, rc);
+		goto start_svc_error2;
+	}
+	return 0;
+start_svc_error2:
+	qmi_handle_destroy(shm_svc_handle);
+start_svc_error1:
+	destroy_workqueue(shm_svc_workqueue);
+	return rc;
+}
+
+/**
+ * parse_devicetree() - Parse the device tree for HMA information
+ * @node:	Pointer to the device tree node.
+ * @hma:	HMA information which needs to be extracted.
+ *
+ * This function parses the device tree, extracts the HMA information.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int parse_devicetree(struct device_node *node,
+			    struct hma_info *hma)
+{
+	char *key;
+	const char *subsys_name;
+	const char *ssrestart_string;
+
+	key = "qcom,subsys-name";
+	subsys_name = of_get_property(node, key, NULL);
+	if (!subsys_name)
+		goto error;
+	strlcpy(hma->subsys_name, subsys_name, SUBSYS_NAME_LEN);
+
+	key = "qcom,ssrestart-string";
+	ssrestart_string = of_get_property(node, key, NULL);
+	if (!ssrestart_string)
+		goto error;
+	strlcpy(hma->ssrestart_string, ssrestart_string, SSRESTART_STRLEN);
+	return 0;
+error:
+	SHM_ERR("%s: missing key: %s\n", __func__, key);
+	return -ENODEV;
+}
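+
+/*
+ * Example device tree fragment (illustrative only; the child node name and
+ * property values are hypothetical):
+ *
+ *	qcom,system-health-monitor {
+ *		compatible = "qcom,system-health-monitor";
+ *
+ *		qcom,hma-modem {
+ *			qcom,subsys-name = "modem";
+ *			qcom,ssrestart-string = "modem";
+ *		};
+ *	};
+ */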
+
+/**
+ * system_health_monitor_probe() - Probe function to construct HMA info
+ * @pdev:	Platform device pointing to a device tree node.
+ *
+ * This function extracts the HMA information from the device tree, constructs
+ * it and adds it to the global list.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int system_health_monitor_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct hma_info *hma, *tmp_hma;
+	struct device_node *node;
+
+	mutex_lock(&hma_info_list_lock);
+	for_each_child_of_node(pdev->dev.of_node, node) {
+		hma = kzalloc(sizeof(*hma), GFP_KERNEL);
+		if (!hma) {
+			SHM_ERR("%s: Error allocating hma_info\n", __func__);
+			rc = -ENOMEM;
+			goto probe_err;
+		}
+
+		rc = parse_devicetree(node, hma);
+		if (rc) {
+			SHM_ERR("%s Failed to parse Device Tree\n", __func__);
+			kfree(hma);
+			goto probe_err;
+		}
+
+		init_srcu_struct(&hma->reset_srcu);
+		hma->restart_nb.notifier_call = restart_notifier_cb;
+		hma->restart_nb_h = subsys_notif_register_notifier(
+				hma->ssrestart_string, &hma->restart_nb);
+		if (IS_ERR_OR_NULL(hma->restart_nb_h)) {
+			cleanup_srcu_struct(&hma->reset_srcu);
+			kfree(hma);
+			rc = -EFAULT;
+			SHM_ERR("%s: Error registering restart notif for %s\n",
+				__func__, hma->ssrestart_string);
+			goto probe_err;
+		}
+
+		list_add_tail(&hma->list, &hma_info_list);
+		SHM_INFO_LOG("%s: Added HMA info for %s\n",
+			 __func__, hma->subsys_name);
+	}
+
+	rc = start_system_health_monitor_service();
+	if (rc) {
+		SHM_ERR("%s Failed to start service %d\n", __func__, rc);
+		goto probe_err;
+	}
+	mutex_unlock(&hma_info_list_lock);
+	return 0;
+probe_err:
+	list_for_each_entry_safe(hma, tmp_hma, &hma_info_list, list) {
+		list_del(&hma->list);
+		subsys_notif_unregister_notifier(hma->restart_nb_h,
+						 &hma->restart_nb);
+		cleanup_srcu_struct(&hma->reset_srcu);
+		kfree(hma);
+	}
+	mutex_unlock(&hma_info_list_lock);
+	return rc;
+}
+
+static const struct of_device_id system_health_monitor_match_table[] = {
+	{ .compatible = "qcom,system-health-monitor" },
+	{},
+};
+
+static struct platform_driver system_health_monitor_driver = {
+	.probe = system_health_monitor_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = system_health_monitor_match_table,
+	},
+};
+
+/**
+ * system_health_monitor_init() - Initialize the system health monitor module
+ *
+ * This functions registers a platform driver to probe for and extract the HMA
+ * information. This function registers the character device interface to the
+ * user-space.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+static int __init system_health_monitor_init(void)
+{
+	int rc;
+
+	shm_ilctxt = ipc_log_context_create(SHM_ILCTXT_NUM_PAGES, "shm", 0);
+	if (!shm_ilctxt) {
+		SHM_ERR("%s: Unable to create SHM logging context\n", __func__);
+		shm_debug_mask = 0;
+	}
+
+	rc = platform_driver_register(&system_health_monitor_driver);
+	if (rc) {
+		SHM_ERR("%s: system_health_monitor_driver register failed %d\n",
+			__func__, rc);
+		return rc;
+	}
+
+	rc = alloc_chrdev_region(&system_health_monitor_dev,
+				 0, 1, "system_health_monitor");
+	if (rc < 0) {
+		SHM_ERR("%s: alloc_chrdev_region() failed %d\n", __func__, rc);
+		return rc;
+	}
+
+	system_health_monitor_classp = class_create(THIS_MODULE,
+						"system_health_monitor");
+	if (IS_ERR_OR_NULL(system_health_monitor_classp)) {
+		SHM_ERR("%s: class_create() failed\n", __func__);
+		rc = -ENOMEM;
+		goto init_error1;
+	}
+
+	cdev_init(&system_health_monitor_cdev, &system_health_monitor_fops);
+	system_health_monitor_cdev.owner = THIS_MODULE;
+	rc = cdev_add(&system_health_monitor_cdev,
+		      system_health_monitor_dev, 1);
+	if (rc < 0) {
+		SHM_ERR("%s: cdev_add() failed - rc %d\n",
+			__func__, rc);
+		goto init_error2;
+	}
+
+	system_health_monitor_devp = device_create(system_health_monitor_classp,
+					NULL, system_health_monitor_dev, NULL,
+					"system_health_monitor");
+	if (IS_ERR_OR_NULL(system_health_monitor_devp)) {
+		rc = system_health_monitor_devp ?
+			PTR_ERR(system_health_monitor_devp) : -ENOMEM;
+		SHM_ERR("%s: device_create() failed - rc %d\n",
+			__func__, rc);
+		goto init_error3;
+	}
+	SHM_INFO_LOG("%s: Complete\n", __func__);
+	return 0;
+init_error3:
+	cdev_del(&system_health_monitor_cdev);
+init_error2:
+	class_destroy(system_health_monitor_classp);
+init_error1:
+	unregister_chrdev_region(MAJOR(system_health_monitor_dev), 1);
+	return rc;
+}
+
+module_init(system_health_monitor_init);
+MODULE_DESCRIPTION("System Health Monitor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/qcom/system_health_monitor_v01.c b/drivers/soc/qcom/system_health_monitor_v01.c
new file mode 100644
index 0000000..fe6d7c3
--- /dev/null
+++ b/drivers/soc/qcom/system_health_monitor_v01.c
@@ -0,0 +1,134 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/qmi_encdec.h>
+
+#include <soc/qcom/msm_qmi_interface.h>
+
+#include "system_health_monitor_v01.h"
+
+struct elem_info hmon_register_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct hmon_register_req_msg_v01,
+					   name_valid),
+	},
+	{
+		.data_type      = QMI_STRING,
+		.elem_len       = 255,
+		.elem_size      = sizeof(char),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct hmon_register_req_msg_v01,
+					   name),
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct hmon_register_req_msg_v01,
+					   timeout_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint32_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x11,
+		.offset         = offsetof(struct hmon_register_req_msg_v01,
+					   timeout),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info hmon_register_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct hmon_register_resp_msg_v01,
+					   resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info hmon_health_check_ind_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info hmon_health_check_complete_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(uint8_t),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct hmon_health_check_complete_req_msg_v01,
+				result_valid),
+	},
+	{
+		.data_type      = QMI_SIGNED_4_BYTE_ENUM,
+		.elem_len       = 1,
+		.elem_size      = sizeof(enum hmon_check_result_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(
+				struct hmon_health_check_complete_req_msg_v01,
+				result),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct elem_info hmon_health_check_complete_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.is_array       = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(
+				struct hmon_health_check_complete_resp_msg_v01,
+				resp),
+		.ei_array      = get_qmi_response_type_v01_ei(),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.is_array       = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
diff --git a/drivers/soc/qcom/system_health_monitor_v01.h b/drivers/soc/qcom/system_health_monitor_v01.h
new file mode 100644
index 0000000..ca0e42a
--- /dev/null
+++ b/drivers/soc/qcom/system_health_monitor_v01.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef SYSTEM_HEALTH_MONITOR_V01_H
+#define SYSTEM_HEALTH_MONITOR_V01_H
+
+#define HMON_SERVICE_ID_V01 0x3C
+#define HMON_SERVICE_VERS_V01 0x01
+
+#define QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_REQ_V01 0x0022
+#define QMI_HEALTH_MON_HEALTH_CHECK_COMPLETE_RESP_V01 0x0022
+#define QMI_HEALTH_MON_HEALTH_CHECK_IND_V01 0x0021
+#define QMI_HEALTH_MON_REG_RESP_V01 0x0020
+#define QMI_HEALTH_MON_REG_REQ_V01 0x0020
+
+struct hmon_register_req_msg_v01 {
+	uint8_t name_valid;
+	char name[256];
+	uint8_t timeout_valid;
+	uint32_t timeout;
+};
+#define HMON_REGISTER_REQ_MSG_V01_MAX_MSG_LEN 265
+extern struct elem_info hmon_register_req_msg_v01_ei[];
+
+struct hmon_register_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define HMON_REGISTER_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info hmon_register_resp_msg_v01_ei[];
+
+struct hmon_health_check_ind_msg_v01 {
+	char placeholder;
+};
+#define HMON_HEALTH_CHECK_IND_MSG_V01_MAX_MSG_LEN 0
+extern struct elem_info hmon_health_check_ind_msg_v01_ei[];
+
+enum hmon_check_result_v01 {
+	HMON_CHECK_RESULT_MIN_VAL_V01 = INT_MIN,
+	HEALTH_MONITOR_CHECK_SUCCESS_V01 = 0,
+	HEALTH_MONITOR_CHECK_FAILURE_V01 = 1,
+	HMON_CHECK_RESULT_MAX_VAL_V01 = INT_MAX,
+};
+
+struct hmon_health_check_complete_req_msg_v01 {
+	uint8_t result_valid;
+	enum hmon_check_result_v01 result;
+};
+#define HMON_HEALTH_CHECK_COMPLETE_REQ_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info hmon_health_check_complete_req_msg_v01_ei[];
+
+struct hmon_health_check_complete_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+#define HMON_HEALTH_CHECK_COMPLETE_RESP_MSG_V01_MAX_MSG_LEN 7
+extern struct elem_info hmon_health_check_complete_resp_msg_v01_ei[];
+
+#endif /* SYSTEM_HEALTH_MONITOR_V01_H */
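The message IDs, maximum lengths and elem_info arrays above are what a QMI client uses to describe each message to the QMI encode/decode layer. A hedged sketch of building a registration request (illustrative only; the struct msg_desc field names msg_id, max_msg_len and ei_array are assumed from soc/qcom/msm_qmi_interface.h, and the timeout units are defined by the service):

#include <linux/string.h>
#include <soc/qcom/msm_qmi_interface.h>
#include "system_health_monitor_v01.h"

static struct msg_desc hmon_reg_req_desc = {
	.msg_id = QMI_HEALTH_MON_REG_REQ_V01,
	.max_msg_len = HMON_REGISTER_REQ_MSG_V01_MAX_MSG_LEN,
	.ei_array = hmon_register_req_msg_v01_ei,
};

static void example_fill_register_req(struct hmon_register_req_msg_v01 *req)
{
	/* Optional TLVs are encoded only when the *_valid flag is set. */
	req->name_valid = 1;
	strlcpy(req->name, "example-client", sizeof(req->name));
	req->timeout_valid = 1;
	req->timeout = 5000;	/* assumed units; see the service definition */
}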
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
new file mode 100644
index 0000000..78690d9
--- /dev/null
+++ b/drivers/soc/qcom/system_pm.c
@@ -0,0 +1,87 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+
+#include <soc/qcom/rpmh.h>
+
+#define PDC_TIME_VALID_SHIFT	31
+#define PDC_TIME_UPPER_MASK	0xFFFFFF
+
+static struct rpmh_client *rpmh_client;
+
+static int setup_wakeup(uint64_t sleep_val)
+{
+	struct tcs_cmd cmd[3] = { { 0 } };
+
+	cmd[0].data = sleep_val & 0xFFFFFFFF;
+	cmd[1].data = (sleep_val >> 32) & PDC_TIME_UPPER_MASK;
+	cmd[1].data |= 1 << PDC_TIME_VALID_SHIFT;
+
+	return rpmh_write_control(rpmh_client, cmd, ARRAY_SIZE(cmd));
+}
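A worked example of the PDC wakeup-time encoding done by setup_wakeup() above (illustrative arithmetic only):

/*
 * For sleep_val = 0x0000001234ABCDEFULL:
 *   cmd[0].data = 0x34ABCDEF                      (lower 32 bits)
 *   cmd[1].data = (0x12 & PDC_TIME_UPPER_MASK)    (upper 24 bits)
 *               | (1 << PDC_TIME_VALID_SHIFT)     (valid bit)
 *               = 0x80000012
 * cmd[2] remains zero-initialized.
 */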
+
+/**
+ * system_sleep_enter() - Activities done when entering system low power modes
+ *
+ * @sleep_val: The qtimer value for the next wakeup time
+ *
+ * Return: 0 on success, or an error value from writing the wakeup timer value
+ * to the hardware block.
+ */
+int system_sleep_enter(uint64_t sleep_val)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(rpmh_client))
+		return -EFAULT;
+
+	ret = rpmh_flush(rpmh_client);
+	if (ret)
+		return ret;
+
+	return setup_wakeup(sleep_val);
+}
+EXPORT_SYMBOL(system_sleep_enter);
+
+/**
+ * system_sleep_exit() - Activities done when exiting system low power modes
+ */
+void system_sleep_exit(void)
+{
+}
+EXPORT_SYMBOL(system_sleep_exit);
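A hedged sketch of how a platform low-power-mode driver might use the two entry points above; the caller and its header are hypothetical and not part of this patch, so the prototypes are repeated here:

#include <linux/types.h>

extern int system_sleep_enter(uint64_t sleep_val);
extern void system_sleep_exit(void);

static int example_enter_system_sleep(uint64_t next_wakeup_tick)
{
	int ret;

	/* Flush cached RPMH requests and arm the PDC wakeup timer. */
	ret = system_sleep_enter(next_wakeup_tick);
	if (ret)
		return ret;	/* abort the low power mode on failure */

	/* ... the platform low power state would be entered here ... */

	system_sleep_exit();
	return 0;
}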
+
+static int sys_pm_probe(struct platform_device *pdev)
+{
+	rpmh_client = rpmh_get_byindex(pdev, 0);
+	if (IS_ERR_OR_NULL(rpmh_client))
+		return PTR_ERR(rpmh_client);
+
+	return 0;
+}
+
+static const struct of_device_id sys_pm_drv_match[] = {
+	{ .compatible = "qcom,system-pm", },
+	{ }
+};
+
+static struct platform_driver sys_pm_driver = {
+	.probe = sys_pm_probe,
+	.driver = {
+		.name = KBUILD_MODNAME,
+		.of_match_table = sys_pm_drv_match,
+	},
+};
+builtin_platform_driver(sys_pm_driver);
diff --git a/drivers/soc/qcom/tracer_pkt.c b/drivers/soc/qcom/tracer_pkt.c
new file mode 100644
index 0000000..e233055
--- /dev/null
+++ b/drivers/soc/qcom/tracer_pkt.c
@@ -0,0 +1,255 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <asm/arch_timer.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <soc/qcom/tracer_pkt.h>
+#define CREATE_TRACE_POINTS
+#include "tracer_pkt_private.h"
+
+static unsigned int qdss_tracing;
+module_param_named(qdss_tracing_enable, qdss_tracing,
+		   uint, S_IRUGO | S_IWUSR | S_IWGRP);
+
+#define TRACER_PKT_VERSION 1
+#define MAX_CC_WLEN 3
+#define HEX_DUMP_HDR "Tracer Packet:"
+
+/**
+ * struct tracer_pkt_hdr - data structure defining the tracer packet header
+ * @version:		Tracer Packet version.
+ * @reserved:		Reserved fields in the tracer packet.
+ * @id_valid:		Indicates the presence of a subsystem & transport ID.
+ * @qdss_tracing:	Enable the event logging to QDSS.
+ * @ccl:		Client cookie/private information length in words.
+ * @pkt_len:		Length of the tracer packet in words.
+ * @pkt_offset:		Offset into the packet to log events, in words.
+ * @clnt_event_cfg:	Client-specific event configuration bit mask.
+ * @glink_event_cfg:	G-Link-specific event configuration bit mask.
+ * @base_ts:		Base timestamp when the tracer packet is initialized.
+ * @cc:			Client cookie/private information.
+ */
+struct tracer_pkt_hdr {
+	uint16_t version:4;
+	uint16_t reserved:8;
+	uint16_t id_valid:1;
+	uint16_t qdss_tracing:1;
+	uint16_t ccl:2;
+	uint16_t pkt_len;
+	uint16_t pkt_offset;
+	uint16_t clnt_event_cfg;
+	uint32_t glink_event_cfg;
+	u64 base_ts;
+	uint32_t cc[MAX_CC_WLEN];
+} __attribute__((__packed__));
+
+/**
+ * struct tracer_pkt_event - data structure defining the tracer packet event
+ * @event_id:	Event ID.
+ * @event_ts:	Timestamp at which the event occurred.
+ */
+struct tracer_pkt_event {
+	uint32_t event_id;
+	uint32_t event_ts;
+};
+
+/**
+ * tracer_pkt_init() - initialize the tracer packet
+ * @data:		Pointer to the buffer to be initialized with a tracer
+ *			packet.
+ * @data_len:		Length of the buffer.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ * @pkt_priv:		Private/Cookie information to be added to the tracer
+ *			packet.
+ * @pkt_priv_len:	Length of the private data.
+ *
+ * This function is used to initialize a buffer with the tracer packet header.
+ * The tracer packet header includes the data as passed by the elements in the
+ * parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len)
+{
+	struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data || !data_len)
+		return -EINVAL;
+
+	if (!IS_ALIGNED(data_len, sizeof(uint32_t)))
+		return -EINVAL;
+
+	if (data_len < sizeof(*pkt_hdr))
+		return -ETOOSMALL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	pkt_hdr->version = TRACER_PKT_VERSION;
+	pkt_hdr->reserved = 0;
+	pkt_hdr->id_valid = 0;
+	pkt_hdr->qdss_tracing = qdss_tracing ? true : false;
+	if (pkt_priv_len > MAX_CC_WLEN * sizeof(uint32_t))
+		pkt_hdr->ccl = MAX_CC_WLEN;
+	else
+		pkt_hdr->ccl = pkt_priv_len/sizeof(uint32_t) +
+				(pkt_priv_len & (sizeof(uint32_t) - 1) ? 1 : 0);
+	pkt_hdr->pkt_len = data_len / sizeof(uint32_t);
+	pkt_hdr->pkt_offset = sizeof(*pkt_hdr) / sizeof(uint32_t);
+	pkt_hdr->clnt_event_cfg = client_event_cfg;
+	pkt_hdr->glink_event_cfg = glink_event_cfg;
+	pkt_hdr->base_ts = arch_counter_get_cntvct();
+	memcpy(pkt_hdr->cc, pkt_priv, pkt_hdr->ccl * sizeof(uint32_t));
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_init);
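A minimal usage sketch for the initializer above (hypothetical caller): a 64-byte, word-aligned transport buffer is turned into a tracer packet carrying one word of client cookie; the event configuration masks are placeholder values:

#include <linux/slab.h>
#include <linux/types.h>
#include <soc/qcom/tracer_pkt.h>

static void *example_make_tracer_pkt(void)
{
	uint32_t cookie = 0xCAFECAFE;	/* client private data */
	void *buf = kzalloc(64, GFP_KERNEL);

	if (!buf)
		return NULL;
	if (tracer_pkt_init(buf, 64, 0x1, 0x0, &cookie, sizeof(cookie))) {
		kfree(buf);
		return NULL;
	}
	return buf;
}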
+
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ *				packet
+ * @data:		Pointer to the buffer to be initialized with event
+ *			configuration mask.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+			     uint32_t glink_event_cfg)
+{
+	struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data)
+		return -EINVAL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+		return -EINVAL;
+
+	pkt_hdr->clnt_event_cfg = client_event_cfg;
+	pkt_hdr->glink_event_cfg = glink_event_cfg;
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_set_event_cfg);
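A short sketch of updating the configuration masks on an already initialized packet (hypothetical mask values):

#include <linux/printk.h>
#include <linux/types.h>
#include <soc/qcom/tracer_pkt.h>

static void example_update_event_cfg(void *tracer_buf)
{
	uint16_t client_mask = 0x0003;		/* client-defined bits */
	uint32_t glink_mask = 0x0000000F;	/* transport-defined bits */

	if (tracer_pkt_set_event_cfg(tracer_buf, client_mask, glink_mask))
		pr_err("buffer is not a valid tracer packet\n");
}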
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @event_id:	Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+	struct tracer_pkt_hdr *pkt_hdr;
+	struct tracer_pkt_event event;
+
+	if (!data)
+		return -EINVAL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+		return -EINVAL;
+
+	if (qdss_tracing) {
+		trace_tracer_pkt_event(event_id, pkt_hdr->cc);
+		return 0;
+	}
+
+	if (unlikely((pkt_hdr->pkt_len - pkt_hdr->pkt_offset) *
+	    sizeof(uint32_t) < sizeof(event)))
+		return -ETOOSMALL;
+
+	event.event_id = event_id;
+	event.event_ts = (uint32_t)arch_counter_get_cntvct();
+	memcpy(data + (pkt_hdr->pkt_offset * sizeof(uint32_t)),
+		&event, sizeof(event));
+	pkt_hdr->pkt_offset += sizeof(event)/sizeof(uint32_t);
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_log_event);
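A sketch of logging events against the packet as it moves through a transport; the event IDs below are hypothetical, since real IDs are defined by the client and transport:

#include <soc/qcom/tracer_pkt.h>

#define EXAMPLE_EVENT_TX_QUEUED	1
#define EXAMPLE_EVENT_TX_DONE	2

static void example_log_tx(void *tracer_buf)
{
	tracer_pkt_log_event(tracer_buf, EXAMPLE_EVENT_TX_QUEUED);
	/* ... hand the buffer to the transport ... */
	tracer_pkt_log_event(tracer_buf, EXAMPLE_EVENT_TX_DONE);
}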
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ *				     packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @data_len:	Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: Length of the buffer needed to hold the hex dump on success,
+ * standard Linux error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+	size_t hex_dump_size;
+	struct tracer_pkt_hdr *pkt_hdr;
+
+	if (!data || data_len <= 0)
+		return -EINVAL;
+
+	pkt_hdr = (struct tracer_pkt_hdr *)data;
+	if (unlikely(pkt_hdr->version != TRACER_PKT_VERSION))
+		return -EINVAL;
+
+	/*
+	 * Hex Dump Prefix + newline
+	 * 0x<first_word> + newline
+	 * ...
+	 * 0x<last_word> + newline + null-termination character.
+	 */
+	hex_dump_size = strlen(HEX_DUMP_HDR) + 1 + (pkt_hdr->pkt_len * 11) + 1;
+	return hex_dump_size;
+}
+EXPORT_SYMBOL(tracer_pkt_calc_hex_dump_size);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf:	Buffer to contain the hex dump of the tracer packet.
+ * @buf_len:	Length of the hex dump buffer.
+ * @data:	Buffer containing the tracer packet.
+ * @data_len:	Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len)
+{
+	int i, j = 0;
+	char *dst = (char *)buf;
+
+	if (!buf || buf_len <= 0 || !data || data_len <= 0)
+		return -EINVAL;
+
+	if (buf_len < tracer_pkt_calc_hex_dump_size(data, data_len))
+		return -EINVAL;
+
+	j = scnprintf(dst, buf_len, "%s\n", HEX_DUMP_HDR);
+	for (i = 0; i < data_len/sizeof(uint32_t); i++)
+		j += scnprintf(dst + j, buf_len - j, "0x%08x\n",
+				*((uint32_t *)data + i));
+	dst[j] = '\0';
+	return 0;
+}
+EXPORT_SYMBOL(tracer_pkt_hex_dump);
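A sketch combining the two dump helpers above (hypothetical caller): size the output buffer first, then fill it; the returned string is typically emitted through debugfs:

#include <linux/slab.h>
#include <soc/qcom/tracer_pkt.h>

static char *example_dump_tracer_pkt(void *data, size_t data_len)
{
	size_t sz = tracer_pkt_calc_hex_dump_size(data, data_len);
	char *buf;

	/* The size helper returns a negative errno cast to size_t on error. */
	if ((ssize_t)sz <= 0)
		return NULL;
	buf = kmalloc(sz, GFP_KERNEL);
	if (!buf)
		return NULL;
	if (tracer_pkt_hex_dump(buf, sz, data, data_len)) {
		kfree(buf);
		return NULL;
	}
	return buf;	/* caller frees after printing it */
}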
diff --git a/drivers/soc/qcom/tracer_pkt_private.h b/drivers/soc/qcom/tracer_pkt_private.h
new file mode 100644
index 0000000..fc760e6
--- /dev/null
+++ b/drivers/soc/qcom/tracer_pkt_private.h
@@ -0,0 +1,50 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#if !defined(_TRACER_PKT_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACER_PKT_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM tracer_pkt
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE tracer_pkt_private
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(tracer_pkt_event,
+
+	TP_PROTO(uint32_t id, uint32_t *cc),
+
+	TP_ARGS(id, cc),
+
+	TP_STRUCT__entry(
+		__field(uint32_t, id)
+		__field(uint32_t, cc1)
+		__field(uint32_t, cc2)
+		__field(uint32_t, cc3)
+	),
+
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->cc1 = cc[0];
+		__entry->cc2 = cc[1];
+		__entry->cc3 = cc[2];
+	),
+
+	TP_printk("CC - 0x%08x:0x%08x:0x%08x, ID - %d",
+		__entry->cc1, __entry->cc2, __entry->cc3, __entry->id)
+);
+#endif /*_TRACER_PKT_TRACE_H*/
+
+#include <trace/define_trace.h>
+
diff --git a/drivers/soc/qcom/watchdog_v2.c b/drivers/soc/qcom/watchdog_v2.c
new file mode 100644
index 0000000..d58bfa1
--- /dev/null
+++ b/drivers/soc/qcom/watchdog_v2.c
@@ -0,0 +1,797 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
+#include <linux/platform_device.h>
+#include <linux/wait.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/memory_dump.h>
+#include <soc/qcom/watchdog.h>
+
+#define MODULE_NAME "msm_watchdog"
+#define WDT0_ACCSCSSNBARK_INT 0
+#define TCSR_WDT_CFG	0x30
+#define WDT0_RST	0x04
+#define WDT0_EN		0x08
+#define WDT0_STS	0x0C
+#define WDT0_BARK_TIME	0x10
+#define WDT0_BITE_TIME	0x14
+
+#define WDOG_ABSENT	0
+
+#define EN		0
+#define UNMASKED_INT_EN 1
+
+#define MASK_SIZE		32
+#define SCM_SET_REGSAVE_CMD	0x2
+#define SCM_SVC_SEC_WDOG_DIS	0x7
+#define MAX_CPU_CTX_SIZE	2048
+
+static struct msm_watchdog_data *wdog_data;
+
+static int cpu_idle_pc_state[NR_CPUS];
+
+/*
+ * user_pet_enable:
+ *	Require userspace to write to a sysfs file every pet_time milliseconds.
+ *	Disabled by default on boot.
+ */
+struct msm_watchdog_data {
+	unsigned int __iomem phys_base;
+	size_t size;
+	void __iomem *base;
+	void __iomem *wdog_absent_base;
+	struct device *dev;
+	unsigned int pet_time;
+	unsigned int bark_time;
+	unsigned int bark_irq;
+	unsigned int bite_irq;
+	bool do_ipi_ping;
+	bool wakeup_irq_enable;
+	unsigned long long last_pet;
+	unsigned int min_slack_ticks;
+	unsigned long long min_slack_ns;
+	void *scm_regsave;
+	cpumask_t alive_mask;
+	struct mutex disable_lock;
+	bool irq_ppi;
+	struct msm_watchdog_data __percpu **wdog_cpu_dd;
+	struct notifier_block panic_blk;
+
+	bool enabled;
+	bool user_pet_enabled;
+
+	struct task_struct *watchdog_task;
+	struct timer_list pet_timer;
+	wait_queue_head_t pet_complete;
+
+	bool timer_expired;
+	bool user_pet_complete;
+};
+
+/*
+ * On the kernel command line specify
+ * watchdog_v2.enable=1 to enable the watchdog.
+ * By default the watchdog is turned on.
+ */
+static int enable = 1;
+module_param(enable, int, 0);
+
+/*
+ * On the kernel command line specify
+ * watchdog_v2.WDT_HZ=<clock val in HZ> to set Watchdog
+ * ticks. By default it is set to 32765.
+ */
+static long WDT_HZ = 32765;
+module_param(WDT_HZ, long, 0);
+
+/*
+ * On the kernel command line specify
+ * watchdog_v2.ipi_opt_en=1 to enable the watchdog ipi ping
+ * optimization. By default it is turned off
+ */
+static int ipi_opt_en;
+module_param(ipi_opt_en, int, 0);
+
+static void dump_cpu_alive_mask(struct msm_watchdog_data *wdog_dd)
+{
+	static char alive_mask_buf[MASK_SIZE];
+
+	scnprintf(alive_mask_buf, MASK_SIZE, "%*pb1", cpumask_pr_args(
+				&wdog_dd->alive_mask));
+	dev_info(wdog_dd->dev, "cpu alive mask from last pet %s\n",
+				alive_mask_buf);
+}
+
+static int msm_watchdog_suspend(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable)
+		return 0;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	if (wdog_dd->wakeup_irq_enable) {
+		wdog_dd->last_pet = sched_clock();
+		return 0;
+	}
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	/* Make sure watchdog is suspended before setting enable */
+	mb();
+	wdog_dd->enabled = false;
+	wdog_dd->last_pet = sched_clock();
+	return 0;
+}
+
+static int msm_watchdog_resume(struct device *dev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)dev_get_drvdata(dev);
+	if (!enable || wdog_dd->wakeup_irq_enable)
+		return 0;
+	__raw_writel(1, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	/* Make sure watchdog is reset before setting enable */
+	mb();
+	wdog_dd->enabled = true;
+	wdog_dd->last_pet = sched_clock();
+	return 0;
+}
+
+static int panic_wdog_handler(struct notifier_block *this,
+			      unsigned long event, void *ptr)
+{
+	struct msm_watchdog_data *wdog_dd = container_of(this,
+				struct msm_watchdog_data, panic_blk);
+	if (panic_timeout == 0) {
+		__raw_writel(0, wdog_dd->base + WDT0_EN);
+		/* Make sure watchdog is disabled before notifying the caller */
+		mb();
+	} else {
+		__raw_writel(WDT_HZ * (panic_timeout + 10),
+				wdog_dd->base + WDT0_BARK_TIME);
+		__raw_writel(WDT_HZ * (panic_timeout + 10),
+				wdog_dd->base + WDT0_BITE_TIME);
+		__raw_writel(1, wdog_dd->base + WDT0_RST);
+	}
+	return NOTIFY_DONE;
+}
+
+static void wdog_disable(struct msm_watchdog_data *wdog_dd)
+{
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	/* Make sure watchdog is disabled before proceeding */
+	mb();
+	if (wdog_dd->irq_ppi) {
+		disable_percpu_irq(wdog_dd->bark_irq);
+		free_percpu_irq(wdog_dd->bark_irq, wdog_dd->wdog_cpu_dd);
+	} else
+		devm_free_irq(wdog_dd->dev, wdog_dd->bark_irq, wdog_dd);
+	enable = 0;
+	/*Ensure all cpus see update to enable*/
+	smp_mb();
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+						&wdog_dd->panic_blk);
+	del_timer_sync(&wdog_dd->pet_timer);
+	/* may be suspended after the first write above */
+	__raw_writel(0, wdog_dd->base + WDT0_EN);
+	/* Make sure watchdog is disabled before setting enable */
+	mb();
+	wdog_dd->enabled = false;
+	pr_info("MSM Apps Watchdog deactivated.\n");
+}
+
+static ssize_t wdog_disable_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	mutex_lock(&wdog_dd->disable_lock);
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", enable == 0 ? 1 : 0);
+	mutex_unlock(&wdog_dd->disable_lock);
+	return ret;
+}
+
+static ssize_t wdog_disable_set(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int ret;
+	u8 disable;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = kstrtou8(buf, 10, &disable);
+	if (ret) {
+		dev_err(wdog_dd->dev, "invalid user input\n");
+		return ret;
+	}
+	if (disable == 1) {
+		mutex_lock(&wdog_dd->disable_lock);
+		if (enable == 0) {
+			pr_info("MSM Apps Watchdog already disabled\n");
+			mutex_unlock(&wdog_dd->disable_lock);
+			return count;
+		}
+		disable = 1;
+		if (!is_scm_armv8()) {
+			ret = scm_call(SCM_SVC_BOOT, SCM_SVC_SEC_WDOG_DIS,
+				       &disable, sizeof(disable), NULL, 0);
+		} else {
+			struct scm_desc desc = {0};
+
+			desc.args[0] = 1;
+			desc.arginfo = SCM_ARGS(1);
+			ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT,
+					SCM_SVC_SEC_WDOG_DIS), &desc);
+		}
+		if (ret) {
+			dev_err(wdog_dd->dev,
+					"Failed to deactivate secure wdog\n");
+			mutex_unlock(&wdog_dd->disable_lock);
+			return -EIO;
+		}
+		wdog_disable(wdog_dd);
+		mutex_unlock(&wdog_dd->disable_lock);
+	} else {
+		pr_err("invalid operation, only disable = 1 supported\n");
+		return -EINVAL;
+	}
+	return count;
+}
+
+static DEVICE_ATTR(disable, S_IWUSR | S_IRUSR, wdog_disable_get,
+							wdog_disable_set);
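The attribute above lets userspace permanently disable both the secure and the non-secure watchdog by writing "1". A userspace sketch (illustrative only); the sysfs path depends on the platform device name, so it is passed in rather than hard-coded:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int example_disable_wdog(const char *attr_path /* .../disable */)
{
	int fd = open(attr_path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, "1", 1) != 1) {
		perror("write");
		close(fd);
		return -1;
	}
	return close(fd);
}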
+
+/*
+ * Userspace Watchdog Support:
+ * Write 1 to the "user_pet_enabled" file to enable hw support for a
+ * userspace watchdog.
+ * Userspace is required to pet the watchdog by continuing to write 1
+ * to this file in the expected interval.
+ * Userspace may disable this requirement by writing 0 to this same
+ * file.
+ */
+static void __wdog_user_pet(struct msm_watchdog_data *wdog_dd)
+{
+	wdog_dd->user_pet_complete = true;
+	wake_up(&wdog_dd->pet_complete);
+}
+
+static ssize_t wdog_user_pet_enabled_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n",
+			wdog_dd->user_pet_enabled);
+	return ret;
+}
+
+static ssize_t wdog_user_pet_enabled_set(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t count)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = strtobool(buf, &wdog_dd->user_pet_enabled);
+	if (ret) {
+		dev_err(wdog_dd->dev, "invalid user input\n");
+		return ret;
+	}
+
+	__wdog_user_pet(wdog_dd);
+
+	return count;
+}
+
+static DEVICE_ATTR(user_pet_enabled, S_IWUSR | S_IRUSR,
+		wdog_user_pet_enabled_get, wdog_user_pet_enabled_set);
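With the userspace watchdog enabled, userspace must keep writing "1" to this attribute within the pet interval or the kthread below stops petting the hardware. A userspace petting-loop sketch (illustrative only; the sysfs path is a placeholder and the 1-second period assumes a pet_time comfortably larger than 1000 ms):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *attr = argc > 1 ? argv[1] : "user_pet_enabled";

	for (;;) {
		int fd = open(attr, O_WRONLY);

		if (fd < 0 || write(fd, "1", 1) != 1) {
			perror("pet");
			return 1;
		}
		close(fd);
		sleep(1);	/* keep this well below pet_time */
	}
}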
+
+static ssize_t wdog_pet_time_get(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd = dev_get_drvdata(dev);
+
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", wdog_dd->pet_time);
+	return ret;
+}
+
+static DEVICE_ATTR(pet_time, S_IRUSR, wdog_pet_time_get, NULL);
+
+static void pet_watchdog(struct msm_watchdog_data *wdog_dd)
+{
+	int slack, i, count, prev_count = 0;
+	unsigned long long time_ns;
+	unsigned long long slack_ns;
+	unsigned long long bark_time_ns = wdog_dd->bark_time * 1000000ULL;
+
+	for (i = 0; i < 2; i++) {
+		count = (__raw_readl(wdog_dd->base + WDT0_STS) >> 1) & 0xFFFFF;
+		if (count != prev_count) {
+			prev_count = count;
+			i = 0;
+		}
+	}
+	slack = ((wdog_dd->bark_time * WDT_HZ) / 1000) - count;
+	if (slack < wdog_dd->min_slack_ticks)
+		wdog_dd->min_slack_ticks = slack;
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	time_ns = sched_clock();
+	slack_ns = (wdog_dd->last_pet + bark_time_ns) - time_ns;
+	if (slack_ns < wdog_dd->min_slack_ns)
+		wdog_dd->min_slack_ns = slack_ns;
+	wdog_dd->last_pet = time_ns;
+}
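A worked example of the slack bookkeeping above (illustrative numbers, assuming a typical 11000 ms bark time and the default WDT_HZ of 32765):

/*
 * bark window = (11000 * 32765) / 1000 = 360415 ticks
 * if the counter reads 32765 ticks (~1 s since the last pet):
 *   slack = 360415 - 32765 = 327650 ticks  (= 10 s of margin)
 * min_slack_ticks/min_slack_ns record the smallest margin ever seen,
 * which is useful when tuning pet_time against bark_time.
 */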
+
+static void keep_alive_response(void *info)
+{
+	int cpu = smp_processor_id();
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)info;
+
+	cpumask_set_cpu(cpu, &wdog_dd->alive_mask);
+	/* Make sure alive mask is cleared and set in order */
+	smp_mb();
+}
+
+/*
+ * If this function does not return, it implies one of the
+ * other cpu's is not responsive.
+ */
+static void ping_other_cpus(struct msm_watchdog_data *wdog_dd)
+{
+	int cpu;
+
+	cpumask_clear(&wdog_dd->alive_mask);
+	/* Make sure alive mask is cleared and set in order */
+	smp_mb();
+	for_each_cpu(cpu, cpu_online_mask) {
+		if (!cpu_idle_pc_state[cpu])
+			smp_call_function_single(cpu, keep_alive_response,
+						 wdog_dd, 1);
+	}
+}
+
+static void pet_task_wakeup(unsigned long data)
+{
+	struct msm_watchdog_data *wdog_dd =
+		(struct msm_watchdog_data *)data;
+	wdog_dd->timer_expired = true;
+	wake_up(&wdog_dd->pet_complete);
+}
+
+static __ref int watchdog_kthread(void *arg)
+{
+	struct msm_watchdog_data *wdog_dd =
+		(struct msm_watchdog_data *)arg;
+	unsigned long delay_time = 0;
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO-1};
+
+	sched_setscheduler(current, SCHED_FIFO, &param);
+	while (!kthread_should_stop()) {
+		while (wait_event_interruptible(
+			wdog_dd->pet_complete,
+			wdog_dd->timer_expired) != 0)
+			;
+
+		if (wdog_dd->do_ipi_ping)
+			ping_other_cpus(wdog_dd);
+
+		while (wait_event_interruptible(
+			wdog_dd->pet_complete,
+			wdog_dd->user_pet_complete) != 0)
+			;
+
+		wdog_dd->timer_expired = false;
+		wdog_dd->user_pet_complete = !wdog_dd->user_pet_enabled;
+
+		if (enable) {
+			delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+			pet_watchdog(wdog_dd);
+		}
+		/* Check again before scheduling
+		 * Could have been changed on other cpu
+		 */
+		mod_timer(&wdog_dd->pet_timer, jiffies + delay_time);
+	}
+	return 0;
+}
+
+static int wdog_cpu_pm_notify(struct notifier_block *self,
+			      unsigned long action, void *v)
+{
+	int cpu;
+
+	cpu = raw_smp_processor_id();
+
+	switch (action) {
+	case CPU_PM_ENTER:
+		cpu_idle_pc_state[cpu] = 1;
+		break;
+	case CPU_PM_ENTER_FAILED:
+	case CPU_PM_EXIT:
+		cpu_idle_pc_state[cpu] = 0;
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block wdog_cpu_pm_nb = {
+	.notifier_call = wdog_cpu_pm_notify,
+};
+
+static int msm_watchdog_remove(struct platform_device *pdev)
+{
+	struct msm_watchdog_data *wdog_dd =
+			(struct msm_watchdog_data *)platform_get_drvdata(pdev);
+
+	if (ipi_opt_en)
+		cpu_pm_unregister_notifier(&wdog_cpu_pm_nb);
+
+	mutex_lock(&wdog_dd->disable_lock);
+	if (enable)
+		wdog_disable(wdog_dd);
+
+	mutex_unlock(&wdog_dd->disable_lock);
+	device_remove_file(wdog_dd->dev, &dev_attr_disable);
+	if (wdog_dd->irq_ppi)
+		free_percpu(wdog_dd->wdog_cpu_dd);
+	dev_info(wdog_dd->dev, "MSM Watchdog Exit - Deactivated\n");
+	del_timer_sync(&wdog_dd->pet_timer);
+	kthread_stop(wdog_dd->watchdog_task);
+	kfree(wdog_dd);
+	return 0;
+}
+
+void msm_trigger_wdog_bite(void)
+{
+	if (!wdog_data)
+		return;
+	pr_info("Causing a watchdog bite!");
+	__raw_writel(1, wdog_data->base + WDT0_BITE_TIME);
+	/* Make sure bite time is written before we reset */
+	mb();
+	__raw_writel(1, wdog_data->base + WDT0_RST);
+	/* Make sure we wait only after reset */
+	mb();
+	/* Delay to make sure bite occurs */
+	mdelay(10000);
+	pr_err("Wdog - STS: 0x%x, CTL: 0x%x, BARK TIME: 0x%x, BITE TIME: 0x%x",
+		__raw_readl(wdog_data->base + WDT0_STS),
+		__raw_readl(wdog_data->base + WDT0_EN),
+		__raw_readl(wdog_data->base + WDT0_BARK_TIME),
+		__raw_readl(wdog_data->base + WDT0_BITE_TIME));
+}
+
+static irqreturn_t wdog_bark_handler(int irq, void *dev_id)
+{
+	struct msm_watchdog_data *wdog_dd = (struct msm_watchdog_data *)dev_id;
+	unsigned long nanosec_rem;
+	unsigned long long t = sched_clock();
+
+	nanosec_rem = do_div(t, 1000000000);
+	dev_info(wdog_dd->dev, "Watchdog bark! Now = %lu.%06lu\n",
+			(unsigned long) t, nanosec_rem / 1000);
+
+	nanosec_rem = do_div(wdog_dd->last_pet, 1000000000);
+	dev_info(wdog_dd->dev, "Watchdog last pet at %lu.%06lu\n",
+			(unsigned long) wdog_dd->last_pet, nanosec_rem / 1000);
+	if (wdog_dd->do_ipi_ping)
+		dump_cpu_alive_mask(wdog_dd);
+	msm_trigger_wdog_bite();
+	panic("Failed to cause a watchdog bite! - Falling back to kernel panic!");
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t wdog_ppi_bark(int irq, void *dev_id)
+{
+	struct msm_watchdog_data *wdog_dd =
+			*(struct msm_watchdog_data **)(dev_id);
+	return wdog_bark_handler(irq, wdog_dd);
+}
+
+static void configure_bark_dump(struct msm_watchdog_data *wdog_dd)
+{
+	int ret;
+	struct msm_dump_entry dump_entry;
+	struct msm_dump_data *cpu_data;
+	int cpu;
+	void *cpu_buf;
+
+	cpu_data = kzalloc(sizeof(struct msm_dump_data) *
+			   num_present_cpus(), GFP_KERNEL);
+	if (!cpu_data)
+		goto out0;
+
+	cpu_buf = kzalloc(MAX_CPU_CTX_SIZE * num_present_cpus(),
+			  GFP_KERNEL);
+	if (!cpu_buf)
+		goto out1;
+
+	for_each_cpu(cpu, cpu_present_mask) {
+		cpu_data[cpu].addr = virt_to_phys(cpu_buf +
+						cpu * MAX_CPU_CTX_SIZE);
+		cpu_data[cpu].len = MAX_CPU_CTX_SIZE;
+		dump_entry.id = MSM_DUMP_DATA_CPU_CTX + cpu;
+		dump_entry.addr = virt_to_phys(&cpu_data[cpu]);
+		ret = msm_dump_data_register(MSM_DUMP_TABLE_APPS,
+					     &dump_entry);
+		/*
+		 * Don't free the buffers in case of error since
+		 * registration may have succeeded for some cpus.
+		 */
+		if (ret)
+			pr_err("cpu %d reg dump setup failed\n", cpu);
+	}
+
+	return;
+out1:
+	kfree(cpu_data);
+out0:
+	return;
+}
+
+static int init_watchdog_sysfs(struct msm_watchdog_data *wdog_dd)
+{
+	int error = 0;
+
+	error |= device_create_file(wdog_dd->dev, &dev_attr_disable);
+
+	if (of_property_read_bool(wdog_dd->dev->of_node,
+					"qcom,userspace-watchdog")) {
+		error |= device_create_file(wdog_dd->dev, &dev_attr_pet_time);
+		error |= device_create_file(wdog_dd->dev,
+					    &dev_attr_user_pet_enabled);
+	}
+
+	if (error)
+		dev_err(wdog_dd->dev, "cannot create sysfs attribute\n");
+
+	return error;
+}
+
+static void init_watchdog_data(struct msm_watchdog_data *wdog_dd)
+{
+	unsigned long delay_time;
+	uint32_t val;
+	u64 timeout;
+	int ret;
+
+	/*
+	 * Disable the watchdog for cluster 1 so that cluster 0 watchdog will
+	 * be mapped to the entire sub-system.
+	 */
+	if (wdog_dd->wdog_absent_base)
+		__raw_writel(2, wdog_dd->wdog_absent_base + WDOG_ABSENT);
+
+	if (wdog_dd->irq_ppi) {
+		wdog_dd->wdog_cpu_dd = alloc_percpu(struct msm_watchdog_data *);
+		if (!wdog_dd->wdog_cpu_dd) {
+			dev_err(wdog_dd->dev, "fail to allocate cpu data\n");
+			return;
+		}
+		*raw_cpu_ptr(wdog_dd->wdog_cpu_dd) = wdog_dd;
+		ret = request_percpu_irq(wdog_dd->bark_irq, wdog_ppi_bark,
+					"apps_wdog_bark",
+					wdog_dd->wdog_cpu_dd);
+		if (ret) {
+			dev_err(wdog_dd->dev, "failed to request bark irq\n");
+			free_percpu(wdog_dd->wdog_cpu_dd);
+			return;
+		}
+	} else {
+		ret = devm_request_irq(wdog_dd->dev, wdog_dd->bark_irq,
+				wdog_bark_handler, IRQF_TRIGGER_RISING,
+						"apps_wdog_bark", wdog_dd);
+		if (ret) {
+			dev_err(wdog_dd->dev, "failed to request bark irq\n");
+			return;
+		}
+	}
+	delay_time = msecs_to_jiffies(wdog_dd->pet_time);
+	wdog_dd->min_slack_ticks = UINT_MAX;
+	wdog_dd->min_slack_ns = ULLONG_MAX;
+	configure_bark_dump(wdog_dd);
+	timeout = (wdog_dd->bark_time * WDT_HZ)/1000;
+	__raw_writel(timeout, wdog_dd->base + WDT0_BARK_TIME);
+	__raw_writel(timeout + 3*WDT_HZ, wdog_dd->base + WDT0_BITE_TIME);
+
+	wdog_dd->panic_blk.notifier_call = panic_wdog_handler;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &wdog_dd->panic_blk);
+	mutex_init(&wdog_dd->disable_lock);
+	init_waitqueue_head(&wdog_dd->pet_complete);
+	wdog_dd->timer_expired = false;
+	wdog_dd->user_pet_complete = true;
+	wdog_dd->user_pet_enabled = false;
+	wake_up_process(wdog_dd->watchdog_task);
+	init_timer(&wdog_dd->pet_timer);
+	wdog_dd->pet_timer.data = (unsigned long)wdog_dd;
+	wdog_dd->pet_timer.function = pet_task_wakeup;
+	wdog_dd->pet_timer.expires = jiffies + delay_time;
+	add_timer(&wdog_dd->pet_timer);
+
+	val = BIT(EN);
+	if (wdog_dd->wakeup_irq_enable)
+		val |= BIT(UNMASKED_INT_EN);
+	__raw_writel(val, wdog_dd->base + WDT0_EN);
+	__raw_writel(1, wdog_dd->base + WDT0_RST);
+	wdog_dd->last_pet = sched_clock();
+	wdog_dd->enabled = true;
+
+	init_watchdog_sysfs(wdog_dd);
+
+	if (wdog_dd->irq_ppi)
+		enable_percpu_irq(wdog_dd->bark_irq, 0);
+	if (ipi_opt_en)
+		cpu_pm_register_notifier(&wdog_cpu_pm_nb);
+	dev_info(wdog_dd->dev, "MSM Watchdog Initialized\n");
+}
+
+static const struct of_device_id msm_wdog_match_table[] = {
+	{ .compatible = "qcom,msm-watchdog" },
+	{}
+};
+
+static void dump_pdata(struct msm_watchdog_data *pdata)
+{
+	dev_dbg(pdata->dev, "wdog bark_time %d", pdata->bark_time);
+	dev_dbg(pdata->dev, "wdog pet_time %d", pdata->pet_time);
+	dev_dbg(pdata->dev, "wdog perform ipi ping %d", pdata->do_ipi_ping);
+	dev_dbg(pdata->dev, "wdog base address is 0x%lx\n", (unsigned long)
+								pdata->base);
+}
+
+static int msm_wdog_dt_to_pdata(struct platform_device *pdev,
+					struct msm_watchdog_data *pdata)
+{
+	struct device_node *node = pdev->dev.of_node;
+	struct resource *res;
+	int ret;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wdt-base");
+	if (!res)
+		return -ENODEV;
+	pdata->size = resource_size(res);
+	pdata->phys_base = res->start;
+	if (unlikely(!(devm_request_mem_region(&pdev->dev, pdata->phys_base,
+					       pdata->size, "msm-watchdog")))) {
+
+		dev_err(&pdev->dev, "%s cannot reserve watchdog region\n",
+								__func__);
+		return -ENXIO;
+	}
+	pdata->base  = devm_ioremap(&pdev->dev, pdata->phys_base,
+							pdata->size);
+	if (!pdata->base) {
+		dev_err(&pdev->dev, "%s cannot map wdog register space\n",
+				__func__);
+		return -ENXIO;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+					   "wdt-absent-base");
+	if (res) {
+		pdata->wdog_absent_base  = devm_ioremap(&pdev->dev, res->start,
+							 resource_size(res));
+		if (!pdata->wdog_absent_base) {
+			dev_err(&pdev->dev,
+				"cannot map wdog absent register space\n");
+			return -ENXIO;
+		}
+	} else {
+		dev_info(&pdev->dev, "wdog absent resource not present\n");
+	}
+
+	pdata->bark_irq = platform_get_irq(pdev, 0);
+	pdata->bite_irq = platform_get_irq(pdev, 1);
+	ret = of_property_read_u32(node, "qcom,bark-time", &pdata->bark_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading bark time failed\n");
+		return -ENXIO;
+	}
+	ret = of_property_read_u32(node, "qcom,pet-time", &pdata->pet_time);
+	if (ret) {
+		dev_err(&pdev->dev, "reading pet time failed\n");
+		return -ENXIO;
+	}
+	pdata->do_ipi_ping = of_property_read_bool(node, "qcom,ipi-ping");
+	if (!pdata->bark_time) {
+		dev_err(&pdev->dev, "%s watchdog bark time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	if (!pdata->pet_time) {
+		dev_err(&pdev->dev, "%s watchdog pet time not setup\n",
+								__func__);
+		return -ENXIO;
+	}
+	pdata->wakeup_irq_enable = of_property_read_bool(node,
+							 "qcom,wakeup-enable");
+
+	pdata->irq_ppi = irq_is_percpu(pdata->bark_irq);
+	dump_pdata(pdata);
+	return 0;
+}
+
+static int msm_watchdog_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct msm_watchdog_data *wdog_dd;
+
+	if (!pdev->dev.of_node || !enable)
+		return -ENODEV;
+	wdog_dd = kzalloc(sizeof(struct msm_watchdog_data), GFP_KERNEL);
+	if (!wdog_dd)
+		return -ENOMEM;
+	ret = msm_wdog_dt_to_pdata(pdev, wdog_dd);
+	if (ret)
+		goto err;
+
+	wdog_data = wdog_dd;
+	wdog_dd->dev = &pdev->dev;
+	platform_set_drvdata(pdev, wdog_dd);
+	cpumask_clear(&wdog_dd->alive_mask);
+	wdog_dd->watchdog_task = kthread_create(watchdog_kthread, wdog_dd,
+			"msm_watchdog");
+	if (IS_ERR(wdog_dd->watchdog_task)) {
+		ret = PTR_ERR(wdog_dd->watchdog_task);
+		goto err;
+	}
+	init_watchdog_data(wdog_dd);
+	return 0;
+err:
+	kzfree(wdog_dd);
+	return ret;
+}
+
+static const struct dev_pm_ops msm_watchdog_dev_pm_ops = {
+	.suspend_noirq = msm_watchdog_suspend,
+	.resume_noirq = msm_watchdog_resume,
+};
+
+static struct platform_driver msm_watchdog_driver = {
+	.probe = msm_watchdog_probe,
+	.remove = msm_watchdog_remove,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &msm_watchdog_dev_pm_ops,
+		.of_match_table = msm_wdog_match_table,
+	},
+};
+
+static int init_watchdog(void)
+{
+	return platform_driver_register(&msm_watchdog_driver);
+}
+
+pure_initcall(init_watchdog);
+MODULE_DESCRIPTION("MSM Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 6c00d6f..7bd27a4 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -24,8 +24,19 @@
 	  scripts (/init.rc), and it defines priority values with minimum free memory size
 	  for each priority.
 
+config ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+	bool "Android Low Memory Killer: detect oom_adj values"
+	depends on ANDROID_LOW_MEMORY_KILLER
+	default y
+	---help---
+	  Detect oom_adj values written to
+	  /sys/module/lowmemorykiller/parameters/adj and convert them
+	  to oom_score_adj values.
+
 source "drivers/staging/android/ion/Kconfig"
 
+source "drivers/staging/android/fiq_debugger/Kconfig"
+
 endif # if ANDROID
 
 endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 7ed1be7..21b0ff4 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -1,6 +1,7 @@
 ccflags-y += -I$(src)			# needed for trace events
 
 obj-y					+= ion/
+obj-$(CONFIG_FIQ_DEBUGGER)		+= fiq_debugger/
 
 obj-$(CONFIG_ASHMEM)			+= ashmem.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER)	+= lowmemorykiller.o
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index ca9a53c..3a52b29 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -409,22 +409,14 @@
 	}
 	get_file(asma->file);
 
-	/*
-	 * XXX - Reworked to use shmem_zero_setup() instead of
-	 * shmem_set_file while we're in staging. -jstultz
-	 */
-	if (vma->vm_flags & VM_SHARED) {
-		ret = shmem_zero_setup(vma);
-		if (ret) {
-			fput(asma->file);
-			goto out;
-		}
+	if (vma->vm_flags & VM_SHARED)
+		shmem_set_file(vma, asma->file);
+	else {
+		if (vma->vm_file)
+			fput(vma->vm_file);
+		vma->vm_file = asma->file;
 	}
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = asma->file;
-
 out:
 	mutex_unlock(&ashmem_mutex);
 	return ret;
@@ -461,9 +453,9 @@
 		loff_t start = range->pgstart * PAGE_SIZE;
 		loff_t end = (range->pgend + 1) * PAGE_SIZE;
 
-		vfs_fallocate(range->asma->file,
-			      FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
-			      start, end - start);
+		range->asma->file->f_op->fallocate(range->asma->file,
+				FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+				start, end - start);
 		range->purged = ASHMEM_WAS_PURGED;
 		lru_del(range);
 
diff --git a/drivers/staging/android/fiq_debugger/Kconfig b/drivers/staging/android/fiq_debugger/Kconfig
new file mode 100644
index 0000000..60fc224
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Kconfig
@@ -0,0 +1,58 @@
+config FIQ_DEBUGGER
+	bool "FIQ Mode Serial Debugger"
+	default n
+	depends on ARM || ARM64
+	help
+	  The FIQ serial debugger can accept commands even when the
+	  kernel is unresponsive due to being stuck with interrupts
+	  disabled.
+
+config FIQ_DEBUGGER_NO_SLEEP
+	bool "Keep serial debugger active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables the serial debugger at boot. Passing
+	  fiq_debugger.no_sleep on the kernel commandline will
+	  override this config option.
+
+config FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+	bool "Don't disable wakeup IRQ when debugger is active"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Don't disable the wakeup irq when enabling the uart clock.  This will
+	  cause extra interrupts, but it makes the serial debugger usable with
+	  cause extra interrupts, but it makes the serial debugger usable
+	  collapse.
+
+config FIQ_DEBUGGER_CONSOLE
+	bool "Console on FIQ Serial Debugger port"
+	depends on FIQ_DEBUGGER
+	default n
+	help
+	  Enables a console so that printk messages are displayed on
+	  the debugger serial port as they occur.
+
+config FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+	bool "Put the FIQ debugger into console mode by default"
+	depends on FIQ_DEBUGGER_CONSOLE
+	default n
+	help
+	  If enabled, this puts the fiq debugger into console mode by default.
+	  Otherwise, the fiq debugger will start out in debug mode.
+
+config FIQ_DEBUGGER_UART_OVERLAY
+	bool "Install uart DT overlay"
+	depends on FIQ_DEBUGGER
+	select OF_OVERLAY
+	default n
+	help
+	  If enabled, the fiq debugger calls fiq_debugger_uart_overlay(),
+	  which applies the uart_overlay@0 overlay to disable the regular
+	  uart driver on the debugger port.
+
+config FIQ_WATCHDOG
+	bool
+	select FIQ_DEBUGGER
+	select PSTORE_RAM
+	default n
diff --git a/drivers/staging/android/fiq_debugger/Makefile b/drivers/staging/android/fiq_debugger/Makefile
new file mode 100644
index 0000000..a7ca487
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/Makefile
@@ -0,0 +1,4 @@
+obj-y			+= fiq_debugger.o
+obj-$(CONFIG_ARM)	+= fiq_debugger_arm.o
+obj-$(CONFIG_ARM64)	+= fiq_debugger_arm64.o
+obj-$(CONFIG_FIQ_WATCHDOG)	+= fiq_watchdog.o
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.c b/drivers/staging/android/fiq_debugger/fiq_debugger.c
new file mode 100644
index 0000000..b132cff
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.c
@@ -0,0 +1,1248 @@
+/*
+ * drivers/staging/android/fiq_debugger.c
+ *
+ * Serial Debugger Interface accessed through an FIQ interrupt.
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <stdarg.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/console.h>
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/kernel_stat.h>
+#include <linux/kmsg_dump.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/timer.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/wakelock.h>
+
+#ifdef CONFIG_FIQ_GLUE
+#include <asm/fiq_glue.h>
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_UART_OVERLAY
+#include <linux/of.h>
+#endif
+
+#include <linux/uaccess.h>
+
+#include "fiq_debugger.h"
+#include "fiq_debugger_priv.h"
+#include "fiq_debugger_ringbuf.h"
+
+#define DEBUG_MAX 64
+#define MAX_UNHANDLED_FIQ_COUNT 1000000
+
+#define MAX_FIQ_DEBUGGER_PORTS 4
+
+struct fiq_debugger_state {
+#ifdef CONFIG_FIQ_GLUE
+	struct fiq_glue_handler handler;
+#endif
+	struct fiq_debugger_output output;
+
+	int fiq;
+	int uart_irq;
+	int signal_irq;
+	int wakeup_irq;
+	bool wakeup_irq_no_set_wake;
+	struct clk *clk;
+	struct fiq_debugger_pdata *pdata;
+	struct platform_device *pdev;
+
+	char debug_cmd[DEBUG_MAX];
+	int debug_busy;
+	int debug_abort;
+
+	char debug_buf[DEBUG_MAX];
+	int debug_count;
+
+	bool no_sleep;
+	bool debug_enable;
+	bool ignore_next_wakeup_irq;
+	struct timer_list sleep_timer;
+	spinlock_t sleep_timer_lock;
+	bool uart_enabled;
+	struct wake_lock debugger_wake_lock;
+	bool console_enable;
+	int current_cpu;
+	atomic_t unhandled_fiq_count;
+	bool in_fiq;
+
+	struct work_struct work;
+	spinlock_t work_lock;
+	char work_cmd[DEBUG_MAX];
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+	spinlock_t console_lock;
+	struct console console;
+	struct tty_port tty_port;
+	struct fiq_debugger_ringbuf *tty_rbuf;
+	bool syslog_dumping;
+#endif
+
+	unsigned int last_irqs[NR_IRQS];
+	unsigned int last_local_timer_irqs[NR_CPUS];
+};
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+struct tty_driver *fiq_tty_driver;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_NO_SLEEP
+static bool initial_no_sleep = true;
+#else
+static bool initial_no_sleep;
+#endif
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE_DEFAULT_ENABLE
+static bool initial_debug_enable = true;
+static bool initial_console_enable = true;
+#else
+static bool initial_debug_enable;
+static bool initial_console_enable;
+#endif
+
+static bool fiq_kgdb_enable;
+static bool fiq_debugger_disable;
+
+module_param_named(no_sleep, initial_no_sleep, bool, 0644);
+module_param_named(debug_enable, initial_debug_enable, bool, 0644);
+module_param_named(console_enable, initial_console_enable, bool, 0644);
+module_param_named(kgdb_enable, fiq_kgdb_enable, bool, 0644);
+module_param_named(disable, fiq_debugger_disable, bool, 0644);
+
+#ifdef CONFIG_FIQ_DEBUGGER_WAKEUP_IRQ_ALWAYS_ON
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state) {}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state) {}
+#else
+static inline
+void fiq_debugger_enable_wakeup_irq(struct fiq_debugger_state *state)
+{
+	if (state->wakeup_irq < 0)
+		return;
+	enable_irq(state->wakeup_irq);
+	if (!state->wakeup_irq_no_set_wake)
+		enable_irq_wake(state->wakeup_irq);
+}
+static inline
+void fiq_debugger_disable_wakeup_irq(struct fiq_debugger_state *state)
+{
+	if (state->wakeup_irq < 0)
+		return;
+	disable_irq_nosync(state->wakeup_irq);
+	if (!state->wakeup_irq_no_set_wake)
+		disable_irq_wake(state->wakeup_irq);
+}
+#endif
+
+static inline bool fiq_debugger_have_fiq(struct fiq_debugger_state *state)
+{
+	return (state->fiq >= 0);
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_force_irq(struct fiq_debugger_state *state)
+{
+	unsigned int irq = state->signal_irq;
+
+	if (WARN_ON(!fiq_debugger_have_fiq(state)))
+		return;
+	if (state->pdata->force_irq) {
+		state->pdata->force_irq(state->pdev, irq);
+	} else {
+		struct irq_chip *chip = irq_get_chip(irq);
+		if (chip && chip->irq_retrigger)
+			chip->irq_retrigger(irq_get_irq_data(irq));
+	}
+}
+#endif
+
+static void fiq_debugger_uart_enable(struct fiq_debugger_state *state)
+{
+	if (state->clk)
+		clk_enable(state->clk);
+	if (state->pdata->uart_enable)
+		state->pdata->uart_enable(state->pdev);
+}
+
+static void fiq_debugger_uart_disable(struct fiq_debugger_state *state)
+{
+	if (state->pdata->uart_disable)
+		state->pdata->uart_disable(state->pdev);
+	if (state->clk)
+		clk_disable(state->clk);
+}
+
+static void fiq_debugger_uart_flush(struct fiq_debugger_state *state)
+{
+	if (state->pdata->uart_flush)
+		state->pdata->uart_flush(state->pdev);
+}
+
+static void fiq_debugger_putc(struct fiq_debugger_state *state, char c)
+{
+	state->pdata->uart_putc(state->pdev, c);
+}
+
+static void fiq_debugger_puts(struct fiq_debugger_state *state, char *s)
+{
+	unsigned c;
+	while ((c = *s++)) {
+		if (c == '\n')
+			fiq_debugger_putc(state, '\r');
+		fiq_debugger_putc(state, c);
+	}
+}
+
+static void fiq_debugger_prompt(struct fiq_debugger_state *state)
+{
+	fiq_debugger_puts(state, "debug> ");
+}
+
+static void fiq_debugger_dump_kernel_log(struct fiq_debugger_state *state)
+{
+	char buf[512];
+	size_t len;
+	struct kmsg_dumper dumper = { .active = true };
+
+
+	kmsg_dump_rewind_nolock(&dumper);
+	while (kmsg_dump_get_line_nolock(&dumper, true, buf,
+					 sizeof(buf) - 1, &len)) {
+		buf[len] = 0;
+		fiq_debugger_puts(state, buf);
+	}
+}
+
+static void fiq_debugger_printf(struct fiq_debugger_output *output,
+			       const char *fmt, ...)
+{
+	struct fiq_debugger_state *state;
+	char buf[256];
+	va_list ap;
+
+	state = container_of(output, struct fiq_debugger_state, output);
+	va_start(ap, fmt);
+	vsnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	fiq_debugger_puts(state, buf);
+}
+
+/* Safe outside fiq context */
+static int fiq_debugger_printf_nfiq(void *cookie, const char *fmt, ...)
+{
+	struct fiq_debugger_state *state = cookie;
+	char buf[256];
+	va_list ap;
+	unsigned long irq_flags;
+
+	va_start(ap, fmt);
+	vsnprintf(buf, 128, fmt, ap);
+	va_end(ap);
+
+	local_irq_save(irq_flags);
+	fiq_debugger_puts(state, buf);
+	fiq_debugger_uart_flush(state);
+	local_irq_restore(irq_flags);
+	return state->debug_abort;
+}
+
+static void fiq_debugger_dump_irqs(struct fiq_debugger_state *state)
+{
+	int n;
+	struct irq_desc *desc;
+
+	fiq_debugger_printf(&state->output,
+			"irqnr       total  since-last   status  name\n");
+	for_each_irq_desc(n, desc) {
+		struct irqaction *act = desc->action;
+		if (!act && !kstat_irqs(n))
+			continue;
+		fiq_debugger_printf(&state->output, "%5d: %10u %11u %8x  %s\n", n,
+			kstat_irqs(n),
+			kstat_irqs(n) - state->last_irqs[n],
+			desc->status_use_accessors,
+			(act && act->name) ? act->name : "???");
+		state->last_irqs[n] = kstat_irqs(n);
+	}
+}
+
+static void fiq_debugger_do_ps(struct fiq_debugger_state *state)
+{
+	struct task_struct *g;
+	struct task_struct *p;
+	unsigned task_state;
+	static const char stat_nam[] = "RSDTtZX";
+
+	fiq_debugger_printf(&state->output, "pid   ppid  prio task            pc\n");
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		task_state = p->state ? __ffs(p->state) + 1 : 0;
+		fiq_debugger_printf(&state->output,
+			     "%5d %5d %4d ", p->pid, p->parent->pid, p->prio);
+		fiq_debugger_printf(&state->output, "%-13.13s %c", p->comm,
+			     task_state >= sizeof(stat_nam) ? '?' : stat_nam[task_state]);
+		if (task_state == TASK_RUNNING)
+			fiq_debugger_printf(&state->output, " running\n");
+		else
+			fiq_debugger_printf(&state->output, " %08lx\n",
+					thread_saved_pc(p));
+	} while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+	state->syslog_dumping = true;
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+	state->syslog_dumping = false;
+}
+#else
+extern int do_syslog(int type, char __user *buf, int count);
+static void fiq_debugger_begin_syslog_dump(struct fiq_debugger_state *state)
+{
+	do_syslog(5 /* clear */, NULL, 0);
+}
+
+static void fiq_debugger_end_syslog_dump(struct fiq_debugger_state *state)
+{
+	fiq_debugger_dump_kernel_log(state);
+}
+#endif
+
+static void fiq_debugger_do_sysrq(struct fiq_debugger_state *state, char rq)
+{
+	if ((rq == 'g' || rq == 'G') && !fiq_kgdb_enable) {
+		fiq_debugger_printf(&state->output, "sysrq-g blocked\n");
+		return;
+	}
+	fiq_debugger_begin_syslog_dump(state);
+	handle_sysrq(rq);
+	fiq_debugger_end_syslog_dump(state);
+}
+
+#ifdef CONFIG_KGDB
+static void fiq_debugger_do_kgdb(struct fiq_debugger_state *state)
+{
+	if (!fiq_kgdb_enable) {
+		fiq_debugger_printf(&state->output, "kgdb through fiq debugger not enabled\n");
+		return;
+	}
+
+	fiq_debugger_printf(&state->output, "enabling console and triggering kgdb\n");
+	state->console_enable = true;
+	handle_sysrq('g');
+}
+#endif
+
+static void fiq_debugger_schedule_work(struct fiq_debugger_state *state,
+		char *cmd)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->work_lock, flags);
+	if (state->work_cmd[0] != '\0') {
+		fiq_debugger_printf(&state->output, "work command processor busy\n");
+		spin_unlock_irqrestore(&state->work_lock, flags);
+		return;
+	}
+
+	strlcpy(state->work_cmd, cmd, sizeof(state->work_cmd));
+	spin_unlock_irqrestore(&state->work_lock, flags);
+
+	schedule_work(&state->work);
+}
+
+static void fiq_debugger_work(struct work_struct *work)
+{
+	struct fiq_debugger_state *state;
+	char work_cmd[DEBUG_MAX];
+	char *cmd;
+	unsigned long flags;
+
+	state = container_of(work, struct fiq_debugger_state, work);
+
+	spin_lock_irqsave(&state->work_lock, flags);
+
+	strlcpy(work_cmd, state->work_cmd, sizeof(work_cmd));
+	state->work_cmd[0] = '\0';
+
+	spin_unlock_irqrestore(&state->work_lock, flags);
+
+	cmd = work_cmd;
+	if (!strncmp(cmd, "reboot", 6)) {
+		cmd += 6;
+		while (*cmd == ' ')
+			cmd++;
+		if (*cmd != '\0')
+			kernel_restart(cmd);
+		else
+			kernel_restart(NULL);
+	} else {
+		fiq_debugger_printf(&state->output, "unknown work command '%s'\n",
+				work_cmd);
+	}
+}
+
+/* This function CANNOT be called in FIQ context */
+static void fiq_debugger_irq_exec(struct fiq_debugger_state *state, char *cmd)
+{
+	if (!strcmp(cmd, "ps"))
+		fiq_debugger_do_ps(state);
+	if (!strcmp(cmd, "sysrq"))
+		fiq_debugger_do_sysrq(state, 'h');
+	if (!strncmp(cmd, "sysrq ", 6))
+		fiq_debugger_do_sysrq(state, cmd[6]);
+#ifdef CONFIG_KGDB
+	if (!strcmp(cmd, "kgdb"))
+		fiq_debugger_do_kgdb(state);
+#endif
+	if (!strncmp(cmd, "reboot", 6))
+		fiq_debugger_schedule_work(state, cmd);
+}
+
+static void fiq_debugger_help(struct fiq_debugger_state *state)
+{
+	fiq_debugger_printf(&state->output,
+				"FIQ Debugger commands:\n"
+				" pc            PC status\n"
+				" regs          Register dump\n"
+				" allregs       Extended Register dump\n"
+				" bt            Stack trace\n"
+				" reboot [<c>]  Reboot with command <c>\n"
+				" reset [<c>]   Hard reset with command <c>\n"
+				" irqs          Interrupt status\n"
+				" kmsg          Kernel log\n"
+				" version       Kernel version\n");
+	fiq_debugger_printf(&state->output,
+				" sleep         Allow sleep while in FIQ\n"
+				" nosleep       Disable sleep while in FIQ\n"
+				" console       Switch terminal to console\n"
+				" cpu           Current CPU\n"
+				" cpu <number>  Switch to CPU<number>\n");
+	fiq_debugger_printf(&state->output,
+				" ps            Process list\n"
+				" sysrq         sysrq options\n"
+				" sysrq <param> Execute sysrq with <param>\n");
+#ifdef CONFIG_KGDB
+	fiq_debugger_printf(&state->output,
+				" kgdb          Enter kernel debugger\n");
+#endif
+}
+
+static void fiq_debugger_take_affinity(void *info)
+{
+	struct fiq_debugger_state *state = info;
+	struct cpumask cpumask;
+
+	cpumask_clear(&cpumask);
+	cpumask_set_cpu(get_cpu(), &cpumask);
+
+	irq_set_affinity(state->uart_irq, &cpumask);
+}
+
+static void fiq_debugger_switch_cpu(struct fiq_debugger_state *state, int cpu)
+{
+	if (!fiq_debugger_have_fiq(state))
+		smp_call_function_single(cpu, fiq_debugger_take_affinity, state,
+				false);
+	state->current_cpu = cpu;
+}
+
+static bool fiq_debugger_fiq_exec(struct fiq_debugger_state *state,
+			const char *cmd, const struct pt_regs *regs,
+			void *svc_sp)
+{
+	bool signal_helper = false;
+
+	if (!strcmp(cmd, "help") || !strcmp(cmd, "?")) {
+		fiq_debugger_help(state);
+	} else if (!strcmp(cmd, "pc")) {
+		fiq_debugger_dump_pc(&state->output, regs);
+	} else if (!strcmp(cmd, "regs")) {
+		fiq_debugger_dump_regs(&state->output, regs);
+	} else if (!strcmp(cmd, "allregs")) {
+		fiq_debugger_dump_allregs(&state->output, regs);
+	} else if (!strcmp(cmd, "bt")) {
+		fiq_debugger_dump_stacktrace(&state->output, regs, 100, svc_sp);
+	} else if (!strncmp(cmd, "reset", 5)) {
+		cmd += 5;
+		while (*cmd == ' ')
+			cmd++;
+		if (*cmd) {
+			char tmp_cmd[32];
+			strlcpy(tmp_cmd, cmd, sizeof(tmp_cmd));
+			machine_restart(tmp_cmd);
+		} else {
+			machine_restart(NULL);
+		}
+	} else if (!strcmp(cmd, "irqs")) {
+		fiq_debugger_dump_irqs(state);
+	} else if (!strcmp(cmd, "kmsg")) {
+		fiq_debugger_dump_kernel_log(state);
+	} else if (!strcmp(cmd, "version")) {
+		fiq_debugger_printf(&state->output, "%s\n", linux_banner);
+	} else if (!strcmp(cmd, "sleep")) {
+		state->no_sleep = false;
+		fiq_debugger_printf(&state->output, "enabling sleep\n");
+	} else if (!strcmp(cmd, "nosleep")) {
+		state->no_sleep = true;
+		fiq_debugger_printf(&state->output, "disabling sleep\n");
+	} else if (!strcmp(cmd, "console")) {
+		fiq_debugger_printf(&state->output, "console mode\n");
+		fiq_debugger_uart_flush(state);
+		state->console_enable = true;
+	} else if (!strcmp(cmd, "cpu")) {
+		fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+	} else if (!strncmp(cmd, "cpu ", 4)) {
+		unsigned long cpu = 0;
+		if (kstrtoul(cmd + 4, 10, &cpu) == 0)
+			fiq_debugger_switch_cpu(state, cpu);
+		else
+			fiq_debugger_printf(&state->output, "invalid cpu\n");
+		fiq_debugger_printf(&state->output, "cpu %d\n", state->current_cpu);
+	} else {
+		if (state->debug_busy) {
+			fiq_debugger_printf(&state->output,
+				"command processor busy. trying to abort.\n");
+			state->debug_abort = -1;
+		} else {
+			strcpy(state->debug_cmd, cmd);
+			state->debug_busy = 1;
+		}
+
+		return true;
+	}
+	if (!state->console_enable)
+		fiq_debugger_prompt(state);
+
+	return signal_helper;
+}
+
+static void fiq_debugger_sleep_timer_expired(unsigned long data)
+{
+	struct fiq_debugger_state *state = (struct fiq_debugger_state *)data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->sleep_timer_lock, flags);
+	if (state->uart_enabled && !state->no_sleep) {
+		if (state->debug_enable && !state->console_enable) {
+			state->debug_enable = false;
+			fiq_debugger_printf_nfiq(state,
+					"suspending fiq debugger\n");
+		}
+		state->ignore_next_wakeup_irq = true;
+		fiq_debugger_uart_disable(state);
+		state->uart_enabled = false;
+		fiq_debugger_enable_wakeup_irq(state);
+	}
+	wake_unlock(&state->debugger_wake_lock);
+	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static void fiq_debugger_handle_wakeup(struct fiq_debugger_state *state)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&state->sleep_timer_lock, flags);
+	if (state->wakeup_irq >= 0 && state->ignore_next_wakeup_irq) {
+		state->ignore_next_wakeup_irq = false;
+	} else if (!state->uart_enabled) {
+		wake_lock(&state->debugger_wake_lock);
+		fiq_debugger_uart_enable(state);
+		state->uart_enabled = true;
+		fiq_debugger_disable_wakeup_irq(state);
+		mod_timer(&state->sleep_timer, jiffies + HZ / 2);
+	}
+	spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+}
+
+static irqreturn_t fiq_debugger_wakeup_irq_handler(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+
+	if (!state->no_sleep)
+		fiq_debugger_puts(state, "WAKEUP\n");
+	fiq_debugger_handle_wakeup(state);
+
+	return IRQ_HANDLED;
+}
+
+static
+void fiq_debugger_handle_console_irq_context(struct fiq_debugger_state *state)
+{
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	if (state->tty_port.ops) {
+		int i;
+		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+		for (i = 0; i < count; i++) {
+			int c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+			tty_insert_flip_char(&state->tty_port, c, TTY_NORMAL);
+			if (!fiq_debugger_ringbuf_consume(state->tty_rbuf, 1))
+				pr_warn("fiq tty failed to consume byte\n");
+		}
+		tty_flip_buffer_push(&state->tty_port);
+	}
+#endif
+}
+
+static void fiq_debugger_handle_irq_context(struct fiq_debugger_state *state)
+{
+	if (!state->no_sleep) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&state->sleep_timer_lock, flags);
+		wake_lock(&state->debugger_wake_lock);
+		mod_timer(&state->sleep_timer, jiffies + HZ * 5);
+		spin_unlock_irqrestore(&state->sleep_timer_lock, flags);
+	}
+	fiq_debugger_handle_console_irq_context(state);
+	if (state->debug_busy) {
+		fiq_debugger_irq_exec(state, state->debug_cmd);
+		if (!state->console_enable)
+			fiq_debugger_prompt(state);
+		state->debug_busy = 0;
+	}
+}
+
+static int fiq_debugger_getc(struct fiq_debugger_state *state)
+{
+	return state->pdata->uart_getc(state->pdev);
+}
+
+static bool fiq_debugger_handle_uart_interrupt(struct fiq_debugger_state *state,
+			int this_cpu, const struct pt_regs *regs, void *svc_sp)
+{
+	int c;
+	static int last_c;
+	int count = 0;
+	bool signal_helper = false;
+
+	if (this_cpu != state->current_cpu) {
+		if (state->in_fiq)
+			return false;
+
+		if (atomic_inc_return(&state->unhandled_fiq_count) !=
+					MAX_UNHANDLED_FIQ_COUNT)
+			return false;
+
+		fiq_debugger_printf(&state->output,
+			"fiq_debugger: cpu %d not responding, "
+			"reverting to cpu %d\n", state->current_cpu,
+			this_cpu);
+
+		atomic_set(&state->unhandled_fiq_count, 0);
+		fiq_debugger_switch_cpu(state, this_cpu);
+		return false;
+	}
+
+	state->in_fiq = true;
+
+	while ((c = fiq_debugger_getc(state)) != FIQ_DEBUGGER_NO_CHAR) {
+		count++;
+		if (!state->debug_enable) {
+			if ((c == 13) || (c == 10)) {
+				state->debug_enable = true;
+				state->debug_count = 0;
+				fiq_debugger_prompt(state);
+			}
+		} else if (c == FIQ_DEBUGGER_BREAK) {
+			state->console_enable = false;
+			fiq_debugger_puts(state, "fiq debugger mode\n");
+			state->debug_count = 0;
+			fiq_debugger_prompt(state);
+#ifdef CONFIG_FIQ_DEBUGGER_CONSOLE
+		} else if (state->console_enable && state->tty_rbuf) {
+			fiq_debugger_ringbuf_push(state->tty_rbuf, c);
+			signal_helper = true;
+#endif
+		} else if ((c >= ' ') && (c < 127)) {
+			if (state->debug_count < (DEBUG_MAX - 1)) {
+				state->debug_buf[state->debug_count++] = c;
+				fiq_debugger_putc(state, c);
+			}
+		} else if ((c == 8) || (c == 127)) {
+			if (state->debug_count > 0) {
+				state->debug_count--;
+				fiq_debugger_putc(state, 8);
+				fiq_debugger_putc(state, ' ');
+				fiq_debugger_putc(state, 8);
+			}
+		} else if ((c == 13) || (c == 10)) {
+			if (c == '\r' || (c == '\n' && last_c != '\r')) {
+				fiq_debugger_putc(state, '\r');
+				fiq_debugger_putc(state, '\n');
+			}
+			if (state->debug_count) {
+				state->debug_buf[state->debug_count] = 0;
+				state->debug_count = 0;
+				signal_helper |=
+					fiq_debugger_fiq_exec(state,
+							state->debug_buf,
+							regs, svc_sp);
+			} else {
+				fiq_debugger_prompt(state);
+			}
+		}
+		last_c = c;
+	}
+	if (!state->console_enable)
+		fiq_debugger_uart_flush(state);
+	if (state->pdata->fiq_ack)
+		state->pdata->fiq_ack(state->pdev, state->fiq);
+
+	/* poke sleep timer if necessary */
+	if (state->debug_enable && !state->no_sleep)
+		signal_helper = true;
+
+	atomic_set(&state->unhandled_fiq_count, 0);
+	state->in_fiq = false;
+
+	return signal_helper;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_fiq(struct fiq_glue_handler *h,
+		const struct pt_regs *regs, void *svc_sp)
+{
+	struct fiq_debugger_state *state =
+		container_of(h, struct fiq_debugger_state, handler);
+	unsigned int this_cpu = THREAD_INFO(svc_sp)->cpu;
+	bool need_irq;
+
+	need_irq = fiq_debugger_handle_uart_interrupt(state, this_cpu, regs,
+			svc_sp);
+	if (need_irq)
+		fiq_debugger_force_irq(state);
+}
+#endif
+
+/*
+ * When not using FIQs, we only use this single interrupt as an entry point.
+ * This just effectively takes over the UART interrupt and does all the work
+ * in this context.
+ */
+static irqreturn_t fiq_debugger_uart_irq(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+	bool not_done;
+
+	fiq_debugger_handle_wakeup(state);
+
+	/* handle the debugger irq in regular context */
+	not_done = fiq_debugger_handle_uart_interrupt(state, smp_processor_id(),
+					      get_irq_regs(),
+					      current_thread_info());
+	if (not_done)
+		fiq_debugger_handle_irq_context(state);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * If FIQs are used, not everything can happen in fiq context.
+ * FIQ handler does what it can and then signals this interrupt to finish the
+ * job in irq context.
+ */
+static irqreturn_t fiq_debugger_signal_irq(int irq, void *dev)
+{
+	struct fiq_debugger_state *state = dev;
+
+	if (state->pdata->force_irq_ack)
+		state->pdata->force_irq_ack(state->pdev, state->signal_irq);
+
+	fiq_debugger_handle_irq_context(state);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_FIQ_GLUE
+static void fiq_debugger_resume(struct fiq_glue_handler *h)
+{
+	struct fiq_debugger_state *state =
+		container_of(h, struct fiq_debugger_state, handler);
+	if (state->pdata->uart_resume)
+		state->pdata->uart_resume(state->pdev);
+}
+#endif
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+struct tty_driver *fiq_debugger_console_device(struct console *co, int *index)
+{
+	*index = co->index;
+	return fiq_tty_driver;
+}
+
+static void fiq_debugger_console_write(struct console *co,
+				const char *s, unsigned int count)
+{
+	struct fiq_debugger_state *state;
+	unsigned long flags;
+
+	state = container_of(co, struct fiq_debugger_state, console);
+
+	if (!state->console_enable && !state->syslog_dumping)
+		return;
+
+	fiq_debugger_uart_enable(state);
+	spin_lock_irqsave(&state->console_lock, flags);
+	while (count--) {
+		if (*s == '\n')
+			fiq_debugger_putc(state, '\r');
+		fiq_debugger_putc(state, *s++);
+	}
+	fiq_debugger_uart_flush(state);
+	spin_unlock_irqrestore(&state->console_lock, flags);
+	fiq_debugger_uart_disable(state);
+}
+
+static struct console fiq_debugger_console = {
+	.name = "ttyFIQ",
+	.device = fiq_debugger_console_device,
+	.write = fiq_debugger_console_write,
+	.flags = CON_PRINTBUFFER | CON_ANYTIME | CON_ENABLED,
+};
+
+int fiq_tty_open(struct tty_struct *tty, struct file *filp)
+{
+	int line = tty->index;
+	struct fiq_debugger_state **states = tty->driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+
+	return tty_port_open(&state->tty_port, tty, filp);
+}
+
+void fiq_tty_close(struct tty_struct *tty, struct file *filp)
+{
+	tty_port_close(tty->port, tty, filp);
+}
+
+int fiq_tty_write(struct tty_struct *tty, const unsigned char *buf, int count)
+{
+	int i;
+	int line = tty->index;
+	struct fiq_debugger_state **states = tty->driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+
+	if (!state->console_enable)
+		return count;
+
+	fiq_debugger_uart_enable(state);
+	spin_lock_irq(&state->console_lock);
+	for (i = 0; i < count; i++)
+		fiq_debugger_putc(state, *buf++);
+	spin_unlock_irq(&state->console_lock);
+	fiq_debugger_uart_disable(state);
+
+	return count;
+}
+
+int fiq_tty_write_room(struct tty_struct *tty)
+{
+	return 16;
+}
+
+#ifdef CONFIG_CONSOLE_POLL
+static int fiq_tty_poll_init(struct tty_driver *driver, int line, char *options)
+{
+	return 0;
+}
+
+static int fiq_tty_poll_get_char(struct tty_driver *driver, int line)
+{
+	struct fiq_debugger_state **states = driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+	int c = NO_POLL_CHAR;
+
+	fiq_debugger_uart_enable(state);
+	if (fiq_debugger_have_fiq(state)) {
+		int count = fiq_debugger_ringbuf_level(state->tty_rbuf);
+		if (count > 0) {
+			c = fiq_debugger_ringbuf_peek(state->tty_rbuf, 0);
+			fiq_debugger_ringbuf_consume(state->tty_rbuf, 1);
+		}
+	} else {
+		c = fiq_debugger_getc(state);
+		if (c == FIQ_DEBUGGER_NO_CHAR)
+			c = NO_POLL_CHAR;
+	}
+	fiq_debugger_uart_disable(state);
+
+	return c;
+}
+
+static void fiq_tty_poll_put_char(struct tty_driver *driver, int line, char ch)
+{
+	struct fiq_debugger_state **states = driver->driver_state;
+	struct fiq_debugger_state *state = states[line];
+	fiq_debugger_uart_enable(state);
+	fiq_debugger_putc(state, ch);
+	fiq_debugger_uart_disable(state);
+}
+#endif
+
+static const struct tty_port_operations fiq_tty_port_ops;
+
+static const struct tty_operations fiq_tty_driver_ops = {
+	.write = fiq_tty_write,
+	.write_room = fiq_tty_write_room,
+	.open = fiq_tty_open,
+	.close = fiq_tty_close,
+#ifdef CONFIG_CONSOLE_POLL
+	.poll_init = fiq_tty_poll_init,
+	.poll_get_char = fiq_tty_poll_get_char,
+	.poll_put_char = fiq_tty_poll_put_char,
+#endif
+};
+
+static int fiq_debugger_tty_init(void)
+{
+	int ret;
+	struct fiq_debugger_state **states = NULL;
+
+	states = kzalloc(sizeof(*states) * MAX_FIQ_DEBUGGER_PORTS, GFP_KERNEL);
+	if (!states) {
+		pr_err("Failed to allocate fiq debugger state structures\n");
+		return -ENOMEM;
+	}
+
+	fiq_tty_driver = alloc_tty_driver(MAX_FIQ_DEBUGGER_PORTS);
+	if (!fiq_tty_driver) {
+		pr_err("Failed to allocate fiq debugger tty\n");
+		ret = -ENOMEM;
+		goto err_free_state;
+	}
+
+	fiq_tty_driver->owner		= THIS_MODULE;
+	fiq_tty_driver->driver_name	= "fiq-debugger";
+	fiq_tty_driver->name		= "ttyFIQ";
+	fiq_tty_driver->type		= TTY_DRIVER_TYPE_SERIAL;
+	fiq_tty_driver->subtype		= SERIAL_TYPE_NORMAL;
+	fiq_tty_driver->init_termios	= tty_std_termios;
+	fiq_tty_driver->flags		= TTY_DRIVER_REAL_RAW |
+					  TTY_DRIVER_DYNAMIC_DEV;
+	fiq_tty_driver->driver_state	= states;
+
+	fiq_tty_driver->init_termios.c_cflag =
+					B115200 | CS8 | CREAD | HUPCL | CLOCAL;
+	fiq_tty_driver->init_termios.c_ispeed = 115200;
+	fiq_tty_driver->init_termios.c_ospeed = 115200;
+
+	tty_set_operations(fiq_tty_driver, &fiq_tty_driver_ops);
+
+	ret = tty_register_driver(fiq_tty_driver);
+	if (ret) {
+		pr_err("Failed to register fiq tty: %d\n", ret);
+		goto err_free_tty;
+	}
+
+	pr_info("Registered FIQ tty driver\n");
+	return 0;
+
+err_free_tty:
+	put_tty_driver(fiq_tty_driver);
+	fiq_tty_driver = NULL;
+err_free_state:
+	kfree(states);
+	return ret;
+}
+
+static int fiq_debugger_tty_init_one(struct fiq_debugger_state *state)
+{
+	int ret;
+	struct device *tty_dev;
+	struct fiq_debugger_state **states = fiq_tty_driver->driver_state;
+
+	states[state->pdev->id] = state;
+
+	state->tty_rbuf = fiq_debugger_ringbuf_alloc(1024);
+	if (!state->tty_rbuf) {
+		pr_err("Failed to allocate fiq debugger ringbuf\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	tty_port_init(&state->tty_port);
+	state->tty_port.ops = &fiq_tty_port_ops;
+
+	tty_dev = tty_port_register_device(&state->tty_port, fiq_tty_driver,
+					   state->pdev->id, &state->pdev->dev);
+	if (IS_ERR(tty_dev)) {
+		pr_err("Failed to register fiq debugger tty device\n");
+		ret = PTR_ERR(tty_dev);
+		goto err;
+	}
+
+	device_set_wakeup_capable(tty_dev, 1);
+
+	pr_info("Registered fiq debugger ttyFIQ%d\n", state->pdev->id);
+
+	return 0;
+
+err:
+	fiq_debugger_ringbuf_free(state->tty_rbuf);
+	state->tty_rbuf = NULL;
+	return ret;
+}
+#endif
+
+static int fiq_debugger_dev_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+	if (state->pdata->uart_dev_suspend)
+		return state->pdata->uart_dev_suspend(pdev);
+	return 0;
+}
+
+static int fiq_debugger_dev_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct fiq_debugger_state *state = platform_get_drvdata(pdev);
+
+	if (state->pdata->uart_dev_resume)
+		return state->pdata->uart_dev_resume(pdev);
+	return 0;
+}
+
+static int fiq_debugger_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct fiq_debugger_pdata *pdata = dev_get_platdata(&pdev->dev);
+	struct fiq_debugger_state *state;
+	int fiq;
+	int uart_irq;
+
+	if (pdev->id >= MAX_FIQ_DEBUGGER_PORTS)
+		return -EINVAL;
+
+	if (!pdata->uart_getc || !pdata->uart_putc)
+		return -EINVAL;
+	if ((pdata->uart_enable && !pdata->uart_disable) ||
+	    (!pdata->uart_enable && pdata->uart_disable))
+		return -EINVAL;
+
+	fiq = platform_get_irq_byname(pdev, "fiq");
+	uart_irq = platform_get_irq_byname(pdev, "uart_irq");
+
+	/* uart_irq mode and fiq mode are mutually exclusive, but one of them
+	 * is required */
+	if ((uart_irq < 0 && fiq < 0) || (uart_irq >= 0 && fiq >= 0))
+		return -EINVAL;
+	if (fiq >= 0 && !pdata->fiq_enable)
+		return -EINVAL;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return -ENOMEM;
+	state->output.printf = fiq_debugger_printf;
+	setup_timer(&state->sleep_timer, fiq_debugger_sleep_timer_expired,
+		    (unsigned long)state);
+	state->pdata = pdata;
+	state->pdev = pdev;
+	state->no_sleep = initial_no_sleep;
+	state->debug_enable = initial_debug_enable;
+	state->console_enable = initial_console_enable;
+
+	state->fiq = fiq;
+	state->uart_irq = uart_irq;
+	state->signal_irq = platform_get_irq_byname(pdev, "signal");
+	state->wakeup_irq = platform_get_irq_byname(pdev, "wakeup");
+
+	INIT_WORK(&state->work, fiq_debugger_work);
+	spin_lock_init(&state->work_lock);
+
+	platform_set_drvdata(pdev, state);
+
+	spin_lock_init(&state->sleep_timer_lock);
+
+	if (state->wakeup_irq < 0 && fiq_debugger_have_fiq(state))
+		state->no_sleep = true;
+	state->ignore_next_wakeup_irq = !state->no_sleep;
+
+	wake_lock_init(&state->debugger_wake_lock,
+			WAKE_LOCK_SUSPEND, "serial-debug");
+
+	state->clk = clk_get(&pdev->dev, NULL);
+	if (IS_ERR(state->clk))
+		state->clk = NULL;
+
+	/* do not call pdata->uart_enable here since uart_init may still
+	 * need to do some initialization before uart_enable can work.
+	 * So, only try to manage the clock during init.
+	 */
+	if (state->clk)
+		clk_enable(state->clk);
+
+	if (pdata->uart_init) {
+		ret = pdata->uart_init(pdev);
+		if (ret)
+			goto err_uart_init;
+	}
+
+	fiq_debugger_printf_nfiq(state,
+				"<hit enter %sto activate fiq debugger>\n",
+				state->no_sleep ? "" : "twice ");
+
+#ifdef CONFIG_FIQ_GLUE
+	if (fiq_debugger_have_fiq(state)) {
+		state->handler.fiq = fiq_debugger_fiq;
+		state->handler.resume = fiq_debugger_resume;
+		ret = fiq_glue_register_handler(&state->handler);
+		if (ret) {
+			pr_err("%s: could not install fiq handler\n", __func__);
+			goto err_register_irq;
+		}
+
+		pdata->fiq_enable(pdev, state->fiq, 1);
+	} else
+#endif
+	{
+		ret = request_irq(state->uart_irq, fiq_debugger_uart_irq,
+				  IRQF_NO_SUSPEND, "debug", state);
+		if (ret) {
+			pr_err("%s: could not install irq handler\n", __func__);
+			goto err_register_irq;
+		}
+
+		/* for irq-only mode, we want this irq to wake us up, if it
+		 * can.
+		 */
+		enable_irq_wake(state->uart_irq);
+	}
+
+	if (state->clk)
+		clk_disable(state->clk);
+
+	if (state->signal_irq >= 0) {
+		ret = request_irq(state->signal_irq, fiq_debugger_signal_irq,
+			  IRQF_TRIGGER_RISING, "debug-signal", state);
+		if (ret)
+			pr_err("serial_debugger: could not install signal_irq\n");
+	}
+
+	if (state->wakeup_irq >= 0) {
+		ret = request_irq(state->wakeup_irq,
+				  fiq_debugger_wakeup_irq_handler,
+				  IRQF_TRIGGER_FALLING,
+				  "debug-wakeup", state);
+		if (ret) {
+			pr_err("serial_debugger: "
+				"could not install wakeup irq\n");
+			state->wakeup_irq = -1;
+		} else {
+			ret = enable_irq_wake(state->wakeup_irq);
+			if (ret) {
+				pr_err("serial_debugger: "
+					"could not enable wakeup\n");
+				state->wakeup_irq_no_set_wake = true;
+			}
+		}
+	}
+	if (state->no_sleep)
+		fiq_debugger_handle_wakeup(state);
+
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	spin_lock_init(&state->console_lock);
+	state->console = fiq_debugger_console;
+	state->console.index = pdev->id;
+	if (!console_set_on_cmdline)
+		add_preferred_console(state->console.name,
+			state->console.index, NULL);
+	register_console(&state->console);
+	fiq_debugger_tty_init_one(state);
+#endif
+	return 0;
+
+err_register_irq:
+	if (pdata->uart_free)
+		pdata->uart_free(pdev);
+err_uart_init:
+	if (state->clk) {
+		clk_disable(state->clk);
+		clk_put(state->clk);
+	}
+	wake_lock_destroy(&state->debugger_wake_lock);
+	platform_set_drvdata(pdev, NULL);
+	kfree(state);
+	return ret;
+}
+
+static const struct dev_pm_ops fiq_debugger_dev_pm_ops = {
+	.suspend	= fiq_debugger_dev_suspend,
+	.resume		= fiq_debugger_dev_resume,
+};
+
+static struct platform_driver fiq_debugger_driver = {
+	.probe	= fiq_debugger_probe,
+	.driver	= {
+		.name	= "fiq_debugger",
+		.pm	= &fiq_debugger_dev_pm_ops,
+	},
+};
+
+#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY)
+int fiq_debugger_uart_overlay(void)
+{
+	struct device_node *onp = of_find_node_by_path("/uart_overlay@0");
+	int ret;
+
+	if (!onp) {
+		pr_err("serial_debugger: uart overlay not found\n");
+		return -ENODEV;
+	}
+
+	ret = of_overlay_create(onp);
+	if (ret < 0) {
+		pr_err("serial_debugger: fail to create overlay: %d\n", ret);
+		of_node_put(onp);
+		return ret;
+	}
+
+	pr_info("serial_debugger: uart overlay applied\n");
+	return 0;
+}
+#endif
+
+static int __init fiq_debugger_init(void)
+{
+	if (fiq_debugger_disable) {
+		pr_err("serial_debugger: disabled\n");
+		return -ENODEV;
+	}
+#if defined(CONFIG_FIQ_DEBUGGER_CONSOLE)
+	fiq_debugger_tty_init();
+#endif
+#if defined(CONFIG_FIQ_DEBUGGER_UART_OVERLAY)
+	fiq_debugger_uart_overlay();
+#endif
+	return platform_driver_register(&fiq_debugger_driver);
+}
+
+postcore_initcall(fiq_debugger_init);
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger.h b/drivers/staging/android/fiq_debugger/fiq_debugger.h
new file mode 100644
index 0000000..c9ec4f8
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger.h
@@ -0,0 +1,64 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+#define _ARCH_ARM_MACH_TEGRA_FIQ_DEBUGGER_H_
+
+#include <linux/serial_core.h>
+
+#define FIQ_DEBUGGER_NO_CHAR NO_POLL_CHAR
+#define FIQ_DEBUGGER_BREAK 0x00ff0100
+
+#define FIQ_DEBUGGER_FIQ_IRQ_NAME	"fiq"
+#define FIQ_DEBUGGER_SIGNAL_IRQ_NAME	"signal"
+#define FIQ_DEBUGGER_WAKEUP_IRQ_NAME	"wakeup"
+
+/**
+ * struct fiq_debugger_pdata - fiq debugger platform data
+ * @uart_resume:	used to restore uart state right before enabling
+ *			the fiq.
+ * @uart_enable:	Do the work necessary to communicate with the uart
+ *			hw (enable clocks, etc.). This must be ref-counted.
+ * @uart_disable:	Do the work necessary to disable the uart hw
+ *			(disable clocks, etc.). This must be ref-counted.
+ * @uart_dev_suspend:	called during PM suspend, generally not needed
+ *			for real fiq mode debugger.
+ * @uart_dev_resume:	called during PM resume, generally not needed
+ *			for real fiq mode debugger.
+ */
+struct fiq_debugger_pdata {
+	int (*uart_init)(struct platform_device *pdev);
+	void (*uart_free)(struct platform_device *pdev);
+	int (*uart_resume)(struct platform_device *pdev);
+	int (*uart_getc)(struct platform_device *pdev);
+	void (*uart_putc)(struct platform_device *pdev, unsigned int c);
+	void (*uart_flush)(struct platform_device *pdev);
+	void (*uart_enable)(struct platform_device *pdev);
+	void (*uart_disable)(struct platform_device *pdev);
+
+	int (*uart_dev_suspend)(struct platform_device *pdev);
+	int (*uart_dev_resume)(struct platform_device *pdev);
+
+	void (*fiq_enable)(struct platform_device *pdev, unsigned int fiq,
+								bool enable);
+	void (*fiq_ack)(struct platform_device *pdev, unsigned int fiq);
+
+	void (*force_irq)(struct platform_device *pdev, unsigned int irq);
+	void (*force_irq_ack)(struct platform_device *pdev, unsigned int irq);
+};
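+
+/*
+ * Example (illustrative sketch only; the my_uart_* helpers are hypothetical):
+ * uart_getc() and uart_putc() are the only unconditionally required
+ * callbacks, so a board file driving a simple polled UART could provide:
+ *
+ *	static int my_uart_getc(struct platform_device *pdev)
+ *	{
+ *		return my_uart_rx_ready() ? my_uart_rx_byte()
+ *					  : FIQ_DEBUGGER_NO_CHAR;
+ *	}
+ *
+ *	static void my_uart_putc(struct platform_device *pdev, unsigned int c)
+ *	{
+ *		my_uart_tx_byte(c);
+ *	}
+ *
+ *	static struct fiq_debugger_pdata my_pdata = {
+ *		.uart_getc = my_uart_getc,
+ *		.uart_putc = my_uart_putc,
+ *	};
+ */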
+
+#endif
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
new file mode 100644
index 0000000..8b3e013
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(unsigned cpsr)
+{
+	switch (cpsr & MODE_MASK) {
+	case USR_MODE: return "USR";
+	case FIQ_MODE: return "FIQ";
+	case IRQ_MODE: return "IRQ";
+	case SVC_MODE: return "SVC";
+	case ABT_MODE: return "ABT";
+	case UND_MODE: return "UND";
+	case SYSTEM_MODE: return "SYS";
+	default: return "???";
+	}
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, " pc %08x cpsr %08x mode %s\n",
+		regs->ARM_pc, regs->ARM_cpsr, mode_name(regs->ARM_cpsr));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output,
+			" r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+			regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+	output->printf(output,
+			" r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+			regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+	output->printf(output,
+			" r8 %08x  r9 %08x r10 %08x r11 %08x  mode %s\n",
+			regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp,
+			mode_name(regs->ARM_cpsr));
+	output->printf(output,
+			" ip %08x  sp %08x  lr %08x  pc %08x cpsr %08x\n",
+			regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc,
+			regs->ARM_cpsr);
+}
+
+struct mode_regs {
+	unsigned long sp_svc;
+	unsigned long lr_svc;
+	unsigned long spsr_svc;
+
+	unsigned long sp_abt;
+	unsigned long lr_abt;
+	unsigned long spsr_abt;
+
+	unsigned long sp_und;
+	unsigned long lr_und;
+	unsigned long spsr_und;
+
+	unsigned long sp_irq;
+	unsigned long lr_irq;
+	unsigned long spsr_irq;
+
+	unsigned long r8_fiq;
+	unsigned long r9_fiq;
+	unsigned long r10_fiq;
+	unsigned long r11_fiq;
+	unsigned long r12_fiq;
+	unsigned long sp_fiq;
+	unsigned long lr_fiq;
+	unsigned long spsr_fiq;
+};
+
+static void __naked get_mode_regs(struct mode_regs *regs)
+{
+	asm volatile (
+	"mrs	r1, cpsr\n"
+	"msr	cpsr_c, #0xd3 @(SVC_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd7 @(ABT_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xdb @(UND_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd2 @(IRQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r13 - r14}\n"
+	"mrs	r2, spsr\n"
+	"msr	cpsr_c, #0xd1 @(FIQ_MODE | PSR_I_BIT | PSR_F_BIT)\n"
+	"stmia	r0!, {r2, r8 - r14}\n"
+	"mrs	r2, spsr\n"
+	"stmia	r0!, {r2}\n"
+	"msr	cpsr_c, r1\n"
+	"bx	lr\n");
+}
+
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	struct mode_regs mode_regs;
+	unsigned long mode = regs->ARM_cpsr & MODE_MASK;
+
+	fiq_debugger_dump_regs(output, regs);
+	get_mode_regs(&mode_regs);
+
+	output->printf(output,
+			"%csvc: sp %08x  lr %08x  spsr %08x\n",
+			mode == SVC_MODE ? '*' : ' ',
+			mode_regs.sp_svc, mode_regs.lr_svc, mode_regs.spsr_svc);
+	output->printf(output,
+			"%cabt: sp %08x  lr %08x  spsr %08x\n",
+			mode == ABT_MODE ? '*' : ' ',
+			mode_regs.sp_abt, mode_regs.lr_abt, mode_regs.spsr_abt);
+	output->printf(output,
+			"%cund: sp %08x  lr %08x  spsr %08x\n",
+			mode == UND_MODE ? '*' : ' ',
+			mode_regs.sp_und, mode_regs.lr_und, mode_regs.spsr_und);
+	output->printf(output,
+			"%cirq: sp %08x  lr %08x  spsr %08x\n",
+			mode == IRQ_MODE ? '*' : ' ',
+			mode_regs.sp_irq, mode_regs.lr_irq, mode_regs.spsr_irq);
+	output->printf(output,
+			"%cfiq: r8 %08x  r9 %08x  r10 %08x  r11 %08x  r12 %08x\n",
+			mode == FIQ_MODE ? '*' : ' ',
+			mode_regs.r8_fiq, mode_regs.r9_fiq, mode_regs.r10_fiq,
+			mode_regs.r11_fiq, mode_regs.r12_fiq);
+	output->printf(output,
+			" fiq: sp %08x  lr %08x  spsr %08x\n",
+			mode_regs.sp_fiq, mode_regs.lr_fiq, mode_regs.spsr_fiq);
+}
+
+struct stacktrace_state {
+	struct fiq_debugger_output *output;
+	unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+	struct stacktrace_state *sts = d;
+
+	if (sts->depth) {
+		sts->output->printf(sts->output,
+			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+			frame->pc, frame->pc, frame->lr, frame->lr,
+			frame->sp, frame->fp);
+		sts->depth--;
+		return 0;
+	}
+	sts->output->printf(sts->output, "  ...\n");
+
+	return sts->depth == 0;
+}
+
+struct frame_tail {
+	struct frame_tail *fp;
+	unsigned long sp;
+	unsigned long lr;
+} __attribute__((packed));
+
+static struct frame_tail *user_backtrace(struct fiq_debugger_output *output,
+					struct frame_tail *tail)
+{
+	struct frame_tail buftail[2];
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) {
+		output->printf(output, "  invalid frame pointer %p\n",
+				tail);
+		return NULL;
+	}
+	if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) {
+		output->printf(output,
+			"  failed to copy frame pointer %p\n", tail);
+		return NULL;
+	}
+
+	output->printf(output, "  %p\n", buftail[0].lr);
+
+	/* frame pointers should strictly progress back up the stack
+	 * (towards higher addresses) */
+	if (tail >= buftail[0].fp)
+		return NULL;
+
+	return buftail[0].fp-1;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+	struct frame_tail *tail;
+	struct thread_info *real_thread_info = THREAD_INFO(ssp);
+	struct stacktrace_state sts;
+
+	sts.depth = depth;
+	sts.output = output;
+	*current_thread_info() = *real_thread_info;
+
+	if (!current)
+		output->printf(output, "current NULL\n");
+	else
+		output->printf(output, "pid: %d  comm: %s\n",
+			current->pid, current->comm);
+	fiq_debugger_dump_regs(output, regs);
+
+	if (!user_mode(regs)) {
+		struct stackframe frame;
+		frame.fp = regs->ARM_fp;
+		frame.sp = regs->ARM_sp;
+		frame.lr = regs->ARM_lr;
+		frame.pc = regs->ARM_pc;
+		output->printf(output,
+			"  pc: %p (%pF), lr %p (%pF), sp %p, fp %p\n",
+			regs->ARM_pc, regs->ARM_pc, regs->ARM_lr, regs->ARM_lr,
+			regs->ARM_sp, regs->ARM_fp);
+		walk_stackframe(&frame, report_trace, &sts);
+		return;
+	}
+
+	tail = ((struct frame_tail *) regs->ARM_fp) - 1;
+	while (depth-- && tail && !((unsigned long) tail & 3))
+		tail = user_backtrace(output, tail);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
new file mode 100644
index 0000000..97246bc
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_arm64.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ptrace.h>
+#include <asm/stacktrace.h>
+
+#include "fiq_debugger_priv.h"
+
+static char *mode_name(const struct pt_regs *regs)
+{
+	if (compat_user_mode(regs)) {
+		return "USR";
+	} else {
+		switch (processor_mode(regs)) {
+		case PSR_MODE_EL0t: return "EL0t";
+		case PSR_MODE_EL1t: return "EL1t";
+		case PSR_MODE_EL1h: return "EL1h";
+		case PSR_MODE_EL2t: return "EL2t";
+		case PSR_MODE_EL2h: return "EL2h";
+		default: return "???";
+		}
+	}
+}
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, " pc %016lx cpsr %08lx mode %s\n",
+		regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch32(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	output->printf(output, " r0 %08x  r1 %08x  r2 %08x  r3 %08x\n",
+			regs->compat_usr(0), regs->compat_usr(1),
+			regs->compat_usr(2), regs->compat_usr(3));
+	output->printf(output, " r4 %08x  r5 %08x  r6 %08x  r7 %08x\n",
+			regs->compat_usr(4), regs->compat_usr(5),
+			regs->compat_usr(6), regs->compat_usr(7));
+	output->printf(output, " r8 %08x  r9 %08x r10 %08x r11 %08x\n",
+			regs->compat_usr(8), regs->compat_usr(9),
+			regs->compat_usr(10), regs->compat_usr(11));
+	output->printf(output, " ip %08x  sp %08x  lr %08x  pc %08x\n",
+			regs->compat_usr(12), regs->compat_sp,
+			regs->compat_lr, regs->pc);
+	output->printf(output, " cpsr %08lx (%s)\n",
+			regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs_aarch64(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+
+	output->printf(output, "  x0 %016lx   x1 %016lx\n",
+			regs->regs[0], regs->regs[1]);
+	output->printf(output, "  x2 %016lx   x3 %016lx\n",
+			regs->regs[2], regs->regs[3]);
+	output->printf(output, "  x4 %016lx   x5 %016lx\n",
+			regs->regs[4], regs->regs[5]);
+	output->printf(output, "  x6 %016lx   x7 %016lx\n",
+			regs->regs[6], regs->regs[7]);
+	output->printf(output, "  x8 %016lx   x9 %016lx\n",
+			regs->regs[8], regs->regs[9]);
+	output->printf(output, " x10 %016lx  x11 %016lx\n",
+			regs->regs[10], regs->regs[11]);
+	output->printf(output, " x12 %016lx  x13 %016lx\n",
+			regs->regs[12], regs->regs[13]);
+	output->printf(output, " x14 %016lx  x15 %016lx\n",
+			regs->regs[14], regs->regs[15]);
+	output->printf(output, " x16 %016lx  x17 %016lx\n",
+			regs->regs[16], regs->regs[17]);
+	output->printf(output, " x18 %016lx  x19 %016lx\n",
+			regs->regs[18], regs->regs[19]);
+	output->printf(output, " x20 %016lx  x21 %016lx\n",
+			regs->regs[20], regs->regs[21]);
+	output->printf(output, " x22 %016lx  x23 %016lx\n",
+			regs->regs[22], regs->regs[23]);
+	output->printf(output, " x24 %016lx  x25 %016lx\n",
+			regs->regs[24], regs->regs[25]);
+	output->printf(output, " x26 %016lx  x27 %016lx\n",
+			regs->regs[26], regs->regs[27]);
+	output->printf(output, " x28 %016lx  x29 %016lx\n",
+			regs->regs[28], regs->regs[29]);
+	output->printf(output, " x30 %016lx   sp %016lx\n",
+			regs->regs[30], regs->sp);
+	output->printf(output, "  pc %016lx cpsr %08lx (%s)\n",
+			regs->pc, regs->pstate, mode_name(regs));
+}
+
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	if (compat_user_mode(regs))
+		fiq_debugger_dump_regs_aarch32(output, regs);
+	else
+		fiq_debugger_dump_regs_aarch64(output, regs);
+}
+
+#define READ_SPECIAL_REG(x) ({ \
+	u64 val; \
+	asm volatile ("mrs %0, " # x : "=r"(val)); \
+	val; \
+})
+
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs)
+{
+	u32 pstate = READ_SPECIAL_REG(CurrentEl);
+	bool in_el2 = (pstate & PSR_MODE_MASK) >= PSR_MODE_EL2t;
+
+	fiq_debugger_dump_regs(output, regs);
+
+	output->printf(output, " sp_el0   %016lx\n",
+			READ_SPECIAL_REG(sp_el0));
+
+	if (in_el2)
+		output->printf(output, " sp_el1   %016lx\n",
+				READ_SPECIAL_REG(sp_el1));
+
+	output->printf(output, " elr_el1  %016lx\n",
+			READ_SPECIAL_REG(elr_el1));
+
+	output->printf(output, " spsr_el1 %08lx\n",
+			READ_SPECIAL_REG(spsr_el1));
+
+	if (in_el2) {
+		output->printf(output, " spsr_irq %08lx\n",
+				READ_SPECIAL_REG(spsr_irq));
+		output->printf(output, " spsr_abt %08lx\n",
+				READ_SPECIAL_REG(spsr_abt));
+		output->printf(output, " spsr_und %08lx\n",
+				READ_SPECIAL_REG(spsr_und));
+		output->printf(output, " spsr_fiq %08lx\n",
+				READ_SPECIAL_REG(spsr_fiq));
+		output->printf(output, " elr_el2  %016lx\n",
+				READ_SPECIAL_REG(elr_el2));
+		output->printf(output, " spsr_el2 %08lx\n",
+				READ_SPECIAL_REG(spsr_el2));
+	}
+}
+
+struct stacktrace_state {
+	struct fiq_debugger_output *output;
+	unsigned int depth;
+};
+
+static int report_trace(struct stackframe *frame, void *d)
+{
+	struct stacktrace_state *sts = d;
+
+	if (sts->depth) {
+		sts->output->printf(sts->output, "%pF:\n", frame->pc);
+		sts->output->printf(sts->output,
+				"  pc %016lx   sp %016lx   fp %016lx\n",
+				frame->pc, frame->sp, frame->fp);
+		sts->depth--;
+		return 0;
+	}
+	sts->output->printf(sts->output, "  ...\n");
+
+	return sts->depth == 0;
+}
+
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp)
+{
+	struct thread_info *real_thread_info = THREAD_INFO(ssp);
+	struct stacktrace_state sts;
+
+	sts.depth = depth;
+	sts.output = output;
+	*current_thread_info() = *real_thread_info;
+
+	if (!current)
+		output->printf(output, "current NULL\n");
+	else
+		output->printf(output, "pid: %d  comm: %s\n",
+			current->pid, current->comm);
+	fiq_debugger_dump_regs(output, regs);
+
+	if (!user_mode(regs)) {
+		struct stackframe frame;
+		frame.fp = regs->regs[29];
+		frame.sp = regs->sp;
+		frame.pc = regs->pc;
+		output->printf(output, "\n");
+		walk_stackframe(current, &frame, report_trace, &sts);
+	}
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
new file mode 100644
index 0000000..d5d051f
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_priv.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Colin Cross <ccross@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_DEBUGGER_PRIV_H_
+#define _FIQ_DEBUGGER_PRIV_H_
+
+#define THREAD_INFO(sp) ((struct thread_info *) \
+		((unsigned long)(sp) & ~(THREAD_SIZE - 1)))
+
+struct fiq_debugger_output {
+	void (*printf)(struct fiq_debugger_output *output, const char *fmt, ...);
+};
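+
+/*
+ * A backend supplies the single ->printf hook; fiq_watchdog.c, for instance,
+ * formats into a local buffer and hands it to ramoops. A minimal sketch
+ * (my_transport_write() is a hypothetical output routine):
+ *
+ *	static void my_output_printf(struct fiq_debugger_output *output,
+ *				     const char *fmt, ...)
+ *	{
+ *		char buf[256];
+ *		va_list ap;
+ *
+ *		va_start(ap, fmt);
+ *		vscnprintf(buf, sizeof(buf), fmt, ap);
+ *		va_end(ap);
+ *		my_transport_write(buf);
+ *	}
+ */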
+
+struct pt_regs;
+
+void fiq_debugger_dump_pc(struct fiq_debugger_output *output,
+		const struct pt_regs *regs);
+void fiq_debugger_dump_regs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs);
+void fiq_debugger_dump_allregs(struct fiq_debugger_output *output,
+		const struct pt_regs *regs);
+void fiq_debugger_dump_stacktrace(struct fiq_debugger_output *output,
+		const struct pt_regs *regs, unsigned int depth, void *ssp);
+
+#endif
diff --git a/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
new file mode 100644
index 0000000..10c3c5d
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
@@ -0,0 +1,94 @@
+/*
+ * drivers/staging/android/fiq_debugger/fiq_debugger_ringbuf.h
+ *
+ * simple lockless ringbuffer
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+struct fiq_debugger_ringbuf {
+	int len;
+	int head;
+	int tail;
+	u8 buf[];
+};
+
+
+static inline struct fiq_debugger_ringbuf *fiq_debugger_ringbuf_alloc(int len)
+{
+	struct fiq_debugger_ringbuf *rbuf;
+
+	rbuf = kzalloc(sizeof(*rbuf) + len, GFP_KERNEL);
+	if (rbuf == NULL)
+		return NULL;
+
+	rbuf->len = len;
+	rbuf->head = 0;
+	rbuf->tail = 0;
+	smp_mb();
+
+	return rbuf;
+}
+
+static inline void fiq_debugger_ringbuf_free(struct fiq_debugger_ringbuf *rbuf)
+{
+	kfree(rbuf);
+}
+
+static inline int fiq_debugger_ringbuf_level(struct fiq_debugger_ringbuf *rbuf)
+{
+	int level = rbuf->head - rbuf->tail;
+
+	if (level < 0)
+		level = rbuf->len + level;
+
+	return level;
+}
+
+static inline int fiq_debugger_ringbuf_room(struct fiq_debugger_ringbuf *rbuf)
+{
+	return rbuf->len - fiq_debugger_ringbuf_level(rbuf) - 1;
+}
+
+static inline u8
+fiq_debugger_ringbuf_peek(struct fiq_debugger_ringbuf *rbuf, int i)
+{
+	return rbuf->buf[(rbuf->tail + i) % rbuf->len];
+}
+
+static inline int
+fiq_debugger_ringbuf_consume(struct fiq_debugger_ringbuf *rbuf, int count)
+{
+	count = min(count, fiq_debugger_ringbuf_level(rbuf));
+
+	rbuf->tail = (rbuf->tail + count) % rbuf->len;
+	smp_mb();
+
+	return count;
+}
+
+static inline int
+fiq_debugger_ringbuf_push(struct fiq_debugger_ringbuf *rbuf, u8 datum)
+{
+	if (fiq_debugger_ringbuf_room(rbuf) == 0)
+		return 0;
+
+	rbuf->buf[rbuf->head] = datum;
+	smp_mb();
+	rbuf->head = (rbuf->head + 1) % rbuf->len;
+	smp_mb();
+
+	return 1;
+}
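+
+/*
+ * Typical single-producer/single-consumer usage, matching fiq_debugger.c
+ * (illustrative sketch; handle_char() stands in for the real consumer):
+ *
+ *	producer (FIQ context):
+ *		fiq_debugger_ringbuf_push(rbuf, c);
+ *
+ *	consumer (IRQ context):
+ *		while (fiq_debugger_ringbuf_level(rbuf) > 0) {
+ *			u8 c = fiq_debugger_ringbuf_peek(rbuf, 0);
+ *			handle_char(c);
+ *			fiq_debugger_ringbuf_consume(rbuf, 1);
+ *		}
+ */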
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.c b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
new file mode 100644
index 0000000..194b541
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.c
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/pstore_ram.h>
+
+#include "fiq_watchdog.h"
+#include "fiq_debugger_priv.h"
+
+static DEFINE_RAW_SPINLOCK(fiq_watchdog_lock);
+
+static void fiq_watchdog_printf(struct fiq_debugger_output *output,
+				const char *fmt, ...)
+{
+	char buf[256];
+	va_list ap;
+	int len;
+
+	va_start(ap, fmt);
+	len = vscnprintf(buf, sizeof(buf), fmt, ap);
+	va_end(ap);
+
+	ramoops_console_write_buf(buf, len);
+}
+
+struct fiq_debugger_output fiq_watchdog_output = {
+	.printf = fiq_watchdog_printf,
+};
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp)
+{
+	char msg[24];
+	int len;
+
+	raw_spin_lock(&fiq_watchdog_lock);
+
+	len = scnprintf(msg, sizeof(msg), "watchdog fiq cpu %d\n",
+			THREAD_INFO(svc_sp)->cpu);
+	ramoops_console_write_buf(msg, len);
+
+	fiq_debugger_dump_stacktrace(&fiq_watchdog_output, regs, 100, svc_sp);
+
+	raw_spin_unlock(&fiq_watchdog_lock);
+}
diff --git a/drivers/staging/android/fiq_debugger/fiq_watchdog.h b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
new file mode 100644
index 0000000..c6b507f
--- /dev/null
+++ b/drivers/staging/android/fiq_debugger/fiq_watchdog.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _FIQ_WATCHDOG_H_
+#define _FIQ_WATCHDOG_H_
+
+void fiq_watchdog_triggered(const struct pt_regs *regs, void *svc_sp);
+
+#endif
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index c8fb413..3ae8285 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -1,6 +1,6 @@
 menuconfig ION
 	bool "Ion Memory Manager"
-	depends on HAVE_MEMBLOCK && HAS_DMA && MMU
+	depends on HAVE_MEMBLOCK && HAS_DMA && MMU && ION_MSM
 	select GENERIC_ALLOCATOR
 	select DMA_SHARED_BUFFER
 	---help---
@@ -36,19 +36,32 @@
 config ION_HISI
 	tristate "Ion for Hisilicon"
 	depends on ARCH_HISI && ION
-	select ION_OF
 	help
 	  Choose this option if you wish to use ion on Hisilicon Platform.
 
 source "drivers/staging/android/ion/hisilicon/Kconfig"
 
-config ION_OF
-	bool "Devicetree support for Ion"
-	depends on ION && OF_ADDRESS
+config ION_POOL_CACHE_POLICY
+	bool "Ion set page pool cache policy"
+	depends on ION && X86
+	default y if X86
 	help
-	  Provides base support for defining Ion heaps in devicetree
-	  and setting them up. Also includes functions for platforms
-	  to parse the devicetree and expand for their own custom
-	  extensions
+	  Choose this option if you need to explicitly set the cache policy
+	  of the pages in the page pool.
 
-	  If using Ion and devicetree, you should say Y here
+config ION_MSM
+	tristate "Ion for MSM"
+	depends on ARCH_QCOM && CMA
+	select MSM_SECURE_BUFFER
+	help
+	  Choose this option if you wish to use ion on an MSM target.
+	  Features include allocating heaps from device tree, buffer
+	  cache maintenance, and a custom ioctl/compat_ioctl. It also
+	  enables utility functions used by ion_system_heap.
+
+config ALLOC_BUFFERS_IN_4K_CHUNKS
+	bool "Turn off allocation optimization and allocate only 4K pages"
+	depends on ARCH_QCOM && ION
+	help
+	  Choose this option if you want ION to allocate buffers in
+	  only 4KB chunks.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index 5d630a0..309b9cc 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,13 +1,12 @@
-obj-$(CONFIG_ION) +=	ion.o ion-ioctl.o ion_heap.o \
-			ion_page_pool.o ion_system_heap.o \
-			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION) +=	ion.o ion_heap.o ion_page_pool.o ion_system_heap.o \
+			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o \
+			ion_system_secure_heap.o
 obj-$(CONFIG_ION_TEST) += ion_test.o
 ifdef CONFIG_COMPAT
 obj-$(CONFIG_ION) += compat_ion.o
 endif
-
 obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
 obj-$(CONFIG_ION_TEGRA) += tegra/
 obj-$(CONFIG_ION_HISI) += hisilicon/
-obj-$(CONFIG_ION_OF) += ion_of.o
 
+obj-$(CONFIG_ION_MSM) += msm/
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
index 9da8f91..b3b3430 100644
--- a/drivers/staging/android/ion/compat_ion.h
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/compat_ion.h
  *
  * Copyright (C) 2013 Google, Inc.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -21,6 +22,8 @@
 
 long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 
+#define compat_ion_user_handle_t compat_int_t
+
 #else
 
 #define compat_ion_ioctl  NULL
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 209a8f7..3cc7835 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -3,6 +3,7 @@
  * drivers/staging/android/ion/ion.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -15,7 +16,6 @@
  *
  */
 
-#include <linux/device.h>
 #include <linux/err.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
@@ -23,6 +23,7 @@
 #include <linux/anon_inodes.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
+#include <linux/list_sort.h>
 #include <linux/memblock.h>
 #include <linux/miscdevice.h>
 #include <linux/export.h>
@@ -36,11 +37,91 @@
 #include <linux/debugfs.h>
 #include <linux/dma-buf.h>
 #include <linux/idr.h>
+#include <linux/msm_ion.h>
+#include <linux/msm_dma_iommu_mapping.h>
+#include <trace/events/kmem.h>
+
 
 #include "ion.h"
 #include "ion_priv.h"
 #include "compat_ion.h"
 
+/**
+ * struct ion_device - the metadata of the ion device node
+ * @dev:		the actual misc device
+ * @buffers:		an rb tree of all the existing buffers
+ * @buffer_lock:	lock protecting the tree of buffers
+ * @lock:		rwsem protecting the tree of heaps and clients
+ * @heaps:		list of all the heaps in the system
+ * @user_clients:	list of all the clients created from userspace
+ */
+struct ion_device {
+	struct miscdevice dev;
+	struct rb_root buffers;
+	/* Protects rb_tree */
+	struct mutex buffer_lock;
+	struct rw_semaphore lock;
+	struct plist_head heaps;
+	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
+			     unsigned long arg);
+	struct rb_root clients;
+	struct dentry *debug_root;
+	struct dentry *heaps_debug_root;
+	struct dentry *clients_debug_root;
+};
+
+/**
+ * struct ion_client - a process/hw block local address space
+ * @node:		node in the tree of all clients
+ * @dev:		backpointer to ion device
+ * @handles:		an rb tree of all the handles in this client
+ * @idr:		an idr space for allocating handle ids
+ * @lock:		lock protecting the tree of handles
+ * @name:		used for debugging
+ * @display_name:	used for debugging (unique version of @name)
+ * @display_serial:	used for debugging (to make display_name unique)
+ * @task:		used for debugging
+ *
+ * A client represents a list of buffers this client may access.
+ * The mutex stored here is used to protect both the tree of handles
+ * and the handles themselves, and should be held while modifying either.
+ */
+struct ion_client {
+	struct rb_node node;
+	struct ion_device *dev;
+	struct rb_root handles;
+	struct idr idr;
+	/* Protects idr */
+	struct mutex lock;
+	char *name;
+	char *display_name;
+	int display_serial;
+	struct task_struct *task;
+	pid_t pid;
+	struct dentry *debug_root;
+};
+
+/**
+ * ion_handle - a client local reference to a buffer
+ * @ref:		reference count
+ * @client:		back pointer to the client the buffer resides in
+ * @buffer:		pointer to the buffer
+ * @node:		node in the client's handle rbtree
+ * @kmap_cnt:		count of times this client has mapped to kernel
+ * @id:			client-unique id allocated by client->idr
+ *
+ * Modifications to node, map_cnt or mapping should be protected by the
+ * lock in the client.  Other fields are never changed after initialization.
+ */
+struct ion_handle {
+	struct kref ref;
+	struct ion_client *client;
+	struct ion_buffer *buffer;
+	struct rb_node node;
+	unsigned int kmap_cnt;
+	int id;
+};
+
 bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
 {
 	return (buffer->flags & ION_FLAG_CACHED) &&
@@ -100,10 +181,10 @@
 
 /* this function should only be called while dev->lock is held */
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
-					    struct ion_device *dev,
-					    unsigned long len,
-					    unsigned long align,
-					    unsigned long flags)
+				     struct ion_device *dev,
+				     unsigned long len,
+				     unsigned long align,
+				     unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	struct sg_table *table;
@@ -131,16 +212,21 @@
 			goto err2;
 	}
 
-	if (buffer->sg_table == NULL) {
-		WARN_ONCE(1, "This heap needs to set the sgtable");
+	buffer->dev = dev;
+	buffer->size = len;
+	buffer->flags = flags;
+	INIT_LIST_HEAD(&buffer->vmas);
+
+	table = heap->ops->map_dma(heap, buffer);
+	if (WARN_ONCE(!table,
+		      "heap->ops->map_dma should return ERR_PTR on error"))
+		table = ERR_PTR(-EINVAL);
+	if (IS_ERR(table)) {
 		ret = -EINVAL;
 		goto err1;
 	}
 
-	table = buffer->sg_table;
-	buffer->dev = dev;
-	buffer->size = len;
-
+	buffer->sg_table = table;
 	if (ion_buffer_fault_user_mappings(buffer)) {
 		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 		struct scatterlist *sg;
@@ -149,7 +235,7 @@
 		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
 		if (!buffer->pages) {
 			ret = -ENOMEM;
-			goto err1;
+			goto err;
 		}
 
 		for_each_sg(table->sgl, sg, table->nents, i) {
@@ -160,9 +246,6 @@
 		}
 	}
 
-	buffer->dev = dev;
-	buffer->size = len;
-	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
 	/*
 	 * this will set up dma addresses for the sglist -- it is not
@@ -178,11 +261,15 @@
 		sg_dma_address(sg) = sg_phys(sg);
 		sg_dma_len(sg) = sg->length;
 	}
+
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
+	atomic_add(len, &heap->total_allocated);
 	return buffer;
 
+err:
+	heap->ops->unmap_dma(heap, buffer);
 err1:
 	heap->ops->free(buffer);
 err2:
@@ -194,6 +281,9 @@
 {
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
+	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
+
+	atomic_sub(buffer->size, &buffer->heap->total_allocated);
 	buffer->heap->ops->free(buffer);
 	vfree(buffer->pages);
 	kfree(buffer);
@@ -205,6 +295,8 @@
 	struct ion_heap *heap = buffer->heap;
 	struct ion_device *dev = buffer->dev;
 
+	msm_dma_buf_freed(buffer);
+
 	mutex_lock(&dev->buffer_lock);
 	rb_erase(&buffer->node, &dev->buffers);
 	mutex_unlock(&dev->buffer_lock);
@@ -228,6 +320,9 @@
 static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
 {
 	mutex_lock(&buffer->lock);
+	if (buffer->handle_count == 0)
+		atomic_add(buffer->size, &buffer->heap->total_handles);
+
 	buffer->handle_count++;
 	mutex_unlock(&buffer->lock);
 }
@@ -252,12 +347,13 @@
 		task = current->group_leader;
 		get_task_comm(buffer->task_comm, task);
 		buffer->pid = task_pid_nr(task);
+		atomic_sub(buffer->size, &buffer->heap->total_handles);
 	}
 	mutex_unlock(&buffer->lock);
 }
 
 static struct ion_handle *ion_handle_create(struct ion_client *client,
-					    struct ion_buffer *buffer)
+				     struct ion_buffer *buffer)
 {
 	struct ion_handle *handle;
 
@@ -297,14 +393,23 @@
 	kfree(handle);
 }
 
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
+{
+	return handle->buffer;
+}
+
 static void ion_handle_get(struct ion_handle *handle)
 {
 	kref_get(&handle->ref);
 }
 
-int ion_handle_put_nolock(struct ion_handle *handle)
+static int ion_handle_put_nolock(struct ion_handle *handle)
 {
-	return kref_put(&handle->ref, ion_handle_destroy);
+	int ret;
+
+	ret = kref_put(&handle->ref, ion_handle_destroy);
+
+	return ret;
 }
 
 int ion_handle_put(struct ion_handle *handle)
@@ -337,8 +442,8 @@
 	return ERR_PTR(-EINVAL);
 }
 
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
-					       int id)
+static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+						      int id)
 {
 	struct ion_handle *handle;
 
@@ -350,7 +455,7 @@
 }
 
 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-					       int id)
+					int id)
 {
 	struct ion_handle *handle;
 
@@ -408,6 +513,19 @@
 	struct ion_buffer *buffer = NULL;
 	struct ion_heap *heap;
 	int ret;
+	const unsigned int MAX_DBG_STR_LEN = 64;
+	char dbg_str[MAX_DBG_STR_LEN];
+	unsigned int dbg_str_idx = 0;
+
+	dbg_str[0] = '\0';
+
+	/*
+	 * For now, we don't want to fault in pages individually since
+	 * clients are already doing manual cache maintenance. In
+	 * other words, the implicit caching infrastructure is in
+	 * place (in code) but should not be used.
+	 */
+	flags |= ION_FLAG_CACHED_NEEDS_SYNC;
 
 	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
 		 len, align, heap_id_mask, flags);
@@ -427,17 +545,53 @@
 		/* if the caller didn't specify this heap id */
 		if (!((1 << heap->id) & heap_id_mask))
 			continue;
+		trace_ion_alloc_buffer_start(client->name, heap->name, len,
+					     heap_id_mask, flags);
 		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		trace_ion_alloc_buffer_end(client->name, heap->name, len,
+					   heap_id_mask, flags);
 		if (!IS_ERR(buffer))
 			break;
+
+		trace_ion_alloc_buffer_fallback(client->name, heap->name, len,
+						heap_id_mask, flags,
+						PTR_ERR(buffer));
+		if (dbg_str_idx < MAX_DBG_STR_LEN) {
+			unsigned int len_left;
+			int ret_value;
+
+			len_left = MAX_DBG_STR_LEN - dbg_str_idx - 1;
+			ret_value = snprintf(&dbg_str[dbg_str_idx],
+					     len_left, "%s ", heap->name);
+
+			if (ret_value >= len_left) {
+				/* overflow */
+				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
+				dbg_str_idx = MAX_DBG_STR_LEN;
+			} else if (ret_value >= 0) {
+				dbg_str_idx += ret_value;
+			} else {
+				/* error */
+				dbg_str[MAX_DBG_STR_LEN - 1] = '\0';
+			}
+		}
 	}
 	up_read(&dev->lock);
 
-	if (buffer == NULL)
+	if (!buffer) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_id_mask, flags, -ENODEV);
 		return ERR_PTR(-ENODEV);
+	}
 
-	if (IS_ERR(buffer))
+	if (IS_ERR(buffer)) {
+		trace_ion_alloc_buffer_fail(client->name, dbg_str, len,
+					    heap_id_mask, flags,
+					    PTR_ERR(buffer));
+		pr_debug("ION is unable to allocate 0x%zx bytes (alignment: 0x%zx) from heap(s) %sfor client %s\n",
+			 len, align, dbg_str, client->name);
 		return ERR_CAST(buffer);
+	}
 
 	handle = ion_handle_create(client, buffer);
 
@@ -462,10 +616,15 @@
 }
 EXPORT_SYMBOL(ion_alloc);
 
-void ion_free_nolock(struct ion_client *client,
-		     struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client,
+			    struct ion_handle *handle)
 {
-	if (!ion_handle_validate(client, handle)) {
+	bool valid_handle;
+
+	WARN_ON(client != handle->client);
+
+	valid_handle = ion_handle_validate(client, handle);
+	if (!valid_handle) {
 		WARN(1, "%s: invalid handle passed to free.\n", __func__);
 		return;
 	}
@@ -482,6 +641,32 @@
 }
 EXPORT_SYMBOL(ion_free);
 
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len)
+{
+	struct ion_buffer *buffer;
+	int ret;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+
+	buffer = handle->buffer;
+
+	if (!buffer->heap->ops->phys) {
+		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
+		       __func__, buffer->heap->name, buffer->heap->type);
+		mutex_unlock(&client->lock);
+		return -ENODEV;
+	}
+	mutex_unlock(&client->lock);
+	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
+	return ret;
+}
+EXPORT_SYMBOL(ion_phys);
+
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
 	void *vaddr;
@@ -492,7 +677,7 @@
 	}
 	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
 	if (WARN_ONCE(vaddr == NULL,
-		      "heap->ops->map_kernel should return ERR_PTR on error"))
+			"heap->ops->map_kernel should return ERR_PTR on error"))
 		return ERR_PTR(-EINVAL);
 	if (IS_ERR(vaddr))
 		return vaddr;
@@ -614,9 +799,10 @@
 {
 	struct ion_client *client = s->private;
 	struct rb_node *n;
-	size_t sizes[ION_NUM_HEAP_IDS] = {0};
-	const char *names[ION_NUM_HEAP_IDS] = {NULL};
-	int i;
+
+	seq_printf(s, "%16.16s: %16.16s : %16.16s : %12.12s\n",
+		   "heap_name", "size_in_bytes", "handle refcount",
+		   "buffer");
 
 	mutex_lock(&debugfs_mutex);
 	if (!is_client_alive(client)) {
@@ -630,21 +816,17 @@
 	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
 		struct ion_handle *handle = rb_entry(n, struct ion_handle,
 						     node);
-		unsigned int id = handle->buffer->heap->id;
 
-		if (!names[id])
-			names[id] = handle->buffer->heap->name;
-		sizes[id] += handle->buffer->size;
+		seq_printf(s, "%16.16s: %16zx : %16d : %12p",
+			   handle->buffer->heap->name,
+			   handle->buffer->size,
+			   atomic_read(&handle->ref.refcount),
+			   handle->buffer);
+
+		seq_puts(s, "\n");
 	}
 	mutex_unlock(&client->lock);
 	mutex_unlock(&debugfs_mutex);
-
-	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
-	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
-		if (!names[i])
-			continue;
-		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
-	}
 	return 0;
 }
 
@@ -661,14 +843,14 @@
 };
 
 static int ion_get_client_serial(const struct rb_root *root,
-				 const unsigned char *name)
+					const unsigned char *name)
 {
 	int serial = -1;
 	struct rb_node *node;
 
 	for (node = rb_first(root); node; node = rb_next(node)) {
 		struct ion_client *client = rb_entry(node, struct ion_client,
-						     node);
+						node);
 
 		if (strcmp(client->name, name))
 			continue;
@@ -715,6 +897,7 @@
 	client->handles = RB_ROOT;
 	idr_init(&client->idr);
 	mutex_init(&client->lock);
+
 	client->task = task;
 	client->pid = pid;
 	client->name = kstrdup(name, GFP_KERNEL);
@@ -743,14 +926,14 @@
 	rb_insert_color(&client->node, &dev->clients);
 
 	client->debug_root = debugfs_create_file(client->display_name, 0664,
-						 dev->clients_debug_root,
-						 client, &debug_client_fops);
+						dev->clients_debug_root,
+						client, &debug_client_fops);
 	if (!client->debug_root) {
 		char buf[256], *path;
 
 		path = dentry_path(dev->clients_debug_root, buf, 256);
 		pr_err("Failed to create client debugfs at %s/%s\n",
-		       path, client->display_name);
+			path, client->display_name);
 	}
 
 	up_write(&dev->lock);
@@ -788,6 +971,7 @@
 		put_task_struct(client->task);
 	rb_erase(&client->node, &dev->clients);
 	debugfs_remove_recursive(client->debug_root);
+
 	up_write(&dev->lock);
 
 	kfree(client->display_name);
@@ -797,6 +981,133 @@
 }
 EXPORT_SYMBOL(ion_client_destroy);
 
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+			 unsigned long *flags)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*flags = buffer->flags;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_flags);
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			size_t *size)
+{
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to %s.\n",
+		       __func__, __func__);
+		mutex_unlock(&client->lock);
+		return -EINVAL;
+	}
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	*size = buffer->size;
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(ion_handle_get_size);
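A minimal caller sketch for the two query helpers above; it assumes a valid
client/handle pair and uses ION_FLAG_CACHED from the ion uapi:

	unsigned long flags;
	size_t size;

	if (!ion_handle_get_flags(client, handle, &flags) &&
	    !ion_handle_get_size(client, handle, &size) &&
	    (flags & ION_FLAG_CACHED))
		pr_debug("cached ion buffer of %zu bytes\n", size);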
+
+/**
+ * ion_sg_table - get an sg_table for the buffer
+ *
+ * NOTE: most likely you should NOT be using this API.
+ * You should be using Ion as a DMA Buf exporter and using
+ * the sg_table returned by dma_buf_map_attachment.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	struct sg_table *table;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_dma.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
+	}
+	buffer = handle->buffer;
+	table = buffer->sg_table;
+	mutex_unlock(&client->lock);
+	return table;
+}
+EXPORT_SYMBOL(ion_sg_table);
+
+struct sg_table *ion_create_chunked_sg_table(phys_addr_t buffer_base,
+					     size_t chunk_size,
+					     size_t total_size)
+{
+	struct sg_table *table;
+	int i, n_chunks, ret;
+	struct scatterlist *sg;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	n_chunks = DIV_ROUND_UP(total_size, chunk_size);
+	pr_debug("creating sg_table with %d chunks\n", n_chunks);
+
+	ret = sg_alloc_table(table, n_chunks, GFP_KERNEL);
+	if (ret)
+		goto err0;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		dma_addr_t addr = buffer_base + i * chunk_size;
+
+		sg_dma_address(sg) = addr;
+		sg->length = chunk_size;
+	}
+
+	return table;
+err0:
+	kfree(table);
+	return ERR_PTR(ret);
+}
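A minimal sketch of a caller describing a physically contiguous region as
fixed-size chunks; the base address is a made-up value and SZ_64K/SZ_1M come
from <linux/sizes.h>:

	struct sg_table *table;

	table = ion_create_chunked_sg_table(0x80000000, SZ_64K, SZ_1M);
	if (IS_ERR(table))
		return PTR_ERR(table);
	/* ... map the chunks for DMA, then sg_free_table(table); kfree(table); */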
+
+static struct sg_table *ion_dupe_sg_table(struct sg_table *orig_table)
+{
+	int ret, i;
+	struct scatterlist *sg, *sg_orig;
+	struct sg_table *table;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return NULL;
+
+	ret = sg_alloc_table(table, orig_table->nents, GFP_KERNEL);
+	if (ret) {
+		kfree(table);
+		return NULL;
+	}
+
+	sg_orig = orig_table->sgl;
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		memcpy(sg, sg_orig, sizeof(*sg));
+		sg_orig = sg_next(sg_orig);
+	}
+	return table;
+}
+
 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 				       struct device *dev,
 				       enum dma_data_direction direction);
@@ -806,22 +1117,31 @@
 {
 	struct dma_buf *dmabuf = attachment->dmabuf;
 	struct ion_buffer *buffer = dmabuf->priv;
+	struct sg_table *table;
+
+	table = ion_dupe_sg_table(buffer->sg_table);
+	if (!table)
+		return NULL;
 
 	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
-	return buffer->sg_table;
+	return table;
 }
 
 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
+	sg_free_table(table);
+	kfree(table);
 }
 
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
-			       size_t size, enum dma_data_direction dir)
+		size_t size, enum dma_data_direction dir)
 {
 	struct scatterlist sg;
 
+	WARN_ONCE(!dev, "A device is required for dma_sync\n");
+
 	sg_init_table(&sg, 1);
 	sg_set_page(&sg, page, size, 0);
 	/*
@@ -858,7 +1178,7 @@
 
 		if (ion_buffer_page_is_dirty(page))
 			ion_pages_sync_for_device(dev, ion_buffer_page(page),
-						  PAGE_SIZE, dir);
+							PAGE_SIZE, dir);
 
 		ion_buffer_page_clean(buffer->pages + i);
 	}
@@ -921,6 +1241,9 @@
 		break;
 	}
 	mutex_unlock(&buffer->lock);
+
+	if (buffer->heap->ops->unmap_user)
+		buffer->heap->ops->unmap_user(buffer->heap, buffer);
 }
 
 static const struct vm_operations_struct ion_vma_ops = {
@@ -936,7 +1259,7 @@
 
 	if (!buffer->heap->ops->map_user) {
 		pr_err("%s: this heap does not define a method for mapping to userspace\n",
-		       __func__);
+			__func__);
 		return -EINVAL;
 	}
 
@@ -945,6 +1268,7 @@
 							VM_DONTDUMP;
 		vma->vm_private_data = buffer;
 		vma->vm_ops = &ion_vma_ops;
+		vma->vm_flags |= VM_MIXEDMAP;
 		ion_vm_open(vma);
 		return 0;
 	}
@@ -1027,7 +1351,7 @@
 };
 
 struct dma_buf *ion_share_dma_buf(struct ion_client *client,
-				  struct ion_handle *handle)
+						struct ion_handle *handle)
 {
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	struct ion_buffer *buffer;
@@ -1072,7 +1396,6 @@
 	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
 	if (fd < 0)
 		dma_buf_put(dmabuf);
-
 	return fd;
 }
 EXPORT_SYMBOL(ion_share_dma_buf_fd);
@@ -1135,7 +1458,7 @@
 }
 EXPORT_SYMBOL(ion_import_dma_buf_fd);
 
-int ion_sync_for_device(struct ion_client *client, int fd)
+static int ion_sync_for_device(struct ion_client *client, int fd)
 {
 	struct dma_buf *dmabuf;
 	struct ion_buffer *buffer;
@@ -1159,47 +1482,134 @@
 	return 0;
 }
 
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int ion_ioctl_dir(unsigned int cmd)
 {
+	switch (cmd) {
+	case ION_IOC_SYNC:
+	case ION_IOC_FREE:
+	case ION_IOC_CUSTOM:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
+
+static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	struct ion_client *client = filp->private_data;
 	struct ion_device *dev = client->dev;
-	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
-	int ret = -EINVAL, cnt = 0, max_cnt;
-	struct ion_heap *heap;
-	struct ion_heap_data hdata;
+	struct ion_handle *cleanup_handle = NULL;
+	int ret = 0;
+	unsigned int dir;
 
-	memset(&hdata, 0, sizeof(hdata));
+	union {
+		struct ion_fd_data fd;
+		struct ion_allocation_data allocation;
+		struct ion_handle_data handle;
+		struct ion_custom_data custom;
+	} data;
 
-	down_read(&dev->lock);
-	if (!buffer) {
-		query->cnt = dev->heap_cnt;
-		ret = 0;
-		goto out;
+	dir = ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (dir & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_ALLOC:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_alloc(client, data.allocation.len,
+				   data.allocation.align,
+				   data.allocation.heap_id_mask,
+				   data.allocation.flags);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+
+		data.allocation.handle = handle->id;
+
+		cleanup_handle = handle;
+		break;
 	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle *handle;
 
-	if (query->cnt <= 0)
-		goto out;
-
-	max_cnt = query->cnt;
-
-	plist_for_each_entry(heap, &dev->heaps, node) {
-		strncpy(hdata.name, heap->name, MAX_HEAP_NAME);
-		hdata.name[sizeof(hdata.name) - 1] = '\0';
-		hdata.type = heap->type;
-		hdata.heap_id = heap->id;
-
-		if (copy_to_user(&buffer[cnt], &hdata, sizeof(hdata))) {
-			ret = -EFAULT;
-			goto out;
+		mutex_lock(&client->lock);
+		handle = ion_handle_get_by_id_nolock(client,
+						     data.handle.handle);
+		if (IS_ERR(handle)) {
+			mutex_unlock(&client->lock);
+			return PTR_ERR(handle);
 		}
+		ion_free_nolock(client, handle);
+		ion_handle_put_nolock(handle);
+		mutex_unlock(&client->lock);
+		break;
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	{
+		struct ion_handle *handle;
 
-		cnt++;
-		if (cnt >= max_cnt)
-			break;
+		handle = ion_handle_get_by_id(client, data.handle.handle);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		data.fd.fd = ion_share_dma_buf_fd(client, handle);
+		ion_handle_put(handle);
+		if (data.fd.fd < 0)
+			ret = data.fd.fd;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_import_dma_buf_fd(client, data.fd.fd);
+		if (IS_ERR(handle))
+			ret = PTR_ERR(handle);
+		else
+			data.handle.handle = handle->id;
+		break;
+	}
+	case ION_IOC_SYNC:
+	{
+		ret = ion_sync_for_device(client, data.fd.fd);
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		ret = dev->custom_ioctl(client, data.custom.cmd,
+						data.custom.arg);
+		break;
+	}
+	case ION_IOC_CLEAN_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_CLEAN_CACHES, arg);
+	case ION_IOC_INV_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_INV_CACHES, arg);
+	case ION_IOC_CLEAN_INV_CACHES:
+		return client->dev->custom_ioctl(client,
+						ION_IOC_CLEAN_INV_CACHES, arg);
+	default:
+		return -ENOTTY;
 	}
 
-	query->cnt = cnt;
-out:
-	up_read(&dev->lock);
+	if (dir & _IOC_READ) {
+		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+			if (cleanup_handle)
+				ion_free(client, cleanup_handle);
+			return -EFAULT;
+		}
+	}
 	return ret;
 }
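A hedged userspace sketch of the ALLOC -> SHARE -> FREE path served by
ion_ioctl() above; it assumes the exported uapi ion.h, and the heap id and
flags are placeholders chosen for illustration:

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ion.h>

	static int example_alloc_and_share(void)
	{
		struct ion_allocation_data alloc = {
			.len          = 1024 * 1024,
			.align        = 4096,
			.heap_id_mask = 1 << 1,
			.flags        = ION_FLAG_CACHED,
		};
		struct ion_fd_data share = { 0 };
		struct ion_handle_data free_data = { 0 };
		int dmabuf_fd = -1;
		int ion_fd = open("/dev/ion", O_RDWR);

		if (ion_fd < 0)
			return -1;
		if (ioctl(ion_fd, ION_IOC_ALLOC, &alloc) == 0) {
			share.handle = alloc.handle;
			if (ioctl(ion_fd, ION_IOC_SHARE, &share) == 0)
				dmabuf_fd = share.fd;	/* a dma-buf fd */
			free_data.handle = alloc.handle;
			ioctl(ion_fd, ION_IOC_FREE, &free_data);
		}
		close(ion_fd);
		return dmabuf_fd;
	}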
 
@@ -1255,6 +1665,110 @@
 	return size;
 }
 
+/**
+ * Create a mem_map of the heap.
+ * @param s seq_file to log error message to.
+ * @param heap The heap to create mem_map for.
+ * @param mem_map The mem map to be created.
+ */
+void ion_debug_mem_map_create(struct seq_file *s, struct ion_heap *heap,
+			      struct list_head *mem_map)
+{
+	struct ion_device *dev = heap->dev;
+	struct rb_node *cnode;
+	size_t size;
+	struct ion_client *client;
+
+	if (!heap->ops->phys)
+		return;
+
+	down_read(&dev->lock);
+	for (cnode = rb_first(&dev->clients); cnode; cnode = rb_next(cnode)) {
+		struct rb_node *hnode;
+
+		client = rb_entry(cnode, struct ion_client, node);
+
+		mutex_lock(&client->lock);
+		for (hnode = rb_first(&client->handles);
+		     hnode;
+		     hnode = rb_next(hnode)) {
+			struct ion_handle *handle = rb_entry(
+				hnode, struct ion_handle, node);
+			if (handle->buffer->heap == heap) {
+				struct mem_map_data *data =
+					kzalloc(sizeof(*data), GFP_KERNEL);
+				if (!data)
+					goto inner_error;
+				heap->ops->phys(heap, handle->buffer,
+						&data->addr, &size);
+				data->size = (unsigned long)size;
+				data->addr_end = data->addr + data->size - 1;
+				data->client_name = kstrdup(client->name,
+							GFP_KERNEL);
+				if (!data->client_name) {
+					kfree(data);
+					goto inner_error;
+				}
+				list_add(&data->node, mem_map);
+			}
+		}
+		mutex_unlock(&client->lock);
+	}
+	up_read(&dev->lock);
+	return;
+
+inner_error:
+	seq_puts(s,
+		 "ERROR: out of memory. Part of memory map will not be logged\n");
+	mutex_unlock(&client->lock);
+	up_read(&dev->lock);
+}
+
+/**
+ * Free the memory allocated by ion_debug_mem_map_create
+ * @param mem_map The mem map to free.
+ */
+static void ion_debug_mem_map_destroy(struct list_head *mem_map)
+{
+	if (mem_map) {
+		struct mem_map_data *data, *tmp;
+
+		list_for_each_entry_safe(data, tmp, mem_map, node) {
+			list_del(&data->node);
+			kfree(data->client_name);
+			kfree(data);
+		}
+	}
+}
+
+static int mem_map_cmp(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct mem_map_data *d1, *d2;
+
+	d1 = list_entry(a, struct mem_map_data, node);
+	d2 = list_entry(b, struct mem_map_data, node);
+	if (d1->addr == d2->addr)
+		return d1->size - d2->size;
+	return d1->addr - d2->addr;
+}
+
+/**
+ * Print heap debug information.
+ * @param s seq_file to log message to.
+ * @param heap pointer to heap that we will print debug information for.
+ */
+static void ion_heap_print_debug(struct seq_file *s, struct ion_heap *heap)
+{
+	if (heap->ops->print_debug) {
+		struct list_head mem_map = LIST_HEAD_INIT(mem_map);
+
+		ion_debug_mem_map_create(s, heap, &mem_map);
+		list_sort(NULL, &mem_map, mem_map_cmp);
+		heap->ops->print_debug(heap, s, &mem_map);
+		ion_debug_mem_map_destroy(&mem_map);
+	}
+}
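A sketch of what a heap's ->print_debug callback might do with the sorted
mem_map list built above; the output format is illustrative only:

	static int example_heap_print_debug(struct ion_heap *heap,
					    struct seq_file *s,
					    const struct list_head *mem_map)
	{
		struct mem_map_data *data;

		list_for_each_entry(data, mem_map, node)
			seq_printf(s, "%16s %pa--%pa %lu bytes\n",
				   data->client_name ? : "(null)",
				   &data->addr, &data->addr_end, data->size);
		return 0;
	}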
+
 static int ion_debug_heap_show(struct seq_file *s, void *unused)
 {
 	struct ion_heap *heap = s->private;
@@ -1311,12 +1825,13 @@
 	seq_printf(s, "%16s %16zu\n", "total ", total_size);
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 		seq_printf(s, "%16s %16zu\n", "deferred free",
-			   heap->free_list_size);
+				heap->free_list_size);
 	seq_puts(s, "----------------------------------------------------\n");
 
 	if (heap->debug_show)
 		heap->debug_show(heap, s, unused);
 
+	ion_heap_print_debug(s, heap);
 	return 0;
 }
 
@@ -1332,6 +1847,29 @@
 	.release = single_release,
 };
 
+void show_ion_usage(struct ion_device *dev)
+{
+	struct ion_heap *heap;
+
+	if (!down_read_trylock(&dev->lock)) {
+		pr_err("Ion output would deadlock, can't print debug information\n");
+		return;
+	}
+
+	pr_info("%16.s %16.s %16.s\n", "Heap name", "Total heap size",
+		"Total orphaned size");
+	pr_info("---------------------------------\n");
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		pr_info("%16.s 0x%16.x 0x%16.x\n",
+			heap->name, atomic_read(&heap->total_allocated),
+			atomic_read(&heap->total_allocated) -
+			atomic_read(&heap->total_handles));
+		if (heap->debug_show)
+			heap->debug_show(heap, NULL, 0);
+	}
+	up_read(&dev->lock);
+}
+
 static int debug_shrink_set(void *data, u64 val)
 {
 	struct ion_heap *heap = data;
@@ -1371,7 +1909,8 @@
 {
 	struct dentry *debug_file;
 
-	if (!heap->ops->allocate || !heap->ops->free)
+	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);
 
@@ -1393,15 +1932,15 @@
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
 	debug_file = debugfs_create_file(heap->name, 0664,
-					 dev->heaps_debug_root, heap,
-					 &debug_heap_fops);
+					dev->heaps_debug_root, heap,
+					&debug_heap_fops);
 
 	if (!debug_file) {
 		char buf[256], *path;
 
 		path = dentry_path(dev->heaps_debug_root, buf, 256);
 		pr_err("Failed to create heap debugfs at %s/%s\n",
-		       path, heap->name);
+			path, heap->name);
 	}
 
 	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
@@ -1416,15 +1955,38 @@
 
 			path = dentry_path(dev->heaps_debug_root, buf, 256);
 			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
-			       path, debug_name);
+				path, debug_name);
 		}
 	}
 
-	dev->heap_cnt++;
 	up_write(&dev->lock);
 }
 EXPORT_SYMBOL(ion_device_add_heap);
 
+int ion_walk_heaps(struct ion_client *client, int heap_id,
+		   enum ion_heap_type type, void *data,
+		   int (*f)(struct ion_heap *heap, void *data))
+{
+	int ret_val = 0;
+	struct ion_heap *heap;
+	struct ion_device *dev = client->dev;
+	/*
+	 * traverse the list of heaps available in this system
+	 * and find the heap that is specified.
+	 */
+	down_write(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		if (ION_HEAP(heap->id) != heap_id ||
+		    type != heap->type)
+			continue;
+		ret_val = f(heap, data);
+		break;
+	}
+	up_write(&dev->lock);
+	return ret_val;
+}
+EXPORT_SYMBOL(ion_walk_heaps);
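A hedged sketch of an ion_walk_heaps() caller; the heap id and heap type are
assumptions picked for illustration, and ION_HEAP() is the same id-to-mask
macro the walker itself uses:

	static int example_heap_cb(struct ion_heap *heap, void *data)
	{
		pr_info("visiting heap %s (id %u)\n", heap->name, heap->id);
		return 0;
	}

	/* from code that holds a valid client: */
	ret = ion_walk_heaps(client, ION_HEAP(example_heap_id),
			     ION_HEAP_TYPE_CARVEOUT, NULL, example_heap_cb);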
+
 struct ion_device *ion_device_create(long (*custom_ioctl)
 				     (struct ion_client *client,
 				      unsigned int cmd,
@@ -1485,3 +2047,38 @@
 	kfree(dev);
 }
 EXPORT_SYMBOL(ion_device_destroy);
+
+void __init ion_reserve(struct ion_platform_data *data)
+{
+	int i;
+
+	for (i = 0; i < data->nr; i++) {
+		if (data->heaps[i].size == 0)
+			continue;
+
+		if (data->heaps[i].base == 0) {
+			phys_addr_t paddr;
+
+			paddr = memblock_alloc_base(data->heaps[i].size,
+						    data->heaps[i].align,
+						    MEMBLOCK_ALLOC_ANYWHERE);
+			if (!paddr) {
+				pr_err("%s: error allocating memblock for heap %d\n",
+				       __func__, i);
+				continue;
+			}
+			data->heaps[i].base = paddr;
+		} else {
+			int ret = memblock_reserve(data->heaps[i].base,
+					       data->heaps[i].size);
+			if (ret)
+				pr_err("memblock reserve of %zx@%pa failed\n",
+				       data->heaps[i].size,
+				       &data->heaps[i].base);
+		}
+		pr_info("%s: %s reserved base %pa size %zu\n", __func__,
+			data->heaps[i].name,
+			&data->heaps[i].base,
+			data->heaps[i].size);
+	}
+}
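A board-file style sketch of the data ion_reserve() consumes; the heap id,
name, base address and size are made-up values:

	static struct ion_platform_heap example_heaps[] = {
		{
			.id   = 1,
			.type = ION_HEAP_TYPE_CARVEOUT,
			.name = "example-carveout",
			.base = 0x90000000,
			.size = SZ_8M,
		},
	};

	static struct ion_platform_data example_ion_pdata = {
		.nr    = ARRAY_SIZE(example_heaps),
		.heaps = example_heaps,
	};

	/* called early, e.g. from the machine's memory reserve hook: */
	ion_reserve(&example_ion_pdata);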
diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h
index 93dafb4..da7c083 100644
--- a/drivers/staging/android/ion/ion.h
+++ b/drivers/staging/android/ion/ion.h
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion.h
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -17,8 +18,7 @@
 #ifndef _LINUX_ION_H
 #define _LINUX_ION_H
 
-#include <linux/types.h>
-
+#include <linux/err.h>
 #include "../uapi/ion.h"
 
 struct ion_handle;
@@ -34,7 +34,7 @@
  * be converted to phys_addr_t.  For the time being many kernel interfaces
  * do not accept phys_addr_t's that would have to
  */
-#define ion_phys_addr_t unsigned long
+#define ion_phys_addr_t dma_addr_t
 
 /**
  * struct ion_platform_heap - defines a heap in the given platform
@@ -45,6 +45,9 @@
  * @name:	used for debug purposes
  * @base:	base address of heap in physical memory if applicable
  * @size:	size of the heap in bytes if applicable
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
+ * @extra_data:	Extra data specific to each heap type
  * @align:	required alignment in physical memory if applicable
  * @priv:	private info passed from the board file
  *
@@ -56,22 +59,39 @@
 	const char *name;
 	ion_phys_addr_t base;
 	size_t size;
+	unsigned int has_outer_cache;
+	void *extra_data;
 	ion_phys_addr_t align;
 	void *priv;
 };
 
 /**
  * struct ion_platform_data - array of platform heaps passed from board file
- * @nr:		number of structures in the array
- * @heaps:	array of platform_heap structions
+ * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
+ * @nr:    number of structures in the array
+ * @heaps: array of platform_heap structures
  *
  * Provided by the board file in the form of platform data to a platform device.
  */
 struct ion_platform_data {
+	unsigned int has_outer_cache;
 	int nr;
 	struct ion_platform_heap *heaps;
 };
 
+#ifdef CONFIG_ION
+
+/**
+ * ion_reserve() - reserve memory for ion heaps if applicable
+ * @data:	platform data specifying starting physical address and
+ *		size
+ *
+ * Calls memblock reserve to set aside memory for heaps that are
+ * located at specific memory addresses or of specific sizes not
+ * managed by the kernel
+ */
+void ion_reserve(struct ion_platform_data *data);
+
 /**
  * ion_client_create() -  allocate a client and returns it
  * @dev:		the global ion device
@@ -119,6 +139,36 @@
 void ion_free(struct ion_client *client, struct ion_handle *handle);
 
 /**
+ * ion_phys - returns the physical address and len of a handle
+ * @client:	the client
+ * @handle:	the handle
+ * @addr:	a pointer to put the address in
+ * @len:	a pointer to put the length in
+ *
+ * This function queries the heap for a particular handle to get the
+ * handle's physical address.  Its output is only correct if
+ * a heap returns physically contiguous memory -- in other cases
+ * this api should not be implemented -- ion_sg_table should be used
+ * instead.  Returns -EINVAL if the handle is invalid.  This has
+ * no implications on the reference counting of the handle --
+ * the returned value may not be valid if the caller is not
+ * holding a reference.
+ */
+int ion_phys(struct ion_client *client, struct ion_handle *handle,
+	     ion_phys_addr_t *addr, size_t *len);
+
+/**
+ * ion_sg_table - return an sg_table describing a handle
+ * @client:	the client
+ * @handle:	the handle
+ *
+ * This function returns the sg_table describing
+ * a particular ion handle.
+ */
+struct sg_table *ion_sg_table(struct ion_client *client,
+			      struct ion_handle *handle);
+
+/**
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client
  * @handle:	handle to map
@@ -173,4 +223,67 @@
  */
 struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd);
 
+#else
+static inline void ion_reserve(struct ion_platform_data *data) {}
+
+static inline struct ion_client *ion_client_create(
+	struct ion_device *dev, unsigned int heap_id_mask, const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_client_destroy(struct ion_client *client) {}
+
+static inline struct ion_handle *ion_alloc(struct ion_client *client,
+					   size_t len, size_t align,
+					   unsigned int heap_id_mask,
+					   unsigned int flags)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_free(struct ion_client *client,
+			    struct ion_handle *handle) {}
+
+static inline int ion_phys(struct ion_client *client, struct ion_handle *handle,
+			   ion_phys_addr_t *addr, size_t *len)
+{
+	return -ENODEV;
+}
+
+static inline struct sg_table *ion_sg_table(struct ion_client *client,
+					    struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void *ion_map_kernel(struct ion_client *client,
+				   struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline void ion_unmap_kernel(struct ion_client *client,
+				    struct ion_handle *handle) {}
+
+static inline struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+						struct ion_handle *handle)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client,
+							int fd)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_flags(struct ion_client *client,
+				       struct ion_handle *handle,
+				       unsigned long *flags)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
 #endif /* _LINUX_ION_H */
diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c
index a8ea973..eeeeb28 100644
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -25,17 +25,15 @@
 #include "ion.h"
 #include "ion_priv.h"
 
-#define ION_CARVEOUT_ALLOCATE_FAIL	-1
-
 struct ion_carveout_heap {
 	struct ion_heap heap;
 	struct gen_pool *pool;
 	ion_phys_addr_t base;
 };
 
-static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
-					     unsigned long size,
-					     unsigned long align)
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap,
+				      unsigned long size,
+				      unsigned long align)
 {
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
@@ -47,8 +45,8 @@
 	return offset;
 }
 
-static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-			      unsigned long size)
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size)
 {
 	struct ion_carveout_heap *carveout_heap =
 		container_of(heap, struct ion_carveout_heap, heap);
@@ -58,6 +56,19 @@
 	gen_pool_free(carveout_heap->pool, addr, size);
 }
 
+static int ion_carveout_heap_phys(struct ion_heap *heap,
+				  struct ion_buffer *buffer,
+				  ion_phys_addr_t *addr, size_t *len)
+{
+	struct sg_table *table = buffer->priv_virt;
+	struct page *page = sg_page(table->sgl);
+	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
+
+	*addr = paddr;
+	*len = buffer->size;
+	return 0;
+}
+
 static int ion_carveout_heap_allocate(struct ion_heap *heap,
 				      struct ion_buffer *buffer,
 				      unsigned long size, unsigned long align,
@@ -84,7 +95,7 @@
 	}
 
 	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
-	buffer->sg_table = table;
+	buffer->priv_virt = table;
 
 	return 0;
 
@@ -98,14 +109,16 @@
 static void ion_carveout_heap_free(struct ion_buffer *buffer)
 {
 	struct ion_heap *heap = buffer->heap;
-	struct sg_table *table = buffer->sg_table;
+	struct sg_table *table = buffer->priv_virt;
 	struct page *page = sg_page(table->sgl);
+	struct device *dev = heap->priv;
+
 	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
 
 	ion_heap_buffer_zero(buffer);
 
 	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+		dma_sync_sg_for_device(dev, table->sgl, table->nents,
 				       DMA_BIDIRECTIONAL);
 
 	ion_carveout_free(heap, paddr, buffer->size);
@@ -113,9 +126,23 @@
 	kfree(table);
 }
 
+static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
+						  struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
+					struct ion_buffer *buffer)
+{
+}
+
 static struct ion_heap_ops carveout_heap_ops = {
 	.allocate = ion_carveout_heap_allocate,
 	.free = ion_carveout_heap_free,
+	.phys = ion_carveout_heap_phys,
+	.map_dma = ion_carveout_heap_map_dma,
+	.unmap_dma = ion_carveout_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
@@ -128,11 +155,12 @@
 
 	struct page *page;
 	size_t size;
+	struct device *dev = heap_data->priv;
 
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
 
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 70495dc..e6b70ff 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_chunk_heap.c
  *
  * Copyright (C) 2012 Google, Inc.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -34,9 +35,9 @@
 };
 
 static int ion_chunk_heap_allocate(struct ion_heap *heap,
-				   struct ion_buffer *buffer,
-				   unsigned long size, unsigned long align,
-				   unsigned long flags)
+				      struct ion_buffer *buffer,
+				      unsigned long size, unsigned long align,
+				      unsigned long flags)
 {
 	struct ion_chunk_heap *chunk_heap =
 		container_of(heap, struct ion_chunk_heap, heap);
@@ -71,11 +72,11 @@
 		if (!paddr)
 			goto err;
 		sg_set_page(sg, pfn_to_page(PFN_DOWN(paddr)),
-			    chunk_heap->chunk_size, 0);
+				chunk_heap->chunk_size, 0);
 		sg = sg_next(sg);
 	}
 
-	buffer->sg_table = table;
+	buffer->priv_virt = table;
 	chunk_heap->allocated += allocated_size;
 	return 0;
 err:
@@ -95,17 +96,18 @@
 	struct ion_heap *heap = buffer->heap;
 	struct ion_chunk_heap *chunk_heap =
 		container_of(heap, struct ion_chunk_heap, heap);
-	struct sg_table *table = buffer->sg_table;
+	struct sg_table *table = buffer->priv_virt;
 	struct scatterlist *sg;
 	int i;
 	unsigned long allocated_size;
+	struct device *dev = heap->priv;
 
 	allocated_size = ALIGN(buffer->size, chunk_heap->chunk_size);
 
 	ion_heap_buffer_zero(buffer);
 
 	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
+		dma_sync_sg_for_device(dev, table->sgl, table->nents,
 				       DMA_BIDIRECTIONAL);
 
 	for_each_sg(table->sgl, sg, table->nents, i) {
@@ -117,9 +119,22 @@
 	kfree(table);
 }
 
+static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
+					       struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
+				     struct ion_buffer *buffer)
+{
+}
+
 static struct ion_heap_ops chunk_heap_ops = {
 	.allocate = ion_chunk_heap_allocate,
 	.free = ion_chunk_heap_free,
+	.map_dma = ion_chunk_heap_map_dma,
+	.unmap_dma = ion_chunk_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
@@ -131,11 +146,12 @@
 	int ret;
 	struct page *page;
 	size_t size;
+	struct device *dev = heap_data->priv;
 
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, size, DMA_BIDIRECTIONAL);
 
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)
@@ -160,8 +176,8 @@
 	chunk_heap->heap.ops = &chunk_heap_ops;
 	chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK;
 	chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
-	pr_debug("%s: base %lu size %zu align %ld\n", __func__,
-		 chunk_heap->base, heap_data->size, heap_data->align);
+	pr_debug("%s: base %pad size %zu align %pad\n", __func__,
+		 &chunk_heap->base, heap_data->size, &heap_data->align);
 
 	return &chunk_heap->heap;
 
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index 6c7de74..14bf139 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -4,6 +4,8 @@
  * Copyright (C) Linaro 2012
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
  *
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
  * may be copied, distributed, and modified under those terms.
@@ -20,49 +22,65 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/dma-mapping.h>
+#include <linux/msm_ion.h>
+
+#include <asm/cacheflush.h>
+#include <soc/qcom/secure_buffer.h>
 
 #include "ion.h"
 #include "ion_priv.h"
 
 #define ION_CMA_ALLOCATE_FAILED -1
 
-struct ion_cma_heap {
-	struct ion_heap heap;
-	struct device *dev;
-};
-
-#define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap)
-
 struct ion_cma_buffer_info {
 	void *cpu_addr;
 	dma_addr_t handle;
 	struct sg_table *table;
+	bool is_cached;
 };
 
+static int cma_heap_has_outer_cache;
+/*
+ * Create scatter-list for the already allocated DMA buffer.
+ * This function could be replaced by dma_common_get_sgtable
+ * as soon as it becomes available.
+ */
+static int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
+			       void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	struct page *page = pfn_to_page(PFN_DOWN(handle));
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (unlikely(ret))
+		return ret;
+
+	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return 0;
+}
 
 /* ION CMA heap operations functions */
 static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 			    unsigned long len, unsigned long align,
 			    unsigned long flags)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = heap->priv;
 	struct ion_cma_buffer_info *info;
 
 	dev_dbg(dev, "Request buffer allocation len %ld\n", len);
 
-	if (buffer->flags & ION_FLAG_CACHED)
-		return -EINVAL;
-
-	if (align > PAGE_SIZE)
-		return -EINVAL;
-
 	info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
 	if (!info)
 		return ION_CMA_ALLOCATE_FAILED;
 
-	info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle),
-						GFP_HIGHUSER | __GFP_ZERO);
+	if (!ION_IS_CACHED(flags))
+		info->cpu_addr = dma_alloc_writecombine(dev, len,
+							&info->handle,
+							GFP_KERNEL);
+	else
+		info->cpu_addr = dma_alloc_nonconsistent(dev, len,
+							 &info->handle,
+							 GFP_KERNEL);
 
 	if (!info->cpu_addr) {
 		dev_err(dev, "Fail to allocate buffer\n");
@@ -71,21 +89,18 @@
 
 	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
 	if (!info->table)
-		goto free_mem;
+		goto err;
 
-	if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle,
-			    len))
-		goto free_table;
+	info->is_cached = ION_IS_CACHED(flags);
+
+	ion_cma_get_sgtable(dev,
+			    info->table, info->cpu_addr, info->handle, len);
+
 	/* keep this for memory release */
 	buffer->priv_virt = info;
-	buffer->sg_table = info->table;
 	dev_dbg(dev, "Allocate buffer %p\n", buffer);
 	return 0;
 
-free_table:
-	kfree(info->table);
-free_mem:
-	dma_free_coherent(dev, len, info->cpu_addr, info->handle);
 err:
 	kfree(info);
 	return ION_CMA_ALLOCATE_FAILED;
@@ -93,35 +108,66 @@
 
 static void ion_cma_free(struct ion_buffer *buffer)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = buffer->heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
 	dev_dbg(dev, "Release buffer %p\n", buffer);
 	/* release memory */
 	dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
-	/* release sg table */
 	sg_free_table(info->table);
+	/* release sg table */
 	kfree(info->table);
 	kfree(info);
 }
 
+/* return physical address in addr */
+static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
+			ion_phys_addr_t *addr, size_t *len)
+{
+	struct device *dev = heap->priv;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
+		&info->handle);
+
+	*addr = info->handle;
+	*len = buffer->size;
+
+	return 0;
+}
+
+static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
+{
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	return info->table;
+}
+
+static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
+				   struct ion_buffer *buffer)
+{
+}
+
 static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
+	struct device *dev = buffer->heap->priv;
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
 
-	return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
-				 buffer->size);
+	if (info->is_cached)
+		return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
+				info->handle, buffer->size);
+	else
+		return dma_mmap_writecombine(dev, vma, info->cpu_addr,
+				info->handle, buffer->size);
 }
 
 static void *ion_cma_map_kernel(struct ion_heap *heap,
 				struct ion_buffer *buffer)
 {
 	struct ion_cma_buffer_info *info = buffer->priv_virt;
-	/* kernel memory mapping has been done at allocation time */
+
 	return info->cpu_addr;
 }
 
@@ -130,36 +176,168 @@
 {
 }
 
+static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
+			       const struct list_head *mem_map)
+{
+	if (mem_map) {
+		struct mem_map_data *data;
+
+		seq_puts(s, "\nMemory Map\n");
+		seq_printf(s, "%16.s %14.s %14.s %14.s\n",
+			   "client", "start address", "end address",
+			   "size");
+
+		list_for_each_entry(data, mem_map, node) {
+			const char *client_name = "(null)";
+
+			if (data->client_name)
+				client_name = data->client_name;
+
+			seq_printf(s, "%16.s 0x%14pa 0x%14pa %14lu (0x%lx)\n",
+				   client_name, &data->addr,
+				   &data->addr_end,
+				   data->size, data->size);
+		}
+	}
+	return 0;
+}
+
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
 	.map_user = ion_cma_mmap,
 	.map_kernel = ion_cma_map_kernel,
 	.unmap_kernel = ion_cma_unmap_kernel,
+	.print_debug = ion_cma_print_debug,
 };
 
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
 {
-	struct ion_cma_heap *cma_heap;
+	struct ion_heap *heap;
 
-	cma_heap = kzalloc(sizeof(struct ion_cma_heap), GFP_KERNEL);
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 
-	if (!cma_heap)
+	if (!heap)
 		return ERR_PTR(-ENOMEM);
 
-	cma_heap->heap.ops = &ion_cma_ops;
+	heap->ops = &ion_cma_ops;
 	/*
-	 * get device from private heaps data, later it will be
+	 * set the device as the heap's private data; later it will be
 	 * used to make the link with reserved CMA memory
 	 */
-	cma_heap->dev = data->priv;
-	cma_heap->heap.type = ION_HEAP_TYPE_DMA;
-	return &cma_heap->heap;
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_DMA;
+	cma_heap_has_outer_cache = data->has_outer_cache;
+	return heap;
 }
 
 void ion_cma_heap_destroy(struct ion_heap *heap)
 {
-	struct ion_cma_heap *cma_heap = to_cma_heap(heap);
+	kfree(heap);
+}
 
-	kfree(cma_heap);
+static void ion_secure_cma_free(struct ion_buffer *buffer)
+{
+	int ret = 0;
+	u32 source_vm;
+	int dest_vmid;
+	int dest_perms;
+	struct ion_cma_buffer_info *info = buffer->priv_virt;
+
+	source_vm = get_secure_vmid(buffer->flags);
+	if (source_vm < 0) {
+		pr_err("%s: Failed to get secure vmid\n", __func__);
+		return;
+	}
+	dest_vmid = VMID_HLOS;
+	dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
+
+	ret = hyp_assign_table(info->table, &source_vm, 1,
+			       &dest_vmid, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Not freeing memory since assign failed\n",
+		       __func__);
+		return;
+	}
+
+	ion_cma_free(buffer);
+}
+
+static int ion_secure_cma_allocate(
+			struct ion_heap *heap,
+			struct ion_buffer *buffer, unsigned long len,
+			unsigned long align, unsigned long flags)
+{
+	int ret = 0;
+	int source_vm;
+	int dest_vm;
+	int dest_perms;
+	struct ion_cma_buffer_info *info;
+
+	source_vm = VMID_HLOS;
+	dest_vm = get_secure_vmid(flags);
+	if (dest_vm < 0) {
+		pr_err("%s: Failed to get secure vmid\n", __func__);
+		return -EINVAL;
+	}
+	dest_perms = PERM_READ | PERM_WRITE;
+
+	ret = ion_cma_allocate(heap, buffer, len, align, flags);
+	if (ret) {
+		dev_err(heap->priv, "Unable to allocate cma buffer");
+		return ret;
+	}
+
+	info = buffer->priv_virt;
+	ret = hyp_assign_table(info->table, &source_vm, 1,
+			       &dest_vm, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Assign call failed\n", __func__);
+		goto err;
+	}
+	return ret;
+
+err:
+	ion_secure_cma_free(buffer);
+	return ret;
+}
+
+static struct ion_heap_ops ion_secure_cma_ops = {
+	.allocate = ion_secure_cma_allocate,
+	.free = ion_secure_cma_free,
+	.map_dma = ion_cma_heap_map_dma,
+	.unmap_dma = ion_cma_heap_unmap_dma,
+	.phys = ion_cma_phys,
+	.map_user = ion_cma_mmap,
+	.map_kernel = ion_cma_map_kernel,
+	.unmap_kernel = ion_cma_unmap_kernel,
+	.print_debug = ion_cma_print_debug,
+};
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *data)
+{
+	struct ion_heap *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+
+	heap->ops = &ion_secure_cma_ops;
+	/*
+	 * set the device as the heap's private data; later it will be
+	 * used to make the link with reserved CMA memory
+	 */
+	heap->priv = data->priv;
+	heap->type = ION_HEAP_TYPE_HYP_CMA;
+	cma_heap_has_outer_cache = data->has_outer_cache;
+	return heap;
+}
+
+void ion_cma_secure_heap_destroy(struct ion_heap *heap)
+{
+	kfree(heap);
 }
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 4e5c0f1..e75166a 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -22,6 +23,9 @@
 #include <linux/sched.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/highmem.h>
+#include <linux/dma-mapping.h>
 #include "ion.h"
 #include "ion_priv.h"
 
@@ -38,7 +42,7 @@
 	struct page **tmp = pages;
 
 	if (!pages)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	if (buffer->flags & ION_FLAG_CACHED)
 		pgprot = PAGE_KERNEL;
@@ -93,7 +97,7 @@
 		}
 		len = min(len, remainder);
 		ret = remap_pfn_range(vma, addr, page_to_pfn(page), len,
-				      vma->vm_page_prot);
+				vma->vm_page_prot);
 		if (ret)
 			return ret;
 		addr += len;
@@ -116,7 +120,7 @@
 }
 
 static int ion_heap_sglist_zero(struct scatterlist *sgl, unsigned int nents,
-				pgprot_t pgprot)
+						pgprot_t pgprot)
 {
 	int p = 0;
 	int ret = 0;
@@ -181,7 +185,7 @@
 }
 
 static size_t _ion_heap_freelist_drain(struct ion_heap *heap, size_t size,
-				       bool skip_pools)
+				bool skip_pools)
 {
 	struct ion_buffer *buffer;
 	size_t total_drained = 0;
@@ -266,7 +270,7 @@
 }
 
 static unsigned long ion_heap_shrink_count(struct shrinker *shrinker,
-					   struct shrink_control *sc)
+						struct shrink_control *sc)
 {
 	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
 					     shrinker);
@@ -279,7 +283,7 @@
 }
 
 static unsigned long ion_heap_shrink_scan(struct shrinker *shrinker,
-					  struct shrink_control *sc)
+						struct shrink_control *sc)
 {
 	struct ion_heap *heap = container_of(shrinker, struct ion_heap,
 					     shrinker);
@@ -342,14 +346,15 @@
 	}
 
 	if (IS_ERR_OR_NULL(heap)) {
-		pr_err("%s: error creating heap %s type %d base %lu size %zu\n",
+		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
 		       __func__, heap_data->name, heap_data->type,
-		       heap_data->base, heap_data->size);
+		       &heap_data->base, heap_data->size);
 		return ERR_PTR(-EINVAL);
 	}
 
 	heap->name = heap_data->name;
 	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
 	return heap;
 }
 EXPORT_SYMBOL(ion_heap_create);
diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c
index aea89c1..38d4175 100644
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -1,7 +1,8 @@
 /*
- * drivers/staging/android/ion/ion_mem_pool.c
+ * drivers/staging/android/ion/ion_page_pool.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -22,23 +23,35 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <linux/vmalloc.h>
 #include "ion_priv.h"
 
 static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 {
-	struct page *page = alloc_pages(pool->gfp_mask, pool->order);
+	struct page *page;
+
+	page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);
 
 	if (!page)
 		return NULL;
-	if (!pool->cached)
-		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
-					  DMA_BIDIRECTIONAL);
+
+	if (pool->gfp_mask & __GFP_ZERO)
+		if (msm_ion_heap_high_order_page_zero(pool->dev, page,
+						      pool->order))
+			goto error_free_pages;
+
+	ion_page_pool_alloc_set_cache_policy(pool, page);
+
 	return page;
+error_free_pages:
+	__free_pages(page, pool->order);
+	return NULL;
 }
 
 static void ion_page_pool_free_pages(struct ion_page_pool *pool,
 				     struct page *page)
 {
+	ion_page_pool_free_set_cache_policy(pool, page);
 	__free_pages(page, pool->order);
 }
 
@@ -74,21 +87,45 @@
 	return page;
 }
 
-struct page *ion_page_pool_alloc(struct ion_page_pool *pool)
+void *ion_page_pool_alloc(struct ion_page_pool *pool, bool *from_pool)
 {
 	struct page *page = NULL;
 
 	BUG_ON(!pool);
 
-	mutex_lock(&pool->mutex);
-	if (pool->high_count)
-		page = ion_page_pool_remove(pool, true);
-	else if (pool->low_count)
-		page = ion_page_pool_remove(pool, false);
-	mutex_unlock(&pool->mutex);
+	*from_pool = true;
 
-	if (!page)
+	if (mutex_trylock(&pool->mutex)) {
+		if (pool->high_count)
+			page = ion_page_pool_remove(pool, true);
+		else if (pool->low_count)
+			page = ion_page_pool_remove(pool, false);
+		mutex_unlock(&pool->mutex);
+	}
+	if (!page) {
 		page = ion_page_pool_alloc_pages(pool);
+		*from_pool = false;
+	}
+	return page;
+}
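A sketch of a system-heap style caller of the reworked allocator above; the
cache-maintenance step is an assumption about what a heap may still need to
do for pages that did not come from the pool:

	bool from_pool;
	struct page *page;

	page = ion_page_pool_alloc(pool, &from_pool);
	if (!page)
		return -ENOMEM;
	if (!from_pool)
		/* a freshly allocated page may still need cache maintenance */
		ion_pages_sync_for_device(dev, page, PAGE_SIZE << pool->order,
					  DMA_BIDIRECTIONAL);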
+
+/*
+ * Tries to allocate only from the specified pool and returns NULL otherwise.
+ */
+void *ion_page_pool_alloc_pool_only(struct ion_page_pool *pool)
+{
+	struct page *page = NULL;
+
+	if (!pool)
+		return NULL;
+
+	if (mutex_trylock(&pool->mutex)) {
+		if (pool->high_count)
+			page = ion_page_pool_remove(pool, true);
+		else if (pool->low_count)
+			page = ion_page_pool_remove(pool, false);
+		mutex_unlock(&pool->mutex);
+	}
 
 	return page;
 }
@@ -97,14 +134,17 @@
 {
 	int ret;
 
-	BUG_ON(pool->order != compound_order(page));
-
 	ret = ion_page_pool_add(pool, page);
 	if (ret)
 		ion_page_pool_free_pages(pool, page);
 }
 
-static int ion_page_pool_total(struct ion_page_pool *pool, bool high)
+void ion_page_pool_free_immediate(struct ion_page_pool *pool, struct page *page)
+{
+	ion_page_pool_free_pages(pool, page);
+}
+
+int ion_page_pool_total(struct ion_page_pool *pool, bool high)
 {
 	int count = pool->low_count;
 
@@ -115,7 +155,7 @@
 }
 
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
-			 int nr_to_scan)
+				int nr_to_scan)
 {
 	int freed = 0;
 	bool high;
@@ -148,23 +188,22 @@
 	return freed;
 }
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
-					   bool cached)
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order)
 {
 	struct ion_page_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
 
 	if (!pool)
 		return NULL;
+	pool->dev = dev;
 	pool->high_count = 0;
 	pool->low_count = 0;
 	INIT_LIST_HEAD(&pool->low_items);
 	INIT_LIST_HEAD(&pool->high_items);
-	pool->gfp_mask = gfp_mask | __GFP_COMP;
+	pool->gfp_mask = gfp_mask;
 	pool->order = order;
 	mutex_init(&pool->mutex);
 	plist_node_init(&pool->list, order);
-	if (cached)
-		pool->cached = true;
 
 	return pool;
 }
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index 3c3b324..49d947e 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_priv.h
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -23,13 +24,37 @@
 #include <linux/mm_types.h>
 #include <linux/mutex.h>
 #include <linux/rbtree.h>
+#include <linux/seq_file.h>
+
 #include <linux/sched.h>
 #include <linux/shrinker.h>
 #include <linux/types.h>
-#include <linux/miscdevice.h>
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+#include <asm/cacheflush.h>
+#endif
+#include <linux/device.h>
 
 #include "ion.h"
 
+struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
+
+/**
+ * struct mem_map_data - represents information about the memory map for a heap
+ * @node:		list node used to store in the list of mem_map_data
+ * @addr:		start address of memory region.
+ * @addr_end:		end address of memory region.
+ * @size:		size of memory region
+ * @client_name:		name of the client who owns this buffer.
+ *
+ */
+struct mem_map_data {
+	struct list_head node;
+	ion_phys_addr_t addr;
+	ion_phys_addr_t addr_end;
+	unsigned long size;
+	const char *client_name;
+};
+
 /**
  * struct ion_buffer - metadata for a particular buffer
  * @ref:		reference count
@@ -41,11 +66,16 @@
  * @size:		size of the buffer
  * @priv_virt:		private data to the buffer representable as
  *			a void *
+ * @priv_phys:		private data to the buffer representable as
+ *			an ion_phys_addr_t (and someday a phys_addr_t)
  * @lock:		protects the buffers cnt fields
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
- * @vaddr:		the kernel mapping if kmap_cnt is not zero
- * @dmap_cnt:		number of times the buffer is mapped for dma
- * @sg_table:		the sg table for the buffer if dmap_cnt is not zero
+ * @vaddr:		the kernel mapping if kmap_cnt is not zero
+ * @sg_table:		the sg table for the buffer.  Note that if you need
+ *			an sg_table for this buffer, you should likely be
+ *			using Ion as a DMA Buf exporter and using
+ *			dma_buf_map_attachment rather than trying to use this
+ *			field directly.
  * @pages:		flat array of pages in the buffer -- used by fault
  *			handler and only valid for buffers that are faulted in
  * @vmas:		list of vma's mapping this buffer
@@ -66,11 +96,13 @@
 	unsigned long flags;
 	unsigned long private_flags;
 	size_t size;
-	void *priv_virt;
+	union {
+		void *priv_virt;
+		ion_phys_addr_t priv_phys;
+	};
 	struct mutex lock;
 	int kmap_cnt;
 	void *vaddr;
-	int dmap_cnt;
 	struct sg_table *sg_table;
 	struct page **pages;
 	struct list_head vmas;
@@ -82,87 +114,21 @@
 void ion_buffer_destroy(struct ion_buffer *buffer);
 
 /**
- * struct ion_device - the metadata of the ion device node
- * @dev:		the actual misc device
- * @buffers:		an rb tree of all the existing buffers
- * @buffer_lock:	lock protecting the tree of buffers
- * @lock:		rwsem protecting the tree of heaps and clients
- * @heaps:		list of all the heaps in the system
- * @user_clients:	list of all the clients created from userspace
- */
-struct ion_device {
-	struct miscdevice dev;
-	struct rb_root buffers;
-	struct mutex buffer_lock;
-	struct rw_semaphore lock;
-	struct plist_head heaps;
-	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
-			     unsigned long arg);
-	struct rb_root clients;
-	struct dentry *debug_root;
-	struct dentry *heaps_debug_root;
-	struct dentry *clients_debug_root;
-	int heap_cnt;
-};
-
-/**
- * struct ion_client - a process/hw block local address space
- * @node:		node in the tree of all clients
- * @dev:		backpointer to ion device
- * @handles:		an rb tree of all the handles in this client
- * @idr:		an idr space for allocating handle ids
- * @lock:		lock protecting the tree of handles
- * @name:		used for debugging
- * @display_name:	used for debugging (unique version of @name)
- * @display_serial:	used for debugging (to make display_name unique)
- * @task:		used for debugging
- *
- * A client represents a list of buffers this client may access.
- * The mutex stored here is used to protect both handles tree
- * as well as the handles themselves, and should be held while modifying either.
- */
-struct ion_client {
-	struct rb_node node;
-	struct ion_device *dev;
-	struct rb_root handles;
-	struct idr idr;
-	struct mutex lock;
-	const char *name;
-	char *display_name;
-	int display_serial;
-	struct task_struct *task;
-	pid_t pid;
-	struct dentry *debug_root;
-};
-
-/**
- * ion_handle - a client local reference to a buffer
- * @ref:		reference count
- * @client:		back pointer to the client the buffer resides in
- * @buffer:		pointer to the buffer
- * @node:		node in the client's handle rbtree
- * @kmap_cnt:		count of times this client has mapped to kernel
- * @id:			client-unique id allocated by client->idr
- *
- * Modifications to node, map_cnt or mapping should be protected by the
- * lock in the client.  Other fields are never changed after initialization.
- */
-struct ion_handle {
-	struct kref ref;
-	struct ion_client *client;
-	struct ion_buffer *buffer;
-	struct rb_node node;
-	unsigned int kmap_cnt;
-	int id;
-};
-
-/**
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
- * @free:		free memory
+ * @free:		free memory. Will be called with
+ *			ION_PRIV_FLAG_SHRINKER_FREE set in buffer flags when
+ *			called from a shrinker. In that case, the pages being
+ *			free'd must be truly free'd back to the system, not put
+ *			in a page pool or otherwise cached.
+ * @phys		get physical address of a buffer (only defined on
+ *			physically contiguous heaps)
+ * @map_dma		map the memory for dma to a scatterlist
+ * @unmap_dma		unmap the memory for dma
  * @map_kernel		map memory to the kernel
  * @unmap_kernel	unmap memory to the kernel
  * @map_user		map memory to userspace
+ * @unmap_user		unmap memory to userspace
  *
  * allocate, phys, and map_user return 0 on success, -errno on error.
  * map_dma and map_kernel return pointer on success, ERR_PTR on
@@ -176,11 +142,19 @@
 			struct ion_buffer *buffer, unsigned long len,
 			unsigned long align, unsigned long flags);
 	void (*free)(struct ion_buffer *buffer);
+	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
+		    ion_phys_addr_t *addr, size_t *len);
+	struct sg_table * (*map_dma)(struct ion_heap *heap,
+				     struct ion_buffer *buffer);
+	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma);
 	int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
+	void (*unmap_user)(struct ion_heap *mapper, struct ion_buffer *buffer);
+	int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
+			   const struct list_head *mem_map);
 };
 
 /**
@@ -211,6 +185,7 @@
  *			MUST be unique
  * @name:		used for debugging
  * @shrinker:		a shrinker for the heap
+ * @priv:		private heap data
  * @free_list:		free list head if deferred free is used
  * @free_list_size	size of the deferred free list in bytes
  * @lock:		protects the free list
@@ -233,6 +208,7 @@
 	unsigned int id;
 	const char *name;
 	struct shrinker shrinker;
+	void *priv;
 	struct list_head free_list;
 	size_t free_list_size;
 	spinlock_t free_lock;
@@ -240,6 +216,8 @@
 	struct task_struct *task;
 
 	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
+	atomic_t total_allocated;
+	atomic_t total_handles;
 };
 
 /**
@@ -283,6 +261,12 @@
  */
 void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap);
 
+struct pages_mem {
+	struct page **pages;
+	u32 size;
+	void (*free_fn)(const void *);
+};
+
 /**
  * some helpers for common operations on buffers using the sg_table
  * and vaddr fields
@@ -294,6 +278,42 @@
 int ion_heap_buffer_zero(struct ion_buffer *buffer);
 int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot);
 
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order);
+struct ion_heap *get_ion_heap(int heap_id);
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *sg,
+			       size_t size);
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages);
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem);
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem);
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+			  unsigned int cmd,
+			  unsigned long arg);
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type);
+int get_secure_vmid(unsigned long flags);
+bool is_secure_vmid_valid(int vmid);
+
+/**
+ * Functions to help assign/unassign sg_table for System Secure Heap
+ */
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid);
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid);
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *data);
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *data);
+
+/**
+ * ion_heap_init_shrinker
+ * @heap:		the heap
+ *
+ * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op
+ * this function will be called to setup a shrinker to shrink the freelists
+ * and call the heap's shrink op.
+ */
+void ion_heap_init_shrinker(struct ion_heap *heap);
+
 /**
  * ion_heap_init_shrinker
  * @heap:		the heap
@@ -336,7 +356,7 @@
 size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size);
 
 /**
- * ion_heap_freelist_shrink - drain the deferred free
+ * ion_heap_freelist_drain_from_shrinker - drain the deferred free
  *				list, skipping any heap-specific
  *				pooling or caching mechanisms
  *
@@ -352,11 +372,11 @@
  * page pools or otherwise cache the pages. Everything must be
  * genuinely free'd back to the system. If you're free'ing from a
  * shrinker you probably want to use this. Note that this relies on
- * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE
- * flag.
+ * the heap.ops.free callback honoring the
+ * ION_PRIV_FLAG_SHRINKER_FREE flag.
  */
-size_t ion_heap_freelist_shrink(struct ion_heap *heap,
-					size_t size);
+size_t ion_heap_freelist_drain_from_shrinker(struct ion_heap *heap,
+					     size_t size);
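
As a rough illustration (not part of this series), a heap's shrink op might wire this helper in as sketched below; my_heap_shrink and the bytes-to-pages conversion are assumptions:

static int my_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan)
{
	size_t drained;

	/* nr_to_scan == 0 is a query: report the freelist size in pages */
	if (!nr_to_scan)
		return ion_heap_freelist_size(heap) >> PAGE_SHIFT;

	drained = ion_heap_freelist_drain_from_shrinker(heap,
					(size_t)nr_to_scan << PAGE_SHIFT);
	return drained >> PAGE_SHIFT;
}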
 
 /**
  * ion_heap_freelist_size - returns the size of the freelist in bytes
@@ -384,8 +404,37 @@
 
 struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *);
 void ion_chunk_heap_destroy(struct ion_heap *);
+#ifdef CONFIG_CMA
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
 void ion_cma_heap_destroy(struct ion_heap *);
+#else
+static inline struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *h)
+{
+	return NULL;
+}
+
+static inline void ion_cma_heap_destroy(struct ion_heap *h) {}
+#endif
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *heap);
+void ion_system_secure_heap_destroy(struct ion_heap *heap);
+
+struct ion_heap *ion_cma_secure_heap_create(struct ion_platform_heap *heap);
+void ion_cma_secure_heap_destroy(struct ion_heap *heap);
+
+/**
+ * kernel api to allocate/free from carveout -- used when carveout is
+ * used to back an architecture specific custom heap
+ */
+ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
+				      unsigned long align);
+void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
+		       unsigned long size);
+/**
+ * The carveout heap returns physical addresses. Since 0 may be a valid
+ * physical address, this value is used to indicate that an allocation failed.
+ */
+#define ION_CARVEOUT_ALLOCATE_FAIL -1
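
A minimal sketch of how an architecture-specific heap could sit on top of these carveout helpers; my_carveout_heap_* and the use of the priv_phys union member are illustrative assumptions:

static int my_carveout_heap_allocate(struct ion_heap *heap,
				     struct ion_buffer *buffer,
				     unsigned long len, unsigned long align,
				     unsigned long flags)
{
	ion_phys_addr_t paddr = ion_carveout_allocate(heap, len, align);

	if (paddr == ION_CARVEOUT_ALLOCATE_FAIL)
		return -ENOMEM;

	buffer->priv_phys = paddr;
	return 0;
}

static void my_carveout_heap_free(struct ion_buffer *buffer)
{
	ion_carveout_free(buffer->heap, buffer->priv_phys, buffer->size);
}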
 
 /**
  * functions for creating and destroying a heap pool -- allows you
@@ -406,7 +455,6 @@
  * @gfp_mask:		gfp_mask to use from alloc
  * @order:		order of pages in the pool
  * @list:		plist node for list of pools
- * @cached:		it's cached pool or not
  *
  * Allows you to keep a pool of pre allocated pages to use from your heap.
  * Keeping a pool of pages that is ready for dma, ie any cached mapping have
@@ -416,20 +464,54 @@
 struct ion_page_pool {
 	int high_count;
 	int low_count;
-	bool cached;
 	struct list_head high_items;
 	struct list_head low_items;
 	struct mutex mutex;
+	struct device *dev;
 	gfp_t gfp_mask;
 	unsigned int order;
 	struct plist_node list;
 };
 
-struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order,
-					   bool cached);
+struct ion_page_pool *ion_page_pool_create(struct device *dev, gfp_t gfp_mask,
+					   unsigned int order);
 void ion_page_pool_destroy(struct ion_page_pool *);
-struct page *ion_page_pool_alloc(struct ion_page_pool *);
-void ion_page_pool_free(struct ion_page_pool *, struct page *);
+void *ion_page_pool_alloc(struct ion_page_pool *a, bool *from_pool);
+void *ion_page_pool_alloc_pool_only(struct ion_page_pool *a);
+void ion_page_pool_free(struct ion_page_pool *a, struct page *b);
+void ion_page_pool_free_immediate(struct ion_page_pool *, struct page *);
+int ion_page_pool_total(struct ion_page_pool *pool, bool high);
+size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap, int vmid);
+
+#ifdef CONFIG_ION_POOL_CACHE_POLICY
+static inline void ion_page_pool_alloc_set_cache_policy(
+				struct ion_page_pool *pool, struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wc((unsigned long)va, 1 << pool->order);
+}
+
+static inline void ion_page_pool_free_set_cache_policy(
+				struct ion_page_pool *pool, struct page *page)
+{
+	void *va = page_address(page);
+
+	if (va)
+		set_memory_wb((unsigned long)va, 1 << pool->order);
+}
+#else
+static inline void ion_page_pool_alloc_set_cache_policy(
+				struct ion_page_pool *pool, struct page *page)
+{
+}
+
+static inline void ion_page_pool_free_set_cache_policy(
+				struct ion_page_pool *pool, struct page *page)
+{
+}
+#endif
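
To make the reworked pool API concrete, a hedged usage sketch; the device pointer, gfp mask, and error handling are assumptions rather than code from this patch:

static int example_pool_usage(struct device *dev)
{
	struct ion_page_pool *pool;
	struct page *page;
	bool from_pool = true;

	pool = ion_page_pool_create(dev, GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!pool)
		return -ENOMEM;

	/* from_pool reports whether the page actually came from the pool */
	page = ion_page_pool_alloc(pool, &from_pool);
	if (page)
		ion_page_pool_free(pool, page);

	ion_page_pool_destroy(pool);
	return 0;
}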
+
 
 /** ion_page_pool_shrink - shrinks the size of the memory cached in the pool
  * @pool:		the pool
@@ -452,22 +534,15 @@
 void ion_pages_sync_for_device(struct device *dev, struct page *page,
 		size_t size, enum dma_data_direction dir);
 
-long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-
-int ion_sync_for_device(struct ion_client *client, int fd);
-
-struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
-						int id);
-
-void ion_free_nolock(struct ion_client *client, struct ion_handle *handle);
-
-int ion_handle_put_nolock(struct ion_handle *handle);
+int ion_walk_heaps(struct ion_client *client, int heap_id,
+		   enum ion_heap_type type, void *data,
+		   int (*f)(struct ion_heap *heap, void *data));
 
 struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
-						int id);
+					int id);
 
 int ion_handle_put(struct ion_handle *handle);
 
-int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query);
+void show_ion_usage(struct ion_device *dev);
 
 #endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7e023d5..6bcce90 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -2,6 +2,7 @@
  * drivers/staging/android/ion/ion_system_heap.c
  *
  * Copyright (C) 2011 Google, Inc.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -19,129 +20,331 @@
 #include <linux/err.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
+#include <linux/msm_ion.h>
 #include <linux/scatterlist.h>
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include "ion.h"
 #include "ion_priv.h"
+#include <linux/dma-mapping.h>
+#include <trace/events/kmem.h>
+#include <soc/qcom/secure_buffer.h>
 
-#define NUM_ORDERS ARRAY_SIZE(orders)
-
-static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
+static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_NOWARN |
 				     __GFP_NORETRY) & ~__GFP_RECLAIM;
-static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_ZERO);
-static const unsigned int orders[] = {8, 4, 0};
+static gfp_t low_order_gfp_flags  = (GFP_HIGHUSER | __GFP_NOWARN);
 
+#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+static const unsigned int orders[] = {9, 8, 4, 0};
+#else
+static const unsigned int orders[] = {0};
+#endif
+
+static const int num_orders = ARRAY_SIZE(orders);
 static int order_to_index(unsigned int order)
 {
 	int i;
-
-	for (i = 0; i < NUM_ORDERS; i++)
+	for (i = 0; i < num_orders; i++)
 		if (order == orders[i])
 			return i;
 	BUG();
 	return -1;
 }
 
-static inline unsigned int order_to_size(int order)
+static unsigned int order_to_size(int order)
 {
 	return PAGE_SIZE << order;
 }
 
 struct ion_system_heap {
 	struct ion_heap heap;
-	struct ion_page_pool *uncached_pools[NUM_ORDERS];
-	struct ion_page_pool *cached_pools[NUM_ORDERS];
+	struct ion_page_pool **uncached_pools;
+	struct ion_page_pool **cached_pools;
+	struct ion_page_pool **secure_pools[VMID_LAST];
+	/* Prevents unnecessary page splitting */
+	struct mutex split_page_mutex;
 };
 
-/**
- * The page from page-pool are all zeroed before. We need do cache
- * clean for cached buffer. The uncached buffer are always non-cached
- * since it's allocated. So no need for non-cached pages.
+struct page_info {
+	struct page *page;
+	bool from_pool;
+	unsigned int order;
+	struct list_head list;
+};
+
+/*
+ * Used by ion_system_secure_heap only
+ * Since no lock is held, results are approximate.
  */
+size_t ion_system_heap_secure_page_pool_total(struct ion_heap *heap,
+					      int vmid_flags)
+{
+	struct ion_system_heap *sys_heap;
+	struct ion_page_pool *pool;
+	size_t total = 0;
+	int vmid, i;
+
+	sys_heap = container_of(heap, struct ion_system_heap, heap);
+	vmid = get_secure_vmid(vmid_flags);
+	if (!is_secure_vmid_valid(vmid))
+		return 0;
+
+	for (i = 0; i < num_orders; i++) {
+		pool = sys_heap->secure_pools[vmid][i];
+		total += ion_page_pool_total(pool, true);
+	}
+
+	return total << PAGE_SHIFT;
+}
+
 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 				      struct ion_buffer *buffer,
-				      unsigned long order)
+				      unsigned long order,
+				      bool *from_pool)
 {
 	bool cached = ion_buffer_cached(buffer);
-	struct ion_page_pool *pool;
 	struct page *page;
+	struct ion_page_pool *pool;
+	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->heap.priv;
 
-	if (!cached)
-		pool = heap->uncached_pools[order_to_index(order)];
-	else
-		pool = heap->cached_pools[order_to_index(order)];
+	if (*from_pool) {
+		if (vmid > 0)
+			pool = heap->secure_pools[vmid][order_to_index(order)];
+		else if (!cached)
+			pool = heap->uncached_pools[order_to_index(order)];
+		else
+			pool = heap->cached_pools[order_to_index(order)];
 
-	page = ion_page_pool_alloc(pool);
+		page = ion_page_pool_alloc(pool, from_pool);
+	} else {
+		gfp_t gfp_mask = low_order_gfp_flags;
 
-	if (cached)
-		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
+		if (order)
+			gfp_mask = high_order_gfp_flags;
+		page = alloc_pages(gfp_mask, order);
+		if (!page)
+			return NULL;
+		ion_pages_sync_for_device(dev, page, PAGE_SIZE << order,
 					  DMA_BIDIRECTIONAL);
+	}
+	if (!page)
+		return NULL;
+
 	return page;
 }
 
+/*
+ * For secure pages that need to be freed rather than added back to the pool,
+ * hyp_unassign should be called before calling this function.
+ */
 static void free_buffer_page(struct ion_system_heap *heap,
-			     struct ion_buffer *buffer, struct page *page)
+			     struct ion_buffer *buffer, struct page *page,
+			     unsigned int order)
 {
-	struct ion_page_pool *pool;
-	unsigned int order = compound_order(page);
 	bool cached = ion_buffer_cached(buffer);
+	int vmid = get_secure_vmid(buffer->flags);
 
-	/* go to system */
-	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
+	if (!(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
+		struct ion_page_pool *pool;
+
+		if (vmid > 0)
+			pool = heap->secure_pools[vmid][order_to_index(order)];
+		else if (cached)
+			pool = heap->cached_pools[order_to_index(order)];
+		else
+			pool = heap->uncached_pools[order_to_index(order)];
+
+		if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)
+			ion_page_pool_free_immediate(pool, page);
+		else
+			ion_page_pool_free(pool, page);
+	} else {
 		__free_pages(page, order);
-		return;
 	}
-
-	if (!cached)
-		pool = heap->uncached_pools[order_to_index(order)];
-	else
-		pool = heap->cached_pools[order_to_index(order)];
-
-	ion_page_pool_free(pool, page);
 }
 
+static struct page *alloc_from_secure_pool_order(struct ion_system_heap *heap,
+						 struct ion_buffer *buffer,
+						 unsigned long order)
+{
+	int vmid = get_secure_vmid(buffer->flags);
+	struct ion_page_pool *pool;
 
-static struct page *alloc_largest_available(struct ion_system_heap *heap,
-					    struct ion_buffer *buffer,
-					    unsigned long size,
-					    unsigned int max_order)
+	if (!is_secure_vmid_valid(vmid))
+		return NULL;
+
+	pool = heap->secure_pools[vmid][order_to_index(order)];
+	return ion_page_pool_alloc_pool_only(pool);
+}
+
+static struct page *split_page_from_secure_pool(struct ion_system_heap *heap,
+						struct ion_buffer *buffer)
+{
+	int i, j;
+	struct page *page;
+	unsigned int order;
+
+	mutex_lock(&heap->split_page_mutex);
+
+	/*
+	 * Someone may have just split a page and returned the unused portion
+	 * back to the pool, so try allocating from the pool one more time
+	 * before splitting. We want to maintain large pages sizes when
+	 * possible.
+	 */
+	page = alloc_from_secure_pool_order(heap, buffer, 0);
+	if (page)
+		goto got_page;
+
+	for (i = num_orders - 2; i >= 0; i--) {
+		order = orders[i];
+		page = alloc_from_secure_pool_order(heap, buffer, order);
+		if (!page)
+			continue;
+
+		split_page(page, order);
+		break;
+	}
+	/* Return the remaining order-0 pages to the pool */
+	if (page)
+		for (j = 1; j < (1 << order); j++)
+			free_buffer_page(heap, buffer, page + j, 0);
+
+got_page:
+	mutex_unlock(&heap->split_page_mutex);
+
+	return page;
+}
+
+static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
+						 struct ion_buffer *buffer,
+						 unsigned long size,
+						 unsigned int max_order)
 {
 	struct page *page;
+	struct page_info *info;
+	int i;
+	bool from_pool;
+
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	for (i = 0; i < num_orders; i++) {
+		if (size < order_to_size(orders[i]))
+			continue;
+		if (max_order < orders[i])
+			continue;
+		from_pool = !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC);
+		page = alloc_buffer_page(heap, buffer, orders[i], &from_pool);
+		if (!page)
+			continue;
+
+		info->page = page;
+		info->order = orders[i];
+		info->from_pool = from_pool;
+		INIT_LIST_HEAD(&info->list);
+		return info;
+	}
+	kfree(info);
+
+	return NULL;
+}
+
+static struct page_info *alloc_from_pool_preferred(
+		struct ion_system_heap *heap, struct ion_buffer *buffer,
+		unsigned long size, unsigned int max_order)
+{
+	struct page *page;
+	struct page_info *info;
 	int i;
 
-	for (i = 0; i < NUM_ORDERS; i++) {
+	info = kmalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return NULL;
+
+	for (i = 0; i < num_orders; i++) {
 		if (size < order_to_size(orders[i]))
 			continue;
 		if (max_order < orders[i])
 			continue;
 
-		page = alloc_buffer_page(heap, buffer, orders[i]);
+		page = alloc_from_secure_pool_order(heap, buffer, orders[i]);
 		if (!page)
 			continue;
 
-		return page;
+		info->page = page;
+		info->order = orders[i];
+		info->from_pool = true;
+		INIT_LIST_HEAD(&info->list);
+		return info;
 	}
 
-	return NULL;
+	page = split_page_from_secure_pool(heap, buffer);
+	if (page) {
+		info->page = page;
+		info->order = 0;
+		info->from_pool = true;
+		INIT_LIST_HEAD(&info->list);
+		return info;
+	}
+
+	kfree(info);
+	return alloc_largest_available(heap, buffer, size, max_order);
+}
+
+static unsigned int process_info(struct page_info *info,
+				 struct scatterlist *sg,
+				 struct scatterlist *sg_sync,
+				 struct pages_mem *data, unsigned int i)
+{
+	struct page *page = info->page;
+	unsigned int j;
+
+	if (sg_sync) {
+		sg_set_page(sg_sync, page, (1 << info->order) * PAGE_SIZE, 0);
+		sg_dma_address(sg_sync) = page_to_phys(page);
+	}
+	sg_set_page(sg, page, (1 << info->order) * PAGE_SIZE, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t
+	 * that is valid for the targeted device, but this works
+	 * on the currently targeted hardware.
+	 */
+	sg_dma_address(sg) = page_to_phys(page);
+	if (data) {
+		for (j = 0; j < (1 << info->order); ++j)
+			data->pages[i++] = nth_page(page, j);
+	}
+	list_del(&info->list);
+	kfree(info);
+	return i;
 }
 
 static int ion_system_heap_allocate(struct ion_heap *heap,
-				    struct ion_buffer *buffer,
-				    unsigned long size, unsigned long align,
-				    unsigned long flags)
+				     struct ion_buffer *buffer,
+				     unsigned long size, unsigned long align,
+				     unsigned long flags)
 {
 	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
 	struct sg_table *table;
+	struct sg_table table_sync = {0};
 	struct scatterlist *sg;
+	struct scatterlist *sg_sync;
+	int ret;
 	struct list_head pages;
-	struct page *page, *tmp_page;
+	struct list_head pages_from_pool;
+	struct page_info *info, *tmp_info;
 	int i = 0;
+	unsigned int nents_sync = 0;
 	unsigned long size_remaining = PAGE_ALIGN(size);
 	unsigned int max_order = orders[0];
+	struct pages_mem data;
+	unsigned int sz;
+	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->priv;
 
 	if (align > PAGE_SIZE)
 		return -EINVAL;
@@ -149,111 +352,289 @@
 	if (size / PAGE_SIZE > totalram_pages / 2)
 		return -ENOMEM;
 
+	data.size = 0;
 	INIT_LIST_HEAD(&pages);
+	INIT_LIST_HEAD(&pages_from_pool);
+
 	while (size_remaining > 0) {
-		page = alloc_largest_available(sys_heap, buffer, size_remaining,
-					       max_order);
-		if (!page)
-			goto free_pages;
-		list_add_tail(&page->lru, &pages);
-		size_remaining -= PAGE_SIZE << compound_order(page);
-		max_order = compound_order(page);
+		if (is_secure_vmid_valid(vmid))
+			info = alloc_from_pool_preferred(
+					sys_heap, buffer, size_remaining,
+					max_order);
+		else
+			info = alloc_largest_available(
+					sys_heap, buffer, size_remaining,
+					max_order);
+
+		if (!info)
+			goto err;
+
+		sz = (1 << info->order) * PAGE_SIZE;
+
+		if (info->from_pool) {
+			list_add_tail(&info->list, &pages_from_pool);
+		} else {
+			list_add_tail(&info->list, &pages);
+			data.size += sz;
+			++nents_sync;
+		}
+		size_remaining -= sz;
+		max_order = info->order;
 		i++;
 	}
-	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+
+	ret = msm_ion_heap_alloc_pages_mem(&data);
+
+	if (ret)
+		goto err;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table)
-		goto free_pages;
+		goto err_free_data_pages;
 
-	if (sg_alloc_table(table, i, GFP_KERNEL))
-		goto free_table;
+	ret = sg_alloc_table(table, i, GFP_KERNEL);
+	if (ret)
+		goto err1;
 
-	sg = table->sgl;
-	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
-		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
-		sg = sg_next(sg);
-		list_del(&page->lru);
+	if (nents_sync) {
+		ret = sg_alloc_table(&table_sync, nents_sync, GFP_KERNEL);
+		if (ret)
+			goto err_free_sg;
 	}
 
-	buffer->sg_table = table;
+	i = 0;
+	sg = table->sgl;
+	sg_sync = table_sync.sgl;
+
+	/*
+	 * We now have two separate lists. One list contains pages from the
+	 * pool and the other pages from buddy. We want to merge these
+	 * together while preserving the ordering of the pages (higher order
+	 * first).
+	 */
+	do {
+		info = list_first_entry_or_null(&pages, struct page_info, list);
+		tmp_info = list_first_entry_or_null(&pages_from_pool,
+						    struct page_info, list);
+		if (info && tmp_info) {
+			if (info->order >= tmp_info->order) {
+				i = process_info(info, sg, sg_sync, &data, i);
+				sg_sync = sg_next(sg_sync);
+			} else {
+				i = process_info(tmp_info, sg, 0, 0, i);
+			}
+		} else if (info) {
+			i = process_info(info, sg, sg_sync, &data, i);
+			sg_sync = sg_next(sg_sync);
+		} else if (tmp_info) {
+			i = process_info(tmp_info, sg, 0, 0, i);
+		}
+		sg = sg_next(sg);
+
+	} while (sg);
+
+	ret = msm_ion_heap_pages_zero(data.pages, data.size >> PAGE_SHIFT);
+	if (ret) {
+		pr_err("Unable to zero pages\n");
+		goto err_free_sg2;
+	}
+
+	if (nents_sync) {
+		dma_sync_sg_for_device(dev, table_sync.sgl, table_sync.nents,
+				       DMA_BIDIRECTIONAL);
+		if (vmid > 0) {
+			ret = ion_system_secure_heap_assign_sg(&table_sync,
+							       vmid);
+			if (ret)
+				goto err_free_sg2;
+		}
+	}
+
+	buffer->priv_virt = table;
+	if (nents_sync)
+		sg_free_table(&table_sync);
+	msm_ion_heap_free_pages_mem(&data);
 	return 0;
 
-free_table:
+err_free_sg2:
+	/* We failed to zero buffers. Bypass pool */
+	buffer->flags |= ION_PRIV_FLAG_SHRINKER_FREE;
+
+	if (vmid > 0)
+		ion_system_secure_heap_unassign_sg(table, vmid);
+
+	for_each_sg(table->sgl, sg, table->nents, i)
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				 get_order(sg->length));
+	if (nents_sync)
+		sg_free_table(&table_sync);
+err_free_sg:
+	sg_free_table(table);
+err1:
 	kfree(table);
-free_pages:
-	list_for_each_entry_safe(page, tmp_page, &pages, lru)
-		free_buffer_page(sys_heap, buffer, page);
+err_free_data_pages:
+	msm_ion_heap_free_pages_mem(&data);
+err:
+	list_for_each_entry_safe(info, tmp_info, &pages, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
+	list_for_each_entry_safe(info, tmp_info, &pages_from_pool, list) {
+		free_buffer_page(sys_heap, buffer, info->page, info->order);
+		kfree(info);
+	}
 	return -ENOMEM;
 }
 
-static void ion_system_heap_free(struct ion_buffer *buffer)
+void ion_system_heap_free(struct ion_buffer *buffer)
 {
-	struct ion_system_heap *sys_heap = container_of(buffer->heap,
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
-	struct sg_table *table = buffer->sg_table;
+	struct sg_table *table = buffer->priv_virt;
 	struct scatterlist *sg;
+	LIST_HEAD(pages);
 	int i;
+	int vmid = get_secure_vmid(buffer->flags);
+	struct device *dev = heap->priv;
 
-	/* zero the buffer before goto page pool */
-	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
-		ion_heap_buffer_zero(buffer);
+	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) &&
+	    !(buffer->flags & ION_FLAG_POOL_FORCE_ALLOC)) {
+		if (vmid < 0)
+			msm_ion_heap_sg_table_zero(dev, table, buffer->size);
+	} else if (vmid > 0) {
+		if (ion_system_secure_heap_unassign_sg(table, vmid))
+			return;
+	}
 
 	for_each_sg(table->sgl, sg, table->nents, i)
-		free_buffer_page(sys_heap, buffer, sg_page(sg));
+		free_buffer_page(sys_heap, buffer, sg_page(sg),
+				 get_order(sg->length));
 	sg_free_table(table);
 	kfree(table);
 }
 
-static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
-				  int nr_to_scan)
+struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
+					 struct ion_buffer *buffer)
 {
-	struct ion_page_pool *uncached_pool;
-	struct ion_page_pool *cached_pool;
+	return buffer->priv_virt;
+}
+
+void ion_system_heap_unmap_dma(struct ion_heap *heap,
+			       struct ion_buffer *buffer)
+{
+}
+
+static int ion_secure_page_pool_shrink(
+		struct ion_system_heap *sys_heap,
+		int vmid, int order_idx, int nr_to_scan)
+{
+	int ret, freed = 0;
+	int order = orders[order_idx];
+	struct page *page, *tmp;
+	struct sg_table sgt;
+	struct scatterlist *sg;
+	struct ion_page_pool *pool = sys_heap->secure_pools[vmid][order_idx];
+	LIST_HEAD(pages);
+
+	if (nr_to_scan == 0)
+		return ion_page_pool_total(pool, true);
+
+	while (freed < nr_to_scan) {
+		page = ion_page_pool_alloc_pool_only(pool);
+		if (!page)
+			break;
+		list_add(&page->lru, &pages);
+		freed += (1 << order);
+	}
+
+	if (!freed)
+		return freed;
+
+	ret = sg_alloc_table(&sgt, (freed >> order), GFP_KERNEL);
+	if (ret)
+		goto out1;
+	sg = sgt.sgl;
+	list_for_each_entry(page, &pages, lru) {
+		sg_set_page(sg, page, (1 << order) * PAGE_SIZE, 0);
+		sg_dma_address(sg) = page_to_phys(page);
+		sg = sg_next(sg);
+	}
+
+	if (ion_system_secure_heap_unassign_sg(&sgt, vmid))
+		goto out2;
+
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		list_del(&page->lru);
+		ion_page_pool_free_immediate(pool, page);
+	}
+
+	sg_free_table(&sgt);
+	return freed;
+
+out1:
+	/* Restore pages to secure pool */
+	list_for_each_entry_safe(page, tmp, &pages, lru) {
+		list_del(&page->lru);
+		ion_page_pool_free(pool, page);
+	}
+	return 0;
+out2:
+	/*
+	 * The security state of the pages is unknown after a failure;
+	 * They can neither be added back to the secure pool nor buddy system.
+	 */
+	sg_free_table(&sgt);
+	return 0;
+}
+
+static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+					int nr_to_scan)
+{
 	struct ion_system_heap *sys_heap;
 	int nr_total = 0;
-	int i, nr_freed;
+	int i, j, nr_freed = 0;
 	int only_scan = 0;
+	struct ion_page_pool *pool;
 
 	sys_heap = container_of(heap, struct ion_system_heap, heap);
 
 	if (!nr_to_scan)
 		only_scan = 1;
 
-	for (i = 0; i < NUM_ORDERS; i++) {
-		uncached_pool = sys_heap->uncached_pools[i];
-		cached_pool = sys_heap->cached_pools[i];
+	for (i = 0; i < num_orders; i++) {
+		nr_freed = 0;
 
-		if (only_scan) {
-			nr_total += ion_page_pool_shrink(uncached_pool,
-							 gfp_mask,
-							 nr_to_scan);
+		for (j = 0; j < VMID_LAST; j++) {
+			if (is_secure_vmid_valid(j))
+				nr_freed += ion_secure_page_pool_shrink(
+						sys_heap, j, i, nr_to_scan);
+		}
 
-			nr_total += ion_page_pool_shrink(cached_pool,
-							 gfp_mask,
-							 nr_to_scan);
-		} else {
-			nr_freed = ion_page_pool_shrink(uncached_pool,
-							gfp_mask,
-							nr_to_scan);
+		pool = sys_heap->uncached_pools[i];
+		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+
+		pool = sys_heap->cached_pools[i];
+		nr_freed += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
+		nr_total += nr_freed;
+
+		if (!only_scan) {
 			nr_to_scan -= nr_freed;
-			nr_total += nr_freed;
-			if (nr_to_scan <= 0)
-				break;
-			nr_freed = ion_page_pool_shrink(cached_pool,
-							gfp_mask,
-							nr_to_scan);
-			nr_to_scan -= nr_freed;
-			nr_total += nr_freed;
+			/* shrink completed */
 			if (nr_to_scan <= 0)
 				break;
 		}
 	}
+
 	return nr_total;
 }
 
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
+	.map_dma = ion_system_heap_map_dma,
+	.unmap_dma = ion_system_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
@@ -264,72 +645,144 @@
 				      void *unused)
 {
 
-	struct ion_system_heap *sys_heap = container_of(heap,
-							struct ion_system_heap,
-							heap);
-	int i;
+	struct ion_system_heap *sys_heap = container_of(
+					heap, struct ion_system_heap, heap);
+	bool use_seq = s;
+	unsigned long uncached_total = 0;
+	unsigned long cached_total = 0;
+	unsigned long secure_total = 0;
 	struct ion_page_pool *pool;
+	int i, j;
 
-	for (i = 0; i < NUM_ORDERS; i++) {
+	for (i = 0; i < num_orders; i++) {
 		pool = sys_heap->uncached_pools[i];
+		if (use_seq) {
+			seq_printf(s,
+				   "%d order %u highmem pages in uncached pool = %lu total\n",
+				   pool->high_count, pool->order,
+				   (1 << pool->order) * PAGE_SIZE *
+					pool->high_count);
+			seq_printf(s,
+				   "%d order %u lowmem pages in uncached pool = %lu total\n",
+				   pool->low_count, pool->order,
+				   (1 << pool->order) * PAGE_SIZE *
+					pool->low_count);
+		}
 
-		seq_printf(s, "%d order %u highmem pages uncached %lu total\n",
-			   pool->high_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->high_count);
-		seq_printf(s, "%d order %u lowmem pages uncached %lu total\n",
-			   pool->low_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->low_count);
+		uncached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->high_count;
+		uncached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->low_count;
 	}
 
-	for (i = 0; i < NUM_ORDERS; i++) {
+	for (i = 0; i < num_orders; i++) {
 		pool = sys_heap->cached_pools[i];
+		if (use_seq) {
+			seq_printf(s,
+				   "%d order %u highmem pages in cached pool = %lu total\n",
+				   pool->high_count, pool->order,
+				   (1 << pool->order) * PAGE_SIZE *
+					pool->high_count);
+			seq_printf(s,
+				   "%d order %u lowmem pages in cached pool = %lu total\n",
+				   pool->low_count, pool->order,
+				   (1 << pool->order) * PAGE_SIZE *
+					pool->low_count);
+		}
 
-		seq_printf(s, "%d order %u highmem pages cached %lu total\n",
-			   pool->high_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->high_count);
-		seq_printf(s, "%d order %u lowmem pages cached %lu total\n",
-			   pool->low_count, pool->order,
-			   (PAGE_SIZE << pool->order) * pool->low_count);
+		cached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->high_count;
+		cached_total += (1 << pool->order) * PAGE_SIZE *
+			pool->low_count;
 	}
+
+	for (i = 0; i < num_orders; i++) {
+		for (j = 0; j < VMID_LAST; j++) {
+			if (!is_secure_vmid_valid(j))
+				continue;
+			pool = sys_heap->secure_pools[j][i];
+
+			if (use_seq) {
+				seq_printf(s,
+					   "VMID %d: %d order %u highmem pages in secure pool = %lu total\n",
+					   j, pool->high_count, pool->order,
+					   (1 << pool->order) * PAGE_SIZE *
+						pool->high_count);
+				seq_printf(s,
+					   "VMID  %d: %d order %u lowmem pages in secure pool = %lu total\n",
+					   j, pool->low_count, pool->order,
+					   (1 << pool->order) * PAGE_SIZE *
+						pool->low_count);
+			}
+
+			secure_total += (1 << pool->order) * PAGE_SIZE *
+					 pool->high_count;
+			secure_total += (1 << pool->order) * PAGE_SIZE *
+					 pool->low_count;
+		}
+	}
+
+	if (use_seq) {
+		seq_puts(s, "--------------------------------------------\n");
+		seq_printf(s, "uncached pool = %lu cached pool = %lu secure pool = %lu\n",
+			   uncached_total, cached_total, secure_total);
+		seq_printf(s, "pool total (uncached + cached + secure) = %lu\n",
+			   uncached_total + cached_total + secure_total);
+		seq_puts(s, "--------------------------------------------\n");
+	} else {
+		pr_info("-------------------------------------------------\n");
+		pr_info("uncached pool = %lu cached pool = %lu secure pool = %lu\n",
+			uncached_total, cached_total, secure_total);
+		pr_info("pool total (uncached + cached + secure) = %lu\n",
+			uncached_total + cached_total + secure_total);
+		pr_info("-------------------------------------------------\n");
+	}
+
 	return 0;
 }
 
 static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
 {
 	int i;
-
-	for (i = 0; i < NUM_ORDERS; i++)
+	for (i = 0; i < num_orders; i++)
 		if (pools[i])
 			ion_page_pool_destroy(pools[i]);
 }
 
-static int ion_system_heap_create_pools(struct ion_page_pool **pools,
-					bool cached)
+/**
+ * ion_system_heap_create_pools - Creates pools for all orders
+ *
+ * If this fails you don't need to destroy any pools. It's all or
+ * nothing. If it succeeds you'll eventually need to use
+ * ion_system_heap_destroy_pools to destroy the pools.
+ */
+static int ion_system_heap_create_pools(struct device *dev,
+					struct ion_page_pool **pools)
 {
 	int i;
-	gfp_t gfp_flags = low_order_gfp_flags;
-
-	for (i = 0; i < NUM_ORDERS; i++) {
+	for (i = 0; i < num_orders; i++) {
 		struct ion_page_pool *pool;
+		gfp_t gfp_flags = low_order_gfp_flags;
 
-		if (orders[i] > 4)
+		if (orders[i])
 			gfp_flags = high_order_gfp_flags;
-
-		pool = ion_page_pool_create(gfp_flags, orders[i], cached);
+		pool = ion_page_pool_create(dev, gfp_flags, orders[i]);
 		if (!pool)
 			goto err_create_pool;
 		pools[i] = pool;
 	}
 	return 0;
-
 err_create_pool:
 	ion_system_heap_destroy_pools(pools);
-	return -ENOMEM;
+	return 1;
 }
 
-struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused)
+struct ion_heap *ion_system_heap_create(struct ion_platform_heap *data)
 {
 	struct ion_system_heap *heap;
+	int i;
+	int pools_size = sizeof(struct ion_page_pool *) * num_orders;
+	struct device *dev = data->priv;
 
 	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
 	if (!heap)
@@ -338,19 +791,50 @@
 	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
 	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
 
-	if (ion_system_heap_create_pools(heap->uncached_pools, false))
-		goto free_heap;
+	heap->uncached_pools = kzalloc(pools_size, GFP_KERNEL);
+	if (!heap->uncached_pools)
+		goto err_alloc_uncached_pools;
 
-	if (ion_system_heap_create_pools(heap->cached_pools, true))
-		goto destroy_uncached_pools;
+	heap->cached_pools = kzalloc(pools_size, GFP_KERNEL);
+	if (!heap->cached_pools)
+		goto err_alloc_cached_pools;
+
+	for (i = 0; i < VMID_LAST; i++) {
+		if (is_secure_vmid_valid(i)) {
+			heap->secure_pools[i] = kzalloc(pools_size, GFP_KERNEL);
+			if (!heap->secure_pools[i])
+				goto err_create_secure_pools;
+			if (ion_system_heap_create_pools(
+					dev, heap->secure_pools[i]))
+				goto err_create_secure_pools;
+		}
+	}
+
+	if (ion_system_heap_create_pools(dev, heap->uncached_pools))
+		goto err_create_uncached_pools;
+
+	if (ion_system_heap_create_pools(dev, heap->cached_pools))
+		goto err_create_cached_pools;
+
+	mutex_init(&heap->split_page_mutex);
 
 	heap->heap.debug_show = ion_system_heap_debug_show;
 	return &heap->heap;
 
-destroy_uncached_pools:
+err_create_cached_pools:
 	ion_system_heap_destroy_pools(heap->uncached_pools);
-
-free_heap:
+err_create_uncached_pools:
+	kfree(heap->cached_pools);
+err_create_secure_pools:
+	for (i = 0; i < VMID_LAST; i++) {
+		if (heap->secure_pools[i]) {
+			ion_system_heap_destroy_pools(heap->secure_pools[i]);
+			kfree(heap->secure_pools[i]);
+		}
+	}
+err_alloc_cached_pools:
+	kfree(heap->uncached_pools);
+err_alloc_uncached_pools:
 	kfree(heap);
 	return ERR_PTR(-ENOMEM);
 }
@@ -360,12 +844,20 @@
 	struct ion_system_heap *sys_heap = container_of(heap,
 							struct ion_system_heap,
 							heap);
-	int i;
+	int i, j;
 
-	for (i = 0; i < NUM_ORDERS; i++) {
-		ion_page_pool_destroy(sys_heap->uncached_pools[i]);
-		ion_page_pool_destroy(sys_heap->cached_pools[i]);
+	for (i = 0; i < VMID_LAST; i++) {
+		if (!is_secure_vmid_valid(i))
+			continue;
+		for (j = 0; j < num_orders; j++)
+			ion_secure_page_pool_shrink(sys_heap, i, j, UINT_MAX);
+
+		ion_system_heap_destroy_pools(sys_heap->secure_pools[i]);
 	}
+	ion_system_heap_destroy_pools(sys_heap->uncached_pools);
+	ion_system_heap_destroy_pools(sys_heap->cached_pools);
+	kfree(sys_heap->uncached_pools);
+	kfree(sys_heap->cached_pools);
 	kfree(sys_heap);
 }
 
@@ -380,11 +872,12 @@
 	struct sg_table *table;
 	unsigned long i;
 	int ret;
+	struct device *dev = heap->priv;
 
 	if (align > (PAGE_SIZE << order))
 		return -EINVAL;
 
-	page = alloc_pages(low_order_gfp_flags, order);
+	page = alloc_pages(low_order_gfp_flags | __GFP_ZERO, order);
 	if (!page)
 		return -ENOMEM;
 
@@ -394,36 +887,34 @@
 	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
 		__free_page(page + i);
 
-	table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
 	if (!table) {
 		ret = -ENOMEM;
-		goto free_pages;
+		goto out;
 	}
 
 	ret = sg_alloc_table(table, 1, GFP_KERNEL);
 	if (ret)
-		goto free_table;
+		goto out;
 
 	sg_set_page(table->sgl, page, len, 0);
 
-	buffer->sg_table = table;
+	buffer->priv_virt = table;
 
-	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
+	ion_pages_sync_for_device(dev, page, len, DMA_BIDIRECTIONAL);
 
 	return 0;
 
-free_table:
-	kfree(table);
-free_pages:
+out:
 	for (i = 0; i < len >> PAGE_SHIFT; i++)
 		__free_page(page + i);
-
+	kfree(table);
 	return ret;
 }
 
-static void ion_system_contig_heap_free(struct ion_buffer *buffer)
+void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
-	struct sg_table *table = buffer->sg_table;
+	struct sg_table *table = buffer->priv_virt;
 	struct page *page = sg_page(table->sgl);
 	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 	unsigned long i;
@@ -434,9 +925,34 @@
 	kfree(table);
 }
 
+static int ion_system_contig_heap_phys(struct ion_heap *heap,
+				       struct ion_buffer *buffer,
+				       ion_phys_addr_t *addr, size_t *len)
+{
+	struct sg_table *table = buffer->priv_virt;
+	struct page *page = sg_page(table->sgl);
+	*addr = page_to_phys(page);
+	*len = buffer->size;
+	return 0;
+}
+
+struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
+						struct ion_buffer *buffer)
+{
+	return buffer->priv_virt;
+}
+
+void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
+				      struct ion_buffer *buffer)
+{
+}
+
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
+	.phys = ion_system_contig_heap_phys,
+	.map_dma = ion_system_contig_heap_map_dma,
+	.unmap_dma = ion_system_contig_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
diff --git a/drivers/staging/android/ion/ion_system_secure_heap.c b/drivers/staging/android/ion/ion_system_secure_heap.c
new file mode 100644
index 0000000..5bf484b
--- /dev/null
+++ b/drivers/staging/android/ion/ion_system_secure_heap.c
@@ -0,0 +1,428 @@
+/*
+ *
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/msm_ion.h>
+#include <soc/qcom/secure_buffer.h>
+#include <linux/workqueue.h>
+#include <linux/uaccess.h>
+#include "ion.h"
+#include "ion_priv.h"
+
+struct ion_system_secure_heap {
+	struct ion_heap *sys_heap;
+	struct ion_heap heap;
+
+	/* Protects prefetch_list */
+	spinlock_t work_lock;
+	bool destroy_heap;
+	struct list_head prefetch_list;
+	struct delayed_work prefetch_work;
+};
+
+struct prefetch_info {
+	struct list_head list;
+	int vmid;
+	size_t size;
+	bool shrink;
+};
+
+/*
+ * The video client may not hold the last reference count on the
+ * ion_buffer(s). Delay for a short time after the video client sends
+ * the IOC_DRAIN event to increase the chance that the reference
+ * count drops to zero. Time in milliseconds.
+ */
+#define SHRINK_DELAY 1000
+
+static bool is_cp_flag_present(unsigned long flags)
+{
+	/* any of the content-protection flags set? */
+	return flags & (ION_FLAG_CP_TOUCH |
+			ION_FLAG_CP_BITSTREAM |
+			ION_FLAG_CP_PIXEL |
+			ION_FLAG_CP_NON_PIXEL |
+			ION_FLAG_CP_CAMERA);
+}
+
+int ion_system_secure_heap_unassign_sg(struct sg_table *sgt, int source_vmid)
+{
+	u32 dest_vmid = VMID_HLOS;
+	u32 dest_perms = PERM_READ | PERM_WRITE | PERM_EXEC;
+	struct scatterlist *sg;
+	int ret, i;
+
+	ret = hyp_assign_table(sgt, &source_vmid, 1,
+			       &dest_vmid, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Not freeing memory since assign call failed. VMID %d\n",
+		       __func__, source_vmid);
+		return -ENXIO;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		ClearPagePrivate(sg_page(sg));
+	return 0;
+}
+
+int ion_system_secure_heap_assign_sg(struct sg_table *sgt, int dest_vmid)
+{
+	u32 source_vmid = VMID_HLOS;
+	u32 dest_perms = PERM_READ | PERM_WRITE;
+	struct scatterlist *sg;
+	int ret, i;
+
+	ret = hyp_assign_table(sgt, &source_vmid, 1,
+			       &dest_vmid, &dest_perms, 1);
+	if (ret) {
+		pr_err("%s: Assign call failed. VMID %d\n",
+		       __func__, dest_vmid);
+		return -EINVAL;
+	}
+
+	for_each_sg(sgt->sgl, sg, sgt->nents, i)
+		SetPagePrivate(sg_page(sg));
+	return 0;
+}
+
+static void ion_system_secure_heap_free(struct ion_buffer *buffer)
+{
+	struct ion_heap *heap = buffer->heap;
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+	buffer->heap = secure_heap->sys_heap;
+	secure_heap->sys_heap->ops->free(buffer);
+}
+
+static int ion_system_secure_heap_allocate(
+					struct ion_heap *heap,
+					struct ion_buffer *buffer,
+					unsigned long size, unsigned long align,
+					unsigned long flags)
+{
+	int ret = 0;
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	if (!ion_heap_is_system_secure_heap_type(secure_heap->heap.type) ||
+	    !is_cp_flag_present(flags)) {
+		pr_info("%s: Incorrect heap type or incorrect flags\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ret = secure_heap->sys_heap->ops->allocate(secure_heap->sys_heap,
+						buffer, size, align, flags);
+	if (ret) {
+		pr_info("%s: Failed to get allocation for %s, ret = %d\n",
+			__func__, heap->name, ret);
+		return ret;
+	}
+	return ret;
+}
+
+static void process_one_prefetch(struct ion_heap *sys_heap,
+				 struct prefetch_info *info)
+{
+	struct ion_buffer buffer;
+	struct sg_table *sg_table;
+	int ret;
+
+	buffer.heap = sys_heap;
+	buffer.flags = 0;
+
+	ret = sys_heap->ops->allocate(sys_heap, &buffer, info->size,
+						PAGE_SIZE, buffer.flags);
+	if (ret) {
+		pr_debug("%s: Failed to prefetch 0x%zx, ret = %d\n",
+			 __func__, info->size, ret);
+		return;
+	}
+
+	sg_table = sys_heap->ops->map_dma(sys_heap, &buffer);
+	if (IS_ERR_OR_NULL(sg_table))
+		goto out;
+
+	ret = ion_system_secure_heap_assign_sg(sg_table,
+					       get_secure_vmid(info->vmid));
+	if (ret)
+		goto unmap;
+
+	/* Now free it to the secure heap */
+	buffer.heap = sys_heap;
+	buffer.flags = info->vmid;
+
+unmap:
+	sys_heap->ops->unmap_dma(sys_heap, &buffer);
+out:
+	sys_heap->ops->free(&buffer);
+}
+
+static void process_one_shrink(struct ion_heap *sys_heap,
+			       struct prefetch_info *info)
+{
+	struct ion_buffer buffer;
+	size_t pool_size, size;
+	int ret;
+
+	buffer.heap = sys_heap;
+	buffer.flags = info->vmid;
+
+	pool_size = ion_system_heap_secure_page_pool_total(sys_heap,
+							   info->vmid);
+	size = min(pool_size, info->size);
+	ret = sys_heap->ops->allocate(sys_heap, &buffer, size, PAGE_SIZE,
+				      buffer.flags);
+	if (ret) {
+		pr_debug("%s: Failed to shrink 0x%zx, ret = %d\n",
+			 __func__, info->size, ret);
+		return;
+	}
+
+	buffer.private_flags = ION_PRIV_FLAG_SHRINKER_FREE;
+	sys_heap->ops->free(&buffer);
+}
+
+static void ion_system_secure_heap_prefetch_work(struct work_struct *work)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(work,
+						struct ion_system_secure_heap,
+						prefetch_work.work);
+	struct ion_heap *sys_heap = secure_heap->sys_heap;
+	struct prefetch_info *info, *tmp;
+	unsigned long flags;
+
+	spin_lock_irqsave(&secure_heap->work_lock, flags);
+	list_for_each_entry_safe(info, tmp,
+				 &secure_heap->prefetch_list, list) {
+		list_del(&info->list);
+		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+
+		if (info->shrink)
+			process_one_shrink(sys_heap, info);
+		else
+			process_one_prefetch(sys_heap, info);
+
+		kfree(info);
+		spin_lock_irqsave(&secure_heap->work_lock, flags);
+	}
+	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+}
+
+static int alloc_prefetch_info(
+			struct ion_prefetch_regions __user *user_regions,
+			bool shrink, struct list_head *items)
+{
+	struct prefetch_info *info;
+	size_t __user *user_sizes;
+	int err;
+	unsigned int nr_sizes, vmid, i;
+
+	err = get_user(nr_sizes, &user_regions->nr_sizes);
+	err |= get_user(user_sizes, &user_regions->sizes);
+	err |= get_user(vmid, &user_regions->vmid);
+	if (err)
+		return -EFAULT;
+
+	if (!is_secure_vmid_valid(get_secure_vmid(vmid)))
+		return -EINVAL;
+
+	if (nr_sizes > 0x10)
+		return -EINVAL;
+
+	for (i = 0; i < nr_sizes; i++) {
+		info = kzalloc(sizeof(*info), GFP_KERNEL);
+		if (!info)
+			return -ENOMEM;
+
+		err = get_user(info->size, &user_sizes[i]);
+		if (err)
+			goto out_free;
+
+		info->vmid = vmid;
+		info->shrink = shrink;
+		INIT_LIST_HEAD(&info->list);
+		list_add_tail(&info->list, items);
+	}
+	return err;
+out_free:
+	kfree(info);
+	return err;
+}
+
+static int __ion_system_secure_heap_resize(struct ion_heap *heap, void *ptr,
+					   bool shrink)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+	struct ion_prefetch_data *data = ptr;
+	int i, ret = 0;
+	struct prefetch_info *info, *tmp;
+	unsigned long flags;
+	LIST_HEAD(items);
+
+	if ((int)heap->type != ION_HEAP_TYPE_SYSTEM_SECURE)
+		return -EINVAL;
+
+	if (data->nr_regions > 0x10)
+		return -EINVAL;
+
+	for (i = 0; i < data->nr_regions; i++) {
+		ret = alloc_prefetch_info(&data->regions[i], shrink, &items);
+		if (ret)
+			goto out_free;
+	}
+
+	spin_lock_irqsave(&secure_heap->work_lock, flags);
+	if (secure_heap->destroy_heap) {
+		spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+		goto out_free;
+	}
+	list_splice_init(&items, &secure_heap->prefetch_list);
+	schedule_delayed_work(&secure_heap->prefetch_work,
+			      shrink ? msecs_to_jiffies(SHRINK_DELAY) : 0);
+	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+
+	return 0;
+
+out_free:
+	list_for_each_entry_safe(info, tmp, &items, list) {
+		list_del(&info->list);
+		kfree(info);
+	}
+	return ret;
+}
+
+int ion_system_secure_heap_prefetch(struct ion_heap *heap, void *ptr)
+{
+	return __ion_system_secure_heap_resize(heap, ptr, false);
+}
+
+int ion_system_secure_heap_drain(struct ion_heap *heap, void *ptr)
+{
+	return __ion_system_secure_heap_resize(heap, ptr, true);
+}
+
+static struct sg_table *ion_system_secure_heap_map_dma(
+			struct ion_heap *heap, struct ion_buffer *buffer)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	return secure_heap->sys_heap->ops->map_dma(secure_heap->sys_heap,
+							buffer);
+}
+
+static void ion_system_secure_heap_unmap_dma(struct ion_heap *heap,
+					     struct ion_buffer *buffer)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	secure_heap->sys_heap->ops->unmap_dma(secure_heap->sys_heap,
+							buffer);
+}
+
+static void *ion_system_secure_heap_map_kernel(struct ion_heap *heap,
+					       struct ion_buffer *buffer)
+{
+	pr_info("%s: Kernel mapping from secure heap %s disallowed\n",
+		__func__, heap->name);
+	return ERR_PTR(-EINVAL);
+}
+
+static void ion_system_secure_heap_unmap_kernel(struct ion_heap *heap,
+						struct ion_buffer *buffer)
+{
+}
+
+static int ion_system_secure_heap_map_user(struct ion_heap *mapper,
+					   struct ion_buffer *buffer,
+					   struct vm_area_struct *vma)
+{
+	pr_info("%s: Mapping from secure heap %s disallowed\n",
+		__func__, mapper->name);
+	return -EINVAL;
+}
+
+static int ion_system_secure_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
+					 int nr_to_scan)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+
+	return secure_heap->sys_heap->ops->shrink(secure_heap->sys_heap,
+						gfp_mask, nr_to_scan);
+}
+
+static struct ion_heap_ops system_secure_heap_ops = {
+	.allocate = ion_system_secure_heap_allocate,
+	.free = ion_system_secure_heap_free,
+	.map_dma = ion_system_secure_heap_map_dma,
+	.unmap_dma = ion_system_secure_heap_unmap_dma,
+	.map_kernel = ion_system_secure_heap_map_kernel,
+	.unmap_kernel = ion_system_secure_heap_unmap_kernel,
+	.map_user = ion_system_secure_heap_map_user,
+	.shrink = ion_system_secure_heap_shrink,
+};
+
+struct ion_heap *ion_system_secure_heap_create(struct ion_platform_heap *unused)
+{
+	struct ion_system_secure_heap *heap;
+
+	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	if (!heap)
+		return ERR_PTR(-ENOMEM);
+	heap->heap.ops = &system_secure_heap_ops;
+	heap->heap.type = ION_HEAP_TYPE_SYSTEM_SECURE;
+	heap->sys_heap = get_ion_heap(ION_SYSTEM_HEAP_ID);
+
+	heap->destroy_heap = false;
+	heap->work_lock = __SPIN_LOCK_UNLOCKED(heap->work_lock);
+	INIT_LIST_HEAD(&heap->prefetch_list);
+	INIT_DELAYED_WORK(&heap->prefetch_work,
+			  ion_system_secure_heap_prefetch_work);
+	return &heap->heap;
+}
+
+void ion_system_secure_heap_destroy(struct ion_heap *heap)
+{
+	struct ion_system_secure_heap *secure_heap = container_of(heap,
+						struct ion_system_secure_heap,
+						heap);
+	unsigned long flags;
+	LIST_HEAD(items);
+	struct prefetch_info *info, *tmp;
+
+	/* Stop any pending/future work */
+	spin_lock_irqsave(&secure_heap->work_lock, flags);
+	secure_heap->destroy_heap = true;
+	list_splice_init(&secure_heap->prefetch_list, &items);
+	spin_unlock_irqrestore(&secure_heap->work_lock, flags);
+
+	cancel_delayed_work_sync(&secure_heap->prefetch_work);
+
+	list_for_each_entry_safe(info, tmp, &items, list) {
+		list_del(&info->list);
+		kfree(info);
+	}
+
+	kfree(heap);
+}
diff --git a/drivers/staging/android/ion/msm/Makefile b/drivers/staging/android/ion/msm/Makefile
new file mode 100644
index 0000000..c4c01c4
--- /dev/null
+++ b/drivers/staging/android/ion/msm/Makefile
@@ -0,0 +1,4 @@
+obj-y += msm_ion.o
+ifdef CONFIG_COMPAT
+obj-y += compat_msm_ion.o
+endif
diff --git a/drivers/staging/android/ion/msm/compat_msm_ion.c b/drivers/staging/android/ion/msm/compat_msm_ion.c
new file mode 100644
index 0000000..0759918
--- /dev/null
+++ b/drivers/staging/android/ion/msm/compat_msm_ion.c
@@ -0,0 +1,207 @@
+/* Copyright (c) 2014,2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/ion.h>
+#include <linux/msm_ion.h>
+#include <linux/uaccess.h>
+#include "../ion_priv.h"
+#include "../compat_ion.h"
+
+struct compat_ion_flush_data {
+	compat_ion_user_handle_t handle;
+	compat_int_t fd;
+	compat_uptr_t vaddr;
+	compat_uint_t offset;
+	compat_uint_t length;
+};
+
+struct compat_ion_prefetch_regions {
+	compat_uint_t vmid;
+	compat_uptr_t sizes;
+	compat_uint_t nr_sizes;
+};
+
+struct compat_ion_prefetch_data {
+	compat_int_t heap_id;
+	compat_ulong_t len;
+	compat_uptr_t regions;
+	compat_uint_t nr_regions;
+};
+
+#define COMPAT_ION_IOC_CLEAN_CACHES    _IOWR(ION_IOC_MSM_MAGIC, 0, \
+						struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_INV_CACHES      _IOWR(ION_IOC_MSM_MAGIC, 1, \
+						struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_CLEAN_INV_CACHES        _IOWR(ION_IOC_MSM_MAGIC, 2, \
+						struct compat_ion_flush_data)
+#define COMPAT_ION_IOC_PREFETCH                _IOWR(ION_IOC_MSM_MAGIC, 3, \
+						struct compat_ion_prefetch_data)
+#define COMPAT_ION_IOC_DRAIN                   _IOWR(ION_IOC_MSM_MAGIC, 4, \
+						struct compat_ion_prefetch_data)
+
+static int compat_get_ion_flush_data(
+			struct compat_ion_flush_data __user *data32,
+			struct ion_flush_data __user *data)
+{
+	compat_ion_user_handle_t h;
+	compat_int_t i;
+	compat_uptr_t u;
+	compat_ulong_t l;
+	int err;
+
+	err = get_user(h, &data32->handle);
+	err |= put_user(h, &data->handle);
+	err |= get_user(i, &data32->fd);
+	err |= put_user(i, &data->fd);
+	err |= get_user(u, &data32->vaddr);
+	/* upper bits won't get set, zero them */
+	err |= put_user(NULL, &data->vaddr);
+	err |= put_user(u, (compat_uptr_t *)&data->vaddr);
+	err |= get_user(l, &data32->offset);
+	err |= put_user(l, &data->offset);
+	err |= get_user(l, &data32->length);
+	err |= put_user(l, &data->length);
+
+	return err;
+}
+
+static int compat_get_ion_prefetch_data(
+			struct compat_ion_prefetch_data __user *data32,
+			struct ion_prefetch_data __user *data,
+			size_t stack_offset)
+{
+	compat_int_t i;
+	compat_ulong_t l;
+	compat_uint_t u;
+	int err, j, k;
+	compat_uint_t nr_regions, nr_sizes;
+	struct compat_ion_prefetch_regions __user *regions32;
+	struct ion_prefetch_regions __user *regions;
+	compat_uptr_t ptr;
+
+	err = get_user(i, &data32->heap_id);
+	err |= put_user(i, &data->heap_id);
+	err |= get_user(l, &data32->len);
+	err |= put_user(l, &data->len);
+	err |= get_user(nr_regions, &data32->nr_regions);
+	err |= put_user(nr_regions, &data->nr_regions);
+	err |= get_user(ptr, &data32->regions);
+	regions32 = compat_ptr(ptr);
+	if (err)
+		return err;
+
+	stack_offset += nr_regions * sizeof(*regions);
+	regions = compat_alloc_user_space(stack_offset);
+	if (!regions)
+		return -EFAULT;
+	err |= put_user(regions, &data->regions);
+
+	for (k = 0; k < nr_regions; k++) {
+		compat_size_t __user *sizes32;
+		size_t __user *sizes;
+
+		err |= get_user(u, &regions32[k].vmid);
+		err |= put_user(u, &regions[k].vmid);
+		err |= get_user(nr_sizes, &regions32[k].nr_sizes);
+		err |= put_user(nr_sizes, &regions[k].nr_sizes);
+		err |= get_user(ptr, &regions32[k].sizes);
+		sizes32 = compat_ptr(ptr);
+		if (err)
+			return -EFAULT;
+
+		stack_offset += nr_sizes * sizeof(*sizes);
+		sizes = compat_alloc_user_space(stack_offset);
+		if (!sizes)
+			return -EFAULT;
+		err |= put_user(sizes, &regions[k].sizes);
+
+		for (j = 0; j < nr_sizes; j++) {
+			compat_size_t s;
+
+			err |= get_user(s, &sizes32[j]);
+			err |= put_user(s, &sizes[j]);
+		}
+	}
+
+	return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_ION_IOC_CLEAN_CACHES:
+		return ION_IOC_CLEAN_CACHES;
+	case COMPAT_ION_IOC_INV_CACHES:
+		return ION_IOC_INV_CACHES;
+	case COMPAT_ION_IOC_CLEAN_INV_CACHES:
+		return ION_IOC_CLEAN_INV_CACHES;
+	case COMPAT_ION_IOC_PREFETCH:
+		return ION_IOC_PREFETCH;
+	case COMPAT_ION_IOC_DRAIN:
+		return ION_IOC_DRAIN;
+	default:
+		return cmd;
+	}
+}
+
+long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd,
+			  unsigned long arg)
+{
+	switch (cmd) {
+	case COMPAT_ION_IOC_CLEAN_CACHES:
+	case COMPAT_ION_IOC_INV_CACHES:
+	case COMPAT_ION_IOC_CLEAN_INV_CACHES:
+	{
+		struct compat_ion_flush_data __user *data32;
+		struct ion_flush_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_flush_data(data32, data);
+		if (err)
+			return err;
+
+		return msm_ion_custom_ioctl(client, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	case COMPAT_ION_IOC_PREFETCH:
+	case COMPAT_ION_IOC_DRAIN:
+	{
+		struct compat_ion_prefetch_data __user *data32;
+		struct ion_prefetch_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_prefetch_data(data32, data, sizeof(*data));
+		if (err)
+			return err;
+
+		return msm_ion_custom_ioctl(client, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	default:
+		if (is_compat_task())
+			return -ENOIOCTLCMD;
+		else
+			return msm_ion_custom_ioctl(client, cmd, arg);
+	}
+}
diff --git a/drivers/staging/android/ion/msm/compat_msm_ion.h b/drivers/staging/android/ion/msm/compat_msm_ion.h
new file mode 100644
index 0000000..64b5903
--- /dev/null
+++ b/drivers/staging/android/ion/msm/compat_msm_ion.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2014,2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#include <linux/ion.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_msm_ion_ioctl(struct ion_client *client, unsigned int cmd,
+			  unsigned long arg);
+
+#define compat_ion_user_handle_t compat_int_t
+
+#else
+
+#define compat_msm_ion_ioctl  msm_ion_custom_ioctl
+
+#endif
+#endif
diff --git a/drivers/staging/android/ion/msm/msm_ion.c b/drivers/staging/android/ion/msm/msm_ion.c
new file mode 100644
index 0000000..c728b30
--- /dev/null
+++ b/drivers/staging/android/ion/msm/msm_ion.c
@@ -0,0 +1,1052 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/mm.h>
+#include <linux/mm_types.h>
+#include <linux/sched.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+#include <linux/memblock.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/cma.h>
+#include <linux/module.h>
+#include <linux/show_mem_notifier.h>
+#include <asm/cacheflush.h>
+#include "../ion_priv.h"
+#include "compat_msm_ion.h"
+#include <soc/qcom/secure_buffer.h>
+
+#define ION_COMPAT_STR	"qcom,msm-ion"
+
+static struct ion_device *idev;
+static int num_heaps;
+static struct ion_heap **heaps;
+
+struct ion_heap_desc {
+	unsigned int id;
+	enum ion_heap_type type;
+	const char *name;
+	unsigned int permission_type;
+};
+
+#ifdef CONFIG_OF
+static struct ion_heap_desc ion_heap_meta[] = {
+	{
+		.id	= ION_SYSTEM_HEAP_ID,
+		.name	= ION_SYSTEM_HEAP_NAME,
+	},
+	{
+		.id	= ION_SYSTEM_CONTIG_HEAP_ID,
+		.name	= ION_KMALLOC_HEAP_NAME,
+	},
+	{
+		.id	= ION_SECURE_HEAP_ID,
+		.name	= ION_SECURE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MM_HEAP_ID,
+		.name	= ION_MM_HEAP_NAME,
+		.permission_type = IPT_TYPE_MM_CARVEOUT,
+	},
+	{
+		.id	= ION_MM_FIRMWARE_HEAP_ID,
+		.name	= ION_MM_FIRMWARE_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_MFC_HEAP_ID,
+		.name	= ION_MFC_HEAP_NAME,
+		.permission_type = IPT_TYPE_MFC_SHAREDMEM,
+	},
+	{
+		.id	= ION_SF_HEAP_ID,
+		.name	= ION_SF_HEAP_NAME,
+	},
+	{
+		.id	= ION_QSECOM_HEAP_ID,
+		.name	= ION_QSECOM_HEAP_NAME,
+	},
+	{
+		.id	= ION_SPSS_HEAP_ID,
+		.name	= ION_SPSS_HEAP_NAME,
+	},
+	{
+		.id	= ION_AUDIO_HEAP_ID,
+		.name	= ION_AUDIO_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL1_HEAP_ID,
+		.name	= ION_PIL1_HEAP_NAME,
+	},
+	{
+		.id	= ION_PIL2_HEAP_ID,
+		.name	= ION_PIL2_HEAP_NAME,
+	},
+	{
+		.id	= ION_CP_WB_HEAP_ID,
+		.name	= ION_WB_HEAP_NAME,
+	},
+	{
+		.id	= ION_CAMERA_HEAP_ID,
+		.name	= ION_CAMERA_HEAP_NAME,
+	},
+	{
+		.id	= ION_ADSP_HEAP_ID,
+		.name	= ION_ADSP_HEAP_NAME,
+	},
+	{
+		.id	= ION_SECURE_DISPLAY_HEAP_ID,
+		.name	= ION_SECURE_DISPLAY_HEAP_NAME,
+	}
+};
+#endif
+
+static int msm_ion_lowmem_notifier(struct notifier_block *nb,
+				   unsigned long action, void *data)
+{
+	show_ion_usage(idev);
+	return 0;
+}
+
+static struct notifier_block msm_ion_nb = {
+	.notifier_call = msm_ion_lowmem_notifier,
+};
+
+struct ion_client *msm_ion_client_create(const char *name)
+{
+	/*
+	 * The assumption is that if there is a NULL device, the ion
+	 * driver has not yet probed.
+	 */
+	if (!idev)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (IS_ERR(idev))
+		return (struct ion_client *)idev;
+
+	return ion_client_create(idev, name);
+}
+EXPORT_SYMBOL(msm_ion_client_create);
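+
+/*
+ * Illustrative usage sketch (not part of the original patch). A kernel client
+ * would typically pair this with the standard ION allocation calls; the heap
+ * mask and flags below are assumptions chosen for the example only:
+ *
+ *	struct ion_client *client = msm_ion_client_create("my-driver");
+ *
+ *	if (!IS_ERR(client)) {
+ *		struct ion_handle *handle = ion_alloc(client, SZ_1M, SZ_4K,
+ *				ION_HEAP(ION_SYSTEM_HEAP_ID), ION_FLAG_CACHED);
+ *		...
+ *		ion_free(client, handle);
+ *		ion_client_destroy(client);
+ *	}
+ */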
+
+static int ion_no_pages_cache_ops(
+			struct ion_client *client,
+			struct ion_handle *handle,
+			void *vaddr,
+			unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	unsigned long size_to_vmap, total_size;
+	int i, j, ret;
+	void *ptr = NULL;
+	ion_phys_addr_t buff_phys = 0;
+	ion_phys_addr_t buff_phys_start = 0;
+	size_t buf_length = 0;
+
+	ret = ion_phys(client, handle, &buff_phys_start, &buf_length);
+	if (ret)
+		return -EINVAL;
+
+	buff_phys = buff_phys_start;
+
+	if (!vaddr) {
+		/*
+		 * Split the vmalloc space into smaller regions in
+		 * order to clean and/or invalidate the cache.
+		 */
+		size_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8);
+		total_size = buf_length;
+
+		for (i = 0; i < total_size; i += size_to_vmap) {
+			size_to_vmap = min(size_to_vmap, total_size - i);
+			for (j = 0; !ptr && j < 10 && size_to_vmap; ++j) {
+				ptr = ioremap(buff_phys, size_to_vmap);
+				if (ptr) {
+					switch (cmd) {
+					case ION_IOC_CLEAN_CACHES:
+						__dma_clean_area(
+							ptr,
+							size_to_vmap);
+						break;
+					case ION_IOC_INV_CACHES:
+						__dma_inv_area(
+							ptr,
+							size_to_vmap);
+						break;
+					case ION_IOC_CLEAN_INV_CACHES:
+						__dma_flush_area(
+							ptr,
+							size_to_vmap);
+						break;
+					default:
+						return -EINVAL;
+					}
+					buff_phys += size_to_vmap;
+				} else {
+					size_to_vmap >>= 1;
+				}
+			}
+			if (!ptr) {
+				pr_err("Couldn't io-remap the memory\n");
+				return -EINVAL;
+			}
+			iounmap(ptr);
+			/* force a fresh mapping for the next chunk */
+			ptr = NULL;
+		}
+	} else {
+		switch (cmd) {
+		case ION_IOC_CLEAN_CACHES:
+			__dma_clean_area(vaddr, length);
+			break;
+		case ION_IOC_INV_CACHES:
+			__dma_inv_area(vaddr, length);
+			break;
+		case ION_IOC_CLEAN_INV_CACHES:
+			__dma_flush_area(vaddr, length);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static void __do_cache_ops(struct page *page, unsigned int offset,
+			   unsigned int length,
+			   void (*op)(const void *, size_t))
+{
+	unsigned int left = length;
+	unsigned long pfn;
+	void *vaddr;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	page = pfn_to_page(pfn);
+	offset &= ~PAGE_MASK;
+
+	if (!PageHighMem(page)) {
+		vaddr = page_address(page) + offset;
+		op(vaddr, length);
+		goto out;
+	}
+
+	do {
+		unsigned int len;
+
+		len = left;
+		if (len + offset > PAGE_SIZE)
+			len = PAGE_SIZE - offset;
+
+		page = pfn_to_page(pfn);
+		vaddr = kmap_atomic(page);
+		op(vaddr + offset, len);
+		kunmap_atomic(vaddr);
+
+		offset = 0;
+		pfn++;
+		left -= len;
+	} while (left);
+
+out:
+	return;
+}
+
+static int ion_pages_cache_ops(
+			struct ion_client *client,
+			struct ion_handle *handle,
+			void *vaddr, unsigned int offset, unsigned int length,
+			unsigned int cmd)
+{
+	struct sg_table *table = NULL;
+	struct scatterlist *sg;
+	int i;
+	unsigned int len = 0;
+	void (*op)(const void *, size_t);
+
+
+	table = ion_sg_table(client, handle);
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+		op = __dma_clean_area;
+		break;
+	case ION_IOC_INV_CACHES:
+		op = __dma_inv_area;
+		break;
+	case ION_IOC_CLEAN_INV_CACHES:
+		op = __dma_flush_area;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		len += sg->length;
+		if (len < offset)
+			continue;
+
+		__do_cache_ops(sg_page(sg), sg->offset, sg->length, op);
+
+		if (len > length + offset)
+			break;
+	}
+	return 0;
+}
+
+static int ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			   void *uaddr, unsigned long offset, unsigned long len,
+			   unsigned int cmd)
+{
+	int ret = -EINVAL;
+	unsigned long flags;
+	struct sg_table *table;
+	struct page *page;
+
+	ret = ion_handle_get_flags(client, handle, &flags);
+	if (ret)
+		return -EINVAL;
+
+	if (!ION_IS_CACHED(flags))
+		return 0;
+
+	if (flags & ION_FLAG_SECURE)
+		return 0;
+
+	table = ion_sg_table(client, handle);
+
+	if (IS_ERR_OR_NULL(table))
+		return PTR_ERR(table);
+
+	page = sg_page(table->sgl);
+
+	if (page)
+		ret = ion_pages_cache_ops(client, handle, uaddr,
+					  offset, len, cmd);
+	else
+		ret = ion_no_pages_cache_ops(client, handle, uaddr,
+					     offset, len, cmd);
+
+	return ret;
+}
+
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd)
+{
+	return ion_do_cache_op(client, handle, vaddr, 0, len, cmd);
+}
+EXPORT_SYMBOL(msm_ion_do_cache_op);
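+
+/*
+ * Illustrative sketch (not part of the original patch): a client that holds a
+ * kernel mapping of a cached buffer might clean it before a device read. The
+ * handle, mapping and length names are assumptions for the example:
+ *
+ *	void *vaddr = ion_map_kernel(client, handle);
+ *
+ *	if (!IS_ERR(vaddr))
+ *		msm_ion_do_cache_op(client, handle, vaddr, buf_len,
+ *				    ION_IOC_CLEAN_CACHES);
+ */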
+
+static void msm_ion_allocate(struct ion_platform_heap *heap)
+{
+	if (!heap->base && heap->extra_data) {
+		WARN(1, "Specifying carveout heaps without a base is deprecated. Convert to the DMA heap type instead");
+		return;
+	}
+}
+
+#ifdef CONFIG_OF
+static int msm_init_extra_data(struct device_node *node,
+			       struct ion_platform_heap *heap,
+			       const struct ion_heap_desc *heap_desc)
+{
+	int ret = 0;
+
+	switch ((int)heap->type) {
+	case ION_HEAP_TYPE_CARVEOUT:
+	{
+		heap->extra_data = kzalloc(sizeof(*heap->extra_data),
+					   GFP_KERNEL);
+		if (!heap->extra_data)
+			ret = -ENOMEM;
+		break;
+	}
+	default:
+		heap->extra_data = 0;
+		break;
+	}
+	return ret;
+}
+
+#define MAKE_HEAP_TYPE_MAPPING(h) { .name = #h, \
+			.heap_type = ION_HEAP_TYPE_##h, }
+
+static struct heap_types_info {
+	const char *name;
+	int heap_type;
+} heap_types_info[] = {
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM),
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM_CONTIG),
+	MAKE_HEAP_TYPE_MAPPING(CARVEOUT),
+	MAKE_HEAP_TYPE_MAPPING(CHUNK),
+	MAKE_HEAP_TYPE_MAPPING(DMA),
+	MAKE_HEAP_TYPE_MAPPING(SYSTEM_SECURE),
+	MAKE_HEAP_TYPE_MAPPING(HYP_CMA),
+};
+
+static int msm_ion_get_heap_type_from_dt_node(struct device_node *node,
+					      int *heap_type)
+{
+	const char *name;
+	int i, ret = -EINVAL;
+
+	ret = of_property_read_string(node, "qcom,ion-heap-type", &name);
+	if (ret)
+		goto out;
+	for (i = 0; i < ARRAY_SIZE(heap_types_info); ++i) {
+		if (!strcmp(heap_types_info[i].name, name)) {
+			*heap_type = heap_types_info[i].heap_type;
+			ret = 0;
+			goto out;
+		}
+	}
+	WARN(1, "Unknown heap type: %s. You might need to update heap_types_info in %s",
+	     name, __FILE__);
+out:
+	return ret;
+}
+
+static int msm_ion_populate_heap(struct device_node *node,
+				 struct ion_platform_heap *heap)
+{
+	unsigned int i;
+	int ret = -EINVAL, heap_type = -1;
+	unsigned int len = ARRAY_SIZE(ion_heap_meta);
+
+	for (i = 0; i < len; ++i) {
+		if (ion_heap_meta[i].id == heap->id) {
+			heap->name = ion_heap_meta[i].name;
+			ret = msm_ion_get_heap_type_from_dt_node(
+							node, &heap_type);
+			if (ret)
+				break;
+			heap->type = heap_type;
+			ret = msm_init_extra_data(node, heap,
+						  &ion_heap_meta[i]);
+			break;
+		}
+	}
+	if (ret)
+		pr_err("%s: Unable to populate heap, error: %d", __func__, ret);
+	return ret;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+	unsigned int i;
+
+	for (i = 0; i < pdata->nr; ++i)
+		kfree(pdata->heaps[i].extra_data);
+	kfree(pdata->heaps);
+	kfree(pdata);
+}
+
+static void msm_ion_get_heap_dt_data(struct device_node *node,
+				     struct ion_platform_heap *heap)
+{
+	struct device_node *pnode;
+
+	pnode = of_parse_phandle(node, "memory-region", 0);
+	if (pnode) {
+		const __be32 *basep;
+		u64 size;
+		u64 base;
+
+		basep = of_get_address(pnode,  0, &size, NULL);
+		if (!basep) {
+			base = cma_get_base(dev_get_cma_area(heap->priv));
+			size = cma_get_size(dev_get_cma_area(heap->priv));
+		} else {
+			base = of_translate_address(pnode, basep);
+			WARN(base == OF_BAD_ADDR, "Failed to parse DT node for heap %s\n",
+			     heap->name);
+		}
+		heap->base = base;
+		heap->size = size;
+		of_node_put(pnode);
+	}
+}
+
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	struct ion_platform_data *pdata = 0;
+	struct ion_platform_heap *heaps = NULL;
+	struct device_node *node;
+	struct platform_device *new_dev = NULL;
+	const struct device_node *dt_node = pdev->dev.of_node;
+	const __be32 *val;
+	int ret = -EINVAL;
+	u32 num_heaps = 0;
+	int idx = 0;
+
+	for_each_available_child_of_node(dt_node, node)
+		num_heaps++;
+
+	if (!num_heaps)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	heaps = kcalloc(num_heaps, sizeof(struct ion_platform_heap),
+			GFP_KERNEL);
+	if (!heaps) {
+		kfree(pdata);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	pdata->heaps = heaps;
+	pdata->nr = num_heaps;
+
+	for_each_available_child_of_node(dt_node, node) {
+		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
+		if (!new_dev) {
+			pr_err("Failed to create device %s\n", node->name);
+			goto free_heaps;
+		}
+
+		pdata->heaps[idx].priv = &new_dev->dev;
+		val = of_get_address(node, 0, NULL, NULL);
+		if (!val) {
+			pr_err("%s: Unable to find reg key", __func__);
+			goto free_heaps;
+		}
+		pdata->heaps[idx].id = (u32)of_read_number(val, 1);
+
+		ret = msm_ion_populate_heap(node, &pdata->heaps[idx]);
+		if (ret)
+			goto free_heaps;
+
+		msm_ion_get_heap_dt_data(node, &pdata->heaps[idx]);
+
+		++idx;
+	}
+	return pdata;
+
+free_heaps:
+	free_pdata(pdata);
+	return ERR_PTR(ret);
+}
+#else
+static struct ion_platform_data *msm_ion_parse_dt(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static void free_pdata(const struct ion_platform_data *pdata)
+{
+}
+#endif
+
+static int check_vaddr_bounds(unsigned long start, unsigned long end)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+	int ret = 1;
+
+	if (end < start)
+		goto out;
+
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			goto out;
+		if (end > vma->vm_end)
+			goto out;
+		ret = 0;
+	}
+
+out:
+	return ret;
+}
+
+int ion_heap_is_system_secure_heap_type(enum ion_heap_type type)
+{
+	return type == ((enum ion_heap_type)ION_HEAP_TYPE_SYSTEM_SECURE);
+}
+
+int ion_heap_allow_heap_secure(enum ion_heap_type type)
+{
+	return false;
+}
+
+bool is_secure_vmid_valid(int vmid)
+{
+	return (vmid == VMID_CP_TOUCH ||
+		vmid == VMID_CP_BITSTREAM ||
+		vmid == VMID_CP_PIXEL ||
+		vmid == VMID_CP_NON_PIXEL ||
+		vmid == VMID_CP_CAMERA ||
+		vmid == VMID_CP_SEC_DISPLAY ||
+		vmid == VMID_CP_APP);
+}
+
+int get_secure_vmid(unsigned long flags)
+{
+	if (flags & ION_FLAG_CP_TOUCH)
+		return VMID_CP_TOUCH;
+	if (flags & ION_FLAG_CP_BITSTREAM)
+		return VMID_CP_BITSTREAM;
+	if (flags & ION_FLAG_CP_PIXEL)
+		return VMID_CP_PIXEL;
+	if (flags & ION_FLAG_CP_NON_PIXEL)
+		return VMID_CP_NON_PIXEL;
+	if (flags & ION_FLAG_CP_CAMERA)
+		return VMID_CP_CAMERA;
+	if (flags & ION_FLAG_CP_SEC_DISPLAY)
+		return VMID_CP_SEC_DISPLAY;
+	if (flags & ION_FLAG_CP_APP)
+		return VMID_CP_APP;
+	return -EINVAL;
+}
+/* fix up the cases where the ioctl direction bits are incorrect */
+static unsigned int msm_ion_ioctl_dir(unsigned int cmd)
+{
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	case ION_IOC_PREFETCH:
+	case ION_IOC_DRAIN:
+		return _IOC_WRITE;
+	default:
+		return _IOC_DIR(cmd);
+	}
+}
+
+long msm_ion_custom_ioctl(struct ion_client *client,
+			  unsigned int cmd,
+			  unsigned long arg)
+{
+	unsigned int dir;
+	union {
+		struct ion_flush_data flush_data;
+		struct ion_prefetch_data prefetch_data;
+	} data;
+
+	dir = msm_ion_ioctl_dir(cmd);
+
+	if (_IOC_SIZE(cmd) > sizeof(data))
+		return -EINVAL;
+
+	if (dir & _IOC_WRITE)
+		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
+			return -EFAULT;
+
+	switch (cmd) {
+	case ION_IOC_CLEAN_CACHES:
+	case ION_IOC_INV_CACHES:
+	case ION_IOC_CLEAN_INV_CACHES:
+	{
+		unsigned long start, end;
+		struct ion_handle *handle = NULL;
+		int ret;
+		struct mm_struct *mm = current->active_mm;
+
+		if (data.flush_data.handle > 0) {
+			handle = ion_handle_get_by_id(
+					client, (int)data.flush_data.handle);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not find handle: %d\n",
+					__func__, (int)data.flush_data.handle);
+				return PTR_ERR(handle);
+			}
+		} else {
+			handle = ion_import_dma_buf_fd(client,
+						       data.flush_data.fd);
+			if (IS_ERR(handle)) {
+				pr_info("%s: Could not import handle: %p\n",
+					__func__, handle);
+				return -EINVAL;
+			}
+		}
+
+		down_read(&mm->mmap_sem);
+
+		start = (unsigned long)data.flush_data.vaddr;
+		end = (unsigned long)data.flush_data.vaddr
+			+ data.flush_data.length;
+
+		if (check_vaddr_bounds(start, end)) {
+			pr_err("%s: virtual address %p is out of bounds\n",
+			       __func__, data.flush_data.vaddr);
+			ret = -EINVAL;
+		} else {
+			ret = ion_do_cache_op(
+				client, handle, data.flush_data.vaddr,
+				data.flush_data.offset,
+				data.flush_data.length, cmd);
+		}
+		up_read(&mm->mmap_sem);
+
+		ion_free(client, handle);
+
+		if (ret < 0)
+			return ret;
+		break;
+	}
+	case ION_IOC_PREFETCH:
+	{
+		int ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+				     ION_HEAP_TYPE_SYSTEM_SECURE,
+				     (void *)&data.prefetch_data,
+				     ion_system_secure_heap_prefetch);
+		if (ret)
+			return ret;
+		break;
+	}
+	case ION_IOC_DRAIN:
+	{
+		int ret;
+
+		ret = ion_walk_heaps(client, data.prefetch_data.heap_id,
+				     ION_HEAP_TYPE_SYSTEM_SECURE,
+				     (void *)&data.prefetch_data,
+				     ion_system_secure_heap_drain);
+
+		if (ret)
+			return ret;
+		break;
+	}
+
+	default:
+		return -ENOTTY;
+	}
+	return 0;
+}
+
+#define MAX_VMAP_RETRIES 10
+
+/**
+ * An optimized page-zeroing function. It vmaps arrays of pages in large
+ * chunks to minimize the number of memsets and vmaps/vunmaps.
+ *
+ * Note that the `pages' array should be composed of all 4K pages.
+ *
+ * NOTE: This function does not guarantee synchronization of the caches
+ * and thus caller is responsible for handling any cache maintenance
+ * operations needed.
+ */
+int msm_ion_heap_pages_zero(struct page **pages, int num_pages)
+{
+	int i, j, npages_to_vmap;
+	void *ptr = NULL;
+
+	/*
+	 * As an optimization, we manually zero out all of the pages
+	 * in one fell swoop here. To safeguard against insufficient
+	 * vmalloc space, we only vmap `npages_to_vmap' at a time,
+	 * starting with a conservative estimate of 1/8 of the total
+	 * number of vmalloc pages available.
+	 */
+	npages_to_vmap = ((VMALLOC_END - VMALLOC_START) / 8)
+			>> PAGE_SHIFT;
+	for (i = 0; i < num_pages; i += npages_to_vmap) {
+		npages_to_vmap = min(npages_to_vmap, num_pages - i);
+		for (j = 0; !ptr && j < MAX_VMAP_RETRIES && npages_to_vmap;
+			++j) {
+			ptr = vmap(&pages[i], npages_to_vmap,
+				   VM_IOREMAP, PAGE_KERNEL);
+			if (!ptr)
+				npages_to_vmap >>= 1;
+		}
+		if (!ptr)
+			return -ENOMEM;
+
+		memset(ptr, 0, npages_to_vmap * PAGE_SIZE);
+		vunmap(ptr);
+	}
+
+	return 0;
+}
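+
+/*
+ * Worked example (illustrative): with a 256MB vmalloc window the initial
+ * chunk is 32MB (8192 4K pages). Each failed vmap() halves the chunk, and up
+ * to MAX_VMAP_RETRIES attempts are made per chunk before giving up with
+ * -ENOMEM.
+ */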
+
+int msm_ion_heap_alloc_pages_mem(struct pages_mem *pages_mem)
+{
+	struct page **pages;
+	unsigned int page_tbl_size;
+
+	pages_mem->free_fn = kfree;
+	page_tbl_size = sizeof(struct page *) * (pages_mem->size >> PAGE_SHIFT);
+	if (page_tbl_size > SZ_8K) {
+		/*
+		 * Do fallback to ensure we have a balance between
+		 * performance and availability.
+		 */
+		pages = kmalloc(page_tbl_size,
+				__GFP_COMP | __GFP_NORETRY |
+				__GFP_NOWARN);
+		if (!pages) {
+			pages = vmalloc(page_tbl_size);
+			pages_mem->free_fn = vfree;
+		}
+	} else {
+		pages = kmalloc(page_tbl_size, GFP_KERNEL);
+	}
+
+	if (!pages)
+		return -ENOMEM;
+
+	pages_mem->pages = pages;
+	return 0;
+}
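+
+/*
+ * Sizing note (illustrative): with 8-byte page pointers the page table
+ * exceeds SZ_8K once the buffer is larger than 1024 pages (4MB), at which
+ * point the allocation first tries a no-retry kmalloc() and then falls back
+ * to vmalloc(), recording the matching free function in @pages_mem.
+ */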
+
+void msm_ion_heap_free_pages_mem(struct pages_mem *pages_mem)
+{
+	pages_mem->free_fn(pages_mem->pages);
+}
+
+int msm_ion_heap_high_order_page_zero(struct device *dev, struct page *page,
+				      int order)
+{
+	int i, ret;
+	struct pages_mem pages_mem;
+	int npages = 1 << order;
+
+	pages_mem.size = npages * PAGE_SIZE;
+
+	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
+		return -ENOMEM;
+
+	for (i = 0; i < (1 << order); ++i)
+		pages_mem.pages[i] = page + i;
+
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_single_for_device(dev, page_to_phys(page), pages_mem.size,
+				   DMA_BIDIRECTIONAL);
+	msm_ion_heap_free_pages_mem(&pages_mem);
+	return ret;
+}
+
+int msm_ion_heap_sg_table_zero(struct device *dev, struct sg_table *table,
+			       size_t size)
+{
+	struct scatterlist *sg;
+	int i, j, ret = 0, npages = 0;
+	struct pages_mem pages_mem;
+
+	pages_mem.size = PAGE_ALIGN(size);
+
+	if (msm_ion_heap_alloc_pages_mem(&pages_mem))
+		return -ENOMEM;
+
+	for_each_sg(table->sgl, sg, table->nents, i) {
+		struct page *page = sg_page(sg);
+		unsigned long len = sg->length;
+		/* needed to make dma_sync_sg_for_device work: */
+		sg->dma_address = sg_phys(sg);
+
+		for (j = 0; j < len / PAGE_SIZE; j++)
+			pages_mem.pages[npages++] = page + j;
+	}
+
+	ret = msm_ion_heap_pages_zero(pages_mem.pages, npages);
+	dma_sync_sg_for_device(dev, table->sgl, table->nents,
+			       DMA_BIDIRECTIONAL);
+	msm_ion_heap_free_pages_mem(&pages_mem);
+	return ret;
+}
+
+static struct ion_heap *msm_ion_heap_create(struct ion_platform_heap *heap_data)
+{
+	struct ion_heap *heap = NULL;
+
+	switch ((int)heap_data->type) {
+	case ION_HEAP_TYPE_SYSTEM_SECURE:
+		heap = ion_system_secure_heap_create(heap_data);
+		break;
+	case ION_HEAP_TYPE_HYP_CMA:
+		heap = ion_cma_secure_heap_create(heap_data);
+		break;
+	default:
+		heap = ion_heap_create(heap_data);
+	}
+
+	if (IS_ERR_OR_NULL(heap)) {
+		pr_err("%s: error creating heap %s type %d base %pa size %zu\n",
+		       __func__, heap_data->name, heap_data->type,
+		       &heap_data->base, heap_data->size);
+		return ERR_PTR(-EINVAL);
+	}
+
+	heap->name = heap_data->name;
+	heap->id = heap_data->id;
+	heap->priv = heap_data->priv;
+	return heap;
+}
+
+static void msm_ion_heap_destroy(struct ion_heap *heap)
+{
+	if (!heap)
+		return;
+
+	switch ((int)heap->type) {
+	case ION_HEAP_TYPE_SYSTEM_SECURE:
+		ion_system_secure_heap_destroy(heap);
+		break;
+
+	case ION_HEAP_TYPE_HYP_CMA:
+		ion_cma_secure_heap_destroy(heap);
+		break;
+	default:
+		ion_heap_destroy(heap);
+	}
+}
+
+struct ion_heap *get_ion_heap(int heap_id)
+{
+	int i;
+	struct ion_heap *heap;
+
+	for (i = 0; i < num_heaps; i++) {
+		heap = heaps[i];
+		if (heap->id == heap_id)
+			return heap;
+	}
+
+	pr_err("%s: heap_id %d not found\n", __func__, heap_id);
+	return NULL;
+}
+
+static int msm_ion_probe(struct platform_device *pdev)
+{
+	static struct ion_device *new_dev;
+	struct ion_platform_data *pdata;
+	unsigned int pdata_needs_to_be_freed;
+	int err = -1;
+	int i;
+
+	if (pdev->dev.of_node) {
+		pdata = msm_ion_parse_dt(pdev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+		pdata_needs_to_be_freed = 1;
+	} else {
+		pdata = pdev->dev.platform_data;
+		pdata_needs_to_be_freed = 0;
+	}
+
+	num_heaps = pdata->nr;
+
+	heaps = kcalloc(pdata->nr, sizeof(struct ion_heap *), GFP_KERNEL);
+
+	if (!heaps) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	new_dev = ion_device_create(compat_msm_ion_ioctl);
+	if (IS_ERR_OR_NULL(new_dev)) {
+		/*
+		 * set this to the ERR to indicate to the clients
+		 * that Ion failed to probe.
+		 */
+		idev = new_dev;
+		err = PTR_ERR(new_dev);
+		goto out;
+	}
+
+	/* create the heaps as specified in the board file */
+	for (i = 0; i < num_heaps; i++) {
+		struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+		msm_ion_allocate(heap_data);
+
+		heap_data->has_outer_cache = pdata->has_outer_cache;
+		heaps[i] = msm_ion_heap_create(heap_data);
+		if (IS_ERR_OR_NULL(heaps[i])) {
+			heaps[i] = 0;
+			continue;
+		} else {
+			if (heap_data->size)
+				pr_info("ION heap %s created at %pa with size %zx\n",
+					heap_data->name,
+					&heap_data->base,
+					heap_data->size);
+			else
+				pr_info("ION heap %s created\n",
+					heap_data->name);
+		}
+
+		ion_device_add_heap(new_dev, heaps[i]);
+	}
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
+
+	platform_set_drvdata(pdev, new_dev);
+	/*
+	 * Intentionally set this at the very end to allow probes to be
+	 * deferred completely until Ion is set up.
+	 */
+	idev = new_dev;
+
+	show_mem_notifier_register(&msm_ion_nb);
+	return 0;
+
+out:
+	kfree(heaps);
+	if (pdata_needs_to_be_freed)
+		free_pdata(pdata);
+	return err;
+}
+
+static int msm_ion_remove(struct platform_device *pdev)
+{
+	struct ion_device *idev = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < num_heaps; i++)
+		msm_ion_heap_destroy(heaps[i]);
+
+	ion_device_destroy(idev);
+	kfree(heaps);
+	return 0;
+}
+
+static const struct of_device_id msm_ion_match_table[] = {
+	{.compatible = ION_COMPAT_STR},
+	{},
+};
+
+static struct platform_driver msm_ion_driver = {
+	.probe = msm_ion_probe,
+	.remove = msm_ion_remove,
+	.driver = {
+		.name = "ion-msm",
+		.of_match_table = msm_ion_match_table,
+	},
+};
+
+static int __init msm_ion_init(void)
+{
+	return platform_driver_register(&msm_ion_driver);
+}
+
+static void __exit msm_ion_exit(void)
+{
+	platform_driver_unregister(&msm_ion_driver);
+}
+
+subsys_initcall(msm_ion_init);
+module_exit(msm_ion_exit);
diff --git a/drivers/staging/android/ion/msm/msm_ion.h b/drivers/staging/android/ion/msm/msm_ion.h
new file mode 100644
index 0000000..68cc8b0
--- /dev/null
+++ b/drivers/staging/android/ion/msm/msm_ion.h
@@ -0,0 +1,192 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_MSM_ION_H
+#define _MSM_MSM_ION_H
+
+#include "../ion.h"
+#include "../../uapi/msm_ion.h"
+
+enum ion_permission_type {
+	IPT_TYPE_MM_CARVEOUT = 0,
+	IPT_TYPE_MFC_SHAREDMEM = 1,
+	IPT_TYPE_MDP_WRITEBACK = 2,
+};
+
+/*
+ * This flag allows clients when mapping into the IOMMU to specify to
+ * defer un-mapping from the IOMMU until the buffer memory is freed.
+ */
+#define ION_IOMMU_UNMAP_DELAYED 1
+
+/*
+ * This flag allows clients to defer unsecuring a buffer until the buffer
+ * is actually freed.
+ */
+#define ION_UNSECURE_DELAYED	1
+
+/**
+ * struct ion_cp_heap_pdata - defines a content protection heap in the given
+ * platform
+ * @permission_type:	Memory ID used to identify the memory to TZ
+ * @align:		Alignment requirement for the memory
+ * @secure_base:	Base address for securing the heap.
+ *			Note: This might be different from actual base address
+ *			of this heap in the case of a shared heap.
+ * @secure_size:	Memory size for securing the heap.
+ *			Note: This might be different from actual size
+ *			of this heap in the case of a shared heap.
+ * @fixed_position:	If nonzero, position in the fixed area.
+ * @iommu_map_all:	Indicates whether we should map whole heap into IOMMU.
+ * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
+ * @request_ion_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_ion_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_ion_region:	function to be called upon ion registration
+ * @allow_nonsecure_alloc: allow non-secure allocations from this heap. For
+ *			secure heaps, this flag must be set to allow non-secure
+ *			allocations. For non-secure heaps, this flag is ignored.
+ *
+ */
+struct ion_cp_heap_pdata {
+	enum ion_permission_type permission_type;
+	unsigned int align;
+	ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
+	size_t secure_size; /* Size used for securing heap when heap is shared*/
+	int is_cma;
+	enum ion_fixed_position fixed_position;
+	int iommu_map_all;
+	int iommu_2x_map_domain;
+	int (*request_ion_region)(void *);
+	int (*release_ion_region)(void *);
+	void *(*setup_ion_region)(void);
+	int allow_nonsecure_alloc;
+};
+
+/**
+ * struct ion_co_heap_pdata - defines a carveout heap in the given platform
+ * @adjacent_mem_id:	Id of heap that this heap must be adjacent to.
+ * @align:		Alignment requirement for the memory
+ * @fixed_position:	If nonzero, position in the fixed area.
+ * @request_ion_region:	function to be called when the number of allocations
+ *			goes from 0 -> 1
+ * @release_ion_region:	function to be called when the number of allocations
+ *			goes from 1 -> 0
+ * @setup_ion_region:	function to be called upon ion registration
+ * @memory_type:	Memory type used for the heap
+ *
+ */
+struct ion_co_heap_pdata {
+	int adjacent_mem_id;
+	unsigned int align;
+	enum ion_fixed_position fixed_position;
+	int (*request_ion_region)(void *);
+	int (*release_ion_region)(void *);
+	void *(*setup_ion_region)(void);
+};
+
+struct msm_ion_prefetch_info {
+	struct list_head list;
+	int heap_id;
+	unsigned long *sizes;
+	int nr_sizes;
+};
+
+/**
+ * struct ion_cma_pdata - extra data for CMA regions
+ * @default_prefetch_size - default size to use for prefetching
+ */
+struct ion_cma_pdata {
+	unsigned long default_prefetch_size;
+};
+
+#ifdef CONFIG_ION
+/**
+ *  msm_ion_client_create - allocate a client using the ion_device specified in
+ *				drivers/staging/android/ion/msm/msm_ion.c
+ *
+ * The name argument and the return values are the same as for
+ * ion_client_create().
+ */
+
+struct ion_client *msm_ion_client_create(const char *name);
+
+/**
+ * ion_handle_get_flags - get the flags for a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the flags
+ * @flags - pointer to store the flags
+ *
+ * Gets the current flags for a handle. These flags indicate various options
+ * of the buffer (caching, security, etc.)
+ */
+int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
+			 unsigned long *flags);
+
+/**
+ * ion_handle_get_size - get the allocated size of a given handle
+ *
+ * @client - client who allocated the handle
+ * @handle - handle to get the size
+ * @size - pointer to store the size
+ *
+ * Gives the allocated size of a handle. Returns 0 on success, negative
+ * value on error.
+ *
+ * NOTE: This is intended to be used only to get a size to pass to map_iommu.
+ * You should *NOT* rely on this for any other usage.
+ */
+
+int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
+			size_t *size);
+/**
+ * msm_ion_do_cache_op - do cache operations.
+ *
+ * @client - pointer to ION client.
+ * @handle - pointer to buffer handle.
+ * @vaddr -  virtual address to operate on.
+ * @len - Length of data to do cache operation on.
+ * @cmd - Cache operation to perform:
+ *		ION_IOC_CLEAN_CACHES
+ *		ION_IOC_INV_CACHES
+ *		ION_IOC_CLEAN_INV_CACHES
+ *
+ * Returns 0 on success
+ */
+int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
+			void *vaddr, unsigned long len, unsigned int cmd);
+
+#else
+static inline struct ion_client *msm_ion_client_create(const char *name)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int ion_handle_get_size(struct ion_client *client,
+				      struct ion_handle *handle, size_t *size)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ion_do_cache_op(
+			struct ion_client *client,
+			struct ion_handle *handle, void *vaddr,
+			unsigned long len, unsigned int cmd)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_ION */
+
+#endif
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index ec3b665..68f0217 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -43,6 +43,9 @@
 #include <linux/profile.h>
 #include <linux/notifier.h>
 
+#define CREATE_TRACE_POINTS
+#include "trace/lowmemorykiller.h"
+
 static u32 lowmem_debug_level = 1;
 static short lowmem_adj[6] = {
 	0,
@@ -160,11 +163,16 @@
 			     p->comm, p->pid, oom_score_adj, tasksize);
 	}
 	if (selected) {
+		long cache_size = other_file * (long)(PAGE_SIZE / 1024);
+		long cache_limit = minfree * (long)(PAGE_SIZE / 1024);
+		long free = other_free * (long)(PAGE_SIZE / 1024);
+
 		task_lock(selected);
 		send_sig(SIGKILL, selected, 0);
 		if (selected->mm)
 			task_set_lmk_waiting(selected);
 		task_unlock(selected);
+		trace_lowmemory_kill(selected, cache_size, cache_limit, free);
 		lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
 				 "   to free %ldkB on behalf of '%s' (%d) because\n"
 				 "   cache %ldkB is below limit %ldkB for oom_score_adj %hd\n"
@@ -173,10 +181,9 @@
 			     selected_oom_score_adj,
 			     selected_tasksize * (long)(PAGE_SIZE / 1024),
 			     current->comm, current->pid,
-			     other_file * (long)(PAGE_SIZE / 1024),
-			     minfree * (long)(PAGE_SIZE / 1024),
+			     cache_size, cache_limit,
 			     min_score_adj,
-			     other_free * (long)(PAGE_SIZE / 1024));
+			     free);
 		lowmem_deathpending_timeout = jiffies + HZ;
 		rem += selected_tasksize;
 	}
@@ -200,12 +207,96 @@
 }
 device_initcall(lowmem_init);
 
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+static short lowmem_oom_adj_to_oom_score_adj(short oom_adj)
+{
+	if (oom_adj == OOM_ADJUST_MAX)
+		return OOM_SCORE_ADJ_MAX;
+	else
+		return (oom_adj * OOM_SCORE_ADJ_MAX) / -OOM_DISABLE;
+}
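+
+/*
+ * Worked example of the mapping (illustrative; OOM_SCORE_ADJ_MAX = 1000,
+ * OOM_DISABLE = -17): oom_adj 0 -> 0, 6 -> 352, 12 -> 705, while oom_adj 15
+ * maps straight to 1000 via the OOM_ADJUST_MAX special case above.
+ */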
+
+static void lowmem_autodetect_oom_adj_values(void)
+{
+	int i;
+	short oom_adj;
+	short oom_score_adj;
+	int array_size = ARRAY_SIZE(lowmem_adj);
+
+	if (lowmem_adj_size < array_size)
+		array_size = lowmem_adj_size;
+
+	if (array_size <= 0)
+		return;
+
+	oom_adj = lowmem_adj[array_size - 1];
+	if (oom_adj > OOM_ADJUST_MAX)
+		return;
+
+	oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+	if (oom_score_adj <= OOM_ADJUST_MAX)
+		return;
+
+	lowmem_print(1, "lowmem_shrink: convert oom_adj to oom_score_adj:\n");
+	for (i = 0; i < array_size; i++) {
+		oom_adj = lowmem_adj[i];
+		oom_score_adj = lowmem_oom_adj_to_oom_score_adj(oom_adj);
+		lowmem_adj[i] = oom_score_adj;
+		lowmem_print(1, "oom_adj %d => oom_score_adj %d\n",
+			     oom_adj, oom_score_adj);
+	}
+}
+
+static int lowmem_adj_array_set(const char *val, const struct kernel_param *kp)
+{
+	int ret;
+
+	ret = param_array_ops.set(val, kp);
+
+	/* HACK: Autodetect oom_adj values in lowmem_adj array */
+	lowmem_autodetect_oom_adj_values();
+
+	return ret;
+}
+
+static int lowmem_adj_array_get(char *buffer, const struct kernel_param *kp)
+{
+	return param_array_ops.get(buffer, kp);
+}
+
+static void lowmem_adj_array_free(void *arg)
+{
+	param_array_ops.free(arg);
+}
+
+static struct kernel_param_ops lowmem_adj_array_ops = {
+	.set = lowmem_adj_array_set,
+	.get = lowmem_adj_array_get,
+	.free = lowmem_adj_array_free,
+};
+
+static const struct kparam_array __param_arr_adj = {
+	.max = ARRAY_SIZE(lowmem_adj),
+	.num = &lowmem_adj_size,
+	.ops = &param_ops_short,
+	.elemsize = sizeof(lowmem_adj[0]),
+	.elem = lowmem_adj,
+};
+#endif
+
 /*
  * not really modular, but the easiest way to keep compat with existing
  * bootargs behaviour is to continue using module_param here.
  */
 module_param_named(cost, lowmem_shrinker.seeks, int, 0644);
+#ifdef CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES
+module_param_cb(adj, &lowmem_adj_array_ops,
+		.arr = &__param_arr_adj,
+		S_IRUGO | S_IWUSR);
+__MODULE_PARM_TYPE(adj, "array of short");
+#else
 module_param_array_named(adj, lowmem_adj, short, &lowmem_adj_size, 0644);
+#endif
 module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
 			 0644);
 module_param_named(debug_level, lowmem_debug_level, uint, 0644);
diff --git a/drivers/staging/android/trace/lowmemorykiller.h b/drivers/staging/android/trace/lowmemorykiller.h
new file mode 100644
index 0000000..f43d3fa
--- /dev/null
+++ b/drivers/staging/android/trace/lowmemorykiller.h
@@ -0,0 +1,41 @@
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
+#define TRACE_SYSTEM lowmemorykiller
+
+#if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LOWMEMORYKILLER_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(lowmemory_kill,
+	TP_PROTO(struct task_struct *killed_task, long cache_size, \
+		 long cache_limit, long free),
+
+	TP_ARGS(killed_task, cache_size, cache_limit, free),
+
+	TP_STRUCT__entry(
+			__array(char, comm, TASK_COMM_LEN)
+			__field(pid_t, pid)
+			__field(long, pagecache_size)
+			__field(long, pagecache_limit)
+			__field(long, free)
+	),
+
+	TP_fast_assign(
+			memcpy(__entry->comm, killed_task->comm, TASK_COMM_LEN);
+			__entry->pid = killed_task->pid;
+			__entry->pagecache_size = cache_size;
+			__entry->pagecache_limit = cache_limit;
+			__entry->free = free;
+	),
+
+	TP_printk("%s (%d), page cache %ldkB (limit %ldkB), free %ldkB",
+		__entry->comm, __entry->pid, __entry->pagecache_size,
+		__entry->pagecache_limit, __entry->free)
+);
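+
+/*
+ * Example line in the trace buffer (values are illustrative only):
+ *
+ *	lowmemory_kill: example_proc (1234), page cache 51200kB
+ *	(limit 65536kB), free 10240kB
+ */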
+
+
+#endif /* if !defined(_TRACE_LOWMEMORYKILLER_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 14cd873..ccc2a7c 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -44,26 +44,32 @@
 			       * must be last so device specific heaps always
 			       * are at the end of this enum
 			       */
+	ION_NUM_HEAPS = 16,
 };
 
+#define ION_HEAP_SYSTEM_MASK		((1 << ION_HEAP_TYPE_SYSTEM))
+#define ION_HEAP_SYSTEM_CONTIG_MASK	((1 << ION_HEAP_TYPE_SYSTEM_CONTIG))
+#define ION_HEAP_CARVEOUT_MASK		((1 << ION_HEAP_TYPE_CARVEOUT))
+#define ION_HEAP_TYPE_DMA_MASK		((1 << ION_HEAP_TYPE_DMA))
+
 #define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
 
 /**
  * allocation flags - the lower 16 bits are used by core ion, the upper 16
  * bits are reserved for use by the heaps themselves.
  */
-
-/*
- * mappings of this buffer should be cached, ion will do cache maintenance
- * when the buffer is mapped for dma
- */
-#define ION_FLAG_CACHED 1
-
-/*
- * mappings of this buffer will created at mmap time, if this is set
- * caches must be managed manually
- */
-#define ION_FLAG_CACHED_NEEDS_SYNC 2
+#define ION_FLAG_CACHED 1		/*
+					 * mappings of this buffer should be
+					 * cached, ion will do cache
+					 * maintenance when the buffer is
+					 * mapped for dma
+					 */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/*
+					 * mappings of this buffer will be
+					 * created at mmap time, if this is set
+					 * caches must be managed
+					 * manually
+					 */
 
 /**
  * DOC: Ion Userspace API
@@ -128,36 +134,6 @@
 	unsigned long arg;
 };
 
-#define MAX_HEAP_NAME			32
-
-/**
- * struct ion_heap_data - data about a heap
- * @name - first 32 characters of the heap name
- * @type - heap type
- * @heap_id - heap id for the heap
- */
-struct ion_heap_data {
-	char name[MAX_HEAP_NAME];
-	__u32 type;
-	__u32 heap_id;
-	__u32 reserved0;
-	__u32 reserved1;
-	__u32 reserved2;
-};
-
-/**
- * struct ion_heap_query - collection of data about all heaps
- * @cnt - total number of heaps to be copied
- * @heaps - buffer to copy heap data
- */
-struct ion_heap_query {
-	__u32 cnt; /* Total number of heaps to be copied */
-	__u32 reserved0; /* align to 64bits */
-	__u64 heaps; /* buffer to be populated */
-	__u32 reserved1;
-	__u32 reserved2;
-};
-
 #define ION_IOC_MAGIC		'I'
 
 /**
@@ -224,13 +200,4 @@
  */
 #define ION_IOC_CUSTOM		_IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
 
-/**
- * DOC: ION_IOC_HEAP_QUERY - information about available heaps
- *
- * Takes an ion_heap_query structure and populates information about
- * available Ion heaps.
- */
-#define ION_IOC_HEAP_QUERY     _IOWR(ION_IOC_MAGIC, 8, \
-					struct ion_heap_query)
-
 #endif /* _UAPI_LINUX_ION_H */
diff --git a/drivers/staging/android/uapi/msm_ion.h b/drivers/staging/android/uapi/msm_ion.h
new file mode 100644
index 0000000..2ba850c
--- /dev/null
+++ b/drivers/staging/android/uapi/msm_ion.h
@@ -0,0 +1,206 @@
+#ifndef _UAPI_MSM_ION_H
+#define _UAPI_MSM_ION_H
+
+#include "ion.h"
+
+enum msm_ion_heap_types {
+	ION_HEAP_TYPE_MSM_START = ION_HEAP_TYPE_CUSTOM + 1,
+	ION_HEAP_TYPE_SECURE_DMA = ION_HEAP_TYPE_MSM_START,
+	ION_HEAP_TYPE_SYSTEM_SECURE,
+	ION_HEAP_TYPE_HYP_CMA,
+	/*
+	 * if you add a heap type here you should also add it to
+	 * heap_types_info[] in msm_ion.c
+	 */
+};
+
+/**
+ * These are the only IDs that should be used for Ion heap IDs.
+ * The IDs listed are the order in which allocation will be attempted
+ * if specified. Don't swap the order of heap IDs unless you know what
+ * you are doing!
+ * IDs are spaced on purpose to allow new IDs to be inserted in between (for
+ * possible fallbacks).
+ */
+
+enum ion_heap_ids {
+	INVALID_HEAP_ID = -1,
+	ION_CP_MM_HEAP_ID = 8,
+	ION_SECURE_HEAP_ID = 9,
+	ION_SECURE_DISPLAY_HEAP_ID = 10,
+	ION_CP_MFC_HEAP_ID = 12,
+	ION_SPSS_HEAP_ID = 13, /* Secure Processor ION heap */
+	ION_CP_WB_HEAP_ID = 16, /* 8660 only */
+	ION_CAMERA_HEAP_ID = 20, /* 8660 only */
+	ION_SYSTEM_CONTIG_HEAP_ID = 21,
+	ION_ADSP_HEAP_ID = 22,
+	ION_PIL1_HEAP_ID = 23, /* Currently used for other PIL images */
+	ION_SF_HEAP_ID = 24,
+	ION_SYSTEM_HEAP_ID = 25,
+	ION_PIL2_HEAP_ID = 26, /* Currently used for modem firmware images */
+	ION_QSECOM_HEAP_ID = 27,
+	ION_AUDIO_HEAP_ID = 28,
+
+	ION_MM_FIRMWARE_HEAP_ID = 29,
+
+	ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_FLAG_SECURE flag */
+};
+
+/*
+ * The IOMMU heap is deprecated! Here are some aliases for backwards
+ * compatibility:
+ */
+#define ION_IOMMU_HEAP_ID ION_SYSTEM_HEAP_ID
+#define ION_HEAP_TYPE_IOMMU ION_HEAP_TYPE_SYSTEM
+
+#define ION_SPSS_HEAP_ID ION_SPSS_HEAP_ID
+
+enum ion_fixed_position {
+	NOT_FIXED,
+	FIXED_LOW,
+	FIXED_MIDDLE,
+	FIXED_HIGH,
+};
+
+enum cp_mem_usage {
+	VIDEO_BITSTREAM = 0x1,
+	VIDEO_PIXEL = 0x2,
+	VIDEO_NONPIXEL = 0x3,
+	DISPLAY_SECURE_CP_USAGE = 0x4,
+	CAMERA_SECURE_CP_USAGE = 0x5,
+	MAX_USAGE = 0x6,
+	UNKNOWN = 0x7FFFFFFF,
+};
+
+/**
+ * Flags to be used when allocating from the secure heap for
+ * content protection
+ */
+#define ION_FLAG_CP_TOUCH ((1 << 17))
+#define ION_FLAG_CP_BITSTREAM ((1 << 18))
+#define ION_FLAG_CP_PIXEL  ((1 << 19))
+#define ION_FLAG_CP_NON_PIXEL ((1 << 20))
+#define ION_FLAG_CP_CAMERA ((1 << 21))
+#define ION_FLAG_CP_HLOS ((1 << 22))
+#define ION_FLAG_CP_HLOS_FREE ((1 << 23))
+#define ION_FLAG_CP_SEC_DISPLAY ((1 << 25))
+#define ION_FLAG_CP_APP ((1 << 26))
+
+/**
+ * Flag to use when allocating to indicate that a heap is secure.
+ * Do NOT use BIT macro since it is defined in #ifdef __KERNEL__
+ */
+#define ION_FLAG_SECURE (1 << (ION_HEAP_ID_RESERVED))
+
+/**
+ * Flag for clients to force contiguous memory allocation
+ *
+ * Use of this flag is carefully monitored!
+ */
+#define ION_FLAG_FORCE_CONTIGUOUS ((1 << 30))
+
+/*
+ * Used in conjunction with heaps that pool memory to force an allocation
+ * to come from the page allocator directly instead of from the pool
+ */
+#define ION_FLAG_POOL_FORCE_ALLOC ((1 << 16))
+
+/**
+ * Deprecated! Please use the corresponding ION_FLAG_*
+ */
+#define ION_SECURE ION_FLAG_SECURE
+#define ION_FORCE_CONTIGUOUS ION_FLAG_FORCE_CONTIGUOUS
+
+/**
+ * Macro should be used with ion_heap_ids defined above.
+ */
+#define ION_HEAP(bit) (1 << (bit))
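+
+/*
+ * Example (illustrative): build a heap ID mask for ion_alloc() that permits
+ * allocation from either the ADSP heap or the system heap:
+ *
+ *	unsigned int heap_mask = ION_HEAP(ION_ADSP_HEAP_ID) |
+ *				 ION_HEAP(ION_SYSTEM_HEAP_ID);
+ */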
+
+#define ION_ADSP_HEAP_NAME	"adsp"
+#define ION_SYSTEM_HEAP_NAME	"system"
+#define ION_VMALLOC_HEAP_NAME	ION_SYSTEM_HEAP_NAME
+#define ION_KMALLOC_HEAP_NAME	"kmalloc"
+#define ION_AUDIO_HEAP_NAME	"audio"
+#define ION_SF_HEAP_NAME	"sf"
+#define ION_MM_HEAP_NAME	"mm"
+#define ION_CAMERA_HEAP_NAME	"camera_preview"
+#define ION_IOMMU_HEAP_NAME	"iommu"
+#define ION_MFC_HEAP_NAME	"mfc"
+#define ION_SPSS_HEAP_NAME	"spss"
+#define ION_WB_HEAP_NAME	"wb"
+#define ION_MM_FIRMWARE_HEAP_NAME	"mm_fw"
+#define ION_PIL1_HEAP_NAME  "pil_1"
+#define ION_PIL2_HEAP_NAME  "pil_2"
+#define ION_QSECOM_HEAP_NAME	"qsecom"
+#define ION_SECURE_HEAP_NAME	"secure_heap"
+#define ION_SECURE_DISPLAY_HEAP_NAME "secure_display"
+
+#define ION_SET_CACHED(__cache)		((__cache) | ION_FLAG_CACHED)
+#define ION_SET_UNCACHED(__cache)	((__cache) & ~ION_FLAG_CACHED)
+
+#define ION_IS_CACHED(__flags)	((__flags) & ION_FLAG_CACHED)
+
+/* struct ion_flush_data - data passed to ion for flushing caches
+ *
+ * @handle:	handle with data to flush
+ * @fd:		fd to flush
+ * @vaddr:	userspace virtual address mapped with mmap
+ * @offset:	offset into the handle to flush
+ * @length:	length of handle to flush
+ *
+ * Performs cache operations on the handle. If p is the start address
+ * of the handle, p + offset through p + offset + length will have
+ * the cache operations performed
+ */
+struct ion_flush_data {
+	ion_user_handle_t handle;
+	int fd;
+	void *vaddr;
+	unsigned int offset;
+	unsigned int length;
+};
+
+struct ion_prefetch_regions {
+	unsigned int vmid;
+	size_t __user *sizes;
+	unsigned int nr_sizes;
+};
+
+struct ion_prefetch_data {
+	int heap_id;
+	unsigned long len;
+	struct ion_prefetch_regions __user *regions;
+	unsigned int nr_regions;
+};
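+
+/*
+ * Illustrative sketch (values are assumptions): a caller requesting a 1MB
+ * prefetch for one secure VMID would fill the structures roughly as:
+ *
+ *	size_t sizes[] = { SZ_1M };
+ *	struct ion_prefetch_regions region = {
+ *		.vmid = vmid, .sizes = sizes, .nr_sizes = 1 };
+ *	struct ion_prefetch_data data = {
+ *		.heap_id = heap_id, .len = SZ_1M,
+ *		.regions = &region, .nr_regions = 1 };
+ */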
+
+#define ION_IOC_MSM_MAGIC 'M'
+
+/**
+ * DOC: ION_IOC_CLEAN_CACHES - clean the caches
+ *
+ * Clean the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 0, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_INV_CACHES - invalidate the caches
+ *
+ * Invalidate the caches of the handle specified.
+ */
+#define ION_IOC_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 1, \
+						struct ion_flush_data)
+/**
+ * DOC: ION_IOC_CLEAN_INV_CACHES - clean and invalidate the caches
+ *
+ * Clean and invalidate the caches of the handle specified.
+ */
+#define ION_IOC_CLEAN_INV_CACHES	_IOWR(ION_IOC_MSM_MAGIC, 2, \
+						struct ion_flush_data)
+
+#define ION_IOC_PREFETCH		_IOWR(ION_IOC_MSM_MAGIC, 3, \
+						struct ion_prefetch_data)
+
+#define ION_IOC_DRAIN			_IOWR(ION_IOC_MSM_MAGIC, 4, \
+						struct ion_prefetch_data)
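+
+/*
+ * Illustrative usage (not part of the original patch), assuming the ION
+ * character device routes these custom commands to msm_ion_custom_ioctl():
+ *
+ *	struct ion_flush_data flush = {
+ *		.fd = buf_fd, .vaddr = mapped_addr,
+ *		.offset = 0, .length = buf_len };
+ *
+ *	ioctl(ion_fd, ION_IOC_CLEAN_INV_CACHES, &flush);
+ */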
+
+#endif
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index f2303f3..2e2b88a 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -131,6 +131,9 @@
 	struct uart_state *state = tty->driver_data;
 	struct uart_port *port = state->uart_port;
 
+	if (port && port->ops->wake_peer)
+		port->ops->wake_peer(port);
+
 	if (port && !uart_tx_stopped(port))
 		port->ops->start_tx(port);
 }
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index b97cde7..ba9eb16 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -105,4 +105,10 @@
 	  inside (i.e. STiH407).
 	  Say 'Y' or 'M' if you have one such device.
 
+config USB_DWC3_MSM
+	tristate "QTI MSM Platforms"
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  Applicable to QTI MSM platforms with the DesignWare Core USB3 IP.
+	  Say 'Y' or 'M' if you have one such device.
 endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 22420e1..8eb7ede 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -1,5 +1,6 @@
 # define_trace.h needs to know how to find our header
 CFLAGS_trace.o				:= -I$(src)
+CFLAGS_dwc3-msm.o			:= -Idrivers/usb/host -Idrivers/base/power
 
 obj-$(CONFIG_USB_DWC3)			+= dwc3.o
 
@@ -39,3 +40,4 @@
 obj-$(CONFIG_USB_DWC3_KEYSTONE)		+= dwc3-keystone.o
 obj-$(CONFIG_USB_DWC3_OF_SIMPLE)	+= dwc3-of-simple.o
 obj-$(CONFIG_USB_DWC3_ST)		+= dwc3-st.o
+obj-$(CONFIG_USB_DWC3_MSM)		+= dwc3-msm.o dbm.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index fea4469..65fe33b 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -35,6 +35,7 @@
 #include <linux/of.h>
 #include <linux/acpi.h>
 #include <linux/pinctrl/consumer.h>
+#include <linux/irq.h>
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -49,6 +50,20 @@
 
 #define DWC3_DEFAULT_AUTOSUSPEND_DELAY	5000 /* ms */
 
+void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend)
+{
+	u32			reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+
+	if (suspend)
+		reg |= DWC3_GUSB3PIPECTL_SUSPHY;
+	else
+		reg &= ~DWC3_GUSB3PIPECTL_SUSPHY;
+
+	dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
+}
+
 /**
  * dwc3_get_dr_mode - Validates and sets dr_mode
  * @dwc: pointer to our context structure
@@ -59,9 +74,8 @@
 	struct device *dev = dwc->dev;
 	unsigned int hw_mode;
 
-	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN)
-		dwc->dr_mode = USB_DR_MODE_OTG;
 
+	dwc->is_drd = 0;
 	mode = dwc->dr_mode;
 	hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
 
@@ -97,6 +111,9 @@
 		dwc->dr_mode = mode;
 	}
 
+	if (dwc->dr_mode == USB_DR_MODE_OTG)
+		dwc->is_drd = 1;
+
 	return 0;
 }
 
@@ -108,6 +125,37 @@
 	reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
 	reg |= DWC3_GCTL_PRTCAPDIR(mode);
 	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+	/*
+	 * Set this bit so that the device attempts SS three more times,
+	 * even if it previously failed to operate in SS mode.
+	 */
+	reg |= DWC3_GCTL_U2RSTECN;
+	reg &= ~(DWC3_GCTL_SOFITPSYNC);
+	reg &= ~(DWC3_GCTL_PWRDNSCALEMASK);
+	reg |= DWC3_GCTL_PWRDNSCALE(2);
+	reg |= DWC3_GCTL_U2EXIT_LFPS;
+	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+	if (mode == DWC3_GCTL_PRTCAP_OTG || mode == DWC3_GCTL_PRTCAP_HOST) {
+		/*
+		 * Allow the ITP to be generated off of the ref_clk-based
+		 * counter instead of the UTMI/ULPI clk-based counter when
+		 * only SuperSpeed is active, so that the UTMI/ULPI PHY can
+		 * be suspended.
+		 *
+		 * Starting with revision 2.50A, GFLADJ_REFCLK_LPM_SEL is used
+		 * instead.
+		 */
+		if (dwc->revision < DWC3_REVISION_250A) {
+			reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+			reg |= DWC3_GCTL_SOFITPSYNC;
+			dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+		} else {
+			reg = dwc3_readl(dwc->regs, DWC3_GFLADJ);
+			reg |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+			dwc3_writel(dwc->regs, DWC3_GFLADJ, reg);
+		}
+	}
 }
 
 u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type)
@@ -125,17 +173,36 @@
 }
 
 /**
- * dwc3_core_soft_reset - Issues core soft reset and PHY reset
+ * dwc3_init_usb_phys - performs initialization of the HS and SS PHYs
+ * If used as part of a POR or init sequence, it is recommended that a hard
+ * reset of the PHYs be performed prior to invoking this function.
  * @dwc: pointer to our context structure
- */
-static int dwc3_core_soft_reset(struct dwc3 *dwc)
+ */
+static int dwc3_init_usb_phys(struct dwc3 *dwc)
 {
-	u32		reg;
-	int		retries = 1000;
 	int		ret;
 
-	usb_phy_init(dwc->usb2_phy);
-	usb_phy_init(dwc->usb3_phy);
+	/* Bring up PHYs */
+	ret = usb_phy_init(dwc->usb2_phy);
+	if (ret) {
+		pr_err("%s: usb_phy_init(dwc->usb2_phy) returned %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	ret = usb_phy_init(dwc->usb3_phy);
+	if (ret == -EBUSY) {
+		/*
+		 * Cap the maximum speed at high-speed when USB3 PHY
+		 * initialization fails and SuperSpeed can't be supported.
+		 */
+		dwc->maximum_speed = USB_SPEED_HIGH;
+	} else if (ret) {
+		pr_err("%s: usb_phy_init(dwc->usb3_phy) returned %d\n",
+				__func__, ret);
+		return ret;
+	}
 	ret = phy_init(dwc->usb2_generic_phy);
 	if (ret < 0)
 		return ret;
@@ -146,27 +213,34 @@
 		return ret;
 	}
 
-	/*
-	 * We're resetting only the device side because, if we're in host mode,
-	 * XHCI driver will reset the host block. If dwc3 was configured for
-	 * host-only mode, then we can return early.
-	 */
-	if (dwc->dr_mode == USB_DR_MODE_HOST)
-		return 0;
+	return 0;
+}
 
-	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-	reg |= DWC3_DCTL_CSFTRST;
-	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+/**
+ * dwc3_core_reset - Issues core soft reset and PHY reset
+ * @dwc: pointer to our context structure
+ */
+static int dwc3_core_reset(struct dwc3 *dwc)
+{
+	int		ret;
 
-	do {
-		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
-		if (!(reg & DWC3_DCTL_CSFTRST))
-			return 0;
+	/* Reset PHYs */
+	usb_phy_reset(dwc->usb2_phy);
+	usb_phy_reset(dwc->usb3_phy);
 
-		udelay(1);
-	} while (--retries);
+	/* Initialize PHYs */
+	ret = dwc3_init_usb_phys(dwc);
+	if (ret) {
+		pr_err("%s: dwc3_init_usb_phys returned %d\n",
+				__func__, ret);
+		return ret;
+	}
 
-	return -ETIMEDOUT;
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_RESET_EVENT);
+
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_POST_RESET_EVENT);
+
+	return 0;
 }
 
 /**
@@ -300,7 +374,7 @@
  *
  * Returns 0 on success otherwise negative errno.
  */
-static int dwc3_event_buffers_setup(struct dwc3 *dwc)
+int dwc3_event_buffers_setup(struct dwc3 *dwc)
 {
 	struct dwc3_event_buffer	*evt;
 
@@ -600,7 +674,7 @@
  *
  * Returns 0 on success otherwise negative errno.
  */
-static int dwc3_core_init(struct dwc3 *dwc)
+int dwc3_core_init(struct dwc3 *dwc)
 {
 	u32			hwparams4 = dwc->hwparams.hwparams4;
 	u32			reg;
@@ -635,11 +709,12 @@
 	}
 
 	/* issue device SoftReset too */
-	ret = dwc3_soft_reset(dwc);
+	ret = dwc3_core_reset(dwc);
 	if (ret)
 		goto err0;
 
-	ret = dwc3_core_soft_reset(dwc);
+	/* issue device SoftReset too */
+	ret = dwc3_soft_reset(dwc);
 	if (ret)
 		goto err0;
 
@@ -713,14 +788,31 @@
 	if (dwc->revision < DWC3_REVISION_190A)
 		reg |= DWC3_GCTL_U2RSTECN;
 
-	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+	ret = dwc3_get_dr_mode(dwc);
+	if (ret)
+		goto err0;
 
 	dwc3_core_num_eps(dwc);
 
-	ret = dwc3_setup_scratch_buffers(dwc);
+	/*
+	 * Disable clock gating to work around a known HW bug that causes the
+	 * internal RAM clock to get stuck when entering low power modes.
+	 */
+	if (dwc->disable_clk_gating) {
+		dev_dbg(dwc->dev, "Disabling controller clock gating.\n");
+		reg |= DWC3_GCTL_DSBLCLKGTNG;
+	}
+
+	dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+	ret = dwc3_alloc_scratch_buffers(dwc);
 	if (ret)
 		goto err1;
 
+	ret = dwc3_setup_scratch_buffers(dwc);
+	if (ret)
+		goto err2;
+
 	/* Adjust Frame Length */
 	dwc3_frame_length_adjustment(dwc);
 
@@ -730,14 +822,15 @@
 	if (ret < 0)
 		goto err2;
 
-	ret = phy_power_on(dwc->usb3_generic_phy);
-	if (ret < 0)
-		goto err3;
-
-	ret = dwc3_event_buffers_setup(dwc);
-	if (ret) {
-		dev_err(dwc->dev, "failed to setup event buffers\n");
-		goto err4;
+	/*
+	 * Clear elastic buffer mode in the GUSB3PIPECTL(0) register, otherwise
+	 * it results in high link errors and could cause SS mode transfer
+	 * failure.
+	 */
+	if (!dwc->nominal_elastic_buffer) {
+		reg = dwc3_readl(dwc->regs, DWC3_GUSB3PIPECTL(0));
+		reg &= ~DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE;
+		dwc3_writel(dwc->regs, DWC3_GUSB3PIPECTL(0), reg);
 	}
 
 	switch (dwc->dr_mode) {
@@ -768,16 +861,8 @@
 
 	return 0;
 
-err4:
-	phy_power_off(dwc->usb3_generic_phy);
-
-err3:
-	phy_power_off(dwc->usb2_generic_phy);
-
 err2:
-	usb_phy_set_suspend(dwc->usb2_phy, 1);
-	usb_phy_set_suspend(dwc->usb3_phy, 1);
-
+	dwc3_free_scratch_buffers(dwc);
 err1:
 	usb_phy_shutdown(dwc->usb2_phy);
 	usb_phy_shutdown(dwc->usb3_phy);
@@ -919,6 +1004,88 @@
 	}
 }
 
+/* An XHCI reset also resets other core registers; re-initialize them here */
+void dwc3_post_host_reset_core_init(struct dwc3 *dwc)
+{
+	dwc3_core_init(dwc);
+	dwc3_gadget_restart(dwc);
+}
+
+static void (*notify_event)(struct dwc3 *, unsigned int);
+void dwc3_set_notifier(void (*notify)(struct dwc3 *, unsigned int))
+{
+	notify_event = notify;
+}
+EXPORT_SYMBOL(dwc3_set_notifier);
+
+int dwc3_notify_event(struct dwc3 *dwc, unsigned int event)
+{
+	int ret = 0;
+
+	if (dwc->notify_event)
+		dwc->notify_event(dwc, event);
+	else
+		ret = -ENODEV;
+
+	return ret;
+}
+EXPORT_SYMBOL(dwc3_notify_event);
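The two exports above implement a register-then-dispatch hook: a platform glue driver installs a callback with dwc3_set_notifier() before the core probes, and the core later forwards controller events through dwc3_notify_event(), returning -ENODEV when no glue driver is present. A minimal standalone sketch of the same pattern, using hypothetical names and plain C outside the kernel:

#include <stdio.h>
#include <errno.h>

struct ctrl {
	void (*notify)(struct ctrl *c, unsigned int event);
};

/* glue-driver callback, registered before the core device probes */
static void glue_notify(struct ctrl *c, unsigned int event)
{
	(void)c;
	printf("glue handled event %u\n", event);
}

/* module-wide hook; probe copies it into each controller instance */
static void (*registered_notify)(struct ctrl *, unsigned int);

static void set_notifier(void (*cb)(struct ctrl *, unsigned int))
{
	registered_notify = cb;
}

/* dispatch helper: -ENODEV tells the caller no glue driver is present */
static int notify_event(struct ctrl *c, unsigned int event)
{
	if (!c->notify)
		return -ENODEV;
	c->notify(c, event);
	return 0;
}

int main(void)
{
	struct ctrl c = { 0 };

	set_notifier(glue_notify);
	c.notify = registered_notify;	/* what probe does with the hook */

	if (notify_event(&c, 1))
		printf("no glue driver; handle the event locally\n");
	return 0;
}

The module-wide pointer only matters at probe time; after that each controller carries its own copy, which is why the dispatcher checks the per-device pointer rather than the global one.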
+
+int dwc3_core_pre_init(struct dwc3 *dwc)
+{
+	int ret;
+
+	dwc3_cache_hwparams(dwc);
+
+	ret = dwc3_phy_setup(dwc);
+	if (ret)
+		goto err0;
+
+	if (!dwc->ev_buf) {
+		ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
+		if (ret) {
+			dev_err(dwc->dev, "failed to allocate event buffers\n");
+			ret = -ENOMEM;
+			goto err1;
+		}
+	}
+
+	ret = dwc3_core_init(dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to initialize core\n");
+		goto err2;
+	}
+
+	ret = phy_power_on(dwc->usb2_generic_phy);
+	if (ret < 0)
+		goto err3;
+
+	ret = phy_power_on(dwc->usb3_generic_phy);
+	if (ret < 0)
+		goto err4;
+
+	ret = dwc3_event_buffers_setup(dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to setup event buffers\n");
+		goto err5;
+	}
+
+	return ret;
+
+err5:
+	phy_power_off(dwc->usb3_generic_phy);
+err4:
+	phy_power_off(dwc->usb2_generic_phy);
+err3:
+	dwc3_core_exit(dwc);
+err2:
+	dwc3_free_event_buffers(dwc);
+err1:
+	dwc3_ulpi_exit(dwc);
+err0:
+	return ret;
+}
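dwc3_core_pre_init() above follows the kernel's usual goto-ladder unwind: each successfully completed step adds one more label that a later failure jumps past, so teardown runs in reverse order of setup. A small standalone sketch of the idiom, with hypothetical step names rather than driver calls:

#include <stdio.h>

static int step_a(void) { return 0; }		/* pretend setup steps */
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }		/* force a failure */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int init_all(void)
{
	int ret;

	ret = step_a();
	if (ret)
		goto err0;
	ret = step_b();
	if (ret)
		goto err1;
	ret = step_c();
	if (ret)
		goto err2;	/* unwind everything done so far */
	return 0;

err2:
	undo_b();
err1:
	undo_a();
err0:
	return ret;
}

int main(void)
{
	printf("init_all() = %d\n", init_all());
	return 0;
}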
+
 #define DWC3_ALIGN_MASK		(16 - 1)
 
 static int dwc3_probe(struct platform_device *pdev)
@@ -929,6 +1096,7 @@
 	u8			lpm_nyet_threshold;
 	u8			tx_de_emphasis;
 	u8			hird_threshold;
+	int			irq;
 
 	int			ret;
 
@@ -943,12 +1111,37 @@
 	dwc->mem = mem;
 	dwc->dev = dev;
 
+	dwc->notify_event = notify_event;
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "missing IRQ\n");
+		return -ENODEV;
+	}
+	dwc->xhci_resources[1].start = res->start;
+	dwc->xhci_resources[1].end = res->end;
+	dwc->xhci_resources[1].flags = res->flags;
+	dwc->xhci_resources[1].name = res->name;
+
+	irq = platform_get_irq(to_platform_device(dwc->dev), 0);
+
+	/* will be enabled in dwc3_msm_resume() */
+	irq_set_status_flags(irq, IRQ_NOAUTOEN);
+	ret = devm_request_threaded_irq(dev, irq, NULL, dwc3_interrupt,
+			IRQF_SHARED | IRQF_ONESHOT, "dwc3", dwc);
+	if (ret) {
+		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
+				irq, ret);
+		return -ENODEV;
+	}
+
+	dwc->irq = irq;
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!res) {
 		dev_err(dev, "missing memory resource\n");
 		return -ENODEV;
 	}
 
+	dwc->reg_phys = res->start;
 	dwc->xhci_resources[0].start = res->start;
 	dwc->xhci_resources[0].end = dwc->xhci_resources[0].start +
 					DWC3_XHCI_REGS_END;
@@ -984,6 +1177,12 @@
 
 	dwc->maximum_speed = usb_get_maximum_speed(dev);
 	dwc->dr_mode = usb_get_dr_mode(dev);
+
+	if (dwc->dr_mode == USB_DR_MODE_UNKNOWN) {
+		dwc->dr_mode = USB_DR_MODE_OTG;
+		dwc->is_drd = 1;
+	}
+
 	dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
 
 	dwc->has_lpm_erratum = device_property_read_bool(dev,
@@ -1035,14 +1234,19 @@
 	device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
 				 &dwc->fladj);
 
+	if (dwc->enable_bus_suspend) {
+		pm_runtime_set_autosuspend_delay(dev, 500);
+		pm_runtime_use_autosuspend(dev);
+	}
+
 	dwc->lpm_nyet_threshold = lpm_nyet_threshold;
 	dwc->tx_de_emphasis = tx_de_emphasis;
 
 	dwc->hird_threshold = hird_threshold
 		| (dwc->is_utmi_l1_suspend << 4);
 
+	init_waitqueue_head(&dwc->wait_linkstate);
 	platform_set_drvdata(pdev, dwc);
-	dwc3_cache_hwparams(dwc);
 
 	ret = dwc3_core_get_phy(dwc);
 	if (ret)
@@ -1056,37 +1260,11 @@
 		dma_set_coherent_mask(dev, dev->parent->coherent_dma_mask);
 	}
 
+	pm_runtime_no_callbacks(dev);
 	pm_runtime_set_active(dev);
-	pm_runtime_use_autosuspend(dev);
-	pm_runtime_set_autosuspend_delay(dev, DWC3_DEFAULT_AUTOSUSPEND_DELAY);
 	pm_runtime_enable(dev);
-	ret = pm_runtime_get_sync(dev);
-	if (ret < 0)
-		goto err1;
-
 	pm_runtime_forbid(dev);
 
-	ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
-	if (ret) {
-		dev_err(dwc->dev, "failed to allocate event buffers\n");
-		ret = -ENOMEM;
-		goto err2;
-	}
-
-	ret = dwc3_get_dr_mode(dwc);
-	if (ret)
-		goto err3;
-
-	ret = dwc3_alloc_scratch_buffers(dwc);
-	if (ret)
-		goto err3;
-
-	ret = dwc3_core_init(dwc);
-	if (ret) {
-		dev_err(dev, "failed to initialize core\n");
-		goto err4;
-	}
-
 	/* Check the maximum_speed parameter */
 	switch (dwc->maximum_speed) {
 	case USB_SPEED_LOW:
@@ -1114,32 +1292,28 @@
 		break;
 	}
 
+	/* Adjust Frame Length */
+	dwc3_frame_length_adjustment(dwc);
+
+	/* Hardcode number of eps */
+	dwc->num_in_eps = 16;
+	dwc->num_out_eps = 16;
+
 	ret = dwc3_core_init_mode(dwc);
 	if (ret)
-		goto err5;
+		goto err0;
 
-	dwc3_debugfs_init(dwc);
-	pm_runtime_put(dev);
+	ret = dwc3_debugfs_init(dwc);
+	if (ret) {
+		dev_err(dev, "failed to initialize debugfs\n");
+		goto err_core_init;
+	}
 
+	pm_runtime_allow(dev);
 	return 0;
 
-err5:
-	dwc3_event_buffers_cleanup(dwc);
-
-err4:
-	dwc3_free_scratch_buffers(dwc);
-
-err3:
-	dwc3_free_event_buffers(dwc);
-	dwc3_ulpi_exit(dwc);
-
-err2:
-	pm_runtime_allow(&pdev->dev);
-
-err1:
-	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-
+err_core_init:
+	dwc3_core_exit_mode(dwc);
 err0:
 	/*
 	 * restore res->start back to its original value so that, in case the
@@ -1250,8 +1424,9 @@
 	struct dwc3     *dwc = dev_get_drvdata(dev);
 	int		ret;
 
-	if (dwc3_runtime_checks(dwc))
-		return -EBUSY;
+	/* Check if the platform glue driver is handling PM; if not, handle it here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_SUSPEND_EVENT))
+		return 0;
 
 	ret = dwc3_suspend_common(dwc);
 	if (ret)
@@ -1267,6 +1442,10 @@
 	struct dwc3     *dwc = dev_get_drvdata(dev);
 	int		ret;
 
+	/* Check if the platform glue driver is handling PM; if not, handle it here */
+	if (!dwc3_notify_event(dwc, DWC3_CORE_PM_RESUME_EVENT))
+		return 0;
+
 	device_init_wakeup(dev, false);
 
 	ret = dwc3_resume_common(dwc);
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 6b60e42..968237d 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -26,6 +26,9 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+
 
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
@@ -45,7 +48,7 @@
 #define DWC3_SCRATCHBUF_SIZE	4096	/* each buffer is assumed to be 4KiB */
 #define DWC3_EVENT_SIZE		4	/* bytes */
 #define DWC3_EVENT_MAX_NUM	64	/* 2 events/endpoint */
-#define DWC3_EVENT_BUFFERS_SIZE	(DWC3_EVENT_SIZE * DWC3_EVENT_MAX_NUM)
+#define DWC3_EVENT_BUFFERS_SIZE	(2 * PAGE_SIZE)
 #define DWC3_EVENT_TYPE_MASK	0xfe
 
 #define DWC3_EVENT_TYPE_DEV	0
@@ -59,6 +62,8 @@
 #define DWC3_DEVICE_EVENT_WAKEUP		4
 #define DWC3_DEVICE_EVENT_HIBER_REQ		5
 #define DWC3_DEVICE_EVENT_EOPF			6
+/* For version 2.30a and above */
+#define DWC3_DEVICE_EVENT_SUSPEND		6
 #define DWC3_DEVICE_EVENT_SOF			7
 #define DWC3_DEVICE_EVENT_ERRATIC_ERROR		9
 #define DWC3_DEVICE_EVENT_CMD_CMPL		10
@@ -156,6 +161,10 @@
 
 /* Bit fields */
 
+/* Global SoC Bus Configuration Register 1 */
+#define DWC3_GSBUSCFG1_PIPETRANSLIMIT_MASK	(0x0f << 8)
+#define DWC3_GSBUSCFG1_PIPETRANSLIMIT(n)	((n) << 8)
+
 /* Global Debug Queue/FIFO Space Available Register */
 #define DWC3_GDBGFIFOSPACE_NUM(n)	((n) & 0x1f)
 #define DWC3_GDBGFIFOSPACE_TYPE(n)	(((n) << 5) & 0x1e0)
@@ -176,7 +185,10 @@
 
 /* Global Configuration Register */
 #define DWC3_GCTL_PWRDNSCALE(n)	((n) << 19)
+#define DWC3_GCTL_PWRDNSCALEMASK (0xFFF80000)
 #define DWC3_GCTL_U2RSTECN	(1 << 16)
+#define DWC3_GCTL_SOFITPSYNC	(1 << 10)
+#define DWC3_GCTL_U2EXIT_LFPS	(1 << 2)
 #define DWC3_GCTL_RAMCLKSEL(x)	(((x) & DWC3_GCTL_CLK_MASK) << 6)
 #define DWC3_GCTL_CLK_BUS	(0)
 #define DWC3_GCTL_CLK_PIPE	(1)
@@ -198,9 +210,17 @@
 #define DWC3_GCTL_GBLHIBERNATIONEN	(1 << 1)
 #define DWC3_GCTL_DSBLCLKGTNG		(1 << 0)
 
+/* Global User Control Register */
+#define DWC3_GUCTL_REFCLKPER		(0x3FF << 22)
+
+/* Global Debug LTSSM Register */
+#define DWC3_GDBGLTSSM_LINKSTATE_MASK	(0xF << 22)
+
 /* Global USB2 PHY Configuration Register */
 #define DWC3_GUSB2PHYCFG_PHYSOFTRST	(1 << 31)
+#define DWC3_GUSB2PHYCFG_ENBLSLPM	(1 << 8)
 #define DWC3_GUSB2PHYCFG_U2_FREECLK_EXISTS	(1 << 30)
+
 #define DWC3_GUSB2PHYCFG_SUSPHY		(1 << 6)
 #define DWC3_GUSB2PHYCFG_ULPI_UTMI	(1 << 4)
 #define DWC3_GUSB2PHYCFG_ENBLSLPM	(1 << 8)
@@ -235,6 +255,8 @@
 #define DWC3_GUSB3PIPECTL_RX_DETOPOLL	(1 << 8)
 #define DWC3_GUSB3PIPECTL_TX_DEEPH_MASK	DWC3_GUSB3PIPECTL_TX_DEEPH(3)
 #define DWC3_GUSB3PIPECTL_TX_DEEPH(n)	((n) << 1)
+#define DWC3_GUSB3PIPECTL_DELAYP1TRANS  (1 << 18)
+#define DWC3_GUSB3PIPECTL_ELASTIC_BUF_MODE	(1 << 0)
 
 /* Global TX Fifo Size Register */
 #define DWC3_GTXFIFOSIZ_TXFDEF(n)	((n) & 0xffff)
@@ -292,6 +314,11 @@
 #define DWC3_GFLADJ_30MHZ_SDBND_SEL		(1 << 7)
 #define DWC3_GFLADJ_30MHZ_MASK			0x3f
 
+#define DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1      (1 << 31)
+#define DWC3_GFLADJ_REFCLK_240MHZ_DECR          (0x7F << 24)
+#define DWC3_GFLADJ_REFCLK_LPM_SEL              (1 << 23)
+#define DWC3_GFLADJ_REFCLK_FLADJ                (0x3FFF << 8)
+
 /* Global User Control Register 2 */
 #define DWC3_GUCTL2_RST_ACTBITLATER		(1 << 14)
 
@@ -364,6 +391,8 @@
 #define DWC3_DEVTEN_ERRTICERREN		(1 << 9)
 #define DWC3_DEVTEN_SOFEN		(1 << 7)
 #define DWC3_DEVTEN_EOPFEN		(1 << 6)
+/* For version 2.30a and above */
+#define DWC3_DEVTEN_SUSPEND		(1 << 6)
 #define DWC3_DEVTEN_HIBERNATIONREQEVTEN	(1 << 5)
 #define DWC3_DEVTEN_WKUPEVTEN		(1 << 4)
 #define DWC3_DEVTEN_ULSTCNGEN		(1 << 3)
@@ -405,6 +434,7 @@
 #define DWC3_DGCMD_SET_LMP		0x01
 #define DWC3_DGCMD_SET_PERIODIC_PAR	0x02
 #define DWC3_DGCMD_XMIT_FUNCTION	0x03
+#define DWC3_DGCMD_XMIT_DEV		0x07
 
 /* These apply for core versions 1.94a and later */
 #define DWC3_DGCMD_SET_SCRATCHPAD_ADDR_LO	0x04
@@ -501,8 +531,10 @@
  * @started_list: list of started requests on this endpoint
  * @lock: spinlock for endpoint request queue traversal
  * @regs: pointer to first endpoint register
+ * @trb_dma_pool: DMA pool used to allocate aligned TRB memory
  * @trb_pool: array of transaction buffers
  * @trb_pool_dma: dma address of @trb_pool
+ * @num_trbs: number of TRBs in the TRB DMA pool
  * @trb_enqueue: enqueue 'pointer' into TRB array
  * @trb_dequeue: dequeue 'pointer' into TRB array
  * @desc: usb_endpoint_descriptor pointer
@@ -527,8 +559,10 @@
 	spinlock_t		lock;
 	void __iomem		*regs;
 
+	struct dma_pool		*trb_dma_pool;
 	struct dwc3_trb		*trb_pool;
 	dma_addr_t		trb_pool_dma;
+	u32			num_trbs;
 	const struct usb_ss_ep_comp_descriptor *comp_desc;
 	struct dwc3		*dwc;
 
@@ -736,6 +770,18 @@
 	__le64	dma_adr[DWC3_MAX_HIBER_SCRATCHBUFS];
 };
 
+#define DWC3_CONTROLLER_ERROR_EVENT		0
+#define DWC3_CONTROLLER_RESET_EVENT		1
+#define DWC3_CONTROLLER_POST_RESET_EVENT	2
+#define DWC3_CORE_PM_SUSPEND_EVENT		3
+#define DWC3_CORE_PM_RESUME_EVENT		4
+#define DWC3_CONTROLLER_CONNDONE_EVENT		5
+#define DWC3_CONTROLLER_NOTIFY_OTG_EVENT	6
+#define DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT	7
+#define DWC3_CONTROLLER_RESTART_USB_SESSION	8
+
+#define MAX_INTR_STATS				10
+
 /**
  * struct dwc3 - representation of our controller
  * @ctrl_req: usb control request which is used for ep0
@@ -758,6 +804,7 @@
  * @regs_size: address space size
  * @fladj: frame length adjustment
  * @irq_gadget: peripheral controller's IRQ number
+ * @reg_phys: physical base address of dwc3 core register address space
  * @nr_scratch: number of scratch buffers
  * @u1u2: only used on revisions <1.83a for workaround
  * @maximum_speed: maximum speed requested (mainly for testing purposes)
@@ -833,6 +880,19 @@
  * 	1	- -3.5dB de-emphasis
  * 	2	- No de-emphasis
  * 	3	- Reserved
+ * @is_drd: device supports dual-role or not
+ * @err_evt_seen: previous event in queue was erratic error
+ * @usb3_u1u2_disable: if true, disable U1U2 low power modes in Superspeed mode.
+ * @in_lpm: indicates if controller is in low power mode (no clocks)
+ * @tx_fifo_size: Available RAM size for TX fifo allocation
+ * @irq: irq number
+ * @bh: tasklet which handles the interrupt
+ * @irq_cnt: total irq count
+ * @bh_completion_time: time taken for tasklet completion
+ * @bh_handled_evt_cnt: no. of events handled by tasklet per interrupt
+ * @bh_dbg_index: index for capturing bh_completion_time and bh_handled_evt_cnt
+ * @wait_linkstate: waitqueue for waiting for the link to move into the required state
+ * @vbus_draw: current to be drawn from USB
  */
 struct dwc3 {
 	struct usb_ctrlrequest	*ctrl_req;
@@ -871,6 +931,7 @@
 
 	void __iomem		*regs;
 	size_t			regs_size;
+	phys_addr_t		reg_phys;
 
 	enum usb_dr_mode	dr_mode;
 	enum usb_phy_interface	hsphy_mode;
@@ -948,6 +1009,9 @@
 	const char		*hsphy_interface;
 
 	unsigned		connected:1;
+	void (*notify_event)(struct dwc3 *, unsigned int);
+	struct work_struct	wakeup_work;
+
 	unsigned		delayed_status:1;
 	unsigned		ep0_bounced:1;
 	unsigned		ep0_expect_in:1;
@@ -978,6 +1042,37 @@
 
 	unsigned		tx_de_emphasis_quirk:1;
 	unsigned		tx_de_emphasis:2;
+	unsigned		is_drd:1;
+	/* Indicate if the gadget was powered by the otg driver */
+	unsigned		vbus_active:1;
+	/* Indicate if software connect was issued by the usb_gadget_driver */
+	unsigned		softconnect:1;
+	unsigned		nominal_elastic_buffer:1;
+	unsigned		err_evt_seen:1;
+	unsigned		usb3_u1u2_disable:1;
+	/* Indicate if need to disable controller internal clkgating */
+	unsigned		disable_clk_gating:1;
+	unsigned		enable_bus_suspend:1;
+
+	atomic_t		in_lpm;
+	int			tx_fifo_size;
+	bool			b_suspend;
+	unsigned int		vbus_draw;
+
+	/* IRQ timing statistics */
+	int			irq;
+	struct tasklet_struct	bh;
+	unsigned long		irq_cnt;
+	unsigned int		bh_completion_time[MAX_INTR_STATS];
+	unsigned int		bh_handled_evt_cnt[MAX_INTR_STATS];
+	unsigned int		bh_dbg_index;
+	ktime_t			irq_start_time[MAX_INTR_STATS];
+	ktime_t			t_pwr_evt_irq;
+	unsigned int		irq_completion_time[MAX_INTR_STATS];
+	unsigned int		irq_event_count[MAX_INTR_STATS];
+	unsigned int		irq_dbg_index;
+
+	wait_queue_head_t	wait_linkstate;
 };
 
 /* -------------------------------------------------------------------------- */
@@ -1152,17 +1247,22 @@
 #if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
 int dwc3_gadget_init(struct dwc3 *dwc);
 void dwc3_gadget_exit(struct dwc3 *dwc);
+void dwc3_gadget_restart(struct dwc3 *dwc);
 int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode);
 int dwc3_gadget_get_link_state(struct dwc3 *dwc);
 int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state);
 int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
 		struct dwc3_gadget_ep_cmd_params *params);
 int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param);
+void dwc3_gadget_enable_irq(struct dwc3 *dwc);
+void dwc3_gadget_disable_irq(struct dwc3 *dwc);
 #else
 static inline int dwc3_gadget_init(struct dwc3 *dwc)
 { return 0; }
 static inline void dwc3_gadget_exit(struct dwc3 *dwc)
 { }
+static inline void dwc3_gadget_restart(struct dwc3 *dwc)
+{ }
 static inline int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
 { return 0; }
 static inline int dwc3_gadget_get_link_state(struct dwc3 *dwc)
@@ -1177,6 +1277,10 @@
 static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc,
 		int cmd, u32 param)
 { return 0; }
+static inline void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+{ }
+static inline void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+{ }
 #endif
 
 /* power management interface */
@@ -1210,4 +1314,13 @@
 { }
 #endif
 
+int dwc3_core_init(struct dwc3 *dwc);
+int dwc3_core_pre_init(struct dwc3 *dwc);
+void dwc3_post_host_reset_core_init(struct dwc3 *dwc);
+int dwc3_event_buffers_setup(struct dwc3 *dwc);
+void dwc3_usb3_phy_suspend(struct dwc3 *dwc, int suspend);
+
+extern void dwc3_set_notifier(
+		void (*notify)(struct dwc3 *dwc3, unsigned int event));
+extern int dwc3_notify_event(struct dwc3 *dwc3, unsigned int event);
 #endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/dbm.c b/drivers/usb/dwc3/dbm.c
new file mode 100644
index 0000000..285cd5a
--- /dev/null
+++ b/drivers/usb/dwc3/dbm.c
@@ -0,0 +1,623 @@
+/*
+ * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+
+#include "dbm.h"
+
+/*
+ * USB DBM hardware registers.
+ */
+enum dbm_reg {
+	DBM_EP_CFG,
+	DBM_DATA_FIFO,
+	DBM_DATA_FIFO_SIZE,
+	DBM_DATA_FIFO_EN,
+	DBM_GEVNTADR,
+	DBM_GEVNTSIZ,
+	DBM_DBG_CNFG,
+	DBM_HW_TRB0_EP,
+	DBM_HW_TRB1_EP,
+	DBM_HW_TRB2_EP,
+	DBM_HW_TRB3_EP,
+	DBM_PIPE_CFG,
+	DBM_SOFT_RESET,
+	DBM_GEN_CFG,
+	DBM_GEVNTADR_LSB,
+	DBM_GEVNTADR_MSB,
+	DBM_DATA_FIFO_LSB,
+	DBM_DATA_FIFO_MSB,
+	DBM_DATA_FIFO_ADDR_EN,
+	DBM_DATA_FIFO_SIZE_EN,
+};
+
+struct dbm_reg_data {
+	u32 offset;
+	unsigned int ep_mult;
+};
+
+#define DBM_1_4_NUM_EP		4
+#define DBM_1_5_NUM_EP		8
+
+struct dbm {
+	void __iomem *base;
+	const struct dbm_reg_data *reg_table;
+
+	struct device		*dev;
+	struct list_head	head;
+
+	int dbm_num_eps;
+	u8 ep_num_mapping[DBM_1_5_NUM_EP];
+	bool dbm_reset_ep_after_lpm;
+
+	bool is_1p4;
+};
+
+static const struct dbm_reg_data dbm_1_4_regtable[] = {
+	[DBM_EP_CFG]		= { 0x0000, 0x4 },
+	[DBM_DATA_FIFO]		= { 0x0010, 0x4 },
+	[DBM_DATA_FIFO_SIZE]	= { 0x0020, 0x4 },
+	[DBM_DATA_FIFO_EN]	= { 0x0030, 0x0 },
+	[DBM_GEVNTADR]		= { 0x0034, 0x0 },
+	[DBM_GEVNTSIZ]		= { 0x0038, 0x0 },
+	[DBM_DBG_CNFG]		= { 0x003C, 0x0 },
+	[DBM_HW_TRB0_EP]	= { 0x0040, 0x4 },
+	[DBM_HW_TRB1_EP]	= { 0x0050, 0x4 },
+	[DBM_HW_TRB2_EP]	= { 0x0060, 0x4 },
+	[DBM_HW_TRB3_EP]	= { 0x0070, 0x4 },
+	[DBM_PIPE_CFG]		= { 0x0080, 0x0 },
+	[DBM_SOFT_RESET]	= { 0x0084, 0x0 },
+	[DBM_GEN_CFG]		= { 0x0088, 0x0 },
+	[DBM_GEVNTADR_LSB]	= { 0x0098, 0x0 },
+	[DBM_GEVNTADR_MSB]	= { 0x009C, 0x0 },
+	[DBM_DATA_FIFO_LSB]	= { 0x00A0, 0x8 },
+	[DBM_DATA_FIFO_MSB]	= { 0x00A4, 0x8 },
+};
+
+static const struct dbm_reg_data dbm_1_5_regtable[] = {
+	[DBM_EP_CFG]		= { 0x0000, 0x4 },
+	[DBM_DATA_FIFO]		= { 0x0280, 0x4 },
+	[DBM_DATA_FIFO_SIZE]	= { 0x0080, 0x4 },
+	[DBM_DATA_FIFO_EN]	= { 0x026C, 0x0 },
+	[DBM_GEVNTADR]		= { 0x0270, 0x0 },
+	[DBM_GEVNTSIZ]		= { 0x0268, 0x0 },
+	[DBM_DBG_CNFG]		= { 0x0208, 0x0 },
+	[DBM_HW_TRB0_EP]	= { 0x0220, 0x4 },
+	[DBM_HW_TRB1_EP]	= { 0x0230, 0x4 },
+	[DBM_HW_TRB2_EP]	= { 0x0240, 0x4 },
+	[DBM_HW_TRB3_EP]	= { 0x0250, 0x4 },
+	[DBM_PIPE_CFG]		= { 0x0274, 0x0 },
+	[DBM_SOFT_RESET]	= { 0x020C, 0x0 },
+	[DBM_GEN_CFG]		= { 0x0210, 0x0 },
+	[DBM_GEVNTADR_LSB]	= { 0x0260, 0x0 },
+	[DBM_GEVNTADR_MSB]	= { 0x0264, 0x0 },
+	[DBM_DATA_FIFO_LSB]	= { 0x0100, 0x8 },
+	[DBM_DATA_FIFO_MSB]	= { 0x0104, 0x8 },
+	[DBM_DATA_FIFO_ADDR_EN]	= { 0x0200, 0x0 },
+	[DBM_DATA_FIFO_SIZE_EN]	= { 0x0204, 0x0 },
+};
+
+static LIST_HEAD(dbm_list);
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void msm_dbm_write_ep_reg_field(struct dbm *dbm,
+					      enum dbm_reg reg, int ep,
+					      const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	u32 tmp = ioread32(dbm->base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, dbm->base + offset);
+}
+
+#define msm_dbm_write_reg_field(d, r, m, v) \
+	msm_dbm_write_ep_reg_field(d, r, 0, m, v)
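msm_dbm_write_ep_reg_field() derives the field position from the lowest set bit of the mask and then does a read-modify-write of the register. A standalone sketch of that bit arithmetic, with __builtin_ctz standing in for the kernel's find_first_bit() and an extra `& mask` guard added for clarity:

#include <stdio.h>
#include <stdint.h>

/* write `val` into the register field selected by `mask`, preserving the rest */
static uint32_t write_field(uint32_t reg, uint32_t mask, uint32_t val)
{
	unsigned int shift = __builtin_ctz(mask);	/* mask must be non-zero */

	reg &= ~mask;				/* clear the field */
	reg |= (val << shift) & mask;		/* place the new value */
	return reg;
}

int main(void)
{
	/* e.g. a 4-bit field at bits [11:8] */
	uint32_t reg = 0xdeadbeef;

	reg = write_field(reg, 0x0f << 8, 0x5);
	printf("0x%08x\n", reg);	/* bits [11:8] now hold 0x5 */
	return 0;
}

The driver helper relies on callers passing a value that already fits the field; the extra mask in the sketch just makes that assumption explicit.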
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ *
+ * @return u32
+ */
+static inline u32 msm_dbm_read_ep_reg(struct dbm *dbm, enum dbm_reg reg, int ep)
+{
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	return ioread32(dbm->base + offset);
+}
+
+#define msm_dbm_read_reg(d, r) msm_dbm_read_ep_reg(d, r, 0)
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @dbm - DBM specific data
+ * @reg - DBM register, used to look up the offset value
+ * @ep - endpoint number
+ * @val - value to write
+ *
+ */
+static inline void msm_dbm_write_ep_reg(struct dbm *dbm, enum dbm_reg reg,
+					int ep, u32 val)
+{
+	u32 offset = dbm->reg_table[reg].offset +
+			(dbm->reg_table[reg].ep_mult * ep);
+	iowrite32(val, dbm->base + offset);
+}
+
+#define msm_dbm_write_reg(d, r, v) msm_dbm_write_ep_reg(d, r, 0, v)
+
+/**
+ * Return DBM EP number according to usb endpoint number.
+ *
+ */
+static int find_matching_dbm_ep(struct dbm *dbm, u8 usb_ep)
+{
+	int i;
+
+	for (i = 0; i < dbm->dbm_num_eps; i++)
+		if (dbm->ep_num_mapping[i] == usb_ep)
+			return i;
+
+	pr_err("%s: No DBM EP matches USB EP %d\n", __func__, usb_ep);
+	return -ENODEV; /* Not found */
+}
+
+
+/**
+ * Reset the DBM registers upon initialization.
+ *
+ */
+int dbm_soft_reset(struct dbm *dbm, bool reset)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("%s DBM reset\n", (reset ? "Enter" : "Exit"));
+
+	msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET, DBM_SFT_RST_MASK, reset);
+
+	return 0;
+}
+
+/**
+ * Soft reset specific DBM ep.
+ * This function is called by the function driver upon events
+ * such as transfer aborting, USB re-enumeration and USB
+ * disconnection.
+ *
+ * @dbm_ep - DBM ep number.
+ * @enter_reset - should we enter a reset state or get out of it.
+ *
+ */
+static int ep_soft_reset(struct dbm *dbm, u8 dbm_ep, bool enter_reset)
+{
+	pr_debug("Setting DBM ep %d reset to %d\n", dbm_ep, enter_reset);
+
+	if (dbm_ep >= dbm->dbm_num_eps) {
+		pr_err("Invalid DBM ep index %d\n", dbm_ep);
+		return -ENODEV;
+	}
+
+	if (enter_reset) {
+		msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET,
+			DBM_SFT_RST_EPS_MASK & 1 << dbm_ep, 1);
+	} else {
+		msm_dbm_write_reg_field(dbm, DBM_SOFT_RESET,
+			DBM_SFT_RST_EPS_MASK & 1 << dbm_ep, 0);
+	}
+
+	return 0;
+}
+
+
+/**
+ * Soft reset specific DBM ep (by USB EP number).
+ * This function is called by the function driver upon events
+ * such as transfer aborting, USB re-enumeration and USB
+ * disconnection.
+ *
+ * The function relies on ep_soft_reset() for checking
+ * the legality of the resulting DBM ep number.
+ *
+ * @usb_ep - USB ep number.
+ * @enter_reset - should we enter a reset state or get out of it.
+ *
+ */
+int dbm_ep_soft_reset(struct dbm *dbm, u8 usb_ep, bool enter_reset)
+{
+	int dbm_ep;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	pr_debug("Setting USB ep %d reset to %d\n", usb_ep, enter_reset);
+	return ep_soft_reset(dbm, dbm_ep, enter_reset);
+}
+
+/**
+ * Configure a USB DBM ep to work in BAM mode.
+ *
+ * @usb_ep - USB physical EP number.
+ * @bam_pipe - BAM pipe number.
+ * @producer - producer/consumer.
+ * @disable_wb - disable write back to system memory.
+ * @internal_mem - use internal USB memory for data fifo.
+ * @ioc - enable interrupt on completion.
+ *
+ * @return int - DBM ep number.
+ */
+int dbm_ep_config(struct dbm *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
+		  bool disable_wb, bool internal_mem, bool ioc)
+{
+	int dbm_ep;
+	u32 ep_cfg;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Configuring DBM ep\n");
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	/* Due to HW issue, EP 7 can be set as IN EP only */
+	if (!dbm->is_1p4 && dbm_ep == 7 && producer) {
+		pr_err("last DBM EP can't be OUT EP\n");
+		return -ENODEV;
+	}
+
+	/* First, reset the dbm endpoint */
+	ep_soft_reset(dbm, dbm_ep, 0);
+
+	/* Set ioc bit for dbm_ep if needed */
+	msm_dbm_write_reg_field(dbm, DBM_DBG_CNFG,
+		DBM_ENABLE_IOC_MASK & 1 << dbm_ep, ioc ? 1 : 0);
+
+	ep_cfg = (producer ? DBM_PRODUCER : 0) |
+		(disable_wb ? DBM_DISABLE_WB : 0) |
+		(internal_mem ? DBM_INT_RAM_ACC : 0);
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep,
+		DBM_PRODUCER | DBM_DISABLE_WB | DBM_INT_RAM_ACC, ep_cfg >> 8);
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, USB3_EPNUM,
+		usb_ep);
+
+	if (dbm->is_1p4) {
+		msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep,
+				DBM_BAM_PIPE_NUM, bam_pipe);
+		msm_dbm_write_reg_field(dbm, DBM_PIPE_CFG, 0x000000ff, 0xe4);
+	}
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_EP_CFG, dbm_ep, DBM_EN_EP, 1);
+
+	return dbm_ep;
+}
+
+/**
+ * Return number of configured DBM endpoints.
+ */
+int dbm_get_num_of_eps_configured(struct dbm *dbm)
+{
+	int i;
+	int count = 0;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	for (i = 0; i < dbm->dbm_num_eps; i++)
+		if (dbm->ep_num_mapping[i])
+			count++;
+
+	return count;
+}
+
+/**
+ * Configure a USB DBM ep to work in normal mode.
+ *
+ * @usb_ep - USB ep number.
+ *
+ */
+int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep)
+{
+	int dbm_ep;
+	u32 data;
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Unconfiguring DBM ep\n");
+
+	dbm_ep = find_matching_dbm_ep(dbm, usb_ep);
+
+	if (dbm_ep < 0) {
+		pr_err("usb ep index %d has no corresponding dbm ep\n", usb_ep);
+		return -ENODEV;
+	}
+
+	dbm->ep_num_mapping[dbm_ep] = 0;
+
+	data = msm_dbm_read_ep_reg(dbm, DBM_EP_CFG, dbm_ep);
+	data &= (~0x1);
+	msm_dbm_write_ep_reg(dbm, DBM_EP_CFG, dbm_ep, data);
+
+	/* Reset the dbm endpoint */
+	ep_soft_reset(dbm, dbm_ep, true);
+	/*
+	 * The necessary delay between asserting and deasserting the dbm ep
+	 * reset is based on the number of active endpoints. If there is more
+	 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
+	 * delay will suffice.
+	 *
+	 * As this function can be called in atomic context, sleeping delay
+	 * variants cannot be used here, even for the 1 msec case.
+	 */
+	if (dbm_get_num_of_eps_configured(dbm) > 1)
+		udelay(1000);
+	else
+		udelay(10);
+	ep_soft_reset(dbm, dbm_ep, false);
+
+	return 0;
+}
+
+/**
+ * Configure the DBM with the USB3 core event buffer.
+ * This function is called by the SNPS UDC upon initialization.
+ *
+ * @addr_lo - lower 32 bits of the event buffer address.
+ * @addr_hi - upper 32 bits of the event buffer address.
+ * @size - size of the event buffer.
+ *
+ */
+int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi, int size)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	pr_debug("Configuring event buffer\n");
+
+	if (size < 0) {
+		pr_err("Invalid size. size = %d\n", size);
+		return -EINVAL;
+	}
+
+	/* If the event buffer is already configured, do nothing. */
+	if (msm_dbm_read_reg(dbm, DBM_GEVNTSIZ))
+		return 0;
+
+	if (!dbm->is_1p4 || sizeof(phys_addr_t) > sizeof(u32)) {
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR_LSB, addr_lo);
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR_MSB, addr_hi);
+	} else {
+		msm_dbm_write_reg(dbm, DBM_GEVNTADR, addr_lo);
+	}
+
+	msm_dbm_write_reg_field(dbm, DBM_GEVNTSIZ, DBM_GEVNTSIZ_MASK, size);
+
+	return 0;
+}
+
+
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+				u32 size, u8 dst_pipe_idx)
+{
+	u8 dbm_ep = dst_pipe_idx;
+	u32 lo = lower_32_bits(addr);
+	u32 hi = upper_32_bits(addr);
+
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return -EPERM;
+	}
+
+	dbm->ep_num_mapping[dbm_ep] = dep_num;
+
+	if (!dbm->is_1p4 || sizeof(addr) > sizeof(u32)) {
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO_LSB, dbm_ep, lo);
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO_MSB, dbm_ep, hi);
+	} else {
+		msm_dbm_write_ep_reg(dbm, DBM_DATA_FIFO, dbm_ep, addr);
+	}
+
+	msm_dbm_write_ep_reg_field(dbm, DBM_DATA_FIFO_SIZE, dbm_ep,
+		DBM_DATA_FIFO_SIZE_MASK, size);
+
+	return 0;
+}
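dbm_data_fifo_config() above splits a possibly 64-bit physical FIFO address across the _LSB/_MSB register pair. A standalone sketch of the split, using plain shifts in place of the kernel's lower_32_bits()/upper_32_bits() helpers and an arbitrary example address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t addr = 0x0000001234567890ULL;	/* example DMA address */
	uint32_t lo = (uint32_t)(addr & 0xffffffffULL);
	uint32_t hi = (uint32_t)(addr >> 32);

	/* these two values would land in the _LSB and _MSB registers */
	printf("lo=0x%08x hi=0x%08x\n", (unsigned int)lo, (unsigned int)hi);
	return 0;
}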
+
+void dbm_set_speed(struct dbm *dbm, bool speed)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return;
+	}
+
+	msm_dbm_write_reg(dbm, DBM_GEN_CFG, speed);
+}
+
+void dbm_enable(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return;
+	}
+
+	if (dbm->is_1p4) /* no-op */
+		return;
+
+	msm_dbm_write_reg(dbm, DBM_DATA_FIFO_ADDR_EN, 0x000000FF);
+	msm_dbm_write_reg(dbm, DBM_DATA_FIFO_SIZE_EN, 0x000000FF);
+}
+
+bool dbm_reset_ep_after_lpm(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return false;
+	}
+
+	return dbm->dbm_reset_ep_after_lpm;
+}
+
+bool dbm_l1_lpm_interrupt(struct dbm *dbm)
+{
+	if (!dbm) {
+		pr_err("%s: dbm pointer is NULL!\n", __func__);
+		return false;
+	}
+
+	return !dbm->is_1p4;
+}
+
+static const struct of_device_id msm_dbm_id_table[] = {
+	{ .compatible = "qcom,usb-dbm-1p4", .data = &dbm_1_4_regtable },
+	{ .compatible = "qcom,usb-dbm-1p5", .data = &dbm_1_5_regtable },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, msm_dbm_id_table);
+
+static int msm_dbm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node;
+	const struct of_device_id *match;
+	struct dbm *dbm;
+	struct resource *res;
+
+	dbm = devm_kzalloc(&pdev->dev, sizeof(*dbm), GFP_KERNEL);
+	if (!dbm)
+		return -ENOMEM;
+
+	match = of_match_node(msm_dbm_id_table, node);
+	if (!match) {
+		dev_err(&pdev->dev, "Unsupported DBM module\n");
+		return -ENODEV;
+	}
+	dbm->reg_table = match->data;
+
+	if (!strcmp(match->compatible, "qcom,usb-dbm-1p4")) {
+		dbm->dbm_num_eps = DBM_1_4_NUM_EP;
+		dbm->is_1p4 = true;
+	} else {
+		dbm->dbm_num_eps = DBM_1_5_NUM_EP;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "missing memory base resource\n");
+		return -ENODEV;
+	}
+
+	dbm->base = devm_ioremap_nocache(&pdev->dev, res->start,
+		resource_size(res));
+	if (!dbm->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		return -ENOMEM;
+	}
+
+	dbm->dbm_reset_ep_after_lpm = of_property_read_bool(node,
+			"qcom,reset-ep-after-lpm-resume");
+
+	dbm->dev = &pdev->dev;
+
+	platform_set_drvdata(pdev, dbm);
+
+	list_add_tail(&dbm->head, &dbm_list);
+
+	return 0;
+}
+
+static struct platform_driver msm_dbm_driver = {
+	.probe		= msm_dbm_probe,
+	.driver = {
+		.name	= "msm-usb-dbm",
+		.of_match_table = of_match_ptr(msm_dbm_id_table),
+	},
+};
+
+module_platform_driver(msm_dbm_driver);
+
+static struct dbm *of_usb_find_dbm(struct device_node *node)
+{
+	struct dbm  *dbm;
+
+	list_for_each_entry(dbm, &dbm_list, head) {
+		if (node != dbm->dev->of_node)
+			continue;
+		return dbm;
+	}
+	return ERR_PTR(-ENODEV);
+}
+
+struct dbm *usb_get_dbm_by_phandle(struct device *dev, const char *phandle)
+{
+	struct device_node *node;
+
+	if (!dev->of_node) {
+		dev_dbg(dev, "device does not have a device node entry\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	node = of_parse_phandle(dev->of_node, phandle, 0);
+	if (!node) {
+		dev_dbg(dev, "failed to get %s phandle in %s node\n", phandle,
+			dev->of_node->full_name);
+		return ERR_PTR(-ENODEV);
+	}
+
+	return of_usb_find_dbm(node);
+}
+
+MODULE_DESCRIPTION("MSM USB DBM driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/dwc3/dbm.h b/drivers/usb/dwc3/dbm.h
new file mode 100644
index 0000000..260afc2
--- /dev/null
+++ b/drivers/usb/dwc3/dbm.h
@@ -0,0 +1,74 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DBM_H
+#define __DBM_H
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+/*
+ * USB DBM hardware register bitmasks.
+ */
+/* DBM_EP_CFG */
+#define DBM_EN_EP		0x00000001
+#define USB3_EPNUM		0x0000003E
+#define DBM_BAM_PIPE_NUM	0x000000C0
+#define DBM_PRODUCER		0x00000100
+#define DBM_DISABLE_WB		0x00000200
+#define DBM_INT_RAM_ACC		0x00000400
+
+/* DBM_DATA_FIFO_SIZE */
+#define DBM_DATA_FIFO_SIZE_MASK	0x0000ffff
+
+/* DBM_GEVNTSIZ */
+#define DBM_GEVNTSIZ_MASK	0x0000ffff
+
+/* DBM_DBG_CNFG */
+#define DBM_ENABLE_IOC_MASK	0x0000000f
+
+/* DBM_SOFT_RESET */
+#define DBM_SFT_RST_EP0		0x00000001
+#define DBM_SFT_RST_EP1		0x00000002
+#define DBM_SFT_RST_EP2		0x00000004
+#define DBM_SFT_RST_EP3		0x00000008
+#define DBM_SFT_RST_EPS_MASK	0x0000000F
+#define DBM_SFT_RST_MASK	0x80000000
+#define DBM_EN_MASK		0x00000002
+
+/* DBM TRB configurations */
+#define DBM_TRB_BIT		0x80000000
+#define DBM_TRB_DATA_SRC	0x40000000
+#define DBM_TRB_DMA		0x20000000
+#define DBM_TRB_EP_NUM(ep)	((ep) << 24)
+
+struct dbm;
+
+struct dbm *usb_get_dbm_by_phandle(struct device *dev, const char *phandle);
+
+int dbm_soft_reset(struct dbm *dbm, bool enter_reset);
+int dbm_ep_config(struct dbm  *dbm, u8 usb_ep, u8 bam_pipe, bool producer,
+			bool disable_wb, bool internal_mem, bool ioc);
+int dbm_ep_unconfig(struct dbm *dbm, u8 usb_ep);
+int dbm_get_num_of_eps_configured(struct dbm *dbm);
+int dbm_event_buffer_config(struct dbm *dbm, u32 addr_lo, u32 addr_hi,
+				int size);
+int dbm_data_fifo_config(struct dbm *dbm, u8 dep_num, phys_addr_t addr,
+				u32 size, u8 dst_pipe_idx);
+void dbm_set_speed(struct dbm *dbm, bool speed);
+void dbm_enable(struct dbm *dbm);
+int dbm_ep_soft_reset(struct dbm *dbm, u8 usb_ep, bool enter_reset);
+bool dbm_reset_ep_after_lpm(struct dbm *dbm);
+bool dbm_l1_lpm_interrupt(struct dbm *dbm);
+
+#endif /* __DBM_H */
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index 33ab2a2..c289d27 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -313,11 +313,11 @@
 void dwc3_trace(void (*trace)(struct va_format *), const char *fmt, ...);
 
 #ifdef CONFIG_DEBUG_FS
-extern void dwc3_debugfs_init(struct dwc3 *);
+extern int dwc3_debugfs_init(struct dwc3 *);
 extern void dwc3_debugfs_exit(struct dwc3 *);
 #else
-static inline void dwc3_debugfs_init(struct dwc3 *d)
-{  }
+static inline int dwc3_debugfs_init(struct dwc3 *d)
+{ return 0; }
 static inline void dwc3_debugfs_exit(struct dwc3 *d)
 {  }
 #endif
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index 31926dd..4444888 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -843,23 +843,25 @@
 	}
 }
 
-void dwc3_debugfs_init(struct dwc3 *dwc)
+int dwc3_debugfs_init(struct dwc3 *dwc)
 {
 	struct dentry		*root;
 	struct dentry           *file;
+	int			ret;
 
 	root = debugfs_create_dir(dev_name(dwc->dev), NULL);
 	if (IS_ERR_OR_NULL(root)) {
 		if (!root)
 			dev_err(dwc->dev, "Can't create debugfs root\n");
-		return;
+		ret = -ENOMEM;
+		goto err0;
 	}
 	dwc->root = root;
 
 	dwc->regset = kzalloc(sizeof(*dwc->regset), GFP_KERNEL);
 	if (!dwc->regset) {
-		debugfs_remove_recursive(root);
-		return;
+		ret = -ENOMEM;
+		goto err1;
 	}
 
 	dwc->regset->regs = dwc3_regs;
@@ -891,6 +893,13 @@
 
 		dwc3_debugfs_create_endpoint_dirs(dwc, root);
 	}
+
+	return 0;
+
+err1:
+	debugfs_remove_recursive(root);
+err0:
+	return ret;
 }
 
 void dwc3_debugfs_exit(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
new file mode 100644
index 0000000..c9a4e45
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -0,0 +1,3451 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/cpu.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/list.h>
+#include <linux/uaccess.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_wakeup.h>
+#include <linux/power_supply.h>
+#include <linux/cdev.h>
+#include <linux/completion.h>
+#include <linux/msm-bus.h>
+#include <linux/irq.h>
+#include <linux/extcon.h>
+#include <linux/reset.h>
+
+#include "power.h"
+#include "core.h"
+#include "gadget.h"
+#include "dbm.h"
+#include "debug.h"
+#include "xhci.h"
+
+/* timeout to wait for USB cable status notification (in ms) */
+#define SM_INIT_TIMEOUT 30000
+
+/* AHB2PHY register offsets */
+#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
+
+/* AHB2PHY read/write wait value */
+#define ONE_READ_WRITE_WAIT 0x11
+
+/* CPU to which the USB interrupt is affined */
+static int cpu_to_affin;
+module_param(cpu_to_affin, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(cpu_to_affin, "affin usb irq to this cpu");
+
+/* XHCI registers */
+#define USB3_HCSPARAMS1		(0x4)
+#define USB3_PORTSC		(0x420)
+
+/*
+ * USB QSCRATCH hardware registers
+ */
+#define QSCRATCH_REG_OFFSET	(0x000F8800)
+#define QSCRATCH_GENERAL_CFG	(QSCRATCH_REG_OFFSET + 0x08)
+#define CGCTL_REG		(QSCRATCH_REG_OFFSET + 0x28)
+#define PWR_EVNT_IRQ_STAT_REG    (QSCRATCH_REG_OFFSET + 0x58)
+#define PWR_EVNT_IRQ_MASK_REG    (QSCRATCH_REG_OFFSET + 0x5C)
+
+#define PWR_EVNT_POWERDOWN_IN_P3_MASK		BIT(2)
+#define PWR_EVNT_POWERDOWN_OUT_P3_MASK		BIT(3)
+#define PWR_EVNT_LPM_IN_L2_MASK			BIT(4)
+#define PWR_EVNT_LPM_OUT_L2_MASK		BIT(5)
+#define PWR_EVNT_LPM_OUT_L1_MASK		BIT(13)
+
+/* QSCRATCH_GENERAL_CFG register bit offset */
+#define PIPE_UTMI_CLK_SEL	BIT(0)
+#define PIPE3_PHYSTATUS_SW	BIT(3)
+#define PIPE_UTMI_CLK_DIS	BIT(8)
+
+#define HS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x10)
+#define UTMI_OTG_VBUS_VALID	BIT(20)
+#define SW_SESSVLD_SEL		BIT(28)
+
+#define SS_PHY_CTRL_REG		(QSCRATCH_REG_OFFSET + 0x30)
+#define LANE0_PWR_PRESENT	BIT(24)
+
+/* GSI related registers */
+#define GSI_TRB_ADDR_BIT_53_MASK	(1 << 21)
+#define GSI_TRB_ADDR_BIT_55_MASK	(1 << 23)
+
+#define	GSI_GENERAL_CFG_REG		(QSCRATCH_REG_OFFSET + 0xFC)
+#define	GSI_RESTART_DBL_PNTR_MASK	BIT(20)
+#define	GSI_CLK_EN_MASK			BIT(12)
+#define	BLOCK_GSI_WR_GO_MASK		BIT(1)
+#define	GSI_EN_MASK			BIT(0)
+
+#define GSI_DBL_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x110) + (n*4))
+#define GSI_DBL_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x120) + (n*4))
+#define GSI_RING_BASE_ADDR_L(n)	((QSCRATCH_REG_OFFSET + 0x130) + (n*4))
+#define GSI_RING_BASE_ADDR_H(n)	((QSCRATCH_REG_OFFSET + 0x140) + (n*4))
+
+#define	GSI_IF_STS	(QSCRATCH_REG_OFFSET + 0x1A4)
+#define	GSI_WR_CTRL_STATE_MASK	BIT(15)
+
+struct dwc3_msm_req_complete {
+	struct list_head list_item;
+	struct usb_request *req;
+	void (*orig_complete)(struct usb_ep *ep,
+			      struct usb_request *req);
+};
+
+enum dwc3_id_state {
+	DWC3_ID_GROUND = 0,
+	DWC3_ID_FLOAT,
+};
+
+/* for type c cable */
+enum plug_orientation {
+	ORIENTATION_NONE,
+	ORIENTATION_CC1,
+	ORIENTATION_CC2,
+};
+
+/* Input bits to state machine (mdwc->inputs) */
+
+#define ID			0
+#define B_SESS_VLD		1
+#define B_SUSPEND		2
+
+struct dwc3_msm {
+	struct device *dev;
+	void __iomem *base;
+	void __iomem *ahb2phy_base;
+	struct platform_device	*dwc3;
+	const struct usb_ep_ops *original_ep_ops[DWC3_ENDPOINTS_NUM];
+	struct list_head req_complete_list;
+	struct clk		*xo_clk;
+	struct clk		*core_clk;
+	long			core_clk_rate;
+	struct clk		*iface_clk;
+	struct clk		*sleep_clk;
+	struct clk		*utmi_clk;
+	unsigned int		utmi_clk_rate;
+	struct clk		*utmi_clk_src;
+	struct clk		*bus_aggr_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*core_reset;
+	struct regulator	*dwc3_gdsc;
+
+	struct usb_phy		*hs_phy, *ss_phy;
+
+	struct dbm		*dbm;
+
+	/* VBUS regulator for host mode */
+	struct regulator	*vbus_reg;
+	int			vbus_retry_count;
+	bool			resume_pending;
+	atomic_t                pm_suspended;
+	int			hs_phy_irq;
+	int			ss_phy_irq;
+	struct work_struct	resume_work;
+	struct work_struct	restart_usb_work;
+	bool			in_restart;
+	struct workqueue_struct *dwc3_wq;
+	struct delayed_work	sm_work;
+	unsigned long		inputs;
+	unsigned int		max_power;
+	bool			charging_disabled;
+	enum usb_otg_state	otg_state;
+	struct work_struct	bus_vote_w;
+	unsigned int		bus_vote;
+	u32			bus_perf_client;
+	struct msm_bus_scale_pdata	*bus_scale_table;
+	struct power_supply	*usb_psy;
+	bool			in_host_mode;
+	unsigned int		tx_fifo_size;
+	bool			vbus_active;
+	bool			suspend;
+	bool			disable_host_mode_pm;
+	enum dwc3_id_state	id_state;
+	unsigned long		lpm_flags;
+#define MDWC3_SS_PHY_SUSPEND		BIT(0)
+#define MDWC3_ASYNC_IRQ_WAKE_CAPABILITY	BIT(1)
+#define MDWC3_POWER_COLLAPSE		BIT(2)
+
+	unsigned int		irq_to_affin;
+	struct notifier_block	dwc3_cpu_notifier;
+
+	struct extcon_dev	*extcon_vbus;
+	struct extcon_dev	*extcon_id;
+	struct notifier_block	vbus_nb;
+	struct notifier_block	id_nb;
+
+	int			pwr_event_irq;
+	atomic_t                in_p3;
+	unsigned int		lpm_to_suspend_delay;
+	bool			init;
+	enum plug_orientation	typec_orientation;
+};
+
+#define USB_HSPHY_3P3_VOL_MIN		3050000 /* uV */
+#define USB_HSPHY_3P3_VOL_MAX		3300000 /* uV */
+#define USB_HSPHY_3P3_HPM_LOAD		16000	/* uA */
+
+#define USB_HSPHY_1P8_VOL_MIN		1800000 /* uV */
+#define USB_HSPHY_1P8_VOL_MAX		1800000 /* uV */
+#define USB_HSPHY_1P8_HPM_LOAD		19000	/* uA */
+
+#define USB_SSPHY_1P8_VOL_MIN		1800000 /* uV */
+#define USB_SSPHY_1P8_VOL_MAX		1800000 /* uV */
+#define USB_SSPHY_1P8_HPM_LOAD		23000	/* uA */
+
+#define DSTS_CONNECTSPD_SS		0x4
+
+
+static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc);
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA);
+
+/**
+ *
+ * Read register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ *
+ * @return u32
+ */
+static inline u32 dwc3_msm_read_reg(void *base, u32 offset)
+{
+	u32 val = ioread32(base + offset);
+	return val;
+}
+
+/**
+ * Read register masked field with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ *
+ * @return u32
+ */
+static inline u32 dwc3_msm_read_reg_field(void *base,
+					  u32 offset,
+					  const u32 mask)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 val = ioread32(base + offset);
+
+	val &= mask;		/* clear other bits */
+	val >>= shift;
+	return val;
+}
+
+/**
+ *
+ * Write register with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_reg(void *base, u32 offset, u32 val)
+{
+	iowrite32(val, base + offset);
+}
+
+/**
+ * Write register masked field with debug info.
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask.
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_reg_field(void *base, u32 offset,
+					    const u32 mask, u32 val)
+{
+	u32 shift = find_first_bit((void *)&mask, 32);
+	u32 tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* clear written bits */
+	val = tmp | (val << shift);
+	iowrite32(val, base + offset);
+}
+
+/**
+ * Write register and read back masked value to confirm it is written
+ *
+ * @base - DWC3 base virtual address.
+ * @offset - register offset.
+ * @mask - register bitmask specifying what should be updated
+ * @val - value to write.
+ *
+ */
+static inline void dwc3_msm_write_readback(void *base, u32 offset,
+					    const u32 mask, u32 val)
+{
+	u32 write_val, tmp = ioread32(base + offset);
+
+	tmp &= ~mask;		/* retain other bits */
+	write_val = tmp | val;
+
+	iowrite32(write_val, base + offset);
+
+	/* Read back to see if val was written */
+	tmp = ioread32(base + offset);
+	tmp &= mask;		/* clear other bits */
+
+	if (tmp != val)
+		pr_err("%s: write: %x to QSCRATCH: %x FAILED\n",
+			__func__, val, offset);
+}
+
+static bool dwc3_msm_is_host_superspeed(struct dwc3_msm *mdwc)
+{
+	int i, num_ports;
+	u32 reg;
+
+	reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+	num_ports = HCS_MAX_PORTS(reg);
+
+	for (i = 0; i < num_ports; i++) {
+		reg = dwc3_msm_read_reg(mdwc->base, USB3_PORTSC + i*0x10);
+		if ((reg & PORT_PE) && DEV_SUPERSPEED(reg))
+			return true;
+	}
+
+	return false;
+}
+
+static inline bool dwc3_msm_is_dev_superspeed(struct dwc3_msm *mdwc)
+{
+	u8 speed;
+
+	speed = dwc3_msm_read_reg(mdwc->base, DWC3_DSTS) & DWC3_DSTS_CONNECTSPD;
+	return !!(speed & DSTS_CONNECTSPD_SS);
+}
+
+static inline bool dwc3_msm_is_superspeed(struct dwc3_msm *mdwc)
+{
+	if (mdwc->in_host_mode)
+		return dwc3_msm_is_host_superspeed(mdwc);
+
+	return dwc3_msm_is_dev_superspeed(mdwc);
+}
+
+#if IS_ENABLED(CONFIG_USB_DWC3_GADGET) || IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)
+/**
+ * Configure the DBM with the BAM's data fifo.
+ * This function is called by the USB BAM Driver
+ * upon initialization.
+ *
+ * @ep - pointer to usb endpoint.
+ * @addr - address of data fifo.
+ * @size - size of data fifo.
+ *
+ */
+int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+			 u32 size, u8 dst_pipe_idx)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+
+	return	dbm_data_fifo_config(mdwc->dbm, dep->number, addr, size,
+						dst_pipe_idx);
+}
+
+
+/**
+* Clean up the MSM endpoint state when a request completes, then call the
+* original request completion handler.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to usb_request instance.
+*/
+static void dwc3_msm_req_complete_func(struct usb_ep *ep,
+				       struct usb_request *request)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_msm_req_complete *req_complete = NULL;
+
+	/* Find original request complete function and remove it from list */
+	list_for_each_entry(req_complete, &mdwc->req_complete_list, list_item) {
+		if (req_complete->req == request)
+			break;
+	}
+	if (!req_complete || req_complete->req != request) {
+		dev_err(dep->dwc->dev, "%s: could not find the request\n",
+					__func__);
+		return;
+	}
+	list_del(&req_complete->list_item);
+
+	/*
+	 * Release one more TRB to the pool, since the DBM queue took 2 TRBs
+	 * (normal and link), and the dwc3/gadget.c :: dwc3_gadget_giveback
+	 * released only one.
+	 */
+	dep->trb_dequeue++;
+
+	/* Unconfigure dbm ep */
+	dbm_ep_unconfig(mdwc->dbm, dep->number);
+
+	/*
+	 * If this is the last endpoint we unconfigured, then also reset
+	 * the event buffers, unless we are unconfiguring the ep due to lpm,
+	 * in which case the event buffer only gets reset during the
+	 * block reset.
+	 */
+	if (dbm_get_num_of_eps_configured(mdwc->dbm) == 0 &&
+		!dbm_reset_ep_after_lpm(mdwc->dbm))
+		dbm_event_buffer_config(mdwc->dbm, 0, 0, 0);
+
+	/*
+	 * Call original complete function, notice that dwc->lock is already
+	 * taken by the caller of this function (dwc3_gadget_giveback()).
+	 */
+	request->complete = req_complete->orig_complete;
+	if (request->complete)
+		request->complete(ep, request);
+
+	kfree(req_complete);
+}
+
+
+/**
+* Helper function to reset a DBM endpoint.
+*
+* @mdwc - pointer to dwc3_msm instance.
+* @dep - pointer to dwc3_ep instance.
+*
+* @return int - 0 on success, negative on error.
+*/
+static int __dwc3_msm_dbm_ep_reset(struct dwc3_msm *mdwc, struct dwc3_ep *dep)
+{
+	int ret;
+
+	dev_dbg(mdwc->dev, "Resetting dbm endpoint %d\n", dep->number);
+
+	/* Reset the dbm endpoint */
+	ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, true);
+	if (ret) {
+		dev_err(mdwc->dev, "%s: failed to assert dbm ep reset\n",
+				__func__);
+		return ret;
+	}
+
+	/*
+	 * The necessary delay between asserting and deasserting the dbm ep
+	 * reset is based on the number of active endpoints. If there is more
+	 * than one endpoint, a 1 msec delay is required. Otherwise, a shorter
+	 * delay will suffice.
+	 */
+	if (dbm_get_num_of_eps_configured(mdwc->dbm) > 1)
+		usleep_range(1000, 1200);
+	else
+		udelay(10);
+	ret = dbm_ep_soft_reset(mdwc->dbm, dep->number, false);
+	if (ret) {
+		dev_err(mdwc->dev, "%s: failed to deassert dbm ep reset\n",
+				__func__);
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+* Reset the DBM endpoint which is linked to the given USB endpoint.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+* @return int - 0 on success, negative on error.
+*/
+
+int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	return __dwc3_msm_dbm_ep_reset(mdwc, dep);
+}
+EXPORT_SYMBOL(msm_dwc3_reset_dbm_ep);
+
+
+/**
+* Helper function.
+* See the header of the dwc3_msm_ep_queue function.
+*
+* @dwc3_ep - pointer to dwc3_ep instance.
+* @req - pointer to dwc3_request instance.
+*
+* @return int - 0 on success, negative on error.
+*/
+static int __dwc3_msm_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
+{
+	struct dwc3_trb *trb;
+	struct dwc3_trb *trb_link;
+	struct dwc3_gadget_ep_cmd_params params;
+	u32 cmd;
+	int ret = 0;
+
+	/*
+	 * Push the request to dep->started_list to indicate that it was
+	 * issued with a Start Transfer command. The request leaves this list
+	 * in two cases: the transfer completes (which never happens for an
+	 * endless transfer using circular TRBs with a link TRB), or a Stop
+	 * Transfer is issued, which the function driver can trigger by
+	 * calling dequeue.
+	 */
+	req->started = true;
+	list_add_tail(&req->list, &dep->started_list);
+
+	/* First, prepare a normal TRB, point to the fake buffer */
+	trb = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
+	dep->trb_enqueue++;
+	memset(trb, 0, sizeof(*trb));
+
+	req->trb = trb;
+	trb->bph = DBM_TRB_BIT | DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
+	trb->size = DWC3_TRB_SIZE_LENGTH(req->request.length);
+	trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_HWO |
+		DWC3_TRB_CTRL_CHN | (req->direction ? 0 : DWC3_TRB_CTRL_CSP);
+	req->trb_dma = dwc3_trb_dma_offset(dep, trb);
+
+	/* Second, prepare a Link TRB that points to the first TRB*/
+	trb_link = &dep->trb_pool[dep->trb_enqueue & DWC3_TRB_NUM];
+	dep->trb_enqueue++;
+	memset(trb_link, 0, sizeof(*trb_link));
+
+	trb_link->bpl = lower_32_bits(req->trb_dma);
+	trb_link->bph = DBM_TRB_BIT |
+			DBM_TRB_DMA | DBM_TRB_EP_NUM(dep->number);
+	trb_link->size = 0;
+	trb_link->ctrl = DWC3_TRBCTL_LINK_TRB | DWC3_TRB_CTRL_HWO;
+
+	/*
+	 * Now start the transfer
+	 */
+	memset(&params, 0, sizeof(params));
+	params.param0 = 0; /* TDAddr High */
+	params.param1 = lower_32_bits(req->trb_dma); /* DAddr Low */
+
+	/* DBM requires IOC to be set */
+	cmd = DWC3_DEPCMD_STARTTRANSFER | DWC3_DEPCMD_CMDIOC;
+	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+	if (ret < 0) {
+		dev_dbg(dep->dwc->dev,
+			"%s: failed to send STARTTRANSFER command\n",
+			__func__);
+
+		list_del(&req->list);
+		return ret;
+	}
+	dep->flags |= DWC3_EP_BUSY;
+	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
+
+	return ret;
+}
+
+/**
+* Queue a usb request to the DBM endpoint.
+* This function should be called after the endpoint
+* was enabled by the ep_enable.
+*
+* This function prepares special structure of TRBs which
+* is familiar with the DBM HW, so it will possible to use
+* this endpoint in DBM mode.
+*
+* The TRBs prepared by this function, is one normal TRB
+* which point to a fake buffer, followed by a link TRB
+* that points to the first TRB.
+*
+* The API of this function follow the regular API of
+* usb_ep_queue (see usb_ep_ops in include/linuk/usb/gadget.h).
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to usb_request instance.
+* @gfp_flags - possible flags.
+*
+* @return int - 0 on success, negative on error.
+*/
+static int dwc3_msm_ep_queue(struct usb_ep *ep,
+			     struct usb_request *request, gfp_t gfp_flags)
+{
+	struct dwc3_request *req = to_dwc3_request(request);
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_msm_req_complete *req_complete;
+	unsigned long flags;
+	int ret = 0, size;
+	u8 bam_pipe;
+	bool producer;
+	bool disable_wb;
+	bool internal_mem;
+	bool ioc;
+	bool superspeed;
+
+	if (!(request->udc_priv & MSM_SPS_MODE)) {
+		/* Not SPS mode, call original queue */
+		dev_vdbg(mdwc->dev, "%s: not sps mode, use regular queue\n",
+					__func__);
+
+		return (mdwc->original_ep_ops[dep->number])->queue(ep,
+								request,
+								gfp_flags);
+	}
+
+	/* HW restriction regarding TRB size (8KB) */
+	if (req->request.length < 0x2000) {
+		dev_err(mdwc->dev, "%s: Min TRB size is 8KB\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Override req->complete function, but before doing that,
+	 * store its original pointer in the req_complete_list.
+	 */
+	req_complete = kzalloc(sizeof(*req_complete), gfp_flags);
+	if (!req_complete)
+		return -ENOMEM;
+
+	req_complete->req = request;
+	req_complete->orig_complete = request->complete;
+	list_add_tail(&req_complete->list_item, &mdwc->req_complete_list);
+	request->complete = dwc3_msm_req_complete_func;
+
+	/*
+	 * Configure the DBM endpoint
+	 */
+	bam_pipe = request->udc_priv & MSM_PIPE_ID_MASK;
+	producer = ((request->udc_priv & MSM_PRODUCER) ? true : false);
+	disable_wb = ((request->udc_priv & MSM_DISABLE_WB) ? true : false);
+	internal_mem = ((request->udc_priv & MSM_INTERNAL_MEM) ? true : false);
+	ioc = ((request->udc_priv & MSM_ETD_IOC) ? true : false);
+
+	ret = dbm_ep_config(mdwc->dbm, dep->number, bam_pipe, producer,
+				disable_wb, internal_mem, ioc);
+	if (ret < 0) {
+		dev_err(mdwc->dev,
+			"error %d after calling dbm_ep_config\n", ret);
+		return ret;
+	}
+
+	dev_vdbg(dwc->dev, "%s: queuing request %p to ep %s length %d\n",
+			__func__, request, ep->name, request->length);
+	size = dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTSIZ(0));
+	dbm_event_buffer_config(mdwc->dbm,
+		dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRLO(0)),
+		dwc3_msm_read_reg(mdwc->base, DWC3_GEVNTADRHI(0)),
+		DWC3_GEVNTSIZ_SIZE(size));
+
+	/*
+	 * We must take the dwc3 core driver's lock, with interrupts
+	 * disabled, to be sure we are the only ones configuring the
+	 * hardware. Queuing the request is kept as short as possible
+	 * so the lock can be released quickly.
+	 */
+	spin_lock_irqsave(&dwc->lock, flags);
+	if (!dep->endpoint.desc) {
+		dev_err(mdwc->dev,
+			"%s: trying to queue request %p to disabled ep %s\n",
+			__func__, request, ep->name);
+		ret = -EPERM;
+		goto err;
+	}
+
+	if (dep->number == 0 || dep->number == 1) {
+		dev_err(mdwc->dev,
+			"%s: trying to queue dbm request %p to control ep %s\n",
+			__func__, request, ep->name);
+		ret = -EPERM;
+		goto err;
+	}
+
+
+	if (dep->trb_dequeue != dep->trb_enqueue ||
+			!list_empty(&dep->pending_list)
+			|| !list_empty(&dep->started_list)) {
+		dev_err(mdwc->dev,
+			"%s: trying to queue dbm request %p to ep %s\n",
+			__func__, request, ep->name);
+		ret = -EPERM;
+		goto err;
+	} else {
+		dep->trb_dequeue = 0;
+		dep->trb_enqueue = 0;
+	}
+
+	ret = __dwc3_msm_ep_queue(dep, req);
+	if (ret < 0) {
+		dev_err(mdwc->dev,
+			"error %d after calling __dwc3_msm_ep_queue\n", ret);
+		goto err;
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	superspeed = dwc3_msm_is_dev_superspeed(mdwc);
+	dbm_set_speed(mdwc->dbm, (u8)superspeed);
+
+	return 0;
+
+err:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	kfree(req_complete);
+	return ret;
+}
+
+/*
+* Returns the XferRscIndex for the EP, which is stored during the
+* StartXfer GSI EP OP.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+* @return int - XferRscIndex
+*/
+static inline int gsi_get_xfer_index(struct usb_ep *ep)
+{
+	struct dwc3_ep			*dep = to_dwc3_ep(ep);
+
+	return dep->resource_index;
+}
+
+/*
+* Fills in the GSI channel information needed by the IPA driver
+* for GSI channel creation.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @ch_info - output parameter with requested channel info
+*/
+static void gsi_get_channel_info(struct usb_ep *ep,
+			struct gsi_channel_info *ch_info)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	int last_trb_index = 0;
+	struct dwc3	*dwc = dep->dwc;
+	struct usb_gsi_request *request = ch_info->ch_req;
+
+	/* Provide physical USB addresses for DEPCMD and GEVENTCNT registers */
+	ch_info->depcmd_low_addr = (u32)(dwc->reg_phys +
+						DWC3_DEPCMD);
+	ch_info->depcmd_hi_addr = 0;
+
+	ch_info->xfer_ring_base_addr = dwc3_trb_dma_offset(dep,
+							&dep->trb_pool[0]);
+	/* Convert to multiples of 1KB */
+	ch_info->const_buffer_size = request->buf_len/1024;
+
+	/* IN direction */
+	if (dep->direction) {
+		/*
+		 * Multiply by size of each TRB for xfer_ring_len in bytes.
+		 * 2n + 2 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
+		 * extra Xfer TRB followed by n ZLP TRBs + 1 LINK TRB.
+		 */
+		ch_info->xfer_ring_len = (2 * request->num_bufs + 2) * 0x10;
+		last_trb_index = 2 * request->num_bufs + 2;
+	} else { /* OUT direction */
+		/*
+		 * Multiply by size of each TRB for xfer_ring_len in bytes.
+		 * n + 1 TRBs as per GSI h/w requirement. n Xfer TRBs + 1
+		 * LINK TRB.
+		 */
+		ch_info->xfer_ring_len = (request->num_bufs + 1) * 0x10;
+		last_trb_index = request->num_bufs + 1;
+	}
+
+	/* Store last 16 bits of LINK TRB address as per GSI hw requirement */
+	ch_info->last_trb_addr = (dwc3_trb_dma_offset(dep,
+			&dep->trb_pool[last_trb_index - 1]) & 0x0000FFFF);
+	ch_info->gevntcount_low_addr = (u32)(dwc->reg_phys +
+			DWC3_GEVNTCOUNT(ep->ep_intr_num));
+	ch_info->gevntcount_hi_addr = 0;
+
+	dev_dbg(dwc->dev,
+	"depcmd_laddr=%x last_trb_addr=%x gevtcnt_laddr=%x gevtcnt_haddr=%x",
+		ch_info->depcmd_low_addr, ch_info->last_trb_addr,
+		ch_info->gevntcount_low_addr, ch_info->gevntcount_hi_addr);
+}
+
+/*
+* Perform StartXfer on GSI EP. Stores XferRscIndex.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+* @return int - 0 on success
+*/
+static int gsi_startxfer_for_ep(struct usb_ep *ep)
+{
+	int ret;
+	struct dwc3_gadget_ep_cmd_params params;
+	u32				cmd;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+
+	memset(&params, 0, sizeof(params));
+	params.param0 = GSI_TRB_ADDR_BIT_53_MASK | GSI_TRB_ADDR_BIT_55_MASK;
+	params.param0 |= (ep->ep_intr_num << 16);
+	params.param1 = lower_32_bits(dwc3_trb_dma_offset(dep,
+						&dep->trb_pool[0]));
+	cmd = DWC3_DEPCMD_STARTTRANSFER;
+	cmd |= DWC3_DEPCMD_PARAM(0);
+	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+
+	if (ret < 0)
+		dev_dbg(dwc->dev, "Fail StrtXfr on GSI EP#%d\n", dep->number);
+	dep->resource_index = dwc3_gadget_ep_get_transfer_index(dep);
+	dev_dbg(dwc->dev, "XferRsc = %x", dep->resource_index);
+	return ret;
+}
+
+/*
+* Store Ring Base and Doorbell Address for GSI EP
+* for GSI channel creation.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @dbl_addr - Doorbell address obtained from IPA driver
+*/
+static void gsi_store_ringbase_dbl_info(struct usb_ep *ep, u32 dbl_addr)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	int n = ep->ep_intr_num - 1;
+
+	dwc3_msm_write_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n),
+			dwc3_trb_dma_offset(dep, &dep->trb_pool[0]));
+	dwc3_msm_write_reg(mdwc->base, GSI_DBL_ADDR_L(n), dbl_addr);
+
+	dev_dbg(mdwc->dev, "Ring Base Addr %d = %x", n,
+			dwc3_msm_read_reg(mdwc->base, GSI_RING_BASE_ADDR_L(n)));
+	dev_dbg(mdwc->dev, "GSI DB Addr %d = %x", n,
+			dwc3_msm_read_reg(mdwc->base, GSI_DBL_ADDR_L(n)));
+}
+
+/*
+* Rings Doorbell for IN GSI Channel
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request. This is used to pass in the
+* address of the GSI doorbell obtained from IPA driver
+*/
+static void gsi_ring_in_db(struct usb_ep *ep, struct usb_gsi_request *request)
+{
+	void __iomem *gsi_dbl_address_lsb;
+	void __iomem *gsi_dbl_address_msb;
+	dma_addr_t offset;
+	u64 dbl_addr = *((u64 *)request->buf_base_addr);
+	u32 dbl_lo_addr = (dbl_addr & 0xFFFFFFFF);
+	u32 dbl_hi_addr = (dbl_addr >> 32);
+	u32 num_trbs = (request->num_bufs * 2 + 2);
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	gsi_dbl_address_lsb = devm_ioremap_nocache(mdwc->dev,
+					dbl_lo_addr, sizeof(u32));
+	if (!gsi_dbl_address_lsb)
+		dev_dbg(mdwc->dev, "Failed to get GSI DBL address LSB\n");
+
+	gsi_dbl_address_msb = devm_ioremap_nocache(mdwc->dev,
+					dbl_hi_addr, sizeof(u32));
+	if (!gsi_dbl_address_msb)
+		dev_dbg(mdwc->dev, "Failed to get GSI DBL address MSB\n");
+
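+	/*
+	 * Ring the IN channel doorbell by writing the DMA address of the
+	 * last (link) TRB in the ring to the GSI doorbell register.
+	 */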
+	offset = dwc3_trb_dma_offset(dep, &dep->trb_pool[num_trbs-1]);
+	dev_dbg(mdwc->dev, "Writing link TRB addr: %pa to %p (%x)\n",
+	&offset, gsi_dbl_address_lsb, dbl_lo_addr);
+
+	writel_relaxed(offset, gsi_dbl_address_lsb);
+	writel_relaxed(0, gsi_dbl_address_msb);
+}
+
+/*
+* Sets HWO bit for TRBs and performs UpdateXfer for OUT EP.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request. Used to determine num of TRBs for OUT EP.
+*
+* @return int - 0 on success
+*/
+static int gsi_updatexfer_for_ep(struct usb_ep *ep,
+					struct usb_gsi_request *request)
+{
+	int i;
+	int ret;
+	u32				cmd;
+	int num_trbs = request->num_bufs + 1;
+	struct dwc3_trb *trb;
+	struct dwc3_gadget_ep_cmd_params params;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+
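+	/* Hand the data TRBs (all but the link TRB) back to hardware */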
+	for (i = 0; i < num_trbs - 1; i++) {
+		trb = &dep->trb_pool[i];
+		trb->ctrl |= DWC3_TRB_CTRL_HWO;
+	}
+
+	memset(&params, 0, sizeof(params));
+	cmd = DWC3_DEPCMD_UPDATETRANSFER;
+	cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
+	ret = dwc3_send_gadget_ep_cmd(dep, cmd, &params);
+	dep->flags |= DWC3_EP_BUSY;
+	if (ret < 0)
+		dev_dbg(dwc->dev, "UpdateXfr fail on GSI EP#%d\n", dep->number);
+	return ret;
+}
+
+/*
+* Perform EndXfer on particular GSI EP.
+*
+* @usb_ep - pointer to usb_ep instance.
+*/
+static void gsi_endxfer_for_ep(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3	*dwc = dep->dwc;
+
+	dwc3_stop_active_transfer(dwc, dep->number, true);
+}
+
+/*
+* Allocates and configures TRBs for GSI EPs.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request.
+*
+* @return int - 0 on success
+*/
+static int gsi_prepare_trbs(struct usb_ep *ep, struct usb_gsi_request *req)
+{
+	int i = 0;
+	dma_addr_t buffer_addr = req->dma;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3		*dwc = dep->dwc;
+	struct dwc3_trb *trb;
+	int num_trbs = (dep->direction) ? (2 * (req->num_bufs) + 2)
+					: (req->num_bufs + 1);
+
+	dep->trb_dma_pool = dma_pool_create(ep->name, dwc->dev,
+					num_trbs * sizeof(struct dwc3_trb),
+					num_trbs * sizeof(struct dwc3_trb), 0);
+	if (!dep->trb_dma_pool) {
+		dev_err(dep->dwc->dev, "failed to alloc trb dma pool for %s\n",
+				dep->name);
+		return -ENOMEM;
+	}
+
+	dep->num_trbs = num_trbs;
+
+	dep->trb_pool = dma_pool_alloc(dep->trb_dma_pool,
+					   GFP_KERNEL, &dep->trb_pool_dma);
+	if (!dep->trb_pool) {
+		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
+				dep->name);
+		return -ENOMEM;
+	}
+
+	/* IN direction */
+	if (dep->direction) {
+		for (i = 0; i < num_trbs ; i++) {
+			trb = &dep->trb_pool[i];
+			memset(trb, 0, sizeof(*trb));
+			/* Set up first n+1 TRBs for ZLPs */
+			if (i < (req->num_bufs + 1)) {
+				trb->bpl = 0;
+				trb->bph = 0;
+				trb->size = 0;
+				trb->ctrl = DWC3_TRBCTL_NORMAL
+						| DWC3_TRB_CTRL_IOC;
+				continue;
+			}
+
+			/* Setup n TRBs pointing to valid buffers */
+			trb->bpl = lower_32_bits(buffer_addr);
+			trb->bph = 0;
+			trb->size = 0;
+			trb->ctrl = DWC3_TRBCTL_NORMAL
+					| DWC3_TRB_CTRL_IOC;
+			buffer_addr += req->buf_len;
+
+			/* Set up the Link TRB at the end */
+			if (i == (num_trbs - 1)) {
+				trb->bpl = dwc3_trb_dma_offset(dep,
+							&dep->trb_pool[0]);
+				trb->bph = (1 << 23) | (1 << 21)
+						| (ep->ep_intr_num << 16);
+				trb->size = 0;
+				trb->ctrl = DWC3_TRBCTL_LINK_TRB
+						| DWC3_TRB_CTRL_HWO;
+			}
+		}
+	} else { /* OUT direction */
+
+		for (i = 0; i < num_trbs ; i++) {
+
+			trb = &dep->trb_pool[i];
+			memset(trb, 0, sizeof(*trb));
+			trb->bpl = lower_32_bits(buffer_addr);
+			trb->bph = 0;
+			trb->size = req->buf_len;
+			trb->ctrl = DWC3_TRBCTL_NORMAL | DWC3_TRB_CTRL_IOC
+					| DWC3_TRB_CTRL_CSP
+					| DWC3_TRB_CTRL_ISP_IMI;
+			buffer_addr += req->buf_len;
+
+			/* Set up the Link TRB at the end */
+			if (i == (num_trbs - 1)) {
+				trb->bpl = dwc3_trb_dma_offset(dep,
+							&dep->trb_pool[0]);
+				trb->bph = (1 << 23) | (1 << 21)
+						| (ep->ep_intr_num << 16);
+				trb->size = 0;
+				trb->ctrl = DWC3_TRBCTL_LINK_TRB
+						| DWC3_TRB_CTRL_HWO;
+			}
+		}
+	}
+	return 0;
+}
+
+/*
+* Frees TRBs for GSI EPs.
+*
+* @usb_ep - pointer to usb_ep instance.
+*
+*/
+static void gsi_free_trbs(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+
+	if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
+		return;
+
+	/*  Free TRBs and TRB pool for EP */
+	if (dep->trb_dma_pool) {
+		dma_pool_free(dep->trb_dma_pool, dep->trb_pool,
+						dep->trb_pool_dma);
+		dma_pool_destroy(dep->trb_dma_pool);
+		dep->trb_pool = NULL;
+		dep->trb_pool_dma = 0;
+		dep->trb_dma_pool = NULL;
+	}
+}
+
+/*
+* Configures GSI EPs. For GSI EPs we need to set interrupter numbers.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @request - pointer to GSI request.
+*/
+static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3		*dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct dwc3_gadget_ep_cmd_params params;
+	const struct usb_endpoint_descriptor *desc = ep->desc;
+	const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
+	u32			reg;
+
+	memset(&params, 0x00, sizeof(params));
+
+	/* Configure GSI EP */
+	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
+		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));
+
+	/* Burst size is only needed in SuperSpeed mode */
+	if (dwc->gadget.speed == USB_SPEED_SUPER) {
+		u32 burst = dep->endpoint.maxburst - 1;
+
+		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
+	}
+
+	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
+		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
+					| DWC3_DEPCFG_STREAM_EVENT_EN;
+		dep->stream_capable = true;
+	}
+
+	/* Set EP number */
+	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);
+
+	/* Set interrupter number for GSI endpoints */
+	params.param1 |= DWC3_DEPCFG_INT_NUM(ep->ep_intr_num);
+
+	/* Enable XferInProgress and XferComplete Interrupts */
+	params.param1 |= DWC3_DEPCFG_XFER_COMPLETE_EN;
+	params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;
+	params.param1 |= DWC3_DEPCFG_FIFO_ERROR_EN;
+	/*
+	 * We must use the lower 16 TX FIFOs even though
+	 * HW might have more
+	 */
+	/* Remove FIFO Number for GSI EP */
+	if (dep->direction)
+		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);
+
+	params.param0 |= DWC3_DEPCFG_ACTION_INIT;
+
+	dev_dbg(mdwc->dev, "Set EP config to params = %x %x %x, for %s\n",
+	params.param0, params.param1, params.param2, dep->name);
+
+	dwc3_send_gadget_ep_cmd(dep, DWC3_DEPCMD_SETEPCONFIG, &params);
+
+	/* Set XferRsc Index for GSI EP */
+	if (!(dep->flags & DWC3_EP_ENABLED)) {
+		memset(&params, 0x00, sizeof(params));
+		params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
+		dwc3_send_gadget_ep_cmd(dep,
+				DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
+
+		dep->endpoint.desc = desc;
+		dep->comp_desc = comp_desc;
+		dep->type = usb_endpoint_type(desc);
+		dep->flags |= DWC3_EP_ENABLED;
+		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
+		reg |= DWC3_DALEPENA_EP(dep->number);
+		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
+	}
+
+}
+
+/*
+* Enables USB wrapper for GSI
+*
+* @usb_ep - pointer to usb_ep instance.
+*/
+static void gsi_enable(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
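+	/*
+	 * Enable the GSI clock, pulse the doorbell pointer restart bit,
+	 * then enable GSI itself.
+	 */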
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_CLK_EN_MASK, 1);
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 1);
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_RESTART_DBL_PNTR_MASK, 0);
+	dev_dbg(mdwc->dev, "%s: Enable GSI\n", __func__);
+	dwc3_msm_write_reg_field(mdwc->base,
+			GSI_GENERAL_CFG_REG, GSI_EN_MASK, 1);
+}
+
+/*
+* Block or allow doorbell towards GSI
+*
+* @usb_ep - pointer to usb_ep instance.
+* @block_db - true to block the doorbell towards GSI, false to allow it.
+*/
+static void gsi_set_clear_dbell(struct usb_ep *ep,
+					bool block_db)
+{
+
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	dwc3_msm_write_reg_field(mdwc->base,
+		GSI_GENERAL_CFG_REG, BLOCK_GSI_WR_GO_MASK, block_db);
+}
+
+/*
+* Performs necessary checks before stopping GSI channels
+*
+* @usb_ep - pointer to usb_ep instance to access DWC3 regs
+*/
+static bool gsi_check_ready_to_suspend(struct usb_ep *ep, bool f_suspend)
+{
+	u32	timeout = 1500;
+	u32	reg = 0;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	while (dwc3_msm_read_reg_field(mdwc->base,
+		GSI_IF_STS, GSI_WR_CTRL_STATE_MASK)) {
+		if (!timeout--) {
+			dev_err(mdwc->dev,
+			"Unable to suspend GSI ch. WR_CTRL_STATE != 0\n");
+			return false;
+		}
+	}
+	/* Check for U3 only if we are not handling Function Suspend */
+	if (!f_suspend) {
+		reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+		if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U3) {
+			dev_err(mdwc->dev, "Unable to suspend GSI ch\n");
+			return false;
+		}
+	}
+
+	return true;
+}
+
+
+/**
+* Performs GSI operations or GSI EP related operations.
+*
+* @usb_ep - pointer to usb_ep instance.
+* @op_data - pointer to opcode related data.
+* @op - GSI related or GSI EP related op code.
+*
+* @return int - 0 on success, negative on error.
+* Also returns XferRscIdx for GSI_EP_OP_GET_XFER_IDX.
+*/
+static int dwc3_msm_gsi_ep_op(struct usb_ep *ep,
+		void *op_data, enum gsi_ep_op op)
+{
+	u32 ret = 0;
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct usb_gsi_request *request;
+	struct gsi_channel_info *ch_info;
+	bool block_db, f_suspend;
+
+	switch (op) {
+	case GSI_EP_OP_PREPARE_TRBS:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_PREPARE_TRBS for %s\n", ep->name);
+		ret = gsi_prepare_trbs(ep, request);
+		break;
+	case GSI_EP_OP_FREE_TRBS:
+		dev_dbg(mdwc->dev, "EP_OP_FREE_TRBS for %s\n", ep->name);
+		gsi_free_trbs(ep);
+		break;
+	case GSI_EP_OP_CONFIG:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_CONFIG for %s\n", ep->name);
+		gsi_configure_ep(ep, request);
+		break;
+	case GSI_EP_OP_STARTXFER:
+		dev_dbg(mdwc->dev, "EP_OP_STARTXFER for %s\n", ep->name);
+		ret = gsi_startxfer_for_ep(ep);
+		break;
+	case GSI_EP_OP_GET_XFER_IDX:
+		dev_dbg(mdwc->dev, "EP_OP_GET_XFER_IDX for %s\n", ep->name);
+		ret = gsi_get_xfer_index(ep);
+		break;
+	case GSI_EP_OP_STORE_DBL_INFO:
+		dev_dbg(mdwc->dev, "EP_OP_STORE_DBL_INFO\n");
+		gsi_store_ringbase_dbl_info(ep, *((u32 *)op_data));
+		break;
+	case GSI_EP_OP_ENABLE_GSI:
+		dev_dbg(mdwc->dev, "EP_OP_ENABLE_GSI\n");
+		gsi_enable(ep);
+		break;
+	case GSI_EP_OP_GET_CH_INFO:
+		ch_info = (struct gsi_channel_info *)op_data;
+		gsi_get_channel_info(ep, ch_info);
+		break;
+	case GSI_EP_OP_RING_IN_DB:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "RING IN EP DB\n");
+		gsi_ring_in_db(ep, request);
+		break;
+	case GSI_EP_OP_UPDATEXFER:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_UPDATEXFER\n");
+		ret = gsi_updatexfer_for_ep(ep, request);
+		break;
+	case GSI_EP_OP_ENDXFER:
+		request = (struct usb_gsi_request *)op_data;
+		dev_dbg(mdwc->dev, "EP_OP_ENDXFER for %s\n", ep->name);
+		gsi_endxfer_for_ep(ep);
+		break;
+	case GSI_EP_OP_SET_CLR_BLOCK_DBL:
+		block_db = *((bool *)op_data);
+		dev_dbg(mdwc->dev, "EP_OP_SET_CLR_BLOCK_DBL %d\n",
+						block_db);
+		gsi_set_clear_dbell(ep, block_db);
+		break;
+	case GSI_EP_OP_CHECK_FOR_SUSPEND:
+		dev_dbg(mdwc->dev, "EP_OP_CHECK_FOR_SUSPEND\n");
+		f_suspend = *((bool *)op_data);
+		ret = gsi_check_ready_to_suspend(ep, f_suspend);
+		break;
+	case GSI_EP_OP_DISABLE:
+		dev_dbg(mdwc->dev, "EP_OP_DISABLE\n");
+		ret = ep->ops->disable(ep);
+		break;
+	default:
+		dev_err(mdwc->dev, "%s: Invalid opcode GSI EP\n", __func__);
+	}
+
+	return ret;
+}
+
+/**
+ * Configure MSM endpoint.
+ * This function performs endpoint configuration that requires an
+ * MSM-specific implementation.
+ *
+ * It should be called by a USB function/class layer that needs
+ * support from the MSM-specific HW wrapping the USB3 core
+ * (e.g. GSI or DBM specific endpoints).
+ *
+ * @ep - a pointer to some usb_ep instance
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int msm_ep_config(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct usb_ep_ops *new_ep_ops;
+
+	/* Save original ep ops for future restore */
+	if (mdwc->original_ep_ops[dep->number]) {
+		dev_err(mdwc->dev,
+			"ep [%s,%d] already configured as msm endpoint\n",
+			ep->name, dep->number);
+		return -EPERM;
+	}
+	mdwc->original_ep_ops[dep->number] = ep->ops;
+
+	/* Install new ep ops with MSM-specific overrides */
+	new_ep_ops = kzalloc(sizeof(struct usb_ep_ops), GFP_ATOMIC);
+	if (!new_ep_ops)
+		return -ENOMEM;
+
+	(*new_ep_ops) = (*ep->ops);
+	new_ep_ops->queue = dwc3_msm_ep_queue;
+	new_ep_ops->gsi_ep_op = dwc3_msm_gsi_ep_op;
+	ep->ops = new_ep_ops;
+
+	/*
+	 * Additional MSM-specific endpoint configuration can be added
+	 * here.
+	 */
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ep_config);
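+
+/*
+ * Illustrative usage sketch for a function driver (not taken from any
+ * specific client of this API):
+ *
+ *	msm_ep_config(ep);                  install MSM-specific ep ops
+ *	request->udc_priv |= MSM_SPS_MODE;  route the request via the DBM
+ *	usb_ep_queue(ep, request, GFP_ATOMIC);
+ *	...
+ *	msm_ep_unconfig(ep);                restore the original ep ops
+ */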
+
+/**
+ * Un-configure MSM endpoint.
+ * Tear down the configuration done in msm_ep_config().
+ *
+ * @ep - a pointer to some usb_ep instance
+ *
+ * @return int - 0 on success, negative on error.
+ */
+int msm_ep_unconfig(struct usb_ep *ep)
+{
+	struct dwc3_ep *dep = to_dwc3_ep(ep);
+	struct dwc3 *dwc = dep->dwc;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	struct usb_ep_ops *old_ep_ops;
+
+	/* Restore original ep ops */
+	if (!mdwc->original_ep_ops[dep->number]) {
+		dev_err(mdwc->dev,
+			"ep [%s,%d] was not configured as msm endpoint\n",
+			ep->name, dep->number);
+		return -EINVAL;
+	}
+	old_ep_ops = (struct usb_ep_ops	*)ep->ops;
+	ep->ops = mdwc->original_ep_ops[dep->number];
+	mdwc->original_ep_ops[dep->number] = NULL;
+	kfree(old_ep_ops);
+
+	/*
+	 * Additional MSM-specific endpoint un-configuration can be added
+	 * here.
+	 */
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ep_unconfig);
+#endif /* (CONFIG_USB_DWC3_GADGET) || (CONFIG_USB_DWC3_DUAL_ROLE) */
+
+static void dwc3_resume_work(struct work_struct *w);
+
+static void dwc3_restart_usb_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm,
+						restart_usb_work);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	unsigned int timeout = 50;
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+
+	if (atomic_read(&dwc->in_lpm) || !dwc->is_drd) {
+		dev_dbg(mdwc->dev, "%s failed!!!\n", __func__);
+		return;
+	}
+
+	/* guard against concurrent VBUS handling */
+	mdwc->in_restart = true;
+
+	if (!mdwc->vbus_active) {
+		dev_dbg(mdwc->dev, "%s bailing out in disconnect\n", __func__);
+		dwc->err_evt_seen = false;
+		mdwc->in_restart = false;
+		return;
+	}
+
+	/* Reset active USB connection */
+	dwc3_resume_work(&mdwc->resume_work);
+
+	/* Make sure disconnect is processed before sending connect */
+	while (--timeout && !pm_runtime_suspended(mdwc->dev))
+		msleep(20);
+
+	if (!timeout) {
+		dev_dbg(mdwc->dev,
+			"Not in LPM after disconnect, forcing suspend...\n");
+		pm_runtime_suspend(mdwc->dev);
+	}
+
+	/* Force reconnect only if cable is still connected */
+	if (mdwc->vbus_active) {
+		mdwc->in_restart = false;
+		dwc3_resume_work(&mdwc->resume_work);
+	}
+
+	dwc->err_evt_seen = false;
+	flush_delayed_work(&mdwc->sm_work);
+}
+
+/*
+ * Check whether the DWC3 requires resetting the ep
+ * after going to Low Power Mode (lpm)
+ */
+bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
+{
+	struct dwc3 *dwc = container_of(gadget, struct dwc3, gadget);
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+
+	return dbm_reset_ep_after_lpm(mdwc->dbm);
+}
+EXPORT_SYMBOL(msm_dwc3_reset_ep_after_lpm);
+
+/*
+ * Config Global Distributed Switch Controller (GDSC)
+ * to support controller power collapse
+ */
+static int dwc3_msm_config_gdsc(struct dwc3_msm *mdwc, int on)
+{
+	int ret;
+
+	if (IS_ERR_OR_NULL(mdwc->dwc3_gdsc))
+		return -EPERM;
+
+	if (on) {
+		ret = regulator_enable(mdwc->dwc3_gdsc);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to enable usb3 gdsc\n");
+			return ret;
+		}
+	} else {
+		ret = regulator_disable(mdwc->dwc3_gdsc);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to disable usb3 gdsc\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static int dwc3_msm_link_clk_reset(struct dwc3_msm *mdwc, bool assert)
+{
+	int ret = 0;
+
+	if (assert) {
+		disable_irq(mdwc->pwr_event_irq);
+		/* Using asynchronous block reset to the hardware */
+		dev_dbg(mdwc->dev, "block_reset ASSERT\n");
+		clk_disable_unprepare(mdwc->utmi_clk);
+		clk_disable_unprepare(mdwc->sleep_clk);
+		clk_disable_unprepare(mdwc->core_clk);
+		clk_disable_unprepare(mdwc->iface_clk);
+		ret = reset_control_assert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "dwc3 core_reset assert failed\n");
+	} else {
+		dev_dbg(mdwc->dev, "block_reset DEASSERT\n");
+		ret = reset_control_deassert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "dwc3 core_reset deassert failed\n");
+		ndelay(200);
+		clk_prepare_enable(mdwc->iface_clk);
+		clk_prepare_enable(mdwc->core_clk);
+		clk_prepare_enable(mdwc->sleep_clk);
+		clk_prepare_enable(mdwc->utmi_clk);
+		enable_irq(mdwc->pwr_event_irq);
+	}
+
+	return ret;
+}
+
+static void dwc3_msm_update_ref_clk(struct dwc3_msm *mdwc)
+{
+	u32 guctl, gfladj = 0;
+
+	guctl = dwc3_msm_read_reg(mdwc->base, DWC3_GUCTL);
+	guctl &= ~DWC3_GUCTL_REFCLKPER;
+
+	/* GFLADJ register is used starting with revision 2.50a */
+	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) >= DWC3_REVISION_250A) {
+		gfladj = dwc3_msm_read_reg(mdwc->base, DWC3_GFLADJ);
+		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
+		gfladj &= ~DWC3_GFLADJ_REFCLK_240MHZ_DECR;
+		gfladj &= ~DWC3_GFLADJ_REFCLK_LPM_SEL;
+		gfladj &= ~DWC3_GFLADJ_REFCLK_FLADJ;
+	}
+
+	/* Refer to SNPS Databook Table 6-55 for calculations used */
+	switch (mdwc->utmi_clk_rate) {
+	case 19200000:
+		guctl |= 52 << __ffs(DWC3_GUCTL_REFCLKPER);
+		gfladj |= 12 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
+		gfladj |= DWC3_GFLADJ_REFCLK_240MHZDECR_PLS1;
+		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+		gfladj |= 200 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
+		break;
+	case 24000000:
+		guctl |= 41 << __ffs(DWC3_GUCTL_REFCLKPER);
+		gfladj |= 10 << __ffs(DWC3_GFLADJ_REFCLK_240MHZ_DECR);
+		gfladj |= DWC3_GFLADJ_REFCLK_LPM_SEL;
+		gfladj |= 2032 << __ffs(DWC3_GFLADJ_REFCLK_FLADJ);
+		break;
+	default:
+		dev_warn(mdwc->dev, "Unsupported utmi_clk_rate: %u\n",
+				mdwc->utmi_clk_rate);
+		break;
+	}
+
+	dwc3_msm_write_reg(mdwc->base, DWC3_GUCTL, guctl);
+	if (gfladj)
+		dwc3_msm_write_reg(mdwc->base, DWC3_GFLADJ, gfladj);
+}
+
+/* Initialize QSCRATCH registers for HSPHY and SSPHY operation */
+static void dwc3_msm_qscratch_reg_init(struct dwc3_msm *mdwc)
+{
+	if (dwc3_msm_read_reg(mdwc->base, DWC3_GSNPSID) < DWC3_REVISION_250A)
+		/* On older cores set XHCI_REV bit to specify revision 1.0 */
+		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
+					 BIT(2), 1);
+
+	/*
+	 * Enable master clock for RAMs to allow BAM to access RAMs when
+	 * RAM clock gating is enabled via DWC3's GCTL. Otherwise issues
+	 * are seen where RAM clocks get turned OFF in SS mode
+	 */
+	dwc3_msm_write_reg(mdwc->base, CGCTL_REG,
+		dwc3_msm_read_reg(mdwc->base, CGCTL_REG) | 0x18);
+
+}
+
+static void dwc3_msm_notify_event(struct dwc3 *dwc, unsigned int event)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dwc->dev->parent);
+	u32 reg;
+
+	if (dwc->revision < DWC3_REVISION_230A)
+		return;
+
+	switch (event) {
+	case DWC3_CONTROLLER_ERROR_EVENT:
+		dev_info(mdwc->dev,
+			"DWC3_CONTROLLER_ERROR_EVENT received, irq cnt %lu\n",
+			dwc->irq_cnt);
+
+		dwc3_gadget_disable_irq(dwc);
+
+		/* prevent core from generating interrupts until recovery */
+		reg = dwc3_msm_read_reg(mdwc->base, DWC3_GCTL);
+		reg |= DWC3_GCTL_CORESOFTRESET;
+		dwc3_msm_write_reg(mdwc->base, DWC3_GCTL, reg);
+
+		/* restart USB which performs full reset and reconnect */
+		schedule_work(&mdwc->restart_usb_work);
+		break;
+	case DWC3_CONTROLLER_RESET_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESET_EVENT received\n");
+		/* HS & SSPHYs get reset as part of core soft reset */
+		dwc3_msm_qscratch_reg_init(mdwc);
+		break;
+	case DWC3_CONTROLLER_POST_RESET_EVENT:
+		dev_dbg(mdwc->dev,
+				"DWC3_CONTROLLER_POST_RESET_EVENT received\n");
+
+		/*
+		 * The sequence below is used when the controller works
+		 * without an SS PHY and only USB High Speed is supported.
+		 */
+		if (dwc->maximum_speed == USB_SPEED_HIGH) {
+			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+				dwc3_msm_read_reg(mdwc->base,
+				QSCRATCH_GENERAL_CFG)
+				| PIPE_UTMI_CLK_DIS);
+
+			usleep_range(2, 5);
+
+
+			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+				dwc3_msm_read_reg(mdwc->base,
+				QSCRATCH_GENERAL_CFG)
+				| PIPE_UTMI_CLK_SEL
+				| PIPE3_PHYSTATUS_SW);
+
+			usleep_range(2, 5);
+
+			dwc3_msm_write_reg(mdwc->base, QSCRATCH_GENERAL_CFG,
+				dwc3_msm_read_reg(mdwc->base,
+				QSCRATCH_GENERAL_CFG)
+				& ~PIPE_UTMI_CLK_DIS);
+		}
+
+		dwc3_msm_update_ref_clk(mdwc);
+		dwc->tx_fifo_size = mdwc->tx_fifo_size;
+		break;
+	case DWC3_CONTROLLER_CONNDONE_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_CONNDONE_EVENT received\n");
+		/*
+		 * Unmask the L1-exit power event if the DBM indicates
+		 * coming out of L1 by interrupt
+		 */
+		if (mdwc->dbm && dbm_l1_lpm_interrupt(mdwc->dbm))
+			dwc3_msm_write_reg_field(mdwc->base,
+					PWR_EVNT_IRQ_MASK_REG,
+					PWR_EVNT_LPM_OUT_L1_MASK, 1);
+
+		atomic_set(&dwc->in_lpm, 0);
+		break;
+	case DWC3_CONTROLLER_NOTIFY_OTG_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_NOTIFY_OTG_EVENT received\n");
+		if (dwc->enable_bus_suspend) {
+			mdwc->suspend = dwc->b_suspend;
+			queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+		}
+		break;
+	case DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT received\n");
+		dwc3_msm_gadget_vbus_draw(mdwc, dwc->vbus_draw);
+		break;
+	case DWC3_CONTROLLER_RESTART_USB_SESSION:
+		dev_dbg(mdwc->dev, "DWC3_CONTROLLER_RESTART_USB_SESSION received\n");
+		dwc3_restart_usb_work(&mdwc->restart_usb_work);
+		break;
+	default:
+		dev_dbg(mdwc->dev, "unknown dwc3 event\n");
+		break;
+	}
+}
+
+static void dwc3_msm_block_reset(struct dwc3_msm *mdwc, bool core_reset)
+{
+	int ret  = 0;
+
+	if (core_reset) {
+		ret = dwc3_msm_link_clk_reset(mdwc, 1);
+		if (ret)
+			return;
+
+		usleep_range(1000, 1200);
+		ret = dwc3_msm_link_clk_reset(mdwc, 0);
+		if (ret)
+			return;
+
+		usleep_range(10000, 12000);
+	}
+
+	if (mdwc->dbm) {
+		/* Reset the DBM */
+		dbm_soft_reset(mdwc->dbm, 1);
+		usleep_range(1000, 1200);
+		dbm_soft_reset(mdwc->dbm, 0);
+
+		/*enable DBM*/
+		dwc3_msm_write_reg_field(mdwc->base, QSCRATCH_GENERAL_CFG,
+			DBM_EN_MASK, 0x1);
+		dbm_enable(mdwc->dbm);
+	}
+}
+
+static void dwc3_msm_power_collapse_por(struct dwc3_msm *mdwc)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	u32 val;
+
+	/* Configure AHB2PHY for one wait state read/write */
+	if (mdwc->ahb2phy_base) {
+		clk_prepare_enable(mdwc->cfg_ahb_clk);
+		val = readl_relaxed(mdwc->ahb2phy_base +
+				PERIPH_SS_AHB2PHY_TOP_CFG);
+		if (val != ONE_READ_WRITE_WAIT) {
+			writel_relaxed(ONE_READ_WRITE_WAIT,
+				mdwc->ahb2phy_base + PERIPH_SS_AHB2PHY_TOP_CFG);
+			/* complete above write before configuring USB PHY. */
+			mb();
+		}
+		clk_disable_unprepare(mdwc->cfg_ahb_clk);
+	}
+
+	if (!mdwc->init) {
+		dwc3_core_pre_init(dwc);
+		mdwc->init = true;
+	}
+
+	dwc3_core_init(dwc);
+	/* Re-configure event buffers */
+	dwc3_event_buffers_setup(dwc);
+}
+
+static int dwc3_msm_prepare_suspend(struct dwc3_msm *mdwc)
+{
+	unsigned long timeout;
+	u32 reg = 0;
+
+	if ((mdwc->in_host_mode || mdwc->vbus_active)
+			&& dwc3_msm_is_superspeed(mdwc)) {
+		if (!atomic_read(&mdwc->in_p3)) {
+			dev_err(mdwc->dev, "Not in P3,aborting LPM sequence\n");
+			return -EBUSY;
+		}
+	}
+
+	/* Clear previous L2 events */
+	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
+		PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK);
+
+	/* Prepare HSPHY for suspend */
+	reg = dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0));
+	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+		reg | DWC3_GUSB2PHYCFG_ENBLSLPM | DWC3_GUSB2PHYCFG_SUSPHY);
+
+	/* Wait for PHY to go into L2 */
+	timeout = jiffies + msecs_to_jiffies(5);
+	while (!time_after(jiffies, timeout)) {
+		reg = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
+		if (reg & PWR_EVNT_LPM_IN_L2_MASK)
+			break;
+	}
+	if (!(reg & PWR_EVNT_LPM_IN_L2_MASK))
+		dev_err(mdwc->dev, "could not transition HS PHY to L2\n");
+
+	/* Clear L2 event bit */
+	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG,
+		PWR_EVNT_LPM_IN_L2_MASK);
+
+	return 0;
+}
+
+static void dwc3_msm_bus_vote_w(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, bus_vote_w);
+	int ret;
+
+	ret = msm_bus_scale_client_update_request(mdwc->bus_perf_client,
+			mdwc->bus_vote);
+	if (ret)
+		dev_err(mdwc->dev, "Failed to reset bus bw vote %d\n", ret);
+}
+
+static void dwc3_set_phy_speed_flags(struct dwc3_msm *mdwc)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	int i, num_ports;
+	u32 reg;
+
+	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
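+	/*
+	 * Record the connected device's speed in the HS PHY flags: in host
+	 * mode scan each root-hub port, in device mode use the gadget's
+	 * negotiated speed.
+	 */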
+	if (mdwc->in_host_mode) {
+		reg = dwc3_msm_read_reg(mdwc->base, USB3_HCSPARAMS1);
+		num_ports = HCS_MAX_PORTS(reg);
+		for (i = 0; i < num_ports; i++) {
+			reg = dwc3_msm_read_reg(mdwc->base,
+					USB3_PORTSC + i*0x10);
+			if (reg & PORT_PE) {
+				if (DEV_HIGHSPEED(reg) || DEV_FULLSPEED(reg))
+					mdwc->hs_phy->flags |= PHY_HSFS_MODE;
+				else if (DEV_LOWSPEED(reg))
+					mdwc->hs_phy->flags |= PHY_LS_MODE;
+			}
+		}
+	} else {
+		if (dwc->gadget.speed == USB_SPEED_HIGH ||
+			dwc->gadget.speed == USB_SPEED_FULL)
+			mdwc->hs_phy->flags |= PHY_HSFS_MODE;
+		else if (dwc->gadget.speed == USB_SPEED_LOW)
+			mdwc->hs_phy->flags |= PHY_LS_MODE;
+	}
+}
+
+
+static int dwc3_msm_suspend(struct dwc3_msm *mdwc)
+{
+	int ret;
+	bool can_suspend_ssphy;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct dwc3_event_buffer *evt;
+
+	if (atomic_read(&dwc->in_lpm)) {
+		dev_dbg(mdwc->dev, "%s: Already suspended\n", __func__);
+		return 0;
+	}
+
+	if (!mdwc->in_host_mode) {
+		evt = dwc->ev_buf;
+		if ((evt->flags & DWC3_EVENT_PENDING)) {
+			dev_dbg(mdwc->dev,
+				"%s: %d device events pending, abort suspend\n",
+				__func__, evt->count / 4);
+			return -EBUSY;
+		}
+	}
+
+	if (!mdwc->vbus_active && dwc->is_drd &&
+		mdwc->otg_state == OTG_STATE_B_PERIPHERAL) {
+		/*
+		 * In some cases pm_runtime_suspend may be called by usb_bam
+		 * while an LPM flag is pending. However, if this happens when
+		 * the cable was disconnected and the OTG state has not yet
+		 * changed to IDLE, the OTG state machine is still running and
+		 * we race against it. So cancel LPM for now; the OTG state
+		 * machine will go for LPM later, after completing the
+		 * transition to the IDLE state.
+		 */
+		dev_dbg(mdwc->dev,
+			"%s: cable disconnected while not in idle otg state\n",
+			__func__);
+		return -EBUSY;
+	}
+
+	/*
+	 * If the device is not in the CONFIGURED state, abort the LPM
+	 * sequence. This check applies to the device bus suspend case.
+	 */
+	if ((dwc->is_drd && mdwc->otg_state == OTG_STATE_B_SUSPEND) &&
+		(dwc->gadget.state != USB_STATE_CONFIGURED)) {
+		pr_err("%s(): Trying to go in LPM with state:%d\n",
+					__func__, dwc->gadget.state);
+		pr_err("%s(): LPM is not performed.\n", __func__);
+		return -EBUSY;
+	}
+
+	ret = dwc3_msm_prepare_suspend(mdwc);
+	if (ret)
+		return ret;
+
+	/* The SS PHY can be suspended unless host mode is running at SS */
+	can_suspend_ssphy = !(mdwc->in_host_mode &&
+				dwc3_msm_is_host_superspeed(mdwc));
+
+	/* Disable core irq */
+	if (dwc->irq)
+		disable_irq(dwc->irq);
+
+	/* disable power event irq, hs and ss phy irq is used as wake up src */
+	disable_irq(mdwc->pwr_event_irq);
+
+	dwc3_set_phy_speed_flags(mdwc);
+	/* Suspend HS PHY */
+	usb_phy_set_suspend(mdwc->hs_phy, 1);
+
+	/* Suspend SS PHY */
+	if (can_suspend_ssphy) {
+		/* indicate phy about SS mode */
+		if (dwc3_msm_is_superspeed(mdwc))
+			mdwc->ss_phy->flags |= DEVICE_IN_SS_MODE;
+		usb_phy_set_suspend(mdwc->ss_phy, 1);
+		mdwc->lpm_flags |= MDWC3_SS_PHY_SUSPEND;
+	}
+
+	/* make sure above writes are completed before turning off clocks */
+	wmb();
+
+	/* Disable clocks */
+	if (mdwc->bus_aggr_clk)
+		clk_disable_unprepare(mdwc->bus_aggr_clk);
+	clk_disable_unprepare(mdwc->utmi_clk);
+
+	clk_set_rate(mdwc->core_clk, 19200000);
+	clk_disable_unprepare(mdwc->core_clk);
+	/*
+	 * Disable iface_clk only after core_clk as core_clk has FSM
+	 * dependency on iface_clk. Hence iface_clk should be turned off
+	 * after core_clk is turned off.
+	 */
+	clk_disable_unprepare(mdwc->iface_clk);
+	/* USB PHY no more requires TCXO */
+	clk_disable_unprepare(mdwc->xo_clk);
+
+	/* Perform controller power collapse */
+	if (!mdwc->in_host_mode && !mdwc->vbus_active) {
+		mdwc->lpm_flags |= MDWC3_POWER_COLLAPSE;
+		dev_dbg(mdwc->dev, "%s: power collapse\n", __func__);
+		dwc3_msm_config_gdsc(mdwc, 0);
+		clk_disable_unprepare(mdwc->sleep_clk);
+	}
+
+	/* Remove bus voting */
+	if (mdwc->bus_perf_client) {
+		mdwc->bus_vote = 0;
+		schedule_work(&mdwc->bus_vote_w);
+	}
+
+	/*
+	 * Release the wakeup source with a timeout to defer system suspend,
+	 * to handle the case where both SUSPEND and DISCONNECT events are
+	 * received on USB cable disconnect.
+	 */
+	if (mdwc->lpm_to_suspend_delay) {
+		dev_dbg(mdwc->dev, "defer suspend with %d(msecs)\n",
+					mdwc->lpm_to_suspend_delay);
+		pm_wakeup_event(mdwc->dev, mdwc->lpm_to_suspend_delay);
+	} else {
+		pm_relax(mdwc->dev);
+	}
+
+	atomic_set(&dwc->in_lpm, 1);
+
+	/*
+	 * With DCP or during cable disconnect, we don't require wakeup
+	 * using HS_PHY_IRQ or SS_PHY_IRQ. Hence enable wakeup only in
+	 * case of host bus suspend and device bus suspend.
+	 */
+	if (mdwc->vbus_active || mdwc->in_host_mode) {
+		enable_irq_wake(mdwc->hs_phy_irq);
+		enable_irq(mdwc->hs_phy_irq);
+		if (mdwc->ss_phy_irq) {
+			enable_irq_wake(mdwc->ss_phy_irq);
+			enable_irq(mdwc->ss_phy_irq);
+		}
+		/*
+		 * Enable power event irq during bus suspend in host mode for
+		 * mapping MPM pin for DP so that wakeup can happen in system
+		 * suspend.
+		 */
+		if (mdwc->in_host_mode) {
+			enable_irq(mdwc->pwr_event_irq);
+			enable_irq_wake(mdwc->pwr_event_irq);
+		}
+		mdwc->lpm_flags |= MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
+	}
+
+	dev_info(mdwc->dev, "DWC3 in low power mode\n");
+	return 0;
+}
+
+static int dwc3_msm_resume(struct dwc3_msm *mdwc)
+{
+	int ret;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(mdwc->dev, "%s: exiting lpm\n", __func__);
+
+	if (!atomic_read(&dwc->in_lpm)) {
+		dev_dbg(mdwc->dev, "%s: Already resumed\n", __func__);
+		return 0;
+	}
+
+	pm_stay_awake(mdwc->dev);
+
+	/* Enable bus voting */
+	if (mdwc->bus_perf_client) {
+		mdwc->bus_vote = 1;
+		schedule_work(&mdwc->bus_vote_w);
+	}
+
+	/* Vote for TCXO while waking up USB HSPHY */
+	ret = clk_prepare_enable(mdwc->xo_clk);
+	if (ret)
+		dev_err(mdwc->dev, "%s failed to vote TCXO buffer%d\n",
+						__func__, ret);
+
+	/* Restore controller power collapse */
+	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
+		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
+		dwc3_msm_config_gdsc(mdwc, 1);
+		ret = reset_control_assert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "%s:core_reset assert failed\n",
+					__func__);
+		/* HW requires a short delay for reset to take place properly */
+		usleep_range(1000, 1200);
+		ret = reset_control_deassert(mdwc->core_reset);
+		if (ret)
+			dev_err(mdwc->dev, "%s:core_reset deassert failed\n",
+					__func__);
+		clk_prepare_enable(mdwc->sleep_clk);
+	}
+
+	/*
+	 * Enable clocks.
+	 * Turn ON iface_clk before core_clk due to the FSM dependency.
+	 */
+	clk_prepare_enable(mdwc->iface_clk);
+	clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
+	clk_prepare_enable(mdwc->core_clk);
+	clk_prepare_enable(mdwc->utmi_clk);
+	if (mdwc->bus_aggr_clk)
+		clk_prepare_enable(mdwc->bus_aggr_clk);
+
+	/* Resume SS PHY */
+	if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
+		mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
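+		/* Select the SS PHY lane from the Type-C cable orientation */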
+		if (mdwc->typec_orientation == ORIENTATION_CC1)
+			mdwc->ss_phy->flags |= PHY_LANE_A;
+		if (mdwc->typec_orientation == ORIENTATION_CC2)
+			mdwc->ss_phy->flags |= PHY_LANE_B;
+		usb_phy_set_suspend(mdwc->ss_phy, 0);
+		mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
+		mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
+	}
+
+	mdwc->hs_phy->flags &= ~(PHY_HSFS_MODE | PHY_LS_MODE);
+	/* Resume HS PHY */
+	usb_phy_set_suspend(mdwc->hs_phy, 0);
+
+	/* Recover from controller power collapse */
+	if (mdwc->lpm_flags & MDWC3_POWER_COLLAPSE) {
+		u32 tmp;
+
+		dev_dbg(mdwc->dev, "%s: exit power collapse\n", __func__);
+
+		dwc3_msm_power_collapse_por(mdwc);
+
+		/* Get initial P3 status and enable IN_P3 event */
+		tmp = dwc3_msm_read_reg_field(mdwc->base,
+			DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+		atomic_set(&mdwc->in_p3, tmp == DWC3_LINK_STATE_U3);
+		dwc3_msm_write_reg_field(mdwc->base, PWR_EVNT_IRQ_MASK_REG,
+					PWR_EVNT_POWERDOWN_IN_P3_MASK, 1);
+
+		mdwc->lpm_flags &= ~MDWC3_POWER_COLLAPSE;
+	}
+
+	atomic_set(&dwc->in_lpm, 0);
+
+	/* Disable HSPHY auto suspend */
+	dwc3_msm_write_reg(mdwc->base, DWC3_GUSB2PHYCFG(0),
+		dwc3_msm_read_reg(mdwc->base, DWC3_GUSB2PHYCFG(0)) &
+				~(DWC3_GUSB2PHYCFG_ENBLSLPM |
+					DWC3_GUSB2PHYCFG_SUSPHY));
+
+	/* Disable wakeup capable for HS_PHY IRQ & SS_PHY_IRQ if enabled */
+	if (mdwc->lpm_flags & MDWC3_ASYNC_IRQ_WAKE_CAPABILITY) {
+		disable_irq_wake(mdwc->hs_phy_irq);
+		disable_irq_nosync(mdwc->hs_phy_irq);
+		if (mdwc->ss_phy_irq) {
+			disable_irq_wake(mdwc->ss_phy_irq);
+			disable_irq_nosync(mdwc->ss_phy_irq);
+		}
+		if (mdwc->in_host_mode) {
+			disable_irq_wake(mdwc->pwr_event_irq);
+			disable_irq(mdwc->pwr_event_irq);
+		}
+		mdwc->lpm_flags &= ~MDWC3_ASYNC_IRQ_WAKE_CAPABILITY;
+	}
+
+	dev_info(mdwc->dev, "DWC3 exited from low power mode\n");
+
+	/* enable power evt irq for IN P3 detection */
+	enable_irq(mdwc->pwr_event_irq);
+
+	/* Enable core irq */
+	if (dwc->irq)
+		enable_irq(dwc->irq);
+
+	/*
+	 * Handle other power events that could not have been handled during
+	 * Low Power Mode
+	 */
+	dwc3_pwr_event_handler(mdwc);
+
+	return 0;
+}
+
+/**
+ * dwc3_ext_event_notify - callback to handle events from external transceiver
+ */
+static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
+{
+	/* Flush processing any pending events before handling new ones */
+	flush_delayed_work(&mdwc->sm_work);
+
+	if (mdwc->id_state == DWC3_ID_FLOAT) {
+		dev_dbg(mdwc->dev, "XCVR: ID set\n");
+		set_bit(ID, &mdwc->inputs);
+	} else {
+		dev_dbg(mdwc->dev, "XCVR: ID clear\n");
+		clear_bit(ID, &mdwc->inputs);
+	}
+
+	if (mdwc->vbus_active && !mdwc->in_restart) {
+		dev_dbg(mdwc->dev, "XCVR: BSV set\n");
+		set_bit(B_SESS_VLD, &mdwc->inputs);
+	} else {
+		dev_dbg(mdwc->dev, "XCVR: BSV clear\n");
+		clear_bit(B_SESS_VLD, &mdwc->inputs);
+	}
+
+	if (mdwc->suspend) {
+		dev_dbg(mdwc->dev, "XCVR: SUSP set\n");
+		set_bit(B_SUSPEND, &mdwc->inputs);
+	} else {
+		dev_dbg(mdwc->dev, "XCVR: SUSP clear\n");
+		clear_bit(B_SUSPEND, &mdwc->inputs);
+	}
+
+	schedule_delayed_work(&mdwc->sm_work, 0);
+}
+
+static void dwc3_resume_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
+
+	dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
+
+	/*
+	 * Exit LPM first to meet the resume timeline from the device side.
+	 * The resume_pending flag prevents calling dwc3_msm_resume() when we
+	 * get here due to a system-wide resume without a USB cable connected;
+	 * the flag is set only by the power event IRQ while in LPM.
+	 */
+	if (mdwc->resume_pending) {
+		dwc3_msm_resume(mdwc);
+		mdwc->resume_pending = false;
+	}
+
+	if (atomic_read(&mdwc->pm_suspended))
+		/* let pm resume kick in resume work later */
+		return;
+	dwc3_ext_event_notify(mdwc);
+}
+
+static void dwc3_pwr_event_handler(struct dwc3_msm *mdwc)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	u32 irq_stat, irq_clear = 0;
+
+	irq_stat = dwc3_msm_read_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG);
+	dev_dbg(mdwc->dev, "%s irq_stat=%X\n", __func__, irq_stat);
+
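+	/*
+	 * Handled events are moved from irq_stat into irq_clear; whatever
+	 * is left in irq_stat at the end is reported as unexpected, and
+	 * irq_clear is written back to acknowledge the handled events.
+	 */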
+	/* Check for P3 events */
+	if ((irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) &&
+			(irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK)) {
+		/* Can't tell if entered or exit P3, so check LINKSTATE */
+		u32 ls = dwc3_msm_read_reg_field(mdwc->base,
+				DWC3_GDBGLTSSM, DWC3_GDBGLTSSM_LINKSTATE_MASK);
+		dev_dbg(mdwc->dev, "%s link state = 0x%04x\n", __func__, ls);
+		atomic_set(&mdwc->in_p3, ls == DWC3_LINK_STATE_U3);
+
+		irq_stat &= ~(PWR_EVNT_POWERDOWN_OUT_P3_MASK |
+				PWR_EVNT_POWERDOWN_IN_P3_MASK);
+		irq_clear |= (PWR_EVNT_POWERDOWN_OUT_P3_MASK |
+				PWR_EVNT_POWERDOWN_IN_P3_MASK);
+	} else if (irq_stat & PWR_EVNT_POWERDOWN_OUT_P3_MASK) {
+		atomic_set(&mdwc->in_p3, 0);
+		irq_stat &= ~PWR_EVNT_POWERDOWN_OUT_P3_MASK;
+		irq_clear |= PWR_EVNT_POWERDOWN_OUT_P3_MASK;
+	} else if (irq_stat & PWR_EVNT_POWERDOWN_IN_P3_MASK) {
+		atomic_set(&mdwc->in_p3, 1);
+		irq_stat &= ~PWR_EVNT_POWERDOWN_IN_P3_MASK;
+		irq_clear |= PWR_EVNT_POWERDOWN_IN_P3_MASK;
+	}
+
+	/* Clear L2 exit */
+	if (irq_stat & PWR_EVNT_LPM_OUT_L2_MASK) {
+		irq_stat &= ~PWR_EVNT_LPM_OUT_L2_MASK;
+		irq_clear |= PWR_EVNT_LPM_OUT_L2_MASK;
+	}
+
+	/* Handle exit from L1 events */
+	if (irq_stat & PWR_EVNT_LPM_OUT_L1_MASK) {
+		dev_dbg(mdwc->dev, "%s: handling PWR_EVNT_LPM_OUT_L1_MASK\n",
+				__func__);
+		if (usb_gadget_wakeup(&dwc->gadget))
+			dev_err(mdwc->dev, "%s failed to take dwc out of L1\n",
+					__func__);
+		irq_stat &= ~PWR_EVNT_LPM_OUT_L1_MASK;
+		irq_clear |= PWR_EVNT_LPM_OUT_L1_MASK;
+	}
+
+	/* Unhandled events */
+	if (irq_stat)
+		dev_dbg(mdwc->dev, "%s: unexpected PWR_EVNT, irq_stat=%X\n",
+			__func__, irq_stat);
+
+	dwc3_msm_write_reg(mdwc->base, PWR_EVNT_IRQ_STAT_REG, irq_clear);
+}
+
+static irqreturn_t msm_dwc3_pwr_irq_thread(int irq, void *_mdwc)
+{
+	struct dwc3_msm *mdwc = _mdwc;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(mdwc->dev, "%s\n", __func__);
+
+	if (atomic_read(&dwc->in_lpm))
+		dwc3_resume_work(&mdwc->resume_work);
+	else
+		dwc3_pwr_event_handler(mdwc);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t msm_dwc3_pwr_irq(int irq, void *data)
+{
+	struct dwc3_msm *mdwc = data;
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dwc->t_pwr_evt_irq = ktime_get();
+	dev_dbg(mdwc->dev, "%s received\n", __func__);
+	/*
+	 * When in Low Power Mode, we can't read PWR_EVNT_IRQ_STAT_REG to
+	 * ascertain which interrupts were triggered, as the clocks are
+	 * disabled. Resume the controller by waking up the pwr event irq
+	 * thread. After re-enabling
+	 * clocks, dwc3_msm_resume will call dwc3_pwr_event_handler to handle
+	 * all other power events.
+	 */
+	if (atomic_read(&dwc->in_lpm)) {
+		/* set this to call dwc3_msm_resume() */
+		mdwc->resume_pending = true;
+		return IRQ_WAKE_THREAD;
+	}
+
+	dwc3_pwr_event_handler(mdwc);
+	return IRQ_HANDLED;
+}
+
+static int dwc3_cpu_notifier_cb(struct notifier_block *nfb,
+		unsigned long action, void *hcpu)
+{
+	uint32_t cpu = (uintptr_t)hcpu;
+	struct dwc3_msm *mdwc =
+			container_of(nfb, struct dwc3_msm, dwc3_cpu_notifier);
+
+	if (cpu == cpu_to_affin && action == CPU_ONLINE) {
+		pr_debug("%s: cpu online:%u irq:%d\n", __func__,
+				cpu_to_affin, mdwc->irq_to_affin);
+		irq_set_affinity(mdwc->irq_to_affin, get_cpu_mask(cpu));
+	}
+
+	return NOTIFY_OK;
+}
+
+static void dwc3_otg_sm_work(struct work_struct *w);
+
+static int dwc3_msm_get_clk_gdsc(struct dwc3_msm *mdwc)
+{
+	int ret;
+
+	mdwc->dwc3_gdsc = devm_regulator_get(mdwc->dev, "USB3_GDSC");
+	if (IS_ERR(mdwc->dwc3_gdsc))
+		mdwc->dwc3_gdsc = NULL;
+
+	mdwc->xo_clk = devm_clk_get(mdwc->dev, "xo");
+	if (IS_ERR(mdwc->xo_clk)) {
+		dev_err(mdwc->dev, "%s unable to get TCXO buffer handle\n",
+								__func__);
+		ret = PTR_ERR(mdwc->xo_clk);
+		return ret;
+	}
+	clk_set_rate(mdwc->xo_clk, 19200000);
+
+	mdwc->iface_clk = devm_clk_get(mdwc->dev, "iface_clk");
+	if (IS_ERR(mdwc->iface_clk)) {
+		dev_err(mdwc->dev, "failed to get iface_clk\n");
+		ret = PTR_ERR(mdwc->iface_clk);
+		return ret;
+	}
+
+	/*
+	 * DWC3 Core requires its CORE CLK (aka master / bus clk) to
+	 * run at 125MHz in SSUSB mode and >60MHz in HSUSB mode.
+	 * On newer platforms it can run at 150MHz as well.
+	 */
+	mdwc->core_clk = devm_clk_get(mdwc->dev, "core_clk");
+	if (IS_ERR(mdwc->core_clk)) {
+		dev_err(mdwc->dev, "failed to get core_clk\n");
+		ret = PTR_ERR(mdwc->core_clk);
+		return ret;
+	}
+
+	mdwc->core_reset = devm_reset_control_get(mdwc->dev, "core_reset");
+	if (IS_ERR(mdwc->core_reset)) {
+		dev_err(mdwc->dev, "failed to get core_reset\n");
+		return PTR_ERR(mdwc->core_reset);
+	}
+
+	/*
+	 * Get Max supported clk frequency for USB Core CLK and request
+	 * to set the same.
+	 */
+	mdwc->core_clk_rate = clk_round_rate(mdwc->core_clk, LONG_MAX);
+	if (IS_ERR_VALUE(mdwc->core_clk_rate)) {
+		dev_err(mdwc->dev, "fail to get core clk max freq.\n");
+	} else {
+		ret = clk_set_rate(mdwc->core_clk, mdwc->core_clk_rate);
+		if (ret)
+			dev_err(mdwc->dev, "fail to set core_clk freq:%d\n",
+									ret);
+	}
+
+	mdwc->sleep_clk = devm_clk_get(mdwc->dev, "sleep_clk");
+	if (IS_ERR(mdwc->sleep_clk)) {
+		dev_err(mdwc->dev, "failed to get sleep_clk\n");
+		ret = PTR_ERR(mdwc->sleep_clk);
+		return ret;
+	}
+
+	clk_set_rate(mdwc->sleep_clk, 32000);
+	mdwc->utmi_clk_rate = 19200000;
+	mdwc->utmi_clk = devm_clk_get(mdwc->dev, "utmi_clk");
+	if (IS_ERR(mdwc->utmi_clk)) {
+		dev_err(mdwc->dev, "failed to get utmi_clk\n");
+		ret = PTR_ERR(mdwc->utmi_clk);
+		return ret;
+	}
+
+	clk_set_rate(mdwc->utmi_clk, mdwc->utmi_clk_rate);
+	mdwc->bus_aggr_clk = devm_clk_get(mdwc->dev, "bus_aggr_clk");
+	if (IS_ERR(mdwc->bus_aggr_clk))
+		mdwc->bus_aggr_clk = NULL;
+
+	if (of_property_match_string(mdwc->dev->of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		mdwc->cfg_ahb_clk = devm_clk_get(mdwc->dev, "cfg_ahb_clk");
+		if (IS_ERR(mdwc->cfg_ahb_clk)) {
+			ret = PTR_ERR(mdwc->cfg_ahb_clk);
+			mdwc->cfg_ahb_clk = NULL;
+			if (ret != -EPROBE_DEFER)
+				dev_err(mdwc->dev,
+					"failed to get cfg_ahb_clk ret %d\n",
+					ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int dwc3_msm_id_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
+	struct extcon_dev *edev = ptr;
+	enum dwc3_id_state id;
+	int cc_state;
+
+	if (!edev) {
+		dev_err(mdwc->dev, "%s: edev null\n", __func__);
+		goto done;
+	}
+
+	id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
+
+	dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
+
+	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+	if (cc_state < 0)
+		mdwc->typec_orientation = ORIENTATION_NONE;
+	else
+		mdwc->typec_orientation =
+			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+	if (mdwc->id_state != id) {
+		mdwc->id_state = id;
+		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+	}
+
+done:
+	return NOTIFY_DONE;
+}
+
+static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
+	unsigned long event, void *ptr)
+{
+	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	struct extcon_dev *edev = ptr;
+	int cc_state;
+
+	if (!edev) {
+		dev_err(mdwc->dev, "%s: edev null\n", __func__);
+		goto done;
+	}
+
+	dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
+
+	if (mdwc->vbus_active == event)
+		return NOTIFY_DONE;
+
+	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+	if (cc_state < 0)
+		mdwc->typec_orientation = ORIENTATION_NONE;
+	else
+		mdwc->typec_orientation =
+			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+	mdwc->vbus_active = event;
+	if (dwc->is_drd && !mdwc->in_restart)
+		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+done:
+	return NOTIFY_DONE;
+}
+
+static int dwc3_msm_extcon_register(struct dwc3_msm *mdwc)
+{
+	struct device_node *node = mdwc->dev->of_node;
+	struct extcon_dev *edev;
+	int ret = 0;
+
+	if (!of_property_read_bool(node, "extcon"))
+		return 0;
+
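+	/*
+	 * Use the first "extcon" phandle for VBUS (EXTCON_USB) events; if a
+	 * second phandle exists, use it for ID (EXTCON_USB_HOST) events,
+	 * otherwise reuse the first one.
+	 */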
+	edev = extcon_get_edev_by_phandle(mdwc->dev, 0);
+	if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV)
+		return PTR_ERR(edev);
+
+	if (!IS_ERR(edev)) {
+		mdwc->extcon_vbus = edev;
+		mdwc->vbus_nb.notifier_call = dwc3_msm_vbus_notifier;
+		ret = extcon_register_notifier(edev, EXTCON_USB,
+				&mdwc->vbus_nb);
+		if (ret < 0) {
+			dev_err(mdwc->dev, "failed to register notifier for USB\n");
+			return ret;
+		}
+	}
+
+	/* if a second phandle was provided, use it to get a separate edev */
+	if (of_count_phandle_with_args(node, "extcon", NULL) > 1) {
+		edev = extcon_get_edev_by_phandle(mdwc->dev, 1);
+		if (IS_ERR(edev) && PTR_ERR(edev) != -ENODEV) {
+			ret = PTR_ERR(edev);
+			goto err;
+		}
+	}
+
+	if (!IS_ERR(edev)) {
+		mdwc->extcon_id = edev;
+		mdwc->id_nb.notifier_call = dwc3_msm_id_notifier;
+		ret = extcon_register_notifier(edev, EXTCON_USB_HOST,
+				&mdwc->id_nb);
+		if (ret < 0) {
+			dev_err(mdwc->dev, "failed to register notifier for USB-HOST\n");
+			goto err;
+		}
+	}
+
+	return 0;
+err:
+	if (mdwc->extcon_vbus)
+		extcon_unregister_notifier(mdwc->extcon_vbus, EXTCON_USB,
+				&mdwc->vbus_nb);
+	return ret;
+}
+
+static ssize_t mode_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	if (mdwc->vbus_active)
+		return snprintf(buf, PAGE_SIZE, "peripheral\n");
+	if (mdwc->id_state == DWC3_ID_GROUND)
+		return snprintf(buf, PAGE_SIZE, "host\n");
+
+	return snprintf(buf, PAGE_SIZE, "none\n");
+}
+
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
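+	/*
+	 * Emulate cable state: "peripheral" asserts VBUS, "host" grounds ID,
+	 * anything else reports a disconnect.
+	 */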
+	if (sysfs_streq(buf, "peripheral")) {
+		mdwc->vbus_active = true;
+		mdwc->id_state = DWC3_ID_FLOAT;
+	} else if (sysfs_streq(buf, "host")) {
+		mdwc->vbus_active = false;
+		mdwc->id_state = DWC3_ID_GROUND;
+	} else {
+		mdwc->vbus_active = false;
+		mdwc->id_state = DWC3_ID_FLOAT;
+	}
+
+	dwc3_ext_event_notify(mdwc);
+
+	return count;
+}
+
+static DEVICE_ATTR_RW(mode);
+
+static int dwc3_msm_probe(struct platform_device *pdev)
+{
+	struct device_node *node = pdev->dev.of_node, *dwc3_node;
+	struct device	*dev = &pdev->dev;
+	struct dwc3_msm *mdwc;
+	struct dwc3	*dwc;
+	struct resource *res;
+	void __iomem *tcsr;
+	bool host_mode;
+	int ret = 0;
+	int ext_hub_reset_gpio;
+	u32 val;
+
+	mdwc = devm_kzalloc(&pdev->dev, sizeof(*mdwc), GFP_KERNEL);
+	if (!mdwc)
+		return -ENOMEM;
+
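+	/* Prefer 64-bit DMA addressing; fall back to a 32-bit mask */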
+	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
+		dev_err(&pdev->dev, "setting DMA mask to 64 failed.\n");
+		if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
+			dev_err(&pdev->dev, "setting DMA mask to 32 failed.\n");
+			return -EOPNOTSUPP;
+		}
+	}
+
+	platform_set_drvdata(pdev, mdwc);
+	mdwc->dev = &pdev->dev;
+
+	INIT_LIST_HEAD(&mdwc->req_complete_list);
+	INIT_WORK(&mdwc->resume_work, dwc3_resume_work);
+	INIT_WORK(&mdwc->restart_usb_work, dwc3_restart_usb_work);
+	INIT_WORK(&mdwc->bus_vote_w, dwc3_msm_bus_vote_w);
+	INIT_DELAYED_WORK(&mdwc->sm_work, dwc3_otg_sm_work);
+
+	mdwc->dwc3_wq = alloc_ordered_workqueue("dwc3_wq", 0);
+	if (!mdwc->dwc3_wq) {
+		pr_err("%s: Unable to create workqueue dwc3_wq\n", __func__);
+		return -ENOMEM;
+	}
+
+	/* Get all clks and gdsc reference */
+	ret = dwc3_msm_get_clk_gdsc(mdwc);
+	if (ret) {
+		dev_err(&pdev->dev, "error getting clock or gdsc.\n");
+		return ret;
+	}
+
+	mdwc->id_state = DWC3_ID_FLOAT;
+	set_bit(ID, &mdwc->inputs);
+
+	mdwc->charging_disabled = of_property_read_bool(node,
+				"qcom,charging-disabled");
+
+	ret = of_property_read_u32(node, "qcom,lpm-to-suspend-delay-ms",
+				&mdwc->lpm_to_suspend_delay);
+	if (ret) {
+		dev_dbg(&pdev->dev, "setting lpm_to_suspend_delay to zero.\n");
+		mdwc->lpm_to_suspend_delay = 0;
+	}
+
+	/*
+	 * DWC3 has a separate IRQ line for OTG events (ID/BSV) and for
+	 * DP and DM linestate transitions during low power mode.
+	 */
+	mdwc->hs_phy_irq = platform_get_irq_byname(pdev, "hs_phy_irq");
+	if (mdwc->hs_phy_irq < 0) {
+		dev_err(&pdev->dev, "pget_irq for hs_phy_irq failed\n");
+		ret = -EINVAL;
+		goto err;
+	} else {
+		irq_set_status_flags(mdwc->hs_phy_irq, IRQ_NOAUTOEN);
+		ret = devm_request_threaded_irq(&pdev->dev, mdwc->hs_phy_irq,
+					msm_dwc3_pwr_irq,
+					msm_dwc3_pwr_irq_thread,
+					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
+					| IRQF_ONESHOT, "hs_phy_irq", mdwc);
+		if (ret) {
+			dev_err(&pdev->dev, "irqreq hs_phy_irq failed: %d\n",
+					ret);
+			goto err;
+		}
+	}
+
+	mdwc->ss_phy_irq = platform_get_irq_byname(pdev, "ss_phy_irq");
+	if (mdwc->ss_phy_irq < 0) {
+		dev_dbg(&pdev->dev, "pget_irq for ss_phy_irq failed\n");
+	} else {
+		irq_set_status_flags(mdwc->ss_phy_irq, IRQ_NOAUTOEN);
+		ret = devm_request_threaded_irq(&pdev->dev, mdwc->ss_phy_irq,
+					msm_dwc3_pwr_irq,
+					msm_dwc3_pwr_irq_thread,
+					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME
+					| IRQF_ONESHOT, "ss_phy_irq", mdwc);
+		if (ret) {
+			dev_err(&pdev->dev, "irqreq ss_phy_irq failed: %d\n",
+					ret);
+			goto err;
+		}
+	}
+
+	mdwc->pwr_event_irq = platform_get_irq_byname(pdev, "pwr_event_irq");
+	if (mdwc->pwr_event_irq < 0) {
+		dev_err(&pdev->dev, "pget_irq for pwr_event_irq failed\n");
+		ret = -EINVAL;
+		goto err;
+	} else {
+		/* will be enabled in dwc3_msm_resume() */
+		irq_set_status_flags(mdwc->pwr_event_irq, IRQ_NOAUTOEN);
+		ret = devm_request_threaded_irq(&pdev->dev, mdwc->pwr_event_irq,
+					msm_dwc3_pwr_irq,
+					msm_dwc3_pwr_irq_thread,
+					IRQF_TRIGGER_RISING | IRQF_EARLY_RESUME,
+					"msm_dwc3", mdwc);
+		if (ret) {
+			dev_err(&pdev->dev, "irqreq pwr_event_irq failed: %d\n",
+					ret);
+			goto err;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tcsr_base");
+	if (!res) {
+		dev_dbg(&pdev->dev, "missing TCSR memory resource\n");
+	} else {
+		tcsr = devm_ioremap_nocache(&pdev->dev, res->start,
+			resource_size(res));
+		if (IS_ERR_OR_NULL(tcsr)) {
+			dev_dbg(&pdev->dev, "tcsr ioremap failed\n");
+		} else {
+			/* Enable USB3 on the primary USB port. */
+			writel_relaxed(0x1, tcsr);
+			/*
+			 * Ensure that TCSR write is completed before
+			 * USB registers initialization.
+			 */
+			mb();
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core_base");
+	if (!res) {
+		dev_err(&pdev->dev, "missing memory base resource\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	mdwc->base = devm_ioremap_nocache(&pdev->dev, res->start,
+			resource_size(res));
+	if (!mdwc->base) {
+		dev_err(&pdev->dev, "ioremap failed\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"ahb2phy_base");
+	if (res) {
+		mdwc->ahb2phy_base = devm_ioremap_nocache(&pdev->dev,
+					res->start, resource_size(res));
+		if (IS_ERR_OR_NULL(mdwc->ahb2phy_base)) {
+			dev_err(dev, "couldn't find ahb2phy_base addr.\n");
+			mdwc->ahb2phy_base = NULL;
+		} else {
+			/*
+			 * On some targets cfg_ahb_clk depends on the USB GDSC
+			 * regulator. If cfg_ahb_clk is enabled without turning
+			 * on the USB GDSC regulator, the clock is stuck off.
+			 */
+			dwc3_msm_config_gdsc(mdwc, 1);
+			clk_prepare_enable(mdwc->cfg_ahb_clk);
+			/* Configure AHB2PHY for one wait state read/write */
+			val = readl_relaxed(mdwc->ahb2phy_base +
+					PERIPH_SS_AHB2PHY_TOP_CFG);
+			if (val != ONE_READ_WRITE_WAIT) {
+				writel_relaxed(ONE_READ_WRITE_WAIT,
+					mdwc->ahb2phy_base +
+					PERIPH_SS_AHB2PHY_TOP_CFG);
+				/* complete above write before using USB PHY */
+				mb();
+			}
+			clk_disable_unprepare(mdwc->cfg_ahb_clk);
+			dwc3_msm_config_gdsc(mdwc, 0);
+		}
+	}
+
+	if (of_get_property(pdev->dev.of_node, "qcom,usb-dbm", NULL)) {
+		mdwc->dbm = usb_get_dbm_by_phandle(&pdev->dev, "qcom,usb-dbm");
+		if (IS_ERR(mdwc->dbm)) {
+			dev_err(&pdev->dev, "unable to get dbm device\n");
+			ret = -EPROBE_DEFER;
+			goto err;
+		}
+		/*
+		 * The pwr_event_irq is required if the DBM indicates
+		 * coming out of L1 by interrupt.
+		 */
+		if (dbm_l1_lpm_interrupt(mdwc->dbm)) {
+			if (!mdwc->pwr_event_irq) {
+				dev_err(&pdev->dev,
+					"need pwr_event_irq exiting L1\n");
+				ret = -EINVAL;
+				goto err;
+			}
+		}
+	}
+
+	ext_hub_reset_gpio = of_get_named_gpio(node,
+					"qcom,ext-hub-reset-gpio", 0);
+
+	if (gpio_is_valid(ext_hub_reset_gpio)
+		&& (!devm_gpio_request(&pdev->dev, ext_hub_reset_gpio,
+			"qcom,ext-hub-reset-gpio"))) {
+		/* reset external hub */
+		gpio_direction_output(ext_hub_reset_gpio, 1);
+		/*
+		 * Hub reset should be asserted for a minimum of
+		 * 5 microseconds before deasserting.
+		 */
+		usleep_range(5, 1000);
+		gpio_direction_output(ext_hub_reset_gpio, 0);
+	}
+
+	if (of_property_read_u32(node, "qcom,dwc-usb3-msm-tx-fifo-size",
+				 &mdwc->tx_fifo_size))
+		dev_err(&pdev->dev,
+			"unable to read platform data tx fifo size\n");
+
+	mdwc->disable_host_mode_pm = of_property_read_bool(node,
+				"qcom,disable-host-mode-pm");
+
+	dwc3_set_notifier(&dwc3_msm_notify_event);
+
+	/* Assumes dwc3 is the first DT child of dwc3-msm */
+	dwc3_node = of_get_next_available_child(node, NULL);
+	if (!dwc3_node) {
+		dev_err(&pdev->dev, "failed to find dwc3 child\n");
+		ret = -ENODEV;
+		goto err;
+	}
+
+	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	if (ret) {
+		dev_err(&pdev->dev,
+				"failed to add create dwc3 core\n");
+		of_node_put(dwc3_node);
+		goto err;
+	}
+
+	mdwc->dwc3 = of_find_device_by_node(dwc3_node);
+	of_node_put(dwc3_node);
+	if (!mdwc->dwc3) {
+		dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+		ret = -ENODEV;
+		goto put_dwc3;
+	}
+
+	mdwc->hs_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
+							"usb-phy", 0);
+	if (IS_ERR(mdwc->hs_phy)) {
+		dev_err(&pdev->dev, "unable to get hsphy device\n");
+		ret = PTR_ERR(mdwc->hs_phy);
+		goto put_dwc3;
+	}
+	mdwc->ss_phy = devm_usb_get_phy_by_phandle(&mdwc->dwc3->dev,
+							"usb-phy", 1);
+	if (IS_ERR(mdwc->ss_phy)) {
+		dev_err(&pdev->dev, "unable to get ssphy device\n");
+		ret = PTR_ERR(mdwc->ss_phy);
+		goto put_dwc3;
+	}
+
+	mdwc->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+	if (mdwc->bus_scale_table) {
+		mdwc->bus_perf_client =
+			msm_bus_scale_register_client(mdwc->bus_scale_table);
+	}
+
+	dwc = platform_get_drvdata(mdwc->dwc3);
+	if (!dwc) {
+		dev_err(&pdev->dev, "Failed to get dwc3 device\n");
+		ret = -ENODEV;
+		goto put_dwc3;
+	}
+
+	mdwc->irq_to_affin = platform_get_irq(mdwc->dwc3, 0);
+	mdwc->dwc3_cpu_notifier.notifier_call = dwc3_cpu_notifier_cb;
+
+	if (cpu_to_affin)
+		register_cpu_notifier(&mdwc->dwc3_cpu_notifier);
+
+	/*
+	 * Clocks and regulators will not be turned on until the first time
+	 * runtime PM resume is called. This is to allow for booting up with
+	 * charger already connected so as not to disturb PHY line states.
+	 */
+	mdwc->lpm_flags = MDWC3_POWER_COLLAPSE | MDWC3_SS_PHY_SUSPEND;
+	atomic_set(&dwc->in_lpm, 1);
+	pm_runtime_set_suspended(mdwc->dev);
+	pm_runtime_set_autosuspend_delay(mdwc->dev, 1000);
+	pm_runtime_use_autosuspend(mdwc->dev);
+	pm_runtime_enable(mdwc->dev);
+	device_init_wakeup(mdwc->dev, 1);
+
+	if (of_property_read_bool(node, "qcom,disable-dev-mode-pm"))
+		pm_runtime_get_noresume(mdwc->dev);
+
+	ret = dwc3_msm_extcon_register(mdwc);
+	if (ret)
+		goto put_dwc3;
+
+	/* Update initial VBUS/ID state from extcon */
+	if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
+							EXTCON_USB))
+		dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
+	if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
+							EXTCON_USB_HOST))
+		dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
+
+	device_create_file(&pdev->dev, &dev_attr_mode);
+
+	schedule_delayed_work(&mdwc->sm_work, 0);
+
+	host_mode = usb_get_dr_mode(&mdwc->dwc3->dev) == USB_DR_MODE_HOST;
+	if (!dwc->is_drd && host_mode) {
+		dev_dbg(&pdev->dev, "DWC3 in host only mode\n");
+		mdwc->id_state = DWC3_ID_GROUND;
+		dwc3_ext_event_notify(mdwc);
+	}
+
+	return 0;
+
+put_dwc3:
+	platform_device_put(mdwc->dwc3);
+	if (mdwc->bus_perf_client)
+		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+err:
+	return ret;
+}
+
+static int dwc3_msm_remove_children(struct device *dev, void *data)
+{
+	device_unregister(dev);
+	return 0;
+}
+
+static int dwc3_msm_remove(struct platform_device *pdev)
+{
+	struct dwc3_msm	*mdwc = platform_get_drvdata(pdev);
+	int ret_pm;
+
+	device_remove_file(&pdev->dev, &dev_attr_mode);
+
+	if (cpu_to_affin)
+		unregister_cpu_notifier(&mdwc->dwc3_cpu_notifier);
+
+	/*
+	 * In case of system suspend, pm_runtime_get_sync fails.
+	 * Hence turn ON the clocks manually.
+	 */
+	ret_pm = pm_runtime_get_sync(mdwc->dev);
+	if (ret_pm < 0) {
+		dev_err(mdwc->dev,
+			"pm_runtime_get_sync failed with %d\n", ret_pm);
+		clk_prepare_enable(mdwc->utmi_clk);
+		clk_prepare_enable(mdwc->core_clk);
+		clk_prepare_enable(mdwc->iface_clk);
+		clk_prepare_enable(mdwc->sleep_clk);
+		if (mdwc->bus_aggr_clk)
+			clk_prepare_enable(mdwc->bus_aggr_clk);
+		clk_prepare_enable(mdwc->xo_clk);
+	}
+
+	cancel_delayed_work_sync(&mdwc->sm_work);
+
+	if (mdwc->hs_phy)
+		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+	platform_device_put(mdwc->dwc3);
+	device_for_each_child(&pdev->dev, NULL, dwc3_msm_remove_children);
+
+	pm_runtime_disable(mdwc->dev);
+	pm_runtime_barrier(mdwc->dev);
+	pm_runtime_put_sync(mdwc->dev);
+	pm_runtime_set_suspended(mdwc->dev);
+	device_wakeup_disable(mdwc->dev);
+
+	if (mdwc->bus_perf_client)
+		msm_bus_scale_unregister_client(mdwc->bus_perf_client);
+
+	if (!IS_ERR_OR_NULL(mdwc->vbus_reg))
+		regulator_disable(mdwc->vbus_reg);
+
+	disable_irq(mdwc->hs_phy_irq);
+	if (mdwc->ss_phy_irq)
+		disable_irq(mdwc->ss_phy_irq);
+	disable_irq(mdwc->pwr_event_irq);
+
+	clk_disable_unprepare(mdwc->utmi_clk);
+	clk_set_rate(mdwc->core_clk, 19200000);
+	clk_disable_unprepare(mdwc->core_clk);
+	clk_disable_unprepare(mdwc->iface_clk);
+	clk_disable_unprepare(mdwc->sleep_clk);
+	clk_disable_unprepare(mdwc->xo_clk);
+	clk_put(mdwc->xo_clk);
+
+	dwc3_msm_config_gdsc(mdwc, 0);
+
+	return 0;
+}
+
+#define VBUS_REG_CHECK_DELAY	(msecs_to_jiffies(1000))
+
+/**
+ * dwc3_otg_start_host - helper function for starting/stopping the host
+ * controller driver.
+ *
+ * @mdwc: Pointer to the dwc3_msm structure.
+ * @on: start / stop the host controller driver.
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_otg_start_host(struct dwc3_msm *mdwc, int on)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	int ret = 0;
+
+	if (!dwc->xhci)
+		return -EINVAL;
+
+	/*
+	 * The vbus_reg pointer could have multiple values
+	 * NULL: regulator_get() hasn't been called, or was previously deferred
+	 * IS_ERR: regulator could not be obtained, so skip using it
+	 * Valid pointer otherwise
+	 */
+	if (!mdwc->vbus_reg) {
+		mdwc->vbus_reg = devm_regulator_get_optional(mdwc->dev,
+					"vbus_dwc3");
+		if (IS_ERR(mdwc->vbus_reg) &&
+				PTR_ERR(mdwc->vbus_reg) == -EPROBE_DEFER) {
+			/* regulators may not be ready, so retry again later */
+			mdwc->vbus_reg = NULL;
+			return -EPROBE_DEFER;
+		}
+	}
+
+	if (on) {
+		dev_dbg(mdwc->dev, "%s: turn on host\n", __func__);
+
+		pm_runtime_get_sync(mdwc->dev);
+		mdwc->hs_phy->flags |= PHY_HOST_MODE;
+		mdwc->ss_phy->flags |= PHY_HOST_MODE;
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+		if (!IS_ERR(mdwc->vbus_reg))
+			ret = regulator_enable(mdwc->vbus_reg);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to enable vbus_reg\n");
+			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+			pm_runtime_put_sync(mdwc->dev);
+			return ret;
+		}
+
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+
+		/*
+		 * FIXME If the micro-A cable is disconnected during system
+		 * suspend, the xhci platform device is removed before runtime
+		 * PM is enabled for it. This makes disable_depth greater than
+		 * one, so runtime PM is not enabled for the next micro-A
+		 * connect. Fix this by calling pm_runtime_init for xhci.
+		 */
+		pm_runtime_init(&dwc->xhci->dev);
+		ret = platform_device_add(dwc->xhci);
+		if (ret) {
+			dev_err(mdwc->dev,
+				"%s: failed to add XHCI pdev ret=%d\n",
+				__func__, ret);
+			if (!IS_ERR(mdwc->vbus_reg))
+				regulator_disable(mdwc->vbus_reg);
+			mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+			mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+			pm_runtime_put_sync(mdwc->dev);
+			return ret;
+		}
+
+		/*
+		 * In some cases the USB PHY is observed not to enter suspend
+		 * with the host mode suspend functionality. Hence disable
+		 * XHCI's runtime PM here if disable_host_mode_pm is set.
+		 */
+		if (mdwc->disable_host_mode_pm)
+			pm_runtime_disable(&dwc->xhci->dev);
+
+		mdwc->in_host_mode = true;
+		dwc3_usb3_phy_suspend(dwc, true);
+
+		/* xHCI should have incremented child count as necessary */
+		pm_runtime_mark_last_busy(mdwc->dev);
+		pm_runtime_put_sync_autosuspend(mdwc->dev);
+	} else {
+		dev_dbg(mdwc->dev, "%s: turn off host\n", __func__);
+
+		if (!IS_ERR(mdwc->vbus_reg))
+			ret = regulator_disable(mdwc->vbus_reg);
+		if (ret) {
+			dev_err(mdwc->dev, "unable to disable vbus_reg\n");
+			return ret;
+		}
+
+		pm_runtime_get_sync(mdwc->dev);
+		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+		mdwc->hs_phy->flags &= ~PHY_HOST_MODE;
+		mdwc->ss_phy->flags &= ~PHY_HOST_MODE;
+		platform_device_del(dwc->xhci);
+
+		/*
+		 * Perform USB hardware RESET (both core reset and DBM reset)
+		 * when moving from host to peripheral. This is required for
+		 * peripheral mode to work.
+		 */
+		dwc3_msm_block_reset(mdwc, true);
+
+		dwc3_usb3_phy_suspend(dwc, false);
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+
+		mdwc->in_host_mode = false;
+
+		/* re-init core and OTG registers as block reset clears these */
+		dwc3_post_host_reset_core_init(dwc);
+		pm_runtime_mark_last_busy(mdwc->dev);
+		pm_runtime_put_sync_autosuspend(mdwc->dev);
+	}
+
+	return 0;
+}
+
+static void dwc3_override_vbus_status(struct dwc3_msm *mdwc, bool vbus_present)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	/* Update OTG VBUS Valid from HSPHY to controller */
+	dwc3_msm_write_readback(mdwc->base, HS_PHY_CTRL_REG,
+		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL :
+		UTMI_OTG_VBUS_VALID,
+		vbus_present ? UTMI_OTG_VBUS_VALID | SW_SESSVLD_SEL : 0);
+
+	/* Update only if Super Speed is supported */
+	if (dwc->maximum_speed == USB_SPEED_SUPER) {
+		/* Update VBUS Valid from SSPHY to controller */
+		dwc3_msm_write_readback(mdwc->base, SS_PHY_CTRL_REG,
+			LANE0_PWR_PRESENT,
+			vbus_present ? LANE0_PWR_PRESENT : 0);
+	}
+}
+
+/**
+ * dwc3_otg_start_peripheral -  bind/unbind the peripheral controller.
+ *
+ * @mdwc: Pointer to the dwc3_msm structure.
+ * @on:   Turn ON/OFF the gadget.
+ *
+ * Returns 0 on success otherwise negative errno.
+ */
+static int dwc3_otg_start_peripheral(struct dwc3_msm *mdwc, int on)
+{
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	pm_runtime_get_sync(mdwc->dev);
+
+	if (on) {
+		dev_dbg(mdwc->dev, "%s: turn on gadget %s\n",
+					__func__, dwc->gadget.name);
+
+		dwc3_override_vbus_status(mdwc, true);
+		usb_phy_notify_connect(mdwc->hs_phy, USB_SPEED_HIGH);
+		usb_phy_notify_connect(mdwc->ss_phy, USB_SPEED_SUPER);
+
+		/*
+		 * Core reset is not required during start peripheral. Only
+		 * DBM reset is required, hence perform only DBM reset here.
+		 */
+		dwc3_msm_block_reset(mdwc, false);
+
+		dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
+		usb_gadget_vbus_connect(&dwc->gadget);
+	} else {
+		dev_dbg(mdwc->dev, "%s: turn off gadget %s\n",
+					__func__, dwc->gadget.name);
+		usb_gadget_vbus_disconnect(&dwc->gadget);
+		usb_phy_notify_disconnect(mdwc->hs_phy, USB_SPEED_HIGH);
+		usb_phy_notify_disconnect(mdwc->ss_phy, USB_SPEED_SUPER);
+		dwc3_override_vbus_status(mdwc, false);
+		dwc3_usb3_phy_suspend(dwc, false);
+	}
+
+	pm_runtime_put_sync(mdwc->dev);
+
+	return 0;
+}
+
+static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned int mA)
+{
+	union power_supply_propval pval = {0,};
+
+	if (mdwc->charging_disabled)
+		return 0;
+
+	if (mdwc->max_power == mA)
+		return 0;
+
+	if (!mdwc->usb_psy) {
+		mdwc->usb_psy = power_supply_get_by_name("usb");
+		if (!mdwc->usb_psy) {
+			dev_warn(mdwc->dev, "Could not get usb power_supply\n");
+			return -ENODEV;
+		}
+	}
+
+	dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+
+	if (mdwc->max_power <= 2 && mA > 2) {
+		/* Enable Charging */
+		pval.intval = true;
+		if (power_supply_set_property(mdwc->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &pval))
+			goto psy_error;
+		pval.intval = 1000 * mA;
+		if (power_supply_set_property(mdwc->usb_psy,
+					POWER_SUPPLY_PROP_CURRENT_MAX, &pval))
+			goto psy_error;
+	} else if (mdwc->max_power > 0 && (mA == 0 || mA == 2)) {
+		/* Disable charging */
+		pval.intval = false;
+		if (power_supply_set_property(mdwc->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &pval))
+			goto psy_error;
+	} else {
+		/* Enable charging */
+		pval.intval = true;
+		if (power_supply_set_property(mdwc->usb_psy,
+					POWER_SUPPLY_PROP_ONLINE, &pval))
+			goto psy_error;
+	}
+
+	/* Set max current limit in uA */
+	pval.intval = 1000 * mA;
+	if (power_supply_set_property(mdwc->usb_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval))
+		goto psy_error;
+
+	mdwc->max_power = mA;
+	return 0;
+
+psy_error:
+	dev_dbg(mdwc->dev, "power supply error when setting property\n");
+	return -ENXIO;
+}
+
+
+/**
+ * dwc3_otg_sm_work - workqueue function.
+ *
+ * @w: Pointer to the dwc3 otg state machine work item
+ *
+ * NOTE: After any change in otg_state, we must reschedule the state machine.
+ */
+static void dwc3_otg_sm_work(struct work_struct *w)
+{
+	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, sm_work.work);
+	struct dwc3 *dwc = NULL;
+	bool work = 0;
+	int ret = 0;
+	unsigned long delay = 0;
+	const char *state;
+
+	if (mdwc->dwc3)
+		dwc = platform_get_drvdata(mdwc->dwc3);
+
+	if (!dwc) {
+		dev_err(mdwc->dev, "dwc is NULL.\n");
+		return;
+	}
+
+	state = usb_otg_state_string(mdwc->otg_state);
+	dev_dbg(mdwc->dev, "%s state\n", state);
+
+	/* Check OTG state */
+	switch (mdwc->otg_state) {
+	case OTG_STATE_UNDEFINED:
+		/* Do nothing if no cable connected */
+		if (test_bit(ID, &mdwc->inputs) &&
+				!test_bit(B_SESS_VLD, &mdwc->inputs))
+			break;
+
+		mdwc->otg_state = OTG_STATE_B_IDLE;
+		/* fall-through */
+	case OTG_STATE_B_IDLE:
+		if (!test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "!id\n");
+			mdwc->otg_state = OTG_STATE_A_IDLE;
+			work = 1;
+		} else if (test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "b_sess_vld\n");
+			/*
+			 * Increment pm usage count upon cable connect. Count
+			 * is decremented in OTG_STATE_B_PERIPHERAL state on
+			 * cable disconnect or in bus suspend.
+			 */
+			pm_runtime_get_sync(mdwc->dev);
+			dwc3_otg_start_peripheral(mdwc, 1);
+			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
+			work = 1;
+		} else {
+			dwc3_msm_gadget_vbus_draw(mdwc, 0);
+			dev_dbg(mdwc->dev, "Cable disconnected\n");
+		}
+		break;
+
+	case OTG_STATE_B_PERIPHERAL:
+		if (!test_bit(B_SESS_VLD, &mdwc->inputs) ||
+				!test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "!id || !bsv\n");
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			dwc3_otg_start_peripheral(mdwc, 0);
+			/*
+			 * Decrement pm usage count upon cable disconnect
+			 * which was incremented upon cable connect in
+			 * OTG_STATE_B_IDLE state
+			 */
+			pm_runtime_put_sync(mdwc->dev);
+			work = 1;
+		} else if (test_bit(B_SUSPEND, &mdwc->inputs) &&
+			test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "BPER bsv && susp\n");
+			mdwc->otg_state = OTG_STATE_B_SUSPEND;
+			/*
+			 * Decrement pm usage count upon bus suspend.
+			 * Count was incremented either upon cable
+			 * connect in OTG_STATE_B_IDLE or host
+			 * initiated resume after bus suspend in
+			 * OTG_STATE_B_SUSPEND state
+			 */
+			pm_runtime_mark_last_busy(mdwc->dev);
+			pm_runtime_put_autosuspend(mdwc->dev);
+		}
+		break;
+
+	case OTG_STATE_B_SUSPEND:
+		if (!test_bit(B_SESS_VLD, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "BSUSP: !bsv\n");
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			dwc3_otg_start_peripheral(mdwc, 0);
+		} else if (!test_bit(B_SUSPEND, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "BSUSP !susp\n");
+			mdwc->otg_state = OTG_STATE_B_PERIPHERAL;
+			/*
+			 * Increment pm usage count upon host
+			 * initiated resume. Count was decremented
+			 * upon bus suspend in
+			 * OTG_STATE_B_PERIPHERAL state.
+			 */
+			pm_runtime_get_sync(mdwc->dev);
+		}
+		break;
+
+	case OTG_STATE_A_IDLE:
+		/* Switch to A-Device */
+		if (test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "id\n");
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			mdwc->vbus_retry_count = 0;
+			work = 1;
+		} else {
+			mdwc->otg_state = OTG_STATE_A_HOST;
+			ret = dwc3_otg_start_host(mdwc, 1);
+			if ((ret == -EPROBE_DEFER) &&
+						mdwc->vbus_retry_count < 3) {
+				/*
+				 * Getting the regulator failed as its driver is
+				 * not up yet; retry starting host after 1 sec.
+				 */
+				mdwc->otg_state = OTG_STATE_A_IDLE;
+				dev_dbg(mdwc->dev, "Unable to get vbus regulator. Retrying...\n");
+				delay = VBUS_REG_CHECK_DELAY;
+				work = 1;
+				mdwc->vbus_retry_count++;
+			} else if (ret) {
+				dev_err(mdwc->dev, "unable to start host\n");
+				mdwc->otg_state = OTG_STATE_A_IDLE;
+				goto ret;
+			}
+		}
+		break;
+
+	case OTG_STATE_A_HOST:
+		if (test_bit(ID, &mdwc->inputs)) {
+			dev_dbg(mdwc->dev, "id\n");
+			dwc3_otg_start_host(mdwc, 0);
+			mdwc->otg_state = OTG_STATE_B_IDLE;
+			mdwc->vbus_retry_count = 0;
+			work = 1;
+		} else {
+			dev_dbg(mdwc->dev, "still in a_host state. Resuming root hub.\n");
+			if (dwc)
+				pm_runtime_resume(&dwc->xhci->dev);
+		}
+		break;
+
+	default:
+		dev_err(mdwc->dev, "%s: invalid otg-state\n", __func__);
+
+	}
+
+	if (work)
+		schedule_delayed_work(&mdwc->sm_work, delay);
+
+ret:
+	return;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dwc3_msm_pm_suspend(struct device *dev)
+{
+	int ret = 0;
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+
+	dev_dbg(dev, "dwc3-msm PM suspend\n");
+
+	flush_workqueue(mdwc->dwc3_wq);
+	if (!atomic_read(&dwc->in_lpm)) {
+		dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
+		return -EBUSY;
+	}
+
+	ret = dwc3_msm_suspend(mdwc);
+	if (!ret)
+		atomic_set(&mdwc->pm_suspended, 1);
+
+	return ret;
+}
+
+static int dwc3_msm_pm_resume(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "dwc3-msm PM resume\n");
+
+	/* flush to avoid race in read/write of pm_suspended */
+	flush_workqueue(mdwc->dwc3_wq);
+	atomic_set(&mdwc->pm_suspended, 0);
+
+	/* kick in otg state machine */
+	queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int dwc3_msm_runtime_idle(struct device *dev)
+{
+	dev_dbg(dev, "DWC3-msm runtime idle\n");
+
+	return 0;
+}
+
+static int dwc3_msm_runtime_suspend(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "DWC3-msm runtime suspend\n");
+
+	return dwc3_msm_suspend(mdwc);
+}
+
+static int dwc3_msm_runtime_resume(struct device *dev)
+{
+	struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+
+	dev_dbg(dev, "DWC3-msm runtime resume\n");
+
+	return dwc3_msm_resume(mdwc);
+}
+#endif
+
+static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
+	SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
+				dwc3_msm_runtime_idle)
+};
+
+static const struct of_device_id of_dwc3_matach[] = {
+	{
+		.compatible = "qcom,dwc-usb3-msm",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, of_dwc3_matach);
+
+static struct platform_driver dwc3_msm_driver = {
+	.probe		= dwc3_msm_probe,
+	.remove		= dwc3_msm_remove,
+	.driver		= {
+		.name	= "msm-dwc3",
+		.pm	= &dwc3_msm_dev_pm_ops,
+		.of_match_table	= of_dwc3_matach,
+	},
+};
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 MSM Glue Layer");
+
+static int __init dwc3_msm_init(void)
+{
+	return platform_driver_register(&dwc3_msm_driver);
+}
+module_init(dwc3_msm_init);
+
+static void __exit dwc3_msm_exit(void)
+{
+	platform_driver_unregister(&dwc3_msm_driver);
+}
+module_exit(dwc3_msm_exit);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1dfa56a5f..a9789cc 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -22,12 +22,14 @@
 #include <linux/spinlock.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/ratelimit.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/list.h>
 #include <linux/dma-mapping.h>
 
 #include <linux/usb/ch9.h>
+#include <linux/usb/composite.h>
 #include <linux/usb/gadget.h>
 
 #include "debug.h"
@@ -35,6 +37,8 @@
 #include "gadget.h"
 #include "io.h"
 
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, bool remote_wakeup);
+static int dwc3_gadget_wakeup_int(struct dwc3 *dwc);
 /**
  * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
  * @dwc: pointer to our context structure
@@ -234,7 +238,7 @@
 		struct dwc3_gadget_ep_cmd_params *params)
 {
 	struct dwc3		*dwc = dep->dwc;
-	u32			timeout = 500;
+	u32			timeout = 1500;
 	u32			reg;
 
 	int			cmd_status = 0;
@@ -313,6 +317,9 @@
 
 	if (timeout == 0) {
 		ret = -ETIMEDOUT;
+		dwc3_trace(trace_dwc3_gadget, "Command Timed Out");
+		dev_err(dwc->dev, "%s command timeout for %s\n",
+			dwc3_gadget_ep_cmd_string(cmd), dep->name);
 		cmd_status = -ETIMEDOUT;
 	}
 
@@ -350,29 +357,23 @@
 	return dwc3_send_gadget_ep_cmd(dep, cmd, &params);
 }
 
-static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
-		struct dwc3_trb *trb)
-{
-	u32		offset = (char *) trb - (char *) dep->trb_pool;
-
-	return dep->trb_pool_dma + offset;
-}
-
 static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
 {
 	struct dwc3		*dwc = dep->dwc;
+	u32			num_trbs = DWC3_TRB_NUM;
 
 	if (dep->trb_pool)
 		return 0;
 
-	dep->trb_pool = dma_alloc_coherent(dwc->dev,
-			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+	dep->trb_pool = dma_zalloc_coherent(dwc->dev,
+			sizeof(struct dwc3_trb) * num_trbs,
 			&dep->trb_pool_dma, GFP_KERNEL);
 	if (!dep->trb_pool) {
 		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
 				dep->name);
 		return -ENOMEM;
 	}
+	dep->num_trbs = num_trbs;
 
 	return 0;
 }
@@ -381,11 +382,17 @@
 {
 	struct dwc3		*dwc = dep->dwc;
 
-	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
-			dep->trb_pool, dep->trb_pool_dma);
+	/* Freeing of GSI EP TRBs is handled by GSI EP ops. */
+	if (dep->endpoint.ep_type == EP_TYPE_GSI)
+		return;
 
-	dep->trb_pool = NULL;
-	dep->trb_pool_dma = 0;
+	if (dep->trb_pool && dep->trb_pool_dma) {
+		dma_free_coherent(dwc->dev,
+				sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
+				dep->trb_pool, dep->trb_pool_dma);
+		dep->trb_pool = NULL;
+		dep->trb_pool_dma = 0;
+	}
 }
 
 static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
@@ -554,14 +561,19 @@
 
 	if (!(dep->flags & DWC3_EP_ENABLED)) {
 		ret = dwc3_gadget_start_config(dwc, dep);
-		if (ret)
+		if (ret) {
+			dev_err(dwc->dev, "start_config() failed for %s\n",
+								dep->name);
 			return ret;
+		}
 	}
 
 	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, modify,
 			restore);
-	if (ret)
+	if (ret) {
+		dev_err(dwc->dev, "set_ep_config() failed for %s\n", dep->name);
 		return ret;
+	}
 
 	if (!(dep->flags & DWC3_EP_ENABLED)) {
 		struct dwc3_trb	*trb_st_hw;
@@ -598,7 +610,6 @@
 	return 0;
 }
 
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
 static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
 {
 	struct dwc3_request		*req;
@@ -634,7 +645,10 @@
 
 	dwc3_trace(trace_dwc3_gadget, "Disabling %s", dep->name);
 
-	dwc3_remove_requests(dwc, dep);
+	if (dep->endpoint.ep_type == EP_TYPE_NORMAL)
+		dwc3_remove_requests(dwc, dep);
+	else if (dep->endpoint.ep_type == EP_TYPE_GSI)
+		dwc3_stop_active_transfer(dwc, dep->number, true);
 
 	/* make sure HW endpoint isn't stalled */
 	if (dep->flags & DWC3_EP_STALL)
@@ -650,6 +664,22 @@
 	dep->type = 0;
 	dep->flags = 0;
 
+	/* Keep GSI ep names with "-gsi" suffix */
+	if (!strnstr(dep->name, "gsi", 10)) {
+		snprintf(dep->name, sizeof(dep->name), "ep%d%s",
+			dep->number >> 1,
+			(dep->number & 1) ? "in" : "out");
+	}
+
+	/*
+	 * Clean up the TRB ring of non-control endpoints to avoid stale
+	 * TRBs with the HWO bit set from a previous composition causing
+	 * XferInProgress when the update transfer command is issued.
+	 */
+	if (dep->number > 1 && dep->trb_pool)
+		memset(&dep->trb_pool[0], 0,
+			sizeof(struct dwc3_trb) * dep->num_trbs);
+
 	return 0;
 }
 
@@ -1104,6 +1134,30 @@
 	return ret;
 }
 
+static int dwc3_gadget_wakeup(struct usb_gadget *g)
+{
+	struct dwc3	*dwc = gadget_to_dwc(g);
+
+	schedule_work(&dwc->wakeup_work);
+	return 0;
+}
+
+static inline enum dwc3_link_state dwc3_get_link_state(struct dwc3 *dwc)
+{
+	u32 reg;
+
+	reg = dwc3_readl(dwc->regs, DWC3_DSTS);
+	return DWC3_DSTS_USBLNKST(reg);
+}
+
+static bool dwc3_gadget_is_suspended(struct dwc3 *dwc)
+{
+	if (atomic_read(&dwc->in_lpm) ||
+			dwc->link_state == DWC3_LINK_STATE_U3)
+		return true;
+	return false;
+}
+
 static void __dwc3_gadget_ep_zlp_complete(struct usb_ep *ep,
 		struct usb_request *request)
 {
@@ -1142,6 +1196,26 @@
 	int				ret;
 
 	spin_lock_irqsave(&dwc->lock, flags);
+	if (!dep->endpoint.desc) {
+		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
+				request, ep->name);
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+
+	if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
+				request, req->dep->name)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (dwc3_gadget_is_suspended(dwc)) {
+		if (dwc->gadget.remote_wakeup)
+			dwc3_gadget_wakeup(&dwc->gadget);
+		ret = dwc->gadget.remote_wakeup ? -EAGAIN : -ENOTSUPP;
+		goto out;
+	}
+
 	ret = __dwc3_gadget_ep_queue(dep, req);
 
 	/*
@@ -1154,6 +1228,7 @@
 	    (request->length % ep->maxpacket == 0))
 		ret = __dwc3_gadget_ep_queue_zlp(dwc, dep);
 
+out:
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
 	return ret;
@@ -1171,6 +1246,11 @@
 	unsigned long			flags;
 	int				ret = 0;
 
+	if (atomic_read(&dwc->in_lpm)) {
+		dev_err(dwc->dev, "Unable to dequeue while in LPM\n");
+		return -EAGAIN;
+	}
+
 	trace_dwc3_ep_dequeue(req);
 
 	spin_lock_irqsave(&dwc->lock, flags);
@@ -1270,6 +1350,11 @@
 
 	int				ret;
 
+	if (!ep->desc) {
+		dev_err(dwc->dev, "(%s)'s desc is NULL.\n", dep->name);
+		return -EINVAL;
+	}
+
 	spin_lock_irqsave(&dwc->lock, flags);
 	ret = __dwc3_gadget_ep_set_halt(dep, value, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
@@ -1408,15 +1493,204 @@
 	return 0;
 }
 
-static int dwc3_gadget_wakeup(struct usb_gadget *g)
+#define DWC3_PM_RESUME_RETRIES		20    /* Max Number of retries */
+#define DWC3_PM_RESUME_DELAY		100   /* 100 msec */
+
+static void dwc3_gadget_wakeup_work(struct work_struct *w)
 {
-	struct dwc3		*dwc = gadget_to_dwc(g);
-	unsigned long		flags;
+	struct dwc3		*dwc;
 	int			ret;
+	static int		retry_count;
+
+	dwc = container_of(w, struct dwc3, wakeup_work);
+
+	ret = pm_runtime_get_sync(dwc->dev);
+	if (ret) {
+		/* pm_runtime_get_sync() returns -EACCES between late suspend
+		 * and early resume; wait for system resume to finish and
+		 * queue the work again.
+		 */
+		pr_debug("PM runtime get sync failed, ret %d\n", ret);
+		if (ret == -EACCES) {
+			pm_runtime_put_noidle(dwc->dev);
+			if (retry_count == DWC3_PM_RESUME_RETRIES) {
+				retry_count = 0;
+				pr_err("pm_runtime_get_sync timed out\n");
+				return;
+			}
+			msleep(DWC3_PM_RESUME_DELAY);
+			retry_count++;
+			schedule_work(&dwc->wakeup_work);
+			return;
+		}
+	}
+	retry_count = 0;
+
+	ret = dwc3_gadget_wakeup_int(dwc);
+
+	if (ret)
+		pr_err("Remote wakeup failed. ret = %d.\n", ret);
+	else
+		pr_debug("Remote wakeup succeeded.\n");
+
+	pm_runtime_put_noidle(dwc->dev);
+}
+
+static int dwc3_gadget_wakeup_int(struct dwc3 *dwc)
+{
+	bool			link_recover_only = false;
+
+	u32			reg;
+	int			ret = 0;
+	u8			link_state;
+	unsigned long		flags;
+
+	pr_debug("%s(): Entry\n", __func__);
+	disable_irq(dwc->irq);
+	spin_lock_irqsave(&dwc->lock, flags);
+	/*
+	 * According to the Databook, a remote wakeup request should
+	 * be issued only when the device is in the early suspend state.
+	 *
+	 * We can check that via the USB Link State bits in the DSTS register.
+	 */
+	link_state = dwc3_get_link_state(dwc);
+
+	switch (link_state) {
+	case DWC3_LINK_STATE_RX_DET:	/* in HS, means Early Suspend */
+	case DWC3_LINK_STATE_U3:	/* in HS, means SUSPEND */
+		break;
+	case DWC3_LINK_STATE_U1:
+		if (dwc->gadget.speed != USB_SPEED_SUPER) {
+			link_recover_only = true;
+			break;
+		}
+		/* Intentional fallthrough */
+	default:
+		dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
+				link_state);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Enable LINK STATUS change event */
+	reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+	reg |= DWC3_DEVTEN_ULSTCNGEN;
+	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+	/*
+	 * A memory barrier is required to make sure that the required events
+	 * are enabled in the core before performing the RECOVERY mechanism.
+	 */
+	mb();
+
+	ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
+	if (ret < 0) {
+		dev_err(dwc->dev, "failed to put link in Recovery\n");
+		/* Disable LINK STATUS change */
+		reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+		reg &= ~DWC3_DEVTEN_ULSTCNGEN;
+		dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+		/* Required to complete this operation before returning */
+		mb();
+		goto out;
+	}
+
+	/* Recent versions do this automatically */
+	if (dwc->revision < DWC3_REVISION_194A) {
+		/* write zeroes to Link Change Request */
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+		reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
+		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	enable_irq(dwc->irq);
+
+	/*
+	 * Use a larger timeout (16 sec) since some host PCs drive
+	 * resume for a very long time (e.g. 8 sec).
+	 */
+	ret = wait_event_interruptible_timeout(dwc->wait_linkstate,
+			(dwc->link_state < DWC3_LINK_STATE_U3) ||
+			(dwc->link_state == DWC3_LINK_STATE_SS_DIS),
+			msecs_to_jiffies(16000));
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	ret = __dwc3_gadget_wakeup(dwc);
+	/* Disable link status change event */
+	reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
+	reg &= ~DWC3_DEVTEN_ULSTCNGEN;
+	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
+	/*
+	 * Complete this write before we go ahead and perform resume
+	 * as we don't need link status change notification anymore.
+	 */
+	mb();
+
+	if (!ret) {
+		dev_dbg(dwc->dev, "Timeout moving into state(%d)\n",
+							dwc->link_state);
+		ret = -EINVAL;
+		spin_unlock_irqrestore(&dwc->lock, flags);
+		goto out1;
+	} else {
+		ret = 0;
+		/*
+		 * If USB is disconnected or a RESET was received from
+		 * the host, don't perform resume.
+		 */
+		if (dwc->link_state == DWC3_LINK_STATE_SS_DIS ||
+				dwc->gadget.state == USB_STATE_DEFAULT)
+			link_recover_only = true;
+	}
+
+	/*
+	 * According to the DWC3 databook, the controller does not
+	 * trigger a wakeup event when remote wakeup is used.
+	 * Hence, after the remote-wakeup sequence is complete and
+	 * the device is back in the U0 state, the resume sequence
+	 * must be initiated by SW.
+	 */
+	if (!link_recover_only)
+		dwc3_gadget_wakeup_interrupt(dwc, true);
+
 	spin_unlock_irqrestore(&dwc->lock, flags);
+	pr_debug("%s: Exit\n", __func__);
+	return ret;
+
+out:
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	enable_irq(dwc->irq);
+
+out1:
+	return ret;
+}
+
+static int dwc_gadget_func_wakeup(struct usb_gadget *g, int interface_id)
+{
+	int ret = 0;
+	struct dwc3 *dwc = gadget_to_dwc(g);
+
+	if (!g || (g->speed != USB_SPEED_SUPER))
+		return -ENOTSUPP;
+
+	if (dwc3_gadget_is_suspended(dwc)) {
+		pr_debug("USB bus is suspended. Scheduling wakeup and returning -EAGAIN.\n");
+		dwc3_gadget_wakeup(&dwc->gadget);
+		return -EAGAIN;
+	}
+
+	if (dwc->revision < DWC3_REVISION_220A) {
+		ret = dwc3_send_gadget_generic_command(dwc,
+			DWC3_DGCMD_XMIT_FUNCTION, interface_id);
+	} else {
+		ret = dwc3_send_gadget_generic_command(dwc,
+			DWC3_DGCMD_XMIT_DEV, 0x1 | (interface_id << 4));
+	}
+
+	if (ret)
+		pr_err("Function wakeup HW command failed.\n");
+	else
+		pr_debug("Function wakeup HW command succeeded.\n");
 
 	return ret;
 }
@@ -1434,13 +1708,11 @@
 	return 0;
 }
 
+#define DWC3_SOFT_RESET_TIMEOUT		10  /* 10 msec */
 static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
 {
 	u32			reg;
-	u32			timeout = 500;
-
-	if (pm_runtime_suspended(dwc->dev))
-		return 0;
+	u32			timeout = 1500;
 
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	if (is_on) {
@@ -1451,6 +1723,10 @@
 
 		if (dwc->revision >= DWC3_REVISION_194A)
 			reg &= ~DWC3_DCTL_KEEP_CONNECT;
+
+		dwc3_event_buffers_setup(dwc);
+		dwc3_gadget_restart(dwc);
+		reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 		reg |= DWC3_DCTL_RUN_STOP;
 
 		if (dwc->has_hibernation)
@@ -1458,6 +1734,10 @@
 
 		dwc->pullups_connected = true;
 	} else {
+		dwc3_gadget_disable_irq(dwc);
+		__dwc3_gadget_ep_disable(dwc->eps[0]);
+		__dwc3_gadget_ep_disable(dwc->eps[1]);
+
 		reg &= ~DWC3_DCTL_RUN_STOP;
 
 		if (dwc->has_hibernation && !suspend)
@@ -1473,8 +1753,11 @@
 		reg &= DWC3_DSTS_DEVCTRLHLT;
 	} while (--timeout && !(!is_on ^ !reg));
 
-	if (!timeout)
+	if (!timeout) {
+		dev_err(dwc->dev, "failed to %s controller\n",
+				is_on ? "start" : "stop");
 		return -ETIMEDOUT;
+	}
 
 	dwc3_trace(trace_dwc3_gadget, "gadget %s data soft-%s",
 			dwc->gadget_driver
@@ -1484,6 +1767,16 @@
 	return 0;
 }
 
+static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+
+	dwc->vbus_draw = mA;
+	dev_dbg(dwc->dev, "Notify controller from %s. mA = %u\n", __func__, mA);
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_SET_CURRENT_DRAW_EVENT);
+	return 0;
+}
+
 static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
 {
 	struct dwc3		*dwc = gadget_to_dwc(g);
@@ -1491,15 +1784,34 @@
 	int			ret;
 
 	is_on = !!is_on;
+	dwc->softconnect = is_on;
+	if ((dwc->is_drd && !dwc->vbus_active) || !dwc->gadget_driver) {
+		/*
+		 * Need to wait for vbus_session(on) from the OTG driver
+		 * or for udc_start.
+		 */
+		return 0;
+	}
 
+	pm_runtime_get_sync(dwc->dev);
 	spin_lock_irqsave(&dwc->lock, flags);
+	/*
+	 * If we are here after bus suspend, notify the OTG state machine
+	 * to increment the pm usage count of dwc to prevent
+	 * pm_runtime_suspend during enumeration.
+	 */
+	dwc->b_suspend = false;
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
 	ret = dwc3_gadget_run_stop(dwc, is_on, false);
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	pm_runtime_mark_last_busy(dwc->dev);
+	pm_runtime_put_autosuspend(dwc->dev);
+
 	return ret;
 }
 
-static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
+void dwc3_gadget_enable_irq(struct dwc3 *dwc)
 {
 	u32			reg;
 
@@ -1509,22 +1821,30 @@
 			DWC3_DEVTEN_CMDCMPLTEN |
 			DWC3_DEVTEN_ERRTICERREN |
 			DWC3_DEVTEN_WKUPEVTEN |
-			DWC3_DEVTEN_ULSTCNGEN |
 			DWC3_DEVTEN_CONNECTDONEEN |
 			DWC3_DEVTEN_USBRSTEN |
 			DWC3_DEVTEN_DISCONNEVTEN);
 
+	/*
+	 * Enable SUSPENDEVENT (BIT:6) for version 2.30a and above,
+	 * otherwise enable the USB link change event (BIT:3) for older ones.
+	 */
+	if (dwc->revision < DWC3_REVISION_230A)
+		reg |= DWC3_DEVTEN_ULSTCNGEN;
+	else
+		reg |= DWC3_DEVTEN_SUSPEND;
+
 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
 }
 
-static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
+void dwc3_gadget_disable_irq(struct dwc3 *dwc)
 {
 	/* mask all interrupts */
 	dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
 }
 
-static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
 static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
+static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc);
 
 /**
  * dwc3_gadget_setup_nump - Calculate and initialize NUMP field of DCFG
@@ -1567,6 +1887,50 @@
 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 }
 
+static int dwc3_gadget_vbus_session(struct usb_gadget *_gadget, int is_active)
+{
+	struct dwc3 *dwc = gadget_to_dwc(_gadget);
+	unsigned long flags;
+
+	if (!dwc->is_drd)
+		return -EPERM;
+
+	is_active = !!is_active;
+
+	spin_lock_irqsave(&dwc->lock, flags);
+
+	/* Mark that the vbus was powered */
+	dwc->vbus_active = is_active;
+
+	/*
+	 * Check if upper level usb_gadget_driver was already registered with
+	 * this udc controller driver (if dwc3_gadget_start was called)
+	 */
+	if (dwc->gadget_driver && dwc->softconnect) {
+		if (dwc->vbus_active) {
+			/*
+			 * VBUS has been activated by OTG and pullup has been
+			 * signaled by the gadget driver.
+			 */
+			dwc3_gadget_run_stop(dwc, 1, false);
+		} else {
+			dwc3_gadget_run_stop(dwc, 0, false);
+		}
+	}
+
+	/*
+	 * Clearing run/stop bit might occur before disconnect event is seen.
+	 * Make sure to let gadget driver know in that case.
+	 */
+	if (!dwc->vbus_active) {
+		dev_dbg(dwc->dev, "calling disconnect from %s\n", __func__);
+		dwc3_gadget_disconnect_interrupt(dwc);
+	}
+
+	spin_unlock_irqrestore(&dwc->lock, flags);
+	return 0;
+}
+
 static int __dwc3_gadget_start(struct dwc3 *dwc)
 {
 	struct dwc3_ep		*dep;
@@ -1607,7 +1971,7 @@
 			break;
 		default:
 			dev_err(dwc->dev, "invalid dwc->maximum_speed (%d)\n",
-				dwc->maximum_speed);
+					dwc->maximum_speed);
 			/* fall through */
 		case USB_SPEED_SUPER:
 			reg |= DWC3_DCFG_SUPERSPEED;
@@ -1616,36 +1980,42 @@
 	}
 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
 
-	/*
-	 * We are telling dwc3 that we want to use DCFG.NUMP as ACK TP's NUMP
-	 * field instead of letting dwc3 itself calculate that automatically.
-	 *
-	 * This way, we maximize the chances that we'll be able to get several
-	 * bursts of data without going through any sort of endpoint throttling.
+	/* Programs the number of outstanding pipelined transfer requests
+	 * the AXI master pushes to the AXI slave.
 	 */
-	reg = dwc3_readl(dwc->regs, DWC3_GRXTHRCFG);
-	reg &= ~DWC3_GRXTHRCFG_PKTCNTSEL;
-	dwc3_writel(dwc->regs, DWC3_GRXTHRCFG, reg);
+	if (dwc->revision >= DWC3_REVISION_270A) {
+		reg = dwc3_readl(dwc->regs, DWC3_GSBUSCFG1);
+		reg &= ~DWC3_GSBUSCFG1_PIPETRANSLIMIT_MASK;
+		reg |= DWC3_GSBUSCFG1_PIPETRANSLIMIT(0xe);
+		dwc3_writel(dwc->regs, DWC3_GSBUSCFG1, reg);
+	}
 
 	dwc3_gadget_setup_nump(dwc);
 
 	/* Start with SuperSpeed Default */
 	dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
 
+	dwc->delayed_status = false;
+	/* reinitialize physical ep0-1 */
 	dep = dwc->eps[0];
+	dep->flags = 0;
+	dep->endpoint.maxburst = 1;
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
 			false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-		goto err0;
+		return ret;
 	}
 
 	dep = dwc->eps[1];
+	dep->flags = 0;
+	dep->endpoint.maxburst = 1;
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
 			false);
 	if (ret) {
 		dev_err(dwc->dev, "failed to enable %s\n", dep->name);
-		goto err1;
+		__dwc3_gadget_ep_disable(dwc->eps[0]);
+		return ret;
 	}
 
 	/* begin to receive SETUP packets */
@@ -1654,63 +2024,50 @@
 
 	dwc3_gadget_enable_irq(dwc);
 
-	return 0;
-
-err1:
-	__dwc3_gadget_ep_disable(dwc->eps[0]);
-
-err0:
 	return ret;
 }
 
+/* Required gadget re-initialization before switching to gadget in OTG mode */
+void dwc3_gadget_restart(struct dwc3 *dwc)
+{
+	__dwc3_gadget_start(dwc);
+}
+
 static int dwc3_gadget_start(struct usb_gadget *g,
 		struct usb_gadget_driver *driver)
 {
 	struct dwc3		*dwc = gadget_to_dwc(g);
 	unsigned long		flags;
 	int			ret = 0;
-	int			irq;
-
-	irq = dwc->irq_gadget;
-	ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
-			IRQF_SHARED, "dwc3", dwc->ev_buf);
-	if (ret) {
-		dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
-				irq, ret);
-		goto err0;
-	}
 
 	spin_lock_irqsave(&dwc->lock, flags);
+
 	if (dwc->gadget_driver) {
 		dev_err(dwc->dev, "%s is already bound to %s\n",
 				dwc->gadget.name,
 				dwc->gadget_driver->driver.name);
 		ret = -EBUSY;
-		goto err1;
+		goto err0;
 	}
 
 	dwc->gadget_driver	= driver;
 
-	if (pm_runtime_active(dwc->dev))
-		__dwc3_gadget_start(dwc);
-
+	/*
+	 * For DRD, this might get called by gadget driver during bootup
+	 * even though host mode might be active. Don't actually perform
+	 * device-specific initialization until device mode is activated.
+	 * In that case dwc3_gadget_restart() will handle it.
+	 */
 	spin_unlock_irqrestore(&dwc->lock, flags);
-
 	return 0;
 
-err1:
-	spin_unlock_irqrestore(&dwc->lock, flags);
-	free_irq(irq, dwc);
-
 err0:
+	spin_unlock_irqrestore(&dwc->lock, flags);
 	return ret;
 }
 
 static void __dwc3_gadget_stop(struct dwc3 *dwc)
 {
-	if (pm_runtime_suspended(dwc->dev))
-		return;
-
 	dwc3_gadget_disable_irq(dwc);
 	__dwc3_gadget_ep_disable(dwc->eps[0]);
 	__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1718,26 +2075,41 @@
 
 static int dwc3_gadget_stop(struct usb_gadget *g)
 {
-	struct dwc3		*dwc = gadget_to_dwc(g);
-	unsigned long		flags;
+	struct dwc3	*dwc = gadget_to_dwc(g);
+	unsigned long	flags;
+
+	pm_runtime_get_sync(dwc->dev);
+	tasklet_kill(&dwc->bh);
 
 	spin_lock_irqsave(&dwc->lock, flags);
 	__dwc3_gadget_stop(dwc);
-	dwc->gadget_driver	= NULL;
+	dwc->gadget_driver = NULL;
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
-	free_irq(dwc->irq_gadget, dwc->ev_buf);
+	pm_runtime_mark_last_busy(dwc->dev);
+	pm_runtime_put_autosuspend(dwc->dev);
 
 	return 0;
 }
 
+static int dwc3_gadget_restart_usb_session(struct usb_gadget *g)
+{
+	struct dwc3		*dwc = gadget_to_dwc(g);
+
+	return dwc3_notify_event(dwc, DWC3_CONTROLLER_RESTART_USB_SESSION);
+}
+
 static const struct usb_gadget_ops dwc3_gadget_ops = {
 	.get_frame		= dwc3_gadget_get_frame,
 	.wakeup			= dwc3_gadget_wakeup,
+	.func_wakeup		= dwc_gadget_func_wakeup,
 	.set_selfpowered	= dwc3_gadget_set_selfpowered,
+	.vbus_session		= dwc3_gadget_vbus_session,
+	.vbus_draw		= dwc3_gadget_vbus_draw,
 	.pullup			= dwc3_gadget_pullup,
 	.udc_start		= dwc3_gadget_start,
 	.udc_stop		= dwc3_gadget_stop,
+	.restart		= dwc3_gadget_restart_usb_session,
 };
 
 /* -------------------------------------------------------------------------- */
@@ -2215,7 +2587,7 @@
 	}
 }
 
-static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
+void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
 {
 	struct dwc3_ep *dep;
 	struct dwc3_gadget_ep_cmd_params params;
@@ -2316,6 +2688,10 @@
 {
 	int			reg;
 
+	dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+	dwc->b_suspend = false;
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
 	reg &= ~DWC3_DCTL_INITU1ENA;
 	dwc3_writel(dwc->regs, DWC3_DCTL, reg);
@@ -2327,9 +2703,11 @@
 
 	dwc->gadget.speed = USB_SPEED_UNKNOWN;
 	dwc->setup_packet_pending = false;
+	dwc->link_state = DWC3_LINK_STATE_SS_DIS;
 	usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
 
 	dwc->connected = false;
+	wake_up_interruptible(&dwc->wait_linkstate);
 }
 
 static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
@@ -2369,6 +2747,13 @@
 			dwc3_gadget_disconnect_interrupt(dwc);
 	}
 
+	dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+	dwc->b_suspend = false;
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+
+	dwc3_usb3_phy_suspend(dwc, false);
+	usb_gadget_vbus_draw(&dwc->gadget, 0);
+
 	dwc3_reset_gadget(dwc);
 
 	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
@@ -2383,6 +2768,10 @@
 	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
 	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
 	dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+
+	dwc->gadget.speed = USB_SPEED_UNKNOWN;
+	dwc->link_state = DWC3_LINK_STATE_U0;
+	wake_up_interruptible(&dwc->wait_linkstate);
 }
 
 static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
@@ -2503,6 +2892,13 @@
 		dwc3_writel(dwc->regs, DWC3_DCTL, reg);
 	}
 
+
+	/*
+	 * In HS mode this allows SS PHY suspend. In SS mode this allows SS PHY
+	 * suspend in the P3 state and generates the IN_P3 power event irq.
+	 */
+	dwc3_usb3_phy_suspend(dwc, true);
+
 	dep = dwc->eps[0];
 	ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
 			false);
@@ -2519,6 +2915,7 @@
 		return;
 	}
 
+	dwc3_notify_event(dwc, DWC3_CONTROLLER_CONNDONE_EVENT);
 	/*
 	 * Configure PHY via GUSB3PIPECTLn if required.
 	 *
@@ -2528,18 +2925,43 @@
 	 */
 }
 
-static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
+static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc, bool remote_wakeup)
 {
-	/*
-	 * TODO take core out of low power mode when that's
-	 * implemented.
-	 */
+	bool perform_resume = true;
 
-	if (dwc->gadget_driver && dwc->gadget_driver->resume) {
-		spin_unlock(&dwc->lock);
-		dwc->gadget_driver->resume(&dwc->gadget);
-		spin_lock(&dwc->lock);
+	dev_dbg(dwc->dev, "%s\n", __func__);
+
+	/*
+	 * Identify whether this is called from wakeup_interrupt() context
+	 * for bus resume or as part of remote wakeup, and based on that
+	 * check for the U3 state. This handles the L1 resume case, where
+	 * we don't want to perform resume.
+	 */
+	if (!remote_wakeup && dwc->link_state != DWC3_LINK_STATE_U3)
+		perform_resume = false;
+
+	/* Only perform resume from L2 or Early Suspend states */
+	if (perform_resume) {
+
+		/*
+		 * In the case of remote wakeup, dwc3_gadget_wakeup_work()
+		 * does the pm_runtime_get_sync().
+		 */
+		dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+		dwc->b_suspend = false;
+		dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+
+		/*
+		 * Set the state to U0, as function-level resume tries to queue
+		 * a notification over the USB interrupt endpoint, which would
+		 * fail if the state were not updated.
+		 */
+		dwc->link_state = DWC3_LINK_STATE_U0;
+		dwc3_resume_gadget(dwc);
+		return;
 	}
+
+	dwc->link_state = DWC3_LINK_STATE_U0;
 }
 
 static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
@@ -2639,18 +3061,42 @@
 		break;
 	}
 
+	dev_dbg(dwc->dev, "Going from (%d)--->(%d)\n", dwc->link_state, next);
 	dwc->link_state = next;
+	wake_up_interruptible(&dwc->wait_linkstate);
 }
 
 static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc,
-					  unsigned int evtinfo)
+			unsigned int evtinfo)
 {
-	enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+	enum dwc3_link_state    next = evtinfo & DWC3_LINK_STATE_MASK;
 
-	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3)
+	dev_dbg(dwc->dev, "%s Entry to %d\n", __func__, next);
+
+	if (dwc->link_state != next && next == DWC3_LINK_STATE_U3) {
+		/*
+		 * When first connecting the cable, even before the initial
+		 * DWC3_DEVICE_EVENT_RESET or DWC3_DEVICE_EVENT_CONNECT_DONE
+		 * events, the controller sees a DWC3_DEVICE_EVENT_SUSPEND
+		 * event. In such a case, ignore it.
+		 * Ignore the suspend event until the device-side USB is in
+		 * the CONFIGURED state.
+		 */
+		if (dwc->gadget.state != USB_STATE_CONFIGURED) {
+			pr_err("%s(): state:%d. Ignore SUSPEND.\n",
+						__func__, dwc->gadget.state);
+			return;
+		}
+
 		dwc3_suspend_gadget(dwc);
 
+		dev_dbg(dwc->dev, "Notify OTG from %s\n", __func__);
+		dwc->b_suspend = true;
+		dwc3_notify_event(dwc, DWC3_CONTROLLER_NOTIFY_OTG_EVENT);
+	}
+
 	dwc->link_state = next;
+	dwc3_trace(trace_dwc3_gadget, "link state %d", dwc->link_state);
 }
 
 static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
@@ -2691,7 +3137,7 @@
 		dwc3_gadget_conndone_interrupt(dwc);
 		break;
 	case DWC3_DEVICE_EVENT_WAKEUP:
-		dwc3_gadget_wakeup_interrupt(dwc);
+		dwc3_gadget_wakeup_interrupt(dwc, false);
 		break;
 	case DWC3_DEVICE_EVENT_HIBER_REQ:
 		if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
@@ -2703,8 +3149,7 @@
 	case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
 		dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
 		break;
-	case DWC3_DEVICE_EVENT_EOPF:
-		/* It changed to be suspend event for version 2.30a and above */
+	case DWC3_DEVICE_EVENT_SUSPEND:
 		if (dwc->revision < DWC3_REVISION_230A) {
 			dwc3_trace(trace_dwc3_gadget, "End of Periodic Frame");
 		} else {
@@ -2734,12 +3179,25 @@
 	default:
 		dev_WARN(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
 	}
+
+	dwc->err_evt_seen = (event->type == DWC3_DEVICE_EVENT_ERRATIC_ERROR);
 }
 
 static void dwc3_process_event_entry(struct dwc3 *dwc,
 		const union dwc3_event *event)
 {
 	trace_dwc3_event(event->raw);
+	/* skip event processing in absence of vbus */
+	if (!dwc->vbus_active) {
+		dev_err(dwc->dev, "SKIP EVT:%x", event->raw);
+		return;
+	}
+
+	/* If run/stop is cleared don't process any more events */
+	if (!dwc->pullups_connected) {
+		dev_err(dwc->dev, "SKIP_EVT_PULLUP:%x", event->raw);
+		return;
+	}
 
 	/* Endpoint IRQ, handle it and return early */
 	if (event->type.is_devspec == 0) {
@@ -2757,13 +3215,14 @@
 	}
 }
 
-static irqreturn_t dwc3_process_event_buf(struct dwc3_event_buffer *evt)
+static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc)
 {
-	struct dwc3 *dwc = evt->dwc;
+	struct dwc3_event_buffer *evt;
 	irqreturn_t ret = IRQ_NONE;
 	int left;
 	u32 reg;
 
+	evt = dwc->ev_buf;
 	left = evt->count;
 
 	if (!(evt->flags & DWC3_EVENT_PENDING))
@@ -2776,6 +3235,19 @@
 
 		dwc3_process_event_entry(dwc, &event);
 
+		if (dwc->err_evt_seen) {
+			/*
+			 * If an erratic error is seen, skip the remaining
+			 * events while the controller undergoes reset.
+			 */
+			evt->lpos = (evt->lpos + left) %
+					DWC3_EVENT_BUFFERS_SIZE;
+			dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), left);
+			if (dwc3_notify_event(dwc, DWC3_CONTROLLER_ERROR_EVENT))
+				dwc->err_evt_seen = 0;
+			break;
+		}
+
 		/*
 		 * FIXME we wrap around correctly to the next entry as
 		 * almost all entries are 4 bytes in size. There is one
@@ -2791,6 +3263,7 @@
 		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), 4);
 	}
 
+	dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] += (evt->count / 4);
 	evt->count = 0;
 	evt->flags &= ~DWC3_EVENT_PENDING;
 	ret = IRQ_HANDLED;
@@ -2803,38 +3276,60 @@
 	return ret;
 }
 
-static irqreturn_t dwc3_thread_interrupt(int irq, void *_evt)
+static void dwc3_interrupt_bh(unsigned long param)
 {
-	struct dwc3_event_buffer *evt = _evt;
-	struct dwc3 *dwc = evt->dwc;
+	struct dwc3 *dwc = (struct dwc3 *) param;
+
+	pm_runtime_get(dwc->dev);
+	dwc3_thread_interrupt(dwc->irq, dwc);
+	enable_irq(dwc->irq);
+}
+
+static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
+{
+	struct dwc3 *dwc = _dwc;
 	unsigned long flags;
 	irqreturn_t ret = IRQ_NONE;
+	unsigned int temp_time;
+	ktime_t start_time;
+
+	start_time = ktime_get();
 
 	spin_lock_irqsave(&dwc->lock, flags);
-	ret = dwc3_process_event_buf(evt);
+	dwc->bh_handled_evt_cnt[dwc->bh_dbg_index] = 0;
+
+	ret = dwc3_process_event_buf(dwc);
+
 	spin_unlock_irqrestore(&dwc->lock, flags);
 
+	temp_time = ktime_to_us(ktime_sub(ktime_get(), start_time));
+	dwc->bh_completion_time[dwc->bh_dbg_index] = temp_time;
+	dwc->bh_dbg_index = (dwc->bh_dbg_index + 1) % 10;
+
+	pm_runtime_put(dwc->dev);
 	return ret;
 }
 
-static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt)
+static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc)
 {
-	struct dwc3 *dwc = evt->dwc;
+	struct dwc3_event_buffer *evt;
 	u32 count;
 	u32 reg;
 
-	if (pm_runtime_suspended(dwc->dev)) {
-		pm_runtime_get(dwc->dev);
-		disable_irq_nosync(dwc->irq_gadget);
-		dwc->pending_events = true;
-		return IRQ_HANDLED;
-	}
+	evt = dwc->ev_buf;
 
 	count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0));
 	count &= DWC3_GEVNTCOUNT_MASK;
 	if (!count)
 		return IRQ_NONE;
 
+	if (count > evt->length) {
+		dev_err(dwc->dev, "HUGE_EVCNT(%d)", count);
+		evt->lpos = (evt->lpos + count) % DWC3_EVENT_BUFFERS_SIZE;
+		dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count);
+		return IRQ_HANDLED;
+	}
+
 	evt->count = count;
 	evt->flags |= DWC3_EVENT_PENDING;
 
@@ -2846,11 +3341,37 @@
 	return IRQ_WAKE_THREAD;
 }
 
-static irqreturn_t dwc3_interrupt(int irq, void *_evt)
+irqreturn_t dwc3_interrupt(int irq, void *_dwc)
 {
-	struct dwc3_event_buffer	*evt = _evt;
+	struct dwc3			*dwc = _dwc;
+	irqreturn_t			ret = IRQ_NONE;
+	irqreturn_t			status;
+	unsigned int			temp_cnt = 0;
+	ktime_t				start_time;
 
-	return dwc3_check_event_buf(evt);
+	start_time = ktime_get();
+	dwc->irq_cnt++;
+
+	/* controller reset is still pending */
+	if (dwc->err_evt_seen)
+		return IRQ_HANDLED;
+
+	status = dwc3_check_event_buf(dwc);
+	if (status == IRQ_WAKE_THREAD)
+		ret = status;
+
+	dwc->irq_start_time[dwc->irq_dbg_index] = start_time;
+	dwc->irq_completion_time[dwc->irq_dbg_index] =
+		ktime_us_delta(ktime_get(), start_time);
+	dwc->irq_event_count[dwc->irq_dbg_index] = temp_cnt / 4;
+	dwc->irq_dbg_index = (dwc->irq_dbg_index + 1) % MAX_INTR_STATS;
+
+	if (ret == IRQ_WAKE_THREAD) {
+		disable_irq_nosync(irq);
+		tasklet_schedule(&dwc->bh);
+	}
+
+	return IRQ_HANDLED;
 }
 
 /**
@@ -2889,6 +3410,8 @@
 
 	dwc->irq_gadget = irq;
 
+	INIT_WORK(&dwc->wakeup_work, dwc3_gadget_wakeup_work);
+
 	dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
 			&dwc->ctrl_req_addr, GFP_KERNEL);
 	if (!dwc->ctrl_req) {
@@ -2926,6 +3449,9 @@
 		goto err4;
 	}
 
+	dwc->bh.func = dwc3_interrupt_bh;
+	dwc->bh.data = (unsigned long)dwc;
+
 	dwc->gadget.ops			= &dwc3_gadget_ops;
 	dwc->gadget.speed		= USB_SPEED_UNKNOWN;
 	dwc->gadget.sg_supported	= true;
@@ -2976,6 +3502,13 @@
 		goto err5;
 	}
 
+	if (!dwc->is_drd) {
+		pm_runtime_no_callbacks(&dwc->gadget.dev);
+		pm_runtime_set_active(&dwc->gadget.dev);
+		pm_runtime_enable(&dwc->gadget.dev);
+		pm_runtime_get(&dwc->gadget.dev);
+	}
+
 	return 0;
 
 err5:
@@ -3005,6 +3538,11 @@
 
 void dwc3_gadget_exit(struct dwc3 *dwc)
 {
+	if (dwc->is_drd) {
+		pm_runtime_put(&dwc->gadget.dev);
+		pm_runtime_disable(&dwc->gadget.dev);
+	}
+
 	usb_del_gadget_udc(&dwc->gadget);
 
 	dwc3_gadget_free_endpoints(dwc);
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index e4a1d97..0fcb8ed 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -87,6 +87,16 @@
 int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
 		gfp_t gfp_flags);
 int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
+void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
+irqreturn_t dwc3_interrupt(int irq, void *_dwc);
+
+static inline dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
+		struct dwc3_trb *trb)
+{
+	u32 offset = (char *) trb - (char *) dep->trb_pool;
+
+	return dep->trb_pool_dma + offset;
+}
 
 /**
  * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index f6533c6..800bcae 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -72,6 +72,7 @@
 		return -ENOMEM;
 	}
 
+	arch_setup_dma_ops(&xhci->dev, 0, 0, NULL, 0);
 	dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
 
 	xhci->dev.parent	= dwc->dev;
@@ -103,18 +104,9 @@
 	phy_create_lookup(dwc->usb3_generic_phy, "usb3-phy",
 			  dev_name(&xhci->dev));
 
-	ret = platform_device_add(xhci);
-	if (ret) {
-		dev_err(dwc->dev, "failed to register xHCI device\n");
-		goto err2;
-	}
+	/* Platform device gets added as part of state machine */
 
 	return 0;
-err2:
-	phy_remove_lookup(dwc->usb2_generic_phy, "usb2-phy",
-			  dev_name(&xhci->dev));
-	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
-			  dev_name(&xhci->dev));
 err1:
 	platform_device_put(xhci);
 	return ret;
@@ -126,5 +118,6 @@
 			  dev_name(&dwc->xhci->dev));
 	phy_remove_lookup(dwc->usb3_generic_phy, "usb3-phy",
 			  dev_name(&dwc->xhci->dev));
-	platform_device_unregister(dwc->xhci);
+	if (!dwc->is_drd)
+		platform_device_unregister(dwc->xhci);
 }
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 8ad2032..87f2b73 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -209,6 +209,21 @@
 config USB_F_TCM
 	tristate
 
+config USB_F_MTP
+	tristate
+
+config USB_F_PTP
+	tristate
+
+config USB_F_AUDIO_SRC
+	tristate
+
+config USB_F_ACC
+	tristate
+
+config USB_F_DIAG
+	tristate
+
 # this first set of drivers all depend on bulk-capable hardware.
 
 config USB_CONFIGFS
@@ -362,6 +377,44 @@
 	  implemented in kernel space (for instance Ethernet, serial or
 	  mass storage) and other are implemented in user space.
 
+config USB_CONFIGFS_F_MTP
+	boolean "MTP gadget"
+	depends on USB_CONFIGFS
+	select USB_F_MTP
+	help
+	  USB gadget MTP support
+
+config USB_CONFIGFS_F_PTP
+	boolean "PTP gadget"
+	depends on USB_CONFIGFS && USB_CONFIGFS_F_MTP
+	select USB_F_PTP
+	help
+	  USB gadget PTP support
+
+config USB_CONFIGFS_F_ACC
+	boolean "Accessory gadget"
+	depends on USB_CONFIGFS
+	select USB_F_ACC
+	help
+	  USB gadget Accessory support
+
+config USB_CONFIGFS_F_AUDIO_SRC
+	boolean "Audio Source gadget"
+	depends on USB_CONFIGFS && USB_CONFIGFS_F_ACC
+	depends on SND
+	select SND_PCM
+	select USB_F_AUDIO_SRC
+	help
+	  USB gadget Audio Source support
+
+config USB_CONFIGFS_UEVENT
+	boolean "Uevent notification of Gadget state"
+	depends on USB_CONFIGFS
+	help
+	  Enable uevent notifications to userspace when the gadget
+	  state changes. The gadget can be in one of three states:
+	  CONNECTED, DISCONNECTED, or CONFIGURED.
+
 config USB_CONFIGFS_F_UAC1
 	bool "Audio Class 1.0"
 	depends on USB_CONFIGFS
@@ -457,6 +510,14 @@
 	  Both protocols can work on USB2.0 and USB3.0.
 	  UAS utilizes the USB 3.0 feature called streams support.
 
+config USB_CONFIGFS_F_DIAG
+	bool "USB Diag function"
+	select USB_F_DIAG
+	depends on USB_CONFIGFS
+	help
+	  Diag function driver enables support for Qualcomm diagnostics
+	  port over USB.
+
 choice
 	tristate "USB Gadget Drivers"
 	default USB_ETH
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 32176f7..1d369eb 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1983,6 +1983,12 @@
 	struct usb_composite_dev	*cdev = get_gadget_data(gadget);
 	unsigned long			flags;
 
+	if (cdev == NULL) {
+		WARN(1, "%s: Calling disconnect on a gadget that is not connected\n",
+			__func__);
+		return;
+	}
+
 	/* REVISIT:  should we have config and device level
 	 * disconnect callbacks?
 	 */
@@ -2160,14 +2166,18 @@
 			usb_ep_dequeue(cdev->gadget->ep0, cdev->os_desc_req);
 
 		kfree(cdev->os_desc_req->buf);
+		cdev->os_desc_req->buf = NULL;
 		usb_ep_free_request(cdev->gadget->ep0, cdev->os_desc_req);
+		cdev->os_desc_req = NULL;
 	}
 	if (cdev->req) {
 		if (cdev->setup_pending)
 			usb_ep_dequeue(cdev->gadget->ep0, cdev->req);
 
 		kfree(cdev->req->buf);
+		cdev->req->buf = NULL;
 		usb_ep_free_request(cdev->gadget->ep0, cdev->req);
+		cdev->req = NULL;
 	}
 	cdev->next_string_id = 0;
 	device_remove_file(&cdev->gadget->dev, &dev_attr_suspended);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3984787..44746a4 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -9,6 +9,31 @@
 #include "u_f.h"
 #include "u_os_desc.h"
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+#include <linux/platform_device.h>
+#include <linux/kdev_t.h>
+#include <linux/usb/ch9.h>
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+extern int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl);
+void acc_disconnect(void);
+#endif
+static struct class *android_class;
+static struct device *android_device;
+static int index;
+
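+/* exported helper: create a child device under the android0 class device */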
+struct device *create_function_device(char *name)
+{
+	if (android_device && !IS_ERR(android_device))
+		return device_create(android_class, android_device,
+			MKDEV(0, index++), NULL, name);
+	else
+		return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(create_function_device);
+#endif
+
 int check_user_usb_string(const char *name,
 		struct usb_gadget_strings *stringtab_dev)
 {
@@ -60,6 +85,12 @@
 	bool use_os_desc;
 	char b_vendor_code;
 	char qw_sign[OS_STRING_QW_SIGN_LEN];
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	bool connected;
+	bool sw_connected;
+	struct work_struct work;
+	struct device *dev;
+#endif
 };
 
 static inline struct gadget_info *to_gadget_info(struct config_item *item)
@@ -265,7 +296,7 @@
 
 	mutex_lock(&gi->lock);
 
-	if (!strlen(name)) {
+	if (!strlen(name) || strcmp(name, "none") == 0) {
 		ret = unregister_gadget(gi);
 		if (ret)
 			goto err;
@@ -395,6 +426,11 @@
 	}
 
 	f = usb_get_function(fi);
+	if (f == NULL) {
+		/* Are we trying to symlink PTP without MTP function? */
+		ret = -EINVAL; /* Invalid Configuration */
+		goto out;
+	}
 	if (IS_ERR(f)) {
 		ret = PTR_ERR(f);
 		goto out;
@@ -1367,6 +1403,60 @@
 	return ret;
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static void android_work(struct work_struct *data)
+{
+	struct gadget_info *gi = container_of(data, struct gadget_info, work);
+	struct usb_composite_dev *cdev = &gi->cdev;
+	char *disconnected[2] = { "USB_STATE=DISCONNECTED", NULL };
+	char *connected[2]    = { "USB_STATE=CONNECTED", NULL };
+	char *configured[2]   = { "USB_STATE=CONFIGURED", NULL };
+	/* 0: connected, 1: configured, 2: disconnected */
+	bool status[3] = { false, false, false };
+	unsigned long flags;
+	bool uevent_sent = false;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		status[1] = true;
+
+	if (gi->connected != gi->sw_connected) {
+		if (gi->connected)
+			status[0] = true;
+		else
+			status[2] = true;
+		gi->sw_connected = gi->connected;
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	if (status[0]) {
+		kobject_uevent_env(&android_device->kobj,
+					KOBJ_CHANGE, connected);
+		pr_info("%s: sent uevent %s\n", __func__, connected[0]);
+		uevent_sent = true;
+	}
+
+	if (status[1]) {
+		kobject_uevent_env(&android_device->kobj,
+					KOBJ_CHANGE, configured);
+		pr_info("%s: sent uevent %s\n", __func__, configured[0]);
+		uevent_sent = true;
+	}
+
+	if (status[2]) {
+		kobject_uevent_env(&android_device->kobj,
+					KOBJ_CHANGE, disconnected);
+		pr_info("%s: sent uevent %s\n", __func__, disconnected[0]);
+		uevent_sent = true;
+	}
+
+	if (!uevent_sent) {
+		pr_info("%s: did not send uevent (%d %d %p)\n", __func__,
+			gi->connected, gi->sw_connected, cdev->config);
+	}
+}
+#endif
+
 static void configfs_composite_unbind(struct usb_gadget *gadget)
 {
 	struct usb_composite_dev	*cdev;
@@ -1386,14 +1476,79 @@
 	set_gadget_data(gadget, NULL);
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static int android_setup(struct usb_gadget *gadget,
+			const struct usb_ctrlrequest *c)
+{
+	struct usb_composite_dev *cdev = get_gadget_data(gadget);
+	unsigned long flags;
+	struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+	int value = -EOPNOTSUPP;
+	struct usb_function_instance *fi;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (!gi->connected) {
+		gi->connected = 1;
+		schedule_work(&gi->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+	list_for_each_entry(fi, &gi->available_func, cfs_list) {
+		if (fi != NULL && fi->f != NULL && fi->f->setup != NULL) {
+			value = fi->f->setup(fi->f, c);
+			if (value >= 0)
+				break;
+		}
+	}
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+	if (value < 0)
+		value = acc_ctrlrequest(cdev, c);
+#endif
+
+	if (value < 0)
+		value = composite_setup(gadget, c);
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (c->bRequest == USB_REQ_SET_CONFIGURATION &&
+						cdev->config) {
+		schedule_work(&gi->work);
+	}
+	spin_unlock_irqrestore(&cdev->lock, flags);
+
+	return value;
+}
+
+static void android_disconnect(struct usb_gadget *gadget)
+{
+	struct usb_composite_dev        *cdev = get_gadget_data(gadget);
+	struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+
+	/*
+	 * Accessory HID support can be active while the accessory
+	 * function is not actually enabled, so we need to inform it
+	 * when we are disconnected.
+	 */
+
+#ifdef CONFIG_USB_CONFIGFS_F_ACC
+	acc_disconnect();
+#endif
+	gi->connected = 0;
+	schedule_work(&gi->work);
+	composite_disconnect(gadget);
+}
+#endif
+
 static const struct usb_gadget_driver configfs_driver_template = {
 	.bind           = configfs_composite_bind,
 	.unbind         = configfs_composite_unbind,
-
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	.setup          = android_setup,
+	.reset          = android_disconnect,
+	.disconnect     = android_disconnect,
+#else
 	.setup          = composite_setup,
 	.reset          = composite_disconnect,
 	.disconnect     = composite_disconnect,
-
+#endif
 	.suspend	= composite_suspend,
 	.resume		= composite_resume,
 
@@ -1405,6 +1560,89 @@
 	.match_existing_only = 1,
 };
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+static ssize_t state_show(struct device *pdev, struct device_attribute *attr,
+			char *buf)
+{
+	struct gadget_info *dev = dev_get_drvdata(pdev);
+	struct usb_composite_dev *cdev;
+	char *state = "DISCONNECTED";
+	unsigned long flags;
+
+	if (!dev)
+		goto out;
+
+	cdev = &dev->cdev;
+
+	if (!cdev)
+		goto out;
+
+	spin_lock_irqsave(&cdev->lock, flags);
+	if (cdev->config)
+		state = "CONFIGURED";
+	else if (dev->connected)
+		state = "CONNECTED";
+	spin_unlock_irqrestore(&cdev->lock, flags);
+out:
+	return sprintf(buf, "%s\n", state);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+
+static struct device_attribute *android_usb_attributes[] = {
+	&dev_attr_state,
+	NULL
+};
+
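+/* create the android0 device and its "state" sysfs attribute */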
+static int android_device_create(struct gadget_info *gi)
+{
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+
+	INIT_WORK(&gi->work, android_work);
+	android_device = device_create(android_class, NULL,
+				MKDEV(0, 0), NULL, "android0");
+	if (IS_ERR(android_device))
+		return PTR_ERR(android_device);
+
+	dev_set_drvdata(android_device, gi);
+
+	attrs = android_usb_attributes;
+	while ((attr = *attrs++)) {
+		int err;
+
+		err = device_create_file(android_device, attr);
+		if (err) {
+			device_destroy(android_device->class,
+				       android_device->devt);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void android_device_destroy(void)
+{
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+
+	attrs = android_usb_attributes;
+	while ((attr = *attrs++))
+		device_remove_file(android_device, attr);
+	device_destroy(android_device->class, android_device->devt);
+}
+#else
+static inline int android_device_create(struct gadget_info *gi)
+{
+	return 0;
+}
+
+static inline void android_device_destroy(void)
+{
+}
+#endif
+
 static struct config_group *gadgets_make(
 		struct config_group *group,
 		const char *name)
@@ -1456,7 +1694,11 @@
 	if (!gi->composite.gadget_driver.function)
 		goto err;
 
+	if (android_device_create(gi) < 0)
+		goto err;
+
 	return &gi->group;
+
 err:
 	kfree(gi);
 	return ERR_PTR(-ENOMEM);
@@ -1465,6 +1707,7 @@
 static void gadgets_drop(struct config_group *group, struct config_item *item)
 {
 	config_item_put(item);
+	android_device_destroy();
 }
 
 static struct configfs_group_operations gadgets_ops = {
@@ -1504,6 +1747,13 @@
 	config_group_init(&gadget_subsys.su_group);
 
 	ret = configfs_register_subsystem(&gadget_subsys);
+
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	android_class = class_create(THIS_MODULE, "android_usb");
+	if (IS_ERR(android_class))
+		return PTR_ERR(android_class);
+#endif
+
 	return ret;
 }
 module_init(gadget_cfs_init);
@@ -1511,5 +1761,10 @@
 static void __exit gadget_cfs_exit(void)
 {
 	configfs_unregister_subsystem(&gadget_subsys);
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+	if (!IS_ERR(android_class))
+		class_destroy(android_class);
+#endif
+
 }
 module_exit(gadget_cfs_exit);
diff --git a/drivers/usb/gadget/function/Makefile b/drivers/usb/gadget/function/Makefile
index cb8c225..459953d 100644
--- a/drivers/usb/gadget/function/Makefile
+++ b/drivers/usb/gadget/function/Makefile
@@ -46,3 +46,13 @@
 obj-$(CONFIG_USB_F_PRINTER)	+= usb_f_printer.o
 usb_f_tcm-y			:= f_tcm.o
 obj-$(CONFIG_USB_F_TCM)		+= usb_f_tcm.o
+usb_f_mtp-y                     := f_mtp.o
+obj-$(CONFIG_USB_F_MTP)         += usb_f_mtp.o
+usb_f_ptp-y                     := f_ptp.o
+obj-$(CONFIG_USB_F_PTP)         += usb_f_ptp.o
+usb_f_audio_source-y            := f_audio_source.o
+obj-$(CONFIG_USB_F_AUDIO_SRC)   += usb_f_audio_source.o
+usb_f_accessory-y               := f_accessory.o
+obj-$(CONFIG_USB_F_ACC)         += usb_f_accessory.o
+usb_f_diag-y			:= f_diag.o
+obj-$(CONFIG_USB_F_DIAG)	+= usb_f_diag.o
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
new file mode 100644
index 0000000..2ca16a57
--- /dev/null
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -0,0 +1,1326 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/hid.h>
+#include <linux/hiddev.h>
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_accessory.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#define MAX_INST_NAME_LEN        40
+#define BULK_BUFFER_SIZE    16384
+#define ACC_STRING_SIZE     256
+
+#define PROTOCOL_VERSION    2
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+
+struct acc_hid_dev {
+	struct list_head	list;
+	struct hid_device *hid;
+	struct acc_dev *dev;
+	/* accessory defined ID */
+	int id;
+	/* HID report descriptor */
+	u8 *report_desc;
+	/* length of HID report descriptor */
+	int report_desc_len;
+	/* number of bytes of report_desc we have received so far */
+	int report_desc_offset;
+};
+
+struct acc_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+
+	/* set to 1 when we connect */
+	int online:1;
+	/* Set to 1 when we disconnect.
+	 * Not cleared until our file is closed.
+	 */
+	int disconnected:1;
+
+	/* strings sent by the host */
+	char manufacturer[ACC_STRING_SIZE];
+	char model[ACC_STRING_SIZE];
+	char description[ACC_STRING_SIZE];
+	char version[ACC_STRING_SIZE];
+	char uri[ACC_STRING_SIZE];
+	char serial[ACC_STRING_SIZE];
+
+	/* for acc_complete_set_string */
+	int string_index;
+
+	/* set to 1 if we have a pending start request */
+	int start_requested;
+
+	int audio_mode;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+
+	struct list_head tx_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* delayed work for handling ACCESSORY_START */
+	struct delayed_work start_work;
+
+	/* worker for registering and unregistering hid devices */
+	struct work_struct hid_work;
+
+	/* list of active HID devices */
+	struct list_head	hid_list;
+
+	/* list of new HID devices to register */
+	struct list_head	new_hid_list;
+
+	/* list of dead HID devices to unregister */
+	struct list_head	dead_hid_list;
+};
+
+static struct usb_interface_descriptor acc_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 2,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor acc_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_descriptor_header *fs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_fullspeed_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_acc_descs[] = {
+	(struct usb_descriptor_header *) &acc_interface_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_in_desc,
+	(struct usb_descriptor_header *) &acc_highspeed_out_desc,
+	NULL,
+};
+
+static struct usb_string acc_string_defs[] = {
+	[INTERFACE_STRING_INDEX].s	= "Android Accessory Interface",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings acc_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= acc_string_defs,
+};
+
+static struct usb_gadget_strings *acc_strings[] = {
+	&acc_string_table,
+	NULL,
+};
+
+/* temporary variable used between acc_open() and acc_gadget_bind() */
+static struct acc_dev *_acc_dev;
+
+struct acc_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+};
+
+static inline struct acc_dev *func_to_dev(struct usb_function *f)
+{
+	return container_of(f, struct acc_dev, function);
+}
+
+static struct usb_request *acc_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void acc_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+/* add a request to the tail of a list */
+static void req_put(struct acc_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request *req_get(struct acc_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void acc_set_disconnected(struct acc_dev *dev)
+{
+	dev->online = 0;
+	dev->disconnected = 1;
+}
+
+static void acc_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_in set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void acc_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	dev->rx_done = 1;
+	if (req->status == -ESHUTDOWN) {
+		pr_debug("acc_complete_out set disconnected");
+		acc_set_disconnected(dev);
+	}
+
+	wake_up(&dev->read_wq);
+}
+
+static void acc_complete_set_string(struct usb_ep *ep, struct usb_request *req)
+{
+	struct acc_dev	*dev = ep->driver_data;
+	char *string_dest = NULL;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_string, err %d\n", req->status);
+		return;
+	}
+
+	switch (dev->string_index) {
+	case ACCESSORY_STRING_MANUFACTURER:
+		string_dest = dev->manufacturer;
+		break;
+	case ACCESSORY_STRING_MODEL:
+		string_dest = dev->model;
+		break;
+	case ACCESSORY_STRING_DESCRIPTION:
+		string_dest = dev->description;
+		break;
+	case ACCESSORY_STRING_VERSION:
+		string_dest = dev->version;
+		break;
+	case ACCESSORY_STRING_URI:
+		string_dest = dev->uri;
+		break;
+	case ACCESSORY_STRING_SERIAL:
+		string_dest = dev->serial;
+		break;
+	}
+	if (string_dest) {
+		unsigned long flags;
+
+		if (length >= ACC_STRING_SIZE)
+			length = ACC_STRING_SIZE - 1;
+
+		spin_lock_irqsave(&dev->lock, flags);
+		memcpy(string_dest, req->buf, length);
+		/* ensure zero termination */
+		string_dest[length] = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		pr_err("unknown accessory string index %d\n",
+			dev->string_index);
+	}
+}
+
+static void acc_complete_set_hid_report_desc(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	struct acc_dev *dev = hid->dev;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_set_hid_report_desc, err %d\n",
+			req->status);
+		return;
+	}
+
+	memcpy(hid->report_desc + hid->report_desc_offset, req->buf, length);
+	hid->report_desc_offset += length;
+	if (hid->report_desc_offset == hid->report_desc_len) {
+		/* After we have received the entire report descriptor
+		 * we schedule work to initialize the HID device
+		 */
+		schedule_work(&dev->hid_work);
+	}
+}
+
+static void acc_complete_send_hid_event(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct acc_hid_dev *hid = req->context;
+	int length = req->actual;
+
+	if (req->status != 0) {
+		pr_err("acc_complete_send_hid_event, err %d\n", req->status);
+		return;
+	}
+
+	hid_report_raw_event(hid->hid, HID_INPUT_REPORT, req->buf, length, 1);
+}
+
+static int acc_hid_parse(struct hid_device *hid)
+{
+	struct acc_hid_dev *hdev = hid->driver_data;
+
+	hid_parse_report(hid, hdev->report_desc, hdev->report_desc_len);
+	return 0;
+}
+
+static int acc_hid_start(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_stop(struct hid_device *hid)
+{
+}
+
+static int acc_hid_open(struct hid_device *hid)
+{
+	return 0;
+}
+
+static void acc_hid_close(struct hid_device *hid)
+{
+}
+
+static int acc_hid_raw_request(struct hid_device *hid, unsigned char reportnum,
+	__u8 *buf, size_t len, unsigned char rtype, int reqtype)
+{
+	return 0;
+}
+
+static struct hid_ll_driver acc_hid_ll_driver = {
+	.parse = acc_hid_parse,
+	.start = acc_hid_start,
+	.stop = acc_hid_stop,
+	.open = acc_hid_open,
+	.close = acc_hid_close,
+	.raw_request = acc_hid_raw_request,
+};
+
+static struct acc_hid_dev *acc_hid_new(struct acc_dev *dev,
+		int id, int desc_len)
+{
+	struct acc_hid_dev *hdev;
+
+	hdev = kzalloc(sizeof(*hdev), GFP_ATOMIC);
+	if (!hdev)
+		return NULL;
+	hdev->report_desc = kzalloc(desc_len, GFP_ATOMIC);
+	if (!hdev->report_desc) {
+		kfree(hdev);
+		return NULL;
+	}
+	hdev->dev = dev;
+	hdev->id = id;
+	hdev->report_desc_len = desc_len;
+
+	return hdev;
+}
+
+static struct acc_hid_dev *acc_hid_get(struct list_head *list, int id)
+{
+	struct acc_hid_dev *hid;
+
+	list_for_each_entry(hid, list, list) {
+		if (hid->id == id)
+			return hid;
+	}
+	return NULL;
+}
+
+static int acc_register_hid(struct acc_dev *dev, int id, int desc_length)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	/* report descriptor length must be > 0 */
+	if (desc_length <= 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	/* replace HID if one already exists with this ID */
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (hid)
+		list_move(&hid->list, &dev->dead_hid_list);
+
+	hid = acc_hid_new(dev, id, desc_length);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -ENOMEM;
+	}
+
+	list_add(&hid->list, &dev->new_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* schedule work to register the HID device */
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int acc_unregister_hid(struct acc_dev *dev, int id)
+{
+	struct acc_hid_dev *hid;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	hid = acc_hid_get(&dev->hid_list, id);
+	if (!hid)
+		hid = acc_hid_get(&dev->new_hid_list, id);
+	if (!hid) {
+		spin_unlock_irqrestore(&dev->lock, flags);
+		return -EINVAL;
+	}
+
+	list_move(&hid->list, &dev->dead_hid_list);
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+	return 0;
+}
+
+static int create_bulk_endpoints(struct acc_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_in, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_in;
+		req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = acc_request_new(dev->ep_out, BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = acc_complete_out;
+		dev->rx_req[i] = req;
+	}
+
+	return 0;
+
+fail:
+	pr_err("acc_bind() could not allocate requests\n");
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+	return -1;
+}
+
+static ssize_t acc_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret = 0;
+
+	pr_debug("acc_read(%zu)\n", count);
+
+	if (dev->disconnected) {
+		pr_debug("acc_read disconnected");
+		return -ENODEV;
+	}
+
+	if (count > BULK_BUFFER_SIZE)
+		count = BULK_BUFFER_SIZE;
+
+	/* we will block until we're online */
+	pr_debug("acc_read: waiting for online\n");
+	ret = wait_event_interruptible(dev->read_wq, dev->online);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+
+	if (dev->rx_done) {
+		/* a previous request already completed; pick up its data */
+		req = dev->rx_req[0];
+		goto copy_data;
+	}
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		pr_debug("rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		ret = usb_ep_dequeue(dev->ep_out, req);
+		if (ret != 0) {
+			/*
+			 * Cancelling failed: data may already have been
+			 * received; it will be returned by the next read.
+			 */
+			pr_debug("acc_read: cancelling failed %d\n", ret);
+		}
+		goto done;
+	}
+
+copy_data:
+	dev->rx_done = 0;
+	if (dev->online) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		pr_debug("rx %p %u\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	pr_debug("acc_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t acc_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct acc_dev *dev = fp->private_data;
+	struct usb_request *req = 0;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret;
+
+	pr_debug("acc_write(%zu)\n", count);
+
+	if (!dev->online || dev->disconnected) {
+		pr_debug("acc_write disconnected or not online");
+		return -ENODEV;
+	}
+
+	while (count > 0) {
+		if (!dev->online) {
+			pr_debug("acc_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = req_get(dev, &dev->tx_idle)) || !dev->online));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > BULK_BUFFER_SIZE) {
+			xfer = BULK_BUFFER_SIZE;
+			/* more TX requests will follow, so don't send a ZLP yet */
+			req->zero = 0;
+		} else {
+			xfer = count;
+			/* If the data length is a multiple of the
+			 * maxpacket size then send a zero length packet (ZLP).
+			 */
+			req->zero = ((xfer % dev->ep_in->maxpacket) == 0);
+		}
+		if (copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			pr_debug("acc_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		req_put(dev, &dev->tx_idle, req);
+
+	pr_debug("acc_write returning %zd\n", r);
+	return r;
+}
+
+static long acc_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct acc_dev *dev = fp->private_data;
+	char *src = NULL;
+	int ret;
+
+	switch (code) {
+	case ACCESSORY_GET_STRING_MANUFACTURER:
+		src = dev->manufacturer;
+		break;
+	case ACCESSORY_GET_STRING_MODEL:
+		src = dev->model;
+		break;
+	case ACCESSORY_GET_STRING_DESCRIPTION:
+		src = dev->description;
+		break;
+	case ACCESSORY_GET_STRING_VERSION:
+		src = dev->version;
+		break;
+	case ACCESSORY_GET_STRING_URI:
+		src = dev->uri;
+		break;
+	case ACCESSORY_GET_STRING_SERIAL:
+		src = dev->serial;
+		break;
+	case ACCESSORY_IS_START_REQUESTED:
+		return dev->start_requested;
+	case ACCESSORY_GET_AUDIO_MODE:
+		return dev->audio_mode;
+	}
+	if (!src)
+		return -EINVAL;
+
+	ret = strlen(src) + 1;
+	if (copy_to_user((void __user *)value, src, ret))
+		ret = -EFAULT;
+	return ret;
+}
+
+static int acc_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_open\n");
+	if (atomic_xchg(&_acc_dev->open_excl, 1))
+		return -EBUSY;
+
+	_acc_dev->disconnected = 0;
+	fp->private_data = _acc_dev;
+	return 0;
+}
+
+static int acc_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "acc_release\n");
+
+	WARN_ON(!atomic_xchg(&_acc_dev->open_excl, 0));
+	_acc_dev->disconnected = 0;
+	return 0;
+}
+
+/* file operations for /dev/usb_accessory */
+static const struct file_operations acc_fops = {
+	.owner = THIS_MODULE,
+	.read = acc_read,
+	.write = acc_write,
+	.unlocked_ioctl = acc_ioctl,
+	.open = acc_open,
+	.release = acc_release,
+};
+
+static int acc_hid_probe(struct hid_device *hdev,
+		const struct hid_device_id *id)
+{
+	int ret;
+
+	ret = hid_parse(hdev);
+	if (ret)
+		return ret;
+	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+}
+
+static struct miscdevice acc_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "usb_accessory",
+	.fops = &acc_fops,
+};
+
+static const struct hid_device_id acc_hid_table[] = {
+	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
+	{ }
+};
+
+static struct hid_driver acc_hid_driver = {
+	.name = "USB accessory",
+	.id_table = acc_hid_table,
+	.probe = acc_hid_probe,
+};
+
+int acc_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct acc_dev	*dev = _acc_dev;
+	int	value = -EOPNOTSUPP;
+	struct acc_hid_dev *hid;
+	int offset;
+	u8 b_requestType = ctrl->bRequestType;
+	u8 b_request = ctrl->bRequest;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long flags;
+
+/*
+	printk(KERN_INFO "acc_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			b_requestType, b_request,
+			w_value, w_index, w_length);
+*/
+
+	if (b_requestType == (USB_DIR_OUT | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_START) {
+			dev->start_requested = 1;
+			schedule_delayed_work(
+				&dev->start_work, msecs_to_jiffies(10));
+			value = 0;
+		} else if (b_request == ACCESSORY_SEND_STRING) {
+			dev->string_index = w_index;
+			cdev->gadget->ep0->driver_data = dev;
+			cdev->req->complete = acc_complete_set_string;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SET_AUDIO_MODE &&
+				w_index == 0 && w_length == 0) {
+			dev->audio_mode = w_value;
+			value = 0;
+		} else if (b_request == ACCESSORY_REGISTER_HID) {
+			value = acc_register_hid(dev, w_value, w_index);
+		} else if (b_request == ACCESSORY_UNREGISTER_HID) {
+			value = acc_unregister_hid(dev, w_value);
+		} else if (b_request == ACCESSORY_SET_HID_REPORT_DESC) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->new_hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			offset = w_index;
+			if (offset != hid->report_desc_offset
+				|| offset + w_length > hid->report_desc_len) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_set_hid_report_desc;
+			value = w_length;
+		} else if (b_request == ACCESSORY_SEND_HID_EVENT) {
+			spin_lock_irqsave(&dev->lock, flags);
+			hid = acc_hid_get(&dev->hid_list, w_value);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			if (!hid) {
+				value = -EINVAL;
+				goto err;
+			}
+			cdev->req->context = hid;
+			cdev->req->complete = acc_complete_send_hid_event;
+			value = w_length;
+		}
+	} else if (b_requestType == (USB_DIR_IN | USB_TYPE_VENDOR)) {
+		if (b_request == ACCESSORY_GET_PROTOCOL) {
+			*((u16 *)cdev->req->buf) = PROTOCOL_VERSION;
+			value = sizeof(u16);
+
+			/* clear any string left over from a previous session */
+			memset(dev->manufacturer, 0, sizeof(dev->manufacturer));
+			memset(dev->model, 0, sizeof(dev->model));
+			memset(dev->description, 0, sizeof(dev->description));
+			memset(dev->version, 0, sizeof(dev->version));
+			memset(dev->uri, 0, sizeof(dev->uri));
+			memset(dev->serial, 0, sizeof(dev->serial));
+			dev->start_requested = 0;
+			dev->audio_mode = 0;
+		}
+	}
+
+	if (value >= 0) {
+		cdev->req->zero = 0;
+		cdev->req->length = value;
+		value = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (value < 0)
+			ERROR(cdev, "%s setup response queue error\n",
+				__func__);
+	}
+
+err:
+	if (value == -EOPNOTSUPP)
+		VDBG(cdev,
+			"unknown class-specific control req "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+	return value;
+}
+EXPORT_SYMBOL_GPL(acc_ctrlrequest);
+
+static int
+__acc_function_bind(struct usb_configuration *c,
+			struct usb_function *f, bool configfs)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct acc_dev	*dev = func_to_dev(f);
+	int			id;
+	int			ret;
+
+	DBG(cdev, "acc_function_bind dev: %p\n", dev);
+
+	if (configfs) {
+		if (acc_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+			ret = usb_string_id(c->cdev);
+			if (ret < 0)
+				return ret;
+			acc_string_defs[INTERFACE_STRING_INDEX].id = ret;
+			acc_interface_desc.iInterface = ret;
+		}
+		dev->cdev = c->cdev;
+	}
+	ret = hid_register_driver(&acc_hid_driver);
+	if (ret)
+		return ret;
+
+	dev->start_requested = 0;
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	acc_interface_desc.bInterfaceNumber = id;
+
+	/* allocate endpoints */
+	ret = create_bulk_endpoints(dev, &acc_fullspeed_in_desc,
+			&acc_fullspeed_out_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		acc_highspeed_in_desc.bEndpointAddress =
+			acc_fullspeed_in_desc.bEndpointAddress;
+		acc_highspeed_out_desc.bEndpointAddress =
+			acc_fullspeed_out_desc.bEndpointAddress;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
+			f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static int
+acc_function_bind_configfs(struct usb_configuration *c,
+			struct usb_function *f) {
+	return __acc_function_bind(c, f, true);
+}
+
+static void
+kill_all_hid_devices(struct acc_dev *dev)
+{
+	struct acc_hid_dev *hid;
+	struct list_head *entry, *temp;
+	unsigned long flags;
+
+	/* do nothing if usb accessory device doesn't exist */
+	if (!dev)
+		return;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_for_each_safe(entry, temp, &dev->hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		list_add(&hid->list, &dev->dead_hid_list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	schedule_work(&dev->hid_work);
+}
+
+static void
+acc_hid_unbind(struct acc_dev *dev)
+{
+	hid_unregister_driver(&acc_hid_driver);
+	kill_all_hid_devices(dev);
+}
+
+static void
+acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_request *req;
+	int i;
+
+	while ((req = req_get(dev, &dev->tx_idle)))
+		acc_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		acc_request_free(dev->rx_req[i], dev->ep_out);
+
+	acc_hid_unbind(dev);
+}
+
+static void acc_start_work(struct work_struct *data)
+{
+	char *envp[2] = { "ACCESSORY=START", NULL };
+	kobject_uevent_env(&acc_device.this_device->kobj, KOBJ_CHANGE, envp);
+}
+
+static int acc_hid_init(struct acc_hid_dev *hdev)
+{
+	struct hid_device *hid;
+	int ret;
+
+	hid = hid_allocate_device();
+	if (IS_ERR(hid))
+		return PTR_ERR(hid);
+
+	hid->ll_driver = &acc_hid_ll_driver;
+	hid->dev.parent = acc_device.this_device;
+
+	hid->bus = BUS_USB;
+	hid->vendor = HID_ANY_ID;
+	hid->product = HID_ANY_ID;
+	hid->driver_data = hdev;
+	ret = hid_add_device(hid);
+	if (ret) {
+		pr_err("can't add hid device: %d\n", ret);
+		hid_destroy_device(hid);
+		return ret;
+	}
+
+	hdev->hid = hid;
+	return 0;
+}
+
+static void acc_hid_delete(struct acc_hid_dev *hid)
+{
+	kfree(hid->report_desc);
+	kfree(hid);
+}
+
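+/* register fully-described new HID devices and destroy dead ones */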
+static void acc_hid_work(struct work_struct *data)
+{
+	struct acc_dev *dev = _acc_dev;
+	struct list_head	*entry, *temp;
+	struct acc_hid_dev *hid;
+	struct list_head	new_list, dead_list;
+	unsigned long flags;
+
+	INIT_LIST_HEAD(&new_list);
+
+	spin_lock_irqsave(&dev->lock, flags);
+
+	/* copy hids that are ready for initialization to new_list */
+	list_for_each_safe(entry, temp, &dev->new_hid_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (hid->report_desc_offset == hid->report_desc_len)
+			list_move(&hid->list, &new_list);
+	}
+
+	if (list_empty(&dev->dead_hid_list)) {
+		INIT_LIST_HEAD(&dead_list);
+	} else {
+		/* move all of dev->dead_hid_list to dead_list */
+		dead_list.prev = dev->dead_hid_list.prev;
+		dead_list.next = dev->dead_hid_list.next;
+		dead_list.next->prev = &dead_list;
+		dead_list.prev->next = &dead_list;
+		INIT_LIST_HEAD(&dev->dead_hid_list);
+	}
+
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	/* register new HID devices */
+	list_for_each_safe(entry, temp, &new_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		if (acc_hid_init(hid)) {
+			pr_err("can't add HID device %p\n", hid);
+			acc_hid_delete(hid);
+		} else {
+			spin_lock_irqsave(&dev->lock, flags);
+			list_move(&hid->list, &dev->hid_list);
+			spin_unlock_irqrestore(&dev->lock, flags);
+		}
+	}
+
+	/* remove dead HID devices */
+	list_for_each_safe(entry, temp, &dead_list) {
+		hid = list_entry(entry, struct acc_hid_dev, list);
+		list_del(&hid->list);
+		if (hid->hid)
+			hid_destroy_device(hid->hid);
+		acc_hid_delete(hid);
+	}
+}
+
+static int acc_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "acc_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	dev->online = 1;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void acc_function_disable(struct usb_function *f)
+{
+	struct acc_dev	*dev = func_to_dev(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "acc_function_disable\n");
+	acc_set_disconnected(dev);
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
+static int acc_setup(void)
+{
+	struct acc_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	atomic_set(&dev->open_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->hid_list);
+	INIT_LIST_HEAD(&dev->new_hid_list);
+	INIT_LIST_HEAD(&dev->dead_hid_list);
+	INIT_DELAYED_WORK(&dev->start_work, acc_start_work);
+	INIT_WORK(&dev->hid_work, acc_hid_work);
+
+	/* _acc_dev must be set before calling usb_gadget_register_driver */
+	_acc_dev = dev;
+
+	ret = misc_register(&acc_device);
+	if (ret)
+		goto err;
+
+	return 0;
+
+err:
+	kfree(dev);
+	pr_err("USB accessory gadget driver failed to initialize\n");
+	return ret;
+}
+
+void acc_disconnect(void)
+{
+	/* unregister all HID devices if USB is disconnected */
+	kill_all_hid_devices(_acc_dev);
+}
+EXPORT_SYMBOL_GPL(acc_disconnect);
+
+static void acc_cleanup(void)
+{
+	misc_deregister(&acc_device);
+	kfree(_acc_dev);
+	_acc_dev = NULL;
+}
+
+static struct acc_instance *to_acc_instance(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct acc_instance,
+		func_inst.group);
+}
+
+static void acc_attr_release(struct config_item *item)
+{
+	struct acc_instance *fi_acc = to_acc_instance(item);
+
+	usb_put_function_instance(&fi_acc->func_inst);
+}
+
+static struct configfs_item_operations acc_item_ops = {
+	.release        = acc_attr_release,
+};
+
+static struct config_item_type acc_func_type = {
+	.ct_item_ops    = &acc_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct acc_instance *to_fi_acc(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct acc_instance, func_inst);
+}
+
+static int acc_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+	struct acc_instance *fi_acc;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_acc = to_fi_acc(fi);
+	fi_acc->name = ptr;
+	return 0;
+}
+
+static void acc_free_inst(struct usb_function_instance *fi)
+{
+	struct acc_instance *fi_acc;
+
+	fi_acc = to_fi_acc(fi);
+	kfree(fi_acc->name);
+	acc_cleanup();
+}
+
+static struct usb_function_instance *acc_alloc_inst(void)
+{
+	struct acc_instance *fi_acc;
+	struct acc_dev *dev;
+	int err;
+
+	fi_acc = kzalloc(sizeof(*fi_acc), GFP_KERNEL);
+	if (!fi_acc)
+		return ERR_PTR(-ENOMEM);
+	fi_acc->func_inst.set_inst_name = acc_set_inst_name;
+	fi_acc->func_inst.free_func_inst = acc_free_inst;
+
+	err = acc_setup();
+	if (err) {
+		kfree(fi_acc);
+		pr_err("Error setting ACCESSORY\n");
+		return ERR_PTR(err);
+	}
+
+	config_group_init_type_name(&fi_acc->func_inst.group,
+					"", &acc_func_type);
+	dev = _acc_dev;
+	return  &fi_acc->func_inst;
+}
+
+static void acc_free(struct usb_function *f)
+{
+	/* no-op: no function-specific resources are allocated in acc_alloc() */
+}
+
+int acc_ctrlrequest_configfs(struct usb_function *f,
+			const struct usb_ctrlrequest *ctrl) {
+	if (f->config != NULL && f->config->cdev != NULL)
+		return acc_ctrlrequest(f->config->cdev, ctrl);
+	else
+		return -1;
+}
+
+static struct usb_function *acc_alloc(struct usb_function_instance *fi)
+{
+	struct acc_dev *dev = _acc_dev;
+
+	pr_info("acc_alloc\n");
+
+	dev->function.name = "accessory";
+	dev->function.strings = acc_strings;
+	dev->function.fs_descriptors = fs_acc_descs;
+	dev->function.hs_descriptors = hs_acc_descs;
+	dev->function.bind = acc_function_bind_configfs;
+	dev->function.unbind = acc_function_unbind;
+	dev->function.set_alt = acc_function_set_alt;
+	dev->function.disable = acc_function_disable;
+	dev->function.free_func = acc_free;
+	dev->function.setup = acc_ctrlrequest_configfs;
+
+	return &dev->function;
+}
+DECLARE_USB_FUNCTION_INIT(accessory, acc_alloc_inst, acc_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_audio_source.c b/drivers/usb/gadget/function/f_audio_source.c
new file mode 100644
index 0000000..bcd8174
--- /dev/null
+++ b/drivers/usb/gadget/function/f_audio_source.c
@@ -0,0 +1,1060 @@
+/*
+ * Gadget Function Driver for USB audio source device
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/usb/audio.h>
+#include <linux/wait.h>
+#include <sound/core.h>
+#include <sound/initval.h>
+#include <sound/pcm.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#define SAMPLE_RATE 44100
+#define FRAMES_PER_MSEC (SAMPLE_RATE / 1000)
+
+#define IN_EP_MAX_PACKET_SIZE 256
+
+/* Number of requests to allocate */
+#define IN_EP_REQ_COUNT 4
+
+#define AUDIO_AC_INTERFACE	0
+#define AUDIO_AS_INTERFACE	1
+#define AUDIO_NUM_INTERFACES	2
+#define MAX_INST_NAME_LEN     40
+
+/* B.3.1  Standard AC Interface Descriptor */
+static struct usb_interface_descriptor ac_interface_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOCONTROL,
+};
+
+DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
+
+#define UAC_DT_AC_HEADER_LENGTH	UAC_DT_AC_HEADER_SIZE(AUDIO_NUM_INTERFACES)
+/* 1 input terminal, 1 output terminal and 1 feature unit */
+#define UAC_DT_TOTAL_LENGTH (UAC_DT_AC_HEADER_LENGTH \
+	+ UAC_DT_INPUT_TERMINAL_SIZE + UAC_DT_OUTPUT_TERMINAL_SIZE \
+	+ UAC_DT_FEATURE_UNIT_SIZE(0))
+/* B.3.2  Class-Specific AC Interface Descriptor */
+static struct uac1_ac_header_descriptor_2 ac_header_desc = {
+	.bLength =		UAC_DT_AC_HEADER_LENGTH,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_HEADER,
+	.bcdADC =		__constant_cpu_to_le16(0x0100),
+	.wTotalLength =		__constant_cpu_to_le16(UAC_DT_TOTAL_LENGTH),
+	.bInCollection =	AUDIO_NUM_INTERFACES,
+	.baInterfaceNr = {
+		[0] =		AUDIO_AC_INTERFACE,
+		[1] =		AUDIO_AS_INTERFACE,
+	}
+};
+
+#define INPUT_TERMINAL_ID	1
+static struct uac_input_terminal_descriptor input_terminal_desc = {
+	.bLength =		UAC_DT_INPUT_TERMINAL_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_INPUT_TERMINAL,
+	.bTerminalID =		INPUT_TERMINAL_ID,
+	.wTerminalType =	UAC_INPUT_TERMINAL_MICROPHONE,
+	.bAssocTerminal =	0,
+	.wChannelConfig =	0x3,
+};
+
+DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
+
+#define FEATURE_UNIT_ID		2
+static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
+	.bLength		= UAC_DT_FEATURE_UNIT_SIZE(0),
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_FEATURE_UNIT,
+	.bUnitID		= FEATURE_UNIT_ID,
+	.bSourceID		= INPUT_TERMINAL_ID,
+	.bControlSize		= 2,
+};
+
+#define OUTPUT_TERMINAL_ID	3
+static struct uac1_output_terminal_descriptor output_terminal_desc = {
+	.bLength		= UAC_DT_OUTPUT_TERMINAL_SIZE,
+	.bDescriptorType	= USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype	= UAC_OUTPUT_TERMINAL,
+	.bTerminalID		= OUTPUT_TERMINAL_ID,
+	.wTerminalType		= UAC_TERMINAL_STREAMING,
+	.bAssocTerminal		= FEATURE_UNIT_ID,
+	.bSourceID		= FEATURE_UNIT_ID,
+};
+
+/* B.4.1  Standard AS Interface Descriptor */
+static struct usb_interface_descriptor as_interface_alt_0_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	0,
+	.bNumEndpoints =	0,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+static struct usb_interface_descriptor as_interface_alt_1_desc = {
+	.bLength =		USB_DT_INTERFACE_SIZE,
+	.bDescriptorType =	USB_DT_INTERFACE,
+	.bAlternateSetting =	1,
+	.bNumEndpoints =	1,
+	.bInterfaceClass =	USB_CLASS_AUDIO,
+	.bInterfaceSubClass =	USB_SUBCLASS_AUDIOSTREAMING,
+};
+
+/* B.4.2  Class-Specific AS Interface Descriptor */
+static struct uac1_as_header_descriptor as_header_desc = {
+	.bLength =		UAC_DT_AS_HEADER_SIZE,
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_AS_GENERAL,
+	.bTerminalLink =	INPUT_TERMINAL_ID,
+	.bDelay =		1,
+	.wFormatTag =		UAC_FORMAT_TYPE_I_PCM,
+};
+
+DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
+
+static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
+	.bLength =		UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
+	.bDescriptorType =	USB_DT_CS_INTERFACE,
+	.bDescriptorSubtype =	UAC_FORMAT_TYPE,
+	.bFormatType =		UAC_FORMAT_TYPE_I,
+	.bSubframeSize =	2,
+	.bBitResolution =	16,
+	.bSamFreqType =		1,
+};
+
+/* Standard ISO IN Endpoint Descriptor for highspeed */
+static struct usb_endpoint_descriptor hs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		4, /* poll 1 per millisecond */
+};
+
+/* Standard ISO IN Endpoint Descriptor for fullspeed */
+static struct usb_endpoint_descriptor fs_as_in_ep_desc  = {
+	.bLength =		USB_DT_ENDPOINT_AUDIO_SIZE,
+	.bDescriptorType =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes =		USB_ENDPOINT_SYNC_SYNC
+				| USB_ENDPOINT_XFER_ISOC,
+	.wMaxPacketSize =	__constant_cpu_to_le16(IN_EP_MAX_PACKET_SIZE),
+	.bInterval =		1, /* poll 1 per millisecond */
+};
+
+/* Class-specific AS ISO IN Endpoint Descriptor */
+static struct uac_iso_endpoint_descriptor as_iso_in_desc = {
+	.bLength =		UAC_ISO_ENDPOINT_DESC_SIZE,
+	.bDescriptorType =	USB_DT_CS_ENDPOINT,
+	.bDescriptorSubtype =	UAC_EP_GENERAL,
+	.bmAttributes =		1,
+	.bLockDelayUnits =	1,
+	.wLockDelay =		__constant_cpu_to_le16(1),
+};
+
+static struct usb_descriptor_header *hs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&hs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_audio_desc[] = {
+	(struct usb_descriptor_header *)&ac_interface_desc,
+	(struct usb_descriptor_header *)&ac_header_desc,
+
+	(struct usb_descriptor_header *)&input_terminal_desc,
+	(struct usb_descriptor_header *)&output_terminal_desc,
+	(struct usb_descriptor_header *)&feature_unit_desc,
+
+	(struct usb_descriptor_header *)&as_interface_alt_0_desc,
+	(struct usb_descriptor_header *)&as_interface_alt_1_desc,
+	(struct usb_descriptor_header *)&as_header_desc,
+
+	(struct usb_descriptor_header *)&as_type_i_desc,
+
+	(struct usb_descriptor_header *)&fs_as_in_ep_desc,
+	(struct usb_descriptor_header *)&as_iso_in_desc,
+	NULL,
+};
+
+static struct snd_pcm_hardware audio_hw_info = {
+	.info =			SNDRV_PCM_INFO_MMAP |
+				SNDRV_PCM_INFO_MMAP_VALID |
+				SNDRV_PCM_INFO_BATCH |
+				SNDRV_PCM_INFO_INTERLEAVED |
+				SNDRV_PCM_INFO_BLOCK_TRANSFER,
+
+	.formats		= SNDRV_PCM_FMTBIT_S16_LE,
+	.channels_min		= 2,
+	.channels_max		= 2,
+	.rate_min		= SAMPLE_RATE,
+	.rate_max		= SAMPLE_RATE,
+
+	.buffer_bytes_max =	1024 * 1024,
+	.period_bytes_min =	64,
+	.period_bytes_max =	512 * 1024,
+	.periods_min =		2,
+	.periods_max =		1024,
+};
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_config {
+	int	card;
+	int	device;
+};
+
+struct audio_dev {
+	struct usb_function		func;
+	struct snd_card			*card;
+	struct snd_pcm			*pcm;
+	struct snd_pcm_substream *substream;
+
+	struct list_head		idle_reqs;
+	struct usb_ep			*in_ep;
+
+	spinlock_t			lock;
+
+	/* beginning, end and current position in our buffer */
+	void				*buffer_start;
+	void				*buffer_end;
+	void				*buffer_pos;
+
+	/* byte size of a "period" */
+	unsigned int			period;
+	/* bytes sent since last call to snd_pcm_period_elapsed */
+	unsigned int			period_offset;
+	/* time we started playing */
+	ktime_t				start_time;
+	/* number of frames sent since start_time */
+	s64				frames_sent;
+	struct audio_source_config	*config;
+};
+
+static inline struct audio_dev *func_to_audio(struct usb_function *f)
+{
+	return container_of(f, struct audio_dev, func);
+}
+
+/*-------------------------------------------------------------------------*/
+
+struct audio_source_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+	struct audio_source_config *config;
+	struct device *audio_device;
+};
+
+static void audio_source_attr_release(struct config_item *item);
+
+static struct configfs_item_operations audio_source_item_ops = {
+	.release        = audio_source_attr_release,
+};
+
+static struct config_item_type audio_source_func_type = {
+	.ct_item_ops    = &audio_source_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf);
+
+static DEVICE_ATTR(pcm, S_IRUGO, audio_source_pcm_show, NULL);
+
+static struct device_attribute *audio_source_function_attributes[] = {
+	&dev_attr_pcm,
+	NULL
+};
+
+/*--------------------------------------------------------------------------*/
+
+static struct usb_request *audio_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+	req->length = buffer_size;
+	return req;
+}
+
+static void audio_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
+static void audio_req_put(struct audio_dev *audio, struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	list_add_tail(&req->list, &audio->idle_reqs);
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static struct usb_request *audio_req_get(struct audio_dev *audio)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	if (list_empty(&audio->idle_reqs)) {
+		req = 0;
+	} else {
+		req = list_first_entry(&audio->idle_reqs, struct usb_request,
+				list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&audio->lock, flags);
+	return req;
+}
+
+/* send the appropriate number of packets to match our bitrate */
+static void audio_send(struct audio_dev *audio)
+{
+	struct snd_pcm_runtime *runtime;
+	struct usb_request *req;
+	int length, length1, length2, ret;
+	s64 msecs;
+	s64 frames;
+	ktime_t now;
+
+	/* audio->substream will be null if we have been closed */
+	if (!audio->substream)
+		return;
+	/* audio->buffer_pos will be null if we have been stopped */
+	if (!audio->buffer_pos)
+		return;
+
+	runtime = audio->substream->runtime;
+
+	/* compute number of frames to send */
+	now = ktime_get();
+	msecs = ktime_to_ns(now) - ktime_to_ns(audio->start_time);
+	do_div(msecs, 1000000);
+	frames = msecs * SAMPLE_RATE;
+	do_div(frames, 1000);
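+
+	/*
+	 * Worked example (illustrative numbers only): with a 44.1 kHz
+	 * SAMPLE_RATE, 100 ms of elapsed time gives msecs = 100 and
+	 * frames = 100 * 44100 / 1000 = 4410 frames owed so far.
+	 */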
+
+	/* Readjust our frames_sent if we fall too far behind.
+	 * If we get too far behind it is better to drop some frames than
+	 * to keep sending data too fast in an attempt to catch up.
+	 */
+	if (frames - audio->frames_sent > 10 * FRAMES_PER_MSEC)
+		audio->frames_sent = frames - FRAMES_PER_MSEC;
+
+	frames -= audio->frames_sent;
+
+	/* We need to send something to keep the pipeline going */
+	if (frames <= 0)
+		frames = FRAMES_PER_MSEC;
+
+	while (frames > 0) {
+		req = audio_req_get(audio);
+		if (!req)
+			break;
+
+		length = frames_to_bytes(runtime, frames);
+		if (length > IN_EP_MAX_PACKET_SIZE)
+			length = IN_EP_MAX_PACKET_SIZE;
+
+		if (audio->buffer_pos + length > audio->buffer_end)
+			length1 = audio->buffer_end - audio->buffer_pos;
+		else
+			length1 = length;
+		memcpy(req->buf, audio->buffer_pos, length1);
+		if (length1 < length) {
+			/* Wrap around and copy remaining length
+			 * at beginning of buffer.
+			 */
+			length2 = length - length1;
+			memcpy(req->buf + length1, audio->buffer_start,
+					length2);
+			audio->buffer_pos = audio->buffer_start + length2;
+		} else {
+			audio->buffer_pos += length1;
+			if (audio->buffer_pos >= audio->buffer_end)
+				audio->buffer_pos = audio->buffer_start;
+		}
+
+		req->length = length;
+		ret = usb_ep_queue(audio->in_ep, req, GFP_ATOMIC);
+		if (ret < 0) {
+			pr_err("usb_ep_queue failed ret: %d\n", ret);
+			audio_req_put(audio, req);
+			break;
+		}
+
+		frames -= bytes_to_frames(runtime, length);
+		audio->frames_sent += bytes_to_frames(runtime, length);
+	}
+}
+
+static void audio_control_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	/* nothing to do here */
+}
+
+static void audio_data_complete(struct usb_ep *ep, struct usb_request *req)
+{
+	struct audio_dev *audio = req->context;
+
+	pr_debug("audio_data_complete req->status %d req->actual %d\n",
+		req->status, req->actual);
+
+	audio_req_put(audio, req);
+
+	if (!audio->buffer_start || req->status)
+		return;
+
+	audio->period_offset += req->actual;
+	if (audio->period_offset >= audio->period) {
+		snd_pcm_period_elapsed(audio->substream);
+		audio->period_offset = 0;
+	}
+	audio_send(audio);
+}
+
+static int audio_set_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	int value = -EOPNOTSUPP;
+	u16 ep = le16_to_cpu(ctrl->wIndex);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	switch (ctrl->bRequest) {
+	case UAC_SET_CUR:
+	case UAC_SET_MIN:
+	case UAC_SET_MAX:
+	case UAC_SET_RES:
+		value = len;
+		break;
+	default:
+		break;
+	}
+
+	return value;
+}
+
+static int audio_get_endpoint_req(struct usb_function *f,
+		const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int value = -EOPNOTSUPP;
+	u8 ep = ((le16_to_cpu(ctrl->wIndex) >> 8) & 0xFF);
+	u16 len = le16_to_cpu(ctrl->wLength);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u8 *buf = cdev->req->buf;
+
+	pr_debug("bRequest 0x%x, w_value 0x%04x, len %d, endpoint %d\n",
+			ctrl->bRequest, w_value, len, ep);
+
+	if (w_value == UAC_EP_CS_ATTR_SAMPLE_RATE << 8) {
+		switch (ctrl->bRequest) {
+		case UAC_GET_CUR:
+		case UAC_GET_MIN:
+		case UAC_GET_MAX:
+		case UAC_GET_RES:
+			/* return our sample rate */
+			buf[0] = (u8)SAMPLE_RATE;
+			buf[1] = (u8)(SAMPLE_RATE >> 8);
+			buf[2] = (u8)(SAMPLE_RATE >> 16);
+			value = 3;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return value;
+}
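+
+/*
+ * Example of the 3-byte encoding above (illustrative value): a SAMPLE_RATE of
+ * 44100 (0x00AC44) is reported little-endian as buf[0] = 0x44, buf[1] = 0xAC,
+ * buf[2] = 0x00, i.e. the UAC 3-byte sample-frequency format.
+ */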
+
+static int
+audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
+{
+	struct usb_composite_dev *cdev = f->config->cdev;
+	struct usb_request *req = cdev->req;
+	int value = -EOPNOTSUPP;
+	u16 w_index = le16_to_cpu(ctrl->wIndex);
+	u16 w_value = le16_to_cpu(ctrl->wValue);
+	u16 w_length = le16_to_cpu(ctrl->wLength);
+
+	/* composite driver infrastructure handles everything; interface
+	 * activation uses set_alt().
+	 */
+	switch (ctrl->bRequestType) {
+	case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_set_endpoint_req(f, ctrl);
+		break;
+
+	case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
+		value = audio_get_endpoint_req(f, ctrl);
+		break;
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		pr_debug("audio req%02x.%02x v%04x i%04x l%d\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+		req->zero = 0;
+		req->length = value;
+		req->complete = audio_control_complete;
+		value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
+		if (value < 0)
+			pr_err("audio response queue error %d\n", value);
+	}
+
+	/* device either stalls (value < 0) or reports success */
+	return value;
+}
+
+static int audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	pr_debug("audio_set_alt intf %d, alt %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, audio->in_ep);
+	if (ret)
+		return ret;
+
+	usb_ep_enable(audio->in_ep);
+	return 0;
+}
+
+static void audio_disable(struct usb_function *f)
+{
+	struct audio_dev	*audio = func_to_audio(f);
+
+	pr_debug("audio_disable\n");
+	usb_ep_disable(audio->in_ep);
+}
+
+static void audio_free_func(struct usb_function *f)
+{
+	/* no-op */
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void audio_build_desc(struct audio_dev *audio)
+{
+	u8 *sam_freq;
+	int rate;
+
+	/* Set channel numbers */
+	input_terminal_desc.bNrChannels = 2;
+	as_type_i_desc.bNrChannels = 2;
+
+	/* Set sample rates */
+	rate = SAMPLE_RATE;
+	sam_freq = as_type_i_desc.tSamFreq[0];
+	memcpy(sam_freq, &rate, 3);
+}
+
+
+static int snd_card_setup(struct usb_configuration *c,
+	struct audio_source_config *config);
+static struct audio_source_instance *to_fi_audio_source(
+	const struct usb_function_instance *fi);
+
+
+/* audio function driver setup/binding */
+static int
+audio_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct audio_dev *audio = func_to_audio(f);
+	int status;
+	struct usb_ep *ep;
+	struct usb_request *req;
+	int i;
+	int err;
+
+	if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+		struct audio_source_instance *fi_audio =
+				to_fi_audio_source(f->fi);
+		struct audio_source_config *config =
+				fi_audio->config;
+
+		err = snd_card_setup(c, config);
+		if (err)
+			return err;
+	}
+
+	audio_build_desc(audio);
+
+	/* allocate instance-specific interface IDs, and patch descriptors */
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	ac_interface_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AC_INTERFACE */
+	ac_header_desc.baInterfaceNr[0] = status;
+
+	status = usb_interface_id(c, f);
+	if (status < 0)
+		goto fail;
+	as_interface_alt_0_desc.bInterfaceNumber = status;
+	as_interface_alt_1_desc.bInterfaceNumber = status;
+
+	/* AUDIO_AS_INTERFACE */
+	ac_header_desc.baInterfaceNr[1] = status;
+
+	status = -ENODEV;
+
+	/* allocate our endpoint */
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_as_in_ep_desc);
+	if (!ep)
+		goto fail;
+	audio->in_ep = ep;
+	ep->driver_data = audio; /* claim */
+
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		hs_as_in_ep_desc.bEndpointAddress =
+			fs_as_in_ep_desc.bEndpointAddress;
+
+	f->fs_descriptors = fs_audio_desc;
+	f->hs_descriptors = hs_audio_desc;
+
+	for (i = 0, status = 0; i < IN_EP_REQ_COUNT && status == 0; i++) {
+		req = audio_request_new(ep, IN_EP_MAX_PACKET_SIZE);
+		if (req) {
+			req->context = audio;
+			req->complete = audio_data_complete;
+			audio_req_put(audio, req);
+		} else
+			status = -ENOMEM;
+	}
+
+fail:
+	return status;
+}
+
+static void
+audio_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct audio_dev *audio = func_to_audio(f);
+	struct usb_request *req;
+
+	while ((req = audio_req_get(audio)))
+		audio_request_free(req, audio->in_ep);
+
+	snd_card_free_when_closed(audio->card);
+	audio->card = NULL;
+	audio->pcm = NULL;
+	audio->substream = NULL;
+	audio->in_ep = NULL;
+
+	if (IS_ENABLED(CONFIG_USB_CONFIGFS)) {
+		struct audio_source_instance *fi_audio =
+				to_fi_audio_source(f->fi);
+		struct audio_source_config *config =
+				fi_audio->config;
+
+		config->card = -1;
+		config->device = -1;
+	}
+}
+
+static void audio_pcm_playback_start(struct audio_dev *audio)
+{
+	audio->start_time = ktime_get();
+	audio->frames_sent = 0;
+	audio_send(audio);
+}
+
+static void audio_pcm_playback_stop(struct audio_dev *audio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->buffer_start = 0;
+	audio->buffer_end = 0;
+	audio->buffer_pos = 0;
+	spin_unlock_irqrestore(&audio->lock, flags);
+}
+
+static int audio_pcm_open(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = substream->private_data;
+
+	runtime->private_data = audio;
+	runtime->hw = audio_hw_info;
+	snd_pcm_limit_hw_rates(runtime);
+	runtime->hw.channels_max = 2;
+
+	audio->substream = substream;
+	return 0;
+}
+
+static int audio_pcm_close(struct snd_pcm_substream *substream)
+{
+	struct audio_dev *audio = substream->private_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(&audio->lock, flags);
+	audio->substream = NULL;
+	spin_unlock_irqrestore(&audio->lock, flags);
+
+	return 0;
+}
+
+static int audio_pcm_hw_params(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
+{
+	unsigned int channels = params_channels(params);
+	unsigned int rate = params_rate(params);
+
+	if (rate != SAMPLE_RATE)
+		return -EINVAL;
+	if (channels != 2)
+		return -EINVAL;
+
+	return snd_pcm_lib_alloc_vmalloc_buffer(substream,
+		params_buffer_bytes(params));
+}
+
+static int audio_pcm_hw_free(struct snd_pcm_substream *substream)
+{
+	return snd_pcm_lib_free_vmalloc_buffer(substream);
+}
+
+static int audio_pcm_prepare(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+
+	audio->period = snd_pcm_lib_period_bytes(substream);
+	audio->period_offset = 0;
+	audio->buffer_start = runtime->dma_area;
+	audio->buffer_end = audio->buffer_start
+		+ snd_pcm_lib_buffer_bytes(substream);
+	audio->buffer_pos = audio->buffer_start;
+
+	return 0;
+}
+
+static snd_pcm_uframes_t audio_pcm_pointer(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct audio_dev *audio = runtime->private_data;
+	ssize_t bytes = audio->buffer_pos - audio->buffer_start;
+
+	/* return offset of next frame to fill in our buffer */
+	return bytes_to_frames(runtime, bytes);
+}
+
+static int audio_pcm_playback_trigger(struct snd_pcm_substream *substream,
+					int cmd)
+{
+	struct audio_dev *audio = substream->runtime->private_data;
+	int ret = 0;
+
+	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
+		audio_pcm_playback_start(audio);
+		break;
+
+	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		audio_pcm_playback_stop(audio);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct audio_dev _audio_dev = {
+	.func = {
+		.name = "audio_source",
+		.bind = audio_bind,
+		.unbind = audio_unbind,
+		.set_alt = audio_set_alt,
+		.setup = audio_setup,
+		.disable = audio_disable,
+		.free_func = audio_free_func,
+	},
+	.lock = __SPIN_LOCK_UNLOCKED(_audio_dev.lock),
+	.idle_reqs = LIST_HEAD_INIT(_audio_dev.idle_reqs),
+};
+
+static struct snd_pcm_ops audio_playback_ops = {
+	.open		= audio_pcm_open,
+	.close		= audio_pcm_close,
+	.ioctl		= snd_pcm_lib_ioctl,
+	.hw_params	= audio_pcm_hw_params,
+	.hw_free	= audio_pcm_hw_free,
+	.prepare	= audio_pcm_prepare,
+	.trigger	= audio_pcm_playback_trigger,
+	.pointer	= audio_pcm_pointer,
+};
+
+int audio_source_bind_config(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	int err;
+
+	config->card = -1;
+	config->device = -1;
+
+	audio = &_audio_dev;
+
+	err = snd_card_setup(c, config);
+	if (err)
+		return err;
+
+	err = usb_add_function(c, &audio->func);
+	if (err)
+		goto add_fail;
+
+	return 0;
+
+add_fail:
+	snd_card_free(audio->card);
+	return err;
+}
+
+static int snd_card_setup(struct usb_configuration *c,
+		struct audio_source_config *config)
+{
+	struct audio_dev *audio;
+	struct snd_card *card;
+	struct snd_pcm *pcm;
+	int err;
+
+	audio = &_audio_dev;
+
+	err = snd_card_new(&c->cdev->gadget->dev,
+			SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
+			THIS_MODULE, 0, &card);
+	if (err)
+		return err;
+
+	err = snd_pcm_new(card, "USB audio source", 0, 1, 0, &pcm);
+	if (err)
+		goto pcm_fail;
+
+	pcm->private_data = audio;
+	pcm->info_flags = 0;
+	audio->pcm = pcm;
+
+	strlcpy(pcm->name, "USB gadget audio", sizeof(pcm->name));
+
+	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &audio_playback_ops);
+	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
+				NULL, 0, 64 * 1024);
+
+	strlcpy(card->driver, "audio_source", sizeof(card->driver));
+	strlcpy(card->shortname, card->driver, sizeof(card->shortname));
+	strlcpy(card->longname, "USB accessory audio source",
+		sizeof(card->longname));
+
+	err = snd_card_register(card);
+	if (err)
+		goto register_fail;
+
+	config->card = pcm->card->number;
+	config->device = pcm->device;
+	audio->card = card;
+	return 0;
+
+register_fail:
+pcm_fail:
+	snd_card_free(audio->card);
+	return err;
+}
+
+static struct audio_source_instance *to_audio_source_instance(
+					struct config_item *item)
+{
+	return container_of(to_config_group(item), struct audio_source_instance,
+		func_inst.group);
+}
+
+static struct audio_source_instance *to_fi_audio_source(
+					const struct usb_function_instance *fi)
+{
+	return container_of(fi, struct audio_source_instance, func_inst);
+}
+
+static void audio_source_attr_release(struct config_item *item)
+{
+	struct audio_source_instance *fi_audio = to_audio_source_instance(item);
+
+	usb_put_function_instance(&fi_audio->func_inst);
+}
+
+static int audio_source_set_inst_name(struct usb_function_instance *fi,
+					const char *name)
+{
+	struct audio_source_instance *fi_audio;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_audio = to_fi_audio_source(fi);
+	fi_audio->name = ptr;
+
+	return 0;
+}
+
+static void audio_source_free_inst(struct usb_function_instance *fi)
+{
+	struct audio_source_instance *fi_audio;
+
+	fi_audio = to_fi_audio_source(fi);
+	device_destroy(fi_audio->audio_device->class,
+			fi_audio->audio_device->devt);
+	kfree(fi_audio->name);
+	kfree(fi_audio->config);
+}
+
+static ssize_t audio_source_pcm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct audio_source_instance *fi_audio = dev_get_drvdata(dev);
+	struct audio_source_config *config = fi_audio->config;
+
+	/* print PCM card and device numbers */
+	return sprintf(buf, "%d %d\n", config->card, config->device);
+}
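+
+/*
+ * Illustrative userspace usage: reading the "pcm" attribute of the device
+ * created by create_function_device() below (the exact sysfs path depends on
+ * that helper) returns "<card> <device>", identifying the ALSA PCM that
+ * userspace should feed; it reads "-1 -1" until the function is bound.
+ */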
+
+struct device *create_function_device(char *name);
+
+static struct usb_function_instance *audio_source_alloc_inst(void)
+{
+	struct audio_source_instance *fi_audio;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	struct device *dev;
+	void *err_ptr;
+	int err = 0;
+
+	fi_audio = kzalloc(sizeof(*fi_audio), GFP_KERNEL);
+	if (!fi_audio)
+		return ERR_PTR(-ENOMEM);
+
+	fi_audio->func_inst.set_inst_name = audio_source_set_inst_name;
+	fi_audio->func_inst.free_func_inst = audio_source_free_inst;
+
+	fi_audio->config = kzalloc(sizeof(struct audio_source_config),
+							GFP_KERNEL);
+	if (!fi_audio->config) {
+		err_ptr = ERR_PTR(-ENOMEM);
+		goto fail_audio;
+	}
+
+	config_group_init_type_name(&fi_audio->func_inst.group, "",
+						&audio_source_func_type);
+	dev = create_function_device("f_audio_source");
+
+	if (IS_ERR(dev)) {
+		err_ptr = dev;
+		goto fail_audio_config;
+	}
+
+	fi_audio->config->card = -1;
+	fi_audio->config->device = -1;
+	fi_audio->audio_device = dev;
+
+	attrs = audio_source_function_attributes;
+	if (attrs) {
+		while ((attr = *attrs++) && !err)
+			err = device_create_file(dev, attr);
+		if (err) {
+			err_ptr = ERR_PTR(-EINVAL);
+			goto fail_device;
+		}
+	}
+
+	dev_set_drvdata(dev, fi_audio);
+	_audio_dev.config = fi_audio->config;
+
+	return  &fi_audio->func_inst;
+
+fail_device:
+	device_destroy(dev->class, dev->devt);
+fail_audio_config:
+	kfree(fi_audio->config);
+fail_audio:
+	kfree(fi_audio);
+	return err_ptr;
+
+}
+
+static struct usb_function *audio_source_alloc(struct usb_function_instance *fi)
+{
+	return &_audio_dev.func;
+}
+
+DECLARE_USB_FUNCTION_INIT(audio_source, audio_source_alloc_inst,
+			audio_source_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_diag.c b/drivers/usb/gadget/function/f_diag.c
new file mode 100644
index 0000000..767f53c
--- /dev/null
+++ b/drivers/usb/gadget/function/f_diag.c
@@ -0,0 +1,1151 @@
+/* drivers/usb/gadget/f_diag.c
+ * Diag Function Device - Route ARM9 and ARM11 DIAG messages
+ * between HOST and DEVICE.
+ * Copyright (C) 2007 Google, Inc.
+ * Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+ * Author: Brian Swetland <swetland@google.com>
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kref.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/ratelimit.h>
+
+#include <linux/usb/usbdiag.h>
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/kmemleak.h>
+
+#define MAX_INST_NAME_LEN	40
+
+/* dload specific support */
+#define PID_MAGIC_ID		0x71432909
+#define SERIAL_NUM_MAGIC_ID	0x61945374
+#define SERIAL_NUMBER_LENGTH	128
+
+struct magic_num_struct {
+	uint32_t pid;
+	uint32_t serial_num;
+};
+
+struct dload_struct {
+	uint32_t	pid;
+	char		serial_number[SERIAL_NUMBER_LENGTH];
+	struct magic_num_struct magic_struct;
+};
+
+/* for configfs support */
+struct diag_opts {
+	struct usb_function_instance func_inst;
+	char *name;
+};
+
+static inline struct diag_opts *to_diag_opts(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct diag_opts,
+			    func_inst.group);
+}
+
+static DEFINE_SPINLOCK(ch_lock);
+static LIST_HEAD(usb_diag_ch_list);
+
+static struct dload_struct __iomem *diag_dload;
+
+static struct usb_interface_descriptor intf_desc = {
+	.bLength            =	sizeof(intf_desc),
+	.bDescriptorType    =	USB_DT_INTERFACE,
+	.bNumEndpoints      =	2,
+	.bInterfaceClass    =	0xFF,
+	.bInterfaceSubClass =	0xFF,
+	.bInterfaceProtocol =	0xFF,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(512),
+	.bInterval        =	0,
+};
+static struct usb_endpoint_descriptor fs_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor hs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(512),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor fs_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(64),
+	.bInterval        =	0,
+};
+
+static struct usb_endpoint_descriptor ss_bulk_in_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_IN,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_in_comp_desc = {
+	.bLength =		sizeof(ss_bulk_in_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_endpoint_descriptor ss_bulk_out_desc = {
+	.bLength          =	USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType  =	USB_DT_ENDPOINT,
+	.bEndpointAddress =	USB_DIR_OUT,
+	.bmAttributes     =	USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize   =	cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ss_bulk_out_comp_desc = {
+	.bLength =		sizeof(ss_bulk_out_comp_desc),
+	.bDescriptorType =	USB_DT_SS_ENDPOINT_COMP,
+
+	/* the following 2 values can be tweaked if necessary */
+	/* .bMaxBurst =		0, */
+	/* .bmAttributes =	0, */
+};
+
+static struct usb_descriptor_header *fs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &fs_bulk_in_desc,
+	(struct usb_descriptor_header *) &fs_bulk_out_desc,
+	NULL,
+};
+static struct usb_descriptor_header *hs_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &hs_bulk_in_desc,
+	(struct usb_descriptor_header *) &hs_bulk_out_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_diag_desc[] = {
+	(struct usb_descriptor_header *) &intf_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_desc,
+	(struct usb_descriptor_header *) &ss_bulk_in_comp_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_desc,
+	(struct usb_descriptor_header *) &ss_bulk_out_comp_desc,
+	NULL,
+};
+
+/**
+ * struct diag_context - USB diag function driver private structure
+ * @function: function structure for USB interface
+ * @out: USB OUT endpoint struct
+ * @in: USB IN endpoint struct
+ * @in_desc: USB IN endpoint descriptor struct
+ * @out_desc: USB OUT endpoint descriptor struct
+ * @read_pool: List of requests used for Rx (OUT ep)
+ * @write_pool: List of requests used for Tx (IN ep)
+ * @lock: Spinlock to protect the read_pool and write_pool lists
+ * @cdev: USB composite device struct
+ * @ch: USB diag channel
+ *
+ */
+struct diag_context {
+	struct usb_function function;
+	struct usb_ep *out;
+	struct usb_ep *in;
+	struct list_head read_pool;
+	struct list_head write_pool;
+	spinlock_t lock;
+	unsigned int configured;
+	struct usb_composite_dev *cdev;
+	struct usb_diag_ch *ch;
+	struct kref kref;
+
+	/* pkt counters */
+	unsigned long dpkts_tolaptop;
+	unsigned long dpkts_tomodem;
+	unsigned int dpkts_tolaptop_pending;
+
+	/* A list node inside the diag_dev_list */
+	struct list_head list_item;
+};
+
+static struct list_head diag_dev_list;
+
+static inline struct diag_context *func_to_diag(struct usb_function *f)
+{
+	return container_of(f, struct diag_context, function);
+}
+
+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identically to kref_put with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+		void (*release)(struct kref *kref),
+		spinlock_t *lock)
+{
+	unsigned long flags;
+
+	WARN_ON(release == NULL);
+	if (atomic_add_unless(&kref->refcount, -1, 1))
+		return 0;
+	spin_lock_irqsave(lock, flags);
+	if (atomic_dec_and_test(&kref->refcount)) {
+		release(kref);
+		local_irq_restore(flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return 0;
+}
+
+/* Called with ctxt->lock held; i.e. only use with kref_put_spinlock_irqsave */
+static void diag_context_release(struct kref *kref)
+{
+	struct diag_context *ctxt =
+		container_of(kref, struct diag_context, kref);
+
+	spin_unlock(&ctxt->lock);
+	kfree(ctxt);
+}
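+
+/*
+ * Illustrative call pattern (matching the completion handlers below): callers
+ * must not hold ctxt->lock; on the final put the lock is taken,
+ * diag_context_release() drops it with spin_unlock(), and the helper then
+ * restores the saved IRQ flags:
+ *
+ *	kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+ *			&ctxt->lock);
+ */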
+
+static void diag_update_pid_and_serial_num(struct diag_context *ctxt)
+{
+	struct usb_composite_dev *cdev = ctxt->cdev;
+	struct usb_gadget_strings **table;
+	struct usb_string *s;
+	struct usb_gadget_string_container *uc;
+	struct dload_struct local_diag_dload = { 0 };
+
+	/*
+	 * Update the PID and serial number in the dload region only if the
+	 * diag interface is the zeroth interface.
+	 */
+	if (intf_desc.bInterfaceNumber)
+		return;
+
+	if (!diag_dload) {
+		pr_debug("%s: unable to update PID and serial_no\n", __func__);
+		return;
+	}
+
+	/* update pid */
+	local_diag_dload.magic_struct.pid = PID_MAGIC_ID;
+	local_diag_dload.pid = cdev->desc.idProduct;
+	local_diag_dload.magic_struct.serial_num = SERIAL_NUM_MAGIC_ID;
+
+	list_for_each_entry(uc, &cdev->gstrings, list) {
+		table = (struct usb_gadget_strings **)uc->stash;
+		if (!table) {
+			pr_err("%s: can't update dload cookie\n", __func__);
+			break;
+		}
+
+		for (s = (*table)->strings; s && s->s; s++) {
+			if (s->id == cdev->desc.iSerialNumber) {
+				strlcpy(local_diag_dload.serial_number, s->s,
+					SERIAL_NUMBER_LENGTH);
+				goto update_dload;
+			}
+		}
+
+	}
+
+update_dload:
+	pr_debug("%s: dload:%p pid:%x serial_num:%s\n",
+				__func__, diag_dload, local_diag_dload.pid,
+				local_diag_dload.serial_number);
+
+	memcpy_toio(diag_dload, &local_diag_dload, sizeof(local_diag_dload));
+}
+
+static void diag_write_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	ctxt->dpkts_tolaptop_pending--;
+
+	if (!req->status) {
+		if ((req->length >= ep->maxpacket) &&
+				((req->length % ep->maxpacket) == 0)) {
+			ctxt->dpkts_tolaptop_pending++;
+			req->length = 0;
+			d_req->actual = req->actual;
+			d_req->status = req->status;
+			/* Queue zero length packet */
+			if (!usb_ep_queue(ctxt->in, req, GFP_ATOMIC))
+				return;
+			ctxt->dpkts_tolaptop_pending--;
+		} else {
+			ctxt->dpkts_tolaptop++;
+		}
+	}
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->write_pool);
+	if (req->length != 0) {
+		d_req->actual = req->actual;
+		d_req->status = req->status;
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	if (ctxt->ch && ctxt->ch->notify)
+		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_WRITE_DONE, d_req);
+
+	kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+			&ctxt->lock);
+}
+
+static void diag_read_complete(struct usb_ep *ep,
+		struct usb_request *req)
+{
+	struct diag_context *ctxt = ep->driver_data;
+	struct diag_request *d_req = req->context;
+	unsigned long flags;
+
+	d_req->actual = req->actual;
+	d_req->status = req->status;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_add_tail(&req->list, &ctxt->read_pool);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	ctxt->dpkts_tomodem++;
+
+	if (ctxt->ch && ctxt->ch->notify)
+		ctxt->ch->notify(ctxt->ch->priv, USB_DIAG_READ_DONE, d_req);
+
+	kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+			&ctxt->lock);
+}
+
+/**
+ * usb_diag_open() - Open a diag channel over USB
+ * @name: Name of the channel
+ * @priv: Private structure pointer which will be passed in notify()
+ * @notify: Callback function to receive notifications
+ *
+ * This function iterates over the available channels and returns
+ * the channel handler if the name matches. The notify callback is called
+ * for CONNECT, DISCONNECT, READ_DONE and WRITE_DONE events.
+ *
+ */
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+		void (*notify)(void *, unsigned int, struct diag_request *))
+{
+	struct usb_diag_ch *ch;
+	unsigned long flags;
+	int found = 0;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	/* Check if we already have a channel with this name */
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ch_lock, flags);
+
+	if (!found) {
+		ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+		if (!ch)
+			return ERR_PTR(-ENOMEM);
+	}
+
+	ch->name = name;
+	ch->priv = priv;
+	ch->notify = notify;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	list_add_tail(&ch->list, &usb_diag_ch_list);
+	spin_unlock_irqrestore(&ch_lock, flags);
+
+	return ch;
+}
+EXPORT_SYMBOL(usb_diag_open);
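+
+/*
+ * Illustrative client usage (sketch; "diag" is a typical channel name and
+ * my_diag_notify is a hypothetical callback). usb_diag_alloc_req() only
+ * succeeds once the interface is configured, since it needs ch->priv_usb:
+ *
+ *	ch = usb_diag_open("diag", priv, my_diag_notify);
+ *	if (IS_ERR(ch))
+ *		return PTR_ERR(ch);
+ *	...
+ *	err = usb_diag_alloc_req(ch, n_tx, n_rx);
+ *	...
+ *	usb_diag_close(ch);
+ */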
+
+/**
+ * usb_diag_close() - Close a diag channel over USB
+ * @ch: Channel handler
+ *
+ * This function closes the diag channel.
+ *
+ */
+void usb_diag_close(struct usb_diag_ch *ch)
+{
+	struct diag_context *dev = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ch_lock, flags);
+	ch->priv = NULL;
+	ch->notify = NULL;
+	/* Free up the resources now that the channel is no longer active */
+	list_del(&ch->list);
+	list_for_each_entry(dev, &diag_dev_list, list_item)
+		if (dev->ch == ch)
+			dev->ch = NULL;
+	kfree(ch);
+
+	spin_unlock_irqrestore(&ch_lock, flags);
+}
+EXPORT_SYMBOL(usb_diag_close);
+
+static void free_reqs(struct diag_context *ctxt)
+{
+	struct list_head *act, *tmp;
+	struct usb_request *req;
+
+	list_for_each_safe(act, tmp, &ctxt->write_pool) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->in, req);
+	}
+
+	list_for_each_safe(act, tmp, &ctxt->read_pool) {
+		req = list_entry(act, struct usb_request, list);
+		list_del(&req->list);
+		usb_ep_free_request(ctxt->out, req);
+	}
+}
+
+/**
+ * usb_diag_alloc_req() - Allocate USB requests
+ * @ch: Channel handler
+ * @n_write: Number of requests for Tx
+ * @n_read: Number of requests for Rx
+ *
+ * This function allocates read and write USB requests for the interface
+ * associated with this channel. The data buffers themselves are not allocated
+ * here; they are supplied by the diag char driver.
+ *
+ */
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	struct usb_request *req;
+	int i;
+	unsigned long flags;
+
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	/* Free previous session's stale requests */
+	free_reqs(ctxt);
+	for (i = 0; i < n_write; i++) {
+		req = usb_ep_alloc_request(ctxt->in, GFP_ATOMIC);
+		if (!req)
+			goto fail;
+		kmemleak_not_leak(req);
+		req->complete = diag_write_complete;
+		list_add_tail(&req->list, &ctxt->write_pool);
+	}
+
+	for (i = 0; i < n_read; i++) {
+		req = usb_ep_alloc_request(ctxt->out, GFP_ATOMIC);
+		if (!req)
+			goto fail;
+		kmemleak_not_leak(req);
+		req->complete = diag_read_complete;
+		list_add_tail(&req->list, &ctxt->read_pool);
+	}
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return 0;
+fail:
+	free_reqs(ctxt);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+	return -ENOMEM;
+
+}
+EXPORT_SYMBOL(usb_diag_alloc_req);
+#define DWC3_MAX_REQUEST_SIZE (16 * 1024 * 1024)
+/**
+ * usb_diag_request_size - Max request size for controller
+ * @ch: Channel handler
+ *
+ * Inform the diag driver of the maximum request size so that it can split
+ * packets into chunks the controller can handle.
+ */
+int usb_diag_request_size(struct usb_diag_ch *ch)
+{
+	return DWC3_MAX_REQUEST_SIZE;
+}
+EXPORT_SYMBOL(usb_diag_request_size);
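+
+/*
+ * Illustrative use by a diag client (sketch; fill_diag_request and the
+ * variable names are hypothetical): split a large payload into chunks no
+ * bigger than the controller limit:
+ *
+ *	size_t max = usb_diag_request_size(ch), off, n;
+ *
+ *	for (off = 0; off < len; off += n) {
+ *		n = min_t(size_t, len - off, max);
+ *		fill_diag_request(d_req, payload + off, n);
+ *		usb_diag_write(ch, d_req);
+ *	}
+ */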
+
+/**
+ * usb_diag_read() - Read data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on OUT endpoint of the interface corresponding to this
+ * channel. This function returns an appropriate error code if the interface
+ * is not configured, no Rx requests are available, or queuing on the endpoint
+ * fails.
+ *
+ * This function operates asynchronously. READ_DONE event is notified after
+ * completion of OUT request.
+ *
+ */
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req;
+	struct usb_ep *out;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured || !ctxt->out) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	out = ctxt->out;
+
+	if (list_empty(&ctxt->read_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->read_pool, struct usb_request, list);
+	list_del(&req->list);
+	kref_get(&ctxt->kref); /* put called in complete callback */
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+
+	/* make sure context is still valid after releasing lock */
+	if (ctxt != ch->priv_usb) {
+		usb_ep_free_request(out, req);
+		kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+				&ctxt->lock);
+		return -EIO;
+	}
+
+	if (usb_ep_queue(out, req, GFP_ATOMIC)) {
+		/* On error, add the request back to the read pool */
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->read_pool);
+		/* 1 error message for every 10 sec */
+		if (__ratelimit(&rl))
+			ERROR(ctxt->cdev, "%s: cannot queue read request\n",
+								__func__);
+
+		if (kref_put(&ctxt->kref, diag_context_release))
+			/* diag_context_release called spin_unlock already */
+			local_irq_restore(flags);
+		else
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_read);
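+
+/*
+ * Sketch of a client notify() callback (illustrative; my_diag_notify and
+ * handle_incoming are hypothetical). The events delivered are the ones raised
+ * by this driver: CONNECT/DISCONNECT from set_alt()/disable(), and
+ * READ_DONE/WRITE_DONE from the completion handlers above:
+ *
+ *	static void my_diag_notify(void *priv, unsigned int event,
+ *				   struct diag_request *d_req)
+ *	{
+ *		if (event == USB_DIAG_READ_DONE)
+ *			handle_incoming(d_req->buf, d_req->actual);
+ *	}
+ */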
+
+/**
+ * usb_diag_write() - Write data from USB diag channel
+ * @ch: Channel handler
+ * @d_req: Diag request struct
+ *
+ * Enqueue a request on IN endpoint of the interface corresponding to this
+ * channel. This function returns an appropriate error code if the interface
+ * is not configured, no Tx requests are available, or queuing on the endpoint
+ * fails.
+ *
+ * This function operates asynchronously. WRITE_DONE event is notified after
+ * completion of IN request.
+ *
+ */
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	struct diag_context *ctxt = ch->priv_usb;
+	unsigned long flags;
+	struct usb_request *req = NULL;
+	struct usb_ep *in;
+	static DEFINE_RATELIMIT_STATE(rl, 10*HZ, 1);
+
+	if (!ctxt)
+		return -ENODEV;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+
+	if (!ctxt->configured || !ctxt->in) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	in = ctxt->in;
+
+	if (list_empty(&ctxt->write_pool)) {
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+		ERROR(ctxt->cdev, "%s: no requests available\n", __func__);
+		return -EAGAIN;
+	}
+
+	req = list_first_entry(&ctxt->write_pool, struct usb_request, list);
+	list_del(&req->list);
+	kref_get(&ctxt->kref); /* put called in complete callback */
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+
+	req->buf = d_req->buf;
+	req->length = d_req->length;
+	req->context = d_req;
+
+	/* make sure context is still valid after releasing lock */
+	if (ctxt != ch->priv_usb) {
+		usb_ep_free_request(in, req);
+		kref_put_spinlock_irqsave(&ctxt->kref, diag_context_release,
+				&ctxt->lock);
+		return -EIO;
+	}
+
+	ctxt->dpkts_tolaptop_pending++;
+	if (usb_ep_queue(in, req, GFP_ATOMIC)) {
+		/* On error, add the request back to the write pool */
+		spin_lock_irqsave(&ctxt->lock, flags);
+		list_add_tail(&req->list, &ctxt->write_pool);
+		ctxt->dpkts_tolaptop_pending--;
+		/* 1 error message for every 10 sec */
+		if (__ratelimit(&rl))
+			ERROR(ctxt->cdev, "%s: cannot queue write request\n",
+								__func__);
+
+		if (kref_put(&ctxt->kref, diag_context_release))
+			/* diag_context_release called spin_unlock already */
+			local_irq_restore(flags);
+		else
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		return -EIO;
+	}
+
+	/*
+	 * It's possible that both write completion AND unbind could have been
+	 * completed asynchronously by this point. Since they both release the
+	 * kref, ctxt is _NOT_ guaranteed to be valid here.
+	 */
+
+	return 0;
+}
+EXPORT_SYMBOL(usb_diag_write);
+
+static void diag_function_disable(struct usb_function *f)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	unsigned long flags;
+
+	DBG(dev->cdev, "diag_function_disable\n");
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 0;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->ch && dev->ch->notify)
+		dev->ch->notify(dev->ch->priv, USB_DIAG_DISCONNECT, NULL);
+
+	usb_ep_disable(dev->in);
+	dev->in->driver_data = NULL;
+
+	usb_ep_disable(dev->out);
+	dev->out->driver_data = NULL;
+	if (dev->ch)
+		dev->ch->priv_usb = NULL;
+}
+
+static void diag_free_func(struct usb_function *f)
+{
+	struct diag_context *ctxt = func_to_diag(f);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	list_del(&ctxt->list_item);
+	if (kref_put(&ctxt->kref, diag_context_release))
+		/* diag_context_release called spin_unlock already */
+		local_irq_restore(flags);
+	else
+		spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+static int diag_function_set_alt(struct usb_function *f,
+		unsigned int intf, unsigned int alt)
+{
+	struct diag_context  *dev = func_to_diag(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	unsigned long flags;
+	int rc = 0;
+
+	if (config_ep_by_speed(cdev->gadget, f, dev->in) ||
+	    config_ep_by_speed(cdev->gadget, f, dev->out)) {
+		dev->in->desc = NULL;
+		dev->out->desc = NULL;
+		return -EINVAL;
+	}
+
+	if (!dev->ch)
+		return -ENODEV;
+
+	/*
+	 * Indicate to the diag channel that the active diag device is dev,
+	 * since several diag devices can point to the same channel.
+	 */
+	dev->ch->priv_usb = dev;
+
+	dev->in->driver_data = dev;
+	rc = usb_ep_enable(dev->in);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->in->name, rc);
+		return rc;
+	}
+	dev->out->driver_data = dev;
+	rc = usb_ep_enable(dev->out);
+	if (rc) {
+		ERROR(dev->cdev, "can't enable %s, result %d\n",
+						dev->out->name, rc);
+		usb_ep_disable(dev->in);
+		return rc;
+	}
+
+	dev->dpkts_tolaptop = 0;
+	dev->dpkts_tomodem = 0;
+	dev->dpkts_tolaptop_pending = 0;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	dev->configured = 1;
+	spin_unlock_irqrestore(&dev->lock, flags);
+
+	if (dev->ch->notify)
+		dev->ch->notify(dev->ch->priv, USB_DIAG_CONNECT, NULL);
+
+	return rc;
+}
+
+static void diag_function_unbind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct diag_context *ctxt = func_to_diag(f);
+	unsigned long flags;
+
+	if (gadget_is_superspeed(c->cdev->gadget))
+		usb_free_descriptors(f->ss_descriptors);
+	if (gadget_is_dualspeed(c->cdev->gadget))
+		usb_free_descriptors(f->hs_descriptors);
+
+	usb_free_descriptors(f->fs_descriptors);
+
+	/*
+	 * Channel priv_usb may point to other diag function.
+	 * Clear the priv_usb only if the channel is used by the
+	 * diag dev we unbind here.
+	 */
+	if (ctxt->ch && ctxt->ch->priv_usb == ctxt)
+		ctxt->ch->priv_usb = NULL;
+
+	spin_lock_irqsave(&ctxt->lock, flags);
+	/* Free any pending USB requests from last session */
+	free_reqs(ctxt);
+	spin_unlock_irqrestore(&ctxt->lock, flags);
+}
+
+static int diag_function_bind(struct usb_configuration *c,
+		struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct diag_context *ctxt = func_to_diag(f);
+	struct usb_ep *ep;
+	int status = -ENODEV;
+
+	ctxt->cdev = c->cdev;
+
+	intf_desc.bInterfaceNumber =  usb_interface_id(c, f);
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_in_desc);
+	if (!ep)
+		goto fail;
+	ctxt->in = ep;
+	ep->driver_data = ctxt;
+
+	ep = usb_ep_autoconfig(cdev->gadget, &fs_bulk_out_desc);
+	if (!ep)
+		goto fail;
+	ctxt->out = ep;
+	ep->driver_data = ctxt;
+
+	status = -ENOMEM;
+	/* copy descriptors, and track endpoint copies */
+	f->fs_descriptors = usb_copy_descriptors(fs_diag_desc);
+	if (!f->fs_descriptors)
+		goto fail;
+
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		hs_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		hs_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->hs_descriptors = usb_copy_descriptors(hs_diag_desc);
+		if (!f->hs_descriptors)
+			goto fail;
+	}
+
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		ss_bulk_in_desc.bEndpointAddress =
+				fs_bulk_in_desc.bEndpointAddress;
+		ss_bulk_out_desc.bEndpointAddress =
+				fs_bulk_out_desc.bEndpointAddress;
+
+		/* copy descriptors, and track endpoint copies */
+		f->ss_descriptors = usb_copy_descriptors(ss_diag_desc);
+		if (!f->ss_descriptors)
+			goto fail;
+	}
+
+	/* Allow only first diag channel to update pid and serial no */
+	if (ctxt == list_first_entry(&diag_dev_list,
+				struct diag_context, list_item))
+		diag_update_pid_and_serial_num(ctxt);
+
+	return 0;
+fail:
+	if (f->ss_descriptors)
+		usb_free_descriptors(f->ss_descriptors);
+	if (f->hs_descriptors)
+		usb_free_descriptors(f->hs_descriptors);
+	if (f->fs_descriptors)
+		usb_free_descriptors(f->fs_descriptors);
+	if (ctxt->out)
+		ctxt->out->driver_data = NULL;
+	if (ctxt->in)
+		ctxt->in->driver_data = NULL;
+	return status;
+
+}
+
+static struct diag_context *diag_context_init(const char *name)
+{
+	struct diag_context *dev;
+	struct usb_diag_ch *_ch;
+	int found = 0;
+
+	pr_debug("%s\n", __func__);
+
+	list_for_each_entry(_ch, &usb_diag_ch_list, list) {
+		if (!strcmp(name, _ch->name)) {
+			found = 1;
+			break;
+		}
+	}
+	if (!found) {
+		pr_err("%s: unable to get diag usb channel\n", __func__);
+		return ERR_PTR(-ENODEV);
+	}
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	list_add_tail(&dev->list_item, &diag_dev_list);
+
+	/*
+	 * Several diag devices can point to the same channel when they belong
+	 * to different configurations; only the active diag device claims the
+	 * channel by setting ch->priv_usb (see diag_function_set_alt).
+	 */
+	dev->ch = _ch;
+
+	dev->function.name = _ch->name;
+	dev->function.fs_descriptors = fs_diag_desc;
+	dev->function.hs_descriptors = hs_diag_desc;
+	dev->function.bind = diag_function_bind;
+	dev->function.unbind = diag_function_unbind;
+	dev->function.set_alt = diag_function_set_alt;
+	dev->function.disable = diag_function_disable;
+	dev->function.free_func = diag_free_func;
+	kref_init(&dev->kref);
+	spin_lock_init(&dev->lock);
+	INIT_LIST_HEAD(&dev->read_pool);
+	INIT_LIST_HEAD(&dev->write_pool);
+
+	return dev;
+}
+
+#if defined(CONFIG_DEBUG_FS)
+static char debug_buffer[PAGE_SIZE];
+
+static ssize_t debug_read_stats(struct file *file, char __user *ubuf,
+		size_t count, loff_t *ppos)
+{
+	char *buf = debug_buffer;
+	int temp = 0;
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt = ch->priv_usb;
+		unsigned long flags;
+
+		if (ctxt) {
+			spin_lock_irqsave(&ctxt->lock, flags);
+			temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+					"---Name: %s---\n"
+					"endpoints: %s, %s\n"
+					"dpkts_tolaptop: %lu\n"
+					"dpkts_tomodem:  %lu\n"
+					"pkts_tolaptop_pending: %u\n",
+					ch->name,
+					ctxt->in->name, ctxt->out->name,
+					ctxt->dpkts_tolaptop,
+					ctxt->dpkts_tomodem,
+					ctxt->dpkts_tolaptop_pending);
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		}
+	}
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
+}
+
+static ssize_t debug_reset_stats(struct file *file, const char __user *buf,
+				 size_t count, loff_t *ppos)
+{
+	struct usb_diag_ch *ch;
+
+	list_for_each_entry(ch, &usb_diag_ch_list, list) {
+		struct diag_context *ctxt = ch->priv_usb;
+		unsigned long flags;
+
+		if (ctxt) {
+			spin_lock_irqsave(&ctxt->lock, flags);
+			ctxt->dpkts_tolaptop = 0;
+			ctxt->dpkts_tomodem = 0;
+			ctxt->dpkts_tolaptop_pending = 0;
+			spin_unlock_irqrestore(&ctxt->lock, flags);
+		}
+	}
+
+	return count;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return 0;
+}
+
+static const struct file_operations debug_fdiag_ops = {
+	.open = debug_open,
+	.read = debug_read_stats,
+	.write = debug_reset_stats,
+};
+
+struct dentry *dent_diag;
+static void fdiag_debugfs_init(void)
+{
+	struct dentry *dent_diag_status;
+
+	dent_diag = debugfs_create_dir("usb_diag", 0);
+	if (!dent_diag || IS_ERR(dent_diag))
+		return;
+
+	dent_diag_status = debugfs_create_file("status", 0444, dent_diag, 0,
+			&debug_fdiag_ops);
+
+	if (!dent_diag_status || IS_ERR(dent_diag_status)) {
+		debugfs_remove(dent_diag);
+		dent_diag = NULL;
+		return;
+	}
+}
+
+static void fdiag_debugfs_remove(void)
+{
+	debugfs_remove_recursive(dent_diag);
+}
+#else
+static inline void fdiag_debugfs_init(void) {}
+static inline void fdiag_debugfs_remove(void) {}
+#endif
+
+static void diag_opts_release(struct config_item *item)
+{
+	struct diag_opts *opts = to_diag_opts(item);
+
+	usb_put_function_instance(&opts->func_inst);
+}
+
+static struct configfs_item_operations diag_item_ops = {
+	.release	= diag_opts_release,
+};
+
+static struct config_item_type diag_func_type = {
+	.ct_item_ops	= &diag_item_ops,
+	.ct_owner	= THIS_MODULE,
+};
+
+static int diag_set_inst_name(struct usb_function_instance *fi,
+	const char *name)
+{
+	struct diag_opts *opts = container_of(fi, struct diag_opts, func_inst);
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	opts->name = ptr;
+
+	return 0;
+}
+
+static void diag_free_inst(struct usb_function_instance *f)
+{
+	struct diag_opts *opts;
+
+	opts = container_of(f, struct diag_opts, func_inst);
+	kfree(opts->name);
+	kfree(opts);
+}
+
+static struct usb_function_instance *diag_alloc_inst(void)
+{
+	struct diag_opts *opts;
+
+	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
+	if (!opts)
+		return ERR_PTR(-ENOMEM);
+
+	opts->func_inst.set_inst_name = diag_set_inst_name;
+	opts->func_inst.free_func_inst = diag_free_inst;
+	config_group_init_type_name(&opts->func_inst.group, "",
+				    &diag_func_type);
+
+	return &opts->func_inst;
+}
+
+static struct usb_function *diag_alloc(struct usb_function_instance *fi)
+{
+	struct diag_opts *opts;
+	struct diag_context *dev;
+
+	opts = container_of(fi, struct diag_opts, func_inst);
+
+	dev = diag_context_init(opts->name);
+	if (IS_ERR(dev))
+		return ERR_CAST(dev);
+
+	return &dev->function;
+}
+
+DECLARE_USB_FUNCTION(diag, diag_alloc_inst, diag_alloc);
+
+static int __init diag_init(void)
+{
+	struct device_node *np;
+	int ret;
+
+	INIT_LIST_HEAD(&diag_dev_list);
+
+	fdiag_debugfs_init();
+
+	ret = usb_function_register(&diagusb_func);
+	if (ret) {
+		pr_err("%s: failed to register diag %d\n", __func__, ret);
+		return ret;
+	}
+
+	np = of_find_compatible_node(NULL, NULL, "qcom,msm-imem-diag-dload");
+	if (!np)
+		np = of_find_compatible_node(NULL, NULL, "qcom,android-usb");
+
+	if (!np)
+		pr_warn("diag: failed to find diag_dload imem node\n");
+
+	diag_dload  = np ? of_iomap(np, 0) : NULL;
+
+	return ret;
+}
+
+static void __exit diag_exit(void)
+{
+	struct list_head *act, *tmp;
+	struct usb_diag_ch *_ch;
+	unsigned long flags;
+
+	if (diag_dload)
+		iounmap(diag_dload);
+
+	usb_function_unregister(&diagusb_func);
+
+	fdiag_debugfs_remove();
+
+	list_for_each_safe(act, tmp, &usb_diag_ch_list) {
+		_ch = list_entry(act, struct usb_diag_ch, list);
+
+		spin_lock_irqsave(&ch_lock, flags);
+		/* Free if diagchar is not using the channel anymore */
+		if (!_ch->priv) {
+			list_del(&_ch->list);
+			kfree(_ch);
+		}
+		spin_unlock_irqrestore(&ch_lock, flags);
+	}
+
+}
+
+module_init(diag_init);
+module_exit(diag_exit);
+
+MODULE_DESCRIPTION("Diag function driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 17989b7..edf4515 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -24,6 +24,7 @@
 #include <linux/hid.h>
 #include <linux/module.h>
 #include <linux/uio.h>
+#include <linux/ipc_logging.h>
 #include <asm/unaligned.h>
 
 #include <linux/usb/composite.h>
@@ -41,6 +42,15 @@
 
 #define FUNCTIONFS_MAGIC	0xa647361 /* Chosen by a honest dice roll ;) */
 
+#define NUM_PAGES	10 /* # of pages for ipc logging */
+
+static void *ffs_ipc_log;
+#define ffs_log(fmt, ...) do { \
+	ipc_log_string(ffs_ipc_log, "%s: " fmt,  __func__, \
+			##__VA_ARGS__); \
+	pr_debug(fmt, ##__VA_ARGS__); \
+} while (0)
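+
+/*
+ * Illustrative expansion: ffs_log("enter: state %d", ffs->state) records the
+ * message, prefixed with the calling function's name, in the "f_fs" ipc log
+ * context created at init time (see the ipc_log_context_create() call below)
+ * and also emits it via pr_debug().
+ */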
+
 /* Reference counter handling */
 static void ffs_data_get(struct ffs_data *ffs);
 static void ffs_data_put(struct ffs_data *ffs);
@@ -278,6 +288,9 @@
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 
+	ffs_log("enter: state %d setup_state %d flags %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	req->buf      = data;
 	req->length   = len;
 
@@ -302,11 +315,18 @@
 	}
 
 	ffs->setup_state = FFS_NO_SETUP;
+
+	ffs_log("exit: state %d setup_state %d flags %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	return req->status ? req->status : req->actual;
 }
 
 static int __ffs_ep0_stall(struct ffs_data *ffs)
 {
+	ffs_log("state %d setup_state %d flags %lu can_stall %d", ffs->state,
+		ffs->setup_state, ffs->flags, ffs->ev.can_stall);
+
 	if (ffs->ev.can_stall) {
 		pr_vdebug("ep0 stall\n");
 		usb_ep_set_halt(ffs->gadget->ep0);
@@ -327,6 +347,9 @@
 
 	ENTER();
 
+	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -455,6 +478,9 @@
 		break;
 	}
 
+	ffs_log("exit:ret %zu state %d setup_state %d flags %lu", ret,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	mutex_unlock(&ffs->mutex);
 	return ret;
 }
@@ -488,6 +514,10 @@
 			ffs->ev.count * sizeof *ffs->ev.types);
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
+
+	ffs_log("state %d setup_state %d flags %lu #evt %zu", ffs->state,
+		ffs->setup_state, ffs->flags, n);
+
 	mutex_unlock(&ffs->mutex);
 
 	return unlikely(copy_to_user(buf, events, size)) ? -EFAULT : size;
@@ -503,6 +533,9 @@
 
 	ENTER();
 
+	ffs_log("enter:len %zu state %d setup_state %d flags %lu", len,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/* Fast check if setup was canceled */
 	if (ffs_setup_state_clear_cancelled(ffs) == FFS_SETUP_CANCELLED)
 		return -EIDRM;
@@ -591,8 +624,12 @@
 
 	spin_unlock_irq(&ffs->ev.waitq.lock);
 done_mutex:
+	ffs_log("exit:ret %d state %d setup_state %d flags %lu", ret,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	mutex_unlock(&ffs->mutex);
 	kfree(data);
+
 	return ret;
 }
 
@@ -602,6 +639,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (unlikely(ffs->state == FFS_CLOSING))
 		return -EBUSY;
 
@@ -617,6 +657,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	ffs_data_closed(ffs);
 
 	return 0;
@@ -630,6 +673,9 @@
 
 	ENTER();
 
+	ffs_log("state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (code == FUNCTIONFS_INTERFACE_REVMAP) {
 		struct ffs_function *func = ffs->func;
 		ret = func ? ffs_func_revmap_intf(func, value) : -ENODEV;
@@ -648,6 +694,9 @@
 	unsigned int mask = POLLWRNORM;
 	int ret;
 
+	ffs_log("enter:state %d setup_state %d flags %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	poll_wait(file, &ffs->ev.waitq, wait);
 
 	ret = ffs_mutex_lock(&ffs->mutex, file->f_flags & O_NONBLOCK);
@@ -678,6 +727,8 @@
 		break;
 	}
 
+	ffs_log("exit: mask %u", mask);
+
 	mutex_unlock(&ffs->mutex);
 
 	return mask;
@@ -703,6 +754,7 @@
 	if (likely(req->context)) {
 		struct ffs_ep *ep = _ep->driver_data;
 		ep->status = req->status ? req->status : req->actual;
+		ffs_log("ep status %d for req %p", ep->status, req);
 		complete(req->context);
 	}
 }
@@ -758,6 +810,8 @@
 					 io_data->req->actual;
 	bool kiocb_has_eventfd = io_data->kiocb->ki_flags & IOCB_EVENTFD;
 
+	ffs_log("enter: ret %d", ret);
+
 	if (io_data->read && ret > 0) {
 		use_mm(io_data->mm);
 		ret = ffs_copy_to_iter(io_data->buf, ret, &io_data->data);
@@ -775,6 +829,8 @@
 		kfree(io_data->to_free);
 	kfree(io_data->buf);
 	kfree(io_data);
+
+	ffs_log("exit");
 }
 
 static void ffs_epfile_async_io_complete(struct usb_ep *_ep,
@@ -784,8 +840,12 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	INIT_WORK(&io_data->work, ffs_user_copy_worker);
 	schedule_work(&io_data->work);
+
+	ffs_log("exit");
 }
 
 static void __ffs_epfile_read_buffer_free(struct ffs_epfile *epfile)
@@ -879,6 +939,8 @@
 	ssize_t ret, data_len = -EINVAL;
 	int halt;
 
+	ffs_log("enter: epfile name %s", epfile->name);
+
 	/* Are we still active? */
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
@@ -1049,6 +1111,9 @@
 	mutex_unlock(&epfile->mutex);
 error:
 	kfree(data);
+
+	ffs_log("exit: ret %zu", ret);
+
 	return ret;
 }
 
@@ -1059,12 +1124,18 @@
 
 	ENTER();
 
+	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
 	file->private_data = epfile;
 	ffs_data_opened(epfile->ffs);
 
+	ffs_log("exit:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	return 0;
 }
 
@@ -1076,6 +1147,9 @@
 
 	ENTER();
 
+	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	spin_lock_irq(&epfile->ffs->eps_lock);
 
 	if (likely(io_data && io_data->ep && io_data->req))
@@ -1085,6 +1159,8 @@
 
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
+	ffs_log("exit: value %d", value);
+
 	return value;
 }
 
@@ -1095,6 +1171,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1121,6 +1199,9 @@
 		kfree(p);
 	else
 		*from = p->data;
+
+	ffs_log("exit");
+
 	return res;
 }
 
@@ -1131,6 +1212,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!is_sync_kiocb(kiocb)) {
 		p = kmalloc(sizeof(io_data), GFP_KERNEL);
 		if (unlikely(!p))
@@ -1169,6 +1252,9 @@
 	} else {
 		*to = p->data;
 	}
+
+	ffs_log("exit");
+
 	return res;
 }
 
@@ -1180,8 +1266,13 @@
 	ENTER();
 
 	__ffs_epfile_read_buffer_free(epfile);
+	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	ffs_data_closed(epfile->ffs);
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -1193,6 +1284,9 @@
 
 	ENTER();
 
+	ffs_log("enter:state %d setup_state %d flag %lu", epfile->ffs->state,
+		epfile->ffs->setup_state, epfile->ffs->flags);
+
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
@@ -1243,6 +1337,8 @@
 	}
 	spin_unlock_irq(&epfile->ffs->eps_lock);
 
+	ffs_log("exit:ret %d", ret);
+
 	return ret;
 }
 
@@ -1274,6 +1370,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	inode = new_inode(sb);
 
 	if (likely(inode)) {
@@ -1293,6 +1391,8 @@
 			inode->i_op  = iops;
 	}
 
+	ffs_log("exit");
+
 	return inode;
 }
 
@@ -1307,6 +1407,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	dentry = d_alloc_name(sb->s_root, name);
 	if (unlikely(!dentry))
 		return NULL;
@@ -1318,6 +1420,9 @@
 	}
 
 	d_add(dentry, inode);
+
+	ffs_log("exit");
+
 	return dentry;
 }
 
@@ -1343,6 +1448,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	ffs->sb              = sb;
 	data->ffs_data       = NULL;
 	sb->s_fs_info        = ffs;
@@ -1367,6 +1474,8 @@
 					 &ffs_ep0_operations)))
 		return -ENOMEM;
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -1374,6 +1483,8 @@
 {
 	ENTER();
 
+	ffs_log("enter");
+
 	if (!opts || !*opts)
 		return 0;
 
@@ -1456,6 +1567,8 @@
 		opts = comma + 1;
 	}
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -1481,6 +1594,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	ret = ffs_fs_parse_opts(&data, opts);
 	if (unlikely(ret < 0))
 		return ERR_PTR(ret);
@@ -1510,6 +1625,9 @@
 		ffs_release_dev(data.ffs_data);
 		ffs_data_put(data.ffs_data);
 	}
+
+	ffs_log("exit");
+
 	return rv;
 }
 
@@ -1518,12 +1636,16 @@
 {
 	ENTER();
 
+	ffs_log("enter");
+
 	kill_litter_super(sb);
 	if (sb->s_fs_info) {
 		ffs_release_dev(sb->s_fs_info);
 		ffs_data_closed(sb->s_fs_info);
 		ffs_data_put(sb->s_fs_info);
 	}
+
+	ffs_log("exit");
 }
 
 static struct file_system_type ffs_fs_type = {
@@ -1549,6 +1671,8 @@
 	else
 		pr_err("failed registering file system (%d)\n", ret);
 
+	ffs_ipc_log = ipc_log_context_create(NUM_PAGES, "f_fs", 0);
+
 	return ret;
 }
 
@@ -1570,25 +1694,37 @@
 {
 	ENTER();
 
+	ffs_log("enter");
+
 	atomic_inc(&ffs->ref);
+
+	ffs_log("exit");
 }
 
 static void ffs_data_opened(struct ffs_data *ffs)
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	atomic_inc(&ffs->ref);
 	if (atomic_add_return(1, &ffs->opened) == 1 &&
 			ffs->state == FFS_DEACTIVATED) {
 		ffs->state = FFS_CLOSING;
 		ffs_data_reset(ffs);
 	}
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 static void ffs_data_put(struct ffs_data *ffs)
 {
 	ENTER();
 
+	ffs_log("enter");
+
 	if (unlikely(atomic_dec_and_test(&ffs->ref))) {
 		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
@@ -1597,12 +1733,17 @@
 		kfree(ffs->dev_name);
 		kfree(ffs);
 	}
+
+	ffs_log("exit");
 }
 
 static void ffs_data_closed(struct ffs_data *ffs)
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu opened %d", ffs->state,
+		ffs->setup_state, ffs->flags, atomic_read(&ffs->opened));
+
 	if (atomic_dec_and_test(&ffs->opened)) {
 		if (ffs->no_disconnect) {
 			ffs->state = FFS_DEACTIVATED;
@@ -1623,6 +1764,9 @@
 		ffs_data_reset(ffs);
 	}
 
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	ffs_data_put(ffs);
 }
 
@@ -1634,6 +1778,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	atomic_set(&ffs->ref, 1);
 	atomic_set(&ffs->opened, 0);
 	ffs->state = FFS_READ_DESCRIPTORS;
@@ -1645,6 +1791,8 @@
 	/* XXX REVISIT need to update it in some places, or do we? */
 	ffs->ev.can_stall = 1;
 
+	ffs_log("exit");
+
 	return ffs;
 }
 
@@ -1652,6 +1800,11 @@
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
+	pr_debug("%s: ffs->gadget= %p, ffs->flags= %lu\n",
+				__func__, ffs->gadget, ffs->flags);
 	ffs_closed(ffs);
 
 	BUG_ON(ffs->gadget);
@@ -1665,12 +1818,18 @@
 	kfree(ffs->raw_descs_data);
 	kfree(ffs->raw_strings);
 	kfree(ffs->stringtabs);
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 static void ffs_data_reset(struct ffs_data *ffs)
 {
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	ffs_data_clear(ffs);
 
 	ffs->epfiles = NULL;
@@ -1693,6 +1852,9 @@
 	ffs->state = FFS_READ_DESCRIPTORS;
 	ffs->setup_state = FFS_NO_SETUP;
 	ffs->flags = 0;
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 
@@ -1703,6 +1865,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	if (WARN_ON(ffs->state != FFS_ACTIVE
 		 || test_and_set_bit(FFS_FL_BOUND, &ffs->flags)))
 		return -EBADFD;
@@ -1728,6 +1893,10 @@
 	}
 
 	ffs->gadget = cdev->gadget;
+
+	ffs_log("exit: state %d setup_state %d flag %lu gadget %p\n",
+			ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
+
 	ffs_data_get(ffs);
 	return 0;
 }
@@ -1741,6 +1910,8 @@
 		ffs->ep0req = NULL;
 		ffs->gadget = NULL;
 		clear_bit(FFS_FL_BOUND, &ffs->flags);
+		ffs_log("state %d setup_state %d flag %lu gadget %p\n",
+			ffs->state, ffs->setup_state, ffs->flags, ffs->gadget);
 		ffs_data_put(ffs);
 	}
 }
@@ -1752,6 +1923,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	count = ffs->eps_count;
 	epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
 	if (!epfiles)
@@ -1776,6 +1950,10 @@
 	}
 
 	ffs->epfiles = epfiles;
+
+	ffs_log("exit: epfile name %s state %d setup_state %d flag %lu",
+		epfile->name, ffs->state, ffs->setup_state, ffs->flags);
+
 	return 0;
 }
 
@@ -1785,6 +1963,8 @@
 
 	ENTER();
 
+	ffs_log("enter: epfile name %s", epfile->name);
+
 	for (; count; --count, ++epfile) {
 		BUG_ON(mutex_is_locked(&epfile->mutex) ||
 		       waitqueue_active(&epfile->wait));
@@ -1796,6 +1976,8 @@
 	}
 
 	kfree(epfiles);
+
+	ffs_log("exit");
 }
 
 static void ffs_func_eps_disable(struct ffs_function *func)
@@ -1805,6 +1987,9 @@
 	unsigned count            = func->ffs->eps_count;
 	unsigned long flags;
 
+	ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+		func->ffs->setup_state, func->ffs->flags);
+
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	do {
 		/* pending requests get nuked */
@@ -1819,6 +2004,8 @@
 		}
 	} while (--count);
 	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
+
+	ffs_log("exit");
 }
 
 static int ffs_func_eps_enable(struct ffs_function *func)
@@ -1830,6 +2017,9 @@
 	unsigned long flags;
 	int ret = 0;
 
+	ffs_log("enter: state %d setup_state %d flag %lu", func->ffs->state,
+		func->ffs->setup_state, func->ffs->flags);
+
 	spin_lock_irqsave(&func->ffs->eps_lock, flags);
 	do {
 		struct usb_endpoint_descriptor *ds;
@@ -1854,11 +2044,20 @@
 
 		ep->ep->driver_data = ep;
 		ep->ep->desc = ds;
+
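+		/*
+		 * Pick the endpoint descriptor matching the gadget's current
+		 * connection speed before enabling the endpoint.
+		 */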
+		ret = config_ep_by_speed(func->gadget, &func->function, ep->ep);
+		if (ret) {
+			pr_err("%s(): config_ep_by_speed(%d) err for %s\n",
+						__func__, ret, ep->ep->name);
+			break;
+		}
+
 		ret = usb_ep_enable(ep->ep);
 		if (likely(!ret)) {
 			epfile->ep = ep;
 			epfile->in = usb_endpoint_dir_in(ds);
 			epfile->isoc = usb_endpoint_xfer_isoc(ds);
+			ffs_log("usb_ep_enable %s", ep->ep->name);
 		} else {
 			break;
 		}
@@ -1870,6 +2069,8 @@
 	} while (--count);
 	spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
 
+	ffs_log("exit: ret %d", ret);
+
 	return ret;
 }
 
@@ -1910,6 +2111,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	/* At least two bytes are required: length and type */
 	if (len < 2) {
 		pr_vdebug("descriptor too short\n");
@@ -2026,6 +2229,8 @@
 #undef __entity_check_STRING
 #undef __entity_check_ENDPOINT
 
+	ffs_log("exit: desc type %d length %d", _ds->bDescriptorType, length);
+
 	return length;
 }
 
@@ -2037,6 +2242,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	for (;;) {
 		int ret;
 
@@ -2064,6 +2271,8 @@
 		data += ret;
 		++num;
 	}
+
+	ffs_log("exit: len %u", len);
 }
 
 static int __ffs_data_do_entity(enum ffs_entity_type type,
@@ -2075,6 +2284,8 @@
 
 	ENTER();
 
+	ffs_log("enter: type %u", type);
+
 	switch (type) {
 	case FFS_DESCRIPTOR:
 		break;
@@ -2113,6 +2324,8 @@
 		break;
 	}
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -2122,6 +2335,8 @@
 	u16 bcd_version = le16_to_cpu(desc->bcdVersion);
 	u16 w_index = le16_to_cpu(desc->wIndex);
 
+	ffs_log("enter");
+
 	if (bcd_version != 1) {
 		pr_vdebug("unsupported os descriptors version: %d",
 			  bcd_version);
@@ -2139,6 +2354,8 @@
 		return -EINVAL;
 	}
 
+	ffs_log("exit: size of desc %zu", sizeof(*desc));
+
 	return sizeof(*desc);
 }
 
@@ -2158,6 +2375,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u os desc type %d", len, type);
+
 	/* loop over all ext compat/ext prop descriptors */
 	while (feature_count--) {
 		ret = entity(type, h, data, len, priv);
@@ -2168,6 +2387,9 @@
 		data += ret;
 		len -= ret;
 	}
+
+	ffs_log("exit");
+
 	return _len - len;
 }
 
@@ -2181,6 +2403,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	for (num = 0; num < count; ++num) {
 		int ret;
 		enum ffs_os_desc_type type;
@@ -2230,6 +2454,9 @@
 		len -= ret;
 		data += ret;
 	}
+
+	ffs_log("exit");
+
 	return _len - len;
 }
 
@@ -2245,6 +2472,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %u", len);
+
 	switch (type) {
 	case FFS_OS_DESC_EXT_COMPAT: {
 		struct usb_ext_compat_desc *d = data;
@@ -2293,6 +2522,9 @@
 		pr_vdebug("unknown descriptor: %d\n", type);
 		return -EINVAL;
 	}
+
+	ffs_log("exit");
+
 	return length;
 }
 
@@ -2306,6 +2538,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %zu", len);
+
 	if (get_unaligned_le32(data + 4) != len)
 		goto error;
 
@@ -2418,10 +2652,13 @@
 	ffs->ss_descs_count	= counts[2];
 	ffs->ms_os_descs_count	= os_descs_count;
 
+	ffs_log("exit");
+
 	return 0;
 
 error:
 	kfree(_data);
+	ffs_log("exit: ret %d", ret);
 	return ret;
 }
 
@@ -2435,6 +2672,8 @@
 
 	ENTER();
 
+	ffs_log("enter: len %zu", len);
+
 	if (unlikely(get_unaligned_le32(data) != FUNCTIONFS_STRINGS_MAGIC ||
 		     get_unaligned_le32(data + 4) != len))
 		goto error;
@@ -2548,12 +2787,14 @@
 	ffs->stringtabs = stringtabs;
 	ffs->raw_strings = _data;
 
+	ffs_log("exit");
 	return 0;
 
 error_free:
 	kfree(stringtabs);
 error:
 	kfree(_data);
+	ffs_log("exit: -EINVAL");
 	return -EINVAL;
 }
 
@@ -2566,6 +2807,9 @@
 	enum usb_functionfs_event_type rem_type1, rem_type2 = type;
 	int neg = 0;
 
+	ffs_log("enter: type %d state %d setup_state %d flag %lu", type,
+		ffs->state, ffs->setup_state, ffs->flags);
+
 	/*
 	 * Abort any unhandled setup
 	 *
@@ -2625,6 +2869,9 @@
 	wake_up_locked(&ffs->ev.waitq);
 	if (ffs->ffs_eventfd)
 		eventfd_signal(ffs->ffs_eventfd, 1);
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 static void ffs_event_add(struct ffs_data *ffs,
@@ -2659,6 +2906,8 @@
 	int idx;
 	static const char *speed_names[] = { "full", "high", "super" };
 
+	ffs_log("enter");
+
 	if (type != FFS_DESCRIPTOR)
 		return 0;
 
@@ -2734,6 +2983,8 @@
 	}
 	ffs_dump_mem(": Rewritten ep desc", ds, ds->bLength);
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -2745,6 +2996,8 @@
 	unsigned idx;
 	u8 newValue;
 
+	ffs_log("enter: type %d", type);
+
 	switch (type) {
 	default:
 	case FFS_DESCRIPTOR:
@@ -2789,6 +3042,9 @@
 
 	pr_vdebug("%02x -> %02x\n", *valuep, newValue);
 	*valuep = newValue;
+
+	ffs_log("exit: newValue %d", newValue);
+
 	return 0;
 }
 
@@ -2799,6 +3055,8 @@
 	struct ffs_function *func = priv;
 	u8 length = 0;
 
+	ffs_log("enter: type %d", type);
+
 	switch (type) {
 	case FFS_OS_DESC_EXT_COMPAT: {
 		struct usb_ext_compat_desc *desc = data;
@@ -2868,6 +3126,8 @@
 		pr_vdebug("unknown descriptor: %d\n", type);
 	}
 
+	ffs_log("exit");
+
 	return length;
 }
 
@@ -2881,6 +3141,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	/*
 	 * Legacy gadget triggers binding in functionfs_ready_callback,
 	 * which already uses locking; taking the same lock here would
@@ -2915,6 +3177,8 @@
 	ffs_opts->refcnt++;
 	func->function.strings = func->ffs->stringtabs;
 
+	ffs_log("exit");
+
 	return ffs_opts;
 }
 
@@ -2925,10 +3189,8 @@
 	struct ffs_data *ffs = func->ffs;
 
 	const int full = !!func->ffs->fs_descs_count;
-	const int high = gadget_is_dualspeed(func->gadget) &&
-		func->ffs->hs_descs_count;
-	const int super = gadget_is_superspeed(func->gadget) &&
-		func->ffs->ss_descs_count;
+	const int high = func->ffs->hs_descs_count;
+	const int super = func->ffs->ss_descs_count;
 
 	int fs_len, hs_len, ss_len, ret, i;
 	struct ffs_ep *eps_ptr;
@@ -2960,6 +3222,9 @@
 
 	ENTER();
 
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	/* Has descriptors only for speeds gadget does not support */
 	if (unlikely(!(full | high | super)))
 		return -ENOTSUPP;
@@ -3076,10 +3341,15 @@
 
 	/* And we're done */
 	ffs_event_add(ffs, FUNCTIONFS_BIND);
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	return 0;
 
 error:
 	/* XXX Do we need to release all claimed endpoints here? */
+	ffs_log("exit: ret %d", ret);
 	return ret;
 }
 
@@ -3090,6 +3360,8 @@
 	struct ffs_function *func = ffs_func_from_usb(f);
 	int ret;
 
+	ffs_log("enter");
+
 	if (IS_ERR(ffs_opts))
 		return PTR_ERR(ffs_opts);
 
@@ -3097,6 +3369,8 @@
 	if (ret && !--ffs_opts->refcnt)
 		functionfs_unbind(func->ffs);
 
+	ffs_log("exit: ret %d", ret);
+
 	return ret;
 }
 
@@ -3107,7 +3381,12 @@
 {
 	struct ffs_data *ffs = container_of(work,
 		struct ffs_data, reset_work);
+
+	ffs_log("enter");
+
 	ffs_data_reset(ffs);
+
+	ffs_log("exit");
 }
 
 static int ffs_func_set_alt(struct usb_function *f,
@@ -3117,6 +3396,8 @@
 	struct ffs_data *ffs = func->ffs;
 	int ret = 0, intf;
 
+	ffs_log("enter");
+
 	if (alt != (unsigned)-1) {
 		intf = ffs_func_revmap_intf(func, interface);
 		if (unlikely(intf < 0))
@@ -3146,12 +3427,17 @@
 	ret = ffs_func_eps_enable(func);
 	if (likely(ret >= 0))
 		ffs_event_add(ffs, FUNCTIONFS_ENABLE);
+
+	ffs_log("exit: ret %d", ret);
+
 	return ret;
 }
 
 static void ffs_func_disable(struct usb_function *f)
 {
+	ffs_log("enter");
 	ffs_func_set_alt(f, 0, (unsigned)-1);
+	ffs_log("exit");
 }
 
 static int ffs_func_setup(struct usb_function *f,
@@ -3164,6 +3450,8 @@
 
 	ENTER();
 
+	ffs_log("enter");
+
 	pr_vdebug("creq->bRequestType = %02x\n", creq->bRequestType);
 	pr_vdebug("creq->bRequest     = %02x\n", creq->bRequest);
 	pr_vdebug("creq->wValue       = %04x\n", le16_to_cpu(creq->wValue));
@@ -3211,6 +3499,8 @@
 	__ffs_event_add(ffs, FUNCTIONFS_SETUP);
 	spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -3239,13 +3529,23 @@
 static void ffs_func_suspend(struct usb_function *f)
 {
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_SUSPEND);
+
+	ffs_log("exit");
 }
 
 static void ffs_func_resume(struct usb_function *f)
 {
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_event_add(ffs_func_from_usb(f)->ffs, FUNCTIONFS_RESUME);
+
+	ffs_log("exit");
 }
 
 
@@ -3262,11 +3562,15 @@
 	short *nums = func->interfaces_nums;
 	unsigned count = func->ffs->interfaces_count;
 
+	ffs_log("enter");
+
 	for (; count; --count, ++nums) {
 		if (*nums >= 0 && *nums == intf)
 			return nums - func->interfaces_nums;
 	}
 
+	ffs_log("exit");
+
 	return -EDOM;
 }
 
@@ -3279,6 +3583,8 @@
 {
 	struct ffs_dev *dev;
 
+	ffs_log("enter");
+
 	list_for_each_entry(dev, &ffs_devices, entry) {
 		if (!dev->name || !name)
 			continue;
@@ -3286,6 +3592,8 @@
 			return dev;
 	}
 
+	ffs_log("exit");
+
 	return NULL;
 }
 
@@ -3296,12 +3604,16 @@
 {
 	struct ffs_dev *dev;
 
+	ffs_log("enter");
+
 	if (list_is_singular(&ffs_devices)) {
 		dev = list_first_entry(&ffs_devices, struct ffs_dev, entry);
 		if (dev->single)
 			return dev;
 	}
 
+	ffs_log("exit");
+
 	return NULL;
 }
 
@@ -3312,11 +3624,17 @@
 {
 	struct ffs_dev *dev;
 
+	ffs_log("enter");
+
 	dev = _ffs_get_single_dev();
 	if (dev)
 		return dev;
 
-	return _ffs_do_find_dev(name);
+	dev = _ffs_do_find_dev(name);
+
+	ffs_log("exit");
+
+	return dev;
 }
 
 /* Configfs support *********************************************************/
@@ -3438,6 +3756,10 @@
 	unsigned long flags;
 
 	ENTER();
+
+	ffs_log("enter: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
+
 	if (ffs->func == func) {
 		ffs_func_eps_disable(func);
 		ffs->func = NULL;
@@ -3467,6 +3789,9 @@
 	func->interfaces_nums = NULL;
 
 	ffs_event_add(ffs, FUNCTIONFS_UNBIND);
+
+	ffs_log("exit: state %d setup_state %d flag %lu", ffs->state,
+		ffs->setup_state, ffs->flags);
 }
 
 static struct usb_function *ffs_alloc(struct usb_function_instance *fi)
@@ -3530,12 +3855,16 @@
 {
 	struct ffs_dev *existing;
 
+	ffs_log("enter");
+
 	existing = _ffs_do_find_dev(name);
 	if (existing)
 		return -EBUSY;
 
 	dev->name = name;
 
+	ffs_log("exit");
+
 	return 0;
 }
 
@@ -3546,10 +3875,14 @@
 {
 	int ret;
 
+	ffs_log("enter");
+
 	ffs_dev_lock();
 	ret = _ffs_name_dev(dev, name);
 	ffs_dev_unlock();
 
+	ffs_log("exit");
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ffs_name_dev);
@@ -3558,6 +3891,8 @@
 {
 	int ret;
 
+	ffs_log("enter");
+
 	ret = 0;
 	ffs_dev_lock();
 
@@ -3567,6 +3902,9 @@
 		dev->single = true;
 
 	ffs_dev_unlock();
+
+	ffs_log("exit");
+
 	return ret;
 }
 EXPORT_SYMBOL_GPL(ffs_single_dev);
@@ -3576,6 +3914,9 @@
  */
 static void _ffs_free_dev(struct ffs_dev *dev)
 {
+
+	ffs_log("enter");
+
 	list_del(&dev->entry);
 	if (dev->name_allocated)
 		kfree(dev->name);
@@ -3587,6 +3928,8 @@
 	kfree(dev);
 	if (list_empty(&ffs_devices))
 		functionfs_cleanup();
+
+	ffs_log("exit");
 }
 
 static void *ffs_acquire_dev(const char *dev_name)
@@ -3594,6 +3937,9 @@
 	struct ffs_dev *ffs_dev;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_dev = _ffs_find_dev(dev_name);
@@ -3608,6 +3954,9 @@
 		ffs_dev->mounted = true;
 
 	ffs_dev_unlock();
+
+	ffs_log("exit");
+
 	return ffs_dev;
 }
 
@@ -3616,6 +3965,9 @@
 	struct ffs_dev *ffs_dev;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_dev = ffs_data->private_data;
@@ -3627,6 +3979,8 @@
 	}
 
 	ffs_dev_unlock();
+
+	ffs_log("exit");
 }
 
 static int ffs_ready(struct ffs_data *ffs)
@@ -3635,6 +3989,9 @@
 	int ret = 0;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_obj = ffs->private_data;
@@ -3659,6 +4016,9 @@
 	set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 done:
 	ffs_dev_unlock();
+
+	ffs_log("exit");
+
 	return ret;
 }
 
@@ -3668,11 +4028,16 @@
 	struct f_fs_opts *opts;
 
 	ENTER();
+
+	ffs_log("enter");
+
 	ffs_dev_lock();
 
 	ffs_obj = ffs->private_data;
-	if (!ffs_obj)
+	if (!ffs_obj) {
+		ffs_dev_unlock();
 		goto done;
+	}
 
 	ffs_obj->desc_ready = false;
 
@@ -3680,19 +4045,28 @@
 	    ffs_obj->ffs_closed_callback)
 		ffs_obj->ffs_closed_callback(ffs);
 
-	if (ffs_obj->opts)
+	if (ffs_obj->opts) {
 		opts = ffs_obj->opts;
-	else
+	} else {
+		ffs_dev_unlock();
 		goto done;
+	}
 
 	if (opts->no_configfs || !opts->func_inst.group.cg_item.ci_parent
-	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount))
+	    || !atomic_read(&opts->func_inst.group.cg_item.ci_kref.refcount)) {
+		ffs_dev_unlock();
 		goto done;
+	}
 
-	unregister_gadget_item(ffs_obj->opts->
-			       func_inst.group.cg_item.ci_parent->ci_parent);
-done:
 	ffs_dev_unlock();
+
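+	/*
+	 * Unregister the gadget item only if this instance was actually
+	 * bound, and only after ffs_dev_lock has been dropped.
+	 */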
+	if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
+		unregister_gadget_item(opts->
+			       func_inst.group.cg_item.ci_parent->ci_parent);
+		ffs_log("unreg gadget done");
+	}
+done:
+	ffs_log("exit");
 }
 
 /* Misc helper functions ****************************************************/
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a5719f2..a832d27 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1167,6 +1167,65 @@
 	kfree(opts);
 }
 
+#ifdef CONFIG_USB_CONFIGFS_UEVENT
+extern struct device *create_function_device(char *name);
+static ssize_t alsa_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct usb_function_instance *fi_midi = dev_get_drvdata(dev);
+	struct f_midi *midi;
+
+	if (!fi_midi || !fi_midi->f)
+		dev_warn(dev, "f_midi: function not set\n");
+
+	if (fi_midi && fi_midi->f) {
+		midi = func_to_midi(fi_midi->f);
+		if (midi->rmidi && midi->rmidi->card)
+			return sprintf(buf, "%d %d\n",
+				midi->rmidi->card->number, midi->rmidi->device);
+	}
+
+	/* fall back to invalid ALSA card and device numbers */
+	return sprintf(buf, "%d %d\n", -1, -1);
+}
+
+static DEVICE_ATTR(alsa, S_IRUGO, alsa_show, NULL);
+
+static struct device_attribute *alsa_function_attributes[] = {
+	&dev_attr_alsa,
+	NULL
+};
+
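+/*
+ * Create the "f_midi" function device and attach the read-only "alsa"
+ * attribute exposing the ALSA card and device numbers.
+ */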
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+	struct device *dev;
+	struct device_attribute **attrs;
+	struct device_attribute *attr;
+	int err = 0;
+
+	dev = create_function_device("f_midi");
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	attrs = alsa_function_attributes;
+	if (attrs) {
+		while ((attr = *attrs++) && !err)
+			err = device_create_file(dev, attr);
+		if (err) {
+			device_destroy(dev->class, dev->devt);
+			return -EINVAL;
+		}
+	}
+	dev_set_drvdata(dev, fi);
+	return 0;
+}
+#else
+static int create_alsa_device(struct usb_function_instance *fi)
+{
+	return 0;
+}
+#endif
+
 static struct usb_function_instance *f_midi_alloc_inst(void)
 {
 	struct f_midi_opts *opts;
@@ -1184,6 +1243,11 @@
 	opts->in_ports = 1;
 	opts->out_ports = 1;
 
+	if (create_alsa_device(&opts->func_inst)) {
+		kfree(opts);
+		return ERR_PTR(-ENODEV);
+	}
+
 	config_group_init_type_name(&opts->func_inst.group, "",
 				    &midi_func_type);
 
@@ -1201,6 +1265,7 @@
 	mutex_lock(&opts->lock);
 	kfifo_free(&midi->in_req_fifo);
 	kfree(midi);
+	opts->func_inst.f = NULL;
 	--opts->refcnt;
 	mutex_unlock(&opts->lock);
 }
@@ -1280,6 +1345,7 @@
 	midi->func.disable	= f_midi_disable;
 	midi->func.free_func	= f_midi_free;
 
+	fi->f = &midi->func;
 	return &midi->func;
 
 setup_fail:
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
new file mode 100644
index 0000000..e21d3e0
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -0,0 +1,1532 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* #define DEBUG */
+/* #define VERBOSE_DEBUG */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+
+#include <linux/types.h>
+#include <linux/file.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+
+#include <linux/usb.h>
+#include <linux/usb_usual.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/f_mtp.h>
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "configfs.h"
+
+#define MTP_BULK_BUFFER_SIZE       16384
+#define INTR_BUFFER_SIZE           28
+#define MAX_INST_NAME_LEN          40
+
+/* String IDs */
+#define INTERFACE_STRING_INDEX	0
+
+/* values for mtp_dev.state */
+#define STATE_OFFLINE               0   /* initial state, disconnected */
+#define STATE_READY                 1   /* ready for userspace calls */
+#define STATE_BUSY                  2   /* processing userspace calls */
+#define STATE_CANCELED              3   /* transaction canceled by host */
+#define STATE_ERROR                 4   /* error from completion routine */
+
+/* number of tx and rx requests to allocate */
+#define TX_REQ_MAX 4
+#define RX_REQ_MAX 2
+#define INTR_REQ_MAX 5
+
+/* ID for Microsoft MTP OS String */
+#define MTP_OS_STRING_ID   0xEE
+
+/* MTP class requests */
+#define MTP_REQ_CANCEL              0x64
+#define MTP_REQ_GET_EXT_EVENT_DATA  0x65
+#define MTP_REQ_RESET               0x66
+#define MTP_REQ_GET_DEVICE_STATUS   0x67
+
+/* constants for device status */
+#define MTP_RESPONSE_OK             0x2001
+#define MTP_RESPONSE_DEVICE_BUSY    0x2019
+#define DRIVER_NAME "mtp"
+
+static const char mtp_shortname[] = DRIVER_NAME "_usb";
+
+struct mtp_dev {
+	struct usb_function function;
+	struct usb_composite_dev *cdev;
+	spinlock_t lock;
+
+	struct usb_ep *ep_in;
+	struct usb_ep *ep_out;
+	struct usb_ep *ep_intr;
+
+	int state;
+
+	/* synchronize access to our device file */
+	atomic_t open_excl;
+	/* to enforce only one ioctl at a time */
+	atomic_t ioctl_excl;
+
+	struct list_head tx_idle;
+	struct list_head intr_idle;
+
+	wait_queue_head_t read_wq;
+	wait_queue_head_t write_wq;
+	wait_queue_head_t intr_wq;
+	struct usb_request *rx_req[RX_REQ_MAX];
+	int rx_done;
+
+	/* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+	 * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
+	 */
+	struct workqueue_struct *wq;
+	struct work_struct send_file_work;
+	struct work_struct receive_file_work;
+	struct file *xfer_file;
+	loff_t xfer_file_offset;
+	int64_t xfer_file_length;
+	unsigned xfer_send_header;
+	uint16_t xfer_command;
+	uint32_t xfer_transaction_id;
+	int xfer_result;
+};
+
+static struct usb_interface_descriptor mtp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_VENDOR_SPEC,
+	.bInterfaceSubClass     = USB_SUBCLASS_VENDOR_SPEC,
+	.bInterfaceProtocol     = 0,
+};
+
+static struct usb_interface_descriptor ptp_interface_desc = {
+	.bLength                = USB_DT_INTERFACE_SIZE,
+	.bDescriptorType        = USB_DT_INTERFACE,
+	.bInterfaceNumber       = 0,
+	.bNumEndpoints          = 3,
+	.bInterfaceClass        = USB_CLASS_STILL_IMAGE,
+	.bInterfaceSubClass     = 1,
+	.bInterfaceProtocol     = 1,
+};
+
+static struct usb_endpoint_descriptor mtp_ss_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_in_comp_desc = {
+	.bLength                = sizeof(mtp_ss_in_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+	/* .bMaxBurst           = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_ss_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_ss_out_comp_desc = {
+	.bLength                = sizeof(mtp_ss_out_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+	/* .bMaxBurst           = DYNAMIC, */
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_highspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+	.wMaxPacketSize         = __constant_cpu_to_le16(512),
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_in_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_fullspeed_out_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_OUT,
+	.bmAttributes           = USB_ENDPOINT_XFER_BULK,
+};
+
+static struct usb_endpoint_descriptor mtp_intr_desc = {
+	.bLength                = USB_DT_ENDPOINT_SIZE,
+	.bDescriptorType        = USB_DT_ENDPOINT,
+	.bEndpointAddress       = USB_DIR_IN,
+	.bmAttributes           = USB_ENDPOINT_XFER_INT,
+	.wMaxPacketSize         = __constant_cpu_to_le16(INTR_BUFFER_SIZE),
+	.bInterval              = 6,
+};
+
+static struct usb_ss_ep_comp_descriptor mtp_intr_ss_comp_desc = {
+	.bLength                = sizeof(mtp_intr_ss_comp_desc),
+	.bDescriptorType        = USB_DT_SS_ENDPOINT_COMP,
+	.wBytesPerInterval      = cpu_to_le16(INTR_BUFFER_SIZE),
+};
+
+static struct usb_descriptor_header *fs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_mtp_descs[] = {
+	(struct usb_descriptor_header *) &mtp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *fs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_fullspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *hs_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_in_desc,
+	(struct usb_descriptor_header *) &mtp_highspeed_out_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	NULL,
+};
+
+static struct usb_descriptor_header *ss_ptp_descs[] = {
+	(struct usb_descriptor_header *) &ptp_interface_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_desc,
+	(struct usb_descriptor_header *) &mtp_ss_in_comp_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_desc,
+	(struct usb_descriptor_header *) &mtp_ss_out_comp_desc,
+	(struct usb_descriptor_header *) &mtp_intr_desc,
+	(struct usb_descriptor_header *) &mtp_intr_ss_comp_desc,
+	NULL,
+};
+
+static struct usb_string mtp_string_defs[] = {
+	/* Naming interface "MTP" so libmtp will recognize us */
+	[INTERFACE_STRING_INDEX].s	= "MTP",
+	{  },	/* end of list */
+};
+
+static struct usb_gadget_strings mtp_string_table = {
+	.language		= 0x0409,	/* en-US */
+	.strings		= mtp_string_defs,
+};
+
+static struct usb_gadget_strings *mtp_strings[] = {
+	&mtp_string_table,
+	NULL,
+};
+
+/* Microsoft MTP OS String */
+static u8 mtp_os_string[] = {
+	18, /* sizeof(mtp_os_string) */
+	USB_DT_STRING,
+	/* Signature field: "MSFT100" */
+	'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0,
+	/* vendor code */
+	1,
+	/* padding */
+	0
+};
+
+/* Microsoft Extended Configuration Descriptor Header Section */
+struct mtp_ext_config_desc_header {
+	__le32	dwLength;
+	__u16	bcdVersion;
+	__le16	wIndex;
+	__u8	bCount;
+	__u8	reserved[7];
+};
+
+/* Microsoft Extended Configuration Descriptor Function Section */
+struct mtp_ext_config_desc_function {
+	__u8	bFirstInterfaceNumber;
+	__u8	bInterfaceCount;
+	__u8	compatibleID[8];
+	__u8	subCompatibleID[8];
+	__u8	reserved[6];
+};
+
+/* MTP Extended Configuration Descriptor */
+struct {
+	struct mtp_ext_config_desc_header	header;
+	struct mtp_ext_config_desc_function    function;
+} mtp_ext_config_desc = {
+	.header = {
+		.dwLength = __constant_cpu_to_le32(sizeof(mtp_ext_config_desc)),
+		.bcdVersion = __constant_cpu_to_le16(0x0100),
+		.wIndex = __constant_cpu_to_le16(4),
+		.bCount = 1,
+	},
+	.function = {
+		.bFirstInterfaceNumber = 0,
+		.bInterfaceCount = 1,
+		.compatibleID = { 'M', 'T', 'P' },
+	},
+};
+
+struct mtp_device_status {
+	__le16	wLength;
+	__le16	wCode;
+};
+
+struct mtp_data_header {
+	/* length of packet, including this header */
+	__le32	length;
+	/* container type (2 for data packet) */
+	__le16	type;
+	/* MTP command code */
+	__le16	command;
+	/* MTP transaction ID */
+	__le32	transaction_id;
+};
+
+struct mtp_instance {
+	struct usb_function_instance func_inst;
+	const char *name;
+	struct mtp_dev *dev;
+	char mtp_ext_compat_id[16];
+	struct usb_os_desc mtp_os_desc;
+};
+
+/* temporary variable used between mtp_open() and mtp_gadget_bind() */
+static struct mtp_dev *_mtp_dev;
+
+static inline struct mtp_dev *func_to_mtp(struct usb_function *f)
+{
+	return container_of(f, struct mtp_dev, function);
+}
+
+static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size)
+{
+	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
+	if (!req)
+		return NULL;
+
+	/* now allocate buffers for the requests */
+	req->buf = kmalloc(buffer_size, GFP_KERNEL);
+	if (!req->buf) {
+		usb_ep_free_request(ep, req);
+		return NULL;
+	}
+
+	return req;
+}
+
+static void mtp_request_free(struct usb_request *req, struct usb_ep *ep)
+{
+	if (req) {
+		kfree(req->buf);
+		usb_ep_free_request(ep, req);
+	}
+}
+
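+/*
+ * Simple exclusion helpers: mtp_lock() returns 0 when exclusive access is
+ * taken, -1 if the resource is already in use.
+ */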
+static inline int mtp_lock(atomic_t *excl)
+{
+	if (atomic_inc_return(excl) == 1) {
+		return 0;
+	} else {
+		atomic_dec(excl);
+		return -1;
+	}
+}
+
+static inline void mtp_unlock(atomic_t *excl)
+{
+	atomic_dec(excl);
+}
+
+/* add a request to the tail of a list */
+static void mtp_req_put(struct mtp_dev *dev, struct list_head *head,
+		struct usb_request *req)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	list_add_tail(&req->list, head);
+	spin_unlock_irqrestore(&dev->lock, flags);
+}
+
+/* remove a request from the head of a list */
+static struct usb_request
+*mtp_req_get(struct mtp_dev *dev, struct list_head *head)
+{
+	unsigned long flags;
+	struct usb_request *req;
+
+	spin_lock_irqsave(&dev->lock, flags);
+	if (list_empty(head)) {
+		req = 0;
+	} else {
+		req = list_first_entry(head, struct usb_request, list);
+		list_del(&req->list);
+	}
+	spin_unlock_irqrestore(&dev->lock, flags);
+	return req;
+}
+
+static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->tx_idle, req);
+
+	wake_up(&dev->write_wq);
+}
+
+static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	dev->rx_done = 1;
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	wake_up(&dev->read_wq);
+}
+
+static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (req->status != 0)
+		dev->state = STATE_ERROR;
+
+	mtp_req_put(dev, &dev->intr_idle, req);
+
+	wake_up(&dev->intr_wq);
+}
+
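+/*
+ * Autoconfigure the bulk IN/OUT and interrupt endpoints and pre-allocate
+ * their request pools (TX_REQ_MAX, RX_REQ_MAX and INTR_REQ_MAX requests).
+ */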
+static int mtp_create_bulk_endpoints(struct mtp_dev *dev,
+				struct usb_endpoint_descriptor *in_desc,
+				struct usb_endpoint_descriptor *out_desc,
+				struct usb_endpoint_descriptor *intr_desc)
+{
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	struct usb_ep *ep;
+	int i;
+
+	DBG(cdev, "create_bulk_endpoints dev: %p\n", dev);
+
+	ep = usb_ep_autoconfig(cdev->gadget, in_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_in failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_in = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, out_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_out failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_out = ep;
+
+	ep = usb_ep_autoconfig(cdev->gadget, intr_desc);
+	if (!ep) {
+		DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n");
+		return -ENODEV;
+	}
+	DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name);
+	ep->driver_data = dev;		/* claim the endpoint */
+	dev->ep_intr = ep;
+
+	/* now allocate requests for our endpoints */
+	for (i = 0; i < TX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_in, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_in;
+		mtp_req_put(dev, &dev->tx_idle, req);
+	}
+	for (i = 0; i < RX_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_out, MTP_BULK_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_out;
+		dev->rx_req[i] = req;
+	}
+	for (i = 0; i < INTR_REQ_MAX; i++) {
+		req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE);
+		if (!req)
+			goto fail;
+		req->complete = mtp_complete_intr;
+		mtp_req_put(dev, &dev->intr_idle, req);
+	}
+
+	return 0;
+
+fail:
+	pr_err("mtp_bind() could not allocate requests\n");
+	return -1;
+}
+
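+/*
+ * Read a single bulk OUT transfer (at most MTP_BULK_BUFFER_SIZE bytes) into
+ * the caller's buffer, blocking until the device is online and the request
+ * completes.
+ */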
+static ssize_t mtp_read(struct file *fp, char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req;
+	ssize_t r = count;
+	unsigned xfer;
+	int ret = 0;
+
+	DBG(cdev, "mtp_read(%zu)\n", count);
+
+	if (count > MTP_BULK_BUFFER_SIZE)
+		return -EINVAL;
+
+	/* we will block until we're online */
+	DBG(cdev, "mtp_read: waiting for online state\n");
+	ret = wait_event_interruptible(dev->read_wq,
+		dev->state != STATE_OFFLINE);
+	if (ret < 0) {
+		r = ret;
+		goto done;
+	}
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+requeue_req:
+	/* queue a request */
+	req = dev->rx_req[0];
+	req->length = count;
+	dev->rx_done = 0;
+	ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL);
+	if (ret < 0) {
+		r = -EIO;
+		goto done;
+	} else {
+		DBG(cdev, "rx %p queue\n", req);
+	}
+
+	/* wait for a request to complete */
+	ret = wait_event_interruptible(dev->read_wq, dev->rx_done);
+	if (ret < 0) {
+		r = ret;
+		usb_ep_dequeue(dev->ep_out, req);
+		goto done;
+	}
+	if (dev->state == STATE_BUSY) {
+		/* If we got a 0-len packet, throw it back and try again. */
+		if (req->actual == 0)
+			goto requeue_req;
+
+		DBG(cdev, "rx %p %d\n", req, req->actual);
+		xfer = (req->actual < count) ? req->actual : count;
+		r = xfer;
+		if (copy_to_user(buf, req->buf, xfer))
+			r = -EFAULT;
+	} else
+		r = -EIO;
+
+done:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_read returning %zd\n", r);
+	return r;
+}
+
+static ssize_t mtp_write(struct file *fp, const char __user *buf,
+	size_t count, loff_t *pos)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	ssize_t r = count;
+	unsigned xfer;
+	int sendZLP = 0;
+	int ret;
+
+	DBG(cdev, "mtp_write(%zu)\n", count);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED) {
+		/* report cancelation to userspace */
+		dev->state = STATE_READY;
+		spin_unlock_irq(&dev->lock);
+		return -ECANCELED;
+	}
+	if (dev->state == STATE_OFFLINE) {
+		spin_unlock_irq(&dev->lock);
+		return -ENODEV;
+	}
+	dev->state = STATE_BUSY;
+	spin_unlock_irq(&dev->lock);
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		if (dev->state != STATE_BUSY) {
+			DBG(cdev, "mtp_write dev->error\n");
+			r = -EIO;
+			break;
+		}
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			((req = mtp_req_get(dev, &dev->tx_idle))
+				|| dev->state != STATE_BUSY));
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+		if (xfer && copy_from_user(req->buf, buf, xfer)) {
+			r = -EFAULT;
+			break;
+		}
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "mtp_write: xfer error %d\n", ret);
+			r = -EIO;
+			break;
+		}
+
+		buf += xfer;
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		r = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+
+	DBG(cdev, "mtp_write returning %zd\n", r);
+	return r;
+}
+
+/* read from a local file and write to USB */
+static void send_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						send_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *req = 0;
+	struct mtp_data_header *header;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int xfer, ret, hdr_size;
+	int r = 0;
+	int sendZLP = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+
+	if (dev->xfer_send_header) {
+		hdr_size = sizeof(struct mtp_data_header);
+		count += hdr_size;
+	} else {
+		hdr_size = 0;
+	}
+
+	/* we need to send a zero length packet to signal the end of transfer
+	 * if the transfer size is aligned to a packet boundary.
+	 */
+	if ((count & (dev->ep_in->maxpacket - 1)) == 0)
+		sendZLP = 1;
+
+	while (count > 0 || sendZLP) {
+		/* so we exit after sending ZLP */
+		if (count == 0)
+			sendZLP = 0;
+
+		/* get an idle tx request to use */
+		req = 0;
+		ret = wait_event_interruptible(dev->write_wq,
+			(req = mtp_req_get(dev, &dev->tx_idle))
+			|| dev->state != STATE_BUSY);
+		if (dev->state == STATE_CANCELED) {
+			r = -ECANCELED;
+			break;
+		}
+		if (!req) {
+			r = ret;
+			break;
+		}
+
+		if (count > MTP_BULK_BUFFER_SIZE)
+			xfer = MTP_BULK_BUFFER_SIZE;
+		else
+			xfer = count;
+
+		if (hdr_size) {
+			/* prepend MTP data header */
+			header = (struct mtp_data_header *)req->buf;
+			header->length = __cpu_to_le32(count);
+			header->type = __cpu_to_le16(2); /* data packet */
+			header->command = __cpu_to_le16(dev->xfer_command);
+			header->transaction_id =
+					__cpu_to_le32(dev->xfer_transaction_id);
+		}
+
+		ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size,
+								&offset);
+		if (ret < 0) {
+			r = ret;
+			break;
+		}
+		xfer = ret + hdr_size;
+		hdr_size = 0;
+
+		req->length = xfer;
+		ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
+		if (ret < 0) {
+			DBG(cdev, "send_file_work: xfer error %d\n", ret);
+			dev->state = STATE_ERROR;
+			r = -EIO;
+			break;
+		}
+
+		count -= xfer;
+
+		/* zero this so we don't try to free it on error exit */
+		req = 0;
+	}
+
+	if (req)
+		mtp_req_put(dev, &dev->tx_idle, req);
+
+	DBG(cdev, "send_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
+/* read from USB and write to a local file */
+static void receive_file_work(struct work_struct *data)
+{
+	struct mtp_dev *dev = container_of(data, struct mtp_dev,
+						receive_file_work);
+	struct usb_composite_dev *cdev = dev->cdev;
+	struct usb_request *read_req = NULL, *write_req = NULL;
+	struct file *filp;
+	loff_t offset;
+	int64_t count;
+	int ret, cur_buf = 0;
+	int r = 0;
+
+	/* read our parameters */
+	smp_rmb();
+	filp = dev->xfer_file;
+	offset = dev->xfer_file_offset;
+	count = dev->xfer_file_length;
+
+	DBG(cdev, "receive_file_work(%lld)\n", count);
+
+	while (count > 0 || write_req) {
+		if (count > 0) {
+			/* queue a request */
+			read_req = dev->rx_req[cur_buf];
+			cur_buf = (cur_buf + 1) % RX_REQ_MAX;
+
+			read_req->length = (count > MTP_BULK_BUFFER_SIZE
+					? MTP_BULK_BUFFER_SIZE : count);
+			dev->rx_done = 0;
+			ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL);
+			if (ret < 0) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+		}
+
+		if (write_req) {
+			DBG(cdev, "rx %p %d\n", write_req, write_req->actual);
+			ret = vfs_write(filp, write_req->buf, write_req->actual,
+				&offset);
+			DBG(cdev, "vfs_write %d\n", ret);
+			if (ret != write_req->actual) {
+				r = -EIO;
+				dev->state = STATE_ERROR;
+				break;
+			}
+			write_req = NULL;
+		}
+
+		if (read_req) {
+			/* wait for our last read to complete */
+			ret = wait_event_interruptible(dev->read_wq,
+				dev->rx_done || dev->state != STATE_BUSY);
+			if (dev->state == STATE_CANCELED) {
+				r = -ECANCELED;
+				if (!dev->rx_done)
+					usb_ep_dequeue(dev->ep_out, read_req);
+				break;
+			}
+			/* if xfer_file_length is 0xFFFFFFFF, then we read until
+			 * we get a zero length packet
+			 */
+			if (count != 0xFFFFFFFF)
+				count -= read_req->actual;
+			if (read_req->actual < read_req->length) {
+				/*
+				 * short packet is used to signal EOF for
+				 * sizes > 4 gig
+				 */
+				DBG(cdev, "got short packet\n");
+				count = 0;
+			}
+
+			write_req = read_req;
+			read_req = NULL;
+		}
+	}
+
+	DBG(cdev, "receive_file_work returning %d\n", r);
+	/* write the result */
+	dev->xfer_result = r;
+	smp_wmb();
+}
+
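+/* send an MTP event, copied from userspace, on the interrupt IN endpoint */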
+static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event)
+{
+	struct usb_request *req = NULL;
+	int ret;
+	int length = event->length;
+
+	DBG(dev->cdev, "mtp_send_event(%zu)\n", event->length);
+
+	if (length < 0 || length > INTR_BUFFER_SIZE)
+		return -EINVAL;
+	if (dev->state == STATE_OFFLINE)
+		return -ENODEV;
+
+	ret = wait_event_interruptible_timeout(dev->intr_wq,
+			(req = mtp_req_get(dev, &dev->intr_idle)),
+			msecs_to_jiffies(1000));
+	if (!req)
+		return -ETIME;
+
+	if (copy_from_user(req->buf, (void __user *)event->data, length)) {
+		mtp_req_put(dev, &dev->intr_idle, req);
+		return -EFAULT;
+	}
+	req->length = length;
+	ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL);
+	if (ret)
+		mtp_req_put(dev, &dev->intr_idle, req);
+
+	return ret;
+}
+
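+/*
+ * ioctl handler: file transfer requests are handed off to the workqueue so
+ * that vfs_read()/vfs_write() run in kernel thread context.
+ */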
+static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
+{
+	struct mtp_dev *dev = fp->private_data;
+	struct file *filp = NULL;
+	int ret = -EINVAL;
+
+	if (mtp_lock(&dev->ioctl_excl))
+		return -EBUSY;
+
+	switch (code) {
+	case MTP_SEND_FILE:
+	case MTP_RECEIVE_FILE:
+	case MTP_SEND_FILE_WITH_HEADER:
+	{
+		struct mtp_file_range	mfr;
+		struct work_struct *work;
+
+		spin_lock_irq(&dev->lock);
+		if (dev->state == STATE_CANCELED) {
+			/* report cancelation to userspace */
+			dev->state = STATE_READY;
+			spin_unlock_irq(&dev->lock);
+			ret = -ECANCELED;
+			goto out;
+		}
+		if (dev->state == STATE_OFFLINE) {
+			spin_unlock_irq(&dev->lock);
+			ret = -ENODEV;
+			goto out;
+		}
+		dev->state = STATE_BUSY;
+		spin_unlock_irq(&dev->lock);
+
+		if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) {
+			ret = -EFAULT;
+			goto fail;
+		}
+		/* hold a reference to the file while we are working with it */
+		filp = fget(mfr.fd);
+		if (!filp) {
+			ret = -EBADF;
+			goto fail;
+		}
+
+		/* write the parameters */
+		dev->xfer_file = filp;
+		dev->xfer_file_offset = mfr.offset;
+		dev->xfer_file_length = mfr.length;
+		smp_wmb();
+
+		if (code == MTP_SEND_FILE_WITH_HEADER) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 1;
+			dev->xfer_command = mfr.command;
+			dev->xfer_transaction_id = mfr.transaction_id;
+		} else if (code == MTP_SEND_FILE) {
+			work = &dev->send_file_work;
+			dev->xfer_send_header = 0;
+		} else {
+			work = &dev->receive_file_work;
+		}
+
+		/* We do the file transfer on a work queue so it will run
+		 * in kernel context, which is necessary for vfs_read and
+		 * vfs_write to use our buffers in the kernel address space.
+		 */
+		queue_work(dev->wq, work);
+		/* wait for operation to complete */
+		flush_workqueue(dev->wq);
+		fput(filp);
+
+		/* read the result */
+		smp_rmb();
+		ret = dev->xfer_result;
+		break;
+	}
+	case MTP_SEND_EVENT:
+	{
+		struct mtp_event	event;
+		/* return here so we don't change dev->state below,
+		 * which would interfere with bulk transfer state.
+		 */
+		if (copy_from_user(&event, (void __user *)value, sizeof(event)))
+			ret = -EFAULT;
+		else
+			ret = mtp_send_event(dev, &event);
+		goto out;
+	}
+	}
+
+fail:
+	spin_lock_irq(&dev->lock);
+	if (dev->state == STATE_CANCELED)
+		ret = -ECANCELED;
+	else if (dev->state != STATE_OFFLINE)
+		dev->state = STATE_READY;
+	spin_unlock_irq(&dev->lock);
+out:
+	mtp_unlock(&dev->ioctl_excl);
+	DBG(dev->cdev, "ioctl returning %d\n", ret);
+	return ret;
+}
+
+static int mtp_open(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_open\n");
+	if (mtp_lock(&_mtp_dev->open_excl))
+		return -EBUSY;
+
+	/* clear any error condition */
+	if (_mtp_dev->state != STATE_OFFLINE)
+		_mtp_dev->state = STATE_READY;
+
+	fp->private_data = _mtp_dev;
+	return 0;
+}
+
+static int mtp_release(struct inode *ip, struct file *fp)
+{
+	printk(KERN_INFO "mtp_release\n");
+
+	mtp_unlock(&_mtp_dev->open_excl);
+	return 0;
+}
+
+/* file operations for /dev/mtp_usb */
+static const struct file_operations mtp_fops = {
+	.owner = THIS_MODULE,
+	.read = mtp_read,
+	.write = mtp_write,
+	.unlocked_ioctl = mtp_ioctl,
+	.open = mtp_open,
+	.release = mtp_release,
+};
+
+static struct miscdevice mtp_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = mtp_shortname,
+	.fops = &mtp_fops,
+};
+
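+/*
+ * ep0 control request handler shared by MTP and PTP: answers the Microsoft
+ * OS string/descriptor requests and the MTP class CANCEL and
+ * GET_DEVICE_STATUS requests.
+ */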
+static int mtp_ctrlrequest(struct usb_composite_dev *cdev,
+				const struct usb_ctrlrequest *ctrl)
+{
+	struct mtp_dev *dev = _mtp_dev;
+	int	value = -EOPNOTSUPP;
+	u16	w_index = le16_to_cpu(ctrl->wIndex);
+	u16	w_value = le16_to_cpu(ctrl->wValue);
+	u16	w_length = le16_to_cpu(ctrl->wLength);
+	unsigned long	flags;
+
+	VDBG(cdev, "mtp_ctrlrequest "
+			"%02x.%02x v%04x i%04x l%u\n",
+			ctrl->bRequestType, ctrl->bRequest,
+			w_value, w_index, w_length);
+
+	/* Handle MTP OS string */
+	if (ctrl->bRequestType ==
+			(USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE)
+			&& ctrl->bRequest == USB_REQ_GET_DESCRIPTOR
+			&& (w_value >> 8) == USB_DT_STRING
+			&& (w_value & 0xFF) == MTP_OS_STRING_ID) {
+		value = (w_length < sizeof(mtp_os_string)
+				? w_length : sizeof(mtp_os_string));
+		memcpy(cdev->req->buf, mtp_os_string, value);
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) {
+		/* Handle MTP OS descriptor */
+		DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == 1
+				&& (ctrl->bRequestType & USB_DIR_IN)
+				&& (w_index == 4 || w_index == 5)) {
+			value = (w_length < sizeof(mtp_ext_config_desc) ?
+					w_length : sizeof(mtp_ext_config_desc));
+			memcpy(cdev->req->buf, &mtp_ext_config_desc, value);
+		}
+	} else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) {
+		DBG(cdev, "class request: %d index: %d value: %d length: %d\n",
+			ctrl->bRequest, w_index, w_value, w_length);
+
+		if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0
+				&& w_value == 0) {
+			DBG(cdev, "MTP_REQ_CANCEL\n");
+
+			spin_lock_irqsave(&dev->lock, flags);
+			if (dev->state == STATE_BUSY) {
+				dev->state = STATE_CANCELED;
+				wake_up(&dev->read_wq);
+				wake_up(&dev->write_wq);
+			}
+			spin_unlock_irqrestore(&dev->lock, flags);
+
+			/* We need to queue a request to read the remaining
+			 *  bytes, but we don't actually need to look at
+			 * the contents.
+			 */
+			value = w_length;
+		} else if (ctrl->bRequest == MTP_REQ_GET_DEVICE_STATUS
+				&& w_index == 0 && w_value == 0) {
+			struct mtp_device_status *status = cdev->req->buf;
+			status->wLength =
+				__constant_cpu_to_le16(sizeof(*status));
+
+			DBG(cdev, "MTP_REQ_GET_DEVICE_STATUS\n");
+			spin_lock_irqsave(&dev->lock, flags);
+			/* device status is "busy" until we report
+			 * the cancelation to userspace
+			 */
+			if (dev->state == STATE_CANCELED)
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_DEVICE_BUSY);
+			else
+				status->wCode =
+					__cpu_to_le16(MTP_RESPONSE_OK);
+			spin_unlock_irqrestore(&dev->lock, flags);
+			value = sizeof(*status);
+		}
+	}
+
+	/* respond with data transfer or status phase? */
+	if (value >= 0) {
+		int rc;
+		cdev->req->zero = value < w_length;
+		cdev->req->length = value;
+		rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC);
+		if (rc < 0)
+			ERROR(cdev, "%s: response queue error\n", __func__);
+	}
+	return value;
+}
+
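+/* allocate interface/string IDs, endpoints and speed-specific descriptors */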
+static int
+mtp_function_bind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct usb_composite_dev *cdev = c->cdev;
+	struct mtp_dev	*dev = func_to_mtp(f);
+	int			id;
+	int			ret;
+	struct mtp_instance *fi_mtp;
+
+	dev->cdev = cdev;
+	DBG(cdev, "mtp_function_bind dev: %p\n", dev);
+
+	/* allocate interface ID(s) */
+	id = usb_interface_id(c, f);
+	if (id < 0)
+		return id;
+	mtp_interface_desc.bInterfaceNumber = id;
+
+	if (mtp_string_defs[INTERFACE_STRING_INDEX].id == 0) {
+		ret = usb_string_id(c->cdev);
+		if (ret < 0)
+			return ret;
+		mtp_string_defs[INTERFACE_STRING_INDEX].id = ret;
+		mtp_interface_desc.iInterface = ret;
+	}
+
+	fi_mtp = container_of(f->fi, struct mtp_instance, func_inst);
+
+	if (cdev->use_os_string) {
+		f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
+					GFP_KERNEL);
+		if (!f->os_desc_table)
+			return -ENOMEM;
+		f->os_desc_n = 1;
+		f->os_desc_table[0].os_desc = &fi_mtp->mtp_os_desc;
+	}
+
+	/* allocate endpoints */
+	ret = mtp_create_bulk_endpoints(dev, &mtp_fullspeed_in_desc,
+			&mtp_fullspeed_out_desc, &mtp_intr_desc);
+	if (ret)
+		return ret;
+
+	/* support high speed hardware */
+	if (gadget_is_dualspeed(c->cdev->gadget)) {
+		mtp_highspeed_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_highspeed_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+	}
+	/* support super speed hardware */
+	if (gadget_is_superspeed(c->cdev->gadget)) {
+		unsigned max_burst;
+
+		/* Calculate bMaxBurst, we know packet size is 1024 */
+		max_burst = min_t(unsigned, MTP_BULK_BUFFER_SIZE / 1024, 15);
+		mtp_ss_in_desc.bEndpointAddress =
+			mtp_fullspeed_in_desc.bEndpointAddress;
+		mtp_ss_in_comp_desc.bMaxBurst = max_burst;
+		mtp_ss_out_desc.bEndpointAddress =
+			mtp_fullspeed_out_desc.bEndpointAddress;
+		mtp_ss_out_comp_desc.bMaxBurst = max_burst;
+	}
+
+	DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n",
+		gadget_is_superspeed(c->cdev->gadget) ? "super" :
+		(gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full"),
+		f->name, dev->ep_in->name, dev->ep_out->name);
+	return 0;
+}
+
+static void
+mtp_function_unbind(struct usb_configuration *c, struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_request *req;
+	int i;
+
+	mtp_string_defs[INTERFACE_STRING_INDEX].id = 0;
+	while ((req = mtp_req_get(dev, &dev->tx_idle)))
+		mtp_request_free(req, dev->ep_in);
+	for (i = 0; i < RX_REQ_MAX; i++)
+		mtp_request_free(dev->rx_req[i], dev->ep_out);
+	while ((req = mtp_req_get(dev, &dev->intr_idle)))
+		mtp_request_free(req, dev->ep_intr);
+	dev->state = STATE_OFFLINE;
+	kfree(f->os_desc_table);
+	f->os_desc_n = 0;
+}
+
+static int mtp_function_set_alt(struct usb_function *f,
+		unsigned intf, unsigned alt)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev *cdev = f->config->cdev;
+	int ret;
+
+	DBG(cdev, "mtp_function_set_alt intf: %d alt: %d\n", intf, alt);
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_in);
+	if (ret)
+		return ret;
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_out);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_out);
+	if (ret) {
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+
+	ret = config_ep_by_speed(cdev->gadget, f, dev->ep_intr);
+	if (ret)
+		return ret;
+
+	ret = usb_ep_enable(dev->ep_intr);
+	if (ret) {
+		usb_ep_disable(dev->ep_out);
+		usb_ep_disable(dev->ep_in);
+		return ret;
+	}
+	dev->state = STATE_READY;
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+	return 0;
+}
+
+static void mtp_function_disable(struct usb_function *f)
+{
+	struct mtp_dev	*dev = func_to_mtp(f);
+	struct usb_composite_dev	*cdev = dev->cdev;
+
+	DBG(cdev, "mtp_function_disable\n");
+	dev->state = STATE_OFFLINE;
+	usb_ep_disable(dev->ep_in);
+	usb_ep_disable(dev->ep_out);
+	usb_ep_disable(dev->ep_intr);
+
+	/* readers may be blocked waiting for us to go online */
+	wake_up(&dev->read_wq);
+
+	VDBG(cdev, "%s disabled\n", dev->function.name);
+}
+
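+/* allocate the singleton mtp_dev, its workqueue and the mtp_usb misc device */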
+static int __mtp_setup(struct mtp_instance *fi_mtp)
+{
+	struct mtp_dev *dev;
+	int ret;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+
+	if (fi_mtp != NULL)
+		fi_mtp->dev = dev;
+
+	if (!dev)
+		return -ENOMEM;
+
+	spin_lock_init(&dev->lock);
+	init_waitqueue_head(&dev->read_wq);
+	init_waitqueue_head(&dev->write_wq);
+	init_waitqueue_head(&dev->intr_wq);
+	atomic_set(&dev->open_excl, 0);
+	atomic_set(&dev->ioctl_excl, 0);
+	INIT_LIST_HEAD(&dev->tx_idle);
+	INIT_LIST_HEAD(&dev->intr_idle);
+
+	dev->wq = create_singlethread_workqueue("f_mtp");
+	if (!dev->wq) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+	INIT_WORK(&dev->send_file_work, send_file_work);
+	INIT_WORK(&dev->receive_file_work, receive_file_work);
+
+	_mtp_dev = dev;
+
+	ret = misc_register(&mtp_device);
+	if (ret)
+		goto err2;
+
+	return 0;
+
+err2:
+	destroy_workqueue(dev->wq);
+err1:
+	_mtp_dev = NULL;
+	kfree(dev);
+	printk(KERN_ERR "mtp gadget driver failed to initialize\n");
+	return ret;
+}
+
+static int mtp_setup_configfs(struct mtp_instance *fi_mtp)
+{
+	return __mtp_setup(fi_mtp);
+}
+
+
+static void mtp_cleanup(void)
+{
+	struct mtp_dev *dev = _mtp_dev;
+
+	if (!dev)
+		return;
+
+	misc_deregister(&mtp_device);
+	destroy_workqueue(dev->wq);
+	_mtp_dev = NULL;
+	kfree(dev);
+}
+
+static struct mtp_instance *to_mtp_instance(struct config_item *item)
+{
+	return container_of(to_config_group(item), struct mtp_instance,
+		func_inst.group);
+}
+
+static void mtp_attr_release(struct config_item *item)
+{
+	struct mtp_instance *fi_mtp = to_mtp_instance(item);
+	usb_put_function_instance(&fi_mtp->func_inst);
+}
+
+static struct configfs_item_operations mtp_item_ops = {
+	.release        = mtp_attr_release,
+};
+
+static struct config_item_type mtp_func_type = {
+	.ct_item_ops    = &mtp_item_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+
+static struct mtp_instance *to_fi_mtp(struct usb_function_instance *fi)
+{
+	return container_of(fi, struct mtp_instance, func_inst);
+}
+
+static int mtp_set_inst_name(struct usb_function_instance *fi, const char *name)
+{
+	struct mtp_instance *fi_mtp;
+	char *ptr;
+	int name_len;
+
+	name_len = strlen(name) + 1;
+	if (name_len > MAX_INST_NAME_LEN)
+		return -ENAMETOOLONG;
+
+	ptr = kstrndup(name, name_len, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	fi_mtp = to_fi_mtp(fi);
+	fi_mtp->name = ptr;
+
+	return 0;
+}
+
+static void mtp_free_inst(struct usb_function_instance *fi)
+{
+	struct mtp_instance *fi_mtp;
+
+	fi_mtp = to_fi_mtp(fi);
+	kfree(fi_mtp->name);
+	mtp_cleanup();
+	kfree(fi_mtp);
+}
+
+struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config)
+{
+	struct mtp_instance *fi_mtp;
+	int ret = 0;
+	struct usb_os_desc *descs[1];
+	char *names[1];
+
+	fi_mtp = kzalloc(sizeof(*fi_mtp), GFP_KERNEL);
+	if (!fi_mtp)
+		return ERR_PTR(-ENOMEM);
+	fi_mtp->func_inst.set_inst_name = mtp_set_inst_name;
+	fi_mtp->func_inst.free_func_inst = mtp_free_inst;
+
+	fi_mtp->mtp_os_desc.ext_compat_id = fi_mtp->mtp_ext_compat_id;
+	INIT_LIST_HEAD(&fi_mtp->mtp_os_desc.ext_prop);
+	descs[0] = &fi_mtp->mtp_os_desc;
+	names[0] = "MTP";
+
+	if (mtp_config) {
+		ret = mtp_setup_configfs(fi_mtp);
+		if (ret) {
+			kfree(fi_mtp);
+			pr_err("Error setting MTP\n");
+			return ERR_PTR(ret);
+		}
+	} else
+		fi_mtp->dev = _mtp_dev;
+
+	config_group_init_type_name(&fi_mtp->func_inst.group,
+					"", &mtp_func_type);
+	usb_os_desc_prepare_interf_dir(&fi_mtp->func_inst.group, 1,
+					descs, names, THIS_MODULE);
+
+	return  &fi_mtp->func_inst;
+}
+EXPORT_SYMBOL_GPL(alloc_inst_mtp_ptp);
+
+static struct usb_function_instance *mtp_alloc_inst(void)
+{
+		return alloc_inst_mtp_ptp(true);
+}
+
+static int mtp_ctrlreq_configfs(struct usb_function *f,
+				const struct usb_ctrlrequest *ctrl)
+{
+	return mtp_ctrlrequest(f->config->cdev, ctrl);
+}
+
+static void mtp_free(struct usb_function *f)
+{
+	/*NO-OP: no function specific resource allocation in mtp_alloc*/
+}
+
+struct usb_function *function_alloc_mtp_ptp(struct usb_function_instance *fi,
+					bool mtp_config)
+{
+	struct mtp_instance *fi_mtp = to_fi_mtp(fi);
+	struct mtp_dev *dev;
+
+	/*
+	 * PTP piggybacks on MTP function so make sure we have
+	 * created MTP function before we associate this PTP
+	 * function with a gadget configuration.
+	 */
+	if (fi_mtp->dev == NULL) {
+		pr_err("Error: Create MTP function before linking"
+				" PTP function with a gadget configuration\n");
+		pr_err("\t1: Delete existing PTP function if any\n");
+		pr_err("\t2: Create MTP function\n");
+		pr_err("\t3: Create and symlink PTP function"
+				" with a gadget configuration\n");
+		return NULL;
+	}
+
+	dev = fi_mtp->dev;
+	dev->function.name = DRIVER_NAME;
+	dev->function.strings = mtp_strings;
+	if (mtp_config) {
+		dev->function.fs_descriptors = fs_mtp_descs;
+		dev->function.hs_descriptors = hs_mtp_descs;
+		dev->function.ss_descriptors = ss_mtp_descs;
+	} else {
+		dev->function.fs_descriptors = fs_ptp_descs;
+		dev->function.hs_descriptors = hs_ptp_descs;
+		dev->function.ss_descriptors = ss_ptp_descs;
+	}
+	dev->function.bind = mtp_function_bind;
+	dev->function.unbind = mtp_function_unbind;
+	dev->function.set_alt = mtp_function_set_alt;
+	dev->function.disable = mtp_function_disable;
+	dev->function.setup = mtp_ctrlreq_configfs;
+	dev->function.free_func = mtp_free;
+
+	return &dev->function;
+}
+EXPORT_SYMBOL_GPL(function_alloc_mtp_ptp);
+
+static struct usb_function *mtp_alloc(struct usb_function_instance *fi)
+{
+	return function_alloc_mtp_ptp(fi, true);
+}
+
+DECLARE_USB_FUNCTION_INIT(mtp, mtp_alloc_inst, mtp_alloc);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/gadget/function/f_mtp.h b/drivers/usb/gadget/function/f_mtp.h
new file mode 100644
index 0000000..7adb1ff
--- /dev/null
+++ b/drivers/usb/gadget/function/f_mtp.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern struct usb_function_instance *alloc_inst_mtp_ptp(bool mtp_config);
+extern struct usb_function *function_alloc_mtp_ptp(
+			struct usb_function_instance *fi, bool mtp_config);
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 6396037..e837536 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -335,7 +335,6 @@
 	NULL,
 };
 
-
 /* super speed support: */
 
 static struct usb_endpoint_descriptor ss_ncm_notify_desc = {
diff --git a/drivers/usb/gadget/function/f_ptp.c b/drivers/usb/gadget/function/f_ptp.c
new file mode 100644
index 0000000..da3e4d5
--- /dev/null
+++ b/drivers/usb/gadget/function/f_ptp.c
@@ -0,0 +1,38 @@
+/*
+ * Gadget Function Driver for PTP
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * Author: Badhri Jagan Sridharan <badhri@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+
+#include <linux/configfs.h>
+#include <linux/usb/composite.h>
+
+#include "f_mtp.h"
+
+static struct usb_function_instance *ptp_alloc_inst(void)
+{
+	return alloc_inst_mtp_ptp(false);
+}
+
+static struct usb_function *ptp_alloc(struct usb_function_instance *fi)
+{
+	return function_alloc_mtp_ptp(fi, false);
+}
+
+DECLARE_USB_FUNCTION_INIT(ptp, ptp_alloc_inst, ptp_alloc);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Badhri Jagan Sridharan");
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index 16562e4..b899ced 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -70,6 +70,16 @@
  *   - MS-Windows drivers sometimes emit undocumented requests.
  */
 
+static unsigned int rndis_dl_max_pkt_per_xfer = 3;
+module_param(rndis_dl_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_dl_max_pkt_per_xfer,
+	"Maximum packets per transfer for DL aggregation");
+
+static unsigned int rndis_ul_max_pkt_per_xfer = 3;
+module_param(rndis_ul_max_pkt_per_xfer, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer,
+       "Maximum packets per transfer for UL aggregation");
+
 struct f_rndis {
 	struct gether			port;
 	u8				ctrl_id, data_id;
@@ -452,7 +462,9 @@
 static void rndis_command_complete(struct usb_ep *ep, struct usb_request *req)
 {
 	struct f_rndis			*rndis = req->context;
+	struct usb_composite_dev	*cdev = rndis->port.func.config->cdev;
 	int				status;
+	rndis_init_msg_type		*buf;
 
 	/* received RNDIS command from USB_CDC_SEND_ENCAPSULATED_COMMAND */
 //	spin_lock(&dev->lock);
@@ -460,6 +472,21 @@
 	if (status < 0)
 		pr_err("RNDIS command error %d, %d/%d\n",
 			status, req->actual, req->length);
+
+	buf = (rndis_init_msg_type *)req->buf;
+
+	if (buf->MessageType == RNDIS_MSG_INIT) {
+		if (buf->MaxTransferSize > 2048)
+			rndis->port.multi_pkt_xfer = 1;
+		else
+			rndis->port.multi_pkt_xfer = 0;
+		DBG(cdev, "%s: MaxTransferSize: %d : Multi_pkt_txr: %s\n",
+				__func__, buf->MaxTransferSize,
+				rndis->port.multi_pkt_xfer ? "enabled" :
+							    "disabled");
+		if (rndis_dl_max_pkt_per_xfer <= 1)
+			rndis->port.multi_pkt_xfer = 0;
+	}
 //	spin_unlock(&dev->lock);
 }
 
@@ -795,6 +822,7 @@
 
 	rndis_set_param_medium(rndis->params, RNDIS_MEDIUM_802_3, 0);
 	rndis_set_host_mac(rndis->params, rndis->ethaddr);
+	rndis_set_max_pkt_xfer(rndis->params, rndis_ul_max_pkt_per_xfer);
 
 	if (rndis->manufacturer && rndis->vendorID &&
 			rndis_set_param_vendor(rndis->params, rndis->vendorID,
@@ -980,6 +1008,8 @@
 	rndis->port.header_len = sizeof(struct rndis_packet_msg_type);
 	rndis->port.wrap = rndis_add_header;
 	rndis->port.unwrap = rndis_rm_hdr;
+	rndis->port.ul_max_pkts_per_xfer = rndis_ul_max_pkt_per_xfer;
+	rndis->port.dl_max_pkts_per_xfer = rndis_dl_max_pkt_per_xfer;
 
 	rndis->port.func.name = "rndis";
 	/* descriptors are per-instance copies */
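
For reference, rndis_command_complete() above enables downlink aggregation only when the host's REMOTE_NDIS_INITIALIZE_MSG advertises a MaxTransferSize above 2048 bytes and the rndis_dl_max_pkt_per_xfer module parameter allows more than one packet per transfer. A standalone sketch of that decision (the helper name is made up for illustration):

/* Sketch only: mirrors the multi_pkt_xfer decision added to
 * rndis_command_complete() above. */
static bool rndis_use_dl_aggregation(unsigned int host_max_transfer_size,
				     unsigned int dl_max_pkt_per_xfer)
{
	/* A window of 2048 bytes or less fits roughly one Ethernet frame,
	 * so aggregation buys nothing there. */
	if (host_max_transfer_size <= 2048)
		return false;

	/* Aggregation is also pointless if the module parameter limits
	 * the gadget to one packet per transfer. */
	return dl_max_pkt_per_xfer > 1;
}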
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ab6ac1b..418a2fd 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -42,6 +42,16 @@
 
 #include "rndis.h"
 
+int rndis_ul_max_pkt_per_xfer_rcvd;
+module_param(rndis_ul_max_pkt_per_xfer_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_pkt_per_xfer_rcvd,
+		"Max num of REMOTE_NDIS_PACKET_MSGs received in a single transfer");
+
+int rndis_ul_max_xfer_size_rcvd;
+module_param(rndis_ul_max_xfer_size_rcvd, int, S_IRUGO);
+MODULE_PARM_DESC(rndis_ul_max_xfer_size_rcvd,
+		"Max size of bus transfer received");
+
 
 /* The driver for your USB chip needs to support ep0 OUT to work with
  * RNDIS, plus all three CDC Ethernet endpoints (interrupt not optional).
@@ -579,12 +589,12 @@
 	resp->MinorVersion = cpu_to_le32(RNDIS_MINOR_VERSION);
 	resp->DeviceFlags = cpu_to_le32(RNDIS_DF_CONNECTIONLESS);
 	resp->Medium = cpu_to_le32(RNDIS_MEDIUM_802_3);
-	resp->MaxPacketsPerTransfer = cpu_to_le32(1);
-	resp->MaxTransferSize = cpu_to_le32(
-		  params->dev->mtu
+	resp->MaxPacketsPerTransfer = cpu_to_le32(params->max_pkt_per_xfer);
+	resp->MaxTransferSize = cpu_to_le32(params->max_pkt_per_xfer *
+		(params->dev->mtu
 		+ sizeof(struct ethhdr)
 		+ sizeof(struct rndis_packet_msg_type)
-		+ 22);
+		+ 22));
 	resp->PacketAlignmentFactor = cpu_to_le32(0);
 	resp->AFListOffset = cpu_to_le32(0);
 	resp->AFListSize = cpu_to_le32(0);
@@ -963,6 +973,8 @@
 	params->dev = dev;
 	params->filter = cdc_filter;
 
+	rndis_ul_max_xfer_size_rcvd = 0;
+	rndis_ul_max_pkt_per_xfer_rcvd = 0;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rndis_set_param_dev);
@@ -995,6 +1007,13 @@
 }
 EXPORT_SYMBOL_GPL(rndis_set_param_medium);
 
+void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer)
+{
+	pr_debug("%s:\n", __func__);
+
+	params->max_pkt_per_xfer = max_pkt_per_xfer;
+}
+
 void rndis_add_hdr(struct sk_buff *skb)
 {
 	struct rndis_packet_msg_type *header;
@@ -1061,23 +1080,73 @@
 			struct sk_buff *skb,
 			struct sk_buff_head *list)
 {
-	/* tmp points to a struct rndis_packet_msg_type */
-	__le32 *tmp = (void *)skb->data;
+	int num_pkts = 1;
 
-	/* MessageType, MessageLength */
-	if (cpu_to_le32(RNDIS_MSG_PACKET)
-			!= get_unaligned(tmp++)) {
-		dev_kfree_skb_any(skb);
-		return -EINVAL;
-	}
-	tmp++;
+	if (skb->len > rndis_ul_max_xfer_size_rcvd)
+		rndis_ul_max_xfer_size_rcvd = skb->len;
 
-	/* DataOffset, DataLength */
-	if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
-		dev_kfree_skb_any(skb);
-		return -EOVERFLOW;
+	while (skb->len) {
+		struct rndis_packet_msg_type *hdr;
+		struct sk_buff          *skb2;
+		u32             msg_len, data_offset, data_len;
+
+		/* some rndis hosts send extra byte to avoid zlp, ignore it */
+		if (skb->len == 1) {
+			dev_kfree_skb_any(skb);
+			return 0;
+		}
+
+		if (skb->len < sizeof *hdr) {
+			pr_err("invalid rndis pkt: skblen:%u hdr_len:%zu",
+					skb->len, sizeof *hdr);
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		hdr = (void *)skb->data;
+		msg_len = le32_to_cpu(hdr->MessageLength);
+		data_offset = le32_to_cpu(hdr->DataOffset);
+		data_len = le32_to_cpu(hdr->DataLength);
+
+		if (skb->len < msg_len ||
+				((data_offset + data_len + 8) > msg_len)) {
+			pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+					le32_to_cpu(hdr->MessageType),
+					msg_len, data_offset, data_len, skb->len);
+			dev_kfree_skb_any(skb);
+			return -EOVERFLOW;
+		}
+		if (le32_to_cpu(hdr->MessageType) != RNDIS_MSG_PACKET) {
+			pr_err("invalid rndis message: %d/%d/%d/%d, len:%d\n",
+					le32_to_cpu(hdr->MessageType),
+					msg_len, data_offset, data_len, skb->len);
+			dev_kfree_skb_any(skb);
+			return -EINVAL;
+		}
+
+		skb_pull(skb, data_offset + 8);
+
+		if (msg_len == skb->len) {
+			skb_trim(skb, data_len);
+			break;
+		}
+
+		skb2 = skb_clone(skb, GFP_ATOMIC);
+		if (!skb2) {
+			pr_err("%s:skb clone failed\n", __func__);
+			dev_kfree_skb_any(skb);
+			return -ENOMEM;
+		}
+
+		skb_pull(skb, msg_len - sizeof *hdr);
+		skb_trim(skb2, data_len);
+		skb_queue_tail(list, skb2);
+
+		num_pkts++;
 	}
-	skb_trim(skb, get_unaligned_le32(tmp++));
+
+	if (num_pkts > rndis_ul_max_pkt_per_xfer_rcvd)
+		rndis_ul_max_pkt_per_xfer_rcvd = num_pkts;
 
 	skb_queue_tail(list, skb);
 	return 0;
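
For reference, the rewritten rndis_rm_hdr() above walks an aggregated OUT transfer that may carry several REMOTE_NDIS_PACKET_MSGs back to back. The sketch below labels the header fields it relies on and why the code pulls DataOffset + 8 bytes; the struct name is illustrative only, the real layout is rndis_packet_msg_type from rndis.h.

/* Illustrative layout of one message inside an aggregated OUT transfer. */
struct rndis_pkt_msg_sketch {
	__le32 MessageType;	/* RNDIS_MSG_PACKET, at byte 0             */
	__le32 MessageLength;	/* whole message incl. payload, at byte 4  */
	__le32 DataOffset;	/* payload offset measured from byte 8     */
	__le32 DataLength;	/* payload length, at byte 12              */
	/* per-packet info / OOB descriptors follow, then the payload */
};

/*
 * For a message starting at offset "msg" within the transfer:
 *	payload  = msg + 8 + DataOffset;   (hence skb_pull(skb, data_offset + 8))
 *	next msg = msg + MessageLength;
 * The loop clones the payload of every message except the last, which is
 * trimmed in place and queued as the final skb.
 */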
diff --git a/drivers/usb/gadget/function/rndis.h b/drivers/usb/gadget/function/rndis.h
index ef92eb6..310cac3 100644
--- a/drivers/usb/gadget/function/rndis.h
+++ b/drivers/usb/gadget/function/rndis.h
@@ -190,6 +190,7 @@
 	struct net_device	*dev;
 
 	u32			vendorID;
+	u8			max_pkt_per_xfer;
 	const char		*vendorDescr;
 	void			(*resp_avail)(void *v);
 	void			*v;
@@ -206,6 +207,7 @@
 			    const char *vendorDescr);
 int  rndis_set_param_medium(struct rndis_params *params, u32 medium,
 			     u32 speed);
+void rndis_set_max_pkt_xfer(struct rndis_params *params, u8 max_pkt_per_xfer);
 void rndis_add_hdr(struct sk_buff *skb);
 int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
 			struct sk_buff_head *list);
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 5d1bd13..cb1ecfa 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -53,6 +53,8 @@
  * blocks and still have efficient handling. */
 #define GETHER_MAX_ETH_FRAME_LEN 15412
 
+static struct workqueue_struct	*uether_wq;
+
 struct eth_dev {
 	/* lock is held while accessing port_usb
 	 */
@@ -64,19 +66,27 @@
 
 	spinlock_t		req_lock;	/* guard {rx,tx}_reqs */
 	struct list_head	tx_reqs, rx_reqs;
-	atomic_t		tx_qlen;
+	unsigned		tx_qlen;
+/* Minimum number of TX USB request queued to UDC */
+#define TX_REQ_THRESHOLD	5
+	int			no_tx_req_used;
+	int			tx_skb_hold_count;
+	u32			tx_req_bufsize;
 
 	struct sk_buff_head	rx_frames;
 
 	unsigned		qmult;
 
 	unsigned		header_len;
+	unsigned		ul_max_pkts_per_xfer;
+	unsigned		dl_max_pkts_per_xfer;
 	struct sk_buff		*(*wrap)(struct gether *, struct sk_buff *skb);
 	int			(*unwrap)(struct gether *,
 						struct sk_buff *skb,
 						struct sk_buff_head *list);
 
 	struct work_struct	work;
+	struct work_struct	rx_work;
 
 	unsigned long		todo;
 #define	WORK_RX_MEMORY		0
@@ -221,9 +231,13 @@
 	size += out->maxpacket - 1;
 	size -= size % out->maxpacket;
 
+	if (dev->ul_max_pkts_per_xfer)
+		size *= dev->ul_max_pkts_per_xfer;
+
 	if (dev->port_usb->is_fixed)
 		size = max_t(size_t, size, dev->port_usb->fixed_out_len);
 
+	DBG(dev, "%s: size: %zd\n", __func__, size);
 	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
 	if (skb == NULL) {
 		DBG(dev, "no rx skb\n");
@@ -250,18 +264,16 @@
 		DBG(dev, "rx submit --> %d\n", retval);
 		if (skb)
 			dev_kfree_skb_any(skb);
-		spin_lock_irqsave(&dev->req_lock, flags);
-		list_add(&req->list, &dev->rx_reqs);
-		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
 	return retval;
 }
 
 static void rx_complete(struct usb_ep *ep, struct usb_request *req)
 {
-	struct sk_buff	*skb = req->context, *skb2;
+	struct sk_buff	*skb = req->context;
 	struct eth_dev	*dev = ep->driver_data;
 	int		status = req->status;
+	bool		queue = 0;
 
 	switch (status) {
 
@@ -277,6 +289,10 @@
 				status = dev->unwrap(dev->port_usb,
 							skb,
 							&dev->rx_frames);
+				if (status == -EINVAL)
+					dev->net->stats.rx_errors++;
+				else if (status == -EOVERFLOW)
+					dev->net->stats.rx_over_errors++;
 			} else {
 				dev_kfree_skb_any(skb);
 				status = -ENOTCONN;
@@ -285,30 +301,8 @@
 		} else {
 			skb_queue_tail(&dev->rx_frames, skb);
 		}
-		skb = NULL;
-
-		skb2 = skb_dequeue(&dev->rx_frames);
-		while (skb2) {
-			if (status < 0
-					|| ETH_HLEN > skb2->len
-					|| skb2->len > GETHER_MAX_ETH_FRAME_LEN) {
-				dev->net->stats.rx_errors++;
-				dev->net->stats.rx_length_errors++;
-				DBG(dev, "rx length %d\n", skb2->len);
-				dev_kfree_skb_any(skb2);
-				goto next_frame;
-			}
-			skb2->protocol = eth_type_trans(skb2, dev->net);
-			dev->net->stats.rx_packets++;
-			dev->net->stats.rx_bytes += skb2->len;
-
-			/* no buffer copies needed, unless hardware can't
-			 * use skb buffers.
-			 */
-			status = netif_rx(skb2);
-next_frame:
-			skb2 = skb_dequeue(&dev->rx_frames);
-		}
+		if (!status)
+			queue = 1;
 		break;
 
 	/* software-driven interface shutdown */
@@ -331,22 +325,20 @@
 		/* FALLTHROUGH */
 
 	default:
+		queue = 1;
+		dev_kfree_skb_any(skb);
 		dev->net->stats.rx_errors++;
 		DBG(dev, "rx status %d\n", status);
 		break;
 	}
 
-	if (skb)
-		dev_kfree_skb_any(skb);
-	if (!netif_running(dev->net)) {
 clean:
-		spin_lock(&dev->req_lock);
-		list_add(&req->list, &dev->rx_reqs);
-		spin_unlock(&dev->req_lock);
-		req = NULL;
-	}
-	if (req)
-		rx_submit(dev, req, GFP_ATOMIC);
+	spin_lock(&dev->req_lock);
+	list_add(&req->list, &dev->rx_reqs);
+	spin_unlock(&dev->req_lock);
+
+	if (queue)
+		queue_work(uether_wq, &dev->rx_work);
 }
 
 static int prealloc(struct list_head *list, struct usb_ep *ep, unsigned n)
@@ -411,16 +403,24 @@
 {
 	struct usb_request	*req;
 	unsigned long		flags;
+	int			req_cnt = 0;
 
 	/* fill unused rxq slots with some skb */
 	spin_lock_irqsave(&dev->req_lock, flags);
 	while (!list_empty(&dev->rx_reqs)) {
+		/* break the nexus of continuous completion and re-submission */
+		if (++req_cnt > qlen(dev->gadget, dev->qmult))
+			break;
+
 		req = container_of(dev->rx_reqs.next,
 				struct usb_request, list);
 		list_del_init(&req->list);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 
 		if (rx_submit(dev, req, gfp_flags) < 0) {
+			spin_lock_irqsave(&dev->req_lock, flags);
+			list_add(&req->list, &dev->rx_reqs);
+			spin_unlock_irqrestore(&dev->req_lock, flags);
 			defer_kevent(dev, WORK_RX_MEMORY);
 			return;
 		}
@@ -430,6 +430,36 @@
 	spin_unlock_irqrestore(&dev->req_lock, flags);
 }
 
+static void process_rx_w(struct work_struct *work)
+{
+	struct eth_dev	*dev = container_of(work, struct eth_dev, rx_work);
+	struct sk_buff	*skb;
+	int		status = 0;
+
+	if (!dev->port_usb)
+		return;
+
+	while ((skb = skb_dequeue(&dev->rx_frames))) {
+		if (status < 0
+				|| ETH_HLEN > skb->len
+				|| skb->len > ETH_FRAME_LEN) {
+			dev->net->stats.rx_errors++;
+			dev->net->stats.rx_length_errors++;
+			DBG(dev, "rx length %d\n", skb->len);
+			dev_kfree_skb_any(skb);
+			continue;
+		}
+		skb->protocol = eth_type_trans(skb, dev->net);
+		dev->net->stats.rx_packets++;
+		dev->net->stats.rx_bytes += skb->len;
+
+		status = netif_rx_ni(skb);
+	}
+
+	if (netif_running(dev->net))
+		rx_fill(dev, GFP_KERNEL);
+}
+
 static void eth_work(struct work_struct *work)
 {
 	struct eth_dev	*dev = container_of(work, struct eth_dev, work);
@@ -447,6 +477,11 @@
 {
 	struct sk_buff	*skb = req->context;
 	struct eth_dev	*dev = ep->driver_data;
+	struct net_device *net = dev->net;
+	struct usb_request *new_req;
+	struct usb_ep *in;
+	int length;
+	int retval;
 
 	switch (req->status) {
 	default:
@@ -457,16 +492,74 @@
 	case -ESHUTDOWN:		/* disconnect etc */
 		break;
 	case 0:
-		dev->net->stats.tx_bytes += skb->len;
+		if (!req->zero)
+			dev->net->stats.tx_bytes += req->length-1;
+		else
+			dev->net->stats.tx_bytes += req->length;
 	}
 	dev->net->stats.tx_packets++;
 
 	spin_lock(&dev->req_lock);
-	list_add(&req->list, &dev->tx_reqs);
-	spin_unlock(&dev->req_lock);
-	dev_kfree_skb_any(skb);
+	list_add_tail(&req->list, &dev->tx_reqs);
 
-	atomic_dec(&dev->tx_qlen);
+	if (dev->port_usb->multi_pkt_xfer) {
+		dev->no_tx_req_used--;
+		req->length = 0;
+		in = dev->port_usb->in_ep;
+
+		if (!list_empty(&dev->tx_reqs)) {
+			new_req = container_of(dev->tx_reqs.next,
+					struct usb_request, list);
+			list_del(&new_req->list);
+			spin_unlock(&dev->req_lock);
+			if (new_req->length > 0) {
+				length = new_req->length;
+
+				/* NCM requires no zlp if transfer is
+				 * dwNtbInMaxSize */
+				if (dev->port_usb->is_fixed &&
+					length == dev->port_usb->fixed_in_len &&
+					(length % in->maxpacket) == 0)
+					new_req->zero = 0;
+				else
+					new_req->zero = 1;
+
+				/* use zlp framing on tx for strict CDC-Ether
+				 * conformance, though any robust network rx
+				 * path ignores extra padding. and some hardware
+				 * doesn't like to write zlps.
+				 */
+				if (new_req->zero && !dev->zlp &&
+						(length % in->maxpacket) == 0) {
+					new_req->zero = 0;
+					length++;
+				}
+
+				new_req->length = length;
+				retval = usb_ep_queue(in, new_req, GFP_ATOMIC);
+				switch (retval) {
+				default:
+					DBG(dev, "tx queue err %d\n", retval);
+					break;
+				case 0:
+					spin_lock(&dev->req_lock);
+					dev->no_tx_req_used++;
+					spin_unlock(&dev->req_lock);
+					netif_trans_update(net);
+				}
+			} else {
+				spin_lock(&dev->req_lock);
+				list_add(&new_req->list, &dev->tx_reqs);
+				spin_unlock(&dev->req_lock);
+			}
+		} else {
+			spin_unlock(&dev->req_lock);
+		}
+	} else {
+		spin_unlock(&dev->req_lock);
+		dev_kfree_skb_any(skb);
+	}
+
 	if (netif_carrier_ok(dev->net))
 		netif_wake_queue(dev->net);
 }
@@ -476,6 +569,26 @@
 	return cdc_filter & USB_CDC_PACKET_TYPE_PROMISCUOUS;
 }
 
+static void alloc_tx_buffer(struct eth_dev *dev)
+{
+	struct list_head	*act;
+	struct usb_request	*req;
+
+	dev->tx_req_bufsize = (dev->dl_max_pkts_per_xfer *
+				(dev->net->mtu
+				+ sizeof(struct ethhdr)
+				/* size of rndis_packet_msg_type */
+				+ 44
+				+ 22));
+
+	list_for_each(act, &dev->tx_reqs) {
+		req = container_of(act, struct usb_request, list);
+		if (!req->buf)
+			req->buf = kmalloc(dev->tx_req_bufsize,
+						GFP_ATOMIC);
+	}
+}
+
 static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 					struct net_device *net)
 {
@@ -502,6 +615,10 @@
 		return NETDEV_TX_OK;
 	}
 
+	/* Allocate memory for tx_reqs to support multi packet transfer */
+	if (dev->port_usb->multi_pkt_xfer && !dev->tx_req_bufsize)
+		alloc_tx_buffer(dev);
+
 	/* apply outgoing CDC or RNDIS filters */
 	if (skb && !is_promisc(cdc_filter)) {
 		u8		*dest = skb->data;
@@ -565,9 +682,37 @@
 		}
 	}
 
-	length = skb->len;
-	req->buf = skb->data;
-	req->context = skb;
+	spin_lock_irqsave(&dev->req_lock, flags);
+	dev->tx_skb_hold_count++;
+	spin_unlock_irqrestore(&dev->req_lock, flags);
+
+	if (dev->port_usb->multi_pkt_xfer) {
+		memcpy(req->buf + req->length, skb->data, skb->len);
+		req->length = req->length + skb->len;
+		length = req->length;
+		dev_kfree_skb_any(skb);
+
+		spin_lock_irqsave(&dev->req_lock, flags);
+		if (dev->tx_skb_hold_count < dev->dl_max_pkts_per_xfer) {
+			if (dev->no_tx_req_used > TX_REQ_THRESHOLD) {
+				list_add(&req->list, &dev->tx_reqs);
+				spin_unlock_irqrestore(&dev->req_lock, flags);
+				goto success;
+			}
+		}
+
+		dev->no_tx_req_used++;
+		spin_unlock_irqrestore(&dev->req_lock, flags);
+
+		spin_lock_irqsave(&dev->lock, flags);
+		dev->tx_skb_hold_count = 0;
+		spin_unlock_irqrestore(&dev->lock, flags);
+	} else {
+		length = skb->len;
+		req->buf = skb->data;
+		req->context = skb;
+	}
+
 	req->complete = tx_complete;
 
 	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
@@ -583,11 +728,27 @@
 	 * though any robust network rx path ignores extra padding.
 	 * and some hardware doesn't like to write zlps.
 	 */
-	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
+	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0) {
+		req->zero = 0;
 		length++;
+	}
 
 	req->length = length;
 
+	/* throttle highspeed IRQ rate back slightly */
+	if (gadget_is_dualspeed(dev->gadget) &&
+			 (dev->gadget->speed == USB_SPEED_HIGH)) {
+		dev->tx_qlen++;
+		if (dev->tx_qlen == (dev->qmult/2)) {
+			req->no_interrupt = 0;
+			dev->tx_qlen = 0;
+		} else {
+			req->no_interrupt = 1;
+		}
+	} else {
+		req->no_interrupt = 0;
+	}
+
 	retval = usb_ep_queue(in, req, GFP_ATOMIC);
 	switch (retval) {
 	default:
@@ -595,11 +756,11 @@
 		break;
 	case 0:
 		netif_trans_update(net);
-		atomic_inc(&dev->tx_qlen);
 	}
 
 	if (retval) {
-		dev_kfree_skb_any(skb);
+		if (!dev->port_usb->multi_pkt_xfer)
+			dev_kfree_skb_any(skb);
 drop:
 		dev->net->stats.tx_dropped++;
 multiframe:
@@ -609,6 +770,7 @@
 		list_add(&req->list, &dev->tx_reqs);
 		spin_unlock_irqrestore(&dev->req_lock, flags);
 	}
+success:
 	return NETDEV_TX_OK;
 }
 
@@ -622,7 +784,7 @@
 	rx_fill(dev, gfp_flags);
 
 	/* and open the tx floodgates */
-	atomic_set(&dev->tx_qlen, 0);
+	dev->tx_qlen = 0;
 	netif_wake_queue(dev->net);
 }
 
@@ -768,6 +930,7 @@
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->req_lock);
 	INIT_WORK(&dev->work, eth_work);
+	INIT_WORK(&dev->rx_work, process_rx_w);
 	INIT_LIST_HEAD(&dev->tx_reqs);
 	INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -830,6 +993,7 @@
 	spin_lock_init(&dev->lock);
 	spin_lock_init(&dev->req_lock);
 	INIT_WORK(&dev->work, eth_work);
+	INIT_WORK(&dev->rx_work, process_rx_w);
 	INIT_LIST_HEAD(&dev->tx_reqs);
 	INIT_LIST_HEAD(&dev->rx_reqs);
 
@@ -1065,8 +1229,13 @@
 		dev->header_len = link->header_len;
 		dev->unwrap = link->unwrap;
 		dev->wrap = link->wrap;
+		dev->ul_max_pkts_per_xfer = link->ul_max_pkts_per_xfer;
+		dev->dl_max_pkts_per_xfer = link->dl_max_pkts_per_xfer;
 
 		spin_lock(&dev->lock);
+		dev->tx_skb_hold_count = 0;
+		dev->no_tx_req_used = 0;
+		dev->tx_req_bufsize = 0;
 		dev->port_usb = link;
 		if (netif_running(dev->net)) {
 			if (link->open)
@@ -1111,6 +1280,7 @@
 {
 	struct eth_dev		*dev = link->ioport;
 	struct usb_request	*req;
+	struct sk_buff		*skb;
 
 	WARN_ON(!dev);
 	if (!dev)
@@ -1133,6 +1303,8 @@
 		list_del(&req->list);
 
 		spin_unlock(&dev->req_lock);
+		if (link->multi_pkt_xfer)
+			kfree(req->buf);
 		usb_ep_free_request(link->in_ep, req);
 		spin_lock(&dev->req_lock);
 	}
@@ -1151,6 +1323,12 @@
 		spin_lock(&dev->req_lock);
 	}
 	spin_unlock(&dev->req_lock);
+
+	spin_lock(&dev->rx_frames.lock);
+	while ((skb = __skb_dequeue(&dev->rx_frames)))
+		dev_kfree_skb_any(skb);
+	spin_unlock(&dev->rx_frames.lock);
+
 	link->out_ep->desc = NULL;
 
 	/* finish forgetting about this USB link episode */
@@ -1164,5 +1342,23 @@
 }
 EXPORT_SYMBOL_GPL(gether_disconnect);
 
-MODULE_LICENSE("GPL");
+static int __init gether_init(void)
+{
+	uether_wq  = create_singlethread_workqueue("uether");
+	if (!uether_wq) {
+		pr_err("%s: Unable to create workqueue: uether\n", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+module_init(gether_init);
+
+static void __exit gether_exit(void)
+{
+	destroy_workqueue(uether_wq);
+
+}
+module_exit(gether_exit);
 MODULE_AUTHOR("David Brownell");
+MODULE_DESCRIPTION("ethernet over USB driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 81d94a7..3ff09d1 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -74,6 +74,9 @@
 	bool				is_fixed;
 	u32				fixed_out_len;
 	u32				fixed_in_len;
+	unsigned		ul_max_pkts_per_xfer;
+	unsigned		dl_max_pkts_per_xfer;
+	bool				multi_pkt_xfer;
 	bool				supports_multi_frame;
 	struct sk_buff			*(*wrap)(struct gether *port,
 						struct sk_buff *skb);
diff --git a/drivers/usb/gadget/functions.c b/drivers/usb/gadget/functions.c
index b13f839..389c1f3 100644
--- a/drivers/usb/gadget/functions.c
+++ b/drivers/usb/gadget/functions.c
@@ -58,7 +58,7 @@
 	struct usb_function *f;
 
 	f = fi->fd->alloc_func(fi);
-	if (IS_ERR(f))
+	if ((f == NULL) || IS_ERR(f))
 		return f;
 	f->fi = fi;
 	return f;
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 9483489..03fe5c5 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -707,6 +707,57 @@
 EXPORT_SYMBOL_GPL(usb_gadget_disconnect);
 
 /**
+ * usb_gsi_ep_op - perform an operation on a GSI-accelerated endpoint
+ *
+ * Operations include EP configuration, TRB allocation, StartXfer, etc.
+ * See gsi_ep_op for more details.
+ */
+int usb_gsi_ep_op(struct usb_ep *ep,
+		struct usb_gsi_request *req, enum gsi_ep_op op)
+{
+	if (ep->ops->gsi_ep_op)
+		return ep->ops->gsi_ep_op(ep, req, op);
+
+	return -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(usb_gsi_ep_op);
+
+/**
+ * usb_gadget_func_wakeup - send a function remote wakeup notification
+ * to the host connected to this gadget
+ * @gadget: controller used to wake up the host
+ * @interface_id: the interface which triggered the remote wakeup event
+ *
+ * Returns zero on success. Otherwise, negative error code is returned.
+ */
+int usb_gadget_func_wakeup(struct usb_gadget *gadget,
+	int interface_id)
+{
+	if (gadget->speed != USB_SPEED_SUPER)
+		return -EOPNOTSUPP;
+
+	if (!gadget->ops->func_wakeup)
+		return -EOPNOTSUPP;
+
+	return gadget->ops->func_wakeup(gadget, interface_id);
+}
+EXPORT_SYMBOL(usb_gadget_func_wakeup);
+
+/**
+ * usb_gadget_restart - software-controlled reset of USB peripheral connection
+ * @gadget: the peripheral being reset
+ *
+ * Informs the controller driver of a Vbus LOW followed by Vbus HIGH notification.
+ * This performs a full hardware reset and re-initialization.
+ */
+int usb_gadget_restart(struct usb_gadget *gadget)
+{
+	if (!gadget->ops->restart)
+		return -EOPNOTSUPP;
+	return gadget->ops->restart(gadget);
+}
+EXPORT_SYMBOL(usb_gadget_restart);
+/**
  * usb_gadget_deactivate - deactivate function which is not ready to work
  * @gadget: the peripheral being deactivated
  *
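
A minimal sketch of how a function driver could use the remote-wakeup helper added above; the surrounding function and its caller are hypothetical, only usb_gadget_func_wakeup() and its error convention come from this patch.

/* Hypothetical caller; only the helper and its return convention are real. */
static void example_trigger_remote_wakeup(struct usb_gadget *gadget,
					  int interface_id)
{
	int ret;

	/* Valid only on a SuperSpeed link and only if the UDC implements
	 * the func_wakeup op; otherwise -EOPNOTSUPP is returned. */
	ret = usb_gadget_func_wakeup(gadget, interface_id);
	if (ret)
		pr_debug("function remote wakeup failed: %d\n", ret);
}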
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ed56bf9..c86abfc 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -198,6 +198,15 @@
 		goto put_hcd;
 	}
 
+	if (pdev->dev.parent)
+		pm_runtime_resume(pdev->dev.parent);
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+	pm_runtime_get_sync(&pdev->dev);
+
 	xhci = hcd_to_xhci(hcd);
 	match = of_match_node(usb_xhci_of_match, pdev->dev.of_node);
 	if (match) {
@@ -238,14 +247,17 @@
 			goto put_usb3_hcd;
 	}
 
-	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
+	ret = usb_add_hcd(hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
 	if (ret)
 		goto disable_usb_phy;
 
-	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
+	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED | IRQF_ONESHOT);
 	if (ret)
 		goto dealloc_usb2_hcd;
 
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
 	return 0;
 
 
@@ -274,6 +286,8 @@
 	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
 	struct clk *clk = xhci->clk;
 
+	pm_runtime_disable(&dev->dev);
+
 	usb_remove_hcd(xhci->shared_hcd);
 	usb_phy_shutdown(hcd->usb_phy);
 
@@ -287,33 +301,57 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int xhci_plat_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int xhci_plat_runtime_idle(struct device *dev)
 {
-	struct usb_hcd	*hcd = dev_get_drvdata(dev);
-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
-
 	/*
-	 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
-	 * to do wakeup during suspend. Since xhci_plat_suspend is currently
-	 * only designed for system suspend, device_may_wakeup() is enough
-	 * to dertermine whether host is allowed to do wakeup. Need to
-	 * reconsider this when xhci_plat_suspend enlarges its scope, e.g.,
-	 * also applies to runtime suspend.
+	 * When pm_runtime_put_autosuspend() is called on this device, the
+	 * PM core schedules the autosuspend after this idle callback
+	 * returns, provided there is time remaining until expiry. However,
+	 * when this point is reached because the child_count drops to zero,
+	 * the core does not honor the autosuspend delay and suspends the
+	 * device immediately. In order to still get a delay before suspend,
+	 * we have to call pm_runtime_autosuspend() manually.
+	 */
-	return xhci_suspend(xhci, device_may_wakeup(dev));
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_autosuspend(dev);
+	return -EBUSY;
 }
 
-static int xhci_plat_resume(struct device *dev)
+static int xhci_plat_runtime_suspend(struct device *dev)
 {
-	struct usb_hcd	*hcd = dev_get_drvdata(dev);
-	struct xhci_hcd	*xhci = hcd_to_xhci(hcd);
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
 
-	return xhci_resume(xhci, 0);
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat runtime suspend\n");
+
+	return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_runtime_resume(struct device *dev)
+{
+	struct usb_hcd *hcd = dev_get_drvdata(dev);
+	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+	int ret;
+
+	if (!xhci)
+		return 0;
+
+	dev_dbg(dev, "xhci-plat runtime resume\n");
+
+	ret = xhci_resume(xhci, false);
+	pm_runtime_mark_last_busy(dev);
+
+	return ret;
 }
 
 static const struct dev_pm_ops xhci_plat_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
+	SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL)
+	SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
+			   xhci_plat_runtime_idle)
 };
 #define DEV_PM_OPS	(&xhci_plat_pm_ops)
 #else
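
The probe/remove changes above follow the standard runtime-PM autosuspend pattern. A generic sketch of that pattern for a platform driver, assuming the same 1000 ms autosuspend delay used in xhci_plat_probe():

#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Allow autosuspend with a 1 s inactivity delay and keep the
	 * device active while probe initializes the hardware. */
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* ... hardware init ... */

	/* Drop the probe-time reference; the device may now autosuspend
	 * once it has been idle for the configured delay. */
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	/* ... hardware teardown ... */
	return 0;
}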
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index b9c409a..c5527d4 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -6,6 +6,14 @@
 config USB_PHY
 	def_bool n
 
+config USB_OTG_WAKELOCK
+	bool "Hold a wakelock when USB connected"
+	depends on WAKELOCK
+	select USB_OTG_UTILS
+	help
+	  Select this to automatically hold a wakelock when USB is
+	  connected, preventing suspend.
+
 #
 # USB Transceiver Drivers
 #
@@ -208,4 +216,33 @@
 	  Provides read/write operations to the ULPI phy register set for
 	  controllers with a viewport register (e.g. Chipidea/ARC controllers).
 
+config DUAL_ROLE_USB_INTF
+	bool "Generic DUAL ROLE sysfs interface"
+	depends on SYSFS && USB_PHY
+	help
+	  A generic sysfs interface to track and change the state of
+	  dual role USB PHYs. USB PHY drivers can register with this
+	  interface to expose their capabilities to userspace, thereby
+	  allowing userspace to change the port mode.
+
+config USB_MSM_SSPHY_QMP
+	tristate "MSM SSUSB QMP PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the SuperSpeed USB transceiver on MSM chips.
+	  This driver supports the PHY which uses the QSCRATCH-based register
+	  set for its control sequences, normally paired with newer DWC3-based
+	  SuperSpeed controllers.
+
+config MSM_QUSB_PHY
+	tristate "MSM QUSB2 PHY Driver"
+	depends on ARCH_QCOM
+	select USB_PHY
+	help
+	  Enable this to support the QUSB2 PHY on MSM chips. This driver supports
+	  the high-speed PHY which is usually paired with either the ChipIdea or
+	  Synopsys DWC3 USB IPs on MSM SOCs. This driver expects to configure the
+	  PHY with a dedicated register I/O memory region.
+
 endmenu
diff --git a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile
index b433e5d..ce98866 100644
--- a/drivers/usb/phy/Makefile
+++ b/drivers/usb/phy/Makefile
@@ -3,6 +3,8 @@
 #
 obj-$(CONFIG_USB_PHY)			+= phy.o
 obj-$(CONFIG_OF)			+= of.o
+obj-$(CONFIG_USB_OTG_WAKELOCK)		+= otg-wakelock.o
+obj-$(CONFIG_DUAL_ROLE_USB_INTF)	+= class-dual-role.o
 
 # transceiver drivers, keep the list sorted
 
@@ -26,3 +28,5 @@
 obj-$(CONFIG_USB_ULPI)			+= phy-ulpi.o
 obj-$(CONFIG_USB_ULPI_VIEWPORT)		+= phy-ulpi-viewport.o
 obj-$(CONFIG_KEYSTONE_USB_PHY)		+= phy-keystone.o
+obj-$(CONFIG_USB_MSM_SSPHY_QMP)     	+= phy-msm-ssusb-qmp.o
+obj-$(CONFIG_MSM_QUSB_PHY)              += phy-msm-qusb.o phy-msm-qusb-v2.o
diff --git a/drivers/usb/phy/class-dual-role.c b/drivers/usb/phy/class-dual-role.c
new file mode 100644
index 0000000..51fcb54
--- /dev/null
+++ b/drivers/usb/phy/class-dual-role.c
@@ -0,0 +1,529 @@
+/*
+ * class-dual-role.c
+ *
+ * Copyright (C) 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/ctype.h>
+#include <linux/device.h>
+#include <linux/usb/class-dual-role.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+
+#define DUAL_ROLE_NOTIFICATION_TIMEOUT 2000
+
+static ssize_t dual_role_store_property(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count);
+static ssize_t dual_role_show_property(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf);
+
+#define DUAL_ROLE_ATTR(_name)				\
+{							\
+	.attr = { .name = #_name },			\
+	.show = dual_role_show_property,		\
+	.store = dual_role_store_property,		\
+}
+
+static struct device_attribute dual_role_attrs[] = {
+	DUAL_ROLE_ATTR(supported_modes),
+	DUAL_ROLE_ATTR(mode),
+	DUAL_ROLE_ATTR(power_role),
+	DUAL_ROLE_ATTR(data_role),
+	DUAL_ROLE_ATTR(powers_vconn),
+};
+
+struct class *dual_role_class;
+EXPORT_SYMBOL_GPL(dual_role_class);
+
+static struct device_type dual_role_dev_type;
+
+static char *kstrdupcase(const char *str, gfp_t gfp, bool to_upper)
+{
+	char *ret, *ustr;
+
+	ustr = ret = kmalloc(strlen(str) + 1, gfp);
+
+	if (!ret)
+		return NULL;
+
+	while (*str)
+		*ustr++ = to_upper ? toupper(*str++) : tolower(*str++);
+
+	*ustr = 0;
+
+	return ret;
+}
+
+static void dual_role_changed_work(struct work_struct *work)
+{
+	struct dual_role_phy_instance *dual_role =
+	    container_of(work, struct dual_role_phy_instance,
+			 changed_work);
+
+	dev_dbg(&dual_role->dev, "%s\n", __func__);
+	kobject_uevent(&dual_role->dev.kobj, KOBJ_CHANGE);
+}
+
+void dual_role_instance_changed(struct dual_role_phy_instance *dual_role)
+{
+	dev_dbg(&dual_role->dev, "%s\n", __func__);
+	pm_wakeup_event(&dual_role->dev, DUAL_ROLE_NOTIFICATION_TIMEOUT);
+	schedule_work(&dual_role->changed_work);
+}
+EXPORT_SYMBOL_GPL(dual_role_instance_changed);
+
+int dual_role_get_property(struct dual_role_phy_instance *dual_role,
+			   enum dual_role_property prop,
+			   unsigned int *val)
+{
+	return dual_role->desc->get_property(dual_role, prop, val);
+}
+EXPORT_SYMBOL_GPL(dual_role_get_property);
+
+int dual_role_set_property(struct dual_role_phy_instance *dual_role,
+			   enum dual_role_property prop,
+			   const unsigned int *val)
+{
+	if (!dual_role->desc->set_property)
+		return -ENODEV;
+
+	return dual_role->desc->set_property(dual_role, prop, val);
+}
+EXPORT_SYMBOL_GPL(dual_role_set_property);
+
+int dual_role_property_is_writeable(struct dual_role_phy_instance *dual_role,
+				    enum dual_role_property prop)
+{
+	if (!dual_role->desc->property_is_writeable)
+		return -ENODEV;
+
+	return dual_role->desc->property_is_writeable(dual_role, prop);
+}
+EXPORT_SYMBOL_GPL(dual_role_property_is_writeable);
+
+static void dual_role_dev_release(struct device *dev)
+{
+	struct dual_role_phy_instance *dual_role =
+	    container_of(dev, struct dual_role_phy_instance, dev);
+	pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+	kfree(dual_role);
+}
+
+static struct dual_role_phy_instance *__must_check
+__dual_role_register(struct device *parent,
+		     const struct dual_role_phy_desc *desc)
+{
+	struct device *dev;
+	struct dual_role_phy_instance *dual_role;
+	int rc;
+
+	dual_role = kzalloc(sizeof(*dual_role), GFP_KERNEL);
+	if (!dual_role)
+		return ERR_PTR(-ENOMEM);
+
+	dev = &dual_role->dev;
+
+	device_initialize(dev);
+
+	dev->class = dual_role_class;
+	dev->type = &dual_role_dev_type;
+	dev->parent = parent;
+	dev->release = dual_role_dev_release;
+	dev_set_drvdata(dev, dual_role);
+	dual_role->desc = desc;
+
+	rc = dev_set_name(dev, "%s", desc->name);
+	if (rc)
+		goto dev_set_name_failed;
+
+	INIT_WORK(&dual_role->changed_work, dual_role_changed_work);
+
+	rc = device_init_wakeup(dev, true);
+	if (rc)
+		goto wakeup_init_failed;
+
+	rc = device_add(dev);
+	if (rc)
+		goto device_add_failed;
+
+	dual_role_instance_changed(dual_role);
+
+	return dual_role;
+
+device_add_failed:
+	device_init_wakeup(dev, false);
+wakeup_init_failed:
+dev_set_name_failed:
+	/* dual_role is freed by dual_role_dev_release() via put_device() */
+	put_device(dev);
+
+	return ERR_PTR(rc);
+}
+
+static void dual_role_instance_unregister(struct dual_role_phy_instance
+					  *dual_role)
+{
+	cancel_work_sync(&dual_role->changed_work);
+	device_init_wakeup(&dual_role->dev, false);
+	device_unregister(&dual_role->dev);
+}
+
+static void devm_dual_role_release(struct device *dev, void *res)
+{
+	struct dual_role_phy_instance **dual_role = res;
+
+	dual_role_instance_unregister(*dual_role);
+}
+
+struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+				 const struct dual_role_phy_desc *desc)
+{
+	struct dual_role_phy_instance **ptr, *dual_role;
+
+	ptr = devres_alloc(devm_dual_role_release, sizeof(*ptr), GFP_KERNEL);
+
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+	dual_role = __dual_role_register(parent, desc);
+	if (IS_ERR(dual_role)) {
+		devres_free(ptr);
+	} else {
+		*ptr = dual_role;
+		devres_add(parent, ptr);
+	}
+	return dual_role;
+}
+EXPORT_SYMBOL_GPL(devm_dual_role_instance_register);
+
+static int devm_dual_role_match(struct device *dev, void *res, void *data)
+{
+	struct dual_role_phy_instance **r = res;
+
+	if (WARN_ON(!r || !*r))
+		return 0;
+
+	return *r == data;
+}
+
+void devm_dual_role_instance_unregister(struct device *dev,
+					struct dual_role_phy_instance
+					*dual_role)
+{
+	int rc;
+
+	rc = devres_release(dev, devm_dual_role_release,
+			    devm_dual_role_match, dual_role);
+	WARN_ON(rc);
+}
+EXPORT_SYMBOL_GPL(devm_dual_role_instance_unregister);
+
+void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role)
+{
+	return dual_role->drv_data;
+}
+EXPORT_SYMBOL_GPL(dual_role_get_drvdata);
+
+/***************** Device attribute functions **************************/
+
+/* port type */
+static char *supported_modes_text[] = {
+	"ufp dfp", "dfp", "ufp"
+};
+
+/* current mode */
+static char *mode_text[] = {
+	"ufp", "dfp", "none"
+};
+
+/* Power role */
+static char *pr_text[] = {
+	"source", "sink", "none"
+};
+
+/* Data role */
+static char *dr_text[] = {
+	"host", "device", "none"
+};
+
+/* Vconn supply */
+static char *vconn_supply_text[] = {
+	"n", "y"
+};
+
+static ssize_t dual_role_show_property(struct device *dev,
+				       struct device_attribute *attr, char *buf)
+{
+	ssize_t ret = 0;
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	const ptrdiff_t off = attr - dual_role_attrs;
+	unsigned int value;
+
+	if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) {
+		value = dual_role->desc->supported_modes;
+	} else {
+		ret = dual_role_get_property(dual_role, off, &value);
+
+		if (ret < 0) {
+			if (ret == -ENODATA)
+				dev_dbg(dev,
+					"driver has no data for `%s' property\n",
+					attr->attr.name);
+			else if (ret != -ENODEV)
+				dev_err(dev,
+					"driver failed to report `%s' property: %zd\n",
+					attr->attr.name, ret);
+			return ret;
+		}
+	}
+
+	if (off == DUAL_ROLE_PROP_SUPPORTED_MODES) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL !=
+			ARRAY_SIZE(supported_modes_text));
+		if (value < DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					supported_modes_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_MODE) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_MODE_TOTAL !=
+			ARRAY_SIZE(mode_text));
+		if (value < DUAL_ROLE_PROP_MODE_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					mode_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_PR) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_PR_TOTAL != ARRAY_SIZE(pr_text));
+		if (value < DUAL_ROLE_PROP_PR_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					pr_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_DR) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_DR_TOTAL != ARRAY_SIZE(dr_text));
+		if (value < DUAL_ROLE_PROP_DR_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					dr_text[value]);
+		else
+			return -EIO;
+	} else if (off == DUAL_ROLE_PROP_VCONN_SUPPLY) {
+		BUILD_BUG_ON(DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL !=
+				ARRAY_SIZE(vconn_supply_text));
+		if (value < DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL)
+			return snprintf(buf, PAGE_SIZE, "%s\n",
+					vconn_supply_text[value]);
+		else
+			return -EIO;
+	} else
+		return -EIO;
+}
+
+static ssize_t dual_role_store_property(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	ssize_t ret;
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	const ptrdiff_t off = attr - dual_role_attrs;
+	unsigned int value;
+	int total, i;
+	char *dup_buf, **text_array;
+	bool result = false;
+
+	dup_buf = kstrdupcase(buf, GFP_KERNEL, false);
+	switch (off) {
+	case DUAL_ROLE_PROP_MODE:
+		total = DUAL_ROLE_PROP_MODE_TOTAL;
+		text_array = mode_text;
+		break;
+	case DUAL_ROLE_PROP_PR:
+		total = DUAL_ROLE_PROP_PR_TOTAL;
+		text_array = pr_text;
+		break;
+	case DUAL_ROLE_PROP_DR:
+		total = DUAL_ROLE_PROP_DR_TOTAL;
+		text_array = dr_text;
+		break;
+	case DUAL_ROLE_PROP_VCONN_SUPPLY:
+		ret = strtobool(dup_buf, &result);
+		value = result;
+		if (!ret)
+			goto setprop;
+	default:
+		ret = -EINVAL;
+		goto error;
+	}
+
+	for (i = 0; i <= total; i++) {
+		if (i == total) {
+			ret = -ENOTSUPP;
+			goto error;
+		}
+		if (!strncmp(*(text_array + i), dup_buf,
+			     strlen(*(text_array + i)))) {
+			value = i;
+			break;
+		}
+	}
+
+setprop:
+	ret = dual_role->desc->set_property(dual_role, off, &value);
+
+error:
+	kfree(dup_buf);
+
+	if (ret < 0)
+		return ret;
+
+	return count;
+}
+
+static umode_t dual_role_attr_is_visible(struct kobject *kobj,
+					 struct attribute *attr, int attrno)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+	int i;
+
+	if (attrno == DUAL_ROLE_PROP_SUPPORTED_MODES)
+		return mode;
+
+	for (i = 0; i < dual_role->desc->num_properties; i++) {
+		int property = dual_role->desc->properties[i];
+
+		if (property == attrno) {
+			if (dual_role->desc->property_is_writeable &&
+			    dual_role_property_is_writeable(dual_role, property)
+			    > 0)
+				mode |= S_IWUSR;
+
+			return mode;
+		}
+	}
+
+	return 0;
+}
+
+static struct attribute *__dual_role_attrs[ARRAY_SIZE(dual_role_attrs) + 1];
+
+static struct attribute_group dual_role_attr_group = {
+	.attrs = __dual_role_attrs,
+	.is_visible = dual_role_attr_is_visible,
+};
+
+static const struct attribute_group *dual_role_attr_groups[] = {
+	&dual_role_attr_group,
+	NULL,
+};
+
+void dual_role_init_attrs(struct device_type *dev_type)
+{
+	int i;
+
+	dev_type->groups = dual_role_attr_groups;
+
+	for (i = 0; i < ARRAY_SIZE(dual_role_attrs); i++)
+		__dual_role_attrs[i] = &dual_role_attrs[i].attr;
+}
+
+int dual_role_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct dual_role_phy_instance *dual_role = dev_get_drvdata(dev);
+	int ret = 0, j;
+	char *prop_buf;
+	char *attrname;
+
+	dev_dbg(dev, "uevent\n");
+
+	if (!dual_role || !dual_role->desc) {
+		dev_dbg(dev, "No dual_role phy yet\n");
+		return ret;
+	}
+
+	dev_dbg(dev, "DUAL_ROLE_NAME=%s\n", dual_role->desc->name);
+
+	ret = add_uevent_var(env, "DUAL_ROLE_NAME=%s", dual_role->desc->name);
+	if (ret)
+		return ret;
+
+	prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+	if (!prop_buf)
+		return -ENOMEM;
+
+	for (j = 0; j < dual_role->desc->num_properties; j++) {
+		struct device_attribute *attr;
+		char *line;
+
+		attr = &dual_role_attrs[dual_role->desc->properties[j]];
+
+		ret = dual_role_show_property(dev, attr, prop_buf);
+		if (ret == -ENODEV || ret == -ENODATA) {
+			ret = 0;
+			continue;
+		}
+
+		if (ret < 0)
+			goto out;
+		line = strnchr(prop_buf, PAGE_SIZE, '\n');
+		if (line)
+			*line = 0;
+
+		attrname = kstrdupcase(attr->attr.name, GFP_KERNEL, true);
+		if (!attrname)
+			ret = -ENOMEM;
+
+		dev_dbg(dev, "prop %s=%s\n", attrname, prop_buf);
+
+		ret = add_uevent_var(env, "DUAL_ROLE_%s=%s", attrname,
+				     prop_buf);
+		kfree(attrname);
+		if (ret)
+			goto out;
+	}
+
+out:
+	free_page((unsigned long)prop_buf);
+
+	return ret;
+}
+
+/******************* Module Init ***********************************/
+
+static int __init dual_role_class_init(void)
+{
+	dual_role_class = class_create(THIS_MODULE, "dual_role_usb");
+
+	if (IS_ERR(dual_role_class))
+		return PTR_ERR(dual_role_class);
+
+	dual_role_class->dev_uevent = dual_role_uevent;
+	dual_role_init_attrs(&dual_role_dev_type);
+
+	return 0;
+}
+
+static void __exit dual_role_class_exit(void)
+{
+	class_destroy(dual_role_class);
+}
+
+subsys_initcall(dual_role_class_init);
+module_exit(dual_role_class_exit);
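
A sketch of how a Type-C PHY driver might register with this class. Field and symbol names follow the usage in class-dual-role.c above; the supported_modes value, a set_property callback and the driver's own bookkeeping are omitted, and "otg_default" is only a placeholder name.

static enum dual_role_property example_props[] = {
	DUAL_ROLE_PROP_MODE,
	DUAL_ROLE_PROP_PR,
	DUAL_ROLE_PROP_DR,
};

static int example_get_property(struct dual_role_phy_instance *dual_role,
				enum dual_role_property prop,
				unsigned int *val)
{
	/* Return the index into the matching *_text[] table above,
	 * e.g. 0 ("ufp") for DUAL_ROLE_PROP_MODE. */
	*val = 0;
	return 0;
}

static int example_register(struct device *parent)
{
	static const struct dual_role_phy_desc desc = {
		.name		= "otg_default",
		.properties	= example_props,
		.num_properties	= ARRAY_SIZE(example_props),
		.get_property	= example_get_property,
	};
	struct dual_role_phy_instance *inst;

	inst = devm_dual_role_instance_register(parent, &desc);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	/* On any later role change, call dual_role_instance_changed(inst). */
	return 0;
}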
diff --git a/drivers/usb/phy/otg-wakelock.c b/drivers/usb/phy/otg-wakelock.c
new file mode 100644
index 0000000..479376b
--- /dev/null
+++ b/drivers/usb/phy/otg-wakelock.c
@@ -0,0 +1,173 @@
+/*
+ * otg-wakelock.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/wakelock.h>
+#include <linux/spinlock.h>
+#include <linux/usb/otg.h>
+
+#define TEMPORARY_HOLD_TIME	2000
+
+static bool enabled = true;
+static struct usb_phy *otgwl_xceiv;
+static struct notifier_block otgwl_nb;
+
+/*
+ * otgwl_spinlock is held while the VBUS lock is grabbed or dropped and the
+ * held field is updated to match.
+ */
+
+static DEFINE_SPINLOCK(otgwl_spinlock);
+
+/*
+ * Only one lock, but since these 3 fields are associated with each other...
+ */
+
+struct otgwl_lock {
+	char name[40];
+	struct wake_lock wakelock;
+	bool held;
+};
+
+/*
+ * VBUS present lock.  Also used as a timed lock on charger
+ * connect/disconnect and USB host disconnect, to allow the system
+ * to react to the change in power.
+ */
+
+static struct otgwl_lock vbus_lock;
+
+static void otgwl_hold(struct otgwl_lock *lock)
+{
+	if (!lock->held) {
+		wake_lock(&lock->wakelock);
+		lock->held = true;
+	}
+}
+
+static void otgwl_temporary_hold(struct otgwl_lock *lock)
+{
+	wake_lock_timeout(&lock->wakelock,
+			  msecs_to_jiffies(TEMPORARY_HOLD_TIME));
+	lock->held = false;
+}
+
+static void otgwl_drop(struct otgwl_lock *lock)
+{
+	if (lock->held) {
+		wake_unlock(&lock->wakelock);
+		lock->held = false;
+	}
+}
+
+static void otgwl_handle_event(unsigned long event)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&otgwl_spinlock, irqflags);
+
+	if (!enabled) {
+		otgwl_drop(&vbus_lock);
+		spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+		return;
+	}
+
+	switch (event) {
+	case USB_EVENT_VBUS:
+	case USB_EVENT_ENUMERATED:
+		otgwl_hold(&vbus_lock);
+		break;
+
+	case USB_EVENT_NONE:
+	case USB_EVENT_ID:
+	case USB_EVENT_CHARGER:
+		otgwl_temporary_hold(&vbus_lock);
+		break;
+
+	default:
+		break;
+	}
+
+	spin_unlock_irqrestore(&otgwl_spinlock, irqflags);
+}
+
+static int otgwl_otg_notifications(struct notifier_block *nb,
+				   unsigned long event, void *unused)
+{
+	otgwl_handle_event(event);
+	return NOTIFY_OK;
+}
+
+static int set_enabled(const char *val, const struct kernel_param *kp)
+{
+	int rv = param_set_bool(val, kp);
+
+	if (rv)
+		return rv;
+
+	if (otgwl_xceiv)
+		otgwl_handle_event(otgwl_xceiv->last_event);
+
+	return 0;
+}
+
+static struct kernel_param_ops enabled_param_ops = {
+	.set = set_enabled,
+	.get = param_get_bool,
+};
+
+module_param_cb(enabled, &enabled_param_ops, &enabled, 0644);
+MODULE_PARM_DESC(enabled, "enable wakelock when VBUS present");
+
+static int __init otg_wakelock_init(void)
+{
+	int ret;
+	struct usb_phy *phy;
+
+	phy = usb_get_phy(USB_PHY_TYPE_USB2);
+
+	if (IS_ERR(phy)) {
+		pr_err("%s: No USB transceiver found\n", __func__);
+		return PTR_ERR(phy);
+	}
+	otgwl_xceiv = phy;
+
+	snprintf(vbus_lock.name, sizeof(vbus_lock.name), "vbus-%s",
+		 dev_name(otgwl_xceiv->dev));
+	wake_lock_init(&vbus_lock.wakelock, WAKE_LOCK_SUSPEND,
+		       vbus_lock.name);
+
+	otgwl_nb.notifier_call = otgwl_otg_notifications;
+	ret = usb_register_notifier(otgwl_xceiv, &otgwl_nb);
+
+	if (ret) {
+		pr_err("%s: usb_register_notifier on transceiver %s"
+		       " failed\n", __func__,
+		       dev_name(otgwl_xceiv->dev));
+		otgwl_xceiv = NULL;
+		wake_lock_destroy(&vbus_lock.wakelock);
+		return ret;
+	}
+
+	otgwl_handle_event(otgwl_xceiv->last_event);
+	return ret;
+}
+
+late_initcall(otg_wakelock_init);
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
new file mode 100644
index 0000000..58eb287
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -0,0 +1,967 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PWR_CTRL1		0x210
+#define PWR_CTRL1_POWR_DOWN		BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
+#define CORE_READY_STATUS		BIT(0)
+
+/* In case Efuse register shows zero, use this value */
+#define TUNE2_DEFAULT_HIGH_NIBBLE	0xB
+#define TUNE2_DEFAULT_LOW_NIBBLE	0x3
+
+/* Get TUNE2's high nibble value read from efuse */
+#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
+
+#define QUSB2PHY_INTR_CTRL		0x22C
+#define DMSE_INTR_HIGH_SEL              BIT(4)
+#define DPSE_INTR_HIGH_SEL              BIT(3)
+#define CHG_DET_INTR_EN                 BIT(2)
+#define DMSE_INTR_EN                    BIT(1)
+#define DPSE_INTR_EN                    BIT(0)
+
+#define QUSB2PHY_INTR_STAT		0x230
+#define DMSE_INTERRUPT			BIT(1)
+#define DPSE_INTERRUPT			BIT(0)
+
+#define QUSB2PHY_PORT_TUNE2		0x240
+
+#define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD          30000   /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN		3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX		3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD		30000	/* uA */
+
+#define LINESTATE_DP			BIT(0)
+#define LINESTATE_DM			BIT(1)
+
+unsigned int phy_tune2;
+module_param(phy_tune2, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(phy_tune2, "QUSB PHY v2 TUNE2");
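+/*
+ * Runtime override sketch (sysfs path is an assumption): for example,
+ *   echo 0xB3 > /sys/module/phy_msm_qusb_v2/parameters/phy_tune2
+ * makes the next qusb_phy_init() program 0xB3 into QUSB2PHY_PORT_TUNE2 in
+ * place of the efuse/default value.
+ */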
+
+struct qusb_phy {
+	struct usb_phy		phy;
+	void __iomem		*base;
+	void __iomem		*tune2_efuse_reg;
+
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*phy_reset;
+
+	struct regulator	*vdd;
+	struct regulator	*vdda33;
+	struct regulator	*vdda18;
+	int			vdd_levels[3]; /* none, low, high */
+	int			init_seq_len;
+	int			*qusb_phy_init_seq;
+	int			host_init_seq_len;
+	int			*qusb_phy_host_init_seq;
+
+	u32			tune2_val;
+	int			tune2_efuse_bit_pos;
+	int			tune2_efuse_num_of_bits;
+
+	bool			power_enabled;
+	bool			clocks_enabled;
+	bool			cable_connected;
+	bool			suspended;
+	bool			rm_pulldown;
+
+	struct regulator_desc	dpdm_rdesc;
+	struct regulator_dev	*dpdm_rdev;
+
+	/* emulation targets specific */
+	void __iomem		*emu_phy_base;
+	bool			emulation;
+	int			*emu_init_seq;
+	int			emu_init_seq_len;
+	int			*phy_pll_reset_seq;
+	int			phy_pll_reset_seq_len;
+	int			*emu_dcm_reset_seq;
+	int			emu_dcm_reset_seq_len;
+};
+
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
+			__func__, qphy->clocks_enabled, on);
+
+	if (!qphy->clocks_enabled && on) {
+		clk_prepare_enable(qphy->ref_clk_src);
+		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = true;
+	}
+
+	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = false;
+	}
+
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__,
+						qphy->clocks_enabled);
+}
+
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+						qphy->vdd_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+			qphy->vdd_levels[min], qphy->vdd_levels[2]);
+	return ret;
+}
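+
+/*
+ * Example with illustrative values: given qcom,vdd-voltage-level =
+ * <0 880000 880000> (none, low, high), a "high" request constrains vdd to
+ * 880000-880000 uV, while a non-high request relaxes the floor to the
+ * "none" level of 0 uV.
+ */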
+
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
+						bool toggle_vdd)
+{
+	int ret = 0;
+
+	dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
+			__func__, on ? "on" : "off", qphy->power_enabled);
+
+	if (toggle_vdd && qphy->power_enabled == on) {
+		dev_dbg(qphy->phy.dev,
+			"PHY regulators are already in the requested state\n");
+		return 0;
+	}
+
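+	/*
+	 * Note: the power-off path below reuses the error-unwind ladder, so a
+	 * "goto disable_vdda33" tears the supplies down in the reverse order
+	 * of how they were brought up.
+	 */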
+	if (!on)
+		goto disable_vdda33;
+
+	if (toggle_vdd) {
+		ret = qusb_phy_config_vdd(qphy, true);
+		if (ret) {
+			dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+								ret);
+			goto err_vdd;
+		}
+
+		ret = regulator_enable(qphy->vdd);
+		if (ret) {
+			dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+			goto unconfig_vdd;
+		}
+	}
+
+	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+		goto disable_vdd;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+						QUSB2PHY_1P8_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda18:%d\n", ret);
+		goto put_vdda18_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda18);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+		goto unset_vdda18;
+	}
+
+	ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+		goto disable_vdda18;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
+						QUSB2PHY_3P3_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		goto put_vdda33_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda33);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+		goto unset_vdd33;
+	}
+
+	if (toggle_vdd)
+		qphy->power_enabled = true;
+
+	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+	return ret;
+
+disable_vdda33:
+	ret = regulator_disable(qphy->vdda33);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+	ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+	ret = regulator_set_load(qphy->vdda33, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+	ret = regulator_disable(qphy->vdda18);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+	ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+	ret = regulator_set_load(qphy->vdda18, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdd:
+	if (toggle_vdd) {
+		ret = regulator_disable(qphy->vdd);
+		if (ret)
+			dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+								ret);
+
+unconfig_vdd:
+		ret = qusb_phy_config_vdd(qphy, false);
+		if (ret)
+			dev_err(qphy->phy.dev, "Unable to unconfig VDD:%d\n",
+								ret);
+	}
+err_vdd:
+	if (toggle_vdd)
+		qphy->power_enabled = false;
+	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+	return ret;
+}
+
+static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+{
+	u8 num_of_bits;
+	u32 bit_mask = 1;
+
+	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+				qphy->tune2_efuse_num_of_bits,
+				qphy->tune2_efuse_bit_pos);
+
+	/* get bit mask based on number of bits to use with efuse reg */
+	if (qphy->tune2_efuse_num_of_bits) {
+		num_of_bits = qphy->tune2_efuse_num_of_bits;
+		bit_mask = (bit_mask << num_of_bits) - 1;
+	}
+
+	/*
+	 * Read EFUSE register having TUNE2 parameter's high nibble.
+	 * If efuse register shows value as 0x0, then use default value
+	 * as 0xB as high nibble. Otherwise use efuse register based
+	 * value for this purpose.
+	 */
+	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
+				__func__, bit_mask, qphy->tune2_val);
+
+	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
+				qphy->tune2_efuse_bit_pos, bit_mask);
+
+	if (!qphy->tune2_val)
+		qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
+
+	/* Get TUNE2 byte value using high and low nibble value */
+	qphy->tune2_val = ((qphy->tune2_val << 0x4) |
+					TUNE2_DEFAULT_LOW_NIBBLE);
+}
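+
+/*
+ * Worked example with illustrative efuse contents: for tune2_efuse_bit_pos =
+ * 21 and tune2_efuse_num_of_bits = 4, bit_mask is 0xF; a raw read of
+ * 0x00A00000 gives (0x00A00000 >> 21) & 0xF = 0x5, so tune2_val ends up as
+ * (0x5 << 4) | 0x3 = 0x53. A zero field falls back to (0xB << 4) | 0x3 = 0xB3.
+ */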
+
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+		unsigned long delay)
+{
+	int i;
+
+	pr_debug("Seq count:%d\n", cnt);
+	for (i = 0; i < cnt; i = i+2) {
+		pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+		writel_relaxed(seq[i], base + seq[i+1]);
+		if (delay)
+			usleep_range(delay, (delay + 2000));
+	}
+}
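+
+/*
+ * The sequences handed to qusb_phy_write_seq() are flat <value, offset> pairs;
+ * e.g. an illustrative entry "qcom,qusb-phy-init-seq = <0x80 0x2c>" writes
+ * 0x80 to the PHY register at offset 0x2c.
+ */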
+
+static void qusb_phy_host_init(struct usb_phy *phy)
+{
+	u8 reg;
+	int ret;
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
+			qphy->host_init_seq_len, 0);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+	dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+	if (!(reg & CORE_READY_STATUS)) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+	}
+}
+
+static int qusb_phy_init(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret;
+	u8 reg;
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	ret = qusb_phy_enable_power(qphy, true, true);
+	if (ret)
+		return ret;
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	if (qphy->emulation) {
+		if (qphy->emu_init_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base + 0x8000,
+				qphy->emu_init_seq,
+					qphy->emu_init_seq_len, 10000);
+
+		if (qphy->qusb_phy_init_seq)
+			qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+					qphy->init_seq_len, 0);
+
+		/* Wait for 5ms as per QUSB2 RUMI sequence */
+		usleep_range(5000, 7000);
+
+		if (qphy->phy_pll_reset_seq)
+			qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
+					qphy->phy_pll_reset_seq_len, 10000);
+
+		if (qphy->emu_dcm_reset_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+					qphy->emu_dcm_reset_seq,
+					qphy->emu_dcm_reset_seq_len, 10000);
+
+		return 0;
+	}
+
+	/* Disable the PHY */
+	writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+			PWR_CTRL1_POWR_DOWN,
+			qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	if (qphy->qusb_phy_init_seq)
+		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+				qphy->init_seq_len, 0);
+	/*
+	 * Check for EFUSE value only if tune2_efuse_reg is available
+	 * and try to read EFUSE value only once i.e. not every USB
+	 * cable connect case.
+	 */
+	if (qphy->tune2_efuse_reg) {
+		if (!qphy->tune2_val)
+			qusb_phy_get_tune2_param(qphy);
+
+		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
+				qphy->tune2_val);
+		writel_relaxed(qphy->tune2_val,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* If phy_tune2 modparam set, override tune2 value */
+	if (phy_tune2) {
+		pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
+						__func__, phy_tune2);
+		writel_relaxed(phy_tune2,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* ensure above writes are completed before re-enabling PHY */
+	wmb();
+
+	/* Enable the PHY */
+	writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+			~PWR_CTRL1_POWR_DOWN,
+			qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_COMMON_STATUS_ONE);
+	dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+	if (!(reg & CORE_READY_STATUS)) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+	}
+	return 0;
+}
+
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Disable the PHY */
+	writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+			PWR_CTRL1_POWR_DOWN,
+			qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Makes sure that above write goes through */
+	wmb();
+
+	qusb_phy_enable_clocks(qphy, false);
+}
+
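+/*
+ * Unlike the older QUSB driver (phy-msm-qusb.c), which samples D+/D- from
+ * QUSB2PHY_PORT_UTMI_STATUS, this v2 driver infers the pre-suspend line state
+ * from the PHY_HSFS_MODE/PHY_LS_MODE bits in phy.flags (expected to be set by
+ * the controller driver).
+ */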
+static u32 qusb_phy_get_linestate(struct qusb_phy *qphy)
+{
+	u32 linestate = 0;
+
+	if (qphy->cable_connected) {
+		if (qphy->phy.flags & PHY_HSFS_MODE)
+			linestate |= LINESTATE_DP;
+		else if (qphy->phy.flags & PHY_LS_MODE)
+			linestate |= LINESTATE_DM;
+	}
+	return linestate;
+}
+
+/**
+ * Performs QUSB2 PHY suspend/resume functionality.
+ *
+ * @phy - USB PHY pointer.
+ * @suspend - 1 to put the PHY into low-power suspend, 0 to resume it.
+ *
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u32 linestate = 0, intr_mask = 0;
+
+	if (qphy->suspended && suspend) {
+		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+			__func__);
+		return 0;
+	}
+
+	if (suspend) {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			linestate = qusb_phy_get_linestate(qphy);
+			/*
+			 * D+/D- interrupts are level-triggered, but we are
+			 * only interested if the line state changes, so enable
+			 * the high/low trigger based on current state. In
+			 * other words, enable the triggers _opposite_ of what
+			 * the current D+/D- levels are.
+			 * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+			 * configure the mask to trigger on D+ low OR D- high
+			 * configure the mask to trigger on D+ low OR D- high
+			 */
+			intr_mask = DMSE_INTERRUPT | DPSE_INTERRUPT;
+			if (!(linestate & LINESTATE_DP)) /* D+ low */
+				intr_mask |= DPSE_INTR_HIGH_SEL;
+			if (!(linestate & LINESTATE_DM)) /* D- low */
+				intr_mask |= DMSE_INTR_HIGH_SEL;
+
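+			/*
+			 * Example: in HS 'J'/suspend (D+ high, D- low) only
+			 * DMSE_INTR_HIGH_SEL is added, i.e. intr_mask becomes
+			 * DMSE_INTERRUPT | DPSE_INTERRUPT | DMSE_INTR_HIGH_SEL
+			 * and the PHY wakes on D+ falling or D- rising.
+			 */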
+			writel_relaxed(intr_mask,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			dev_dbg(phy->dev, "%s: intr_mask = %x\n",
+			__func__, intr_mask);
+
+			/* Makes sure that above write goes through */
+			wmb();
+			qusb_phy_enable_clocks(qphy, false);
+		} else { /* Cable disconnect case */
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			/* Put PHY into non-driving mode */
+			writel_relaxed(0x23,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+			/* Makes sure that above write goes through */
+			wmb();
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_enable_power(qphy, false, true);
+		}
+		qphy->suspended = true;
+	} else {
+		/* Bus resume case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			qusb_phy_enable_clocks(qphy, true);
+			/* Clear all interrupts on resume */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_INTR_CTRL);
+
+			/* Makes sure that above write goes through */
+			wmb();
+		} else { /* Cable connect case */
+			qusb_phy_enable_power(qphy, true, true);
+			qusb_phy_enable_clocks(qphy, true);
+		}
+		qphy->suspended = false;
+	}
+
+	return 0;
+}
+
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = true;
+
+	if (qphy->qusb_phy_host_init_seq && qphy->phy.flags & PHY_HOST_MODE)
+		qusb_phy_host_init(phy);
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = false;
+
+	dev_dbg(phy->dev, "QUSB PHY: disconnect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+
+	if (!qphy->rm_pulldown) {
+		ret = qusb_phy_enable_power(qphy, true, false);
+		if (ret >= 0) {
+			qphy->rm_pulldown = true;
+			dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
+							qphy->rm_pulldown);
+		}
+	}
+
+	return ret;
+}
+
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+
+	if (qphy->rm_pulldown) {
+		ret = qusb_phy_enable_power(qphy, false, false);
+		if (ret >= 0) {
+			qphy->rm_pulldown = false;
+			dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
+							qphy->rm_pulldown);
+		}
+	}
+
+	return ret;
+}
+
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
+					qphy->rm_pulldown);
+	return qphy->rm_pulldown;
+}
+
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+	.enable		= qusb_phy_dpdm_regulator_enable,
+	.disable	= qusb_phy_dpdm_regulator_disable,
+	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
+};
+
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+	struct device *dev = qphy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	qphy->dpdm_rdesc.owner = THIS_MODULE;
+	qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+	qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = qphy;
+	cfg.of_node = dev->of_node;
+
+	qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+	if (IS_ERR(qphy->dpdm_rdev))
+		return PTR_ERR(qphy->dpdm_rdev);
+
+	return 0;
+}
+
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0;
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->phy.dev = dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qusb_phy_base");
+	qphy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qphy->base))
+		return PTR_ERR(qphy->base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"emu_phy_base");
+	if (res) {
+		qphy->emu_phy_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->emu_phy_base)) {
+			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
+			qphy->emu_phy_base = NULL;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"tune2_efuse_addr");
+	if (res) {
+		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+							resource_size(res));
+		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+			ret = of_property_read_u32(dev->of_node,
+					"qcom,tune2-efuse-bit-pos",
+					&qphy->tune2_efuse_bit_pos);
+			if (!ret) {
+				ret = of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-num-bits",
+						&qphy->tune2_efuse_num_of_bits);
+			}
+
+			if (ret) {
+				dev_err(dev,
+				"DT Value for tune2 efuse is invalid.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(qphy->ref_clk_src))
+		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(qphy->ref_clk))
+		dev_dbg(dev, "clk get failed for ref_clk\n");
+	else
+		clk_set_rate(qphy->ref_clk, 19200000);
+
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+		if (IS_ERR(qphy->cfg_ahb_clk)) {
+			ret = PTR_ERR(qphy->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"clk get failed for cfg_ahb_clk ret %d\n", ret);
+			return ret;
+		}
+	}
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	qphy->emulation = of_property_read_bool(dev->of_node,
+					"qcom,emulation");
+
+	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
+	if (size) {
+		qphy->emu_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_init_seq) {
+			qphy->emu_init_seq_len =
+				(size / sizeof(*qphy->emu_init_seq));
+			if (qphy->emu_init_seq_len % 2) {
+				dev_err(dev, "invalid emu_init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-init-seq",
+				qphy->emu_init_seq,
+				qphy->emu_init_seq_len);
+		} else {
+			dev_dbg(dev,
+			"error allocating memory for emu_init_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
+	if (size) {
+		qphy->phy_pll_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->phy_pll_reset_seq) {
+			qphy->phy_pll_reset_seq_len =
+				(size / sizeof(*qphy->phy_pll_reset_seq));
+			if (qphy->phy_pll_reset_seq_len % 2) {
+				dev_err(dev, "invalid phy_pll_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,phy-pll-reset-seq",
+				qphy->phy_pll_reset_seq,
+				qphy->phy_pll_reset_seq_len);
+		} else {
+			dev_dbg(dev,
+			"error allocating memory for phy_pll_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
+	if (size) {
+		qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_dcm_reset_seq) {
+			qphy->emu_dcm_reset_seq_len =
+				(size / sizeof(*qphy->emu_dcm_reset_seq));
+			if (qphy->emu_dcm_reset_seq_len % 2) {
+				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-dcm-reset-seq",
+				qphy->emu_dcm_reset_seq,
+				qphy->emu_dcm_reset_seq_len);
+		} else {
+			dev_dbg(dev,
+			"error allocating memory for emu_dcm_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+	if (size) {
+		qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->qusb_phy_init_seq) {
+			qphy->init_seq_len =
+				(size / sizeof(*qphy->qusb_phy_init_seq));
+			if (qphy->init_seq_len % 2) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-init-seq",
+				qphy->qusb_phy_init_seq,
+				qphy->init_seq_len);
+		} else {
+			dev_err(dev,
+			"error allocating memory for phy_init_seq\n");
+		}
+	}
+
+	qphy->host_init_seq_len = of_property_count_elems_of_size(dev->of_node,
+				"qcom,qusb-phy-host-init-seq",
+				sizeof(*qphy->qusb_phy_host_init_seq));
+	if (qphy->host_init_seq_len > 0) {
+		qphy->qusb_phy_host_init_seq = devm_kcalloc(dev,
+					qphy->host_init_seq_len,
+					sizeof(*qphy->qusb_phy_host_init_seq),
+					GFP_KERNEL);
+		if (qphy->qusb_phy_host_init_seq)
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-host-init-seq",
+				qphy->qusb_phy_host_init_seq,
+				qphy->host_init_seq_len);
+		else
+			return -ENOMEM;
+	}
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) qphy->vdd_levels,
+					 ARRAY_SIZE(qphy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	qphy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(qphy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(qphy->vdd);
+	}
+
+	qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(qphy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		return PTR_ERR(qphy->vdda33);
+	}
+
+	qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(qphy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(qphy->vdda18);
+	}
+
+	platform_set_drvdata(pdev, qphy);
+
+	qphy->phy.label			= "msm-qusb-phy-v2";
+	qphy->phy.init			= qusb_phy_init;
+	qphy->phy.set_suspend           = qusb_phy_set_suspend;
+	qphy->phy.shutdown		= qusb_phy_shutdown;
+	qphy->phy.type			= USB_PHY_TYPE_USB2;
+	qphy->phy.notify_connect        = qusb_phy_notify_connect;
+	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		return ret;
+
+	ret = qusb_phy_regulator_init(qphy);
+	if (ret)
+		usb_remove_phy(&qphy->phy);
+
+	return ret;
+}
+
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+	usb_remove_phy(&qphy->phy);
+
+	if (qphy->clocks_enabled) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		qphy->clocks_enabled = false;
+	}
+
+	qusb_phy_enable_power(qphy, false, true);
+
+	return 0;
+}
+
+static const struct of_device_id qusb_phy_id_table[] = {
+	{ .compatible = "qcom,qusb2phy-v2", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+	.probe		= qusb_phy_probe,
+	.remove		= qusb_phy_remove,
+	.driver = {
+		.name	= "msm-qusb-phy-v2",
+		.of_match_table = of_match_ptr(qusb_phy_id_table),
+	},
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY v2 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
new file mode 100644
index 0000000..76b034e
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -0,0 +1,1051 @@
+/*
+ * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/usb/phy.h>
+#include <linux/reset.h>
+
+#define QUSB2PHY_PLL_STATUS	0x38
+#define QUSB2PHY_PLL_LOCK	BIT(5)
+
+#define QUSB2PHY_PORT_QC1	0x70
+#define VDM_SRC_EN		BIT(4)
+#define VDP_SRC_EN		BIT(2)
+
+#define QUSB2PHY_PORT_QC2	0x74
+#define RDM_UP_EN		BIT(1)
+#define RDP_UP_EN		BIT(3)
+#define RPUM_LOW_EN		BIT(4)
+#define RPUP_LOW_EN		BIT(5)
+
+#define QUSB2PHY_PORT_POWERDOWN		0xB4
+#define CLAMP_N_EN			BIT(5)
+#define FREEZIO_N			BIT(1)
+#define POWER_DOWN			BIT(0)
+
+#define QUSB2PHY_PWR_CTRL1		0x210
+#define PWR_CTRL1_CLAMP_N_EN		BIT(1)
+#define PWR_CTRL1_POWR_DOWN		BIT(0)
+
+#define QUSB2PHY_PLL_COMMON_STATUS_ONE	0x1A0
+#define CORE_READY_STATUS		BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL1	0xC0
+#define TERM_SELECT			BIT(4)
+#define XCVR_SELECT_FS			BIT(2)
+#define OP_MODE_NON_DRIVE		BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_CTRL2	0xC4
+#define UTMI_ULPI_SEL			BIT(7)
+#define UTMI_TEST_MUX_SEL		BIT(6)
+
+#define QUSB2PHY_PLL_TEST		0x04
+#define CLK_REF_SEL			BIT(7)
+
+#define QUSB2PHY_PORT_TUNE1             0x80
+#define QUSB2PHY_PORT_TUNE2             0x84
+#define QUSB2PHY_PORT_TUNE3             0x88
+#define QUSB2PHY_PORT_TUNE4             0x8C
+
+/* In case Efuse register shows zero, use this value */
+#define TUNE2_DEFAULT_HIGH_NIBBLE	0xB
+#define TUNE2_DEFAULT_LOW_NIBBLE	0x3
+
+/* Get TUNE2's high nibble value read from efuse */
+#define TUNE2_HIGH_NIBBLE_VAL(val, pos, mask)	((val >> pos) & mask)
+
+#define QUSB2PHY_PORT_INTR_CTRL         0xBC
+#define CHG_DET_INTR_EN                 BIT(4)
+#define DMSE_INTR_HIGH_SEL              BIT(3)
+#define DMSE_INTR_EN                    BIT(2)
+#define DPSE_INTR_HIGH_SEL              BIT(1)
+#define DPSE_INTR_EN                    BIT(0)
+
+#define QUSB2PHY_PORT_UTMI_STATUS	0xF4
+#define LINESTATE_DP			BIT(0)
+#define LINESTATE_DM			BIT(1)
+
+
+#define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
+#define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
+#define QUSB2PHY_1P8_HPM_LOAD          30000   /* uA */
+
+#define QUSB2PHY_3P3_VOL_MIN		3075000 /* uV */
+#define QUSB2PHY_3P3_VOL_MAX		3200000 /* uV */
+#define QUSB2PHY_3P3_HPM_LOAD		30000	/* uA */
+
+#define QUSB2PHY_REFCLK_ENABLE		BIT(0)
+
+unsigned int tune2;
+module_param(tune2, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
+
+struct qusb_phy {
+	struct usb_phy		phy;
+	void __iomem		*base;
+	void __iomem		*tune2_efuse_reg;
+	void __iomem		*ref_clk_base;
+
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*cfg_ahb_clk;
+	struct reset_control	*phy_reset;
+
+	struct regulator	*vdd;
+	struct regulator	*vdda33;
+	struct regulator	*vdda18;
+	int			vdd_levels[3]; /* none, low, high */
+	int			init_seq_len;
+	int			*qusb_phy_init_seq;
+	u32			major_rev;
+
+	u32			tune2_val;
+	int			tune2_efuse_bit_pos;
+	int			tune2_efuse_num_of_bits;
+
+	bool			power_enabled;
+	bool			clocks_enabled;
+	bool			cable_connected;
+	bool			suspended;
+	bool			ulpi_mode;
+	bool			rm_pulldown;
+	bool			is_se_clk;
+
+	struct regulator_desc	dpdm_rdesc;
+	struct regulator_dev	*dpdm_rdev;
+
+	/* emulation targets specific */
+	void __iomem		*emu_phy_base;
+	bool			emulation;
+	int			*emu_init_seq;
+	int			emu_init_seq_len;
+	int			*phy_pll_reset_seq;
+	int			phy_pll_reset_seq_len;
+	int			*emu_dcm_reset_seq;
+	int			emu_dcm_reset_seq_len;
+};
+
+static void qusb_phy_enable_clocks(struct qusb_phy *qphy, bool on)
+{
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d on:%d\n",
+			__func__, qphy->clocks_enabled, on);
+
+	if (!qphy->clocks_enabled && on) {
+		clk_prepare_enable(qphy->ref_clk_src);
+		clk_prepare_enable(qphy->ref_clk);
+		clk_prepare_enable(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = true;
+	}
+
+	if (qphy->clocks_enabled && !on) {
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		qphy->clocks_enabled = false;
+	}
+
+	dev_dbg(qphy->phy.dev, "%s(): clocks_enabled:%d\n", __func__,
+						qphy->clocks_enabled);
+}
+
+static int qusb_phy_config_vdd(struct qusb_phy *qphy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(qphy->vdd, qphy->vdd_levels[min],
+						qphy->vdd_levels[2]);
+	if (ret) {
+		dev_err(qphy->phy.dev, "unable to set voltage for qusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(qphy->phy.dev, "min_vol:%d max_vol:%d\n",
+			qphy->vdd_levels[min], qphy->vdd_levels[2]);
+	return ret;
+}
+
+static int qusb_phy_enable_power(struct qusb_phy *qphy, bool on,
+						bool toggle_vdd)
+{
+	int ret = 0;
+
+	dev_dbg(qphy->phy.dev, "%s turn %s regulators. power_enabled:%d\n",
+			__func__, on ? "on" : "off", qphy->power_enabled);
+
+	if (toggle_vdd && qphy->power_enabled == on) {
+		dev_dbg(qphy->phy.dev,
+			"PHY regulators are already in the requested state\n");
+		return 0;
+	}
+
+	if (!on)
+		goto disable_vdda33;
+
+	if (toggle_vdd) {
+		ret = qusb_phy_config_vdd(qphy, true);
+		if (ret) {
+			dev_err(qphy->phy.dev, "Unable to config VDD:%d\n",
+								ret);
+			goto err_vdd;
+		}
+
+		ret = regulator_enable(qphy->vdd);
+		if (ret) {
+			dev_err(qphy->phy.dev, "Unable to enable VDD\n");
+			goto unconfig_vdd;
+		}
+	}
+
+	ret = regulator_set_load(qphy->vdda18, QUSB2PHY_1P8_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda18:%d\n", ret);
+		goto disable_vdd;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda18, QUSB2PHY_1P8_VOL_MIN,
+						QUSB2PHY_1P8_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda18:%d\n", ret);
+		goto put_vdda18_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda18);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda18:%d\n", ret);
+		goto unset_vdda18;
+	}
+
+	ret = regulator_set_load(qphy->vdda33, QUSB2PHY_3P3_HPM_LOAD);
+	if (ret < 0) {
+		dev_err(qphy->phy.dev, "Unable to set HPM of vdda33:%d\n", ret);
+		goto disable_vdda18;
+	}
+
+	ret = regulator_set_voltage(qphy->vdda33, QUSB2PHY_3P3_VOL_MIN,
+						QUSB2PHY_3P3_VOL_MAX);
+	if (ret) {
+		dev_err(qphy->phy.dev,
+				"Unable to set voltage for vdda33:%d\n", ret);
+		goto put_vdda33_lpm;
+	}
+
+	ret = regulator_enable(qphy->vdda33);
+	if (ret) {
+		dev_err(qphy->phy.dev, "Unable to enable vdda33:%d\n", ret);
+		goto unset_vdd33;
+	}
+
+	if (toggle_vdd)
+		qphy->power_enabled = true;
+
+	pr_debug("%s(): QUSB PHY's regulators are turned ON.\n", __func__);
+	return ret;
+
+disable_vdda33:
+	ret = regulator_disable(qphy->vdda33);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda33:%d\n", ret);
+
+unset_vdd33:
+	ret = regulator_set_voltage(qphy->vdda33, 0, QUSB2PHY_3P3_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda33:%d\n", ret);
+
+put_vdda33_lpm:
+	ret = regulator_set_load(qphy->vdda33, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set (0) HPM of vdda33\n");
+
+disable_vdda18:
+	ret = regulator_disable(qphy->vdda18);
+	if (ret)
+		dev_err(qphy->phy.dev, "Unable to disable vdda18:%d\n", ret);
+
+unset_vdda18:
+	ret = regulator_set_voltage(qphy->vdda18, 0, QUSB2PHY_1P8_VOL_MAX);
+	if (ret)
+		dev_err(qphy->phy.dev,
+			"Unable to set (0) voltage for vdda18:%d\n", ret);
+
+put_vdda18_lpm:
+	ret = regulator_set_load(qphy->vdda18, 0);
+	if (ret < 0)
+		dev_err(qphy->phy.dev, "Unable to set LPM of vdda18\n");
+
+disable_vdd:
+	if (toggle_vdd) {
+		ret = regulator_disable(qphy->vdd);
+		if (ret)
+			dev_err(qphy->phy.dev, "Unable to disable vdd:%d\n",
+								ret);
+
+unconfig_vdd:
+		ret = qusb_phy_config_vdd(qphy, false);
+		if (ret)
+			dev_err(qphy->phy.dev, "Unable to unconfig VDD:%d\n",
+								ret);
+	}
+err_vdd:
+	if (toggle_vdd)
+		qphy->power_enabled = false;
+	dev_dbg(qphy->phy.dev, "QUSB PHY's regulators are turned OFF.\n");
+	return ret;
+}
+
+static void qusb_phy_get_tune2_param(struct qusb_phy *qphy)
+{
+	u8 num_of_bits;
+	u32 bit_mask = 1;
+
+	pr_debug("%s(): num_of_bits:%d bit_pos:%d\n", __func__,
+				qphy->tune2_efuse_num_of_bits,
+				qphy->tune2_efuse_bit_pos);
+
+	/* get bit mask based on number of bits to use with efuse reg */
+	if (qphy->tune2_efuse_num_of_bits) {
+		num_of_bits = qphy->tune2_efuse_num_of_bits;
+		bit_mask = (bit_mask << num_of_bits) - 1;
+	}
+
+	/*
+	 * Read EFUSE register having TUNE2 parameter's high nibble.
+	 * If efuse register shows value as 0x0, then use default value
+	 * as 0xB as high nibble. Otherwise use efuse register based
+	 * value for this purpose.
+	 */
+	qphy->tune2_val = readl_relaxed(qphy->tune2_efuse_reg);
+	pr_debug("%s(): bit_mask:%d efuse based tune2 value:%d\n",
+				__func__, bit_mask, qphy->tune2_val);
+
+	qphy->tune2_val = TUNE2_HIGH_NIBBLE_VAL(qphy->tune2_val,
+				qphy->tune2_efuse_bit_pos, bit_mask);
+
+	if (!qphy->tune2_val)
+		qphy->tune2_val = TUNE2_DEFAULT_HIGH_NIBBLE;
+
+	/* Get TUNE2 byte value using high and low nibble value */
+	qphy->tune2_val = ((qphy->tune2_val << 0x4) |
+					TUNE2_DEFAULT_LOW_NIBBLE);
+}
+
+static void qusb_phy_write_seq(void __iomem *base, u32 *seq, int cnt,
+		unsigned long delay)
+{
+	int i;
+
+	pr_debug("Seq count:%d\n", cnt);
+	for (i = 0; i < cnt; i = i+2) {
+		pr_debug("write 0x%02x to 0x%02x\n", seq[i], seq[i+1]);
+		writel_relaxed(seq[i], base + seq[i+1]);
+		if (delay)
+			usleep_range(delay, (delay + 2000));
+	}
+}
+
+static int qusb_phy_init(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	int ret, reset_val = 0;
+	u8 reg;
+	bool pll_lock_fail = false;
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	ret = qusb_phy_enable_power(qphy, true, true);
+	if (ret)
+		return ret;
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/*
+	 * ref clock is enabled by default after power on reset. Linux clock
+	 * driver will disable this clock as part of late init if peripheral
+	 * driver(s) do not explicitly vote for it. The Linux clock driver also
+	 * does not disable the clock before late init, even if a peripheral
+	 * driver explicitly requests that, and this driver cannot defer its
+	 * probe until late init. Hence, explicitly disable the clock with a
+	 * register write to
+	 * allow QUSB PHY PLL to lock properly.
+	 */
+	if (qphy->ref_clk_base) {
+		writel_relaxed((readl_relaxed(qphy->ref_clk_base) &
+					~QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		/* Make sure that above write complete to get ref clk OFF */
+		wmb();
+	}
+
+	/* Perform phy reset */
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
+	usleep_range(100, 150);
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
+
+	if (qphy->emulation) {
+		if (qphy->emu_init_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+				qphy->emu_init_seq, qphy->emu_init_seq_len, 0);
+
+		if (qphy->qusb_phy_init_seq)
+			qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+					qphy->init_seq_len, 0);
+
+		/* Wait for 5ms as per QUSB2 RUMI sequence */
+		usleep_range(5000, 7000);
+
+		if (qphy->phy_pll_reset_seq)
+			qusb_phy_write_seq(qphy->base, qphy->phy_pll_reset_seq,
+					qphy->phy_pll_reset_seq_len, 10000);
+
+		if (qphy->emu_dcm_reset_seq)
+			qusb_phy_write_seq(qphy->emu_phy_base,
+					qphy->emu_dcm_reset_seq,
+					qphy->emu_dcm_reset_seq_len, 10000);
+
+		return 0;
+	}
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* configure for ULPI mode if requested */
+	if (qphy->ulpi_mode)
+		writel_relaxed(0x0, qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+	/* save reset value to override based on clk scheme */
+	if (qphy->ref_clk_base)
+		reset_val = readl_relaxed(qphy->base + QUSB2PHY_PLL_TEST);
+
+	if (qphy->qusb_phy_init_seq)
+		qusb_phy_write_seq(qphy->base, qphy->qusb_phy_init_seq,
+				qphy->init_seq_len, 0);
+
+	/*
+	 * Check for EFUSE value only if tune2_efuse_reg is available
+	 * and try to read EFUSE value only once i.e. not every USB
+	 * cable connect case.
+	 */
+	if (qphy->tune2_efuse_reg) {
+		if (!qphy->tune2_val)
+			qusb_phy_get_tune2_param(qphy);
+
+		pr_debug("%s(): Programming TUNE2 parameter as:%x\n", __func__,
+				qphy->tune2_val);
+		writel_relaxed(qphy->tune2_val,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* If tune2 modparam set, override tune2 value */
+	if (tune2) {
+		pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
+						__func__, tune2);
+		writel_relaxed(tune2,
+				qphy->base + QUSB2PHY_PORT_TUNE2);
+	}
+
+	/* ensure above writes are completed before re-enabling PHY */
+	wmb();
+
+	/* Enable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) &
+				~PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Ensure above write is completed before turning ON ref clk */
+	wmb();
+
+	/* Required for the PHY PLL to lock successfully */
+	usleep_range(150, 160);
+
+	/* Turn on phy ref_clk if DIFF_CLK else select SE_CLK */
+	if (qphy->ref_clk_base) {
+		if (!qphy->is_se_clk) {
+			reset_val &= ~CLK_REF_SEL;
+			writel_relaxed((readl_relaxed(qphy->ref_clk_base) |
+					QUSB2PHY_REFCLK_ENABLE),
+					qphy->ref_clk_base);
+		} else {
+			reset_val |= CLK_REF_SEL;
+			writel_relaxed(reset_val,
+					qphy->base + QUSB2PHY_PLL_TEST);
+		}
+
+		/* Make sure above write is completed to get PLL source clock */
+		wmb();
+
+		/* Required to get PHY PLL lock successfully */
+		usleep_range(100, 110);
+	}
+
+	if (qphy->major_rev < 2) {
+		reg = readb_relaxed(qphy->base + QUSB2PHY_PLL_STATUS);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_STATUS:%x\n", reg);
+		if (!(reg & QUSB2PHY_PLL_LOCK))
+			pll_lock_fail = true;
+	} else {
+		reg = readb_relaxed(qphy->base +
+				QUSB2PHY_PLL_COMMON_STATUS_ONE);
+		dev_dbg(phy->dev, "QUSB2PHY_PLL_COMMON_STATUS_ONE:%x\n", reg);
+		if (!(reg & CORE_READY_STATUS))
+			pll_lock_fail = true;
+	}
+
+	if (pll_lock_fail) {
+		dev_err(phy->dev, "QUSB PHY PLL LOCK fails:%x\n", reg);
+		WARN_ON(1);
+	}
+
+	return 0;
+}
+
+static void qusb_phy_shutdown(struct usb_phy *phy)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	dev_dbg(phy->dev, "%s\n", __func__);
+
+	qusb_phy_enable_clocks(qphy, true);
+
+	/* Disable the PHY */
+	if (qphy->major_rev < 2)
+		writel_relaxed(CLAMP_N_EN | FREEZIO_N | POWER_DOWN,
+				qphy->base + QUSB2PHY_PORT_POWERDOWN);
+	else
+		writel_relaxed(readl_relaxed(qphy->base + QUSB2PHY_PWR_CTRL1) |
+				PWR_CTRL1_POWR_DOWN,
+				qphy->base + QUSB2PHY_PWR_CTRL1);
+
+	/* Make sure above write complete before turning off clocks */
+	wmb();
+
+	qusb_phy_enable_clocks(qphy, false);
+}
+
+/**
+ * Performs QUSB2 PHY suspend/resume functionality.
+ *
+ * @phy - USB PHY pointer.
+ * @suspend - 1 to put the PHY into low-power suspend, 0 to resume it.
+ *
+ */
+static int qusb_phy_set_suspend(struct usb_phy *phy, int suspend)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+	u32 linestate = 0, intr_mask = 0;
+
+	if (qphy->suspended && suspend) {
+		dev_dbg(phy->dev, "%s: USB PHY is already suspended\n",
+			__func__);
+		return 0;
+	}
+
+	if (suspend) {
+		/* Bus suspend case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			/* Clear all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			linestate = readl_relaxed(qphy->base +
+					QUSB2PHY_PORT_UTMI_STATUS);
+
+			/*
+			 * D+/D- interrupts are level-triggered, but we are
+			 * only interested if the line state changes, so enable
+			 * the high/low trigger based on current state. In
+			 * other words, enable the triggers _opposite_ of what
+			 * the current D+/D- levels are.
+			 * e.g. if currently D+ high, D- low (HS 'J'/Suspend),
+			 * configure the mask to trigger on D+ low OR D- high
+			 */
+			intr_mask = DPSE_INTR_EN | DMSE_INTR_EN;
+			if (!(linestate & LINESTATE_DP)) /* D+ low */
+				intr_mask |= DPSE_INTR_HIGH_SEL;
+			if (!(linestate & LINESTATE_DM)) /* D- low */
+				intr_mask |= DMSE_INTR_HIGH_SEL;
+
+			writel_relaxed(intr_mask,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+
+			qusb_phy_enable_clocks(qphy, false);
+		} else { /* Disconnect case */
+			/* Disable all interrupts */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+			/*
+			 * Phy in non-driving mode leaves Dp and Dm lines in
+			 * high-Z state. Controller power collapse is not
+			 * switching phy to non-driving mode causing charger
+			 * detection failure. Bring phy to non-driving mode by
+			 * overriding controller output via UTMI interface.
+			 */
+			writel_relaxed(TERM_SELECT | XCVR_SELECT_FS |
+				OP_MODE_NON_DRIVE,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL1);
+			writel_relaxed(UTMI_ULPI_SEL | UTMI_TEST_MUX_SEL,
+				qphy->base + QUSB2PHY_PORT_UTMI_CTRL2);
+
+
+			qusb_phy_enable_clocks(qphy, false);
+			qusb_phy_enable_power(qphy, false, true);
+		}
+		qphy->suspended = true;
+	} else {
+		/* Bus resume case */
+		if (qphy->cable_connected ||
+			(qphy->phy.flags & PHY_HOST_MODE)) {
+			qusb_phy_enable_clocks(qphy, true);
+			/* Clear all interrupts on resume */
+			writel_relaxed(0x00,
+				qphy->base + QUSB2PHY_PORT_INTR_CTRL);
+		} else {
+			qusb_phy_enable_power(qphy, true, true);
+			qusb_phy_enable_clocks(qphy, true);
+		}
+		qphy->suspended = false;
+	}
+
+	return 0;
+}
+
+static int qusb_phy_notify_connect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = true;
+
+	dev_dbg(phy->dev, "QUSB PHY: connect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_notify_disconnect(struct usb_phy *phy,
+					enum usb_device_speed speed)
+{
+	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
+
+	qphy->cable_connected = false;
+
+	dev_dbg(phy->dev, "QUSB PHY: disconnect notification cable_connected=%d\n",
+							qphy->cable_connected);
+	return 0;
+}
+
+static int qusb_phy_dpdm_regulator_enable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+
+	if (!qphy->rm_pulldown) {
+		ret = qusb_phy_enable_power(qphy, true, false);
+		if (ret >= 0) {
+			qphy->rm_pulldown = true;
+			dev_dbg(qphy->phy.dev, "dpdm_enable:rm_pulldown:%d\n",
+							qphy->rm_pulldown);
+		}
+	}
+
+	return ret;
+}
+
+static int qusb_phy_dpdm_regulator_disable(struct regulator_dev *rdev)
+{
+	int ret = 0;
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s\n", __func__);
+
+	if (qphy->rm_pulldown) {
+		ret = qusb_phy_enable_power(qphy, false, false);
+		if (ret >= 0) {
+			qphy->rm_pulldown = false;
+			dev_dbg(qphy->phy.dev, "dpdm_disable:rm_pulldown:%d\n",
+							qphy->rm_pulldown);
+		}
+	}
+
+	return ret;
+}
+
+static int qusb_phy_dpdm_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct qusb_phy *qphy = rdev_get_drvdata(rdev);
+
+	dev_dbg(qphy->phy.dev, "%s qphy->rm_pulldown = %d\n", __func__,
+					qphy->rm_pulldown);
+	return qphy->rm_pulldown;
+}
+
+static struct regulator_ops qusb_phy_dpdm_regulator_ops = {
+	.enable		= qusb_phy_dpdm_regulator_enable,
+	.disable	= qusb_phy_dpdm_regulator_disable,
+	.is_enabled	= qusb_phy_dpdm_regulator_is_enabled,
+};
+
+static int qusb_phy_regulator_init(struct qusb_phy *qphy)
+{
+	struct device *dev = qphy->phy.dev;
+	struct regulator_config cfg = {};
+	struct regulator_init_data *init_data;
+
+	init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+	if (!init_data)
+		return -ENOMEM;
+
+	init_data->constraints.valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+	qphy->dpdm_rdesc.owner = THIS_MODULE;
+	qphy->dpdm_rdesc.type = REGULATOR_VOLTAGE;
+	qphy->dpdm_rdesc.ops = &qusb_phy_dpdm_regulator_ops;
+	qphy->dpdm_rdesc.name = kbasename(dev->of_node->full_name);
+
+	cfg.dev = dev;
+	cfg.init_data = init_data;
+	cfg.driver_data = qphy;
+	cfg.of_node = dev->of_node;
+
+	qphy->dpdm_rdev = devm_regulator_register(dev, &qphy->dpdm_rdesc, &cfg);
+	if (IS_ERR(qphy->dpdm_rdev))
+		return PTR_ERR(qphy->dpdm_rdev);
+
+	return 0;
+}
+
+static int qusb_phy_probe(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0;
+	const char *phy_type;
+	bool hold_phy_reset;
+
+	qphy = devm_kzalloc(dev, sizeof(*qphy), GFP_KERNEL);
+	if (!qphy)
+		return -ENOMEM;
+
+	qphy->phy.dev = dev;
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qusb_phy_base");
+	qphy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(qphy->base))
+		return PTR_ERR(qphy->base);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"emu_phy_base");
+	if (res) {
+		qphy->emu_phy_base = devm_ioremap_resource(dev, res);
+		if (IS_ERR(qphy->emu_phy_base)) {
+			dev_dbg(dev, "couldn't ioremap emu_phy_base\n");
+			qphy->emu_phy_base = NULL;
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"tune2_efuse_addr");
+	if (res) {
+		qphy->tune2_efuse_reg = devm_ioremap_nocache(dev, res->start,
+							resource_size(res));
+		if (!IS_ERR_OR_NULL(qphy->tune2_efuse_reg)) {
+			ret = of_property_read_u32(dev->of_node,
+					"qcom,tune2-efuse-bit-pos",
+					&qphy->tune2_efuse_bit_pos);
+			if (!ret) {
+				ret = of_property_read_u32(dev->of_node,
+						"qcom,tune2-efuse-num-bits",
+						&qphy->tune2_efuse_num_of_bits);
+			}
+
+			if (ret) {
+				dev_err(dev, "DT Value for tune2 efuse is invalid.\n");
+				return -EINVAL;
+			}
+		}
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"ref_clk_addr");
+	if (res) {
+		qphy->ref_clk_base = devm_ioremap_nocache(dev,
+				res->start, resource_size(res));
+		if (!qphy->ref_clk_base) {
+			dev_dbg(dev, "ref_clk_address is not available.\n");
+			return -ENOMEM;
+		}
+
+		ret = of_property_read_string(dev->of_node,
+				"qcom,phy-clk-scheme", &phy_type);
+		if (ret) {
+			dev_err(dev, "error reading qcom,phy-clk-scheme property\n");
+			return ret;
+		}
+
+		if (!strcasecmp(phy_type, "cml")) {
+			qphy->is_se_clk = false;
+		} else if (!strcasecmp(phy_type, "cmos")) {
+			qphy->is_se_clk = true;
+		} else {
+			dev_err(dev, "error: invalid qcom,phy-clk-scheme value\n");
+			return -EINVAL;
+		}
+	}
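+
+	/*
+	 * The clock-scheme string maps onto the two reference-clock paths used
+	 * in qusb_phy_init(): "cml" (differential) leaves is_se_clk false and
+	 * enables the ref clk buffer through ref_clk_base, while "cmos"
+	 * (single-ended) sets is_se_clk and selects CLK_REF_SEL in
+	 * QUSB2PHY_PLL_TEST instead.
+	 */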
+
+	qphy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(qphy->ref_clk_src))
+		dev_dbg(dev, "clk get failed for ref_clk_src\n");
+
+	qphy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(qphy->ref_clk))
+		dev_dbg(dev, "clk get failed for ref_clk\n");
+	else
+		clk_set_rate(qphy->ref_clk, 19200000);
+
+	qphy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+	if (IS_ERR(qphy->cfg_ahb_clk))
+		return PTR_ERR(qphy->cfg_ahb_clk);
+
+	qphy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(qphy->phy_reset))
+		return PTR_ERR(qphy->phy_reset);
+
+	qphy->emulation = of_property_read_bool(dev->of_node,
+					"qcom,emulation");
+
+	of_get_property(dev->of_node, "qcom,emu-init-seq", &size);
+	if (size) {
+		qphy->emu_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_init_seq) {
+			qphy->emu_init_seq_len =
+				(size / sizeof(*qphy->emu_init_seq));
+			if (qphy->emu_init_seq_len % 2) {
+				dev_err(dev, "invalid emu_init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-init-seq",
+				qphy->emu_init_seq,
+				qphy->emu_init_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for emu_init_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,phy-pll-reset-seq", &size);
+	if (size) {
+		qphy->phy_pll_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->phy_pll_reset_seq) {
+			qphy->phy_pll_reset_seq_len =
+				(size / sizeof(*qphy->phy_pll_reset_seq));
+			if (qphy->phy_pll_reset_seq_len % 2) {
+				dev_err(dev, "invalid phy_pll_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,phy-pll-reset-seq",
+				qphy->phy_pll_reset_seq,
+				qphy->phy_pll_reset_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for phy_pll_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,emu-dcm-reset-seq", &size);
+	if (size) {
+		qphy->emu_dcm_reset_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->emu_dcm_reset_seq) {
+			qphy->emu_dcm_reset_seq_len =
+				(size / sizeof(*qphy->emu_dcm_reset_seq));
+			if (qphy->emu_dcm_reset_seq_len % 2) {
+				dev_err(dev, "invalid emu_dcm_reset_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,emu-dcm-reset-seq",
+				qphy->emu_dcm_reset_seq,
+				qphy->emu_dcm_reset_seq_len);
+		} else {
+			dev_dbg(dev, "error allocating memory for emu_dcm_reset_seq\n");
+		}
+	}
+
+	size = 0;
+	of_get_property(dev->of_node, "qcom,qusb-phy-init-seq", &size);
+	if (size) {
+		qphy->qusb_phy_init_seq = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (qphy->qusb_phy_init_seq) {
+			qphy->init_seq_len =
+				(size / sizeof(*qphy->qusb_phy_init_seq));
+			if (qphy->init_seq_len % 2) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qusb-phy-init-seq",
+				qphy->qusb_phy_init_seq,
+				qphy->init_seq_len);
+		} else {
+			dev_err(dev, "error allocating memory for phy_init_seq\n");
+		}
+	}
+
+	qphy->ulpi_mode = false;
+	ret = of_property_read_string(dev->of_node, "phy_type", &phy_type);
+
+	if (!ret) {
+		if (!strcasecmp(phy_type, "ulpi"))
+			qphy->ulpi_mode = true;
+	} else {
+		dev_err(dev, "error reading phy_type property\n");
+		return ret;
+	}
+
+	hold_phy_reset = of_property_read_bool(dev->of_node, "qcom,hold-reset");
+
+	/* use default major revision as 2 */
+	qphy->major_rev = 2;
+	ret = of_property_read_u32(dev->of_node, "qcom,major-rev",
+						&qphy->major_rev);
+
+	ret = of_property_read_u32_array(dev->of_node, "qcom,vdd-voltage-level",
+					 (u32 *) qphy->vdd_levels,
+					 ARRAY_SIZE(qphy->vdd_levels));
+	if (ret) {
+		dev_err(dev, "error reading qcom,vdd-voltage-level property\n");
+		return ret;
+	}
+
+	qphy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(qphy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		return PTR_ERR(qphy->vdd);
+	}
+
+	qphy->vdda33 = devm_regulator_get(dev, "vdda33");
+	if (IS_ERR(qphy->vdda33)) {
+		dev_err(dev, "unable to get vdda33 supply\n");
+		return PTR_ERR(qphy->vdda33);
+	}
+
+	qphy->vdda18 = devm_regulator_get(dev, "vdda18");
+	if (IS_ERR(qphy->vdda18)) {
+		dev_err(dev, "unable to get vdda18 supply\n");
+		return PTR_ERR(qphy->vdda18);
+	}
+
+	platform_set_drvdata(pdev, qphy);
+
+	qphy->phy.label			= "msm-qusb-phy";
+	qphy->phy.init			= qusb_phy_init;
+	qphy->phy.set_suspend           = qusb_phy_set_suspend;
+	qphy->phy.shutdown		= qusb_phy_shutdown;
+	qphy->phy.type			= USB_PHY_TYPE_USB2;
+	qphy->phy.notify_connect        = qusb_phy_notify_connect;
+	qphy->phy.notify_disconnect     = qusb_phy_notify_disconnect;
+
+	/*
+	 * On some platforms multiple QUSB PHYs are available. If QUSB PHY is
+	 * not used, there is leakage current seen with QUSB PHY related voltage
+	 * rail. Hence keep QUSB PHY into reset state explicitly here.
+	 */
+	if (hold_phy_reset) {
+		ret = reset_control_assert(qphy->phy_reset);
+		if (ret)
+			dev_err(dev, "%s:phy_reset assert failed\n", __func__);
+	}
+
+	ret = usb_add_phy_dev(&qphy->phy);
+	if (ret)
+		return ret;
+
+	ret = qusb_phy_regulator_init(qphy);
+	if (ret)
+		usb_remove_phy(&qphy->phy);
+
+	return ret;
+}
+
+static int qusb_phy_remove(struct platform_device *pdev)
+{
+	struct qusb_phy *qphy = platform_get_drvdata(pdev);
+
+	usb_remove_phy(&qphy->phy);
+
+	if (qphy->clocks_enabled) {
+		clk_disable_unprepare(qphy->cfg_ahb_clk);
+		clk_disable_unprepare(qphy->ref_clk);
+		clk_disable_unprepare(qphy->ref_clk_src);
+		qphy->clocks_enabled = false;
+	}
+
+	qusb_phy_enable_power(qphy, false, true);
+
+	return 0;
+}
+
+static const struct of_device_id qusb_phy_id_table[] = {
+	{ .compatible = "qcom,qusb2phy", },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, qusb_phy_id_table);
+
+static struct platform_driver qusb_phy_driver = {
+	.probe		= qusb_phy_probe,
+	.remove		= qusb_phy_remove,
+	.driver = {
+		.name	= "msm-qusb-phy",
+		.of_match_table = of_match_ptr(qusb_phy_id_table),
+	},
+};
+
+module_platform_driver(qusb_phy_driver);
+
+MODULE_DESCRIPTION("MSM QUSB2 PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
new file mode 100644
index 0000000..9a7f0c7
--- /dev/null
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/usb/phy.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+enum core_ldo_levels {
+	CORE_LEVEL_NONE = 0,
+	CORE_LEVEL_MIN,
+	CORE_LEVEL_MAX,
+};
+
+#define INIT_MAX_TIME_USEC			1000
+
+/* default CORE voltage and load values */
+#define USB_SSPHY_1P2_VOL_MIN		1200000 /* uV */
+#define USB_SSPHY_1P2_VOL_MAX		1200000 /* uV */
+#define USB_SSPHY_HPM_LOAD		23000	/* uA */
+
+/* USB3PHY_PCIE_USB3_PCS_PCS_STATUS bit */
+#define PHYSTATUS				BIT(6)
+
+/* PCIE_USB3_PHY_AUTONOMOUS_MODE_CTRL bits */
+#define ARCVR_DTCT_EN		BIT(0)
+#define ALFPS_DTCT_EN		BIT(1)
+#define ARCVR_DTCT_EVENT_SEL	BIT(4)
+
+/* PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL bits */
+
+/* 0 - selects Lane A. 1 - selects Lane B */
+#define SW_PORTSELECT		BIT(0)
+/* port select mux: 1 - SW control, 0 - HW control */
+#define SW_PORTSELECT_MX	BIT(1)
+
+enum qmp_phy_rev_reg {
+	USB3_PHY_PCS_STATUS,
+	USB3_PHY_AUTONOMOUS_MODE_CTRL,
+	USB3_PHY_LFPS_RXTERM_IRQ_CLEAR,
+	USB3_PHY_POWER_DOWN_CONTROL,
+	USB3_PHY_SW_RESET,
+	USB3_PHY_START,
+	USB3_PHY_PCS_MISC_TYPEC_CTRL,
+	USB3_PHY_REG_MAX,
+};
+
+/* reg values to write */
+struct qmp_reg_val {
+	u32 offset;
+	u32 val;
+	u32 delay;
+};
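+
+/*
+ * Illustrative sketch only (offsets and values below are placeholders, not a
+ * real sequence): configure_phy_regs() below walks a table of these entries,
+ * writing each value to base + offset and optionally sleeping for "delay"
+ * microseconds, until it reaches an entry whose offset is -1. The real
+ * sequence comes from the "qcom,qmp-phy-init-seq" DT property.
+ *
+ *	static const struct qmp_reg_val example_init_seq[] = {
+ *		{ 0x48, 0x07, 0 },
+ *		{ 0xac, 0x14, 10 },
+ *		{ -1, 0x00, 0 },
+ *	};
+ */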
+
+struct msm_ssphy_qmp {
+	struct usb_phy		phy;
+	void __iomem		*base;
+	void __iomem		*vls_clamp_reg;
+	void __iomem		*tcsr_usb3_dp_phymode;
+
+	struct regulator	*vdd;
+	int			vdd_levels[3]; /* none, low, high */
+	struct regulator	*core_ldo;
+	int			core_voltage_levels[3];
+	struct clk		*ref_clk_src;
+	struct clk		*ref_clk;
+	struct clk		*aux_clk;
+	struct clk		*cfg_ahb_clk;
+	struct clk		*pipe_clk;
+	struct reset_control	*phy_reset;
+	struct reset_control	*phy_phy_reset;
+
+	bool			clk_enabled;
+	bool			cable_connected;
+	bool			in_suspend;
+	bool			emulation;
+	unsigned int		*phy_reg; /* revision based offset */
+	unsigned int		*qmp_phy_init_seq;
+	int			init_seq_len;
+	unsigned int		*qmp_phy_reg_offset;
+	int			reg_offset_cnt;
+};
+
+static const struct of_device_id msm_usb_id_table[] = {
+	{
+		.compatible = "qcom,usb-ssphy-qmp",
+	},
+	{
+		.compatible = "qcom,usb-ssphy-qmp-v1",
+	},
+	{
+		.compatible = "qcom,usb-ssphy-qmp-v2",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, msm_usb_id_table);
+
+static inline char *get_cable_status_str(struct msm_ssphy_qmp *phy)
+{
+	return phy->cable_connected ? "connected" : "disconnected";
+}
+
+static void msm_ssusb_qmp_clr_lfps_rxterm_int(struct msm_ssphy_qmp *phy)
+{
+	writeb_relaxed(1, phy->base +
+			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
+	/* flush the previous write before next write */
+	wmb();
+	writeb_relaxed(0, phy->base +
+			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
+}
+
+static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
+		int enable)
+{
+	u8 val;
+	unsigned int autonomous_mode_offset =
+			phy->phy_reg[USB3_PHY_AUTONOMOUS_MODE_CTRL];
+
+	dev_dbg(phy->phy.dev, "enabling QMP autonomous mode with cable %s\n",
+			get_cable_status_str(phy));
+
+	if (enable) {
+		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+		if (phy->phy.flags & DEVICE_IN_SS_MODE) {
+			val =
+			readb_relaxed(phy->base + autonomous_mode_offset);
+			val |= ARCVR_DTCT_EN;
+			val |= ALFPS_DTCT_EN;
+			val &= ~ARCVR_DTCT_EVENT_SEL;
+			writeb_relaxed(val, phy->base + autonomous_mode_offset);
+		}
+
+		/* clamp phy level shifter to perform autonomous detection */
+		writel_relaxed(0x1, phy->vls_clamp_reg);
+	} else {
+		writel_relaxed(0x0, phy->vls_clamp_reg);
+		writeb_relaxed(0, phy->base + autonomous_mode_offset);
+		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
+	}
+}
+
+
+static int msm_ssusb_qmp_config_vdd(struct msm_ssphy_qmp *phy, int high)
+{
+	int min, ret;
+
+	min = high ? 1 : 0; /* low or none? */
+	ret = regulator_set_voltage(phy->vdd, phy->vdd_levels[min],
+				    phy->vdd_levels[2]);
+	if (ret) {
+		dev_err(phy->phy.dev, "unable to set voltage for ssusb vdd\n");
+		return ret;
+	}
+
+	dev_dbg(phy->phy.dev, "min_vol:%d max_vol:%d\n",
+		phy->vdd_levels[min], phy->vdd_levels[2]);
+	return ret;
+}
+
+static int msm_ssusb_qmp_ldo_enable(struct msm_ssphy_qmp *phy, int on)
+{
+	int rc = 0;
+
+	dev_dbg(phy->phy.dev, "reg (%s)\n", on ? "HPM" : "LPM");
+
+	if (!on)
+		goto disable_regulators;
+
+
+	rc = regulator_set_load(phy->core_ldo, USB_SSPHY_HPM_LOAD);
+	if (rc < 0) {
+		dev_err(phy->phy.dev, "Unable to set HPM of core_ldo\n");
+		return rc;
+	}
+
+	rc = regulator_set_voltage(phy->core_ldo,
+			phy->core_voltage_levels[CORE_LEVEL_MIN],
+			phy->core_voltage_levels[CORE_LEVEL_MAX]);
+	if (rc) {
+		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
+		goto put_core_ldo_lpm;
+	}
+
+	rc = regulator_enable(phy->core_ldo);
+	if (rc) {
+		dev_err(phy->phy.dev, "Unable to enable core_ldo\n");
+		goto unset_core_ldo;
+	}
+
+	return 0;
+
+disable_regulators:
+	rc = regulator_disable(phy->core_ldo);
+	if (rc)
+		dev_err(phy->phy.dev, "Unable to disable core_ldo\n");
+
+unset_core_ldo:
+	rc = regulator_set_voltage(phy->core_ldo,
+			phy->core_voltage_levels[CORE_LEVEL_NONE],
+			phy->core_voltage_levels[CORE_LEVEL_MAX]);
+	if (rc)
+		dev_err(phy->phy.dev, "unable to set voltage for core_ldo\n");
+
+put_core_ldo_lpm:
+	rc = regulator_set_load(phy->core_ldo, 0);
+	if (rc < 0)
+		dev_err(phy->phy.dev, "Unable to set LPM of core_ldo\n");
+
+	return rc < 0 ? rc : 0;
+}
+
+static int configure_phy_regs(struct usb_phy *uphy,
+				const struct qmp_reg_val *reg)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	if (!reg) {
+		dev_err(uphy->dev, "NULL PHY configuration\n");
+		return -EINVAL;
+	}
+
+	while (reg->offset != -1) {
+		writel_relaxed(reg->val, phy->base + reg->offset);
+		if (reg->delay)
+			usleep_range(reg->delay, reg->delay + 10);
+		reg++;
+	}
+	return 0;
+}
+
+/* SSPHY Initialization */
+static int msm_ssphy_qmp_init(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret, val;
+	unsigned int init_timeout_usec = INIT_MAX_TIME_USEC;
+	const struct qmp_reg_val *reg = NULL;
+
+	dev_dbg(uphy->dev, "Initializing QMP phy\n");
+
+	if (phy->emulation)
+		return 0;
+
+	if (!phy->clk_enabled) {
+		if (phy->ref_clk_src)
+			clk_prepare_enable(phy->ref_clk_src);
+		if (phy->ref_clk)
+			clk_prepare_enable(phy->ref_clk);
+		clk_prepare_enable(phy->aux_clk);
+		clk_prepare_enable(phy->cfg_ahb_clk);
+		clk_set_rate(phy->pipe_clk, 125000000);
+		clk_prepare_enable(phy->pipe_clk);
+		phy->clk_enabled = true;
+	}
+
+	writel_relaxed(0x01,
+		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+
+	/* select usb3 phy mode */
+	if (phy->tcsr_usb3_dp_phymode)
+		writel_relaxed(0x0, phy->tcsr_usb3_dp_phymode);
+
+	/* Make sure that above write completed to get PHY into POWER DOWN */
+	mb();
+
+	reg = (struct qmp_reg_val *)phy->qmp_phy_init_seq;
+
+	/* Main configuration */
+	ret = configure_phy_regs(uphy, reg);
+	if (ret) {
+		dev_err(uphy->dev, "Failed the main PHY configuration\n");
+		return ret;
+	}
+
+	/* perform lane selection */
+	val = -EINVAL;
+	if (phy->phy.flags & PHY_LANE_A)
+		val = SW_PORTSELECT_MX;
+
+	if (phy->phy.flags & PHY_LANE_B)
+		val = SW_PORTSELECT | SW_PORTSELECT_MX;
+
+	if (val > 0)
+		writel_relaxed(val,
+			phy->base + phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
+
+	writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
+	writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_SW_RESET]);
+
+	/* Make sure above write completed to bring PHY out of reset */
+	mb();
+
+	/* Wait for PHY initialization to be done */
+	do {
+		if (readl_relaxed(phy->base +
+			phy->phy_reg[USB3_PHY_PCS_STATUS]) & PHYSTATUS)
+			usleep_range(1, 2);
+		else
+			break;
+	} while (--init_timeout_usec);
+
+	if (!init_timeout_usec) {
+		dev_err(uphy->dev, "QMP PHY initialization timeout\n");
+		dev_err(uphy->dev, "USB3_PHY_PCS_STATUS:%x\n",
+				readl_relaxed(phy->base +
+					phy->phy_reg[USB3_PHY_PCS_STATUS]));
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret;
+
+	dev_dbg(uphy->dev, "Resetting QMP phy\n");
+
+	/* Assert USB3 PHY reset */
+	ret = reset_control_assert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset assert failed\n");
+		goto exit;
+	}
+
+	/* Assert USB3 PHY CSR reset */
+	ret = reset_control_assert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset assert failed\n");
+		goto deassert_phy_phy_reset;
+	}
+
+	/* Deassert USB3 PHY CSR reset */
+	ret = reset_control_deassert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset deassert failed\n");
+		goto deassert_phy_phy_reset;
+	}
+
+	/* Deassert USB3 PHY reset */
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+		goto exit;
+	}
+
+	return 0;
+
+deassert_phy_phy_reset:
+	ret = reset_control_deassert(phy->phy_phy_reset);
+	if (ret)
+		dev_err(uphy->dev, "phy_phy_reset deassert failed\n");
+exit:
+	phy->in_suspend = false;
+
+	return ret;
+}
+
+static int msm_ssphy_power_enable(struct msm_ssphy_qmp *phy, bool on)
+{
+	bool host = phy->phy.flags & PHY_HOST_MODE;
+	int ret = 0;
+
+	/*
+	 * Turn off the phy's LDOs when cable is disconnected for device mode
+	 * with external vbus_id indication.
+	 */
+	if (!host && !phy->cable_connected) {
+		if (on) {
+			ret = regulator_enable(phy->vdd);
+			if (ret)
+				dev_err(phy->phy.dev,
+					"regulator_enable(phy->vdd) failed, ret=%d",
+					ret);
+
+			ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+			if (ret)
+				dev_err(phy->phy.dev,
+				"msm_ssusb_qmp_ldo_enable(1) failed, ret=%d\n",
+				ret);
+		} else {
+			ret = msm_ssusb_qmp_ldo_enable(phy, 0);
+			if (ret)
+				dev_err(phy->phy.dev,
+					"msm_ssusb_qmp_ldo_enable(0) failed, ret=%d\n",
+					ret);
+
+			ret = regulator_disable(phy->vdd);
+			if (ret)
+				dev_err(phy->phy.dev, "regulator_disable(phy->vdd) failed, ret=%d",
+					ret);
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * msm_ssphy_qmp_set_suspend - perform QMP PHY suspend/resume
+ * @uphy: usb phy pointer
+ * @suspend: 1 to suspend the PHY, 0 to resume it
+ */
+static int msm_ssphy_qmp_set_suspend(struct usb_phy *uphy, int suspend)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP PHY set_suspend for %s called with cable %s\n",
+			(suspend ? "suspend" : "resume"),
+			get_cable_status_str(phy));
+
+	if (phy->in_suspend == suspend) {
+		dev_dbg(uphy->dev, "%s: USB PHY is already %s.\n",
+			__func__, (suspend ? "suspended" : "resumed"));
+		return 0;
+	}
+
+	if (suspend) {
+		if (!phy->cable_connected)
+			writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+		else
+			msm_ssusb_qmp_enable_autonomous(phy, 1);
+
+		clk_disable_unprepare(phy->cfg_ahb_clk);
+		clk_disable_unprepare(phy->aux_clk);
+		clk_disable_unprepare(phy->pipe_clk);
+		if (phy->ref_clk)
+			clk_disable_unprepare(phy->ref_clk);
+		if (phy->ref_clk_src)
+			clk_disable_unprepare(phy->ref_clk_src);
+		phy->clk_enabled = false;
+		phy->in_suspend = true;
+		msm_ssphy_power_enable(phy, 0);
+		dev_dbg(uphy->dev, "QMP PHY is suspended\n");
+	} else {
+		msm_ssphy_power_enable(phy, 1);
+		clk_prepare_enable(phy->pipe_clk);
+		if (!phy->clk_enabled) {
+			if (phy->ref_clk_src)
+				clk_prepare_enable(phy->ref_clk_src);
+			if (phy->ref_clk)
+				clk_prepare_enable(phy->ref_clk);
+			clk_prepare_enable(phy->aux_clk);
+			clk_prepare_enable(phy->cfg_ahb_clk);
+			phy->clk_enabled = true;
+		}
+		if (!phy->cable_connected) {
+			writel_relaxed(0x01,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+		} else  {
+			msm_ssusb_qmp_enable_autonomous(phy, 0);
+		}
+		phy->in_suspend = false;
+		dev_dbg(uphy->dev, "QMP PHY is resumed\n");
+	}
+
+	return 0;
+}
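+
+/*
+ * Usage sketch (hypothetical caller): the controller glue does not call
+ * msm_ssphy_qmp_set_suspend() directly; it goes through the generic usb_phy
+ * hook that probe installs below, e.g. with a struct usb_phy obtained from
+ * the framework:
+ *
+ *	usb_phy_set_suspend(ssphy, 1);	(suspend)
+ *	usb_phy_set_suspend(ssphy, 0);	(resume)
+ */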
+
+static int msm_ssphy_qmp_notify_connect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP phy connect notification\n");
+	phy->cable_connected = true;
+	dev_dbg(uphy->dev, "cable_connected=%d\n", phy->cable_connected);
+	return 0;
+}
+
+static int msm_ssphy_qmp_notify_disconnect(struct usb_phy *uphy,
+				       enum usb_device_speed speed)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+
+	dev_dbg(uphy->dev, "QMP phy disconnect notification\n");
+	dev_dbg(uphy->dev, "cable_connected=%d\n", phy->cable_connected);
+	phy->cable_connected = false;
+	return 0;
+}
+
+static int msm_ssphy_qmp_probe(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy;
+	struct device *dev = &pdev->dev;
+	struct resource *res;
+	int ret = 0, size = 0, len;
+
+	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
+	if (!phy)
+		return -ENOMEM;
+
+	phy->aux_clk = devm_clk_get(dev, "aux_clk");
+	if (IS_ERR(phy->aux_clk)) {
+		ret = PTR_ERR(phy->aux_clk);
+		phy->aux_clk = NULL;
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get aux_clk\n");
+		goto err;
+	}
+
+	clk_set_rate(phy->aux_clk, clk_round_rate(phy->aux_clk, ULONG_MAX));
+
+	if (of_property_match_string(pdev->dev.of_node,
+				"clock-names", "cfg_ahb_clk") >= 0) {
+		phy->cfg_ahb_clk = devm_clk_get(dev, "cfg_ahb_clk");
+		if (IS_ERR(phy->cfg_ahb_clk)) {
+			ret = PTR_ERR(phy->cfg_ahb_clk);
+			if (ret != -EPROBE_DEFER)
+				dev_err(dev,
+				"failed to get cfg_ahb_clk ret %d\n", ret);
+			goto err;
+		}
+	}
+
+	phy->pipe_clk = devm_clk_get(dev, "pipe_clk");
+	if (IS_ERR(phy->pipe_clk)) {
+		ret = PTR_ERR(phy->pipe_clk);
+		phy->pipe_clk = NULL;
+		if (ret != -EPROBE_DEFER)
+			dev_err(dev, "failed to get pipe_clk\n");
+		goto err;
+	}
+
+	phy->phy_reset = devm_reset_control_get(dev, "phy_reset");
+	if (IS_ERR(phy->phy_reset)) {
+		ret = PTR_ERR(phy->phy_reset);
+		dev_dbg(dev, "failed to get phy_reset\n");
+		goto err;
+	}
+
+	phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
+	if (IS_ERR(phy->phy_phy_reset)) {
+		ret = PTR_ERR(phy->phy_phy_reset);
+		dev_dbg(dev, "failed to get phy_phy_reset\n");
+		goto err;
+	}
+
+	of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
+	if (size) {
+		phy->qmp_phy_reg_offset = devm_kzalloc(dev,
+						size, GFP_KERNEL);
+		if (phy->qmp_phy_reg_offset) {
+			phy->reg_offset_cnt =
+				(size / sizeof(*phy->qmp_phy_reg_offset));
+			if (phy->reg_offset_cnt > USB3_PHY_REG_MAX) {
+				dev_err(dev, "invalid reg offset count\n");
+				return -EINVAL;
+			}
+
+			of_property_read_u32_array(dev->of_node,
+				"qcom,qmp-phy-reg-offset",
+				phy->qmp_phy_reg_offset,
+				phy->reg_offset_cnt);
+		} else {
+			dev_err(dev, "failed to alloc qmp_phy_reg_offset\n");
+			return -ENOMEM;
+		}
+		phy->phy_reg = phy->qmp_phy_reg_offset;
+	} else {
+		dev_err(dev, "missing qcom,qmp-phy-reg-offset property\n");
+		return -EINVAL;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						"qmp_phy_base");
+	if (!res) {
+		dev_err(dev, "failed getting qmp_phy_base\n");
+		return -ENODEV;
+	}
+	phy->base = devm_ioremap_resource(dev, res);
+	if (IS_ERR(phy->base)) {
+		ret = PTR_ERR(phy->base);
+		goto err;
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"vls_clamp_reg");
+	if (!res) {
+		dev_err(dev, "failed getting vls_clamp_reg\n");
+		return -ENODEV;
+	}
+	phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
+	if (IS_ERR(phy->vls_clamp_reg)) {
+		dev_err(dev, "couldn't find vls_clamp_reg address.\n");
+		return PTR_ERR(phy->vls_clamp_reg);
+	}
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+			"tcsr_usb3_dp_phymode");
+	if (res) {
+		phy->tcsr_usb3_dp_phymode = devm_ioremap_resource(dev, res);
+		if (IS_ERR(phy->tcsr_usb3_dp_phymode)) {
+			dev_err(dev, "err getting tcsr_usb3_dp_phymode addr\n");
+			return PTR_ERR(phy->tcsr_usb3_dp_phymode);
+		}
+	}
+
+	phy->emulation = of_property_read_bool(dev->of_node,
+						"qcom,emulation");
+	if (!phy->emulation) {
+		of_get_property(dev->of_node, "qcom,qmp-phy-init-seq", &size);
+		if (size) {
+			if (size % sizeof(*phy->qmp_phy_init_seq)) {
+				dev_err(dev, "invalid init_seq_len\n");
+				return -EINVAL;
+			}
+			phy->qmp_phy_init_seq = devm_kzalloc(dev,
+							size, GFP_KERNEL);
+			if (phy->qmp_phy_init_seq) {
+				phy->init_seq_len =
+					(size / sizeof(*phy->qmp_phy_init_seq));
+
+				of_property_read_u32_array(dev->of_node,
+					"qcom,qmp-phy-init-seq",
+					phy->qmp_phy_init_seq,
+					phy->init_seq_len);
+			} else {
+				dev_err(dev, "error allocating memory for phy_init_seq\n");
+				return -ENOMEM;
+			}
+		} else {
+			dev_err(dev, "missing qcom,qmp-phy-init-seq property\n");
+			return -EINVAL;
+		}
+	}
+
+	/* Set default core voltage values */
+	phy->core_voltage_levels[CORE_LEVEL_NONE] = 0;
+	phy->core_voltage_levels[CORE_LEVEL_MIN] = USB_SSPHY_1P2_VOL_MIN;
+	phy->core_voltage_levels[CORE_LEVEL_MAX] = USB_SSPHY_1P2_VOL_MAX;
+
+	if (of_get_property(dev->of_node, "qcom,core-voltage-level", &len) &&
+		len == sizeof(phy->core_voltage_levels)) {
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,core-voltage-level",
+				(u32 *)phy->core_voltage_levels,
+				len / sizeof(u32));
+		if (ret) {
+			dev_err(dev, "err qcom,core-voltage-level property\n");
+			goto err;
+		}
+	}
+
+	if (of_get_property(dev->of_node, "qcom,vdd-voltage-level", &len) &&
+		len == sizeof(phy->vdd_levels)) {
+		ret = of_property_read_u32_array(dev->of_node,
+				"qcom,vdd-voltage-level",
+				(u32 *) phy->vdd_levels,
+				len / sizeof(u32));
+		if (ret) {
+			dev_err(dev, "err qcom,vdd-voltage-level property\n");
+			goto err;
+		}
+	} else {
+		ret = -EINVAL;
+		dev_err(dev, "error invalid inputs for vdd-voltage-level\n");
+		goto err;
+	}
+
+	phy->vdd = devm_regulator_get(dev, "vdd");
+	if (IS_ERR(phy->vdd)) {
+		dev_err(dev, "unable to get vdd supply\n");
+		ret = PTR_ERR(phy->vdd);
+		goto err;
+	}
+
+	phy->core_ldo = devm_regulator_get(dev, "core");
+	if (IS_ERR(phy->core_ldo)) {
+		dev_err(dev, "unable to get core ldo supply\n");
+		ret = PTR_ERR(phy->core_ldo);
+		goto err;
+	}
+
+	ret = msm_ssusb_qmp_config_vdd(phy, 1);
+	if (ret) {
+		dev_err(dev, "ssusb vdd_dig configuration failed\n");
+		goto err;
+	}
+
+	ret = regulator_enable(phy->vdd);
+	if (ret) {
+		dev_err(dev, "unable to enable the ssusb vdd_dig\n");
+		goto unconfig_ss_vdd;
+	}
+
+	ret = msm_ssusb_qmp_ldo_enable(phy, 1);
+	if (ret) {
+		dev_err(dev, "ssusb vreg enable failed\n");
+		goto disable_ss_vdd;
+	}
+
+	phy->ref_clk_src = devm_clk_get(dev, "ref_clk_src");
+	if (IS_ERR(phy->ref_clk_src))
+		phy->ref_clk_src = NULL;
+	phy->ref_clk = devm_clk_get(dev, "ref_clk");
+	if (IS_ERR(phy->ref_clk))
+		phy->ref_clk = NULL;
+
+	platform_set_drvdata(pdev, phy);
+
+	if (of_property_read_bool(dev->of_node, "qcom,vbus-valid-override"))
+		phy->phy.flags |= PHY_VBUS_VALID_OVERRIDE;
+
+	phy->phy.dev			= dev;
+	phy->phy.init			= msm_ssphy_qmp_init;
+	phy->phy.set_suspend		= msm_ssphy_qmp_set_suspend;
+	phy->phy.notify_connect		= msm_ssphy_qmp_notify_connect;
+	phy->phy.notify_disconnect	= msm_ssphy_qmp_notify_disconnect;
+	phy->phy.reset			= msm_ssphy_qmp_reset;
+	phy->phy.type			= USB_PHY_TYPE_USB3;
+
+	ret = usb_add_phy_dev(&phy->phy);
+	if (ret)
+		goto disable_ss_ldo;
+	return 0;
+
+disable_ss_ldo:
+	msm_ssusb_qmp_ldo_enable(phy, 0);
+disable_ss_vdd:
+	regulator_disable(phy->vdd);
+unconfig_ss_vdd:
+	msm_ssusb_qmp_config_vdd(phy, 0);
+err:
+	return ret;
+}
+
+static int msm_ssphy_qmp_remove(struct platform_device *pdev)
+{
+	struct msm_ssphy_qmp *phy = platform_get_drvdata(pdev);
+
+	if (!phy)
+		return 0;
+
+	usb_remove_phy(&phy->phy);
+	if (phy->ref_clk)
+		clk_disable_unprepare(phy->ref_clk);
+	if (phy->ref_clk_src)
+		clk_disable_unprepare(phy->ref_clk_src);
+	msm_ssusb_qmp_ldo_enable(phy, 0);
+	regulator_disable(phy->vdd);
+	msm_ssusb_qmp_config_vdd(phy, 0);
+	clk_disable_unprepare(phy->aux_clk);
+	clk_disable_unprepare(phy->cfg_ahb_clk);
+	clk_disable_unprepare(phy->pipe_clk);
+	return 0;
+}
+
+static struct platform_driver msm_ssphy_qmp_driver = {
+	.probe		= msm_ssphy_qmp_probe,
+	.remove		= msm_ssphy_qmp_remove,
+	.driver = {
+		.name	= "msm-usb-ssphy-qmp",
+		.of_match_table = of_match_ptr(msm_usb_id_table),
+	},
+};
+
+module_platform_driver(msm_ssphy_qmp_driver);
+
+MODULE_DESCRIPTION("MSM USB SS QMP PHY driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 3c20af9..00bc52b 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -25,6 +25,7 @@
 endmenu
 
 source "drivers/video/backlight/Kconfig"
+source "drivers/video/adf/Kconfig"
 
 config VGASTATE
        tristate
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9ad3c17..1a8c4ce 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -1,6 +1,7 @@
 obj-$(CONFIG_VGASTATE)            += vgastate.o
 obj-$(CONFIG_HDMI)                += hdmi.o
 
+obj-$(CONFIG_ADF)		  += adf/
 obj-$(CONFIG_VT)		  += console/
 obj-$(CONFIG_LOGO)		  += logo/
 obj-y				  += backlight/
diff --git a/drivers/video/adf/Kconfig b/drivers/video/adf/Kconfig
new file mode 100644
index 0000000..2777db4
--- /dev/null
+++ b/drivers/video/adf/Kconfig
@@ -0,0 +1,14 @@
+menuconfig ADF
+	depends on SYNC
+	depends on DMA_SHARED_BUFFER
+	tristate "Atomic Display Framework"
+
+menuconfig ADF_FBDEV
+	depends on ADF
+	depends on FB
+	tristate "Helper for implementing the fbdev API in ADF drivers"
+
+menuconfig ADF_MEMBLOCK
+	depends on ADF
+	depends on HAVE_MEMBLOCK
+	bool "Helper for using memblocks as buffers in ADF drivers"
diff --git a/drivers/video/adf/Makefile b/drivers/video/adf/Makefile
new file mode 100644
index 0000000..cdf34a6
--- /dev/null
+++ b/drivers/video/adf/Makefile
@@ -0,0 +1,17 @@
+ccflags-y := -Idrivers/staging/android
+
+CFLAGS_adf.o := -I$(src)
+
+obj-$(CONFIG_ADF) += adf_core.o
+
+adf_core-y := adf.o \
+	adf_client.o \
+	adf_fops.o \
+	adf_format.o \
+	adf_sysfs.o
+
+adf_core-$(CONFIG_COMPAT) += adf_fops32.o
+
+obj-$(CONFIG_ADF_FBDEV) += adf_fbdev.o
+
+obj-$(CONFIG_ADF_MEMBLOCK) += adf_memblock.o
diff --git a/drivers/video/adf/adf.c b/drivers/video/adf/adf.c
new file mode 100644
index 0000000..42c30c0
--- /dev/null
+++ b/drivers/video/adf/adf.c
@@ -0,0 +1,1188 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * adf_modeinfo_{set_name,set_vrefresh} modified from
+ * drivers/gpu/drm/drm_modes.c
+ * adf_format_validate_yuv modified from framebuffer_check in
+ * drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#define CREATE_TRACE_POINTS
+#include "adf_trace.h"
+
+#define ADF_SHORT_FENCE_TIMEOUT (1 * MSEC_PER_SEC)
+#define ADF_LONG_FENCE_TIMEOUT (10 * MSEC_PER_SEC)
+
+static DEFINE_IDR(adf_devices);
+
+static void adf_fence_wait(struct adf_device *dev, struct sync_fence *fence)
+{
+	/* sync_fence_wait() dumps debug information on timeout.  Experience
+	   has shown that if the pipeline gets stuck, a short timeout followed
+	   by a longer one provides useful information for debugging. */
+	int err = sync_fence_wait(fence, ADF_SHORT_FENCE_TIMEOUT);
+	if (err >= 0)
+		return;
+
+	if (err == -ETIME)
+		err = sync_fence_wait(fence, ADF_LONG_FENCE_TIMEOUT);
+
+	if (err < 0)
+		dev_warn(&dev->base.dev, "error waiting on fence: %d\n", err);
+}
+
+void adf_buffer_cleanup(struct adf_buffer *buf)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(buf->dma_bufs); i++)
+		if (buf->dma_bufs[i])
+			dma_buf_put(buf->dma_bufs[i]);
+
+	if (buf->acquire_fence)
+		sync_fence_put(buf->acquire_fence);
+}
+
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+		struct adf_buffer *buf)
+{
+	/* calling adf_buffer_mapping_cleanup() is safe even if mapping is
+	   uninitialized or partially-initialized, as long as it was
+	   zeroed on allocation */
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(mapping->sg_tables); i++) {
+		if (mapping->sg_tables[i])
+			dma_buf_unmap_attachment(mapping->attachments[i],
+					mapping->sg_tables[i], DMA_TO_DEVICE);
+		if (mapping->attachments[i])
+			dma_buf_detach(buf->dma_bufs[i],
+					mapping->attachments[i]);
+	}
+}
+
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post)
+{
+	size_t i;
+
+	if (post->state)
+		dev->ops->state_free(dev, post->state);
+
+	for (i = 0; i < post->config.n_bufs; i++) {
+		adf_buffer_mapping_cleanup(&post->config.mappings[i],
+				&post->config.bufs[i]);
+		adf_buffer_cleanup(&post->config.bufs[i]);
+	}
+
+	kfree(post->config.custom_data);
+	kfree(post->config.mappings);
+	kfree(post->config.bufs);
+	kfree(post);
+}
+
+static void adf_sw_advance_timeline(struct adf_device *dev)
+{
+#ifdef CONFIG_SW_SYNC
+	sw_sync_timeline_inc(dev->timeline, 1);
+#else
+	BUG();
+#endif
+}
+
+static void adf_post_work_func(struct kthread_work *work)
+{
+	struct adf_device *dev =
+			container_of(work, struct adf_device, post_work);
+	struct adf_pending_post *post, *next;
+	struct list_head saved_list;
+
+	mutex_lock(&dev->post_lock);
+	memcpy(&saved_list, &dev->post_list, sizeof(saved_list));
+	list_replace_init(&dev->post_list, &saved_list);
+	mutex_unlock(&dev->post_lock);
+
+	list_for_each_entry_safe(post, next, &saved_list, head) {
+		int i;
+
+		for (i = 0; i < post->config.n_bufs; i++) {
+			struct sync_fence *fence =
+					post->config.bufs[i].acquire_fence;
+			if (fence)
+				adf_fence_wait(dev, fence);
+		}
+
+		dev->ops->post(dev, &post->config, post->state);
+
+		if (dev->ops->advance_timeline)
+			dev->ops->advance_timeline(dev, &post->config,
+					post->state);
+		else
+			adf_sw_advance_timeline(dev);
+
+		list_del(&post->head);
+		if (dev->onscreen)
+			adf_post_cleanup(dev, dev->onscreen);
+		dev->onscreen = post;
+	}
+}
+
+void adf_attachment_free(struct adf_attachment_list *attachment)
+{
+	list_del(&attachment->head);
+	kfree(attachment);
+}
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+		enum adf_event_type type)
+{
+	struct rb_root *root = &obj->event_refcount;
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+	struct adf_event_refcount *refcount;
+
+	while (*new) {
+		refcount = container_of(*new, struct adf_event_refcount, node);
+		parent = *new;
+
+		if (refcount->type > type)
+			new = &(*new)->rb_left;
+		else if (refcount->type < type)
+			new = &(*new)->rb_right;
+		else
+			return refcount;
+	}
+
+	refcount = kzalloc(sizeof(*refcount), GFP_KERNEL);
+	if (!refcount)
+		return NULL;
+	refcount->type = type;
+
+	rb_link_node(&refcount->node, parent, new);
+	rb_insert_color(&refcount->node, root);
+	return refcount;
+}
+
+/**
+ * adf_event_get - increase the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_get() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, or -%EINVAL if the object does not support the
+ * requested event type.
+ */
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type)
+{
+	struct adf_event_refcount *refcount;
+	int old_refcount;
+	int ret;
+
+	ret = adf_obj_check_supports_event(obj, type);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&obj->event_lock);
+
+	refcount = adf_obj_find_event_refcount(obj, type);
+	if (!refcount) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	old_refcount = refcount->refcount++;
+
+	if (old_refcount == 0) {
+		obj->ops->set_event(obj, type, true);
+		trace_adf_event_enable(obj, type);
+	}
+
+done:
+	mutex_unlock(&obj->event_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_event_get);
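+
+/*
+ * Usage sketch (hypothetical client, not part of the ADF core): a client
+ * interested in vsync events takes a reference and drops it when done:
+ *
+ *	err = adf_event_get(&intf->base, ADF_EVENT_VSYNC);
+ *	if (err)
+ *		return err;
+ *	...
+ *	adf_event_put(&intf->base, ADF_EVENT_VSYNC);
+ */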
+
+/**
+ * adf_event_put - decrease the refcount for an event
+ *
+ * @obj: the object that produces the event
+ * @type: the event type
+ *
+ * ADF will call the object's set_event() op if needed.  ops are allowed
+ * to sleep, so adf_event_put() must NOT be called from an atomic context.
+ *
+ * Returns 0 if successful, -%EINVAL if the object does not support the
+ * requested event type, or -%EALREADY if the refcount is already 0.
+ */
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type)
+{
+	struct adf_event_refcount *refcount;
+	int old_refcount;
+	int ret;
+
+	ret = adf_obj_check_supports_event(obj, type);
+	if (ret < 0)
+		return ret;
+
+
+	mutex_lock(&obj->event_lock);
+
+	refcount = adf_obj_find_event_refcount(obj, type);
+	if (!refcount) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	old_refcount = refcount->refcount--;
+
+	if (WARN_ON(old_refcount == 0)) {
+		refcount->refcount++;
+		ret = -EALREADY;
+	} else if (old_refcount == 1) {
+		obj->ops->set_event(obj, type, false);
+		trace_adf_event_disable(obj, type);
+	}
+
+done:
+	mutex_unlock(&obj->event_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_event_put);
+
+/**
+ * adf_vsync_wait - wait for a vsync event on a display interface
+ *
+ * @intf: the display interface
+ * @timeout: timeout in milliseconds (0 = wait indefinitely)
+ *
+ * adf_vsync_wait() may sleep, so it must NOT be called from an atomic context.
+ *
+ * This function returns -%ERESTARTSYS if it is interrupted by a signal.
+ * If @timeout == 0 then this function returns 0 on vsync. If @timeout > 0 then
+ * this function returns the number of remaining jiffies or -%ETIMEDOUT on
+ * timeout.
+ */
+int adf_vsync_wait(struct adf_interface *intf, long timeout)
+{
+	ktime_t timestamp;
+	int ret;
+	unsigned long flags;
+
+	read_lock_irqsave(&intf->vsync_lock, flags);
+	timestamp = intf->vsync_timestamp;
+	read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+	adf_vsync_get(intf);
+	if (timeout) {
+		ret = wait_event_interruptible_timeout(intf->vsync_wait,
+				!ktime_equal(timestamp,
+						intf->vsync_timestamp),
+				msecs_to_jiffies(timeout));
+		if (ret == 0 && ktime_equal(timestamp, intf->vsync_timestamp))
+			ret = -ETIMEDOUT;
+	} else {
+		ret = wait_event_interruptible(intf->vsync_wait,
+				!ktime_equal(timestamp,
+						intf->vsync_timestamp));
+	}
+	adf_vsync_put(intf);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_vsync_wait);
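+
+/*
+ * Usage sketch (hypothetical caller): wait up to 100 ms for the next vsync
+ * and treat a timeout as a warning:
+ *
+ *	int ret = adf_vsync_wait(intf, 100);
+ *
+ *	if (ret == -ETIMEDOUT)
+ *		dev_warn(&intf->base.dev, "vsync timed out\n");
+ *	else if (ret == -ERESTARTSYS)
+ *		return ret;
+ */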
+
+static void adf_event_queue(struct adf_obj *obj, struct adf_event *event)
+{
+	struct adf_file *file;
+	unsigned long flags;
+
+	trace_adf_event(obj, event->type);
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+
+	list_for_each_entry(file, &obj->file_list, head)
+		if (test_bit(event->type, file->event_subscriptions))
+			adf_file_queue_event(file, event);
+
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+}
+
+/**
+ * adf_event_notify - notify userspace of a driver-private event
+ *
+ * @obj: the ADF object that produced the event
+ * @event: the event
+ *
+ * adf_event_notify() may be called safely from an atomic context.  It will
+ * copy @event if needed, so @event may point to a variable on the stack.
+ *
+ * Drivers must NOT call adf_event_notify() for vsync and hotplug events.
+ * ADF provides adf_vsync_notify() and
+ * adf_hotplug_notify_{connected,disconnected}() for these events.
+ */
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event)
+{
+	if (WARN_ON(event->type == ADF_EVENT_VSYNC ||
+			event->type == ADF_EVENT_HOTPLUG))
+		return -EINVAL;
+
+	adf_event_queue(obj, event);
+	return 0;
+}
+EXPORT_SYMBOL(adf_event_notify);
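+
+/*
+ * Usage sketch (hypothetical driver-private event): wrap struct adf_event in
+ * a driver-specific structure and pass its base to adf_event_notify():
+ *
+ *	struct my_event {
+ *		struct adf_event base;
+ *		u32 payload;
+ *	} event;
+ *
+ *	event.base.type = ADF_EVENT_DEVICE_CUSTOM;
+ *	event.base.length = sizeof(event);
+ *	event.payload = 42;
+ *	adf_event_notify(&intf->base, &event.base);
+ */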
+
+/**
+ * adf_vsync_notify - notify ADF of a display interface's vsync event
+ *
+ * @intf: the display interface
+ * @timestamp: the time the vsync occurred
+ *
+ * adf_vsync_notify() may be called safely from an atomic context.
+ */
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp)
+{
+	unsigned long flags;
+	struct adf_vsync_event event;
+
+	write_lock_irqsave(&intf->vsync_lock, flags);
+	intf->vsync_timestamp = timestamp;
+	write_unlock_irqrestore(&intf->vsync_lock, flags);
+
+	wake_up_interruptible_all(&intf->vsync_wait);
+
+	event.base.type = ADF_EVENT_VSYNC;
+	event.base.length = sizeof(event);
+	event.timestamp = ktime_to_ns(timestamp);
+	adf_event_queue(&intf->base, &event.base);
+}
+EXPORT_SYMBOL(adf_vsync_notify);
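+
+/*
+ * Usage sketch (hypothetical vsync interrupt handler):
+ *
+ *	static irqreturn_t my_vsync_irq(int irq, void *data)
+ *	{
+ *		struct adf_interface *intf = data;
+ *
+ *		adf_vsync_notify(intf, ktime_get());
+ *		return IRQ_HANDLED;
+ *	}
+ */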
+
+void adf_hotplug_notify(struct adf_interface *intf, bool connected,
+		struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+	unsigned long flags;
+	struct adf_hotplug_event event;
+	struct drm_mode_modeinfo *old_modelist;
+
+	write_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+	old_modelist = intf->modelist;
+	intf->hotplug_detect = connected;
+	intf->modelist = modelist;
+	intf->n_modes = n_modes;
+	write_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+	kfree(old_modelist);
+
+	event.base.length = sizeof(event);
+	event.base.type = ADF_EVENT_HOTPLUG;
+	event.connected = connected;
+	adf_event_queue(&intf->base, &event.base);
+}
+
+/**
+ * adf_hotplug_notify_connected - notify ADF of a display interface being
+ * connected to a display
+ *
+ * @intf: the display interface
+ * @modelist: hardware modes supported by display
+ * @n_modes: length of modelist
+ *
+ * @modelist is copied as needed, so it may point to a variable on the stack.
+ *
+ * adf_hotplug_notify_connected() may NOT be called safely from an atomic
+ * context.
+ *
+ * Returns 0 on success or error code (<0) on error.
+ */
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+	struct drm_mode_modeinfo *modelist_copy;
+
+	if (n_modes > ADF_MAX_MODES)
+		return -ENOMEM;
+
+	modelist_copy = kzalloc(sizeof(modelist_copy[0]) * n_modes,
+			GFP_KERNEL);
+	if (!modelist_copy)
+		return -ENOMEM;
+	memcpy(modelist_copy, modelist, sizeof(modelist_copy[0]) * n_modes);
+
+	adf_hotplug_notify(intf, true, modelist_copy, n_modes);
+	return 0;
+}
+EXPORT_SYMBOL(adf_hotplug_notify_connected);
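+
+/*
+ * Usage sketch (hypothetical HDMI driver; my_fill_1080p_mode() is made up):
+ * the modelist is copied, so a stack variable is fine:
+ *
+ *	struct drm_mode_modeinfo modes[1];
+ *
+ *	my_fill_1080p_mode(&modes[0]);
+ *	adf_modeinfo_set_name(&modes[0]);
+ *	adf_modeinfo_set_vrefresh(&modes[0]);
+ *	adf_hotplug_notify_connected(intf, modes, ARRAY_SIZE(modes));
+ */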
+
+/**
+ * adf_hotplug_notify_disconnected - notify ADF of a display interface being
+ * disconnected from a display
+ *
+ * @intf: the display interface
+ *
+ * adf_hotplug_notify_disconnected() may be called safely from an atomic
+ * context.
+ */
+void adf_hotplug_notify_disconnected(struct adf_interface *intf)
+{
+	adf_hotplug_notify(intf, false, NULL, 0);
+}
+EXPORT_SYMBOL(adf_hotplug_notify_disconnected);
+
+static int adf_obj_init(struct adf_obj *obj, enum adf_obj_type type,
+		struct idr *idr, struct adf_device *parent,
+		const struct adf_obj_ops *ops, const char *fmt, va_list args)
+{
+	int ret;
+
+	if (ops && ops->supports_event && !ops->set_event) {
+		pr_err("%s: %s implements supports_event but not set_event\n",
+				__func__, adf_obj_type_str(type));
+		return -EINVAL;
+	}
+
+	ret = idr_alloc(idr, obj, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: allocating object id failed: %d\n", __func__, ret);
+		return ret;
+	}
+	obj->id = ret;
+
+	vscnprintf(obj->name, sizeof(obj->name), fmt, args);
+
+	obj->type = type;
+	obj->ops = ops;
+	obj->parent = parent;
+	mutex_init(&obj->event_lock);
+	obj->event_refcount = RB_ROOT;
+	spin_lock_init(&obj->file_lock);
+	INIT_LIST_HEAD(&obj->file_list);
+	return 0;
+}
+
+static void adf_obj_destroy(struct adf_obj *obj, struct idr *idr)
+{
+	struct rb_node *node = rb_first(&obj->event_refcount);
+
+	while (node) {
+		struct adf_event_refcount *refcount =
+				container_of(node, struct adf_event_refcount,
+						node);
+		rb_erase(&refcount->node, &obj->event_refcount);
+		kfree(refcount);
+		node = rb_first(&obj->event_refcount);
+	}
+
+	mutex_destroy(&obj->event_lock);
+	idr_remove(idr, obj->id);
+}
+
+/**
+ * adf_device_init - initialize ADF-internal data for a display device
+ * and create sysfs entries
+ *
+ * @dev: the display device
+ * @parent: the device's parent device
+ * @ops: the device's associated ops
+ * @fmt: formatting string for the display device's name
+ *
+ * @fmt specifies the device's sysfs filename and the name returned to
+ * userspace through the %ADF_GET_DEVICE_DATA ioctl.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_device_init(struct adf_device *dev, struct device *parent,
+		const struct adf_device_ops *ops, const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+
+	if (!ops->validate || !ops->post) {
+		pr_err("%s: device must implement validate and post\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (!ops->complete_fence && !ops->advance_timeline) {
+		if (!IS_ENABLED(CONFIG_SW_SYNC)) {
+			pr_err("%s: device requires sw_sync but it is not enabled in the kernel\n",
+					__func__);
+			return -EINVAL;
+		}
+	} else if (!(ops->complete_fence && ops->advance_timeline)) {
+		pr_err("%s: device must implement both complete_fence and advance_timeline, or implement neither\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	memset(dev, 0, sizeof(*dev));
+
+	va_start(args, fmt);
+	ret = adf_obj_init(&dev->base, ADF_OBJ_DEVICE, &adf_devices, dev,
+			&ops->base, fmt, args);
+	va_end(args);
+	if (ret < 0)
+		return ret;
+
+	dev->dev = parent;
+	dev->ops = ops;
+	idr_init(&dev->overlay_engines);
+	idr_init(&dev->interfaces);
+	mutex_init(&dev->client_lock);
+	INIT_LIST_HEAD(&dev->post_list);
+	mutex_init(&dev->post_lock);
+	init_kthread_worker(&dev->post_worker);
+	INIT_LIST_HEAD(&dev->attached);
+	INIT_LIST_HEAD(&dev->attach_allowed);
+
+	dev->post_thread = kthread_run(kthread_worker_fn,
+			&dev->post_worker, dev->base.name);
+	if (IS_ERR(dev->post_thread)) {
+		ret = PTR_ERR(dev->post_thread);
+		dev->post_thread = NULL;
+
+		pr_err("%s: failed to run config posting thread: %d\n",
+				__func__, ret);
+		goto err;
+	}
+	init_kthread_work(&dev->post_work, adf_post_work_func);
+
+	ret = adf_device_sysfs_init(dev);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+
+err:
+	adf_device_destroy(dev);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_init);
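+
+/*
+ * Usage sketch (hypothetical driver probe, minimal ops only): a real driver
+ * also fills in the base ops, state management and fence handling as needed:
+ *
+ *	static const struct adf_device_ops my_dev_ops = {
+ *		.validate	= my_validate,
+ *		.post		= my_post,
+ *	};
+ *
+ *	err = adf_device_init(&mydev->adf, &pdev->dev, &my_dev_ops,
+ *			"my-display");
+ *	if (err < 0)
+ *		return err;
+ */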
+
+/**
+ * adf_device_destroy - clean up ADF-internal data for a display device
+ *
+ * @dev: the display device
+ */
+void adf_device_destroy(struct adf_device *dev)
+{
+	struct adf_attachment_list *entry, *next;
+
+	idr_destroy(&dev->interfaces);
+	idr_destroy(&dev->overlay_engines);
+
+	if (dev->post_thread) {
+		flush_kthread_worker(&dev->post_worker);
+		kthread_stop(dev->post_thread);
+	}
+
+	if (dev->onscreen)
+		adf_post_cleanup(dev, dev->onscreen);
+	adf_device_sysfs_destroy(dev);
+	list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+		adf_attachment_free(entry);
+	}
+	list_for_each_entry_safe(entry, next, &dev->attached, head) {
+		adf_attachment_free(entry);
+	}
+	mutex_destroy(&dev->post_lock);
+	mutex_destroy(&dev->client_lock);
+
+	if (dev->timeline)
+		sync_timeline_destroy(&dev->timeline->obj);
+
+	adf_obj_destroy(&dev->base, &adf_devices);
+}
+EXPORT_SYMBOL(adf_device_destroy);
+
+/**
+ * adf_interface_init - initialize ADF-internal data for a display interface
+ * and create sysfs entries
+ *
+ * @intf: the display interface
+ * @dev: the interface's "parent" display device
+ * @type: interface type (see enum @adf_interface_type)
+ * @idx: which interface of type @type;
+ *	e.g. interface DSI.1 -> @type=%ADF_INTF_TYPE_DSI, @idx=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @ops: the interface's associated ops
+ * @fmt: formatting string for the display interface's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_INTERFACE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_interface_init(struct adf_interface *intf, struct adf_device *dev,
+		enum adf_interface_type type, u32 idx, u32 flags,
+		const struct adf_interface_ops *ops, const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+	const u32 allowed_flags = ADF_INTF_FLAG_PRIMARY |
+			ADF_INTF_FLAG_EXTERNAL;
+
+	if (dev->n_interfaces == ADF_MAX_INTERFACES) {
+		pr_err("%s: parent device %s has too many interfaces\n",
+				__func__, dev->base.name);
+		return -ENOMEM;
+	}
+
+	if (type >= ADF_INTF_MEMORY && type <= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+		pr_err("%s: invalid interface type %u\n", __func__, type);
+		return -EINVAL;
+	}
+
+	if (flags & ~allowed_flags) {
+		pr_err("%s: invalid interface flags 0x%X\n", __func__,
+				flags & ~allowed_flags);
+		return -EINVAL;
+	}
+
+	memset(intf, 0, sizeof(*intf));
+
+	va_start(args, fmt);
+	ret = adf_obj_init(&intf->base, ADF_OBJ_INTERFACE, &dev->interfaces,
+			dev, ops ? &ops->base : NULL, fmt, args);
+	va_end(args);
+	if (ret < 0)
+		return ret;
+
+	intf->type = type;
+	intf->idx = idx;
+	intf->flags = flags;
+	intf->ops = ops;
+	intf->dpms_state = DRM_MODE_DPMS_OFF;
+	init_waitqueue_head(&intf->vsync_wait);
+	rwlock_init(&intf->vsync_lock);
+	rwlock_init(&intf->hotplug_modelist_lock);
+
+	ret = adf_interface_sysfs_init(intf);
+	if (ret < 0)
+		goto err;
+	dev->n_interfaces++;
+
+	return 0;
+
+err:
+	adf_obj_destroy(&intf->base, &dev->interfaces);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_init);
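+
+/*
+ * Usage sketch (hypothetical): register the primary DSI panel as DSI.0 on a
+ * device previously initialized with adf_device_init():
+ *
+ *	err = adf_interface_init(&mydev->intf, &mydev->adf, ADF_INTF_DSI, 0,
+ *			ADF_INTF_FLAG_PRIMARY, &my_intf_ops, "dsi.%u", 0);
+ */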
+
+/**
+ * adf_interface_destroy - clean up ADF-internal data for a display interface
+ *
+ * @intf: the display interface
+ */
+void adf_interface_destroy(struct adf_interface *intf)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	struct adf_attachment_list *entry, *next;
+
+	mutex_lock(&dev->client_lock);
+	list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+		if (entry->attachment.interface == intf) {
+			adf_attachment_free(entry);
+			dev->n_attach_allowed--;
+		}
+	}
+	list_for_each_entry_safe(entry, next, &dev->attached, head) {
+		if (entry->attachment.interface == intf) {
+			adf_device_detach_op(dev,
+					entry->attachment.overlay_engine, intf);
+			adf_attachment_free(entry);
+			dev->n_attached--;
+		}
+	}
+	kfree(intf->modelist);
+	adf_interface_sysfs_destroy(intf);
+	adf_obj_destroy(&intf->base, &dev->interfaces);
+	dev->n_interfaces--;
+	mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_destroy);
+
+static bool adf_overlay_engine_has_custom_formats(
+		const struct adf_overlay_engine_ops *ops)
+{
+	size_t i;
+	for (i = 0; i < ops->n_supported_formats; i++)
+		if (!adf_format_is_standard(ops->supported_formats[i]))
+			return true;
+	return false;
+}
+
+/**
+ * adf_overlay_engine_init - initialize ADF-internal data for an
+ * overlay engine and create sysfs entries
+ *
+ * @eng: the overlay engine
+ * @dev: the overlay engine's "parent" display device
+ * @ops: the overlay engine's associated ops
+ * @fmt: formatting string for the overlay engine's name
+ *
+ * @dev must have previously been initialized with adf_device_init().
+ *
+ * @fmt affects the name returned to userspace through the
+ * %ADF_GET_OVERLAY_ENGINE_DATA ioctl.  It does not affect the sysfs filename,
+ * which is derived from @dev's name.
+ *
+ * Returns 0 on success or error code (<0) on failure.
+ */
+int adf_overlay_engine_init(struct adf_overlay_engine *eng,
+		struct adf_device *dev,
+		const struct adf_overlay_engine_ops *ops, const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+
+	if (!ops->supported_formats) {
+		pr_err("%s: overlay engine must support at least one format\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (ops->n_supported_formats > ADF_MAX_SUPPORTED_FORMATS) {
+		pr_err("%s: overlay engine supports too many formats\n",
+				__func__);
+		return -EINVAL;
+	}
+
+	if (adf_overlay_engine_has_custom_formats(ops) &&
+			!dev->ops->validate_custom_format) {
+		pr_err("%s: overlay engine has custom formats but parent device %s does not implement validate_custom_format\n",
+				__func__, dev->base.name);
+		return -EINVAL;
+	}
+
+	memset(eng, 0, sizeof(*eng));
+
+	va_start(args, fmt);
+	ret = adf_obj_init(&eng->base, ADF_OBJ_OVERLAY_ENGINE,
+			&dev->overlay_engines, dev, &ops->base, fmt, args);
+	va_end(args);
+	if (ret < 0)
+		return ret;
+
+	eng->ops = ops;
+
+	ret = adf_overlay_engine_sysfs_init(eng);
+	if (ret < 0)
+		goto err;
+
+	return 0;
+
+err:
+	adf_obj_destroy(&eng->base, &dev->overlay_engines);
+	return ret;
+}
+EXPORT_SYMBOL(adf_overlay_engine_init);
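+
+/*
+ * Usage sketch (hypothetical): advertise a single-format overlay engine:
+ *
+ *	static const u32 my_formats[] = { DRM_FORMAT_RGBA8888 };
+ *	static const struct adf_overlay_engine_ops my_eng_ops = {
+ *		.supported_formats	= my_formats,
+ *		.n_supported_formats	= ARRAY_SIZE(my_formats),
+ *	};
+ *
+ *	err = adf_overlay_engine_init(&mydev->eng, &mydev->adf, &my_eng_ops,
+ *			"eng.%u", 0);
+ */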
+
+/**
+ * adf_overlay_engine_destroy - clean up ADF-internal data for an
+ * overlay engine
+ *
+ * @eng: the overlay engine
+ */
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng)
+{
+	struct adf_device *dev = adf_overlay_engine_parent(eng);
+	struct adf_attachment_list *entry, *next;
+
+	mutex_lock(&dev->client_lock);
+	list_for_each_entry_safe(entry, next, &dev->attach_allowed, head) {
+		if (entry->attachment.overlay_engine == eng) {
+			adf_attachment_free(entry);
+			dev->n_attach_allowed--;
+		}
+	}
+	list_for_each_entry_safe(entry, next, &dev->attached, head) {
+		if (entry->attachment.overlay_engine == eng) {
+			adf_device_detach_op(dev, eng,
+					entry->attachment.interface);
+			adf_attachment_free(entry);
+			dev->n_attached--;
+		}
+	}
+	adf_overlay_engine_sysfs_destroy(eng);
+	adf_obj_destroy(&eng->base, &dev->overlay_engines);
+	mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_overlay_engine_destroy);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	struct adf_attachment_list *entry;
+	list_for_each_entry(entry, list, head) {
+		if (entry->attachment.interface == intf &&
+				entry->attachment.overlay_engine == eng)
+			return entry;
+	}
+	return NULL;
+}
+
+int adf_attachment_validate(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	struct adf_device *intf_dev = adf_interface_parent(intf);
+	struct adf_device *eng_dev = adf_overlay_engine_parent(eng);
+
+	if (intf_dev != dev) {
+		dev_err(&dev->base.dev, "can't attach interface %s belonging to device %s\n",
+				intf->base.name, intf_dev->base.name);
+		return -EINVAL;
+	}
+
+	if (eng_dev != dev) {
+		dev_err(&dev->base.dev, "can't attach overlay engine %s belonging to device %s\n",
+				eng->base.name, eng_dev->base.name);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * adf_attachment_allow - add a new entry to the list of allowed
+ * attachments
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * adf_attachment_allow() indicates that the underlying display hardware allows
+ * @intf to scan out @eng's output.  It is intended to be called at
+ * driver initialization for each supported overlay engine + interface pair.
+ *
+ * Returns 0 on success, -%EALREADY if the entry already exists, or -errno on
+ * any other failure.
+ */
+int adf_attachment_allow(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	int ret;
+	struct adf_attachment_list *entry = NULL;
+
+	ret = adf_attachment_validate(dev, eng, intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->client_lock);
+
+	if (dev->n_attach_allowed == ADF_MAX_ATTACHMENTS) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+		ret = -EALREADY;
+		goto done;
+	}
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	entry->attachment.interface = intf;
+	entry->attachment.overlay_engine = eng;
+	list_add_tail(&entry->head, &dev->attach_allowed);
+	dev->n_attach_allowed++;
+
+done:
+	mutex_unlock(&dev->client_lock);
+	if (ret < 0)
+		kfree(entry);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_attachment_allow);
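+
+/*
+ * Usage sketch (hypothetical driver init): -EALREADY only means the pair was
+ * registered twice and can usually be ignored:
+ *
+ *	err = adf_attachment_allow(&mydev->adf, &mydev->eng, &mydev->intf);
+ *	if (err < 0 && err != -EALREADY)
+ *		goto err_cleanup;
+ */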
+
+/**
+ * adf_obj_type_str - string representation of an adf_obj_type
+ *
+ * @type: the object type
+ */
+const char *adf_obj_type_str(enum adf_obj_type type)
+{
+	switch (type) {
+	case ADF_OBJ_OVERLAY_ENGINE:
+		return "overlay engine";
+
+	case ADF_OBJ_INTERFACE:
+		return "interface";
+
+	case ADF_OBJ_DEVICE:
+		return "device";
+
+	default:
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(adf_obj_type_str);
+
+/**
+ * adf_interface_type_str - string representation of an adf_interface's type
+ *
+ * @intf: the interface
+ */
+const char *adf_interface_type_str(struct adf_interface *intf)
+{
+	switch (intf->type) {
+	case ADF_INTF_DSI:
+		return "DSI";
+
+	case ADF_INTF_eDP:
+		return "eDP";
+
+	case ADF_INTF_DPI:
+		return "DPI";
+
+	case ADF_INTF_VGA:
+		return "VGA";
+
+	case ADF_INTF_DVI:
+		return "DVI";
+
+	case ADF_INTF_HDMI:
+		return "HDMI";
+
+	case ADF_INTF_MEMORY:
+		return "memory";
+
+	default:
+		if (intf->type >= ADF_INTF_TYPE_DEVICE_CUSTOM) {
+			if (intf->ops && intf->ops->type_str)
+				return intf->ops->type_str(intf);
+			return "custom";
+		}
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(adf_interface_type_str);
+
+/**
+ * adf_event_type_str - string representation of an adf_event_type
+ *
+ * @obj: ADF object that produced the event
+ * @type: event type
+ */
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type)
+{
+	switch (type) {
+	case ADF_EVENT_VSYNC:
+		return "vsync";
+
+	case ADF_EVENT_HOTPLUG:
+		return "hotplug";
+
+	default:
+		if (type >= ADF_EVENT_DEVICE_CUSTOM) {
+			if (obj->ops && obj->ops->event_type_str)
+				return obj->ops->event_type_str(obj, type);
+			return "custom";
+		}
+		return "unknown";
+	}
+}
+EXPORT_SYMBOL(adf_event_type_str);
+
+/**
+ * adf_format_str - string representation of an ADF/DRM fourcc format
+ *
+ * @format: format fourcc
+ * @buf: target buffer for the format's string representation
+ */
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE])
+{
+	buf[0] = format & 0xFF;
+	buf[1] = (format >> 8) & 0xFF;
+	buf[2] = (format >> 16) & 0xFF;
+	buf[3] = (format >> 24) & 0xFF;
+	buf[4] = '\0';
+}
+EXPORT_SYMBOL(adf_format_str);
+
+/**
+ * adf_format_validate_yuv - validate the number and size of planes in buffers
+ * with a custom YUV format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @num_planes: expected number of planes
+ * @hsub: expected horizontal chroma subsampling factor, in pixels
+ * @vsub: expected vertical chroma subsampling factor, in pixels
+ * @cpp: expected bytes per pixel for each plane (length @num_planes)
+ *
+ * adf_format_validate_yuv() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.
+ *
+ * Returns 0 if @buf has the expected number of planes and each plane
+ * has sufficient size, or -EINVAL otherwise.
+ */
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+		u8 num_planes, u8 hsub, u8 vsub, u8 cpp[])
+{
+	u8 i;
+
+	if (num_planes != buf->n_planes) {
+		char format_str[ADF_FORMAT_STR_SIZE];
+		adf_format_str(buf->format, format_str);
+		dev_err(&dev->base.dev, "%u planes expected for format %s but %u planes provided\n",
+				num_planes, format_str, buf->n_planes);
+		return -EINVAL;
+	}
+
+	if (buf->w == 0 || buf->w % hsub) {
+		dev_err(&dev->base.dev, "bad buffer width %u\n", buf->w);
+		return -EINVAL;
+	}
+
+	if (buf->h == 0 || buf->h % vsub) {
+		dev_err(&dev->base.dev, "bad buffer height %u\n", buf->h);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < num_planes; i++) {
+		u32 width = buf->w / (i != 0 ? hsub : 1);
+		u32 height = buf->h / (i != 0 ? vsub : 1);
+		u8 cpp = adf_format_plane_cpp(buf->format, i);
+		u32 last_line_size;
+
+		if (buf->pitch[i] < (u64) width * cpp) {
+			dev_err(&dev->base.dev, "plane %u pitch is shorter than buffer width (pitch = %u, width = %u, bpp = %u)\n",
+					i, buf->pitch[i], width, cpp * 8);
+			return -EINVAL;
+		}
+
+		switch (dev->ops->quirks.buffer_padding) {
+		case ADF_BUFFER_PADDED_TO_PITCH:
+			last_line_size = buf->pitch[i];
+			break;
+
+		case ADF_BUFFER_UNPADDED:
+			last_line_size = width * cpp;
+			break;
+
+		default:
+			BUG();
+		}
+
+		if ((u64) (height - 1) * buf->pitch[i] + last_line_size +
+				buf->offset[i] > buf->dma_bufs[i]->size) {
+			dev_err(&dev->base.dev, "plane %u buffer too small (height = %u, pitch = %u, offset = %u, size = %zu)\n",
+					i, height, buf->pitch[i],
+					buf->offset[i], buf->dma_bufs[i]->size);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_format_validate_yuv);
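+
+/*
+ * Usage sketch (hypothetical validate_custom_format() op) for an NV12-style
+ * layout: two planes, 2x2 chroma subsampling, 1 byte per Y pixel and 2 bytes
+ * per interleaved CbCr pixel:
+ *
+ *	static int my_validate_custom_format(struct adf_device *dev,
+ *			struct adf_buffer *buf)
+ *	{
+ *		u8 cpp[] = { 1, 2 };
+ *
+ *		return adf_format_validate_yuv(dev, buf, 2, 2, 2, cpp);
+ *	}
+ */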
+
+/**
+ * adf_modeinfo_set_name - sets the name of a mode from its display resolution
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_name() fills in @mode->name in the format
+ * "[hdisplay]x[vdisplay](i)".  It is intended to help drivers create
+ * ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode)
+{
+	bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
+
+	snprintf(mode->name, DRM_DISPLAY_MODE_LEN, "%dx%d%s",
+		 mode->hdisplay, mode->vdisplay,
+		 interlaced ? "i" : "");
+}
+EXPORT_SYMBOL(adf_modeinfo_set_name);
+
+/**
+ * adf_modeinfo_set_vrefresh - sets the vrefresh of a mode from its other
+ * timing data
+ *
+ * @mode: mode
+ *
+ * adf_modeinfo_set_vrefresh() calculates @mode->vrefresh from
+ * @mode->{h,v}display and @mode->flags.  It is intended to help drivers
+ * create ADF/DRM-style modelists from other mode formats.
+ */
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode)
+{
+	int refresh = 0;
+	unsigned int calc_val;
+
+	if (mode->vrefresh > 0)
+		return;
+
+	if (mode->htotal <= 0 || mode->vtotal <= 0)
+		return;
+
+	/* work out vrefresh; the intermediate value is x1000 */
+	calc_val = (mode->clock * 1000);
+	calc_val /= mode->htotal;
+	refresh = (calc_val + mode->vtotal / 2) / mode->vtotal;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		refresh *= 2;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		refresh /= 2;
+	if (mode->vscan > 1)
+		refresh /= mode->vscan;
+
+	mode->vrefresh = refresh;
+}
+EXPORT_SYMBOL(adf_modeinfo_set_vrefresh);
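+
+/*
+ * Worked example: for a standard 1080p mode with clock = 148500 (kHz),
+ * htotal = 2200 and vtotal = 1125, the calculation above gives
+ * 148500 * 1000 / 2200 = 67500, then (67500 + 562) / 1125 = 60, so
+ * mode->vrefresh is set to 60.
+ */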
+
+static int __init adf_init(void)
+{
+	int err;
+
+	err = adf_sysfs_init();
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static void __exit adf_exit(void)
+{
+	adf_sysfs_destroy();
+}
+
+module_init(adf_init);
+module_exit(adf_exit);
diff --git a/drivers/video/adf/adf.h b/drivers/video/adf/adf.h
new file mode 100644
index 0000000..3bcf1fa
--- /dev/null
+++ b/drivers/video/adf/adf.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_H
+#define __VIDEO_ADF_ADF_H
+
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <video/adf.h>
+#include "sync.h"
+
+struct adf_event_refcount {
+	struct rb_node node;
+	enum adf_event_type type;
+	int refcount;
+};
+
+void adf_buffer_cleanup(struct adf_buffer *buf);
+void adf_buffer_mapping_cleanup(struct adf_buffer_mapping *mapping,
+		struct adf_buffer *buf);
+void adf_post_cleanup(struct adf_device *dev, struct adf_pending_post *post);
+
+struct adf_attachment_list *adf_attachment_find(struct list_head *list,
+		struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_attachment_validate(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf);
+void adf_attachment_free(struct adf_attachment_list *attachment);
+
+struct adf_event_refcount *adf_obj_find_event_refcount(struct adf_obj *obj,
+		enum adf_event_type type);
+
+static inline int adf_obj_check_supports_event(struct adf_obj *obj,
+		enum adf_event_type type)
+{
+	if (!obj->ops || !obj->ops->supports_event)
+		return -EOPNOTSUPP;
+	if (!obj->ops->supports_event(obj, type))
+		return -EINVAL;
+	return 0;
+}
+
+static inline int adf_device_attach_op(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	if (!dev->ops->attach)
+		return 0;
+
+	return dev->ops->attach(dev, eng, intf);
+}
+
+static inline int adf_device_detach_op(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	if (!dev->ops->detach)
+		return 0;
+
+	return dev->ops->detach(dev, eng, intf);
+}
+
+#endif /* __VIDEO_ADF_ADF_H */
diff --git a/drivers/video/adf/adf_client.c b/drivers/video/adf/adf_client.c
new file mode 100644
index 0000000..8061d8e
--- /dev/null
+++ b/drivers/video/adf/adf_client.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kthread.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+
+#include "sw_sync.h"
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+static inline bool vsync_active(u8 state)
+{
+	return state == DRM_MODE_DPMS_ON || state == DRM_MODE_DPMS_STANDBY;
+}
+
+/**
+ * adf_interface_blank - set interface's DPMS state
+ *
+ * @intf: the interface
+ * @state: one of %DRM_MODE_DPMS_*
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_blank(struct adf_interface *intf, u8 state)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	u8 prev_state;
+	bool disable_vsync;
+	bool enable_vsync;
+	int ret = 0;
+	struct adf_event_refcount *vsync_refcount;
+
+	if (!intf->ops || !intf->ops->blank)
+		return -EOPNOTSUPP;
+
+	if (state > DRM_MODE_DPMS_OFF)
+		return -EINVAL;
+
+	mutex_lock(&dev->client_lock);
+	if (state != DRM_MODE_DPMS_ON)
+		flush_kthread_worker(&dev->post_worker);
+	mutex_lock(&intf->base.event_lock);
+
+	vsync_refcount = adf_obj_find_event_refcount(&intf->base,
+			ADF_EVENT_VSYNC);
+	if (!vsync_refcount) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	prev_state = intf->dpms_state;
+	if (prev_state == state) {
+		ret = -EBUSY;
+		goto done;
+	}
+
+	disable_vsync = vsync_active(prev_state) &&
+			!vsync_active(state) &&
+			vsync_refcount->refcount;
+	enable_vsync = !vsync_active(prev_state) &&
+			vsync_active(state) &&
+			vsync_refcount->refcount;
+
+	if (disable_vsync)
+		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+				false);
+
+	ret = intf->ops->blank(intf, state);
+	if (ret < 0) {
+		if (disable_vsync)
+			intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+					true);
+		goto done;
+	}
+
+	if (enable_vsync)
+		intf->base.ops->set_event(&intf->base, ADF_EVENT_VSYNC,
+				true);
+
+	intf->dpms_state = state;
+done:
+	mutex_unlock(&intf->base.event_lock);
+	mutex_unlock(&dev->client_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_blank);
+
+/**
+ * adf_interface_dpms_state - get interface's current DPMS state
+ *
+ * @intf: the interface
+ *
+ * Returns one of %DRM_MODE_DPMS_*.
+ */
+u8 adf_interface_dpms_state(struct adf_interface *intf)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	u8 dpms_state;
+
+	mutex_lock(&dev->client_lock);
+	dpms_state = intf->dpms_state;
+	mutex_unlock(&dev->client_lock);
+
+	return dpms_state;
+}
+EXPORT_SYMBOL(adf_interface_dpms_state);
+
+/**
+ * adf_interface_current_mode - get interface's current display mode
+ *
+ * @intf: the interface
+ * @mode: returns the current mode
+ */
+void adf_interface_current_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+
+	mutex_lock(&dev->client_lock);
+	memcpy(mode, &intf->current_mode, sizeof(*mode));
+	mutex_unlock(&dev->client_lock);
+}
+EXPORT_SYMBOL(adf_interface_current_mode);
+
+/**
+ * adf_interface_modelist - get interface's modelist
+ *
+ * @intf: the interface
+ * @modelist: storage for the modelist (optional)
+ * @n_modes: length of @modelist
+ *
+ * If @modelist is not NULL, adf_interface_modelist() will copy up to @n_modes
+ * modelist entries into @modelist.
+ *
+ * Returns the length of the modelist.
+ */
+size_t adf_interface_modelist(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes)
+{
+	unsigned long flags;
+	size_t retval;
+
+	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+	if (modelist)
+		memcpy(modelist, intf->modelist, sizeof(modelist[0]) *
+				min(n_modes, intf->n_modes));
+	retval = intf->n_modes;
+	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+	return retval;
+}
+EXPORT_SYMBOL(adf_interface_modelist);
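+
+/*
+ * Editorial usage sketch (not part of this patch): the doc comment above
+ * describes a query-then-fill pattern, which a client could use roughly
+ * like this (error handling omitted; "n" and "modes" are illustrative):
+ *
+ *	size_t n = adf_interface_modelist(intf, NULL, 0);
+ *	struct drm_mode_modeinfo *modes =
+ *			kcalloc(n, sizeof(modes[0]), GFP_KERNEL);
+ *	if (modes)
+ *		adf_interface_modelist(intf, modes, n);
+ *
+ * adf_fbdev_fill_modelist() in adf_fbdev.c below follows the same pattern.
+ */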
+
+/**
+ * adf_interface_set_mode - set interface's display mode
+ *
+ * @intf: the interface
+ * @mode: the new mode
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_set_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	int ret = 0;
+
+	if (!intf->ops || !intf->ops->modeset)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&dev->client_lock);
+	flush_kthread_worker(&dev->post_worker);
+
+	ret = intf->ops->modeset(intf, mode);
+	if (ret < 0)
+		goto done;
+
+	memcpy(&intf->current_mode, mode, sizeof(*mode));
+done:
+	mutex_unlock(&dev->client_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_set_mode);
+
+/**
+ * adf_interface_get_screen_size - get size of screen connected to interface
+ *
+ * @intf: the interface
+ * @width_mm: returns the screen width in mm
+ * @height_mm: returns the screen height in mm
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width_mm,
+		u16 *height_mm)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	int ret;
+
+	if (!intf->ops || !intf->ops->screen_size)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&dev->client_lock);
+	ret = intf->ops->screen_size(intf, width_mm, height_mm);
+	mutex_unlock(&dev->client_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_get_screen_size);
+
+/**
+ * adf_overlay_engine_supports_format - returns whether a format is in an
+ * overlay engine's supported list
+ *
+ * @eng: the overlay engine
+ * @format: format fourcc
+ */
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+		u32 format)
+{
+	size_t i;
+	for (i = 0; i < eng->ops->n_supported_formats; i++)
+		if (format == eng->ops->supported_formats[i])
+			return true;
+
+	return false;
+}
+EXPORT_SYMBOL(adf_overlay_engine_supports_format);
+
+static int adf_buffer_validate(struct adf_buffer *buf)
+{
+	struct adf_overlay_engine *eng = buf->overlay_engine;
+	struct device *dev = &eng->base.dev;
+	struct adf_device *parent = adf_overlay_engine_parent(eng);
+	u8 hsub, vsub, num_planes, cpp[ADF_MAX_PLANES], i;
+
+	if (!adf_overlay_engine_supports_format(eng, buf->format)) {
+		char format_str[ADF_FORMAT_STR_SIZE];
+		adf_format_str(buf->format, format_str);
+		dev_err(dev, "unsupported format %s\n", format_str);
+		return -EINVAL;
+	}
+
+	if (!adf_format_is_standard(buf->format))
+		return parent->ops->validate_custom_format(parent, buf);
+
+	hsub = adf_format_horz_chroma_subsampling(buf->format);
+	vsub = adf_format_vert_chroma_subsampling(buf->format);
+	num_planes = adf_format_num_planes(buf->format);
+	for (i = 0; i < num_planes; i++)
+		cpp[i] = adf_format_plane_cpp(buf->format, i);
+
+	return adf_format_validate_yuv(parent, buf, num_planes, hsub, vsub,
+			cpp);
+}
+
+static int adf_buffer_map(struct adf_device *dev, struct adf_buffer *buf,
+		struct adf_buffer_mapping *mapping)
+{
+	int ret = 0;
+	size_t i;
+
+	for (i = 0; i < buf->n_planes; i++) {
+		struct dma_buf_attachment *attachment;
+		struct sg_table *sg_table;
+
+		attachment = dma_buf_attach(buf->dma_bufs[i], dev->dev);
+		if (IS_ERR(attachment)) {
+			ret = PTR_ERR(attachment);
+			dev_err(&dev->base.dev, "attaching plane %zu failed: %d\n",
+					i, ret);
+			goto done;
+		}
+		mapping->attachments[i] = attachment;
+
+		sg_table = dma_buf_map_attachment(attachment, DMA_TO_DEVICE);
+		if (IS_ERR(sg_table)) {
+			ret = PTR_ERR(sg_table);
+			dev_err(&dev->base.dev, "mapping plane %zu failed: %d\n",
+					i, ret);
+			goto done;
+		} else if (!sg_table) {
+			ret = -ENOMEM;
+			dev_err(&dev->base.dev, "mapping plane %zu failed\n",
+					i);
+			goto done;
+		}
+		mapping->sg_tables[i] = sg_table;
+	}
+
+done:
+	if (ret < 0)
+		adf_buffer_mapping_cleanup(mapping, buf);
+
+	return ret;
+}
+
+static struct sync_fence *adf_sw_complete_fence(struct adf_device *dev)
+{
+	struct sync_pt *pt;
+	struct sync_fence *complete_fence;
+
+	if (!dev->timeline) {
+		dev->timeline = sw_sync_timeline_create(dev->base.name);
+		if (!dev->timeline)
+			return ERR_PTR(-ENOMEM);
+		dev->timeline_max = 1;
+	}
+
+	dev->timeline_max++;
+	pt = sw_sync_pt_create(dev->timeline, dev->timeline_max);
+	if (!pt)
+		goto err_pt_create;
+	complete_fence = sync_fence_create(dev->base.name, pt);
+	if (!complete_fence)
+		goto err_fence_create;
+
+	return complete_fence;
+
+err_fence_create:
+	sync_pt_free(pt);
+err_pt_create:
+	dev->timeline_max--;
+	return ERR_PTR(-ENOSYS);
+}
+
+/**
+ * adf_device_post - flip to a new set of buffers
+ *
+ * @dev: device targeted by the flip
+ * @intfs: interfaces targeted by the flip
+ * @n_intfs: number of targeted interfaces
+ * @bufs: description of buffers displayed
+ * @n_bufs: number of buffers displayed
+ * @custom_data: driver-private data
+ * @custom_data_size: size of driver-private data
+ *
+ * adf_device_post() will copy @intfs, @bufs, and @custom_data, so they may
+ * point to variables on the stack.  adf_device_post() also takes its own
+ * reference on each of the dma-bufs in @bufs.  The adf_device_post_nocopy()
+ * variant transfers ownership of these resources to ADF instead.
+ *
+ * On success, returns a sync fence which signals when the buffers are removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_device_post(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+		size_t custom_data_size)
+{
+	struct adf_interface **intfs_copy = NULL;
+	struct adf_buffer *bufs_copy = NULL;
+	void *custom_data_copy = NULL;
+	struct sync_fence *ret;
+	size_t i;
+
+	intfs_copy = kzalloc(sizeof(intfs_copy[0]) * n_intfs, GFP_KERNEL);
+	if (!intfs_copy)
+		return ERR_PTR(-ENOMEM);
+
+	bufs_copy = kzalloc(sizeof(bufs_copy[0]) * n_bufs, GFP_KERNEL);
+	if (!bufs_copy) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	custom_data_copy = kzalloc(custom_data_size, GFP_KERNEL);
+	if (!custom_data_copy) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	for (i = 0; i < n_bufs; i++) {
+		size_t j;
+		for (j = 0; j < bufs[i].n_planes; j++)
+			get_dma_buf(bufs[i].dma_bufs[j]);
+	}
+
+	memcpy(intfs_copy, intfs, sizeof(intfs_copy[0]) * n_intfs);
+	memcpy(bufs_copy, bufs, sizeof(bufs_copy[0]) * n_bufs);
+	memcpy(custom_data_copy, custom_data, custom_data_size);
+
+	ret = adf_device_post_nocopy(dev, intfs_copy, n_intfs, bufs_copy,
+			n_bufs, custom_data_copy, custom_data_size);
+	if (IS_ERR(ret))
+		goto err_post;
+
+	return ret;
+
+err_post:
+	for (i = 0; i < n_bufs; i++) {
+		size_t j;
+		for (j = 0; j < bufs[i].n_planes; j++)
+			dma_buf_put(bufs[i].dma_bufs[j]);
+	}
+err_alloc:
+	kfree(custom_data_copy);
+	kfree(bufs_copy);
+	kfree(intfs_copy);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_post);
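+
+/*
+ * Editorial usage sketch (not part of this patch): because adf_device_post()
+ * copies its arguments and takes its own dma-buf references, a client may
+ * pass stack-allocated arrays (names below are illustrative; passing NULL/0
+ * for the custom data mirrors adf_interface_simple_post() below):
+ *
+ *	struct adf_interface *intfs[] = { intf };
+ *	struct adf_buffer bufs[] = { buf };
+ *	struct sync_fence *fence =
+ *			adf_device_post(dev, intfs, 1, bufs, 1, NULL, 0);
+ *	if (!IS_ERR(fence))
+ *		sync_fence_put(fence);
+ *
+ * Clients that instead want to hand ownership of kmalloc()ed arrays to ADF
+ * should use adf_device_post_nocopy() below.
+ */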
+
+/**
+ * adf_device_post_nocopy - flip to a new set of buffers
+ *
+ * adf_device_post_nocopy() has the same behavior as adf_device_post(),
+ * except ADF does not copy @intfs, @bufs, or @custom_data, and it does
+ * not take an extra reference on the dma-bufs in @bufs.
+ *
+ * @intfs, @bufs, and @custom_data must point to buffers allocated by
+ * kmalloc().  On success, ADF takes ownership of these buffers and the dma-bufs
+ * in @bufs, and will kfree()/dma_buf_put() them when they are no longer needed.
+ * On failure, adf_device_post_nocopy() does NOT take ownership of these
+ * buffers or the dma-bufs, and the caller must clean them up.
+ *
+ * adf_device_post_nocopy() is mainly intended for implementing ADF's ioctls.
+ * Clients may find the nocopy variant useful in limited cases, but most should
+ * call adf_device_post() instead.
+ */
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs,
+		void *custom_data, size_t custom_data_size)
+{
+	struct adf_pending_post *cfg;
+	struct adf_buffer_mapping *mappings;
+	struct sync_fence *ret;
+	size_t i;
+	int err;
+
+	cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+	if (!cfg)
+		return ERR_PTR(-ENOMEM);
+
+	mappings = kzalloc(sizeof(mappings[0]) * n_bufs, GFP_KERNEL);
+	if (!mappings) {
+		ret = ERR_PTR(-ENOMEM);
+		goto err_alloc;
+	}
+
+	mutex_lock(&dev->client_lock);
+
+	for (i = 0; i < n_bufs; i++) {
+		err = adf_buffer_validate(&bufs[i]);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			goto err_buf;
+		}
+
+		err = adf_buffer_map(dev, &bufs[i], &mappings[i]);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			goto err_buf;
+		}
+	}
+
+	INIT_LIST_HEAD(&cfg->head);
+	cfg->config.n_bufs = n_bufs;
+	cfg->config.bufs = bufs;
+	cfg->config.mappings = mappings;
+	cfg->config.custom_data = custom_data;
+	cfg->config.custom_data_size = custom_data_size;
+
+	err = dev->ops->validate(dev, &cfg->config, &cfg->state);
+	if (err < 0) {
+		ret = ERR_PTR(err);
+		goto err_buf;
+	}
+
+	mutex_lock(&dev->post_lock);
+
+	if (dev->ops->complete_fence)
+		ret = dev->ops->complete_fence(dev, &cfg->config,
+				cfg->state);
+	else
+		ret = adf_sw_complete_fence(dev);
+
+	if (IS_ERR(ret))
+		goto err_fence;
+
+	list_add_tail(&cfg->head, &dev->post_list);
+	queue_kthread_work(&dev->post_worker, &dev->post_work);
+	mutex_unlock(&dev->post_lock);
+	mutex_unlock(&dev->client_lock);
+	kfree(intfs);
+	return ret;
+
+err_fence:
+	mutex_unlock(&dev->post_lock);
+
+err_buf:
+	for (i = 0; i < n_bufs; i++)
+		adf_buffer_mapping_cleanup(&mappings[i], &bufs[i]);
+
+	mutex_unlock(&dev->client_lock);
+	kfree(mappings);
+
+err_alloc:
+	kfree(cfg);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_post_nocopy);
+
+static void adf_attachment_list_to_array(struct adf_device *dev,
+		struct list_head *src, struct adf_attachment *dst, size_t size)
+{
+	struct adf_attachment_list *entry;
+	size_t i = 0;
+
+	if (!dst)
+		return;
+
+	list_for_each_entry(entry, src, head) {
+		if (i == size)
+			return;
+		dst[i] = entry->attachment;
+		i++;
+	}
+}
+
+/**
+ * adf_device_attachments - get device's list of active attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the active attachment list.
+ */
+size_t adf_device_attachments(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments)
+{
+	size_t retval;
+
+	mutex_lock(&dev->client_lock);
+	adf_attachment_list_to_array(dev, &dev->attached, attachments,
+			n_attachments);
+	retval = dev->n_attached;
+	mutex_unlock(&dev->client_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments);
+
+/**
+ * adf_device_attachments_allowed - get device's list of allowed attachments
+ *
+ * @dev: the device
+ * @attachments: storage for the attachment list (optional)
+ * @n_attachments: length of @attachments
+ *
+ * If @attachments is not NULL, adf_device_attachments_allowed() will copy up to
+ * @n_attachments entries into @attachments.
+ *
+ * Returns the length of the allowed attachment list.
+ */
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments)
+{
+	size_t retval;
+
+	mutex_lock(&dev->client_lock);
+	adf_attachment_list_to_array(dev, &dev->attach_allowed, attachments,
+			n_attachments);
+	retval = dev->n_attach_allowed;
+	mutex_unlock(&dev->client_lock);
+
+	return retval;
+}
+EXPORT_SYMBOL(adf_device_attachments_allowed);
+
+/**
+ * adf_device_attached - return whether an overlay engine and interface are
+ * attached
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf)
+{
+	struct adf_attachment_list *attachment;
+
+	mutex_lock(&dev->client_lock);
+	attachment = adf_attachment_find(&dev->attached, eng, intf);
+	mutex_unlock(&dev->client_lock);
+
+	return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attached);
+
+/**
+ * adf_device_attach_allowed - return whether the ADF device supports attaching
+ * an overlay engine and interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ */
+bool adf_device_attach_allowed(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf)
+{
+	struct adf_attachment_list *attachment;
+
+	mutex_lock(&dev->client_lock);
+	attachment = adf_attachment_find(&dev->attach_allowed, eng, intf);
+	mutex_unlock(&dev->client_lock);
+
+	return attachment != NULL;
+}
+EXPORT_SYMBOL(adf_device_attach_allowed);
+
+/**
+ * adf_device_attach - attach an overlay engine to an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if attaching @intf and @eng is not allowed,
+ * -%EALREADY if @intf and @eng are already attached, or -errno on any other
+ * failure.
+ */
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf)
+{
+	int ret;
+	struct adf_attachment_list *attachment = NULL;
+
+	ret = adf_attachment_validate(dev, eng, intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->client_lock);
+
+	if (dev->n_attached == ADF_MAX_ATTACHMENTS) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (!adf_attachment_find(&dev->attach_allowed, eng, intf)) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (adf_attachment_find(&dev->attached, eng, intf)) {
+		ret = -EALREADY;
+		goto done;
+	}
+
+	ret = adf_device_attach_op(dev, eng, intf);
+	if (ret < 0)
+		goto done;
+
+	attachment = kzalloc(sizeof(*attachment), GFP_KERNEL);
+	if (!attachment) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	attachment->attachment.interface = intf;
+	attachment->attachment.overlay_engine = eng;
+	list_add_tail(&attachment->head, &dev->attached);
+	dev->n_attached++;
+
+done:
+	mutex_unlock(&dev->client_lock);
+	if (ret < 0)
+		kfree(attachment);
+
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_attach);
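+
+/*
+ * Editorial usage sketch (not part of this patch): callers which do not care
+ * whether the attachment already exists can treat -EALREADY as success, as
+ * adf_fbdev_open() in adf_fbdev.c below does:
+ *
+ *	ret = adf_device_attach(dev, eng, intf);
+ *	if (ret < 0 && ret != -EALREADY)
+ *		return ret;
+ */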
+
+/**
+ * adf_device_detach - detach an overlay engine from an interface
+ *
+ * @dev: the parent device
+ * @eng: the overlay engine
+ * @intf: the interface
+ *
+ * Returns 0 on success, -%EINVAL if @intf and @eng are not attached,
+ * or -errno on any other failure.
+ */
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf)
+{
+	int ret;
+	struct adf_attachment_list *attachment;
+
+	ret = adf_attachment_validate(dev, eng, intf);
+	if (ret < 0)
+		return ret;
+
+	mutex_lock(&dev->client_lock);
+
+	attachment = adf_attachment_find(&dev->attached, eng, intf);
+	if (!attachment) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	ret = adf_device_detach_op(dev, eng, intf);
+	if (ret < 0)
+		goto done;
+
+	adf_attachment_free(attachment);
+	dev->n_attached--;
+done:
+	mutex_unlock(&dev->client_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_device_detach);
+
+/**
+ * adf_interface_simple_buffer_alloc - allocate a simple buffer
+ *
+ * @intf: target interface
+ * @w: width in pixels
+ * @h: height in pixels
+ * @format: format fourcc
+ * @dma_buf: returns the allocated buffer
+ * @offset: returns the byte offset of the allocated buffer's first pixel
+ * @pitch: returns the allocated buffer's pitch
+ *
+ * See &struct adf_simple_buffer_alloc for a description of simple buffers and
+ * their limitations.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+		u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch)
+{
+	if (!intf->ops || !intf->ops->alloc_simple_buffer)
+		return -EOPNOTSUPP;
+
+	if (!adf_format_is_rgb(format))
+		return -EINVAL;
+
+	return intf->ops->alloc_simple_buffer(intf, w, h, format, dma_buf,
+			offset, pitch);
+}
+EXPORT_SYMBOL(adf_interface_simple_buffer_alloc);
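+
+/*
+ * Editorial usage sketch (not part of this patch): a typical caller allocates
+ * a buffer for the interface and then maps it for CPU access, as
+ * adf_fb_alloc() in adf_fbdev.c below does ("w", "h" and "vaddr" are
+ * illustrative):
+ *
+ *	struct dma_buf *dma_buf;
+ *	u32 offset, pitch;
+ *	int err = adf_interface_simple_buffer_alloc(intf, w, h,
+ *			DRM_FORMAT_RGB565, &dma_buf, &offset, &pitch);
+ *	if (!err)
+ *		vaddr = dma_buf_vmap(dma_buf);
+ */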
+
+/**
+ * adf_interface_simple_post - flip to a single buffer
+ *
+ * @intf: interface targeted by the flip
+ * @buf: buffer to display
+ *
+ * adf_interface_simple_post() can be used generically for simple display
+ * configurations, since the client does not need to provide any driver-private
+ * configuration data.
+ *
+ * adf_interface_simple_post() has the same copying semantics as
+ * adf_device_post().
+ *
+ * On success, returns a sync fence which signals when the buffer is removed
+ * from the screen.  On failure, returns ERR_PTR(-errno).
+ */
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+		struct adf_buffer *buf)
+{
+	size_t custom_data_size = 0;
+	void *custom_data = NULL;
+	struct sync_fence *ret;
+
+	if (intf->ops && intf->ops->describe_simple_post) {
+		int err;
+
+		custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+		if (!custom_data) {
+			ret = ERR_PTR(-ENOMEM);
+			goto done;
+		}
+
+		err = intf->ops->describe_simple_post(intf, buf, custom_data,
+				&custom_data_size);
+		if (err < 0) {
+			ret = ERR_PTR(err);
+			goto done;
+		}
+	}
+
+	ret = adf_device_post(adf_interface_parent(intf), &intf, 1, buf, 1,
+			custom_data, custom_data_size);
+done:
+	kfree(custom_data);
+	return ret;
+}
+EXPORT_SYMBOL(adf_interface_simple_post);
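+
+/*
+ * Editorial usage sketch (not part of this patch): the minimal flip path is
+ * to describe a single plane in an adf_buffer, post it, and drop the returned
+ * fence once it is no longer needed (compare adf_fbdev_post() in adf_fbdev.c
+ * below; "eng", "w", "h", "format", "dma_buf" and "pitch" are illustrative):
+ *
+ *	struct adf_buffer buf = {
+ *		.overlay_engine = eng,
+ *		.w = w, .h = h,
+ *		.format = format,
+ *		.dma_bufs = { dma_buf },
+ *		.pitch = { pitch },
+ *		.n_planes = 1,
+ *	};
+ *	struct sync_fence *fence = adf_interface_simple_post(intf, &buf);
+ *	if (!IS_ERR(fence))
+ *		sync_fence_put(fence);
+ */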
diff --git a/drivers/video/adf/adf_fbdev.c b/drivers/video/adf/adf_fbdev.c
new file mode 100644
index 0000000..a5b53bc
--- /dev/null
+++ b/drivers/video/adf/adf_fbdev.c
@@ -0,0 +1,665 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/vmalloc.h>
+
+#include <video/adf.h>
+#include <video/adf_client.h>
+#include <video/adf_fbdev.h>
+#include <video/adf_format.h>
+
+#include "adf.h"
+
+struct adf_fbdev_format {
+	u32 fourcc;
+	u32 bpp;
+	u32 r_length;
+	u32 g_length;
+	u32 b_length;
+	u32 a_length;
+	u32 r_offset;
+	u32 g_offset;
+	u32 b_offset;
+	u32 a_offset;
+};
+
+static const struct adf_fbdev_format format_table[] = {
+	{DRM_FORMAT_RGB332, 8, 3, 3, 2, 0, 5, 2, 0, 0},
+	{DRM_FORMAT_BGR233, 8, 3, 3, 2, 0, 0, 3, 5, 0},
+
+	{DRM_FORMAT_XRGB4444, 16, 4, 4, 4, 0, 8, 4, 0, 0},
+	{DRM_FORMAT_XBGR4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+	{DRM_FORMAT_RGBX4444, 16, 4, 4, 4, 0, 12, 8, 4, 0},
+	{DRM_FORMAT_BGRX4444, 16, 4, 4, 4, 0, 0, 4, 8, 0},
+
+	{DRM_FORMAT_ARGB4444, 16, 4, 4, 4, 4, 8, 4, 0, 12},
+	{DRM_FORMAT_ABGR4444, 16, 4, 4, 4, 4, 0, 4, 8, 12},
+	{DRM_FORMAT_RGBA4444, 16, 4, 4, 4, 4, 12, 8, 4, 0},
+	{DRM_FORMAT_BGRA4444, 16, 4, 4, 4, 4, 0, 4, 8, 0},
+
+	{DRM_FORMAT_XRGB1555, 16, 5, 5, 5, 0, 10, 5, 0, 0},
+	{DRM_FORMAT_XBGR1555, 16, 5, 5, 5, 0, 0, 5, 10, 0},
+	{DRM_FORMAT_RGBX5551, 16, 5, 5, 5, 0, 11, 6, 1, 0},
+	{DRM_FORMAT_BGRX5551, 16, 5, 5, 5, 0, 1, 6, 11, 0},
+
+	{DRM_FORMAT_ARGB1555, 16, 5, 5, 5, 1, 10, 5, 0, 15},
+	{DRM_FORMAT_ABGR1555, 16, 5, 5, 5, 1, 0, 5, 10, 15},
+	{DRM_FORMAT_RGBA5551, 16, 5, 5, 5, 1, 11, 6, 1, 0},
+	{DRM_FORMAT_BGRA5551, 16, 5, 5, 5, 1, 1, 6, 11, 0},
+
+	{DRM_FORMAT_RGB565, 16, 5, 6, 5, 0, 11, 5, 0, 0},
+	{DRM_FORMAT_BGR565, 16, 5, 6, 5, 0, 0, 5, 11, 0},
+
+	{DRM_FORMAT_RGB888, 24, 8, 8, 8, 0, 16, 8, 0, 0},
+	{DRM_FORMAT_BGR888, 24, 8, 8, 8, 0, 0, 8, 16, 0},
+
+	{DRM_FORMAT_XRGB8888, 32, 8, 8, 8, 0, 16, 8, 0, 0},
+	{DRM_FORMAT_XBGR8888, 32, 8, 8, 8, 0, 0, 8, 16, 0},
+	{DRM_FORMAT_RGBX8888, 32, 8, 8, 8, 0, 24, 16, 8, 0},
+	{DRM_FORMAT_BGRX8888, 32, 8, 8, 8, 0, 8, 16, 24, 0},
+
+	{DRM_FORMAT_ARGB8888, 32, 8, 8, 8, 8, 16, 8, 0, 24},
+	{DRM_FORMAT_ABGR8888, 32, 8, 8, 8, 8, 0, 8, 16, 24},
+	{DRM_FORMAT_RGBA8888, 32, 8, 8, 8, 8, 24, 16, 8, 0},
+	{DRM_FORMAT_BGRA8888, 32, 8, 8, 8, 8, 8, 16, 24, 0},
+
+	{DRM_FORMAT_XRGB2101010, 32, 10, 10, 10, 0, 20, 10, 0, 0},
+	{DRM_FORMAT_XBGR2101010, 32, 10, 10, 10, 0, 0, 10, 20, 0},
+	{DRM_FORMAT_RGBX1010102, 32, 10, 10, 10, 0, 22, 12, 2, 0},
+	{DRM_FORMAT_BGRX1010102, 32, 10, 10, 10, 0, 2, 12, 22, 0},
+
+	{DRM_FORMAT_ARGB2101010, 32, 10, 10, 10, 2, 20, 10, 0, 30},
+	{DRM_FORMAT_ABGR2101010, 32, 10, 10, 10, 2, 0, 10, 20, 30},
+	{DRM_FORMAT_RGBA1010102, 32, 10, 10, 10, 2, 22, 12, 2, 0},
+	{DRM_FORMAT_BGRA1010102, 32, 10, 10, 10, 2, 2, 12, 22, 0},
+};
+
+static u32 drm_fourcc_from_fb_var(struct fb_var_screeninfo *var)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+		const struct adf_fbdev_format *f = &format_table[i];
+		if (var->red.length == f->r_length &&
+			var->red.offset == f->r_offset &&
+			var->green.length == f->g_length &&
+			var->green.offset == f->g_offset &&
+			var->blue.length == f->b_length &&
+			var->blue.offset == f->b_offset &&
+			var->transp.length == f->a_length &&
+			(var->transp.length == 0 ||
+					var->transp.offset == f->a_offset))
+			return f->fourcc;
+	}
+
+	return 0;
+}
+
+static const struct adf_fbdev_format *fbdev_format_info(u32 format)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(format_table); i++) {
+		const struct adf_fbdev_format *f = &format_table[i];
+		if (f->fourcc == format)
+			return f;
+	}
+
+	BUG();
+}
+
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+		struct fb_videomode *vmode)
+{
+	memset(vmode, 0, sizeof(*vmode));
+
+	vmode->refresh = mode->vrefresh;
+
+	vmode->xres = mode->hdisplay;
+	vmode->yres = mode->vdisplay;
+
+	vmode->pixclock = mode->clock ? KHZ2PICOS(mode->clock) : 0;
+	vmode->left_margin = mode->htotal - mode->hsync_end;
+	vmode->right_margin = mode->hsync_start - mode->hdisplay;
+	vmode->upper_margin = mode->vtotal - mode->vsync_end;
+	vmode->lower_margin = mode->vsync_start - mode->vdisplay;
+	vmode->hsync_len = mode->hsync_end - mode->hsync_start;
+	vmode->vsync_len = mode->vsync_end - mode->vsync_start;
+
+	vmode->sync = 0;
+	if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+		vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
+	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+		vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
+	if (mode->flags & DRM_MODE_FLAG_PCSYNC)
+		vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
+	if (mode->flags & DRM_MODE_FLAG_BCAST)
+		vmode->sync |= FB_SYNC_BROADCAST;
+
+	vmode->vmode = 0;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		vmode->vmode |= FB_VMODE_INTERLACED;
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		vmode->vmode |= FB_VMODE_DOUBLE;
+}
+EXPORT_SYMBOL(adf_modeinfo_to_fb_videomode);
+
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+		struct drm_mode_modeinfo *mode)
+{
+	memset(mode, 0, sizeof(*mode));
+
+	mode->hdisplay = vmode->xres;
+	mode->hsync_start = mode->hdisplay + vmode->right_margin;
+	mode->hsync_end = mode->hsync_start + vmode->hsync_len;
+	mode->htotal = mode->hsync_end + vmode->left_margin;
+
+	mode->vdisplay = vmode->yres;
+	mode->vsync_start = mode->vdisplay + vmode->lower_margin;
+	mode->vsync_end = mode->vsync_start + vmode->vsync_len;
+	mode->vtotal = mode->vsync_end + vmode->upper_margin;
+
+	mode->clock = vmode->pixclock ? PICOS2KHZ(vmode->pixclock) : 0;
+
+	mode->flags = 0;
+	if (vmode->sync & FB_SYNC_HOR_HIGH_ACT)
+		mode->flags |= DRM_MODE_FLAG_PHSYNC;
+	if (vmode->sync & FB_SYNC_VERT_HIGH_ACT)
+		mode->flags |= DRM_MODE_FLAG_PVSYNC;
+	if (vmode->sync & FB_SYNC_COMP_HIGH_ACT)
+		mode->flags |= DRM_MODE_FLAG_PCSYNC;
+	if (vmode->sync & FB_SYNC_BROADCAST)
+		mode->flags |= DRM_MODE_FLAG_BCAST;
+	if (vmode->vmode & FB_VMODE_INTERLACED)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+	if (vmode->vmode & FB_VMODE_DOUBLE)
+		mode->flags |= DRM_MODE_FLAG_DBLSCAN;
+
+	if (vmode->refresh)
+		mode->vrefresh = vmode->refresh;
+	else
+		adf_modeinfo_set_vrefresh(mode);
+
+	if (vmode->name)
+		strlcpy(mode->name, vmode->name, sizeof(mode->name));
+	else
+		adf_modeinfo_set_name(mode);
+}
+EXPORT_SYMBOL(adf_modeinfo_from_fb_videomode);
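+
+/*
+ * Editorial note (not part of this patch): these two helpers are inverse
+ * conversions between &struct drm_mode_modeinfo and &struct fb_videomode;
+ * adf_fbdev_open() below combines the first with fb_videomode_to_var() to
+ * seed the fbdev var from the interface's current mode:
+ *
+ *	adf_interface_current_mode(fbdev->intf, &mode);
+ *	adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+ *	fb_videomode_to_var(&fbdev->info->var, &fbmode);
+ */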
+
+static int adf_fbdev_post(struct adf_fbdev *fbdev)
+{
+	struct adf_buffer buf;
+	struct sync_fence *complete_fence;
+	int ret = 0;
+
+	memset(&buf, 0, sizeof(buf));
+	buf.overlay_engine = fbdev->eng;
+	buf.w = fbdev->info->var.xres;
+	buf.h = fbdev->info->var.yres;
+	buf.format = fbdev->format;
+	buf.dma_bufs[0] = fbdev->dma_buf;
+	buf.offset[0] = fbdev->offset +
+			fbdev->info->var.yoffset * fbdev->pitch +
+			fbdev->info->var.xoffset *
+			(fbdev->info->var.bits_per_pixel / 8);
+	buf.pitch[0] = fbdev->pitch;
+	buf.n_planes = 1;
+
+	complete_fence = adf_interface_simple_post(fbdev->intf, &buf);
+	if (IS_ERR(complete_fence)) {
+		ret = PTR_ERR(complete_fence);
+		goto done;
+	}
+
+	sync_fence_put(complete_fence);
+done:
+	return ret;
+}
+
+static const u16 vga_palette[][3] = {
+	{0x0000, 0x0000, 0x0000},
+	{0x0000, 0x0000, 0xAAAA},
+	{0x0000, 0xAAAA, 0x0000},
+	{0x0000, 0xAAAA, 0xAAAA},
+	{0xAAAA, 0x0000, 0x0000},
+	{0xAAAA, 0x0000, 0xAAAA},
+	{0xAAAA, 0x5555, 0x0000},
+	{0xAAAA, 0xAAAA, 0xAAAA},
+	{0x5555, 0x5555, 0x5555},
+	{0x5555, 0x5555, 0xFFFF},
+	{0x5555, 0xFFFF, 0x5555},
+	{0x5555, 0xFFFF, 0xFFFF},
+	{0xFFFF, 0x5555, 0x5555},
+	{0xFFFF, 0x5555, 0xFFFF},
+	{0xFFFF, 0xFFFF, 0x5555},
+	{0xFFFF, 0xFFFF, 0xFFFF},
+};
+
+static int adf_fb_alloc(struct adf_fbdev *fbdev)
+{
+	int ret;
+
+	ret = adf_interface_simple_buffer_alloc(fbdev->intf,
+			fbdev->default_xres_virtual,
+			fbdev->default_yres_virtual,
+			fbdev->default_format,
+			&fbdev->dma_buf, &fbdev->offset, &fbdev->pitch);
+	if (ret < 0) {
+		dev_err(fbdev->info->dev, "allocating fb failed: %d\n", ret);
+		return ret;
+	}
+
+	fbdev->vaddr = dma_buf_vmap(fbdev->dma_buf);
+	if (!fbdev->vaddr) {
+		ret = -ENOMEM;
+		dev_err(fbdev->info->dev, "vmapping fb failed\n");
+		goto err_vmap;
+	}
+	fbdev->info->fix.line_length = fbdev->pitch;
+	fbdev->info->var.xres_virtual = fbdev->default_xres_virtual;
+	fbdev->info->var.yres_virtual = fbdev->default_yres_virtual;
+	fbdev->info->fix.smem_len = fbdev->dma_buf->size;
+	fbdev->info->screen_base = fbdev->vaddr;
+
+	return 0;
+
+err_vmap:
+	dma_buf_put(fbdev->dma_buf);
+	return ret;
+}
+
+static void adf_fb_destroy(struct adf_fbdev *fbdev)
+{
+	dma_buf_vunmap(fbdev->dma_buf, fbdev->vaddr);
+	dma_buf_put(fbdev->dma_buf);
+}
+
+static void adf_fbdev_set_format(struct adf_fbdev *fbdev, u32 format)
+{
+	size_t i;
+	const struct adf_fbdev_format *info = fbdev_format_info(format);
+	for (i = 0; i < ARRAY_SIZE(vga_palette); i++) {
+		u16 r = vga_palette[i][0];
+		u16 g = vga_palette[i][1];
+		u16 b = vga_palette[i][2];
+
+		r >>= (16 - info->r_length);
+		g >>= (16 - info->g_length);
+		b >>= (16 - info->b_length);
+
+		fbdev->pseudo_palette[i] =
+			(r << info->r_offset) |
+			(g << info->g_offset) |
+			(b << info->b_offset);
+
+		if (info->a_length) {
+			u16 a = BIT(info->a_length) - 1;
+			fbdev->pseudo_palette[i] |= (a << info->a_offset);
+		}
+	}
+
+	fbdev->info->var.bits_per_pixel = adf_format_bpp(format);
+	fbdev->info->var.red.length = info->r_length;
+	fbdev->info->var.red.offset = info->r_offset;
+	fbdev->info->var.green.length = info->g_length;
+	fbdev->info->var.green.offset = info->g_offset;
+	fbdev->info->var.blue.length = info->b_length;
+	fbdev->info->var.blue.offset = info->b_offset;
+	fbdev->info->var.transp.length = info->a_length;
+	fbdev->info->var.transp.offset = info->a_offset;
+	fbdev->format = format;
+}
+
+static void adf_fbdev_fill_modelist(struct adf_fbdev *fbdev)
+{
+	struct drm_mode_modeinfo *modelist;
+	struct fb_videomode fbmode;
+	size_t n_modes, i;
+	int ret = 0;
+
+	n_modes = adf_interface_modelist(fbdev->intf, NULL, 0);
+	modelist = kzalloc(sizeof(modelist[0]) * n_modes, GFP_KERNEL);
+	if (!modelist) {
+		dev_warn(fbdev->info->dev, "allocating new modelist failed; keeping old modelist\n");
+		return;
+	}
+	adf_interface_modelist(fbdev->intf, modelist, n_modes);
+
+	fb_destroy_modelist(&fbdev->info->modelist);
+
+	for (i = 0; i < n_modes; i++) {
+		adf_modeinfo_to_fb_videomode(&modelist[i], &fbmode);
+		ret = fb_add_videomode(&fbmode, &fbdev->info->modelist);
+		if (ret < 0)
+			dev_warn(fbdev->info->dev, "adding mode %s to modelist failed: %d\n",
+					modelist[i].name, ret);
+	}
+
+	kfree(modelist);
+}
+
+/**
+ * adf_fbdev_open - default implementation of fbdev open op
+ */
+int adf_fbdev_open(struct fb_info *info, int user)
+{
+	struct adf_fbdev *fbdev = info->par;
+	int ret;
+
+	mutex_lock(&fbdev->refcount_lock);
+
+	if (unlikely(fbdev->refcount == UINT_MAX)) {
+		ret = -EMFILE;
+		goto done;
+	}
+
+	if (!fbdev->refcount) {
+		struct drm_mode_modeinfo mode;
+		struct fb_videomode fbmode;
+		struct adf_device *dev = adf_interface_parent(fbdev->intf);
+
+		ret = adf_device_attach(dev, fbdev->eng, fbdev->intf);
+		if (ret < 0 && ret != -EALREADY)
+			goto done;
+
+		ret = adf_fb_alloc(fbdev);
+		if (ret < 0)
+			goto done;
+
+		adf_interface_current_mode(fbdev->intf, &mode);
+		adf_modeinfo_to_fb_videomode(&mode, &fbmode);
+		fb_videomode_to_var(&fbdev->info->var, &fbmode);
+
+		adf_fbdev_set_format(fbdev, fbdev->default_format);
+		adf_fbdev_fill_modelist(fbdev);
+	}
+
+	ret = adf_fbdev_post(fbdev);
+	if (ret < 0) {
+		if (!fbdev->refcount)
+			adf_fb_destroy(fbdev);
+		goto done;
+	}
+
+	fbdev->refcount++;
+done:
+	mutex_unlock(&fbdev->refcount_lock);
+	return ret;
+}
+EXPORT_SYMBOL(adf_fbdev_open);
+
+/**
+ * adf_fbdev_release - default implementation of fbdev release op
+ */
+int adf_fbdev_release(struct fb_info *info, int user)
+{
+	struct adf_fbdev *fbdev = info->par;
+	mutex_lock(&fbdev->refcount_lock);
+	BUG_ON(!fbdev->refcount);
+	fbdev->refcount--;
+	if (!fbdev->refcount)
+		adf_fb_destroy(fbdev);
+	mutex_unlock(&fbdev->refcount_lock);
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_release);
+
+/**
+ * adf_fbdev_check_var - default implementation of fbdev check_var op
+ */
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	bool valid_format = true;
+	u32 format = drm_fourcc_from_fb_var(var);
+	u32 pitch = var->xres_virtual * var->bits_per_pixel / 8;
+
+	if (!format) {
+		dev_dbg(info->dev, "%s: unrecognized format\n", __func__);
+		valid_format = false;
+	}
+
+	if (valid_format && var->grayscale) {
+		dev_dbg(info->dev, "%s: grayscale modes not supported\n",
+				__func__);
+		valid_format = false;
+	}
+
+	if (valid_format && var->nonstd) {
+		dev_dbg(info->dev, "%s: nonstandard formats not supported\n",
+				__func__);
+		valid_format = false;
+	}
+
+	if (valid_format && !adf_overlay_engine_supports_format(fbdev->eng,
+			format)) {
+		char format_str[ADF_FORMAT_STR_SIZE];
+		adf_format_str(format, format_str);
+		dev_dbg(info->dev, "%s: format %s not supported by overlay engine %s\n",
+				__func__, format_str, fbdev->eng->base.name);
+		valid_format = false;
+	}
+
+	if (valid_format && pitch > fbdev->pitch) {
+		dev_dbg(info->dev, "%s: fb pitch too small for var (pitch = %u, xres_virtual = %u, bits_per_pixel = %u)\n",
+				__func__, fbdev->pitch, var->xres_virtual,
+				var->bits_per_pixel);
+		valid_format = false;
+	}
+
+	if (valid_format && var->yres_virtual > fbdev->default_yres_virtual) {
+		dev_dbg(info->dev, "%s: fb height too small for var (h = %u, yres_virtual = %u)\n",
+				__func__, fbdev->default_yres_virtual,
+				var->yres_virtual);
+		valid_format = false;
+	}
+
+	if (valid_format) {
+		var->activate = info->var.activate;
+		var->height = info->var.height;
+		var->width = info->var.width;
+		var->accel_flags = info->var.accel_flags;
+		var->rotate = info->var.rotate;
+		var->colorspace = info->var.colorspace;
+		/* userspace can't change these */
+	} else {
+		/* if any part of the format is invalid then fixing it up is
+		   impractical, so save just the modesetting bits and
+		   overwrite everything else */
+		struct fb_videomode mode;
+		fb_var_to_videomode(&mode, var);
+		memcpy(var, &info->var, sizeof(*var));
+		fb_videomode_to_var(var, &mode);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_check_var);
+
+/**
+ * adf_fbdev_set_par - default implementation of fbdev set_par op
+ */
+int adf_fbdev_set_par(struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	struct adf_interface *intf = fbdev->intf;
+	struct fb_videomode vmode;
+	struct drm_mode_modeinfo mode;
+	int ret;
+	u32 format = drm_fourcc_from_fb_var(&info->var);
+
+	fb_var_to_videomode(&vmode, &info->var);
+	adf_modeinfo_from_fb_videomode(&vmode, &mode);
+	ret = adf_interface_set_mode(intf, &mode);
+	if (ret < 0)
+		return ret;
+
+	ret = adf_fbdev_post(fbdev);
+	if (ret < 0)
+		return ret;
+
+	if (format != fbdev->format)
+		adf_fbdev_set_format(fbdev, format);
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_set_par);
+
+/**
+ * adf_fbdev_blank - default implementation of fbdev blank op
+ */
+int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	struct adf_interface *intf = fbdev->intf;
+	u8 dpms_state;
+
+	switch (blank) {
+	case FB_BLANK_UNBLANK:
+		dpms_state = DRM_MODE_DPMS_ON;
+		break;
+	case FB_BLANK_NORMAL:
+		dpms_state = DRM_MODE_DPMS_STANDBY;
+		break;
+	case FB_BLANK_VSYNC_SUSPEND:
+		dpms_state = DRM_MODE_DPMS_SUSPEND;
+		break;
+	case FB_BLANK_HSYNC_SUSPEND:
+		dpms_state = DRM_MODE_DPMS_STANDBY;
+		break;
+	case FB_BLANK_POWERDOWN:
+		dpms_state = DRM_MODE_DPMS_OFF;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return adf_interface_blank(intf, dpms_state);
+}
+EXPORT_SYMBOL(adf_fbdev_blank);
+
+/**
+ * adf_fbdev_pan_display - default implementation of fbdev pan_display op
+ */
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct adf_fbdev *fbdev = info->par;
+	return adf_fbdev_post(fbdev);
+}
+EXPORT_SYMBOL(adf_fbdev_pan_display);
+
+/**
+ * adf_fbdev_mmap - default implementation of fbdev mmap op
+ */
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+	struct adf_fbdev *fbdev = info->par;
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	return dma_buf_mmap(fbdev->dma_buf, vma, 0);
+}
+EXPORT_SYMBOL(adf_fbdev_mmap);
+
+/**
+ * adf_fbdev_init - initialize helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ * @interface: the ADF interface that will display the framebuffer
+ * @eng: the ADF overlay engine that will scan out the framebuffer
+ * @xres_virtual: the virtual width of the framebuffer
+ * @yres_virtual: the virtual height of the framebuffer
+ * @format: the format of the framebuffer
+ * @fbops: the device's fbdev ops
+ * @fmt: formatting for the framebuffer identification string
+ * @...: variable arguments
+ *
+ * @format must be a standard, non-indexed RGB format, i.e.,
+ * adf_format_is_rgb(@format) && @format != %DRM_FORMAT_C8.
+ *
+ * Returns 0 on success or -errno on failure.
+ */
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+		struct adf_overlay_engine *eng,
+		u16 xres_virtual, u16 yres_virtual, u32 format,
+		struct fb_ops *fbops, const char *fmt, ...)
+{
+	struct adf_device *parent = adf_interface_parent(interface);
+	struct device *dev = &parent->base.dev;
+	u16 width_mm, height_mm;
+	va_list args;
+	int ret;
+
+	if (!adf_format_is_rgb(format) ||
+			format == DRM_FORMAT_C8) {
+		dev_err(dev, "fbdev helper does not support format %u\n",
+				format);
+		return -EINVAL;
+	}
+
+	memset(fbdev, 0, sizeof(*fbdev));
+	fbdev->intf = interface;
+	fbdev->eng = eng;
+	fbdev->info = framebuffer_alloc(0, dev);
+	if (!fbdev->info) {
+		dev_err(dev, "allocating framebuffer device failed\n");
+		return -ENOMEM;
+	}
+	mutex_init(&fbdev->refcount_lock);
+	fbdev->default_xres_virtual = xres_virtual;
+	fbdev->default_yres_virtual = yres_virtual;
+	fbdev->default_format = format;
+
+	fbdev->info->flags = FBINFO_FLAG_DEFAULT;
+	ret = adf_interface_get_screen_size(interface, &width_mm, &height_mm);
+	if (ret < 0) {
+		width_mm = 0;
+		height_mm = 0;
+	}
+	fbdev->info->var.width = width_mm;
+	fbdev->info->var.height = height_mm;
+	fbdev->info->var.activate = FB_ACTIVATE_VBL;
+	va_start(args, fmt);
+	vsnprintf(fbdev->info->fix.id, sizeof(fbdev->info->fix.id), fmt, args);
+	va_end(args);
+	fbdev->info->fix.type = FB_TYPE_PACKED_PIXELS;
+	fbdev->info->fix.visual = FB_VISUAL_TRUECOLOR;
+	fbdev->info->fix.xpanstep = 1;
+	fbdev->info->fix.ypanstep = 1;
+	INIT_LIST_HEAD(&fbdev->info->modelist);
+	fbdev->info->fbops = fbops;
+	fbdev->info->pseudo_palette = fbdev->pseudo_palette;
+	fbdev->info->par = fbdev;
+
+	ret = register_framebuffer(fbdev->info);
+	if (ret < 0) {
+		dev_err(dev, "registering framebuffer failed: %d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(adf_fbdev_init);
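+
+/*
+ * Editorial usage sketch (not part of this patch): a driver typically builds
+ * its fb_ops from the adf_fbdev_* defaults above and then wraps one of its
+ * interfaces and overlay engines (all "my_*" names are hypothetical):
+ *
+ *	static struct fb_ops my_fb_ops = {
+ *		.owner = THIS_MODULE,
+ *		.fb_open = adf_fbdev_open,
+ *		.fb_release = adf_fbdev_release,
+ *		.fb_check_var = adf_fbdev_check_var,
+ *		.fb_set_par = adf_fbdev_set_par,
+ *		.fb_blank = adf_fbdev_blank,
+ *		.fb_pan_display = adf_fbdev_pan_display,
+ *		.fb_mmap = adf_fbdev_mmap,
+ *	};
+ *
+ *	ret = adf_fbdev_init(&my_fbdev, &my_intf, &my_eng, xres, yres,
+ *			DRM_FORMAT_RGB565, &my_fb_ops, "my-fb");
+ */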
+
+/**
+ * adf_fbdev_destroy - destroy helper to wrap ADF device in fbdev API
+ *
+ * @fbdev: the fbdev helper
+ */
+void adf_fbdev_destroy(struct adf_fbdev *fbdev)
+{
+	unregister_framebuffer(fbdev->info);
+	BUG_ON(fbdev->refcount);
+	mutex_destroy(&fbdev->refcount_lock);
+	framebuffer_release(fbdev->info);
+}
+EXPORT_SYMBOL(adf_fbdev_destroy);
diff --git a/drivers/video/adf/adf_fops.c b/drivers/video/adf/adf_fops.c
new file mode 100644
index 0000000..8726617
--- /dev/null
+++ b/drivers/video/adf/adf_fops.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/bitops.h>
+#include <linux/circ_buf.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+
+#include <video/adf_client.h>
+#include <video/adf_format.h>
+
+#include "sw_sync.h"
+#include "sync.h"
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+#ifdef CONFIG_COMPAT
+#include "adf_fops32.h"
+#endif
+
+static int adf_obj_set_event(struct adf_obj *obj, struct adf_file *file,
+		struct adf_set_event __user *arg)
+{
+	struct adf_set_event data;
+	bool enabled;
+	unsigned long flags;
+	int err;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	err = adf_obj_check_supports_event(obj, data.type);
+	if (err < 0)
+		return err;
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+	if (data.enabled)
+		enabled = test_and_set_bit(data.type,
+				file->event_subscriptions);
+	else
+		enabled = test_and_clear_bit(data.type,
+				file->event_subscriptions);
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+
+	if (data.enabled == enabled)
+		return -EALREADY;
+
+	if (data.enabled)
+		adf_event_get(obj, data.type);
+	else
+		adf_event_put(obj, data.type);
+
+	return 0;
+}
+
+static int adf_obj_copy_custom_data_to_user(struct adf_obj *obj,
+		void __user *dst, size_t *dst_size)
+{
+	void *custom_data;
+	size_t custom_data_size;
+	int ret;
+
+	if (!obj->ops || !obj->ops->custom_data) {
+		dev_dbg(&obj->dev, "%s: no custom_data op\n", __func__);
+		return 0;
+	}
+
+	custom_data = kzalloc(ADF_MAX_CUSTOM_DATA_SIZE, GFP_KERNEL);
+	if (!custom_data)
+		return -ENOMEM;
+
+	ret = obj->ops->custom_data(obj, custom_data, &custom_data_size);
+	if (ret < 0)
+		goto done;
+
+	if (copy_to_user(dst, custom_data, min(*dst_size, custom_data_size))) {
+		ret = -EFAULT;
+		goto done;
+	}
+	*dst_size = custom_data_size;
+
+done:
+	kfree(custom_data);
+	return ret;
+}
+
+static int adf_eng_get_data(struct adf_overlay_engine *eng,
+		struct adf_overlay_engine_data __user *arg)
+{
+	struct adf_device *dev = adf_overlay_engine_parent(eng);
+	struct adf_overlay_engine_data data;
+	size_t n_supported_formats;
+	u32 *supported_formats = NULL;
+	int ret = 0;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	strlcpy(data.name, eng->base.name, sizeof(data.name));
+
+	if (data.n_supported_formats > ADF_MAX_SUPPORTED_FORMATS)
+		return -EINVAL;
+
+	n_supported_formats = data.n_supported_formats;
+	data.n_supported_formats = eng->ops->n_supported_formats;
+
+	if (n_supported_formats) {
+		supported_formats = kzalloc(n_supported_formats *
+				sizeof(supported_formats[0]), GFP_KERNEL);
+		if (!supported_formats)
+			return -ENOMEM;
+	}
+
+	memcpy(supported_formats, eng->ops->supported_formats,
+			sizeof(u32) * min(n_supported_formats,
+					eng->ops->n_supported_formats));
+
+	mutex_lock(&dev->client_lock);
+	ret = adf_obj_copy_custom_data_to_user(&eng->base, arg->custom_data,
+			&data.custom_data_size);
+	mutex_unlock(&dev->client_lock);
+
+	if (ret < 0)
+		goto done;
+
+	if (copy_to_user(arg, &data, sizeof(data))) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	if (supported_formats && copy_to_user(arg->supported_formats,
+			supported_formats,
+			n_supported_formats * sizeof(supported_formats[0])))
+		ret = -EFAULT;
+
+done:
+	kfree(supported_formats);
+	return ret;
+}
+
+static int adf_buffer_import(struct adf_device *dev,
+		struct adf_buffer_config __user *cfg, struct adf_buffer *buf)
+{
+	struct adf_buffer_config user_buf;
+	size_t i;
+	int ret = 0;
+
+	if (copy_from_user(&user_buf, cfg, sizeof(user_buf)))
+		return -EFAULT;
+
+	memset(buf, 0, sizeof(*buf));
+
+	if (user_buf.n_planes > ADF_MAX_PLANES) {
+		dev_err(&dev->base.dev, "invalid plane count %u\n",
+				user_buf.n_planes);
+		return -EINVAL;
+	}
+
+	buf->overlay_engine = idr_find(&dev->overlay_engines,
+			user_buf.overlay_engine);
+	if (!buf->overlay_engine) {
+		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+				user_buf.overlay_engine);
+		return -ENOENT;
+	}
+
+	buf->w = user_buf.w;
+	buf->h = user_buf.h;
+	buf->format = user_buf.format;
+	for (i = 0; i < user_buf.n_planes; i++) {
+		buf->dma_bufs[i] = dma_buf_get(user_buf.fd[i]);
+		if (IS_ERR(buf->dma_bufs[i])) {
+			ret = PTR_ERR(buf->dma_bufs[i]);
+			dev_err(&dev->base.dev, "importing dma_buf fd %d failed: %d\n",
+					user_buf.fd[i], ret);
+			buf->dma_bufs[i] = NULL;
+			goto done;
+		}
+		buf->offset[i] = user_buf.offset[i];
+		buf->pitch[i] = user_buf.pitch[i];
+	}
+	buf->n_planes = user_buf.n_planes;
+
+	if (user_buf.acquire_fence >= 0) {
+		buf->acquire_fence = sync_fence_fdget(user_buf.acquire_fence);
+		if (!buf->acquire_fence) {
+			dev_err(&dev->base.dev, "getting fence fd %d failed\n",
+					user_buf.acquire_fence);
+			ret = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	if (ret < 0)
+		adf_buffer_cleanup(buf);
+	return ret;
+}
+
+static int adf_device_post_config(struct adf_device *dev,
+		struct adf_post_config __user *arg)
+{
+	struct sync_fence *complete_fence;
+	int complete_fence_fd;
+	struct adf_buffer *bufs = NULL;
+	struct adf_interface **intfs = NULL;
+	size_t n_intfs, n_bufs, i;
+	void *custom_data = NULL;
+	size_t custom_data_size;
+	int ret = 0;
+
+	complete_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+	if (complete_fence_fd < 0)
+		return complete_fence_fd;
+
+	if (get_user(n_intfs, &arg->n_interfaces)) {
+		ret = -EFAULT;
+		goto err_get_user;
+	}
+
+	if (n_intfs > ADF_MAX_INTERFACES) {
+		ret = -EINVAL;
+		goto err_get_user;
+	}
+
+	if (get_user(n_bufs, &arg->n_bufs)) {
+		ret = -EFAULT;
+		goto err_get_user;
+	}
+
+	if (n_bufs > ADF_MAX_BUFFERS) {
+		ret = -EINVAL;
+		goto err_get_user;
+	}
+
+	if (get_user(custom_data_size, &arg->custom_data_size)) {
+		ret = -EFAULT;
+		goto err_get_user;
+	}
+
+	if (custom_data_size > ADF_MAX_CUSTOM_DATA_SIZE) {
+		ret = -EINVAL;
+		goto err_get_user;
+	}
+
+	if (n_intfs) {
+		intfs = kmalloc(sizeof(intfs[0]) * n_intfs, GFP_KERNEL);
+		if (!intfs) {
+			ret = -ENOMEM;
+			goto err_get_user;
+		}
+	}
+
+	for (i = 0; i < n_intfs; i++) {
+		u32 intf_id;
+		if (get_user(intf_id, &arg->interfaces[i])) {
+			ret = -EFAULT;
+			goto err_get_user;
+		}
+
+		intfs[i] = idr_find(&dev->interfaces, intf_id);
+		if (!intfs[i]) {
+			ret = -EINVAL;
+			goto err_get_user;
+		}
+	}
+
+	if (n_bufs) {
+		bufs = kzalloc(sizeof(bufs[0]) * n_bufs, GFP_KERNEL);
+		if (!bufs) {
+			ret = -ENOMEM;
+			goto err_get_user;
+		}
+	}
+
+	for (i = 0; i < n_bufs; i++) {
+		ret = adf_buffer_import(dev, &arg->bufs[i], &bufs[i]);
+		if (ret < 0) {
+			memset(&bufs[i], 0, sizeof(bufs[i]));
+			goto err_import;
+		}
+	}
+
+	if (custom_data_size) {
+		custom_data = kzalloc(custom_data_size, GFP_KERNEL);
+		if (!custom_data) {
+			ret = -ENOMEM;
+			goto err_import;
+		}
+
+		if (copy_from_user(custom_data, arg->custom_data,
+				custom_data_size)) {
+			ret = -EFAULT;
+			goto err_import;
+		}
+	}
+
+	if (put_user(complete_fence_fd, &arg->complete_fence)) {
+		ret = -EFAULT;
+		goto err_import;
+	}
+
+	complete_fence = adf_device_post_nocopy(dev, intfs, n_intfs, bufs,
+			n_bufs, custom_data, custom_data_size);
+	if (IS_ERR(complete_fence)) {
+		ret = PTR_ERR(complete_fence);
+		goto err_import;
+	}
+
+	sync_fence_install(complete_fence, complete_fence_fd);
+	return 0;
+
+err_import:
+	for (i = 0; i < n_bufs; i++)
+		adf_buffer_cleanup(&bufs[i]);
+
+err_get_user:
+	kfree(custom_data);
+	kfree(bufs);
+	kfree(intfs);
+	put_unused_fd(complete_fence_fd);
+	return ret;
+}
+
+static int adf_intf_simple_post_config(struct adf_interface *intf,
+		struct adf_simple_post_config __user *arg)
+{
+	struct adf_device *dev = intf->base.parent;
+	struct sync_fence *complete_fence;
+	int complete_fence_fd;
+	struct adf_buffer buf;
+	int ret = 0;
+
+	complete_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+	if (complete_fence_fd < 0)
+		return complete_fence_fd;
+
+	ret = adf_buffer_import(dev, &arg->buf, &buf);
+	if (ret < 0)
+		goto err_import;
+
+	if (put_user(complete_fence_fd, &arg->complete_fence)) {
+		ret = -EFAULT;
+		goto err_put_user;
+	}
+
+	complete_fence = adf_interface_simple_post(intf, &buf);
+	if (IS_ERR(complete_fence)) {
+		ret = PTR_ERR(complete_fence);
+		goto err_put_user;
+	}
+
+	sync_fence_install(complete_fence, complete_fence_fd);
+	return 0;
+
+err_put_user:
+	adf_buffer_cleanup(&buf);
+err_import:
+	put_unused_fd(complete_fence_fd);
+	return ret;
+}
+
+static int adf_intf_simple_buffer_alloc(struct adf_interface *intf,
+		struct adf_simple_buffer_alloc __user *arg)
+{
+	struct adf_simple_buffer_alloc data;
+	struct dma_buf *dma_buf;
+	int ret = 0;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	data.fd = get_unused_fd_flags(O_CLOEXEC);
+	if (data.fd < 0)
+		return data.fd;
+
+	ret = adf_interface_simple_buffer_alloc(intf, data.w, data.h,
+			data.format, &dma_buf, &data.offset, &data.pitch);
+	if (ret < 0)
+		goto err_alloc;
+
+	if (copy_to_user(arg, &data, sizeof(*arg))) {
+		ret = -EFAULT;
+		goto err_copy;
+	}
+
+	fd_install(data.fd, dma_buf->file);
+	return 0;
+
+err_copy:
+	dma_buf_put(dma_buf);
+
+err_alloc:
+	put_unused_fd(data.fd);
+	return ret;
+}
+
+static int adf_copy_attachment_list_to_user(
+		struct adf_attachment_config __user *to, size_t n_to,
+		struct adf_attachment *from, size_t n_from)
+{
+	struct adf_attachment_config *temp;
+	size_t n = min(n_to, n_from);
+	size_t i;
+	int ret = 0;
+
+	if (!n)
+		return 0;
+
+	temp = kzalloc(n * sizeof(temp[0]), GFP_KERNEL);
+	if (!temp)
+		return -ENOMEM;
+
+	for (i = 0; i < n; i++) {
+		temp[i].interface = from[i].interface->base.id;
+		temp[i].overlay_engine = from[i].overlay_engine->base.id;
+	}
+
+	if (copy_to_user(to, temp, n * sizeof(to[0]))) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+done:
+	kfree(temp);
+	return ret;
+}
+
+static int adf_device_get_data(struct adf_device *dev,
+		struct adf_device_data __user *arg)
+{
+	struct adf_device_data data;
+	size_t n_attach;
+	struct adf_attachment *attach = NULL;
+	size_t n_allowed_attach;
+	struct adf_attachment *allowed_attach = NULL;
+	int ret = 0;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	if (data.n_attachments > ADF_MAX_ATTACHMENTS ||
+			data.n_allowed_attachments > ADF_MAX_ATTACHMENTS)
+		return -EINVAL;
+
+	strlcpy(data.name, dev->base.name, sizeof(data.name));
+
+	if (data.n_attachments) {
+		attach = kzalloc(data.n_attachments * sizeof(attach[0]),
+				GFP_KERNEL);
+		if (!attach)
+			return -ENOMEM;
+	}
+	n_attach = adf_device_attachments(dev, attach, data.n_attachments);
+
+	if (data.n_allowed_attachments) {
+		allowed_attach = kzalloc(data.n_allowed_attachments *
+				sizeof(allowed_attach[0]), GFP_KERNEL);
+		if (!allowed_attach) {
+			ret = -ENOMEM;
+			goto done;
+		}
+	}
+	n_allowed_attach = adf_device_attachments_allowed(dev, allowed_attach,
+			data.n_allowed_attachments);
+
+	mutex_lock(&dev->client_lock);
+	ret = adf_obj_copy_custom_data_to_user(&dev->base, arg->custom_data,
+			&data.custom_data_size);
+	mutex_unlock(&dev->client_lock);
+
+	if (ret < 0)
+		goto done;
+
+	ret = adf_copy_attachment_list_to_user(arg->attachments,
+			data.n_attachments, attach, n_attach);
+	if (ret < 0)
+		goto done;
+
+	ret = adf_copy_attachment_list_to_user(arg->allowed_attachments,
+			data.n_allowed_attachments, allowed_attach,
+			n_allowed_attach);
+	if (ret < 0)
+		goto done;
+
+	data.n_attachments = n_attach;
+	data.n_allowed_attachments = n_allowed_attach;
+
+	if (copy_to_user(arg, &data, sizeof(data)))
+		ret = -EFAULT;
+
+done:
+	kfree(allowed_attach);
+	kfree(attach);
+	return ret;
+}
+
+static int adf_device_handle_attachment(struct adf_device *dev,
+		struct adf_attachment_config __user *arg, bool attach)
+{
+	struct adf_attachment_config data;
+	struct adf_overlay_engine *eng;
+	struct adf_interface *intf;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	eng = idr_find(&dev->overlay_engines, data.overlay_engine);
+	if (!eng) {
+		dev_err(&dev->base.dev, "invalid overlay engine id %u\n",
+				data.overlay_engine);
+		return -EINVAL;
+	}
+
+	intf = idr_find(&dev->interfaces, data.interface);
+	if (!intf) {
+		dev_err(&dev->base.dev, "invalid interface id %u\n",
+				data.interface);
+		return -EINVAL;
+	}
+
+	if (attach)
+		return adf_device_attach(dev, eng, intf);
+	else
+		return adf_device_detach(dev, eng, intf);
+}
+
+static int adf_intf_set_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo __user *arg)
+{
+	struct drm_mode_modeinfo mode;
+
+	if (copy_from_user(&mode, arg, sizeof(mode)))
+		return -EFAULT;
+
+	return adf_interface_set_mode(intf, &mode);
+}
+
+static int adf_intf_get_data(struct adf_interface *intf,
+		struct adf_interface_data __user *arg)
+{
+	struct adf_device *dev = adf_interface_parent(intf);
+	struct adf_interface_data data;
+	struct drm_mode_modeinfo *modelist;
+	size_t modelist_size;
+	int err;
+	int ret = 0;
+	unsigned long flags;
+
+	if (copy_from_user(&data, arg, sizeof(data)))
+		return -EFAULT;
+
+	strlcpy(data.name, intf->base.name, sizeof(data.name));
+
+	data.type = intf->type;
+	data.id = intf->idx;
+	data.flags = intf->flags;
+
+	err = adf_interface_get_screen_size(intf, &data.width_mm,
+			&data.height_mm);
+	if (err < 0) {
+		data.width_mm = 0;
+		data.height_mm = 0;
+	}
+
+	modelist = kmalloc(sizeof(modelist[0]) * ADF_MAX_MODES, GFP_KERNEL);
+	if (!modelist)
+		return -ENOMEM;
+
+	mutex_lock(&dev->client_lock);
+	read_lock_irqsave(&intf->hotplug_modelist_lock, flags);
+	data.hotplug_detect = intf->hotplug_detect;
+	modelist_size = min(data.n_available_modes, intf->n_modes) *
+			sizeof(intf->modelist[0]);
+	memcpy(modelist, intf->modelist, modelist_size);
+	data.n_available_modes = intf->n_modes;
+	read_unlock_irqrestore(&intf->hotplug_modelist_lock, flags);
+
+	if (copy_to_user(arg->available_modes, modelist, modelist_size)) {
+		ret = -EFAULT;
+		goto done;
+	}
+
+	data.dpms_state = intf->dpms_state;
+	memcpy(&data.current_mode, &intf->current_mode,
+			sizeof(intf->current_mode));
+
+	ret = adf_obj_copy_custom_data_to_user(&intf->base, arg->custom_data,
+			&data.custom_data_size);
+done:
+	mutex_unlock(&dev->client_lock);
+	kfree(modelist);
+
+	if (ret < 0)
+		return ret;
+
+	if (copy_to_user(arg, &data, sizeof(data)))
+		ret = -EFAULT;
+
+	return ret;
+}
+
+static inline long adf_obj_custom_ioctl(struct adf_obj *obj, unsigned int cmd,
+		unsigned long arg)
+{
+	if (obj->ops && obj->ops->ioctl)
+		return obj->ops->ioctl(obj, cmd, arg);
+	return -ENOTTY;
+}
+
+static long adf_overlay_engine_ioctl(struct adf_overlay_engine *eng,
+		struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_SET_EVENT:
+		return adf_obj_set_event(&eng->base, file,
+				(struct adf_set_event __user *)arg);
+
+	case ADF_GET_OVERLAY_ENGINE_DATA:
+		return adf_eng_get_data(eng,
+			(struct adf_overlay_engine_data __user *)arg);
+
+	case ADF_BLANK:
+	case ADF_POST_CONFIG:
+	case ADF_SET_MODE:
+	case ADF_GET_DEVICE_DATA:
+	case ADF_GET_INTERFACE_DATA:
+	case ADF_SIMPLE_POST_CONFIG:
+	case ADF_SIMPLE_BUFFER_ALLOC:
+	case ADF_ATTACH:
+	case ADF_DETACH:
+		return -EINVAL;
+
+	default:
+		return adf_obj_custom_ioctl(&eng->base, cmd, arg);
+	}
+}
+
+static long adf_interface_ioctl(struct adf_interface *intf,
+		struct adf_file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_SET_EVENT:
+		return adf_obj_set_event(&intf->base, file,
+				(struct adf_set_event __user *)arg);
+
+	case ADF_BLANK:
+		return adf_interface_blank(intf, arg);
+
+	case ADF_SET_MODE:
+		return adf_intf_set_mode(intf,
+				(struct drm_mode_modeinfo __user *)arg);
+
+	case ADF_GET_INTERFACE_DATA:
+		return adf_intf_get_data(intf,
+				(struct adf_interface_data __user *)arg);
+
+	case ADF_SIMPLE_POST_CONFIG:
+		return adf_intf_simple_post_config(intf,
+				(struct adf_simple_post_config __user *)arg);
+
+	case ADF_SIMPLE_BUFFER_ALLOC:
+		return adf_intf_simple_buffer_alloc(intf,
+				(struct adf_simple_buffer_alloc __user *)arg);
+
+	case ADF_POST_CONFIG:
+	case ADF_GET_DEVICE_DATA:
+	case ADF_GET_OVERLAY_ENGINE_DATA:
+	case ADF_ATTACH:
+	case ADF_DETACH:
+		return -EINVAL;
+
+	default:
+		return adf_obj_custom_ioctl(&intf->base, cmd, arg);
+	}
+}
+
+static long adf_device_ioctl(struct adf_device *dev, struct adf_file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_SET_EVENT:
+		return adf_obj_set_event(&dev->base, file,
+				(struct adf_set_event __user *)arg);
+
+	case ADF_POST_CONFIG:
+		return adf_device_post_config(dev,
+				(struct adf_post_config __user *)arg);
+
+	case ADF_GET_DEVICE_DATA:
+		return adf_device_get_data(dev,
+				(struct adf_device_data __user *)arg);
+
+	case ADF_ATTACH:
+		return adf_device_handle_attachment(dev,
+				(struct adf_attachment_config __user *)arg,
+				true);
+
+	case ADF_DETACH:
+		return adf_device_handle_attachment(dev,
+				(struct adf_attachment_config __user *)arg,
+				false);
+
+	case ADF_BLANK:
+	case ADF_SET_MODE:
+	case ADF_GET_INTERFACE_DATA:
+	case ADF_GET_OVERLAY_ENGINE_DATA:
+	case ADF_SIMPLE_POST_CONFIG:
+	case ADF_SIMPLE_BUFFER_ALLOC:
+		return -EINVAL;
+
+	default:
+		return adf_obj_custom_ioctl(&dev->base, cmd, arg);
+	}
+}
+
+static int adf_file_open(struct inode *inode, struct file *file)
+{
+	struct adf_obj *obj;
+	struct adf_file *fpriv = NULL;
+	unsigned long flags;
+	int ret = 0;
+
+	obj = adf_obj_sysfs_find(iminor(inode));
+	if (!obj)
+		return -ENODEV;
+
+	dev_dbg(&obj->dev, "opening %s\n", dev_name(&obj->dev));
+
+	if (!try_module_get(obj->parent->ops->owner)) {
+		dev_err(&obj->dev, "getting owner module failed\n");
+		return -ENODEV;
+	}
+
+	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+	if (!fpriv) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	INIT_LIST_HEAD(&fpriv->head);
+	fpriv->obj = obj;
+	init_waitqueue_head(&fpriv->event_wait);
+
+	file->private_data = fpriv;
+
+	if (obj->ops && obj->ops->open) {
+		ret = obj->ops->open(obj, inode, file);
+		if (ret < 0)
+			goto done;
+	}
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+	list_add_tail(&fpriv->head, &obj->file_list);
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+
+done:
+	if (ret < 0) {
+		kfree(fpriv);
+		module_put(obj->parent->ops->owner);
+	}
+	return ret;
+}
+
+static int adf_file_release(struct inode *inode, struct file *file)
+{
+	struct adf_file *fpriv = file->private_data;
+	struct adf_obj *obj = fpriv->obj;
+	enum adf_event_type event_type;
+	unsigned long flags;
+
+	if (obj->ops && obj->ops->release)
+		obj->ops->release(obj, inode, file);
+
+	spin_lock_irqsave(&obj->file_lock, flags);
+	list_del(&fpriv->head);
+	spin_unlock_irqrestore(&obj->file_lock, flags);
+
+	for_each_set_bit(event_type, fpriv->event_subscriptions,
+			ADF_EVENT_TYPE_MAX) {
+		adf_event_put(obj, event_type);
+	}
+
+	kfree(fpriv);
+	module_put(obj->parent->ops->owner);
+
+	dev_dbg(&obj->dev, "released %s\n", dev_name(&obj->dev));
+	return 0;
+}
+
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct adf_file *fpriv = file->private_data;
+	struct adf_obj *obj = fpriv->obj;
+	long ret = -EINVAL;
+
+	dev_dbg(&obj->dev, "%s ioctl %u\n", dev_name(&obj->dev), _IOC_NR(cmd));
+
+	switch (obj->type) {
+	case ADF_OBJ_OVERLAY_ENGINE:
+		ret = adf_overlay_engine_ioctl(adf_obj_to_overlay_engine(obj),
+				fpriv, cmd, arg);
+		break;
+
+	case ADF_OBJ_INTERFACE:
+		ret = adf_interface_ioctl(adf_obj_to_interface(obj), fpriv, cmd,
+				arg);
+		break;
+
+	case ADF_OBJ_DEVICE:
+		ret = adf_device_ioctl(adf_obj_to_device(obj), fpriv, cmd, arg);
+		break;
+	}
+
+	return ret;
+}
+
+static inline bool adf_file_event_available(struct adf_file *fpriv)
+{
+	int head = fpriv->event_head;
+	int tail = fpriv->event_tail;
+	return CIRC_CNT(head, tail, sizeof(fpriv->event_buf)) != 0;
+}
+
+void adf_file_queue_event(struct adf_file *fpriv, struct adf_event *event)
+{
+	int head = fpriv->event_head;
+	int tail = fpriv->event_tail;
+	size_t space = CIRC_SPACE(head, tail, sizeof(fpriv->event_buf));
+	size_t space_to_end =
+			CIRC_SPACE_TO_END(head, tail, sizeof(fpriv->event_buf));
+
+	if (space < event->length) {
+		dev_dbg(&fpriv->obj->dev,
+				"insufficient buffer space for event %u\n",
+				event->type);
+		return;
+	}
+
+	if (space_to_end >= event->length) {
+		memcpy(fpriv->event_buf + head, event, event->length);
+	} else {
+		memcpy(fpriv->event_buf + head, event, space_to_end);
+		memcpy(fpriv->event_buf, (u8 *)event + space_to_end,
+				event->length - space_to_end);
+	}
+
+	smp_wmb();
+	fpriv->event_head = (fpriv->event_head + event->length) &
+			(sizeof(fpriv->event_buf) - 1);
+	wake_up_interruptible_all(&fpriv->event_wait);
+}
+
+static ssize_t adf_file_copy_to_user(struct adf_file *fpriv,
+		char __user *buffer, size_t buffer_size)
+{
+	int head, tail;
+	u8 *event_buf;
+	size_t cnt, cnt_to_end, copy_size = 0;
+	ssize_t ret = 0;
+	unsigned long flags;
+
+	event_buf = kmalloc(min(buffer_size, sizeof(fpriv->event_buf)),
+			GFP_KERNEL);
+	if (!event_buf)
+		return -ENOMEM;
+
+	spin_lock_irqsave(&fpriv->obj->file_lock, flags);
+
+	if (!adf_file_event_available(fpriv))
+		goto out;
+
+	head = fpriv->event_head;
+	tail = fpriv->event_tail;
+
+	cnt = CIRC_CNT(head, tail, sizeof(fpriv->event_buf));
+	cnt_to_end = CIRC_CNT_TO_END(head, tail, sizeof(fpriv->event_buf));
+	copy_size = min(buffer_size, cnt);
+
+	if (cnt_to_end >= copy_size) {
+		memcpy(event_buf, fpriv->event_buf + tail, copy_size);
+	} else {
+		memcpy(event_buf, fpriv->event_buf + tail, cnt_to_end);
+		memcpy(event_buf + cnt_to_end, fpriv->event_buf,
+				copy_size - cnt_to_end);
+	}
+
+	fpriv->event_tail = (fpriv->event_tail + copy_size) &
+			(sizeof(fpriv->event_buf) - 1);
+
+out:
+	spin_unlock_irqrestore(&fpriv->obj->file_lock, flags);
+	if (copy_size) {
+		if (copy_to_user(buffer, event_buf, copy_size))
+			ret = -EFAULT;
+		else
+			ret = copy_size;
+	}
+	kfree(event_buf);
+	return ret;
+}
+
+ssize_t adf_file_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
+{
+	struct adf_file *fpriv = filp->private_data;
+	int err;
+
+	err = wait_event_interruptible(fpriv->event_wait,
+			adf_file_event_available(fpriv));
+	if (err < 0)
+		return err;
+
+	return adf_file_copy_to_user(fpriv, buffer, count);
+}
+
+unsigned int adf_file_poll(struct file *filp, struct poll_table_struct *wait)
+{
+	struct adf_file *fpriv = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &fpriv->event_wait, wait);
+
+	if (adf_file_event_available(fpriv))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
+}
+
+const struct file_operations adf_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = adf_file_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = adf_file_compat_ioctl,
+#endif
+	.open = adf_file_open,
+	.release = adf_file_release,
+	.llseek = default_llseek,
+	.read = adf_file_read,
+	.poll = adf_file_poll,
+};
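
Reviewer's note (illustrative, not part of the patch): adf_file_read() returns whatever adf_file_queue_event() packed into the per-client ring, so userspace sees a byte stream of variable-length records, each starting with a struct adf_event header. A hedged sketch of draining that stream follows; the struct adf_event / adf_vsync_event layout, the <video/adf.h> install path and the ADF_EVENT_VSYNC constant are assumptions taken from the ADF uapi header added elsewhere in this series.

    /* Hedged userspace sketch: read and walk packed ADF events from an ADF fd. */
    #include <stdio.h>
    #include <unistd.h>
    #include <video/adf.h>   /* uapi header from this series (assumed install path) */

    static void drain_adf_events(int fd)
    {
        char buf[4096];
        ssize_t n = read(fd, buf, sizeof(buf)); /* blocks until an event is queued */
        ssize_t off = 0;

        while (n > 0 && off + (ssize_t)sizeof(struct adf_event) <= n) {
            struct adf_event *ev = (struct adf_event *)(buf + off);

            if (ev->type == ADF_EVENT_VSYNC) {
                struct adf_vsync_event *vs = (struct adf_vsync_event *)ev;
                printf("vsync at %llu ns\n", (unsigned long long)vs->timestamp);
            }
            off += ev->length; /* records are packed back to back */
        }
    }
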
diff --git a/drivers/video/adf/adf_fops.h b/drivers/video/adf/adf_fops.h
new file mode 100644
index 0000000..90a3a74
--- /dev/null
+++ b/drivers/video/adf/adf_fops.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_FOPS_H
+#define __VIDEO_ADF_ADF_FOPS_H
+
+#include <linux/bitmap.h>
+#include <linux/fs.h>
+
+extern const struct file_operations adf_fops;
+
+struct adf_file {
+	struct list_head head;
+	struct adf_obj *obj;
+
+	DECLARE_BITMAP(event_subscriptions, ADF_EVENT_TYPE_MAX);
+	u8 event_buf[4096];
+	int event_head;
+	int event_tail;
+	wait_queue_head_t event_wait;
+};
+
+void adf_file_queue_event(struct adf_file *file, struct adf_event *event);
+long adf_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS_H */
diff --git a/drivers/video/adf/adf_fops32.c b/drivers/video/adf/adf_fops32.c
new file mode 100644
index 0000000..d299a81
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/uaccess.h>
+#include <video/adf.h>
+
+#include "adf_fops.h"
+#include "adf_fops32.h"
+
+long adf_compat_post_config(struct file *file,
+		struct adf_post_config32 __user *arg)
+{
+	struct adf_post_config32 cfg32;
+	struct adf_post_config __user *cfg;
+	int ret;
+
+	if (copy_from_user(&cfg32, arg, sizeof(cfg32)))
+		return -EFAULT;
+
+	cfg = compat_alloc_user_space(sizeof(*cfg));
+	if (!access_ok(VERIFY_WRITE, cfg, sizeof(*cfg)))
+		return -EFAULT;
+
+	if (put_user(cfg32.n_interfaces, &cfg->n_interfaces) ||
+			put_user(compat_ptr(cfg32.interfaces),
+					&cfg->interfaces) ||
+			put_user(cfg32.n_bufs, &cfg->n_bufs) ||
+			put_user(compat_ptr(cfg32.bufs), &cfg->bufs) ||
+			put_user(cfg32.custom_data_size,
+					&cfg->custom_data_size) ||
+			put_user(compat_ptr(cfg32.custom_data),
+					&cfg->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_POST_CONFIG, (unsigned long)cfg);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(&arg->complete_fence, &cfg->complete_fence,
+			sizeof(cfg->complete_fence)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_compat_get_device_data(struct file *file,
+		struct adf_device_data32 __user *arg)
+{
+	struct adf_device_data32 data32;
+	struct adf_device_data __user *data;
+	int ret;
+
+	if (copy_from_user(&data32, arg, sizeof(data32)))
+		return -EFAULT;
+
+	data = compat_alloc_user_space(sizeof(*data));
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+		return -EFAULT;
+
+	if (put_user(data32.n_attachments, &data->n_attachments) ||
+			put_user(compat_ptr(data32.attachments),
+					&data->attachments) ||
+			put_user(data32.n_allowed_attachments,
+					&data->n_allowed_attachments) ||
+			put_user(compat_ptr(data32.allowed_attachments),
+					&data->allowed_attachments) ||
+			put_user(data32.custom_data_size,
+					&data->custom_data_size) ||
+			put_user(compat_ptr(data32.custom_data),
+					&data->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_GET_DEVICE_DATA, (unsigned long)data);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+			copy_in_user(&arg->n_attachments, &data->n_attachments,
+					sizeof(arg->n_attachments)) ||
+			copy_in_user(&arg->n_allowed_attachments,
+					&data->n_allowed_attachments,
+					sizeof(arg->n_allowed_attachments)) ||
+			copy_in_user(&arg->custom_data_size,
+					&data->custom_data_size,
+					sizeof(arg->custom_data_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_compat_get_interface_data(struct file *file,
+		struct adf_interface_data32 __user *arg)
+{
+	struct adf_interface_data32 data32;
+	struct adf_interface_data __user *data;
+	int ret;
+
+	if (copy_from_user(&data32, arg, sizeof(data32)))
+		return -EFAULT;
+
+	data = compat_alloc_user_space(sizeof(*data));
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+		return -EFAULT;
+
+	if (put_user(data32.n_available_modes, &data->n_available_modes) ||
+			put_user(compat_ptr(data32.available_modes),
+					&data->available_modes) ||
+			put_user(data32.custom_data_size,
+					&data->custom_data_size) ||
+			put_user(compat_ptr(data32.custom_data),
+					&data->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_GET_INTERFACE_DATA, (unsigned long)data);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+			copy_in_user(&arg->type, &data->type,
+					sizeof(arg->type)) ||
+			copy_in_user(&arg->id, &data->id, sizeof(arg->id)) ||
+			copy_in_user(&arg->flags, &data->flags,
+					sizeof(arg->flags)) ||
+			copy_in_user(&arg->dpms_state, &data->dpms_state,
+					sizeof(arg->dpms_state)) ||
+			copy_in_user(&arg->hotplug_detect,
+					&data->hotplug_detect,
+					sizeof(arg->hotplug_detect)) ||
+			copy_in_user(&arg->width_mm, &data->width_mm,
+					sizeof(arg->width_mm)) ||
+			copy_in_user(&arg->height_mm, &data->height_mm,
+					sizeof(arg->height_mm)) ||
+			copy_in_user(&arg->current_mode, &data->current_mode,
+					sizeof(arg->current_mode)) ||
+			copy_in_user(&arg->n_available_modes,
+					&data->n_available_modes,
+					sizeof(arg->n_available_modes)) ||
+			copy_in_user(&arg->custom_data_size,
+					&data->custom_data_size,
+					sizeof(arg->custom_data_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_compat_get_overlay_engine_data(struct file *file,
+		struct adf_overlay_engine_data32 __user *arg)
+{
+	struct adf_overlay_engine_data32 data32;
+	struct adf_overlay_engine_data __user *data;
+	int ret;
+
+	if (copy_from_user(&data32, arg, sizeof(data32)))
+		return -EFAULT;
+
+	data = compat_alloc_user_space(sizeof(*data));
+	if (!access_ok(VERIFY_WRITE, data, sizeof(*data)))
+		return -EFAULT;
+
+	if (put_user(data32.n_supported_formats, &data->n_supported_formats) ||
+			put_user(compat_ptr(data32.supported_formats),
+					&data->supported_formats) ||
+			put_user(data32.custom_data_size,
+					&data->custom_data_size) ||
+			put_user(compat_ptr(data32.custom_data),
+					&data->custom_data))
+		return -EFAULT;
+
+	ret = adf_file_ioctl(file, ADF_GET_OVERLAY_ENGINE_DATA,
+			(unsigned long)data);
+	if (ret < 0)
+		return ret;
+
+	if (copy_in_user(arg->name, data->name, sizeof(arg->name)) ||
+			copy_in_user(&arg->n_supported_formats,
+					&data->n_supported_formats,
+					sizeof(arg->n_supported_formats)) ||
+			copy_in_user(&arg->custom_data_size,
+					&data->custom_data_size,
+					sizeof(arg->custom_data_size)))
+		return -EFAULT;
+
+	return 0;
+}
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg)
+{
+	switch (cmd) {
+	case ADF_POST_CONFIG32:
+		return adf_compat_post_config(file, compat_ptr(arg));
+
+	case ADF_GET_DEVICE_DATA32:
+		return adf_compat_get_device_data(file, compat_ptr(arg));
+
+	case ADF_GET_INTERFACE_DATA32:
+		return adf_compat_get_interface_data(file, compat_ptr(arg));
+
+	case ADF_GET_OVERLAY_ENGINE_DATA32:
+		return adf_compat_get_overlay_engine_data(file,
+				compat_ptr(arg));
+
+	default:
+		return adf_file_ioctl(file, cmd, arg);
+	}
+}
diff --git a/drivers/video/adf/adf_fops32.h b/drivers/video/adf/adf_fops32.h
new file mode 100644
index 0000000..64034ce
--- /dev/null
+++ b/drivers/video/adf/adf_fops32.h
@@ -0,0 +1,78 @@
+#ifndef __VIDEO_ADF_ADF_FOPS32_H
+#define __VIDEO_ADF_ADF_FOPS32_H
+
+#include <linux/compat.h>
+#include <linux/ioctl.h>
+
+#include <video/adf.h>
+
+#define ADF_POST_CONFIG32 \
+		_IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config32)
+#define ADF_GET_DEVICE_DATA32 \
+		_IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data32)
+#define ADF_GET_INTERFACE_DATA32 \
+		_IOR(ADF_IOCTL_TYPE, 5, struct adf_interface_data32)
+#define ADF_GET_OVERLAY_ENGINE_DATA32 \
+		_IOR(ADF_IOCTL_TYPE, 6, struct adf_overlay_engine_data32)
+
+struct adf_post_config32 {
+	compat_size_t n_interfaces;
+	compat_uptr_t interfaces;
+
+	compat_size_t n_bufs;
+	compat_uptr_t bufs;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+
+	__s32 complete_fence;
+};
+
+struct adf_device_data32 {
+	char name[ADF_NAME_LEN];
+
+	compat_size_t n_attachments;
+	compat_uptr_t attachments;
+
+	compat_size_t n_allowed_attachments;
+	compat_uptr_t allowed_attachments;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+};
+
+struct adf_interface_data32 {
+	char name[ADF_NAME_LEN];
+
+	__u8 type;
+	__u32 id;
+	/* e.g. type=ADF_INTF_TYPE_DSI, id=1 => DSI.1 */
+	__u32 flags;
+
+	__u8 dpms_state;
+	__u8 hotplug_detect;
+	__u16 width_mm;
+	__u16 height_mm;
+
+	struct drm_mode_modeinfo current_mode;
+	compat_size_t n_available_modes;
+	compat_uptr_t available_modes;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+};
+
+struct adf_overlay_engine_data32 {
+	char name[ADF_NAME_LEN];
+
+	compat_size_t n_supported_formats;
+	compat_uptr_t supported_formats;
+
+	compat_size_t custom_data_size;
+	compat_uptr_t custom_data;
+};
+
+long adf_file_compat_ioctl(struct file *file, unsigned int cmd,
+		unsigned long arg);
+
+#endif /* __VIDEO_ADF_ADF_FOPS32_H */
diff --git a/drivers/video/adf/adf_format.c b/drivers/video/adf/adf_format.c
new file mode 100644
index 0000000..e3f22c7
--- /dev/null
+++ b/drivers/video/adf/adf_format.c
@@ -0,0 +1,280 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ * modified from drivers/gpu/drm/drm_crtc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <drm/drm_fourcc.h>
+#include <video/adf_format.h>
+
+bool adf_format_is_standard(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB4444:
+	case DRM_FORMAT_XBGR4444:
+	case DRM_FORMAT_RGBX4444:
+	case DRM_FORMAT_BGRX4444:
+	case DRM_FORMAT_ARGB4444:
+	case DRM_FORMAT_ABGR4444:
+	case DRM_FORMAT_RGBA4444:
+	case DRM_FORMAT_BGRA4444:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_AYUV:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return true;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(adf_format_is_standard);
+
+bool adf_format_is_rgb(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+		return true;
+
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(adf_format_is_rgb);
+
+u8 adf_format_num_planes(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 3;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(adf_format_num_planes);
+
+u8 adf_format_bpp(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_C8:
+	case DRM_FORMAT_RGB332:
+	case DRM_FORMAT_BGR233:
+		return 8;
+
+	case DRM_FORMAT_XRGB1555:
+	case DRM_FORMAT_XBGR1555:
+	case DRM_FORMAT_RGBX5551:
+	case DRM_FORMAT_BGRX5551:
+	case DRM_FORMAT_ARGB1555:
+	case DRM_FORMAT_ABGR1555:
+	case DRM_FORMAT_RGBA5551:
+	case DRM_FORMAT_BGRA5551:
+	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
+		return 16;
+
+	case DRM_FORMAT_RGB888:
+	case DRM_FORMAT_BGR888:
+		return 24;
+
+	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_BGRA8888:
+		return 32;
+
+	default:
+		pr_debug("%s: unsupported pixel format %u\n", __func__, format);
+		return 0;
+	}
+}
+EXPORT_SYMBOL(adf_format_bpp);
+
+u8 adf_format_plane_cpp(u32 format, int plane)
+{
+	if (plane >= adf_format_num_planes(format))
+		return 0;
+
+	switch (format) {
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+		return 2;
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+		return plane ? 2 : 1;
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV444:
+	case DRM_FORMAT_YVU444:
+		return 1;
+	default:
+		return adf_format_bpp(format) / 8;
+	}
+}
+EXPORT_SYMBOL(adf_format_plane_cpp);
+
+u8 adf_format_horz_chroma_subsampling(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV411:
+	case DRM_FORMAT_YVU411:
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUYV:
+	case DRM_FORMAT_YVYU:
+	case DRM_FORMAT_UYVY:
+	case DRM_FORMAT_VYUY:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+	case DRM_FORMAT_NV16:
+	case DRM_FORMAT_NV61:
+	case DRM_FORMAT_YUV422:
+	case DRM_FORMAT_YVU422:
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(adf_format_horz_chroma_subsampling);
+
+u8 adf_format_vert_chroma_subsampling(u32 format)
+{
+	switch (format) {
+	case DRM_FORMAT_YUV410:
+	case DRM_FORMAT_YVU410:
+		return 4;
+	case DRM_FORMAT_YUV420:
+	case DRM_FORMAT_YVU420:
+	case DRM_FORMAT_NV12:
+	case DRM_FORMAT_NV21:
+		return 2;
+	default:
+		return 1;
+	}
+}
+EXPORT_SYMBOL(adf_format_vert_chroma_subsampling);
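
Reviewer's note (illustrative, not part of the patch): taken together, the helpers in this file are enough to size a buffer plane by plane. A minimal kernel-side sketch using only the functions defined above; the tightly packed pitch (no driver-specific alignment) is an assumption of the example, not something the patch mandates.

    #include <linux/types.h>
    #include <drm/drm_fourcc.h>
    #include <video/adf_format.h>

    /* Bytes needed for one plane of a buffer, assuming pitch == width * cpp. */
    static size_t example_plane_size(u32 format, u32 width, u32 height, int plane)
    {
        u32 w = width, h = height;

        if (plane > 0) {
            w /= adf_format_horz_chroma_subsampling(format);
            h /= adf_format_vert_chroma_subsampling(format);
        }
        return (size_t)w * h * adf_format_plane_cpp(format, plane);
    }

For DRM_FORMAT_NV12 at 1920x1080 this gives 1920*1080*1 bytes for the Y plane and 960*540*2 bytes for the interleaved CbCr plane, i.e. the expected 1.5 bytes per pixel.
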
diff --git a/drivers/video/adf/adf_memblock.c b/drivers/video/adf/adf_memblock.c
new file mode 100644
index 0000000..285218a
--- /dev/null
+++ b/drivers/video/adf/adf_memblock.c
@@ -0,0 +1,166 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+struct adf_memblock_pdata {
+	phys_addr_t base;
+};
+
+static struct sg_table *adf_memblock_map(struct dma_buf_attachment *attach,
+		enum dma_data_direction direction)
+{
+	struct adf_memblock_pdata *pdata = attach->dmabuf->priv;
+	unsigned long pfn = PFN_DOWN(pdata->base);
+	struct page *page = pfn_to_page(pfn);
+	struct sg_table *table;
+	int nents, ret;
+
+	table = kzalloc(sizeof(*table), GFP_KERNEL);
+	if (!table)
+		return ERR_PTR(-ENOMEM);
+
+	ret = sg_alloc_table(table, 1, GFP_KERNEL);
+	if (ret < 0)
+		goto err_alloc;
+
+	sg_set_page(table->sgl, page, attach->dmabuf->size, 0);
+
+	nents = dma_map_sg(attach->dev, table->sgl, 1, direction);
+	if (!nents) {
+		ret = -EINVAL;
+		goto err_map;
+	}
+
+	return table;
+
+err_map:
+	sg_free_table(table);
+err_alloc:
+	kfree(table);
+	return ERR_PTR(ret);
+}
+
+static void adf_memblock_unmap(struct dma_buf_attachment *attach,
+		struct sg_table *table, enum dma_data_direction direction)
+{
+	dma_unmap_sg(attach->dev, table->sgl, 1, direction);
+	sg_free_table(table);
+}
+
+static void __init_memblock adf_memblock_release(struct dma_buf *buf)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+	int err = memblock_free(pdata->base, buf->size);
+
+	if (err < 0)
+		pr_warn("%s: freeing memblock failed: %d\n", __func__, err);
+	kfree(pdata);
+}
+
+static void *adf_memblock_do_kmap(struct dma_buf *buf, unsigned long pgoffset,
+		bool atomic)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+	unsigned long pfn = PFN_DOWN(pdata->base) + pgoffset;
+	struct page *page = pfn_to_page(pfn);
+
+	if (atomic)
+		return kmap_atomic(page);
+	else
+		return kmap(page);
+}
+
+static void *adf_memblock_kmap_atomic(struct dma_buf *buf,
+		unsigned long pgoffset)
+{
+	return adf_memblock_do_kmap(buf, pgoffset, true);
+}
+
+static void adf_memblock_kunmap_atomic(struct dma_buf *buf,
+		unsigned long pgoffset, void *vaddr)
+{
+	kunmap_atomic(vaddr);
+}
+
+static void *adf_memblock_kmap(struct dma_buf *buf, unsigned long pgoffset)
+{
+	return adf_memblock_do_kmap(buf, pgoffset, false);
+}
+
+static void adf_memblock_kunmap(struct dma_buf *buf, unsigned long pgoffset,
+		void *vaddr)
+{
+	kunmap(vaddr);
+}
+
+static int adf_memblock_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+	struct adf_memblock_pdata *pdata = buf->priv;
+
+	return remap_pfn_range(vma, vma->vm_start, PFN_DOWN(pdata->base),
+			vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
+struct dma_buf_ops adf_memblock_ops = {
+	.map_dma_buf = adf_memblock_map,
+	.unmap_dma_buf = adf_memblock_unmap,
+	.release = adf_memblock_release,
+	.kmap_atomic = adf_memblock_kmap_atomic,
+	.kunmap_atomic = adf_memblock_kunmap_atomic,
+	.kmap = adf_memblock_kmap,
+	.kunmap = adf_memblock_kunmap,
+	.mmap = adf_memblock_mmap,
+};
+
+/**
+ * adf_memblock_export - export a memblock reserved area as a dma-buf
+ *
+ * @base: base physical address
+ * @size: memblock size
+ * @flags: mode flags for the dma-buf's file
+ *
+ * @base and @size must be page-aligned.
+ *
+ * Returns a dma-buf on success or ERR_PTR(-errno) on failure.
+ */
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags)
+{
+	struct adf_memblock_pdata *pdata;
+	struct dma_buf *buf;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+	if (PAGE_ALIGN(base) != base || PAGE_ALIGN(size) != size)
+		return ERR_PTR(-EINVAL);
+
+	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->base = base;
+	exp_info.ops = &adf_memblock_ops;
+	exp_info.size = size;
+	exp_info.flags = flags;
+	exp_info.priv = pdata;
+
+	buf = dma_buf_export(&exp_info);
+	if (IS_ERR(buf))
+		kfree(pdata);
+
+	return buf;
+}
+EXPORT_SYMBOL(adf_memblock_export);
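
Reviewer's note (illustrative, not part of the patch): the kerneldoc above is the whole contract of this file. A hedged sketch of the intended use: a display driver wraps a framebuffer carved out of memblock at boot so it can be posted like any other dma-buf. The <video/adf_memblock.h> header path and the memblock reservation itself are assumptions here and are not shown in this diff.

    #include <linux/dma-buf.h>
    #include <linux/err.h>
    #include <linux/fcntl.h>
    #include <linux/printk.h>
    #include <linux/types.h>
    #include <video/adf_memblock.h>

    /* base/size come from a board-specific memblock_reserve() done early in
     * boot; both must be page-aligned, matching the check in
     * adf_memblock_export(). */
    static struct dma_buf *example_wrap_bootfb(phys_addr_t base, size_t size)
    {
        struct dma_buf *buf = adf_memblock_export(base, size, O_CLOEXEC);

        if (IS_ERR(buf))
            pr_err("exporting boot framebuffer failed: %ld\n", PTR_ERR(buf));
        return buf;
    }
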
diff --git a/drivers/video/adf/adf_sysfs.c b/drivers/video/adf/adf_sysfs.c
new file mode 100644
index 0000000..8c659c7
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.c
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/adf_client.h>
+
+#include "adf.h"
+#include "adf_fops.h"
+#include "adf_sysfs.h"
+
+static struct class *adf_class;
+static int adf_major;
+static DEFINE_IDR(adf_minors);
+
+#define dev_to_adf_interface(p) \
+	adf_obj_to_interface(container_of(p, struct adf_obj, dev))
+
+static ssize_t dpms_state_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	return scnprintf(buf, PAGE_SIZE, "%u\n",
+			adf_interface_dpms_state(intf));
+}
+
+static ssize_t dpms_state_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	u8 dpms_state;
+	int err;
+
+	err = kstrtou8(buf, 0, &dpms_state);
+	if (err < 0)
+		return err;
+
+	err = adf_interface_blank(intf, dpms_state);
+	if (err < 0)
+		return err;
+
+	return count;
+}
+
+static ssize_t current_mode_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	struct drm_mode_modeinfo mode;
+
+	adf_interface_current_mode(intf, &mode);
+
+	if (mode.name[0]) {
+		return scnprintf(buf, PAGE_SIZE, "%s\n", mode.name);
+	} else {
+		bool interlaced = !!(mode.flags & DRM_MODE_FLAG_INTERLACE);
+		return scnprintf(buf, PAGE_SIZE, "%ux%u%s\n", mode.hdisplay,
+				mode.vdisplay, interlaced ? "i" : "");
+	}
+}
+
+static ssize_t type_show(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	return scnprintf(buf, PAGE_SIZE, "%s\n",
+			adf_interface_type_str(intf));
+}
+
+static ssize_t vsync_timestamp_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	ktime_t timestamp;
+	unsigned long flags;
+
+	read_lock_irqsave(&intf->vsync_lock, flags);
+	memcpy(&timestamp, &intf->vsync_timestamp, sizeof(timestamp));
+	read_unlock_irqrestore(&intf->vsync_lock, flags);
+
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", ktime_to_ns(timestamp));
+}
+
+static ssize_t hotplug_detect_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct adf_interface *intf = dev_to_adf_interface(dev);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", intf->hotplug_detect);
+}
+
+static struct device_attribute adf_interface_attrs[] = {
+	__ATTR(dpms_state, S_IRUGO|S_IWUSR, dpms_state_show, dpms_state_store),
+	__ATTR_RO(current_mode),
+	__ATTR_RO(hotplug_detect),
+	__ATTR_RO(type),
+	__ATTR_RO(vsync_timestamp),
+};
+
+int adf_obj_sysfs_init(struct adf_obj *obj, struct device *parent)
+{
+	int ret = idr_alloc(&adf_minors, obj, 0, 0, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: allocating adf minor failed: %d\n", __func__,
+				ret);
+		return ret;
+	}
+
+	obj->minor = ret;
+	obj->dev.parent = parent;
+	obj->dev.class = adf_class;
+	obj->dev.devt = MKDEV(adf_major, obj->minor);
+
+	ret = device_register(&obj->dev);
+	if (ret < 0) {
+		pr_err("%s: registering adf object failed: %d\n", __func__,
+				ret);
+		goto err_device_register;
+	}
+
+	return 0;
+
+err_device_register:
+	idr_remove(&adf_minors, obj->minor);
+	return ret;
+}
+
+static char *adf_device_devnode(struct device *dev, umode_t *mode,
+		kuid_t *uid, kgid_t *gid)
+{
+	struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+	return kasprintf(GFP_KERNEL, "adf%d", obj->id);
+}
+
+static char *adf_interface_devnode(struct device *dev, umode_t *mode,
+		kuid_t *uid, kgid_t *gid)
+{
+	struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+	struct adf_interface *intf = adf_obj_to_interface(obj);
+	struct adf_device *parent = adf_interface_parent(intf);
+	return kasprintf(GFP_KERNEL, "adf-interface%d.%d",
+			parent->base.id, intf->base.id);
+}
+
+static char *adf_overlay_engine_devnode(struct device *dev, umode_t *mode,
+		kuid_t *uid, kgid_t *gid)
+{
+	struct adf_obj *obj = container_of(dev, struct adf_obj, dev);
+	struct adf_overlay_engine *eng = adf_obj_to_overlay_engine(obj);
+	struct adf_device *parent = adf_overlay_engine_parent(eng);
+	return kasprintf(GFP_KERNEL, "adf-overlay-engine%d.%d",
+			parent->base.id, eng->base.id);
+}
+
+static void adf_noop_release(struct device *dev)
+{
+}
+
+static struct device_type adf_device_type = {
+	.name = "adf_device",
+	.devnode = adf_device_devnode,
+	.release = adf_noop_release,
+};
+
+static struct device_type adf_interface_type = {
+	.name = "adf_interface",
+	.devnode = adf_interface_devnode,
+	.release = adf_noop_release,
+};
+
+static struct device_type adf_overlay_engine_type = {
+	.name = "adf_overlay_engine",
+	.devnode = adf_overlay_engine_devnode,
+	.release = adf_noop_release,
+};
+
+int adf_device_sysfs_init(struct adf_device *dev)
+{
+	dev->base.dev.type = &adf_device_type;
+	dev_set_name(&dev->base.dev, "%s", dev->base.name);
+	return adf_obj_sysfs_init(&dev->base, dev->dev);
+}
+
+int adf_interface_sysfs_init(struct adf_interface *intf)
+{
+	struct adf_device *parent = adf_interface_parent(intf);
+	size_t i, j;
+	int ret;
+
+	intf->base.dev.type = &adf_interface_type;
+	dev_set_name(&intf->base.dev, "%s-interface%d", parent->base.name,
+			intf->base.id);
+
+	ret = adf_obj_sysfs_init(&intf->base, &parent->base.dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++) {
+		ret = device_create_file(&intf->base.dev,
+				&adf_interface_attrs[i]);
+		if (ret < 0) {
+			dev_err(&intf->base.dev, "creating sysfs attribute %s failed: %d\n",
+					adf_interface_attrs[i].attr.name, ret);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	for (j = 0; j < i; j++)
+		device_remove_file(&intf->base.dev, &adf_interface_attrs[j]);
+	return ret;
+}
+
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng)
+{
+	struct adf_device *parent = adf_overlay_engine_parent(eng);
+
+	eng->base.dev.type = &adf_overlay_engine_type;
+	dev_set_name(&eng->base.dev, "%s-overlay-engine%d", parent->base.name,
+			eng->base.id);
+
+	return adf_obj_sysfs_init(&eng->base, &parent->base.dev);
+}
+
+struct adf_obj *adf_obj_sysfs_find(int minor)
+{
+	return idr_find(&adf_minors, minor);
+}
+
+void adf_obj_sysfs_destroy(struct adf_obj *obj)
+{
+	idr_remove(&adf_minors, obj->minor);
+	device_unregister(&obj->dev);
+}
+
+void adf_device_sysfs_destroy(struct adf_device *dev)
+{
+	adf_obj_sysfs_destroy(&dev->base);
+}
+
+void adf_interface_sysfs_destroy(struct adf_interface *intf)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(adf_interface_attrs); i++)
+		device_remove_file(&intf->base.dev, &adf_interface_attrs[i]);
+	adf_obj_sysfs_destroy(&intf->base);
+}
+
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng)
+{
+	adf_obj_sysfs_destroy(&eng->base);
+}
+
+int adf_sysfs_init(void)
+{
+	struct class *class;
+	int ret;
+
+	class = class_create(THIS_MODULE, "adf");
+	if (IS_ERR(class)) {
+		ret = PTR_ERR(class);
+		pr_err("%s: creating class failed: %d\n", __func__, ret);
+		return ret;
+	}
+
+	ret = register_chrdev(0, "adf", &adf_fops);
+	if (ret < 0) {
+		pr_err("%s: registering device failed: %d\n", __func__, ret);
+		goto err_chrdev;
+	}
+
+	adf_class = class;
+	adf_major = ret;
+	return 0;
+
+err_chrdev:
+	class_destroy(adf_class);
+	return ret;
+}
+
+void adf_sysfs_destroy(void)
+{
+	idr_destroy(&adf_minors);
+	class_destroy(adf_class);
+}
diff --git a/drivers/video/adf/adf_sysfs.h b/drivers/video/adf/adf_sysfs.h
new file mode 100644
index 0000000..0613ac3
--- /dev/null
+++ b/drivers/video/adf/adf_sysfs.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __VIDEO_ADF_ADF_SYSFS_H
+#define __VIDEO_ADF_ADF_SYSFS_H
+
+struct adf_device;
+struct adf_interface;
+struct adf_overlay_engine;
+
+int adf_device_sysfs_init(struct adf_device *dev);
+void adf_device_sysfs_destroy(struct adf_device *dev);
+int adf_interface_sysfs_init(struct adf_interface *intf);
+void adf_interface_sysfs_destroy(struct adf_interface *intf);
+int adf_overlay_engine_sysfs_init(struct adf_overlay_engine *eng);
+void adf_overlay_engine_sysfs_destroy(struct adf_overlay_engine *eng);
+struct adf_obj *adf_obj_sysfs_find(int minor);
+
+int adf_sysfs_init(void);
+void adf_sysfs_destroy(void);
+
+#endif /* __VIDEO_ADF_ADF_SYSFS_H */
diff --git a/drivers/video/adf/adf_trace.h b/drivers/video/adf/adf_trace.h
new file mode 100644
index 0000000..3cb2a84
--- /dev/null
+++ b/drivers/video/adf/adf_trace.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM adf
+
+#if !defined(__VIDEO_ADF_ADF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VIDEO_ADF_ADF_TRACE_H
+
+#include <linux/tracepoint.h>
+#include <video/adf.h>
+
+TRACE_EVENT(adf_event,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+TRACE_EVENT(adf_event_enable,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+TRACE_EVENT(adf_event_disable,
+	TP_PROTO(struct adf_obj *obj, enum adf_event_type type),
+	TP_ARGS(obj, type),
+
+	TP_STRUCT__entry(
+		__string(name, obj->name)
+		__field(enum adf_event_type, type)
+		__array(char, type_str, 32)
+	),
+	TP_fast_assign(
+		__assign_str(name, obj->name);
+		__entry->type = type;
+		strlcpy(__entry->type_str, adf_event_type_str(obj, type),
+				sizeof(__entry->type_str));
+	),
+	TP_printk("obj=%s type=%u (%s)",
+			__get_str(name),
+			__entry->type,
+			__entry->type_str)
+);
+
+#endif /* __VIDEO_ADF_ADF_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE adf_trace
+#include <trace/define_trace.h>
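
Reviewer's note (illustrative, not part of the patch): adf_trace.h follows the usual TRACE_EVENT() pattern, so exactly one compilation unit in the driver defines CREATE_TRACE_POINTS before including it, and the rest of the core calls the generated trace_adf_event*() helpers. A sketch follows; the ADF_EVENT_VSYNC constant is assumed from the ADF uapi header added elsewhere in this series.

    /* In one .c file of the ADF core (sketch): */
    #define CREATE_TRACE_POINTS
    #include "adf_trace.h"

    static void example_notify_vsync(struct adf_obj *obj)
    {
        /* Emits the "adf_event" tracepoint defined above. */
        trace_adf_event(obj, ADF_EVENT_VSYNC);
    }
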
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index 2e30db1..fa13fa8 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -18,6 +18,8 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/platform_data/ds2482.h>
 #include <asm/delay.h>
 
 #include "../w1.h"
@@ -97,7 +99,8 @@
 static int ds2482_probe(struct i2c_client *client,
 			const struct i2c_device_id *id);
 static int ds2482_remove(struct i2c_client *client);
-
+static int ds2482_suspend(struct device *dev);
+static int ds2482_resume(struct device *dev);
 
 /**
  * Driver data (common to all clients)
@@ -108,9 +111,15 @@
 };
 MODULE_DEVICE_TABLE(i2c, ds2482_id);
 
+static const struct dev_pm_ops ds2482_pm_ops = {
+	.suspend = ds2482_suspend,
+	.resume = ds2482_resume,
+};
+
 static struct i2c_driver ds2482_driver = {
 	.driver = {
 		.name	= "ds2482",
+		.pm = &ds2482_pm_ops,
 	},
 	.probe		= ds2482_probe,
 	.remove		= ds2482_remove,
@@ -132,6 +141,7 @@
 struct ds2482_data {
 	struct i2c_client	*client;
 	struct mutex		access_lock;
+	int			slpz_gpio;
 
 	/* 1-wire interface(s) */
 	int			w1_count;	/* 1 or 8 */
@@ -460,11 +470,31 @@
 	return retval;
 }
 
+static int ds2482_suspend(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds2482_data *data = i2c_get_clientdata(client);
+
+	if (data->slpz_gpio >= 0)
+		gpio_set_value(data->slpz_gpio, 0);
+	return 0;
+}
+
+static int ds2482_resume(struct device *dev)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct ds2482_data *data = i2c_get_clientdata(client);
+
+	if (data->slpz_gpio >= 0)
+		gpio_set_value(data->slpz_gpio, 1);
+	return 0;
+}
 
 static int ds2482_probe(struct i2c_client *client,
 			const struct i2c_device_id *id)
 {
 	struct ds2482_data *data;
+	struct ds2482_platform_data *pdata;
 	int err = -ENODEV;
 	int temp1;
 	int idx;
@@ -531,6 +561,16 @@
 		}
 	}
 
+	pdata = client->dev.platform_data;
+	data->slpz_gpio = pdata ? pdata->slpz_gpio : -1;
+
+	if (data->slpz_gpio >= 0) {
+		err = gpio_request_one(data->slpz_gpio, GPIOF_OUT_INIT_HIGH,
+				       "ds2482.slpz");
+		if (err < 0)
+			goto exit_w1_remove;
+	}
+
 	return 0;
 
 exit_w1_remove:
@@ -555,6 +595,11 @@
 			w1_remove_master_device(&data->w1_ch[idx].w1_bm);
 	}
 
+	if (data->slpz_gpio >= 0) {
+		gpio_set_value(data->slpz_gpio, 0);
+		gpio_free(data->slpz_gpio);
+	}
+
 	/* Free the memory */
 	kfree(data);
 	return 0;
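
Reviewer's note (illustrative, not part of the patch): the new suspend/resume hooks only toggle a SLPZ GPIO when platform data provides one; the slpz_gpio field name is taken from the probe code above, and linux/platform_data/ds2482.h is added elsewhere in this series. A hedged board-file sketch with placeholder GPIO number and I2C address:

    #include <linux/i2c.h>
    #include <linux/init.h>
    #include <linux/platform_data/ds2482.h>

    /* Hypothetical wiring: GPIO 42 drives the DS2482 SLPZ pin, chip at 0x18. */
    static struct ds2482_platform_data example_ds2482_pdata = {
        .slpz_gpio = 42,
    };

    static struct i2c_board_info example_ds2482_info __initdata = {
        I2C_BOARD_INFO("ds2482", 0x18),
        .platform_data = &example_ds2482_pdata,
    };
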
diff --git a/fs/Kconfig b/fs/Kconfig
index 4bd03a2..20a8d95 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -227,6 +227,7 @@
 source "fs/adfs/Kconfig"
 source "fs/affs/Kconfig"
 source "fs/ecryptfs/Kconfig"
+source "fs/sdcardfs/Kconfig"
 source "fs/hfs/Kconfig"
 source "fs/hfsplus/Kconfig"
 source "fs/befs/Kconfig"
diff --git a/fs/Makefile b/fs/Makefile
index ed2b632..f207d43 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -3,7 +3,7 @@
 #
 # 14 Sep 2000, Christoph Hellwig <hch@infradead.org>
 # Rewritten to use lists instead of if-statements.
-# 
+#
 
 obj-y :=	open.o read_write.o file_table.o super.o \
 		char_dev.o stat.o exec.o pipe.o namei.o fcntl.o \
@@ -61,7 +61,7 @@
 
 obj-$(CONFIG_PROFILING)		+= dcookies.o
 obj-$(CONFIG_DLM)		+= dlm/
- 
+
 # Do not add any filesystems before this line
 obj-$(CONFIG_FSCACHE)		+= fscache/
 obj-$(CONFIG_REISERFS_FS)	+= reiserfs/
@@ -83,6 +83,7 @@
 obj-$(CONFIG_HFSPLUS_FS)	+= hfsplus/ # Before hfs to find wrapped HFS+
 obj-$(CONFIG_HFS_FS)		+= hfs/
 obj-$(CONFIG_ECRYPT_FS)		+= ecryptfs/
+obj-$(CONFIG_SDCARD_FS)		+= sdcardfs/
 obj-$(CONFIG_VXFS_FS)		+= freevxfs/
 obj-$(CONFIG_NFS_FS)		+= nfs/
 obj-$(CONFIG_EXPORTFS)		+= exportfs/
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c7cc95..972741b 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3184,6 +3184,7 @@
 		return ERR_PTR(error);
 	return res;
 }
+EXPORT_SYMBOL(d_absolute_path);
 
 /*
  * same as __d_path but appends "(deleted)" for unlinked files.
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 10db912..dc4a34f 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -34,6 +34,7 @@
 #include <linux/mutex.h>
 #include <linux/anon_inodes.h>
 #include <linux/device.h>
+#include <linux/freezer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/mman.h>
@@ -1663,7 +1664,8 @@
 			}
 
 			spin_unlock_irqrestore(&ep->lock, flags);
-			if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
+			if (!freezable_schedule_hrtimeout_range(to, slack,
+								HRTIMER_MODE_ABS))
 				timed_out = 1;
 
 			spin_lock_irqsave(&ep->lock, flags);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index a8a750f..20ee0e4 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2450,7 +2450,8 @@
 		ext4_group_t i, struct ext4_group_desc *desc);
 extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
 				ext4_fsblk_t block, unsigned long count);
-extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
+extern int ext4_trim_fs(struct super_block *, struct fstrim_range *,
+				unsigned long blkdev_flags);
 
 /* inode.c */
 int ext4_inode_is_fast_symlink(struct inode *inode);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index f74d5ee..bdef750 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -18,6 +18,7 @@
 #include "ext4.h"
 #include "xattr.h"
 #include "truncate.h"
+#include <trace/events/android_fs.h>
 
 #define EXT4_XATTR_SYSTEM_DATA	"data"
 #define EXT4_MIN_INLINE_DATA_SIZE	((sizeof(__le32) * EXT4_N_BLOCKS))
@@ -500,6 +501,9 @@
 		return -EAGAIN;
 	}
 
+	trace_android_fs_dataread_start(inode, page_offset(page), PAGE_SIZE,
+					current->pid, current->comm);
+
 	/*
 	 * Current inline data can only exist in the 1st page,
 	 * So for all the other pages, just set them uptodate.
@@ -511,6 +515,8 @@
 		SetPageUptodate(page);
 	}
 
+	trace_android_fs_dataread_end(inode, page_offset(page), PAGE_SIZE);
+
 	up_read(&EXT4_I(inode)->xattr_sem);
 
 	unlock_page(page);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9c06472..0241cab 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -44,6 +44,7 @@
 #include "truncate.h"
 
 #include <trace/events/ext4.h>
+#include <trace/events/android_fs.h>
 
 #define MPAGE_DA_EXTENT_TAIL 0x01
 
@@ -1183,6 +1184,8 @@
 	pgoff_t index;
 	unsigned from, to;
 
+	trace_android_fs_datawrite_start(inode, pos, len,
+					 current->pid, current->comm);
 	trace_ext4_write_begin(inode, pos, len, flags);
 	/*
 	 * Reserve one block more for addition to orphan list in case
@@ -1320,6 +1323,7 @@
 	int ret = 0, ret2;
 	int i_size_changed = 0;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_write_end(inode, pos, len, copied);
 	if (ext4_has_inline_data(inode)) {
 		ret = ext4_write_inline_data_end(inode, pos, len,
@@ -1419,6 +1423,7 @@
 	unsigned from, to;
 	int size_changed = 0;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_journalled_write_end(inode, pos, len, copied);
 	from = pos & (PAGE_SIZE - 1);
 	to = from + len;
@@ -2897,6 +2902,8 @@
 					len, flags, pagep, fsdata);
 	}
 	*fsdata = (void *)0;
+	trace_android_fs_datawrite_start(inode, pos, len,
+					 current->pid, current->comm);
 	trace_ext4_da_write_begin(inode, pos, len, flags);
 
 	if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -3015,6 +3022,7 @@
 		return ext4_write_end(file, mapping, pos,
 				      len, copied, page, fsdata);
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_ext4_da_write_end(inode, pos, len, copied);
 	start = pos & (PAGE_SIZE - 1);
 	end = start + copied - 1;
@@ -3587,12 +3595,31 @@
 	if (ext4_has_inline_data(inode))
 		return 0;
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (iov_iter_rw(iter) == READ))
+		trace_android_fs_dataread_start(inode, offset, count,
+						current->pid,
+						current->comm);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (iov_iter_rw(iter) == WRITE))
+		trace_android_fs_datawrite_start(inode, offset, count,
+						 current->pid,
+						 current->comm);
+
 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 	if (iov_iter_rw(iter) == READ)
 		ret = ext4_direct_IO_read(iocb, iter);
 	else
 		ret = ext4_direct_IO_write(iocb, iter);
 	trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret);
+
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (iov_iter_rw(iter) == READ))
+		trace_android_fs_dataread_end(inode, offset, count);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (iov_iter_rw(iter) == WRITE))
+		trace_android_fs_datawrite_end(inode, offset, count);
+
 	return ret;
 }
 
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bf5ae8e..6fbdf15 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -735,11 +735,13 @@
 		return err;
 	}
 
+	case FIDTRIM:
 	case FITRIM:
 	{
 		struct request_queue *q = bdev_get_queue(sb->s_bdev);
 		struct fstrim_range range;
 		int ret = 0;
+		int flags  = cmd == FIDTRIM ? BLKDEV_DISCARD_SECURE : 0;
 
 		if (!capable(CAP_SYS_ADMIN))
 			return -EPERM;
@@ -747,13 +749,15 @@
 		if (!blk_queue_discard(q))
 			return -EOPNOTSUPP;
 
+		if ((flags & BLKDEV_DISCARD_SECURE) && !blk_queue_secure_erase(q))
+			return -EOPNOTSUPP;
 		if (copy_from_user(&range, (struct fstrim_range __user *)arg,
 		    sizeof(range)))
 			return -EFAULT;
 
 		range.minlen = max((unsigned int)range.minlen,
 				   q->limits.discard_granularity);
-		ret = ext4_trim_fs(sb, &range);
+		ret = ext4_trim_fs(sb, &range, flags);
 		if (ret < 0)
 			return ret;
 
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f418f55..81e4b56 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2770,7 +2770,8 @@
 }
 
 static inline int ext4_issue_discard(struct super_block *sb,
-		ext4_group_t block_group, ext4_grpblk_t cluster, int count)
+		ext4_group_t block_group, ext4_grpblk_t cluster, int count,
+		unsigned long flags)
 {
 	ext4_fsblk_t discard_block;
 
@@ -2779,7 +2780,7 @@
 	count = EXT4_C2B(EXT4_SB(sb), count);
 	trace_ext4_discard_blocks(sb,
 			(unsigned long long) discard_block, count);
-	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0);
+	return sb_issue_discard(sb, discard_block, count, GFP_NOFS, flags);
 }
 
 /*
@@ -2801,7 +2802,7 @@
 	if (test_opt(sb, DISCARD)) {
 		err = ext4_issue_discard(sb, entry->efd_group,
 					 entry->efd_start_cluster,
-					 entry->efd_count);
+					 entry->efd_count, 0);
 		if (err && err != -EOPNOTSUPP)
 			ext4_msg(sb, KERN_WARNING, "discard request in"
 				 " group:%d block:%d count:%d failed"
@@ -4847,7 +4848,8 @@
 		 * them with group lock_held
 		 */
 		if (test_opt(sb, DISCARD)) {
-			err = ext4_issue_discard(sb, block_group, bit, count);
+			err = ext4_issue_discard(sb, block_group, bit, count,
+						 0);
 			if (err && err != -EOPNOTSUPP)
 				ext4_msg(sb, KERN_WARNING, "discard request in"
 					 " group:%d block:%d count:%lu failed"
@@ -5043,13 +5045,15 @@
  * @count:	number of blocks to TRIM
  * @group:	alloc. group we are working with
  * @e4b:	ext4 buddy for the group
+ * @blkdev_flags: flags for the block device
  *
  * Trim "count" blocks starting at "start" in the "group". To assure that no
  * one will allocate those blocks, mark it as used in buddy bitmap. This must
  * be called with under the group lock.
  */
 static int ext4_trim_extent(struct super_block *sb, int start, int count,
-			     ext4_group_t group, struct ext4_buddy *e4b)
+			    ext4_group_t group, struct ext4_buddy *e4b,
+			    unsigned long blkdev_flags)
 __releases(bitlock)
 __acquires(bitlock)
 {
@@ -5070,7 +5074,7 @@
 	 */
 	mb_mark_used(e4b, &ex);
 	ext4_unlock_group(sb, group);
-	ret = ext4_issue_discard(sb, group, start, count);
+	ret = ext4_issue_discard(sb, group, start, count, blkdev_flags);
 	ext4_lock_group(sb, group);
 	mb_free_blocks(NULL, e4b, start, ex.fe_len);
 	return ret;
@@ -5083,6 +5087,7 @@
  * @start:		first group block to examine
  * @max:		last group block to examine
  * @minblocks:		minimum extent block count
+ * @blkdev_flags:	flags for the block device
  *
  * ext4_trim_all_free walks through group's buddy bitmap searching for free
  * extents. When the free block is found, ext4_trim_extent is called to TRIM
@@ -5097,7 +5102,7 @@
 static ext4_grpblk_t
 ext4_trim_all_free(struct super_block *sb, ext4_group_t group,
 		   ext4_grpblk_t start, ext4_grpblk_t max,
-		   ext4_grpblk_t minblocks)
+		   ext4_grpblk_t minblocks, unsigned long blkdev_flags)
 {
 	void *bitmap;
 	ext4_grpblk_t next, count = 0, free_count = 0;
@@ -5130,7 +5135,8 @@
 
 		if ((next - start) >= minblocks) {
 			ret = ext4_trim_extent(sb, start,
-					       next - start, group, &e4b);
+					       next - start, group, &e4b,
+					       blkdev_flags);
 			if (ret && ret != -EOPNOTSUPP)
 				break;
 			ret = 0;
@@ -5172,6 +5178,7 @@
  * ext4_trim_fs() -- trim ioctl handle function
  * @sb:			superblock for filesystem
  * @range:		fstrim_range structure
+ * @blkdev_flags:	flags for the block device
  *
  * start:	First Byte to trim
  * len:		number of Bytes to trim from start
@@ -5180,7 +5187,8 @@
  * start to start+len. For each such a group ext4_trim_all_free function
  * is invoked to trim all free space.
  */
-int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
+int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range,
+			unsigned long blkdev_flags)
 {
 	struct ext4_group_info *grp;
 	ext4_group_t group, first_group, last_group;
@@ -5236,7 +5244,7 @@
 
 		if (grp->bb_free >= minlen) {
 			cnt = ext4_trim_all_free(sb, group, first_cluster,
-						end, minlen);
+						end, minlen, blkdev_flags);
 			if (cnt < 0) {
 				ret = cnt;
 				break;
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index a81b829..77cf54c 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -45,6 +45,7 @@
 #include <linux/cleancache.h>
 
 #include "ext4.h"
+#include <trace/events/android_fs.h>
 
 static inline bool ext4_bio_encrypted(struct bio *bio)
 {
@@ -55,6 +56,17 @@
 #endif
 }
 
+static void
+ext4_trace_read_completion(struct bio *bio)
+{
+	struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+	if (first_page != NULL)
+		trace_android_fs_dataread_end(first_page->mapping->host,
+					      page_offset(first_page),
+					      bio->bi_iter.bi_size);
+}
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -72,6 +84,9 @@
 	struct bio_vec *bv;
 	int i;
 
+	if (trace_android_fs_dataread_start_enabled())
+		ext4_trace_read_completion(bio);
+
 	if (ext4_bio_encrypted(bio)) {
 		if (bio->bi_error) {
 			fscrypt_release_ctx(bio->bi_private);
@@ -95,6 +110,24 @@
 	bio_put(bio);
 }
 
+static void
+ext4_submit_bio_read(struct bio *bio)
+{
+	if (trace_android_fs_dataread_start_enabled()) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				current->comm);
+		}
+	}
+	submit_bio(bio);
+}
+
 int ext4_mpage_readpages(struct address_space *mapping,
 			 struct list_head *pages, struct page *page,
 			 unsigned nr_pages)
@@ -235,7 +268,7 @@
 		 */
 		if (bio && (last_block_in_bio != blocks[0] - 1)) {
 		submit_and_realloc:
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (bio == NULL) {
@@ -268,14 +301,14 @@
 		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
 		     (relative_block == map.m_len)) ||
 		    (first_hole != blocks_per_page)) {
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		} else
 			last_block_in_bio = blocks[blocks_per_page - 1];
 		goto next_page;
 	confused:
 		if (bio) {
-			submit_bio(bio);
+			ext4_submit_bio_read(bio);
 			bio = NULL;
 		}
 		if (!PageUptodate(page))
@@ -288,6 +321,6 @@
 	}
 	BUG_ON(pages && !list_empty(pages));
 	if (bio)
-		submit_bio(bio);
+		ext4_submit_bio_read(bio);
 	return 0;
 }
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9ae194f..b3cf04e 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -28,6 +28,7 @@
 #include "segment.h"
 #include "trace.h"
 #include <trace/events/f2fs.h>
+#include <trace/events/android_fs.h>
 
 static void f2fs_read_end_io(struct bio *bio)
 {
@@ -1606,6 +1607,8 @@
 	block_t blkaddr = NULL_ADDR;
 	int err = 0;
 
+	trace_android_fs_datawrite_start(inode, pos, len,
+					 current->pid, current->comm);
 	trace_f2fs_write_begin(inode, pos, len, flags);
 
 	/*
@@ -1697,6 +1700,7 @@
 {
 	struct inode *inode = page->mapping->host;
 
+	trace_android_fs_datawrite_end(inode, pos, len);
 	trace_f2fs_write_end(inode, pos, len, copied);
 
 	/*
@@ -1758,6 +1762,16 @@
 
 	trace_f2fs_direct_IO_enter(inode, offset, count, rw);
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (iov_iter_rw(iter) == READ))
+		trace_android_fs_dataread_start(inode, offset,
+						count, current->pid,
+						current->comm);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (iov_iter_rw(iter) == WRITE))
+		trace_android_fs_datawrite_start(inode, offset, count,
+						 current->pid, current->comm);
+
 	down_read(&F2FS_I(inode)->dio_rwsem[rw]);
 	err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio);
 	up_read(&F2FS_I(inode)->dio_rwsem[rw]);
@@ -1769,6 +1783,13 @@
 			f2fs_write_failed(mapping, offset + count);
 	}
 
+	if (trace_android_fs_dataread_start_enabled() &&
+	    (iov_iter_rw(iter) == READ))
+		trace_android_fs_dataread_end(inode, offset, count);
+	if (trace_android_fs_datawrite_start_enabled() &&
+	    (iov_iter_rw(iter) == WRITE))
+		trace_android_fs_datawrite_end(inode, offset, count);
+
 	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);
 
 	return err;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 5f1a67f..d534f44 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -13,6 +13,7 @@
 
 #include "f2fs.h"
 #include "node.h"
+#include <trace/events/android_fs.h>
 
 bool f2fs_may_inline_data(struct inode *inode)
 {
@@ -82,14 +83,22 @@
 {
 	struct page *ipage;
 
+	trace_android_fs_dataread_start(inode, page_offset(page),
+					PAGE_SIZE, current->pid,
+					current->comm);
+
 	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
 	if (IS_ERR(ipage)) {
+		trace_android_fs_dataread_end(inode, page_offset(page),
+					      PAGE_SIZE);
 		unlock_page(page);
 		return PTR_ERR(ipage);
 	}
 
 	if (!f2fs_has_inline_data(inode)) {
 		f2fs_put_page(ipage, 1);
+		trace_android_fs_dataread_end(inode, page_offset(page),
+					      PAGE_SIZE);
 		return -EAGAIN;
 	}
 
@@ -101,6 +110,8 @@
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
 	f2fs_put_page(ipage, 1);
+	trace_android_fs_dataread_end(inode, page_offset(page),
+				      PAGE_SIZE);
 	unlock_page(page);
 	return 0;
 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 05713a5..ffec69d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -2104,7 +2104,7 @@
 	    (dirtytime && (inode->i_state & I_DIRTY_INODE)))
 		return;
 
-	if (unlikely(block_dump))
+	if (unlikely(block_dump > 1))
 		block_dump___mark_inode_dirty(inode);
 
 	spin_lock(&inode->i_lock);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 70ea57c..e920bf0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -13,12 +13,14 @@
 #include <linux/poll.h>
 #include <linux/uio.h>
 #include <linux/miscdevice.h>
+#include <linux/namei.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
 #include <linux/slab.h>
 #include <linux/pipe_fs_i.h>
 #include <linux/swap.h>
 #include <linux/splice.h>
+#include <linux/freezer.h>
 
 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
 MODULE_ALIAS("devname:fuse");
@@ -449,7 +451,9 @@
 	 * Either request is already in userspace, or it was forced.
 	 * Wait it out.
 	 */
-	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
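+	/* Use a freezable wait so this task can be frozen (e.g. during suspend) while the daemon finishes the request; loop because wait_event_freezable() may return before FR_FINISHED is set. */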
+	while (!test_bit(FR_FINISHED, &req->flags))
+		wait_event_freezable(req->waitq,
+				test_bit(FR_FINISHED, &req->flags));
 }
 
 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
@@ -1875,6 +1879,10 @@
 		cs->move_pages = 0;
 
 	err = copy_out_args(cs, &req->out, nbytes);
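+	/* A FUSE_CANONICAL_PATH reply carries a path string; resolve it into req->canonical_path for d_canonical_path(). */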
+	if (req->in.h.opcode == FUSE_CANONICAL_PATH) {
+		req->out.h.error = kern_path((char *)req->out.args[0].value, 0,
+							req->canonical_path);
+	}
 	fuse_copy_finish(cs);
 
 	spin_lock(&fpq->lock);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index b3ebe51..95f4e51 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -262,6 +262,50 @@
 	goto out;
 }
 
+/*
+ * Get the canonical path. Since we must translate to a path, this must be done
+ * in the context of the userspace daemon; however, the userspace daemon cannot
+ * look up paths on its own. Instead, we handle the lookup as a special case
+ * inside of the write request.
+ */
+static void fuse_dentry_canonical_path(const struct path *path, struct path *canonical_path) {
+	struct inode *inode = path->dentry->d_inode;
+	struct fuse_conn *fc = get_fuse_conn(inode);
+	struct fuse_req *req;
+	int err;
+	char *path_name;
+
+	req = fuse_get_req(fc, 1);
+	err = PTR_ERR(req);
+	if (IS_ERR(req))
+		goto default_path;
+
+	path_name = (char*)__get_free_page(GFP_KERNEL);
+	if (!path_name) {
+		fuse_put_request(fc, req);
+		goto default_path;
+	}
+
+	req->in.h.opcode = FUSE_CANONICAL_PATH;
+	req->in.h.nodeid = get_node_id(inode);
+	req->in.numargs = 0;
+	req->out.numargs = 1;
+	req->out.args[0].size = PATH_MAX;
+	req->out.args[0].value = path_name;
+	req->canonical_path = canonical_path;
+	req->out.argvar = 1;
+	fuse_request_send(fc, req);
+	err = req->out.h.error;
+	fuse_put_request(fc, req);
+	free_page((unsigned long)path_name);
+	if (!err)
+		return;
+default_path:
+	canonical_path->dentry = path->dentry;
+	canonical_path->mnt = path->mnt;
+	path_get(canonical_path);
+}
+
 static int invalid_nodeid(u64 nodeid)
 {
 	return !nodeid || nodeid == FUSE_ROOT_ID;
@@ -284,6 +328,7 @@
 	.d_revalidate	= fuse_dentry_revalidate,
 	.d_init		= fuse_dentry_init,
 	.d_release	= fuse_dentry_release,
+	.d_canonical_path = fuse_dentry_canonical_path,
 };
 
 const struct dentry_operations fuse_root_dentry_operations = {
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 9130794..6b30a12 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -368,6 +368,9 @@
 	/** Inode used in the request or NULL */
 	struct inode *inode;
 
+	/** Path used for completing d_canonical_path */
+	struct path *canonical_path;
+
 	/** AIO control block */
 	struct fuse_io_priv *io;
 
diff --git a/fs/mpage.c b/fs/mpage.c
index d2413af..2bb117d 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -31,6 +31,14 @@
 #include <linux/cleancache.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/android_fs.h>
+
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_datawrite_end);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_start);
+EXPORT_TRACEPOINT_SYMBOL(android_fs_dataread_end);
+
 /*
  * I/O completion handler for multipage BIOs.
  *
@@ -48,6 +56,16 @@
 	struct bio_vec *bv;
 	int i;
 
+	if (trace_android_fs_dataread_end_enabled() &&
+	    (bio_data_dir(bio) == READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL)
+			trace_android_fs_dataread_end(first_page->mapping->host,
+						      page_offset(first_page),
+						      bio->bi_iter.bi_size);
+	}
+
 	bio_for_each_segment_all(bv, bio, i) {
 		struct page *page = bv->bv_page;
 		page_endio(page, op_is_write(bio_op(bio)), bio->bi_error);
@@ -58,6 +76,18 @@
 
 static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
 {
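+	/* For reads, emit a dataread_start event keyed by the bio's first page before submitting. */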
+	if (trace_android_fs_dataread_start_enabled() && (op == REQ_OP_READ)) {
+		struct page *first_page = bio->bi_io_vec[0].bv_page;
+
+		if (first_page != NULL) {
+			trace_android_fs_dataread_start(
+				first_page->mapping->host,
+				page_offset(first_page),
+				bio->bi_iter.bi_size,
+				current->pid,
+				current->comm);
+		}
+	}
 	bio->bi_end_io = mpage_end_io;
 	bio_set_op_attrs(bio, op, op_flags);
 	guard_bio_eod(op, bio);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 69d1ea3..4dc09da 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -702,6 +702,8 @@
 	struct fsnotify_group *group;
 	struct inode *inode;
 	struct path path;
+	struct path alteredpath;
+	struct path *canonical_path = &path;
 	struct fd f;
 	int ret;
 	unsigned flags = 0;
@@ -741,13 +743,22 @@
 	if (ret)
 		goto fput_and_out;
 
+	/* support stacked filesystems */
+	if(path.dentry && path.dentry->d_op) {
+		if (path.dentry->d_op->d_canonical_path) {
+			path.dentry->d_op->d_canonical_path(&path, &alteredpath);
+			canonical_path = &alteredpath;
+			path_put(&path);
+		}
+	}
+
 	/* inode held in place by reference to path; group by fget on fd */
-	inode = path.dentry->d_inode;
+	inode = canonical_path->dentry->d_inode;
 	group = f.file->private_data;
 
 	/* create/update an inode mark */
 	ret = inotify_update_watch(group, inode, mask);
-	path_put(&path);
+	path_put(canonical_path);
 fput_and_out:
 	fdput(f);
 	return ret;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index ca651ac..c01eeaa 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2912,8 +2912,8 @@
 	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
 	ONE("oom_score",  S_IRUGO, proc_oom_score),
-	REG("oom_adj",    S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",    S_IRUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",   S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
@@ -3301,8 +3301,8 @@
 	ONE("cgroup",  S_IRUGO, proc_cgroup_show),
 #endif
 	ONE("oom_score", S_IRUGO, proc_oom_score),
-	REG("oom_adj",   S_IRUGO|S_IWUSR, proc_oom_adj_operations),
-	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
+	REG("oom_adj",   S_IRUSR, proc_oom_adj_operations),
+	REG("oom_score_adj", S_IRUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid",  S_IWUSR|S_IRUGO, proc_loginuid_operations),
 	REG("sessionid",  S_IRUGO, proc_sessionid_operations),
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 35b92d8..87cf40b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -127,6 +127,57 @@
 }
 #endif
 
+static void seq_print_vma_name(struct seq_file *m, struct vm_area_struct *vma)
+{
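+	/* The name string lives in the owning process's user memory, so it is copied out page by page with get_user_pages_remote(). */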
+	const char __user *name = vma_get_anon_name(vma);
+	struct mm_struct *mm = vma->vm_mm;
+
+	unsigned long page_start_vaddr;
+	unsigned long page_offset;
+	unsigned long num_pages;
+	unsigned long max_len = NAME_MAX;
+	int i;
+
+	page_start_vaddr = (unsigned long)name & PAGE_MASK;
+	page_offset = (unsigned long)name - page_start_vaddr;
+	num_pages = DIV_ROUND_UP(page_offset + max_len, PAGE_SIZE);
+
+	seq_puts(m, "[anon:");
+
+	for (i = 0; i < num_pages; i++) {
+		int len;
+		int write_len;
+		const char *kaddr;
+		long pages_pinned;
+		struct page *page;
+		unsigned int gup_flags = 0;
+
+		pages_pinned = get_user_pages_remote(current, mm, page_start_vaddr,
+				1, gup_flags, &page, NULL);
+		if (pages_pinned < 1) {
+			seq_puts(m, "<fault>]");
+			return;
+		}
+
+		kaddr = (const char *)kmap(page);
+		len = min(max_len, PAGE_SIZE - page_offset);
+		write_len = strnlen(kaddr + page_offset, len);
+		seq_write(m, kaddr + page_offset, write_len);
+		kunmap(page);
+		put_page(page);
+
+		/* if strnlen hit a null terminator then we're done */
+		if (write_len != len)
+			break;
+
+		max_len -= len;
+		page_offset = 0;
+		page_start_vaddr += PAGE_SIZE;
+	}
+
+	seq_putc(m, ']');
+}
+
 static void vma_stop(struct proc_maps_private *priv)
 {
 	struct mm_struct *mm = priv->mm;
@@ -347,6 +398,11 @@
 
 		if (is_stack(priv, vma))
 			name = "[stack]";
+
+		if (vma_get_anon_name(vma)) {
+			seq_pad(m, ' ');
+			seq_print_vma_name(m, vma);
+		}
 	}
 
 done:
@@ -760,6 +816,12 @@
 
 	show_map_vma(m, vma, is_pid);
 
+	if (vma_get_anon_name(vma)) {
+		seq_puts(m, "Name:           ");
+		seq_print_vma_name(m, vma);
+		seq_putc(m, '\n');
+	}
+
 	seq_printf(m,
 		   "Size:           %8lu kB\n"
 		   "Rss:            %8lu kB\n"
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 6ad831b..bd2dc34 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -549,6 +549,12 @@
 	return 0;
 }
 
+void notrace ramoops_console_write_buf(const char *buf, size_t size)
+{
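+	/* Append a console log fragment directly to the persistent RAM console zone. */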
+	struct ramoops_context *cxt = &oops_cxt;
+	persistent_ram_write(cxt->cprz, buf, size);
+}
+
 static int ramoops_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
diff --git a/fs/sdcardfs/Kconfig b/fs/sdcardfs/Kconfig
new file mode 100644
index 0000000..a1c1033
--- /dev/null
+++ b/fs/sdcardfs/Kconfig
@@ -0,0 +1,13 @@
+config SDCARD_FS
+	tristate "sdcard file system"
+	depends on CONFIGFS_FS
+	default n
+	help
+	  Sdcardfs is based on the Wrapfs file system.
+
+config SDCARD_FS_FADV_NOACTIVE
+	bool "sdcardfs fadvise noactive support"
+	depends on FADV_NOACTIVE
+	default y
+	help
+	  Sdcardfs supports fadvise noactive mode.
diff --git a/fs/sdcardfs/Makefile b/fs/sdcardfs/Makefile
new file mode 100644
index 0000000..b84fbb2
--- /dev/null
+++ b/fs/sdcardfs/Makefile
@@ -0,0 +1,7 @@
+SDCARDFS_VERSION="0.1"
+
+EXTRA_CFLAGS += -DSDCARDFS_VERSION=\"$(SDCARDFS_VERSION)\"
+
+obj-$(CONFIG_SDCARD_FS) += sdcardfs.o
+
+sdcardfs-y := dentry.o file.o inode.o main.o super.o lookup.o mmap.o packagelist.o derived_perm.o
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
new file mode 100644
index 0000000..f22de8a
--- /dev/null
+++ b/fs/sdcardfs/dentry.c
@@ -0,0 +1,185 @@
+/*
+ * fs/sdcardfs/dentry.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include "linux/ctype.h"
+
+/*
+ * returns: -ERRNO if error (returned to user)
+ *          0: tell VFS to invalidate dentry
+ *          1: dentry is valid
+ */
+static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+	int err = 1;
+	struct path parent_lower_path, lower_path;
+	struct dentry *parent_dentry = NULL;
+	struct dentry *parent_lower_dentry = NULL;
+	struct dentry *lower_cur_parent_dentry = NULL;
+	struct dentry *lower_dentry = NULL;
+
+	if (flags & LOOKUP_RCU)
+		return -ECHILD;
+
+	spin_lock(&dentry->d_lock);
+	if (IS_ROOT(dentry)) {
+		spin_unlock(&dentry->d_lock);
+		return 1;
+	}
+	spin_unlock(&dentry->d_lock);
+
+	/* check uninitialized obb_dentry and
+	 * whether the base obbpath has been changed or not */
+	if (is_obbpath_invalid(dentry)) {
+		d_drop(dentry);
+		return 0;
+	}
+
+	parent_dentry = dget_parent(dentry);
+	sdcardfs_get_lower_path(parent_dentry, &parent_lower_path);
+	sdcardfs_get_real_lower(dentry, &lower_path);
+	parent_lower_dentry = parent_lower_path.dentry;
+	lower_dentry = lower_path.dentry;
+	lower_cur_parent_dentry = dget_parent(lower_dentry);
+
+	spin_lock(&lower_dentry->d_lock);
+	if (d_unhashed(lower_dentry)) {
+		spin_unlock(&lower_dentry->d_lock);
+		d_drop(dentry);
+		err = 0;
+		goto out;
+	}
+	spin_unlock(&lower_dentry->d_lock);
+
+	if (parent_lower_dentry != lower_cur_parent_dentry) {
+		d_drop(dentry);
+		err = 0;
+		goto out;
+	}
+
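+	/* Take both d_locks in pointer order to avoid an ABBA deadlock while comparing the names. */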
+	if (dentry < lower_dentry) {
+		spin_lock(&dentry->d_lock);
+		spin_lock(&lower_dentry->d_lock);
+	} else {
+		spin_lock(&lower_dentry->d_lock);
+		spin_lock(&dentry->d_lock);
+	}
+
+	if (dentry->d_name.len != lower_dentry->d_name.len) {
+		__d_drop(dentry);
+		err = 0;
+	} else if (strncasecmp(dentry->d_name.name, lower_dentry->d_name.name,
+				dentry->d_name.len) != 0) {
+		__d_drop(dentry);
+		err = 0;
+	}
+
+	if (dentry < lower_dentry) {
+		spin_unlock(&lower_dentry->d_lock);
+		spin_unlock(&dentry->d_lock);
+	} else {
+		spin_unlock(&dentry->d_lock);
+		spin_unlock(&lower_dentry->d_lock);
+	}
+
+out:
+	dput(parent_dentry);
+	dput(lower_cur_parent_dentry);
+	sdcardfs_put_lower_path(parent_dentry, &parent_lower_path);
+	sdcardfs_put_real_lower(dentry, &lower_path);
+	return err;
+}
+
+static void sdcardfs_d_release(struct dentry *dentry)
+{
+	/* release and reset the lower paths */
+	if(has_graft_path(dentry)) {
+		sdcardfs_put_reset_orig_path(dentry);
+	}
+	sdcardfs_put_reset_lower_path(dentry);
+	free_dentry_private_data(dentry);
+	return;
+}
+
+static int sdcardfs_hash_ci(const struct dentry *dentry,
+				struct qstr *qstr)
+{
+	/*
+	 * This function is a copy of vfat_hashi.
+	 * FIXME Should we support national language?
+	 *       Refer to vfat_hashi()
+	 * struct nls_table *t = MSDOS_SB(dentry->d_sb)->nls_io;
+	 */
+	const unsigned char *name;
+	unsigned int len;
+	unsigned long hash;
+
+	name = qstr->name;
+	//len = vfat_striptail_len(qstr);
+	len = qstr->len;
+
+	hash = init_name_hash(dentry);
+	while (len--)
+		//hash = partial_name_hash(nls_tolower(t, *name++), hash);
+		hash = partial_name_hash(tolower(*name++), hash);
+	qstr->hash = end_name_hash(hash);
+
+	return 0;
+}
+
+/*
+ * Case insensitive compare of two vfat names.
+ */
+static int sdcardfs_cmp_ci(const struct dentry *dentry,
+		unsigned int len, const char *str, const struct qstr *name)
+{
+	/* This function is a copy of vfat_cmpi */
+	// FIXME Should we support national language?
+	//struct nls_table *t = MSDOS_SB(parent->d_sb)->nls_io;
+	//unsigned int alen, blen;
+
+	/* A filename cannot end in '.' or we treat it like it has none */
+	/*
+	alen = vfat_striptail_len(name);
+	blen = __vfat_striptail_len(len, str);
+	if (alen == blen) {
+		if (nls_strnicmp(t, name->name, str, alen) == 0)
+			return 0;
+	}
+	*/
+	if (name->len == len) {
+		if (strncasecmp(name->name, str, len) == 0)
+			return 0;
+	}
+	return 1;
+}
+
+static void sdcardfs_canonical_path(const struct path *path, struct path *actual_path) {
+	sdcardfs_get_real_lower(path->dentry, actual_path);
+}
+
+const struct dentry_operations sdcardfs_ci_dops = {
+	.d_revalidate	= sdcardfs_d_revalidate,
+	.d_release	= sdcardfs_d_release,
+	.d_hash 	= sdcardfs_hash_ci,
+	.d_compare	= sdcardfs_cmp_ci,
+	.d_canonical_path = sdcardfs_canonical_path,
+};
+
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
new file mode 100644
index 0000000..97b79cc
--- /dev/null
+++ b/fs/sdcardfs/derived_perm.c
@@ -0,0 +1,265 @@
+/*
+ * fs/sdcardfs/derived_perm.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+/* copy derived state from parent inode */
+static void inherit_derived_state(struct inode *parent, struct inode *child)
+{
+	struct sdcardfs_inode_info *pi = SDCARDFS_I(parent);
+	struct sdcardfs_inode_info *ci = SDCARDFS_I(child);
+
+	ci->perm = PERM_INHERIT;
+	ci->userid = pi->userid;
+	ci->d_uid = pi->d_uid;
+	ci->under_android = pi->under_android;
+}
+
+/* helper function for derived state */
+void setup_derived_state(struct inode *inode, perm_t perm,
+                        userid_t userid, uid_t uid, bool under_android)
+{
+	struct sdcardfs_inode_info *info = SDCARDFS_I(inode);
+
+	info->perm = perm;
+	info->userid = userid;
+	info->d_uid = uid;
+	info->under_android = under_android;
+}
+
+/* While renaming, there is a point where we want the path from dentry, but the name from newdentry */
+void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry)
+{
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	struct sdcardfs_inode_info *info = SDCARDFS_I(dentry->d_inode);
+	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+	appid_t appid;
+
+	/* By default, each inode inherits from its parent.
+	 * The properties are maintained on its private fields
+	 * because the inode attributes will be modified with those of
+	 * its lower inode.
+	 * The derived state will be updated on the last
+	 * stage of each system call by fix_derived_permission(inode).
+	 */
+
+	inherit_derived_state(parent->d_inode, dentry->d_inode);
+
+	/* Derive custom permissions based on parent and current node */
+	switch (parent_info->perm) {
+		case PERM_INHERIT:
+			/* Already inherited above */
+			break;
+		case PERM_PRE_ROOT:
+			/* Legacy internal layout places users at top level */
+			info->perm = PERM_ROOT;
+			info->userid = simple_strtoul(newdentry->d_name.name, NULL, 10);
+			break;
+		case PERM_ROOT:
+			/* Assume masked off by default. */
+			if (!strcasecmp(newdentry->d_name.name, "Android")) {
+				/* App-specific directories inside; let anyone traverse */
+				info->perm = PERM_ANDROID;
+				info->under_android = true;
+			}
+			break;
+		case PERM_ANDROID:
+			if (!strcasecmp(newdentry->d_name.name, "data")) {
+				/* App-specific directories inside; let anyone traverse */
+				info->perm = PERM_ANDROID_DATA;
+			} else if (!strcasecmp(newdentry->d_name.name, "obb")) {
+				/* App-specific directories inside; let anyone traverse */
+				info->perm = PERM_ANDROID_OBB;
+				/* Single OBB directory is always shared */
+			} else if (!strcasecmp(newdentry->d_name.name, "media")) {
+				/* App-specific directories inside; let anyone traverse */
+				info->perm = PERM_ANDROID_MEDIA;
+			}
+			break;
+		case PERM_ANDROID_DATA:
+		case PERM_ANDROID_OBB:
+		case PERM_ANDROID_MEDIA:
+			appid = get_appid(sbi->pkgl_id, newdentry->d_name.name);
+			if (appid != 0) {
+				info->d_uid = multiuser_get_uid(parent_info->userid, appid);
+			}
+			break;
+	}
+}
+
+void get_derived_permission(struct dentry *parent, struct dentry *dentry)
+{
+	get_derived_permission_new(parent, dentry, dentry);
+}
+
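+/* Walk the cached dentry tree under parent and refresh the derived permissions of every child. */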
+void get_derive_permissions_recursive(struct dentry *parent) {
+	struct dentry *dentry;
+	list_for_each_entry(dentry, &parent->d_subdirs, d_child) {
+		if (dentry->d_inode) {
+			inode_lock(dentry->d_inode);
+			get_derived_permission(parent, dentry);
+			fix_derived_permission(dentry->d_inode);
+			get_derive_permissions_recursive(dentry);
+			inode_unlock(dentry->d_inode);
+		}
+	}
+}
+
+/* main function for updating derived permission */
+inline void update_derived_permission_lock(struct dentry *dentry)
+{
+	struct dentry *parent;
+
+	if(!dentry || !dentry->d_inode) {
+		printk(KERN_ERR "sdcardfs: %s: invalid dentry\n", __func__);
+		return;
+	}
+	/* FIXME:
+	 * 1. need to check whether the dentry is updated or not
+	 * 2. remove the root dentry update
+	 */
+	inode_lock(dentry->d_inode);
+	if(IS_ROOT(dentry)) {
+		//setup_default_pre_root_state(dentry->d_inode);
+	} else {
+		parent = dget_parent(dentry);
+		if(parent) {
+			get_derived_permission(parent, dentry);
+			dput(parent);
+		}
+	}
+	fix_derived_permission(dentry->d_inode);
+	inode_unlock(dentry->d_inode);
+}
+
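+/* Return 1 if this dentry is a per-user "obb" directory whose lower path must be grafted onto the shared base obbpath. */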
+int need_graft_path(struct dentry *dentry)
+{
+	int ret = 0;
+	struct dentry *parent = dget_parent(dentry);
+	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+	if(parent_info->perm == PERM_ANDROID &&
+			!strcasecmp(dentry->d_name.name, "obb")) {
+
+		/* /Android/obb is the base obbpath of DERIVED_UNIFIED */
+		if(!(sbi->options.multiuser == false
+				&& parent_info->userid == 0)) {
+			ret = 1;
+		}
+	}
+	dput(parent);
+	return ret;
+}
+
+int is_obbpath_invalid(struct dentry *dent)
+{
+	int ret = 0;
+	struct sdcardfs_dentry_info *di = SDCARDFS_D(dent);
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dent->d_sb);
+	char *path_buf, *obbpath_s;
+
+	/* Check whether the base obbpath has been changed.
+	 * This routine can check an uninitialized obb dentry as well.
+	 * Regarding the uninitialized obb, refer to sdcardfs_mkdir(). */
+	spin_lock(&di->lock);
+	if(di->orig_path.dentry) {
+		if(!di->lower_path.dentry) {
+			ret = 1;
+		} else {
+			path_get(&di->lower_path);
+			//lower_parent = lock_parent(lower_path->dentry);
+
+			path_buf = kmalloc(PATH_MAX, GFP_ATOMIC);
+			if(!path_buf) {
+				ret = 1;
+				printk(KERN_ERR "sdcardfs: fail to allocate path_buf in %s.\n", __func__);
+			} else {
+				obbpath_s = d_path(&di->lower_path, path_buf, PATH_MAX);
+				if (d_unhashed(di->lower_path.dentry) ||
+					strcasecmp(sbi->obbpath_s, obbpath_s)) {
+					ret = 1;
+				}
+				kfree(path_buf);
+			}
+
+			//unlock_dir(lower_parent);
+			path_put(&di->lower_path);
+		}
+	}
+	spin_unlock(&di->lock);
+	return ret;
+}
+
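+/* Return 1 if this dentry is the base obb directory itself: the top-level "obb" in multiuser mode, or /Android/obb otherwise. */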
+int is_base_obbpath(struct dentry *dentry)
+{
+	int ret = 0;
+	struct dentry *parent = dget_parent(dentry);
+	struct sdcardfs_inode_info *parent_info= SDCARDFS_I(parent->d_inode);
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+	spin_lock(&SDCARDFS_D(dentry)->lock);
+	if (sbi->options.multiuser) {
+		if(parent_info->perm == PERM_PRE_ROOT &&
+				!strcasecmp(dentry->d_name.name, "obb")) {
+			ret = 1;
+		}
+	} else  if (parent_info->perm == PERM_ANDROID &&
+			!strcasecmp(dentry->d_name.name, "obb")) {
+		ret = 1;
+	}
+	spin_unlock(&SDCARDFS_D(dentry)->lock);
+	return ret;
+}
+
+/* The lower_path will be stored to the dentry's orig_path
+ * and the base obbpath will be copied to the lower_path variable.
+ * If an error is returned, there is no change in the lower_path.
+ * returns: -ERRNO if error (0: no error) */
+int setup_obb_dentry(struct dentry *dentry, struct path *lower_path)
+{
+	int err = 0;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	struct path obbpath;
+
+	/* A local obb dentry must have its own orig_path to support rmdir
+	 * and mkdir of itself. Usually, we expect that the sbi->obbpath
+	 * is available at this stage. */
+	sdcardfs_set_orig_path(dentry, lower_path);
+
+	err = kern_path(sbi->obbpath_s,
+			LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &obbpath);
+
+	if(!err) {
+		/* the obbpath base has been found */
+		printk(KERN_INFO "sdcardfs: the sbi->obbpath is found\n");
+		pathcpy(lower_path, &obbpath);
+	} else {
+		/* If the sbi->obbpath is not available, we could optionally
+		 * set up the lower_path with its orig_path.
+		 * But the current implementation just returns an error
+		 * because the sdcard daemon also regards this case as
+		 * a lookup failure. */
+		printk(KERN_INFO "sdcardfs: the sbi->obbpath is not available\n");
+	}
+	return err;
+}
+
+
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
new file mode 100644
index 0000000..c249fa9
--- /dev/null
+++ b/fs/sdcardfs/file.c
@@ -0,0 +1,356 @@
+/*
+ * fs/sdcardfs/file.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
+#include <linux/backing-dev.h>
+#endif
+
+static ssize_t sdcardfs_read(struct file *file, char __user *buf,
+			   size_t count, loff_t *ppos)
+{
+	int err;
+	struct file *lower_file;
+	struct dentry *dentry = file->f_path.dentry;
+#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
+	struct backing_dev_info *bdi;
+#endif
+
+	lower_file = sdcardfs_lower_file(file);
+
+#ifdef CONFIG_SDCARD_FS_FADV_NOACTIVE
+	if (file->f_mode & FMODE_NOACTIVE) {
+		if (!(lower_file->f_mode & FMODE_NOACTIVE)) {
+			bdi = lower_file->f_mapping->backing_dev_info;
+			lower_file->f_ra.ra_pages = bdi->ra_pages * 2;
+			spin_lock(&lower_file->f_lock);
+			lower_file->f_mode |= FMODE_NOACTIVE;
+			spin_unlock(&lower_file->f_lock);
+		}
+	}
+#endif
+
+	err = vfs_read(lower_file, buf, count, ppos);
+	/* update our inode atime upon a successful lower read */
+	if (err >= 0)
+		fsstack_copy_attr_atime(d_inode(dentry),
+					file_inode(lower_file));
+
+	return err;
+}
+
+static ssize_t sdcardfs_write(struct file *file, const char __user *buf,
+			    size_t count, loff_t *ppos)
+{
+	int err;
+	struct file *lower_file;
+	struct dentry *dentry = file->f_path.dentry;
+
+	/* check disk space */
+	if (!check_min_free_space(dentry, count, 0)) {
+		printk(KERN_INFO "No minimum free space.\n");
+		return -ENOSPC;
+	}
+
+	lower_file = sdcardfs_lower_file(file);
+	err = vfs_write(lower_file, buf, count, ppos);
+	/* update our inode times+sizes upon a successful lower write */
+	if (err >= 0) {
+		fsstack_copy_inode_size(d_inode(dentry),
+					file_inode(lower_file));
+		fsstack_copy_attr_times(d_inode(dentry),
+					file_inode(lower_file));
+	}
+
+	return err;
+}
+
+static int sdcardfs_readdir(struct file *file, struct dir_context *ctx)
+{
+	int err;
+	struct file *lower_file = NULL;
+	struct dentry *dentry = file->f_path.dentry;
+
+	lower_file = sdcardfs_lower_file(file);
+
+	lower_file->f_pos = file->f_pos;
+	err = iterate_dir(lower_file, ctx);
+	file->f_pos = lower_file->f_pos;
+	if (err >= 0)		/* copy the atime */
+		fsstack_copy_attr_atime(d_inode(dentry),
+					file_inode(lower_file));
+	return err;
+}
+
+static long sdcardfs_unlocked_ioctl(struct file *file, unsigned int cmd,
+				  unsigned long arg)
+{
+	long err = -ENOTTY;
+	struct file *lower_file;
+
+	lower_file = sdcardfs_lower_file(file);
+
+	/* XXX: use vfs_ioctl if/when VFS exports it */
+	if (!lower_file || !lower_file->f_op)
+		goto out;
+	if (lower_file->f_op->unlocked_ioctl)
+		err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
+
+out:
+	return err;
+}
+
+#ifdef CONFIG_COMPAT
+static long sdcardfs_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	long err = -ENOTTY;
+	struct file *lower_file;
+
+	lower_file = sdcardfs_lower_file(file);
+
+	/* XXX: use vfs_ioctl if/when VFS exports it */
+	if (!lower_file || !lower_file->f_op)
+		goto out;
+	if (lower_file->f_op->compat_ioctl)
+		err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
+
+out:
+	return err;
+}
+#endif
+
+static int sdcardfs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	int err = 0;
+	bool willwrite;
+	struct file *lower_file;
+	const struct vm_operations_struct *saved_vm_ops = NULL;
+
+	/* this might be deferred to mmap's writepage */
+	willwrite = ((vma->vm_flags | VM_SHARED | VM_WRITE) == vma->vm_flags);
+
+	/*
+	 * File systems which do not implement ->writepage may use
+	 * generic_file_readonly_mmap as their ->mmap op.  If you call
+	 * generic_file_readonly_mmap with VM_WRITE, you'd get an -EINVAL.
+	 * But we cannot call the lower ->mmap op, so we can't tell that
+	 * writeable mappings won't work.  Therefore, our only choice is to
+	 * check if the lower file system supports the ->writepage, and if
+	 * not, return EINVAL (the same error that
+	 * generic_file_readonly_mmap returns in that case).
+	 */
+	lower_file = sdcardfs_lower_file(file);
+	if (willwrite && !lower_file->f_mapping->a_ops->writepage) {
+		err = -EINVAL;
+		printk(KERN_ERR "sdcardfs: lower file system does not "
+		       "support writeable mmap\n");
+		goto out;
+	}
+
+	/*
+	 * find and save lower vm_ops.
+	 *
+	 * XXX: the VFS should have a cleaner way of finding the lower vm_ops
+	 */
+	if (!SDCARDFS_F(file)->lower_vm_ops) {
+		err = lower_file->f_op->mmap(lower_file, vma);
+		if (err) {
+			printk(KERN_ERR "sdcardfs: lower mmap failed %d\n", err);
+			goto out;
+		}
+		saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
+		err = do_munmap(current->mm, vma->vm_start,
+				vma->vm_end - vma->vm_start);
+		if (err) {
+			printk(KERN_ERR "sdcardfs: do_munmap failed %d\n", err);
+			goto out;
+		}
+	}
+
+	/*
+	 * Next 3 lines are all I need from generic_file_mmap.  I definitely
+	 * don't want its test for ->readpage which returns -ENOEXEC.
+	 */
+	file_accessed(file);
+	vma->vm_ops = &sdcardfs_vm_ops;
+
+	file->f_mapping->a_ops = &sdcardfs_aops; /* set our aops */
+	if (!SDCARDFS_F(file)->lower_vm_ops) /* save for our ->fault */
+		SDCARDFS_F(file)->lower_vm_ops = saved_vm_ops;
+
+out:
+	return err;
+}
+
+static int sdcardfs_open(struct inode *inode, struct file *file)
+{
+	int err = 0;
+	struct file *lower_file = NULL;
+	struct path lower_path;
+	struct dentry *dentry = file->f_path.dentry;
+	struct dentry *parent = dget_parent(dentry);
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	const struct cred *saved_cred = NULL;
+
+	/* don't open unhashed/deleted files */
+	if (d_unhashed(dentry)) {
+		err = -ENOENT;
+		goto out_err;
+	}
+
+	if(!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+                         "	dentry: %s, task:%s\n",
+						 __func__, dentry->d_name.name, current->comm);
+		err = -EACCES;
+		goto out_err;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(sbi, saved_cred);
+
+	file->private_data =
+		kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
+	if (!SDCARDFS_F(file)) {
+		err = -ENOMEM;
+		goto out_revert_cred;
+	}
+
+	/* open lower object and link sdcardfs's file struct to lower's */
+	sdcardfs_get_lower_path(file->f_path.dentry, &lower_path);
+	lower_file = dentry_open(&lower_path, file->f_flags, current_cred());
+	path_put(&lower_path);
+	if (IS_ERR(lower_file)) {
+		err = PTR_ERR(lower_file);
+		lower_file = sdcardfs_lower_file(file);
+		if (lower_file) {
+			sdcardfs_set_lower_file(file, NULL);
+			fput(lower_file); /* fput calls dput for lower_dentry */
+		}
+	} else {
+		sdcardfs_set_lower_file(file, lower_file);
+	}
+
+	if (err)
+		kfree(SDCARDFS_F(file));
+	else {
+		sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
+	}
+
+out_revert_cred:
+	REVERT_CRED(saved_cred);
+out_err:
+	dput(parent);
+	return err;
+}
+
+static int sdcardfs_flush(struct file *file, fl_owner_t id)
+{
+	int err = 0;
+	struct file *lower_file = NULL;
+
+	lower_file = sdcardfs_lower_file(file);
+	if (lower_file && lower_file->f_op && lower_file->f_op->flush) {
+		filemap_write_and_wait(file->f_mapping);
+		err = lower_file->f_op->flush(lower_file, id);
+	}
+
+	return err;
+}
+
+/* release all lower object references & free the file info structure */
+static int sdcardfs_file_release(struct inode *inode, struct file *file)
+{
+	struct file *lower_file;
+
+	lower_file = sdcardfs_lower_file(file);
+	if (lower_file) {
+		sdcardfs_set_lower_file(file, NULL);
+		fput(lower_file);
+	}
+
+	kfree(SDCARDFS_F(file));
+	return 0;
+}
+
+static int sdcardfs_fsync(struct file *file, loff_t start, loff_t end,
+			int datasync)
+{
+	int err;
+	struct file *lower_file;
+	struct path lower_path;
+	struct dentry *dentry = file->f_path.dentry;
+
+	err = __generic_file_fsync(file, start, end, datasync);
+	if (err)
+		goto out;
+
+	lower_file = sdcardfs_lower_file(file);
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	err = vfs_fsync_range(lower_file, start, end, datasync);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+out:
+	return err;
+}
+
+static int sdcardfs_fasync(int fd, struct file *file, int flag)
+{
+	int err = 0;
+	struct file *lower_file = NULL;
+
+	lower_file = sdcardfs_lower_file(file);
+	if (lower_file->f_op && lower_file->f_op->fasync)
+		err = lower_file->f_op->fasync(fd, lower_file, flag);
+
+	return err;
+}
+
+const struct file_operations sdcardfs_main_fops = {
+	.llseek		= generic_file_llseek,
+	.read		= sdcardfs_read,
+	.write		= sdcardfs_write,
+	.unlocked_ioctl	= sdcardfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= sdcardfs_compat_ioctl,
+#endif
+	.mmap		= sdcardfs_mmap,
+	.open		= sdcardfs_open,
+	.flush		= sdcardfs_flush,
+	.release	= sdcardfs_file_release,
+	.fsync		= sdcardfs_fsync,
+	.fasync		= sdcardfs_fasync,
+};
+
+/* trimmed directory options */
+const struct file_operations sdcardfs_dir_fops = {
+	.llseek		= generic_file_llseek,
+	.read		= generic_read_dir,
+	.iterate	= sdcardfs_readdir,
+	.unlocked_ioctl	= sdcardfs_unlocked_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl	= sdcardfs_compat_ioctl,
+#endif
+	.open		= sdcardfs_open,
+	.release	= sdcardfs_file_release,
+	.flush		= sdcardfs_flush,
+	.fsync		= sdcardfs_fsync,
+	.fasync		= sdcardfs_fasync,
+};
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
new file mode 100644
index 0000000..f95283e
--- /dev/null
+++ b/fs/sdcardfs/inode.c
@@ -0,0 +1,806 @@
+/*
+ * fs/sdcardfs/inode.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which was written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
+const struct cred * override_fsids(struct sdcardfs_sb_info* sbi)
+{
+	struct cred * cred;
+	const struct cred * old_cred;
+
+	cred = prepare_creds();
+	if (!cred)
+		return NULL;
+
+	cred->fsuid = make_kuid(&init_user_ns, sbi->options.fs_low_uid);
+	cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
+
+	old_cred = override_creds(cred);
+
+	return old_cred;
+}
+
+/* Do not directly use this function, use REVERT_CRED() instead. */
+void revert_fsids(const struct cred * old_cred)
+{
+	const struct cred * cur_cred;
+
+	cur_cred = current->cred;
+	revert_creds(old_cred);
+	put_cred(cur_cred);
+}
+
+static int sdcardfs_create(struct inode *dir, struct dentry *dentry,
+			 umode_t mode, bool want_excl)
+{
+	int err;
+	struct dentry *lower_dentry;
+	struct dentry *lower_parent_dentry = NULL;
+	struct path lower_path;
+	const struct cred *saved_cred = NULL;
+
+	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+						 "  dentry: %s, task:%s\n",
+						 __func__, dentry->d_name.name, current->comm);
+		err = -EACCES;
+		goto out_eacces;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	lower_parent_dentry = lock_parent(lower_dentry);
+
+	/* keep only the file type bits and force the permission bits to 0664 */
+	mode = (mode & S_IFMT) | 00664;
+	err = vfs_create(d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
+	if (err)
+		goto out;
+
+	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, SDCARDFS_I(dir)->userid);
+	if (err)
+		goto out;
+	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+
+out:
+	unlock_dir(lower_parent_dentry);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+	REVERT_CRED(saved_cred);
+out_eacces:
+	return err;
+}
+
+#if 0
+static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
+		       struct dentry *new_dentry)
+{
+	struct dentry *lower_old_dentry;
+	struct dentry *lower_new_dentry;
+	struct dentry *lower_dir_dentry;
+	u64 file_size_save;
+	int err;
+	struct path lower_old_path, lower_new_path;
+
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
+
+	file_size_save = i_size_read(d_inode(old_dentry));
+	sdcardfs_get_lower_path(old_dentry, &lower_old_path);
+	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
+	lower_old_dentry = lower_old_path.dentry;
+	lower_new_dentry = lower_new_path.dentry;
+	lower_dir_dentry = lock_parent(lower_new_dentry);
+
+	err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
+		       lower_new_dentry, NULL);
+	if (err || !d_inode(lower_new_dentry))
+		goto out;
+
+	err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
+	if (err)
+		goto out;
+	fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
+	fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
+	set_nlink(d_inode(old_dentry),
+		  sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
+	i_size_write(d_inode(new_dentry), file_size_save);
+out:
+	unlock_dir(lower_dir_dentry);
+	sdcardfs_put_lower_path(old_dentry, &lower_old_path);
+	sdcardfs_put_lower_path(new_dentry, &lower_new_path);
+	REVERT_CRED();
+	return err;
+}
+#endif
+
+static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int err;
+	struct dentry *lower_dentry;
+	struct inode *lower_dir_inode = sdcardfs_lower_inode(dir);
+	struct dentry *lower_dir_dentry;
+	struct path lower_path;
+	const struct cred *saved_cred = NULL;
+
+	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+						 "  dentry: %s, task:%s\n",
+						 __func__, dentry->d_name.name, current->comm);
+		err = -EACCES;
+		goto out_eacces;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	dget(lower_dentry);
+	lower_dir_dentry = lock_parent(lower_dentry);
+
+	err = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
+
+	/*
+	 * Note: unlinking on top of NFS can cause silly-renamed files.
+	 * Trying to delete such files results in EBUSY from NFS
+	 * below.  Silly-renamed files will get deleted by NFS later on, so
+	 * we just need to detect them here and treat such EBUSY errors as
+	 * if the upper file was successfully deleted.
+	 */
+	if (err == -EBUSY && lower_dentry->d_flags & DCACHE_NFSFS_RENAMED)
+		err = 0;
+	if (err)
+		goto out;
+	fsstack_copy_attr_times(dir, lower_dir_inode);
+	fsstack_copy_inode_size(dir, lower_dir_inode);
+	set_nlink(d_inode(dentry),
+		  sdcardfs_lower_inode(d_inode(dentry))->i_nlink);
+	d_inode(dentry)->i_ctime = dir->i_ctime;
+	d_drop(dentry); /* this is needed, else LTP fails (VFS won't do it) */
+out:
+	unlock_dir(lower_dir_dentry);
+	dput(lower_dentry);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+	REVERT_CRED(saved_cred);
+out_eacces:
+	return err;
+}
+
+#if 0
+static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
+			  const char *symname)
+{
+	int err;
+	struct dentry *lower_dentry;
+	struct dentry *lower_parent_dentry = NULL;
+	struct path lower_path;
+
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	lower_parent_dentry = lock_parent(lower_dentry);
+
+	err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
+	if (err)
+		goto out;
+	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
+	if (err)
+		goto out;
+	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+
+out:
+	unlock_dir(lower_parent_dentry);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+	REVERT_CRED();
+	return err;
+}
+#endif
+
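+/* Create an empty file at abs_path unless it already exists; used to drop .nomedia marker files. */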
+static int touch(char *abs_path, mode_t mode) {
+	struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
+	if (IS_ERR(filp)) {
+		if (PTR_ERR(filp) == -EEXIST) {
+			return 0;
+		}
+		else {
+			printk(KERN_ERR "sdcardfs: failed to open(%s): %ld\n",
+						abs_path, PTR_ERR(filp));
+			return PTR_ERR(filp);
+		}
+	}
+	filp_close(filp, current->files);
+	return 0;
+}
+
+static int sdcardfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+	int err;
+	int make_nomedia_in_obb = 0;
+	struct dentry *lower_dentry;
+	struct dentry *lower_parent_dentry = NULL;
+	struct path lower_path;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+	const struct cred *saved_cred = NULL;
+	struct sdcardfs_inode_info *pi = SDCARDFS_I(dir);
+	char *page_buf;
+	char *nomedia_dir_name;
+	char *nomedia_fullpath;
+	int fullpath_namelen;
+	int touch_err = 0;
+
+	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+						 "  dentry: %s, task:%s\n",
+						 __func__, dentry->d_name.name, current->comm);
+		err = -EACCES;
+		goto out_eacces;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+	/* check disk space */
+	if (!check_min_free_space(dentry, 0, 1)) {
+		printk(KERN_INFO "sdcardfs: No minimum free space.\n");
+		err = -ENOSPC;
+		goto out_revert;
+	}
+
+	/* the lower_dentry is negative here */
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	lower_parent_dentry = lock_parent(lower_dentry);
+
+	/* keep only the file type bits and force the permission bits to 0775 */
+	mode = (mode & S_IFMT) | 00775;
+	err = vfs_mkdir(d_inode(lower_parent_dentry), lower_dentry, mode);
+
+	if (err)
+		goto out;
+
+	/* if it is a local obb dentry, setup it with the base obbpath */
+	if(need_graft_path(dentry)) {
+
+		err = setup_obb_dentry(dentry, &lower_path);
+		if(err) {
+			/* if the sbi->obbpath is not available, the lower_path won't be
+			 * changed by setup_obb_dentry() but the lower path is saved to
+			 * its orig_path. this dentry will be revalidated later.
+			 * but now, the lower_path should be NULL */
+			sdcardfs_put_reset_lower_path(dentry);
+
+			/* the newly created lower path which saved to its orig_path or
+			 * the lower_path is the base obbpath.
+			 * therefore, an additional path_get is required */
+			path_get(&lower_path);
+		} else
+			make_nomedia_in_obb = 1;
+	}
+
+	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path, pi->userid);
+	if (err)
+		goto out;
+
+	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+	/* update number of links on parent directory */
+	set_nlink(dir, sdcardfs_lower_inode(dir)->i_nlink);
+
+	if ((!sbi->options.multiuser) && (!strcasecmp(dentry->d_name.name, "obb"))
+		&& (pi->perm == PERM_ANDROID) && (pi->userid == 0))
+		make_nomedia_in_obb = 1;
+
+	/* When creating /Android/data and /Android/obb, mark them as .nomedia */
+	if (make_nomedia_in_obb ||
+		((pi->perm == PERM_ANDROID) && (!strcasecmp(dentry->d_name.name, "data")))) {
+
+		page_buf = (char *)__get_free_page(GFP_KERNEL);
+		if (!page_buf) {
+			printk(KERN_ERR "sdcardfs: failed to allocate page buf\n");
+			goto out;
+		}
+
+		nomedia_dir_name = d_absolute_path(&lower_path, page_buf, PAGE_SIZE);
+		if (IS_ERR(nomedia_dir_name)) {
+			free_page((unsigned long)page_buf);
+			printk(KERN_ERR "sdcardfs: failed to get .nomedia dir name\n");
+			goto out;
+		}
+
+		fullpath_namelen = page_buf + PAGE_SIZE - nomedia_dir_name - 1;
+		fullpath_namelen += strlen("/.nomedia");
+		nomedia_fullpath = kzalloc(fullpath_namelen + 1, GFP_KERNEL);
+		if (!nomedia_fullpath) {
+			free_page((unsigned long)page_buf);
+			printk(KERN_ERR "sdcardfs: failed to allocate .nomedia fullpath buf\n");
+			goto out;
+		}
+
+		strcpy(nomedia_fullpath, nomedia_dir_name);
+		free_page((unsigned long)page_buf);
+		strcat(nomedia_fullpath, "/.nomedia");
+		touch_err = touch(nomedia_fullpath, 0664);
+		if (touch_err) {
+			printk(KERN_ERR "sdcardfs: failed to touch(%s): %d\n",
+							nomedia_fullpath, touch_err);
+			kfree(nomedia_fullpath);
+			goto out;
+		}
+		kfree(nomedia_fullpath);
+	}
+out:
+	unlock_dir(lower_parent_dentry);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+out_revert:
+	REVERT_CRED(saved_cred);
+out_eacces:
+	return err;
+}
+
+static int sdcardfs_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct dentry *lower_dentry;
+	struct dentry *lower_dir_dentry;
+	int err;
+	struct path lower_path;
+	const struct cred *saved_cred = NULL;
+
+	if(!check_caller_access_to_name(dir, dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+						 "  dentry: %s, task:%s\n",
+						 __func__, dentry->d_name.name, current->comm);
+		err = -EACCES;
+		goto out_eacces;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+	/* sdcardfs_get_real_lower(): when removing a user's obb dentry,
+	 * the dentry on the original path should be deleted. */
+	sdcardfs_get_real_lower(dentry, &lower_path);
+
+	lower_dentry = lower_path.dentry;
+	lower_dir_dentry = lock_parent(lower_dentry);
+
+	err = vfs_rmdir(d_inode(lower_dir_dentry), lower_dentry);
+	if (err)
+		goto out;
+
+	d_drop(dentry);	/* drop our dentry on success (why not VFS's job?) */
+	if (d_inode(dentry))
+		clear_nlink(d_inode(dentry));
+	fsstack_copy_attr_times(dir, d_inode(lower_dir_dentry));
+	fsstack_copy_inode_size(dir, d_inode(lower_dir_dentry));
+	set_nlink(dir, d_inode(lower_dir_dentry)->i_nlink);
+
+out:
+	unlock_dir(lower_dir_dentry);
+	sdcardfs_put_real_lower(dentry, &lower_path);
+	REVERT_CRED(saved_cred);
+out_eacces:
+	return err;
+}
+
+#if 0
+static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
+			dev_t dev)
+{
+	int err;
+	struct dentry *lower_dentry;
+	struct dentry *lower_parent_dentry = NULL;
+	struct path lower_path;
+
+	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	lower_parent_dentry = lock_parent(lower_dentry);
+
+	err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
+	if (err)
+		goto out;
+
+	err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
+	if (err)
+		goto out;
+	fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
+	fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
+
+out:
+	unlock_dir(lower_parent_dentry);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+	REVERT_CRED();
+	return err;
+}
+#endif
+
+/*
+ * The locking rules in sdcardfs_rename are complex.  We could use a simpler
+ * superblock-level name-space lock for renames and copy-ups.
+ */
+static int sdcardfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+			 struct inode *new_dir, struct dentry *new_dentry,
+			 unsigned int flags)
+{
+	int err = 0;
+	struct dentry *lower_old_dentry = NULL;
+	struct dentry *lower_new_dentry = NULL;
+	struct dentry *lower_old_dir_dentry = NULL;
+	struct dentry *lower_new_dir_dentry = NULL;
+	struct dentry *trap = NULL;
+	struct dentry *new_parent = NULL;
+	struct path lower_old_path, lower_new_path;
+	const struct cred *saved_cred = NULL;
+
+	if (flags)
+		return -EINVAL;
+
+	if(!check_caller_access_to_name(old_dir, old_dentry->d_name.name) ||
+		!check_caller_access_to_name(new_dir, new_dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+						 "  new_dentry: %s, task:%s\n",
+						 __func__, new_dentry->d_name.name, current->comm);
+		err = -EACCES;
+		goto out_eacces;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred);
+
+	sdcardfs_get_real_lower(old_dentry, &lower_old_path);
+	sdcardfs_get_lower_path(new_dentry, &lower_new_path);
+	lower_old_dentry = lower_old_path.dentry;
+	lower_new_dentry = lower_new_path.dentry;
+	lower_old_dir_dentry = dget_parent(lower_old_dentry);
+	lower_new_dir_dentry = dget_parent(lower_new_dentry);
+
+	trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+	/* source should not be ancestor of target */
+	if (trap == lower_old_dentry) {
+		err = -EINVAL;
+		goto out;
+	}
+	/* target should not be ancestor of source */
+	if (trap == lower_new_dentry) {
+		err = -ENOTEMPTY;
+		goto out;
+	}
+
+	err = vfs_rename(d_inode(lower_old_dir_dentry), lower_old_dentry,
+			 d_inode(lower_new_dir_dentry), lower_new_dentry,
+			 NULL, 0);
+	if (err)
+		goto out;
+
+	/* Copy attrs from lower dir, but i_uid/i_gid */
+	sdcardfs_copy_and_fix_attrs(new_dir, d_inode(lower_new_dir_dentry));
+	fsstack_copy_inode_size(new_dir, d_inode(lower_new_dir_dentry));
+
+	if (new_dir != old_dir) {
+		sdcardfs_copy_and_fix_attrs(old_dir, d_inode(lower_old_dir_dentry));
+		fsstack_copy_inode_size(old_dir, d_inode(lower_old_dir_dentry));
+
+		/* update the derived permission of the old_dentry
+		 * with its new parent
+		 */
+		new_parent = dget_parent(new_dentry);
+		if(new_parent) {
+			if(d_inode(old_dentry)) {
+				update_derived_permission_lock(old_dentry);
+			}
+			dput(new_parent);
+		}
+	}
+	/* At this point, not all dentry information has been moved, so
+	 * we pass along new_dentry for the name.*/
+	inode_lock(d_inode(old_dentry));
+	get_derived_permission_new(new_dentry->d_parent, old_dentry, new_dentry);
+	fix_derived_permission(d_inode(old_dentry));
+	get_derive_permissions_recursive(old_dentry);
+	inode_unlock(d_inode(old_dentry));
+out:
+	unlock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+	dput(lower_old_dir_dentry);
+	dput(lower_new_dir_dentry);
+	sdcardfs_put_real_lower(old_dentry, &lower_old_path);
+	sdcardfs_put_lower_path(new_dentry, &lower_new_path);
+	REVERT_CRED(saved_cred);
+out_eacces:
+	return err;
+}
+
+#if 0
+static int sdcardfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
+{
+	int err;
+	struct dentry *lower_dentry;
+	struct path lower_path;
+	/* XXX readlink does not require overriding credentials */
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	if (!d_inode(lower_dentry)->i_op ||
+	    !d_inode(lower_dentry)->i_op->readlink) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = d_inode(lower_dentry)->i_op->readlink(lower_dentry,
+						    buf, bufsiz);
+	if (err < 0)
+		goto out;
+	fsstack_copy_attr_atime(d_inode(dentry), d_inode(lower_dentry));
+
+out:
+	sdcardfs_put_lower_path(dentry, &lower_path);
+	return err;
+}
+#endif
+
+#if 0
+static const char *sdcardfs_follow_link(struct dentry *dentry, void **cookie)
+{
+	char *buf;
+	int len = PAGE_SIZE, err;
+	mm_segment_t old_fs;
+
+	/* This is freed by the put_link method assuming a successful call. */
+	buf = kmalloc(len, GFP_KERNEL);
+	if (!buf) {
+		buf = ERR_PTR(-ENOMEM);
+		return buf;
+	}
+
+	/* read the symlink, and then we will follow it */
+	old_fs = get_fs();
+	set_fs(KERNEL_DS);
+	err = sdcardfs_readlink(dentry, buf, len);
+	set_fs(old_fs);
+	if (err < 0) {
+		kfree(buf);
+		buf = ERR_PTR(err);
+	} else {
+		buf[err] = '\0';
+	}
+	return *cookie = buf;
+}
+#endif
+
+static int sdcardfs_permission(struct inode *inode, int mask)
+{
+	int err;
+
+	/*
+	 * Permission check on sdcardfs inode.
+	 * Calling process should have AID_SDCARD_RW permission
+	 */
+	err = generic_permission(inode, mask);
+
+	/* XXX
+	 * The original sdcardfs code calls inode_permission(lower_inode, ...)
+	 * to check the inode permission. But doing that here seems to be
+	 * duplicated work, because the functions called after this one,
+	 * such as vfs_create, vfs_unlink and vfs_rename, do exactly the
+	 * same thing, i.e., they call inode_permission().
+	 * So we just let them do it.
+	 * If there is any security hole, just uncomment the following if block.
+	 */
+#if 0
+	if (!err) {
+		/*
+		 * Permission check on lower_inode(=EXT4).
+		 * we check it with AID_MEDIA_RW permission
+		 */
+		struct inode *lower_inode;
+		OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
+
+		lower_inode = sdcardfs_lower_inode(inode);
+		err = inode_permission(lower_inode, mask);
+
+		REVERT_CRED();
+	}
+#endif
+	return err;
+
+}
+
+static int sdcardfs_setattr(struct dentry *dentry, struct iattr *ia)
+{
+	int err;
+	struct dentry *lower_dentry;
+	struct inode *inode;
+	struct inode *lower_inode;
+	struct path lower_path;
+	struct iattr lower_ia;
+	struct dentry *parent;
+
+	inode = d_inode(dentry);
+
+	/*
+	 * Check if user has permission to change dentry.  We don't check if
+	 * this user can change the lower inode: that should happen when
+	 * calling notify_change on the lower inode.
+	 */
+	err = setattr_prepare(dentry, ia);
+
+	/* no vfs_XXX operations required, cred overriding will be skipped. wj*/
+	if (!err) {
+		/* check the Android group ID */
+		parent = dget_parent(dentry);
+		if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
+			printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+							 "  dentry: %s, task:%s\n",
+							 __func__, dentry->d_name.name, current->comm);
+			err = -EACCES;
+		}
+		dput(parent);
+	}
+
+	if (err)
+		goto out_err;
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	lower_inode = sdcardfs_lower_inode(inode);
+
+	/* prepare our own lower struct iattr (with the lower file) */
+	memcpy(&lower_ia, ia, sizeof(lower_ia));
+	if (ia->ia_valid & ATTR_FILE)
+		lower_ia.ia_file = sdcardfs_lower_file(ia->ia_file);
+
+	lower_ia.ia_valid &= ~(ATTR_UID | ATTR_GID | ATTR_MODE);
+
+	/*
+	 * If shrinking, first truncate upper level to cancel writing dirty
+	 * pages beyond the new eof; and also if its maxbytes is more
+	 * limiting (fail with -EFBIG before making any change to the lower
+	 * level).  There is no need to vmtruncate the upper level
+	 * afterwards in the other cases: we fsstack_copy_inode_size from
+	 * the lower level.
+	 */
+	if (current->mm)
+		down_write(&current->mm->mmap_sem);
+	if (ia->ia_valid & ATTR_SIZE) {
+		err = inode_newsize_ok(inode, ia->ia_size);
+		if (err) {
+			if (current->mm)
+				up_write(&current->mm->mmap_sem);
+			goto out;
+		}
+		truncate_setsize(inode, ia->ia_size);
+	}
+
+	/*
+	 * mode change is for clearing setuid/setgid bits. Allow lower fs
+	 * to interpret this in its own way.
+	 */
+	if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+		lower_ia.ia_valid &= ~ATTR_MODE;
+
+	/* notify the (possibly copied-up) lower inode */
+	/*
+	 * Note: we use d_inode(lower_dentry), because lower_inode may be
+	 * unlinked (no inode->i_sb and i_ino==0).  This happens if someone
+	 * tries to open(), unlink(), then ftruncate() a file.
+	 */
+	inode_lock(d_inode(lower_dentry));
+	err = notify_change(lower_dentry, &lower_ia, /* note: lower_ia */
+			NULL);
+	inode_unlock(d_inode(lower_dentry));
+	if (current->mm)
+		up_write(&current->mm->mmap_sem);
+	if (err)
+		goto out;
+
+	/* get attributes from the lower inode and update derived permissions */
+	sdcardfs_copy_and_fix_attrs(inode, lower_inode);
+
+	/*
+	 * Not running fsstack_copy_inode_size(inode, lower_inode), because
+	 * VFS should update our inode size, and notify_change on
+	 * lower_inode should update its size.
+	 */
+
+out:
+	sdcardfs_put_lower_path(dentry, &lower_path);
+out_err:
+	return err;
+}
+
+static int sdcardfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+		 struct kstat *stat)
+{
+	struct dentry *lower_dentry;
+	struct inode *inode;
+	struct inode *lower_inode;
+	struct path lower_path;
+	struct dentry *parent;
+
+	parent = dget_parent(dentry);
+	if(!check_caller_access_to_name(d_inode(parent), dentry->d_name.name)) {
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+						 "  dentry: %s, task:%s\n",
+						 __func__, dentry->d_name.name, current->comm);
+		dput(parent);
+		return -EACCES;
+	}
+	dput(parent);
+
+	inode = d_inode(dentry);
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	lower_dentry = lower_path.dentry;
+	lower_inode = sdcardfs_lower_inode(inode);
+
+	sdcardfs_copy_and_fix_attrs(inode, lower_inode);
+	fsstack_copy_inode_size(inode, lower_inode);
+
+	generic_fillattr(inode, stat);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+	return 0;
+}
+
+const struct inode_operations sdcardfs_symlink_iops = {
+	.permission	= sdcardfs_permission,
+	.setattr	= sdcardfs_setattr,
+	/* XXX The following operations are implemented,
+	 *     but FUSE (the sdcard daemon) and FAT do not support them.
+	 *     These methods are *NOT* thoroughly tested.
+	.readlink	= sdcardfs_readlink,
+	.follow_link	= sdcardfs_follow_link,
+	.put_link	= kfree_put_link,
+	 */
+};
+
+const struct inode_operations sdcardfs_dir_iops = {
+	.create		= sdcardfs_create,
+	.lookup		= sdcardfs_lookup,
+#if 0
+	.permission	= sdcardfs_permission,
+#endif
+	.unlink		= sdcardfs_unlink,
+	.mkdir		= sdcardfs_mkdir,
+	.rmdir		= sdcardfs_rmdir,
+	.rename		= sdcardfs_rename,
+	.setattr	= sdcardfs_setattr,
+	.getattr	= sdcardfs_getattr,
+	/* XXX The following operations are implemented,
+	 *     but FUSE (the sdcard daemon) and FAT do not support them.
+	 *     These methods are *NOT* thoroughly tested.
+	.symlink	= sdcardfs_symlink,
+	.link		= sdcardfs_link,
+	.mknod		= sdcardfs_mknod,
+	 */
+};
+
+const struct inode_operations sdcardfs_main_iops = {
+	.permission	= sdcardfs_permission,
+	.setattr	= sdcardfs_setattr,
+	.getattr	= sdcardfs_getattr,
+};
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
new file mode 100644
index 0000000..2d94870
--- /dev/null
+++ b/fs/sdcardfs/lookup.c
@@ -0,0 +1,384 @@
+/*
+ * fs/sdcardfs/lookup.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include <linux/delay.h>
+
+/* The dentry cache is just so we have properly sized dentries */
+static struct kmem_cache *sdcardfs_dentry_cachep;
+
+int sdcardfs_init_dentry_cache(void)
+{
+	sdcardfs_dentry_cachep =
+		kmem_cache_create("sdcardfs_dentry",
+				  sizeof(struct sdcardfs_dentry_info),
+				  0, SLAB_RECLAIM_ACCOUNT, NULL);
+
+	return sdcardfs_dentry_cachep ? 0 : -ENOMEM;
+}
+
+void sdcardfs_destroy_dentry_cache(void)
+{
+	if (sdcardfs_dentry_cachep)
+		kmem_cache_destroy(sdcardfs_dentry_cachep);
+}
+
+void free_dentry_private_data(struct dentry *dentry)
+{
+	if (!dentry || !dentry->d_fsdata)
+		return;
+	kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
+	dentry->d_fsdata = NULL;
+}
+
+/* allocate new dentry private data */
+int new_dentry_private_data(struct dentry *dentry)
+{
+	struct sdcardfs_dentry_info *info = SDCARDFS_D(dentry);
+
+	/* use zalloc to init dentry_info.lower_path */
+	info = kmem_cache_zalloc(sdcardfs_dentry_cachep, GFP_ATOMIC);
+	if (!info)
+		return -ENOMEM;
+
+	spin_lock_init(&info->lock);
+	dentry->d_fsdata = info;
+
+	return 0;
+}
+
+struct inode_data {
+	struct inode *lower_inode;
+	userid_t id;
+};
+
+static int sdcardfs_inode_test(struct inode *inode, void *candidate_data/*void *candidate_lower_inode*/)
+{
+	struct inode *current_lower_inode = sdcardfs_lower_inode(inode);
+	userid_t current_userid = SDCARDFS_I(inode)->userid;
+	if (current_lower_inode == ((struct inode_data *)candidate_data)->lower_inode &&
+			current_userid == ((struct inode_data *)candidate_data)->id)
+		return 1; /* found a match */
+	else
+		return 0; /* no match */
+}
+
+static int sdcardfs_inode_set(struct inode *inode, void *lower_inode)
+{
+	/* we do actual inode initialization in sdcardfs_iget */
+	return 0;
+}
+
+struct inode *sdcardfs_iget(struct super_block *sb, struct inode *lower_inode, userid_t id)
+{
+	struct sdcardfs_inode_info *info;
+	struct inode_data data;
+	struct inode *inode; /* the new inode to return */
+	int err;
+
+	data.id = id;
+	data.lower_inode = lower_inode;
+	inode = iget5_locked(sb, /* our superblock */
+			     /*
+			      * hashval: we use inode number, but we can
+			      * also use "(unsigned long)lower_inode"
+			      * instead.
+			      */
+			     lower_inode->i_ino, /* hashval */
+			     sdcardfs_inode_test,	/* inode comparison function */
+			     sdcardfs_inode_set, /* inode init function */
+			     &data); /* data passed to test+set fxns */
+	if (!inode) {
+		err = -EACCES;
+		iput(lower_inode);
+		return ERR_PTR(err);
+	}
+	/* if found a cached inode, then just return it */
+	if (!(inode->i_state & I_NEW))
+		return inode;
+
+	/* initialize new inode */
+	info = SDCARDFS_I(inode);
+
+	inode->i_ino = lower_inode->i_ino;
+	if (!igrab(lower_inode)) {
+		err = -ESTALE;
+		return ERR_PTR(err);
+	}
+	sdcardfs_set_lower_inode(inode, lower_inode);
+
+	inode->i_version++;
+
+	/* use different set of inode ops for symlinks & directories */
+	if (S_ISDIR(lower_inode->i_mode))
+		inode->i_op = &sdcardfs_dir_iops;
+	else if (S_ISLNK(lower_inode->i_mode))
+		inode->i_op = &sdcardfs_symlink_iops;
+	else
+		inode->i_op = &sdcardfs_main_iops;
+
+	/* use different set of file ops for directories */
+	if (S_ISDIR(lower_inode->i_mode))
+		inode->i_fop = &sdcardfs_dir_fops;
+	else
+		inode->i_fop = &sdcardfs_main_fops;
+
+	inode->i_mapping->a_ops = &sdcardfs_aops;
+
+	inode->i_atime.tv_sec = 0;
+	inode->i_atime.tv_nsec = 0;
+	inode->i_mtime.tv_sec = 0;
+	inode->i_mtime.tv_nsec = 0;
+	inode->i_ctime.tv_sec = 0;
+	inode->i_ctime.tv_nsec = 0;
+
+	/* properly initialize special inodes */
+	if (S_ISBLK(lower_inode->i_mode) || S_ISCHR(lower_inode->i_mode) ||
+	    S_ISFIFO(lower_inode->i_mode) || S_ISSOCK(lower_inode->i_mode))
+		init_special_inode(inode, lower_inode->i_mode,
+				   lower_inode->i_rdev);
+
+	/* all well, copy inode attributes */
+	sdcardfs_copy_and_fix_attrs(inode, lower_inode);
+	fsstack_copy_inode_size(inode, lower_inode);
+
+	unlock_new_inode(inode);
+	return inode;
+}
+
+/*
+ * Connect a sdcardfs inode dentry/inode with several lower ones.  This is
+ * the classic stackable file system "vnode interposition" action.
+ *
+ * @dentry: sdcardfs's dentry which interposes on lower one
+ * @sb: sdcardfs's super_block
+ * @lower_path: the lower path (caller does path_get/put)
+ */
+int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+		     struct path *lower_path, userid_t id)
+{
+	int err = 0;
+	struct inode *inode;
+	struct inode *lower_inode;
+	struct super_block *lower_sb;
+
+	lower_inode = lower_path->dentry->d_inode;
+	lower_sb = sdcardfs_lower_super(sb);
+
+	/* check that the lower file system didn't cross a mount point */
+	if (lower_inode->i_sb != lower_sb) {
+		err = -EXDEV;
+		goto out;
+	}
+
+	/*
+	 * We allocate our new inode below by calling sdcardfs_iget,
+	 * which will initialize some of the new inode's fields
+	 */
+
+	/* inherit lower inode number for sdcardfs's inode */
+	inode = sdcardfs_iget(sb, lower_inode, id);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out;
+	}
+
+	d_add(dentry, inode);
+	update_derived_permission_lock(dentry);
+out:
+	return err;
+}
+
+/*
+ * Main driver function for sdcardfs's lookup.
+ *
+ * Returns: NULL (ok), ERR_PTR if an error occurred.
+ * Fills in lower_parent_path with <dentry,mnt> on success.
+ */
+static struct dentry *__sdcardfs_lookup(struct dentry *dentry,
+		unsigned int flags, struct path *lower_parent_path, userid_t id)
+{
+	int err = 0;
+	struct vfsmount *lower_dir_mnt;
+	struct dentry *lower_dir_dentry = NULL;
+	struct dentry *lower_dentry;
+	const char *name;
+	struct path lower_path;
+	struct qstr this;
+	struct sdcardfs_sb_info *sbi;
+
+	sbi = SDCARDFS_SB(dentry->d_sb);
+	/* must initialize dentry operations */
+	d_set_d_op(dentry, &sdcardfs_ci_dops);
+
+	if (IS_ROOT(dentry))
+		goto out;
+
+	name = dentry->d_name.name;
+
+	/* now start the actual lookup procedure */
+	lower_dir_dentry = lower_parent_path->dentry;
+	lower_dir_mnt = lower_parent_path->mnt;
+
+	/* Use vfs_path_lookup to check if the dentry exists or not */
+	err = vfs_path_lookup(lower_dir_dentry, lower_dir_mnt, name, 0,
+				&lower_path);
+
+	/* no error: handle positive dentries */
+	if (!err) {
+		/* check if the dentry is an obb dentry;
+		 * if so, the lower_inode must be replaced with
+		 * the inode of the graft path */
+
+		if (need_graft_path(dentry)) {
+
+			/* setup_obb_dentry()
+			 * The lower_path will be stored in the dentry's orig_path,
+			 * and the base obbpath will be copied to the lower_path variable.
+			 * If an error is returned, the lower_path is left unchanged.
+			 * Returns: -ERRNO on error (0: no error) */
+			err = setup_obb_dentry(dentry, &lower_path);
+
+			if (err) {
+				/* if the sbi->obbpath is not available, we could
+				 * fall back to setting up the lower_path from orig_path,
+				 * but the current implementation just returns an error
+				 * because the sdcard daemon also regards this case as
+				 * a lookup failure. */
+				printk(KERN_INFO "sdcardfs: base obbpath is not available\n");
+				sdcardfs_put_reset_orig_path(dentry);
+				goto out;
+			}
+		}
+
+		sdcardfs_set_lower_path(dentry, &lower_path);
+		err = sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
+		if (err) /* path_put underlying path on error */
+			sdcardfs_put_reset_lower_path(dentry);
+		goto out;
+	}
+
+	/*
+	 * We don't consider ENOENT an error, and we want to return a
+	 * negative dentry.
+	 */
+	if (err && err != -ENOENT)
+		goto out;
+
+	/* instantiate a new negative dentry */
+	this.name = name;
+	this.len = strlen(name);
+	this.hash = full_name_hash(dentry, this.name, this.len);
+	lower_dentry = d_lookup(lower_dir_dentry, &this);
+	if (lower_dentry)
+		goto setup_lower;
+
+	lower_dentry = d_alloc(lower_dir_dentry, &this);
+	if (!lower_dentry) {
+		err = -ENOMEM;
+		goto out;
+	}
+	d_add(lower_dentry, NULL); /* instantiate and hash */
+
+setup_lower:
+	lower_path.dentry = lower_dentry;
+	lower_path.mnt = mntget(lower_dir_mnt);
+	sdcardfs_set_lower_path(dentry, &lower_path);
+
+	/*
+	 * If the intent is to create a file, then don't return an error, so
+	 * the VFS will continue the process of making this negative dentry
+	 * into a positive one.
+	 */
+	if (flags & (LOOKUP_CREATE|LOOKUP_RENAME_TARGET))
+		err = 0;
+
+out:
+	return ERR_PTR(err);
+}
+
+/*
+ * On success:
+ *	fills the dentry object with appropriate values and returns NULL.
+ * On failure:
+ *	returns an error pointer.
+ *
+ * @dir : parent inode.
+ * @dentry : target dentry to look up; we should set each of its fields
+ *	      (dentry->d_name is already initialized).
+ * @flags : lookup flags passed down from the VFS.
+ */
+struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
+			     unsigned int flags)
+{
+	struct dentry *ret = NULL, *parent;
+	struct path lower_parent_path;
+	int err = 0;
+	const struct cred *saved_cred = NULL;
+
+	parent = dget_parent(dentry);
+
+	if (!check_caller_access_to_name(parent->d_inode, dentry->d_name.name)) {
+		ret = ERR_PTR(-EACCES);
+		printk(KERN_INFO "%s: need to check the caller's gid in packages.list\n"
+				 "  dentry: %s, task:%s\n",
+				 __func__, dentry->d_name.name, current->comm);
+		goto out_err;
+	}
+
+	/* save current_cred and override it */
+	OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred);
+
+	sdcardfs_get_lower_path(parent, &lower_parent_path);
+
+	/* allocate dentry private data.  We free it in ->d_release */
+	err = new_dentry_private_data(dentry);
+	if (err) {
+		ret = ERR_PTR(err);
+		goto out;
+	}
+
+	ret = __sdcardfs_lookup(dentry, flags, &lower_parent_path, SDCARDFS_I(dir)->userid);
+	if (IS_ERR(ret))
+		goto out;
+	if (ret)
+		dentry = ret;
+	if (dentry->d_inode) {
+		fsstack_copy_attr_times(dentry->d_inode,
+					sdcardfs_lower_inode(dentry->d_inode));
+		/* get derived permission */
+		inode_lock(dentry->d_inode);
+		get_derived_permission(parent, dentry);
+		fix_derived_permission(dentry->d_inode);
+		inode_unlock(dentry->d_inode);
+	}
+	/* update parent directory's atime */
+	fsstack_copy_attr_atime(parent->d_inode,
+				sdcardfs_lower_inode(parent->d_inode));
+
+out:
+	sdcardfs_put_lower_path(parent, &lower_parent_path);
+	REVERT_CRED(saved_cred);
+out_err:
+	dput(parent);
+	return ret;
+}
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
new file mode 100644
index 0000000..a652228
--- /dev/null
+++ b/fs/sdcardfs/main.c
@@ -0,0 +1,402 @@
+/*
+ * fs/sdcardfs/main.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/parser.h>
+
+enum {
+	Opt_fsuid,
+	Opt_fsgid,
+	Opt_gid,
+	Opt_debug,
+	Opt_lower_fs,
+	Opt_mask,
+	Opt_multiuser,	/* may need? */
+	Opt_userid,
+	Opt_reserved_mb,
+	Opt_err,
+};
+
+static const match_table_t sdcardfs_tokens = {
+	{Opt_fsuid, "fsuid=%u"},
+	{Opt_fsgid, "fsgid=%u"},
+	{Opt_gid, "gid=%u"},
+	{Opt_debug, "debug"},
+	{Opt_mask, "mask=%u"},
+	{Opt_userid, "userid=%d"},
+	{Opt_multiuser, "multiuser"},
+	{Opt_reserved_mb, "reserved_mb=%u"},
+	{Opt_err, NULL}
+};
+
+static int parse_options(struct super_block *sb, char *options, int silent,
+				int *debug, struct sdcardfs_mount_options *opts)
+{
+	char *p;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+
+	/* by default, we use AID_MEDIA_RW as uid, gid */
+	opts->fs_low_uid = AID_MEDIA_RW;
+	opts->fs_low_gid = AID_MEDIA_RW;
+	opts->mask = 0;
+	opts->multiuser = false;
+	opts->fs_user_id = 0;
+	opts->gid = 0;
+	/* by default, 0MB is reserved */
+	opts->reserved_mb = 0;
+
+	*debug = 0;
+
+	if (!options)
+		return 0;
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
+			continue;
+
+		token = match_token(p, sdcardfs_tokens, args);
+
+		switch (token) {
+		case Opt_debug:
+			*debug = 1;
+			break;
+		case Opt_fsuid:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->fs_low_uid = option;
+			break;
+		case Opt_fsgid:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->fs_low_gid = option;
+			break;
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->gid = option;
+			break;
+		case Opt_userid:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->fs_user_id = option;
+			break;
+		case Opt_mask:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->mask = option;
+			break;
+		case Opt_multiuser:
+			opts->multiuser = true;
+			break;
+		case Opt_reserved_mb:
+			if (match_int(&args[0], &option))
+				return 0;
+			opts->reserved_mb = option;
+			break;
+		/* unknown option */
+		default:
+			if (!silent) {
+				printk(KERN_ERR "Unrecognized mount option \"%s\" or missing value\n",
+						p);
+			}
+			return -EINVAL;
+		}
+	}
+
+	if (*debug) {
+		printk(KERN_INFO "sdcardfs : options - debug:%d\n", *debug);
+		printk(KERN_INFO "sdcardfs : options - uid:%d\n",
+							opts->fs_low_uid);
+		printk(KERN_INFO "sdcardfs : options - gid:%d\n",
+							opts->fs_low_gid);
+	}
+
+	return 0;
+}
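+
+/*
+ * Example (illustrative only; the uid/gid values below are the defaults
+ * defined in sdcardfs.h and the paths are hypothetical):
+ *
+ *	mount -t sdcardfs -o fsuid=1023,fsgid=1023,gid=1015,mask=6,userid=0 \
+ *		/data/media /mnt/runtime/default/emulated
+ *
+ * Any unrecognized option makes parse_options() fail with -EINVAL, and an
+ * omitted fsuid/fsgid falls back to AID_MEDIA_RW (1023).
+ */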
+
+#if 0
+/*
+ * our custom d_alloc_root work-alike
+ *
+ * we can't use d_alloc_root if we want to use our own interpose function
+ * unchanged, so we simply call our own "fake" d_alloc_root
+ */
+static struct dentry *sdcardfs_d_alloc_root(struct super_block *sb)
+{
+	struct dentry *ret = NULL;
+
+	if (sb) {
+		static const struct qstr name = {
+			.name = "/",
+			.len = 1
+		};
+
+		ret = d_alloc(NULL, &name);
+		if (ret) {
+			d_set_d_op(ret, &sdcardfs_ci_dops);
+			ret->d_sb = sb;
+			ret->d_parent = ret;
+		}
+	}
+	return ret;
+}
+#endif
+
+DEFINE_MUTEX(sdcardfs_super_list_lock);
+LIST_HEAD(sdcardfs_super_list);
+EXPORT_SYMBOL_GPL(sdcardfs_super_list_lock);
+EXPORT_SYMBOL_GPL(sdcardfs_super_list);
+
+/*
+ * There is no need to take the sdcardfs_sb_info lock, as there is no
+ * way anyone can have a reference to the superblock at this point in time.
+ */
+static int sdcardfs_read_super(struct super_block *sb, const char *dev_name,
+						void *raw_data, int silent)
+{
+	int err = 0;
+	int debug;
+	struct super_block *lower_sb;
+	struct path lower_path;
+	struct sdcardfs_sb_info *sb_info;
+	struct inode *inode;
+
+	printk(KERN_INFO "sdcardfs version 2.0\n");
+
+	if (!dev_name) {
+		printk(KERN_ERR
+		       "sdcardfs: read_super: missing dev_name argument\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	printk(KERN_INFO "sdcardfs: dev_name -> %s\n", dev_name);
+	printk(KERN_INFO "sdcardfs: options -> %s\n", (char *)raw_data);
+
+	/* parse lower path */
+	err = kern_path(dev_name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+			&lower_path);
+	if (err) {
+		printk(KERN_ERR	"sdcardfs: error accessing lower directory '%s'\n", dev_name);
+		goto out;
+	}
+
+	/* allocate superblock private data */
+	sb->s_fs_info = kzalloc(sizeof(struct sdcardfs_sb_info), GFP_KERNEL);
+	if (!SDCARDFS_SB(sb)) {
+		printk(KERN_CRIT "sdcardfs: read_super: out of memory\n");
+		err = -ENOMEM;
+		goto out_free;
+	}
+
+	sb_info = sb->s_fs_info;
+	/* parse options */
+	err = parse_options(sb, raw_data, silent, &debug, &sb_info->options);
+	if (err) {
+		printk(KERN_ERR	"sdcardfs: invalid options\n");
+		goto out_freesbi;
+	}
+
+	/* set the lower superblock field of upper superblock */
+	lower_sb = lower_path.dentry->d_sb;
+	atomic_inc(&lower_sb->s_active);
+	sdcardfs_set_lower_super(sb, lower_sb);
+
+	/* inherit maxbytes from lower file system */
+	sb->s_maxbytes = lower_sb->s_maxbytes;
+
+	/*
+	 * Our c/m/atime granularity is 1 ns because we may stack on file
+	 * systems whose granularity is as good.
+	 */
+	sb->s_time_gran = 1;
+
+	sb->s_magic = SDCARDFS_SUPER_MAGIC;
+	sb->s_op = &sdcardfs_sops;
+
+	/* get a new inode and allocate our root dentry */
+	inode = sdcardfs_iget(sb, lower_path.dentry->d_inode, 0);
+	if (IS_ERR(inode)) {
+		err = PTR_ERR(inode);
+		goto out_sput;
+	}
+	sb->s_root = d_make_root(inode);
+	if (!sb->s_root) {
+		err = -ENOMEM;
+		goto out_iput;
+	}
+	d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
+
+	/* link the upper and lower dentries */
+	sb->s_root->d_fsdata = NULL;
+	err = new_dentry_private_data(sb->s_root);
+	if (err)
+		goto out_freeroot;
+
+	/* set the lower dentries for s_root */
+	sdcardfs_set_lower_path(sb->s_root, &lower_path);
+
+	/*
+	 * No need to call interpose because we already have a positive
+	 * dentry, which was instantiated by d_make_root.  Just need to
+	 * d_rehash it.
+	 */
+	d_rehash(sb->s_root);
+
+	/* setup permission policy */
+	sb_info->obbpath_s = kzalloc(PATH_MAX, GFP_KERNEL);
+	mutex_lock(&sdcardfs_super_list_lock);
+	if(sb_info->options.multiuser) {
+		setup_derived_state(sb->s_root->d_inode, PERM_PRE_ROOT, sb_info->options.fs_user_id, AID_ROOT, false);
+		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/obb", dev_name);
+		/*err =  prepare_dir(sb_info->obbpath_s,
+					sb_info->options.fs_low_uid,
+					sb_info->options.fs_low_gid, 00755);*/
+	} else {
+		setup_derived_state(sb->s_root->d_inode, PERM_ROOT, sb_info->options.fs_low_uid, AID_ROOT, false);
+		snprintf(sb_info->obbpath_s, PATH_MAX, "%s/Android/obb", dev_name);
+	}
+	fix_derived_permission(sb->s_root->d_inode);
+	sb_info->sb = sb;
+	list_add(&sb_info->list, &sdcardfs_super_list);
+	mutex_unlock(&sdcardfs_super_list_lock);
+
+	if (!silent)
+		printk(KERN_INFO "sdcardfs: mounted on top of %s type %s\n",
+				dev_name, lower_sb->s_type->name);
+	goto out; /* all is well */
+
+	/* no longer needed: free_dentry_private_data(sb->s_root); */
+out_freeroot:
+	dput(sb->s_root);
+out_iput:
+	iput(inode);
+out_sput:
+	/* drop refs we took earlier */
+	atomic_dec(&lower_sb->s_active);
+out_freesbi:
+	kfree(SDCARDFS_SB(sb));
+	sb->s_fs_info = NULL;
+out_free:
+	path_put(&lower_path);
+
+out:
+	return err;
+}
+
+/* A mount_nodev() variant that also passes dev_name and options to fill_super */
+static struct dentry *mount_nodev_with_options(struct file_system_type *fs_type,
+		int flags, const char *dev_name, void *data,
+		int (*fill_super)(struct super_block *, const char *, void *, int))
+{
+	int error;
+	struct super_block *s = sget(fs_type, NULL, set_anon_super, flags, NULL);
+
+	if (IS_ERR(s))
+		return ERR_CAST(s);
+
+	s->s_flags = flags;
+
+	error = fill_super(s, dev_name, data, flags & MS_SILENT ? 1 : 0);
+	if (error) {
+		deactivate_locked_super(s);
+		return ERR_PTR(error);
+	}
+	s->s_flags |= MS_ACTIVE;
+	return dget(s->s_root);
+}
+
+struct dentry *sdcardfs_mount(struct file_system_type *fs_type, int flags,
+			    const char *dev_name, void *raw_data)
+{
+	/*
+	 * dev_name is the lower path name,
+	 * raw_data is the option string.
+	 */
+	return mount_nodev_with_options(fs_type, flags, dev_name,
+					raw_data, sdcardfs_read_super);
+}
+
+void sdcardfs_kill_sb(struct super_block *sb)
+{
+	struct sdcardfs_sb_info *sbi;
+
+	if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+		sbi = SDCARDFS_SB(sb);
+		mutex_lock(&sdcardfs_super_list_lock);
+		list_del(&sbi->list);
+		mutex_unlock(&sdcardfs_super_list_lock);
+	}
+	generic_shutdown_super(sb);
+}
+
+static struct file_system_type sdcardfs_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= SDCARDFS_NAME,
+	.mount		= sdcardfs_mount,
+	.kill_sb	= sdcardfs_kill_sb,
+	.fs_flags	= 0,
+};
+
+static int __init init_sdcardfs_fs(void)
+{
+	int err;
+
+	pr_info("Registering sdcardfs " SDCARDFS_VERSION "\n");
+
+	err = sdcardfs_init_inode_cache();
+	if (err)
+		goto out;
+	err = sdcardfs_init_dentry_cache();
+	if (err)
+		goto out;
+	err = packagelist_init();
+	if (err)
+		goto out;
+	err = register_filesystem(&sdcardfs_fs_type);
+out:
+	if (err) {
+		sdcardfs_destroy_inode_cache();
+		sdcardfs_destroy_dentry_cache();
+		packagelist_exit();
+	}
+	return err;
+}
+
+static void __exit exit_sdcardfs_fs(void)
+{
+	sdcardfs_destroy_inode_cache();
+	sdcardfs_destroy_dentry_cache();
+	packagelist_exit();
+	unregister_filesystem(&sdcardfs_fs_type);
+	pr_info("Completed sdcardfs module unload\n");
+}
+
+MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
+	      " (http://www.fsl.cs.sunysb.edu/)");
+MODULE_DESCRIPTION("Wrapfs " SDCARDFS_VERSION
+		   " (http://wrapfs.filesystems.org/)");
+MODULE_LICENSE("GPL");
+
+module_init(init_sdcardfs_fs);
+module_exit(exit_sdcardfs_fs);
diff --git a/fs/sdcardfs/mmap.c b/fs/sdcardfs/mmap.c
new file mode 100644
index 0000000..ac5f3de
--- /dev/null
+++ b/fs/sdcardfs/mmap.c
@@ -0,0 +1,80 @@
+/*
+ * fs/sdcardfs/mmap.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+static int sdcardfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	int err;
+	struct file *file, *lower_file;
+	const struct vm_operations_struct *lower_vm_ops;
+	struct vm_area_struct lower_vma;
+
+	memcpy(&lower_vma, vma, sizeof(struct vm_area_struct));
+	file = lower_vma.vm_file;
+	lower_vm_ops = SDCARDFS_F(file)->lower_vm_ops;
+	BUG_ON(!lower_vm_ops);
+
+	lower_file = sdcardfs_lower_file(file);
+	/*
+	 * XXX: vm_ops->fault may be called in parallel.  Because we have to
+	 * resort to temporarily changing the vma->vm_file to point to the
+	 * lower file, a concurrent invocation of sdcardfs_fault could see a
+	 * different value.  In this workaround, we keep a different copy of
+	 * the vma structure in our stack, so we never expose a different
+	 * value of the vma->vm_file called to us, even temporarily.  A
+	 * better fix would be to change the calling semantics of ->fault to
+	 * take an explicit file pointer.
+	 */
+	lower_vma.vm_file = lower_file;
+	err = lower_vm_ops->fault(&lower_vma, vmf);
+	return err;
+}
+
+static ssize_t sdcardfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+	/*
+	 * This function returns zero on purpose in order to support direct IO.
+	 * __dentry_open checks a_ops->direct_IO and returns EINVAL if it is null.
+	 *
+	 * However, this function won't be called by certain file operations,
+	 * including generic fs functions.  Reads and writes are delivered to
+	 * the lower file systems and the direct IOs will be handled by them.
+	 *
+	 * NOTE: exceptionally, on recent kernels (since Linux 3.8.x),
+	 * swap_writepage invokes this function directly.
+	 */
+	printk(KERN_INFO "%s, operation is not supported\n", __func__);
+	return 0;
+}
+
+/*
+ * XXX: the default address_space_ops for sdcardfs is empty.  We cannot set
+ * our inode->i_mapping->a_ops to NULL because too many code paths expect
+ * the a_ops vector to be non-NULL.
+ */
+const struct address_space_operations sdcardfs_aops = {
+	/* empty on purpose */
+	.direct_IO	= sdcardfs_direct_IO,
+};
+
+const struct vm_operations_struct sdcardfs_vm_ops = {
+	.fault		= sdcardfs_fault,
+};
diff --git a/fs/sdcardfs/multiuser.h b/fs/sdcardfs/multiuser.h
new file mode 100644
index 0000000..923ba10
--- /dev/null
+++ b/fs/sdcardfs/multiuser.h
@@ -0,0 +1,37 @@
+/*
+ * fs/sdcardfs/multiuser.h
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#define MULTIUSER_APP_PER_USER_RANGE 100000
+
+typedef uid_t userid_t;
+typedef uid_t appid_t;
+
+static inline userid_t multiuser_get_user_id(uid_t uid)
+{
+	return uid / MULTIUSER_APP_PER_USER_RANGE;
+}
+
+static inline appid_t multiuser_get_app_id(uid_t uid)
+{
+	return uid % MULTIUSER_APP_PER_USER_RANGE;
+}
+
+static inline uid_t multiuser_get_uid(userid_t userId, appid_t appId)
+{
+	return userId * MULTIUSER_APP_PER_USER_RANGE + (appId % MULTIUSER_APP_PER_USER_RANGE);
+}
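+
+/*
+ * Worked example (values are hypothetical): with the per-user range of
+ * 100000 above, app id 10005 owned by secondary user 10 maps to
+ *
+ *	multiuser_get_uid(10, 10005)   == 10 * 100000 + 10005 == 1010005
+ *	multiuser_get_user_id(1010005) == 10
+ *	multiuser_get_app_id(1010005)  == 10005
+ */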
+
diff --git a/fs/sdcardfs/packagelist.c b/fs/sdcardfs/packagelist.c
new file mode 100644
index 0000000..31e2145
--- /dev/null
+++ b/fs/sdcardfs/packagelist.c
@@ -0,0 +1,444 @@
+/*
+ * fs/sdcardfs/packagelist.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+#include <linux/hashtable.h>
+#include <linux/delay.h>
+
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/configfs.h>
+
+#define STRING_BUF_SIZE		(512)
+
+struct hashtable_entry {
+	struct hlist_node hlist;
+	void *key;
+	unsigned int value;
+};
+
+struct sb_list {
+	struct super_block *sb;
+	struct list_head list;
+};
+
+struct packagelist_data {
+	DECLARE_HASHTABLE(package_to_appid, 8);
+	struct mutex hashtable_lock;
+};
+
+static struct packagelist_data *pkgl_data_all;
+
+static struct kmem_cache *hashtable_entry_cachep;
+
+static unsigned int str_hash(const char *key) {
+	int i;
+	unsigned int h = strlen(key);
+	char *data = (char *)key;
+
+	for (i = 0; i < strlen(key); i++) {
+		h = h * 31 + *data;
+		data++;
+	}
+	return h;
+}
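+
+/*
+ * This is the familiar 31-based string hash, seeded with the string
+ * length.  For illustration: str_hash("abc") computes
+ * ((3 * 31 + 'a') * 31 + 'b') * 31 + 'c' == 185727.
+ */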
+
+appid_t get_appid(void *pkgl_id, const char *app_name)
+{
+	struct packagelist_data *pkgl_dat = pkgl_data_all;
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = str_hash(app_name);
+	appid_t ret_id;
+
+	mutex_lock(&pkgl_dat->hashtable_lock);
+	hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
+		if (!strcasecmp(app_name, hash_cur->key)) {
+			ret_id = (appid_t)hash_cur->value;
+			mutex_unlock(&pkgl_dat->hashtable_lock);
+			return ret_id;
+		}
+	}
+	mutex_unlock(&pkgl_dat->hashtable_lock);
+	return 0;
+}
+
+/* Kernel has already enforced everything we returned through
+ * derive_permissions_locked(), so this is used to lock down access
+ * even further, such as enforcing that apps hold sdcard_rw. */
+int check_caller_access_to_name(struct inode *parent_node, const char* name) {
+
+	/* Always block security-sensitive files at root */
+	if (parent_node && SDCARDFS_I(parent_node)->perm == PERM_ROOT) {
+		if (!strcasecmp(name, "autorun.inf")
+			|| !strcasecmp(name, ".android_secure")
+			|| !strcasecmp(name, "android_secure")) {
+			return 0;
+		}
+	}
+
+	/* Root always has access; access for any other UIDs should always
+	 * be controlled through packages.list. */
+	if (from_kuid(&init_user_ns, current_fsuid()) == 0) {
+		return 1;
+	}
+
+	/* No extra permissions to enforce */
+	return 1;
+}
+
+/* This function is used when opening a file. The open flags must be
+ * checked before calling check_caller_access_to_name(). */
+int open_flags_to_access_mode(int open_flags)
+{
+	if ((open_flags & O_ACCMODE) == O_RDONLY) {
+		return 0; /* R_OK */
+	} else if ((open_flags & O_ACCMODE) == O_WRONLY) {
+		return 1; /* W_OK */
+	} else {
+		/* Probably O_RDWR, but treat as default to be safe */
+		return 1; /* R_OK | W_OK */
+	}
+}
+
+static int insert_str_to_int_lock(struct packagelist_data *pkgl_dat, char *key,
+		unsigned int value)
+{
+	struct hashtable_entry *hash_cur;
+	struct hashtable_entry *new_entry;
+	unsigned int hash = str_hash(key);
+
+	hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
+		if (!strcasecmp(key, hash_cur->key)) {
+			hash_cur->value = value;
+			return 0;
+		}
+	}
+	new_entry = kmem_cache_alloc(hashtable_entry_cachep, GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+	new_entry->key = kstrdup(key, GFP_KERNEL);
+	if (!new_entry->key) {
+		kmem_cache_free(hashtable_entry_cachep, new_entry);
+		return -ENOMEM;
+	}
+	new_entry->value = value;
+	hash_add(pkgl_dat->package_to_appid, &new_entry->hlist, hash);
+	return 0;
+}
+
+static void fixup_perms(struct super_block *sb) {
+	if (sb && sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+		inode_lock(sb->s_root->d_inode);
+		get_derive_permissions_recursive(sb->s_root);
+		inode_unlock(sb->s_root->d_inode);
+	}
+}
+
+static int insert_str_to_int(struct packagelist_data *pkgl_dat, char *key,
+		unsigned int value) {
+	int ret;
+	struct sdcardfs_sb_info *sbinfo;
+	mutex_lock(&sdcardfs_super_list_lock);
+	mutex_lock(&pkgl_dat->hashtable_lock);
+	ret = insert_str_to_int_lock(pkgl_dat, key, value);
+	mutex_unlock(&pkgl_dat->hashtable_lock);
+
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo) {
+			fixup_perms(sbinfo->sb);
+		}
+	}
+	mutex_unlock(&sdcardfs_super_list_lock);
+	return ret;
+}
+
+static void remove_str_to_int_lock(struct hashtable_entry *h_entry) {
+	kfree(h_entry->key);
+	hash_del(&h_entry->hlist);
+	kmem_cache_free(hashtable_entry_cachep, h_entry);
+}
+
+static void remove_str_to_int(struct packagelist_data *pkgl_dat, const char *key)
+{
+	struct sdcardfs_sb_info *sbinfo;
+	struct hashtable_entry *hash_cur;
+	unsigned int hash = str_hash(key);
+	mutex_lock(&sdcardfs_super_list_lock);
+	mutex_lock(&pkgl_dat->hashtable_lock);
+	hash_for_each_possible(pkgl_dat->package_to_appid, hash_cur, hlist, hash) {
+		if (!strcasecmp(key, hash_cur->key)) {
+			remove_str_to_int_lock(hash_cur);
+			break;
+		}
+	}
+	mutex_unlock(&pkgl_dat->hashtable_lock);
+	list_for_each_entry(sbinfo, &sdcardfs_super_list, list) {
+		if (sbinfo) {
+			fixup_perms(sbinfo->sb);
+		}
+	}
+	mutex_unlock(&sdcardfs_super_list_lock);
+	return;
+}
+
+static void remove_all_hashentrys(struct packagelist_data *pkgl_dat)
+{
+	struct hashtable_entry *hash_cur;
+	struct hlist_node *h_t;
+	int i;
+	mutex_lock(&pkgl_dat->hashtable_lock);
+	hash_for_each_safe(pkgl_dat->package_to_appid, i, h_t, hash_cur, hlist)
+		remove_str_to_int_lock(hash_cur);
+	mutex_unlock(&pkgl_dat->hashtable_lock);
+	hash_init(pkgl_dat->package_to_appid);
+}
+
+static struct packagelist_data *packagelist_create(void)
+{
+	struct packagelist_data *pkgl_dat;
+
+	pkgl_dat = kzalloc(sizeof(*pkgl_dat), GFP_KERNEL);
+	if (!pkgl_dat) {
+		printk(KERN_ERR "sdcardfs: Failed to create hash\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	mutex_init(&pkgl_dat->hashtable_lock);
+	hash_init(pkgl_dat->package_to_appid);
+
+	return pkgl_dat;
+}
+
+static void packagelist_destroy(struct packagelist_data *pkgl_dat)
+{
+	remove_all_hashentrys(pkgl_dat);
+	printk(KERN_INFO "sdcardfs: destroyed packagelist pkgld\n");
+	kfree(pkgl_dat);
+}
+
+struct package_appid {
+	struct config_item item;
+	int add_pid;
+};
+
+static inline struct package_appid *to_package_appid(struct config_item *item)
+{
+	return item ? container_of(item, struct package_appid, item) : NULL;
+}
+
+static ssize_t package_appid_attr_show(struct config_item *item,
+				      char *page)
+{
+	ssize_t count;
+	count = sprintf(page, "%d\n", get_appid(pkgl_data_all, item->ci_name));
+	return count;
+}
+
+static ssize_t package_appid_attr_store(struct config_item *item,
+				       const char *page, size_t count)
+{
+	struct package_appid *package_appid = to_package_appid(item);
+	unsigned long tmp;
+	char *p = (char *) page;
+	int ret;
+
+	tmp = simple_strtoul(p, &p, 10);
+	if (!p || (*p && (*p != '\n')))
+		return -EINVAL;
+
+	if (tmp > INT_MAX)
+		return -ERANGE;
+	ret = insert_str_to_int(pkgl_data_all, item->ci_name, (unsigned int)tmp);
+	package_appid->add_pid = tmp;
+	if (ret)
+		return ret;
+
+	return count;
+}
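+
+/*
+ * Illustrative userspace usage (assuming configfs is mounted at /config):
+ * creating an item under the "sdcardfs" subsystem and writing its "appid"
+ * attribute looks roughly like
+ *
+ *	mkdir /config/sdcardfs/com.example.app
+ *	echo 10005 > /config/sdcardfs/com.example.app/appid
+ *	cat /config/sdcardfs/com.example.app/appid
+ */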
+
+static struct configfs_attribute package_appid_attr_add_pid = {
+	.ca_owner = THIS_MODULE,
+	.ca_name = "appid",
+	.ca_mode = S_IRUGO | S_IWUGO,
+	.show = package_appid_attr_show,
+	.store = package_appid_attr_store,
+};
+
+static struct configfs_attribute *package_appid_attrs[] = {
+	&package_appid_attr_add_pid,
+	NULL,
+};
+
+static void package_appid_release(struct config_item *item)
+{
+	printk(KERN_INFO "sdcardfs: removing %s\n", item->ci_dentry->d_name.name);
+	/* item->ci_name is freed already, so we rely on the dentry */
+	remove_str_to_int(pkgl_data_all, item->ci_dentry->d_name.name);
+	kfree(to_package_appid(item));
+}
+
+static struct configfs_item_operations package_appid_item_ops = {
+	.release		= package_appid_release,
+};
+
+static struct config_item_type package_appid_type = {
+	.ct_item_ops	= &package_appid_item_ops,
+	.ct_attrs	= package_appid_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+
+struct sdcardfs_packages {
+	struct config_group group;
+};
+
+static inline struct sdcardfs_packages *to_sdcardfs_packages(struct config_item *item)
+{
+	return item ? container_of(to_config_group(item), struct sdcardfs_packages, group) : NULL;
+}
+
+static struct config_item *sdcardfs_packages_make_item(struct config_group *group, const char *name)
+{
+	struct package_appid *package_appid;
+
+	package_appid = kzalloc(sizeof(struct package_appid), GFP_KERNEL);
+	if (!package_appid)
+		return ERR_PTR(-ENOMEM);
+
+	config_item_init_type_name(&package_appid->item, name,
+				   &package_appid_type);
+
+	package_appid->add_pid = 0;
+
+	return &package_appid->item;
+}
+
+static ssize_t packages_attr_show(struct config_item *item,
+					 char *page)
+{
+	struct hashtable_entry *hash_cur;
+	struct hlist_node *h_t;
+	int i;
+	int count = 0, written = 0;
+	char errormsg[] = "<truncated>\n";
+
+	mutex_lock(&pkgl_data_all->hashtable_lock);
+	hash_for_each_safe(pkgl_data_all->package_to_appid, i, h_t, hash_cur, hlist) {
+		written = scnprintf(page + count, PAGE_SIZE - sizeof(errormsg) - count, "%s %d\n", (char *)hash_cur->key, hash_cur->value);
+		if (count + written == PAGE_SIZE - sizeof(errormsg)) {
+			count += scnprintf(page + count, PAGE_SIZE - count, "%s", errormsg);
+			break;
+		}
+		count += written;
+	}
+	mutex_unlock(&pkgl_data_all->hashtable_lock);
+
+	return count;
+}
+
+static struct configfs_attribute sdcardfs_packages_attr_description = {
+	.ca_owner = THIS_MODULE,
+	.ca_name = "packages_gid.list",
+	.ca_mode = S_IRUGO,
+	.show = packages_attr_show,
+};
+
+static struct configfs_attribute *sdcardfs_packages_attrs[] = {
+	&sdcardfs_packages_attr_description,
+	NULL,
+};
+
+static void sdcardfs_packages_release(struct config_item *item)
+{
+	printk(KERN_INFO "sdcardfs: destroyed something?\n");
+	kfree(to_sdcardfs_packages(item));
+}
+
+static struct configfs_item_operations sdcardfs_packages_item_ops = {
+	.release	= sdcardfs_packages_release,
+};
+
+/*
+ * Note that, since no extra work is required on ->drop_item(),
+ * no ->drop_item() is provided.
+ */
+static struct configfs_group_operations sdcardfs_packages_group_ops = {
+	.make_item	= sdcardfs_packages_make_item,
+};
+
+static struct config_item_type sdcardfs_packages_type = {
+	.ct_item_ops	= &sdcardfs_packages_item_ops,
+	.ct_group_ops	= &sdcardfs_packages_group_ops,
+	.ct_attrs	= sdcardfs_packages_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+static struct configfs_subsystem sdcardfs_packages_subsys = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "sdcardfs",
+			.ci_type = &sdcardfs_packages_type,
+		},
+	},
+};
+
+static int configfs_sdcardfs_init(void)
+{
+	int ret;
+	struct configfs_subsystem *subsys = &sdcardfs_packages_subsys;
+
+	config_group_init(&subsys->su_group);
+	mutex_init(&subsys->su_mutex);
+	ret = configfs_register_subsystem(subsys);
+	if (ret) {
+		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+		       ret,
+		       subsys->su_group.cg_item.ci_namebuf);
+	}
+	return ret;
+}
+
+static void configfs_sdcardfs_exit(void)
+{
+	configfs_unregister_subsystem(&sdcardfs_packages_subsys);
+}
+
+int packagelist_init(void)
+{
+	hashtable_entry_cachep =
+		kmem_cache_create("packagelist_hashtable_entry",
+					sizeof(struct hashtable_entry), 0, 0, NULL);
+	if (!hashtable_entry_cachep) {
+		printk(KERN_ERR "sdcardfs: failed creating pkgl_hashtable entry slab cache\n");
+		return -ENOMEM;
+	}
+
+	pkgl_data_all = packagelist_create();
+	configfs_sdcardfs_init();
+	return 0;
+}
+
+void packagelist_exit(void)
+{
+	configfs_sdcardfs_exit();
+	packagelist_destroy(pkgl_data_all);
+	if (hashtable_entry_cachep)
+		kmem_cache_destroy(hashtable_entry_cachep);
+}
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
new file mode 100644
index 0000000..7cb14ac
--- /dev/null
+++ b/fs/sdcardfs/sdcardfs.h
@@ -0,0 +1,530 @@
+/*
+ * fs/sdcardfs/sdcardfs.h
+ *
+ * The sdcardfs v2.0
+ *   This file system replaces the sdcard daemon on Android
+ *   On version 2.0, some of the daemon functions have been ported
+ *   to support the multi-user concepts of Android 4.4
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _SDCARDFS_H_
+#define _SDCARDFS_H_
+
+#include <linux/dcache.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/seq_file.h>
+#include <linux/statfs.h>
+#include <linux/fs_stack.h>
+#include <linux/magic.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/security.h>
+#include <linux/string.h>
+#include <linux/list.h>
+#include "multiuser.h"
+
+/* the file system name */
+#define SDCARDFS_NAME "sdcardfs"
+
+/* sdcardfs root inode number */
+#define SDCARDFS_ROOT_INO     1
+
+/* useful for tracking code reachability */
+#define UDBG printk(KERN_DEFAULT "DBG:%s:%s:%d\n", __FILE__, __func__, __LINE__)
+
+#define SDCARDFS_DIRENT_SIZE 256
+
+/* temporary static uid settings for development */
+#define AID_ROOT             0	/* uid for accessing /mnt/sdcard & extSdcard */
+#define AID_MEDIA_RW      1023	/* internal media storage write access */
+
+#define AID_SDCARD_RW     1015	/* external storage write access */
+#define AID_SDCARD_R      1028	/* external storage read access */
+#define AID_SDCARD_PICS   1033	/* external storage photos access */
+#define AID_SDCARD_AV     1034	/* external storage audio/video access */
+#define AID_SDCARD_ALL    1035	/* access all users external storage */
+
+#define AID_PACKAGE_INFO  1027
+
+#define fix_derived_permission(x)	\
+	do {						\
+		(x)->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(x)->d_uid);	\
+		(x)->i_gid = make_kgid(&init_user_ns, get_gid(SDCARDFS_I(x)));	\
+		(x)->i_mode = ((x)->i_mode & S_IFMT) | get_mode(SDCARDFS_I(x));\
+	} while (0)
+
+
+/* OVERRIDE_CRED() and REVERT_CRED()
+ *	OVERRIDE_CRED()
+ *		backs up the original task->cred
+ *		and overrides task->cred->fsuid/fsgid with the specified values.
+ *	REVERT_CRED()
+ *		restores the original task->cred->fsuid/fsgid.
+ * These two macros should be used as a pair, and OVERRIDE_CRED() should be
+ * placed at the beginning of a function, right after the variable declarations.
+ */
+#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred)		\
+	saved_cred = override_fsids(sdcardfs_sbi);	\
+	if (!saved_cred) { return -ENOMEM; }
+
+#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred)	\
+	saved_cred = override_fsids(sdcardfs_sbi);	\
+	if (!saved_cred) { return ERR_PTR(-ENOMEM); }
+
+#define REVERT_CRED(saved_cred)	revert_fsids(saved_cred)
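+
+/*
+ * Typical usage sketch (illustrative only; the vfs call and the lower_*
+ * names are arbitrary examples):
+ *
+ *	const struct cred *saved_cred = NULL;
+ *
+ *	OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred);
+ *	err = vfs_create(lower_dir, lower_dentry, mode, want_excl);
+ *	REVERT_CRED(saved_cred);
+ */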
+
+#define DEBUG_CRED()		\
+	printk("KAKJAGI: %s:%d fsuid %d fsgid %d\n", 	\
+		__FUNCTION__, __LINE__, 		\
+		(int)current->cred->fsuid, 		\
+		(int)current->cred->fsgid);
+
+/* Android 5.0 support */
+
+/* Permission mode for a specific node. Controls how file permissions
+ * are derived for child nodes. */
+typedef enum {
+    /* Nothing special; this node should just inherit from its parent. */
+    PERM_INHERIT,
+    /* This node is one level above a normal root; used for legacy layouts
+     * which use the first level to represent user_id. */
+    PERM_PRE_ROOT,
+    /* This node is "/" */
+    PERM_ROOT,
+    /* This node is "/Android" */
+    PERM_ANDROID,
+    /* This node is "/Android/data" */
+    PERM_ANDROID_DATA,
+    /* This node is "/Android/obb" */
+    PERM_ANDROID_OBB,
+    /* This node is "/Android/media" */
+    PERM_ANDROID_MEDIA,
+} perm_t;
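+
+/*
+ * Illustrative mapping for a multiuser mount (paths are examples only):
+ * "/" is PERM_PRE_ROOT, "/0" is PERM_ROOT, "/0/Android" is PERM_ANDROID,
+ * "/0/Android/data" is PERM_ANDROID_DATA, and anything deeper, e.g.
+ * "/0/Android/data/com.example.app", is PERM_INHERIT.
+ */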
+
+struct sdcardfs_sb_info;
+struct sdcardfs_mount_options;
+
+/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
+const struct cred * override_fsids(struct sdcardfs_sb_info* sbi);
+/* Do not directly use this function, use REVERT_CRED() instead. */
+void revert_fsids(const struct cred * old_cred);
+
+/* operations vectors defined in specific files */
+extern const struct file_operations sdcardfs_main_fops;
+extern const struct file_operations sdcardfs_dir_fops;
+extern const struct inode_operations sdcardfs_main_iops;
+extern const struct inode_operations sdcardfs_dir_iops;
+extern const struct inode_operations sdcardfs_symlink_iops;
+extern const struct super_operations sdcardfs_sops;
+extern const struct dentry_operations sdcardfs_ci_dops;
+extern const struct address_space_operations sdcardfs_aops, sdcardfs_dummy_aops;
+extern const struct vm_operations_struct sdcardfs_vm_ops;
+
+extern int sdcardfs_init_inode_cache(void);
+extern void sdcardfs_destroy_inode_cache(void);
+extern int sdcardfs_init_dentry_cache(void);
+extern void sdcardfs_destroy_dentry_cache(void);
+extern int new_dentry_private_data(struct dentry *dentry);
+extern void free_dentry_private_data(struct dentry *dentry);
+extern struct dentry *sdcardfs_lookup(struct inode *dir, struct dentry *dentry,
+				unsigned int flags);
+extern struct inode *sdcardfs_iget(struct super_block *sb,
+				 struct inode *lower_inode, userid_t id);
+extern int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+			    struct path *lower_path, userid_t id);
+
+/* file private data */
+struct sdcardfs_file_info {
+	struct file *lower_file;
+	const struct vm_operations_struct *lower_vm_ops;
+};
+
+/* sdcardfs inode data in memory */
+struct sdcardfs_inode_info {
+	struct inode *lower_inode;
+	/* state derived based on current position in hierarchy */
+	perm_t perm;
+	userid_t userid;
+	uid_t d_uid;
+	bool under_android;
+
+	struct inode vfs_inode;
+};
+
+
+/* sdcardfs dentry data in memory */
+struct sdcardfs_dentry_info {
+	spinlock_t lock;	/* protects lower_path */
+	struct path lower_path;
+	struct path orig_path;
+};
+
+struct sdcardfs_mount_options {
+	uid_t fs_low_uid;
+	gid_t fs_low_gid;
+	userid_t fs_user_id;
+	gid_t gid;
+	mode_t mask;
+	bool multiuser;
+	unsigned int reserved_mb;
+};
+
+/* sdcardfs super-block data in memory */
+struct sdcardfs_sb_info {
+	struct super_block *sb;
+	struct super_block *lower_sb;
+	/* derived perm policy : some of the options have been added
+	 * to sdcardfs_mount_options (Android 4.4 support) */
+	struct sdcardfs_mount_options options;
+	spinlock_t lock;	/* protects obbpath */
+	char *obbpath_s;
+	struct path obbpath;
+	void *pkgl_id;
+	struct list_head list;
+};
+
+/*
+ * inode to private data
+ *
+ * Since we use containers and the struct inode is _inside_ the
+ * sdcardfs_inode_info structure, SDCARDFS_I will always (given a non-NULL
+ * inode pointer) return a valid non-NULL pointer.
+ */
+static inline struct sdcardfs_inode_info *SDCARDFS_I(const struct inode *inode)
+{
+	return container_of(inode, struct sdcardfs_inode_info, vfs_inode);
+}
+
+/* dentry to private data */
+#define SDCARDFS_D(dent) ((struct sdcardfs_dentry_info *)(dent)->d_fsdata)
+
+/* superblock to private data */
+#define SDCARDFS_SB(super) ((struct sdcardfs_sb_info *)(super)->s_fs_info)
+
+/* file to private Data */
+#define SDCARDFS_F(file) ((struct sdcardfs_file_info *)((file)->private_data))
+
+/* file to lower file */
+static inline struct file *sdcardfs_lower_file(const struct file *f)
+{
+	return SDCARDFS_F(f)->lower_file;
+}
+
+static inline void sdcardfs_set_lower_file(struct file *f, struct file *val)
+{
+	SDCARDFS_F(f)->lower_file = val;
+}
+
+/* inode to lower inode. */
+static inline struct inode *sdcardfs_lower_inode(const struct inode *i)
+{
+	return SDCARDFS_I(i)->lower_inode;
+}
+
+static inline void sdcardfs_set_lower_inode(struct inode *i, struct inode *val)
+{
+	SDCARDFS_I(i)->lower_inode = val;
+}
+
+/* superblock to lower superblock */
+static inline struct super_block *sdcardfs_lower_super(
+	const struct super_block *sb)
+{
+	return SDCARDFS_SB(sb)->lower_sb;
+}
+
+static inline void sdcardfs_set_lower_super(struct super_block *sb,
+					  struct super_block *val)
+{
+	SDCARDFS_SB(sb)->lower_sb = val;
+}
+
+/* path based (dentry/mnt) macros */
+static inline void pathcpy(struct path *dst, const struct path *src)
+{
+	dst->dentry = src->dentry;
+	dst->mnt = src->mnt;
+}
+
+/* The sdcardfs_get_##pname functions call path_get();
+ * the caller must therefore call the matching path_put functions.
+ */
+#define SDCARDFS_DENT_FUNC(pname) \
+static inline void sdcardfs_get_##pname(const struct dentry *dent, \
+					struct path *pname) \
+{ \
+	spin_lock(&SDCARDFS_D(dent)->lock); \
+	pathcpy(pname, &SDCARDFS_D(dent)->pname); \
+	path_get(pname); \
+	spin_unlock(&SDCARDFS_D(dent)->lock); \
+	return; \
+} \
+static inline void sdcardfs_put_##pname(const struct dentry *dent, \
+					struct path *pname) \
+{ \
+	path_put(pname); \
+	return; \
+} \
+static inline void sdcardfs_set_##pname(const struct dentry *dent, \
+					struct path *pname) \
+{ \
+	spin_lock(&SDCARDFS_D(dent)->lock); \
+	pathcpy(&SDCARDFS_D(dent)->pname, pname); \
+	spin_unlock(&SDCARDFS_D(dent)->lock); \
+	return; \
+} \
+static inline void sdcardfs_reset_##pname(const struct dentry *dent) \
+{ \
+	spin_lock(&SDCARDFS_D(dent)->lock); \
+	SDCARDFS_D(dent)->pname.dentry = NULL; \
+	SDCARDFS_D(dent)->pname.mnt = NULL; \
+	spin_unlock(&SDCARDFS_D(dent)->lock); \
+	return; \
+} \
+static inline void sdcardfs_put_reset_##pname(const struct dentry *dent) \
+{ \
+	struct path pname; \
+	spin_lock(&SDCARDFS_D(dent)->lock); \
+	if(SDCARDFS_D(dent)->pname.dentry) { \
+		pathcpy(&pname, &SDCARDFS_D(dent)->pname); \
+		SDCARDFS_D(dent)->pname.dentry = NULL; \
+		SDCARDFS_D(dent)->pname.mnt = NULL; \
+		spin_unlock(&SDCARDFS_D(dent)->lock); \
+		path_put(&pname); \
+	} else \
+		spin_unlock(&SDCARDFS_D(dent)->lock); \
+	return; \
+}
+
+SDCARDFS_DENT_FUNC(lower_path)
+SDCARDFS_DENT_FUNC(orig_path)
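+
+/*
+ * For reference, the SDCARDFS_DENT_FUNC(lower_path) invocation above
+ * generates sdcardfs_get_lower_path(), sdcardfs_put_lower_path(),
+ * sdcardfs_set_lower_path(), sdcardfs_reset_lower_path() and
+ * sdcardfs_put_reset_lower_path(); the orig_path variants are generated
+ * the same way.
+ */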
+
+static inline int get_gid(struct sdcardfs_inode_info *info) {
+	struct sdcardfs_sb_info *sb_info = SDCARDFS_SB(info->vfs_inode.i_sb);
+	if (sb_info->options.gid == AID_SDCARD_RW) {
+		/* As an optimization, certain trusted system components only run
+		 * as owner but operate across all users. Since we're now handing
+		 * out the sdcard_rw GID only to trusted apps, we're okay relaxing
+		 * the user boundary enforcement for the default view. The UIDs
+		 * assigned to app directories are still multiuser aware. */
+		return AID_SDCARD_RW;
+	} else {
+		return multiuser_get_uid(info->userid, sb_info->options.gid);
+	}
+}
+static inline int get_mode(struct sdcardfs_inode_info *info) {
+	int owner_mode;
+	int filtered_mode;
+	struct sdcardfs_sb_info *sb_info = SDCARDFS_SB(info->vfs_inode.i_sb);
+	int visible_mode = 0775 & ~sb_info->options.mask;
+
+	if (info->perm == PERM_PRE_ROOT) {
+		/* Top of multi-user view should always be visible to ensure
+		* secondary users can traverse inside. */
+		visible_mode = 0711;
+	} else if (info->under_android) {
+		/* Block "other" access to Android directories, since only apps
+		* belonging to a specific user should be in there; we still
+		* leave +x open for the default view. */
+		if (sb_info->options.gid == AID_SDCARD_RW) {
+			visible_mode = visible_mode & ~0006;
+		} else {
+			visible_mode = visible_mode & ~0007;
+		}
+	}
+	owner_mode = info->lower_inode->i_mode & 0700;
+	filtered_mode = visible_mode & (owner_mode | (owner_mode >> 3) | (owner_mode >> 6));
+	return filtered_mode;
+}
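+
+/*
+ * Worked example (mask and mode values are hypothetical): with mask=0006
+ * the default view starts from 0775 & ~0006 == 0771.  For a node under
+ * /Android on a mount where gid != AID_SDCARD_RW, the "other" bits are
+ * cleared (0771 & ~0007 == 0770), and a lower i_mode of 0700 then yields
+ * filtered_mode == 0770 & (0700 | 0070 | 0007) == 0770.
+ */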
+
+static inline int has_graft_path(const struct dentry *dent)
+{
+	int ret = 0;
+
+	spin_lock(&SDCARDFS_D(dent)->lock);
+	if (SDCARDFS_D(dent)->orig_path.dentry != NULL)
+		ret = 1;
+	spin_unlock(&SDCARDFS_D(dent)->lock);
+
+	return ret;
+}
+
+static inline void sdcardfs_get_real_lower(const struct dentry *dent,
+						struct path *real_lower)
+{
+	/* in case of a local obb dentry
+	 * the orig_path should be returned
+	 */
+	if(has_graft_path(dent))
+		sdcardfs_get_orig_path(dent, real_lower);
+	else
+		sdcardfs_get_lower_path(dent, real_lower);
+}
+
+static inline void sdcardfs_put_real_lower(const struct dentry *dent,
+						struct path *real_lower)
+{
+	if(has_graft_path(dent))
+		sdcardfs_put_orig_path(dent, real_lower);
+	else
+		sdcardfs_put_lower_path(dent, real_lower);
+}
+
+extern struct mutex sdcardfs_super_list_lock;
+extern struct list_head sdcardfs_super_list;
+
+/* for packagelist.c */
+extern appid_t get_appid(void *pkgl_id, const char *app_name);
+extern int check_caller_access_to_name(struct inode *parent_node, const char* name);
+extern int open_flags_to_access_mode(int open_flags);
+extern int packagelist_init(void);
+extern void packagelist_exit(void);
+
+/* for derived_perm.c */
+extern void setup_derived_state(struct inode *inode, perm_t perm,
+			userid_t userid, uid_t uid, bool under_android);
+extern void get_derived_permission(struct dentry *parent, struct dentry *dentry);
+extern void get_derived_permission_new(struct dentry *parent, struct dentry *dentry, struct dentry *newdentry);
+extern void get_derive_permissions_recursive(struct dentry *parent);
+
+extern void update_derived_permission_lock(struct dentry *dentry);
+extern int need_graft_path(struct dentry *dentry);
+extern int is_base_obbpath(struct dentry *dentry);
+extern int is_obbpath_invalid(struct dentry *dentry);
+extern int setup_obb_dentry(struct dentry *dentry, struct path *lower_path);
+
+/* locking helpers */
+static inline struct dentry *lock_parent(struct dentry *dentry)
+{
+	struct dentry *dir = dget_parent(dentry);
+	inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
+	return dir;
+}
+
+static inline void unlock_dir(struct dentry *dir)
+{
+	inode_unlock(d_inode(dir));
+	dput(dir);
+}
+
+static inline int prepare_dir(const char *path_s, uid_t uid, gid_t gid, mode_t mode)
+{
+	int err;
+	struct dentry *dent;
+	struct iattr attrs;
+	struct path parent;
+
+	dent = kern_path_locked(path_s, &parent);
+	if (IS_ERR(dent)) {
+		err = PTR_ERR(dent);
+		if (err == -EEXIST)
+			err = 0;
+		goto out_unlock;
+	}
+
+	err = vfs_mkdir(d_inode(parent.dentry), dent, mode);
+	if (err) {
+		if (err == -EEXIST)
+			err = 0;
+		goto out_dput;
+	}
+
+	attrs.ia_uid = make_kuid(&init_user_ns, uid);
+	attrs.ia_gid = make_kgid(&init_user_ns, gid);
+	attrs.ia_valid = ATTR_UID | ATTR_GID;
+	inode_lock(d_inode(dent));
+	notify_change(dent, &attrs, NULL);
+	inode_unlock(d_inode(dent));
+
+out_dput:
+	dput(dent);
+
+out_unlock:
+	/* parent dentry locked by kern_path_locked */
+	inode_unlock(d_inode(parent.dentry));
+	path_put(&parent);
+	return err;
+}
+
+/*
+ * Return 1 if the disk has enough free space, otherwise 0.
+ * We assume that files cannot be overwritten.
+ */
+static inline int check_min_free_space(struct dentry *dentry, size_t size, int dir)
+{
+	int err;
+	struct path lower_path;
+	struct kstatfs statfs;
+	u64 avail;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+	if (sbi->options.reserved_mb) {
+		/* Get fs stat of lower filesystem. */
+		sdcardfs_get_lower_path(dentry, &lower_path);
+		err = vfs_statfs(&lower_path, &statfs);
+		sdcardfs_put_lower_path(dentry, &lower_path);
+
+		if (unlikely(err))
+			return 0;
+
+		/* Invalid statfs information. */
+		if (unlikely(statfs.f_bsize == 0))
+			return 0;
+
+		/* if checking a directory, set size to f_bsize. */
+		if (unlikely(dir))
+			size = statfs.f_bsize;
+
+		/* available size */
+		avail = statfs.f_bavail * statfs.f_bsize;
+
+		/* not enough space */
+		if ((u64)size > avail)
+			return 0;
+
+		/* enough space */
+		if ((avail - size) > (sbi->options.reserved_mb * 1024 * 1024))
+			return 1;
+
+		return 0;
+	} else {
+		return 1;
+	}
+}
+
+/* Copies attrs and maintains sdcardfs managed attrs */
+static inline void sdcardfs_copy_and_fix_attrs(struct inode *dest, const struct inode *src)
+{
+	dest->i_mode = (src->i_mode  & S_IFMT) | get_mode(SDCARDFS_I(dest));
+	dest->i_uid = make_kuid(&init_user_ns, SDCARDFS_I(dest)->d_uid);
+	dest->i_gid = make_kgid(&init_user_ns, get_gid(SDCARDFS_I(dest)));
+	dest->i_rdev = src->i_rdev;
+	dest->i_atime = src->i_atime;
+	dest->i_mtime = src->i_mtime;
+	dest->i_ctime = src->i_ctime;
+	dest->i_blkbits = src->i_blkbits;
+	dest->i_flags = src->i_flags;
+	set_nlink(dest, src->i_nlink);
+}
+#endif	/* not _SDCARDFS_H_ */
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
new file mode 100644
index 0000000..1d64901
--- /dev/null
+++ b/fs/sdcardfs/super.c
@@ -0,0 +1,222 @@
+/*
+ * fs/sdcardfs/super.c
+ *
+ * Copyright (c) 2013 Samsung Electronics Co. Ltd
+ *   Authors: Daeho Jeong, Woojoong Lee, Seunghwan Hyun,
+ *               Sunghwan Yun, Sungjong Seo
+ *
+ * This program has been developed as a stackable file system based on
+ * the WrapFS which written by
+ *
+ * Copyright (c) 1998-2011 Erez Zadok
+ * Copyright (c) 2009     Shrikar Archak
+ * Copyright (c) 2003-2011 Stony Brook University
+ * Copyright (c) 2003-2011 The Research Foundation of SUNY
+ *
+ * This file is dual licensed.  It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#include "sdcardfs.h"
+
+/*
+ * The inode cache is used with alloc_inode for both our inode info and the
+ * vfs inode.
+ */
+static struct kmem_cache *sdcardfs_inode_cachep;
+
+/* final actions when unmounting a file system */
+static void sdcardfs_put_super(struct super_block *sb)
+{
+	struct sdcardfs_sb_info *spd;
+	struct super_block *s;
+
+	spd = SDCARDFS_SB(sb);
+	if (!spd)
+		return;
+
+	if (spd->obbpath_s) {
+		kfree(spd->obbpath_s);
+		path_put(&spd->obbpath);
+	}
+
+	/* decrement lower super references */
+	s = sdcardfs_lower_super(sb);
+	sdcardfs_set_lower_super(sb, NULL);
+	atomic_dec(&s->s_active);
+
+	kfree(spd);
+	sb->s_fs_info = NULL;
+}
+
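+/*
+ * statfs is forwarded to the lower filesystem; when reserved_mb is set,
+ * that many megabytes are hidden from the reported free space.
+ */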
+static int sdcardfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	int err;
+	struct path lower_path;
+	u32 min_blocks;
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(dentry->d_sb);
+
+	sdcardfs_get_lower_path(dentry, &lower_path);
+	err = vfs_statfs(&lower_path, buf);
+	sdcardfs_put_lower_path(dentry, &lower_path);
+
+	if (sbi->options.reserved_mb) {
+		/* Invalid statfs information. */
+		if (buf->f_bsize == 0) {
+			printk(KERN_ERR "sdcardfs: returned block size is zero\n");
+			return -EINVAL;
+		}
+
+		min_blocks = (sbi->options.reserved_mb * 1024 * 1024) / buf->f_bsize;
+		buf->f_blocks -= min_blocks;
+
+		if (buf->f_bavail > min_blocks)
+			buf->f_bavail -= min_blocks;
+		else
+			buf->f_bavail = 0;
+
+		/* Make reserved blocks invisible to media storage */
+		buf->f_bfree = buf->f_bavail;
+	}
+
+	/* set return buf to our f/s to avoid confusing user-level utils */
+	buf->f_type = SDCARDFS_SUPER_MAGIC;
+
+	return err;
+}
+
+/*
+ * @flags: numeric mount options
+ * @options: mount options string
+ */
+static int sdcardfs_remount_fs(struct super_block *sb, int *flags, char *options)
+{
+	int err = 0;
+
+	/*
+	 * The VFS will take care of "ro" and "rw" flags among others.  We
+	 * can safely accept a few flags (RDONLY, MANDLOCK), and honor
+	 * SILENT, but anything else left over is an error.
+	 */
+	if ((*flags & ~(MS_RDONLY | MS_MANDLOCK | MS_SILENT)) != 0) {
+		printk(KERN_ERR
+		       "sdcardfs: remount flags 0x%x unsupported\n", *flags);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+/*
+ * Called by iput() when the inode reference count reaches zero
+ * and the inode is not hashed anywhere.  Used to clear anything
+ * that needs to be, before the inode is completely destroyed and put
+ * on the inode free list.
+ */
+static void sdcardfs_evict_inode(struct inode *inode)
+{
+	struct inode *lower_inode;
+
+	truncate_inode_pages(&inode->i_data, 0);
+	clear_inode(inode);
+	/*
+	 * Drop the reference to the lower inode, which was taken when
+	 * this sdcardfs inode was first created.
+	 */
+	lower_inode = sdcardfs_lower_inode(inode);
+	sdcardfs_set_lower_inode(inode, NULL);
+	iput(lower_inode);
+}
+
+static struct inode *sdcardfs_alloc_inode(struct super_block *sb)
+{
+	struct sdcardfs_inode_info *i;
+
+	i = kmem_cache_alloc(sdcardfs_inode_cachep, GFP_KERNEL);
+	if (!i)
+		return NULL;
+
+	/* memset everything up to the inode to 0 */
+	memset(i, 0, offsetof(struct sdcardfs_inode_info, vfs_inode));
+
+	i->vfs_inode.i_version = 1;
+	return &i->vfs_inode;
+}
+
+static void sdcardfs_destroy_inode(struct inode *inode)
+{
+	kmem_cache_free(sdcardfs_inode_cachep, SDCARDFS_I(inode));
+}
+
+/* sdcardfs inode cache constructor */
+static void init_once(void *obj)
+{
+	struct sdcardfs_inode_info *i = obj;
+
+	inode_init_once(&i->vfs_inode);
+}
+
+int sdcardfs_init_inode_cache(void)
+{
+	int err = 0;
+
+	sdcardfs_inode_cachep =
+		kmem_cache_create("sdcardfs_inode_cache",
+				  sizeof(struct sdcardfs_inode_info), 0,
+				  SLAB_RECLAIM_ACCOUNT, init_once);
+	if (!sdcardfs_inode_cachep)
+		err = -ENOMEM;
+	return err;
+}
+
+/* sdcardfs inode cache destructor */
+void sdcardfs_destroy_inode_cache(void)
+{
+	if (sdcardfs_inode_cachep)
+		kmem_cache_destroy(sdcardfs_inode_cachep);
+}
+
+/*
+ * Pass umount_begin down to the lower filesystem.  This matters mainly
+ * when the lower fs is NFS, which uses it to kill pending RPC tasks so
+ * that a forced unmount can actually succeed.
+ */
+static void sdcardfs_umount_begin(struct super_block *sb)
+{
+	struct super_block *lower_sb;
+
+	lower_sb = sdcardfs_lower_super(sb);
+	if (lower_sb && lower_sb->s_op && lower_sb->s_op->umount_begin)
+		lower_sb->s_op->umount_begin(lower_sb);
+}
+
+static int sdcardfs_show_options(struct seq_file *m, struct dentry *root)
+{
+	struct sdcardfs_sb_info *sbi = SDCARDFS_SB(root->d_sb);
+	struct sdcardfs_mount_options *opts = &sbi->options;
+
+	if (opts->fs_low_uid != 0)
+		seq_printf(m, ",uid=%u", opts->fs_low_uid);
+	if (opts->fs_low_gid != 0)
+		seq_printf(m, ",gid=%u", opts->fs_low_gid);
+
+	if (opts->multiuser)
+		seq_puts(m, ",multiuser");
+
+	if (opts->reserved_mb != 0)
+		seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
+
+	return 0;
+}
+
+const struct super_operations sdcardfs_sops = {
+	.put_super	= sdcardfs_put_super,
+	.statfs		= sdcardfs_statfs,
+	.remount_fs	= sdcardfs_remount_fs,
+	.evict_inode	= sdcardfs_evict_inode,
+	.umount_begin	= sdcardfs_umount_begin,
+	.show_options	= sdcardfs_show_options,
+	.alloc_inode	= sdcardfs_alloc_inode,
+	.destroy_inode	= sdcardfs_destroy_inode,
+	.drop_inode	= generic_delete_inode,
+};
diff --git a/fs/super.c b/fs/super.c
index c183835..0bed501 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -837,7 +837,7 @@
 	struct super_block *sb, *p = NULL;
 
 	spin_lock(&sb_lock);
-	list_for_each_entry(sb, &super_blocks, s_list) {
+	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
 		if (hlist_unhashed(&sb->s_instances))
 			continue;
 		sb->s_count++;
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 85959d8..69c867c0 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -460,7 +460,8 @@
 				 new_flags, vma->anon_vma,
 				 vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
+				 NULL_VM_UFFD_CTX,
+				 vma_get_anon_name(vma));
 		if (prev)
 			vma = prev;
 		else
@@ -839,7 +840,8 @@
 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 ((struct vm_userfaultfd_ctx){ ctx }));
+				 ((struct vm_userfaultfd_ctx){ ctx }),
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			goto next;
@@ -976,7 +978,8 @@
 		prev = vma_merge(mm, prev, start, vma_end, new_flags,
 				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
 				 vma_policy(vma),
-				 NULL_VM_UFFD_CTX);
+				 NULL_VM_UFFD_CTX,
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			goto next;
diff --git a/include/dt-bindings/clock/qcom,camcc-skunk.h b/include/dt-bindings/clock/qcom,camcc-skunk.h
new file mode 100644
index 0000000..ea54fab
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,camcc-skunk.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_CAM_CC_SKUNK_H
+#define _DT_BINDINGS_CLK_MSM_CAM_CC_SKUNK_H
+
+#define CAM_CC_BPS_AHB_CLK					0
+#define CAM_CC_BPS_AREG_CLK					1
+#define CAM_CC_BPS_AXI_CLK					2
+#define CAM_CC_BPS_CLK						3
+#define CAM_CC_BPS_CLK_SRC					4
+#define CAM_CC_CAMNOC_ATB_CLK					5
+#define CAM_CC_CAMNOC_AXI_CLK					6
+#define CAM_CC_CCI_CLK						7
+#define CAM_CC_CCI_CLK_SRC					8
+#define CAM_CC_CPAS_AHB_CLK					9
+#define CAM_CC_CPHY_RX_CLK_SRC					10
+#define CAM_CC_CSI0PHYTIMER_CLK					11
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC				12
+#define CAM_CC_CSI1PHYTIMER_CLK					13
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC				14
+#define CAM_CC_CSI2PHYTIMER_CLK					15
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC				16
+#define CAM_CC_CSIPHY0_CLK					17
+#define CAM_CC_CSIPHY1_CLK					18
+#define CAM_CC_CSIPHY2_CLK					19
+#define CAM_CC_DEBUG_CLK					20
+#define CAM_CC_FAST_AHB_CLK_SRC					21
+#define CAM_CC_FD_CORE_CLK					22
+#define CAM_CC_FD_CORE_CLK_SRC					23
+#define CAM_CC_FD_CORE_UAR_CLK					24
+#define CAM_CC_ICP_APB_CLK					25
+#define CAM_CC_ICP_ATB_CLK					26
+#define CAM_CC_ICP_CLK						27
+#define CAM_CC_ICP_CLK_SRC					28
+#define CAM_CC_ICP_CTI_CLK					29
+#define CAM_CC_ICP_TS_CLK					30
+#define CAM_CC_IFE_0_AXI_CLK					31
+#define CAM_CC_IFE_0_CLK					32
+#define CAM_CC_IFE_0_CLK_SRC					33
+#define CAM_CC_IFE_0_CPHY_RX_CLK				34
+#define CAM_CC_IFE_0_CSID_CLK					35
+#define CAM_CC_IFE_0_CSID_CLK_SRC				36
+#define CAM_CC_IFE_0_DSP_CLK					37
+#define CAM_CC_IFE_1_AXI_CLK					38
+#define CAM_CC_IFE_1_CLK					39
+#define CAM_CC_IFE_1_CLK_SRC					40
+#define CAM_CC_IFE_1_CPHY_RX_CLK				41
+#define CAM_CC_IFE_1_CSID_CLK					42
+#define CAM_CC_IFE_1_CSID_CLK_SRC				43
+#define CAM_CC_IFE_1_DSP_CLK					44
+#define CAM_CC_IFE_LITE_CLK					45
+#define CAM_CC_IFE_LITE_CLK_SRC					46
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK				47
+#define CAM_CC_IFE_LITE_CSID_CLK				48
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC				49
+#define CAM_CC_IPE_0_AHB_CLK					50
+#define CAM_CC_IPE_0_AREG_CLK					51
+#define CAM_CC_IPE_0_AXI_CLK					52
+#define CAM_CC_IPE_0_CLK					53
+#define CAM_CC_IPE_0_CLK_SRC					54
+#define CAM_CC_IPE_1_AHB_CLK					55
+#define CAM_CC_IPE_1_AREG_CLK					56
+#define CAM_CC_IPE_1_AXI_CLK					57
+#define CAM_CC_IPE_1_CLK					58
+#define CAM_CC_IPE_1_CLK_SRC					59
+#define CAM_CC_JPEG_CLK						60
+#define CAM_CC_JPEG_CLK_SRC					61
+#define CAM_CC_LRME_CLK						62
+#define CAM_CC_LRME_CLK_SRC					63
+#define CAM_CC_MCLK0_CLK					64
+#define CAM_CC_MCLK0_CLK_SRC					65
+#define CAM_CC_MCLK1_CLK					66
+#define CAM_CC_MCLK1_CLK_SRC					67
+#define CAM_CC_MCLK2_CLK					68
+#define CAM_CC_MCLK2_CLK_SRC					69
+#define CAM_CC_MCLK3_CLK					70
+#define CAM_CC_MCLK3_CLK_SRC					71
+#define CAM_CC_PLL0						72
+#define CAM_CC_PLL0_OUT_EVEN					73
+#define CAM_CC_PLL0_OUT_MAIN					74
+#define CAM_CC_PLL0_OUT_ODD					75
+#define CAM_CC_PLL0_OUT_TEST					76
+#define CAM_CC_PLL1						77
+#define CAM_CC_PLL1_OUT_EVEN					78
+#define CAM_CC_PLL1_OUT_MAIN					79
+#define CAM_CC_PLL1_OUT_ODD					80
+#define CAM_CC_PLL1_OUT_TEST					81
+#define CAM_CC_PLL2						82
+#define CAM_CC_PLL2_OUT_EVEN					83
+#define CAM_CC_PLL2_OUT_MAIN					84
+#define CAM_CC_PLL2_OUT_ODD					85
+#define CAM_CC_PLL2_OUT_TEST					86
+#define CAM_CC_PLL3						87
+#define CAM_CC_PLL3_OUT_EVEN					88
+#define CAM_CC_PLL3_OUT_MAIN					89
+#define CAM_CC_PLL3_OUT_ODD					90
+#define CAM_CC_PLL3_OUT_TEST					91
+#define CAM_CC_PLL_TEST_CLK					92
+#define CAM_CC_SLOW_AHB_CLK_SRC					93
+#define CAM_CC_SOC_AHB_CLK					94
+#define CAM_CC_SPDM_BPS_CLK					95
+#define CAM_CC_SPDM_IFE_0_CLK					96
+#define CAM_CC_SPDM_IFE_0_CSID_CLK				97
+#define CAM_CC_SPDM_IPE_0_CLK					98
+#define CAM_CC_SPDM_IPE_1_CLK					99
+#define CAM_CC_SPDM_JPEG_CLK					100
+#define CAM_CC_SYS_TMR_CLK					101
+
+#define TITAN_CAM_CC_BPS_BCR					0
+#define TITAN_CAM_CC_CAMNOC_BCR					1
+#define TITAN_CAM_CC_CCI_BCR					2
+#define TITAN_CAM_CC_CPAS_BCR					3
+#define TITAN_CAM_CC_CSI0PHY_BCR				4
+#define TITAN_CAM_CC_CSI1PHY_BCR				5
+#define TITAN_CAM_CC_CSI2PHY_BCR				6
+#define TITAN_CAM_CC_FD_BCR					7
+#define TITAN_CAM_CC_ICP_BCR					8
+#define TITAN_CAM_CC_IFE_0_BCR					9
+#define TITAN_CAM_CC_IFE_1_BCR					10
+#define TITAN_CAM_CC_IFE_LITE_BCR				11
+#define TITAN_CAM_CC_IPE_0_BCR					12
+#define TITAN_CAM_CC_IPE_1_BCR					13
+#define TITAN_CAM_CC_JPEG_BCR					14
+#define TITAN_CAM_CC_LRME_BCR					15
+#define TITAN_CAM_CC_MCLK0_BCR					16
+#define TITAN_CAM_CC_MCLK1_BCR					17
+#define TITAN_CAM_CC_MCLK2_BCR					18
+#define TITAN_CAM_CC_MCLK3_BCR					19
+#define TITAN_CAM_CC_TITAN_TOP_BCR				20
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-skunk.h b/include/dt-bindings/clock/qcom,dispcc-skunk.h
new file mode 100644
index 0000000..835ebcb
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-skunk.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_DISP_CC_SKUNK_H
+#define _DT_BINDINGS_CLK_MSM_DISP_CC_SKUNK_H
+
+#define DISP_CC_DEBUG_CLK					0
+#define DISP_CC_MDSS_AHB_CLK					1
+#define DISP_CC_MDSS_AXI_CLK					2
+#define DISP_CC_MDSS_BYTE0_CLK					3
+#define DISP_CC_MDSS_BYTE0_CLK_SRC				4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK				5
+#define DISP_CC_MDSS_BYTE1_CLK					6
+#define DISP_CC_MDSS_BYTE1_CLK_SRC				7
+#define DISP_CC_MDSS_BYTE1_INTF_CLK				8
+#define DISP_CC_MDSS_DP_AUX_CLK					9
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC				10
+#define DISP_CC_MDSS_DP_CRYPTO_CLK				11
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC				12
+#define DISP_CC_MDSS_DP_LINK_CLK				13
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC				14
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK				15
+#define DISP_CC_MDSS_DP_PIXEL1_CLK				16
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC				17
+#define DISP_CC_MDSS_DP_PIXEL_CLK				18
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC				19
+#define DISP_CC_MDSS_ESC0_CLK					20
+#define DISP_CC_MDSS_ESC0_CLK_SRC				21
+#define DISP_CC_MDSS_ESC1_CLK					22
+#define DISP_CC_MDSS_ESC1_CLK_SRC				23
+#define DISP_CC_MDSS_MDP_CLK					24
+#define DISP_CC_MDSS_MDP_CLK_SRC				25
+#define DISP_CC_MDSS_MDP_LUT_CLK				26
+#define DISP_CC_MDSS_PCLK0_CLK					27
+#define DISP_CC_MDSS_PCLK0_CLK_SRC				28
+#define DISP_CC_MDSS_PCLK1_CLK					29
+#define DISP_CC_MDSS_PCLK1_CLK_SRC				30
+#define DISP_CC_MDSS_QDSS_AT_CLK				31
+#define DISP_CC_MDSS_QDSS_TSCTR_DIV8_CLK			32
+#define DISP_CC_MDSS_ROT_CLK					33
+#define DISP_CC_MDSS_ROT_CLK_SRC				34
+#define DISP_CC_MDSS_RSCC_AHB_CLK				35
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK				36
+#define DISP_CC_MDSS_SPDM_DEBUG_CLK				37
+#define DISP_CC_MDSS_SPDM_DP_CRYPTO_CLK				38
+#define DISP_CC_MDSS_SPDM_DP_PIXEL1_CLK				39
+#define DISP_CC_MDSS_SPDM_DP_PIXEL_CLK				40
+#define DISP_CC_MDSS_SPDM_MDP_CLK				41
+#define DISP_CC_MDSS_SPDM_PCLK0_CLK				42
+#define DISP_CC_MDSS_SPDM_PCLK1_CLK				43
+#define DISP_CC_MDSS_SPDM_ROT_CLK				44
+#define DISP_CC_MDSS_VSYNC_CLK					45
+#define DISP_CC_MDSS_VSYNC_CLK_SRC				46
+#define DISP_CC_PLL0						47
+#define DISP_CC_PLL0_OUT_EVEN					48
+#define DISP_CC_PLL0_OUT_MAIN					49
+#define DISP_CC_PLL0_OUT_ODD					50
+#define DISP_CC_PLL0_OUT_TEST					51
+
+#define DISP_CC_DISP_CC_MDSS_CORE_BCR				0
+#define DISP_CC_DISP_CC_MDSS_GCC_CLOCKS_BCR			1
+#define DISP_CC_DISP_CC_MDSS_RSCC_BCR				2
+#define DISP_CC_DISP_CC_MDSS_SPDM_BCR				3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-skunk.h b/include/dt-bindings/clock/qcom,gcc-skunk.h
new file mode 100644
index 0000000..7c4a93c
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-skunk.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GCC_SKUNK_H
+#define _DT_BINDINGS_CLK_MSM_GCC_SKUNK_H
+
+/* GCC clocks */
+#define GCC_AGGRE_NOC_PCIE_TBU_CLK				0
+#define GCC_AGGRE_UFS_CARD_AXI_CLK				1
+#define GCC_AGGRE_UFS_PHY_AXI_CLK				2
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK				3
+#define GCC_AGGRE_USB3_SEC_AXI_CLK				4
+#define GCC_BOOT_ROM_AHB_CLK					5
+#define GCC_CAMERA_AHB_CLK					6
+#define GCC_CAMERA_AXI_CLK					7
+#define GCC_CAMERA_XO_CLK					8
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				9
+#define GCC_CFG_NOC_USB3_SEC_AXI_CLK				10
+#define GCC_CPUSS_AHB_CLK					11
+#define GCC_CPUSS_AHB_CLK_SRC					12
+#define GCC_CPUSS_DVM_BUS_CLK					13
+#define GCC_CPUSS_GNOC_CLK					14
+#define GCC_CPUSS_RBCPR_CLK					15
+#define GCC_CPUSS_RBCPR_CLK_SRC					16
+#define GCC_CXO_TX1_CLKREF_CLK					17
+#define GCC_DDRSS_GPU_AXI_CLK					18
+#define GCC_DISP_AHB_CLK					19
+#define GCC_DISP_AXI_CLK					20
+#define GCC_DISP_GPLL0_CLK_SRC					21
+#define GCC_DISP_GPLL0_DIV_CLK_SRC				22
+#define GCC_DISP_XO_CLK						23
+#define GCC_GP1_CLK						24
+#define GCC_GP1_CLK_SRC						25
+#define GCC_GP2_CLK						26
+#define GCC_GP2_CLK_SRC						27
+#define GCC_GP3_CLK						28
+#define GCC_GP3_CLK_SRC						29
+#define GCC_GPU_CFG_AHB_CLK					30
+#define GCC_GPU_GPLL0_CLK_SRC					31
+#define GCC_GPU_GPLL0_DIV_CLK_SRC				32
+#define GCC_GPU_MEMNOC_GFX_CLK					33
+#define GCC_GPU_SNOC_DVM_GFX_CLK				34
+#define GCC_MMSS_QM_AHB_CLK					35
+#define GCC_MMSS_QM_CORE_CLK					36
+#define GCC_MMSS_QM_CORE_CLK_SRC				37
+#define GCC_MSS_AXIS2_CLK					38
+#define GCC_MSS_CFG_AHB_CLK					39
+#define GCC_MSS_GPLL0_DIV_CLK_SRC				40
+#define GCC_MSS_MFAB_AXIS_CLK					41
+#define GCC_MSS_Q6_MEMNOC_AXI_CLK				42
+#define GCC_MSS_SNOC_AXI_CLK					43
+#define GCC_PCIE_0_AUX_CLK					44
+#define GCC_PCIE_0_AUX_CLK_SRC					45
+#define GCC_PCIE_0_CFG_AHB_CLK					46
+#define GCC_PCIE_0_CLKREF_CLK					47
+#define GCC_PCIE_0_MSTR_AXI_CLK					48
+#define GCC_PCIE_0_PIPE_CLK					49
+#define GCC_PCIE_0_SLV_AXI_CLK					50
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK				51
+#define GCC_PCIE_1_AUX_CLK					52
+#define GCC_PCIE_1_AUX_CLK_SRC					53
+#define GCC_PCIE_1_CFG_AHB_CLK					54
+#define GCC_PCIE_1_CLKREF_CLK					55
+#define GCC_PCIE_1_MSTR_AXI_CLK					56
+#define GCC_PCIE_1_PIPE_CLK					57
+#define GCC_PCIE_1_SLV_AXI_CLK					58
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK				59
+#define GCC_PCIE_PHY_AUX_CLK					60
+#define GCC_PCIE_PHY_REFGEN_CLK					61
+#define GCC_PCIE_PHY_REFGEN_CLK_SRC				62
+#define GCC_PDM2_CLK						63
+#define GCC_PDM2_CLK_SRC					64
+#define GCC_PDM_AHB_CLK						65
+#define GCC_PDM_XO4_CLK						66
+#define GCC_PRNG_AHB_CLK					67
+#define GCC_QMIP_CAMERA_AHB_CLK					68
+#define GCC_QMIP_DISP_AHB_CLK					69
+#define GCC_QMIP_VIDEO_AHB_CLK					70
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				71
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				72
+#define GCC_QUPV3_WRAP0_CORE_CLK				73
+#define GCC_QUPV3_WRAP0_S0_CLK					74
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				75
+#define GCC_QUPV3_WRAP0_S1_CLK					76
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				77
+#define GCC_QUPV3_WRAP0_S2_CLK					78
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				79
+#define GCC_QUPV3_WRAP0_S3_CLK					80
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				81
+#define GCC_QUPV3_WRAP0_S4_CLK					82
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				83
+#define GCC_QUPV3_WRAP0_S5_CLK					84
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				85
+#define GCC_QUPV3_WRAP0_S6_CLK					86
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				87
+#define GCC_QUPV3_WRAP0_S7_CLK					88
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				89
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK				90
+#define GCC_QUPV3_WRAP1_CORE_CLK				91
+#define GCC_QUPV3_WRAP1_S0_CLK					92
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
+#define GCC_QUPV3_WRAP1_S1_CLK					94
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
+#define GCC_QUPV3_WRAP1_S2_CLK					96
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
+#define GCC_QUPV3_WRAP1_S3_CLK					98
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
+#define GCC_QUPV3_WRAP1_S4_CLK					100
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
+#define GCC_QUPV3_WRAP1_S5_CLK					102
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
+#define GCC_QUPV3_WRAP1_S6_CLK					104
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
+#define GCC_QUPV3_WRAP1_S7_CLK					106
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
+#define GCC_RX1_USB2_CLKREF_CLK					112
+#define GCC_RX2_QLINK_CLKREF_CLK				113
+#define GCC_RX3_MODEM_CLKREF_CLK				114
+#define GCC_SDCC2_AHB_CLK					115
+#define GCC_SDCC2_APPS_CLK					116
+#define GCC_SDCC2_APPS_CLK_SRC					117
+#define GCC_SDCC4_AHB_CLK					118
+#define GCC_SDCC4_APPS_CLK					119
+#define GCC_SDCC4_APPS_CLK_SRC					120
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				121
+#define GCC_TSIF_AHB_CLK					122
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				123
+#define GCC_TSIF_REF_CLK					124
+#define GCC_TSIF_REF_CLK_SRC					125
+#define GCC_UFS_CARD_AHB_CLK					126
+#define GCC_UFS_CARD_AXI_CLK					127
+#define GCC_UFS_CARD_AXI_CLK_SRC				128
+#define GCC_UFS_CARD_CLKREF_CLK					129
+#define GCC_UFS_CARD_ICE_CORE_CLK				130
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				131
+#define GCC_UFS_CARD_PHY_AUX_CLK				132
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				133
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				134
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				135
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				136
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				137
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			138
+#define GCC_UFS_MEM_CLKREF_CLK					139
+#define GCC_UFS_PHY_AHB_CLK					140
+#define GCC_UFS_PHY_AXI_CLK					141
+#define GCC_UFS_PHY_AXI_CLK_SRC					142
+#define GCC_UFS_PHY_ICE_CORE_CLK				143
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				144
+#define GCC_UFS_PHY_PHY_AUX_CLK					145
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				146
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				147
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				148
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				149
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				150
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				151
+#define GCC_USB30_PRIM_MASTER_CLK				152
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				153
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				154
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			155
+#define GCC_USB30_PRIM_SLEEP_CLK				156
+#define GCC_USB30_SEC_MASTER_CLK				157
+#define GCC_USB30_SEC_MASTER_CLK_SRC				158
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				159
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				160
+#define GCC_USB30_SEC_SLEEP_CLK					161
+#define GCC_USB3_PRIM_CLKREF_CLK				162
+#define GCC_USB3_PRIM_PHY_AUX_CLK				163
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				164
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				165
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				166
+#define GCC_USB3_SEC_CLKREF_CLK					167
+#define GCC_USB3_SEC_PHY_AUX_CLK				168
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				169
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				170
+#define GCC_USB3_SEC_PHY_PIPE_CLK				171
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				172
+#define GCC_VIDEO_AHB_CLK					173
+#define GCC_VIDEO_AXI_CLK					174
+#define GCC_VIDEO_XO_CLK					175
+#define GPLL0							176
+#define GPLL0_OUT_EVEN						177
+#define GPLL0_OUT_MAIN						178
+#define GPLL1							179
+#define GPLL1_OUT_MAIN						180
+
+/* RPMh controlled clocks */
+#define RPMH_CXO_CLK						0
+#define RPMH_CXO_A_CLK						1
+#define RPMH_QDSS_CLK						2
+#define RPMH_QDSS_A_CLK						3
+
+/* GCC reset clocks */
+#define GCC_GPU_BCR						0
+#define GCC_MMSS_BCR						1
+#define GCC_PCIE_0_BCR						2
+#define GCC_PCIE_1_BCR						3
+#define GCC_PCIE_PHY_BCR					4
+#define GCC_PDM_BCR						5
+#define GCC_PRNG_BCR						6
+#define GCC_QUPV3_WRAPPER_0_BCR					7
+#define GCC_QUPV3_WRAPPER_1_BCR					8
+#define GCC_SDCC2_BCR						9
+#define GCC_SDCC4_BCR						10
+#define GCC_TSIF_BCR						11
+#define GCC_UFS_CARD_BCR					12
+#define GCC_UFS_PHY_BCR						13
+#define GCC_USB30_PRIM_BCR					14
+#define GCC_USB30_SEC_BCR					15
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR				16
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-skunk.h b/include/dt-bindings/clock/qcom,gpucc-skunk.h
new file mode 100644
index 0000000..97a1014
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-skunk.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_GPU_CC_SKUNK_H
+#define _DT_BINDINGS_CLK_MSM_GPU_CC_SKUNK_H
+
+#define GPU_CC_ACD_AHB_CLK					0
+#define GPU_CC_ACD_CXO_CLK					1
+#define GPU_CC_AHB_CLK						2
+#define GPU_CC_CRC_AHB_CLK					3
+#define GPU_CC_CX_APB_CLK					4
+#define GPU_CC_CX_GFX3D_CLK					5
+#define GPU_CC_CX_GFX3D_SLV_CLK					6
+#define GPU_CC_CX_GMU_CLK					7
+#define GPU_CC_CX_QDSS_AT_CLK					8
+#define GPU_CC_CX_QDSS_TRIG_CLK					9
+#define GPU_CC_CX_QDSS_TSCTR_CLK				10
+#define GPU_CC_CX_SNOC_DVM_CLK					11
+#define GPU_CC_CXO_AON_CLK					12
+#define GPU_CC_CXO_CLK						13
+#define GPU_CC_DEBUG_CLK					14
+#define GPU_CC_GX_CXO_CLK					15
+#define GPU_CC_GX_GMU_CLK					16
+#define GPU_CC_GX_QDSS_TSCTR_CLK				17
+#define GPU_CC_GX_VSENSE_CLK					18
+#define GPU_CC_PLL0						19
+#define GPU_CC_PLL0_OUT_EVEN					20
+#define GPU_CC_PLL0_OUT_MAIN					21
+#define GPU_CC_PLL0_OUT_ODD					22
+#define GPU_CC_PLL0_OUT_TEST					23
+#define GPU_CC_PLL1						24
+#define GPU_CC_PLL1_OUT_EVEN					25
+#define GPU_CC_PLL1_OUT_MAIN					26
+#define GPU_CC_PLL1_OUT_ODD					27
+#define GPU_CC_PLL1_OUT_TEST					28
+#define GPU_CC_PLL_TEST_CLK					29
+#define GPU_CC_RBCPR_AHB_CLK					30
+#define GPU_CC_RBCPR_CLK					31
+#define GPU_CC_RBCPR_CLK_SRC					32
+#define GPU_CC_SLEEP_CLK					33
+#define GPU_CC_SPDM_GX_GFX3D_DIV_CLK				34
+
+#define GPUCC_GPU_CC_ACD_BCR					0
+#define GPUCC_GPU_CC_CX_BCR					1
+#define GPUCC_GPU_CC_GFX3D_AON_BCR				2
+#define GPUCC_GPU_CC_GMU_BCR					3
+#define GPUCC_GPU_CC_GX_BCR					4
+#define GPUCC_GPU_CC_RBCPR_BCR					5
+#define GPUCC_GPU_CC_SPDM_BCR					6
+#define GPUCC_GPU_CC_XO_BCR					7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-skunk.h b/include/dt-bindings/clock/qcom,videocc-skunk.h
new file mode 100644
index 0000000..695b589
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-skunk.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_VIDEO_CC_SKUNK_H
+#define _DT_BINDINGS_CLK_MSM_VIDEO_CC_SKUNK_H
+
+#define VIDEO_CC_APB_CLK					0
+#define VIDEO_CC_AT_CLK						1
+#define VIDEO_CC_DEBUG_CLK					2
+#define VIDEO_CC_QDSS_TRIG_CLK					3
+#define VIDEO_CC_QDSS_TSCTR_DIV8_CLK				4
+#define VIDEO_CC_VCODEC0_AXI_CLK				5
+#define VIDEO_CC_VCODEC0_CORE_CLK				6
+#define VIDEO_CC_VCODEC1_AXI_CLK				7
+#define VIDEO_CC_VCODEC1_CORE_CLK				8
+#define VIDEO_CC_VENUS_AHB_CLK					9
+#define VIDEO_CC_VENUS_CLK_SRC					10
+#define VIDEO_CC_VENUS_CTL_AXI_CLK				11
+#define VIDEO_CC_VENUS_CTL_CORE_CLK				12
+#define VIDEO_PLL0						13
+#define VIDEO_PLL0_OUT_EVEN					14
+#define VIDEO_PLL0_OUT_MAIN					15
+#define VIDEO_PLL0_OUT_ODD					16
+#define VIDEO_PLL0_OUT_TEST					17
+
+#define VIDEO_CC_INTERFACE_BCR					0
+#define VIDEO_CC_VCODEC0_BCR					1
+#define VIDEO_CC_VCODEC1_BCR					2
+#define VIDEO_CC_VENUS_BCR					3
+
+#endif
diff --git a/include/dt-bindings/msm/msm-bus-ids.h b/include/dt-bindings/msm/msm-bus-ids.h
new file mode 100644
index 0000000..d27b138
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-ids.h
@@ -0,0 +1,866 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_IDS_H
+#define __MSM_BUS_IDS_H
+
+/* Aggregation types */
+#define AGG_SCHEME_NONE	0
+#define AGG_SCHEME_LEG	1
+#define AGG_SCHEME_1	2
+
+/* Topology related enums */
+#define	MSM_BUS_FAB_DEFAULT 0
+#define	MSM_BUS_FAB_APPSS 0
+#define	MSM_BUS_FAB_SYSTEM 1024
+#define	MSM_BUS_FAB_MMSS 2048
+#define	MSM_BUS_FAB_SYSTEM_FPB 3072
+#define	MSM_BUS_FAB_CPSS_FPB 4096
+
+#define	MSM_BUS_FAB_BIMC 0
+#define	MSM_BUS_FAB_SYS_NOC 1024
+#define	MSM_BUS_FAB_MMSS_NOC 2048
+#define	MSM_BUS_FAB_OCMEM_NOC 3072
+#define	MSM_BUS_FAB_PERIPH_NOC 4096
+#define	MSM_BUS_FAB_CONFIG_NOC 5120
+#define	MSM_BUS_FAB_OCMEM_VNOC 6144
+#define	MSM_BUS_FAB_MMSS_AHB 2049
+#define	MSM_BUS_FAB_A0_NOC 6145
+#define	MSM_BUS_FAB_A1_NOC 6146
+#define	MSM_BUS_FAB_A2_NOC 6147
+#define	MSM_BUS_FAB_GNOC 6148
+#define	MSM_BUS_FAB_CR_VIRT 6149
+
+#define	MSM_BUS_BCM_MC0 0
+
+#define	MSM_BUS_MASTER_FIRST 1
+#define	MSM_BUS_MASTER_AMPSS_M0 1
+#define	MSM_BUS_MASTER_AMPSS_M1 2
+#define	MSM_BUS_APPSS_MASTER_FAB_MMSS 3
+#define	MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
+#define	MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
+#define	MSM_BUS_MASTER_SPS 6
+#define	MSM_BUS_MASTER_ADM_PORT0 7
+#define	MSM_BUS_MASTER_ADM_PORT1 8
+#define	MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
+#define	MSM_BUS_MASTER_ADM1_PORT1 10
+#define	MSM_BUS_MASTER_LPASS_PROC 11
+#define	MSM_BUS_MASTER_MSS_PROCI 12
+#define	MSM_BUS_MASTER_MSS_PROCD 13
+#define	MSM_BUS_MASTER_MSS_MDM_PORT0 14
+#define	MSM_BUS_MASTER_LPASS 15
+#define	MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
+#define	MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
+#define	MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
+#define	MSM_BUS_MASTER_ADM1_CI 19
+#define	MSM_BUS_MASTER_ADM0_CI 20
+#define	MSM_BUS_MASTER_MSS_MDM_PORT1 21
+#define	MSM_BUS_MASTER_MDP_PORT0 22
+#define	MSM_BUS_MASTER_MDP_PORT1 23
+#define	MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
+#define	MSM_BUS_MASTER_ROTATOR 25
+#define	MSM_BUS_MASTER_GRAPHICS_3D 26
+#define	MSM_BUS_MASTER_JPEG_DEC 27
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
+#define	MSM_BUS_MASTER_VFE 29
+#define	MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
+#define	MSM_BUS_MASTER_VPE 30
+#define	MSM_BUS_MASTER_JPEG_ENC 31
+#define	MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
+#define	MSM_BUS_MMSS_MASTER_APPS_FAB 33
+#define	MSM_BUS_MASTER_HD_CODEC_PORT0 34
+#define	MSM_BUS_MASTER_HD_CODEC_PORT1 35
+#define	MSM_BUS_MASTER_SPDM 36
+#define	MSM_BUS_MASTER_RPM 37
+#define	MSM_BUS_MASTER_MSS 38
+#define	MSM_BUS_MASTER_RIVA 39
+#define	MSM_BUS_MASTER_SNOC_VMEM 40
+#define	MSM_BUS_MASTER_MSS_SW_PROC 41
+#define	MSM_BUS_MASTER_MSS_FW_PROC 42
+#define	MSM_BUS_MASTER_HMSS 43
+#define	MSM_BUS_MASTER_GSS_NAV 44
+#define	MSM_BUS_MASTER_PCIE 45
+#define	MSM_BUS_MASTER_SATA 46
+#define	MSM_BUS_MASTER_CRYPTO 47
+#define	MSM_BUS_MASTER_VIDEO_CAP 48
+#define	MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
+#define	MSM_BUS_MASTER_VIDEO_ENC 50
+#define	MSM_BUS_MASTER_VIDEO_DEC 51
+#define	MSM_BUS_MASTER_LPASS_AHB 52
+#define	MSM_BUS_MASTER_QDSS_BAM 53
+#define	MSM_BUS_MASTER_SNOC_CFG 54
+#define	MSM_BUS_MASTER_CRYPTO_CORE0 55
+#define	MSM_BUS_MASTER_CRYPTO_CORE1 56
+#define	MSM_BUS_MASTER_MSS_NAV 57
+#define	MSM_BUS_MASTER_OCMEM_DMA 58
+#define	MSM_BUS_MASTER_WCSS 59
+#define	MSM_BUS_MASTER_QDSS_ETR 60
+#define	MSM_BUS_MASTER_USB3 61
+#define	MSM_BUS_MASTER_JPEG 62
+#define	MSM_BUS_MASTER_VIDEO_P0 63
+#define	MSM_BUS_MASTER_VIDEO_P1 64
+#define	MSM_BUS_MASTER_MSS_PROC 65
+#define	MSM_BUS_MASTER_JPEG_OCMEM 66
+#define	MSM_BUS_MASTER_MDP_OCMEM 67
+#define	MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
+#define	MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
+#define	MSM_BUS_MASTER_VFE_OCMEM 70
+#define	MSM_BUS_MASTER_CNOC_ONOC_CFG 71
+#define	MSM_BUS_MASTER_RPM_INST 72
+#define	MSM_BUS_MASTER_RPM_DATA 73
+#define	MSM_BUS_MASTER_RPM_SYS 74
+#define	MSM_BUS_MASTER_DEHR 75
+#define	MSM_BUS_MASTER_QDSS_DAP 76
+#define	MSM_BUS_MASTER_TIC 77
+#define	MSM_BUS_MASTER_SDCC_1 78
+#define	MSM_BUS_MASTER_SDCC_3 79
+#define	MSM_BUS_MASTER_SDCC_4 80
+#define	MSM_BUS_MASTER_SDCC_2 81
+#define	MSM_BUS_MASTER_TSIF 82
+#define	MSM_BUS_MASTER_BAM_DMA 83
+#define	MSM_BUS_MASTER_BLSP_2 84
+#define	MSM_BUS_MASTER_USB_HSIC 85
+#define	MSM_BUS_MASTER_BLSP_1 86
+#define	MSM_BUS_MASTER_USB_HS 87
+#define	MSM_BUS_MASTER_PNOC_CFG 88
+#define	MSM_BUS_MASTER_V_OCMEM_GFX3D 89
+#define	MSM_BUS_MASTER_IPA 90
+#define	MSM_BUS_MASTER_QPIC 91
+#define	MSM_BUS_MASTER_MDPE 92
+#define	MSM_BUS_MASTER_USB_HS2 93
+#define	MSM_BUS_MASTER_VPU 94
+#define	MSM_BUS_MASTER_UFS 95
+#define	MSM_BUS_MASTER_BCAST 96
+#define	MSM_BUS_MASTER_CRYPTO_CORE2 97
+#define	MSM_BUS_MASTER_EMAC 98
+#define	MSM_BUS_MASTER_VPU_1 99
+#define	MSM_BUS_MASTER_PCIE_1 100
+#define	MSM_BUS_MASTER_USB3_1 101
+#define	MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
+#define	MSM_BUS_MASTER_CNOC_MNOC_CFG 103
+#define	MSM_BUS_MASTER_TCU_0 104
+#define	MSM_BUS_MASTER_TCU_1 105
+#define	MSM_BUS_MASTER_CPP 106
+#define	MSM_BUS_MASTER_AUDIO 107
+#define	MSM_BUS_MASTER_PCIE_2 108
+#define	MSM_BUS_MASTER_VFE1 109
+#define	MSM_BUS_MASTER_XM_USB_HS1 110
+#define	MSM_BUS_MASTER_PCNOC_BIMC_1 111
+#define	MSM_BUS_MASTER_BIMC_PCNOC   112
+#define	MSM_BUS_MASTER_XI_USB_HSIC  113
+#define	MSM_BUS_MASTER_SGMII	    114
+#define	MSM_BUS_SPMI_FETCHER 115
+#define	MSM_BUS_MASTER_GNOC_BIMC 116
+#define	MSM_BUS_MASTER_CRVIRT_A2NOC 117
+#define	MSM_BUS_MASTER_CNOC_A2NOC 118
+#define	MSM_BUS_MASTER_WLAN 119
+#define	MSM_BUS_MASTER_MSS_CE 120
+#define	MSM_BUS_MASTER_MASTER_LAST 121
+
+#define	MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
+
+#define	MSM_BUS_SNOC_MM_INT_0 10000
+#define	MSM_BUS_SNOC_MM_INT_1 10001
+#define	MSM_BUS_SNOC_MM_INT_2 10002
+#define	MSM_BUS_SNOC_MM_INT_BIMC 10003
+#define	MSM_BUS_SNOC_INT_0 10004
+#define	MSM_BUS_SNOC_INT_1 10005
+#define	MSM_BUS_SNOC_INT_BIMC 10006
+#define	MSM_BUS_SNOC_BIMC_0_MAS 10007
+#define	MSM_BUS_SNOC_BIMC_1_MAS 10008
+#define	MSM_BUS_SNOC_QDSS_INT 10009
+#define	MSM_BUS_PNOC_SNOC_MAS 10010
+#define	MSM_BUS_PNOC_SNOC_SLV 10011
+#define	MSM_BUS_PNOC_INT_0 10012
+#define	MSM_BUS_PNOC_INT_1 10013
+#define	MSM_BUS_PNOC_M_0 10014
+#define	MSM_BUS_PNOC_M_1 10015
+#define	MSM_BUS_BIMC_SNOC_MAS 10016
+#define	MSM_BUS_BIMC_SNOC_SLV 10017
+#define	MSM_BUS_PNOC_SLV_0 10018
+#define	MSM_BUS_PNOC_SLV_1 10019
+#define	MSM_BUS_PNOC_SLV_2 10020
+#define	MSM_BUS_PNOC_SLV_3 10021
+#define	MSM_BUS_PNOC_SLV_4 10022
+#define	MSM_BUS_PNOC_SLV_8 10023
+#define	MSM_BUS_PNOC_SLV_9 10024
+#define	MSM_BUS_SNOC_BIMC_0_SLV 10025
+#define	MSM_BUS_SNOC_BIMC_1_SLV 10026
+#define	MSM_BUS_MNOC_BIMC_MAS 10027
+#define	MSM_BUS_MNOC_BIMC_SLV 10028
+#define	MSM_BUS_BIMC_MNOC_MAS 10029
+#define	MSM_BUS_BIMC_MNOC_SLV 10030
+#define	MSM_BUS_SNOC_BIMC_MAS 10031
+#define	MSM_BUS_SNOC_BIMC_SLV 10032
+#define	MSM_BUS_CNOC_SNOC_MAS 10033
+#define	MSM_BUS_CNOC_SNOC_SLV 10034
+#define	MSM_BUS_SNOC_CNOC_MAS 10035
+#define	MSM_BUS_SNOC_CNOC_SLV 10036
+#define	MSM_BUS_OVNOC_SNOC_MAS 10037
+#define	MSM_BUS_OVNOC_SNOC_SLV 10038
+#define	MSM_BUS_SNOC_OVNOC_MAS 10039
+#define	MSM_BUS_SNOC_OVNOC_SLV 10040
+#define	MSM_BUS_SNOC_PNOC_MAS 10041
+#define	MSM_BUS_SNOC_PNOC_SLV 10042
+#define	MSM_BUS_BIMC_INT_APPS_EBI 10043
+#define	MSM_BUS_BIMC_INT_APPS_SNOC 10044
+#define	MSM_BUS_SNOC_BIMC_2_MAS 10045
+#define	MSM_BUS_SNOC_BIMC_2_SLV 10046
+#define	MSM_BUS_PNOC_SLV_5	10047
+#define	MSM_BUS_PNOC_SLV_7	10048
+#define	MSM_BUS_PNOC_INT_2 10049
+#define	MSM_BUS_PNOC_INT_3 10050
+#define	MSM_BUS_PNOC_INT_4 10051
+#define	MSM_BUS_PNOC_INT_5 10052
+#define	MSM_BUS_PNOC_INT_6 10053
+#define	MSM_BUS_PNOC_INT_7 10054
+#define	MSM_BUS_BIMC_SNOC_1_MAS 10055
+#define	MSM_BUS_BIMC_SNOC_1_SLV 10056
+#define	MSM_BUS_PNOC_A1NOC_MAS 10057
+#define	MSM_BUS_PNOC_A1NOC_SLV 10058
+#define	MSM_BUS_CNOC_A1NOC_MAS 10059
+#define	MSM_BUS_A0NOC_SNOC_MAS 10060
+#define	MSM_BUS_A0NOC_SNOC_SLV 10061
+#define	MSM_BUS_A1NOC_SNOC_SLV 10062
+#define	MSM_BUS_A1NOC_SNOC_MAS 10063
+#define	MSM_BUS_A2NOC_SNOC_MAS 10064
+#define	MSM_BUS_A2NOC_SNOC_SLV 10065
+#define	MSM_BUS_SNOC_INT_2 10066
+#define	MSM_BUS_A0NOC_QDSS_INT	10067
+#define	MSM_BUS_INT_LAST 10068
+
+#define	MSM_BUS_INT_TEST_ID	20000
+#define	MSM_BUS_INT_TEST_LAST	20050
+
+#define	MSM_BUS_SLAVE_FIRST 512
+#define	MSM_BUS_SLAVE_EBI_CH0 512
+#define	MSM_BUS_SLAVE_EBI_CH1 513
+#define	MSM_BUS_SLAVE_AMPSS_L2 514
+#define	MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
+#define	MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
+#define	MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
+#define	MSM_BUS_SLAVE_SPS 518
+#define	MSM_BUS_SLAVE_SYSTEM_IMEM 519
+#define	MSM_BUS_SLAVE_AMPSS 520
+#define	MSM_BUS_SLAVE_MSS 521
+#define	MSM_BUS_SLAVE_LPASS 522
+#define	MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
+#define	MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
+#define	MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
+#define	MSM_BUS_SLAVE_CORESIGHT 526
+#define	MSM_BUS_SLAVE_RIVA 527
+#define	MSM_BUS_SLAVE_SMI 528
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS 529
+#define	MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
+#define	MSM_BUS_SLAVE_MM_IMEM 531
+#define	MSM_BUS_SLAVE_CRYPTO 532
+#define	MSM_BUS_SLAVE_SPDM 533
+#define	MSM_BUS_SLAVE_RPM 534
+#define	MSM_BUS_SLAVE_RPM_MSG_RAM 535
+#define	MSM_BUS_SLAVE_MPM 536
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
+#define	MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
+#define	MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
+#define	MSM_BUS_SLAVE_GSBI1_UART 542
+#define	MSM_BUS_SLAVE_GSBI2_UART 543
+#define	MSM_BUS_SLAVE_GSBI3_UART 544
+#define	MSM_BUS_SLAVE_GSBI4_UART 545
+#define	MSM_BUS_SLAVE_GSBI5_UART 546
+#define	MSM_BUS_SLAVE_GSBI6_UART 547
+#define	MSM_BUS_SLAVE_GSBI7_UART 548
+#define	MSM_BUS_SLAVE_GSBI8_UART 549
+#define	MSM_BUS_SLAVE_GSBI9_UART 550
+#define	MSM_BUS_SLAVE_GSBI10_UART 551
+#define	MSM_BUS_SLAVE_GSBI11_UART 552
+#define	MSM_BUS_SLAVE_GSBI12_UART 553
+#define	MSM_BUS_SLAVE_GSBI1_QUP 554
+#define	MSM_BUS_SLAVE_GSBI2_QUP 555
+#define	MSM_BUS_SLAVE_GSBI3_QUP 556
+#define	MSM_BUS_SLAVE_GSBI4_QUP 557
+#define	MSM_BUS_SLAVE_GSBI5_QUP 558
+#define	MSM_BUS_SLAVE_GSBI6_QUP 559
+#define	MSM_BUS_SLAVE_GSBI7_QUP 560
+#define	MSM_BUS_SLAVE_GSBI8_QUP 561
+#define	MSM_BUS_SLAVE_GSBI9_QUP 562
+#define	MSM_BUS_SLAVE_GSBI10_QUP 563
+#define	MSM_BUS_SLAVE_GSBI11_QUP 564
+#define	MSM_BUS_SLAVE_GSBI12_QUP 565
+#define	MSM_BUS_SLAVE_EBI2_NAND 566
+#define	MSM_BUS_SLAVE_EBI2_CS0 567
+#define	MSM_BUS_SLAVE_EBI2_CS1 568
+#define	MSM_BUS_SLAVE_EBI2_CS2 569
+#define	MSM_BUS_SLAVE_EBI2_CS3 570
+#define	MSM_BUS_SLAVE_EBI2_CS4 571
+#define	MSM_BUS_SLAVE_EBI2_CS5 572
+#define	MSM_BUS_SLAVE_USB_FS1 573
+#define	MSM_BUS_SLAVE_USB_FS2 574
+#define	MSM_BUS_SLAVE_TSIF 575
+#define	MSM_BUS_SLAVE_MSM_TSSC 576
+#define	MSM_BUS_SLAVE_MSM_PDM 577
+#define	MSM_BUS_SLAVE_MSM_DIMEM 578
+#define	MSM_BUS_SLAVE_MSM_TCSR 579
+#define	MSM_BUS_SLAVE_MSM_PRNG 580
+#define	MSM_BUS_SLAVE_GSS 581
+#define	MSM_BUS_SLAVE_SATA 582
+#define	MSM_BUS_SLAVE_USB3 583
+#define	MSM_BUS_SLAVE_WCSS 584
+#define	MSM_BUS_SLAVE_OCIMEM 585
+#define	MSM_BUS_SLAVE_SNOC_OCMEM 586
+#define	MSM_BUS_SLAVE_SERVICE_SNOC 587
+#define	MSM_BUS_SLAVE_QDSS_STM 588
+#define	MSM_BUS_SLAVE_CAMERA_CFG 589
+#define	MSM_BUS_SLAVE_DISPLAY_CFG 590
+#define	MSM_BUS_SLAVE_OCMEM_CFG 591
+#define	MSM_BUS_SLAVE_CPR_CFG 592
+#define	MSM_BUS_SLAVE_CPR_XPU_CFG 593
+#define	MSM_BUS_SLAVE_MISC_CFG 594
+#define	MSM_BUS_SLAVE_MISC_XPU_CFG 595
+#define	MSM_BUS_SLAVE_VENUS_CFG 596
+#define	MSM_BUS_SLAVE_MISC_VENUS_CFG 597
+#define	MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
+#define	MSM_BUS_SLAVE_MMSS_CLK_CFG 599
+#define	MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
+#define	MSM_BUS_SLAVE_MNOC_MPU_CFG 601
+#define	MSM_BUS_SLAVE_ONOC_MPU_CFG 602
+#define	MSM_BUS_SLAVE_SERVICE_MNOC 603
+#define	MSM_BUS_SLAVE_OCMEM 604
+#define	MSM_BUS_SLAVE_SERVICE_ONOC 605
+#define	MSM_BUS_SLAVE_SDCC_1 606
+#define	MSM_BUS_SLAVE_SDCC_3 607
+#define	MSM_BUS_SLAVE_SDCC_2 608
+#define	MSM_BUS_SLAVE_SDCC_4 609
+#define	MSM_BUS_SLAVE_BAM_DMA 610
+#define	MSM_BUS_SLAVE_BLSP_2 611
+#define	MSM_BUS_SLAVE_USB_HSIC 612
+#define	MSM_BUS_SLAVE_BLSP_1 613
+#define	MSM_BUS_SLAVE_USB_HS 614
+#define	MSM_BUS_SLAVE_PDM 615
+#define	MSM_BUS_SLAVE_PERIPH_APU_CFG 616
+#define	MSM_BUS_SLAVE_PNOC_MPU_CFG 617
+#define	MSM_BUS_SLAVE_PRNG 618
+#define	MSM_BUS_SLAVE_SERVICE_PNOC 619
+#define	MSM_BUS_SLAVE_CLK_CTL 620
+#define	MSM_BUS_SLAVE_CNOC_MSS 621
+#define	MSM_BUS_SLAVE_SECURITY 622
+#define	MSM_BUS_SLAVE_TCSR 623
+#define	MSM_BUS_SLAVE_TLMM 624
+#define	MSM_BUS_SLAVE_CRYPTO_0_CFG 625
+#define	MSM_BUS_SLAVE_CRYPTO_1_CFG 626
+#define	MSM_BUS_SLAVE_IMEM_CFG 627
+#define	MSM_BUS_SLAVE_MESSAGE_RAM 628
+#define	MSM_BUS_SLAVE_BIMC_CFG 629
+#define	MSM_BUS_SLAVE_BOOT_ROM 630
+#define	MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
+#define	MSM_BUS_SLAVE_PMIC_ARB 632
+#define	MSM_BUS_SLAVE_SPDM_WRAPPER 633
+#define	MSM_BUS_SLAVE_DEHR_CFG 634
+#define	MSM_BUS_SLAVE_QDSS_CFG 635
+#define	MSM_BUS_SLAVE_RBCPR_CFG 636
+#define	MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
+#define	MSM_BUS_SLAVE_SNOC_MPU_CFG 638
+#define	MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
+#define	MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
+#define	MSM_BUS_SLAVE_PNOC_CFG 641
+#define	MSM_BUS_SLAVE_SNOC_CFG 642
+#define	MSM_BUS_SLAVE_EBI1_DLL_CFG 643
+#define	MSM_BUS_SLAVE_PHY_APU_CFG 644
+#define	MSM_BUS_SLAVE_EBI1_PHY_CFG 645
+#define	MSM_BUS_SLAVE_SERVICE_CNOC 646
+#define	MSM_BUS_SLAVE_IPS_CFG 647
+#define	MSM_BUS_SLAVE_QPIC 648
+#define	MSM_BUS_SLAVE_DSI_CFG 649
+#define	MSM_BUS_SLAVE_UFS_CFG 650
+#define	MSM_BUS_SLAVE_RBCPR_CX_CFG 651
+#define	MSM_BUS_SLAVE_RBCPR_MX_CFG 652
+#define	MSM_BUS_SLAVE_PCIE_CFG 653
+#define	MSM_BUS_SLAVE_USB_PHYS_CFG 654
+#define	MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
+#define	MSM_BUS_SLAVE_AVSYNC_CFG 656
+#define	MSM_BUS_SLAVE_CRYPTO_2_CFG 657
+#define	MSM_BUS_SLAVE_VPU_CFG 658
+#define	MSM_BUS_SLAVE_BCAST_CFG 659
+#define	MSM_BUS_SLAVE_KLM_CFG 660
+#define	MSM_BUS_SLAVE_GENI_IR_CFG 661
+#define	MSM_BUS_SLAVE_OCMEM_GFX 662
+#define	MSM_BUS_SLAVE_CATS_128 663
+#define	MSM_BUS_SLAVE_OCMEM_64 664
+#define	MSM_BUS_SLAVE_PCIE_0 665
+#define	MSM_BUS_SLAVE_PCIE_1 666
+#define	MSM_BUS_SLAVE_PCIE_0_CFG 667
+#define	MSM_BUS_SLAVE_PCIE_1_CFG 668
+#define	MSM_BUS_SLAVE_SRVC_MNOC 669
+#define	MSM_BUS_SLAVE_USB_HS2 670
+#define	MSM_BUS_SLAVE_AUDIO 671
+#define	MSM_BUS_SLAVE_TCU 672
+#define	MSM_BUS_SLAVE_APPSS 673
+#define	MSM_BUS_SLAVE_PCIE_PARF 674
+#define	MSM_BUS_SLAVE_USB3_PHY_CFG 675
+#define	MSM_BUS_SLAVE_IPA_CFG 676
+#define	MSM_BUS_SLAVE_A0NOC_SNOC 677
+#define	MSM_BUS_SLAVE_A1NOC_SNOC 678
+#define	MSM_BUS_SLAVE_A2NOC_SNOC 679
+#define	MSM_BUS_SLAVE_HMSS_L3 680
+#define	MSM_BUS_SLAVE_PIMEM_CFG 681
+#define	MSM_BUS_SLAVE_DCC_CFG 682
+#define	MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
+#define	MSM_BUS_SLAVE_PCIE_2_CFG 684
+#define	MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
+#define	MSM_BUS_SLAVE_A0NOC_CFG 686
+#define	MSM_BUS_SLAVE_A1NOC_CFG 687
+#define	MSM_BUS_SLAVE_A2NOC_CFG 688
+#define	MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
+#define	MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
+#define	MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
+#define	MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
+#define	MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
+#define	MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
+#define	MSM_BUS_SLAVE_MMAGIC_CFG 695
+#define	MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
+#define	MSM_BUS_SLAVE_SSC_CFG 697
+#define	MSM_BUS_SLAVE_DSA_CFG 698
+#define	MSM_BUS_SLAVE_DSA_MPU_CFG 699
+#define	MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
+#define	MSM_BUS_SLAVE_SMMU_CPP_CFG 701
+#define	MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
+#define	MSM_BUS_SLAVE_SMMU_MDP_CFG 703
+#define	MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
+#define	MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
+#define	MSM_BUS_SLAVE_SMMU_VFE_CFG 706
+#define	MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
+#define	MSM_BUS_SLAVE_VMEM_CFG 708
+#define	MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
+#define	MSM_BUS_SLAVE_VMEM 710
+#define	MSM_BUS_SLAVE_AHB2PHY 711
+#define	MSM_BUS_SLAVE_PIMEM 712
+#define	MSM_BUS_SLAVE_SNOC_VMEM 713
+#define	MSM_BUS_SLAVE_PCIE_2 714
+#define	MSM_BUS_SLAVE_RBCPR_MX 715
+#define	MSM_BUS_SLAVE_RBCPR_CX 716
+#define	MSM_BUS_SLAVE_BIMC_PCNOC 717
+#define	MSM_BUS_SLAVE_PCNOC_BIMC_1 718
+#define	MSM_BUS_SLAVE_SGMII 719
+#define	MSM_BUS_SLAVE_SPMI_FETCHER 720
+#define	MSM_BUS_PNOC_SLV_6 721
+#define	MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
+#define	MSM_BUS_SLAVE_WLAN 723
+#define	MSM_BUS_SLAVE_CRVIRT_A2NOC 724
+#define	MSM_BUS_SLAVE_CNOC_A2NOC 725
+#define	MSM_BUS_SLAVE_GLM 726
+#define	MSM_BUS_SLAVE_GNOC_BIMC 727
+#define	MSM_BUS_SLAVE_GNOC_SNOC 728
+#define	MSM_BUS_SLAVE_QM_CFG 729
+#define	MSM_BUS_SLAVE_TLMM_EAST 730
+#define	MSM_BUS_SLAVE_TLMM_NORTH 731
+#define	MSM_BUS_SLAVE_TLMM_WEST 732
+#define	MSM_BUS_SLAVE_SKL 733
+#define	MSM_BUS_SLAVE_LAST 734
+
+#define	MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM  MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
+#define	MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
+
+/*
+ * ID's used in RPM messages
+ */
+#define	ICBID_MASTER_APPSS_PROC 0
+#define	ICBID_MASTER_MSS_PROC 1
+#define	ICBID_MASTER_MNOC_BIMC 2
+#define	ICBID_MASTER_SNOC_BIMC 3
+#define	ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
+#define	ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
+#define	ICBID_MASTER_CNOC_MNOC_CFG 5
+#define	ICBID_MASTER_GFX3D 6
+#define	ICBID_MASTER_JPEG 7
+#define	ICBID_MASTER_MDP 8
+#define	ICBID_MASTER_MDP0 ICBID_MASTER_MDP
+#define	ICBID_MASTER_MDPS ICBID_MASTER_MDP
+#define	ICBID_MASTER_VIDEO 9
+#define	ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
+#define	ICBID_MASTER_VIDEO_P1 10
+#define	ICBID_MASTER_VFE 11
+#define	ICBID_MASTER_VFE0 ICBID_MASTER_VFE
+#define	ICBID_MASTER_CNOC_ONOC_CFG 12
+#define	ICBID_MASTER_JPEG_OCMEM 13
+#define	ICBID_MASTER_MDP_OCMEM 14
+#define	ICBID_MASTER_VIDEO_P0_OCMEM 15
+#define	ICBID_MASTER_VIDEO_P1_OCMEM 16
+#define	ICBID_MASTER_VFE_OCMEM 17
+#define	ICBID_MASTER_LPASS_AHB 18
+#define	ICBID_MASTER_QDSS_BAM 19
+#define	ICBID_MASTER_SNOC_CFG 20
+#define	ICBID_MASTER_BIMC_SNOC 21
+#define	ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
+#define	ICBID_MASTER_CNOC_SNOC 22
+#define	ICBID_MASTER_CRYPTO 23
+#define	ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
+#define	ICBID_MASTER_CRYPTO_CORE1 24
+#define	ICBID_MASTER_LPASS_PROC 25
+#define	ICBID_MASTER_MSS 26
+#define	ICBID_MASTER_MSS_NAV 27
+#define	ICBID_MASTER_OCMEM_DMA 28
+#define	ICBID_MASTER_PNOC_SNOC 29
+#define	ICBID_MASTER_WCSS 30
+#define	ICBID_MASTER_QDSS_ETR 31
+#define	ICBID_MASTER_USB3 32
+#define	ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
+#define	ICBID_MASTER_SDCC_1 33
+#define	ICBID_MASTER_SDCC_3 34
+#define	ICBID_MASTER_SDCC_2 35
+#define	ICBID_MASTER_SDCC_4 36
+#define	ICBID_MASTER_TSIF 37
+#define	ICBID_MASTER_BAM_DMA 38
+#define	ICBID_MASTER_BLSP_2 39
+#define	ICBID_MASTER_USB_HSIC 40
+#define	ICBID_MASTER_BLSP_1 41
+#define	ICBID_MASTER_USB_HS 42
+#define	ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
+#define	ICBID_MASTER_PNOC_CFG 43
+#define	ICBID_MASTER_SNOC_PNOC 44
+#define	ICBID_MASTER_RPM_INST 45
+#define	ICBID_MASTER_RPM_DATA 46
+#define	ICBID_MASTER_RPM_SYS 47
+#define	ICBID_MASTER_DEHR 48
+#define	ICBID_MASTER_QDSS_DAP 49
+#define	ICBID_MASTER_SPDM 50
+#define	ICBID_MASTER_TIC 51
+#define	ICBID_MASTER_SNOC_CNOC 52
+#define	ICBID_MASTER_GFX3D_OCMEM 53
+#define	ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
+#define	ICBID_MASTER_OVIRT_SNOC 54
+#define	ICBID_MASTER_SNOC_OVIRT 55
+#define	ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
+#define	ICBID_MASTER_ONOC_OVIRT 56
+#define	ICBID_MASTER_USB_HS2 57
+#define	ICBID_MASTER_QPIC 58
+#define	ICBID_MASTER_IPA 59
+#define	ICBID_MASTER_DSI 60
+#define	ICBID_MASTER_MDP1 61
+#define	ICBID_MASTER_MDPE ICBID_MASTER_MDP1
+#define	ICBID_MASTER_VPU_PROC 62
+#define	ICBID_MASTER_VPU 63
+#define	ICBID_MASTER_VPU0 ICBID_MASTER_VPU
+#define	ICBID_MASTER_CRYPTO_CORE2 64
+#define	ICBID_MASTER_PCIE_0 65
+#define	ICBID_MASTER_PCIE_1 66
+#define	ICBID_MASTER_SATA 67
+#define	ICBID_MASTER_UFS 68
+#define	ICBID_MASTER_USB3_1 69
+#define	ICBID_MASTER_VIDEO_OCMEM 70
+#define	ICBID_MASTER_VPU1 71
+#define	ICBID_MASTER_VCAP 72
+#define	ICBID_MASTER_EMAC 73
+#define	ICBID_MASTER_BCAST 74
+#define	ICBID_MASTER_MMSS_PROC 75
+#define	ICBID_MASTER_SNOC_BIMC_1 76
+#define	ICBID_MASTER_SNOC_PCNOC 77
+#define	ICBID_MASTER_AUDIO 78
+#define	ICBID_MASTER_MM_INT_0 79
+#define	ICBID_MASTER_MM_INT_1 80
+#define	ICBID_MASTER_MM_INT_2 81
+#define	ICBID_MASTER_MM_INT_BIMC 82
+#define	ICBID_MASTER_MSS_INT 83
+#define	ICBID_MASTER_PCNOC_CFG 84
+#define	ICBID_MASTER_PCNOC_INT_0 85
+#define	ICBID_MASTER_PCNOC_INT_1 86
+#define	ICBID_MASTER_PCNOC_M_0 87
+#define	ICBID_MASTER_PCNOC_M_1 88
+#define	ICBID_MASTER_PCNOC_S_0 89
+#define	ICBID_MASTER_PCNOC_S_1 90
+#define	ICBID_MASTER_PCNOC_S_2 91
+#define	ICBID_MASTER_PCNOC_S_3 92
+#define	ICBID_MASTER_PCNOC_S_4 93
+#define	ICBID_MASTER_PCNOC_S_6 94
+#define	ICBID_MASTER_PCNOC_S_7 95
+#define	ICBID_MASTER_PCNOC_S_8 96
+#define	ICBID_MASTER_PCNOC_S_9 97
+#define	ICBID_MASTER_QDSS_INT 98
+#define	ICBID_MASTER_SNOC_INT_0	99
+#define	ICBID_MASTER_SNOC_INT_1 100
+#define	ICBID_MASTER_SNOC_INT_BIMC 101
+#define	ICBID_MASTER_TCU_0 102
+#define	ICBID_MASTER_TCU_1 103
+#define	ICBID_MASTER_BIMC_INT_0 104
+#define	ICBID_MASTER_BIMC_INT_1 105
+#define	ICBID_MASTER_CAMERA 106
+#define	ICBID_MASTER_RICA 107
+#define	ICBID_MASTER_SNOC_BIMC_2 108
+#define	ICBID_MASTER_BIMC_SNOC_1 109
+#define	ICBID_MASTER_A0NOC_SNOC 110
+#define	ICBID_MASTER_A1NOC_SNOC 111
+#define	ICBID_MASTER_A2NOC_SNOC 112
+#define	ICBID_MASTER_PIMEM 113
+#define	ICBID_MASTER_SNOC_VMEM 114
+#define	ICBID_MASTER_CPP 115
+#define	ICBID_MASTER_CNOC_A1NOC 116
+#define	ICBID_MASTER_PNOC_A1NOC 117
+#define	ICBID_MASTER_HMSS 118
+#define	ICBID_MASTER_PCIE_2 119
+#define	ICBID_MASTER_ROTATOR 120
+#define	ICBID_MASTER_VENUS_VMEM 121
+#define	ICBID_MASTER_DCC 122
+#define	ICBID_MASTER_MCDMA 123
+#define	ICBID_MASTER_PCNOC_INT_2 124
+#define	ICBID_MASTER_PCNOC_INT_3 125
+#define	ICBID_MASTER_PCNOC_INT_4 126
+#define	ICBID_MASTER_PCNOC_INT_5 127
+#define	ICBID_MASTER_PCNOC_INT_6 128
+#define	ICBID_MASTER_PCNOC_S_5 129
+#define	ICBID_MASTER_SENSORS_AHB 130
+#define	ICBID_MASTER_SENSORS_PROC 131
+#define	ICBID_MASTER_QSPI 132
+#define	ICBID_MASTER_VFE1 133
+#define	ICBID_MASTER_SNOC_INT_2 134
+#define	ICBID_MASTER_SMMNOC_BIMC 135
+#define	ICBID_MASTER_CRVIRT_A1NOC 136
+#define	ICBID_MASTER_XM_USB_HS1 137
+#define	ICBID_MASTER_XI_USB_HS1 138
+#define	ICBID_MASTER_PCNOC_BIMC_1 139
+#define	ICBID_MASTER_BIMC_PCNOC 140
+#define	ICBID_MASTER_XI_HSIC 141
+#define	ICBID_MASTER_SGMII  142
+#define	ICBID_MASTER_SPMI_FETCHER 143
+#define	ICBID_MASTER_GNOC_BIMC 144
+#define	ICBID_MASTER_CRVIRT_A2NOC 145
+#define	ICBID_MASTER_CNOC_A2NOC 146
+#define	ICBID_MASTER_WLAN 147
+#define	ICBID_MASTER_MSS_CE 148
+
+#define	ICBID_SLAVE_EBI1 0
+#define	ICBID_SLAVE_APPSS_L2 1
+#define	ICBID_SLAVE_BIMC_SNOC 2
+#define	ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
+#define	ICBID_SLAVE_CAMERA_CFG 3
+#define	ICBID_SLAVE_DISPLAY_CFG 4
+#define	ICBID_SLAVE_OCMEM_CFG 5
+#define	ICBID_SLAVE_CPR_CFG 6
+#define	ICBID_SLAVE_CPR_XPU_CFG 7
+#define	ICBID_SLAVE_MISC_CFG 8
+#define	ICBID_SLAVE_MISC_XPU_CFG 9
+#define	ICBID_SLAVE_VENUS_CFG 10
+#define	ICBID_SLAVE_GFX3D_CFG 11
+#define	ICBID_SLAVE_MMSS_CLK_CFG 12
+#define	ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
+#define	ICBID_SLAVE_MNOC_MPU_CFG 14
+#define	ICBID_SLAVE_ONOC_MPU_CFG 15
+#define	ICBID_SLAVE_MNOC_BIMC 16
+#define	ICBID_SLAVE_SERVICE_MNOC 17
+#define	ICBID_SLAVE_OCMEM 18
+#define	ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
+#define	ICBID_SLAVE_SERVICE_ONOC 19
+#define	ICBID_SLAVE_APPSS 20
+#define	ICBID_SLAVE_LPASS 21
+#define	ICBID_SLAVE_USB3 22
+#define	ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
+#define	ICBID_SLAVE_WCSS 23
+#define	ICBID_SLAVE_SNOC_BIMC 24
+#define	ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
+#define	ICBID_SLAVE_SNOC_CNOC 25
+#define	ICBID_SLAVE_IMEM 26
+#define	ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
+#define	ICBID_SLAVE_SNOC_OVIRT 27
+#define	ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
+#define	ICBID_SLAVE_SNOC_PNOC 28
+#define	ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
+#define	ICBID_SLAVE_SERVICE_SNOC 29
+#define	ICBID_SLAVE_QDSS_STM 30
+#define	ICBID_SLAVE_SDCC_1 31
+#define	ICBID_SLAVE_SDCC_3 32
+#define	ICBID_SLAVE_SDCC_2 33
+#define	ICBID_SLAVE_SDCC_4 34
+#define	ICBID_SLAVE_TSIF 35
+#define	ICBID_SLAVE_BAM_DMA 36
+#define	ICBID_SLAVE_BLSP_2 37
+#define	ICBID_SLAVE_USB_HSIC 38
+#define	ICBID_SLAVE_BLSP_1 39
+#define	ICBID_SLAVE_USB_HS 40
+#define	ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
+#define	ICBID_SLAVE_PDM 41
+#define	ICBID_SLAVE_PERIPH_APU_CFG 42
+#define	ICBID_SLAVE_PNOC_MPU_CFG 43
+#define	ICBID_SLAVE_PRNG 44
+#define	ICBID_SLAVE_PNOC_SNOC 45
+#define	ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
+#define	ICBID_SLAVE_SERVICE_PNOC 46
+#define	ICBID_SLAVE_CLK_CTL 47
+#define	ICBID_SLAVE_CNOC_MSS 48
+#define	ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
+#define	ICBID_SLAVE_SECURITY 49
+#define	ICBID_SLAVE_TCSR 50
+#define	ICBID_SLAVE_TLMM 51
+#define	ICBID_SLAVE_CRYPTO_0_CFG 52
+#define	ICBID_SLAVE_CRYPTO_1_CFG 53
+#define	ICBID_SLAVE_IMEM_CFG 54
+#define	ICBID_SLAVE_MESSAGE_RAM 55
+#define	ICBID_SLAVE_BIMC_CFG 56
+#define	ICBID_SLAVE_BOOT_ROM 57
+#define	ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
+#define	ICBID_SLAVE_PMIC_ARB 59
+#define	ICBID_SLAVE_SPDM_WRAPPER 60
+#define	ICBID_SLAVE_DEHR_CFG 61
+#define	ICBID_SLAVE_MPM 62
+#define	ICBID_SLAVE_QDSS_CFG 63
+#define	ICBID_SLAVE_RBCPR_CFG 64
+#define	ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
+#define	ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
+#define	ICBID_SLAVE_CNOC_MNOC_CFG 66
+#define	ICBID_SLAVE_SNOC_MPU_CFG 67
+#define	ICBID_SLAVE_CNOC_ONOC_CFG 68
+#define	ICBID_SLAVE_PNOC_CFG 69
+#define	ICBID_SLAVE_SNOC_CFG 70
+#define	ICBID_SLAVE_EBI1_DLL_CFG 71
+#define	ICBID_SLAVE_PHY_APU_CFG 72
+#define	ICBID_SLAVE_EBI1_PHY_CFG 73
+#define	ICBID_SLAVE_RPM 74
+#define	ICBID_SLAVE_CNOC_SNOC 75
+#define	ICBID_SLAVE_SERVICE_CNOC 76
+#define	ICBID_SLAVE_OVIRT_SNOC 77
+#define	ICBID_SLAVE_OVIRT_OCMEM 78
+#define	ICBID_SLAVE_USB_HS2 79
+#define	ICBID_SLAVE_QPIC 80
+#define	ICBID_SLAVE_IPS_CFG 81
+#define	ICBID_SLAVE_DSI_CFG 82
+#define	ICBID_SLAVE_USB3_1 83
+#define	ICBID_SLAVE_PCIE_0 84
+#define	ICBID_SLAVE_PCIE_1 85
+#define	ICBID_SLAVE_PSS_SMMU_CFG 86
+#define	ICBID_SLAVE_CRYPTO_2_CFG 87
+#define	ICBID_SLAVE_PCIE_0_CFG 88
+#define	ICBID_SLAVE_PCIE_1_CFG 89
+#define	ICBID_SLAVE_SATA_CFG 90
+#define	ICBID_SLAVE_SPSS_GENI_IR 91
+#define	ICBID_SLAVE_UFS_CFG 92
+#define	ICBID_SLAVE_AVSYNC_CFG 93
+#define	ICBID_SLAVE_VPU_CFG 94
+#define	ICBID_SLAVE_USB_PHY_CFG 95
+#define	ICBID_SLAVE_RBCPR_MX_CFG 96
+#define	ICBID_SLAVE_PCIE_PARF 97
+#define	ICBID_SLAVE_VCAP_CFG 98
+#define	ICBID_SLAVE_EMAC_CFG 99
+#define	ICBID_SLAVE_BCAST_CFG 100
+#define	ICBID_SLAVE_KLM_CFG 101
+#define	ICBID_SLAVE_DISPLAY_PWM 102
+#define	ICBID_SLAVE_GENI 103
+#define	ICBID_SLAVE_SNOC_BIMC_1 104
+#define	ICBID_SLAVE_AUDIO 105
+#define	ICBID_SLAVE_CATS_0 106
+#define	ICBID_SLAVE_CATS_1 107
+#define	ICBID_SLAVE_MM_INT_0 108
+#define	ICBID_SLAVE_MM_INT_1 109
+#define	ICBID_SLAVE_MM_INT_2 110
+#define	ICBID_SLAVE_MM_INT_BIMC 111
+#define	ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
+#define	ICBID_SLAVE_MSS_INT 113
+#define	ICBID_SLAVE_PCNOC_INT_0 114
+#define	ICBID_SLAVE_PCNOC_INT_1 115
+#define	ICBID_SLAVE_PCNOC_M_0 116
+#define	ICBID_SLAVE_PCNOC_M_1 117
+#define	ICBID_SLAVE_PCNOC_S_0 118
+#define	ICBID_SLAVE_PCNOC_S_1 119
+#define	ICBID_SLAVE_PCNOC_S_2 120
+#define	ICBID_SLAVE_PCNOC_S_3 121
+#define	ICBID_SLAVE_PCNOC_S_4 122
+#define	ICBID_SLAVE_PCNOC_S_6 123
+#define	ICBID_SLAVE_PCNOC_S_7 124
+#define	ICBID_SLAVE_PCNOC_S_8 125
+#define	ICBID_SLAVE_PCNOC_S_9 126
+#define	ICBID_SLAVE_PRNG_XPU_CFG 127
+#define	ICBID_SLAVE_QDSS_INT 128
+#define	ICBID_SLAVE_RPM_XPU_CFG 129
+#define	ICBID_SLAVE_SNOC_INT_0 130
+#define	ICBID_SLAVE_SNOC_INT_1 131
+#define	ICBID_SLAVE_SNOC_INT_BIMC 132
+#define	ICBID_SLAVE_TCU 133
+#define	ICBID_SLAVE_BIMC_INT_0 134
+#define	ICBID_SLAVE_BIMC_INT_1 135
+#define	ICBID_SLAVE_RICA_CFG 136
+#define	ICBID_SLAVE_SNOC_BIMC_2 137
+#define	ICBID_SLAVE_BIMC_SNOC_1 138
+#define	ICBID_SLAVE_PNOC_A1NOC 139
+#define	ICBID_SLAVE_SNOC_VMEM 140
+#define	ICBID_SLAVE_A0NOC_SNOC 141
+#define	ICBID_SLAVE_A1NOC_SNOC 142
+#define	ICBID_SLAVE_A2NOC_SNOC 143
+#define	ICBID_SLAVE_A0NOC_CFG 144
+#define	ICBID_SLAVE_A0NOC_MPU_CFG 145
+#define	ICBID_SLAVE_A0NOC_SMMU_CFG 146
+#define	ICBID_SLAVE_A1NOC_CFG 147
+#define	ICBID_SLAVE_A1NOC_MPU_CFG 148
+#define	ICBID_SLAVE_A1NOC_SMMU_CFG 149
+#define	ICBID_SLAVE_A2NOC_CFG 150
+#define	ICBID_SLAVE_A2NOC_MPU_CFG 151
+#define	ICBID_SLAVE_A2NOC_SMMU_CFG 152
+#define	ICBID_SLAVE_AHB2PHY 153
+#define	ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
+#define	ICBID_SLAVE_DCC_CFG 155
+#define	ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
+#define	ICBID_SLAVE_DSA_CFG 157
+#define	ICBID_SLAVE_DSA_MPU_CFG 158
+#define	ICBID_SLAVE_SSC_MPU_CFG 159
+#define	ICBID_SLAVE_HMSS_L3 160
+#define	ICBID_SLAVE_LPASS_SMMU_CFG 161
+#define	ICBID_SLAVE_MMAGIC_CFG 162
+#define	ICBID_SLAVE_PCIE20_AHB2PHY 163
+#define	ICBID_SLAVE_PCIE_2 164
+#define	ICBID_SLAVE_PCIE_2_CFG 165
+#define	ICBID_SLAVE_PIMEM 166
+#define	ICBID_SLAVE_PIMEM_CFG 167
+#define	ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
+#define	ICBID_SLAVE_RBCPR_CX 169
+#define	ICBID_SLAVE_RBCPR_MX 170
+#define	ICBID_SLAVE_SMMU_CPP_CFG 171
+#define	ICBID_SLAVE_SMMU_JPEG_CFG 172
+#define	ICBID_SLAVE_SMMU_MDP_CFG 173
+#define	ICBID_SLAVE_SMMU_ROTATOR_CFG 174
+#define	ICBID_SLAVE_SMMU_VENUS_CFG 175
+#define	ICBID_SLAVE_SMMU_VFE_CFG 176
+#define	ICBID_SLAVE_SSC_CFG 177
+#define	ICBID_SLAVE_VENUS_THROTTLE_CFG 178
+#define	ICBID_SLAVE_VMEM 179
+#define	ICBID_SLAVE_VMEM_CFG 180
+#define	ICBID_SLAVE_QDSS_MPU_CFG 181
+#define	ICBID_SLAVE_USB3_PHY_CFG 182
+#define	ICBID_SLAVE_IPA_CFG 183
+#define	ICBID_SLAVE_PCNOC_INT_2 184
+#define	ICBID_SLAVE_PCNOC_INT_3 185
+#define	ICBID_SLAVE_PCNOC_INT_4 186
+#define	ICBID_SLAVE_PCNOC_INT_5 187
+#define	ICBID_SLAVE_PCNOC_INT_6 188
+#define	ICBID_SLAVE_PCNOC_S_5 189
+#define	ICBID_SLAVE_QSPI 190
+#define	ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
+#define	ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
+#define	ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
+#define	ICBID_SLAVE_MSS_MPU_CFG 194
+#define	ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
+#define	ICBID_SLAVE_SKL 196
+#define	ICBID_SLAVE_SNOC_INT_2 197
+#define	ICBID_SLAVE_SMMNOC_BIMC 198
+#define	ICBID_SLAVE_CRVIRT_A1NOC 199
+#define	ICBID_SLAVE_SGMII 200
+#define	ICBID_SLAVE_QHS4_APPS 201
+#define	ICBID_SLAVE_BIMC_PCNOC 202
+#define	ICBID_SLAVE_PCNOC_BIMC_1 203
+#define	ICBID_SLAVE_SPMI_FETCHER 204
+#define	ICBID_SLAVE_MMSS_SMMU_CFG 205
+#define	ICBID_SLAVE_WLAN 206
+#define	ICBID_SLAVE_CRVIRT_A2NOC 207
+#define	ICBID_SLAVE_CNOC_A2NOC 208
+#define	ICBID_SLAVE_GLM 209
+#define	ICBID_SLAVE_GNOC_BIMC 210
+#define	ICBID_SLAVE_GNOC_SNOC 211
+#define	ICBID_SLAVE_QM_CFG 212
+#define	ICBID_SLAVE_TLMM_EAST 213
+#define	ICBID_SLAVE_TLMM_NORTH 214
+#define	ICBID_SLAVE_TLMM_WEST 215
+#endif
diff --git a/include/dt-bindings/msm/msm-bus-rule-ops.h b/include/dt-bindings/msm/msm-bus-rule-ops.h
new file mode 100644
index 0000000..516d8f4
--- /dev/null
+++ b/include/dt-bindings/msm/msm-bus-rule-ops.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_BUS_RULE_OPS_H
+#define __MSM_BUS_RULE_OPS_H
+
+#define FLD_IB	0
+#define FLD_AB	1
+#define FLD_CLK	2
+
+#define OP_LE	0
+#define OP_LT	1
+#define OP_GE	2
+#define OP_GT	3
+#define OP_NOOP	4
+
+#define RULE_STATE_NOT_APPLIED	0
+#define RULE_STATE_APPLIED	1
+
+#define THROTTLE_ON	0
+#define THROTTLE_OFF	1
+#define THROTTLE_REG	2
+
+
+#endif
diff --git a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
index aafa76c..64e2dc7 100644
--- a/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
+++ b/include/dt-bindings/pinctrl/qcom,pmic-gpio.h
@@ -89,15 +89,30 @@
 #define PMA8084_GPIO_S4			2
 #define PMA8084_GPIO_L6			3
 
+/* ATEST MUX selection for analog-pass-through mode */
+#define PMIC_GPIO_AOUT_ATEST1		0
+#define PMIC_GPIO_AOUT_ATEST2		1
+#define PMIC_GPIO_AOUT_ATEST3		2
+#define PMIC_GPIO_AOUT_ATEST4		3
+
+/* DTEST buffer for digital input mode */
+#define PMIC_GPIO_DIN_DTEST1		0
+#define PMIC_GPIO_DIN_DTEST2		1
+#define PMIC_GPIO_DIN_DTEST3		2
+#define PMIC_GPIO_DIN_DTEST4		3
+
 /* To be used with "function" */
 #define PMIC_GPIO_FUNC_NORMAL		"normal"
 #define PMIC_GPIO_FUNC_PAIRED		"paired"
 #define PMIC_GPIO_FUNC_FUNC1		"func1"
 #define PMIC_GPIO_FUNC_FUNC2		"func2"
+#define PMIC_GPIO_FUNC_FUNC3		"func3"
+#define PMIC_GPIO_FUNC_FUNC4		"func4"
 #define PMIC_GPIO_FUNC_DTEST1		"dtest1"
 #define PMIC_GPIO_FUNC_DTEST2		"dtest2"
 #define PMIC_GPIO_FUNC_DTEST3		"dtest3"
 #define PMIC_GPIO_FUNC_DTEST4		"dtest4"
+#define PMIC_GPIO_FUNC_ANALOG		"analog"
 
 #define PM8038_GPIO1_2_LPG_DRV		PMIC_GPIO_FUNC_FUNC1
 #define PM8038_GPIO3_5V_BOOST_EN	PMIC_GPIO_FUNC_FUNC1
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
new file mode 100644
index 0000000..30a847d
--- /dev/null
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_RPMH_REGULATOR_H
+#define __QCOM_RPMH_REGULATOR_H
+
+/* This offset is needed as 0 is considered an invalid voltage. */
+#define RPMH_REGULATOR_LEVEL_OFFSET	1
+
+#define RPMH_REGULATOR_LEVEL_MIN	(0 + RPMH_REGULATOR_LEVEL_OFFSET)
+
+#define RPMH_REGULATOR_LEVEL_OFF	(0 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_RETENTION	(16 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_MIN_SVS	(48 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_LOW_SVS	(64 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_SVS	(128 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_SVS_L1	(192 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_NOM	(256 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_NOM_L1	(320 + RPMH_REGULATOR_LEVEL_OFFSET)
+#define RPMH_REGULATOR_LEVEL_TURBO	(384 + RPMH_REGULATOR_LEVEL_OFFSET)
+
+#define RPMH_REGULATOR_LEVEL_MAX	(65535 + RPMH_REGULATOR_LEVEL_OFFSET)
+
+#endif
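
A minimal consumer-side sketch, for context only, of how these level constants are typically voted. It assumes the downstream rpmh-regulator interprets regulator_set_voltage() values as performance levels rather than microvolts; the regulator handle and function name are illustrative, not part of this patch.

#include <linux/regulator/consumer.h>
#include <dt-bindings/regulator/qcom,rpmh-regulator.h>

/* Vote for at least the SVS corner on an (assumed) level-based RPMh supply. */
static int example_vote_svs(struct regulator *level_reg)
{
	return regulator_set_voltage(level_reg, RPMH_REGULATOR_LEVEL_SVS,
				     RPMH_REGULATOR_LEVEL_MAX);
}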
diff --git a/include/dt-bindings/soc/qcom,tcs-mbox.h b/include/dt-bindings/soc/qcom,tcs-mbox.h
new file mode 100644
index 0000000..a62869a
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,tcs-mbox.h
@@ -0,0 +1,16 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define SLEEP_TCS	0
+#define WAKE_TCS	1
+#define ACTIVE_TCS	2
+#define CONTROL_TCS	3
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
new file mode 100644
index 0000000..a460889
--- /dev/null
+++ b/include/linux/Kbuild
@@ -0,0 +1,2 @@
+header-y += if_pppolac.h
+header-y += if_pppopns.h
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h
index 8c98113..eff56cb 100644
--- a/include/linux/amba/mmci.h
+++ b/include/linux/amba/mmci.h
@@ -5,6 +5,15 @@
 #define AMBA_MMCI_H
 
 #include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/sdio_func.h>
+
+struct embedded_sdio_data {
+	struct sdio_cis cis;
+	struct sdio_cccr cccr;
+	struct sdio_embedded_func *funcs;
+	int num_funcs;
+};
 
 /**
  * struct mmci_platform_data - platform configuration for the MMCI
@@ -31,6 +40,7 @@
 	int	gpio_wp;
 	int	gpio_cd;
 	bool	cd_invert;
+	struct embedded_sdio_data *embedded_sdio;
 };
 
 #endif
diff --git a/include/linux/android_aid.h b/include/linux/android_aid.h
new file mode 100644
index 0000000..6f1fa179
--- /dev/null
+++ b/include/linux/android_aid.h
@@ -0,0 +1,28 @@
+/* include/linux/android_aid.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_ANDROID_AID_H
+#define _LINUX_ANDROID_AID_H
+
+/* AIDs that the kernel treats differently */
+#define AID_OBSOLETE_000 KGIDT_INIT(3001)  /* was NET_BT_ADMIN */
+#define AID_OBSOLETE_001 KGIDT_INIT(3002)  /* was NET_BT */
+#define AID_INET         KGIDT_INIT(3003)
+#define AID_NET_RAW      KGIDT_INIT(3004)
+#define AID_NET_ADMIN    KGIDT_INIT(3005)
+#define AID_NET_BW_STATS KGIDT_INIT(3006)  /* read bandwidth statistics */
+#define AID_NET_BW_ACCT  KGIDT_INIT(3007)  /* change bandwidth statistics accounting */
+
+#endif
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cd395ec..fc08c407 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -169,7 +169,7 @@
 				 * throttling rules. Don't do it again. */
 
 	/* request only flags */
-	__REQ_SORTED,		/* elevator knows about this request */
+	__REQ_SORTED = __REQ_RAHEAD, /* elevator knows about this request */
 	__REQ_SOFTBARRIER,	/* may not be passed by ioscheduler */
 	__REQ_NOMERGE,		/* don't touch this for merging */
 	__REQ_STARTED,		/* drive already may have started this one */
@@ -189,6 +189,7 @@
 	__REQ_PM,		/* runtime pm request */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
+	__REQ_URGENT,		/* urgent request */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -198,6 +199,7 @@
 #define REQ_SYNC		(1ULL << __REQ_SYNC)
 #define REQ_META		(1ULL << __REQ_META)
 #define REQ_PRIO		(1ULL << __REQ_PRIO)
+#define REQ_URGENT		(1ULL << __REQ_URGENT)
 #define REQ_NOIDLE		(1ULL << __REQ_NOIDLE)
 #define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
 
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index c47c358..53d4ea5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -196,6 +196,9 @@
 
 	/* for bidi */
 	struct request *next_rq;
+
+	ktime_t			lat_hist_io_start;
+	int			lat_hist_enabled;
 };
 
 #define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
@@ -1700,6 +1703,79 @@
 extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
 extern int bdev_dax_supported(struct super_block *, int);
 extern bool bdev_dax_capable(struct block_device *);
+
+/*
+ * X-axis for IO latency histogram support.
+ */
+static const u_int64_t latency_x_axis_us[] = {
+	100,
+	200,
+	300,
+	400,
+	500,
+	600,
+	700,
+	800,
+	900,
+	1000,
+	1200,
+	1400,
+	1600,
+	1800,
+	2000,
+	2500,
+	3000,
+	4000,
+	5000,
+	6000,
+	7000,
+	9000,
+	10000
+};
+
+#define BLK_IO_LAT_HIST_DISABLE         0
+#define BLK_IO_LAT_HIST_ENABLE          1
+#define BLK_IO_LAT_HIST_ZERO            2
+
+struct io_latency_state {
+	u_int64_t	latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
+	u_int64_t	latency_reads_elems;
+	u_int64_t	latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
+	u_int64_t	latency_writes_elems;
+};
+
+static inline void
+blk_update_latency_hist(struct io_latency_state *s,
+			int read,
+			u_int64_t delta_us)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
+		if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
+			if (read)
+				s->latency_y_axis_read[i]++;
+			else
+				s->latency_y_axis_write[i]++;
+			break;
+		}
+	}
+	if (i == ARRAY_SIZE(latency_x_axis_us)) {
+		/* Overflowed the histogram */
+		if (read)
+			s->latency_y_axis_read[i]++;
+		else
+			s->latency_y_axis_write[i]++;
+	}
+	if (read)
+		s->latency_reads_elems++;
+	else
+		s->latency_writes_elems++;
+}
+
+void blk_zero_latency_hist(struct io_latency_state *s);
+ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
+
 #else /* CONFIG_BLOCK */
 
 struct block_device;
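
A minimal sketch of how a block driver might feed the histogram helpers added to blkdev.h above. The my_dev structure, its stats field, the is_read flag and the completion hook are assumptions for illustration, not part of this patch.

#include <linux/blkdev.h>
#include <linux/ktime.h>

struct my_dev {
	struct io_latency_state stats;	/* read/write latency buckets */
};

static void my_dev_account_latency(struct my_dev *dev, int is_read,
				   ktime_t issue_time)
{
	u_int64_t delta_us = ktime_us_delta(ktime_get(), issue_time);

	/* Bucket this completion on the read or write axis. */
	blk_update_latency_hist(&dev->stats, is_read, delta_us);
}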
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 5b17de6..0f4548c 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -441,6 +441,7 @@
 	void (*css_free)(struct cgroup_subsys_state *css);
 	void (*css_reset)(struct cgroup_subsys_state *css);
 
+	int (*allow_attach)(struct cgroup_taskset *tset);
 	int (*can_attach)(struct cgroup_taskset *tset);
 	void (*cancel_attach)(struct cgroup_taskset *tset);
 	void (*attach)(struct cgroup_taskset *tset);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c83c23f..af84de6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -570,6 +570,16 @@
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
+/*
+ * Default Android check for whether the current process is allowed to move a
+ * task across cgroups: the move is allowed if the caller has CAP_SYS_NICE,
+ * shares its uid with the task being moved, or is running as root.
+ * Returns 0 if the move is allowed, or -EACCES otherwise.
+ */
+int subsys_cgroup_allow_attach(struct cgroup_taskset *tset);
+
+
 #else /* !CONFIG_CGROUPS */
 
 struct cgroup_subsys_state;
@@ -596,6 +606,11 @@
 {
 	return true;
 }
+
+static inline int subsys_cgroup_allow_attach(void *tset)
+{
+	return -EINVAL;
+}
 #endif /* !CONFIG_CGROUPS */
 
 /*
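
A minimal sketch of how a controller might wire up the new hook by delegating to the helper above. The subsystem and function names are illustrative, not from this patch.

#include <linux/cgroup.h>

/* Delegate the attach permission check to the common Android policy. */
static int example_allow_attach(struct cgroup_taskset *tset)
{
	return subsys_cgroup_allow_attach(tset);
}

/* In the subsystem definition:
 *	.allow_attach	= example_allow_attach,
 */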
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a428aec..994f52a 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -13,6 +13,7 @@
 
 #include <linux/io.h>
 #include <linux/of.h>
+#include <linux/mutex.h>
 
 #ifdef CONFIG_COMMON_CLK
 
@@ -177,6 +178,8 @@
  *		directory is provided as an argument.  Called with
  *		prepare_lock held.  Returns 0 on success, -EERROR otherwise.
  *
+ * @set_flags: Set custom flags which deal with hardware specifics. Returns 0
+ *	       on success, -EERROR otherwise.
  *
  * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
  * implementations to split any work between atomic (enable) and sleepable
@@ -217,6 +220,7 @@
 	int		(*set_phase)(struct clk_hw *hw, int degrees);
 	void		(*init)(struct clk_hw *hw);
 	int		(*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+	int		(*set_flags)(struct clk_hw *hw, unsigned int flags);
 };
 
 /**
@@ -228,6 +232,9 @@
  * @parent_names: array of string names for all possible parents
  * @num_parents: number of possible parents
  * @flags: framework-level hints and quirks
+ * @vdd_class: voltage scaling requirement class
+ * @rate_max: maximum clock rate in Hz supported at each voltage level
+ * @num_rate_max: number of entries in the @rate_max table
  */
 struct clk_init_data {
 	const char		*name;
@@ -235,8 +242,69 @@
 	const char		* const *parent_names;
 	u8			num_parents;
 	unsigned long		flags;
+	struct clk_vdd_class	*vdd_class;
+	unsigned long		*rate_max;
+	int			num_rate_max;
 };
 
+struct regulator;
+
+/**
+ * struct clk_vdd_class - Voltage scaling class
+ * @class_name: name of the class
+ * @regulator: array of regulators
+ * @num_regulators: size of regulator array. Standard regulator APIs will be
+ *		    used if this field > 0
+ * @set_vdd: function to call when applying a new voltage setting
+ * @vdd_uv: sorted 2D array of legal voltage settings. Indexed by level, then
+ *	    regulator
+ * @level_votes: array of votes for each level
+ * @num_levels: specifies the size of level_votes array
+ * @cur_level: the currently set voltage level
+ * @lock: lock to protect this struct
+ */
+struct clk_vdd_class {
+	const char *class_name;
+	struct regulator **regulator;
+	int num_regulators;
+	int (*set_vdd)(struct clk_vdd_class *v_class, int level);
+	int *vdd_uv;
+	int *level_votes;
+	int num_levels;
+	unsigned long cur_level;
+	struct mutex lock;
+};
+
+#define DEFINE_VDD_CLASS(_name, _set_vdd, _num_levels) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.set_vdd = _set_vdd, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGULATORS(_name, _num_levels, _num_regulators, _vdd_uv) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.vdd_uv = _vdd_uv, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.level_votes = (int [_num_levels]) {}, \
+		.num_levels = _num_levels, \
+		.cur_level = _num_levels, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
+#define DEFINE_VDD_REGS_INIT(_name, _num_regulators) \
+	struct clk_vdd_class _name = { \
+		.class_name = #_name, \
+		.regulator = (struct regulator * [_num_regulators]) {}, \
+		.num_regulators = _num_regulators, \
+		.lock = __MUTEX_INITIALIZER(_name.lock) \
+	}
+
 /**
  * struct clk_hw - handle for traversing from a struct clk to its corresponding
  * hardware-specific structure.  struct clk_hw should be declared within struct
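
A minimal sketch of how a clock provider might describe its voltage requirements with the new fields. The voltage values, rates, names and the omitted ops pointer are placeholders, not from this patch.

#include <linux/clk-provider.h>

static int example_vdd_uv[] = {
	1000000,		/* level 0: low voltage */
	1200000,		/* level 1: nominal voltage */
};

/* Two voltage levels, one regulator. */
static DEFINE_VDD_REGULATORS(vdd_example, 2, 1, example_vdd_uv);

static unsigned long example_rate_max[] = { 100000000, 200000000 };

static struct clk_init_data example_init = {
	.name		= "example_clk",
	/* .ops would point at the provider's clk_ops */
	.vdd_class	= &vdd_example,
	.rate_max	= example_rate_max,
	.num_rate_max	= ARRAY_SIZE(example_rate_max),
};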
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 123c027..1325b23 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -420,6 +420,16 @@
  */
 struct clk *clk_get_sys(const char *dev_id, const char *con_id);
 
+/**
+ * clk_set_flags - set the custom HW specific flags for this clock
+ * @clk: clock source
+ * @flags: custom, hardware-specific flags defined by the clock provider
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int clk_set_flags(struct clk *clk, unsigned long flags);
+
 #else /* !CONFIG_HAVE_CLK */
 
 static inline struct clk *clk_get(struct device *dev, const char *id)
diff --git a/include/linux/clk/qcom.h b/include/linux/clk/qcom.h
new file mode 100644
index 0000000..e2fee60
--- /dev/null
+++ b/include/linux/clk/qcom.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_CLK_QCOM_H_
+#define __LINUX_CLK_QCOM_H_
+
+enum branch_mem_flags {
+	CLKFLAG_RETAIN_PERIPH,
+	CLKFLAG_NORETAIN_PERIPH,
+	CLKFLAG_RETAIN_MEM,
+	CLKFLAG_NORETAIN_MEM,
+	CLKFLAG_PERIPH_OFF_SET,
+	CLKFLAG_PERIPH_OFF_CLEAR,
+};
+
+#endif  /* __LINUX_CLK_QCOM_H_ */
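
A minimal consumer sketch combining clk_set_flags() with the branch memory flags above; the clock handle is assumed to come from clk_get() elsewhere.

#include <linux/clk.h>
#include <linux/clk/qcom.h>

/* Ask the hardware to retain the branch's memory while the clock is off. */
static int example_retain_mem(struct clk *clk)
{
	return clk_set_flags(clk, CLKFLAG_RETAIN_MEM);
}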
diff --git a/include/linux/coresight-cti.h b/include/linux/coresight-cti.h
new file mode 100644
index 0000000..10ef2af
--- /dev/null
+++ b/include/linux/coresight-cti.h
@@ -0,0 +1,93 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_CTI_H
+#define _LINUX_CORESIGHT_CTI_H
+
+#include <linux/list.h>
+
+struct coresight_cti_data {
+	int nr_ctis;
+	const char **names;
+};
+
+struct coresight_cti {
+	const char *name;
+	struct list_head link;
+};
+
+#ifdef CONFIG_CORESIGHT_CTI
+extern struct coresight_cti *coresight_cti_get(const char *name);
+extern void coresight_cti_put(struct coresight_cti *cti);
+extern int coresight_cti_map_trigin(
+			struct coresight_cti *cti, int trig, int ch);
+extern int coresight_cti_map_trigout(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigin(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_unmap_trigout(
+			struct coresight_cti *cti, int trig, int ch);
+extern void coresight_cti_reset(struct coresight_cti *cti);
+extern int coresight_cti_set_trig(struct coresight_cti *cti, int ch);
+extern void coresight_cti_clear_trig(struct coresight_cti *cti, int ch);
+extern int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch);
+extern int coresight_cti_enable_gate(struct coresight_cti *cti, int ch);
+extern void coresight_cti_disable_gate(struct coresight_cti *cti, int ch);
+extern void coresight_cti_ctx_save(void);
+extern void coresight_cti_ctx_restore(void);
+extern int coresight_cti_ack_trig(struct coresight_cti *cti, int trig);
+#else
+static inline struct coresight_cti *coresight_cti_get(const char *name)
+{
+	return NULL;
+}
+static inline void coresight_cti_put(struct coresight_cti *cti) {}
+static inline int coresight_cti_map_trigin(
+			struct coresight_cti *cti, int trig, int ch)
+{
+	return -ENODEV;
+}
+static inline int coresight_cti_map_trigout(
+			struct coresight_cti *cti, int trig, int ch)
+{
+	return -ENODEV;
+}
+static inline void coresight_cti_unmap_trigin(
+			struct coresight_cti *cti, int trig, int ch) {}
+static inline void coresight_cti_unmap_trigout(
+			struct coresight_cti *cti, int trig, int ch) {}
+static inline void coresight_cti_reset(struct coresight_cti *cti) {}
+static inline int coresight_cti_set_trig(struct coresight_cti *cti, int ch)
+{
+	return -ENODEV;
+}
+static inline void coresight_cti_clear_trig(struct coresight_cti *cti, int ch)
+{}
+static inline int coresight_cti_pulse_trig(struct coresight_cti *cti, int ch)
+{
+	return -ENODEV;
+}
+static inline int coresight_cti_enable_gate(struct coresight_cti *cti, int ch)
+{
+	return -ENODEV;
+}
+static inline void coresight_cti_disable_gate(struct coresight_cti *cti, int ch)
+{}
+static inline void coresight_cti_ctx_save(void){}
+static inline void coresight_cti_ctx_restore(void){}
+static inline int coresight_cti_ack_trig(struct coresight_cti *cti, int trig)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif
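
A minimal sketch of the CTI API above, mapping a trigger output onto a cross-trigger channel. The CTI name, trigger and channel numbers are illustrative, not from this patch.

#include <linux/coresight-cti.h>
#include <linux/err.h>

static int example_map_cti(void)
{
	struct coresight_cti *cti = coresight_cti_get("cti_cpu0");
	int ret;

	if (IS_ERR_OR_NULL(cti))
		return -ENODEV;

	/* Route trigger output 2 onto cross-trigger channel 0. */
	ret = coresight_cti_map_trigout(cti, 2, 0);
	if (ret)
		coresight_cti_put(cti);
	return ret;
}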
diff --git a/include/linux/coresight-stm.h b/include/linux/coresight-stm.h
index a978bb8..4619158 100644
--- a/include/linux/coresight-stm.h
+++ b/include/linux/coresight-stm.h
@@ -1,6 +1,109 @@
-#ifndef __LINUX_CORESIGHT_STM_H_
-#define __LINUX_CORESIGHT_STM_H_
+#ifndef __LINUX_CORESIGHT_STM_H
+#define __LINUX_CORESIGHT_STM_H
 
+#include <asm/local.h>
+#include <linux/stm.h>
+#include <linux/bitmap.h>
 #include <uapi/linux/coresight-stm.h>
 
+#define BYTES_PER_CHANNEL		256
+
+enum stm_pkt_type {
+	STM_PKT_TYPE_DATA	= 0x98,
+	STM_PKT_TYPE_FLAG	= 0xE8,
+	STM_PKT_TYPE_TRIG	= 0xF8,
+};
+
+#define stm_channel_off(type, opts)	(type & ~opts)
+
+#define stm_channel_addr(drvdata, ch)	(drvdata->chs.base +	\
+					(ch * BYTES_PER_CHANNEL))
+
+#define stm_log_inv(entity_id, proto_id, data, size)			\
+	stm_trace(STM_FLAG_NONE, entity_id, proto_id, data, size)
+
+#define stm_log_inv_ts(entity_id, proto_id, data, size)			\
+	stm_trace(STM_FLAG_TIMESTAMPED, entity_id, proto_id,		\
+		  data, size)
+
+#define stm_log_gtd(entity_id, proto_id, data, size)			\
+	stm_trace(STM_FLAG_GUARANTEED, entity_id, proto_id,		\
+		  data, size)
+
+#define stm_log_gtd_ts(entity_id, proto_id, data, size)			\
+	stm_trace(STM_FLAG_GUARANTEED | STM_OPTION_TIMESTAMPED,	\
+		  entity_id, proto_id, data, size)
+
+#define stm_log(entity_id, data, size)					\
+	stm_log_inv_ts(entity_id, 0, data, size)
+
+/**
+ * struct channel_space - central management entity for extended ports
+ * @base:		memory mapped base address where channels start.
+ * @phys:		physical base address of channel region.
+ * @guaranteed:		is the channel delivery guaranteed.
+ * @bitmap:		channel info for OST packet
+ */
+struct channel_space {
+	void __iomem		*base;
+	phys_addr_t		phys;
+	unsigned long		*guaranteed;
+	unsigned long		*bitmap;
+};
+
+/**
+ * struct stm_drvdata - specifics associated to an STM component
+ * @base:		memory mapped base address for this component.
+ * @dev:		the device entity associated to this component.
+ * @atclk:		optional clock for the core parts of the STM.
+ * @csdev:		component vitals needed by the framework.
+ * @spinlock:		only one at a time pls.
+ * @chs:		the channels associated with this STM.
+ * @entities:		currently configured OST entities.
+ * @stm:		structure associated with the generic STM interface.
+ * @mode:		this tracer's mode, i.e. sysFS or disabled.
+ * @traceid:		value of the current ID for this component.
+ * @write_bytes:	maximum number of bytes this STM can write at a time.
+ * @stmsper:		settings for register STMSPER.
+ * @stmspscr:		settings for register STMSPSCR.
+ * @numsp:		the total number of stimulus ports supported by this STM.
+ * @stmheer:		settings for register STMHEER.
+ * @stmheter:		settings for register STMHETER.
+ * @stmhebsr:		settings for register STMHEBSR.
+ */
+struct stm_drvdata {
+	void __iomem		*base;
+	struct device		*dev;
+	struct clk		*atclk;
+	struct coresight_device	*csdev;
+	spinlock_t		spinlock;
+	struct channel_space	chs;
+	bool			enable;
+	DECLARE_BITMAP(entities, OST_ENTITY_MAX);
+	struct stm_data		stm;
+	local_t			mode;
+	u8			traceid;
+	u32			write_bytes;
+	u32			stmsper;
+	u32			stmspscr;
+	u32			numsp;
+	u32			stmheer;
+	u32			stmheter;
+	u32			stmhebsr;
+};
+
+#ifdef CONFIG_CORESIGHT_STM
+extern int stm_trace(uint32_t flags, uint8_t entity_id, uint8_t proto_id,
+		     const void *data, uint32_t size);
+
+void stm_send(void *addr, const void *data, u32 size, u8 write_bytes);
+#else
+static inline int stm_trace(uint32_t flags, uint8_t entity_id,
+			    uint8_t proto_id, const void *data, uint32_t size)
+{
+	return 0;
+}
+static inline void stm_send(void *addr, const void *data, u32 size,
+			    u8 write_bytes) {}
+#endif
 #endif
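
A minimal sketch of the logging macros above. The entity id is an arbitrary illustrative value, not one defined by this patch.

#include <linux/coresight-stm.h>
#include <linux/string.h>

static void example_stm_trace(const char *msg)
{
	/* Timestamped OST packet carrying the message bytes. */
	stm_log(0x10 /* assumed entity id */, msg, strlen(msg) + 1);
}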
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 2a5982c..9535e79 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012, 2016, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -41,6 +41,13 @@
 
 extern struct bus_type coresight_bustype;
 
+enum coresight_clk_rate {
+	CORESIGHT_CLK_RATE_OFF,
+	CORESIGHT_CLK_RATE_TRACE = 1000,
+	CORESIGHT_CLK_RATE_HSTRACE = 2000,
+	CORESIGHT_CLK_RATE_FIXED = 3000,
+};
+
 enum coresight_dev_type {
 	CORESIGHT_DEV_TYPE_NONE,
 	CORESIGHT_DEV_TYPE_SINK,
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index b886dc1..aae03c4 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -28,6 +28,21 @@
 	struct device dev;
 };
 
+struct cpu_pstate_pwr {
+	unsigned int freq;
+	uint32_t power;
+};
+
+struct cpu_pwr_stats {
+	int cpu;
+	long temp;
+	struct cpu_pstate_pwr *ptable;
+	bool throttling;
+	int len;
+};
+
+extern struct cpu_pwr_stats *get_cpu_pwr_stats(void);
+
 extern void boot_cpu_init(void);
 extern void boot_cpu_state_init(void);
 
@@ -253,4 +268,11 @@
 static inline void cpuhp_report_idle_dead(void) { }
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
 #endif /* _LINUX_CPU_H_ */
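
A minimal sketch of the idle notifier interface added above; the callback and its debug message are illustrative.

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_idle_cb(struct notifier_block *nb, unsigned long val,
			   void *data)
{
	pr_debug("idle %s\n", val == IDLE_START ? "start" : "end");
	return NOTIFY_OK;
}

static struct notifier_block example_idle_nb = {
	.notifier_call = example_idle_cb,
};

static void example_register(void)
{
	idle_notifier_register(&example_idle_nb);
}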
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 5beed7b..9b0477e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -142,6 +142,7 @@
 	int (*d_manage)(struct dentry *, bool);
 	struct dentry *(*d_real)(struct dentry *, const struct inode *,
 				 unsigned int);
+	void (*d_canonical_path)(const struct path *, struct path *);
 } ____cacheline_aligned;
 
 /*
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2de4e2e..70e8299 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -262,6 +262,9 @@
  *			the governor may consider slowing the frequency down.
  *			Specify 0 to use the default. Valid value = 0 to 100.
  *			downdifferential < upthreshold must hold.
+ * @simple_scaling:	Setting this flag will scale the clocks up only if the
+ *			load is above @upthreshold and will scale the clocks
+ *			down only if the load is below @downdifferential.
  *
  * If the fed devfreq_simple_ondemand_data pointer is NULL to the governor,
  * the governor uses the default values.
@@ -269,6 +272,7 @@
 struct devfreq_simple_ondemand_data {
 	unsigned int upthreshold;
 	unsigned int downdifferential;
+	unsigned int simple_scaling;
 };
 #endif
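
A minimal sketch of governor data opting into the new behaviour; the threshold values are illustrative. This structure is typically passed as the data argument of devfreq_add_device().

#include <linux/devfreq.h>

static struct devfreq_simple_ondemand_data example_ondemand = {
	.upthreshold		= 90,	/* scale up only above 90% load */
	.downdifferential	= 20,	/* scale down only below 20% load */
	.simple_scaling		= 1,
};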
 
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ef7962e..0e1e050 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -408,6 +408,12 @@
 void *dm_get_mdptr(struct mapped_device *md);
 
 /*
+ * Export the device via the ioctl interface (uses mdptr).
+ */
+int dm_ioctl_export(struct mapped_device *md, const char *name,
+		    const char *uuid);
+
+/*
  * A device can still be used while suspended, but I/O is deferred.
  */
 int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
diff --git a/include/linux/device.h b/include/linux/device.h
index bc41e87..f54e6dd 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -833,6 +833,7 @@
 	struct cma *cma_area;		/* contiguous memory area for dma
 					   allocations */
 #endif
+	struct removed_region *removed_mem;
 	/* arch specific additions */
 	struct dev_archdata	archdata;
 
diff --git a/include/linux/dma-mapping-fast.h b/include/linux/dma-mapping-fast.h
new file mode 100644
index 0000000..aa9fcfe
--- /dev/null
+++ b/include/linux/dma-mapping-fast.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_DMA_MAPPING_FAST_H
+#define __LINUX_DMA_MAPPING_FAST_H
+
+#include <linux/iommu.h>
+#include <linux/io-pgtable-fast.h>
+
+struct dma_fast_smmu_mapping {
+	struct device		*dev;
+	struct iommu_domain	*domain;
+	dma_addr_t	 base;
+	size_t		 size;
+	size_t		 num_4k_pages;
+
+	unsigned int	bitmap_size;
+	unsigned long	*bitmap;
+	unsigned long	next_start;
+	unsigned long	upcoming_stale_bit;
+	bool		have_stale_tlbs;
+
+	dma_addr_t	pgtbl_dma_handle;
+	av8l_fast_iopte	*pgtbl_pmds;
+
+	spinlock_t	lock;
+	struct notifier_block notifier;
+};
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST
+int fast_smmu_attach_device(struct device *dev,
+			    struct dma_iommu_mapping *mapping);
+void fast_smmu_detach_device(struct device *dev,
+			     struct dma_iommu_mapping *mapping);
+#else
+static inline int fast_smmu_attach_device(struct device *dev,
+					  struct dma_iommu_mapping *mapping)
+{
+	return -ENODEV;
+}
+
+static inline void fast_smmu_detach_device(struct device *dev,
+					   struct dma_iommu_mapping *mapping)
+{
+}
+#endif
+
+#endif /* __LINUX_DMA_MAPPING_FAST_H */
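
A minimal sketch of attaching a device to a fast-SMMU mapping. It assumes the ARM IOMMU mapping helpers are available on this platform; the IOVA base and size are illustrative.

#include <linux/dma-mapping-fast.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_fast_attach(struct device *dev)
{
	struct dma_iommu_mapping *mapping;

	/* 1 GB of IOVA space starting at 0x10000000 (illustrative). */
	mapping = arm_iommu_create_mapping(dev->bus, 0x10000000, SZ_1G);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	return fast_smmu_attach_device(dev, mapping);
}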
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 08528af..542cc16 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -61,6 +61,32 @@
  * allocation failure reports (similarly to __GFP_NOWARN).
  */
 #define DMA_ATTR_NO_WARN	(1UL << 8)
+/*
+ * DMA_ATTR_STRONGLY_ORDERED: Specifies that accesses to the mapping must
+ * not be buffered, reordered, merged with other accesses, or unaligned.
+ * No speculative access may occur in this mapping.
+ */
+#define DMA_ATTR_STRONGLY_ORDERED	(1UL << 9)
+/*
+ * DMA_ATTR_SKIP_ZEROING: Do not zero mapping.
+ */
+#define DMA_ATTR_SKIP_ZEROING		(1UL << 10)
+/*
+ * DMA_ATTR_NO_DELAYED_UNMAP: Used by msm specific lazy mapping to indicate
+ * that the mapping can be freed on unmap, rather than when the ion_buffer
+ * is freed.
+ */
+#define DMA_ATTR_NO_DELAYED_UNMAP	(1UL << 11)
+/*
+ * DMA_ATTR_EXEC_MAPPING: The mapping has executable permissions.
+ */
+#define DMA_ATTR_EXEC_MAPPING		(1UL << 12)
+/*
+ * DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an smmu will override any bus
+ * attributes (i.e cacheablilty) provided by the client device. Some hardware
+ * may be designed to use the original attributes instead.
+ */
+#define DMA_ATTR_IOMMU_USE_UPSTREAM_HINT	(1UL << 13)
 
 /*
  * A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -121,6 +147,10 @@
 	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
 	int (*dma_supported)(struct device *dev, u64 mask);
 	int (*set_dma_mask)(struct device *dev, u64 mask);
+	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
+			size_t size, unsigned long attrs);
+	void (*unremap)(struct device *dev, void *remapped_address,
+			size_t size);
 #ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
 	u64 (*get_required_mask)(struct device *dev);
 #endif
@@ -182,7 +212,7 @@
 					      enum dma_data_direction dir,
 					      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(ptr, size);
@@ -201,7 +231,7 @@
 					  enum dma_data_direction dir,
 					  unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
@@ -217,7 +247,7 @@
 				   int nents, enum dma_data_direction dir,
 				   unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	int i, ents;
 	struct scatterlist *s;
 
@@ -235,7 +265,7 @@
 				      int nents, enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
@@ -247,7 +277,7 @@
 				      size_t offset, size_t size,
 				      enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	kmemcheck_mark_initialized(page_address(page) + offset, size);
@@ -261,7 +291,7 @@
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
 				  size_t size, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_page)
@@ -275,7 +305,7 @@
 					  enum dma_data_direction dir,
 					  unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	dma_addr_t addr;
 
 	BUG_ON(!valid_dma_direction(dir));
@@ -296,7 +326,7 @@
 				      size_t size, enum dma_data_direction dir,
 				      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->unmap_resource)
@@ -308,7 +338,7 @@
 					   size_t size,
 					   enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_cpu)
@@ -320,7 +350,7 @@
 					      dma_addr_t addr, size_t size,
 					      enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_single_for_device)
@@ -360,7 +390,7 @@
 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		    int nelems, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_cpu)
@@ -372,7 +402,7 @@
 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		       int nelems, enum dma_data_direction dir)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!valid_dma_direction(dir));
 	if (ops->sync_sg_for_device)
@@ -396,7 +426,8 @@
 void *dma_common_pages_remap(struct page **pages, size_t size,
 			unsigned long vm_flags, pgprot_t prot,
 			const void *caller);
-void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
+			   bool nowarn);
 
 /**
  * dma_mmap_attrs - map a coherent DMA allocation into user space
@@ -415,7 +446,7 @@
 dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
 	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!ops);
 	if (ops->mmap)
 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
@@ -433,7 +464,7 @@
 		      dma_addr_t dma_addr, size_t size,
 		      unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!ops);
 	if (ops->get_sgtable)
 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -451,7 +482,7 @@
 				       dma_addr_t *dma_handle, gfp_t flag,
 				       unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
 
 	BUG_ON(!ops);
@@ -473,7 +504,7 @@
 				     void *cpu_addr, dma_addr_t dma_handle,
 				     unsigned long attrs)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	BUG_ON(!ops);
 	WARN_ON(irqs_disabled());
@@ -531,7 +562,7 @@
 #ifndef HAVE_ARCH_DMA_SUPPORTED
 static inline int dma_supported(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (!ops)
 		return 0;
@@ -544,7 +575,7 @@
 #ifndef HAVE_ARCH_DMA_SET_MASK
 static inline int dma_set_mask(struct device *dev, u64 mask)
 {
-	struct dma_map_ops *ops = get_dma_ops(dev);
+	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (ops->set_dma_mask)
 		return ops->set_dma_mask(dev, mask);
@@ -555,6 +586,35 @@
 	return 0;
 }
 #endif
+static inline void *dma_remap(struct device *dev, void *cpu_addr,
+		dma_addr_t dma_handle, size_t size, unsigned long attrs)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops->remap) {
+		WARN_ONCE(1, "Remap function not implemented for %pS\n",
+				ops);
+		return NULL;
+	}
+
+	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
+}
+
+
+static inline void dma_unremap(struct device *dev, void *remapped_addr,
+				size_t size)
+{
+	const struct dma_map_ops *ops = get_dma_ops(dev);
+
+	if (!ops->unremap) {
+		WARN_ONCE(1, "unremap function not implemented for %pS\n",
+				ops);
+		return;
+	}
+
+	return ops->unremap(dev, remapped_addr, size);
+}
+
 
 static inline u64 dma_get_mask(struct device *dev)
 {
@@ -764,6 +824,29 @@
 #define dma_mmap_writecombine dma_mmap_wc
 #endif
 
+static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
+					dma_addr_t *dma_handle, gfp_t flag)
+{
+	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
+
+	return dma_alloc_attrs(dev, size, dma_handle, flag, attrs);
+}
+
+static inline void dma_free_nonconsistent(struct device *dev, size_t size,
+					void *cpu_addr, dma_addr_t dma_handle)
+{
+	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;
+
+	return dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline int dma_mmap_nonconsistent(struct device *dev,
+		struct vm_area_struct *vma, void *cpu_addr,
+		dma_addr_t dma_addr, size_t size)
+{
+	return -ENODEV;
+}
+
 #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
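
A minimal sketch of the new remap helper together with the non-consistent allocation wrappers. The attribute choice and the lack of error/teardown handling are illustrative only.

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_dma_setup(struct device *dev, size_t size,
			       dma_addr_t *handle)
{
	void *cpu_addr = dma_alloc_nonconsistent(dev, size, handle, GFP_KERNEL);

	if (!cpu_addr)
		return NULL;

	/* Re-map the buffer with strongly ordered attributes. */
	return dma_remap(dev, cpu_addr, *handle, size,
			 DMA_ATTR_STRONGLY_ORDERED);
}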
diff --git a/include/linux/ecm_ipa.h b/include/linux/ecm_ipa.h
new file mode 100644
index 0000000..9fe9c37
--- /dev/null
+++ b/include/linux/ecm_ipa.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ECM_IPA_H_
+#define _ECM_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: for tx path the data field is the sent socket buffer.
+ */
+typedef void (*ecm_ipa_callback)(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+/*
+ * struct ecm_ipa_params - parameters for ecm_ipa initialization API
+ *
+ * @device_ready_notify: callback supplied by the USB core driver.
+ * This callback shall be called by the netdev once the device
+ * is ready to receive data from the tethered PC.
+ * @ecm_ipa_rx_dp_notify: ecm_ipa will set this callback (out parameter).
+ * This callback shall be supplied to ipa_connect upon pipe
+ * connection (USB->IPA); once the IPA driver receives data packets
+ * from the USB pipe destined for Apps, this callback will be called.
+ * @ecm_ipa_tx_dp_notify: ecm_ipa will set this callback (out parameter).
+ * This callback shall be supplied to ipa_connect upon pipe
+ * connection (IPA->USB); once the IPA driver sends packets destined
+ * for USB, IPA BAM will notify of Tx-complete.
+ * @private: ecm_ipa will set this pointer (out parameter).
+ * This pointer will hold the network device for later interaction
+ * with ecm_ipa APIs.
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ *  should or should not configure this end-point.
+ */
+struct ecm_ipa_params {
+	void (*device_ready_notify)(void);
+	ecm_ipa_callback ecm_ipa_rx_dp_notify;
+	ecm_ipa_callback ecm_ipa_tx_dp_notify;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void *private;
+	bool skip_ep_cfg;
+};
+
+
+#ifdef CONFIG_ECM_IPA
+
+int ecm_ipa_init(struct ecm_ipa_params *params);
+
+int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+		void *priv);
+
+int ecm_ipa_disconnect(void *priv);
+
+void ecm_ipa_cleanup(void *priv);
+
+#else /* CONFIG_ECM_IPA*/
+
+static inline int ecm_ipa_init(struct ecm_ipa_params *params)
+{
+	return 0;
+}
+
+static inline int ecm_ipa_connect(u32 usb_to_ipa_hdl, u32 ipa_to_usb_hdl,
+		void *priv)
+{
+	return 0;
+}
+
+static inline int ecm_ipa_disconnect(void *priv)
+{
+	return 0;
+}
+
+static inline void ecm_ipa_cleanup(void *priv)
+{
+
+}
+#endif /* CONFIG_ECM_IPA*/
+
+#endif /* _ECM_IPA_H_ */
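
A minimal sketch of initialising the ECM/IPA bridge. The MAC address handling is illustrative; a caller would normally go on to ecm_ipa_connect() once the USB pipes are set up.

#include <linux/ecm_ipa.h>
#include <linux/if_ether.h>
#include <linux/string.h>

static int example_ecm_setup(const u8 *host_mac, const u8 *dev_mac)
{
	struct ecm_ipa_params params = { };

	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);

	/* ecm_ipa fills in the rx/tx callbacks and the private pointer. */
	return ecm_ipa_init(&params);
}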
diff --git a/include/linux/esoc_client.h b/include/linux/esoc_client.h
new file mode 100644
index 0000000..77a8b50
--- /dev/null
+++ b/include/linux/esoc_client.h
@@ -0,0 +1,52 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __ESOC_CLIENT_H_
+#define __ESOC_CLIENT_H_
+
+#include <linux/device.h>
+#include <linux/esoc_ctrl.h>
+#include <linux/notifier.h>
+
+/*
+ * struct esoc_desc: Describes an external soc
+ * @name: external soc name
+ * @priv: private data for external soc
+ */
+struct esoc_desc {
+	const char *name;
+	const char *link;
+	void *priv;
+};
+
+#ifdef CONFIG_ESOC_CLIENT
+/* Can return probe deferral */
+struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name);
+void devm_unregister_esoc_client(struct device *dev,
+						struct esoc_desc *esoc_desc);
+int esoc_register_client_notifier(struct notifier_block *nb);
+#else
+static inline struct esoc_desc *devm_register_esoc_client(struct device *dev,
+							const char *name)
+{
+	return NULL;
+}
+static inline void devm_unregister_esoc_client(struct device *dev,
+						struct esoc_desc *esoc_desc)
+{
+}
+static inline int esoc_register_client_notifier(struct notifier_block *nb)
+{
+	return -EIO;
+}
+#endif
+#endif
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index b871c0c..22fd849 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -65,6 +65,12 @@
 #define EXTCON_JACK_SPDIF_IN	26	/* Sony Philips Digital InterFace */
 #define EXTCON_JACK_SPDIF_OUT	27
 
+/* connector orientation: 0 - CC1, 1 - CC2 */
+#define EXTCON_USB_CC		28
+
+/* connector speed: 0 - high speed, 1 - super speed */
+#define EXTCON_USB_SPEED	29
+
 /* Display external connector */
 #define EXTCON_DISP_HDMI	40	/* High-Definition Multimedia Interface */
 #define EXTCON_DISP_MHL		41	/* Mobile High-Definition Link */
diff --git a/include/linux/fence.h b/include/linux/fence.h
index 0d76305..8d7265f 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -108,6 +108,7 @@
  * @get_driver_name: returns the driver name.
  * @get_timeline_name: return the name of the context this fence belongs to.
  * @enable_signaling: enable software signaling of fence.
+ * @disable_signaling: disable software signaling of fence (optional).
  * @signaled: [optional] peek whether the fence is signaled, can be null.
  * @wait: custom wait implementation, or fence_default_wait.
  * @release: [optional] called on destruction of fence, can be null
@@ -167,6 +168,7 @@
 	const char * (*get_driver_name)(struct fence *fence);
 	const char * (*get_timeline_name)(struct fence *fence);
 	bool (*enable_signaling)(struct fence *fence);
+	void (*disable_signaling)(struct fence *fence);
 	bool (*signaled)(struct fence *fence);
 	signed long (*wait)(struct fence *fence, bool intr, signed long timeout);
 	void (*release)(struct fence *fence);
diff --git a/include/linux/gpio_event.h b/include/linux/gpio_event.h
new file mode 100644
index 0000000..2613fc5
--- /dev/null
+++ b/include/linux/gpio_event.h
@@ -0,0 +1,170 @@
+/* include/linux/gpio_event.h
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_GPIO_EVENT_H
+#define _LINUX_GPIO_EVENT_H
+
+#include <linux/input.h>
+
+struct gpio_event_input_devs {
+	int count;
+	struct input_dev *dev[];
+};
+enum {
+	GPIO_EVENT_FUNC_UNINIT  = 0x0,
+	GPIO_EVENT_FUNC_INIT    = 0x1,
+	GPIO_EVENT_FUNC_SUSPEND = 0x2,
+	GPIO_EVENT_FUNC_RESUME  = 0x3,
+};
+struct gpio_event_info {
+	int (*func)(struct gpio_event_input_devs *input_devs,
+		    struct gpio_event_info *info,
+		    void **data, int func);
+	int (*event)(struct gpio_event_input_devs *input_devs,
+		     struct gpio_event_info *info,
+		     void **data, unsigned int dev, unsigned int type,
+		     unsigned int code, int value); /* out events */
+	bool no_suspend;
+};
+
+struct gpio_event_platform_data {
+	const char *name;
+	struct gpio_event_info **info;
+	size_t info_count;
+	int (*power)(const struct gpio_event_platform_data *pdata, bool on);
+	const char *names[]; /* If name is NULL, names contains a NULL- */
+			     /* terminated list of input devices to create. */
+};
+
+#define GPIO_EVENT_DEV_NAME "gpio-event"
+
+/* Key matrix */
+
+enum gpio_event_matrix_flags {
+	/* unset: drive active output low, set: drive active output high */
+	GPIOKPF_ACTIVE_HIGH              = 1U << 0,
+	GPIOKPF_DEBOUNCE                 = 1U << 1,
+	GPIOKPF_REMOVE_SOME_PHANTOM_KEYS = 1U << 2,
+	GPIOKPF_REMOVE_PHANTOM_KEYS      = GPIOKPF_REMOVE_SOME_PHANTOM_KEYS |
+					   GPIOKPF_DEBOUNCE,
+	GPIOKPF_DRIVE_INACTIVE           = 1U << 3,
+	GPIOKPF_LEVEL_TRIGGERED_IRQ      = 1U << 4,
+	GPIOKPF_PRINT_UNMAPPED_KEYS      = 1U << 16,
+	GPIOKPF_PRINT_MAPPED_KEYS        = 1U << 17,
+	GPIOKPF_PRINT_PHANTOM_KEYS       = 1U << 18,
+};
+
+#define MATRIX_CODE_BITS (10)
+#define MATRIX_KEY_MASK ((1U << MATRIX_CODE_BITS) - 1)
+#define MATRIX_KEY(dev, code) \
+	(((dev) << MATRIX_CODE_BITS) | (code & MATRIX_KEY_MASK))
+
+extern int gpio_event_matrix_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_matrix_info {
+	/* initialize to gpio_event_matrix_func */
+	struct gpio_event_info info;
+	/* size must be ninputs * noutputs */
+	const unsigned short *keymap;
+	unsigned int *input_gpios;
+	unsigned int *output_gpios;
+	unsigned int ninputs;
+	unsigned int noutputs;
+	/* time to wait before reading inputs after driving each output */
+	ktime_t settle_time;
+	/* time to wait before scanning the keypad a second time */
+	ktime_t debounce_delay;
+	ktime_t poll_time;
+	unsigned flags;
+};
+
+/* Directly connected inputs and outputs */
+
+enum gpio_event_direct_flags {
+	GPIOEDF_ACTIVE_HIGH         = 1U << 0,
+/*	GPIOEDF_USE_DOWN_IRQ        = 1U << 1, */
+/*	GPIOEDF_USE_IRQ             = (1U << 2) | GPIOIDF_USE_DOWN_IRQ, */
+	GPIOEDF_PRINT_KEYS          = 1U << 8,
+	GPIOEDF_PRINT_KEY_DEBOUNCE  = 1U << 9,
+	GPIOEDF_PRINT_KEY_UNSTABLE  = 1U << 10,
+};
+
+struct gpio_event_direct_entry {
+	uint32_t gpio:16;
+	uint32_t code:10;
+	uint32_t dev:6;
+};
+
+/* inputs */
+extern int gpio_event_input_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_input_info {
+	/* initialize to gpio_event_input_func */
+	struct gpio_event_info info;
+	ktime_t debounce_time;
+	ktime_t poll_time;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+/* outputs */
+extern int gpio_event_output_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+extern int gpio_event_output_event(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data,
+			unsigned int dev, unsigned int type,
+			unsigned int code, int value);
+struct gpio_event_output_info {
+	/* initialize to gpio_event_output_func and gpio_event_output_event */
+	struct gpio_event_info info;
+	uint16_t flags;
+	uint16_t type;
+	const struct gpio_event_direct_entry *keymap;
+	size_t keymap_size;
+};
+
+
+/* axes */
+
+enum gpio_event_axis_flags {
+	GPIOEAF_PRINT_UNKNOWN_DIRECTION  = 1U << 16,
+	GPIOEAF_PRINT_RAW                = 1U << 17,
+	GPIOEAF_PRINT_EVENT              = 1U << 18,
+};
+
+extern int gpio_event_axis_func(struct gpio_event_input_devs *input_devs,
+			struct gpio_event_info *info, void **data, int func);
+struct gpio_event_axis_info {
+	/* initialize to gpio_event_axis_func */
+	struct gpio_event_info info;
+	uint8_t  count; /* number of gpios for this axis */
+	uint8_t  dev; /* device index when using multiple input devices */
+	uint8_t  type; /* EV_REL or EV_ABS */
+	uint16_t code;
+	uint16_t decoded_size;
+	uint16_t (*map)(struct gpio_event_axis_info *info, uint16_t in);
+	uint32_t *gpio;
+	uint32_t flags;
+};
+#define gpio_axis_2bit_gray_map gpio_axis_4bit_gray_map
+#define gpio_axis_3bit_gray_map gpio_axis_4bit_gray_map
+uint16_t gpio_axis_4bit_gray_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+uint16_t gpio_axis_5bit_singletrack_map(
+			struct gpio_event_axis_info *info, uint16_t in);
+
+#endif
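
A minimal sketch of a board file describing one directly-connected key with the structures above. The GPIO number, key code and names are illustrative, not from this patch.

#include <linux/gpio_event.h>
#include <linux/input.h>

static const struct gpio_event_direct_entry example_keymap[] = {
	{ .gpio = 42, .code = KEY_VOLUMEUP },
};

static struct gpio_event_input_info example_input_info = {
	.info.func	= gpio_event_input_func,
	.flags		= GPIOEDF_PRINT_KEYS,
	.type		= EV_KEY,
	.keymap		= example_keymap,
	.keymap_size	= ARRAY_SIZE(example_keymap),
};

static struct gpio_event_info *example_info[] = {
	&example_input_info.info,
};

static struct gpio_event_platform_data example_pdata = {
	.name		= "example-keypad",
	.info		= example_info,
	.info_count	= ARRAY_SIZE(example_info),
};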
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index bb3f329..4c70716 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -39,6 +39,12 @@
 
 void kmap_flush_unused(void);
 
+#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
+void kmap_atomic_flush_unused(void);
+#else
+static inline void kmap_atomic_flush_unused(void) { }
+#endif
+
 struct page *kmap_to_page(void *addr);
 
 #else /* CONFIG_HIGHMEM */
@@ -80,6 +86,7 @@
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 
 #define kmap_flush_unused()	do {} while(0)
+#define kmap_atomic_flush_unused()	do {} while (0)
 #endif
 
 #endif /* CONFIG_HIGHMEM */
diff --git a/include/linux/if_pppolac.h b/include/linux/if_pppolac.h
new file mode 100644
index 0000000..e40aa10
--- /dev/null
+++ b/include/linux/if_pppolac.h
@@ -0,0 +1,23 @@
+/* include/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOLAC_H
+#define __LINUX_IF_PPPOLAC_H
+
+#include <uapi/linux/if_pppolac.h>
+
+#endif /* __LINUX_IF_PPPOLAC_H */
diff --git a/include/linux/if_pppopns.h b/include/linux/if_pppopns.h
new file mode 100644
index 0000000..4ac621a9
--- /dev/null
+++ b/include/linux/if_pppopns.h
@@ -0,0 +1,23 @@
+/* include/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IF_PPPOPNS_H
+#define __LINUX_IF_PPPOPNS_H
+
+#include <uapi/linux/if_pppopns.h>
+
+#endif /* __LINUX_IF_PPPOPNS_H */
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index ba7a9b0..325727a 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -43,6 +43,25 @@
 	u32 seq_sent, seq_recv;
 	int ppp_flags;
 };
+
+struct pppolac_opt {
+	__u32		local;
+	__u32		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	atomic_t	sequencing;
+	int		(*backlog_rcv)(struct sock *sk_udp, struct sk_buff *skb);
+};
+
+struct pppopns_opt {
+	__u16		local;
+	__u16		remote;
+	__u32		recv_sequence;
+	__u32		xmit_sequence;
+	void		(*data_ready)(struct sock *sk_raw);
+	int		(*backlog_rcv)(struct sock *sk_raw, struct sk_buff *skb);
+};
+
 #include <net/sock.h>
 
 struct pppox_sock {
@@ -53,6 +72,8 @@
 	union {
 		struct pppoe_opt pppoe;
 		struct pptp_opt  pptp;
+		struct pppolac_opt lac;
+		struct pppopns_opt pns;
 	} proto;
 	__be16			num;
 };
diff --git a/include/linux/initramfs.h b/include/linux/initramfs.h
new file mode 100644
index 0000000..fc7da63
--- /dev/null
+++ b/include/linux/initramfs.h
@@ -0,0 +1,32 @@
+/*
+ * include/linux/initramfs.h
+ *
+ * Copyright (C) 2015, Google
+ * Rom Lemarchand <romlem@android.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _LINUX_INITRAMFS_H
+#define _LINUX_INITRAMFS_H
+
+#include <linux/kconfig.h>
+
+#if IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+
+int __init default_rootfs(void);
+
+#endif
+
+#endif /* _LINUX_INITRAMFS_H */
diff --git a/include/linux/io-pgtable-fast.h b/include/linux/io-pgtable-fast.h
new file mode 100644
index 0000000..ab5a1dc
--- /dev/null
+++ b/include/linux/io-pgtable-fast.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_IO_PGTABLE_FAST_H
+#define __LINUX_IO_PGTABLE_FAST_H
+
+#include <linux/notifier.h>
+
+typedef u64 av8l_fast_iopte;
+
+#define iopte_pmd_offset(pmds, iova) (pmds + (iova >> 12))
+
+int av8l_fast_map_public(av8l_fast_iopte *ptep, phys_addr_t paddr, size_t size,
+			 int prot);
+void av8l_fast_unmap_public(av8l_fast_iopte *ptep, size_t size);
+
+/* events for notifiers passed to av8l_register_notify */
+#define MAPPED_OVER_STALE_TLB 1
+
+
+#ifdef CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB
+/*
+ * Doesn't matter what we use as long as bit 0 is unset.  The reason why we
+ * need a different value at all is that there are certain hardware
+ * platforms with an erratum that requires that a PTE actually be zeroed out
+ * and not just have its valid bit unset.
+ */
+#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0xa
+
+void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds, bool skip_sync);
+void av8l_register_notify(struct notifier_block *nb);
+
+#else  /* !CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
+
+#define AV8L_FAST_PTE_UNMAPPED_NEED_TLBI 0
+
+static inline void av8l_fast_clear_stale_ptes(av8l_fast_iopte *puds,
+					      bool skip_sync)
+{
+}
+
+static inline void av8l_register_notify(struct notifier_block *nb)
+{
+}
+
+#endif	/* CONFIG_IOMMU_IO_PGTABLE_FAST_PROVE_TLB */
+
+#endif /* __LINUX_IO_PGTABLE_FAST_H */
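A minimal sketch (not part of the patch) of how a client might drive the public fast page-table helpers declared above; the pmds pointer, page size, and prot flags are assumptions, with pmds expected to come from the owning IOMMU driver.

#include <linux/sizes.h>
#include <linux/iommu.h>
#include <linux/io-pgtable-fast.h>

static int example_fast_map_one_page(av8l_fast_iopte *pmds,
				     unsigned long iova, phys_addr_t paddr)
{
	/* Locate the leaf PTE for this IOVA in the flat PTE array */
	av8l_fast_iopte *ptep = iopte_pmd_offset(pmds, iova);
	int ret;

	ret = av8l_fast_map_public(ptep, paddr, SZ_4K,
				   IOMMU_READ | IOMMU_WRITE);
	if (ret)
		return ret;

	/* ... DMA through the new mapping ... */

	av8l_fast_unmap_public(ptep, SZ_4K);
	return 0;
}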
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 436dc21..0e04308 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -31,6 +31,9 @@
 #define IOMMU_CACHE	(1 << 2) /* DMA cache coherency */
 #define IOMMU_NOEXEC	(1 << 3)
 #define IOMMU_MMIO	(1 << 4) /* e.g. things like MSI doorbells */
+#define IOMMU_PRIV	(1 << 5)
+/* Use upstream device's bus attribute */
+#define IOMMU_USE_UPSTREAM_HINT	(1 << 6)
 
 struct iommu_ops;
 struct iommu_group;
@@ -40,8 +43,12 @@
 struct notifier_block;
 
 /* iommu fault flags */
-#define IOMMU_FAULT_READ	0x0
-#define IOMMU_FAULT_WRITE	0x1
+#define IOMMU_FAULT_READ                (1 << 0)
+#define IOMMU_FAULT_WRITE               (1 << 1)
+#define IOMMU_FAULT_TRANSLATION         (1 << 2)
+#define IOMMU_FAULT_PERMISSION          (1 << 3)
+#define IOMMU_FAULT_EXTERNAL            (1 << 4)
+#define IOMMU_FAULT_TRANSACTION_STALLED (1 << 5)
 
 typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
 			struct device *, unsigned long, int, void *);
@@ -52,6 +59,10 @@
 	bool force_aperture;       /* DMA only allowed in mappable range? */
 };
 
+struct iommu_pgtbl_info {
+	void *pmds;
+};
+
 /* Domain feature flags */
 #define __IOMMU_DOMAIN_PAGING	(1U << 0)  /* Support for iommu_map/unmap */
 #define __IOMMU_DOMAIN_DMA_API	(1U << 1)  /* Domain for use in DMA-API
@@ -114,6 +125,19 @@
 	DOMAIN_ATTR_FSL_PAMU_ENABLE,
 	DOMAIN_ATTR_FSL_PAMUV1,
 	DOMAIN_ATTR_NESTING,	/* two stages of translation */
+	DOMAIN_ATTR_PT_BASE_ADDR,
+	DOMAIN_ATTR_CONTEXT_BANK,
+	DOMAIN_ATTR_DYNAMIC,
+	DOMAIN_ATTR_TTBR0,
+	DOMAIN_ATTR_CONTEXTIDR,
+	DOMAIN_ATTR_PROCID,
+	DOMAIN_ATTR_NON_FATAL_FAULTS,
+	DOMAIN_ATTR_S1_BYPASS,
+	DOMAIN_ATTR_ATOMIC,
+	DOMAIN_ATTR_SECURE_VMID,
+	DOMAIN_ATTR_FAST,
+	DOMAIN_ATTR_PGTBL_INFO,
+	DOMAIN_ATTR_USE_UPSTREAM_HINT,
 	DOMAIN_ATTR_MAX,
 };
 
@@ -131,6 +155,8 @@
 	int			prot;
 };
 
+extern struct dentry *iommu_debugfs_top;
+
 #ifdef CONFIG_IOMMU_API
 
 /**
@@ -145,6 +171,7 @@
  * @map_sg: map a scatter-gather list of physically contiguous memory chunks
  * to an iommu domain
  * @iova_to_phys: translate iova to physical address
+ * @iova_to_phys_hard: translate iova to physical address using IOMMU hardware
  * @add_device: add device to iommu grouping
  * @remove_device: remove device from iommu grouping
  * @device_group: find iommu group for a particular device
@@ -159,6 +186,12 @@
  * @domain_get_windows: Return the number of windows for a domain
  * @of_xlate: add OF master IDs to iommu grouping
  * @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @trigger_fault: trigger a fault on the device attached to an iommu domain
+ * @reg_read: read an IOMMU register
+ * @reg_write: write an IOMMU register
+ * @tlbi_domain: Invalidate all TLBs covering an iommu domain
+ * @enable_config_clocks: Enable all config clocks for this domain's IOMMU
+ * @disable_config_clocks: Disable all config clocks for this domain's IOMMU
  */
 struct iommu_ops {
 	bool (*capable)(enum iommu_cap);
@@ -176,6 +209,8 @@
 	size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
 			 struct scatterlist *sg, unsigned int nents, int prot);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
+	phys_addr_t (*iova_to_phys_hard)(struct iommu_domain *domain,
+					 dma_addr_t iova);
 	int (*add_device)(struct device *dev);
 	void (*remove_device)(struct device *dev);
 	struct iommu_group *(*device_group)(struct device *dev);
@@ -198,6 +233,14 @@
 	int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
 	/* Get the number of windows per domain */
 	u32 (*domain_get_windows)(struct iommu_domain *domain);
+	void (*trigger_fault)(struct iommu_domain *domain, unsigned long flags);
+	unsigned long (*reg_read)(struct iommu_domain *domain,
+				  unsigned long offset);
+	void (*reg_write)(struct iommu_domain *domain, unsigned long val,
+			  unsigned long offset);
+	void (*tlbi_domain)(struct iommu_domain *domain);
+	int (*enable_config_clocks)(struct iommu_domain *domain);
+	void (*disable_config_clocks)(struct iommu_domain *domain);
 
 	int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
 
@@ -222,6 +265,8 @@
 extern void iommu_detach_device(struct iommu_domain *domain,
 				struct device *dev);
 extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
+extern size_t iommu_pgsize(unsigned long pgsize_bitmap,
+			   unsigned long addr_merge, size_t size);
 extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
 		     phys_addr_t paddr, size_t size, int prot);
 extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -230,6 +275,8 @@
 				struct scatterlist *sg,unsigned int nents,
 				int prot);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
+extern phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+					   dma_addr_t iova);
 extern void iommu_set_fault_handler(struct iommu_domain *domain,
 			iommu_fault_handler_t handler, void *token);
 
@@ -301,6 +348,11 @@
  * Specifically, -ENOSYS is returned if a fault handler isn't installed
  * (though fault handlers can also return -ENOSYS, in case they want to
  * elicit the default behavior of the IOMMU drivers).
+ *
+ * Client fault handler returns -EBUSY to signal to the IOMMU driver
+ * that the client will take responsibility for any further fault
+ * handling, including clearing fault status registers or retrying
+ * the faulting transaction.
  */
 static inline int report_iommu_fault(struct iommu_domain *domain,
 		struct device *dev, unsigned long iova, int flags)
@@ -326,11 +378,37 @@
 	return domain->ops->map_sg(domain, iova, sg, nents, prot);
 }
 
+extern void iommu_trigger_fault(struct iommu_domain *domain,
+				unsigned long flags);
+
+extern unsigned long iommu_reg_read(struct iommu_domain *domain,
+				    unsigned long offset);
+extern void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
+			    unsigned long val);
 /* PCI device grouping function */
 extern struct iommu_group *pci_device_group(struct device *dev);
 /* Generic device grouping function */
 extern struct iommu_group *generic_device_group(struct device *dev);
 
+static inline void iommu_tlbiall(struct iommu_domain *domain)
+{
+	if (domain->ops->tlbi_domain)
+		domain->ops->tlbi_domain(domain);
+}
+
+static inline int iommu_enable_config_clocks(struct iommu_domain *domain)
+{
+	if (domain->ops->enable_config_clocks)
+		return domain->ops->enable_config_clocks(domain);
+	return 0;
+}
+
+static inline void iommu_disable_config_clocks(struct iommu_domain *domain)
+{
+	if (domain->ops->disable_config_clocks)
+		domain->ops->disable_config_clocks(domain);
+}
+
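As a hedged sketch (not part of this patch), a client driver could combine the new fault flags, the -EBUSY convention documented above for report_iommu_fault(), and the config-clock helpers roughly as follows; all names here are hypothetical.

static int example_smmu_fault_handler(struct iommu_domain *domain,
				      struct device *dev,
				      unsigned long iova, int flags,
				      void *token)
{
	if (flags & IOMMU_FAULT_TRANSLATION)
		dev_err(dev, "translation fault at IOVA 0x%lx\n", iova);

	/* Tell the IOMMU driver the client owns any further fault handling */
	return -EBUSY;
}

static void example_flush_domain(struct iommu_domain *domain)
{
	/* TLB invalidation typically needs the IOMMU config clocks on */
	if (!iommu_enable_config_clocks(domain)) {
		iommu_tlbiall(domain);
		iommu_disable_config_clocks(domain);
	}
}

/* Typically registered from the client's probe():
 *	iommu_set_fault_handler(domain, example_smmu_fault_handler, NULL);
 */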
 /**
  * struct iommu_fwspec - per-device IOMMU instance data
  * @ops: ops for this device's IOMMU
@@ -434,6 +512,12 @@
 	return 0;
 }
 
+static inline phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+						  dma_addr_t iova)
+{
+	return 0;
+}
+
 static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 				iommu_fault_handler_t handler, void *token)
 {
@@ -563,6 +647,35 @@
 {
 }
 
+static inline void iommu_trigger_fault(struct iommu_domain *domain,
+				       unsigned long flags)
+{
+}
+
+static inline unsigned long iommu_reg_read(struct iommu_domain *domain,
+					   unsigned long offset)
+{
+	return 0;
+}
+
+static inline void iommu_reg_write(struct iommu_domain *domain,
+				   unsigned long val, unsigned long offset)
+{
+}
+
+static inline void iommu_tlbiall(struct iommu_domain *domain)
+{
+}
+
+static inline int iommu_enable_config_clocks(struct iommu_domain *domain)
+{
+	return 0;
+}
+
+static inline void iommu_disable_config_clocks(struct iommu_domain *domain)
+{
+}
+
 static inline int iommu_fwspec_init(struct device *dev,
 				    struct fwnode_handle *iommu_fwnode,
 				    const struct iommu_ops *ops)
diff --git a/include/linux/ion.h b/include/linux/ion.h
new file mode 100644
index 0000000..9d72374
--- /dev/null
+++ b/include/linux/ion.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_ION_H__
+#define __LINUX_ION_H__
+
+#include "../../drivers/staging/android/ion/ion.h"
+
+#endif /* __LINUX_ION_H__ */
diff --git a/include/linux/ipa.h b/include/linux/ipa.h
new file mode 100644
index 0000000..d173b4c
--- /dev/null
+++ b/include/linux/ipa.h
@@ -0,0 +1,2176 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_H_
+#define _IPA_H_
+
+#include <linux/msm_ipa.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <linux/msm-sps.h>
+#include <linux/if_ether.h>
+#include "linux/msm_gsi.h"
+
+#define IPA_APPS_MAX_BW_IN_MBPS 700
+/**
+ * enum ipa_transport_type
+ * transport type: either GSI or SPS
+ */
+enum ipa_transport_type {
+	IPA_TRANSPORT_TYPE_SPS,
+	IPA_TRANSPORT_TYPE_GSI
+};
+
+/**
+ * enum ipa_nat_en_type - NAT setting type in IPA end-point
+ */
+enum ipa_nat_en_type {
+	IPA_BYPASS_NAT,
+	IPA_SRC_NAT,
+	IPA_DST_NAT,
+};
+
+/**
+ * enum ipa_mode_type - mode setting type in IPA end-point
+ * @BASIC: basic mode
+ * @ENABLE_FRAMING_HDLC: not currently supported
+ * @ENABLE_DEFRAMING_HDLC: not currently supported
+ * @DMA: all data arriving at IPA will not go through the IPA logic blocks; this
+ *  allows IPA to work as DMA for specific pipes.
+ */
+enum ipa_mode_type {
+	IPA_BASIC,
+	IPA_ENABLE_FRAMING_HDLC,
+	IPA_ENABLE_DEFRAMING_HDLC,
+	IPA_DMA,
+};
+
+/**
+ *  enum ipa_aggr_en_type - aggregation setting type in IPA
+ *  end-point
+ */
+enum ipa_aggr_en_type {
+	IPA_BYPASS_AGGR,
+	IPA_ENABLE_AGGR,
+	IPA_ENABLE_DEAGGR,
+};
+
+/**
+ *  enum ipa_aggr_type - type of aggregation in IPA end-point
+ */
+enum ipa_aggr_type {
+	IPA_MBIM_16 = 0,
+	IPA_HDLC    = 1,
+	IPA_TLP     = 2,
+	IPA_RNDIS   = 3,
+	IPA_GENERIC = 4,
+	IPA_QCMAP   = 6,
+};
+
+/**
+ * enum ipa_aggr_mode - global aggregation mode
+ */
+enum ipa_aggr_mode {
+	IPA_MBIM_AGGR,
+	IPA_QCNCM_AGGR,
+};
+
+/**
+ * enum ipa_dp_evt_type - type of event client callback is
+ * invoked for on data path
+ * @IPA_RECEIVE: data is struct sk_buff
+ * @IPA_WRITE_DONE: data is struct sk_buff
+ */
+enum ipa_dp_evt_type {
+	IPA_RECEIVE,
+	IPA_WRITE_DONE,
+	IPA_CLIENT_START_POLL,
+	IPA_CLIENT_COMP_NAPI,
+};
+
+/**
+ * enum hdr_total_len_or_pad_type - type of value held by TOTAL_LEN_OR_PAD
+ * field in header configuration register.
+ * @IPA_HDR_PAD: field is used as padding length
+ * @IPA_HDR_TOTAL_LEN: field is used as total length
+ */
+enum hdr_total_len_or_pad_type {
+	IPA_HDR_PAD = 0,
+	IPA_HDR_TOTAL_LEN = 1,
+};
+
+/**
+ * struct ipa_ep_cfg_nat - NAT configuration in IPA end-point
+ * @nat_en:	This defines the default NAT mode for the pipe: in case of
+ *		filter miss - the default NAT mode defines the NATing operation
+ *		on the packet. Valid for Input Pipes only (IPA consumer)
+ */
+struct ipa_ep_cfg_nat {
+	enum ipa_nat_en_type nat_en;
+};
+
+/**
+ * struct ipa_ep_cfg_hdr - header configuration in IPA end-point
+ *
+ * @hdr_len:Header length in bytes to be added/removed. Assuming
+ *			header len is constant per endpoint. Valid for
+ *			both Input and Output Pipes
+ * @hdr_ofst_metadata_valid:	0: Metadata_Ofst  value is invalid, i.e., no
+ *			metadata within header.
+ *			1: Metadata_Ofst  value is valid, i.e., metadata
+ *			within header is in offset Metadata_Ofst Valid
+ *			for Input Pipes only (IPA Consumer) (for output
+ *			pipes, metadata already set within the header)
+ * @hdr_ofst_metadata:	Offset within header in which metadata resides
+ *			Size of metadata - 4bytes
+ *			Example -  Stream ID/SSID/mux ID.
+ *			Valid for  Input Pipes only (IPA Consumer) (for output
+ *			pipes, metadata already set within the header)
+ * @hdr_additional_const_len:	Defines the constant length that should be added
+ *			to the payload length in order for IPA to update
+ *			correctly the length field within the header
+ *			(valid only in case Hdr_Ofst_Pkt_Size_Valid=1)
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size_valid:	0: Hdr_Ofst_Pkt_Size  value is invalid, i.e., no
+ *			length field within the inserted header
+ *			1: Hdr_Ofst_Pkt_Size  value is valid, i.e., a
+ *			packet length field resides within the header
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_ofst_pkt_size:	Offset within header in which packet size reside. Upon
+ *			Header Insertion, IPA will update this field within the
+ *			header with the packet length . Assumption is that
+ *			header length field size is constant and is 2Bytes
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_a5_mux:	Determines whether A5 Mux header should be added to the packet.
+ *			This bit is valid only when Hdr_En=01(Header Insertion)
+ *			SW should set this bit for IPA-to-A5 pipes.
+ *			0: Do not insert A5 Mux Header
+ *			1: Insert A5 Mux Header
+ *			Valid for Output Pipes (IPA Producer)
+ * @hdr_remove_additional:	bool switch, remove more of the header
+ *			based on the aggregation configuration (register
+ *			HDR_LEN_INC_DEAGG_HDR)
+ * @hdr_metadata_reg_valid:	bool switch, metadata from
+ *			register INIT_HDR_METADATA_n is valid.
+ *			(relevant only for IPA Consumer pipes)
+ */
+struct ipa_ep_cfg_hdr {
+	u32  hdr_len;
+	u32  hdr_ofst_metadata_valid;
+	u32  hdr_ofst_metadata;
+	u32  hdr_additional_const_len;
+	u32  hdr_ofst_pkt_size_valid;
+	u32  hdr_ofst_pkt_size;
+	u32  hdr_a5_mux;
+	u32  hdr_remove_additional;
+	u32  hdr_metadata_reg_valid;
+};
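For illustration only (the values are assumptions, not taken from this patch), a producer pipe that inserts an 8-byte header whose 2-byte length field sits at offset 6 could be described as:

static const struct ipa_ep_cfg_hdr example_hdr_cfg = {
	.hdr_len			= 8,	/* constant header size in bytes */
	.hdr_ofst_pkt_size_valid	= 1,	/* header carries a length field */
	.hdr_ofst_pkt_size		= 6,	/* offset of the length field */
	.hdr_additional_const_len	= 0,	/* nothing extra added to the length */
};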
+
+/**
+ * struct ipa_ep_cfg_hdr_ext - extended header configuration in IPA end-point
+ * @hdr_pad_to_alignment: Pad packet to specified alignment
+ *	(2^pad to alignment value), i.e. value of 3 means pad to 2^3 = 8 bytes
+ *	alignment. Alignment is to 0,2 up to 32 bytes (IPAv2 does not support 64
+ *	byte alignment). Valid for Output Pipes only (IPA Producer).
+ * @hdr_total_len_or_pad_offset: Offset to length field containing either
+ *	total length or pad length, per hdr_total_len_or_pad config
+ * @hdr_payload_len_inc_padding: 0-IPA_ENDP_INIT_HDR_n's
+ *	HDR_OFST_PKT_SIZE does
+ *	not includes padding bytes size, payload_len = packet length,
+ *	1-IPA_ENDP_INIT_HDR_n's HDR_OFST_PKT_SIZE includes
+ *	padding bytes size, payload_len = packet length + padding
+ * @hdr_total_len_or_pad: field is used as PAD length or as Total length
+ *	(header + packet + padding)
+ * @hdr_total_len_or_pad_valid: 0-Ignore TOTAL_LEN_OR_PAD field, 1-Process
+ *	TOTAL_LEN_OR_PAD field
+ * @hdr_little_endian: 0-Big Endian, 1-Little Endian
+ */
+struct ipa_ep_cfg_hdr_ext {
+	u32 hdr_pad_to_alignment;
+	u32 hdr_total_len_or_pad_offset;
+	bool hdr_payload_len_inc_padding;
+	enum hdr_total_len_or_pad_type hdr_total_len_or_pad;
+	bool hdr_total_len_or_pad_valid;
+	bool hdr_little_endian;
+};
+
+/**
+ * struct ipa_ep_cfg_mode - mode configuration in IPA end-point
+ * @mode:	Valid for Input Pipes only (IPA Consumer)
+ * @dst:	This parameter specifies the output pipe to which the packets
+ *		will be routed to.
+ *		This parameter is valid for Mode=DMA and not valid for
+ *		Mode=Basic
+ *		Valid for Input Pipes only (IPA Consumer)
+ */
+struct ipa_ep_cfg_mode {
+	enum ipa_mode_type mode;
+	enum ipa_client_type dst;
+};
+
+/**
+ * struct ipa_ep_cfg_aggr - aggregation configuration in IPA end-point
+ *
+ * @aggr_en:	Valid for both Input and Output Pipes
+ * @aggr:	aggregation type (Valid for both Input and Output Pipes)
+ * @aggr_byte_limit:	Limit of aggregated packet size in KB (<=32KB) When set
+ *			to 0, there is no size limitation on the aggregation.
+ *			When both, Aggr_Byte_Limit and Aggr_Time_Limit are set
+ *			to 0, there is no aggregation, every packet is sent
+ *			independently according to the aggregation structure
+ *			Valid for Output Pipes only (IPA Producer )
+ * @aggr_time_limit:	Timer to close aggregated packet (<=32ms) When set to 0,
+ *			there is no time limitation on the aggregation.  When
+ *			both, Aggr_Byte_Limit and Aggr_Time_Limit are set to 0,
+ *			there is no aggregation, every packet is sent
+ *			independently according to the aggregation structure
+ *			Valid for Output Pipes only (IPA Producer)
+ * @aggr_pkt_limit: Defines if EOF close aggregation or not. if set to false
+ *			HW closes aggregation (sends EOT) only based on its
+ *			aggregation config (byte/time limit, etc). if set to
+ *			true EOF closes aggregation in addition to HW based
+ *			aggregation closure. Valid for Output Pipes only (IPA
+ *			Producer). EOF affects only Pipes configured for
+ *			generic aggregation.
+ * @aggr_hard_byte_limit_en: If set to 1, byte-limit aggregation for this
+ *			pipe will apply a hard-limit behavior which will not
+ *			allow frames to be closed with more than byte-limit
+ *			bytes. If set to 0, previous byte-limit behavior
+ *			will apply - frames close once a packet causes the
+ *			accumulated byte-count to cross the byte-limit
+ *			threshold (closed frame will contain that packet).
+ * @aggr_sw_eof_active: 0: EOF does not close aggregation. HW closes aggregation
+ *			(sends EOT) only based on its aggregation config
+ *			(byte/time limit, etc).
+ *			1: EOF closes aggregation in addition to HW based
+ *			aggregation closure. Valid for Output Pipes only (IPA
+ *			Producer). EOF affects only Pipes configured for generic
+ *			aggregation.
+ */
+struct ipa_ep_cfg_aggr {
+	enum ipa_aggr_en_type aggr_en;
+	enum ipa_aggr_type aggr;
+	u32 aggr_byte_limit;
+	u32 aggr_time_limit;
+	u32 aggr_pkt_limit;
+	u32 aggr_hard_byte_limit_en;
+	bool aggr_sw_eof_active;
+};
+
+/**
+ * struct ipa_ep_cfg_route - route configuration in IPA end-point
+ * @rt_tbl_hdl:	Defines the default routing table index to be used in case there
+ *		is no filter rule matching, valid for Input Pipes only (IPA
+ *		Consumer). Clients should set this to 0 which will cause default
+ *		v4 and v6 routes setup internally by IPA driver to be used for
+ *		this end-point
+ */
+struct ipa_ep_cfg_route {
+	u32 rt_tbl_hdl;
+};
+
+/**
+ * struct ipa_ep_cfg_holb - head of line blocking configuration in IPA end-point
+ * @en: enable(1 => ok to drop pkt)/disable(0 => never drop pkt)
+ * @tmr_val: duration in units of 128 IPA clk clock cycles [0,511], 1 clk=1.28us
+ *	     IPAv2.5 supports a 32 bit HOLB timeout value, previous versions
+ *	     support 16 bit
+ */
+struct ipa_ep_cfg_holb {
+	u16 en;
+	u32 tmr_val;
+};
+
+/**
+ * struct ipa_ep_cfg_deaggr - deaggregation configuration in IPA end-point
+ * @deaggr_hdr_len: Deaggregation Header length in bytes. Valid only for Input
+ *	Pipes, which are configured for 'Generic' deaggregation.
+ * @packet_offset_valid: - 0: PACKET_OFFSET is not used, 1: PACKET_OFFSET is
+ *	used.
+ * @packet_offset_location: Location of packet offset field, which specifies
+ *	the offset to the packet from the start of the packet offset field.
+ * @max_packet_len: DEAGGR Max Packet Length in Bytes. A Packet with higher
+ *	size will be treated as an error. 0 - Packet Length is not Bound,
+ *	IPA should not check for a Max Packet Length.
+ */
+struct ipa_ep_cfg_deaggr {
+	u32 deaggr_hdr_len;
+	bool packet_offset_valid;
+	u32 packet_offset_location;
+	u32 max_packet_len;
+};
+
+/**
+ * enum ipa_cs_offload - checksum offload setting
+ */
+enum ipa_cs_offload {
+	IPA_DISABLE_CS_OFFLOAD,
+	IPA_ENABLE_CS_OFFLOAD_UL,
+	IPA_ENABLE_CS_OFFLOAD_DL,
+	IPA_CS_RSVD
+};
+
+/**
+ * struct ipa_ep_cfg_cfg - IPA ENDP_INIT Configuration register
+ * @frag_offload_en: - 0 - IP packet fragment handling is disabled. IP packet
+ *	fragments should be sent to SW. SW is responsible for
+ *	configuring filter rules, and IP packet filter exception should be
+ *	used to send all fragments to SW. 1 - IP packet fragment
+ *	handling is enabled. IPA checks for fragments and uses frag
+ *	rules table for processing fragments. Valid only for Input Pipes
+ *	(IPA Consumer)
+ * @cs_offload_en: Checksum offload enable: 00: Disable checksum offload, 01:
+ *	Enable checksum calculation offload (UL) - For output pipe
+ *	(IPA producer) specifies that checksum trailer is to be added.
+ *	For input pipe (IPA consumer) specifies presence of checksum
+ *	header and IPA checksum calculation accordingly. 10: Enable
+ *	checksum calculation offload (DL) - For output pipe (IPA
+ *	producer) specifies that checksum trailer is to be added. For
+ *	input pipe (IPA consumer) specifies IPA checksum calculation.
+ *	11: Reserved
+ * @cs_metadata_hdr_offset: Offset in Words (4 bytes) within header in which
+ *	checksum meta info header (4 bytes) starts (UL). Values are 0-15, which
+ *	mean 0 - 60 byte checksum header offset. Valid for input
+ *	pipes only (IPA consumer)
+ * @gen_qmb_master_sel: Select bit for ENDP GEN-QMB master. This is used to
+ *	separate DDR & PCIe transactions in-order to limit them as
+ *	separate DDR & PCIe transactions in order to limit them as
+ *	a group (using the MAX_WRITES/READS limitation). Valid for input and
+ */
+struct ipa_ep_cfg_cfg {
+	bool frag_offload_en;
+	enum ipa_cs_offload cs_offload_en;
+	u8 cs_metadata_hdr_offset;
+	u8 gen_qmb_master_sel;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata_mask - Endpoint initialization hdr metadata mask
+ * @metadata_mask: Mask specifying which metadata bits to write to
+ *	IPA_ENDP_INIT_HDR_n.s HDR_OFST_METADATA. Only
+ *	masked metadata bits (set to 1) will be written. Valid for Output
+ *	Pipes only (IPA Producer)
+ */
+struct ipa_ep_cfg_metadata_mask {
+	u32 metadata_mask;
+};
+
+/**
+ * struct ipa_ep_cfg_metadata - Meta Data configuration in IPA end-point
+ * @md:	This defines the meta data from tx data descriptor
+ * @qmap_id: qmap id
+ */
+struct ipa_ep_cfg_metadata {
+	u32 qmap_id;
+};
+
+/**
+ * struct ipa_ep_cfg_seq - HPS/DPS sequencer type configuration in IPA end-point
+ * @set_dynamic:  0 - HPS/DPS seq type is configured statically,
+ *		   1 - HPS/DPS seq type is set to seq_type
+ * @seq_type: HPS/DPS sequencer type configuration
+ */
+struct ipa_ep_cfg_seq {
+	bool set_dynamic;
+	int seq_type;
+};
+
+/**
+ * struct ipa_ep_cfg - configuration of IPA end-point
+ * @nat:		NAT parmeters
+ * @hdr:		Header parameters
+ * @hdr_ext:		Extended header parameters
+ * @mode:		Mode parameters
+ * @aggr:		Aggregation parameters
+ * @deaggr:		Deaggregation params
+ * @route:		Routing parameters
+ * @cfg:		Configuration register data
+ * @metadata_mask:	Hdr metadata mask
+ * @meta:		Meta Data
+ * @seq:		HPS/DPS sequencers configuration
+ */
+struct ipa_ep_cfg {
+	struct ipa_ep_cfg_nat nat;
+	struct ipa_ep_cfg_hdr hdr;
+	struct ipa_ep_cfg_hdr_ext hdr_ext;
+	struct ipa_ep_cfg_mode mode;
+	struct ipa_ep_cfg_aggr aggr;
+	struct ipa_ep_cfg_deaggr deaggr;
+	struct ipa_ep_cfg_route route;
+	struct ipa_ep_cfg_cfg cfg;
+	struct ipa_ep_cfg_metadata_mask metadata_mask;
+	struct ipa_ep_cfg_metadata meta;
+	struct ipa_ep_cfg_seq seq;
+};
+
+/**
+ * struct ipa_ep_cfg_ctrl - Control configuration in IPA end-point
+ * @ipa_ep_suspend: 0 - ENDP is enabled, 1 - ENDP is suspended (disabled).
+ *			Valid for PROD Endpoints
+ * @ipa_ep_delay:   0 - ENDP is free-running, 1 - ENDP is delayed.
+ *			SW controls the data flow of an endpoint using this bit.
+ *			Valid for CONS Endpoints
+ */
+struct ipa_ep_cfg_ctrl {
+	bool ipa_ep_suspend;
+	bool ipa_ep_delay;
+};
+
+/**
+ * x should be in bytes
+ */
+#define IPA_NUM_OF_FIFO_DESC(x) (x/sizeof(struct sps_iovec))
+typedef void (*ipa_notify_cb)(void *priv, enum ipa_dp_evt_type evt,
+		       unsigned long data);
+
+/**
+ * struct ipa_connect_params - low-level client connect input parameters. Either
+ * client allocates the data and desc FIFO and specifies that in data+desc OR
+ * specifies sizes and pipe_mem pref and IPA does the allocation.
+ *
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	type of "client"
+ * @client_bam_hdl:	 client SPS handle
+ * @client_ep_idx:	 client PER EP index
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie evt - type of event data - data relevant
+ *		to event.  May not be valid. See event_type enum for valid
+ *		cases.
+ * @desc_fifo_sz:	size of desc FIFO
+ * @data_fifo_sz:	size of data FIFO
+ * @pipe_mem_preferred:	if true, try to alloc the FIFOs in pipe mem, fallback
+ *			to sys mem if pipe mem alloc fails
+ * @desc:	desc FIFO meta-data when client has allocated it
+ * @data:	data FIFO meta-data when client has allocated it
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ */
+struct ipa_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	unsigned long client_bam_hdl;
+	u32 client_ep_idx;
+	void *priv;
+	ipa_notify_cb notify;
+	u32 desc_fifo_sz;
+	u32 data_fifo_sz;
+	bool pipe_mem_preferred;
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+};
+
+/**
+ *  struct ipa_sps_params - SPS related output parameters resulting from
+ *  low/high level client connect
+ *  @ipa_bam_hdl:	IPA SPS handle
+ *  @ipa_ep_idx:	IPA PER EP index
+ *  @desc:	desc FIFO meta-data
+ *  @data:	data FIFO meta-data
+ */
+struct ipa_sps_params {
+	unsigned long ipa_bam_hdl;
+	u32 ipa_ep_idx;
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+};
+
+/**
+ * struct ipa_tx_intf - interface tx properties
+ * @num_props:	number of tx properties
+ * @prop:	the tx properties array
+ */
+struct ipa_tx_intf {
+	u32 num_props;
+	struct ipa_ioc_tx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_rx_intf - interface rx properties
+ * @num_props:	number of rx properties
+ * @prop:	the rx properties array
+ */
+struct ipa_rx_intf {
+	u32 num_props;
+	struct ipa_ioc_rx_intf_prop *prop;
+};
+
+/**
+ * struct ipa_ext_intf - interface ext properties
+ * @excp_pipe_valid:	is next field valid?
+ * @excp_pipe:	exception packets should be routed to this pipe
+ * @num_props:	number of ext properties
+ * @prop:	the ext properties array
+ */
+struct ipa_ext_intf {
+	bool excp_pipe_valid;
+	enum ipa_client_type excp_pipe;
+	u32 num_props;
+	struct ipa_ioc_ext_intf_prop *prop;
+};
+
+/**
+ * struct ipa_sys_connect_params - information needed to setup an IPA end-point
+ * in system-BAM mode
+ * @ipa_ep_cfg:	IPA EP configuration
+ * @client:	the type of client who "owns" the EP
+ * @desc_fifo_sz: size of desc FIFO. This number is used to allocate the desc
+ *		fifo for BAM. For GSI, this size is used by IPA driver as a
+ *		baseline to calculate the GSI ring size in the following way:
+ *		For PROD pipes, GSI ring is 4 * desc_fifo_sz.
+ *		For CONS pipes, GSI ring is 2 * desc_fifo_sz.
+ * @priv:	callback cookie
+ * @notify:	callback
+ *		priv - callback cookie
+ *		evt - type of event
+ *		data - data relevant to event.  May not be valid. See event_type
+ *		enum for valid cases.
+ * @skip_ep_cfg: boolean field that determines if EP should be configured
+ *  by IPA driver
+ * @keep_ipa_awake: when true, IPA will not be clock gated
+ * @napi_enabled: when true, IPA calls the client callback to start polling
+ */
+struct ipa_sys_connect_params {
+	struct ipa_ep_cfg ipa_ep_cfg;
+	enum ipa_client_type client;
+	u32 desc_fifo_sz;
+	void *priv;
+	ipa_notify_cb notify;
+	bool skip_ep_cfg;
+	bool keep_ipa_awake;
+	bool napi_enabled;
+	bool recycle_enabled;
+};
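A hedged sketch of the intended usage (the client type, FIFO size and callback behaviour are assumptions, not taken from this patch); ipa_setup_sys_pipe() is declared further below in this header.

static void example_ipa_notify(void *priv, enum ipa_dp_evt_type evt,
			       unsigned long data)
{
	/* For IPA_RECEIVE, data carries the received sk_buff */
	if (evt == IPA_RECEIVE)
		dev_kfree_skb_any((struct sk_buff *)data);
}

static int example_setup_cons_pipe(u32 *clnt_hdl)
{
	struct ipa_sys_connect_params sys_in = {
		.client		= IPA_CLIENT_APPS_LAN_CONS,	/* hypothetical client */
		.desc_fifo_sz	= 0x800,
		.notify		= example_ipa_notify,
		.priv		= NULL,
	};

	return ipa_setup_sys_pipe(&sys_in, clnt_hdl);
}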
+
+/**
+ * struct ipa_tx_meta - meta-data for the TX packet
+ * @dma_address: dma mapped address of TX packet
+ * @dma_address_valid: is above field valid?
+ */
+struct ipa_tx_meta {
+	u8 pkt_init_dst_ep;
+	bool pkt_init_dst_ep_valid;
+	bool pkt_init_dst_ep_remote;
+	dma_addr_t dma_address;
+	bool dma_address_valid;
+};
+
+/**
+ * typedef ipa_msg_free_fn - callback function
+ * @param buff - [in] the message payload to free
+ * @param len - [in] size of message payload
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver to
+ * free message payload after IPA driver processing is complete
+ *
+ * No return value
+ */
+typedef void (*ipa_msg_free_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * typedef ipa_msg_pull_fn - callback function
+ * @param buff - [in] where to copy message payload
+ * @param len - [in] size of buffer to copy payload into
+ * @param type - [in] the message type
+ *
+ * Message callback registered by kernel client with IPA driver for
+ * IPA driver to pull messages from the kernel client upon demand from
+ * user-space
+ *
+ * Returns how many bytes were copied into the buffer.
+ */
+typedef int (*ipa_msg_pull_fn)(void *buff, u32 len, u32 type);
+
+/**
+ * enum ipa_voltage_level - IPA Voltage levels
+ */
+enum ipa_voltage_level {
+	IPA_VOLTAGE_UNSPECIFIED,
+	IPA_VOLTAGE_SVS = IPA_VOLTAGE_UNSPECIFIED,
+	IPA_VOLTAGE_NOMINAL,
+	IPA_VOLTAGE_TURBO,
+	IPA_VOLTAGE_MAX,
+};
+
+/**
+ * enum ipa_rm_event - IPA RM events
+ *
+ * Indicate the resource state change
+ */
+enum ipa_rm_event {
+	IPA_RM_RESOURCE_GRANTED,
+	IPA_RM_RESOURCE_RELEASED
+};
+
+typedef void (*ipa_rm_notify_cb)(void *user_data,
+		enum ipa_rm_event event,
+		unsigned long data);
+/**
+ * struct ipa_rm_register_params - information needed to
+ *      register IPA RM client with IPA RM
+ *
+ * @user_data: IPA RM client provided information
+ *		to be passed to notify_cb callback below
+ * @notify_cb: callback which is called by the resource
+ *		to notify the IPA RM client about its state
+ *		change. The IPA RM client is expected to perform
+ *		non-blocking operations only in notify_cb and to
+ *		release the notification context as soon as
+ *		possible.
+ */
+struct ipa_rm_register_params {
+	void *user_data;
+	ipa_rm_notify_cb notify_cb;
+};
+
+/**
+ * struct ipa_rm_create_params - information needed to initialize
+ *				the resource
+ * @name: resource name
+ * @floor_voltage: floor voltage needed for client to operate in maximum
+ *		bandwidth.
+ * @reg_params: register parameters; contents are ignored
+ *		for a consumer resource (NULL should be provided
+ *		for consumer resources)
+ * @request_resource: function which should be called to request resource,
+ *			NULL should be provided for producer resource
+ * @release_resource: function which should be called to release resource,
+ *			NULL should be provided for producer resource
+ *
+ * The IPA RM client is expected to perform non-blocking operations only
+ * in the request_resource and release_resource functions and to
+ * release the notification context as soon as possible.
+ */
+struct ipa_rm_create_params {
+	enum ipa_rm_resource_name name;
+	enum ipa_voltage_level floor_voltage;
+	struct ipa_rm_register_params reg_params;
+	int (*request_resource)(void);
+	int (*release_resource)(void);
+};
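Illustrative sketch (not part of this patch) of creating a consumer resource per the comment above: consumers supply request/release callbacks while reg_params is left zeroed; the resource name here is hypothetical.

static int example_request_resource(void)
{
	/* Non-blocking: start voting for clocks/bus and return 0 once granted */
	return 0;
}

static int example_release_resource(void)
{
	return 0;
}

static int example_create_consumer(void)
{
	struct ipa_rm_create_params params = {
		.name			= IPA_RM_RESOURCE_USB_CONS,	/* hypothetical */
		.floor_voltage		= IPA_VOLTAGE_SVS,
		.request_resource	= example_request_resource,
		.release_resource	= example_release_resource,
	};

	return ipa_rm_create_resource(&params);
}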
+
+/**
+ * struct ipa_rm_perf_profile - information regarding IPA RM client performance
+ * profile
+ *
+ * @max_supported_bandwidth_mbps: maximum bandwidth needed by the client in Mbps
+ */
+struct ipa_rm_perf_profile {
+	u32 max_supported_bandwidth_mbps;
+};
+
+#define A2_MUX_HDR_NAME_V4_PREF "dmux_hdr_v4_"
+#define A2_MUX_HDR_NAME_V6_PREF "dmux_hdr_v6_"
+
+/**
+ * enum teth_tethering_mode - Tethering mode (Rmnet / MBIM)
+ */
+enum teth_tethering_mode {
+	TETH_TETHERING_MODE_RMNET,
+	TETH_TETHERING_MODE_MBIM,
+	TETH_TETHERING_MODE_MAX,
+};
+
+/**
+ * teth_bridge_init_params - Parameters used for in/out USB API
+ * @usb_notify_cb:	Callback function which should be used by the caller.
+ * Output parameter.
+ * @private_data:	Data for the callback function. Should be used by the
+ * caller. Output parameter.
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ *  should or should not configure this end-point.
+ */
+struct teth_bridge_init_params {
+	ipa_notify_cb usb_notify_cb;
+	void *private_data;
+	enum ipa_client_type client;
+	bool skip_ep_cfg;
+};
+
+/**
+ * struct teth_bridge_connect_params - Parameters used in teth_bridge_connect()
+ * @ipa_usb_pipe_hdl:	IPA to USB pipe handle, returned from ipa_connect()
+ * @usb_ipa_pipe_hdl:	USB to IPA pipe handle, returned from ipa_connect()
+ * @tethering_mode:	Rmnet or MBIM
+ * @client_type:	IPA "client" name (IPA_CLIENT_USB#_PROD)
+ */
+struct teth_bridge_connect_params {
+	u32 ipa_usb_pipe_hdl;
+	u32 usb_ipa_pipe_hdl;
+	enum teth_tethering_mode tethering_mode;
+	enum ipa_client_type client_type;
+};
+
+/**
+ * struct  ipa_tx_data_desc - information needed
+ * to send a data packet to HW
+ * @link: link to data descriptors
+ * @priv: client specific private data
+ * @pyld_buffer: pointer to the data buffer that holds frame
+ * @pyld_len: length of the data packet
+ */
+struct ipa_tx_data_desc {
+	struct list_head link;
+	void *priv;
+	void *pyld_buffer;
+	u16  pyld_len;
+};
+
+/**
+ * struct  ipa_rx_data - information needed
+ * to send to wlan driver on receiving data from ipa hw
+ * @skb: skb
+ * @dma_addr: DMA address of this Rx packet
+ */
+struct ipa_rx_data {
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+};
+
+/**
+ * enum ipa_irq_type - IPA Interrupt Type
+ * Used to register handlers for IPA interrupts
+ *
+ * Below enum is a logical mapping and not the actual interrupt bit in HW
+ */
+enum ipa_irq_type {
+	IPA_BAD_SNOC_ACCESS_IRQ,
+	IPA_EOT_COAL_IRQ,
+	IPA_UC_IRQ_0,
+	IPA_UC_IRQ_1,
+	IPA_UC_IRQ_2,
+	IPA_UC_IRQ_3,
+	IPA_UC_IN_Q_NOT_EMPTY_IRQ,
+	IPA_UC_RX_CMD_Q_NOT_FULL_IRQ,
+	IPA_UC_TX_CMD_Q_NOT_FULL_IRQ,
+	IPA_UC_TO_PROC_ACK_Q_NOT_FULL_IRQ,
+	IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ,
+	IPA_RX_ERR_IRQ,
+	IPA_DEAGGR_ERR_IRQ,
+	IPA_TX_ERR_IRQ,
+	IPA_STEP_MODE_IRQ,
+	IPA_PROC_ERR_IRQ,
+	IPA_TX_SUSPEND_IRQ,
+	IPA_TX_HOLB_DROP_IRQ,
+	IPA_BAM_IDLE_IRQ,
+	IPA_BAM_GSI_IDLE_IRQ = IPA_BAM_IDLE_IRQ,
+	IPA_IRQ_MAX
+};
+
+/**
+ * struct ipa_tx_suspend_irq_data - interrupt data for IPA_TX_SUSPEND_IRQ
+ * @endpoints: bitmask of endpoints which caused the IPA_TX_SUSPEND_IRQ
+ *	interrupt
+ */
+struct ipa_tx_suspend_irq_data {
+	u32 endpoints;
+};
+
+
+/**
+ * typedef ipa_irq_handler_t - irq handler/callback type
+ * @param ipa_irq_type - [in] interrupt type
+ * @param private_data - [in, out] the client private data
+ * @param interrupt_data - [out] interrupt information data
+ *
+ * callback registered by ipa_add_interrupt_handler function to
+ * handle a specific interrupt type
+ *
+ * No return value
+ */
+typedef void (*ipa_irq_handler_t)(enum ipa_irq_type interrupt,
+				void *private_data,
+				void *interrupt_data);
+
+/**
+ * struct IpaHwBamStats_t - Structure holding the BAM statistics
+ *
+ * @bamFifoFull : Number of times Bam Fifo got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @bamFifoEmpty : Number of times Bam Fifo got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @bamFifoUsageHigh : Number of times Bam fifo usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @bamFifoUsageLow : Number of times Bam fifo usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+*/
+struct IpaHwBamStats_t {
+	u32 bamFifoFull;
+	u32 bamFifoEmpty;
+	u32 bamFifoUsageHigh;
+	u32 bamFifoUsageLow;
+	u32 bamUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwRingStats_t - Structure holding the Ring statistics
+ *
+ * @ringFull : Number of times Transfer Ring got full - For In Ch: Good,
+ * For Out Ch: Bad
+ * @ringEmpty : Number of times Transfer Ring got empty - For In Ch: Bad,
+ * For Out Ch: Good
+ * @ringUsageHigh : Number of times Transfer Ring usage went above 75% -
+ * For In Ch: Good, For Out Ch: Bad
+ * @ringUsageLow : Number of times Transfer Ring usage went below 25% -
+ * For In Ch: Bad, For Out Ch: Good
+*/
+struct IpaHwRingStats_t {
+	u32 ringFull;
+	u32 ringEmpty;
+	u32 ringUsageHigh;
+	u32 ringUsageLow;
+	u32 RingUtilCount;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIRxInfoData_t - Structure holding the WDI Rx channel
+ * structures
+ *
+ * @max_outstanding_pkts : Number of outstanding packets in Rx Ring
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @rx_ring_rp_value : Read pointer last advertised to the WLAN FW
+ * @rx_ind_ring_stats : Ring info
+ * @bam_stats : BAM info
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_pkts_in_dis_uninit_state : number of completions we
+ *		received in disabled or uninitialized state
+ * @num_ic_inj_vdev_change : Number of times the Imm Cmd is
+ *		injected due to vdev_id change
+ * @num_ic_inj_fw_desc_change : Number of times the Imm Cmd is
+ *		injected due to fw_desc change
+*/
+struct IpaHwStatsWDIRxInfoData_t {
+	u32 max_outstanding_pkts;
+	u32 num_pkts_processed;
+	u32 rx_ring_rp_value;
+	struct IpaHwRingStats_t rx_ind_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32 num_bam_int_handled;
+	u32 num_db;
+	u32 num_unexpected_db;
+	u32 num_pkts_in_dis_uninit_state;
+	u32 num_ic_inj_vdev_change;
+	u32 num_ic_inj_fw_desc_change;
+	u32 reserved1;
+	u32 reserved2;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDITxInfoData_t  - Structure holding the WDI Tx channel
+ * structures
+ *
+ * @num_pkts_processed : Number of packets processed - cumulative
+ * @copy_engine_doorbell_value : latest value of doorbell written to copy engine
+ * @num_db_fired : Number of DB from uC FW to Copy engine
+ * @tx_comp_ring_stats : ring info
+ * @bam_stats : BAM info
+ * @num_db : Number of times the doorbell was rung
+ * @num_unexpected_db : Number of unexpected doorbells
+ * @num_bam_int_handled : Number of Bam Interrupts handled by FW
+ * @num_bam_int_in_non_running_state : Number of Bam interrupts while not in
+ * Running state
+ * @num_qmb_int_handled : Number of QMB interrupts handled
+*/
+struct IpaHwStatsWDITxInfoData_t {
+	u32 num_pkts_processed;
+	u32 copy_engine_doorbell_value;
+	u32 num_db_fired;
+	struct IpaHwRingStats_t tx_comp_ring_stats;
+	struct IpaHwBamStats_t bam_stats;
+	u32 num_db;
+	u32 num_unexpected_db;
+	u32 num_bam_int_handled;
+	u32 num_bam_int_in_non_running_state;
+	u32 num_qmb_int_handled;
+	u32 num_bam_int_handled_while_wait_for_bam;
+} __packed;
+
+/**
+ * struct IpaHwStatsWDIInfoData_t - Structure holding the WDI channel structures
+ *
+ * @rx_ch_stats : RX stats
+ * @tx_ch_stats : TX stats
+*/
+struct IpaHwStatsWDIInfoData_t {
+	struct IpaHwStatsWDIRxInfoData_t rx_ch_stats;
+	struct IpaHwStatsWDITxInfoData_t tx_ch_stats;
+} __packed;
+
+
+/**
+ * struct  ipa_wdi_ul_params - WDI_RX configuration
+ * @rdy_ring_base_pa: physical address of the base of the Rx ring (containing
+ * Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * reading (WDI-1.0)
+ * @rdy_comp_ring_base_pa: physical address of the base of the Rx completion
+ * ring (WDI-2.0)
+ * @rdy_comp_ring_wp_pa: physical address of the location through which IPA
+ * uc is writing (WDI-2.0)
+ * @rdy_comp_ring_size: size of the Rx_completion ring in bytes
+ * expected to communicate about the Read pointer into the Rx Ring
+ */
+struct ipa_wdi_ul_params {
+	phys_addr_t rdy_ring_base_pa;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_ring_rp_pa;
+	phys_addr_t rdy_comp_ring_base_pa;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct  ipa_wdi_ul_params_smmu - WDI_RX configuration (with WLAN SMMU)
+ * @rdy_ring: SG table describing the Rx ring (containing Rx buffers)
+ * @rdy_ring_size: size of the Rx ring in bytes
+ * @rdy_ring_rp_pa: physical address of the location through which IPA uc is
+ * expected to communicate about the Read pointer into the Rx Ring
+ */
+struct ipa_wdi_ul_params_smmu {
+	struct sg_table rdy_ring;
+	u32 rdy_ring_size;
+	phys_addr_t rdy_ring_rp_pa;
+	struct sg_table rdy_comp_ring;
+	phys_addr_t rdy_comp_ring_wp_pa;
+	u32 rdy_comp_ring_size;
+	u32 *rdy_ring_rp_va;
+	u32 *rdy_comp_ring_wp_va;
+};
+
+/**
+ * struct  ipa_wdi_dl_params - WDI_TX configuration
+ * @comp_ring_base_pa: physical address of the base of the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring_base_pa: physical address of the base of the Copy Engine Source
+ * Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params {
+	phys_addr_t comp_ring_base_pa;
+	u32 comp_ring_size;
+	phys_addr_t ce_ring_base_pa;
+	phys_addr_t ce_door_bell_pa;
+	u32 ce_ring_size;
+	u32 num_tx_buffers;
+};
+
+/**
+ * struct  ipa_wdi_dl_params_smmu - WDI_TX configuration (with WLAN SMMU)
+ * @comp_ring: SG table describing the Tx completion ring
+ * @comp_ring_size: size of the Tx completion ring in bytes
+ * @ce_ring: SG table describing the Copy Engine Source Ring
+ * @ce_door_bell_pa: physical address of the doorbell that the IPA uC has to
+ * write into to trigger the copy engine
+ * @ce_ring_size: Copy Engine Ring size in bytes
+ * @num_tx_buffers: Number of pkt buffers allocated
+ */
+struct ipa_wdi_dl_params_smmu {
+	struct sg_table comp_ring;
+	u32 comp_ring_size;
+	struct sg_table ce_ring;
+	phys_addr_t ce_door_bell_pa;
+	u32 ce_ring_size;
+	u32 num_tx_buffers;
+};
+
+/**
+ * struct  ipa_wdi_in_params - information provided by WDI client
+ * @sys: IPA EP configuration info
+ * @ul: WDI_RX configuration info
+ * @dl: WDI_TX configuration info
+ * @ul_smmu: WDI_RX configuration info when WLAN uses SMMU
+ * @dl_smmu: WDI_TX configuration info when WLAN uses SMMU
+ * @smmu_enabled: true if WLAN uses SMMU
+ */
+struct ipa_wdi_in_params {
+	struct ipa_sys_connect_params sys;
+	union {
+		struct ipa_wdi_ul_params ul;
+		struct ipa_wdi_dl_params dl;
+		struct ipa_wdi_ul_params_smmu ul_smmu;
+		struct ipa_wdi_dl_params_smmu dl_smmu;
+	} u;
+	bool smmu_enabled;
+};
+
+/**
+ * struct  ipa_wdi_out_params - information provided to WDI client
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ * @clnt_hdl: opaque handle assigned to client
+ */
+struct ipa_wdi_out_params {
+	phys_addr_t uc_door_bell_pa;
+	u32 clnt_hdl;
+};
+
+/**
+ * struct ipa_wdi_db_params - information provided to retrieve
+ *       physical address of uC doorbell
+ * @client:	type of "client" (IPA_CLIENT_WLAN#_PROD/CONS)
+ * @uc_door_bell_pa: physical address of IPA uc doorbell
+ */
+struct ipa_wdi_db_params {
+	enum ipa_client_type client;
+	phys_addr_t uc_door_bell_pa;
+};
+
+/**
+ * struct  ipa_wdi_uc_ready_params - uC ready CB parameters
+ * @is_uC_ready: uC loaded or not
+ * @priv : callback cookie
+ * @notify:	callback
+ */
+typedef void (*ipa_uc_ready_cb)(void *priv);
+struct ipa_wdi_uc_ready_params {
+	bool is_uC_ready;
+	void *priv;
+	ipa_uc_ready_cb notify;
+};
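A hedged sketch of the readiness check described for ipa_uc_reg_rdyCB() further below; the callback body and follow-up actions are placeholders.

static void example_uc_ready(void *priv)
{
	/* uC firmware is loaded; safe to connect WDI pipes now */
}

static int example_wait_for_uc(void)
{
	struct ipa_wdi_uc_ready_params param = {
		.notify	= example_uc_ready,
		.priv	= NULL,
	};
	int ret = ipa_uc_reg_rdyCB(&param);

	if (ret)
		return ret;
	if (param.is_uC_ready)
		example_uc_ready(NULL);	/* already ready, no callback expected */
	return 0;
}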
+
+/**
+ * struct  ipa_wdi_buffer_info - address info of a WLAN allocated buffer
+ * @pa: physical address of the buffer
+ * @iova: IOVA of the buffer as embedded inside the WDI descriptors
+ * @size: size in bytes of the buffer
+ * @result: result of map or unmap operations (out param)
+ *
+ * IPA driver will create/release IOMMU mapping in IPA SMMU from iova->pa
+ */
+struct ipa_wdi_buffer_info {
+	phys_addr_t pa;
+	unsigned long iova;
+	size_t size;
+	int result;
+};
+
+/**
+ * struct ipa_gsi_ep_config - IPA GSI endpoint configurations
+ *
+ * @ipa_ep_num: IPA EP pipe number
+ * @ipa_gsi_chan_num: GSI channel number
+ * @ipa_if_tlv: number of IPA_IF TLV
+ * @ipa_if_aos: number of IPA_IF AOS
+ * @ee: Execution environment
+ */
+struct ipa_gsi_ep_config {
+	int ipa_ep_num;
+	int ipa_gsi_chan_num;
+	int ipa_if_tlv;
+	int ipa_if_aos;
+	int ee;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/*
+ * Connect / Disconnect
+ */
+int ipa_connect(const struct ipa_connect_params *in, struct ipa_sps_params *sps,
+		u32 *clnt_hdl);
+int ipa_disconnect(u32 clnt_hdl);
+
+/*
+ * Resume / Suspend
+ */
+int ipa_reset_endpoint(u32 clnt_hdl);
+
+/*
+ * Remove ep delay
+ */
+int ipa_clear_endpoint_delay(u32 clnt_hdl);
+
+/*
+ * Disable ep
+ */
+int ipa_disable_endpoint(u32 clnt_hdl);
+
+/*
+ * Configuration
+ */
+int ipa_cfg_ep(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_nat(u32 clnt_hdl, const struct ipa_ep_cfg_nat *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr(u32 clnt_hdl, const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
+
+int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+			const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
+
+int ipa_cfg_ep_mode(u32 clnt_hdl, const struct ipa_ep_cfg_mode *ipa_ep_cfg);
+
+int ipa_cfg_ep_aggr(u32 clnt_hdl, const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+		      const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
+
+int ipa_cfg_ep_route(u32 clnt_hdl, const struct ipa_ep_cfg_route *ipa_ep_cfg);
+
+int ipa_cfg_ep_holb(u32 clnt_hdl, const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa_cfg_ep_cfg(u32 clnt_hdl, const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
+
+int ipa_cfg_ep_metadata_mask(u32 clnt_hdl, const struct ipa_ep_cfg_metadata_mask
+		*ipa_ep_cfg);
+
+int ipa_cfg_ep_holb_by_client(enum ipa_client_type client,
+				const struct ipa_ep_cfg_holb *ipa_ep_cfg);
+
+int ipa_cfg_ep_ctrl(u32 clnt_hdl, const struct ipa_ep_cfg_ctrl *ep_ctrl);
+
+/*
+ * Header removal / addition
+ */
+int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs);
+
+int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls);
+
+int ipa_commit_hdr(void);
+
+int ipa_reset_hdr(void);
+
+int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup);
+
+int ipa_put_hdr(u32 hdr_hdl);
+
+int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy);
+
+/*
+ * Header Processing Context
+ */
+int ipa_add_hdr_proc_ctx(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs);
+
+int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls);
+
+/*
+ * Routing
+ */
+int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+
+int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
+
+int ipa_commit_rt(enum ipa_ip_type ip);
+
+int ipa_reset_rt(enum ipa_ip_type ip);
+
+int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup);
+
+int ipa_put_rt_tbl(u32 rt_tbl_hdl);
+
+int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in);
+
+int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules);
+
+/*
+ * Filtering
+ */
+int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules);
+
+int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls);
+
+int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules);
+
+int ipa_commit_flt(enum ipa_ip_type ip);
+
+int ipa_reset_flt(enum ipa_ip_type ip);
+
+/*
+ * NAT
+ */
+int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem);
+
+int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init);
+
+int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma);
+
+int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del);
+
+/*
+ * Messaging
+ */
+int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+		  ipa_msg_free_fn callback);
+int ipa_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback);
+int ipa_deregister_pull_msg(struct ipa_msg_meta *meta);
+
+/*
+ * Interface
+ */
+int ipa_register_intf(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx);
+int ipa_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
+		       const struct ipa_rx_intf *rx,
+		       const struct ipa_ext_intf *ext);
+int ipa_deregister_intf(const char *name);
+
+/*
+ * Aggregation
+ */
+int ipa_set_aggr_mode(enum ipa_aggr_mode mode);
+
+int ipa_set_qcncm_ndp_sig(char sig[3]);
+
+int ipa_set_single_ndp_per_mbim(bool enable);
+
+/*
+ * Data path
+ */
+int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata);
+
+/*
+ * To transfer multiple data packets
+ * While passing the data descriptor list, the anchor node
+ * should be of type struct ipa_tx_data_desc not list_head
+*/
+int ipa_tx_dp_mul(enum ipa_client_type dst,
+			struct ipa_tx_data_desc *data_desc);
+
+void ipa_free_skb(struct ipa_rx_data *);
+int ipa_rx_poll(u32 clnt_hdl, int budget);
+void ipa_recycle_wan_skb(struct sk_buff *skb);
+
+/*
+ * System pipes
+ */
+int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl);
+
+int ipa_teardown_sys_pipe(u32 clnt_hdl);
+
+int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out);
+int ipa_disconnect_wdi_pipe(u32 clnt_hdl);
+int ipa_enable_wdi_pipe(u32 clnt_hdl);
+int ipa_disable_wdi_pipe(u32 clnt_hdl);
+int ipa_resume_wdi_pipe(u32 clnt_hdl);
+int ipa_suspend_wdi_pipe(u32 clnt_hdl);
+int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
+u16 ipa_get_smem_restr_bytes(void);
+/*
+ * To retrieve doorbell physical address of
+ * wlan pipes
+ */
+int ipa_uc_wdi_get_dbpa(struct ipa_wdi_db_params *out);
+
+/*
+ * To check uC readiness and register a uC ready callback;
+ * the callback is registered only if the uC is not ready yet.
+ */
+int ipa_uc_reg_rdyCB(struct ipa_wdi_uc_ready_params *param);
+/*
+ * To de-register uC ready callback
+ */
+int ipa_uc_dereg_rdyCB(void);
+
+int ipa_create_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+int ipa_release_wdi_mapping(u32 num_buffers, struct ipa_wdi_buffer_info *info);
+
+/*
+ * Resource manager
+ */
+int ipa_rm_create_resource(struct ipa_rm_create_params *create_params);
+
+int ipa_rm_delete_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params);
+
+int ipa_rm_set_perf_profile(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_perf_profile *profile);
+
+int ipa_rm_add_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_add_dependency_sync(enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_delete_dependency(enum ipa_rm_resource_name resource_name,
+			enum ipa_rm_resource_name depends_on_name);
+
+int ipa_rm_request_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_release_resource(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
+				 unsigned long msecs);
+
+int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name);
+
+int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name);
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+int teth_bridge_init(struct teth_bridge_init_params *params);
+
+int teth_bridge_disconnect(enum ipa_client_type client);
+
+int teth_bridge_connect(struct teth_bridge_connect_params *connect_params);
+
+/*
+ * Tethering client info
+ */
+void ipa_set_client(int index, enum ipacm_client_enum client, bool uplink);
+
+enum ipacm_client_enum ipa_get_client(int pipe_idx);
+
+bool ipa_get_client_uplink(int pipe_idx);
+
+/*
+ * IPADMA
+ */
+int ipa_dma_init(void);
+
+int ipa_dma_enable(void);
+
+int ipa_dma_disable(void);
+
+int ipa_dma_sync_memcpy(u64 dest, u64 src, int len);
+
+int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param);
+
+int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len);
+
+void ipa_dma_destroy(void);
+
+/*
+ * mux id
+ */
+int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in);
+
+/*
+ * interrupts
+ */
+int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data);
+
+int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt);
+
+int ipa_restore_suspend_handler(void);
+
+/*
+ * Miscellaneous
+ */
+void ipa_bam_reg_dump(void);
+
+int ipa_get_ep_mapping(enum ipa_client_type client);
+
+bool ipa_is_ready(void);
+
+void ipa_proxy_clk_vote(void);
+void ipa_proxy_clk_unvote(void);
+
+enum ipa_hw_type ipa_get_hw_type(void);
+
+bool ipa_is_client_handle_valid(u32 clnt_hdl);
+
+enum ipa_client_type ipa_get_client_mapping(int pipe_idx);
+
+enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(int pipe_idx);
+
+bool ipa_get_modem_cfg_emb_pipe_flt(void);
+
+enum ipa_transport_type ipa_get_transport_type(void);
+
+struct device *ipa_get_dma_dev(void);
+struct iommu_domain *ipa_get_smmu_domain(void);
+
+int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size, uint32_t agg_count);
+
+struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx);
+
+int ipa_stop_gsi_channel(u32 clnt_hdl);
+
+typedef void (*ipa_ready_cb)(void *user_data);
+
+/**
+ * ipa_register_ipa_ready_cb() - register a callback to be invoked
+ * when IPA core driver initialization is complete.
+ *
+ * @ipa_ready_cb:    CB to be triggered.
+ * @user_data:       Data to be sent to the originator of the CB.
+ *
+ * Note: This function is expected to be used when ipa_is_ready()
+ * returns false.
+ * An IPA client may also call this function directly instead of
+ * calling ipa_is_ready() first; if this API returns -EEXIST, IPA
+ * initialization is already complete and no callback will be
+ * triggered.
+ * When the callback is triggered, the client MUST perform its
+ * operations in a different context.
+ *
+ * The function returns 0 on success, -ENOMEM on memory issues and
+ * -EEXIST if IPA initialization is already complete.
+ */
+int ipa_register_ipa_ready_cb(void (*ipa_ready_cb)(void *user_data),
+			      void *user_data);
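+
+/*
+ * Example usage (illustrative sketch only; my_ipa_ready_cb(), my_setup()
+ * and ctx are hypothetical client names, not part of this API):
+ *
+ *	static void my_ipa_ready_cb(void *user_data)
+ *	{
+ *		struct my_ctx *ctx = user_data;
+ *
+ *		// callback context: defer the real work as required above
+ *		schedule_work(&ctx->setup_work);
+ *	}
+ *
+ *	ret = ipa_register_ipa_ready_cb(my_ipa_ready_cb, ctx);
+ *	if (ret == -EEXIST)
+ *		my_setup(ctx);		// IPA already initialized
+ *	else if (ret)
+ *		return ret;		// e.g. -ENOMEM
+ */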
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+/*
+ * Connect / Disconnect
+ */
+static inline int ipa_connect(const struct ipa_connect_params *in,
+		struct ipa_sps_params *sps, u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_disconnect(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+/*
+ * Resume / Suspend
+ */
+static inline int ipa_reset_endpoint(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+/*
+ * Remove ep delay
+ */
+static inline int ipa_clear_endpoint_delay(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+/*
+ * Disable ep
+ */
+static inline int ipa_disable_endpoint(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+/*
+ * Configuration
+ */
+static inline int ipa_cfg_ep(u32 clnt_hdl,
+		const struct ipa_ep_cfg *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_nat(u32 clnt_hdl,
+		const struct ipa_ep_cfg_nat *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_hdr(u32 clnt_hdl,
+		const struct ipa_ep_cfg_hdr *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_hdr_ext(u32 clnt_hdl,
+		const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_mode(u32 clnt_hdl,
+		const struct ipa_ep_cfg_mode *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_aggr(u32 clnt_hdl,
+		const struct ipa_ep_cfg_aggr *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_deaggr(u32 clnt_hdl,
+		const struct ipa_ep_cfg_deaggr *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_route(u32 clnt_hdl,
+		const struct ipa_ep_cfg_route *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_holb(u32 clnt_hdl,
+		const struct ipa_ep_cfg_holb *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_cfg(u32 clnt_hdl,
+		const struct ipa_ep_cfg_cfg *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_metadata_mask(u32 clnt_hdl,
+		const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg)
+{
+	return -EPERM;
+}
+
+static inline int ipa_cfg_ep_ctrl(u32 clnt_hdl,
+			const struct ipa_ep_cfg_ctrl *ep_ctrl)
+{
+	return -EPERM;
+}
+
+/*
+ * Header removal / addition
+ */
+static inline int ipa_add_hdr(struct ipa_ioc_add_hdr *hdrs)
+{
+	return -EPERM;
+}
+
+static inline int ipa_del_hdr(struct ipa_ioc_del_hdr *hdls)
+{
+	return -EPERM;
+}
+
+static inline int ipa_commit_hdr(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_reset_hdr(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_get_hdr(struct ipa_ioc_get_hdr *lookup)
+{
+	return -EPERM;
+}
+
+static inline int ipa_put_hdr(u32 hdr_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_copy_hdr(struct ipa_ioc_copy_hdr *copy)
+{
+	return -EPERM;
+}
+
+/*
+ * Header Processing Context
+ */
+static inline int ipa_add_hdr_proc_ctx(
+				struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs)
+{
+	return -EPERM;
+}
+
+static inline int ipa_del_hdr_proc_ctx(struct ipa_ioc_del_hdr_proc_ctx *hdls)
+{
+	return -EPERM;
+}
+/*
+ * Routing
+ */
+static inline int ipa_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
+{
+	return -EPERM;
+}
+
+static inline int ipa_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls)
+{
+	return -EPERM;
+}
+
+static inline int ipa_commit_rt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+static inline int ipa_reset_rt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+static inline int ipa_get_rt_tbl(struct ipa_ioc_get_rt_tbl *lookup)
+{
+	return -EPERM;
+}
+
+static inline int ipa_put_rt_tbl(u32 rt_tbl_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_query_rt_index(struct ipa_ioc_get_rt_tbl_indx *in)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mdfy_rt_rule(struct ipa_ioc_mdfy_rt_rule *rules)
+{
+	return -EPERM;
+}
+
+/*
+ * Filtering
+ */
+static inline int ipa_add_flt_rule(struct ipa_ioc_add_flt_rule *rules)
+{
+	return -EPERM;
+}
+
+static inline int ipa_del_flt_rule(struct ipa_ioc_del_flt_rule *hdls)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mdfy_flt_rule(struct ipa_ioc_mdfy_flt_rule *rules)
+{
+	return -EPERM;
+}
+
+static inline int ipa_commit_flt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+static inline int ipa_reset_flt(enum ipa_ip_type ip)
+{
+	return -EPERM;
+}
+
+/*
+ * NAT
+ */
+static inline int allocate_nat_device(struct ipa_ioc_nat_alloc_mem *mem)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_nat_init_cmd(struct ipa_ioc_v4_nat_init *init)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_nat_dma_cmd(struct ipa_ioc_nat_dma_cmd *dma)
+{
+	return -EPERM;
+}
+
+
+static inline int ipa_nat_del_cmd(struct ipa_ioc_v4_nat_del *del)
+{
+	return -EPERM;
+}
+
+/*
+ * Messaging
+ */
+static inline int ipa_send_msg(struct ipa_msg_meta *meta, void *buff,
+		ipa_msg_free_fn callback)
+{
+	return -EPERM;
+}
+
+static inline int ipa_register_pull_msg(struct ipa_msg_meta *meta,
+		ipa_msg_pull_fn callback)
+{
+	return -EPERM;
+}
+
+static inline int ipa_deregister_pull_msg(struct ipa_msg_meta *meta)
+{
+	return -EPERM;
+}
+
+/*
+ * Interface
+ */
+static inline int ipa_register_intf(const char *name,
+				     const struct ipa_tx_intf *tx,
+				     const struct ipa_rx_intf *rx)
+{
+	return -EPERM;
+}
+
+static inline int ipa_register_intf_ext(const char *name,
+		const struct ipa_tx_intf *tx,
+		const struct ipa_rx_intf *rx,
+		const struct ipa_ext_intf *ext)
+{
+	return -EPERM;
+}
+
+static inline int ipa_deregister_intf(const char *name)
+{
+	return -EPERM;
+}
+
+/*
+ * Aggregation
+ */
+static inline int ipa_set_aggr_mode(enum ipa_aggr_mode mode)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_qcncm_ndp_sig(char sig[3])
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_single_ndp_per_mbim(bool enable)
+{
+	return -EPERM;
+}
+
+/*
+ * Data path
+ */
+static inline int ipa_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
+		struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+/*
+ * To transfer multiple data packets
+ */
+static inline int ipa_tx_dp_mul(
+	enum ipa_client_type dst,
+	struct ipa_tx_data_desc *data_desc)
+{
+	return -EPERM;
+}
+
+static inline void ipa_free_skb(struct ipa_rx_data *rx_in)
+{
+}
+
+static inline int ipa_rx_poll(u32 clnt_hdl, int budget)
+{
+	return -EPERM;
+}
+
+static inline void ipa_recycle_wan_skb(struct sk_buff *skb)
+{
+}
+
+/*
+ * System pipes
+ */
+static inline u16 ipa_get_smem_restr_bytes(void)
+{
+	return 0;
+}
+
+static inline int ipa_setup_sys_pipe(struct ipa_sys_connect_params *sys_in,
+		u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_teardown_sys_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_connect_wdi_pipe(struct ipa_wdi_in_params *in,
+		struct ipa_wdi_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_disconnect_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_enable_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_disable_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_resume_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_suspend_wdi_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_wdi_get_dbpa(
+	struct ipa_wdi_db_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_reg_rdyCB(
+	struct ipa_wdi_uc_ready_params *param)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_dereg_rdyCB(void)
+{
+	return -EPERM;
+}
+
+
+/*
+ * Resource manager
+ */
+static inline int ipa_rm_create_resource(
+		struct ipa_rm_create_params *create_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_delete_resource(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_register(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_set_perf_profile(
+		enum ipa_rm_resource_name resource_name,
+		struct ipa_rm_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_deregister(enum ipa_rm_resource_name resource_name,
+			struct ipa_rm_register_params *reg_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency(
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_add_dependency_sync(
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_delete_dependency(
+		enum ipa_rm_resource_name resource_name,
+		enum ipa_rm_resource_name depends_on_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_request_resource(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_release_resource(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_notify_completion(enum ipa_rm_event event,
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_init(
+		enum ipa_rm_resource_name resource_name,
+			unsigned long msecs)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_destroy(
+		enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_request_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+static inline int ipa_rm_inactivity_timer_release_resource(
+				enum ipa_rm_resource_name resource_name)
+{
+	return -EPERM;
+}
+
+/*
+ * Tethering bridge (Rmnet / MBIM)
+ */
+static inline int teth_bridge_init(struct teth_bridge_init_params *params)
+{
+	return -EPERM;
+}
+
+static inline int teth_bridge_disconnect(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline int teth_bridge_connect(struct teth_bridge_connect_params
+				      *connect_params)
+{
+	return -EPERM;
+}
+
+/*
+ * Tethering client info
+ */
+static inline void ipa_set_client(int index, enum ipacm_client_enum client,
+	bool uplink)
+{
+}
+
+static inline enum ipacm_client_enum ipa_get_client(int pipe_idx)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_get_client_uplink(int pipe_idx)
+{
+	return false;
+}
+
+/*
+ * IPADMA
+ */
+static inline int ipa_dma_init(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_enable(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_disable(void)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_sync_memcpy(u64 dest, u64 src, int len)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_async_memcpy(u64 dest, u64 src, int len,
+			void (*user_cb)(void *user1), void *user_param)
+{
+	return -EPERM;
+}
+
+static inline int ipa_dma_uc_memcpy(phys_addr_t dest, phys_addr_t src, int len)
+{
+	return -EPERM;
+}
+
+static inline void ipa_dma_destroy(void)
+{
+}
+
+/*
+ * mux id
+ */
+static inline int ipa_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
+{
+	return -EPERM;
+}
+
+/*
+ * interrupts
+ */
+static inline int ipa_add_interrupt_handler(enum ipa_irq_type interrupt,
+		ipa_irq_handler_t handler,
+		bool deferred_flag,
+		void *private_data)
+{
+	return -EPERM;
+}
+
+static inline int ipa_remove_interrupt_handler(enum ipa_irq_type interrupt)
+{
+	return -EPERM;
+}
+
+static inline int ipa_restore_suspend_handler(void)
+{
+	return -EPERM;
+}
+
+/*
+ * Miscellaneous
+ */
+static inline void ipa_bam_reg_dump(void)
+{
+}
+
+static inline int ipa_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats)
+{
+	return -EPERM;
+}
+
+static inline int ipa_get_ep_mapping(enum ipa_client_type client)
+{
+	return -EPERM;
+}
+
+static inline bool ipa_is_ready(void)
+{
+	return false;
+}
+
+static inline void ipa_proxy_clk_vote(void)
+{
+}
+
+static inline void ipa_proxy_clk_unvote(void)
+{
+}
+
+static inline enum ipa_hw_type ipa_get_hw_type(void)
+{
+	return IPA_HW_None;
+}
+
+static inline bool ipa_is_client_handle_valid(u32 clnt_hdl)
+{
+	return false;
+}
+
+static inline enum ipa_client_type ipa_get_client_mapping(int pipe_idx)
+{
+	return -EINVAL;
+}
+
+static inline enum ipa_rm_resource_name ipa_get_rm_resource_from_ep(
+	int pipe_idx)
+{
+	return -EFAULT;
+}
+
+static inline bool ipa_get_modem_cfg_emb_pipe_flt(void)
+{
+	return false;
+}
+
+static inline enum ipa_transport_type ipa_get_transport_type(void)
+{
+	return -EFAULT;
+}
+
+static inline struct device *ipa_get_dma_dev(void)
+{
+	return NULL;
+}
+
+static inline struct iommu_domain *ipa_get_smmu_domain(void)
+{
+	return NULL;
+}
+
+static inline int ipa_create_wdi_mapping(u32 num_buffers,
+		struct ipa_wdi_buffer_info *info)
+{
+	return -EINVAL;
+}
+
+static inline int ipa_release_wdi_mapping(u32 num_buffers,
+		struct ipa_wdi_buffer_info *info)
+{
+	return -EINVAL;
+}
+
+static inline int ipa_disable_apps_wan_cons_deaggr(uint32_t agg_size,
+		uint32_t agg_count)
+{
+	return -EINVAL;
+}
+
+static inline struct ipa_gsi_ep_config *ipa_get_gsi_ep_info(int ipa_ep_idx)
+{
+	return NULL;
+}
+
+static inline int ipa_stop_gsi_channel(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_register_ipa_ready_cb(
+	void (*ipa_ready_cb)(void *user_data),
+	void *user_data)
+{
+	return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_H_ */
diff --git a/include/linux/ipa_mhi.h b/include/linux/ipa_mhi.h
new file mode 100644
index 0000000..4d3b974
--- /dev/null
+++ b/include/linux/ipa_mhi.h
@@ -0,0 +1,161 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef IPA_MHI_H_
+#define IPA_MHI_H_
+
+#include <linux/ipa.h>
+#include <linux/types.h>
+
+/**
+ * enum ipa_mhi_event_type - event type for mhi callback
+ *
+ * @IPA_MHI_EVENT_READY: IPA MHI is ready and IPA uC is loaded. After getting
+ *	this event the MHI client is expected to call the ipa_mhi_start() API
+ * @IPA_MHI_EVENT_DATA_AVAILABLE: downlink data available on MHI channel
+ */
+enum ipa_mhi_event_type {
+	IPA_MHI_EVENT_READY,
+	IPA_MHI_EVENT_DATA_AVAILABLE,
+	IPA_MHI_EVENT_MAX,
+};
+
+typedef void (*mhi_client_cb)(void *priv, enum ipa_mhi_event_type event,
+	unsigned long data);
+
+/**
+ * struct ipa_mhi_msi_info - parameters for MSI (Message Signaled Interrupts)
+ * @addr_low: MSI lower base physical address
+ * @addr_hi: MSI higher base physical address
+ * @data: Data Pattern to use when generating the MSI
+ * @mask: Mask indicating number of messages assigned by the host to device
+ *
+ * msi value is written according to this formula:
+ *	((data & ~mask) | (mmio.msiVec & mask))
+ */
+struct ipa_mhi_msi_info {
+	u32 addr_low;
+	u32 addr_hi;
+	u32 data;
+	u32 mask;
+};
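+
+/*
+ * Worked example of the formula above (illustrative values only):
+ * with data = 0x12340000, mask = 0x0000ffff and mmio.msiVec = 0x5,
+ * the value written is
+ *	(0x12340000 & ~0x0000ffff) | (0x5 & 0x0000ffff) = 0x12340005
+ */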
+
+/**
+ * struct ipa_mhi_init_params - parameters for IPA MHI initialization API
+ *
+ * @msi: MSI (Message Signaled Interrupts) parameters
+ * @mmio_addr: MHI MMIO physical address
+ * @first_ch_idx: First channel ID for hardware accelerated channels.
+ * @first_er_idx: First event ring ID for hardware accelerated channels.
+ * @assert_bit40: should assert bit 40 in order to access host space.
+ *	If PCIe iATU is configured, there is no need to assert bit 40.
+ * @notify: client callback
+ * @priv: client private data to be provided in client callback
+ * @test_mode: flag to indicate if IPA MHI is in unit test mode
+ */
+struct ipa_mhi_init_params {
+	struct ipa_mhi_msi_info msi;
+	u32 mmio_addr;
+	u32 first_ch_idx;
+	u32 first_er_idx;
+	bool assert_bit40;
+	mhi_client_cb notify;
+	void *priv;
+	bool test_mode;
+};
+
+/**
+ * struct ipa_mhi_start_params - parameters for IPA MHI start API
+ *
+ * @host_ctrl_addr: Base address of MHI control data structures
+ * @host_data_addr: Base address of MHI data buffers
+ * @channel_context_array_addr: channel context array address in host
+ *	address space
+ * @event_context_array_addr: event context array address in host
+ *	address space
+ */
+struct ipa_mhi_start_params {
+	u32 host_ctrl_addr;
+	u32 host_data_addr;
+	u64 channel_context_array_addr;
+	u64 event_context_array_addr;
+};
+
+/**
+ * struct ipa_mhi_connect_params - parameters for IPA MHI channel connect API
+ *
+ * @sys: IPA EP configuration info
+ * @channel_id: MHI channel id
+ */
+struct ipa_mhi_connect_params {
+	struct ipa_sys_connect_params sys;
+	u8 channel_id;
+};
+
+/* bit #40 in address should be asserted for MHI transfers over pcie */
+#define IPA_MHI_HOST_ADDR(addr) ((addr) | BIT_ULL(40))
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+int ipa_mhi_init(struct ipa_mhi_init_params *params);
+
+int ipa_mhi_start(struct ipa_mhi_start_params *params);
+
+int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in, u32 *clnt_hdl);
+
+int ipa_mhi_disconnect_pipe(u32 clnt_hdl);
+
+int ipa_mhi_suspend(bool force);
+
+int ipa_mhi_resume(void);
+
+void ipa_mhi_destroy(void);
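+
+/*
+ * Typical call flow (illustrative sketch; init_params, start_params,
+ * conn_params and clnt_hdl are hypothetical client variables):
+ *
+ *	ipa_mhi_init(&init_params);	// registers the notify callback
+ *	// wait for IPA_MHI_EVENT_READY in the notify callback, then:
+ *	ipa_mhi_start(&start_params);	// host addresses may need bit 40
+ *					// asserted via IPA_MHI_HOST_ADDR()
+ *	ipa_mhi_connect_pipe(&conn_params, &clnt_hdl);
+ *	...
+ *	ipa_mhi_suspend(false);
+ *	ipa_mhi_resume();
+ *	...
+ *	ipa_mhi_disconnect_pipe(clnt_hdl);
+ *	ipa_mhi_destroy();
+ */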
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_mhi_init(struct ipa_mhi_init_params *params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_start(struct ipa_mhi_start_params *params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_connect_pipe(struct ipa_mhi_connect_params *in,
+	u32 *clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_disconnect_pipe(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_suspend(bool force)
+{
+	return -EPERM;
+}
+
+static inline int ipa_mhi_resume(void)
+{
+	return -EPERM;
+}
+
+static inline void ipa_mhi_destroy(void)
+{
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* IPA_MHI_H_ */
diff --git a/include/linux/ipa_odu_bridge.h b/include/linux/ipa_odu_bridge.h
new file mode 100644
index 0000000..5d30a97
--- /dev/null
+++ b/include/linux/ipa_odu_bridge.h
@@ -0,0 +1,84 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_ODO_BRIDGE_H_
+#define _IPA_ODO_BRIDGE_H_
+
+#include <linux/ipa.h>
+
+/**
+ * struct odu_bridge_params - parameters for odu bridge initialization API
+ *
+ * @netdev_name: network interface name
+ * @priv: private data that will be supplied to client's callback
+ * @tx_dp_notify: callback for handling skb. The following events are
+ *	supported:
+ *	IPA_WRITE_DONE:	will be called after the client calls
+ *			odu_bridge_tx_dp(). Client is expected to free the skb.
+ *	IPA_RECEIVE:	will be called for delivering an skb to APPS.
+ *			Client is expected to deliver the skb to the network
+ *			stack.
+ * @send_dl_skb: callback for sending skb on downlink direction to adapter.
+ *		Client is expected to free the skb.
+ * @device_ethaddr: device Ethernet address in network order.
+ * @ipa_desc_size: IPA Sys Pipe Desc Size
+ */
+struct odu_bridge_params {
+	const char *netdev_name;
+	void *priv;
+	ipa_notify_cb tx_dp_notify;
+	int (*send_dl_skb)(void *priv, struct sk_buff *skb);
+	u8 device_ethaddr[ETH_ALEN];
+	u32 ipa_desc_size;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+int odu_bridge_init(struct odu_bridge_params *params);
+
+int odu_bridge_connect(void);
+
+int odu_bridge_disconnect(void);
+
+int odu_bridge_tx_dp(struct sk_buff *skb, struct ipa_tx_meta *metadata);
+
+int odu_bridge_cleanup(void);
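+
+/*
+ * Typical call flow (illustrative sketch; params, skb and meta are
+ * hypothetical client variables):
+ *
+ *	odu_bridge_init(&params);	// supplies tx_dp_notify/send_dl_skb
+ *	odu_bridge_connect();
+ *	odu_bridge_tx_dp(skb, &meta);	// submit an skb to the IPA data path
+ *	// downlink skbs are handed back via params.send_dl_skb()
+ *	odu_bridge_disconnect();
+ *	odu_bridge_cleanup();
+ */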
+
+#else
+
+static inline int odu_bridge_init(struct odu_bridge_params *params)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_disconnect(void)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_connect(void)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_tx_dp(struct sk_buff *skb,
+						struct ipa_tx_meta *metadata)
+{
+	return -EPERM;
+}
+
+static inline int odu_bridge_cleanup(void)
+{
+	return -EPERM;
+}
+
+#endif /* CONFIG_IPA || CONFIG_IPA3 */
+
+#endif /* _IPA_ODO_BRIDGE_H_ */
diff --git a/include/linux/ipa_uc_offload.h b/include/linux/ipa_uc_offload.h
new file mode 100644
index 0000000..0277e87
--- /dev/null
+++ b/include/linux/ipa_uc_offload.h
@@ -0,0 +1,259 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_UC_OFFLOAD_H_
+#define _IPA_UC_OFFLOAD_H_
+
+#include <linux/ipa.h>
+
+/**
+ * enum ipa_uc_offload_proto
+ * Protocol type: either WDI or Neutrino
+ *
+ * @IPA_UC_WDI: wdi Protocol
+ * @IPA_UC_NTN: Neutrino Protocol
+ */
+enum ipa_uc_offload_proto {
+	IPA_UC_INVALID = 0,
+	IPA_UC_WDI = 1,
+	IPA_UC_NTN = 2,
+	IPA_UC_MAX_PROT_SIZE
+};
+
+/**
+ * struct ipa_hdr_info - Header to install on IPA HW
+ *
+ * @hdr: header to install on IPA HW
+ * @hdr_len: length of header
+ * @dst_mac_addr_offset: destination mac address offset
+ * @hdr_type: layer two header type
+ */
+struct ipa_hdr_info {
+	u8 *hdr;
+	u8 hdr_len;
+	u8 dst_mac_addr_offset;
+	enum ipa_hdr_l2_type hdr_type;
+};
+
+/**
+ * struct ipa_uc_offload_intf_params - parameters for uC offload
+ *	interface registration
+ *
+ * @netdev_name: network interface name
+ * @notify:	callback for exception/embedded packets
+ * @priv: callback cookie
+ * @hdr_info: header information
+ * @is_meta_data_valid: true if meta_data is valid
+ * @meta_data: meta data if any
+ * @meta_data_mask: meta data mask
+ * @proto: uC offload protocol type
+ * @alt_dst_pipe: alternate routing output pipe
+ */
+struct ipa_uc_offload_intf_params {
+	const char *netdev_name;
+	ipa_notify_cb notify;
+	void *priv;
+	struct ipa_hdr_info hdr_info[IPA_IP_MAX];
+	u8 is_meta_data_valid;
+	u32 meta_data;
+	u32 meta_data_mask;
+	enum ipa_uc_offload_proto proto;
+	enum ipa_client_type alt_dst_pipe;
+};
+
+/**
+ * struct  ipa_ntn_setup_info - NTN TX/Rx configuration
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @ring_base_pa: physical address of the base of the Tx/Rx ring
+ * @ntn_ring_size: size of the Tx/Rx ring (in terms of elements)
+ * @buff_pool_base_pa: physical address of the base of the Tx/Rx
+ *						buffer pool
+ * @num_buffers: Rx/Tx buffer pool size (in terms of elements)
+ * @data_buff_size: size of the each data buffer allocated in DDR
+ * @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN Ring's
+ *						tail pointer
+ */
+struct ipa_ntn_setup_info {
+	enum ipa_client_type client;
+	phys_addr_t ring_base_pa;
+	u32 ntn_ring_size;
+
+	phys_addr_t buff_pool_base_pa;
+	u32 num_buffers;
+	u32 data_buff_size;
+
+	phys_addr_t ntn_reg_base_ptr_pa;
+};
+
+/**
+ * struct ipa_uc_offload_out_params - out parameters for uC offload
+ *
+ * @clnt_hndl: Handle that the client needs to pass during
+ *	further operations
+ */
+struct ipa_uc_offload_out_params {
+	u32 clnt_hndl;
+};
+
+/**
+ * struct  ipa_ntn_conn_in_params - NTN TX/Rx connect parameters
+ * @ul: parameters to connect UL pipe(from Neutrino to IPA)
+ * @dl: parameters to connect DL pipe(from IPA to Neutrino)
+ */
+struct ipa_ntn_conn_in_params {
+	struct ipa_ntn_setup_info ul;
+	struct ipa_ntn_setup_info dl;
+};
+
+/**
+ * struct  ipa_ntn_conn_out_params - information provided
+ *				to uC offload client
+ * @ul_uc_db_pa: physical address of IPA uC doorbell for UL
+ * @dl_uc_db_pa: physical address of IPA uC doorbell for DL
+ */
+struct ipa_ntn_conn_out_params {
+	phys_addr_t ul_uc_db_pa;
+	phys_addr_t dl_uc_db_pa;
+};
+
+/**
+ * struct  ipa_uc_offload_conn_in_params - information provided by
+ *		uC offload client
+ * @clnt_hndl: Handle returned from the interface registration
+ * @proto: Protocol to use for offload data path
+ * @u.ntn: NTN Rx/Tx configuration info
+ */
+struct ipa_uc_offload_conn_in_params {
+	u32 clnt_hndl;
+	union {
+		struct ipa_ntn_conn_in_params ntn;
+	} u;
+};
+
+/**
+ * struct  ipa_uc_offload_conn_out_params - information provided
+ *		to uC offload client
+ * @u.ntn: NTN connect output parameters (uC doorbell addresses)
+ */
+struct ipa_uc_offload_conn_out_params {
+	union {
+		struct ipa_ntn_conn_out_params ntn;
+	} u;
+};
+
+/**
+ * struct  ipa_perf_profile - To set BandWidth profile
+ *
+ * @client: type of "client" (IPA_CLIENT_ODU#_PROD/CONS)
+ * @max_supported_bw_mbps: maximum bandwidth needed (in Mbps)
+ */
+struct ipa_perf_profile {
+	enum ipa_client_type client;
+	u32 max_supported_bw_mbps;
+};
+
+#if defined CONFIG_IPA || defined CONFIG_IPA3
+
+/**
+ * ipa_uc_offload_reg_intf - Client should call this function to
+ * init uC offload data path
+ *
+ * @in:		[in] initialization parameters
+ * @out:	[out] output parameters, including the client handle
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_reg_intf(
+	struct ipa_uc_offload_intf_params *in,
+	struct ipa_uc_offload_out_params *out);
+
+/**
+ * ipa_uc_offload_cleanup - Client Driver should call this
+ * function before unload and after disconnect
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_cleanup(u32 clnt_hdl);
+
+/**
+ * ipa_uc_offload_conn_pipes - Client should call this
+ * function to connect uC pipe for offload data path
+ *
+ * @in:	[in] input parameters from client
+ * @out: [out] output params to client
+ *
+ * Note: Should not be called from atomic context and only
+ * after checking IPA readiness using ipa_register_ipa_ready_cb()
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *in,
+			struct ipa_uc_offload_conn_out_params *out);
+
+/**
+ * ipa_uc_offload_disconn_pipes() - Client should call this
+ *		function to disconnect uC pipe to disable offload data path
+ * @clnt_hdl:	[in] opaque client handle assigned by IPA to client
+ *
+ * Note: Should not be called from atomic context
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_uc_offload_disconn_pipes(u32 clnt_hdl);
+
+/**
+ * ipa_set_perf_profile() - Client should call this function to
+ *		set IPA clock Band Width based on data rates
+ * @profile: [in] BandWidth profile to use
+ *
+ * Returns: 0 on success, negative on failure
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile);
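+
+/*
+ * Typical call flow (illustrative sketch; in, out, conn_in, conn_out and
+ * profile are hypothetical client variables):
+ *
+ *	ipa_uc_offload_reg_intf(&in, &out);
+ *	conn_in.clnt_hndl = out.clnt_hndl;
+ *	ipa_uc_offload_conn_pipes(&conn_in, &conn_out);
+ *	ipa_set_perf_profile(&profile);
+ *	...
+ *	ipa_uc_offload_disconn_pipes(out.clnt_hndl);
+ *	ipa_uc_offload_cleanup(out.clnt_hndl);
+ */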
+
+#else /* (CONFIG_IPA || CONFIG_IPA3) */
+
+static inline int ipa_uc_offload_reg_intf(
+		struct ipa_uc_offload_intf_params *in,
+		struct ipa_uc_offload_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_cleanup(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_conn_pipes(
+		struct ipa_uc_offload_conn_in_params *in,
+		struct ipa_uc_offload_conn_out_params *out)
+{
+	return -EPERM;
+}
+
+static inline int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
+{
+	return -EPERM;
+}
+
+static inline int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+	return -EPERM;
+}
+
+#endif /* (CONFIG_IPA || CONFIG_IPA3) */
+
+#endif /* _IPA_UC_OFFLOAD_H_ */
diff --git a/include/linux/ipa_usb.h b/include/linux/ipa_usb.h
new file mode 100644
index 0000000..0fe0e36
--- /dev/null
+++ b/include/linux/ipa_usb.h
@@ -0,0 +1,330 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPA_USB_H_
+#define _IPA_USB_H_
+
+enum ipa_usb_teth_prot {
+	IPA_USB_RNDIS = 0,
+	IPA_USB_ECM = 1,
+	IPA_USB_RMNET = 2,
+	IPA_USB_MBIM = 3,
+	IPA_USB_DIAG = 4,
+	IPA_USB_MAX_TETH_PROT_SIZE
+};
+
+/**
+ * ipa_usb_teth_params - parameters for RNDIS/ECM initialization API
+ *
+ * @host_ethaddr:        host Ethernet address in network order
+ * @device_ethaddr:      device Ethernet address in network order
+ */
+struct ipa_usb_teth_params {
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+};
+
+enum ipa_usb_notify_event {
+	IPA_USB_DEVICE_READY,
+	IPA_USB_REMOTE_WAKEUP,
+	IPA_USB_SUSPEND_COMPLETED
+};
+
+enum ipa_usb_max_usb_packet_size {
+	IPA_USB_HIGH_SPEED_512B = 512,
+	IPA_USB_SUPER_SPEED_1024B = 1024
+};
+
+/**
+ * ipa_usb_teth_prot_params - parameters for connecting RNDIS
+ *
+ * @max_xfer_size_bytes_to_dev:   max size of UL packets in bytes
+ * @max_packet_number_to_dev:     max number of UL aggregated packets
+ * @max_xfer_size_bytes_to_host:  max size of DL packets in bytes
+ *
+ */
+struct ipa_usb_teth_prot_params {
+	u32 max_xfer_size_bytes_to_dev;
+	u32 max_packet_number_to_dev;
+	u32 max_xfer_size_bytes_to_host;
+};
+
+/**
+ * ipa_usb_xdci_connect_params - parameters required to start IN, OUT
+ * channels, and connect RNDIS/ECM/teth_bridge
+ *
+ * @max_pkt_size:          high speed (512B) or super speed (1024B)
+ * @ipa_to_usb_xferrscidx: Transfer Resource Index (XferRscIdx) for IN channel.
+ *                         The hardware-assigned transfer resource index for the
+ *                         transfer, which was returned in response to the
+ *                         Start Transfer command. This field is used for
+ *                         "Update Transfer" command.
+ *                         Should be 0 <= ipa_to_usb_xferrscidx <= 127.
+ * @ipa_to_usb_xferrscidx_valid: true if xferRscIdx should be updated for IN
+ *                         channel
+ * @usb_to_ipa_xferrscidx: Transfer Resource Index (XferRscIdx) for OUT channel
+ *                         Should be 0 <= usb_to_ipa_xferrscidx <= 127.
+ * @usb_to_ipa_xferrscidx_valid: true if xferRscIdx should be updated for OUT
+ *                         channel
+ * @teth_prot:             tethering protocol
+ * @teth_prot_params:      parameters for connecting the tethering protocol.
+ * @max_supported_bandwidth_mbps: maximum bandwidth need of the client in Mbps
+ */
+struct ipa_usb_xdci_connect_params {
+	enum ipa_usb_max_usb_packet_size max_pkt_size;
+	u8 ipa_to_usb_xferrscidx;
+	bool ipa_to_usb_xferrscidx_valid;
+	u8 usb_to_ipa_xferrscidx;
+	bool usb_to_ipa_xferrscidx_valid;
+	enum ipa_usb_teth_prot teth_prot;
+	struct ipa_usb_teth_prot_params teth_prot_params;
+	u32 max_supported_bandwidth_mbps;
+};
+
+/**
+ * ipa_usb_xdci_chan_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @last_trb_addr_iova:  Address (iova LSB - based on alignment restrictions) of
+ *                       last TRB in queue. Used to identify roll over case
+ * @const_buffer_size:   TRB buffer size in KB (similar to IPA aggregation
+ *                       configuration). Must be aligned to max USB Packet Size.
+ *                       Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr:     Used to generate "Update Transfer" command
+ * @depcmd_hi_addr:      Used to generate "Update Transfer" command.
+ */
+struct ipa_usb_xdci_chan_scratch {
+	u16 last_trb_addr_iova;
+	u8 const_buffer_size;
+	u32 depcmd_low_addr;
+	u8 depcmd_hi_addr;
+};
+
+/**
+ * ipa_usb_xdci_chan_params - xDCI channel related properties
+ *
+ * @client:              type of "client"
+ * @ipa_ep_cfg:          IPA EP configuration
+ * @keep_ipa_awake:      when true, IPA will not be clock gated
+ * @teth_prot:           tethering protocol for which the channel is created
+ * @gevntcount_low_addr: GEVNCOUNT low address for event scratch
+ * @gevntcount_hi_addr:  GEVNCOUNT high address for event scratch
+ * @dir:                 channel direction
+ * @xfer_ring_len:       length of transfer ring in bytes (must be integral
+ *                       multiple of transfer element size - 16B for xDCI)
+ * @xfer_ring_base_addr: physical base address of transfer ring. Address must be
+ *                       aligned to xfer_ring_len rounded to power of two
+ * @xfer_scratch:        parameters for xDCI channel scratch
+ * @xfer_ring_base_addr_iova: IO virtual address mapped to xfer_ring_base_addr
+ * @data_buff_base_len:  length of data buffer allocated by USB driver
+ * @data_buff_base_addr: physical base address for the data buffer (where TRBs
+ *                       points)
+ * @data_buff_base_addr_iova:  IO virtual address mapped to data_buff_base_addr
+ *
+ */
+struct ipa_usb_xdci_chan_params {
+	/* IPA EP params */
+	enum ipa_client_type client;
+	struct ipa_ep_cfg ipa_ep_cfg;
+	bool keep_ipa_awake;
+	enum ipa_usb_teth_prot teth_prot;
+	/* event ring params */
+	u32 gevntcount_low_addr;
+	u8 gevntcount_hi_addr;
+	/* transfer ring params */
+	enum gsi_chan_dir dir;
+	u16 xfer_ring_len;
+	u64 xfer_ring_base_addr;
+	struct ipa_usb_xdci_chan_scratch xfer_scratch;
+	u64 xfer_ring_base_addr_iova;
+	u32 data_buff_base_len;
+	u64 data_buff_base_addr;
+	u64 data_buff_base_addr_iova;
+};
+
+/**
+ * ipa_usb_chan_out_params - out parameters for channel request
+ *
+ * @clnt_hdl:            opaque client handle assigned by IPA to client
+ * @db_reg_phs_addr_lsb: Physical address of doorbell register where the 32
+ *                       LSBs of the doorbell value should be written
+ * @db_reg_phs_addr_msb: Physical address of doorbell register where the 32
+ *                       MSBs of the doorbell value should be written
+ *
+ */
+struct ipa_req_chan_out_params {
+	u32 clnt_hdl;
+	u32 db_reg_phs_addr_lsb;
+	u32 db_reg_phs_addr_msb;
+};
+
+#ifdef CONFIG_IPA3
+
+/**
+ * ipa_usb_init_teth_prot - Peripheral should call this function to initialize
+ * RNDIS/ECM/teth_bridge/DPL, prior to calling ipa_usb_xdci_connect()
+ *
+ * @usb_teth_type: tethering protocol type
+ * @teth_params:   pointer to tethering protocol parameters.
+ *                 Should be struct ipa_usb_teth_params for RNDIS/ECM,
+ *                 or NULL for teth_bridge
+ * @ipa_usb_notify_cb: will be called to notify USB driver on certain events
+ * @user_data:     cookie used for ipa_usb_notify_cb
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+			   struct ipa_usb_teth_params *teth_params,
+			   int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+			   void *),
+			   void *user_data);
+
+/**
+ * ipa_usb_xdci_connect - Peripheral should call this function to start IN &
+ * OUT xDCI channels, and connect RNDIS/ECM/MBIM/RMNET.
+ * For DPL, only starts IN channel.
+ *
+ * @ul_chan_params: parameters for allocating UL xDCI channel. containing
+ *              required info on event and transfer rings, and IPA EP
+ *              configuration
+ * @ul_out_params: [out] opaque client handle assigned by IPA to client & DB
+ *              registers physical address for UL channel
+ * @dl_chan_params: parameters for allocating DL xDCI channel. containing
+ *              required info on event and transfer rings, and IPA EP
+ *              configuration
+ * @dl_out_params: [out] opaque client handle assigned by IPA to client & DB
+ *              registers physical address for DL channel
+ * @connect_params: handles and scratch params of the required channels,
+ *              tethering protocol and the tethering protocol parameters.
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_connect(struct ipa_usb_xdci_chan_params *ul_chan_params,
+			 struct ipa_usb_xdci_chan_params *dl_chan_params,
+			 struct ipa_req_chan_out_params *ul_out_params,
+			 struct ipa_req_chan_out_params *dl_out_params,
+			 struct ipa_usb_xdci_connect_params *connect_params);
+
+/**
+ * ipa_usb_xdci_disconnect - Peripheral should call this function to stop
+ * IN & OUT xDCI channels
+ * For DPL, only stops IN channel.
+ *
+ * @ul_clnt_hdl:    client handle received from ipa_usb_xdci_connect()
+ *                  for OUT channel
+ * @dl_clnt_hdl:    client handle received from ipa_usb_xdci_connect()
+ *                  for IN channel
+ * @teth_prot:      tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			    enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_deinit_teth_prot - Peripheral should call this function to deinit
+ * RNDIS/ECM/MBIM/RMNET
+ *
+ * @teth_prot: tethering protocol
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_suspend - Peripheral should call this function to suspend
+ * IN & OUT or DPL xDCI channels
+ *
+ * @ul_clnt_hdl: client handle previously obtained from
+ *               ipa_usb_xdci_connect() for OUT channel
+ * @dl_clnt_hdl: client handle previously obtained from
+ *               ipa_usb_xdci_connect() for IN channel
+ * @teth_prot:   tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ * Note: for DPL, the ul will be ignored as irrelevant
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			 enum ipa_usb_teth_prot teth_prot);
+
+/**
+ * ipa_usb_xdci_resume - Peripheral should call this function to resume
+ * IN & OUT or DPL xDCI channels
+ *
+ * @ul_clnt_hdl:   client handle received from ipa_usb_xdci_connect()
+ *                 for OUT channel
+ * @dl_clnt_hdl:   client handle received from ipa_usb_xdci_connect()
+ *                 for IN channel
+ * @teth_prot:   tethering protocol
+ *
+ * Note: Should not be called from atomic context
+ * Note: for DPL, the ul will be ignored as irrelevant
+ *
+ * @Return 0 on success, negative on failure
+ */
+int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			enum ipa_usb_teth_prot teth_prot);
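+
+/*
+ * Typical call flow (illustrative sketch; my_notify_cb, me, teth_params,
+ * ul_chan, dl_chan, ul_out, dl_out and conn are hypothetical
+ * peripheral-driver variables):
+ *
+ *	ipa_usb_init_teth_prot(IPA_USB_RNDIS, &teth_params, my_notify_cb, me);
+ *	ipa_usb_xdci_connect(&ul_chan, &dl_chan, &ul_out, &dl_out, &conn);
+ *	...
+ *	ipa_usb_xdci_suspend(ul_out.clnt_hdl, dl_out.clnt_hdl, IPA_USB_RNDIS);
+ *	ipa_usb_xdci_resume(ul_out.clnt_hdl, dl_out.clnt_hdl, IPA_USB_RNDIS);
+ *	...
+ *	ipa_usb_xdci_disconnect(ul_out.clnt_hdl, dl_out.clnt_hdl,
+ *				IPA_USB_RNDIS);
+ *	ipa_usb_deinit_teth_prot(IPA_USB_RNDIS);
+ */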
+
+#else /* CONFIG_IPA3 */
+
+static inline int ipa_usb_init_teth_prot(enum ipa_usb_teth_prot teth_prot,
+			   struct ipa_usb_teth_params *teth_params,
+			   int (*ipa_usb_notify_cb)(enum ipa_usb_notify_event,
+			   void *),
+			   void *user_data)
+{
+	return -EPERM;
+}
+
+static inline int ipa_usb_xdci_connect(
+			 struct ipa_usb_xdci_chan_params *ul_chan_params,
+			 struct ipa_usb_xdci_chan_params *dl_chan_params,
+			 struct ipa_req_chan_out_params *ul_out_params,
+			 struct ipa_req_chan_out_params *dl_out_params,
+			 struct ipa_usb_xdci_connect_params *connect_params)
+{
+	return -EPERM;
+}
+
+static inline int ipa_usb_xdci_disconnect(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			    enum ipa_usb_teth_prot teth_prot)
+{
+	return -EPERM;
+}
+
+static inline int ipa_usb_deinit_teth_prot(enum ipa_usb_teth_prot teth_prot)
+{
+	return -EPERM;
+}
+
+static inline int ipa_usb_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			 enum ipa_usb_teth_prot teth_prot)
+{
+	return -EPERM;
+}
+
+static inline int ipa_usb_xdci_resume(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
+			enum ipa_usb_teth_prot teth_prot)
+{
+	return -EPERM;
+}
+
+
+#endif /* CONFIG_IPA3 */
+
+#endif /* _IPA_USB_H_ */
diff --git a/include/linux/ipc_logging.h b/include/linux/ipc_logging.h
new file mode 100644
index 0000000..780a82d
--- /dev/null
+++ b/include/linux/ipc_logging.h
@@ -0,0 +1,290 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_LOGGING_H
+#define _IPC_LOGGING_H
+
+#include <linux/types.h>
+
+#define MAX_MSG_SIZE 255
+
+enum {
+	TSV_TYPE_MSG_START = 1,
+	TSV_TYPE_SKB = TSV_TYPE_MSG_START,
+	TSV_TYPE_STRING,
+	TSV_TYPE_MSG_END = TSV_TYPE_STRING,
+};
+
+struct tsv_header {
+	unsigned char type;
+	unsigned char size; /* size of data field */
+};
+
+struct encode_context {
+	struct tsv_header hdr;
+	char buff[MAX_MSG_SIZE];
+	int offset;
+};
+
+struct decode_context {
+	int output_format;      /* 0 = debugfs */
+	char *buff;             /* output buffer */
+	int size;               /* size of output buffer */
+};
+
+#if defined(CONFIG_IPC_LOGGING)
+/*
+ * ipc_log_context_create: Create a debug log context
+ *                         Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @modname      : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages, const char *modname,
+		uint16_t user_version);
+
+/*
+ * msg_encode_start: Start encoding a log message
+ *
+ * @ectxt: Temporary storage to hold the encoded message
+ * @type:  Root event type defined by the module which is logging
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type);
+
+/*
+ * tsv_timestamp_write: Writes the current timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt);
+
+/*
+ * tsv_qtimer_write: Writes the current QTimer timestamp count
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt);
+
+/*
+ * tsv_pointer_write: Writes a data pointer
+ *
+ * @ectxt:   Context initialized by calling msg_encode_start()
+ * @pointer: Pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer);
+
+/*
+ * tsv_int32_write: Writes a 32-bit integer value
+ *
+ * @ectxt: Context initialized by calling msg_encode_start()
+ * @n:     Integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n);
+
+/*
+ * tsv_byte_array_write: Writes a byte array
+ *
+ * @ectxt:     Context initialized by calling msg_encode_start()
+ * @data:      Pointer to the byte array to write
+ * @data_size: Size of the byte array in bytes
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size);
+
+/*
+ * msg_encode_end: Complete the message encode process
+ *
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void msg_encode_end(struct encode_context *ectxt);
+
+/*
+ * ipc_log_write: Write the encoded message into the log space
+ *
+ * @ctxt:  Debug log context created using ipc_log_context_create()
+ * @ectxt: Temporary storage which holds the encoded message
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt);
+
+/*
+ * ipc_log_string: Helper function to log a string
+ *
+ * @ilctxt: Debug Log Context created using ipc_log_context_create()
+ * @fmt:    Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...) __printf(2, 3);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ilctxt:  logging context
+ * @buff:    buffer to receive the data
+ * @size:    size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized.  This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ilctxt, char *buff, int size);
+
+/*
+ * Print a string to decode context.
+ * @dctxt   Decode context
+ * @args   printf args
+ */
+#define IPC_SPRINTF_DECODE(dctxt, args...) \
+do { \
+	int i; \
+	i = scnprintf(dctxt->buff, dctxt->size, args); \
+	dctxt->buff += i; \
+	dctxt->size -= i; \
+} while (0)
+
+/*
+ * tsv_timestamp_read: Reads a timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_qtimer_read: Reads a QTimer timestamp
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+		     struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_pointer_read: Reads a data pointer
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_int32_read: Reads a 32-bit integer value
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format);
+
+/*
+ * tsv_byte_array_read: Reads a byte array
+ *
+ * @ectxt:  Context retrieved by reading from log space
+ * @dctxt:  Temporary storage to hold the decoded message
+ * @format: Output format while dumping through DEBUGFS
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format);
+
+/*
+ * add_deserialization_func: Register a deserialization function to
+ *                           unpack the subevents of a main event
+ *
+ * @ctxt: Debug log context to which the deserialization function has
+ *        to be registered
+ * @type: Main/Root event, defined by the module which is logging, to
+ *        which this deserialization function has to be registered.
+ * @dfunc: Deserialization function to be registered
+ *
+ * return 0 on success, negative value on failure
+ */
+int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *));
+
+/*
+ * ipc_log_context_destroy: Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt);
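+
+/*
+ * Example usage (illustrative sketch; "my_mod", MY_EVENT_TYPE and the
+ * variables below are hypothetical client names):
+ *
+ *	void *ilctxt = ipc_log_context_create(2, "my_mod", 0);
+ *
+ *	// simple string logging:
+ *	ipc_log_string(ilctxt, "state changed to %d\n", state);
+ *
+ *	// custom (TSV) message logging:
+ *	struct encode_context ectxt;
+ *
+ *	msg_encode_start(&ectxt, MY_EVENT_TYPE);
+ *	tsv_timestamp_write(&ectxt);
+ *	tsv_int32_write(&ectxt, value);
+ *	msg_encode_end(&ectxt);
+ *	ipc_log_write(ilctxt, &ectxt);
+ *
+ *	ipc_log_context_destroy(ilctxt);
+ */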
+
+#else
+
+static inline void *ipc_log_context_create(int max_num_pages,
+	const char *modname, uint16_t user_version)
+{ return NULL; }
+
+static inline void msg_encode_start(struct encode_context *ectxt,
+	uint32_t type) { }
+
+static inline int tsv_timestamp_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_qtimer_write(struct encode_context *ectxt)
+{ return -EINVAL; }
+
+static inline int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{ return -EINVAL; }
+
+static inline int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{ return -EINVAL; }
+
+static inline int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size)
+{ return -EINVAL; }
+
+static inline void msg_encode_end(struct encode_context *ectxt) { }
+
+static inline void ipc_log_write(void *ctxt, struct encode_context *ectxt) { }
+
+static inline int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{ return -EINVAL; }
+
+static inline int ipc_log_extract(void *ilctxt, char *buff, int size)
+{ return -EINVAL; }
+
+#define IPC_SPRINTF_DECODE(dctxt, args...) do { } while (0)
+
+static inline void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_qtimer_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format) { }
+
+static inline void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format) { }
+
+static inline int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format)
+{ return 0; }
+
+static inline void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format) { }
+
+static inline int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *))
+{ return 0; }
+
+static inline int ipc_log_context_destroy(void *ctxt)
+{ return 0; }
+
+#endif
+
+#endif
diff --git a/include/linux/ipc_router.h b/include/linux/ipc_router.h
new file mode 100644
index 0000000..8adf723
--- /dev/null
+++ b/include/linux/ipc_router.h
@@ -0,0 +1,346 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_H
+#define _IPC_ROUTER_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/pm.h>
+#include <linux/msm_ipc.h>
+#include <linux/device.h>
+#include <linux/kref.h>
+
+/* Maximum Wakeup Source Name Size */
+#define MAX_WS_NAME_SZ 32
+
+#define IPC_RTR_ERR(buf, ...) \
+	pr_err("IPC_RTR: " buf, __VA_ARGS__)
+
+/**
+ * enum msm_ipc_router_event - Events that will be generated by IPC Router
+ */
+enum msm_ipc_router_event {
+	IPC_ROUTER_CTRL_CMD_DATA = 1,
+	IPC_ROUTER_CTRL_CMD_HELLO,
+	IPC_ROUTER_CTRL_CMD_BYE,
+	IPC_ROUTER_CTRL_CMD_NEW_SERVER,
+	IPC_ROUTER_CTRL_CMD_REMOVE_SERVER,
+	IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT,
+	IPC_ROUTER_CTRL_CMD_RESUME_TX,
+};
+
+/**
+ * rr_control_msg - Control message structure
+ * @cmd: Command identifier for HELLO message in Version 1.
+ * @hello: Message structure for HELLO message in Version 2.
+ * @srv: Message structure for NEW_SERVER/REMOVE_SERVER events.
+ * @cli: Message structure for REMOVE_CLIENT event.
+ */
+union rr_control_msg {
+	uint32_t cmd;
+	struct {
+		uint32_t cmd;
+		uint32_t checksum;
+		uint32_t versions;
+		uint32_t capability;
+		uint32_t reserved;
+	} hello;
+	struct {
+		uint32_t cmd;
+		uint32_t service;
+		uint32_t instance;
+		uint32_t node_id;
+		uint32_t port_id;
+	} srv;
+	struct {
+		uint32_t cmd;
+		uint32_t node_id;
+		uint32_t port_id;
+	} cli;
+};
+
+struct comm_mode_info {
+	int mode;
+	void *xprt_info;
+};
+
+enum ipc_rtr_af_event_type {
+	IPCRTR_AF_INIT = 1,
+	IPCRTR_AF_DEINIT,
+};
+
+/**
+ * msm_ipc_port - Definition of IPC Router port
+ * @list: List(local/control ports) in which this port is present.
+ * @ref: Reference count for this port.
+ * @this_port: Contains port's node_id and port_id information.
+ * @port_name: Contains service & instance info if the port hosts a service.
+ * @type: Type of the port - Client, Service, Control or Security Config.
+ * @flags: Flags to identify the port state.
+ * @port_lock_lhc3: Lock to protect access to the port information.
+ * @mode_info: Communication mode of the port owner.
+ * @port_rx_q: Receive queue where incoming messages are queued.
+ * @port_rx_q_lock_lhc3: Lock to protect access to the port's rx_q.
+ * @rx_ws_name: Name of the receive wakeup source.
+ * @port_rx_ws: Wakeup source to prevent suspend until the rx_q is empty.
+ * @port_rx_wait_q: Wait queue to wait for the incoming messages.
+ * @restart_state: Flag to hold the restart state information.
+ * @restart_lock: Lock to protect access to the restart_state.
+ * @restart_wait: Wait Queue to wait for any restart events.
+ * @endpoint: Contains the information related to user-space interface.
+ * @notify: Function to notify the incoming events on the port.
+ * @check_send_permissions: Function to check access control from this port.
+ * @num_tx: Number of packets transmitted.
+ * @num_rx: Number of packets received.
+ * @num_tx_bytes: Number of bytes transmitted.
+ * @num_rx_bytes: Number of bytes received.
+ * @priv: Private information registered by the port owner.
+ */
+struct msm_ipc_port {
+	struct list_head list;
+	struct kref ref;
+
+	struct msm_ipc_port_addr this_port;
+	struct msm_ipc_port_name port_name;
+	uint32_t type;
+	unsigned int flags;
+	struct mutex port_lock_lhc3;
+	struct comm_mode_info mode_info;
+
+	struct msm_ipc_port_addr dest_addr;
+	int conn_status;
+
+	struct list_head port_rx_q;
+	struct mutex port_rx_q_lock_lhc3;
+	char rx_ws_name[MAX_WS_NAME_SZ];
+	struct wakeup_source *port_rx_ws;
+	wait_queue_head_t port_rx_wait_q;
+	wait_queue_head_t port_tx_wait_q;
+
+	int restart_state;
+	spinlock_t restart_lock;
+	wait_queue_head_t restart_wait;
+
+	void *rport_info;
+	void *endpoint;
+	void (*notify)(unsigned int event, void *oob_data,
+		       size_t oob_data_len, void *priv);
+	int (*check_send_permissions)(void *data);
+
+	uint32_t num_tx;
+	uint32_t num_rx;
+	unsigned long num_tx_bytes;
+	unsigned long num_rx_bytes;
+	void *priv;
+};
+
+#ifdef CONFIG_IPC_ROUTER
+/**
+ * msm_ipc_router_create_port() - Create a IPC Router port/endpoint
+ * @notify: Callback function to notify any event on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private info to be passed while the notification is generated.
+ *
+ * @return: Pointer to the port on success, NULL on error.
+ */
+struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned int event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv);
+
+/**
+ * msm_ipc_router_bind_control_port() - Bind a port as a control port
+ * @port_ptr: Port which needs to be marked as a control port.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_lookup_server_name() - Resolve server address
+ * @srv_name: Name<service:instance> of the server to be resolved.
+ * @srv_info: Buffer to hold the resolved address.
+ * @num_entries_in_array: Number of server info the buffer can hold.
+ * @lookup_mask: Mask to specify the range of instances to be resolved.
+ *
+ * @return: Number of server addresses resolved on success, < 0 on error.
+ */
+int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+				      struct msm_ipc_server_info *srv_info,
+				      int num_entries_in_array,
+				      uint32_t lookup_mask);
+
+/**
+ * msm_ipc_router_send_msg() - Send a message/packet
+ * @src: Sender's address/port.
+ * @dest: Destination address.
+ * @data: Pointer to the data to be sent.
+ * @data_len: Length of the data to be sent.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			    struct msm_ipc_addr *dest,
+			    void *data, unsigned int data_len);
+
+/**
+ * msm_ipc_router_get_curr_pkt_size() - Get the packet size of the first
+ *                                      packet in the rx queue
+ * @port_ptr: Port which owns the rx queue.
+ *
+ * @return: Returns the size of the first packet, if available.
+ *          0 if no packets available, < 0 on error.
+ */
+int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_read_msg() - Read a message/packet
+ * @port_ptr: Receiver's port/address.
+ * @src: Address of the sender/source.
+ * @data: Pointer containing the address of the received data.
+ * @len: Length of the data being read.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			    struct msm_ipc_addr *src,
+			    unsigned char **data,
+			    unsigned int *len);
+
+/**
+ * msm_ipc_router_close_port() - Close the port
+ * @port_ptr: Pointer to the port to be closed.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr);
+
+/**
+ * msm_ipc_router_register_server() - Register a service on a port
+ * @server_port: IPC Router port with which a service is registered.
+ * @name: Service name <service_id:instance_id> that gets registered.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+				   struct msm_ipc_addr *name);
+
+/**
+ * msm_ipc_router_unregister_server() - Unregister a service from a port
+ * @server_port: Port with which a service is already registered.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ */
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port);
+
+/**
+ * register_ipcrtr_af_init_notifier() - Register for ipc router socket
+ *				address family initialization callback
+ * @nb: Notifier block which will be notified once address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int register_ipcrtr_af_init_notifier(struct notifier_block *nb);
+
+/**
+ * unregister_ipcrtr_af_init_notifier() - Unregister for ipc router socket
+ *					address family initialization callback
+ * @nb: Notifier block which will be notified once address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb);
+
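+/*
+ * Usage sketch (illustrative only; not part of the original submission):
+ * the typical create -> send -> read -> close sequence for a kernel client
+ * of the APIs declared above.  The notify callback, tx_buf/tx_len and the
+ * destination address are hypothetical placeholders and error handling is
+ * trimmed.
+ *
+ *	struct msm_ipc_port *port;
+ *	struct msm_ipc_addr dest = { };		e.g. filled from a
+ *						msm_ipc_router_lookup_server_name() result
+ *	struct msm_ipc_addr src;
+ *	unsigned char *rx_buf;
+ *	unsigned int rx_len;
+ *
+ *	port = msm_ipc_router_create_port(example_notify, NULL);
+ *	if (!port)
+ *		return -ENOMEM;
+ *
+ *	msm_ipc_router_send_msg(port, &dest, tx_buf, tx_len);
+ *
+ *	Later, once example_notify() reports incoming data:
+ *	if (msm_ipc_router_get_curr_pkt_size(port) > 0)
+ *		msm_ipc_router_read_msg(port, &src, &rx_buf, &rx_len);
+ *
+ *	msm_ipc_router_close_port(port);
+ */
+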
+#else
+
+static inline struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned int event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv)
+{
+	return NULL;
+}
+
+static inline int msm_ipc_router_bind_control_port(
+		struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_lookup_server_name(
+			struct msm_ipc_port_name *srv_name,
+			struct msm_ipc_server_info *srv_info,
+			int num_entries_in_array,
+			uint32_t lookup_mask)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			struct msm_ipc_addr *dest,
+			void *data, unsigned int data_len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_get_curr_pkt_size(
+			struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			struct msm_ipc_addr *src,
+			unsigned char **data,
+			unsigned int *len)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_register_server(
+			struct msm_ipc_port *server_port,
+			struct msm_ipc_addr *name)
+{
+	return -ENODEV;
+}
+
+static inline int msm_ipc_router_unregister_server(
+			struct msm_ipc_port *server_port)
+{
+	return -ENODEV;
+}
+
+static inline int register_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int unregister_ipcrtr_af_init_notifier(
+			struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+#endif
+
+#endif
diff --git a/include/linux/ipc_router_xprt.h b/include/linux/ipc_router_xprt.h
new file mode 100644
index 0000000..e33a10a
--- /dev/null
+++ b/include/linux/ipc_router_xprt.h
@@ -0,0 +1,175 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_XPRT_H
+#define _IPC_ROUTER_XPRT_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ipc.h>
+#include <linux/ipc_router.h>
+#include <linux/kref.h>
+
+#define IPC_ROUTER_XPRT_EVENT_DATA  1
+#define IPC_ROUTER_XPRT_EVENT_OPEN  2
+#define IPC_ROUTER_XPRT_EVENT_CLOSE 3
+
+#define FRAG_PKT_WRITE_ENABLE 0x1
+
+/**
+ * rr_header_v1 - IPC Router header version 1
+ * @version: Version information.
+ * @type: IPC Router Message Type.
+ * @src_node_id: Source Node ID of the message.
+ * @src_port_id: Source Port ID of the message.
+ * @control_flag: Flag to indicate flow control.
+ * @size: Size of the IPC Router payload.
+ * @dst_node_id: Destination Node ID of the message.
+ * @dst_port_id: Destination Port ID of the message.
+ */
+struct rr_header_v1 {
+	uint32_t version;
+	uint32_t type;
+	uint32_t src_node_id;
+	uint32_t src_port_id;
+	uint32_t control_flag;
+	uint32_t size;
+	uint32_t dst_node_id;
+	uint32_t dst_port_id;
+};
+
+/**
+ * rr_header_v2 - IPC Router header version 2
+ * @version: Version information.
+ * @type: IPC Router Message Type.
+ * @control_flag: Flags to indicate flow control, optional header etc.
+ * @opt_len: Combined size of all the optional headers, in units of words.
+ * @size: Size of the IPC Router payload.
+ * @src_node_id: Source Node ID of the message.
+ * @src_port_id: Source Port ID of the message.
+ * @dst_node_id: Destination Node ID of the message.
+ * @dst_port_id: Destination Port ID of the message.
+ */
+struct rr_header_v2 {
+	uint8_t version;
+	uint8_t type;
+	uint8_t control_flag;
+	uint8_t opt_len;
+	uint32_t size;
+	uint16_t src_node_id;
+	uint16_t src_port_id;
+	uint16_t dst_node_id;
+	uint16_t dst_port_id;
+} __attribute__((__packed__));
+
+union rr_header {
+	struct rr_header_v1 hdr_v1;
+	struct rr_header_v2 hdr_v2;
+};
+
+/**
+ * rr_opt_hdr - Optional header for IPC Router header version 2
+ * @len: Total length of the optional header.
+ * @data: Pointer to the actual optional header.
+ */
+struct rr_opt_hdr {
+	size_t len;
+	unsigned char *data;
+};
+
+#define IPC_ROUTER_HDR_SIZE sizeof(union rr_header)
+#define IPCR_WORD_SIZE 4
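+
+/*
+ * Size sketch (editor's illustration; not part of the original submission):
+ * with the layouts above, the v1 header occupies 8 * 4 = 32 bytes and the
+ * packed v2 header occupies 4 * 1 + 4 + 4 * 2 = 16 bytes, so
+ * IPC_ROUTER_HDR_SIZE (the size of the union) is 32 bytes.  Since the v2
+ * opt_len field counts words, the optional header payload occupies
+ * opt_len * IPCR_WORD_SIZE bytes.
+ */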
+
+/**
+ * rr_packet - Router to Router packet structure
+ * @list: Pointer to prev & next packets in a port's rx list.
+ * @hdr: Header information extracted from or prepended to a packet.
+ * @opt_hdr: Optional header information.
+ * @pkt_fragment_q: Queue of SKBs containing the payload.
+ * @length: Length of the data in the chain of SKBs.
+ * @ref: Reference count for the packet.
+ */
+struct rr_packet {
+	struct list_head list;
+	struct rr_header_v1 hdr;
+	struct rr_opt_hdr opt_hdr;
+	struct sk_buff_head *pkt_fragment_q;
+	uint32_t length;
+	struct kref ref;
+};
+
+/**
+ * msm_ipc_router_xprt - Structure to hold XPRT specific information
+ * @name: Name of the XPRT.
+ * @link_id: Network cluster ID to which the XPRT belongs.
+ * @priv: XPRT's private data.
+ * @get_version: Method to get header version supported by the XPRT.
+ * @set_version: Method to set header version in XPRT.
+ * @get_option: Method to get XPRT specific options.
+ * @read_avail: Method to get data size available to be read from the XPRT.
+ * @read: Method to read data from the XPRT.
+ * @write_avail: Method to get write space available in the XPRT.
+ * @write: Method to write data to the XPRT.
+ * @close: Method to close the XPRT.
+ * @sft_close_done: Method to indicate to the XPRT that handling of reset
+ *                  event is complete.
+ */
+struct msm_ipc_router_xprt {
+	char *name;
+	uint32_t link_id;
+	void *priv;
+
+	int (*get_version)(struct msm_ipc_router_xprt *xprt);
+	int (*get_option)(struct msm_ipc_router_xprt *xprt);
+	void (*set_version)(struct msm_ipc_router_xprt *xprt,
+			    unsigned int version);
+	int (*read_avail)(struct msm_ipc_router_xprt *xprt);
+	int (*read)(void *data, uint32_t len,
+		    struct msm_ipc_router_xprt *xprt);
+	int (*write_avail)(struct msm_ipc_router_xprt *xprt);
+	int (*write)(void *data, uint32_t len,
+		     struct msm_ipc_router_xprt *xprt);
+	int (*close)(struct msm_ipc_router_xprt *xprt);
+	void (*sft_close_done)(struct msm_ipc_router_xprt *xprt);
+};
+
+void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
+				unsigned int event,
+				void *data);
+
+/**
+ * create_pkt() - Create a Router packet
+ * @data: SKB queue to be contained inside the packet.
+ *
+ * @return: pointer to packet on success, NULL on failure.
+ */
+struct rr_packet *create_pkt(struct sk_buff_head *data);
+struct rr_packet *clone_pkt(struct rr_packet *pkt);
+void release_pkt(struct rr_packet *pkt);
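+
+/*
+ * Usage sketch (illustrative only; not part of the original submission):
+ * how a transport driver can hand received data to the router core.  The
+ * skb queue handling is a placeholder, and passing the rr_packet as the
+ * data argument of IPC_ROUTER_XPRT_EVENT_DATA is an assumption based on
+ * existing transports.
+ *
+ *	struct sk_buff_head *frag_q = ...;	SKBs read from the link
+ *	struct rr_packet *pkt;
+ *
+ *	pkt = create_pkt(frag_q);
+ *	if (!pkt)
+ *		return -ENOMEM;
+ *	msm_ipc_router_xprt_notify(&example_xprt, IPC_ROUTER_XPRT_EVENT_DATA,
+ *				   pkt);
+ */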
+
+/**
+ * ipc_router_peek_pkt_size() - Peek into the packet header to get potential
+ *				packet size
+ * @data: Starting address of the packet which points to router header.
+ *
+ * @return: Potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link-layer fragmentation and re-assembly.
+ */
+int ipc_router_peek_pkt_size(char *data);
+
+#endif
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index a064997..7aebe23 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -40,6 +40,7 @@
 	__s32		accept_ra_rt_info_max_plen;
 #endif
 #endif
+	__s32		accept_ra_rt_table;
 	__s32		proxy_ndp;
 	__s32		accept_source_route;
 	__s32		accept_ra_from_local;
diff --git a/include/linux/keychord.h b/include/linux/keychord.h
new file mode 100644
index 0000000..08cf540
--- /dev/null
+++ b/include/linux/keychord.h
@@ -0,0 +1,23 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_KEYCHORD_H_
+#define __LINUX_KEYCHORD_H_
+
+#include <uapi/linux/keychord.h>
+
+#endif	/* __LINUX_KEYCHORD_H_ */
diff --git a/include/linux/keycombo.h b/include/linux/keycombo.h
new file mode 100644
index 0000000..c6db262
--- /dev/null
+++ b/include/linux/keycombo.h
@@ -0,0 +1,36 @@
+/*
+ * include/linux/keycombo.h - platform data structure for keycombo driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYCOMBO_H
+#define _LINUX_KEYCOMBO_H
+
+#define KEYCOMBO_NAME "keycombo"
+
+/*
+ * if key_down_fn and key_up_fn are both present, you are guaranteed that
+ * key_down_fn will return before key_up_fn is called, and that key_up_fn
+ * is called iff key_down_fn is called.
+ */
+struct keycombo_platform_data {
+	void (*key_down_fn)(void *);
+	void (*key_up_fn)(void *);
+	void *priv;
+	int key_down_delay; /* Time in ms */
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
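+
+/*
+ * Setup sketch (illustrative only; not part of the original submission):
+ * building a keycombo_platform_data for a two-key combination.  Because
+ * keys_down is a zero-terminated flexible array, the structure is sized
+ * and filled at runtime.  The callbacks and key choices are hypothetical.
+ *
+ *	struct keycombo_platform_data *pdata;
+ *	int size = sizeof(*pdata) + sizeof(int) * 3;	2 keys + terminator
+ *
+ *	pdata = kzalloc(size, GFP_KERNEL);
+ *	if (!pdata)
+ *		return -ENOMEM;
+ *	pdata->key_down_fn = example_down;
+ *	pdata->key_up_fn = example_up;
+ *	pdata->key_down_delay = 100;		milliseconds
+ *	pdata->keys_down[0] = KEY_POWER;
+ *	pdata->keys_down[1] = KEY_VOLUMEDOWN;
+ *	pdata->keys_down[2] = 0;
+ */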
+
+#endif /* _LINUX_KEYCOMBO_H */
diff --git a/include/linux/keyreset.h b/include/linux/keyreset.h
new file mode 100644
index 0000000..2e34afa
--- /dev/null
+++ b/include/linux/keyreset.h
@@ -0,0 +1,29 @@
+/*
+ * include/linux/keyreset.h - platform data structure for resetkeys driver
+ *
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_KEYRESET_H
+#define _LINUX_KEYRESET_H
+
+#define KEYRESET_NAME "keyreset"
+
+struct keyreset_platform_data {
+	int (*reset_fn)(void);
+	int key_down_delay;
+	int *keys_up;
+	int keys_down[]; /* 0 terminated */
+};
+
+#endif /* _LINUX_KEYRESET_H */
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
index 4434871..78f01ea 100644
--- a/include/linux/mailbox_client.h
+++ b/include/linux/mailbox_client.h
@@ -44,8 +44,10 @@
 					      const char *name);
 struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
 int mbox_send_message(struct mbox_chan *chan, void *mssg);
+int mbox_send_controller_data(struct mbox_chan *chan, void *mssg);
 void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
 bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
 void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
+bool mbox_controller_is_idle(struct mbox_chan *chan); /* atomic */
 
 #endif /* __MAILBOX_CLIENT_H */
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
index 74deadb..30a4ed2 100644
--- a/include/linux/mailbox_controller.h
+++ b/include/linux/mailbox_controller.h
@@ -24,6 +24,12 @@
  *		transmission of data is reported by the controller via
  *		mbox_chan_txdone (if it has some TX ACK irq). It must not
  *		sleep.
+ * @send_controller_data:
+ *		Send data for the controller driver. This could be data to
+ *		configure the controller or data that may be cached in the
+ *		controller and not transmitted immediately. There is no ACK
+ *		for this request and the request is not buffered in the
+ *		controller. Must not sleep.
  * @startup:	Called when a client requests the chan. The controller
  *		could ask clients for additional parameters of communication
  *		to be provided via client's chan_data. This call may
@@ -46,6 +52,7 @@
  */
 struct mbox_chan_ops {
 	int (*send_data)(struct mbox_chan *chan, void *data);
+	int (*send_controller_data)(struct mbox_chan *chan, void *data);
 	int (*startup)(struct mbox_chan *chan);
 	void (*shutdown)(struct mbox_chan *chan);
 	bool (*last_tx_done)(struct mbox_chan *chan);
@@ -67,6 +74,7 @@
  * @txpoll_period:	If 'txdone_poll' is in effect, the API polls for
  *			last TX's status after these many millisecs
  * @of_xlate:		Controller driver specific mapping of channel via DT
+ * @is_idle:		Returns whether the controller is idle.
  * @poll_hrt:		API private. hrtimer used to poll for TXDONE on all
  *			channels.
  * @node:		API private. To hook into list of controllers.
@@ -81,6 +89,7 @@
 	unsigned txpoll_period;
 	struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
 				      const struct of_phandle_args *sp);
+	bool (*is_idle)(struct mbox_controller *mbox);
 	/* Internal to API */
 	struct hrtimer poll_hrt;
 	struct list_head node;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 5b759c9..da25f07 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -93,6 +93,8 @@
 int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
 int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
 ulong choose_memblock_flags(void);
+unsigned long memblock_region_resize_late_begin(void);
+void memblock_region_resize_late_end(unsigned long);
 
 /* Low level functions */
 int memblock_add_range(struct memblock_type *type,
@@ -339,6 +341,7 @@
 int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
 bool memblock_is_reserved(phys_addr_t addr);
 bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
+int memblock_overlaps_memory(phys_addr_t base, phys_addr_t size);
 
 extern void __memblock_dump_all(void);
 
diff --git a/include/linux/memory-state-time.h b/include/linux/memory-state-time.h
new file mode 100644
index 0000000..d2212b0
--- /dev/null
+++ b/include/linux/memory-state-time.h
@@ -0,0 +1,42 @@
+/* include/linux/memory-state-time.h
+ *
+ * Copyright (C) 2016 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/workqueue.h>
+
+#define UPDATE_MEMORY_STATE(BLOCK, VALUE) BLOCK->update_call(BLOCK, VALUE)
+
+struct memory_state_update_block;
+
+typedef void (*memory_state_update_fn_t)(struct memory_state_update_block *ub,
+		int value);
+
+/* This struct is populated when it is passed to a memory_state_register*
+ * function. Its update_call member, of type memory_state_update_fn_t, is
+ * invoked to deliver each update.
+ */
+struct memory_state_update_block {
+	memory_state_update_fn_t update_call;
+	int id;
+};
+
+/* Register a frequency struct memory_state_update_block to provide updates to
+ * memory_state_time about frequency changes using its update_call function.
+ */
+struct memory_state_update_block *memory_state_register_frequency_source(void);
+
+/* Register a bandwidth struct memory_state_update_block to provide updates to
+ * memory_state_time about bandwidth changes using its update_call function.
+ */
+struct memory_state_update_block *memory_state_register_bandwidth_source(void);
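+
+/*
+ * Usage sketch (illustrative only; not part of the original submission):
+ * a hypothetical DDR frequency driver registers itself as a frequency
+ * source and then reports each new operating point index through the
+ * UPDATE_MEMORY_STATE() helper.
+ *
+ *	static struct memory_state_update_block *example_freq_block;
+ *
+ *	example_freq_block = memory_state_register_frequency_source();
+ *	...
+ *	UPDATE_MEMORY_STATE(example_freq_block, new_freq_index);
+ */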
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a92c8d7..46c927f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1138,6 +1138,7 @@
 extern void show_free_areas(unsigned int flags);
 extern bool skip_free_areas_node(unsigned int flags, int nid);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file);
 int shmem_zero_setup(struct vm_area_struct *);
 #ifdef CONFIG_SHMEM
 bool shmem_mapping(struct address_space *mapping);
@@ -1974,7 +1975,7 @@
 extern struct vm_area_struct *vma_merge(struct mm_struct *,
 	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
 	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
-	struct mempolicy *, struct vm_userfaultfd_ctx);
+	struct mempolicy *, struct vm_userfaultfd_ctx, const char __user *);
 extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
 extern int split_vma(struct mm_struct *,
 	struct vm_area_struct *, unsigned long addr, int new_below);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4a8aced..4d740f2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -326,11 +326,18 @@
 	/*
 	 * For areas with an address space and backing store,
 	 * linkage into the address_space->i_mmap interval tree.
+	 *
+	 * For private anonymous mappings, a pointer to a null terminated string
+	 * in the user process containing the name given to the vma, or NULL
+	 * if unnamed.
 	 */
-	struct {
-		struct rb_node rb;
-		unsigned long rb_subtree_last;
-	} shared;
+	union {
+		struct {
+			struct rb_node rb;
+			unsigned long rb_subtree_last;
+		} shared;
+		const char __user *anon_name;
+	};
 
 	/*
 	 * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
@@ -615,4 +622,13 @@
 	unsigned long val;
 } swp_entry_t;
 
+/* Return the name for an anonymous mapping or NULL for a file-backed mapping */
+static inline const char __user *vma_get_anon_name(struct vm_area_struct *vma)
+{
+	if (vma->vm_file)
+		return NULL;
+
+	return vma->anon_name;
+}
+
 #endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 2b953eb..ea4019c 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -142,6 +142,11 @@
 
 	/* Allow other commands during this ongoing data transfer or busy wait */
 	bool			cap_cmd_during_tfr;
+
+	ktime_t			io_start;
+#ifdef CONFIG_BLOCK
+	int			lat_hist_enabled;
+#endif
 };
 
 struct mmc_card;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 0b24394..fac3b5c 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -16,6 +16,7 @@
 #include <linux/sched.h>
 #include <linux/device.h>
 #include <linux/fault-inject.h>
+#include <linux/blkdev.h>
 
 #include <linux/mmc/core.h>
 #include <linux/mmc/card.h>
@@ -397,6 +398,20 @@
 	int			dsr_req;	/* DSR value is valid */
 	u32			dsr;	/* optional driver stage (DSR) value */
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+	struct {
+		struct sdio_cis			*cis;
+		struct sdio_cccr		*cccr;
+		struct sdio_embedded_func	*funcs;
+		int				num_funcs;
+	} embedded_sdio_data;
+#endif
+
+#ifdef CONFIG_BLOCK
+	int			latency_hist_enabled;
+	struct io_latency_state io_lat_s;
+#endif
+
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
@@ -406,6 +421,14 @@
 void mmc_free_host(struct mmc_host *);
 int mmc_of_parse(struct mmc_host *host);
 
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+extern void mmc_set_embedded_sdio_data(struct mmc_host *host,
+				       struct sdio_cis *cis,
+				       struct sdio_cccr *cccr,
+				       struct sdio_embedded_func *funcs,
+				       int num_funcs);
+#endif
+
 static inline void *mmc_priv(struct mmc_host *host)
 {
 	return (void *)host->private;
diff --git a/include/linux/mmc/pm.h b/include/linux/mmc/pm.h
index 4a13920..6e2d6a1 100644
--- a/include/linux/mmc/pm.h
+++ b/include/linux/mmc/pm.h
@@ -26,5 +26,6 @@
 
 #define MMC_PM_KEEP_POWER	(1 << 0)	/* preserve card power during suspend */
 #define MMC_PM_WAKE_SDIO_IRQ	(1 << 1)	/* wake up host system on SDIO IRQ assertion */
+#define MMC_PM_IGNORE_PM_NOTIFY	(1 << 2)	/* ignore mmc pm notify */
 
 #endif /* LINUX_MMC_PM_H */
diff --git a/include/linux/mmc/sdio_func.h b/include/linux/mmc/sdio_func.h
index aab032a..d0a69e7 100644
--- a/include/linux/mmc/sdio_func.h
+++ b/include/linux/mmc/sdio_func.h
@@ -23,6 +23,14 @@
 typedef void (sdio_irq_handler_t)(struct sdio_func *);
 
 /*
+ * Structure used to hold embedded SDIO device data from platform layer
+ */
+struct sdio_embedded_func {
+	uint8_t f_class;
+	uint32_t f_maxblksize;
+};
+
+/*
  * SDIO function CIS tuple (unknown to the core)
  */
 struct sdio_func_tuple {
@@ -128,6 +136,8 @@
 extern unsigned int sdio_align_size(struct sdio_func *func, unsigned int sz);
 
 extern u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret);
+extern u8 sdio_readb_ext(struct sdio_func *func, unsigned int addr, int *err_ret,
+	unsigned in);
 extern u16 sdio_readw(struct sdio_func *func, unsigned int addr, int *err_ret);
 extern u32 sdio_readl(struct sdio_func *func, unsigned int addr, int *err_ret);
 
diff --git a/include/linux/msm-bus-board.h b/include/linux/msm-bus-board.h
new file mode 100644
index 0000000..d6c97c6
--- /dev/null
+++ b/include/linux/msm-bus-board.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_ARCH_MSM_BUS_BOARD_H
+#define __ASM_ARCH_MSM_BUS_BOARD_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+
+enum context {
+	DUAL_CTX,
+	ACTIVE_CTX,
+	NUM_CTX
+};
+
+struct msm_bus_fabric_registration {
+	unsigned int id;
+	const char *name;
+	struct msm_bus_node_info *info;
+	unsigned int len;
+	int ahb;
+	const char *fabclk[NUM_CTX];
+	const char *iface_clk;
+	unsigned int offset;
+	unsigned int haltid;
+	unsigned int rpm_enabled;
+	unsigned int nmasters;
+	unsigned int nslaves;
+	unsigned int ntieredslaves;
+	bool il_flag;
+	const struct msm_bus_board_algorithm *board_algo;
+	int hw_sel;
+	void *hw_data;
+	uint32_t qos_freq;
+	uint32_t qos_baseoffset;
+	u64 nr_lim_thresh;
+	uint32_t eff_fact;
+	uint32_t qos_delta;
+	bool virt;
+};
+
+struct msm_bus_device_node_registration {
+	struct msm_bus_node_device_type *info;
+	unsigned int num_devices;
+	bool virt;
+};
+
+enum msm_bus_bw_tier_type {
+	MSM_BUS_BW_TIER1 = 1,
+	MSM_BUS_BW_TIER2,
+	MSM_BUS_BW_COUNT,
+	MSM_BUS_BW_SIZE = 0x7FFFFFFF,
+};
+
+struct msm_bus_halt_vector {
+	uint32_t haltval;
+	uint32_t haltmask;
+};
+
+extern struct msm_bus_fabric_registration msm_bus_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_cpss_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8960_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sg_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8960_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8064_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8064_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9615_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9615_def_fab_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8930_apps_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_mm_fabric_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_sys_fpb_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8930_cpss_fpb_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_8974_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_mmss_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_config_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_8974_ocmem_vnoc_pdata;
+
+extern struct msm_bus_fabric_registration msm_bus_9625_sys_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_bimc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_periph_noc_pdata;
+extern struct msm_bus_fabric_registration msm_bus_9625_config_noc_pdata;
+
+extern int msm_bus_device_match_adhoc(struct device *dev, void *id);
+
+void msm_bus_rpm_set_mt_mask(void);
+int msm_bus_board_rpm_get_il_ids(uint16_t *id);
+int msm_bus_board_get_iid(int id);
+
+#define NFAB_MSM8226 6
+#define NFAB_MSM8610 5
+
+/*
+ * These macros specify the convention followed for allocating
+ * ids to fabrics, masters and slaves for 8x60.
+ *
+ * A node can be identified as a master/slave/fabric by using
+ * these ids.
+ */
+#define FABRIC_ID_KEY 1024
+#define SLAVE_ID_KEY ((FABRIC_ID_KEY) >> 1)
+#define MAX_FAB_KEY 7168  /* OR(All fabric ids) */
+#define INT_NODE_START 10000
+
+#define GET_FABID(id) ((id) & MAX_FAB_KEY)
+
+#define NODE_ID(id) ((id) & (FABRIC_ID_KEY - 1))
+#define IS_SLAVE(id) ((NODE_ID(id)) >= SLAVE_ID_KEY ? 1 : 0)
+#define CHECK_ID(iid, id) (((iid & id) != id) ? -ENXIO : iid)
+
+/*
+ * The following macros are used to format the data for port halt
+ * and unhalt requests.
+ */
+#define MSM_BUS_CLK_HALT 0x1
+#define MSM_BUS_CLK_HALT_MASK 0x1
+#define MSM_BUS_CLK_HALT_FIELDSIZE 0x1
+#define MSM_BUS_CLK_UNHALT 0x0
+
+#define MSM_BUS_MASTER_SHIFT(master, fieldsize) \
+	((master) * (fieldsize))
+
+#define MSM_BUS_SET_BITFIELD(word, fieldmask, fieldvalue) \
+	{	\
+		(word) &= ~(fieldmask);	\
+		(word) |= (fieldvalue);	\
+	}
+
+
+#define MSM_BUS_MASTER_HALT(u32haltmask, u32haltval, master) \
+	(\
+	MSM_BUS_SET_BITFIELD(u32haltmask, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))\
+	MSM_BUS_SET_BITFIELD(u32haltval, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_HALT<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))\
+	)
+
+#define MSM_BUS_MASTER_UNHALT(u32haltmask, u32haltval, master) \
+	(\
+	MSM_BUS_SET_BITFIELD(u32haltmask, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))\
+	MSM_BUS_SET_BITFIELD(u32haltval, \
+		MSM_BUS_CLK_HALT_MASK<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE), \
+		MSM_BUS_CLK_UNHALT<<MSM_BUS_MASTER_SHIFT((master),\
+		MSM_BUS_CLK_HALT_FIELDSIZE))\
+	)
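+
+/*
+ * Worked example (editor's illustration; not part of the original
+ * submission): with a one-bit halt field per master,
+ * MSM_BUS_MASTER_HALT(mask, val, 2) sets bit 2 in both words
+ * (mask |= 0x4, val |= 0x4), while MSM_BUS_MASTER_UNHALT(mask, val, 2)
+ * sets bit 2 in the mask and clears it in the value word.
+ */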
+
+#define RPM_BUS_SLAVE_REQ	0x766c7362
+#define RPM_BUS_MASTER_REQ	0x73616d62
+
+enum msm_bus_rpm_slave_field_type {
+	RPM_SLAVE_FIELD_BW = 0x00007762,
+};
+
+enum msm_bus_rpm_mas_field_type {
+	RPM_MASTER_FIELD_BW =		0x00007762,
+	RPM_MASTER_FIELD_BW_T0 =	0x30747762,
+	RPM_MASTER_FIELD_BW_T1 =	0x31747762,
+	RPM_MASTER_FIELD_BW_T2 =	0x32747762,
+};
+
+#include <dt-bindings/msm/msm-bus-ids.h>
+
+
+#endif /*__ASM_ARCH_MSM_BUS_BOARD_H */
diff --git a/include/linux/msm-bus.h b/include/linux/msm-bus.h
new file mode 100644
index 0000000..6ebdaa2
--- /dev/null
+++ b/include/linux/msm-bus.h
@@ -0,0 +1,214 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_H
+#define _ARCH_ARM_MACH_MSM_BUS_H
+
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/platform_device.h>
+
+/*
+ * Macros for clients to convert their data to ib and ab
+ * Ws : Time window over which to transfer the data in SECONDS
+ * Bs : Size of the data block in bytes
+ * Per : Recurrence period
+ * Tb : Throughput bandwidth to prevent stalling
+ * R  : Ratio of actual bandwidth used to Tb
+ * Ib : Instantaneous bandwidth
+ * Ab : Arbitrated bandwidth
+ *
+ * IB_RECURRBLOCK and AB_RECURRBLOCK:
+ * These are used if the requirement is to transfer a
+ * recurring block of data over a known time window.
+ *
+ * IB_THROUGHPUTBW and AB_THROUGHPUTBW:
+ * These are used for CPU style masters. Here the requirement
+ * is to have minimum throughput bandwidth available to avoid
+ * stalling.
+ */
+#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
+#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
+#define IB_THROUGHPUTBW(Tb) (Tb)
+#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
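+
+/*
+ * Worked example (editor's illustration; not part of the original
+ * submission): a client that moves a 1000000-byte block within a 1-second
+ * window, recurring every 2 seconds, would request
+ *	ib = IB_RECURRBLOCK(1, 1000000)  = 1000000
+ *	ab = AB_RECURRBLOCK(1000000, 2)  =  500000
+ * while a CPU-style master simply requests ib = IB_THROUGHPUTBW(Tb) and
+ * ab = AB_THROUGHPUTBW(Tb, R), i.e. Tb scaled by its utilization ratio R.
+ */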
+
+struct msm_bus_vectors {
+	int src; /* Master */
+	int dst; /* Slave */
+	uint64_t ab; /* Arbitrated bandwidth */
+	uint64_t ib; /* Instantaneous bandwidth */
+};
+
+struct msm_bus_paths {
+	int num_paths;
+	struct msm_bus_vectors *vectors;
+};
+
+struct msm_bus_scale_pdata {
+	struct msm_bus_paths *usecase;
+	int num_usecases;
+	const char *name;
+	/*
+	 * If the active_only flag is set to 1, the BW request is applied
+	 * only when at least one CPU is active (powered on). If the flag
+	 * is set to 0, then the BW request is always applied irrespective
+	 * of the CPU state.
+	 */
+	unsigned int active_only;
+};
+
+struct msm_bus_client_handle {
+	char *name;
+	int mas;
+	int slv;
+	int first_hop;
+	struct device *mas_dev;
+	u64 cur_act_ib;
+	u64 cur_act_ab;
+	u64 cur_slp_ib;
+	u64 cur_slp_ab;
+	bool active_only;
+};
+
+/* Scaling APIs */
+
+/*
+ * This function returns a handle to the client. This handle should be used
+ * when calling msm_bus_scale_client_update_request().
+ * The function returns 0 if the bus driver is unable to register a client.
+ */
+
+#if (defined(CONFIG_QCOM_BUS_SCALING) ||\
+	defined(CONFIG_QCOM_BUS_TOPOLOGY_ADHOC))
+int __init msm_bus_fabric_init_driver(void);
+uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
+int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
+void msm_bus_scale_unregister_client(uint32_t cl);
+int msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx);
+
+struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name,
+							bool active_only);
+void msm_bus_scale_unregister(struct msm_bus_client_handle *cl);
+int msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib);
+int msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl,
+		u64 act_ab, u64 act_ib, u64 slp_ib, u64 slp_ab);
+/* AXI Port configuration APIs */
+int msm_bus_axi_porthalt(int master_port);
+int msm_bus_axi_portunhalt(int master_port);
+
+#else
+static inline int __init msm_bus_fabric_init_driver(void) { return 0; }
+static struct msm_bus_client_handle dummy_cl;
+
+static inline uint32_t
+msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
+{
+	return 1;
+}
+
+static inline int
+msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_scale_client_update_context(uint32_t cl, bool active_only,
+							unsigned int ctx_idx)
+{
+	return 0;
+}
+
+static inline void
+msm_bus_scale_unregister_client(uint32_t cl)
+{
+}
+
+static inline int msm_bus_axi_porthalt(int master_port)
+{
+	return 0;
+}
+
+static inline int msm_bus_axi_portunhalt(int master_port)
+{
+	return 0;
+}
+
+static inline struct msm_bus_client_handle*
+msm_bus_scale_register(uint32_t mas, uint32_t slv, char *name,
+							bool active_only)
+{
+	return &dummy_cl;
+}
+
+static inline void msm_bus_scale_unregister(struct msm_bus_client_handle *cl)
+{
+}
+
+static inline int
+msm_bus_scale_update_bw(struct msm_bus_client_handle *cl, u64 ab, u64 ib)
+{
+	return 0;
+}
+
+static inline int
+msm_bus_scale_update_bw_context(struct msm_bus_client_handle *cl, u64 act_ab,
+				u64 act_ib, u64 slp_ib, u64 slp_ab)
+
+{
+	return 0;
+}
+
+#endif
+
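+/*
+ * Usage sketch (illustrative only; not part of the original submission):
+ * a client voting for bandwidth on one master/slave pair through the
+ * pdata-based interface above.  The master/slave IDs and bandwidth values
+ * are hypothetical placeholders.
+ *
+ *	static struct msm_bus_vectors example_idle[] = {
+ *		{ .src = <master id>, .dst = <slave id>, .ab = 0, .ib = 0 },
+ *	};
+ *	static struct msm_bus_vectors example_active[] = {
+ *		{ .src = <master id>, .dst = <slave id>,
+ *		  .ab = 100000000ULL, .ib = 200000000ULL },
+ *	};
+ *	static struct msm_bus_paths example_usecases[] = {
+ *		{ .num_paths = 1, .vectors = example_idle },
+ *		{ .num_paths = 1, .vectors = example_active },
+ *	};
+ *	static struct msm_bus_scale_pdata example_pdata = {
+ *		.usecase = example_usecases,
+ *		.num_usecases = 2,
+ *		.name = "example-client",
+ *	};
+ *
+ *	uint32_t cl = msm_bus_scale_register_client(&example_pdata);
+ *	if (!cl)
+ *		return -EINVAL;
+ *	msm_bus_scale_client_update_request(cl, 1);	vote for "active"
+ *	...
+ *	msm_bus_scale_client_update_request(cl, 0);	back to idle
+ *	msm_bus_scale_unregister_client(cl);
+ */
+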
+#if defined(CONFIG_OF) && defined(CONFIG_QCOM_BUS_SCALING)
+struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node);
+struct msm_bus_scale_pdata *msm_bus_cl_get_pdata(struct platform_device *pdev);
+void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata);
+#else
+static inline struct msm_bus_scale_pdata
+*msm_bus_cl_get_pdata(struct platform_device *pdev)
+{
+	return NULL;
+}
+
+static inline struct msm_bus_scale_pdata *msm_bus_pdata_from_node(
+		struct platform_device *pdev, struct device_node *of_node)
+{
+	return NULL;
+}
+
+static inline void msm_bus_cl_clear_pdata(struct msm_bus_scale_pdata *pdata)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_BUS_VOTER
+int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+						bool active_only);
+int msm_bus_floor_vote(const char *name, u64 floor_hz);
+#else
+static inline int msm_bus_floor_vote(const char *name, u64 floor_hz)
+{
+	return -EINVAL;
+}
+
+static inline int msm_bus_floor_vote_context(const char *name, u64 floor_hz,
+						bool active_only)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_DEBUG_BUS_VOTER */
+#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
diff --git a/include/linux/msm-sps.h b/include/linux/msm-sps.h
new file mode 100644
index 0000000..4a9b8a8
--- /dev/null
+++ b/include/linux/msm-sps.h
@@ -0,0 +1,1639 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/* Smart-Peripheral-Switch (SPS) API. */
+
+#ifndef _SPS_H_
+#define _SPS_H_
+
+#include <linux/types.h>	/* u32 */
+
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) || defined(CONFIG_ARM_LPAE)
+
+/* Returns upper 4bits of 36bits physical address */
+#define SPS_GET_UPPER_ADDR(addr) ((addr & 0xF00000000ULL) >> 32)
+
+/* Returns 36bits physical address from 32bit address &
+ * flags word
+ */
+#define DESC_FULL_ADDR(flags, addr) ((((phys_addr_t)flags & 0xF) << 32) | addr)
+
+/* Returns flags word with flags and 4bit upper address
+ * from flags and 36bit physical address
+ */
+#define DESC_FLAG_WORD(flags, addr) (((addr & 0xF00000000ULL) >> 32) | flags)
+
+#else
+
+#define SPS_GET_UPPER_ADDR(addr) (0)
+#define DESC_FULL_ADDR(flags, addr) (addr)
+#define DESC_FLAG_WORD(flags, addr) (flags)
+
+#endif
+
+/* Returns upper 4bits of 36bits physical address from
+ * flags word
+ */
+#define DESC_UPPER_ADDR(flags) ((flags & 0xF))
+
+/* Returns lower 32bits of 36bits physical address */
+#define SPS_GET_LOWER_ADDR(addr) ((u32)(addr & 0xFFFFFFFF))
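+
+/*
+ * Worked example (editor's illustration; not part of the original
+ * submission): for the 36-bit buffer address 0x340001000,
+ *	SPS_GET_UPPER_ADDR(addr)                 = 0x3
+ *	SPS_GET_LOWER_ADDR(addr)                 = 0x40001000
+ *	DESC_FLAG_WORD(SPS_IOVEC_FLAG_EOT, addr) = 0x4003
+ *	DESC_FULL_ADDR(0x4003, 0x40001000)       = 0x340001000
+ * i.e. the upper four address bits travel in the low nibble of the
+ * descriptor flags word when 64-bit/LPAE physical addressing is enabled.
+ */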
+
+/* SPS device handle indicating use of system memory */
+#define SPS_DEV_HANDLE_MEM       (~0x0ul>>1)
+
+/* SPS device handle indicating use of BAM-DMA */
+
+/* SPS device handle invalid value */
+#define SPS_DEV_HANDLE_INVALID   0
+
+/* BAM invalid IRQ value */
+#define SPS_IRQ_INVALID          0
+
+/* Invalid address value */
+#define SPS_ADDR_INVALID      (0xDEADBEEF)
+
+/* Invalid peripheral device enumeration class */
+#define SPS_CLASS_INVALID     ((unsigned long)-1)
+
+/*
+ * This value specifies different configurations for an SPS connection.
+ * A non-default value instructs the SPS driver to search for the configuration
+ * in the fixed connection mapping table.
+ */
+#define SPS_CONFIG_DEFAULT       0
+
+/*
+ * This value instructs the SPS driver to use the default BAM-DMA channel
+ * threshold
+ */
+#define SPS_DMA_THRESHOLD_DEFAULT   0
+
+/* Flag bits supported by SPS hardware for struct sps_iovec */
+#define SPS_IOVEC_FLAG_INT  0x8000  /* Generate interrupt */
+#define SPS_IOVEC_FLAG_EOT  0x4000  /* Generate end-of-transfer indication */
+#define SPS_IOVEC_FLAG_EOB  0x2000  /* Generate end-of-block indication */
+#define SPS_IOVEC_FLAG_NWD  0x1000  /* notify when done */
+#define SPS_IOVEC_FLAG_CMD  0x0800  /* command descriptor */
+#define SPS_IOVEC_FLAG_LOCK  0x0400  /* pipe lock */
+#define SPS_IOVEC_FLAG_UNLOCK  0x0200  /* pipe unlock */
+#define SPS_IOVEC_FLAG_IMME 0x0100  /* immediate command descriptor */
+#define SPS_IOVEC_FLAG_NO_SUBMIT 0x0020  /* Do not submit descriptor to HW */
+#define SPS_IOVEC_FLAG_DEFAULT   0x0010  /* Use driver default */
+
+/* Maximum descriptor/iovec size */
+#define SPS_IOVEC_MAX_SIZE   (32 * 1024 - 1)  /* 32K-1 bytes due to HW limit */
+
+/* BAM device options flags */
+
+/*
+ * BAM will be configured and enabled at boot.  Otherwise, BAM will be
+ * configured and enabled when the first pipe connect occurs.
+ */
+#define SPS_BAM_OPT_ENABLE_AT_BOOT  1UL
+/* BAM IRQ is disabled */
+#define SPS_BAM_OPT_IRQ_DISABLED    (1UL << 1)
+/* BAM peripheral is a BAM-DMA */
+#define SPS_BAM_OPT_BAMDMA          (1UL << 2)
+/* BAM IRQ is registered for apps wakeup */
+#define SPS_BAM_OPT_IRQ_WAKEUP      (1UL << 3)
+/* Ignore external block pipe reset */
+#define SPS_BAM_NO_EXT_P_RST        (1UL << 4)
+/* Don't enable local clock gating */
+#define SPS_BAM_NO_LOCAL_CLK_GATING (1UL << 5)
+/* Don't enable writeback cancel*/
+#define SPS_BAM_CANCEL_WB           (1UL << 6)
+/* BAM uses SMMU */
+#define SPS_BAM_SMMU_EN             (1UL << 9)
+/* Confirm resource status before access BAM*/
+#define SPS_BAM_RES_CONFIRM         (1UL << 7)
+/* Hold memory for BAM DMUX */
+#define SPS_BAM_HOLD_MEM            (1UL << 8)
+/* Use cached write pointer */
+#define SPS_BAM_CACHED_WP           (1UL << 10)
+
+/* BAM device management flags */
+
+/* BAM global device control is managed remotely */
+#define SPS_BAM_MGR_DEVICE_REMOTE   1UL
+/* BAM device supports multiple execution environments */
+#define SPS_BAM_MGR_MULTI_EE        (1UL << 1)
+/* BAM pipes are *not* allocated locally */
+#define SPS_BAM_MGR_PIPE_NO_ALLOC   (1UL << 2)
+/* BAM pipes are *not* configured locally */
+#define SPS_BAM_MGR_PIPE_NO_CONFIG  (1UL << 3)
+/* BAM pipes are *not* controlled locally */
+#define SPS_BAM_MGR_PIPE_NO_CTRL    (1UL << 4)
+/* "Globbed" management properties */
+#define SPS_BAM_MGR_NONE            \
+	(SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_PIPE_NO_ALLOC | \
+	 SPS_BAM_MGR_PIPE_NO_CONFIG | SPS_BAM_MGR_PIPE_NO_CTRL)
+#define SPS_BAM_MGR_LOCAL           0
+#define SPS_BAM_MGR_LOCAL_SHARED    SPS_BAM_MGR_MULTI_EE
+#define SPS_BAM_MGR_REMOTE_SHARED   \
+	(SPS_BAM_MGR_DEVICE_REMOTE | SPS_BAM_MGR_MULTI_EE | \
+	 SPS_BAM_MGR_PIPE_NO_ALLOC)
+#define SPS_BAM_MGR_ACCESS_MASK     SPS_BAM_MGR_NONE
+
+/*
+ * BAM security configuration
+ */
+#define SPS_BAM_NUM_EES             4
+#define SPS_BAM_SEC_DO_NOT_CONFIG   0
+#define SPS_BAM_SEC_DO_CONFIG       0x0A434553
+
+/* BAM pipe selection */
+#define SPS_BAM_PIPE(n)             (1UL << (n))
+
+/* This enum specifies the operational mode for an SPS connection */
+enum sps_mode {
+	SPS_MODE_SRC = 0,  /* end point is the source (producer) */
+	SPS_MODE_DEST,	   /* end point is the destination (consumer) */
+};
+
+
+/*
+ * This enum is a set of bit flag options for SPS connection.
+ * The enums should be OR'd together to create the option set
+ * for the SPS connection.
+ */
+enum sps_option {
+	/*
+	 * Options to enable specific SPS hardware interrupts.
+	 * These bit flags are also used to indicate interrupt source
+	 * for the SPS_EVENT_IRQ event.
+	 */
+	SPS_O_DESC_DONE = 0x00000001,  /* Descriptor processed */
+	SPS_O_INACTIVE  = 0x00000002,  /* Inactivity timeout */
+	SPS_O_WAKEUP    = 0x00000004,  /* Peripheral wake up */
+	SPS_O_OUT_OF_DESC = 0x00000008,/* Out of descriptors */
+	SPS_O_ERROR     = 0x00000010,  /* Error */
+	SPS_O_EOT       = 0x00000020,  /* End-of-transfer */
+	SPS_O_RST_ERROR = 0x00000040,  /* Pipe reset unsuccessful error */
+	SPS_O_HRESP_ERROR = 0x00000080,/* Erroneous Hresponse by AHB master */
+
+	/* Options to enable hardware features */
+	SPS_O_STREAMING = 0x00010000,  /* Enable streaming mode (no EOT) */
+	/* Use MTI/SETPEND instead of BAM interrupt */
+	SPS_O_IRQ_MTI   = 0x00020000,
+	/* NWD bit written with EOT for BAM2BAM producer pipe */
+	SPS_O_WRITE_NWD   = 0x00040000,
+	/* EOT set after pipe SW offset advanced */
+	SPS_O_LATE_EOT   = 0x00080000,
+
+	/* Options to enable software features */
+	/* Do not disable a pipe during disconnection */
+	SPS_O_NO_DISABLE      = 0x00800000,
+	/* Transfer operation should be polled */
+	SPS_O_POLL      = 0x01000000,
+	/* Disable queuing of transfer events for the connection end point */
+	SPS_O_NO_Q      = 0x02000000,
+	SPS_O_FLOWOFF   = 0x04000000,  /* Graceful halt */
+	/* SPS_O_WAKEUP will be disabled after triggered */
+	SPS_O_WAKEUP_IS_ONESHOT = 0x08000000,
+	/**
+	 * Client must read each descriptor from the FIFO
+	 * using sps_get_iovec()
+	 */
+	SPS_O_ACK_TRANSFERS = 0x10000000,
+	/* Connection is automatically enabled */
+	SPS_O_AUTO_ENABLE = 0x20000000,
+	/* DISABLE endpoint synchronization for config/enable/disable */
+	SPS_O_NO_EP_SYNC = 0x40000000,
+	/* Allow partial polling during IRQ mode */
+	SPS_O_HYBRID = 0x80000000,
+};
+
+/**
+ * This enum specifies BAM DMA channel priority.  Clients should use
+ * SPS_DMA_PRI_DEFAULT unless a specific priority is required.
+ */
+enum sps_dma_priority {
+	SPS_DMA_PRI_DEFAULT = 0,
+	SPS_DMA_PRI_LOW,
+	SPS_DMA_PRI_MED,
+	SPS_DMA_PRI_HIGH,
+};
+
+/*
+ * This enum specifies the ownership of a connection resource.
+ * Remote or shared ownership is only possible/meaningful on the processor
+ * that controls resource.
+ */
+enum sps_owner {
+	SPS_OWNER_LOCAL = 0x1,	/* Resource is owned by local processor */
+	SPS_OWNER_REMOTE = 0x2,	/* Resource is owned by a satellite processor */
+};
+
+/* This enum indicates the event associated with a client event trigger */
+enum sps_event {
+	SPS_EVENT_INVALID = 0,
+
+	SPS_EVENT_EOT,		/* End-of-transfer */
+	SPS_EVENT_DESC_DONE,	/* Descriptor processed */
+	SPS_EVENT_OUT_OF_DESC,	/* Out of descriptors */
+	SPS_EVENT_WAKEUP,	/* Peripheral wake up */
+	SPS_EVENT_FLOWOFF,	/* Graceful halt (idle) */
+	SPS_EVENT_INACTIVE,	/* Inactivity timeout */
+	SPS_EVENT_ERROR,	/* Error */
+	SPS_EVENT_RST_ERROR,    /* Pipe Reset unsuccessful */
+	SPS_EVENT_HRESP_ERROR,  /* Erroneous Hresponse by AHB master */
+	SPS_EVENT_MAX,
+};
+
+/*
+ * This enum specifies the event trigger mode and is an argument for the
+ * sps_register_event() function.
+ */
+enum sps_trigger {
+	/* Trigger with payload for callback */
+	SPS_TRIGGER_CALLBACK = 0,
+	/* Trigger without payload for wait or poll */
+	SPS_TRIGGER_WAIT,
+};
+
+/*
+ * This enum indicates the desired halting mechanism and is an argument for the
+ * sps_flow_off() function
+ */
+enum sps_flow_off {
+	SPS_FLOWOFF_FORCED = 0,	/* Force hardware into halt state */
+	/* Allow hardware to empty pipe before halting */
+	SPS_FLOWOFF_GRACEFUL,
+};
+
+/*
+ * This enum indicates the target memory heap and is an argument for the
+ * sps_mem_alloc() function.
+ */
+enum sps_mem {
+	SPS_MEM_LOCAL = 0,  /* SPS subsystem local (pipe) memory */
+	SPS_MEM_UC,	    /* Microcontroller (ARM7) local memory */
+};
+
+/*
+ * This enum indicates a timer control operation and is an argument for the
+ * sps_timer_ctrl() function.
+ */
+enum sps_timer_op {
+	SPS_TIMER_OP_CONFIG = 0,
+	SPS_TIMER_OP_RESET,
+/*   SPS_TIMER_OP_START,   Not supported by hardware yet */
+/*   SPS_TIMER_OP_STOP,    Not supported by hardware yet */
+	SPS_TIMER_OP_READ,
+};
+
+/*
+ * This enum indicates the inactivity timer operating mode and is an
+ * argument for the sps_timer_ctrl() function.
+ */
+enum sps_timer_mode {
+	SPS_TIMER_MODE_ONESHOT = 0,
+/*   SPS_TIMER_MODE_PERIODIC,    Not supported by hardware yet */
+};
+
+/* This enum indicates the cases when callback the user of BAM */
+enum sps_callback_case {
+	SPS_CALLBACK_BAM_ERROR_IRQ = 1,     /* BAM ERROR IRQ */
+	SPS_CALLBACK_BAM_HRESP_ERR_IRQ,	    /* Erroneous HResponse */
+	SPS_CALLBACK_BAM_TIMER_IRQ,	    /* Inactivity timer */
+	SPS_CALLBACK_BAM_RES_REQ,	    /* Request resource */
+	SPS_CALLBACK_BAM_RES_REL,	    /* Release resource */
+	SPS_CALLBACK_BAM_POLL,	            /* To poll each pipe */
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum sps_command_type {
+	SPS_WRITE_COMMAND = 0,
+	SPS_READ_COMMAND,
+};
+
+/**
+ * struct msm_sps_platform_data - SPS Platform specific data.
+ * @bamdma_restricted_pipes - Bitmask of pipes restricted from local use.
+ *
+ */
+struct msm_sps_platform_data {
+	u32 bamdma_restricted_pipes;
+};
+
+/**
+ * This data type corresponds to the native I/O vector (BAM descriptor)
+ * supported by SPS hardware
+ *
+ * @addr - Buffer physical address.
+ * @size - Buffer size in bytes.
+ * @flags -Flag bitmask (see SPS_IOVEC_FLAG_ #defines).
+ *
+ */
+struct sps_iovec {
+	u32 addr;
+	u32 size:16;
+	u32 flags:16;
+};
+
+/**
+ * This data type corresponds to the native Command Element
+ * supported by SPS hardware
+ *
+ * @addr - register address.
+ * @command - command type.
+ * @data - for write command: content to be written into peripheral register.
+ *         for read command: dest addr to write peripheral register value to.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct sps_command_element {
+	u32 addr:24;
+	u32 command:8;
+	u32 data;
+	u32 mask;
+	u32 reserved;
+};
+
+/*
+ * BAM device's security configuration
+ */
+struct sps_bam_pipe_sec_config_props {
+	u32 pipe_mask;
+	u32 vmid;
+};
+
+struct sps_bam_sec_config_props {
+	/* Per-EE configuration - This is a pipe bit mask for each EE */
+	struct sps_bam_pipe_sec_config_props ees[SPS_BAM_NUM_EES];
+};
+
+/**
+ * This struct defines a BAM device. The client must memset() this struct to
+ * zero before writing device information.  A value of zero for uninitialized
+ * values will instruct the SPS driver to use general defaults or
+ * hardware/BIOS supplied values.
+ *
+ *
+ * @options - See SPS_BAM_OPT_* bit flag.
+ * @phys_addr - BAM base physical address (not peripheral address).
+ * @virt_addr - BAM base virtual address.
+ * @virt_size - For virtual mapping.
+ * @irq - IRQ enum for use in ISR vector install.
+ * @num_pipes - number of pipes. Can be read from hardware.
+ * @summing_threshold - BAM event threshold.
+ *
+ * @periph_class - Peripheral device enumeration class.
+ * @periph_dev_id - Peripheral global device ID.
+ * @periph_phys_addr - Peripheral base physical address, for BAM-DMA only.
+ * @periph_virt_addr - Peripheral base virtual address.
+ * @periph_virt_size - Size for virtual mapping.
+ *
+ * @callback - callback function for BAM user.
+ * @user - pointer to user data.
+ *
+ * @event_threshold - Pipe event threshold.
+ * @desc_size - Size (bytes) of descriptor FIFO.
+ * @data_size - Size (bytes) of data FIFO.
+ * @desc_mem_id - Heap ID for default descriptor FIFO allocations.
+ * @data_mem_id - Heap ID for default data FIFO allocations.
+ *
+ * @manage - BAM device management flags (see SPS_BAM_MGR_*).
+ * @restricted_pipes - Bitmask of pipes restricted from local use.
+ * @ee - Local execution environment index.
+ *
+ * @irq_gen_addr - MTI interrupt generation address. This configuration only
+ * applies to BAM rev 1 and 2 hardware. MTIs are only supported on BAMs when
+ * global config is controlled by a remote processor.
+ * NOTE: This address must correspond to the MTI associated with the "irq" IRQ
+ * enum specified above.
+ *
+ * @sec_config - must be set to SPS_BAM_SEC_DO_CONFIG to perform BAM security
+ * configuration.  Only the processor that manages the BAM is allowed to
+ * perform the configuration. The global (top-level) BAM interrupt will be
+ * assigned to the EE of the processor that manages the BAM.
+ *
+ * @p_sec_config_props - BAM device's security configuration
+ *
+ */
+struct sps_bam_props {
+
+	/* BAM device properties. */
+
+	u32 options;
+	phys_addr_t phys_addr;
+	void *virt_addr;
+	u32 virt_size;
+	u32 irq;
+	u32 num_pipes;
+	u32 summing_threshold;
+
+	/* Peripheral device properties */
+
+	u32 periph_class;
+	u32 periph_dev_id;
+	phys_addr_t periph_phys_addr;
+	void *periph_virt_addr;
+	u32 periph_virt_size;
+
+	/* Connection pipe parameter defaults. */
+
+	u32 event_threshold;
+	u32 desc_size;
+	u32 data_size;
+	u32 desc_mem_id;
+	u32 data_mem_id;
+
+	/* Feedback to BAM user */
+	void (*callback)(enum sps_callback_case, void *);
+	void *user;
+
+	/* Security properties */
+
+	u32 manage;
+	u32 restricted_pipes;
+	u32 ee;
+
+	/* Log Level property */
+	u32 ipc_loglevel;
+
+	/* BAM MTI interrupt generation */
+
+	u32 irq_gen_addr;
+
+	/* Security configuration properties */
+
+	u32 sec_config;
+	struct sps_bam_sec_config_props *p_sec_config_props;
+
+	/* Logging control */
+
+	bool constrained_logging;
+	u32 logging_number;
+};
+
+/**
+ *  This struct specifies memory buffer properties.
+ *
+ * @base - Buffer virtual address.
+ * @phys_base - Buffer physical address.
+ * @size - Specifies buffer size (or maximum size).
+ * @min_size - If non-zero, specifies buffer minimum size.
+ *
+ */
+struct sps_mem_buffer {
+	void *base;
+	phys_addr_t phys_base;
+	unsigned long iova;
+	u32 size;
+	u32 min_size;
+};
+
+/**
+ * This struct defines a connection's end point and is used as the argument
+ * for the sps_connect(), sps_get_config(), and sps_set_config() functions.
+ * For system mode pipe, use SPS_DEV_HANDLE_MEM for the end point that
+ * corresponds to system memory.
+ *
+ * The client can force SPS to reserve a specific pipe on a BAM.
+ * If the pipe is in use, the sps_connect/set_config() will fail.
+ *
+ * @source - Source BAM.
+ * @src_pipe_index - BAM pipe index, 0 to 30.
+ * @destination - Destination BAM.
+ * @dest_pipe_index - BAM pipe index, 0 to 30.
+ *
+ * @mode - specifies which end (source or destination) of the connection will
+ * be controlled/referenced by the client.
+ *
+ * @config - This value is for future use and should be set to
+ * SPS_CONFIG_DEFAULT or left as default from sps_get_config().
+ *
+ * @options - OR'd connection end point options (see SPS_O defines).
+ *
+ * WARNING: The memory provided should be physically contiguous and non-cached.
+ * The user can use one of the following:
+ * 1. sps_alloc_mem() - allocated from pipe-memory.
+ * 2. dma_alloc_coherent() - allocate coherent DMA memory.
+ * 3. dma_map_single() - for using memory allocated by kmalloc().
+ *
+ * @desc - Descriptor FIFO.
+ * @data - Data FIFO (BAM-to-BAM mode only).
+ *
+ * @event_thresh - Pipe event threshold or derivative.
+ * @lock_group - The lock group this pipe belongs to.
+ *
+ * @sps_reserved - Reserved word - client must not modify.
+ *
+ */
+struct sps_connect {
+	unsigned long source;
+	unsigned long source_iova;
+	u32 src_pipe_index;
+	unsigned long destination;
+	unsigned long dest_iova;
+	u32 dest_pipe_index;
+
+	enum sps_mode mode;
+
+	u32 config;
+
+	enum sps_option options;
+
+	struct sps_mem_buffer desc;
+	struct sps_mem_buffer data;
+
+	u32 event_thresh;
+
+	u32 lock_group;
+
+	/* SETPEND/MTI interrupt generation parameters */
+
+	u32 irq_gen_addr;
+	u32 irq_gen_data;
+
+	u32 sps_reserved;
+
+};
+
+/**
+ * This struct defines a satellite connection's end point.  The client of the
+ * SPS driver on the satellite processor must call sps_get_config() to
+ * initialize a struct sps_connect, then copy the values from the struct
+ * sps_satellite to the struct sps_connect before making the sps_connect()
+ * call to the satellite SPS driver.
+ *
+ */
+struct sps_satellite {
+	/**
+	 * These values must be copied to either the source or destination
+	 * corresponding values in the connect struct.
+	 */
+	phys_addr_t dev;
+	u32 pipe_index;
+
+	/**
+	 * These values must be copied to the corresponding values in the
+	 * connect struct
+	 */
+	u32 config;
+	enum sps_option options;
+
+};
+
+/**
+ * This struct defines parameters for allocation of a BAM DMA channel. The
+ * client must memset() this struct to zero before writing allocation
+ * information.  A value of zero for uninitialized values will instruct
+ * the SPS driver to use defaults or "don't care".
+ *
+ * @dev - Associated BAM device handle, or SPS_DEV_HANDLE_DMA.
+ *
+ * @src_owner - Source owner processor ID.
+ * @dest_owner - Destination owner processor ID.
+ *
+ */
+struct sps_alloc_dma_chan {
+	unsigned long dev;
+
+	/* BAM DMA channel configuration parameters */
+
+	u32 threshold;
+	enum sps_dma_priority priority;
+
+	/**
+	 * Owner IDs are global host processor identifiers used by the system
+	 * SROT when establishing execution environments.
+	 */
+	u32 src_owner;
+	u32 dest_owner;
+
+};
+
+/**
+ * This struct defines parameters for an allocated BAM DMA channel.
+ *
+ * @dev - BAM DMA device handle.
+ * @dest_pipe_index - Destination/input/write pipe index.
+ * @src_pipe_index - Source/output/read pipe index.
+ *
+ */
+struct sps_dma_chan {
+	unsigned long dev;
+	u32 dest_pipe_index;
+	u32 src_pipe_index;
+};
+
+/**
+ * This struct is the payload passed when triggering a callback event
+ * object registered for an SPS connection end point.
+ *
+ * @user - Pointer registered with sps_register_event().
+ *
+ * @event_id - Which event.
+ *
+ * @iovec - The associated I/O vector. If the end point is a system-mode
+ * producer, the size will reflect the actual number of bytes written to the
+ * buffer by the pipe. NOTE: If this I/O vector was part of a set submitted to
+ * sps_transfer(), then the vector array itself will be	updated with all of
+ * the actual counts.
+ *
+ * @user - Pointer registered with the transfer.
+ *
+ */
+struct sps_event_notify {
+	void *user;
+
+	enum sps_event event_id;
+
+	/* Data associated with the event */
+
+	union {
+		/* Data for SPS_EVENT_IRQ */
+		struct {
+			u32 mask;
+		} irq;
+
+		/* Data for SPS_EVENT_EOT or SPS_EVENT_DESC_DONE */
+
+		struct {
+			struct sps_iovec iovec;
+			void *user;
+		} transfer;
+
+		/* Data for SPS_EVENT_ERROR */
+
+		struct {
+			u32 status;
+		} err;
+
+	} data;
+};
+
+/**
+ * This struct defines event registration parameters and is used as the
+ * argument for the sps_register_event() function.
+ *
+ * @options - Event options that will trigger the event object.
+ * @mode - Event trigger mode.
+ *
+ * @xfer_done - a pointer to a completion object. NULL if not in use.
+ *
+ * @callback - a callback to call on completion. NULL if not in use.
+ *
+ * @user - User pointer that will be provided in event callback data.
+ *
+ */
+struct sps_register_event {
+	enum sps_option options;
+	enum sps_trigger mode;
+	struct completion *xfer_done;
+	void (*callback)(struct sps_event_notify *notify);
+	void *user;
+};
+
+/**
+ * This struct defines a system memory transfer's parameters and is used as the
+ * argument for the sps_transfer() function.
+ *
+ * @iovec_phys - Physical address of I/O vectors buffer.
+ * @iovec - Pointer to I/O vectors buffer.
+ * @iovec_count - Number of I/O vectors.
+ * @user - User pointer passed in callback event.
+ *
+ */
+struct sps_transfer {
+	phys_addr_t iovec_phys;
+	struct sps_iovec *iovec;
+	u32 iovec_count;
+	void *user;
+};
+
+/**
+ * This struct defines timer control operation parameters and is used as an
+ * argument for the sps_timer_ctrl() function.
+ *
+ * @op - Timer control operation.
+ * @timeout_msec - Inactivity timeout (msec).
+ *
+ */
+struct sps_timer_ctrl {
+	enum sps_timer_op op;
+
+	/**
+	 * The following configuration parameters must be set when the timer
+	 * control operation is SPS_TIMER_OP_CONFIG.
+	 */
+	enum sps_timer_mode mode;
+	u32 timeout_msec;
+};
+
+/**
+ * This struct defines a timer control operation result and is used as an
+ * argument for the sps_timer_ctrl() function.
+ */
+struct sps_timer_result {
+	u32 current_timer;
+};
+
+
+/*----------------------------------------------------------------------------
+ * Functions specific to sps interface
+ * ---------------------------------------------------------------------------
+ */
+struct sps_pipe;	/* Forward declaration */
+
+#ifdef CONFIG_SPS
+/**
+ * Register a BAM device
+ *
+ * This function registers a BAM device with the SPS driver. For each
+ * peripheral that includes a BAM, the peripheral driver must register
+ * the BAM with the SPS driver.
+ *
+ * The peripheral driver must remain attached to the SPS driver until the
+ * BAM is deregistered; otherwise, the system may attempt to unload the
+ * SPS driver and the BAM registration would be lost.
+ *
+ * @bam_props - Pointer to struct for BAM device properties.
+ *
+ * @dev_handle - Device handle will be written to this location (output).
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_register_bam_device(const struct sps_bam_props *bam_props,
+			    unsigned long *dev_handle);
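+
+/*
+ * Example (illustrative sketch only; the helper and handle names below are
+ * hypothetical): a peripheral driver typically zeroes a struct
+ * sps_bam_props, fills in its BAM-specific values (register base, IRQ,
+ * options) from its platform resources, and then registers the BAM.
+ *
+ *	static unsigned long example_bam_handle;
+ *
+ *	static int example_register_bam(struct sps_bam_props *props)
+ *	{
+ *		return sps_register_bam_device(props, &example_bam_handle);
+ *	}
+ *
+ * The handle written back by this call is later used as the
+ * source/destination BAM in struct sps_connect and may be passed to
+ * sps_device_reset().
+ */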
+
+/**
+ * Deregister a BAM device
+ *
+ * This function deregisters a BAM device from the SPS driver. The peripheral
+ * driver should deregister a BAM when the peripheral driver is shut down or
+ * when BAM use should be disabled.
+ *
+ * A BAM cannot be deregistered if any of its pipes is in an active connection.
+ *
+ * When all BAMs have been deregistered, the system is free to unload the
+ * SPS driver.
+ *
+ * @dev_handle - BAM device handle.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_deregister_bam_device(unsigned long dev_handle);
+
+/**
+ * Allocate client state context
+ *
+ * This function allocates and initializes a client state context struct.
+ *
+ * @return pointer to client state context
+ *
+ */
+struct sps_pipe *sps_alloc_endpoint(void);
+
+/**
+ * Free client state context
+ *
+ * This function de-initializes and frees a client state context struct.
+ *
+ * @ctx - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_endpoint(struct sps_pipe *h);
+
+/**
+ * Get the configuration parameters for an SPS connection end point
+ *
+ * This function retrieves the configuration parameters for an SPS connection
+ * end point.
+ * This function may be called before the end point is connected (before
+ * sps_connect is called). This allows the client to specify parameters before
+ * the connection is established.
+ *
+ * The client must call this function to fill its struct sps_connect
+ * before modifying values and passing the struct to sps_set_config().
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @config - Pointer to buffer for the end point's configuration parameters.
+ * Must not be NULL.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_config(struct sps_pipe *h, struct sps_connect *config);
+
+/**
+ * Allocate memory from the SPS Pipe-Memory.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mem - memory type - N/A.
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer);
+
+/**
+ * Free memory from the SPS Pipe-Memory.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_mem(struct sps_pipe *h, struct sps_mem_buffer *mem_buffer);
+
+/**
+ * Connect an SPS connection end point
+ *
+ * This function creates a connection between two SPS peripherals or between
+ * an SPS peripheral and the local host processor (via system memory, end
+ * point SPS_DEV_HANDLE_MEM). Establishing the connection includes
+ * initialization of the SPS hardware and allocation of any other connection
+ * resources (buffer memory, etc.).
+ *
+ * This function requires the client to specify both the source and
+ * destination end points of the SPS connection. However, the handle
+ * returned applies only to the end point of the connection that the client
+ * controls. The end point under control must be specified by the
+ * enum sps_mode mode argument, either SPS_MODE_SRC, SPS_MODE_DEST, or
+ * SPS_MODE_CTL. Note that SPS_MODE_CTL is only supported for I/O
+ * accelerator connections, and only a limited set of control operations are
+ * allowed (TBD).
+ *
+ * For a connection involving system memory
+ * (SPS_DEV_HANDLE_MEM), the peripheral end point must be
+ * specified. For example, SPS_MODE_SRC must be specified for a
+ * BAM-to-system connection, since the BAM pipe is the data
+ * producer.
+ *
+ * For a specific peripheral-to-peripheral connection, there may be more than
+ * one required configuration. For example, there might be high-performance
+ * and low-power configurations for a connection between the two peripherals.
+ * The config argument allows the client to specify different configurations,
+ * which may require different system resource allocations and hardware
+ * initialization.
+ *
+ * A client is allowed to create one and only one connection for its
+ * struct sps_pipe. The handle is used to identify the connection end point
+ * in subsequent SPS driver calls. A specific connection source or
+ * destination end point can be associated with one and only one
+ * struct sps_pipe.
+ *
+ * The client must establish an open device handle to the SPS. To do so, the
+ * client must attach to the SPS driver and open the SPS device by calling
+ * the following functions.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @connect - Pointer to connection parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_connect(struct sps_pipe *h, struct sps_connect *connect);
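+
+/*
+ * Example (illustrative sketch, not part of the original documentation):
+ * connecting a system-mode pipe in which a BAM produces data into system
+ * memory. "bam_hdl", "dev", the pipe index and the descriptor FIFO size are
+ * placeholders chosen purely for illustration; error handling is omitted.
+ *
+ *	struct sps_pipe *pipe = sps_alloc_endpoint();
+ *	struct sps_connect conn;
+ *	dma_addr_t desc_phys;
+ *	int rc;
+ *
+ *	rc = sps_get_config(pipe, &conn);
+ *	conn.source = bam_hdl;
+ *	conn.src_pipe_index = 4;
+ *	conn.destination = SPS_DEV_HANDLE_MEM;
+ *	conn.mode = SPS_MODE_SRC;
+ *	conn.options = SPS_O_EOT;
+ *	conn.desc.size = 0x800;
+ *	conn.desc.base = dma_alloc_coherent(dev, conn.desc.size, &desc_phys,
+ *					    GFP_KERNEL);
+ *	conn.desc.phys_base = desc_phys;
+ *	rc = sps_connect(pipe, &conn);
+ */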
+
+/**
+ * Disconnect an SPS connection end point
+ *
+ * This function disconnects an SPS connection end point.
+ * The SPS hardware associated with that end point will be disabled.
+ * For a connection involving system memory (SPS_DEV_HANDLE_MEM), all
+ * connection resources are deallocated. For a peripheral-to-peripheral
+ * connection, the resources associated with the connection will not be
+ * deallocated until both end points are closed.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_disconnect(struct sps_pipe *h);
+
+/**
+ * Register an event object for an SPS connection end point
+ *
+ * This function registers a callback event object for an SPS connection end
+ * point. The registered event object will be triggered for the set of
+ * events specified in reg->options that are enabled for the end point.
+ *
+ * There can only be one registered event object for each event. If an event
+ * object is already registered for an event, it will be replaced. If
+ * reg->event handle is NULL, then any registered event object for the
+ * event will be deregistered. Option bits in reg->options not associated
+ * with events are ignored.
+ *
+ * The client must call sps_connect() for the handle before calling
+ * this function.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @reg - Pointer to event registration parameters
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_register_event(struct sps_pipe *h, struct sps_register_event *reg);
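+
+/*
+ * Example (illustrative sketch): registering a callback for end-of-transfer
+ * events on a connected end point. The callback name and "ctx" are
+ * hypothetical, and SPS_TRIGGER_CALLBACK is assumed to be one of the
+ * enum sps_trigger values.
+ *
+ *	static void example_eot_cb(struct sps_event_notify *notify)
+ *	{
+ *		pr_debug("sps event %d\n", notify->event_id);
+ *	}
+ *
+ *	struct sps_register_event reg = {
+ *		.options = SPS_O_EOT,
+ *		.mode = SPS_TRIGGER_CALLBACK,
+ *		.callback = example_eot_cb,
+ *		.user = ctx,
+ *	};
+ *
+ *	rc = sps_register_event(pipe, &reg);
+ */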
+
+/**
+ * Perform a single DMA transfer on an SPS connection end point
+ *
+ * This function submits a DMA transfer request consisting of a single buffer
+ * for an SPS connection end point associated with a peripheral-to/from-memory
+ * connection. The request will be submitted immediately to hardware if the
+ * hardware is idle (data flow off, no other pending transfers). Otherwise, it
+ * will be queued for later handling in the SPS driver work loop.
+ *
+ * The data buffer must be DMA ready. The client is responsible for ensuring
+ * physically contiguous memory, cache maintenance, and memory barriers. For
+ * more information, see Appendix A.
+ *
+ * The client must not modify the data buffer until the completion indication is
+ * received.
+ *
+ * This function cannot be used if transfer queuing is disabled (see option
+ * SPS_O_NO_Q). The client must set the SPS_O_EOT option to receive a callback
+ * event trigger when the transfer is complete. The SPS driver will ensure the
+ * appropriate flags in the I/O vectors are set to generate the completion
+ * indication.
+ *
+ * The return value from this function may indicate that an error occurred.
+ * Possible causes include invalid arguments.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @addr - Physical address of buffer to transfer.
+ *
+ * WARNING: The memory provided should be physically contiguous and
+ * non-cached.
+ *
+ * The user can use one of the following:
+ * 1. sps_alloc_mem() - allocated from pipe-memory.
+ * 2. dma_alloc_coherent() - allocate DMA memory.
+ * 3. dma_map_single() for memory allocated by kmalloc().
+ *
+ * @size - Size in bytes of buffer to transfer
+ *
+ * @user - User pointer that will be returned to user as part of
+ *  event payload
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr, u32 size,
+		     void *user, u32 flags);
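+
+/*
+ * Example (illustrative sketch): submitting a single kmalloc'd buffer. The
+ * SPS_IOVEC_FLAG_* names passed in "flags" are assumed here, and "dev",
+ * "len" and "ctx" are placeholders.
+ *
+ *	void *buf = kmalloc(len, GFP_KERNEL);
+ *	dma_addr_t phys = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+ *
+ *	rc = sps_transfer_one(pipe, phys, len, ctx,
+ *			      SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT);
+ *
+ * The buffer must be left untouched until the completion event (registered
+ * via sps_register_event()) arrives, and must then be unmapped with
+ * dma_unmap_single().
+ */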
+
+/**
+ * Read event queue for an SPS connection end point
+ *
+ * This function reads event queue for an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @event - pointer to client's event data buffer
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_event(struct sps_pipe *h, struct sps_event_notify *event);
+
+/**
+ * Get processed I/O vector (completed transfers)
+ *
+ * This function fetches the next processed I/O vector.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @iovec - Pointer to I/O vector struct (output).
+ * This struct will be zeroed if there are no more processed I/O vectors.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec);
+
+/**
+ * Enable an SPS connection end point
+ *
+ * This function enables an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_flow_on(struct sps_pipe *h);
+
+/**
+ * Disable an SPS connection end point
+ *
+ * This function disables an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @mode - Desired mode for disabling pipe data flow
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode);
+
+/**
+ * Perform a Multiple DMA transfer on an SPS connection end point
+ *
+ * This function submits a DMA transfer request for an SPS connection end point
+ * associated with a peripheral-to/from-memory connection. The request will be
+ * submitted immediately to hardware if the hardware is idle (data flow off, no
+ * other pending transfers). Otherwise, it will be queued for later handling in
+ * the SPS driver work loop.
+ *
+ * The data buffers referenced by the I/O vectors must be DMA ready.
+ * The client is responsible for ensuring physically contiguous memory,
+ * any cache maintenance, and memory barriers. For more information,
+ * see Appendix A.
+ *
+ * The I/O vectors must specify physical addresses for the referenced buffers.
+ *
+ * The client must not modify the data buffers referenced by I/O vectors until
+ * the completion indication is received.
+ *
+ * If transfer queuing is disabled (see option SPS_O_NO_Q), the client is
+ * responsible for setting the appropriate flags in the I/O vectors to generate
+ * the completion indication. Also, the client is responsible for enabling the
+ * appropriate connection callback event options for completion indication (see
+ * sps_connect(), sps_set_config()).
+ *
+ * If transfer queuing is enabled, the client must set the SPS_O_EOT option to
+ * receive a callback event trigger when the transfer is complete. The SPS
+ * driver will ensure the appropriate flags in the I/O vectors are set to
+ * generate the completion indication. The client must not set any flags in the
+ * I/O vectors, as this may cause the SPS driver to become out of sync with the
+ * hardware.
+ *
+ * The return value from this function may indicate that an error occurred.
+ * Possible causes include invalid arguments. If transfer queuing is disabled,
+ * an error will occur if the pipe is already processing a transfer.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @transfer - Pointer to transfer parameter struct
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_transfer(struct sps_pipe *h, struct sps_transfer *transfer);
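+
+/*
+ * Example (illustrative sketch): building a small descriptor list in
+ * coherent memory and submitting it in one call. The struct sps_iovec field
+ * names (addr, size, flags) are assumed from the rest of this interface;
+ * "dev", "n", "buf_phys[]", "buf_len[]" and "ctx" are placeholders.
+ *
+ *	struct sps_transfer xfer;
+ *	struct sps_iovec *iov;
+ *	dma_addr_t iov_phys;
+ *	int i;
+ *
+ *	iov = dma_alloc_coherent(dev, n * sizeof(*iov), &iov_phys, GFP_KERNEL);
+ *	for (i = 0; i < n; i++) {
+ *		iov[i].addr = buf_phys[i];
+ *		iov[i].size = buf_len[i];
+ *		iov[i].flags = 0;
+ *	}
+ *
+ *	xfer.iovec = iov;
+ *	xfer.iovec_phys = iov_phys;
+ *	xfer.iovec_count = n;
+ *	xfer.user = ctx;
+ *	rc = sps_transfer(pipe, &xfer);
+ */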
+
+/**
+ * Determine whether an SPS connection end point FIFO is empty
+ *
+ * This function returns the empty state of an SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @empty - pointer to client's empty status word (boolean)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty);
+
+/**
+ * Reset an SPS BAM device
+ *
+ * This function resets an SPS BAM device.
+ *
+ * @dev - device handle for the BAM
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_device_reset(unsigned long dev);
+
+/**
+ * Set the configuration parameters for an SPS connection end point
+ *
+ * This function sets the configuration parameters for an SPS connection
+ * end point. This function may be called before the end point is connected
+ * (before sps_connect is called). This allows the client to specify
+ * parameters before the connection is established. The client is allowed
+ * to pre-allocate resources and override driver defaults.
+ *
+ * The client must call sps_get_config() to fill its struct sps_connect
+ * before modifying values and passing the struct to this function.
+ * Only those parameters that differ from the current configuration will
+ * be processed.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @config - Pointer to the end point's new configuration parameters.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_set_config(struct sps_pipe *h, struct sps_connect *config);
+
+/**
+ * Set ownership of an SPS connection end point
+ *
+ * This function sets the ownership of an SPS connection end point to
+ * either local (default) or non-local. This function is used to
+ * retrieve the struct sps_connect data that must be used by a
+ * satellite processor when calling sps_connect().
+ *
+ * Non-local ownership is only possible/meaningful on the processor
+ * that controls resource allocations (apps processor). Setting ownership
+ * to non-local on a satellite processor will fail.
+ *
+ * Setting ownership from non-local to local will succeed only if the
+ * owning satellite processor has properly brought the end point to
+ * an idle condition.
+ *
+ * This function will succeed if the connection end point is already in
+ * the specified ownership state.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @owner - New ownership of the connection end point
+ *
+ * @connect - Pointer to buffer for satellite processor connect data.
+ *  Can be NULL to avoid retrieving the connect data. Will be ignored
+ *  if the end point ownership is set to local.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect);
+
+#ifdef CONFIG_SPS_SUPPORT_BAMDMA
+/**
+ * Allocate a BAM DMA channel
+ *
+ * This function allocates a BAM DMA channel. A "BAM DMA" is a special
+ * DMA peripheral with a BAM front end. The DMA peripheral acts as a conduit
+ * for data to flow into a consumer pipe and then out of a producer pipe.
+ * Its primary purpose is to serve as a path for interprocessor communication
+ * that allows each processor to control and protect its own memory space.
+ *
+ * @alloc - Pointer to struct for BAM DMA channel allocation properties.
+ *
+ * @chan - Allocated channel information will be written to this
+ *  location (output).
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan);
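+
+/*
+ * Example (illustrative sketch): allocating a BAM DMA channel with default
+ * settings. Per the note above, the allocation struct is zeroed first so
+ * that unset fields take driver defaults.
+ *
+ *	struct sps_alloc_dma_chan alloc;
+ *	struct sps_dma_chan chan;
+ *	int rc;
+ *
+ *	memset(&alloc, 0, sizeof(alloc));
+ *	alloc.dev = SPS_DEV_HANDLE_DMA;
+ *	rc = sps_alloc_dma_chan(&alloc, &chan);
+ *
+ * The returned chan.src_pipe_index/chan.dest_pipe_index are then used with
+ * the BAM-DMA handle (see sps_dma_get_bam_handle()) when filling a
+ * struct sps_connect.
+ */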
+
+/**
+ * Free a BAM DMA channel
+ *
+ * This function frees a BAM DMA channel.
+ *
+ * @chan - Pointer to information for channel to free
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_free_dma_chan(struct sps_dma_chan *chan);
+
+/**
+ * Get the BAM handle for BAM-DMA.
+ *
+ * The BAM handle should be used as the source/destination in sps_connect().
+ *
+ * @return handle on success, zero on error
+ *
+ */
+unsigned long sps_dma_get_bam_handle(void);
+
+/**
+ * Free the BAM handle for BAM-DMA.
+ *
+ */
+void sps_dma_free_bam_handle(unsigned long h);
+#else
+static inline int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline unsigned long sps_dma_get_bam_handle(void)
+{
+	return 0;
+}
+
+static inline void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+#endif
+
+/**
+ * Get number of free transfer entries for an SPS connection end point
+ *
+ * This function returns the number of free transfer entries for an
+ * SPS connection end point.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @count - pointer to count status
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_free_count(struct sps_pipe *h, u32 *count);
+
+/**
+ * Perform timer control
+ *
+ * This function performs timer control operations.
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @timer_ctrl - Pointer to timer control specification
+ *
+ * @timer_result - Pointer to buffer for timer operation result.
+ *  This argument can be NULL if no result is expected for the operation.
+ *  If non-NULL, the current timer value will always be provided.
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_timer_ctrl(struct sps_pipe *h,
+		   struct sps_timer_ctrl *timer_ctrl,
+		   struct sps_timer_result *timer_result);
+
+/**
+ * Find the handle of a BAM device based on the physical address
+ *
+ * This function finds a BAM device in the BAM registration list that
+ * matches the specified physical address, and returns its handle.
+ *
+ * @phys_addr - physical address of the BAM
+ *
+ * @handle - device handle of the BAM (output)
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle);
+
+/**
+ * Setup desc/data FIFO for bam-to-bam connection
+ *
+ * @mem_buffer - Pointer to struct for allocated memory properties.
+ *
+ * @addr - address of FIFO
+ *
+ * @size - FIFO size
+ *
+ * @use_offset - use address offset instead of absolute address
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset);
+
+/**
+ * Get the number of unused descriptors in the descriptor FIFO
+ * of a pipe
+ *
+ * @h - client context for SPS connection end point
+ *
+ * @desc_num - number of unused descriptors
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num);
+
+/**
+ * Get the debug info of BAM registers and descriptor FIFOs
+ *
+ * @dev - BAM device handle
+ *
+ * @option - debugging option
+ *
+ * @para - parameter used for an option (such as pipe combination)
+ *
+ * @tb_sel - testbus selection
+ *
+ * @desc_sel - selection of descriptors
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_get_bam_debug_info(unsigned long dev, u32 option, u32 para,
+		u32 tb_sel, u32 desc_sel);
+
+/**
+ * Vote for or relinquish BAM DMA clock
+ *
+ * @clk_on - to turn on or turn off the clock
+ *
+ * @return 0 on success, negative value on error
+ *
+ */
+int sps_ctrl_bam_dma_clk(bool clk_on);
+
+/*
+ * sps_pipe_reset - reset a pipe of a BAM.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ *
+ * This function resets a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_reset(unsigned long dev, u32 pipe);
+
+/*
+ * sps_pipe_disable - disable a pipe of a BAM.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ *
+ * This function disables a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_disable(unsigned long dev, u32 pipe);
+
+/*
+ * sps_pipe_pending_desc - checking pending descriptor.
+ * @dev:	BAM device handle
+ * @pipe:	pipe index
+ * @pending:	indicate if there is any pending descriptor.
+ *
+ * This function checks if a pipe of a BAM has any pending descriptor.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_pending_desc(unsigned long dev, u32 pipe, bool *pending);
+
+/*
+ * sps_bam_process_irq - process IRQ of a BAM.
+ * @dev:	BAM device handle
+ *
+ * This function processes any pending IRQ of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_bam_process_irq(unsigned long dev);
+
+/*
+ * sps_get_bam_addr - get address info of a BAM.
+ * @dev:	BAM device handle
+ * @base:	beginning address
+ * @size:	address range size
+ *
+ * This function returns the address info of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size);
+
+/*
+ * sps_pipe_inject_zlt - inject a ZLT with EOT.
+ * @dev:	BAM device handle
+ * @pipe_index:	pipe index
+ *
+ * This function injects a ZLT with EOT for a pipe of a BAM.
+ *
+ * Return: 0 on success, negative value on error
+ */
+int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index);
+#else
+static inline int sps_register_bam_device(const struct sps_bam_props
+			*bam_props, unsigned long *dev_handle)
+{
+	return -EPERM;
+}
+
+static inline int sps_deregister_bam_device(unsigned long dev_handle)
+{
+	return -EPERM;
+}
+
+static inline struct sps_pipe *sps_alloc_endpoint(void)
+{
+	return NULL;
+}
+
+static inline int sps_free_endpoint(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	return -EPERM;
+}
+
+static inline int sps_alloc_mem(struct sps_pipe *h, enum sps_mem mem,
+		  struct sps_mem_buffer *mem_buffer)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_mem(struct sps_pipe *h,
+				struct sps_mem_buffer *mem_buffer)
+{
+	return -EPERM;
+}
+
+static inline int sps_connect(struct sps_pipe *h, struct sps_connect *connect)
+{
+	return -EPERM;
+}
+
+static inline int sps_disconnect(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_register_event(struct sps_pipe *h,
+					struct sps_register_event *reg)
+{
+	return -EPERM;
+}
+
+static inline int sps_transfer_one(struct sps_pipe *h, phys_addr_t addr,
+					u32 size, void *user, u32 flags)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_event(struct sps_pipe *h,
+				struct sps_event_notify *event)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_iovec(struct sps_pipe *h, struct sps_iovec *iovec)
+{
+	return -EPERM;
+}
+
+static inline int sps_flow_on(struct sps_pipe *h)
+{
+	return -EPERM;
+}
+
+static inline int sps_flow_off(struct sps_pipe *h, enum sps_flow_off mode)
+{
+	return -EPERM;
+}
+
+static inline int sps_transfer(struct sps_pipe *h,
+				struct sps_transfer *transfer)
+{
+	return -EPERM;
+}
+
+static inline int sps_is_pipe_empty(struct sps_pipe *h, u32 *empty)
+{
+	return -EPERM;
+}
+
+static inline int sps_device_reset(unsigned long dev)
+{
+	return -EPERM;
+}
+
+static inline int sps_set_config(struct sps_pipe *h, struct sps_connect *config)
+{
+	return -EPERM;
+}
+
+static inline int sps_set_owner(struct sps_pipe *h, enum sps_owner owner,
+		  struct sps_satellite *connect)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_free_count(struct sps_pipe *h, u32 *count)
+{
+	return -EPERM;
+}
+
+static inline int sps_alloc_dma_chan(const struct sps_alloc_dma_chan *alloc,
+		       struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline int sps_free_dma_chan(struct sps_dma_chan *chan)
+{
+	return -EPERM;
+}
+
+static inline unsigned long sps_dma_get_bam_handle(void)
+{
+	return 0;
+}
+
+static inline void sps_dma_free_bam_handle(unsigned long h)
+{
+}
+
+static inline int sps_timer_ctrl(struct sps_pipe *h,
+		   struct sps_timer_ctrl *timer_ctrl,
+		   struct sps_timer_result *timer_result)
+{
+	return -EPERM;
+}
+
+static inline int sps_phy2h(phys_addr_t phys_addr, unsigned long *handle)
+{
+	return -EPERM;
+}
+
+static inline int sps_setup_bam2bam_fifo(struct sps_mem_buffer *mem_buffer,
+		  u32 addr, u32 size, int use_offset)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_unused_desc_num(struct sps_pipe *h, u32 *desc_num)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_bam_debug_info(unsigned long dev, u32 option,
+		u32 para, u32 tb_sel, u32 desc_sel)
+{
+	return -EPERM;
+}
+
+static inline int sps_ctrl_bam_dma_clk(bool clk_on)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_reset(unsigned long dev, u32 pipe)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_disable(unsigned long dev, u32 pipe)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_pending_desc(unsigned long dev, u32 pipe,
+					bool *pending)
+{
+	return -EPERM;
+}
+
+static inline int sps_bam_process_irq(unsigned long dev)
+{
+	return -EPERM;
+}
+
+static inline int sps_get_bam_addr(unsigned long dev, phys_addr_t *base,
+				u32 *size)
+{
+	return -EPERM;
+}
+
+static inline int sps_pipe_inject_zlt(unsigned long dev, u32 pipe_index)
+{
+	return -EPERM;
+}
+#endif
+
+#endif /* _SPS_H_ */
diff --git a/include/linux/msm_bus_rules.h b/include/linux/msm_bus_rules.h
new file mode 100644
index 0000000..1cc83fa
--- /dev/null
+++ b/include/linux/msm_bus_rules.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_BUS_RULES_H
+#define _ARCH_ARM_MACH_MSM_BUS_RULES_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/notifier.h>
+#include <dt-bindings/msm/msm-bus-rule-ops.h>
+
+#define MAX_NODES		(5)
+
+struct rule_update_path_info {
+	u32 id;
+	u64 ab;
+	u64 ib;
+	u64 clk;
+	bool added;
+	struct list_head link;
+};
+
+struct rule_apply_rcm_info {
+	u32 id;
+	u64 lim_bw;
+	int throttle;
+	bool after_clk_commit;
+	struct list_head link;
+};
+
+struct bus_rule_type {
+	int num_src;
+	int *src_id;
+	int src_field;
+	int op;
+	u64 thresh;
+	int num_dst;
+	int *dst_node;
+	u64 dst_bw;
+	int mode;
+	void *client_data;
+};
+
+void msm_rule_register(int num_rules, struct bus_rule_type *rule,
+				struct notifier_block *nb);
+void msm_rule_unregister(int num_rules, struct bus_rule_type *rule,
+						struct notifier_block *nb);
+bool msm_rule_update(struct bus_rule_type *old_rule,
+				struct bus_rule_type *new_rule,
+				struct notifier_block *nb);
+void msm_rule_evaluate_rules(int node);
+void print_rules_buf(char *buf, int count);
+bool msm_rule_are_rules_registered(void);
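+
+/*
+ * Example (illustrative sketch): registering a single bus rule together with
+ * a notifier. All IDs and bandwidth values below are placeholders; the op
+ * and src_field encodings come from dt-bindings/msm/msm-bus-rule-ops.h and
+ * are left at zero here.
+ *
+ *	static int example_rule_cb(struct notifier_block *nb,
+ *				   unsigned long action, void *data)
+ *	{
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block example_nb = {
+ *		.notifier_call = example_rule_cb,
+ *	};
+ *
+ *	static int example_src[] = { 100 };
+ *	static int example_dst[] = { 200 };
+ *
+ *	static struct bus_rule_type example_rule = {
+ *		.num_src = ARRAY_SIZE(example_src),
+ *		.src_id = example_src,
+ *		.thresh = 1000000ULL,
+ *		.num_dst = ARRAY_SIZE(example_dst),
+ *		.dst_node = example_dst,
+ *		.dst_bw = 500000ULL,
+ *	};
+ *
+ *	msm_rule_register(1, &example_rule, &example_nb);
+ */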
+#endif /* _ARCH_ARM_MACH_MSM_BUS_RULES_H */
diff --git a/include/linux/msm_dma_iommu_mapping.h b/include/linux/msm_dma_iommu_mapping.h
new file mode 100644
index 0000000..8d70c12
--- /dev/null
+++ b/include/linux/msm_dma_iommu_mapping.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_MSM_DMA_IOMMU_MAPPING_H
+#define _LINUX_MSM_DMA_IOMMU_MAPPING_H
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+
+#ifdef CONFIG_QCOM_LAZY_MAPPING
+/*
+ * This function does not take a reference to the dma_buf. Clients are
+ * expected to hold a reference to the dma_buf until they are done with
+ * mapping and unmapping.
+ */
+int msm_dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
+		   enum dma_data_direction dir, struct dma_buf *dma_buf,
+		   unsigned long attrs);
+
+static inline int msm_dma_map_sg_lazy(struct device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction dir,
+			       struct dma_buf *dma_buf)
+{
+	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, 0);
+}
+
+static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
+				  int nents, enum dma_data_direction dir,
+				  struct dma_buf *dma_buf)
+{
+	unsigned long attrs;
+
+	attrs = DMA_ATTR_NO_DELAYED_UNMAP;
+	return msm_dma_map_sg_attrs(dev, sg, nents, dir, dma_buf, attrs);
+}
+
+void msm_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		      enum dma_data_direction dir, struct dma_buf *dma_buf);
+
+int msm_dma_unmap_all_for_dev(struct device *dev);
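+
+/*
+ * Example (illustrative sketch): mapping a dma-buf's scatterlist with lazy
+ * unmapping, assuming an exporter (such as ION on these kernels) that
+ * defers the actual IOMMU mapping to this call. "dmabuf" and "dev" are
+ * placeholders and error handling is omitted.
+ *
+ *	struct dma_buf_attachment *attach;
+ *	struct sg_table *table;
+ *	int nents;
+ *
+ *	attach = dma_buf_attach(dmabuf, dev);
+ *	table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ *	nents = msm_dma_map_sg_lazy(dev, table->sgl, table->nents,
+ *				    DMA_BIDIRECTIONAL, dmabuf);
+ *
+ * A later msm_dma_unmap_sg() only drops the reference taken here; use
+ * msm_dma_map_sg() instead when the mapping must be torn down immediately
+ * on unmap.
+ */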
+
+/*
+ * Below is a private function, only to be called by the framework (ION) and
+ * not by clients.
+ */
+void msm_dma_buf_freed(void *buffer);
+
+#else /*CONFIG_QCOM_LAZY_MAPPING*/
+
+static inline int msm_dma_map_sg_attrs(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir, struct dma_buf *dma_buf,
+			unsigned long attrs)
+{
+	return -EINVAL;
+}
+
+static inline int msm_dma_map_sg_lazy(struct device *dev,
+			       struct scatterlist *sg, int nents,
+			       enum dma_data_direction dir,
+			       struct dma_buf *dma_buf)
+{
+	return -EINVAL;
+}
+
+static inline int msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
+				  int nents, enum dma_data_direction dir,
+				  struct dma_buf *dma_buf)
+{
+	return -EINVAL;
+}
+
+static inline void msm_dma_unmap_sg(struct device *dev,
+					struct scatterlist *sgl, int nents,
+					enum dma_data_direction dir,
+					struct dma_buf *dma_buf)
+{
+}
+
+static inline int msm_dma_unmap_all_for_dev(struct device *dev)
+{
+	return 0;
+}
+
+static inline void msm_dma_buf_freed(void *buffer) {}
+#endif /*CONFIG_QCOM_LAZY_MAPPING*/
+
+#endif
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
new file mode 100644
index 0000000..fb2607d
--- /dev/null
+++ b/include/linux/msm_gsi.h
@@ -0,0 +1,1235 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef MSM_GSI_H
+#define MSM_GSI_H
+#include <linux/types.h>
+
+enum gsi_ver {
+	GSI_VER_ERR = 0,
+	GSI_VER_1_0 = 1,
+	GSI_VER_1_2 = 2,
+	GSI_VER_1_3 = 3,
+	GSI_VER_MAX,
+};
+
+enum gsi_status {
+	GSI_STATUS_SUCCESS = 0,
+	GSI_STATUS_ERROR = 1,
+	GSI_STATUS_RING_INSUFFICIENT_SPACE = 2,
+	GSI_STATUS_RING_EMPTY = 3,
+	GSI_STATUS_RES_ALLOC_FAILURE = 4,
+	GSI_STATUS_BAD_STATE = 5,
+	GSI_STATUS_INVALID_PARAMS = 6,
+	GSI_STATUS_UNSUPPORTED_OP = 7,
+	GSI_STATUS_NODEV = 8,
+	GSI_STATUS_POLL_EMPTY = 9,
+	GSI_STATUS_EVT_RING_INCOMPATIBLE = 10,
+	GSI_STATUS_TIMED_OUT = 11,
+	GSI_STATUS_AGAIN = 12,
+};
+
+enum gsi_per_evt {
+	GSI_PER_EVT_GLOB_ERROR,
+	GSI_PER_EVT_GLOB_GP1,
+	GSI_PER_EVT_GLOB_GP2,
+	GSI_PER_EVT_GLOB_GP3,
+	GSI_PER_EVT_GENERAL_BREAK_POINT,
+	GSI_PER_EVT_GENERAL_BUS_ERROR,
+	GSI_PER_EVT_GENERAL_CMD_FIFO_OVERFLOW,
+	GSI_PER_EVT_GENERAL_MCS_STACK_OVERFLOW,
+};
+
+/**
+ * gsi_per_notify - Peripheral callback info
+ *
+ * @user_data: cookie supplied in gsi_register_device
+ * @evt_id:    type of notification
+ * @err_desc:  error related information
+ *
+ */
+struct gsi_per_notify {
+	void *user_data;
+	enum gsi_per_evt evt_id;
+	union {
+		uint16_t err_desc;
+	} data;
+};
+
+enum gsi_intr_type {
+	GSI_INTR_MSI = 0x0,
+	GSI_INTR_IRQ = 0x1
+};
+
+
+/**
+ * gsi_per_props - Peripheral related properties
+ *
+ * @gsi:        GSI core version
+ * @ee:         EE where this driver and peripheral driver runs
+ * @intr:       control interrupt type
+ * @intvec:     write data for MSI write
+ * @msi_addr:   MSI address
+ * @irq:        IRQ number
+ * @phys_addr:  physical address of GSI block
+ * @size:       register size of GSI block
+ * @notify_cb:  general notification callback
+ * @req_clk_cb: callback to request peripheral clock
+ *		granted should be set to true if request is completed
+ *		synchronously, false otherwise (peripheral needs
+ *		to call gsi_complete_clk_grant later when request is
+ *		completed)
+ *		If this callback is not provided, then GSI will assume the
+ *		peripheral is clocked at all times.
+ * @rel_clk_cb: callback to release peripheral clock
+ * @user_data:  cookie used for notifications
+ *
+ * All the callbacks are in interrupt context
+ *
+ */
+struct gsi_per_props {
+	enum gsi_ver ver;
+	unsigned int ee;
+	enum gsi_intr_type intr;
+	uint32_t intvec;
+	uint64_t msi_addr;
+	unsigned int irq;
+	phys_addr_t phys_addr;
+	unsigned long size;
+	void (*notify_cb)(struct gsi_per_notify *notify);
+	void (*req_clk_cb)(void *user_data, bool *granted);
+	int (*rel_clk_cb)(void *user_data);
+	void *user_data;
+};
+
+enum gsi_evt_err {
+	GSI_EVT_OUT_OF_BUFFERS_ERR = 0x0,
+	GSI_EVT_OUT_OF_RESOURCES_ERR = 0x1,
+	GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR = 0x2,
+	GSI_EVT_EVT_RING_EMPTY_ERR = 0x3,
+};
+
+/**
+ * gsi_evt_err_notify - event ring error callback info
+ *
+ * @user_data: cookie supplied in gsi_alloc_evt_ring
+ * @evt_id:    type of error
+ * @err_desc:  more info about the error
+ *
+ */
+struct gsi_evt_err_notify {
+	void *user_data;
+	enum gsi_evt_err evt_id;
+	uint16_t err_desc;
+};
+
+enum gsi_evt_chtype {
+	GSI_EVT_CHTYPE_MHI_EV = 0x0,
+	GSI_EVT_CHTYPE_XHCI_EV = 0x1,
+	GSI_EVT_CHTYPE_GPI_EV = 0x2,
+	GSI_EVT_CHTYPE_XDCI_EV = 0x3
+};
+
+enum gsi_evt_ring_elem_size {
+	GSI_EVT_RING_RE_SIZE_4B = 4,
+	GSI_EVT_RING_RE_SIZE_16B = 16,
+};
+
+/**
+ * gsi_evt_ring_props - Event ring related properties
+ *
+ * @intf:            interface type (of the associated channel)
+ * @intr:            interrupt type
+ * @re_size:         size of event ring element
+ * @ring_len:        length of ring in bytes (must be integral multiple of
+ *                   re_size)
+ * @ring_base_addr:  physical base address of ring. Address must be aligned to
+ *		     ring_len rounded to power of two
+ * @ring_base_vaddr: virtual base address of ring (set to NULL when not
+ *                   applicable)
+ * @int_modt:        cycles base interrupt moderation (32KHz clock)
+ * @int_modc:        interrupt moderation packet counter
+ * @intvec:          write data for MSI write
+ * @msi_addr:        MSI address
+ * @rp_update_addr:  physical address to which event read pointer should be
+ *                   written on every event generation. Must be set to 0 when
+ *                   no update is desired
+ * @exclusive:       if true, only one GSI channel can be associated with this
+ *                   event ring. if false, the event ring can be shared among
+ *                   multiple GSI channels but in that case no polling
+ *                   (GSI_CHAN_MODE_POLL) is supported on any of those channels
+ * @err_cb:          error notification callback
+ * @user_data:       cookie used for error notifications
+ * @evchid_valid:    is evchid valid?
+ * @evchid:          the event ID that is being specifically requested (this is
+ *                   relevant for MHI where doorbell routing requires ERs to be
+ *                   physically contiguous)
+ */
+struct gsi_evt_ring_props {
+	enum gsi_evt_chtype intf;
+	enum gsi_intr_type intr;
+	enum gsi_evt_ring_elem_size re_size;
+	uint16_t ring_len;
+	uint64_t ring_base_addr;
+	void *ring_base_vaddr;
+	uint16_t int_modt;
+	uint8_t int_modc;
+	uint32_t intvec;
+	uint64_t msi_addr;
+	uint64_t rp_update_addr;
+	bool exclusive;
+	void (*err_cb)(struct gsi_evt_err_notify *notify);
+	void *user_data;
+	bool evchid_valid;
+	uint8_t evchid;
+};
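+
+/*
+ * Example (illustrative sketch): filling event ring properties for a GPI
+ * event ring that uses IRQ interrupts. Per the comments above, ring_len
+ * must be an integral multiple of re_size and the base address must respect
+ * the alignment rule. "dev" and example_evt_err_cb are placeholders.
+ *
+ *	struct gsi_evt_ring_props evt_props;
+ *	dma_addr_t ring_phys;
+ *	void *ring_virt;
+ *
+ *	memset(&evt_props, 0, sizeof(evt_props));
+ *	evt_props.intf = GSI_EVT_CHTYPE_GPI_EV;
+ *	evt_props.intr = GSI_INTR_IRQ;
+ *	evt_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
+ *	evt_props.ring_len = 16 * GSI_EVT_RING_RE_SIZE_16B;
+ *	ring_virt = dma_alloc_coherent(dev, evt_props.ring_len, &ring_phys,
+ *				       GFP_KERNEL);
+ *	evt_props.ring_base_addr = ring_phys;
+ *	evt_props.ring_base_vaddr = ring_virt;
+ *	evt_props.exclusive = true;
+ *	evt_props.err_cb = example_evt_err_cb;
+ *
+ * The filled structure is then handed to gsi_alloc_evt_ring() together with
+ * the device handle obtained from gsi_register_device().
+ */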
+
+enum gsi_chan_mode {
+	GSI_CHAN_MODE_CALLBACK = 0x0,
+	GSI_CHAN_MODE_POLL = 0x1,
+};
+
+enum gsi_chan_prot {
+	GSI_CHAN_PROT_MHI = 0x0,
+	GSI_CHAN_PROT_XHCI = 0x1,
+	GSI_CHAN_PROT_GPI = 0x2,
+	GSI_CHAN_PROT_XDCI = 0x3
+};
+
+enum gsi_chan_dir {
+	GSI_CHAN_DIR_FROM_GSI = 0x0,
+	GSI_CHAN_DIR_TO_GSI = 0x1
+};
+
+enum gsi_max_prefetch {
+	GSI_ONE_PREFETCH_SEG = 0x0,
+	GSI_TWO_PREFETCH_SEG = 0x1
+};
+
+enum gsi_chan_evt {
+	GSI_CHAN_EVT_INVALID = 0x0,
+	GSI_CHAN_EVT_SUCCESS = 0x1,
+	GSI_CHAN_EVT_EOT = 0x2,
+	GSI_CHAN_EVT_OVERFLOW = 0x3,
+	GSI_CHAN_EVT_EOB = 0x4,
+	GSI_CHAN_EVT_OOB = 0x5,
+	GSI_CHAN_EVT_DB_MODE = 0x6,
+	GSI_CHAN_EVT_UNDEFINED = 0x10,
+	GSI_CHAN_EVT_RE_ERROR = 0x11,
+};
+
+/**
+ * gsi_chan_xfer_notify - Channel callback info
+ *
+ * @chan_user_data: cookie supplied in gsi_alloc_channel
+ * @xfer_user_data: cookie of the gsi_xfer_elem that caused the
+ *                  event to be generated
+ * @evt_id:         type of event triggered by the associated TRE
+ *                  (corresponding to xfer_user_data)
+ * @bytes_xfered:   number of bytes transferred by the associated TRE
+ *                  (corresponding to xfer_user_data)
+ *
+ */
+struct gsi_chan_xfer_notify {
+	void *chan_user_data;
+	void *xfer_user_data;
+	enum gsi_chan_evt evt_id;
+	uint16_t bytes_xfered;
+};
+
+enum gsi_chan_err {
+	GSI_CHAN_INVALID_TRE_ERR = 0x0,
+	GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR = 0x1,
+	GSI_CHAN_OUT_OF_BUFFERS_ERR = 0x2,
+	GSI_CHAN_OUT_OF_RESOURCES_ERR = 0x3,
+	GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
+	GSI_CHAN_HWO_1_ERR = 0x5
+};
+
+/**
+ * gsi_chan_err_notify - Channel general callback info
+ *
+ * @chan_user_data: cookie supplied in gsi_alloc_channel
+ * @evt_id:         type of error
+ * @err_desc:  more info about the error
+ *
+ */
+struct gsi_chan_err_notify {
+	void *chan_user_data;
+	enum gsi_chan_err evt_id;
+	uint16_t err_desc;
+};
+
+enum gsi_chan_ring_elem_size {
+	GSI_CHAN_RE_SIZE_4B = 4,
+	GSI_CHAN_RE_SIZE_16B = 16,
+	GSI_CHAN_RE_SIZE_32B = 32,
+};
+
+enum gsi_chan_use_db_eng {
+	GSI_CHAN_DIRECT_MODE = 0x0,
+	GSI_CHAN_DB_MODE = 0x1,
+};
+
+/**
+ * gsi_chan_props - Channel related properties
+ *
+ * @prot:            interface type
+ * @dir:             channel direction
+ * @ch_id:           virtual channel ID
+ * @evt_ring_hdl:    handle of associated event ring. set to ~0 if no
+ *                   event ring associated
+ * @re_size:         size of channel ring element
+ * @ring_len:        length of ring in bytes (must be integral multiple of
+ *                   re_size)
+ * @max_re_expected: maximal number of ring elements expected to be queued.
+ *                   Used for data path statistics gathering. If 0 is provided,
+ *                   ring_len / re_size will be used.
+ * @ring_base_addr:  physical base address of ring. Address must be aligned to
+ *                   ring_len rounded to power of two
+ * @ring_base_vaddr: virtual base address of ring (set to NULL when not
+ *                   applicable)
+ * @use_db_eng:      0 => direct mode (doorbells are written directly to RE
+ *                   engine)
+ *                   1 => DB mode (doorbells are written to DB engine)
+ * @max_prefetch:    limit number of pre-fetch segments for channel
+ * @low_weight:      low channel weight (priority of channel for RE engine
+ *                   round robin algorithm); must be >= 1
+ * @xfer_cb:         transfer notification callback, this callback happens
+ *                   on event boundaries
+ *
+ *                   e.g. 1
+ *
+ *                   out TD with 3 REs
+ *
+ *                   RE1: EOT=0, EOB=0, CHAIN=1;
+ *                   RE2: EOT=0, EOB=0, CHAIN=1;
+ *                   RE3: EOT=1, EOB=0, CHAIN=0;
+ *
+ *                   the callback will be triggered for RE3 using the
+ *                   xfer_user_data of that RE
+ *
+ *                   e.g. 2
+ *
+ *                   in REs
+ *
+ *                   RE1: EOT=1, EOB=0, CHAIN=0;
+ *                   RE2: EOT=1, EOB=0, CHAIN=0;
+ *                   RE3: EOT=1, EOB=0, CHAIN=0;
+ *
+ *	             the received packet consumes all of RE1, RE2 and part of
+ *	             RE3 for the EOT condition. There will be three callbacks,
+ *	             in the order below:
+ *
+ *	             callback for RE1 using GSI_CHAN_EVT_OVERFLOW
+ *	             callback for RE2 using GSI_CHAN_EVT_OVERFLOW
+ *	             callback for RE3 using GSI_CHAN_EVT_EOT
+ *
+ * @err_cb:          error notification callback
+ * @chan_user_data:  cookie used for notifications
+ *
+ * All the callbacks are in interrupt context
+ *
+ */
+struct gsi_chan_props {
+	enum gsi_chan_prot prot;
+	enum gsi_chan_dir dir;
+	uint8_t ch_id;
+	unsigned long evt_ring_hdl;
+	enum gsi_chan_ring_elem_size re_size;
+	uint16_t ring_len;
+	uint16_t max_re_expected;
+	uint64_t ring_base_addr;
+	void *ring_base_vaddr;
+	enum gsi_chan_use_db_eng use_db_eng;
+	enum gsi_max_prefetch max_prefetch;
+	uint8_t low_weight;
+	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
+	void (*err_cb)(struct gsi_chan_err_notify *notify);
+	void *chan_user_data;
+};
+
+enum gsi_xfer_flag {
+	GSI_XFER_FLAG_CHAIN = 0x1,
+	GSI_XFER_FLAG_EOB = 0x100,
+	GSI_XFER_FLAG_EOT = 0x200,
+	GSI_XFER_FLAG_BEI = 0x400
+};
+
+enum gsi_xfer_elem_type {
+	GSI_XFER_ELEM_DATA,
+	GSI_XFER_ELEM_IMME_CMD,
+};
+
+/**
+ * gsi_xfer_elem - Metadata about a single transfer
+ *
+ * @addr:           physical address of buffer
+ * @len:            size of buffer for GSI_XFER_ELEM_DATA:
+ *		    for outbound transfers this is the number of bytes to
+ *		    transfer.
+ *		    for inbound transfers, this is the maximum number of
+ *		    bytes the host expects from device in this transfer
+ *
+ *                  immediate command opcode for GSI_XFER_ELEM_IMME_CMD
+ * @flags:          transfer flags, OR of all the applicable flags
+ *
+ *		    GSI_XFER_FLAG_BEI: Block event interrupt
+ *		    1: Event generated by this ring element must not assert
+ *		    an interrupt to the host
+ *		    0: Event generated by this ring element must assert an
+ *		    interrupt to the host
+ *
+ *		    GSI_XFER_FLAG_EOT: Interrupt on end of transfer
+ *		    1: If an EOT condition is encountered when processing
+ *		    this ring element, an event is generated by the device
+ *		    with its completion code set to EOT.
+ *		    0: If an EOT condition is encountered for this ring
+ *		    element, a completion event is not be generated by the
+ *		    device, unless IEOB is 1
+ *
+ *		    GSI_XFER_FLAG_EOB: Interrupt on end of block
+ *		    1: Device notifies host after processing this ring element
+ *		    by sending a completion event
+ *		    0: Completion event is not required after processing this
+ *		    ring element
+ *
+ *		    GSI_XFER_FLAG_CHAIN: Chain bit that identifies the ring
+ *		    elements in a TD
+ *
+ * @type:           transfer type
+ *
+ *		    GSI_XFER_ELEM_DATA: for all data transfers
+ *		    GSI_XFER_ELEM_IMME_CMD: for IPA immediate commands
+ *
+ * @xfer_user_data: cookie used in xfer_cb
+ *
+ */
+struct gsi_xfer_elem {
+	uint64_t addr;
+	uint16_t len;
+	uint16_t flags;
+	enum gsi_xfer_elem_type type;
+	void *xfer_user_data;
+};
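+
+/*
+ * Example (illustrative sketch): describing a single outbound data buffer
+ * that should raise an EOT event when consumed. "buf_phys", "buf_len" and
+ * "ctx" are placeholders; queueing the element on a channel is done through
+ * the channel transfer API declared elsewhere in this header.
+ *
+ *	struct gsi_xfer_elem xfer;
+ *
+ *	memset(&xfer, 0, sizeof(xfer));
+ *	xfer.addr = buf_phys;
+ *	xfer.len = buf_len;
+ *	xfer.type = GSI_XFER_ELEM_DATA;
+ *	xfer.flags = GSI_XFER_FLAG_EOT;
+ *	xfer.xfer_user_data = ctx;
+ */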
+
+/**
+ * gsi_gpi_channel_scratch - GPI protocol SW config area of
+ * channel scratch
+ *
+ * @max_outstanding_tre: Used for the prefetch management sequence by the
+ *                       sequencer. Defines the maximum number of allowed
+ *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
+ *                       prefetch will be limited by this configuration. It
+ *                       is suggested to configure this value to IPA_IF
+ *                       channel TLV queue size times element size. To disable
+ *                       the feature in doorbell mode (DB Mode=1), the maximum
+ *                       outstanding TREs should be set to 64KB (or any value
+ *                       larger than or equal to the ring length, RLEN)
+ * @outstanding_threshold: Used for the prefetch management sequence by the
+ *                       sequencer. Defines the threshold (in Bytes) as to when
+ *                       to update the channel doorbell. Should be smaller than
+ *                       the maximum outstanding TREs value. It is suggested to
+ *                       configure this value to 2 * element size.
+ */
+struct __packed gsi_gpi_channel_scratch {
+	uint64_t resvd1;
+	uint32_t resvd2:16;
+	uint32_t max_outstanding_tre:16;
+	uint32_t resvd3:16;
+	uint32_t outstanding_threshold:16;
+};
+
+/**
+ * gsi_mhi_channel_scratch - MHI protocol SW config area of
+ * channel scratch
+ *
+ * @mhi_host_wp_addr:    Valid only when UL/DL Sync En is asserted. Defines
+ *                       address in host from which channel write pointer
+ *                       should be read in polling mode
+ * @assert_bit40:        1: bit #41 in address should be asserted upon
+ *                       IPA_IF.ProcessDescriptor routine (for MHI over PCIe
+ *                       transfers)
+ *                       0: bit #41 in address should be deasserted upon
+ *                       IPA_IF.ProcessDescriptor routine (for non-MHI over
+ *                       PCIe transfers)
+ * @polling_configuration: Uplink channels: Defines timer to poll on MHI
+ *                       context. Range: 1 to 31 milliseconds.
+ *                       Downlink channel: Defines transfer ring buffer
+ *                       availability threshold to poll on MHI context in
+ *                       multiple of 8. Range: 0 to 31, meaning 0 to 258 ring
+ *                       elements. E.g., value of 2 indicates 16 ring elements.
+ *                       Valid only when Burst Mode Enabled is set to 1
+ * @burst_mode_enabled:  0: Burst mode is disabled for this channel
+ *                       1: Burst mode is enabled for this channel
+ * @polling_mode:        0: the channel is not in polling mode, meaning the
+ *                       host should ring DBs.
+ *                       1: the channel is in polling mode, meaning the host
+ *                       should not ring DBs until notified of DB mode/OOB mode
+ * @oob_mod_threshold:   Defines OOB moderation threshold. Units are in 8
+ *                       ring elements.
+ * @max_outstanding_tre: Used for the prefetch management sequence by the
+ *                       sequencer. Defines the maximum number of allowed
+ *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
+ *                       prefetch will be limited by this configuration. It
+ *                       is suggested to configure this value to IPA_IF
+ *                       channel TLV queue size times element size.
+ *                       To disable the feature in doorbell mode (DB Mode=1),
+ *                       the maximum outstanding TREs should be set to 64KB
+ *                       (or any value larger than or equal to the ring
+ *                       length, RLEN)
+ * @outstanding_threshold: Used for the prefetch management sequence by the
+ *                       sequencer. Defines the threshold (in Bytes) as to when
+ *                       to update the channel doorbell. Should be smaller than
+ *                       the maximum outstanding TREs value. It is suggested to
+ *                       configure this value to min(TLV_FIFO_SIZE/2,8) *
+ *                       element size.
+ */
+struct __packed gsi_mhi_channel_scratch {
+	uint64_t mhi_host_wp_addr;
+	uint32_t rsvd1:1;
+	uint32_t assert_bit40:1;
+	uint32_t polling_configuration:5;
+	uint32_t burst_mode_enabled:1;
+	uint32_t polling_mode:1;
+	uint32_t oob_mod_threshold:5;
+	uint32_t resvd2:2;
+	uint32_t max_outstanding_tre:16;
+	uint32_t resvd3:16;
+	uint32_t outstanding_threshold:16;
+};
+
+/**
+ * gsi_xdci_channel_scratch - xDCI protocol SW config area of
+ * channel scratch
+ *
+ * @const_buffer_size:   TRB buffer size in KB (similar to IPA aggregation
+ *                       configuration). Must be aligned to Max USB Packet Size
+ * @xferrscidx: Transfer Resource Index (XferRscIdx). The hardware-assigned
+ *                       transfer resource index for the transfer, which was
+ *                       returned in response to the Start Transfer command.
+ *                       This field is used for "Update Transfer" command
+ * @last_trb_addr:       Address (LSB - based on alignment restrictions) of
+ *                       last TRB in queue. Used to identify rollover case
+ * @depcmd_low_addr:     Used to generate "Update Transfer" command
+ * @max_outstanding_tre: Used for the prefetch management sequence by the
+ *                       sequencer. Defines the maximum number of allowed
+ *                       outstanding TREs in IPA/GSI (in Bytes). RE engine
+ *                       prefetch will be limited by this configuration. It
+ *                       is suggested to configure this value to IPA_IF
+ *                       channel TLV queue size times element size.
+ *                       To disable the feature in doorbell mode (DB Mode=1),
+ *                       the maximum outstanding TREs should be set to 64KB
+ *                       (or any value larger than or equal to the ring
+ *                       length, RLEN)
+ * @depcmd_hi_addr: Used to generate "Update Transfer" command
+ * @outstanding_threshold: Used for the prefetch management sequence by the
+ *                       sequencer. Defines the threshold (in Bytes) as to when
+ *                       to update the channel doorbell. Should be smaller than
+ *                       the maximum outstanding TREs value. It is suggested to
+ *                       configure this value to 2 * element size; for MBIM the
+ *                       suggested configuration is the element size.
+ */
+struct __packed gsi_xdci_channel_scratch {
+	uint32_t last_trb_addr:16;
+	uint32_t resvd1:4;
+	uint32_t xferrscidx:7;
+	uint32_t const_buffer_size:5;
+	uint32_t depcmd_low_addr;
+	uint32_t depcmd_hi_addr:8;
+	uint32_t resvd2:8;
+	uint32_t max_outstanding_tre:16;
+	uint32_t resvd3:16;
+	uint32_t outstanding_threshold:16;
+};
+
+/**
+ * gsi_channel_scratch - channel scratch SW config area
+ *
+ */
+union __packed gsi_channel_scratch {
+	struct __packed gsi_gpi_channel_scratch gpi;
+	struct __packed gsi_mhi_channel_scratch mhi;
+	struct __packed gsi_xdci_channel_scratch xdci;
+	struct __packed {
+		uint32_t word1;
+		uint32_t word2;
+		uint32_t word3;
+		uint32_t word4;
+	} data;
+};
+
+/**
+ * gsi_mhi_evt_scratch - MHI protocol SW config area of
+ * event scratch
+ */
+struct __packed gsi_mhi_evt_scratch {
+	uint32_t resvd1;
+	uint32_t resvd2;
+};
+
+/**
+ * gsi_xdci_evt_scratch - xDCI protocol SW config area of
+ * event scratch
+ *
+ */
+struct __packed gsi_xdci_evt_scratch {
+	uint32_t gevntcount_low_addr;
+	uint32_t gevntcount_hi_addr:8;
+	uint32_t resvd1:24;
+};
+
+/**
+ * gsi_evt_scratch - event scratch SW config area
+ *
+ */
+union __packed gsi_evt_scratch {
+	struct __packed gsi_mhi_evt_scratch mhi;
+	struct __packed gsi_xdci_evt_scratch xdci;
+	struct __packed {
+		uint32_t word1;
+		uint32_t word2;
+	} data;
+};
+
+/**
+ * gsi_device_scratch - EE scratch config parameters
+ *
+ * @mhi_base_chan_idx_valid: is mhi_base_chan_idx valid?
+ * @mhi_base_chan_idx:       base index of IPA MHI channel indexes.
+ *                           IPA MHI channel index = GSI channel ID +
+ *                           MHI base channel index
+ * @max_usb_pkt_size_valid:  is max_usb_pkt_size valid?
+ * @max_usb_pkt_size:        max USB packet size in bytes (valid values are
+ *                           512 and 1024)
+ */
+struct gsi_device_scratch {
+	bool mhi_base_chan_idx_valid;
+	uint8_t mhi_base_chan_idx;
+	bool max_usb_pkt_size_valid;
+	uint16_t max_usb_pkt_size;
+};
+
+/**
+ * gsi_chan_info - information about channel occupancy
+ *
+ * @wp: channel write pointer (physical address)
+ * @rp: channel read pointer (physical address)
+ * @evt_valid: is evt* info valid?
+ * @evt_wp: event ring write pointer (physical address)
+ * @evt_rp: event ring read pointer (physical address)
+ */
+struct gsi_chan_info {
+	uint64_t wp;
+	uint64_t rp;
+	bool evt_valid;
+	uint64_t evt_wp;
+	uint64_t evt_rp;
+};
+
+#ifdef CONFIG_GSI
+/**
+ * gsi_register_device - Peripheral should call this function to
+ * register itself with GSI before invoking any other APIs
+ *
+ * @props:  Peripheral properties
+ * @dev_hdl:  Handle populated by GSI, opaque to client
+ *
+ * @Return -GSI_STATUS_AGAIN if request should be re-tried later
+ *	   other error codes for failure
+ */
+int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl);
+
+/**
+ * gsi_complete_clk_grant - Peripheral should call this function to
+ * grant the clock resource requested by GSI previously that could not
+ * be granted synchronously. GSI will release the clock resource using
+ * the rel_clk_cb when appropriate
+ *
+ * @dev_hdl:	   Client handle previously obtained from
+ *	   gsi_register_device
+ *
+ * @Return gsi_status
+ */
+int gsi_complete_clk_grant(unsigned long dev_hdl);
+
+/**
+ * gsi_write_device_scratch - Peripheral should call this function to
+ * write to the EE scratch area
+ *
+ * @dev_hdl:  Client handle previously obtained from
+ *            gsi_register_device
+ * @val:      Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_device_scratch(unsigned long dev_hdl,
+		struct gsi_device_scratch *val);
+
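+/*
+ * Illustrative sketch (not part of the API): writing the EE scratch
+ * area for an MHI-style peripheral. The handle and the chosen values
+ * are hypothetical; only the struct gsi_device_scratch fields come
+ * from this header.
+ *
+ *	struct gsi_device_scratch scr = {
+ *		.mhi_base_chan_idx_valid = true,
+ *		.mhi_base_chan_idx = 0,
+ *		.max_usb_pkt_size_valid = false,
+ *	};
+ *
+ *	ret = gsi_write_device_scratch(dev_hdl, &scr);
+ */
+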
+/**
+ * gsi_deregister_device - Peripheral should call this function to
+ * de-register itself with GSI
+ *
+ * @dev_hdl:  Client handle previously obtained from
+ *            gsi_register_device
+ * @force:    When set to true, cleanup is performed even if there
+ *            are resources still in use, such as channels and event
+ *            rings. This would be used after a GSI reset to recover
+ *            from a fatal error.
+ *            When set to false, there must not exist any allocated
+ *            channels and event rings.
+ *
+ * @Return gsi_status
+ */
+int gsi_deregister_device(unsigned long dev_hdl, bool force);
+
+/**
+ * gsi_alloc_evt_ring - Peripheral should call this function to
+ * allocate an event ring
+ *
+ * @props:	   Event ring properties
+ * @dev_hdl:	   Client handle previously obtained from
+ *	   gsi_register_device
+ * @evt_ring_hdl:  Handle populated by GSI, opaque to client
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
+		unsigned long *evt_ring_hdl);
+
+/**
+ * gsi_write_evt_ring_scratch - Peripheral should call this function to
+ * write to the scratch area of the event ring context
+ *
+ * @evt_ring_hdl:  Client handle previously obtained from
+ *	   gsi_alloc_evt_ring
+ * @val:           Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+		union __packed gsi_evt_scratch val);
+
+/**
+ * gsi_dealloc_evt_ring - Peripheral should call this function to
+ * de-allocate an event ring. There should not exist any active
+ * channels using this event ring
+ *
+ * @evt_ring_hdl:  Client handle previously obtained from
+ *	   gsi_alloc_evt_ring
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl);
+
+/**
+ * gsi_query_evt_ring_db_addr - Peripheral should call this function to
+ * query the physical addresses of the event ring doorbell registers
+ *
+ * @evt_ring_hdl:    Client handle previously obtained from
+ *	     gsi_alloc_evt_ring
+ * @db_addr_wp_lsb:  Physical address of doorbell register where the 32
+ *                   LSBs of the doorbell value should be written
+ * @db_addr_wp_msb:  Physical address of doorbell register where the 32
+ *                   MSBs of the doorbell value should be written
+ *
+ * @Return gsi_status
+ */
+int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
+
+/**
+ * gsi_reset_evt_ring - Peripheral should call this function to
+ * reset an event ring to recover from error state
+ *
+ * @evt_ring_hdl:  Client handle previously obtained from
+ *             gsi_alloc_evt_ring
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_reset_evt_ring(unsigned long evt_ring_hdl);
+
+/**
+ * gsi_get_evt_ring_cfg - This function returns the current config
+ * of the specified event ring
+ *
+ * @evt_ring_hdl:  Client handle previously obtained from
+ *             gsi_alloc_evt_ring
+ * @props:         where to copy properties to
+ * @scr:           where to copy scratch info to
+ *
+ * @Return gsi_status
+ */
+int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
+
+/**
+ * gsi_set_evt_ring_cfg - This function applies the supplied config
+ * to the specified event ring.
+ *
+ * The exclusive property of the event ring cannot be changed after
+ * gsi_alloc_evt_ring
+ *
+ * @evt_ring_hdl:  Client handle previously obtained from
+ *             gsi_alloc_evt_ring
+ * @props:         the properties to apply
+ * @scr:           the scratch info to apply
+ *
+ * @Return gsi_status
+ */
+int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr);
+
+/**
+ * gsi_alloc_channel - Peripheral should call this function to
+ * allocate a channel
+ *
+ * @props:     Channel properties
+ * @dev_hdl:   Client handle previously obtained from
+ *             gsi_register_device
+ * @chan_hdl:  Handle populated by GSI, opaque to client
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_alloc_channel(struct gsi_chan_props *props, unsigned long dev_hdl,
+		unsigned long *chan_hdl);
+
+/**
+ * gsi_write_channel_scratch - Peripheral should call this function to
+ * write to the scratch area of the channel context
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @val:       Value to write
+ *
+ * @Return gsi_status
+ */
+int gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val);
+
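+/*
+ * Illustrative sketch (not part of the API): programming the xDCI
+ * channel scratch through the union above. The values are hypothetical
+ * placeholders; per the gsi_xdci_channel_scratch documentation,
+ * max_outstanding_tre is suggested to be the TLV queue size times the
+ * element size and outstanding_threshold to be 2 * element size.
+ *
+ *	union __packed gsi_channel_scratch scr = { };
+ *
+ *	scr.xdci.last_trb_addr = last_trb_addr_lsb;
+ *	scr.xdci.depcmd_low_addr = depcmd_addr_lsb;
+ *	scr.xdci.depcmd_hi_addr = depcmd_addr_msb;
+ *	scr.xdci.max_outstanding_tre = tlv_queue_size * element_size;
+ *	scr.xdci.outstanding_threshold = 2 * element_size;
+ *	ret = gsi_write_channel_scratch(chan_hdl, scr);
+ */
+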
+/**
+ * gsi_start_channel - Peripheral should call this function to
+ * start a channel, i.e. put it into the running state
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_start_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_stop_channel - Peripheral should call this function to
+ * stop a channel. Stop will happen on a packet boundary
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
+ *	   other error codes for failure
+ */
+int gsi_stop_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_reset_channel - Peripheral should call this function to
+ * reset a channel to recover from error state
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_reset_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_dealloc_channel - Peripheral should call this function to
+ * de-allocate a channel
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return gsi_status
+ */
+int gsi_dealloc_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_stop_db_channel - Peripheral should call this function to
+ * stop a channel when all transfer elements up to the doorbell
+ * have been processed
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * This function can sleep
+ *
+ * @Return -GSI_STATUS_AGAIN if client should call stop/stop_db again
+ *	   other error codes for failure
+ */
+int gsi_stop_db_channel(unsigned long chan_hdl);
+
+/**
+ * gsi_query_channel_db_addr - Peripheral should call this function to
+ * query the physical addresses of the channel doorbell registers
+ *
+ * @chan_hdl:        Client handle previously obtained from
+ *	     gsi_alloc_channel
+ * @db_addr_wp_lsb:  Physical address of doorbell register where the 32
+ *                   LSBs of the doorbell value should be written
+ * @db_addr_wp_msb:  Physical address of doorbell register where the 32
+ *                   MSBs of the doorbell value should be written
+ *
+ * @Return gsi_status
+ */
+int gsi_query_channel_db_addr(unsigned long chan_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb);
+
+/**
+ * gsi_query_channel_info - Peripheral can call this function to query the
+ * channel and associated event ring (if any) status.
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @info:      Where to read the values into
+ *
+ * @Return gsi_status
+ */
+int gsi_query_channel_info(unsigned long chan_hdl,
+		struct gsi_chan_info *info);
+
+/**
+ * gsi_is_channel_empty - Peripheral can call this function to query if
+ * the channel is empty. This is only applicable to GPI. "Empty" means
+ * GSI has consumed all descriptors for a TO_GSI channel and SW has
+ * processed all completed descriptors for a FROM_GSI channel.
+ *
+ * @chan_hdl:  Client handle previously obtained from gsi_alloc_channel
+ * @is_empty:  set by GSI based on channel emptiness
+ *
+ * @Return gsi_status
+ */
+int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty);
+
+/**
+ * gsi_get_channel_cfg - This function returns the current config
+ * of the specified channel
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @props:     where to copy properties to
+ * @scr:       where to copy scratch info to
+ *
+ * @Return gsi_status
+ */
+int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr);
+
+/**
+ * gsi_set_channel_cfg - This function applies the supplied config
+ * to the specified channel
+ *
+ * ch_id and evt_ring_hdl of the channel cannot be changed after
+ * gsi_alloc_channel
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @props:     the properties to apply
+ * @scr:       the scratch info to apply
+ *
+ * @Return gsi_status
+ */
+int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr);
+
+/**
+ * gsi_poll_channel - Peripheral should call this function to query for
+ * completed transfer descriptors.
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @notify:    Information about the completed transfer if any
+ *
+ * @Return gsi_status (GSI_STATUS_POLL_EMPTY is returned if no transfers
+ * completed)
+ */
+int gsi_poll_channel(unsigned long chan_hdl,
+		struct gsi_chan_xfer_notify *notify);
+
+/**
+ * gsi_config_channel_mode - Peripheral should call this function
+ * to configure the channel mode.
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @mode:      Mode to move the channel into
+ *
+ * @Return gsi_status
+ */
+int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode);
+
+/**
+ * gsi_queue_xfer - Peripheral should call this function
+ * to queue transfers on the given channel
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ * @num_xfers: Number of transfers in the array @xfer
+ * @xfer:      Array of num_xfers transfer descriptors
+ * @ring_db:   If true, tell HW about these queued xfers
+ *             If false, do not notify HW at this time
+ *
+ * @Return gsi_status
+ */
+int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+		struct gsi_xfer_elem *xfer, bool ring_db);
+
+/**
+ * gsi_start_xfer - Peripheral should call this function to
+ * inform HW about queued xfers
+ *
+ * @chan_hdl:  Client handle previously obtained from
+ *             gsi_alloc_channel
+ *
+ * @Return gsi_status
+ */
+int gsi_start_xfer(unsigned long chan_hdl);
+
+/**
+ * gsi_configure_regs - Peripheral should call this function
+ * to configure the GSI registers. This may be done before or after
+ * the FW is loaded, but must be done before the FW is enabled.
+ *
+ * @gsi_base_addr: Base address of GSI register space
+ * @gsi_size: Mapping size of the GSI register space
+ * @per_base_addr: Base address of the peripheral using GSI
+ *
+ * @Return gsi_status
+ */
+int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
+		phys_addr_t per_base_addr);
+
+/**
+ * gsi_enable_fw - Peripheral should call this function
+ * to enable the GSI FW after the FW has been loaded to the SRAM.
+ *
+ * @gsi_base_addr: Base address of GSI register space
+ * @gsi_size: Mapping size of the GSI register space
+ *
+ * @Return gsi_status
+ */
+int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size);
+
+/*
+ * Here is a typical sequence of calls
+ *
+ * gsi_register_device
+ *
+ * gsi_write_device_scratch (if the protocol needs this)
+ *
+ * gsi_alloc_evt_ring (for as many event rings as needed)
+ * gsi_write_evt_ring_scratch
+ *
+ * gsi_alloc_channel (for as many channels as needed; channels can have
+ * no event ring, an exclusive event ring or a shared event ring)
+ * gsi_write_channel_scratch
+ * gsi_start_channel
+ * gsi_queue_xfer/gsi_start_xfer
+ * gsi_config_channel_mode/gsi_poll_channel (if the client wants to poll for
+ * xfer completions)
+ * gsi_stop_db_channel/gsi_stop_channel
+ *
+ * gsi_dealloc_channel
+ *
+ * gsi_dealloc_evt_ring
+ *
+ * gsi_deregister_device
+ *
+ */
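+
+/*
+ * A minimal code sketch of the sequence above, assuming the props
+ * structures and scratch values have been filled in elsewhere; names
+ * such as my_per_props and my_chan_props are hypothetical and error
+ * handling is mostly omitted.
+ *
+ *	unsigned long dev_hdl, evt_hdl, chan_hdl;
+ *
+ *	if (gsi_register_device(&my_per_props, &dev_hdl))
+ *		return;
+ *	gsi_alloc_evt_ring(&my_evt_props, dev_hdl, &evt_hdl);
+ *	gsi_write_evt_ring_scratch(evt_hdl, my_evt_scr);
+ *	gsi_alloc_channel(&my_chan_props, dev_hdl, &chan_hdl);
+ *	gsi_write_channel_scratch(chan_hdl, my_chan_scr);
+ *	gsi_start_channel(chan_hdl);
+ *	gsi_queue_xfer(chan_hdl, num_xfers, xfers, true);
+ *	...
+ *	gsi_stop_channel(chan_hdl);
+ *	gsi_dealloc_channel(chan_hdl);
+ *	gsi_dealloc_evt_ring(evt_hdl);
+ *	gsi_deregister_device(dev_hdl, false);
+ */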
+#else
+static inline int gsi_register_device(struct gsi_per_props *props,
+		unsigned long *dev_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_complete_clk_grant(unsigned long dev_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_device_scratch(unsigned long dev_hdl,
+		struct gsi_device_scratch *val)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_deregister_device(unsigned long dev_hdl, bool force)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props,
+		unsigned long dev_hdl,
+		unsigned long *evt_ring_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
+		union __packed gsi_evt_scratch val)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_alloc_channel(struct gsi_chan_props *props,
+		unsigned long dev_hdl,
+		unsigned long *chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_write_channel_scratch(unsigned long chan_hdl,
+		union __packed gsi_channel_scratch val)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_start_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_stop_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_reset_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_dealloc_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_stop_db_channel(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_channel_db_addr(unsigned long chan_hdl,
+		uint32_t *db_addr_wp_lsb, uint32_t *db_addr_wp_msb)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_query_channel_info(unsigned long chan_hdl,
+		struct gsi_chan_info *info)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_poll_channel(unsigned long chan_hdl,
+		struct gsi_chan_xfer_notify *notify)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_config_channel_mode(unsigned long chan_hdl,
+		enum gsi_chan_mode mode)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
+		struct gsi_xfer_elem *xfer, bool ring_db)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_start_xfer(unsigned long chan_hdl)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_get_channel_cfg(unsigned long chan_hdl,
+		struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_set_channel_cfg(unsigned long chan_hdl,
+		struct gsi_chan_props *props,
+		union gsi_channel_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
+		struct gsi_evt_ring_props *props, union gsi_evt_scratch *scr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+
+static inline int gsi_configure_regs(phys_addr_t gsi_base_addr, u32 gsi_size,
+		phys_addr_t per_base_addr)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+static inline int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
+{
+	return -GSI_STATUS_UNSUPPORTED_OP;
+}
+#endif
+#endif
diff --git a/include/linux/msm_ion.h b/include/linux/msm_ion.h
new file mode 100644
index 0000000..08b35d7
--- /dev/null
+++ b/include/linux/msm_ion.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LINUX_MSM_ION_H__
+#define __LINUX_MSM_ION_H__
+
+#include "../../drivers/staging/android/ion/msm/msm_ion.h"
+
+#endif /* __LINUX_MSM_ION_H__ */
diff --git a/include/linux/msm_remote_spinlock.h b/include/linux/msm_remote_spinlock.h
new file mode 100644
index 0000000..2357b4f
--- /dev/null
+++ b/include/linux/msm_remote_spinlock.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2009, 2011, 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/*
+ * Part of this code is based on the standard ARM spinlock
+ * implementation (asm/spinlock.h) found in the 2.6.29 kernel.
+ */
+
+#ifndef __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+#define __ASM__ARCH_QC_REMOTE_SPINLOCK_H
+
+#include <linux/io.h>
+#include <linux/types.h>
+
+#define REMOTE_SPINLOCK_NUM_PID 128
+#define REMOTE_SPINLOCK_TID_START REMOTE_SPINLOCK_NUM_PID
+
+/* Remote spinlock definitions. */
+
+typedef struct {
+	volatile uint32_t lock;
+} raw_remote_spinlock_t;
+
+typedef raw_remote_spinlock_t *_remote_spinlock_t;
+
+#define remote_spinlock_id_t const char *
+
+#if defined(CONFIG_REMOTE_SPINLOCK_MSM)
+int _remote_spin_lock_init(remote_spinlock_id_t, _remote_spinlock_t *lock);
+void _remote_spin_release_all(uint32_t pid);
+void _remote_spin_lock(_remote_spinlock_t *lock);
+void _remote_spin_unlock(_remote_spinlock_t *lock);
+int _remote_spin_trylock(_remote_spinlock_t *lock);
+int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid);
+int _remote_spin_owner(_remote_spinlock_t *lock);
+void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock, uint32_t tid);
+void _remote_spin_unlock_rlock(_remote_spinlock_t *lock);
+int _remote_spin_get_hw_spinlocks_element(_remote_spinlock_t *lock);
+#else
+static inline
+int _remote_spin_lock_init(remote_spinlock_id_t id, _remote_spinlock_t *lock)
+{
+	return -EINVAL;
+}
+static inline void _remote_spin_release_all(uint32_t pid) {}
+static inline void _remote_spin_lock(_remote_spinlock_t *lock) {}
+static inline void _remote_spin_unlock(_remote_spinlock_t *lock) {}
+static inline int _remote_spin_trylock(_remote_spinlock_t *lock)
+{
+	return -ENODEV;
+}
+static inline int _remote_spin_release(_remote_spinlock_t *lock, uint32_t pid)
+{
+	return -ENODEV;
+}
+static inline int _remote_spin_owner(_remote_spinlock_t *lock)
+{
+	return -ENODEV;
+}
+static inline void _remote_spin_lock_rlock_id(_remote_spinlock_t *lock,
+					      uint32_t tid) {}
+static inline void _remote_spin_unlock_rlock(_remote_spinlock_t *lock) {}
+static inline int _remote_spin_get_hw_spinlocks_element(
+		_remote_spinlock_t *lock)
+{
+	return -ENODEV;
+}
+#endif
+#endif /* __ASM__ARCH_QC_REMOTE_SPINLOCK_H */
diff --git a/include/linux/msm_rtb.h b/include/linux/msm_rtb.h
new file mode 100644
index 0000000..2280942
--- /dev/null
+++ b/include/linux/msm_rtb.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MSM_RTB_H__
+#define __MSM_RTB_H__
+
+/*
+ * These numbers are used from the kernel command line and sysfs
+ * to control filtering. Remove items from here with extreme caution.
+ */
+enum logk_event_type {
+	LOGK_NONE = 0,
+	LOGK_READL = 1,
+	LOGK_WRITEL = 2,
+	LOGK_LOGBUF = 3,
+	LOGK_HOTPLUG = 4,
+	LOGK_CTXID = 5,
+	LOGK_TIMESTAMP = 6,
+	LOGK_L2CPREAD = 7,
+	LOGK_L2CPWRITE = 8,
+	LOGK_IRQ = 9,
+};
+
+#define LOGTYPE_NOPC 0x80
+
+struct msm_rtb_platform_data {
+	unsigned int size;
+};
+
+#if defined(CONFIG_QCOM_RTB)
+/*
+ * returns 1 if data was logged, 0 otherwise
+ */
+int uncached_logk_pc(enum logk_event_type log_type, void *caller,
+				void *data);
+
+/*
+ * returns 1 if data was logged, 0 otherwise
+ */
+int uncached_logk(enum logk_event_type log_type, void *data);
+
+#define ETB_WAYPOINT  do { \
+				BRANCH_TO_NEXT_ISTR; \
+				nop(); \
+				BRANCH_TO_NEXT_ISTR; \
+				nop(); \
+			} while (0)
+
+#define BRANCH_TO_NEXT_ISTR  asm volatile("b .+4\n" : : : "memory")
+/*
+ * both the mb and the isb are needed to ensure enough waypoints for
+ * etb tracing
+ */
+#define LOG_BARRIER	do { \
+				mb(); \
+				isb();\
+			 } while (0)
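+
+/*
+ * Illustrative sketch (not part of the API): logging an MMIO write to
+ * the RTB before issuing it. The register pointer and value are
+ * hypothetical.
+ *
+ *	uncached_logk(LOGK_WRITEL, (void *)reg);
+ *	writel_relaxed(val, reg);
+ *	LOG_BARRIER;
+ */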
+#else
+
+static inline int uncached_logk_pc(enum logk_event_type log_type,
+					void *caller,
+					void *data) { return 0; }
+
+static inline int uncached_logk(enum logk_event_type log_type,
+					void *data) { return 0; }
+
+#define ETB_WAYPOINT
+#define BRANCH_TO_NEXT_ISTR
+/*
+ * Due to a GCC bug, we need to have a nop here in order to prevent an extra
+ * read from being generated after the write.
+ */
+#define LOG_BARRIER		nop()
+#endif
+#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index f29abda..a2866f6 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -78,6 +78,8 @@
 extern void done_path_create(struct path *, struct dentry *);
 extern struct dentry *kern_path_locked(const char *, struct path *);
 extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
+extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
+		const char *, unsigned int, struct path *);
 
 extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
 extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e16a2a9..7c0f6ad 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2815,6 +2815,7 @@
  */
 struct softnet_data {
 	struct list_head	poll_list;
+	struct napi_struct	*current_napi;
 	struct sk_buff_head	process_queue;
 
 	/* stats */
@@ -3312,6 +3313,7 @@
 gro_result_t napi_gro_frags(struct napi_struct *napi);
 struct packet_offload *gro_find_receive_by_type(__be16 type);
 struct packet_offload *gro_find_complete_by_type(__be16 type);
+extern struct napi_struct *get_current_napi_context(void);
 
 static inline void napi_free_frags(struct napi_struct *napi)
 {
diff --git a/include/linux/netfilter/xt_qtaguid.h b/include/linux/netfilter/xt_qtaguid.h
new file mode 100644
index 0000000..ca60fbd
--- /dev/null
+++ b/include/linux/netfilter/xt_qtaguid.h
@@ -0,0 +1,13 @@
+#ifndef _XT_QTAGUID_MATCH_H
+#define _XT_QTAGUID_MATCH_H
+
+/* For now we just replace the xt_owner.
+ * FIXME: make iptables aware of qtaguid. */
+#include <linux/netfilter/xt_owner.h>
+
+#define XT_QTAGUID_UID    XT_OWNER_UID
+#define XT_QTAGUID_GID    XT_OWNER_GID
+#define XT_QTAGUID_SOCKET XT_OWNER_SOCKET
+#define xt_qtaguid_match_info xt_owner_match_info
+
+#endif /* _XT_QTAGUID_MATCH_H */
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 0000000..eadc69033
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,25 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+
+enum xt_quota_flags {
+	XT_QUOTA_INVERT    = 1 << 0,
+	XT_QUOTA_GROW      = 1 << 1,
+	XT_QUOTA_PACKET    = 1 << 2,
+	XT_QUOTA_NO_CHANGE = 1 << 3,
+	XT_QUOTA_MASK      = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+	char name[15];
+	u_int8_t flags;
+
+	/* Comparison-invariant */
+	aligned_u64 quota;
+
+	/* Used internally by the kernel */
+	struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index a78c35c..780949d 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -14,8 +14,11 @@
  * may be used to reset the timeout - for code which intentionally
  * disables interrupts for a long time. This call is stateless.
  */
-#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR_NMI)
 #include <asm/nmi.h>
+#endif
+
+#if defined(CONFIG_HAVE_NMI_WATCHDOG) || defined(CONFIG_HARDLOCKUP_DETECTOR)
 extern void touch_nmi_watchdog(void);
 #else
 static inline void touch_nmi_watchdog(void)
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 4341f32..501d461 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -63,6 +63,27 @@
 extern unsigned long of_get_flat_dt_root(void);
 extern int of_get_flat_dt_size(void);
 
+/*
+ * early_init_dt_scan_chosen - scan the device tree for ramdisk and bootargs
+ *
+ * The boot arguments will be placed into the memory pointed to by @data.
+ * That memory should be COMMAND_LINE_SIZE big and initialized to be a valid
+ * (possibly empty) string.  Logic for what will be in @data after this
+ * function finishes:
+ *
+ * - CONFIG_CMDLINE_FORCE=true
+ *     CONFIG_CMDLINE
+ * - CONFIG_CMDLINE_EXTEND=true, @data is non-empty string
+ *     @data + dt bootargs (even if dt bootargs are empty)
+ * - CONFIG_CMDLINE_EXTEND=true, @data is empty string
+ *     CONFIG_CMDLINE + dt bootargs (even if dt bootargs are empty)
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=non-empty:
+ *     dt bootargs
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is non-empty string
+ *     @data is left unchanged
+ * - CMDLINE_FROM_BOOTLOADER=true, dt bootargs=empty, @data is empty string
+ *     CONFIG_CMDLINE (or "" if that's not defined)
+ */
 extern int early_init_dt_scan_chosen(unsigned long node, const char *uname,
 				     int depth, void *data);
 extern int early_init_dt_scan_memory(unsigned long node, const char *uname,
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index f8e1992..dea0fd5 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -14,6 +14,7 @@
 	phys_addr_t			base;
 	phys_addr_t			size;
 	void				*priv;
+	int				fixup;
 };
 
 struct reserved_mem_ops {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4741ecd..531b8b1 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1168,6 +1168,11 @@
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
 				 void __user *buffer, size_t *lenp, loff_t *ppos);
 
+static inline bool perf_paranoid_any(void)
+{
+	return sysctl_perf_event_paranoid > 2;
+}
+
 static inline bool perf_paranoid_tracepoint_raw(void)
 {
 	return sysctl_perf_event_paranoid > -1;
diff --git a/include/linux/phy/phy-qcom-ufs.h b/include/linux/phy/phy-qcom-ufs.h
index 9d18e9f..7945fea 100644
--- a/include/linux/phy/phy-qcom-ufs.h
+++ b/include/linux/phy/phy-qcom-ufs.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013-2015, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -51,9 +51,12 @@
 void ufs_qcom_phy_disable_iface_clk(struct phy *phy);
 int ufs_qcom_phy_start_serdes(struct phy *phy);
 int ufs_qcom_phy_set_tx_lane_enable(struct phy *phy, u32 tx_lanes);
+int ufs_qcom_phy_ctrl_rx_linecfg(struct phy *generic_phy, bool ctrl);
 int ufs_qcom_phy_calibrate_phy(struct phy *phy, bool is_rate_B);
 int ufs_qcom_phy_is_pcs_ready(struct phy *phy);
 void ufs_qcom_phy_save_controller_version(struct phy *phy,
 			u8 major, u16 minor, u16 step);
+const char *ufs_qcom_phy_name(struct phy *phy);
+int ufs_qcom_phy_configure_lpm(struct phy *generic_phy, bool enable);
 
 #endif /* PHY_QCOM_UFS_H_ */
diff --git a/include/linux/platform_data/ds2482.h b/include/linux/platform_data/ds2482.h
new file mode 100644
index 0000000..5a6879e2a
--- /dev/null
+++ b/include/linux/platform_data/ds2482.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __PLATFORM_DATA_DS2482__
+#define __PLATFORM_DATA_DS2482__
+
+struct ds2482_platform_data {
+	int		slpz_gpio;
+};
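+
+/*
+ * Illustrative sketch (not part of this header): a board file would
+ * typically pass this via i2c_board_info. The I2C address and GPIO
+ * number are board-specific placeholders.
+ *
+ *	static struct ds2482_platform_data ds2482_pdata = {
+ *		.slpz_gpio = 123,
+ *	};
+ *
+ *	static struct i2c_board_info ds2482_board_info = {
+ *		I2C_BOARD_INFO("ds2482", 0x18),
+ *		.platform_data = &ds2482_pdata,
+ *	};
+ */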
+
+#endif /* __PLATFORM_DATA_DS2482__ */
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 0f65d36..ff49ab3 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -9,6 +9,8 @@
 #include <linux/miscdevice.h>
 #include <linux/device.h>
 #include <linux/workqueue.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
 
 enum {
 	PM_QOS_RESERVED = 0,
@@ -42,7 +44,22 @@
 #define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
 #define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)
 
+enum pm_qos_req_type {
+	PM_QOS_REQ_ALL_CORES = 0,
+	PM_QOS_REQ_AFFINE_CORES,
+#ifdef CONFIG_SMP
+	PM_QOS_REQ_AFFINE_IRQ,
+#endif
+};
+
 struct pm_qos_request {
+	enum pm_qos_req_type type;
+	struct cpumask cpus_affine;
+#ifdef CONFIG_SMP
+	uint32_t irq;
+	/* Internal structure members */
+	struct irq_affinity_notify irq_notify;
+#endif
 	struct plist_node node;
 	int pm_qos_class;
 	struct delayed_work work; /* for pm_qos_update_request_timeout */
@@ -62,7 +79,7 @@
 struct dev_pm_qos_request {
 	enum dev_pm_qos_req_type type;
 	union {
-		struct plist_node pnode;
+		struct pm_qos_request lat;
 		struct pm_qos_flags_request flr;
 	} data;
 	struct device *dev;
@@ -83,6 +100,7 @@
 struct pm_qos_constraints {
 	struct plist_head list;
 	s32 target_value;	/* Do not change to 64 bit */
+	s32 target_per_cpu[NR_CPUS];
 	s32 default_value;
 	s32 no_constraint_value;
 	enum pm_qos_type type;
@@ -115,8 +133,9 @@
 	return req->dev != NULL;
 }
 
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
-			 enum pm_qos_req_action action, int value);
+int pm_qos_update_target(struct pm_qos_constraints *c,
+				struct pm_qos_request *req,
+				enum pm_qos_req_action action, int value);
 bool pm_qos_update_flags(struct pm_qos_flags *pqf,
 			 struct pm_qos_flags_request *req,
 			 enum pm_qos_req_action action, s32 val);
@@ -129,6 +148,8 @@
 void pm_qos_remove_request(struct pm_qos_request *req);
 
 int pm_qos_request(int pm_qos_class);
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu);
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask);
 int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
 int pm_qos_request_active(struct pm_qos_request *req);
@@ -166,7 +187,7 @@
 
 static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
 {
-	return dev->power.qos->resume_latency_req->data.pnode.prio;
+	return dev->power.qos->resume_latency_req->data.lat.node.prio;
 }
 
 static inline s32 dev_pm_qos_requested_flags(struct device *dev)
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3965503..ecfb4cac 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -18,6 +18,7 @@
 #include <linux/leds.h>
 #include <linux/spinlock.h>
 #include <linux/notifier.h>
+#include <linux/types.h>
 
 /*
  * All voltages, currents, charges, energies, time and temperatures in uV,
@@ -148,6 +149,12 @@
 	POWER_SUPPLY_PROP_SCOPE,
 	POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT,
 	POWER_SUPPLY_PROP_CALIBRATE,
+	/* Local extensions */
+	POWER_SUPPLY_PROP_USB_HC,
+	POWER_SUPPLY_PROP_USB_OTG,
+	POWER_SUPPLY_PROP_CHARGE_ENABLED,
+	/* Local extensions of type int64_t */
+	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
 	POWER_SUPPLY_PROP_MODEL_NAME,
 	POWER_SUPPLY_PROP_MANUFACTURER,
@@ -175,6 +182,7 @@
 union power_supply_propval {
 	int intval;
 	const char *strval;
+	int64_t int64val;
 };
 
 struct device_node;
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index c668c86..485cc8e 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -71,6 +71,8 @@
 ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
 	char *str, size_t len);
 
+void ramoops_console_write_buf(const char *buf, size_t size);
+
 /*
  * Ramoops platform data
  * @mem_size	memory size for ramoops
diff --git a/include/linux/qmi_encdec.h b/include/linux/qmi_encdec.h
new file mode 100644
index 0000000..66c3d84
--- /dev/null
+++ b/include/linux/qmi_encdec.h
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_ENCDEC_H_
+#define _QMI_ENCDEC_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+
+#define QMI_REQUEST_CONTROL_FLAG 0x00
+#define QMI_RESPONSE_CONTROL_FLAG 0x02
+#define QMI_INDICATION_CONTROL_FLAG 0x04
+#define QMI_HEADER_SIZE 7
+
+/**
+ * elem_type - Enum to identify the data type of elements in a data
+ *             structure.
+ */
+enum elem_type {
+	QMI_OPT_FLAG = 1,
+	QMI_DATA_LEN,
+	QMI_UNSIGNED_1_BYTE,
+	QMI_UNSIGNED_2_BYTE,
+	QMI_UNSIGNED_4_BYTE,
+	QMI_UNSIGNED_8_BYTE,
+	QMI_SIGNED_2_BYTE_ENUM,
+	QMI_SIGNED_4_BYTE_ENUM,
+	QMI_STRUCT,
+	QMI_STRING,
+	QMI_EOTI,
+};
+
+/**
+ * array_type - Enum to identify if an element in a data structure is
+ *              an array. If so, it also identifies whether it is a
+ *              static-length or a variable-length array.
+ */
+enum array_type {
+	NO_ARRAY = 0,
+	STATIC_ARRAY = 1,
+	VAR_LEN_ARRAY = 2,
+};
+
+/**
+ * elem_info - Data structure to specify information about an element
+ *               in a data structure. An array of this data structure
+ *               can be used to specify info about a complex data
+ *               structure to be encoded/decoded.
+ *
+ * @data_type: Data type of this element.
+ * @elem_len: Array length of this element, if an array.
+ * @elem_size: Size of a single instance of this data type.
+ * @is_array: Array type of this element.
+ * @tlv_type: QMI message specific type to identify which element
+ *            is present in an incoming message.
+ * @offset: To identify the address of the first instance of this
+ *          element in the data structure.
+ * @ei_array: Array to provide information about the nested structure
+ *            within a data structure to be encoded/decoded.
+ */
+struct elem_info {
+	enum elem_type data_type;
+	uint32_t elem_len;
+	uint32_t elem_size;
+	enum array_type is_array;
+	uint8_t tlv_type;
+	uint32_t offset;
+	struct elem_info *ei_array;
+};
+
+/**
+ * msg_desc - Describes the main/outer structure to be
+ *		  encoded/decoded.
+ *
+ * @msg_id: QMI message ID.
+ * @max_msg_len: Maximum possible length of the QMI message.
+ * @ei_array: Array to provide information about a data structure.
+ */
+struct msg_desc {
+	uint16_t msg_id;
+	int max_msg_len;
+	struct elem_info *ei_array;
+};
+
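+/*
+ * Illustrative sketch (not part of the API): describing a trivial
+ * message that carries one mandatory uint32_t. The message ID, TLV
+ * type and maximum length below are hypothetical placeholders.
+ *
+ *	struct my_req {
+ *		uint32_t value;
+ *	};
+ *
+ *	static struct elem_info my_req_ei[] = {
+ *		{
+ *			.data_type = QMI_UNSIGNED_4_BYTE,
+ *			.elem_len  = 1,
+ *			.elem_size = sizeof(uint32_t),
+ *			.is_array  = NO_ARRAY,
+ *			.tlv_type  = 0x01,
+ *			.offset    = offsetof(struct my_req, value),
+ *		},
+ *		{
+ *			.data_type = QMI_EOTI,
+ *		},
+ *	};
+ *
+ *	static struct msg_desc my_req_desc = {
+ *		.msg_id      = 0x0020,
+ *		.max_msg_len = 7,
+ *		.ei_array    = my_req_ei,
+ *	};
+ */
+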
+struct qmi_header {
+	unsigned char cntl_flag;
+	uint16_t txn_id;
+	uint16_t msg_id;
+	uint16_t msg_len;
+} __attribute__((__packed__));
+
+static inline void encode_qmi_header(unsigned char *buf,
+			unsigned char cntl_flag, uint16_t txn_id,
+			uint16_t msg_id, uint16_t msg_len)
+{
+	struct qmi_header *hdr = (struct qmi_header *)buf;
+
+	hdr->cntl_flag = cntl_flag;
+	hdr->txn_id = txn_id;
+	hdr->msg_id = msg_id;
+	hdr->msg_len = msg_len;
+}
+
+static inline void decode_qmi_header(unsigned char *buf,
+			unsigned char *cntl_flag, uint16_t *txn_id,
+			uint16_t *msg_id, uint16_t *msg_len)
+{
+	struct qmi_header *hdr = (struct qmi_header *)buf;
+
+	*cntl_flag = hdr->cntl_flag;
+	*txn_id = hdr->txn_id;
+	*msg_id = hdr->msg_id;
+	*msg_len = hdr->msg_len;
+}
+
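+/*
+ * Illustrative sketch (not part of the API): stamping the 7-byte QMI
+ * header ahead of an already-encoded payload. Buffer, length and ID
+ * names are hypothetical.
+ *
+ *	unsigned char msg[QMI_HEADER_SIZE + MY_MAX_MSG_LEN];
+ *
+ *	payload_len = encode_payload(msg + QMI_HEADER_SIZE);
+ *	encode_qmi_header(msg, QMI_REQUEST_CONTROL_FLAG, txn_id,
+ *			  msg_id, payload_len);
+ */
+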
+#ifdef CONFIG_QMI_ENCDEC
+/**
+ * qmi_kernel_encode() - Encode to QMI message wire format
+ * @desc: Pointer to structure descriptor.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @out_buf_len: Length of the out buffer.
+ * @in_c_struct: C Structure to be encoded.
+ *
+ * @return: size of encoded message on success, < 0 on error.
+ */
+int qmi_kernel_encode(struct msg_desc *desc,
+		      void *out_buf, uint32_t out_buf_len,
+		      void *in_c_struct);
+
+/**
+ * qmi_kernel_decode() - Decode to C Structure format
+ * @desc: Pointer to structure descriptor.
+ * @out_c_struct: Buffer to hold the decoded C structure.
+ * @in_buf: Buffer containing the QMI message to be decoded.
+ * @in_buf_len: Length of the incoming QMI message.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_kernel_decode(struct msg_desc *desc, void *out_c_struct,
+		      void *in_buf, uint32_t in_buf_len);
+
+/**
+ * qmi_verify_max_msg_len() - Verify the maximum length of a QMI message
+ * @desc: Pointer to structure descriptor.
+ *
+ * @return: true if the maximum message length embedded in structure
+ *          descriptor matches the calculated value, else false.
+ */
+bool qmi_verify_max_msg_len(struct msg_desc *desc);
+
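+/*
+ * Illustrative sketch (not part of the API): encoding a request and
+ * decoding a response using caller-defined descriptors. The descriptor
+ * and structure names are hypothetical, and the 7-byte QMI header is
+ * assumed to be handled separately.
+ *
+ *	len = qmi_kernel_encode(&my_req_desc, buf + QMI_HEADER_SIZE,
+ *				buf_len - QMI_HEADER_SIZE, &req);
+ *	if (len < 0)
+ *		return len;
+ *
+ *	rc = qmi_kernel_decode(&my_resp_desc, &resp,
+ *			       rx_buf + QMI_HEADER_SIZE,
+ *			       rx_len - QMI_HEADER_SIZE);
+ */
+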
+#else
+static inline int qmi_kernel_encode(struct msg_desc *desc,
+				    void *out_buf, uint32_t out_buf_len,
+				    void *in_c_struct)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int qmi_kernel_decode(struct msg_desc *desc,
+				    void *out_c_struct,
+				    void *in_buf, uint32_t in_buf_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline bool qmi_verify_max_msg_len(struct msg_desc *desc)
+{
+	return false;
+}
+#endif
+
+#endif
diff --git a/include/linux/regulator/stub-regulator.h b/include/linux/regulator/stub-regulator.h
new file mode 100644
index 0000000..1155d82
--- /dev/null
+++ b/include/linux/regulator/stub-regulator.h
@@ -0,0 +1,54 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __STUB_REGULATOR_H__
+#define __STUB_REGULATOR_H__
+
+#include <linux/regulator/machine.h>
+
+#define STUB_REGULATOR_DRIVER_NAME "stub-regulator"
+
+/**
+ * struct stub_regulator_pdata - stub regulator device data
+ * @init_data:		regulator constraints
+ * @hpm_min_load:	minimum load in uA that will result in the regulator
+ *			being set to high power mode
+ * @system_uA:		current drawn from regulator not accounted for by any
+ *			regulator framework consumer
+ */
+struct stub_regulator_pdata {
+	struct regulator_init_data	init_data;
+	int				hpm_min_load;
+	int				system_uA;
+};
+
+#ifdef CONFIG_REGULATOR_STUB
+
+/**
+ * regulator_stub_init() - register platform driver for stub-regulator
+ *
+ * This initialization function should be called in systems in which driver
+ * registration ordering must be controlled precisely.
+ */
+
+int __init regulator_stub_init(void);
+
+#else
+
+static inline int __init regulator_stub_init(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_REGULATOR_STUB */
+
+#endif
diff --git a/include/linux/remote_spinlock.h b/include/linux/remote_spinlock.h
new file mode 100644
index 0000000..a0b6609
--- /dev/null
+++ b/include/linux/remote_spinlock.h
@@ -0,0 +1,100 @@
+/* Copyright (c) 2008-2009, 2011, 2013-2015 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_REMOTE_SPINLOCK_H
+#define __LINUX_REMOTE_SPINLOCK_H
+
+#include <linux/spinlock.h>
+#include <linux/msm_remote_spinlock.h>
+
+/* Grabbing a local spin lock before going for a remote lock has several
+ * advantages:
+ * 1. Get calls to preempt enable/disable and IRQ save/restore for free.
+ * 2. For UP kernel, there is no overhead.
+ * 3. Reduces the possibility of executing the remote spin lock code. This is
+ *    especially useful when the remote CPUs' mutual exclusion instructions
+ *    don't work with the local CPUs' instructions. In such cases, one has to
+ *    use software based mutex algorithms (e.g. Lamport's bakery algorithm)
+ *    which could get expensive when the no. of contending CPUs is high.
+ * 4. In the case of a software-based mutex algorithm, the execution time will
+ *    be smaller since the number of contending CPUs is reduced by having just
+ *    one contender for all the local CPUs.
+ * 5. Get most of the spin lock debug features for free.
+ * 6. The code will continue to work "gracefully" even when the remote spin
+ *    lock code is stubbed out for debug purposes or when there is no remote
+ *    CPU in some board/machine types.
+ */
+typedef struct {
+	spinlock_t local;
+	_remote_spinlock_t remote;
+} remote_spinlock_t;
+
+#define remote_spin_lock_init(lock, id) \
+	({ \
+		spin_lock_init(&((lock)->local)); \
+		_remote_spin_lock_init(id, &((lock)->remote)); \
+	})
+#define remote_spin_lock(lock) \
+	do { \
+		spin_lock(&((lock)->local)); \
+		_remote_spin_lock(&((lock)->remote)); \
+	} while (0)
+#define remote_spin_unlock(lock) \
+	do { \
+		_remote_spin_unlock(&((lock)->remote)); \
+		spin_unlock(&((lock)->local)); \
+	} while (0)
+#define remote_spin_lock_irqsave(lock, flags) \
+	do { \
+		spin_lock_irqsave(&((lock)->local), flags); \
+		_remote_spin_lock(&((lock)->remote)); \
+	} while (0)
+#define remote_spin_unlock_irqrestore(lock, flags) \
+	do { \
+		_remote_spin_unlock(&((lock)->remote)); \
+		spin_unlock_irqrestore(&((lock)->local), flags); \
+	} while (0)
+#define remote_spin_trylock(lock) \
+	({ \
+		spin_trylock(&((lock)->local)) \
+		? _remote_spin_trylock(&((lock)->remote)) \
+			? 1 \
+			: ({ spin_unlock(&((lock)->local)); 0; }) \
+		: 0; \
+	})
+#define remote_spin_trylock_irqsave(lock, flags) \
+	({ \
+		spin_trylock_irqsave(&((lock)->local), flags) \
+		? _remote_spin_trylock(&((lock)->remote)) \
+			? 1 \
+			: ({ spin_unlock_irqrestore(&((lock)->local), flags); \
+				0; }) \
+		: 0; \
+	})
+#define remote_spin_lock_rlock_id(lock, tid) \
+	_remote_spin_lock_rlock_id(&((lock)->remote), tid)
+
+#define remote_spin_unlock_rlock(lock) \
+	_remote_spin_unlock_rlock(&((lock)->remote))
+
+#define remote_spin_release(lock, pid) \
+	_remote_spin_release(&((lock)->remote), pid)
+
+#define remote_spin_release_all(pid) \
+	_remote_spin_release_all(pid)
+
+#define remote_spin_owner(lock) \
+	_remote_spin_owner(&((lock)->remote))
+
+#define remote_spin_get_hw_spinlocks_element(lock) \
+	_remote_spin_get_hw_spinlocks_element(&((lock)->remote))
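+
+/*
+ * Illustrative sketch (not part of the API): protecting shared memory
+ * that is also touched by a remote processor. The lock name and lock
+ * id string are hypothetical.
+ *
+ *	static remote_spinlock_t smem_lock;
+ *	unsigned long flags;
+ *
+ *	remote_spin_lock_init(&smem_lock, "S:7");
+ *	...
+ *	remote_spin_lock_irqsave(&smem_lock, flags);
+ *	update_shared_region();
+ *	remote_spin_unlock_irqrestore(&smem_lock, flags);
+ */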
+#endif
diff --git a/include/linux/rndis_ipa.h b/include/linux/rndis_ipa.h
new file mode 100644
index 0000000..05d0a66
--- /dev/null
+++ b/include/linux/rndis_ipa.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RNDIS_IPA_H_
+#define _RNDIS_IPA_H_
+
+#include <linux/ipa.h>
+
+/*
+ * @priv: private data given upon ipa_connect
+ * @evt: event enum, should be IPA_WRITE_DONE
+ * @data: For the TX path, the data field is the sent socket buffer.
+ */
+typedef void (*ipa_callback)(void *priv,
+		enum ipa_dp_evt_type evt,
+		unsigned long data);
+
+/*
+ * struct ipa_usb_init_params - parameters for driver initialization API
+ *
+ * @device_ready_notify: callback supplied by USB core driver
+ * This callback shall be called by the Netdev once the device
+ * is ready to receive data from tethered PC.
+ * @ipa_rx_notify: The network driver will set this callback (out parameter).
+ * This callback shall be supplied to ipa_connect upon pipe
+ * connection (USB->IPA); once the IPA driver receives data packets
+ * from the USB pipe destined for Apps, this callback will be called.
+ * @ipa_tx_notify: The network driver will set this callback (out parameter).
+ * This callback shall be supplied to ipa_connect upon pipe
+ * connection (IPA->USB); once the IPA driver sends packets destined
+ * for USB, IPA BAM will notify of Tx completion.
+ * @host_ethaddr: host Ethernet address in network order
+ * @device_ethaddr: device Ethernet address in network order
+ * @private: The network driver will set this pointer (out parameter).
+ * This pointer will hold the network device for later interaction
+ * between the USB driver and the network driver.
+ * @skip_ep_cfg: boolean field that determines if Apps-processor
+ *  should or should not configure this end-point.
+ */
+struct ipa_usb_init_params {
+	void (*device_ready_notify)(void);
+	ipa_callback ipa_rx_notify;
+	ipa_callback ipa_tx_notify;
+	u8 host_ethaddr[ETH_ALEN];
+	u8 device_ethaddr[ETH_ALEN];
+	void *private;
+	bool skip_ep_cfg;
+};
+
+#ifdef CONFIG_RNDIS_IPA
+
+int rndis_ipa_init(struct ipa_usb_init_params *params);
+
+int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+			u32 ipa_to_usb_hdl,
+			u32 max_xfer_size_bytes_to_dev,
+			u32 max_packet_number_to_dev,
+			u32 max_xfer_size_bytes_to_host,
+			void *private);
+
+int rndis_ipa_pipe_disconnect_notify(void *private);
+
+void rndis_ipa_cleanup(void *private);
+
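+/*
+ * Illustrative sketch (not part of the API): bringing up the RNDIS
+ * data path from a USB function driver. Handle, size and callback
+ * names are hypothetical and error handling is omitted.
+ *
+ *	struct ipa_usb_init_params params = {
+ *		.device_ready_notify = my_device_ready_cb,
+ *	};
+ *
+ *	memcpy(params.host_ethaddr, host_mac, ETH_ALEN);
+ *	memcpy(params.device_ethaddr, dev_mac, ETH_ALEN);
+ *	rndis_ipa_init(&params);
+ *	(rndis_ipa_init fills ipa_rx_notify, ipa_tx_notify and private)
+ *
+ *	rndis_ipa_pipe_connect_notify(usb_to_ipa_hdl, ipa_to_usb_hdl,
+ *				      max_xfer_to_dev, max_pkts_to_dev,
+ *				      max_xfer_to_host, params.private);
+ *	...
+ *	rndis_ipa_pipe_disconnect_notify(params.private);
+ *	rndis_ipa_cleanup(params.private);
+ */
+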
+#else /* CONFIG_RNDIS_IPA*/
+
+static inline int rndis_ipa_init(struct ipa_usb_init_params *params)
+{
+	return -ENOMEM;
+}
+
+static inline int rndis_ipa_pipe_connect_notify(u32 usb_to_ipa_hdl,
+			u32 ipa_to_usb_hdl,
+			u32 max_xfer_size_bytes_to_dev,
+			u32 max_packet_number_to_dev,
+			u32 max_xfer_size_bytes_to_host,
+			void *private)
+{
+	return -ENOMEM;
+}
+
+static inline int rndis_ipa_pipe_disconnect_notify(void *private)
+{
+	return -ENOMEM;
+}
+
+static inline void rndis_ipa_cleanup(void *private)
+{
+
+}
+#endif /* CONFIG_RNDIS_IPA */
+
+#endif /* _RNDIS_IPA_H_ */
diff --git a/include/linux/rq_stats.h b/include/linux/rq_stats.h
new file mode 100644
index 0000000..44cd842
--- /dev/null
+++ b/include/linux/rq_stats.h
@@ -0,0 +1,31 @@
+/* Copyright (c) 2011,2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+struct rq_data {
+	unsigned int rq_avg;
+	unsigned long rq_poll_jiffies;
+	unsigned long def_timer_jiffies;
+	unsigned long rq_poll_last_jiffy;
+	unsigned long rq_poll_total_jiffies;
+	unsigned long def_timer_last_jiffy;
+	unsigned int hotplug_disabled;
+	int64_t def_start_time;
+	struct attribute_group *attr_group;
+	struct kobject *kobj;
+	struct work_struct def_timer_work;
+	int init;
+};
+
+extern spinlock_t rq_lock;
+extern struct rq_data rq_info;
+extern struct workqueue_struct *rq_wq;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e9c009d..4c1a2f1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -174,6 +174,9 @@
 extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
+extern void sched_update_nr_prod(int cpu, long delta, bool inc);
+extern void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg);
+
 extern void calc_global_load(unsigned long ticks);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
@@ -315,6 +318,25 @@
 /* Task command name length */
 #define TASK_COMM_LEN 16
 
+extern const char *sched_window_reset_reasons[];
+
+enum task_event {
+	PUT_PREV_TASK   = 0,
+	PICK_NEXT_TASK  = 1,
+	TASK_WAKE       = 2,
+	TASK_MIGRATE    = 3,
+	TASK_UPDATE     = 4,
+	IRQ_UPDATE	= 5,
+};
+
+/* Note: this need to be in sync with migrate_type_names array */
+enum migrate_types {
+	GROUP_TO_RQ,
+	RQ_TO_GROUP,
+	RQ_TO_RQ,
+	GROUP_TO_GROUP,
+};
+
 #include <linux/spinlock.h>
 
 /*
@@ -1335,6 +1357,47 @@
 };
 #endif
 
+#define RAVG_HIST_SIZE_MAX  5
+#define NUM_BUSY_BUCKETS 10
+
+/* ravg represents frequency scaled cpu-demand of tasks */
+struct ravg {
+	/*
+	 * 'mark_start' marks the beginning of an event (task waking up, task
+	 * starting to execute, task being preempted) within a window
+	 *
+	 * 'sum' represents how runnable a task has been within current
+	 * window. It incorporates both running time and wait time and is
+	 * frequency scaled.
+	 *
+	 * 'sum_history' keeps track of history of 'sum' seen over previous
+	 * RAVG_HIST_SIZE windows. Windows where task was entirely sleeping are
+	 * ignored.
+	 *
+	 * 'demand' represents maximum sum seen over previous
+	 * sysctl_sched_ravg_hist_size windows. 'demand' could drive frequency
+	 * demand for tasks.
+	 *
+	 * 'curr_window' represents task's contribution to cpu busy time
+	 * statistics (rq->curr_runnable_sum) in current window
+	 *
+	 * 'prev_window' represents task's contribution to cpu busy time
+	 * statistics (rq->prev_runnable_sum) in previous window
+	 *
+	 * 'pred_demand' represents task's current predicted cpu busy time
+	 *
+	 * 'busy_buckets' groups historical busy time into different buckets
+	 * used for prediction
+	 */
+	u64 mark_start;
+	u32 sum, demand;
+	u32 sum_history[RAVG_HIST_SIZE_MAX];
+	u32 curr_window, prev_window;
+	u16 active_windows;
+	u32 pred_demand;
+	u8 busy_buckets[NUM_BUSY_BUCKETS];
+};
+
 struct sched_entity {
 	struct load_weight	load;		/* for load-balancing */
 	struct rb_node		run_node;
@@ -1505,6 +1568,20 @@
 	const struct sched_class *sched_class;
 	struct sched_entity se;
 	struct sched_rt_entity rt;
+#ifdef CONFIG_SCHED_HMP
+	struct ravg ravg;
+	/*
+	 * 'init_load_pct' represents the initial task load assigned to children
+	 * of this task
+	 */
+	u32 init_load_pct;
+	u64 last_wake_ts;
+	u64 last_switch_out_ts;
+	u64 last_cpu_selected_ts;
+	struct related_thread_group *grp;
+	struct list_head grp_list;
+	u64 cpu_cycles;
+#endif
 #ifdef CONFIG_CGROUP_SCHED
 	struct task_group *sched_task_group;
 #endif
@@ -2254,6 +2331,7 @@
 /*
  * Per process flags
  */
+#define PF_WAKE_UP_IDLE 0x00000002	/* try to wake up on an idle CPU */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@@ -2436,6 +2514,93 @@
 }
 #endif
 
+struct sched_load {
+	unsigned long prev_load;
+	unsigned long new_task_load;
+	unsigned long predicted_load;
+};
+
+extern int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle);
+extern u32 sched_get_wake_up_idle(struct task_struct *p);
+
+struct cpu_cycle_counter_cb {
+	u64 (*get_cpu_cycle_counter)(int cpu);
+};
+
+#ifdef CONFIG_SCHED_HMP
+extern int sched_set_window(u64 window_start, unsigned int window_size);
+extern unsigned long sched_get_busy(int cpu);
+extern void sched_get_cpus_busy(struct sched_load *busy,
+				const struct cpumask *query_cpus);
+extern void sched_set_io_is_busy(int val);
+extern int sched_set_boost(int enable);
+extern int sched_set_init_task_load(struct task_struct *p, int init_load_pct);
+extern u32 sched_get_init_task_load(struct task_struct *p);
+extern int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cpu_pwr_cost(int cpu);
+extern int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost);
+extern unsigned int sched_get_static_cluster_pwr_cost(int cpu);
+extern int sched_update_freq_max_load(const cpumask_t *cpumask);
+extern void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+							u32 fmin, u32 fmax);
+extern void sched_set_cpu_cstate(int cpu, int cstate,
+			 int wakeup_energy, int wakeup_latency);
+extern void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+				int wakeup_energy, int wakeup_latency);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern u64 sched_ktime_clock(void);
+extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
+extern unsigned int sched_get_group_id(struct task_struct *p);
+
+#else /* CONFIG_SCHED_HMP */
+static inline u64 sched_ktime_clock(void)
+{
+	return 0;
+}
+
+static inline int
+register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	return 0;
+}
+
+static inline int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	return -EINVAL;
+}
+static inline unsigned long sched_get_busy(int cpu)
+{
+	return 0;
+}
+static inline void sched_get_cpus_busy(struct sched_load *busy,
+				       const struct cpumask *query_cpus) { }
+
+static inline void sched_set_io_is_busy(int val) { }
+
+static inline int sched_set_boost(int enable)
+{
+	return -EINVAL;
+}
+
+static inline int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+	return 0;
+}
+
+static inline void sched_update_cpu_freq_min_max(const cpumask_t *cpus,
+					u32 fmin, u32 fmax) { }
+
+static inline void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+}
+
+static inline void sched_set_cluster_dstate(const cpumask_t *cluster_cpus,
+			int dstate, int wakeup_energy, int wakeup_latency)
+{
+}
+#endif /* CONFIG_SCHED_HMP */
+
 #ifdef CONFIG_NO_HZ_COMMON
 void calc_load_enter_idle(void);
 void calc_load_exit_idle(void);
@@ -2444,6 +2609,14 @@
 static inline void calc_load_exit_idle(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+static inline void set_wake_up_idle(bool enabled)
+{
+	if (enabled)
+		current->flags |= PF_WAKE_UP_IDLE;
+	else
+		current->flags &= ~PF_WAKE_UP_IDLE;
+}
+
 /*
  * Do not use outside of architecture code which knows its limitations.
  *
@@ -2461,6 +2634,7 @@
 
 
 extern void sched_clock_init(void);
+extern int sched_clock_initialized(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 static inline void sched_clock_tick(void)
@@ -2537,7 +2711,7 @@
 task_sched_runtime(struct task_struct *task);
 
 /* sched_exec is called by processes performing an exec */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
 extern void sched_exec(void);
 #else
 #define sched_exec()   {}
@@ -2672,6 +2846,7 @@
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_process_no_notif(struct task_struct *tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
@@ -2680,6 +2855,12 @@
 #endif
 extern int sched_fork(unsigned long clone_flags, struct task_struct *p);
 extern void sched_dead(struct task_struct *p);
+#ifdef CONFIG_SCHED_HMP
+extern void sched_exit(struct task_struct *p);
+#else
+static inline void sched_exit(struct task_struct *p) { }
+#endif
+
 
 extern void proc_caches_init(void);
 extern void flush_signals(struct task_struct *);
@@ -3508,6 +3689,8 @@
 
 #endif /* CONFIG_SMP */
 
+extern struct atomic_notifier_head load_alert_notifier_head;
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 22db1e6..6726f05 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -18,6 +18,32 @@
 extern unsigned int sysctl_sched_min_granularity;
 extern unsigned int sysctl_sched_wakeup_granularity;
 extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_wake_to_idle;
+
+#ifdef CONFIG_SCHED_HMP
+extern int sysctl_sched_freq_inc_notify;
+extern int sysctl_sched_freq_dec_notify;
+extern unsigned int sysctl_sched_window_stats_policy;
+extern unsigned int sysctl_sched_ravg_hist_size;
+extern unsigned int sysctl_sched_cpu_high_irqload;
+extern unsigned int sysctl_sched_init_task_load_pct;
+extern unsigned int sysctl_sched_spill_nr_run;
+extern unsigned int sysctl_sched_spill_load_pct;
+extern unsigned int sysctl_sched_upmigrate_pct;
+extern unsigned int sysctl_sched_downmigrate_pct;
+extern unsigned int sysctl_early_detection_duration;
+extern unsigned int sysctl_sched_boost;
+extern unsigned int sysctl_sched_small_wakee_task_load_pct;
+extern unsigned int sysctl_sched_big_waker_task_load_pct;
+extern unsigned int sysctl_sched_select_prev_cpu_us;
+extern unsigned int sysctl_sched_enable_colocation;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
+extern unsigned int sysctl_sched_new_task_windows;
+extern unsigned int sysctl_sched_pred_alert_freq;
+extern unsigned int sysctl_sched_freq_aggregate;
+extern unsigned int sysctl_sched_enable_thread_grouping;
+extern unsigned int sysctl_sched_freq_aggregate_threshold_pct;
+#endif /* CONFIG_SCHED_HMP */
 
 enum sched_tunable_scaling {
 	SCHED_TUNABLESCALING_NONE,
@@ -43,6 +69,18 @@
 		loff_t *ppos);
 #endif
 
+extern int sched_migrate_notify_proc_handler(struct ctl_table *table,
+		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int sched_hmp_proc_update_handler(struct ctl_table *table,
+		int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int sched_boost_handler(struct ctl_table *table, int write,
+			void __user *buffer, size_t *lenp, loff_t *ppos);
+
+extern int sched_window_update_handler(struct ctl_table *table,
+		 int write, void __user *buffer, size_t *lenp, loff_t *ppos);
+
 /*
  *  control realtime throttling:
  *
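For reference only, a hedged sketch (not part of the patch): under CONFIG_SCHED_HMP the new tunables are expected to be wired into a sysctl table together with the handlers declared above, roughly like this illustrative entry for sched_boost.

#include <linux/sysctl.h>
#include <linux/sched/sysctl.h>

#ifdef CONFIG_SCHED_HMP
/* Illustrative table entry; the real table lives in kernel/sysctl.c. */
static struct ctl_table example_sched_hmp_table[] = {
	{
		.procname	= "sched_boost",
		.data		= &sysctl_sched_boost,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= sched_boost_handler,
	},
	{ }
};
#endif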
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 3442014..1a94397 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -66,6 +66,7 @@
 	void		(*set_ldisc)(struct uart_port *, struct ktermios *);
 	void		(*pm)(struct uart_port *, unsigned int state,
 			      unsigned int oldstate);
+	void		(*wake_peer)(struct uart_port *);
 
 	/*
 	 * Return a string describing the type of the port
diff --git a/include/linux/show_mem_notifier.h b/include/linux/show_mem_notifier.h
new file mode 100644
index 0000000..b1265f8
--- /dev/null
+++ b/include/linux/show_mem_notifier.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/notifier.h>
+
+int show_mem_notifier_register(struct notifier_block *nb);
+
+int show_mem_notifier_unregister(struct notifier_block *nb);
+
+void show_mem_call_notifiers(void);
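A minimal client sketch for the new show_mem notifier interface (the callback and init hook below are hypothetical; only the register/unregister/call API comes from this header):

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/printk.h>
#include <linux/show_mem_notifier.h>

/* Called whenever show_mem_call_notifiers() runs. */
static int example_show_mem(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	pr_info("example: extra memory state could be dumped here\n");
	return NOTIFY_OK;
}

static struct notifier_block example_show_mem_nb = {
	.notifier_call = example_show_mem,
};

static int __init example_show_mem_init(void)
{
	return show_mem_notifier_register(&example_show_mem_nb);
}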
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
new file mode 100644
index 0000000..f91bdea
--- /dev/null
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __LLCC_QCOM__
+#define __LLCC_QCOM__
+
+/**
+ * llcc_slice_desc - Cache slice descriptor
+ * @llcc_slice_id: llcc slice id
+ * @llcc_slice_size: Size allocated for the llcc slice
+ * @dev: pointer to llcc device
+ */
+struct llcc_slice_desc {
+	int llcc_slice_id;
+	size_t llcc_slice_size;
+	struct device *dev;
+};
+
+/**
+ * llcc_slice_config - Data associated with the llcc slice
+ * @name: name of the use case associated with the llcc slice
+ * @usecase_id: usecase id for which the llcc slice is used
+ * @slice_id: llcc slice id assigned to each slice
+ * @max_cap: maximum capacity of the llcc slice
+ * @priority: priority of the llcc slice
+ * @fixed_size: when true, the llcc slice cannot grow beyond its allocated size
+ * @bonus_ways: bonus ways associated with llcc slice
+ * @res_ways: reserved ways associated with llcc slice
+ * @cache_mode: mode of the llcc slice
+ * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
+ * @dis_cap_alloc: Disable capacity based allocation
+ * @retain_on_pc: Retain through power collapse
+ * @activate_on_init: activate the slice on init
+ */
+struct llcc_slice_config {
+	const char *name;
+	int usecase_id;
+	int slice_id;
+	u32 max_cap;
+	u32 priority;
+	bool fixed_size;
+	u32 bonus_ways;
+	u32 res_ways;
+	u32 cache_mode;
+	u32 probe_target_ways;
+	bool dis_cap_alloc;
+	bool retain_on_pc;
+	u32 activate_on_init;
+};
+
+#ifdef CONFIG_QCOM_LLCC
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @dev: Device pointer of the client
+ * @name: Name of the use case
+ */
+struct llcc_slice_desc *llcc_slice_getd(struct device *dev, const char *name);
+
+/**
+ * llcc_slice_putd - put back the llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_id - get slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_size - get the llcc slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc);
+
+/**
+ * qcom_llcc_probe - program the sct table
+ * @pdev: platform device pointer
+ * @table: soc sct table
+ */
+int qcom_llcc_probe(struct platform_device *pdev,
+		      const struct llcc_slice_config *table, u32 sz);
+/**
+ * qcom_llcc_remove - clean up llcc driver
+ * @pdev: platform driver pointer
+ */
+int qcom_llcc_remove(struct platform_device *pdev);
+#else
+static inline struct llcc_slice_desc *llcc_slice_getd(struct device *dev,
+			const char *name)
+{
+	return NULL;
+}
+
+static inline void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+
+}
+
+static inline int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+	return -EINVAL;
+}
+
+static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+	return 0;
+}
+static inline int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+	return -EINVAL;
+}
+
+static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+	return -EINVAL;
+}
+static inline int qcom_llcc_probe(struct platform_device *pdev,
+		      const struct llcc_slice_config *table, u32 sz)
+{
+	return -ENODEV;
+}
+
+static inline int qcom_llcc_remove(struct platform_device *pdev)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif
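A hedged usage sketch of the client API above (the "gpu" use-case name and error handling are illustrative; since the header does not state whether llcc_slice_getd() returns NULL or an ERR_PTR on failure, both are checked):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/soc/qcom/llcc-qcom.h>

static int example_llcc_setup(struct device *dev)
{
	struct llcc_slice_desc *desc;
	int ret;

	desc = llcc_slice_getd(dev, "gpu");
	if (IS_ERR_OR_NULL(desc))
		return desc ? PTR_ERR(desc) : -ENODEV;

	ret = llcc_slice_activate(desc);
	if (ret) {
		llcc_slice_putd(desc);
		return ret;
	}

	dev_info(dev, "using LLCC slice %d (%zu bytes)\n",
		 llcc_get_slice_id(desc), llcc_get_slice_size(desc));
	return 0;
}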
diff --git a/include/linux/stm.h b/include/linux/stm.h
index 8369d8a..2ff852b 100644
--- a/include/linux/stm.h
+++ b/include/linux/stm.h
@@ -88,10 +88,14 @@
 	unsigned int		sw_nchannels;
 	unsigned int		sw_mmiosz;
 	unsigned int		hw_override;
+	bool			(*ost_configured)(void);
 	ssize_t			(*packet)(struct stm_data *, unsigned int,
 					  unsigned int, unsigned int,
 					  unsigned int, unsigned int,
 					  const unsigned char *);
+	ssize_t			(*ost_packet)(struct stm_data *stm_data,
+					  unsigned int size,
+					  const unsigned char *buf);
 	phys_addr_t		(*mmio_addr)(struct stm_data *, unsigned int,
 					     unsigned int, unsigned int);
 	int			(*link)(struct stm_data *, unsigned int,
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d971837..448321b 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -434,6 +434,7 @@
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
 extern void pm_print_active_wakeup_sources(void);
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
 
 static inline void lock_system_sleep(void)
 {
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 62be0786..9e207e3 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -27,6 +27,8 @@
 static inline void tick_cleanup_dead_cpu(int cpu) { }
 #endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
+extern u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns);
+
 #if defined(CONFIG_GENERIC_CLOCKEVENTS) && defined(CONFIG_SUSPEND)
 extern void tick_freeze(void);
 extern void tick_unfreeze(void);
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index be00761..41d81fb 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -8,6 +8,7 @@
 #include <linux/hardirq.h>
 #include <linux/perf_event.h>
 #include <linux/tracepoint.h>
+#include <linux/coresight-stm.h>
 
 struct trace_array;
 struct trace_buffer;
@@ -206,7 +207,8 @@
 				  struct trace_event_file *trace_file,
 				  unsigned long len);
 
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+			       unsigned long len);
 
 enum {
 	TRACE_EVENT_FL_FILTERED_BIT,
diff --git a/include/linux/usb/class-dual-role.h b/include/linux/usb/class-dual-role.h
new file mode 100644
index 0000000..c6df223
--- /dev/null
+++ b/include/linux/usb/class-dual-role.h
@@ -0,0 +1,129 @@
+#ifndef __LINUX_CLASS_DUAL_ROLE_H__
+#define __LINUX_CLASS_DUAL_ROLE_H__
+
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+
+struct device;
+
+enum dual_role_supported_modes {
+	DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP = 0,
+	DUAL_ROLE_SUPPORTED_MODES_DFP,
+	DUAL_ROLE_SUPPORTED_MODES_UFP,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_SUPPORTED_MODES_TOTAL,
+};
+
+enum {
+	DUAL_ROLE_PROP_MODE_UFP = 0,
+	DUAL_ROLE_PROP_MODE_DFP,
+	DUAL_ROLE_PROP_MODE_NONE,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_MODE_TOTAL,
+};
+
+enum {
+	DUAL_ROLE_PROP_PR_SRC = 0,
+	DUAL_ROLE_PROP_PR_SNK,
+	DUAL_ROLE_PROP_PR_NONE,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_PR_TOTAL,
+
+};
+
+enum {
+	DUAL_ROLE_PROP_DR_HOST = 0,
+	DUAL_ROLE_PROP_DR_DEVICE,
+	DUAL_ROLE_PROP_DR_NONE,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_DR_TOTAL,
+};
+
+enum {
+	DUAL_ROLE_PROP_VCONN_SUPPLY_NO = 0,
+	DUAL_ROLE_PROP_VCONN_SUPPLY_YES,
+/*The following should be the last element*/
+	DUAL_ROLE_PROP_VCONN_SUPPLY_TOTAL,
+};
+
+enum dual_role_property {
+	DUAL_ROLE_PROP_SUPPORTED_MODES = 0,
+	DUAL_ROLE_PROP_MODE,
+	DUAL_ROLE_PROP_PR,
+	DUAL_ROLE_PROP_DR,
+	DUAL_ROLE_PROP_VCONN_SUPPLY,
+};
+
+struct dual_role_phy_instance;
+
+/* Description of typec port */
+struct dual_role_phy_desc {
+	/* /sys/class/dual_role_usb/<name>/ */
+	const char *name;
+	enum dual_role_supported_modes supported_modes;
+	enum dual_role_property *properties;
+	size_t num_properties;
+
+	/* Callback for "cat /sys/class/dual_role_usb/<name>/<property>" */
+	int (*get_property)(struct dual_role_phy_instance *dual_role,
+			     enum dual_role_property prop,
+			     unsigned int *val);
+	/* Callback for "echo <value> >
+	 *                      /sys/class/dual_role_usb/<name>/<property>" */
+	int (*set_property)(struct dual_role_phy_instance *dual_role,
+			     enum dual_role_property prop,
+			     const unsigned int *val);
+	/* Decides whether userspace can change a specific property */
+	int (*property_is_writeable)(struct dual_role_phy_instance *dual_role,
+				      enum dual_role_property prop);
+};
+
+struct dual_role_phy_instance {
+	const struct dual_role_phy_desc *desc;
+
+	/* Driver private data */
+	void *drv_data;
+
+	struct device dev;
+	struct work_struct changed_work;
+};
+
+#if IS_ENABLED(CONFIG_DUAL_ROLE_USB_INTF)
+extern void dual_role_instance_changed(struct dual_role_phy_instance
+				       *dual_role);
+extern struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+				 const struct dual_role_phy_desc *desc);
+extern void devm_dual_role_instance_unregister(struct device *dev,
+					       struct dual_role_phy_instance
+					       *dual_role);
+extern int dual_role_get_property(struct dual_role_phy_instance *dual_role,
+				  enum dual_role_property prop,
+				  unsigned int *val);
+extern int dual_role_set_property(struct dual_role_phy_instance *dual_role,
+				  enum dual_role_property prop,
+				  const unsigned int *val);
+extern int dual_role_property_is_writeable(struct dual_role_phy_instance
+					   *dual_role,
+					   enum dual_role_property prop);
+extern void *dual_role_get_drvdata(struct dual_role_phy_instance *dual_role);
+#else /* CONFIG_DUAL_ROLE_USB_INTF */
+static inline void dual_role_instance_changed(struct dual_role_phy_instance
+				       *dual_role){}
+static inline struct dual_role_phy_instance *__must_check
+devm_dual_role_instance_register(struct device *parent,
+				 const struct dual_role_phy_desc *desc)
+{
+	return ERR_PTR(-ENOSYS);
+}
+static inline void devm_dual_role_instance_unregister(struct device *dev,
+					       struct dual_role_phy_instance
+					       *dual_role){}
+static inline void *dual_role_get_drvdata(struct dual_role_phy_instance
+		*dual_role)
+{
+	return ERR_PTR(-ENOSYS);
+}
+#endif /* CONFIG_DUAL_ROLE_USB_INTF */
+#endif /* __LINUX_CLASS_DUAL_ROLE_H__ */
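A minimal registration sketch for the dual-role class above (the names and the trivial getter are hypothetical; a real Type-C driver would report its actual port state):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/usb/class-dual-role.h>

static enum dual_role_property example_props[] = {
	DUAL_ROLE_PROP_MODE,
	DUAL_ROLE_PROP_PR,
	DUAL_ROLE_PROP_DR,
};

static int example_get_prop(struct dual_role_phy_instance *dual_role,
			    enum dual_role_property prop, unsigned int *val)
{
	*val = (prop == DUAL_ROLE_PROP_MODE) ? DUAL_ROLE_PROP_MODE_NONE : 0;
	return 0;
}

static const struct dual_role_phy_desc example_desc = {
	.name			= "otg",
	.supported_modes	= DUAL_ROLE_SUPPORTED_MODES_DFP_AND_UFP,
	.properties		= example_props,
	.num_properties		= ARRAY_SIZE(example_props),
	.get_property		= example_get_prop,
};

static int example_register(struct device *parent)
{
	struct dual_role_phy_instance *inst;

	inst = devm_dual_role_instance_register(parent, &example_desc);
	return PTR_ERR_OR_ZERO(inst);
}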
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4616a49..93f0253 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -581,6 +581,7 @@
 	struct config_group group;
 	struct list_head cfs_list;
 	struct usb_function_driver *fd;
+	struct usb_function *f;
 	int (*set_inst_name)(struct usb_function_instance *inst,
 			      const char *name);
 	void (*free_func_inst)(struct usb_function_instance *inst);
diff --git a/include/linux/usb/f_accessory.h b/include/linux/usb/f_accessory.h
new file mode 100644
index 0000000..ebe3c4d
--- /dev/null
+++ b/include/linux/usb/f_accessory.h
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_ACCESSORY_H
+#define __LINUX_USB_F_ACCESSORY_H
+
+#include <uapi/linux/usb/f_accessory.h>
+
+#endif /* __LINUX_USB_F_ACCESSORY_H */
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
new file mode 100644
index 0000000..4e84177
--- /dev/null
+++ b/include/linux/usb/f_mtp.h
@@ -0,0 +1,23 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __LINUX_USB_F_MTP_H
+#define __LINUX_USB_F_MTP_H
+
+#include <uapi/linux/usb/f_mtp.h>
+
+#endif /* __LINUX_USB_F_MTP_H */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 8e81f9e..a625a7b 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -27,7 +27,95 @@
 
 #define UDC_TRACE_STR_MAX	512
 
+/*
+ * The following are bit fields describing the usb_request.udc_priv word.
+ * These bit fields are set by function drivers that wish to queue
+ * usb_requests with sps/bam parameters.
+ */
+#define MSM_PIPE_ID_MASK		(0x1F)
+#define MSM_TX_PIPE_ID_OFS		(16)
+#define MSM_SPS_MODE			BIT(5)
+#define MSM_IS_FINITE_TRANSFER		BIT(6)
+#define MSM_PRODUCER			BIT(7)
+#define MSM_DISABLE_WB			BIT(8)
+#define MSM_ETD_IOC			BIT(9)
+#define MSM_INTERNAL_MEM		BIT(10)
+#define MSM_VENDOR_ID			BIT(16)
+
 struct usb_ep;
+struct usb_gadget;
+
+enum ep_type {
+	EP_TYPE_NORMAL = 0,
+	EP_TYPE_GSI,
+};
+
+/* Operations codes for GSI enabled EPs */
+enum gsi_ep_op {
+	GSI_EP_OP_CONFIG = 0,
+	GSI_EP_OP_STARTXFER,
+	GSI_EP_OP_STORE_DBL_INFO,
+	GSI_EP_OP_ENABLE_GSI,
+	GSI_EP_OP_UPDATEXFER,
+	GSI_EP_OP_RING_IN_DB,
+	GSI_EP_OP_ENDXFER,
+	GSI_EP_OP_GET_CH_INFO,
+	GSI_EP_OP_GET_XFER_IDX,
+	GSI_EP_OP_PREPARE_TRBS,
+	GSI_EP_OP_FREE_TRBS,
+	GSI_EP_OP_SET_CLR_BLOCK_DBL,
+	GSI_EP_OP_CHECK_FOR_SUSPEND,
+	GSI_EP_OP_DISABLE,
+};
+
+/*
+ * @buf_base_addr: Base pointer to buffer allocated for each GSI enabled EP.
+ *	TRBs point to buffers that are split from this pool. The size of the
+ *	buffer is num_bufs times buf_len. num_bufs and buf_len are determined
+ *	based on desired performance and aggregation size.
+ * @dma: DMA address corresponding to buf_base_addr.
+ * @num_bufs: Number of buffers associated with the GSI enabled EP. This
+ *	corresponds to the number of non-zlp TRBs allocated for the EP.
+ *	The value is determined based on desired performance for the EP.
+ * @buf_len: Size of each individual buffer is determined based on aggregation
+ *	negotiated as per the protocol. In case of no aggregation supported by
+ *	the protocol, we use default values.
+ */
+struct usb_gsi_request {
+	void *buf_base_addr;
+	dma_addr_t dma;
+	size_t num_bufs;
+	size_t buf_len;
+};
+
+/*
+ * @last_trb_addr: Address (LSB - based on alignment restrictions) of
+ *	last TRB in queue. Used to identify rollover case.
+ * @const_buffer_size: TRB buffer size in KB (similar to IPA aggregation
+ *	configuration). Must be aligned to Max USB Packet Size.
+ *	Should be 1 <= const_buffer_size <= 31.
+ * @depcmd_low_addr: Lower bits of the DEPCMD address used by the GSI hardware
+ *	to write the "Update Transfer" command.
+ * @depcmd_hi_addr: Upper bits of the DEPCMD address.
+ * @gevntcount_low_addr: GEVNCOUNT low address for GSI hardware to read and
+ *	clear processed events.
+ * @gevntcount_hi_addr:	GEVNCOUNT high address.
+ * @xfer_ring_len: length of transfer ring in bytes (must be integral
+ *	multiple of TRB size - 16B for xDCI).
+ * @xfer_ring_base_addr: physical base address of transfer ring. Address must
+ *	be aligned to xfer_ring_len rounded to power of two.
+ * @ch_req: Used to pass request specific info for certain operations on GSI EP
+ */
+struct gsi_channel_info {
+	u16 last_trb_addr;
+	u8 const_buffer_size;
+	u32 depcmd_low_addr;
+	u8 depcmd_hi_addr;
+	u32 gevntcount_low_addr;
+	u8 gevntcount_hi_addr;
+	u16 xfer_ring_len;
+	u64 xfer_ring_base_addr;
+	struct usb_gsi_request *ch_req;
+};
 
 /**
  * struct usb_request - describes one i/o request
@@ -71,6 +159,7 @@
  *	Note that for writes (IN transfers) some data bytes may still
  *	reside in a device-side FIFO when the request is reported as
  *	complete.
+ * @udc_priv: Vendor private data in usage by the UDC.
  *
  * These are allocated/freed through the endpoint they're used with.  The
  * hardware's driver can add extra per-request data to the memory it returns,
@@ -111,6 +200,7 @@
 
 	int			status;
 	unsigned		actual;
+	unsigned int		udc_priv;
 };
 
 /*-------------------------------------------------------------------------*/
@@ -140,6 +230,9 @@
 
 	int (*fifo_status) (struct usb_ep *ep);
 	void (*fifo_flush) (struct usb_ep *ep);
+	int (*gsi_ep_op)(struct usb_ep *ep, void *op_data,
+		enum gsi_ep_op op);
+
 };
 
 /**
@@ -203,6 +296,8 @@
  *	enabled and remains valid until the endpoint is disabled.
  * @comp_desc: In case of SuperSpeed support, this is the endpoint companion
  *	descriptor that is used to configure the endpoint
+ * @ep_type: Used to specify type of EP eg. normal vs h/w accelerated.
+ * @ep_intr_num: Interrupter number for EP.
  *
  * the bus controller driver lists all the general purpose endpoints in
  * gadget->ep_list.  the control endpoint (gadget->ep0) is not in that list,
@@ -226,6 +321,8 @@
 	u8			address;
 	const struct usb_endpoint_descriptor	*desc;
 	const struct usb_ss_ep_comp_descriptor	*comp_desc;
+	enum ep_type		ep_type;
+	u8			ep_intr_num;
 };
 
 /*-------------------------------------------------------------------------*/
@@ -243,6 +340,10 @@
 int usb_ep_set_wedge(struct usb_ep *ep);
 int usb_ep_fifo_status(struct usb_ep *ep);
 void usb_ep_fifo_flush(struct usb_ep *ep);
+int usb_gsi_ep_op(struct usb_ep *ep,
+		struct usb_gsi_request *req, enum gsi_ep_op op);
+int usb_gadget_restart(struct usb_gadget *gadget);
+int usb_gadget_func_wakeup(struct usb_gadget *gadget, int interface_id);
 #else
 static inline void usb_ep_set_maxpacket_limit(struct usb_ep *ep,
 		unsigned maxpacket_limit)
@@ -272,6 +373,14 @@
 { return 0; }
 static inline void usb_ep_fifo_flush(struct usb_ep *ep)
 { }
+
+static inline int usb_gsi_ep_op(struct usb_ep *ep,
+		struct usb_gsi_request *req, enum gsi_ep_op op)
+{ return 0; }
+static inline int usb_gadget_restart(struct usb_gadget *gadget)
+{ return 0; }
+static inline int usb_gadget_func_wakeup(struct usb_gadget *gadget,
+		int interface_id)
+{ return 0; }
 #endif /* USB_GADGET */
 
 /*-------------------------------------------------------------------------*/
@@ -284,7 +393,6 @@
 };
 
 
-struct usb_gadget;
 struct usb_gadget_driver;
 struct usb_udc;
 
@@ -307,6 +415,8 @@
 	struct usb_ep *(*match_ep)(struct usb_gadget *,
 			struct usb_endpoint_descriptor *,
 			struct usb_ss_ep_comp_descriptor *);
+	int	(*func_wakeup)(struct usb_gadget *, int interface_id);
+	int	(*restart)(struct usb_gadget *);
 };
 
 /**
@@ -404,6 +514,8 @@
 	unsigned			is_selfpowered:1;
 	unsigned			deactivated:1;
 	unsigned			connected:1;
+	bool				l1_supported;
+	bool				remote_wakeup;
 };
 #define work_to_gadget(w)	(container_of((w), struct usb_gadget, work))
 
@@ -855,4 +967,33 @@
 
 extern void usb_ep_autoconfig_reset(struct usb_gadget *);
 
+#ifdef CONFIG_USB_DWC3_MSM
+int msm_ep_config(struct usb_ep *ep);
+int msm_ep_unconfig(struct usb_ep *ep);
+void dwc3_tx_fifo_resize_request(struct usb_ep *ep, bool qdss_enable);
+int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr, u32 size,
+	u8 dst_pipe_idx);
+bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget);
+int msm_dwc3_reset_dbm_ep(struct usb_ep *ep);
+#else
+static inline int msm_data_fifo_config(struct usb_ep *ep, phys_addr_t addr,
+	u32 size, u8 dst_pipe_idx)
+{	return -ENODEV; }
+
+static inline int msm_ep_config(struct usb_ep *ep)
+{ return -ENODEV; }
+
+static inline int msm_ep_unconfig(struct usb_ep *ep)
+{ return -ENODEV; }
+
+static inline void dwc3_tx_fifo_resize_request(struct usb_ep *ep,
+	bool qdss_enable)
+{ }
+
+static inline bool msm_dwc3_reset_ep_after_lpm(struct usb_gadget *gadget)
+{ return false; }
+
+static inline int msm_dwc3_reset_dbm_ep(struct usb_ep *ep)
+{ return -ENODEV; }
+#endif
 #endif /* __LINUX_USB_GADGET_H */
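The GSI endpoint extensions above are driven entirely through usb_gsi_ep_op(); the sketch below shows only the calling convention (the actual op ordering is controller specific and not defined by this header, and the helper is hypothetical):

#include <linux/usb/gadget.h>

static int example_prepare_gsi_ep(struct usb_ep *ep,
				  struct usb_gsi_request *req)
{
	int ret;

	ret = usb_gsi_ep_op(ep, req, GSI_EP_OP_PREPARE_TRBS);
	if (ret)
		return ret;

	ret = usb_gsi_ep_op(ep, req, GSI_EP_OP_CONFIG);
	if (ret)
		usb_gsi_ep_op(ep, req, GSI_EP_OP_FREE_TRBS);

	return ret;
}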
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 31a8068..263f20a 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -12,6 +12,17 @@
 #include <linux/notifier.h>
 #include <linux/usb.h>
 
+#define ENABLE_DP_MANUAL_PULLUP	BIT(0)
+#define ENABLE_SECONDARY_PHY	BIT(1)
+#define PHY_HOST_MODE		BIT(2)
+#define PHY_CHARGER_CONNECTED	BIT(3)
+#define PHY_VBUS_VALID_OVERRIDE	BIT(4)
+#define DEVICE_IN_SS_MODE	BIT(5)
+#define PHY_LANE_A		BIT(6)
+#define PHY_LANE_B		BIT(7)
+#define PHY_HSFS_MODE		BIT(8)
+#define PHY_LS_MODE		BIT(9)
+
 enum usb_phy_interface {
 	USBPHY_INTERFACE_MODE_UNKNOWN,
 	USBPHY_INTERFACE_MODE_UTMI,
@@ -44,6 +55,7 @@
 	OTG_STATE_B_IDLE,
 	OTG_STATE_B_SRP_INIT,
 	OTG_STATE_B_PERIPHERAL,
+	OTG_STATE_B_SUSPEND,
 
 	/* extra dual-role default-b states */
 	OTG_STATE_B_WAIT_ACON,
@@ -122,6 +134,9 @@
 			enum usb_device_speed speed);
 	int	(*notify_disconnect)(struct usb_phy *x,
 			enum usb_device_speed speed);
+
+	/* reset the PHY clocks */
+	int     (*reset)(struct usb_phy *x);
 };
 
 /**
@@ -196,6 +211,15 @@
 	return x->set_vbus(x, false);
 }
 
+static inline int
+usb_phy_reset(struct usb_phy *x)
+{
+	if (x && x->reset)
+		return x->reset(x);
+
+	return 0;
+}
+
 /* for usb host and peripheral controller drivers */
 #if IS_ENABLED(CONFIG_USB_PHY)
 extern struct usb_phy *usb_get_phy(enum usb_phy_type type);
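The new usb_phy_reset() wrapper is safe to call unconditionally because it returns 0 when the PHY driver provides no ->reset op; a minimal recovery sketch (the surrounding driver flow is hypothetical):

#include <linux/usb/phy.h>

static int example_phy_recover(struct usb_phy *phy)
{
	int ret;

	ret = usb_phy_reset(phy);	/* no-op (returns 0) without ->reset */
	if (ret)
		return ret;

	return usb_phy_init(phy);
}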
diff --git a/include/linux/usb/usbdiag.h b/include/linux/usb/usbdiag.h
new file mode 100644
index 0000000..b56c411
--- /dev/null
+++ b/include/linux/usb/usbdiag.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2008-2010, 2012-2014, The Linux Foundation.
+ * All rights reserved.
+ *
+ * All source code in this file is licensed under the following license except
+ * where indicated.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * See the GNU General Public License for more details.
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can find it at http://www.fsf.org
+ */
+
+#ifndef _DRIVERS_USB_DIAG_H_
+#define _DRIVERS_USB_DIAG_H_
+
+#include <linux/err.h>
+
+#define DIAG_LEGACY		"diag"
+#define DIAG_MDM		"diag_mdm"
+#define DIAG_QSC		"diag_qsc"
+#define DIAG_MDM2		"diag_mdm2"
+
+#define USB_DIAG_CONNECT	0
+#define USB_DIAG_DISCONNECT	1
+#define USB_DIAG_WRITE_DONE	2
+#define USB_DIAG_READ_DONE	3
+
+struct diag_request {
+	char *buf;
+	int length;
+	int actual;
+	int status;
+	void *context;
+};
+
+struct usb_diag_ch {
+	const char *name;
+	struct list_head list;
+	void (*notify)(void *priv, unsigned int event,
+			struct diag_request *d_req);
+	void *priv;
+	void *priv_usb;
+};
+
+#if IS_ENABLED(CONFIG_USB_F_DIAG)
+int usb_diag_request_size(struct usb_diag_ch *ch);
+struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+		void (*notify)(void *, unsigned int, struct diag_request *));
+void usb_diag_close(struct usb_diag_ch *ch);
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read);
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req);
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req);
+#else
+static inline struct usb_diag_ch *usb_diag_open(const char *name, void *priv,
+		void (*notify)(void *, unsigned int, struct diag_request *))
+{
+	return ERR_PTR(-ENODEV);
+}
+static inline void usb_diag_close(struct usb_diag_ch *ch)
+{
+}
+static inline
+int usb_diag_alloc_req(struct usb_diag_ch *ch, int n_write, int n_read)
+{
+	return -ENODEV;
+}
+static inline
+int usb_diag_read(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	return -ENODEV;
+}
+static inline
+int usb_diag_write(struct usb_diag_ch *ch, struct diag_request *d_req)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_USB_F_DIAG */
+#endif /* _DRIVERS_USB_DIAG_H_ */
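A hedged sketch of a diag client built on the channel API above (the callback, request counts, and the choice of the DIAG_LEGACY name are illustrative only):

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/usb/usbdiag.h>

static void example_diag_notify(void *priv, unsigned int event,
				struct diag_request *d_req)
{
	if (event == USB_DIAG_CONNECT)
		pr_info("example: diag channel connected\n");
}

static struct usb_diag_ch *example_diag_setup(void)
{
	struct usb_diag_ch *ch;

	ch = usb_diag_open(DIAG_LEGACY, NULL, example_diag_notify);
	if (IS_ERR(ch))
		return ch;

	if (usb_diag_alloc_req(ch, 4, 4)) {
		usb_diag_close(ch);
		return ERR_PTR(-ENOMEM);
	}
	return ch;
}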
diff --git a/include/linux/wakelock.h b/include/linux/wakelock.h
new file mode 100644
index 0000000..f4a698a
--- /dev/null
+++ b/include/linux/wakelock.h
@@ -0,0 +1,67 @@
+/* include/linux/wakelock.h
+ *
+ * Copyright (C) 2007-2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_WAKELOCK_H
+#define _LINUX_WAKELOCK_H
+
+#include <linux/ktime.h>
+#include <linux/device.h>
+
+/* A wake_lock prevents the system from entering suspend or other low power
+ * states when active. If the type is set to WAKE_LOCK_SUSPEND, the wake_lock
+ * prevents a full system suspend.
+ */
+
+enum {
+	WAKE_LOCK_SUSPEND, /* Prevent suspend */
+	WAKE_LOCK_TYPE_COUNT
+};
+
+struct wake_lock {
+	struct wakeup_source ws;
+};
+
+static inline void wake_lock_init(struct wake_lock *lock, int type,
+				  const char *name)
+{
+	wakeup_source_init(&lock->ws, name);
+}
+
+static inline void wake_lock_destroy(struct wake_lock *lock)
+{
+	wakeup_source_trash(&lock->ws);
+}
+
+static inline void wake_lock(struct wake_lock *lock)
+{
+	__pm_stay_awake(&lock->ws);
+}
+
+static inline void wake_lock_timeout(struct wake_lock *lock, long timeout)
+{
+	__pm_wakeup_event(&lock->ws, jiffies_to_msecs(timeout));
+}
+
+static inline void wake_unlock(struct wake_lock *lock)
+{
+	__pm_relax(&lock->ws);
+}
+
+static inline int wake_lock_active(struct wake_lock *lock)
+{
+	return lock->ws.active;
+}
+
+#endif
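Since the wakelock API above is a thin wrapper over wakeup sources, usage mirrors __pm_stay_awake()/__pm_relax(); a small sketch (the 2-second timeout is arbitrary and the event handler is hypothetical):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/wakelock.h>

static struct wake_lock example_lock;

/* Keep the system out of suspend for up to 2 s while an event is handled. */
static void example_handle_event(void)
{
	wake_lock_timeout(&example_lock, 2 * HZ);
	/* ... process the event, then let the timeout expire or wake_unlock() ... */
}

static int __init example_wakelock_init(void)
{
	wake_lock_init(&example_lock, WAKE_LOCK_SUSPEND, "example");
	return 0;
}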
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644
index 0000000..d84d8c3
--- /dev/null
+++ b/include/linux/wakeup_reason.h
@@ -0,0 +1,32 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason which caused the kernel to resume
+ * from the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+void log_wakeup_reason(int irq);
+int check_wakeup_reason(int irq);
+
+#ifdef CONFIG_SUSPEND
+void log_suspend_abort_reason(const char *fmt, ...);
+#else
+static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+#endif
+
+#endif /* _LINUX_WAKEUP_REASON_H */
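log_suspend_abort_reason() is meant for code that aborts a suspend attempt; a hedged sketch (the example_busy flag and the suspend hook are hypothetical):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/wakeup_reason.h>

static bool example_busy;	/* hypothetical "transfer in flight" flag */

static int example_suspend(struct device *dev)
{
	if (example_busy) {
		log_suspend_abort_reason("example: transfer in flight on %s",
					 dev_name(dev));
		return -EBUSY;
	}
	return 0;
}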
diff --git a/include/linux/wlan_plat.h b/include/linux/wlan_plat.h
new file mode 100644
index 0000000..8e8b06f
--- /dev/null
+++ b/include/linux/wlan_plat.h
@@ -0,0 +1,30 @@
+/* include/linux/wlan_plat.h
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _LINUX_WLAN_PLAT_H_
+#define _LINUX_WLAN_PLAT_H_
+
+#define WLAN_PLAT_NODFS_FLAG	0x01
+
+struct wifi_platform_data {
+	int (*set_power)(int val);
+	int (*set_reset)(int val);
+	int (*set_carddetect)(int val);
+	void *(*mem_prealloc)(int section, unsigned long size);
+	int (*get_mac_addr)(unsigned char *buf);
+	int (*get_wake_irq)(void);
+	void *(*get_country_code)(char *ccode, u32 flags);
+};
+
+#endif
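struct wifi_platform_data is a platform-data hook consumed by WLAN drivers; a board-glue sketch (the GPIO number and the single set_power callback are illustrative):

#include <linux/gpio.h>
#include <linux/wlan_plat.h>

#define EXAMPLE_WLAN_PWR_GPIO	42	/* hypothetical power-enable GPIO */

static int example_wifi_set_power(int on)
{
	gpio_set_value(EXAMPLE_WLAN_PWR_GPIO, !!on);
	return 0;
}

static struct wifi_platform_data example_wifi_pdata = {
	.set_power = example_wifi_set_power,
};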
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 8f998af..e1bd2bc 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -242,6 +242,8 @@
 void addrconf_prefix_rcv(struct net_device *dev,
 			 u8 *opt, int len, bool sllao);
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
 /*
  *	anycast prototypes (anycast.c)
  */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 14b51d7..3fe01b4 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3030,6 +3030,7 @@
  *	beaconing mode (AP, IBSS, Mesh, ...).
  * @WIPHY_FLAG_HAS_STATIC_WEP: The device supports static WEP key installation
  *	before connection.
+ * @WIPHY_FLAG_DFS_OFFLOAD: The driver handles all the DFS related operations.
  */
 enum wiphy_flags {
 	/* use hole at 0 */
@@ -3056,6 +3057,7 @@
 	WIPHY_FLAG_SUPPORTS_5_10_MHZ		= BIT(22),
 	WIPHY_FLAG_HAS_CHANNEL_SWITCH		= BIT(23),
 	WIPHY_FLAG_HAS_STATIC_WEP		= BIT(24),
+	WIPHY_FLAG_DFS_OFFLOAD                  = BIT(25)
 };
 
 /**
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 456e4a6..521885c 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -30,6 +30,8 @@
 	int			suppress_prefixlen;
 	char			iifname[IFNAMSIZ];
 	char			oifname[IFNAMSIZ];
+	kuid_t			uid_start;
+	kuid_t			uid_end;
 	struct rcu_head		rcu;
 };
 
@@ -89,6 +91,8 @@
 	[FRA_FWMARK]	= { .type = NLA_U32 }, \
 	[FRA_FWMASK]	= { .type = NLA_U32 }, \
 	[FRA_TABLE]     = { .type = NLA_U32 }, \
+	[FRA_UID_START]	= { .type = NLA_U32 }, \
+	[FRA_UID_END]	= { .type = NLA_U32 }, \
 	[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
 	[FRA_SUPPRESS_IFGROUP] = { .type = NLA_U32 }, \
 	[FRA_GOTO]	= { .type = NLA_U32 }, \
diff --git a/include/net/flow.h b/include/net/flow.h
index 035aa77..6bbbca8 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -11,6 +11,7 @@
 #include <linux/in6.h>
 #include <linux/atomic.h>
 #include <net/flow_dissector.h>
+#include <linux/uidgid.h>
 
 /*
  * ifindex generation is per-net namespace, and loopback is
@@ -37,6 +38,7 @@
 #define FLOWI_FLAG_SKIP_NH_OIF		0x04
 	__u32	flowic_secid;
 	struct flowi_tunnel flowic_tun_key;
+	kuid_t  flowic_uid;
 };
 
 union flowi_uli {
@@ -74,6 +76,7 @@
 #define flowi4_flags		__fl_common.flowic_flags
 #define flowi4_secid		__fl_common.flowic_secid
 #define flowi4_tun_key		__fl_common.flowic_tun_key
+#define flowi4_uid		__fl_common.flowic_uid
 
 	/* (saddr,daddr) must be grouped, same order as in IP header */
 	__be32			saddr;
@@ -93,7 +96,8 @@
 				      __u32 mark, __u8 tos, __u8 scope,
 				      __u8 proto, __u8 flags,
 				      __be32 daddr, __be32 saddr,
-				      __be16 dport, __be16 sport)
+				      __be16 dport, __be16 sport,
+				      kuid_t uid)
 {
 	fl4->flowi4_oif = oif;
 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
@@ -104,6 +108,7 @@
 	fl4->flowi4_flags = flags;
 	fl4->flowi4_secid = 0;
 	fl4->flowi4_tun_key.tun_id = 0;
+	fl4->flowi4_uid = uid;
 	fl4->daddr = daddr;
 	fl4->saddr = saddr;
 	fl4->fl4_dport = dport;
@@ -131,6 +136,7 @@
 #define flowi6_flags		__fl_common.flowic_flags
 #define flowi6_secid		__fl_common.flowic_secid
 #define flowi6_tun_key		__fl_common.flowic_tun_key
+#define flowi6_uid		__fl_common.flowic_uid
 	struct in6_addr		daddr;
 	struct in6_addr		saddr;
 	/* Note: flowi6_tos is encoded in flowlabel, too. */
@@ -176,6 +182,7 @@
 #define flowi_flags	u.__fl_common.flowic_flags
 #define flowi_secid	u.__fl_common.flowic_secid
 #define flowi_tun_key	u.__fl_common.flowic_tun_key
+#define flowi_uid	u.__fl_common.flowic_uid
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
 static inline struct flowi *flowi4_to_flowi(struct flowi4 *fl4)
diff --git a/include/net/ip.h b/include/net/ip.h
index d3a1078..b043c7d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -178,6 +178,7 @@
 				/* -1 if not needed */ 
 	int	    bound_dev_if;
 	u8  	    tos;
+	kuid_t	    uid;
 }; 
 
 #define IP_REPLY_ARG_NOSRCCHECK 1
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index f83e78d..fc89e35 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -140,7 +140,7 @@
 		  const struct in6_addr *gwaddr);
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
-		     u32 mark);
+		     u32 mark, kuid_t uid);
 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark);
 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
diff --git a/include/net/rmnet_config.h b/include/net/rmnet_config.h
new file mode 100644
index 0000000..5f5685a
--- /dev/null
+++ b/include/net/rmnet_config.h
@@ -0,0 +1,47 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data config definition
+ */
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+#include <linux/skbuff.h>
+
+struct rmnet_phys_ep_conf_s {
+	void (*recycle)(struct sk_buff *); /* Destruct function */
+	void *config;
+};
+
+struct rmnet_map_header_s {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+	uint8_t  pad_len:6;
+	uint8_t  reserved_bit:1;
+	uint8_t  cd_bit:1;
+#else
+	uint8_t  cd_bit:1;
+	uint8_t  reserved_bit:1;
+	uint8_t  pad_len:6;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+	uint8_t  mux_id;
+	uint16_t pkt_len;
+}  __aligned(1);
+
+#define RMNET_MAP_GET_MUX_ID(Y) (((struct rmnet_map_header_s *)Y->data)->mux_id)
+#define RMNET_MAP_GET_CD_BIT(Y) (((struct rmnet_map_header_s *)Y->data)->cd_bit)
+#define RMNET_MAP_GET_PAD(Y) (((struct rmnet_map_header_s *)Y->data)->pad_len)
+#define RMNET_MAP_GET_CMD_START(Y) ((struct rmnet_map_control_command_s *) \
+				  (Y->data + sizeof(struct rmnet_map_header_s)))
+#define RMNET_MAP_GET_LENGTH(Y) (ntohs( \
+			       ((struct rmnet_map_header_s *)Y->data)->pkt_len))
+
+#endif /* _RMNET_CONFIG_H_ */
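The MAP header accessors above operate directly on skb->data; a hedged parsing sketch (the helper name is hypothetical, and no pad/length validation beyond the basic size check is shown):

#include <linux/skbuff.h>
#include <net/rmnet_config.h>

static bool example_is_map_command(struct sk_buff *skb, u8 *mux_id)
{
	if (skb->len < sizeof(struct rmnet_map_header_s))
		return false;

	*mux_id = RMNET_MAP_GET_MUX_ID(skb);
	return RMNET_MAP_GET_CD_BIT(skb);	/* command/data bit */
}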
diff --git a/include/net/route.h b/include/net/route.h
index 0429d47..e56c5a4 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -153,7 +153,8 @@
 	flowi4_init_output(fl4, oif, sk ? sk->sk_mark : 0, tos,
 			   RT_SCOPE_UNIVERSE, proto,
 			   sk ? inet_sk_flowi_flags(sk) : 0,
-			   daddr, saddr, dport, sport);
+			   daddr, saddr, dport, sport,
+			   sk ? sock_i_uid(sk) : GLOBAL_ROOT_UID);
 	if (sk)
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 	return ip_route_output_flow(net, fl4, sk);
@@ -269,7 +270,8 @@
 		flow_flags |= FLOWI_FLAG_ANYSRC;
 
 	flowi4_init_output(fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
-			   protocol, flow_flags, dst, src, dport, sport);
+			   protocol, flow_flags, dst, src, dport, sport,
+			   sock_i_uid(sk));
 }
 
 static inline struct rtable *ip_route_connect(struct flowi4 *fl4,
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 123979f..2700f92 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -271,6 +271,7 @@
 extern int sysctl_tcp_invalid_ratelimit;
 extern int sysctl_tcp_pacing_ss_ratio;
 extern int sysctl_tcp_pacing_ca_ratio;
+extern int sysctl_tcp_default_init_rwnd;
 
 extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 8a95631..027a05d 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -182,6 +182,12 @@
 	unsigned broken_fua:1;		/* Don't set FUA bit */
 	unsigned lun_in_cdb:1;		/* Store LUN bits in CDB[1] */
 	unsigned synchronous_alua:1;	/* Synchronous ALUA commands */
+	unsigned use_rpm_auto:1; /* Enable runtime PM auto suspend */
+
+#define SCSI_DEFAULT_AUTOSUSPEND_DELAY  -1
+	int autosuspend_delay;
+	/* If non-zero, use timeout (in jiffies) for all commands */
+	unsigned int timeout_override;
 
 	atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
@@ -427,6 +433,8 @@
 extern void sdev_enable_disk_events(struct scsi_device *sdev);
 extern int scsi_vpd_lun_id(struct scsi_device *, char *, size_t);
 extern int scsi_vpd_tpg_id(struct scsi_device *, int *);
+extern void scsi_set_cmd_timeout_override(struct scsi_device *sdev,
+					  unsigned int timeout);
 
 #ifdef CONFIG_PM
 extern int scsi_autopm_get_device(struct scsi_device *);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 7e4cd53..7e5430d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -669,6 +669,12 @@
 	unsigned short_inquiry:1;
 
 	/*
+	 * Set the "DBD" field in the mode_sense caching mode page when the
+	 * LLD standard mandates it.
+	 */
+	unsigned set_dbd_for_caching:1;
+
+	/*
 	 * Optional work queue to be utilized by the transport
 	 */
 	char work_q_name[20];
diff --git a/include/soc/qcom/boot_stats.h b/include/soc/qcom/boot_stats.h
new file mode 100644
index 0000000..c81fc24
--- /dev/null
+++ b/include/soc/qcom/boot_stats.h
@@ -0,0 +1,17 @@
+/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_MSM_BOOT_STATS
+int boot_stats_init(void);
+#else
+static inline int boot_stats_init(void) { return 0; }
+#endif
diff --git a/include/soc/qcom/glink.h b/include/soc/qcom/glink.h
new file mode 100644
index 0000000..7b86481
--- /dev/null
+++ b/include/soc/qcom/glink.h
@@ -0,0 +1,432 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_H_
+#define _SOC_QCOM_GLINK_H_
+
+#include <linux/types.h>
+
+/* Maximum size (including null) for channel, edge, or transport names */
+#define GLINK_NAME_SIZE 32
+
+/* Maximum packet size for TX and RX */
+#define GLINK_MAX_PKT_SIZE SZ_1M
+
+/**
+ * G-Link Port State Notification Values
+ */
+enum {
+	GLINK_CONNECTED,
+	GLINK_LOCAL_DISCONNECTED,
+	GLINK_REMOTE_DISCONNECTED,
+};
+
+/**
+ * G-Link Open Options
+ *
+ * Used to define the glink_open_config::options field which is passed into
+ * glink_open().
+ */
+enum {
+	GLINK_OPT_INITIAL_XPORT = BIT(0),
+	GLINK_OPT_RX_INTENT_NOTIF = BIT(1),
+};
+
+/**
+ * Open configuration.
+ *
+ * priv:			Private data passed into user callbacks
+ * options:			Open option flags
+ * rx_intent_req_timeout_ms:	Timeout for requesting an RX intent, in
+ *			milliseconds; if set to 0, timeout is infinite
+ * notify_rx:			Receive notification function (required)
+ * notify_tx_done:		Transmit-done notification function (required)
+ * notify_state:		State-change notification (required)
+ * notify_rx_intent_req:	Receive intent request (optional)
+ * notify_rxv:			Receive notification function for vector buffers
+ *			(required if notify_rx is not provided)
+ * notify_rx_sigs:		Signal-change notification (optional)
+ * notify_rx_tracer_pkt:	Receive notification for tracer packet
+ * notify_remote_rx_intent:	Receive notification for remote-queued RX intent
+ *
+ * This structure is passed into the glink_open() call to set up the
+ * configuration handles.  All unused fields should be set to 0.
+ *
+ * The structure is copied internally before the call to glink_open() returns.
+ */
+struct glink_open_config {
+	void *priv;
+	uint32_t options;
+
+	const char *transport;
+	const char *edge;
+	const char *name;
+	unsigned int rx_intent_req_timeout_ms;
+
+	void (*notify_rx)(void *handle, const void *priv, const void *pkt_priv,
+			const void *ptr, size_t size);
+	void (*notify_tx_done)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr);
+	void (*notify_state)(void *handle, const void *priv,
+			unsigned int event);
+	bool (*notify_rx_intent_req)(void *handle, const void *priv,
+			size_t req_size);
+	void (*notify_rxv)(void *handle, const void *priv, const void *pkt_priv,
+			   void *iovec, size_t size,
+			   void * (*vbuf_provider)(void *iovec, size_t offset,
+						 size_t *size),
+			   void * (*pbuf_provider)(void *iovec, size_t offset,
+						 size_t *size));
+	void (*notify_rx_sigs)(void *handle, const void *priv,
+			uint32_t old_sigs, uint32_t new_sigs);
+	void (*notify_rx_abort)(void *handle, const void *priv,
+			const void *pkt_priv);
+	void (*notify_tx_abort)(void *handle, const void *priv,
+			const void *pkt_priv);
+	void (*notify_rx_tracer_pkt)(void *handle, const void *priv,
+			const void *pkt_priv, const void *ptr, size_t size);
+	void (*notify_remote_rx_intent)(void *handle, const void *priv,
+					size_t size);
+};
+
+enum glink_link_state {
+	GLINK_LINK_STATE_UP,
+	GLINK_LINK_STATE_DOWN,
+};
+
+/**
+ * Data structure containing information during Link State callback
+ * transport:	String identifying the transport.
+ * edge:	String identifying the edge.
+ * link_state:	Link state (UP/DOWN).
+ */
+struct glink_link_state_cb_info {
+	const char *transport;
+	const char *edge;
+	enum glink_link_state link_state;
+};
+
+/**
+ * Data structure containing information for link state registration
+ * transport:	String identifying the transport.
+ * edge:	String identifying the edge.
+ * glink_link_state_notif_cb:	Callback function used to pass the event.
+ */
+struct glink_link_info {
+	const char *transport;
+	const char *edge;
+	void (*glink_link_state_notif_cb)(
+			struct glink_link_state_cb_info *cb_info,
+			void *priv);
+};
+
+enum tx_flags {
+	GLINK_TX_REQ_INTENT = 0x1,
+	GLINK_TX_SINGLE_THREADED = 0x2,
+	GLINK_TX_TRACER_PKT = 0x4,
+	GLINK_TX_ATOMIC = 0x8,
+};
+
+#ifdef CONFIG_MSM_GLINK
+/**
+ * Open GLINK channel.
+ *
+ * @cfg_ptr:	Open configuration structure (the structure is copied before
+ *		glink_open returns).  All unused fields should be zero-filled.
+ *
+ * This should not be called from link state callback context by clients.
+ * It is recommended that clients invoke this function from their own
+ * thread.
+ *
+ * Return:  Pointer to channel on success, PTR_ERR() with standard Linux
+ * error code on failure.
+ */
+void *glink_open(const struct glink_open_config *cfg_ptr);
+
+/**
+ * glink_close() - Close a previously opened channel.
+ *
+ * @handle:	handle to close
+ *
+ * Once the closing process has been completed, the GLINK_LOCAL_DISCONNECTED
+ * state event will be sent and the channel can be reopened.
+ *
+ * Return:  0 on success; -EINVAL for invalid handle, -EBUSY if close is
+ * already in progress, standard Linux Error code otherwise.
+ */
+int glink_close(void *handle);
+
+/**
+ * glink_tx() - Transmit packet.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @data:	pointer to the data
+ * @size:	size of data
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return:	-EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *		transmit operation (not fully opened); -EAGAIN if remote side
+ *		has not provided a receive intent that is big enough.
+ */
+int glink_tx(void *handle, void *pkt_priv, void *data, size_t size,
+							uint32_t tx_flags);
+
+/**
+ * glink_queue_rx_intent() - Register an intent to receive data.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data type that is returned when a packet is received
+ * @size:	maximum size of data to receive
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_queue_rx_intent(void *handle, const void *pkt_priv, size_t size);
+
+/**
+ * glink_rx_intent_exists() - Check if an intent of size exists.
+ *
+ * @handle:	handle returned by glink_open()
+ * @size:	size of an intent to check or 0 for any intent
+ *
+ * Return:	true if an intent with size greater than or equal to @size
+ *		exists, false otherwise
+ */
+bool glink_rx_intent_exists(void *handle, size_t size);
+
+/**
+ * glink_rx_done() - Return receive buffer to remote side.
+ *
+ * @handle:	handle returned by glink_open()
+ * @ptr:	data pointer provided in the notify_rx() call
+ * @reuse:	if true, receive intent is re-used
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_rx_done(void *handle, const void *ptr, bool reuse);
+
+/**
+ * glink_txv() - Transmit a packet in vector form.
+ *
+ * @handle:	handle returned by glink_open()
+ * @pkt_priv:	opaque data value that will be returned to client with
+ *		notify_tx_done notification
+ * @iovec:	pointer to the vector (must remain valid until notify_tx_done
+ *		notification)
+ * @size:	size of data/vector
+ * @vbuf_provider: Client provided helper function to iterate the vector
+ *		in physical address space
+ * @pbuf_provider: Client provided helper function to iterate the vector
+ *		in virtual address space
+ * @tx_flags:	Flags to specify transmit specific options
+ *
+ * Return: -EINVAL for invalid handle; -EBUSY if channel isn't ready for
+ *           transmit operation (not fully opened); -EAGAIN if remote side has
+ *           not provided a receive intent that is big enough.
+ */
+int glink_txv(void *handle, void *pkt_priv,
+	      void *iovec, size_t size,
+	      void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      uint32_t tx_flags);
+
+/**
+ * glink_sigs_set() - Set the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	modified signal value
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_set(void *handle, uint32_t sigs);
+
+/**
+ * glink_sigs_local_get() - Get the local signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_local_get(void *handle, uint32_t *sigs);
+
+/**
+ * glink_sigs_remote_get() - Get the Remote signals for the GLINK channel
+ *
+ * @handle:	handle returned by glink_open()
+ * @sigs:	Pointer to hold the signals
+ *
+ * Return: 0 for success; standard Linux error code for failure case
+ */
+int glink_sigs_remote_get(void *handle, uint32_t *sigs);
+
+/**
+ * glink_register_link_state_cb() - Register for link state notification
+ * @link_info:	Data structure containing the link identification and callback.
+ * @priv:	Private information to be passed with the callback.
+ *
+ * This function is used to register a notifier to receive the updates about a
+ * link's/transport's state. This notifier needs to be registered first before
+ * an attempt to open a channel.
+ *
+ * Return: a reference to the notifier handle.
+ */
+void *glink_register_link_state_cb(struct glink_link_info *link_info,
+				   void *priv);
+
+/**
+ * glink_unregister_link_state_cb() - Unregister the link state notification
+ * @notif_handle:	Handle to be unregistered.
+ *
+ * This function is used to unregister a notifier to stop receiving the updates
+ * about a link's/transport's state.
+ */
+void glink_unregister_link_state_cb(void *notif_handle);
+
+/**
+ * glink_qos_latency() - Register the latency QoS requirement
+ * @handle:	Channel handle in which the latency is required.
+ * @latency_us:	Latency requirement in units of micro-seconds.
+ * @pkt_size:	Worst case packet size for which the latency is required.
+ *
+ * This function is used to register the latency requirement for a channel
+ * and ensures that the latency requirement for this channel is met without
+ * impacting the existing latency requirements of other channels.
+ *
+ * Return: 0 if QoS request is achievable, standard Linux error codes on error
+ */
+int glink_qos_latency(void *handle, unsigned long latency_us, size_t pkt_size);
+
+/**
+ * glink_qos_cancel() - Cancel or unregister the QoS request
+ * @handle:	Channel handle for which the QoS request is cancelled.
+ *
+ * This function is used to cancel/unregister the QoS requests for a channel.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_cancel(void *handle);
+
+/**
+ * glink_qos_start() - Start of the transmission requiring QoS
+ * @handle:	Channel handle in which the transmit activity is performed.
+ *
+ * This function is called by the clients to indicate G-Link regarding the
+ * start of the transmission which requires a certain QoS. The clients
+ * must account for the QoS ramp time to ensure meeting the QoS.
+ *
+ * Return: 0 on success, standard Linux error codes on failure
+ */
+int glink_qos_start(void *handle);
+
+/**
+ * glink_qos_get_ramp_time() - Get the QoS ramp time
+ * @handle:	Channel handle for which the QoS ramp time is required.
+ * @pkt_size:	Worst case packet size.
+ *
+ * This function is called by the clients to obtain the ramp time required
+ * to meet the QoS requirements.
+ *
+ * Return: QoS ramp time in units of micro-seconds
+ */
+unsigned long glink_qos_get_ramp_time(void *handle, size_t pkt_size);
+
+#else /* CONFIG_MSM_GLINK */
+static inline void *glink_open(const struct glink_open_config *cfg_ptr)
+{
+	return NULL;
+}
+
+static inline int glink_close(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_tx(void *handle, void *pkt_priv, void *data,
+					size_t size, uint32_t tx_flags)
+{
+	return -ENODEV;
+}
+
+static inline int glink_queue_rx_intent(void *handle, const void *pkt_priv,
+								size_t size)
+{
+	return -ENODEV;
+}
+
+static inline bool glink_rx_intent_exists(void *handle, size_t size)
+{
+	return false;
+}
+
+static inline int glink_rx_done(void *handle, const void *ptr, bool reuse)
+{
+	return -ENODEV;
+}
+
+static inline int glink_txv(void *handle, void *pkt_priv,
+	      void *iovec, size_t size,
+	      void * (*vbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      void * (*pbuf_provider)(void *iovec, size_t offset, size_t *size),
+	      uint32_t tx_flags)
+{
+	return -ENODEV;
+}
+
+static inline int glink_sigs_set(void *handle, uint32_t sigs)
+{
+	return -ENODEV;
+}
+
+static inline int glink_sigs_local_get(void *handle, uint32_t *sigs)
+{
+	return -ENODEV;
+}
+
+static inline int glink_sigs_remote_get(void *handle, uint32_t *sigs)
+{
+	return -ENODEV;
+}
+
+static inline void *glink_register_link_state_cb(
+				struct glink_link_info *link_info, void *priv)
+{
+	return NULL;
+}
+
+static inline void glink_unregister_link_state_cb(void *notif_handle)
+{
+}
+
+static inline int glink_qos_latency(void *handle, unsigned long latency_us,
+				    size_t pkt_size)
+{
+	return -ENODEV;
+}
+
+static inline int glink_qos_cancel(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_qos_start(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline unsigned long glink_qos_get_ramp_time(void *handle,
+						    size_t pkt_size)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_GLINK */
+#endif /* _SOC_QCOM_GLINK_H_ */
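As a rough usage sketch of the QoS calls declared above (illustrative only, based solely on the prototypes in this header): a client holding a handle from glink_open() can bound latency around a burst of transmissions. The latency and packet-size values below are placeholders.

    #include <soc/qcom/glink.h>

    /* Bound latency around a burst of transmissions on an open channel.
     * The 500 us / 1024 byte figures are placeholders.
     */
    static int example_glink_low_latency_tx(void *handle)
    {
            int ret;

            ret = glink_qos_latency(handle, 500, 1024);
            if (ret)
                    return ret;

            ret = glink_qos_start(handle);
            if (ret)
                    glink_qos_cancel(handle);

            return ret;
    }

Once the low-latency phase ends, the client would call glink_qos_cancel() to drop the requirement.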
diff --git a/include/soc/qcom/glink_rpm_xprt.h b/include/soc/qcom/glink_rpm_xprt.h
new file mode 100644
index 0000000..8dfd437
--- /dev/null
+++ b/include/soc/qcom/glink_rpm_xprt.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _SOC_QCOM_GLINK_RPM_XPRT_H_
+#define _SOC_QCOM_GLINK_RPM_XPRT_H_
+
+#include <linux/types.h>
+
+#ifdef CONFIG_MSM_GLINK
+
+/**
+ * glink_rpm_rx_poll() - Poll and receive any available events
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function is used to poll and receive events and packets while the
+ * receive interrupt from RPM is disabled.
+ *
+ * Note that even if a value greater than zero is returned, indicating that
+ * some events were processed, clients should still use only the notification
+ * functions passed into glink_open() to determine whether an entire packet
+ * has been received, since some events may be internal details that are not
+ * visible to clients.
+ *
+ * Return: 0 for no packets available; > 0 for events available; standard
+ * Linux error codes on failure.
+ */
+int glink_rpm_rx_poll(void *handle);
+
+/**
+ * glink_rpm_mask_rx_interrupt() - Mask or unmask the RPM receive interrupt
+ * @handle:	Channel handle in which this operation is performed.
+ * @mask:	Flag to mask or unmask the interrupt.
+ * @pstruct:	Pointer to any platform specific data.
+ *
+ * This function is used to mask or unmask the receive interrupt from RPM.
+ * Setting "mask" to true masks the interrupt; setting it to false unmasks
+ * the interrupt.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int glink_rpm_mask_rx_interrupt(void *handle, bool mask, void *pstruct);
+
+/**
+ * glink_wait_link_down() - Return whether read/write indices in FIFO are all 0.
+ * @handle:	Channel handle in which this operation is performed.
+ *
+ * This function returns the status of the read/write indices in the FIFO.
+ *
+ * Return: 1 if the indices are all 0, 0 otherwise.
+ */
+int glink_wait_link_down(void *handle);
+
+#else
+static inline int glink_rpm_rx_poll(void *handle)
+{
+	return -ENODEV;
+}
+
+static inline int glink_rpm_mask_rx_interrupt(void *handle, bool mask,
+		void *pstruct)
+{
+	return -ENODEV;
+}
+
+static inline int glink_wait_link_down(void *handle)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_MSM_GLINK */
+
+#endif /* _SOC_QCOM_GLINK_RPM_XPRT_H_ */
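A minimal polling sketch for the helpers above, with the channel handle assumed to come from glink_open(); per the kernel-doc, actual packet delivery is still signalled through the notifications registered at glink_open() time.

    #include <soc/qcom/glink_rpm_xprt.h>

    /* Drain pending RPM G-Link events with the RPM receive interrupt masked. */
    static int example_rpm_poll(void *handle)
    {
            int ret;

            ret = glink_rpm_mask_rx_interrupt(handle, true, NULL);
            if (ret)
                    return ret;

            /* > 0 means events were processed; keep polling until none remain */
            do {
                    ret = glink_rpm_rx_poll(handle);
            } while (ret > 0);

            glink_rpm_mask_rx_interrupt(handle, false, NULL);
            return ret < 0 ? ret : 0;
    }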
diff --git a/include/soc/qcom/memory_dump.h b/include/soc/qcom/memory_dump.h
new file mode 100644
index 0000000..a7b87aa
--- /dev/null
+++ b/include/soc/qcom/memory_dump.h
@@ -0,0 +1,129 @@
+/* Copyright (c) 2012, 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_MEMORY_DUMP_H
+#define __MSM_MEMORY_DUMP_H
+
+#include <linux/types.h>
+
+enum dump_client_type {
+	MSM_CPU_CTXT = 0,
+	MSM_L1_CACHE,
+	MSM_L2_CACHE,
+	MSM_OCMEM,
+	MSM_TMC_ETFETB,
+	MSM_ETM0_REG,
+	MSM_ETM1_REG,
+	MSM_ETM2_REG,
+	MSM_ETM3_REG,
+	MSM_TMC0_REG, /* TMC_ETR */
+	MSM_TMC1_REG, /* TMC_ETF */
+	MSM_LOG_BUF,
+	MSM_LOG_BUF_FIRST_IDX,
+	MAX_NUM_CLIENTS,
+};
+
+struct msm_client_dump {
+	enum dump_client_type id;
+	unsigned long start_addr;
+	unsigned long end_addr;
+};
+
+#ifdef CONFIG_QCOM_MEMORY_DUMP
+extern int msm_dump_tbl_register(struct msm_client_dump *client_entry);
+#else
+static inline int msm_dump_tbl_register(struct msm_client_dump *entry)
+{
+	return -EIO;
+}
+#endif
+
+
+#if defined(CONFIG_QCOM_MEMORY_DUMP) || defined(CONFIG_QCOM_MEMORY_DUMP_V2)
+extern uint32_t msm_dump_table_version(void);
+#else
+static inline uint32_t msm_dump_table_version(void)
+{
+	return 0;
+}
+#endif
+
+#define MSM_DUMP_MAKE_VERSION(ma, mi)	(((ma) << 20) | (mi))
+#define MSM_DUMP_MAJOR(val)		((val) >> 20)
+#define MSM_DUMP_MINOR(val)		((val) & 0xFFFFF)
+
+
+#define MAX_NUM_ENTRIES		0x120
+
+enum msm_dump_data_ids {
+	MSM_DUMP_DATA_CPU_CTX = 0x00,
+	MSM_DUMP_DATA_L1_INST_CACHE = 0x60,
+	MSM_DUMP_DATA_L1_DATA_CACHE = 0x80,
+	MSM_DUMP_DATA_ETM_REG = 0xA0,
+	MSM_DUMP_DATA_L2_CACHE = 0xC0,
+	MSM_DUMP_DATA_L3_CACHE = 0xD0,
+	MSM_DUMP_DATA_OCMEM = 0xE0,
+	MSM_DUMP_DATA_CNSS_WLAN = 0xE1,
+	MSM_DUMP_DATA_WIGIG = 0xE2,
+	MSM_DUMP_DATA_PMIC = 0xE4,
+	MSM_DUMP_DATA_DBGUI_REG = 0xE5,
+	MSM_DUMP_DATA_DCC_REG = 0xE6,
+	MSM_DUMP_DATA_DCC_SRAM = 0xE7,
+	MSM_DUMP_DATA_MISC = 0xE8,
+	MSM_DUMP_DATA_VSENSE = 0xE9,
+	MSM_DUMP_DATA_RPM = 0xEA,
+	MSM_DUMP_DATA_SCANDUMP = 0xEB,
+	MSM_DUMP_DATA_TMC_ETF = 0xF0,
+	MSM_DUMP_DATA_TMC_REG = 0x100,
+	MSM_DUMP_DATA_LOG_BUF = 0x110,
+	MSM_DUMP_DATA_LOG_BUF_FIRST_IDX = 0x111,
+	MSM_DUMP_DATA_MAX = MAX_NUM_ENTRIES,
+};
+
+enum msm_dump_table_ids {
+	MSM_DUMP_TABLE_APPS,
+	MSM_DUMP_TABLE_MAX = MAX_NUM_ENTRIES,
+};
+
+enum msm_dump_type {
+	MSM_DUMP_TYPE_DATA,
+	MSM_DUMP_TYPE_TABLE,
+};
+
+struct msm_dump_data {
+	uint32_t version;
+	uint32_t magic;
+	char name[32];
+	uint64_t addr;
+	uint64_t len;
+	uint32_t reserved;
+};
+
+struct msm_dump_entry {
+	uint32_t id;
+	char name[32];
+	uint32_t type;
+	uint64_t addr;
+};
+
+#ifdef CONFIG_QCOM_MEMORY_DUMP_V2
+extern int msm_dump_data_register(enum msm_dump_table_ids id,
+				  struct msm_dump_entry *entry);
+#else
+static inline int msm_dump_data_register(enum msm_dump_table_ids id,
+					 struct msm_dump_entry *entry)
+{
+	return -EINVAL;
+}
+#endif
+
+#endif
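A sketch of how a client might register a region with the v2 dump table using the structures above. The item ID, table, name string, and the kzalloc()/virt_to_phys() pattern (which assumes a linear-mapped buffer) are illustrative; fields such as version and magic are simply left at zero here.

    #include <linux/io.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <soc/qcom/memory_dump.h>

    static int example_register_misc_dump(void *buf, size_t len)
    {
            struct msm_dump_data *data;
            struct msm_dump_entry entry = { 0 };

            data = kzalloc(sizeof(*data), GFP_KERNEL);
            if (!data)
                    return -ENOMEM;

            strlcpy(data->name, "example_misc", sizeof(data->name));
            data->addr = virt_to_phys(buf);
            data->len = len;

            entry.id = MSM_DUMP_DATA_MISC;
            entry.type = MSM_DUMP_TYPE_DATA;
            entry.addr = virt_to_phys(data);

            return msm_dump_data_register(MSM_DUMP_TABLE_APPS, &entry);
    }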
diff --git a/include/soc/qcom/msm_qmi_interface.h b/include/soc/qcom/msm_qmi_interface.h
new file mode 100644
index 0000000..349ca2f
--- /dev/null
+++ b/include/soc/qcom/msm_qmi_interface.h
@@ -0,0 +1,501 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_QMI_INTERFACE_H_
+#define _MSM_QMI_INTERFACE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/qmi_encdec.h>
+#include <linux/workqueue.h>
+
+#define QMI_COMMON_TLV_TYPE 0
+
+enum qmi_event_type {
+	QMI_RECV_MSG = 1,
+	QMI_SERVER_ARRIVE,
+	QMI_SERVER_EXIT,
+};
+
+/**
+ * struct qmi_handle - QMI Handle Data Structure
+ * @handle_hash: Hash Table Node in which this handle is present.
+ * @src_port: Pointer to port used for message exchange.
+ * @ctl_port: Pointer to port used for out-of-band event exchange.
+ * @handle_type: Type of handle (Service/Client).
+ * @next_txn_id: Transaction ID of the next outgoing request.
+ * @handle_wq: Workqueue to handle any handle-specific events.
+ * @handle_lock: Lock to protect access to elements in the handle.
+ * @notify_lock: Lock to protect and generate notification atomically.
+ * @notify: Function to notify the handle owner of an event.
+ * @notify_priv: Private info to be passed during the notification.
+ * @handle_reset: Flag to hold the reset state of the handle.
+ * @reset_waitq: Wait queue to wait for any reset events.
+ * @ctl_work: Work to handle the out-of-band events for this handle.
+ * @dest_info: Destination to which this handle is connected.
+ * @dest_service_id: Service ID of the service to which the client is connected.
+ * @txn_list: List of transactions waiting for the response.
+ * @ind_cb: Function to notify the handle owner of an indication message.
+ * @ind_cb_priv: Private info to be passed during an indication notification.
+ * @resume_tx_work: Work to resume the tx when the transport is not busy.
+ * @pending_txn_list: List of requests pending tx due to busy transport.
+ * @conn_list: List of connections handled by the service.
+ * @svc_ops_options: Service specific operations and options.
+ */
+struct qmi_handle {
+	struct hlist_node handle_hash;
+	void *src_port;
+	void *ctl_port;
+	unsigned int handle_type;
+	uint16_t next_txn_id;
+	struct workqueue_struct *handle_wq;
+	struct mutex handle_lock;
+	spinlock_t notify_lock;
+	void (*notify)(struct qmi_handle *handle, enum qmi_event_type event,
+			void *notify_priv);
+	void *notify_priv;
+	int handle_reset;
+	wait_queue_head_t reset_waitq;
+	struct delayed_work ctl_work;
+
+	/* Client specific elements */
+	void *dest_info;
+	uint32_t dest_service_id;
+	struct list_head txn_list;
+	void (*ind_cb)(struct qmi_handle *handle,
+			unsigned int msg_id, void *msg,
+			unsigned int msg_len, void *ind_cb_priv);
+	void *ind_cb_priv;
+	struct delayed_work resume_tx_work;
+	struct list_head pending_txn_list;
+
+	/* Service specific elements */
+	struct list_head conn_list;
+	struct qmi_svc_ops_options *svc_ops_options;
+};
+
+enum qmi_result_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	QMI_RESULT_SUCCESS_V01 = 0,
+	QMI_RESULT_FAILURE_V01 = 1,
+	QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+enum qmi_error_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use */
+	QMI_ERR_TYPE_MIN_ENUM_VAL_V01 = INT_MIN,
+	QMI_ERR_NONE_V01 = 0x0000,
+	QMI_ERR_MALFORMED_MSG_V01 = 0x0001,
+	QMI_ERR_NO_MEMORY_V01 = 0x0002,
+	QMI_ERR_INTERNAL_V01 = 0x0003,
+	QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
+	QMI_ERR_INVALID_ID_V01 = 0x0029,
+	QMI_ERR_ENCODING_V01 = 0x003A,
+	QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
+	QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
+	QMI_ERR_TYPE_MAX_ENUM_VAL_V01 = INT_MAX,
+};
+
+struct qmi_response_type_v01 {
+	enum qmi_result_type_v01 result;
+	enum qmi_error_type_v01 error;
+};
+
+/**
+ * struct qmi_svc_ops_options - Operations and options to be specified when
+ *                              a service registers.
+ * @version: Version field to identify the ops_options structure.
+ * @service_id: Service ID of the service.
+ * @service_vers: Version to identify the client-service compatibility.
+ * @service_ins: Instance ID registered by the service.
+ * @connect_cb: Callback when a new client connects with the service.
+ * @disconnect_cb: Callback when the client exits the connection.
+ * @req_desc_cb: Callback to get request structure and its descriptor
+ *               for a message id.
+ * @req_cb: Callback to process the request.
+ */
+struct qmi_svc_ops_options {
+	unsigned int version;
+	uint32_t service_id;
+	uint32_t service_vers;
+	uint32_t service_ins;
+	int (*connect_cb)(struct qmi_handle *handle,
+			  void *conn_handle);
+	int (*disconnect_cb)(struct qmi_handle *handle,
+			     void *conn_handle);
+	int (*req_desc_cb)(unsigned int msg_id,
+			   struct msg_desc **req_desc);
+	int (*req_cb)(struct qmi_handle *handle,
+		      void *conn_handle,
+		      void *req_handle,
+		      unsigned int msg_id,
+		      void *req);
+};
+
+#ifdef CONFIG_MSM_QMI_INTERFACE
+
+/* Element info array describing common qmi response structure */
+extern struct elem_info qmi_response_type_v01_ei[];
+#define get_qmi_response_type_v01_ei() qmi_response_type_v01_ei
+
+/**
+ * qmi_handle_create() - Create a QMI handle
+ * @notify: Callback to notify events on the handle created.
+ * @notify_priv: Private information to be passed along with the notification.
+ *
+ * @return: Valid QMI handle on success, NULL on error.
+ */
+struct qmi_handle *qmi_handle_create(
+	void (*notify)(struct qmi_handle *handle,
+		       enum qmi_event_type event, void *notify_priv),
+	void *notify_priv);
+
+/**
+ * qmi_handle_destroy() - Destroy the QMI handle
+ * @handle: QMI handle to be destroyed.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_handle_destroy(struct qmi_handle *handle);
+
+/**
+ * qmi_register_ind_cb() - Register the indication callback function
+ * @handle: QMI handle with which the function is registered.
+ * @ind_cb: Callback function to be registered.
+ * @ind_cb_priv: Private data to be passed with the indication callback.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_register_ind_cb(struct qmi_handle *handle,
+	void (*ind_cb)(struct qmi_handle *handle,
+		       unsigned int msg_id, void *msg,
+		       unsigned int msg_len, void *ind_cb_priv),
+	void *ind_cb_priv);
+
+/**
+ * qmi_send_req_wait() - Send a synchronous QMI request
+ * @handle: QMI handle through which the QMI request is sent.
+ * @req_desc: Structure describing the request data structure.
+ * @req: Buffer containing the request data structure.
+ * @req_len: Length of the request data structure.
+ * @resp_desc: Structure describing the response data structure.
+ * @resp: Buffer to hold the response data structure.
+ * @resp_len: Length of the response data structure.
+ * @timeout_ms: Timeout before a response is received.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_req_wait(struct qmi_handle *handle,
+		      struct msg_desc *req_desc,
+		      void *req, unsigned int req_len,
+		      struct msg_desc *resp_desc,
+		      void *resp, unsigned int resp_len,
+		      unsigned long timeout_ms);
+
+/**
+ * qmi_send_req_nowait() - Send an asynchronous QMI request
+ * @handle: QMI handle through which the QMI request is sent.
+ * @req_desc: Structure describing the request data structure.
+ * @req: Buffer containing the request data structure.
+ * @req_len: Length of the request data structure.
+ * @resp_desc: Structure describing the response data structure.
+ * @resp: Buffer to hold the response data structure.
+ * @resp_len: Length of the response data structure.
+ * @resp_cb: Callback function to be invoked when the response arrives.
+ * @resp_cb_data: Private information to be passed along with the callback.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_req_nowait(struct qmi_handle *handle,
+			struct msg_desc *req_desc,
+			void *req, unsigned int req_len,
+			struct msg_desc *resp_desc,
+			void *resp, unsigned int resp_len,
+			void (*resp_cb)(struct qmi_handle *handle,
+					unsigned int msg_id, void *msg,
+					void *resp_cb_data,
+					int stat),
+			void *resp_cb_data);
+
+/**
+ * qmi_recv_msg() - Receive the QMI message
+ * @handle: Handle for which the QMI message has to be received.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_recv_msg(struct qmi_handle *handle);
+
+/**
+ * qmi_connect_to_service() - Connect the QMI handle with a QMI service
+ * @handle: QMI handle to be connected with the QMI service.
+ * @service_id: Service id to identify the QMI service.
+ * @service_vers: Version to identify the compatibility.
+ * @service_ins: Instance id to identify the instance of the QMI service.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_connect_to_service(struct qmi_handle *handle,
+			   uint32_t service_id,
+			   uint32_t service_vers,
+			   uint32_t service_ins);
+
+/**
+ * qmi_svc_event_notifier_register() - Register a notifier block to receive
+ *                                     events regarding a QMI service
+ * @service_id: Service ID to identify the QMI service.
+ * @service_vers: Version to identify the compatibility.
+ * @service_ins: Instance ID to identify the instance of the QMI service.
+ * @nb: Notifier block used to receive the event.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_event_notifier_register(uint32_t service_id,
+				    uint32_t service_vers,
+				    uint32_t service_ins,
+				    struct notifier_block *nb);
+
+/**
+ * qmi_svc_event_notifier_unregister() - Unregister service event
+ *                                       notifier block
+ * @service_id: Service ID to identify the QMI service.
+ * @service_vers: Version to identify the compatibility.
+ * @service_ins: Instance ID to identify the instance of the QMI service.
+ * @nb: Notifier block registered to receive the events.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_event_notifier_unregister(uint32_t service_id,
+				      uint32_t service_vers,
+				      uint32_t service_ins,
+				      struct notifier_block *nb);
+
+/**
+ * qmi_svc_register() - Register a QMI service with a QMI handle
+ * @handle: QMI handle on which the service has to be registered.
+ * @ops_options: Service specific operations and options.
+ *
+ * @return: 0 if successfully registered, < 0 on error.
+ */
+int qmi_svc_register(struct qmi_handle *handle,
+		     void *ops_options);
+
+/**
+ * qmi_send_resp() - Send response to a request
+ * @handle: QMI handle from which the response is sent.
+ * @conn_handle: Connection handle of the client receiving the response.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp(struct qmi_handle *handle,
+		  void *conn_handle,
+		  void *req_handle,
+		  struct msg_desc *resp_desc,
+		  void *resp,
+		  unsigned int resp_len);
+
+/**
+ * qmi_send_resp_from_cb() - Send response to a request from request_cb
+ * @handle: QMI handle from which the response is sent.
+ * @conn_handle: Connection handle of the client receiving the response.
+ * @req_handle: Request for which the response is sent.
+ * @resp_desc: Descriptor explaining the response structure.
+ * @resp: Pointer to the response structure.
+ * @resp_len: Length of the response structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_resp_from_cb(struct qmi_handle *handle,
+			  void *conn_handle,
+			  void *req_handle,
+			  struct msg_desc *resp_desc,
+			  void *resp,
+			  unsigned int resp_len);
+
+/**
+ * qmi_send_ind() - Send unsolicited event/indication to a client
+ * @handle: QMI handle from which the indication is sent.
+ * @conn_handle: Connection handle of the client receiving the indication.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind(struct qmi_handle *handle,
+		 void *conn_handle,
+		 struct msg_desc *ind_desc,
+		 void *ind,
+		 unsigned int ind_len);
+
+/**
+ * qmi_send_ind_from_cb() - Send indication to a client from registration_cb
+ * @handle: QMI handle from which the indication is sent.
+ * @conn_handle: Connection handle of the client receiving the indication.
+ * @ind_desc: Descriptor explaining the indication structure.
+ * @ind: Pointer to the indication structure.
+ * @ind_len: Length of the indication structure.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_send_ind_from_cb(struct qmi_handle *handle,
+			 void *conn_handle,
+			 struct msg_desc *ind_desc,
+			 void *ind,
+			 unsigned int ind_len);
+
+/**
+ * qmi_svc_unregister() - Unregister the service from a QMI handle
+ * @handle: QMI handle from which the service has to be unregistered.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_svc_unregister(struct qmi_handle *handle);
+
+#else
+
+#define get_qmi_response_type_v01_ei() NULL
+
+static inline struct qmi_handle *qmi_handle_create(
+	void (*notify)(struct qmi_handle *handle,
+		       enum qmi_event_type event, void *notify_priv),
+	void *notify_priv)
+{
+	return NULL;
+}
+
+static inline int qmi_handle_destroy(struct qmi_handle *handle)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_register_ind_cb(struct qmi_handle *handle,
+	void (*ind_cb)(struct qmi_handle *handle,
+		       unsigned int msg_id, void *msg,
+		       unsigned int msg_len, void *ind_cb_priv),
+	void *ind_cb_priv)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_req_wait(struct qmi_handle *handle,
+				    struct msg_desc *req_desc,
+				    void *req, unsigned int req_len,
+				    struct msg_desc *resp_desc,
+				    void *resp, unsigned int resp_len,
+				    unsigned long timeout_ms)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_req_nowait(struct qmi_handle *handle,
+				struct msg_desc *req_desc,
+				void *req, unsigned int req_len,
+				struct msg_desc *resp_desc,
+				void *resp, unsigned int resp_len,
+				void (*resp_cb)(struct qmi_handle *handle,
+						unsigned int msg_id, void *msg,
+						void *resp_cb_data,
+						int stat),
+				void *resp_cb_data)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_recv_msg(struct qmi_handle *handle)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_connect_to_service(struct qmi_handle *handle,
+					 uint32_t service_id,
+					 uint32_t service_vers,
+					 uint32_t service_ins)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_event_notifier_register(uint32_t service_id,
+						  uint32_t service_vers,
+						  uint32_t service_ins,
+						  struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_event_notifier_unregister(uint32_t service_id,
+						    uint32_t service_vers,
+						    uint32_t service_ins,
+						    struct notifier_block *nb)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_register(struct qmi_handle *handle,
+				   void *ops_options)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_resp(struct qmi_handle *handle,
+				void *conn_handle,
+				void *req_handle,
+				struct msg_desc *resp_desc,
+				void *resp,
+				unsigned int resp_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_resp_from_cb(struct qmi_handle *handle,
+					void *conn_handle,
+					void *req_handle,
+					struct msg_desc *resp_desc,
+					void *resp,
+					unsigned int resp_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_ind(struct qmi_handle *handle,
+			       void *conn_handle,
+			       struct msg_desc *ind_desc,
+			       void *ind,
+			       unsigned int ind_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_send_ind_from_cb(struct qmi_handle *handle,
+				       void *conn_handle,
+				       struct msg_desc *ind_desc,
+				       void *ind,
+				       unsigned int ind_len)
+{
+	return -ENODEV;
+}
+
+static inline int qmi_svc_unregister(struct qmi_handle *handle)
+{
+	return -ENODEV;
+}
+
+#endif
+
+#endif
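To tie the client-side pieces together, here is a hedged sketch of the usual flow: create a handle with a notify callback, connect to a service, and issue a blocking request. The service/version/instance IDs, the descriptor names, and the example_client structure are placeholders; the notify callback defers qmi_recv_msg() to process context since it may be invoked atomically.

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <soc/qcom/msm_qmi_interface.h>

    /* Hypothetical descriptors; real ones come from the service's generated
     * encode/decode tables.
     */
    extern struct msg_desc example_req_desc, example_resp_desc;

    struct example_client {
            struct qmi_handle *handle;
            struct work_struct recv_work;
    };

    static void example_recv_work(struct work_struct *work)
    {
            struct example_client *clnt =
                    container_of(work, struct example_client, recv_work);

            qmi_recv_msg(clnt->handle);
    }

    /* The notify callback may run atomically, so defer qmi_recv_msg() */
    static void example_notify(struct qmi_handle *handle,
                               enum qmi_event_type event, void *priv)
    {
            struct example_client *clnt = priv;

            if (event == QMI_RECV_MSG)
                    schedule_work(&clnt->recv_work);
    }

    static int example_send_request(struct example_client *clnt,
                                    void *req, unsigned int req_len,
                                    void *resp, unsigned int resp_len)
    {
            int ret;

            INIT_WORK(&clnt->recv_work, example_recv_work);
            clnt->handle = qmi_handle_create(example_notify, clnt);
            if (!clnt->handle)
                    return -ENOMEM;

            /* Service/version/instance IDs are placeholders */
            ret = qmi_connect_to_service(clnt->handle, 0x42, 1, 0);
            if (!ret)
                    ret = qmi_send_req_wait(clnt->handle,
                                            &example_req_desc, req, req_len,
                                            &example_resp_desc, resp, resp_len,
                                            5000);

            cancel_work_sync(&clnt->recv_work);
            qmi_handle_destroy(clnt->handle);
            return ret;
    }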
diff --git a/include/soc/qcom/ramdump.h b/include/soc/qcom/ramdump.h
new file mode 100644
index 0000000..50a17c8
--- /dev/null
+++ b/include/soc/qcom/ramdump.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RAMDUMP_HEADER
+#define _RAMDUMP_HEADER
+
+struct device;
+
+struct ramdump_segment {
+	unsigned long address;
+	void *v_address;
+	unsigned long size;
+};
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+extern void *create_ramdump_device(const char *dev_name, struct device *parent);
+extern void destroy_ramdump_device(void *dev);
+extern int do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments);
+extern int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments);
+
+#else
+static inline void *create_ramdump_device(const char *dev_name,
+		struct device *parent)
+{
+	return NULL;
+}
+
+static inline void destroy_ramdump_device(void *dev)
+{
+}
+
+static inline int do_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments)
+{
+	return -ENODEV;
+}
+
+static inline int do_elf_ramdump(void *handle, struct ramdump_segment *segments,
+		int nsegments)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
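A minimal sketch of the subsystem-restart ramdump flow declared above; the device name, addresses and single-segment layout are placeholders.

    #include <linux/errno.h>
    #include <soc/qcom/ramdump.h>

    static int example_ramdump(struct device *parent, unsigned long pa,
                               void *va, unsigned long size)
    {
            struct ramdump_segment seg = {
                    .address = pa,
                    .v_address = va,
                    .size = size,
            };
            void *rd_dev;
            int ret;

            rd_dev = create_ramdump_device("example", parent);
            if (!rd_dev)
                    return -ENOMEM;

            ret = do_ramdump(rd_dev, &seg, 1);

            destroy_ramdump_device(rd_dev);
            return ret;
    }

do_elf_ramdump() takes the same arguments when an ELF-formatted dump is wanted.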
diff --git a/include/soc/qcom/restart.h b/include/soc/qcom/restart.h
new file mode 100644
index 0000000..bd0f139
--- /dev/null
+++ b/include/soc/qcom/restart.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_ARCH_MSM_RESTART_H_
+#define _ASM_ARCH_MSM_RESTART_H_
+
+#define RESTART_NORMAL 0x0
+#define RESTART_DLOAD  0x1
+
+void msm_set_restart_mode(int mode);
+extern int pmic_reset_irq;
+
+#endif
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
new file mode 100644
index 0000000..bf16ce9
--- /dev/null
+++ b/include/soc/qcom/rpmh.h
@@ -0,0 +1,97 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __SOC_QCOM_RPMH_H__
+#define __SOC_QCOM_RPMH_H__
+
+#include <soc/qcom/tcs.h>
+
+struct rpmh_client;
+
+#ifdef CONFIG_QTI_RPMH_API
+int rpmh_write_single(struct rpmh_client *rc, enum rpmh_state state,
+			u32 addr, u32 data);
+
+int rpmh_write_single_async(struct rpmh_client *rc,
+			enum rpmh_state state, u32 addr, u32 data);
+
+int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int n);
+
+int rpmh_write_async(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int n);
+
+int rpmh_write_passthru(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int *n);
+
+int rpmh_write_control(struct rpmh_client *rc, struct tcs_cmd *cmd, int n);
+
+int rpmh_invalidate(struct rpmh_client *rc);
+
+int rpmh_flush(struct rpmh_client *rc);
+
+int rpmh_read(struct rpmh_client *rc, u32 addr, u32 *resp);
+
+struct rpmh_client *rpmh_get_byname(struct platform_device *pdev,
+			const char *name);
+
+struct rpmh_client *rpmh_get_byindex(struct platform_device *pdev,
+			int index);
+
+void rpmh_release(struct rpmh_client *rc);
+#else
+static inline int rpmh_write_single(struct rpmh_client *rc,
+			enum rpmh_state state, u32 addr, u32 data)
+{ return -ENODEV; }
+
+static inline int rpmh_write_single_async(struct rpmh_client *rc,
+			enum rpmh_state state, u32 addr, u32 data)
+{ return -ENODEV; }
+
+static inline int rpmh_write(struct rpmh_client *rc, enum rpmh_state state,
+			struct tcs_cmd *cmd, int n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_async(struct rpmh_client *rc,
+			enum rpmh_state state, struct tcs_cmd *cmd, int n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_passthru(struct rpmh_client *rc,
+			enum rpmh_state state, struct tcs_cmd *cmd, int *n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_control(struct rpmh_client *rc,
+			struct tcs_cmd *cmd, int n)
+{ return -ENODEV; }
+
+static inline int rpmh_invalidate(struct rpmh_client *rc)
+{ return -ENODEV; }
+
+static inline int rpmh_flush(struct rpmh_client *rc)
+{ return -ENODEV; }
+
+static inline int rpmh_read(struct rpmh_client *rc, u32 addr,
+			u32 *resp)
+{ return -ENODEV; }
+
+static inline struct rpmh_client *rpmh_get_byname(struct platform_device *pdev,
+			const char *name)
+{ return ERR_PTR(-ENODEV); }
+
+static inline struct rpmh_client *rpmh_get_byindex(struct platform_device *pdev,
+			int index)
+{ return ERR_PTR(-ENODEV); }
+
+static inline void rpmh_release(struct rpmh_client *rc) { }
+#endif /* CONFIG_QTI_RPMH_API */
+
+#endif /* __SOC_QCOM_RPMH_H__ */
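A hedged sketch of the handle lifecycle for the RPMH API above. The client name string and the address/data values are placeholders, and the rpmh_state value (which comes from soc/qcom/tcs.h) is passed in rather than named here.

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <soc/qcom/rpmh.h>

    static int example_rpmh_vote(struct platform_device *pdev,
                                 enum rpmh_state state, u32 addr, u32 data)
    {
            struct rpmh_client *rc;
            int ret;

            rc = rpmh_get_byname(pdev, "example");  /* placeholder name */
            if (IS_ERR(rc))
                    return PTR_ERR(rc);

            ret = rpmh_write_single(rc, state, addr, data);

            rpmh_release(rc);
            return ret;
    }

rpmh_write()/rpmh_write_async() take an array of tcs_cmd for multi-command requests; the single-register variant is shown only to keep the sketch short.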
diff --git a/include/soc/qcom/scm-boot.h b/include/soc/qcom/scm-boot.h
new file mode 100644
index 0000000..b986608
--- /dev/null
+++ b/include/soc/qcom/scm-boot.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2010, 2012, 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_SCM_BOOT_H
+#define __MACH_SCM_BOOT_H
+
+#define SCM_BOOT_ADDR			0x1
+#define SCM_FLAG_COLDBOOT_CPU1		0x01
+#define SCM_FLAG_COLDBOOT_CPU2		0x08
+#define SCM_FLAG_COLDBOOT_CPU3		0x20
+#define SCM_FLAG_WARMBOOT_CPU1		0x02
+#define SCM_FLAG_WARMBOOT_CPU0		0x04
+#define SCM_FLAG_WARMBOOT_CPU2		0x10
+#define SCM_FLAG_WARMBOOT_CPU3		0x40
+
+/* Multicluster Variants */
+#define SCM_BOOT_ADDR_MC		0x11
+#define SCM_FLAG_COLDBOOT_MC		0x02
+#define SCM_FLAG_WARMBOOT_MC		0x04
+
+#ifdef CONFIG_ARM64
+#define SCM_FLAG_HLOS			0x01
+#else
+#define SCM_FLAG_HLOS			0x0
+#endif
+
+#ifdef CONFIG_QCOM_SCM
+int scm_set_boot_addr(phys_addr_t addr, unsigned int flags);
+int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+		u32 aff1, u32 aff2, u32 flags);
+int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr);
+int scm_is_mc_boot_available(void);
+#else
+static inline int scm_set_boot_addr(phys_addr_t addr, unsigned int flags)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+static inline int scm_set_boot_addr_mc(phys_addr_t addr, u32 aff0,
+		u32 aff1, u32 aff2, u32 flags)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+static inline int scm_set_warm_boot_addr_mc_for_all(phys_addr_t addr)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+static inline int scm_is_mc_boot_available(void)
+{
+	WARN_ONCE(1, "CONFIG_QCOM_SCM disabled, SCM call will fail silently\n");
+	return 0;
+}
+#endif
+
+#endif
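A small sketch of how the boot-address helpers above fit together, assuming scm_is_mc_boot_available() returns nonzero when the multicluster call is supported and that resume_addr is the physical address of the warm-boot entry point.

    #include <linux/types.h>
    #include <soc/qcom/scm-boot.h>

    static int example_set_warm_boot(phys_addr_t resume_addr)
    {
            if (scm_is_mc_boot_available())
                    return scm_set_warm_boot_addr_mc_for_all(resume_addr);

            /* Fall back to the per-CPU flag interface; CPU set is illustrative */
            return scm_set_boot_addr(resume_addr,
                                     SCM_FLAG_WARMBOOT_CPU0 |
                                     SCM_FLAG_WARMBOOT_CPU1);
    }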
diff --git a/include/soc/qcom/scm.h b/include/soc/qcom/scm.h
new file mode 100644
index 0000000..ac8b2eb
--- /dev/null
+++ b/include/soc/qcom/scm.h
@@ -0,0 +1,237 @@
+/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef __MACH_SCM_H
+#define __MACH_SCM_H
+
+#define SCM_SVC_BOOT			0x1
+#define SCM_SVC_PIL			0x2
+#define SCM_SVC_UTIL			0x3
+#define SCM_SVC_TZ			0x4
+#define SCM_SVC_IO			0x5
+#define SCM_SVC_INFO			0x6
+#define SCM_SVC_SSD			0x7
+#define SCM_SVC_FUSE			0x8
+#define SCM_SVC_PWR			0x9
+#define SCM_SVC_MP			0xC
+#define SCM_SVC_DCVS			0xD
+#define SCM_SVC_ES			0x10
+#define SCM_SVC_HDCP			0x11
+#define SCM_SVC_MDTP			0x12
+#define SCM_SVC_LMH			0x13
+#define SCM_SVC_SMMU_PROGRAM		0x15
+#define SCM_SVC_QDSS			0x16
+#define SCM_SVC_TZSCHEDULER		0xFC
+
+#define SCM_FUSE_READ			0x7
+#define SCM_CMD_HDCP			0x01
+
+/* SCM Features */
+#define SCM_SVC_SEC_CAMERA		0xD
+
+#define DEFINE_SCM_BUFFER(__n) \
+static char __n[PAGE_SIZE] __aligned(PAGE_SIZE);
+
+#define SCM_BUFFER_SIZE(__buf)	sizeof(__buf)
+
+#define SCM_BUFFER_PHYS(__buf)	virt_to_phys(__buf)
+
+#define SCM_SIP_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | 0x02000000)
+#define SCM_QSEEOS_FNID(s, c) (((((s) & 0xFF) << 8) | ((c) & 0xFF)) | \
+			      0x32000000)
+#define SCM_SVC_ID(s) (((s) & 0xFF00) >> 8)
+
+#define MAX_SCM_ARGS 10
+#define MAX_SCM_RETS 3
+
+enum scm_arg_types {
+	SCM_VAL,
+	SCM_RO,
+	SCM_RW,
+	SCM_BUFVAL,
+};
+
+#define SCM_ARGS_IMPL(num, a, b, c, d, e, f, g, h, i, j, ...) (\
+			(((a) & 0xff) << 4) | \
+			(((b) & 0xff) << 6) | \
+			(((c) & 0xff) << 8) | \
+			(((d) & 0xff) << 10) | \
+			(((e) & 0xff) << 12) | \
+			(((f) & 0xff) << 14) | \
+			(((g) & 0xff) << 16) | \
+			(((h) & 0xff) << 18) | \
+			(((i) & 0xff) << 20) | \
+			(((j) & 0xff) << 22) | \
+			(num & 0xffff))
+
+#define SCM_ARGS(...) SCM_ARGS_IMPL(__VA_ARGS__, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+
+/**
+ * struct scm_desc
+ * @arginfo: Metadata describing the arguments in args[]
+ * @args: The array of arguments for the secure syscall
+ * @ret: The values returned by the secure syscall
+ * @extra_arg_buf: The buffer containing extra arguments
+ *		   (that don't fit in available registers)
+ * @x5: The 4th argument to the secure syscall, or the physical address of
+ *	extra_arg_buf
+ */
+struct scm_desc {
+	u32 arginfo;
+	u64 args[MAX_SCM_ARGS];
+	u64 ret[MAX_SCM_RETS];
+
+	/* private */
+	void *extra_arg_buf;
+	u64 x5;
+};
+
+#ifdef CONFIG_QCOM_SCM
+extern int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf, size_t cmd_len,
+		void *resp_buf, size_t resp_len);
+
+extern int scm_call2(u32 cmd_id, struct scm_desc *desc);
+
+extern int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc);
+
+extern int scm_call_noalloc(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len,
+		void *scm_buf, size_t scm_buf_size);
+
+
+extern s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1);
+extern s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1);
+extern s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2);
+extern s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3);
+extern s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
+		u32 arg4, u32 *ret1, u32 *ret2);
+extern s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2, u32 arg3,
+		u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3);
+
+#define SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF))
+
+extern u32 scm_get_version(void);
+extern int scm_is_call_available(u32 svc_id, u32 cmd_id);
+extern int scm_get_feat_version(u32 feat);
+extern bool is_scm_armv8(void);
+extern int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret);
+extern u32 scm_io_read(phys_addr_t address);
+extern int scm_io_write(phys_addr_t address, u32 val);
+extern bool scm_is_secure_device(void);
+
+#define SCM_HDCP_MAX_REG 5
+
+struct scm_hdcp_req {
+	u32 addr;
+	u32 val;
+};
+
+extern struct mutex scm_lmh_lock;
+
+#else
+
+static inline int scm_call(u32 svc_id, u32 cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	return 0;
+}
+
+static inline int scm_call2(u32 cmd_id, struct scm_desc *desc)
+{
+	return 0;
+}
+
+static inline int scm_call2_atomic(u32 cmd_id, struct scm_desc *desc)
+{
+	return 0;
+}
+
+static inline int scm_call_noalloc(u32 svc_id, u32 cmd_id,
+		const void *cmd_buf, size_t cmd_len, void *resp_buf,
+		size_t resp_len, void *scm_buf, size_t scm_buf_size)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic1(u32 svc, u32 cmd, u32 arg1)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic1_1(u32 svc, u32 cmd, u32 arg1, u32 *ret1)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic2(u32 svc, u32 cmd, u32 arg1, u32 arg2)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+		u32 arg3)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic4_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+		u32 arg3, u32 arg4, u32 *ret1, u32 *ret2)
+{
+	return 0;
+}
+
+static inline s32 scm_call_atomic5_3(u32 svc, u32 cmd, u32 arg1, u32 arg2,
+	u32 arg3, u32 arg4, u32 arg5, u32 *ret1, u32 *ret2, u32 *ret3)
+{
+	return 0;
+}
+
+static inline u32 scm_get_version(void)
+{
+	return 0;
+}
+
+static inline int scm_is_call_available(u32 svc_id, u32 cmd_id)
+{
+	return 0;
+}
+
+static inline int scm_get_feat_version(u32 feat)
+{
+	return 0;
+}
+
+static inline bool is_scm_armv8(void)
+{
+	return true;
+}
+
+static inline int scm_restore_sec_cfg(u32 device_id, u32 spare, int *scm_ret)
+{
+	return 0;
+}
+
+static inline u32 scm_io_read(phys_addr_t address)
+{
+	return 0;
+}
+
+static inline int scm_io_write(phys_addr_t address, u32 val)
+{
+	return 0;
+}
+
+static inline bool scm_is_secure_device(void)
+{
+	return false;
+}
+#endif
+#endif
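As a hedged illustration of the descriptor-based ARMv8 interface above, a single-value SIP call can be built with SCM_ARGS()/SCM_SIP_FNID() and issued through scm_call2(); the service/command IDs and the argument value are placeholders.

    #include <soc/qcom/scm.h>

    static int example_scm_call(u32 svc_id, u32 cmd_id, u32 arg, u64 *out)
    {
            struct scm_desc desc = { 0 };
            int ret;

            desc.args[0] = arg;
            desc.arginfo = SCM_ARGS(1, SCM_VAL);

            ret = scm_call2(SCM_SIP_FNID(svc_id, cmd_id), &desc);
            if (!ret && out)
                    *out = desc.ret[0];

            return ret;
    }

scm_call2_atomic() takes the same arguments for callers that cannot sleep.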
diff --git a/include/soc/qcom/secure_buffer.h b/include/soc/qcom/secure_buffer.h
new file mode 100644
index 0000000..7615776
--- /dev/null
+++ b/include/soc/qcom/secure_buffer.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QCOM_SECURE_BUFFER_H__
+#define __QCOM_SECURE_BUFFER_H__
+
+#include <linux/scatterlist.h>
+
+/*
+ * If you add a secure VMID here, make sure you update
+ * msm_secure_vmid_to_string.
+ * Keep VMID_LAST as the last entry in the enum; ion relies on it to size
+ * the list it creates.
+ */
+enum vmid {
+	VMID_HLOS = 0x3,
+	VMID_CP_TOUCH = 0x8,
+	VMID_CP_BITSTREAM = 0x9,
+	VMID_CP_PIXEL = 0xA,
+	VMID_CP_NON_PIXEL = 0xB,
+	VMID_CP_CAMERA = 0xD,
+	VMID_HLOS_FREE = 0xE,
+	VMID_MSS_MSA = 0xF,
+	VMID_MSS_NONMSA = 0x10,
+	VMID_CP_SEC_DISPLAY = 0x11,
+	VMID_CP_APP = 0x12,
+	VMID_WLAN = 0x18,
+	VMID_WLAN_CE = 0x19,
+	VMID_LAST,
+	VMID_INVAL = -1
+};
+
+#define PERM_READ                       0x4
+#define PERM_WRITE                      0x2
+#define PERM_EXEC			0x1
+
+#ifdef CONFIG_QCOM_SECURE_BUFFER
+int msm_secure_table(struct sg_table *table);
+int msm_unsecure_table(struct sg_table *table);
+int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems);
+int hyp_assign_phys(phys_addr_t addr, u64 size,
+			u32 *source_vmlist, int source_nelems,
+			int *dest_vmids, int *dest_perms, int dest_nelems);
+bool msm_secure_v2_is_supported(void);
+const char *msm_secure_vmid_to_string(int secure_vmid);
+#else
+static inline int msm_secure_table(struct sg_table *table)
+{
+	return -EINVAL;
+}
+static inline int msm_unsecure_table(struct sg_table *table)
+{
+	return -EINVAL;
+}
+static inline int hyp_assign_table(struct sg_table *table,
+			u32 *source_vm_list, int source_nelems,
+			int *dest_vmids, int *dest_perms,
+			int dest_nelems)
+{
+	return -EINVAL;
+}
+
+static inline int hyp_assign_phys(phys_addr_t addr, u64 size,
+			u32 *source_vmlist, int source_nelems,
+			int *dest_vmids, int *dest_perms, int dest_nelems)
+{
+	return -EINVAL;
+}
+
+static inline bool msm_secure_v2_is_supported(void)
+{
+	return false;
+}
+static inline const char *msm_secure_vmid_to_string(int secure_vmid)
+{
+	return "N/A";
+}
+#endif
+#endif
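A sketch of reassigning a physically contiguous buffer from HLOS to a content-protection VM using the call above; the destination VMID and permissions are illustrative only.

    #include <linux/kernel.h>
    #include <soc/qcom/secure_buffer.h>

    static int example_assign_to_cp(phys_addr_t addr, u64 size)
    {
            u32 src_vm[] = { VMID_HLOS };
            int dst_vm[] = { VMID_CP_PIXEL };
            int dst_perm[] = { PERM_READ | PERM_WRITE };

            return hyp_assign_phys(addr, size, src_vm, ARRAY_SIZE(src_vm),
                                   dst_vm, dst_perm, ARRAY_SIZE(dst_vm));
    }

hyp_assign_table() takes the same VMID and permission arrays for scatter-gather backed buffers.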
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
new file mode 100644
index 0000000..bef98d6
--- /dev/null
+++ b/include/soc/qcom/smem.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SMEM_H_
+#define _ARCH_ARM_MACH_MSM_SMEM_H_
+
+#include <linux/types.h>
+
+enum {
+	SMEM_APPS,
+	SMEM_MODEM,
+	SMEM_Q6,
+	SMEM_DSPS,
+	SMEM_WCNSS,
+	SMEM_CDSP,
+	SMEM_RPM,
+	SMEM_TZ,
+	SMEM_SPSS,
+	SMEM_HYP,
+	NUM_SMEM_SUBSYSTEMS,
+};
+
+/*
+ * Flag options for the XXX_to_proc() API
+ *
+ * SMEM_ITEM_CACHED_FLAG - Indicates this operation should use cacheable SMEM
+ *
+ * SMEM_ANY_HOST_FLAG - Indicates this operation should not apply to smem items
+ *                      which are limited to a specific host pairing.  Will
+ *                      cause this operation to ignore the to_proc parameter.
+ */
+#define SMEM_ITEM_CACHED_FLAG 1
+#define SMEM_ANY_HOST_FLAG 2
+
+#define SMEM_NUM_SMD_STREAM_CHANNELS        64
+
+/**
+ * OVERFLOW_ADD_UNSIGNED() - check for unsigned overflow
+ *
+ * @type: type to check for overflow
+ * @a: left value to use
+ * @b: right value to use
+ * @returns: true if a + b will result in overflow; false otherwise
+ */
+#define OVERFLOW_ADD_UNSIGNED(type, a, b) \
+	(((type)~0 - (a)) < (b) ? true : false)
+
+enum {
+	/* fixed items */
+	SMEM_PROC_COMM = 0,
+	SMEM_HEAP_INFO,
+	SMEM_ALLOCATION_TABLE,
+	SMEM_VERSION_INFO,
+	SMEM_HW_RESET_DETECT,
+	SMEM_AARM_WARM_BOOT,
+	SMEM_DIAG_ERR_MESSAGE,
+	SMEM_SPINLOCK_ARRAY,
+	SMEM_MEMORY_BARRIER_LOCATION,
+	SMEM_FIXED_ITEM_LAST = SMEM_MEMORY_BARRIER_LOCATION,
+
+	/* dynamic items */
+	SMEM_AARM_PARTITION_TABLE,
+	SMEM_AARM_BAD_BLOCK_TABLE,
+	SMEM_ERR_CRASH_LOG_ADSP,
+	SMEM_WM_UUID,
+	SMEM_CHANNEL_ALLOC_TBL,
+	SMEM_SMD_BASE_ID,
+	SMEM_SMEM_LOG_IDX = SMEM_SMD_BASE_ID + SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_SMEM_LOG_EVENTS,
+	SMEM_XBL_LOADER_CORE_INFO,
+	SMEM_SMEM_STATIC_LOG_EVENTS,
+	SMEM_CHARGER_BATTERY_INFO = SMEM_SMEM_STATIC_LOG_EVENTS,
+	SMEM_SMEM_SLOW_CLOCK_SYNC,
+	SMEM_SMEM_SLOW_CLOCK_VALUE,
+	SMEM_BIO_LED_BUF,
+	SMEM_SMSM_SHARED_STATE,
+	SMEM_SMSM_INT_INFO,
+	SMEM_SMSM_SLEEP_DELAY,
+	SMEM_SMSM_LIMIT_SLEEP,
+	SMEM_SLEEP_POWER_COLLAPSE_DISABLED,
+	SMEM_KEYPAD_KEYS_PRESSED,
+	SMEM_KEYPAD_STATE_UPDATED,
+	SMEM_KEYPAD_STATE_IDX,
+	SMEM_GPIO_INT,
+	SMEM_SMP2P_CDSP_BASE,
+	SMEM_SMD_PROFILES = SMEM_SMP2P_CDSP_BASE + 8,
+	SMEM_TSSC_BUSY,
+	SMEM_HS_SUSPEND_FILTER_INFO,
+	SMEM_BATT_INFO,
+	SMEM_APPS_BOOT_MODE,
+	SMEM_VERSION_FIRST,
+	SMEM_VERSION_SMD = SMEM_VERSION_FIRST,
+	SMEM_VERSION_LAST = SMEM_VERSION_FIRST + 24,
+	SMEM_OSS_RRCASN1_BUF1,
+	SMEM_OSS_RRCASN1_BUF2,
+	SMEM_ID_VENDOR0,
+	SMEM_ID_VENDOR1,
+	SMEM_ID_VENDOR2,
+	SMEM_HW_SW_BUILD_ID,
+	SMEM_SMD_BASE_ID_2,
+	SMEM_SMD_FIFO_BASE_ID_2 = SMEM_SMD_BASE_ID_2 +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_CHANNEL_ALLOC_TBL_2 = SMEM_SMD_FIFO_BASE_ID_2 +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_I2C_MUTEX = SMEM_CHANNEL_ALLOC_TBL_2 +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_SCLK_CONVERSION,
+	SMEM_SMD_SMSM_INTR_MUX,
+	SMEM_SMSM_CPU_INTR_MASK,
+	SMEM_APPS_DEM_SLAVE_DATA,
+	SMEM_QDSP6_DEM_SLAVE_DATA,
+	SMEM_VSENSE_DATA,
+	SMEM_CLKREGIM_SOURCES,
+	SMEM_SMD_FIFO_BASE_ID,
+	SMEM_USABLE_RAM_PARTITION_TABLE = SMEM_SMD_FIFO_BASE_ID +
+						SMEM_NUM_SMD_STREAM_CHANNELS,
+	SMEM_POWER_ON_STATUS_INFO,
+	SMEM_DAL_AREA,
+	SMEM_SMEM_LOG_POWER_IDX,
+	SMEM_SMEM_LOG_POWER_WRAP,
+	SMEM_SMEM_LOG_POWER_EVENTS,
+	SMEM_ERR_CRASH_LOG,
+	SMEM_ERR_F3_TRACE_LOG,
+	SMEM_SMD_BRIDGE_ALLOC_TABLE,
+	SMEM_SMDLITE_TABLE,
+	SMEM_SD_IMG_UPGRADE_STATUS,
+	SMEM_SEFS_INFO,
+	SMEM_RESET_LOG,
+	SMEM_RESET_LOG_SYMBOLS,
+	SMEM_MODEM_SW_BUILD_ID,
+	SMEM_SMEM_LOG_MPROC_WRAP,
+	SMEM_BOOT_INFO_FOR_APPS,
+	SMEM_SMSM_SIZE_INFO,
+	SMEM_SMD_LOOPBACK_REGISTER,
+	SMEM_SSR_REASON_MSS0,
+	SMEM_SSR_REASON_WCNSS0,
+	SMEM_SSR_REASON_LPASS0,
+	SMEM_SSR_REASON_DSPS0,
+	SMEM_SSR_REASON_VCODEC0,
+	SMEM_SMP2P_APPS_BASE = 427,
+	SMEM_SMP2P_MODEM_BASE = SMEM_SMP2P_APPS_BASE + 8,    /* 435 */
+	SMEM_SMP2P_AUDIO_BASE = SMEM_SMP2P_MODEM_BASE + 8,   /* 443 */
+	SMEM_SMP2P_WIRLESS_BASE = SMEM_SMP2P_AUDIO_BASE + 8, /* 451 */
+	SMEM_SMP2P_POWER_BASE = SMEM_SMP2P_WIRLESS_BASE + 8, /* 459 */
+	SMEM_FLASH_DEVICE_INFO = SMEM_SMP2P_POWER_BASE + 8,  /* 467 */
+	SMEM_BAM_PIPE_MEMORY,     /* 468 */
+	SMEM_IMAGE_VERSION_TABLE, /* 469 */
+	SMEM_LC_DEBUGGER, /* 470 */
+	SMEM_FLASH_NAND_DEV_INFO, /* 471 */
+	SMEM_A2_BAM_DESCRIPTOR_FIFO, /* 472 */
+	SMEM_CPR_CONFIG, /* 473 */
+	SMEM_CLOCK_INFO, /* 474 */
+	SMEM_IPC_FIFO, /* 475 */
+	SMEM_RF_EEPROM_DATA, /* 476 */
+	SMEM_COEX_MDM_WCN, /* 477 */
+	SMEM_GLINK_NATIVE_XPRT_DESCRIPTOR, /* 478 */
+	SMEM_GLINK_NATIVE_XPRT_FIFO_0, /* 479 */
+	SMEM_GLINK_NATIVE_XPRT_FIFO_1, /* 480 */
+	SMEM_SMP2P_SENSOR_BASE, /* 481 */
+	SMEM_SMP2P_TZ_BASE = SMEM_SMP2P_SENSOR_BASE + 8, /* 489 */
+	SMEM_IPA_FILTER_TABLE = SMEM_SMP2P_TZ_BASE + 8, /* 497 */
+	SMEM_NUM_ITEMS, /* 498 */
+};
+
+#ifdef CONFIG_MSM_SMEM
+void *smem_alloc(unsigned int id, unsigned int size_in, unsigned int to_proc,
+						unsigned int flags);
+void *smem_find(unsigned int id, unsigned int size_in, unsigned int to_proc,
+						unsigned int flags);
+void *smem_get_entry(unsigned int id, unsigned int *size, unsigned int to_proc,
+						unsigned int flags);
+
+/**
+ * smem_get_entry_no_rlock - Get existing item without using remote spinlock
+ *
+ * @id:       ID of SMEM item
+ * @size_out: Pointer to size variable for storing the result
+ * @to_proc:  SMEM host that shares the item with apps
+ * @flags:    Item attribute flags
+ * @returns:  Pointer to SMEM item or NULL if it doesn't exist
+ *
+ * This function does not lock the remote spinlock and should only be used in
+ * failure-recover cases such as retrieving the subsystem failure reason during
+ * subsystem restart.
+ */
+void *smem_get_entry_no_rlock(unsigned int id, unsigned int *size_out,
+				unsigned int to_proc, unsigned int flags);
+
+/**
+ * smem_virt_to_phys() - Convert SMEM address to physical address.
+ *
+ * @smem_address: Virtual address returned by smem_alloc()
+ * @returns: Physical address (or NULL if there is a failure)
+ *
+ * This function should only be used if an SMEM item needs to be handed
+ * off to a DMA engine.
+ */
+phys_addr_t smem_virt_to_phys(void *smem_address);
+
+/**
+ * msm_smem_init() - SMEM initialization function that registers the SMEM
+ *                   platform driver.
+ *
+ * @returns: 0 on successful driver registration, standard Linux error code
+ *           otherwise.
+ */
+int __init msm_smem_init(void);
+
+#else
+static inline void *smem_alloc(unsigned int id, unsigned int size_in,
+				unsigned int to_proc, unsigned int flags)
+{
+	return NULL;
+}
+static inline void *smem_find(unsigned int id, unsigned int size_in,
+				unsigned int to_proc, unsigned int flags)
+{
+	return NULL;
+}
+static inline void *smem_get_entry(unsigned int id, unsigned int *size,
+				unsigned int to_proc, unsigned int flags)
+{
+	return NULL;
+}
+static inline void *smem_get_entry_no_rlock(unsigned int id,
+						unsigned int *size_out,
+						unsigned int to_proc,
+					       unsigned int flags)
+{
+	return NULL;
+}
+static inline phys_addr_t smem_virt_to_phys(void *smem_address)
+{
+	return (phys_addr_t) NULL;
+}
+static inline int __init msm_smem_init(void)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_SMEM  */
+#endif /* _ARCH_ARM_MACH_MSM_SMEM_H_ */
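Two short sketches of the lookup and allocation calls above, using the modem SSR reason as the example item; per the flag description, SMEM_ANY_HOST_FLAG makes the to_proc argument irrelevant, so 0 is passed there, and the size and item choices below are placeholders.

    #include <soc/qcom/smem.h>

    static const char *example_mss_ssr_reason(unsigned int *size_out)
    {
            return smem_get_entry(SMEM_SSR_REASON_MSS0, size_out, 0,
                                  SMEM_ANY_HOST_FLAG);
    }

    /* Allocate (or find) an item shared with the modem; size is a placeholder */
    static void *example_alloc_shared(void)
    {
            return smem_alloc(SMEM_ID_VENDOR0, 1024, SMEM_MODEM, 0);
    }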
diff --git a/include/soc/qcom/socinfo.h b/include/soc/qcom/socinfo.h
new file mode 100644
index 0000000..20410f5
--- /dev/null
+++ b/include/soc/qcom/socinfo.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2009-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ARCH_ARM_MACH_MSM_SOCINFO_H_
+#define _ARCH_ARM_MACH_MSM_SOCINFO_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/of_fdt.h>
+#include <linux/of.h>
+
+#include <asm/cputype.h>
+/*
+ * SOC version type with major number in the upper 16 bits and minor
+ * number in the lower 16 bits.  For example:
+ *   1.0 -> 0x00010000
+ *   2.3 -> 0x00020003
+ */
+#define SOCINFO_VERSION_MAJOR(ver) (((ver) & 0xffff0000) >> 16)
+#define SOCINFO_VERSION_MINOR(ver) ((ver) & 0x0000ffff)
+#define SOCINFO_VERSION(maj, min)  ((((maj) & 0xffff) << 16)|((min) & 0xffff))
+
+#ifdef CONFIG_OF
+#define of_board_is_cdp()	of_machine_is_compatible("qcom,cdp")
+#define of_board_is_sim()	of_machine_is_compatible("qcom,sim")
+#define of_board_is_rumi()	of_machine_is_compatible("qcom,rumi")
+#define of_board_is_fluid()	of_machine_is_compatible("qcom,fluid")
+#define of_board_is_liquid()	of_machine_is_compatible("qcom,liquid")
+#define of_board_is_dragonboard()	\
+	of_machine_is_compatible("qcom,dragonboard")
+#define of_board_is_mtp()	of_machine_is_compatible("qcom,mtp")
+#define of_board_is_qrd()	of_machine_is_compatible("qcom,qrd")
+#define of_board_is_xpm()	of_machine_is_compatible("qcom,xpm")
+#define of_board_is_skuf()	of_machine_is_compatible("qcom,skuf")
+#define of_board_is_sbc()	of_machine_is_compatible("qcom,sbc")
+
+#define machine_is_msm8974()	of_machine_is_compatible("qcom,msm8974")
+#define machine_is_msm9625()	of_machine_is_compatible("qcom,msm9625")
+#define machine_is_msm8610()	of_machine_is_compatible("qcom,msm8610")
+#define machine_is_msm8226()	of_machine_is_compatible("qcom,msm8226")
+#define machine_is_apq8074()	of_machine_is_compatible("qcom,apq8074")
+#define machine_is_msm8926()	of_machine_is_compatible("qcom,msm8926")
+
+#define early_machine_is_msm8610()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8610")
+#define early_machine_is_msm8909()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8909")
+#define early_machine_is_msm8916()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8916")
+#define early_machine_is_msm8936()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8936")
+#define early_machine_is_msm8939()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8939")
+#define early_machine_is_apq8084()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apq8084")
+#define early_machine_is_mdm9630()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,mdm9630")
+#define early_machine_is_msmzirc()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmzirc")
+#define early_machine_is_fsm9900()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9900")
+#define early_machine_is_msm8994()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8994")
+#define early_machine_is_msm8992()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8992")
+#define early_machine_is_fsm9010()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,fsm9010")
+#define early_machine_is_msm8976()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8976")
+#define early_machine_is_msmtellurium()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmtellurium")
+#define early_machine_is_msm8996()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8996")
+#define early_machine_is_msm8996_auto()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8996-cdp")
+#define early_machine_is_msm8929()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msm8929")
+#define early_machine_is_msmcobalt()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmcobalt")
+#define early_machine_is_apqcobalt()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,apqcobalt")
+#define early_machine_is_msmhamster()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmhamster")
+#define early_machine_is_msmfalcon()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmfalcon")
+#define early_machine_is_msmskunk()	\
+	of_flat_dt_is_compatible(of_get_flat_dt_root(), "qcom,msmskunk")
+#else
+#define of_board_is_sim()		0
+#define of_board_is_rumi()		0
+#define of_board_is_fluid()		0
+#define of_board_is_liquid()		0
+#define of_board_is_dragonboard()	0
+#define of_board_is_cdp()		0
+#define of_board_is_mtp()		0
+#define of_board_is_qrd()		0
+#define of_board_is_xpm()		0
+#define of_board_is_skuf()		0
+#define of_board_is_sbc()		0
+
+#define machine_is_msm8974()		0
+#define machine_is_msm9625()		0
+#define machine_is_msm8610()		0
+#define machine_is_msm8226()		0
+#define machine_is_apq8074()		0
+#define machine_is_msm8926()		0
+
+#define early_machine_is_msm8610()	0
+#define early_machine_is_msm8909()	0
+#define early_machine_is_msm8916()	0
+#define early_machine_is_msm8936()	0
+#define early_machine_is_msm8939()	0
+#define early_machine_is_apq8084()	0
+#define early_machine_is_mdm9630()	0
+#define early_machine_is_fsm9900()	0
+#define early_machine_is_fsm9010()	0
+#define early_machine_is_msmtellurium()	0
+#define early_machine_is_msm8996()	0
+#define early_machine_is_msm8976() 0
+#define early_machine_is_msm8929()	0
+#define early_machine_is_msmcobalt()	0
+#define early_machine_is_apqcobalt()	0
+#define early_machine_is_msmhamster()	0
+#define early_machine_is_msmfalcon()	0
+#define early_machine_is_msmskunk()	0
+#endif
+
+#define PLATFORM_SUBTYPE_MDM	1
+#define PLATFORM_SUBTYPE_INTERPOSERV3 2
+#define PLATFORM_SUBTYPE_SGLTE	6
+
+enum msm_cpu {
+	MSM_CPU_UNKNOWN = 0,
+	MSM_CPU_7X01,
+	MSM_CPU_7X25,
+	MSM_CPU_7X27,
+	MSM_CPU_8X50,
+	MSM_CPU_8X50A,
+	MSM_CPU_7X30,
+	MSM_CPU_8X55,
+	MSM_CPU_8X60,
+	MSM_CPU_8960,
+	MSM_CPU_8960AB,
+	MSM_CPU_7X27A,
+	FSM_CPU_9XXX,
+	MSM_CPU_7X25A,
+	MSM_CPU_7X25AA,
+	MSM_CPU_7X25AB,
+	MSM_CPU_8064,
+	MSM_CPU_8064AB,
+	MSM_CPU_8064AA,
+	MSM_CPU_8930,
+	MSM_CPU_8930AA,
+	MSM_CPU_8930AB,
+	MSM_CPU_7X27AA,
+	MSM_CPU_9615,
+	MSM_CPU_8974,
+	MSM_CPU_8974PRO_AA,
+	MSM_CPU_8974PRO_AB,
+	MSM_CPU_8974PRO_AC,
+	MSM_CPU_8627,
+	MSM_CPU_8625,
+	MSM_CPU_9625,
+	MSM_CPU_8909,
+	MSM_CPU_8916,
+	MSM_CPU_8936,
+	MSM_CPU_8939,
+	MSM_CPU_8226,
+	MSM_CPU_8610,
+	MSM_CPU_8625Q,
+	MSM_CPU_8084,
+	MSM_CPU_9630,
+	FSM_CPU_9900,
+	MSM_CPU_ZIRC,
+	MSM_CPU_8994,
+	MSM_CPU_8992,
+	FSM_CPU_9010,
+	MSM_CPU_TELLURIUM,
+	MSM_CPU_8996,
+	MSM_CPU_8976,
+	MSM_CPU_8929,
+	MSM_CPU_COBALT,
+	MSM_CPU_HAMSTER,
+	MSM_CPU_FALCON,
+	MSM_CPU_SKUNK,
+};
+
+struct msm_soc_info {
+	enum msm_cpu generic_soc_type;
+	char *soc_id_string;
+};
+
+enum pmic_model {
+	PMIC_MODEL_PM8058	= 13,
+	PMIC_MODEL_PM8028	= 14,
+	PMIC_MODEL_PM8901	= 15,
+	PMIC_MODEL_PM8027	= 16,
+	PMIC_MODEL_ISL_9519	= 17,
+	PMIC_MODEL_PM8921	= 18,
+	PMIC_MODEL_PM8018	= 19,
+	PMIC_MODEL_PM8015	= 20,
+	PMIC_MODEL_PM8014	= 21,
+	PMIC_MODEL_PM8821	= 22,
+	PMIC_MODEL_PM8038	= 23,
+	PMIC_MODEL_PM8922	= 24,
+	PMIC_MODEL_PM8917	= 25,
+	PMIC_MODEL_UNKNOWN	= 0xFFFFFFFF
+};
+
+enum msm_cpu socinfo_get_msm_cpu(void);
+uint32_t socinfo_get_id(void);
+uint32_t socinfo_get_version(void);
+uint32_t socinfo_get_raw_id(void);
+char *socinfo_get_build_id(void);
+uint32_t socinfo_get_platform_type(void);
+uint32_t socinfo_get_platform_subtype(void);
+uint32_t socinfo_get_platform_version(void);
+uint32_t socinfo_get_serial_number(void);
+enum pmic_model socinfo_get_pmic_model(void);
+uint32_t socinfo_get_pmic_die_revision(void);
+int __init socinfo_init(void) __must_check;
+
+#endif
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
new file mode 100644
index 0000000..db421ca
--- /dev/null
+++ b/include/soc/qcom/subsystem_notif.h
@@ -0,0 +1,87 @@
+/* Copyright (c) 2011, 2013 - 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Subsystem restart notifier API header
+ *
+ */
+
+#ifndef _SUBSYS_NOTIFIER_H
+#define _SUBSYS_NOTIFIER_H
+
+#include <linux/notifier.h>
+
+enum subsys_notif_type {
+	SUBSYS_BEFORE_SHUTDOWN,
+	SUBSYS_AFTER_SHUTDOWN,
+	SUBSYS_BEFORE_POWERUP,
+	SUBSYS_AFTER_POWERUP,
+	SUBSYS_RAMDUMP_NOTIFICATION,
+	SUBSYS_POWERUP_FAILURE,
+	SUBSYS_PROXY_VOTE,
+	SUBSYS_PROXY_UNVOTE,
+	SUBSYS_SOC_RESET,
+	SUBSYS_NOTIF_TYPE_COUNT
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+/* Use the subsys_notif_register_notifier API to register for notifications
+ * from a particular subsystem. This API returns a handle that can be used to
+ * unregister from notifications with the subsys_notif_unregister_notifier API
+ * by passing in that handle as an argument.
+ *
+ * On receiving a notification, the second (unsigned long) argument of the
+ * notifier callback will contain the notification type, and the third (void *)
+ * argument will contain the handle that was returned by
+ * subsys_notif_register_notifier.
+ */
+void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb);
+int subsys_notif_unregister_notifier(void *subsys_handle,
+				struct notifier_block *nb);
+
+/* Use the subsys_notif_add_subsys API to initialize the notifier chains for
+ * a particular subsystem. This API will return a handle that can be used to
+ * queue notifications using the subsys_notif_queue_notification API by passing
+ * in that handle as an argument.
+ */
+void *subsys_notif_add_subsys(const char *subsys_name);
+int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data);
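+
+/*
+ * Illustrative usage sketch; the subsystem name "modem" and the callback
+ * below are placeholders, not names defined by this header:
+ *
+ *	static int my_ssr_cb(struct notifier_block *nb, unsigned long code,
+ *			     void *data)
+ *	{
+ *		if (code == SUBSYS_BEFORE_SHUTDOWN)
+ *			pr_info("subsystem going down\n");
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block my_nb = { .notifier_call = my_ssr_cb };
+ *
+ *	void *handle = subsys_notif_register_notifier("modem", &my_nb);
+ *	...
+ *	subsys_notif_unregister_notifier(handle, &my_nb);
+ */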
+#else
+
+static inline void *subsys_notif_register_notifier(
+			const char *subsys_name, struct notifier_block *nb)
+{
+	return NULL;
+}
+
+static inline int subsys_notif_unregister_notifier(void *subsys_handle,
+					struct notifier_block *nb)
+{
+	return 0;
+}
+
+static inline void *subsys_notif_add_subsys(const char *subsys_name)
+{
+	return NULL;
+}
+
+static inline int subsys_notif_queue_notification(void *subsys_handle,
+					enum subsys_notif_type notif_type,
+					void *data)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/soc/qcom/subsystem_restart.h b/include/soc/qcom/subsystem_restart.h
new file mode 100644
index 0000000..a32de45
--- /dev/null
+++ b/include/soc/qcom/subsystem_restart.h
@@ -0,0 +1,191 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SUBSYS_RESTART_H
+#define __SUBSYS_RESTART_H
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+struct subsys_device;
+
+enum {
+	RESET_SOC = 0,
+	RESET_SUBSYS_COUPLED,
+	RESET_LEVEL_MAX
+};
+
+struct device;
+struct module;
+
+/**
+ * struct subsys_desc - subsystem descriptor
+ * @name: name of subsystem
+ * @fw_name: firmware name
+ * @depends_on: subsystem this subsystem depends on to operate
+ * @dev: parent device
+ * @owner: module the descriptor belongs to
+ * @shutdown: Stop a subsystem
+ * @powerup: Start a subsystem
+ * @crash_shutdown: Shutdown a subsystem when the system crashes (can't sleep)
+ * @ramdump: Collect a ramdump of the subsystem
+ * @free_memory: Free the memory associated with this subsystem
+ * @is_not_loadable: Indicate if subsystem firmware is not loadable via pil
+ * framework
+ * @no_auth: Set if subsystem does not rely on PIL to authenticate and bring
+ * it out of reset
+ * @ssctl_instance_id: Instance id used to connect with SSCTL service
+ * @sysmon_pid:	pdev id that sysmon is probed with for the subsystem
+ * @sysmon_shutdown_ret: Return value for the call to sysmon_send_shutdown
+ * @system_debug: If "set", triggers a device restart when the
+ * subsystem's wdog bite handler is invoked.
+ * @edge: GLINK logical name of the subsystem
+ */
+struct subsys_desc {
+	const char *name;
+	char fw_name[256];
+	const char *depends_on;
+	struct device *dev;
+	struct module *owner;
+
+	int (*shutdown)(const struct subsys_desc *desc, bool force_stop);
+	int (*powerup)(const struct subsys_desc *desc);
+	void (*crash_shutdown)(const struct subsys_desc *desc);
+	int (*ramdump)(int, const struct subsys_desc *desc);
+	void (*free_memory)(const struct subsys_desc *desc);
+	irqreturn_t (*err_fatal_handler)(int irq, void *dev_id);
+	irqreturn_t (*stop_ack_handler)(int irq, void *dev_id);
+	irqreturn_t (*wdog_bite_handler)(int irq, void *dev_id);
+	irqreturn_t (*generic_handler)(int irq, void *dev_id);
+	int is_not_loadable;
+	int err_fatal_gpio;
+	unsigned int err_fatal_irq;
+	unsigned int err_ready_irq;
+	unsigned int stop_ack_irq;
+	unsigned int wdog_bite_irq;
+	unsigned int generic_irq;
+	int force_stop_gpio;
+	int ramdump_disable_gpio;
+	int shutdown_ack_gpio;
+	int ramdump_disable;
+	bool no_auth;
+	bool pil_mss_memsetup;
+	int ssctl_instance_id;
+	u32 sysmon_pid;
+	int sysmon_shutdown_ret;
+	bool system_debug;
+	const char *edge;
+};
+
+/**
+ * struct notif_data - additional notif information
+ * @crashed: indicates if subsystem has crashed
+ * @enable_ramdump: ramdumps disabled if set to 0
+ * @enable_mini_ramdumps: enable flag for minimized critical-memory-only
+ * ramdumps
+ * @no_auth: set if subsystem does not use PIL to bring it out of reset
+ * @pdev: subsystem platform device pointer
+ */
+struct notif_data {
+	bool crashed;
+	int enable_ramdump;
+	int enable_mini_ramdumps;
+	bool no_auth;
+	struct platform_device *pdev;
+};
+
+#if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
+
+extern int subsys_get_restart_level(struct subsys_device *dev);
+extern int subsystem_restart_dev(struct subsys_device *dev);
+extern int subsystem_restart(const char *name);
+extern int subsystem_crashed(const char *name);
+
+extern void *subsystem_get(const char *name);
+extern void *subsystem_get_with_fwname(const char *name, const char *fw_name);
+extern int subsystem_set_fwname(const char *name, const char *fw_name);
+extern void subsystem_put(void *subsystem);
+
+extern struct subsys_device *subsys_register(struct subsys_desc *desc);
+extern void subsys_unregister(struct subsys_device *dev);
+
+extern void subsys_default_online(struct subsys_device *dev);
+extern void subsys_set_crash_status(struct subsys_device *dev, bool crashed);
+extern bool subsys_get_crash_status(struct subsys_device *dev);
+void notify_proxy_vote(struct device *device);
+void notify_proxy_unvote(struct device *device);
+void complete_err_ready(struct subsys_device *subsys);
+extern int wait_for_shutdown_ack(struct subsys_desc *desc);
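+
+/*
+ * Illustrative lifecycle sketch; the subsystem name "adsp" is a placeholder:
+ *
+ *	void *subsys = subsystem_get("adsp");
+ *	if (IS_ERR_OR_NULL(subsys))
+ *		return -ENODEV;
+ *	... use the subsystem ...
+ *	subsystem_put(subsys);
+ */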
+#else
+
+static inline int subsys_get_restart_level(struct subsys_device *dev)
+{
+	return 0;
+}
+
+static inline int subsystem_restart_dev(struct subsys_device *dev)
+{
+	return 0;
+}
+
+static inline int subsystem_restart(const char *name)
+{
+	return 0;
+}
+
+static inline int subsystem_crashed(const char *name)
+{
+	return 0;
+}
+
+static inline void *subsystem_get(const char *name)
+{
+	return NULL;
+}
+
+static inline void *subsystem_get_with_fwname(const char *name,
+				const char *fw_name)
+{
+	return NULL;
+}
+
+static inline int subsystem_set_fwname(const char *name,
+				const char *fw_name)
+{
+	return 0;
+}
+
+static inline void subsystem_put(void *subsystem) { }
+
+static inline
+struct subsys_device *subsys_register(struct subsys_desc *desc)
+{
+	return NULL;
+}
+
+static inline void subsys_unregister(struct subsys_device *dev) { }
+
+static inline void subsys_default_online(struct subsys_device *dev) { }
+static inline
+void subsys_set_crash_status(struct subsys_device *dev, bool crashed) { }
+static inline bool subsys_get_crash_status(struct subsys_device *dev)
+{
+	return false;
+}
+static inline void notify_proxy_vote(struct device *device) { }
+static inline void notify_proxy_unvote(struct device *device) { }
+static inline int wait_for_shutdown_ack(struct subsys_desc *desc)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
+
+#endif
diff --git a/include/soc/qcom/sysmon.h b/include/soc/qcom/sysmon.h
new file mode 100644
index 0000000..56860db
--- /dev/null
+++ b/include/soc/qcom/sysmon.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MSM_SYSMON_H
+#define __MSM_SYSMON_H
+
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+/**
+ * enum ssctl_ssr_event_enum_type - Subsystem notification type.
+ */
+enum ssctl_ssr_event_enum_type {
+	SSCTL_SSR_EVENT_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_BEFORE_POWERUP = 0,
+	SSCTL_SSR_EVENT_AFTER_POWERUP = 1,
+	SSCTL_SSR_EVENT_BEFORE_SHUTDOWN = 2,
+	SSCTL_SSR_EVENT_AFTER_SHUTDOWN = 3,
+	SSCTL_SSR_EVENT_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+/**
+ * enum ssctl_ssr_event_driven_enum_type - Subsystem shutdown type.
+ */
+enum ssctl_ssr_event_driven_enum_type {
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MIN_ENUM_VAL = -2147483647,
+	SSCTL_SSR_EVENT_FORCED = 0,
+	SSCTL_SSR_EVENT_GRACEFUL = 1,
+	SSCTL_SSR_EVENT_DRIVEN_ENUM_TYPE_MAX_ENUM_VAL = 2147483647
+};
+
+#if defined(CONFIG_MSM_SYSMON_COMM) || defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_send_event(struct subsys_desc *dest_desc,
+			struct subsys_desc *event_desc,
+			enum subsys_notif_type notif);
+extern int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+				struct subsys_desc *event_desc,
+				enum subsys_notif_type notif);
+extern int sysmon_get_reason(struct subsys_desc *dest_desc, char *buf,
+				size_t len);
+extern int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+				char *buf, size_t len);
+extern int sysmon_send_shutdown(struct subsys_desc *dest_desc);
+extern int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc);
+extern int sysmon_notifier_register(struct subsys_desc *desc);
+extern void sysmon_notifier_unregister(struct subsys_desc *desc);
+#else
+static inline int sysmon_send_event(struct subsys_desc *dest_desc,
+					struct subsys_desc *event_desc,
+					enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_send_event_no_qmi(struct subsys_desc *dest_desc,
+						struct subsys_desc *event_desc,
+						enum subsys_notif_type notif)
+{
+	return 0;
+}
+static inline int sysmon_get_reason(struct subsys_desc *dest_desc,
+					char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_get_reason_no_qmi(struct subsys_desc *dest_desc,
+						char *buf, size_t len)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_send_shutdown_no_qmi(struct subsys_desc *dest_desc)
+{
+	return 0;
+}
+static inline int sysmon_notifier_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_notifier_unregister(struct subsys_desc *desc)
+{
+}
+#endif
+
+#if defined(CONFIG_MSM_SYSMON_GLINK_COMM)
+extern int sysmon_glink_register(struct subsys_desc *desc);
+extern void sysmon_glink_unregister(struct subsys_desc *desc);
+#else
+static inline int sysmon_glink_register(struct subsys_desc *desc)
+{
+	return 0;
+}
+static inline void sysmon_glink_unregister(struct subsys_desc *desc)
+{
+}
+#endif
+#endif
diff --git a/include/soc/qcom/system_health_monitor.h b/include/soc/qcom/system_health_monitor.h
new file mode 100644
index 0000000..5ce027c
--- /dev/null
+++ b/include/soc/qcom/system_health_monitor.h
@@ -0,0 +1,34 @@
+/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SYSTEM_HEALTH_MONITOR_H
+#define SYSTEM_HEALTH_MONITOR_H
+
+#ifdef CONFIG_SYSTEM_HEALTH_MONITOR
+/**
+ * kern_check_system_health() - Check the system health
+ *
+ * This function is used by kernel drivers to initiate a system
+ * health check. It in turn triggers SHM to send a QMI message to
+ * all the HMAs connected to it.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int kern_check_system_health(void);
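+
+/*
+ * Illustrative call site; the error handling shown is a placeholder:
+ *
+ *	int ret = kern_check_system_health();
+ *	if (ret)
+ *		pr_err("failed to start system health check: %d\n", ret);
+ */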
+#else
+static inline int kern_check_system_health(void)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_SYSTEM_HEALTH_MONITOR */
+
+#endif /* SYSTEM_HEALTH_MONITOR_H */
diff --git a/include/soc/qcom/system_pm.h b/include/soc/qcom/system_pm.h
new file mode 100644
index 0000000..0be089b
--- /dev/null
+++ b/include/soc/qcom/system_pm.h
@@ -0,0 +1,28 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __SOC_QCOM_SYS_PM_H__
+#define __SOC_QCOM_SYS_PM_H__
+
+#ifdef CONFIG_QTI_SYSTEM_PM
+int system_sleep_enter(uint64_t sleep_val);
+
+void system_sleep_exit(void);
+#else
+static inline int system_sleep_enter(uint64_t sleep_val)
+{ return -ENODEV; }
+
+static inline void system_sleep_exit(void)
+{ }
+#endif /* CONFIG_QTI_SYSTEM_PM */
+
+#endif /* __SOC_QCOM_SYS_PM_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
new file mode 100644
index 0000000..112b68b
--- /dev/null
+++ b/include/soc/qcom/tcs.h
@@ -0,0 +1,42 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SOC_QCOM_TCS_H__
+#define __SOC_QCOM_TCS_H__
+
+#define MAX_RPMH_PAYLOAD	16
+
+struct tcs_cmd {
+	u32 addr;		/* slv_id [18:16] | offset [15:0] */
+	u32 data;		/* data for resource (or read response) */
+	bool complete;		/* wait for completion before sending next */
+};
+
+enum rpmh_state {
+	RPMH_SLEEP_STATE,	/* Sleep */
+	RPMH_WAKE_ONLY_STATE,	/* Wake only */
+	RPMH_ACTIVE_ONLY_STATE,	/* Active only (= AMC) */
+	RPMH_AWAKE_STATE,	/* Use Wake TCS for Wake & Active (AMC = 0) */
+};
+
+struct tcs_mbox_msg {
+	enum rpmh_state state;	/* request state */
+	bool is_complete;	/* wait for resp from accelerator */
+	bool is_read;		/* expecting a response from RPMH */
+	bool is_control;	/* private control messages */
+	bool invalidate;	/* invalidate sleep/wake commands */
+	u32 num_payload;	/* Limited to MAX_RPMH_PAYLOAD in one msg */
+	struct tcs_cmd *payload;/* array of tcs_cmds */
+};
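+
+/*
+ * Illustrative request sketch; the address and data values below are
+ * placeholders, not real resource identifiers:
+ *
+ *	struct tcs_cmd cmd = {
+ *		.addr = 0x30000,
+ *		.data = 0x1,
+ *		.complete = true,
+ *	};
+ *	struct tcs_mbox_msg msg = {
+ *		.state = RPMH_ACTIVE_ONLY_STATE,
+ *		.is_complete = true,
+ *		.num_payload = 1,
+ *		.payload = &cmd,
+ *	};
+ */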
+
+#endif /* __SOC_QCOM_TCS_H__ */
diff --git a/include/soc/qcom/tracer_pkt.h b/include/soc/qcom/tracer_pkt.h
new file mode 100644
index 0000000..2657b79
--- /dev/null
+++ b/include/soc/qcom/tracer_pkt.h
@@ -0,0 +1,130 @@
+/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _TRACER_PKT_H_
+#define _TRACER_PKT_H_
+
+#include <linux/err.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_TRACER_PKT
+
+/**
+ * tracer_pkt_init() - initialize the tracer packet
+ * @data:		Pointer to the buffer to be initialized with a tracer
+ *			packet.
+ * @data_len:		Length of the buffer.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ * @pkt_priv:		Private/Cookie information to be added to the tracer
+ *			packet.
+ * @pkt_priv_len:	Length of the private data.
+ *
+ * This function is used to initialize a buffer with the tracer packet header.
+ * The tracer packet header includes the data as passed by the elements in the
+ * parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len);
+
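+/*
+ * Illustrative sketch; the buffer size, event masks and cookie value are
+ * placeholders:
+ *
+ *	char pkt[128];
+ *	u32 cookie = 0xabcd;
+ *
+ *	if (!tracer_pkt_init(pkt, sizeof(pkt), 0x1, 0x1,
+ *			     &cookie, sizeof(cookie)))
+ *		tracer_pkt_log_event(pkt, 1);
+ */
+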
+/**
+ * tracer_pkt_set_event_cfg() - set the event configuration mask in the tracer
+ *				packet
+ * @data:		Pointer to the buffer to be initialized with event
+ *			configuration mask.
+ * @client_event_cfg:	Client-specific event configuration mask.
+ * @glink_event_cfg:	G-Link-specific event configuration mask.
+ *
+ * This function is used to initialize a buffer with the event configuration
+ * mask as passed by the elements in the parameters.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_set_event_cfg(void *data, uint16_t client_event_cfg,
+			     uint32_t glink_event_cfg);
+
+/**
+ * tracer_pkt_log_event() - log an event specific to the tracer packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @event_id:	Event ID to be logged.
+ *
+ * This function is used to log an event specific to the tracer packet.
+ * The event is logged either into the tracer packet itself or a different
+ * tracing mechanism as configured.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_log_event(void *data, uint32_t event_id);
+
+/**
+ * tracer_pkt_calc_hex_dump_size() - calculate the hex dump size of a tracer
+ *				     packet
+ * @data:	Pointer to the buffer containing tracer packet.
+ * @data_len:	Length of the tracer packet buffer.
+ *
+ * This function is used to calculate the length of the buffer required to
+ * hold the hex dump of the tracer packet.
+ *
+ * Return: Length of the buffer required to hold the hex dump on success,
+ * standard Linux error codes on failure.
+ */
+size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len);
+
+/**
+ * tracer_pkt_hex_dump() - hex dump the tracer packet into a buffer
+ * @buf:	Buffer to contain the hex dump of the tracer packet.
+ * @buf_len:	Length of the hex dump buffer.
+ * @data:	Buffer containing the tracer packet.
+ * @data_len:	Length of the buffer containing the tracer packet.
+ *
+ * This function is used to dump the contents of the tracer packet into
+ * a buffer in a specific hexadecimal format. The hex dump buffer can then
+ * be dumped through debugfs.
+ *
+ * Return: 0 on success, standard Linux error codes on failure.
+ */
+int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data, size_t data_len);
+
+#else
+
+static inline int tracer_pkt_init(void *data, size_t data_len,
+		    uint16_t client_event_cfg, uint32_t glink_event_cfg,
+		    void *pkt_priv, size_t pkt_priv_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_set_event_cfg(void *data,
+					   uint16_t client_event_cfg,
+					   uint32_t glink_event_cfg)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_log_event(void *data, uint32_t event_id)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline size_t tracer_pkt_calc_hex_dump_size(void *data, size_t data_len)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int tracer_pkt_hex_dump(void *buf, size_t buf_len, void *data,
+				      size_t data_len)
+{
+	return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_TRACER_PKT */
+#endif /* _TRACER_PKT_H_ */
diff --git a/include/soc/qcom/watchdog.h b/include/soc/qcom/watchdog.h
new file mode 100644
index 0000000..2697573
--- /dev/null
+++ b/include/soc/qcom/watchdog.h
@@ -0,0 +1,29 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _ASM_ARCH_MSM_WATCHDOG_H_
+#define _ASM_ARCH_MSM_WATCHDOG_H_
+
+#ifdef CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC
+#define WDOG_BITE_ON_PANIC 1
+#else
+#define WDOG_BITE_ON_PANIC 0
+#endif
+
+#ifdef CONFIG_QCOM_WATCHDOG_V2
+void msm_trigger_wdog_bite(void);
+#else
+static inline void msm_trigger_wdog_bite(void) { }
+#endif
+
+#endif
diff --git a/include/trace/events/android_fs.h b/include/trace/events/android_fs.h
new file mode 100644
index 0000000..531da43
--- /dev/null
+++ b/include/trace/events/android_fs.h
@@ -0,0 +1,31 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM android_fs
+
+#if !defined(_TRACE_ANDROID_FS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_H
+
+#include <linux/tracepoint.h>
+#include <trace/events/android_fs_template.h>
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_dataread_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *command),
+	TP_ARGS(inode, offset, bytes, pid, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_dataread_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+DEFINE_EVENT(android_fs_data_start_template, android_fs_datawrite_start,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *command),
+	TP_ARGS(inode, offset, bytes, pid, command));
+
+DEFINE_EVENT(android_fs_data_end_template, android_fs_datawrite_end,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes));
+
+#endif /* _TRACE_ANDROID_FS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/android_fs_template.h b/include/trace/events/android_fs_template.h
new file mode 100644
index 0000000..618988b
--- /dev/null
+++ b/include/trace/events/android_fs_template.h
@@ -0,0 +1,79 @@
+#if !defined(_TRACE_ANDROID_FS_TEMPLATE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_ANDROID_FS_TEMPLATE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(android_fs_data_start_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes,
+		 pid_t pid, char *command),
+	TP_ARGS(inode, offset, bytes, pid, command),
+	TP_STRUCT__entry(
+		__array(char, path, MAX_FILTER_STR_VAL);
+		__field(char *, pathname);
+		__field(loff_t,	offset);
+		__field(int,	bytes);
+		__field(loff_t,	i_size);
+		__string(cmdline, command);
+		__field(pid_t,	pid);
+		__field(ino_t,	ino);
+	),
+	TP_fast_assign(
+		{
+			struct dentry *d;
+
+			/*
+			 * Grab a reference to the inode here because
+			 * d_obtain_alias() will either drop the inode
+			 * reference if it locates an existing dentry
+			 * or transfer the reference to the new dentry
+			 * created. In our case, the file is still open,
+			 * so the dentry is guaranteed to exist (connected),
+			 * so d_obtain_alias() drops the reference we
+			 * grabbed here.
+			 */
+			ihold(inode);
+			d = d_obtain_alias(inode);
+			if (!IS_ERR(d)) {
+				__entry->pathname = dentry_path(d,
+							__entry->path,
+							MAX_FILTER_STR_VAL);
+				dput(d);
+			} else
+				__entry->pathname = ERR_PTR(-EINVAL);
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+			__entry->i_size		= i_size_read(inode);
+			__assign_str(cmdline, command);
+			__entry->pid		= pid;
+			__entry->ino		= inode->i_ino;
+		}
+	),
+	TP_printk("entry_name %s, offset %llu, bytes %d, cmdline %s,"
+		  " pid %d, i_size %llu, ino %lu",
+		  (IS_ERR(__entry->pathname) ? "ERROR" : __entry->pathname),
+		  __entry->offset, __entry->bytes, __get_str(cmdline),
+		  __entry->pid, __entry->i_size,
+		  (unsigned long) __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(android_fs_data_end_template,
+	TP_PROTO(struct inode *inode, loff_t offset, int bytes),
+	TP_ARGS(inode, offset, bytes),
+	TP_STRUCT__entry(
+		__field(ino_t,	ino);
+		__field(loff_t,	offset);
+		__field(int,	bytes);
+	),
+	TP_fast_assign(
+		{
+			__entry->ino		= inode->i_ino;
+			__entry->offset		= offset;
+			__entry->bytes		= bytes;
+		}
+	),
+	TP_printk("ino %lu, offset %llu, bytes %d",
+		  (unsigned long) __entry->ino,
+		  __entry->offset, __entry->bytes)
+);
+
+#endif /* _TRACE_ANDROID_FS_TEMPLATE_H */
diff --git a/include/trace/events/cpufreq_interactive.h b/include/trace/events/cpufreq_interactive.h
new file mode 100644
index 0000000..951e6ca
--- /dev/null
+++ b/include/trace/events/cpufreq_interactive.h
@@ -0,0 +1,112 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq_interactive
+
+#if !defined(_TRACE_CPUFREQ_INTERACTIVE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CPUFREQ_INTERACTIVE_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(set,
+	TP_PROTO(u32 cpu_id, unsigned long targfreq,
+	         unsigned long actualfreq),
+	TP_ARGS(cpu_id, targfreq, actualfreq),
+
+	TP_STRUCT__entry(
+	    __field(          u32, cpu_id    )
+	    __field(unsigned long, targfreq   )
+	    __field(unsigned long, actualfreq )
+	   ),
+
+	TP_fast_assign(
+	    __entry->cpu_id = (u32) cpu_id;
+	    __entry->targfreq = targfreq;
+	    __entry->actualfreq = actualfreq;
+	),
+
+	TP_printk("cpu=%u targ=%lu actual=%lu",
+	      __entry->cpu_id, __entry->targfreq,
+	      __entry->actualfreq)
+);
+
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
+	TP_PROTO(u32 cpu_id, unsigned long targfreq,
+	     unsigned long actualfreq),
+	TP_ARGS(cpu_id, targfreq, actualfreq)
+);
+
+DECLARE_EVENT_CLASS(loadeval,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+		    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg),
+
+	    TP_STRUCT__entry(
+		    __field(unsigned long, cpu_id    )
+		    __field(unsigned long, load      )
+		    __field(unsigned long, curtarg   )
+		    __field(unsigned long, curactual )
+		    __field(unsigned long, newtarg   )
+	    ),
+
+	    TP_fast_assign(
+		    __entry->cpu_id = cpu_id;
+		    __entry->load = load;
+		    __entry->curtarg = curtarg;
+		    __entry->curactual = curactual;
+		    __entry->newtarg = newtarg;
+	    ),
+
+	    TP_printk("cpu=%lu load=%lu cur=%lu actual=%lu targ=%lu",
+		      __entry->cpu_id, __entry->load, __entry->curtarg,
+		      __entry->curactual, __entry->newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_target,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_already,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+DEFINE_EVENT(loadeval, cpufreq_interactive_notyet,
+	    TP_PROTO(unsigned long cpu_id, unsigned long load,
+		     unsigned long curtarg, unsigned long curactual,
+		     unsigned long newtarg),
+	    TP_ARGS(cpu_id, load, curtarg, curactual, newtarg)
+);
+
+TRACE_EVENT(cpufreq_interactive_boost,
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+TRACE_EVENT(cpufreq_interactive_unboost,
+	    TP_PROTO(const char *s),
+	    TP_ARGS(s),
+	    TP_STRUCT__entry(
+		    __string(s, s)
+	    ),
+	    TP_fast_assign(
+		    __assign_str(s, s);
+	    ),
+	    TP_printk("%s", __get_str(s))
+);
+
+#endif /* _TRACE_CPUFREQ_INTERACTIVE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/gpu.h b/include/trace/events/gpu.h
new file mode 100644
index 0000000..7e15cdf
--- /dev/null
+++ b/include/trace/events/gpu.h
@@ -0,0 +1,143 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gpu
+
+#if !defined(_TRACE_GPU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_GPU_H
+
+#include <linux/tracepoint.h>
+#include <linux/time.h>
+
+#define show_secs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		do_div(t, NSEC_PER_SEC); \
+		t; \
+	})
+
+#define show_usecs_from_ns(ns) \
+	({ \
+		u64 t = ns + (NSEC_PER_USEC / 2); \
+		u32 rem; \
+		do_div(t, NSEC_PER_USEC); \
+		rem = do_div(t, USEC_PER_SEC); \
+	})
+
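+/*
+ * Worked example: for ns = 1234567890 the helpers above round to the
+ * nearest microsecond, giving show_secs_from_ns() = 1 and
+ * show_usecs_from_ns() = 234568, so a timestamp printed with "%llu.%06lu"
+ * reads "1.234568".
+ */
+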
+/*
+ * The gpu_sched_switch event indicates that a switch from one GPU context to
+ * another occurred on one of the GPU hardware blocks.
+ *
+ * The gpu_name argument identifies the GPU hardware block.  Each independently
+ * scheduled GPU hardware block should have a different name.  This may be used
+ * in different ways for different GPUs.  For example, if a GPU includes
+ * multiple processing cores it may use names "GPU 0", "GPU 1", etc.  If a GPU
+ * includes a separately scheduled 2D and 3D hardware block, it might use the
+ * names "2D" and "3D".
+ *
+ * The timestamp argument is the timestamp at which the switch occurred on the
+ * GPU. These timestamps are in units of nanoseconds and must use
+ * approximately the same time as sched_clock, though they need not come from
+ * any CPU clock. The timestamps for a single hardware block must be
+ * monotonically nondecreasing.  This means that if a variable compensation
+ * offset is used to translate from some other clock to the sched_clock, then
+ * care must be taken when increasing that offset, and doing so may result in
+ * multiple events with the same timestamp.
+ *
+ * The next_ctx_id argument identifies the next context that was running on
+ * the GPU hardware block.  A value of 0 indicates that the hardware block
+ * will be idle.
+ *
+ * The next_prio argument indicates the priority of the next context at the
+ * time of the event.  The exact numeric values may mean different things for
+ * different GPUs, but they should follow the rule that lower values indicate a
+ * higher priority.
+ *
+ * The next_job_id argument identifies the batch of work that the GPU will be
+ * working on.  This should correspond to a job_id that was previously traced
+ * as a gpu_job_enqueue event when the batch of work was created.
+ */
+TRACE_EVENT(gpu_sched_switch,
+
+	TP_PROTO(const char *gpu_name, u64 timestamp,
+		u32 next_ctx_id, s32 next_prio, u32 next_job_id),
+
+	TP_ARGS(gpu_name, timestamp, next_ctx_id, next_prio, next_job_id),
+
+	TP_STRUCT__entry(
+		__string(       gpu_name,       gpu_name        )
+		__field(        u64,            timestamp       )
+		__field(        u32,            next_ctx_id     )
+		__field(        s32,            next_prio       )
+		__field(        u32,            next_job_id     )
+	),
+
+	TP_fast_assign(
+		__assign_str(gpu_name, gpu_name);
+		__entry->timestamp = timestamp;
+		__entry->next_ctx_id = next_ctx_id;
+		__entry->next_prio = next_prio;
+		__entry->next_job_id = next_job_id;
+	),
+
+	TP_printk("gpu_name=%s ts=%llu.%06lu next_ctx_id=%lu next_prio=%ld "
+		"next_job_id=%lu",
+		__get_str(gpu_name),
+		(unsigned long long)show_secs_from_ns(__entry->timestamp),
+		(unsigned long)show_usecs_from_ns(__entry->timestamp),
+		(unsigned long)__entry->next_ctx_id,
+		(long)__entry->next_prio,
+		(unsigned long)__entry->next_job_id)
+);
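+
+/*
+ * Illustrative emission; the block name, context id, priority and job id
+ * below are placeholders:
+ *
+ *	u64 now = local_clock();
+ *
+ *	trace_gpu_sched_switch("GPU 0", now, 5, 2, 42);
+ */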
+
+/*
+ * The gpu_job_enqueue event indicates that a batch of work has been queued up
+ * to be processed by the GPU.  This event is not intended to indicate that
+ * the batch of work has been submitted to the GPU hardware, but rather that
+ * it has been submitted to the GPU kernel driver.
+ *
+ * This event should be traced on the thread that initiated the work being
+ * queued.  For example, if a batch of work is submitted to the kernel by a
+ * userland thread, the event should be traced on that thread.
+ *
+ * The ctx_id field identifies the GPU context in which the batch of work
+ * being queued is to be run.
+ *
+ * The job_id field identifies the batch of work being queued within the given
+ * GPU context.  The first batch of work submitted for a given GPU context
+ * should have a job_id of 0, and each subsequent batch of work should
+ * increment the job_id by 1.
+ *
+ * The type field identifies the type of the job being enqueued.  The job
+ * types may be different for different GPU hardware.  For example, a GPU may
+ * differentiate between "2D", "3D", and "compute" jobs.
+ */
+TRACE_EVENT(gpu_job_enqueue,
+
+	TP_PROTO(u32 ctx_id, u32 job_id, const char *type),
+
+	TP_ARGS(ctx_id, job_id, type),
+
+	TP_STRUCT__entry(
+		__field(        u32,            ctx_id          )
+		__field(        u32,            job_id          )
+		__string(       type,           type            )
+	),
+
+	TP_fast_assign(
+		__entry->ctx_id = ctx_id;
+		__entry->job_id = job_id;
+		__assign_str(type, type);
+	),
+
+	TP_printk("ctx_id=%lu job_id=%lu type=%s",
+		(unsigned long)__entry->ctx_id,
+		(unsigned long)__entry->job_id,
+		__get_str(type))
+);
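+
+/*
+ * Illustrative emission; the context id, job id and type string are
+ * placeholders:
+ *
+ *	trace_gpu_job_enqueue(5, 42, "3D");
+ */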
+
+#undef show_secs_from_ns
+#undef show_usecs_from_ns
+
+#endif /* _TRACE_GPU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 6b2e154..5b8c6f8 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -1,3 +1,15 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kmem
 
@@ -317,6 +329,550 @@
 		__entry->change_ownership)
 );
 
+
+DECLARE_EVENT_CLASS(ion_alloc,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags),
+
+	TP_STRUCT__entry(
+		__array(char,		client_name, 64)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->client_name, client_name, 64);
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+	),
+
+	TP_printk("client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_start,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DEFINE_EVENT(ion_alloc, ion_alloc_buffer_end,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_error,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error),
+
+	TP_STRUCT__entry(
+		__field(const char *,	client_name)
+		__field(const char *,	heap_name)
+		__field(size_t,		len)
+		__field(unsigned int,	mask)
+		__field(unsigned int,	flags)
+		__field(long,		error)
+	),
+
+	TP_fast_assign(
+		__entry->client_name	= client_name;
+		__entry->heap_name	= heap_name;
+		__entry->len		= len;
+		__entry->mask		= mask;
+		__entry->flags		= flags;
+		__entry->error		= error;
+	),
+
+	TP_printk(
+	"client_name=%s heap_name=%s len=%zu mask=0x%x flags=0x%x error=%ld",
+		__entry->client_name,
+		__entry->heap_name,
+		__entry->len,
+		__entry->mask,
+		__entry->flags,
+		__entry->error)
+);
+
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fallback,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+DEFINE_EVENT(ion_alloc_error, ion_alloc_buffer_fail,
+
+	TP_PROTO(const char *client_name,
+		 const char *heap_name,
+		 size_t len,
+		 unsigned int mask,
+		 unsigned int flags,
+		 long error),
+
+	TP_ARGS(client_name, heap_name, len, mask, flags, error)
+);
+
+
+DECLARE_EVENT_CLASS(alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries),
+
+	TP_STRUCT__entry(
+		__field(int, tries)
+	),
+
+	TP_fast_assign(
+		__entry->tries = tries;
+	),
+
+	TP_printk("tries=%d",
+		__entry->tries)
+);
+
+DEFINE_EVENT(alloc_retry, ion_cp_alloc_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, migrate_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DEFINE_EVENT(alloc_retry, dma_alloc_contiguous_retry,
+
+	TP_PROTO(int tries),
+
+	TP_ARGS(tries)
+);
+
+DECLARE_EVENT_CLASS(migrate_pages,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode),
+
+	TP_STRUCT__entry(
+		__field(int, mode)
+	),
+
+	TP_fast_assign(
+		__entry->mode = mode;
+	),
+
+	TP_printk("mode=%d",
+		__entry->mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_start,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode)
+);
+
+DEFINE_EVENT(migrate_pages, migrate_pages_end,
+
+	TP_PROTO(int mode),
+
+	TP_ARGS(mode)
+);
+
+DECLARE_EVENT_CLASS(ion_alloc_pages,
+
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order),
+
+	TP_STRUCT__entry(
+		__field(gfp_t, gfp_flags)
+		__field(unsigned int, order)
+		),
+
+	TP_fast_assign(
+		__entry->gfp_flags = gfp_flags;
+		__entry->order = order;
+		),
+
+	TP_printk("gfp_flags=%s order=%d",
+		show_gfp_flags(__entry->gfp_flags),
+		__entry->order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_start,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_end,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_iommu_fail,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_start,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_end,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+	);
+
+DEFINE_EVENT(ion_alloc_pages, alloc_pages_sys_fail,
+	TP_PROTO(gfp_t gfp_flags,
+		unsigned int order),
+
+	TP_ARGS(gfp_flags, order)
+
+	);
+
+DECLARE_EVENT_CLASS(smmu_map,
+
+	TP_PROTO(unsigned long va,
+		phys_addr_t pa,
+		unsigned long chunk_size,
+		size_t len),
+
+	TP_ARGS(va, pa, chunk_size, len),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, va)
+		__field(phys_addr_t, pa)
+		__field(unsigned long, chunk_size)
+		__field(size_t, len)
+		),
+
+	TP_fast_assign(
+		__entry->va = va;
+		__entry->pa = pa;
+		__entry->chunk_size = chunk_size;
+		__entry->len = len;
+		),
+
+	TP_printk("v_addr=%p p_addr=%pa chunk_size=0x%lx len=%zu",
+		(void *)__entry->va,
+		&__entry->pa,
+		__entry->chunk_size,
+		__entry->len)
+	);
+
+DEFINE_EVENT(smmu_map, iommu_map_range,
+	TP_PROTO(unsigned long va,
+		phys_addr_t pa,
+		unsigned long chunk_size,
+		size_t len),
+
+	TP_ARGS(va, pa, chunk_size, len)
+	);
+
+DECLARE_EVENT_CLASS(ion_secure_cma_add_to_pool,
+
+	TP_PROTO(unsigned long len,
+		 int pool_total,
+		 bool is_prefetch),
+
+	TP_ARGS(len, pool_total, is_prefetch),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, len)
+		__field(int, pool_total)
+		__field(bool, is_prefetch)
+		),
+
+	TP_fast_assign(
+		__entry->len = len;
+		__entry->pool_total = pool_total;
+		__entry->is_prefetch = is_prefetch;
+		),
+
+	TP_printk("len %lx, pool total %x is_prefetch %d",
+		__entry->len,
+		__entry->pool_total,
+		__entry->is_prefetch)
+	);
+
+DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_start,
+	TP_PROTO(unsigned long len,
+		int pool_total,
+		bool is_prefetch),
+
+	TP_ARGS(len, pool_total, is_prefetch)
+	);
+
+DEFINE_EVENT(ion_secure_cma_add_to_pool, ion_secure_cma_add_to_pool_end,
+	TP_PROTO(unsigned long len,
+		int pool_total,
+		bool is_prefetch),
+
+	TP_ARGS(len, pool_total, is_prefetch)
+	);
+
+DECLARE_EVENT_CLASS(ion_secure_cma_shrink_pool,
+
+	TP_PROTO(unsigned long drained_size,
+		 unsigned long skipped_size),
+
+	TP_ARGS(drained_size, skipped_size),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, drained_size)
+		__field(unsigned long, skipped_size)
+		),
+
+	TP_fast_assign(
+		__entry->drained_size = drained_size;
+		__entry->skipped_size = skipped_size;
+		),
+
+	TP_printk("drained size %lx, skipped size %lx",
+		__entry->drained_size,
+		__entry->skipped_size)
+	);
+
+DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_start,
+	TP_PROTO(unsigned long drained_size,
+		 unsigned long skipped_size),
+
+	TP_ARGS(drained_size, skipped_size)
+	);
+
+DEFINE_EVENT(ion_secure_cma_shrink_pool, ion_secure_cma_shrink_pool_end,
+	TP_PROTO(unsigned long drained_size,
+		 unsigned long skipped_size),
+
+	TP_ARGS(drained_size, skipped_size)
+	);
+
+TRACE_EVENT(ion_prefetching,
+
+	TP_PROTO(unsigned long len),
+
+	TP_ARGS(len),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, len)
+		),
+
+	TP_fast_assign(
+		__entry->len = len;
+		),
+
+	TP_printk("prefetch size %lx",
+		__entry->len)
+	);
+
+DECLARE_EVENT_CLASS(ion_secure_cma_allocate,
+
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags),
+
+	TP_STRUCT__entry(
+		__field(const char *, heap_name)
+		__field(unsigned long, len)
+		__field(unsigned long, align)
+		__field(unsigned long, flags)
+		),
+
+	TP_fast_assign(
+		__entry->heap_name = heap_name;
+		__entry->len = len;
+		__entry->align = align;
+		__entry->flags = flags;
+		),
+
+	TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
+		__entry->heap_name,
+		__entry->len,
+		__entry->align,
+		__entry->flags)
+	);
+
+DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_start,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DEFINE_EVENT(ion_secure_cma_allocate, ion_secure_cma_allocate_end,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DECLARE_EVENT_CLASS(ion_cp_secure_buffer,
+
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags),
+
+	TP_STRUCT__entry(
+		__field(const char *, heap_name)
+		__field(unsigned long, len)
+		__field(unsigned long, align)
+		__field(unsigned long, flags)
+		),
+
+	TP_fast_assign(
+		__entry->heap_name = heap_name;
+		__entry->len = len;
+		__entry->align = align;
+		__entry->flags = flags;
+		),
+
+	TP_printk("heap_name=%s len=%lx align=%lx flags=%lx",
+		__entry->heap_name,
+		__entry->len,
+		__entry->align,
+		__entry->flags)
+	);
+
+DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_start,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DEFINE_EVENT(ion_cp_secure_buffer, ion_cp_secure_buffer_end,
+	TP_PROTO(const char *heap_name,
+		unsigned long len,
+		unsigned long align,
+		unsigned long flags),
+
+	TP_ARGS(heap_name, len, align, flags)
+	);
+
+DECLARE_EVENT_CLASS(iommu_sec_ptbl_map_range,
+
+	TP_PROTO(int sec_id,
+		int num,
+		unsigned long va,
+		unsigned int pa,
+		size_t len),
+
+	TP_ARGS(sec_id, num, va, pa, len),
+
+	TP_STRUCT__entry(
+		__field(int, sec_id)
+		__field(int, num)
+		__field(unsigned long, va)
+		__field(unsigned int, pa)
+		__field(size_t, len)
+	),
+
+	TP_fast_assign(
+		__entry->sec_id = sec_id;
+		__entry->num = num;
+		__entry->va = va;
+		__entry->pa = pa;
+		__entry->len = len;
+	),
+
+	TP_printk("sec_id=%d num=%d va=%lx pa=%u len=%zu",
+		__entry->sec_id,
+		__entry->num,
+		__entry->va,
+		__entry->pa,
+		__entry->len)
+	);
+
+DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_start,
+
+	TP_PROTO(int sec_id,
+		int num,
+		unsigned long va,
+		unsigned int pa,
+		size_t len),
+
+	TP_ARGS(sec_id, num, va, pa, len)
+	);
+
+DEFINE_EVENT(iommu_sec_ptbl_map_range, iommu_sec_ptbl_map_range_end,
+
+	TP_PROTO(int sec_id,
+		int num,
+		unsigned long va,
+		unsigned int pa,
+		size_t len),
+
+	TP_ARGS(sec_id, num, va, pa, len)
+	);
 #endif /* _TRACE_KMEM_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 54e3aad..070be71 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -147,6 +147,31 @@
 	TP_ARGS(frequency, cpu_id)
 );
 
+TRACE_EVENT(cpu_frequency_limits,
+
+	TP_PROTO(unsigned int max_freq, unsigned int min_freq,
+		unsigned int cpu_id),
+
+	TP_ARGS(max_freq, min_freq, cpu_id),
+
+	TP_STRUCT__entry(
+		__field(	u32,		min_freq	)
+		__field(	u32,		max_freq	)
+		__field(	u32,		cpu_id		)
+	),
+
+	TP_fast_assign(
+		__entry->min_freq = min_freq;
+		__entry->max_freq = max_freq;
+		__entry->cpu_id = cpu_id;
+	),
+
+	TP_printk("min=%lu max=%lu cpu_id=%lu",
+		  (unsigned long)__entry->min_freq,
+		  (unsigned long)__entry->max_freq,
+		  (unsigned long)__entry->cpu_id)
+);
+
 TRACE_EVENT(device_pm_callback_start,
 
 	TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -300,6 +325,25 @@
 	TP_ARGS(name, state, cpu_id)
 );
 
+TRACE_EVENT(clock_set_parent,
+
+	TP_PROTO(const char *name, const char *parent_name),
+
+	TP_ARGS(name, parent_name),
+
+	TP_STRUCT__entry(
+		__string(       name,           name            )
+		__string(       parent_name,    parent_name     )
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__assign_str(parent_name, parent_name);
+	),
+
+	TP_printk("%s parent=%s", __get_str(name), __get_str(parent_name))
+);
+
 /*
  * The power domain events are used for power domains transitions
  */
diff --git a/include/trace/events/rpmh.h b/include/trace/events/rpmh.h
new file mode 100644
index 0000000..62e7216
--- /dev/null
+++ b/include/trace/events/rpmh.h
@@ -0,0 +1,108 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rpmh
+
+#if !defined(_TRACE_RPMH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RPMH_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(rpmh_ack_recvd,
+
+	TP_PROTO(int m, u32 addr, int errno),
+
+	TP_ARGS(m, addr, errno),
+
+	TP_STRUCT__entry(
+		__field(int, m)
+		__field(u32, addr)
+		__field(int, errno)
+	),
+
+	TP_fast_assign(
+		__entry->m = m;
+		__entry->addr = addr;
+		__entry->errno = errno;
+	),
+
+	TP_printk("ack: tcs-m:%d addr: 0x%08x errno: %d",
+			__entry->m, __entry->addr, __entry->errno)
+);
+
+DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify_irq,
+	TP_PROTO(int m, u32 addr, int err),
+	TP_ARGS(m, addr, err)
+);
+
+DEFINE_EVENT(rpmh_ack_recvd, rpmh_notify,
+	TP_PROTO(int m, u32 addr, int err),
+	TP_ARGS(m, addr, err)
+);
+
+TRACE_EVENT(rpmh_send_msg,
+
+	TP_PROTO(void *b, int m, int n, u32 h, u32 a, u32 v, bool c),
+
+	TP_ARGS(b, m, n, h, a, v, c),
+
+	TP_STRUCT__entry(
+		__field(void *, base)
+		__field(int, m)
+		__field(int, n)
+		__field(u32, hdr)
+		__field(u32, addr)
+		__field(u32, data)
+		__field(bool, complete)
+	),
+
+	TP_fast_assign(
+		__entry->base = b;
+		__entry->m = m;
+		__entry->n = n;
+		__entry->hdr = h;
+		__entry->addr = a;
+		__entry->data = v;
+		__entry->complete = c;
+	),
+
+	TP_printk("msg: base: 0x%p  tcs(m): %d cmd(n): %d msgid: 0x%08x addr: 0x%08x data: 0x%08x complete: %d",
+			__entry->base + (672 * __entry->m) + (20 * __entry->n),
+			__entry->m, __entry->n, __entry->hdr,
+			__entry->addr, __entry->data, __entry->complete)
+);
+
+TRACE_EVENT(rpmh_control_msg,
+
+	TP_PROTO(void *r, u32 v),
+
+	TP_ARGS(r, v),
+
+	TP_STRUCT__entry(
+		__field(void *, reg)
+		__field(u32, data)
+	),
+
+	TP_fast_assign(
+		__entry->reg = r;
+		__entry->data = v;
+	),
+
+	TP_printk("ctrl-msg: reg: 0x%p data: 0x%08x",
+			__entry->reg, __entry->data)
+);
+
+#endif /* _TRACE_RPMH_H */
+
+#define TRACE_INCLUDE_FILE rpmh
+#include <trace/define_trace.h>
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9b90c57..a52c343 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -8,6 +8,8 @@
 #include <linux/tracepoint.h>
 #include <linux/binfmts.h>
 
+struct rq;
+
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
  */
@@ -51,6 +53,561 @@
 );
 
 /*
+ * Tracepoint for task enqueue/dequeue:
+ */
+TRACE_EVENT(sched_enq_deq_task,
+
+	TP_PROTO(struct task_struct *p, bool enqueue, unsigned int cpus_allowed),
+
+	TP_ARGS(p, enqueue, cpus_allowed),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(	int,	prio			)
+		__field(	int,	cpu			)
+		__field(	bool,	enqueue			)
+		__field(unsigned int,	nr_running		)
+		__field(unsigned long,	cpu_load		)
+		__field(unsigned int,	rt_nr_running		)
+		__field(unsigned int,	cpus_allowed		)
+#ifdef CONFIG_SCHED_HMP
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	pred_demand		)
+#endif
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->prio		= p->prio;
+		__entry->cpu		= task_cpu(p);
+		__entry->enqueue	= enqueue;
+		__entry->nr_running	= task_rq(p)->nr_running;
+		__entry->cpu_load	= task_rq(p)->cpu_load[0];
+		__entry->rt_nr_running	= task_rq(p)->rt.rt_nr_running;
+		__entry->cpus_allowed	= cpus_allowed;
+#ifdef CONFIG_SCHED_HMP
+		__entry->demand		= p->ravg.demand;
+		__entry->pred_demand	= p->ravg.pred_demand;
+#endif
+	),
+
+	TP_printk("cpu=%d %s comm=%s pid=%d prio=%d nr_running=%u cpu_load=%lu rt_nr_running=%u affine=%x"
+#ifdef CONFIG_SCHED_HMP
+		 " demand=%u pred_demand=%u"
+#endif
+			, __entry->cpu,
+			__entry->enqueue ? "enqueue" : "dequeue",
+			__entry->comm, __entry->pid,
+			__entry->prio, __entry->nr_running,
+			__entry->cpu_load, __entry->rt_nr_running, __entry->cpus_allowed
+#ifdef CONFIG_SCHED_HMP
+			, __entry->demand, __entry->pred_demand
+#endif
+			)
+);
+
+#ifdef CONFIG_SCHED_HMP
+
+struct group_cpu_time;
+struct migration_sum_data;
+extern const char *task_event_names[];
+extern const char *migrate_type_names[];
+
+TRACE_EVENT(sched_task_load,
+
+	TP_PROTO(struct task_struct *p, bool boost, int reason,
+		 bool sync, bool need_idle, u32 flags, int best_cpu),
+
+	TP_ARGS(p, boost, reason, sync, need_idle, flags, best_cpu),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	demand			)
+		__field(	bool,	boost			)
+		__field(	int,	reason			)
+		__field(	bool,	sync			)
+		__field(	bool,	need_idle		)
+		__field(	u32,	flags			)
+		__field(	int,	best_cpu		)
+		__field(	u64,	latency			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid		= p->pid;
+		__entry->demand		= p->ravg.demand;
+		__entry->boost		= boost;
+		__entry->reason		= reason;
+		__entry->sync		= sync;
+		__entry->need_idle	= need_idle;
+		__entry->flags		= flags;
+		__entry->best_cpu	= best_cpu;
+		__entry->latency	= p->state == TASK_WAKING ?
+						      sched_ktime_clock() -
+						      p->ravg.mark_start : 0;
+	),
+
+	TP_printk("%d (%s): demand=%u boost=%d reason=%d sync=%d need_idle=%d flags=%x best_cpu=%d latency=%llu",
+		__entry->pid, __entry->comm, __entry->demand,
+		__entry->boost, __entry->reason, __entry->sync,
+		__entry->need_idle, __entry->flags,
+		__entry->best_cpu, __entry->latency)
+);
+
+TRACE_EVENT(sched_set_preferred_cluster,
+
+	TP_PROTO(struct related_thread_group *grp, u64 total_demand),
+
+	TP_ARGS(grp, total_demand),
+
+	TP_STRUCT__entry(
+		__field(		int,	id			)
+		__field(		u64,	demand			)
+		__field(		int,	cluster_first_cpu	)
+	),
+
+	TP_fast_assign(
+		__entry->id			= grp->id;
+		__entry->demand			= total_demand;
+		__entry->cluster_first_cpu	= grp->preferred_cluster ?
+							cluster_first_cpu(grp->preferred_cluster)
+							: -1;
+	),
+
+	TP_printk("group_id %d total_demand %llu preferred_cluster_first_cpu %d",
+			__entry->id, __entry->demand,
+			__entry->cluster_first_cpu)
+);
+
+DECLARE_EVENT_CLASS(sched_cpu_load,
+
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+
+	TP_ARGS(rq, idle, irqload, power_cost, temp),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu			)
+		__field(unsigned int, idle			)
+		__field(unsigned int, nr_running		)
+		__field(unsigned int, nr_big_tasks		)
+		__field(unsigned int, load_scale_factor		)
+		__field(unsigned int, capacity			)
+		__field(	 u64, cumulative_runnable_avg	)
+		__field(	 u64, irqload			)
+		__field(unsigned int, max_freq			)
+		__field(unsigned int, power_cost		)
+		__field(	 int, cstate			)
+		__field(	 int, dstate			)
+		__field(	 int, temp			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= rq->cpu;
+		__entry->idle			= idle;
+		__entry->nr_running		= rq->nr_running;
+		__entry->nr_big_tasks		= rq->hmp_stats.nr_big_tasks;
+		__entry->load_scale_factor	= cpu_load_scale_factor(rq->cpu);
+		__entry->capacity		= cpu_capacity(rq->cpu);
+		__entry->cumulative_runnable_avg = rq->hmp_stats.cumulative_runnable_avg;
+		__entry->irqload		= irqload;
+		__entry->max_freq		= cpu_max_freq(rq->cpu);
+		__entry->power_cost		= power_cost;
+		__entry->cstate			= rq->cstate;
+		__entry->dstate			= rq->cluster->dstate;
+		__entry->temp			= temp;
+	),
+
+	TP_printk("cpu %u idle %d nr_run %u nr_big %u lsf %u capacity %u cr_avg %llu irqload %llu fmax %u power_cost %u cstate %d dstate %d temp %d",
+	__entry->cpu, __entry->idle, __entry->nr_running, __entry->nr_big_tasks,
+	__entry->load_scale_factor, __entry->capacity,
+	__entry->cumulative_runnable_avg, __entry->irqload,
+	__entry->max_freq, __entry->power_cost, __entry->cstate,
+	__entry->dstate, __entry->temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_wakeup,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_lb,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+DEFINE_EVENT(sched_cpu_load, sched_cpu_load_cgroup,
+	TP_PROTO(struct rq *rq, int idle, u64 irqload, unsigned int power_cost, int temp),
+	TP_ARGS(rq, idle, irqload, power_cost, temp)
+);
+
+TRACE_EVENT(sched_set_boost,
+
+	TP_PROTO(int ref_count),
+
+	TP_ARGS(ref_count),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, ref_count			)
+	),
+
+	TP_fast_assign(
+		__entry->ref_count = ref_count;
+	),
+
+	TP_printk("ref_count=%d", __entry->ref_count)
+);
+
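Each TRACE_EVENT() definition above expands into a static inline trace_<name>() helper that call sites invoke with the TP_PROTO arguments. A minimal sketch of how the boost event just defined might be emitted, assuming a simple refcount wrapper (the boost_refcount variable and sched_boost_example() function are illustrative, not the actual HMP call site):

	#include <trace/events/sched.h>	/* bodies are created where CREATE_TRACE_POINTS is defined */

	static int boost_refcount;		/* assumed bookkeeping for boost requests */

	void sched_boost_example(int enable)
	{
		if (enable)
			boost_refcount++;
		else if (boost_refcount)
			boost_refcount--;

		/* Renders as "ref_count=<n>" per the TP_printk() above. */
		trace_sched_set_boost(boost_refcount);
	}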
+TRACE_EVENT(sched_update_task_ravg,
+
+	TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
+		 u64 wallclock, u64 irqtime, u64 cycles, u64 exec_time,
+		 struct group_cpu_time *cpu_time),
+
+	TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(	pid_t,	cur_pid			)
+		__field(unsigned int,	cur_freq		)
+		__field(	u64,	wallclock		)
+		__field(	u64,	mark_start		)
+		__field(	u64,	delta_m			)
+		__field(	u64,	win_start		)
+		__field(	u64,	delta			)
+		__field(	u64,	irqtime			)
+		__field(enum task_event,	evt		)
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	sum			)
+		__field(	 int,	cpu			)
+		__field(unsigned int,	pred_demand		)
+		__field(	u64,	rq_cs			)
+		__field(	u64,	rq_ps			)
+		__field(	u64,	grp_cs			)
+		__field(	u64,	grp_ps			)
+		__field(	u64,	grp_nt_cs			)
+		__field(	u64,	grp_nt_ps			)
+		__field(	u32,	curr_window		)
+		__field(	u32,	prev_window		)
+		__field(	u64,	nt_cs			)
+		__field(	u64,	nt_ps			)
+		__field(	u32,	active_windows		)
+	),
+
+	TP_fast_assign(
+		__entry->wallclock      = wallclock;
+		__entry->win_start      = rq->window_start;
+		__entry->delta          = (wallclock - rq->window_start);
+		__entry->evt            = evt;
+		__entry->cpu            = rq->cpu;
+		__entry->cur_pid        = rq->curr->pid;
+		__entry->cur_freq       = cpu_cycles_to_freq(cycles, exec_time);
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->mark_start     = p->ravg.mark_start;
+		__entry->delta_m        = (wallclock - p->ravg.mark_start);
+		__entry->demand         = p->ravg.demand;
+		__entry->sum            = p->ravg.sum;
+		__entry->irqtime        = irqtime;
+		__entry->pred_demand     = p->ravg.pred_demand;
+		__entry->rq_cs          = rq->curr_runnable_sum;
+		__entry->rq_ps          = rq->prev_runnable_sum;
+		__entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
+		__entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
+		__entry->grp_nt_cs = cpu_time ? cpu_time->nt_curr_runnable_sum : 0;
+		__entry->grp_nt_ps = cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
+		__entry->curr_window	= p->ravg.curr_window;
+		__entry->prev_window	= p->ravg.prev_window;
+		__entry->nt_cs		= rq->nt_curr_runnable_sum;
+		__entry->nt_ps		= rq->nt_prev_runnable_sum;
+		__entry->active_windows	= p->ravg.active_windows;
+	),
+
+	TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %llu grp_ps %llu grp_nt_cs %llu grp_nt_ps %llu"
+		, __entry->wallclock, __entry->win_start, __entry->delta,
+		task_event_names[__entry->evt], __entry->cpu,
+		__entry->cur_freq, __entry->cur_pid,
+		__entry->pid, __entry->comm, __entry->mark_start,
+		__entry->delta_m, __entry->demand,
+		__entry->sum, __entry->irqtime, __entry->pred_demand,
+		__entry->rq_cs, __entry->rq_ps, __entry->curr_window,
+		__entry->prev_window, __entry->nt_cs, __entry->nt_ps,
+		__entry->active_windows, __entry->grp_cs,
+		__entry->grp_ps, __entry->grp_nt_cs, __entry->grp_nt_ps)
+);
+
+TRACE_EVENT(sched_get_task_cpu_cycles,
+
+	TP_PROTO(int cpu, int event, u64 cycles, u64 exec_time),
+
+	TP_ARGS(cpu, event, cycles, exec_time),
+
+	TP_STRUCT__entry(
+		__field(int,		cpu		)
+		__field(int,		event		)
+		__field(u64,		cycles		)
+		__field(u64,		exec_time	)
+		__field(u32,		freq		)
+		__field(u32,		legacy_freq	)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->event		= event;
+		__entry->cycles		= cycles;
+		__entry->exec_time	= exec_time;
+		__entry->freq		= cpu_cycles_to_freq(cycles, exec_time);
+		__entry->legacy_freq	= cpu_cur_freq(cpu);
+	),
+
+	TP_printk("cpu=%d event=%d cycles=%llu exec_time=%llu freq=%u legacy_freq=%u",
+		  __entry->cpu, __entry->event, __entry->cycles,
+		  __entry->exec_time, __entry->freq, __entry->legacy_freq)
+);
+
+TRACE_EVENT(sched_update_history,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int samples,
+			enum task_event evt),
+
+	TP_ARGS(rq, p, runtime, samples, evt),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(	pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	samples			)
+		__field(enum task_event,	evt		)
+		__field(unsigned int,	demand			)
+		__field(unsigned int,	pred_demand		)
+		__array(	 u32,	hist, RAVG_HIST_SIZE_MAX)
+		__field(unsigned int,	nr_big_tasks		)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->samples        = samples;
+		__entry->evt            = evt;
+		__entry->demand         = p->ravg.demand;
+		__entry->pred_demand     = p->ravg.pred_demand;
+		memcpy(__entry->hist, p->ravg.sum_history,
+					RAVG_HIST_SIZE_MAX * sizeof(u32));
+		__entry->nr_big_tasks   = rq->hmp_stats.nr_big_tasks;
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u samples %d event %s demand %u pred_demand %u"
+		" (hist: %u %u %u %u %u) cpu %d nr_big %u",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->samples,
+		task_event_names[__entry->evt],
+		__entry->demand, __entry->pred_demand,
+		__entry->hist[0], __entry->hist[1],
+		__entry->hist[2], __entry->hist[3],
+		__entry->hist[4], __entry->cpu, __entry->nr_big_tasks)
+);
+
+TRACE_EVENT(sched_reset_all_window_stats,
+
+	TP_PROTO(u64 window_start, u64 window_size, u64 time_taken,
+		int reason, unsigned int old_val, unsigned int new_val),
+
+	TP_ARGS(window_start, window_size, time_taken,
+		reason, old_val, new_val),
+
+	TP_STRUCT__entry(
+		__field(	u64,	window_start		)
+		__field(	u64,	window_size		)
+		__field(	u64,	time_taken		)
+		__field(	int,	reason			)
+		__field(unsigned int,	old_val			)
+		__field(unsigned int,	new_val			)
+	),
+
+	TP_fast_assign(
+		__entry->window_start = window_start;
+		__entry->window_size = window_size;
+		__entry->time_taken = time_taken;
+		__entry->reason	= reason;
+		__entry->old_val = old_val;
+		__entry->new_val = new_val;
+	),
+
+	TP_printk("time_taken %llu window_start %llu window_size %llu reason %s old_val %u new_val %u",
+		  __entry->time_taken, __entry->window_start,
+		  __entry->window_size,
+		  sched_window_reset_reasons[__entry->reason],
+		  __entry->old_val, __entry->new_val)
+);
+
+TRACE_EVENT(sched_update_pred_demand,
+
+	TP_PROTO(struct rq *rq, struct task_struct *p, u32 runtime, int pct,
+		 unsigned int pred_demand),
+
+	TP_ARGS(rq, p, runtime, pct, pred_demand),
+
+	TP_STRUCT__entry(
+		__array(	char,	comm,   TASK_COMM_LEN	)
+		__field(       pid_t,	pid			)
+		__field(unsigned int,	runtime			)
+		__field(	 int,	pct			)
+		__field(unsigned int,	pred_demand		)
+		__array(	  u8,	bucket, NUM_BUSY_BUCKETS)
+		__field(	 int,	cpu			)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
+		__entry->pid            = p->pid;
+		__entry->runtime        = runtime;
+		__entry->pct            = pct;
+		__entry->pred_demand     = pred_demand;
+		memcpy(__entry->bucket, p->ravg.busy_buckets,
+					NUM_BUSY_BUCKETS * sizeof(u8));
+		__entry->cpu            = rq->cpu;
+	),
+
+	TP_printk("%d (%s): runtime %u pct %d cpu %d pred_demand %u (buckets: %u %u %u %u %u %u %u %u %u %u)",
+		__entry->pid, __entry->comm,
+		__entry->runtime, __entry->pct, __entry->cpu,
+		__entry->pred_demand, __entry->bucket[0], __entry->bucket[1],
+		__entry->bucket[2], __entry->bucket[3], __entry->bucket[4],
+		__entry->bucket[5], __entry->bucket[6], __entry->bucket[7],
+		__entry->bucket[8], __entry->bucket[9])
+);
+
+TRACE_EVENT(sched_migration_update_sum,
+
+	TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct migration_sum_data *d),
+
+	TP_ARGS(p, migrate_type, d),
+
+	TP_STRUCT__entry(
+		__field(int,		tcpu			)
+		__field(int,		pid			)
+		__field(	u64,	cs			)
+		__field(	u64,	ps			)
+		__field(	s64,	nt_cs			)
+		__field(	s64,	nt_ps			)
+		__field(enum migrate_types,	migrate_type	)
+		__field(	s64,	src_cs			)
+		__field(	s64,	src_ps			)
+		__field(	s64,	dst_cs			)
+		__field(	s64,	dst_ps			)
+		__field(	s64,	src_nt_cs		)
+		__field(	s64,	src_nt_ps		)
+		__field(	s64,	dst_nt_cs		)
+		__field(	s64,	dst_nt_ps		)
+	),
+
+	TP_fast_assign(
+		__entry->tcpu		= task_cpu(p);
+		__entry->pid		= p->pid;
+		__entry->migrate_type	= migrate_type;
+		__entry->src_cs		= d->src_rq ?
+						d->src_rq->curr_runnable_sum :
+						d->src_cpu_time->curr_runnable_sum;
+		__entry->src_ps		= d->src_rq ?
+						d->src_rq->prev_runnable_sum :
+						d->src_cpu_time->prev_runnable_sum;
+		__entry->dst_cs		= d->dst_rq ?
+						d->dst_rq->curr_runnable_sum :
+						d->dst_cpu_time->curr_runnable_sum;
+		__entry->dst_ps		= d->dst_rq ?
+						d->dst_rq->prev_runnable_sum :
+						d->dst_cpu_time->prev_runnable_sum;
+		__entry->src_nt_cs		= d->src_rq ?
+						d->src_rq->nt_curr_runnable_sum :
+						d->src_cpu_time->nt_curr_runnable_sum;
+		__entry->src_nt_ps		= d->src_rq ?
+						d->src_rq->nt_prev_runnable_sum :
+						d->src_cpu_time->nt_prev_runnable_sum;
+		__entry->dst_nt_cs		= d->dst_rq ?
+						d->dst_rq->nt_curr_runnable_sum :
+						d->dst_cpu_time->nt_curr_runnable_sum;
+		__entry->dst_nt_ps		= d->dst_rq ?
+						d->dst_rq->nt_prev_runnable_sum :
+						d->dst_cpu_time->nt_prev_runnable_sum;
+	),
+
+	TP_printk("pid %d task_cpu %d migrate_type %s src_cs %lld src_ps %lld dst_cs %lld dst_ps %lld src_nt_cs %lld src_nt_ps %lld dst_nt_cs %lld dst_nt_ps %lld",
+		__entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type],
+		__entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps,
+		__entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
+);
+
+TRACE_EVENT(sched_get_busy,
+
+	TP_PROTO(int cpu, u64 load, u64 nload, u64 pload, int early),
+
+	TP_ARGS(cpu, load, nload, pload, early),
+
+	TP_STRUCT__entry(
+		__field(	int,	cpu			)
+		__field(	u64,	load			)
+		__field(	u64,	nload			)
+		__field(	u64,	pload			)
+		__field(	int,	early			)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->load		= load;
+		__entry->nload		= nload;
+		__entry->pload		= pload;
+		__entry->early		= early;
+	),
+
+	TP_printk("cpu %d load %llu new_task_load %llu predicted_load %llu early %d",
+		__entry->cpu, __entry->load, __entry->nload,
+		__entry->pload, __entry->early)
+);
+
+TRACE_EVENT(sched_freq_alert,
+
+	TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
+		u64 new_load),
+
+	TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),
+
+	TP_STRUCT__entry(
+		__field(	int,	cpu			)
+		__field(	int,	pd_notif		)
+		__field(	int,	check_groups		)
+		__field(	u64,	old_busy_time		)
+		__field(	u64,	ps			)
+		__field(	u64,	new_load		)
+		__field(	u64,	old_pred		)
+		__field(	u64,	new_pred		)
+	),
+
+	TP_fast_assign(
+		__entry->cpu		= cpu;
+		__entry->pd_notif	= pd_notif;
+		__entry->check_groups	= check_groups;
+		__entry->old_busy_time	= rq->old_busy_time;
+		__entry->ps		= rq->prev_runnable_sum;
+		__entry->new_load	= new_load;
+		__entry->old_pred	= rq->old_estimated_time;
+		__entry->new_pred	= rq->hmp_stats.pred_demands_sum;
+	),
+
+	TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%llu new_load=%llu old_pred=%llu new_pred=%llu",
+		__entry->cpu, __entry->pd_notif, __entry->check_groups,
+		__entry->old_busy_time, __entry->ps, __entry->new_load,
+		__entry->old_pred, __entry->new_pred)
+);
+
+#endif	/* CONFIG_SCHED_HMP */
+
+/*
  * Tracepoint for waking up a task:
  */
 DECLARE_EVENT_CLASS(sched_wakeup_template,
@@ -166,14 +723,15 @@
  */
 TRACE_EVENT(sched_migrate_task,
 
-	TP_PROTO(struct task_struct *p, int dest_cpu),
+	TP_PROTO(struct task_struct *p, int dest_cpu, unsigned int load),
 
-	TP_ARGS(p, dest_cpu),
+	TP_ARGS(p, dest_cpu, load),
 
 	TP_STRUCT__entry(
 		__array(	char,	comm,	TASK_COMM_LEN	)
 		__field(	pid_t,	pid			)
 		__field(	int,	prio			)
+		__field(unsigned int,	load			)
 		__field(	int,	orig_cpu		)
 		__field(	int,	dest_cpu		)
 	),
@@ -182,15 +740,91 @@
 		memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
 		__entry->pid		= p->pid;
 		__entry->prio		= p->prio;
+		__entry->load		= load;
 		__entry->orig_cpu	= task_cpu(p);
 		__entry->dest_cpu	= dest_cpu;
 	),
 
-	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
-		  __entry->comm, __entry->pid, __entry->prio,
+	TP_printk("comm=%s pid=%d prio=%d load=%u orig_cpu=%d dest_cpu=%d",
+		  __entry->comm, __entry->pid, __entry->prio, __entry->load,
 		  __entry->orig_cpu, __entry->dest_cpu)
 );
 
+/*
+ * Tracepoint for a CPU going offline/online:
+ */
+TRACE_EVENT(sched_cpu_hotplug,
+
+	TP_PROTO(int affected_cpu, int error, int status),
+
+	TP_ARGS(affected_cpu, error, status),
+
+	TP_STRUCT__entry(
+		__field(	int,	affected_cpu		)
+		__field(	int,	error			)
+		__field(	int,	status			)
+	),
+
+	TP_fast_assign(
+		__entry->affected_cpu	= affected_cpu;
+		__entry->error		= error;
+		__entry->status		= status;
+	),
+
+	TP_printk("cpu %d %s error=%d", __entry->affected_cpu,
+		__entry->status ? "online" : "offline", __entry->error)
+);
+
+/*
+ * Tracepoint for load balancing:
+ */
+#if NR_CPUS > 32
+#error "Unsupported NR_CPUS for lb tracepoint."
+#endif
+TRACE_EVENT(sched_load_balance,
+
+	TP_PROTO(int cpu, enum cpu_idle_type idle, int balance,
+		 unsigned long group_mask, int busiest_nr_running,
+		 unsigned long imbalance, unsigned int env_flags, int ld_moved,
+		 unsigned int balance_interval),
+
+	TP_ARGS(cpu, idle, balance, group_mask, busiest_nr_running,
+		imbalance, env_flags, ld_moved, balance_interval),
+
+	TP_STRUCT__entry(
+		__field(	int,			cpu)
+		__field(	enum cpu_idle_type,	idle)
+		__field(	int,			balance)
+		__field(	unsigned long,		group_mask)
+		__field(	int,			busiest_nr_running)
+		__field(	unsigned long,		imbalance)
+		__field(	unsigned int,		env_flags)
+		__field(	int,			ld_moved)
+		__field(	unsigned int,		balance_interval)
+	),
+
+	TP_fast_assign(
+		__entry->cpu			= cpu;
+		__entry->idle			= idle;
+		__entry->balance		= balance;
+		__entry->group_mask		= group_mask;
+		__entry->busiest_nr_running	= busiest_nr_running;
+		__entry->imbalance		= imbalance;
+		__entry->env_flags		= env_flags;
+		__entry->ld_moved		= ld_moved;
+		__entry->balance_interval	= balance_interval;
+	),
+
+	TP_printk("cpu=%d state=%s balance=%d group=%#lx busy_nr=%d imbalance=%lu flags=%#x ld_moved=%d bal_int=%u",
+		  __entry->cpu,
+		  __entry->idle == CPU_IDLE ? "idle" :
+		  (__entry->idle == CPU_NEWLY_IDLE ? "newly_idle" : "busy"),
+		  __entry->balance,
+		  __entry->group_mask, __entry->busiest_nr_running,
+		  __entry->imbalance, __entry->env_flags, __entry->ld_moved,
+		  __entry->balance_interval)
+);
+
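Because this event stores the balanced group's CPUs in a single unsigned long (hence the NR_CPUS > 32 guard above), the caller has to fold a cpumask into one word before tracing. A hedged sketch of how that folding could look, assuming a small helper in the fair-class balancer (lb_group_mask() is hypothetical; the real call site inside load_balance() may differ):

	#include <linux/cpumask.h>
	#include <trace/events/sched.h>

	/* Fold a sched-group cpumask into the single word the tracepoint
	 * expects; only valid because NR_CPUS <= 32 is enforced above. */
	static unsigned long lb_group_mask(const struct cpumask *grp)
	{
		return grp ? cpumask_bits(grp)[0] : 0;
	}

A call site would then pass lb_group_mask() of the busiest group's cpumask as group_mask, along with the imbalance, env flags, ld_moved and balance interval it already tracks.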
 DECLARE_EVENT_CLASS(sched_process_template,
 
 	TP_PROTO(struct task_struct *p),
@@ -219,7 +853,7 @@
 DEFINE_EVENT(sched_process_template, sched_process_free,
 	     TP_PROTO(struct task_struct *p),
 	     TP_ARGS(p));
-	     
+
 
 /*
  * Tracepoint for a task exiting:
@@ -374,6 +1008,30 @@
 	     TP_ARGS(tsk, delay));
 
 /*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+	TP_PROTO(struct task_struct *tsk),
+
+	TP_ARGS(tsk),
+
+	TP_STRUCT__entry(
+		__field( pid_t,	pid	)
+		__field( void*, caller	)
+		__field( bool, io_wait	)
+	),
+
+	TP_fast_assign(
+		__entry->pid	= tsk->pid;
+		__entry->caller = (void*)get_wchan(tsk);
+		__entry->io_wait = tsk->in_iowait;
+	),
+
+	TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
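get_wchan() resolves the kernel function the task went to sleep in, so this event is only meaningful if it fires while (or just after) the task sat in uninterruptible sleep. A hedged sketch of such a call site (its exact placement in the wakeup path is an assumption, not necessarily where this patch hooks it):

	/* On the wakeup path, before p's state is rewritten to TASK_RUNNING: */
	if (p->state == TASK_UNINTERRUPTIBLE)
		trace_sched_blocked_reason(p);	/* prints pid, iowait flag, caller via %pS */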
+/*
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
@@ -562,6 +1220,28 @@
 
 	TP_printk("cpu=%d", __entry->cpu)
 );
+
+TRACE_EVENT(sched_get_nr_running_avg,
+
+	TP_PROTO(int avg, int big_avg, int iowait_avg),
+
+	TP_ARGS(avg, big_avg, iowait_avg),
+
+	TP_STRUCT__entry(
+		__field( int,	avg			)
+		__field( int,	big_avg			)
+		__field( int,	iowait_avg		)
+	),
+
+	TP_fast_assign(
+		__entry->avg		= avg;
+		__entry->big_avg	= big_avg;
+		__entry->iowait_avg	= iowait_avg;
+	),
+
+	TP_printk("avg=%d big_avg=%d iowait_avg=%d",
+		__entry->avg, __entry->big_avg, __entry->iowait_avg)
+);
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/include/trace/events/scm.h b/include/trace/events/scm.h
new file mode 100644
index 0000000..b07a38d
--- /dev/null
+++ b/include/trace/events/scm.h
@@ -0,0 +1,68 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scm
+
+#if !defined(_TRACE_SCM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCM_H
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+#include <soc/qcom/scm.h>
+
+TRACE_EVENT(scm_call_start,
+
+	TP_PROTO(u64 x0, struct scm_desc *p),
+
+	TP_ARGS(x0, p),
+
+	TP_STRUCT__entry(
+		__field(u64, x0)
+		__field(u32, arginfo)
+		__array(u64, args, MAX_SCM_ARGS)
+		__field(u64, x5)
+	),
+
+	TP_fast_assign(
+		__entry->x0		= x0;
+		__entry->arginfo	= p->arginfo;
+		memcpy(__entry->args, p->args, MAX_SCM_ARGS * sizeof(u64));
+		__entry->x5		= p->x5;
+	),
+
+	TP_printk("func id=%#llx (args: %#x, %#llx, %#llx, %#llx, %#llx)",
+		__entry->x0, __entry->arginfo, __entry->args[0],
+		__entry->args[1], __entry->args[2], __entry->x5)
+);
+
+
+TRACE_EVENT(scm_call_end,
+
+	TP_PROTO(struct scm_desc *p),
+
+	TP_ARGS(p),
+
+	TP_STRUCT__entry(
+		__array(u64, ret, MAX_SCM_RETS)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->ret, p->ret, MAX_SCM_RETS * sizeof(u64));
+	),
+
+	TP_printk("ret: %#llx, %#llx, %#llx",
+		__entry->ret[0], __entry->ret[1], __entry->ret[2])
+);
+#endif /* _TRACE_SCM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
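The two events above are intended to bracket a secure-monitor call so the arguments logged at entry can be correlated with the return values logged at exit. A minimal sketch of how the SCM driver could wrap a call, assuming scm_call2() with its usual msm signature is the instrumented function (the wrapper name is hypothetical):

	#include <soc/qcom/scm.h>
	#include <trace/events/scm.h>

	static int scm_call2_traced(u32 fn_id, struct scm_desc *desc)
	{
		int ret;

		trace_scm_call_start(fn_id, desc);	/* logs x0, arginfo, args[], x5 */
		ret = scm_call2(fn_id, desc);
		trace_scm_call_end(desc);		/* logs ret[0..2] */
		return ret;
	}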
diff --git a/include/trace/events/trace_msm_bus.h b/include/trace/events/trace_msm_bus.h
new file mode 100644
index 0000000..3c08aac
--- /dev/null
+++ b/include/trace/events/trace_msm_bus.h
@@ -0,0 +1,237 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_bus
+
+#if !defined(_TRACE_MSM_BUS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MSM_BUS_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(bus_update_request,
+
+	TP_PROTO(int sec, int nsec, const char *name, int src, int dest,
+		unsigned long long ab, unsigned long long ib),
+
+	TP_ARGS(sec, nsec, name, src, dest, ab, ib),
+
+	TP_STRUCT__entry(
+		__field(int, sec)
+		__field(int, nsec)
+		__string(name, name)
+		__field(int, src)
+		__field(int, dest)
+		__field(u64, ab)
+		__field(u64, ib)
+	),
+
+	TP_fast_assign(
+		__entry->sec = sec;
+		__entry->nsec = nsec;
+		__assign_str(name, name);
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ab = ab;
+		__entry->ib = ib;
+	),
+
+	TP_printk("time=%u.%09u name=%s src=%d dest=%d ab=%llu ib=%llu",
+		__entry->sec,
+		__entry->nsec,
+		__get_str(name),
+		__entry->src,
+		__entry->dest,
+		(unsigned long long)__entry->ab,
+		(unsigned long long)__entry->ib)
+);
+
+TRACE_EVENT(bus_update_request_end,
+
+	TP_PROTO(const char *name),
+
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("client-name=%s", __get_str(name))
+);
+
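bus_update_request takes its timestamp pre-split into whole seconds and nanoseconds, so a caller has to decompose whatever clock it samples. A hedged sketch of a client-side helper (the use of ktime_get_ts64() and the helper name are assumptions about the caller, not the actual msm_bus code):

	#include <linux/time64.h>
	#include <linux/timekeeping.h>
	#include <trace/events/trace_msm_bus.h>

	static void bus_trace_request(const char *client, int src, int dest,
				      u64 ab, u64 ib)
	{
		struct timespec64 ts;

		ktime_get_ts64(&ts);	/* monotonic time, split into sec/nsec below */
		trace_bus_update_request((int)ts.tv_sec, (int)ts.tv_nsec,
					 client, src, dest, ab, ib);
	}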
+TRACE_EVENT(bus_bimc_config_limiter,
+
+	TP_PROTO(int mas_id, unsigned long long cur_lim_bw),
+
+	TP_ARGS(mas_id, cur_lim_bw),
+
+	TP_STRUCT__entry(
+		__field(int, mas_id)
+		__field(u64, cur_lim_bw)
+	),
+
+	TP_fast_assign(
+		__entry->mas_id = mas_id;
+		__entry->cur_lim_bw = cur_lim_bw;
+	),
+
+	TP_printk("Master=%d cur_lim_bw=%llu",
+		__entry->mas_id,
+		(unsigned long long)__entry->cur_lim_bw)
+);
+
+TRACE_EVENT(bus_avail_bw,
+
+	TP_PROTO(unsigned long long cur_bimc_bw, unsigned long long cur_mdp_bw),
+
+	TP_ARGS(cur_bimc_bw, cur_mdp_bw),
+
+	TP_STRUCT__entry(
+		__field(u64, cur_bimc_bw)
+		__field(u64, cur_mdp_bw)
+	),
+
+	TP_fast_assign(
+		__entry->cur_bimc_bw = cur_bimc_bw;
+		__entry->cur_mdp_bw = cur_mdp_bw;
+	),
+
+	TP_printk("cur_bimc_bw = %llu cur_mdp_bw = %llu",
+		(unsigned long long)__entry->cur_bimc_bw,
+		(unsigned long long)__entry->cur_mdp_bw)
+);
+
+TRACE_EVENT(bus_rules_matches,
+
+	TP_PROTO(int node_id, int rule_id, unsigned long long node_ab,
+		unsigned long long node_ib, unsigned long long node_clk),
+
+	TP_ARGS(node_id, rule_id, node_ab, node_ib, node_clk),
+
+	TP_STRUCT__entry(
+		__field(int, node_id)
+		__field(int, rule_id)
+		__field(u64, node_ab)
+		__field(u64, node_ib)
+		__field(u64, node_clk)
+	),
+
+	TP_fast_assign(
+		__entry->node_id = node_id;
+		__entry->rule_id = rule_id;
+		__entry->node_ab = node_ab;
+		__entry->node_ib = node_ib;
+		__entry->node_clk = node_clk;
+	),
+
+	TP_printk("Rule match node%d rule%d node-ab%llu:ib%llu:clk%llu",
+		__entry->node_id, __entry->rule_id,
+		(unsigned long long)__entry->node_ab,
+		(unsigned long long)__entry->node_ib,
+		(unsigned long long)__entry->node_clk)
+);
+
+TRACE_EVENT(bus_bke_params,
+
+	TP_PROTO(u32 gc, u32 gp, u32 thl, u32 thm, u32 thh),
+
+	TP_ARGS(gc, gp, thl, thm, thh),
+
+	TP_STRUCT__entry(
+		__field(u32, gc)
+		__field(u32, gp)
+		__field(u32, thl)
+		__field(u32, thm)
+		__field(u32, thh)
+	),
+
+	TP_fast_assign(
+		__entry->gc = gc;
+		__entry->gp = gp;
+		__entry->thl = thl;
+		__entry->thm = thm;
+		__entry->thh = thh;
+	),
+
+	TP_printk("BKE Params GC=0x%x GP=0x%x THL=0x%x THM=0x%x THH=0x%x",
+		__entry->gc, __entry->gp, __entry->thl, __entry->thm,
+			__entry->thh)
+);
+
+TRACE_EVENT(bus_client_status,
+
+	TP_PROTO(const char *name, int src, int dest,
+		unsigned long long ab, unsigned long long ib, int active_only),
+
+	TP_ARGS(name, src, dest, ab, ib, active_only),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, src)
+		__field(int, dest)
+		__field(u64, ab)
+		__field(u64, ib)
+		__field(int, active_only)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->src = src;
+		__entry->dest = dest;
+		__entry->ab = ab;
+		__entry->ib = ib;
+		__entry->active_only = active_only;
+	),
+
+	TP_printk("name=%s src=%d dest=%d ab=%llu ib=%llu active_only=%d",
+		__get_str(name),
+		__entry->src,
+		__entry->dest,
+		(unsigned long long)__entry->ab,
+		(unsigned long long)__entry->ib,
+		__entry->active_only)
+);
+
+TRACE_EVENT(bus_agg_bw,
+
+	TP_PROTO(unsigned int node_id, int rpm_id, int ctx_set,
+		unsigned long long agg_ab),
+
+	TP_ARGS(node_id, rpm_id, ctx_set, agg_ab),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, node_id)
+		__field(int, rpm_id)
+		__field(int, ctx_set)
+		__field(u64, agg_ab)
+	),
+
+	TP_fast_assign(
+		__entry->node_id = node_id;
+		__entry->rpm_id = rpm_id;
+		__entry->ctx_set = ctx_set;
+		__entry->agg_ab = agg_ab;
+	),
+
+	TP_printk("node_id:%u rpm_id:%d rpm_ctx:%d agg_ab:%llu",
+		__entry->node_id,
+		__entry->rpm_id,
+		__entry->ctx_set,
+		(unsigned long long)__entry->agg_ab)
+);
+#endif
+#define TRACE_INCLUDE_FILE trace_msm_bus
+#include <trace/define_trace.h>
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
new file mode 100644
index 0000000..6dc4735
--- /dev/null
+++ b/include/trace/events/ufs.h
@@ -0,0 +1,238 @@
+/*
+ * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufs
+
+#if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_UFS_H
+
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(ufshcd_state_change_template,
+	TP_PROTO(const char *dev_name, int state),
+
+	TP_ARGS(dev_name, state),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__field(int, state)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__entry->state = state;
+	),
+
+	TP_printk("%s: state changed to %s",
+		__get_str(dev_name), __entry->state ? "ON" : "OFF")
+);
+
+DEFINE_EVENT_PRINT(ufshcd_state_change_template, ufshcd_clk_gating,
+	TP_PROTO(const char *dev_name, int state),
+	TP_ARGS(dev_name, state),
+	TP_printk("%s: state changed to %s", __get_str(dev_name),
+		__print_symbolic(__entry->state,
+				{ CLKS_OFF, "CLKS_OFF" },
+				{ CLKS_ON, "CLKS_ON" },
+				{ REQ_CLKS_OFF, "REQ_CLKS_OFF" },
+				{ REQ_CLKS_ON, "REQ_CLKS_ON" }))
+);
+
+DEFINE_EVENT_PRINT(ufshcd_state_change_template, ufshcd_hibern8_on_idle,
+	TP_PROTO(const char *dev_name, int state),
+	TP_ARGS(dev_name, state),
+	TP_printk("%s: state changed to %s", __get_str(dev_name),
+		__print_symbolic(__entry->state,
+			{ HIBERN8_ENTERED, "HIBERN8_ENTER" },
+			{ HIBERN8_EXITED, "HIBERN8_EXIT" },
+			{ REQ_HIBERN8_ENTER, "REQ_HIBERN8_ENTER" },
+			{ REQ_HIBERN8_EXIT, "REQ_HIBERN8_EXIT" }))
+);
+
+DEFINE_EVENT(ufshcd_state_change_template, ufshcd_auto_bkops_state,
+	TP_PROTO(const char *dev_name, int state),
+	TP_ARGS(dev_name, state));
+
+TRACE_EVENT(ufshcd_clk_scaling,
+
+	TP_PROTO(const char *dev_name, const char *state, const char *clk,
+		u32 prev_state, u32 curr_state),
+
+	TP_ARGS(dev_name, state, clk, prev_state, curr_state),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__string(state, state)
+		__string(clk, clk)
+		__field(u32, prev_state)
+		__field(u32, curr_state)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__assign_str(state, state);
+		__assign_str(clk, clk);
+		__entry->prev_state = prev_state;
+		__entry->curr_state = curr_state;
+	),
+
+	TP_printk("%s: %s %s from %u to %u Hz",
+		__get_str(dev_name), __get_str(state), __get_str(clk),
+		__entry->prev_state, __entry->curr_state)
+);
+
+DECLARE_EVENT_CLASS(ufshcd_profiling_template,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+
+	TP_ARGS(dev_name, profile_info, time_us, err),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__string(profile_info, profile_info)
+		__field(s64, time_us)
+		__field(int, err)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__assign_str(profile_info, profile_info);
+		__entry->time_us = time_us;
+		__entry->err = err;
+	),
+
+	TP_printk("%s: %s: took %lld usecs, err %d",
+		__get_str(dev_name), __get_str(profile_info),
+		__entry->time_us, __entry->err)
+);
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_hibern8,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+	TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_gating,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+	TP_ARGS(dev_name, profile_info, time_us, err));
+
+DEFINE_EVENT(ufshcd_profiling_template, ufshcd_profile_clk_scaling,
+	TP_PROTO(const char *dev_name, const char *profile_info, s64 time_us,
+		 int err),
+	TP_ARGS(dev_name, profile_info, time_us, err));
+
+DECLARE_EVENT_CLASS(ufshcd_template,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		 int dev_state, int link_state),
+
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state),
+
+	TP_STRUCT__entry(
+		__field(s64, usecs)
+		__field(int, err)
+		__string(dev_name, dev_name)
+		__field(int, dev_state)
+		__field(int, link_state)
+	),
+
+	TP_fast_assign(
+		__entry->usecs = usecs;
+		__entry->err = err;
+		__assign_str(dev_name, dev_name);
+		__entry->dev_state = dev_state;
+		__entry->link_state = link_state;
+	),
+
+	TP_printk(
+		"%s: took %lld usecs, dev_state: %s, link_state: %s, err %d",
+		__get_str(dev_name),
+		__entry->usecs,
+		__print_symbolic(__entry->dev_state,
+			{ UFS_ACTIVE_PWR_MODE, "ACTIVE" },
+			{ UFS_SLEEP_PWR_MODE, "SLEEP" },
+			{ UFS_POWERDOWN_PWR_MODE, "POWERDOWN" }),
+		__print_symbolic(__entry->link_state,
+			{ UIC_LINK_OFF_STATE, "LINK_OFF" },
+			{ UIC_LINK_ACTIVE_STATE, "LINK_ACTIVE" },
+			{ UIC_LINK_HIBERN8_STATE, "LINK_HIBERN8" }),
+		__entry->err
+	)
+);
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_suspend,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_system_resume,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_suspend,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_runtime_resume,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+DEFINE_EVENT(ufshcd_template, ufshcd_init,
+	TP_PROTO(const char *dev_name, int err, s64 usecs,
+		int dev_state, int link_state),
+	TP_ARGS(dev_name, err, usecs, dev_state, link_state));
+
+TRACE_EVENT(ufshcd_command,
+	TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
+			u32 doorbell, int transfer_len, u32 intr, u64 lba,
+			u8 opcode),
+
+	TP_ARGS(dev_name, str, tag, doorbell, transfer_len, intr, lba, opcode),
+
+	TP_STRUCT__entry(
+		__string(dev_name, dev_name)
+		__string(str, str)
+		__field(unsigned int, tag)
+		__field(u32, doorbell)
+		__field(int, transfer_len)
+		__field(u32, intr)
+		__field(u64, lba)
+		__field(u8, opcode)
+	),
+
+	TP_fast_assign(
+		__assign_str(dev_name, dev_name);
+		__assign_str(str, str);
+		__entry->tag = tag;
+		__entry->doorbell = doorbell;
+		__entry->transfer_len = transfer_len;
+		__entry->intr = intr;
+		__entry->lba = lba;
+		__entry->opcode = opcode;
+	),
+
+	TP_printk(
+		"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x",
+		__get_str(str), __get_str(dev_name), __entry->tag,
+		__entry->doorbell, __entry->transfer_len,
+		__entry->intr, __entry->lba, (u32)__entry->opcode
+	)
+);
+
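ufshcd_command is the per-request event; the driver would typically emit it once when a SCSI command is issued and once on completion, with the str argument distinguishing the two. A hedged wrapper sketch (the "send"/"complete" strings and the parameter plumbing are assumptions, not the exact ufshcd call sites):

	#include <linux/device.h>
	#include <trace/events/ufs.h>

	static void ufs_trace_cmd(struct device *dev, bool is_send,
				  unsigned int tag, u32 doorbell, int len,
				  u32 intr, u64 lba, u8 opcode)
	{
		trace_ufshcd_command(dev_name(dev),
				     is_send ? "send" : "complete",
				     tag, doorbell, len, intr, lba, opcode);
	}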
+#endif /* if !defined(_TRACE_UFS_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 467e12f..7bbd5d3 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -685,7 +685,8 @@
 									\
 	{ assign; }							\
 									\
-	trace_event_buffer_commit(&fbuffer);				\
+	trace_event_buffer_commit(&fbuffer,				\
+				  sizeof(*entry) + __data_size);	\
 }
 /*
  * The ftrace_test_probe is compiled out, it is only here as a build time check
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index cd2be1c..f8e6b3b 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -22,7 +22,7 @@
 header-y += netfilter_ipv6/
 header-y += usb/
 header-y += wimax/
-
+header-y += msm_ipa.h
 genhdr-y += version.h
 
 ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/uapi/asm/a.out.h \
@@ -92,6 +92,7 @@
 header-y += coff.h
 header-y += connector.h
 header-y += const.h
+header-y += coresight-stm.h
 header-y += cramfs_fs.h
 header-y += cuda.h
 header-y += cyclades.h
@@ -116,6 +117,7 @@
 header-y += elf.h
 header-y += errno.h
 header-y += errqueue.h
+header-y += esoc_ctrl.h
 header-y += ethtool.h
 header-y += eventpoll.h
 header-y += fadvise.h
@@ -201,6 +203,7 @@
 header-y += input-event-codes.h
 header-y += in_route.h
 header-y += ioctl.h
+header-y += ion.h
 header-y += ip6_tunnel.h
 header-y += ipc.h
 header-y += ip.h
@@ -278,6 +281,9 @@
 header-y += mroute.h
 header-y += msdos_fs.h
 header-y += msg.h
+header-y += msm_ion.h
+header-y += msm_ipc.h
+header-y += msm_rmnet.h
 header-y += mtio.h
 header-y += nbd.h
 header-y += ncp_fs.h
@@ -299,6 +305,7 @@
 header-y += netlink_diag.h
 header-y += netlink.h
 header-y += netrom.h
+header-y += net_map.h
 header-y += net_namespace.h
 header-y += net_tstamp.h
 header-y += nfc.h
@@ -364,6 +371,7 @@
 header-y += rio_cm_cdev.h
 header-y += rio_mport_cdev.h
 header-y += romfs_fs.h
+header-y += rmnet_data.h
 header-y += rose.h
 header-y += route.h
 header-y += rtc.h
@@ -475,3 +483,6 @@
 header-y += zorro.h
 header-y += zorro_ids.h
 header-y += userfaultfd.h
+header-y += ipa_qmi_service_v01.h
+header-y += msm_ipa.h
+header-y += rmnet_ipa_fd_ioctl.h
diff --git a/include/uapi/linux/coresight-stm.h b/include/uapi/linux/coresight-stm.h
index 7e4272c..bdedcbbe 100644
--- a/include/uapi/linux/coresight-stm.h
+++ b/include/uapi/linux/coresight-stm.h
@@ -1,8 +1,18 @@
 #ifndef __UAPI_CORESIGHT_STM_H_
 #define __UAPI_CORESIGHT_STM_H_
 
-#define STM_FLAG_TIMESTAMPED   BIT(3)
-#define STM_FLAG_GUARANTEED    BIT(7)
+#define STM_FLAG_NONE		0x00
+#define STM_FLAG_TIMESTAMPED	0x08
+#define STM_FLAG_GUARANTEED	0x80
+
+#define	OST_ENTITY_NONE			0x00
+#define	OST_ENTITY_FTRACE_EVENTS	0x01
+#define	OST_ENTITY_TRACE_PRINTK		0x02
+#define	OST_ENTITY_TRACE_MARKER		0x04
+#define	OST_ENTITY_DEV_NODE		0x08
+#define	OST_ENTITY_DIAG			0xEE
+#define	OST_ENTITY_QVIEW		0xFE
+#define	OST_ENTITY_MAX			0xFF
 
 /*
  * The CoreSight STM supports guaranteed and invariant timing
diff --git a/include/uapi/linux/esoc_ctrl.h b/include/uapi/linux/esoc_ctrl.h
new file mode 100644
index 0000000..1b17e1c
--- /dev/null
+++ b/include/uapi/linux/esoc_ctrl.h
@@ -0,0 +1,75 @@
+#ifndef _UAPI_ESOC_CTRL_H_
+#define _UAPI_ESOC_CTRL_H_
+
+#include <linux/types.h>
+
+#define ESOC_CODE		0xCC
+
+#define ESOC_CMD_EXE		_IOW(ESOC_CODE, 1, __u32)
+#define ESOC_WAIT_FOR_REQ	_IOR(ESOC_CODE, 2, __u32)
+#define ESOC_NOTIFY		_IOW(ESOC_CODE, 3, __u32)
+#define ESOC_GET_STATUS		_IOR(ESOC_CODE, 4, __u32)
+#define ESOC_WAIT_FOR_CRASH	_IOR(ESOC_CODE, 6, __u32)
+#define ESOC_REG_REQ_ENG	_IO(ESOC_CODE, 7)
+#define ESOC_REG_CMD_ENG	_IO(ESOC_CODE, 8)
+
+/*Link types for communication with external SOCs*/
+#define HSIC		"HSIC"
+#define HSICPCIe	"HSIC+PCIe"
+#define PCIe		"PCIe"
+
+enum esoc_evt {
+	ESOC_RUN_STATE = 0x1,
+	ESOC_UNEXPECTED_RESET,
+	ESOC_ERR_FATAL,
+	ESOC_IN_DEBUG,
+	ESOC_REQ_ENG_ON,
+	ESOC_REQ_ENG_OFF,
+	ESOC_CMD_ENG_ON,
+	ESOC_CMD_ENG_OFF,
+	ESOC_INVALID_STATE,
+};
+
+enum esoc_cmd {
+	ESOC_PWR_ON = 1,
+	ESOC_PWR_OFF,
+	ESOC_FORCE_PWR_OFF,
+	ESOC_RESET,
+	ESOC_PREPARE_DEBUG,
+	ESOC_EXE_DEBUG,
+	ESOC_EXIT_DEBUG,
+};
+
+enum esoc_notify {
+	ESOC_IMG_XFER_DONE = 1,
+	ESOC_BOOT_DONE,
+	ESOC_BOOT_FAIL,
+	ESOC_IMG_XFER_RETRY,
+	ESOC_IMG_XFER_FAIL,
+	ESOC_UPGRADE_AVAILABLE,
+	ESOC_DEBUG_DONE,
+	ESOC_DEBUG_FAIL,
+	ESOC_PRIMARY_CRASH,
+	ESOC_PRIMARY_REBOOT,
+};
+
+enum esoc_req {
+	ESOC_REQ_IMG = 1,
+	ESOC_REQ_DEBUG,
+	ESOC_REQ_SHUTDOWN,
+};
+
+#ifdef __KERNEL__
+/**
+ * struct esoc_handle: Handle for clients of esoc
+ * @name: name of the external soc.
+ * @link: link of external soc.
+ * @id: id of external soc.
+ */
+struct esoc_handle {
+	const char *name;
+	const char *link;
+	unsigned int id;
+};
+#endif
+#endif
diff --git a/include/uapi/linux/fib_rules.h b/include/uapi/linux/fib_rules.h
index 14404b3..436f0f3 100644
--- a/include/uapi/linux/fib_rules.h
+++ b/include/uapi/linux/fib_rules.h
@@ -49,6 +49,8 @@
 	FRA_TABLE,	/* Extended table id */
 	FRA_FWMASK,	/* mask for netfilter mark */
 	FRA_OIFNAME,
+	FRA_UID_START,	/* UID range */
+	FRA_UID_END,
 	FRA_PAD,
 	FRA_L3MDEV,	/* iif or oif is l3mdev goto its table */
 	__FRA_MAX
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index acb2b61..92d9c68 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -236,6 +236,8 @@
 #define FICLONERANGE	_IOW(0x94, 13, struct file_clone_range)
 #define FIDEDUPERANGE	_IOWR(0x94, 54, struct file_dedupe_range)
 
+#define FIDTRIM	_IOWR('f', 128, struct fstrim_range)	/* Deep discard trim */
+
 #define	FS_IOC_GETFLAGS			_IOR('f', 1, long)
 #define	FS_IOC_SETFLAGS			_IOW('f', 2, long)
 #define	FS_IOC_GETVERSION		_IOR('v', 1, long)
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index 42fa977..0932378 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -375,6 +375,7 @@
 	FUSE_READDIRPLUS   = 44,
 	FUSE_RENAME2       = 45,
 	FUSE_LSEEK         = 46,
+	FUSE_CANONICAL_PATH = 2016,
 
 	/* CUSE specific operations */
 	CUSE_INIT          = 4096,
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index 4d024d7..e762447 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -59,6 +59,7 @@
 #define ARPHRD_LAPB	516		/* LAPB				*/
 #define ARPHRD_DDCMP    517		/* Digital's DDCMP protocol     */
 #define ARPHRD_RAWHDLC	518		/* Raw HDLC			*/
+#define ARPHRD_RAWIP	530		/* Raw IP			*/
 
 #define ARPHRD_TUNNEL	768		/* IPIP tunnel			*/
 #define ARPHRD_TUNNEL6	769		/* IP6IP6 tunnel       		*/
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 117d02e..cae866f 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -103,7 +103,9 @@
 
 #define ETH_P_802_3_MIN	0x0600		/* If the value in the ethernet type is less than this value
 					 * then the frame is Ethernet II. Else it is 802.3 */
-
+#define ETH_P_MAP	0xDA1A		/* Multiplexing and Aggregation Protocol
+					 *  NOT AN OFFICIALLY REGISTERED ID
+					 */
 /*
  *	Non DIX types. Won't clash for 1500 types.
  */
diff --git a/include/uapi/linux/if_pppolac.h b/include/uapi/linux/if_pppolac.h
new file mode 100644
index 0000000..b7eb8153
--- /dev/null
+++ b/include/uapi/linux/if_pppolac.h
@@ -0,0 +1,33 @@
+/* include/uapi/linux/if_pppolac.h
+ *
+ * Header for PPP on L2TP Access Concentrator / PPPoLAC Socket (RFC 2661)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOLAC_H
+#define _UAPI_LINUX_IF_PPPOLAC_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppolac {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OLAC */
+	int		udp_socket;
+	struct __attribute__((packed)) {
+		__u16	tunnel, session;
+	} local, remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOLAC_H */
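sockaddr_pppolac is what a userspace VPN daemon passes to connect() on an AF_PPPOX/PX_PROTO_OLAC socket: udp_socket is an already-connected UDP fd carrying the L2TP traffic, and the local/remote pairs are the L2TP tunnel and session IDs. A hedged userspace sketch, modelled loosely on how Android's mtpd drives this interface (the SOCK_DGRAM type and network byte order for the IDs are assumptions):

	#include <stdint.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <linux/if_pppox.h>

	static int pppolac_connect(int udp_fd,
				   uint16_t ltunnel, uint16_t lsession,
				   uint16_t rtunnel, uint16_t rsession)
	{
		struct sockaddr_pppolac addr = {
			.sa_family   = AF_PPPOX,
			.sa_protocol = PX_PROTO_OLAC,
			.udp_socket  = udp_fd,
			.local  = { htons(ltunnel), htons(lsession) },
			.remote = { htons(rtunnel), htons(rsession) },
		};
		int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OLAC);

		if (fd < 0)
			return -1;
		if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			return -1;
		return fd;	/* PPP channel fd handed to the PPP daemon */
	}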
diff --git a/include/uapi/linux/if_pppopns.h b/include/uapi/linux/if_pppopns.h
new file mode 100644
index 0000000..a392b52
--- /dev/null
+++ b/include/uapi/linux/if_pppopns.h
@@ -0,0 +1,32 @@
+/* include/uapi/linux/if_pppopns.h
+ *
+ * Header for PPP on PPTP Network Server / PPPoPNS Socket (RFC 2637)
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Chia-chi Yeh <chiachi@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _UAPI_LINUX_IF_PPPOPNS_H
+#define _UAPI_LINUX_IF_PPPOPNS_H
+
+#include <linux/socket.h>
+#include <linux/types.h>
+
+struct sockaddr_pppopns {
+	sa_family_t	sa_family;	/* AF_PPPOX */
+	unsigned int	sa_protocol;	/* PX_PROTO_OPNS */
+	int		tcp_socket;
+	__u16		local;
+	__u16		remote;
+} __attribute__((packed));
+
+#endif /* _UAPI_LINUX_IF_PPPOPNS_H */
diff --git a/include/uapi/linux/if_pppox.h b/include/uapi/linux/if_pppox.h
index d37bbb1..6aad18a 100644
--- a/include/uapi/linux/if_pppox.h
+++ b/include/uapi/linux/if_pppox.h
@@ -24,6 +24,8 @@
 #include <linux/if.h>
 #include <linux/if_ether.h>
 #include <linux/if_pppol2tp.h>
+#include <linux/if_pppolac.h>
+#include <linux/if_pppopns.h>
 #include <linux/in.h>
 #include <linux/in6.h>
 
@@ -59,7 +61,9 @@
 #define PX_PROTO_OE    0 /* Currently just PPPoE */
 #define PX_PROTO_OL2TP 1 /* Now L2TP also */
 #define PX_PROTO_PPTP  2
-#define PX_MAX_PROTO   3
+#define PX_PROTO_OLAC  3
+#define PX_PROTO_OPNS  4
+#define PX_MAX_PROTO   5
 
 struct sockaddr_pppox {
 	__kernel_sa_family_t sa_family;       /* address family, AF_PPPOX */
diff --git a/include/uapi/linux/ion.h b/include/uapi/linux/ion.h
new file mode 120000
index 0000000..17e8dbb
--- /dev/null
+++ b/include/uapi/linux/ion.h
@@ -0,0 +1 @@
+../../../drivers/staging/android/uapi/ion.h
\ No newline at end of file
diff --git a/include/uapi/linux/ipa_qmi_service_v01.h b/include/uapi/linux/ipa_qmi_service_v01.h
new file mode 100644
index 0000000..b2c40a4
--- /dev/null
+++ b/include/uapi/linux/ipa_qmi_service_v01.h
@@ -0,0 +1,1623 @@
+/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * This header file defines the types and structures that were defined in
+ * IPA. It contains the constant values defined, enums, structures,
+ * messages, and service message IDs (in that order). Structures that were
+ * defined in the IDL as messages contain mandatory elements, optional
+ * elements, a combination of mandatory and optional elements (mandatory
+ * always come before optionals in the structure), or nothing (null message).
+ *
+ * An optional element in a message is preceded by a uint8_t value that must
+ * be set to true if the element is going to be included. When decoding a
+ * received message, the uint8_t values will be set to true or false by the
+ * decode routine, and should be checked before accessing the values that
+ * they correspond to.
+ *
+ * Variable sized arrays are defined as static sized arrays with an unsigned
+ * integer (32 bit) preceding them that must be set to the number of elements
+ * in the array that are valid. For example:
+ *
+ * uint32_t test_opaque_len;
+ * uint8_t test_opaque[16];
+ *
+ * If only 4 elements are added to test_opaque[] then test_opaque_len must be
+ * set to 4 before sending the message. When decoding, the _len value is set
+ * by the decode routine and should be checked so that the correct number of
+ * elements in the array will be accessed.
+ */
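Concretely, the convention above means every optional TLV carries a _valid gate the sender must set, and every variable-length array carries a _len prefix bounding the valid entries. A short illustrative sketch using the hypothetical test_opaque fields from the comment (this struct is an example only, not a message defined in this file):

	#include <stdint.h>

	struct example_req {
		uint8_t  opt_field_valid;	/* optional-element gate */
		uint32_t opt_field;
		uint32_t test_opaque_len;	/* number of valid entries below */
		uint8_t  test_opaque[16];
	};

	static void fill_example_req(struct example_req *req)
	{
		req->opt_field_valid = 1;	/* include the optional TLV */
		req->opt_field = 42;

		req->test_opaque_len = 4;	/* only test_opaque[0..3] are valid */
		req->test_opaque[0] = 0xde;
		req->test_opaque[1] = 0xad;
		req->test_opaque[2] = 0xbe;
		req->test_opaque[3] = 0xef;
	}

On the receive side the decoder fills in the _valid and _len fields, and the consumer must check them before touching the corresponding values.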
+#ifndef IPA_QMI_SERVICE_V01_H
+#define IPA_QMI_SERVICE_V01_H
+
+#define QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01 2
+#define QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01 2
+#define QMI_IPA_MAX_FILTERS_V01 64
+#define QMI_IPA_MAX_PIPES_V01 20
+#define QMI_IPA_MAX_APN_V01 8
+
+#define IPA_INT_MAX	((int)(~0U>>1))
+#define IPA_INT_MIN	(-IPA_INT_MAX - 1)
+
+/* IPA definition as msm_qmi_interface.h */
+
+enum ipa_qmi_result_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use*/
+	IPA_QMI_RESULT_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+	IPA_QMI_RESULT_SUCCESS_V01 = 0,
+	IPA_QMI_RESULT_FAILURE_V01 = 1,
+	IPA_QMI_RESULT_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+enum ipa_qmi_error_type_v01 {
+	/* To force a 32 bit signed enum. Do not change or use*/
+	IPA_QMI_ERROR_TYPE_MIN_ENUM_VAL_V01 = IPA_INT_MIN,
+	IPA_QMI_ERR_NONE_V01 = 0x0000,
+	IPA_QMI_ERR_MALFORMED_MSG_V01 = 0x0001,
+	IPA_QMI_ERR_NO_MEMORY_V01 = 0x0002,
+	IPA_QMI_ERR_INTERNAL_V01 = 0x0003,
+	IPA_QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 = 0x0005,
+	IPA_QMI_ERR_INVALID_ID_V01 = 0x0029,
+	IPA_QMI_ERR_ENCODING_V01 = 0x003A,
+	IPA_QMI_ERR_INCOMPATIBLE_STATE_V01 = 0x005A,
+	IPA_QMI_ERR_NOT_SUPPORTED_V01 = 0x005E,
+	IPA_QMI_ERROR_TYPE_MAX_ENUM_VAL_V01 = IPA_INT_MAX,
+};
+
+struct ipa_qmi_response_type_v01 {
+	enum ipa_qmi_result_type_v01 result;
+	enum ipa_qmi_error_type_v01 error;
+};
+
+enum ipa_platform_type_enum_v01 {
+	IPA_PLATFORM_TYPE_ENUM_MIN_ENUM_VAL_V01 =
+	-2147483647, /* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PLATFORM_TYPE_INVALID_V01 = 0,
+	/*  Invalid platform identifier */
+	QMI_IPA_PLATFORM_TYPE_TN_V01 = 1,
+	/*  Platform identifier -	Data card device */
+	QMI_IPA_PLATFORM_TYPE_LE_V01 = 2,
+	/*  Platform identifier -	Data router device */
+	QMI_IPA_PLATFORM_TYPE_MSM_ANDROID_V01 = 3,
+	/*  Platform identifier -	MSM device with Android HLOS */
+	QMI_IPA_PLATFORM_TYPE_MSM_WINDOWS_V01 = 4,
+	/*  Platform identifier -	MSM device with Windows HLOS */
+	QMI_IPA_PLATFORM_TYPE_MSM_QNX_V01 = 5,
+	/*  Platform identifier -	MSM device with QNX HLOS */
+	IPA_PLATFORM_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_hdr_tbl_info_type_v01 {
+	uint32_t modem_offset_start;
+	/*	Offset from the start of IPA Shared memory from which
+	 *	modem driver may insert header table entries.
+	 */
+	uint32_t modem_offset_end;
+	/*	Offset from the start of IPA shared mem beyond which modem
+	 *	driver shall not insert header table entries. The space
+	 *	available for the modem driver shall include the
+	 *	modem_offset_start and modem_offset_end.
+	 */
+};  /* Type */
+
+struct ipa_route_tbl_info_type_v01 {
+	uint32_t route_tbl_start_addr;
+	/*	Identifies the start of the routing table. Denotes the offset
+	 *	from the start of the IPA Shared Mem
+	 */
+
+	uint32_t num_indices;
+	/*	Number of indices (starting from 0) that is being allocated to
+	 *	the modem. The number indicated here is also included in the
+	 *	allocation. The value of num_indices shall not exceed 31
+	 *	(5 bits used to specify the routing table index), unless there
+	 *	is a change in the hardware.
+	 */
+};  /* Type */
+
+struct ipa_modem_mem_info_type_v01 {
+
+	uint32_t block_start_addr;
+	/*	Identifies the start of the memory block allocated for the
+	 *	modem. Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	uint32_t size;
+	/*	Size of the block allocated for the modem driver */
+};  /* Type */
+
+struct ipa_hdr_proc_ctx_tbl_info_type_v01 {
+
+	uint32_t modem_offset_start;
+	/*  Offset from the start of IPA shared memory from which the modem
+	 *	driver may insert header processing context table entries.
+	 */
+
+	uint32_t modem_offset_end;
+	/*  Offset from the start of IPA shared memory beyond which the modem
+	 *	driver may not insert header proc table entries. The space
+	 *	available for the modem driver includes modem_offset_start and
+	 *	modem_offset_end.
+	 */
+};  /* Type */
+
+struct ipa_zip_tbl_info_type_v01 {
+
+	uint32_t modem_offset_start;
+	/*  Offset from the start of IPA shared memory from which the modem
+	 *	driver may insert compression/decompression command entries.
+	 */
+
+	uint32_t modem_offset_end;
+	/*  Offset from the start of IPA shared memory beyond which the modem
+	 *	driver may not insert compression/decompression command entries.
+	 *	The space available for the modem driver includes
+	 *  modem_offset_start and modem_offset_end.
+	 */
+};  /* Type */
+
+/**
+ * Request Message; Requests the modem IPA driver
+ * to perform initialization
+ */
+struct ipa_init_modem_driver_req_msg_v01 {
+
+	/* Optional */
+	/*  Platform info */
+	uint8_t platform_type_valid;
+	/* Must be set to true if platform_type is being passed */
+	enum ipa_platform_type_enum_v01 platform_type;
+	/*   Provides information about the platform (e.g. TN/MN/LE/MSM, etc.) */
+
+	/* Optional */
+	/*  Header table info */
+	uint8_t hdr_tbl_info_valid;
+	/* Must be set to true if hdr_tbl_info is being passed */
+	struct ipa_hdr_tbl_info_type_v01 hdr_tbl_info;
+	/*	Provides information about the header table */
+
+	/* Optional */
+	/*  IPV4 Routing table info */
+	uint8_t v4_route_tbl_info_valid;
+	/* Must be set to true if v4_route_tbl_info is being passed */
+	struct ipa_route_tbl_info_type_v01 v4_route_tbl_info;
+	/*	Provides information about the IPV4 routing table */
+
+	/* Optional */
+	/*  IPV6 Routing table info */
+	uint8_t v6_route_tbl_info_valid;
+	/* Must be set to true if v6_route_tbl_info is being passed */
+	struct ipa_route_tbl_info_type_v01 v6_route_tbl_info;
+	/*	Provides information about the IPV6 routing table */
+
+	/* Optional */
+	/*  IPV4 Filter table start address */
+	uint8_t v4_filter_tbl_start_addr_valid;
+	/* Must be set to true if v4_filter_tbl_start_addr is being passed */
+	uint32_t v4_filter_tbl_start_addr;
+	/*	Provides information about the starting address of IPV4 filter
+	 *	table in IPAv2 or non-hashable IPv4 filter table in IPAv3.
+	 *	Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	/* Optional */
+	/* IPV6 Filter table start address */
+	uint8_t v6_filter_tbl_start_addr_valid;
+	/* Must be set to true if v6_filter_tbl_start_addr is being passed */
+	uint32_t v6_filter_tbl_start_addr;
+	/*	Provides information about the starting address of IPV6 filter
+	 *	table in IPAv2 or non-hashable IPv6 filter table in IPAv3.
+	 *	Denotes the offset from the start of the IPA Shared Mem
+	 */
+
+	/* Optional */
+	/*  Modem memory block */
+	uint8_t modem_mem_info_valid;
+	/* Must be set to true if modem_mem_info is being passed */
+	struct ipa_modem_mem_info_type_v01 modem_mem_info;
+	/*  Provides information about the start address and the size of
+	 *	the memory block that is being allocated to the modem driver.
+	 *	Denotes the physical address
+	 */
+
+	/* Optional */
+	/*  Destination end point for control commands from modem */
+	uint8_t ctrl_comm_dest_end_pt_valid;
+	/* Must be set to true if ctrl_comm_dest_end_pt is being passed */
+	uint32_t ctrl_comm_dest_end_pt;
+	/*  Provides information about the destination end point on the
+	 *	application processor to which the modem driver can send
+	 *	control commands. The value of this parameter cannot exceed
+	 *	19 since IPA only supports 20 end points.
+	 */
+
+	/* Optional */
+	/*  Modem Bootup Information */
+	uint8_t is_ssr_bootup_valid;
+	/* Must be set to true if is_ssr_bootup is being passed */
+	uint8_t is_ssr_bootup;
+	/*	Specifies whether the modem is booting up after a modem only
+	 *	sub-system restart or not. This will let the modem driver
+	 *	know that it doesn't have to reinitialize some of the HW
+	 *	blocks because IPA has not been reset since the previous
+	 *	initialization.
+	 */
+
+	/* Optional */
+	/*  Header Processing Context Table Information */
+	uint8_t hdr_proc_ctx_tbl_info_valid;
+	/* Must be set to true if hdr_proc_ctx_tbl_info is being passed */
+	struct ipa_hdr_proc_ctx_tbl_info_type_v01 hdr_proc_ctx_tbl_info;
+	/* Provides information about the header processing context table.
+	*/
+
+	/* Optional */
+	/*  Compression Decompression Table Information */
+	uint8_t zip_tbl_info_valid;
+	/* Must be set to true if zip_tbl_info is being passed */
+	struct ipa_zip_tbl_info_type_v01 zip_tbl_info;
+	/* Provides information about the zip table.
+	*/
+
+	/* Optional */
+	/*  IPv4 Hashable Routing Table Information */
+	/** Must be set to true if v4_hash_route_tbl_info is being passed */
+	uint8_t v4_hash_route_tbl_info_valid;
+	struct ipa_route_tbl_info_type_v01 v4_hash_route_tbl_info;
+
+	/* Optional */
+	/*  IPv6 Hashable Routing Table Information */
+	/** Must be set to true if v6_hash_route_tbl_info is being passed */
+	uint8_t v6_hash_route_tbl_info_valid;
+	struct ipa_route_tbl_info_type_v01 v6_hash_route_tbl_info;
+
+	/*
+	* Optional
+	* IPv4 Hashable Filter Table Start Address
+	* Must be set to true if v4_hash_filter_tbl_start_addr
+	* is being passed
+	*/
+	uint8_t v4_hash_filter_tbl_start_addr_valid;
+	uint32_t v4_hash_filter_tbl_start_addr;
+	/* Identifies the starting address of the IPv4 hashable filter
+	* table in IPAv3 onwards. Denotes the offset from the start of
+	* the IPA shared memory.
+	*/
+
+	/* Optional
+	* IPv6 Hashable Filter Table Start Address
+	* Must be set to true if v6_hash_filter_tbl_start_addr
+	* is being passed
+	*/
+	uint8_t v6_hash_filter_tbl_start_addr_valid;
+	uint32_t v6_hash_filter_tbl_start_addr;
+	/* Identifies the starting address of the IPv6 hashable filter
+	* table in IPAv3 onwards. Denotes the offset from the start of
+	* the IPA shared memory.
+	*/
+};  /* Message */
+
+/* Response Message; Response to the modem IPA driver initialization request */
+struct ipa_init_modem_driver_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+
+	/* Optional */
+	/* Destination end point for control commands from master driver */
+	uint8_t ctrl_comm_dest_end_pt_valid;
+	/* Must be set to true if ctrl_comm_dest_ep is being passed */
+	uint32_t ctrl_comm_dest_end_pt;
+	/*	Provides information about the destination end point on the
+	 *	modem processor to which the master driver can send control
+	 *	commands. The value of this parameter cannot exceed 19 since
+	 *	IPA only supports 20 end points. This field is looked at only
+	 *	if the result in TLV RESULT_CODE is	QMI_RESULT_SUCCESS
+	 */
+
+	/* Optional */
+	/*  Default end point */
+	uint8_t default_end_pt_valid;
+	/* Must be set to true if default_end_pt is being passed */
+	uint32_t default_end_pt;
+	/*  Provides information about the default end point. The master
+	 *	driver may or may not set the register in the hardware with
+	 *	this value. The value of this parameter cannot exceed 19
+	 *	since IPA only supports 20 end points. This field is looked
+	 *	at only if the result in TLV RESULT_CODE is QMI_RESULT_SUCCESS
+	 */
+
+	/* Optional */
+	/*  Modem Driver Initialization Pending */
+	uint8_t modem_driver_init_pending_valid;
+	/* Must be set to true if modem_driver_init_pending is being passed */
+	uint8_t modem_driver_init_pending;
+	/*
+	 * Identifies if second level message handshake is needed
+	 *	between drivers to indicate when IPA HWP loading is completed.
+	 *	If this is set by modem driver, AP driver will need to wait
+	 *	for a INIT_MODEM_DRIVER_CMPLT message before communicating with
+	 *	IPA HWP.
+	 */
+};  /* Message */
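+
+/*
+ * Illustrative usage sketch (documentation only): QMI optional TLVs follow a
+ * valid/value pattern, so a value may be read only after its *_valid flag has
+ * been checked. A hypothetical consumer of the response above could do:
+ *
+ *	bool wait_for_cmplt = resp->modem_driver_init_pending_valid &&
+ *			      resp->modem_driver_init_pending;
+ *
+ * If wait_for_cmplt is true, the AP driver waits for the
+ * QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ message (defined below) before
+ * communicating with the IPA HWP.
+ */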
+
+/*
+ * Request Message; Request from Modem IPA driver to indicate
+ *	modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_req_msg_v01 {
+	/* Mandatory */
+	/*  Modem Driver init complete status; */
+	uint8_t status;
+	/*
+	 * Specifies whether the modem driver initialization is complete
+	 *	including the micro controller image loading.
+	 */
+};  /* Message */
+
+/*
+ * Response Message; Request from Modem IPA driver to indicate
+ *	modem driver init completion
+ */
+struct ipa_init_modem_driver_cmplt_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+/*	Request Message; This is the message that is exchanged between the
+ *	control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_req_msg_v01 {
+	/* Optional */
+	/*  Master driver initialization completion */
+	uint8_t master_driver_init_complete_valid;
+	/* Must be set to true if master_driver_init_complete is being passed */
+	uint8_t master_driver_init_complete;
+	/*  If set to TRUE, this field indicates that the client is
+	 *	interested in getting indications about the completion
+	 *	of the initialization sequence of the master driver.
+	 *	Setting this field in the request message makes sense
+	 *	only when the QMI_IPA_INDICATION_REGISTER_REQ is being
+	 *	originated from the modem driver
+	 */
+
+	/* Optional */
+	/*  Data Usage Quota Reached */
+	uint8_t data_usage_quota_reached_valid;
+	/*  Must be set to true if data_usage_quota_reached is being passed */
+	uint8_t data_usage_quota_reached;
+	/*  If set to TRUE, this field indicates that the client wants to
+	 *  receive indications about reaching the data usage quota that was
+	 *  previously set via QMI_IPA_SET_DATA_USAGE_QUOTA. Setting this field
+	 *  in the request message makes sense only when the
+	 *  QMI_IPA_INDICATION_REGISTER_REQ is being originated from the Master
+	 *  driver
+	 */
+};  /* Message */
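+
+/*
+ * Illustrative usage sketch (documentation only; the memset and the QMI send
+ * are assumed to be handled by the caller). Registering the modem-side
+ * control point for the master driver init complete indication only:
+ *
+ *	struct ipa_indication_reg_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.master_driver_init_complete_valid = 1;
+ *	req.master_driver_init_complete = 1;
+ *
+ * data_usage_quota_reached is left unset, so no quota indications are
+ * requested.
+ */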
+
+
+/* Response Message; This is the message that is exchanged between the
+ *	control point and the service in order to register for indications.
+ */
+struct ipa_indication_reg_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+
+/*	Indication Message; Indication sent to the Modem IPA driver from
+ *	master IPA driver about initialization being complete.
+ */
+struct ipa_master_driver_init_complt_ind_msg_v01 {
+	/* Mandatory */
+	/*  Master driver initialization completion status */
+	struct ipa_qmi_response_type_v01 master_driver_init_status;
+	/*	Indicates the status of initialization. If everything went
+	 *	as expected, this field is set to SUCCESS. ERROR is set
+	 *	otherwise. Extended error info may be used to convey
+	 *	additional information about the error
+	 */
+};  /* Message */
+
+struct ipa_ipfltr_range_eq_16_type_v01 {
+	uint8_t offset;
+	/*	Specifies the offset from the IHL (Internet Header length) */
+
+	uint16_t range_low;
+	/*	Specifies the lower bound of the range */
+
+	uint16_t range_high;
+	/*	Specifies the upper bound of the range */
+};  /* Type */
+
+struct ipa_ipfltr_mask_eq_32_type_v01 {
+	uint8_t offset;
+	/*	Specifies the offset either from IHL or from the start of
+	 *	the IP packet. This depends on the equation that this structure
+	 *	is used in.
+	 */
+
+	uint32_t mask;
+	/*	Specifies the mask that has to be used in the comparison.
+	 *	The field is ANDed with the mask and compared against the value.
+	 */
+
+	uint32_t value;
+	/*	Specifies the 32 bit value that is used in the comparison. */
+};  /* Type */
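+
+/*
+ * Illustrative sketch (documentation only): the masked 32 bit equation above
+ * matches when the packet word at the given offset, ANDed with the mask,
+ * equals the value. Conceptually, with a hypothetical read_u32_at() helper:
+ *
+ *	uint32_t word = read_u32_at(pkt, eq->offset);
+ *	bool hit = ((word & eq->mask) == eq->value);
+ *
+ * The 128 bit variant further below applies the same AND-and-compare logic
+ * byte by byte.
+ */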
+
+struct ipa_ipfltr_eq_16_type_v01 {
+	uint8_t offset;
+	/*  Specifies the offset into the packet */
+
+	uint16_t value;
+	/* Specifies the 16 bit value that should be used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_eq_32_type_v01 {
+	uint8_t offset;
+	/* Specifies the offset into the packet */
+
+	uint32_t value;
+	/* Specifies the 32 bit value that should be used in the comparison. */
+};  /* Type */
+
+struct ipa_ipfltr_mask_eq_128_type_v01 {
+	uint8_t offset;
+	/* Specifies the offset into the packet */
+
+	uint8_t mask[16];
+	/*  Specifies the mask that has to be used in the comparison.
+	 *	The field is ANDed with the mask and compared against the value.
+	 */
+
+	uint8_t value[16];
+	/* Specifies the 128 bit value that should be used in the comparison. */
+};  /* Type */
+
+
+struct ipa_filter_rule_type_v01 {
+	uint16_t rule_eq_bitmap;
+	/* 16-bit bitmask indicating which equations are valid in this rule */
+
+	uint8_t tos_eq_present;
+	/* Specifies if a type of service check rule is present */
+
+	uint8_t tos_eq;
+	/* The value to check against the type of service (ipv4) field */
+
+	uint8_t protocol_eq_present;
+	/* Specifies if a protocol check rule is present */
+
+	uint8_t protocol_eq;
+	/* The value to check against the protocol field */
+
+	uint8_t num_ihl_offset_range_16;
+	/*  The number of 16 bit range check rules at the location
+	 *	determined by the IP header length plus a given offset
+	 *	in this rule. See the definition of
+	 *	ipa_ipfltr_range_eq_16_type_v01 for details. The value of
+	 *	this field cannot exceed IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS,
+	 *	which is set to 2
+	 */
+
+	struct ipa_ipfltr_range_eq_16_type_v01
+		ihl_offset_range_16[QMI_IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS_V01];
+	/*	Array of the registered IP header length offset 16 bit range
+	 *	check rules.
+	 */
+
+	uint8_t num_offset_meq_32;
+	/*  The number of 32 bit masked comparison rules present
+	 *  in this rule
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		offset_meq_32[QMI_IPA_IPFLTR_NUM_MEQ_32_EQNS_V01];
+	/*  An array of all the possible 32bit masked comparison rules
+	 *	in this rule
+	 */
+
+	uint8_t tc_eq_present;
+	/*  Specifies if the traffic class rule is present in this rule */
+
+	uint8_t tc_eq;
+	/* The value against which the IPV4 traffic class field has to
+	* be checked
+	*/
+
+	uint8_t flow_eq_present;
+	/* Specifies if the "flow equals" rule is present in this rule */
+
+	uint32_t flow_eq;
+	/* The value against which the IPV6 flow field has to be checked */
+
+	uint8_t ihl_offset_eq_16_present;
+	/*	Specifies if there is a 16 bit comparison required at the
+	 *	location in the packet determined by "Internet Header Length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_16_type_v01 ihl_offset_eq_16;
+	/* The 16 bit comparison equation */
+
+	uint8_t ihl_offset_eq_32_present;
+	/*	Specifies if there is a 32 bit comparison required at the
+	 *	location in the packet determined by "Internet Header Length
+	 *	+ specified offset"
+	 */
+
+	struct ipa_ipfltr_eq_32_type_v01 ihl_offset_eq_32;
+	/*	The 32 bit comparison equation */
+
+	uint8_t num_ihl_offset_meq_32;
+	/*	The number of 32 bit masked comparison equations in this
+	 *	rule. The location of the packet to be compared is
+	 *	determined by the IP Header Length + the given offset
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+		ihl_offset_meq_32[QMI_IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS_V01];
+	/*	Array of 32 bit masked comparison equations.
+	*/
+
+	uint8_t num_offset_meq_128;
+	/*	The number of 128 bit comparison equations in this rule */
+
+	struct ipa_ipfltr_mask_eq_128_type_v01
+		offset_meq_128[QMI_IPA_IPFLTR_NUM_MEQ_128_EQNS_V01];
+	/*	Array of 128 bit comparison equations. The location in the
+	 *	packet is determined by the specified offset
+	 */
+
+	uint8_t metadata_meq32_present;
+	/*  Boolean indicating if the 32 bit masked comparison equation
+	 *	is present or not. Comparison is done against the metadata
+	 *	in IPA. Metadata can either be extracted from the packet
+	 *	header or from the "metadata" register.
+	 */
+
+	struct ipa_ipfltr_mask_eq_32_type_v01
+			metadata_meq32;
+	/* The metadata 32 bit masked comparison equation */
+
+	uint8_t ipv4_frag_eq_present;
+	/* Specifies if the IPv4 Fragment equation is present in this rule */
+};  /* Type */
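+
+/*
+ * Illustrative usage sketch (documentation only), assuming the caller wants a
+ * minimal rule that matches on the IP protocol field only (value 6, i.e.
+ * TCP). The exact bit layout of rule_eq_bitmap is hardware defined and is not
+ * documented here, so it is left to the installing driver in this sketch:
+ *
+ *	struct ipa_filter_rule_type_v01 rule;
+ *
+ *	memset(&rule, 0, sizeof(rule));
+ *	rule.protocol_eq_present = 1;
+ *	rule.protocol_eq = 6;
+ *
+ * All num_* counters stay zero, so none of the equation arrays are consulted.
+ */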
+
+
+enum ipa_ip_type_enum_v01 {
+	IPA_IP_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use*/
+	QMI_IPA_IP_TYPE_INVALID_V01 = 0,
+	/*  Invalid IP type identifier */
+	QMI_IPA_IP_TYPE_V4_V01 = 1,
+	/*  IP V4 type */
+	QMI_IPA_IP_TYPE_V6_V01 = 2,
+	/*  IP V6 type */
+	QMI_IPA_IP_TYPE_V4V6_V01 = 3,
+	/*  Applies to both IP types */
+	IPA_IP_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+
+enum ipa_filter_action_enum_v01 {
+	IPA_FILTER_ACTION_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum. Do not change or use */
+	QMI_IPA_FILTER_ACTION_INVALID_V01 = 0,
+	/*  Invalid action on filter hit */
+	QMI_IPA_FILTER_ACTION_SRC_NAT_V01 = 1,
+	/*  Pass packet to NAT block for Source NAT */
+	QMI_IPA_FILTER_ACTION_DST_NAT_V01 = 2,
+	/*  Pass packet to NAT block for Destination NAT */
+	QMI_IPA_FILTER_ACTION_ROUTING_V01 = 3,
+	/*  Pass packet to Routing block */
+	QMI_IPA_FILTER_ACTION_EXCEPTION_V01 = 4,
+	/*  Treat packet as exception and send to exception pipe */
+	IPA_FILTER_ACTION_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+struct ipa_filter_spec_type_v01 {
+	uint32_t filter_spec_identifier;
+	/*	This field is used to identify a filter spec in the list
+	 *	of filter specs being sent from the client. This field
+	 *	is applicable only in the filter install request and response.
+	 */
+
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	uint8_t is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	uint32_t route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	uint8_t is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	uint32_t mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+};  /* Type */
+
+struct ipa_filter_spec_ex_type_v01 {
+	enum ipa_ip_type_enum_v01 ip_type;
+	/*	This field identifies the IP type for which this rule is
+	 *	applicable. The driver needs to identify the filter table
+	 *	(V6 or V4) and this field is essential for that
+	 */
+
+	struct ipa_filter_rule_type_v01 filter_rule;
+	/*	This field specifies the rules in the filter spec. These rules
+	 *	are the ones that are matched against fields in the packet.
+	 */
+
+	enum ipa_filter_action_enum_v01 filter_action;
+	/*	This field specifies the action to be taken when a filter match
+	 *	occurs. The remote side should install this information into the
+	 *	hardware along with the filter equations.
+	 */
+
+	uint8_t is_routing_table_index_valid;
+	/*	Specifies whether the routing table index is present or not.
+	 *	If the action is "QMI_IPA_FILTER_ACTION_EXCEPTION", this
+	 *	parameter need not be provided.
+	 */
+
+	uint32_t route_table_index;
+	/*	This is the index in the routing table that should be used
+	 *	to route the packets if the filter rule is hit
+	 */
+
+	uint8_t is_mux_id_valid;
+	/*	Specifies whether the mux_id is valid */
+
+	uint32_t mux_id;
+	/*	This field identifies the QMAP MUX ID. As a part of QMAP
+	 *	protocol, several data calls may be multiplexed over the
+	 *	same physical transport channel. This identifier is used to
+	 *	identify one such data call. The maximum value for this
+	 *	identifier is 255.
+	 */
+
+	uint32_t rule_id;
+	/* Rule Id of the given filter. The Rule Id is populated in the rule
+	* header when installing the rule in IPA.
+	*/
+
+	uint8_t is_rule_hashable;
+	/* Specifies whether the given rule is hashable. */
+};  /* Type */
+
+
+/*  Request Message; This is the message that is exchanged between the
+ *	control point and the service in order to request the installation
+ *	of filtering rules in the hardware block by the remote side.
+ */
+struct ipa_install_fltr_rule_req_msg_v01 {
+	/* Optional */
+	/*  Filter specification to be installed in the hardware */
+	uint8_t filter_spec_list_valid;
+	/* Must be set to true if filter_spec_list is being passed */
+	uint32_t filter_spec_list_len;
+	/* Must be set to # of elements in filter_spec_list */
+	struct ipa_filter_spec_type_v01
+		filter_spec_list[QMI_IPA_MAX_FILTERS_V01];
+	/*	This structure defines the list of filters that have
+	 *		to be installed in the hardware. The driver installing
+	 *		these rules shall do so in the same order as specified
+	 *		in this list.
+	 */
+
+	/* Optional */
+	/*  Pipe index to install rule */
+	uint8_t source_pipe_index_valid;
+	/* Must be set to true if source_pipe_index is being passed */
+	uint32_t source_pipe_index;
+	/*	This is the source pipe on which the filter rule is to be
+	 *	installed. The requestor may not always know the pipe
+	 *	indices. If not specified, the receiver shall install
+	 *	this rule on all the pipes that it controls through
+	 *	which data may be fed into IPA.
+	 */
+
+	/* Optional */
+	/*  Total number of IPv4 filters in the filter spec list */
+	uint8_t num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	uint32_t num_ipv4_filters;
+	/*   Number of IPv4 rules included in filter spec list */
+
+	/* Optional */
+	/*  Total number of IPv6 filters in the filter spec list */
+	uint8_t num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	uint32_t num_ipv6_filters;
+	/* Number of IPv6 rules included in filter spec list */
+
+	/* Optional */
+	/*  List of XLAT filter indices in the filter spec list */
+	uint8_t xlat_filter_indices_list_valid;
+	/* Must be set to true if xlat_filter_indices_list
+	 * is being passed
+	 */
+	uint32_t xlat_filter_indices_list_len;
+	/* Must be set to # of elements in xlat_filter_indices_list */
+	uint32_t xlat_filter_indices_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of XLAT filter indices. Filter rules at specified indices
+	 * will need to be modified by the receiver if the PDN is XLAT
+	 * before installing them on the associated IPA consumer pipe.
+	 */
+
+	/* Optional */
+	/*  Extended Filter Specification  */
+	uint8_t filter_spec_ex_list_valid;
+	/* Must be set to true if filter_spec_ex_list is being passed */
+	uint32_t filter_spec_ex_list_len;
+	/* Must be set to # of elements in filter_spec_ex_list */
+	struct ipa_filter_spec_ex_type_v01
+		filter_spec_ex_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of filter specifications of filters that must be installed in
+	 *	the IPAv3.x hardware.
+	 *	The driver installing these rules must do so in the same
+	 *	order as specified in this list.
+	 */
+};  /* Message */
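+
+/*
+ * Illustrative usage sketch (documentation only): installing a single IPv4
+ * filter spec through the request above. Field and constant names are taken
+ * from this file; the rule contents and the QMI send call are assumed:
+ *
+ *	struct ipa_install_fltr_rule_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.filter_spec_list_valid = 1;
+ *	req.filter_spec_list_len = 1;
+ *	req.filter_spec_list[0].filter_spec_identifier = 1;
+ *	req.filter_spec_list[0].ip_type = QMI_IPA_IP_TYPE_V4_V01;
+ *	req.filter_spec_list[0].filter_action = QMI_IPA_FILTER_ACTION_ROUTING_V01;
+ *	req.num_ipv4_filters_valid = 1;
+ *	req.num_ipv4_filters = 1;
+ */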
+
+struct ipa_filter_rule_identifier_to_handle_map_v01 {
+	uint32_t filter_spec_identifier;
+	/*	This field is used to identify a filter spec in the list of
+	 *	filter specs being sent from the client. This field is
+	 *	applicable only in the filter install request and response.
+	 */
+	uint32_t filter_handle;
+	/*  This field is used to identify a rule in any subsequent message.
+	 *	This is a value that is provided by the server to the control
+	 *	point
+	 */
+};  /* Type */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to request the
+ * installation of filtering rules in the hardware block by
+ * the remote side.
+ */
+struct ipa_install_fltr_rule_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*	Standard response type. Contains the following data members:
+	 *	- qmi_result_type -- QMI_RESULT_SUCCESS or QMI_RESULT_FAILURE
+	 *	- qmi_error_type  -- Error code. Possible error code values are
+	 *	described in the error codes section of each message definition.
+	 */
+
+	/* Optional */
+	/*  Filter Handle List */
+	uint8_t filter_handle_list_valid;
+	/* Must be set to true if filter_handle_list is being passed */
+	uint32_t filter_handle_list_len;
+	/* Must be set to # of elements in filter_handle_list */
+	struct ipa_filter_rule_identifier_to_handle_map_v01
+		filter_handle_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of handles returned to the control point. Each handle is
+	 *	mapped to the rule identifier that was specified in the
+	 *	request message. Any further reference to the rule is done
+	 *	using the filter handle.
+	 */
+
+	/* Optional */
+	/*  Rule id List */
+	uint8_t rule_id_valid;
+	/* Must be set to true if rule_id is being passed */
+	uint32_t rule_id_len;
+	/* Must be set to # of elements in rule_id */
+	uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * List of rule ids returned to the control point.
+	 *	Any further reference to the rule is done using the
+	 *	filter rule id specified in this list.
+	 */
+};  /* Message */
+
+struct ipa_filter_handle_to_index_map_v01 {
+	uint32_t filter_handle;
+	/*	This is a handle that was given to the remote client that
+	 *	requested the rule addition.
+	 */
+	uint32_t filter_index;
+	/*	This index denotes the location in a filter table, where the
+	 *	filter rule has been installed. The maximum value of this
+	 *	field is 64.
+	 */
+};  /* Type */
+
+/* Request Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_req_msg_v01 {
+	/*	Mandatory	*/
+	/*  Pipe index	*/
+	uint32_t source_pipe_index;
+	/*	This is the source pipe on which the filter rule has been
+	 *	installed or was attempted to be installed
+	 */
+
+	/* Mandatory */
+	/*  Installation Status */
+	enum ipa_qmi_result_type_v01 install_status;
+	/*	This is the status of installation. If this indicates
+	 *	SUCCESS, other optional fields carry additional
+	 *	information
+	 */
+
+	/* Mandatory */
+	/*  List of Filter Indices */
+	uint32_t filter_index_list_len;
+	/* Must be set to # of elements in filter_index_list */
+	struct ipa_filter_handle_to_index_map_v01
+		filter_index_list[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * Provides the list of filter indices and the corresponding
+	 *	filter handle. If the installation_status indicates a
+	 *	failure, the filter indices must be set to a reserved
+	 *	index (255).
+	 */
+
+	/* Optional */
+	/*  Embedded pipe index */
+	uint8_t embedded_pipe_index_valid;
+	/* Must be set to true if embedded_pipe_index is being passed */
+	uint32_t embedded_pipe_index;
+	/*	This index denotes the embedded pipe number on which a call to
+	 *	the same PDN has been made. If this field is set, it denotes
+	 *	that this is a use case where PDN sharing is happening. The
+	 *	embedded pipe is used to send data from the embedded client
+	 *	in the device
+	 */
+
+	/* Optional */
+	/*  Retain Header Configuration */
+	uint8_t retain_header_valid;
+	/* Must be set to true if retain_header is being passed */
+	uint8_t retain_header;
+	/*	This field indicates if the driver installing the rule has
+	 *	turned on the "retain header" bit. If this is true, the
+	 *	header that is removed by IPA is reinserted after the
+	 *	packet processing is completed.
+	 */
+
+	/* Optional */
+	/*  Embedded call Mux Id */
+	uint8_t embedded_call_mux_id_valid;
+	/* Must be set to true if embedded_call_mux_id is being passed */
+	uint32_t embedded_call_mux_id;
+	/*	This identifies one of the many calls that have been originated
+	 *	on the embedded pipe. This is how we identify the PDN gateway
+	 *	to which traffic from the source pipe has to flow.
+	 */
+
+	/* Optional */
+	/*  Total number of IPv4 filters in the filter index list */
+	uint8_t num_ipv4_filters_valid;
+	/* Must be set to true if num_ipv4_filters is being passed */
+	uint32_t num_ipv4_filters;
+	/* Number of IPv4 rules included in filter index list */
+
+	/* Optional */
+	/*  Total number of IPv6 filters in the filter index list */
+	uint8_t num_ipv6_filters_valid;
+	/* Must be set to true if num_ipv6_filters is being passed */
+	uint32_t num_ipv6_filters;
+	/* Number of IPv6 rules included in filter index list */
+
+	/* Optional */
+	/*  Start index on IPv4 filters installed on source pipe */
+	uint8_t start_ipv4_filter_idx_valid;
+	/* Must be set to true if start_ipv4_filter_idx is being passed */
+	uint32_t start_ipv4_filter_idx;
+	/* Start index of IPv4 rules in filter index list */
+
+	/* Optional */
+	/*  Start index on IPv6 filters installed on source pipe */
+	uint8_t start_ipv6_filter_idx_valid;
+	/* Must be set to true if start_ipv6_filter_idx is being passed */
+	uint32_t start_ipv6_filter_idx;
+	/* Start index of IPv6 rules in filter index list */
+
+	/* Optional */
+	/*  List of Rule Ids */
+	uint8_t rule_id_valid;
+	/* Must be set to true if rule_id is being passed */
+	uint32_t rule_id_len;
+	/* Must be set to # of elements in rule_id */
+	uint32_t rule_id[QMI_IPA_MAX_FILTERS_V01];
+	/*
+	 * Provides the list of Rule Ids of rules added in IPA on the given
+	 *	source pipe index. If the install_status TLV indicates a
+	 *	failure, the Rule Ids in this list must be set to a reserved
+	 *	index (255).
+	 */
+};  /* Message */
+
+/* Response Message; This is the message that is exchanged between the
+ * control point and the service in order to notify the remote driver
+ * of the installation of the filter rule supplied earlier by the
+ * remote driver.
+ */
+struct ipa_fltr_installed_notif_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/*	Standard response type */
+};  /* Message */
+
+/* Request Message; Notifies the remote driver of the need to clear the data
+ * path to prevent the IPA from being blocked at the head of the processing
+ * pipeline
+ */
+struct ipa_enable_force_clear_datapath_req_msg_v01 {
+	/* Mandatory */
+	/*  Pipe Mask */
+	uint32_t source_pipe_bitmask;
+	/* Set of consumer (source) pipes that must be clear of
+	 * active data transfers.
+	 */
+
+	/* Mandatory */
+	/* Request ID */
+	uint32_t request_id;
+	/* Identifies the ID of the request that is sent to the server.
+	 * The same request ID is used in the message to remove the force_clear
+	 * request. The server is expected to keep track of the request ID and
+	 * the source_pipe_bitmask so that it can revert as needed
+	 */
+
+	/* Optional */
+	/*  Source Throttle State */
+	uint8_t throttle_source_valid;
+	/* Must be set to true if throttle_source is being passed */
+	uint8_t throttle_source;
+	/*  Specifies whether the server is to throttle the data from
+	 *	these consumer (source) pipes after clearing the existing
+	 *	data present in the IPA that was pulled from these pipes.
+	 *	The server is expected to put all the source pipes in the
+	 *	source_pipe_bitmask in the same state
+	 */
+};  /* Message */
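+
+/*
+ * Illustrative usage sketch (documentation only): source_pipe_bitmask is a
+ * plain bitmask of pipe indices, so force clearing, for example, pipes 3 and
+ * 5 under request ID 1 could look like:
+ *
+ *	struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.source_pipe_bitmask = (1 << 3) | (1 << 5);
+ *	req.request_id = 1;
+ *
+ * The same request_id is later used in the disable request below to undo
+ * the force clear.
+ */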
+
+/* Response Message; Notifies the remote driver of the need to clear the
+ * data path to prevent the IPA from being blocked at the head of the
+ * processing pipeline
+ */
+struct ipa_enable_force_clear_datapath_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+/* Request Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_req_msg_v01 {
+	/* Mandatory */
+	/* Request ID */
+	uint32_t request_id;
+	/* Identifies the request that was sent to the server to
+	 * forcibly clear the data path. This request simply undoes
+	 * the operation done in that request
+	 */
+};  /* Message */
+
+/* Response Message; Notifies the remote driver that the forceful clearing
+ * of the data path can be lifted
+ */
+struct ipa_disable_force_clear_datapath_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type */
+};  /* Message */
+
+enum ipa_peripheral_speed_enum_v01 {
+	IPA_PERIPHERAL_SPEED_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PER_USB_FS_V01 = 1,
+	/*  Full-speed USB connection */
+	QMI_IPA_PER_USB_HS_V01 = 2,
+	/*  High-speed USB connection */
+	QMI_IPA_PER_USB_SS_V01 = 3,
+	/*  Super-speed USB connection */
+	QMI_IPA_PER_WLAN_V01 = 4,
+	/*  WLAN connection */
+	IPA_PERIPHERAL_SPEED_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use*/
+};
+
+enum ipa_pipe_mode_enum_v01 {
+	IPA_PIPE_MODE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PIPE_MODE_HW_V01 = 1,
+	/*  Pipe is connected with a hardware block */
+	QMI_IPA_PIPE_MODE_SW_V01 = 2,
+	/*  Pipe is controlled by the software */
+	IPA_PIPE_MODE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+enum ipa_peripheral_type_enum_v01 {
+	IPA_PERIPHERAL_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_PERIPHERAL_USB_V01 = 1,
+	/*  Specifies a USB peripheral */
+	QMI_IPA_PERIPHERAL_HSIC_V01 = 2,
+	/*  Specifies an HSIC peripheral */
+	QMI_IPA_PERIPHERAL_PCIE_V01 = 3,
+	/*  Specifies a PCIe peripheral */
+	IPA_PERIPHERAL_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_config_req_msg_v01 {
+	/* Optional */
+	/*  Peripheral Type */
+	uint8_t peripheral_type_valid;
+	/* Must be set to true if peripheral_type is being passed */
+	enum ipa_peripheral_type_enum_v01 peripheral_type;
+	/* Informs the remote driver about the peripheral for
+	 * which this configuration information is relevant. Values:
+	 *	- QMI_IPA_PERIPHERAL_USB (1) -- Specifies a USB peripheral
+	 *	- QMI_IPA_PERIPHERAL_HSIC(2) -- Specifies an HSIC peripheral
+	 *	- QMI_IPA_PERIPHERAL_PCIE(3) -- Specifies a PCIe peripheral
+	 */
+
+	/* Optional */
+	/*  HW Deaggregation Support */
+	uint8_t hw_deaggr_supported_valid;
+	/* Must be set to true if hw_deaggr_supported is being passed */
+	uint8_t hw_deaggr_supported;
+	/* Informs the remote driver whether the local IPA driver
+	 * allows de-aggregation to be performed in the hardware
+	 */
+
+	/* Optional */
+	/*  Maximum Aggregation Frame Size */
+	uint8_t max_aggr_frame_size_valid;
+	/* Must be set to true if max_aggr_frame_size is being passed */
+	uint32_t max_aggr_frame_size;
+	/* Specifies the maximum size of the aggregated frame that
+	 * the remote driver can expect from this execution environment
+	 *	- Valid range: 128 bytes to 32768 bytes
+	 */
+
+	/* Optional */
+	/*  IPA Ingress Pipe Mode */
+	uint8_t ipa_ingress_pipe_mode_valid;
+	/* Must be set to true if ipa_ingress_pipe_mode is being passed */
+
+	enum ipa_pipe_mode_enum_v01 ipa_ingress_pipe_mode;
+	/* Indicates to the remote driver if the ingress pipe into the
+	 *	IPA is in direct connection with another hardware block or
+	 *	if the producer of data to this ingress pipe is a software
+	 *  module. Values:
+	 *	-QMI_IPA_PIPE_MODE_HW(1) --Pipe is connected with hardware block
+	 *	-QMI_IPA_PIPE_MODE_SW(2) --Pipe is controlled by the software
+	 */
+
+	/* Optional */
+	/*  Peripheral Speed Info */
+	uint8_t peripheral_speed_info_valid;
+	/* Must be set to true if peripheral_speed_info is being passed */
+
+	enum ipa_peripheral_speed_enum_v01 peripheral_speed_info;
+	/* Indicates the speed that the peripheral connected to the IPA supports.
+	 * Values:
+	 *	- QMI_IPA_PER_USB_FS (1) --  Full-speed USB connection
+	 *	- QMI_IPA_PER_USB_HS (2) --  High-speed USB connection
+	 *	- QMI_IPA_PER_USB_SS (3) --  Super-speed USB connection
+	 *  - QMI_IPA_PER_WLAN   (4) --  WLAN connection
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Time limit */
+	uint8_t dl_accumulation_time_limit_valid;
+	/* Must be set to true if dl_accumulation_time_limit is being passed */
+	uint32_t dl_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is accumulated in the downlink direction before it is pushed into the
+	 * IPA (downlink is with respect to the WWAN air interface)
+	 * - Units: milliseconds
+	 * - Maximum value: 255
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Packet limit */
+	uint8_t dl_accumulation_pkt_limit_valid;
+	/* Must be set to true if dl_accumulation_pkt_limit is being passed */
+	uint32_t dl_accumulation_pkt_limit;
+	/* Informs the remote driver about the number of packets
+	 * that are to be accumulated in the downlink direction before they
+	 * are pushed into the IPA - Maximum value: 1023
+	 */
+
+	/* Optional */
+	/*  Downlink Accumulation Byte Limit */
+	uint8_t dl_accumulation_byte_limit_valid;
+	/* Must be set to true if dl_accumulation_byte_limit is being passed */
+	uint32_t dl_accumulation_byte_limit;
+	/* Informs the remote driver about the number of bytes
+	 * that are to be accumulated in the downlink direction before it
+	 * is pushed into the IPA - Maximum value: TBD
+	 */
+
+	/* Optional */
+	/*  Uplink Accumulation Time Limit */
+	uint8_t ul_accumulation_time_limit_valid;
+	/* Must be set to true if ul_accumulation_time_limit is being passed */
+	uint32_t ul_accumulation_time_limit;
+	/* Informs the remote driver about the time for which data
+	 * is to be accumulated in the uplink direction before it is pushed into
+	 * the IPA (uplink is with respect to the WWAN air interface).
+	 * - Units: milliseconds
+	 * - Maximum value: 255
+	 */
+
+	/* Optional */
+	/*  HW Control Flags */
+	uint8_t hw_control_flags_valid;
+	/* Must be set to true if hw_control_flags is being passed */
+	uint32_t hw_control_flags;
+	/* Informs the remote driver about the hardware control flags:
+	 *	- Bit 0: IPA_HW_FLAG_HALT_SYSTEM_ON_NON_TERMINAL_FAILURE --
+	 *	Indicates to the hardware that it must not continue with
+	 *	any subsequent operation even if the failure is not terminal
+	 *	- Bit 1: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_ERORR --
+	 *	Indicates to the hardware that it is not required to report
+	 *	channel errors to the host.
+	 *	- Bit 2: IPA_HW_FLAG_NO_REPORT_MHI_CHANNEL_WAKE_UP --
+	 *	Indicates to the hardware that it is not required to generate
+	 *	wake-up events to the host.
+	 *	- Bit 4: IPA_HW_FLAG_WORK_OVER_DDR --
+	 *	Indicates to the hardware that it is accessing addresses in
+	 *  the DDR and not over PCIe
+	 *	- Bit 5: IPA_HW_FLAG_INTERRUPT_MODE_CTRL_FLAG --
+	 *	Indicates whether the device must
+	 *	raise an event to let the host know that it is going into an
+	 *	interrupt mode (no longer polling for data/buffer availability)
+	 */
+
+	/* Optional */
+	/*  Uplink MSI Event Threshold */
+	uint8_t ul_msi_event_threshold_valid;
+	/* Must be set to true if ul_msi_event_threshold is being passed */
+	uint32_t ul_msi_event_threshold;
+	/* Informs the remote driver about the threshold that will
+	 * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt.
+	 * This threshold is applicable for data moved in the UL direction.
+	 * - Maximum value: 65535
+	 */
+
+	/* Optional */
+	/*  Downlink MSI Event Threshold */
+	uint8_t dl_msi_event_threshold_valid;
+	/* Must be set to true if dl_msi_event_threshold is being passed */
+	uint32_t dl_msi_event_threshold;
+	/* Informs the remote driver about the threshold that will
+	 * cause an interrupt (MSI) to be fired to the host. This ensures
+	 * that the remote driver does not accumulate an excessive number of
+	 * events before firing an interrupt.
+	 * This threshold is applicable for data that is moved in the
+	 * DL direction - Maximum value: 65535
+	 */
+
+	/* Optional */
+	/*  Uplink Fifo Size */
+	uint8_t ul_fifo_size_valid;
+	/* Must be set to true if ul_fifo_size is being passed */
+	uint32_t ul_fifo_size;
+	/*
+	 * Informs the remote driver about the total Uplink xDCI
+	 *	buffer size that holds the complete aggregated frame
+	 *	or BAM data fifo size of the peripheral channel/pipe (in Bytes).
+	 *	This TLV deprecates the max_aggr_frame_size TLV from version
+	 *	1.9 onwards; max_aggr_frame_size is ignored when this TLV
+	 *	is present.
+	 */
+
+	/* Optional */
+	/*  Downlink Fifo Size */
+	uint8_t dl_fifo_size_valid;
+	/* Must be set to true if dl_fifo_size is being passed */
+	uint32_t dl_fifo_size;
+	/*
+	 * Informs the remote driver about the total Downlink xDCI buffering
+	 *	capacity or BAM data fifo size of the peripheral channel/pipe.
+	 *	(In Bytes). dl_fifo_size = n * dl_buf_size. This deprecates the
+	 *	max_aggr_frame_size field. If this value is set
+	 *	max_aggr_frame_size is ignored.
+	 */
+
+	/* Optional */
+	/*  Downlink Buffer Size */
+	uint8_t dl_buf_size_valid;
+	/* Must be set to true if dl_buf_size is being passed */
+	uint32_t dl_buf_size;
+	/* Informs the remote driver about the single xDCI buffer size
+	 * (in Bytes). This is applicable only in GSI mode.
+	 */
+};  /* Message */
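+
+/*
+ * Illustrative usage sketch (documentation only): hw_control_flags in the
+ * request above is a plain bitmask built from the bit positions listed in its
+ * description. The IPA_HW_FLAG_* names are documentation only and are not
+ * defined in this file, so raw bit positions are used here (bit 2: no wake-up
+ * events, bit 4: work over DDR):
+ *
+ *	struct ipa_config_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.hw_control_flags_valid = 1;
+ *	req.hw_control_flags = (1 << 2) | (1 << 4);
+ */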
+
+/* Response Message; Notifies the remote driver of the configuration
+ * information
+ */
+struct ipa_config_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+}; /* Message */
+
+enum ipa_stats_type_enum_v01 {
+	IPA_STATS_TYPE_ENUM_MIN_ENUM_VAL_V01 = -2147483647,
+	/* To force a 32 bit signed enum.  Do not change or use */
+	QMI_IPA_STATS_TYPE_INVALID_V01 = 0,
+	/* Invalid stats type identifier */
+	QMI_IPA_STATS_TYPE_PIPE_V01 = 1,
+	/* Pipe stats type */
+	QMI_IPA_STATS_TYPE_FILTER_RULES_V01 = 2,
+	/* Filter rule stats type */
+	IPA_STATS_TYPE_ENUM_MAX_ENUM_VAL_V01 = 2147483647
+	/* To force a 32 bit signed enum.  Do not change or use */
+};
+
+struct ipa_pipe_stats_info_type_v01 {
+	uint32_t pipe_index;
+	/* Pipe index for statistics to be retrieved. */
+
+	uint64_t num_ipv4_packets;
+	/* Accumulated number of IPv4 packets over this pipe. */
+
+	uint64_t num_ipv4_bytes;
+	/* Accumulated number of IPv4 bytes over this pipe. */
+
+	uint64_t num_ipv6_packets;
+	/* Accumulated number of IPv6 packets over this pipe. */
+
+	uint64_t num_ipv6_bytes;
+	/* Accumulated number of IPv6 bytes over this pipe. */
+};
+
+struct ipa_stats_type_filter_rule_v01 {
+	uint32_t filter_rule_index;
+	/* Filter rule index for statistics to be retrieved. */
+
+	uint64_t num_packets;
+	/* Accumulated number of packets over this filter rule. */
+};
+
+/* Request Message; Retrieve the data statistics collected on modem
+ * IPA driver.
+ */
+struct ipa_get_data_stats_req_msg_v01 {
+	/* Mandatory */
+	/*  Stats Type  */
+	enum ipa_stats_type_enum_v01 ipa_stats_type;
+	/* Indicates the type of statistics to be retrieved. */
+
+	/* Optional */
+	/* Reset Statistics */
+	uint8_t reset_stats_valid;
+	/* Must be set to true if reset_stats is being passed */
+	uint8_t reset_stats;
+	/* Option to reset the specific type of data statistics
+	 * currently collected.
+	 */
+};  /* Message */
+
+/* Response Message; Retrieve the data statistics collected
+ * on modem IPA driver.
+ */
+struct ipa_get_data_stats_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+
+	/* Optional */
+	/*  Stats Type  */
+	uint8_t ipa_stats_type_valid;
+	/* Must be set to true if ipa_stats_type is passed */
+	enum ipa_stats_type_enum_v01 ipa_stats_type;
+	/* Indicates the type of statistics that are retrieved. */
+
+	/* Optional */
+	/*  Uplink Source Pipe Statistics List */
+	uint8_t ul_src_pipe_stats_list_valid;
+	/* Must be set to true if ul_src_pipe_stats_list is being passed */
+	uint32_t ul_src_pipe_stats_list_len;
+	/* Must be set to # of elements in ul_src_pipe_stats_list */
+	struct ipa_pipe_stats_info_type_v01
+		ul_src_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+	/* List of all Uplink pipe statistics that are retrieved. */
+
+	/* Optional */
+	/*  Downlink Destination Pipe Statistics List */
+	uint8_t dl_dst_pipe_stats_list_valid;
+	/* Must be set to true if dl_dst_pipe_stats_list is being passed */
+	uint32_t dl_dst_pipe_stats_list_len;
+	/* Must be set to # of elements in dl_dst_pipe_stats_list */
+	struct ipa_pipe_stats_info_type_v01
+		dl_dst_pipe_stats_list[QMI_IPA_MAX_PIPES_V01];
+	/* List of all Downlink pipe statistics that are retrieved. */
+
+	/* Optional */
+	/*  Downlink Filter Rule Stats List */
+	uint8_t dl_filter_rule_stats_list_valid;
+	/* Must be set to true if dl_filter_rule_stats_list is being passed */
+	uint32_t dl_filter_rule_stats_list_len;
+	/* Must be set to # of elements in dl_filter_rule_stats_list */
+	struct ipa_stats_type_filter_rule_v01
+		dl_filter_rule_stats_list[QMI_IPA_MAX_FILTERS_V01];
+	/* List of all Downlink filter rule statistics retrieved. */
+};  /* Message */
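+
+/*
+ * Illustrative usage sketch (documentation only): the *_len fields of the
+ * response above bound the corresponding arrays, so a consumer walks them
+ * only when the matching *_valid flag is set (use_pipe_stats() is a
+ * hypothetical helper):
+ *
+ *	if (resp->ul_src_pipe_stats_list_valid) {
+ *		uint32_t i;
+ *
+ *		for (i = 0; i < resp->ul_src_pipe_stats_list_len; i++)
+ *			use_pipe_stats(&resp->ul_src_pipe_stats_list[i]);
+ *	}
+ */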
+
+struct ipa_apn_data_stats_info_type_v01 {
+	uint32_t mux_id;
+	/* Indicates the MUX ID associated with the APN for which the data
+	 * usage statistics are queried
+	 */
+
+	uint64_t num_ul_packets;
+	/* Accumulated number of uplink packets corresponding to
+	 * this Mux ID
+	 */
+
+	uint64_t num_ul_bytes;
+	/* Accumulated number of uplink bytes corresponding to
+	 * this Mux ID
+	 */
+
+	uint64_t num_dl_packets;
+	/* Accumulated number of downlink packets corresponding
+	 * to this Mux ID
+	 */
+
+	uint64_t num_dl_bytes;
+	/* Accumulated number of downlink bytes corresponding to
+	 * this Mux ID
+	 */
+};  /* Type */
+
+/* Request Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_req_msg_v01 {
+	/* Optional */
+	/*  Mux ID List */
+	uint8_t mux_id_list_valid;
+	/* Must be set to true if mux_id_list is being passed */
+	uint32_t mux_id_list_len;
+	/* Must be set to # of elements in mux_id_list */
+	uint32_t mux_id_list[QMI_IPA_MAX_APN_V01];
+	/* The list of MUX IDs associated with APNs for which the data usage
+	 * statistics are being retrieved
+	 */
+};  /* Message */
+
+/* Response Message; Retrieve the APN data statistics collected from modem */
+struct ipa_get_apn_data_stats_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+
+	/* Optional */
+	/* APN Data Statistics List */
+	uint8_t apn_data_stats_list_valid;
+	/* Must be set to true if apn_data_stats_list is being passed */
+	uint32_t apn_data_stats_list_len;
+	/* Must be set to # of elements in apn_data_stats_list */
+	struct ipa_apn_data_stats_info_type_v01
+		apn_data_stats_list[QMI_IPA_MAX_APN_V01];
+	/* List of APN data statistics retrieved as requested, per mux_id.
+	 * For now, only one APN is monitored on the modem driver. This is
+	 * a list to allow support for more APNs in the future.
+	 */
+};  /* Message */
+
+struct ipa_data_usage_quota_info_type_v01 {
+	uint32_t mux_id;
+	/* Indicates the MUX ID associated with the APN for which the data usage
+	 * quota needs to be set
+	 */
+
+	uint64_t num_Mbytes;
+	/* Number of megabytes of quota to be set on the APN associated
+	 * with this Mux ID.
+	 */
+};  /* Type */
+
+/* Request Message; Master driver sets a data usage quota value on
+ * modem driver
+ */
+struct ipa_set_data_usage_quota_req_msg_v01 {
+	/* Optional */
+	/* APN Quota List */
+	uint8_t apn_quota_list_valid;
+	/* Must be set to true if apn_quota_list is being passed */
+	uint32_t apn_quota_list_len;
+	/* Must be set to # of elements in apn_quota_list */
+	struct ipa_data_usage_quota_info_type_v01
+		apn_quota_list[QMI_IPA_MAX_APN_V01];
+	/* The list of APNs on which a data usage quota is to be set on the
+	 * modem driver. For now, only one APN is monitored on the modem
+	 * driver. This is a list to allow support for more APNs in the
+	 * future.
+	 */
+};  /* Message */
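+
+/*
+ * Illustrative usage sketch (documentation only): setting a 100 MB quota on
+ * the APN mapped to MUX ID 1 via the request above. Only one list entry is
+ * used, matching the single-APN limitation noted in the description:
+ *
+ *	struct ipa_set_data_usage_quota_req_msg_v01 req;
+ *
+ *	memset(&req, 0, sizeof(req));
+ *	req.apn_quota_list_valid = 1;
+ *	req.apn_quota_list_len = 1;
+ *	req.apn_quota_list[0].mux_id = 1;
+ *	req.apn_quota_list[0].num_Mbytes = 100;
+ */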
+
+/* Response Message; Master driver sets a data usage on modem driver. */
+struct ipa_set_data_usage_quota_resp_msg_v01 {
+	/* Mandatory */
+	/* Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type.*/
+};  /* Message */
+
+/* Indication Message; Modem driver sends this indication to master
+ * driver when the data usage quota is reached
+ */
+struct ipa_data_usage_quota_reached_ind_msg_v01 {
+	/* Mandatory */
+	/*  APN Quota List */
+	struct ipa_data_usage_quota_info_type_v01 apn;
+	/* Indicates the APN for which the previously set quota has been
+	 * reached. For now, only one APN is monitored on the modem
+	 * driver.
+	 */
+};  /* Message */
+
+/* Request Message; Master driver request modem driver to terminate
+ * the current data usage quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_req_msg_v01 {
+	/* This element is a placeholder to prevent the declaration of
+	 *  an empty struct.  DO NOT USE THIS FIELD UNDER ANY CIRCUMSTANCE
+	 */
+	char __placeholder;
+};  /* Message */
+
+/* Response Message; Master driver request modem driver to terminate
+ * the current quota monitoring session.
+ */
+struct ipa_stop_data_usage_quota_resp_msg_v01 {
+	/* Mandatory */
+	/*  Result Code */
+	struct ipa_qmi_response_type_v01 resp;
+	/* Standard response type. */
+};  /* Message */
+
+/*Service Message Definition*/
+#define QMI_IPA_INDICATION_REGISTER_REQ_V01 0x0020
+#define QMI_IPA_INDICATION_REGISTER_RESP_V01 0x0020
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_V01 0x0021
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_V01 0x0021
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_V01 0x0022
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_V01 0x0023
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_V01 0x0023
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_V01 0x0024
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01 0x0024
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0025
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0025
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_V01 0x0026
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01 0x0026
+#define QMI_IPA_CONFIG_REQ_V01 0x0027
+#define QMI_IPA_CONFIG_RESP_V01 0x0027
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0028
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0028
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_V01 0x0029
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_V01 0x0029
+#define QMI_IPA_GET_DATA_STATS_REQ_V01 0x0030
+#define QMI_IPA_GET_DATA_STATS_RESP_V01 0x0030
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_V01 0x0031
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_V01 0x0031
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01 0x0032
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_V01 0x0032
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 0x0033
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01 0x0034
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_V01 0x0034
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_V01 0x0035
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_V01 0x0035
+
+/* Maximum message lengths */
+#define QMI_IPA_INIT_MODEM_DRIVER_REQ_MAX_MSG_LEN_V01 134
+#define QMI_IPA_INIT_MODEM_DRIVER_RESP_MAX_MSG_LEN_V01 25
+#define QMI_IPA_INDICATION_REGISTER_REQ_MAX_MSG_LEN_V01 8
+#define QMI_IPA_INDICATION_REGISTER_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_INSTALL_FILTER_RULE_REQ_MAX_MSG_LEN_V01 22369
+#define QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01 783
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_REQ_MAX_MSG_LEN_V01 834
+#define QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_MASTER_DRIVER_INIT_COMPLETE_IND_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_MAX_MSG_LEN_V01 15
+
+
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_MAX_MSG_LEN_V01 7
+
+
+#define QMI_IPA_CONFIG_REQ_MAX_MSG_LEN_V01 102
+#define QMI_IPA_CONFIG_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 18
+#define QMI_IPA_DISABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_REQ_MAX_MSG_LEN_V01 7
+#define QMI_IPA_ENABLE_LINK_LOW_PWR_STATE_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_GET_DATA_STATS_REQ_MAX_MSG_LEN_V01 11
+#define QMI_IPA_GET_DATA_STATS_RESP_MAX_MSG_LEN_V01 2234
+#define QMI_IPA_GET_APN_DATA_STATS_REQ_MAX_MSG_LEN_V01 36
+#define QMI_IPA_GET_APN_DATA_STATS_RESP_MAX_MSG_LEN_V01 299
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 100
+#define QMI_IPA_SET_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_MAX_MSG_LEN_V01 0
+#define QMI_IPA_STOP_DATA_USAGE_QUOTA_RESP_MAX_MSG_LEN_V01 7
+
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_REQ_MAX_MSG_LEN_V01 4
+#define QMI_IPA_INIT_MODEM_DRIVER_CMPLT_RESP_MAX_MSG_LEN_V01 7
+
+/* Service Object Accessor */
+
+#endif /* IPA_QMI_SERVICE_V01_H */
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 8c27723..1049c78 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -164,6 +164,7 @@
 	DEVCONF_ACCEPT_DAD,
 	DEVCONF_FORCE_TLLAO,
 	DEVCONF_NDISC_NOTIFY,
+	DEVCONF_ACCEPT_RA_RT_TABLE,
 	DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
 	DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
 	DEVCONF_SUPPRESS_FRAG_NDISC,
diff --git a/include/uapi/linux/keychord.h b/include/uapi/linux/keychord.h
new file mode 100644
index 0000000..ea7cf4d
--- /dev/null
+++ b/include/uapi/linux/keychord.h
@@ -0,0 +1,52 @@
+/*
+ *  Key chord input driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _UAPI_LINUX_KEYCHORD_H_
+#define _UAPI_LINUX_KEYCHORD_H_
+
+#include <linux/input.h>
+
+#define KEYCHORD_VERSION		1
+
+/*
+ * One or more input_keychord structs are written to /dev/keychord
+ * at once to specify the list of keychords to monitor.
+ * Reading /dev/keychord returns the id of a keychord when the
+ * keychord combination is pressed.  A keychord is signalled when
+ * all of the keys in the keycode list are in the pressed state.
+ * The order in which the keys are pressed does not matter.
+ * The keychord will not be signalled if keys not in the keycode
+ * list are pressed.
+ * Keychords will not be signalled on key release events.
+ */
+struct input_keychord {
+	/* should be KEYCHORD_VERSION */
+	__u16 version;
+	/*
+	 * client specified ID, returned from read()
+	 * when this keychord is pressed.
+	 */
+	__u16 id;
+
+	/* number of keycodes in this keychord */
+	__u16 count;
+
+	/* variable length array of keycodes */
+	__u16 keycodes[];
+};
+
+#endif	/* _UAPI_LINUX_KEYCHORD_H_ */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 9bd5594..270b764 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -53,6 +53,8 @@
 #define REISER2FS_SUPER_MAGIC_STRING	"ReIsEr2Fs"
 #define REISER2FS_JR_SUPER_MAGIC_STRING	"ReIsEr3Fs"
 
+#define SDCARDFS_SUPER_MAGIC	0xb550ca10
+
 #define SMB_SUPER_MAGIC		0x517B
 #define CGROUP_SUPER_MAGIC	0x27e0eb
 #define CGROUP2_SUPER_MAGIC	0x63677270
diff --git a/include/uapi/linux/msm_ion.h b/include/uapi/linux/msm_ion.h
new file mode 120000
index 0000000..94349d2
--- /dev/null
+++ b/include/uapi/linux/msm_ion.h
@@ -0,0 +1 @@
+../../../drivers/staging/android/uapi/msm_ion.h
\ No newline at end of file
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
new file mode 100644
index 0000000..89ffeb6
--- /dev/null
+++ b/include/uapi/linux/msm_ipa.h
@@ -0,0 +1,1762 @@
+#ifndef _UAPI_MSM_IPA_H_
+#define _UAPI_MSM_IPA_H_
+
+#ifndef __KERNEL__
+#include <stdint.h>
+#include <stddef.h>
+#include <sys/stat.h>
+#endif
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+/**
+ * unique magic number of the IPA device
+ */
+#define IPA_IOC_MAGIC 0xCF
+
+/**
+ * name of the default routing tables for v4 and v6
+ */
+#define IPA_DFLT_RT_TBL_NAME "ipa_dflt_rt"
+
+/**
+ *   the commands supported by IPA driver
+ */
+#define IPA_IOCTL_ADD_HDR            0
+#define IPA_IOCTL_DEL_HDR            1
+#define IPA_IOCTL_ADD_RT_RULE        2
+#define IPA_IOCTL_DEL_RT_RULE        3
+#define IPA_IOCTL_ADD_FLT_RULE       4
+#define IPA_IOCTL_DEL_FLT_RULE       5
+#define IPA_IOCTL_COMMIT_HDR         6
+#define IPA_IOCTL_RESET_HDR          7
+#define IPA_IOCTL_COMMIT_RT          8
+#define IPA_IOCTL_RESET_RT           9
+#define IPA_IOCTL_COMMIT_FLT        10
+#define IPA_IOCTL_RESET_FLT         11
+#define IPA_IOCTL_DUMP              12
+#define IPA_IOCTL_GET_RT_TBL        13
+#define IPA_IOCTL_PUT_RT_TBL        14
+#define IPA_IOCTL_COPY_HDR          15
+#define IPA_IOCTL_QUERY_INTF        16
+#define IPA_IOCTL_QUERY_INTF_TX_PROPS 17
+#define IPA_IOCTL_QUERY_INTF_RX_PROPS 18
+#define IPA_IOCTL_GET_HDR           19
+#define IPA_IOCTL_PUT_HDR           20
+#define IPA_IOCTL_SET_FLT        21
+#define IPA_IOCTL_ALLOC_NAT_MEM  22
+#define IPA_IOCTL_V4_INIT_NAT    23
+#define IPA_IOCTL_NAT_DMA        24
+#define IPA_IOCTL_V4_DEL_NAT     26
+#define IPA_IOCTL_PULL_MSG       27
+#define IPA_IOCTL_GET_NAT_OFFSET 28
+#define IPA_IOCTL_RM_ADD_DEPENDENCY 29
+#define IPA_IOCTL_RM_DEL_DEPENDENCY 30
+#define IPA_IOCTL_GENERATE_FLT_EQ 31
+#define IPA_IOCTL_QUERY_INTF_EXT_PROPS 32
+#define IPA_IOCTL_QUERY_EP_MAPPING 33
+#define IPA_IOCTL_QUERY_RT_TBL_INDEX 34
+#define IPA_IOCTL_WRITE_QMAPID 35
+#define IPA_IOCTL_MDFY_FLT_RULE 36
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD	37
+#define IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL	38
+#define IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED	39
+#define IPA_IOCTL_ADD_HDR_PROC_CTX 40
+#define IPA_IOCTL_DEL_HDR_PROC_CTX 41
+#define IPA_IOCTL_MDFY_RT_RULE 42
+#define IPA_IOCTL_ADD_RT_RULE_AFTER 43
+#define IPA_IOCTL_ADD_FLT_RULE_AFTER 44
+#define IPA_IOCTL_GET_HW_VERSION 45
+#define IPA_IOCTL_MAX 46
+
+/**
+ * max size of the header to be inserted
+ */
+#define IPA_HDR_MAX_SIZE 64
+
+/**
+ * max size of the name of the resource (routing table, header)
+ */
+#define IPA_RESOURCE_NAME_MAX 32
+
+/**
+ * max number of interface properties
+ */
+#define IPA_NUM_PROPS_MAX 35
+
+/**
+ * size of the mac address
+ */
+#define IPA_MAC_ADDR_SIZE  6
+
+/**
+ * max number of mbim streams
+ */
+#define IPA_MBIM_MAX_STREAM_NUM 8
+
+/**
+ *  size of the ipv6 address
+ */
+#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
+
+/**
+ * the attributes of the rule (routing or filtering)
+ */
+#define IPA_FLT_TOS			(1ul << 0)
+#define IPA_FLT_PROTOCOL		(1ul << 1)
+#define IPA_FLT_SRC_ADDR		(1ul << 2)
+#define IPA_FLT_DST_ADDR		(1ul << 3)
+#define IPA_FLT_SRC_PORT_RANGE		(1ul << 4)
+#define IPA_FLT_DST_PORT_RANGE		(1ul << 5)
+#define IPA_FLT_TYPE			(1ul << 6)
+#define IPA_FLT_CODE			(1ul << 7)
+#define IPA_FLT_SPI			(1ul << 8)
+#define IPA_FLT_SRC_PORT		(1ul << 9)
+#define IPA_FLT_DST_PORT		(1ul << 10)
+#define IPA_FLT_TC			(1ul << 11)
+#define IPA_FLT_FLOW_LABEL		(1ul << 12)
+#define IPA_FLT_NEXT_HDR		(1ul << 13)
+#define IPA_FLT_META_DATA		(1ul << 14)
+#define IPA_FLT_FRAGMENT		(1ul << 15)
+#define IPA_FLT_TOS_MASKED		(1ul << 16)
+#define IPA_FLT_MAC_SRC_ADDR_ETHER_II	(1ul << 17)
+#define IPA_FLT_MAC_DST_ADDR_ETHER_II	(1ul << 18)
+#define IPA_FLT_MAC_SRC_ADDR_802_3	(1ul << 19)
+#define IPA_FLT_MAC_DST_ADDR_802_3	(1ul << 20)
+#define IPA_FLT_MAC_ETHER_TYPE		(1ul << 21)
+
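+/*
+ * Illustrative sketch (documentation only): these IPA_FLT_* bits are OR-ed
+ * together to describe which packet fields a rule matches on. The attrib_mask
+ * field assumed below belongs to the rule attribute structure defined further
+ * down in this header:
+ *
+ *	attrib.attrib_mask = IPA_FLT_PROTOCOL | IPA_FLT_DST_PORT;
+ *
+ * would describe a rule keyed on the IP protocol and the destination port.
+ */
+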
+/**
+ * enum ipa_client_type - names for the various IPA "clients"
+ * these are from the perspective of the clients, e.g.
+ * HSIC1_PROD means HSIC client is the producer and IPA is the
+ * consumer
+ */
+enum ipa_client_type {
+	IPA_CLIENT_PROD,
+	IPA_CLIENT_HSIC1_PROD = IPA_CLIENT_PROD,
+	IPA_CLIENT_WLAN1_PROD,
+	IPA_CLIENT_HSIC2_PROD,
+	IPA_CLIENT_USB2_PROD,
+	IPA_CLIENT_HSIC3_PROD,
+	IPA_CLIENT_USB3_PROD,
+	IPA_CLIENT_HSIC4_PROD,
+	IPA_CLIENT_USB4_PROD,
+	IPA_CLIENT_HSIC5_PROD,
+	IPA_CLIENT_USB_PROD,
+	IPA_CLIENT_A5_WLAN_AMPDU_PROD,
+	IPA_CLIENT_A2_EMBEDDED_PROD,
+	IPA_CLIENT_A2_TETHERED_PROD,
+	IPA_CLIENT_APPS_LAN_WAN_PROD,
+	IPA_CLIENT_APPS_CMD_PROD,
+	IPA_CLIENT_ODU_PROD,
+	IPA_CLIENT_MHI_PROD,
+	IPA_CLIENT_Q6_LAN_PROD,
+	IPA_CLIENT_Q6_WAN_PROD,
+	IPA_CLIENT_Q6_CMD_PROD,
+	IPA_CLIENT_MEMCPY_DMA_SYNC_PROD,
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD,
+	IPA_CLIENT_Q6_DECOMP_PROD,
+	IPA_CLIENT_Q6_DECOMP2_PROD,
+	IPA_CLIENT_UC_USB_PROD,
+
+	/* Below PROD client type is only for test purpose */
+	IPA_CLIENT_TEST_PROD,
+	IPA_CLIENT_TEST1_PROD,
+	IPA_CLIENT_TEST2_PROD,
+	IPA_CLIENT_TEST3_PROD,
+	IPA_CLIENT_TEST4_PROD,
+
+	IPA_CLIENT_CONS,
+	IPA_CLIENT_HSIC1_CONS = IPA_CLIENT_CONS,
+	IPA_CLIENT_WLAN1_CONS,
+	IPA_CLIENT_HSIC2_CONS,
+	IPA_CLIENT_USB2_CONS,
+	IPA_CLIENT_WLAN2_CONS,
+	IPA_CLIENT_HSIC3_CONS,
+	IPA_CLIENT_USB3_CONS,
+	IPA_CLIENT_WLAN3_CONS,
+	IPA_CLIENT_HSIC4_CONS,
+	IPA_CLIENT_USB4_CONS,
+	IPA_CLIENT_WLAN4_CONS,
+	IPA_CLIENT_HSIC5_CONS,
+	IPA_CLIENT_USB_CONS,
+	IPA_CLIENT_USB_DPL_CONS,
+	IPA_CLIENT_A2_EMBEDDED_CONS,
+	IPA_CLIENT_A2_TETHERED_CONS,
+	IPA_CLIENT_A5_LAN_WAN_CONS,
+	IPA_CLIENT_APPS_LAN_CONS,
+	IPA_CLIENT_APPS_WAN_CONS,
+	IPA_CLIENT_ODU_EMB_CONS,
+	IPA_CLIENT_ODU_TETH_CONS,
+	IPA_CLIENT_MHI_CONS,
+	IPA_CLIENT_Q6_LAN_CONS,
+	IPA_CLIENT_Q6_WAN_CONS,
+	IPA_CLIENT_Q6_DUN_CONS,
+	IPA_CLIENT_MEMCPY_DMA_SYNC_CONS,
+	IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS,
+	IPA_CLIENT_Q6_DECOMP_CONS,
+	IPA_CLIENT_Q6_DECOMP2_CONS,
+	IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS,
+	/* Below CONS client type is only for test purpose */
+	IPA_CLIENT_TEST_CONS,
+	IPA_CLIENT_TEST1_CONS,
+	IPA_CLIENT_TEST2_CONS,
+	IPA_CLIENT_TEST3_CONS,
+	IPA_CLIENT_TEST4_CONS,
+
+	IPA_CLIENT_MAX,
+};
+
+#define IPA_CLIENT_IS_APPS_CONS(client) \
+	((client) == IPA_CLIENT_APPS_LAN_CONS || \
+	(client) == IPA_CLIENT_APPS_WAN_CONS)
+
+#define IPA_CLIENT_IS_USB_CONS(client) \
+	((client) == IPA_CLIENT_USB_CONS || \
+	(client) == IPA_CLIENT_USB2_CONS || \
+	(client) == IPA_CLIENT_USB3_CONS || \
+	(client) == IPA_CLIENT_USB_DPL_CONS || \
+	(client) == IPA_CLIENT_USB4_CONS)
+
+#define IPA_CLIENT_IS_WLAN_CONS(client) \
+	((client) == IPA_CLIENT_WLAN1_CONS || \
+	(client) == IPA_CLIENT_WLAN2_CONS || \
+	(client) == IPA_CLIENT_WLAN3_CONS || \
+	(client) == IPA_CLIENT_WLAN4_CONS)
+
+#define IPA_CLIENT_IS_ODU_CONS(client) \
+	((client) == IPA_CLIENT_ODU_EMB_CONS || \
+	(client) == IPA_CLIENT_ODU_TETH_CONS)
+
+#define IPA_CLIENT_IS_Q6_CONS(client) \
+	((client) == IPA_CLIENT_Q6_LAN_CONS || \
+	(client) == IPA_CLIENT_Q6_WAN_CONS || \
+	(client) == IPA_CLIENT_Q6_DUN_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_CONS || \
+	(client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS)
+
+#define IPA_CLIENT_IS_Q6_PROD(client) \
+	((client) == IPA_CLIENT_Q6_LAN_PROD || \
+	(client) == IPA_CLIENT_Q6_WAN_PROD || \
+	(client) == IPA_CLIENT_Q6_CMD_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_CONS(client) \
+	((client) == IPA_CLIENT_Q6_LAN_CONS || \
+	(client) == IPA_CLIENT_Q6_WAN_CONS || \
+	(client) == IPA_CLIENT_Q6_DUN_CONS || \
+	(client) == IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS)
+
+#define IPA_CLIENT_IS_Q6_ZIP_CONS(client) \
+	((client) == IPA_CLIENT_Q6_DECOMP_CONS || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_CONS)
+
+#define IPA_CLIENT_IS_Q6_NON_ZIP_PROD(client) \
+	((client) == IPA_CLIENT_Q6_LAN_PROD || \
+	(client) == IPA_CLIENT_Q6_WAN_PROD || \
+	(client) == IPA_CLIENT_Q6_CMD_PROD)
+
+#define IPA_CLIENT_IS_Q6_ZIP_PROD(client) \
+	((client) == IPA_CLIENT_Q6_DECOMP_PROD || \
+	(client) == IPA_CLIENT_Q6_DECOMP2_PROD)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_CONS(client) \
+	((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_CONS || \
+	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS)
+
+#define IPA_CLIENT_IS_MEMCPY_DMA_PROD(client) \
+	((client) == IPA_CLIENT_MEMCPY_DMA_SYNC_PROD || \
+	(client) == IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD)
+
+#define IPA_CLIENT_IS_MHI_CONS(client) \
+	((client) == IPA_CLIENT_MHI_CONS)
+
+#define IPA_CLIENT_IS_MHI(client) \
+	((client) == IPA_CLIENT_MHI_CONS || \
+	(client) == IPA_CLIENT_MHI_PROD)
+
+#define IPA_CLIENT_IS_TEST_PROD(client) \
+	((client) == IPA_CLIENT_TEST_PROD || \
+	(client) == IPA_CLIENT_TEST1_PROD || \
+	(client) == IPA_CLIENT_TEST2_PROD || \
+	(client) == IPA_CLIENT_TEST3_PROD || \
+	(client) == IPA_CLIENT_TEST4_PROD)
+
+#define IPA_CLIENT_IS_TEST_CONS(client) \
+	((client) == IPA_CLIENT_TEST_CONS || \
+	(client) == IPA_CLIENT_TEST1_CONS || \
+	(client) == IPA_CLIENT_TEST2_CONS || \
+	(client) == IPA_CLIENT_TEST3_CONS || \
+	(client) == IPA_CLIENT_TEST4_CONS)
+
+#define IPA_CLIENT_IS_TEST(client) \
+	(IPA_CLIENT_IS_TEST_PROD(client) || IPA_CLIENT_IS_TEST_CONS(client))
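For illustration only (not part of this header), the IPA_CLIENT_IS_* helpers above are plain boolean expressions, so client-type dispatch can be written directly against them; a hypothetical sketch:

/* Hypothetical helper, for illustration only: map a client type to a coarse
 * group using the classification macros defined above.
 */
static const char *ipa_client_group(enum ipa_client_type client)
{
	if (IPA_CLIENT_IS_TEST(client))
		return "test";
	if (IPA_CLIENT_IS_Q6_PROD(client) || IPA_CLIENT_IS_Q6_CONS(client))
		return "q6";
	if (IPA_CLIENT_IS_MHI(client))
		return "mhi";
	if (IPA_CLIENT_IS_MEMCPY_DMA_PROD(client) ||
	    IPA_CLIENT_IS_MEMCPY_DMA_CONS(client))
		return "memcpy-dma";
	if (IPA_CLIENT_IS_APPS_CONS(client))
		return "apps";
	return "other";
}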
+
+/**
+ * enum ipa_ip_type - Address family: IPv4 or IPv6
+ */
+enum ipa_ip_type {
+	IPA_IP_v4,
+	IPA_IP_v6,
+	IPA_IP_MAX
+};
+
+/**
+ * enum ipa_rule_type - Type of routing or filtering rule
+ * Hashable: Rule will be located in the hashable tables
+ * Non_Hashable: Rule will be located in the non-hashable tables
+ */
+enum ipa_rule_type {
+	IPA_RULE_HASHABLE,
+	IPA_RULE_NON_HASHABLE,
+	IPA_RULE_TYPE_MAX
+};
+
+/**
+ * enum ipa_flt_action - action field of filtering rule
+ *
+ * Pass to routing: 5'd0
+ * Pass to source NAT: 5'd1
+ * Pass to destination NAT: 5'd2
+ * Pass to default output pipe (e.g., Apps or Modem): 5'd3
+ */
+enum ipa_flt_action {
+	IPA_PASS_TO_ROUTING,
+	IPA_PASS_TO_SRC_NAT,
+	IPA_PASS_TO_DST_NAT,
+	IPA_PASS_TO_EXCEPTION
+};
+
+/**
+ * enum ipa_wlan_event - Events for wlan client
+ *
+ * wlan client connect: New wlan client connected
+ * wlan client disconnect: wlan client disconnected
+ * wlan client power save: wlan client moved to power save
+ * wlan client normal: wlan client moved out of power save
+ * sw routing enable: ipa routing is disabled
+ * sw routing disable: ipa routing is enabled
+ * wlan ap connect: wlan AP(access point) is up
+ * wlan ap disconnect: wlan AP(access point) is down
+ * wlan sta connect: wlan STA(station) is up
+ * wlan sta disconnect: wlan STA(station) is down
+ * wlan client connect ex: new wlan client connected
+ * wlan scc switch: wlan interfaces in scc mode
+ * wlan mcc switch: wlan interfaces in mcc mode
+ * wlan wdi enable: wdi data path completed
+ * wlan wdi disable: wdi data path teardown
+ */
+enum ipa_wlan_event {
+	WLAN_CLIENT_CONNECT,
+	WLAN_CLIENT_DISCONNECT,
+	WLAN_CLIENT_POWER_SAVE_MODE,
+	WLAN_CLIENT_NORMAL_MODE,
+	SW_ROUTING_ENABLE,
+	SW_ROUTING_DISABLE,
+	WLAN_AP_CONNECT,
+	WLAN_AP_DISCONNECT,
+	WLAN_STA_CONNECT,
+	WLAN_STA_DISCONNECT,
+	WLAN_CLIENT_CONNECT_EX,
+	WLAN_SWITCH_TO_SCC,
+	WLAN_SWITCH_TO_MCC,
+	WLAN_WDI_ENABLE,
+	WLAN_WDI_DISABLE,
+	IPA_WLAN_EVENT_MAX
+};
+
+/**
+ * enum ipa_wan_event - Events for wan client
+ *
+ * wan default route add/del
+ * wan embms connect: New wan embms interface connected
+ */
+enum ipa_wan_event {
+	WAN_UPSTREAM_ROUTE_ADD = IPA_WLAN_EVENT_MAX,
+	WAN_UPSTREAM_ROUTE_DEL,
+	WAN_EMBMS_CONNECT,
+	WAN_XLAT_CONNECT,
+	IPA_WAN_EVENT_MAX
+};
+
+enum ipa_ecm_event {
+	ECM_CONNECT = IPA_WAN_EVENT_MAX,
+	ECM_DISCONNECT,
+	IPA_ECM_EVENT_MAX,
+};
+
+enum ipa_tethering_stats_event {
+	IPA_TETHERING_STATS_UPDATE_STATS = IPA_ECM_EVENT_MAX,
+	IPA_TETHERING_STATS_UPDATE_NETWORK_STATS,
+	IPA_TETHERING_STATS_EVENT_MAX,
+	IPA_EVENT_MAX_NUM = IPA_TETHERING_STATS_EVENT_MAX
+};
+
+#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
+
+/**
+ * enum ipa_rm_resource_name - IPA RM clients identification names
+ *
+ * Add new mapping to ipa_rm_prod_index() / ipa_rm_cons_index()
+ * when adding new entry to this enum.
+ */
+enum ipa_rm_resource_name {
+	IPA_RM_RESOURCE_PROD = 0,
+	IPA_RM_RESOURCE_Q6_PROD = IPA_RM_RESOURCE_PROD,
+	IPA_RM_RESOURCE_USB_PROD,
+	IPA_RM_RESOURCE_USB_DPL_DUMMY_PROD,
+	IPA_RM_RESOURCE_HSIC_PROD,
+	IPA_RM_RESOURCE_STD_ECM_PROD,
+	IPA_RM_RESOURCE_RNDIS_PROD,
+	IPA_RM_RESOURCE_WWAN_0_PROD,
+	IPA_RM_RESOURCE_WLAN_PROD,
+	IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	IPA_RM_RESOURCE_MHI_PROD,
+	IPA_RM_RESOURCE_PROD_MAX,
+
+	IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
+	IPA_RM_RESOURCE_USB_CONS,
+	IPA_RM_RESOURCE_USB_DPL_CONS,
+	IPA_RM_RESOURCE_HSIC_CONS,
+	IPA_RM_RESOURCE_WLAN_CONS,
+	IPA_RM_RESOURCE_APPS_CONS,
+	IPA_RM_RESOURCE_ODU_ADAPT_CONS,
+	IPA_RM_RESOURCE_MHI_CONS,
+	IPA_RM_RESOURCE_MAX
+};
+
+/**
+ * enum ipa_hw_type - IPA hardware version type
+ * @IPA_HW_None: IPA hardware version not defined
+ * @IPA_HW_v1_0: IPA hardware version 1.0
+ * @IPA_HW_v1_1: IPA hardware version 1.1
+ * @IPA_HW_v2_0: IPA hardware version 2.0
+ * @IPA_HW_v2_1: IPA hardware version 2.1
+ * @IPA_HW_v2_5: IPA hardware version 2.5
+ * @IPA_HW_v2_6: IPA hardware version 2.6
+ * @IPA_HW_v2_6L: IPA hardware version 2.6L
+ * @IPA_HW_v3_0: IPA hardware version 3.0
+ * @IPA_HW_v3_1: IPA hardware version 3.1
+ * @IPA_HW_v3_5: IPA hardware version 3.5
+ * @IPA_HW_v3_5_1: IPA hardware version 3.5.1
+ */
+enum ipa_hw_type {
+	IPA_HW_None = 0,
+	IPA_HW_v1_0 = 1,
+	IPA_HW_v1_1 = 2,
+	IPA_HW_v2_0 = 3,
+	IPA_HW_v2_1 = 4,
+	IPA_HW_v2_5 = 5,
+	IPA_HW_v2_6 = IPA_HW_v2_5,
+	IPA_HW_v2_6L = 6,
+	IPA_HW_v3_0 = 10,
+	IPA_HW_v3_1 = 11,
+	IPA_HW_v3_5 = 12,
+	IPA_HW_v3_5_1 = 13,
+	IPA_HW_MAX
+};
+
+/**
+ * struct ipa_rule_attrib - attributes of a routing/filtering
+ * rule, all in LE
+ * @attrib_mask: what attributes are valid
+ * @src_port_lo: low port of src port range
+ * @src_port_hi: high port of src port range
+ * @dst_port_lo: low port of dst port range
+ * @dst_port_hi: high port of dst port range
+ * @type: ICMP/IGMP type
+ * @code: ICMP/IGMP code
+ * @spi: IPSec SPI
+ * @src_port: exact src port
+ * @dst_port: exact dst port
+ * @meta_data: meta-data val
+ * @meta_data_mask: meta-data mask
+ * @u.v4.tos: type of service
+ * @u.v4.protocol: protocol
+ * @u.v4.src_addr: src address value
+ * @u.v4.src_addr_mask: src address mask
+ * @u.v4.dst_addr: dst address value
+ * @u.v4.dst_addr_mask: dst address mask
+ * @u.v6.tc: traffic class
+ * @u.v6.flow_label: flow label
+ * @u.v6.next_hdr: next header
+ * @u.v6.src_addr: src address val
+ * @u.v6.src_addr_mask: src address mask
+ * @u.v6.dst_addr: dst address val
+ * @u.v6.dst_addr_mask: dst address mask
+ */
+struct ipa_rule_attrib {
+	uint32_t attrib_mask;
+	uint16_t src_port_lo;
+	uint16_t src_port_hi;
+	uint16_t dst_port_lo;
+	uint16_t dst_port_hi;
+	uint8_t type;
+	uint8_t code;
+	uint8_t tos_value;
+	uint8_t tos_mask;
+	uint32_t spi;
+	uint16_t src_port;
+	uint16_t dst_port;
+	uint32_t meta_data;
+	uint32_t meta_data_mask;
+	uint8_t src_mac_addr[ETH_ALEN];
+	uint8_t src_mac_addr_mask[ETH_ALEN];
+	uint8_t dst_mac_addr[ETH_ALEN];
+	uint8_t dst_mac_addr_mask[ETH_ALEN];
+	uint16_t ether_type;
+	union {
+		struct {
+			uint8_t tos;
+			uint8_t protocol;
+			uint32_t src_addr;
+			uint32_t src_addr_mask;
+			uint32_t dst_addr;
+			uint32_t dst_addr_mask;
+		} v4;
+		struct {
+			uint8_t tc;
+			uint32_t flow_label;
+			uint8_t next_hdr;
+			uint32_t src_addr[4];
+			uint32_t src_addr_mask[4];
+			uint32_t dst_addr[4];
+			uint32_t dst_addr_mask[4];
+		} v6;
+	} u;
+};
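As a minimal sketch of how this attribute block is typically filled from user space: match TCP traffic to 192.168.1.0/24 with an IPv4 rule. The IPA_FLT_PROTOCOL and IPA_FLT_DST_ADDR bit names are assumed to be the attribute-mask flags defined earlier in this header; the address is shown in host order for readability, while the documentation above says the fields are little-endian. Needs <string.h> and this header.

/* Illustrative only; flag names and byte order are assumptions, see above. */
static void fill_example_v4_attrib(struct ipa_rule_attrib *attrib)
{
	memset(attrib, 0, sizeof(*attrib));
	attrib->attrib_mask = IPA_FLT_PROTOCOL | IPA_FLT_DST_ADDR;
	attrib->u.v4.protocol = 6;               /* IPPROTO_TCP */
	attrib->u.v4.dst_addr = 0xC0A80100;      /* 192.168.1.0 */
	attrib->u.v4.dst_addr_mask = 0xFFFFFF00; /* /24 */
}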
+
+/*! @brief The maximum number of Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of IHL offset Mask Equal 32 Eqns */
+#define IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS 2
+
+/*! @brief The maximum number of Mask Equal 128 Eqns */
+#define IPA_IPFLTR_NUM_MEQ_128_EQNS 2
+
+/*! @brief The maximum number of IHL offset Range Check 16 Eqns */
+#define IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS 2
+
+/*! @brief Offset and 16 bit comparison equation */
+struct ipa_ipfltr_eq_16 {
+	int8_t offset;
+	uint16_t value;
+};
+
+/*! @brief Offset and 32 bit comparison equation */
+struct ipa_ipfltr_eq_32 {
+	int8_t offset;
+	uint32_t value;
+};
+
+/*! @brief Offset and 128 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_128 {
+	int8_t offset;
+	uint8_t mask[16];
+	uint8_t value[16];
+};
+
+/*! @brief Offset and 32 bit masked comparison equation */
+struct ipa_ipfltr_mask_eq_32 {
+	int8_t offset;
+	uint32_t mask;
+	uint32_t value;
+};
+
+/*! @brief Equation for identifying a range. Ranges are inclusive */
+struct ipa_ipfltr_range_eq_16 {
+	int8_t offset;
+	uint16_t range_low;
+	uint16_t range_high;
+};
+
+/*! @brief Rule equations which are set according to DS filter installation */
+struct ipa_ipfltri_rule_eq {
+	/*! 16-bit Bitmask to indicate how many eqs are valid in this rule  */
+	uint16_t rule_eq_bitmap;
+	/*! Specifies if a type of service check rule is present */
+	uint8_t tos_eq_present;
+	/*! The value to check against the type of service (ipv4) field */
+	uint8_t tos_eq;
+	/*! Specifies if a protocol check rule is present */
+	uint8_t protocol_eq_present;
+	/*! The value to check against the protocol (ipv4) field */
+	uint8_t protocol_eq;
+	/*! The number of ip header length offset 16 bit range check
+	 * rules in this rule
+	 */
+	uint8_t num_ihl_offset_range_16;
+	/*! An array of the registered ip header length offset 16 bit
+	 * range check rules
+	 */
+	struct ipa_ipfltr_range_eq_16
+		ihl_offset_range_16[IPA_IPFLTR_NUM_IHL_RANGE_16_EQNS];
+	/*! The number of mask equal 32 rules present in this rule */
+	uint8_t num_offset_meq_32;
+	/*! An array of all the possible mask equal 32 rules in this rule */
+	struct ipa_ipfltr_mask_eq_32
+		offset_meq_32[IPA_IPFLTR_NUM_MEQ_32_EQNS];
+	/*! Specifies if the traffic class rule is present in this rule */
+	uint8_t tc_eq_present;
+	/*! The value to check the traffic class (ipv6) field against */
+	uint8_t tc_eq;
+	/*! Specifies if the flow equals rule is present in this rule */
+	uint8_t fl_eq_present;
+	/*! The value to check the flow (ipv6) field against */
+	uint32_t fl_eq;
+	/*! Specifies if the ip header length offset 16 bit equation is
+	 * present in this rule
+	 */
+	uint8_t ihl_offset_eq_16_present;
+	/*! The ip header length offset 16 bit equation */
+	struct ipa_ipfltr_eq_16 ihl_offset_eq_16;
+	/*! Specifies if the ip header length offset 32 bit equation is
+	 * present in this rule
+	 */
+	uint8_t ihl_offset_eq_32_present;
+	/*! The ip header length offset 32 bit equation */
+	struct ipa_ipfltr_eq_32 ihl_offset_eq_32;
+	/*! The number of ip header length offset 32 bit mask equations in
+	 * this rule
+	 */
+	uint8_t num_ihl_offset_meq_32;
+	/*! The ip header length offset 32 bit mask equation */
+	struct ipa_ipfltr_mask_eq_32
+		ihl_offset_meq_32[IPA_IPFLTR_NUM_IHL_MEQ_32_EQNS];
+	/*! The number of 128 bit masked comparison equations in this
+	 * rule
+	 */
+	uint8_t num_offset_meq_128;
+	/*! The 128 bit masked comparison equations */
+	struct ipa_ipfltr_mask_eq_128
+		offset_meq_128[IPA_IPFLTR_NUM_MEQ_128_EQNS];
+	/*! The metadata 32 bit masked comparison equation present or not */
+	/* Metadata based rules are added internally by IPA driver */
+	uint8_t metadata_meq32_present;
+	/*! The metadata 32 bit masked comparison equation */
+	struct ipa_ipfltr_mask_eq_32 metadata_meq32;
+	/*! Specifies if the Fragment equation is present in this rule */
+	uint8_t ipv4_frag_eq_present;
+};
+
+/**
+ * struct ipa_flt_rule - attributes of a filtering rule
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ * @to_uc: bool switch to pass packet to micro-controller
+ * @action: action field
+ * @rt_tbl_hdl: handle of table from "get"
+ * @attrib: attributes of the rule
+ * @eq_attrib: attributes of the rule in equation form (valid when
+ * eq_attrib_type is true)
+ * @rt_tbl_idx: index of RT table referred to by filter rule (valid when
+ * eq_attrib_type is true and non-exception action)
+ * @eq_attrib_type: true if equation level form used to specify attributes
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @rule_id: rule_id to be assigned to the filter rule. In case client specifies
+ *  rule_id as 0 the driver will assign a new rule_id
+ */
+struct ipa_flt_rule {
+	uint8_t retain_hdr;
+	uint8_t to_uc;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_hdl;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	uint32_t rt_tbl_idx;
+	uint8_t eq_attrib_type;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint16_t rule_id;
+};
+
+/**
+ * enum ipa_hdr_l2_type - L2 header type
+ * IPA_HDR_L2_NONE: L2 header which isn't Ethernet II and isn't 802_3
+ * IPA_HDR_L2_ETHERNET_II: L2 header of type Ethernet II
+ * IPA_HDR_L2_802_3: L2 header of type 802_3
+ */
+enum ipa_hdr_l2_type {
+	IPA_HDR_L2_NONE,
+	IPA_HDR_L2_ETHERNET_II,
+	IPA_HDR_L2_802_3,
+	IPA_HDR_L2_MAX,
+};
+
+/**
+ * enum ipa_hdr_proc_type - Processing context type
+ * IPA_HDR_PROC_NONE: No processing context
+ * IPA_HDR_PROC_ETHII_TO_ETHII: Process Ethernet II to Ethernet II
+ * IPA_HDR_PROC_ETHII_TO_802_3: Process Ethernet II to 802_3
+ * IPA_HDR_PROC_802_3_TO_ETHII: Process 802_3 to Ethernet II
+ * IPA_HDR_PROC_802_3_TO_802_3: Process 802_3 to 802_3
+ */
+enum ipa_hdr_proc_type {
+	IPA_HDR_PROC_NONE,
+	IPA_HDR_PROC_ETHII_TO_ETHII,
+	IPA_HDR_PROC_ETHII_TO_802_3,
+	IPA_HDR_PROC_802_3_TO_ETHII,
+	IPA_HDR_PROC_802_3_TO_802_3,
+	IPA_HDR_PROC_MAX,
+};
+
+/**
+ * struct ipa_rt_rule - attributes of a routing rule
+ * @dst: dst "client"
+ * @hdr_hdl: handle to the dynamic header
+ *	it is not an index or an offset
+ * @hdr_proc_ctx_hdl: handle to header processing context. if it is provided
+ *	hdr_hdl shall be 0
+ * @attrib: attributes of the rule
+ * @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
+ *  IPA will use the rule and will not look for other rules that may have
+ *  higher priority
+ * @hashable: bool switch. is this rule hashable or not?
+ *  ipa uses hashable rules to cache their hit results to be used in
+ *  consecutive packets
+ * @retain_hdr: bool switch to instruct IPA core to add back to the packet
+ *  the header removed as part of header removal
+ */
+struct ipa_rt_rule {
+	enum ipa_client_type dst;
+	uint32_t hdr_hdl;
+	uint32_t hdr_proc_ctx_hdl;
+	struct ipa_rule_attrib attrib;
+	uint8_t max_prio;
+	uint8_t hashable;
+	uint8_t retain_hdr;
+};
+
+/**
+ * struct ipa_hdr_add - header descriptor includes in and out
+ * parameters
+ * @name: name of the header
+ * @hdr: actual header to be inserted
+ * @hdr_len: size of above header
+ * @type: l2 header type
+ * @is_partial: header not fully specified
+ * @hdr_hdl: out parameter, handle to header, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_hdr_add {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	enum ipa_hdr_l2_type type;
+	uint8_t is_partial;
+	uint32_t hdr_hdl;
+	int status;
+	uint8_t is_eth2_ofst_valid;
+	uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_add_hdr - header addition parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be written to IPA HW also?
+ * @num_hdrs: num of headers that follow
+ * @ipa_hdr_add hdr:	all headers need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr {
+	uint8_t commit;
+	uint8_t num_hdrs;
+	struct ipa_hdr_add hdr[0];
+};
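The "back to back, no pointers" layout means user space allocates one contiguous buffer: the fixed struct followed immediately by num_hdrs ipa_hdr_add entries. A minimal sketch, assuming a file descriptor obtained by opening /dev/ipa (the device node referenced later in this header) and using the IPA_IOC_ADD_HDR ioctl defined further down; needs <stdint.h>, <stdlib.h>, <string.h>, <stdio.h>, <sys/ioctl.h> and this header.

/* Illustrative sketch: add one header; caller keeps hdr_len <= IPA_HDR_MAX_SIZE. */
static int add_one_header(int fd, const uint8_t *hdr_bytes, uint8_t hdr_len)
{
	size_t sz = sizeof(struct ipa_ioc_add_hdr) + sizeof(struct ipa_hdr_add);
	struct ipa_ioc_add_hdr *req = calloc(1, sz);
	int ret;

	if (!req)
		return -1;
	req->commit = 1;
	req->num_hdrs = 1;
	snprintf(req->hdr[0].name, IPA_RESOURCE_NAME_MAX, "example_hdr");
	memcpy(req->hdr[0].hdr, hdr_bytes, hdr_len);
	req->hdr[0].hdr_len = hdr_len;
	req->hdr[0].type = IPA_HDR_L2_ETHERNET_II;

	ret = ioctl(fd, IPA_IOC_ADD_HDR, req);
	if (!ret)
		printf("new header handle: %u\n", req->hdr[0].hdr_hdl);
	free(req);
	return ret;
}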
+
+/**
+ * struct ipa_hdr_proc_ctx_add - processing context descriptor includes
+ * in and out parameters
+ * @type: processing context type
+ * @hdr_hdl: in parameter, handle to header
+ * @proc_ctx_hdl: out parameter, handle to proc_ctx, valid when status is 0
+ * @status:	out parameter, status of header add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_proc_ctx_add {
+	enum ipa_hdr_proc_type type;
+	uint32_t hdr_hdl;
+	uint32_t proc_ctx_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_hdr_proc_ctx - processing context addition parameters (support
+ * multiple processing context and commit)
+ * @commit: should processing context be written to IPA HW also?
+ * @num_proc_ctxs: num of processing contexts that follow
+ * @proc_ctx:	all processing contexts need to go here back to
+ *			back, no pointers
+ */
+struct ipa_ioc_add_hdr_proc_ctx {
+	uint8_t commit;
+	uint8_t num_proc_ctxs;
+	struct ipa_hdr_proc_ctx_add proc_ctx[0];
+};
+
+/**
+ * struct ipa_ioc_copy_hdr - retrieve a copy of the specified
+ * header - caller can then derive the complete header
+ * @name: name of the header resource
+ * @hdr:	out parameter, contents of specified header,
+ *	valid only when ioctl return val is non-negative
+ * @hdr_len: out parameter, size of above header
+ *	valid only when ioctl return val is non-negative
+ * @type: l2 header type
+ *	valid only when ioctl return val is non-negative
+ * @is_partial:	out parameter, indicates whether specified header is partial
+ *		valid only when ioctl return val is non-negative
+ * @is_eth2_ofst_valid: is eth2_ofst field valid?
+ * @eth2_ofst: offset to start of Ethernet-II/802.3 header
+ */
+struct ipa_ioc_copy_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t hdr[IPA_HDR_MAX_SIZE];
+	uint8_t hdr_len;
+	enum ipa_hdr_l2_type type;
+	uint8_t is_partial;
+	uint8_t is_eth2_ofst_valid;
+	uint16_t eth2_ofst;
+};
+
+/**
+ * struct ipa_ioc_get_hdr - header entry lookup parameters; if lookup was
+ * successful, the caller must call put to release the reference count when done
+ * @name: name of the header resource
+ * @hdl:	out parameter, handle of header entry
+ *		valid only when ioctl return val is non-negative
+ */
+struct ipa_ioc_get_hdr {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_hdr_del - header descriptor includes in and out
+ * parameters
+ *
+ * @hdl: handle returned from header add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_hdr - header deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should headers be removed from IPA HW also?
+ * @num_hdls: num of headers being removed
+ * @ipa_hdr_del hdl: all handles need to go here back to back, no pointers
+ */
+struct ipa_ioc_del_hdr {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_del hdl[0];
+};
+
+/**
+ * struct ipa_hdr_proc_ctx_del - processing context descriptor includes
+ * in and out parameters
+ * @hdl: handle returned from processing context add operation
+ * @status:	out parameter, status of header remove operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_hdr_proc_ctx_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * ipa_ioc_del_hdr_proc_ctx - processing context deletion parameters (support
+ * multiple headers and commit)
+ * @commit: should processing contexts be removed from IPA HW also?
+ * @num_hdls: num of processing contexts being removed
+ * @ipa_hdr_proc_ctx_del hdl:	all handles need to go here back to back,
+ *				no pointers
+ */
+struct ipa_ioc_del_hdr_proc_ctx {
+	uint8_t commit;
+	uint8_t num_hdls;
+	struct ipa_hdr_proc_ctx_del hdl[0];
+};
+
+/**
+ * struct ipa_rt_rule_add - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear:	add at back of routing table, it is NOT possible to add rules at
+ *		the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of routing rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_add {
+	struct ipa_rt_rule rule;
+	uint8_t at_rear;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule - routing rule addition parameters (supports
+ * multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_after - routing rule addition after a specific
+ * rule parameters(supports multiple rules and commit);
+ *
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @add_after_hdl: the rules will be added after this specific rule
+ * @ipa_rt_rule_add rules: all rules need to go back to back here, no pointers
+ *			   at_rear field will be ignored when using this IOCTL
+ */
+struct ipa_ioc_add_rt_rule_after {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	struct ipa_rt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_mdfy - routing rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rt_rule_hdl: handle to the rule to be modified
+ * @status:	output parameter, status of routing rule modify operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_rt_rule_mdfy {
+	struct ipa_rt_rule rule;
+	uint32_t rt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_mdfy_rt_rule - routing rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	struct ipa_rt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_rt_rule_del - routing rule descriptor includes in
+ * and out parameters
+ * @hdl: handle returned from route rule add operation
+ * @status:	output parameter, status of route rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_rt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @ipa_rt_rule_del hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_rt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_rt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl_indx - routing table index lookup parameters
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @idx:	output parameter, routing table index, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl_indx {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t idx;
+};
+
+/**
+ * struct ipa_flt_rule_add - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of filtering table?
+ * @flt_rule_hdl: out parameter, handle to rule, valid when status is 0
+ * @status:	output parameter, status of filtering rule add operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_add {
+	struct ipa_flt_rule rule;
+	uint8_t at_rear;
+	uint32_t flt_rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule - filtering rule addition parameters (supports
+ * multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ *	valid only when global is 0
+ * @global: does this apply to global filter table of specific IP family
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_add_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t global;
+	uint8_t num_rules;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_ioc_add_flt_rule_after - filtering rule addition after specific
+ * rule parameters (supports multiple rules and commit)
+ * all rules MUST be added to same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @ep:	which "clients" pipe does this rule apply to?
+ * @num_rules: number of filtering rules that follow
+ * @add_after_hdl: rules will be added after the rule with this handle
+ * @rules: all rules need to go back to back here, no pointers. at_rear field
+ *	   is ignored when using this IOCTL
+ */
+struct ipa_ioc_add_flt_rule_after {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	enum ipa_client_type ep;
+	uint8_t num_rules;
+	uint32_t add_after_hdl;
+	struct ipa_flt_rule_add rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_mdfy - filtering rule descriptor includes
+ * in and out parameters
+ * @rule: actual rule to be added
+ * @rule_hdl: handle to the rule to be modified
+ * @status:	output parameter, status of filtering rule modify operation,
+ *		0 for success,
+ *		-1 for failure
+ *
+ */
+struct ipa_flt_rule_mdfy {
+	struct ipa_flt_rule rule;
+	uint32_t rule_hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_mdfy_flt_rule - filtering rule modify parameters (supports
+ * multiple rules and commit)
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @num_rules: number of filtering rules that follow
+ * @rules: all rules need to go back to back here, no pointers
+ */
+struct ipa_ioc_mdfy_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_rules;
+	struct ipa_flt_rule_mdfy rules[0];
+};
+
+/**
+ * struct ipa_flt_rule_del - filtering rule descriptor includes
+ * in and out parameters
+ *
+ * @hdl: handle returned from filtering rule add operation
+ * @status:	output parameter, status of filtering rule delete operation,
+ *		0 for success,
+ *		-1 for failure
+ */
+struct ipa_flt_rule_del {
+	uint32_t hdl;
+	int status;
+};
+
+/**
+ * struct ipa_ioc_del_flt_rule - filtering rule deletion parameters (supports
+ * multiple headers and commit)
+ * @commit: should rules be removed from IPA HW also?
+ * @ip: IP family of rules
+ * @num_hdls: num of rules being removed
+ * @hdl: all handles need to go back to back here, no pointers
+ */
+struct ipa_ioc_del_flt_rule {
+	uint8_t commit;
+	enum ipa_ip_type ip;
+	uint8_t num_hdls;
+	struct ipa_flt_rule_del hdl[0];
+};
+
+/**
+ * struct ipa_ioc_get_rt_tbl - routing table lookup parameters; if lookup was
+ * successful, the caller must call put to release the reference
+ * count when done
+ * @ip: IP family of table
+ * @name: name of routing table resource
+ * @hdl:	output parameter, handle of routing table, valid only when ioctl
+ *		return val is non-negative
+ */
+struct ipa_ioc_get_rt_tbl {
+	enum ipa_ip_type ip;
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t hdl;
+};
+
+/**
+ * struct ipa_ioc_query_intf - used to lookup number of tx and
+ * rx properties of interface
+ * @name: name of interface
+ * @num_tx_props:	output parameter, number of tx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_rx_props:	output parameter, number of rx properties
+ *			valid only when ioctl return val is non-negative
+ * @num_ext_props:	output parameter, number of ext properties
+ *			valid only when ioctl return val is non-negative
+ * @excp_pipe:		exception packets of this interface should be
+ *			routed to this pipe
+ */
+struct ipa_ioc_query_intf {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	uint32_t num_rx_props;
+	uint32_t num_ext_props;
+	enum ipa_client_type excp_pipe;
+};
+
+/**
+ * struct ipa_ioc_tx_intf_prop - interface tx property
+ * @ip: IP family of routing rule
+ * @attrib: routing rule
+ * @dst_pipe: routing output pipe
+ * @alt_dst_pipe: alternate routing output pipe
+ * @hdr_name: name of associated header if any, empty string when no header
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_tx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type dst_pipe;
+	enum ipa_client_type alt_dst_pipe;
+	char hdr_name[IPA_RESOURCE_NAME_MAX];
+	enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_tx_props - interface tx properties
+ * @name: name of interface
+ * @num_tx_props: number of TX properties
+ * @tx[0]: output parameter, the tx properties go here back to back
+ */
+struct ipa_ioc_query_intf_tx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_tx_props;
+	struct ipa_ioc_tx_intf_prop tx[0];
+};
+
+/**
+ * struct ipa_ioc_ext_intf_prop - interface extended property
+ * @ip: IP family of routing rule
+ * @eq_attrib: attributes of the rule in equation form
+ * @action: action field
+ * @rt_tbl_idx: index of RT table referred to by filter rule
+ * @mux_id: MUX_ID
+ * @filter_hdl: handle of filter (as specified by provider of filter rule)
+ * @is_xlat_rule: it is xlat flt rule or not
+ */
+struct ipa_ioc_ext_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+	enum ipa_flt_action action;
+	uint32_t rt_tbl_idx;
+	uint8_t mux_id;
+	uint32_t filter_hdl;
+	uint8_t is_xlat_rule;
+	uint32_t rule_id;
+	uint8_t is_rule_hashable;
+};
+
+/**
+ * struct ipa_ioc_query_intf_ext_props - interface ext properties
+ * @name: name of interface
+ * @num_ext_props: number of EXT properties
+ * @ext[0]: output parameter, the ext properties go here back to back
+ */
+struct ipa_ioc_query_intf_ext_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_ext_props;
+	struct ipa_ioc_ext_intf_prop ext[0];
+};
+
+/**
+ * struct ipa_ioc_rx_intf_prop - interface rx property
+ * @ip: IP family of filtering rule
+ * @attrib: filtering rule
+ * @src_pipe: input pipe
+ * @hdr_l2_type: type of associated header if any, use NONE when no header
+ */
+struct ipa_ioc_rx_intf_prop {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	enum ipa_client_type src_pipe;
+	enum ipa_hdr_l2_type hdr_l2_type;
+};
+
+/**
+ * struct ipa_ioc_query_intf_rx_props - interface rx properties
+ * @name: name of interface
+ * @num_rx_props: number of RX properties
+ * @rx: output parameter, the rx properties go here back to back
+ */
+struct ipa_ioc_query_intf_rx_props {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint32_t num_rx_props;
+	struct ipa_ioc_rx_intf_prop rx[0];
+};
+
+/**
+ * struct ipa_ioc_nat_alloc_mem - nat table memory allocation
+ * properties
+ * @dev_name: input parameter, the name of the table
+ * @size: input parameter, size of table in bytes
+ * @offset: output parameter, offset into page in case of system memory
+ */
+struct ipa_ioc_nat_alloc_mem {
+	char dev_name[IPA_RESOURCE_NAME_MAX];
+	size_t size;
+	off_t offset;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_init - nat table initialization
+ * parameters
+ * @tbl_index: input parameter, index of the table
+ * @ipv4_rules_offset: input parameter, ipv4 rules address offset
+ * @expn_rules_offset: input parameter, ipv4 expansion rules address offset
+ * @index_offset: input parameter, index rules offset
+ * @index_expn_offset: input parameter, index expansion rules offset
+ * @table_entries: input parameter, ipv4 rules table size in entries
+ * @expn_table_entries: input parameter, ipv4 expansion rules table size
+ * @ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_init {
+	uint8_t tbl_index;
+	uint32_t ipv4_rules_offset;
+	uint32_t expn_rules_offset;
+
+	uint32_t index_offset;
+	uint32_t index_expn_offset;
+
+	uint16_t table_entries;
+	uint16_t expn_table_entries;
+	uint32_t ip_addr;
+};
+
+/**
+ * struct ipa_ioc_v4_nat_del - nat table delete parameter
+ * @table_index: input parameter, index of the table
+ * @public_ip_addr: input parameter, public ip address
+ */
+struct ipa_ioc_v4_nat_del {
+	uint8_t table_index;
+	uint32_t public_ip_addr;
+};
+
+/**
+ * struct ipa_ioc_nat_dma_one - nat dma command parameter
+ * @table_index: input parameter, index of the table
+ * @base_addr:	type of table, from which the base address of the table
+ *		can be inferred
+ * @offset: destination offset within the NAT table
+ * @data: data to be written.
+ */
+struct ipa_ioc_nat_dma_one {
+	uint8_t table_index;
+	uint8_t base_addr;
+
+	uint32_t offset;
+	uint16_t data;
+
+};
+
+/**
+ * struct ipa_ioc_nat_dma_cmd - To hold multiple nat dma commands
+ * @entries: number of dma commands in use
+ * @dma: data pointer to the dma commands
+ */
+struct ipa_ioc_nat_dma_cmd {
+	uint8_t entries;
+	struct ipa_ioc_nat_dma_one dma[0];
+
+};
+
+/**
+ * struct ipa_msg_meta - Format of the message meta-data.
+ * @msg_type: the type of the message
+ * @rsvd: reserved bits for future use.
+ * @msg_len: the length of the message in bytes
+ *
+ * For push model:
+ * A client in user-space should issue a read on the device (/dev/ipa) with a
+ * sufficiently large buffer in a continuous loop; the call will block when
+ * there is no message to read. Upon return, the client can read the
+ * ipa_msg_meta from the start of the buffer to find out the type and length
+ * of the message.
+ * Size of buffer supplied >= (size of largest message + size of metadata)
+ *
+ * For pull model:
+ * A client in user-space can also issue a pull msg IOCTL to the device
+ * (/dev/ipa) with a payload containing space for the ipa_msg_meta and the
+ * message-specific payload.
+ * Size of buffer supplied == (len of specific message + size of metadata)
+ */
+struct ipa_msg_meta {
+	uint8_t msg_type;
+	uint8_t rsvd;
+	uint16_t msg_len;
+};
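A minimal user-space sketch of the push-model loop described above: read() blocks until a message is available, then the buffer starts with struct ipa_msg_meta followed by msg_len bytes of payload. The 4 KB buffer size is an assumption about "sufficiently large". Needs <fcntl.h>, <unistd.h>, <stdio.h>, <string.h> and this header.

/* Illustrative push-model reader for /dev/ipa. */
static void ipa_msg_loop(void)
{
	char buf[4096];
	struct ipa_msg_meta meta;
	ssize_t n;
	int fd = open("/dev/ipa", O_RDONLY);

	if (fd < 0)
		return;
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		if (n < (ssize_t)sizeof(meta))
			continue;
		memcpy(&meta, buf, sizeof(meta));
		printf("msg type %u, len %u\n", meta.msg_type, meta.msg_len);
		/* payload starts at buf + sizeof(meta) and is msg_len bytes */
	}
	close(fd);
}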
+
+/**
+ * struct ipa_wlan_msg - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @mac_addr: mac address of wlan client
+ *
+ * wlan drivers need to pass name of wlan iface and mac address of
+ * wlan client along with ipa_wlan_event, whenever a wlan client is
+ * connected/disconnected/moved to power save/come out of power save
+ */
+struct ipa_wlan_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * enum ipa_wlan_hdr_attrib_type - attribute type
+ * in wlan client header
+ *
+ * WLAN_HDR_ATTRIB_MAC_ADDR: attrib type mac address
+ * WLAN_HDR_ATTRIB_STA_ID: attrib type station id
+ */
+enum ipa_wlan_hdr_attrib_type {
+	WLAN_HDR_ATTRIB_MAC_ADDR,
+	WLAN_HDR_ATTRIB_STA_ID
+};
+
+/**
+ * struct ipa_wlan_hdr_attrib_val - header attribute value
+ * @attrib_type: type of attribute
+ * @offset: offset of attribute within header
+ * @u.mac_addr: mac address
+ * @u.sta_id: station id
+ */
+struct ipa_wlan_hdr_attrib_val {
+	enum ipa_wlan_hdr_attrib_type attrib_type;
+	uint8_t offset;
+	union {
+		uint8_t mac_addr[IPA_MAC_ADDR_SIZE];
+		uint8_t sta_id;
+	} u;
+};
+
+/**
+ * struct ipa_wlan_msg_ex - To hold information about wlan client
+ * @name: name of the wlan interface
+ * @num_of_attribs: number of attributes
+ * @attrib_val: holds attribute values
+ *
+ * wlan drivers need to pass name of wlan iface and mac address
+ * of wlan client or station id along with ipa_wlan_event,
+ * whenever a wlan client is connected/disconnected/moved to
+ * power save/come out of power save
+ */
+struct ipa_wlan_msg_ex {
+	char name[IPA_RESOURCE_NAME_MAX];
+	uint8_t num_of_attribs;
+	struct ipa_wlan_hdr_attrib_val attribs[0];
+};
+
+struct ipa_ecm_msg {
+	char name[IPA_RESOURCE_NAME_MAX];
+	int ifindex;
+};
+
+/**
+ * struct ipa_wan_msg - To hold information about wan client
+ * @name: name of the wan interface
+ *
+ * CnE needs to pass the name of the default wan iface when connected/disconnected.
+ * CnE needs to pass the gw info in wlan AP+STA mode.
+ * netmgr needs to pass the name of the wan eMBMS iface when connected.
+ */
+struct ipa_wan_msg {
+	char upstream_ifname[IPA_RESOURCE_NAME_MAX];
+	char tethered_ifname[IPA_RESOURCE_NAME_MAX];
+	enum ipa_ip_type ip;
+	uint32_t ipv4_addr_gw;
+	uint32_t ipv6_addr_gw[IPA_WAN_MSG_IPv6_ADDR_GW_LEN];
+};
+
+/**
+ * struct ipa_ioc_rm_dependency - parameters for add/delete dependency
+ * @resource_name: name of dependent resource
+ * @depends_on_name: name of its dependency
+ */
+struct ipa_ioc_rm_dependency {
+	enum ipa_rm_resource_name resource_name;
+	enum ipa_rm_resource_name depends_on_name;
+};
+
+struct ipa_ioc_generate_flt_eq {
+	enum ipa_ip_type ip;
+	struct ipa_rule_attrib attrib;
+	struct ipa_ipfltri_rule_eq eq_attrib;
+};
+
+/**
+ * struct ipa_ioc_write_qmapid - to write mux id to endpoint meta register
+ * @client: endpoint whose meta register is written
+ * @qmap_id: qmap mux id of wan
+ */
+struct ipa_ioc_write_qmapid {
+	enum ipa_client_type client;
+	uint8_t qmap_id;
+};
+
+enum ipacm_client_enum {
+	IPACM_CLIENT_USB = 1,
+	IPACM_CLIENT_WLAN,
+	IPACM_CLIENT_MAX
+};
+/**
+ *   actual IOCTLs supported by IPA driver
+ */
+#define IPA_IOC_ADD_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_HDR, \
+					struct ipa_ioc_add_hdr *)
+#define IPA_IOC_DEL_HDR _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_HDR, \
+					struct ipa_ioc_del_hdr *)
+#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE, \
+					struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_RT_RULE_AFTER, \
+					struct ipa_ioc_add_rt_rule_after *)
+#define IPA_IOC_DEL_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_RT_RULE, \
+					struct ipa_ioc_del_rt_rule *)
+#define IPA_IOC_ADD_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE, \
+					struct ipa_ioc_add_flt_rule *)
+#define IPA_IOC_ADD_FLT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_ADD_FLT_RULE_AFTER, \
+					struct ipa_ioc_add_flt_rule_after *)
+#define IPA_IOC_DEL_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_DEL_FLT_RULE, \
+					struct ipa_ioc_del_flt_rule *)
+#define IPA_IOC_COMMIT_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_COMMIT_HDR)
+#define IPA_IOC_RESET_HDR _IO(IPA_IOC_MAGIC,\
+					IPA_IOCTL_RESET_HDR)
+#define IPA_IOC_COMMIT_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_RT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_RESET_RT, \
+					enum ipa_ip_type)
+#define IPA_IOC_COMMIT_FLT _IOW(IPA_IOC_MAGIC, \
+					IPA_IOCTL_COMMIT_FLT, \
+					enum ipa_ip_type)
+#define IPA_IOC_RESET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_RESET_FLT, \
+			enum ipa_ip_type)
+#define IPA_IOC_DUMP _IO(IPA_IOC_MAGIC, \
+			IPA_IOCTL_DUMP)
+#define IPA_IOC_GET_RT_TBL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_RT_TBL, \
+				struct ipa_ioc_get_rt_tbl *)
+#define IPA_IOC_PUT_RT_TBL _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_RT_TBL, \
+				uint32_t)
+#define IPA_IOC_COPY_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_COPY_HDR, \
+				struct ipa_ioc_copy_hdr *)
+#define IPA_IOC_QUERY_INTF _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF, \
+				struct ipa_ioc_query_intf *)
+#define IPA_IOC_QUERY_INTF_TX_PROPS _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_INTF_TX_PROPS, \
+				struct ipa_ioc_query_intf_tx_props *)
+#define IPA_IOC_QUERY_INTF_RX_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_RX_PROPS, \
+					struct ipa_ioc_query_intf_rx_props *)
+#define IPA_IOC_QUERY_INTF_EXT_PROPS _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_QUERY_INTF_EXT_PROPS, \
+					struct ipa_ioc_query_intf_ext_props *)
+#define IPA_IOC_GET_HDR _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HDR, \
+				struct ipa_ioc_get_hdr *)
+#define IPA_IOC_PUT_HDR _IOW(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PUT_HDR, \
+				uint32_t)
+#define IPA_IOC_ALLOC_NAT_MEM _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ALLOC_NAT_MEM, \
+				struct ipa_ioc_nat_alloc_mem *)
+#define IPA_IOC_V4_INIT_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_INIT_NAT, \
+				struct ipa_ioc_v4_nat_init *)
+#define IPA_IOC_NAT_DMA _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NAT_DMA, \
+				struct ipa_ioc_nat_dma_cmd *)
+#define IPA_IOC_V4_DEL_NAT _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_V4_DEL_NAT, \
+				struct ipa_ioc_v4_nat_del *)
+#define IPA_IOC_GET_NAT_OFFSET _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_NAT_OFFSET, \
+				uint32_t *)
+#define IPA_IOC_SET_FLT _IOW(IPA_IOC_MAGIC, \
+			IPA_IOCTL_SET_FLT, \
+			uint32_t)
+#define IPA_IOC_PULL_MSG _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_PULL_MSG, \
+				struct ipa_msg_meta *)
+#define IPA_IOC_RM_ADD_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_ADD_DEPENDENCY, \
+				struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_RM_DEL_DEPENDENCY _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_RM_DEL_DEPENDENCY, \
+				struct ipa_ioc_rm_dependency *)
+#define IPA_IOC_GENERATE_FLT_EQ _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GENERATE_FLT_EQ, \
+				struct ipa_ioc_generate_flt_eq *)
+#define IPA_IOC_QUERY_EP_MAPPING _IOR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_EP_MAPPING, \
+				uint32_t)
+#define IPA_IOC_QUERY_RT_TBL_INDEX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_QUERY_RT_TBL_INDEX, \
+				struct ipa_ioc_get_rt_tbl_indx *)
+#define IPA_IOC_WRITE_QMAPID  _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_WRITE_QMAPID, \
+				struct ipa_ioc_write_qmapid *)
+#define IPA_IOC_MDFY_FLT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_FLT_RULE, \
+					struct ipa_ioc_mdfy_flt_rule *)
+#define IPA_IOC_MDFY_RT_RULE _IOWR(IPA_IOC_MAGIC, \
+					IPA_IOCTL_MDFY_RT_RULE, \
+					struct ipa_ioc_mdfy_rt_rule *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_ADD _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_ADD, \
+				struct ipa_wan_msg *)
+
+#define IPA_IOC_NOTIFY_WAN_UPSTREAM_ROUTE_DEL _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_UPSTREAM_ROUTE_DEL, \
+				struct ipa_wan_msg *)
+#define IPA_IOC_NOTIFY_WAN_EMBMS_CONNECTED _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_NOTIFY_WAN_EMBMS_CONNECTED, \
+				struct ipa_wan_msg *)
+#define IPA_IOC_ADD_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_ADD_HDR_PROC_CTX, \
+				struct ipa_ioc_add_hdr_proc_ctx *)
+#define IPA_IOC_DEL_HDR_PROC_CTX _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_DEL_HDR_PROC_CTX, \
+				struct ipa_ioc_del_hdr_proc_ctx *)
+
+#define IPA_IOC_GET_HW_VERSION _IOWR(IPA_IOC_MAGIC, \
+				IPA_IOCTL_GET_HW_VERSION, \
+				enum ipa_hw_type *)
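A minimal sketch of how one of the ioctls above might be invoked from user space, using IPA_IOC_GET_HW_VERSION and the /dev/ipa node referenced in the ipa_msg_meta description; needs <fcntl.h>, <sys/ioctl.h>, <stdio.h>, <unistd.h> and this header.

/* Illustrative only: query and print the IPA hardware version. */
static int ipa_print_hw_version(void)
{
	enum ipa_hw_type hw = IPA_HW_None;
	int fd = open("/dev/ipa", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, IPA_IOC_GET_HW_VERSION, &hw) < 0) {
		close(fd);
		return -1;
	}
	printf("IPA HW type: %d\n", hw);
	close(fd);
	return 0;
}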
+
+/*
+ * unique magic number of the Tethering bridge ioctls
+ */
+#define TETH_BRIDGE_IOC_MAGIC 0xCE
+
+/*
+ * Ioctls supported by Tethering bridge driver
+ */
+#define TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE	0
+#define TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS	1
+#define TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS	2
+#define TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES	3
+#define TETH_BRIDGE_IOCTL_MAX			4
+
+
+/**
+ * enum teth_link_protocol_type - link protocol (IP / Ethernet)
+ */
+enum teth_link_protocol_type {
+	TETH_LINK_PROTOCOL_IP,
+	TETH_LINK_PROTOCOL_ETHERNET,
+	TETH_LINK_PROTOCOL_MAX,
+};
+
+/**
+ * enum teth_aggr_protocol_type - Aggregation protocol (MBIM / TLP)
+ */
+enum teth_aggr_protocol_type {
+	TETH_AGGR_PROTOCOL_NONE,
+	TETH_AGGR_PROTOCOL_MBIM,
+	TETH_AGGR_PROTOCOL_TLP,
+	TETH_AGGR_PROTOCOL_MAX,
+};
+
+/**
+ * struct teth_aggr_params_link - Aggregation parameters for uplink/downlink
+ * @aggr_prot:			Aggregation protocol (MBIM / TLP)
+ * @max_transfer_size_byte:	Maximal size of aggregated packet in bytes.
+ *				Default value is 16*1024.
+ * @max_datagrams:		Maximal number of IP packets in an aggregated
+ *				packet. Default value is 16
+ */
+struct teth_aggr_params_link {
+	enum teth_aggr_protocol_type aggr_prot;
+	uint32_t max_transfer_size_byte;
+	uint32_t max_datagrams;
+};
+
+
+/**
+ * struct teth_aggr_params - Aggregation parameters
+ * @ul:	Uplink parameters
+ * @dl: Downlink parameters
+ */
+struct teth_aggr_params {
+	struct teth_aggr_params_link ul;
+	struct teth_aggr_params_link dl;
+};
+
+/**
+ * struct teth_aggr_capabilities - Aggregation capabilities
+ * @num_protocols:		Number of protocols described in the array
+ * @prot_caps[]:		Array of aggregation capabilities per protocol
+ */
+struct teth_aggr_capabilities {
+	uint16_t num_protocols;
+	struct teth_aggr_params_link prot_caps[0];
+};
+
+/**
+ * struct teth_ioc_set_bridge_mode
+ * @link_protocol: link protocol (IP / Ethernet)
+ * @lcid: logical channel number
+ */
+struct teth_ioc_set_bridge_mode {
+	enum teth_link_protocol_type link_protocol;
+	uint16_t lcid;
+};
+
+/**
+ * struct teth_ioc_set_aggr_params
+ * @aggr_params: Aggregation parameters
+ * @lcid: logical channel number
+ */
+struct teth_ioc_aggr_params {
+	struct teth_aggr_params aggr_params;
+	uint16_t lcid;
+};
+
+
+#define TETH_BRIDGE_IOC_SET_BRIDGE_MODE _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_BRIDGE_MODE, \
+				struct teth_ioc_set_bridge_mode *)
+#define TETH_BRIDGE_IOC_SET_AGGR_PARAMS _IOW(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_SET_AGGR_PARAMS, \
+				struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_PARAMS _IOR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_PARAMS, \
+				struct teth_ioc_aggr_params *)
+#define TETH_BRIDGE_IOC_GET_AGGR_CAPABILITIES _IOWR(TETH_BRIDGE_IOC_MAGIC, \
+				TETH_BRIDGE_IOCTL_GET_AGGR_CAPABILITIES, \
+				struct teth_aggr_capabilities *)
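A minimal sketch of configuring MBIM aggregation on logical channel 0 through TETH_BRIDGE_IOC_SET_AGGR_PARAMS. The tethering bridge device node name is not defined in this header, so the file descriptor is assumed to have been obtained elsewhere; needs <string.h>, <sys/ioctl.h> and this header.

/* Illustrative only: 16 KB / 16-datagram MBIM aggregation on both directions. */
static int teth_set_mbim_aggr(int teth_fd)
{
	struct teth_ioc_aggr_params prm;

	memset(&prm, 0, sizeof(prm));
	prm.lcid = 0;
	prm.aggr_params.ul.aggr_prot = TETH_AGGR_PROTOCOL_MBIM;
	prm.aggr_params.ul.max_transfer_size_byte = 16 * 1024;
	prm.aggr_params.ul.max_datagrams = 16;
	prm.aggr_params.dl = prm.aggr_params.ul;

	return ioctl(teth_fd, TETH_BRIDGE_IOC_SET_AGGR_PARAMS, &prm);
}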
+
+/*
+ * unique magic number of the ODU bridge ioctls
+ */
+#define ODU_BRIDGE_IOC_MAGIC 0xCD
+
+/*
+ * Ioctls supported by ODU bridge driver
+ */
+#define ODU_BRIDGE_IOCTL_SET_MODE	0
+#define ODU_BRIDGE_IOCTL_SET_LLV6_ADDR	1
+#define ODU_BRIDGE_IOCTL_MAX		2
+
+/**
+ * enum odu_bridge_mode - bridge mode
+ *			(ROUTER MODE / BRIDGE MODE)
+ */
+enum odu_bridge_mode {
+	ODU_BRIDGE_MODE_ROUTER,
+	ODU_BRIDGE_MODE_BRIDGE,
+	ODU_BRIDGE_MODE_MAX,
+};
+
+#define ODU_BRIDGE_IOC_SET_MODE _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_MODE, \
+				enum odu_bridge_mode)
+
+#define ODU_BRIDGE_IOC_SET_LLV6_ADDR _IOW(ODU_BRIDGE_IOC_MAGIC, \
+				ODU_BRIDGE_IOCTL_SET_LLV6_ADDR, \
+				struct in6_addr *)
+
+#endif /* _UAPI_MSM_IPA_H_ */
diff --git a/include/uapi/linux/msm_ipc.h b/include/uapi/linux/msm_ipc.h
new file mode 100644
index 0000000..29711c0
--- /dev/null
+++ b/include/uapi/linux/msm_ipc.h
@@ -0,0 +1,91 @@
+#ifndef _UAPI_MSM_IPC_H_
+#define _UAPI_MSM_IPC_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct msm_ipc_port_addr {
+	uint32_t node_id;
+	uint32_t port_id;
+};
+
+struct msm_ipc_port_name {
+	uint32_t service;
+	uint32_t instance;
+};
+
+struct msm_ipc_addr {
+	unsigned char  addrtype;
+	union {
+		struct msm_ipc_port_addr port_addr;
+		struct msm_ipc_port_name port_name;
+	} addr;
+};
+
+#define MSM_IPC_WAIT_FOREVER	(~0)  /* timeout for permanent subscription */
+
+/*
+ * Socket API
+ */
+
+#ifndef AF_MSM_IPC
+#define AF_MSM_IPC		27
+#endif
+
+#ifndef PF_MSM_IPC
+#define PF_MSM_IPC		AF_MSM_IPC
+#endif
+
+#define MSM_IPC_ADDR_NAME		1
+#define MSM_IPC_ADDR_ID			2
+
+struct sockaddr_msm_ipc {
+	unsigned short family;
+	struct msm_ipc_addr address;
+	unsigned char reserved;
+};
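A minimal sketch of binding an IPC Router socket by service name using the address layout above. The socket type (SOCK_DGRAM) is an assumption, not something this header specifies; needs <stdint.h>, <sys/socket.h>, <string.h> and this header.

/* Illustrative only: create an AF_MSM_IPC socket and bind it to a name. */
static int msm_ipc_bind_service(uint32_t service, uint32_t instance)
{
	struct sockaddr_msm_ipc addr;
	int fd = socket(AF_MSM_IPC, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.family = AF_MSM_IPC;
	addr.address.addrtype = MSM_IPC_ADDR_NAME;
	addr.address.addr.port_name.service = service;
	addr.address.addr.port_name.instance = instance;
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;	/* caller should close(fd) on failure */
	return fd;
}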
+
+struct config_sec_rules_args {
+	int num_group_info;
+	uint32_t service_id;
+	uint32_t instance_id;
+	unsigned int reserved;
+	gid_t group_id[0];
+};
+
+#define IPC_ROUTER_IOCTL_MAGIC (0xC3)
+
+#define IPC_ROUTER_IOCTL_GET_VERSION \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 0, unsigned int)
+
+#define IPC_ROUTER_IOCTL_GET_MTU \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 1, unsigned int)
+
+#define IPC_ROUTER_IOCTL_LOOKUP_SERVER \
+	_IOWR(IPC_ROUTER_IOCTL_MAGIC, 2, struct sockaddr_msm_ipc)
+
+#define IPC_ROUTER_IOCTL_GET_CURR_PKT_SIZE \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 3, unsigned int)
+
+#define IPC_ROUTER_IOCTL_BIND_CONTROL_PORT \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 4, unsigned int)
+
+#define IPC_ROUTER_IOCTL_CONFIG_SEC_RULES \
+	_IOR(IPC_ROUTER_IOCTL_MAGIC, 5, struct config_sec_rules_args)
+
+struct msm_ipc_server_info {
+	uint32_t node_id;
+	uint32_t port_id;
+	uint32_t service;
+	uint32_t instance;
+};
+
+struct server_lookup_args {
+	struct msm_ipc_port_name port_name;
+	int num_entries_in_array;
+	int num_entries_found;
+	uint32_t lookup_mask;
+	struct msm_ipc_server_info srv_info[0];
+};
+
+#endif
diff --git a/include/uapi/linux/msm_rmnet.h b/include/uapi/linux/msm_rmnet.h
new file mode 100644
index 0000000..788d7a8
--- /dev/null
+++ b/include/uapi/linux/msm_rmnet.h
@@ -0,0 +1,152 @@
+#ifndef _UAPI_MSM_RMNET_H_
+#define _UAPI_MSM_RMNET_H_
+
+/* Bitmap macros for RmNET driver operation mode. */
+#define RMNET_MODE_NONE     (0x00)
+#define RMNET_MODE_LLP_ETH  (0x01)
+#define RMNET_MODE_LLP_IP   (0x02)
+#define RMNET_MODE_QOS      (0x04)
+#define RMNET_MODE_MASK     (RMNET_MODE_LLP_ETH | \
+			     RMNET_MODE_LLP_IP  | \
+			     RMNET_MODE_QOS)
+
+#define RMNET_IS_MODE_QOS(mode)  \
+	((mode & RMNET_MODE_QOS) == RMNET_MODE_QOS)
+#define RMNET_IS_MODE_IP(mode)   \
+	((mode & RMNET_MODE_LLP_IP) == RMNET_MODE_LLP_IP)
+
+/* IOCTL commands
+ * Values chosen to not conflict with other drivers in the ecosystem
+ */
+
+#define RMNET_IOCTL_SET_LLP_ETHERNET 0x000089F1 /* Set Ethernet protocol  */
+#define RMNET_IOCTL_SET_LLP_IP       0x000089F2 /* Set RAWIP protocol     */
+#define RMNET_IOCTL_GET_LLP          0x000089F3 /* Get link protocol      */
+#define RMNET_IOCTL_SET_QOS_ENABLE   0x000089F4 /* Set QoS header enabled */
+#define RMNET_IOCTL_SET_QOS_DISABLE  0x000089F5 /* Set QoS header disabled*/
+#define RMNET_IOCTL_GET_QOS          0x000089F6 /* Get QoS header state   */
+#define RMNET_IOCTL_GET_OPMODE       0x000089F7 /* Get operation mode     */
+#define RMNET_IOCTL_OPEN             0x000089F8 /* Open transport port    */
+#define RMNET_IOCTL_CLOSE            0x000089F9 /* Close transport port   */
+#define RMNET_IOCTL_FLOW_ENABLE      0x000089FA /* Flow enable            */
+#define RMNET_IOCTL_FLOW_DISABLE     0x000089FB /* Flow disable           */
+#define RMNET_IOCTL_FLOW_SET_HNDL    0x000089FC /* Set flow handle        */
+#define RMNET_IOCTL_EXTENDED         0x000089FD /* Extended IOCTLs        */
+
+/* RmNet Data Required IOCTLs */
+#define RMNET_IOCTL_GET_SUPPORTED_FEATURES     0x0000   /* Get features    */
+#define RMNET_IOCTL_SET_MRU                    0x0001   /* Set MRU         */
+#define RMNET_IOCTL_GET_MRU                    0x0002   /* Get MRU         */
+#define RMNET_IOCTL_GET_EPID                   0x0003   /* Get endpoint ID */
+#define RMNET_IOCTL_GET_DRIVER_NAME            0x0004   /* Get driver name */
+#define RMNET_IOCTL_ADD_MUX_CHANNEL            0x0005   /* Add MUX ID      */
+#define RMNET_IOCTL_SET_EGRESS_DATA_FORMAT     0x0006   /* Set EDF         */
+#define RMNET_IOCTL_SET_INGRESS_DATA_FORMAT    0x0007   /* Set IDF         */
+#define RMNET_IOCTL_SET_AGGREGATION_COUNT      0x0008   /* Set agg count   */
+#define RMNET_IOCTL_GET_AGGREGATION_COUNT      0x0009   /* Get agg count   */
+#define RMNET_IOCTL_SET_AGGREGATION_SIZE       0x000A   /* Set agg size    */
+#define RMNET_IOCTL_GET_AGGREGATION_SIZE       0x000B   /* Get agg size    */
+#define RMNET_IOCTL_FLOW_CONTROL               0x000C   /* Do flow control */
+#define RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL   0x000D   /* For legacy use  */
+#define RMNET_IOCTL_GET_HWSW_MAP               0x000E   /* Get HW/SW map   */
+#define RMNET_IOCTL_SET_RX_HEADROOM            0x000F   /* RX Headroom     */
+#define RMNET_IOCTL_GET_EP_PAIR                0x0010   /* Endpoint pair   */
+#define RMNET_IOCTL_SET_QOS_VERSION            0x0011   /* 8/6 byte QoS hdr*/
+#define RMNET_IOCTL_GET_QOS_VERSION            0x0012   /* 8/6 byte QoS hdr*/
+#define RMNET_IOCTL_GET_SUPPORTED_QOS_MODES    0x0013   /* Get QoS modes   */
+#define RMNET_IOCTL_SET_SLEEP_STATE            0x0014   /* Set sleep state */
+#define RMNET_IOCTL_SET_XLAT_DEV_INFO          0x0015   /* xlat dev name   */
+#define RMNET_IOCTL_DEREGISTER_DEV             0x0016   /* Dereg a net dev */
+#define RMNET_IOCTL_GET_SG_SUPPORT             0x0017   /* Query sg support*/
+
+/* Return values for the RMNET_IOCTL_GET_SUPPORTED_FEATURES IOCTL */
+#define RMNET_IOCTL_FEAT_NOTIFY_MUX_CHANNEL              (1<<0)
+#define RMNET_IOCTL_FEAT_SET_EGRESS_DATA_FORMAT          (1<<1)
+#define RMNET_IOCTL_FEAT_SET_INGRESS_DATA_FORMAT         (1<<2)
+#define RMNET_IOCTL_FEAT_SET_AGGREGATION_COUNT           (1<<3)
+#define RMNET_IOCTL_FEAT_GET_AGGREGATION_COUNT           (1<<4)
+#define RMNET_IOCTL_FEAT_SET_AGGREGATION_SIZE            (1<<5)
+#define RMNET_IOCTL_FEAT_GET_AGGREGATION_SIZE            (1<<6)
+#define RMNET_IOCTL_FEAT_FLOW_CONTROL                    (1<<7)
+#define RMNET_IOCTL_FEAT_GET_DFLT_CONTROL_CHANNEL        (1<<8)
+#define RMNET_IOCTL_FEAT_GET_HWSW_MAP                    (1<<9)
+
+/* Input values for the RMNET_IOCTL_SET_EGRESS_DATA_FORMAT IOCTL  */
+#define RMNET_IOCTL_EGRESS_FORMAT_MAP                  (1<<1)
+#define RMNET_IOCTL_EGRESS_FORMAT_AGGREGATION          (1<<2)
+#define RMNET_IOCTL_EGRESS_FORMAT_MUXING               (1<<3)
+#define RMNET_IOCTL_EGRESS_FORMAT_CHECKSUM             (1<<4)
+
+/* Input values for the RMNET_IOCTL_SET_INGRESS_DATA_FORMAT IOCTL */
+#define RMNET_IOCTL_INGRESS_FORMAT_MAP                 (1<<1)
+#define RMNET_IOCTL_INGRESS_FORMAT_DEAGGREGATION       (1<<2)
+#define RMNET_IOCTL_INGRESS_FORMAT_DEMUXING            (1<<3)
+#define RMNET_IOCTL_INGRESS_FORMAT_CHECKSUM            (1<<4)
+#define RMNET_IOCTL_INGRESS_FORMAT_AGG_DATA            (1<<5)
+
+/* User space may not have this defined. */
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+struct rmnet_ioctl_extended_s {
+	uint32_t   extended_ioctl;
+	union {
+		uint32_t data; /* Generic data field for most extended IOCTLs */
+
+		/* Return values for
+		 *    RMNET_IOCTL_GET_DRIVER_NAME
+		 *    RMNET_IOCTL_GET_DFLT_CONTROL_CHANNEL
+		 */
+		int8_t if_name[IFNAMSIZ];
+
+		/* Input values for the RMNET_IOCTL_ADD_MUX_CHANNEL IOCTL */
+		struct {
+			uint32_t  mux_id;
+			int8_t    vchannel_name[IFNAMSIZ];
+		} rmnet_mux_val;
+
+		/* Input values for the RMNET_IOCTL_FLOW_CONTROL IOCTL */
+		struct {
+			uint8_t   flow_mode;
+			uint8_t   mux_id;
+		} flow_control_prop;
+
+		/* Return values for RMNET_IOCTL_GET_EP_PAIR */
+		struct {
+			uint32_t   consumer_pipe_num;
+			uint32_t   producer_pipe_num;
+		} ipa_ep_pair;
+
+		struct {
+			uint32_t __data; /* Placeholder for legacy data*/
+			uint32_t agg_size;
+			uint32_t agg_count;
+		} ingress_format;
+	} u;
+};
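A minimal sketch of one extended ioctl, reading the endpoint ID with RMNET_IOCTL_GET_EPID. The delivery convention assumed here (RMNET_IOCTL_EXTENDED carried in a struct ifreq whose ifr_data points at the extended block, issued on any socket fd) is not spelled out in this header and is an assumption; the generic u.data field is used for the result as described above. Needs <stdint.h>, <net/if.h>, <string.h>, <sys/ioctl.h> and this header.

/* Illustrative only; the ifreq-based calling convention is assumed. */
static int rmnet_get_epid(int sock_fd, const char *ifname, uint32_t *epid)
{
	struct rmnet_ioctl_extended_s ext;
	struct ifreq ifr;

	memset(&ext, 0, sizeof(ext));
	memset(&ifr, 0, sizeof(ifr));
	ext.extended_ioctl = RMNET_IOCTL_GET_EPID;
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ext;

	if (ioctl(sock_fd, RMNET_IOCTL_EXTENDED, &ifr) < 0)
		return -1;
	*epid = ext.u.data;
	return 0;
}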
+
+struct rmnet_ioctl_data_s {
+	union {
+		uint32_t	operation_mode;
+		uint32_t	tcm_handle;
+	} u;
+};
+
+#define RMNET_IOCTL_QOS_MODE_6   (1<<0)
+#define RMNET_IOCTL_QOS_MODE_8   (1<<1)
+
+/* QMI QoS header definition */
+struct QMI_QOS_HDR_S {
+	unsigned char    version;
+	unsigned char    flags;
+	uint32_t         flow_id;
+} __attribute((__packed__));
+
+/* QMI QoS 8-byte header. */
+struct qmi_qos_hdr8_s {
+	struct QMI_QOS_HDR_S   hdr;
+	uint8_t                reserved[2];
+} __attribute((__packed__));
+
+#endif /* _UAPI_MSM_RMNET_H_ */
diff --git a/include/uapi/linux/net_map.h b/include/uapi/linux/net_map.h
new file mode 100644
index 0000000..a5d6d58
--- /dev/null
+++ b/include/uapi/linux/net_map.h
@@ -0,0 +1,8 @@
+#ifndef _NET_MAP_H_
+#define _NET_MAP_H_
+
+#define RMNET_IP_VER_MASK 0xF0
+#define RMNET_IPV4        0x40
+#define RMNET_IPV6        0x60
+
+#endif /* _NET_MAP_H_ */
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 208ae93..faaa28b 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -4,6 +4,7 @@
  * Header file for Xtables timer target module.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and forward-ported to 2.6.34
@@ -32,12 +33,19 @@
 #include <linux/types.h>
 
 #define MAX_IDLETIMER_LABEL_SIZE 28
+#define NLMSG_MAX_SIZE 64
+
+#define NL_EVENT_TYPE_INACTIVE 0
+#define NL_EVENT_TYPE_ACTIVE 1
 
 struct idletimer_tg_info {
 	__u32 timeout;
 
 	char label[MAX_IDLETIMER_LABEL_SIZE];
 
+	/* Use netlink messages for notification in addition to sysfs */
+	__u8 send_nl_msg;
+
 	/* for kernel module internal use only */
 	struct idletimer_tg *timer __attribute__((aligned(8)));
 };
diff --git a/include/uapi/linux/netfilter/xt_socket.h b/include/uapi/linux/netfilter/xt_socket.h
index 87644f8..7f00df6 100644
--- a/include/uapi/linux/netfilter/xt_socket.h
+++ b/include/uapi/linux/netfilter/xt_socket.h
@@ -26,4 +26,11 @@
 			   | XT_SOCKET_NOWILDCARD \
 			   | XT_SOCKET_RESTORESKMARK)
 
+struct sock *xt_socket_lookup_slow_v4(struct net *net,
+				      const struct sk_buff *skb,
+				      const struct net_device *indev);
+struct sock *xt_socket_lookup_slow_v6(struct net *net,
+				      const struct sk_buff *skb,
+				      const struct net_device *indev);
+
 #endif /* _XT_SOCKET_H */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index a8d0759..c1af9b3 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -197,4 +197,13 @@
 # define PR_CAP_AMBIENT_LOWER		3
 # define PR_CAP_AMBIENT_CLEAR_ALL	4
 
+/* Sets the timer slack for an arbitrary thread
+ * arg2: slack value, 0 means "use default"
+ * arg3: pid of the thread whose timer slack needs to be set
+ */
+#define PR_SET_TIMERSLACK_PID	127
+
+#define PR_SET_VMA		0x53564d41
+# define PR_SET_VMA_ANON_NAME		0
+
 #endif /* _LINUX_PRCTL_H */
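A hedged sketch of how the PR_SET_VMA extension above is conventionally used: the extra arguments (start address, length, name pointer) follow the downstream Android convention for PR_SET_VMA_ANON_NAME and are not spelled out in this header, so treat them as an assumption.

#include <stdio.h>
#include <sys/mman.h>
#include <sys/prctl.h>

#ifndef PR_SET_VMA
#define PR_SET_VMA		0x53564d41
#define PR_SET_VMA_ANON_NAME	0
#endif

int main(void)
{
	size_t len = 1 << 20;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	/* Names the mapping "[anon:scratch-heap]" in /proc/<pid>/maps. */
	if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
		  (unsigned long)p, len, "scratch-heap"))
		perror("PR_SET_VMA");
	return 0;
}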
diff --git a/include/uapi/linux/rmnet_data.h b/include/uapi/linux/rmnet_data.h
new file mode 100644
index 0000000..7044df4
--- /dev/null
+++ b/include/uapi/linux/rmnet_data.h
@@ -0,0 +1,236 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration specification
+ */
+
+#ifndef _RMNET_DATA_H_
+#define _RMNET_DATA_H_
+
+/* Constants */
+#define RMNET_LOCAL_LOGICAL_ENDPOINT -1
+
+#define RMNET_EGRESS_FORMAT__RESERVED__         (1<<0)
+#define RMNET_EGRESS_FORMAT_MAP                 (1<<1)
+#define RMNET_EGRESS_FORMAT_AGGREGATION         (1<<2)
+#define RMNET_EGRESS_FORMAT_MUXING              (1<<3)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV3         (1<<4)
+#define RMNET_EGRESS_FORMAT_MAP_CKSUMV4         (1<<5)
+
+#define RMNET_INGRESS_FIX_ETHERNET              (1<<0)
+#define RMNET_INGRESS_FORMAT_MAP                (1<<1)
+#define RMNET_INGRESS_FORMAT_DEAGGREGATION      (1<<2)
+#define RMNET_INGRESS_FORMAT_DEMUXING           (1<<3)
+#define RMNET_INGRESS_FORMAT_MAP_COMMANDS       (1<<4)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV3        (1<<5)
+#define RMNET_INGRESS_FORMAT_MAP_CKSUMV4        (1<<6)
+
+/* Netlink API */
+#define RMNET_NETLINK_PROTO 31
+#define RMNET_MAX_STR_LEN  16
+#define RMNET_NL_DATA_MAX_LEN 64
+
+#define RMNET_NETLINK_MSG_COMMAND    0
+#define RMNET_NETLINK_MSG_RETURNCODE 1
+#define RMNET_NETLINK_MSG_RETURNDATA 2
+
+struct rmnet_nl_msg_s {
+	uint16_t reserved;
+	uint16_t message_type;
+	uint16_t reserved2:14;
+	uint16_t crd:2;
+	union {
+		uint16_t arg_length;
+		uint16_t return_code;
+	};
+	union {
+		uint8_t data[RMNET_NL_DATA_MAX_LEN];
+		struct {
+			uint8_t  dev[RMNET_MAX_STR_LEN];
+			uint32_t flags;
+			uint16_t agg_size;
+			uint16_t agg_count;
+			uint8_t  tail_spacing;
+		} data_format;
+		struct {
+			uint8_t dev[RMNET_MAX_STR_LEN];
+			int32_t ep_id;
+			uint8_t operating_mode;
+			uint8_t next_dev[RMNET_MAX_STR_LEN];
+		} local_ep_config;
+		struct {
+			uint32_t id;
+			uint8_t  vnd_name[RMNET_MAX_STR_LEN];
+		} vnd;
+		struct {
+			uint32_t id;
+			uint32_t map_flow_id;
+			uint32_t tc_flow_id;
+		} flow_control;
+	};
+};
+
+enum rmnet_netlink_message_types_e {
+	/* RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE - Register RMNET data driver
+	 *                                          on a particular device.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE,
+
+	/* RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE - Unregister RMNET data
+	 *                                            driver on a particular
+	 *                                            device.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE,
+
+	/* RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED - Query whether the RMNET
+	 *                                            data driver is registered
+	 *                                            on a particular device.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: 1 if registered, 0 if not
+	 */
+	RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED,
+
+	/* RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT - Sets the egress data
+	 *                                             format for a particular
+	 *                                             link.
+	 * Args: uint32_t egress_flags
+	 *       char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT,
+
+	/* RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT - Gets the egress data
+	 *                                             format for a particular
+	 *                                             link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: 4 bytes of data: uint32_t egress_flags
+	 */
+	RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT,
+
+	/* RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT - Sets the ingress data
+	 *                                              format for a particular
+	 *                                              link.
+	 * Args: uint32_t ingress_flags
+	 *       char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT,
+
+	/* RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT - Gets the ingress data
+	 *                                              format for a particular
+	 *                                              link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 * Returns: 4 bytes of data: uint32_t ingress_flags
+	 */
+	RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT,
+
+	/* RMNET_NETLINK_SET_LOGICAL_EP_CONFIG - Sets the logical endpoint
+	 *                                       configuration for a particular
+	 *                                       link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 *     int32_t logical_ep_id, valid values are -1 through 31
+	 *     uint8_t rmnet_mode: one of none, vnd, bridged
+	 *     char[] egress_dev_name: Egress device if operating in bridge mode
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_SET_LOGICAL_EP_CONFIG,
+
+	/* RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG - Un-sets the logical endpoint
+	 *                                       configuration for a particular
+	 *                                       link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 *       int32_t logical_ep_id, valid values are -1 through 31
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG,
+
+	/* RMNET_NETLINK_GET_LOGICAL_EP_CONFIG - Gets the logical endpoint
+	 *                                       configuration for a particular
+	 *                                       link.
+	 * Args: char[] dev_name: Null terminated ASCII string, max length: 15
+	 *        int32_t logical_ep_id, valid values are -1 through 31
+	 * Returns: uint8_t rmnet_mode: one of none, vnd, bridged
+	 * char[] egress_dev_name: Egress device
+	 */
+	RMNET_NETLINK_GET_LOGICAL_EP_CONFIG,
+
+	/* RMNET_NETLINK_NEW_VND - Creates a new virtual network device node
+	 * Args: int32_t node number
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND,
+
+	/* RMNET_NETLINK_NEW_VND_WITH_PREFIX - Creates a new virtual network
+	 *                                     device node with the specified
+	 *                                     prefix for the device name
+	 * Args: int32_t node number
+	 *       char[] vnd_name - Use as prefix
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_NEW_VND_WITH_PREFIX,
+
+	/* RMNET_NETLINK_GET_VND_NAME - Gets the string name of a VND from ID
+	 * Args: int32_t node number
+	 * Returns: char[] vnd_name
+	 */
+	RMNET_NETLINK_GET_VND_NAME,
+
+	/* RMNET_NETLINK_FREE_VND - Removes virtual network device node
+	 * Args: int32_t node number
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_FREE_VND,
+
+	/* RMNET_NETLINK_ADD_VND_TC_FLOW - Add flow control handle on VND
+	 * Args: int32_t node number
+	 *       uint32_t MAP Flow Handle
+	 *       uint32_t TC Flow Handle
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_ADD_VND_TC_FLOW,
+
+	/* RMNET_NETLINK_DEL_VND_TC_FLOW - Removes flow control handle on VND
+	 * Args: int32_t node number
+	 *       uint32_t MAP Flow Handle
+	 * Returns: status code
+	 */
+	RMNET_NETLINK_DEL_VND_TC_FLOW
+};
+
+enum rmnet_config_endpoint_modes_e {
+	/* Pass the frame up the stack with no modifications to skb->dev      */
+	RMNET_EPMODE_NONE,
+	/* Replace skb->dev to a virtual rmnet device and pass up the stack   */
+	RMNET_EPMODE_VND,
+	/* Pass the frame directly to another device with dev_queue_xmit().   */
+	RMNET_EPMODE_BRIDGE,
+	/* Must be the last item in the list                                  */
+	RMNET_EPMODE_LENGTH
+};
+
+enum rmnet_config_return_codes_e {
+	RMNET_CONFIG_OK,
+	RMNET_CONFIG_UNKNOWN_MESSAGE,
+	RMNET_CONFIG_UNKNOWN_ERROR,
+	RMNET_CONFIG_NOMEM,
+	RMNET_CONFIG_DEVICE_IN_USE,
+	RMNET_CONFIG_INVALID_REQUEST,
+	RMNET_CONFIG_NO_SUCH_DEVICE,
+	RMNET_CONFIG_BAD_ARGUMENTS,
+	RMNET_CONFIG_BAD_EGRESS_DEVICE,
+	RMNET_CONFIG_TC_HANDLE_FULL
+};
+
+#endif /* _RMNET_DATA_H_ */
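A hedged sketch of driving this netlink API from userspace, creating virtual device node 0 with RMNET_NETLINK_NEW_VND. The framing details (crd set to RMNET_NETLINK_MSG_COMMAND, the node number carried in the vnd union member) mirror how rmnet_data configuration tooling is commonly written, but as far as this header alone goes they are assumptions.

#include <linux/netlink.h>
#include <stdint.h>
#include <linux/rmnet_data.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct {
		struct nlmsghdr nlh;
		struct rmnet_nl_msg_s msg;
	} req;
	int fd = socket(AF_NETLINK, SOCK_RAW, RMNET_NETLINK_PROTO);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_pid = getpid();
	req.msg.message_type = RMNET_NETLINK_NEW_VND;
	req.msg.crd = RMNET_NETLINK_MSG_COMMAND;	/* assumed framing */
	req.msg.arg_length = sizeof(req.msg.vnd);
	req.msg.vnd.id = 0;				/* node number */

	if (sendto(fd, &req, sizeof(req), 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) < 0) {
		close(fd);
		return 1;
	}
	/* A real client would recv() and check RMNET_NETLINK_MSG_RETURNCODE. */
	close(fd);
	return 0;
}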
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
new file mode 100644
index 0000000..228bfe8
--- /dev/null
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -0,0 +1,158 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_IPA_FD_IOCTL_H
+#define _RMNET_IPA_FD_IOCTL_H
+
+#include <linux/ioctl.h>
+#include <linux/ipa_qmi_service_v01.h>
+#include <linux/msm_ipa.h>
+
+/**
+ * unique magic number of the IPA_WAN device
+ */
+#define WAN_IOC_MAGIC 0x69
+
+#define WAN_IOCTL_ADD_FLT_RULE		0
+#define WAN_IOCTL_ADD_FLT_INDEX		1
+#define WAN_IOCTL_VOTE_FOR_BW_MBPS	2
+#define WAN_IOCTL_POLL_TETHERING_STATS  3
+#define WAN_IOCTL_SET_DATA_QUOTA        4
+#define WAN_IOCTL_SET_TETHER_CLIENT_PIPE 5
+#define WAN_IOCTL_QUERY_TETHER_STATS     6
+#define WAN_IOCTL_RESET_TETHER_STATS     7
+#define WAN_IOCTL_QUERY_DL_FILTER_STATS  8
+#define WAN_IOCTL_ADD_FLT_RULE_EX        9
+
+/* User space may not have this defined. */
+#ifndef IFNAMSIZ
+#define IFNAMSIZ 16
+#endif
+
+/**
+ * struct wan_ioctl_poll_tethering_stats - structure used for
+ *                                         WAN_IOCTL_POLL_TETHERING_STATS IOCTL.
+ *
+ * @polling_interval_secs: Polling interval in seconds.
+ * @reset_stats:           Indicate whether to reset the stats (use 1) or not.
+ *
+ * The structure used by user space to request that the tethering stats
+ * be polled. Setting the polling interval to 0 stops the polling
+ * process.
+ */
+struct wan_ioctl_poll_tethering_stats {
+	uint64_t polling_interval_secs;
+	uint8_t  reset_stats;
+};
+
+/**
+ * struct wan_ioctl_set_data_quota - structure used for
+ *                                   WAN_IOCTL_SET_DATA_QUOTA IOCTL.
+ *
+ * @interface_name:  Name of the interface on which to set the quota.
+ * @quota_mbytes:    Quota (in Mbytes) for the above interface.
+ * @set_quota:       Indicate whether to set the quota (use 1) or
+ *                   unset the quota.
+ *
+ * The structure to be used by the user space in order to request
+ * a quota to be set on a specific interface (by specifying its name).
+ */
+struct wan_ioctl_set_data_quota {
+	char     interface_name[IFNAMSIZ];
+	uint64_t quota_mbytes;
+	uint8_t  set_quota;
+};
+
+struct wan_ioctl_set_tether_client_pipe {
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint8_t reset_client;
+	uint32_t ul_src_pipe_len;
+	uint32_t ul_src_pipe_list[QMI_IPA_MAX_PIPES_V01];
+	uint32_t dl_dst_pipe_len;
+	uint32_t dl_dst_pipe_list[QMI_IPA_MAX_PIPES_V01];
+};
+
+struct wan_ioctl_query_tether_stats {
+	/* Name of the upstream interface */
+	char upstreamIface[IFNAMSIZ];
+	/* Name of the tethered interface */
+	char tetherIface[IFNAMSIZ];
+	/* enum of tether interface */
+	enum ipacm_client_enum ipa_client;
+	uint64_t ipv4_tx_packets;
+	uint64_t ipv4_tx_bytes;
+	uint64_t ipv4_rx_packets;
+	uint64_t ipv4_rx_bytes;
+	uint64_t ipv6_tx_packets;
+	uint64_t ipv6_tx_bytes;
+	uint64_t ipv6_rx_packets;
+	uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_reset_tether_stats {
+	/* Name of the upstream interface; not supported yet */
+	char upstreamIface[IFNAMSIZ];
+	/* Indicate whether to reset the stats (use 1) or not */
+	uint8_t reset_stats;
+};
+
+struct wan_ioctl_query_dl_filter_stats {
+	/* Indicate whether to reset the filter stats (use 1) or not */
+	uint8_t reset_stats;
+	/* Modem response QMI */
+	struct ipa_get_data_stats_resp_msg_v01 stats_resp;
+	/* Index of the first firewall rule */
+	uint32_t index;
+};
+
+#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE, \
+		struct ipa_install_fltr_rule_req_msg_v01 *)
+
+#define WAN_IOC_ADD_FLT_RULE_INDEX _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_INDEX, \
+		struct ipa_fltr_installed_notif_req_msg_v01 *)
+
+#define WAN_IOC_VOTE_FOR_BW_MBPS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_VOTE_FOR_BW_MBPS, \
+		uint32_t *)
+
+#define WAN_IOC_POLL_TETHERING_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_POLL_TETHERING_STATS, \
+		struct wan_ioctl_poll_tethering_stats *)
+
+#define WAN_IOC_SET_DATA_QUOTA _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_DATA_QUOTA, \
+		struct wan_ioctl_set_data_quota *)
+
+#define WAN_IOC_SET_TETHER_CLIENT_PIPE _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_SET_TETHER_CLIENT_PIPE, \
+		struct wan_ioctl_set_tether_client_pipe *)
+
+#define WAN_IOC_QUERY_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_TETHER_STATS, \
+		struct wan_ioctl_query_tether_stats *)
+
+#define WAN_IOC_RESET_TETHER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_RESET_TETHER_STATS, \
+		struct wan_ioctl_reset_tether_stats *)
+
+#define WAN_IOC_QUERY_DL_FILTER_STATS _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_QUERY_DL_FILTER_STATS, \
+		struct wan_ioctl_query_dl_filter_stats *)
+
+#define WAN_IOC_ADD_FLT_RULE_EX _IOWR(WAN_IOC_MAGIC, \
+		WAN_IOCTL_ADD_FLT_RULE_EX, \
+		struct ipa_install_fltr_rule_req_ex_msg_v01 *)
+
+#endif /* _RMNET_IPA_FD_IOCTL_H */
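A hedged sketch of setting a data quota through WAN_IOC_SET_DATA_QUOTA. The device node name ("/dev/wwan_ioctl") is an assumption: the node is created by the rmnet_ipa driver and is not named in this header.

#include <fcntl.h>
#include <linux/rmnet_ipa_fd_ioctl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Cap the named interface at the given number of megabytes. */
static int wan_set_quota(const char *iface, uint64_t mbytes)
{
	struct wan_ioctl_set_data_quota quota;
	int fd, rc;

	memset(&quota, 0, sizeof(quota));
	strncpy(quota.interface_name, iface, IFNAMSIZ - 1);
	quota.quota_mbytes = mbytes;
	quota.set_quota = 1;

	fd = open("/dev/wwan_ioctl", O_RDWR);	/* node name assumed */
	if (fd < 0)
		return -1;
	rc = ioctl(fd, WAN_IOC_SET_DATA_QUOTA, &quota);
	close(fd);
	return rc;
}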
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 5a78be5..8f97ca5 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -311,6 +311,7 @@
 	RTA_TABLE,
 	RTA_MARK,
 	RTA_MFC_STATS,
+	RTA_UID,
 	RTA_VIA,
 	RTA_NEWDST,
 	RTA_PREF,
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 99dbed8..ac3921e 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -270,4 +270,7 @@
 /* MPS2 UART */
 #define PORT_MPS2UART	116
 
+/* QTI EUD UART */
+#define PORT_EUD_UART	117
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/usb/f_accessory.h b/include/uapi/linux/usb/f_accessory.h
new file mode 100644
index 0000000..0baeb7d
--- /dev/null
+++ b/include/uapi/linux/usb/f_accessory.h
@@ -0,0 +1,146 @@
+/*
+ * Gadget Function Driver for Android USB accessories
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_ACCESSORY_H
+#define _UAPI_LINUX_USB_F_ACCESSORY_H
+
+/* Use Google Vendor ID when in accessory mode */
+#define USB_ACCESSORY_VENDOR_ID 0x18D1
+
+
+/* Product ID to use when in accessory mode */
+#define USB_ACCESSORY_PRODUCT_ID 0x2D00
+
+/* Product ID to use when in accessory mode and adb is enabled */
+#define USB_ACCESSORY_ADB_PRODUCT_ID 0x2D01
+
+/* Indexes for strings sent by the host via ACCESSORY_SEND_STRING */
+#define ACCESSORY_STRING_MANUFACTURER   0
+#define ACCESSORY_STRING_MODEL          1
+#define ACCESSORY_STRING_DESCRIPTION    2
+#define ACCESSORY_STRING_VERSION        3
+#define ACCESSORY_STRING_URI            4
+#define ACCESSORY_STRING_SERIAL         5
+
+/* Control request for retrieving device's protocol version
+ *
+ *	requestType:    USB_DIR_IN | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_GET_PROTOCOL
+ *	value:          0
+ *	index:          0
+ *	data            version number (16 bits little endian)
+ *                     1 for original accessory support
+ *                     2 adds HID and device to host audio support
+ */
+#define ACCESSORY_GET_PROTOCOL  51
+
+/* Control request for host to send a string to the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_STRING
+ *	value:          0
+ *	index:          string ID
+ *	data            zero terminated UTF8 string
+ *
+ *  The device can later retrieve these strings via the
+ *  ACCESSORY_GET_STRING_* ioctls
+ */
+#define ACCESSORY_SEND_STRING   52
+
+/* Control request for starting device in accessory mode.
+ * The host sends this after setting all its strings to the device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_START
+ *	value:          0
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_START         53
+
+/* Control request for registering a HID device.
+ * Upon registering, a unique ID is sent by the accessory in the
+ * value parameter. This ID will be used for future commands for
+ * the device
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_REGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          total length of the HID report descriptor
+ *	data            none
+ */
+#define ACCESSORY_REGISTER_HID         54
+
+/* Control request for unregistering a HID device.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_UNREGISTER_HID
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_UNREGISTER_HID         55
+
+/* Control request for sending the HID report descriptor.
+ * If the HID descriptor is longer than the endpoint zero max packet size,
+ * the descriptor will be sent in multiple ACCESSORY_SET_HID_REPORT_DESC
+ * commands. The data for the descriptor must be sent sequentially
+ * if multiple packets are needed.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_HID_REPORT_DESC
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          offset of data in descriptor
+ *                      (needed when HID descriptor is too big for one packet)
+ *	data            the HID report descriptor
+ */
+#define ACCESSORY_SET_HID_REPORT_DESC         56
+
+/* Control request for sending HID events.
+ *
+ *	requestType:    USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SEND_HID_EVENT
+ *	value:          Accessory assigned ID for the HID device
+ *	index:          0
+ *	data            the HID report for the event
+ */
+#define ACCESSORY_SEND_HID_EVENT         57
+
+/* Control request for setting the audio mode.
+ *
+ *	requestType:	USB_DIR_OUT | USB_TYPE_VENDOR
+ *	request:        ACCESSORY_SET_AUDIO_MODE
+ *	value:          0 - no audio
+ *                     1 - device to host, 44100 16-bit stereo PCM
+ *	index:          0
+ *	data            none
+ */
+#define ACCESSORY_SET_AUDIO_MODE         58
+
+/* ioctls for retrieving strings set by the host */
+#define ACCESSORY_GET_STRING_MANUFACTURER   _IOW('M', 1, char[256])
+#define ACCESSORY_GET_STRING_MODEL          _IOW('M', 2, char[256])
+#define ACCESSORY_GET_STRING_DESCRIPTION    _IOW('M', 3, char[256])
+#define ACCESSORY_GET_STRING_VERSION        _IOW('M', 4, char[256])
+#define ACCESSORY_GET_STRING_URI            _IOW('M', 5, char[256])
+#define ACCESSORY_GET_STRING_SERIAL         _IOW('M', 6, char[256])
+/* returns 1 if there is a start request pending */
+#define ACCESSORY_IS_START_REQUESTED        _IO('M', 7)
+/* returns audio mode (set via the ACCESSORY_SET_AUDIO_MODE control request) */
+#define ACCESSORY_GET_AUDIO_MODE            _IO('M', 8)
+
+#endif /* _UAPI_LINUX_USB_F_ACCESSORY_H */
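A hedged sketch of the device-side flow: poll whether the host has issued ACCESSORY_START, then fetch the strings it sent earlier. The node name "/dev/usb_accessory" is the conventional one for the Android accessory gadget and is not defined in this header.

#include <fcntl.h>
#include <linux/usb/f_accessory.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	char manufacturer[256], model[256];
	int fd = open("/dev/usb_accessory", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ACCESSORY_IS_START_REQUESTED) == 1) {
		/* Strings were stored when the host sent ACCESSORY_SEND_STRING. */
		ioctl(fd, ACCESSORY_GET_STRING_MANUFACTURER, manufacturer);
		ioctl(fd, ACCESSORY_GET_STRING_MODEL, model);
		printf("accessory start requested by %s %s\n",
		       manufacturer, model);
	}
	close(fd);
	return 0;
}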
diff --git a/include/uapi/linux/usb/f_mtp.h b/include/uapi/linux/usb/f_mtp.h
new file mode 100644
index 0000000..5032918
--- /dev/null
+++ b/include/uapi/linux/usb/f_mtp.h
@@ -0,0 +1,61 @@
+/*
+ * Gadget Function Driver for MTP
+ *
+ * Copyright (C) 2010 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_USB_F_MTP_H
+#define _UAPI_LINUX_USB_F_MTP_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct mtp_file_range {
+	/* file descriptor for file to transfer */
+	int			fd;
+	/* offset in file for start of transfer */
+	loff_t		offset;
+	/* number of bytes to transfer */
+	int64_t		length;
+	/* MTP command ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	uint16_t	command;
+	/* MTP transaction ID for data header,
+	 * used only for MTP_SEND_FILE_WITH_HEADER
+	 */
+	uint32_t	transaction_id;
+};
+
+struct mtp_event {
+	/* size of the event */
+	size_t		length;
+	/* event data to send */
+	void		*data;
+};
+
+/* Sends the specified file range to the host */
+#define MTP_SEND_FILE              _IOW('M', 0, struct mtp_file_range)
+/* Receives data from the host and writes it to a file.
+ * The file is created if it does not exist.
+ */
+#define MTP_RECEIVE_FILE           _IOW('M', 1, struct mtp_file_range)
+/* Sends an event to the host via the interrupt endpoint */
+#define MTP_SEND_EVENT             _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER  _IOW('M', 4, struct mtp_file_range)
+
+#endif /* _UAPI_LINUX_USB_F_MTP_H */
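A hedged sketch of streaming a file to the host with MTP_SEND_FILE; mtp_fd would be an open descriptor for the MTP gadget node (conventionally /dev/mtp_usb, which this header does not name).

#include <fcntl.h>
#include <linux/usb/f_mtp.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>

static int mtp_send_whole_file(int mtp_fd, const char *path)
{
	struct mtp_file_range mfr;
	struct stat st;
	int rc, fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (fstat(fd, &st) < 0) {
		close(fd);
		return -1;
	}

	mfr.fd = fd;
	mfr.offset = 0;
	mfr.length = st.st_size;
	mfr.command = 0;		/* only used with _WITH_HEADER */
	mfr.transaction_id = 0;

	rc = ioctl(mtp_fd, MTP_SEND_FILE, &mfr);
	close(fd);
	return rc;
}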
diff --git a/include/uapi/scsi/ufs/Kbuild b/include/uapi/scsi/ufs/Kbuild
new file mode 100644
index 0000000..cc3ef20
--- /dev/null
+++ b/include/uapi/scsi/ufs/Kbuild
@@ -0,0 +1,3 @@
+# UAPI Header export list
+header-y += ioctl.h
+header-y += ufs.h
diff --git a/include/uapi/scsi/ufs/ioctl.h b/include/uapi/scsi/ufs/ioctl.h
new file mode 100644
index 0000000..56b2f46
--- /dev/null
+++ b/include/uapi/scsi/ufs/ioctl.h
@@ -0,0 +1,57 @@
+#ifndef UAPI_UFS_IOCTL_H_
+#define UAPI_UFS_IOCTL_H_
+
+#include <linux/types.h>
+
+/*
+ *  IOCTL opcode for UFS queries; numbered immediately after
+ *  SCSI_IOCTL_GET_PCI
+ */
+#define UFS_IOCTL_QUERY			0x5388
+
+/**
+ * struct ufs_ioctl_query_data - used to transfer data to and from user via ioctl
+ * @opcode: type of data to query (descriptor/attribute/flag)
+ * @idn: id of the data structure
+ * @buf_size: allocated size on input, number of returned bytes on output
+ * @buffer: data location
+ *
+ * Received: buffer and buf_size (available space for transferred data)
+ * Submitted: opcode, idn, length, buf_size
+ */
+struct ufs_ioctl_query_data {
+	/*
+	 * User should select one of the opcodes defined in
+	 * "enum query_opcode"; please check include/uapi/scsi/ufs/ufs.h for
+	 * its definition.
+	 * Note that only UPIU_QUERY_OPCODE_READ_DESC,
+	 * UPIU_QUERY_OPCODE_READ_ATTR and UPIU_QUERY_OPCODE_READ_FLAG are
+	 * supported as of now; all other opcodes are considered invalid.
+	 * Only read query operations are supported for now.
+	 */
+	__u32 opcode;
+	/*
+	 * User should select an idn from "enum flag_idn", "enum attr_idn" or
+	 * "enum desc_idn", depending on whether the opcode above refers to a
+	 * flag, an attribute or a descriptor.
+	 * Please check include/uapi/scsi/ufs/ufs.h for the definition of it.
+	 */
+	__u8 idn;
+	/*
+	 * User should specify the size of the buffer (buffer[0] below) into
+	 * which the query data (attribute/flag/descriptor) should be read.
+	 * The kernel may read less data than specified in buf_size, in which
+	 * case buf_size is updated to the number of bytes actually read.
+	 */
+	__u16 buf_size;
+	/*
+	 * placeholder for the start of the data buffer where the kernel copies
+	 * the query data (attribute/flag/descriptor) read from the UFS device
+	 * Note:
+	 * For Read/Write Attribute you will have to allocate 4 bytes
+	 * For Read/Write Flag you will have to allocate 1 byte
+	 */
+	__u8 buffer[0];
+};
+
+#endif /* UAPI_UFS_IOCTL_H_ */
diff --git a/include/uapi/scsi/ufs/ufs.h b/include/uapi/scsi/ufs/ufs.h
new file mode 100644
index 0000000..cd82b76
--- /dev/null
+++ b/include/uapi/scsi/ufs/ufs.h
@@ -0,0 +1,71 @@
+#ifndef UAPI_UFS_H_
+#define UAPI_UFS_H_
+
+#define MAX_QUERY_IDN	0x12
+
+/* Flag idn for Query Requests */
+enum flag_idn {
+	QUERY_FLAG_IDN_FDEVICEINIT		= 0x01,
+	QUERY_FLAG_IDN_PERMANENT_WPE		= 0x02,
+	QUERY_FLAG_IDN_PWR_ON_WPE		= 0x03,
+	QUERY_FLAG_IDN_BKOPS_EN			= 0x04,
+	QUERY_FLAG_IDN_RESERVED1		= 0x05,
+	QUERY_FLAG_IDN_PURGE_ENABLE		= 0x06,
+	QUERY_FLAG_IDN_RESERVED2		= 0x07,
+	QUERY_FLAG_IDN_FPHYRESOURCEREMOVAL      = 0x08,
+	QUERY_FLAG_IDN_BUSY_RTC			= 0x09,
+};
+
+/* Attribute idn for Query requests */
+enum attr_idn {
+	QUERY_ATTR_IDN_BOOT_LU_EN		= 0x00,
+	QUERY_ATTR_IDN_RESERVED			= 0x01,
+	QUERY_ATTR_IDN_POWER_MODE		= 0x02,
+	QUERY_ATTR_IDN_ACTIVE_ICC_LVL		= 0x03,
+	QUERY_ATTR_IDN_OOO_DATA_EN		= 0x04,
+	QUERY_ATTR_IDN_BKOPS_STATUS		= 0x05,
+	QUERY_ATTR_IDN_PURGE_STATUS		= 0x06,
+	QUERY_ATTR_IDN_MAX_DATA_IN		= 0x07,
+	QUERY_ATTR_IDN_MAX_DATA_OUT		= 0x08,
+	QUERY_ATTR_IDN_DYN_CAP_NEEDED		= 0x09,
+	QUERY_ATTR_IDN_REF_CLK_FREQ		= 0x0A,
+	QUERY_ATTR_IDN_CONF_DESC_LOCK		= 0x0B,
+	QUERY_ATTR_IDN_MAX_NUM_OF_RTT		= 0x0C,
+	QUERY_ATTR_IDN_EE_CONTROL		= 0x0D,
+	QUERY_ATTR_IDN_EE_STATUS		= 0x0E,
+	QUERY_ATTR_IDN_SECONDS_PASSED		= 0x0F,
+	QUERY_ATTR_IDN_CNTX_CONF		= 0x10,
+	QUERY_ATTR_IDN_CORR_PRG_BLK_NUM		= 0x11,
+};
+
+#define QUERY_ATTR_IDN_BOOT_LU_EN_MAX	0x02
+
+/* Descriptor idn for Query requests */
+enum desc_idn {
+	QUERY_DESC_IDN_DEVICE		= 0x0,
+	QUERY_DESC_IDN_CONFIGURAION	= 0x1,
+	QUERY_DESC_IDN_UNIT		= 0x2,
+	QUERY_DESC_IDN_RFU_0		= 0x3,
+	QUERY_DESC_IDN_INTERCONNECT	= 0x4,
+	QUERY_DESC_IDN_STRING		= 0x5,
+	QUERY_DESC_IDN_RFU_1		= 0x6,
+	QUERY_DESC_IDN_GEOMETRY		= 0x7,
+	QUERY_DESC_IDN_POWER		= 0x8,
+	QUERY_DESC_IDN_RFU_2		= 0x9,
+	QUERY_DESC_IDN_MAX,
+};
+
+/* UTP QUERY Transaction Specific Fields OpCode */
+enum query_opcode {
+	UPIU_QUERY_OPCODE_NOP		= 0x0,
+	UPIU_QUERY_OPCODE_READ_DESC	= 0x1,
+	UPIU_QUERY_OPCODE_WRITE_DESC	= 0x2,
+	UPIU_QUERY_OPCODE_READ_ATTR	= 0x3,
+	UPIU_QUERY_OPCODE_WRITE_ATTR	= 0x4,
+	UPIU_QUERY_OPCODE_READ_FLAG	= 0x5,
+	UPIU_QUERY_OPCODE_SET_FLAG	= 0x6,
+	UPIU_QUERY_OPCODE_CLEAR_FLAG	= 0x7,
+	UPIU_QUERY_OPCODE_TOGGLE_FLAG	= 0x8,
+	UPIU_QUERY_OPCODE_MAX,
+};
+#endif /* UAPI_UFS_H_ */
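A hedged sketch tying the two UFS headers together: read the bBootLunEn attribute through UFS_IOCTL_QUERY. The payload lives in the flexible array at the end of struct ufs_ioctl_query_data, so the caller allocates the struct plus 4 bytes for an attribute; the target node ("/dev/block/sda") is an assumption about how the platform exposes the UFS device.

#include <fcntl.h>
#include <scsi/ufs/ioctl.h>
#include <scsi/ufs/ufs.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* struct plus 4 bytes of payload for an attribute read */
	size_t sz = sizeof(struct ufs_ioctl_query_data) + 4;
	struct ufs_ioctl_query_data *q = calloc(1, sz);
	int fd, rc;

	if (!q)
		return 1;
	q->opcode = UPIU_QUERY_OPCODE_READ_ATTR;
	q->idn = QUERY_ATTR_IDN_BOOT_LU_EN;
	q->buf_size = 4;

	fd = open("/dev/block/sda", O_RDONLY);	/* node name assumed */
	if (fd < 0) {
		free(q);
		return 1;
	}
	rc = ioctl(fd, UFS_IOCTL_QUERY, q);
	if (!rc) {
		uint32_t val;

		memcpy(&val, q->buffer, sizeof(val));
		printf("bBootLunEn = %u\n", val);
	}
	close(fd);
	free(q);
	return rc ? 1 : 0;
}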
diff --git a/include/uapi/video/adf.h b/include/uapi/video/adf.h
new file mode 100644
index 0000000..c5d2e62
--- /dev/null
+++ b/include/uapi/video/adf.h
@@ -0,0 +1,321 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_VIDEO_ADF_H_
+#define _UAPI_VIDEO_ADF_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_mode.h>
+
+#define ADF_NAME_LEN 32
+#define ADF_MAX_CUSTOM_DATA_SIZE 4096
+
+enum adf_interface_type {
+	ADF_INTF_DSI = 0,
+	ADF_INTF_eDP = 1,
+	ADF_INTF_DPI = 2,
+	ADF_INTF_VGA = 3,
+	ADF_INTF_DVI = 4,
+	ADF_INTF_HDMI = 5,
+	ADF_INTF_MEMORY = 6,
+	ADF_INTF_TYPE_DEVICE_CUSTOM = 128,
+	ADF_INTF_TYPE_MAX = (~(__u32)0),
+};
+
+#define ADF_INTF_FLAG_PRIMARY (1 << 0)
+#define ADF_INTF_FLAG_EXTERNAL (1 << 1)
+
+enum adf_event_type {
+	ADF_EVENT_VSYNC = 0,
+	ADF_EVENT_HOTPLUG = 1,
+	ADF_EVENT_DEVICE_CUSTOM = 128,
+	ADF_EVENT_TYPE_MAX = 255,
+};
+
+/**
+ * struct adf_set_event - start or stop subscribing to ADF events
+ *
+ * @type: the type of event to (un)subscribe
+ * @enabled: subscribe or unsubscribe
+ *
+ * After subscribing to an event, userspace may poll() the ADF object's fd
+ * to wait for events or read() to consume the event's data.
+ *
+ * ADF reserves event types 0 to %ADF_EVENT_DEVICE_CUSTOM-1 for its own events.
+ * Devices may use event types %ADF_EVENT_DEVICE_CUSTOM to %ADF_EVENT_TYPE_MAX-1
+ * for driver-private events.
+ */
+struct adf_set_event {
+	__u8 type;
+	__u8 enabled;
+};
+
+/**
+ * struct adf_event - common header for ADF event data
+ *
+ * @type: event type
+ * @length: total size of event data, header inclusive
+ */
+struct adf_event {
+	__u8 type;
+	__u32 length;
+};
+
+/**
+ * struct adf_vsync_event - ADF vsync event
+ *
+ * @base: event header (see &struct adf_event)
+ * @timestamp: time of vsync event, in nanoseconds
+ */
+struct adf_vsync_event {
+	struct adf_event base;
+	__aligned_u64 timestamp;
+};
+
+/**
+ * struct adf_hotplug_event - ADF display hotplug event
+ *
+ * @base: event header (see &struct adf_event)
+ * @connected: whether a display is now connected to the interface
+ */
+struct adf_hotplug_event {
+	struct adf_event base;
+	__u8 connected;
+};
+
+#define ADF_MAX_PLANES 4
+/**
+ * struct adf_buffer_config - description of buffer displayed by adf_post_config
+ *
+ * @overlay_engine: id of the target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @fd: dma_buf fd for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: stride (i.e. length of a scanline including padding) in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence fd which will clear when the buffer is
+ *	ready for display, or <0 if the buffer is already ready
+ */
+struct adf_buffer_config {
+	__u32 overlay_engine;
+
+	__u32 w;
+	__u32 h;
+	__u32 format;
+
+	__s32 fd[ADF_MAX_PLANES];
+	__u32 offset[ADF_MAX_PLANES];
+	__u32 pitch[ADF_MAX_PLANES];
+	__u8 n_planes;
+
+	__s32 acquire_fence;
+};
+#define ADF_MAX_BUFFERS (4096 / sizeof(struct adf_buffer_config))
+
+/**
+ * struct adf_post_config - request to flip to a new set of buffers
+ *
+ * @n_interfaces: number of interfaces targeted by the flip (input)
+ * @interfaces: ids of interfaces targeted by the flip (input)
+ * @n_bufs: number of buffers displayed (input)
+ * @bufs: description of buffers displayed (input)
+ * @custom_data_size: size of driver-private data (input)
+ * @custom_data: driver-private data (input)
+ * @complete_fence: sync_fence fd which will clear when this
+ *	configuration has left the screen (output)
+ */
+struct adf_post_config {
+	size_t n_interfaces;
+	__u32 __user *interfaces;
+
+	size_t n_bufs;
+	struct adf_buffer_config __user *bufs;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+
+	__s32 complete_fence;
+};
+#define ADF_MAX_INTERFACES (4096 / sizeof(__u32))
+
+/**
+ * struct adf_simple_buffer_allocate - request to allocate a "simple" buffer
+ *
+ * @w: width of buffer in pixels (input)
+ * @h: height of buffer in pixels (input)
+ * @format: DRM-style fourcc (input)
+ *
+ * @fd: dma_buf fd (output)
+ * @offset: location of first pixel, in bytes (output)
+ * @pitch: length of a scanline including padding, in bytes (output)
+ *
+ * Simple buffers are analogous to DRM's "dumb" buffers.  They have a single
+ * plane of linear RGB data which can be allocated and scanned out without
+ * any driver-private ioctls or data.
+ *
+ * @format must be a standard RGB format defined in drm_fourcc.h.
+ *
+ * ADF clients must NOT assume that an interface can scan out a simple buffer
+ * allocated by a different ADF interface, even if the two interfaces belong to
+ * the same ADF device.
+ */
+struct adf_simple_buffer_alloc {
+	__u16 w;
+	__u16 h;
+	__u32 format;
+
+	__s32 fd;
+	__u32 offset;
+	__u32 pitch;
+};
+
+/**
+ * struct adf_simple_post_config - request to flip to a single buffer without
+ * driver-private data
+ *
+ * @buf: description of buffer displayed (input)
+ * @complete_fence: sync_fence fd which will clear when this buffer has left the
+ * screen (output)
+ */
+struct adf_simple_post_config {
+	struct adf_buffer_config buf;
+	__s32 complete_fence;
+};
+
+/**
+ * struct adf_attachment_config - description of attachment between an overlay
+ * engine and an interface
+ *
+ * @overlay_engine: id of the overlay engine
+ * @interface: id of the interface
+ */
+struct adf_attachment_config {
+	__u32 overlay_engine;
+	__u32 interface;
+};
+
+/**
+ * struct adf_device_data - describes a display device
+ *
+ * @name: display device's name
+ * @n_attachments: the number of current attachments
+ * @attachments: list of current attachments
+ * @n_allowed_attachments: the number of allowed attachments
+ * @allowed_attachments: list of allowed attachments
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_device_data {
+	char name[ADF_NAME_LEN];
+
+	size_t n_attachments;
+	struct adf_attachment_config __user *attachments;
+
+	size_t n_allowed_attachments;
+	struct adf_attachment_config __user *allowed_attachments;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+};
+#define ADF_MAX_ATTACHMENTS (4096 / sizeof(struct adf_attachment_config))
+
+/**
+ * struct adf_interface_data - describes a display interface
+ *
+ * @name: display interface's name
+ * @type: interface type (see enum @adf_interface_type)
+ * @id: which interface of type @type;
+ *	e.g. interface DSI.1 -> @type=@ADF_INTF_DSI, @id=1
+ * @flags: informational flags (bitmask of %ADF_INTF_FLAG_* values)
+ * @dpms_state: DPMS state (one of @DRM_MODE_DPMS_* defined in drm_mode.h)
+ * @hotplug_detect: whether a display is plugged in
+ * @width_mm: screen width in millimeters, or 0 if unknown
+ * @height_mm: screen height in millimeters, or 0 if unknown
+ * @current_mode: current display mode
+ * @n_available_modes: the number of hardware display modes
+ * @available_modes: list of hardware display modes
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_interface_data {
+	char name[ADF_NAME_LEN];
+
+	__u32 type;
+	__u32 id;
+	/* e.g. type=ADF_INTF_DSI, id=1 => DSI.1 */
+	__u32 flags;
+
+	__u8 dpms_state;
+	__u8 hotplug_detect;
+	__u16 width_mm;
+	__u16 height_mm;
+
+	struct drm_mode_modeinfo current_mode;
+	size_t n_available_modes;
+	struct drm_mode_modeinfo __user *available_modes;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+};
+#define ADF_MAX_MODES (4096 / sizeof(struct drm_mode_modeinfo))
+
+/**
+ * struct adf_overlay_engine_data - describes an overlay engine
+ *
+ * @name: overlay engine's name
+ * @n_supported_formats: number of supported formats
+ * @supported_formats: list of supported formats
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ */
+struct adf_overlay_engine_data {
+	char name[ADF_NAME_LEN];
+
+	size_t n_supported_formats;
+	__u32 __user *supported_formats;
+
+	size_t custom_data_size;
+	void __user *custom_data;
+};
+#define ADF_MAX_SUPPORTED_FORMATS (4096 / sizeof(__u32))
+
+#define ADF_IOCTL_TYPE		'D'
+#define ADF_IOCTL_NR_CUSTOM	128
+
+#define ADF_SET_EVENT		_IOW(ADF_IOCTL_TYPE, 0, struct adf_set_event)
+#define ADF_BLANK		_IOW(ADF_IOCTL_TYPE, 1, __u8)
+#define ADF_POST_CONFIG		_IOW(ADF_IOCTL_TYPE, 2, struct adf_post_config)
+#define ADF_SET_MODE		_IOW(ADF_IOCTL_TYPE, 3, \
+					struct drm_mode_modeinfo)
+#define ADF_GET_DEVICE_DATA	_IOR(ADF_IOCTL_TYPE, 4, struct adf_device_data)
+#define ADF_GET_INTERFACE_DATA	_IOR(ADF_IOCTL_TYPE, 5, \
+					struct adf_interface_data)
+#define ADF_GET_OVERLAY_ENGINE_DATA \
+				_IOR(ADF_IOCTL_TYPE, 6, \
+					struct adf_overlay_engine_data)
+#define ADF_SIMPLE_POST_CONFIG	_IOW(ADF_IOCTL_TYPE, 7, \
+					struct adf_simple_post_config)
+#define ADF_SIMPLE_BUFFER_ALLOC	_IOW(ADF_IOCTL_TYPE, 8, \
+					struct adf_simple_buffer_alloc)
+#define ADF_ATTACH		_IOW(ADF_IOCTL_TYPE, 9, \
+					struct adf_attachment_config)
+#define ADF_DETACH		_IOW(ADF_IOCTL_TYPE, 10, \
+					struct adf_attachment_config)
+
+#endif /* _UAPI_VIDEO_ADF_H_ */
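A hedged sketch of the event flow described above for struct adf_set_event: subscribe to vsync on an interface node, then read one event. The node path follows the usual ADF naming ("/dev/adf-interface0.0") but is device-specific, and a robust client would honor the adf_event length field rather than assume a fixed-size read.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <video/adf.h>

int main(void)
{
	struct adf_set_event ev = {
		.type = ADF_EVENT_VSYNC,
		.enabled = 1,
	};
	struct adf_vsync_event vsync;
	int fd = open("/dev/adf-interface0.0", O_RDONLY);	/* path assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, ADF_SET_EVENT, &ev))
		return 1;
	if (read(fd, &vsync, sizeof(vsync)) == (ssize_t)sizeof(vsync))
		printf("vsync at %llu ns\n",
		       (unsigned long long)vsync.timestamp);
	close(fd);
	return 0;
}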
diff --git a/include/video/adf.h b/include/video/adf.h
new file mode 100644
index 0000000..34f10e5
--- /dev/null
+++ b/include/video/adf.h
@@ -0,0 +1,502 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_H
+#define _VIDEO_ADF_H
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/idr.h>
+#include <linux/kref.h>
+#include <linux/kthread.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <uapi/video/adf.h>
+#include "sync.h"
+
+struct adf_obj;
+struct adf_obj_ops;
+struct adf_device;
+struct adf_device_ops;
+struct adf_interface;
+struct adf_interface_ops;
+struct adf_overlay_engine;
+struct adf_overlay_engine_ops;
+
+/**
+ * struct adf_buffer - buffer displayed by adf_post
+ *
+ * @overlay_engine: target overlay engine
+ * @w: width of display region in pixels
+ * @h: height of display region in pixels
+ * @format: DRM-style fourcc, see drm_fourcc.h for standard formats
+ * @dma_bufs: dma_buf for each plane
+ * @offset: location of first pixel to scan out, in bytes
+ * @pitch: length of a scanline including padding, in bytes
+ * @n_planes: number of planes in buffer
+ * @acquire_fence: sync_fence which will clear when the buffer is
+ *	ready for display
+ *
+ * &struct adf_buffer is the in-kernel counterpart to the userspace-facing
+ * &struct adf_buffer_config.
+ */
+struct adf_buffer {
+	struct adf_overlay_engine *overlay_engine;
+
+	u32 w;
+	u32 h;
+	u32 format;
+
+	struct dma_buf *dma_bufs[ADF_MAX_PLANES];
+	u32 offset[ADF_MAX_PLANES];
+	u32 pitch[ADF_MAX_PLANES];
+	u8 n_planes;
+
+	struct sync_fence *acquire_fence;
+};
+
+/**
+ * struct adf_buffer_mapping - state for mapping a &struct adf_buffer into the
+ * display device
+ *
+ * @attachments: dma-buf attachment for each plane
+ * @sg_tables: SG tables for each plane
+ */
+struct adf_buffer_mapping {
+	struct dma_buf_attachment *attachments[ADF_MAX_PLANES];
+	struct sg_table *sg_tables[ADF_MAX_PLANES];
+};
+
+/**
+ * struct adf_post - request to flip to a new set of buffers
+ *
+ * @n_bufs: number of buffers displayed
+ * @bufs: buffers displayed
+ * @mappings: in-device mapping state for each buffer
+ * @custom_data_size: size of driver-private data
+ * @custom_data: driver-private data
+ *
+ * &struct adf_post is the in-kernel counterpart to the userspace-facing
+ * &struct adf_post_config.
+ */
+struct adf_post {
+	size_t n_bufs;
+	struct adf_buffer *bufs;
+	struct adf_buffer_mapping *mappings;
+
+	size_t custom_data_size;
+	void *custom_data;
+};
+
+/**
+ * struct adf_attachment - description of attachment between an overlay engine
+ * and an interface
+ *
+ * @overlay_engine: the overlay engine
+ * @interface: the interface
+ *
+ * &struct adf_attachment is the in-kernel counterpart to the userspace-facing
+ * &struct adf_attachment_config.
+ */
+struct adf_attachment {
+	struct adf_overlay_engine *overlay_engine;
+	struct adf_interface *interface;
+};
+
+struct adf_pending_post {
+	struct list_head head;
+	struct adf_post config;
+	void *state;
+};
+
+enum adf_obj_type {
+	ADF_OBJ_OVERLAY_ENGINE = 0,
+	ADF_OBJ_INTERFACE = 1,
+	ADF_OBJ_DEVICE = 2,
+};
+
+/**
+ * struct adf_obj_ops - common ADF object implementation ops
+ *
+ * @open: handle opening the object's device node
+ * @release: handle releasing an open file
+ * @ioctl: handle custom ioctls
+ *
+ * @supports_event: return whether the object supports generating events of type
+ *	@type
+ * @set_event: enable or disable events of type @type
+ * @event_type_str: return a string representation of custom event @type
+ *	(@type >= %ADF_EVENT_DEVICE_CUSTOM).
+ *
+ * @custom_data: copy up to %ADF_MAX_CUSTOM_DATA_SIZE bytes of driver-private
+ *	data into @data (allocated by ADF) and return the number of copied bytes
+ *	in @size.  Return 0 on success or an error code (<0) on failure.
+ */
+struct adf_obj_ops {
+	/* optional */
+	int (*open)(struct adf_obj *obj, struct inode *inode,
+			struct file *file);
+	/* optional */
+	void (*release)(struct adf_obj *obj, struct inode *inode,
+			struct file *file);
+	/* optional */
+	long (*ioctl)(struct adf_obj *obj, unsigned int cmd, unsigned long arg);
+
+	/* optional */
+	bool (*supports_event)(struct adf_obj *obj, enum adf_event_type type);
+	/* required if supports_event is implemented */
+	void (*set_event)(struct adf_obj *obj, enum adf_event_type type,
+			bool enabled);
+	/* optional */
+	const char *(*event_type_str)(struct adf_obj *obj,
+			enum adf_event_type type);
+
+	/* optional */
+	int (*custom_data)(struct adf_obj *obj, void *data, size_t *size);
+};
+
+struct adf_obj {
+	enum adf_obj_type type;
+	char name[ADF_NAME_LEN];
+	struct adf_device *parent;
+
+	const struct adf_obj_ops *ops;
+
+	struct device dev;
+
+	struct spinlock file_lock;
+	struct list_head file_list;
+
+	struct mutex event_lock;
+	struct rb_root event_refcount;
+
+	int id;
+	int minor;
+};
+
+/**
+ * struct adf_device_quirks - common display device quirks
+ *
+ * @buffer_padding: whether the last scanline of a buffer extends to the
+ * 	buffer's pitch (@ADF_BUFFER_PADDED_TO_PITCH) or just to the visible
+ * 	width (@ADF_BUFFER_UNPADDED)
+ */
+struct adf_device_quirks {
+	/* optional, defaults to ADF_BUFFER_PADDED_TO_PITCH */
+	enum {
+		ADF_BUFFER_PADDED_TO_PITCH = 0,
+		ADF_BUFFER_UNPADDED = 1,
+	} buffer_padding;
+};
+
+/**
+ * struct adf_device_ops - display device implementation ops
+ *
+ * @owner: device's module
+ * @base: common operations (see &struct adf_obj_ops)
+ * @quirks: device's quirks (see &struct adf_device_quirks)
+ *
+ * @attach: attach overlay engine @eng to interface @intf.  Return 0 on success
+ *	or error code (<0) on failure.
+ * @detach: detach overlay engine @eng from interface @intf.  Return 0 on
+ *	success or error code (<0) on failure.
+ *
+ * @validate_custom_format: validate the number and size of planes
+ *	in buffers with a custom format (i.e., not one of the @DRM_FORMAT_*
+ *	types defined in drm/drm_fourcc.h).  Return 0 if the buffer is valid or
+ *	an error code (<0) otherwise.
+ *
+ * @validate: validate that the proposed configuration @cfg is legal.  The
+ *	driver may optionally allocate and return some driver-private state in
+ *	@driver_state, which will be passed to the corresponding post().  The
+ *	driver may NOT commit any changes to hardware.  Return 0 if @cfg is
+ *	valid or an error code (<0) otherwise.
+ * @complete_fence: create a hardware-backed sync fence to be signaled when
+ *	@cfg is removed from the screen.  If unimplemented, ADF automatically
+ *	creates an sw_sync fence.  Return the sync fence on success or a
+ *	PTR_ERR() on failure.
+ * @post: flip @cfg onto the screen.  Wait for the display to begin scanning out
+ *	@cfg before returning.
+ * @advance_timeline: signal the sync fence for the last configuration to leave
+ *	the display.  If unimplemented, ADF automatically advances an sw_sync
+ *	timeline.
+ * @state_free: free driver-private state allocated during validate()
+ */
+struct adf_device_ops {
+	/* required */
+	struct module *owner;
+	const struct adf_obj_ops base;
+	/* optional */
+	const struct adf_device_quirks quirks;
+
+	/* optional */
+	int (*attach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+			struct adf_interface *intf);
+	/* optional */
+	int (*detach)(struct adf_device *dev, struct adf_overlay_engine *eng,
+			struct adf_interface *intf);
+
+	/* required if any of the device's overlay engines supports at least one
+	   custom format */
+	int (*validate_custom_format)(struct adf_device *dev,
+			struct adf_buffer *buf);
+
+	/* required */
+	int (*validate)(struct adf_device *dev, struct adf_post *cfg,
+			void **driver_state);
+	/* optional */
+	struct sync_fence *(*complete_fence)(struct adf_device *dev,
+			struct adf_post *cfg, void *driver_state);
+	/* required */
+	void (*post)(struct adf_device *dev, struct adf_post *cfg,
+			void *driver_state);
+	/* required if complete_fence is implemented */
+	void (*advance_timeline)(struct adf_device *dev,
+			struct adf_post *cfg, void *driver_state);
+	/* required if validate allocates driver state */
+	void (*state_free)(struct adf_device *dev, void *driver_state);
+};
+
+struct adf_attachment_list {
+	struct adf_attachment attachment;
+	struct list_head head;
+};
+
+struct adf_device {
+	struct adf_obj base;
+	struct device *dev;
+
+	const struct adf_device_ops *ops;
+
+	struct mutex client_lock;
+
+	struct idr interfaces;
+	size_t n_interfaces;
+	struct idr overlay_engines;
+
+	struct list_head post_list;
+	struct mutex post_lock;
+	struct kthread_worker post_worker;
+	struct task_struct *post_thread;
+	struct kthread_work post_work;
+
+	struct list_head attached;
+	size_t n_attached;
+	struct list_head attach_allowed;
+	size_t n_attach_allowed;
+
+	struct adf_pending_post *onscreen;
+
+	struct sw_sync_timeline *timeline;
+	int timeline_max;
+};
+
+/**
+ * struct adf_interface_ops - display interface implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @blank: change the display's DPMS state.  Return 0 on success or error
+ *	code (<0) on failure.
+ *
+ * @alloc_simple_buffer: allocate a buffer with the specified @w, @h, and
+ *	@format.  @format will be a standard RGB format (i.e.,
+ *	adf_format_is_rgb(@format) == true).  Return 0 on success or error code
+ *	(<0) on failure.  On success, return the buffer, offset, and pitch in
+ *	@dma_buf, @offset, and @pitch respectively.
+ * @describe_simple_post: provide driver-private data needed to post a single
+ *	buffer @buf.  Copy up to ADF_MAX_CUSTOM_DATA_SIZE bytes into @data
+ *	(allocated by ADF) and return the number of bytes in @size.  Return 0 on
+ *	success or error code (<0) on failure.
+ *
+ * @modeset: change the interface's mode.  @mode is not necessarily part of the
+ *	modelist passed to adf_hotplug_notify_connected(); the driver may
+ *	accept or reject custom modes at its discretion.  Return 0 on success or
+ *	error code (<0) if the mode could not be set.
+ *
+ * @screen_size: copy the screen dimensions in millimeters into @width_mm
+ *	and @height_mm.  Return 0 on success or error code (<0) if the display
+ *	dimensions are unknown.
+ *
+ * @type_str: return a string representation of custom @intf->type
+ *	(@intf->type >= @ADF_INTF_TYPE_DEVICE_CUSTOM).
+ */
+struct adf_interface_ops {
+	const struct adf_obj_ops base;
+
+	/* optional */
+	int (*blank)(struct adf_interface *intf, u8 state);
+
+	/* optional */
+	int (*alloc_simple_buffer)(struct adf_interface *intf,
+			u16 w, u16 h, u32 format,
+			struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+	/* optional */
+	int (*describe_simple_post)(struct adf_interface *intf,
+			struct adf_buffer *fb, void *data, size_t *size);
+
+	/* optional */
+	int (*modeset)(struct adf_interface *intf,
+			struct drm_mode_modeinfo *mode);
+
+	/* optional */
+	int (*screen_size)(struct adf_interface *intf, u16 *width_mm,
+			u16 *height_mm);
+
+	/* optional */
+	const char *(*type_str)(struct adf_interface *intf);
+};
+
+struct adf_interface {
+	struct adf_obj base;
+	const struct adf_interface_ops *ops;
+
+	struct drm_mode_modeinfo current_mode;
+
+	enum adf_interface_type type;
+	u32 idx;
+	u32 flags;
+
+	wait_queue_head_t vsync_wait;
+	ktime_t vsync_timestamp;
+	rwlock_t vsync_lock;
+
+	u8 dpms_state;
+
+	bool hotplug_detect;
+	struct drm_mode_modeinfo *modelist;
+	size_t n_modes;
+	rwlock_t hotplug_modelist_lock;
+};
+
+/**
+ * struct adf_overlay_engine_ops - overlay engine implementation ops
+ *
+ * @base: common operations (see &struct adf_obj_ops)
+ *
+ * @supported_formats: list of fourccs the overlay engine can scan out
+ * @n_supported_formats: length of supported_formats, up to
+ *	ADF_MAX_SUPPORTED_FORMATS
+ */
+struct adf_overlay_engine_ops {
+	const struct adf_obj_ops base;
+
+	/* required */
+	const u32 *supported_formats;
+	/* required */
+	const size_t n_supported_formats;
+};
+
+struct adf_overlay_engine {
+	struct adf_obj base;
+
+	const struct adf_overlay_engine_ops *ops;
+};
+
+#define adf_obj_to_device(ptr) \
+	container_of((ptr), struct adf_device, base)
+
+#define adf_obj_to_interface(ptr) \
+	container_of((ptr), struct adf_interface, base)
+
+#define adf_obj_to_overlay_engine(ptr) \
+	container_of((ptr), struct adf_overlay_engine, base)
+
+int __printf(4, 5) adf_device_init(struct adf_device *dev,
+		struct device *parent, const struct adf_device_ops *ops,
+		const char *fmt, ...);
+void adf_device_destroy(struct adf_device *dev);
+int __printf(7, 8) adf_interface_init(struct adf_interface *intf,
+		struct adf_device *dev, enum adf_interface_type type, u32 idx,
+		u32 flags, const struct adf_interface_ops *ops, const char *fmt,
+		...);
+void adf_interface_destroy(struct adf_interface *intf);
+static inline struct adf_device *adf_interface_parent(
+		struct adf_interface *intf)
+{
+	return intf->base.parent;
+}
+int __printf(4, 5) adf_overlay_engine_init(struct adf_overlay_engine *eng,
+		struct adf_device *dev,
+		const struct adf_overlay_engine_ops *ops, const char *fmt, ...);
+void adf_overlay_engine_destroy(struct adf_overlay_engine *eng);
+static inline struct adf_device *adf_overlay_engine_parent(
+		struct adf_overlay_engine *eng)
+{
+	return eng->base.parent;
+}
+
+int adf_attachment_allow(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+
+const char *adf_obj_type_str(enum adf_obj_type type);
+const char *adf_interface_type_str(struct adf_interface *intf);
+const char *adf_event_type_str(struct adf_obj *obj, enum adf_event_type type);
+
+#define ADF_FORMAT_STR_SIZE 5
+void adf_format_str(u32 format, char buf[ADF_FORMAT_STR_SIZE]);
+int adf_format_validate_yuv(struct adf_device *dev, struct adf_buffer *buf,
+		u8 num_planes, u8 hsub, u8 vsub, u8 cpp[]);
+/**
+ * adf_format_validate_rgb - validate the number and size of planes in buffers
+ * with a custom RGB format.
+ *
+ * @dev: ADF device performing the validation
+ * @buf: buffer to validate
+ * @cpp: expected bytes per pixel
+ *
+ * adf_format_validate_rgb() is intended to be called as a helper from @dev's
+ * validate_custom_format() op.  @buf must have a single RGB plane.
+ *
+ * Returns 0 if @buf has a single plane with sufficient size, or -EINVAL
+ * otherwise.
+ */
+static inline int adf_format_validate_rgb(struct adf_device *dev,
+		struct adf_buffer *buf, u8 cpp)
+{
+	return adf_format_validate_yuv(dev, buf, 1, 1, 1, &cpp);
+}
+
+int adf_event_get(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_put(struct adf_obj *obj, enum adf_event_type type);
+int adf_event_notify(struct adf_obj *obj, struct adf_event *event);
+
+static inline void adf_vsync_get(struct adf_interface *intf)
+{
+	adf_event_get(&intf->base, ADF_EVENT_VSYNC);
+}
+
+static inline void adf_vsync_put(struct adf_interface *intf)
+{
+	adf_event_put(&intf->base, ADF_EVENT_VSYNC);
+}
+
+int adf_vsync_wait(struct adf_interface *intf, long timeout);
+void adf_vsync_notify(struct adf_interface *intf, ktime_t timestamp);
+
+int adf_hotplug_notify_connected(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes);
+void adf_hotplug_notify_disconnected(struct adf_interface *intf);
+
+void adf_modeinfo_set_name(struct drm_mode_modeinfo *mode);
+void adf_modeinfo_set_vrefresh(struct drm_mode_modeinfo *mode);
+
+#endif /* _VIDEO_ADF_H */
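A minimal driver-side sketch of the registration flow these entry points imply: one device, one primary DSI interface, one overlay engine, and an allowed attachment between them. All names are illustrative, the validate/post callbacks are stubs, and a real driver would program hardware in post() and tear everything down on remove.

#include <video/adf.h>

static const u32 sketch_formats[] = { DRM_FORMAT_RGBA8888 };

static int sketch_validate(struct adf_device *dev, struct adf_post *cfg,
			   void **driver_state)
{
	return 0;	/* accept every configuration in this sketch */
}

static void sketch_post(struct adf_device *dev, struct adf_post *cfg,
			void *driver_state)
{
	/* a real driver kicks the hardware flip here */
}

static const struct adf_device_ops sketch_dev_ops = {
	.owner		= THIS_MODULE,
	.validate	= sketch_validate,
	.post		= sketch_post,
};

static const struct adf_interface_ops sketch_intf_ops = { };

static const struct adf_overlay_engine_ops sketch_eng_ops = {
	.supported_formats	= sketch_formats,
	.n_supported_formats	= ARRAY_SIZE(sketch_formats),
};

static struct adf_device sketch_dev;
static struct adf_interface sketch_intf;
static struct adf_overlay_engine sketch_eng;

static int sketch_probe(struct platform_device *pdev)
{
	int err;

	err = adf_device_init(&sketch_dev, &pdev->dev, &sketch_dev_ops,
			      "sketch");
	if (err)
		return err;

	err = adf_interface_init(&sketch_intf, &sketch_dev, ADF_INTF_DSI, 0,
				 ADF_INTF_FLAG_PRIMARY, &sketch_intf_ops,
				 "sketch-dsi");
	if (err)
		goto err_device;

	err = adf_overlay_engine_init(&sketch_eng, &sketch_dev,
				      &sketch_eng_ops, "sketch-eng");
	if (err)
		goto err_interface;

	return adf_attachment_allow(&sketch_dev, &sketch_eng, &sketch_intf);

err_interface:
	adf_interface_destroy(&sketch_intf);
err_device:
	adf_device_destroy(&sketch_dev);
	return err;
}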
diff --git a/include/video/adf_client.h b/include/video/adf_client.h
new file mode 100644
index 0000000..983f2b6
--- /dev/null
+++ b/include/video/adf_client.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_CLIENT_H_
+#define _VIDEO_ADF_CLIENT_H_
+
+#include <video/adf.h>
+
+int adf_interface_blank(struct adf_interface *intf, u8 state);
+u8 adf_interface_dpms_state(struct adf_interface *intf);
+
+void adf_interface_current_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode);
+size_t adf_interface_modelist(struct adf_interface *intf,
+		struct drm_mode_modeinfo *modelist, size_t n_modes);
+int adf_interface_set_mode(struct adf_interface *intf,
+		struct drm_mode_modeinfo *mode);
+int adf_interface_get_screen_size(struct adf_interface *intf, u16 *width,
+		u16 *height);
+int adf_interface_simple_buffer_alloc(struct adf_interface *intf, u16 w, u16 h,
+		u32 format, struct dma_buf **dma_buf, u32 *offset, u32 *pitch);
+struct sync_fence *adf_interface_simple_post(struct adf_interface *intf,
+		struct adf_buffer *buf);
+
+bool adf_overlay_engine_supports_format(struct adf_overlay_engine *eng,
+		u32 format);
+
+size_t adf_device_attachments(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments);
+size_t adf_device_attachments_allowed(struct adf_device *dev,
+		struct adf_attachment *attachments, size_t n_attachments);
+bool adf_device_attached(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+bool adf_device_attach_allowed(struct adf_device *dev,
+		struct adf_overlay_engine *eng, struct adf_interface *intf);
+int adf_device_attach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+int adf_device_detach(struct adf_device *dev, struct adf_overlay_engine *eng,
+		struct adf_interface *intf);
+
+struct sync_fence *adf_device_post(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+		size_t custom_data_size);
+struct sync_fence *adf_device_post_nocopy(struct adf_device *dev,
+		struct adf_interface **intfs, size_t n_intfs,
+		struct adf_buffer *bufs, size_t n_bufs, void *custom_data,
+		size_t custom_data_size);
+
+#endif /* _VIDEO_ADF_CLIENT_H_ */
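A hedged in-kernel client sketch combining these helpers: allocate a simple RGB buffer on an interface and flip it as a single-plane adf_buffer. It assumes adf_interface_simple_post() reports errors via ERR_PTR() and uses the legacy sync_fence/dma_buf APIs pulled in through video/adf.h; a real caller would keep its buffer and fence references until the next flip rather than dropping them immediately.

#include <video/adf_client.h>

static int sketch_flip_simple(struct adf_interface *intf,
			      struct adf_overlay_engine *eng,
			      u16 w, u16 h)
{
	struct adf_buffer buf = { };
	struct sync_fence *release;
	struct dma_buf *dma_buf;
	u32 offset, pitch;
	int err;

	err = adf_interface_simple_buffer_alloc(intf, w, h,
						DRM_FORMAT_RGBA8888,
						&dma_buf, &offset, &pitch);
	if (err)
		return err;

	buf.overlay_engine = eng;
	buf.w = w;
	buf.h = h;
	buf.format = DRM_FORMAT_RGBA8888;
	buf.dma_bufs[0] = dma_buf;
	buf.offset[0] = offset;
	buf.pitch[0] = pitch;
	buf.n_planes = 1;
	buf.acquire_fence = NULL;	/* freshly allocated, already ready */

	release = adf_interface_simple_post(intf, &buf);
	if (IS_ERR(release)) {
		dma_buf_put(dma_buf);
		return PTR_ERR(release);
	}

	/* The release fence signals when this buffer leaves the screen;
	 * this sketch drops its references right away. */
	sync_fence_put(release);
	dma_buf_put(dma_buf);
	return 0;
}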
diff --git a/include/video/adf_fbdev.h b/include/video/adf_fbdev.h
new file mode 100644
index 0000000..b722c6b
--- /dev/null
+++ b/include/video/adf_fbdev.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FBDEV_H_
+#define _VIDEO_ADF_FBDEV_H_
+
+#include <linux/fb.h>
+#include <linux/mutex.h>
+#include <video/adf.h>
+
+struct adf_fbdev {
+	struct adf_interface *intf;
+	struct adf_overlay_engine *eng;
+	struct fb_info *info;
+	u32 pseudo_palette[16];
+
+	unsigned int refcount;
+	struct mutex refcount_lock;
+
+	struct dma_buf *dma_buf;
+	u32 offset;
+	u32 pitch;
+	void *vaddr;
+	u32 format;
+
+	u16 default_xres_virtual;
+	u16 default_yres_virtual;
+	u32 default_format;
+};
+
+#if IS_ENABLED(CONFIG_ADF_FBDEV)
+void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+		struct fb_videomode *vmode);
+void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+		struct drm_mode_modeinfo *mode);
+
+int adf_fbdev_init(struct adf_fbdev *fbdev, struct adf_interface *interface,
+		struct adf_overlay_engine *eng,
+		u16 xres_virtual, u16 yres_virtual, u32 format,
+		struct fb_ops *fbops, const char *fmt, ...);
+void adf_fbdev_destroy(struct adf_fbdev *fbdev);
+
+int adf_fbdev_open(struct fb_info *info, int user);
+int adf_fbdev_release(struct fb_info *info, int user);
+int adf_fbdev_check_var(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_set_par(struct fb_info *info);
+int adf_fbdev_blank(int blank, struct fb_info *info);
+int adf_fbdev_pan_display(struct fb_var_screeninfo *var, struct fb_info *info);
+int adf_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma);
+#else
+static inline void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
+		struct fb_videomode *vmode)
+{
+	WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline void adf_modeinfo_from_fb_videomode(const struct fb_videomode *vmode,
+		struct drm_mode_modeinfo *mode)
+{
+	WARN_ONCE(1, "%s: CONFIG_ADF_FBDEV is disabled\n", __func__);
+}
+
+static inline int adf_fbdev_init(struct adf_fbdev *fbdev,
+		struct adf_interface *interface,
+		struct adf_overlay_engine *eng,
+		u16 xres_virtual, u16 yres_virtual, u32 format,
+		struct fb_ops *fbops, const char *fmt, ...)
+{
+	return -ENODEV;
+}
+
+static inline void adf_fbdev_destroy(struct adf_fbdev *fbdev) { }
+
+static inline int adf_fbdev_open(struct fb_info *info, int user)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_release(struct fb_info *info, int user)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_check_var(struct fb_var_screeninfo *var,
+		struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_set_par(struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_blank(int blank, struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_pan_display(struct fb_var_screeninfo *var,
+		struct fb_info *info)
+{
+	return -ENODEV;
+}
+
+static inline int adf_fbdev_mmap(struct fb_info *info,
+		struct vm_area_struct *vma)
+{
+	return -ENODEV;
+}
+#endif
+
+#endif /* _VIDEO_ADF_FBDEV_H_ */
diff --git a/include/video/adf_format.h b/include/video/adf_format.h
new file mode 100644
index 0000000..e03182c
--- /dev/null
+++ b/include/video/adf_format.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_FORMAT_H
+#define _VIDEO_ADF_FORMAT_H
+
+bool adf_format_is_standard(u32 format);
+bool adf_format_is_rgb(u32 format);
+u8 adf_format_num_planes(u32 format);
+u8 adf_format_bpp(u32 format);
+u8 adf_format_plane_cpp(u32 format, int plane);
+u8 adf_format_horz_chroma_subsampling(u32 format);
+u8 adf_format_vert_chroma_subsampling(u32 format);
+
+#endif /* _VIDEO_ADF_FORMAT_H */
diff --git a/include/video/adf_memblock.h b/include/video/adf_memblock.h
new file mode 100644
index 0000000..6256e0e
--- /dev/null
+++ b/include/video/adf_memblock.h
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _VIDEO_ADF_MEMBLOCK_H_
+#define _VIDEO_ADF_MEMBLOCK_H_
+
+struct dma_buf *adf_memblock_export(phys_addr_t base, size_t size, int flags);
+
+#endif /* _VIDEO_ADF_MEMBLOCK_H_ */
diff --git a/init/Kconfig b/init/Kconfig
index 34407f1..f595c26 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1165,6 +1165,23 @@
 
 endif # CGROUPS
 
+config SCHED_HMP
+	bool "Scheduler support for heterogenous multi-processor systems"
+	depends on SMP && FAIR_GROUP_SCHED
+	help
+	  This feature will let the scheduler optimize task placement on
+	  systems made of heterogeneous CPUs, i.e. CPUs that differ either
+	  in their instructions-per-cycle capability or in the maximum
+	  frequency they can attain.
+
+config SCHED_HMP_CSTATE_AWARE
+	bool "CPU C-state aware scheduler"
+	depends on SCHED_HMP
+	help
+	  This feature will let the HMP scheduler optimize task placement
+	  with CPUs C-state. If this is enabled, scheduler places tasks
+	  onto the shallowest C-state CPU among the most power efficient CPUs.
+
 config CHECKPOINT_RESTORE
 	bool "Checkpoint/restore support" if EXPERT
 	select PROC_CHILDREN
diff --git a/init/Makefile b/init/Makefile
index c4fb455..d210b23 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -5,11 +5,8 @@
 ccflags-y := -fno-function-sections -fno-data-sections
 
 obj-y                          := main.o version.o mounts.o
-ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y                          += noinitramfs.o
-else
 obj-$(CONFIG_BLK_DEV_INITRD)   += initramfs.o
-endif
 obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o
 
 ifneq ($(CONFIG_ARCH_INIT_TASK),y)
@@ -20,6 +17,7 @@
 mounts-$(CONFIG_BLK_DEV_RAM)	+= do_mounts_rd.o
 mounts-$(CONFIG_BLK_DEV_INITRD)	+= do_mounts_initrd.o
 mounts-$(CONFIG_BLK_DEV_MD)	+= do_mounts_md.o
+mounts-$(CONFIG_BLK_DEV_DM)	+= do_mounts_dm.o
 
 # dependencies on generated files need to be listed explicitly
 $(obj)/version.o: include/generated/compile.h
diff --git a/init/do_mounts.c b/init/do_mounts.c
index dea5de9..1902a1c8 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -566,6 +566,7 @@
 	wait_for_device_probe();
 
 	md_run_setup();
+	dm_run_setup();
 
 	if (saved_root_name[0]) {
 		root_device_name = saved_root_name;
diff --git a/init/do_mounts.h b/init/do_mounts.h
index 067af1d..ecb2757 100644
--- a/init/do_mounts.h
+++ b/init/do_mounts.h
@@ -74,3 +74,13 @@
 static inline void md_run_setup(void) {}
 
 #endif
+
+#ifdef CONFIG_BLK_DEV_DM
+
+void dm_run_setup(void);
+
+#else
+
+static inline void dm_run_setup(void) {}
+
+#endif
diff --git a/init/do_mounts_dm.c b/init/do_mounts_dm.c
new file mode 100644
index 0000000..a557c5e
--- /dev/null
+++ b/init/do_mounts_dm.c
@@ -0,0 +1,426 @@
+/* do_mounts_dm.c
+ * Copyright (C) 2010 The Chromium OS Authors <chromium-os-dev@chromium.org>
+ *                    All Rights Reserved.
+ * Based on do_mounts_md.c
+ *
+ * This file is released under the GPL.
+ */
+#include <linux/device-mapper.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+
+#include "do_mounts.h"
+#include "../drivers/md/dm.h"
+
+#define DM_MAX_NAME 32
+#define DM_MAX_UUID 129
+#define DM_NO_UUID "none"
+
+#define DM_MSG_PREFIX "init"
+
+/* Separators used for parsing the dm= argument. */
+#define DM_FIELD_SEP ' '
+#define DM_LINE_SEP ','
+
+/*
+ * When the device-mapper and any targets are compiled into the kernel
+ * (not a module), one target may be created and used as the root device at
+ * boot time with the parameters given on the boot line dm=...
+ * The code for that is here.
+ */
+
+struct dm_setup_target {
+	sector_t begin;
+	sector_t length;
+	char *type;
+	char *params;
+	/* simple singly linked list */
+	struct dm_setup_target *next;
+};
+
+static struct {
+	int minor;
+	int ro;
+	char name[DM_MAX_NAME];
+	char uuid[DM_MAX_UUID];
+	char *targets;
+	struct dm_setup_target *target;
+	int target_count;
+} dm_setup_args __initdata;
+
+static __initdata int dm_early_setup;
+
+static size_t __init get_dm_option(char *str, char **next, char sep)
+{
+	size_t len = 0;
+	char *endp = NULL;
+
+	if (!str)
+		return 0;
+
+	endp = strchr(str, sep);
+	if (!endp) {  /* act like strchrnul */
+		len = strlen(str);
+		endp = str + len;
+	} else {
+		len = endp - str;
+	}
+
+	if (endp == str)
+		return 0;
+
+	if (!next)
+		return len;
+
+	if (*endp == 0) {
+		/* Don't advance past the nul. */
+		*next = endp;
+	} else {
+		*next = endp + 1;
+	}
+	return len;
+}
+
+static int __init dm_setup_args_init(void)
+{
+	dm_setup_args.minor = 0;
+	dm_setup_args.ro = 0;
+	dm_setup_args.target = NULL;
+	dm_setup_args.target_count = 0;
+	return 0;
+}
+
+static int __init dm_setup_cleanup(void)
+{
+	struct dm_setup_target *target = dm_setup_args.target;
+	struct dm_setup_target *old_target = NULL;
+	while (target) {
+		kfree(target->type);
+		kfree(target->params);
+		old_target = target;
+		target = target->next;
+		kfree(old_target);
+		dm_setup_args.target_count--;
+	}
+	BUG_ON(dm_setup_args.target_count);
+	return 0;
+}
+
+static char * __init dm_setup_parse_device_args(char *str)
+{
+	char *next = NULL;
+	size_t len = 0;
+
+	/* Grab the logical name of the device to be exported to udev */
+	len = get_dm_option(str, &next, DM_FIELD_SEP);
+	if (!len) {
+		DMERR("failed to parse device name");
+		goto parse_fail;
+	}
+	len = min(len + 1, sizeof(dm_setup_args.name));
+	strlcpy(dm_setup_args.name, str, len);  /* includes nul */
+	str = skip_spaces(next);
+
+	/* Grab the UUID value or "none" */
+	len = get_dm_option(str, &next, DM_FIELD_SEP);
+	if (!len) {
+		DMERR("failed to parse device uuid");
+		goto parse_fail;
+	}
+	len = min(len + 1, sizeof(dm_setup_args.uuid));
+	strlcpy(dm_setup_args.uuid, str, len);
+	str = skip_spaces(next);
+
+	/* Determine if the table/device will be read only or read-write */
+	if (!strncmp("ro,", str, 3)) {
+		dm_setup_args.ro = 1;
+	} else if (!strncmp("rw,", str, 3)) {
+		dm_setup_args.ro = 0;
+	} else {
+		DMERR("failed to parse table mode");
+		goto parse_fail;
+	}
+	str = skip_spaces(str + 3);
+
+	return str;
+
+parse_fail:
+	return NULL;
+}
+
+static void __init dm_substitute_devices(char *str, size_t str_len)
+{
+	char *candidate = str;
+	char *candidate_end = str;
+	char old_char;
+	size_t len = 0;
+	dev_t dev;
+
+	if (str_len < 3)
+		return;
+
+	while (str && *str) {
+		candidate = strchr(str, '/');
+		if (!candidate)
+			break;
+
+		/* Avoid embedded slashes */
+		if (candidate != str && *(candidate - 1) != DM_FIELD_SEP) {
+			str = strchr(candidate, DM_FIELD_SEP);
+			continue;
+		}
+
+		len = get_dm_option(candidate, &candidate_end, DM_FIELD_SEP);
+		str = skip_spaces(candidate_end);
+		if (len < 3 || len > 37)  /* name_to_dev_t max len; "maj:min" min len */
+			continue;
+
+		/* Temporarily terminate with a nul */
+		if (*candidate_end)
+			candidate_end--;
+		old_char = *candidate_end;
+		*candidate_end = '\0';
+
+		DMDEBUG("converting candidate device '%s' to dev_t", candidate);
+		/* Use the boot-time specific device naming */
+		dev = name_to_dev_t(candidate);
+		*candidate_end = old_char;
+
+		DMDEBUG(" -> %u", dev);
+		/* No suitable replacement found */
+		if (!dev)
+			continue;
+
+		/* Rewrite the /dev/path as a major:minor */
+		len = snprintf(candidate, len, "%u:%u", MAJOR(dev), MINOR(dev));
+		if (!len) {
+			DMERR("error substituting device major/minor.");
+			break;
+		}
+		candidate += len;
+		/* Pad out with spaces (fixing our nul) */
+		while (candidate < candidate_end)
+			*(candidate++) = DM_FIELD_SEP;
+	}
+}
+
+static int __init dm_setup_parse_targets(char *str)
+{
+	char *next = NULL;
+	size_t len = 0;
+	struct dm_setup_target **target = NULL;
+
+	/* Targets are defined as per the table format but with a
+	 * comma as a newline separator. */
+	target = &dm_setup_args.target;
+	while (str && *str) {
+		*target = kzalloc(sizeof(struct dm_setup_target), GFP_KERNEL);
+		if (!*target) {
+			DMERR("failed to allocate memory for target %d",
+			      dm_setup_args.target_count);
+			goto parse_fail;
+		}
+		dm_setup_args.target_count++;
+
+		(*target)->begin = simple_strtoull(str, &next, 10);
+		if (!next || *next != DM_FIELD_SEP) {
+			DMERR("failed to parse starting sector for target %d",
+			      dm_setup_args.target_count - 1);
+			goto parse_fail;
+		}
+		str = skip_spaces(next + 1);
+
+		(*target)->length = simple_strtoull(str, &next, 10);
+		if (!next || *next != DM_FIELD_SEP) {
+			DMERR("failed to parse length for target %d",
+			      dm_setup_args.target_count - 1);
+			goto parse_fail;
+		}
+		str = skip_spaces(next + 1);
+
+		len = get_dm_option(str, &next, DM_FIELD_SEP);
+		if (!len ||
+		    !((*target)->type = kstrndup(str, len, GFP_KERNEL))) {
+			DMERR("failed to parse type for target %d",
+			      dm_setup_args.target_count - 1);
+			goto parse_fail;
+		}
+		str = skip_spaces(next);
+
+		len = get_dm_option(str, &next, DM_LINE_SEP);
+		if (!len ||
+		    !((*target)->params = kstrndup(str, len, GFP_KERNEL))) {
+			DMERR("failed to parse params for target %d",
+			      dm_setup_args.target_count - 1);
+			goto parse_fail;
+		}
+		str = skip_spaces(next);
+
+		/* Before moving on, walk through the copied target and
+		 * attempt to replace all /dev/xxx with the major:minor number.
+		 * It may not be possible to resolve them traditionally at
+		 * boot-time. */
+		dm_substitute_devices((*target)->params, len);
+
+		target = &((*target)->next);
+	}
+	DMDEBUG("parsed %d targets", dm_setup_args.target_count);
+
+	return 0;
+
+parse_fail:
+	return 1;
+}
+
+/*
+ * Parse the command-line parameters given to our kernel, but do not
+ * actually try to invoke the DM device now; that is handled by
+ * dm_setup_drive after the low-level disk drivers have initialised.
+ * dm format is as follows:
+ *  dm="name uuid fmode,[table line 1],[table line 2],..."
+ * May be used with root=/dev/dm-0 as it always uses the first dm minor.
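+ *
+ * A purely illustrative example (device names and sizes are made up; the
+ * boot code rewrites /dev paths to major:minor numbers by itself):
+ *  dm="lroot none rw, 0 4096 linear /dev/sda1 0, 4096 4096 linear /dev/sda2 0" root=/dev/dm-0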
+ */
+
+static int __init dm_setup(char *str)
+{
+	dm_setup_args_init();
+
+	str = dm_setup_parse_device_args(str);
+	if (!str) {
+		DMDEBUG("str is NULL");
+		goto parse_fail;
+	}
+
+	/* Target parsing is delayed until we have dynamic memory */
+	dm_setup_args.targets = str;
+
+	printk(KERN_INFO "dm: will configure '%s' on dm-%d\n",
+	       dm_setup_args.name, dm_setup_args.minor);
+
+	dm_early_setup = 1;
+	return 1;
+
+parse_fail:
+	printk(KERN_WARNING "dm: Invalid arguments supplied to dm=.\n");
+	return 0;
+}
+
+
+static void __init dm_setup_drive(void)
+{
+	struct mapped_device *md = NULL;
+	struct dm_table *table = NULL;
+	struct dm_setup_target *target;
+	char *uuid = dm_setup_args.uuid;
+	fmode_t fmode = FMODE_READ;
+
+	/* Finish parsing the targets. */
+	if (dm_setup_parse_targets(dm_setup_args.targets))
+		goto parse_fail;
+
+	if (dm_create(dm_setup_args.minor, &md)) {
+		DMDEBUG("failed to create the device");
+		goto dm_create_fail;
+	}
+	DMDEBUG("created device '%s'", dm_device_name(md));
+
+	/* In addition to flagging the table below, the disk must be
+	 * set explicitly ro/rw. */
+	set_disk_ro(dm_disk(md), dm_setup_args.ro);
+
+	if (!dm_setup_args.ro)
+		fmode |= FMODE_WRITE;
+	if (dm_table_create(&table, fmode, dm_setup_args.target_count, md)) {
+		DMDEBUG("failed to create the table");
+		goto dm_table_create_fail;
+	}
+
+	dm_lock_md_type(md);
+	target = dm_setup_args.target;
+	while (target) {
+		DMINFO("adding target '%llu %llu %s %s'",
+		       (unsigned long long) target->begin,
+		       (unsigned long long) target->length, target->type,
+		       target->params);
+		if (dm_table_add_target(table, target->type, target->begin,
+					target->length, target->params)) {
+			DMDEBUG("failed to add the target to the table");
+			goto add_target_fail;
+		}
+		target = target->next;
+	}
+
+	if (dm_table_complete(table)) {
+		DMDEBUG("failed to complete the table");
+		goto table_complete_fail;
+	}
+
+	if (dm_get_md_type(md) == DM_TYPE_NONE) {
+		dm_set_md_type(md, dm_table_get_type(table));
+		if (dm_setup_md_queue(md, table)) {
+			DMWARN("unable to set up device queue for new table.");
+			goto setup_md_queue_fail;
+		}
+	} else if (dm_get_md_type(md) != dm_table_get_type(table)) {
+		DMWARN("can't change device type after initial table load.");
+		goto setup_md_queue_fail;
+	}
+
+	/* Suspend the device so that we can bind it to the table. */
+	if (dm_suspend(md, 0)) {
+		DMDEBUG("failed to suspend the device pre-bind");
+		goto suspend_fail;
+	}
+
+	/* Bind the table to the device. This is the only way to associate
+	 * md->map with the table and set the disk capacity directly. */
+	if (dm_swap_table(md, table)) {  /* should return NULL. */
+		DMDEBUG("failed to bind the device to the table");
+		goto table_bind_fail;
+	}
+
+	/* Finally, resume and the device should be ready. */
+	if (dm_resume(md)) {
+		DMDEBUG("failed to resume the device");
+		goto resume_fail;
+	}
+
+	/* Export the dm device via the ioctl interface */
+	if (!strcmp(DM_NO_UUID, dm_setup_args.uuid))
+		uuid = NULL;
+	if (dm_ioctl_export(md, dm_setup_args.name, uuid)) {
+		DMDEBUG("failed to export device with given name and uuid");
+		goto export_fail;
+	}
+	printk(KERN_INFO "dm: dm-%d is ready\n", dm_setup_args.minor);
+
+	dm_unlock_md_type(md);
+	dm_setup_cleanup();
+	return;
+
+export_fail:
+resume_fail:
+table_bind_fail:
+suspend_fail:
+setup_md_queue_fail:
+table_complete_fail:
+add_target_fail:
+	dm_unlock_md_type(md);
+dm_table_create_fail:
+	dm_put(md);
+dm_create_fail:
+	dm_setup_cleanup();
+parse_fail:
+	printk(KERN_WARNING "dm: starting dm-%d (%s) failed\n",
+	       dm_setup_args.minor, dm_setup_args.name);
+}
+
+__setup("dm=", dm_setup);
+
+void __init dm_run_setup(void)
+{
+	if (!dm_early_setup)
+		return;
+	printk(KERN_INFO "dm: attempting early device configuration.\n");
+	dm_setup_drive();
+}
diff --git a/init/initramfs.c b/init/initramfs.c
index b32ad7d..f8ce812 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -18,6 +18,7 @@
 #include <linux/dirent.h>
 #include <linux/syscalls.h>
 #include <linux/utime.h>
+#include <linux/initramfs.h>
 
 static ssize_t __init xwrite(int fd, const char *p, size_t count)
 {
@@ -605,9 +606,25 @@
 }
 #endif
 
+static int __initdata do_skip_initramfs;
+
+static int __init skip_initramfs_param(char *str)
+{
+	if (*str)
+		return 0;
+	do_skip_initramfs = 1;
+	return 1;
+}
+__setup("skip_initramfs", skip_initramfs_param);
+
 static int __init populate_rootfs(void)
 {
-	char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
+	char *err;
+
+	if (do_skip_initramfs)
+		return default_rootfs();
+
+	err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
 	if (err)
 		panic("%s", err); /* Failed to decompress INTERNAL initramfs */
 	if (initrd_start) {
diff --git a/init/noinitramfs.c b/init/noinitramfs.c
index 267739d..bcc8bcb0 100644
--- a/init/noinitramfs.c
+++ b/init/noinitramfs.c
@@ -21,11 +21,16 @@
 #include <linux/stat.h>
 #include <linux/kdev_t.h>
 #include <linux/syscalls.h>
+#include <linux/kconfig.h>
+#include <linux/initramfs.h>
 
 /*
  * Create a simple rootfs that is similar to the default initramfs
  */
-static int __init default_rootfs(void)
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
+static
+#endif
+int __init default_rootfs(void)
 {
 	int err;
 
@@ -49,4 +54,6 @@
 	printk(KERN_WARNING "Failed to create a rootfs\n");
 	return err;
 }
+#if !IS_BUILTIN(CONFIG_BLK_DEV_INITRD)
 rootfs_initcall(default_rootfs);
+#endif
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 85bc9be..b848fe8 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2842,6 +2842,45 @@
 	return ret;
 }
 
+int subsys_cgroup_allow_attach(struct cgroup_taskset *tset)
+{
+	const struct cred *cred = current_cred(), *tcred;
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+
+	if (capable(CAP_SYS_NICE))
+		return 0;
+
+	cgroup_taskset_for_each(task, css, tset) {
+		tcred = __task_cred(task);
+
+		if (current != task && !uid_eq(cred->euid, tcred->uid) &&
+		    !uid_eq(cred->euid, tcred->suid))
+			return -EACCES;
+	}
+
+	return 0;
+}
+
+static int cgroup_allow_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
+{
+	struct cgroup_subsys_state *css;
+	int i;
+	int ret;
+
+	for_each_css(css, i, cgrp) {
+		if (css->ss->allow_attach) {
+			ret = css->ss->allow_attach(tset);
+			if (ret)
+				return ret;
+		} else {
+			return -EACCES;
+		}
+	}
+
+	return 0;
+}
+
 static int cgroup_procs_write_permission(struct task_struct *task,
 					 struct cgroup *dst_cgrp,
 					 struct kernfs_open_file *of)
@@ -2856,8 +2895,24 @@
 	 */
 	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
 	    !uid_eq(cred->euid, tcred->uid) &&
-	    !uid_eq(cred->euid, tcred->suid))
-		ret = -EACCES;
+	    !uid_eq(cred->euid, tcred->suid)) {
+		/*
+		 * if the default permission check fails, give each
+		 * cgroup a chance to extend the permission check
+		 */
+		struct cgroup_taskset tset = {
+			.src_csets = LIST_HEAD_INIT(tset.src_csets),
+			.dst_csets = LIST_HEAD_INIT(tset.dst_csets),
+			.csets = &tset.src_csets,
+		};
+		struct css_set *cset;
+		cset = task_css_set(task);
+		list_add(&cset->mg_node, &tset.src_csets);
+		ret = cgroup_allow_attach(dst_cgrp, &tset);
+		list_del(&tset.src_csets);
+		if (ret)
+			ret = -EACCES;
+	}
 
 	if (!ret && cgroup_on_dfl(dst_cgrp)) {
 		struct super_block *sb = of->file->f_path.dentry->d_sb;
diff --git a/kernel/configs/README.android b/kernel/configs/README.android
new file mode 100644
index 0000000..2e2d7c0
--- /dev/null
+++ b/kernel/configs/README.android
@@ -0,0 +1,15 @@
+The android-*.config files in this directory are meant to be used as a base
+for an Android kernel config. All devices should have the options in
+android-base.config enabled. While not mandatory, the options in
+android-recommended.config enable advanced Android features.
+
+Assuming you already have a minimalist defconfig for your device, a possible
+way to enable these options would be:
+
+     ARCH=<arch> scripts/kconfig/merge_config.sh <path_to>/<device>_defconfig kernel/configs/android-base.config kernel/configs/android-recommended.config
+
+This will generate a .config that can then be used to save a new defconfig or
+compile a new kernel with Android features enabled.
+
+Because there is no tool to consistently generate these config fragments,
+let's keep them alphabetically sorted rather than randomly ordered.
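+
+If you also want to capture the merged result as a checked-in defconfig, one
+possible (illustrative) follow-up is:
+
+     make ARCH=<arch> savedefconfig
+
+which writes a minimized defconfig file for the current .config.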
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 1a8f34f..adc552e 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -21,6 +21,7 @@
 CONFIG_DEFAULT_SECURITY_SELINUX=y
 CONFIG_EMBEDDED=y
 CONFIG_FB=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_INET6_AH=y
 CONFIG_INET6_ESP=y
@@ -76,6 +77,9 @@
 CONFIG_NETFILTER_XT_MATCH_MARK=y
 CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
 CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
 CONFIG_NETFILTER_XT_MATCH_QUOTA=y
 CONFIG_NETFILTER_XT_MATCH_SOCKET=y
 CONFIG_NETFILTER_XT_MATCH_STATE=y
@@ -124,26 +128,37 @@
 CONFIG_PM_AUTOSLEEP=y
 CONFIG_PM_WAKELOCKS=y
 CONFIG_PPP=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
 CONFIG_PPP_MPPE=y
 CONFIG_PREEMPT=y
+CONFIG_PROFILING=y
 CONFIG_QUOTA=y
+CONFIG_RANDOMIZE_BASE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RT_GROUP_SCHED=y
 CONFIG_SECCOMP=y
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SETEND_EMULATION=y
 CONFIG_STAGING=y
 CONFIG_SWP_EMULATION=y
 CONFIG_SYNC=y
 CONFIG_TUN=y
+CONFIG_UID_CPUTIME=y
 CONFIG_UNIX=y
 CONFIG_USB_GADGET=y
 CONFIG_USB_CONFIGFS=y
 CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_F_ACC=y
+CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
+CONFIG_USB_CONFIGFS_UEVENT=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
 CONFIG_USB_OTG_WAKELOCK=y
 CONFIG_XFRM_USER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index 297756b..437f337 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -10,6 +10,7 @@
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_COMPACTION=y
 CONFIG_DEBUG_RODATA=y
 CONFIG_DM_CRYPT=y
@@ -75,6 +76,8 @@
 CONFIG_INPUT_EVDEV=y
 CONFIG_INPUT_GPIO=y
 CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_KEYCHORD=y
+CONFIG_INPUT_KEYRESET=y
 CONFIG_INPUT_MISC=y
 CONFIG_INPUT_TABLET=y
 CONFIG_INPUT_UINPUT=y
@@ -118,6 +121,7 @@
 CONFIG_TMPFS=y
 CONFIG_TMPFS_POSIX_ACL=y
 CONFIG_UHID=y
+CONFIG_MEMORY_STATE_TIME=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_HIDDEV=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 29de1a9..19444fc 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1154,6 +1154,7 @@
 void enable_nonboot_cpus(void)
 {
 	int cpu, error;
+	struct device *cpu_device;
 
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
@@ -1171,6 +1172,12 @@
 		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
 		if (!error) {
 			pr_info("CPU%d is up\n", cpu);
+			cpu_device = get_cpu_device(cpu);
+			if (!cpu_device)
+				pr_err("%s: failed to get cpu%d device\n",
+				       __func__, cpu);
+			else
+				kobject_uevent(&cpu_device->kobj, KOBJ_ONLINE);
 			continue;
 		}
 		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
@@ -1932,3 +1939,23 @@
 {
 	per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
 }
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+	atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+	atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 29f815d..4b7b6cb 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -99,6 +99,7 @@
 
 	/* user-configured CPUs and Memory Nodes allow to tasks */
 	cpumask_var_t cpus_allowed;
+	cpumask_var_t cpus_requested;
 	nodemask_t mems_allowed;
 
 	/* effective CPUs and Memory Nodes allow to tasks */
@@ -398,7 +399,7 @@
 
 static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 {
-	return	cpumask_subset(p->cpus_allowed, q->cpus_allowed) &&
+	return	cpumask_subset(p->cpus_requested, q->cpus_requested) &&
 		nodes_subset(p->mems_allowed, q->mems_allowed) &&
 		is_cpu_exclusive(p) <= is_cpu_exclusive(q) &&
 		is_mem_exclusive(p) <= is_mem_exclusive(q);
@@ -498,7 +499,7 @@
 	cpuset_for_each_child(c, css, par) {
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
-		    cpumask_intersects(trial->cpus_allowed, c->cpus_allowed))
+		    cpumask_intersects(trial->cpus_requested, c->cpus_requested))
 			goto out;
 		if ((is_mem_exclusive(trial) || is_mem_exclusive(c)) &&
 		    c != cur &&
@@ -957,17 +958,18 @@
 	if (!*buf) {
 		cpumask_clear(trialcs->cpus_allowed);
 	} else {
-		retval = cpulist_parse(buf, trialcs->cpus_allowed);
+		retval = cpulist_parse(buf, trialcs->cpus_requested);
 		if (retval < 0)
 			return retval;
 
-		if (!cpumask_subset(trialcs->cpus_allowed,
-				    top_cpuset.cpus_allowed))
+		if (!cpumask_subset(trialcs->cpus_requested, cpu_present_mask))
 			return -EINVAL;
+
+		cpumask_and(trialcs->cpus_allowed, trialcs->cpus_requested, cpu_active_mask);
 	}
 
 	/* Nothing to do if the cpus didn't change */
-	if (cpumask_equal(cs->cpus_allowed, trialcs->cpus_allowed))
+	if (cpumask_equal(cs->cpus_requested, trialcs->cpus_requested))
 		return 0;
 
 	retval = validate_change(cs, trialcs);
@@ -976,6 +978,7 @@
 
 	spin_lock_irq(&callback_lock);
 	cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, trialcs->cpus_requested);
 	spin_unlock_irq(&callback_lock);
 
 	/* use trialcs->cpus_allowed as a temp variable */
@@ -1760,7 +1763,7 @@
 
 	switch (type) {
 	case FILE_CPULIST:
-		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_allowed));
+		seq_printf(sf, "%*pbl\n", cpumask_pr_args(cs->cpus_requested));
 		break;
 	case FILE_MEMLIST:
 		seq_printf(sf, "%*pbl\n", nodemask_pr_args(&cs->mems_allowed));
@@ -1949,11 +1952,14 @@
 		return ERR_PTR(-ENOMEM);
 	if (!alloc_cpumask_var(&cs->cpus_allowed, GFP_KERNEL))
 		goto free_cs;
+	if (!alloc_cpumask_var(&cs->cpus_requested, GFP_KERNEL))
+		goto free_allowed;
 	if (!alloc_cpumask_var(&cs->effective_cpus, GFP_KERNEL))
-		goto free_cpus;
+		goto free_requested;
 
 	set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 	cpumask_clear(cs->cpus_allowed);
+	cpumask_clear(cs->cpus_requested);
 	nodes_clear(cs->mems_allowed);
 	cpumask_clear(cs->effective_cpus);
 	nodes_clear(cs->effective_mems);
@@ -1962,7 +1968,9 @@
 
 	return &cs->css;
 
-free_cpus:
+free_requested:
+	free_cpumask_var(cs->cpus_requested);
+free_allowed:
 	free_cpumask_var(cs->cpus_allowed);
 free_cs:
 	kfree(cs);
@@ -2025,6 +2033,7 @@
 	cs->mems_allowed = parent->mems_allowed;
 	cs->effective_mems = parent->mems_allowed;
 	cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
+	cpumask_copy(cs->cpus_requested, parent->cpus_requested);
 	cpumask_copy(cs->effective_cpus, parent->cpus_allowed);
 	spin_unlock_irq(&callback_lock);
 out_unlock:
@@ -2059,6 +2068,7 @@
 
 	free_cpumask_var(cs->effective_cpus);
 	free_cpumask_var(cs->cpus_allowed);
+	free_cpumask_var(cs->cpus_requested);
 	kfree(cs);
 }
 
@@ -2094,12 +2104,30 @@
 	task->mems_allowed = current->mems_allowed;
 }
 
+static int cpuset_allow_attach(struct cgroup_taskset *tset)
+{
+	const struct cred *cred = current_cred(), *tcred;
+	struct task_struct *task;
+	struct cgroup_subsys_state *css;
+
+	cgroup_taskset_for_each(task, css, tset) {
+		tcred = __task_cred(task);
+
+		if ((current != task) && !capable(CAP_SYS_ADMIN) &&
+		     cred->euid.val != tcred->uid.val && cred->euid.val != tcred->suid.val)
+			return -EACCES;
+	}
+
+	return 0;
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
 	.css_alloc	= cpuset_css_alloc,
 	.css_online	= cpuset_css_online,
 	.css_offline	= cpuset_css_offline,
 	.css_free	= cpuset_css_free,
 	.can_attach	= cpuset_can_attach,
+	.allow_attach	= cpuset_allow_attach,
 	.cancel_attach	= cpuset_cancel_attach,
 	.attach		= cpuset_attach,
 	.post_attach	= cpuset_post_attach,
@@ -2123,8 +2151,11 @@
 		BUG();
 	if (!alloc_cpumask_var(&top_cpuset.effective_cpus, GFP_KERNEL))
 		BUG();
+	if (!alloc_cpumask_var(&top_cpuset.cpus_requested, GFP_KERNEL))
+		BUG();
 
 	cpumask_setall(top_cpuset.cpus_allowed);
+	cpumask_setall(top_cpuset.cpus_requested);
 	nodes_setall(top_cpuset.mems_allowed);
 	cpumask_setall(top_cpuset.effective_cpus);
 	nodes_setall(top_cpuset.effective_mems);
@@ -2258,7 +2289,7 @@
 		goto retry;
 	}
 
-	cpumask_and(&new_cpus, cs->cpus_allowed, parent_cs(cs)->effective_cpus);
+	cpumask_and(&new_cpus, cs->cpus_requested, parent_cs(cs)->effective_cpus);
 	nodes_and(new_mems, cs->mems_allowed, parent_cs(cs)->effective_mems);
 
 	cpus_updated = !cpumask_equal(&new_cpus, cs->effective_cpus);
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index fc1ef73..0b89128 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -216,7 +216,7 @@
 	int i;
 	int diag, dtab_count;
 	int key;
-
+	static int last_crlf;
 
 	diag = kdbgetintenv("DTABCOUNT", &dtab_count);
 	if (diag)
@@ -237,6 +237,9 @@
 		return buffer;
 	if (key != 9)
 		tab = 0;
+	if (key != 10 && key != 13)
+		last_crlf = 0;
+
 	switch (key) {
 	case 8: /* backspace */
 		if (cp > buffer) {
@@ -254,7 +257,12 @@
 			*cp = tmp;
 		}
 		break;
-	case 13: /* enter */
+	case 10: /* new line */
+	case 13: /* carriage return */
+		/* handle \n after \r */
+		if (last_crlf && last_crlf != key)
+			break;
+		last_crlf = key;
 		*lastchar++ = '\n';
 		*lastchar++ = '\0';
 		if (!KDB_STATE(KGDB_TRANS)) {
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6ee1feb..e032f03 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -389,8 +389,13 @@
  *   0 - disallow raw tracepoint access for unpriv
  *   1 - disallow cpu events for unpriv
  *   2 - disallow kernel profiling for unpriv
+ *   3 - disallow all unpriv perf event use
  */
+#ifdef CONFIG_SECURITY_PERF_EVENTS_RESTRICT
+int sysctl_perf_event_paranoid __read_mostly = 3;
+#else
 int sysctl_perf_event_paranoid __read_mostly = 2;
+#endif
 
 /* Minimum for 512 kiB + 1 user control page */
 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
@@ -9536,6 +9541,9 @@
 	if (flags & ~PERF_FLAG_ALL)
 		return -EINVAL;
 
+	if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
 	err = perf_copy_attr(attr_uptr, &attr);
 	if (err)
 		return err;
diff --git a/kernel/fork.c b/kernel/fork.c
index 997ac1d..27e9af6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1007,7 +1007,8 @@
 
 	mm = get_task_mm(task);
 	if (mm && mm != current->mm &&
-			!ptrace_may_access(task, mode)) {
+			!ptrace_may_access(task, mode) &&
+			!capable(CAP_SYS_RESOURCE)) {
 		mmput(mm);
 		mm = ERR_PTR(-EACCES);
 	}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6b66959..4f64490 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -332,6 +332,9 @@
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
+	if (!notify && old_notify)
+		cancel_work_sync(&old_notify->work);
+
 	if (old_notify)
 		kref_put(&old_notify->kref, old_notify->release);
 
diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c
index 0374a59..e99d860 100644
--- a/kernel/locking/spinlock_debug.c
+++ b/kernel/locking/spinlock_debug.c
@@ -12,6 +12,8 @@
 #include <linux/debug_locks.h>
 #include <linux/delay.h>
 #include <linux/export.h>
+#include <linux/bug.h>
+#include <soc/qcom/watchdog.h>
 
 void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
 			  struct lock_class_key *key)
@@ -64,6 +66,11 @@
 		owner ? owner->comm : "<none>",
 		owner ? task_pid_nr(owner) : -1,
 		lock->owner_cpu);
+#ifdef CONFIG_DEBUG_SPINLOCK_BITE_ON_BUG
+	msm_trigger_wdog_bite();
+#elif defined(CONFIG_DEBUG_SPINLOCK_PANIC_ON_BUG)
+	BUG();
+#endif
 	dump_stack();
 }
 
@@ -114,7 +121,7 @@
 		__delay(1);
 	}
 	/* lockup suspected: */
-	spin_dump(lock, "lockup suspected");
+	spin_bug(lock, "lockup suspected");
 #ifdef CONFIG_SMP
 	trigger_all_cpu_backtrace();
 #endif
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e8517b6..6b00a29 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -1,6 +1,7 @@
 config SUSPEND
 	bool "Suspend to RAM and standby"
 	depends on ARCH_SUSPEND_POSSIBLE
+	select RTC_LIB
 	default y
 	---help---
 	  Allow the system to enter sleep states in which main memory is
@@ -28,6 +29,15 @@
 	  of suspend, or they are content with invoking sync() from
 	  user-space before invoking suspend.  Say Y if that's your case.
 
+config WAKELOCK
+	bool "Android's method of preventing suspend"
+	default y
+	---help---
+	  This allows applications to prevent the CPU from suspending while
+	  they need it.
+
+	  Say Y if you are running an Android userspace.
+
 config HIBERNATE_CALLBACKS
 	bool
 
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index eb4f717..80578f2 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -14,3 +14,5 @@
 obj-$(CONFIG_PM_WAKELOCKS)	+= wakelock.o
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
+
+obj-$(CONFIG_SUSPEND)	+= wakeup_reason.o
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 2fba066..b911dec 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -18,6 +18,7 @@
 #include <linux/workqueue.h>
 #include <linux/kmod.h>
 #include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
 
 /* 
  * Timeout for stopping processes
@@ -34,6 +35,9 @@
 	unsigned int elapsed_msecs;
 	bool wakeup = false;
 	int sleep_usecs = USEC_PER_MSEC;
+#ifdef CONFIG_PM_SLEEP
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+#endif
 
 	start = ktime_get_boottime();
 
@@ -63,6 +67,11 @@
 			break;
 
 		if (pm_wakeup_pending()) {
+#ifdef CONFIG_PM_SLEEP
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
+#endif
 			wakeup = true;
 			break;
 		}
@@ -81,18 +90,20 @@
 	elapsed = ktime_sub(end, start);
 	elapsed_msecs = ktime_to_ms(elapsed);
 
-	if (todo) {
+	if (wakeup) {
 		pr_cont("\n");
-		pr_err("Freezing of tasks %s after %d.%03d seconds "
-		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
-		       wakeup ? "aborted" : "failed",
+		pr_err("Freezing of tasks aborted after %d.%03d seconds",
+		       elapsed_msecs / 1000, elapsed_msecs % 1000);
+	} else if (todo) {
+		pr_cont("\n");
+		pr_err("Freezing of tasks failed after %d.%03d seconds"
+		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
 		       elapsed_msecs / 1000, elapsed_msecs % 1000,
 		       todo - wq_busy, wq_busy);
 
 		if (wq_busy)
 			show_workqueue_state();
 
-		if (!wakeup) {
 			read_lock(&tasklist_lock);
 			for_each_process_thread(g, p) {
 				if (p != current && !freezer_should_skip(p)
@@ -100,7 +111,6 @@
 					sched_show_task(p);
 			}
 			read_unlock(&tasklist_lock);
-		}
 	} else {
 		pr_cont("(elapsed %d.%03d seconds) ", elapsed_msecs / 1000,
 			elapsed_msecs % 1000);
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 168ff44..311c14f 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -43,6 +43,8 @@
 #include <linux/kernel.h>
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
 
 #include <linux/uaccess.h>
 #include <linux/export.h>
@@ -67,6 +69,8 @@
 static struct pm_qos_constraints cpu_dma_constraints = {
 	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
 	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE },
 	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
@@ -81,6 +85,8 @@
 static struct pm_qos_constraints network_lat_constraints = {
 	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
 	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_NETWORK_LAT_DEFAULT_VALUE },
 	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
@@ -91,11 +97,12 @@
 	.name = "network_latency",
 };
 
-
 static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
 static struct pm_qos_constraints network_tput_constraints = {
 	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
 	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.target_per_cpu = { [0 ... (NR_CPUS - 1)] =
+				PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE },
 	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
@@ -259,22 +266,52 @@
 	.release        = single_release,
 };
 
+static inline void pm_qos_set_value_for_cpus(struct pm_qos_constraints *c)
+{
+	struct pm_qos_request *req = NULL;
+	int cpu;
+	s32 qos_val[NR_CPUS] = { [0 ... (NR_CPUS - 1)] = c->default_value };
+
+	plist_for_each_entry(req, &c->list, node) {
+		for_each_cpu(cpu, &req->cpus_affine) {
+			switch (c->type) {
+			case PM_QOS_MIN:
+				if (qos_val[cpu] > req->node.prio)
+					qos_val[cpu] = req->node.prio;
+				break;
+			case PM_QOS_MAX:
+				if (req->node.prio > qos_val[cpu])
+					qos_val[cpu] = req->node.prio;
+				break;
+			default:
+				BUG();
+				break;
+			}
+		}
+	}
+
+	for_each_possible_cpu(cpu)
+		c->target_per_cpu[cpu] = qos_val[cpu];
+}
+
 /**
  * pm_qos_update_target - manages the constraints list and calls the notifiers
  *  if needed
  * @c: constraints data struct
- * @node: request to add to the list, to update or to remove
+ * @req: request to add to the list, to update or to remove
  * @action: action to take on the constraints list
  * @value: value of the request to add or update
  *
  * This function returns 1 if the aggregated constraint value has changed, 0
  *  otherwise.
  */
-int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
-			 enum pm_qos_req_action action, int value)
+int pm_qos_update_target(struct pm_qos_constraints *c,
+				struct pm_qos_request *req,
+				enum pm_qos_req_action action, int value)
 {
 	unsigned long flags;
 	int prev_value, curr_value, new_value;
+	struct plist_node *node = &req->node;
 	int ret;
 
 	spin_lock_irqsave(&pm_qos_lock, flags);
@@ -306,6 +343,7 @@
 
 	curr_value = pm_qos_get_value(c);
 	pm_qos_set_value(c, curr_value);
+	pm_qos_set_value_for_cpus(c);
 
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
@@ -398,12 +436,50 @@
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
+int pm_qos_request_for_cpu(int pm_qos_class, int cpu)
+{
+	return pm_qos_array[pm_qos_class]->constraints->target_per_cpu[cpu];
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpu);
+
 int pm_qos_request_active(struct pm_qos_request *req)
 {
 	return req->pm_qos_class != 0;
 }
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
+int pm_qos_request_for_cpumask(int pm_qos_class, struct cpumask *mask)
+{
+	unsigned long irqflags;
+	int cpu;
+	struct pm_qos_constraints *c = NULL;
+	int val;
+
+	spin_lock_irqsave(&pm_qos_lock, irqflags);
+	c = pm_qos_array[pm_qos_class]->constraints;
+	val = c->default_value;
+
+	for_each_cpu(cpu, mask) {
+		switch (c->type) {
+		case PM_QOS_MIN:
+			if (c->target_per_cpu[cpu] < val)
+				val = c->target_per_cpu[cpu];
+			break;
+		case PM_QOS_MAX:
+			if (c->target_per_cpu[cpu] > val)
+				val = c->target_per_cpu[cpu];
+			break;
+		default:
+			BUG();
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pm_qos_lock, irqflags);
+
+	return val;
+}
+EXPORT_SYMBOL(pm_qos_request_for_cpumask);
+
 static void __pm_qos_update_request(struct pm_qos_request *req,
 			   s32 new_value)
 {
@@ -412,7 +488,7 @@
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			req, PM_QOS_UPDATE_REQ, new_value);
 }
 
 /**
@@ -430,6 +506,41 @@
 	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
 }
 
+#ifdef CONFIG_SMP
+static void pm_qos_irq_release(struct kref *ref)
+{
+	unsigned long flags;
+	struct irq_affinity_notify *notify = container_of(ref,
+					struct irq_affinity_notify, kref);
+	struct pm_qos_request *req = container_of(notify,
+					struct pm_qos_request, irq_notify);
+	struct pm_qos_constraints *c =
+				pm_qos_array[req->pm_qos_class]->constraints;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	cpumask_setall(&req->cpus_affine);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, c->default_value);
+}
+
+static void pm_qos_irq_notify(struct irq_affinity_notify *notify,
+		const cpumask_t *mask)
+{
+	unsigned long flags;
+	struct pm_qos_request *req = container_of(notify,
+					struct pm_qos_request, irq_notify);
+	struct pm_qos_constraints *c =
+				pm_qos_array[req->pm_qos_class]->constraints;
+
+	spin_lock_irqsave(&pm_qos_lock, flags);
+	cpumask_copy(&req->cpus_affine, mask);
+	spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+	pm_qos_update_target(c, req, PM_QOS_UPDATE_REQ, req->node.prio);
+}
+#endif
+
 /**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
@@ -453,11 +564,56 @@
 		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
 		return;
 	}
+
+	switch (req->type) {
+	case PM_QOS_REQ_AFFINE_CORES:
+		if (cpumask_empty(&req->cpus_affine)) {
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			WARN(1, KERN_ERR "Affine cores not set for request with affinity flag\n");
+		}
+		break;
+#ifdef CONFIG_SMP
+	case PM_QOS_REQ_AFFINE_IRQ:
+		if (irq_can_set_affinity(req->irq)) {
+			int ret = 0;
+			struct irq_desc *desc = irq_to_desc(req->irq);
+			struct cpumask *mask = desc->irq_data.common->affinity;
+
+			/* Get the current affinity */
+			cpumask_copy(&req->cpus_affine, mask);
+			req->irq_notify.irq = req->irq;
+			req->irq_notify.notify = pm_qos_irq_notify;
+			req->irq_notify.release = pm_qos_irq_release;
+
+			ret = irq_set_affinity_notifier(req->irq,
+					&req->irq_notify);
+			if (ret) {
+				WARN(1, KERN_ERR "IRQ affinity notify set failed\n");
+				req->type = PM_QOS_REQ_ALL_CORES;
+				cpumask_setall(&req->cpus_affine);
+			}
+		} else {
+			req->type = PM_QOS_REQ_ALL_CORES;
+			cpumask_setall(&req->cpus_affine);
+			WARN(1, KERN_ERR "IRQ-%d not set for request with affinity flag\n",
+					req->irq);
+		}
+		break;
+#endif
+	default:
+		WARN(1, KERN_ERR "Unknown request type %d\n", req->type);
+		/* fall through */
+	case PM_QOS_REQ_ALL_CORES:
+		cpumask_setall(&req->cpus_affine);
+		break;
+	}
+
 	req->pm_qos_class = pm_qos_class;
 	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	trace_pm_qos_add_request(pm_qos_class, value);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
-			     &req->node, PM_QOS_ADD_REQ, value);
+			     req, PM_QOS_ADD_REQ, value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_add_request);
 
@@ -520,7 +676,7 @@
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
-			&req->node, PM_QOS_UPDATE_REQ, new_value);
+			req, PM_QOS_UPDATE_REQ, new_value);
 
 	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
 }
@@ -548,7 +704,7 @@
 
 	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
-			     &req->node, PM_QOS_REMOVE_REQ,
+			     req, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
 	memset(req, 0, sizeof(*req));
 }
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 6ccb08f..2d0c99b 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -26,9 +26,11 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/ftrace.h>
+#include <linux/rtc.h>
 #include <trace/events/power.h>
 #include <linux/compiler.h>
 #include <linux/moduleparam.h>
+#include <linux/wakeup_reason.h>
 
 #include "power.h"
 
@@ -322,7 +324,8 @@
  */
 static int suspend_enter(suspend_state_t state, bool *wakeup)
 {
-	int error;
+	char suspend_abort[MAX_SUSPEND_ABORT_LEN];
+	int error, last_dev;
 
 	error = platform_suspend_prepare(state);
 	if (error)
@@ -330,7 +333,11 @@
 
 	error = dpm_suspend_late(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		pr_err("PM: late suspend of devices failed\n");
+		log_suspend_abort_reason("%s device failed to power down",
+			suspend_stats.failed_devs[last_dev]);
 		goto Platform_finish;
 	}
 	error = platform_suspend_prepare_late(state);
@@ -339,7 +346,11 @@
 
 	error = dpm_suspend_noirq(PMSG_SUSPEND);
 	if (error) {
+		last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+		last_dev %= REC_FAILED_NUM;
 		pr_err("PM: noirq suspend of devices failed\n");
+		log_suspend_abort_reason("noirq suspend of %s device failed",
+			suspend_stats.failed_devs[last_dev]);
 		goto Platform_early_resume;
 	}
 	error = platform_suspend_prepare_noirq(state);
@@ -363,8 +374,10 @@
 	}
 
 	error = disable_nonboot_cpus();
-	if (error || suspend_test(TEST_CPUS))
+	if (error || suspend_test(TEST_CPUS)) {
+		log_suspend_abort_reason("Disabling non-boot cpus failed");
 		goto Enable_cpus;
+	}
 
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
@@ -380,6 +393,9 @@
 				state, false);
 			events_check_enabled = false;
 		} else if (*wakeup) {
+			pm_get_active_wakeup_sources(suspend_abort,
+				MAX_SUSPEND_ABORT_LEN);
+			log_suspend_abort_reason(suspend_abort);
 			error = -EBUSY;
 		}
 		syscore_resume();
@@ -427,6 +443,7 @@
 	error = dpm_suspend_start(PMSG_SUSPEND);
 	if (error) {
 		pr_err("PM: Some devices failed to suspend, or early wake event detected\n");
+		log_suspend_abort_reason("Some devices failed to suspend, or early wake event detected");
 		goto Recover_platform;
 	}
 	suspend_test_finish("suspend devices");
@@ -527,6 +544,18 @@
 	return error;
 }
 
+static void pm_suspend_marker(char *annotation)
+{
+	struct timespec ts;
+	struct rtc_time tm;
+
+	getnstimeofday(&ts);
+	rtc_time_to_tm(ts.tv_sec, &tm);
+	pr_info("PM: suspend %s %d-%02d-%02d %02d:%02d:%02d.%09lu UTC\n",
+		annotation, tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
+		tm.tm_hour, tm.tm_min, tm.tm_sec, ts.tv_nsec);
+}
+
 /**
  * pm_suspend - Externally visible function for suspending the system.
  * @state: System sleep state to enter.
@@ -541,6 +570,7 @@
 	if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
 		return -EINVAL;
 
+	pm_suspend_marker("entry");
 	error = enter_state(state);
 	if (error) {
 		suspend_stats.fail++;
@@ -548,6 +578,7 @@
 	} else {
 		suspend_stats.success++;
 	}
+	pm_suspend_marker("exit");
 	return error;
 }
 EXPORT_SYMBOL(pm_suspend);
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644
index 0000000..252611f
--- /dev/null
+++ b/kernel/power/wakeup_reason.c
@@ -0,0 +1,225 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * suspend.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+
+
+#define MAX_WAKEUP_REASON_IRQS 32
+static int irq_list[MAX_WAKEUP_REASON_IRQS];
+static int irqcount;
+static bool suspend_abort;
+static char abort_reason[MAX_SUSPEND_ABORT_LEN];
+static struct kobject *wakeup_reason;
+static DEFINE_SPINLOCK(resume_reason_lock);
+
+static ktime_t last_monotime; /* monotonic time before last suspend */
+static ktime_t curr_monotime; /* monotonic time after last suspend */
+static ktime_t last_stime; /* monotonic boottime offset before last suspend */
+static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
+
+static ssize_t last_resume_reason_show(struct kobject *kobj, struct kobj_attribute *attr,
+		char *buf)
+{
+	int irq_no, buf_offset = 0;
+	struct irq_desc *desc;
+	spin_lock(&resume_reason_lock);
+	if (suspend_abort) {
+		buf_offset = sprintf(buf, "Abort: %s", abort_reason);
+	} else {
+		for (irq_no = 0; irq_no < irqcount; irq_no++) {
+			desc = irq_to_desc(irq_list[irq_no]);
+			if (desc && desc->action && desc->action->name)
+				buf_offset += sprintf(buf + buf_offset, "%d %s\n",
+						irq_list[irq_no], desc->action->name);
+			else
+				buf_offset += sprintf(buf + buf_offset, "%d\n",
+						irq_list[irq_no]);
+		}
+	}
+	spin_unlock(&resume_reason_lock);
+	return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+			struct kobj_attribute *attr, char *buf)
+{
+	struct timespec sleep_time;
+	struct timespec total_time;
+	struct timespec suspend_resume_time;
+
+	/*
+	 * total_time is calculated from monotonic boottime offsets because,
+	 * unlike CLOCK_MONOTONIC, they include the time spent in the suspend state.
+	 */
+	total_time = ktime_to_timespec(ktime_sub(curr_stime, last_stime));
+
+	/*
+	 * suspend_resume_time is the monotonic (CLOCK_MONOTONIC) time interval
+	 * between the points just before entering suspend and just after resume.
+	 */
+	suspend_resume_time = ktime_to_timespec(ktime_sub(curr_monotime, last_monotime));
+
+	/* sleep_time = total_time - suspend_resume_time */
+	sleep_time = timespec_sub(total_time, suspend_resume_time);
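+	/*
+	 * Worked example with made-up numbers: if 100.0s of boottime elapsed
+	 * across the cycle but only 2.5s of CLOCK_MONOTONIC time did, the
+	 * device spent 97.5s asleep.
+	 */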
+
+	/* Export suspend_resume_time and sleep_time in pair here. */
+	return sprintf(buf, "%lu.%09lu %lu.%09lu\n",
+				suspend_resume_time.tv_sec, suspend_resume_time.tv_nsec,
+				sleep_time.tv_sec, sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+	&resume_reason.attr,
+	&suspend_time.attr,
+	NULL,
+};
+static struct attribute_group attr_group = {
+	.attrs = attrs,
+};
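+
+/*
+ * These attributes appear under /sys/kernel/wakeup_reasons/ once
+ * wakeup_reason_init() has registered the group.  A sketch of the userspace
+ * view (the IRQ number, name, and timings below are made up for illustration):
+ *
+ *   $ cat /sys/kernel/wakeup_reasons/last_resume_reason
+ *   170 msmgpio
+ *
+ *   $ cat /sys/kernel/wakeup_reasons/last_suspend_time
+ *   0.139744008 3.627880667
+ */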
+
+/*
+ * Logs the wakeup reason IRQs to the kernel log and
+ * stores them so they can be exposed to userspace via sysfs.
+ */
+void log_wakeup_reason(int irq)
+{
+	struct irq_desc *desc;
+	desc = irq_to_desc(irq);
+	if (desc && desc->action && desc->action->name)
+		printk(KERN_INFO "Resume caused by IRQ %d, %s\n", irq,
+				desc->action->name);
+	else
+		printk(KERN_INFO "Resume caused by IRQ %d\n", irq);
+
+	spin_lock(&resume_reason_lock);
+	if (irqcount == MAX_WAKEUP_REASON_IRQS) {
+		spin_unlock(&resume_reason_lock);
+		printk(KERN_WARNING "Resume caused by more than %d IRQs\n",
+				MAX_WAKEUP_REASON_IRQS);
+		return;
+	}
+
+	irq_list[irqcount++] = irq;
+	spin_unlock(&resume_reason_lock);
+}
+
+int check_wakeup_reason(int irq)
+{
+	int irq_no;
+	int ret = false;
+
+	spin_lock(&resume_reason_lock);
+	for (irq_no = 0; irq_no < irqcount; irq_no++)
+		if (irq_list[irq_no] == irq) {
+			ret = true;
+			break;
+	}
+	spin_unlock(&resume_reason_lock);
+	return ret;
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+	va_list args;
+
+	spin_lock(&resume_reason_lock);
+
+	/* Suspend abort reason has already been logged. */
+	if (suspend_abort) {
+		spin_unlock(&resume_reason_lock);
+		return;
+	}
+
+	suspend_abort = true;
+	va_start(args, fmt);
+	vsnprintf(abort_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+	va_end(args);
+	spin_unlock(&resume_reason_lock);
+}
+
+/* Detects a suspend and clears all the previous wakeup reasons */
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		spin_lock(&resume_reason_lock);
+		irqcount = 0;
+		suspend_abort = false;
+		spin_unlock(&resume_reason_lock);
+		/* monotonic time since boot */
+		last_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		last_stime = ktime_get_boottime();
+		break;
+	case PM_POST_SUSPEND:
+		/* monotonic time since boot */
+		curr_monotime = ktime_get();
+		/* monotonic time since boot including the time spent in suspend */
+		curr_stime = ktime_get_boottime();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+	.notifier_call = wakeup_reason_pm_event,
+};
+
+/* Initializes the sysfs attributes and
+ * registers the pm_event notifier.
+ */
+int __init wakeup_reason_init(void)
+{
+	int retval;
+
+	retval = register_pm_notifier(&wakeup_reason_pm_notifier_block);
+	if (retval)
+		printk(KERN_WARNING "[%s] failed to register PM notifier %d\n",
+				__func__, retval);
+
+	wakeup_reason = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+	if (!wakeup_reason) {
+		printk(KERN_WARNING "[%s] failed to create a sysfs kobject\n",
+				__func__);
+		return 1;
+	}
+	retval = sysfs_create_group(wakeup_reason, &attr_group);
+	if (retval) {
+		kobject_put(wakeup_reason);
+		printk(KERN_WARNING "[%s] failed to create a sysfs group %d\n",
+				__func__, retval);
+	}
+	return 0;
+}
+
+late_initcall(wakeup_reason_init);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index f7a55e9..b38f3fb 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -56,6 +56,10 @@
 #include "braille.h"
 #include "internal.h"
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+extern void printascii(char *);
+#endif
+
 int console_printk[4] = {
 	CONSOLE_LOGLEVEL_DEFAULT,	/* console_loglevel */
 	MESSAGE_LOGLEVEL_DEFAULT,	/* default_message_loglevel */
@@ -1870,6 +1874,10 @@
 		}
 	}
 
+#ifdef CONFIG_EARLY_PRINTK_DIRECT
+	printascii(text);
+#endif
+
 	if (level == LOGLEVEL_DEFAULT)
 		level = default_message_loglevel;
 
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 5e59b83..1302fff 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -17,8 +17,9 @@
 
 obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o
+obj-y += wait.o swait.o completion.o idle.o sched_avg.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
+obj-$(CONFIG_SCHED_HMP) += hmp.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
 obj-$(CONFIG_SCHED_DEBUG) += debug.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 154fd68..fe20e30 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -91,6 +91,8 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
+ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
+
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -758,6 +760,7 @@
 	if (!(flags & ENQUEUE_RESTORE))
 		sched_info_queued(rq, p);
 	p->sched_class->enqueue_task(rq, p, flags);
+	trace_sched_enq_deq_task(p, 1, cpumask_bits(&p->cpus_allowed)[0]);
 }
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -766,6 +769,7 @@
 	if (!(flags & DEQUEUE_SAVE))
 		sched_info_dequeued(rq, p);
 	p->sched_class->dequeue_task(rq, p, flags);
+	trace_sched_enq_deq_task(p, 0, cpumask_bits(&p->cpus_allowed)[0]);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -998,8 +1002,9 @@
 
 	p->on_rq = TASK_ON_RQ_MIGRATING;
 	dequeue_task(rq, p, 0);
+	double_lock_balance(rq, cpu_rq(new_cpu));
 	set_task_cpu(p, new_cpu);
-	raw_spin_unlock(&rq->lock);
+	double_rq_unlock(cpu_rq(new_cpu), rq);
 
 	rq = cpu_rq(new_cpu);
 
@@ -1050,6 +1055,8 @@
 	struct migration_arg *arg = data;
 	struct task_struct *p = arg->task;
 	struct rq *rq = this_rq();
+	int src_cpu = cpu_of(rq);
+	bool moved = false;
 
 	/*
 	 * The original target cpu might have gone down and we might
@@ -1071,15 +1078,21 @@
 	 * we're holding p->pi_lock.
 	 */
 	if (task_rq(p) == rq) {
-		if (task_on_rq_queued(p))
+		if (task_on_rq_queued(p)) {
 			rq = __migrate_task(rq, p, arg->dest_cpu);
-		else
+			moved = true;
+		} else {
 			p->wake_cpu = arg->dest_cpu;
+		}
 	}
 	raw_spin_unlock(&rq->lock);
 	raw_spin_unlock(&p->pi_lock);
 
 	local_irq_enable();
+
+	if (moved)
+		notify_migration(src_cpu, arg->dest_cpu, false, p);
+
 	return 0;
 }
 
@@ -1161,7 +1174,8 @@
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
-	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
+	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+	if (dest_cpu >= nr_cpu_ids) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -1246,13 +1260,15 @@
 #endif
 #endif
 
-	trace_sched_migrate_task(p, new_cpu);
+	trace_sched_migrate_task(p, new_cpu, pct_task_load(p));
 
 	if (task_cpu(p) != new_cpu) {
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p);
 		p->se.nr_migrations++;
 		perf_event_task_migrate(p);
+
+		fixup_busy_time(p, new_cpu);
 	}
 
 	__set_task_cpu(p, new_cpu);
@@ -1456,7 +1472,7 @@
 		 * yield - it could be a while.
 		 */
 		if (unlikely(queued)) {
-			ktime_t to = ktime_set(0, NSEC_PER_SEC/HZ);
+			ktime_t to = ktime_set(0, NSEC_PER_SEC);
 
 			set_current_state(TASK_UNINTERRUPTIBLE);
 			schedule_hrtimeout(&to, HRTIMER_MODE_REL);
@@ -1798,6 +1814,8 @@
 
 void scheduler_ipi(void)
 {
+	int cpu = smp_processor_id();
+
 	/*
 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
@@ -1805,9 +1823,18 @@
 	 */
 	preempt_fold_need_resched();
 
-	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick() &&
+							!got_boost_kick())
 		return;
 
+	if (got_boost_kick()) {
+		struct rq *rq = cpu_rq(cpu);
+
+		if (rq->curr->sched_class == &fair_sched_class)
+			check_for_migration(rq, rq->curr);
+		clear_boost_kick(cpu);
+	}
+
 	/*
 	 * Not all reschedule IPI handlers call irq_enter/irq_exit, since
 	 * traditionally all their work was done from the interrupt return
@@ -2009,6 +2036,18 @@
 {
 	unsigned long flags;
 	int cpu, success = 0;
+#ifdef CONFIG_SMP
+	unsigned int old_load;
+	struct rq *rq;
+	u64 wallclock;
+	struct related_thread_group *grp = NULL;
+	int src_cpu;
+	bool notif_required = false;
+	bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
+	bool check_group = false;
+#endif
+
+	wake_flags &= ~WF_NO_NOTIFIER;
 
 	/*
 	 * If we are going to wake up a thread waiting for CONDITION we
@@ -2082,14 +2121,33 @@
 	 */
 	smp_cond_load_acquire(&p->on_cpu, !VAL);
 
+	rq = cpu_rq(task_cpu(p));
+	raw_spin_lock(&rq->lock);
+	old_load = task_load(p);
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
+	raw_spin_unlock(&rq->lock);
+
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	if (update_preferred_cluster(grp, p, old_load))
+		set_preferred_cluster(grp);
+	rcu_read_unlock();
+	check_group = grp != NULL;
+
 	p->sched_contributes_to_load = !!task_contributes_to_load(p);
 	p->state = TASK_WAKING;
 
 	cpu = select_task_rq(p, p->wake_cpu, SD_BALANCE_WAKE, wake_flags);
-	if (task_cpu(p) != cpu) {
+	src_cpu = task_cpu(p);
+	if (src_cpu != cpu) {
 		wake_flags |= WF_MIGRATED;
 		set_task_cpu(p, cpu);
+		notif_required = true;
 	}
+
+	set_task_last_wake(p, wallclock);
 #endif /* CONFIG_SMP */
 
 	ttwu_queue(p, cpu, wake_flags);
@@ -2098,6 +2156,19 @@
 out:
 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
+#ifdef CONFIG_SMP
+	if (freq_notif_allowed) {
+		if (notif_required && !same_freq_domain(src_cpu, cpu)) {
+			check_for_freq_change(cpu_rq(cpu),
+						false, check_group);
+			check_for_freq_change(cpu_rq(src_cpu),
+						false, check_group);
+		} else if (success) {
+			check_for_freq_change(cpu_rq(cpu), true, false);
+		}
+	}
+#endif
+
 	return success;
 }
 
@@ -2114,9 +2185,13 @@
 {
 	struct rq *rq = task_rq(p);
 
-	if (WARN_ON_ONCE(rq != this_rq()) ||
-	    WARN_ON_ONCE(p == current))
+	if (rq != this_rq() || p == current) {
+		printk_deferred("%s: Failed to wakeup task %d (%s), rq = %p,"
+			" this_rq = %p, p = %p, current = %p\n", __func__,
+			task_pid_nr(p), p->comm, rq, this_rq(), p, current);
+
 		return;
+	}
 
 	lockdep_assert_held(&rq->lock);
 
@@ -2139,8 +2214,14 @@
 
 	trace_sched_waking(p);
 
-	if (!task_on_rq_queued(p))
+	if (!task_on_rq_queued(p)) {
+		u64 wallclock = sched_ktime_clock();
+
+		update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+		update_task_ravg(p, rq, TASK_WAKE, wallclock, 0);
 		ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+		set_task_last_wake(p, wallclock);
+	}
 
 	ttwu_do_wakeup(rq, p, 0, cookie);
 	ttwu_stat(p, smp_processor_id(), 0);
@@ -2166,6 +2247,26 @@
 }
 EXPORT_SYMBOL(wake_up_process);
 
+/**
+ * wake_up_process_no_notif - Wake up a specific process without notifying
+ * governor
+ * @p: The process to be woken up.
+ *
+ * Attempt to wake up the nominated process and move it to the set of runnable
+ * processes.
+ *
+ * Return: 1 if the process was woken up, 0 if it was already running.
+ *
+ * It may be assumed that this function implies a write memory barrier before
+ * changing the task state if and only if any tasks are woken up.
+ */
+int wake_up_process_no_notif(struct task_struct *p)
+{
+	WARN_ON(task_is_stopped_or_traced(p));
+	return try_to_wake_up(p, TASK_NORMAL, WF_NO_NOTIFIER);
+}
+EXPORT_SYMBOL(wake_up_process_no_notif);
+
 int wake_up_state(struct task_struct *p, unsigned int state)
 {
 	return try_to_wake_up(p, state, 0);
@@ -2188,6 +2289,44 @@
 	dl_se->dl_yielded = 0;
 }
 
+#ifdef CONFIG_SCHED_HMP
+/*
+ * sched_exit() - Set EXITING_TASK_MARKER in task's ravg.demand field
+ *
+ * Stop accounting (exiting) task's future cpu usage
+ *
+ * We need this so that reset_all_window_stats() can function correctly.
+ * reset_all_window_stats() depends on the do_each_thread/for_each_thread task
+ * iterators to reset *all* tasks' statistics. Exiting tasks, however, become
+ * invisible to those iterators. sched_exit() is called on an exiting task prior
+ * to its removal from the task list, which lets reset_all_window_stats()
+ * function correctly.
+ */
+void sched_exit(struct task_struct *p)
+{
+	unsigned long flags;
+	int cpu = get_cpu();
+	struct rq *rq = cpu_rq(cpu);
+	u64 wallclock;
+
+	sched_set_group_id(p, 0);
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	/* rq->curr == p */
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	dequeue_task(rq, p, 0);
+	reset_task_stats(p);
+	p->ravg.mark_start = wallclock;
+	p->ravg.sum_history[0] = EXITING_TASK_MARKER;
+	enqueue_task(rq, p, 0);
+	clear_ed_task(p, rq);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	put_cpu();
+}
+#endif /* CONFIG_SCHED_HMP */
+
 /*
  * Perform scheduler related setup for a newly forked process p.
  * p is forked by current.
@@ -2564,6 +2703,8 @@
 	struct rq *rq;
 
 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+	init_new_task_load(p);
+	add_new_task_to_grp(p);
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
 	/*
@@ -2579,6 +2720,7 @@
 	rq = __task_rq_lock(p, &rf);
 	post_init_entity_util_avg(&p->se);
 
+	mark_task_starting(p);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
 	trace_sched_wakeup_new(p);
@@ -2971,7 +3113,7 @@
 	*load = rq->load.weight;
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_HMP)
 
 /*
  * sched_exec - execve() is a valuable balancing opportunity, because at
@@ -3077,16 +3219,32 @@
 	int cpu = smp_processor_id();
 	struct rq *rq = cpu_rq(cpu);
 	struct task_struct *curr = rq->curr;
+	u64 wallclock;
+	bool early_notif;
+	u32 old_load;
+	struct related_thread_group *grp;
 
 	sched_clock_tick();
 
 	raw_spin_lock(&rq->lock);
+
+	old_load = task_load(curr);
+	set_window_start(rq);
+
 	update_rq_clock(rq);
 	curr->sched_class->task_tick(rq, curr, 0);
 	cpu_load_update_active(rq);
 	calc_global_load_tick(rq);
+
+	wallclock = sched_ktime_clock();
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	early_notif = early_detection_notify(rq, wallclock);
+
 	raw_spin_unlock(&rq->lock);
 
+	if (early_notif)
+		atomic_notifier_call_chain(&load_alert_notifier_head,
+					0, (void *)(long)cpu);
 	perf_event_task_tick();
 
 #ifdef CONFIG_SMP
@@ -3094,6 +3252,15 @@
 	trigger_load_balance(rq);
 #endif
 	rq_last_tick_reset(rq);
+
+	rcu_read_lock();
+	grp = task_related_thread_group(curr);
+	if (update_preferred_cluster(grp, curr, old_load))
+		set_preferred_cluster(grp);
+	rcu_read_unlock();
+
+	if (curr->sched_class == &fair_sched_class)
+		check_for_migration(rq, curr);
 }
 
 #ifdef CONFIG_NO_HZ_FULL
@@ -3227,6 +3394,9 @@
 	if (panic_on_warn)
 		panic("scheduling while atomic\n");
 
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+	BUG();
+#endif
 	dump_stack();
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -3337,6 +3507,7 @@
 	struct pin_cookie cookie;
 	struct rq *rq;
 	int cpu;
+	u64 wallclock;
 
 	cpu = smp_processor_id();
 	rq = cpu_rq(cpu);
@@ -3389,6 +3560,11 @@
 		update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev, cookie);
+
+	wallclock = sched_ktime_clock();
+	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
@@ -3398,6 +3574,8 @@
 		rq->curr = next;
 		++*switch_count;
 
+		set_task_last_switch_out(prev, wallclock);
+
 		trace_sched_switch(preempt, prev, next);
 		rq = context_switch(rq, prev, next, cookie); /* unlocks the rq */
 	} else {
@@ -5273,10 +5451,11 @@
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
 
+	__sched_fork(0, idle);
+
 	raw_spin_lock_irqsave(&idle->pi_lock, flags);
 	raw_spin_lock(&rq->lock);
 
-	__sched_fork(0, idle);
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
@@ -5586,7 +5765,10 @@
 		rq = __migrate_task(rq, next, dest_cpu);
 		if (rq != dead_rq) {
 			raw_spin_unlock(&rq->lock);
+			raw_spin_unlock(&next->pi_lock);
+			notify_migration(dead_rq->cpu, dest_cpu, true, next);
 			rq = dead_rq;
+			raw_spin_lock(&next->pi_lock);
 			raw_spin_lock(&rq->lock);
 		}
 		raw_spin_unlock(&next->pi_lock);
@@ -6030,6 +6212,7 @@
 {
 	struct rq *rq = cpu_rq(cpu);
 	struct sched_domain *tmp;
+	unsigned long next_balance = rq->next_balance;
 
 	/* Remove the sched domains which do not contribute to scheduling. */
 	for (tmp = sd; tmp; ) {
@@ -6061,6 +6244,17 @@
 			sd->child = NULL;
 	}
 
+	for (tmp = sd; tmp; ) {
+		unsigned long interval;
+
+		interval = msecs_to_jiffies(tmp->balance_interval);
+		if (time_after(next_balance, tmp->last_balance + interval))
+			next_balance = tmp->last_balance + interval;
+
+		tmp = tmp->parent;
+	}
+	rq->next_balance = next_balance;
+
 	sched_domain_debug(sd, cpu);
 
 	rq_attach_root(rq, rd);
@@ -7386,7 +7580,11 @@
 static void sched_rq_cpu_starting(unsigned int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
 
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	set_window_start(rq);
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
 	rq->calc_load_update = calc_load_update;
 	update_max_interval();
 }
@@ -7407,6 +7605,8 @@
 	/* Handle pending wakeups and then migrate everything off */
 	sched_ttwu_pending();
 	raw_spin_lock_irqsave(&rq->lock, flags);
+	migrate_sync_cpu(cpu);
+
 	if (rq->rd) {
 		BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
 		set_rq_offline(rq);
@@ -7414,6 +7614,9 @@
 	migrate_tasks(rq);
 	BUG_ON(rq->nr_running != 1);
 	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	clear_hmp_request(cpu);
+
 	calc_load_migrate(rq);
 	update_max_interval();
 	nohz_balance_exit_idle(cpu);
@@ -7459,6 +7662,8 @@
 		cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
 	mutex_unlock(&sched_domains_mutex);
 
+	update_cluster_topology();
+
 	/* Move init over to a non-isolated CPU */
 	if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
 		BUG();
@@ -7530,6 +7735,11 @@
 	for (i = 0; i < WAIT_TABLE_SIZE; i++)
 		init_waitqueue_head(bit_wait_table + i);
 
+#ifdef CONFIG_SCHED_HMP
+	pr_info("HMP scheduling enabled.\n");
+	init_clusters();
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
@@ -7646,7 +7856,36 @@
 		rq->idle_stamp = 0;
 		rq->avg_idle = 2*sysctl_sched_migration_cost;
 		rq->max_idle_balance_cost = sysctl_sched_migration_cost;
+		rq->push_task = NULL;
+#ifdef CONFIG_SCHED_HMP
+		cpumask_set_cpu(i, &rq->freq_domain_cpumask);
+		rq->hmp_stats.cumulative_runnable_avg = 0;
+		rq->window_start = 0;
+		rq->hmp_stats.nr_big_tasks = 0;
+		rq->hmp_flags = 0;
+		rq->cur_irqload = 0;
+		rq->avg_irqload = 0;
+		rq->irqload_ts = 0;
+		rq->static_cpu_pwr_cost = 0;
+		rq->cc.cycles = 1;
+		rq->cc.time = 1;
+		rq->cstate = 0;
+		rq->wakeup_latency = 0;
+		rq->wakeup_energy = 0;
 
+		/*
+		 * All cpus are part of the same cluster by default. This
+		 * avoids the need to check for rq->cluster being non-NULL in
+		 * hot paths like select_best_cpu().
+		 */
+		rq->cluster = &init_cluster;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		rq->old_busy_time = 0;
+		rq->old_estimated_time = 0;
+		rq->old_busy_time_group = 0;
+		rq->hmp_stats.pred_demands_sum = 0;
+#endif
 		INIT_LIST_HEAD(&rq->cfs_tasks);
 
 		rq_attach_root(rq, &def_root_domain);
@@ -7662,6 +7901,8 @@
 		atomic_set(&rq->nr_iowait, 0);
 	}
 
+	set_hmp_defaults();
+
 	set_load_weight(&init_task);
 
 	/*
@@ -7703,6 +7944,14 @@
 	return (nested == preempt_offset);
 }
 
+static int __might_sleep_init_called;
+int __init __might_sleep_init(void)
+{
+	__might_sleep_init_called = 1;
+	return 0;
+}
+early_initcall(__might_sleep_init);
+
 void __might_sleep(const char *file, int line, int preempt_offset)
 {
 	/*
@@ -7728,8 +7977,10 @@
 
 	rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. */
 	if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
-	     !is_idle_task(current)) ||
-	    system_state != SYSTEM_RUNNING || oops_in_progress)
+	     !is_idle_task(current)) || oops_in_progress)
+		return;
+	if (system_state != SYSTEM_RUNNING &&
+	    (!__might_sleep_init_called || system_state != SYSTEM_BOOTING))
 		return;
 	if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
 		return;
@@ -7758,6 +8009,9 @@
 		print_ip_sym(preempt_disable_ip);
 		pr_cont("\n");
 	}
+#ifdef CONFIG_PANIC_ON_SCHED_BUG
+	BUG();
+#endif
 	dump_stack();
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 }
@@ -8359,7 +8613,7 @@
 
 #ifdef CONFIG_CGROUP_SCHED
 
-static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+inline struct task_group *css_tg(struct cgroup_subsys_state *css)
 {
 	return css ? container_of(css, struct task_group, css) : NULL;
 }
@@ -8746,6 +9000,13 @@
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 static struct cftype cpu_files[] = {
+#ifdef CONFIG_SCHED_HMP
+	{
+		.name = "upmigrate_discourage",
+		.read_u64 = cpu_upmigrate_discourage_read_u64,
+		.write_u64 = cpu_upmigrate_discourage_write_u64,
+	},
+#endif
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	{
 		.name = "shares",
@@ -8791,6 +9052,7 @@
 	.fork		= cpu_cgroup_fork,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
+	.allow_attach   = subsys_cgroup_allow_attach,
 	.legacy_cftypes	= cpu_files,
 	.early_init	= true,
 };
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 5ebee31..6ce492f 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -46,12 +46,15 @@
 	struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
 	s64 delta;
 	int cpu;
+	u64 wallclock;
+	bool account = true;
 
 	if (!sched_clock_irqtime)
 		return;
 
 	cpu = smp_processor_id();
-	delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
+	wallclock = sched_clock_cpu(cpu);
+	delta = wallclock - irqtime->irq_start_time;
 	irqtime->irq_start_time += delta;
 
 	u64_stats_update_begin(&irqtime->sync);
@@ -65,6 +68,13 @@
 		irqtime->hardirq_time += delta;
 	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
 		irqtime->softirq_time += delta;
+	else
+		account = false;
+
+	if (account)
+		sched_account_irqtime(cpu, curr, delta, wallclock);
+	else if (curr != this_cpu_ksoftirqd())
+		sched_account_irqstart(cpu, curr, wallclock);
 
 	u64_stats_update_end(&irqtime->sync);
 }
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 37e2449..ffca478 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -820,6 +820,41 @@
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_dl(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 static inline
 void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
 {
@@ -829,6 +864,7 @@
 	WARN_ON(!dl_prio(prio));
 	dl_rq->dl_nr_running++;
 	add_nr_running(rq_of_dl_rq(dl_rq), 1);
+	inc_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	inc_dl_deadline(dl_rq, deadline);
 	inc_dl_migration(dl_se, dl_rq);
@@ -843,6 +879,7 @@
 	WARN_ON(!dl_rq->dl_nr_running);
 	dl_rq->dl_nr_running--;
 	sub_nr_running(rq_of_dl_rq(dl_rq), 1);
+	dec_hmp_sched_stats_dl(rq_of_dl_rq(dl_rq), dl_task_of(dl_se));
 
 	dec_dl_deadline(dl_rq, dl_se->deadline);
 	dec_dl_migration(dl_se, dl_rq);
@@ -1500,9 +1537,11 @@
 		goto retry;
 	}
 
+	next_task->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, later_rq->cpu);
 	activate_task(later_rq, next_task, 0);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	ret = 1;
 
 	resched_curr(later_rq);
@@ -1588,9 +1627,11 @@
 
 			resched = true;
 
+			p->on_rq = TASK_ON_RQ_MIGRATING;
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
 			activate_task(this_rq, p, 0);
+			p->on_rq = TASK_ON_RQ_QUEUED;
 			dmin = p->dl.deadline;
 
 			/* Is there any other task even earlier? */
@@ -1803,6 +1844,9 @@
 	.switched_to		= switched_to_dl,
 
 	.update_curr		= update_curr_dl,
+#ifdef CONFIG_SCHED_HMP
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_dl,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index fa178b6..5671f26 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -534,6 +534,14 @@
 			cfs_rq->throttled);
 	SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
 			cfs_rq->throttle_count);
+	SEQ_printf(m, "  .%-30s: %d\n", "runtime_enabled",
+			cfs_rq->runtime_enabled);
+#ifdef CONFIG_SCHED_HMP
+	SEQ_printf(m, "  .%-30s: %d\n", "nr_big_tasks",
+			cfs_rq->hmp_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "cumulative_runnable_avg",
+			cfs_rq->hmp_stats.cumulative_runnable_avg);
+#endif
 #endif
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -622,6 +630,23 @@
 	P(cpu_load[2]);
 	P(cpu_load[3]);
 	P(cpu_load[4]);
+#ifdef CONFIG_SMP
+	P(cpu_capacity);
+#endif
+#ifdef CONFIG_SCHED_HMP
+	P(static_cpu_pwr_cost);
+	P(cluster->static_cluster_pwr_cost);
+	P(cluster->load_scale_factor);
+	P(cluster->capacity);
+	P(cluster->max_possible_capacity);
+	P(cluster->efficiency);
+	P(cluster->cur_freq);
+	P(cluster->max_freq);
+	P(cluster->exec_scale_factor);
+	P(hmp_stats.nr_big_tasks);
+	SEQ_printf(m, "  .%-30s: %llu\n", "hmp_stats.cumulative_runnable_avg",
+			rq->hmp_stats.cumulative_runnable_avg);
+#endif
 #undef P
 #undef PN
 
@@ -700,6 +725,14 @@
 	PN(sysctl_sched_wakeup_granularity);
 	P(sysctl_sched_child_runs_first);
 	P(sysctl_sched_features);
+#ifdef CONFIG_SCHED_HMP
+	P(sched_upmigrate);
+	P(sched_downmigrate);
+	P(sched_init_task_load_windows);
+	P(min_capacity);
+	P(max_capacity);
+	P(sched_ravg_window);
+#endif
 #undef PN
 #undef P
 
@@ -861,6 +894,9 @@
 void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 {
 	unsigned long nr_switches;
+	unsigned int load_avg;
+
+	load_avg = pct_task_load(p);
 
 	SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
 						get_nr_threads(p));
@@ -918,7 +954,12 @@
 		P_SCHEDSTAT(se.statistics.nr_wakeups_affine_attempts);
 		P_SCHEDSTAT(se.statistics.nr_wakeups_passive);
 		P_SCHEDSTAT(se.statistics.nr_wakeups_idle);
-
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+		__P(load_avg);
+#ifdef CONFIG_SCHED_HMP
+		P(ravg.demand);
+#endif
+#endif
 		avg_atom = p->se.sum_exec_runtime;
 		if (nr_switches)
 			avg_atom = div64_ul(avg_atom, nr_switches);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c242944..e84ec63 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -31,9 +31,8 @@
 #include <linux/migrate.h>
 #include <linux/task_work.h>
 
-#include <trace/events/sched.h>
-
 #include "sched.h"
+#include <trace/events/sched.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -81,6 +80,14 @@
 unsigned int sysctl_sched_child_runs_first __read_mostly;
 
 /*
+ * Controls whether, when SD_SHARE_PKG_RESOURCES is set, all tasks
+ * go to idle CPUs when woken. Note that even when this is off, the
+ * per-task flag PF_WAKE_UP_IDLE can still cause a task to go to an
+ * idle CPU upon being woken.
+ */
+unsigned int __read_mostly sysctl_sched_wake_to_idle;
+
+/*
  * SCHED_OTHER wake-up granularity.
  * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
  *
@@ -242,6 +249,9 @@
 	return mul_u64_u32_shr(delta_exec, fact, shift);
 }
 
+#ifdef CONFIG_SMP
+static int active_load_balance_cpu_stop(void *data);
+#endif
 
 const struct sched_class fair_sched_class;
 
@@ -937,6 +947,7 @@
 			}
 
 			trace_sched_stat_blocked(tsk, delta);
+			trace_sched_blocked_reason(tsk);
 
 			/*
 			 * Blocking time is in units of nanosecs, so shift by
@@ -2670,6 +2681,25 @@
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_SMP
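+/*
+ * Accessors for the per-task PF_WAKE_UP_IDLE flag, which hints that wakeups
+ * involving this task should prefer idle CPUs (see wake_to_idle() below).
+ */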
+u32 sched_get_wake_up_idle(struct task_struct *p)
+{
+	u32 enabled = p->flags & PF_WAKE_UP_IDLE;
+
+	return !!enabled;
+}
+
+int sched_set_wake_up_idle(struct task_struct *p, int wake_up_idle)
+{
+	int enable = !!wake_up_idle;
+
+	if (enable)
+		p->flags |= PF_WAKE_UP_IDLE;
+	else
+		p->flags &= ~PF_WAKE_UP_IDLE;
+
+	return 0;
+}
+
 /* Precomputed fixed inverse multiplies for multiplication by y^n */
 static const u32 runnable_avg_yN_inv[] = {
 	0xffffffff, 0xfa83b2da, 0xf5257d14, 0xefe4b99a, 0xeac0c6e6, 0xe5b906e6,
@@ -2755,6 +2785,920 @@
 	return contrib + runnable_avg_yN_sum[n];
 }
 
+#ifdef CONFIG_SCHED_HMP
+
+/* CPU selection flag */
+#define SBC_FLAG_PREV_CPU				0x1
+#define SBC_FLAG_BEST_CAP_CPU				0x2
+#define SBC_FLAG_CPU_COST				0x4
+#define SBC_FLAG_MIN_COST				0x8
+#define SBC_FLAG_IDLE_LEAST_LOADED			0x10
+#define SBC_FLAG_IDLE_CSTATE				0x20
+#define SBC_FLAG_COST_CSTATE_TIE_BREAKER		0x40
+#define SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER	0x80
+#define SBC_FLAG_CSTATE_LOAD				0x100
+#define SBC_FLAG_BEST_SIBLING				0x200
+
+/* Cluster selection flag */
+#define SBC_FLAG_COLOC_CLUSTER				0x10000
+#define SBC_FLAG_WAKER_CLUSTER				0x20000
+#define SBC_FLAG_BACKUP_CLUSTER				0x40000
+
+struct cpu_select_env {
+	struct task_struct *p;
+	struct related_thread_group *rtg;
+	u8 reason;
+	u8 need_idle:1;
+	u8 need_waker_cluster:1;
+	u8 sync:1;
+	u8 ignore_prev_cpu:1;
+	enum sched_boost_type boost_type;
+	int prev_cpu;
+	DECLARE_BITMAP(candidate_list, NR_CPUS);
+	DECLARE_BITMAP(backup_list, NR_CPUS);
+	u64 task_load;
+	u64 cpu_load;
+	u32 sbc_best_flag;
+	u32 sbc_best_cluster_flag;
+};
+
+struct cluster_cpu_stats {
+	int best_idle_cpu, least_loaded_cpu;
+	int best_capacity_cpu, best_cpu, best_sibling_cpu;
+	int min_cost, best_sibling_cpu_cost;
+	int best_cpu_cstate;
+	u64 min_load, best_load, best_sibling_cpu_load;
+	s64 highest_spare_capacity;
+};
+
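+/*
+ * Return 1 if placing the waking task on @rq would push it past the spill
+ * load or spill nr_running thresholds.
+ */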
+static int spill_threshold_crossed(struct cpu_select_env *env, struct rq *rq)
+{
+	u64 total_load;
+
+	total_load = env->task_load + env->cpu_load;
+
+	if (total_load > sched_spill_load ||
+	    (rq->nr_running + 1) > sysctl_sched_spill_nr_run)
+		return 1;
+
+	return 0;
+}
+
+static int skip_cpu(int cpu, struct cpu_select_env *env)
+{
+	int tcpu = task_cpu(env->p);
+	int skip = 0;
+
+	if (!env->reason)
+		return 0;
+
+	if (is_reserved(cpu))
+		return 1;
+
+	switch (env->reason) {
+	case UP_MIGRATION:
+		skip = !idle_cpu(cpu);
+		break;
+	case IRQLOAD_MIGRATION:
+		/* Purposely fall through */
+	default:
+		skip = (cpu == tcpu);
+		break;
+	}
+
+	return skip;
+}
+
+static inline int
+acceptable_capacity(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+	int tcpu;
+
+	if (!env->reason)
+		return 1;
+
+	tcpu = task_cpu(env->p);
+	switch (env->reason) {
+	case UP_MIGRATION:
+		return cluster->capacity > cpu_capacity(tcpu);
+
+	case DOWN_MIGRATION:
+		return cluster->capacity < cpu_capacity(tcpu);
+
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+static int
+skip_cluster(struct sched_cluster *cluster, struct cpu_select_env *env)
+{
+	if (!test_bit(cluster->id, env->candidate_list))
+		return 1;
+
+	if (!acceptable_capacity(cluster, env)) {
+		__clear_bit(cluster->id, env->candidate_list);
+		return 1;
+	}
+
+	return 0;
+}
+
+static struct sched_cluster *
+select_least_power_cluster(struct cpu_select_env *env)
+{
+	struct sched_cluster *cluster;
+
+	if (env->rtg) {
+		env->task_load = scale_load_to_cpu(task_load(env->p),
+			cluster_first_cpu(env->rtg->preferred_cluster));
+		env->sbc_best_cluster_flag |= SBC_FLAG_COLOC_CLUSTER;
+		return env->rtg->preferred_cluster;
+	}
+
+	for_each_sched_cluster(cluster) {
+		if (!skip_cluster(cluster, env)) {
+			int cpu = cluster_first_cpu(cluster);
+
+			env->task_load = scale_load_to_cpu(task_load(env->p),
+									 cpu);
+			if (task_load_will_fit(env->p, env->task_load, cpu,
+					       env->boost_type))
+				return cluster;
+
+			__set_bit(cluster->id, env->backup_list);
+			__clear_bit(cluster->id, env->candidate_list);
+		}
+	}
+
+	return NULL;
+}
+
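+/*
+ * Return the next candidate cluster whose id bit is set in @list, searching
+ * from @start up to @end, or NULL if none remain.
+ */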
+static struct sched_cluster *
+next_candidate(const unsigned long *list, int start, int end)
+{
+	int cluster_id;
+
+	cluster_id = find_next_bit(list, end, start - 1 + 1);
+	if (cluster_id >= end)
+		return NULL;
+
+	return sched_cluster[cluster_id];
+}
+
+static void
+update_spare_capacity(struct cluster_cpu_stats *stats,
+		      struct cpu_select_env *env, int cpu, int capacity,
+		      u64 cpu_load)
+{
+	s64 spare_capacity = sched_ravg_window - cpu_load;
+
+	if (spare_capacity > 0 &&
+	    (spare_capacity > stats->highest_spare_capacity ||
+	     (spare_capacity == stats->highest_spare_capacity &&
+	      ((!env->need_waker_cluster &&
+		capacity > cpu_capacity(stats->best_capacity_cpu)) ||
+	       (env->need_waker_cluster &&
+		cpu_rq(cpu)->nr_running <
+		cpu_rq(stats->best_capacity_cpu)->nr_running))))) {
+		/*
+		 * If the sync waker is the only runnable task on its CPU, the
+		 * CPU's cr_avg is 0, so there is a high chance of placing the
+		 * wakee on the waker's CPU, which likely preempts the waker.
+		 * This can in turn lead to migration of the preempted waker.
+		 * Where possible, place the wakee on a truly idle CPU instead,
+		 * by checking nr_running, to avoid such preemption.
+		 */
+		stats->highest_spare_capacity = spare_capacity;
+		stats->best_capacity_cpu = cpu;
+	}
+}
+
+static inline void find_backup_cluster(
+struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	struct sched_cluster *next = NULL;
+	int i;
+
+	while (!bitmap_empty(env->backup_list, num_clusters)) {
+		next = next_candidate(env->backup_list, 0, num_clusters);
+		__clear_bit(next->id, env->backup_list);
+		for_each_cpu_and(i, &env->p->cpus_allowed, &next->cpus) {
+			trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+			sched_irqload(i), power_cost(i, task_load(env->p) +
+					cpu_cravg_sync(i, env->sync)), 0);
+
+			update_spare_capacity(stats, env, i, next->capacity,
+					  cpu_load_sync(i, env->sync));
+		}
+		env->sbc_best_cluster_flag = SBC_FLAG_BACKUP_CLUSTER;
+	}
+}
+
+struct sched_cluster *
+next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
+					struct cluster_cpu_stats *stats)
+{
+	struct sched_cluster *next = NULL;
+
+	__clear_bit(cluster->id, env->candidate_list);
+
+	if (env->rtg && preferred_cluster(cluster, env->p))
+		return NULL;
+
+	do {
+		if (bitmap_empty(env->candidate_list, num_clusters))
+			return NULL;
+
+		next = next_candidate(env->candidate_list, 0, num_clusters);
+		if (next) {
+			if (next->min_power_cost > stats->min_cost) {
+				clear_bit(next->id, env->candidate_list);
+				next = NULL;
+				continue;
+			}
+
+			if (skip_cluster(next, env))
+				next = NULL;
+		}
+	} while (!next);
+
+	env->task_load = scale_load_to_cpu(task_load(env->p),
+					cluster_first_cpu(next));
+	return next;
+}
+
+#ifdef CONFIG_SCHED_HMP_CSTATE_AWARE
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int cpu_cstate;
+	int prev_cpu = env->prev_cpu;
+
+	cpu_cstate = cpu_rq(cpu)->cstate;
+
+	if (env->need_idle) {
+		stats->min_cost = cpu_cost;
+		if (idle_cpu(cpu)) {
+			if (cpu_cstate < stats->best_cpu_cstate ||
+				(cpu_cstate == stats->best_cpu_cstate &&
+							cpu == prev_cpu)) {
+				stats->best_idle_cpu = cpu;
+				stats->best_cpu_cstate = cpu_cstate;
+			}
+		} else {
+			if (env->cpu_load < stats->min_load ||
+				(env->cpu_load == stats->min_load &&
+							cpu == prev_cpu)) {
+				stats->least_loaded_cpu = cpu;
+				stats->min_load = env->cpu_load;
+			}
+		}
+
+		return;
+	}
+
+	if (cpu_cost < stats->min_cost)  {
+		stats->min_cost = cpu_cost;
+		stats->best_cpu_cstate = cpu_cstate;
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_CPU_COST;
+		return;
+	}
+
+	/* CPU cost is the same. Start breaking the tie by C-state */
+
+	if (cpu_cstate > stats->best_cpu_cstate)
+		return;
+
+	if (cpu_cstate < stats->best_cpu_cstate) {
+		stats->best_cpu_cstate = cpu_cstate;
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
+		return;
+	}
+
+	/* C-state is the same. Use prev CPU to break the tie */
+	if (cpu == prev_cpu) {
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_COST_CSTATE_PREV_CPU_TIE_BREAKER;
+		return;
+	}
+
+	if (stats->best_cpu != prev_cpu &&
+	    ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
+	    (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+		stats->best_load = env->cpu_load;
+		stats->best_cpu = cpu;
+		env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
+	}
+}
+#else /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+				   struct cpu_select_env *env, int cpu_cost)
+{
+	int prev_cpu = env->prev_cpu;
+
+	if (cpu != prev_cpu && cpus_share_cache(prev_cpu, cpu)) {
+		if (stats->best_sibling_cpu_cost > cpu_cost ||
+		    (stats->best_sibling_cpu_cost == cpu_cost &&
+		     stats->best_sibling_cpu_load > env->cpu_load)) {
+			stats->best_sibling_cpu_cost = cpu_cost;
+			stats->best_sibling_cpu_load = env->cpu_load;
+			stats->best_sibling_cpu = cpu;
+		}
+	}
+
+	if ((cpu_cost < stats->min_cost) ||
+	    ((stats->best_cpu != prev_cpu &&
+	      stats->min_load > env->cpu_load) || cpu == prev_cpu)) {
+		if (env->need_idle) {
+			if (idle_cpu(cpu)) {
+				stats->min_cost = cpu_cost;
+				stats->best_idle_cpu = cpu;
+			}
+		} else {
+			stats->min_cost = cpu_cost;
+			stats->min_load = env->cpu_load;
+			stats->best_cpu = cpu;
+			env->sbc_best_flag = SBC_FLAG_MIN_COST;
+		}
+	}
+}
+#endif /* CONFIG_SCHED_HMP_CSTATE_AWARE */
+
+static void update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
+					 struct cpu_select_env *env)
+{
+	int cpu_cost;
+
+	cpu_cost = power_cost(cpu, task_load(env->p) +
+				cpu_cravg_sync(cpu, env->sync));
+	if (cpu_cost <= stats->min_cost)
+		__update_cluster_stats(cpu, stats, env, cpu_cost);
+}
+
+static void find_best_cpu_in_cluster(struct sched_cluster *c,
+	 struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	int i;
+	struct cpumask search_cpus;
+
+	cpumask_and(&search_cpus, tsk_cpus_allowed(env->p), &c->cpus);
+	if (env->ignore_prev_cpu)
+		cpumask_clear_cpu(env->prev_cpu, &search_cpus);
+
+	for_each_cpu(i, &search_cpus) {
+		env->cpu_load = cpu_load_sync(i, env->sync);
+
+		trace_sched_cpu_load_wakeup(cpu_rq(i), idle_cpu(i),
+			sched_irqload(i),
+			power_cost(i, task_load(env->p) +
+					cpu_cravg_sync(i, env->sync)), 0);
+
+		if (unlikely(!cpu_active(i)) || skip_cpu(i, env))
+			continue;
+
+		update_spare_capacity(stats, env, i, c->capacity,
+				      env->cpu_load);
+
+		if (env->boost_type == SCHED_BOOST_ON_ALL ||
+		    env->need_waker_cluster ||
+		    sched_cpu_high_irqload(i) ||
+		    spill_threshold_crossed(env, cpu_rq(i)))
+			continue;
+
+		update_cluster_stats(i, stats, env);
+	}
+}
+
+static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
+{
+	stats->best_cpu = stats->best_idle_cpu = -1;
+	stats->best_capacity_cpu = stats->best_sibling_cpu  = -1;
+	stats->min_cost = stats->best_sibling_cpu_cost = INT_MAX;
+	stats->min_load	= stats->best_sibling_cpu_load = ULLONG_MAX;
+	stats->highest_spare_capacity = 0;
+	stats->least_loaded_cpu = -1;
+	stats->best_cpu_cstate = INT_MAX;
+	/* No need to initialize stats->best_load */
+}
+
+/*
+ * Should the task be woken to any available idle cpu?
+ *
+ * Waking tasks to an idle cpu has mixed implications for both performance and
+ * power. In many cases the scheduler cannot correctly estimate the impact of
+ * using idle cpus on either performance or power. PF_WAKE_UP_IDLE allows an
+ * external kernel module to pass a strong hint to the scheduler that the task
+ * in question should be woken to an idle cpu, generally to improve performance.
+ */
+static inline int wake_to_idle(struct task_struct *p)
+{
+	return (current->flags & PF_WAKE_UP_IDLE) ||
+		 (p->flags & PF_WAKE_UP_IDLE) || sysctl_sched_wake_to_idle;
+}
+
+static inline bool
+bias_to_prev_cpu(struct cpu_select_env *env, struct cluster_cpu_stats *stats)
+{
+	int prev_cpu;
+	struct task_struct *task = env->p;
+	struct sched_cluster *cluster;
+
+	if (env->boost_type != SCHED_BOOST_NONE || env->reason ||
+	    !task->ravg.mark_start ||
+	    env->need_idle || !sched_short_sleep_task_threshold)
+		return false;
+
+	prev_cpu = env->prev_cpu;
+	if (!cpumask_test_cpu(prev_cpu, tsk_cpus_allowed(task)) ||
+					unlikely(!cpu_active(prev_cpu)))
+		return false;
+
+	if (task->ravg.mark_start - task->last_cpu_selected_ts >=
+				sched_long_cpu_selection_threshold)
+		return false;
+
+	/*
+	 * This function should be used by the task wakeup path only, as it
+	 * treats p->last_switch_out_ts as the last sleep time.
+	 * p->last_switch_out_ts can denote the last preemption time as well
+	 * as the last sleep time.
+	 */
+	if (task->ravg.mark_start - task->last_switch_out_ts >=
+					sched_short_sleep_task_threshold)
+		return false;
+
+	env->task_load = scale_load_to_cpu(task_load(task), prev_cpu);
+	cluster = cpu_rq(prev_cpu)->cluster;
+
+	if (!task_load_will_fit(task, env->task_load, prev_cpu,
+				sched_boost_type())) {
+
+		__set_bit(cluster->id, env->backup_list);
+		__clear_bit(cluster->id, env->candidate_list);
+		return false;
+	}
+
+	env->cpu_load = cpu_load_sync(prev_cpu, env->sync);
+	if (sched_cpu_high_irqload(prev_cpu) ||
+			spill_threshold_crossed(env, cpu_rq(prev_cpu))) {
+		update_spare_capacity(stats, env, prev_cpu,
+				cluster->capacity, env->cpu_load);
+		env->ignore_prev_cpu = 1;
+		return false;
+	}
+
+	return true;
+}
+
+static inline bool
+wake_to_waker_cluster(struct cpu_select_env *env)
+{
+	return !env->need_idle && !env->reason && env->sync &&
+	       task_load(current) > sched_big_waker_task_load &&
+	       task_load(env->p) < sched_small_wakee_task_load;
+}
+
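+/* Return true if @p is allowed to run on at least one active CPU in @cluster. */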
+static inline int
+cluster_allowed(struct task_struct *p, struct sched_cluster *cluster)
+{
+	cpumask_t tmp_mask;
+
+	cpumask_and(&tmp_mask, &cluster->cpus, cpu_active_mask);
+	cpumask_and(&tmp_mask, &tmp_mask, &p->cpus_allowed);
+
+	return !cpumask_empty(&tmp_mask);
+}
+
+
+/* return cheapest cpu that can fit this task */
+static int select_best_cpu(struct task_struct *p, int target, int reason,
+			   int sync)
+{
+	struct sched_cluster *cluster, *pref_cluster = NULL;
+	struct cluster_cpu_stats stats;
+	struct related_thread_group *grp;
+	unsigned int sbc_flag = 0;
+
+	struct cpu_select_env env = {
+		.p			= p,
+		.reason			= reason,
+		.need_idle		= wake_to_idle(p),
+		.need_waker_cluster	= 0,
+		.boost_type		= sched_boost_type(),
+		.sync			= sync,
+		.prev_cpu		= target,
+		.ignore_prev_cpu	= 0,
+		.rtg			= NULL,
+		.sbc_best_flag		= 0,
+		.sbc_best_cluster_flag	= 0,
+	};
+
+	bitmap_copy(env.candidate_list, all_cluster_ids, NR_CPUS);
+	bitmap_zero(env.backup_list, NR_CPUS);
+
+	init_cluster_cpu_stats(&stats);
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+
+	if (grp && grp->preferred_cluster) {
+		pref_cluster = grp->preferred_cluster;
+		if (!cluster_allowed(p, pref_cluster))
+			clear_bit(pref_cluster->id, env.candidate_list);
+		else
+			env.rtg = grp;
+	} else {
+		cluster = cpu_rq(smp_processor_id())->cluster;
+		if (wake_to_waker_cluster(&env) &&
+		    cluster_allowed(p, cluster)) {
+			env.need_waker_cluster = 1;
+			bitmap_zero(env.candidate_list, NR_CPUS);
+			__set_bit(cluster->id, env.candidate_list);
+			env.sbc_best_cluster_flag = SBC_FLAG_WAKER_CLUSTER;
+
+		} else if (bias_to_prev_cpu(&env, &stats)) {
+			sbc_flag = SBC_FLAG_PREV_CPU;
+			goto out;
+		}
+	}
+
+retry:
+	cluster = select_least_power_cluster(&env);
+
+	if (!cluster)
+		goto out;
+
+	/*
+	 * 'cluster' now points to the minimum-power cluster which can satisfy
+	 * the task's perf goals. Walk down the cluster list starting with that
+	 * cluster. For non-small tasks, skip clusters that don't have
+	 * mostly_idle/idle cpus.
+	 */
+
+	do {
+		find_best_cpu_in_cluster(cluster, &env, &stats);
+
+	} while ((cluster = next_best_cluster(cluster, &env, &stats)));
+
+	if (env.need_idle) {
+		if (stats.best_idle_cpu >= 0) {
+			target = stats.best_idle_cpu;
+			sbc_flag |= SBC_FLAG_IDLE_CSTATE;
+		} else if (stats.least_loaded_cpu >= 0) {
+			target = stats.least_loaded_cpu;
+			sbc_flag |= SBC_FLAG_IDLE_LEAST_LOADED;
+		}
+	} else if (stats.best_cpu >= 0) {
+		if (stats.best_cpu != task_cpu(p) &&
+				stats.min_cost == stats.best_sibling_cpu_cost) {
+			stats.best_cpu = stats.best_sibling_cpu;
+			sbc_flag |= SBC_FLAG_BEST_SIBLING;
+		}
+		sbc_flag |= env.sbc_best_flag;
+		target = stats.best_cpu;
+	} else {
+		if (env.rtg) {
+			env.rtg = NULL;
+			goto retry;
+		}
+
+		find_backup_cluster(&env, &stats);
+		if (stats.best_capacity_cpu >= 0) {
+			target = stats.best_capacity_cpu;
+			sbc_flag |= SBC_FLAG_BEST_CAP_CPU;
+		}
+	}
+	p->last_cpu_selected_ts = sched_ktime_clock();
+	sbc_flag |= env.sbc_best_cluster_flag;
+out:
+	rcu_read_unlock();
+	trace_sched_task_load(p, sched_boost(), env.reason, env.sync,
+					env.need_idle, sbc_flag, target);
+	return target;
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static inline struct task_group *next_task_group(struct task_group *tg)
+{
+	tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list);
+
+	return (&tg->list == &task_groups) ? NULL : tg;
+}
+
+/* Iterate over all cfs_rq in a cpu */
+#define for_each_cfs_rq(cfs_rq, tg, cpu)	\
+	for (tg = container_of(&task_groups, struct task_group, list);	\
+		((tg = next_task_group(tg)) && (cfs_rq = tg->cfs_rq[cpu]));)
+
+void reset_cfs_rq_hmp_stats(int cpu, int reset_cra)
+{
+	struct task_group *tg;
+	struct cfs_rq *cfs_rq;
+
+	rcu_read_lock();
+
+	for_each_cfs_rq(cfs_rq, tg, cpu)
+		reset_hmp_stats(&cfs_rq->hmp_stats, reset_cra);
+
+	rcu_read_unlock();
+}
+
+static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq);
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra);
+
+/* Add the task's contribution to a cpu's HMP statistics */
+void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+
+	/*
+	 * Although the check below is not strictly required (as
+	 * inc/dec_nr_big_task and inc/dec_cumulative_runnable_avg called
+	 * from inc_cfs_rq_hmp_stats() have similar checks), we gain a bit of
+	 * efficiency by short-circuiting the for_each_sched_entity() loop when
+	 * sched_disable_window_stats is set.
+	 */
+	if (sched_disable_window_stats)
+		return;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		inc_cfs_rq_hmp_stats(cfs_rq, p, change_cra);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Update rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se)
+		inc_rq_hmp_stats(rq, p, change_cra);
+}
+
+static void fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+				       u32 new_task_load, u32 new_pred_demand)
+{
+	struct cfs_rq *cfs_rq;
+	struct sched_entity *se = &p->se;
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+
+		fixup_cumulative_runnable_avg(&cfs_rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&cfs_rq->hmp_stats, p, task_load_delta);
+		if (cfs_rq_throttled(cfs_rq))
+			break;
+	}
+
+	/* Fix up rq->hmp_stats only if we didn't find any throttled cfs_rq */
+	if (!se) {
+		fixup_cumulative_runnable_avg(&rq->hmp_stats, p,
+					      task_load_delta,
+					      pred_demand_delta);
+		fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+	}
+}
+
+static int task_will_be_throttled(struct task_struct *p);
+
+#else	/* CONFIG_CFS_BANDWIDTH */
+
+inline void reset_cfs_rq_hmp_stats(int cpu, int reset_cra) { }
+
+static void
+fixup_hmp_sched_stats_fair(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+	fixup_nr_big_tasks(&rq->hmp_stats, p, task_load_delta);
+}
+
+static inline int task_will_be_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+}
+
+#endif	/* CONFIG_CFS_BANDWIDTH */
+
+/*
+ * Reset balance_interval at all sched_domain levels of given cpu, so that it
+ * honors kick.
+ */
+static inline void reset_balance_interval(int cpu)
+{
+	struct sched_domain *sd;
+
+	if (cpu >= nr_cpu_ids)
+		return;
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd)
+		sd->balance_interval = 0;
+	rcu_read_unlock();
+}
+
+/*
+ * Check if a task is on the "wrong" cpu (i.e. its current cpu is not the ideal
+ * cpu as per its demand or priority).
+ *
+ * Returns the reason why the task needs to be migrated, or 0 if no migration
+ * is needed.
+ */
+static inline int migration_needed(struct task_struct *p, int cpu)
+{
+	int nice;
+	struct related_thread_group *grp;
+
+	if (p->state != TASK_RUNNING || p->nr_cpus_allowed == 1)
+		return 0;
+
+	/* No need to migrate task that is about to be throttled */
+	if (task_will_be_throttled(p))
+		return 0;
+
+	if (sched_boost_type() == SCHED_BOOST_ON_BIG) {
+		if (cpu_capacity(cpu) != max_capacity)
+			return UP_MIGRATION;
+		return 0;
+	}
+
+	if (sched_cpu_high_irqload(cpu))
+		return IRQLOAD_MIGRATION;
+
+	nice = task_nice(p);
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	if (!grp && (nice > SCHED_UPMIGRATE_MIN_NICE ||
+	       upmigrate_discouraged(p)) && cpu_capacity(cpu) > min_capacity) {
+		rcu_read_unlock();
+		return DOWN_MIGRATION;
+	}
+
+	if (!grp && !task_will_fit(p, cpu)) {
+		rcu_read_unlock();
+		return UP_MIGRATION;
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+
+static inline int
+kick_active_balance(struct rq *rq, struct task_struct *p, int new_cpu)
+{
+	unsigned long flags;
+	int rc = 0;
+
+	/* Invoke active balance to force-migrate the currently running task */
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->active_balance) {
+		rq->active_balance = 1;
+		rq->push_cpu = new_cpu;
+		get_task_struct(p);
+		rq->push_task = p;
+		rc = 1;
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+static DEFINE_RAW_SPINLOCK(migration_lock);
+
+/*
+ * Check if the currently running task should be migrated to a better cpu.
+ *
+ * Todo: Effect this via changes to nohz_balancer_kick() and load balance?
+ */
+void check_for_migration(struct rq *rq, struct task_struct *p)
+{
+	int cpu = cpu_of(rq), new_cpu;
+	int active_balance = 0, reason;
+
+	reason = migration_needed(p, cpu);
+	if (!reason)
+		return;
+
+	raw_spin_lock(&migration_lock);
+	new_cpu = select_best_cpu(p, cpu, reason, 0);
+
+	if (new_cpu != cpu) {
+		active_balance = kick_active_balance(rq, p, new_cpu);
+		if (active_balance)
+			mark_reserved(new_cpu);
+	}
+
+	raw_spin_unlock(&migration_lock);
+
+	if (active_balance)
+		stop_one_cpu_nowait(cpu, active_load_balance_cpu_stop, rq,
+					&rq->active_balance_work);
+}
+
+#ifdef CONFIG_CFS_BANDWIDTH
+
+static void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq)
+{
+	cfs_rq->hmp_stats.nr_big_tasks = 0;
+	cfs_rq->hmp_stats.cumulative_runnable_avg = 0;
+	cfs_rq->hmp_stats.pred_demands_sum = 0;
+}
+
+static void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+		 struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&cfs_rq->hmp_stats, p);
+	if (change_cra)
+		inc_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+		 struct task_struct *p, int change_cra)
+{
+	dec_nr_big_task(&cfs_rq->hmp_stats, p);
+	if (change_cra)
+		dec_cumulative_runnable_avg(&cfs_rq->hmp_stats, p);
+}
+
+static void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+			 struct cfs_rq *cfs_rq)
+{
+	stats->nr_big_tasks += cfs_rq->hmp_stats.nr_big_tasks;
+	stats->cumulative_runnable_avg +=
+				cfs_rq->hmp_stats.cumulative_runnable_avg;
+	stats->pred_demands_sum += cfs_rq->hmp_stats.pred_demands_sum;
+}
+
+static void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+				 struct cfs_rq *cfs_rq)
+{
+	stats->nr_big_tasks -= cfs_rq->hmp_stats.nr_big_tasks;
+	stats->cumulative_runnable_avg -=
+				cfs_rq->hmp_stats.cumulative_runnable_avg;
+	stats->pred_demands_sum -= cfs_rq->hmp_stats.pred_demands_sum;
+
+	BUG_ON(stats->nr_big_tasks < 0 ||
+		(s64)stats->cumulative_runnable_avg < 0);
+	verify_pred_demands_sum(stats);
+}
+
+#else	/* CONFIG_CFS_BANDWIDTH */
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+#endif	/* CONFIG_CFS_BANDWIDTH */
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void init_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq) { }
+
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void inc_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+			 struct cfs_rq *cfs_rq)
+{
+}
+
+static inline void dec_throttled_cfs_rq_hmp_stats(struct hmp_sched_stats *stats,
+			 struct cfs_rq *cfs_rq)
+{
+}
+#endif	/* CONFIG_SCHED_HMP */
+
 #define cap_scale(v, s) ((v)*(s) >> SCHED_CAPACITY_SHIFT)
 
 /*
@@ -3272,6 +4216,12 @@
 	return 0;
 }
 
+static inline void inc_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
+static inline void dec_cfs_rq_hmp_stats(struct cfs_rq *cfs_rq,
+	 struct task_struct *p, int change_cra) { }
+
 #endif /* CONFIG_SMP */
 
 static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -3883,6 +4833,33 @@
 	return cfs_bandwidth_used() && cfs_rq->throttled;
 }
 
+/*
+ * Check if a task is part of a hierarchy where some cfs_rq does not have any
+ * runtime left.
+ *
+ * We can't rely on throttled_hierarchy() to do this test, as
+ * cfs_rq->throttle_count will not be updated yet when this function is called
+ * from scheduler_tick()
+ */
+static int task_will_be_throttled(struct task_struct *p)
+{
+	struct sched_entity *se = &p->se;
+	struct cfs_rq *cfs_rq;
+
+	if (!cfs_bandwidth_used())
+		return 0;
+
+	for_each_sched_entity(se) {
+		cfs_rq = cfs_rq_of(se);
+		if (!cfs_rq->runtime_enabled)
+			continue;
+		if (cfs_rq->runtime_remaining <= 0)
+			return 1;
+	}
+
+	return 0;
+}
+
 /* check whether cfs_rq, or any parent, is throttled */
 static inline int throttled_hierarchy(struct cfs_rq *cfs_rq)
 {
@@ -3960,13 +4937,20 @@
 		if (dequeue)
 			dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP);
 		qcfs_rq->h_nr_running -= task_delta;
+#ifdef CONFIG_SCHED_HMP
+		dec_throttled_cfs_rq_hmp_stats(&qcfs_rq->hmp_stats, cfs_rq);
+#endif
 
 		if (qcfs_rq->load.weight)
 			dequeue = 0;
 	}
 
-	if (!se)
+	if (!se) {
 		sub_nr_running(rq, task_delta);
+#ifdef CONFIG_SCHED_HMP
+		dec_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, cfs_rq);
+#endif
+	}
 
 	cfs_rq->throttled = 1;
 	cfs_rq->throttled_clock = rq_clock(rq);
@@ -3987,6 +4971,12 @@
 		start_cfs_bandwidth(cfs_b);
 
 	raw_spin_unlock(&cfs_b->lock);
+
+	/* Log effect on hmp stats after throttling */
+	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
+				sched_irqload(cpu_of(rq)),
+				power_cost(cpu_of(rq), 0),
+				cpu_temp(cpu_of(rq)));
 }
 
 void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
@@ -3996,6 +4986,7 @@
 	struct sched_entity *se;
 	int enqueue = 1;
 	long task_delta;
+	struct cfs_rq *tcfs_rq = cfs_rq;
 
 	se = cfs_rq->tg->se[cpu_of(rq)];
 
@@ -4023,17 +5014,30 @@
 		if (enqueue)
 			enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP);
 		cfs_rq->h_nr_running += task_delta;
+#ifdef CONFIG_SCHED_HMP
+		inc_throttled_cfs_rq_hmp_stats(&cfs_rq->hmp_stats, tcfs_rq);
+#endif
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 	}
 
-	if (!se)
+	if (!se) {
 		add_nr_running(rq, task_delta);
+#ifdef CONFIG_SCHED_HMP
+		inc_throttled_cfs_rq_hmp_stats(&rq->hmp_stats, tcfs_rq);
+#endif
+	}
 
 	/* determine whether we need to wake up potentially idle cpu */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);
+
+	/* Log effect on hmp stats after un-throttling */
+	trace_sched_cpu_load_cgroup(rq, idle_cpu(cpu_of(rq)),
+				sched_irqload(cpu_of(rq)),
+				power_cost(cpu_of(rq), 0),
+				cpu_temp(cpu_of(rq)));
 }
 
 static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b,
@@ -4371,6 +5375,7 @@
 {
 	cfs_rq->runtime_enabled = 0;
 	INIT_LIST_HEAD(&cfs_rq->throttled_list);
+	init_cfs_rq_hmp_stats(cfs_rq);
 }
 
 void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4561,6 +5566,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running++;
+		inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		flags = ENQUEUE_WAKEUP;
 	}
@@ -4568,6 +5574,7 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running++;
+		inc_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4576,8 +5583,10 @@
 		update_cfs_shares(cfs_rq);
 	}
 
-	if (!se)
+	if (!se) {
 		add_nr_running(rq, 1);
+		inc_rq_hmp_stats(rq, p, 1);
+	}
 
 	hrtick_update(rq);
 }
@@ -4608,6 +5617,7 @@
 		if (cfs_rq_throttled(cfs_rq))
 			break;
 		cfs_rq->h_nr_running--;
+		dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -4627,6 +5637,7 @@
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
 		cfs_rq->h_nr_running--;
+		dec_cfs_rq_hmp_stats(cfs_rq, p, 1);
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -4635,8 +5646,10 @@
 		update_cfs_shares(cfs_rq);
 	}
 
-	if (!se)
+	if (!se) {
 		sub_nr_running(rq, 1);
+		dec_rq_hmp_stats(rq, p, 1);
+	}
 
 	hrtick_update(rq);
 }
@@ -5631,6 +6644,10 @@
 	int want_affine = 0;
 	int sync = wake_flags & WF_SYNC;
 
+#ifdef CONFIG_SCHED_HMP
+	return select_best_cpu(p, prev_cpu, 0, sync);
+#endif
+
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
 		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -6246,6 +7263,13 @@
 #define LBF_NEED_BREAK	0x02
 #define LBF_DST_PINNED  0x04
 #define LBF_SOME_PINNED	0x08
+#define LBF_SCHED_BOOST_ACTIVE_BALANCE 0x40
+#define LBF_BIG_TASK_ACTIVE_BALANCE 0x80
+#define LBF_HMP_ACTIVE_BALANCE (LBF_SCHED_BOOST_ACTIVE_BALANCE | \
+				LBF_BIG_TASK_ACTIVE_BALANCE)
+#define LBF_IGNORE_BIG_TASKS 0x100
+#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
+#define LBF_MOVED_RELATED_THREAD_GROUP_TASK 0x400
 
 struct lb_env {
 	struct sched_domain	*sd;
@@ -6262,6 +7286,8 @@
 	long			imbalance;
 	/* The set of CPUs under consideration for load-balancing */
 	struct cpumask		*cpus;
+	unsigned int		busiest_grp_capacity;
+	unsigned int		busiest_nr_running;
 
 	unsigned int		flags;
 
@@ -6368,6 +7394,7 @@
 int can_migrate_task(struct task_struct *p, struct lb_env *env)
 {
 	int tsk_cache_hot;
+	int twf, group_cpus;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
@@ -6414,6 +7441,34 @@
 	/* Record that we found atleast one task that could run on dst_cpu */
 	env->flags &= ~LBF_ALL_PINNED;
 
+	if (cpu_capacity(env->dst_cpu) > cpu_capacity(env->src_cpu) &&
+		nr_big_tasks(env->src_rq) && !is_big_task(p))
+		return 0;
+
+	twf = task_will_fit(p, env->dst_cpu);
+
+	/*
+	 * Attempt to not pull tasks that don't fit. We may get lucky and find
+	 * one that actually fits.
+	 */
+	if (env->flags & LBF_IGNORE_BIG_TASKS && !twf)
+		return 0;
+
+	if (env->flags & LBF_IGNORE_PREFERRED_CLUSTER_TASKS &&
+	    !preferred_cluster(rq_cluster(cpu_rq(env->dst_cpu)), p))
+		return 0;
+
+	/*
+	 * Group imbalance can sometimes cause work to be pulled across groups
+	 * even though the group could have managed the imbalance on its own.
+	 * Prevent inter-cluster migrations for big tasks when the number of
+	 * tasks is lower than the capacity of the group.
+	 */
+	group_cpus = DIV_ROUND_UP(env->busiest_grp_capacity,
+						 SCHED_CAPACITY_SCALE);
+	if (!twf && env->busiest_nr_running <= group_cpus)
+		return 0;
+
 	if (task_running(env->src_rq, p)) {
 		schedstat_inc(p->se.statistics.nr_failed_migrations_running);
 		return 0;
@@ -6421,15 +7476,16 @@
 
 	/*
 	 * Aggressive migration if:
-	 * 1) destination numa is preferred
-	 * 2) task is cache cold, or
-	 * 3) too many balance attempts have failed.
+	 * 1) IDLE or NEWLY_IDLE balance.
+	 * 2) destination numa is preferred
+	 * 3) task is cache cold, or
+	 * 4) too many balance attempts have failed.
 	 */
 	tsk_cache_hot = migrate_degrades_locality(p, env);
 	if (tsk_cache_hot == -1)
 		tsk_cache_hot = task_hot(p, env);
 
-	if (tsk_cache_hot <= 0 ||
+	if (env->idle != CPU_NOT_IDLE || tsk_cache_hot <= 0 ||
 	    env->sd->nr_balance_failed > env->sd->cache_nice_tries) {
 		if (tsk_cache_hot == 1) {
 			schedstat_inc(env->sd->lb_hot_gained[env->idle]);
@@ -6451,7 +7507,11 @@
 
 	p->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(env->src_rq, p, 0);
+	double_lock_balance(env->src_rq, env->dst_rq);
 	set_task_cpu(p, env->dst_cpu);
+	if (task_in_related_thread_group(p))
+		env->flags |= LBF_MOVED_RELATED_THREAD_GROUP_TASK;
+	double_unlock_balance(env->src_rq, env->dst_rq);
 }
 
 /*
@@ -6498,12 +7558,20 @@
 	struct task_struct *p;
 	unsigned long load;
 	int detached = 0;
+	int orig_loop = env->loop;
 
 	lockdep_assert_held(&env->src_rq->lock);
 
 	if (env->imbalance <= 0)
 		return 0;
 
+	if (cpu_capacity(env->dst_cpu) < cpu_capacity(env->src_cpu) &&
+							!sched_boost())
+		env->flags |= LBF_IGNORE_BIG_TASKS;
+	else if (!same_cluster(env->dst_cpu, env->src_cpu))
+		env->flags |= LBF_IGNORE_PREFERRED_CLUSTER_TASKS;
+
+redo:
 	while (!list_empty(tasks)) {
 		/*
 		 * We don't want to steal all, otherwise we may be treated likewise,
@@ -6565,6 +7633,15 @@
 		list_move_tail(&p->se.group_node, tasks);
 	}
 
+	if (env->flags & (LBF_IGNORE_BIG_TASKS |
+			LBF_IGNORE_PREFERRED_CLUSTER_TASKS) && !detached) {
+		tasks = &env->src_rq->cfs_tasks;
+		env->flags &= ~(LBF_IGNORE_BIG_TASKS |
+				LBF_IGNORE_PREFERRED_CLUSTER_TASKS);
+		env->loop = orig_loop;
+		goto redo;
+	}
+
 	/*
 	 * Right now, this is one of only two places we collect this stat
 	 * so we can safely collect detach_one_task() stats here rather
@@ -6729,6 +7806,10 @@
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
 	unsigned int sum_nr_running; /* Nr tasks running in the group */
+#ifdef CONFIG_SCHED_HMP
+	unsigned long sum_nr_big_tasks;
+	u64 group_cpu_load; /* Scaled load of all CPUs of the group */
+#endif
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
@@ -6771,10 +7852,64 @@
 			.avg_load = 0UL,
 			.sum_nr_running = 0,
 			.group_type = group_other,
+#ifdef CONFIG_SCHED_HMP
+			.sum_nr_big_tasks = 0UL,
+			.group_cpu_load = 0ULL,
+#endif
 		},
 	};
 }
 
+#ifdef CONFIG_SCHED_HMP
+
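+/*
+ * Decide whether load balance should avoid pulling tasks from a
+ * lower-capacity (or equally capable but cheaper) cluster. Bail out
+ * (return 1) when the busiest group is still below both its spill-load and
+ * spill-nr_run thresholds and, if the local group has strictly higher
+ * capacity, the busiest group has no big tasks to up-migrate. Only applies
+ * when cluster-spill restriction is enabled and sched_boost is off.
+ */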
+static int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+	int local_cpu, busiest_cpu;
+	int local_capacity, busiest_capacity;
+	int local_pwr_cost, busiest_pwr_cost;
+	int nr_cpus;
+
+	if (!sysctl_sched_restrict_cluster_spill || sched_boost())
+		return 0;
+
+	local_cpu = group_first_cpu(sds->local);
+	busiest_cpu = group_first_cpu(sds->busiest);
+
+	local_capacity = cpu_max_possible_capacity(local_cpu);
+	busiest_capacity = cpu_max_possible_capacity(busiest_cpu);
+
+	local_pwr_cost = cpu_max_power_cost(local_cpu);
+	busiest_pwr_cost = cpu_max_power_cost(busiest_cpu);
+
+	if (local_capacity < busiest_capacity ||
+			(local_capacity == busiest_capacity &&
+			local_pwr_cost <= busiest_pwr_cost))
+		return 0;
+
+	if (local_capacity > busiest_capacity &&
+			sds->busiest_stat.sum_nr_big_tasks)
+		return 0;
+
+	nr_cpus = cpumask_weight(sched_group_cpus(sds->busiest));
+	if ((sds->busiest_stat.group_cpu_load < nr_cpus * sched_spill_load) &&
+		(sds->busiest_stat.sum_nr_running <
+			nr_cpus * sysctl_sched_spill_nr_run))
+		return 1;
+
+	return 0;
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline int
+bail_inter_cluster_balance(struct lb_env *env, struct sd_lb_stats *sds)
+{
+	return 0;
+}
+
+#endif	/* CONFIG_SCHED_HMP */
+
 /**
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
@@ -7037,6 +8172,11 @@
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
+		trace_sched_cpu_load_lb(cpu_rq(i), idle_cpu(i),
+				     sched_irqload(i),
+				     power_cost(i, 0),
+				     cpu_temp(i));
+
 		/* Bias balancing toward cpus of our domain */
 		if (local_group)
 			load = target_load(i, load_idx);
@@ -7051,6 +8191,11 @@
 		if (nr_running > 1)
 			*overload = true;
 
+#ifdef CONFIG_SCHED_HMP
+		sgs->sum_nr_big_tasks += rq->hmp_stats.nr_big_tasks;
+		sgs->group_cpu_load += cpu_load(i);
+#endif
+
 #ifdef CONFIG_NUMA_BALANCING
 		sgs->nr_numa_running += rq->nr_numa_running;
 		sgs->nr_preferred_running += rq->nr_preferred_running;
@@ -7076,6 +8221,38 @@
 	sgs->group_type = group_classify(group, sgs);
 }
 
+#ifdef CONFIG_SCHED_HMP
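+/*
+ * HMP additions to busiest-group selection: when this is not a busy-CPU
+ * balance and the destination CPU has more capacity than the group, pick the
+ * group for active balance if sched_boost is on and it has any running task
+ * (and no busiest group was chosen yet), or if it has more big tasks than the
+ * current busiest group.
+ */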
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs)
+{
+	if (env->idle != CPU_NOT_IDLE &&
+	    cpu_capacity(env->dst_cpu) > group_rq_capacity(sg)) {
+		if (sched_boost() && !sds->busiest && sgs->sum_nr_running) {
+			env->flags |= LBF_SCHED_BOOST_ACTIVE_BALANCE;
+			return true;
+		}
+
+		if (sgs->sum_nr_big_tasks >
+				sds->busiest_stat.sum_nr_big_tasks) {
+			env->flags |= LBF_BIG_TASK_ACTIVE_BALANCE;
+			return true;
+		}
+	}
+
+	return false;
+}
+#else
+static bool update_sd_pick_busiest_active_balance(struct lb_env *env,
+						  struct sd_lb_stats *sds,
+						  struct sched_group *sg,
+						  struct sg_lb_stats *sgs)
+{
+	return false;
+}
+#endif
+
 /**
  * update_sd_pick_busiest - return 1 on busiest group
  * @env: The load balancing environment.
@@ -7096,6 +8273,9 @@
 {
 	struct sg_lb_stats *busiest = &sds->busiest_stat;
 
+	if (update_sd_pick_busiest_active_balance(env, sds, sg, sgs))
+		return true;
+
 	if (sgs->group_type > busiest->group_type)
 		return true;
 
@@ -7217,6 +8397,8 @@
 		if (update_sd_pick_busiest(env, sds, sg, sgs)) {
 			sds->busiest = sg;
 			sds->busiest_stat = *sgs;
+			env->busiest_nr_running = sgs->sum_nr_running;
+			env->busiest_grp_capacity = sgs->group_capacity;
 		}
 
 next_group:
@@ -7465,6 +8647,12 @@
 	if (!sds.busiest || busiest->sum_nr_running == 0)
 		goto out_balanced;
 
+	if (env->flags & LBF_HMP_ACTIVE_BALANCE)
+		goto force_balance;
+
+	if (bail_inter_cluster_balance(env, &sds))
+		goto out_balanced;
+
 	sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load)
 						/ sds.total_capacity;
 
@@ -7526,6 +8714,57 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SCHED_HMP
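+/*
+ * HMP variant of find_busiest_queue(): pick the runqueue with the highest
+ * cumulative runnable average. For a big-task active balance, prefer the
+ * runqueue with the most big tasks, falling back to the overall busiest and
+ * clearing the flag if no runqueue has any.
+ */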
+static struct rq *find_busiest_queue_hmp(struct lb_env *env,
+				     struct sched_group *group)
+{
+	struct rq *busiest = NULL, *busiest_big = NULL;
+	u64 max_runnable_avg = 0, max_runnable_avg_big = 0;
+	int max_nr_big = 0, nr_big;
+	bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
+	int i;
+
+	for_each_cpu(i, sched_group_cpus(group)) {
+		struct rq *rq = cpu_rq(i);
+		u64 cumulative_runnable_avg =
+				rq->hmp_stats.cumulative_runnable_avg;
+
+		if (!cpumask_test_cpu(i, env->cpus))
+			continue;
+
+		if (find_big) {
+			nr_big = nr_big_tasks(rq);
+			if (nr_big > max_nr_big ||
+			    (nr_big > 0 && nr_big == max_nr_big &&
+			     cumulative_runnable_avg > max_runnable_avg_big)) {
+				max_runnable_avg_big = cumulative_runnable_avg;
+				busiest_big = rq;
+				max_nr_big = nr_big;
+				continue;
+			}
+		}
+
+		if (cumulative_runnable_avg > max_runnable_avg) {
+			max_runnable_avg = cumulative_runnable_avg;
+			busiest = rq;
+		}
+	}
+
+	if (busiest_big)
+		return busiest_big;
+
+	env->flags &= ~LBF_BIG_TASK_ACTIVE_BALANCE;
+	return busiest;
+}
+#else
+static inline struct rq *find_busiest_queue_hmp(struct lb_env *env,
+					struct sched_group *group)
+{
+	return NULL;
+}
+#endif
+
 /*
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
@@ -7536,6 +8775,10 @@
 	unsigned long busiest_load = 0, busiest_capacity = 1;
 	int i;
 
+#ifdef CONFIG_SCHED_HMP
+	return find_busiest_queue_hmp(env, group);
+#endif
+
 	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		unsigned long capacity, wl;
 		enum fbq_type rt;
@@ -7603,12 +8846,16 @@
  * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
  * so long as it is large enough.
  */
-#define MAX_PINNED_INTERVAL	512
+#define MAX_PINNED_INTERVAL	16
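+/*
+ * Number of consecutive balance failures, beyond cache_nice_tries, tolerated
+ * before need_active_balance() resorts to active migration.
+ */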
+#define NEED_ACTIVE_BALANCE_THRESHOLD 10
 
 static int need_active_balance(struct lb_env *env)
 {
 	struct sched_domain *sd = env->sd;
 
+	if (env->flags & LBF_HMP_ACTIVE_BALANCE)
+		return 1;
+
 	if (env->idle == CPU_NEWLY_IDLE) {
 
 		/*
@@ -7633,7 +8880,8 @@
 			return 1;
 	}
 
-	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+	return unlikely(sd->nr_balance_failed >
+			sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
 }
 
 static int active_load_balance_cpu_stop(void *data);
@@ -7680,10 +8928,10 @@
 			struct sched_domain *sd, enum cpu_idle_type idle,
 			int *continue_balancing)
 {
-	int ld_moved, cur_ld_moved, active_balance = 0;
+	int ld_moved = 0, cur_ld_moved, active_balance = 0;
 	struct sched_domain *sd_parent = sd->parent;
-	struct sched_group *group;
-	struct rq *busiest;
+	struct sched_group *group = NULL;
+	struct rq *busiest = NULL;
 	unsigned long flags;
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(load_balance_mask);
 
@@ -7697,6 +8945,11 @@
 		.cpus		= cpus,
 		.fbq_type	= all,
 		.tasks		= LIST_HEAD_INIT(env.tasks),
+		.imbalance	= 0,
+		.flags		= 0,
+		.loop		= 0,
+		.busiest_nr_running     = 0,
+		.busiest_grp_capacity   = 0,
 	};
 
 	/*
@@ -7749,6 +9002,13 @@
 more_balance:
 		raw_spin_lock_irqsave(&busiest->lock, flags);
 
+		/* The world might have changed. Validate assumptions */
+		if (busiest->nr_running <= 1) {
+			raw_spin_unlock_irqrestore(&busiest->lock, flags);
+			env.flags &= ~LBF_ALL_PINNED;
+			goto no_move;
+		}
+
 		/*
 		 * cur_ld_moved - load moved in current iteration
 		 * ld_moved     - cumulative load moved across iterations
@@ -7836,15 +9096,18 @@
 		}
 	}
 
+no_move:
 	if (!ld_moved) {
-		schedstat_inc(sd->lb_failed[idle]);
+		if (!(env.flags & LBF_HMP_ACTIVE_BALANCE))
+			schedstat_inc(sd->lb_failed[idle]);
 		/*
 		 * Increment the failure counter only on periodic balance.
 		 * We do not want newidle balance, which can be very
 		 * frequent, pollute the failure counter causing
 		 * excessive cache_hot migrations and active balances.
 		 */
-		if (idle != CPU_NEWLY_IDLE)
+		if (idle != CPU_NEWLY_IDLE &&
+		    !(env.flags & LBF_HMP_ACTIVE_BALANCE))
 			sd->nr_balance_failed++;
 
 		if (need_active_balance(&env)) {
@@ -7878,14 +9141,28 @@
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
+				*continue_balancing = 0;
 			}
 
 			/* We've kicked active balancing, force task migration. */
-			sd->nr_balance_failed = sd->cache_nice_tries+1;
+			sd->nr_balance_failed = sd->cache_nice_tries +
+					NEED_ACTIVE_BALANCE_THRESHOLD - 1;
 		}
-	} else
+	} else {
 		sd->nr_balance_failed = 0;
 
+		/* Assumes one 'busiest' cpu that we pulled tasks from */
+		if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
+			int check_groups = !!(env.flags &
+					 LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+
+			check_for_freq_change(this_rq, false, check_groups);
+			check_for_freq_change(busiest, false, check_groups);
+		} else {
+			check_for_freq_change(this_rq, true, false);
+		}
+	}
+
 	if (likely(!active_balance)) {
 		/* We were unbalanced, so reset the balancing interval */
 		sd->balance_interval = sd->min_interval;
@@ -7933,6 +9210,11 @@
 
 	ld_moved = 0;
 out:
+	trace_sched_load_balance(this_cpu, idle, *continue_balancing,
+				 group ? group->cpumask[0] : 0,
+				 busiest ? busiest->nr_running : 0,
+				 env.imbalance, env.flags, ld_moved,
+				 sd->balance_interval);
 	return ld_moved;
 }
 
@@ -8027,9 +9309,12 @@
 
 		/*
 		 * Stop searching for tasks to pull if there are
-		 * now runnable tasks on this rq.
+		 * now runnable tasks on the balance rq or if
+		 * continue_balancing has been unset (only possible
+		 * due to active migration).
 		 */
-		if (pulled_task || this_rq->nr_running > 0)
+		if (pulled_task || this_rq->nr_running > 0 ||
+						!continue_balancing)
 			break;
 	}
 	rcu_read_unlock();
@@ -8074,8 +9359,23 @@
 	int busiest_cpu = cpu_of(busiest_rq);
 	int target_cpu = busiest_rq->push_cpu;
 	struct rq *target_rq = cpu_rq(target_cpu);
-	struct sched_domain *sd;
+	struct sched_domain *sd = NULL;
 	struct task_struct *p = NULL;
+	struct task_struct *push_task;
+	int push_task_detached = 0;
+	struct lb_env env = {
+		.sd			= sd,
+		.dst_cpu		= target_cpu,
+		.dst_rq			= target_rq,
+		.src_cpu		= busiest_rq->cpu,
+		.src_rq			= busiest_rq,
+		.idle			= CPU_IDLE,
+		.busiest_nr_running	= 0,
+		.busiest_grp_capacity	= 0,
+		.flags			= 0,
+		.loop			= 0,
+	};
+	bool moved = false;
 
 	raw_spin_lock_irq(&busiest_rq->lock);
 
@@ -8095,6 +9395,20 @@
 	 */
 	BUG_ON(busiest_rq == target_rq);
 
+	push_task = busiest_rq->push_task;
+	target_cpu = busiest_rq->push_cpu;
+	if (push_task) {
+		if (task_on_rq_queued(push_task) &&
+			push_task->state == TASK_RUNNING &&
+			task_cpu(push_task) == busiest_cpu &&
+					cpu_online(target_cpu)) {
+			detach_task(push_task, &env);
+			push_task_detached = 1;
+			moved = true;
+		}
+		goto out_unlock;
+	}
+
 	/* Search for an sd spanning us and the target CPU. */
 	rcu_read_lock();
 	for_each_domain(target_cpu, sd) {
@@ -8104,15 +9418,7 @@
 	}
 
 	if (likely(sd)) {
-		struct lb_env env = {
-			.sd		= sd,
-			.dst_cpu	= target_cpu,
-			.dst_rq		= target_rq,
-			.src_cpu	= busiest_rq->cpu,
-			.src_rq		= busiest_rq,
-			.idle		= CPU_IDLE,
-		};
-
+		env.sd = sd;
 		schedstat_inc(sd->alb_count);
 
 		p = detach_one_task(&env);
@@ -8120,6 +9426,7 @@
 			schedstat_inc(sd->alb_pushed);
 			/* Active balancing done, reset the failure counter. */
 			sd->nr_balance_failed = 0;
+			moved = true;
 		} else {
 			schedstat_inc(sd->alb_failed);
 		}
@@ -8127,6 +9434,12 @@
 	rcu_read_unlock();
 out_unlock:
 	busiest_rq->active_balance = 0;
+	push_task = busiest_rq->push_task;
+	target_cpu = busiest_rq->push_cpu;
+
+	if (push_task)
+		busiest_rq->push_task = NULL;
+
 	raw_spin_unlock(&busiest_rq->lock);
 
 	if (p)
@@ -8134,6 +9447,15 @@
 
 	local_irq_enable();
 
+	if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
+		int check_groups = !!(env.flags &
+					 LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+		check_for_freq_change(busiest_rq, false, check_groups);
+		check_for_freq_change(target_rq, false, check_groups);
+	} else if (moved) {
+		check_for_freq_change(target_rq, true, false);
+	}
+
 	return 0;
 }
 
@@ -8155,10 +9477,45 @@
 	unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
 
-static inline int find_new_ilb(void)
+#ifdef CONFIG_SCHED_HMP
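+/*
+ * HMP-aware idle load balancer selection: walk the calling CPU's sched
+ * domains outwards and return the first idle CPU found. For a restricted
+ * kick, the idle CPU must be of equal or lower maximum capacity, or of
+ * equal or lower maximum power cost, than the calling CPU.
+ */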
+static inline int find_new_hmp_ilb(int type)
+{
+	int call_cpu = raw_smp_processor_id();
+	struct sched_domain *sd;
+	int ilb;
+
+	rcu_read_lock();
+
+	/* Pick an idle cpu "closest" to call_cpu */
+	for_each_domain(call_cpu, sd) {
+		for_each_cpu_and(ilb, nohz.idle_cpus_mask,
+						sched_domain_span(sd)) {
+			if (idle_cpu(ilb) && (type != NOHZ_KICK_RESTRICT ||
+					(hmp_capable() &&
+					 cpu_max_possible_capacity(ilb) <=
+					cpu_max_possible_capacity(call_cpu)) ||
+					cpu_max_power_cost(ilb) <=
+					cpu_max_power_cost(call_cpu))) {
+				rcu_read_unlock();
+				reset_balance_interval(ilb);
+				return ilb;
+			}
+		}
+	}
+
+	rcu_read_unlock();
+	return nr_cpu_ids;
+}
+#endif	/* CONFIG_SCHED_HMP */
+
+static inline int find_new_ilb(int type)
 {
 	int ilb = cpumask_first(nohz.idle_cpus_mask);
 
+#ifdef CONFIG_SCHED_HMP
+	return find_new_hmp_ilb(type);
+#endif
+
 	if (ilb < nr_cpu_ids && idle_cpu(ilb))
 		return ilb;
 
@@ -8170,13 +9527,13 @@
  * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
  * CPU (if there is one).
  */
-static void nohz_balancer_kick(void)
+static void nohz_balancer_kick(int type)
 {
 	int ilb_cpu;
 
 	nohz.next_balance++;
 
-	ilb_cpu = find_new_ilb();
+	ilb_cpu = find_new_ilb(type);
 
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
@@ -8449,6 +9806,66 @@
 	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
+#ifdef CONFIG_SCHED_HMP
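+/*
+ * HMP nohz kick policy: kick the idle load balancer whenever at least two
+ * tasks are runnable. The kick is unrestricted when cluster-spill restriction
+ * is disabled, sched_boost is on, or this CPU is of maximum capacity;
+ * otherwise it is restricted if any CPU in this domain is still below its
+ * spill thresholds.
+ */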
+static inline int _nohz_kick_needed_hmp(struct rq *rq, int cpu, int *type)
+{
+	struct sched_domain *sd;
+	int i;
+
+	if (rq->nr_running < 2)
+		return 0;
+
+	if (!sysctl_sched_restrict_cluster_spill || sched_boost())
+		return 1;
+
+	if (hmp_capable() && cpu_max_possible_capacity(cpu) ==
+			max_possible_capacity)
+		return 1;
+
+	rcu_read_lock();
+	sd = rcu_dereference_check_sched_domain(rq->sd);
+	if (!sd) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	for_each_cpu(i, sched_domain_span(sd)) {
+		if (cpu_load(i) < sched_spill_load &&
+				cpu_rq(i)->nr_running <
+				sysctl_sched_spill_nr_run) {
+			/*
+			 * Change the kick type to restrict the kick
+			 * to CPUs of equal or lower capacity.
+			 */
+			*type = NOHZ_KICK_RESTRICT;
+			break;
+		}
+	}
+	rcu_read_unlock();
+	return 1;
+}
+#endif /* CONFIG_SCHED_HMP */
+
+static inline int _nohz_kick_needed(struct rq *rq, int cpu, int *type)
+{
+	unsigned long now = jiffies;
+
+	/*
+	 * None are in tickless mode and hence no need for NOHZ idle load
+	 * balancing.
+	 */
+	if (likely(!atomic_read(&nohz.nr_cpus)))
+		return 0;
+
+#ifdef CONFIG_SCHED_HMP
+	return _nohz_kick_needed_hmp(rq, cpu, type);
+#endif
+
+	if (time_before(now, nohz.next_balance))
+		return 0;
+
+	return (rq->nr_running >= 2);
+}
+
 /*
  * Current heuristic for kicking the idle load balancer in the presence
  * of an idle cpu in the system.
@@ -8460,12 +9877,14 @@
  *   - For SD_ASYM_PACKING, if the lower numbered cpu's in the scheduler
  *     domain span are idle.
  */
-static inline bool nohz_kick_needed(struct rq *rq)
+static inline bool nohz_kick_needed(struct rq *rq, int *type)
 {
-	unsigned long now = jiffies;
+#ifndef CONFIG_SCHED_HMP
 	struct sched_domain_shared *sds;
 	struct sched_domain *sd;
-	int nr_busy, cpu = rq->cpu;
+	int nr_busy;
+#endif
+	int cpu = rq->cpu;
 	bool kick = false;
 
 	if (unlikely(rq->idle_balance))
@@ -8478,19 +9897,10 @@
 	set_cpu_sd_state_busy();
 	nohz_balance_exit_idle(cpu);
 
-	/*
-	 * None are in tickless mode and hence no need for NOHZ idle load
-	 * balancing.
-	 */
-	if (likely(!atomic_read(&nohz.nr_cpus)))
-		return false;
-
-	if (time_before(now, nohz.next_balance))
-		return false;
-
-	if (rq->nr_running >= 2)
+	if (_nohz_kick_needed(rq, cpu, type))
 		return true;
 
+#ifndef CONFIG_SCHED_HMP
 	rcu_read_lock();
 	sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
 	if (sds) {
@@ -8524,6 +9934,7 @@
 
 unlock:
 	rcu_read_unlock();
+#endif
 	return kick;
 }
 #else
@@ -8557,6 +9968,8 @@
  */
 void trigger_load_balance(struct rq *rq)
 {
+	int type = NOHZ_KICK_ANY;
+
 	/* Don't need to rebalance while attached to NULL domain */
 	if (unlikely(on_null_domain(rq)))
 		return;
@@ -8564,8 +9977,8 @@
 	if (time_after_eq(jiffies, rq->next_balance))
 		raise_softirq(SCHED_SOFTIRQ);
 #ifdef CONFIG_NO_HZ_COMMON
-	if (nohz_kick_needed(rq))
-		nohz_balancer_kick();
+	if (nohz_kick_needed(rq, &type))
+		nohz_balancer_kick(type);
 #endif
 }
 
@@ -9060,6 +10473,9 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_change_group	= task_change_group_fair,
 #endif
+#ifdef CONFIG_SCHED_HMP
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_fair,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 69631fa..6651a37 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -49,7 +49,7 @@
  * Queue remote wakeups on the target CPU and process them
  * using the scheduler IPI. Reduces rq->lock contention/bounces.
  */
-SCHED_FEAT(TTWU_QUEUE, true)
+SCHED_FEAT(TTWU_QUEUE, false)
 
 #ifdef HAVE_RT_PUSH_IPI
 /*
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
new file mode 100644
index 0000000..9bc1bac
--- /dev/null
+++ b/kernel/sched/hmp.c
@@ -0,0 +1,3975 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * Implementation credits: Srivatsa Vaddagiri, Steve Muckle
+ * Syed Rameez Mustafa, Olav Haugan, Joonwoo Park, Pavan Kumar Kondeti
+ * and Vikram Mulukutla
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/list_sort.h>
+#include <linux/syscore_ops.h>
+
+#include "sched.h"
+
+#include <trace/events/sched.h>
+
+const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
+		"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE", "IRQ_UPDATE"};
+
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
+				"RQ_TO_RQ", "GROUP_TO_GROUP"};
+
+static ktime_t ktime_last;
+static bool sched_ktime_suspended;
+
+static bool use_cycle_counter;
+static struct cpu_cycle_counter_cb cpu_cycle_counter_cb;
+
+u64 sched_ktime_clock(void)
+{
+	if (unlikely(sched_ktime_suspended))
+		return ktime_to_ns(ktime_last);
+	return ktime_get_ns();
+}
+
+static void sched_resume(void)
+{
+	sched_ktime_suspended = false;
+}
+
+static int sched_suspend(void)
+{
+	ktime_last = ktime_get();
+	sched_ktime_suspended = true;
+	return 0;
+}
+
+static struct syscore_ops sched_syscore_ops = {
+	.resume	= sched_resume,
+	.suspend = sched_suspend
+};
+
+static int __init sched_init_ops(void)
+{
+	register_syscore_ops(&sched_syscore_ops);
+	return 0;
+}
+late_initcall(sched_init_ops);
+
+inline void clear_ed_task(struct task_struct *p, struct rq *rq)
+{
+	if (p == rq->ed_task)
+		rq->ed_task = NULL;
+}
+
+inline void set_task_last_wake(struct task_struct *p, u64 wallclock)
+{
+	p->last_wake_ts = wallclock;
+}
+
+inline void set_task_last_switch_out(struct task_struct *p, u64 wallclock)
+{
+	p->last_switch_out_ts = wallclock;
+}
+
+/*
+ * Note C-state for (idle) cpus.
+ *
+ * @cstate = cstate index, 0 -> active state
+ * @wakeup_energy = energy spent in waking up cpu
+ * @wakeup_latency = latency to wakeup from cstate
+ *
+ */
+void
+sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	rq->cstate = cstate; /* C1, C2 etc */
+	rq->wakeup_energy = wakeup_energy;
+	rq->wakeup_latency = wakeup_latency;
+}
+
+/*
+ * Note D-state for (idle) cluster.
+ *
+ * @dstate = dstate index, 0 -> active state
+ * @wakeup_energy = energy spent in waking up cluster
+ * @wakeup_latency = latency to wakeup from cluster
+ *
+ */
+void sched_set_cluster_dstate(const cpumask_t *cluster_cpus, int dstate,
+			int wakeup_energy, int wakeup_latency)
+{
+	struct sched_cluster *cluster =
+		cpu_rq(cpumask_first(cluster_cpus))->cluster;
+	cluster->dstate = dstate;
+	cluster->dstate_wakeup_energy = wakeup_energy;
+	cluster->dstate_wakeup_latency = wakeup_latency;
+}
+
+u32 __weak get_freq_max_load(int cpu, u32 freq)
+{
+	/* 100% by default */
+	return 100;
+}
+
+struct freq_max_load_entry {
+	/* The maximum load which has accounted governor's headroom. */
+	u64 hdemand;
+};
+
+struct freq_max_load {
+	struct rcu_head rcu;
+	int length;
+	struct freq_max_load_entry freqs[0];
+};
+
+static DEFINE_PER_CPU(struct freq_max_load *, freq_max_load);
+static DEFINE_SPINLOCK(freq_max_load_lock);
+
+struct cpu_pwr_stats __weak *get_cpu_pwr_stats(void)
+{
+	return NULL;
+}
+
+int sched_update_freq_max_load(const cpumask_t *cpumask)
+{
+	int i, cpu, ret;
+	unsigned int freq;
+	struct cpu_pstate_pwr *costs;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	struct freq_max_load *max_load, *old_max_load;
+	struct freq_max_load_entry *entry;
+	u64 max_demand_capacity, max_demand;
+	unsigned long flags;
+	u32 hfreq;
+	int hpct;
+
+	if (!per_cpu_info)
+		return 0;
+
+	spin_lock_irqsave(&freq_max_load_lock, flags);
+	max_demand_capacity = div64_u64(max_task_load(), max_possible_capacity);
+	for_each_cpu(cpu, cpumask) {
+		if (!per_cpu_info[cpu].ptable) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		old_max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+
+		/*
+		 * Allocate len + 1 entries and leave the last power cost as 0
+		 * so that power_cost() can stop iterating when
+		 * per_cpu_info[cpu].len exceeds the length of max_load, due to
+		 * a race between the cpu power stats update and
+		 * get_cpu_pwr_stats().
+		 */
+		max_load = kzalloc(sizeof(struct freq_max_load) +
+				   sizeof(struct freq_max_load_entry) *
+				   (per_cpu_info[cpu].len + 1), GFP_ATOMIC);
+		if (unlikely(!max_load)) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		max_load->length = per_cpu_info[cpu].len;
+
+		max_demand = max_demand_capacity *
+			     cpu_max_possible_capacity(cpu);
+
+		i = 0;
+		costs = per_cpu_info[cpu].ptable;
+		while (costs[i].freq) {
+			entry = &max_load->freqs[i];
+			freq = costs[i].freq;
+			hpct = get_freq_max_load(cpu, freq);
+			if (hpct <= 0 || hpct > 100)
+				hpct = 100;
+			hfreq = div64_u64((u64)freq * hpct, 100);
+			entry->hdemand =
+			    div64_u64(max_demand * hfreq,
+				      cpu_max_possible_freq(cpu));
+			i++;
+		}
+
+		rcu_assign_pointer(per_cpu(freq_max_load, cpu), max_load);
+		if (old_max_load)
+			kfree_rcu(old_max_load, rcu);
+	}
+
+	spin_unlock_irqrestore(&freq_max_load_lock, flags);
+	return 0;
+
+fail:
+	for_each_cpu(cpu, cpumask) {
+		max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+		if (max_load) {
+			rcu_assign_pointer(per_cpu(freq_max_load, cpu), NULL);
+			kfree_rcu(max_load, rcu);
+		}
+	}
+
+	spin_unlock_irqrestore(&freq_max_load_lock, flags);
+	return ret;
+}
+
+unsigned int max_possible_efficiency = 1;
+unsigned int min_possible_efficiency = UINT_MAX;
+
+unsigned long __weak arch_get_cpu_efficiency(int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+/* Keep track of max/min capacity possible across CPUs "currently" */
+static void __update_min_max_capacity(void)
+{
+	int i;
+	int max_cap = 0, min_cap = INT_MAX;
+
+	for_each_online_cpu(i) {
+		max_cap = max(max_cap, cpu_capacity(i));
+		min_cap = min(min_cap, cpu_capacity(i));
+	}
+
+	max_capacity = max_cap;
+	min_capacity = min_cap;
+}
+
+static void update_min_max_capacity(void)
+{
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	for_each_possible_cpu(i)
+		raw_spin_lock(&cpu_rq(i)->lock);
+
+	__update_min_max_capacity();
+
+	for_each_possible_cpu(i)
+		raw_spin_unlock(&cpu_rq(i)->lock);
+	local_irq_restore(flags);
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to "least" efficient cpu, such that
+ * least efficient cpu gets capacity of 1024
+ */
+static unsigned long
+capacity_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+	return (1024 * cluster->efficiency) / min_possible_efficiency;
+}
+
+/*
+ * Return 'capacity' of a cpu in reference to cpu with lowest max_freq
+ * (min_max_freq), such that one with lowest max_freq gets capacity of 1024.
+ */
+static unsigned long capacity_scale_cpu_freq(struct sched_cluster *cluster)
+{
+	return (1024 * cluster_max_freq(cluster)) / min_max_freq;
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to "most" efficient cpu, so
+ * that "most" efficient cpu gets a load_scale_factor of 1
+ */
+static inline unsigned long
+load_scale_cpu_efficiency(struct sched_cluster *cluster)
+{
+	return DIV_ROUND_UP(1024 * max_possible_efficiency,
+			    cluster->efficiency);
+}
+
+/*
+ * Return load_scale_factor of a cpu in reference to cpu with best max_freq
+ * (max_possible_freq), so that one with best max_freq gets a load_scale_factor
+ * of 1.
+ */
+static inline unsigned long load_scale_cpu_freq(struct sched_cluster *cluster)
+{
+	return DIV_ROUND_UP(1024 * max_possible_freq,
+			   cluster_max_freq(cluster));
+}
+
+static int compute_capacity(struct sched_cluster *cluster)
+{
+	int capacity = 1024;
+
+	capacity *= capacity_scale_cpu_efficiency(cluster);
+	capacity >>= 10;
+
+	capacity *= capacity_scale_cpu_freq(cluster);
+	capacity >>= 10;
+
+	return capacity;
+}
+
+static int compute_max_possible_capacity(struct sched_cluster *cluster)
+{
+	int capacity = 1024;
+
+	capacity *= capacity_scale_cpu_efficiency(cluster);
+	capacity >>= 10;
+
+	capacity *= (1024 * cluster->max_possible_freq) / min_max_freq;
+	capacity >>= 10;
+
+	return capacity;
+}
+
+static int compute_load_scale_factor(struct sched_cluster *cluster)
+{
+	int load_scale = 1024;
+
+	/*
+	 * load_scale_factor accounts for the fact that task load
+	 * is in reference to "best" performing cpu. Task's load will need to be
+	 * scaled (up) by a factor to determine suitability to be placed on a
+	 * (little) cpu.
+	 */
+	load_scale *= load_scale_cpu_efficiency(cluster);
+	load_scale >>= 10;
+
+	load_scale *= load_scale_cpu_freq(cluster);
+	load_scale >>= 10;
+
+	return load_scale;
+}
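+
+/*
+ * For example, a cluster with half the efficiency and half the max frequency
+ * of the best-performing cluster ends up with load_scale_factor = 4096, i.e.
+ * task demand is scaled up by a factor of 4 (4096/1024) when judged against
+ * that cluster.
+ */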
+
+struct list_head cluster_head;
+static DEFINE_MUTEX(cluster_lock);
+static cpumask_t all_cluster_cpus = CPU_MASK_NONE;
+DECLARE_BITMAP(all_cluster_ids, NR_CPUS);
+struct sched_cluster *sched_cluster[NR_CPUS];
+int num_clusters;
+
+struct sched_cluster init_cluster = {
+	.list			=	LIST_HEAD_INIT(init_cluster.list),
+	.id			=	0,
+	.max_power_cost		=	1,
+	.min_power_cost		=	1,
+	.capacity		=	1024,
+	.max_possible_capacity	=	1024,
+	.efficiency		=	1,
+	.load_scale_factor	=	1024,
+	.cur_freq		=	1,
+	.max_freq		=	1,
+	.max_mitigated_freq	=	UINT_MAX,
+	.min_freq		=	1,
+	.max_possible_freq	=	1,
+	.dstate			=	0,
+	.dstate_wakeup_energy	=	0,
+	.dstate_wakeup_latency	=	0,
+	.exec_scale_factor	=	1024,
+	.notifier_sent		=	0,
+};
+
+static void update_all_clusters_stats(void)
+{
+	struct sched_cluster *cluster;
+	u64 highest_mpc = 0, lowest_mpc = U64_MAX;
+
+	pre_big_task_count_change(cpu_possible_mask);
+
+	for_each_sched_cluster(cluster) {
+		u64 mpc;
+
+		cluster->capacity = compute_capacity(cluster);
+		mpc = cluster->max_possible_capacity =
+			compute_max_possible_capacity(cluster);
+		cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+		cluster->exec_scale_factor =
+			DIV_ROUND_UP(cluster->efficiency * 1024,
+				     max_possible_efficiency);
+
+		if (mpc > highest_mpc)
+			highest_mpc = mpc;
+
+		if (mpc < lowest_mpc)
+			lowest_mpc = mpc;
+	}
+
+	max_possible_capacity = highest_mpc;
+	min_max_possible_capacity = lowest_mpc;
+
+	__update_min_max_capacity();
+	sched_update_freq_max_load(cpu_possible_mask);
+	post_big_task_count_change(cpu_possible_mask);
+}
+
+static void assign_cluster_ids(struct list_head *head)
+{
+	struct sched_cluster *cluster;
+	int pos = 0;
+
+	list_for_each_entry(cluster, head, list) {
+		cluster->id = pos;
+		sched_cluster[pos++] = cluster;
+	}
+}
+
+static void
+move_list(struct list_head *dst, struct list_head *src, bool sync_rcu)
+{
+	struct list_head *first, *last;
+
+	first = src->next;
+	last = src->prev;
+
+	if (sync_rcu) {
+		INIT_LIST_HEAD_RCU(src);
+		synchronize_rcu();
+	}
+
+	first->prev = dst;
+	dst->prev = last;
+	last->next = dst;
+
+	/* Ensure list sanity before making the head visible to all CPUs. */
+	smp_mb();
+	dst->next = first;
+}
+
+static int
+compare_clusters(void *priv, struct list_head *a, struct list_head *b)
+{
+	struct sched_cluster *cluster1, *cluster2;
+	int ret;
+
+	cluster1 = container_of(a, struct sched_cluster, list);
+	cluster2 = container_of(b, struct sched_cluster, list);
+
+	ret = cluster1->max_power_cost > cluster2->max_power_cost ||
+		(cluster1->max_power_cost == cluster2->max_power_cost &&
+		cluster1->max_possible_capacity <
+				cluster2->max_possible_capacity);
+
+	return ret;
+}
+
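+/*
+ * Keep cluster_head sorted in ascending max_power_cost order; clusters with
+ * equal power cost are ordered with the higher max_possible_capacity one
+ * first.
+ */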
+static void sort_clusters(void)
+{
+	struct sched_cluster *cluster;
+	struct list_head new_head;
+
+	INIT_LIST_HEAD(&new_head);
+
+	for_each_sched_cluster(cluster) {
+		cluster->max_power_cost = power_cost(cluster_first_cpu(cluster),
+							       max_task_load());
+		cluster->min_power_cost = power_cost(cluster_first_cpu(cluster),
+							       0);
+	}
+
+	move_list(&new_head, &cluster_head, true);
+
+	list_sort(NULL, &new_head, compare_clusters);
+	assign_cluster_ids(&new_head);
+
+	/*
+	 * Ensure cluster ids are visible to all CPUs before making
+	 * cluster_head visible.
+	 */
+	move_list(&cluster_head, &new_head, false);
+}
+
+static void
+insert_cluster(struct sched_cluster *cluster, struct list_head *head)
+{
+	struct sched_cluster *tmp;
+	struct list_head *iter = head;
+
+	list_for_each_entry(tmp, head, list) {
+		if (cluster->max_power_cost < tmp->max_power_cost)
+			break;
+		iter = &tmp->list;
+	}
+
+	list_add(&cluster->list, iter);
+}
+
+static struct sched_cluster *alloc_new_cluster(const struct cpumask *cpus)
+{
+	struct sched_cluster *cluster = NULL;
+
+	cluster = kzalloc(sizeof(struct sched_cluster), GFP_ATOMIC);
+	if (!cluster) {
+		__WARN_printf("Cluster allocation failed. Possible bad scheduling\n");
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&cluster->list);
+	cluster->max_power_cost		=	1;
+	cluster->min_power_cost		=	1;
+	cluster->capacity		=	1024;
+	cluster->max_possible_capacity	=	1024;
+	cluster->efficiency		=	1;
+	cluster->load_scale_factor	=	1024;
+	cluster->cur_freq		=	1;
+	cluster->max_freq		=	1;
+	cluster->max_mitigated_freq	=	UINT_MAX;
+	cluster->min_freq		=	1;
+	cluster->max_possible_freq	=	1;
+	cluster->dstate			=	0;
+	cluster->dstate_wakeup_energy	=	0;
+	cluster->dstate_wakeup_latency	=	0;
+	cluster->freq_init_done		=	false;
+
+	cluster->cpus = *cpus;
+	cluster->efficiency = arch_get_cpu_efficiency(cpumask_first(cpus));
+
+	if (cluster->efficiency > max_possible_efficiency)
+		max_possible_efficiency = cluster->efficiency;
+	if (cluster->efficiency < min_possible_efficiency)
+		min_possible_efficiency = cluster->efficiency;
+
+	cluster->notifier_sent = 0;
+	return cluster;
+}
+
+static void add_cluster(const struct cpumask *cpus, struct list_head *head)
+{
+	struct sched_cluster *cluster = alloc_new_cluster(cpus);
+	int i;
+
+	if (!cluster)
+		return;
+
+	for_each_cpu(i, cpus)
+		cpu_rq(i)->cluster = cluster;
+
+	insert_cluster(cluster, head);
+	set_bit(num_clusters, all_cluster_ids);
+	num_clusters++;
+}
+
+void update_cluster_topology(void)
+{
+	struct cpumask cpus = *cpu_possible_mask;
+	const struct cpumask *cluster_cpus;
+	struct list_head new_head;
+	int i;
+
+	INIT_LIST_HEAD(&new_head);
+
+	for_each_cpu(i, &cpus) {
+		cluster_cpus = cpu_coregroup_mask(i);
+		cpumask_or(&all_cluster_cpus, &all_cluster_cpus, cluster_cpus);
+		cpumask_andnot(&cpus, &cpus, cluster_cpus);
+		add_cluster(cluster_cpus, &new_head);
+	}
+
+	assign_cluster_ids(&new_head);
+
+	/*
+	 * Ensure cluster ids are visible to all CPUs before making
+	 * cluster_head visible.
+	 */
+	move_list(&cluster_head, &new_head, false);
+}
+
+void init_clusters(void)
+{
+	bitmap_clear(all_cluster_ids, 0, NR_CPUS);
+	init_cluster.cpus = *cpu_possible_mask;
+	INIT_LIST_HEAD(&cluster_head);
+}
+
+int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb)
+{
+	mutex_lock(&cluster_lock);
+	if (!cb->get_cpu_cycle_counter) {
+		mutex_unlock(&cluster_lock);
+		return -EINVAL;
+	}
+
+	cpu_cycle_counter_cb = *cb;
+	use_cycle_counter = true;
+	mutex_unlock(&cluster_lock);
+
+	return 0;
+}
+
+int got_boost_kick(void)
+{
+	int cpu = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(BOOST_KICK, &rq->hmp_flags);
+}
+
+inline void clear_boost_kick(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(BOOST_KICK, &rq->hmp_flags);
+}
+
+inline void boost_kick(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!test_and_set_bit(BOOST_KICK, &rq->hmp_flags))
+		smp_send_reschedule(cpu);
+}
+
+/* Clear any HMP scheduler related requests pending from or on cpu */
+void clear_hmp_request(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	clear_boost_kick(cpu);
+	clear_reserved(cpu);
+	if (rq->push_task) {
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		if (rq->push_task) {
+			clear_reserved(rq->push_cpu);
+			put_task_struct(rq->push_task);
+			rq->push_task = NULL;
+		}
+		rq->active_balance = 0;
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	}
+}
+
+int sched_set_static_cpu_pwr_cost(int cpu, unsigned int cost)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	rq->static_cpu_pwr_cost = cost;
+	return 0;
+}
+
+unsigned int sched_get_static_cpu_pwr_cost(int cpu)
+{
+	return cpu_rq(cpu)->static_cpu_pwr_cost;
+}
+
+int sched_set_static_cluster_pwr_cost(int cpu, unsigned int cost)
+{
+	struct sched_cluster *cluster = cpu_rq(cpu)->cluster;
+
+	cluster->static_cluster_pwr_cost = cost;
+	return 0;
+}
+
+unsigned int sched_get_static_cluster_pwr_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->static_cluster_pwr_cost;
+}
+
+/*
+ * sched_window_stats_policy and sched_ravg_hist_size have a 'sysctl' copy
+ * associated with them. This is required for atomic update of those variables
+ * when they are modified via the sysctl interface.
+ *
+ * IMPORTANT: Initialize both copies to the same value!!
+ */
+
+/*
+ * Tasks that are runnable continuously for a period greater than
+ * EARLY_DETECTION_DURATION can be flagged early as potential
+ * high load tasks.
+ */
+#define EARLY_DETECTION_DURATION 9500000
+
+static __read_mostly unsigned int sched_ravg_hist_size = 5;
+__read_mostly unsigned int sysctl_sched_ravg_hist_size = 5;
+
+static __read_mostly unsigned int sched_window_stats_policy =
+	 WINDOW_STATS_MAX_RECENT_AVG;
+__read_mostly unsigned int sysctl_sched_window_stats_policy =
+	WINDOW_STATS_MAX_RECENT_AVG;
+
+#define SCHED_ACCOUNT_WAIT_TIME 1
+
+__read_mostly unsigned int sysctl_sched_cpu_high_irqload = (10 * NSEC_PER_MSEC);
+
+unsigned int __read_mostly sysctl_sched_enable_colocation = 1;
+
+/*
+ * Enable colocation and frequency aggregation for all threads in a process.
+ * The children inherits the group id from the parent.
+ */
+unsigned int __read_mostly sysctl_sched_enable_thread_grouping;
+
+__read_mostly unsigned int sysctl_sched_new_task_windows = 5;
+
+#define SCHED_FREQ_ACCOUNT_WAIT_TIME 0
+
+/*
+ * For increase, send notification if
+ *      freq_required - cur_freq > sysctl_sched_freq_inc_notify
+ */
+__read_mostly int sysctl_sched_freq_inc_notify = 10 * 1024 * 1024; /* + 10GHz */
+
+/*
+ * For decrease, send notification if
+ *      cur_freq - freq_required > sysctl_sched_freq_dec_notify
+ */
+__read_mostly int sysctl_sched_freq_dec_notify = 10 * 1024 * 1024; /* - 10GHz */
+
+static __read_mostly unsigned int sched_io_is_busy;
+
+__read_mostly unsigned int sysctl_sched_pred_alert_freq = 10 * 1024 * 1024;
+
+/*
+ * Maximum possible frequency across all cpus. Task demand and cpu
+ * capacity (cpu_power) metrics are scaled in reference to it.
+ */
+unsigned int max_possible_freq = 1;
+
+/*
+ * Minimum possible max_freq across all cpus. This will be the same as
+ * max_possible_freq on homogeneous systems and could be different from
+ * max_possible_freq on heterogeneous systems. min_max_freq is used to derive
+ * capacity (cpu_power) of cpus.
+ */
+unsigned int min_max_freq = 1;
+
+unsigned int max_capacity = 1024; /* max(rq->capacity) */
+unsigned int min_capacity = 1024; /* min(rq->capacity) */
+unsigned int max_possible_capacity = 1024; /* max(rq->max_possible_capacity) */
+unsigned int
+min_max_possible_capacity = 1024; /* min(rq->max_possible_capacity) */
+
+/* Window size (in ns) */
+__read_mostly unsigned int sched_ravg_window = 10000000;
+
+/* Min window size (in ns) = 10ms */
+#define MIN_SCHED_RAVG_WINDOW 10000000
+
+/* Max window size (in ns) = 1s */
+#define MAX_SCHED_RAVG_WINDOW 1000000000
+
+/* Temporarily disable window-stats activity on all cpus */
+unsigned int __read_mostly sched_disable_window_stats;
+
+/*
+ * Major task runtime. If a task runs for more than sched_major_task_runtime
+ * in a window, it is considered to be generating the majority of the workload
+ * for that window. Prediction could be adjusted for such tasks.
+ */
+__read_mostly unsigned int sched_major_task_runtime = 10000000;
+
+static unsigned int sync_cpu;
+
+static LIST_HEAD(related_thread_groups);
+static DEFINE_RWLOCK(related_thread_group_lock);
+
+#define for_each_related_thread_group(grp) \
+	list_for_each_entry(grp, &related_thread_groups, list)
+
+/*
+ * Demand aggregation for frequency purpose:
+ *
+ * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
+ * for frequency determination purpose. This aggregation is done per-cluster.
+ *
+ * CPU demand of tasks from various related groups is aggregated per-cluster and
+ * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
+ * by just rq->prev_runnable_sum.
+ *
+ * Some examples follow, which assume:
+ *	Cluster0 = CPU0-3, Cluster1 = CPU4-7
+ *	One related thread group A that has tasks A0, A1, A2
+ *
+ *	A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
+ *	tasks belonging to group A are accumulated when they run on cpu X.
+ *
+ *	CX->curr/prev_sum = counters in which cpu execution stats of all tasks
+ *	not belonging to group A are accumulated when they run on cpu X
+ *
+ * Lets say the stats for window M was as below:
+ *
+ *	C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
+ *		Task A0 ran 5ms on CPU0
+ *		Task B0 ran 1ms on CPU0
+ *
+ *	C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
+ *		Task A1 ran 4ms on CPU1
+ *		Task A2 ran 2ms on CPU1
+ *		Task B1 ran 5ms on CPU1
+ *
+ *	C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
+ *		CPU2 idle
+ *
+ *	C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
+ *		CPU3 idle
+ *
+ * In this case, CPU1 was most busy going by just its prev_sum counter. Demand
+ * from all group A tasks is added to CPU1. IOW, at the end of window M, cpu busy
+ * time reported to governor will be:
+ *
+ *	C0 busy time = 1ms
+ *	C1 busy time = 5 + 5 + 6 = 16ms
+ *
+ */
+static __read_mostly unsigned int sched_freq_aggregate;
+__read_mostly unsigned int sysctl_sched_freq_aggregate;
+
+unsigned int __read_mostly sysctl_sched_freq_aggregate_threshold_pct;
+static unsigned int __read_mostly sched_freq_aggregate_threshold;
+
+/* Initial task load. Newly created tasks are assigned this load. */
+unsigned int __read_mostly sched_init_task_load_windows;
+unsigned int __read_mostly sysctl_sched_init_task_load_pct = 15;
+
+unsigned int max_task_load(void)
+{
+	return sched_ravg_window;
+}
+
+/*
+ * Scheduler boost is a mechanism to temporarily place tasks on CPUs
+ * with higher capacity than those where a task would normally have
+ * ended up given its load characteristics. Any entity enabling
+ * boost is responsible for disabling it as well.
+ */
+unsigned int sysctl_sched_boost;
+
+/*
+ * A cpu can no longer accommodate more tasks if:
+ *
+ *	rq->nr_running > sysctl_sched_spill_nr_run ||
+ *	rq->hmp_stats.cumulative_runnable_avg > sched_spill_load
+ */
+unsigned int __read_mostly sysctl_sched_spill_nr_run = 10;
+
+/*
+ * Place sync wakee tasks that have less than the configured demand on the
+ * waker's cluster.
+ */
+unsigned int __read_mostly sched_small_wakee_task_load;
+unsigned int __read_mostly sysctl_sched_small_wakee_task_load_pct = 10;
+
+unsigned int __read_mostly sched_big_waker_task_load;
+unsigned int __read_mostly sysctl_sched_big_waker_task_load_pct = 25;
+
+/*
+ * CPUs with load greater than the sched_spill_load_threshold are not
+ * eligible for task placement. When all CPUs in a cluster achieve a
+ * load higher than this level, tasks become eligible for inter-cluster
+ * migration.
+ */
+unsigned int __read_mostly sched_spill_load;
+unsigned int __read_mostly sysctl_sched_spill_load_pct = 100;
+
+/*
+ * Tasks whose bandwidth consumption on a cpu is more than
+ * sched_upmigrate are considered "big" tasks. Big tasks will be
+ * considered for "up" migration, i.e migrating to a cpu with better
+ * capacity.
+ */
+unsigned int __read_mostly sched_upmigrate;
+unsigned int __read_mostly sysctl_sched_upmigrate_pct = 80;
+
+/*
+ * Big tasks, once migrated, will need to drop their bandwidth
+ * consumption to less than sched_downmigrate before they are "down"
+ * migrated.
+ */
+unsigned int __read_mostly sched_downmigrate;
+unsigned int __read_mostly sysctl_sched_downmigrate_pct = 60;
+
+/*
+ * The load scale factor of a CPU gets boosted when its max frequency
+ * is restricted, which makes tasks migrate to higher capacity CPUs too
+ * early. To compensate, the sched_upmigrate threshold is scaled up by
+ * rq->max_possible_freq/rq->max_freq of the lower capacity CPU.
+ */
+unsigned int up_down_migrate_scale_factor = 1024;
+
+/*
+ * The scheduler places a task on its previous CPU if its sleep time is
+ * less than sysctl_sched_select_prev_cpu_us.
+ */
+unsigned int __read_mostly
+sched_short_sleep_task_threshold = 2000 * NSEC_PER_USEC;
+
+unsigned int __read_mostly sysctl_sched_select_prev_cpu_us = 2000;
+
+unsigned int __read_mostly
+sched_long_cpu_selection_threshold = 100 * NSEC_PER_MSEC;
+
+unsigned int __read_mostly sysctl_sched_restrict_cluster_spill;
+
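+/*
+ * Scale the up/down migration thresholds by up_down_migrate_scale_factor/1024
+ * (at microsecond granularity). sched_upmigrate is clamped to the window size
+ * and sched_downmigrate is kept at least the original hysteresis gap below
+ * sched_upmigrate.
+ */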
+void update_up_down_migrate(void)
+{
+	unsigned int up_migrate = pct_to_real(sysctl_sched_upmigrate_pct);
+	unsigned int down_migrate = pct_to_real(sysctl_sched_downmigrate_pct);
+	unsigned int delta;
+
+	if (up_down_migrate_scale_factor == 1024)
+		goto done;
+
+	delta = up_migrate - down_migrate;
+
+	up_migrate /= NSEC_PER_USEC;
+	up_migrate *= up_down_migrate_scale_factor;
+	up_migrate >>= 10;
+	up_migrate *= NSEC_PER_USEC;
+
+	up_migrate = min(up_migrate, sched_ravg_window);
+
+	down_migrate /= NSEC_PER_USEC;
+	down_migrate *= up_down_migrate_scale_factor;
+	down_migrate >>= 10;
+	down_migrate *= NSEC_PER_USEC;
+
+	down_migrate = min(down_migrate, up_migrate - delta);
+done:
+	sched_upmigrate = up_migrate;
+	sched_downmigrate = down_migrate;
+}
+
+void set_hmp_defaults(void)
+{
+	sched_spill_load =
+		pct_to_real(sysctl_sched_spill_load_pct);
+
+	update_up_down_migrate();
+
+	sched_major_task_runtime =
+		mult_frac(sched_ravg_window, MAJOR_TASK_PCT, 100);
+
+	sched_init_task_load_windows =
+		div64_u64((u64)sysctl_sched_init_task_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	sched_short_sleep_task_threshold = sysctl_sched_select_prev_cpu_us *
+					   NSEC_PER_USEC;
+
+	sched_small_wakee_task_load =
+		div64_u64((u64)sysctl_sched_small_wakee_task_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	sched_big_waker_task_load =
+		div64_u64((u64)sysctl_sched_big_waker_task_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	sched_freq_aggregate_threshold =
+		pct_to_real(sysctl_sched_freq_aggregate_threshold_pct);
+}
+
+u32 sched_get_init_task_load(struct task_struct *p)
+{
+	return p->init_load_pct;
+}
+
+int sched_set_init_task_load(struct task_struct *p, int init_load_pct)
+{
+	if (init_load_pct < 0 || init_load_pct > 100)
+		return -EINVAL;
+
+	p->init_load_pct = init_load_pct;
+
+	return 0;
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+
+int upmigrate_discouraged(struct task_struct *p)
+{
+	return task_group(p)->upmigrate_discouraged;
+}
+
+#else
+
+static inline int upmigrate_discouraged(struct task_struct *p)
+{
+	return 0;
+}
+
+#endif
+
+/* Is a task "big" on its current cpu */
+static inline int __is_big_task(struct task_struct *p, u64 scaled_load)
+{
+	int nice = task_nice(p);
+
+	if (nice > SCHED_UPMIGRATE_MIN_NICE || upmigrate_discouraged(p))
+		return 0;
+
+	return scaled_load > sched_upmigrate;
+}
+
+int is_big_task(struct task_struct *p)
+{
+	return __is_big_task(p, scale_load_to_cpu(task_load(p), task_cpu(p)));
+}
+
+u64 cpu_load(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return scale_load_to_cpu(rq->hmp_stats.cumulative_runnable_avg, cpu);
+}
+
+u64 cpu_load_sync(int cpu, int sync)
+{
+	return scale_load_to_cpu(cpu_cravg_sync(cpu, sync), cpu);
+}
+
+static int boost_refcount;
+static DEFINE_SPINLOCK(boost_lock);
+static DEFINE_MUTEX(boost_mutex);
+
+static void boost_kick_cpus(void)
+{
+	int i;
+
+	for_each_online_cpu(i) {
+		if (cpu_capacity(i) != max_capacity)
+			boost_kick(i);
+	}
+}
+
+int sched_boost(void)
+{
+	return boost_refcount > 0;
+}
+
+int sched_set_boost(int enable)
+{
+	unsigned long flags;
+	int ret = 0;
+	int old_refcount;
+
+	spin_lock_irqsave(&boost_lock, flags);
+
+	old_refcount = boost_refcount;
+
+	if (enable == 1) {
+		boost_refcount++;
+	} else if (!enable) {
+		if (boost_refcount >= 1)
+			boost_refcount--;
+		else
+			ret = -EINVAL;
+	} else {
+		ret = -EINVAL;
+	}
+
+	if (!old_refcount && boost_refcount)
+		boost_kick_cpus();
+
+	trace_sched_set_boost(boost_refcount);
+	spin_unlock_irqrestore(&boost_lock, flags);
+
+	return ret;
+}
+
+int sched_boost_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+
+	mutex_lock(&boost_mutex);
+	if (!write)
+		sysctl_sched_boost = sched_boost();
+
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		goto done;
+
+	ret = (sysctl_sched_boost <= 1) ?
+		sched_set_boost(sysctl_sched_boost) : -EINVAL;
+
+done:
+	mutex_unlock(&boost_mutex);
+	return ret;
+}
+
+/*
+ * A task will fit on a cpu if its bandwidth consumption on that cpu
+ * is less than sched_upmigrate. A big task that was previously
+ * "up" migrated will be considered fitting on a "little" cpu if its
+ * bandwidth consumption on the "little" cpu is less than
+ * sched_downmigrate. This helps avoid frequent migrations for
+ * tasks with load close to the upmigrate threshold.
+ */
+int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+			      enum sched_boost_type boost_type)
+{
+	int upmigrate;
+
+	if (cpu_capacity(cpu) == max_capacity)
+		return 1;
+
+	if (boost_type != SCHED_BOOST_ON_BIG) {
+		if (task_nice(p) > SCHED_UPMIGRATE_MIN_NICE ||
+		    upmigrate_discouraged(p))
+			return 1;
+
+		upmigrate = sched_upmigrate;
+		if (cpu_capacity(task_cpu(p)) > cpu_capacity(cpu))
+			upmigrate = sched_downmigrate;
+
+		if (task_load < upmigrate)
+			return 1;
+	}
+
+	return 0;
+}
+
+enum sched_boost_type sched_boost_type(void)
+{
+	if (sched_boost()) {
+		if (min_possible_efficiency != max_possible_efficiency)
+			return SCHED_BOOST_ON_BIG;
+		else
+			return SCHED_BOOST_ON_ALL;
+	}
+	return SCHED_BOOST_NONE;
+}
+
+int task_will_fit(struct task_struct *p, int cpu)
+{
+	u64 tload = scale_load_to_cpu(task_load(p), cpu);
+
+	return task_load_will_fit(p, tload, cpu, sched_boost_type());
+}
+
+int group_will_fit(struct sched_cluster *cluster,
+		 struct related_thread_group *grp, u64 demand)
+{
+	int cpu = cluster_first_cpu(cluster);
+	int prev_capacity = 0;
+	unsigned int threshold = sched_upmigrate;
+	u64 load;
+
+	if (cluster->capacity == max_capacity)
+		return 1;
+
+	if (grp->preferred_cluster)
+		prev_capacity = grp->preferred_cluster->capacity;
+
+	if (cluster->capacity < prev_capacity)
+		threshold = sched_downmigrate;
+
+	load = scale_load_to_cpu(demand, cpu);
+	if (load < threshold)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Return the cost of running task p on CPU cpu. This function
+ * currently assumes that task p is the only task which will run on
+ * the CPU.
+ */
+unsigned int power_cost(int cpu, u64 demand)
+{
+	int first, mid, last;
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+	struct cpu_pstate_pwr *costs;
+	struct freq_max_load *max_load;
+	int total_static_pwr_cost = 0;
+	struct rq *rq = cpu_rq(cpu);
+	unsigned int pc;
+
+	if (!per_cpu_info || !per_cpu_info[cpu].ptable)
+		/*
+		 * When power aware scheduling is not in use, or CPU
+		 * power data is not available, just use the CPU
+		 * capacity as a rough stand-in for real CPU power
+		 * numbers, assuming bigger CPUs are more power
+		 * hungry.
+		 */
+		return cpu_max_possible_capacity(cpu);
+
+	rcu_read_lock();
+	max_load = rcu_dereference(per_cpu(freq_max_load, cpu));
+	if (!max_load) {
+		pc = cpu_max_possible_capacity(cpu);
+		goto unlock;
+	}
+
+	costs = per_cpu_info[cpu].ptable;
+
+	if (demand <= max_load->freqs[0].hdemand) {
+		pc = costs[0].power;
+		goto unlock;
+	} else if (demand > max_load->freqs[max_load->length - 1].hdemand) {
+		pc = costs[max_load->length - 1].power;
+		goto unlock;
+	}
+
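+	/*
+	 * Binary search for the lowest frequency level whose headroom-adjusted
+	 * demand (hdemand) covers the requested demand; its power value is
+	 * used as the cost.
+	 */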
+	first = 0;
+	last = max_load->length - 1;
+	mid = (last - first) >> 1;
+	while (1) {
+		if (demand <= max_load->freqs[mid].hdemand)
+			last = mid;
+		else
+			first = mid;
+
+		if (last - first == 1)
+			break;
+		mid = first + ((last - first) >> 1);
+	}
+
+	pc = costs[last].power;
+
+unlock:
+	rcu_read_unlock();
+
+	if (idle_cpu(cpu) && rq->cstate) {
+		total_static_pwr_cost += rq->static_cpu_pwr_cost;
+		if (rq->cluster->dstate)
+			total_static_pwr_cost +=
+				rq->cluster->static_cluster_pwr_cost;
+	}
+
+	return pc + total_static_pwr_cost;
+}
+
+void inc_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (is_big_task(p))
+		stats->nr_big_tasks++;
+}
+
+void dec_nr_big_task(struct hmp_sched_stats *stats, struct task_struct *p)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	if (is_big_task(p))
+		stats->nr_big_tasks--;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+void inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+	inc_nr_big_task(&rq->hmp_stats, p);
+	if (change_cra)
+		inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+void dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra)
+{
+	dec_nr_big_task(&rq->hmp_stats, p);
+	if (change_cra)
+		dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void reset_hmp_stats(struct hmp_sched_stats *stats, int reset_cra)
+{
+	stats->nr_big_tasks = 0;
+	if (reset_cra) {
+		stats->cumulative_runnable_avg = 0;
+		stats->pred_demands_sum = 0;
+	}
+}
+
+/*
+ * Invoked from three places:
+ *	1) try_to_wake_up() -> ... -> select_best_cpu()
+ *	2) scheduler_tick() -> ... -> migration_needed() -> select_best_cpu()
+ *	3) can_migrate_task()
+ *
+ * It's safe to de-reference p->grp in the first case (since p->pi_lock is
+ * held) but not in the other cases. p->grp is hence freed after an RCU grace
+ * period and accessed under rcu_read_lock().
+ */
+int preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+	struct related_thread_group *grp;
+	int rc = 0;
+
+	rcu_read_lock();
+
+	grp = task_related_thread_group(p);
+	if (!grp || !sysctl_sched_enable_colocation)
+		rc = 1;
+	else
+		rc = (grp->preferred_cluster == cluster);
+
+	rcu_read_unlock();
+	return rc;
+}
+
+struct sched_cluster *rq_cluster(struct rq *rq)
+{
+	return rq->cluster;
+}
+
+/*
+ * reset_cpu_hmp_stats - reset HMP stats for a cpu
+ *	nr_big_tasks
+ *	cumulative_runnable_avg (iff reset_cra is true)
+ */
+void reset_cpu_hmp_stats(int cpu, int reset_cra)
+{
+	reset_cfs_rq_hmp_stats(cpu, reset_cra);
+	reset_hmp_stats(&cpu_rq(cpu)->hmp_stats, reset_cra);
+}
+
+void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+				struct task_struct *p, s64 delta)
+{
+	u64 new_task_load;
+	u64 old_task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	old_task_load = scale_load_to_cpu(task_load(p), task_cpu(p));
+	new_task_load = scale_load_to_cpu(delta + task_load(p), task_cpu(p));
+
+	if (__is_big_task(p, old_task_load) && !__is_big_task(p, new_task_load))
+		stats->nr_big_tasks--;
+	else if (!__is_big_task(p, old_task_load) &&
+		 __is_big_task(p, new_task_load))
+		stats->nr_big_tasks++;
+
+	BUG_ON(stats->nr_big_tasks < 0);
+}
+
+/*
+ * Walk the runqueue of a cpu and re-initialize its 'nr_big_tasks' counter.
+ */
+static void update_nr_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *p;
+
+	/* Do not reset cumulative_runnable_avg */
+	reset_cpu_hmp_stats(cpu, 0);
+
+	list_for_each_entry(p, &rq->cfs_tasks, se.group_node)
+		inc_hmp_sched_stats_fair(rq, p, 0);
+}
+
+/* Disable interrupts and grab runqueue lock of all cpus listed in @cpus */
+void pre_big_task_count_change(const struct cpumask *cpus)
+{
+	int i;
+
+	local_irq_disable();
+
+	for_each_cpu(i, cpus)
+		raw_spin_lock(&cpu_rq(i)->lock);
+}
+
+/*
+ * Reinitialize 'nr_big_tasks' counters on all affected cpus
+ */
+void post_big_task_count_change(const struct cpumask *cpus)
+{
+	int i;
+
+	/* Assumes local_irq_disable() keeps online cpumap stable */
+	for_each_cpu(i, cpus)
+		update_nr_big_tasks(i);
+
+	for_each_cpu(i, cpus)
+		raw_spin_unlock(&cpu_rq(i)->lock);
+
+	local_irq_enable();
+}
+
+DEFINE_MUTEX(policy_mutex);
+
+static inline int invalid_value_freq_input(unsigned int *data)
+{
+	if (data == &sysctl_sched_freq_aggregate)
+		return !(*data == 0 || *data == 1);
+
+	return 0;
+}
+
+static inline int invalid_value(unsigned int *data)
+{
+	unsigned int val = *data;
+
+	if (data == &sysctl_sched_ravg_hist_size)
+		return (val < 2 || val > RAVG_HIST_SIZE_MAX);
+
+	if (data == &sysctl_sched_window_stats_policy)
+		return val >= WINDOW_STATS_INVALID_POLICY;
+
+	return invalid_value_freq_input(data);
+}
+
+/*
+ * Handle "atomic" update of sysctl_sched_window_stats_policy,
+ * sysctl_sched_ravg_hist_size and sysctl_sched_freq_aggregate variables.
+ */
+int sched_window_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	unsigned int *data = (unsigned int *)table->data;
+	unsigned int old_val;
+
+	mutex_lock(&policy_mutex);
+
+	old_val = *data;
+
+	ret = proc_dointvec(table, write, buffer, lenp, ppos);
+	if (ret || !write || (write && (old_val == *data)))
+		goto done;
+
+	if (invalid_value(data)) {
+		*data = old_val;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	reset_all_window_stats(0, 0);
+
+done:
+	mutex_unlock(&policy_mutex);
+
+	return ret;
+}
+
+/*
+ * Convert percentage values into absolute form. This avoids a div() operation
+ * in the fast path that would otherwise be needed to compare task load on a
+ * percentage scale.
+ */
+int sched_hmp_proc_update_handler(struct ctl_table *table, int write,
+		void __user *buffer, size_t *lenp,
+		loff_t *ppos)
+{
+	int ret;
+	unsigned int old_val;
+	unsigned int *data = (unsigned int *)table->data;
+	int update_min_nice = 0;
+
+	mutex_lock(&policy_mutex);
+
+	old_val = *data;
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+	if (ret || !write)
+		goto done;
+
+	if (write && (old_val == *data))
+		goto done;
+
+	if (sysctl_sched_downmigrate_pct > sysctl_sched_upmigrate_pct) {
+		*data = old_val;
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * A change to a big-task tunable requires re-classifying tasks on the
+	 * runqueue as big and setting their counters appropriately. The
+	 * sysctl interface affects secondary variables (*_pct), which are
+	 * then "atomically" carried over to the primary variables. The atomic
+	 * change includes taking the runqueue lock of all online cpus and
+	 * re-initializing their big-task counters based on the new criteria.
+	 */
+	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+		get_online_cpus();
+		pre_big_task_count_change(cpu_online_mask);
+	}
+
+	set_hmp_defaults();
+
+	if ((data == &sysctl_sched_upmigrate_pct || update_min_nice)) {
+		post_big_task_count_change(cpu_online_mask);
+		put_online_cpus();
+	}
+
+done:
+	mutex_unlock(&policy_mutex);
+	return ret;
+}
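+
+/*
+ * Sketch of the update flow, assuming the usual ctl_table registration for
+ * these tunables (not shown in this file): a write to the corresponding
+ * /proc/sys entry lands in sched_hmp_proc_update_handler(), which rejects
+ * downmigrate_pct > upmigrate_pct, recomputes the absolute thresholds via
+ * set_hmp_defaults() and, for the upmigrate tunable, re-classifies big
+ * tasks with pre/post_big_task_count_change() under every online cpu's
+ * rq->lock.
+ */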
+
+inline int nr_big_tasks(struct rq *rq)
+{
+	return rq->hmp_stats.nr_big_tasks;
+}
+
+unsigned int cpu_temp(int cpu)
+{
+	struct cpu_pwr_stats *per_cpu_info = get_cpu_pwr_stats();
+
+	if (per_cpu_info)
+		return per_cpu_info[cpu].temp;
+	else
+		return 0;
+}
+
+void init_new_task_load(struct task_struct *p)
+{
+	int i;
+	u32 init_load_windows = sched_init_task_load_windows;
+	u32 init_load_pct = current->init_load_pct;
+
+	p->init_load_pct = 0;
+	rcu_assign_pointer(p->grp, NULL);
+	INIT_LIST_HEAD(&p->grp_list);
+	memset(&p->ravg, 0, sizeof(struct ravg));
+	p->cpu_cycles = 0;
+
+	if (init_load_pct)
+		init_load_windows = div64_u64((u64)init_load_pct *
+			  (u64)sched_ravg_window, 100);
+
+	p->ravg.demand = init_load_windows;
+	p->ravg.pred_demand = 0;
+	for (i = 0; i < RAVG_HIST_SIZE_MAX; ++i)
+		p->ravg.sum_history[i] = init_load_windows;
+}
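+
+/*
+ * Worked example (a 20ms window is assumed): if the forking task has
+ * init_load_pct = 15, the child starts with p->ravg.demand and every
+ * sum_history[] slot set to 15% of 20ms = 3ms of busy time; otherwise it
+ * inherits the global sched_init_task_load_windows value.
+ */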
+
+/* Return task demand in percentage scale */
+unsigned int pct_task_load(struct task_struct *p)
+{
+	unsigned int load;
+
+	load = div64_u64((u64)task_load(p) * 100, (u64)max_task_load());
+
+	return load;
+}
+
+/*
+ * Return total number of tasks "eligible" to run on highest capacity cpu
+ *
+ * This is simply nr_big_tasks for cpus which are not of max_capacity and
+ * nr_running for cpus of max_capacity
+ */
+unsigned int nr_eligible_big_tasks(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	int nr_big = rq->hmp_stats.nr_big_tasks;
+	int nr = rq->nr_running;
+
+	if (cpu_max_possible_capacity(cpu) != max_possible_capacity)
+		return nr_big;
+
+	return nr;
+}
+
+static inline int exiting_task(struct task_struct *p)
+{
+	return (p->ravg.sum_history[0] == EXITING_TASK_MARKER);
+}
+
+static int __init set_sched_ravg_window(char *str)
+{
+	unsigned int window_size;
+
+	get_option(&str, &window_size);
+
+	if (window_size < MIN_SCHED_RAVG_WINDOW ||
+			window_size > MAX_SCHED_RAVG_WINDOW) {
+		WARN_ON(1);
+		return -EINVAL;
+	}
+
+	sched_ravg_window = window_size;
+	return 0;
+}
+
+early_param("sched_ravg_window", set_sched_ravg_window);
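+
+/*
+ * Usage sketch (value chosen for illustration): booting with
+ * "sched_ravg_window=20000000" on the kernel command line selects a 20ms
+ * window, provided it lies within [MIN_SCHED_RAVG_WINDOW,
+ * MAX_SCHED_RAVG_WINDOW]; out-of-range values are rejected with a warning.
+ */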
+
+static inline void
+update_window_start(struct rq *rq, u64 wallclock)
+{
+	s64 delta;
+	int nr_windows;
+
+	delta = wallclock - rq->window_start;
+	BUG_ON(delta < 0);
+	if (delta < sched_ravg_window)
+		return;
+
+	nr_windows = div64_u64(delta, sched_ravg_window);
+	rq->window_start += (u64)nr_windows * (u64)sched_ravg_window;
+}
+
+#define DIV64_U64_ROUNDUP(X, Y) div64_u64((X) + (Y - 1), Y)
+
+static inline u64 scale_exec_time(u64 delta, struct rq *rq)
+{
+	u32 freq;
+
+	freq = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+	delta = DIV64_U64_ROUNDUP(delta * freq, max_possible_freq);
+	delta *= rq->cluster->exec_scale_factor;
+	delta >>= 10;
+
+	return delta;
+}
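+
+/*
+ * Worked example (numbers assumed): if the measured cycles/time ratio puts
+ * the cpu at half of max_possible_freq and exec_scale_factor is 1024 (no
+ * efficiency scaling), a 10ms raw delta becomes roughly
+ * DIV64_U64_ROUNDUP(10ms * f/2, f) = 5ms, and the "* 1024 >> 10" step
+ * leaves that value unchanged, i.e. 5ms of max-frequency-equivalent time.
+ */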
+
+static inline int cpu_is_waiting_on_io(struct rq *rq)
+{
+	if (!sched_io_is_busy)
+		return 0;
+
+	return atomic_read(&rq->nr_iowait);
+}
+
+/* Does freq_required sufficiently exceed or fall behind cur_freq? */
+static inline int
+nearly_same_freq(unsigned int cur_freq, unsigned int freq_required)
+{
+	int delta = freq_required - cur_freq;
+
+	if (freq_required > cur_freq)
+		return delta < sysctl_sched_freq_inc_notify;
+
+	delta = -delta;
+
+	return delta < sysctl_sched_freq_dec_notify;
+}
+
+/* Convert busy time to frequency equivalent */
+static inline unsigned int load_to_freq(struct rq *rq, u64 load)
+{
+	unsigned int freq;
+
+	load = scale_load_to_cpu(load, cpu_of(rq));
+	load *= 128;
+	load = div64_u64(load, max_task_load());
+
+	freq = load * cpu_max_possible_freq(cpu_of(rq));
+	freq /= 128;
+
+	return freq;
+}
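+
+/*
+ * Worked example (numbers assumed): a load equal to half of max_task_load()
+ * on a cpu whose max possible frequency is 2GHz maps to
+ * (128/2) * 2GHz / 128 = 1GHz, i.e. the frequency at which that busy time
+ * would just fill the window.
+ */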
+
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu);
+
+/*
+ * Return load from all related groups on the given cpu.
+ * Caller must ensure that related_thread_group_lock is held.
+ */
+static void _group_load_in_cpu(int cpu, u64 *grp_load, u64 *new_grp_load)
+{
+	struct related_thread_group *grp;
+
+	for_each_related_thread_group(grp) {
+		struct group_cpu_time *cpu_time;
+
+		cpu_time = _group_cpu_time(grp, cpu);
+		*grp_load += cpu_time->prev_runnable_sum;
+		if (new_grp_load)
+			*new_grp_load += cpu_time->nt_prev_runnable_sum;
+	}
+}
+
+/*
+ * Return load from all related groups in the given frequency domain.
+ * Caller must ensure that related_thread_group_lock is held.
+ */
+static void group_load_in_freq_domain(struct cpumask *cpus,
+				u64 *grp_load, u64 *new_grp_load)
+{
+	struct related_thread_group *grp;
+	int j;
+
+	for_each_related_thread_group(grp) {
+		for_each_cpu(j, cpus) {
+			struct group_cpu_time *cpu_time;
+
+			cpu_time = _group_cpu_time(grp, j);
+			*grp_load += cpu_time->prev_runnable_sum;
+			*new_grp_load += cpu_time->nt_prev_runnable_sum;
+		}
+	}
+}
+
+/*
+ * Should scheduler alert governor for changing frequency?
+ *
+ * @check_pred - evaluate frequency based on the predictive demand
+ * @check_groups - add load from all related groups on given cpu
+ *
+ * check_groups is set to 1 if a "related" task movement/wakeup is triggering
+ * the notification check. To avoid "re-aggregation" of demand in such cases,
+ * we check whether the migrated/woken task's demand (along with the demand
+ * from existing tasks on the cpu) can be met on the target cpu.
+ */
+static int send_notification(struct rq *rq, int check_pred, int check_groups)
+{
+	unsigned int cur_freq, freq_required;
+	unsigned long flags;
+	int rc = 0;
+	u64 group_load = 0, new_load  = 0;
+
+	if (check_pred) {
+		u64 prev = rq->old_busy_time;
+		u64 predicted = rq->hmp_stats.pred_demands_sum;
+
+		if (rq->cluster->cur_freq == cpu_max_freq(cpu_of(rq)))
+			return 0;
+
+		prev = max(prev, rq->old_estimated_time);
+		if (prev > predicted)
+			return 0;
+
+		cur_freq = load_to_freq(rq, prev);
+		freq_required = load_to_freq(rq, predicted);
+
+		if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
+			return 0;
+	} else {
+		read_lock(&related_thread_group_lock);
+		/*
+		 * Protect from concurrent update of rq->prev_runnable_sum and
+		 * group cpu load
+		 */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		if (check_groups)
+			_group_load_in_cpu(cpu_of(rq), &group_load, NULL);
+
+		new_load = rq->prev_runnable_sum + group_load;
+
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
+		read_unlock(&related_thread_group_lock);
+
+		cur_freq = load_to_freq(rq, rq->old_busy_time);
+		freq_required = load_to_freq(rq, new_load);
+
+		if (nearly_same_freq(cur_freq, freq_required))
+			return 0;
+	}
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	if (!rq->cluster->notifier_sent) {
+		rq->cluster->notifier_sent = 1;
+		rc = 1;
+		trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
+				       new_load);
+	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+	return rc;
+}
+
+/* Alert governor if there is a need to change frequency */
+void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
+{
+	int cpu = cpu_of(rq);
+
+	if (!send_notification(rq, check_pred, check_groups))
+		return;
+
+	atomic_notifier_call_chain(
+		&load_alert_notifier_head, 0,
+		(void *)(long)cpu);
+}
+
+void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
+			     struct task_struct *p)
+{
+	bool check_groups;
+
+	rcu_read_lock();
+	check_groups = task_in_related_thread_group(p);
+	rcu_read_unlock();
+
+	if (!same_freq_domain(src_cpu, dest_cpu)) {
+		if (!src_cpu_dead)
+			check_for_freq_change(cpu_rq(src_cpu), false,
+					      check_groups);
+		check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
+	} else {
+		check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
+	}
+}
+
+static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
+				     u64 irqtime, int event)
+{
+	if (is_idle_task(p)) {
+		/* TASK_WAKE && TASK_MIGRATE is not possible on idle task! */
+		if (event == PICK_NEXT_TASK)
+			return 0;
+
+		/* PUT_PREV_TASK, TASK_UPDATE && IRQ_UPDATE are left */
+		return irqtime || cpu_is_waiting_on_io(rq);
+	}
+
+	if (event == TASK_WAKE)
+		return 0;
+
+	if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
+		return 1;
+
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (rq->curr == p)
+			return 1;
+
+		return p->on_rq ? SCHED_FREQ_ACCOUNT_WAIT_TIME : 0;
+	}
+
+	/* TASK_MIGRATE, PICK_NEXT_TASK left */
+	return SCHED_FREQ_ACCOUNT_WAIT_TIME;
+}
+
+static inline bool is_new_task(struct task_struct *p)
+{
+	return p->ravg.active_windows < sysctl_sched_new_task_windows;
+}
+
+#define INC_STEP 8
+#define DEC_STEP 2
+#define CONSISTENT_THRES 16
+#define INC_STEP_BIG 16
+/*
+ * bucket_increase - update the count of all buckets
+ *
+ * @buckets: array of buckets tracking busy time of a task
+ * @idx: the index of bucket to be incremented
+ *
+ * Each time a complete window finishes, the count of the bucket that the
+ * runtime falls in (@idx) is incremented. The counts of all other buckets
+ * are decayed. The rate of increase and decay can differ based on the
+ * current count in the bucket.
+ */
+static inline void bucket_increase(u8 *buckets, int idx)
+{
+	int i, step;
+
+	for (i = 0; i < NUM_BUSY_BUCKETS; i++) {
+		if (idx != i) {
+			if (buckets[i] > DEC_STEP)
+				buckets[i] -= DEC_STEP;
+			else
+				buckets[i] = 0;
+		} else {
+			step = buckets[i] >= CONSISTENT_THRES ?
+						INC_STEP_BIG : INC_STEP;
+			if (buckets[i] > U8_MAX - step)
+				buckets[i] = U8_MAX;
+			else
+				buckets[i] += step;
+		}
+	}
+}
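+
+/*
+ * Worked example (bucket values assumed): if the hit bucket currently holds
+ * 20 (>= CONSISTENT_THRES) it is bumped by INC_STEP_BIG to 36, while another
+ * bucket holding 5 decays by DEC_STEP to 3 and one holding 1 clamps to 0;
+ * counts saturate at U8_MAX rather than wrapping.
+ */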
+
+static inline int busy_to_bucket(u32 normalized_rt)
+{
+	int bidx;
+
+	bidx = mult_frac(normalized_rt, NUM_BUSY_BUCKETS, max_task_load());
+	bidx = min(bidx, NUM_BUSY_BUCKETS - 1);
+
+	/*
+	 * Combine the lowest two buckets. The lowest frequency falls into
+	 * the 2nd bucket, so predicting the lowest bucket is not
+	 * useful.
+	 */
+	if (!bidx)
+		bidx++;
+
+	return bidx;
+}
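+
+/*
+ * Example mapping (NUM_BUSY_BUCKETS assumed to be 10): a normalized runtime
+ * of 25% of max_task_load() lands in bucket 2, while anything that would
+ * land in bucket 0 is promoted to bucket 1 because the lowest two buckets
+ * are merged.
+ */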
+
+static inline u64
+scale_load_to_freq(u64 load, unsigned int src_freq, unsigned int dst_freq)
+{
+	return div64_u64(load * (u64)src_freq, (u64)dst_freq);
+}
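+
+/*
+ * Worked example (frequencies assumed): 4ms of busy time measured with the
+ * cpu at src_freq = 1500MHz corresponds to 4ms * 1500 / 1000 = 6ms when
+ * expressed relative to dst_freq = 1000MHz.
+ */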
+
+#define HEAVY_TASK_SKIP 2
+#define HEAVY_TASK_SKIP_LIMIT 4
+/*
+ * get_pred_busy - calculate predicted demand for a task on runqueue
+ *
+ * @rq: runqueue of task p
+ * @p: task whose prediction is being updated
+ * @start: starting bucket. returned prediction should not be lower than
+ *         this bucket.
+ * @runtime: runtime of the task. returned prediction should not be lower
+ *           than this runtime.
+ * Note: @start can be derived from @runtime. It's passed in only to
+ * avoid duplicated calculation in some cases.
+ *
+ * A new predicted busy time is returned for task @p based on @runtime
+ * passed in. The function searches through buckets that represent busy
+ * time equal to or bigger than @runtime and attempts to find the bucket
+ * to use for prediction. Once found, it searches through historical busy
+ * time and returns the latest that falls into the bucket. If no such busy
+ * time exists, it returns the midpoint of that bucket.
+ */
+static u32 get_pred_busy(struct rq *rq, struct task_struct *p,
+				int start, u32 runtime)
+{
+	int i;
+	u8 *buckets = p->ravg.busy_buckets;
+	u32 *hist = p->ravg.sum_history;
+	u32 dmin, dmax;
+	u64 cur_freq_runtime = 0;
+	int first = NUM_BUSY_BUCKETS, final, skip_to;
+	u32 ret = runtime;
+
+	/* skip prediction for new tasks due to lack of history */
+	if (unlikely(is_new_task(p)))
+		goto out;
+
+	/* find minimal bucket index to pick */
+	for (i = start; i < NUM_BUSY_BUCKETS; i++) {
+		if (buckets[i]) {
+			first = i;
+			break;
+		}
+	}
+	/* if no higher buckets are filled, predict runtime */
+	if (first >= NUM_BUSY_BUCKETS)
+		goto out;
+
+	/* compute the bucket for prediction */
+	final = first;
+	if (first < HEAVY_TASK_SKIP_LIMIT) {
+		/* compute runtime at current CPU frequency */
+		cur_freq_runtime = mult_frac(runtime, max_possible_efficiency,
+					     rq->cluster->efficiency);
+		cur_freq_runtime = scale_load_to_freq(cur_freq_runtime,
+				max_possible_freq, rq->cluster->cur_freq);
+		/*
+		 * if the task runs for majority of the window, try to
+		 * pick higher buckets.
+		 */
+		if (cur_freq_runtime >= sched_major_task_runtime) {
+			int next = NUM_BUSY_BUCKETS;
+			/*
+			 * if there is a higher bucket that's consistently
+			 * hit, don't jump beyond that.
+			 */
+			for (i = start + 1; i <= HEAVY_TASK_SKIP_LIMIT &&
+			     i < NUM_BUSY_BUCKETS; i++) {
+				if (buckets[i] > CONSISTENT_THRES) {
+					next = i;
+					break;
+				}
+			}
+			skip_to = min(next, start + HEAVY_TASK_SKIP);
+			/* don't jump beyond HEAVY_TASK_SKIP_LIMIT */
+			skip_to = min(HEAVY_TASK_SKIP_LIMIT, skip_to);
+			/* don't go below first non-empty bucket, if any */
+			final = max(first, skip_to);
+		}
+	}
+
+	/* determine demand range for the predicted bucket */
+	if (final < 2) {
+		/* lowest two buckets are combined */
+		dmin = 0;
+		final = 1;
+	} else {
+		dmin = mult_frac(final, max_task_load(), NUM_BUSY_BUCKETS);
+	}
+	dmax = mult_frac(final + 1, max_task_load(), NUM_BUSY_BUCKETS);
+
+	/*
+	 * search through runtime history and return first runtime that falls
+	 * into the range of predicted bucket.
+	 */
+	for (i = 0; i < sched_ravg_hist_size; i++) {
+		if (hist[i] >= dmin && hist[i] < dmax) {
+			ret = hist[i];
+			break;
+		}
+	}
+	/* no historical runtime within bucket found, use the bucket midpoint */
+	if (ret < dmin)
+		ret = (dmin + dmax) / 2;
+	/*
+	 * When updating in the middle of a window, runtime could be higher
+	 * than all recorded history. Always predict at least runtime.
+	 */
+	ret = max(runtime, ret);
+out:
+	trace_sched_update_pred_demand(rq, p, runtime,
+		mult_frac((unsigned int)cur_freq_runtime, 100,
+			  sched_ravg_window), ret);
+	return ret;
+}
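+
+/*
+ * Worked example (NUM_BUSY_BUCKETS assumed to be 10 and max_task_load()
+ * assumed to be 20ms): predicting from bucket 4 gives a demand range of
+ * [8ms, 10ms); the most recent sum_history[] entry inside that range is
+ * returned, the 9ms midpoint is used when no history matches, and the
+ * result is never below the @runtime passed in.
+ */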
+
+static inline u32 calc_pred_demand(struct rq *rq, struct task_struct *p)
+{
+	if (p->ravg.pred_demand >= p->ravg.curr_window)
+		return p->ravg.pred_demand;
+
+	return get_pred_busy(rq, p, busy_to_bucket(p->ravg.curr_window),
+			     p->ravg.curr_window);
+}
+
+/*
+ * Predictive demand of a task is calculated at window roll-over.
+ * If the task's current window busy time exceeds the predicted
+ * demand, update it here to reflect the task's needs.
+ */
+void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
+{
+	u32 new, old;
+
+	if (is_idle_task(p) || exiting_task(p))
+		return;
+
+	if (event != PUT_PREV_TASK && event != TASK_UPDATE &&
+			(!SCHED_FREQ_ACCOUNT_WAIT_TIME ||
+			 (event != TASK_MIGRATE &&
+			 event != PICK_NEXT_TASK)))
+		return;
+
+	/*
+	 * TASK_UPDATE can be called on a sleeping task, when it is moved
+	 * between related groups.
+	 */
+	if (event == TASK_UPDATE) {
+		if (!p->on_rq && !SCHED_FREQ_ACCOUNT_WAIT_TIME)
+			return;
+	}
+
+	new = calc_pred_demand(rq, p);
+	old = p->ravg.pred_demand;
+
+	if (old >= new)
+		return;
+
+	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+				!p->dl.dl_throttled))
+		p->sched_class->fixup_hmp_sched_stats(rq, p,
+				p->ravg.demand,
+				new);
+
+	p->ravg.pred_demand = new;
+}
+
+/*
+ * Account cpu activity in its busy time counters (rq->curr/prev_runnable_sum)
+ */
+static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
+				 int event, u64 wallclock, u64 irqtime)
+{
+	int new_window, full_window = 0;
+	int p_is_curr_task = (p == rq->curr);
+	u64 mark_start = p->ravg.mark_start;
+	u64 window_start = rq->window_start;
+	u32 window_size = sched_ravg_window;
+	u64 delta;
+	u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+	u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+	u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+	u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+	int flip_counters = 0;
+	int prev_sum_reset = 0;
+	bool new_task;
+	struct related_thread_group *grp;
+
+	new_window = mark_start < window_start;
+	if (new_window) {
+		full_window = (window_start - mark_start) >= window_size;
+		if (p->ravg.active_windows < USHRT_MAX)
+			p->ravg.active_windows++;
+	}
+
+	new_task = is_new_task(p);
+
+	grp = p->grp;
+	if (grp && sched_freq_aggregate) {
+		/* cpu_time protected by rq_lock */
+		struct group_cpu_time *cpu_time =
+			_group_cpu_time(grp, cpu_of(rq));
+
+		curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+		nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		if (cpu_time->window_start != rq->window_start) {
+			int nr_windows;
+
+			delta = rq->window_start - cpu_time->window_start;
+			nr_windows = div64_u64(delta, window_size);
+			if (nr_windows > 1)
+				prev_sum_reset = 1;
+
+			cpu_time->window_start = rq->window_start;
+			flip_counters = 1;
+		}
+
+		if (p_is_curr_task && new_window) {
+			u64 curr_sum = rq->curr_runnable_sum;
+			u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+
+			if (full_window)
+				curr_sum = nt_curr_sum = 0;
+
+			rq->prev_runnable_sum = curr_sum;
+			rq->nt_prev_runnable_sum = nt_curr_sum;
+
+			rq->curr_runnable_sum = 0;
+			rq->nt_curr_runnable_sum = 0;
+		}
+	} else {
+		if (p_is_curr_task && new_window) {
+			flip_counters = 1;
+			if (full_window)
+				prev_sum_reset = 1;
+		}
+	}
+
+	/*
+	 * Handle per-task window rollover. We don't care about the idle
+	 * task or exiting tasks.
+	 */
+	if (new_window && !is_idle_task(p) && !exiting_task(p)) {
+		u32 curr_window = 0;
+
+		if (!full_window)
+			curr_window = p->ravg.curr_window;
+
+		p->ravg.prev_window = curr_window;
+		p->ravg.curr_window = 0;
+	}
+
+	if (flip_counters) {
+		u64 curr_sum = *curr_runnable_sum;
+		u64 nt_curr_sum = *nt_curr_runnable_sum;
+
+		if (prev_sum_reset)
+			curr_sum = nt_curr_sum = 0;
+
+		*prev_runnable_sum = curr_sum;
+		*nt_prev_runnable_sum = nt_curr_sum;
+
+		*curr_runnable_sum = 0;
+		*nt_curr_runnable_sum = 0;
+	}
+
+	if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
+		/*
+		 * account_busy_for_cpu_time() = 0, so no update to the
+		 * task's current window needs to be made. This could be
+		 * for example
+		 *
+		 *   - a wakeup event on a task within the current
+		 *     window (!new_window below, no action required),
+		 *   - switching to a new task from idle (PICK_NEXT_TASK)
+		 *     in a new window where irqtime is 0 and we aren't
+		 *     waiting on IO
+		 */
+
+		if (!new_window)
+			return;
+
+		/*
+		 * A new window has started. The RQ demand must be rolled
+		 * over if p is the current task.
+		 */
+		if (p_is_curr_task) {
+			/* p is idle task */
+			BUG_ON(p != rq->idle);
+		}
+
+		return;
+	}
+
+	if (!new_window) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. No rollover
+		 * since we didn't start a new window. An example of this is
+		 * when a task starts execution and then sleeps within the
+		 * same window.
+		 */
+
+		if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq))
+			delta = wallclock - mark_start;
+		else
+			delta = irqtime;
+		delta = scale_exec_time(delta, rq);
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
+
+		if (!is_idle_task(p) && !exiting_task(p))
+			p->ravg.curr_window += delta;
+
+		return;
+	}
+
+	if (!p_is_curr_task) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. A new window
+		 * has also started, but p is not the current task, so the
+		 * window is not rolled over - just split up and account
+		 * as necessary into curr and prev. The window is only
+		 * rolled over when a new window is processed for the current
+		 * task.
+		 *
+		 * Irqtime can't be accounted by a task that isn't the
+		 * currently running task.
+		 */
+
+		if (!full_window) {
+			/*
+			 * A full window hasn't elapsed, account partial
+			 * contribution to previous completed window.
+			 */
+			delta = scale_exec_time(window_start - mark_start, rq);
+			if (!exiting_task(p))
+				p->ravg.prev_window += delta;
+		} else {
+			/*
+			 * Since at least one full window has elapsed,
+			 * the contribution to the previous window is the
+			 * full window (window_size).
+			 */
+			delta = scale_exec_time(window_size, rq);
+			if (!exiting_task(p))
+				p->ravg.prev_window = delta;
+		}
+
+		*prev_runnable_sum += delta;
+		if (new_task)
+			*nt_prev_runnable_sum += delta;
+
+		/* Account piece of busy time in the current window. */
+		delta = scale_exec_time(wallclock - window_start, rq);
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
+
+		if (!exiting_task(p))
+			p->ravg.curr_window = delta;
+
+		return;
+	}
+
+	if (!irqtime || !is_idle_task(p) || cpu_is_waiting_on_io(rq)) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. A new window
+		 * has started and p is the current task so rollover is
+		 * needed. If any of these three above conditions are true
+		 * then this busy time can't be accounted as irqtime.
+		 *
+		 * Busy time for the idle task or exiting tasks need not
+		 * be accounted.
+		 *
+		 * An example of this would be a task that starts execution
+		 * and then sleeps once a new window has begun.
+		 */
+
+		if (!full_window) {
+			/*
+			 * A full window hasn't elapsed, account partial
+			 * contribution to previous completed window.
+			 */
+			delta = scale_exec_time(window_start - mark_start, rq);
+			if (!is_idle_task(p) && !exiting_task(p))
+				p->ravg.prev_window += delta;
+		} else {
+			/*
+			 * Since at least one full window has elapsed,
+			 * the contribution to the previous window is the
+			 * full window (window_size).
+			 */
+			delta = scale_exec_time(window_size, rq);
+			if (!is_idle_task(p) && !exiting_task(p))
+				p->ravg.prev_window = delta;
+		}
+
+		/*
+		 * Rollover is done here by overwriting the values in
+		 * prev_runnable_sum and curr_runnable_sum.
+		 */
+		*prev_runnable_sum += delta;
+		if (new_task)
+			*nt_prev_runnable_sum += delta;
+
+		/* Account piece of busy time in the current window. */
+		delta = scale_exec_time(wallclock - window_start, rq);
+		*curr_runnable_sum += delta;
+		if (new_task)
+			*nt_curr_runnable_sum += delta;
+
+		if (!is_idle_task(p) && !exiting_task(p))
+			p->ravg.curr_window = delta;
+
+		return;
+	}
+
+	if (irqtime) {
+		/*
+		 * account_busy_for_cpu_time() = 1 so busy time needs
+		 * to be accounted to the current window. A new window
+		 * has started and p is the current task so rollover is
+		 * needed. The current task must be the idle task because
+		 * irqtime is not accounted for any other task.
+		 *
+		 * Irqtime will be accounted each time we process IRQ activity
+		 * after a period of idleness, so we know the IRQ busy time
+		 * started at wallclock - irqtime.
+		 */
+
+		BUG_ON(!is_idle_task(p));
+		mark_start = wallclock - irqtime;
+
+		/*
+		 * Roll window over. If IRQ busy time was just in the current
+		 * window then that is all that needs to be accounted.
+		 */
+		if (mark_start > window_start) {
+			*curr_runnable_sum = scale_exec_time(irqtime, rq);
+			return;
+		}
+
+		/*
+		 * The IRQ busy time spanned multiple windows. Process the
+		 * busy time preceding the current window start first.
+		 */
+		delta = window_start - mark_start;
+		if (delta > window_size)
+			delta = window_size;
+		delta = scale_exec_time(delta, rq);
+		*prev_runnable_sum += delta;
+
+		/* Process the remaining IRQ busy time in the current window. */
+		delta = wallclock - window_start;
+		rq->curr_runnable_sum = scale_exec_time(delta, rq);
+
+		return;
+	}
+
+	BUG();
+}
+
+static inline u32 predict_and_update_buckets(struct rq *rq,
+			struct task_struct *p, u32 runtime) {
+
+	int bidx;
+	u32 pred_demand;
+
+	bidx = busy_to_bucket(runtime);
+	pred_demand = get_pred_busy(rq, p, bidx, runtime);
+	bucket_increase(p->ravg.busy_buckets, bidx);
+
+	return pred_demand;
+}
+
+static void update_task_cpu_cycles(struct task_struct *p, int cpu)
+{
+	if (use_cycle_counter)
+		p->cpu_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+}
+
+static void
+update_task_rq_cpu_cycles(struct task_struct *p, struct rq *rq, int event,
+			  u64 wallclock, u64 irqtime)
+{
+	u64 cur_cycles;
+	int cpu = cpu_of(rq);
+
+	lockdep_assert_held(&rq->lock);
+
+	if (!use_cycle_counter) {
+		rq->cc.cycles = cpu_cur_freq(cpu);
+		rq->cc.time = 1;
+		return;
+	}
+
+	cur_cycles = cpu_cycle_counter_cb.get_cpu_cycle_counter(cpu);
+
+	/*
+	 * If the current task is the idle task and irqtime == 0, the CPU
+	 * was indeed idle and its cycle counter was probably not
+	 * increasing.  We still need an estimated CPU frequency
+	 * for IO wait time accounting.  Use the previously
+	 * calculated frequency in such a case.
+	 */
+	if (!is_idle_task(rq->curr) || irqtime) {
+		if (unlikely(cur_cycles < p->cpu_cycles))
+			rq->cc.cycles = cur_cycles + (U64_MAX - p->cpu_cycles);
+		else
+			rq->cc.cycles = cur_cycles - p->cpu_cycles;
+		rq->cc.cycles = rq->cc.cycles * NSEC_PER_MSEC;
+
+		if (event == IRQ_UPDATE && is_idle_task(p))
+			/*
+			 * Time between mark_start of idle task and IRQ handler
+			 * entry time is CPU cycle counter stall period.
+			 * Upon IRQ handler entry sched_account_irqstart()
+			 * replenishes idle task's cpu cycle counter so
+			 * rq->cc.cycles now represents increased cycles during
+			 * IRQ handler rather than time between idle entry and
+			 * IRQ exit.  Thus use irqtime as time delta.
+			 */
+			rq->cc.time = irqtime;
+		else
+			rq->cc.time = wallclock - p->ravg.mark_start;
+		BUG_ON((s64)rq->cc.time < 0);
+	}
+
+	p->cpu_cycles = cur_cycles;
+
+	trace_sched_get_task_cpu_cycles(cpu, event, rq->cc.cycles, rq->cc.time);
+}
+
+static int account_busy_for_task_demand(struct task_struct *p, int event)
+{
+	/*
+	 * No need to bother updating task demand for exiting tasks
+	 * or the idle task.
+	 */
+	if (exiting_task(p) || is_idle_task(p))
+		return 0;
+
+	/*
+	 * When a task is waking up it is completing a segment of non-busy
+	 * time. Likewise, if wait time is not treated as busy time, then
+	 * when a task begins to run or is migrated, it is not running and
+	 * is completing a segment of non-busy time.
+	 */
+	if (event == TASK_WAKE || (!SCHED_ACCOUNT_WAIT_TIME &&
+			 (event == PICK_NEXT_TASK || event == TASK_MIGRATE)))
+		return 0;
+
+	return 1;
+}
+
+/*
+ * Called when new window is starting for a task, to record cpu usage over
+ * recently concluded window(s). Normally 'samples' should be 1. It can be > 1
+ * when, say, a real-time task runs without preemption for several windows at a
+ * stretch.
+ */
+static void update_history(struct rq *rq, struct task_struct *p,
+			 u32 runtime, int samples, int event)
+{
+	u32 *hist = &p->ravg.sum_history[0];
+	int ridx, widx;
+	u32 max = 0, avg, demand, pred_demand;
+	u64 sum = 0;
+
+	/* Ignore windows where task had no activity */
+	if (!runtime || is_idle_task(p) || exiting_task(p) || !samples)
+		goto done;
+
+	/* Push new 'runtime' value onto stack */
+	widx = sched_ravg_hist_size - 1;
+	ridx = widx - samples;
+	for (; ridx >= 0; --widx, --ridx) {
+		hist[widx] = hist[ridx];
+		sum += hist[widx];
+		if (hist[widx] > max)
+			max = hist[widx];
+	}
+
+	for (widx = 0; widx < samples && widx < sched_ravg_hist_size; widx++) {
+		hist[widx] = runtime;
+		sum += hist[widx];
+		if (hist[widx] > max)
+			max = hist[widx];
+	}
+
+	p->ravg.sum = 0;
+
+	if (sched_window_stats_policy == WINDOW_STATS_RECENT) {
+		demand = runtime;
+	} else if (sched_window_stats_policy == WINDOW_STATS_MAX) {
+		demand = max;
+	} else {
+		avg = div64_u64(sum, sched_ravg_hist_size);
+		if (sched_window_stats_policy == WINDOW_STATS_AVG)
+			demand = avg;
+		else
+			demand = max(avg, runtime);
+	}
+	pred_demand = predict_and_update_buckets(rq, p, runtime);
+
+	/*
+	 * A throttled deadline sched class task gets dequeued without
+	 * changing p->on_rq. Since the dequeue decrements hmp stats
+	 * avoid decrementing it here again.
+	 */
+	if (task_on_rq_queued(p) && (!task_has_dl_policy(p) ||
+						!p->dl.dl_throttled))
+		p->sched_class->fixup_hmp_sched_stats(rq, p, demand,
+						      pred_demand);
+
+	p->ravg.demand = demand;
+	p->ravg.pred_demand = pred_demand;
+
+done:
+	trace_sched_update_history(rq, p, runtime, samples, event);
+}
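+
+/*
+ * Worked example (history size assumed to be 5, values assumed): with
+ * sum_history = {6, 4, 10, 2, 8} ms after the newest 6ms runtime is pushed,
+ * WINDOW_STATS_RECENT yields a demand of 6ms, WINDOW_STATS_MAX yields 10ms,
+ * WINDOW_STATS_AVG yields 6ms, and the max-of-average-and-recent policy
+ * also yields 6ms since the average does not exceed the latest runtime.
+ */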
+
+static void add_to_task_demand(struct rq *rq, struct task_struct *p, u64 delta)
+{
+	delta = scale_exec_time(delta, rq);
+	p->ravg.sum += delta;
+	if (unlikely(p->ravg.sum > sched_ravg_window))
+		p->ravg.sum = sched_ravg_window;
+}
+
+/*
+ * Account cpu demand of task and/or update task's cpu demand history
+ *
+ * ms = p->ravg.mark_start;
+ * wc = wallclock
+ * ws = rq->window_start
+ *
+ * Three possibilities:
+ *
+ *	a) Task event is contained within one window.
+ *		window_start < mark_start < wallclock
+ *
+ *		ws   ms  wc
+ *		|    |   |
+ *		V    V   V
+ *		|---------------|
+ *
+ *	In this case, p->ravg.sum is updated *iff* event is appropriate
+ *	(ex: event == PUT_PREV_TASK)
+ *
+ *	b) Task event spans two windows.
+ *		mark_start < window_start < wallclock
+ *
+ *		ms   ws   wc
+ *		|    |    |
+ *		V    V    V
+ *		-----|-------------------
+ *
+ *	In this case, p->ravg.sum is updated with (ws - ms) *iff* event
+ *	is appropriate, then a new window sample is recorded followed
+ *	by p->ravg.sum being set to (wc - ws) *iff* event is appropriate.
+ *
+ *	c) Task event spans more than two windows.
+ *
+ *		ms ws_tmp			   ws  wc
+ *		|  |				   |   |
+ *		V  V				   V   V
+ *		---|-------|-------|-------|-------|------
+ *		   |				   |
+ *		   |<------ nr_full_windows ------>|
+ *
+ *	In this case, p->ravg.sum is updated with (ws_tmp - ms) first *iff*
+ *	event is appropriate, window sample of p->ravg.sum is recorded,
+ *	'nr_full_window' samples of window_size is also recorded *iff*
+ *	event is appropriate and finally p->ravg.sum is set to (wc - ws)
+ *	*iff* event is appropriate.
+ *
+ * IMPORTANT : Leave p->ravg.mark_start unchanged, as update_cpu_busy_time()
+ * depends on it!
+ */
+static void update_task_demand(struct task_struct *p, struct rq *rq,
+			       int event, u64 wallclock)
+{
+	u64 mark_start = p->ravg.mark_start;
+	u64 delta, window_start = rq->window_start;
+	int new_window, nr_full_windows;
+	u32 window_size = sched_ravg_window;
+
+	new_window = mark_start < window_start;
+	if (!account_busy_for_task_demand(p, event)) {
+		if (new_window)
+			/*
+			 * If the time isn't being accounted as
+			 * busy time, and a new window started, only the
+			 * previous window needs to be closed out with the
+			 * pre-existing demand. Multiple windows may have
+			 * elapsed, but since empty windows are dropped,
+			 * it is not necessary to account those.
+			 */
+			update_history(rq, p, p->ravg.sum, 1, event);
+		return;
+	}
+
+	if (!new_window) {
+		/*
+		 * The simple case - busy time contained within the existing
+		 * window.
+		 */
+		add_to_task_demand(rq, p, wallclock - mark_start);
+		return;
+	}
+
+	/*
+	 * Busy time spans at least two windows. Temporarily rewind
+	 * window_start to first window boundary after mark_start.
+	 */
+	delta = window_start - mark_start;
+	nr_full_windows = div64_u64(delta, window_size);
+	window_start -= (u64)nr_full_windows * (u64)window_size;
+
+	/* Process (window_start - mark_start) first */
+	add_to_task_demand(rq, p, window_start - mark_start);
+
+	/* Push new sample(s) into task's demand history */
+	update_history(rq, p, p->ravg.sum, 1, event);
+	if (nr_full_windows)
+		update_history(rq, p, scale_exec_time(window_size, rq),
+			       nr_full_windows, event);
+
+	/*
+	 * Roll window_start back to current to process any remainder
+	 * in current window.
+	 */
+	window_start += (u64)nr_full_windows * (u64)window_size;
+
+	/* Process (wallclock - window_start) next */
+	mark_start = window_start;
+	add_to_task_demand(rq, p, wallclock - mark_start);
+}
+
+/* Reflect task activity on its demand and cpu's busy time statistics */
+void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime)
+{
+	if (!rq->window_start || sched_disable_window_stats)
+		return;
+
+	lockdep_assert_held(&rq->lock);
+
+	update_window_start(rq, wallclock);
+
+	if (!p->ravg.mark_start) {
+		update_task_cpu_cycles(p, cpu_of(rq));
+		goto done;
+	}
+
+	update_task_rq_cpu_cycles(p, rq, event, wallclock, irqtime);
+	update_task_demand(p, rq, event, wallclock);
+	update_cpu_busy_time(p, rq, event, wallclock, irqtime);
+	update_task_pred_demand(rq, p, event);
+done:
+	trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
+				     rq->cc.cycles, rq->cc.time,
+				     _group_cpu_time(p->grp, cpu_of(rq)));
+
+	p->ravg.mark_start = wallclock;
+}
+
+void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags, nr_windows;
+	u64 cur_jiffies_ts;
+
+	raw_spin_lock_irqsave(&rq->lock, flags);
+
+	/*
+	 * cputime (wallclock) uses sched_clock so use the same here for
+	 * consistency.
+	 */
+	delta += sched_clock() - wallclock;
+	cur_jiffies_ts = get_jiffies_64();
+
+	if (is_idle_task(curr))
+		update_task_ravg(curr, rq, IRQ_UPDATE, sched_ktime_clock(),
+				 delta);
+
+	nr_windows = cur_jiffies_ts - rq->irqload_ts;
+
+	if (nr_windows) {
+		if (nr_windows < 10) {
+			/* Decay CPU's irqload by 3/4 for each window. */
+			rq->avg_irqload *= (3 * nr_windows);
+			rq->avg_irqload = div64_u64(rq->avg_irqload,
+						    4 * nr_windows);
+		} else {
+			rq->avg_irqload = 0;
+		}
+		rq->avg_irqload += rq->cur_irqload;
+		rq->cur_irqload = 0;
+	}
+
+	rq->cur_irqload += delta;
+	rq->irqload_ts = cur_jiffies_ts;
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
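+
+/*
+ * Worked example (values assumed): if at least one jiffy window elapsed
+ * since the last irq accounting (and fewer than 10), avg_irqload is scaled
+ * by 3*nr_windows/(4*nr_windows), i.e. to 3/4 of its old value, and then
+ * the irq time accumulated in cur_irqload is added; after 10 or more
+ * windows the average is dropped and restarted from cur_irqload.
+ */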
+
+void sched_account_irqstart(int cpu, struct task_struct *curr, u64 wallclock)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (!rq->window_start || sched_disable_window_stats)
+		return;
+
+	if (is_idle_task(curr)) {
+		/* We're here without rq->lock held, IRQ disabled */
+		raw_spin_lock(&rq->lock);
+		update_task_cpu_cycles(curr, cpu);
+		raw_spin_unlock(&rq->lock);
+	}
+}
+
+void reset_task_stats(struct task_struct *p)
+{
+	u32 sum = 0;
+
+	if (exiting_task(p))
+		sum = EXITING_TASK_MARKER;
+
+	memset(&p->ravg, 0, sizeof(struct ravg));
+	/* Retain EXITING_TASK marker */
+	p->ravg.sum_history[0] = sum;
+}
+
+void mark_task_starting(struct task_struct *p)
+{
+	u64 wallclock;
+	struct rq *rq = task_rq(p);
+
+	if (!rq->window_start || sched_disable_window_stats) {
+		reset_task_stats(p);
+		return;
+	}
+
+	wallclock = sched_ktime_clock();
+	p->ravg.mark_start = p->last_wake_ts = wallclock;
+	p->last_cpu_selected_ts = wallclock;
+	p->last_switch_out_ts = 0;
+	update_task_cpu_cycles(p, cpu_of(rq));
+}
+
+void set_window_start(struct rq *rq)
+{
+	int cpu = cpu_of(rq);
+	struct rq *sync_rq = cpu_rq(sync_cpu);
+
+	if (rq->window_start)
+		return;
+
+	if (cpu == sync_cpu) {
+		rq->window_start = sched_ktime_clock();
+	} else {
+		raw_spin_unlock(&rq->lock);
+		double_rq_lock(rq, sync_rq);
+		rq->window_start = cpu_rq(sync_cpu)->window_start;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		raw_spin_unlock(&sync_rq->lock);
+	}
+
+	rq->curr->ravg.mark_start = rq->window_start;
+}
+
+void migrate_sync_cpu(int cpu)
+{
+	if (cpu == sync_cpu)
+		sync_cpu = smp_processor_id();
+}
+
+static void reset_all_task_stats(void)
+{
+	struct task_struct *g, *p;
+
+	read_lock(&tasklist_lock);
+	do_each_thread(g, p) {
+		reset_task_stats(p);
+	}  while_each_thread(g, p);
+	read_unlock(&tasklist_lock);
+}
+
+static void disable_window_stats(void)
+{
+	unsigned long flags;
+	int i;
+
+	local_irq_save(flags);
+	for_each_possible_cpu(i)
+		raw_spin_lock(&cpu_rq(i)->lock);
+
+	sched_disable_window_stats = 1;
+
+	for_each_possible_cpu(i)
+		raw_spin_unlock(&cpu_rq(i)->lock);
+
+	local_irq_restore(flags);
+}
+
+/* Called with every cpu's rq->lock held */
+static void enable_window_stats(void)
+{
+	sched_disable_window_stats = 0;
+}
+
+enum reset_reason_code {
+	WINDOW_CHANGE,
+	POLICY_CHANGE,
+	HIST_SIZE_CHANGE,
+	FREQ_AGGREGATE_CHANGE,
+};
+
+const char *sched_window_reset_reasons[] = {
+	"WINDOW_CHANGE",
+	"POLICY_CHANGE",
+	"HIST_SIZE_CHANGE",
+	"FREQ_AGGREGATE_CHANGE",
+};
+
+/* Called with IRQs enabled */
+void reset_all_window_stats(u64 window_start, unsigned int window_size)
+{
+	int cpu;
+	unsigned long flags;
+	u64 start_ts = sched_ktime_clock();
+	int reason = WINDOW_CHANGE;
+	unsigned int old = 0, new = 0;
+	struct related_thread_group *grp;
+
+	disable_window_stats();
+
+	reset_all_task_stats();
+
+	local_irq_save(flags);
+
+	read_lock(&related_thread_group_lock);
+
+	for_each_possible_cpu(cpu)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
+
+	list_for_each_entry(grp, &related_thread_groups, list) {
+		int j;
+
+		for_each_possible_cpu(j) {
+			struct group_cpu_time *cpu_time;
+			/* Protected by rq lock */
+			cpu_time = _group_cpu_time(grp, j);
+			memset(cpu_time, 0, sizeof(struct group_cpu_time));
+			if (window_start)
+				cpu_time->window_start = window_start;
+		}
+	}
+
+	if (window_size) {
+		sched_ravg_window = window_size * TICK_NSEC;
+		set_hmp_defaults();
+	}
+
+	enable_window_stats();
+
+	for_each_possible_cpu(cpu) {
+		struct rq *rq = cpu_rq(cpu);
+
+		if (window_start)
+			rq->window_start = window_start;
+		rq->curr_runnable_sum = rq->prev_runnable_sum = 0;
+		rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
+		reset_cpu_hmp_stats(cpu, 1);
+	}
+
+	if (sched_window_stats_policy != sysctl_sched_window_stats_policy) {
+		reason = POLICY_CHANGE;
+		old = sched_window_stats_policy;
+		new = sysctl_sched_window_stats_policy;
+		sched_window_stats_policy = sysctl_sched_window_stats_policy;
+	} else if (sched_ravg_hist_size != sysctl_sched_ravg_hist_size) {
+		reason = HIST_SIZE_CHANGE;
+		old = sched_ravg_hist_size;
+		new = sysctl_sched_ravg_hist_size;
+		sched_ravg_hist_size = sysctl_sched_ravg_hist_size;
+	} else if (sched_freq_aggregate !=
+					sysctl_sched_freq_aggregate) {
+		reason = FREQ_AGGREGATE_CHANGE;
+		old = sched_freq_aggregate;
+		new = sysctl_sched_freq_aggregate;
+		sched_freq_aggregate = sysctl_sched_freq_aggregate;
+	}
+
+	for_each_possible_cpu(cpu)
+		raw_spin_unlock(&cpu_rq(cpu)->lock);
+
+	read_unlock(&related_thread_group_lock);
+
+	local_irq_restore(flags);
+
+	trace_sched_reset_all_window_stats(window_start, window_size,
+		sched_ktime_clock() - start_ts, reason, old, new);
+}
+
+static inline void
+sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
+
+void sched_get_cpus_busy(struct sched_load *busy,
+			 const struct cpumask *query_cpus)
+{
+	unsigned long flags;
+	struct rq *rq;
+	const int cpus = cpumask_weight(query_cpus);
+	u64 load[cpus], group_load[cpus];
+	u64 nload[cpus], ngload[cpus];
+	u64 pload[cpus];
+	unsigned int cur_freq[cpus], max_freq[cpus];
+	int notifier_sent = 0;
+	int early_detection[cpus];
+	int cpu, i = 0;
+	unsigned int window_size;
+	u64 max_prev_sum = 0;
+	int max_busy_cpu = cpumask_first(query_cpus);
+	struct related_thread_group *grp;
+	u64 total_group_load = 0, total_ngload = 0;
+	bool aggregate_load = false;
+
+	if (unlikely(cpus == 0))
+		return;
+
+	/*
+	 * This function could be called in timer context, and the
+	 * current task may have been executing for a long time. Ensure
+	 * that the window stats are current by doing an update.
+	 */
+	read_lock(&related_thread_group_lock);
+
+	local_irq_save(flags);
+	for_each_cpu(cpu, query_cpus)
+		raw_spin_lock(&cpu_rq(cpu)->lock);
+
+	window_size = sched_ravg_window;
+
+	for_each_cpu(cpu, query_cpus) {
+		rq = cpu_rq(cpu);
+
+		update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
+				 0);
+		cur_freq[i] = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
+
+		load[i] = rq->old_busy_time = rq->prev_runnable_sum;
+		nload[i] = rq->nt_prev_runnable_sum;
+		pload[i] = rq->hmp_stats.pred_demands_sum;
+		rq->old_estimated_time = pload[i];
+
+		if (load[i] > max_prev_sum) {
+			max_prev_sum = load[i];
+			max_busy_cpu = cpu;
+		}
+
+		/*
+		 * sched_get_cpus_busy() is called for all CPUs in a
+		 * frequency domain. So the notifier_sent flag per
+		 * cluster works even when a frequency domain spans
+		 * more than 1 cluster.
+		 */
+		if (rq->cluster->notifier_sent) {
+			notifier_sent = 1;
+			rq->cluster->notifier_sent = 0;
+		}
+		early_detection[i] = (rq->ed_task != NULL);
+		cur_freq[i] = cpu_cur_freq(cpu);
+		max_freq[i] = cpu_max_freq(cpu);
+		i++;
+	}
+
+	for_each_related_thread_group(grp) {
+		for_each_cpu(cpu, query_cpus) {
+			/* Protected by rq_lock */
+			struct group_cpu_time *cpu_time =
+						_group_cpu_time(grp, cpu);
+			sync_window_start(cpu_rq(cpu), cpu_time);
+		}
+	}
+
+	group_load_in_freq_domain(
+			&cpu_rq(max_busy_cpu)->freq_domain_cpumask,
+			&total_group_load, &total_ngload);
+	aggregate_load = !!(total_group_load > sched_freq_aggregate_threshold);
+
+	i = 0;
+	for_each_cpu(cpu, query_cpus) {
+		group_load[i] = 0;
+		ngload[i] = 0;
+
+		if (early_detection[i])
+			goto skip_early;
+
+		rq = cpu_rq(cpu);
+		if (aggregate_load) {
+			if (cpu == max_busy_cpu) {
+				group_load[i] = total_group_load;
+				ngload[i] = total_ngload;
+			}
+		} else {
+			_group_load_in_cpu(cpu, &group_load[i], &ngload[i]);
+		}
+
+		load[i] += group_load[i];
+		nload[i] += ngload[i];
+		/*
+		 * Scale load in reference to cluster max_possible_freq.
+		 *
+		 * Note that scale_load_to_cpu() scales load in reference to
+		 * the cluster max_freq.
+		 */
+		load[i] = scale_load_to_cpu(load[i], cpu);
+		nload[i] = scale_load_to_cpu(nload[i], cpu);
+		pload[i] = scale_load_to_cpu(pload[i], cpu);
+skip_early:
+		i++;
+	}
+
+	for_each_cpu(cpu, query_cpus)
+		raw_spin_unlock(&(cpu_rq(cpu))->lock);
+	local_irq_restore(flags);
+
+	read_unlock(&related_thread_group_lock);
+
+	i = 0;
+	for_each_cpu(cpu, query_cpus) {
+		rq = cpu_rq(cpu);
+
+		if (early_detection[i]) {
+			busy[i].prev_load = div64_u64(sched_ravg_window,
+							NSEC_PER_USEC);
+			busy[i].new_task_load = 0;
+			goto exit_early;
+		}
+
+		/*
+		 * When the load aggregation is controlled by
+		 * sched_freq_aggregate_threshold, allow reporting loads
+		 * greater than 100 @ Fcur to ramp up the frequency
+		 * faster.
+		 */
+		if (notifier_sent || (aggregate_load &&
+					sched_freq_aggregate_threshold)) {
+			load[i] = scale_load_to_freq(load[i], max_freq[i],
+						    cpu_max_possible_freq(cpu));
+			nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+						    cpu_max_possible_freq(cpu));
+		} else {
+			load[i] = scale_load_to_freq(load[i], max_freq[i],
+						     cur_freq[i]);
+			nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+						      cur_freq[i]);
+			if (load[i] > window_size)
+				load[i] = window_size;
+			if (nload[i] > window_size)
+				nload[i] = window_size;
+
+			load[i] = scale_load_to_freq(load[i], cur_freq[i],
+						    cpu_max_possible_freq(cpu));
+			nload[i] = scale_load_to_freq(nload[i], cur_freq[i],
+						    cpu_max_possible_freq(cpu));
+		}
+		pload[i] = scale_load_to_freq(pload[i], max_freq[i],
+					     rq->cluster->max_possible_freq);
+
+		busy[i].prev_load = div64_u64(load[i], NSEC_PER_USEC);
+		busy[i].new_task_load = div64_u64(nload[i], NSEC_PER_USEC);
+		busy[i].predicted_load = div64_u64(pload[i], NSEC_PER_USEC);
+
+exit_early:
+		trace_sched_get_busy(cpu, busy[i].prev_load,
+				     busy[i].new_task_load,
+				     busy[i].predicted_load,
+				     early_detection[i]);
+		i++;
+	}
+}
+
+void sched_set_io_is_busy(int val)
+{
+	sched_io_is_busy = val;
+}
+
+int sched_set_window(u64 window_start, unsigned int window_size)
+{
+	u64 now, cur_jiffies, jiffy_ktime_ns;
+	s64 ws;
+	unsigned long flags;
+
+	if (window_size * TICK_NSEC <  MIN_SCHED_RAVG_WINDOW)
+		return -EINVAL;
+
+	mutex_lock(&policy_mutex);
+
+	/*
+	 * Get a consistent view of ktime, jiffies, and the time
+	 * since the last jiffy (based on last_jiffies_update).
+	 */
+	local_irq_save(flags);
+	cur_jiffies = jiffy_to_ktime_ns(&now, &jiffy_ktime_ns);
+	local_irq_restore(flags);
+
+	/* translate window_start from jiffies to nanoseconds */
+	ws = (window_start - cur_jiffies); /* jiffy difference */
+	ws *= TICK_NSEC;
+	ws += jiffy_ktime_ns;
+
+	/*
+	 * Roll back calculated window start so that it is in
+	 * the past (window stats must have a current window).
+	 */
+	while (ws > now)
+		ws -= (window_size * TICK_NSEC);
+
+	BUG_ON(sched_ktime_clock() < ws);
+
+	reset_all_window_stats(ws, window_size);
+
+	sched_update_freq_max_load(cpu_possible_mask);
+
+	mutex_unlock(&policy_mutex);
+
+	return 0;
+}
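+
+/*
+ * Sketch of the translation (values assumed): with TICK_NSEC = 10ms and a
+ * window_size of 2 ticks, a window_start 3 jiffies ahead of cur_jiffies
+ * becomes ws = 3 * 10ms + jiffy_ktime_ns, and is then walked back in 20ms
+ * steps until it lies in the past, so that a current window always exists.
+ */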
+
+void fixup_busy_time(struct task_struct *p, int new_cpu)
+{
+	struct rq *src_rq = task_rq(p);
+	struct rq *dest_rq = cpu_rq(new_cpu);
+	u64 wallclock;
+	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+	int migrate_type;
+	struct migration_sum_data d;
+	bool new_task;
+	struct related_thread_group *grp;
+
+	if (!p->on_rq && p->state != TASK_WAKING)
+		return;
+
+	if (exiting_task(p)) {
+		clear_ed_task(p, src_rq);
+		return;
+	}
+
+	if (p->state == TASK_WAKING)
+		double_rq_lock(src_rq, dest_rq);
+
+	if (sched_disable_window_stats)
+		goto done;
+
+	wallclock = sched_ktime_clock();
+
+	update_task_ravg(task_rq(p)->curr, task_rq(p),
+			 TASK_UPDATE,
+			 wallclock, 0);
+	update_task_ravg(dest_rq->curr, dest_rq,
+			 TASK_UPDATE, wallclock, 0);
+
+	update_task_ravg(p, task_rq(p), TASK_MIGRATE,
+			 wallclock, 0);
+
+	update_task_cpu_cycles(p, new_cpu);
+
+	new_task = is_new_task(p);
+	/* Protected by rq_lock */
+	grp = p->grp;
+	if (grp && sched_freq_aggregate) {
+		struct group_cpu_time *cpu_time;
+
+		migrate_type = GROUP_TO_GROUP;
+		/* Protected by rq_lock */
+		cpu_time = _group_cpu_time(grp, cpu_of(src_rq));
+		d.src_rq = NULL;
+		d.src_cpu_time = cpu_time;
+		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+		/* Protected by rq_lock */
+		cpu_time = _group_cpu_time(grp, cpu_of(dest_rq));
+		d.dst_rq = NULL;
+		d.dst_cpu_time = cpu_time;
+		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+		sync_window_start(dest_rq, cpu_time);
+	} else {
+		migrate_type = RQ_TO_RQ;
+		d.src_rq = src_rq;
+		d.src_cpu_time = NULL;
+		d.dst_rq = dest_rq;
+		d.dst_cpu_time = NULL;
+		src_curr_runnable_sum = &src_rq->curr_runnable_sum;
+		src_prev_runnable_sum = &src_rq->prev_runnable_sum;
+		src_nt_curr_runnable_sum = &src_rq->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &src_rq->nt_prev_runnable_sum;
+
+		dst_curr_runnable_sum = &dest_rq->curr_runnable_sum;
+		dst_prev_runnable_sum = &dest_rq->prev_runnable_sum;
+		dst_nt_curr_runnable_sum = &dest_rq->nt_curr_runnable_sum;
+		dst_nt_prev_runnable_sum = &dest_rq->nt_prev_runnable_sum;
+	}
+
+	if (p->ravg.curr_window) {
+		*src_curr_runnable_sum -= p->ravg.curr_window;
+		*dst_curr_runnable_sum += p->ravg.curr_window;
+		if (new_task) {
+			*src_nt_curr_runnable_sum -= p->ravg.curr_window;
+			*dst_nt_curr_runnable_sum += p->ravg.curr_window;
+		}
+	}
+
+	if (p->ravg.prev_window) {
+		*src_prev_runnable_sum -= p->ravg.prev_window;
+		*dst_prev_runnable_sum += p->ravg.prev_window;
+		if (new_task) {
+			*src_nt_prev_runnable_sum -= p->ravg.prev_window;
+			*dst_nt_prev_runnable_sum += p->ravg.prev_window;
+		}
+	}
+
+	if (p == src_rq->ed_task) {
+		src_rq->ed_task = NULL;
+		if (!dest_rq->ed_task)
+			dest_rq->ed_task = p;
+	}
+
+	trace_sched_migration_update_sum(p, migrate_type, &d);
+	BUG_ON((s64)*src_prev_runnable_sum < 0);
+	BUG_ON((s64)*src_curr_runnable_sum < 0);
+	BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
+	BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
+
+done:
+	if (p->state == TASK_WAKING)
+		double_rq_unlock(src_rq, dest_rq);
+}
+
+#define sched_up_down_migrate_auto_update 1
+static void check_for_up_down_migrate_update(const struct cpumask *cpus)
+{
+	int i = cpumask_first(cpus);
+
+	if (!sched_up_down_migrate_auto_update)
+		return;
+
+	if (cpu_max_possible_capacity(i) == max_possible_capacity)
+		return;
+
+	if (cpu_max_possible_freq(i) == cpu_max_freq(i))
+		up_down_migrate_scale_factor = 1024;
+	else
+		up_down_migrate_scale_factor = (1024 *
+				 cpu_max_possible_freq(i)) / cpu_max_freq(i);
+
+	update_up_down_migrate();
+}
+
+/* Return cluster which can offer required capacity for group */
+static struct sched_cluster *
+best_cluster(struct related_thread_group *grp, u64 total_demand)
+{
+	struct sched_cluster *cluster = NULL;
+
+	for_each_sched_cluster(cluster) {
+		if (group_will_fit(cluster, grp, total_demand))
+			return cluster;
+	}
+
+	return NULL;
+}
+
+static void _set_preferred_cluster(struct related_thread_group *grp)
+{
+	struct task_struct *p;
+	u64 combined_demand = 0;
+
+	if (!sysctl_sched_enable_colocation) {
+		grp->last_update = sched_ktime_clock();
+		grp->preferred_cluster = NULL;
+		return;
+	}
+
+	/*
+	 * Wakeup of two or more related tasks could race with each other and
+	 * could result in multiple calls to _set_preferred_cluster being issued
+	 * at the same time. Avoid the overhead of rechecking the preferred
+	 * cluster in such cases.
+	 */
+	if (sched_ktime_clock() - grp->last_update < sched_ravg_window / 10)
+		return;
+
+	list_for_each_entry(p, &grp->tasks, grp_list)
+		combined_demand += p->ravg.demand;
+
+	grp->preferred_cluster = best_cluster(grp, combined_demand);
+	grp->last_update = sched_ktime_clock();
+	trace_sched_set_preferred_cluster(grp, combined_demand);
+}
+
+void set_preferred_cluster(struct related_thread_group *grp)
+{
+	raw_spin_lock(&grp->lock);
+	_set_preferred_cluster(grp);
+	raw_spin_unlock(&grp->lock);
+}
+
+#define ADD_TASK	0
+#define REM_TASK	1
+
+static inline void free_group_cputime(struct related_thread_group *grp)
+{
+	free_percpu(grp->cpu_time);
+}
+
+static int alloc_group_cputime(struct related_thread_group *grp)
+{
+	int i;
+	struct group_cpu_time *cpu_time;
+	int cpu = raw_smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+	u64 window_start = rq->window_start;
+
+	grp->cpu_time = alloc_percpu(struct group_cpu_time);
+	if (!grp->cpu_time)
+		return -ENOMEM;
+
+	for_each_possible_cpu(i) {
+		cpu_time = per_cpu_ptr(grp->cpu_time, i);
+		memset(cpu_time, 0, sizeof(struct group_cpu_time));
+		cpu_time->window_start = window_start;
+	}
+
+	return 0;
+}
+
+/*
+ * A group's window_start may be behind. When moving it forward, flip prev/curr
+ * counters. When moving forward by more than one window, the prev counter is
+ * set to 0.
+ */
+static inline void
+sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time)
+{
+	u64 delta;
+	int nr_windows;
+	u64 curr_sum = cpu_time->curr_runnable_sum;
+	u64 nt_curr_sum = cpu_time->nt_curr_runnable_sum;
+
+	delta = rq->window_start - cpu_time->window_start;
+	if (!delta)
+		return;
+
+	nr_windows = div64_u64(delta, sched_ravg_window);
+	if (nr_windows > 1)
+		curr_sum = nt_curr_sum = 0;
+
+	cpu_time->prev_runnable_sum  = curr_sum;
+	cpu_time->curr_runnable_sum  = 0;
+
+	cpu_time->nt_prev_runnable_sum = nt_curr_sum;
+	cpu_time->nt_curr_runnable_sum = 0;
+
+	cpu_time->window_start = rq->window_start;
+}
+
+/*
+ * Task's cpu usage is accounted in:
+ *	rq->curr/prev_runnable_sum,  when its ->grp is NULL
+ *	grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
+ *
+ * Transfer task's cpu usage between those counters when transitioning between
+ * groups
+ */
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+				struct task_struct *p, int event)
+{
+	u64 wallclock;
+	struct group_cpu_time *cpu_time;
+	u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+	u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+	u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+	u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+	struct migration_sum_data d;
+	int migrate_type;
+
+	if (!sched_freq_aggregate)
+		return;
+
+	wallclock = sched_ktime_clock();
+
+	update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+	update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+
+	/* cpu_time protected by related_thread_group_lock, grp->lock and rq_lock */
+	cpu_time = _group_cpu_time(grp, cpu_of(rq));
+	if (event == ADD_TASK) {
+		sync_window_start(rq, cpu_time);
+		migrate_type = RQ_TO_GROUP;
+		d.src_rq = rq;
+		d.src_cpu_time = NULL;
+		d.dst_rq = NULL;
+		d.dst_cpu_time = cpu_time;
+		src_curr_runnable_sum = &rq->curr_runnable_sum;
+		dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		src_prev_runnable_sum = &rq->prev_runnable_sum;
+		dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+		src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+		dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+		dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+	} else {
+		migrate_type = GROUP_TO_RQ;
+		d.src_rq = NULL;
+		d.src_cpu_time = cpu_time;
+		d.dst_rq = rq;
+		d.dst_cpu_time = NULL;
+
+		/*
+		 * In case of REM_TASK, cpu_time->window_start would be
+		 * up to date, because of the update_task_ravg() we called
+		 * above on the moving task. Hence there is no need for
+		 * sync_window_start().
+		 */
+		src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+		dst_curr_runnable_sum = &rq->curr_runnable_sum;
+		src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+		dst_prev_runnable_sum = &rq->prev_runnable_sum;
+
+		src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+		dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+		src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+		dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+	}
+
+	*src_curr_runnable_sum -= p->ravg.curr_window;
+	*dst_curr_runnable_sum += p->ravg.curr_window;
+
+	*src_prev_runnable_sum -= p->ravg.prev_window;
+	*dst_prev_runnable_sum += p->ravg.prev_window;
+
+	if (is_new_task(p)) {
+		*src_nt_curr_runnable_sum -= p->ravg.curr_window;
+		*dst_nt_curr_runnable_sum += p->ravg.curr_window;
+		*src_nt_prev_runnable_sum -= p->ravg.prev_window;
+		*dst_nt_prev_runnable_sum += p->ravg.prev_window;
+	}
+
+	trace_sched_migration_update_sum(p, migrate_type, &d);
+
+	BUG_ON((s64)*src_curr_runnable_sum < 0);
+	BUG_ON((s64)*src_prev_runnable_sum < 0);
+}
+
+static inline struct group_cpu_time *
+task_group_cpu_time(struct task_struct *p, int cpu)
+{
+	return _group_cpu_time(rcu_dereference(p->grp), cpu);
+}
+
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu)
+{
+	return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
+}
+
+struct related_thread_group *alloc_related_thread_group(int group_id)
+{
+	struct related_thread_group *grp;
+
+	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+	if (!grp)
+		return ERR_PTR(-ENOMEM);
+
+	if (alloc_group_cputime(grp)) {
+		kfree(grp);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	grp->id = group_id;
+	INIT_LIST_HEAD(&grp->tasks);
+	INIT_LIST_HEAD(&grp->list);
+	raw_spin_lock_init(&grp->lock);
+
+	return grp;
+}
+
+struct related_thread_group *lookup_related_thread_group(unsigned int group_id)
+{
+	struct related_thread_group *grp;
+
+	list_for_each_entry(grp, &related_thread_groups, list) {
+		if (grp->id == group_id)
+			return grp;
+	}
+
+	return NULL;
+}
+
+/* See comments before preferred_cluster() */
+static void free_related_thread_group(struct rcu_head *rcu)
+{
+	struct related_thread_group *grp = container_of(rcu, struct
+			related_thread_group, rcu);
+
+	free_group_cputime(grp);
+	kfree(grp);
+}
+
+static void remove_task_from_group(struct task_struct *p)
+{
+	struct related_thread_group *grp = p->grp;
+	struct rq *rq;
+	int empty_group = 1;
+	struct rq_flags rf;
+
+	raw_spin_lock(&grp->lock);
+
+	rq = __task_rq_lock(p, &rf);
+	transfer_busy_time(rq, p->grp, p, REM_TASK);
+	list_del_init(&p->grp_list);
+	rcu_assign_pointer(p->grp, NULL);
+	__task_rq_unlock(rq, &rf);
+
+	if (!list_empty(&grp->tasks)) {
+		empty_group = 0;
+		_set_preferred_cluster(grp);
+	}
+
+	raw_spin_unlock(&grp->lock);
+
+	if (empty_group) {
+		list_del(&grp->list);
+		call_rcu(&grp->rcu, free_related_thread_group);
+	}
+}
+
+static int
+add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
+{
+	struct rq *rq;
+	struct rq_flags rf;
+
+	raw_spin_lock(&grp->lock);
+
+	/*
+	 * Change p->grp under rq->lock. This prevents races with read-side
+	 * references to p->grp in various hot paths.
+	 */
+	rq = __task_rq_lock(p, &rf);
+	transfer_busy_time(rq, grp, p, ADD_TASK);
+	list_add(&p->grp_list, &grp->tasks);
+	rcu_assign_pointer(p->grp, grp);
+	__task_rq_unlock(rq, &rf);
+
+	_set_preferred_cluster(grp);
+
+	raw_spin_unlock(&grp->lock);
+
+	return 0;
+}
+
+void add_new_task_to_grp(struct task_struct *new)
+{
+	unsigned long flags;
+	struct related_thread_group *grp;
+	struct task_struct *parent;
+
+	if (!sysctl_sched_enable_thread_grouping)
+		return;
+
+	if (thread_group_leader(new))
+		return;
+
+	parent = new->group_leader;
+
+	/*
+	 * The parent's pi_lock is required here to protect against a race
+	 * with the parent task being removed from the group.
+	 */
+	raw_spin_lock_irqsave(&parent->pi_lock, flags);
+
+	/* protected by pi_lock. */
+	grp = task_related_thread_group(parent);
+	if (!grp) {
+		raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
+		return;
+	}
+	raw_spin_lock(&grp->lock);
+
+	rcu_assign_pointer(new->grp, grp);
+	list_add(&new->grp_list, &grp->tasks);
+
+	raw_spin_unlock(&grp->lock);
+	raw_spin_unlock_irqrestore(&parent->pi_lock, flags);
+}
+
+int sched_set_group_id(struct task_struct *p, unsigned int group_id)
+{
+	int rc = 0, destroy = 0;
+	unsigned long flags;
+	struct related_thread_group *grp = NULL, *new = NULL;
+
+redo:
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+
+	if ((current != p && p->flags & PF_EXITING) ||
+			(!p->grp && !group_id) ||
+			(p->grp && p->grp->id == group_id))
+		goto done;
+
+	write_lock(&related_thread_group_lock);
+
+	if (!group_id) {
+		remove_task_from_group(p);
+		write_unlock(&related_thread_group_lock);
+		goto done;
+	}
+
+	if (p->grp && p->grp->id != group_id)
+		remove_task_from_group(p);
+
+	grp = lookup_related_thread_group(group_id);
+	if (!grp && !new) {
+		/* New group */
+		write_unlock(&related_thread_group_lock);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		new = alloc_related_thread_group(group_id);
+		if (IS_ERR(new))
+			return -ENOMEM;
+		destroy = 1;
+		/* Rerun checks (like task exiting), since we dropped pi_lock */
+		goto redo;
+	} else if (!grp && new) {
+		/* New group - use object allocated before */
+		destroy = 0;
+		list_add(&new->list, &related_thread_groups);
+		grp = new;
+	}
+
+	BUG_ON(!grp);
+	rc = add_task_to_group(p, grp);
+	write_unlock(&related_thread_group_lock);
+done:
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
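+	/*
+	 * If 'new' was allocated but never linked into related_thread_groups
+	 * (e.g. somebody else created the group while the locks were
+	 * dropped), free it here.
+	 */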
+	if (new && destroy) {
+		free_group_cputime(new);
+		kfree(new);
+	}
+
+	return rc;
+}
+
+unsigned int sched_get_group_id(struct task_struct *p)
+{
+	unsigned int group_id;
+	struct related_thread_group *grp;
+
+	rcu_read_lock();
+	grp = task_related_thread_group(p);
+	group_id = grp ? grp->id : 0;
+	rcu_read_unlock();
+
+	return group_id;
+}
+
+static void update_cpu_cluster_capacity(const cpumask_t *cpus)
+{
+	int i;
+	struct sched_cluster *cluster;
+	struct cpumask cpumask;
+
+	cpumask_copy(&cpumask, cpus);
+	pre_big_task_count_change(cpu_possible_mask);
+
+	for_each_cpu(i, &cpumask) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+		cluster->capacity = compute_capacity(cluster);
+		cluster->load_scale_factor = compute_load_scale_factor(cluster);
+
+		/* 'cpus' can span cpus belonging to more than one cluster */
+		check_for_up_down_migrate_update(&cluster->cpus);
+	}
+
+	__update_min_max_capacity();
+
+	post_big_task_count_change(cpu_possible_mask);
+}
+
+static DEFINE_SPINLOCK(cpu_freq_min_max_lock);
+void sched_update_cpu_freq_min_max(const cpumask_t *cpus, u32 fmin, u32 fmax)
+{
+	struct cpumask cpumask;
+	struct sched_cluster *cluster;
+	int i, update_capacity = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cpu_freq_min_max_lock, flags);
+	cpumask_copy(&cpumask, cpus);
+	for_each_cpu(i, &cpumask) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&cpumask, &cpumask, &cluster->cpus);
+
+		update_capacity += (cluster->max_mitigated_freq != fmax);
+		cluster->max_mitigated_freq = fmax;
+	}
+	spin_unlock_irqrestore(&cpu_freq_min_max_lock, flags);
+
+	if (update_capacity)
+		update_cpu_cluster_capacity(cpus);
+}
+
+static int cpufreq_notifier_policy(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	struct cpufreq_policy *policy = (struct cpufreq_policy *)data;
+	struct sched_cluster *cluster = NULL;
+	struct cpumask policy_cluster = *policy->related_cpus;
+	unsigned int orig_max_freq = 0;
+	int i, j, update_capacity = 0;
+
+	if (val != CPUFREQ_NOTIFY && val != CPUFREQ_REMOVE_POLICY &&
+						val != CPUFREQ_CREATE_POLICY)
+		return 0;
+
+	if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
+		update_min_max_capacity();
+		return 0;
+	}
+
+	max_possible_freq = max(max_possible_freq, policy->cpuinfo.max_freq);
+	if (min_max_freq == 1)
+		min_max_freq = UINT_MAX;
+	min_max_freq = min(min_max_freq, policy->cpuinfo.max_freq);
+	BUG_ON(!min_max_freq);
+	BUG_ON(!policy->max);
+
+	for_each_cpu(i, &policy_cluster) {
+		cluster = cpu_rq(i)->cluster;
+		cpumask_andnot(&policy_cluster, &policy_cluster,
+						&cluster->cpus);
+
+		orig_max_freq = cluster->max_freq;
+		cluster->min_freq = policy->min;
+		cluster->max_freq = policy->max;
+		cluster->cur_freq = policy->cur;
+
+		if (!cluster->freq_init_done) {
+			mutex_lock(&cluster_lock);
+			for_each_cpu(j, &cluster->cpus)
+				cpumask_copy(&cpu_rq(j)->freq_domain_cpumask,
+						policy->related_cpus);
+			cluster->max_possible_freq = policy->cpuinfo.max_freq;
+			cluster->max_possible_capacity =
+				compute_max_possible_capacity(cluster);
+			cluster->freq_init_done = true;
+
+			sort_clusters();
+			update_all_clusters_stats();
+			mutex_unlock(&cluster_lock);
+			continue;
+		}
+
+		update_capacity += (orig_max_freq != cluster->max_freq);
+	}
+
+	if (update_capacity)
+		update_cpu_cluster_capacity(policy->related_cpus);
+
+	return 0;
+}
+
+static int cpufreq_notifier_trans(struct notifier_block *nb,
+		unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freq = (struct cpufreq_freqs *)data;
+	unsigned int cpu = freq->cpu, new_freq = freq->new;
+	unsigned long flags;
+	struct sched_cluster *cluster;
+	struct cpumask policy_cpus = cpu_rq(cpu)->freq_domain_cpumask;
+	int i, j;
+
+	if (val != CPUFREQ_POSTCHANGE)
+		return 0;
+
+	BUG_ON(!new_freq);
+
+	if (cpu_cur_freq(cpu) == new_freq)
+		return 0;
+
+	for_each_cpu(i, &policy_cpus) {
+		cluster = cpu_rq(i)->cluster;
+
+		for_each_cpu(j, &cluster->cpus) {
+			struct rq *rq = cpu_rq(j);
+
+			raw_spin_lock_irqsave(&rq->lock, flags);
+			update_task_ravg(rq->curr, rq, TASK_UPDATE,
+						sched_ktime_clock(), 0);
+			raw_spin_unlock_irqrestore(&rq->lock, flags);
+		}
+
+		cluster->cur_freq = new_freq;
+		cpumask_andnot(&policy_cpus, &policy_cpus, &cluster->cpus);
+	}
+
+	return 0;
+}
+
+static int pwr_stats_ready_notifier(struct notifier_block *nb,
+				    unsigned long cpu, void *data)
+{
+	cpumask_t mask = CPU_MASK_NONE;
+
+	cpumask_set_cpu(cpu, &mask);
+	sched_update_freq_max_load(&mask);
+
+	mutex_lock(&cluster_lock);
+	sort_clusters();
+	mutex_unlock(&cluster_lock);
+
+	return 0;
+}
+
+static struct notifier_block notifier_policy_block = {
+	.notifier_call = cpufreq_notifier_policy
+};
+
+static struct notifier_block notifier_trans_block = {
+	.notifier_call = cpufreq_notifier_trans
+};
+
+static struct notifier_block notifier_pwr_stats_ready = {
+	.notifier_call = pwr_stats_ready_notifier
+};
+
+int __weak register_cpu_pwr_stats_ready_notifier(struct notifier_block *nb)
+{
+	return -EINVAL;
+}
+
+static int register_sched_callback(void)
+{
+	int ret;
+
+	ret = cpufreq_register_notifier(&notifier_policy_block,
+						CPUFREQ_POLICY_NOTIFIER);
+
+	if (!ret)
+		ret = cpufreq_register_notifier(&notifier_trans_block,
+						CPUFREQ_TRANSITION_NOTIFIER);
+
+	register_cpu_pwr_stats_ready_notifier(&notifier_pwr_stats_ready);
+
+	return 0;
+}
+
+/*
+ * cpufreq callbacks can be registered at core_initcall or later; any
+ * registration done before that is "forgotten" by cpufreq. See the
+ * initialization of init_cpufreq_transition_notifier_list_called for
+ * further information.
+ */
+core_initcall(register_sched_callback);
+
+int update_preferred_cluster(struct related_thread_group *grp,
+		struct task_struct *p, u32 old_load)
+{
+	u32 new_load = task_load(p);
+
+	if (!grp)
+		return 0;
+
+	/*
+	 * Update if the task's load has changed significantly or a complete
+	 * window has passed since we last updated the preference.
+	 */
+	if (abs(new_load - old_load) > sched_ravg_window / 4 ||
+		sched_ktime_clock() - grp->last_update > sched_ravg_window)
+		return 1;
+
+	return 0;
+}
+
+bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	struct task_struct *p;
+	int loop_max = 10;
+
+	if (!sched_boost() || !rq->cfs.h_nr_running)
+		return 0;
+
+	rq->ed_task = NULL;
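+	/*
+	 * Scan at most loop_max runnable cfs tasks and flag the first one
+	 * whose last wakeup was at least EARLY_DETECTION_DURATION ago.
+	 */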
+	list_for_each_entry(p, &rq->cfs_tasks, se.group_node) {
+		if (!loop_max)
+			break;
+
+		if (wallclock - p->last_wake_ts >= EARLY_DETECTION_DURATION) {
+			rq->ed_task = p;
+			return 1;
+		}
+
+		loop_max--;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_CGROUP_SCHED
+u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+					  struct cftype *cft)
+{
+	struct task_group *tg = css_tg(css);
+
+	return tg->upmigrate_discouraged;
+}
+
+int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cft, u64 upmigrate_discourage)
+{
+	struct task_group *tg = css_tg(css);
+	int discourage = upmigrate_discourage > 0;
+
+	if (tg->upmigrate_discouraged == discourage)
+		return 0;
+
+	/*
+	 * Revisit big-task classification for tasks of this cgroup. It would
+	 * be more efficient to walk only this cgroup's running tasks, but we
+	 * don't have an easy means to do that. Instead, walk all running
+	 * tasks on all cpus and revisit their big-task classification.
+	 */
+	get_online_cpus();
+	pre_big_task_count_change(cpu_online_mask);
+
+	tg->upmigrate_discouraged = discourage;
+
+	post_big_task_count_change(cpu_online_mask);
+	put_online_cpus();
+
+	return 0;
+}
+#endif /* CONFIG_CGROUP_SCHED */
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 5405d3f..13c8818 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -78,6 +78,14 @@
 {
 }
 
+#ifdef CONFIG_SCHED_HMP
+static void
+fixup_hmp_sched_stats_idle(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+}
+#endif
+
 /*
  * Simple, special scheduling class for the per-CPU idle tasks:
  */
@@ -106,4 +114,7 @@
 	.prio_changed		= prio_changed_idle,
 	.switched_to		= switched_to_idle,
 	.update_curr		= update_curr_idle,
+#ifdef CONFIG_SCHED_HMP
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_idle,
+#endif
 };
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 2516b8d..a791486 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -7,6 +7,7 @@
 
 #include <linux/slab.h>
 #include <linux/irq_work.h>
+#include <trace/events/sched.h>
 
 int sched_rr_timeslice = RR_TIMESLICE;
 
@@ -897,6 +898,51 @@
 	return rt_task_of(rt_se)->prio;
 }
 
+static void dump_throttled_rt_tasks(struct rt_rq *rt_rq)
+{
+	struct rt_prio_array *array = &rt_rq->active;
+	struct sched_rt_entity *rt_se;
+	char buf[500];
+	char *pos = buf;
+	char *end = buf + sizeof(buf);
+	int idx;
+
+	pos += snprintf(pos, sizeof(buf),
+		"sched: RT throttling activated for rt_rq %p (cpu %d)\n",
+		rt_rq, cpu_of(rq_of_rt_rq(rt_rq)));
+
+	if (bitmap_empty(array->bitmap, MAX_RT_PRIO))
+		goto out;
+
+	pos += snprintf(pos, end - pos, "potential CPU hogs:\n");
+	idx = sched_find_first_bit(array->bitmap);
+	while (idx < MAX_RT_PRIO) {
+		list_for_each_entry(rt_se, array->queue + idx, run_list) {
+			struct task_struct *p;
+
+			if (!rt_entity_is_task(rt_se))
+				continue;
+
+			p = rt_task_of(rt_se);
+			if (pos < end)
+				pos += snprintf(pos, end - pos, "\t%s (%d)\n",
+					p->comm, p->pid);
+		}
+		idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1);
+	}
+out:
+#ifdef CONFIG_PANIC_ON_RT_THROTTLING
+	/*
+	 * Use pr_err() in the BUG() case since printk_sched() will
+	 * not get flushed and deadlock is not a concern.
+	 */
+	pr_err("%s", buf);
+	BUG();
+#else
+	printk_deferred("%s", buf);
+#endif
+}
+
 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
 {
 	u64 runtime = sched_rt_runtime(rt_rq);
@@ -920,8 +966,14 @@
 		 * but accrue some time due to boosting.
 		 */
 		if (likely(rt_b->rt_runtime)) {
+			static bool once;
+
 			rt_rq->rt_throttled = 1;
-			printk_deferred_once("sched: RT throttling activated\n");
+
+			if (!once) {
+				once = true;
+				dump_throttled_rt_tasks(rt_rq);
+			}
 		} else {
 			/*
 			 * In case we did anyway, make it go away,
@@ -1141,6 +1193,41 @@
 
 #endif /* CONFIG_RT_GROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p,
+			 u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_rt(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 static inline
 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
 {
@@ -1322,6 +1409,7 @@
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags);
+	inc_hmp_sched_stats_rt(rq, p);
 
 	if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1)
 		enqueue_pushable_task(rq, p);
@@ -1333,6 +1421,7 @@
 
 	update_curr_rt(rq);
 	dequeue_rt_entity(rt_se, flags);
+	dec_hmp_sched_stats_rt(rq, p);
 
 	dequeue_pushable_task(rq, p);
 }
@@ -1374,12 +1463,32 @@
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifdef CONFIG_SCHED_HMP
+static int
+select_task_rq_rt_hmp(struct task_struct *p, int cpu, int sd_flag, int flags)
+{
+	int target;
+
+	rcu_read_lock();
+	target = find_lowest_rq(p);
+	if (target != -1)
+		cpu = target;
+	rcu_read_unlock();
+
+	return cpu;
+}
+#endif /* CONFIG_SCHED_HMP */
+
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
 
+#ifdef CONFIG_SCHED_HMP
+	return select_task_rq_rt_hmp(p, cpu, sd_flag, flags);
+#endif
+
 	/* For anything but wake ups, just return the task_cpu */
 	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
 		goto out;
@@ -1617,6 +1726,65 @@
 
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
 
+#ifdef CONFIG_SCHED_HMP
+static int find_lowest_rq_hmp(struct task_struct *task)
+{
+	struct cpumask *lowest_mask = *this_cpu_ptr(&local_cpu_mask);
+	struct cpumask candidate_mask = CPU_MASK_NONE;
+	struct sched_cluster *cluster;
+	int best_cpu = -1;
+	int prev_cpu = task_cpu(task);
+	u64 cpu_load, min_load = ULLONG_MAX;
+	int i;
+	int restrict_cluster = sched_boost() ? 0 :
+				sysctl_sched_restrict_cluster_spill;
+
+	/* Make sure the mask is initialized first */
+	if (unlikely(!lowest_mask))
+		return best_cpu;
+
+	if (task->nr_cpus_allowed == 1)
+		return best_cpu; /* No other targets possible */
+
+	if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
+		return best_cpu; /* No targets found */
+
+	/*
+	 * At this point we have built a mask of cpus representing the
+	 * lowest priority tasks in the system.  Now we want to elect
+	 * the best one based on our affinity and topology.
+	 */
+
+	for_each_sched_cluster(cluster) {
+		cpumask_and(&candidate_mask, &cluster->cpus, lowest_mask);
+
+		if (cpumask_empty(&candidate_mask))
+			continue;
+
+		for_each_cpu(i, &candidate_mask) {
+			if (sched_cpu_high_irqload(i))
+				continue;
+
+			cpu_load = cpu_rq(i)->hmp_stats.cumulative_runnable_avg;
+			if (!restrict_cluster)
+				cpu_load = scale_load_to_cpu(cpu_load, i);
+
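+			/*
+			 * Pick the cpu with the lowest load; on a tie, prefer
+			 * the task's previous cpu, then a cpu sharing a cache
+			 * with it.
+			 */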
+			if (cpu_load < min_load ||
+				(cpu_load == min_load &&
+				(i == prev_cpu || (best_cpu != prev_cpu &&
+				cpus_share_cache(prev_cpu, i))))) {
+				min_load = cpu_load;
+				best_cpu = i;
+			}
+		}
+		if (restrict_cluster && best_cpu != -1)
+			break;
+	}
+
+	return best_cpu;
+}
+#endif	/* CONFIG_SCHED_HMP */
+
 static int find_lowest_rq(struct task_struct *task)
 {
 	struct sched_domain *sd;
@@ -1624,6 +1792,10 @@
 	int this_cpu = smp_processor_id();
 	int cpu      = task_cpu(task);
 
+#ifdef CONFIG_SCHED_HMP
+	return find_lowest_rq_hmp(task);
+#endif
+
 	/* Make sure the mask is initialized first */
 	if (unlikely(!lowest_mask))
 		return -1;
@@ -1841,9 +2013,11 @@
 		goto retry;
 	}
 
+	next_task->on_rq = TASK_ON_RQ_MIGRATING;
 	deactivate_task(rq, next_task, 0);
 	set_task_cpu(next_task, lowest_rq->cpu);
 	activate_task(lowest_rq, next_task, 0);
+	next_task->on_rq = TASK_ON_RQ_QUEUED;
 	ret = 1;
 
 	resched_curr(lowest_rq);
@@ -2095,9 +2269,11 @@
 
 			resched = true;
 
+			p->on_rq = TASK_ON_RQ_MIGRATING;
 			deactivate_task(src_rq, p, 0);
 			set_task_cpu(p, this_cpu);
 			activate_task(this_rq, p, 0);
+			p->on_rq = TASK_ON_RQ_QUEUED;
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
@@ -2352,6 +2528,9 @@
 	.switched_to		= switched_to_rt,
 
 	.update_curr		= update_curr_rt,
+#ifdef CONFIG_SCHED_HMP
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_rt,
+#endif
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 055f935..f6e2bf1 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -255,6 +255,10 @@
 struct task_group {
 	struct cgroup_subsys_state css;
 
+#ifdef CONFIG_SCHED_HMP
+	bool upmigrate_discouraged;
+#endif
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	/* schedulable entities of this group on each cpu */
 	struct sched_entity **se;
@@ -365,12 +369,83 @@
 #endif /* CONFIG_SMP */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
+extern struct task_group *css_tg(struct cgroup_subsys_state *css);
+
 #else /* CONFIG_CGROUP_SCHED */
 
 struct cfs_bandwidth { };
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_SCHED_HMP
+
+struct hmp_sched_stats {
+	int nr_big_tasks;
+	u64 cumulative_runnable_avg;
+	u64 pred_demands_sum;
+};
+
+struct sched_cluster {
+	struct list_head list;
+	struct cpumask cpus;
+	int id;
+	int max_power_cost;
+	int min_power_cost;
+	int max_possible_capacity;
+	int capacity;
+	int efficiency; /* Differentiate cpus with different IPC capability */
+	int load_scale_factor;
+	unsigned int exec_scale_factor;
+	/*
+	 * max_freq = user maximum
+	 * max_mitigated_freq = thermal defined maximum
+	 * max_possible_freq = maximum supported by hardware
+	 */
+	unsigned int cur_freq, max_freq, max_mitigated_freq, min_freq;
+	unsigned int max_possible_freq;
+	bool freq_init_done;
+	int dstate, dstate_wakeup_latency, dstate_wakeup_energy;
+	unsigned int static_cluster_pwr_cost;
+	int notifier_sent;
+};
+
+extern unsigned long all_cluster_ids[];
+
+static inline int cluster_first_cpu(struct sched_cluster *cluster)
+{
+	return cpumask_first(&cluster->cpus);
+}
+
+struct related_thread_group {
+	int id;
+	raw_spinlock_t lock;
+	struct list_head tasks;
+	struct list_head list;
+	struct sched_cluster *preferred_cluster;
+	struct rcu_head rcu;
+	u64 last_update;
+	struct group_cpu_time __percpu *cpu_time;	/* one per cluster */
+};
+
+struct migration_sum_data {
+	struct rq *src_rq, *dst_rq;
+	struct group_cpu_time *src_cpu_time, *dst_cpu_time;
+};
+
+extern struct list_head cluster_head;
+extern int num_clusters;
+extern struct sched_cluster *sched_cluster[NR_CPUS];
+
+struct cpu_cycle {
+	u64 cycles;
+	u64 time;
+};
+
+#define for_each_sched_cluster(cluster) \
+	list_for_each_entry_rcu(cluster, &cluster_head, list)
+
+#endif /* CONFIG_SCHED_HMP */
+
 /* CFS-related fields in a runqueue */
 struct cfs_rq {
 	struct load_weight load;
@@ -439,6 +514,10 @@
 	struct task_group *tg;	/* group that "owns" this runqueue */
 
 #ifdef CONFIG_CFS_BANDWIDTH
+#ifdef CONFIG_SCHED_HMP
+	struct hmp_sched_stats hmp_stats;
+#endif
+
 	int runtime_enabled;
 	u64 runtime_expires;
 	s64 runtime_remaining;
@@ -656,6 +735,7 @@
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
+	struct task_struct *push_task;
 	struct cpu_stop_work active_balance_work;
 	/* cpu of this runqueue: */
 	int cpu;
@@ -672,6 +752,29 @@
 	u64 max_idle_balance_cost;
 #endif
 
+#ifdef CONFIG_SCHED_HMP
+	struct sched_cluster *cluster;
+	struct cpumask freq_domain_cpumask;
+	struct hmp_sched_stats hmp_stats;
+
+	int cstate, wakeup_latency, wakeup_energy;
+	u64 window_start;
+	unsigned long hmp_flags;
+
+	u64 cur_irqload;
+	u64 avg_irqload;
+	u64 irqload_ts;
+	unsigned int static_cpu_pwr_cost;
+	struct task_struct *ed_task;
+	struct cpu_cycle cc;
+	u64 old_busy_time, old_busy_time_group;
+	u64 old_estimated_time;
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+#endif
+
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
 	u64 prev_irq_time;
 #endif
@@ -962,6 +1065,583 @@
 #include "stats.h"
 #include "auto_group.h"
 
+#ifdef CONFIG_SCHED_HMP
+
+#define WINDOW_STATS_RECENT		0
+#define WINDOW_STATS_MAX		1
+#define WINDOW_STATS_MAX_RECENT_AVG	2
+#define WINDOW_STATS_AVG		3
+#define WINDOW_STATS_INVALID_POLICY	4
+
+#define MAJOR_TASK_PCT 85
+#define SCHED_UPMIGRATE_MIN_NICE 15
+#define EXITING_TASK_MARKER	0xdeaddead
+
+#define UP_MIGRATION		1
+#define DOWN_MIGRATION		2
+#define IRQLOAD_MIGRATION	3
+
+extern struct mutex policy_mutex;
+extern unsigned int sched_ravg_window;
+extern unsigned int sched_disable_window_stats;
+extern unsigned int max_possible_freq;
+extern unsigned int min_max_freq;
+extern unsigned int pct_task_load(struct task_struct *p);
+extern unsigned int max_possible_efficiency;
+extern unsigned int min_possible_efficiency;
+extern unsigned int max_capacity;
+extern unsigned int min_capacity;
+extern unsigned int max_load_scale_factor;
+extern unsigned int max_possible_capacity;
+extern unsigned int min_max_possible_capacity;
+extern unsigned int sched_upmigrate;
+extern unsigned int sched_downmigrate;
+extern unsigned int sched_init_task_load_windows;
+extern unsigned int up_down_migrate_scale_factor;
+extern unsigned int sysctl_sched_restrict_cluster_spill;
+extern unsigned int sched_pred_alert_load;
+extern unsigned int sched_major_task_runtime;
+extern struct sched_cluster init_cluster;
+extern unsigned int  __read_mostly sched_short_sleep_task_threshold;
+extern unsigned int  __read_mostly sched_long_cpu_selection_threshold;
+extern unsigned int  __read_mostly sched_big_waker_task_load;
+extern unsigned int  __read_mostly sched_small_wakee_task_load;
+extern unsigned int  __read_mostly sched_spill_load;
+extern unsigned int  __read_mostly sched_upmigrate;
+extern unsigned int  __read_mostly sched_downmigrate;
+extern unsigned int  __read_mostly sysctl_sched_spill_nr_run;
+
+extern void init_new_task_load(struct task_struct *p);
+extern u64 sched_ktime_clock(void);
+extern int got_boost_kick(void);
+extern int register_cpu_cycle_counter_cb(struct cpu_cycle_counter_cb *cb);
+extern void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
+						u64 wallclock, u64 irqtime);
+extern bool early_detection_notify(struct rq *rq, u64 wallclock);
+extern void clear_ed_task(struct task_struct *p, struct rq *rq);
+extern void fixup_busy_time(struct task_struct *p, int new_cpu);
+extern void clear_boost_kick(int cpu);
+extern void clear_hmp_request(int cpu);
+extern void mark_task_starting(struct task_struct *p);
+extern void set_window_start(struct rq *rq);
+extern void migrate_sync_cpu(int cpu);
+extern void update_cluster_topology(void);
+extern void set_task_last_wake(struct task_struct *p, u64 wallclock);
+extern void set_task_last_switch_out(struct task_struct *p, u64 wallclock);
+extern void init_clusters(void);
+extern void reset_cpu_hmp_stats(int cpu, int reset_cra);
+extern unsigned int max_task_load(void);
+extern void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock);
+extern void sched_account_irqstart(int cpu, struct task_struct *curr,
+				   u64 wallclock);
+extern unsigned int cpu_temp(int cpu);
+extern unsigned int nr_eligible_big_tasks(int cpu);
+extern void update_up_down_migrate(void);
+extern int update_preferred_cluster(struct related_thread_group *grp,
+			struct task_struct *p, u32 old_load);
+extern void set_preferred_cluster(struct related_thread_group *grp);
+extern void add_new_task_to_grp(struct task_struct *new);
+
+enum sched_boost_type {
+	SCHED_BOOST_NONE,
+	SCHED_BOOST_ON_BIG,
+	SCHED_BOOST_ON_ALL,
+};
+
+static inline struct sched_cluster *cpu_cluster(int cpu)
+{
+	return cpu_rq(cpu)->cluster;
+}
+
+static inline int cpu_capacity(int cpu)
+{
+	return cpu_rq(cpu)->cluster->capacity;
+}
+
+static inline int cpu_max_possible_capacity(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_possible_capacity;
+}
+
+static inline int cpu_load_scale_factor(int cpu)
+{
+	return cpu_rq(cpu)->cluster->load_scale_factor;
+}
+
+static inline int cpu_efficiency(int cpu)
+{
+	return cpu_rq(cpu)->cluster->efficiency;
+}
+
+static inline unsigned int cpu_cur_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->cur_freq;
+}
+
+static inline unsigned int cpu_min_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->min_freq;
+}
+
+static inline unsigned int cluster_max_freq(struct sched_cluster *cluster)
+{
+	/*
+	 * The governor and the thermal driver don't know each other's
+	 * mitigation vote, so struct sched_cluster saves both and the
+	 * current cluster fmax is the min() of the two.
+	 */
+	return min(cluster->max_mitigated_freq, cluster->max_freq);
+}
+
+static inline unsigned int cpu_max_freq(int cpu)
+{
+	return cluster_max_freq(cpu_rq(cpu)->cluster);
+}
+
+static inline unsigned int cpu_max_possible_freq(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_possible_freq;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu)
+{
+	return cpu_rq(src_cpu)->cluster == cpu_rq(dst_cpu)->cluster;
+}
+
+static inline int cpu_max_power_cost(int cpu)
+{
+	return cpu_rq(cpu)->cluster->max_power_cost;
+}
+
+static inline u32 cpu_cycles_to_freq(u64 cycles, u32 period)
+{
+	return div64_u64(cycles, period);
+}
+
+static inline bool hmp_capable(void)
+{
+	return max_possible_capacity != min_max_possible_capacity;
+}
+
+/*
+ * 'task_load' is expressed relative to the "best cpu" running at its best
+ * frequency. Scale it to the given cpu, accounting for how much weaker that
+ * cpu is than the "best cpu".
+ */
+static inline u64 scale_load_to_cpu(u64 task_load, int cpu)
+{
+	u64 lsf = cpu_load_scale_factor(cpu);
+
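+	/*
+	 * lsf == 1024 means no scaling; a larger lsf (a less capable cpu)
+	 * inflates the task's load proportionally.
+	 */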
+	if (lsf != 1024) {
+		task_load *= lsf;
+		task_load /= 1024;
+	}
+
+	return task_load;
+}
+
+static inline unsigned int task_load(struct task_struct *p)
+{
+	return p->ravg.demand;
+}
+
+static inline void
+inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				 struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg += task_load;
+	stats->pred_demands_sum += p->ravg.pred_demand;
+}
+
+static inline void
+dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+				struct task_struct *p)
+{
+	u32 task_load;
+
+	if (sched_disable_window_stats)
+		return;
+
+	task_load = sched_disable_window_stats ? 0 : p->ravg.demand;
+
+	stats->cumulative_runnable_avg -= task_load;
+
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum -= p->ravg.pred_demand;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+static inline void
+fixup_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+			      struct task_struct *p, s64 task_load_delta,
+			      s64 pred_demand_delta)
+{
+	if (sched_disable_window_stats)
+		return;
+
+	stats->cumulative_runnable_avg += task_load_delta;
+	BUG_ON((s64)stats->cumulative_runnable_avg < 0);
+
+	stats->pred_demands_sum += pred_demand_delta;
+	BUG_ON((s64)stats->pred_demands_sum < 0);
+}
+
+#define pct_to_real(tunable)	\
+		(div64_u64((u64)tunable * (u64)max_task_load(), 100))
+
+#define real_to_pct(tunable)	\
+		(div64_u64((u64)tunable * (u64)100, (u64)max_task_load()))
+
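+/* Window (in jiffies) within which a cpu's irqload is considered recent */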
+#define SCHED_HIGH_IRQ_TIMEOUT 3
+static inline u64 sched_irqload(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	s64 delta;
+
+	delta = get_jiffies_64() - rq->irqload_ts;
+	/*
+	 * The current context can be preempted by an irq, and rq->irqload_ts
+	 * may be updated from irq context, so delta can be negative. That is
+	 * fine: a negative delta simply means an irq occurred very recently,
+	 * so report the average irqload.
+	 */
+
+	if (delta < SCHED_HIGH_IRQ_TIMEOUT)
+		return rq->avg_irqload;
+	else
+		return 0;
+}
+
+static inline int sched_cpu_high_irqload(int cpu)
+{
+	return sched_irqload(cpu) >= sysctl_sched_cpu_high_irqload;
+}
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return rcu_access_pointer(p->grp) != NULL;
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+	return rcu_dereference(p->grp);
+}
+
+#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
+
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+
+extern void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p);
+
+struct group_cpu_time {
+	u64 curr_runnable_sum;
+	u64 prev_runnable_sum;
+	u64 nt_curr_runnable_sum;
+	u64 nt_prev_runnable_sum;
+	u64 window_start;
+};
+
+/* Is frequency of two cpus synchronized with each other? */
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+	struct rq *rq = cpu_rq(src_cpu);
+
+	if (src_cpu == dst_cpu)
+		return 1;
+
+	return cpumask_test_cpu(dst_cpu, &rq->freq_domain_cpumask);
+}
+
+#define	BOOST_KICK	0
+#define	CPU_RESERVED	1
+
+static inline int is_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	return test_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline int mark_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	/* Name boost_flags as hmp_flags? */
+	return test_and_set_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline void clear_reserved(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	clear_bit(CPU_RESERVED, &rq->hmp_flags);
+}
+
+static inline u64 cpu_cravg_sync(int cpu, int sync)
+{
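+	/* min_max_freq == 1 means "not yet set"; widen it before taking the min */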
+	struct rq *rq = cpu_rq(cpu);
+	u64 load;
+
+	load = rq->hmp_stats.cumulative_runnable_avg;
+
+	/*
+	 * If load is being checked in a sync wakeup environment,
+	 * we may want to discount the load of the currently running
+	 * task.
+	 */
+	if (sync && cpu == smp_processor_id()) {
+		if (load > rq->curr->ravg.demand)
+			load -= rq->curr->ravg.demand;
+		else
+			load = 0;
+	}
+
+	return load;
+}
+
+extern void check_for_migration(struct rq *rq, struct task_struct *p);
+extern void pre_big_task_count_change(const struct cpumask *cpus);
+extern void post_big_task_count_change(const struct cpumask *cpus);
+extern void set_hmp_defaults(void);
+extern int power_delta_exceeded(unsigned int cpu_cost, unsigned int base_cost);
+extern unsigned int power_cost(int cpu, u64 demand);
+extern void reset_all_window_stats(u64 window_start, unsigned int window_size);
+extern void boost_kick(int cpu);
+extern int sched_boost(void);
+extern int task_load_will_fit(struct task_struct *p, u64 task_load, int cpu,
+					enum sched_boost_type boost_type);
+extern enum sched_boost_type sched_boost_type(void);
+extern int task_will_fit(struct task_struct *p, int cpu);
+extern int group_will_fit(struct sched_cluster *cluster,
+		 struct related_thread_group *grp, u64 demand);
+extern u64 cpu_load(int cpu);
+extern u64 cpu_load_sync(int cpu, int sync);
+extern int preferred_cluster(struct sched_cluster *cluster,
+						struct task_struct *p);
+extern void inc_nr_big_task(struct hmp_sched_stats *stats,
+					struct task_struct *p);
+extern void dec_nr_big_task(struct hmp_sched_stats *stats,
+					struct task_struct *p);
+extern void inc_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern void dec_rq_hmp_stats(struct rq *rq,
+				struct task_struct *p, int change_cra);
+extern int is_big_task(struct task_struct *p);
+extern int upmigrate_discouraged(struct task_struct *p);
+extern struct sched_cluster *rq_cluster(struct rq *rq);
+extern int nr_big_tasks(struct rq *rq);
+extern void fixup_nr_big_tasks(struct hmp_sched_stats *stats,
+					struct task_struct *p, s64 delta);
+extern void reset_task_stats(struct task_struct *p);
+extern void reset_cfs_rq_hmp_stats(int cpu, int reset_cra);
+extern void inc_hmp_sched_stats_fair(struct rq *rq,
+			struct task_struct *p, int change_cra);
+extern u64 cpu_upmigrate_discourage_read_u64(struct cgroup_subsys_state *css,
+					struct cftype *cft);
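+	/*
+	 * Fold the time spent at the old frequency into each cpu's window
+	 * statistics before recording the new cluster frequency.
+	 */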
+extern int cpu_upmigrate_discourage_write_u64(struct cgroup_subsys_state *css,
+				struct cftype *cft, u64 upmigrate_discourage);
+
+#else	/* CONFIG_SCHED_HMP */
+
+struct hmp_sched_stats;
+struct related_thread_group;
+struct sched_cluster;
+
+static inline int got_boost_kick(void)
+{
+	return 0;
+}
+
+static inline void update_task_ravg(struct task_struct *p, struct rq *rq,
+				int event, u64 wallclock, u64 irqtime) { }
+
+static inline bool early_detection_notify(struct rq *rq, u64 wallclock)
+{
+	return 0;
+}
+
+static inline void clear_ed_task(struct task_struct *p, struct rq *rq) { }
+static inline void fixup_busy_time(struct task_struct *p, int new_cpu) { }
+static inline void clear_boost_kick(int cpu) { }
+static inline void clear_hmp_request(int cpu) { }
+static inline void mark_task_starting(struct task_struct *p) { }
+static inline void set_window_start(struct rq *rq) { }
+static inline void migrate_sync_cpu(int cpu) { }
+static inline void update_cluster_topology(void) { }
+static inline void set_task_last_wake(struct task_struct *p, u64 wallclock) { }
+static inline void set_task_last_switch_out(struct task_struct *p,
+					    u64 wallclock) { }
+
+static inline int task_will_fit(struct task_struct *p, int cpu)
+{
+	return 1;
+}
+
+static inline unsigned int power_cost(int cpu, u64 demand)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline int sched_boost(void)
+{
+	return 0;
+}
+
+static inline int is_big_task(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline int nr_big_tasks(struct rq *rq)
+{
+	return 0;
+}
+
+static inline int is_cpu_throttling_imminent(int cpu)
+{
+	return 0;
+}
+
+static inline int is_task_migration_throttled(struct task_struct *p)
+{
+	return 0;
+}
+
+static inline unsigned int cpu_temp(int cpu)
+{
+	return 0;
+}
+
+static inline void
+inc_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline void
+dec_rq_hmp_stats(struct rq *rq, struct task_struct *p, int change_cra) { }
+
+static inline int
+preferred_cluster(struct sched_cluster *cluster, struct task_struct *p)
+{
+	return 1;
+}
+
+static inline struct sched_cluster *rq_cluster(struct rq *rq)
+{
+	return NULL;
+}
+
+static inline void init_new_task_load(struct task_struct *p) { }
+
+static inline u64 scale_load_to_cpu(u64 load, int cpu)
+{
+	return load;
+}
+
+static inline unsigned int nr_eligible_big_tasks(int cpu)
+{
+	return 0;
+}
+
+static inline int pct_task_load(struct task_struct *p) { return 0; }
+
+static inline int cpu_capacity(int cpu)
+{
+	return SCHED_CAPACITY_SCALE;
+}
+
+static inline int same_cluster(int src_cpu, int dst_cpu) { return 1; }
+
+static inline void inc_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void dec_cumulative_runnable_avg(struct hmp_sched_stats *stats,
+		 struct task_struct *p)
+{
+}
+
+static inline void sched_account_irqtime(int cpu, struct task_struct *curr,
+				 u64 delta, u64 wallclock)
+{
+}
+
+static inline void sched_account_irqstart(int cpu, struct task_struct *curr,
+					  u64 wallclock)
+{
+}
+
+static inline int sched_cpu_high_irqload(int cpu) { return 0; }
+
+static inline void set_preferred_cluster(struct related_thread_group *grp) { }
+
+static inline bool task_in_related_thread_group(struct task_struct *p)
+{
+	return false;
+}
+
+static inline
+struct related_thread_group *task_related_thread_group(struct task_struct *p)
+{
+	return NULL;
+}
+
+static inline u32 task_load(struct task_struct *p) { return 0; }
+
+static inline int update_preferred_cluster(struct related_thread_group *grp,
+			 struct task_struct *p, u32 old_load)
+{
+	return 0;
+}
+
+static inline void add_new_task_to_grp(struct task_struct *new) {}
+
+#define sched_freq_legacy_mode 1
+#define sched_migration_fixup	0
+#define PRED_DEMAND_DELTA (0)
+
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
+
+static inline void notify_migration(int src_cpu, int dest_cpu,
+			bool src_cpu_dead, struct task_struct *p) { }
+
+static inline int same_freq_domain(int src_cpu, int dst_cpu)
+{
+	return 1;
+}
+
+static inline void check_for_migration(struct rq *rq, struct task_struct *p) { }
+static inline void pre_big_task_count_change(void) { }
+static inline void post_big_task_count_change(void) { }
+static inline void set_hmp_defaults(void) { }
+
+static inline void clear_reserved(int cpu) { }
+
+#define trace_sched_cpu_load(...)
+#define trace_sched_cpu_load_lb(...)
+#define trace_sched_cpu_load_cgroup(...)
+#define trace_sched_cpu_load_wakeup(...)
+
+#endif	/* CONFIG_SCHED_HMP */
+
+/*
+ * Returns the rq capacity of any rq in a group. This does not play
+ * well with groups where rq capacity can change independently.
+ */
+#define group_rq_capacity(group) cpu_capacity(group_first_cpu(group))
+
 #ifdef CONFIG_CGROUP_SCHED
 
 /*
@@ -1163,6 +1843,7 @@
 #define WF_SYNC		0x01		/* waker goes to sleep after wakeup */
 #define WF_FORK		0x02		/* child wakeup after fork */
 #define WF_MIGRATED	0x4		/* internal use, task got migrated */
+#define WF_NO_NOTIFIER	0x08		/* do not notify governor */
 
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
@@ -1278,6 +1959,10 @@
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_change_group) (struct task_struct *p, int type);
 #endif
+#ifdef CONFIG_SCHED_HMP
+	void (*fixup_hmp_sched_stats)(struct rq *rq, struct task_struct *p,
+				      u32 new_task_load, u32 new_pred_demand);
+#endif
 };
 
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
@@ -1391,6 +2076,7 @@
 {
 	unsigned prev_nr = rq->nr_running;
 
+	sched_update_nr_prod(cpu_of(rq), count, true);
 	rq->nr_running = prev_nr + count;
 
 	if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1405,6 +2091,7 @@
 
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
+	sched_update_nr_prod(cpu_of(rq), count, false);
 	rq->nr_running -= count;
 	/* Check if we still need preemption */
 	sched_update_tick_dependency(rq);
@@ -1732,6 +2419,9 @@
 	NOHZ_BALANCE_KICK,
 };
 
+#define NOHZ_KICK_ANY 0
+#define NOHZ_KICK_RESTRICT 1
+
 #define nohz_flags(cpu)	(&cpu_rq(cpu)->nohz_flags)
 
 extern void nohz_balance_exit_idle(unsigned int cpu);
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
new file mode 100644
index 0000000..c70e046
--- /dev/null
+++ b/kernel/sched/sched_avg.c
@@ -0,0 +1,128 @@
+/* Copyright (c) 2012, 2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Scheduler hook for average runqueue determination
+ */
+#include <linux/module.h>
+#include <linux/percpu.h>
+#include <linux/hrtimer.h>
+#include <linux/sched.h>
+#include <linux/math64.h>
+
+#include "sched.h"
+#include <trace/events/sched.h>
+
+static DEFINE_PER_CPU(u64, nr_prod_sum);
+static DEFINE_PER_CPU(u64, last_time);
+static DEFINE_PER_CPU(u64, nr_big_prod_sum);
+static DEFINE_PER_CPU(u64, nr);
+
+static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
+static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
+static s64 last_get_time;
+
+/**
+ * sched_get_nr_running_avg
+ * @return: Average nr_running, iowait and nr_big_tasks value since last poll.
+ *	    Returns the average * 100 to provide up to two decimal places
+ *	    of accuracy.
+ *
+ * Obtains the average nr_running value since the last poll.
+ * This function must not be called concurrently with itself.
+ */
+void sched_get_nr_running_avg(int *avg, int *iowait_avg, int *big_avg)
+{
+	int cpu;
+	u64 curr_time = sched_clock();
+	u64 diff = curr_time - last_get_time;
+	u64 tmp_avg = 0, tmp_iowait = 0, tmp_big_avg = 0;
+
+	*avg = 0;
+	*iowait_avg = 0;
+	*big_avg = 0;
+
+	if (!diff)
+		return;
+
+	/* read and reset nr_running counts */
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+
+		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+		curr_time = sched_clock();
+		tmp_avg += per_cpu(nr_prod_sum, cpu);
+		tmp_avg += per_cpu(nr, cpu) *
+			(curr_time - per_cpu(last_time, cpu));
+
+		tmp_big_avg += per_cpu(nr_big_prod_sum, cpu);
+		tmp_big_avg += nr_eligible_big_tasks(cpu) *
+			(curr_time - per_cpu(last_time, cpu));
+
+		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
+		tmp_iowait +=  nr_iowait_cpu(cpu) *
+			(curr_time - per_cpu(last_time, cpu));
+
+		per_cpu(last_time, cpu) = curr_time;
+
+		per_cpu(nr_prod_sum, cpu) = 0;
+		per_cpu(nr_big_prod_sum, cpu) = 0;
+		per_cpu(iowait_prod_sum, cpu) = 0;
+
+		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+	}
+
+	diff = curr_time - last_get_time;
+	last_get_time = curr_time;
+
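+	/*
+	 * Scale by 100 so callers get two decimal places, e.g. an average of
+	 * 1.27 runnable tasks over the period is reported as 127.
+	 */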
+	*avg = (int)div64_u64(tmp_avg * 100, diff);
+	*big_avg = (int)div64_u64(tmp_big_avg * 100, diff);
+	*iowait_avg = (int)div64_u64(tmp_iowait * 100, diff);
+
+	trace_sched_get_nr_running_avg(*avg, *big_avg, *iowait_avg);
+
+	BUG_ON(*avg < 0 || *big_avg < 0 || *iowait_avg < 0);
+	pr_debug("%s - avg:%d big_avg:%d iowait_avg:%d\n",
+				 __func__, *avg, *big_avg, *iowait_avg);
+}
+EXPORT_SYMBOL(sched_get_nr_running_avg);
+
+/**
+ * sched_update_nr_prod
+ * @cpu: The cpu whose nr_running count is being updated.
+ * @delta: Adjust nr by 'delta' amount.
+ * @inc: Whether we are increasing or decreasing the count.
+ * @return: N/A
+ *
+ * Update the running average with the latest nr_running value for @cpu.
+ */
+void sched_update_nr_prod(int cpu, long delta, bool inc)
+{
+	int diff;
+	s64 curr_time;
+	unsigned long flags, nr_running;
+
+	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
+	nr_running = per_cpu(nr, cpu);
+	curr_time = sched_clock();
+	diff = curr_time - per_cpu(last_time, cpu);
+	per_cpu(last_time, cpu) = curr_time;
+	per_cpu(nr, cpu) = nr_running + (inc ? delta : -delta);
+
+	BUG_ON((s64)per_cpu(nr, cpu) < 0);
+
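+	/* Accumulate time-weighted sums for sched_get_nr_running_avg() */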
+	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
+	per_cpu(nr_big_prod_sum, cpu) += nr_eligible_big_tasks(cpu) * diff;
+	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
+	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
+}
+EXPORT_SYMBOL(sched_update_nr_prod);
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 604297a..427bc8f 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -17,6 +17,41 @@
 }
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_SCHED_HMP
+
+static void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+	inc_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p)
+{
+	dec_cumulative_runnable_avg(&rq->hmp_stats, p);
+}
+
+static void
+fixup_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p,
+			   u32 new_task_load, u32 new_pred_demand)
+{
+	s64 task_load_delta = (s64)new_task_load - task_load(p);
+	s64 pred_demand_delta = PRED_DEMAND_DELTA;
+
+	fixup_cumulative_runnable_avg(&rq->hmp_stats, p, task_load_delta,
+				      pred_demand_delta);
+}
+
+#else	/* CONFIG_SCHED_HMP */
+
+static inline void
+inc_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+static inline void
+dec_hmp_sched_stats_stop(struct rq *rq, struct task_struct *p) { }
+
+#endif	/* CONFIG_SCHED_HMP */
+
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
@@ -42,12 +77,14 @@
 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	add_nr_running(rq, 1);
+	inc_hmp_sched_stats_stop(rq, p);
 }
 
 static void
 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags)
 {
 	sub_nr_running(rq, 1);
+	dec_hmp_sched_stats_stop(rq, p);
 }
 
 static void yield_task_stop(struct rq *rq)
@@ -134,4 +171,7 @@
 	.prio_changed		= prio_changed_stop,
 	.switched_to		= switched_to_stop,
 	.update_curr		= update_curr_stop,
+#ifdef CONFIG_SCHED_HMP
+	.fixup_hmp_sched_stats	= fixup_hmp_sched_stats_stop,
+#endif
 };
diff --git a/kernel/smp.c b/kernel/smp.c
index bba3b20..fa362c0 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,6 +33,9 @@
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
 static void flush_smp_call_function_queue(bool warn_cpu_offline);
+/* CPU mask indicating which CPUs to bring online during smp_init() */
+static bool have_boot_cpu_mask;
+static cpumask_var_t boot_cpu_mask;
 
 int smpcfd_prepare_cpu(unsigned int cpu)
 {
@@ -533,6 +536,19 @@
 
 early_param("maxcpus", maxcpus);
 
+static int __init boot_cpus(char *str)
+{
+	alloc_bootmem_cpumask_var(&boot_cpu_mask);
+	if (cpulist_parse(str, boot_cpu_mask) < 0) {
+		pr_warn("SMP: Incorrect boot_cpus cpumask\n");
+		return -EINVAL;
+	}
+	have_boot_cpu_mask = true;
+	return 0;
+}
+
+early_param("boot_cpus", boot_cpus);
+
 /* Setup number of possible processor ids */
 int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
@@ -548,6 +564,20 @@
 	printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
 }
 
+static inline bool boot_cpu(int cpu)
+{
+	if (!have_boot_cpu_mask)
+		return true;
+
+	return cpumask_test_cpu(cpu, boot_cpu_mask);
+}
+
+static inline void free_boot_cpu_mask(void)
+{
+	if (have_boot_cpu_mask)	/* Allocated from boot_cpus() */
+		free_bootmem_cpumask_var(boot_cpu_mask);
+}
+
 /* Called by boot processor to activate the rest. */
 void __init smp_init(void)
 {
@@ -560,10 +590,12 @@
 	for_each_present_cpu(cpu) {
 		if (num_online_cpus() >= setup_max_cpus)
 			break;
-		if (!cpu_online(cpu))
+		if (!cpu_online(cpu) && boot_cpu(cpu))
 			cpu_up(cpu);
 	}
 
+	free_boot_cpu_mask();
+
 	/* Any cleanup work */
 	smp_announce();
 	smp_cpus_done(setup_max_cpus);
diff --git a/kernel/sys.c b/kernel/sys.c
index 89d5be4..4ccf5f0 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -41,6 +41,8 @@
 #include <linux/syscore_ops.h>
 #include <linux/version.h>
 #include <linux/ctype.h>
+#include <linux/mm.h>
+#include <linux/mempolicy.h>
 
 #include <linux/compat.h>
 #include <linux/syscalls.h>
@@ -2072,10 +2074,158 @@
 }
 #endif
 
+#ifdef CONFIG_MMU
+static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
+		struct vm_area_struct **prev,
+		unsigned long start, unsigned long end,
+		const char __user *name_addr)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int error = 0;
+	pgoff_t pgoff;
+
+	if (name_addr == vma_get_anon_name(vma)) {
+		*prev = vma;
+		goto out;
+	}
+
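+	/*
+	 * Try to merge with an adjacent vma first; otherwise split so the
+	 * name applies only to [start, end).
+	 */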
+	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
+	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
+				vma->vm_file, pgoff, vma_policy(vma),
+				vma->vm_userfaultfd_ctx, name_addr);
+	if (*prev) {
+		vma = *prev;
+		goto success;
+	}
+
+	*prev = vma;
+
+	if (start != vma->vm_start) {
+		error = split_vma(mm, vma, start, 1);
+		if (error)
+			goto out;
+	}
+
+	if (end != vma->vm_end) {
+		error = split_vma(mm, vma, end, 0);
+		if (error)
+			goto out;
+	}
+
+success:
+	if (!vma->vm_file)
+		vma->anon_name = name_addr;
+
+out:
+	if (error == -ENOMEM)
+		error = -EAGAIN;
+	return error;
+}
+
+static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
+			unsigned long arg)
+{
+	unsigned long tmp;
+	struct vm_area_struct *vma, *prev;
+	int unmapped_error = 0;
+	int error = -EINVAL;
+
+	/*
+	 * If the interval [start,end) covers some unmapped address
+	 * ranges, just ignore them, but return -ENOMEM at the end.
+	 * - this matches the handling in madvise.
+	 */
+	vma = find_vma_prev(current->mm, start, &prev);
+	if (vma && start > vma->vm_start)
+		prev = vma;
+
+	for (;;) {
+		/* Still start < end. */
+		error = -ENOMEM;
+		if (!vma)
+			return error;
+
+		/* Here start < (end|vma->vm_end). */
+		if (start < vma->vm_start) {
+			unmapped_error = -ENOMEM;
+			start = vma->vm_start;
+			if (start >= end)
+				return error;
+		}
+
+		/* Here vma->vm_start <= start < (end|vma->vm_end) */
+		tmp = vma->vm_end;
+		if (end < tmp)
+			tmp = end;
+
+		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
+		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
+				(const char __user *)arg);
+		if (error)
+			return error;
+		start = tmp;
+		if (prev && start < prev->vm_end)
+			start = prev->vm_end;
+		error = unmapped_error;
+		if (start >= end)
+			return error;
+		if (prev)
+			vma = prev->vm_next;
+		else	/* madvise_remove dropped mmap_sem */
+			vma = find_vma(current->mm, start);
+	}
+}
+
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	struct mm_struct *mm = current->mm;
+	int error;
+	unsigned long len;
+	unsigned long end;
+
+	if (start & ~PAGE_MASK)
+		return -EINVAL;
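+	/* Round len_in up to a whole number of pages */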
+	len = (len_in + ~PAGE_MASK) & PAGE_MASK;
+
+	/* Check whether len was rounded up from a small negative value to zero */
+	if (len_in && !len)
+		return -EINVAL;
+
+	end = start + len;
+	if (end < start)
+		return -EINVAL;
+
+	if (end == start)
+		return 0;
+
+	down_write(&mm->mmap_sem);
+
+	switch (opt) {
+	case PR_SET_VMA_ANON_NAME:
+		error = prctl_set_vma_anon_name(start, end, arg);
+		break;
+	default:
+		error = -EINVAL;
+	}
+
+	up_write(&mm->mmap_sem);
+
+	return error;
+}
+#else /* CONFIG_MMU */
+static int prctl_set_vma(unsigned long opt, unsigned long start,
+		unsigned long len_in, unsigned long arg)
+{
+	return -EINVAL;
+}
+#endif
+
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
 {
 	struct task_struct *me = current;
+	struct task_struct *tsk;
 	unsigned char comm[sizeof(me->comm)];
 	long error;
 
@@ -2221,6 +2371,26 @@
 	case PR_GET_TID_ADDRESS:
 		error = prctl_get_tid_address(me, (int __user **)arg2);
 		break;
+	case PR_SET_TIMERSLACK_PID:
+		if (task_pid_vnr(current) != (pid_t)arg3 &&
+				!capable(CAP_SYS_NICE))
+			return -EPERM;
+		rcu_read_lock();
+		tsk = find_task_by_vpid((pid_t)arg3);
+		if (tsk == NULL) {
+			rcu_read_unlock();
+			return -EINVAL;
+		}
+		get_task_struct(tsk);
+		rcu_read_unlock();
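+		/* A non-positive slack value restores the task's default */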
+		if (arg2 <= 0)
+			tsk->timer_slack_ns =
+				tsk->default_timer_slack_ns;
+		else
+			tsk->timer_slack_ns = arg2;
+		put_task_struct(tsk);
+		error = 0;
+		break;
 	case PR_SET_CHILD_SUBREAPER:
 		me->signal->is_child_subreaper = !!arg2;
 		break;
@@ -2270,6 +2440,9 @@
 	case PR_GET_FP_MODE:
 		error = GET_FP_MODE(me);
 		break;
+	case PR_SET_VMA:
+		error = prctl_set_vma(arg2, arg3, arg4, arg5);
+		break;
 	default:
 		error = -EINVAL;
 		break;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 706309f..fe65371 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -288,6 +288,190 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "sched_wake_to_idle",
+		.data		= &sysctl_sched_wake_to_idle,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
+#ifdef CONFIG_SCHED_HMP
+	{
+		.procname	= "sched_freq_inc_notify",
+		.data		= &sysctl_sched_freq_inc_notify,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_freq_dec_notify",
+		.data		= &sysctl_sched_freq_dec_notify,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname       = "sched_cpu_high_irqload",
+		.data           = &sysctl_sched_cpu_high_irqload,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{
+		.procname       = "sched_ravg_hist_size",
+		.data           = &sysctl_sched_ravg_hist_size,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname       = "sched_window_stats_policy",
+		.data           = &sysctl_sched_window_stats_policy,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname	= "sched_spill_load",
+		.data		= &sysctl_sched_spill_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_spill_nr_run",
+		.data		= &sysctl_sched_spill_nr_run,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname	= "sched_upmigrate",
+		.data		= &sysctl_sched_upmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_downmigrate",
+		.data		= &sysctl_sched_downmigrate_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_init_task_load",
+		.data		= &sysctl_sched_init_task_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_select_prev_cpu_us",
+		.data		= &sysctl_sched_select_prev_cpu_us,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+	},
+	{
+		.procname       = "sched_enable_colocation",
+		.data           = &sysctl_sched_enable_colocation,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "sched_restrict_cluster_spill",
+		.data		= &sysctl_sched_restrict_cluster_spill,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
+		.procname	= "sched_small_wakee_task_load",
+		.data		= &sysctl_sched_small_wakee_task_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname	= "sched_big_waker_task_load",
+		.data		= &sysctl_sched_big_waker_task_load_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler   = sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		.extra2		= &one_hundred,
+	},
+	{
+		.procname       = "sched_enable_thread_grouping",
+		.data           = &sysctl_sched_enable_thread_grouping,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = proc_dointvec,
+	},
+	{
+		.procname       = "sched_new_task_windows",
+		.data           = &sysctl_sched_new_task_windows,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname	= "sched_pred_alert_freq",
+		.data		= &sysctl_sched_pred_alert_freq,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+	},
+	{
+		.procname       = "sched_freq_aggregate",
+		.data           = &sysctl_sched_freq_aggregate,
+		.maxlen         = sizeof(unsigned int),
+		.mode           = 0644,
+		.proc_handler   = sched_window_update_handler,
+	},
+	{
+		.procname	= "sched_freq_aggregate_threshold",
+		.data		= &sysctl_sched_freq_aggregate_threshold_pct,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_hmp_proc_update_handler,
+		.extra1		= &zero,
+		/*
+		 * Special handling for sched_freq_aggregate_threshold_pct
+		 * which can be greater than 100. Use 1000 as an upper bound
+		 * value which works for all practical use cases.
+		 */
+		.extra2		= &one_thousand,
+	},
+	{
+		.procname	= "sched_boost",
+		.data		= &sysctl_sched_boost,
+		.maxlen		= sizeof(unsigned int),
+		.mode		= 0644,
+		.proc_handler	= sched_boost_handler,
+	},
+#endif	/* CONFIG_SCHED_HMP */
 #ifdef CONFIG_SCHED_DEBUG
 	{
 		.procname	= "sched_min_granularity_ns",
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3bcb61b..189e77b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -23,6 +23,7 @@
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
 #include <linux/context_tracking.h>
+#include <linux/rq_stats.h>
 
 #include <asm/irq_regs.h>
 
@@ -30,6 +31,10 @@
 
 #include <trace/events/timer.h>
 
+struct rq_data rq_info;
+struct workqueue_struct *rq_wq;
+spinlock_t rq_lock;
+
 /*
  * Per-CPU nohz control structure
  */
@@ -46,6 +51,21 @@
  */
 static ktime_t last_jiffies_update;
 
+u64 jiffy_to_ktime_ns(u64 *now, u64 *jiffy_ktime_ns)
+{
+	u64 cur_jiffies;
+	unsigned long seq;
+
+	do {
+		seq = read_seqbegin(&jiffies_lock);
+		*now = ktime_get_ns();
+		*jiffy_ktime_ns = ktime_to_ns(last_jiffies_update);
+		cur_jiffies = get_jiffies_64();
+	} while (read_seqretry(&jiffies_lock, seq));
+
+	return cur_jiffies;
+}
+
 /*
  * Must be called with interrupts disabled !
  */
@@ -1142,6 +1162,42 @@
  * High resolution timer specific code
  */
 #ifdef CONFIG_HIGH_RES_TIMERS
+static void update_rq_stats(void)
+{
+	unsigned long jiffy_gap = 0;
+	unsigned int rq_avg = 0;
+	unsigned long flags = 0;
+
+	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
+	if (jiffy_gap >= rq_info.rq_poll_jiffies) {
+		spin_lock_irqsave(&rq_lock, flags);
+		if (!rq_info.rq_avg)
+			rq_info.rq_poll_total_jiffies = 0;
+		rq_avg = nr_running() * 10;
+		if (rq_info.rq_poll_total_jiffies) {
+			rq_avg = (rq_avg * jiffy_gap) +
+				(rq_info.rq_avg *
+				 rq_info.rq_poll_total_jiffies);
+			do_div(rq_avg,
+				rq_info.rq_poll_total_jiffies + jiffy_gap);
+		}
+		rq_info.rq_avg = rq_avg;
+		rq_info.rq_poll_total_jiffies += jiffy_gap;
+		rq_info.rq_poll_last_jiffy = jiffies;
+		spin_unlock_irqrestore(&rq_lock, flags);
+	}
+}
+static void wakeup_user(void)
+{
+	unsigned long jiffy_gap;
+
+	jiffy_gap = jiffies - rq_info.def_timer_last_jiffy;
+	if (jiffy_gap >= rq_info.def_timer_jiffies) {
+		rq_info.def_timer_last_jiffy = jiffies;
+		queue_work(rq_wq, &rq_info.def_timer_work);
+	}
+}
+
 /*
  * We rearm the timer until we get disabled by the idle code.
  * Called with interrupts disabled.
@@ -1159,8 +1215,20 @@
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs)
+	if (regs) {
 		tick_sched_handle(ts, regs);
+		if (rq_info.init == 1 &&
+				tick_do_timer_cpu == smp_processor_id()) {
+			/*
+			 * update run queue statistics
+			 */
+			update_rq_stats();
+			/*
+			 * wakeup user if needed
+			 */
+			wakeup_user();
+		}
+	}
 
 	/* No need to reprogram if we are in idle or full dynticks mode */
 	if (unlikely(ts->tick_stopped))
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2a96b06..4274797 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -72,6 +72,9 @@
 	select CONTEXT_SWITCH_TRACER
 	bool
 
+config GPU_TRACEPOINTS
+	bool
+
 config CONTEXT_SWITCH_TRACER
 	bool
 
@@ -81,6 +84,35 @@
 	 Allow the use of ring_buffer_swap_cpu.
 	 Adds a very slight overhead to tracing when enabled.
 
+config IPC_LOGGING
+	bool "Debug Logging for IPC Drivers"
+	select GENERIC_TRACER
+	help
+	  The IPC Logging driver provides a logging option for IPC drivers.
+	  It maintains a cyclic log buffer in a driver-specific context and
+	  also provides a debugfs interface to dump the logs while the
+	  system is live.
+
+	  If in doubt, say no.
+
+config QCOM_RTB
+	bool "Register tracing"
+	help
+	  Add support for logging different events to a small uncached
+	  region. This is designed to aid in debugging reset cases
+	  where the caches may not be flushed before the target
+	  resets.
+
+config QCOM_RTB_SEPARATE_CPUS
+	bool "Separate entries for each cpu"
+	depends on QCOM_RTB
+	depends on SMP
+	help
+	  Under some circumstances, it may be beneficial to give dedicated space
+	  for each cpu to log accesses. Selecting this option will log each cpu
+	  separately. This will guarantee that the last accesses for each
+	  cpu will be logged, but there will be fewer entries per cpu.
+
 # All tracer options should select GENERIC_TRACER. For those options that are
 # enabled by all tracers (context switch and event tracer) they select TRACING.
 # This allows those options to appear when no other tracer is selected. But the
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index e579808..08e5e47 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -67,7 +67,14 @@
 endif
 obj-$(CONFIG_PROBE_EVENTS) += trace_probe.o
 obj-$(CONFIG_UPROBE_EVENT) += trace_uprobe.o
+obj-$(CONFIG_GPU_TRACEPOINTS) += gpu-traces.o
 
 obj-$(CONFIG_TRACEPOINT_BENCHMARK) += trace_benchmark.o
 
+obj-$(CONFIG_QCOM_RTB) += msm_rtb.o
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging.o
+ifdef CONFIG_DEBUG_FS
+obj-$(CONFIG_IPC_LOGGING) += ipc_logging_debug.o
+endif
 libftrace-y := ftrace.o
+
diff --git a/kernel/trace/gpu-traces.c b/kernel/trace/gpu-traces.c
new file mode 100644
index 0000000..a4b3f00
--- /dev/null
+++ b/kernel/trace/gpu-traces.c
@@ -0,0 +1,23 @@
+/*
+ * GPU tracepoints
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/gpu.h>
+
+EXPORT_TRACEPOINT_SYMBOL(gpu_sched_switch);
+EXPORT_TRACEPOINT_SYMBOL(gpu_job_enqueue);
diff --git a/kernel/trace/ipc_logging.c b/kernel/trace/ipc_logging.c
new file mode 100644
index 0000000..62110a3
--- /dev/null
+++ b/kernel/trace/ipc_logging.c
@@ -0,0 +1,889 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/arch_timer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+#define LOG_PAGE_DATA_SIZE	sizeof(((struct ipc_log_page *)0)->data)
+#define LOG_PAGE_FLAG (1 << 31)
+
+static LIST_HEAD(ipc_log_context_list);
+static DEFINE_RWLOCK(context_list_lock_lha1);
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+				      int type);
+
+static struct ipc_log_page *get_first_page(struct ipc_log_context *ilctxt)
+{
+	struct ipc_log_page_header *p_pghdr;
+	struct ipc_log_page *pg = NULL;
+
+	if (!ilctxt)
+		return NULL;
+	p_pghdr = list_first_entry(&ilctxt->page_list,
+				   struct ipc_log_page_header, list);
+	pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+	return pg;
+}
+
+/**
+ * is_nd_read_empty - Returns true if no data is available to read in log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if context is empty; 0 if not empty; <0 for failure
+ *
+ * This is for the debugfs read pointer which allows for a non-destructive read.
+ * There may still be data in the log, but it may have already been read.
+ */
+static int is_nd_read_empty(struct ipc_log_context *ilctxt)
+{
+	if (!ilctxt)
+		return -EINVAL;
+
+	return ((ilctxt->nd_read_page == ilctxt->write_page) &&
+		(ilctxt->nd_read_page->hdr.nd_read_offset ==
+		 ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_read_empty - Returns true if no data is available in log
+ *
+ * @ilctxt: logging context
+ * @returns: 1 if context is empty; 0 if not empty; <0 for failure
+ *
+ * This is for the actual log contents.  If it is empty, then there
+ * is no data at all in the log.
+ */
+static int is_read_empty(struct ipc_log_context *ilctxt)
+{
+	if (!ilctxt)
+		return -EINVAL;
+
+	return ((ilctxt->read_page == ilctxt->write_page) &&
+		(ilctxt->read_page->hdr.read_offset ==
+		 ilctxt->write_page->hdr.write_offset));
+}
+
+/**
+ * is_nd_read_equal_read - Return true if the non-destructive read is equal to
+ * the destructive read
+ *
+ * @ilctxt: logging context
+ * @returns: true if nd read is equal to read; false otherwise
+ */
+static bool is_nd_read_equal_read(struct ipc_log_context *ilctxt)
+{
+	uint16_t read_offset;
+	uint16_t nd_read_offset;
+
+	if (ilctxt->nd_read_page == ilctxt->read_page) {
+		read_offset = ilctxt->read_page->hdr.read_offset;
+		nd_read_offset = ilctxt->nd_read_page->hdr.nd_read_offset;
+
+		if (read_offset == nd_read_offset)
+			return true;
+	}
+
+	return false;
+}
+
+
+static struct ipc_log_page *get_next_page(struct ipc_log_context *ilctxt,
+					  struct ipc_log_page *cur_pg)
+{
+	struct ipc_log_page_header *p_pghdr;
+	struct ipc_log_page *pg = NULL;
+
+	if (!ilctxt || !cur_pg)
+		return NULL;
+
+	if (ilctxt->last_page == cur_pg)
+		return ilctxt->first_page;
+
+	p_pghdr = list_first_entry(&cur_pg->hdr.list,
+			struct ipc_log_page_header, list);
+	pg = container_of(p_pghdr, struct ipc_log_page, hdr);
+
+	return pg;
+}
+
+/**
+ * ipc_log_read - do non-destructive read of the log
+ *
+ * @ilctxt:  Logging context
+ * @data:  Data pointer to receive the data
+ * @data_size:  Number of bytes to read (must be <= bytes available in log)
+ *
+ * This read will update a runtime read pointer, but will not affect the actual
+ * contents of the log which allows for reading the logs continuously while
+ * debugging and if the system crashes, then the full logs can still be
+ * extracted.
+ */
+static void ipc_log_read(struct ipc_log_context *ilctxt,
+			 void *data, int data_size)
+{
+	int bytes_to_read;
+
+	bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+				- ilctxt->nd_read_page->hdr.nd_read_offset,
+			      data_size);
+
+	memcpy(data, (ilctxt->nd_read_page->data +
+		ilctxt->nd_read_page->hdr.nd_read_offset), bytes_to_read);
+
+	if (bytes_to_read != data_size) {
+		/* not enough space, wrap read to next page */
+		ilctxt->nd_read_page->hdr.nd_read_offset = 0;
+		ilctxt->nd_read_page = get_next_page(ilctxt,
+			ilctxt->nd_read_page);
+		if (WARN_ON(ilctxt->nd_read_page == NULL))
+			return;
+
+		memcpy((data + bytes_to_read),
+			   (ilctxt->nd_read_page->data +
+			ilctxt->nd_read_page->hdr.nd_read_offset),
+			   (data_size - bytes_to_read));
+		bytes_to_read = (data_size - bytes_to_read);
+	}
+	ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * ipc_log_drop - do destructive read of the log
+ *
+ * @ilctxt:  Logging context
+ * @data:  Data pointer to receive the data (or NULL)
+ * @data_size:  Number of bytes to read (must be <= bytes available in log)
+ */
+static void ipc_log_drop(struct ipc_log_context *ilctxt, void *data,
+		int data_size)
+{
+	int bytes_to_read;
+	bool push_nd_read;
+
+	bytes_to_read = MIN(LOG_PAGE_DATA_SIZE
+				- ilctxt->read_page->hdr.read_offset,
+			      data_size);
+	if (data)
+		memcpy(data, (ilctxt->read_page->data +
+			ilctxt->read_page->hdr.read_offset), bytes_to_read);
+
+	if (bytes_to_read != data_size) {
+		/* not enough space, wrap read to next page */
+		push_nd_read = is_nd_read_equal_read(ilctxt);
+
+		ilctxt->read_page->hdr.read_offset = 0;
+		if (push_nd_read) {
+			ilctxt->read_page->hdr.nd_read_offset = 0;
+			ilctxt->read_page = get_next_page(ilctxt,
+				ilctxt->read_page);
+			if (WARN_ON(ilctxt->read_page == NULL))
+				return;
+			ilctxt->nd_read_page = ilctxt->read_page;
+		} else {
+			ilctxt->read_page = get_next_page(ilctxt,
+				ilctxt->read_page);
+			if (WARN_ON(ilctxt->read_page == NULL))
+				return;
+		}
+
+		if (data)
+			memcpy((data + bytes_to_read),
+				   (ilctxt->read_page->data +
+				ilctxt->read_page->hdr.read_offset),
+				   (data_size - bytes_to_read));
+
+		bytes_to_read = (data_size - bytes_to_read);
+	}
+
+	/* update non-destructive read pointer if necessary */
+	push_nd_read = is_nd_read_equal_read(ilctxt);
+	ilctxt->read_page->hdr.read_offset += bytes_to_read;
+	ilctxt->write_avail += data_size;
+
+	if (push_nd_read)
+		ilctxt->nd_read_page->hdr.nd_read_offset += bytes_to_read;
+}
+
+/**
+ * msg_read - Reads a message.
+ *
+ * If a message is read successfully, then the message context
+ * will be set to:
+ *     .hdr    message header .size and .type values
+ *     .offset beginning of message data
+ *
+ * @ilctxt	Logging context
+ * @ectxt   Message context
+ *
+ * @returns 0 - no message available; >0 message size; <0 error
+ */
+static int msg_read(struct ipc_log_context *ilctxt,
+	     struct encode_context *ectxt)
+{
+	struct tsv_header hdr;
+
+	if (!ectxt)
+		return -EINVAL;
+
+	if (is_nd_read_empty(ilctxt))
+		return 0;
+
+	ipc_log_read(ilctxt, &hdr, sizeof(hdr));
+	ectxt->hdr.type = hdr.type;
+	ectxt->hdr.size = hdr.size;
+	ectxt->offset = sizeof(hdr);
+	ipc_log_read(ilctxt, (ectxt->buff + ectxt->offset),
+			 (int)hdr.size);
+
+	return sizeof(hdr) + (int)hdr.size;
+}
+
+/**
+ * msg_drop - Drops a message.
+ *
+ * @ilctxt	Logging context
+ */
+static void msg_drop(struct ipc_log_context *ilctxt)
+{
+	struct tsv_header hdr;
+
+	if (!is_read_empty(ilctxt)) {
+		ipc_log_drop(ilctxt, &hdr, sizeof(hdr));
+		ipc_log_drop(ilctxt, NULL, (int)hdr.size);
+	}
+}
+
+/*
+ * Commits messages to the FIFO.  If the FIFO is full, then enough
+ * messages are dropped to create space for the new message.
+ */
+void ipc_log_write(void *ctxt, struct encode_context *ectxt)
+{
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	int bytes_to_write;
+	unsigned long flags;
+
+	if (!ilctxt || !ectxt) {
+		pr_err("%s: Invalid ipc_log or encode context\n", __func__);
+		return;
+	}
+
+	read_lock_irqsave(&context_list_lock_lha1, flags);
+	spin_lock(&ilctxt->context_lock_lhb1);
+	while (ilctxt->write_avail <= ectxt->offset)
+		msg_drop(ilctxt);
+
+	bytes_to_write = MIN(LOG_PAGE_DATA_SIZE
+				- ilctxt->write_page->hdr.write_offset,
+				ectxt->offset);
+	memcpy((ilctxt->write_page->data +
+		ilctxt->write_page->hdr.write_offset),
+		ectxt->buff, bytes_to_write);
+
+	if (bytes_to_write != ectxt->offset) {
+		uint64_t t_now = sched_clock();
+
+		ilctxt->write_page->hdr.write_offset += bytes_to_write;
+		ilctxt->write_page->hdr.end_time = t_now;
+
+		ilctxt->write_page = get_next_page(ilctxt, ilctxt->write_page);
+		if (WARN_ON(ilctxt->write_page == NULL))
+			return;
+		ilctxt->write_page->hdr.write_offset = 0;
+		ilctxt->write_page->hdr.start_time = t_now;
+		memcpy((ilctxt->write_page->data +
+			ilctxt->write_page->hdr.write_offset),
+		       (ectxt->buff + bytes_to_write),
+		       (ectxt->offset - bytes_to_write));
+		bytes_to_write = (ectxt->offset - bytes_to_write);
+	}
+	ilctxt->write_page->hdr.write_offset += bytes_to_write;
+	ilctxt->write_avail -= ectxt->offset;
+	complete(&ilctxt->read_avail);
+	spin_unlock(&ilctxt->context_lock_lhb1);
+	read_unlock_irqrestore(&context_list_lock_lha1, flags);
+}
+EXPORT_SYMBOL(ipc_log_write);
+
+/*
+ * Starts a new message after which you can add serialized data and
+ * then complete the message by calling msg_encode_end().
+ */
+void msg_encode_start(struct encode_context *ectxt, uint32_t type)
+{
+	if (!ectxt) {
+		pr_err("%s: Invalid encode context\n", __func__);
+		return;
+	}
+
+	ectxt->hdr.type = type;
+	ectxt->hdr.size = 0;
+	ectxt->offset = sizeof(ectxt->hdr);
+}
+EXPORT_SYMBOL(msg_encode_start);
+
+/*
+ * Completes the message
+ */
+void msg_encode_end(struct encode_context *ectxt)
+{
+	if (!ectxt) {
+		pr_err("%s: Invalid encode context\n", __func__);
+		return;
+	}
+
+	/* finalize data size */
+	ectxt->hdr.size = ectxt->offset - sizeof(ectxt->hdr);
+	if (WARN_ON(ectxt->hdr.size > MAX_MSG_SIZE))
+		return;
+	memcpy(ectxt->buff, &ectxt->hdr, sizeof(ectxt->hdr));
+}
+EXPORT_SYMBOL(msg_encode_end);
+
+/*
+ * Helper function used to write data to a message context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @data  data to write
+ * @size  number of bytes of data to write
+ */
+static inline int tsv_write_data(struct encode_context *ectxt,
+				 void *data, uint32_t size)
+{
+	if (!ectxt) {
+		pr_err("%s: Invalid encode context\n", __func__);
+		return -EINVAL;
+	}
+	if ((ectxt->offset + size) > MAX_MSG_SIZE) {
+		pr_err("%s: No space to encode further\n", __func__);
+		return -EINVAL;
+	}
+
+	memcpy((void *)(ectxt->buff + ectxt->offset), data, size);
+	ectxt->offset += size;
+	return 0;
+}
+
+/*
+ * Helper function that writes a type to the context.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @type  primitive type
+ * @size  size of primitive in bytes
+ */
+static inline int tsv_write_header(struct encode_context *ectxt,
+				   uint32_t type, uint32_t size)
+{
+	struct tsv_header hdr;
+
+	hdr.type = (unsigned char)type;
+	hdr.size = (unsigned char)size;
+	return tsv_write_data(ectxt, &hdr, sizeof(hdr));
+}
+
+/*
+ * Writes the current timestamp count.
+ *
+ * @ectxt   context initialized by calling msg_encode_start()
+ */
+int tsv_timestamp_write(struct encode_context *ectxt)
+{
+	int ret;
+	uint64_t t_now = sched_clock();
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_TIMESTAMP, sizeof(t_now));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_timestamp_write);
+
+/*
+ * Writes the current QTimer timestamp count.
+ *
+ * @ectxt   context initialized by calling msg_encode_start()
+ */
+int tsv_qtimer_write(struct encode_context *ectxt)
+{
+	int ret;
+	uint64_t t_now = arch_counter_get_cntvct();
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_QTIMER, sizeof(t_now));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &t_now, sizeof(t_now));
+}
+EXPORT_SYMBOL(tsv_qtimer_write);
+
+/*
+ * Writes a data pointer.
+ *
+ * @ectxt   context initialized by calling msg_encode_start()
+ * @pointer pointer value to write
+ */
+int tsv_pointer_write(struct encode_context *ectxt, void *pointer)
+{
+	int ret;
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_POINTER, sizeof(pointer));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &pointer, sizeof(pointer));
+}
+EXPORT_SYMBOL(tsv_pointer_write);
+
+/*
+ * Writes a 32-bit integer value.
+ *
+ * @ectxt context initialized by calling msg_encode_start()
+ * @n     integer to write
+ */
+int tsv_int32_write(struct encode_context *ectxt, int32_t n)
+{
+	int ret;
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_INT32, sizeof(n));
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, &n, sizeof(n));
+}
+EXPORT_SYMBOL(tsv_int32_write);
+
+/*
+ * Writes a byte array.
+ *
+ * @ectxt context initialized by calling msg_write_start()
+ * @data  Beginning address of data
+ * @data_size Size of data to be written
+ */
+int tsv_byte_array_write(struct encode_context *ectxt,
+			 void *data, int data_size)
+{
+	int ret;
+
+	ret = tsv_write_header(ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+	if (ret)
+		return ret;
+	return tsv_write_data(ectxt, data, data_size);
+}
+EXPORT_SYMBOL(tsv_byte_array_write);
+
+/*
+ * Helper function to log a string
+ *
+ * @ilctxt ipc_log_context created using ipc_log_context_create()
+ * @fmt Data specified using format specifiers
+ */
+int ipc_log_string(void *ilctxt, const char *fmt, ...)
+{
+	struct encode_context ectxt;
+	int avail_size, data_size, hdr_size = sizeof(struct tsv_header);
+	va_list arg_list;
+
+	if (!ilctxt)
+		return -EINVAL;
+
+	msg_encode_start(&ectxt, TSV_TYPE_STRING);
+	tsv_timestamp_write(&ectxt);
+	tsv_qtimer_write(&ectxt);
+	avail_size = (MAX_MSG_SIZE - (ectxt.offset + hdr_size));
+	va_start(arg_list, fmt);
+	data_size = vsnprintf((ectxt.buff + ectxt.offset + hdr_size),
+			      avail_size, fmt, arg_list);
+	va_end(arg_list);
+	tsv_write_header(&ectxt, TSV_TYPE_BYTE_ARRAY, data_size);
+	ectxt.offset += data_size;
+	msg_encode_end(&ectxt);
+	ipc_log_write(ilctxt, &ectxt);
+	return 0;
+}
+EXPORT_SYMBOL(ipc_log_string);
+
+/**
+ * ipc_log_extract - Reads and deserializes log
+ *
+ * @ctxt:  logging context
+ * @buff:    buffer to receive the data
+ * @size:    size of the buffer
+ * @returns: 0 if no data read; >0 number of bytes read; < 0 error
+ *
+ * If no data is available to be read, then the ilctxt::read_avail
+ * completion is reinitialized.  This allows clients to block
+ * until new log data is saved.
+ */
+int ipc_log_extract(void *ctxt, char *buff, int size)
+{
+	struct encode_context ectxt;
+	struct decode_context dctxt;
+	void (*deserialize_func)(struct encode_context *ectxt,
+				 struct decode_context *dctxt);
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	unsigned long flags;
+
+	if (size < MAX_MSG_DECODED_SIZE)
+		return -EINVAL;
+
+	dctxt.output_format = OUTPUT_DEBUGFS;
+	dctxt.buff = buff;
+	dctxt.size = size;
+	read_lock_irqsave(&context_list_lock_lha1, flags);
+	spin_lock(&ilctxt->context_lock_lhb1);
+	while (dctxt.size >= MAX_MSG_DECODED_SIZE &&
+	       !is_nd_read_empty(ilctxt)) {
+		msg_read(ilctxt, &ectxt);
+		deserialize_func = get_deserialization_func(ilctxt,
+							ectxt.hdr.type);
+		spin_unlock(&ilctxt->context_lock_lhb1);
+		read_unlock_irqrestore(&context_list_lock_lha1, flags);
+		if (deserialize_func)
+			deserialize_func(&ectxt, &dctxt);
+		else
+			pr_err("%s: unknown message 0x%x\n",
+				__func__, ectxt.hdr.type);
+		read_lock_irqsave(&context_list_lock_lha1, flags);
+		spin_lock(&ilctxt->context_lock_lhb1);
+	}
+	if ((size - dctxt.size) == 0)
+		reinit_completion(&ilctxt->read_avail);
+	spin_unlock(&ilctxt->context_lock_lhb1);
+	read_unlock_irqrestore(&context_list_lock_lha1, flags);
+	return size - dctxt.size;
+}
+EXPORT_SYMBOL(ipc_log_extract);
+
+/*
+ * Helper function used to read data from a message context.
+ *
+ * @ectxt  context initialized by calling msg_read()
+ * @data  data to read
+ * @size  number of bytes of data to read
+ */
+static void tsv_read_data(struct encode_context *ectxt,
+			  void *data, uint32_t size)
+{
+	if (WARN_ON((ectxt->offset + size) > MAX_MSG_SIZE))
+		return;
+	memcpy(data, (ectxt->buff + ectxt->offset), size);
+	ectxt->offset += size;
+}
+
+/*
+ * Helper function that reads a type from the context and updates the
+ * context pointers.
+ *
+ * @ectxt  context initialized by calling msg_read()
+ * @hdr   type header
+ */
+static void tsv_read_header(struct encode_context *ectxt,
+			    struct tsv_header *hdr)
+{
+	if (WARN_ON((ectxt->offset + sizeof(*hdr)) > MAX_MSG_SIZE))
+		return;
+	memcpy(hdr, (ectxt->buff + ectxt->offset), sizeof(*hdr));
+	ectxt->offset += sizeof(*hdr);
+}
+
+/*
+ * Reads a timestamp.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format (appended to the %6u.%09u timestamp format)
+ */
+void tsv_timestamp_read(struct encode_context *ectxt,
+			struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	uint64_t val;
+	unsigned long nanosec_rem;
+
+	tsv_read_header(ectxt, &hdr);
+	if (WARN_ON(hdr.type != TSV_TYPE_TIMESTAMP))
+		return;
+	tsv_read_data(ectxt, &val, sizeof(val));
+	nanosec_rem = do_div(val, 1000000000U);
+	IPC_SPRINTF_DECODE(dctxt, "[%6u.%09lu%s/",
+			(unsigned int)val, nanosec_rem, format);
+}
+EXPORT_SYMBOL(tsv_timestamp_read);
+
+/*
+ * Reads a QTimer timestamp.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format (appended to %#18llx timestamp format)
+ */
+void tsv_qtimer_read(struct encode_context *ectxt,
+		     struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	uint64_t val;
+
+	tsv_read_header(ectxt, &hdr);
+	if (WARN_ON(hdr.type != TSV_TYPE_QTIMER))
+		return;
+	tsv_read_data(ectxt, &val, sizeof(val));
+
+	/*
+	 * This gives 16 hex digits of output. The # prefix prepends
+	 * a 0x, and these characters count as part of the number.
+	 */
+	IPC_SPRINTF_DECODE(dctxt, "%#18llx]%s", val, format);
+}
+EXPORT_SYMBOL(tsv_qtimer_read);
+
+/*
+ * Reads a data pointer.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format
+ */
+void tsv_pointer_read(struct encode_context *ectxt,
+		      struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	void *val;
+
+	tsv_read_header(ectxt, &hdr);
+	if (WARN_ON(hdr.type != TSV_TYPE_POINTER))
+		return;
+	tsv_read_data(ectxt, &val, sizeof(val));
+
+	IPC_SPRINTF_DECODE(dctxt, format, val);
+}
+EXPORT_SYMBOL(tsv_pointer_read);
+
+/*
+ * Reads a 32-bit integer value.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format
+ */
+int32_t tsv_int32_read(struct encode_context *ectxt,
+		       struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+	int32_t val;
+
+	tsv_read_header(ectxt, &hdr);
+	if (WARN_ON(hdr.type != TSV_TYPE_INT32))
+		return -EINVAL;
+	tsv_read_data(ectxt, &val, sizeof(val));
+
+	IPC_SPRINTF_DECODE(dctxt, format, val);
+	return val;
+}
+EXPORT_SYMBOL(tsv_int32_read);
+
+/*
+ * Reads a byte array/string.
+ *
+ * @ectxt   context initialized by calling msg_read()
+ * @dctxt   deserialization context
+ * @format output format
+ */
+void tsv_byte_array_read(struct encode_context *ectxt,
+			 struct decode_context *dctxt, const char *format)
+{
+	struct tsv_header hdr;
+
+	tsv_read_header(ectxt, &hdr);
+	if (WARN_ON(hdr.type != TSV_TYPE_BYTE_ARRAY))
+		return;
+	tsv_read_data(ectxt, dctxt->buff, hdr.size);
+	dctxt->buff += hdr.size;
+	dctxt->size -= hdr.size;
+}
+EXPORT_SYMBOL(tsv_byte_array_read);
+
+int add_deserialization_func(void *ctxt, int type,
+			void (*dfunc)(struct encode_context *,
+				      struct decode_context *))
+{
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	struct dfunc_info *df_info;
+	unsigned long flags;
+
+	if (!ilctxt || !dfunc)
+		return -EINVAL;
+
+	df_info = kmalloc(sizeof(struct dfunc_info), GFP_KERNEL);
+	if (!df_info)
+		return -ENOSPC;
+
+	read_lock_irqsave(&context_list_lock_lha1, flags);
+	spin_lock(&ilctxt->context_lock_lhb1);
+	df_info->type = type;
+	df_info->dfunc = dfunc;
+	list_add_tail(&df_info->list, &ilctxt->dfunc_info_list);
+	spin_unlock(&ilctxt->context_lock_lhb1);
+	read_unlock_irqrestore(&context_list_lock_lha1, flags);
+	return 0;
+}
+EXPORT_SYMBOL(add_deserialization_func);
+
+static void *get_deserialization_func(struct ipc_log_context *ilctxt,
+				      int type)
+{
+	struct dfunc_info *df_info = NULL;
+
+	if (!ilctxt)
+		return NULL;
+
+	list_for_each_entry(df_info, &ilctxt->dfunc_info_list, list) {
+		if (df_info->type == type)
+			return df_info->dfunc;
+	}
+	return NULL;
+}
+
+/**
+ * ipc_log_context_create: Create a debug log context
+ *                         Should not be called from atomic context
+ *
+ * @max_num_pages: Number of pages of logging space required (max. 10)
+ * @mod_name     : Name of the directory entry under DEBUGFS
+ * @user_version : Version number of user-defined message formats
+ *
+ * returns context id on success, NULL on failure
+ */
+void *ipc_log_context_create(int max_num_pages,
+			     const char *mod_name, uint16_t user_version)
+{
+	struct ipc_log_context *ctxt;
+	struct ipc_log_page *pg = NULL;
+	int page_cnt;
+	unsigned long flags;
+
+	ctxt = kzalloc(sizeof(struct ipc_log_context), GFP_KERNEL);
+	if (!ctxt)
+		return 0;
+
+	init_completion(&ctxt->read_avail);
+	INIT_LIST_HEAD(&ctxt->page_list);
+	INIT_LIST_HEAD(&ctxt->dfunc_info_list);
+	spin_lock_init(&ctxt->context_lock_lhb1);
+	for (page_cnt = 0; page_cnt < max_num_pages; page_cnt++) {
+		pg = kzalloc(sizeof(struct ipc_log_page), GFP_KERNEL);
+		if (!pg) {
+			pr_err("%s: cannot create ipc_log_page\n", __func__);
+			goto release_ipc_log_context;
+		}
+		pg->hdr.log_id = (uint64_t)(uintptr_t)ctxt;
+		pg->hdr.page_num = LOG_PAGE_FLAG | page_cnt;
+		pg->hdr.ctx_offset = (int64_t)((uint64_t)(uintptr_t)ctxt -
+			(uint64_t)(uintptr_t)&pg->hdr);
+
+		/* set magic last to signal that page init is complete */
+		pg->hdr.magic = IPC_LOGGING_MAGIC_NUM;
+		pg->hdr.nmagic = ~(IPC_LOGGING_MAGIC_NUM);
+
+		spin_lock_irqsave(&ctxt->context_lock_lhb1, flags);
+		list_add_tail(&pg->hdr.list, &ctxt->page_list);
+		spin_unlock_irqrestore(&ctxt->context_lock_lhb1, flags);
+	}
+
+	ctxt->log_id = (uint64_t)(uintptr_t)ctxt;
+	ctxt->version = IPC_LOG_VERSION;
+	strlcpy(ctxt->name, mod_name, IPC_LOG_MAX_CONTEXT_NAME_LEN);
+	ctxt->user_version = user_version;
+	ctxt->first_page = get_first_page(ctxt);
+	ctxt->last_page = pg;
+	ctxt->write_page = ctxt->first_page;
+	ctxt->read_page = ctxt->first_page;
+	ctxt->nd_read_page = ctxt->first_page;
+	ctxt->write_avail = max_num_pages * LOG_PAGE_DATA_SIZE;
+	ctxt->header_size = sizeof(struct ipc_log_page_header);
+	create_ctx_debugfs(ctxt, mod_name);
+
+	/* set magic last to signal context init is complete */
+	ctxt->magic = IPC_LOG_CONTEXT_MAGIC_NUM;
+	ctxt->nmagic = ~(IPC_LOG_CONTEXT_MAGIC_NUM);
+
+	write_lock_irqsave(&context_list_lock_lha1, flags);
+	list_add_tail(&ctxt->list, &ipc_log_context_list);
+	write_unlock_irqrestore(&context_list_lock_lha1, flags);
+	return (void *)ctxt;
+
+release_ipc_log_context:
+	while (page_cnt-- > 0) {
+		pg = get_first_page(ctxt);
+		list_del(&pg->hdr.list);
+		kfree(pg);
+	}
+	kfree(ctxt);
+	return 0;
+}
+EXPORT_SYMBOL(ipc_log_context_create);
+
+/*
+ * Destroy debug log context
+ *
+ * @ctxt: debug log context created by calling ipc_log_context_create API.
+ */
+int ipc_log_context_destroy(void *ctxt)
+{
+	struct ipc_log_context *ilctxt = (struct ipc_log_context *)ctxt;
+	struct ipc_log_page *pg = NULL;
+	unsigned long flags;
+
+	if (!ilctxt)
+		return 0;
+
+	while (!list_empty(&ilctxt->page_list)) {
+		pg = get_first_page(ctxt);
+		list_del(&pg->hdr.list);
+		kfree(pg);
+	}
+
+	write_lock_irqsave(&context_list_lock_lha1, flags);
+	list_del(&ilctxt->list);
+	write_unlock_irqrestore(&context_list_lock_lha1, flags);
+
+	debugfs_remove_recursive(ilctxt->dent);
+
+	kfree(ilctxt);
+	return 0;
+}
+EXPORT_SYMBOL(ipc_log_context_destroy);
+
+static int __init ipc_logging_init(void)
+{
+	check_and_create_debugfs();
+	return 0;
+}
+
+module_init(ipc_logging_init);
+
+MODULE_DESCRIPTION("ipc logging");
+MODULE_LICENSE("GPL v2");
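For reference, a minimal sketch of how a client driver might use the API introduced above. The "foo" name, the page count and the message text are illustrative assumptions only; the calls themselves (ipc_log_context_create(), ipc_log_string(), ipc_log_context_destroy()) are the ones added in this file.

#include <linux/errno.h>
#include <linux/ipc_logging.h>

#define FOO_LOG_PAGES 2	/* assumed page budget for this example */

static void *foo_ipc_log;

static int foo_init_logging(void)
{
	/* allocates FOO_LOG_PAGES pages and registers the "foo" context */
	foo_ipc_log = ipc_log_context_create(FOO_LOG_PAGES, "foo", 0);
	if (!foo_ipc_log)
		return -ENOMEM;

	/* printf-style message written into the cyclic buffer */
	ipc_log_string(foo_ipc_log, "probe done, irq=%d", 42);
	return 0;
}

static void foo_exit_logging(void)
{
	ipc_log_context_destroy(foo_ipc_log);
}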
diff --git a/kernel/trace/ipc_logging_debug.c b/kernel/trace/ipc_logging_debug.c
new file mode 100644
index 0000000..a545387
--- /dev/null
+++ b/kernel/trace/ipc_logging_debug.c
@@ -0,0 +1,184 @@
+/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/debugfs.h>
+#include <linux/io.h>
+#include <linux/idr.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <linux/ipc_logging.h>
+
+#include "ipc_logging_private.h"
+
+static DEFINE_MUTEX(ipc_log_debugfs_init_lock);
+static struct dentry *root_dent;
+
+static int debug_log(struct ipc_log_context *ilctxt,
+		     char *buff, int size, int cont)
+{
+	int i = 0;
+	int ret;
+
+	if (size < MAX_MSG_DECODED_SIZE) {
+		pr_err("%s: buffer size %d < %d\n", __func__, size,
+			MAX_MSG_DECODED_SIZE);
+		return -ENOMEM;
+	}
+	do {
+		i = ipc_log_extract(ilctxt, buff, size - 1);
+		if (cont && i == 0) {
+			ret = wait_for_completion_interruptible(
+				&ilctxt->read_avail);
+			if (ret < 0)
+				return ret;
+		}
+	} while (cont && i == 0);
+
+	return i;
+}
+
+/*
+ * VFS Read operation helper which dispatches the call to the debugfs
+ * read command stored in file->private_data.
+ *
+ * @file  File structure
+ * @buff   user buffer
+ * @count size of user buffer
+ * @ppos  file position to read from (only a value of 0 is accepted)
+ * @cont  1 = continuous mode (don't return 0 to signal end-of-file)
+ *
+ * @returns ==0 end of file
+ *           >0 number of bytes read
+ *           <0 error
+ */
+static ssize_t debug_read_helper(struct file *file, char __user *buff,
+				 size_t count, loff_t *ppos, int cont)
+{
+	struct ipc_log_context *ilctxt = file->private_data;
+	char *buffer;
+	int bsize;
+
+	buffer = kmalloc(count, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	bsize = debug_log(ilctxt, buffer, count, cont);
+	if (bsize > 0) {
+		if (copy_to_user(buff, buffer, bsize)) {
+			kfree(buffer);
+			return -EFAULT;
+		}
+		*ppos += bsize;
+	}
+	kfree(buffer);
+	return bsize;
+}
+
+static ssize_t debug_read(struct file *file, char __user *buff,
+			  size_t count, loff_t *ppos)
+{
+	return debug_read_helper(file, buff, count, ppos, 0);
+}
+
+static ssize_t debug_read_cont(struct file *file, char __user *buff,
+			       size_t count, loff_t *ppos)
+{
+	return debug_read_helper(file, buff, count, ppos, 1);
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations debug_ops = {
+	.read = debug_read,
+	.open = debug_open,
+};
+
+static const struct file_operations debug_ops_cont = {
+	.read = debug_read_cont,
+	.open = debug_open,
+};
+
+static void debug_create(const char *name, mode_t mode,
+			 struct dentry *dent,
+			 struct ipc_log_context *ilctxt,
+			 const struct file_operations *fops)
+{
+	debugfs_create_file(name, mode, dent, ilctxt, fops);
+}
+
+static void dfunc_string(struct encode_context *ectxt,
+			 struct decode_context *dctxt)
+{
+	tsv_timestamp_read(ectxt, dctxt, "");
+	tsv_qtimer_read(ectxt, dctxt, " ");
+	tsv_byte_array_read(ectxt, dctxt, "");
+
+	/* add trailing \n if necessary */
+	if (*(dctxt->buff - 1) != '\n') {
+		if (dctxt->size) {
+			++dctxt->buff;
+			--dctxt->size;
+		}
+		*(dctxt->buff - 1) = '\n';
+	}
+}
+
+void check_and_create_debugfs(void)
+{
+	mutex_lock(&ipc_log_debugfs_init_lock);
+	if (!root_dent) {
+		root_dent = debugfs_create_dir("ipc_logging", 0);
+
+		if (IS_ERR(root_dent)) {
+			pr_err("%s: unable to create debugfs %ld\n",
+				__func__, PTR_ERR(root_dent));
+			root_dent = NULL;
+		}
+	}
+	mutex_unlock(&ipc_log_debugfs_init_lock);
+}
+EXPORT_SYMBOL(check_and_create_debugfs);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+			const char *mod_name)
+{
+	if (!root_dent)
+		check_and_create_debugfs();
+
+	if (root_dent) {
+		ctxt->dent = debugfs_create_dir(mod_name, root_dent);
+		if (!IS_ERR(ctxt->dent)) {
+			debug_create("log", 0444, ctxt->dent,
+				     ctxt, &debug_ops);
+			debug_create("log_cont", 0444, ctxt->dent,
+				     ctxt, &debug_ops_cont);
+		}
+	}
+	add_deserialization_func((void *)ctxt,
+				 TSV_TYPE_STRING, dfunc_string);
+}
+EXPORT_SYMBOL(create_ctx_debugfs);
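A hypothetical client could also register its own deserialization function for a private message type, mirroring how dfunc_string is wired up above. FOO_TSV_TYPE, the field order and the format strings are assumptions for illustration; the tsv_*_read() helpers and add_deserialization_func() are the ones added by this patch.

#define FOO_TSV_TYPE 0x10	/* assumed client-defined message type */

static void foo_dfunc(struct encode_context *ectxt,
		      struct decode_context *dctxt)
{
	/* decode fields in the same order the client encoded them */
	tsv_timestamp_read(ectxt, dctxt, "");
	tsv_qtimer_read(ectxt, dctxt, " ");
	tsv_int32_read(ectxt, dctxt, "state=%d\n");
}

static int foo_register_dfunc(void *foo_ipc_log)
{
	return add_deserialization_func(foo_ipc_log, FOO_TSV_TYPE, foo_dfunc);
}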
diff --git a/kernel/trace/ipc_logging_private.h b/kernel/trace/ipc_logging_private.h
new file mode 100644
index 0000000..594027a
--- /dev/null
+++ b/kernel/trace/ipc_logging_private.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _IPC_LOGGING_PRIVATE_H
+#define _IPC_LOGGING_PRIVATE_H
+
+#include <linux/ipc_logging.h>
+
+#define IPC_LOG_VERSION 0x0003
+#define IPC_LOG_MAX_CONTEXT_NAME_LEN 32
+
+/**
+ * struct ipc_log_page_header - Individual log page header
+ *
+ * @magic: Magic number (used for log extraction)
+ * @nmagic: Inverse of magic number (used for log extraction)
+ * @page_num: Index of page (0.. N - 1) (note top bit is always set)
+ * @read_offset:  Read offset in page
+ * @write_offset: Write offset in page (or 0xFFFF if full)
+ * @log_id: ID of logging context that owns this page
+ * @start_time:  Scheduler clock for first write time in page
+ * @end_time:  Scheduler clock for last write time in page
+ * @ctx_offset:  Signed offset from page to the logging context.  Used to
+ *               optimize ram-dump extraction.
+ *
+ * @list:  Linked list of pages that make up a log
+ * @nd_read_offset:  Non-destructive read offset used for debugfs
+ *
+ * The first part of the structure defines data that is used to extract the
+ * logs from a memory dump and elements in this section should not be changed
+ * or re-ordered.  New local data structures can be added to the end of the
+ * structure since they will be ignored by the extraction tool.
+ */
+struct ipc_log_page_header {
+	uint32_t magic;
+	uint32_t nmagic;
+	uint32_t page_num;
+	uint16_t read_offset;
+	uint16_t write_offset;
+	uint64_t log_id;
+	uint64_t start_time;
+	uint64_t end_time;
+	int64_t ctx_offset;
+
+	/* add local data structures after this point */
+	struct list_head list;
+	uint16_t nd_read_offset;
+};
+
+/**
+ * struct ipc_log_page - Individual log page
+ *
+ * @hdr: Log page header
+ * @data: Log data
+ *
+ * Each log consists of 1 to N log pages.  Data size is adjusted to always fit
+ * the structure into a single kernel page.
+ */
+struct ipc_log_page {
+	struct ipc_log_page_header hdr;
+	char data[PAGE_SIZE - sizeof(struct ipc_log_page_header)];
+};
+
+/**
+ * struct ipc_log_context - main logging context
+ *
+ * @magic:  Magic number (used for log extraction)
+ * @nmagic:  Inverse of magic number (used for log extraction)
+ * @version:  IPC Logging version of log format
+ * @user_version:  Version number for user-defined messages
+ * @header_size:  Size of the log header which is used to determine the offset
+ *                of ipc_log_page::data
+ * @log_id:  Log ID (assigned when log is created)
+ * @name:  Name of the log used to uniquely identify the log during extraction
+ *
+ * @list:  List of log contexts (struct ipc_log_context)
+ * @page_list:  List of log pages (struct ipc_log_page)
+ * @first_page:  First page in list of logging pages
+ * @last_page:  Last page in list of logging pages
+ * @write_page:  Current write page
+ * @read_page:  Current read page (for internal reads)
+ * @nd_read_page:  Current debugfs extraction page (non-destructive)
+ *
+ * @write_avail:  Number of bytes available to write in all pages
+ * @dent:  Debugfs node for run-time log extraction
+ * @dfunc_info_list:  List of deserialization functions
+ * @context_lock_lhb1:  Lock for entire structure
+ * @read_avail:  Completed when new data is added to the log
+ */
+struct ipc_log_context {
+	uint32_t magic;
+	uint32_t nmagic;
+	uint32_t version;
+	uint16_t user_version;
+	uint16_t header_size;
+	uint64_t log_id;
+	char name[IPC_LOG_MAX_CONTEXT_NAME_LEN];
+
+	/* add local data structures after this point */
+	struct list_head list;
+	struct list_head page_list;
+	struct ipc_log_page *first_page;
+	struct ipc_log_page *last_page;
+	struct ipc_log_page *write_page;
+	struct ipc_log_page *read_page;
+	struct ipc_log_page *nd_read_page;
+
+	uint32_t write_avail;
+	struct dentry *dent;
+	struct list_head dfunc_info_list;
+	spinlock_t context_lock_lhb1;
+	struct completion read_avail;
+};
+
+struct dfunc_info {
+	struct list_head list;
+	int type;
+	void (*dfunc)(struct encode_context *, struct decode_context *);
+};
+
+enum {
+	TSV_TYPE_INVALID,
+	TSV_TYPE_TIMESTAMP,
+	TSV_TYPE_POINTER,
+	TSV_TYPE_INT32,
+	TSV_TYPE_BYTE_ARRAY,
+	TSV_TYPE_QTIMER,
+};
+
+enum {
+	OUTPUT_DEBUGFS,
+};
+
+#define IPC_LOG_CONTEXT_MAGIC_NUM 0x25874452
+#define IPC_LOGGING_MAGIC_NUM 0x52784425
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#define IS_MSG_TYPE(x) (((x) > TSV_TYPE_MSG_START) && \
+			((x) < TSV_TYPE_MSG_END))
+#define MAX_MSG_DECODED_SIZE (MAX_MSG_SIZE*4)
+
+#if (defined(CONFIG_DEBUG_FS))
+void check_and_create_debugfs(void);
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt,
+			const char *mod_name);
+#else
+void check_and_create_debugfs(void)
+{
+}
+
+void create_ctx_debugfs(struct ipc_log_context *ctxt, const char *mod_name)
+{
+}
+#endif
+
+#endif
diff --git a/kernel/trace/msm_rtb.c b/kernel/trace/msm_rtb.c
new file mode 100644
index 0000000..34c48b1
--- /dev/null
+++ b/kernel/trace/msm_rtb.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/atomic.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/mod_devicetable.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <asm-generic/sizes.h>
+#include <linux/msm_rtb.h>
+
+#define SENTINEL_BYTE_1 0xFF
+#define SENTINEL_BYTE_2 0xAA
+#define SENTINEL_BYTE_3 0xFF
+
+#define RTB_COMPAT_STR	"qcom,msm-rtb"
+
+/* Write
+ * 1) 3 bytes of sentinel
+ * 2) 1 byte of log type
+ * 3) 8 bytes of where the caller came from
+ * 4) 4 bytes of index
+ * 5) 8 bytes of extra data from the caller
+ * 6) 8 bytes of timestamp
+ *
+ * Total = 32 bytes.
+ */
+struct msm_rtb_layout {
+	unsigned char sentinel[3];
+	unsigned char log_type;
+	uint32_t idx;
+	uint64_t caller;
+	uint64_t data;
+	uint64_t timestamp;
+} __attribute__ ((__packed__));
+
+
+struct msm_rtb_state {
+	struct msm_rtb_layout *rtb;
+	phys_addr_t phys;
+	int nentries;
+	int size;
+	int enabled;
+	int initialized;
+	uint32_t filter;
+	int step_size;
+};
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu);
+#else
+static atomic_t msm_rtb_idx;
+#endif
+
+static struct msm_rtb_state msm_rtb = {
+	.filter = 1 << LOGK_LOGBUF,
+	.enabled = 1,
+};
+
+module_param_named(filter, msm_rtb.filter, uint, 0644);
+module_param_named(enable, msm_rtb.enabled, int, 0644);
+
+static int msm_rtb_panic_notifier(struct notifier_block *this,
+					unsigned long event, void *ptr)
+{
+	msm_rtb.enabled = 0;
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block msm_rtb_panic_blk = {
+	.notifier_call  = msm_rtb_panic_notifier,
+	.priority = INT_MAX,
+};
+
+int notrace msm_rtb_event_should_log(enum logk_event_type log_type)
+{
+	return msm_rtb.initialized && msm_rtb.enabled &&
+		((1 << (log_type & ~LOGTYPE_NOPC)) & msm_rtb.filter);
+}
+EXPORT_SYMBOL(msm_rtb_event_should_log);
+
+static void msm_rtb_emit_sentinel(struct msm_rtb_layout *start)
+{
+	start->sentinel[0] = SENTINEL_BYTE_1;
+	start->sentinel[1] = SENTINEL_BYTE_2;
+	start->sentinel[2] = SENTINEL_BYTE_3;
+}
+
+static void msm_rtb_write_type(enum logk_event_type log_type,
+			struct msm_rtb_layout *start)
+{
+	start->log_type = (char)log_type;
+}
+
+static void msm_rtb_write_caller(uint64_t caller, struct msm_rtb_layout *start)
+{
+	start->caller = caller;
+}
+
+static void msm_rtb_write_idx(uint32_t idx,
+				struct msm_rtb_layout *start)
+{
+	start->idx = idx;
+}
+
+static void msm_rtb_write_data(uint64_t data, struct msm_rtb_layout *start)
+{
+	start->data = data;
+}
+
+static void msm_rtb_write_timestamp(struct msm_rtb_layout *start)
+{
+	start->timestamp = sched_clock();
+}
+
+static void uncached_logk_pc_idx(enum logk_event_type log_type, uint64_t caller,
+				 uint64_t data, int idx)
+{
+	struct msm_rtb_layout *start;
+
+	start = &msm_rtb.rtb[idx & (msm_rtb.nentries - 1)];
+
+	msm_rtb_emit_sentinel(start);
+	msm_rtb_write_type(log_type, start);
+	msm_rtb_write_caller(caller, start);
+	msm_rtb_write_idx(idx, start);
+	msm_rtb_write_data(data, start);
+	msm_rtb_write_timestamp(start);
+	mb();
+
+}
+
+static void uncached_logk_timestamp(int idx)
+{
+	unsigned long long timestamp;
+
+	timestamp = sched_clock();
+	uncached_logk_pc_idx(LOGK_TIMESTAMP|LOGTYPE_NOPC,
+			(uint64_t)lower_32_bits(timestamp),
+			(uint64_t)upper_32_bits(timestamp), idx);
+}
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+static int msm_rtb_get_idx(void)
+{
+	int cpu, i, offset;
+	atomic_t *index;
+
+	/*
+	 * ideally we would use get_cpu but this is a close enough
+	 * approximation for our purposes.
+	 */
+	cpu = raw_smp_processor_id();
+
+	index = &per_cpu(msm_rtb_idx_cpu, cpu);
+
+	i = atomic_add_return(msm_rtb.step_size, index);
+	i -= msm_rtb.step_size;
+
+	/* Check if index has wrapped around */
+	offset = (i & (msm_rtb.nentries - 1)) -
+		 ((i - msm_rtb.step_size) & (msm_rtb.nentries - 1));
+	if (offset < 0) {
+		uncached_logk_timestamp(i);
+		i = atomic_add_return(msm_rtb.step_size, index);
+		i -= msm_rtb.step_size;
+	}
+
+	return i;
+}
+#else
+static int msm_rtb_get_idx(void)
+{
+	int i, offset;
+
+	i = atomic_inc_return(&msm_rtb_idx);
+	i--;
+
+	/* Check if index has wrapped around */
+	offset = (i & (msm_rtb.nentries - 1)) -
+		 ((i - 1) & (msm_rtb.nentries - 1));
+	if (offset < 0) {
+		uncached_logk_timestamp(i);
+		i = atomic_inc_return(&msm_rtb_idx);
+		i--;
+	}
+
+	return i;
+}
+#endif
+
+int notrace uncached_logk_pc(enum logk_event_type log_type, void *caller,
+				void *data)
+{
+	int i;
+
+	if (!msm_rtb_event_should_log(log_type))
+		return 0;
+
+	i = msm_rtb_get_idx();
+	uncached_logk_pc_idx(log_type, (uint64_t)((unsigned long) caller),
+				(uint64_t)((unsigned long) data), i);
+
+	return 1;
+}
+EXPORT_SYMBOL(uncached_logk_pc);
+
+noinline int notrace uncached_logk(enum logk_event_type log_type, void *data)
+{
+	return uncached_logk_pc(log_type, __builtin_return_address(0), data);
+}
+EXPORT_SYMBOL(uncached_logk);
+
+static int msm_rtb_probe(struct platform_device *pdev)
+{
+	struct msm_rtb_platform_data *d = pdev->dev.platform_data;
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+	unsigned int cpu;
+#endif
+	int ret;
+
+	if (!pdev->dev.of_node) {
+		msm_rtb.size = d->size;
+	} else {
+		u64 size;
+		struct device_node *pnode;
+
+		pnode = of_parse_phandle(pdev->dev.of_node,
+						"linux,contiguous-region", 0);
+		if (pnode != NULL) {
+			const u32 *addr;
+
+			addr = of_get_address(pnode, 0, &size, NULL);
+			if (!addr) {
+				of_node_put(pnode);
+				return -EINVAL;
+			}
+			of_node_put(pnode);
+		} else {
+			ret = of_property_read_u32(pdev->dev.of_node,
+					"qcom,rtb-size",
+					(u32 *)&size);
+			if (ret < 0)
+				return ret;
+
+		}
+
+		msm_rtb.size = size;
+	}
+
+	if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
+		return -EINVAL;
+
+	msm_rtb.rtb = dma_alloc_coherent(&pdev->dev, msm_rtb.size,
+						&msm_rtb.phys,
+						GFP_KERNEL);
+
+	if (!msm_rtb.rtb)
+		return -ENOMEM;
+
+	msm_rtb.nentries = msm_rtb.size / sizeof(struct msm_rtb_layout);
+
+	/* Round this down to a power of 2 */
+	msm_rtb.nentries = __rounddown_pow_of_two(msm_rtb.nentries);
+
+	memset(msm_rtb.rtb, 0, msm_rtb.size);
+
+
+#if defined(CONFIG_QCOM_RTB_SEPARATE_CPUS)
+	for_each_possible_cpu(cpu) {
+		atomic_t *a = &per_cpu(msm_rtb_idx_cpu, cpu);
+
+		atomic_set(a, cpu);
+	}
+	msm_rtb.step_size = num_possible_cpus();
+#else
+	atomic_set(&msm_rtb_idx, 0);
+	msm_rtb.step_size = 1;
+#endif
+
+	atomic_notifier_chain_register(&panic_notifier_list,
+						&msm_rtb_panic_blk);
+	msm_rtb.initialized = 1;
+	return 0;
+}
+
+static const struct of_device_id msm_match_table[] = {
+	{.compatible = RTB_COMPAT_STR},
+	{},
+};
+
+static struct platform_driver msm_rtb_driver = {
+	.driver         = {
+		.name = "msm_rtb",
+		.owner = THIS_MODULE,
+		.of_match_table = msm_match_table
+	},
+};
+
+static int __init msm_rtb_init(void)
+{
+	return platform_driver_probe(&msm_rtb_driver, msm_rtb_probe);
+}
+
+static void __exit msm_rtb_exit(void)
+{
+	platform_driver_unregister(&msm_rtb_driver);
+}
+module_init(msm_rtb_init)
+module_exit(msm_rtb_exit)
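A rough sketch of how a caller would feed the ring buffer added here. LOGK_LOGBUF is reused from the driver's default filter purely for illustration (a real caller would pick the event type appropriate to its use), and the register pointer is an assumed payload.

#include <linux/msm_rtb.h>

static void foo_log_access(void __iomem *reg)
{
	/*
	 * Records the caller's return address, the pointer passed in and a
	 * timestamp into the uncached ring, subject to msm_rtb.filter.
	 */
	uncached_logk(LOGK_LOGBUF, (void *)reg);
}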
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8696ce6..df9cde4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -41,6 +41,7 @@
 #include <linux/nmi.h>
 #include <linux/fs.h>
 #include <linux/sched/rt.h>
+#include <linux/coresight-stm.h>
 
 #include "trace.h"
 #include "trace_output.h"
@@ -1597,6 +1598,7 @@
 
 #define SAVED_CMDLINES_DEFAULT 128
 #define NO_CMDLINE_MAP UINT_MAX
+static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 struct saved_cmdlines_buffer {
 	unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
@@ -1835,7 +1837,7 @@
 	}
 
 	set_cmdline(idx, tsk->comm);
-
+	saved_tgids[idx] = tsk->tgid;
 	arch_spin_unlock(&trace_cmdline_lock);
 
 	return 1;
@@ -1878,6 +1880,25 @@
 	preempt_enable();
 }
 
+int trace_find_tgid(int pid)
+{
+	unsigned map;
+	int tgid;
+
+	preempt_disable();
+	arch_spin_lock(&trace_cmdline_lock);
+	map = savedcmd->map_pid_to_cmdline[pid];
+	if (map != NO_CMDLINE_MAP)
+		tgid = saved_tgids[map];
+	else
+		tgid = -1;
+
+	arch_spin_unlock(&trace_cmdline_lock);
+	preempt_enable();
+
+	return tgid;
+}
+
 void tracing_record_cmdline(struct task_struct *tsk)
 {
 	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
@@ -2553,6 +2574,7 @@
 
 	memcpy(&entry->buf, tbuffer, len + 1);
 	if (!call_filter_check_discard(call, entry, buffer, event)) {
+		stm_log(OST_ENTITY_TRACE_PRINTK, entry->buf, len + 1);
 		__buffer_unlock_commit(buffer, event);
 		ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
 	}
@@ -2920,6 +2942,13 @@
 		    "#              | |       |          |         |\n");
 }
 
+static void print_func_help_header_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+	print_event_info(buf, m);
+	seq_puts(m, "#           TASK-PID    TGID   CPU#      TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |        |      |          |         |\n");
+}
+
 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m)
 {
 	print_event_info(buf, m);
@@ -2932,6 +2961,18 @@
 		    "#              | |       |   ||||       |         |\n");
 }
 
+static void print_func_help_header_irq_tgid(struct trace_buffer *buf, struct seq_file *m)
+{
+	print_event_info(buf, m);
+	seq_puts(m, "#                                      _-----=> irqs-off\n");
+	seq_puts(m, "#                                     / _----=> need-resched\n");
+	seq_puts(m, "#                                    | / _---=> hardirq/softirq\n");
+	seq_puts(m, "#                                    || / _--=> preempt-depth\n");
+	seq_puts(m, "#                                    ||| /     delay\n");
+	seq_puts(m, "#           TASK-PID    TGID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
+	seq_puts(m, "#              | |        |      |   ||||       |         |\n");
+}
+
 void
 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
 {
@@ -3244,9 +3285,15 @@
 	} else {
 		if (!(trace_flags & TRACE_ITER_VERBOSE)) {
 			if (trace_flags & TRACE_ITER_IRQ_INFO)
-				print_func_help_header_irq(iter->trace_buffer, m);
+				if (trace_flags & TRACE_ITER_TGID)
+					print_func_help_header_irq_tgid(iter->trace_buffer, m);
+				else
+					print_func_help_header_irq(iter->trace_buffer, m);
 			else
-				print_func_help_header(iter->trace_buffer, m);
+				if (trace_flags & TRACE_ITER_TGID)
+					print_func_help_header_tgid(iter->trace_buffer, m);
+				else
+					print_func_help_header(iter->trace_buffer, m);
 		}
 	}
 }
@@ -4582,6 +4629,50 @@
 }
 
 static ssize_t
+tracing_saved_tgids_read(struct file *file, char __user *ubuf,
+				size_t cnt, loff_t *ppos)
+{
+	char *file_buf;
+	char *buf;
+	int len = 0;
+	int pid;
+	int i;
+
+	file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
+	if (!file_buf)
+		return -ENOMEM;
+
+	buf = file_buf;
+
+	for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
+		int tgid;
+		int r;
+
+		pid = savedcmd->map_cmdline_to_pid[i];
+		if (pid == -1 || pid == NO_CMDLINE_MAP)
+			continue;
+
+		tgid = trace_find_tgid(pid);
+		r = sprintf(buf, "%d %d\n", pid, tgid);
+		buf += r;
+		len += r;
+	}
+
+	len = simple_read_from_buffer(ubuf, cnt, ppos,
+				      file_buf, len);
+
+	kfree(file_buf);
+
+	return len;
+}
+
+static const struct file_operations tracing_saved_tgids_fops = {
+	.open	= tracing_open_generic,
+	.read	= tracing_saved_tgids_read,
+	.llseek	= generic_file_llseek,
+};
+
+static ssize_t
 tracing_set_trace_read(struct file *filp, char __user *ubuf,
 		       size_t cnt, loff_t *ppos)
 {
@@ -5601,8 +5692,11 @@
 	if (entry->buf[cnt - 1] != '\n') {
 		entry->buf[cnt] = '\n';
 		entry->buf[cnt + 1] = '\0';
-	} else
+		stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 2);
+	} else {
 		entry->buf[cnt] = '\0';
+		stm_log(OST_ENTITY_TRACE_MARKER, entry->buf, cnt + 1);
+	}
 
 	__buffer_unlock_commit(buffer, event);
 
@@ -7214,6 +7308,9 @@
 	trace_create_file("trace_marker", 0220, d_tracer,
 			  tr, &tracing_mark_fops);
 
+	trace_create_file("saved_tgids", 0444, d_tracer,
+			  tr, &tracing_saved_tgids_fops);
+
 	trace_create_file("trace_clock", 0644, d_tracer, tr,
 			  &trace_clock_fops);
 
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fd24b1f..38dbb36 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -690,6 +690,7 @@
 
 extern void trace_find_cmdline(int pid, char comm[]);
 extern void trace_event_follow_fork(struct trace_array *tr, bool enable);
+extern int trace_find_tgid(int pid);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 extern unsigned long ftrace_update_tot_cnt;
@@ -1007,7 +1008,8 @@
 		FUNCTION_FLAGS					\
 		FGRAPH_FLAGS					\
 		STACK_FLAGS					\
-		BRANCH_FLAGS
+		BRANCH_FLAGS					\
+		C(TGID,			"print-tgid"),
 
 /*
  * By defining C, we can make TRACE_FLAGS a list of bit names
@@ -1179,6 +1181,7 @@
  * @entry: The event itself
  * @irq_flags: The state of the interrupts at the start of the event
  * @pc: The state of the preempt count at the start of the event.
+ * @len: The length of the payload data required for stm logging.
  *
  * This is a helper function to handle triggers that require data
  * from the event itself. It also tests the event against filters and
@@ -1188,12 +1191,17 @@
 event_trigger_unlock_commit(struct trace_event_file *file,
 			    struct ring_buffer *buffer,
 			    struct ring_buffer_event *event,
-			    void *entry, unsigned long irq_flags, int pc)
+			    void *entry, unsigned long irq_flags, int pc,
+			    unsigned long len)
 {
 	enum event_trigger_type tt = ETT_NONE;
 
-	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
+	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt)) {
+		if (len)
+			stm_log(OST_ENTITY_FTRACE_EVENTS, entry, len);
+
 		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
+	}
 
 	if (tt)
 		event_triggers_post_call(file, tt, entry);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 03c0a48..72687f4 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -312,14 +312,15 @@
 	spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
 }
 
-void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
+void trace_event_buffer_commit(struct trace_event_buffer *fbuffer,
+			       unsigned long len)
 {
 	if (tracepoint_printk)
 		output_printk(fbuffer);
 
 	event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
 				    fbuffer->event, fbuffer->entry,
-				    fbuffer->flags, fbuffer->pc);
+				    fbuffer->flags, fbuffer->pc, len);
 }
 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 4e480e8..305f535 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,9 @@
 
 #define TRACE_GRAPH_INDENT	2
 
+/* Flag options */
+#define TRACE_GRAPH_PRINT_FLAT		0x80
+
 static unsigned int max_depth;
 
 static struct tracer_opt trace_opts[] = {
@@ -88,6 +91,8 @@
 	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
 	/* Include time within nested functions */
 	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
+	/* Use standard trace formatting rather than hierarchical */
+	{ TRACER_OPT(funcgraph-flat, TRACE_GRAPH_PRINT_FLAT) },
 	{ } /* Empty entry */
 };
 
@@ -1232,6 +1237,9 @@
 	int cpu = iter->cpu;
 	int ret;
 
+	if (flags & TRACE_GRAPH_PRINT_FLAT)
+		return TRACE_TYPE_UNHANDLED;
+
 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
 		return TRACE_TYPE_HANDLED;
@@ -1289,13 +1297,6 @@
 	return print_graph_function_flags(iter, tracer_flags.val);
 }
 
-static enum print_line_t
-print_graph_function_event(struct trace_iterator *iter, int flags,
-			   struct trace_event *event)
-{
-	return print_graph_function(iter);
-}
-
 static void print_lat_header(struct seq_file *s, u32 flags)
 {
 	static const char spaces[] = "                "	/* 16 spaces */
@@ -1364,6 +1365,11 @@
 	struct trace_iterator *iter = s->private;
 	struct trace_array *tr = iter->tr;
 
+	if (flags & TRACE_GRAPH_PRINT_FLAT) {
+		trace_default_header(s);
+		return;
+	}
+
 	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 		return;
 
@@ -1445,19 +1451,6 @@
 	return 0;
 }
 
-static struct trace_event_functions graph_functions = {
-	.trace		= print_graph_function_event,
-};
-
-static struct trace_event graph_trace_entry_event = {
-	.type		= TRACE_GRAPH_ENT,
-	.funcs		= &graph_functions,
-};
-
-static struct trace_event graph_trace_ret_event = {
-	.type		= TRACE_GRAPH_RET,
-	.funcs		= &graph_functions
-};
 
 static struct tracer graph_trace __tracer_data = {
 	.name		= "function_graph",
@@ -1534,16 +1527,6 @@
 {
 	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
-	if (!register_trace_event(&graph_trace_entry_event)) {
-		pr_warn("Warning: could not register graph trace events\n");
-		return 1;
-	}
-
-	if (!register_trace_event(&graph_trace_ret_event)) {
-		pr_warn("Warning: could not register graph trace events\n");
-		return 1;
-	}
-
 	return register_tracer(&graph_trace);
 }
 
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 3fc2042..0346759 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -530,11 +530,21 @@
 	unsigned long long t;
 	unsigned long secs, usec_rem;
 	char comm[TASK_COMM_LEN];
+	int tgid;
 
 	trace_find_cmdline(entry->pid, comm);
 
-	trace_seq_printf(s, "%16s-%-5d [%03d] ",
-			       comm, entry->pid, iter->cpu);
+	trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+
+	if (tr->trace_flags & TRACE_ITER_TGID) {
+		tgid = trace_find_tgid(entry->pid);
+		if (tgid < 0)
+			trace_seq_puts(s, "(-----) ");
+		else
+			trace_seq_printf(s, "(%5d) ", tgid);
+	}
+
+	trace_seq_printf(s, "[%03d] ", iter->cpu);
 
 	if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
 		trace_print_lat_fmt(s, entry);
@@ -849,6 +859,174 @@
 	.funcs		= &trace_fn_funcs,
 };
 
+/* TRACE_GRAPH_ENT */
+static enum print_line_t trace_graph_ent_trace(struct trace_iterator *iter, int flags,
+					struct trace_event *event)
+{
+	struct trace_seq *s = &iter->seq;
+	struct ftrace_graph_ent_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_puts(s, "graph_ent: func=");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!seq_print_ip_sym(s, field->graph_ent.func, flags))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	trace_seq_puts(s, "\n");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_raw(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_printf(&iter->seq, "%lx %d\n",
+			      field->graph_ent.func,
+			      field->graph_ent.depth);
+	if (trace_seq_has_overflowed(&iter->seq))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_hex(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_HEX_FIELD(s, field->graph_ent.func);
+	SEQ_PUT_HEX_FIELD(s, field->graph_ent.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ent_bin(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ent_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_FIELD(s, field->graph_ent.func);
+	SEQ_PUT_FIELD(s, field->graph_ent.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ent_funcs = {
+	.trace		= trace_graph_ent_trace,
+	.raw		= trace_graph_ent_raw,
+	.hex		= trace_graph_ent_hex,
+	.binary		= trace_graph_ent_bin,
+};
+
+static struct trace_event trace_graph_ent_event = {
+	.type		= TRACE_GRAPH_ENT,
+	.funcs		= &trace_graph_ent_funcs,
+};
+
+/* TRACE_GRAPH_RET */
+static enum print_line_t trace_graph_ret_trace(struct trace_iterator *iter, int flags,
+					struct trace_event *event)
+{
+	struct trace_seq *s = &iter->seq;
+	struct trace_entry *entry = iter->ent;
+	struct ftrace_graph_ret_entry *field;
+
+	trace_assign_type(field, entry);
+
+	trace_seq_puts(s, "graph_ret: func=");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	if (!seq_print_ip_sym(s, field->ret.func, flags))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	trace_seq_puts(s, "\n");
+	if (trace_seq_has_overflowed(s))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_raw(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+
+	trace_assign_type(field, iter->ent);
+
+	trace_seq_printf(&iter->seq, "%lx %lld %lld %ld %d\n",
+			      field->ret.func,
+			      field->ret.calltime,
+			      field->ret.rettime,
+			      field->ret.overrun,
+			      field->ret.depth);
+	if (trace_seq_has_overflowed(&iter->seq))
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_hex(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_HEX_FIELD(s, field->ret.func);
+	SEQ_PUT_HEX_FIELD(s, field->ret.calltime);
+	SEQ_PUT_HEX_FIELD(s, field->ret.rettime);
+	SEQ_PUT_HEX_FIELD(s, field->ret.overrun);
+	SEQ_PUT_HEX_FIELD(s, field->ret.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static enum print_line_t trace_graph_ret_bin(struct trace_iterator *iter, int flags,
+				      struct trace_event *event)
+{
+	struct ftrace_graph_ret_entry *field;
+	struct trace_seq *s = &iter->seq;
+
+	trace_assign_type(field, iter->ent);
+
+	SEQ_PUT_FIELD(s, field->ret.func);
+	SEQ_PUT_FIELD(s, field->ret.calltime);
+	SEQ_PUT_FIELD(s, field->ret.rettime);
+	SEQ_PUT_FIELD(s, field->ret.overrun);
+	SEQ_PUT_FIELD(s, field->ret.depth);
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static struct trace_event_functions trace_graph_ret_funcs = {
+	.trace		= trace_graph_ret_trace,
+	.raw		= trace_graph_ret_raw,
+	.hex		= trace_graph_ret_hex,
+	.binary		= trace_graph_ret_bin,
+};
+
+static struct trace_event trace_graph_ret_event = {
+	.type		= TRACE_GRAPH_RET,
+	.funcs		= &trace_graph_ret_funcs,
+};
+
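+/*
+ * These events replace the ones previously registered by the
+ * function_graph tracer itself, so graph entry/return records can also
+ * be rendered one per line by the standard output path; this is what
+ * the new funcgraph-flat option relies on. The event portion of such a
+ * line looks like (symbol name hypothetical, exact form depends on the
+ * sym-offset/sym-addr options):
+ *
+ *   graph_ent: func=do_sys_open
+ */
+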
 /* TRACE_CTX an TRACE_WAKE */
 static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter,
 					     char *delim)
@@ -1291,6 +1469,8 @@
 
 static struct trace_event *events[] __initdata = {
 	&trace_fn_event,
+	&trace_graph_ent_event,
+	&trace_graph_ret_event,
 	&trace_ctx_event,
 	&trace_wake_event,
 	&trace_stack_event,
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 5e10395..fa5fe21 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -349,7 +349,7 @@
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
 	event_trigger_unlock_commit(trace_file, buffer, event, entry,
-				    irq_flags, pc);
+				    irq_flags, pc, 0);
 }
 
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
@@ -395,7 +395,7 @@
 	entry->ret = syscall_get_return_value(current, regs);
 
 	event_trigger_unlock_commit(trace_file, buffer, event, entry,
-				    irq_flags, pc);
+				    irq_flags, pc, 0);
 }
 
 static int reg_event_syscall_enter(struct trace_event_file *file,
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 0913693..bc6c6ec 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -821,7 +821,7 @@
 
 	memcpy(data, ucb->buf, tu->tp.size + dsize);
 
-	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0);
+	event_trigger_unlock_commit(trace_file, buffer, event, entry, 0, 0, 0);
 }
 
 /* uprobe handler */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 9acb29f..a1f78d4 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -104,6 +104,11 @@
 static DEFINE_PER_CPU(bool, hard_watchdog_warn);
 static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static cpumask_t __read_mostly watchdog_cpus;
+#endif
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 static unsigned long soft_lockup_nmi_warn;
@@ -115,7 +120,7 @@
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 unsigned int __read_mostly hardlockup_panic =
 			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
-static unsigned long hardlockup_allcpu_dumped;
+static unsigned long __maybe_unused hardlockup_allcpu_dumped;
 /*
  * We may not want to enable hard lockup detection by default in all cases,
  * for example when running the kernel as a guest on a hypervisor. In these
@@ -287,7 +292,7 @@
 	__this_cpu_write(watchdog_touch_ts, 0);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /* watchdog detector functions */
 static bool is_hardlockup(void)
 {
@@ -301,6 +306,76 @@
 }
 #endif
 
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static unsigned int watchdog_next_cpu(unsigned int cpu)
+{
+	cpumask_t cpus = watchdog_cpus;
+	unsigned int next_cpu;
+
+	next_cpu = cpumask_next(cpu, &cpus);
+	if (next_cpu >= nr_cpu_ids)
+		next_cpu = cpumask_first(&cpus);
+
+	if (next_cpu == cpu)
+		return nr_cpu_ids;
+
+	return next_cpu;
+}
+
+static int is_hardlockup_other_cpu(unsigned int cpu)
+{
+	unsigned long hrint = per_cpu(hrtimer_interrupts, cpu);
+
+	if (per_cpu(hrtimer_interrupts_saved, cpu) == hrint)
+		return 1;
+
+	per_cpu(hrtimer_interrupts_saved, cpu) = hrint;
+	return 0;
+}
+
+static void watchdog_check_hardlockup_other_cpu(void)
+{
+	unsigned int next_cpu;
+
+	/*
+	 * Test for hardlockups every 3 samples.  The sample period is
+	 *  watchdog_thresh * 2 / 5, so 3 samples gets us back to slightly over
+	 *  watchdog_thresh (over by 20%).
+	 */
+	if (__this_cpu_read(hrtimer_interrupts) % 3 != 0)
+		return;
+
+	/* check for a hardlockup on the next cpu */
+	next_cpu = watchdog_next_cpu(smp_processor_id());
+	if (next_cpu >= nr_cpu_ids)
+		return;
+
+	smp_rmb();
+
+	if (per_cpu(watchdog_nmi_touch, next_cpu) == true) {
+		per_cpu(watchdog_nmi_touch, next_cpu) = false;
+		return;
+	}
+
+	if (is_hardlockup_other_cpu(next_cpu)) {
+		/* only warn once */
+		if (per_cpu(hard_watchdog_warn, next_cpu) == true)
+			return;
+
+		if (hardlockup_panic)
+			panic("Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+		else
+			WARN(1, "Watchdog detected hard LOCKUP on cpu %u", next_cpu);
+
+		per_cpu(hard_watchdog_warn, next_cpu) = true;
+	} else {
+		per_cpu(hard_watchdog_warn, next_cpu) = false;
+	}
+}
+#else
+static inline void watchdog_check_hardlockup_other_cpu(void) { return; }
+#endif
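+
+/*
+ * Worked example, assuming the default watchdog_thresh of 10s: the
+ * sample period is 10 * 2 / 5 = 4s, so checking every 3 samples means a
+ * hard lockup on the next cpu is noticed within roughly 12s, matching
+ * the interval documented in the LOCKUP_DETECTOR help text.
+ */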
+
 static int is_softlockup(unsigned long touch_ts)
 {
 	unsigned long now = get_timestamp();
@@ -313,7 +388,7 @@
 	return 0;
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
@@ -376,7 +451,7 @@
 	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static void watchdog_interrupt_count(void)
 {
@@ -400,6 +475,9 @@
 	/* kick the hardlockup detector */
 	watchdog_interrupt_count();
 
+	/* test for hardlockups on the next cpu */
+	watchdog_check_hardlockup_other_cpu();
+
 	/* kick the softlockup detector */
 	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
@@ -577,7 +655,7 @@
 		watchdog_nmi_disable(cpu);
 }
 
-#ifdef CONFIG_HARDLOCKUP_DETECTOR
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_NMI
 /*
  * People like the simple clean cpu node info on boot.
  * Reduce the watchdog noise by only printing messages
@@ -676,9 +754,44 @@
 }
 
 #else
+#ifdef CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU
+static int watchdog_nmi_enable(unsigned int cpu)
+{
+	/*
+	 * The new cpu will be marked online before the first hrtimer interrupt
+	 * runs on it.  If another cpu tests for a hardlockup on the new cpu
+	 * before it has run its first hrtimer, it will get a false positive.
+	 * Touch the watchdog on the new cpu to delay the first check for at
+	 * least 3 sampling periods to guarantee one hrtimer has run on the new
+	 * cpu.
+	 */
+	per_cpu(watchdog_nmi_touch, cpu) = true;
+	smp_wmb();
+	cpumask_set_cpu(cpu, &watchdog_cpus);
+	return 0;
+}
+
+static void watchdog_nmi_disable(unsigned int cpu)
+{
+	unsigned int next_cpu = watchdog_next_cpu(cpu);
+
+	/*
+	 * Offlining this cpu will cause the cpu before this one to start
+	 * checking the one after this one.  If this cpu just finished checking
+	 * the next cpu and updating hrtimer_interrupts_saved, and then the
+	 * previous cpu checks it within one sample period, it will trigger a
+	 * false positive.  Touch the watchdog on the next cpu to prevent it.
+	 */
+	if (next_cpu < nr_cpu_ids)
+		per_cpu(watchdog_nmi_touch, next_cpu) = true;
+	smp_wmb();
+	cpumask_clear_cpu(cpu, &watchdog_cpus);
+}
+#else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
-#endif /* CONFIG_HARDLOCKUP_DETECTOR */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_OTHER_CPU */
+#endif /* CONFIG_HARDLOCKUP_DETECTOR_NMI */
 
 static struct smp_hotplug_thread watchdog_threads = {
 	.store			= &softlockup_watchdog,
diff --git a/lib/Kconfig b/lib/Kconfig
index 260a80e..8b6c41e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -550,4 +550,20 @@
 config SBITMAP
 	bool
 
+config QMI_ENCDEC
+	bool "QMI Encode/Decode Library"
+	help
+	  Library to encode & decode QMI messages from within
+	  the kernel. The kernel drivers encode the C structure into
+	  QMI message wire format and then send it over a transport.
+	  The kernel drivers receive the QMI message over a transport
+	  and then decode it into a C structure.
+
+config QMI_ENCDEC_DEBUG
+	bool "QMI Encode/Decode Library Debug"
+	help
+	  Kernel config option to enable debugging QMI Encode/Decode
+	  library. This will log the information regarding the element
+	  and message being encoded & decoded.
+
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a6c8db1..87a3de7 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -764,15 +764,27 @@
 	  The overhead should be minimal.  A periodic hrtimer runs to
 	  generate interrupts and kick the watchdog task every 4 seconds.
 	  An NMI is generated every 10 seconds or so to check for hardlockups.
+	  If NMIs are not available on the platform, every 12 seconds the
+	  hrtimer interrupt on one cpu will be used to check for hardlockups
+	  on the next cpu.
 
 	  The frequency of hrtimer and NMI events and the soft and hard lockup
 	  thresholds can be controlled through the sysctl watchdog_thresh.
 
-config HARDLOCKUP_DETECTOR
+config HARDLOCKUP_DETECTOR_NMI
 	def_bool y
 	depends on LOCKUP_DETECTOR && !HAVE_NMI_WATCHDOG
 	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 
+config HARDLOCKUP_DETECTOR_OTHER_CPU
+	def_bool y
+	depends on LOCKUP_DETECTOR && SMP
+	depends on !HARDLOCKUP_DETECTOR_NMI && !HAVE_NMI_WATCHDOG
+
+config HARDLOCKUP_DETECTOR
+	def_bool y
+	depends on HARDLOCKUP_DETECTOR_NMI || HARDLOCKUP_DETECTOR_OTHER_CPU
+
 config BOOTPARAM_HARDLOCKUP_PANIC
 	bool "Panic (Reboot) On Hard Lockups"
 	depends on HARDLOCKUP_DETECTOR
@@ -923,6 +935,25 @@
 	bool
 	default n
 
+config PANIC_ON_SCHED_BUG
+	bool "Panic on all bugs encountered by the scheduler"
+	help
+	  Say Y here to panic on all 'BUG:' conditions encountered by the
+	  scheduler, even potentially-recoverable ones such as scheduling
+	  while atomic, sleeping from invalid context, and detection of
+	  broken arch topologies.
+
+	  Say N if unsure.
+
+config PANIC_ON_RT_THROTTLING
+	bool "Panic on RT throttling"
+	help
+	  Say Y here to enable the kernel to panic when a realtime
+	  runqueue is throttled. This may be useful for detecting
+	  and debugging RT throttling issues.
+
+	  Say N if unsure.
+
 config SCHEDSTATS
 	bool "Collect scheduler statistics"
 	depends on DEBUG_KERNEL && PROC_FS
@@ -1003,6 +1034,28 @@
 	  and certain other kinds of spinlock errors commonly made.  This is
 	  best used in conjunction with the NMI watchdog so that spinlock
 	  deadlocks are also debuggable.
+choice
+        prompt "Perform Action on spinlock bug"
+        depends on DEBUG_SPINLOCK
+
+        default DEBUG_SPINLOCK_BITE_ON_BUG
+
+        config DEBUG_SPINLOCK_BITE_ON_BUG
+                bool "Cause a Watchdog Bite on Spinlock bug"
+                depends on QCOM_WATCHDOG_V2
+                help
+                  On a spinlock bug, cause a watchdog bite so that we can get
+                  the precise state of the system captured at the time of spin
+                  dump. This is mutually exclusive with the below
+                  DEBUG_SPINLOCK_PANIC_ON_BUG config.
+
+        config DEBUG_SPINLOCK_PANIC_ON_BUG
+                bool "Cause a Kernel Panic on Spinlock bug"
+                help
+                  On a spinlock bug, cause a kernel panic so that we can get the complete
+                  information about the system at the time of spin dump in the dmesg.
+                  This is mutually exclusive with the above DEBUG_SPINLOCK_BITE_ON_BUG.
+endchoice
 
 config DEBUG_MUTEXES
 	bool "Mutex debugging: basic checks"
@@ -1183,7 +1236,7 @@
 	depends on DEBUG_KERNEL
 	help
 	  If you say Y here, some extra kobject debugging messages will be sent
-	  to the syslog. 
+	  to the syslog.
 
 config DEBUG_KOBJECT_RELEASE
 	bool "kobject release debugging"
diff --git a/lib/Makefile b/lib/Makefile
index 50144a3..e0eb131 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -230,3 +230,4 @@
 UBSAN_SANITIZE_ubsan.o := n
 
 obj-$(CONFIG_SBITMAP) += sbitmap.o
+obj-$(CONFIG_QMI_ENCDEC) += qmi_encdec.o
diff --git a/lib/iomap.c b/lib/iomap.c
index fc3dcb4..b29a91e 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -5,6 +5,7 @@
  */
 #include <linux/pci.h>
 #include <linux/io.h>
+#include <linux/msm_rtb.h>
 
 #include <linux/export.h>
 
@@ -70,26 +71,31 @@
 
 unsigned int ioread8(void __iomem *addr)
 {
-	IO_COND(addr, return inb(port), return readb(addr));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr);
+	IO_COND(addr, return inb(port), return readb_no_log(addr));
 	return 0xff;
 }
 unsigned int ioread16(void __iomem *addr)
 {
-	IO_COND(addr, return inw(port), return readw(addr));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr);
+	IO_COND(addr, return inw(port), return readw_no_log(addr));
 	return 0xffff;
 }
 unsigned int ioread16be(void __iomem *addr)
 {
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr);
 	IO_COND(addr, return pio_read16be(port), return mmio_read16be(addr));
 	return 0xffff;
 }
 unsigned int ioread32(void __iomem *addr)
 {
-	IO_COND(addr, return inl(port), return readl(addr));
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr);
+	IO_COND(addr, return inl(port), return readl_no_log(addr));
 	return 0xffffffff;
 }
 unsigned int ioread32be(void __iomem *addr)
 {
+	uncached_logk_pc(LOGK_READL, __builtin_return_address(0), addr);
 	IO_COND(addr, return pio_read32be(port), return mmio_read32be(addr));
 	return 0xffffffff;
 }
@@ -111,22 +117,27 @@
 
 void iowrite8(u8 val, void __iomem *addr)
 {
-	IO_COND(addr, outb(val,port), writeb(val, addr));
+	uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr);
+	IO_COND(addr, outb(val, port), writeb_no_log(val, addr));
 }
 void iowrite16(u16 val, void __iomem *addr)
 {
-	IO_COND(addr, outw(val,port), writew(val, addr));
+	uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr);
+	IO_COND(addr, outw(val, port), writew_no_log(val, addr));
 }
 void iowrite16be(u16 val, void __iomem *addr)
 {
+	uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr);
 	IO_COND(addr, pio_write16be(val,port), mmio_write16be(val, addr));
 }
 void iowrite32(u32 val, void __iomem *addr)
 {
-	IO_COND(addr, outl(val,port), writel(val, addr));
+	uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr);
+	IO_COND(addr, outl(val, port), writel_no_log(val, addr));
 }
 void iowrite32be(u32 val, void __iomem *addr)
 {
+	uncached_logk_pc(LOGK_WRITEL, __builtin_return_address(0), addr);
 	IO_COND(addr, pio_write32be(val,port), mmio_write32be(val, addr));
 }
 EXPORT_SYMBOL(iowrite8);
diff --git a/lib/qmi_encdec.c b/lib/qmi_encdec.c
new file mode 100644
index 0000000..d7221d8
--- /dev/null
+++ b/lib/qmi_encdec.c
@@ -0,0 +1,877 @@
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/qmi_encdec.h>
+
+#include "qmi_encdec_priv.h"
+
+#define TLV_LEN_SIZE sizeof(uint16_t)
+#define TLV_TYPE_SIZE sizeof(uint8_t)
+#define OPTIONAL_TLV_TYPE_START 0x10
+
+#ifdef CONFIG_QMI_ENCDEC_DEBUG
+
+#define qmi_encdec_dump(prefix_str, buf, buf_len) do { \
+	const u8 *ptr = buf; \
+	int i, linelen, remaining = buf_len; \
+	int rowsize = 16, groupsize = 1; \
+	unsigned char linebuf[256]; \
+	for (i = 0; i < buf_len; i += rowsize) { \
+		linelen = min(remaining, rowsize); \
+		remaining -= linelen; \
+		hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, \
+				   linebuf, sizeof(linebuf), false); \
+		pr_debug("%s: %s\n", prefix_str, linebuf); \
+	} \
+} while (0)
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) \
+	qmi_encdec_dump("QMI_ENCODE_MSG", buf, buf_len)
+
+#define QMI_DECODE_LOG_MSG(buf, buf_len) \
+	qmi_encdec_dump("QMI_DECODE_MSG", buf, buf_len)
+
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+	pr_debug("QMI_ENCODE_ELEM lvl: %d, len: %d, size: %d\n", \
+		 level, elem_len, elem_size); \
+	qmi_encdec_dump("QMI_ENCODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) do { \
+	pr_debug("QMI_DECODE_ELEM lvl: %d, len: %d, size: %d\n", \
+		 level, elem_len, elem_size); \
+	qmi_encdec_dump("QMI_DECODE_ELEM", buf, (elem_len * elem_size)); \
+} while (0)
+
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) \
+	pr_debug("QMI_ENCODE_TLV type: %d, len: %d\n", tlv_type, tlv_len)
+
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) \
+	pr_debug("QMI_DECODE_TLV type: %d, len: %d\n", tlv_type, tlv_len)
+
+#else
+
+#define QMI_ENCODE_LOG_MSG(buf, buf_len) { }
+#define QMI_DECODE_LOG_MSG(buf, buf_len) { }
+#define QMI_ENCODE_LOG_ELEM(level, elem_len, elem_size, buf) { }
+#define QMI_DECODE_LOG_ELEM(level, elem_len, elem_size, buf) { }
+#define QMI_ENCODE_LOG_TLV(tlv_type, tlv_len) { }
+#define QMI_DECODE_LOG_TLV(tlv_type, tlv_len) { }
+
+#endif
+
+static int _qmi_kernel_encode(struct elem_info *ei_array,
+			      void *out_buf, void *in_c_struct,
+			      uint32_t out_buf_len, int enc_level);
+
+static int _qmi_kernel_decode(struct elem_info *ei_array,
+			      void *out_c_struct,
+			      void *in_buf, uint32_t in_buf_len,
+			      int dec_level);
+static struct elem_info *skip_to_next_elem(struct elem_info *ei_array,
+					   int level);
+
+/**
+ * qmi_calc_max_msg_len() - Calculate the maximum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * @return: expected maximum length of the QMI message or 0 on failure.
+ */
+static int qmi_calc_max_msg_len(struct elem_info *ei_array,
+				int level)
+{
+	int max_msg_len = 0;
+	struct elem_info *temp_ei;
+
+	if (!ei_array)
+		return max_msg_len;
+
+	for (temp_ei = ei_array; temp_ei->data_type != QMI_EOTI; temp_ei++) {
+		/* Flag to identify the optional element is not encoded */
+		if (temp_ei->data_type == QMI_OPT_FLAG)
+			continue;
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			max_msg_len += (temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t));
+			continue;
+		} else if (temp_ei->data_type == QMI_STRUCT) {
+			max_msg_len += (temp_ei->elem_len *
+					qmi_calc_max_msg_len(temp_ei->ei_array,
+							    (level + 1)));
+		} else if (temp_ei->data_type == QMI_STRING) {
+			if (level > 1)
+				max_msg_len += temp_ei->elem_len <= U8_MAX ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			max_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+		} else {
+			max_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+		}
+
+		/*
+		 * Type & Length info. not prepended for elements in the
+		 * nested structure.
+		 */
+		if (level == 1)
+			max_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+	}
+	return max_msg_len;
+}
+
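+/*
+ * Worked example: a message with a single mandatory 4-byte element at
+ * level 1 contributes elem_len * elem_size = 1 * 4 bytes plus the
+ * 3-byte TLV header (TLV_TYPE_SIZE + TLV_LEN_SIZE), giving a
+ * max_msg_len of 7.
+ */
+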
+/**
+ * qmi_calc_min_msg_len() - Calculate the minimum length of a QMI message
+ * @ei_array: Struct info array describing the structure.
+ * @level: Level to identify the depth of the nested structures.
+ *
+ * @return: expected minimum length of the QMI message or 0 on failure.
+ */
+static int qmi_calc_min_msg_len(struct elem_info *ei_array,
+				int level)
+{
+	int min_msg_len = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	if (!ei_array)
+		return min_msg_len;
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		/* Optional elements do not count in minimum length */
+		if (temp_ei->data_type == QMI_OPT_FLAG) {
+			temp_ei = skip_to_next_elem(temp_ei, level);
+			continue;
+		}
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			min_msg_len += (temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t));
+			temp_ei++;
+			continue;
+		} else if (temp_ei->data_type == QMI_STRUCT) {
+			min_msg_len += qmi_calc_min_msg_len(temp_ei->ei_array,
+							    (level + 1));
+			temp_ei++;
+		} else if (temp_ei->data_type == QMI_STRING) {
+			if (level > 1)
+				min_msg_len += temp_ei->elem_len <= U8_MAX ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			min_msg_len += temp_ei->elem_len * temp_ei->elem_size;
+			temp_ei++;
+		} else {
+			min_msg_len += (temp_ei->elem_len * temp_ei->elem_size);
+			temp_ei++;
+		}
+
+		/*
+		 * Type & Length info. not prepended for elements in the
+		 * nested structure.
+		 */
+		if (level == 1)
+			min_msg_len += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+	}
+	return min_msg_len;
+}
+
+/**
+ * qmi_verify_max_msg_len() - Verify the maximum length of a QMI message
+ * @desc: Pointer to structure descriptor.
+ *
+ * @return: true if the maximum message length embedded in structure
+ *          descriptor matches the calculated value, else false.
+ */
+bool qmi_verify_max_msg_len(struct msg_desc *desc)
+{
+	int calc_max_msg_len;
+
+	if (!desc)
+		return false;
+
+	calc_max_msg_len = qmi_calc_max_msg_len(desc->ei_array, 1);
+	if (calc_max_msg_len != desc->max_msg_len) {
+		pr_err("%s: Calc. len %d != Passed len %d\n",
+			__func__, calc_max_msg_len, desc->max_msg_len);
+		return false;
+	}
+	return true;
+}
+
+/**
+ * qmi_kernel_encode() - Encode to QMI message wire format
+ * @desc: Pointer to structure descriptor.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @out_buf_len: Length of the out buffer.
+ * @in_c_struct: C Structure to be encoded.
+ *
+ * @return: size of encoded message on success, < 0 for error.
+ */
+int qmi_kernel_encode(struct msg_desc *desc,
+		      void *out_buf, uint32_t out_buf_len,
+		      void *in_c_struct)
+{
+	int enc_level = 1;
+	int ret, calc_max_msg_len, calc_min_msg_len;
+
+	if (!desc)
+		return -EINVAL;
+
+	/* Check the possibility of a zero length QMI message */
+	if (!in_c_struct) {
+		calc_min_msg_len = qmi_calc_min_msg_len(desc->ei_array, 1);
+		if (calc_min_msg_len) {
+			pr_err("%s: Calc. len %d != 0, but NULL in_c_struct\n",
+				__func__, calc_min_msg_len);
+			return -EINVAL;
+		} else {
+			return 0;
+		}
+	}
+
+	/*
+	 * Not a zero-length message. Ensure the output buffer and
+	 * element information array are not NULL.
+	 */
+	if (!out_buf || !desc->ei_array)
+		return -EINVAL;
+
+	if (desc->max_msg_len < out_buf_len)
+		return -ETOOSMALL;
+
+	ret = _qmi_kernel_encode(desc->ei_array, out_buf,
+				 in_c_struct, out_buf_len, enc_level);
+	if (ret == -ETOOSMALL) {
+		calc_max_msg_len = qmi_calc_max_msg_len(desc->ei_array, 1);
+		pr_err("%s: Calc. len %d != Out buf len %d\n",
+			__func__, calc_max_msg_len, out_buf_len);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qmi_kernel_encode);
+
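+/*
+ * Typical usage sketch (the request type and descriptor name are
+ * hypothetical; desc->ei_array describes the C structure being sent,
+ * and out_buf_len must not exceed desc->max_msg_len):
+ *
+ *   struct my_req req = { .value = 1 };
+ *   int len = qmi_kernel_encode(&my_req_desc, buf, sizeof(buf), &req);
+ *
+ * On success "len" is the number of bytes of "buf" that hold the
+ * encoded wire-format message; a negative value indicates an error.
+ */
+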
+/**
+ * qmi_encode_basic_elem() - Encodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @elem_size: Size of a single instance of the element to be encoded.
+ *
+ * @return: number of bytes of encoded information.
+ *
+ * This function encodes the "elem_len" number of data elements, each of
+ * size "elem_size" bytes from the source buffer "buf_src" and stores the
+ * encoded information in the destination buffer "buf_dst". The elements are
+ * of a primary data type (uint8_t through uint64_t or similar). This
+ * function returns the number of bytes of encoded information.
+ */
+static int qmi_encode_basic_elem(void *buf_dst, void *buf_src,
+				 uint32_t elem_len, uint32_t elem_size)
+{
+	uint32_t i, rc = 0;
+
+	for (i = 0; i < elem_len; i++) {
+		QMI_ENCDEC_ENCODE_N_BYTES(buf_dst, buf_src, elem_size);
+		rc += elem_size;
+	}
+
+	return rc;
+}
+
+/**
+ * qmi_encode_struct_elem() - Encodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @elem_len: Number of elements, in the buf_src, to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the nested structure from the main structure.
+ *
+ * @return: Number of bytes of encoded information, on success.
+ *          < 0 on error.
+ *
+ * This function encodes the "elem_len" number of struct elements, each of
+ * size "ei_array->elem_size" bytes from the source buffer "buf_src" and
+ * stores the encoded information in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of encoded information.
+ */
+static int qmi_encode_struct_elem(struct elem_info *ei_array,
+				  void *buf_dst, void *buf_src,
+				  uint32_t elem_len, uint32_t out_buf_len,
+				  int enc_level)
+{
+	int i, rc, encoded_bytes = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	for (i = 0; i < elem_len; i++) {
+		rc = _qmi_kernel_encode(temp_ei->ei_array, buf_dst, buf_src,
+					(out_buf_len - encoded_bytes),
+					enc_level);
+		if (rc < 0) {
+			pr_err("%s: STRUCT Encode failure\n", __func__);
+			return rc;
+		}
+		buf_dst = buf_dst + rc;
+		buf_src = buf_src + temp_ei->elem_size;
+		encoded_bytes += rc;
+	}
+
+	return encoded_bytes;
+}
+
+/**
+ * qmi_encode_string_elem() - Encodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the encoded information.
+ * @buf_src: Buffer containing the elements to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Depth of the string element from the main structure.
+ *
+ * @return: Number of bytes of encoded information, on success.
+ *          < 0 on error.
+ *
+ * This function encodes a string element of maximum length "ei_array->elem_len"
+ * bytes from the source buffer "buf_src" and stores the encoded information in
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * of encoded information.
+ */
+static int qmi_encode_string_elem(struct elem_info *ei_array,
+				  void *buf_dst, void *buf_src,
+				  uint32_t out_buf_len, int enc_level)
+{
+	int rc;
+	int encoded_bytes = 0;
+	struct elem_info *temp_ei = ei_array;
+	uint32_t string_len = 0;
+	uint32_t string_len_sz = 0;
+
+	string_len = strlen(buf_src);
+	string_len_sz = temp_ei->elem_len <= U8_MAX ?
+			sizeof(uint8_t) : sizeof(uint16_t);
+	if (string_len > temp_ei->elem_len) {
+		pr_err("%s: String to be encoded is longer - %d > %d\n",
+			__func__, string_len, temp_ei->elem_len);
+		return -EINVAL;
+	}
+
+	if (enc_level == 1) {
+		if (string_len + TLV_LEN_SIZE + TLV_TYPE_SIZE >
+		    out_buf_len) {
+			pr_err("%s: Output len %d > Out Buf len %d\n",
+				__func__, string_len, out_buf_len);
+			return -ETOOSMALL;
+		}
+	} else {
+		if (string_len + string_len_sz > out_buf_len) {
+			pr_err("%s: Output len %d > Out Buf len %d\n",
+				__func__, string_len, out_buf_len);
+			return -ETOOSMALL;
+		}
+		rc = qmi_encode_basic_elem(buf_dst, &string_len,
+					   1, string_len_sz);
+		encoded_bytes += rc;
+	}
+
+	rc = qmi_encode_basic_elem(buf_dst + encoded_bytes, buf_src,
+				   string_len, temp_ei->elem_size);
+	encoded_bytes += rc;
+	QMI_ENCODE_LOG_ELEM(enc_level, string_len, temp_ei->elem_size, buf_src);
+	return encoded_bytes;
+}
+
+/**
+ * skip_to_next_elem() - Skip to next element in the structure to be encoded
+ * @ei_array: Struct info describing the element to be skipped.
+ * @level: Depth level of encoding/decoding to identify nested structures.
+ *
+ * @return: Struct info of the next element that can be encoded.
+ *
+ * This function is used while encoding optional elements. If the flag
+ * corresponding to an optional element is not set, then encoding the
+ * optional element can be skipped. This function can be used to perform
+ * that operation.
+ */
+static struct elem_info *skip_to_next_elem(struct elem_info *ei_array,
+					   int level)
+{
+	struct elem_info *temp_ei = ei_array;
+	uint8_t tlv_type;
+
+	if (level > 1) {
+		temp_ei = temp_ei + 1;
+	} else {
+		do {
+			tlv_type = temp_ei->tlv_type;
+			temp_ei = temp_ei + 1;
+		} while (tlv_type == temp_ei->tlv_type);
+	}
+
+	return temp_ei;
+}
+
+/**
+ * _qmi_kernel_encode() - Core Encode Function
+ * @ei_array: Struct info array describing the structure to be encoded.
+ * @out_buf: Buffer to hold the encoded QMI message.
+ * @in_c_struct: Pointer to the C structure to be encoded.
+ * @out_buf_len: Available space in the encode buffer.
+ * @enc_level: Encode level to indicate the depth of the nested structure,
+ *             within the main structure, being encoded.
+ *
+ * @return: Number of bytes of encoded information, on success.
+ *          < 0 on error.
+ */
+static int _qmi_kernel_encode(struct elem_info *ei_array,
+			      void *out_buf, void *in_c_struct,
+			      uint32_t out_buf_len, int enc_level)
+{
+	struct elem_info *temp_ei = ei_array;
+	uint8_t opt_flag_value = 0;
+	uint32_t data_len_value = 0, data_len_sz;
+	uint8_t *buf_dst = (uint8_t *)out_buf;
+	uint8_t *tlv_pointer;
+	uint32_t tlv_len;
+	uint8_t tlv_type;
+	uint32_t encoded_bytes = 0;
+	void *buf_src;
+	int encode_tlv = 0;
+	int rc;
+
+	tlv_pointer = buf_dst;
+	tlv_len = 0;
+	if (enc_level == 1)
+		buf_dst = buf_dst + (TLV_LEN_SIZE + TLV_TYPE_SIZE);
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		buf_src = in_c_struct + temp_ei->offset;
+		tlv_type = temp_ei->tlv_type;
+
+		if (temp_ei->is_array == NO_ARRAY) {
+			data_len_value = 1;
+		} else if (temp_ei->is_array == STATIC_ARRAY) {
+			data_len_value = temp_ei->elem_len;
+		} else if (data_len_value <= 0 ||
+			    temp_ei->elem_len < data_len_value) {
+			pr_err("%s: Invalid data length\n", __func__);
+			return -EINVAL;
+		}
+
+		switch (temp_ei->data_type) {
+		case QMI_OPT_FLAG:
+			rc = qmi_encode_basic_elem(&opt_flag_value, buf_src,
+						   1, sizeof(uint8_t));
+			if (opt_flag_value)
+				temp_ei = temp_ei + 1;
+			else
+				temp_ei = skip_to_next_elem(temp_ei, enc_level);
+			break;
+
+		case QMI_DATA_LEN:
+			memcpy(&data_len_value, buf_src, temp_ei->elem_size);
+			data_len_sz = temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			/* Check to avoid out of range buffer access */
+			if ((data_len_sz + encoded_bytes + TLV_LEN_SIZE +
+			    TLV_TYPE_SIZE) > out_buf_len) {
+				pr_err("%s: Too Small Buffer @DATA_LEN\n",
+					__func__);
+				return -ETOOSMALL;
+			}
+			rc = qmi_encode_basic_elem(buf_dst, &data_len_value,
+						   1, data_len_sz);
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			if (!data_len_value)
+				temp_ei = skip_to_next_elem(temp_ei, enc_level);
+			else
+				encode_tlv = 0;
+			break;
+
+		case QMI_UNSIGNED_1_BYTE:
+		case QMI_UNSIGNED_2_BYTE:
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_UNSIGNED_8_BYTE:
+		case QMI_SIGNED_2_BYTE_ENUM:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			/* Check to avoid out of range buffer access */
+			if (((data_len_value * temp_ei->elem_size) +
+			    encoded_bytes + TLV_LEN_SIZE + TLV_TYPE_SIZE) >
+			    out_buf_len) {
+				pr_err("%s: Too Small Buffer @data_type:%d\n",
+					__func__, temp_ei->data_type);
+				return -ETOOSMALL;
+			}
+			rc = qmi_encode_basic_elem(buf_dst, buf_src,
+				data_len_value, temp_ei->elem_size);
+			QMI_ENCODE_LOG_ELEM(enc_level, data_len_value,
+				temp_ei->elem_size, buf_src);
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			break;
+
+		case QMI_STRUCT:
+			rc = qmi_encode_struct_elem(temp_ei, buf_dst, buf_src,
+				data_len_value, (out_buf_len - encoded_bytes),
+				(enc_level + 1));
+			if (rc < 0)
+				return rc;
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			break;
+
+		case QMI_STRING:
+			rc = qmi_encode_string_elem(temp_ei, buf_dst, buf_src,
+				out_buf_len - encoded_bytes, enc_level);
+			if (rc < 0)
+				return rc;
+			UPDATE_ENCODE_VARIABLES(temp_ei, buf_dst,
+				encoded_bytes, tlv_len, encode_tlv, rc);
+			break;
+		default:
+			pr_err("%s: Unrecognized data type\n", __func__);
+			return -EINVAL;
+
+		}
+
+		if (encode_tlv && enc_level == 1) {
+			QMI_ENCDEC_ENCODE_TLV(tlv_type, tlv_len, tlv_pointer);
+			QMI_ENCODE_LOG_TLV(tlv_type, tlv_len);
+			encoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			tlv_pointer = buf_dst;
+			tlv_len = 0;
+			buf_dst = buf_dst + TLV_LEN_SIZE + TLV_TYPE_SIZE;
+			encode_tlv = 0;
+		}
+	}
+	QMI_ENCODE_LOG_MSG(out_buf, encoded_bytes);
+	return encoded_bytes;
+}
+
+/**
+ * qmi_kernel_decode() - Decode to C Structure format
+ * @desc: Pointer to structure descriptor.
+ * @out_c_struct: Buffer to hold the decoded C structure.
+ * @in_buf: Buffer containing the QMI message to be decoded.
+ * @in_buf_len: Length of the incoming QMI message.
+ *
+ * @return: 0 on success, < 0 on error.
+ */
+int qmi_kernel_decode(struct msg_desc *desc, void *out_c_struct,
+		      void *in_buf, uint32_t in_buf_len)
+{
+	int dec_level = 1;
+	int rc = 0;
+
+	if (!desc || !desc->ei_array)
+		return -EINVAL;
+
+	if (!out_c_struct || !in_buf || !in_buf_len)
+		return -EINVAL;
+
+	if (desc->max_msg_len < in_buf_len)
+		return -EINVAL;
+
+	rc = _qmi_kernel_decode(desc->ei_array, out_c_struct,
+				in_buf, in_buf_len, dec_level);
+	if (rc < 0)
+		return rc;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(qmi_kernel_decode);
+
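+/*
+ * Typical usage sketch (response type and descriptor name are
+ * hypothetical):
+ *
+ *   struct my_resp resp;
+ *   int rc = qmi_kernel_decode(&my_resp_desc, &resp, in_buf, in_len);
+ *
+ * "rc" is 0 once the whole message has been decoded into "resp", or a
+ * negative error code if the buffer does not match the descriptor.
+ */
+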
+/**
+ * qmi_decode_basic_elem() - Decodes elements of basic/primary data type
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @elem_size: Size of a single instance of the element to be decoded.
+ *
+ * @return: Total size of the decoded data elements, in bytes.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "elem_size" bytes from the source buffer "buf_src" and stores
+ * the decoded elements in the destination buffer "buf_dst". The elements are
+ * of a primary data type (uint8_t through uint64_t or similar). This
+ * function returns the number of bytes of decoded information.
+ */
+static int qmi_decode_basic_elem(void *buf_dst, void *buf_src,
+				 uint32_t elem_len, uint32_t elem_size)
+{
+	uint32_t i, rc = 0;
+
+	for (i = 0; i < elem_len; i++) {
+		QMI_ENCDEC_DECODE_N_BYTES(buf_dst, buf_src, elem_size);
+		rc += elem_size;
+	}
+
+	return rc;
+}
+
+/**
+ * qmi_decode_struct_elem() - Decodes elements of struct data type
+ * @ei_array: Struct info array describing the struct element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @elem_len: Number of elements to be decoded.
+ * @tlv_len: Total size of the encoded information corresponding to
+ *           this struct element.
+ * @dec_level: Depth of the nested structure from the main structure.
+ *
+ * @return: Total size of the decoded data elements, on success.
+ *          < 0 on error.
+ *
+ * This function decodes the "elem_len" number of elements in QMI wire format,
+ * each of size "(tlv_len/elem_len)" bytes from the source buffer "buf_src"
+ * and stores the decoded elements in the destination buffer "buf_dst". The
+ * elements are of struct data type which includes any C structure. This
+ * function returns the number of bytes of decoded information.
+ */
+static int qmi_decode_struct_elem(struct elem_info *ei_array, void *buf_dst,
+				  void *buf_src, uint32_t elem_len,
+				  uint32_t tlv_len, int dec_level)
+{
+	int i, rc, decoded_bytes = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	for (i = 0; i < elem_len && decoded_bytes < tlv_len; i++) {
+		rc = _qmi_kernel_decode(temp_ei->ei_array, buf_dst, buf_src,
+					(tlv_len - decoded_bytes), dec_level);
+		if (rc < 0)
+			return rc;
+		buf_src = buf_src + rc;
+		buf_dst = buf_dst + temp_ei->elem_size;
+		decoded_bytes += rc;
+	}
+
+	if ((dec_level <= 2 && decoded_bytes != tlv_len) ||
+	    (dec_level > 2 && (i < elem_len || decoded_bytes > tlv_len))) {
+		pr_err("%s: Fault in decoding: dl(%d), db(%d), tl(%d), i(%d), el(%d)\n",
+			__func__, dec_level, decoded_bytes, tlv_len,
+			i, elem_len);
+		return -EFAULT;
+	}
+	return decoded_bytes;
+}
+
+/**
+ * qmi_decode_string_elem() - Decodes elements of string data type
+ * @ei_array: Struct info array describing the string element.
+ * @buf_dst: Buffer to store the decoded element.
+ * @buf_src: Buffer containing the elements in QMI wire format.
+ * @tlv_len: Total size of the encoded information corresponding to
+ *           this string element.
+ * @dec_level: Depth of the string element from the main structure.
+ *
+ * @return: Total size of the decoded data elements, on success.
+ *          < 0 on error.
+ *
+ * This function decodes the string element of maximum length
+ * "ei_array->elem_len" from the source buffer "buf_src" and puts it into
+ * the destination buffer "buf_dst". This function returns the number of bytes
+ * decoded from the input buffer.
+ */
+static int qmi_decode_string_elem(struct elem_info *ei_array, void *buf_dst,
+				  void *buf_src, uint32_t tlv_len,
+				  int dec_level)
+{
+	int rc;
+	int decoded_bytes = 0;
+	uint32_t string_len = 0;
+	uint32_t string_len_sz = 0;
+	struct elem_info *temp_ei = ei_array;
+
+	if (dec_level == 1) {
+		string_len = tlv_len;
+	} else {
+		string_len_sz = temp_ei->elem_len <= U8_MAX ?
+				sizeof(uint8_t) : sizeof(uint16_t);
+		rc = qmi_decode_basic_elem(&string_len, buf_src,
+					   1, string_len_sz);
+		decoded_bytes += rc;
+	}
+
+	if (string_len > temp_ei->elem_len) {
+		pr_err("%s: String len %d > Max Len %d\n",
+			__func__, string_len, temp_ei->elem_len);
+		return -ETOOSMALL;
+	} else if (string_len > tlv_len) {
+		pr_err("%s: String len %d > Input Buffer Len %d\n",
+			__func__, string_len, tlv_len);
+		return -EFAULT;
+	}
+
+	rc = qmi_decode_basic_elem(buf_dst, buf_src + decoded_bytes,
+				   string_len, temp_ei->elem_size);
+	*((char *)buf_dst + string_len) = '\0';
+	decoded_bytes += rc;
+	QMI_DECODE_LOG_ELEM(dec_level, string_len, temp_ei->elem_size, buf_dst);
+	return decoded_bytes;
+}
+
+/**
+ * find_ei() - Find element info corresponding to TLV Type
+ * @ei_array: Struct info array of the message being decoded.
+ * @type: TLV Type of the element being searched.
+ *
+ * @return: Pointer to struct info, if found
+ *
+ * Every element that got encoded in the QMI message will have type
+ * information associated with it. While decoding the QMI message,
+ * this function is used to find the struct info regarding the element
+ * that corresponds to the type being decoded.
+ */
+static struct elem_info *find_ei(struct elem_info *ei_array,
+				   uint32_t type)
+{
+	struct elem_info *temp_ei = ei_array;
+
+	while (temp_ei->data_type != QMI_EOTI) {
+		if (temp_ei->tlv_type == (uint8_t)type)
+			return temp_ei;
+		temp_ei = temp_ei + 1;
+	}
+	return NULL;
+}
+
+/**
+ * _qmi_kernel_decode() - Core Decode Function
+ * @ei_array: Struct info array describing the structure to be decoded.
+ * @out_c_struct: Buffer to hold the decoded C struct
+ * @in_buf: Buffer containing the QMI message to be decoded
+ * @in_buf_len: Length of the QMI message to be decoded
+ * @dec_level: Decode level to indicate the depth of the nested structure,
+ *             within the main structure, being decoded
+ *
+ * @return: Number of bytes of decoded information, on success
+ *          < 0 on error.
+ */
+static int _qmi_kernel_decode(struct elem_info *ei_array,
+			      void *out_c_struct,
+			      void *in_buf, uint32_t in_buf_len,
+			      int dec_level)
+{
+	struct elem_info *temp_ei = ei_array;
+	uint8_t opt_flag_value = 1;
+	uint32_t data_len_value = 0, data_len_sz = 0;
+	uint8_t *buf_dst = out_c_struct;
+	uint8_t *tlv_pointer;
+	uint32_t tlv_len = 0;
+	uint32_t tlv_type;
+	uint32_t decoded_bytes = 0;
+	void *buf_src = in_buf;
+	int rc;
+
+	QMI_DECODE_LOG_MSG(in_buf, in_buf_len);
+	while (decoded_bytes < in_buf_len) {
+		if (dec_level >= 2 && temp_ei->data_type == QMI_EOTI)
+			return decoded_bytes;
+
+		if (dec_level == 1) {
+			tlv_pointer = buf_src;
+			QMI_ENCDEC_DECODE_TLV(&tlv_type,
+					      &tlv_len, tlv_pointer);
+			QMI_DECODE_LOG_TLV(tlv_type, tlv_len);
+			buf_src += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			decoded_bytes += (TLV_TYPE_SIZE + TLV_LEN_SIZE);
+			temp_ei = find_ei(ei_array, tlv_type);
+			if (!temp_ei && (tlv_type < OPTIONAL_TLV_TYPE_START)) {
+				pr_err("%s: Inval element info\n", __func__);
+				return -EINVAL;
+			} else if (!temp_ei) {
+				UPDATE_DECODE_VARIABLES(buf_src,
+						decoded_bytes, tlv_len);
+				continue;
+			}
+		} else {
+			/*
+			 * No length information for elements in nested
+			 * structures. So use remaining decodable buffer space.
+			 */
+			tlv_len = in_buf_len - decoded_bytes;
+		}
+
+		buf_dst = out_c_struct + temp_ei->offset;
+		if (temp_ei->data_type == QMI_OPT_FLAG) {
+			memcpy(buf_dst, &opt_flag_value, sizeof(uint8_t));
+			temp_ei = temp_ei + 1;
+			buf_dst = out_c_struct + temp_ei->offset;
+		}
+
+		if (temp_ei->data_type == QMI_DATA_LEN) {
+			data_len_sz = temp_ei->elem_size == sizeof(uint8_t) ?
+					sizeof(uint8_t) : sizeof(uint16_t);
+			rc = qmi_decode_basic_elem(&data_len_value, buf_src,
+						   1, data_len_sz);
+			memcpy(buf_dst, &data_len_value, sizeof(uint32_t));
+			temp_ei = temp_ei + 1;
+			buf_dst = out_c_struct + temp_ei->offset;
+			tlv_len -= data_len_sz;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+		}
+
+		if (temp_ei->is_array == NO_ARRAY) {
+			data_len_value = 1;
+		} else if (temp_ei->is_array == STATIC_ARRAY) {
+			data_len_value = temp_ei->elem_len;
+		} else if (data_len_value > temp_ei->elem_len) {
+			pr_err("%s: Data len %d > max spec %d\n",
+				__func__, data_len_value, temp_ei->elem_len);
+			return -ETOOSMALL;
+		}
+
+		switch (temp_ei->data_type) {
+		case QMI_UNSIGNED_1_BYTE:
+		case QMI_UNSIGNED_2_BYTE:
+		case QMI_UNSIGNED_4_BYTE:
+		case QMI_UNSIGNED_8_BYTE:
+		case QMI_SIGNED_2_BYTE_ENUM:
+		case QMI_SIGNED_4_BYTE_ENUM:
+			rc = qmi_decode_basic_elem(buf_dst, buf_src,
+				data_len_value, temp_ei->elem_size);
+			QMI_DECODE_LOG_ELEM(dec_level, data_len_value,
+				temp_ei->elem_size, buf_dst);
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		case QMI_STRUCT:
+			rc = qmi_decode_struct_elem(temp_ei, buf_dst, buf_src,
+				data_len_value, tlv_len, (dec_level + 1));
+			if (rc < 0)
+				return rc;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		case QMI_STRING:
+			rc = qmi_decode_string_elem(temp_ei, buf_dst, buf_src,
+						     tlv_len, dec_level);
+			if (rc < 0)
+				return rc;
+			UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc);
+			break;
+
+		default:
+			pr_err("%s: Unrecognized data type\n", __func__);
+			return -EINVAL;
+		}
+		temp_ei = temp_ei + 1;
+	}
+	return decoded_bytes;
+}
+MODULE_DESCRIPTION("QMI kernel enc/dec");
+MODULE_LICENSE("GPL v2");
diff --git a/lib/qmi_encdec_priv.h b/lib/qmi_encdec_priv.h
new file mode 100644
index 0000000..97fe45b
--- /dev/null
+++ b/lib/qmi_encdec_priv.h
@@ -0,0 +1,66 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QMI_ENCDEC_PRIV_H_
+#define _QMI_ENCDEC_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/socket.h>
+#include <linux/gfp.h>
+#include <linux/qmi_encdec.h>
+
+#define QMI_ENCDEC_ENCODE_TLV(type, length, p_dst) do { \
+	*p_dst++ = type; \
+	*p_dst++ = ((uint8_t)((length) & 0xFF)); \
+	*p_dst++ = ((uint8_t)(((length) >> 8) & 0xFF)); \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_TLV(p_type, p_length, p_src) do { \
+	*p_type = (uint8_t)*p_src++; \
+	*p_length = (uint8_t)*p_src++; \
+	*p_length |= ((uint8_t)*p_src) << 8; \
+} while (0)
+
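+/*
+ * Worked example: a TLV with type 0x10 and length 4 is encoded as the
+ * three header bytes 0x10, 0x04, 0x00 (one type byte followed by the
+ * 16-bit length in little-endian order); decoding reverses this.
+ */
+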
+#define QMI_ENCDEC_ENCODE_N_BYTES(p_dst, p_src, size) \
+do { \
+	memcpy(p_dst, p_src, size); \
+	p_dst = (uint8_t *)p_dst + size; \
+	p_src = (uint8_t *)p_src + size; \
+} while (0)
+
+#define QMI_ENCDEC_DECODE_N_BYTES(p_dst, p_src, size) \
+do { \
+	memcpy(p_dst, p_src, size); \
+	p_dst = (uint8_t *)p_dst + size; \
+	p_src = (uint8_t *)p_src + size; \
+} while (0)
+
+#define UPDATE_ENCODE_VARIABLES(temp_si, buf_dst, \
+				encoded_bytes, tlv_len, encode_tlv, rc) \
+do { \
+	buf_dst = (uint8_t *)buf_dst + rc; \
+	encoded_bytes += rc; \
+	tlv_len += rc; \
+	temp_si = temp_si + 1; \
+	encode_tlv = 1; \
+} while (0)
+
+#define UPDATE_DECODE_VARIABLES(buf_src, decoded_bytes, rc) \
+do { \
+	buf_src = (uint8_t *)buf_src + rc; \
+	decoded_bytes += rc; \
+} while (0)
+
+#endif
diff --git a/mm/Makefile b/mm/Makefile
index 295bd7a..a7e9b6a 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -37,7 +37,7 @@
 			   mm_init.o mmu_context.o percpu.o slab_common.o \
 			   compaction.o vmacache.o \
 			   interval_tree.o list_lru.o workingset.o \
-			   debug.o $(mmu-y)
+			   debug.o $(mmu-y) showmem.o
 
 obj-y += init-mm.o
 
diff --git a/mm/bootmem.c b/mm/bootmem.c
index e8a55a3..d14efd6 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -151,7 +151,7 @@
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
+void free_bootmem_late(unsigned long physaddr, unsigned long size)
 {
 	unsigned long cursor, end;
 
diff --git a/mm/madvise.c b/mm/madvise.c
index 93fb63e..279627a 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -108,7 +108,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/memblock.c b/mm/memblock.c
index 7608bc3..166f17a 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -19,6 +19,9 @@
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
+#include <linux/preempt.h>
+#include <linux/seqlock.h>
+#include <linux/irqflags.h>
 
 #include <asm/sections.h>
 #include <linux/io.h>
@@ -31,6 +34,7 @@
 static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
 #endif
 
+static seqcount_t memblock_seq;
 struct memblock memblock __initdata_memblock = {
 	.memory.regions		= memblock_memory_init_regions,
 	.memory.cnt		= 1,	/* empty dummy entry */
@@ -723,7 +727,8 @@
 		     (unsigned long long)base + size - 1,
 		     (void *)_RET_IP_);
 
-	kmemleak_free_part_phys(base, size);
+	if (base < memblock.current_limit)
+		kmemleak_free_part_phys(base, size);
 	return memblock_remove_range(&memblock.reserved, base, size);
 }
 
@@ -1152,7 +1157,8 @@
 		 * The min_count is set to 0 so that memblock allocations are
 		 * never reported as leaks.
 		 */
-		kmemleak_alloc_phys(found, size, 0, 0);
+		if (found < memblock.current_limit)
+			kmemleak_alloc_phys(found, size, 0, 0);
 		return found;
 	}
 	return 0;
@@ -1544,7 +1550,8 @@
 			      (phys_addr_t)ULLONG_MAX);
 }
 
-static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
+static int __init_memblock __memblock_search(struct memblock_type *type,
+					     phys_addr_t addr)
 {
 	unsigned int left = 0, right = type->cnt;
 
@@ -1562,6 +1569,20 @@
 	return -1;
 }
 
+static int __init_memblock memblock_search(struct memblock_type *type,
+					   phys_addr_t addr)
+{
+	int ret;
+	unsigned long seq;
+
+	do {
+		seq = raw_read_seqcount_begin(&memblock_seq);
+		ret = __memblock_search(type, addr);
+	} while (unlikely(read_seqcount_retry(&memblock_seq, seq)));
+
+	return ret;
+}
+
 bool __init memblock_is_reserved(phys_addr_t addr)
 {
 	return memblock_search(&memblock.reserved, addr) != -1;
@@ -1620,6 +1641,13 @@
 		 memblock.memory.regions[idx].size) >= end;
 }
 
+int __init_memblock memblock_overlaps_memory(phys_addr_t base, phys_addr_t size)
+{
+	memblock_cap_size(base, &size);
+
+	return memblock_overlaps_region(&memblock.memory, base, size) >= 0;
+}
+
 /**
  * memblock_is_region_reserved - check if a region intersects reserved memory
  * @base: base of region to check
@@ -1712,6 +1740,37 @@
 	memblock_can_resize = 1;
 }
 
+static unsigned long __init_memblock
+memblock_resize_late(int begin, unsigned long flags)
+{
+	static int memblock_can_resize_old;
+
+	if (begin) {
+		preempt_disable();
+		local_irq_save(flags);
+		memblock_can_resize_old = memblock_can_resize;
+		memblock_can_resize = 0;
+		raw_write_seqcount_begin(&memblock_seq);
+	} else {
+		raw_write_seqcount_end(&memblock_seq);
+		memblock_can_resize = memblock_can_resize_old;
+		local_irq_restore(flags);
+		preempt_enable();
+	}
+
+	return flags;
+}
+
+unsigned long __init_memblock memblock_region_resize_late_begin(void)
+{
+	return memblock_resize_late(1, 0);
+}
+
+void __init_memblock memblock_region_resize_late_end(unsigned long flags)
+{
+	memblock_resize_late(0, flags);
+}
+
 static int __init early_memblock(char *p)
 {
 	if (p && strstr(p, "debug"))
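
Editorial note on the memblock hunks above: memblock_resize_late() forms the writer side (preemption and IRQs off, memblock_seq bumped around the window in which the region arrays may move), while memblock_search() becomes a lockless reader that retries via raw_read_seqcount_begin()/read_seqcount_retry(). The standalone userspace analogue below illustrates only the retry pattern; the names are invented and the write-side barriers are simplified.

	#include <stdatomic.h>
	#include <stdio.h>

	/* Illustrative userspace analogue of a seqcount-protected read. */
	static _Atomic unsigned int seq;
	static int table[4] = { 1, 2, 3, 4 };

	/* Writer: odd sequence while an update is in progress, even when done. */
	static void table_update(int idx, int val)
	{
		atomic_fetch_add_explicit(&seq, 1, memory_order_release);
		table[idx] = val;
		atomic_fetch_add_explicit(&seq, 1, memory_order_release);
	}

	/* Reader: retry if the sequence was odd or changed while reading. */
	static int table_read(int idx)
	{
		unsigned int start;
		int val;

		do {
			start = atomic_load_explicit(&seq, memory_order_acquire);
			val = table[idx];
		} while ((start & 1) ||
			 start != atomic_load_explicit(&seq, memory_order_acquire));
		return val;
	}

	int main(void)
	{
		table_update(2, 42);
		printf("%d\n", table_read(2));
		return 0;
	}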
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0f870ba..f60bd04 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4843,6 +4843,11 @@
 	return ret;
 }
 
+static int mem_cgroup_allow_attach(struct cgroup_taskset *tset)
+{
+	return subsys_cgroup_allow_attach(tset);
+}
+
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 	if (mc.to)
@@ -4998,6 +5003,10 @@
 {
 	return 0;
 }
+static int mem_cgroup_allow_attach(struct cgroup_taskset *tset)
+{
+	return 0;
+}
 static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
 {
 }
@@ -5286,6 +5295,7 @@
 	.can_attach = mem_cgroup_can_attach,
 	.cancel_attach = mem_cgroup_cancel_attach,
 	.post_attach = mem_cgroup_move_task,
+	.allow_attach = mem_cgroup_allow_attach,
 	.bind = mem_cgroup_bind,
 	.dfl_cftypes = memory_files,
 	.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 0b859af..238c4e8 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -752,7 +752,8 @@
 			((vmstart - vma->vm_start) >> PAGE_SHIFT);
 		prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
 				 vma->anon_vma, vma->vm_file, pgoff,
-				 new_pol, vma->vm_userfaultfd_ctx);
+				 new_pol, vma->vm_userfaultfd_ctx,
+				 vma_get_anon_name(vma));
 		if (prev) {
 			vma = prev;
 			next = vma->vm_next;
diff --git a/mm/mlock.c b/mm/mlock.c
index cdbed8a..facf6e7 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -529,7 +529,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
 			  vma->vm_file, pgoff, vma_policy(vma),
-			  vma->vm_userfaultfd_ctx);
+			  vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*prev) {
 		vma = *prev;
 		goto success;
diff --git a/mm/mmap.c b/mm/mmap.c
index 1af87c1..143d62f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -956,7 +956,8 @@
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
 				struct file *file, unsigned long vm_flags,
-				struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+				const char __user *anon_name)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -974,6 +975,8 @@
 		return 0;
 	if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
 		return 0;
+	if (vma_get_anon_name(vma) != anon_name)
+		return 0;
 	return 1;
 }
 
@@ -1006,9 +1009,10 @@
 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 		     struct anon_vma *anon_vma, struct file *file,
 		     pgoff_t vm_pgoff,
-		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		     struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		     const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
 			return 1;
@@ -1027,9 +1031,10 @@
 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 		    struct anon_vma *anon_vma, struct file *file,
 		    pgoff_t vm_pgoff,
-		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+		    struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+		    const char __user *anon_name)
 {
-	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
+	if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
 		vm_pglen = vma_pages(vma);
@@ -1040,9 +1045,9 @@
 }
 
 /*
- * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
- * whether that can be merged with its predecessor or its successor.
- * Or both (it neatly fills a hole).
+ * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
+ * figure out whether that can be merged with its predecessor or its
+ * successor.  Or both (it neatly fills a hole).
  *
  * In most cases - when called for mmap, brk or mremap - [addr,end) is
  * certain not to be mapped by the time vma_merge is called; but when
@@ -1084,7 +1089,8 @@
 			unsigned long end, unsigned long vm_flags,
 			struct anon_vma *anon_vma, struct file *file,
 			pgoff_t pgoff, struct mempolicy *policy,
-			struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
+			struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+			const char __user *anon_name)
 {
 	pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
 	struct vm_area_struct *area, *next;
@@ -1117,7 +1123,8 @@
 			mpol_equal(vma_policy(prev), policy) &&
 			can_vma_merge_after(prev, vm_flags,
 					    anon_vma, file, pgoff,
-					    vm_userfaultfd_ctx)) {
+					    vm_userfaultfd_ctx,
+					    anon_name)) {
 		/*
 		 * OK, it can.  Can we now merge in the successor as well?
 		 */
@@ -1126,7 +1133,8 @@
 				can_vma_merge_before(next, vm_flags,
 						     anon_vma, file,
 						     pgoff+pglen,
-						     vm_userfaultfd_ctx) &&
+						     vm_userfaultfd_ctx,
+						     anon_name) &&
 				is_mergeable_anon_vma(prev->anon_vma,
 						      next->anon_vma, NULL)) {
 							/* cases 1, 6 */
@@ -1149,7 +1157,8 @@
 			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					     anon_vma, file, pgoff+pglen,
-					     vm_userfaultfd_ctx)) {
+					     vm_userfaultfd_ctx,
+					     anon_name)) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(prev, prev->vm_start,
 					 addr, prev->vm_pgoff, NULL, next);
@@ -1627,7 +1636,7 @@
 	 * Can we just expand an old mapping?
 	 */
 	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -2663,6 +2672,7 @@
 
 	return 0;
 }
+EXPORT_SYMBOL(do_munmap);
 
 int vm_munmap(unsigned long start, size_t len)
 {
@@ -2858,7 +2868,7 @@
 
 	/* Can we just expand an old private anonymous mapping? */
 	vma = vma_merge(mm, prev, addr, addr + len, flags,
-			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
+			NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
 	if (vma)
 		goto out;
 
@@ -3019,7 +3029,7 @@
 		return NULL;	/* should never get here */
 	new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
 			    vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			    vma->vm_userfaultfd_ctx);
+			    vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (new_vma) {
 		/*
 		 * Source vma may have been merged into new_vma
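
Editorial note on the mmap.c hunks above: the new anon_name argument threaded through vma_merge() and its helpers is compared by pointer in is_mergeable_vma(), so two VMAs only merge when they carry the exact same userspace name pointer, not merely equal strings. A tiny sketch of that predicate follows; the helper name and simplified types are hypothetical.

	#include <stdbool.h>

	/* Simplified stand-in for the check added to is_mergeable_vma(): a plain
	 * pointer comparison, so an identical name string at a different address
	 * does not count as mergeable. */
	static bool anon_names_mergeable(const char *vma_anon_name,
					 const char *requested_anon_name)
	{
		return vma_anon_name == requested_anon_name;
	}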
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 1193652..2c40836 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -302,7 +302,7 @@
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
 	*pprev = vma_merge(mm, *pprev, start, end, newflags,
 			   vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
-			   vma->vm_userfaultfd_ctx);
+			   vma->vm_userfaultfd_ctx, vma_get_anon_name(vma));
 	if (*pprev) {
 		vma = *pprev;
 		VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 487dad6..e1e8c63 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -80,7 +80,7 @@
  * down, but we are still initializing the system.  Pages are given directly
  * to the page allocator, no bootmem metadata is updated because it is gone.
  */
-void __init free_bootmem_late(unsigned long addr, unsigned long size)
+void free_bootmem_late(unsigned long addr, unsigned long size)
 {
 	unsigned long cursor, end;
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6de9440..8702d66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1250,7 +1250,7 @@
 	local_irq_restore(flags);
 }
 
-static void __init __free_pages_boot_core(struct page *page, unsigned int order)
+static void __free_pages_boot_core(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
 	struct page *p = page;
@@ -1322,7 +1322,7 @@
 #endif
 
 
-void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
+void __free_pages_bootmem(struct page *page, unsigned long pfn,
 							unsigned int order)
 {
 	if (early_page_uninitialised(pfn))
diff --git a/mm/shmem.c b/mm/shmem.c
index 166ebf5..a1dfdc3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4039,6 +4039,14 @@
 }
 EXPORT_SYMBOL_GPL(shmem_file_setup);
 
+void shmem_set_file(struct vm_area_struct *vma, struct file *file)
+{
+	if (vma->vm_file)
+		fput(vma->vm_file);
+	vma->vm_file = file;
+	vma->vm_ops = &shmem_vm_ops;
+}
+
 /**
  * shmem_zero_setup - setup a shared anonymous mapping
  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
@@ -4058,10 +4066,7 @@
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
-	if (vma->vm_file)
-		fput(vma->vm_file);
-	vma->vm_file = file;
-	vma->vm_ops = &shmem_vm_ops;
+	shmem_set_file(vma, file);
 
 	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
 			((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
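
Editorial note: shmem_set_file() above swaps a VMA's backing file for a shmem file and installs shmem's vm_ops, which lets drivers back their mappings with tmpfs objects. A hedged, non-compilable sketch of such a caller is shown below; the driver and function names are invented and error handling is minimal.

	/* Hypothetical mmap handler that backs a VMA with a private shmem file,
	 * using the helper factored out above. */
	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct file *shm;

		shm = shmem_file_setup("mydrv", vma->vm_end - vma->vm_start,
				       vma->vm_flags);
		if (IS_ERR(shm))
			return PTR_ERR(shm);

		shmem_set_file(vma, shm);	/* drops any previous vma->vm_file */
		return 0;
	}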
diff --git a/mm/showmem.c b/mm/showmem.c
new file mode 100644
index 0000000..57ed07b8
--- /dev/null
+++ b/mm/showmem.c
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/debugfs.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+
+ATOMIC_NOTIFIER_HEAD(show_mem_notifier);
+
+int show_mem_notifier_register(struct notifier_block *nb)
+{
+	return atomic_notifier_chain_register(&show_mem_notifier, nb);
+}
+
+int show_mem_notifier_unregister(struct notifier_block *nb)
+{
+	return  atomic_notifier_chain_unregister(&show_mem_notifier, nb);
+}
+
+void show_mem_call_notifiers(void)
+{
+	atomic_notifier_call_chain(&show_mem_notifier, 0, NULL);
+}
+
+static int show_mem_notifier_get(void *dat, u64 *val)
+{
+	show_mem_call_notifiers();
+	*val = 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(show_mem_notifier_debug_ops, show_mem_notifier_get,
+				NULL, "%llu\n");
+
+int show_mem_notifier_debugfs_register(void)
+{
+	debugfs_create_file("show_mem_notifier", 0664, NULL, NULL,
+				&show_mem_notifier_debug_ops);
+
+	return 0;
+}
+late_initcall(show_mem_notifier_debugfs_register);
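
Editorial note: a driver that wants its own allocator statistics included when memory state is dumped would register with the chain above. A hedged, non-compilable sketch follows; the callback body and names are invented.

	#include <linux/notifier.h>
	#include <linux/printk.h>

	/* Hypothetical client of show_mem_notifier_register(). */
	static int my_show_mem(struct notifier_block *nb,
			       unsigned long action, void *data)
	{
		pr_info("mydrv: example statistic goes here\n");
		return NOTIFY_OK;
	}

	static struct notifier_block my_show_mem_nb = {
		.notifier_call = my_show_mem,
	};

	/* Somewhere in the driver's init path:
	 *	show_mem_notifier_register(&my_show_mem_nb);
	 */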
diff --git a/net/Kconfig b/net/Kconfig
index 7b6cd34..cd20118 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -89,6 +89,12 @@
 
 endif # if INET
 
+config ANDROID_PARANOID_NETWORK
+	bool "Only allow certain groups to create sockets"
+	default y
+	help
+	  Restrict socket creation so that only processes belonging to
+	  certain (Android-defined) groups are allowed to create sockets.
+
 config NETWORK_SECMARK
 	bool "Security Marking"
 	help
@@ -238,6 +244,7 @@
 source "net/l3mdev/Kconfig"
 source "net/qrtr/Kconfig"
 source "net/ncsi/Kconfig"
+source "net/rmnet_data/Kconfig"
 
 config RPS
 	bool
@@ -422,6 +429,8 @@
 	  on MAY_USE_DEVLINK to ensure they do not cause link errors when
 	  devlink is a loadable module and the driver using it is built-in.
 
+source "net/ipc_router/Kconfig"
+
 endif   # if NET
 
 # Used by archs to tell that they support BPF JIT compiler plus which flavour.
diff --git a/net/Makefile b/net/Makefile
index 4cafaa2..c84a347 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -81,3 +81,5 @@
 endif
 obj-$(CONFIG_QRTR)		+= qrtr/
 obj-$(CONFIG_NET_NCSI)		+= ncsi/
+obj-$(CONFIG_RMNET_DATA) += rmnet_data/
+obj-$(CONFIG_IPC_ROUTER)	+= ipc_router/
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 1aff2da..4b32525 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -106,11 +106,40 @@
 }
 EXPORT_SYMBOL(bt_sock_unregister);
 
+#ifdef CONFIG_PARANOID_NETWORK
+static inline int current_has_bt_admin(void)
+{
+	return !current_euid();
+}
+
+static inline int current_has_bt(void)
+{
+	return current_has_bt_admin();
+}
+# else
+static inline int current_has_bt_admin(void)
+{
+	return 1;
+}
+
+static inline int current_has_bt(void)
+{
+	return 1;
+}
+#endif
+
 static int bt_sock_create(struct net *net, struct socket *sock, int proto,
 			  int kern)
 {
 	int err;
 
+	if (proto == BTPROTO_RFCOMM || proto == BTPROTO_SCO ||
+			proto == BTPROTO_L2CAP) {
+		if (!current_has_bt())
+			return -EPERM;
+	} else if (!current_has_bt_admin())
+		return -EPERM;
+
 	if (net != &init_net)
 		return -EAFNOSUPPORT;
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 89a687f..fcaa484 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -48,16 +48,17 @@
 		return NETDEV_TX_OK;
 	}
 
-	u64_stats_update_begin(&brstats->syncp);
-	brstats->tx_packets++;
-	brstats->tx_bytes += skb->len;
-	u64_stats_update_end(&brstats->syncp);
-
 	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
+	u64_stats_update_begin(&brstats->syncp);
+	brstats->tx_packets++;
+	/* Exclude ETH_HLEN from byte stats for consistency with Rx chain */
+	brstats->tx_bytes += skb->len;
+	u64_stats_update_end(&brstats->syncp);
+
 	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid))
 		goto out;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 6666b28..46a4830 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4855,8 +4855,7 @@
 			rcu_read_unlock();
 			input_queue_head_incr(sd);
 			if (++work >= quota)
-				return work;
-
+				goto state_changed;
 		}
 
 		local_irq_disable();
@@ -4880,6 +4879,10 @@
 		local_irq_enable();
 	}
 
+state_changed:
+	napi_gro_flush(napi, false);
+	sd->current_napi = NULL;
+
 	return work;
 }
 
@@ -4914,10 +4917,13 @@
 
 void __napi_complete(struct napi_struct *n)
 {
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
 	BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
 
 	list_del_init(&n->poll_list);
 	smp_mb__before_atomic();
+	sd->current_napi = NULL;
 	clear_bit(NAPI_STATE_SCHED, &n->state);
 }
 EXPORT_SYMBOL(__napi_complete);
@@ -5133,6 +5139,14 @@
 }
 EXPORT_SYMBOL(netif_napi_del);
 
+struct napi_struct *get_current_napi_context(void)
+{
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	return sd->current_napi;
+}
+EXPORT_SYMBOL(get_current_napi_context);
+
 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 {
 	void *have;
@@ -5152,6 +5166,9 @@
 	 */
 	work = 0;
 	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+		struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+		sd->current_napi = n;
 		work = n->poll(n, weight);
 		trace_napi_poll(n, work, weight);
 	}
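
Editorial note: with the bookkeeping added above, code running inside a NAPI ->poll() on this CPU can look up the napi instance it was invoked under via get_current_napi_context(). A hedged, non-compilable sketch of a caller follows; the driver names are invented.

	#include <linux/netdevice.h>

	/* Sketch of a driver rx helper running from its ->poll() context. */
	static void mydrv_deliver(struct sk_buff *skb)
	{
		struct napi_struct *napi = get_current_napi_context();

		if (napi)
			napi_gro_receive(napi, skb);
		else
			netif_receive_skb(skb);
	}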
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index be4629c..a384a10 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -33,6 +33,8 @@
 	r->table = table;
 	r->flags = flags;
 	r->fr_net = ops->fro_net;
+	r->uid_start = INVALID_UID;
+	r->uid_end = INVALID_UID;
 
 	r->suppress_prefixlen = -1;
 	r->suppress_ifgroup = -1;
@@ -172,6 +174,23 @@
 }
 EXPORT_SYMBOL_GPL(fib_rules_unregister);
 
+static inline kuid_t fib_nl_uid(struct nlattr *nla)
+{
+	return make_kuid(current_user_ns(), nla_get_u32(nla));
+}
+
+static int nla_put_uid(struct sk_buff *skb, int idx, kuid_t uid)
+{
+	return nla_put_u32(skb, idx, from_kuid_munged(current_user_ns(), uid));
+}
+
+static int fib_uid_range_match(struct flowi *fl, struct fib_rule *rule)
+{
+	return (!uid_valid(rule->uid_start) && !uid_valid(rule->uid_end)) ||
+	       (uid_gte(fl->flowi_uid, rule->uid_start) &&
+		uid_lte(fl->flowi_uid, rule->uid_end));
+}
+
 static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
 			  struct flowi *fl, int flags,
 			  struct fib_lookup_arg *arg)
@@ -193,6 +212,9 @@
 	if (rule->l3mdev && !l3mdev_fib_rule_match(rule->fr_net, fl, arg))
 		goto out;
 
+	if (!fib_uid_range_match(fl, rule))
+		goto out;
+
 	ret = ops->match(rule, fl, flags);
 out:
 	return (rule->flags & FIB_RULE_INVERT) ? !ret : ret;
@@ -435,6 +457,19 @@
 		goto errout_free;
 	}
 
+	/* UID start and end must either both be valid or both unspecified. */
+	rule->uid_start = rule->uid_end = INVALID_UID;
+	if (tb[FRA_UID_START] || tb[FRA_UID_END]) {
+		if (tb[FRA_UID_START] && tb[FRA_UID_END]) {
+			rule->uid_start = fib_nl_uid(tb[FRA_UID_START]);
+			rule->uid_end = fib_nl_uid(tb[FRA_UID_END]);
+		}
+		if (!uid_valid(rule->uid_start) ||
+		    !uid_valid(rule->uid_end) ||
+		    !uid_lte(rule->uid_start, rule->uid_end))
+			goto errout_free;
+	}
+
 	err = ops->configure(rule, skb, frh, tb);
 	if (err < 0)
 		goto errout_free;
@@ -552,6 +587,14 @@
 		    (rule->l3mdev != nla_get_u8(tb[FRA_L3MDEV])))
 			continue;
 
+		if (tb[FRA_UID_START] &&
+		    !uid_eq(rule->uid_start, fib_nl_uid(tb[FRA_UID_START])))
+			continue;
+
+		if (tb[FRA_UID_END] &&
+		    !uid_eq(rule->uid_end, fib_nl_uid(tb[FRA_UID_END])))
+			continue;
+
 		if (!ops->compare(rule, frh, tb))
 			continue;
 
@@ -619,7 +662,9 @@
 			 + nla_total_size(4) /* FRA_SUPPRESS_IFGROUP */
 			 + nla_total_size(4) /* FRA_FWMARK */
 			 + nla_total_size(4) /* FRA_FWMASK */
-			 + nla_total_size_64bit(8); /* FRA_TUN_ID */
+			 + nla_total_size_64bit(8) /* FRA_TUN_ID */
+			 + nla_total_size(4) /* FRA_UID_START */
+			 + nla_total_size(4); /* FRA_UID_END */
 
 	if (ops->nlmsg_payload)
 		payload += ops->nlmsg_payload(rule);
@@ -679,7 +724,11 @@
 	    (rule->tun_id &&
 	     nla_put_be64(skb, FRA_TUN_ID, rule->tun_id, FRA_PAD)) ||
 	    (rule->l3mdev &&
-	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)))
+	     nla_put_u8(skb, FRA_L3MDEV, rule->l3mdev)) ||
+	    (uid_valid(rule->uid_start) &&
+	     nla_put_uid(skb, FRA_UID_START, rule->uid_start)) ||
+	    (uid_valid(rule->uid_end) &&
+	     nla_put_uid(skb, FRA_UID_END, rule->uid_end)))
 		goto nla_put_failure;
 
 	if (rule->suppress_ifgroup != -1) {
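
Editorial note on the fib_rules hunks above: fib_uid_range_match() treats an unset range as "match every flow" and otherwise requires the flow's UID to fall inside the inclusive [uid_start, uid_end] range. A small userspace analogue of that predicate follows, with plain integers standing in for kuid_t and an invented UID_UNSET sentinel in place of INVALID_UID.

	#include <stdbool.h>
	#include <stdint.h>

	#define UID_UNSET UINT32_MAX	/* stand-in for INVALID_UID */

	static bool uid_range_match(uint32_t flow_uid,
				    uint32_t uid_start, uint32_t uid_end)
	{
		/* No range configured: the rule applies to every UID. */
		if (uid_start == UID_UNSET && uid_end == UID_UNSET)
			return true;
		/* Otherwise the flow UID must sit inside the inclusive range. */
		return flow_uid >= uid_start && flow_uid <= uid_end;
	}

	int main(void)
	{
		return !(uid_range_match(1000, UID_UNSET, UID_UNSET) &&
			 uid_range_match(1000, 500, 2000) &&
			 !uid_range_match(100, 500, 2000));
	}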
diff --git a/net/ipc_router/Kconfig b/net/ipc_router/Kconfig
new file mode 100644
index 0000000..30cd45a
--- /dev/null
+++ b/net/ipc_router/Kconfig
@@ -0,0 +1,25 @@
+#
+# IPC_ROUTER Configuration
+#
+
+menuconfig IPC_ROUTER
+	bool "IPC Router support"
+	help
+	  IPC Router provides a connectionless message routing service
+	  between multiple modules within a System-on-Chip (SoC). The
+	  communicating entities can run either on the same processor or
+	  on a different processor within the SoC. The IPC Router is
+	  designed to route messages of any type and to support a broader
+	  network of processors.
+
+	  If in doubt, say N.
+
+config IPC_ROUTER_SECURITY
+	depends on IPC_ROUTER
+	bool "IPC Router Security support"
+	help
+	  This feature of IPC Router will enforce security rules
+	  configured by a security script from user space. Once
+	  configured with the security rules, IPC Router will ensure
+	  that the sender of a message to a service belongs to the
+	  relevant Linux group as configured by the security script.
diff --git a/net/ipc_router/Makefile b/net/ipc_router/Makefile
new file mode 100644
index 0000000..501688e
--- /dev/null
+++ b/net/ipc_router/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the Linux IPC_ROUTER
+#
+
+obj-$(CONFIG_IPC_ROUTER) := ipc_router_core.o
+obj-$(CONFIG_IPC_ROUTER) += ipc_router_socket.o
+obj-$(CONFIG_IPC_ROUTER_SECURITY) += ipc_router_security.o
diff --git a/net/ipc_router/ipc_router_core.c b/net/ipc_router/ipc_router_core.c
new file mode 100644
index 0000000..cdf372f
--- /dev/null
+++ b/net/ipc_router/ipc_router_core.c
@@ -0,0 +1,4334 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/poll.h>
+#include <linux/pm.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/rwsem.h>
+#include <linux/ipc_logging.h>
+#include <linux/uaccess.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_router_xprt.h>
+#include <linux/kref.h>
+#include <soc/qcom/subsystem_notif.h>
+#include <soc/qcom/subsystem_restart.h>
+
+#include <asm/byteorder.h>
+
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+enum {
+	SMEM_LOG = 1U << 0,
+	RTR_DBG = 1U << 1,
+};
+
+static int msm_ipc_router_debug_mask;
+module_param_named(debug_mask, msm_ipc_router_debug_mask,
+		   int, 0664);
+#define MODULE_NAME "ipc_router"
+
+#define IPC_RTR_INFO_PAGES 6
+
+#define IPC_RTR_INFO(log_ctx, x...) do { \
+typeof(log_ctx) _log_ctx = (log_ctx); \
+if (_log_ctx) \
+	ipc_log_string(_log_ctx, x); \
+if (msm_ipc_router_debug_mask & RTR_DBG) \
+	pr_info("[IPCRTR] "x); \
+} while (0)
+
+#define IPC_ROUTER_LOG_EVENT_TX         0x01
+#define IPC_ROUTER_LOG_EVENT_RX         0x02
+#define IPC_ROUTER_LOG_EVENT_TX_ERR     0x03
+#define IPC_ROUTER_LOG_EVENT_RX_ERR     0x04
+#define IPC_ROUTER_DUMMY_DEST_NODE	0xFFFFFFFF
+
+#define ipc_port_sk(port) ((struct sock *)(port))
+
+static LIST_HEAD(control_ports);
+static DECLARE_RWSEM(control_ports_lock_lha5);
+
+#define LP_HASH_SIZE 32
+static struct list_head local_ports[LP_HASH_SIZE];
+static DECLARE_RWSEM(local_ports_lock_lhc2);
+
+/* Server info is organized as a hash table. The server's service ID is
+ * used to index into the hash table. The instance ID of most servers is
+ * 1 or 2, while the service IDs are well distributed; keying the hash
+ * table on the service ID therefore keeps operations like add, lookup
+ * and destroy efficient.
+ */
+#define SRV_HASH_SIZE 32
+static struct list_head server_list[SRV_HASH_SIZE];
+static DECLARE_RWSEM(server_list_lock_lha2);
+
+struct msm_ipc_server {
+	struct list_head list;
+	struct kref ref;
+	struct msm_ipc_port_name name;
+	char pdev_name[32];
+	int next_pdev_id;
+	int synced_sec_rule;
+	struct list_head server_port_list;
+};
+
+struct msm_ipc_server_port {
+	struct list_head list;
+	struct platform_device *pdev;
+	struct msm_ipc_port_addr server_addr;
+	struct msm_ipc_router_xprt_info *xprt_info;
+};
+
+struct msm_ipc_resume_tx_port {
+	struct list_head list;
+	u32 port_id;
+	u32 node_id;
+};
+
+struct ipc_router_conn_info {
+	struct list_head list;
+	u32 port_id;
+};
+
+enum {
+	RESET = 0,
+	VALID = 1,
+};
+
+#define RP_HASH_SIZE 32
+struct msm_ipc_router_remote_port {
+	struct list_head list;
+	struct kref ref;
+	struct mutex rport_lock_lhb2; /* lock for remote port state access */
+	u32 node_id;
+	u32 port_id;
+	int status;
+	u32 tx_quota_cnt;
+	struct list_head resume_tx_port_list;
+	struct list_head conn_info_list;
+	void *sec_rule;
+	struct msm_ipc_server *server;
+};
+
+struct msm_ipc_router_xprt_info {
+	struct list_head list;
+	struct msm_ipc_router_xprt *xprt;
+	u32 remote_node_id;
+	u32 initialized;
+	struct list_head pkt_list;
+	struct wakeup_source ws;
+	struct mutex rx_lock_lhb2; /* lock for xprt rx operations */
+	struct mutex tx_lock_lhb2; /* lock for xprt tx operations */
+	u32 need_len;
+	u32 abort_data_read;
+	struct work_struct read_data;
+	struct workqueue_struct *workqueue;
+	void *log_ctx;
+	struct kref ref;
+	struct completion ref_complete;
+};
+
+#define RT_HASH_SIZE 4
+struct msm_ipc_routing_table_entry {
+	struct list_head list;
+	struct kref ref;
+	u32 node_id;
+	u32 neighbor_node_id;
+	struct list_head remote_port_list[RP_HASH_SIZE];
+	struct msm_ipc_router_xprt_info *xprt_info;
+	struct rw_semaphore lock_lha4;
+	unsigned long num_tx_bytes;
+	unsigned long num_rx_bytes;
+};
+
+#define LOG_CTX_NAME_LEN 32
+struct ipc_rtr_log_ctx {
+	struct list_head list;
+	char log_ctx_name[LOG_CTX_NAME_LEN];
+	void *log_ctx;
+};
+
+static struct list_head routing_table[RT_HASH_SIZE];
+static DECLARE_RWSEM(routing_table_lock_lha3);
+static int routing_table_inited;
+
+static void do_read_data(struct work_struct *work);
+
+static LIST_HEAD(xprt_info_list);
+static DECLARE_RWSEM(xprt_info_list_lock_lha5);
+
+static DEFINE_MUTEX(log_ctx_list_lock_lha0);
+static LIST_HEAD(log_ctx_list);
+static DEFINE_MUTEX(ipc_router_init_lock);
+static bool is_ipc_router_inited;
+static int ipc_router_core_init(void);
+#define IPC_ROUTER_INIT_TIMEOUT (10 * HZ)
+
+static u32 next_port_id;
+static DEFINE_MUTEX(next_port_id_lock_lhc1);
+static struct workqueue_struct *msm_ipc_router_workqueue;
+
+static void *local_log_ctx;
+static void *ipc_router_get_log_ctx(char *sub_name);
+static int process_resume_tx_msg(union rr_control_msg *msg,
+				 struct rr_packet *pkt);
+static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr);
+static int ipc_router_get_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info);
+static void ipc_router_put_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info);
+static void ipc_router_release_xprt_info_ref(struct kref *ref);
+
+struct pil_vote_info {
+	void *pil_handle;
+	struct work_struct load_work;
+	struct work_struct unload_work;
+};
+
+#define PIL_SUBSYSTEM_NAME_LEN 32
+static char default_peripheral[PIL_SUBSYSTEM_NAME_LEN];
+
+enum {
+	DOWN,
+	UP,
+};
+
+static void init_routing_table(void)
+{
+	int i;
+
+	for (i = 0; i < RT_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&routing_table[i]);
+}
+
+/**
+ * ipc_router_calc_checksum() - compute the checksum for extended HELLO message
+ * @msg:	Reference to the IPC Router HELLO message.
+ *
+ * Return: Computed checksum value, 0 if msg is NULL.
+ */
+static u32 ipc_router_calc_checksum(union rr_control_msg *msg)
+{
+	u32 checksum = 0;
+	int i, len;
+	u16 upper_nb;
+	u16 lower_nb;
+	void *hello;
+
+	if (!msg)
+		return checksum;
+	hello = msg;
+	len = sizeof(*msg);
+
+	for (i = 0; i < len / IPCR_WORD_SIZE; i++) {
+		lower_nb = (*((u32 *)hello)) & IPC_ROUTER_CHECKSUM_MASK;
+		upper_nb = ((*((u32 *)hello)) >> 16) &
+				IPC_ROUTER_CHECKSUM_MASK;
+		checksum = checksum + upper_nb + lower_nb;
+		hello = ((u32 *)hello) + 1;
+	}
+	while (checksum > 0xFFFF)
+		checksum = (checksum & IPC_ROUTER_CHECKSUM_MASK) +
+				((checksum >> 16) & IPC_ROUTER_CHECKSUM_MASK);
+
+	checksum = ~checksum & IPC_ROUTER_CHECKSUM_MASK;
+	return checksum;
+}
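
Editorial note: the HELLO checksum above is a 16-bit sum with end-around carry folding, finished with a ones' complement, assuming IPC_ROUTER_CHECKSUM_MASK is 0xFFFF (an assumption based on the folding loop). An equivalent standalone computation follows, illustrative only and assuming the message length is a multiple of 4 bytes as the kernel loop does.

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	static uint16_t hello_checksum(const uint32_t *words, size_t nwords)
	{
		uint32_t sum = 0;
		size_t i;

		/* Add the lower and upper 16 bits of every 32-bit word. */
		for (i = 0; i < nwords; i++) {
			sum += words[i] & 0xFFFF;
			sum += (words[i] >> 16) & 0xFFFF;
		}
		/* Fold the carries back in until the sum fits in 16 bits. */
		while (sum > 0xFFFF)
			sum = (sum & 0xFFFF) + ((sum >> 16) & 0xFFFF);

		return (uint16_t)(~sum & 0xFFFF);
	}

	int main(void)
	{
		uint32_t msg[4] = { 0x00000003, 0x12345678, 0, 0 };

		printf("checksum: 0x%04x\n", (unsigned)hello_checksum(msg, 4));
		return 0;
	}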
+
+/**
+ * skb_copy_to_log_buf() - copies the required number of bytes from the skb_queue
+ * @skb_head:	skb_queue head that contains the data.
+ * @pl_len:	length of payload need to be copied.
+ * @hdr_offset:	length of the header present in first skb
+ * @log_buf:	The output buffer which will contain the formatted log string
+ *
+ * This function copies the first specified number of bytes from the skb_queue
+ * to a new buffer and formats them to a string for logging.
+ */
+static void skb_copy_to_log_buf(struct sk_buff_head *skb_head,
+				unsigned int pl_len, unsigned int hdr_offset,
+				u64 *log_buf)
+{
+	struct sk_buff *temp_skb;
+	unsigned int copied_len = 0, copy_len = 0;
+	int remaining;
+
+	if (!skb_head) {
+		IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
+		return;
+	}
+	temp_skb = skb_peek(skb_head);
+	if (unlikely(!temp_skb || !temp_skb->data)) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return;
+	}
+
+	remaining = temp_skb->len - hdr_offset;
+	skb_queue_walk(skb_head, temp_skb) {
+		copy_len = remaining < pl_len ? remaining : pl_len;
+		memcpy(log_buf + copied_len, temp_skb->data + hdr_offset,
+		       copy_len);
+		copied_len += copy_len;
+		hdr_offset = 0;
+		if (copied_len == pl_len)
+			break;
+		remaining = pl_len - remaining;
+	}
+}
+
+/**
+ * ipc_router_log_msg() - log all data messages exchanged
+ * @log_ctx:	IPC Logging context specific to each transport
+ * @xchng_type:	Identifies whether the data is being received or sent.
+ * @data:	IPC Router data packet or control msg received or to be sent.
+ * @hdr:	Reference to the router header
+ * @port_ptr:	Local IPC Router port.
+ * @rport_ptr:	Remote IPC Router port
+ *
+ * This function builds the log message that is passed on to the IPC
+ * logging framework. The logged messages correspond to the information
+ * exchanged between the IPC Router and its clients.
+ */
+static void ipc_router_log_msg(void *log_ctx, u32 xchng_type,
+			       void *data, struct rr_header_v1 *hdr,
+			       struct msm_ipc_port *port_ptr,
+			       struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct sk_buff_head *skb_head = NULL;
+	union rr_control_msg *msg = NULL;
+	struct rr_packet *pkt = NULL;
+	u64 pl_buf = 0;
+	struct sk_buff *skb;
+	u32 buf_len = 8;
+	u32 svc_id = 0;
+	u32 svc_ins = 0;
+	unsigned int hdr_offset = 0;
+	u32 port_type = 0;
+
+	if (!log_ctx || !hdr || !data)
+		return;
+
+	if (hdr->type == IPC_ROUTER_CTRL_CMD_DATA) {
+		pkt = (struct rr_packet *)data;
+		skb_head = pkt->pkt_fragment_q;
+		skb = skb_peek(skb_head);
+		if (!skb || !skb->data) {
+			IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+			return;
+		}
+
+		if (skb_queue_len(skb_head) == 1 && skb->len < 8)
+			buf_len = skb->len;
+		if (xchng_type == IPC_ROUTER_LOG_EVENT_TX && hdr->dst_node_id
+				!= IPC_ROUTER_NID_LOCAL) {
+			if (hdr->version == IPC_ROUTER_V1)
+				hdr_offset = sizeof(struct rr_header_v1);
+			else if (hdr->version == IPC_ROUTER_V2)
+				hdr_offset = sizeof(struct rr_header_v2);
+		}
+		skb_copy_to_log_buf(skb_head, buf_len, hdr_offset, &pl_buf);
+
+		if (port_ptr && rport_ptr && (port_ptr->type == CLIENT_PORT) &&
+		    rport_ptr->server) {
+			svc_id = rport_ptr->server->name.service;
+			svc_ins = rport_ptr->server->name.instance;
+			port_type = CLIENT_PORT;
+		} else if (port_ptr && (port_ptr->type == SERVER_PORT)) {
+			svc_id = port_ptr->port_name.service;
+			svc_ins = port_ptr->port_name.instance;
+			port_type = SERVER_PORT;
+		}
+		IPC_RTR_INFO(log_ctx,
+			     "%s %s %s Len:0x%x T:0x%x CF:0x%x SVC:<0x%x:0x%x> SRC:<0x%x:0x%x> DST:<0x%x:0x%x> DATA: %08x %08x",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ?
+			 current->comm : "")),
+			(port_type == CLIENT_PORT ? "CLI" : "SRV"),
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
+			 "UNKNOWN")))),
+			hdr->size, hdr->type, hdr->control_flag,
+			svc_id, svc_ins, hdr->src_node_id, hdr->src_port_id,
+			hdr->dst_node_id, hdr->dst_port_id,
+			(unsigned int)pl_buf, (unsigned int)(pl_buf >> 32));
+
+	} else {
+		msg = (union rr_control_msg *)data;
+		if (msg->cmd == IPC_ROUTER_CTRL_CMD_NEW_SERVER ||
+		    msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_SERVER)
+			IPC_RTR_INFO(log_ctx,
+				     "CTL MSG: %s cmd:0x%x SVC:<0x%x:0x%x> ADDR:<0x%x:0x%x>",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX_ERR ? "TX_ERR" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX_ERR ? "RX_ERR" :
+			 "UNKNOWN")))),
+			msg->cmd, msg->srv.service, msg->srv.instance,
+			msg->srv.node_id, msg->srv.port_id);
+		else if (msg->cmd == IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT ||
+			 msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX)
+			IPC_RTR_INFO(log_ctx,
+				     "CTL MSG: %s cmd:0x%x ADDR: <0x%x:0x%x>",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+			msg->cmd, msg->cli.node_id, msg->cli.port_id);
+		else if (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO && hdr)
+			IPC_RTR_INFO(log_ctx,
+				     "CTL MSG %s cmd:0x%x ADDR:0x%x",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+			msg->cmd, hdr->src_node_id);
+		else
+			IPC_RTR_INFO(log_ctx,
+				     "%s UNKNOWN cmd:0x%x",
+			(xchng_type == IPC_ROUTER_LOG_EVENT_RX ? "RX" :
+			(xchng_type == IPC_ROUTER_LOG_EVENT_TX ? "TX" : "ERR")),
+			msg->cmd);
+	}
+}
+
+/* Must be called with routing_table_lock_lha3 locked. */
+static struct msm_ipc_routing_table_entry *lookup_routing_table(
+	u32 node_id)
+{
+	u32 key = (node_id % RT_HASH_SIZE);
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	list_for_each_entry(rt_entry, &routing_table[key], list) {
+		if (rt_entry->node_id == node_id)
+			return rt_entry;
+	}
+	return NULL;
+}
+
+/**
+ * create_routing_table_entry() - Lookup and create a routing table entry
+ * @node_id: Node ID of the routing table entry to be created.
+ * @xprt_info: XPRT through which the node ID is reachable.
+ *
+ * @return: a reference to the routing table entry on success, NULL on failure.
+ */
+static struct msm_ipc_routing_table_entry *create_routing_table_entry(
+	u32 node_id, struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int i;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	u32 key;
+
+	down_write(&routing_table_lock_lha3);
+	rt_entry = lookup_routing_table(node_id);
+	if (rt_entry)
+		goto out_create_rtentry1;
+
+	rt_entry = kmalloc(sizeof(*rt_entry), GFP_KERNEL);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: rt_entry allocation failed for %d\n",
+			    __func__, node_id);
+		goto out_create_rtentry2;
+	}
+
+	for (i = 0; i < RP_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&rt_entry->remote_port_list[i]);
+	init_rwsem(&rt_entry->lock_lha4);
+	kref_init(&rt_entry->ref);
+	rt_entry->node_id = node_id;
+	rt_entry->xprt_info = xprt_info;
+	if (xprt_info)
+		rt_entry->neighbor_node_id = xprt_info->remote_node_id;
+
+	key = (node_id % RT_HASH_SIZE);
+	list_add_tail(&rt_entry->list, &routing_table[key]);
+out_create_rtentry1:
+	kref_get(&rt_entry->ref);
+out_create_rtentry2:
+	up_write(&routing_table_lock_lha3);
+	return rt_entry;
+}
+
+/**
+ * ipc_router_get_rtentry_ref() - Get a reference to the routing table entry
+ * @node_id: Node ID of the routing table entry.
+ *
+ * @return: a reference to the routing table entry on success, NULL on failure.
+ *
+ * This function is used to obtain a reference to the routing table entry
+ * corresponding to a node id.
+ */
+static struct msm_ipc_routing_table_entry *ipc_router_get_rtentry_ref(
+	u32 node_id)
+{
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	down_read(&routing_table_lock_lha3);
+	rt_entry = lookup_routing_table(node_id);
+	if (rt_entry)
+		kref_get(&rt_entry->ref);
+	up_read(&routing_table_lock_lha3);
+	return rt_entry;
+}
+
+/**
+ * ipc_router_release_rtentry() - Cleanup and release the routing table entry
+ * @ref: Reference to the entry.
+ *
+ * This function is called when all references to the routing table entry are
+ * released.
+ */
+void ipc_router_release_rtentry(struct kref *ref)
+{
+	struct msm_ipc_routing_table_entry *rt_entry =
+		container_of(ref, struct msm_ipc_routing_table_entry, ref);
+
+	/* All references to a routing entry will be put only under SSR.
+	 * As part of SSR, all the internals of the routing table entry
+	 * are cleaned. So just free the routing table entry.
+	 */
+	kfree(rt_entry);
+}
+
+struct rr_packet *rr_read(struct msm_ipc_router_xprt_info *xprt_info)
+{
+	struct rr_packet *temp_pkt;
+
+	if (!xprt_info)
+		return NULL;
+
+	mutex_lock(&xprt_info->rx_lock_lhb2);
+	if (xprt_info->abort_data_read) {
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+		IPC_RTR_ERR("%s detected SSR & exiting now\n",
+			    xprt_info->xprt->name);
+		return NULL;
+	}
+
+	if (list_empty(&xprt_info->pkt_list)) {
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+		return NULL;
+	}
+
+	temp_pkt = list_first_entry(&xprt_info->pkt_list,
+				    struct rr_packet, list);
+	list_del(&temp_pkt->list);
+	if (list_empty(&xprt_info->pkt_list))
+		__pm_relax(&xprt_info->ws);
+	mutex_unlock(&xprt_info->rx_lock_lhb2);
+	return temp_pkt;
+}
+
+struct rr_packet *clone_pkt(struct rr_packet *pkt)
+{
+	struct rr_packet *cloned_pkt;
+	struct sk_buff *temp_skb, *cloned_skb;
+	struct sk_buff_head *pkt_fragment_q;
+
+	cloned_pkt = kzalloc(sizeof(*cloned_pkt), GFP_KERNEL);
+	if (!cloned_pkt) {
+		IPC_RTR_ERR("%s: failure\n", __func__);
+		return NULL;
+	}
+	memcpy(&cloned_pkt->hdr, &pkt->hdr, sizeof(struct rr_header_v1));
+	if (pkt->opt_hdr.len > 0) {
+		cloned_pkt->opt_hdr.data = kmalloc(pkt->opt_hdr.len,
+							GFP_KERNEL);
+		if (!cloned_pkt->opt_hdr.data) {
+			IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
+		} else {
+			cloned_pkt->opt_hdr.len = pkt->opt_hdr.len;
+			memcpy(cloned_pkt->opt_hdr.data, pkt->opt_hdr.data,
+			       pkt->opt_hdr.len);
+		}
+	}
+
+	pkt_fragment_q = kmalloc(sizeof(*pkt_fragment_q), GFP_KERNEL);
+	if (!pkt_fragment_q) {
+		IPC_RTR_ERR("%s: pkt_frag_q alloc failure\n", __func__);
+		kfree(cloned_pkt);
+		return NULL;
+	}
+	skb_queue_head_init(pkt_fragment_q);
+	kref_init(&cloned_pkt->ref);
+
+	skb_queue_walk(pkt->pkt_fragment_q, temp_skb) {
+		cloned_skb = skb_clone(temp_skb, GFP_KERNEL);
+		if (!cloned_skb)
+			goto fail_clone;
+		skb_queue_tail(pkt_fragment_q, cloned_skb);
+	}
+	cloned_pkt->pkt_fragment_q = pkt_fragment_q;
+	cloned_pkt->length = pkt->length;
+	return cloned_pkt;
+
+fail_clone:
+	while (!skb_queue_empty(pkt_fragment_q)) {
+		temp_skb = skb_dequeue(pkt_fragment_q);
+		kfree_skb(temp_skb);
+	}
+	kfree(pkt_fragment_q);
+	if (cloned_pkt->opt_hdr.len > 0)
+		kfree(cloned_pkt->opt_hdr.data);
+	kfree(cloned_pkt);
+	return NULL;
+}
+
+/**
+ * create_pkt() - Create a Router packet
+ * @data: SKB queue to be contained inside the packet.
+ *
+ * @return: pointer to packet on success, NULL on failure.
+ */
+struct rr_packet *create_pkt(struct sk_buff_head *data)
+{
+	struct rr_packet *pkt;
+	struct sk_buff *temp_skb;
+
+	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: failure\n", __func__);
+		return NULL;
+	}
+
+	if (data) {
+		pkt->pkt_fragment_q = data;
+		skb_queue_walk(pkt->pkt_fragment_q, temp_skb)
+			pkt->length += temp_skb->len;
+	} else {
+		pkt->pkt_fragment_q = kmalloc(sizeof(*pkt->pkt_fragment_q),
+					      GFP_KERNEL);
+		if (!pkt->pkt_fragment_q) {
+			IPC_RTR_ERR("%s: Couldn't alloc pkt_fragment_q\n",
+				    __func__);
+			kfree(pkt);
+			return NULL;
+		}
+		skb_queue_head_init(pkt->pkt_fragment_q);
+	}
+	kref_init(&pkt->ref);
+	return pkt;
+}
+
+void release_pkt(struct rr_packet *pkt)
+{
+	struct sk_buff *temp_skb;
+
+	if (!pkt)
+		return;
+
+	if (!pkt->pkt_fragment_q) {
+		kfree(pkt);
+		return;
+	}
+
+	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
+		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
+		kfree_skb(temp_skb);
+	}
+	kfree(pkt->pkt_fragment_q);
+	if (pkt->opt_hdr.len > 0)
+		kfree(pkt->opt_hdr.data);
+	kfree(pkt);
+}
+
+static struct sk_buff_head *msm_ipc_router_buf_to_skb(void *buf,
+						      unsigned int buf_len)
+{
+	struct sk_buff_head *skb_head;
+	struct sk_buff *skb;
+	int first = 1, offset = 0;
+	int skb_size, data_size;
+	void *data;
+	int last = 1;
+	int align_size;
+
+	skb_head = kmalloc(sizeof(*skb_head), GFP_KERNEL);
+	if (!skb_head) {
+		IPC_RTR_ERR("%s: Could not allocate skb_head\n", __func__);
+		return NULL;
+	}
+	skb_queue_head_init(skb_head);
+
+	data_size = buf_len;
+	align_size = ALIGN_SIZE(data_size);
+	while (offset != buf_len) {
+		skb_size = data_size;
+		if (first)
+			skb_size += IPC_ROUTER_HDR_SIZE;
+		if (last)
+			skb_size += align_size;
+
+		skb = alloc_skb(skb_size, GFP_KERNEL);
+		if (!skb) {
+			if (skb_size <= (PAGE_SIZE / 2)) {
+				IPC_RTR_ERR("%s: cannot allocate skb\n",
+					    __func__);
+				goto buf_to_skb_error;
+			}
+			data_size = data_size / 2;
+			last = 0;
+			continue;
+		}
+
+		if (first) {
+			skb_reserve(skb, IPC_ROUTER_HDR_SIZE);
+			first = 0;
+		}
+
+		data = skb_put(skb, data_size);
+		memcpy(skb->data, buf + offset, data_size);
+		skb_queue_tail(skb_head, skb);
+		offset += data_size;
+		data_size = buf_len - offset;
+		last = 1;
+	}
+	return skb_head;
+
+buf_to_skb_error:
+	while (!skb_queue_empty(skb_head)) {
+		skb = skb_dequeue(skb_head);
+		kfree_skb(skb);
+	}
+	kfree(skb_head);
+	return NULL;
+}
+
+static void *msm_ipc_router_skb_to_buf(struct sk_buff_head *skb_head,
+				       unsigned int len)
+{
+	struct sk_buff *temp;
+	unsigned int offset = 0, buf_len = 0, copy_len;
+	void *buf;
+
+	if (!skb_head) {
+		IPC_RTR_ERR("%s: NULL skb_head\n", __func__);
+		return NULL;
+	}
+
+	temp = skb_peek(skb_head);
+	buf_len = len;
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf) {
+		IPC_RTR_ERR("%s: cannot allocate buf\n", __func__);
+		return NULL;
+	}
+	skb_queue_walk(skb_head, temp) {
+		copy_len = buf_len < temp->len ? buf_len : temp->len;
+		memcpy(buf + offset, temp->data, copy_len);
+		offset += copy_len;
+		buf_len -= copy_len;
+	}
+	return buf;
+}
+
+void msm_ipc_router_free_skb(struct sk_buff_head *skb_head)
+{
+	struct sk_buff *temp_skb;
+
+	if (!skb_head)
+		return;
+
+	while (!skb_queue_empty(skb_head)) {
+		temp_skb = skb_dequeue(skb_head);
+		kfree_skb(temp_skb);
+	}
+	kfree(skb_head);
+}
+
+/**
+ * extract_optional_header() - Extract the optional header from skb
+ * @pkt:	Packet structure into which the header has to be extracted.
+ * @opt_len:	The optional header length, in words (IPCR_WORD_SIZE units).
+ *
+ * @return:	Length of optional header in bytes on success, zero otherwise.
+ */
+static int extract_optional_header(struct rr_packet *pkt, u8 opt_len)
+{
+	size_t offset = 0, buf_len = 0, copy_len, opt_hdr_len;
+	struct sk_buff *temp;
+	struct sk_buff_head *skb_head;
+
+	opt_hdr_len = opt_len * IPCR_WORD_SIZE;
+	pkt->opt_hdr.data = kmalloc(opt_hdr_len, GFP_KERNEL);
+	if (!pkt->opt_hdr.data) {
+		IPC_RTR_ERR("%s: Memory allocation Failed\n", __func__);
+		return 0;
+	}
+	skb_head = pkt->pkt_fragment_q;
+	buf_len = opt_hdr_len;
+	skb_queue_walk(skb_head, temp) {
+		copy_len = buf_len < temp->len ? buf_len : temp->len;
+		memcpy(pkt->opt_hdr.data + offset, temp->data, copy_len);
+		offset += copy_len;
+		buf_len -= copy_len;
+		skb_pull(temp, copy_len);
+		if (temp->len == 0) {
+			skb_dequeue(skb_head);
+			kfree_skb(temp);
+		}
+	}
+	pkt->opt_hdr.len = opt_hdr_len;
+	return opt_hdr_len;
+}
+
+/**
+ * extract_header_v1() - Extract IPC Router header of version 1
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @skb: SKB from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int extract_header_v1(struct rr_packet *pkt, struct sk_buff *skb)
+{
+	if (!pkt || !skb) {
+		IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
+		return -EINVAL;
+	}
+
+	memcpy(&pkt->hdr, skb->data, sizeof(struct rr_header_v1));
+	skb_pull(skb, sizeof(struct rr_header_v1));
+	pkt->length -= sizeof(struct rr_header_v1);
+	return 0;
+}
+
+/**
+ * extract_header_v2() - Extract IPC Router header of version 2
+ * @pkt: Packet structure into which the header has to be extracted.
+ * @skb: SKB from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int extract_header_v2(struct rr_packet *pkt, struct sk_buff *skb)
+{
+	struct rr_header_v2 *hdr;
+	u8 opt_len;
+	size_t opt_hdr_len;
+	size_t total_hdr_size = sizeof(*hdr);
+
+	if (!pkt || !skb) {
+		IPC_RTR_ERR("%s: Invalid pkt or skb\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr = (struct rr_header_v2 *)skb->data;
+	pkt->hdr.version = (u32)hdr->version;
+	pkt->hdr.type = (u32)hdr->type;
+	pkt->hdr.src_node_id = (u32)hdr->src_node_id;
+	pkt->hdr.src_port_id = (u32)hdr->src_port_id;
+	pkt->hdr.size = (u32)hdr->size;
+	pkt->hdr.control_flag = (u32)hdr->control_flag;
+	pkt->hdr.dst_node_id = (u32)hdr->dst_node_id;
+	pkt->hdr.dst_port_id = (u32)hdr->dst_port_id;
+	opt_len = hdr->opt_len;
+	skb_pull(skb, total_hdr_size);
+	if (opt_len > 0) {
+		opt_hdr_len = extract_optional_header(pkt, opt_len);
+		total_hdr_size += opt_hdr_len;
+	}
+	pkt->length -= total_hdr_size;
+	return 0;
+}
+
+/**
+ * extract_header() - Extract IPC Router header
+ * @pkt: Packet from which the header has to be extracted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function will check if the header version is v1 or v2 and invoke
+ * the corresponding helper function to extract the IPC Router header.
+ */
+static int extract_header(struct rr_packet *pkt)
+{
+	struct sk_buff *temp_skb;
+	int ret;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	if (temp_skb->data[0] == IPC_ROUTER_V1) {
+		ret = extract_header_v1(pkt, temp_skb);
+	} else if (temp_skb->data[0] == IPC_ROUTER_V2) {
+		ret = extract_header_v2(pkt, temp_skb);
+	} else {
+		IPC_RTR_ERR("%s: Invalid Header version %02x\n",
+			    __func__, temp_skb->data[0]);
+		print_hex_dump(KERN_ERR, "Header: ", DUMP_PREFIX_ADDRESS,
+			       16, 1, temp_skb->data, pkt->length, true);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+/**
+ * calc_tx_header_size() - Calculate header size to be reserved in SKB
+ * @pkt: Packet in which the space for header has to be reserved.
+ * @dst_xprt_info: XPRT through which the destination is reachable.
+ *
+ * @return: required header size on success,
+ *          standard Linux error codes on failure.
+ *
+ * This function is used to calculate the header size that has to be reserved
+ * in a transmit SKB. The header size is calculated based on the XPRT through
+ * which the destination node is reachable.
+ */
+static int calc_tx_header_size(struct rr_packet *pkt,
+			       struct msm_ipc_router_xprt_info *dst_xprt_info)
+{
+	int hdr_size = 0;
+	int xprt_version = 0;
+	struct msm_ipc_router_xprt_info *xprt_info = dst_xprt_info;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	if (xprt_info)
+		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
+
+	if (xprt_version == IPC_ROUTER_V1) {
+		pkt->hdr.version = IPC_ROUTER_V1;
+		hdr_size = sizeof(struct rr_header_v1);
+	} else if (xprt_version == IPC_ROUTER_V2) {
+		pkt->hdr.version = IPC_ROUTER_V2;
+		hdr_size = sizeof(struct rr_header_v2) + pkt->opt_hdr.len;
+	} else {
+		IPC_RTR_ERR("%s: Invalid xprt_version %d\n",
+			    __func__, xprt_version);
+		hdr_size = -EINVAL;
+	}
+
+	return hdr_size;
+}
+
+/**
+ * calc_rx_header_size() - Calculate the RX header size
+ * @xprt_info: XPRT info of the received message.
+ *
+ * @return: valid header size on success, INT_MAX on failure.
+ */
+static int calc_rx_header_size(struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int xprt_version = 0;
+	int hdr_size = INT_MAX;
+
+	if (xprt_info)
+		xprt_version = xprt_info->xprt->get_version(xprt_info->xprt);
+
+	if (xprt_version == IPC_ROUTER_V1)
+		hdr_size = sizeof(struct rr_header_v1);
+	else if (xprt_version == IPC_ROUTER_V2)
+		hdr_size = sizeof(struct rr_header_v2);
+	return hdr_size;
+}
+
+/**
+ * prepend_header_v1() - Prepend IPC Router header of version 1
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @hdr_size: Size of the header
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int prepend_header_v1(struct rr_packet *pkt, int hdr_size)
+{
+	struct sk_buff *temp_skb;
+	struct rr_header_v1 *hdr;
+
+	if (!pkt || hdr_size <= 0) {
+		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	if (skb_headroom(temp_skb) < hdr_size) {
+		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
+		if (!temp_skb) {
+			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
+				    __func__, hdr_size);
+			return -ENOMEM;
+		}
+		skb_reserve(temp_skb, hdr_size);
+	}
+
+	hdr = (struct rr_header_v1 *)skb_push(temp_skb, hdr_size);
+	memcpy(hdr, &pkt->hdr, hdr_size);
+	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
+		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
+	pkt->length += hdr_size;
+	return 0;
+}
+
+/**
+ * prepend_header_v2() - Prepend IPC Router header of version 2
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @hdr_size: Size of the header
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+static int prepend_header_v2(struct rr_packet *pkt, int hdr_size)
+{
+	struct sk_buff *temp_skb;
+	struct rr_header_v2 *hdr;
+
+	if (!pkt || hdr_size <= 0) {
+		IPC_RTR_ERR("%s: Invalid input parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	if (skb_headroom(temp_skb) < hdr_size) {
+		temp_skb = alloc_skb(hdr_size, GFP_KERNEL);
+		if (!temp_skb) {
+			IPC_RTR_ERR("%s: Could not allocate SKB of size %d\n",
+				    __func__, hdr_size);
+			return -ENOMEM;
+		}
+		skb_reserve(temp_skb, hdr_size);
+	}
+
+	hdr = (struct rr_header_v2 *)skb_push(temp_skb, hdr_size);
+	hdr->version = (u8)pkt->hdr.version;
+	hdr->type = (u8)pkt->hdr.type;
+	hdr->control_flag = (u8)pkt->hdr.control_flag;
+	hdr->size = (u32)pkt->hdr.size;
+	hdr->src_node_id = (u16)pkt->hdr.src_node_id;
+	hdr->src_port_id = (u16)pkt->hdr.src_port_id;
+	hdr->dst_node_id = (u16)pkt->hdr.dst_node_id;
+	hdr->dst_port_id = (u16)pkt->hdr.dst_port_id;
+	if (pkt->opt_hdr.len > 0) {
+		hdr->opt_len = pkt->opt_hdr.len / IPCR_WORD_SIZE;
+		memcpy(hdr + sizeof(*hdr), pkt->opt_hdr.data, pkt->opt_hdr.len);
+	} else {
+		hdr->opt_len = 0;
+	}
+	if (temp_skb != skb_peek(pkt->pkt_fragment_q))
+		skb_queue_head(pkt->pkt_fragment_q, temp_skb);
+	pkt->length += hdr_size;
+	return 0;
+}
+
+/**
+ * prepend_header() - Prepend IPC Router header
+ * @pkt: Packet structure which contains the header info to be prepended.
+ * @xprt_info: XPRT through which the packet is transmitted.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * This function prepends the header to the packet to be transmitted. The
+ * IPC Router header version to be prepended depends on the XPRT through
+ * which the destination is reachable.
+ */
+static int prepend_header(struct rr_packet *pkt,
+			  struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int hdr_size;
+	struct sk_buff *temp_skb;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	temp_skb = skb_peek(pkt->pkt_fragment_q);
+	if (!temp_skb || !temp_skb->data) {
+		IPC_RTR_ERR("%s: No SKBs in skb_queue\n", __func__);
+		return -EINVAL;
+	}
+
+	hdr_size = calc_tx_header_size(pkt, xprt_info);
+	if (hdr_size <= 0)
+		return hdr_size;
+
+	if (pkt->hdr.version == IPC_ROUTER_V1)
+		return prepend_header_v1(pkt, hdr_size);
+	else if (pkt->hdr.version == IPC_ROUTER_V2)
+		return prepend_header_v2(pkt, hdr_size);
+	else
+		return -EINVAL;
+}
+
+/**
+ * defragment_pkt() - Defragment and linearize the packet
+ * @pkt: Packet to be linearized.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ *
+ * Some packets contain fragments of data over multiple SKBs. If an XPRT
+ * does not support fragmented writes, linearize multiple SKBs into one
+ * single SKB.
+ */
+static int defragment_pkt(struct rr_packet *pkt)
+{
+	struct sk_buff *dst_skb, *src_skb, *temp_skb;
+	int offset = 0, buf_len = 0, copy_len;
+	void *buf;
+	int align_size;
+
+	if (!pkt || pkt->length <= 0) {
+		IPC_RTR_ERR("%s: Invalid PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	if (skb_queue_len(pkt->pkt_fragment_q) == 1)
+		return 0;
+
+	align_size = ALIGN_SIZE(pkt->length);
+	dst_skb = alloc_skb(pkt->length + align_size, GFP_KERNEL);
+	if (!dst_skb) {
+		IPC_RTR_ERR("%s: could not allocate one skb of size %d\n",
+			    __func__, pkt->length);
+		return -ENOMEM;
+	}
+	buf = skb_put(dst_skb, pkt->length);
+	buf_len = pkt->length;
+
+	skb_queue_walk(pkt->pkt_fragment_q, src_skb) {
+		copy_len =  buf_len < src_skb->len ? buf_len : src_skb->len;
+		memcpy(buf + offset, src_skb->data, copy_len);
+		offset += copy_len;
+		buf_len -= copy_len;
+	}
+
+	while (!skb_queue_empty(pkt->pkt_fragment_q)) {
+		temp_skb = skb_dequeue(pkt->pkt_fragment_q);
+		kfree_skb(temp_skb);
+	}
+	skb_queue_tail(pkt->pkt_fragment_q, dst_skb);
+	return 0;
+}
+
+static int post_pkt_to_port(struct msm_ipc_port *port_ptr,
+			    struct rr_packet *pkt, int clone)
+{
+	struct rr_packet *temp_pkt = pkt;
+	void (*notify)(unsigned int event, void *oob_data,
+		       size_t oob_data_len, void *priv);
+	void (*data_ready)(struct sock *sk) = NULL;
+	struct sock *sk;
+	u32 pkt_type;
+
+	if (unlikely(!port_ptr || !pkt))
+		return -EINVAL;
+
+	if (clone) {
+		temp_pkt = clone_pkt(pkt);
+		if (!temp_pkt) {
+			IPC_RTR_ERR(
+			"%s: Error cloning packet for port %08x:%08x\n",
+				__func__, port_ptr->this_port.node_id,
+				port_ptr->this_port.port_id);
+			return -ENOMEM;
+		}
+	}
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	__pm_stay_awake(port_ptr->port_rx_ws);
+	list_add_tail(&temp_pkt->list, &port_ptr->port_rx_q);
+	wake_up(&port_ptr->port_rx_wait_q);
+	notify = port_ptr->notify;
+	pkt_type = temp_pkt->hdr.type;
+	sk = (struct sock *)port_ptr->endpoint;
+	if (sk) {
+		read_lock(&sk->sk_callback_lock);
+		data_ready = sk->sk_data_ready;
+		read_unlock(&sk->sk_callback_lock);
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+	if (notify)
+		notify(pkt_type, NULL, 0, port_ptr->priv);
+	else if (sk && data_ready)
+		data_ready(sk);
+
+	return 0;
+}
+
+/**
+ * ipc_router_peek_pkt_size() - Peek into the packet header to get potential
+ *				packet size
+ * @data: Starting address of the packet which points to router header.
+ *
+ * @return: potential packet size on success, < 0 on error.
+ *
+ * This function is used by the underlying transport abstraction layer to
+ * peek into the potential packet size of an incoming packet. This information
+ * is used to perform link layer fragmentation and re-assembly.
+ */
+int ipc_router_peek_pkt_size(char *data)
+{
+	int size;
+
+	if (!data) {
+		pr_err("%s: NULL PKT\n", __func__);
+		return -EINVAL;
+	}
+
+	if (data[0] == IPC_ROUTER_V1)
+		size = ((struct rr_header_v1 *)data)->size +
+			sizeof(struct rr_header_v1);
+	else if (data[0] == IPC_ROUTER_V2)
+		size = ((struct rr_header_v2 *)data)->size +
+			((struct rr_header_v2 *)data)->opt_len * IPCR_WORD_SIZE
+			+ sizeof(struct rr_header_v2);
+	else
+		return -EINVAL;
+
+	size += ALIGN_SIZE(size);
+	return size;
+}
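
Editorial worked example: for a v2 packet whose header reports size = 24 payload bytes and opt_len = 2 words (8 bytes), the peeked length is 24 + 8 + sizeof(struct rr_header_v2), rounded up for alignment. The sketch below mirrors that arithmetic with assumed sizes; the 16-byte v2 header and 8-byte alignment are assumptions for illustration, not values taken from the ipc_router headers.

	#include <stdio.h>
	#include <stdint.h>

	/* Assumed layout constants, illustration only. */
	#define HDR_V2_SIZE	16U
	#define IPCR_WORD	4U
	#define ALIGN_TO	8U

	int main(void)
	{
		uint32_t payload = 24, opt_words = 2;
		uint32_t size = payload + opt_words * IPCR_WORD + HDR_V2_SIZE;

		/* Pad to the transport alignment, as the kernel's ALIGN_SIZE() step does. */
		size += (ALIGN_TO - (size % ALIGN_TO)) % ALIGN_TO;
		printf("peeked packet size: %u bytes\n", (unsigned)size);
		return 0;
	}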
+
+static int post_control_ports(struct rr_packet *pkt)
+{
+	struct msm_ipc_port *port_ptr;
+
+	if (!pkt)
+		return -EINVAL;
+
+	down_read(&control_ports_lock_lha5);
+	list_for_each_entry(port_ptr, &control_ports, list)
+		post_pkt_to_port(port_ptr, pkt, 1);
+	up_read(&control_ports_lock_lha5);
+	return 0;
+}
+
+static u32 allocate_port_id(void)
+{
+	u32 port_id = 0, prev_port_id, key;
+	struct msm_ipc_port *port_ptr;
+
+	mutex_lock(&next_port_id_lock_lhc1);
+	prev_port_id = next_port_id;
+	down_read(&local_ports_lock_lhc2);
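+	/*
+	 * Scan for an unused port ID, wrapping back to 1 before reaching the
+	 * reserved IPC_ROUTER_ADDRESS value and giving up if the search
+	 * returns to its starting point.
+	 */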
+	do {
+		next_port_id++;
+		if ((next_port_id & IPC_ROUTER_ADDRESS) == IPC_ROUTER_ADDRESS)
+			next_port_id = 1;
+
+		key = (next_port_id & (LP_HASH_SIZE - 1));
+		if (list_empty(&local_ports[key])) {
+			port_id = next_port_id;
+			break;
+		}
+		list_for_each_entry(port_ptr, &local_ports[key], list) {
+			if (port_ptr->this_port.port_id == next_port_id) {
+				port_id = next_port_id;
+				break;
+			}
+		}
+		if (!port_id) {
+			port_id = next_port_id;
+			break;
+		}
+		port_id = 0;
+	} while (next_port_id != prev_port_id);
+	up_read(&local_ports_lock_lhc2);
+	mutex_unlock(&next_port_id_lock_lhc1);
+
+	return port_id;
+}
+
+void msm_ipc_router_add_local_port(struct msm_ipc_port *port_ptr)
+{
+	u32 key;
+
+	if (!port_ptr)
+		return;
+
+	key = (port_ptr->this_port.port_id & (LP_HASH_SIZE - 1));
+	down_write(&local_ports_lock_lhc2);
+	list_add_tail(&port_ptr->list, &local_ports[key]);
+	up_write(&local_ports_lock_lhc2);
+}
+
+/**
+ * msm_ipc_router_create_raw_port() - Create an IPC Router port
+ * @endpoint: User-space socket information to be cached.
+ * @notify: Function to notify incoming events on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private Data to be passed during the event notification.
+ *
+ * @return: Valid pointer to port on success, NULL on failure.
+ *
+ * This function is used to create an IPC Router port. The port is used for
+ * communication locally or outside the subsystem.
+ */
+struct msm_ipc_port *
+msm_ipc_router_create_raw_port(void *endpoint,
+			       void (*notify)(unsigned int event,
+					      void *oob_data,
+					      size_t oob_data_len, void *priv),
+			       void *priv)
+{
+	struct msm_ipc_port *port_ptr;
+
+	port_ptr = kzalloc(sizeof(*port_ptr), GFP_KERNEL);
+	if (!port_ptr)
+		return NULL;
+
+	port_ptr->this_port.node_id = IPC_ROUTER_NID_LOCAL;
+	port_ptr->this_port.port_id = allocate_port_id();
+	if (!port_ptr->this_port.port_id) {
+		IPC_RTR_ERR("%s: All port ids are in use\n", __func__);
+		kfree(port_ptr);
+		return NULL;
+	}
+
+	mutex_init(&port_ptr->port_lock_lhc3);
+	INIT_LIST_HEAD(&port_ptr->port_rx_q);
+	mutex_init(&port_ptr->port_rx_q_lock_lhc3);
+	init_waitqueue_head(&port_ptr->port_rx_wait_q);
+	snprintf(port_ptr->rx_ws_name, MAX_WS_NAME_SZ,
+		 "ipc%08x_%s",
+		 port_ptr->this_port.port_id,
+		 current->comm);
+	port_ptr->port_rx_ws = wakeup_source_register(port_ptr->rx_ws_name);
+	if (!port_ptr->port_rx_ws) {
+		kfree(port_ptr);
+		return NULL;
+	}
+	init_waitqueue_head(&port_ptr->port_tx_wait_q);
+	kref_init(&port_ptr->ref);
+
+	port_ptr->endpoint = endpoint;
+	port_ptr->notify = notify;
+	port_ptr->priv = priv;
+
+	msm_ipc_router_add_local_port(port_ptr);
+	if (endpoint)
+		sock_hold(ipc_port_sk(endpoint));
+	return port_ptr;
+}
+
+/**
+ * ipc_router_get_port_ref() - Get a reference to the local port
+ * @port_id: Port ID of the local port for which a reference is to be taken.
+ *
+ * @return: If port is found, a reference to the port is returned.
+ *          Else NULL is returned.
+ */
+static struct msm_ipc_port *ipc_router_get_port_ref(u32 port_id)
+{
+	int key = (port_id & (LP_HASH_SIZE - 1));
+	struct msm_ipc_port *port_ptr;
+
+	down_read(&local_ports_lock_lhc2);
+	list_for_each_entry(port_ptr, &local_ports[key], list) {
+		if (port_ptr->this_port.port_id == port_id) {
+			kref_get(&port_ptr->ref);
+			up_read(&local_ports_lock_lhc2);
+			return port_ptr;
+		}
+	}
+	up_read(&local_ports_lock_lhc2);
+	return NULL;
+}
+
+/**
+ * ipc_router_release_port() - Cleanup and release the port
+ * @ref: Reference to the port.
+ *
+ * This function is called when all references to the port are released.
+ */
+void ipc_router_release_port(struct kref *ref)
+{
+	struct rr_packet *pkt, *temp_pkt;
+	struct msm_ipc_port *port_ptr =
+		container_of(ref, struct msm_ipc_port, ref);
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	list_for_each_entry_safe(pkt, temp_pkt, &port_ptr->port_rx_q, list) {
+		list_del(&pkt->list);
+		release_pkt(pkt);
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+	wakeup_source_unregister(port_ptr->port_rx_ws);
+	if (port_ptr->endpoint)
+		sock_put(ipc_port_sk(port_ptr->endpoint));
+	kfree(port_ptr);
+}
+
+/**
+ * ipc_router_get_rport_ref()- Get reference to the remote port
+ * @node_id: Node ID corresponding to the remote port.
+ * @port_id: Port ID corresponding to the remote port.
+ *
+ * @return: a reference to the remote port on success, NULL on failure.
+ */
+static struct msm_ipc_router_remote_port *ipc_router_get_rport_ref(
+		u32 node_id, u32 port_id)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int key = (port_id & (RP_HASH_SIZE - 1));
+
+	rt_entry = ipc_router_get_rtentry_ref(node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Node is not up\n", __func__);
+		return NULL;
+	}
+
+	down_read(&rt_entry->lock_lha4);
+	list_for_each_entry(rport_ptr,
+			    &rt_entry->remote_port_list[key], list) {
+		if (rport_ptr->port_id == port_id) {
+			kref_get(&rport_ptr->ref);
+			goto out_lookup_rmt_port1;
+		}
+	}
+	rport_ptr = NULL;
+out_lookup_rmt_port1:
+	up_read(&rt_entry->lock_lha4);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return rport_ptr;
+}
+
+/**
+ * ipc_router_create_rport() - Create a remote port
+ * @node_id: Node ID corresponding to the remote port.
+ * @port_id: Port ID corresponding to the remote port.
+ * @xprt_info: XPRT through which the concerned node is reachable.
+ *
+ * @return: a reference to the remote port on success, NULL on failure.
+ */
+static struct msm_ipc_router_remote_port *ipc_router_create_rport(
+				u32 node_id, u32 port_id,
+				struct msm_ipc_router_xprt_info *xprt_info)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int key = (port_id & (RP_HASH_SIZE - 1));
+
+	rt_entry = create_routing_table_entry(node_id, xprt_info);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Node cannot be created\n", __func__);
+		return NULL;
+	}
+
+	down_write(&rt_entry->lock_lha4);
+	list_for_each_entry(rport_ptr,
+			    &rt_entry->remote_port_list[key], list) {
+		if (rport_ptr->port_id == port_id)
+			goto out_create_rmt_port1;
+	}
+
+	rport_ptr = kmalloc(sizeof(*rport_ptr), GFP_KERNEL);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Remote port alloc failed\n", __func__);
+		goto out_create_rmt_port2;
+	}
+	rport_ptr->port_id = port_id;
+	rport_ptr->node_id = node_id;
+	rport_ptr->status = VALID;
+	rport_ptr->sec_rule = NULL;
+	rport_ptr->server = NULL;
+	rport_ptr->tx_quota_cnt = 0;
+	kref_init(&rport_ptr->ref);
+	mutex_init(&rport_ptr->rport_lock_lhb2);
+	INIT_LIST_HEAD(&rport_ptr->resume_tx_port_list);
+	INIT_LIST_HEAD(&rport_ptr->conn_info_list);
+	list_add_tail(&rport_ptr->list,
+		      &rt_entry->remote_port_list[key]);
+out_create_rmt_port1:
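+	/* Existing or newly created remote port: hand the caller its own reference */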
+	kref_get(&rport_ptr->ref);
+out_create_rmt_port2:
+	up_write(&rt_entry->lock_lha4);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return rport_ptr;
+}
+
+/**
+ * msm_ipc_router_free_resume_tx_port() - Free the resume_tx ports
+ * @rport_ptr: Pointer to the remote port.
+ *
+ * This function deletes all the resume_tx ports associated with a remote port
+ * and frees the memory allocated to each resume_tx port.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static void msm_ipc_router_free_resume_tx_port(
+	struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+
+	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+				 &rport_ptr->resume_tx_port_list, list) {
+		list_del(&rtx_port->list);
+		kfree(rtx_port);
+	}
+}
+
+/**
+ * msm_ipc_router_lookup_resume_tx_port() - Lookup resume_tx port list
+ * @rport_ptr: Remote port whose resume_tx port list needs to be searched.
+ * @port_id: Port ID to be looked up in the list.
+ *
+ * @return: 1 if the port_id is found in the list, else 0.
+ *
+ * This function checks whether a local port is already present in the
+ * remote port's resume_tx list, so that the same port is not added to
+ * that list repeatedly.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static int msm_ipc_router_lookup_resume_tx_port(
+	struct msm_ipc_router_remote_port *rport_ptr, u32 port_id)
+{
+	struct msm_ipc_resume_tx_port *rtx_port;
+
+	list_for_each_entry(rtx_port, &rport_ptr->resume_tx_port_list, list) {
+		if (port_id == rtx_port->port_id)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * ipc_router_dummy_write_space() - Dummy write space available callback
+ * @sk:	Socket pointer for which the callback is called.
+ */
+void ipc_router_dummy_write_space(struct sock *sk)
+{
+}
+
+/**
+ * post_resume_tx() - Post the resume_tx event
+ * @rport_ptr: Pointer to the remote port
+ * @pkt: The data packet that is received on a resume_tx event
+ * @msg: Out of band data to be passed to kernel drivers
+ *
+ * This function informs all the local ports on the resume_tx_port_list of
+ * the remote port pointed to by rport_ptr that a resume_tx message has been
+ * received from that remote port. After posting the information, it deletes
+ * each entry in the resume_tx_port_list of the remote port.
+ *
+ * Must be called with rport_ptr->rport_lock_lhb2 locked.
+ */
+static void post_resume_tx(struct msm_ipc_router_remote_port *rport_ptr,
+			   struct rr_packet *pkt, union rr_control_msg *msg)
+{
+	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+	struct msm_ipc_port *local_port;
+	struct sock *sk;
+	void (*write_space)(struct sock *sk) = NULL;
+
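+	/*
+	 * Wake each local port waiting on this remote port: kernel clients are
+	 * notified via their notify() callback, socket clients via
+	 * sk_write_space() (or a queued packet when no real callback is set).
+	 */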
+	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+				 &rport_ptr->resume_tx_port_list, list) {
+		local_port = ipc_router_get_port_ref(rtx_port->port_id);
+		if (local_port && local_port->notify) {
+			wake_up(&local_port->port_tx_wait_q);
+			local_port->notify(IPC_ROUTER_CTRL_CMD_RESUME_TX, msg,
+					   sizeof(*msg), local_port->priv);
+		} else if (local_port) {
+			wake_up(&local_port->port_tx_wait_q);
+			sk = ipc_port_sk(local_port->endpoint);
+			if (sk) {
+				read_lock(&sk->sk_callback_lock);
+				write_space = sk->sk_write_space;
+				read_unlock(&sk->sk_callback_lock);
+			}
+			if (write_space &&
+			    write_space != ipc_router_dummy_write_space)
+				write_space(sk);
+			else
+				post_pkt_to_port(local_port, pkt, 1);
+		} else {
+			IPC_RTR_ERR("%s: Local port %d not found\n",
+				    __func__, rtx_port->port_id);
+		}
+		if (local_port)
+			kref_put(&local_port->ref, ipc_router_release_port);
+		list_del(&rtx_port->list);
+		kfree(rtx_port);
+	}
+}
+
+/**
+ * signal_rport_exit() - Signal the local ports of remote port exit
+ * @rport_ptr: Remote port that is exiting.
+ *
+ * This function is used to signal the local ports that are waiting
+ * to resume transmission to a remote port that is exiting.
+ */
+static void signal_rport_exit(struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct msm_ipc_resume_tx_port *rtx_port, *tmp_rtx_port;
+	struct msm_ipc_port *local_port;
+
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->status = RESET;
+	list_for_each_entry_safe(rtx_port, tmp_rtx_port,
+				 &rport_ptr->resume_tx_port_list, list) {
+		local_port = ipc_router_get_port_ref(rtx_port->port_id);
+		if (local_port) {
+			wake_up(&local_port->port_tx_wait_q);
+			kref_put(&local_port->ref, ipc_router_release_port);
+		}
+		list_del(&rtx_port->list);
+		kfree(rtx_port);
+	}
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+}
+
+/**
+ * ipc_router_release_rport() - Cleanup and release the remote port
+ * @ref: Reference to the remote port.
+ *
+ * This function is called when all references to the remote port are released.
+ */
+static void ipc_router_release_rport(struct kref *ref)
+{
+	struct msm_ipc_router_remote_port *rport_ptr =
+		container_of(ref, struct msm_ipc_router_remote_port, ref);
+
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	msm_ipc_router_free_resume_tx_port(rport_ptr);
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	kfree(rport_ptr);
+}
+
+/**
+ * ipc_router_destroy_rport() - Destroy the remote port
+ * @rport_ptr: Pointer to the remote port to be destroyed.
+ */
+static void ipc_router_destroy_rport(
+	struct msm_ipc_router_remote_port *rport_ptr)
+{
+	u32 node_id;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	if (!rport_ptr)
+		return;
+
+	node_id = rport_ptr->node_id;
+	rt_entry = ipc_router_get_rtentry_ref(node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Node %d is not up\n", __func__, node_id);
+		return;
+	}
+	down_write(&rt_entry->lock_lha4);
+	list_del(&rport_ptr->list);
+	up_write(&rt_entry->lock_lha4);
+	signal_rport_exit(rport_ptr);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+}
+
+/**
+ * msm_ipc_router_lookup_server() - Lookup server information
+ * @service: Service ID of the server info to be looked up.
+ * @instance: Instance ID of the server info to be looked up.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * @return: Pointer to the server structure if found, else NULL.
+ *
+ * Note1: Hold server_list_lock_lha2 before calling this function.
+ * Note2: If the <node_id:port_id> are <0:0>, then the lookup is restricted
+ *        to <service:instance>. Used only when a client wants to send a
+ *        message to any QMI server.
+ */
+static struct msm_ipc_server *msm_ipc_router_lookup_server(
+				u32 service,
+				u32 instance,
+				u32 node_id,
+				u32 port_id)
+{
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	int key = (service & (SRV_HASH_SIZE - 1));
+
+	list_for_each_entry(server, &server_list[key], list) {
+		if ((server->name.service != service) ||
+		    (server->name.instance != instance))
+			continue;
+		if ((node_id == 0) && (port_id == 0))
+			return server;
+		list_for_each_entry(server_port, &server->server_port_list,
+				    list) {
+			if ((server_port->server_addr.node_id == node_id) &&
+			    (server_port->server_addr.port_id == port_id))
+				return server;
+		}
+	}
+	return NULL;
+}
+
+/**
+ * ipc_router_get_server_ref() - Get reference to the server
+ * @svc: Service ID for which the reference is required.
+ * @ins: Instance ID for which the reference is required.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * @return: If found return reference to server, else NULL.
+ */
+static struct msm_ipc_server *ipc_router_get_server_ref(
+	u32 svc, u32 ins, u32 node_id, u32 port_id)
+{
+	struct msm_ipc_server *server;
+
+	down_read(&server_list_lock_lha2);
+	server = msm_ipc_router_lookup_server(svc, ins, node_id, port_id);
+	if (server)
+		kref_get(&server->ref);
+	up_read(&server_list_lock_lha2);
+	return server;
+}
+
+/**
+ * ipc_router_release_server() - Cleanup and release the server
+ * @ref: Reference to the server.
+ *
+ * This function is called when all references to the server are released.
+ */
+static void ipc_router_release_server(struct kref *ref)
+{
+	struct msm_ipc_server *server =
+		container_of(ref, struct msm_ipc_server, ref);
+
+	kfree(server);
+}
+
+/**
+ * msm_ipc_router_create_server() - Add server info to hash table
+ * @service: Service ID of the server info to be created.
+ * @instance: Instance ID of the server info to be created.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ * @xprt_info: XPRT through which the node hosting the server is reached.
+ *
+ * @return: Pointer to server structure on success, else NULL.
+ *
+ * This function adds the server info to the hash table. If the same
+ * server (i.e. <service_id:instance_id>) is hosted on different nodes,
+ * the ports are maintained as a list of "server_port" entries under the
+ * "server" structure.
+ */
+static struct msm_ipc_server *msm_ipc_router_create_server(
+					u32 service,
+					u32 instance,
+					u32 node_id,
+					u32 port_id,
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	struct msm_ipc_server *server = NULL;
+	struct msm_ipc_server_port *server_port;
+	struct platform_device *pdev;
+	int key = (service & (SRV_HASH_SIZE - 1));
+
+	down_write(&server_list_lock_lha2);
+	server = msm_ipc_router_lookup_server(service, instance, 0, 0);
+	if (server) {
+		list_for_each_entry(server_port, &server->server_port_list,
+				    list) {
+			if ((server_port->server_addr.node_id == node_id) &&
+			    (server_port->server_addr.port_id == port_id))
+				goto return_server;
+		}
+		goto create_srv_port;
+	}
+
+	server = kzalloc(sizeof(*server), GFP_KERNEL);
+	if (!server) {
+		up_write(&server_list_lock_lha2);
+		IPC_RTR_ERR("%s: Server allocation failed\n", __func__);
+		return NULL;
+	}
+	server->name.service = service;
+	server->name.instance = instance;
+	server->synced_sec_rule = 0;
+	INIT_LIST_HEAD(&server->server_port_list);
+	kref_init(&server->ref);
+	list_add_tail(&server->list, &server_list[key]);
+	scnprintf(server->pdev_name, sizeof(server->pdev_name),
+		  "SVC%08x:%08x", service, instance);
+	server->next_pdev_id = 1;
+
+create_srv_port:
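+	/*
+	 * Each <node_id:port_id> hosting this service gets its own server_port
+	 * entry and a platform device created under the server's pdev_name.
+	 */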
+	server_port = kzalloc(sizeof(*server_port), GFP_KERNEL);
+	pdev = platform_device_alloc(server->pdev_name, server->next_pdev_id);
+	if (!server_port || !pdev) {
+		kfree(server_port);
+		if (pdev)
+			platform_device_put(pdev);
+		if (list_empty(&server->server_port_list)) {
+			list_del(&server->list);
+			kfree(server);
+		}
+		up_write(&server_list_lock_lha2);
+		IPC_RTR_ERR("%s: Server Port allocation failed\n", __func__);
+		return NULL;
+	}
+	server_port->pdev = pdev;
+	server_port->server_addr.node_id = node_id;
+	server_port->server_addr.port_id = port_id;
+	server_port->xprt_info = xprt_info;
+	list_add_tail(&server_port->list, &server->server_port_list);
+	server->next_pdev_id++;
+	platform_device_add(server_port->pdev);
+
+return_server:
+	/* Add a reference so that the caller can put it back */
+	kref_get(&server->ref);
+	up_write(&server_list_lock_lha2);
+	return server;
+}
+
+/**
+ * ipc_router_destroy_server_nolock() - Remove server info from hash table
+ * @server: Server info to be removed.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * This function removes the server_port identified using <node_id:port_id>
+ * from the server structure. If the server_port list under server structure
+ * is empty after removal, then remove the server structure from the server
+ * hash table. This function must be called with server_list_lock_lha2 locked.
+ */
+static void ipc_router_destroy_server_nolock(struct msm_ipc_server *server,
+					     u32 node_id, u32 port_id)
+{
+	struct msm_ipc_server_port *server_port;
+	bool server_port_found = false;
+
+	if (!server)
+		return;
+
+	list_for_each_entry(server_port, &server->server_port_list, list) {
+		if ((server_port->server_addr.node_id == node_id) &&
+		    (server_port->server_addr.port_id == port_id)) {
+			server_port_found = true;
+			break;
+		}
+	}
+	if (server_port_found && server_port) {
+		platform_device_unregister(server_port->pdev);
+		list_del(&server_port->list);
+		kfree(server_port);
+	}
+	if (list_empty(&server->server_port_list)) {
+		list_del(&server->list);
+		kref_put(&server->ref, ipc_router_release_server);
+	}
+}
+
+/**
+ * ipc_router_destroy_server() - Remove server info from hash table
+ * @server: Server info to be removed.
+ * @node_id: Node/Processor ID in which the server is hosted.
+ * @port_id: Port ID within the node in which the server is hosted.
+ *
+ * This function removes the server_port identified using <node_id:port_id>
+ * from the server structure. If the server_port list under server structure
+ * is empty after removal, then remove the server structure from the server
+ * hash table.
+ */
+static void ipc_router_destroy_server(struct msm_ipc_server *server,
+				      u32 node_id, u32 port_id)
+{
+	down_write(&server_list_lock_lha2);
+	ipc_router_destroy_server_nolock(server, node_id, port_id);
+	up_write(&server_list_lock_lha2);
+}
+
+static int ipc_router_send_ctl_msg(
+		struct msm_ipc_router_xprt_info *xprt_info,
+		union rr_control_msg *msg,
+		u32 dst_node_id)
+{
+	struct rr_packet *pkt;
+	struct sk_buff *ipc_rtr_pkt;
+	struct rr_header_v1 *hdr;
+	int pkt_size;
+	void *data;
+	int ret = -EINVAL;
+
+	pkt = create_pkt(NULL);
+	if (!pkt) {
+		IPC_RTR_ERR("%s: pkt alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	pkt_size = IPC_ROUTER_HDR_SIZE + sizeof(*msg);
+	ipc_rtr_pkt = alloc_skb(pkt_size, GFP_KERNEL);
+	if (!ipc_rtr_pkt) {
+		IPC_RTR_ERR("%s: ipc_rtr_pkt alloc failed\n", __func__);
+		release_pkt(pkt);
+		return -ENOMEM;
+	}
+
+	skb_reserve(ipc_rtr_pkt, IPC_ROUTER_HDR_SIZE);
+	data = skb_put(ipc_rtr_pkt, sizeof(*msg));
+	memcpy(data, msg, sizeof(*msg));
+	skb_queue_tail(pkt->pkt_fragment_q, ipc_rtr_pkt);
+	pkt->length = sizeof(*msg);
+
+	hdr = &pkt->hdr;
+	hdr->version = IPC_ROUTER_V1;
+	hdr->type = msg->cmd;
+	hdr->src_node_id = IPC_ROUTER_NID_LOCAL;
+	hdr->src_port_id = IPC_ROUTER_ADDRESS;
+	hdr->control_flag = 0;
+	hdr->size = sizeof(*msg);
+	if (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX ||
+	    (!xprt_info && dst_node_id == IPC_ROUTER_NID_LOCAL))
+		hdr->dst_node_id = dst_node_id;
+	else if (xprt_info)
+		hdr->dst_node_id = xprt_info->remote_node_id;
+	hdr->dst_port_id = IPC_ROUTER_ADDRESS;
+
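+	/*
+	 * Locally destined control messages are posted to the control ports or
+	 * handled by the resume_tx path; everything else is written out on the
+	 * XPRT once it has completed the HELLO exchange (HELLO itself is
+	 * always allowed).
+	 */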
+	if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
+	    msg->cmd != IPC_ROUTER_CTRL_CMD_RESUME_TX) {
+		ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
+				   hdr, NULL, NULL);
+		ret = post_control_ports(pkt);
+	} else if (dst_node_id == IPC_ROUTER_NID_LOCAL &&
+		   msg->cmd == IPC_ROUTER_CTRL_CMD_RESUME_TX) {
+		ipc_router_log_msg(local_log_ctx, IPC_ROUTER_LOG_EVENT_TX, msg,
+				   hdr, NULL, NULL);
+		ret = process_resume_tx_msg(msg, pkt);
+	} else if (xprt_info && (msg->cmd == IPC_ROUTER_CTRL_CMD_HELLO ||
+		   xprt_info->initialized)) {
+		mutex_lock(&xprt_info->tx_lock_lhb2);
+		ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_TX,
+				   msg, hdr, NULL, NULL);
+		ret = prepend_header(pkt, xprt_info);
+		if (ret < 0) {
+			mutex_unlock(&xprt_info->tx_lock_lhb2);
+			IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+			release_pkt(pkt);
+			return ret;
+		}
+
+		ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
+		mutex_unlock(&xprt_info->tx_lock_lhb2);
+	}
+
+	release_pkt(pkt);
+	return ret;
+}
+
+static int
+msm_ipc_router_send_server_list(u32 node_id,
+				struct msm_ipc_router_xprt_info *xprt_info)
+{
+	union rr_control_msg ctl;
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	int i;
+
+	if (!xprt_info || !xprt_info->initialized) {
+		IPC_RTR_ERR("%s: Xprt info not initialized\n", __func__);
+		return -EINVAL;
+	}
+
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
+
+	for (i = 0; i < SRV_HASH_SIZE; i++) {
+		list_for_each_entry(server, &server_list[i], list) {
+			ctl.srv.service = server->name.service;
+			ctl.srv.instance = server->name.instance;
+			list_for_each_entry(server_port,
+					    &server->server_port_list, list) {
+				if (server_port->server_addr.node_id !=
+				    node_id)
+					continue;
+
+				ctl.srv.node_id =
+					server_port->server_addr.node_id;
+				ctl.srv.port_id =
+					server_port->server_addr.port_id;
+				ipc_router_send_ctl_msg
+						(xprt_info, &ctl,
+						 IPC_ROUTER_DUMMY_DEST_NODE);
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int broadcast_ctl_msg_locally(union rr_control_msg *msg)
+{
+	return ipc_router_send_ctl_msg(NULL, msg, IPC_ROUTER_NID_LOCAL);
+}
+
+static int broadcast_ctl_msg(union rr_control_msg *ctl)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(xprt_info, &xprt_info_list, list) {
+		ipc_router_send_ctl_msg(xprt_info, ctl,
+					IPC_ROUTER_DUMMY_DEST_NODE);
+	}
+	up_read(&xprt_info_list_lock_lha5);
+	broadcast_ctl_msg_locally(ctl);
+
+	return 0;
+}
+
+static int relay_ctl_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			 union rr_control_msg *ctl)
+{
+	struct msm_ipc_router_xprt_info *fwd_xprt_info;
+
+	if (!xprt_info || !ctl)
+		return -EINVAL;
+
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(fwd_xprt_info, &xprt_info_list, list) {
+		if (xprt_info->xprt->link_id != fwd_xprt_info->xprt->link_id)
+			ipc_router_send_ctl_msg(fwd_xprt_info, ctl,
+						IPC_ROUTER_DUMMY_DEST_NODE);
+	}
+	up_read(&xprt_info_list_lock_lha5);
+
+	return 0;
+}
+
+static int forward_msg(struct msm_ipc_router_xprt_info *xprt_info,
+		       struct rr_packet *pkt)
+{
+	struct rr_header_v1 *hdr;
+	struct msm_ipc_router_xprt_info *fwd_xprt_info;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int ret = 0;
+	int fwd_xprt_option;
+
+	if (!xprt_info || !pkt)
+		return -EINVAL;
+
+	hdr = &pkt->hdr;
+	rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
+	if (!(rt_entry) || !(rt_entry->xprt_info)) {
+		IPC_RTR_ERR("%s: Routing table not initialized\n", __func__);
+		ret = -ENODEV;
+		goto fm_error1;
+	}
+
+	down_read(&rt_entry->lock_lha4);
+	fwd_xprt_info = rt_entry->xprt_info;
+	ret = ipc_router_get_xprt_info_ref(fwd_xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+		goto fm_error_xprt;
+	}
+	ret = prepend_header(pkt, fwd_xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+		goto fm_error2;
+	}
+	fwd_xprt_option = fwd_xprt_info->xprt->get_option(fwd_xprt_info->xprt);
+	if (!(fwd_xprt_option & FRAG_PKT_WRITE_ENABLE)) {
+		ret = defragment_pkt(pkt);
+		if (ret < 0)
+			goto fm_error2;
+	}
+
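+	/* Never send a packet back towards the cluster it arrived from */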
+	mutex_lock(&fwd_xprt_info->tx_lock_lhb2);
+	if (xprt_info->remote_node_id == fwd_xprt_info->remote_node_id) {
+		IPC_RTR_ERR("%s: Discarding Command to route back\n", __func__);
+		ret = -EINVAL;
+		goto fm_error3;
+	}
+
+	if (xprt_info->xprt->link_id == fwd_xprt_info->xprt->link_id) {
+		IPC_RTR_ERR("%s: DST in the same cluster\n", __func__);
+		ret = 0;
+		goto fm_error3;
+	}
+	fwd_xprt_info->xprt->write(pkt, pkt->length, fwd_xprt_info->xprt);
+	IPC_RTR_INFO(fwd_xprt_info->log_ctx,
+		     "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
+		     "FWD", "TX", hdr->size, hdr->type, hdr->control_flag,
+		     hdr->src_node_id, hdr->src_port_id,
+		     hdr->dst_node_id, hdr->dst_port_id);
+
+fm_error3:
+	mutex_unlock(&fwd_xprt_info->tx_lock_lhb2);
+fm_error2:
+	ipc_router_put_xprt_info_ref(fwd_xprt_info);
+fm_error_xprt:
+	up_read(&rt_entry->lock_lha4);
+fm_error1:
+	if (rt_entry)
+		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	return ret;
+}
+
+static int msm_ipc_router_send_remove_client(struct comm_mode_info *mode_info,
+					     u32 node_id, u32 port_id)
+{
+	union rr_control_msg msg;
+	struct msm_ipc_router_xprt_info *tmp_xprt_info;
+	int mode;
+	void *xprt_info;
+	int rc = 0;
+
+	if (!mode_info) {
+		IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
+		return -EINVAL;
+	}
+	mode = mode_info->mode;
+	xprt_info = mode_info->xprt_info;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
+	msg.cli.node_id = node_id;
+	msg.cli.port_id = port_id;
+
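+	/* Scope the REMOVE_CLIENT notification to the link(s) the client actually used */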
+	if ((mode == SINGLE_LINK_MODE) && xprt_info) {
+		down_read(&xprt_info_list_lock_lha5);
+		list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
+			if (tmp_xprt_info != xprt_info)
+				continue;
+			ipc_router_send_ctl_msg(tmp_xprt_info, &msg,
+						IPC_ROUTER_DUMMY_DEST_NODE);
+			break;
+		}
+		up_read(&xprt_info_list_lock_lha5);
+	} else if ((mode == SINGLE_LINK_MODE) && !xprt_info) {
+		broadcast_ctl_msg_locally(&msg);
+	} else if (mode == MULTI_LINK_MODE) {
+		broadcast_ctl_msg(&msg);
+	} else if (mode != NULL_MODE) {
+		IPC_RTR_ERR(
+		"%s: Invalid mode(%d) + xprt_inf(%p) for %08x:%08x\n",
+			__func__, mode, xprt_info, node_id, port_id);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static void update_comm_mode_info(struct comm_mode_info *mode_info,
+				  struct msm_ipc_router_xprt_info *xprt_info)
+{
+	if (!mode_info) {
+		IPC_RTR_ERR("%s: NULL mode_info\n", __func__);
+		return;
+	}
+
+	if (mode_info->mode == NULL_MODE) {
+		mode_info->xprt_info = xprt_info;
+		mode_info->mode = SINGLE_LINK_MODE;
+	} else if (mode_info->mode == SINGLE_LINK_MODE &&
+		   mode_info->xprt_info != xprt_info) {
+		mode_info->mode = MULTI_LINK_MODE;
+	}
+}
+
+/**
+ * cleanup_rmt_server() - Cleanup server hosted in the remote port
+ * @xprt_info: XPRT through which this cleanup event is handled.
+ * @rport_ptr: Remote port that is being cleaned up.
+ * @server: Server that is hosted in the remote port.
+ */
+static void cleanup_rmt_server(struct msm_ipc_router_xprt_info *xprt_info,
+			       struct msm_ipc_router_remote_port *rport_ptr,
+			       struct msm_ipc_server *server)
+{
+	union rr_control_msg ctl;
+
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+	ctl.srv.service = server->name.service;
+	ctl.srv.instance = server->name.instance;
+	ctl.srv.node_id = rport_ptr->node_id;
+	ctl.srv.port_id = rport_ptr->port_id;
+	if (xprt_info)
+		relay_ctl_msg(xprt_info, &ctl);
+	broadcast_ctl_msg_locally(&ctl);
+	ipc_router_destroy_server_nolock(server, rport_ptr->node_id,
+					 rport_ptr->port_id);
+}
+
+static void cleanup_rmt_ports(struct msm_ipc_router_xprt_info *xprt_info,
+			      struct msm_ipc_routing_table_entry *rt_entry)
+{
+	struct msm_ipc_router_remote_port *rport_ptr, *tmp_rport_ptr;
+	struct msm_ipc_server *server;
+	union rr_control_msg ctl;
+	int j;
+
+	memset(&ctl, 0, sizeof(ctl));
+	for (j = 0; j < RP_HASH_SIZE; j++) {
+		list_for_each_entry_safe(rport_ptr, tmp_rport_ptr,
+					 &rt_entry->remote_port_list[j], list) {
+			list_del(&rport_ptr->list);
+			mutex_lock(&rport_ptr->rport_lock_lhb2);
+			server = rport_ptr->server;
+			rport_ptr->server = NULL;
+			mutex_unlock(&rport_ptr->rport_lock_lhb2);
+			ipc_router_reset_conn(rport_ptr);
+			if (server) {
+				cleanup_rmt_server(xprt_info, rport_ptr,
+						   server);
+				server = NULL;
+			}
+
+			ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT;
+			ctl.cli.node_id = rport_ptr->node_id;
+			ctl.cli.port_id = rport_ptr->port_id;
+			kref_put(&rport_ptr->ref, ipc_router_release_rport);
+
+			relay_ctl_msg(xprt_info, &ctl);
+			broadcast_ctl_msg_locally(&ctl);
+		}
+	}
+}
+
+static void msm_ipc_cleanup_routing_table(
+	struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int i;
+	struct msm_ipc_routing_table_entry *rt_entry, *tmp_rt_entry;
+
+	if (!xprt_info) {
+		IPC_RTR_ERR("%s: Invalid xprt_info\n", __func__);
+		return;
+	}
+
+	down_write(&server_list_lock_lha2);
+	down_write(&routing_table_lock_lha3);
+	for (i = 0; i < RT_HASH_SIZE; i++) {
+		list_for_each_entry_safe(rt_entry, tmp_rt_entry,
+					 &routing_table[i], list) {
+			down_write(&rt_entry->lock_lha4);
+			if (rt_entry->xprt_info != xprt_info) {
+				up_write(&rt_entry->lock_lha4);
+				continue;
+			}
+			cleanup_rmt_ports(xprt_info, rt_entry);
+			rt_entry->xprt_info = NULL;
+			up_write(&rt_entry->lock_lha4);
+			list_del(&rt_entry->list);
+			kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+		}
+	}
+	up_write(&routing_table_lock_lha3);
+	up_write(&server_list_lock_lha2);
+}
+
+/**
+ * sync_sec_rule() - Synchronize the security rule into the server structure
+ * @server: Server structure where the rule has to be synchronized.
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to update the server structure with the security
+ * rule configured for the <service:instance> corresponding to that server.
+ */
+static void sync_sec_rule(struct msm_ipc_server *server, void *rule)
+{
+	struct msm_ipc_server_port *server_port;
+	struct msm_ipc_router_remote_port *rport_ptr = NULL;
+
+	list_for_each_entry(server_port, &server->server_port_list, list) {
+		rport_ptr = ipc_router_get_rport_ref(
+				server_port->server_addr.node_id,
+				server_port->server_addr.port_id);
+		if (!rport_ptr)
+			continue;
+		rport_ptr->sec_rule = rule;
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	}
+	server->synced_sec_rule = 1;
+}
+
+/**
+ * msm_ipc_sync_sec_rule() - Sync the security rule to the service
+ * @service: Service for which the rule has to be synchronized.
+ * @instance: Instance for which the rule has to be synchronized.
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to synchronize the security rule with the server
+ * hash table when the user-space script configures the rule after the
+ * service has come up. The rule is applied to a specific service and
+ * optionally to a specific instance.
+ */
+void msm_ipc_sync_sec_rule(u32 service, u32 instance, void *rule)
+{
+	int key = (service & (SRV_HASH_SIZE - 1));
+	struct msm_ipc_server *server;
+
+	down_write(&server_list_lock_lha2);
+	list_for_each_entry(server, &server_list[key], list) {
+		if (server->name.service != service)
+			continue;
+
+		if (server->name.instance != instance &&
+		    instance != ALL_INSTANCE)
+			continue;
+
+		/* If the rule applies to all instances and if the specific
+		 * instance of a service has a rule synchronized already,
+		 * do not apply the rule for that specific instance.
+		 */
+		if (instance == ALL_INSTANCE && server->synced_sec_rule)
+			continue;
+
+		sync_sec_rule(server, rule);
+	}
+	up_write(&server_list_lock_lha2);
+}
+
+/**
+ * msm_ipc_sync_default_sec_rule() - Default security rule to all services
+ * @rule: Security rule to be synchronized.
+ *
+ * This function is used to synchronize the security rule with the server
+ * hash table when the user-space script configures the rule after the
+ * service has come up. It applies the default rule to every service that
+ * does not already have a rule of its own.
+ */
+void msm_ipc_sync_default_sec_rule(void *rule)
+{
+	int key;
+	struct msm_ipc_server *server;
+
+	down_write(&server_list_lock_lha2);
+	for (key = 0; key < SRV_HASH_SIZE; key++) {
+		list_for_each_entry(server, &server_list[key], list) {
+			if (server->synced_sec_rule)
+				continue;
+
+			sync_sec_rule(server, rule);
+		}
+	}
+	up_write(&server_list_lock_lha2);
+}
+
+/**
+ * ipc_router_reset_conn() - Reset the connection to remote port
+ * @rport_ptr: Pointer to the remote port to be disconnected.
+ *
+ * This function is used to reset all the local ports that are connected to
+ * the remote port being passed.
+ */
+static void ipc_router_reset_conn(struct msm_ipc_router_remote_port *rport_ptr)
+{
+	struct msm_ipc_port *port_ptr;
+	struct ipc_router_conn_info *conn_info, *tmp_conn_info;
+
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	list_for_each_entry_safe(conn_info, tmp_conn_info,
+				 &rport_ptr->conn_info_list, list) {
+		port_ptr = ipc_router_get_port_ref(conn_info->port_id);
+		if (port_ptr) {
+			mutex_lock(&port_ptr->port_lock_lhc3);
+			port_ptr->conn_status = CONNECTION_RESET;
+			mutex_unlock(&port_ptr->port_lock_lhc3);
+			wake_up(&port_ptr->port_rx_wait_q);
+			kref_put(&port_ptr->ref, ipc_router_release_port);
+		}
+
+		list_del(&conn_info->list);
+		kfree(conn_info);
+	}
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+}
+
+/**
+ * ipc_router_set_conn() - Set the connection by initializing dest address
+ * @port_ptr: Local port in which the connection has to be set.
+ * @addr: Destination address of the connection.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
+			struct msm_ipc_addr *addr)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct ipc_router_conn_info *conn_info;
+
+	if (unlikely(!port_ptr || !addr))
+		return -EINVAL;
+
+	if (addr->addrtype != MSM_IPC_ADDR_ID) {
+		IPC_RTR_ERR("%s: Invalid Address type\n", __func__);
+		return -EINVAL;
+	}
+
+	if (port_ptr->type == SERVER_PORT) {
+		IPC_RTR_ERR("%s: Connection refused on a server port\n",
+			    __func__);
+		return -ECONNREFUSED;
+	}
+
+	if (port_ptr->conn_status == CONNECTED) {
+		IPC_RTR_ERR("%s: Port %08x already connected\n",
+			    __func__, port_ptr->this_port.port_id);
+		return -EISCONN;
+	}
+
+	conn_info = kzalloc(sizeof(*conn_info), GFP_KERNEL);
+	if (!conn_info) {
+		IPC_RTR_ERR("%s: Error allocating conn_info\n", __func__);
+		return -ENOMEM;
+	}
+	INIT_LIST_HEAD(&conn_info->list);
+	conn_info->port_id = port_ptr->this_port.port_id;
+
+	rport_ptr = ipc_router_get_rport_ref(addr->addr.port_addr.node_id,
+					     addr->addr.port_addr.port_id);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Invalid remote endpoint\n", __func__);
+		kfree(conn_info);
+		return -ENODEV;
+	}
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	list_add_tail(&conn_info->list, &rport_ptr->conn_info_list);
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	memcpy(&port_ptr->dest_addr, &addr->addr.port_addr,
+	       sizeof(struct msm_ipc_port_addr));
+	port_ptr->conn_status = CONNECTED;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	return 0;
+}
+
+/**
+ * do_version_negotiation() - perform a version negotiation and set the version
+ * @xprt_info:	Pointer to the IPC Router transport info structure.
+ * @msg:	Pointer to the IPC Router HELLO message.
+ *
+ * This function performs the version negotiation by verifying the computed
+ * checksum first. If the checksum matches with the magic number, it sets the
+ * negotiated IPC Router version in transport.
+ */
+static void do_version_negotiation(struct msm_ipc_router_xprt_info *xprt_info,
+				   union rr_control_msg *msg)
+{
+	u32 magic;
+	unsigned int version;
+
+	if (!xprt_info)
+		return;
+	magic = ipc_router_calc_checksum(msg);
+	if (magic == IPC_ROUTER_HELLO_MAGIC) {
+		version = fls(msg->hello.versions & IPC_ROUTER_VER_BITMASK) - 1;
+		/* Bits 0 & 31 are reserved for future use */
+		if ((version > 0) &&
+		    (version != (sizeof(version) * BITS_PER_BYTE - 1)) &&
+			xprt_info->xprt->set_version)
+			xprt_info->xprt->set_version(xprt_info->xprt, version);
+	}
+}
+
+static int process_hello_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			     union rr_control_msg *msg,
+			     struct rr_header_v1 *hdr)
+{
+	int i, rc = 0;
+	union rr_control_msg ctl;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	if (!hdr)
+		return -EINVAL;
+
+	xprt_info->remote_node_id = hdr->src_node_id;
+	rt_entry = create_routing_table_entry(hdr->src_node_id, xprt_info);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: rt_entry allocation failed\n", __func__);
+		return -ENOMEM;
+	}
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	do_version_negotiation(xprt_info, msg);
+	/* Send a reply HELLO message */
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.hello.cmd = IPC_ROUTER_CTRL_CMD_HELLO;
+	ctl.hello.checksum = IPC_ROUTER_HELLO_MAGIC;
+	ctl.hello.versions = (u32)IPC_ROUTER_VER_BITMASK;
+	ctl.hello.checksum = ipc_router_calc_checksum(&ctl);
+	rc = ipc_router_send_ctl_msg(xprt_info, &ctl,
+				     IPC_ROUTER_DUMMY_DEST_NODE);
+	if (rc < 0) {
+		IPC_RTR_ERR("%s: Error sending reply HELLO message\n",
+			    __func__);
+		return rc;
+	}
+	xprt_info->initialized = 1;
+
+	/* Send the list of servers hosted on the local node and on nodes
+	 * outside the mesh network of which this XPRT is a part.
+	 */
+	down_read(&server_list_lock_lha2);
+	down_read(&routing_table_lock_lha3);
+	for (i = 0; i < RT_HASH_SIZE; i++) {
+		list_for_each_entry(rt_entry, &routing_table[i], list) {
+			if ((rt_entry->node_id != IPC_ROUTER_NID_LOCAL) &&
+			    (!rt_entry->xprt_info ||
+			     (rt_entry->xprt_info->xprt->link_id ==
+			      xprt_info->xprt->link_id)))
+				continue;
+			rc = msm_ipc_router_send_server_list(rt_entry->node_id,
+							     xprt_info);
+			if (rc < 0) {
+				up_read(&routing_table_lock_lha3);
+				up_read(&server_list_lock_lha2);
+				return rc;
+			}
+		}
+	}
+	up_read(&routing_table_lock_lha3);
+	up_read(&server_list_lock_lha2);
+	return rc;
+}
+
+static int process_resume_tx_msg(union rr_control_msg *msg,
+				 struct rr_packet *pkt)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
+					     msg->cli.port_id);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Unable to resume client\n", __func__);
+		return -ENODEV;
+	}
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->tx_quota_cnt = 0;
+	post_resume_tx(rport_ptr, pkt, msg);
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	return 0;
+}
+
+static int process_new_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
+				  union rr_control_msg *msg,
+				  struct rr_packet *pkt)
+{
+	struct msm_ipc_routing_table_entry *rt_entry;
+	struct msm_ipc_server *server;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (msg->srv.instance == 0) {
+		IPC_RTR_ERR("%s: Server %08x create rejected, version = 0\n",
+			    __func__, msg->srv.service);
+		return -EINVAL;
+	}
+
+	rt_entry = ipc_router_get_rtentry_ref(msg->srv.node_id);
+	if (!rt_entry) {
+		rt_entry = create_routing_table_entry(msg->srv.node_id,
+						      xprt_info);
+		if (!rt_entry) {
+			IPC_RTR_ERR("%s: rt_entry allocation failed\n",
+				    __func__);
+			return -ENOMEM;
+		}
+	}
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	/* If the service already exists in the table, create_server returns
+	 * a reference to it.
+	 */
+	rport_ptr = ipc_router_create_rport(msg->srv.node_id,
+					    msg->srv.port_id, xprt_info);
+	if (!rport_ptr)
+		return -ENOMEM;
+
+	server = msm_ipc_router_create_server(
+			msg->srv.service, msg->srv.instance,
+			msg->srv.node_id, msg->srv.port_id, xprt_info);
+	if (!server) {
+		IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
+			    __func__, msg->srv.service, msg->srv.instance);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		ipc_router_destroy_rport(rport_ptr);
+		return -ENOMEM;
+	}
+	mutex_lock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->server = server;
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	rport_ptr->sec_rule = msm_ipc_get_security_rule(
+					msg->srv.service, msg->srv.instance);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	kref_put(&server->ref, ipc_router_release_server);
+
+	/* Relay the new server message to other subsystems that do not belong
+	 * to the cluster from which this message is received. Notify the
+	 * local clients waiting for this service.
+	 */
+	relay_ctl_msg(xprt_info, msg);
+	post_control_ports(pkt);
+	return 0;
+}
+
+static int process_rmv_server_msg(struct msm_ipc_router_xprt_info *xprt_info,
+				  union rr_control_msg *msg,
+				  struct rr_packet *pkt)
+{
+	struct msm_ipc_server *server;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	server = ipc_router_get_server_ref(msg->srv.service, msg->srv.instance,
+					   msg->srv.node_id, msg->srv.port_id);
+	rport_ptr = ipc_router_get_rport_ref(msg->srv.node_id,
+					     msg->srv.port_id);
+	if (rport_ptr) {
+		mutex_lock(&rport_ptr->rport_lock_lhb2);
+		if (rport_ptr->server == server)
+			rport_ptr->server = NULL;
+		mutex_unlock(&rport_ptr->rport_lock_lhb2);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	}
+
+	if (server) {
+		kref_put(&server->ref, ipc_router_release_server);
+		ipc_router_destroy_server(server, msg->srv.node_id,
+					  msg->srv.port_id);
+		/* Relay the REMOVE_SERVER message to other subsystems that do not
+		 * belong to the cluster from which this message is received.
+		 * Notify the local clients communicating with the service.
+		 */
+		relay_ctl_msg(xprt_info, msg);
+		post_control_ports(pkt);
+	}
+	return 0;
+}
+
+static int process_rmv_client_msg(struct msm_ipc_router_xprt_info *xprt_info,
+				  union rr_control_msg *msg,
+				  struct rr_packet *pkt)
+{
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_server *server;
+
+	rport_ptr = ipc_router_get_rport_ref(msg->cli.node_id,
+					     msg->cli.port_id);
+	if (rport_ptr) {
+		mutex_lock(&rport_ptr->rport_lock_lhb2);
+		server = rport_ptr->server;
+		rport_ptr->server = NULL;
+		mutex_unlock(&rport_ptr->rport_lock_lhb2);
+		ipc_router_reset_conn(rport_ptr);
+		down_write(&server_list_lock_lha2);
+		if (server)
+			cleanup_rmt_server(NULL, rport_ptr, server);
+		up_write(&server_list_lock_lha2);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		ipc_router_destroy_rport(rport_ptr);
+	}
+
+	relay_ctl_msg(xprt_info, msg);
+	post_control_ports(pkt);
+	return 0;
+}
+
+static int process_control_msg(struct msm_ipc_router_xprt_info *xprt_info,
+			       struct rr_packet *pkt)
+{
+	union rr_control_msg *msg;
+	int rc = 0;
+	struct rr_header_v1 *hdr;
+
+	if (pkt->length != sizeof(*msg)) {
+		IPC_RTR_ERR("%s: r2r msg size %d != %zu\n", __func__,
+			    pkt->length, sizeof(*msg));
+		return -EINVAL;
+	}
+
+	hdr = &pkt->hdr;
+	msg = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, sizeof(*msg));
+	if (!msg) {
+		IPC_RTR_ERR("%s: Error extracting control msg\n", __func__);
+		return -ENOMEM;
+	}
+
+	ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX, msg,
+			   hdr, NULL, NULL);
+
+	switch (msg->cmd) {
+	case IPC_ROUTER_CTRL_CMD_HELLO:
+		rc = process_hello_msg(xprt_info, msg, hdr);
+		break;
+	case IPC_ROUTER_CTRL_CMD_RESUME_TX:
+		rc = process_resume_tx_msg(msg, pkt);
+		break;
+	case IPC_ROUTER_CTRL_CMD_NEW_SERVER:
+		rc = process_new_server_msg(xprt_info, msg, pkt);
+		break;
+	case IPC_ROUTER_CTRL_CMD_REMOVE_SERVER:
+		rc = process_rmv_server_msg(xprt_info, msg, pkt);
+		break;
+	case IPC_ROUTER_CTRL_CMD_REMOVE_CLIENT:
+		rc = process_rmv_client_msg(xprt_info, msg, pkt);
+		break;
+	default:
+		rc = -EINVAL;
+	}
+	kfree(msg);
+	return rc;
+}
+
+static void do_read_data(struct work_struct *work)
+{
+	struct rr_header_v1 *hdr;
+	struct rr_packet *pkt = NULL;
+	struct msm_ipc_port *port_ptr;
+	struct msm_ipc_router_remote_port *rport_ptr;
+	int ret;
+
+	struct msm_ipc_router_xprt_info *xprt_info =
+		container_of(work,
+			     struct msm_ipc_router_xprt_info,
+			     read_data);
+
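+	/*
+	 * Drain the XPRT: forward packets destined for other nodes, hand
+	 * control messages to process_control_msg() and post data packets
+	 * to the destination local port.
+	 */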
+	while ((pkt = rr_read(xprt_info)) != NULL) {
+		if (pkt->length < calc_rx_header_size(xprt_info) ||
+		    pkt->length > MAX_IPC_PKT_SIZE) {
+			IPC_RTR_ERR("%s: Invalid pkt length %d\n", __func__,
+				    pkt->length);
+			goto read_next_pkt1;
+		}
+
+		ret = extract_header(pkt);
+		if (ret < 0)
+			goto read_next_pkt1;
+		hdr = &pkt->hdr;
+
+		if ((hdr->dst_node_id != IPC_ROUTER_NID_LOCAL) &&
+		    ((hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX) ||
+		     (hdr->type == IPC_ROUTER_CTRL_CMD_DATA))) {
+			IPC_RTR_INFO(xprt_info->log_ctx,
+				     "%s %s Len:0x%x T:0x%x CF:0x%x SRC:<0x%x:0x%x> DST:<0x%x:0x%x>\n",
+				     "FWD", "RX", hdr->size, hdr->type,
+				     hdr->control_flag, hdr->src_node_id,
+				     hdr->src_port_id, hdr->dst_node_id,
+				     hdr->dst_port_id);
+			forward_msg(xprt_info, pkt);
+			goto read_next_pkt1;
+		}
+
+		if (hdr->type != IPC_ROUTER_CTRL_CMD_DATA) {
+			process_control_msg(xprt_info, pkt);
+			goto read_next_pkt1;
+		}
+
+		port_ptr = ipc_router_get_port_ref(hdr->dst_port_id);
+		if (!port_ptr) {
+			IPC_RTR_ERR("%s: No local port id %08x\n", __func__,
+				    hdr->dst_port_id);
+			goto read_next_pkt1;
+		}
+
+		rport_ptr = ipc_router_get_rport_ref(hdr->src_node_id,
+						     hdr->src_port_id);
+		if (!rport_ptr) {
+			rport_ptr = ipc_router_create_rport(hdr->src_node_id,
+							    hdr->src_port_id,
+							    xprt_info);
+			if (!rport_ptr) {
+				IPC_RTR_ERR(
+					"%s: Rmt Prt %08x:%08x create failed\n",
+					__func__, hdr->src_node_id,
+					hdr->src_port_id);
+				goto read_next_pkt2;
+			}
+		}
+
+		ipc_router_log_msg(xprt_info->log_ctx, IPC_ROUTER_LOG_EVENT_RX,
+				   pkt, hdr, port_ptr, rport_ptr);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		post_pkt_to_port(port_ptr, pkt, 0);
+		kref_put(&port_ptr->ref, ipc_router_release_port);
+		continue;
+read_next_pkt2:
+		kref_put(&port_ptr->ref, ipc_router_release_port);
+read_next_pkt1:
+		release_pkt(pkt);
+	}
+}
+
+int msm_ipc_router_register_server(struct msm_ipc_port *port_ptr,
+				   struct msm_ipc_addr *name)
+{
+	struct msm_ipc_server *server;
+	union rr_control_msg ctl;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (!port_ptr || !name)
+		return -EINVAL;
+
+	if (name->addrtype != MSM_IPC_ADDR_NAME)
+		return -EINVAL;
+
+	rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
+					    port_ptr->this_port.port_id, NULL);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: RPort %08x:%08x creation failed\n", __func__,
+			    IPC_ROUTER_NID_LOCAL, port_ptr->this_port.port_id);
+		return -ENOMEM;
+	}
+
+	server = msm_ipc_router_create_server(name->addr.port_name.service,
+					      name->addr.port_name.instance,
+					      IPC_ROUTER_NID_LOCAL,
+					      port_ptr->this_port.port_id,
+					      NULL);
+	if (!server) {
+		IPC_RTR_ERR("%s: Server %08x:%08x Create failed\n",
+			    __func__, name->addr.port_name.service,
+			    name->addr.port_name.instance);
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		ipc_router_destroy_rport(rport_ptr);
+		return -ENOMEM;
+	}
+
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_NEW_SERVER;
+	ctl.srv.service = server->name.service;
+	ctl.srv.instance = server->name.instance;
+	ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
+	ctl.srv.port_id = port_ptr->this_port.port_id;
+	broadcast_ctl_msg(&ctl);
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	port_ptr->type = SERVER_PORT;
+	port_ptr->mode_info.mode = MULTI_LINK_MODE;
+	port_ptr->port_name.service = server->name.service;
+	port_ptr->port_name.instance = server->name.instance;
+	port_ptr->rport_info = rport_ptr;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	kref_put(&server->ref, ipc_router_release_server);
+	return 0;
+}
+
+int msm_ipc_router_unregister_server(struct msm_ipc_port *port_ptr)
+{
+	struct msm_ipc_server *server;
+	union rr_control_msg ctl;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (!port_ptr)
+		return -EINVAL;
+
+	if (port_ptr->type != SERVER_PORT) {
+		IPC_RTR_ERR("%s: Trying to unregister a non-server port\n",
+			    __func__);
+		return -EINVAL;
+	}
+
+	if (port_ptr->this_port.node_id != IPC_ROUTER_NID_LOCAL) {
+		IPC_RTR_ERR(
+		"%s: Trying to unregister a remote server locally\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	server = ipc_router_get_server_ref(port_ptr->port_name.service,
+					   port_ptr->port_name.instance,
+					   port_ptr->this_port.node_id,
+					   port_ptr->this_port.port_id);
+	if (!server) {
+		IPC_RTR_ERR("%s: Server lookup failed\n", __func__);
+		return -ENODEV;
+	}
+
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	port_ptr->type = CLIENT_PORT;
+	rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	if (rport_ptr)
+		ipc_router_reset_conn(rport_ptr);
+	memset(&ctl, 0, sizeof(ctl));
+	ctl.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+	ctl.srv.service = server->name.service;
+	ctl.srv.instance = server->name.instance;
+	ctl.srv.node_id = IPC_ROUTER_NID_LOCAL;
+	ctl.srv.port_id = port_ptr->this_port.port_id;
+	kref_put(&server->ref, ipc_router_release_server);
+	ipc_router_destroy_server(server, port_ptr->this_port.node_id,
+				  port_ptr->this_port.port_id);
+	broadcast_ctl_msg(&ctl);
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	port_ptr->type = CLIENT_PORT;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	return 0;
+}
+
+static int loopback_data(struct msm_ipc_port *src,
+			 u32 port_id,
+			 struct rr_packet *pkt)
+{
+	struct msm_ipc_port *port_ptr;
+	struct sk_buff *temp_skb;
+	int align_size;
+
+	if (!pkt) {
+		IPC_RTR_ERR("%s: Invalid pkt pointer\n", __func__);
+		return -EINVAL;
+	}
+
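+	/* Pad to word alignment, matching what the XPRT transmit path does */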
+	temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
+	align_size = ALIGN_SIZE(pkt->length);
+	skb_put(temp_skb, align_size);
+	pkt->length += align_size;
+
+	port_ptr = ipc_router_get_port_ref(port_id);
+	if (!port_ptr) {
+		IPC_RTR_ERR("%s: Local port %d not present\n", __func__,
+			    port_id);
+		return -ENODEV;
+	}
+	post_pkt_to_port(port_ptr, pkt, 1);
+	update_comm_mode_info(&src->mode_info, NULL);
+	kref_put(&port_ptr->ref, ipc_router_release_port);
+
+	return pkt->hdr.size;
+}
+
+static int ipc_router_tx_wait(struct msm_ipc_port *src,
+			      struct msm_ipc_router_remote_port *rport_ptr,
+			      u32 *set_confirm_rx,
+			      long timeout)
+{
+	struct msm_ipc_resume_tx_port *resume_tx_port;
+	int ret;
+
+	if (unlikely(!src || !rport_ptr))
+		return -EINVAL;
+
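+	/*
+	 * Flow control: block while the remote port's RX quota is exhausted.
+	 * The sender registers itself on the remote port's resume_tx list so
+	 * an incoming RESUME_TX wakes it up via port_tx_wait_q.
+	 */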
+	for (;;) {
+		mutex_lock(&rport_ptr->rport_lock_lhb2);
+		if (rport_ptr->status == RESET) {
+			mutex_unlock(&rport_ptr->rport_lock_lhb2);
+			IPC_RTR_ERR("%s: RPort %08x:%08x is in reset state\n",
+				    __func__, rport_ptr->node_id,
+				    rport_ptr->port_id);
+			return -ENETRESET;
+		}
+
+		if (rport_ptr->tx_quota_cnt < IPC_ROUTER_HIGH_RX_QUOTA)
+			break;
+
+		if (msm_ipc_router_lookup_resume_tx_port(
+			rport_ptr, src->this_port.port_id))
+			goto check_timeo;
+
+		resume_tx_port =
+			kzalloc(sizeof(struct msm_ipc_resume_tx_port),
+				GFP_KERNEL);
+		if (!resume_tx_port) {
+			IPC_RTR_ERR("%s: Resume_Tx port allocation failed\n",
+				    __func__);
+			mutex_unlock(&rport_ptr->rport_lock_lhb2);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&resume_tx_port->list);
+		resume_tx_port->port_id = src->this_port.port_id;
+		resume_tx_port->node_id = src->this_port.node_id;
+		list_add_tail(&resume_tx_port->list,
+			      &rport_ptr->resume_tx_port_list);
+check_timeo:
+		mutex_unlock(&rport_ptr->rport_lock_lhb2);
+		if (!timeout) {
+			return -EAGAIN;
+		} else if (timeout < 0) {
+			ret =
+			wait_event_interruptible(src->port_tx_wait_q,
+						 (rport_ptr->tx_quota_cnt !=
+						  IPC_ROUTER_HIGH_RX_QUOTA ||
+						  rport_ptr->status == RESET));
+			if (ret)
+				return ret;
+		} else {
+			ret = wait_event_interruptible_timeout(
+					src->port_tx_wait_q,
+					(rport_ptr->tx_quota_cnt !=
+					 IPC_ROUTER_HIGH_RX_QUOTA ||
+					 rport_ptr->status == RESET),
+					msecs_to_jiffies(timeout));
+			if (ret < 0) {
+				return ret;
+			} else if (ret == 0) {
+				IPC_RTR_ERR("%s: Resume_tx Timeout %08x:%08x\n",
+					    __func__, rport_ptr->node_id,
+					    rport_ptr->port_id);
+				return -ETIMEDOUT;
+			}
+		}
+	}
+	rport_ptr->tx_quota_cnt++;
+	if (rport_ptr->tx_quota_cnt == IPC_ROUTER_LOW_RX_QUOTA)
+		*set_confirm_rx = 1;
+	mutex_unlock(&rport_ptr->rport_lock_lhb2);
+	return 0;
+}
+
+static int
+msm_ipc_router_write_pkt(struct msm_ipc_port *src,
+			 struct msm_ipc_router_remote_port *rport_ptr,
+			 struct rr_packet *pkt, long timeout)
+{
+	struct rr_header_v1 *hdr;
+	struct msm_ipc_router_xprt_info *xprt_info;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	struct sk_buff *temp_skb;
+	int xprt_option;
+	int ret;
+	int align_size;
+	u32 set_confirm_rx = 0;
+
+	if (!rport_ptr || !src || !pkt)
+		return -EINVAL;
+
+	hdr = &pkt->hdr;
+	hdr->version = IPC_ROUTER_V1;
+	hdr->type = IPC_ROUTER_CTRL_CMD_DATA;
+	hdr->src_node_id = src->this_port.node_id;
+	hdr->src_port_id = src->this_port.port_id;
+	hdr->size = pkt->length;
+	hdr->control_flag = 0;
+	hdr->dst_node_id = rport_ptr->node_id;
+	hdr->dst_port_id = rport_ptr->port_id;
+
+	ret = ipc_router_tx_wait(src, rport_ptr, &set_confirm_rx, timeout);
+	if (ret < 0)
+		return ret;
+	if (set_confirm_rx)
+		hdr->control_flag |= CONTROL_FLAG_CONFIRM_RX;
+
+	if (hdr->dst_node_id == IPC_ROUTER_NID_LOCAL) {
+		ipc_router_log_msg(local_log_ctx,
+				   IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src,
+				   rport_ptr);
+		ret = loopback_data(src, hdr->dst_port_id, pkt);
+		return ret;
+	}
+
+	rt_entry = ipc_router_get_rtentry_ref(hdr->dst_node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: Remote node %d not up\n",
+			    __func__, hdr->dst_node_id);
+		return -ENODEV;
+	}
+	down_read(&rt_entry->lock_lha4);
+	xprt_info = rt_entry->xprt_info;
+	ret = ipc_router_get_xprt_info_ref(xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+		up_read(&rt_entry->lock_lha4);
+		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+		return ret;
+	}
+	ret = prepend_header(pkt, xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Prepend Header failed\n", __func__);
+		goto out_write_pkt;
+	}
+	xprt_option = xprt_info->xprt->get_option(xprt_info->xprt);
+	if (!(xprt_option & FRAG_PKT_WRITE_ENABLE)) {
+		ret = defragment_pkt(pkt);
+		if (ret < 0)
+			goto out_write_pkt;
+	}
+
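+	/* Pad the tail fragment so the total length sent to the XPRT is word-aligned */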
+	temp_skb = skb_peek_tail(pkt->pkt_fragment_q);
+	align_size = ALIGN_SIZE(pkt->length);
+	skb_put(temp_skb, align_size);
+	pkt->length += align_size;
+	mutex_lock(&xprt_info->tx_lock_lhb2);
+	ret = xprt_info->xprt->write(pkt, pkt->length, xprt_info->xprt);
+	mutex_unlock(&xprt_info->tx_lock_lhb2);
+out_write_pkt:
+	up_read(&rt_entry->lock_lha4);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Write on XPRT failed\n", __func__);
+		ipc_router_log_msg(xprt_info->log_ctx,
+				   IPC_ROUTER_LOG_EVENT_TX_ERR, pkt, hdr, src,
+				   rport_ptr);
+
+		ipc_router_put_xprt_info_ref(xprt_info);
+		return ret;
+	}
+	update_comm_mode_info(&src->mode_info, xprt_info);
+	ipc_router_log_msg(xprt_info->log_ctx,
+			   IPC_ROUTER_LOG_EVENT_TX, pkt, hdr, src, rport_ptr);
+
+	ipc_router_put_xprt_info_ref(xprt_info);
+	return hdr->size;
+}
+
+int msm_ipc_router_send_to(struct msm_ipc_port *src,
+			   struct sk_buff_head *data,
+			   struct msm_ipc_addr *dest,
+			   long timeout)
+{
+	u32 dst_node_id = 0, dst_port_id = 0;
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	struct msm_ipc_router_remote_port *rport_ptr = NULL;
+	struct msm_ipc_router_remote_port *src_rport_ptr = NULL;
+	struct rr_packet *pkt;
+	int ret;
+
+	if (!src || !data || !dest) {
+		IPC_RTR_ERR("%s: Invalid Parameters\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Resolve Address */
+	if (dest->addrtype == MSM_IPC_ADDR_ID) {
+		dst_node_id = dest->addr.port_addr.node_id;
+		dst_port_id = dest->addr.port_addr.port_id;
+	} else if (dest->addrtype == MSM_IPC_ADDR_NAME) {
+		server =
+		ipc_router_get_server_ref(dest->addr.port_name.service,
+					  dest->addr.port_name.instance,
+					  0, 0);
+		if (!server) {
+			IPC_RTR_ERR("%s: Destination not reachable\n",
+				    __func__);
+			return -ENODEV;
+		}
+		server_port = list_first_entry(&server->server_port_list,
+					       struct msm_ipc_server_port,
+					       list);
+		dst_node_id = server_port->server_addr.node_id;
+		dst_port_id = server_port->server_addr.port_id;
+		kref_put(&server->ref, ipc_router_release_server);
+	}
+
+	rport_ptr = ipc_router_get_rport_ref(dst_node_id, dst_port_id);
+	if (!rport_ptr) {
+		IPC_RTR_ERR("%s: Remote port not found\n", __func__);
+		return -ENODEV;
+	}
+
+	if (src->check_send_permissions) {
+		ret = src->check_send_permissions(rport_ptr->sec_rule);
+		if (ret <= 0) {
+			kref_put(&rport_ptr->ref, ipc_router_release_rport);
+			IPC_RTR_ERR("%s: permission failure for %s\n",
+				    __func__, current->comm);
+			return -EPERM;
+		}
+	}
+
+	if (dst_node_id == IPC_ROUTER_NID_LOCAL && !src->rport_info) {
+		src_rport_ptr = ipc_router_create_rport(IPC_ROUTER_NID_LOCAL,
+							src->this_port.port_id,
+							NULL);
+		if (!src_rport_ptr) {
+			kref_put(&rport_ptr->ref, ipc_router_release_rport);
+			IPC_RTR_ERR("%s: RPort creation failed\n", __func__);
+			return -ENOMEM;
+		}
+		mutex_lock(&src->port_lock_lhc3);
+		src->rport_info = src_rport_ptr;
+		mutex_unlock(&src->port_lock_lhc3);
+		kref_put(&src_rport_ptr->ref, ipc_router_release_rport);
+	}
+
+	pkt = create_pkt(data);
+	if (!pkt) {
+		kref_put(&rport_ptr->ref, ipc_router_release_rport);
+		IPC_RTR_ERR("%s: Pkt creation failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	ret = msm_ipc_router_write_pkt(src, rport_ptr, pkt, timeout);
+	kref_put(&rport_ptr->ref, ipc_router_release_rport);
+	if (ret < 0)
+		pkt->pkt_fragment_q = NULL;
+	release_pkt(pkt);
+
+	return ret;
+}
+
+int msm_ipc_router_send_msg(struct msm_ipc_port *src,
+			    struct msm_ipc_addr *dest,
+			    void *data, unsigned int data_len)
+{
+	struct sk_buff_head *out_skb_head;
+	int ret;
+
+	out_skb_head = msm_ipc_router_buf_to_skb(data, data_len);
+	if (!out_skb_head) {
+		IPC_RTR_ERR("%s: SKB conversion failed\n", __func__);
+		return -EFAULT;
+	}
+
+	ret = msm_ipc_router_send_to(src, out_skb_head, dest, 0);
+	if (ret < 0) {
+		if (ret != -EAGAIN)
+			IPC_RTR_ERR(
+			"%s: msm_ipc_router_send_to failed - ret: %d\n",
+				__func__, ret);
+		msm_ipc_router_free_skb(out_skb_head);
+		return ret;
+	}
+	return 0;
+}
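+
+/*
+ * Example usage (illustrative sketch only, not part of the original patch;
+ * the service and instance values below are hypothetical):
+ *
+ *	struct msm_ipc_addr dest = {
+ *		.addrtype = MSM_IPC_ADDR_NAME,
+ *		.addr.port_name.service = 0x42,
+ *		.addr.port_name.instance = 1,
+ *	};
+ *	int rc = msm_ipc_router_send_msg(port, &dest, buf, buf_len);
+ *
+ *	if (rc < 0 && rc != -EAGAIN)
+ *		pr_err("send failed: %d\n", rc);
+ */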
+
+/**
+ * msm_ipc_router_send_resume_tx() - Send Resume_Tx message
+ * @data: Pointer to the received data packet that has the confirm_rx bit set
+ *
+ * @return: On success, number of bytes transferred is returned, else a
+ *	    standard Linux error code is returned.
+ *
+ * This function sends the Resume_Tx event to the remote node that
+ * sent the data with the confirm_rx field set. In a multi-hop
+ * scenario, this function also makes sure that the Resume_Tx event
+ * reaches the correct destination node_id.
+ */
+static int msm_ipc_router_send_resume_tx(void *data)
+{
+	union rr_control_msg msg;
+	struct rr_header_v1 *hdr = (struct rr_header_v1 *)data;
+	struct msm_ipc_routing_table_entry *rt_entry;
+	int ret;
+
+	memset(&msg, 0, sizeof(msg));
+	msg.cmd = IPC_ROUTER_CTRL_CMD_RESUME_TX;
+	msg.cli.node_id = hdr->dst_node_id;
+	msg.cli.port_id = hdr->dst_port_id;
+	rt_entry = ipc_router_get_rtentry_ref(hdr->src_node_id);
+	if (!rt_entry) {
+		IPC_RTR_ERR("%s: %d Node is not present", __func__,
+			    hdr->src_node_id);
+		return -ENODEV;
+	}
+	ret = ipc_router_get_xprt_info_ref(rt_entry->xprt_info);
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Abort invalid xprt\n", __func__);
+		kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+		return ret;
+	}
+	ret = ipc_router_send_ctl_msg(rt_entry->xprt_info, &msg,
+				      hdr->src_node_id);
+	ipc_router_put_xprt_info_ref(rt_entry->xprt_info);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+	if (ret < 0)
+		IPC_RTR_ERR(
+		"%s: Send Resume_Tx Failed SRC_NODE: %d SRC_PORT: %d DEST_NODE: %d",
+			__func__, hdr->dst_node_id, hdr->dst_port_id,
+			hdr->src_node_id);
+
+	return ret;
+}
+
+int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
+			struct rr_packet **read_pkt,
+			size_t buf_len)
+{
+	struct rr_packet *pkt;
+
+	if (!port_ptr || !read_pkt)
+		return -EINVAL;
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	if (list_empty(&port_ptr->port_rx_q)) {
+		mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+		return -EAGAIN;
+	}
+
+	pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet, list);
+	if ((buf_len) && (pkt->hdr.size > buf_len)) {
+		mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+		return -ETOOSMALL;
+	}
+	list_del(&pkt->list);
+	if (list_empty(&port_ptr->port_rx_q))
+		__pm_relax(port_ptr->port_rx_ws);
+	*read_pkt = pkt;
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+	if (pkt->hdr.control_flag & CONTROL_FLAG_CONFIRM_RX)
+		msm_ipc_router_send_resume_tx(&pkt->hdr);
+
+	return pkt->length;
+}
+
+/**
+ * msm_ipc_router_rx_data_wait() - Wait for new message destined to a local
+ *				   port.
+ * @port_ptr: Pointer to the local port
+ * @timeout: < 0 timeout indicates infinite wait till a message arrives.
+ *	     > 0 timeout indicates the wait time.
+ *	     0 indicates that we do not wait.
+ * @return: 0 if there are pending messages to read,
+ *	    standard Linux error code otherwise.
+ *
+ * Checks for the availability of messages that are destined to a local port.
+ * If no messages are present then waits as per @timeout.
+ */
+int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout)
+{
+	int ret = 0;
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	while (list_empty(&port_ptr->port_rx_q)) {
+		mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+		if (timeout < 0) {
+			ret = wait_event_interruptible(
+					port_ptr->port_rx_wait_q,
+					!list_empty(&port_ptr->port_rx_q));
+			if (ret)
+				return ret;
+		} else if (timeout > 0) {
+			timeout = wait_event_interruptible_timeout(
+					port_ptr->port_rx_wait_q,
+					!list_empty(&port_ptr->port_rx_q),
+					timeout);
+			if (timeout < 0)
+				return -EFAULT;
+		}
+		if (timeout == 0)
+			return -ENOMSG;
+		mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+
+	return ret;
+}
+
+/**
+ * msm_ipc_router_recv_from() - Receive messages destined to a local port.
+ * @port_ptr: Pointer to the local port
+ * @pkt : Pointer to the router-to-router packet
+ * @src: Pointer to local port address
+ * @timeout: < 0 timeout indicates infinite wait till a message arrives.
+ *	     > 0 timeout indicates the wait time.
+ *	     0 indicates that we do not wait.
+ * @return: = Number of bytes read (on a successful read operation).
+ *	    = -ENOMSG (If there are no pending messages and timeout is 0).
+ *	    = -EINVAL (If either of the arguments, port_ptr or pkt, is invalid)
+ *	    = -EFAULT (If there are no pending messages when timeout is > 0
+ *	      and the wait_event_interruptible_timeout has returned value > 0)
+ *	    = -ERESTARTSYS (If there are no pending messages when timeout
+ *	      is < 0 and wait_event_interruptible was interrupted by a signal)
+ *
+ * This function reads the messages that are destined for a local port. It
+ * is used by modules that exist within the kernel and use IPC Router for
+ * transport. The function checks if there are any messages that are already
+ * received. If yes, it reads them, else it waits as per the timeout value.
+ * On a successful read, the return value of the function indicates the number
+ * of bytes that are read.
+ */
+int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
+			     struct rr_packet **pkt,
+			     struct msm_ipc_addr *src,
+			     long timeout)
+{
+	int ret, data_len, align_size;
+	struct sk_buff *temp_skb;
+	struct rr_header_v1 *hdr = NULL;
+
+	if (!port_ptr || !pkt) {
+		IPC_RTR_ERR("%s: Invalid pointers being passed\n", __func__);
+		return -EINVAL;
+	}
+
+	*pkt = NULL;
+
+	ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
+	if (ret)
+		return ret;
+
+	ret = msm_ipc_router_read(port_ptr, pkt, 0);
+	if (ret <= 0 || !(*pkt))
+		return ret;
+
+	hdr = &((*pkt)->hdr);
+	if (src) {
+		src->addrtype = MSM_IPC_ADDR_ID;
+		src->addr.port_addr.node_id = hdr->src_node_id;
+		src->addr.port_addr.port_id = hdr->src_port_id;
+	}
+
+	data_len = hdr->size;
+	align_size = ALIGN_SIZE(data_len);
+	if (align_size) {
+		temp_skb = skb_peek_tail((*pkt)->pkt_fragment_q);
+		skb_trim(temp_skb, (temp_skb->len - align_size));
+	}
+	return data_len;
+}
+
+int msm_ipc_router_read_msg(struct msm_ipc_port *port_ptr,
+			    struct msm_ipc_addr *src,
+			    unsigned char **data,
+			    unsigned int *len)
+{
+	struct rr_packet *pkt;
+	int ret;
+
+	ret = msm_ipc_router_recv_from(port_ptr, &pkt, src, 0);
+	if (ret < 0) {
+		if (ret != -ENOMSG)
+			IPC_RTR_ERR(
+			"%s: msm_ipc_router_recv_from failed - ret: %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	*data = msm_ipc_router_skb_to_buf(pkt->pkt_fragment_q, ret);
+	if (!(*data)) {
+		IPC_RTR_ERR("%s: Buf conversion failed\n", __func__);
+		release_pkt(pkt);
+		return -ENOMEM;
+	}
+
+	*len = ret;
+	release_pkt(pkt);
+	return 0;
+}
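+
+/*
+ * Example usage (illustrative sketch only, not part of the original patch;
+ * it assumes the buffer returned via msm_ipc_router_skb_to_buf() can be
+ * released with kfree()):
+ *
+ *	unsigned char *data;
+ *	unsigned int len;
+ *	struct msm_ipc_addr src;
+ *	int rc = msm_ipc_router_read_msg(port, &src, &data, &len);
+ *
+ *	if (!rc) {
+ *		... process len bytes at data ...
+ *		kfree(data);
+ *	}
+ */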
+
+/**
+ * msm_ipc_router_create_port() - Create a IPC Router port/endpoint
+ * @notify: Callback function to notify any event on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private info to be passed while the notification is generated.
+ *
+ * @return: Pointer to the port on success, NULL on error.
+ */
+struct msm_ipc_port *msm_ipc_router_create_port(
+	void (*notify)(unsigned int event, void *oob_data,
+		       size_t oob_data_len, void *priv),
+	void *priv)
+{
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	ret = ipc_router_core_init();
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
+			    __func__, ret);
+		return NULL;
+	}
+
+	port_ptr = msm_ipc_router_create_raw_port(NULL, notify, priv);
+	if (!port_ptr)
+		IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
+
+	return port_ptr;
+}
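+
+/*
+ * Example usage (illustrative sketch only, not part of the original patch;
+ * the callback name is hypothetical):
+ *
+ *	static void my_notify(unsigned int event, void *oob_data,
+ *			      size_t oob_data_len, void *priv)
+ *	{
+ *		pr_info("IPC Router event %u\n", event);
+ *	}
+ *
+ *	struct msm_ipc_port *port =
+ *			msm_ipc_router_create_port(my_notify, NULL);
+ *	...
+ *	msm_ipc_router_close_port(port);
+ */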
+
+int msm_ipc_router_close_port(struct msm_ipc_port *port_ptr)
+{
+	union rr_control_msg msg;
+	struct msm_ipc_server *server;
+	struct msm_ipc_router_remote_port *rport_ptr;
+
+	if (!port_ptr)
+		return -EINVAL;
+
+	if (port_ptr->type == SERVER_PORT || port_ptr->type == CLIENT_PORT) {
+		down_write(&local_ports_lock_lhc2);
+		list_del(&port_ptr->list);
+		up_write(&local_ports_lock_lhc2);
+
+		mutex_lock(&port_ptr->port_lock_lhc3);
+		rport_ptr = (struct msm_ipc_router_remote_port *)
+						port_ptr->rport_info;
+		port_ptr->rport_info = NULL;
+		mutex_unlock(&port_ptr->port_lock_lhc3);
+		if (rport_ptr) {
+			ipc_router_reset_conn(rport_ptr);
+			ipc_router_destroy_rport(rport_ptr);
+		}
+
+		if (port_ptr->type == SERVER_PORT) {
+			memset(&msg, 0, sizeof(msg));
+			msg.cmd = IPC_ROUTER_CTRL_CMD_REMOVE_SERVER;
+			msg.srv.service = port_ptr->port_name.service;
+			msg.srv.instance = port_ptr->port_name.instance;
+			msg.srv.node_id = port_ptr->this_port.node_id;
+			msg.srv.port_id = port_ptr->this_port.port_id;
+			broadcast_ctl_msg(&msg);
+		}
+
+		/* Server port could have been a client port earlier.
+		 * Send REMOVE_CLIENT message in either case.
+		 */
+		msm_ipc_router_send_remove_client(&port_ptr->mode_info,
+						  port_ptr->this_port.node_id,
+						  port_ptr->this_port.port_id);
+	} else if (port_ptr->type == CONTROL_PORT) {
+		down_write(&control_ports_lock_lha5);
+		list_del(&port_ptr->list);
+		up_write(&control_ports_lock_lha5);
+	} else if (port_ptr->type == IRSC_PORT) {
+		down_write(&local_ports_lock_lhc2);
+		list_del(&port_ptr->list);
+		up_write(&local_ports_lock_lhc2);
+		signal_irsc_completion();
+	}
+
+	if (port_ptr->type == SERVER_PORT) {
+		server = ipc_router_get_server_ref(
+				port_ptr->port_name.service,
+				port_ptr->port_name.instance,
+				port_ptr->this_port.node_id,
+				port_ptr->this_port.port_id);
+		if (server) {
+			kref_put(&server->ref, ipc_router_release_server);
+			ipc_router_destroy_server(server,
+						  port_ptr->this_port.node_id,
+						  port_ptr->this_port.port_id);
+		}
+	}
+
+	mutex_lock(&port_ptr->port_lock_lhc3);
+	rport_ptr = (struct msm_ipc_router_remote_port *)port_ptr->rport_info;
+	port_ptr->rport_info = NULL;
+	mutex_unlock(&port_ptr->port_lock_lhc3);
+	if (rport_ptr)
+		ipc_router_destroy_rport(rport_ptr);
+
+	kref_put(&port_ptr->ref, ipc_router_release_port);
+	return 0;
+}
+
+int msm_ipc_router_get_curr_pkt_size(struct msm_ipc_port *port_ptr)
+{
+	struct rr_packet *pkt;
+	int rc = 0;
+
+	if (!port_ptr)
+		return -EINVAL;
+
+	mutex_lock(&port_ptr->port_rx_q_lock_lhc3);
+	if (!list_empty(&port_ptr->port_rx_q)) {
+		pkt = list_first_entry(&port_ptr->port_rx_q, struct rr_packet,
+				       list);
+		rc = pkt->hdr.size;
+	}
+	mutex_unlock(&port_ptr->port_rx_q_lock_lhc3);
+
+	return rc;
+}
+
+int msm_ipc_router_bind_control_port(struct msm_ipc_port *port_ptr)
+{
+	if (unlikely(!port_ptr || port_ptr->type != CLIENT_PORT))
+		return -EINVAL;
+
+	down_write(&local_ports_lock_lhc2);
+	list_del(&port_ptr->list);
+	up_write(&local_ports_lock_lhc2);
+	port_ptr->type = CONTROL_PORT;
+	down_write(&control_ports_lock_lha5);
+	list_add_tail(&port_ptr->list, &control_ports);
+	up_write(&control_ports_lock_lha5);
+
+	return 0;
+}
+
+int msm_ipc_router_lookup_server_name(struct msm_ipc_port_name *srv_name,
+				      struct msm_ipc_server_info *srv_info,
+				      int num_entries_in_array, u32 lookup_mask)
+{
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+	int key, i = 0; /* num_entries_found */
+
+	if (!srv_name) {
+		IPC_RTR_ERR("%s: Invalid srv_name\n", __func__);
+		return -EINVAL;
+	}
+
+	if (num_entries_in_array && !srv_info) {
+		IPC_RTR_ERR("%s: srv_info NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	down_read(&server_list_lock_lha2);
+	key = (srv_name->service & (SRV_HASH_SIZE - 1));
+	list_for_each_entry(server, &server_list[key], list) {
+		if ((server->name.service != srv_name->service) ||
+		    ((server->name.instance & lookup_mask) !=
+			srv_name->instance))
+			continue;
+
+		list_for_each_entry(server_port, &server->server_port_list,
+				    list) {
+			if (i < num_entries_in_array) {
+				srv_info[i].node_id =
+					  server_port->server_addr.node_id;
+				srv_info[i].port_id =
+					  server_port->server_addr.port_id;
+				srv_info[i].service = server->name.service;
+				srv_info[i].instance = server->name.instance;
+			}
+			i++;
+		}
+	}
+	up_read(&server_list_lock_lha2);
+
+	return i;
+}
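+
+/*
+ * Example usage (illustrative sketch only, not part of the original patch;
+ * the service and instance values are hypothetical). The return value is
+ * the total number of matching entries, so a caller can size its array
+ * with a first pass and fill it with a second one:
+ *
+ *	struct msm_ipc_port_name name = { .service = 0x42, .instance = 1 };
+ *	struct msm_ipc_server_info *info;
+ *	int n = msm_ipc_router_lookup_server_name(&name, NULL, 0, ~0);
+ *
+ *	if (n > 0) {
+ *		info = kcalloc(n, sizeof(*info), GFP_KERNEL);
+ *		if (info)
+ *			msm_ipc_router_lookup_server_name(&name, info, n, ~0);
+ *	}
+ */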
+
+int msm_ipc_router_close(void)
+{
+	struct msm_ipc_router_xprt_info *xprt_info, *tmp_xprt_info;
+
+	down_write(&xprt_info_list_lock_lha5);
+	list_for_each_entry_safe(xprt_info, tmp_xprt_info,
+				 &xprt_info_list, list) {
+		xprt_info->xprt->close(xprt_info->xprt);
+		list_del(&xprt_info->list);
+		kfree(xprt_info);
+	}
+	up_write(&xprt_info_list_lock_lha5);
+	return 0;
+}
+
+/**
+ * pil_vote_load_worker() - Process vote to load the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to load the modem that have been
+ * queued by msm_ipc_load_default_node().
+ */
+static void pil_vote_load_worker(struct work_struct *work)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = container_of(work, struct pil_vote_info, load_work);
+	if (strlen(default_peripheral)) {
+		vote_info->pil_handle = subsystem_get(default_peripheral);
+		if (IS_ERR(vote_info->pil_handle)) {
+			IPC_RTR_ERR("%s: Failed to load %s\n",
+				    __func__, default_peripheral);
+			vote_info->pil_handle = NULL;
+		}
+	} else {
+		vote_info->pil_handle = NULL;
+	}
+}
+
+/**
+ * pil_vote_unload_worker() - Process vote to unload the modem
+ *
+ * @work: Work item to process
+ *
+ * This function is called to process votes to unload the modem that have been
+ * queued by msm_ipc_unload_default_node().
+ */
+static void pil_vote_unload_worker(struct work_struct *work)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = container_of(work, struct pil_vote_info, unload_work);
+
+	if (vote_info->pil_handle) {
+		subsystem_put(vote_info->pil_handle);
+		vote_info->pil_handle = NULL;
+	}
+	kfree(vote_info);
+}
+
+/**
+ * msm_ipc_load_default_node() - Queue a vote to load the modem.
+ *
+ * @return: PIL vote info structure on success, NULL on failure.
+ *
+ * This function places a work item that loads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.
+ */
+void *msm_ipc_load_default_node(void)
+{
+	struct pil_vote_info *vote_info;
+
+	vote_info = kmalloc(sizeof(*vote_info), GFP_KERNEL);
+	if (!vote_info)
+		return vote_info;
+
+	INIT_WORK(&vote_info->load_work, pil_vote_load_worker);
+	queue_work(msm_ipc_router_workqueue, &vote_info->load_work);
+
+	return vote_info;
+}
+
+/**
+ * msm_ipc_unload_default_node() - Queue a vote to unload the modem.
+ *
+ * @pil_vote: PIL vote info structure, containing the PIL handle
+ * and work structure.
+ *
+ * This function places a work item that unloads the modem on the
+ * single-threaded workqueue used for processing PIL votes to load
+ * or unload the modem.
+ */
+void msm_ipc_unload_default_node(void *pil_vote)
+{
+	struct pil_vote_info *vote_info;
+
+	if (pil_vote) {
+		vote_info = (struct pil_vote_info *)pil_vote;
+		INIT_WORK(&vote_info->unload_work, pil_vote_unload_worker);
+		queue_work(msm_ipc_router_workqueue, &vote_info->unload_work);
+	}
+}
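+
+/*
+ * Example usage (illustrative sketch only, not part of the original patch):
+ *
+ *	void *pil_vote = msm_ipc_load_default_node();
+ *	...
+ *	msm_ipc_unload_default_node(pil_vote);
+ *
+ * The unload worker frees the vote structure, so the handle must not be
+ * reused after the unload vote has been queued.
+ */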
+
+#if defined(CONFIG_DEBUG_FS)
+static void dump_routing_table(struct seq_file *s)
+{
+	int j;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	seq_printf(s, "%-10s|%-20s|%-10s|\n", "Node Id", "XPRT Name",
+		   "Next Hop");
+	seq_puts(s, "----------------------------------------------\n");
+	for (j = 0; j < RT_HASH_SIZE; j++) {
+		down_read(&routing_table_lock_lha3);
+		list_for_each_entry(rt_entry, &routing_table[j], list) {
+			down_read(&rt_entry->lock_lha4);
+			seq_printf(s, "0x%08x|", rt_entry->node_id);
+			if (rt_entry->node_id == IPC_ROUTER_NID_LOCAL)
+				seq_printf(s, "%-20s|0x%08x|\n", "Loopback",
+					   rt_entry->node_id);
+			else
+				seq_printf(s, "%-20s|0x%08x|\n",
+					   rt_entry->xprt_info->xprt->name,
+					   rt_entry->node_id);
+			up_read(&rt_entry->lock_lha4);
+		}
+		up_read(&routing_table_lock_lha3);
+	}
+}
+
+static void dump_xprt_info(struct seq_file *s)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+
+	seq_printf(s, "%-20s|%-10s|%-12s|%-15s|\n", "XPRT Name", "Link ID",
+		   "Initialized", "Remote Node Id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(xprt_info, &xprt_info_list, list)
+		seq_printf(s, "%-20s|0x%08x|%-12s|0x%08x|\n",
+			   xprt_info->xprt->name, xprt_info->xprt->link_id,
+			   (xprt_info->initialized ? "Y" : "N"),
+			   xprt_info->remote_node_id);
+	up_read(&xprt_info_list_lock_lha5);
+}
+
+static void dump_servers(struct seq_file *s)
+{
+	int j;
+	struct msm_ipc_server *server;
+	struct msm_ipc_server_port *server_port;
+
+	seq_printf(s, "%-11s|%-11s|%-11s|%-11s|\n", "Service", "Instance",
+		   "Node_id", "Port_id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&server_list_lock_lha2);
+	for (j = 0; j < SRV_HASH_SIZE; j++) {
+		list_for_each_entry(server, &server_list[j], list) {
+			list_for_each_entry(server_port,
+					    &server->server_port_list,
+					    list)
+				seq_printf(s, "0x%08x |0x%08x |0x%08x |0x%08x |\n",
+					   server->name.service,
+					   server->name.instance,
+					   server_port->server_addr.node_id,
+					   server_port->server_addr.port_id);
+		}
+	}
+	up_read(&server_list_lock_lha2);
+}
+
+static void dump_remote_ports(struct seq_file *s)
+{
+	int j, k;
+	struct msm_ipc_router_remote_port *rport_ptr;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	seq_printf(s, "%-11s|%-11s|%-10s|\n", "Node_id", "Port_id",
+		   "Quota_cnt");
+	seq_puts(s, "------------------------------------------------------------\n");
+	for (j = 0; j < RT_HASH_SIZE; j++) {
+		down_read(&routing_table_lock_lha3);
+		list_for_each_entry(rt_entry, &routing_table[j], list) {
+			down_read(&rt_entry->lock_lha4);
+			for (k = 0; k < RP_HASH_SIZE; k++) {
+				list_for_each_entry
+						(rport_ptr,
+						 &rt_entry->remote_port_list[k],
+						 list)
+					seq_printf(s, "0x%08x |0x%08x |0x%08x|\n",
+						   rport_ptr->node_id,
+						   rport_ptr->port_id,
+						   rport_ptr->tx_quota_cnt);
+			}
+			up_read(&rt_entry->lock_lha4);
+		}
+		up_read(&routing_table_lock_lha3);
+	}
+}
+
+static void dump_control_ports(struct seq_file *s)
+{
+	struct msm_ipc_port *port_ptr;
+
+	seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&control_ports_lock_lha5);
+	list_for_each_entry(port_ptr, &control_ports, list)
+		seq_printf(s, "0x%08x |0x%08x |\n", port_ptr->this_port.node_id,
+			   port_ptr->this_port.port_id);
+	up_read(&control_ports_lock_lha5);
+}
+
+static void dump_local_ports(struct seq_file *s)
+{
+	int j;
+	struct msm_ipc_port *port_ptr;
+
+	seq_printf(s, "%-11s|%-11s|\n", "Node_id", "Port_id");
+	seq_puts(s, "------------------------------------------------------------\n");
+	down_read(&local_ports_lock_lhc2);
+	for (j = 0; j < LP_HASH_SIZE; j++) {
+		list_for_each_entry(port_ptr, &local_ports[j], list) {
+			mutex_lock(&port_ptr->port_lock_lhc3);
+			seq_printf(s, "0x%08x |0x%08x |\n",
+				   port_ptr->this_port.node_id,
+				   port_ptr->this_port.port_id);
+			mutex_unlock(&port_ptr->port_lock_lhc3);
+		}
+	}
+	up_read(&local_ports_lock_lhc2);
+}
+
+static int debugfs_show(struct seq_file *s, void *data)
+{
+	void (*show)(struct seq_file *) = s->private;
+
+	show(s);
+	return 0;
+}
+
+static int debug_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, debugfs_show, inode->i_private);
+}
+
+static const struct file_operations debug_ops = {
+	.open = debug_open,
+	.release = single_release,
+	.read = seq_read,
+	.llseek = seq_lseek,
+};
+
+static void debug_create(const char *name, struct dentry *dent,
+			 void (*show)(struct seq_file *))
+{
+	debugfs_create_file(name, 0444, dent, show, &debug_ops);
+}
+
+static void debugfs_init(void)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("msm_ipc_router", 0);
+	if (IS_ERR(dent))
+		return;
+
+	debug_create("dump_local_ports", dent, dump_local_ports);
+	debug_create("dump_remote_ports", dent, dump_remote_ports);
+	debug_create("dump_control_ports", dent, dump_control_ports);
+	debug_create("dump_servers", dent, dump_servers);
+	debug_create("dump_xprt_info", dent, dump_xprt_info);
+	debug_create("dump_routing_table", dent, dump_routing_table);
+}
+
+#else
+static void debugfs_init(void) {}
+#endif
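+
+/*
+ * When CONFIG_DEBUG_FS is enabled, the entries created above can be read
+ * from user space (illustrative, assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *	cat /sys/kernel/debug/msm_ipc_router/dump_routing_table
+ *	cat /sys/kernel/debug/msm_ipc_router/dump_servers
+ */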
+
+/**
+ * ipc_router_create_log_ctx() - Create and add the log context based on
+ *				 transport
+ * @name:	subsystem name
+ *
+ * Return:	a reference to the log context created
+ *
+ * This function creates an IPC logging context based on the transport and
+ * adds it to a global list. The log context can be reused from the list in
+ * case of a subsystem restart.
+ */
+static void *ipc_router_create_log_ctx(char *name)
+{
+	struct ipc_rtr_log_ctx *sub_log_ctx;
+
+	sub_log_ctx = kmalloc(sizeof(*sub_log_ctx), GFP_KERNEL);
+	if (!sub_log_ctx)
+		return NULL;
+	sub_log_ctx->log_ctx = ipc_log_context_create(
+				IPC_RTR_INFO_PAGES, name, 0);
+	if (!sub_log_ctx->log_ctx) {
+		IPC_RTR_ERR("%s: Unable to create IPC logging for [%s]",
+			    __func__, name);
+		kfree(sub_log_ctx);
+		return NULL;
+	}
+	strlcpy(sub_log_ctx->log_ctx_name, name, LOG_CTX_NAME_LEN);
+	INIT_LIST_HEAD(&sub_log_ctx->list);
+	list_add_tail(&sub_log_ctx->list, &log_ctx_list);
+	return sub_log_ctx->log_ctx;
+}
+
+static void ipc_router_log_ctx_init(void)
+{
+	mutex_lock(&log_ctx_list_lock_lha0);
+	local_log_ctx = ipc_router_create_log_ctx("local_IPCRTR");
+	mutex_unlock(&log_ctx_list_lock_lha0);
+}
+
+/**
+ * ipc_router_get_log_ctx() - Retrieves the ipc log context based on subsystem
+ *			      name.
+ * @sub_name:	subsystem name
+ *
+ * Return:	a reference to the log context
+ */
+static void *ipc_router_get_log_ctx(char *sub_name)
+{
+	void *log_ctx = NULL;
+	struct ipc_rtr_log_ctx *temp_log_ctx;
+
+	mutex_lock(&log_ctx_list_lock_lha0);
+	list_for_each_entry(temp_log_ctx, &log_ctx_list, list)
+		if (!strcmp(temp_log_ctx->log_ctx_name, sub_name)) {
+			log_ctx = temp_log_ctx->log_ctx;
+			mutex_unlock(&log_ctx_list_lock_lha0);
+			return log_ctx;
+		}
+	log_ctx = ipc_router_create_log_ctx(sub_name);
+	mutex_unlock(&log_ctx_list_lock_lha0);
+
+	return log_ctx;
+}
+
+/**
+ * ipc_router_get_xprt_info_ref() - Get a reference to the xprt_info structure
+ * @xprt_info: pointer to the xprt_info.
+ *
+ * @return: Zero on success, -ENODEV on failure.
+ *
+ * This function is used to obtain a reference to the xprt_info structure
+ * corresponding to the requested @xprt_info pointer.
+ */
+static int ipc_router_get_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	int ret = -ENODEV;
+	struct msm_ipc_router_xprt_info *tmp_xprt_info;
+
+	if (!xprt_info)
+		return 0;
+
+	down_read(&xprt_info_list_lock_lha5);
+	list_for_each_entry(tmp_xprt_info, &xprt_info_list, list) {
+		if (tmp_xprt_info == xprt_info) {
+			kref_get(&xprt_info->ref);
+			ret = 0;
+			break;
+		}
+	}
+	up_read(&xprt_info_list_lock_lha5);
+
+	return ret;
+}
+
+/**
+ * ipc_router_put_xprt_info_ref() - Put a reference to the xprt_info structure
+ * @xprt_info: pointer to the xprt_info.
+ *
+ * This function is used to put the reference to the xprt_info structure
+ * corresponding to the requested @xprt_info pointer.
+ */
+static void ipc_router_put_xprt_info_ref(
+		struct msm_ipc_router_xprt_info *xprt_info)
+{
+	if (xprt_info)
+		kref_put(&xprt_info->ref, ipc_router_release_xprt_info_ref);
+}
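+
+/*
+ * Typical pairing of the reference helpers above, as done in
+ * msm_ipc_router_write_pkt() (illustrative sketch only):
+ *
+ *	if (!ipc_router_get_xprt_info_ref(xprt_info)) {
+ *		... use xprt_info ...
+ *		ipc_router_put_xprt_info_ref(xprt_info);
+ *	}
+ */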
+
+/**
+ * ipc_router_release_xprt_info_ref() - release the xprt_info last reference
+ * @ref: Reference to the xprt_info structure.
+ *
+ * This function is called when all references to the xprt_info structure
+ * are released.
+ */
+static void ipc_router_release_xprt_info_ref(struct kref *ref)
+{
+	struct msm_ipc_router_xprt_info *xprt_info =
+		container_of(ref, struct msm_ipc_router_xprt_info, ref);
+
+	complete_all(&xprt_info->ref_complete);
+}
+
+static int msm_ipc_router_add_xprt(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+
+	xprt_info = kmalloc(sizeof(*xprt_info), GFP_KERNEL);
+	if (!xprt_info)
+		return -ENOMEM;
+
+	xprt_info->xprt = xprt;
+	xprt_info->initialized = 0;
+	xprt_info->remote_node_id = -1;
+	INIT_LIST_HEAD(&xprt_info->pkt_list);
+	mutex_init(&xprt_info->rx_lock_lhb2);
+	mutex_init(&xprt_info->tx_lock_lhb2);
+	wakeup_source_init(&xprt_info->ws, xprt->name);
+	xprt_info->need_len = 0;
+	xprt_info->abort_data_read = 0;
+	INIT_WORK(&xprt_info->read_data, do_read_data);
+	INIT_LIST_HEAD(&xprt_info->list);
+	kref_init(&xprt_info->ref);
+	init_completion(&xprt_info->ref_complete);
+
+	xprt_info->workqueue = create_singlethread_workqueue(xprt->name);
+	if (!xprt_info->workqueue) {
+		kfree(xprt_info);
+		return -ENOMEM;
+	}
+
+	xprt_info->log_ctx = ipc_router_get_log_ctx(xprt->name);
+
+	if (!strcmp(xprt->name, "msm_ipc_router_loopback_xprt")) {
+		xprt_info->remote_node_id = IPC_ROUTER_NID_LOCAL;
+		xprt_info->initialized = 1;
+	}
+
+	IPC_RTR_INFO(xprt_info->log_ctx, "Adding xprt: [%s]\n", xprt->name);
+	down_write(&xprt_info_list_lock_lha5);
+	list_add_tail(&xprt_info->list, &xprt_info_list);
+	up_write(&xprt_info_list_lock_lha5);
+
+	down_write(&routing_table_lock_lha3);
+	if (!routing_table_inited) {
+		init_routing_table();
+		routing_table_inited = 1;
+	}
+	up_write(&routing_table_lock_lha3);
+
+	xprt->priv = xprt_info;
+
+	return 0;
+}
+
+static void msm_ipc_router_remove_xprt(struct msm_ipc_router_xprt *xprt)
+{
+	struct msm_ipc_router_xprt_info *xprt_info;
+	struct rr_packet *temp_pkt, *pkt;
+
+	if (xprt && xprt->priv) {
+		xprt_info = xprt->priv;
+
+		IPC_RTR_INFO(xprt_info->log_ctx, "Removing xprt: [%s]\n",
+			     xprt->name);
+		mutex_lock(&xprt_info->rx_lock_lhb2);
+		xprt_info->abort_data_read = 1;
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+		flush_workqueue(xprt_info->workqueue);
+		destroy_workqueue(xprt_info->workqueue);
+		mutex_lock(&xprt_info->rx_lock_lhb2);
+		list_for_each_entry_safe(pkt, temp_pkt,
+					 &xprt_info->pkt_list, list) {
+			list_del(&pkt->list);
+			release_pkt(pkt);
+		}
+		mutex_unlock(&xprt_info->rx_lock_lhb2);
+
+		down_write(&xprt_info_list_lock_lha5);
+		list_del(&xprt_info->list);
+		up_write(&xprt_info_list_lock_lha5);
+
+		msm_ipc_cleanup_routing_table(xprt_info);
+
+		wakeup_source_trash(&xprt_info->ws);
+
+		ipc_router_put_xprt_info_ref(xprt_info);
+		wait_for_completion(&xprt_info->ref_complete);
+
+		xprt->priv = 0;
+		kfree(xprt_info);
+	}
+}
+
+struct msm_ipc_router_xprt_work {
+	struct msm_ipc_router_xprt *xprt;
+	struct work_struct work;
+};
+
+static void xprt_open_worker(struct work_struct *work)
+{
+	struct msm_ipc_router_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_xprt_work, work);
+
+	msm_ipc_router_add_xprt(xprt_work->xprt);
+	kfree(xprt_work);
+}
+
+static void xprt_close_worker(struct work_struct *work)
+{
+	struct msm_ipc_router_xprt_work *xprt_work =
+		container_of(work, struct msm_ipc_router_xprt_work, work);
+
+	msm_ipc_router_remove_xprt(xprt_work->xprt);
+	xprt_work->xprt->sft_close_done(xprt_work->xprt);
+	kfree(xprt_work);
+}
+
+void msm_ipc_router_xprt_notify(struct msm_ipc_router_xprt *xprt,
+				unsigned int event,
+				void *data)
+{
+	struct msm_ipc_router_xprt_info *xprt_info = xprt->priv;
+	struct msm_ipc_router_xprt_work *xprt_work;
+	struct rr_packet *pkt;
+	int ret;
+
+	ret = ipc_router_core_init();
+	if (ret < 0) {
+		IPC_RTR_ERR("%s: Error %d initializing IPC Router\n",
+			    __func__, ret);
+		return;
+	}
+
+	switch (event) {
+	case IPC_ROUTER_XPRT_EVENT_OPEN:
+		xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
+		if (xprt_work) {
+			xprt_work->xprt = xprt;
+			INIT_WORK(&xprt_work->work, xprt_open_worker);
+			queue_work(msm_ipc_router_workqueue, &xprt_work->work);
+		} else {
+			IPC_RTR_ERR(
+			"%s: malloc failure - Couldn't notify OPEN event",
+				__func__);
+		}
+		break;
+
+	case IPC_ROUTER_XPRT_EVENT_CLOSE:
+		xprt_work = kmalloc(sizeof(*xprt_work), GFP_ATOMIC);
+		if (xprt_work) {
+			xprt_work->xprt = xprt;
+			INIT_WORK(&xprt_work->work, xprt_close_worker);
+			queue_work(msm_ipc_router_workqueue, &xprt_work->work);
+		} else {
+			IPC_RTR_ERR(
+			"%s: malloc failure - Couldn't notify CLOSE event",
+				__func__);
+		}
+		break;
+	}
+
+	if (!data)
+		return;
+
+	while (!xprt_info) {
+		msleep(100);
+		xprt_info = xprt->priv;
+	}
+
+	pkt = clone_pkt((struct rr_packet *)data);
+	if (!pkt)
+		return;
+
+	mutex_lock(&xprt_info->rx_lock_lhb2);
+	list_add_tail(&pkt->list, &xprt_info->pkt_list);
+	__pm_stay_awake(&xprt_info->ws);
+	mutex_unlock(&xprt_info->rx_lock_lhb2);
+	queue_work(xprt_info->workqueue, &xprt_info->read_data);
+}
+
+/**
+ * parse_devicetree() - parse device tree binding
+ *
+ * @node: pointer to device tree node
+ *
+ * @return: 0 on success, -ENODEV on failure.
+ */
+static int parse_devicetree(struct device_node *node)
+{
+	char *key;
+	const char *peripheral = NULL;
+
+	key = "qcom,default-peripheral";
+	peripheral = of_get_property(node, key, NULL);
+	if (peripheral)
+		strlcpy(default_peripheral, peripheral, PIL_SUBSYSTEM_NAME_LEN);
+
+	return 0;
+}
+
+/**
+ * ipc_router_probe() - Probe the IPC Router
+ *
+ * @pdev: Platform device corresponding to IPC Router.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to IPC Router.
+ */
+static int ipc_router_probe(struct platform_device *pdev)
+{
+	int ret = 0;
+
+	if (pdev && pdev->dev.of_node) {
+		ret = parse_devicetree(pdev->dev.of_node);
+		if (ret)
+			IPC_RTR_ERR("%s: Failed to parse device tree\n",
+				    __func__);
+	}
+	return ret;
+}
+
+static const struct of_device_id ipc_router_match_table[] = {
+	{ .compatible = "qcom,ipc_router" },
+	{},
+};
+
+static struct platform_driver ipc_router_driver = {
+	.probe = ipc_router_probe,
+	.driver = {
+		.name = MODULE_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = ipc_router_match_table,
+	 },
+};
+
+/**
+ * ipc_router_core_init() - Initialize all IPC Router core data structures
+ *
+ * Return: 0 on success or a standard Linux error code otherwise.
+ *
+ * This function initializes only the core data structures of the IPC Router
+ * module. The remaining initialization is done inside msm_ipc_router_init().
+ */
+static int ipc_router_core_init(void)
+{
+	int i;
+	int ret;
+	struct msm_ipc_routing_table_entry *rt_entry;
+
+	mutex_lock(&ipc_router_init_lock);
+	if (likely(is_ipc_router_inited)) {
+		mutex_unlock(&ipc_router_init_lock);
+		return 0;
+	}
+
+	debugfs_init();
+
+	for (i = 0; i < SRV_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&server_list[i]);
+
+	for (i = 0; i < LP_HASH_SIZE; i++)
+		INIT_LIST_HEAD(&local_ports[i]);
+
+	down_write(&routing_table_lock_lha3);
+	if (!routing_table_inited) {
+		init_routing_table();
+		routing_table_inited = 1;
+	}
+	up_write(&routing_table_lock_lha3);
+	rt_entry = create_routing_table_entry(IPC_ROUTER_NID_LOCAL, NULL);
+	kref_put(&rt_entry->ref, ipc_router_release_rtentry);
+
+	msm_ipc_router_workqueue =
+		create_singlethread_workqueue("msm_ipc_router");
+	if (!msm_ipc_router_workqueue) {
+		mutex_unlock(&ipc_router_init_lock);
+		return -ENOMEM;
+	}
+
+	ret = msm_ipc_router_security_init();
+	if (ret < 0)
+		IPC_RTR_ERR("%s: Security Init failed\n", __func__);
+	else
+		is_ipc_router_inited = true;
+	mutex_unlock(&ipc_router_init_lock);
+
+	return ret;
+}
+
+static int msm_ipc_router_init(void)
+{
+	int ret;
+
+	ret = ipc_router_core_init();
+	if (ret < 0)
+		return ret;
+
+	ret = platform_driver_register(&ipc_router_driver);
+	if (ret)
+		IPC_RTR_ERR(
+		"%s: ipc_router_driver register failed %d\n", __func__, ret);
+
+	ret = msm_ipc_router_init_sockets();
+	if (ret < 0)
+		IPC_RTR_ERR("%s: Init sockets failed\n", __func__);
+
+	ipc_router_log_ctx_init();
+	return ret;
+}
+
+module_init(msm_ipc_router_init);
+MODULE_DESCRIPTION("MSM IPC Router");
+MODULE_LICENSE("GPL v2");
diff --git a/net/ipc_router/ipc_router_private.h b/net/ipc_router/ipc_router_private.h
new file mode 100644
index 0000000..3ec9818
--- /dev/null
+++ b/net/ipc_router/ipc_router_private.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_PRIVATE_H
+#define _IPC_ROUTER_PRIVATE_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/msm_ipc.h>
+#include <linux/ipc_router.h>
+#include <linux/ipc_router_xprt.h>
+
+#include <net/sock.h>
+
+/* definitions for the R2R wire protocol */
+#define IPC_ROUTER_V1		1
+/* Ambiguous definition but will enable multiplexing IPC_ROUTER_V2 packets
+ * with an existing alternate transport in user-space, if needed.
+ */
+#define IPC_ROUTER_V2		3
+#define IPC_ROUTER_VER_BITMASK ((BIT(IPC_ROUTER_V1)) | (BIT(IPC_ROUTER_V2)))
+#define IPC_ROUTER_HELLO_MAGIC 0xE110
+#define IPC_ROUTER_CHECKSUM_MASK 0xFFFF
+
+#define IPC_ROUTER_ADDRESS			0x0000FFFF
+
+#define IPC_ROUTER_NID_LOCAL			1
+#define MAX_IPC_PKT_SIZE 66000
+
+#define IPC_ROUTER_LOW_RX_QUOTA		5
+#define IPC_ROUTER_HIGH_RX_QUOTA	10
+
+#define IPC_ROUTER_INFINITY -1
+#define DEFAULT_RCV_TIMEO IPC_ROUTER_INFINITY
+#define DEFAULT_SND_TIMEO IPC_ROUTER_INFINITY
+
+#define ALIGN_SIZE(x) ((4 - ((x) & 3)) & 3)
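+/*
+ * ALIGN_SIZE() returns the number of pad bytes needed to round a length
+ * up to the next 4-byte boundary: e.g. ALIGN_SIZE(5) == 3 and
+ * ALIGN_SIZE(8) == 0.
+ */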
+
+#define ALL_SERVICE 0xFFFFFFFF
+#define ALL_INSTANCE 0xFFFFFFFF
+
+#define CONTROL_FLAG_CONFIRM_RX 0x1
+#define CONTROL_FLAG_OPT_HDR 0x2
+
+enum {
+	CLIENT_PORT,
+	SERVER_PORT,
+	CONTROL_PORT,
+	IRSC_PORT,
+};
+
+enum {
+	NULL_MODE,
+	SINGLE_LINK_MODE,
+	MULTI_LINK_MODE,
+};
+
+enum {
+	CONNECTION_RESET = -1,
+	NOT_CONNECTED,
+	CONNECTED,
+};
+
+struct msm_ipc_sock {
+	struct sock sk;
+	struct msm_ipc_port *port;
+	void *default_node_vote_info;
+};
+
+/**
+ * msm_ipc_router_create_raw_port() - Create an IPC Router port
+ * @endpoint: User-space socket information to be cached.
+ * @notify: Function to notify incoming events on the port.
+ *   @event: Event ID to be handled.
+ *   @oob_data: Any out-of-band data associated with the event.
+ *   @oob_data_len: Size of the out-of-band data, if valid.
+ *   @priv: Private data registered during the port creation.
+ * @priv: Private Data to be passed during the event notification.
+ *
+ * @return: Valid pointer to port on success, NULL on failure.
+ *
+ * This function is used to create an IPC Router port. The port is used for
+ * communication locally or outside the subsystem.
+ */
+struct msm_ipc_port *
+msm_ipc_router_create_raw_port(void *endpoint,
+			       void (*notify)(unsigned int event,
+					      void *oob_data,
+					      size_t oob_data_len, void *priv),
+			       void *priv);
+int msm_ipc_router_send_to(struct msm_ipc_port *src,
+			   struct sk_buff_head *data,
+			   struct msm_ipc_addr *dest,
+			   long timeout);
+int msm_ipc_router_read(struct msm_ipc_port *port_ptr,
+			struct rr_packet **pkt,
+			size_t buf_len);
+
+int msm_ipc_router_recv_from(struct msm_ipc_port *port_ptr,
+			     struct rr_packet **pkt,
+			     struct msm_ipc_addr *src_addr, long timeout);
+int msm_ipc_router_register_server(struct msm_ipc_port *server_port,
+				   struct msm_ipc_addr *name);
+int msm_ipc_router_unregister_server(struct msm_ipc_port *server_port);
+
+int msm_ipc_router_init_sockets(void);
+void msm_ipc_router_exit_sockets(void);
+
+void msm_ipc_sync_sec_rule(u32 service, u32 instance, void *rule);
+
+void msm_ipc_sync_default_sec_rule(void *rule);
+
+int msm_ipc_router_rx_data_wait(struct msm_ipc_port *port_ptr, long timeout);
+
+void msm_ipc_router_free_skb(struct sk_buff_head *skb_head);
+
+/**
+ * ipc_router_set_conn() - Set the connection by initializing dest address
+ * @port_ptr: Local port in which the connection has to be set.
+ * @addr: Destination address of the connection.
+ *
+ * @return: 0 on success, standard Linux error codes on failure.
+ */
+int ipc_router_set_conn(struct msm_ipc_port *port_ptr,
+			struct msm_ipc_addr *addr);
+
+void *msm_ipc_load_default_node(void);
+
+void msm_ipc_unload_default_node(void *pil);
+
+/**
+ * ipc_router_dummy_write_space() - Dummy write space available callback
+ * @sk:	Socket pointer for which the callback is called.
+ */
+void ipc_router_dummy_write_space(struct sock *sk);
+
+#endif
diff --git a/net/ipc_router/ipc_router_security.c b/net/ipc_router/ipc_router_security.c
new file mode 100644
index 0000000..1359d64
--- /dev/null
+++ b/net/ipc_router/ipc_router_security.c
@@ -0,0 +1,330 @@
+/* Copyright (c) 2012-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+#include <linux/msm_ipc.h>
+#include <linux/rwsem.h>
+#include <linux/uaccess.h>
+
+#include <net/sock.h>
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+#define IRSC_COMPLETION_TIMEOUT_MS 30000
+#define SEC_RULES_HASH_SZ 32
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+struct security_rule {
+	struct list_head list;
+	u32 service_id;
+	u32 instance_id;
+	unsigned int reserved;
+	int num_group_info;
+	kgid_t *group_id;
+};
+
+static DECLARE_RWSEM(security_rules_lock_lha4);
+static struct list_head security_rules[SEC_RULES_HASH_SZ];
+static DECLARE_COMPLETION(irsc_completion);
+
+/**
+ * wait_for_irsc_completion() - Wait for IPC Router Security Configuration
+ *                              (IRSC) to complete
+ */
+void wait_for_irsc_completion(void)
+{
+	unsigned long rem_jiffies;
+
+	do {
+		rem_jiffies = wait_for_completion_timeout
+				(&irsc_completion,
+				 msecs_to_jiffies(IRSC_COMPLETION_TIMEOUT_MS));
+		if (rem_jiffies)
+			return;
+		pr_err("%s: waiting for IPC Security Conf.\n", __func__);
+	} while (1);
+}
+
+/**
+ * signal_irsc_completion() - Signal the completion of IRSC
+ */
+void signal_irsc_completion(void)
+{
+	complete_all(&irsc_completion);
+}
+
+/**
+ * check_permissions() - Check whether the process has permissions to
+ *                      create an interface handle with IPC Router
+ *
+ * @return: true if the process has permissions, else false.
+ */
+int check_permissions(void)
+{
+	int rc = 0;
+
+	if (capable(CAP_NET_RAW) || capable(CAP_NET_BIND_SERVICE))
+		rc = 1;
+	return rc;
+}
+EXPORT_SYMBOL(check_permissions);
+
+/**
+ * msm_ipc_config_sec_rules() - Add a security rule to the database
+ * @arg: Pointer to the buffer containing the rule.
+ *
+ * @return: 0 if successfully added, < 0 for error.
+ *
+ * A security rule is defined using a <Service_ID: Group_ID> tuple. The rule
+ * implies that, in order to send a QMI message to service Service_ID, a
+ * user-space process must belong to the Linux group Group_ID.
+ */
+int msm_ipc_config_sec_rules(void *arg)
+{
+	struct config_sec_rules_args sec_rules_arg;
+	struct security_rule *rule, *temp_rule;
+	int key;
+	size_t kgroup_info_sz;
+	int ret;
+	size_t group_info_sz;
+	gid_t *group_id = NULL;
+	int loop;
+
+	if (!uid_eq(current_euid(), GLOBAL_ROOT_UID))
+		return -EPERM;
+
+	ret = copy_from_user(&sec_rules_arg, (void *)arg,
+			     sizeof(sec_rules_arg));
+	if (ret)
+		return -EFAULT;
+
+	if (sec_rules_arg.num_group_info <= 0)
+		return -EINVAL;
+
+	if (sec_rules_arg.num_group_info > (SIZE_MAX / sizeof(gid_t))) {
+		pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+		       sizeof(gid_t), sec_rules_arg.num_group_info);
+		return -EINVAL;
+	}
+	group_info_sz = sec_rules_arg.num_group_info * sizeof(gid_t);
+
+	if (sec_rules_arg.num_group_info > (SIZE_MAX / sizeof(kgid_t))) {
+		pr_err("%s: Integer Overflow %zu * %d\n", __func__,
+		       sizeof(kgid_t), sec_rules_arg.num_group_info);
+		return -EINVAL;
+	}
+	kgroup_info_sz = sec_rules_arg.num_group_info * sizeof(kgid_t);
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
+
+	rule->group_id = kzalloc(kgroup_info_sz, GFP_KERNEL);
+	if (!rule->group_id) {
+		kfree(rule);
+		return -ENOMEM;
+	}
+
+	group_id = kzalloc(group_info_sz, GFP_KERNEL);
+	if (!group_id) {
+		kfree(rule->group_id);
+		kfree(rule);
+		return -ENOMEM;
+	}
+
+	rule->service_id = sec_rules_arg.service_id;
+	rule->instance_id = sec_rules_arg.instance_id;
+	rule->reserved = sec_rules_arg.reserved;
+	rule->num_group_info = sec_rules_arg.num_group_info;
+	ret = copy_from_user(group_id, ((void *)(arg + sizeof(sec_rules_arg))),
+			     group_info_sz);
+	if (ret) {
+		kfree(group_id);
+		kfree(rule->group_id);
+		kfree(rule);
+		return -EFAULT;
+	}
+	for (loop = 0; loop < rule->num_group_info; loop++)
+		rule->group_id[loop] = make_kgid(current_user_ns(),
+						 group_id[loop]);
+	kfree(group_id);
+
+	key = rule->service_id & (SEC_RULES_HASH_SZ - 1);
+	down_write(&security_rules_lock_lha4);
+	if (rule->service_id == ALL_SERVICE) {
+		temp_rule = list_first_entry(&security_rules[key],
+					     struct security_rule, list);
+		list_del(&temp_rule->list);
+		kfree(temp_rule->group_id);
+		kfree(temp_rule);
+	}
+	list_add_tail(&rule->list, &security_rules[key]);
+	up_write(&security_rules_lock_lha4);
+
+	if (rule->service_id == ALL_SERVICE)
+		msm_ipc_sync_default_sec_rule((void *)rule);
+	else
+		msm_ipc_sync_sec_rule(rule->service_id, rule->instance_id,
+				      (void *)rule);
+
+	return 0;
+}
+EXPORT_SYMBOL(msm_ipc_config_sec_rules);
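+
+/*
+ * Layout of the user-space buffer parsed above (derived from the
+ * copy_from_user() calls; illustrative note, not part of the original
+ * patch):
+ *
+ *	struct config_sec_rules_args	header describing the rule
+ *	gid_t group_ids[num_group_info]	immediately following the header
+ */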
+
+/**
+ * msm_ipc_add_default_rule() - Add default security rule
+ *
+ * @return: 0 on success, < 0 on error.
+ *
+ * This function is used to ensure basic security if there is no
+ * security rule defined for a service. It can be overridden by the
+ * default security rule configured from a user-space script.
+ */
+static int msm_ipc_add_default_rule(void)
+{
+	struct security_rule *rule;
+	int key;
+
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;
+
+	rule->group_id = kzalloc(sizeof(*rule->group_id), GFP_KERNEL);
+	if (!rule->group_id) {
+		kfree(rule);
+		return -ENOMEM;
+	}
+
+	rule->service_id = ALL_SERVICE;
+	rule->instance_id = ALL_INSTANCE;
+	rule->num_group_info = 1;
+	*rule->group_id = AID_NET_RAW;
+	down_write(&security_rules_lock_lha4);
+	key = (ALL_SERVICE & (SEC_RULES_HASH_SZ - 1));
+	list_add_tail(&rule->list, &security_rules[key]);
+	up_write(&security_rules_lock_lha4);
+	return 0;
+}
+
+/**
+ * msm_ipc_get_security_rule() - Get the security rule corresponding to a
+ *                               service
+ * @service_id: Service ID for which the rule has to be retrieved.
+ * @instance_id: Instance ID for which the rule has to be retrieved.
+ *
+ * @return: Returns the rule info on success, NULL on error.
+ *
+ * This function is used when the service comes up and gets registered with
+ * the IPC Router.
+ */
+void *msm_ipc_get_security_rule(u32 service_id, u32 instance_id)
+{
+	int key;
+	struct security_rule *rule;
+
+	key = (service_id & (SEC_RULES_HASH_SZ - 1));
+	down_read(&security_rules_lock_lha4);
+	/* Return the rule for a specific <service:instance>, if found. */
+	list_for_each_entry(rule, &security_rules[key], list) {
+		if ((rule->service_id == service_id) &&
+		    (rule->instance_id == instance_id)) {
+			up_read(&security_rules_lock_lha4);
+			return (void *)rule;
+		}
+	}
+
+	/* Return the rule for a specific service, if found. */
+	list_for_each_entry(rule, &security_rules[key], list) {
+		if ((rule->service_id == service_id) &&
+		    (rule->instance_id == ALL_INSTANCE)) {
+			up_read(&security_rules_lock_lha4);
+			return (void *)rule;
+		}
+	}
+
+	/* Return the default rule, if no rule defined for a service. */
+	key = (ALL_SERVICE & (SEC_RULES_HASH_SZ - 1));
+	list_for_each_entry(rule, &security_rules[key], list) {
+		if ((rule->service_id == ALL_SERVICE) &&
+		    (rule->instance_id == ALL_INSTANCE)) {
+			up_read(&security_rules_lock_lha4);
+			return (void *)rule;
+		}
+	}
+	up_read(&security_rules_lock_lha4);
+	return NULL;
+}
+EXPORT_SYMBOL(msm_ipc_get_security_rule);
+
+/**
+ * msm_ipc_check_send_permissions() - Check if the sending process has
+ *                                    permissions specified as per the rule
+ * @data: Security rule to be checked.
+ *
+ * @return: true if the process has permissions, else false.
+ *
+ * This function is used to check if the currently executing process has
+ * permissions to send a message to the remote entity. The security rule
+ * corresponding to the remote entity is specified by the "data" parameter.
+ */
+int msm_ipc_check_send_permissions(void *data)
+{
+	int i;
+	struct security_rule *rule = (struct security_rule *)data;
+
+	/* Source/Sender is Root user */
+	if (uid_eq(current_euid(), GLOBAL_ROOT_UID))
+		return 1;
+
+	/* Destination has no rules defined, possibly a client. */
+	if (!rule)
+		return 1;
+
+	for (i = 0; i < rule->num_group_info; i++) {
+		if (!gid_valid(rule->group_id[i]))
+			continue;
+		if (in_egroup_p(rule->group_id[i]))
+			return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(msm_ipc_check_send_permissions);
+
+/**
+ * msm_ipc_router_security_init() - Initialize the security rule database
+ *
+ * @return: 0 if successful, < 0 for error.
+ */
+int msm_ipc_router_security_init(void)
+{
+	int i;
+
+	for (i = 0; i < SEC_RULES_HASH_SZ; i++)
+		INIT_LIST_HEAD(&security_rules[i]);
+
+	msm_ipc_add_default_rule();
+	return 0;
+}
+EXPORT_SYMBOL(msm_ipc_router_security_init);
diff --git a/net/ipc_router/ipc_router_security.h b/net/ipc_router/ipc_router_security.h
new file mode 100644
index 0000000..55e5887
--- /dev/null
+++ b/net/ipc_router/ipc_router_security.h
@@ -0,0 +1,120 @@
+/* Copyright (c) 2012-2014,2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _IPC_ROUTER_SECURITY_H
+#define _IPC_ROUTER_SECURITY_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_IPC_ROUTER_SECURITY
+#include <linux/android_aid.h>
+
+/**
+ * check_permissions() - Check whether the process has permissions to
+ *                      create an interface handle with IPC Router
+ *
+ * @return: true if the process has permissions, else false.
+ */
+int check_permissions(void);
+
+/**
+ * msm_ipc_config_sec_rules() - Add a security rule to the database
+ * @arg: Pointer to the buffer containing the rule.
+ *
+ * @return: 0 if successfully added, < 0 for error.
+ *
+ * A security rule is defined using a <Service_ID: Group_ID> tuple. The rule
+ * implies that, in order to send a QMI message to service Service_ID, a
+ * user-space process must belong to the Linux group Group_ID.
+ */
+int msm_ipc_config_sec_rules(void *arg);
+
+/**
+ * msm_ipc_get_security_rule() - Get the security rule corresponding to a
+ *                               service
+ * @service_id: Service ID for which the rule has to be retrieved.
+ * @instance_id: Instance ID for which the rule has to be retrieved.
+ *
+ * @return: Returns the rule info on success, NULL on error.
+ *
+ * This function is used when the service comes up and gets registered with
+ * the IPC Router.
+ */
+void *msm_ipc_get_security_rule(u32 service_id, u32 instance_id);
+
+/**
+ * msm_ipc_check_send_permissions() - Check if the sending process has
+ *                                    permissions specified as per the rule
+ * @data: Security rule to be checked.
+ *
+ * @return: true if the process has permissions, else false.
+ *
+ * This function is used to check if the currently executing process has
+ * permissions to send a message to the remote entity. The security rule
+ * corresponding to the remote entity is specified by the "data" parameter.
+ */
+int msm_ipc_check_send_permissions(void *data);
+
+/**
+ * msm_ipc_router_security_init() - Initialize the security rule database
+ *
+ * @return: 0 if successful, < 0 for error.
+ */
+int msm_ipc_router_security_init(void);
+
+/**
+ * wait_for_irsc_completion() - Wait for IPC Router Security Configuration
+ *                              (IRSC) to complete
+ */
+void wait_for_irsc_completion(void);
+
+/**
+ * signal_irsc_completion() - Signal the completion of IRSC
+ */
+void signal_irsc_completion(void);
+
+#else
+
+static inline int check_permissions(void)
+{
+	return 1;
+}
+
+static inline int msm_ipc_config_sec_rules(void *arg)
+{
+	return -ENODEV;
+}
+
+static inline void *msm_ipc_get_security_rule(u32 service_id,
+					      u32 instance_id)
+{
+	return NULL;
+}
+
+static inline int msm_ipc_check_send_permissions(void *data)
+{
+	return 1;
+}
+
+static inline int msm_ipc_router_security_init(void)
+{
+	return 0;
+}
+
+static inline void wait_for_irsc_completion(void) { }
+
+static inline void signal_irsc_completion(void) { }
+
+#endif
+#endif
diff --git a/net/ipc_router/ipc_router_socket.c b/net/ipc_router/ipc_router_socket.c
new file mode 100644
index 0000000..a84fc11
--- /dev/null
+++ b/net/ipc_router/ipc_router_socket.c
@@ -0,0 +1,681 @@
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/fcntl.h>
+#include <linux/gfp.h>
+#include <linux/msm_ipc.h>
+#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/slab.h>
+#include <linux/kmemleak.h>
+#include <linux/ipc_logging.h>
+#include <linux/string.h>
+#include <linux/atomic.h>
+#include <linux/ipc_router.h>
+
+#include <net/sock.h>
+
+#include "ipc_router_private.h"
+#include "ipc_router_security.h"
+
+#define msm_ipc_sk(sk) ((struct msm_ipc_sock *)(sk))
+#define msm_ipc_sk_port(sk) ((struct msm_ipc_port *)(msm_ipc_sk(sk)->port))
+
+#ifndef SIZE_MAX
+#define SIZE_MAX ((size_t)-1)
+#endif
+
+static int sockets_enabled;
+static struct proto msm_ipc_proto;
+static const struct proto_ops msm_ipc_proto_ops;
+static RAW_NOTIFIER_HEAD(ipcrtr_af_init_chain);
+static DEFINE_MUTEX(ipcrtr_af_init_lock);
+
+static struct sk_buff_head *msm_ipc_router_build_msg(struct msghdr *m,
+						     size_t total_len)
+{
+	struct sk_buff_head *msg_head;
+	struct sk_buff *msg;
+	int first = 1;
+	int last = 1;
+	size_t data_size = 0;
+	size_t alloc_size, align_size;
+	void *data;
+	size_t total_copied_size = 0, copied_size;
+
+	if (iov_iter_count(&m->msg_iter) == total_len)
+		data_size = total_len;
+
+	if (!data_size)
+		return NULL;
+	align_size = ALIGN_SIZE(data_size);
+
+	msg_head = kmalloc(sizeof(*msg_head), GFP_KERNEL);
+	if (!msg_head) {
+		IPC_RTR_ERR("%s: cannot allocate skb_head\n", __func__);
+		return NULL;
+	}
+	skb_queue_head_init(msg_head);
+
+	while (total_copied_size < total_len) {
+		alloc_size = data_size;
+		if (first)
+			alloc_size += IPC_ROUTER_HDR_SIZE;
+		if (last)
+			alloc_size += align_size;
+
+		msg = alloc_skb(alloc_size, GFP_KERNEL);
+		if (!msg) {
+			if (alloc_size <= (PAGE_SIZE / 2)) {
+				IPC_RTR_ERR("%s: cannot allocate skb\n",
+					    __func__);
+				goto msg_build_failure;
+			}
+			data_size = data_size / 2;
+			last = 0;
+			continue;
+		}
+
+		if (first) {
+			skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
+			first = 0;
+		}
+
+		data = skb_put(msg, data_size);
+		copied_size = copy_from_iter(msg->data, data_size,
+					     &m->msg_iter);
+		if (copied_size != data_size) {
+			IPC_RTR_ERR("%s: copy_from_iter failed %zu %zu %zu\n",
+				    __func__, alloc_size, data_size,
+				    copied_size);
+			kfree_skb(msg);
+			goto msg_build_failure;
+		}
+		skb_queue_tail(msg_head, msg);
+		total_copied_size += data_size;
+		data_size = total_len - total_copied_size;
+		last = 1;
+	}
+	return msg_head;
+
+msg_build_failure:
+	while (!skb_queue_empty(msg_head)) {
+		msg = skb_dequeue(msg_head);
+		kfree_skb(msg);
+	}
+	kfree(msg_head);
+	return NULL;
+}
+
+static int msm_ipc_router_extract_msg(struct msghdr *m,
+				      struct rr_packet *pkt)
+{
+	struct sockaddr_msm_ipc *addr;
+	struct rr_header_v1 *hdr;
+	struct sk_buff *temp;
+	union rr_control_msg *ctl_msg;
+	int offset = 0, data_len = 0, copy_len, copied_len;
+
+	if (!m || !pkt) {
+		IPC_RTR_ERR("%s: Invalid pointers passed\n", __func__);
+		return -EINVAL;
+	}
+	addr = (struct sockaddr_msm_ipc *)m->msg_name;
+
+	hdr = &pkt->hdr;
+	if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_RESUME_TX)) {
+		temp = skb_peek(pkt->pkt_fragment_q);
+		ctl_msg = (union rr_control_msg *)(temp->data);
+		addr->family = AF_MSM_IPC;
+		addr->address.addrtype = MSM_IPC_ADDR_ID;
+		addr->address.addr.port_addr.node_id = ctl_msg->cli.node_id;
+		addr->address.addr.port_addr.port_id = ctl_msg->cli.port_id;
+		m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
+		return offset;
+	}
+	if (addr && (hdr->type == IPC_ROUTER_CTRL_CMD_DATA)) {
+		addr->family = AF_MSM_IPC;
+		addr->address.addrtype = MSM_IPC_ADDR_ID;
+		addr->address.addr.port_addr.node_id = hdr->src_node_id;
+		addr->address.addr.port_addr.port_id = hdr->src_port_id;
+		m->msg_namelen = sizeof(struct sockaddr_msm_ipc);
+	}
+
+	data_len = hdr->size;
+	skb_queue_walk(pkt->pkt_fragment_q, temp) {
+		copy_len = data_len < temp->len ? data_len : temp->len;
+		copied_len = copy_to_iter(temp->data, copy_len, &m->msg_iter);
+		if (copy_len != copied_len) {
+			IPC_RTR_ERR("%s: Copy to user failed\n", __func__);
+			return -EFAULT;
+		}
+		offset += copy_len;
+		data_len -= copy_len;
+	}
+	return offset;
+}
+
+static int msm_ipc_router_create(struct net *net,
+				 struct socket *sock,
+				 int protocol,
+				 int kern)
+{
+	struct sock *sk;
+	struct msm_ipc_port *port_ptr;
+
+	if (unlikely(protocol != 0)) {
+		IPC_RTR_ERR("%s: Protocol not supported\n", __func__);
+		return -EPROTONOSUPPORT;
+	}
+
+	switch (sock->type) {
+	case SOCK_DGRAM:
+		break;
+	default:
+		IPC_RTR_ERR("%s: Protocol type not supported\n", __func__);
+		return -EPROTOTYPE;
+	}
+
+	sk = sk_alloc(net, AF_MSM_IPC, GFP_KERNEL, &msm_ipc_proto, kern);
+	if (!sk) {
+		IPC_RTR_ERR("%s: sk_alloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	sock->ops = &msm_ipc_proto_ops;
+	sock_init_data(sock, sk);
+	sk->sk_data_ready = NULL;
+	sk->sk_write_space = ipc_router_dummy_write_space;
+	sk->sk_rcvtimeo = DEFAULT_RCV_TIMEO;
+	sk->sk_sndtimeo = DEFAULT_SND_TIMEO;
+
+	port_ptr = msm_ipc_router_create_raw_port(sk, NULL, NULL);
+	if (!port_ptr) {
+		IPC_RTR_ERR("%s: port_ptr alloc failed\n", __func__);
+		sock_put(sk);
+		sock->sk = NULL;
+		return -ENOMEM;
+	}
+
+	port_ptr->check_send_permissions = msm_ipc_check_send_permissions;
+	msm_ipc_sk(sk)->port = port_ptr;
+	msm_ipc_sk(sk)->default_node_vote_info = NULL;
+
+	return 0;
+}
+
+int msm_ipc_router_bind(struct socket *sock, struct sockaddr *uaddr,
+			int uaddr_len)
+{
+	struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)uaddr;
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	if (!check_permissions()) {
+		IPC_RTR_ERR("%s: %s does not have permissions\n",
+			    __func__, current->comm);
+		return -EPERM;
+	}
+
+	if (!uaddr_len) {
+		IPC_RTR_ERR("%s: Invalid address length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (addr->family != AF_MSM_IPC) {
+		IPC_RTR_ERR("%s: Address family is incorrect\n", __func__);
+		return -EAFNOSUPPORT;
+	}
+
+	if (addr->address.addrtype != MSM_IPC_ADDR_NAME) {
+		IPC_RTR_ERR("%s: Address type is incorrect\n", __func__);
+		return -EINVAL;
+	}
+
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr)
+		return -ENODEV;
+
+	if (!msm_ipc_sk(sk)->default_node_vote_info)
+		msm_ipc_sk(sk)->default_node_vote_info =
+			msm_ipc_load_default_node();
+	lock_sock(sk);
+
+	ret = msm_ipc_router_register_server(port_ptr, &addr->address);
+
+	release_sock(sk);
+	return ret;
+}
+
+static int ipc_router_connect(struct socket *sock, struct sockaddr *uaddr,
+			      int uaddr_len, int flags)
+{
+	struct sockaddr_msm_ipc *addr = (struct sockaddr_msm_ipc *)uaddr;
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	if (uaddr_len <= 0) {
+		IPC_RTR_ERR("%s: Invalid address length\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!addr) {
+		IPC_RTR_ERR("%s: Invalid address\n", __func__);
+		return -EINVAL;
+	}
+
+	if (addr->family != AF_MSM_IPC) {
+		IPC_RTR_ERR("%s: Address family is incorrect\n", __func__);
+		return -EAFNOSUPPORT;
+	}
+
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr)
+		return -ENODEV;
+
+	lock_sock(sk);
+	ret = ipc_router_set_conn(port_ptr, &addr->address);
+	release_sock(sk);
+	return ret;
+}
+
+static int msm_ipc_router_sendmsg(struct socket *sock,
+				  struct msghdr *m, size_t total_len)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct sockaddr_msm_ipc *dest = (struct sockaddr_msm_ipc *)m->msg_name;
+	struct sk_buff_head *msg;
+	int ret;
+	struct msm_ipc_addr dest_addr = {0};
+	long timeout;
+
+	if (dest) {
+		if (m->msg_namelen < sizeof(*dest) ||
+		    dest->family != AF_MSM_IPC)
+			return -EINVAL;
+		memcpy(&dest_addr, &dest->address, sizeof(dest_addr));
+	} else {
+		if (port_ptr->conn_status == NOT_CONNECTED)
+			return -EDESTADDRREQ;
+		if (port_ptr->conn_status < CONNECTION_RESET)
+			return -ENETRESET;
+		memcpy(&dest_addr.addr.port_addr, &port_ptr->dest_addr,
+		       sizeof(struct msm_ipc_port_addr));
+		dest_addr.addrtype = MSM_IPC_ADDR_ID;
+	}
+
+	if (total_len > MAX_IPC_PKT_SIZE)
+		return -EINVAL;
+
+	lock_sock(sk);
+	timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+	msg = msm_ipc_router_build_msg(m, total_len);
+	if (!msg) {
+		IPC_RTR_ERR("%s: Msg build failure\n", __func__);
+		ret = -ENOMEM;
+		goto out_sendmsg;
+	}
+	kmemleak_not_leak(msg);
+
+	if (port_ptr->type == CLIENT_PORT)
+		wait_for_irsc_completion();
+	ret = msm_ipc_router_send_to(port_ptr, msg, &dest_addr, timeout);
+	if (ret != total_len) {
+		if (ret < 0) {
+			if (ret != -EAGAIN)
+				IPC_RTR_ERR("%s: Send_to failure %d\n",
+					    __func__, ret);
+			msm_ipc_router_free_skb(msg);
+		} else {
+			ret = -EFAULT;
+		}
+	}
+
+out_sendmsg:
+	release_sock(sk);
+	return ret;
+}
+
+static int msm_ipc_router_recvmsg(struct socket *sock,
+				  struct msghdr *m, size_t buf_len, int flags)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	struct rr_packet *pkt;
+	long timeout;
+	int ret;
+
+	lock_sock(sk);
+	if (!buf_len) {
+		if (flags & MSG_PEEK)
+			ret = msm_ipc_router_get_curr_pkt_size(port_ptr);
+		else
+			ret = -EINVAL;
+		release_sock(sk);
+		return ret;
+	}
+	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+
+	ret = msm_ipc_router_rx_data_wait(port_ptr, timeout);
+	if (ret) {
+		release_sock(sk);
+		if (ret == -ENOMSG)
+			m->msg_namelen = 0;
+		return ret;
+	}
+
+	ret = msm_ipc_router_read(port_ptr, &pkt, buf_len);
+	if (ret <= 0 || !pkt) {
+		release_sock(sk);
+		return ret;
+	}
+
+	ret = msm_ipc_router_extract_msg(m, pkt);
+	release_pkt(pkt);
+	release_sock(sk);
+	return ret;
+}
+
+static int msm_ipc_router_ioctl(struct socket *sock,
+				unsigned int cmd, unsigned long arg)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	struct server_lookup_args server_arg;
+	struct msm_ipc_server_info *srv_info = NULL;
+	unsigned int n;
+	size_t srv_info_sz = 0;
+	int ret;
+
+	if (!sk)
+		return -EINVAL;
+
+	lock_sock(sk);
+	port_ptr = msm_ipc_sk_port(sock->sk);
+	if (!port_ptr) {
+		release_sock(sk);
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case IPC_ROUTER_IOCTL_GET_VERSION:
+		n = IPC_ROUTER_V1;
+		ret = put_user(n, (unsigned int *)arg);
+		break;
+
+	case IPC_ROUTER_IOCTL_GET_MTU:
+		n = (MAX_IPC_PKT_SIZE - IPC_ROUTER_HDR_SIZE);
+		ret = put_user(n, (unsigned int *)arg);
+		break;
+
+	case IPC_ROUTER_IOCTL_GET_CURR_PKT_SIZE:
+		ret = msm_ipc_router_get_curr_pkt_size(port_ptr);
+		break;
+
+	case IPC_ROUTER_IOCTL_LOOKUP_SERVER:
+		if (!msm_ipc_sk(sk)->default_node_vote_info)
+			msm_ipc_sk(sk)->default_node_vote_info =
+				msm_ipc_load_default_node();
+
+		ret = copy_from_user(&server_arg, (void *)arg,
+				     sizeof(server_arg));
+		if (ret) {
+			ret = -EFAULT;
+			break;
+		}
+
+		if (server_arg.num_entries_in_array < 0) {
+			ret = -EINVAL;
+			break;
+		}
+		if (server_arg.num_entries_in_array) {
+			if (server_arg.num_entries_in_array >
+				(SIZE_MAX / sizeof(*srv_info))) {
+				IPC_RTR_ERR("%s: Integer Overflow %zu * %d\n",
+					    __func__, sizeof(*srv_info),
+					    server_arg.num_entries_in_array);
+				ret = -EINVAL;
+				break;
+			}
+			srv_info_sz = server_arg.num_entries_in_array *
+					sizeof(*srv_info);
+			srv_info = kmalloc(srv_info_sz, GFP_KERNEL);
+			if (!srv_info) {
+				ret = -ENOMEM;
+				break;
+			}
+		}
+		ret = msm_ipc_router_lookup_server_name
+				(&server_arg.port_name, srv_info,
+				server_arg.num_entries_in_array,
+				server_arg.lookup_mask);
+		if (ret < 0) {
+			IPC_RTR_ERR("%s: Server not found\n", __func__);
+			ret = -ENODEV;
+			kfree(srv_info);
+			break;
+		}
+		server_arg.num_entries_found = ret;
+
+		ret = copy_to_user((void *)arg, &server_arg,
+				   sizeof(server_arg));
+
+		n = min(server_arg.num_entries_found,
+			server_arg.num_entries_in_array);
+
+		if (ret == 0 && n) {
+			ret = copy_to_user((void *)(arg + sizeof(server_arg)),
+					   srv_info, n * sizeof(*srv_info));
+		}
+
+		if (ret)
+			ret = -EFAULT;
+		kfree(srv_info);
+		break;
+
+	case IPC_ROUTER_IOCTL_BIND_CONTROL_PORT:
+		ret = msm_ipc_router_bind_control_port(port_ptr);
+		break;
+
+	case IPC_ROUTER_IOCTL_CONFIG_SEC_RULES:
+		ret = msm_ipc_config_sec_rules((void *)arg);
+		if (ret != -EPERM)
+			port_ptr->type = IRSC_PORT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+	release_sock(sk);
+	return ret;
+}
+
+static unsigned int msm_ipc_router_poll(struct file *file, struct socket *sock,
+					poll_table *wait)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr;
+	u32 mask = 0;
+
+	if (!sk)
+		return -EINVAL;
+
+	port_ptr = msm_ipc_sk_port(sk);
+	if (!port_ptr)
+		return -EINVAL;
+
+	poll_wait(file, &port_ptr->port_rx_wait_q, wait);
+
+	if (!list_empty(&port_ptr->port_rx_q))
+		mask |= (POLLRDNORM | POLLIN);
+
+	if (port_ptr->conn_status == CONNECTION_RESET)
+		mask |= (POLLHUP | POLLERR);
+
+	return mask;
+}
+
+static int msm_ipc_router_close(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct msm_ipc_port *port_ptr = msm_ipc_sk_port(sk);
+	int ret;
+
+	lock_sock(sk);
+	ret = msm_ipc_router_close_port(port_ptr);
+	msm_ipc_unload_default_node(msm_ipc_sk(sk)->default_node_vote_info);
+	release_sock(sk);
+	sock_put(sk);
+	sock->sk = NULL;
+
+	return ret;
+}
+
+/**
+ * register_ipcrtr_af_init_notifier() - Register for ipc router socket
+ *				address family initialization callback
+ * @nb: Notifier block which will be notified when address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int register_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	int ret;
+
+	if (!nb)
+		return -EINVAL;
+	mutex_lock(&ipcrtr_af_init_lock);
+	if (sockets_enabled)
+		nb->notifier_call(nb, IPCRTR_AF_INIT, NULL);
+	ret = raw_notifier_chain_register(&ipcrtr_af_init_chain, nb);
+	mutex_unlock(&ipcrtr_af_init_lock);
+	return ret;
+}
+EXPORT_SYMBOL(register_ipcrtr_af_init_notifier);
+
+/**
+ * unregister_ipcrtr_af_init_notifier() - Unregister for ipc router socket
+ *				address family initialization callback
+ * @nb: Notifier block which will be notified once address family is
+ *	initialized.
+ *
+ * Return: 0 on success, standard error code otherwise.
+ */
+int unregister_ipcrtr_af_init_notifier(struct notifier_block *nb)
+{
+	int ret;
+
+	if (!nb)
+		return -EINVAL;
+	ret = raw_notifier_chain_unregister(&ipcrtr_af_init_chain, nb);
+	return ret;
+}
+EXPORT_SYMBOL(unregister_ipcrtr_af_init_notifier);
+
+static const struct net_proto_family msm_ipc_family_ops = {
+	.owner		= THIS_MODULE,
+	.family		= AF_MSM_IPC,
+	.create		= msm_ipc_router_create
+};
+
+static const struct proto_ops msm_ipc_proto_ops = {
+	.family			= AF_MSM_IPC,
+	.owner			= THIS_MODULE,
+	.release		= msm_ipc_router_close,
+	.bind			= msm_ipc_router_bind,
+	.connect		= ipc_router_connect,
+	.socketpair		= sock_no_socketpair,
+	.accept			= sock_no_accept,
+	.getname		= sock_no_getname,
+	.poll			= msm_ipc_router_poll,
+	.ioctl			= msm_ipc_router_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= msm_ipc_router_ioctl,
+#endif
+	.listen			= sock_no_listen,
+	.shutdown		= sock_no_shutdown,
+	.setsockopt		= sock_no_setsockopt,
+	.getsockopt		= sock_no_getsockopt,
+#ifdef CONFIG_COMPAT
+	.compat_setsockopt	= sock_no_setsockopt,
+	.compat_getsockopt	= sock_no_getsockopt,
+#endif
+	.sendmsg		= msm_ipc_router_sendmsg,
+	.recvmsg		= msm_ipc_router_recvmsg,
+	.mmap			= sock_no_mmap,
+	.sendpage		= sock_no_sendpage,
+};
+
+static struct proto msm_ipc_proto = {
+	.name           = "MSM_IPC",
+	.owner          = THIS_MODULE,
+	.obj_size       = sizeof(struct msm_ipc_sock),
+};
+
+int msm_ipc_router_init_sockets(void)
+{
+	int ret;
+
+	ret = proto_register(&msm_ipc_proto, 1);
+	if (ret) {
+		IPC_RTR_ERR("%s: Failed to register MSM_IPC protocol type\n",
+			    __func__);
+		goto out_init_sockets;
+	}
+
+	ret = sock_register(&msm_ipc_family_ops);
+	if (ret) {
+		IPC_RTR_ERR("%s: Failed to register MSM_IPC socket type\n",
+			    __func__);
+		proto_unregister(&msm_ipc_proto);
+		goto out_init_sockets;
+	}
+
+	mutex_lock(&ipcrtr_af_init_lock);
+	sockets_enabled = 1;
+	raw_notifier_call_chain(&ipcrtr_af_init_chain,
+				IPCRTR_AF_INIT, NULL);
+	mutex_unlock(&ipcrtr_af_init_lock);
+out_init_sockets:
+	return ret;
+}
+
+void msm_ipc_router_exit_sockets(void)
+{
+	if (!sockets_enabled)
+		return;
+
+	sock_unregister(msm_ipc_family_ops.family);
+	proto_unregister(&msm_ipc_proto);
+	mutex_lock(&ipcrtr_af_init_lock);
+	sockets_enabled = 0;
+	raw_notifier_call_chain(&ipcrtr_af_init_chain,
+				IPCRTR_AF_DEINIT, NULL);
+	mutex_unlock(&ipcrtr_af_init_lock);
+}
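
For context, a hedged sketch of how another kernel module might consume the notifier API exported above (register_ipcrtr_af_init_notifier() / unregister_ipcrtr_af_init_notifier() and the IPCRTR_AF_INIT / IPCRTR_AF_DEINIT events). This is illustrative only, not part of the patch, and assumes the declarations are available from <linux/ipc_router.h>, which this file already includes.

/* Illustrative client of the ipcrtr AF-init notifier; names beginning with
 * "example_" are hypothetical. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/ipc_router.h>

static int example_ipcrtr_af_cb(struct notifier_block *nb,
				unsigned long event, void *data)
{
	switch (event) {
	case IPCRTR_AF_INIT:
		pr_info("AF_MSM_IPC sockets are now available\n");
		break;
	case IPCRTR_AF_DEINIT:
		pr_info("AF_MSM_IPC sockets were torn down\n");
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_ipcrtr_af_cb,
};

static int __init example_init(void)
{
	/* If sockets are already enabled, the callback fires immediately
	 * with IPCRTR_AF_INIT (see register_ipcrtr_af_init_notifier()). */
	return register_ipcrtr_af_init_notifier(&example_nb);
}

static void __exit example_exit(void)
{
	unregister_ipcrtr_af_init_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL v2");
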
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index bc6a6c8..a8b934a 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -16,6 +16,7 @@
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+obj-$(CONFIG_SYSFS) += sysfs_net_ipv4.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 21514324..c836bfe 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -121,6 +121,19 @@
 #endif
 #include <net/l3mdev.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
 
 /* The inetsw table contains everything that inet_create needs to
  * build a new socket.
@@ -255,6 +268,9 @@
 	if (protocol < 0 || protocol >= IPPROTO_MAX)
 		return -EINVAL;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	sock->state = SS_UNCONNECTED;
 
 	/* Look for the requested type/protocol pair. */
@@ -303,8 +319,7 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern &&
-	    !ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
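
From user space, the effect of the current_has_network() check is an -EACCES from socket() for callers that are neither in the AID_INET group nor granted CAP_NET_RAW. A minimal sketch (not part of the patch) of what a caller sees on a kernel built with CONFIG_ANDROID_PARANOID_NETWORK:

/* Illustrative only: demonstrates the EACCES path added by this hunk. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0 && errno == EACCES)
		fprintf(stderr, "no network permission (not in AID_INET?): %s\n",
			strerror(errno));
	else if (fd < 0)
		perror("socket");
	else
		printf("socket created, fd=%d\n", fd);
	return 0;
}
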
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 161fc0f..121384b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -620,6 +620,7 @@
 	[RTA_FLOW]		= { .type = NLA_U32 },
 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
 	[RTA_ENCAP]		= { .type = NLA_NESTED },
+	[RTA_UID]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 61a9dee..a16b439 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -415,7 +415,7 @@
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
-			   htons(ireq->ir_num));
+			   htons(ireq->ir_num), sock_i_uid((struct sock *)sk));
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
@@ -452,7 +452,7 @@
 			   sk->sk_protocol, inet_sk_flowi_flags(sk),
 			   (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
 			   ireq->ir_loc_addr, ireq->ir_rmt_port,
-			   htons(ireq->ir_num));
+			   htons(ireq->ir_num), sock_i_uid((struct sock *)sk));
 	security_req_classify_flow(req, flowi4_to_flowi(fl4));
 	rt = ip_route_output_flow(net, fl4, sk);
 	if (IS_ERR(rt))
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 877bdb0..d24fa20 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1594,7 +1594,8 @@
 			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
 			   ip_reply_arg_flowi_flags(arg),
 			   daddr, saddr,
-			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest);
+			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
+			   arg->uid);
 	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(net, &fl4);
 	if (IS_ERR(rt))
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 205e200..a1a8859 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -789,7 +789,8 @@
 
 	flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
 			   RT_SCOPE_UNIVERSE, sk->sk_protocol,
-			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0);
+			   inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
+			   sock_i_uid(sk));
 
 	security_sk_classify_flow(sk, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_flow(net, &fl4, sk);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index ecbe5a7..65c3bc9 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -604,7 +604,8 @@
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk) |
 			    (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
-			   daddr, saddr, 0, 0);
+			   daddr, saddr, 0, 0,
+			   sock_i_uid(sk));
 
 	if (!inet->hdrincl) {
 		rfv.msg = msg;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2a57566..8cb3f52 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -507,7 +507,7 @@
 }
 EXPORT_SYMBOL(__ip_select_ident);
 
-static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void __build_flow_key(struct flowi4 *fl4, struct sock *sk,
 			     const struct iphdr *iph,
 			     int oif, u8 tos,
 			     u8 prot, u32 mark, int flow_flags)
@@ -523,11 +523,12 @@
 	flowi4_init_output(fl4, oif, mark, tos,
 			   RT_SCOPE_UNIVERSE, prot,
 			   flow_flags,
-			   iph->daddr, iph->saddr, 0, 0);
+			   iph->daddr, iph->saddr, 0, 0,
+			   sk ? sock_i_uid(sk) : GLOBAL_ROOT_UID);
 }
 
 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
-			       const struct sock *sk)
+			       struct sock *sk)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	int oif = skb->dev->ifindex;
@@ -538,7 +539,7 @@
 	__build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
 }
 
-static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
+static void build_sk_flow_key(struct flowi4 *fl4, struct sock *sk)
 {
 	const struct inet_sock *inet = inet_sk(sk);
 	const struct ip_options_rcu *inet_opt;
@@ -552,11 +553,12 @@
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
 			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
 			   inet_sk_flowi_flags(sk),
-			   daddr, inet->inet_saddr, 0, 0);
+			   daddr, inet->inet_saddr, 0, 0,
+			   sock_i_uid(sk));
 	rcu_read_unlock();
 }
 
-static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
+static void ip_rt_build_flow_key(struct flowi4 *fl4, struct sock *sk,
 				 const struct sk_buff *skb)
 {
 	if (skb)
@@ -2495,6 +2497,11 @@
 	    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
 		goto nla_put_failure;
 
+	if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
+	    nla_put_u32(skb, RTA_UID,
+			from_kuid_munged(current_user_ns(), fl4->flowi4_uid)))
+		goto nla_put_failure;
+
 	error = rt->dst.error;
 
 	if (rt_is_input_route(rt)) {
@@ -2547,6 +2554,7 @@
 	int mark;
 	struct sk_buff *skb;
 	u32 table_id = RT_TABLE_MAIN;
+	kuid_t uid;
 
 	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
 	if (err < 0)
@@ -2574,6 +2582,10 @@
 	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
 	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
 	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
+	if (tb[RTA_UID])
+		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
+	else
+		uid = (iif ? INVALID_UID : current_uid());
 
 	memset(&fl4, 0, sizeof(fl4));
 	fl4.daddr = dst;
@@ -2581,6 +2593,7 @@
 	fl4.flowi4_tos = rtm->rtm_tos;
 	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
 	fl4.flowi4_mark = mark;
+	fl4.flowi4_uid = uid;
 
 	if (iif) {
 		struct net_device *dev;
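
A hedged sketch of exercising the new RTA_UID attribute from user space over a raw NETLINK_ROUTE socket. It assumes RTA_UID is exported through the uapi <linux/rtnetlink.h> that ships with this patch series; the destination address and UID below are arbitrary example values, and the reply is not parsed.

/* Illustrative RTM_GETROUTE request carrying RTA_UID; not part of the patch. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

struct rt_request {
	struct nlmsghdr nh;
	struct rtmsg rt;
	char attrs[64];
};

static void add_attr(struct nlmsghdr *nh, int type, const void *data, int len)
{
	struct rtattr *rta = (struct rtattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));

	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	memcpy(RTA_DATA(rta), data, len);
	nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

int main(void)
{
	struct sockaddr_nl kernel_addr = { .nl_family = AF_NETLINK };
	struct rt_request req;
	struct in_addr dst;
	unsigned int uid = 10123;	/* hypothetical app UID */
	char buf[4096];
	ssize_t len;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct rtmsg));
	req.nh.nlmsg_type = RTM_GETROUTE;
	req.nh.nlmsg_flags = NLM_F_REQUEST;
	req.rt.rtm_family = AF_INET;
	req.rt.rtm_dst_len = 32;

	inet_pton(AF_INET, "8.8.8.8", &dst);
	add_attr(&req.nh, RTA_DST, &dst, sizeof(dst));
	add_attr(&req.nh, RTA_UID, &uid, sizeof(uid));

	if (sendto(fd, &req, req.nh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel_addr, sizeof(kernel_addr)) < 0)
		perror("sendto");

	/* With this patch applied, the reply also echoes RTA_UID back. */
	len = recv(fd, buf, sizeof(buf), 0);
	if (len > 0)
		printf("got %zd bytes of route reply\n", len);
	close(fd);
	return 0;
}
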
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index e3c4043..63d07b8 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -371,8 +371,9 @@
 	flowi4_init_output(&fl4, ireq->ir_iif, ireq->ir_mark,
 			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,
 			   inet_sk_flowi_flags(sk),
-			   opt->srr ? opt->faddr : ireq->ir_rmt_addr,
-			   ireq->ir_loc_addr, th->source, th->dest);
+			   (opt && opt->srr) ? opt->faddr : ireq->ir_rmt_addr,
+			   ireq->ir_loc_addr, th->source, th->dest,
+			   sock_i_uid(sk));
 	security_req_classify_flow(req, flowi4_to_flowi(&fl4));
 	rt = ip_route_output_key(sock_net(sk), &fl4);
 	if (IS_ERR(rt)) {
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 80bc36b..cf7cfa4 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -151,6 +151,21 @@
 	return ret;
 }
 
+/* Validate changes from /proc interface. */
+static int proc_tcp_default_init_rwnd(struct ctl_table *ctl, int write,
+				      void __user *buffer,
+				      size_t *lenp, loff_t *ppos)
+{
+	int old_value = *(int *)ctl->data;
+	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+	int new_value = *(int *)ctl->data;
+
+	if (write && ret == 0 && (new_value < 3 || new_value > 100))
+		*(int *)ctl->data = old_value;
+
+	return ret;
+}
+
 static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
 				       void __user *buffer, size_t *lenp, loff_t *ppos)
 {
@@ -624,6 +639,13 @@
 		.proc_handler	= proc_dointvec_ms_jiffies,
 	},
 	{
+		.procname       = "tcp_default_init_rwnd",
+		.data           = &sysctl_tcp_default_init_rwnd,
+		.maxlen         = sizeof(int),
+		.mode           = 0644,
+		.proc_handler   = proc_tcp_default_init_rwnd
+	},
+	{
 		.procname	= "icmp_msgs_per_sec",
 		.data		= &sysctl_icmp_msgs_per_sec,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/sysfs_net_ipv4.c b/net/ipv4/sysfs_net_ipv4.c
new file mode 100644
index 0000000..0cbbf10
--- /dev/null
+++ b/net/ipv4/sysfs_net_ipv4.c
@@ -0,0 +1,88 @@
+/*
+ * net/ipv4/sysfs_net_ipv4.c
+ *
+ * sysfs-based networking knobs (so we can, unlike with sysctl, control perms)
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kobject.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <net/tcp.h>
+
+#define CREATE_IPV4_FILE(_name, _var) \
+static ssize_t _name##_show(struct kobject *kobj, \
+			    struct kobj_attribute *attr, char *buf) \
+{ \
+	return sprintf(buf, "%d\n", _var); \
+} \
+static ssize_t _name##_store(struct kobject *kobj, \
+			     struct kobj_attribute *attr, \
+			     const char *buf, size_t count) \
+{ \
+	int val, ret; \
+	ret = sscanf(buf, "%d", &val); \
+	if (ret != 1) \
+		return -EINVAL; \
+	if (val < 0) \
+		return -EINVAL; \
+	_var = val; \
+	return count; \
+} \
+static struct kobj_attribute _name##_attr = \
+	__ATTR(_name, 0644, _name##_show, _name##_store)
+
+CREATE_IPV4_FILE(tcp_wmem_min, sysctl_tcp_wmem[0]);
+CREATE_IPV4_FILE(tcp_wmem_def, sysctl_tcp_wmem[1]);
+CREATE_IPV4_FILE(tcp_wmem_max, sysctl_tcp_wmem[2]);
+
+CREATE_IPV4_FILE(tcp_rmem_min, sysctl_tcp_rmem[0]);
+CREATE_IPV4_FILE(tcp_rmem_def, sysctl_tcp_rmem[1]);
+CREATE_IPV4_FILE(tcp_rmem_max, sysctl_tcp_rmem[2]);
+
+static struct attribute *ipv4_attrs[] = {
+	&tcp_wmem_min_attr.attr,
+	&tcp_wmem_def_attr.attr,
+	&tcp_wmem_max_attr.attr,
+	&tcp_rmem_min_attr.attr,
+	&tcp_rmem_def_attr.attr,
+	&tcp_rmem_max_attr.attr,
+	NULL
+};
+
+static struct attribute_group ipv4_attr_group = {
+	.attrs = ipv4_attrs,
+};
+
+static __init int sysfs_ipv4_init(void)
+{
+	struct kobject *ipv4_kobject;
+	int ret;
+
+	ipv4_kobject = kobject_create_and_add("ipv4", kernel_kobj);
+	if (!ipv4_kobject)
+		return -ENOMEM;
+
+	ret = sysfs_create_group(ipv4_kobject, &ipv4_attr_group);
+	if (ret) {
+		kobject_put(ipv4_kobject);
+		return ret;
+	}
+
+	return 0;
+}
+
+subsys_initcall(sysfs_ipv4_init);
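
Because the kobject is created under kernel_kobj, the attributes appear as /sys/kernel/ipv4/tcp_{r,w}mem_{min,def,max}. A small illustrative reader/writer (not part of the patch), to be run as root:

/* Illustrative only: mirrors the _show/_store handlers above. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/ipv4/tcp_wmem_min";
	FILE *f = fopen(path, "r+");
	int val;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &val) != 1) {
		fclose(f);
		return 1;
	}
	printf("%s = %d\n", path, val);

	/* The _store handler rejects negative values with -EINVAL; anything
	 * >= 0 is written straight into sysctl_tcp_wmem[0]. */
	rewind(f);
	fprintf(f, "%d\n", val);
	fclose(f);
	return 0;
}
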
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a27b9c0..a823737 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -100,6 +100,7 @@
 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
 int sysctl_tcp_early_retrans __read_mostly = 3;
 int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
+int sysctl_tcp_default_init_rwnd __read_mostly = TCP_INIT_CWND * 2;
 
 #define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
 #define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 896e9df..cd8e189 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -188,7 +188,7 @@
 	 * (RFC 3517, Section 4, NextSeg() rule (2)). Further place a
 	 * limit when mss is larger than 1460.
 	 */
-	u32 init_rwnd = TCP_INIT_CWND * 2;
+	u32 init_rwnd = sysctl_tcp_default_init_rwnd;
 
 	if (mss > 1460)
 		init_rwnd = max((1460 * init_rwnd) / mss, 2U);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 5bab6c3..3e6daef2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1019,7 +1019,8 @@
 		flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
 				   RT_SCOPE_UNIVERSE, sk->sk_protocol,
 				   flow_flags,
-				   faddr, saddr, dport, inet->inet_sport);
+				   faddr, saddr, dport, inet->inet_sport,
+				   sock_i_uid(sk));
 
 		security_sk_classify_flow(sk, flowi4_to_flowi(fl4));
 		rt = ip_route_output_flow(net, fl4, sk);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 4bc5ba3..e03efc6 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -226,6 +226,7 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -272,6 +273,7 @@
 	.accept_ra_rt_info_max_plen = 0,
 #endif
 #endif
+	.accept_ra_rt_table	= 0,
 	.proxy_ndp		= 0,
 	.accept_source_route	= 0,	/* we do not accept RH0 by default. */
 	.disable_ipv6		= 0,
@@ -2141,6 +2143,16 @@
 		return addrconf_ifid_ieee1394(eui, dev);
 	case ARPHRD_TUNNEL6:
 		return addrconf_ifid_ip6tnl(eui, dev);
+	case ARPHRD_RAWIP: {
+		struct in6_addr lladdr;
+
+		if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
+			get_random_bytes(eui, 8);
+		else
+			memcpy(eui, lladdr.s6_addr + 8, 8);
+
+		return 0;
+	}
 	}
 	return -1;
 }
@@ -2200,6 +2212,31 @@
 		ipv6_regen_rndid(idev);
 }
 
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table) {
+	/* Determines into what table to put autoconf PIO/RIO/default routes
+	 * learned on this device.
+	 *
+	 * - If 0, use the same table for every device. This puts routes into
+	 *   one of RT6_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+	 *   (but note that these three are currently all equal to
+	 *   RT6_TABLE_MAIN).
+	 * - If > 0, use the specified table.
+	 * - If < 0, put routes into table dev->ifindex + (-rt_table).
+	 */
+	struct inet6_dev *idev = in6_dev_get(dev);
+	u32 table;
+	int sysctl = idev->cnf.accept_ra_rt_table;
+	if (sysctl == 0) {
+		table = default_table;
+	} else if (sysctl > 0) {
+		table = (u32) sysctl;
+	} else {
+		table = (unsigned) dev->ifindex + (-sysctl);
+	}
+	in6_dev_put(idev);
+	return table;
+}
+
 /*
  *	Add prefix route.
  */
@@ -2209,7 +2246,7 @@
 		      unsigned long expires, u32 flags)
 {
 	struct fib6_config cfg = {
-		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
+		.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
 		.fc_metric = IP6_RT_PRIO_ADDRCONF,
 		.fc_ifindex = dev->ifindex,
 		.fc_expires = expires,
@@ -2242,7 +2279,7 @@
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
+	u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
 
 	table = fib6_get_table(dev_net(dev), tb_id);
 	if (!table)
@@ -3183,7 +3220,8 @@
 	    (dev->type != ARPHRD_IEEE1394) &&
 	    (dev->type != ARPHRD_TUNNEL6) &&
 	    (dev->type != ARPHRD_6LOWPAN) &&
-	    (dev->type != ARPHRD_NONE)) {
+	    (dev->type != ARPHRD_NONE) &&
+	    (dev->type != ARPHRD_RAWIP)) {
 		/* Alas, we support only Ethernet autoconfiguration. */
 		return;
 	}
@@ -4928,6 +4966,7 @@
 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
 #endif
+	array[DEVCONF_ACCEPT_RA_RT_TABLE] = cnf->accept_ra_rt_table;
 	array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
 	array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
@@ -5924,6 +5963,13 @@
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
+	{
+		.procname	= "accept_ra_rt_table",
+		.data		= &ipv6_devconf.accept_ra_rt_table,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+	},
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
 	{
 		.procname	= "optimistic_dad",
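
The table-selection rule documented in addrconf_rt_table() can be restated as a tiny stand-alone sketch (illustrative only, not kernel code); RT_TABLE_MAIN (254) stands in for the RT6_TABLE_* defaults mentioned in the comment above:

/* Same mapping as addrconf_rt_table(): 0 keeps the default table, a positive
 * sysctl value pins one fixed table, a negative value gives each interface
 * its own table at ifindex + (-sysctl). */
#include <stdio.h>

static unsigned int ra_route_table(int accept_ra_rt_table, int ifindex,
				   unsigned int default_table)
{
	if (accept_ra_rt_table == 0)
		return default_table;
	if (accept_ra_rt_table > 0)
		return (unsigned int)accept_ra_rt_table;
	return (unsigned int)ifindex + (unsigned int)(-accept_ra_rt_table);
}

int main(void)
{
	printf("%u\n", ra_route_table(0, 5, 254));     /* 254: shared main table */
	printf("%u\n", ra_route_table(97, 5, 254));    /* 97: one fixed table    */
	printf("%u\n", ra_route_table(-1000, 5, 254)); /* 1005: per-device table */
	return 0;
}
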
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 46ad699..e29162d 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -65,6 +65,20 @@
 #include <asm/uaccess.h>
 #include <linux/mroute6.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+
+static inline int current_has_network(void)
+{
+	return in_egroup_p(AID_INET) || capable(CAP_NET_RAW);
+}
+#else
+static inline int current_has_network(void)
+{
+	return 1;
+}
+#endif
+
 #include "ip6_offload.h"
 
 MODULE_AUTHOR("Cast of dozens");
@@ -121,6 +135,9 @@
 	if (protocol < 0 || protocol >= IPPROTO_MAX)
 		return -EINVAL;
 
+	if (!current_has_network())
+		return -EACCES;
+
 	/* Look for the requested type/protocol pair. */
 lookup_protocol:
 	err = -ESOCKTNOSUPPORT;
@@ -167,8 +184,7 @@
 	}
 
 	err = -EPERM;
-	if (sock->type == SOCK_RAW && !kern &&
-	    !ns_capable(net->user_ns, CAP_NET_RAW))
+	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
 		goto out_rcu_unlock;
 
 	sock->ops = answer->ops;
@@ -678,6 +694,7 @@
 		fl6.flowi6_mark = sk->sk_mark;
 		fl6.fl6_dport = inet->inet_dport;
 		fl6.fl6_sport = inet->inet_sport;
+		fl6.flowi6_uid = sock_i_uid(sk);
 		security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
 
 		rcu_read_lock();
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 0630a4d5..c52b8fc 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -664,7 +664,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, skb->dev->ifindex, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 
 	return 0;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index ccf4055..50dacc8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -54,6 +54,7 @@
 	fl6->fl6_dport = inet->inet_dport;
 	fl6->fl6_sport = inet->inet_sport;
 	fl6->flowlabel = np->flow_label;
+	fl6->flowi6_uid = sock_i_uid(sk);
 
 	if (!fl6->flowi6_oif)
 		fl6->flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 111ba55..17df6bb 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -476,7 +476,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, skb->dev->ifindex, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 
 	return 0;
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 305e2ed..477692f 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -166,15 +166,15 @@
  * to explore inner IPv6 header, eg. ICMPv6 error messages.
  *
  * If target header is found, its offset is set in *offset and return protocol
- * number. Otherwise, return -1.
+ * number. Otherwise, return -ENOENT or -EBADMSG.
  *
  * If the first fragment doesn't contain the final protocol header or
  * NEXTHDR_NONE it is considered invalid.
  *
  * Note that non-1st fragment is special case that "the protocol number
  * of last header" is "next header" field in Fragment header. In this case,
- * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
- * isn't NULL.
+ * *offset is meaningless. If fragoff is not NULL, the fragment offset is
+ * stored in *fragoff; if it is NULL, return -EINVAL.
  *
  * if flags is not NULL and it's a fragment, then the frag flag
  * IP6_FH_F_FRAG will be set. If it's an AH header, the
@@ -253,9 +253,12 @@
 				if (target < 0 &&
 				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
 				     hp->nexthdr == NEXTHDR_NONE)) {
-					if (fragoff)
+					if (fragoff) {
 						*fragoff = _frag_off;
-					return hp->nexthdr;
+						return hp->nexthdr;
+					} else {
+						return -EINVAL;
+					}
 				}
 				if (!found)
 					return -ENOENT;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 2772004..15db375 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -92,7 +92,7 @@
 	struct net *net = dev_net(skb->dev);
 
 	if (type == ICMPV6_PKT_TOOBIG)
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	else if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, skb->dev->ifindex, 0);
 
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 532c3ef..dd6f8aa 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -88,6 +88,7 @@
 	fl6->flowi6_mark = ireq->ir_mark;
 	fl6->fl6_dport = ireq->ir_rmt_port;
 	fl6->fl6_sport = htons(ireq->ir_num);
+	fl6->flowi6_uid = sock_i_uid((struct sock *)sk);
 	security_req_classify_flow(req, flowi6_to_flowi(fl6));
 
 	dst = ip6_dst_lookup_flow(sk, fl6, final_p);
@@ -136,6 +137,7 @@
 	fl6->flowi6_mark = sk->sk_mark;
 	fl6->fl6_sport = inet->inet_sport;
 	fl6->fl6_dport = inet->inet_dport;
+	fl6->flowi6_uid = sock_i_uid(sk);
 	security_sk_classify_flow(sk, flowi6_to_flowi(fl6));
 
 	rcu_read_lock();
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index c299c1e..d58480a 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -610,7 +610,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, skb->dev->ifindex, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 
 	return 0;
diff --git a/net/ipv6/ipcomp6.c b/net/ipv6/ipcomp6.c
index 1b9316e..b247bac 100644
--- a/net/ipv6/ipcomp6.c
+++ b/net/ipv6/ipcomp6.c
@@ -76,7 +76,7 @@
 	if (type == NDISC_REDIRECT)
 		ip6_redirect(skb, net, skb->dev->ifindex, 0);
 	else
-		ip6_update_pmtu(skb, net, info, 0, 0);
+		ip6_update_pmtu(skb, net, info, 0, 0, INVALID_UID);
 	xfrm_state_put(x);
 
 	return 0;
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 66e2d9d..505c442 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -113,6 +113,7 @@
 	fl6.daddr = *daddr;
 	fl6.flowi6_oif = oif;
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sock_i_uid(sk);
 	fl6.fl6_icmp_type = user_icmph.icmp6_type;
 	fl6.fl6_icmp_code = user_icmph.icmp6_code;
 	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 054a1d8..f7b0b59 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -774,6 +774,7 @@
 	memset(&fl6, 0, sizeof(fl6));
 
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sock_i_uid(sk);
 
 	ipc6.hlimit = -1;
 	ipc6.tclass = -1;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1b57e11..77e6547 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -100,15 +100,12 @@
 static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr,
-					   struct net_device *dev,
-					   unsigned int pref);
-static struct rt6_info *rt6_get_route_info(struct net *net,
+					   const struct in6_addr *gwaddr, unsigned int pref);
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr,
-					   struct net_device *dev);
+					   const struct in6_addr *gwaddr);
 #endif
 
 struct uncached_list {
@@ -760,7 +757,6 @@
 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
 		  const struct in6_addr *gwaddr)
 {
-	struct net *net = dev_net(dev);
 	struct route_info *rinfo = (struct route_info *) opt;
 	struct in6_addr prefix_buf, *prefix;
 	unsigned int pref;
@@ -805,8 +801,7 @@
 	if (rinfo->prefix_len == 0)
 		rt = rt6_get_dflt_router(gwaddr, dev);
 	else
-		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
-					gwaddr, dev);
+		rt = rt6_get_route_info(dev, prefix, rinfo->prefix_len, gwaddr);
 
 	if (rt && !lifetime) {
 		ip6_del_rt(rt);
@@ -814,8 +809,7 @@
 	}
 
 	if (!rt && lifetime)
-		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
-					dev, pref);
+		rt = rt6_add_route_info(dev, prefix, rinfo->prefix_len, gwaddr, pref);
 	else if (rt)
 		rt->rt6i_flags = RTF_ROUTEINFO |
 				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
@@ -1408,7 +1402,7 @@
 }
 
 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
-		     int oif, u32 mark)
+		     int oif, u32 mark, kuid_t uid)
 {
 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
 	struct dst_entry *dst;
@@ -1420,6 +1414,7 @@
 	fl6.daddr = iph->daddr;
 	fl6.saddr = iph->saddr;
 	fl6.flowlabel = ip6_flowinfo(iph);
+	fl6.flowi6_uid = uid;
 
 	dst = ip6_route_output(net, NULL, &fl6);
 	if (!dst->error)
@@ -1433,7 +1428,7 @@
 	struct dst_entry *dst;
 
 	ip6_update_pmtu(skb, sock_net(sk), mtu,
-			sk->sk_bound_dev_if, sk->sk_mark);
+			sk->sk_bound_dev_if, sk->sk_mark, sock_i_uid(sk));
 
 	dst = __sk_dst_get(sk);
 	if (!dst || !dst->obsolete ||
@@ -2330,18 +2325,16 @@
 }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-static struct rt6_info *rt6_get_route_info(struct net *net,
+static struct rt6_info *rt6_get_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr,
-					   struct net_device *dev)
+					   const struct in6_addr *gwaddr)
 {
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
-	int ifindex = dev->ifindex;
 	struct fib6_node *fn;
 	struct rt6_info *rt = NULL;
 	struct fib6_table *table;
 
-	table = fib6_get_table(net, tb_id);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_INFO));
 	if (!table)
 		return NULL;
 
@@ -2351,7 +2344,7 @@
 		goto out;
 
 	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->dst.dev->ifindex != ifindex)
+		if (rt->dst.dev->ifindex != dev->ifindex)
 			continue;
 		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
 			continue;
@@ -2365,11 +2358,9 @@
 	return rt;
 }
 
-static struct rt6_info *rt6_add_route_info(struct net *net,
+static struct rt6_info *rt6_add_route_info(struct net_device *dev,
 					   const struct in6_addr *prefix, int prefixlen,
-					   const struct in6_addr *gwaddr,
-					   struct net_device *dev,
-					   unsigned int pref)
+					   const struct in6_addr *gwaddr, unsigned int pref)
 {
 	struct fib6_config cfg = {
 		.fc_metric	= IP6_RT_PRIO_USER,
@@ -2379,10 +2370,10 @@
 				  RTF_UP | RTF_PREF(pref),
 		.fc_nlinfo.portid = 0,
 		.fc_nlinfo.nlh = NULL,
-		.fc_nlinfo.nl_net = net,
+		.fc_nlinfo.nl_net = dev_net(dev),
 	};
 
-	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
+	cfg.fc_table = l3mdev_fib_table_by_index(dev_net(dev), dev->ifindex) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
 	cfg.fc_dst = *prefix;
 	cfg.fc_gateway = *gwaddr;
 
@@ -2392,17 +2383,17 @@
 
 	ip6_route_add(&cfg);
 
-	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
+	return rt6_get_route_info(dev, prefix, prefixlen, gwaddr);
 }
 #endif
 
 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
 {
-	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
 	struct rt6_info *rt;
 	struct fib6_table *table;
 
-	table = fib6_get_table(dev_net(dev), tb_id);
+	table = fib6_get_table(dev_net(dev),
+			       addrconf_rt_table(dev, RT6_TABLE_MAIN));
 	if (!table)
 		return NULL;
 
@@ -2424,7 +2415,7 @@
 				     unsigned int pref)
 {
 	struct fib6_config cfg = {
-		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
+		.fc_table	= l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
 		.fc_metric	= IP6_RT_PRIO_USER,
 		.fc_ifindex	= dev->ifindex,
 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -2447,43 +2438,17 @@
 	return rt6_get_dflt_router(gwaddr, dev);
 }
 
-static void __rt6_purge_dflt_routers(struct fib6_table *table)
-{
-	struct rt6_info *rt;
 
-restart:
-	read_lock_bh(&table->tb6_lock);
-	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
-		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
-		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
-			dst_hold(&rt->dst);
-			read_unlock_bh(&table->tb6_lock);
-			ip6_del_rt(rt);
-			goto restart;
-		}
-	}
-	read_unlock_bh(&table->tb6_lock);
-
-	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+int rt6_addrconf_purge(struct rt6_info *rt, void *arg) {
+	if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+	    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2))
+		return -1;
+	return 0;
 }
 
 void rt6_purge_dflt_routers(struct net *net)
 {
-	struct fib6_table *table;
-	struct hlist_head *head;
-	unsigned int h;
-
-	rcu_read_lock();
-
-	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
-		head = &net->ipv6.fib_table_hash[h];
-		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
-			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
-				__rt6_purge_dflt_routers(table);
-		}
-	}
-
-	rcu_read_unlock();
+	fib6_clean_all(net, rt6_addrconf_purge, NULL);
 }
 
 static void rtmsg_to_fib6_config(struct net *net,
@@ -2801,6 +2766,7 @@
 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
 	[RTA_ENCAP]		= { .type = NLA_NESTED },
 	[RTA_EXPIRES]		= { .type = NLA_U32 },
+	[RTA_UID]		= { .type = NLA_U32 },
 };
 
 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
@@ -3375,6 +3341,11 @@
 	if (tb[RTA_MARK])
 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
 
+	if (tb[RTA_UID])
+		fl6.flowi6_uid = make_kuid(current_user_ns(),
+					   nla_get_u32(tb[RTA_UID]));
+	else
+		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
 	if (iif) {
 		struct net_device *dev;
 		int flags = 0;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 59c4839..7042046 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -227,6 +227,7 @@
 		fl6.flowi6_mark = ireq->ir_mark;
 		fl6.fl6_dport = ireq->ir_rmt_port;
 		fl6.fl6_sport = inet_sk(sk)->inet_sport;
+		fl6.flowi6_uid = sock_i_uid(sk);
 		security_req_classify_flow(req, flowi6_to_flowi(&fl6));
 
 		dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index b9f1fee..6e24ed2 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -233,6 +233,7 @@
 	fl6.flowi6_mark = sk->sk_mark;
 	fl6.fl6_dport = usin->sin6_port;
 	fl6.fl6_sport = inet->inet_sport;
+	fl6.flowi6_uid = sock_i_uid(sk);
 
 	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
 	final_p = fl6_update_dst(&fl6, opt, &final);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e4a8000..d541bf1 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1156,6 +1156,7 @@
 		fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
 
 	fl6.flowi6_mark = sk->sk_mark;
+	fl6.flowi6_uid = sock_i_uid(sk);
 	sockc.tsflags = sk->sk_tsflags;
 
 	if (msg->msg_controllen) {
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e8d56d9..177d3ae 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1316,6 +1316,8 @@
 	based on who created the socket: the user or group. It is also
 	possible to check whether a socket actually exists.
 
+	Conflicts with '"quota, tag, uid" match'
+
 config NETFILTER_XT_MATCH_POLICY
 	tristate 'IPsec "policy" match support'
 	depends on XFRM
@@ -1349,6 +1351,22 @@
 
 	  To compile it as a module, choose M here.  If unsure, say N.
 
+config NETFILTER_XT_MATCH_QTAGUID
+	bool '"quota, tag, owner" match and stats support'
+	depends on NETFILTER_XT_MATCH_SOCKET
+	depends on NETFILTER_XT_MATCH_OWNER=n
+	help
+	  This option replaces the `owner' match. In addition to matching
+	  on uid, it keeps stats based on a tag assigned to a socket.
+	  The full tag is comprised of a UID and an accounting tag.
+	  The tags are assignable to sockets from user space (e.g. a download
+	  manager can assign the socket to another UID for accounting).
+	  Stats and control are done via /proc/net/xt_qtaguid/.
+	  It replaces owner as it takes the same arguments, but should
+	  really be recognized by the iptables tool.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_QUOTA
 	tristate '"quota" match support'
 	depends on NETFILTER_ADVANCED
@@ -1359,6 +1377,29 @@
 	  If you want to compile it as a module, say M here and read
 	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
 
+config NETFILTER_XT_MATCH_QUOTA2
+	tristate '"quota2" match support'
+	depends on NETFILTER_ADVANCED
+	help
+	  This option adds a `quota2' match, which allows matching on a
+	  byte counter correctly and not per CPU.
+	  It allows naming the quotas.
+	  This is based on http://xtables-addons.git.sourceforge.net
+
+	  If you want to compile it as a module, say M here and read
+	  <file:Documentation/kbuild/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+	bool '"quota2" Netfilter LOG support'
+	depends on NETFILTER_XT_MATCH_QUOTA2
+	default n
+	help
+	  This option allows `quota2' to log ONCE when a quota limit
+	  is passed. It logs via NETLINK using the NETLINK_NFLOG family.
+	  It logs similarly to how ipt_ULOG would without data.
+
+	  If unsure, say `N'.
+
 config NETFILTER_XT_MATCH_RATEEST
 	tristate '"rateest" match support'
 	depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index c23c3c8..54ba5aa 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -167,7 +167,9 @@
 obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QTAGUID) += xt_qtaguid_print.o xt_qtaguid.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
 obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index daf45da..4a2d853 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -5,6 +5,7 @@
  * After timer expires a kevent will be sent.
  *
  * Copyright (C) 2004, 2010 Nokia Corporation
+ *
  * Written by Timo Teras <ext-timo.teras@nokia.com>
  *
  * Converted to x_tables and reworked for upstream inclusion
@@ -38,8 +39,16 @@
 #include <linux/netfilter/xt_IDLETIMER.h>
 #include <linux/kdev_t.h>
 #include <linux/kobject.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <linux/sysfs.h>
+#include <linux/rtc.h>
+#include <linux/time.h>
+#include <linux/math64.h>
+#include <linux/suspend.h>
+#include <linux/notifier.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
 
 struct idletimer_tg_attr {
 	struct attribute attr;
@@ -55,14 +64,110 @@
 	struct kobject *kobj;
 	struct idletimer_tg_attr attr;
 
+	struct timespec delayed_timer_trigger;
+	struct timespec last_modified_timer;
+	struct timespec last_suspend_time;
+	struct notifier_block pm_nb;
+
+	int timeout;
 	unsigned int refcnt;
+	bool work_pending;
+	bool send_nl_msg;
+	bool active;
+	uid_t uid;
 };
 
 static LIST_HEAD(idletimer_tg_list);
 static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
 
 static struct kobject *idletimer_tg_kobj;
 
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+		struct timespec *ts)
+{
+	bool state;
+	struct timespec temp;
+	spin_lock_bh(&timestamp_lock);
+	timer->work_pending = false;
+	if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+			timer->delayed_timer_trigger.tv_sec != 0) {
+		state = false;
+		temp.tv_sec = timer->timeout;
+		temp.tv_nsec = 0;
+		if (timer->delayed_timer_trigger.tv_sec != 0) {
+			temp = timespec_add(timer->delayed_timer_trigger, temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+			timer->delayed_timer_trigger.tv_sec = 0;
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		} else {
+			temp = timespec_add(timer->last_modified_timer, temp);
+			ts->tv_sec = temp.tv_sec;
+			ts->tv_nsec = temp.tv_nsec;
+		}
+	} else {
+		state = timer->active;
+	}
+	spin_unlock_bh(&timestamp_lock);
+	return state;
+}
+
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+	char iface_msg[NLMSG_MAX_SIZE];
+	char state_msg[NLMSG_MAX_SIZE];
+	char timestamp_msg[NLMSG_MAX_SIZE];
+	char uid_msg[NLMSG_MAX_SIZE];
+	char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
+	int res;
+	struct timespec ts;
+	uint64_t time_ns;
+	bool state;
+
+	res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+		       iface);
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+
+	get_monotonic_boottime(&ts);
+	state = check_for_delayed_trigger(timer, &ts);
+	res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+			state ? "active" : "inactive");
+
+	if (NLMSG_MAX_SIZE <= res) {
+		pr_err("message too long (%d)", res);
+		return;
+	}
+
+	if (state) {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)", res);
+	} else {
+		res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
+		if (NLMSG_MAX_SIZE <= res)
+			pr_err("message too long (%d)", res);
+	}
+
+	time_ns = timespec_to_ns(&ts);
+	res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+	if (NLMSG_MAX_SIZE <= res) {
+		timestamp_msg[0] = '\0';
+		pr_err("message too long (%d)", res);
+	}
+
+	pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
+		 timestamp_msg, uid_msg);
+	kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+	return;
+
+
+}
+
 static
 struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
 {
@@ -83,6 +188,7 @@
 {
 	struct idletimer_tg *timer;
 	unsigned long expires = 0;
+	unsigned long now = jiffies;
 
 	mutex_lock(&list_mutex);
 
@@ -92,11 +198,15 @@
 
 	mutex_unlock(&list_mutex);
 
-	if (time_after(expires, jiffies))
+	if (time_after(expires, now))
 		return sprintf(buf, "%u\n",
-			       jiffies_to_msecs(expires - jiffies) / 1000);
+			       jiffies_to_msecs(expires - now) / 1000);
 
-	return sprintf(buf, "0\n");
+	if (timer->send_nl_msg)
+		return sprintf(buf, "0 %d\n",
+			jiffies_to_msecs(now - expires) / 1000);
+	else
+		return sprintf(buf, "0\n");
 }
 
 static void idletimer_tg_work(struct work_struct *work)
@@ -105,6 +215,9 @@
 						  work);
 
 	sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+	if (timer->send_nl_msg)
+		notify_netlink_uevent(timer->attr.attr.name, timer);
 }
 
 static void idletimer_tg_expired(unsigned long data)
@@ -112,8 +225,55 @@
 	struct idletimer_tg *timer = (struct idletimer_tg *) data;
 
 	pr_debug("timer %s expired\n", timer->attr.attr.name);
-
+	spin_lock_bh(&timestamp_lock);
+	timer->active = false;
+	timer->work_pending = true;
 	schedule_work(&timer->work);
+	spin_unlock_bh(&timestamp_lock);
+}
+
+static int idletimer_resume(struct notifier_block *notifier,
+		unsigned long pm_event, void *unused)
+{
+	struct timespec ts;
+	unsigned long time_diff, now = jiffies;
+	struct idletimer_tg *timer = container_of(notifier,
+			struct idletimer_tg, pm_nb);
+	if (!timer)
+		return NOTIFY_DONE;
+	switch (pm_event) {
+	case PM_SUSPEND_PREPARE:
+		get_monotonic_boottime(&timer->last_suspend_time);
+		break;
+	case PM_POST_SUSPEND:
+		spin_lock_bh(&timestamp_lock);
+		if (!timer->active) {
+			spin_unlock_bh(&timestamp_lock);
+			break;
+		}
+		/* Since jiffies are not updated while suspended, "now"
+		 * represents the time the system entered suspend. */
+		if (time_after(timer->timer.expires, now)) {
+			get_monotonic_boottime(&ts);
+			ts = timespec_sub(ts, timer->last_suspend_time);
+			time_diff = timespec_to_jiffies(&ts);
+			if (timer->timer.expires > (time_diff + now)) {
+				mod_timer_pending(&timer->timer,
+						(timer->timer.expires - time_diff));
+			} else {
+				del_timer(&timer->timer);
+				timer->timer.expires = 0;
+				timer->active = false;
+				timer->work_pending = true;
+				schedule_work(&timer->work);
+			}
+		}
+		spin_unlock_bh(&timestamp_lock);
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_DONE;
 }
 
 static int idletimer_tg_create(struct idletimer_tg_info *info)
@@ -146,6 +306,21 @@
 	setup_timer(&info->timer->timer, idletimer_tg_expired,
 		    (unsigned long) info->timer);
 	info->timer->refcnt = 1;
+	info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true;
+	info->timer->active = true;
+	info->timer->timeout = info->timeout;
+
+	info->timer->delayed_timer_trigger.tv_sec = 0;
+	info->timer->delayed_timer_trigger.tv_nsec = 0;
+	info->timer->work_pending = false;
+	info->timer->uid = 0;
+	get_monotonic_boottime(&info->timer->last_modified_timer);
+
+	info->timer->pm_nb.notifier_call = idletimer_resume;
+	ret = register_pm_notifier(&info->timer->pm_nb);
+	if (ret)
+		printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+				__func__, ret);
 
 	mod_timer(&info->timer->timer,
 		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
@@ -162,6 +337,42 @@
 	return ret;
 }
 
+static void reset_timer(const struct idletimer_tg_info *info,
+			struct sk_buff *skb)
+{
+	unsigned long now = jiffies;
+	struct idletimer_tg *timer = info->timer;
+	bool timer_prev;
+
+	spin_lock_bh(&timestamp_lock);
+	timer_prev = timer->active;
+	timer->active = true;
+	/* timer_prev is used to guard against overflow in time_before() */
+	if (!timer_prev || time_before(timer->timer.expires, now)) {
+		pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+				timer->timer.expires, now);
+
+		/* Stores the uid responsible for waking up the radio */
+		if (skb && (skb->sk)) {
+			timer->uid = from_kuid_munged(current_user_ns(),
+						sock_i_uid(skb->sk));
+		}
+
+		/* checks if there is a pending inactive notification */
+		if (timer->work_pending)
+			timer->delayed_timer_trigger = timer->last_modified_timer;
+		else {
+			timer->work_pending = true;
+			schedule_work(&timer->work);
+		}
+	}
+
+	get_monotonic_boottime(&timer->last_modified_timer);
+	mod_timer(&timer->timer,
+			msecs_to_jiffies(info->timeout * 1000) + now);
+	spin_unlock_bh(&timestamp_lock);
+}
+
 /*
  * The actual xt_tables plugin.
  */
@@ -169,15 +380,23 @@
 					 const struct xt_action_param *par)
 {
 	const struct idletimer_tg_info *info = par->targinfo;
+	unsigned long now = jiffies;
 
 	pr_debug("resetting timer %s, timeout period %u\n",
 		 info->label, info->timeout);
 
 	BUG_ON(!info->timer);
 
-	mod_timer(&info->timer->timer,
-		  msecs_to_jiffies(info->timeout * 1000) + jiffies);
+	info->timer->active = true;
 
+	if (time_before(info->timer->timer.expires, now)) {
+		schedule_work(&info->timer->work);
+		pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+			 info->label, info->timer->timer.expires, now);
+	}
+
+	/* TODO: Avoid modifying timers on each packet */
+	reset_timer(info, skb);
 	return XT_CONTINUE;
 }
 
@@ -186,7 +405,7 @@
 	struct idletimer_tg_info *info = par->targinfo;
 	int ret;
 
-	pr_debug("checkentry targinfo%s\n", info->label);
+	pr_debug("checkentry targinfo %s\n", info->label);
 
 	if (info->timeout == 0) {
 		pr_debug("timeout value is zero\n");
@@ -205,9 +424,7 @@
 	info->timer = __idletimer_tg_find_by_label(info->label);
 	if (info->timer) {
 		info->timer->refcnt++;
-		mod_timer(&info->timer->timer,
-			  msecs_to_jiffies(info->timeout * 1000) + jiffies);
-
+		reset_timer(info, NULL);
 		pr_debug("increased refcnt of timer %s to %u\n",
 			 info->label, info->timer->refcnt);
 	} else {
@@ -220,6 +437,7 @@
 	}
 
 	mutex_unlock(&list_mutex);
+
 	return 0;
 }
 
@@ -238,11 +456,12 @@
 		del_timer_sync(&info->timer->timer);
 		cancel_work_sync(&info->timer->work);
 		sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+		unregister_pm_notifier(&info->timer->pm_nb);
 		kfree(info->timer->attr.attr.name);
 		kfree(info->timer);
 	} else {
 		pr_debug("decreased refcnt of timer %s to %u\n",
-			 info->label, info->timer->refcnt);
+		info->label, info->timer->refcnt);
 	}
 
 	mutex_unlock(&list_mutex);
@@ -250,6 +469,7 @@
 
 static struct xt_target idletimer_tg __read_mostly = {
 	.name		= "IDLETIMER",
+	.revision	= 1,
 	.family		= NFPROTO_UNSPEC,
 	.target		= idletimer_tg_target,
 	.targetsize     = sizeof(struct idletimer_tg_info),
@@ -315,3 +535,4 @@
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS("ipt_IDLETIMER");
 MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
new file mode 100644
index 0000000..3bf0c59
--- /dev/null
+++ b/net/netfilter/xt_qtaguid.c
@@ -0,0 +1,3032 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * There are run-time debug flags enabled via the debug_mask module param, or
+ * via the DEFAULT_DEBUG_MASK. See xt_qtaguid_internal.h.
+ */
+#define DEBUG
+
+#include <linux/file.h>
+#include <linux/inetdevice.h>
+#include <linux/module.h>
+#include <linux/miscdevice.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_qtaguid.h>
+#include <linux/ratelimit.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/addrconf.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#endif
+
+#include <linux/netfilter/xt_socket.h>
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+#include "../../fs/proc/internal.h"
+
+/*
+ * We only use the xt_socket funcs within a similar context to avoid unexpected
+ * return values.
+ */
+#define XT_SOCKET_SUPPORTED_HOOKS \
+	((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN))
+
+
+static const char *module_procdirname = "xt_qtaguid";
+static struct proc_dir_entry *xt_qtaguid_procdir;
+
+static unsigned int proc_iface_perms = S_IRUGO;
+module_param_named(iface_perms, proc_iface_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_stats_file;
+static unsigned int proc_stats_perms = S_IRUGO;
+module_param_named(stats_perms, proc_stats_perms, uint, S_IRUGO | S_IWUSR);
+
+static struct proc_dir_entry *xt_qtaguid_ctrl_file;
+
+/* Everybody can write. But proc_ctrl_write_limited is true by default, which
+ * limits what can be controlled. See the can_*() functions.
+ */
+static unsigned int proc_ctrl_perms = S_IRUGO | S_IWUGO;
+module_param_named(ctrl_perms, proc_ctrl_perms, uint, S_IRUGO | S_IWUSR);
+
+/* Limited by default, so the gid of the ctrl and stats proc entries
+ * will limit what can be done. See the can_*() functions.
+ */
+static bool proc_stats_readall_limited = true;
+static bool proc_ctrl_write_limited = true;
+
+module_param_named(stats_readall_limited, proc_stats_readall_limited, bool,
+		   S_IRUGO | S_IWUSR);
+module_param_named(ctrl_write_limited, proc_ctrl_write_limited, bool,
+		   S_IRUGO | S_IWUSR);
+
+/*
+ * Limit the number of active tags (via socket tags) for a given UID.
+ * Multiple processes could share the UID.
+ */
+static int max_sock_tags = DEFAULT_MAX_SOCK_TAGS;
+module_param(max_sock_tags, int, S_IRUGO | S_IWUSR);
+
+/*
+ * After the kernel has initialized this module, it is still possible
+ * to make it passive.
+ * Setting passive to Y:
+ *  - the iface stats handling will not act on notifications.
+ *  - iptables matches will never match.
+ *  - ctrl commands silently succeed.
+ *  - stats are always empty.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool module_passive;
+module_param_named(passive, module_passive, bool, S_IRUGO | S_IWUSR);
+
+/*
+ * Control how qtaguid data is tracked per proc/uid.
+ * Setting tag_tracking_passive to Y:
+ *  - don't create proc specific structs to track tags
+ *  - don't check that active tag stats exceed some limits.
+ *  - don't clean up socket tags on process exits.
+ * This is mostly useful when a bug is suspected.
+ */
+static bool qtu_proc_handling_passive;
+module_param_named(tag_tracking_passive, qtu_proc_handling_passive, bool,
+		   S_IRUGO | S_IWUSR);
+
+#define QTU_DEV_NAME "xt_qtaguid"
+
+uint qtaguid_debug_mask = DEFAULT_DEBUG_MASK;
+module_param_named(debug_mask, qtaguid_debug_mask, uint, S_IRUGO | S_IWUSR);
+
+/*---------------------------------------------------------------------------*/
+static const char *iface_stat_procdirname = "iface_stat";
+static struct proc_dir_entry *iface_stat_procdir;
+/*
+ * The iface_stat_all* will go away once userspace gets used to the new fields
+ * that have a format line.
+ */
+static const char *iface_stat_all_procfilename = "iface_stat_all";
+static struct proc_dir_entry *iface_stat_all_procfile;
+static const char *iface_stat_fmt_procfilename = "iface_stat_fmt";
+static struct proc_dir_entry *iface_stat_fmt_procfile;
+
+
+static LIST_HEAD(iface_stat_list);
+static DEFINE_SPINLOCK(iface_stat_list_lock);
+
+static struct rb_root sock_tag_tree = RB_ROOT;
+static DEFINE_SPINLOCK(sock_tag_list_lock);
+
+static struct rb_root tag_counter_set_tree = RB_ROOT;
+static DEFINE_SPINLOCK(tag_counter_set_list_lock);
+
+static struct rb_root uid_tag_data_tree = RB_ROOT;
+static DEFINE_SPINLOCK(uid_tag_data_tree_lock);
+
+static struct rb_root proc_qtu_data_tree = RB_ROOT;
+/* No proc_qtu_data_tree_lock; use uid_tag_data_tree_lock */
+
+static struct qtaguid_event_counts qtu_events;
+/*----------------------------------------------*/
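+/*
+ * UID manipulation is allowed for members of the ctrl file's group, for
+ * root (fsuid 0), when ctrl_write_limited is off, or for the owner of the
+ * ctrl proc entry.
+ */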
+static bool can_manipulate_uids(void)
+{
+	/* root pwnd */
+	return in_egroup_p(xt_qtaguid_ctrl_file->gid)
+		|| unlikely(!from_kuid(&init_user_ns, current_fsuid())) || unlikely(!proc_ctrl_write_limited)
+		|| unlikely(uid_eq(current_fsuid(), xt_qtaguid_ctrl_file->uid));
+}
+
+static bool can_impersonate_uid(kuid_t uid)
+{
+	return uid_eq(uid, current_fsuid()) || can_manipulate_uids();
+}
+
+static bool can_read_other_uid_stats(kuid_t uid)
+{
+	/* root pwnd */
+	return in_egroup_p(xt_qtaguid_stats_file->gid)
+		|| unlikely(!from_kuid(&init_user_ns, current_fsuid())) || uid_eq(uid, current_fsuid())
+		|| unlikely(!proc_stats_readall_limited)
+		|| unlikely(uid_eq(current_fsuid(), xt_qtaguid_ctrl_file->uid));
+}
+
+static inline void dc_add_byte_packets(struct data_counters *counters, int set,
+				  enum ifs_tx_rx direction,
+				  enum ifs_proto ifs_proto,
+				  int bytes,
+				  int packets)
+{
+	counters->bpc[set][direction][ifs_proto].bytes += bytes;
+	counters->bpc[set][direction][ifs_proto].packets += packets;
+}
+
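+/*
+ * Generic rb-tree helpers keyed by tag_compare(); the tag_stat,
+ * tag_counter_set and tag_ref trees below all reuse them.
+ */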
+static struct tag_node *tag_node_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct tag_node *data = rb_entry(node, struct tag_node, node);
+		int result;
+		RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+			 " node=%p data=%p\n", tag, node, data);
+		result = tag_compare(tag, data->tag);
+		RB_DEBUG("qtaguid: tag_node_tree_search(0x%llx): "
+			 " data.tag=0x%llx (uid=%u) res=%d\n",
+			 tag, data->tag, get_uid_from_tag(data->tag), result);
+		if (result < 0)
+			node = node->rb_left;
+		else if (result > 0)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void tag_node_tree_insert(struct tag_node *data, struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct tag_node *this = rb_entry(*new, struct tag_node,
+						 node);
+		int result = tag_compare(data->tag, this->tag);
+		RB_DEBUG("qtaguid: %s(): tag=0x%llx"
+			 " (uid=%u)\n", __func__,
+			 this->tag,
+			 get_uid_from_tag(this->tag));
+		parent = *new;
+		if (result < 0)
+			new = &((*new)->rb_left);
+		else if (result > 0)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static void tag_stat_tree_insert(struct tag_stat *data, struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_stat *tag_stat_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct tag_node *node = tag_node_tree_search(root, tag);
+	if (!node)
+		return NULL;
+	return rb_entry(&node->node, struct tag_stat, tn.node);
+}
+
+static void tag_counter_set_tree_insert(struct tag_counter_set *data,
+					struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_counter_set *tag_counter_set_tree_search(struct rb_root *root,
+							   tag_t tag)
+{
+	struct tag_node *node = tag_node_tree_search(root, tag);
+	if (!node)
+		return NULL;
+	return rb_entry(&node->node, struct tag_counter_set, tn.node);
+
+}
+
+static void tag_ref_tree_insert(struct tag_ref *data, struct rb_root *root)
+{
+	tag_node_tree_insert(&data->tn, root);
+}
+
+static struct tag_ref *tag_ref_tree_search(struct rb_root *root, tag_t tag)
+{
+	struct tag_node *node = tag_node_tree_search(root, tag);
+	if (!node)
+		return NULL;
+	return rb_entry(&node->node, struct tag_ref, tn.node);
+}
+
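+/*
+ * Socket tags live in a separate rb-tree keyed by the struct sock pointer
+ * value itself.
+ */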
+static struct sock_tag *sock_tag_tree_search(struct rb_root *root,
+					     const struct sock *sk)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct sock_tag *data = rb_entry(node, struct sock_tag,
+						 sock_node);
+		if (sk < data->sk)
+			node = node->rb_left;
+		else if (sk > data->sk)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void sock_tag_tree_insert(struct sock_tag *data, struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct sock_tag *this = rb_entry(*new, struct sock_tag,
+						 sock_node);
+		parent = *new;
+		if (data->sk < this->sk)
+			new = &((*new)->rb_left);
+		else if (data->sk > this->sk)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->sock_node, parent, new);
+	rb_insert_color(&data->sock_node, root);
+}
+
+static void sock_tag_tree_erase(struct rb_root *st_to_free_tree)
+{
+	struct rb_node *node;
+	struct sock_tag *st_entry;
+
+	node = rb_first(st_to_free_tree);
+	while (node) {
+		st_entry = rb_entry(node, struct sock_tag, sock_node);
+		node = rb_next(node);
+		CT_DEBUG("qtaguid: %s(): "
+			 "erase st: sk=%p tag=0x%llx (uid=%u)\n", __func__,
+			 st_entry->sk,
+			 st_entry->tag,
+			 get_uid_from_tag(st_entry->tag));
+		rb_erase(&st_entry->sock_node, st_to_free_tree);
+		sockfd_put(st_entry->socket);
+		kfree(st_entry);
+	}
+}
+
+static struct proc_qtu_data *proc_qtu_data_tree_search(struct rb_root *root,
+						       const pid_t pid)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct proc_qtu_data *data = rb_entry(node,
+						      struct proc_qtu_data,
+						      node);
+		if (pid < data->pid)
+			node = node->rb_left;
+		else if (pid > data->pid)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+static void proc_qtu_data_tree_insert(struct proc_qtu_data *data,
+				      struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct proc_qtu_data *this = rb_entry(*new,
+						      struct proc_qtu_data,
+						      node);
+		parent = *new;
+		if (data->pid < this->pid)
+			new = &((*new)->rb_left);
+		else if (data->pid > this->pid)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static void uid_tag_data_tree_insert(struct uid_tag_data *data,
+				     struct rb_root *root)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+
+	/* Figure out where to put new node */
+	while (*new) {
+		struct uid_tag_data *this = rb_entry(*new,
+						     struct uid_tag_data,
+						     node);
+		parent = *new;
+		if (data->uid < this->uid)
+			new = &((*new)->rb_left);
+		else if (data->uid > this->uid)
+			new = &((*new)->rb_right);
+		else
+			BUG();
+	}
+
+	/* Add new node and rebalance tree. */
+	rb_link_node(&data->node, parent, new);
+	rb_insert_color(&data->node, root);
+}
+
+static struct uid_tag_data *uid_tag_data_tree_search(struct rb_root *root,
+						     uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		struct uid_tag_data *data = rb_entry(node,
+						     struct uid_tag_data,
+						     node);
+		if (uid < data->uid)
+			node = node->rb_left;
+		else if (uid > data->uid)
+			node = node->rb_right;
+		else
+			return data;
+	}
+	return NULL;
+}
+
+/*
+ * Allocates a new uid_tag_data struct if needed.
+ * Returns a pointer to the found or allocated uid_tag_data.
+ * Returns a PTR_ERR on failures, and lock is not held.
+ * If found_res is not NULL:
+ *   sets *found_res to true if the entry already existed (not allocated),
+ *   sets *found_res to false if it was newly allocated.
+ */
+struct uid_tag_data *get_uid_data(uid_t uid, bool *found_res)
+{
+	struct uid_tag_data *utd_entry;
+
+	/* Look for top level uid_tag_data for the UID */
+	utd_entry = uid_tag_data_tree_search(&uid_tag_data_tree, uid);
+	DR_DEBUG("qtaguid: get_uid_data(%u) utd=%p\n", uid, utd_entry);
+
+	if (found_res)
+		*found_res = utd_entry;
+	if (utd_entry)
+		return utd_entry;
+
+	utd_entry = kzalloc(sizeof(*utd_entry), GFP_ATOMIC);
+	if (!utd_entry) {
+		pr_err("qtaguid: get_uid_data(%u): "
+		       "tag data alloc failed\n", uid);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	utd_entry->uid = uid;
+	utd_entry->tag_ref_tree = RB_ROOT;
+	uid_tag_data_tree_insert(utd_entry, &uid_tag_data_tree);
+	DR_DEBUG("qtaguid: get_uid_data(%u) new utd=%p\n", uid, utd_entry);
+	return utd_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *new_tag_ref(tag_t new_tag,
+				   struct uid_tag_data *utd_entry)
+{
+	struct tag_ref *tr_entry;
+	int res;
+
+	if (utd_entry->num_active_tags + 1 > max_sock_tags) {
+		pr_info("qtaguid: new_tag_ref(0x%llx): "
+			"tag ref alloc quota exceeded. max=%d\n",
+			new_tag, max_sock_tags);
+		res = -EMFILE;
+		goto err_res;
+
+	}
+
+	tr_entry = kzalloc(sizeof(*tr_entry), GFP_ATOMIC);
+	if (!tr_entry) {
+		pr_err("qtaguid: new_tag_ref(0x%llx): "
+		       "tag ref alloc failed\n",
+		       new_tag);
+		res = -ENOMEM;
+		goto err_res;
+	}
+	tr_entry->tn.tag = new_tag;
+	/* tr_entry->num_sock_tags  handled by caller */
+	utd_entry->num_active_tags++;
+	tag_ref_tree_insert(tr_entry, &utd_entry->tag_ref_tree);
+	DR_DEBUG("qtaguid: new_tag_ref(0x%llx): "
+		 " inserted new tag ref %p\n",
+		 new_tag, tr_entry);
+	return tr_entry;
+
+err_res:
+	return ERR_PTR(res);
+}
+
+static struct tag_ref *lookup_tag_ref(tag_t full_tag,
+				      struct uid_tag_data **utd_res)
+{
+	struct uid_tag_data *utd_entry;
+	struct tag_ref *tr_entry;
+	bool found_utd;
+	uid_t uid = get_uid_from_tag(full_tag);
+
+	DR_DEBUG("qtaguid: lookup_tag_ref(tag=0x%llx (uid=%u))\n",
+		 full_tag, uid);
+
+	utd_entry = get_uid_data(uid, &found_utd);
+	if (IS_ERR_OR_NULL(utd_entry)) {
+		if (utd_res)
+			*utd_res = utd_entry;
+		return NULL;
+	}
+
+	tr_entry = tag_ref_tree_search(&utd_entry->tag_ref_tree, full_tag);
+	if (utd_res)
+		*utd_res = utd_entry;
+	DR_DEBUG("qtaguid: lookup_tag_ref(0x%llx) utd_entry=%p tr_entry=%p\n",
+		 full_tag, utd_entry, tr_entry);
+	return tr_entry;
+}
+
+/* Never returns NULL. Either PTR_ERR or a valid ptr. */
+static struct tag_ref *get_tag_ref(tag_t full_tag,
+				   struct uid_tag_data **utd_res)
+{
+	struct uid_tag_data *utd_entry;
+	struct tag_ref *tr_entry;
+
+	DR_DEBUG("qtaguid: get_tag_ref(0x%llx)\n",
+		 full_tag);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	tr_entry = lookup_tag_ref(full_tag, &utd_entry);
+	BUG_ON(IS_ERR_OR_NULL(utd_entry));
+	if (!tr_entry)
+		tr_entry = new_tag_ref(full_tag, utd_entry);
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	if (utd_res)
+		*utd_res = utd_entry;
+	DR_DEBUG("qtaguid: get_tag_ref(0x%llx) utd=%p tr=%p\n",
+		 full_tag, utd_entry, tr_entry);
+	return tr_entry;
+}
+
+/* Checks and maybe frees the UID Tag Data entry */
+static void put_utd_entry(struct uid_tag_data *utd_entry)
+{
+	/* Are we done with the UID tag data entry? */
+	if (RB_EMPTY_ROOT(&utd_entry->tag_ref_tree) &&
+		!utd_entry->num_pqd) {
+		DR_DEBUG("qtaguid: %s(): "
+			 "erase utd_entry=%p uid=%u "
+			 "by pid=%u tgid=%u uid=%u\n", __func__,
+			 utd_entry, utd_entry->uid,
+			 current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+		BUG_ON(utd_entry->num_active_tags);
+		rb_erase(&utd_entry->node, &uid_tag_data_tree);
+		kfree(utd_entry);
+	} else {
+		DR_DEBUG("qtaguid: %s(): "
+			 "utd_entry=%p still has %d tags %d proc_qtu_data\n",
+			 __func__, utd_entry, utd_entry->num_active_tags,
+			 utd_entry->num_pqd);
+		BUG_ON(!(utd_entry->num_active_tags ||
+			 utd_entry->num_pqd));
+	}
+}
+
+/*
+ * If no sock_tags are using this tag_ref, decrements
+ * utd_entry->num_active_tags, removes tr_entry from
+ * utd_entry->tag_ref_tree and frees it.
+ */
+static void free_tag_ref_from_utd_entry(struct tag_ref *tr_entry,
+					struct uid_tag_data *utd_entry)
+{
+	DR_DEBUG("qtaguid: %s(): %p tag=0x%llx (uid=%u)\n", __func__,
+		 tr_entry, tr_entry->tn.tag,
+		 get_uid_from_tag(tr_entry->tn.tag));
+	if (!tr_entry->num_sock_tags) {
+		BUG_ON(!utd_entry->num_active_tags);
+		utd_entry->num_active_tags--;
+		rb_erase(&tr_entry->tn.node, &utd_entry->tag_ref_tree);
+		DR_DEBUG("qtaguid: %s(): erased %p\n", __func__, tr_entry);
+		kfree(tr_entry);
+	}
+}
+
+static void put_tag_ref_tree(tag_t full_tag, struct uid_tag_data *utd_entry)
+{
+	struct rb_node *node;
+	struct tag_ref *tr_entry;
+	tag_t acct_tag;
+
+	DR_DEBUG("qtaguid: %s(tag=0x%llx (uid=%u))\n", __func__,
+		 full_tag, get_uid_from_tag(full_tag));
+	acct_tag = get_atag_from_tag(full_tag);
+	node = rb_first(&utd_entry->tag_ref_tree);
+	while (node) {
+		tr_entry = rb_entry(node, struct tag_ref, tn.node);
+		node = rb_next(node);
+		if (!acct_tag || tr_entry->tn.tag == full_tag)
+			free_tag_ref_from_utd_entry(tr_entry, utd_entry);
+	}
+}
+
+static ssize_t read_proc_u64(struct file *file, char __user *buf,
+			 size_t size, loff_t *ppos)
+{
+	uint64_t *valuep = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", *valuep);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t read_proc_bool(struct file *file, char __user *buf,
+			  size_t size, loff_t *ppos)
+{
+	bool *valuep = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%u\n", *valuep);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static int get_active_counter_set(tag_t tag)
+{
+	int active_set = 0;
+	struct tag_counter_set *tcs;
+
+	MT_DEBUG("qtaguid: get_active_counter_set(tag=0x%llx)"
+		 " (uid=%u)\n",
+		 tag, get_uid_from_tag(tag));
+	/* For now we only handle UID tags for active sets */
+	tag = get_utag_from_tag(tag);
+	spin_lock_bh(&tag_counter_set_list_lock);
+	tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (tcs)
+		active_set = tcs->active_set;
+	spin_unlock_bh(&tag_counter_set_list_lock);
+	return active_set;
+}
+
+/*
+ * Find the entry for tracking the specified interface.
+ * Caller must hold iface_stat_list_lock
+ */
+static struct iface_stat *get_iface_entry(const char *ifname)
+{
+	struct iface_stat *iface_entry;
+
+	/* Find the entry tracking the specified interface */
+	if (ifname == NULL) {
+		pr_info("qtaguid: iface_stat: get() NULL device name\n");
+		return NULL;
+	}
+
+	/* Iterate over interfaces */
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		if (!strcmp(ifname, iface_entry->ifname))
+			goto done;
+	}
+	iface_entry = NULL;
+done:
+	return iface_entry;
+}
+
+/* This is for fmt2 only */
+static void pp_iface_stat_header(struct seq_file *m)
+{
+	seq_puts(m,
+		 "ifname "
+		 "total_skb_rx_bytes total_skb_rx_packets "
+		 "total_skb_tx_bytes total_skb_tx_packets "
+		 "rx_tcp_bytes rx_tcp_packets "
+		 "rx_udp_bytes rx_udp_packets "
+		 "rx_other_bytes rx_other_packets "
+		 "tx_tcp_bytes tx_tcp_packets "
+		 "tx_udp_bytes tx_udp_packets "
+		 "tx_other_bytes tx_other_packets\n"
+	);
+}
+
+static void pp_iface_stat_line(struct seq_file *m,
+			       struct iface_stat *iface_entry)
+{
+	struct data_counters *cnts;
+	int cnt_set = 0;   /* We only use one set for the device */
+	cnts = &iface_entry->totals_via_skb;
+	seq_printf(m, "%s %llu %llu %llu %llu %llu %llu %llu %llu "
+		   "%llu %llu %llu %llu %llu %llu %llu %llu\n",
+		   iface_entry->ifname,
+		   dc_sum_bytes(cnts, cnt_set, IFS_RX),
+		   dc_sum_packets(cnts, cnt_set, IFS_RX),
+		   dc_sum_bytes(cnts, cnt_set, IFS_TX),
+		   dc_sum_packets(cnts, cnt_set, IFS_TX),
+		   cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+		   cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+		   cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+}
+
+struct proc_iface_stat_fmt_info {
+	int fmt;
+};
+
+static void *iface_stat_fmt_proc_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_iface_stat_fmt_info *p = m->private;
+	loff_t n = *pos;
+
+	/*
+	 * This lock will prevent iface_stat_update() from changing active,
+	 * and in turn prevent an interface from unregistering itself.
+	 */
+	spin_lock_bh(&iface_stat_list_lock);
+
+	if (unlikely(module_passive))
+		return NULL;
+
+	if (!n && p->fmt == 2)
+		pp_iface_stat_header(m);
+
+	return seq_list_start(&iface_stat_list, n);
+}
+
+static void *iface_stat_fmt_proc_next(struct seq_file *m, void *p, loff_t *pos)
+{
+	return seq_list_next(p, &iface_stat_list, pos);
+}
+
+static void iface_stat_fmt_proc_stop(struct seq_file *m, void *p)
+{
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static int iface_stat_fmt_proc_show(struct seq_file *m, void *v)
+{
+	struct proc_iface_stat_fmt_info *p = m->private;
+	struct iface_stat *iface_entry;
+	struct rtnl_link_stats64 dev_stats, *stats;
+	struct rtnl_link_stats64 no_dev_stats = {0};
+
+
+	CT_DEBUG("qtaguid:proc iface_stat_fmt pid=%u tgid=%u uid=%u\n",
+		 current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+	iface_entry = list_entry(v, struct iface_stat, list);
+
+	if (iface_entry->active) {
+		stats = dev_get_stats(iface_entry->net_dev,
+				      &dev_stats);
+	} else {
+		stats = &no_dev_stats;
+	}
+	/*
+	 * If the meaning of the data changes, then update the fmtX
+	 * string.
+	 */
+	if (p->fmt == 1) {
+		seq_printf(m, "%s %d %llu %llu %llu %llu %llu %llu %llu %llu\n",
+			   iface_entry->ifname,
+			   iface_entry->active,
+			   iface_entry->totals_via_dev[IFS_RX].bytes,
+			   iface_entry->totals_via_dev[IFS_RX].packets,
+			   iface_entry->totals_via_dev[IFS_TX].bytes,
+			   iface_entry->totals_via_dev[IFS_TX].packets,
+			   stats->rx_bytes, stats->rx_packets,
+			   stats->tx_bytes, stats->tx_packets
+			   );
+	} else {
+		pp_iface_stat_line(m, iface_entry);
+	}
+	return 0;
+}
+
+static const struct file_operations read_u64_fops = {
+	.read		= read_proc_u64,
+	.llseek		= default_llseek,
+};
+
+static const struct file_operations read_bool_fops = {
+	.read		= read_proc_bool,
+	.llseek		= default_llseek,
+};
+
+static void iface_create_proc_worker(struct work_struct *work)
+{
+	struct proc_dir_entry *proc_entry;
+	struct iface_stat_work *isw = container_of(work, struct iface_stat_work,
+						   iface_work);
+	struct iface_stat *new_iface  = isw->iface_entry;
+
+	/* iface_entries are not deleted, so safe to manipulate. */
+	proc_entry = proc_mkdir(new_iface->ifname, iface_stat_procdir);
+	if (IS_ERR_OR_NULL(proc_entry)) {
+		pr_err("qtaguid: iface_stat: create_proc(): alloc failed.\n");
+		kfree(isw);
+		return;
+	}
+
+	new_iface->proc_ptr = proc_entry;
+
+	proc_create_data("tx_bytes", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_TX].bytes);
+	proc_create_data("rx_bytes", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_RX].bytes);
+	proc_create_data("tx_packets", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_TX].packets);
+	proc_create_data("rx_packets", proc_iface_perms, proc_entry,
+			 &read_u64_fops,
+			 &new_iface->totals_via_dev[IFS_RX].packets);
+	proc_create_data("active", proc_iface_perms, proc_entry,
+			 &read_bool_fops, &new_iface->active);
+
+	IF_DEBUG("qtaguid: iface_stat: create_proc(): done "
+		 "entry=%p dev=%s\n", new_iface, new_iface->ifname);
+	kfree(isw);
+}
+
+/*
+ * Will set the entry's active state, and
+ * update the net_dev accordingly also.
+ */
+static void _iface_stat_set_active(struct iface_stat *entry,
+				   struct net_device *net_dev,
+				   bool activate)
+{
+	if (activate) {
+		entry->net_dev = net_dev;
+		entry->active = true;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "enable tracking. rfcnt=%d\n", __func__,
+			 entry->ifname,
+			 __this_cpu_read(*net_dev->pcpu_refcnt));
+	} else {
+		entry->active = false;
+		entry->net_dev = NULL;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "disable tracking. rfcnt=%d\n", __func__,
+			 entry->ifname,
+			 __this_cpu_read(*net_dev->pcpu_refcnt));
+
+	}
+}
+
+/* Caller must hold iface_stat_list_lock */
+static struct iface_stat *iface_alloc(struct net_device *net_dev)
+{
+	struct iface_stat *new_iface;
+	struct iface_stat_work *isw;
+
+	new_iface = kzalloc(sizeof(*new_iface), GFP_ATOMIC);
+	if (new_iface == NULL) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "iface_stat alloc failed\n", net_dev->name);
+		return NULL;
+	}
+	new_iface->ifname = kstrdup(net_dev->name, GFP_ATOMIC);
+	if (new_iface->ifname == NULL) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "ifname alloc failed\n", net_dev->name);
+		kfree(new_iface);
+		return NULL;
+	}
+	spin_lock_init(&new_iface->tag_stat_list_lock);
+	new_iface->tag_stat_tree = RB_ROOT;
+	_iface_stat_set_active(new_iface, net_dev, true);
+
+	/*
+	 * ipv6 notifier chains are atomic :(
+	 * No create_proc_read_entry() for you!
+	 */
+	isw = kmalloc(sizeof(*isw), GFP_ATOMIC);
+	if (!isw) {
+		pr_err("qtaguid: iface_stat: create(%s): "
+		       "work alloc failed\n", new_iface->ifname);
+		_iface_stat_set_active(new_iface, net_dev, false);
+		kfree(new_iface->ifname);
+		kfree(new_iface);
+		return NULL;
+	}
+	isw->iface_entry = new_iface;
+	INIT_WORK(&isw->iface_work, iface_create_proc_worker);
+	schedule_work(&isw->iface_work);
+	list_add(&new_iface->list, &iface_stat_list);
+	return new_iface;
+}
+
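+/*
+ * If the device's rx/tx byte counters went backwards (the driver reset its
+ * stats), fold the last known values into totals_via_dev so the accumulated
+ * totals stay monotonic.
+ */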
+static void iface_check_stats_reset_and_adjust(struct net_device *net_dev,
+					       struct iface_stat *iface)
+{
+	struct rtnl_link_stats64 dev_stats, *stats;
+	bool stats_rewound;
+
+	stats = dev_get_stats(net_dev, &dev_stats);
+	/* No empty packets */
+	stats_rewound =
+		(stats->rx_bytes < iface->last_known[IFS_RX].bytes)
+		|| (stats->tx_bytes < iface->last_known[IFS_TX].bytes);
+
+	IF_DEBUG("qtaguid: %s(%s): iface=%p netdev=%p "
+		 "bytes rx/tx=%llu/%llu "
+		 "active=%d last_known=%d "
+		 "stats_rewound=%d\n", __func__,
+		 net_dev ? net_dev->name : "?",
+		 iface, net_dev,
+		 stats->rx_bytes, stats->tx_bytes,
+		 iface->active, iface->last_known_valid, stats_rewound);
+
+	if (iface->active && iface->last_known_valid && stats_rewound) {
+		pr_warn_once("qtaguid: iface_stat: %s(%s): "
+			     "iface reset its stats unexpectedly\n", __func__,
+			     net_dev->name);
+
+		iface->totals_via_dev[IFS_TX].bytes +=
+			iface->last_known[IFS_TX].bytes;
+		iface->totals_via_dev[IFS_TX].packets +=
+			iface->last_known[IFS_TX].packets;
+		iface->totals_via_dev[IFS_RX].bytes +=
+			iface->last_known[IFS_RX].bytes;
+		iface->totals_via_dev[IFS_RX].packets +=
+			iface->last_known[IFS_RX].packets;
+		iface->last_known_valid = false;
+		IF_DEBUG("qtaguid: %s(%s): iface=%p "
+			 "used last known bytes rx/tx=%llu/%llu\n", __func__,
+			 iface->ifname, iface, iface->last_known[IFS_RX].bytes,
+			 iface->last_known[IFS_TX].bytes);
+	}
+}
+
+/*
+ * Create a new entry for tracking the specified interface.
+ * Do nothing if the entry already exists.
+ * Called when an interface is configured with a valid IP address.
+ */
+static void iface_stat_create(struct net_device *net_dev,
+			      struct in_ifaddr *ifa)
+{
+	struct in_device *in_dev = NULL;
+	const char *ifname;
+	struct iface_stat *entry;
+	__be32 ipaddr = 0;
+	struct iface_stat *new_iface;
+
+	IF_DEBUG("qtaguid: iface_stat: create(%s): ifa=%p netdev=%p\n",
+		 net_dev ? net_dev->name : "?",
+		 ifa, net_dev);
+	if (!net_dev) {
+		pr_err("qtaguid: iface_stat: create(): no net dev\n");
+		return;
+	}
+
+	ifname = net_dev->name;
+	if (!ifa) {
+		in_dev = in_dev_get(net_dev);
+		if (!in_dev) {
+			pr_err("qtaguid: iface_stat: create(%s): no inet dev\n",
+			       ifname);
+			return;
+		}
+		IF_DEBUG("qtaguid: iface_stat: create(%s): in_dev=%p\n",
+			 ifname, in_dev);
+		for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
+			IF_DEBUG("qtaguid: iface_stat: create(%s): "
+				 "ifa=%p ifa_label=%s\n",
+				 ifname, ifa,
+				 ifa->ifa_label ? ifa->ifa_label : "(null)");
+			if (ifa->ifa_label && !strcmp(ifname, ifa->ifa_label))
+				break;
+		}
+	}
+
+	if (!ifa) {
+		IF_DEBUG("qtaguid: iface_stat: create(%s): no matching IP\n",
+			 ifname);
+		goto done_put;
+	}
+	ipaddr = ifa->ifa_local;
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(ifname);
+	if (entry != NULL) {
+		IF_DEBUG("qtaguid: iface_stat: create(%s): entry=%p\n",
+			 ifname, entry);
+		iface_check_stats_reset_and_adjust(net_dev, entry);
+		_iface_stat_set_active(entry, net_dev, true);
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "tracking now %d on ip=%pI4\n", __func__,
+			 entry->ifname, true, &ipaddr);
+		goto done_unlock_put;
+	}
+
+	new_iface = iface_alloc(net_dev);
+	IF_DEBUG("qtaguid: iface_stat: create(%s): done "
+		 "entry=%p ip=%pI4\n", ifname, new_iface, &ipaddr);
+done_unlock_put:
+	spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+	if (in_dev)
+		in_dev_put(in_dev);
+}
+
+static void iface_stat_create_ipv6(struct net_device *net_dev,
+				   struct inet6_ifaddr *ifa)
+{
+	struct in_device *in_dev;
+	const char *ifname;
+	struct iface_stat *entry;
+	struct iface_stat *new_iface;
+	int addr_type;
+
+	IF_DEBUG("qtaguid: iface_stat: create6(): ifa=%p netdev=%p->name=%s\n",
+		 ifa, net_dev, net_dev ? net_dev->name : "");
+	if (!net_dev) {
+		pr_err("qtaguid: iface_stat: create6(): no net dev!\n");
+		return;
+	}
+	ifname = net_dev->name;
+
+	in_dev = in_dev_get(net_dev);
+	if (!in_dev) {
+		pr_err("qtaguid: iface_stat: create6(%s): no inet dev\n",
+		       ifname);
+		return;
+	}
+
+	IF_DEBUG("qtaguid: iface_stat: create6(%s): in_dev=%p\n",
+		 ifname, in_dev);
+
+	if (!ifa) {
+		IF_DEBUG("qtaguid: iface_stat: create6(%s): no matching IP\n",
+			 ifname);
+		goto done_put;
+	}
+	addr_type = ipv6_addr_type(&ifa->addr);
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(ifname);
+	if (entry != NULL) {
+		IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+			 ifname, entry);
+		iface_check_stats_reset_and_adjust(net_dev, entry);
+		_iface_stat_set_active(entry, net_dev, true);
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "tracking now %d on ip=%pI6c\n", __func__,
+			 entry->ifname, true, &ifa->addr);
+		goto done_unlock_put;
+	}
+
+	new_iface = iface_alloc(net_dev);
+	IF_DEBUG("qtaguid: iface_stat: create6(%s): done "
+		 "entry=%p ip=%pI6c\n", ifname, new_iface, &ifa->addr);
+
+done_unlock_put:
+	spin_unlock_bh(&iface_stat_list_lock);
+done_put:
+	in_dev_put(in_dev);
+}
+
+static struct sock_tag *get_sock_stat_nl(const struct sock *sk)
+{
+	MT_DEBUG("qtaguid: get_sock_stat_nl(sk=%p)\n", sk);
+	return sock_tag_tree_search(&sock_tag_tree, sk);
+}
+
+static struct sock_tag *get_sock_stat(const struct sock *sk)
+{
+	struct sock_tag *sock_tag_entry;
+	MT_DEBUG("qtaguid: get_sock_stat(sk=%p)\n", sk);
+	if (!sk)
+		return NULL;
+	spin_lock_bh(&sock_tag_list_lock);
+	sock_tag_entry = get_sock_stat_nl(sk);
+	spin_unlock_bh(&sock_tag_list_lock);
+	return sock_tag_entry;
+}
+
+static int ipx_proto(const struct sk_buff *skb,
+		     struct xt_action_param *par)
+{
+	int thoff = 0, tproto;
+
+	switch (par->family) {
+	case NFPROTO_IPV6:
+		tproto = ipv6_find_hdr(skb, &thoff, -1, NULL, NULL);
+		if (tproto < 0)
+			MT_DEBUG("%s(): transport header not found in ipv6"
+				 " skb=%p\n", __func__, skb);
+		break;
+	case NFPROTO_IPV4:
+		tproto = ip_hdr(skb)->protocol;
+		break;
+	default:
+		tproto = IPPROTO_RAW;
+	}
+	return tproto;
+}
+
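+/*
+ * Bucket one packet's bytes into the TCP/UDP/other counters for the given
+ * counter set and direction.
+ */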
+static void
+data_counters_update(struct data_counters *dc, int set,
+		     enum ifs_tx_rx direction, int proto, int bytes)
+{
+	switch (proto) {
+	case IPPROTO_TCP:
+		dc_add_byte_packets(dc, set, direction, IFS_TCP, bytes, 1);
+		break;
+	case IPPROTO_UDP:
+		dc_add_byte_packets(dc, set, direction, IFS_UDP, bytes, 1);
+		break;
+	case IPPROTO_IP:
+	default:
+		dc_add_byte_packets(dc, set, direction, IFS_PROTO_OTHER, bytes,
+				    1);
+		break;
+	}
+}
+
+/*
+ * Update stats for the specified interface. Do nothing if the entry
+ * does not exist (when a device was never configured with an IP address).
+ * Called when a device is being unregistered.
+ */
+static void iface_stat_update(struct net_device *net_dev, bool stash_only)
+{
+	struct rtnl_link_stats64 dev_stats, *stats;
+	struct iface_stat *entry;
+
+	stats = dev_get_stats(net_dev, &dev_stats);
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(net_dev->name);
+	if (entry == NULL) {
+		IF_DEBUG("qtaguid: iface_stat: update(%s): not tracked\n",
+			 net_dev->name);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+
+	IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+		 net_dev->name, entry);
+	if (!entry->active) {
+		IF_DEBUG("qtaguid: %s(%s): already disabled\n", __func__,
+			 net_dev->name);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+
+	if (stash_only) {
+		entry->last_known[IFS_TX].bytes = stats->tx_bytes;
+		entry->last_known[IFS_TX].packets = stats->tx_packets;
+		entry->last_known[IFS_RX].bytes = stats->rx_bytes;
+		entry->last_known[IFS_RX].packets = stats->rx_packets;
+		entry->last_known_valid = true;
+		IF_DEBUG("qtaguid: %s(%s): "
+			 "dev stats stashed rx/tx=%llu/%llu\n", __func__,
+			 net_dev->name, stats->rx_bytes, stats->tx_bytes);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+	entry->totals_via_dev[IFS_TX].bytes += stats->tx_bytes;
+	entry->totals_via_dev[IFS_TX].packets += stats->tx_packets;
+	entry->totals_via_dev[IFS_RX].bytes += stats->rx_bytes;
+	entry->totals_via_dev[IFS_RX].packets += stats->rx_packets;
+	/* We don't need the last_known[] anymore */
+	entry->last_known_valid = false;
+	_iface_stat_set_active(entry, net_dev, false);
+	IF_DEBUG("qtaguid: %s(%s): "
+		 "disable tracking. rx/tx=%llu/%llu\n", __func__,
+		 net_dev->name, stats->rx_bytes, stats->tx_bytes);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Update stats for the specified interface from the skb.
+ * Do nothing if the entry does not exist
+ * (when a device was never configured with an IP address).
+ * Called on each sk.
+ */
+static void iface_stat_update_from_skb(const struct sk_buff *skb,
+				       struct xt_action_param *par)
+{
+	struct iface_stat *entry;
+	const struct net_device *el_dev;
+	enum ifs_tx_rx direction = par->in ? IFS_RX : IFS_TX;
+	int bytes = skb->len;
+	int proto;
+
+	if (!skb->dev) {
+		MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+		el_dev = par->in ? : par->out;
+	} else {
+		const struct net_device *other_dev;
+		el_dev = skb->dev;
+		other_dev = par->in ? : par->out;
+		if (el_dev != other_dev) {
+			MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+				 "par->(in/out)=%p %s\n",
+				 par->hooknum, el_dev, el_dev->name, other_dev,
+				 other_dev->name);
+		}
+	}
+
+	if (unlikely(!el_dev)) {
+		pr_err_ratelimited("qtaguid[%d]: %s(): no par->in/out?!!\n",
+				   par->hooknum, __func__);
+		BUG();
+	} else if (unlikely(!el_dev->name)) {
+		pr_err_ratelimited("qtaguid[%d]: %s(): no dev->name?!!\n",
+				   par->hooknum, __func__);
+		BUG();
+	} else {
+		proto = ipx_proto(skb, par);
+		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+			 par->hooknum, el_dev->name, el_dev->type,
+			 par->family, proto);
+	}
+
+	spin_lock_bh(&iface_stat_list_lock);
+	entry = get_iface_entry(el_dev->name);
+	if (entry == NULL) {
+		IF_DEBUG("qtaguid: iface_stat: %s(%s): not tracked\n",
+			 __func__, el_dev->name);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+
+	IF_DEBUG("qtaguid: %s(%s): entry=%p\n", __func__,
+		 el_dev->name, entry);
+
+	data_counters_update(&entry->totals_via_skb, 0, direction, proto,
+			     bytes);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static void tag_stat_update(struct tag_stat *tag_entry,
+			enum ifs_tx_rx direction, int proto, int bytes)
+{
+	int active_set;
+	active_set = get_active_counter_set(tag_entry->tn.tag);
+	MT_DEBUG("qtaguid: tag_stat_update(tag=0x%llx (uid=%u) set=%d "
+		 "dir=%d proto=%d bytes=%d)\n",
+		 tag_entry->tn.tag, get_uid_from_tag(tag_entry->tn.tag),
+		 active_set, direction, proto, bytes);
+	data_counters_update(&tag_entry->counters, active_set, direction,
+			     proto, bytes);
+	if (tag_entry->parent_counters)
+		data_counters_update(tag_entry->parent_counters, active_set,
+				     direction, proto, bytes);
+}
+
+/*
+ * Create a new entry for tracking the specified {acct_tag,uid_tag} within
+ * the interface.
+ * iface_entry->tag_stat_list_lock should be held.
+ */
+static struct tag_stat *create_if_tag_stat(struct iface_stat *iface_entry,
+					   tag_t tag)
+{
+	struct tag_stat *new_tag_stat_entry = NULL;
+	IF_DEBUG("qtaguid: iface_stat: %s(): ife=%p tag=0x%llx"
+		 " (uid=%u)\n", __func__,
+		 iface_entry, tag, get_uid_from_tag(tag));
+	new_tag_stat_entry = kzalloc(sizeof(*new_tag_stat_entry), GFP_ATOMIC);
+	if (!new_tag_stat_entry) {
+		pr_err("qtaguid: iface_stat: tag stat alloc failed\n");
+		goto done;
+	}
+	new_tag_stat_entry->tn.tag = tag;
+	tag_stat_tree_insert(new_tag_stat_entry, &iface_entry->tag_stat_tree);
+done:
+	return new_tag_stat_entry;
+}
+
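+/*
+ * Per-interface accounting for one packet: find the tag attached to the
+ * socket (or fall back to the plain uid tag), then update the matching
+ * {acct_tag, uid_tag} tag_stat, creating it and its {0, uid_tag} parent on
+ * demand so the per-uid totals are updated as well.
+ */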
+static void if_tag_stat_update(const char *ifname, uid_t uid,
+			       const struct sock *sk, enum ifs_tx_rx direction,
+			       int proto, int bytes)
+{
+	struct tag_stat *tag_stat_entry;
+	tag_t tag, acct_tag;
+	tag_t uid_tag;
+	struct data_counters *uid_tag_counters;
+	struct sock_tag *sock_tag_entry;
+	struct iface_stat *iface_entry;
+	struct tag_stat *new_tag_stat = NULL;
+	MT_DEBUG("qtaguid: if_tag_stat_update(ifname=%s "
+		"uid=%u sk=%p dir=%d proto=%d bytes=%d)\n",
+		 ifname, uid, sk, direction, proto, bytes);
+
+	spin_lock_bh(&iface_stat_list_lock);
+	iface_entry = get_iface_entry(ifname);
+	if (!iface_entry) {
+		pr_err_ratelimited("qtaguid: iface_stat: stat_update() "
+				   "%s not found\n", ifname);
+		spin_unlock_bh(&iface_stat_list_lock);
+		return;
+	}
+	/* It is ok to process data when an iface_entry is inactive */
+
+	MT_DEBUG("qtaguid: iface_stat: stat_update() dev=%s entry=%p\n",
+		 ifname, iface_entry);
+
+	/*
+	 * Look for a tagged sock.
+	 * It will have an acct_uid.
+	 */
+	sock_tag_entry = get_sock_stat(sk);
+	if (sock_tag_entry) {
+		tag = sock_tag_entry->tag;
+		acct_tag = get_atag_from_tag(tag);
+		uid_tag = get_utag_from_tag(tag);
+	} else {
+		acct_tag = make_atag_from_value(0);
+		tag = combine_atag_with_uid(acct_tag, uid);
+		uid_tag = make_tag_from_uid(uid);
+	}
+	MT_DEBUG("qtaguid: iface_stat: stat_update(): "
+		 " looking for tag=0x%llx (uid=%u) in ife=%p\n",
+		 tag, get_uid_from_tag(tag), iface_entry);
+	/* Loop over tag list under this interface for {acct_tag,uid_tag} */
+	spin_lock_bh(&iface_entry->tag_stat_list_lock);
+
+	tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+					      tag);
+	if (tag_stat_entry) {
+		/*
+		 * Updating the {acct_tag, uid_tag} entry handles both stats:
+		 * {0, uid_tag} will also get updated.
+		 */
+		tag_stat_update(tag_stat_entry, direction, proto, bytes);
+		goto unlock;
+	}
+
+	/* Loop over tag list under this interface for {0,uid_tag} */
+	tag_stat_entry = tag_stat_tree_search(&iface_entry->tag_stat_tree,
+					      uid_tag);
+	if (!tag_stat_entry) {
+		/* Here: the base uid_tag did not exist */
+		/*
+		 * No parent counters. So
+		 *  - No {0, uid_tag} stats and no {acc_tag, uid_tag} stats.
+		 */
+		new_tag_stat = create_if_tag_stat(iface_entry, uid_tag);
+		if (!new_tag_stat)
+			goto unlock;
+		uid_tag_counters = &new_tag_stat->counters;
+	} else {
+		uid_tag_counters = &tag_stat_entry->counters;
+	}
+
+	if (acct_tag) {
+		/* Create the child {acct_tag, uid_tag} and hook up parent. */
+		new_tag_stat = create_if_tag_stat(iface_entry, tag);
+		if (!new_tag_stat)
+			goto unlock;
+		new_tag_stat->parent_counters = uid_tag_counters;
+	} else {
+		/*
+		 * For new_tag_stat to be still NULL here would require:
+		 *  {0, uid_tag} exists
+		 *  and {acct_tag, uid_tag} doesn't exist
+		 *  AND acct_tag == 0.
+		 * Impossible. This reassures us that new_tag_stat
+		 * below will always be assigned.
+		 */
+		BUG_ON(!new_tag_stat);
+	}
+	tag_stat_update(new_tag_stat, direction, proto, bytes);
+unlock:
+	spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+static int iface_netdev_event_handler(struct notifier_block *nb,
+				      unsigned long event, void *ptr) {
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (unlikely(module_passive))
+		return NOTIFY_DONE;
+
+	IF_DEBUG("qtaguid: iface_stat: netdev_event(): "
+		 "ev=0x%lx/%s netdev=%p->name=%s\n",
+		 event, netdev_evt_str(event), dev, dev ? dev->name : "");
+
+	switch (event) {
+	case NETDEV_UP:
+		iface_stat_create(dev, NULL);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		iface_stat_update(dev, event == NETDEV_DOWN);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int iface_inet6addr_event_handler(struct notifier_block *nb,
+					 unsigned long event, void *ptr)
+{
+	struct inet6_ifaddr *ifa = ptr;
+	struct net_device *dev;
+
+	if (unlikely(module_passive))
+		return NOTIFY_DONE;
+
+	IF_DEBUG("qtaguid: iface_stat: inet6addr_event(): "
+		 "ev=0x%lx/%s ifa=%p\n",
+		 event, netdev_evt_str(event), ifa);
+
+	switch (event) {
+	case NETDEV_UP:
+		BUG_ON(!ifa || !ifa->idev);
+		dev = (struct net_device *)ifa->idev->dev;
+		iface_stat_create_ipv6(dev, ifa);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		BUG_ON(!ifa || !ifa->idev);
+		dev = (struct net_device *)ifa->idev->dev;
+		iface_stat_update(dev, event == NETDEV_DOWN);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static int iface_inetaddr_event_handler(struct notifier_block *nb,
+					unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = ptr;
+	struct net_device *dev;
+
+	if (unlikely(module_passive))
+		return NOTIFY_DONE;
+
+	IF_DEBUG("qtaguid: iface_stat: inetaddr_event(): "
+		 "ev=0x%lx/%s ifa=%p\n",
+		 event, netdev_evt_str(event), ifa);
+
+	switch (event) {
+	case NETDEV_UP:
+		BUG_ON(!ifa || !ifa->ifa_dev);
+		dev = ifa->ifa_dev->dev;
+		iface_stat_create(dev, ifa);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		BUG_ON(!ifa || !ifa->ifa_dev);
+		dev = ifa->ifa_dev->dev;
+		iface_stat_update(dev, event == NETDEV_DOWN);
+		atomic64_inc(&qtu_events.iface_events);
+		break;
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block iface_netdev_notifier_blk = {
+	.notifier_call = iface_netdev_event_handler,
+};
+
+static struct notifier_block iface_inetaddr_notifier_blk = {
+	.notifier_call = iface_inetaddr_event_handler,
+};
+
+static struct notifier_block iface_inet6addr_notifier_blk = {
+	.notifier_call = iface_inet6addr_event_handler,
+};
+
+static const struct seq_operations iface_stat_fmt_proc_seq_ops = {
+	.start	= iface_stat_fmt_proc_start,
+	.next	= iface_stat_fmt_proc_next,
+	.stop	= iface_stat_fmt_proc_stop,
+	.show	= iface_stat_fmt_proc_show,
+};
+
+static int proc_iface_stat_fmt_open(struct inode *inode, struct file *file)
+{
+	struct proc_iface_stat_fmt_info *s;
+
+	s = __seq_open_private(file, &iface_stat_fmt_proc_seq_ops,
+			sizeof(struct proc_iface_stat_fmt_info));
+	if (!s)
+		return -ENOMEM;
+
+	s->fmt = (uintptr_t)PDE_DATA(inode);
+	return 0;
+}
+
+static const struct file_operations proc_iface_stat_fmt_fops = {
+	.open		= proc_iface_stat_fmt_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+
+static int __init iface_stat_init(struct proc_dir_entry *parent_procdir)
+{
+	int err;
+
+	iface_stat_procdir = proc_mkdir(iface_stat_procdirname, parent_procdir);
+	if (!iface_stat_procdir) {
+		pr_err("qtaguid: iface_stat: init failed to create proc entry\n");
+		err = -1;
+		goto err;
+	}
+
+	iface_stat_all_procfile = proc_create_data(iface_stat_all_procfilename,
+						   proc_iface_perms,
+						   parent_procdir,
+						   &proc_iface_stat_fmt_fops,
+						   (void *)1 /* fmt1 */);
+	if (!iface_stat_all_procfile) {
+		pr_err("qtaguid: iface_stat: init "
+		       " failed to create stat_old proc entry\n");
+		err = -1;
+		goto err_zap_entry;
+	}
+
+	iface_stat_fmt_procfile = proc_create_data(iface_stat_fmt_procfilename,
+						   proc_iface_perms,
+						   parent_procdir,
+						   &proc_iface_stat_fmt_fops,
+						   (void *)2 /* fmt2 */);
+	if (!iface_stat_fmt_procfile) {
+		pr_err("qtaguid: iface_stat: init "
+		       " failed to create stat_all proc entry\n");
+		err = -1;
+		goto err_zap_all_stats_entry;
+	}
+
+
+	err = register_netdevice_notifier(&iface_netdev_notifier_blk);
+	if (err) {
+		pr_err("qtaguid: iface_stat: init "
+		       "failed to register dev event handler\n");
+		goto err_zap_all_stats_entries;
+	}
+	err = register_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+	if (err) {
+		pr_err("qtaguid: iface_stat: init "
+		       "failed to register ipv4 dev event handler\n");
+		goto err_unreg_nd;
+	}
+
+	err = register_inet6addr_notifier(&iface_inet6addr_notifier_blk);
+	if (err) {
+		pr_err("qtaguid: iface_stat: init "
+		       "failed to register ipv6 dev event handler\n");
+		goto err_unreg_ip4_addr;
+	}
+	return 0;
+
+err_unreg_ip4_addr:
+	unregister_inetaddr_notifier(&iface_inetaddr_notifier_blk);
+err_unreg_nd:
+	unregister_netdevice_notifier(&iface_netdev_notifier_blk);
+err_zap_all_stats_entries:
+	remove_proc_entry(iface_stat_fmt_procfilename, parent_procdir);
+err_zap_all_stats_entry:
+	remove_proc_entry(iface_stat_all_procfilename, parent_procdir);
+err_zap_entry:
+	remove_proc_entry(iface_stat_procdirname, parent_procdir);
+err:
+	return err;
+}
+
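+/*
+ * Fall back to an xt_socket lookup when skb->sk is missing (e.g. for
+ * in-flight packets whose socket is already gone). Only valid in the
+ * PRE_ROUTING/LOCAL_IN hooks; TIME_WAIT minisocks are rejected.
+ */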
+static struct sock *qtaguid_find_sk(const struct sk_buff *skb,
+				    struct xt_action_param *par)
+{
+	struct sock *sk;
+	unsigned int hook_mask = (1 << par->hooknum);
+
+	MT_DEBUG("qtaguid: find_sk(skb=%p) hooknum=%d family=%d\n", skb,
+		 par->hooknum, par->family);
+
+	/*
+	 * Let's not abuse the xt_socket_get*_sk(), or else it will
+	 * return garbage SKs.
+	 */
+	if (!(hook_mask & XT_SOCKET_SUPPORTED_HOOKS))
+		return NULL;
+
+	switch (par->family) {
+	case NFPROTO_IPV6:
+		sk = xt_socket_lookup_slow_v6(dev_net(skb->dev), skb, par->in);
+		break;
+	case NFPROTO_IPV4:
+		sk = xt_socket_lookup_slow_v4(dev_net(skb->dev), skb, par->in);
+		break;
+	default:
+		return NULL;
+	}
+
+	if (sk) {
+		MT_DEBUG("qtaguid: %p->sk_proto=%u "
+			 "->sk_state=%d\n", sk, sk->sk_protocol, sk->sk_state);
+		/*
+		 * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+		 * "struct inet_timewait_sock" which is missing fields.
+		 */
+		if (!sk_fullsock(sk) || sk->sk_state == TCP_TIME_WAIT) {
+			sock_gen_put(sk);
+			sk = NULL;
+		}
+	}
+	return sk;
+}
+
+static void account_for_uid(const struct sk_buff *skb,
+			    const struct sock *alternate_sk, uid_t uid,
+			    struct xt_action_param *par)
+{
+	const struct net_device *el_dev;
+
+	if (!skb->dev) {
+		MT_DEBUG("qtaguid[%d]: no skb->dev\n", par->hooknum);
+		el_dev = par->in ? : par->out;
+	} else {
+		const struct net_device *other_dev;
+		el_dev = skb->dev;
+		other_dev = par->in ? : par->out;
+		if (el_dev != other_dev) {
+			MT_DEBUG("qtaguid[%d]: skb->dev=%p %s vs "
+				"par->(in/out)=%p %s\n",
+				par->hooknum, el_dev, el_dev->name, other_dev,
+				other_dev->name);
+		}
+	}
+
+	if (unlikely(!el_dev)) {
+		pr_info("qtaguid[%d]: no par->in/out?!!\n", par->hooknum);
+	} else if (unlikely(!el_dev->name)) {
+		pr_info("qtaguid[%d]: no dev->name?!!\n", par->hooknum);
+	} else {
+		int proto = ipx_proto(skb, par);
+		MT_DEBUG("qtaguid[%d]: dev name=%s type=%d fam=%d proto=%d\n",
+			 par->hooknum, el_dev->name, el_dev->type,
+			 par->family, proto);
+
+		if_tag_stat_update(el_dev->name, uid,
+				skb->sk ? skb->sk : alternate_sk,
+				par->in ? IFS_RX : IFS_TX,
+				proto, skb->len);
+	}
+}
+
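+/*
+ * Match entry point. In PRE/POST_ROUTING only the per-interface skb
+ * counters are updated; in the other hooks the owning socket (found
+ * directly or via the xt_socket fallback) supplies the UID/GID that are
+ * accounted and compared against the match parameters.
+ */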
+static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	const struct xt_qtaguid_match_info *info = par->matchinfo;
+	const struct file *filp;
+	bool got_sock = false;
+	struct sock *sk;
+	kuid_t sock_uid;
+	bool res;
+	bool set_sk_callback_lock = false;
+
+	if (unlikely(module_passive))
+		return (info->match ^ info->invert) == 0;
+
+	MT_DEBUG("qtaguid[%d]: entered skb=%p par->in=%p/out=%p fam=%d\n",
+		 par->hooknum, skb, par->in, par->out, par->family);
+
+	atomic64_inc(&qtu_events.match_calls);
+	if (skb == NULL) {
+		res = (info->match ^ info->invert) == 0;
+		goto ret_res;
+	}
+
+	switch (par->hooknum) {
+	case NF_INET_PRE_ROUTING:
+	case NF_INET_POST_ROUTING:
+		atomic64_inc(&qtu_events.match_calls_prepost);
+		iface_stat_update_from_skb(skb, par);
+		/*
+		 * We are done in pre/post. The skb will get processed
+		 * further later.
+		 */
+		res = (info->match ^ info->invert);
+		goto ret_res;
+		break;
+	/* default: Fall through and do UID related work */
+	}
+
+	sk = skb_to_full_sk(skb);
+	/*
+	 * When in TCP_TIME_WAIT the sk is not a "struct sock" but
+	 * "struct inet_timewait_sock" which is missing fields.
+	 * So we ignore it.
+	 */
+	if (sk && sk->sk_state == TCP_TIME_WAIT)
+		sk = NULL;
+	if (sk == NULL) {
+		/*
+		 * A missing sk->sk_socket happens when packets are in-flight
+		 * and the matching socket is already closed and gone.
+		 */
+		sk = qtaguid_find_sk(skb, par);
+		/*
+		 * If we got the socket from the find_sk(), we will need to put
+		 * it back, as nf_tproxy_get_sock_v4() got it.
+		 */
+		got_sock = sk;
+		if (sk)
+			atomic64_inc(&qtu_events.match_found_sk_in_ct);
+		else
+			atomic64_inc(&qtu_events.match_found_no_sk_in_ct);
+	} else {
+		atomic64_inc(&qtu_events.match_found_sk);
+	}
+	MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
+		 par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
+	if (sk != NULL) {
+		set_sk_callback_lock = true;
+		read_lock_bh(&sk->sk_callback_lock);
+		MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+			par->hooknum, sk, sk->sk_socket,
+			sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+		filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+		MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+			par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
+	}
+
+	if (sk == NULL || sk->sk_socket == NULL) {
+		/*
+		 * Here, the qtaguid_find_sk() using connection tracking
+		 * couldn't find the owner, so for now we just count them
+		 * against the system.
+		 */
+		/*
+		 * TODO: unhack how to force just accounting.
+		 * For now we only do iface stats when the uid-owner is not
+		 * requested.
+		 */
+		if (!(info->match & XT_QTAGUID_UID))
+			account_for_uid(skb, sk, 0, par);
+		MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
+			par->hooknum,
+			sk ? sk->sk_socket : NULL);
+		res = (info->match ^ info->invert) == 0;
+		atomic64_inc(&qtu_events.match_no_sk);
+		goto put_sock_ret_res;
+	} else if (info->match & info->invert & XT_QTAGUID_SOCKET) {
+		res = false;
+		goto put_sock_ret_res;
+	}
+	filp = sk->sk_socket->file;
+	if (filp == NULL) {
+		MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
+		account_for_uid(skb, sk, 0, par);
+		res = ((info->match ^ info->invert) &
+			(XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
+		atomic64_inc(&qtu_events.match_no_sk_file);
+		goto put_sock_ret_res;
+	}
+	sock_uid = filp->f_cred->fsuid;
+	/*
+	 * TODO: unhack how to force just accounting.
+	 * For now we only do iface stats when the uid-owner is not requested
+	 */
+	if (!(info->match & XT_QTAGUID_UID))
+		account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+
+	/*
+	 * The following two tests fail the match when:
+	 *    id not in range AND no inverted condition requested
+	 * or id     in range AND    inverted condition requested
+	 * Thus (!a && b) || (a && !b) == a ^ b
+	 */
+	if (info->match & XT_QTAGUID_UID) {
+		kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
+		kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
+
+		if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
+		     uid_lte(filp->f_cred->fsuid, uid_max)) ^
+		    !(info->invert & XT_QTAGUID_UID)) {
+			MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
+				 par->hooknum);
+			res = false;
+			goto put_sock_ret_res;
+		}
+	}
+	if (info->match & XT_QTAGUID_GID) {
+		kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
+		kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
+
+		if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
+				gid_lte(filp->f_cred->fsgid, gid_max)) ^
+			!(info->invert & XT_QTAGUID_GID)) {
+			MT_DEBUG("qtaguid[%d]: leaving gid not matching\n",
+				par->hooknum);
+			res = false;
+			goto put_sock_ret_res;
+		}
+	}
+	MT_DEBUG("qtaguid[%d]: leaving matched\n", par->hooknum);
+	res = true;
+
+put_sock_ret_res:
+	if (got_sock)
+		sock_gen_put(sk);
+	if (set_sk_callback_lock)
+		read_unlock_bh(&sk->sk_callback_lock);
+ret_res:
+	MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
+	return res;
+}
+
+#ifdef DDEBUG
+/* This function is not in xt_qtaguid_print.c because of locks visibility */
+static void prdebug_full_state(int indent_level, const char *fmt, ...)
+{
+	va_list args;
+	char *fmt_buff;
+	char *buff;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	fmt_buff = kasprintf(GFP_ATOMIC,
+			     "qtaguid: %s(): %s {\n", __func__, fmt);
+	BUG_ON(!fmt_buff);
+	va_start(args, fmt);
+	buff = kvasprintf(GFP_ATOMIC,
+			  fmt_buff, args);
+	BUG_ON(!buff);
+	pr_debug("%s", buff);
+	kfree(fmt_buff);
+	kfree(buff);
+	va_end(args);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	prdebug_sock_tag_tree(indent_level, &sock_tag_tree);
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	prdebug_uid_tag_data_tree(indent_level, &uid_tag_data_tree);
+	prdebug_proc_qtu_data_tree(indent_level, &proc_qtu_data_tree);
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	spin_lock_bh(&iface_stat_list_lock);
+	prdebug_iface_stat_list(indent_level, &iface_stat_list);
+	spin_unlock_bh(&iface_stat_list_lock);
+
+	pr_debug("qtaguid: %s(): }\n", __func__);
+}
+#else
+static void prdebug_full_state(int indent_level, const char *fmt, ...) {}
+#endif
+
+struct proc_ctrl_print_info {
+	struct sock *sk; /* socket found by reading to sk_pos */
+	loff_t sk_pos;
+};
+
+static void *qtaguid_ctrl_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct proc_ctrl_print_info *pcpi = m->private;
+	struct sock_tag *sock_tag_entry = v;
+	struct rb_node *node;
+
+	(*pos)++;
+
+	if (!v || v == SEQ_START_TOKEN)
+		return NULL;
+
+	node = rb_next(&sock_tag_entry->sock_node);
+	if (!node) {
+		pcpi->sk = NULL;
+		sock_tag_entry = SEQ_START_TOKEN;
+	} else {
+		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+		pcpi->sk = sock_tag_entry->sk;
+	}
+	pcpi->sk_pos = *pos;
+	return sock_tag_entry;
+}
+
+static void *qtaguid_ctrl_proc_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_ctrl_print_info *pcpi = m->private;
+	struct sock_tag *sock_tag_entry;
+	struct rb_node *node;
+
+	spin_lock_bh(&sock_tag_list_lock);
+
+	if (unlikely(module_passive))
+		return NULL;
+
+	if (*pos == 0) {
+		pcpi->sk_pos = 0;
+		node = rb_first(&sock_tag_tree);
+		if (!node) {
+			pcpi->sk = NULL;
+			return SEQ_START_TOKEN;
+		}
+		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+		pcpi->sk = sock_tag_entry->sk;
+	} else {
+		sock_tag_entry = (pcpi->sk ? get_sock_stat_nl(pcpi->sk) :
+						NULL) ?: SEQ_START_TOKEN;
+		if (*pos != pcpi->sk_pos) {
+			/* seq_read skipped a next call */
+			*pos = pcpi->sk_pos;
+			return qtaguid_ctrl_proc_next(m, sock_tag_entry, pos);
+		}
+	}
+	return sock_tag_entry;
+}
+
+static void qtaguid_ctrl_proc_stop(struct seq_file *m, void *v)
+{
+	spin_unlock_bh(&sock_tag_list_lock);
+}
+
+/*
+ * Procfs reader to get all active socket tags using style "1)" as described in
+ * fs/proc/generic.c
+ */
+static int qtaguid_ctrl_proc_show(struct seq_file *m, void *v)
+{
+	struct sock_tag *sock_tag_entry = v;
+	uid_t uid;
+	long f_count;
+
+	CT_DEBUG("qtaguid: proc ctrl pid=%u tgid=%u uid=%u\n",
+		 current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+	if (sock_tag_entry != SEQ_START_TOKEN) {
+		uid = get_uid_from_tag(sock_tag_entry->tag);
+		CT_DEBUG("qtaguid: proc_read(): sk=%p tag=0x%llx (uid=%u) "
+			 "pid=%u\n",
+			 sock_tag_entry->sk,
+			 sock_tag_entry->tag,
+			 uid,
+			 sock_tag_entry->pid
+			);
+		f_count = atomic_long_read(
+			&sock_tag_entry->socket->file->f_count);
+		seq_printf(m, "sock=%pK tag=0x%llx (uid=%u) pid=%u "
+			   "f_count=%lu\n",
+			   sock_tag_entry->sk,
+			   sock_tag_entry->tag, uid,
+			   sock_tag_entry->pid, f_count);
+	} else {
+		seq_printf(m, "events: sockets_tagged=%llu "
+			   "sockets_untagged=%llu "
+			   "counter_set_changes=%llu "
+			   "delete_cmds=%llu "
+			   "iface_events=%llu "
+			   "match_calls=%llu "
+			   "match_calls_prepost=%llu "
+			   "match_found_sk=%llu "
+			   "match_found_sk_in_ct=%llu "
+			   "match_found_no_sk_in_ct=%llu "
+			   "match_no_sk=%llu "
+			   "match_no_sk_file=%llu\n",
+			   (u64)atomic64_read(&qtu_events.sockets_tagged),
+			   (u64)atomic64_read(&qtu_events.sockets_untagged),
+			   (u64)atomic64_read(&qtu_events.counter_set_changes),
+			   (u64)atomic64_read(&qtu_events.delete_cmds),
+			   (u64)atomic64_read(&qtu_events.iface_events),
+			   (u64)atomic64_read(&qtu_events.match_calls),
+			   (u64)atomic64_read(&qtu_events.match_calls_prepost),
+			   (u64)atomic64_read(&qtu_events.match_found_sk),
+			   (u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
+			   (u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
+			   (u64)atomic64_read(&qtu_events.match_no_sk),
+			   (u64)atomic64_read(&qtu_events.match_no_sk_file));
+
+		/* Count the following as part of the last item_index */
+		prdebug_full_state(0, "proc ctrl");
+	}
+
+	return 0;
+}
+
+/*
+ * Delete socket tags, and stat tags associated with a given
+ * accounting tag and uid.
+ */
+static int ctrl_cmd_delete(const char *input)
+{
+	char cmd;
+	int uid_int;
+	kuid_t uid;
+	uid_t entry_uid;
+	tag_t acct_tag;
+	tag_t tag;
+	int res, argc;
+	struct iface_stat *iface_entry;
+	struct rb_node *node;
+	struct sock_tag *st_entry;
+	struct rb_root st_to_free_tree = RB_ROOT;
+	struct tag_stat *ts_entry;
+	struct tag_counter_set *tcs_entry;
+	struct tag_ref *tr_entry;
+	struct uid_tag_data *utd_entry;
+
+	argc = sscanf(input, "%c %llu %u", &cmd, &acct_tag, &uid_int);
+	uid = make_kuid(&init_user_ns, uid_int);
+	CT_DEBUG("qtaguid: ctrl_delete(%s): argc=%d cmd=%c "
+		 "user_tag=0x%llx uid=%u\n", input, argc, cmd,
+		 acct_tag, uid_int);
+	if (argc < 2) {
+		res = -EINVAL;
+		goto err;
+	}
+	if (!valid_atag(acct_tag)) {
+		pr_info("qtaguid: ctrl_delete(%s): invalid tag\n", input);
+		res = -EINVAL;
+		goto err;
+	}
+	if (argc < 3) {
+		uid = current_fsuid();
+		uid_int = from_kuid(&init_user_ns, uid);
+	} else if (!can_impersonate_uid(uid)) {
+		pr_info("qtaguid: ctrl_delete(%s): "
+			"insufficient priv from pid=%u tgid=%u uid=%u\n",
+			input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+		res = -EPERM;
+		goto err;
+	}
+
+	tag = combine_atag_with_uid(acct_tag, uid_int);
+	CT_DEBUG("qtaguid: ctrl_delete(%s): "
+		 "looking for tag=0x%llx (uid=%u)\n",
+		 input, tag, uid_int);
+
+	/* Delete socket tags */
+	spin_lock_bh(&sock_tag_list_lock);
+	node = rb_first(&sock_tag_tree);
+	while (node) {
+		st_entry = rb_entry(node, struct sock_tag, sock_node);
+		entry_uid = get_uid_from_tag(st_entry->tag);
+		node = rb_next(node);
+		if (entry_uid != uid_int)
+			continue;
+
+		CT_DEBUG("qtaguid: ctrl_delete(%s): st tag=0x%llx (uid=%u)\n",
+			 input, st_entry->tag, entry_uid);
+
+		if (!acct_tag || st_entry->tag == tag) {
+			rb_erase(&st_entry->sock_node, &sock_tag_tree);
+			/* Can't sockfd_put() within spinlock, do it later. */
+			sock_tag_tree_insert(st_entry, &st_to_free_tree);
+			tr_entry = lookup_tag_ref(st_entry->tag, NULL);
+			BUG_ON(tr_entry->num_sock_tags <= 0);
+			tr_entry->num_sock_tags--;
+			/*
+			 * TODO: remove if, and start failing.
+			 * This is a hack to work around the fact that in some
+			 * places we have "if (IS_ERR_OR_NULL(pqd_entry))"
+			 * and are trying to work around apps
+			 * that didn't open the /dev/xt_qtaguid.
+			 */
+			if (st_entry->list.next && st_entry->list.prev)
+				list_del(&st_entry->list);
+		}
+	}
+	spin_unlock_bh(&sock_tag_list_lock);
+
+	sock_tag_tree_erase(&st_to_free_tree);
+
+	/* Delete tag counter-sets */
+	spin_lock_bh(&tag_counter_set_list_lock);
+	/* Counter sets are only on the uid tag, not full tag */
+	tcs_entry = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (tcs_entry) {
+		CT_DEBUG("qtaguid: ctrl_delete(%s): "
+			 "erase tcs: tag=0x%llx (uid=%u) set=%d\n",
+			 input,
+			 tcs_entry->tn.tag,
+			 get_uid_from_tag(tcs_entry->tn.tag),
+			 tcs_entry->active_set);
+		rb_erase(&tcs_entry->tn.node, &tag_counter_set_tree);
+		kfree(tcs_entry);
+	}
+	spin_unlock_bh(&tag_counter_set_list_lock);
+
+	/*
+	 * If acct_tag is 0, then all entries belonging to uid are
+	 * erased.
+	 */
+	spin_lock_bh(&iface_stat_list_lock);
+	list_for_each_entry(iface_entry, &iface_stat_list, list) {
+		spin_lock_bh(&iface_entry->tag_stat_list_lock);
+		node = rb_first(&iface_entry->tag_stat_tree);
+		while (node) {
+			ts_entry = rb_entry(node, struct tag_stat, tn.node);
+			entry_uid = get_uid_from_tag(ts_entry->tn.tag);
+			node = rb_next(node);
+
+			CT_DEBUG("qtaguid: ctrl_delete(%s): "
+				 "ts tag=0x%llx (uid=%u)\n",
+				 input, ts_entry->tn.tag, entry_uid);
+
+			if (entry_uid != uid_int)
+				continue;
+			if (!acct_tag || ts_entry->tn.tag == tag) {
+				CT_DEBUG("qtaguid: ctrl_delete(%s): "
+					 "erase ts: %s 0x%llx %u\n",
+					 input, iface_entry->ifname,
+					 get_atag_from_tag(ts_entry->tn.tag),
+					 entry_uid);
+				rb_erase(&ts_entry->tn.node,
+					 &iface_entry->tag_stat_tree);
+				kfree(ts_entry);
+			}
+		}
+		spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+	}
+	spin_unlock_bh(&iface_stat_list_lock);
+
+	/* Cleanup the uid_tag_data */
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	node = rb_first(&uid_tag_data_tree);
+	while (node) {
+		utd_entry = rb_entry(node, struct uid_tag_data, node);
+		entry_uid = utd_entry->uid;
+		node = rb_next(node);
+
+		CT_DEBUG("qtaguid: ctrl_delete(%s): "
+			 "utd uid=%u\n",
+			 input, entry_uid);
+
+		if (entry_uid != uid_int)
+			continue;
+		/*
+		 * Go over the tag_refs, and those that don't have
+		 * sock_tags using them are freed.
+		 */
+		put_tag_ref_tree(tag, utd_entry);
+		put_utd_entry(utd_entry);
+	}
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+
+	atomic64_inc(&qtu_events.delete_cmds);
+	res = 0;
+
+err:
+	return res;
+}
+
+static int ctrl_cmd_counter_set(const char *input)
+{
+	char cmd;
+	uid_t uid = 0;
+	tag_t tag;
+	int res, argc;
+	struct tag_counter_set *tcs;
+	int counter_set;
+
+	argc = sscanf(input, "%c %d %u", &cmd, &counter_set, &uid);
+	CT_DEBUG("qtaguid: ctrl_counterset(%s): argc=%d cmd=%c "
+		 "set=%d uid=%u\n", input, argc, cmd,
+		 counter_set, uid);
+	if (argc != 3) {
+		res = -EINVAL;
+		goto err;
+	}
+	if (counter_set < 0 || counter_set >= IFS_MAX_COUNTER_SETS) {
+		pr_info("qtaguid: ctrl_counterset(%s): invalid counter_set range\n",
+			input);
+		res = -EINVAL;
+		goto err;
+	}
+	if (!can_manipulate_uids()) {
+		pr_info("qtaguid: ctrl_counterset(%s): "
+			"insufficient priv from pid=%u tgid=%u uid=%u\n",
+			input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+		res = -EPERM;
+		goto err;
+	}
+
+	tag = make_tag_from_uid(uid);
+	spin_lock_bh(&tag_counter_set_list_lock);
+	tcs = tag_counter_set_tree_search(&tag_counter_set_tree, tag);
+	if (!tcs) {
+		tcs = kzalloc(sizeof(*tcs), GFP_ATOMIC);
+		if (!tcs) {
+			spin_unlock_bh(&tag_counter_set_list_lock);
+			pr_err("qtaguid: ctrl_counterset(%s): "
+			       "failed to alloc counter set\n",
+			       input);
+			res = -ENOMEM;
+			goto err;
+		}
+		tcs->tn.tag = tag;
+		tag_counter_set_tree_insert(tcs, &tag_counter_set_tree);
+		CT_DEBUG("qtaguid: ctrl_counterset(%s): added tcs tag=0x%llx "
+			 "(uid=%u) set=%d\n",
+			 input, tag, get_uid_from_tag(tag), counter_set);
+	}
+	tcs->active_set = counter_set;
+	spin_unlock_bh(&tag_counter_set_list_lock);
+	atomic64_inc(&qtu_events.counter_set_changes);
+	res = 0;
+
+err:
+	return res;
+}
+
+static int ctrl_cmd_tag(const char *input)
+{
+	char cmd;
+	int sock_fd = 0;
+	kuid_t uid;
+	unsigned int uid_int = 0;
+	tag_t acct_tag = make_atag_from_value(0);
+	tag_t full_tag;
+	struct socket *el_socket;
+	int res, argc;
+	struct sock_tag *sock_tag_entry;
+	struct tag_ref *tag_ref_entry;
+	struct uid_tag_data *uid_tag_data_entry;
+	struct proc_qtu_data *pqd_entry;
+
+	/* Unassigned args will get defaulted later. */
+	argc = sscanf(input, "%c %d %llu %u", &cmd, &sock_fd, &acct_tag, &uid_int);
+	uid = make_kuid(&init_user_ns, uid_int);
+	CT_DEBUG("qtaguid: ctrl_tag(%s): argc=%d cmd=%c sock_fd=%d "
+		 "acct_tag=0x%llx uid=%u\n", input, argc, cmd, sock_fd,
+		 acct_tag, uid_int);
+	if (argc < 2) {
+		res = -EINVAL;
+		goto err;
+	}
+	el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+	if (!el_socket) {
+		pr_info("qtaguid: ctrl_tag(%s): failed to lookup"
+			" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+			input, sock_fd, res, current->pid, current->tgid,
+			from_kuid(&init_user_ns, current_fsuid()));
+		goto err;
+	}
+	CT_DEBUG("qtaguid: ctrl_tag(%s): socket->...->f_count=%ld ->sk=%p\n",
+		 input, atomic_long_read(&el_socket->file->f_count),
+		 el_socket->sk);
+	if (argc < 3) {
+		acct_tag = make_atag_from_value(0);
+	} else if (!valid_atag(acct_tag)) {
+		pr_info("qtaguid: ctrl_tag(%s): invalid tag\n", input);
+		res = -EINVAL;
+		goto err_put;
+	}
+	CT_DEBUG("qtaguid: ctrl_tag(%s): "
+		 "pid=%u tgid=%u uid=%u euid=%u fsuid=%u "
+		 "ctrl.gid=%u in_group()=%d in_egroup()=%d\n",
+		 input, current->pid, current->tgid,
+		 from_kuid(&init_user_ns, current_uid()),
+		 from_kuid(&init_user_ns, current_euid()),
+		 from_kuid(&init_user_ns, current_fsuid()),
+		 from_kgid(&init_user_ns, xt_qtaguid_ctrl_file->gid),
+		 in_group_p(xt_qtaguid_ctrl_file->gid),
+		 in_egroup_p(xt_qtaguid_ctrl_file->gid));
+	if (argc < 4) {
+		uid = current_fsuid();
+		uid_int = from_kuid(&init_user_ns, uid);
+	} else if (!can_impersonate_uid(uid)) {
+		pr_info("qtaguid: ctrl_tag(%s): "
+			"insufficient priv from pid=%u tgid=%u uid=%u\n",
+			input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+		res = -EPERM;
+		goto err_put;
+	}
+	full_tag = combine_atag_with_uid(acct_tag, uid_int);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+	tag_ref_entry = get_tag_ref(full_tag, &uid_tag_data_entry);
+	if (IS_ERR(tag_ref_entry)) {
+		res = PTR_ERR(tag_ref_entry);
+		spin_unlock_bh(&sock_tag_list_lock);
+		goto err_put;
+	}
+	tag_ref_entry->num_sock_tags++;
+	if (sock_tag_entry) {
+		struct tag_ref *prev_tag_ref_entry;
+
+		CT_DEBUG("qtaguid: ctrl_tag(%s): retag for sk=%p "
+			 "st@%p ...->f_count=%ld\n",
+			 input, el_socket->sk, sock_tag_entry,
+			 atomic_long_read(&el_socket->file->f_count));
+		/*
+		 * This is a re-tagging, so release the sock_fd that was
+		 * locked at the time of the 1st tagging.
+		 * There is still the ref from this call's sockfd_lookup() so
+		 * it can be done within the spinlock.
+		 */
+		sockfd_put(sock_tag_entry->socket);
+		prev_tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag,
+						    &uid_tag_data_entry);
+		BUG_ON(IS_ERR_OR_NULL(prev_tag_ref_entry));
+		BUG_ON(prev_tag_ref_entry->num_sock_tags <= 0);
+		prev_tag_ref_entry->num_sock_tags--;
+		sock_tag_entry->tag = full_tag;
+	} else {
+		CT_DEBUG("qtaguid: ctrl_tag(%s): newtag for sk=%p\n",
+			 input, el_socket->sk);
+		sock_tag_entry = kzalloc(sizeof(*sock_tag_entry),
+					 GFP_ATOMIC);
+		if (!sock_tag_entry) {
+			pr_err("qtaguid: ctrl_tag(%s): "
+			       "socket tag alloc failed\n",
+			       input);
+			spin_unlock_bh(&sock_tag_list_lock);
+			res = -ENOMEM;
+			goto err_tag_unref_put;
+		}
+		sock_tag_entry->sk = el_socket->sk;
+		sock_tag_entry->socket = el_socket;
+		sock_tag_entry->pid = current->tgid;
+		sock_tag_entry->tag = combine_atag_with_uid(acct_tag, uid_int);
+		spin_lock_bh(&uid_tag_data_tree_lock);
+		pqd_entry = proc_qtu_data_tree_search(
+			&proc_qtu_data_tree, current->tgid);
+		/*
+		 * TODO: remove if, and start failing.
+		 * At first, we want to catch user-space code that is not
+		 * opening the /dev/xt_qtaguid.
+		 */
+		if (IS_ERR_OR_NULL(pqd_entry))
+			pr_warn_once(
+				"qtaguid: %s(): "
+				"User space forgot to open /dev/xt_qtaguid? "
+				"pid=%u tgid=%u uid=%u\n", __func__,
+				current->pid, current->tgid,
+				from_kuid(&init_user_ns, current_fsuid()));
+		else
+			list_add(&sock_tag_entry->list,
+				 &pqd_entry->sock_tag_list);
+		spin_unlock_bh(&uid_tag_data_tree_lock);
+
+		sock_tag_tree_insert(sock_tag_entry, &sock_tag_tree);
+		atomic64_inc(&qtu_events.sockets_tagged);
+	}
+	spin_unlock_bh(&sock_tag_list_lock);
+	/* We keep the ref to the socket (file) until it is untagged */
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done st@%p ...->f_count=%ld\n",
+		 input, sock_tag_entry,
+		 atomic_long_read(&el_socket->file->f_count));
+	return 0;
+
+err_tag_unref_put:
+	BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+	tag_ref_entry->num_sock_tags--;
+	free_tag_ref_from_utd_entry(tag_ref_entry, uid_tag_data_entry);
+err_put:
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done. ...->f_count=%ld\n",
+		 input, atomic_long_read(&el_socket->file->f_count) - 1);
+	/* Release the sock_fd that was grabbed by sockfd_lookup(). */
+	sockfd_put(el_socket);
+	return res;
+
+err:
+	CT_DEBUG("qtaguid: ctrl_tag(%s): done.\n", input);
+	return res;
+}
+
+static int ctrl_cmd_untag(const char *input)
+{
+	char cmd;
+	int sock_fd = 0;
+	struct socket *el_socket;
+	int res, argc;
+	struct sock_tag *sock_tag_entry;
+	struct tag_ref *tag_ref_entry;
+	struct uid_tag_data *utd_entry;
+	struct proc_qtu_data *pqd_entry;
+
+	argc = sscanf(input, "%c %d", &cmd, &sock_fd);
+	CT_DEBUG("qtaguid: ctrl_untag(%s): argc=%d cmd=%c sock_fd=%d\n",
+		 input, argc, cmd, sock_fd);
+	if (argc < 2) {
+		res = -EINVAL;
+		goto err;
+	}
+	el_socket = sockfd_lookup(sock_fd, &res);  /* This locks the file */
+	if (!el_socket) {
+		pr_info("qtaguid: ctrl_untag(%s): failed to lookup"
+			" sock_fd=%d err=%d pid=%u tgid=%u uid=%u\n",
+			input, sock_fd, res, current->pid, current->tgid,
+			from_kuid(&init_user_ns, current_fsuid()));
+		goto err;
+	}
+	CT_DEBUG("qtaguid: ctrl_untag(%s): socket->...->f_count=%ld ->sk=%p\n",
+		 input, atomic_long_read(&el_socket->file->f_count),
+		 el_socket->sk);
+	spin_lock_bh(&sock_tag_list_lock);
+	sock_tag_entry = get_sock_stat_nl(el_socket->sk);
+	if (!sock_tag_entry) {
+		spin_unlock_bh(&sock_tag_list_lock);
+		res = -EINVAL;
+		goto err_put;
+	}
+	/*
+	 * The socket already belongs to the current process
+	 * so it can do whatever it wants to it.
+	 */
+	rb_erase(&sock_tag_entry->sock_node, &sock_tag_tree);
+
+	tag_ref_entry = lookup_tag_ref(sock_tag_entry->tag, &utd_entry);
+	BUG_ON(!tag_ref_entry);
+	BUG_ON(tag_ref_entry->num_sock_tags <= 0);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+	pqd_entry = proc_qtu_data_tree_search(
+		&proc_qtu_data_tree, current->tgid);
+	/*
+	 * TODO: remove if, and start failing.
+	 * At first, we want to catch user-space code that is not
+	 * opening the /dev/xt_qtaguid.
+	 */
+	if (IS_ERR_OR_NULL(pqd_entry))
+		pr_warn_once("qtaguid: %s(): "
+			     "User space forgot to open /dev/xt_qtaguid? "
+			     "pid=%u tgid=%u uid=%u\n", __func__,
+			     current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+	else
+		list_del(&sock_tag_entry->list);
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	/*
+	 * We don't free tag_ref from the utd_entry here,
+	 * only during a cmd_delete().
+	 */
+	tag_ref_entry->num_sock_tags--;
+	spin_unlock_bh(&sock_tag_list_lock);
+	/*
+	 * Release the sock_fd that was grabbed at tag time,
+	 * and once more for the sockfd_lookup() here.
+	 */
+	sockfd_put(sock_tag_entry->socket);
+	CT_DEBUG("qtaguid: ctrl_untag(%s): done. st@%p ...->f_count=%ld\n",
+		 input, sock_tag_entry,
+		 atomic_long_read(&el_socket->file->f_count) - 1);
+	sockfd_put(el_socket);
+
+	kfree(sock_tag_entry);
+	atomic64_inc(&qtu_events.sockets_untagged);
+
+	return 0;
+
+err_put:
+	CT_DEBUG("qtaguid: ctrl_untag(%s): done. socket->...->f_count=%ld\n",
+		 input, atomic_long_read(&el_socket->file->f_count) - 1);
+	/* Release the sock_fd that was grabbed by sockfd_lookup(). */
+	sockfd_put(el_socket);
+	return res;
+
+err:
+	CT_DEBUG("qtaguid: ctrl_untag(%s): done.\n", input);
+	return res;
+}
+
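+/*
+ * Summary of the command strings accepted through the "ctrl" procfs file,
+ * derived from the sscanf() formats in the handlers above (see each handler
+ * for defaults of the bracketed optional arguments and for permission checks):
+ *   "t <sock_fd> [<acct_tag> [<uid>]]"  tag a socket (ctrl_cmd_tag)
+ *   "u <sock_fd>"                       untag a socket (ctrl_cmd_untag)
+ *   "d <acct_tag> [<uid>]"              delete tags/stats (ctrl_cmd_delete)
+ *   "s <counter_set> <uid>"             switch counter set (ctrl_cmd_counter_set)
+ * acct_tag must have its lower 32 bits clear (see valid_atag()); an acct_tag
+ * of 0 in a delete means all tags belonging to the uid.
+ */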
+static ssize_t qtaguid_ctrl_parse(const char *input, size_t count)
+{
+	char cmd;
+	ssize_t res;
+
+	CT_DEBUG("qtaguid: ctrl(%s): pid=%u tgid=%u uid=%u\n",
+		 input, current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+	cmd = input[0];
+	/* Collect params for commands */
+	switch (cmd) {
+	case 'd':
+		res = ctrl_cmd_delete(input);
+		break;
+
+	case 's':
+		res = ctrl_cmd_counter_set(input);
+		break;
+
+	case 't':
+		res = ctrl_cmd_tag(input);
+		break;
+
+	case 'u':
+		res = ctrl_cmd_untag(input);
+		break;
+
+	default:
+		res = -EINVAL;
+		goto err;
+	}
+	if (!res)
+		res = count;
+err:
+	CT_DEBUG("qtaguid: ctrl(%s): res=%zd\n", input, res);
+	return res;
+}
+
+#define MAX_QTAGUID_CTRL_INPUT_LEN 255
+static ssize_t qtaguid_ctrl_proc_write(struct file *file, const char __user *buffer,
+				   size_t count, loff_t *offp)
+{
+	char input_buf[MAX_QTAGUID_CTRL_INPUT_LEN];
+
+	if (unlikely(module_passive))
+		return count;
+
+	if (count >= MAX_QTAGUID_CTRL_INPUT_LEN)
+		return -EINVAL;
+
+	if (copy_from_user(input_buf, buffer, count))
+		return -EFAULT;
+
+	input_buf[count] = '\0';
+	return qtaguid_ctrl_parse(input_buf, count);
+}
+
+struct proc_print_info {
+	struct iface_stat *iface_entry;
+	int item_index;
+	tag_t tag; /* tag found by reading to tag_pos */
+	off_t tag_pos;
+	int tag_item_index;
+};
+
+static void pp_stats_header(struct seq_file *m)
+{
+	seq_puts(m,
+		 "idx iface acct_tag_hex uid_tag_int cnt_set "
+		 "rx_bytes rx_packets "
+		 "tx_bytes tx_packets "
+		 "rx_tcp_bytes rx_tcp_packets "
+		 "rx_udp_bytes rx_udp_packets "
+		 "rx_other_bytes rx_other_packets "
+		 "tx_tcp_bytes tx_tcp_packets "
+		 "tx_udp_bytes tx_udp_packets "
+		 "tx_other_bytes tx_other_packets\n");
+}
+
+static int pp_stats_line(struct seq_file *m, struct tag_stat *ts_entry,
+			 int cnt_set)
+{
+	struct data_counters *cnts;
+	tag_t tag = ts_entry->tn.tag;
+	uid_t stat_uid = get_uid_from_tag(tag);
+	struct proc_print_info *ppi = m->private;
+	/* Detailed tags are not available to everybody */
+	if (!can_read_other_uid_stats(make_kuid(&init_user_ns, stat_uid))) {
+		CT_DEBUG("qtaguid: stats line: "
+			 "%s 0x%llx %u: insufficient priv "
+			 "from pid=%u tgid=%u uid=%u stats.gid=%u\n",
+			 ppi->iface_entry->ifname,
+			 get_atag_from_tag(tag), stat_uid,
+			 current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()),
+			 from_kgid(&init_user_ns, xt_qtaguid_stats_file->gid));
+		return 0;
+	}
+	ppi->item_index++;
+	cnts = &ts_entry->counters;
+	seq_printf(m, "%d %s 0x%llx %u %u "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu "
+		"%llu %llu\n",
+		ppi->item_index,
+		ppi->iface_entry->ifname,
+		get_atag_from_tag(tag),
+		stat_uid,
+		cnt_set,
+		dc_sum_bytes(cnts, cnt_set, IFS_RX),
+		dc_sum_packets(cnts, cnt_set, IFS_RX),
+		dc_sum_bytes(cnts, cnt_set, IFS_TX),
+		dc_sum_packets(cnts, cnt_set, IFS_TX),
+		cnts->bpc[cnt_set][IFS_RX][IFS_TCP].bytes,
+		cnts->bpc[cnt_set][IFS_RX][IFS_TCP].packets,
+		cnts->bpc[cnt_set][IFS_RX][IFS_UDP].bytes,
+		cnts->bpc[cnt_set][IFS_RX][IFS_UDP].packets,
+		cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].bytes,
+		cnts->bpc[cnt_set][IFS_RX][IFS_PROTO_OTHER].packets,
+		cnts->bpc[cnt_set][IFS_TX][IFS_TCP].bytes,
+		cnts->bpc[cnt_set][IFS_TX][IFS_TCP].packets,
+		cnts->bpc[cnt_set][IFS_TX][IFS_UDP].bytes,
+		cnts->bpc[cnt_set][IFS_TX][IFS_UDP].packets,
+		cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].bytes,
+		cnts->bpc[cnt_set][IFS_TX][IFS_PROTO_OTHER].packets);
+	return seq_has_overflowed(m) ? -ENOSPC : 1;
+}
+
+static bool pp_sets(struct seq_file *m, struct tag_stat *ts_entry)
+{
+	int ret;
+	int counter_set;
+	for (counter_set = 0; counter_set < IFS_MAX_COUNTER_SETS;
+	     counter_set++) {
+		ret = pp_stats_line(m, ts_entry, counter_set);
+		if (ret < 0)
+			return false;
+	}
+	return true;
+}
+
+static int qtaguid_stats_proc_iface_stat_ptr_valid(struct iface_stat *ptr)
+{
+	struct iface_stat *iface_entry;
+
+	if (!ptr)
+		return false;
+
+	list_for_each_entry(iface_entry, &iface_stat_list, list)
+		if (iface_entry == ptr)
+			return true;
+	return false;
+}
+
+static void qtaguid_stats_proc_next_iface_entry(struct proc_print_info *ppi)
+{
+	spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+	list_for_each_entry_continue(ppi->iface_entry, &iface_stat_list, list) {
+		spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+		return;
+	}
+	ppi->iface_entry = NULL;
+}
+
+static void *qtaguid_stats_proc_next(struct seq_file *m, void *v, loff_t *pos)
+{
+	struct proc_print_info *ppi = m->private;
+	struct tag_stat *ts_entry;
+	struct rb_node *node;
+
+	if (!v) {
+		pr_err("qtaguid: %s(): unexpected v: NULL\n", __func__);
+		return NULL;
+	}
+
+	(*pos)++;
+
+	if (!ppi->iface_entry || unlikely(module_passive))
+		return NULL;
+
+	if (v == SEQ_START_TOKEN)
+		node = rb_first(&ppi->iface_entry->tag_stat_tree);
+	else
+		node = rb_next(&((struct tag_stat *)v)->tn.node);
+
+	while (!node) {
+		qtaguid_stats_proc_next_iface_entry(ppi);
+		if (!ppi->iface_entry)
+			return NULL;
+		node = rb_first(&ppi->iface_entry->tag_stat_tree);
+	}
+
+	ts_entry = rb_entry(node, struct tag_stat, tn.node);
+	ppi->tag = ts_entry->tn.tag;
+	ppi->tag_pos = *pos;
+	ppi->tag_item_index = ppi->item_index;
+	return ts_entry;
+}
+
+static void *qtaguid_stats_proc_start(struct seq_file *m, loff_t *pos)
+{
+	struct proc_print_info *ppi = m->private;
+	struct tag_stat *ts_entry = NULL;
+
+	spin_lock_bh(&iface_stat_list_lock);
+
+	if (*pos == 0) {
+		ppi->item_index = 1;
+		ppi->tag_pos = 0;
+		if (list_empty(&iface_stat_list)) {
+			ppi->iface_entry = NULL;
+		} else {
+			ppi->iface_entry = list_first_entry(&iface_stat_list,
+							    struct iface_stat,
+							    list);
+			spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+		}
+		return SEQ_START_TOKEN;
+	}
+	if (!qtaguid_stats_proc_iface_stat_ptr_valid(ppi->iface_entry)) {
+		if (ppi->iface_entry) {
+			pr_err("qtaguid: %s(): iface_entry %p not found\n",
+			       __func__, ppi->iface_entry);
+			ppi->iface_entry = NULL;
+		}
+		return NULL;
+	}
+
+	spin_lock_bh(&ppi->iface_entry->tag_stat_list_lock);
+
+	if (!ppi->tag_pos) {
+		/* seq_read skipped first next call */
+		ts_entry = SEQ_START_TOKEN;
+	} else {
+		ts_entry = tag_stat_tree_search(
+				&ppi->iface_entry->tag_stat_tree, ppi->tag);
+		if (!ts_entry) {
+			pr_info("qtaguid: %s(): tag_stat.tag 0x%llx not found. Abort.\n",
+				__func__, ppi->tag);
+			return NULL;
+		}
+	}
+
+	if (*pos == ppi->tag_pos) { /* normal resume */
+		ppi->item_index = ppi->tag_item_index;
+	} else {
+		/* seq_read skipped a next call */
+		*pos = ppi->tag_pos;
+		ts_entry = qtaguid_stats_proc_next(m, ts_entry, pos);
+	}
+
+	return ts_entry;
+}
+
+static void qtaguid_stats_proc_stop(struct seq_file *m, void *v)
+{
+	struct proc_print_info *ppi = m->private;
+	if (ppi->iface_entry)
+		spin_unlock_bh(&ppi->iface_entry->tag_stat_list_lock);
+	spin_unlock_bh(&iface_stat_list_lock);
+}
+
+/*
+ * Procfs reader to get all tag stats using style "1)" as described in
+ * fs/proc/generic.c
+ * Groups tx/rx bytes across all protocols.
+ */
+static int qtaguid_stats_proc_show(struct seq_file *m, void *v)
+{
+	struct tag_stat *ts_entry = v;
+
+	if (v == SEQ_START_TOKEN)
+		pp_stats_header(m);
+	else
+		pp_sets(m, ts_entry);
+
+	return 0;
+}
+
+/*------------------------------------------*/
+static int qtudev_open(struct inode *inode, struct file *file)
+{
+	struct uid_tag_data *utd_entry;
+	struct proc_qtu_data  *pqd_entry;
+	struct proc_qtu_data  *new_pqd_entry;
+	int res;
+	bool utd_entry_found;
+
+	if (unlikely(qtu_proc_handling_passive))
+		return 0;
+
+	DR_DEBUG("qtaguid: qtudev_open(): pid=%u tgid=%u uid=%u\n",
+		 current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+
+	spin_lock_bh(&uid_tag_data_tree_lock);
+
+	/* Look for existing uid data, or alloc one. */
+	utd_entry = get_uid_data(from_kuid(&init_user_ns, current_fsuid()), &utd_entry_found);
+	if (IS_ERR_OR_NULL(utd_entry)) {
+		res = PTR_ERR(utd_entry);
+		goto err_unlock;
+	}
+
+	/* Look for existing PID based proc_data */
+	pqd_entry = proc_qtu_data_tree_search(&proc_qtu_data_tree,
+					      current->tgid);
+	if (pqd_entry) {
+		pr_err("qtaguid: qtudev_open(): %u/%u %u "
+		       "%s already opened\n",
+		       current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()),
+		       QTU_DEV_NAME);
+		res = -EBUSY;
+		goto err_unlock_free_utd;
+	}
+
+	new_pqd_entry = kzalloc(sizeof(*new_pqd_entry), GFP_ATOMIC);
+	if (!new_pqd_entry) {
+		pr_err("qtaguid: qtudev_open(): %u/%u %u: "
+		       "proc data alloc failed\n",
+		       current->pid, current->tgid, from_kuid(&init_user_ns, current_fsuid()));
+		res = -ENOMEM;
+		goto err_unlock_free_utd;
+	}
+	new_pqd_entry->pid = current->tgid;
+	INIT_LIST_HEAD(&new_pqd_entry->sock_tag_list);
+	new_pqd_entry->parent_tag_data = utd_entry;
+	utd_entry->num_pqd++;
+
+	proc_qtu_data_tree_insert(new_pqd_entry,
+				  &proc_qtu_data_tree);
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	DR_DEBUG("qtaguid: tracking data for uid=%u in pqd=%p\n",
+		 from_kuid(&init_user_ns, current_fsuid()), new_pqd_entry);
+	file->private_data = new_pqd_entry;
+	return 0;
+
+err_unlock_free_utd:
+	if (!utd_entry_found) {
+		rb_erase(&utd_entry->node, &uid_tag_data_tree);
+		kfree(utd_entry);
+	}
+err_unlock:
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	return res;
+}
+
+static int qtudev_release(struct inode *inode, struct file *file)
+{
+	struct proc_qtu_data  *pqd_entry = file->private_data;
+	struct uid_tag_data  *utd_entry = pqd_entry->parent_tag_data;
+	struct sock_tag *st_entry;
+	struct rb_root st_to_free_tree = RB_ROOT;
+	struct list_head *entry, *next;
+	struct tag_ref *tr;
+
+	if (unlikely(qtu_proc_handling_passive))
+		return 0;
+
+	/*
+	 * Do not trust the current->pid, it might just be a kworker cleaning
+	 * up after a dead proc.
+	 */
+	DR_DEBUG("qtaguid: qtudev_release(): "
+		 "pid=%u tgid=%u uid=%u "
+		 "pqd_entry=%p->pid=%u utd_entry=%p->active_tags=%d\n",
+		 current->pid, current->tgid, pqd_entry->parent_tag_data->uid,
+		 pqd_entry, pqd_entry->pid, utd_entry,
+		 utd_entry->num_active_tags);
+
+	spin_lock_bh(&sock_tag_list_lock);
+	spin_lock_bh(&uid_tag_data_tree_lock);
+
+	list_for_each_safe(entry, next, &pqd_entry->sock_tag_list) {
+		st_entry = list_entry(entry, struct sock_tag, list);
+		DR_DEBUG("qtaguid: %s(): "
+			 "erase sock_tag=%p->sk=%p pid=%u tgid=%u uid=%u\n",
+			 __func__,
+			 st_entry, st_entry->sk,
+			 current->pid, current->tgid,
+			 pqd_entry->parent_tag_data->uid);
+
+		utd_entry = uid_tag_data_tree_search(
+			&uid_tag_data_tree,
+			get_uid_from_tag(st_entry->tag));
+		BUG_ON(IS_ERR_OR_NULL(utd_entry));
+		DR_DEBUG("qtaguid: %s(): "
+			 "looking for tag=0x%llx in utd_entry=%p\n", __func__,
+			 st_entry->tag, utd_entry);
+		tr = tag_ref_tree_search(&utd_entry->tag_ref_tree,
+					 st_entry->tag);
+		BUG_ON(!tr);
+		BUG_ON(tr->num_sock_tags <= 0);
+		tr->num_sock_tags--;
+		free_tag_ref_from_utd_entry(tr, utd_entry);
+
+		rb_erase(&st_entry->sock_node, &sock_tag_tree);
+		list_del(&st_entry->list);
+		/* Can't sockfd_put() within spinlock, do it later. */
+		sock_tag_tree_insert(st_entry, &st_to_free_tree);
+
+		/*
+		 * Try to free the utd_entry if no other proc_qtu_data is
+		 * using it (num_pqd is 0) and it doesn't have active tags
+		 * (num_active_tags is 0).
+		 */
+		put_utd_entry(utd_entry);
+	}
+
+	rb_erase(&pqd_entry->node, &proc_qtu_data_tree);
+	BUG_ON(pqd_entry->parent_tag_data->num_pqd < 1);
+	pqd_entry->parent_tag_data->num_pqd--;
+	put_utd_entry(pqd_entry->parent_tag_data);
+	kfree(pqd_entry);
+	file->private_data = NULL;
+
+	spin_unlock_bh(&uid_tag_data_tree_lock);
+	spin_unlock_bh(&sock_tag_list_lock);
+
+
+	sock_tag_tree_erase(&st_to_free_tree);
+
+	prdebug_full_state(0, "%s(): pid=%u tgid=%u", __func__,
+			   current->pid, current->tgid);
+	return 0;
+}
+
+/*------------------------------------------*/
+static const struct file_operations qtudev_fops = {
+	.owner = THIS_MODULE,
+	.open = qtudev_open,
+	.release = qtudev_release,
+};
+
+static struct miscdevice qtu_device = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = QTU_DEV_NAME,
+	.fops = &qtudev_fops,
+	/* How sad it doesn't allow for defaults: .mode = S_IRUGO | S_IWUSR */
+};
+
+static const struct seq_operations proc_qtaguid_ctrl_seqops = {
+	.start = qtaguid_ctrl_proc_start,
+	.next = qtaguid_ctrl_proc_next,
+	.stop = qtaguid_ctrl_proc_stop,
+	.show = qtaguid_ctrl_proc_show,
+};
+
+static int proc_qtaguid_ctrl_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &proc_qtaguid_ctrl_seqops,
+				sizeof(struct proc_ctrl_print_info));
+}
+
+static const struct file_operations proc_qtaguid_ctrl_fops = {
+	.open		= proc_qtaguid_ctrl_open,
+	.read		= seq_read,
+	.write		= qtaguid_ctrl_proc_write,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+
+static const struct seq_operations proc_qtaguid_stats_seqops = {
+	.start = qtaguid_stats_proc_start,
+	.next = qtaguid_stats_proc_next,
+	.stop = qtaguid_stats_proc_stop,
+	.show = qtaguid_stats_proc_show,
+};
+
+static int proc_qtaguid_stats_open(struct inode *inode, struct file *file)
+{
+	return seq_open_private(file, &proc_qtaguid_stats_seqops,
+				sizeof(struct proc_print_info));
+}
+
+static const struct file_operations proc_qtaguid_stats_fops = {
+	.open		= proc_qtaguid_stats_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release_private,
+};
+
+/*------------------------------------------*/
+static int __init qtaguid_proc_register(struct proc_dir_entry **res_procdir)
+{
+	int ret;
+	*res_procdir = proc_mkdir(module_procdirname, init_net.proc_net);
+	if (!*res_procdir) {
+		pr_err("qtaguid: failed to create proc/.../xt_qtaguid\n");
+		ret = -ENOMEM;
+		goto no_dir;
+	}
+
+	xt_qtaguid_ctrl_file = proc_create_data("ctrl", proc_ctrl_perms,
+						*res_procdir,
+						&proc_qtaguid_ctrl_fops,
+						NULL);
+	if (!xt_qtaguid_ctrl_file) {
+		pr_err("qtaguid: failed to create xt_qtaguid/ctrl "
+			"file\n");
+		ret = -ENOMEM;
+		goto no_ctrl_entry;
+	}
+
+	xt_qtaguid_stats_file = proc_create_data("stats", proc_stats_perms,
+						 *res_procdir,
+						 &proc_qtaguid_stats_fops,
+						 NULL);
+	if (!xt_qtaguid_stats_file) {
+		pr_err("qtaguid: failed to create xt_qtaguid/stats "
+			"file\n");
+		ret = -ENOMEM;
+		goto no_stats_entry;
+	}
+	/*
+	 * TODO: add support for counter hacking
+	 * xt_qtaguid_stats_file->write_proc = qtaguid_stats_proc_write;
+	 */
+	return 0;
+
+no_stats_entry:
+	remove_proc_entry("ctrl", *res_procdir);
+no_ctrl_entry:
+	remove_proc_entry("xt_qtaguid", NULL);
+no_dir:
+	return ret;
+}
+
+static struct xt_match qtaguid_mt_reg __read_mostly = {
+	/*
+	 * This module masquerades as the "owner" module so that iptables
+	 * tools can deal with it.
+	 */
+	.name       = "owner",
+	.revision   = 1,
+	.family     = NFPROTO_UNSPEC,
+	.match      = qtaguid_mt,
+	.matchsize  = sizeof(struct xt_qtaguid_match_info),
+	.me         = THIS_MODULE,
+};
+
+static int __init qtaguid_mt_init(void)
+{
+	if (qtaguid_proc_register(&xt_qtaguid_procdir)
+	    || iface_stat_init(xt_qtaguid_procdir)
+	    || xt_register_match(&qtaguid_mt_reg)
+	    || misc_register(&qtu_device))
+		return -1;
+	return 0;
+}
+
+/*
+ * TODO: allow unloading of the module.
+ * For now stats are permanent.
+ * Kconfig forces 'y/n' and never an 'm'.
+ */
+
+module_init(qtaguid_mt_init);
+MODULE_AUTHOR("jpa <jpa@google.com>");
+MODULE_DESCRIPTION("Xtables: socket owner+tag matching and associated stats");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_owner");
+MODULE_ALIAS("ip6t_owner");
+MODULE_ALIAS("ipt_qtaguid");
+MODULE_ALIAS("ip6t_qtaguid");
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
new file mode 100644
index 0000000..6dc14a9
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -0,0 +1,352 @@
+/*
+ * Kernel iptables module to track stats for packets based on user tags.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_INTERNAL_H__
+#define __XT_QTAGUID_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock_types.h>
+#include <linux/workqueue.h>
+
+/* Iface handling */
+#define IDEBUG_MASK (1<<0)
+/* Iptable Matching. Per packet. */
+#define MDEBUG_MASK (1<<1)
+/* Red-black tree handling. Per packet. */
+#define RDEBUG_MASK (1<<2)
+/* procfs ctrl/stats handling */
+#define CDEBUG_MASK (1<<3)
+/* dev and resource tracking */
+#define DDEBUG_MASK (1<<4)
+
+/* E.g (IDEBUG_MASK | CDEBUG_MASK | DDEBUG_MASK) */
+#define DEFAULT_DEBUG_MASK 0
+
+/*
+ * (Un)Define these *DEBUG to compile out/in the pr_debug calls.
+ * All undef: text size ~ 0x3030; all def: ~ 0x4404.
+ */
+#define IDEBUG
+#define MDEBUG
+#define RDEBUG
+#define CDEBUG
+#define DDEBUG
+
+#define MSK_DEBUG(mask, ...) do {                           \
+		if (unlikely(qtaguid_debug_mask & (mask)))  \
+			pr_debug(__VA_ARGS__);              \
+	} while (0)
+#ifdef IDEBUG
+#define IF_DEBUG(...) MSK_DEBUG(IDEBUG_MASK, __VA_ARGS__)
+#else
+#define IF_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef MDEBUG
+#define MT_DEBUG(...) MSK_DEBUG(MDEBUG_MASK, __VA_ARGS__)
+#else
+#define MT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef RDEBUG
+#define RB_DEBUG(...) MSK_DEBUG(RDEBUG_MASK, __VA_ARGS__)
+#else
+#define RB_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef CDEBUG
+#define CT_DEBUG(...) MSK_DEBUG(CDEBUG_MASK, __VA_ARGS__)
+#else
+#define CT_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+#ifdef DDEBUG
+#define DR_DEBUG(...) MSK_DEBUG(DDEBUG_MASK, __VA_ARGS__)
+#else
+#define DR_DEBUG(...) no_printk(__VA_ARGS__)
+#endif
+
+extern uint qtaguid_debug_mask;
+
+/*---------------------------------------------------------------------------*/
+/*
+ * Tags:
+ *
+ * They represent what the data usage counters will be tracked against.
+ * By default a tag is just based on the UID.
+ * The UID is used as the base for policing, and cannot be ignored.
+ * So a tag will always at least represent a UID (uid_tag).
+ *
+ * A tag can be augmented with an "accounting tag" which is associated
+ * with a UID.
+ * User space can set the acct_tag portion of the tag which is then used
+ * with sockets: all data belonging to that socket will be counted against the
+ * tag. The policing is then based on the tag's uid_tag portion,
+ * and stats are collected for the acct_tag portion separately.
+ *
+ * There could be
+ * a:  {acct_tag=1, uid_tag=10003}
+ * b:  {acct_tag=2, uid_tag=10003}
+ * c:  {acct_tag=3, uid_tag=10003}
+ * d:  {acct_tag=0, uid_tag=10003}
+ * a, b, and c represent tags associated with specific sockets.
+ * d is for the totals for that uid, including all untagged traffic.
+ * Typically d is used with policing/quota rules.
+ *
+ * We want tag_t big enough to distinguish uid_t and acct_tag.
+ * It might become a struct if needed.
+ * Nothing should be using it as an int.
+ */
+typedef uint64_t tag_t;  /* Only used via accessors */
+
+#define TAG_UID_MASK 0xFFFFFFFFULL
+#define TAG_ACCT_MASK (~0xFFFFFFFFULL)
+
+static inline int tag_compare(tag_t t1, tag_t t2)
+{
+	return t1 < t2 ? -1 : t1 == t2 ? 0 : 1;
+}
+
+static inline tag_t combine_atag_with_uid(tag_t acct_tag, uid_t uid)
+{
+	return acct_tag | uid;
+}
+static inline tag_t make_tag_from_uid(uid_t uid)
+{
+	return uid;
+}
+static inline uid_t get_uid_from_tag(tag_t tag)
+{
+	return tag & TAG_UID_MASK;
+}
+static inline tag_t get_utag_from_tag(tag_t tag)
+{
+	return tag & TAG_UID_MASK;
+}
+static inline tag_t get_atag_from_tag(tag_t tag)
+{
+	return tag & TAG_ACCT_MASK;
+}
+
+static inline bool valid_atag(tag_t tag)
+{
+	return !(tag & TAG_UID_MASK);
+}
+static inline tag_t make_atag_from_value(uint32_t value)
+{
+	return (uint64_t)value << 32;
+}
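+
+/*
+ * Illustrative composition of a tag (arbitrary example values): for uid 10003
+ * (0x2713) and acct value 0x2B,
+ *   make_atag_from_value(0x2B)          == 0x0000002B00000000
+ *   combine_atag_with_uid(atag, 10003)  == 0x0000002B00002713
+ *   get_uid_from_tag(tag)               == 10003
+ *   get_atag_from_tag(tag)              == 0x0000002B00000000
+ */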
+/*---------------------------------------------------------------------------*/
+
+/*
+ * Maximum number of socket tags that a UID is allowed to have active.
+ * Multiple processes belonging to the same UID contribute towards this limit.
+ * Special UIDs that can impersonate a UID also contribute (e.g. download
+ * manager, ...)
+ */
+#define DEFAULT_MAX_SOCK_TAGS 1024
+
+/*
+ * For now we only track 2 sets of counters.
+ * The default set is 0.
+ * Userspace can activate another set for a given uid being tracked.
+ */
+#define IFS_MAX_COUNTER_SETS 2
+
+enum ifs_tx_rx {
+	IFS_TX,
+	IFS_RX,
+	IFS_MAX_DIRECTIONS
+};
+
+/* For now, TCP, UDP, the rest */
+enum ifs_proto {
+	IFS_TCP,
+	IFS_UDP,
+	IFS_PROTO_OTHER,
+	IFS_MAX_PROTOS
+};
+
+struct byte_packet_counters {
+	uint64_t bytes;
+	uint64_t packets;
+};
+
+struct data_counters {
+	struct byte_packet_counters bpc[IFS_MAX_COUNTER_SETS][IFS_MAX_DIRECTIONS][IFS_MAX_PROTOS];
+};
+
+static inline uint64_t dc_sum_bytes(struct data_counters *counters,
+				    int set,
+				    enum ifs_tx_rx direction)
+{
+	return counters->bpc[set][direction][IFS_TCP].bytes
+		+ counters->bpc[set][direction][IFS_UDP].bytes
+		+ counters->bpc[set][direction][IFS_PROTO_OTHER].bytes;
+}
+
+static inline uint64_t dc_sum_packets(struct data_counters *counters,
+				      int set,
+				      enum ifs_tx_rx direction)
+{
+	return counters->bpc[set][direction][IFS_TCP].packets
+		+ counters->bpc[set][direction][IFS_UDP].packets
+		+ counters->bpc[set][direction][IFS_PROTO_OTHER].packets;
+}
+
+
+/* Generic X based nodes used as a base for rb_tree ops */
+struct tag_node {
+	struct rb_node node;
+	tag_t tag;
+};
+
+struct tag_stat {
+	struct tag_node tn;
+	struct data_counters counters;
+	/*
+	 * If this tag is acct_tag based, we need to count against the
+	 * matching parent uid_tag.
+	 */
+	struct data_counters *parent_counters;
+};
+
+struct iface_stat {
+	struct list_head list;  /* in iface_stat_list */
+	char *ifname;
+	bool active;
+	/* net_dev is only valid for active iface_stat */
+	struct net_device *net_dev;
+
+	struct byte_packet_counters totals_via_dev[IFS_MAX_DIRECTIONS];
+	struct data_counters totals_via_skb;
+	/*
+	 * We keep the last_known, because some devices reset their counters
+	 * just before NETDEV_UP, while some will reset just before
+	 * NETDEV_REGISTER (which is more normal).
+	 * So now, if the device didn't do a NETDEV_UNREGISTER and we see
+	 * its current dev stats smaller than what was previously known, we
+	 * assume an UNREGISTER and just use the last_known.
+	 */
+	struct byte_packet_counters last_known[IFS_MAX_DIRECTIONS];
+	/* last_known is usable when last_known_valid is true */
+	bool last_known_valid;
+
+	struct proc_dir_entry *proc_ptr;
+
+	struct rb_root tag_stat_tree;
+	spinlock_t tag_stat_list_lock;
+};
+
+/* This is needed to create proc_dir_entries from atomic context. */
+struct iface_stat_work {
+	struct work_struct iface_work;
+	struct iface_stat *iface_entry;
+};
+
+/*
+ * Track the tag that this socket is transferring data for, and not necessarily
+ * the uid that owns the socket.
+ * This is the tag against which tag_stat.counters will be billed.
+ * These structs need to be looked up by sock and pid.
+ */
+struct sock_tag {
+	struct rb_node sock_node;
+	struct sock *sk;  /* Only used as a number, never dereferenced */
+	/* The socket is needed for sockfd_put() */
+	struct socket *socket;
+	/* Used to associate with a given pid */
+	struct list_head list;   /* in proc_qtu_data.sock_tag_list */
+	pid_t pid;
+
+	tag_t tag;
+};
+
+struct qtaguid_event_counts {
+	/* Various successful events */
+	atomic64_t sockets_tagged;
+	atomic64_t sockets_untagged;
+	atomic64_t counter_set_changes;
+	atomic64_t delete_cmds;
+	atomic64_t iface_events;  /* Number of NETDEV_* events handled */
+
+	atomic64_t match_calls;   /* Number of times iptables called mt */
+	/* Number of times iptables called mt from pre or post routing hooks */
+	atomic64_t match_calls_prepost;
+	/*
+	 * match_found_sk_*: numbers related to the netfilter matching
+	 * function finding a sock for the sk_buff.
+	 * Total skbs processed is sum(match_found*).
+	 */
+	atomic64_t match_found_sk;   /* An sk was already in the sk_buff. */
+	/* The connection tracker had or didn't have the sk. */
+	atomic64_t match_found_sk_in_ct;
+	atomic64_t match_found_no_sk_in_ct;
+	/*
+	 * No sk could be found. No apparent owner. Could happen with
+	 * unsolicited traffic.
+	 */
+	atomic64_t match_no_sk;
+	/*
+	 * The file ptr in the sk_socket wasn't there.
+	 * This might happen for traffic while the socket is being closed.
+	 */
+	atomic64_t match_no_sk_file;
+};
+
+/* Track the set active_set for the given tag. */
+struct tag_counter_set {
+	struct tag_node tn;
+	int active_set;
+};
+
+/*----------------------------------------------*/
+/*
+ * The qtu uid data is used to track resources that are created directly or
+ * indirectly by processes (uid tracked).
+ * It is shared by the processes with the same uid.
+ * Some of the resources will be counted to prevent further rogue allocations;
+ * others will need freeing once the owner process (uid) exits.
+ */
+struct uid_tag_data {
+	struct rb_node node;
+	uid_t uid;
+
+	/*
+	 * For the uid, how many accounting tags have been set.
+	 */
+	int num_active_tags;
+	/* Track the number of proc_qtu_data that reference it */
+	int num_pqd;
+	struct rb_root tag_ref_tree;
+	/* No tag_node_tree_lock; use uid_tag_data_tree_lock */
+};
+
+struct tag_ref {
+	struct tag_node tn;
+
+	/*
+	 * This tracks the number of active sockets that have a tag on them
+	 * which matches this tag_ref.tn.tag.
+	 * A tag ref can live on after the sockets are untagged.
+	 * A tag ref can only be removed during a tag delete command.
+	 */
+	int num_sock_tags;
+};
+
+struct proc_qtu_data {
+	struct rb_node node;
+	pid_t pid;
+
+	struct uid_tag_data *parent_tag_data;
+
+	/* Tracks the sock_tags that need freeing upon this proc's death */
+	struct list_head sock_tag_list;
+	/* No spinlock_t sock_tag_list_lock; use the global one. */
+};
+
+/*----------------------------------------------*/
+#endif  /* ifndef __XT_QTAGUID_INTERNAL_H__ */
diff --git a/net/netfilter/xt_qtaguid_print.c b/net/netfilter/xt_qtaguid_print.c
new file mode 100644
index 0000000..f6a00a3
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.c
@@ -0,0 +1,566 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Most of the functions in this file just waste time if DEBUG is not defined.
+ * The matching xt_qtaguid_print.h will static inline empty funcs if the needed
+ * debug flags are not defined.
+ * Those funcs that fail to allocate memory will panic as there is no need to
+ * hobble along just pretending to do the requested work.
+ */
+
+#define DEBUG
+
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/net.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+
+
+#include "xt_qtaguid_internal.h"
+#include "xt_qtaguid_print.h"
+
+#ifdef DDEBUG
+
+static void _bug_on_err_or_null(void *ptr)
+{
+	if (IS_ERR_OR_NULL(ptr)) {
+		pr_err("qtaguid: kmalloc failed\n");
+		BUG();
+	}
+}
+
+char *pp_tag_t(tag_t *tag)
+{
+	char *res;
+
+	if (!tag)
+		res = kasprintf(GFP_ATOMIC, "tag_t@null{}");
+	else
+		res = kasprintf(GFP_ATOMIC,
+				"tag_t@%p{tag=0x%llx, uid=%u}",
+				tag, *tag, get_uid_from_tag(*tag));
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+	char *res;
+
+	if (!dc)
+		res = kasprintf(GFP_ATOMIC, "data_counters@null{}");
+	else if (showValues)
+		res = kasprintf(
+			GFP_ATOMIC, "data_counters@%p{"
+			"set0{"
+			"rx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}, "
+			"tx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}}, "
+			"set1{"
+			"rx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}, "
+			"tx{"
+			"tcp{b=%llu, p=%llu}, "
+			"udp{b=%llu, p=%llu},"
+			"other{b=%llu, p=%llu}}}}",
+			dc,
+			dc->bpc[0][IFS_RX][IFS_TCP].bytes,
+			dc->bpc[0][IFS_RX][IFS_TCP].packets,
+			dc->bpc[0][IFS_RX][IFS_UDP].bytes,
+			dc->bpc[0][IFS_RX][IFS_UDP].packets,
+			dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[0][IFS_RX][IFS_PROTO_OTHER].packets,
+			dc->bpc[0][IFS_TX][IFS_TCP].bytes,
+			dc->bpc[0][IFS_TX][IFS_TCP].packets,
+			dc->bpc[0][IFS_TX][IFS_UDP].bytes,
+			dc->bpc[0][IFS_TX][IFS_UDP].packets,
+			dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[0][IFS_TX][IFS_PROTO_OTHER].packets,
+			dc->bpc[1][IFS_RX][IFS_TCP].bytes,
+			dc->bpc[1][IFS_RX][IFS_TCP].packets,
+			dc->bpc[1][IFS_RX][IFS_UDP].bytes,
+			dc->bpc[1][IFS_RX][IFS_UDP].packets,
+			dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[1][IFS_RX][IFS_PROTO_OTHER].packets,
+			dc->bpc[1][IFS_TX][IFS_TCP].bytes,
+			dc->bpc[1][IFS_TX][IFS_TCP].packets,
+			dc->bpc[1][IFS_TX][IFS_UDP].bytes,
+			dc->bpc[1][IFS_TX][IFS_UDP].packets,
+			dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].bytes,
+			dc->bpc[1][IFS_TX][IFS_PROTO_OTHER].packets);
+	else
+		res = kasprintf(GFP_ATOMIC, "data_counters@%p{...}", dc);
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_tag_node(struct tag_node *tn)
+{
+	char *tag_str;
+	char *res;
+
+	if (!tn) {
+		res = kasprintf(GFP_ATOMIC, "tag_node@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tag_str = pp_tag_t(&tn->tag);
+	res = kasprintf(GFP_ATOMIC,
+			"tag_node@%p{tag=%s}",
+			tn, tag_str);
+	_bug_on_err_or_null(res);
+	kfree(tag_str);
+	return res;
+}
+
+char *pp_tag_ref(struct tag_ref *tr)
+{
+	char *tn_str;
+	char *res;
+
+	if (!tr) {
+		res = kasprintf(GFP_ATOMIC, "tag_ref@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tn_str = pp_tag_node(&tr->tn);
+	res = kasprintf(GFP_ATOMIC,
+			"tag_ref@%p{%s, num_sock_tags=%d}",
+			tr, tn_str, tr->num_sock_tags);
+	_bug_on_err_or_null(res);
+	kfree(tn_str);
+	return res;
+}
+
+char *pp_tag_stat(struct tag_stat *ts)
+{
+	char *tn_str;
+	char *counters_str;
+	char *parent_counters_str;
+	char *res;
+
+	if (!ts) {
+		res = kasprintf(GFP_ATOMIC, "tag_stat@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tn_str = pp_tag_node(&ts->tn);
+	counters_str = pp_data_counters(&ts->counters, true);
+	parent_counters_str = pp_data_counters(ts->parent_counters, false);
+	res = kasprintf(GFP_ATOMIC,
+			"tag_stat@%p{%s, counters=%s, parent_counters=%s}",
+			ts, tn_str, counters_str, parent_counters_str);
+	_bug_on_err_or_null(res);
+	kfree(tn_str);
+	kfree(counters_str);
+	kfree(parent_counters_str);
+	return res;
+}
+
+char *pp_iface_stat(struct iface_stat *is)
+{
+	char *res;
+	if (!is) {
+		res = kasprintf(GFP_ATOMIC, "iface_stat@null{}");
+	} else {
+		struct data_counters *cnts = &is->totals_via_skb;
+		res = kasprintf(GFP_ATOMIC, "iface_stat@%p{"
+				"list=list_head{...}, "
+				"ifname=%s, "
+				"total_dev={rx={bytes=%llu, "
+				"packets=%llu}, "
+				"tx={bytes=%llu, "
+				"packets=%llu}}, "
+				"total_skb={rx={bytes=%llu, "
+				"packets=%llu}, "
+				"tx={bytes=%llu, "
+				"packets=%llu}}, "
+				"last_known_valid=%d, "
+				"last_known={rx={bytes=%llu, "
+				"packets=%llu}, "
+				"tx={bytes=%llu, "
+				"packets=%llu}}, "
+				"active=%d, "
+				"net_dev=%p, "
+				"proc_ptr=%p, "
+				"tag_stat_tree=rb_root{...}}",
+				is,
+				is->ifname,
+				is->totals_via_dev[IFS_RX].bytes,
+				is->totals_via_dev[IFS_RX].packets,
+				is->totals_via_dev[IFS_TX].bytes,
+				is->totals_via_dev[IFS_TX].packets,
+				dc_sum_bytes(cnts, 0, IFS_RX),
+				dc_sum_packets(cnts, 0, IFS_RX),
+				dc_sum_bytes(cnts, 0, IFS_TX),
+				dc_sum_packets(cnts, 0, IFS_TX),
+				is->last_known_valid,
+				is->last_known[IFS_RX].bytes,
+				is->last_known[IFS_RX].packets,
+				is->last_known[IFS_TX].bytes,
+				is->last_known[IFS_TX].packets,
+				is->active,
+				is->net_dev,
+				is->proc_ptr);
+	}
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_sock_tag(struct sock_tag *st)
+{
+	char *tag_str;
+	char *res;
+
+	if (!st) {
+		res = kasprintf(GFP_ATOMIC, "sock_tag@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	tag_str = pp_tag_t(&st->tag);
+	res = kasprintf(GFP_ATOMIC, "sock_tag@%p{"
+			"sock_node=rb_node{...}, "
+			"sk=%p socket=%p (f_count=%lu), list=list_head{...}, "
+			"pid=%u, tag=%s}",
+			st, st->sk, st->socket, atomic_long_read(
+				&st->socket->file->f_count),
+			st->pid, tag_str);
+	_bug_on_err_or_null(res);
+	kfree(tag_str);
+	return res;
+}
+
+char *pp_uid_tag_data(struct uid_tag_data *utd)
+{
+	char *res;
+
+	if (!utd)
+		res = kasprintf(GFP_ATOMIC, "uid_tag_data@null{}");
+	else
+		res = kasprintf(GFP_ATOMIC, "uid_tag_data@%p{"
+				"uid=%u, num_active_acct_tags=%d, "
+				"num_pqd=%d, "
+				"tag_node_tree=rb_root{...}, "
+				"proc_qtu_data_tree=rb_root{...}}",
+				utd, utd->uid,
+				utd->num_active_tags, utd->num_pqd);
+	_bug_on_err_or_null(res);
+	return res;
+}
+
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+	char *parent_tag_data_str;
+	char *res;
+
+	if (!pqd) {
+		res = kasprintf(GFP_ATOMIC, "proc_qtu_data@null{}");
+		_bug_on_err_or_null(res);
+		return res;
+	}
+	parent_tag_data_str = pp_uid_tag_data(pqd->parent_tag_data);
+	res = kasprintf(GFP_ATOMIC, "proc_qtu_data@%p{"
+			"node=rb_node{...}, pid=%u, "
+			"parent_tag_data=%s, "
+			"sock_tag_list=list_head{...}}",
+			pqd, pqd->pid, parent_tag_data_str
+		);
+	_bug_on_err_or_null(res);
+	kfree(parent_tag_data_str);
+	return res;
+}
+
+/*------------------------------------------*/
+void prdebug_sock_tag_tree(int indent_level,
+			   struct rb_root *sock_tag_tree)
+{
+	struct rb_node *node;
+	struct sock_tag *sock_tag_entry;
+	char *str;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(sock_tag_tree)) {
+		str = "sock_tag_tree=rb_root{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "sock_tag_tree=rb_root{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(sock_tag_tree);
+	     node;
+	     node = rb_next(node)) {
+		sock_tag_entry = rb_entry(node, struct sock_tag, sock_node);
+		str = pp_sock_tag(sock_tag_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_sock_tag_list(int indent_level,
+			   struct list_head *sock_tag_list)
+{
+	struct sock_tag *sock_tag_entry;
+	char *str;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (list_empty(sock_tag_list)) {
+		str = "sock_tag_list=list_head{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "sock_tag_list=list_head{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	list_for_each_entry(sock_tag_entry, sock_tag_list, list) {
+		str = pp_sock_tag(sock_tag_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_proc_qtu_data_tree(int indent_level,
+				struct rb_root *proc_qtu_data_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct proc_qtu_data *proc_qtu_data_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(proc_qtu_data_tree)) {
+		str = "proc_qtu_data_tree=rb_root{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "proc_qtu_data_tree=rb_root{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(proc_qtu_data_tree);
+	     node;
+	     node = rb_next(node)) {
+		proc_qtu_data_entry = rb_entry(node,
+					       struct proc_qtu_data,
+					       node);
+		str = pp_proc_qtu_data(proc_qtu_data_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+			 str);
+		kfree(str);
+		indent_level++;
+		prdebug_sock_tag_list(indent_level,
+				      &proc_qtu_data_entry->sock_tag_list);
+		indent_level--;
+
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct tag_ref *tag_ref_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(tag_ref_tree)) {
+		str = "tag_ref_tree{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "tag_ref_tree{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(tag_ref_tree);
+	     node;
+	     node = rb_next(node)) {
+		tag_ref_entry = rb_entry(node,
+					 struct tag_ref,
+					 tn.node);
+		str = pp_tag_ref(tag_ref_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level,
+			 str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_uid_tag_data_tree(int indent_level,
+			       struct rb_root *uid_tag_data_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct uid_tag_data *uid_tag_data_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(uid_tag_data_tree)) {
+		str = "uid_tag_data_tree=rb_root{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "uid_tag_data_tree=rb_root{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(uid_tag_data_tree);
+	     node;
+	     node = rb_next(node)) {
+		uid_tag_data_entry = rb_entry(node, struct uid_tag_data,
+					      node);
+		str = pp_uid_tag_data(uid_tag_data_entry);
+		pr_debug("%*d: %s,\n", indent_level*2, indent_level, str);
+		kfree(str);
+		if (!RB_EMPTY_ROOT(&uid_tag_data_entry->tag_ref_tree)) {
+			indent_level++;
+			prdebug_tag_ref_tree(indent_level,
+					     &uid_tag_data_entry->tag_ref_tree);
+			indent_level--;
+		}
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_tag_stat_tree(int indent_level,
+				  struct rb_root *tag_stat_tree)
+{
+	char *str;
+	struct rb_node *node;
+	struct tag_stat *ts_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (RB_EMPTY_ROOT(tag_stat_tree)) {
+		str = "tag_stat_tree{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "tag_stat_tree{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	for (node = rb_first(tag_stat_tree);
+	     node;
+	     node = rb_next(node)) {
+		ts_entry = rb_entry(node, struct tag_stat, tn.node);
+		str = pp_tag_stat(ts_entry);
+		pr_debug("%*d: %s\n", indent_level*2, indent_level,
+			 str);
+		kfree(str);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+void prdebug_iface_stat_list(int indent_level,
+			     struct list_head *iface_stat_list)
+{
+	char *str;
+	struct iface_stat *iface_entry;
+
+	if (!unlikely(qtaguid_debug_mask & DDEBUG_MASK))
+		return;
+
+	if (list_empty(iface_stat_list)) {
+		str = "iface_stat_list=list_head{}";
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		return;
+	}
+
+	str = "iface_stat_list=list_head{";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+	indent_level++;
+	list_for_each_entry(iface_entry, iface_stat_list, list) {
+		str = pp_iface_stat(iface_entry);
+		pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+		kfree(str);
+
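+		/* The per-interface tag_stat tree is protected by
+		 * tag_stat_list_lock, so hold it while dumping.
+		 */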
+		spin_lock_bh(&iface_entry->tag_stat_list_lock);
+		if (!RB_EMPTY_ROOT(&iface_entry->tag_stat_tree)) {
+			indent_level++;
+			prdebug_tag_stat_tree(indent_level,
+					      &iface_entry->tag_stat_tree);
+			indent_level--;
+		}
+		spin_unlock_bh(&iface_entry->tag_stat_list_lock);
+	}
+	indent_level--;
+	str = "}";
+	pr_debug("%*d: %s\n", indent_level*2, indent_level, str);
+}
+
+#endif  /* ifdef DDEBUG */
+/*------------------------------------------*/
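+/* Human-readable names for the NETDEV_* notifier events, indexed by event
+ * number; NETDEV_UP starts at 1, so index 0 is reported as "netdev_unknown".
+ */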
+static const char * const netdev_event_strings[] = {
+	"netdev_unknown",
+	"NETDEV_UP",
+	"NETDEV_DOWN",
+	"NETDEV_REBOOT",
+	"NETDEV_CHANGE",
+	"NETDEV_REGISTER",
+	"NETDEV_UNREGISTER",
+	"NETDEV_CHANGEMTU",
+	"NETDEV_CHANGEADDR",
+	"NETDEV_GOING_DOWN",
+	"NETDEV_CHANGENAME",
+	"NETDEV_FEAT_CHANGE",
+	"NETDEV_BONDING_FAILOVER",
+	"NETDEV_PRE_UP",
+	"NETDEV_PRE_TYPE_CHANGE",
+	"NETDEV_POST_TYPE_CHANGE",
+	"NETDEV_POST_INIT",
+	"NETDEV_UNREGISTER_BATCH",
+	"NETDEV_RELEASE",
+	"NETDEV_NOTIFY_PEERS",
+	"NETDEV_JOIN",
+};
+
+const char *netdev_evt_str(int netdev_event)
+{
+	if (netdev_event < 0
+	    || netdev_event >= ARRAY_SIZE(netdev_event_strings))
+		return "bad event num";
+	return netdev_event_strings[netdev_event];
+}
diff --git a/net/netfilter/xt_qtaguid_print.h b/net/netfilter/xt_qtaguid_print.h
new file mode 100644
index 0000000..b63871a
--- /dev/null
+++ b/net/netfilter/xt_qtaguid_print.h
@@ -0,0 +1,120 @@
+/*
+ * Pretty printing Support for iptables xt_qtaguid module.
+ *
+ * (C) 2011 Google, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __XT_QTAGUID_PRINT_H__
+#define __XT_QTAGUID_PRINT_H__
+
+#include "xt_qtaguid_internal.h"
+
+#ifdef DDEBUG
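+/* With DDEBUG enabled the pp_*() helpers return kmalloc()'d strings that the
+ * caller must kfree(); the !DDEBUG stubs below simply return NULL.
+ */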
+
+char *pp_tag_t(tag_t *tag);
+char *pp_data_counters(struct data_counters *dc, bool showValues);
+char *pp_tag_node(struct tag_node *tn);
+char *pp_tag_ref(struct tag_ref *tr);
+char *pp_tag_stat(struct tag_stat *ts);
+char *pp_iface_stat(struct iface_stat *is);
+char *pp_sock_tag(struct sock_tag *st);
+char *pp_uid_tag_data(struct uid_tag_data *qtd);
+char *pp_proc_qtu_data(struct proc_qtu_data *pqd);
+
+/*------------------------------------------*/
+void prdebug_sock_tag_list(int indent_level,
+			   struct list_head *sock_tag_list);
+void prdebug_sock_tag_tree(int indent_level,
+			   struct rb_root *sock_tag_tree);
+void prdebug_proc_qtu_data_tree(int indent_level,
+				struct rb_root *proc_qtu_data_tree);
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree);
+void prdebug_uid_tag_data_tree(int indent_level,
+			       struct rb_root *uid_tag_data_tree);
+void prdebug_tag_stat_tree(int indent_level,
+			   struct rb_root *tag_stat_tree);
+void prdebug_iface_stat_list(int indent_level,
+			     struct list_head *iface_stat_list);
+
+#else
+
+/*------------------------------------------*/
+static inline char *pp_tag_t(tag_t *tag)
+{
+	return NULL;
+}
+static inline char *pp_data_counters(struct data_counters *dc, bool showValues)
+{
+	return NULL;
+}
+static inline char *pp_tag_node(struct tag_node *tn)
+{
+	return NULL;
+}
+static inline char *pp_tag_ref(struct tag_ref *tr)
+{
+	return NULL;
+}
+static inline char *pp_tag_stat(struct tag_stat *ts)
+{
+	return NULL;
+}
+static inline char *pp_iface_stat(struct iface_stat *is)
+{
+	return NULL;
+}
+static inline char *pp_sock_tag(struct sock_tag *st)
+{
+	return NULL;
+}
+static inline char *pp_uid_tag_data(struct uid_tag_data *qtd)
+{
+	return NULL;
+}
+static inline char *pp_proc_qtu_data(struct proc_qtu_data *pqd)
+{
+	return NULL;
+}
+
+/*------------------------------------------*/
+static inline
+void prdebug_sock_tag_list(int indent_level,
+			   struct list_head *sock_tag_list)
+{
+}
+static inline
+void prdebug_sock_tag_tree(int indent_level,
+			   struct rb_root *sock_tag_tree)
+{
+}
+static inline
+void prdebug_proc_qtu_data_tree(int indent_level,
+				struct rb_root *proc_qtu_data_tree)
+{
+}
+static inline
+void prdebug_tag_ref_tree(int indent_level, struct rb_root *tag_ref_tree)
+{
+}
+static inline
+void prdebug_uid_tag_data_tree(int indent_level,
+			       struct rb_root *uid_tag_data_tree)
+{
+}
+static inline
+void prdebug_tag_stat_tree(int indent_level,
+			   struct rb_root *tag_stat_tree)
+{
+}
+static inline
+void prdebug_iface_stat_list(int indent_level,
+			     struct list_head *iface_stat_list)
+{
+}
+#endif
+/*------------------------------------------*/
+const char *netdev_evt_str(int netdev_event);
+#endif  /* ifndef __XT_QTAGUID_PRINT_H__ */
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 0000000..834594a
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,401 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * 	netfilter module to enforce network quotas
+ * 	Sam Johnston <samj@samj.net>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License; either
+ *	version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* For compatibility, these definitions are copied from the
+ * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
+#define ULOG_MAC_LEN	80
+#define ULOG_PREFIX_LEN	32
+
+/* Format of the ULOG packets passed through netlink */
+typedef struct ulog_packet_msg {
+	unsigned long mark;
+	long timestamp_sec;
+	long timestamp_usec;
+	unsigned int hook;
+	char indev_name[IFNAMSIZ];
+	char outdev_name[IFNAMSIZ];
+	size_t data_len;
+	char prefix[ULOG_PREFIX_LEN];
+	unsigned char mac_len;
+	unsigned char mac[ULOG_MAC_LEN];
+	unsigned char payload[0];
+} ulog_packet_msg_t;
+#endif
+
+/**
+ * @lock:	lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+	u_int64_t quota;
+	spinlock_t lock;
+	struct list_head list;
+	atomic_t ref;
+	char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+	struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.C */
+static unsigned int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+		 "Event number for NETLINK_NFLOG message. 0 disables log. "
+		 "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static kuid_t quota_list_uid = KUIDT_INIT(0);
+static kgid_t quota_list_gid = KGIDT_INIT(0);
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+	ulog_packet_msg_t *pm;
+	struct sk_buff *log_skb;
+	size_t size;
+	struct nlmsghdr *nlh;
+
+	if (!qlog_nl_event)
+		return;
+
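+	/* Reserve at least NLMSG_GOODSIZE so the netlink header plus the ULOG
+	 * payload always fit in the allocation.
+	 */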
+	size = NLMSG_SPACE(sizeof(*pm));
+	size = max(size, (size_t)NLMSG_GOODSIZE);
+	log_skb = alloc_skb(size, GFP_ATOMIC);
+	if (!log_skb) {
+		pr_err("xt_quota2: cannot alloc skb for logging\n");
+		return;
+	}
+
+	nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+			sizeof(*pm), 0);
+	if (!nlh) {
+		pr_err("xt_quota2: nlmsg_put failed\n");
+		kfree_skb(log_skb);
+		return;
+	}
+	pm = nlmsg_data(nlh);
+	if (skb->tstamp.tv64 == 0)
+		__net_timestamp((struct sk_buff *)skb);
+	pm->data_len = 0;
+	pm->hook = hooknum;
+	if (prefix != NULL)
+		strlcpy(pm->prefix, prefix, sizeof(pm->prefix));
+	else
+		*(pm->prefix) = '\0';
+	if (in)
+		strlcpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+	else
+		pm->indev_name[0] = '\0';
+
+	if (out)
+		strlcpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+	else
+		pm->outdev_name[0] = '\0';
+
+	NETLINK_CB(log_skb).dst_group = 1;
+	pr_debug("throwing 1 packet to netlink group 1\n");
+	netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+		       const struct sk_buff *skb,
+		       const struct net_device *in,
+		       const struct net_device *out,
+		       const char *prefix)
+{
+}
+#endif  /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+			       size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char tmp[24];
+	size_t tmp_size;
+
+	spin_lock_bh(&e->lock);
+	tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+	spin_unlock_bh(&e->lock);
+	return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+				size_t size, loff_t *ppos)
+{
+	struct xt_quota_counter *e = PDE_DATA(file_inode(file));
+	char buf[sizeof("18446744073709551616")];
+
+	if (size > sizeof(buf) - 1)
+		size = sizeof(buf) - 1;
+	if (copy_from_user(buf, input, size) != 0)
+		return -EFAULT;
+	buf[size] = '\0';
+
+	spin_lock_bh(&e->lock);
+	e->quota = simple_strtoull(buf, NULL, 0);
+	spin_unlock_bh(&e->lock);
+	return size;
+}
+
+static const struct file_operations q2_counter_fops = {
+	.read		= quota_proc_read,
+	.write		= quota_proc_write,
+	.llseek		= default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+	struct xt_quota_counter *e;
+	unsigned int size;
+
+	/* Do not need all the procfs things for anonymous counters. */
+	size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+	e = kmalloc(size, GFP_KERNEL);
+	if (e == NULL)
+		return NULL;
+
+	e->quota = q->quota;
+	spin_lock_init(&e->lock);
+	if (!anon) {
+		INIT_LIST_HEAD(&e->list);
+		atomic_set(&e->ref, 1);
+		strlcpy(e->name, q->name, sizeof(e->name));
+	}
+	return e;
+}
+
+/**
+ * q2_get_counter - get a reference to the named counter, creating it if needed
+ * @q:	quota match info carrying the counter name
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+	struct proc_dir_entry *p;
+	struct xt_quota_counter *e = NULL;
+	struct xt_quota_counter *new_e;
+
+	if (*q->name == '\0')
+		return q2_new_counter(q, true);
+
+	/* No need to hold a lock while getting a new counter */
+	new_e = q2_new_counter(q, false);
+	if (new_e == NULL)
+		goto out;
+
+	spin_lock_bh(&counter_list_lock);
+	list_for_each_entry(e, &counter_list, list)
+		if (strcmp(e->name, q->name) == 0) {
+			atomic_inc(&e->ref);
+			spin_unlock_bh(&counter_list_lock);
+			kfree(new_e);
+			pr_debug("xt_quota2: old counter name=%s", e->name);
+			return e;
+		}
+	e = new_e;
+	pr_debug("xt_quota2: new_counter name=%s", e->name);
+	list_add_tail(&e->list, &counter_list);
+	/* The new entry holds a refcount of 1 and is not directly
+	 * destructible: this function has not yet returned it, so iptables
+	 * has no reference it could use to destroy it. For another rule to
+	 * destroy it, this function would first have to be re-invoked and
+	 * acquire a fresh reference to the same named quota. Nobody will
+	 * access e->procfs_entry either, so it is safe to drop the lock. */
+	spin_unlock_bh(&counter_list_lock);
+
+	/* proc_create_data() may sleep, so it cannot be called under the lock */
+	p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+	                      proc_xt_quota, &q2_counter_fops, e);
+
+	if (IS_ERR_OR_NULL(p)) {
+		spin_lock_bh(&counter_list_lock);
+		list_del(&e->list);
+		spin_unlock_bh(&counter_list_lock);
+		goto out;
+	}
+	proc_set_user(p, quota_list_uid, quota_list_gid);
+	return e;
+
+ out:
+	kfree(e);
+	return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+	pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+	if (q->flags & ~XT_QUOTA_MASK)
+		return -EINVAL;
+
+	q->name[sizeof(q->name)-1] = '\0';
+	if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+		printk(KERN_ERR "xt_quota.3: illegal name\n");
+		return -EINVAL;
+	}
+
+	q->master = q2_get_counter(q);
+	if (q->master == NULL) {
+		printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+	struct xt_quota_mtinfo2 *q = par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+
+	if (*q->name == '\0') {
+		kfree(e);
+		return;
+	}
+
+	spin_lock_bh(&counter_list_lock);
+	if (!atomic_dec_and_test(&e->ref)) {
+		spin_unlock_bh(&counter_list_lock);
+		return;
+	}
+
+	list_del(&e->list);
+	remove_proc_entry(e->name, proc_xt_quota);
+	spin_unlock_bh(&counter_list_lock);
+	kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+	struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+	struct xt_quota_counter *e = q->master;
+	bool ret = q->flags & XT_QUOTA_INVERT;
+
+	spin_lock_bh(&e->lock);
+	if (q->flags & XT_QUOTA_GROW) {
+		/*
+		 * While no_change is pointless in "grow" mode, we will
+		 * implement it here simply to have a consistent behavior.
+		 */
+		if (!(q->flags & XT_QUOTA_NO_CHANGE)) {
+			e->quota += (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+		}
+		ret = true;
+	} else {
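+		/* Countdown mode: while quota remains, debit it per byte (or
+		 * per packet) and return the non-inverted match result.
+		 */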
+		if (e->quota >= skb->len) {
+			if (!(q->flags & XT_QUOTA_NO_CHANGE))
+				e->quota -= (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+			ret = !ret;
+		} else {
+			/* We are transitioning, log that fact. */
+			if (e->quota) {
+				quota2_log(par->hooknum,
+					   skb,
+					   par->in,
+					   par->out,
+					   q->name);
+			}
+			/* we do not allow even small packets from now on */
+			e->quota = 0;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+	return ret;
+}
+
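+/* The match is registered for both IPv4 and IPv6, sharing revision 3 of the
+ * xt_quota_mtinfo2 layout.
+ */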
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV4,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+	{
+		.name       = "quota2",
+		.revision   = 3,
+		.family     = NFPROTO_IPV6,
+		.checkentry = quota_mt2_check,
+		.match      = quota_mt2,
+		.destroy    = quota_mt2_destroy,
+		.matchsize  = sizeof(struct xt_quota_mtinfo2),
+		.me         = THIS_MODULE,
+	},
+};
+
+static int __init quota_mt2_init(void)
+{
+	int ret;
+	pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+	nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+	if (!nflognl)
+		return -ENOMEM;
+#endif
+
+	proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+	if (proc_xt_quota == NULL)
+		return -EACCES;
+
+	ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	if (ret < 0)
+		remove_proc_entry("xt_quota", init_net.proc_net);
+	pr_debug("xt_quota2: init() %d", ret);
+	return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+	xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+	remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index b10ade2..a52fbaf 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -144,13 +144,14 @@
 	}
 }
 
-static struct sock *xt_socket_lookup_slow_v4(struct net *net,
+struct sock *xt_socket_lookup_slow_v4(struct net *net,
 					     const struct sk_buff *skb,
 					     const struct net_device *indev)
 {
 	const struct iphdr *iph = ip_hdr(skb);
 	struct sk_buff *data_skb = NULL;
 	int doff = 0;
+	struct sock *sk = skb->sk;
 	__be32 uninitialized_var(daddr), uninitialized_var(saddr);
 	__be16 uninitialized_var(dport), uninitialized_var(sport);
 	u8 uninitialized_var(protocol);
@@ -205,9 +206,16 @@
 	}
 #endif
 
-	return xt_socket_get_sock_v4(net, data_skb, doff, protocol, saddr,
-				     daddr, sport, dport, indev);
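+	/* Prefer a socket already attached to the skb: take an extra reference
+	 * on it instead of doing a fresh lookup, so the caller's unconditional
+	 * sock_gen_put() balances either path.
+	 */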
+	if (sk)
+		atomic_inc(&sk->sk_refcnt);
+	else
+		sk = xt_socket_get_sock_v4(dev_net(skb->dev), data_skb, doff,
+					   protocol, saddr, daddr, sport,
+					   dport, indev);
+
+	return sk;
 }
+EXPORT_SYMBOL(xt_socket_lookup_slow_v4);
 
 static bool
 socket_match(const struct sk_buff *skb, struct xt_action_param *par,
@@ -239,8 +247,7 @@
 		    transparent)
 			pskb->mark = sk->sk_mark;
 
-		if (sk != skb->sk)
-			sock_gen_put(sk);
+		sock_gen_put(sk);
 
 		if (wildcard || !transparent)
 			sk = NULL;
@@ -344,10 +351,11 @@
 	return NULL;
 }
 
-static struct sock *xt_socket_lookup_slow_v6(struct net *net,
+struct sock *xt_socket_lookup_slow_v6(struct net *net,
 					     const struct sk_buff *skb,
 					     const struct net_device *indev)
 {
+	struct sock *sk = skb->sk;
 	__be16 uninitialized_var(dport), uninitialized_var(sport);
 	const struct in6_addr *daddr = NULL, *saddr = NULL;
 	struct ipv6hdr *iph = ipv6_hdr(skb);
@@ -387,9 +395,16 @@
 		return NULL;
 	}
 
-	return xt_socket_get_sock_v6(net, data_skb, doff, tproto, saddr, daddr,
-				     sport, dport, indev);
+	if (sk)
+		atomic_inc(&sk->sk_refcnt);
+	else
+		sk = xt_socket_get_sock_v6(dev_net(skb->dev), data_skb, doff,
+					   tproto, saddr, daddr, sport, dport,
+					   indev);
+
+	return sk;
 }
+EXPORT_SYMBOL(xt_socket_lookup_slow_v6);
 
 static bool
 socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 49c28e8..b09d475 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -994,7 +994,7 @@
 
 static int genl_bind(struct net *net, int group)
 {
-	int i, err = -ENOENT;
+	int i, err = 0;
 
 	down_read(&cb_lock);
 	for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
diff --git a/net/rfkill/Kconfig b/net/rfkill/Kconfig
index 868f1ad..8463a6d 100644
--- a/net/rfkill/Kconfig
+++ b/net/rfkill/Kconfig
@@ -10,6 +10,11 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called rfkill.
 
+config RFKILL_PM
+	bool "Power off on suspend"
+	depends on RFKILL && PM
+	default y
+
 # LED trigger support
 config RFKILL_LEDS
 	bool
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 884027f..9b4260d 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -796,7 +796,7 @@
 }
 EXPORT_SYMBOL(rfkill_resume_polling);
 
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_RFKILL_PM
 static int rfkill_suspend(struct device *dev)
 {
 	struct rfkill *rfkill = to_rfkill(dev);
@@ -837,7 +837,9 @@
 	.dev_release	= rfkill_release,
 	.dev_groups	= rfkill_dev_groups,
 	.dev_uevent	= rfkill_dev_uevent,
+#ifdef CONFIG_RFKILL_PM
 	.pm		= RFKILL_PM_OPS,
+#endif
 };
 
 bool rfkill_blocked(struct rfkill *rfkill)
diff --git a/net/rmnet_data/Kconfig b/net/rmnet_data/Kconfig
new file mode 100644
index 0000000..36d5817
--- /dev/null
+++ b/net/rmnet_data/Kconfig
@@ -0,0 +1,29 @@
+#
+# RMNET Data and MAP driver
+#
+
+menuconfig RMNET_DATA
+	depends on NETDEVICES
+	bool "RmNet Data and MAP driver"
+	---help---
+	  If you say Y here, then the rmnet_data module will be statically
+	  compiled into the kernel. The rmnet data module provides MAP
+	  functionality for embedded and bridged traffic.
+if RMNET_DATA
+
+config RMNET_DATA_FC
+	bool "RmNet Data Flow Control"
+	depends on NET_SCHED && NET_SCH_PRIO
+	---help---
+	  Say Y here if you want RmNet data to handle in-band flow control and
+	  ioctl-based flow control. This depends on the net scheduler and prio
+	  queue capability being present in the kernel. In-band flow control
+	  requires the MAP protocol to be used.
+
+config RMNET_DATA_DEBUG_PKT
+	bool "Packet Debug Logging"
+	---help---
+	  Say Y here if you want RmNet data to be able to log packets in the
+	  main system log. This should not be enabled on production builds as
+	  it can impact system performance. Note that simply enabling it here
+	  will not enable the logging; it must be enabled at run-time as well.
+endif # RMNET_DATA
diff --git a/net/rmnet_data/Makefile b/net/rmnet_data/Makefile
new file mode 100644
index 0000000..ccb8b5b
--- /dev/null
+++ b/net/rmnet_data/Makefile
@@ -0,0 +1,14 @@
+#
+# Makefile for the RMNET Data module
+#
+
+rmnet_data-y		 := rmnet_data_main.o
+rmnet_data-y		 += rmnet_data_config.o
+rmnet_data-y		 += rmnet_data_vnd.o
+rmnet_data-y		 += rmnet_data_handlers.o
+rmnet_data-y		 += rmnet_map_data.o
+rmnet_data-y		 += rmnet_map_command.o
+rmnet_data-y		 += rmnet_data_stats.o
+obj-$(CONFIG_RMNET_DATA) += rmnet_data.o
+
+CFLAGS_rmnet_data_main.o := -I$(src)
diff --git a/net/rmnet_data/rmnet_data_config.c b/net/rmnet_data/rmnet_data_config.c
new file mode 100644
index 0000000..8eab5ae
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_config.c
@@ -0,0 +1,1244 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration engine
+ */
+
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/rmnet_data.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_config.h"
+#include "rmnet_data_handlers.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_data_private.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_CONFIG);
+
+/* Local Definitions and Declarations */
+static struct sock *nl_socket_handle;
+
+#ifndef RMNET_KERNEL_PRE_3_8
+static struct netlink_kernel_cfg rmnet_netlink_cfg = {
+	.input = rmnet_config_netlink_msg_handler
+};
+#endif
+
+static struct notifier_block rmnet_dev_notifier = {
+	.notifier_call = rmnet_config_notify_cb,
+	.next = 0,
+	.priority = 0
+};
+
+struct rmnet_free_vnd_work {
+	struct work_struct work;
+	int vnd_id[RMNET_DATA_MAX_VND];
+	int count;
+};
+
+/* Init and Cleanup */
+
+#ifdef RMNET_KERNEL_PRE_3_8
+static struct sock *_rmnet_config_start_netlink(void)
+{
+	return netlink_kernel_create(&init_net,
+				     RMNET_NETLINK_PROTO,
+				     0,
+				     rmnet_config_netlink_msg_handler,
+				     NULL,
+				     THIS_MODULE);
+}
+#else
+static struct sock *_rmnet_config_start_netlink(void)
+{
+	return netlink_kernel_create(&init_net,
+				     RMNET_NETLINK_PROTO,
+				     &rmnet_netlink_cfg);
+}
+#endif /* RMNET_KERNEL_PRE_3_8 */
+
+/* rmnet_config_init() - Startup init
+ *
+ * Registers netlink protocol with kernel and opens socket. Netlink handler is
+ * registered with kernel.
+ */
+int rmnet_config_init(void)
+{
+	int rc;
+
+	nl_socket_handle = _rmnet_config_start_netlink();
+	if (!nl_socket_handle) {
+		LOGE("%s", "Failed to init netlink socket");
+		return RMNET_INIT_ERROR;
+	}
+
+	rc = register_netdevice_notifier(&rmnet_dev_notifier);
+	if (rc != 0) {
+		LOGE("Failed to register device notifier; rc=%d", rc);
+		/* TODO: Cleanup the nl socket */
+		return RMNET_INIT_ERROR;
+	}
+
+	return 0;
+}
+
+/* rmnet_config_exit() - Cleans up all netlink related resources */
+void rmnet_config_exit(void)
+{
+	int rc;
+
+	netlink_kernel_release(nl_socket_handle);
+	rc = unregister_netdevice_notifier(&rmnet_dev_notifier);
+	if (rc != 0)
+		LOGE("Failed to unregister device notifier; rc=%d", rc);
+}
+
+/* Helper Functions */
+
+/* _rmnet_is_physical_endpoint_associated() - Determines if device is associated
+ * @dev:      Device to check
+ *
+ * Compares device rx_handler callback pointer against known function
+ *
+ * Return:
+ *      - 1 if associated
+ *      - 0 if NOT associated
+ */
+static inline int _rmnet_is_physical_endpoint_associated(struct net_device *dev)
+{
+	rx_handler_func_t *rx_handler;
+
+	rx_handler = rcu_dereference(dev->rx_handler);
+
+	if (rx_handler == rmnet_rx_handler)
+		return 1;
+	else
+		return 0;
+}
+
+/* _rmnet_get_phys_ep_config() - Get physical ep config for an associated device
+ * @dev:      Device to get endpoint configuration from
+ *
+ * Return:
+ *     - pointer to configuration if successful
+ *     - 0 (null) if device is not associated
+ */
+struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
+						(struct net_device *dev)
+{
+	struct rmnet_phys_ep_conf_s *_rmnet_phys_ep_config;
+
+	if (_rmnet_is_physical_endpoint_associated(dev)) {
+		_rmnet_phys_ep_config = (struct rmnet_phys_ep_conf_s *)
+					rcu_dereference(dev->rx_handler_data);
+		if (_rmnet_phys_ep_config && _rmnet_phys_ep_config->config)
+			return (struct rmnet_phys_ep_config *)
+					_rmnet_phys_ep_config->config;
+		else
+			return 0;
+	} else {
+		return 0;
+	}
+}
+
+/* _rmnet_get_logical_ep() - Gets the logical end point configuration
+ * structure for a network device
+ * @dev:             Device to get endpoint configuration from
+ * @config_id:       Logical endpoint id on device
+ * Retrieves the logical_endpoint_config structure.
+ *
+ * Return:
+ *      - End point configuration structure
+ *      - NULL in case of an error
+ */
+struct rmnet_logical_ep_conf_s *_rmnet_get_logical_ep(struct net_device *dev,
+						      int config_id)
+{
+	struct rmnet_phys_ep_config *config;
+	struct rmnet_logical_ep_conf_s *epconfig_l;
+
+	if (rmnet_vnd_is_vnd(dev)) {
+		epconfig_l = rmnet_vnd_get_le_config(dev);
+	} else {
+		config = _rmnet_get_phys_ep_config(dev);
+
+		if (!config)
+			return NULL;
+
+		if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+			epconfig_l = &config->local_ep;
+		else
+			epconfig_l = &config->muxed_ep[config_id];
+	}
+
+	return epconfig_l;
+}
+
+/* Netlink Handler */
+static void _rmnet_netlink_set_link_egress_data_format
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+	dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	resp_rmnet->return_code =
+		rmnet_set_egress_data_format(dev,
+					     rmnet_header->data_format.flags,
+					     rmnet_header->data_format.agg_size,
+					     rmnet_header->data_format.agg_count
+					     );
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_set_link_ingress_data_format
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	resp_rmnet->return_code = rmnet_set_ingress_data_format(
+					dev,
+					rmnet_header->data_format.flags,
+					rmnet_header->data_format.tail_spacing);
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_set_logical_ep_config
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev, *dev2;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
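+	/* Reject endpoint ids outside [-1, 254] before looking up any device */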
+	if (rmnet_header->local_ep_config.ep_id < -1 ||
+	    rmnet_header->local_ep_config.ep_id > 254) {
+		resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+		return;
+	}
+
+	dev = dev_get_by_name(&init_net,
+			      rmnet_header->local_ep_config.dev);
+
+	dev2 = dev_get_by_name(&init_net,
+			       rmnet_header->local_ep_config.next_dev);
+
+	if (dev && dev2)
+		resp_rmnet->return_code =
+			rmnet_set_logical_endpoint_config(
+				dev,
+				rmnet_header->local_ep_config.ep_id,
+				rmnet_header->local_ep_config.operating_mode,
+				dev2);
+	else
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	if (dev)
+		dev_put(dev);
+	if (dev2)
+		dev_put(dev2);
+}
+
+static void _rmnet_netlink_unset_logical_ep_config
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+	if (rmnet_header->local_ep_config.ep_id < -1 ||
+	    rmnet_header->local_ep_config.ep_id > 254) {
+		resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+		return;
+	}
+
+	dev = dev_get_by_name(&init_net,
+			      rmnet_header->local_ep_config.dev);
+
+	if (dev) {
+		resp_rmnet->return_code =
+			rmnet_unset_logical_endpoint_config(
+				dev,
+				rmnet_header->local_ep_config.ep_id);
+		dev_put(dev);
+	} else {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+	}
+}
+
+static void _rmnet_netlink_get_logical_ep_config
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+	if (rmnet_header->local_ep_config.ep_id < -1 ||
+	    rmnet_header->local_ep_config.ep_id > 254) {
+		resp_rmnet->return_code = RMNET_CONFIG_BAD_ARGUMENTS;
+		return;
+	}
+
+	dev = dev_get_by_name(&init_net,
+			      rmnet_header->local_ep_config.dev);
+
+	if (dev)
+		resp_rmnet->return_code =
+			rmnet_get_logical_endpoint_config(
+				dev,
+				rmnet_header->local_ep_config.ep_id,
+				&resp_rmnet->local_ep_config.operating_mode,
+				resp_rmnet->local_ep_config.next_dev,
+				sizeof(resp_rmnet->local_ep_config.next_dev));
+	else {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	if (resp_rmnet->return_code == RMNET_CONFIG_OK) {
+		/* Begin Data */
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+		resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)
+						->local_ep_config);
+	}
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_associate_network_device
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	dev = dev_get_by_name(&init_net, rmnet_header->data);
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	resp_rmnet->return_code = rmnet_associate_network_device(dev);
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_unassociate_network_device
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	dev = dev_get_by_name(&init_net, rmnet_header->data);
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	resp_rmnet->return_code = rmnet_unassociate_network_device(dev);
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_get_network_device_associated
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	dev = dev_get_by_name(&init_net, rmnet_header->data);
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	resp_rmnet->return_code = _rmnet_is_physical_endpoint_associated(dev);
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_get_link_egress_data_format
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+	struct rmnet_phys_ep_config *config;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	config = _rmnet_get_phys_ep_config(dev);
+	if (!config) {
+		resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+		dev_put(dev);
+		return;
+	}
+
+	/* Begin Data */
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+	resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)
+					->data_format);
+	resp_rmnet->data_format.flags = config->egress_data_format;
+	resp_rmnet->data_format.agg_count = config->egress_agg_count;
+	resp_rmnet->data_format.agg_size  = config->egress_agg_size;
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_get_link_ingress_data_format
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	struct net_device *dev;
+	struct rmnet_phys_ep_config *config;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	dev = dev_get_by_name(&init_net, rmnet_header->data_format.dev);
+	if (!dev) {
+		resp_rmnet->return_code = RMNET_CONFIG_NO_SUCH_DEVICE;
+		return;
+	}
+
+	config = _rmnet_get_phys_ep_config(dev);
+	if (!config) {
+		resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+		dev_put(dev);
+		return;
+	}
+
+	/* Begin Data */
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+	resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)
+					->data_format);
+	resp_rmnet->data_format.flags = config->ingress_data_format;
+	resp_rmnet->data_format.tail_spacing = config->tail_spacing;
+	dev_put(dev);
+}
+
+static void _rmnet_netlink_get_vnd_name
+					(struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	int r;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	r = rmnet_vnd_get_name(rmnet_header->vnd.id, resp_rmnet->vnd.vnd_name,
+			       RMNET_MAX_STR_LEN);
+
+	if (r != 0) {
+		resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+		return;
+	}
+
+	/* Begin Data */
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNDATA;
+	resp_rmnet->arg_length = sizeof(((struct rmnet_nl_msg_s *)0)->vnd);
+}
+
+static void _rmnet_netlink_add_del_vnd_tc_flow
+					(u32 command,
+					 struct rmnet_nl_msg_s *rmnet_header,
+					 struct rmnet_nl_msg_s *resp_rmnet)
+{
+	u32 id;
+	u32 map_flow_id;
+	u32 tc_flow_id;
+
+	if (!rmnet_header || !resp_rmnet)
+		return;
+
+	resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+
+	id = rmnet_header->flow_control.id;
+	map_flow_id = rmnet_header->flow_control.map_flow_id;
+	tc_flow_id = rmnet_header->flow_control.tc_flow_id;
+
+	switch (command) {
+	case RMNET_NETLINK_ADD_VND_TC_FLOW:
+		resp_rmnet->return_code = rmnet_vnd_add_tc_flow(id,
+								map_flow_id,
+								tc_flow_id);
+		break;
+	case RMNET_NETLINK_DEL_VND_TC_FLOW:
+		resp_rmnet->return_code = rmnet_vnd_del_tc_flow(id,
+								map_flow_id,
+								tc_flow_id);
+		break;
+	default:
+		LOGM("Called with unhandled command %d", command);
+		resp_rmnet->return_code = RMNET_CONFIG_INVALID_REQUEST;
+		break;
+	}
+}
+
+/* rmnet_config_netlink_msg_handler() - Netlink message handler callback
+ * @skb:      Packet containing netlink messages
+ *
+ * Standard kernel-expected format for a netlink message handler. Processes SKBs
+ * which contain RmNet data specific netlink messages.
+ */
+void rmnet_config_netlink_msg_handler(struct sk_buff *skb)
+{
+	struct nlmsghdr *nlmsg_header, *resp_nlmsg;
+	struct rmnet_nl_msg_s *rmnet_header, *resp_rmnet;
+	int return_pid, response_data_length;
+	struct sk_buff *skb_response;
+
+	response_data_length = 0;
+	nlmsg_header = (struct nlmsghdr *)skb->data;
+	rmnet_header = (struct rmnet_nl_msg_s *)nlmsg_data(nlmsg_header);
+
+	LOGL("Netlink message pid=%d, seq=%d, length=%d, rmnet_type=%d",
+	     nlmsg_header->nlmsg_pid,
+	     nlmsg_header->nlmsg_seq,
+	     nlmsg_header->nlmsg_len,
+	     rmnet_header->message_type);
+
+	return_pid = nlmsg_header->nlmsg_pid;
+
+	skb_response = nlmsg_new(sizeof(struct nlmsghdr)
+				 + sizeof(struct rmnet_nl_msg_s),
+				 GFP_KERNEL);
+
+	if (!skb_response) {
+		LOGH("%s", "Failed to allocate response buffer");
+		return;
+	}
+
+	resp_nlmsg = nlmsg_put(skb_response,
+			       0,
+			       nlmsg_header->nlmsg_seq,
+			       NLMSG_DONE,
+			       sizeof(struct rmnet_nl_msg_s),
+			       0);
+
+	if (!resp_nlmsg) {
+		kfree_skb(skb_response);
+		return;
+	}
+
+	resp_rmnet = nlmsg_data(resp_nlmsg);
+
+	resp_rmnet->message_type = rmnet_header->message_type;
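+	/* Every handler below manipulates net_device state, so hold the RTNL
+	 * across the dispatch; RMNET_NETLINK_FREE_VND is the one case that
+	 * temporarily drops it.
+	 */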
+	rtnl_lock();
+	switch (rmnet_header->message_type) {
+	case RMNET_NETLINK_ASSOCIATE_NETWORK_DEVICE:
+		_rmnet_netlink_associate_network_device
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_UNASSOCIATE_NETWORK_DEVICE:
+		_rmnet_netlink_unassociate_network_device
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_GET_NETWORK_DEVICE_ASSOCIATED:
+		_rmnet_netlink_get_network_device_associated
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_SET_LINK_EGRESS_DATA_FORMAT:
+		_rmnet_netlink_set_link_egress_data_format
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_GET_LINK_EGRESS_DATA_FORMAT:
+		_rmnet_netlink_get_link_egress_data_format
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_SET_LINK_INGRESS_DATA_FORMAT:
+		_rmnet_netlink_set_link_ingress_data_format
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_GET_LINK_INGRESS_DATA_FORMAT:
+		_rmnet_netlink_get_link_ingress_data_format
+						(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_SET_LOGICAL_EP_CONFIG:
+		_rmnet_netlink_set_logical_ep_config(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_UNSET_LOGICAL_EP_CONFIG:
+		_rmnet_netlink_unset_logical_ep_config(rmnet_header,
+						       resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_GET_LOGICAL_EP_CONFIG:
+		_rmnet_netlink_get_logical_ep_config(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_NEW_VND:
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		resp_rmnet->return_code =
+					 rmnet_create_vnd(rmnet_header->vnd.id);
+		break;
+
+	case RMNET_NETLINK_NEW_VND_WITH_PREFIX:
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		resp_rmnet->return_code = rmnet_create_vnd_prefix(
+						rmnet_header->vnd.id,
+						rmnet_header->vnd.vnd_name);
+		break;
+
+	case RMNET_NETLINK_FREE_VND:
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		/* Please check rmnet_vnd_free_dev documentation regarding
+		 * the below locking sequence
+		 */
+		rtnl_unlock();
+		resp_rmnet->return_code = rmnet_free_vnd(rmnet_header->vnd.id);
+		rtnl_lock();
+		break;
+
+	case RMNET_NETLINK_GET_VND_NAME:
+		_rmnet_netlink_get_vnd_name(rmnet_header, resp_rmnet);
+		break;
+
+	case RMNET_NETLINK_DEL_VND_TC_FLOW:
+	case RMNET_NETLINK_ADD_VND_TC_FLOW:
+		_rmnet_netlink_add_del_vnd_tc_flow(rmnet_header->message_type,
+						   rmnet_header,
+						   resp_rmnet);
+		break;
+
+	default:
+		resp_rmnet->crd = RMNET_NETLINK_MSG_RETURNCODE;
+		resp_rmnet->return_code = RMNET_CONFIG_UNKNOWN_MESSAGE;
+		break;
+	}
+	rtnl_unlock();
+	nlmsg_unicast(nl_socket_handle, skb_response, return_pid);
+	LOGD("%s", "Done processing command");
+}
+
+/* Configuration API */
+
+/* rmnet_unassociate_network_device() - Unassociate network device
+ * @dev:      Device to unassociate
+ *
+ * Frees all structures generated for the device and unregisters the rx_handler.
+ * todo: needs some sanity verification first (is the device in use, etc.)
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ *      - RMNET_CONFIG_INVALID_REQUEST if device is not already associated
+ *      - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_unassociate_network_device(struct net_device *dev)
+{
+	struct rmnet_phys_ep_conf_s *config;
+	int config_id = RMNET_LOCAL_LOGICAL_ENDPOINT;
+	struct rmnet_logical_ep_conf_s *epconfig_l;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s);", dev->name);
+
+	if (!_rmnet_is_physical_endpoint_associated(dev))
+		return RMNET_CONFIG_INVALID_REQUEST;
+
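+	/* Refuse to tear down the physical endpoint while any logical
+	 * endpoint still references it.
+	 */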
+	for (; config_id < RMNET_DATA_MAX_LOGICAL_EP; config_id++) {
+		epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+		if (epconfig_l && epconfig_l->refcount)
+			return RMNET_CONFIG_DEVICE_IN_USE;
+	}
+
+	config = (struct rmnet_phys_ep_conf_s *)
+		rcu_dereference(dev->rx_handler_data);
+
+	if (!config)
+		return RMNET_CONFIG_UNKNOWN_ERROR;
+
+	kfree(config);
+
+	netdev_rx_handler_unregister(dev);
+
+	/* Explicitly release the reference from the device */
+	dev_put(dev);
+	trace_rmnet_unassociate(dev);
+	return RMNET_CONFIG_OK;
+}
+
+/* rmnet_set_ingress_data_format() - Set ingress data format on network device
+ * @dev:                  Device to set the ingress data format on
+ * @ingress_data_format:  32-bit unsigned bitmask of ingress format flags
+ * @tail_spacing:         Requested tail spacing
+ *
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ *      - RMNET_CONFIG_INVALID_REQUEST if the device is not associated
+ */
+int rmnet_set_ingress_data_format(struct net_device *dev,
+				  u32 ingress_data_format,
+				  uint8_t  tail_spacing)
+{
+	struct rmnet_phys_ep_config *config;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s,0x%08X);", dev->name, ingress_data_format);
+
+	config = _rmnet_get_phys_ep_config(dev);
+
+	if (!config)
+		return RMNET_CONFIG_INVALID_REQUEST;
+
+	config->ingress_data_format = ingress_data_format;
+	config->tail_spacing = tail_spacing;
+
+	return RMNET_CONFIG_OK;
+}
+
+/* rmnet_set_egress_data_format() - Set egress data format on network device
+ * @dev:                 Device to set the egress data format on
+ * @egress_data_format:  32-bit unsigned bitmask of egress format flags
+ * @agg_size:            Egress aggregation size
+ * @agg_count:           Egress aggregation packet count
+ *
+ * Network device must already have association with RmNet Data driver
+ * todo: Bounds check on agg_*
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ */
+int rmnet_set_egress_data_format(struct net_device *dev,
+				 u32 egress_data_format,
+				 u16 agg_size,
+				 u16 agg_count)
+{
+	struct rmnet_phys_ep_config *config;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s,0x%08X, %d, %d);",
+	     dev->name, egress_data_format, agg_size, agg_count);
+
+	config = _rmnet_get_phys_ep_config(dev);
+
+	if (!config)
+		return RMNET_CONFIG_UNKNOWN_ERROR;
+
+	config->egress_data_format = egress_data_format;
+	config->egress_agg_size = agg_size;
+	config->egress_agg_count = agg_count;
+
+	return RMNET_CONFIG_OK;
+}
+
+/* rmnet_associate_network_device() - Associate network device
+ * @dev:      Device to register with RmNet data
+ *
+ * Typically used on physical network devices. Registers RX handler and private
+ * metadata structures.
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE dev is null
+ *      - RMNET_CONFIG_INVALID_REQUEST if the device to be associated is a vnd
+ *      - RMNET_CONFIG_DEVICE_IN_USE if dev rx_handler is already filled
+ *      - RMNET_CONFIG_DEVICE_IN_USE if netdev_rx_handler_register() fails
+ */
+int rmnet_associate_network_device(struct net_device *dev)
+{
+	struct rmnet_phys_ep_conf_s *config;
+	struct rmnet_phys_ep_config *conf;
+	int rc;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	LOGL("(%s);", dev->name);
+
+	if (_rmnet_is_physical_endpoint_associated(dev)) {
+		LOGM("%s is already registered", dev->name);
+		return RMNET_CONFIG_DEVICE_IN_USE;
+	}
+
+	if (rmnet_vnd_is_vnd(dev)) {
+		LOGM("%s is a vnd", dev->name);
+		return RMNET_CONFIG_INVALID_REQUEST;
+	}
+
+	config = kmalloc(sizeof(*config), GFP_ATOMIC);
+	conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
+
+	if (!config || !conf) {
+		kfree(config);
+		kfree(conf);
+		return RMNET_CONFIG_NOMEM;
+	}
+
+	memset(config, 0, sizeof(struct rmnet_phys_ep_conf_s));
+	memset(conf, 0, sizeof(struct rmnet_phys_ep_config));
+
+	config->config = conf;
+	conf->dev = dev;
+	spin_lock_init(&conf->agg_lock);
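+	/* Recycled skbs are simply freed by default */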
+	config->recycle = kfree_skb;
+
+	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
+
+	if (rc) {
+		LOGM("netdev_rx_handler_register returns %d", rc);
+		kfree(config);
+		kfree(conf);
+		return RMNET_CONFIG_DEVICE_IN_USE;
+	}
+
+	/* Explicitly hold a reference to the device */
+	dev_hold(dev);
+	trace_rmnet_associate(dev);
+	return RMNET_CONFIG_OK;
+}
+
+/* _rmnet_set_logical_endpoint_config() - Set logical endpoint config on device
+ * @dev:         Device to set endpoint configuration on
+ * @config_id:   logical endpoint id on device
+ * @epconfig:    endpoint configuration structure to set
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ *      - RMNET_CONFIG_DEVICE_IN_USE if device already has a logical ep
+ *      - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int _rmnet_set_logical_endpoint_config(struct net_device *dev,
+				       int config_id,
+				       struct rmnet_logical_ep_conf_s *epconfig)
+{
+	struct rmnet_logical_ep_conf_s *epconfig_l;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+	    config_id >= RMNET_DATA_MAX_LOGICAL_EP)
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+
+	epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+	if (!epconfig_l)
+		return RMNET_CONFIG_UNKNOWN_ERROR;
+
+	if (epconfig_l->refcount)
+		return RMNET_CONFIG_DEVICE_IN_USE;
+
+	memcpy(epconfig_l, epconfig, sizeof(struct rmnet_logical_ep_conf_s));
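+	/* The local endpoint always uses mux_id 0; muxed endpoints reuse
+	 * their config_id as the MAP channel id.
+	 */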
+	if (config_id == RMNET_LOCAL_LOGICAL_ENDPOINT)
+		epconfig_l->mux_id = 0;
+	else
+		epconfig_l->mux_id = config_id;
+
+	/* Explicitly hold a reference to the egress device */
+	dev_hold(epconfig_l->egress_dev);
+	return RMNET_CONFIG_OK;
+}
+
+/* _rmnet_unset_logical_endpoint_config() - Un-set the logical endpoint config
+ * on device
+ * @dev:         Device to set endpoint configuration on
+ * @config_id:   logical endpoint id on device
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ *      - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int _rmnet_unset_logical_endpoint_config(struct net_device *dev,
+					 int config_id)
+{
+	struct rmnet_logical_ep_conf_s *epconfig_l = 0;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+	    config_id >= RMNET_DATA_MAX_LOGICAL_EP)
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+
+	epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+	if (!epconfig_l || !epconfig_l->refcount)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	/* Explicitly release the reference from the egress device */
+	dev_put(epconfig_l->egress_dev);
+	memset(epconfig_l, 0, sizeof(struct rmnet_logical_ep_conf_s));
+
+	return RMNET_CONFIG_OK;
+}
+
+/* rmnet_set_logical_endpoint_config() - Set logical endpoint config on a device
+ * @dev:            Device to set endpoint configuration on
+ * @config_id:      logical endpoint id on device
+ * @rmnet_mode:     endpoint mode. Values from: rmnet_config_endpoint_modes_e
+ * @egress_device:  device node to forward packet to once done processing in
+ *                  ingress/egress handlers
+ *
+ * Creates a logical_endpoint_config structure and fills in the information from
+ * function arguments. Calls _rmnet_set_logical_endpoint_config() to finish
+ * configuration. Network device must already have association with RmNet Data
+ * driver
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is null
+ *      - RMNET_CONFIG_BAD_EGRESS_DEVICE if egress device is not handled by
+ *                                       RmNet data module
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE if device to set config on is null
+ *      - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int rmnet_set_logical_endpoint_config(struct net_device *dev,
+				      int config_id,
+				      u8 rmnet_mode,
+				      struct net_device *egress_dev)
+{
+	struct rmnet_logical_ep_conf_s epconfig;
+
+	LOGL("(%s, %d, %d, %s);",
+	     dev->name, config_id, rmnet_mode, egress_dev->name);
+
+	if (!egress_dev ||
+	    ((!_rmnet_is_physical_endpoint_associated(egress_dev)) &&
+	    (!rmnet_vnd_is_vnd(egress_dev)))) {
+		return RMNET_CONFIG_BAD_EGRESS_DEVICE;
+	}
+
+	memset(&epconfig, 0, sizeof(struct rmnet_logical_ep_conf_s));
+	epconfig.refcount = 1;
+	epconfig.rmnet_mode = rmnet_mode;
+	epconfig.egress_dev = egress_dev;
+
+	return _rmnet_set_logical_endpoint_config(dev, config_id, &epconfig);
+}
+
+/* rmnet_unset_logical_endpoint_config() - Un-set logical endpoint configuration
+ * on a device
+ * @dev:            Device to set endpoint configuration on
+ * @config_id:      logical endpoint id on device
+ *
+ * Retrieves the logical_endpoint_config structure and frees the egress device.
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated
+ *      - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range
+ */
+int rmnet_unset_logical_endpoint_config(struct net_device *dev,
+					int config_id)
+{
+	LOGL("(%s, %d);", dev->name, config_id);
+
+	if (!dev || ((!_rmnet_is_physical_endpoint_associated(dev)) &&
+		     (!rmnet_vnd_is_vnd(dev)))) {
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+	}
+
+	return _rmnet_unset_logical_endpoint_config(dev, config_id);
+}
+
+/* rmnet_get_logical_endpoint_config() - Gets logical endpoint configuration
+ * for a device
+ * @dev:                  Device to get endpoint configuration on
+ * @config_id:            logical endpoint id on device
+ * @rmnet_mode:           (I/O) logical endpoint mode
+ * @egress_dev_name:      (I/O) logical endpoint egress device name
+ * @egress_dev_name_size: The maximal size of the I/O egress_dev_name
+ *
+ * Retrieves the logical_endpoint_config structure.
+ * Network device must already have association with RmNet Data driver
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_UNKNOWN_ERROR net_device private section is null
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE device is not associated
+ *      - RMNET_CONFIG_BAD_ARGUMENTS if logical endpoint id is out of range or
+ *        if the provided buffer size for egress dev name is too short
+ */
+int rmnet_get_logical_endpoint_config(struct net_device *dev,
+				      int config_id,
+				      u8 *rmnet_mode,
+				      u8 *egress_dev_name,
+				      size_t egress_dev_name_size)
+{
+	struct rmnet_logical_ep_conf_s *epconfig_l = 0;
+	size_t strlcpy_res = 0;
+
+	LOGL("(%s, %d);", dev->name, config_id);
+
+	if (!egress_dev_name || !rmnet_mode)
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+	if (config_id < RMNET_LOCAL_LOGICAL_ENDPOINT ||
+	    config_id >= RMNET_DATA_MAX_LOGICAL_EP)
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+
+	epconfig_l = _rmnet_get_logical_ep(dev, config_id);
+
+	if (!epconfig_l || !epconfig_l->refcount)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	*rmnet_mode = epconfig_l->rmnet_mode;
+
+	strlcpy_res = strlcpy(egress_dev_name, epconfig_l->egress_dev->name,
+			      egress_dev_name_size);
+
+	if (strlcpy_res >= egress_dev_name_size)
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+
+	return RMNET_CONFIG_OK;
+}
+
+/* rmnet_create_vnd() - Create virtual network device node
+ * @id:       RmNet virtual device node id
+ *
+ * Return:
+ *      - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd(int id)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+	LOGL("(%d);", id);
+	return rmnet_vnd_create_dev(id, &dev, NULL);
+}
+
+/* rmnet_create_vnd_prefix() - Create virtual network device node with prefix
+ * @id:       RmNet virtual device node id
+ * @prefix:   String prefix for device name
+ *
+ * Return:
+ *      - result of rmnet_vnd_create_dev()
+ */
+int rmnet_create_vnd_prefix(int id, const char *prefix)
+{
+	struct net_device *dev;
+
+	ASSERT_RTNL();
+	LOGL("(%d, \"%s\");", id, prefix);
+	return rmnet_vnd_create_dev(id, &dev, prefix);
+}
+
+/* rmnet_free_vnd() - Free virtual network device node
+ * @id:       RmNet virtual device node id
+ *
+ * Return:
+ *      - result of rmnet_vnd_free_dev()
+ */
+int rmnet_free_vnd(int id)
+{
+	LOGL("(%d);", id);
+	return rmnet_vnd_free_dev(id);
+}
+
+static void _rmnet_free_vnd_later(struct work_struct *work)
+{
+	int i;
+	struct rmnet_free_vnd_work *fwork;
+
+	fwork = container_of(work, struct rmnet_free_vnd_work, work);
+
+	for (i = 0; i < fwork->count; i++)
+		rmnet_free_vnd(fwork->vnd_id[i]);
+	kfree(fwork);
+}
+
+/* rmnet_force_unassociate_device() - Force a device to unassociate
+ * @dev:       Device to unassociate
+ *
+ * Return:
+ *      - void
+ */
+static void rmnet_force_unassociate_device(struct net_device *dev)
+{
+	int i, j;
+	struct net_device *vndev;
+	struct rmnet_logical_ep_conf_s *cfg;
+	struct rmnet_free_vnd_work *vnd_work;
+
+	ASSERT_RTNL();
+
+	if (!dev)
+		return;
+
+	if (!_rmnet_is_physical_endpoint_associated(dev)) {
+		LOGM("%s", "Called on unassociated device, skipping");
+		return;
+	}
+
+	trace_rmnet_unregister_cb_clear_vnds(dev);
+	vnd_work = kmalloc(sizeof(*vnd_work), GFP_KERNEL);
+	if (!vnd_work) {
+		LOGH("%s", "Out of Memory");
+		return;
+	}
+	INIT_WORK(&vnd_work->work, _rmnet_free_vnd_later);
+	vnd_work->count = 0;
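+	/* rmnet_free_vnd() must not run under the RTNL (see the FREE_VND
+	 * netlink handler), so collect the ids here and free them later from
+	 * a work item.
+	 */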
+
+	/* Check the VNDs for offending mappings */
+	for (i = 0, j = 0; i < RMNET_DATA_MAX_VND &&
+	     j < RMNET_DATA_MAX_VND; i++) {
+		vndev = rmnet_vnd_get_by_id(i);
+		if (!vndev) {
+			LOGL("VND %d not in use; skipping", i);
+			continue;
+		}
+		cfg = rmnet_vnd_get_le_config(vndev);
+		if (!cfg) {
+			LOGH("Got NULL config from VND %d", i);
+			continue;
+		}
+		if (cfg->refcount && (cfg->egress_dev == dev)) {
+			/* Make sure the device is down before clearing any of
+			 * the mappings. Otherwise we could see a potential
+			 * race condition if packets are actively being
+			 * transmitted.
+			 */
+			dev_close(vndev);
+			rmnet_unset_logical_endpoint_config
+				(vndev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+			vnd_work->vnd_id[j] = i;
+			j++;
+		}
+	}
+	if (j > 0) {
+		vnd_work->count = j;
+		schedule_work(&vnd_work->work);
+	} else {
+		kfree(vnd_work);
+	}
+
+	/* Clear the mappings on the phys ep */
+	trace_rmnet_unregister_cb_clear_lepcs(dev);
+	rmnet_unset_logical_endpoint_config(dev, RMNET_LOCAL_LOGICAL_ENDPOINT);
+	for (i = 0; i < RMNET_DATA_MAX_LOGICAL_EP; i++)
+		rmnet_unset_logical_endpoint_config(dev, i);
+	rmnet_unassociate_network_device(dev);
+}
+
+/* rmnet_config_notify_cb() - Callback for netdevice notifier chain
+ * @nb:       Notifier block data
+ * @event:    Netdevice notifier event ID
+ * @data:     Contains a net device for which we are getting notified
+ *
+ * Return:
+ *      - NOTIFY_DONE in all cases
+ */
+int rmnet_config_notify_cb(struct notifier_block *nb,
+			   unsigned long event, void *data)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(data);
+
+	if (!dev)
+		return NOTIFY_DONE;
+
+	LOGL("(..., %lu, %s)", event, dev->name);
+
+	switch (event) {
+	case NETDEV_UNREGISTER_FINAL:
+	case NETDEV_UNREGISTER:
+		trace_rmnet_unregister_cb_entry(dev);
+		LOGH("Kernel is trying to unregister %s", dev->name);
+		rmnet_force_unassociate_device(dev);
+		trace_rmnet_unregister_cb_exit(dev);
+		break;
+
+	default:
+		trace_rmnet_unregister_cb_unhandled(dev);
+		LOGD("Unhandled event [%lu]", event);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
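For context, a sketch of how this callback would typically be hooked into the netdevice notifier chain (the block name and the suggestion that this happens in rmnet_config_init() are assumptions; the actual registration code is elsewhere in the driver):

	/* Illustrative only: register the callback once at init time. */
	static struct notifier_block rmnet_dev_notifier = {
		.notifier_call = rmnet_config_notify_cb,
	};

	/* e.g. from rmnet_config_init(): */
	register_netdevice_notifier(&rmnet_dev_notifier);
	/* ... and unregister_netdevice_notifier(&rmnet_dev_notifier) on exit */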
diff --git a/net/rmnet_data/rmnet_data_config.h b/net/rmnet_data/rmnet_data_config.h
new file mode 100644
index 0000000..3e356c0
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_config.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data configuration engine
+ */
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/spinlock.h>
+#include <net/rmnet_config.h>
+
+#ifndef _RMNET_DATA_CONFIG_H_
+#define _RMNET_DATA_CONFIG_H_
+
+#define RMNET_DATA_MAX_LOGICAL_EP 256
+
+/**
+ * struct rmnet_logical_ep_conf_s - Logical end-point configuration
+ *
+ * @refcount: Reference count for this endpoint. 0 signifies the endpoint is not
+ *            configured for use
+ * @rmnet_mode: Specifies how the traffic should be finally delivered. Possible
+ *            options are available in enum rmnet_config_endpoint_modes_e
+ * @mux_id: Virtual channel ID used by MAP protocol
+ * @flush_time: Timestamp used to rate-limit GRO flushes on this endpoint
+ * @egress_dev: Next device to deliver the packet to. Exact usage of this
+ *            parameter depends on the rmnet_mode
+ */
+struct rmnet_logical_ep_conf_s {
+	u8 refcount;
+	u8 rmnet_mode;
+	u8 mux_id;
+	struct timespec flush_time;
+	struct net_device *egress_dev;
+};
+
+/**
+ * struct rmnet_phys_ep_config - Physical endpoint configuration
+ * One instance of this structure is instantiated for each net_device associated
+ * with rmnet_data.
+ *
+ * @dev: The device which is associated with rmnet_data. Corresponds to this
+ *       specific instance of rmnet_phys_ep_config
+ * @local_ep: Default non-muxed endpoint. Used for non-MAP protocols/formats
+ * @muxed_ep: All multiplexed logical endpoints associated with this device
+ * @ingress_data_format: RMNET_INGRESS_FORMAT_* flags from rmnet_data.h
+ * @egress_data_format: RMNET_EGRESS_FORMAT_* flags from rmnet_data.h
+ *
+ * @egress_agg_size: Maximum size (bytes) of data which should be aggregated
+ * @egress_agg_count: Maximum count (packets) of data which should be aggregated.
+ *                  Aggregation stops and the frame is sent as soon as either
+ *                  of the two limits above is reached
+ * @tail_spacing: Guaranteed padding (bytes) when de-aggregating ingress frames
+ * @agg_time: Wall clock time when aggregated frame was created
+ * @agg_last: Last time the aggregation routing was invoked
+ */
+struct rmnet_phys_ep_config {
+	struct net_device *dev;
+	struct rmnet_logical_ep_conf_s local_ep;
+	struct rmnet_logical_ep_conf_s muxed_ep[RMNET_DATA_MAX_LOGICAL_EP];
+	u32 ingress_data_format;
+	u32 egress_data_format;
+
+	/* MAP specific */
+	u16 egress_agg_size;
+	u16 egress_agg_count;
+	u8 tail_spacing;
+	/* MAP aggregation state machine
+	 *  - This is not strictly configuration and is updated at runtime.
+	 *    Make sure all of these are protected by the agg_lock
+	 */
+	spinlock_t agg_lock;
+	struct sk_buff *agg_skb;
+	u8 agg_state;
+	u8 agg_count;
+	struct timespec agg_time;
+	struct timespec agg_last;
+};
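To make the interplay of @egress_agg_size and @egress_agg_count concrete, here is a simplified sketch of the flush decision (illustrative only; the actual aggregation logic lives elsewhere in the driver and is not part of this hunk):

/* Illustrative only: aggregation stops once either limit would be exceeded. */
static bool rmnet_agg_limit_hit(const struct rmnet_phys_ep_config *config,
				unsigned int agg_len, unsigned int agg_count,
				unsigned int next_pkt_len)
{
	return (agg_len + next_pkt_len > config->egress_agg_size) ||
	       (agg_count + 1 > config->egress_agg_count);
}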
+
+int rmnet_config_init(void);
+void rmnet_config_exit(void);
+
+int rmnet_unassociate_network_device(struct net_device *dev);
+int rmnet_set_ingress_data_format(struct net_device *dev,
+				  u32 ingress_data_format,
+				  u8  tail_spacing);
+int rmnet_set_egress_data_format(struct net_device *dev,
+				 u32 egress_data_format,
+				 u16 agg_size,
+				 u16 agg_count);
+int rmnet_associate_network_device(struct net_device *dev);
+int _rmnet_set_logical_endpoint_config
+	(struct net_device *dev, int config_id,
+	 struct rmnet_logical_ep_conf_s *epconfig);
+int rmnet_set_logical_endpoint_config(struct net_device *dev,
+				      int config_id,
+				      u8 rmnet_mode,
+				      struct net_device *egress_dev);
+int _rmnet_unset_logical_endpoint_config(struct net_device *dev,
+					 int config_id);
+int rmnet_unset_logical_endpoint_config(struct net_device *dev,
+					int config_id);
+int _rmnet_get_logical_endpoint_config
+	(struct net_device *dev, int config_id,
+	 struct rmnet_logical_ep_conf_s *epconfig);
+int rmnet_get_logical_endpoint_config(struct net_device *dev,
+				      int config_id,
+				      u8 *rmnet_mode,
+				      u8 *egress_dev_name,
+				      size_t egress_dev_name_size);
+void rmnet_config_netlink_msg_handler (struct sk_buff *skb);
+int rmnet_config_notify_cb(struct notifier_block *nb,
+			   unsigned long event, void *data);
+int rmnet_create_vnd(int id);
+int rmnet_create_vnd_prefix(int id, const char *name);
+int rmnet_free_vnd(int id);
+
+struct rmnet_phys_ep_config *_rmnet_get_phys_ep_config
+						(struct net_device *dev);
+
+#endif /* _RMNET_DATA_CONFIG_H_ */
diff --git a/net/rmnet_data/rmnet_data_handlers.c b/net/rmnet_data/rmnet_data_handlers.c
new file mode 100644
index 0000000..825d57e
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_handlers.c
@@ -0,0 +1,686 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+#include <linux/rmnet_data.h>
+#include <linux/net_map.h>
+#include <linux/netdev_features.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_private.h"
+#include "rmnet_data_config.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_trace.h"
+#include "rmnet_data_handlers.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_HANDLER);
+
+#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
+unsigned int dump_pkt_rx;
+module_param(dump_pkt_rx, uint, 0644);
+MODULE_PARM_DESC(dump_pkt_rx, "Dump packets entering ingress handler");
+
+unsigned int dump_pkt_tx;
+module_param(dump_pkt_tx, uint, 0644);
+MODULE_PARM_DESC(dump_pkt_tx, "Dump packets exiting egress handler");
+#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
+
+/* Time in nanoseconds. This value must be less than one second. */
+long gro_flush_time __read_mostly = 10000L;
+module_param(gro_flush_time, long, 0644);
+MODULE_PARM_DESC(gro_flush_time, "Flush GRO when spaced more than this");
+
+#define RMNET_DATA_IP_VERSION_4 0x40
+#define RMNET_DATA_IP_VERSION_6 0x60
+
+#define RMNET_DATA_GRO_RCV_FAIL 0
+#define RMNET_DATA_GRO_RCV_PASS 1
+
+/* Helper Functions */
+
+/* __rmnet_data_set_skb_proto() - Set skb->protocol field
+ * @skb:      packet being modified
+ *
+ * Peek at the first byte of the packet and set the protocol. There is no
+ * good way to determine if a packet has a MAP header. As of this writing,
+ * the reserved bit in the MAP frame will prevent it from overlapping with
+ * IPv4/IPv6 frames. This could change in the future!
+ */
+static inline void __rmnet_data_set_skb_proto(struct sk_buff *skb)
+{
+	switch (skb->data[0] & 0xF0) {
+	case RMNET_DATA_IP_VERSION_4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case RMNET_DATA_IP_VERSION_6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		skb->protocol = htons(ETH_P_MAP);
+		break;
+	}
+}
+
+#ifdef CONFIG_RMNET_DATA_DEBUG_PKT
+/* rmnet_print_packet() - Print packet / diagnostics
+ * @skb:      Packet to print
+ * @printlen: Number of bytes to print
+ * @dev:      Name of interface
+ * @dir:      Character representing direction (e.g. 'r' for receive)
+ *
+ * This function prints out raw bytes in an SKB. Use of this will have major
+ * performance impacts and may even trigger watchdog resets if too much is being
+ * printed. Hence, this should always be compiled out unless absolutely needed.
+ */
+void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
+{
+	char buffer[200];
+	unsigned int len, printlen;
+	int i, buffloc = 0;
+
+	switch (dir) {
+	case 'r':
+		printlen = dump_pkt_rx;
+		break;
+
+	case 't':
+		printlen = dump_pkt_tx;
+		break;
+
+	default:
+		printlen = 0;
+		break;
+	}
+
+	if (!printlen)
+		return;
+
+	pr_err("[%s][%c] - PKT skb->len=%d skb->head=%pK skb->data=%pK\n",
+	       dev, dir, skb->len, (void *)skb->head, (void *)skb->data);
+	pr_err("[%s][%c] - PKT skb->tail=%pK skb->end=%pK\n",
+	       dev, dir, skb_tail_pointer(skb), skb_end_pointer(skb));
+
+	if (skb->len > 0)
+		len = skb->len;
+	else
+		len = ((unsigned int)(uintptr_t)skb->end) -
+		      ((unsigned int)(uintptr_t)skb->data);
+
+	pr_err("[%s][%c] - PKT len: %d, printing first %d bytes\n",
+	       dev, dir, len, printlen);
+
+	memset(buffer, 0, sizeof(buffer));
+	for (i = 0; (i < printlen) && (i < len); i++) {
+		if ((i % 16) == 0) {
+			pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
+			memset(buffer, 0, sizeof(buffer));
+			buffloc = 0;
+			buffloc += snprintf(&buffer[buffloc],
+					    sizeof(buffer) - buffloc, "%04X:",
+					    i);
+		}
+
+		buffloc += snprintf(&buffer[buffloc], sizeof(buffer) - buffloc,
+					" %02x", skb->data[i]);
+	}
+	pr_err("[%s][%c] - PKT%s\n", dev, dir, buffer);
+}
+#else
+void rmnet_print_packet(const struct sk_buff *skb, const char *dev, char dir)
+{
+}
+#endif /* CONFIG_RMNET_DATA_DEBUG_PKT */
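As an aside, the same kind of dump could be produced with the kernel's print_hex_dump() helper instead of hand-building lines into a local buffer; a hedged sketch (not what this driver does):

	/* Illustrative only: dump at most printlen bytes of the packet. */
	print_hex_dump(KERN_ERR, "rmnet_data pkt: ", DUMP_PREFIX_OFFSET,
		       16, 1, skb->data, min(printlen, skb->len), false);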
+
+/* Generic handler */
+
+/* rmnet_bridge_handler() - Bridge related functionality
+ *
+ * Return:
+ *      - RX_HANDLER_CONSUMED in all cases
+ */
+static rx_handler_result_t rmnet_bridge_handler
+	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
+{
+	if (!ep->egress_dev) {
+		LOGD("Missing egress device for packet arriving on %s",
+		     skb->dev->name);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_BRDG_NO_EGRESS);
+	} else {
+		rmnet_egress_handler(skb, ep);
+	}
+
+	return RX_HANDLER_CONSUMED;
+}
+
+#ifdef NET_SKBUFF_DATA_USES_OFFSET
+static void rmnet_reset_mac_header(struct sk_buff *skb)
+{
+	skb->mac_header = 0;
+	skb->mac_len = 0;
+}
+#else
+static void rmnet_reset_mac_header(struct sk_buff *skb)
+{
+	skb->mac_header = skb->network_header;
+	skb->mac_len = 0;
+}
+#endif /*NET_SKBUFF_DATA_USES_OFFSET*/
+
+/* rmnet_check_skb_can_gro() - Check if skb can be passed through GRO handler
+ *
+ * Determines whether to pass the skb to the GRO handler napi_gro_receive() or
+ * handle normally by passing to netif_receive_skb().
+ *
+ * Warning:
+ * This assumes that only TCP packets can be coalesced by the GRO handler which
+ * is not true in general. We lose the ability to use GRO for cases like UDP
+ * encapsulation protocols.
+ *
+ * Return:
+ *      - RMNET_DATA_GRO_RCV_FAIL if packet is sent to netif_receive_skb()
+ *      - RMNET_DATA_GRO_RCV_PASS if packet is sent to napi_gro_receive()
+ */
+static int rmnet_check_skb_can_gro(struct sk_buff *skb)
+{
+	switch (skb->data[0] & 0xF0) {
+	case RMNET_DATA_IP_VERSION_4:
+		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+			return RMNET_DATA_GRO_RCV_PASS;
+		break;
+	case RMNET_DATA_IP_VERSION_6:
+		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+			return RMNET_DATA_GRO_RCV_PASS;
+		/* Fall through */
+	}
+
+	return RMNET_DATA_GRO_RCV_FAIL;
+}
+
+/* rmnet_optional_gro_flush() - Check if GRO handler needs to flush now
+ *
+ * Determines whether GRO handler needs to flush packets which it has
+ * coalesced so far.
+ *
+ * Tuning this parameter will trade TCP slow start performance for GRO coalesce
+ * ratio.
+ */
+static void rmnet_optional_gro_flush(struct napi_struct *napi,
+				     struct rmnet_logical_ep_conf_s *ep)
+{
+	struct timespec curr_time, diff;
+
+	if (!gro_flush_time)
+		return;
+
+	if (unlikely(ep->flush_time.tv_sec == 0)) {
+		getnstimeofday(&ep->flush_time);
+	} else {
+		getnstimeofday(&(curr_time));
+		diff = timespec_sub(curr_time, ep->flush_time);
+		if ((diff.tv_sec > 0) || (diff.tv_nsec > gro_flush_time)) {
+			napi_gro_flush(napi, false);
+			getnstimeofday(&ep->flush_time);
+		}
+	}
+}
+
+/* __rmnet_deliver_skb() - Deliver skb
+ *
+ * Determines where to deliver skb. Options are: consume by network stack,
+ * pass to bridge handler, or pass to virtual network device
+ *
+ * Return:
+ *      - RX_HANDLER_CONSUMED if packet forwarded or dropped
+ *      - RX_HANDLER_PASS if packet is to be consumed by network stack as-is
+ */
+static rx_handler_result_t __rmnet_deliver_skb
+	(struct sk_buff *skb, struct rmnet_logical_ep_conf_s *ep)
+{
+	struct napi_struct *napi = NULL;
+	gro_result_t gro_res;
+
+	trace___rmnet_deliver_skb(skb);
+	switch (ep->rmnet_mode) {
+	case RMNET_EPMODE_NONE:
+		return RX_HANDLER_PASS;
+
+	case RMNET_EPMODE_BRIDGE:
+		return rmnet_bridge_handler(skb, ep);
+
+	case RMNET_EPMODE_VND:
+		skb_reset_transport_header(skb);
+		skb_reset_network_header(skb);
+		switch (rmnet_vnd_rx_fixup(skb, skb->dev)) {
+		case RX_HANDLER_CONSUMED:
+			return RX_HANDLER_CONSUMED;
+
+		case RX_HANDLER_PASS:
+			skb->pkt_type = PACKET_HOST;
+			rmnet_reset_mac_header(skb);
+			if (rmnet_check_skb_can_gro(skb) &&
+			    (skb->dev->features & NETIF_F_GRO)) {
+				napi = get_current_napi_context();
+				if (napi) {
+					gro_res = napi_gro_receive(napi, skb);
+					trace_rmnet_gro_downlink(gro_res);
+					rmnet_optional_gro_flush(napi, ep);
+				} else {
+					WARN_ONCE(1, "current napi is NULL\n");
+					netif_receive_skb(skb);
+				}
+			} else {
+				netif_receive_skb(skb);
+			}
+			return RX_HANDLER_CONSUMED;
+		}
+		return RX_HANDLER_PASS;
+
+	default:
+		LOGD("Unknown ep mode %d", ep->rmnet_mode);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_DELIVER_NO_EP);
+		return RX_HANDLER_CONSUMED;
+	}
+}
+
+/* rmnet_ingress_deliver_packet() - Ingress handler for raw IP and bridged
+ *                                  MAP packets.
+ * @skb:     Packet needing a destination.
+ * @config:  Physical end point configuration that the packet arrived on.
+ *
+ * Return:
+ *      - RX_HANDLER_CONSUMED if packet forwarded/dropped
+ *      - RX_HANDLER_PASS if packet should be passed up the stack by caller
+ */
+static rx_handler_result_t rmnet_ingress_deliver_packet
+	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
+{
+	if (!config) {
+		LOGD("%s", "NULL physical EP provided");
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	if (!(config->local_ep.refcount)) {
+		LOGD("Packet on %s has no local endpoint configuration",
+		     skb->dev->name);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_IPINGRESS_NO_EP);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	skb->dev = config->local_ep.egress_dev;
+
+	return __rmnet_deliver_skb(skb, &config->local_ep);
+}
+
+/* MAP handler */
+
+/* _rmnet_map_ingress_handler() - Actual MAP ingress handler
+ * @skb:        Packet being received
+ * @config:     Physical endpoint configuration for the ingress device
+ *
+ * Most MAP ingress functions are processed here. Packets are processed
+ * individually; aggregated packets should use rmnet_map_ingress_handler()
+ *
+ * Return:
+ *      - RX_HANDLER_CONSUMED if packet is dropped
+ *      - result of __rmnet_deliver_skb() for all other cases
+ */
+static rx_handler_result_t _rmnet_map_ingress_handler
+	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
+{
+	struct rmnet_logical_ep_conf_s *ep;
+	u8 mux_id;
+	u16 len;
+	int ckresult;
+
+	if (RMNET_MAP_GET_CD_BIT(skb)) {
+		if (config->ingress_data_format
+		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
+			return rmnet_map_command(skb, config);
+
+		LOGM("MAP command packet on %s; %s", skb->dev->name,
+		     "Not configured for MAP commands");
+		rmnet_kfree_skb(skb,
+				RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	mux_id = RMNET_MAP_GET_MUX_ID(skb);
+	len = RMNET_MAP_GET_LENGTH(skb)
+			- RMNET_MAP_GET_PAD(skb)
+			- config->tail_spacing;
+
+	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
+		LOGD("Got packet on %s with bad mux id %d",
+		     skb->dev->name, mux_id);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	ep = &config->muxed_ep[mux_id];
+
+	if (!ep->refcount) {
+		LOGD("Packet on %s:%d; has no logical endpoint config",
+		     skb->dev->name, mux_id);
+
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
+		skb->dev = ep->egress_dev;
+
+	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
+	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
+		ckresult = rmnet_map_checksum_downlink_packet(skb);
+		trace_rmnet_map_checksum_downlink_packet(skb, ckresult);
+		rmnet_stats_dl_checksum(ckresult);
+		if (likely((ckresult == RMNET_MAP_CHECKSUM_OK) ||
+			   (ckresult == RMNET_MAP_CHECKSUM_SKIPPED)))
+			skb->ip_summed |= CHECKSUM_UNNECESSARY;
+		else if (ckresult !=
+			 RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION &&
+			 ckresult != RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT &&
+			 ckresult != RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET &&
+			 ckresult != RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET) {
+			rmnet_kfree_skb
+			(skb, RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM);
+			return RX_HANDLER_CONSUMED;
+		}
+	}
+
+	/* Subtract MAP header */
+	skb_pull(skb, sizeof(struct rmnet_map_header_s));
+	skb_trim(skb, len);
+	__rmnet_data_set_skb_proto(skb);
+	return __rmnet_deliver_skb(skb, ep);
+}
+
+/* rmnet_map_ingress_handler() - MAP ingress handler
+ * @skb:        Packet being received
+ * @config:     Physical endpoint configuration for the ingress device
+ *
+ * Called if and only if MAP is configured in the ingress device's ingress data
+ * format. Deaggregation is done here; actual MAP processing is done in
+ * _rmnet_map_ingress_handler().
+ *
+ * Return:
+ *      - RX_HANDLER_CONSUMED for aggregated packets
+ *      - RX_HANDLER_CONSUMED for dropped packets
+ *      - result of _rmnet_map_ingress_handler() for all other cases
+ */
+static rx_handler_result_t rmnet_map_ingress_handler
+	(struct sk_buff *skb, struct rmnet_phys_ep_config *config)
+{
+	struct sk_buff *skbn;
+	int rc, co = 0;
+
+	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
+		trace_rmnet_start_deaggregation(skb);
+		while ((skbn = rmnet_map_deaggregate(skb, config)) != 0) {
+			_rmnet_map_ingress_handler(skbn, config);
+			co++;
+		}
+		trace_rmnet_end_deaggregation(skb, co);
+		LOGD("De-aggregated %d packets", co);
+		rmnet_stats_deagg_pkts(co);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF);
+		rc = RX_HANDLER_CONSUMED;
+	} else {
+		rc = _rmnet_map_ingress_handler(skb, config);
+	}
+
+	return rc;
+}
+
+/* rmnet_map_egress_handler() - MAP egress handler
+ * @skb:        Packet being sent
+ * @config:     Physical endpoint configuration for the egress device
+ * @ep:         logical endpoint configuration of the packet originator
+ *              (e.g. RmNet virtual network device)
+ * @orig_dev:   The originator vnd device
+ *
+ * Called if and only if MAP is configured in the egress device's egress data
+ * format. Will expand skb if there is insufficient headroom for MAP protocol.
+ * Note: headroom expansion will incur a performance penalty.
+ *
+ * Return:
+ *      - 0 on success
+ *      - 1 on failure
+ */
+static int rmnet_map_egress_handler(struct sk_buff *skb,
+				    struct rmnet_phys_ep_config *config,
+				    struct rmnet_logical_ep_conf_s *ep,
+				    struct net_device *orig_dev)
+{
+	int required_headroom, additional_header_length, ckresult;
+	struct rmnet_map_header_s *map_header;
+
+	additional_header_length = 0;
+
+	required_headroom = sizeof(struct rmnet_map_header_s);
+	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
+	    (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
+		required_headroom +=
+			sizeof(struct rmnet_map_ul_checksum_header_s);
+		additional_header_length +=
+			sizeof(struct rmnet_map_ul_checksum_header_s);
+	}
+
+	LOGD("headroom of %d bytes", required_headroom);
+
+	if (skb_headroom(skb) < required_headroom) {
+		if (pskb_expand_head(skb, required_headroom, 0, GFP_KERNEL)) {
+			LOGD("Failed to add headroom of %d bytes",
+			     required_headroom);
+			return 1;
+		}
+	}
+
+	if ((config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV3) ||
+	    (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)) {
+		ckresult = rmnet_map_checksum_uplink_packet
+				(skb, orig_dev, config->egress_data_format);
+		trace_rmnet_map_checksum_uplink_packet(orig_dev, ckresult);
+		rmnet_stats_ul_checksum(ckresult);
+	}
+
+	if ((!(config->egress_data_format &
+	    RMNET_EGRESS_FORMAT_AGGREGATION)) ||
+	    ((orig_dev->features & NETIF_F_GSO) && skb_is_nonlinear(skb)))
+		map_header = rmnet_map_add_map_header
+		(skb, additional_header_length, RMNET_MAP_NO_PAD_BYTES);
+	else
+		map_header = rmnet_map_add_map_header
+		(skb, additional_header_length, RMNET_MAP_ADD_PAD_BYTES);
+
+	if (!map_header) {
+		LOGD("%s", "Failed to add MAP header to egress packet");
+		return 1;
+	}
+
+	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
+		if (ep->mux_id == 0xff)
+			map_header->mux_id = 0;
+		else
+			map_header->mux_id = ep->mux_id;
+	}
+
+	skb->protocol = htons(ETH_P_MAP);
+
+	if (config->egress_data_format & RMNET_EGRESS_FORMAT_AGGREGATION) {
+		rmnet_map_aggregate(skb, config);
+		return RMNET_MAP_CONSUMED;
+	}
+
+	return RMNET_MAP_SUCCESS;
+}
+
+/* Ingress / Egress Entry Points */
+
+/* rmnet_ingress_handler() - Ingress handler entry point
+ * @skb: Packet being received
+ *
+ * Processes packet as per ingress data format for receiving device. Logical
+ * endpoint is determined from packet inspection. Packet is then sent to the
+ * egress device listed in the logical endpoint configuration.
+ *
+ * Return:
+ *      - RX_HANDLER_PASS if packet is not processed by handler (caller must
+ *        deal with the packet)
+ *      - RX_HANDLER_CONSUMED if packet is forwarded or processed by MAP
+ */
+rx_handler_result_t rmnet_ingress_handler(struct sk_buff *skb)
+{
+	struct rmnet_phys_ep_config *config;
+	struct net_device *dev;
+	int rc;
+
+	if (!skb)
+		return RX_HANDLER_CONSUMED;
+
+	dev = skb->dev;
+	trace_rmnet_ingress_handler(skb);
+	rmnet_print_packet(skb, dev->name, 'r');
+
+	config = _rmnet_get_phys_ep_config(skb->dev);
+
+	if (!config) {
+		LOGD("%s is not associated with rmnet_data", skb->dev->name);
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	/* Sometimes devices operate in ethernet mode even though there is no
+	 * ethernet header. This causes the skb->protocol to contain a bogus
+	 * value and the skb->data pointer to be off by 14 bytes. Fix it if
+	 * configured to do so
+	 */
+	if (config->ingress_data_format & RMNET_INGRESS_FIX_ETHERNET) {
+		skb_push(skb, RMNET_ETHERNET_HEADER_LENGTH);
+		__rmnet_data_set_skb_proto(skb);
+	}
+
+	if (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
+		rc = rmnet_map_ingress_handler(skb, config);
+	} else {
+		switch (ntohs(skb->protocol)) {
+		case ETH_P_MAP:
+			if (config->local_ep.rmnet_mode ==
+				RMNET_EPMODE_BRIDGE) {
+				rc = rmnet_ingress_deliver_packet(skb, config);
+			} else {
+				LOGD("MAP packet on %s; MAP not set",
+				     dev->name);
+				rmnet_kfree_skb
+				(skb,
+				 RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD);
+				rc = RX_HANDLER_CONSUMED;
+			}
+			break;
+
+		case ETH_P_ARP:
+		case ETH_P_IP:
+		case ETH_P_IPV6:
+			rc = rmnet_ingress_deliver_packet(skb, config);
+			break;
+
+		default:
+			LOGD("Unknown skb->proto 0x%04X\n",
+			     ntohs(skb->protocol) & 0xFFFF);
+			rc = RX_HANDLER_PASS;
+		}
+	}
+
+	return rc;
+}
+
+/* rmnet_rx_handler() - Rx handler callback registered with kernel
+ * @pskb: Packet to be processed by rx handler
+ *
+ * Standard kernel-expected footprint for rx handlers. Calls
+ * rmnet_ingress_handler with correctly formatted arguments
+ *
+ * Return:
+ *      - Whatever rmnet_ingress_handler() returns
+ */
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+	return rmnet_ingress_handler(*pskb);
+}
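For context, this rx handler is normally attached to the physical device with netdev_rx_handler_register(); a minimal sketch of that wiring (illustrative only; in this driver the call is made from the device association path, which is not part of this hunk, and config stands for whatever per-device context is stored as rx_handler_data):

	/* Illustrative only: attach the handler while holding the RTNL lock. */
	ASSERT_RTNL();
	rc = netdev_rx_handler_register(dev, rmnet_rx_handler, config);
	if (rc)
		LOGE("Failed to register rx handler on %s (rc=%d)", dev->name, rc);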
+
+/* rmnet_egress_handler() - Egress handler entry point
+ * @skb:        packet to transmit
+ * @ep:         logical endpoint configuration of the packet originator
+ *              (e.g. RmNet virtual network device)
+ *
+ * Modifies packet as per logical endpoint configuration and egress data format
+ * for egress device configured in logical endpoint. Packet is then transmitted
+ * on the egress device.
+ */
+void rmnet_egress_handler(struct sk_buff *skb,
+			  struct rmnet_logical_ep_conf_s *ep)
+{
+	struct rmnet_phys_ep_config *config;
+	struct net_device *orig_dev;
+	int rc;
+
+	orig_dev = skb->dev;
+	skb->dev = ep->egress_dev;
+
+	config = _rmnet_get_phys_ep_config(skb->dev);
+
+	if (!config) {
+		LOGD("%s is not associated with rmnet_data", skb->dev->name);
+		kfree_skb(skb);
+		return;
+	}
+
+	LOGD("Packet going out on %s with egress format 0x%08X",
+	     skb->dev->name, config->egress_data_format);
+
+	if (config->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
+		switch (rmnet_map_egress_handler(skb, config, ep, orig_dev)) {
+		case RMNET_MAP_CONSUMED:
+			LOGD("%s", "MAP process consumed packet");
+			return;
+
+		case RMNET_MAP_SUCCESS:
+			break;
+
+		default:
+			LOGD("MAP egress failed on packet on %s",
+			     skb->dev->name);
+			rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_EGR_MAPFAIL);
+			return;
+		}
+	}
+
+	if (ep->rmnet_mode == RMNET_EPMODE_VND)
+		rmnet_vnd_tx_fixup(skb, orig_dev);
+
+	rmnet_print_packet(skb, skb->dev->name, 't');
+	trace_rmnet_egress_handler(skb);
+	rc = dev_queue_xmit(skb);
+	if (rc != 0) {
+		LOGD("Failed to queue packet for transmission on [%s]",
+		     skb->dev->name);
+	}
+	rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_EGRESS);
+}
diff --git a/net/rmnet_data/rmnet_data_handlers.h b/net/rmnet_data/rmnet_data_handlers.h
new file mode 100644
index 0000000..1995de7
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_handlers.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2013, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data ingress/egress handler
+ */
+
+#ifndef _RMNET_DATA_HANDLERS_H_
+#define _RMNET_DATA_HANDLERS_H_
+
+void rmnet_egress_handler(struct sk_buff *skb,
+			  struct rmnet_logical_ep_conf_s *ep);
+
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
+
+#endif /* _RMNET_DATA_HANDLERS_H_ */
diff --git a/net/rmnet_data/rmnet_data_main.c b/net/rmnet_data/rmnet_data_main.c
new file mode 100644
index 0000000..b5baa6f
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_main.c
@@ -0,0 +1,59 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data generic framework
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include "rmnet_data_private.h"
+#include "rmnet_data_config.h"
+#include "rmnet_data_vnd.h"
+
+/* Trace Points */
+#define CREATE_TRACE_POINTS
+#include "rmnet_data_trace.h"
+
+/* Module Parameters */
+unsigned int rmnet_data_log_level = RMNET_LOG_LVL_ERR | RMNET_LOG_LVL_HI;
+module_param(rmnet_data_log_level, uint, 0644);
+MODULE_PARM_DESC(rmnet_data_log_level, "Logging level");
+
+unsigned int rmnet_data_log_module_mask;
+module_param(rmnet_data_log_module_mask, uint, 0644);
+MODULE_PARM_DESC(rmnet_data_log_module_mask, "Logging module mask");
+
+/* Startup/Shutdown */
+
+/* rmnet_init() - Module initialization
+ *
+ * todo: check for (and init) startup errors
+ */
+static int __init rmnet_init(void)
+{
+	rmnet_config_init();
+	rmnet_vnd_init();
+
+	LOGL("%s", "RMNET Data driver loaded successfully");
+	return 0;
+}
+
+static void __exit rmnet_exit(void)
+{
+	rmnet_config_exit();
+	rmnet_vnd_exit();
+}
+
+module_init(rmnet_init)
+module_exit(rmnet_exit)
+MODULE_LICENSE("GPL v2");
diff --git a/net/rmnet_data/rmnet_data_private.h b/net/rmnet_data/rmnet_data_private.h
new file mode 100644
index 0000000..0ac68cb
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_private.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _RMNET_DATA_PRIVATE_H_
+#define _RMNET_DATA_PRIVATE_H_
+
+#define RMNET_DATA_MAX_VND              32
+#define RMNET_DATA_MAX_PACKET_SIZE      16384
+#define RMNET_DATA_DFLT_PACKET_SIZE     1500
+#define RMNET_DATA_DEV_NAME_STR         "rmnet_data"
+#define RMNET_DATA_NEEDED_HEADROOM      16
+#define RMNET_DATA_TX_QUEUE_LEN         1000
+#define RMNET_ETHERNET_HEADER_LENGTH    14
+
+extern unsigned int rmnet_data_log_level;
+extern unsigned int rmnet_data_log_module_mask;
+
+#define RMNET_INIT_OK     0
+#define RMNET_INIT_ERROR  1
+
+#define RMNET_LOG_LVL_DBG BIT(4)
+#define RMNET_LOG_LVL_LOW BIT(3)
+#define RMNET_LOG_LVL_MED BIT(2)
+#define RMNET_LOG_LVL_HI  BIT(1)
+#define RMNET_LOG_LVL_ERR BIT(0)
+
+#define RMNET_LOG_MODULE(X) \
+	static u32 rmnet_mod_mask = X
+
+#define RMNET_DATA_LOGMASK_CONFIG  BIT(0)
+#define RMNET_DATA_LOGMASK_HANDLER BIT(1)
+#define RMNET_DATA_LOGMASK_VND     BIT(2)
+#define RMNET_DATA_LOGMASK_MAPD    BIT(3)
+#define RMNET_DATA_LOGMASK_MAPC    BIT(4)
+
+#define LOGE(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_ERR) \
+			pr_err("[RMNET:ERR] %s(): " fmt "\n", __func__, \
+				##__VA_ARGS__); \
+			} while (0)
+
+#define LOGH(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_HI) \
+			pr_err("[RMNET:HI] %s(): " fmt "\n", __func__, \
+				##__VA_ARGS__); \
+			} while (0)
+
+#define LOGM(fmt, ...) do { if (rmnet_data_log_level & RMNET_LOG_LVL_MED) \
+			pr_warn("[RMNET:MED] %s(): " fmt "\n", __func__, \
+				##__VA_ARGS__); \
+			} while (0)
+
+#define LOGL(fmt, ...) do { if (unlikely \
+			(rmnet_data_log_level & RMNET_LOG_LVL_LOW)) \
+			pr_notice("[RMNET:LOW] %s(): " fmt "\n", __func__, \
+				##__VA_ARGS__); \
+			} while (0)
+
+/* Don't use pr_debug() as it may be compiled out of the kernel. We can be sure
+ * of minimal impact as LOGD is not enabled by default.
+ */
+#define LOGD(fmt, ...) do { if (unlikely( \
+			    (rmnet_data_log_level & RMNET_LOG_LVL_DBG) && \
+			    (rmnet_data_log_module_mask & rmnet_mod_mask))) \
+			pr_notice("[RMNET:DBG] %s(): " fmt "\n", __func__, \
+				  ##__VA_ARGS__); \
+			} while (0)
+
+#endif /* _RMNET_DATA_PRIVATE_H_ */
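A short usage sketch for the logging macros above (illustrative only; every compilation unit in the driver already follows the same pattern of declaring its module mask with RMNET_LOG_MODULE()):

/* Illustrative only: per-file module mask plus level-gated prints. */
RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);

static void example_log_usage(struct net_device *dev)
{
	LOGE("hard error on %s", dev->name);   /* printed when RMNET_LOG_LVL_ERR is set */
	LOGD("debug detail on %s", dev->name); /* needs LVL_DBG and the VND module mask */
}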
diff --git a/net/rmnet_data/rmnet_data_stats.c b/net/rmnet_data/rmnet_data_stats.c
new file mode 100644
index 0000000..f4aa492
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_stats.c
@@ -0,0 +1,143 @@
+/* Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data statistics
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_private.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+
+enum rmnet_deagg_e {
+	RMNET_STATS_AGG_BUFF,
+	RMNET_STATS_AGG_PKT,
+	RMNET_STATS_AGG_MAX
+};
+
+static DEFINE_SPINLOCK(rmnet_skb_free_lock);
+unsigned long int skb_free[RMNET_STATS_SKBFREE_MAX];
+module_param_array(skb_free, ulong, 0, 0444);
+MODULE_PARM_DESC(skb_free, "SKBs dropped or freed");
+
+static DEFINE_SPINLOCK(rmnet_queue_xmit_lock);
+unsigned long int queue_xmit[RMNET_STATS_QUEUE_XMIT_MAX * 2];
+module_param_array(queue_xmit, ulong, 0, 0444);
+MODULE_PARM_DESC(queue_xmit, "SKBs queued for transmit");
+
+static DEFINE_SPINLOCK(rmnet_deagg_count);
+unsigned long int deagg_count[RMNET_STATS_AGG_MAX];
+module_param_array(deagg_count, ulong, 0, 0444);
+MODULE_PARM_DESC(deagg_count, "SKBs De-aggregated");
+
+static DEFINE_SPINLOCK(rmnet_agg_count);
+unsigned long int agg_count[RMNET_STATS_AGG_MAX];
+module_param_array(agg_count, ulong, 0, 0444);
+MODULE_PARM_DESC(agg_count, "SKBs Aggregated");
+
+static DEFINE_SPINLOCK(rmnet_checksum_dl_stats);
+unsigned long int checksum_dl_stats[RMNET_MAP_CHECKSUM_ENUM_LENGTH];
+module_param_array(checksum_dl_stats, ulong, 0, 0444);
+MODULE_PARM_DESC(checksum_dl_stats, "Downlink Checksum Statistics");
+
+static DEFINE_SPINLOCK(rmnet_checksum_ul_stats);
+unsigned long int checksum_ul_stats[RMNET_MAP_CHECKSUM_ENUM_LENGTH];
+module_param_array(checksum_ul_stats, ulong, 0, 0444);
+MODULE_PARM_DESC(checksum_ul_stats, "Uplink Checksum Statistics");
+
+void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason)
+{
+	unsigned long flags;
+
+	if (reason >= RMNET_STATS_SKBFREE_MAX)
+		reason = RMNET_STATS_SKBFREE_UNKNOWN;
+
+	spin_lock_irqsave(&rmnet_skb_free_lock, flags);
+	skb_free[reason]++;
+	spin_unlock_irqrestore(&rmnet_skb_free_lock, flags);
+
+	if (likely(skb)) {
+		struct rmnet_phys_ep_conf_s *config;
+
+		config = (struct rmnet_phys_ep_conf_s *)rcu_dereference
+			 (skb->dev->rx_handler_data);
+		if (likely(config))
+			config->recycle(skb);
+		else
+			kfree_skb(skb);
+	}
+}
+
+void rmnet_stats_queue_xmit(int rc, unsigned int reason)
+{
+	unsigned long flags;
+
+	if (rc != 0)
+		reason += RMNET_STATS_QUEUE_XMIT_MAX;
+	if (reason >= RMNET_STATS_QUEUE_XMIT_MAX * 2)
+		reason = RMNET_STATS_SKBFREE_UNKNOWN;
+
+	spin_lock_irqsave(&rmnet_queue_xmit_lock, flags);
+	queue_xmit[reason]++;
+	spin_unlock_irqrestore(&rmnet_queue_xmit_lock, flags);
+}
+
+void rmnet_stats_agg_pkts(int aggcount)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rmnet_agg_count, flags);
+	agg_count[RMNET_STATS_AGG_BUFF]++;
+	agg_count[RMNET_STATS_AGG_PKT] += aggcount;
+	spin_unlock_irqrestore(&rmnet_agg_count, flags);
+}
+
+void rmnet_stats_deagg_pkts(int aggcount)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&rmnet_deagg_count, flags);
+	deagg_count[RMNET_STATS_AGG_BUFF]++;
+	deagg_count[RMNET_STATS_AGG_PKT] += aggcount;
+	spin_unlock_irqrestore(&rmnet_deagg_count, flags);
+}
+
+void rmnet_stats_dl_checksum(unsigned int rc)
+{
+	unsigned long flags;
+
+	if (rc >= RMNET_MAP_CHECKSUM_ENUM_LENGTH)
+		rc = RMNET_MAP_CHECKSUM_ERR_UNKNOWN;
+
+	spin_lock_irqsave(&rmnet_checksum_dl_stats, flags);
+	checksum_dl_stats[rc]++;
+	spin_unlock_irqrestore(&rmnet_checksum_dl_stats, flags);
+}
+
+void rmnet_stats_ul_checksum(unsigned int rc)
+{
+	unsigned long flags;
+
+	if (rc >= RMNET_MAP_CHECKSUM_ENUM_LENGTH)
+		rc = RMNET_MAP_CHECKSUM_ERR_UNKNOWN;
+
+	spin_lock_irqsave(&rmnet_checksum_ul_stats, flags);
+	checksum_ul_stats[rc]++;
+	spin_unlock_irqrestore(&rmnet_checksum_ul_stats, flags);
+}
diff --git a/net/rmnet_data/rmnet_data_stats.h b/net/rmnet_data/rmnet_data_stats.h
new file mode 100644
index 0000000..e3350ef
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_stats.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data statistics
+ *
+ */
+
+#ifndef _RMNET_DATA_STATS_H_
+#define _RMNET_DATA_STATS_H_
+
+enum rmnet_skb_free_e {
+	RMNET_STATS_SKBFREE_UNKNOWN,
+	RMNET_STATS_SKBFREE_BRDG_NO_EGRESS,
+	RMNET_STATS_SKBFREE_DELIVER_NO_EP,
+	RMNET_STATS_SKBFREE_IPINGRESS_NO_EP,
+	RMNET_STATS_SKBFREE_MAPINGRESS_BAD_MUX,
+	RMNET_STATS_SKBFREE_MAPINGRESS_MUX_NO_EP,
+	RMNET_STATS_SKBFREE_MAPINGRESS_AGGBUF,
+	RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPD,
+	RMNET_STATS_SKBFREE_INGRESS_NOT_EXPECT_MAPC,
+	RMNET_STATS_SKBFREE_EGR_MAPFAIL,
+	RMNET_STATS_SKBFREE_VND_NO_EGRESS,
+	RMNET_STATS_SKBFREE_MAPC_BAD_MUX,
+	RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP,
+	RMNET_STATS_SKBFREE_AGG_CPY_EXPAND,
+	RMNET_STATS_SKBFREE_AGG_INTO_BUFF,
+	RMNET_STATS_SKBFREE_DEAGG_MALFORMED,
+	RMNET_STATS_SKBFREE_DEAGG_CLONE_FAIL,
+	RMNET_STATS_SKBFREE_DEAGG_UNKNOWN_IP_TYPE,
+	RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0,
+	RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM,
+	RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED,
+	RMNET_STATS_SKBFREE_MAX
+};
+
+enum rmnet_queue_xmit_e {
+	RMNET_STATS_QUEUE_XMIT_UNKNOWN,
+	RMNET_STATS_QUEUE_XMIT_EGRESS,
+	RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER,
+	RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT,
+	RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL,
+	RMNET_STATS_QUEUE_XMIT_AGG_SKIP,
+	RMNET_STATS_QUEUE_XMIT_MAX
+};
+
+void rmnet_kfree_skb(struct sk_buff *skb, unsigned int reason);
+void rmnet_stats_queue_xmit(int rc, unsigned int reason);
+void rmnet_stats_deagg_pkts(int aggcount);
+void rmnet_stats_agg_pkts(int aggcount);
+void rmnet_stats_dl_checksum(unsigned int rc);
+void rmnet_stats_ul_checksum(unsigned int rc);
+#endif /* _RMNET_DATA_STATS_H_ */
diff --git a/net/rmnet_data/rmnet_data_trace.h b/net/rmnet_data/rmnet_data_trace.h
new file mode 100644
index 0000000..428197f
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_trace.h
@@ -0,0 +1,358 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rmnet_data
+#define TRACE_INCLUDE_FILE rmnet_data_trace
+
+#if !defined(_TRACE_MSM_LOW_POWER_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _RMNET_DATA_TRACE_H_
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS
+	(rmnet_handler_template,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb),
+
+	 TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(unsigned int, len)
+		__string(name, skb->dev->name)
+	 ),
+
+	 TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__assign_str(name, skb->dev->name);
+	 ),
+
+	 TP_printk("dev=%s skbaddr=%pK len=%u",
+		   __get_str(name), __entry->skbaddr, __entry->len)
+)
+
+DEFINE_EVENT
+	(rmnet_handler_template, rmnet_egress_handler,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb)
+);
+
+DEFINE_EVENT
+	(rmnet_handler_template, rmnet_ingress_handler,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb)
+);
+
+DEFINE_EVENT
+	(rmnet_handler_template, rmnet_vnd_start_xmit,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb)
+);
+
+DEFINE_EVENT
+	(rmnet_handler_template, __rmnet_deliver_skb,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb)
+);
+
+DECLARE_EVENT_CLASS
+	(rmnet_tc_fc_template,
+
+	 TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
+
+	 TP_ARGS(tcm_handle, qdisc_len, is_enable),
+
+	 TP_STRUCT__entry(
+		__field(u32, handle)
+		__field(int, qlen)
+		__field(int, enable)
+	 ),
+
+	 TP_fast_assign(
+		__entry->handle = tcm_handle;
+		__entry->qlen = qdisc_len;
+		__entry->enable = is_enable;
+	 ),
+
+	 TP_printk("tcm_handle=%d qdisc length=%d flow %s",
+		   __entry->handle, __entry->qlen,
+		   __entry->enable ? "enable" : "disable")
+)
+
+DEFINE_EVENT
+	(rmnet_tc_fc_template, rmnet_fc_qmi,
+
+	 TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
+
+	 TP_ARGS(tcm_handle, qdisc_len, is_enable)
+);
+
+DEFINE_EVENT
+	(rmnet_tc_fc_template, rmnet_fc_map,
+
+	 TP_PROTO(u32 tcm_handle, int qdisc_len, int is_enable),
+
+	 TP_ARGS(tcm_handle, qdisc_len, is_enable)
+);
+
+DECLARE_EVENT_CLASS
+	(rmnet_aggregation_template,
+
+	 TP_PROTO(struct sk_buff *skb, int num_agg_packets),
+
+	 TP_ARGS(skb, num_agg_packets),
+
+	 TP_STRUCT__entry(
+		__field(void *, skbaddr)
+		__field(unsigned int, len)
+		__string(name, skb->dev->name)
+		__field(int, num)
+	 ),
+
+	 TP_fast_assign(
+		__entry->skbaddr = skb;
+		__entry->len = skb->len;
+		__assign_str(name, skb->dev->name);
+		__entry->num = num_agg_packets;
+	 ),
+
+	 TP_printk("dev=%s skbaddr=%pK len=%u agg_count: %d",
+		   __get_str(name), __entry->skbaddr, __entry->len,
+		   __entry->num)
+)
+
+DEFINE_EVENT
+	(rmnet_aggregation_template, rmnet_map_aggregate,
+
+	 TP_PROTO(struct sk_buff *skb, int num_agg_packets),
+
+	 TP_ARGS(skb, num_agg_packets)
+);
+
+DEFINE_EVENT
+	(rmnet_aggregation_template, rmnet_map_flush_packet_queue,
+
+	 TP_PROTO(struct sk_buff *skb, int num_agg_packets),
+
+	 TP_ARGS(skb, num_agg_packets)
+);
+
+TRACE_EVENT
+	(rmnet_start_aggregation,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb),
+
+	 TP_STRUCT__entry(
+		__string(name, skb->dev->name)
+	 ),
+
+	 TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+	 ),
+
+	 TP_printk("dev: %s, aggregated first packet", __get_str(name))
+)
+
+TRACE_EVENT
+	(rmnet_start_deaggregation,
+
+	 TP_PROTO(struct sk_buff *skb),
+
+	 TP_ARGS(skb),
+
+	 TP_STRUCT__entry(
+		__string(name, skb->dev->name)
+	 ),
+
+	 TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+	 ),
+
+	 TP_printk("dev: %s, deaggregated first packet", __get_str(name))
+)
+
+TRACE_EVENT
+	(rmnet_end_deaggregation,
+
+	 TP_PROTO(struct sk_buff *skb, int num_deagg_packets),
+
+	 TP_ARGS(skb, num_deagg_packets),
+
+	 TP_STRUCT__entry(
+		__string(name, skb->dev->name)
+		__field(int, num)
+	 ),
+
+	 TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+		__entry->num = num_deagg_packets;
+	 ),
+
+	 TP_printk("dev: %s, deaggregate end count: %d",
+		   __get_str(name), __entry->num)
+)
+
+TRACE_EVENT
+	(rmnet_map_checksum_downlink_packet,
+
+	 TP_PROTO(struct sk_buff *skb, int ckresult),
+
+	 TP_ARGS(skb, ckresult),
+
+	 TP_STRUCT__entry(
+		__string(name, skb->dev->name)
+		__field(int, res)
+	 ),
+
+	 TP_fast_assign(
+		__assign_str(name, skb->dev->name);
+		__entry->res = ckresult;
+	 ),
+
+	 TP_printk("DL checksum on dev=%s, res: %d",
+		   __get_str(name), __entry->res)
+)
+
+TRACE_EVENT
+	(rmnet_map_checksum_uplink_packet,
+
+	 TP_PROTO(struct net_device *dev, int ckresult),
+
+	 TP_ARGS(dev, ckresult),
+
+	 TP_STRUCT__entry(
+		__string(name, dev->name)
+		__field(int, res)
+	 ),
+
+	 TP_fast_assign(
+		__assign_str(name, dev->name);
+		__entry->res = ckresult;
+	 ),
+
+	 TP_printk("UL checksum on dev=%s, res: %d",
+		   __get_str(name), __entry->res)
+)
+
+DECLARE_EVENT_CLASS
+	(rmnet_physdev_action_template,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev),
+
+	 TP_STRUCT__entry(
+		__string(name, dev->name)
+	 ),
+
+	 TP_fast_assign(
+		__assign_str(name, dev->name);
+	 ),
+
+	 TP_printk("Physical dev=%s", __get_str(name))
+)
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_unregister_cb_unhandled,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev)
+);
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_unregister_cb_entry,
+
+	TP_PROTO(struct net_device *dev),
+
+	TP_ARGS(dev)
+);
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_unregister_cb_exit,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev)
+);
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_unregister_cb_clear_vnds,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev)
+);
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_unregister_cb_clear_lepcs,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev)
+);
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_associate,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev)
+);
+
+DEFINE_EVENT
+	(rmnet_physdev_action_template, rmnet_unassociate,
+
+	 TP_PROTO(struct net_device *dev),
+
+	 TP_ARGS(dev)
+);
+
+TRACE_EVENT
+	(rmnet_gro_downlink,
+
+	 TP_PROTO(gro_result_t gro_res),
+
+	 TP_ARGS(gro_res),
+
+	 TP_STRUCT__entry(
+		__field(gro_result_t, res)
+	 ),
+
+	 TP_fast_assign(
+		__entry->res = gro_res;
+	 ),
+
+	 TP_printk("GRO res: %d", __entry->res)
+)
+
+#endif /* _RMNET_DATA_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
+
diff --git a/net/rmnet_data/rmnet_data_vnd.c b/net/rmnet_data/rmnet_data_vnd.c
new file mode 100644
index 0000000..166b445
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_vnd.c
@@ -0,0 +1,1081 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ *
+ * RMNET Data virtual network driver
+ */
+
+#include <linux/types.h>
+#include <linux/rmnet_data.h>
+#include <linux/msm_rmnet.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/spinlock.h>
+#include <net/pkt_sched.h>
+#include <linux/atomic.h>
+#include <linux/net_map.h>
+#include "rmnet_data_config.h"
+#include "rmnet_data_handlers.h"
+#include "rmnet_data_private.h"
+#include "rmnet_map.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_VND);
+
+#define RMNET_MAP_FLOW_NUM_TC_HANDLE 3
+#define RMNET_VND_UF_ACTION_ADD 0
+#define RMNET_VND_UF_ACTION_DEL 1
+enum {
+	RMNET_VND_UPDATE_FLOW_OK,
+	RMNET_VND_UPDATE_FLOW_NO_ACTION,
+	RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM,
+	RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT
+};
+
+struct net_device *rmnet_devices[RMNET_DATA_MAX_VND];
+
+struct rmnet_map_flow_mapping_s {
+	struct list_head list;
+	u32 map_flow_id;
+	u32 tc_flow_valid[RMNET_MAP_FLOW_NUM_TC_HANDLE];
+	u32 tc_flow_id[RMNET_MAP_FLOW_NUM_TC_HANDLE];
+	atomic_t v4_seq;
+	atomic_t v6_seq;
+};
+
+struct rmnet_vnd_private_s {
+	u32 qos_version;
+	struct rmnet_logical_ep_conf_s local_ep;
+
+	rwlock_t flow_map_lock;
+	struct list_head flow_head;
+	struct rmnet_map_flow_mapping_s root_flow;
+};
+
+#define RMNET_VND_FC_QUEUED      0
+#define RMNET_VND_FC_NOT_ENABLED 1
+#define RMNET_VND_FC_KMALLOC_ERR 2
+
+/* Helper Functions */
+
+/* rmnet_vnd_add_qos_header() - Adds QoS header to front of skb->data
+ * @skb:        Socket buffer ("packet") to modify
+ * @dev:        Egress interface
+ *
+ * Does not check for sufficient headroom! Caller must make sure there is enough
+ * headroom.
+ */
+static void rmnet_vnd_add_qos_header(struct sk_buff *skb,
+				     struct net_device *dev,
+				     uint32_t qos_version)
+{
+	struct QMI_QOS_HDR_S *qmih;
+	struct qmi_qos_hdr8_s *qmi8h;
+
+	if (qos_version & RMNET_IOCTL_QOS_MODE_6) {
+		qmih = (struct QMI_QOS_HDR_S *)
+			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
+		qmih->version = 1;
+		qmih->flags = 0;
+		qmih->flow_id = skb->mark;
+	} else if (qos_version & RMNET_IOCTL_QOS_MODE_8) {
+		qmi8h = (struct qmi_qos_hdr8_s *)
+			skb_push(skb, sizeof(struct qmi_qos_hdr8_s));
+		/* Flags are 0 always */
+		qmi8h->hdr.version = 0;
+		qmi8h->hdr.flags = 0;
+		memset(qmi8h->reserved, 0, sizeof(qmi8h->reserved));
+		qmi8h->hdr.flow_id = skb->mark;
+	} else {
+		LOGD("%s(): Bad QoS version configured\n", __func__);
+	}
+}
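Because this helper pushes the header unconditionally, a caller that cannot rely on pre-reserved headroom would need to guarantee it first; a defensive sketch (illustrative only; whether and where the driver reserves this headroom is not visible in this hunk):

	/* Illustrative only: ensure headroom before pushing the QoS header. */
	if (skb_cow_head(skb, sizeof(struct QMI_QOS_HDR_S)) < 0) {
		dev->stats.tx_dropped++;
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	rmnet_vnd_add_qos_header(skb, dev, dev_conf->qos_version);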
+
+/* RX/TX Fixup */
+
+/* rmnet_vnd_rx_fixup() - Virtual Network Device receive fixup hook
+ * @skb:        Socket buffer ("packet") to modify
+ * @dev:        Virtual network device
+ *
+ * Additional VND specific packet processing for ingress packets
+ *
+ * Return:
+ *      - RX_HANDLER_PASS if packet should continue to process in stack
+ *      - RX_HANDLER_CONSUMED if packet should not be processed in stack
+ *
+ */
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+	if (unlikely(!dev || !skb))
+		return RX_HANDLER_CONSUMED;
+
+	dev->stats.rx_packets++;
+	dev->stats.rx_bytes += skb->len;
+
+	return RX_HANDLER_PASS;
+}
+
+/* rmnet_vnd_tx_fixup() - Virtual Network Device transmit fixup hook
+ * @skb:      Socket buffer ("packet") to modify
+ * @dev:      Virtual network device
+ *
+ * Additional VND specific packet processing for egress packets
+ *
+ * Return:
+ *      - RX_HANDLER_PASS if packet should continue to be transmitted
+ *      - RX_HANDLER_CONSUMED if packet should not be transmitted by stack
+ */
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+
+	if (unlikely(!dev || !skb))
+		return RX_HANDLER_CONSUMED;
+
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+
+	return RX_HANDLER_PASS;
+}
+
+/* Network Device Operations */
+
+/* rmnet_vnd_start_xmit() - Transmit NDO callback
+ * @skb:        Socket buffer ("packet") being sent from network stack
+ * @dev:        Virtual Network Device
+ *
+ * Standard network driver operations hook to transmit packets on virtual
+ * network device. Called by network stack. Packet is not transmitted directly
+ * from here; instead it is given to the rmnet egress handler.
+ *
+ * Return:
+ *      - NETDEV_TX_OK under all circumstances (cannot block/fail)
+ */
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+					struct net_device *dev)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+
+	trace_rmnet_vnd_start_xmit(skb);
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+	if (dev_conf->local_ep.egress_dev) {
+		/* QoS header should come after MAP header */
+		if (dev_conf->qos_version)
+			rmnet_vnd_add_qos_header(skb,
+						 dev,
+						 dev_conf->qos_version);
+		rmnet_egress_handler(skb, &dev_conf->local_ep);
+	} else {
+		dev->stats.tx_dropped++;
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_VND_NO_EGRESS);
+	}
+	return NETDEV_TX_OK;
+}
+
+/* rmnet_vnd_change_mtu() - Change MTU NDO callback
+ * @dev:         Virtual network device
+ * @new_mtu:     New MTU value to set (in bytes)
+ *
+ * Standard network driver operations hook to set the MTU. Called by kernel to
+ * set the device MTU. Rejects MTU values which are negative or greater than
+ * RMNET_DATA_MAX_PACKET_SIZE.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - -EINVAL if new_mtu is out of range
+ */
+static int rmnet_vnd_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if (new_mtu < 0 || new_mtu > RMNET_DATA_MAX_PACKET_SIZE)
+		return -EINVAL;
+
+	dev->mtu = new_mtu;
+	return 0;
+}
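For orientation, callbacks like rmnet_vnd_start_xmit() and rmnet_vnd_change_mtu() are exposed to the stack through a net_device_ops table; a minimal sketch of that wiring (illustrative only; the ops structure actually used by the driver appears later in this file and may carry additional callbacks):

/* Illustrative only: minimal ndo wiring for the virtual device. */
static const struct net_device_ops rmnet_vnd_ops_sketch = {
	.ndo_start_xmit	= rmnet_vnd_start_xmit,
	.ndo_change_mtu	= rmnet_vnd_change_mtu,
};

/* ... and in the device setup path: dev->netdev_ops = &rmnet_vnd_ops_sketch; */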
+
+#ifdef CONFIG_RMNET_DATA_FC
+static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
+				   struct ifreq *ifr,
+				   int cmd)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+	int rc, qdisc_len = 0;
+	struct rmnet_ioctl_data_s ioctl_data;
+
+	rc = 0;
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	switch (cmd) {
+	case RMNET_IOCTL_SET_QOS_ENABLE:
+		LOGM("RMNET_IOCTL_SET_QOS_ENABLE on %s", dev->name);
+		if (!dev_conf->qos_version)
+			dev_conf->qos_version = RMNET_IOCTL_QOS_MODE_6;
+		break;
+
+	case RMNET_IOCTL_SET_QOS_DISABLE:
+		LOGM("RMNET_IOCTL_SET_QOS_DISABLE on %s", dev->name);
+		dev_conf->qos_version = 0;
+		break;
+
+	case RMNET_IOCTL_GET_QOS:           /* Get QoS header state    */
+		LOGM("RMNET_IOCTL_GET_QOS on %s", dev->name);
+		ioctl_data.u.operation_mode = (dev_conf->qos_version ==
+						RMNET_IOCTL_QOS_MODE_6);
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+				 sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+
+	case RMNET_IOCTL_FLOW_ENABLE:
+		LOGL("RMNET_IOCTL_FLOW_ENABLE on %s", dev->name);
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+				   sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		qdisc_len = tc_qdisc_flow_control(dev,
+						  ioctl_data.u.tcm_handle, 1);
+		trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 1);
+		break;
+
+	case RMNET_IOCTL_FLOW_DISABLE:
+		LOGL("RMNET_IOCTL_FLOW_DISABLE on %s", dev->name);
+		if (copy_from_user(&ioctl_data, ifr->ifr_ifru.ifru_data,
+				   sizeof(struct rmnet_ioctl_data_s))) {
+			rc = -EFAULT;
+			break;
+		}
+		qdisc_len = tc_qdisc_flow_control(dev,
+						  ioctl_data.u.tcm_handle, 0);
+		trace_rmnet_fc_qmi(ioctl_data.u.tcm_handle, qdisc_len, 0);
+		break;
+
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+struct rmnet_vnd_fc_work {
+	struct work_struct work;
+	struct net_device *dev;
+	u32 tc_handle;
+	int enable;
+};
+
+static void _rmnet_vnd_wq_flow_control(struct work_struct *work)
+{
+	struct rmnet_vnd_fc_work *fcwork;
+	int qdisc_len = 0;
+
+	fcwork = container_of(work, struct rmnet_vnd_fc_work, work);
+
+	rtnl_lock();
+	qdisc_len = tc_qdisc_flow_control(fcwork->dev, fcwork->tc_handle,
+					  fcwork->enable);
+	trace_rmnet_fc_map(fcwork->tc_handle, qdisc_len, fcwork->enable);
+	rtnl_unlock();
+
+	LOGL("[%s] handle:%08X enable:%d",
+	     fcwork->dev->name, fcwork->tc_handle, fcwork->enable);
+
+	kfree(work);
+}
+
+static int _rmnet_vnd_do_flow_control(struct net_device *dev,
+				      u32 tc_handle,
+				      int enable)
+{
+	struct rmnet_vnd_fc_work *fcwork;
+
+	fcwork = kzalloc(sizeof(*fcwork), GFP_ATOMIC);
+	if (!fcwork)
+		return RMNET_VND_FC_KMALLOC_ERR;
+
+	INIT_WORK((struct work_struct *)fcwork, _rmnet_vnd_wq_flow_control);
+	fcwork->dev = dev;
+	fcwork->tc_handle = tc_handle;
+	fcwork->enable = enable;
+
+	schedule_work((struct work_struct *)fcwork);
+	return RMNET_VND_FC_QUEUED;
+}
+#else
+static int _rmnet_vnd_do_qos_ioctl(struct net_device *dev,
+				   struct ifreq *ifr,
+				   int cmd)
+{
+	return -EINVAL;
+}
+
+static inline int _rmnet_vnd_do_flow_control(struct net_device *dev,
+					     u32 tc_handle,
+					     int enable)
+{
+	LOGD("[%s] called with no QoS support", dev->name);
+	return RMNET_VND_FC_NOT_ENABLED;
+}
+#endif /* CONFIG_RMNET_DATA_FC */
+
+static int rmnet_vnd_ioctl_extended(struct net_device *dev, struct ifreq *ifr)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+	struct rmnet_ioctl_extended_s ext_cmd;
+	int rc = 0;
+
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	rc = copy_from_user(&ext_cmd, ifr->ifr_ifru.ifru_data,
+			    sizeof(struct rmnet_ioctl_extended_s));
+	if (rc) {
+		LOGM("%s(): copy_from_user() failed\n", __func__);
+		return rc;
+	}
+
+	switch (ext_cmd.extended_ioctl) {
+	case RMNET_IOCTL_GET_SUPPORTED_FEATURES:
+		ext_cmd.u.data = 0;
+		break;
+
+	case RMNET_IOCTL_GET_DRIVER_NAME:
+		strlcpy(ext_cmd.u.if_name, "rmnet_data",
+			sizeof(ext_cmd.u.if_name));
+		break;
+
+	case RMNET_IOCTL_GET_SUPPORTED_QOS_MODES:
+		ext_cmd.u.data = RMNET_IOCTL_QOS_MODE_6
+				 | RMNET_IOCTL_QOS_MODE_8;
+		break;
+
+	case RMNET_IOCTL_GET_QOS_VERSION:
+		ext_cmd.u.data = dev_conf->qos_version;
+		break;
+
+	case RMNET_IOCTL_SET_QOS_VERSION:
+		if (ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_6 ||
+		    ext_cmd.u.data == RMNET_IOCTL_QOS_MODE_8 ||
+		    ext_cmd.u.data == 0) {
+			dev_conf->qos_version = ext_cmd.u.data;
+		} else {
+			rc = -EINVAL;
+			goto done;
+		}
+		break;
+
+	default:
+		rc = -EINVAL;
+		goto done;
+	}
+
+	rc = copy_to_user(ifr->ifr_ifru.ifru_data, &ext_cmd,
+			  sizeof(struct rmnet_ioctl_extended_s));
+	if (rc)
+		LOGM("%s(): copy_to_user() failed\n", __func__);
+
+done:
+	return rc;
+}
+
+/* rmnet_vnd_ioctl() - IOCTL NDO callback
+ * @dev:         Virtual network device
+ * @ifreq:       User data
+ * @cmd:         IOCTL command value
+ *
+ * Standard network driver operations hook to process IOCTLs. Called by the
+ * kernel to process non-standard IOCTLs for the device.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - -EINVAL if unknown IOCTL
+ */
+static int rmnet_vnd_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+	int rc;
+	struct rmnet_ioctl_data_s ioctl_data;
+
+	rc = 0;
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	rc = _rmnet_vnd_do_qos_ioctl(dev, ifr, cmd);
+	if (rc != -EINVAL)
+		return rc;
+	rc = 0; /* Reset rc as it may contain -EINVAL from above */
+
+	switch (cmd) {
+	case RMNET_IOCTL_OPEN: /* Do nothing. Support legacy behavior */
+		LOGM("RMNET_IOCTL_OPEN on %s (ignored)", dev->name);
+		break;
+
+	case RMNET_IOCTL_CLOSE: /* Do nothing. Support legacy behavior */
+		LOGM("RMNET_IOCTL_CLOSE on %s (ignored)", dev->name);
+		break;
+
+	case RMNET_IOCTL_SET_LLP_ETHERNET:
+		LOGM("RMNET_IOCTL_SET_LLP_ETHERNET on %s (no support)",
+		     dev->name);
+		rc = -EINVAL;
+		break;
+
+	case RMNET_IOCTL_SET_LLP_IP: /* Do nothing. Support legacy behavior */
+		LOGM("RMNET_IOCTL_SET_LLP_IP on %s (ignored)", dev->name);
+		break;
+
+	case RMNET_IOCTL_GET_LLP: /* Always return IP mode */
+		LOGM("RMNET_IOCTL_GET_LLP on %s", dev->name);
+		ioctl_data.u.operation_mode = RMNET_MODE_LLP_IP;
+		if (copy_to_user(ifr->ifr_ifru.ifru_data, &ioctl_data,
+				 sizeof(struct rmnet_ioctl_data_s)))
+			rc = -EFAULT;
+		break;
+
+	case RMNET_IOCTL_EXTENDED:
+		rc = rmnet_vnd_ioctl_extended(dev, ifr);
+		break;
+
+	default:
+		LOGM("Unknown IOCTL 0x%08X", cmd);
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+static const struct net_device_ops rmnet_data_vnd_ops = {
+	.ndo_init = 0,
+	.ndo_start_xmit = rmnet_vnd_start_xmit,
+	.ndo_do_ioctl = rmnet_vnd_ioctl,
+	.ndo_change_mtu = rmnet_vnd_change_mtu,
+	.ndo_set_mac_address = 0,
+	.ndo_validate_addr = 0,
+};
+
+/* rmnet_vnd_setup() - net_device initialization callback
+ * @dev:      Virtual network device
+ *
+ * Called by kernel whenever a new rmnet_data<n> device is created. Sets MTU,
+ * flags, ARP type, needed headroom, etc...
+ */
+static void rmnet_vnd_setup(struct net_device *dev)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+
+	LOGM("Setting up device %s", dev->name);
+
+	/* Clear out private data */
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+	memset(dev_conf, 0, sizeof(struct rmnet_vnd_private_s));
+
+	dev->netdev_ops = &rmnet_data_vnd_ops;
+	dev->mtu = RMNET_DATA_DFLT_PACKET_SIZE;
+	dev->needed_headroom = RMNET_DATA_NEEDED_HEADROOM;
+	random_ether_addr(dev->dev_addr);
+	dev->tx_queue_len = RMNET_DATA_TX_QUEUE_LEN;
+
+	/* Raw IP mode */
+	dev->header_ops = 0;  /* No header */
+	dev->type = ARPHRD_RAWIP;
+	dev->hard_header_len = 0;
+	dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+	/* Flow control */
+	rwlock_init(&dev_conf->flow_map_lock);
+	INIT_LIST_HEAD(&dev_conf->flow_head);
+}
+
+/* Exposed API */
+
+/* rmnet_vnd_exit() - Shutdown cleanup hook
+ *
+ * Called by RmNet main on module unload. Cleans up data structures and
+ * unregisters/frees net_devices.
+ */
+void rmnet_vnd_exit(void)
+{
+	int i;
+
+	for (i = 0; i < RMNET_DATA_MAX_VND; i++) {
+		if (rmnet_devices[i]) {
+			unregister_netdev(rmnet_devices[i]);
+			free_netdev(rmnet_devices[i]);
+		}
+	}
+}
+
+/* rmnet_vnd_init() - Init hook
+ *
+ * Called by RmNet main on module load. Initializes data structures
+ */
+int rmnet_vnd_init(void)
+{
+	memset(rmnet_devices, 0,
+	       sizeof(struct net_device *) * RMNET_DATA_MAX_VND);
+	return 0;
+}
+
+/* rmnet_vnd_create_dev() - Create a new virtual network device node.
+ * @id:         Virtual device node id
+ * @new_device: Pointer to newly created device node
+ * @prefix:     Device name prefix
+ *
+ * Allocates structures for new virtual network devices. Sets the name of the
+ * new device and registers it with the network stack. Device will appear in
+ * ifconfig list after this is called. If the prefix is null, then
+ * RMNET_DATA_DEV_NAME_STR will be assumed.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - RMNET_CONFIG_BAD_ARGUMENTS if id is out of range or prefix is too long
+ *      - RMNET_CONFIG_DEVICE_IN_USE if id already in use
+ *      - RMNET_CONFIG_NOMEM if net_device allocation failed
+ *      - RMNET_CONFIG_UNKNOWN_ERROR if register_netdevice() fails
+ */
+int rmnet_vnd_create_dev(int id, struct net_device **new_device,
+			 const char *prefix)
+{
+	struct net_device *dev;
+	char dev_prefix[IFNAMSIZ];
+	int p, rc = 0;
+
+	if (id < 0 || id >= RMNET_DATA_MAX_VND) {
+		*new_device = 0;
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+	}
+
+	if (rmnet_devices[id] != 0) {
+		*new_device = 0;
+		return RMNET_CONFIG_DEVICE_IN_USE;
+	}
+
+	if (!prefix)
+		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d",
+			      RMNET_DATA_DEV_NAME_STR);
+	else
+		p = scnprintf(dev_prefix, IFNAMSIZ, "%s%%d", prefix);
+	if (p >= (IFNAMSIZ - 1)) {
+		LOGE("Specified prefix longer than IFNAMSIZ");
+		return RMNET_CONFIG_BAD_ARGUMENTS;
+	}
+
+	dev = alloc_netdev(sizeof(struct rmnet_vnd_private_s),
+			   dev_prefix,
+			   NET_NAME_ENUM,
+			   rmnet_vnd_setup);
+	if (!dev) {
+		LOGE("Failed to allocate netdev for id %d", id);
+		*new_device = 0;
+		return RMNET_CONFIG_NOMEM;
+	}
+
+	if (!prefix) {
+		/* Configuring DL checksum offload on rmnet_data interfaces */
+		dev->hw_features = NETIF_F_RXCSUM;
+		/* Configuring UL checksum offload on rmnet_data interfaces */
+		dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+		/* Configuring GRO on rmnet_data interfaces */
+		dev->hw_features |= NETIF_F_GRO;
+		/* Configuring Scatter-Gather on rmnet_data interfaces */
+		dev->hw_features |= NETIF_F_SG;
+		/* Configuring GSO on rmnet_data interfaces */
+		dev->hw_features |= NETIF_F_GSO;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+	}
+
+	rc = register_netdevice(dev);
+	if (rc != 0) {
+		LOGE("Failed to register netdev [%s]", dev->name);
+		free_netdev(dev);
+		*new_device = 0;
+		rc = RMNET_CONFIG_UNKNOWN_ERROR;
+	} else {
+		rmnet_devices[id] = dev;
+		*new_device = dev;
+		LOGM("Registered device %s", dev->name);
+	}
+
+	return rc;
+}
+
+/* rmnet_vnd_free_dev() - free a virtual network device node.
+ * @id:         Virtual device node id
+ *
+ * Unregisters the virtual network device node and frees it.
+ * unregister_netdev locks the rtnl mutex, so the mutex must not be locked
+ * by the caller of the function. unregister_netdev enqueues the request to
+ * unregister the device into a TODO queue. The requests in the TODO queue
+ * are only processed after the rtnl mutex is unlocked, so free_netdev() has
+ * to be called after unlocking the rtnl mutex.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - RMNET_CONFIG_NO_SUCH_DEVICE if id is invalid or not in range
+ *      - RMNET_CONFIG_DEVICE_IN_USE if device has logical ep that wasn't unset
+ */
+int rmnet_vnd_free_dev(int id)
+{
+	struct rmnet_logical_ep_conf_s *epconfig_l;
+	struct net_device *dev;
+
+	rtnl_lock();
+	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+		rtnl_unlock();
+		LOGM("Invalid id [%d]", id);
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+	}
+
+	epconfig_l = rmnet_vnd_get_le_config(rmnet_devices[id]);
+	if (epconfig_l && epconfig_l->refcount) {
+		rtnl_unlock();
+		return RMNET_CONFIG_DEVICE_IN_USE;
+	}
+
+	dev = rmnet_devices[id];
+	rmnet_devices[id] = 0;
+	rtnl_unlock();
+
+	if (dev) {
+		unregister_netdev(dev);
+		free_netdev(dev);
+		return 0;
+	} else {
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+	}
+}
+
+/* rmnet_vnd_get_name() - Gets the string name of a VND based on ID
+ * @id:         Virtual device node id
+ * @name:       Buffer to store name of virtual device node
+ * @name_len:   Length of name buffer
+ *
+ * Copies the name of the virtual device node into the user's buffer. Returns
+ * an error if the buffer is null or too small to hold the device name.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - -EINVAL if name is null
+ *      - -EINVAL if id is invalid or not in range
+ *      - -EINVAL if the name buffer is too small to hold the device name
+ */
+int rmnet_vnd_get_name(int id, char *name, int name_len)
+{
+	int p;
+
+	if (!name) {
+		LOGM("%s", "Bad arguments; name buffer null");
+		return -EINVAL;
+	}
+
+	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+		LOGM("Invalid id [%d]", id);
+		return -EINVAL;
+	}
+
+	p = strlcpy(name, rmnet_devices[id]->name, name_len);
+	if (p >= name_len) {
+		LOGM("Buffer to small (%d) to fit device name", name_len);
+		return -EINVAL;
+	}
+	LOGL("Found mapping [%d]->\"%s\"", id, name);
+
+	return 0;
+}
+
+/* rmnet_vnd_is_vnd() - Determine if a net_device is an RmNet virtual device
+ * @dev:        Network device to test
+ *
+ * Searches through list of known RmNet virtual devices. This function is O(n)
+ * and should not be used in the data path.
+ *
+ * Return:
+ *      - 0 if the device is not an RmNet virtual device
+ *      - (index + 1) of the device if it is an RmNet virtual device
+ */
+int rmnet_vnd_is_vnd(struct net_device *dev)
+{
+	/* This is not an efficient search, but, this will only be called in
+	 * a configuration context, and the list is small.
+	 */
+	int i;
+
+	if (!dev)
+		return 0;
+
+	for (i = 0; i < RMNET_DATA_MAX_VND; i++)
+		if (dev == rmnet_devices[i])
+			return i + 1;
+
+	return 0;
+}
+
+/* rmnet_vnd_get_le_config() - Get the logical endpoint configuration
+ * @dev:      Virtual device node
+ *
+ * Gets the logical endpoint configuration for an RmNet virtual network device
+ * node. The caller should confirm the device is an RmNet VND before calling.
+ *
+ * Return:
+ *      - Pointer to logical endpoint configuration structure
+ *      - 0 (null) if dev is null
+ */
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+
+	if (!dev)
+		return 0;
+
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+	if (!dev_conf)
+		return 0;
+
+	return &dev_conf->local_ep;
+}
+
+/* _rmnet_vnd_get_flow_map() - Gets object representing a MAP flow handle
+ * @dev_conf: Private configuration structure for virtual network device
+ * @map_flow: MAP flow handle to look up
+ *
+ * Loops through available flow mappings and compares the MAP flow handle.
+ * Returns when mapping is found.
+ *
+ * Return:
+ *      - Null if no mapping was found
+ *      - Pointer to mapping otherwise
+ */
+static struct rmnet_map_flow_mapping_s *_rmnet_vnd_get_flow_map
+					(struct rmnet_vnd_private_s *dev_conf,
+					 u32 map_flow)
+{
+	struct list_head *p;
+	struct rmnet_map_flow_mapping_s *itm;
+
+	list_for_each(p, &dev_conf->flow_head) {
+		itm = list_entry(p, struct rmnet_map_flow_mapping_s, list);
+
+		if (unlikely(!itm))
+			return 0;
+
+		if (itm->map_flow_id == map_flow)
+			return itm;
+	}
+	return 0;
+}
+
+/* _rmnet_vnd_update_flow_map() - Add or remove individual TC flow handles
+ * @action: One of RMNET_VND_UF_ACTION_ADD / RMNET_VND_UF_ACTION_DEL
+ * @itm: Flow mapping object
+ * @map_flow: TC flow handle
+ *
+ * RMNET_VND_UF_ACTION_ADD:
+ * Will check for a free mapping slot in the mapping object. If one is found,
+ * the valid flag for that slot is set to 1 and the TC handle is stored there.
+ *
+ * RMNET_VND_UF_ACTION_DEL:
+ * Will check for a matching tc handle. If one is found, the valid flag for
+ * that slot is cleared to 0 and the stored handle is zeroed.
+ *
+ * Return:
+ *      - RMNET_VND_UPDATE_FLOW_OK if the tc flow handle was added/removed
+ *      - RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM if there is no free tc handle slot
+ *      - RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT if flow mapping is now empty
+ *      - RMNET_VND_UPDATE_FLOW_NO_ACTION if no action was taken
+ */
+static int _rmnet_vnd_update_flow_map(u8 action,
+				      struct rmnet_map_flow_mapping_s *itm,
+				      u32 tc_flow)
+{
+	int rc, i, j;
+
+	rc = RMNET_VND_UPDATE_FLOW_OK;
+
+	switch (action) {
+	case RMNET_VND_UF_ACTION_ADD:
+		rc = RMNET_VND_UPDATE_FLOW_NO_MORE_ROOM;
+		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
+			if (itm->tc_flow_valid[i] == 0) {
+				itm->tc_flow_valid[i] = 1;
+				itm->tc_flow_id[i] = tc_flow;
+				rc = RMNET_VND_UPDATE_FLOW_OK;
+				LOGD("{%pK}->tc_flow_id[%d]=%08X",
+				     itm, i, tc_flow);
+				break;
+			}
+		}
+		break;
+
+	case RMNET_VND_UF_ACTION_DEL:
+		j = 0;
+		rc = RMNET_VND_UPDATE_FLOW_OK;
+		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
+			if (itm->tc_flow_valid[i] == 1) {
+				if (itm->tc_flow_id[i] == tc_flow) {
+					itm->tc_flow_valid[i] = 0;
+					itm->tc_flow_id[i] = 0;
+					j++;
+					LOGD("{%pK}->tc_flow_id[%d]=0", itm, i);
+				}
+			} else {
+				j++;
+			}
+		}
+		if (j == RMNET_MAP_FLOW_NUM_TC_HANDLE)
+			rc = RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT;
+		break;
+
+	default:
+		rc = RMNET_VND_UPDATE_FLOW_NO_ACTION;
+		break;
+	}
+	return rc;
+}
+
+/* rmnet_vnd_add_tc_flow() - Add a MAP/TC flow handle mapping
+ * @id: Virtual network device ID
+ * @map_flow: MAP flow handle
+ * @tc_flow: TC flow handle
+ *
+ * Checks for an existing flow mapping object corresponding to map_flow. If one
+ * is found, then it will try to add to the existing mapping object. Otherwise,
+ * a new mapping object is created.
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful
+ *      - RMNET_CONFIG_TC_HANDLE_FULL if there is no more room in the map object
+ *      - RMNET_CONFIG_NOMEM failed to allocate a new map object
+ */
+int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
+{
+	struct rmnet_map_flow_mapping_s *itm;
+	struct net_device *dev;
+	struct rmnet_vnd_private_s *dev_conf;
+	int r;
+	unsigned long flags;
+
+	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+		LOGM("Invalid VND id [%d]", id);
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+	}
+
+	dev = rmnet_devices[id];
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	if (!dev_conf)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
+	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
+	if (itm) {
+		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_ADD,
+					       itm, tc_flow);
+		if (r != RMNET_VND_UPDATE_FLOW_OK) {
+			write_unlock_irqrestore(&dev_conf->flow_map_lock,
+						flags);
+			return RMNET_CONFIG_TC_HANDLE_FULL;
+		}
+		write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+		return RMNET_CONFIG_OK;
+	}
+	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+
+	itm = kzalloc(sizeof(*itm), GFP_KERNEL);
+	if (!itm) {
+		LOGM("%s", "Failure allocating flow mapping");
+		return RMNET_CONFIG_NOMEM;
+	}
+
+	itm->map_flow_id = map_flow;
+	itm->tc_flow_valid[0] = 1;
+	itm->tc_flow_id[0] = tc_flow;
+
+	/* atomic_t members can be initialized dynamically with atomic_set() */
+	atomic_set(&itm->v4_seq, 0);
+	atomic_set(&itm->v6_seq, 0);
+
+	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
+	list_add(&itm->list, &dev_conf->flow_head);
+	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+
+	LOGD("Created flow mapping [%s][0x%08X][0x%08X]@%pK",
+	     dev->name, itm->map_flow_id, itm->tc_flow_id[0], itm);
+
+	return RMNET_CONFIG_OK;
+}
+
+/* rmnet_vnd_del_tc_flow() - Delete a MAP/TC flow handle mapping
+ * @id: Virtual network device ID
+ * @map_flow: MAP flow handle
+ * @tc_flow: TC flow handle
+ *
+ * Checks for an existing flow mapping object corresponding to map_flow. If one
+ * is found, then it will try to remove the existing tc_flow mapping. If the
+ * mapping object no longer contains any mappings, then it is freed. Otherwise
+ * the mapping object is left in the list.
+ *
+ * Return:
+ *      - RMNET_CONFIG_OK if successful or if there was no such tc_flow
+ *      - RMNET_CONFIG_INVALID_REQUEST if there is no such map_flow
+ */
+int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+	struct net_device *dev;
+	struct rmnet_map_flow_mapping_s *itm;
+	int r;
+	unsigned long flags;
+	int rc = RMNET_CONFIG_OK;
+
+	if ((id < 0) || (id >= RMNET_DATA_MAX_VND) || !rmnet_devices[id]) {
+		LOGM("Invalid VND id [%d]", id);
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+	}
+
+	dev = rmnet_devices[id];
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	if (!dev_conf)
+		return RMNET_CONFIG_NO_SUCH_DEVICE;
+
+	r = RMNET_VND_UPDATE_FLOW_NO_ACTION;
+	write_lock_irqsave(&dev_conf->flow_map_lock, flags);
+	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow);
+	if (!itm) {
+		rc = RMNET_CONFIG_INVALID_REQUEST;
+	} else {
+		r = _rmnet_vnd_update_flow_map(RMNET_VND_UF_ACTION_DEL,
+					       itm, tc_flow);
+		if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT)
+			list_del(&itm->list);
+	}
+	write_unlock_irqrestore(&dev_conf->flow_map_lock, flags);
+
+	if (r == RMNET_VND_UPDATE_FLOW_NO_VALID_LEFT) {
+		if (itm)
+			LOGD("Removed flow mapping [%s][0x%08X]@%pK",
+			     dev->name, itm->map_flow_id, itm);
+		kfree(itm);
+	}
+
+	return rc;
+}
+
+/* rmnet_vnd_do_flow_control() - Process flow control request
+ * @dev: Virtual network device node to do lookup on
+ * @map_flow_id: Flow ID from MAP message
+ * @v4_seq: IPv4 indication sequence number
+ * @v6_seq: IPv6 indication sequence number
+ * @enable: boolean to enable/disable flow.
+ *
+ * Return:
+ *      - 0 if successful
+ *      - 1 if no mapping is found
+ *      - 2 if dev is not RmNet virtual network device node
+ */
+int rmnet_vnd_do_flow_control(struct net_device *dev,
+			      u32 map_flow_id,
+			      u16 v4_seq,
+			      u16 v6_seq,
+			      int enable)
+{
+	struct rmnet_vnd_private_s *dev_conf;
+	struct rmnet_map_flow_mapping_s *itm;
+	int do_fc, error, i;
+
+	error = 0;
+	do_fc = 0;
+
+	if (unlikely(!dev))
+		return 2;
+
+	if (!rmnet_vnd_is_vnd(dev))
+		return 2;
+
+	dev_conf = (struct rmnet_vnd_private_s *)netdev_priv(dev);
+
+	if (unlikely(!dev_conf))
+		return 2;
+
+	read_lock(&dev_conf->flow_map_lock);
+	if (map_flow_id == 0xFFFFFFFF) {
+		itm = &dev_conf->root_flow;
+		goto nolookup;
+	}
+
+	itm = _rmnet_vnd_get_flow_map(dev_conf, map_flow_id);
+
+	if (!itm) {
+		LOGL("Got flow control request for unknown flow %08X",
+		     map_flow_id);
+		goto fcdone;
+	}
+
+nolookup:
+	if (v4_seq == 0 || v4_seq >= atomic_read(&itm->v4_seq)) {
+		atomic_set(&itm->v4_seq, v4_seq);
+		if (map_flow_id == 0xFFFFFFFF) {
+			LOGD("Setting VND TX queue state to %d", enable);
+			/* Although we expect a similar number of enable and
+			 * disable commands, optimize for the disable path,
+			 * which is more latency sensitive than enable.
+			 */
+			if (unlikely(enable))
+				netif_wake_queue(dev);
+			else
+				netif_stop_queue(dev);
+			trace_rmnet_fc_map(0xFFFFFFFF, 0, enable);
+			goto fcdone;
+		}
+		for (i = 0; i < RMNET_MAP_FLOW_NUM_TC_HANDLE; i++) {
+			if (itm->tc_flow_valid[i] == 1) {
+				LOGD("Found [%s][0x%08X][%d:0x%08X]",
+				     dev->name, itm->map_flow_id, i,
+				     itm->tc_flow_id[i]);
+
+				_rmnet_vnd_do_flow_control(dev,
+							   itm->tc_flow_id[i],
+							   enable);
+			}
+		}
+	} else {
+		LOGD("Internal seq(%hd) higher than called(%hd)",
+		     atomic_read(&itm->v4_seq), v4_seq);
+	}
+
+fcdone:
+	read_unlock(&dev_conf->flow_map_lock);
+
+	return error;
+}
+
+/* rmnet_vnd_get_by_id() - Get VND by array index ID
+ * @id: Virtual network device id [0, RMNET_DATA_MAX_VND)
+ *
+ * Return:
+ *      - 0 if no device or ID out of range
+ *      - otherwise return pointer to VND net_device struct
+ */
+struct net_device *rmnet_vnd_get_by_id(int id)
+{
+	if (id < 0 || id >= RMNET_DATA_MAX_VND) {
+		LOGE("Bug; VND ID out of bounds");
+		return 0;
+	}
+	return rmnet_devices[id];
+}
diff --git a/net/rmnet_data/rmnet_data_vnd.h b/net/rmnet_data/rmnet_data_vnd.h
new file mode 100644
index 0000000..e0afeff
--- /dev/null
+++ b/net/rmnet_data/rmnet_data_vnd.h
@@ -0,0 +1,39 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Virtual Network Device APIs
+ */
+
+#include <linux/types.h>
+
+#ifndef _RMNET_DATA_VND_H_
+#define _RMNET_DATA_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev,
+			      u32 map_flow_id,
+			      u16 v4_seq,
+			      u16 v6_seq,
+			      int enable);
+struct rmnet_logical_ep_conf_s *rmnet_vnd_get_le_config(struct net_device *dev);
+int rmnet_vnd_get_name(int id, char *name, int name_len);
+int rmnet_vnd_create_dev(int id, struct net_device **new_device,
+			 const char *prefix);
+int rmnet_vnd_free_dev(int id);
+int rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+int rmnet_vnd_is_vnd(struct net_device *dev);
+int rmnet_vnd_add_tc_flow(u32 id, u32 map_flow, u32 tc_flow);
+int rmnet_vnd_del_tc_flow(u32 id, u32 map_flow, u32 tc_flow);
+int rmnet_vnd_init(void);
+void rmnet_vnd_exit(void);
+struct net_device *rmnet_vnd_get_by_id(int id);
+
+#endif /* _RMNET_DATA_VND_H_ */
diff --git a/net/rmnet_data/rmnet_map.h b/net/rmnet_data/rmnet_map.h
new file mode 100644
index 0000000..f597f1b
--- /dev/null
+++ b/net/rmnet_data/rmnet_map.h
@@ -0,0 +1,150 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <net/rmnet_config.h>
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+
+struct rmnet_map_control_command_s {
+	u8  command_name;
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+	u8  cmd_type:2;
+	u8  reserved:6;
+#else
+	u8  reserved:6;
+	u8  cmd_type:2;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+	u16 reserved2;
+	u32 transaction_id;
+	union {
+		u8  data[65528];
+		struct {
+#ifndef RMNET_USE_BIG_ENDIAN_STRUCTS
+			u16  ip_family:2;
+			u16  reserved:14;
+#else
+			u16  reserved:14;
+			u16  ip_family:2;
+#endif /* RMNET_USE_BIG_ENDIAN_STRUCTS */
+			u16  flow_control_seq_num;
+			u32  qos_id;
+		} flow_control;
+	};
+}  __aligned(1);
+
+struct rmnet_map_dl_checksum_trailer_s {
+	unsigned char  reserved_h;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned char  valid:1;
+	unsigned char  reserved_l:7;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	unsigned char  reserved_l:7;
+	unsigned char  valid:1;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+	unsigned short checksum_start_offset;
+	unsigned short checksum_length;
+	unsigned short checksum_value;
+} __aligned(1);
+
+struct rmnet_map_ul_checksum_header_s {
+	unsigned short checksum_start_offset;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	unsigned short checksum_insert_offset:14;
+	unsigned short udp_ip4_ind:1;
+	unsigned short cks_en:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	unsigned short cks_en:1;
+	unsigned short udp_ip4_ind:1;
+	unsigned short checksum_insert_offset:14;
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+} __aligned(1);
+
+enum rmnet_map_results_e {
+	RMNET_MAP_SUCCESS,
+	RMNET_MAP_CONSUMED,
+	RMNET_MAP_GENERAL_FAILURE,
+	RMNET_MAP_NOT_ENABLED,
+	RMNET_MAP_FAILED_AGGREGATION,
+	RMNET_MAP_FAILED_MUX
+};
+
+enum rmnet_map_mux_errors_e {
+	RMNET_MAP_MUX_SUCCESS,
+	RMNET_MAP_MUX_INVALID_MUX_ID,
+	RMNET_MAP_MUX_INVALID_PAD_LENGTH,
+	RMNET_MAP_MUX_INVALID_PKT_LENGTH,
+	/* This should always be the last element */
+	RMNET_MAP_MUX_ENUM_LENGTH
+};
+
+enum rmnet_map_checksum_errors_e {
+	RMNET_MAP_CHECKSUM_OK,
+	RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET,
+	RMNET_MAP_CHECKSUM_VALIDATION_FAILED,
+	RMNET_MAP_CHECKSUM_ERR_UNKNOWN,
+	RMNET_MAP_CHECKSUM_ERR_NOT_DATA_PACKET,
+	RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER,
+	RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION,
+	RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT,
+	RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET,
+	RMNET_MAP_CHECKSUM_SKIPPED,
+	RMNET_MAP_CHECKSUM_SW,
+	/* This should always be the last element */
+	RMNET_MAP_CHECKSUM_ENUM_LENGTH
+};
+
+enum rmnet_map_commands_e {
+	RMNET_MAP_COMMAND_NONE,
+	RMNET_MAP_COMMAND_FLOW_DISABLE,
+	RMNET_MAP_COMMAND_FLOW_ENABLE,
+	/* These should always be the last 2 elements */
+	RMNET_MAP_COMMAND_UNKNOWN,
+	RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+enum rmnet_map_agg_state_e {
+	RMNET_MAP_AGG_IDLE,
+	RMNET_MAP_TXFER_SCHEDULED
+};
+
+#define RMNET_MAP_COMMAND_REQUEST     0
+#define RMNET_MAP_COMMAND_ACK         1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID     3
+
+#define RMNET_MAP_NO_PAD_BYTES        0
+#define RMNET_MAP_ADD_PAD_BYTES       1
+
+uint8_t rmnet_map_demultiplex(struct sk_buff *skb);
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_phys_ep_config *config);
+
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+						    int hdrlen, int pad);
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+				      struct rmnet_phys_ep_config *config);
+void rmnet_map_aggregate(struct sk_buff *skb,
+			 struct rmnet_phys_ep_config *config);
+
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb);
+int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				     struct net_device *orig_dev,
+				     u32 egress_data_format);
+
+#endif /* _RMNET_MAP_H_ */
diff --git a/net/rmnet_data/rmnet_map_command.c b/net/rmnet_data/rmnet_map_command.c
new file mode 100644
index 0000000..306b6fb
--- /dev/null
+++ b/net/rmnet_data/rmnet_map_command.c
@@ -0,0 +1,196 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet_data.h>
+#include <linux/net_map.h>
+#include <net/pkt_sched.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+#include "rmnet_data_private.h"
+#include "rmnet_data_vnd.h"
+#include "rmnet_data_stats.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPC);
+
+unsigned long int rmnet_map_command_stats[RMNET_MAP_COMMAND_ENUM_LENGTH];
+module_param_array(rmnet_map_command_stats, ulong, 0, 0444);
+MODULE_PARM_DESC(rmnet_map_command_stats, "MAP command statistics");
+
+/* rmnet_map_do_flow_control() - Process MAP flow control command
+ * @skb: Socket buffer containing the MAP flow control message
+ * @config: Physical end-point configuration of ingress device
+ * @enable: boolean for enable/disable
+ *
+ * Process in-band MAP flow control messages. Assumes the mux ID is mapped to
+ * an RmNet Data virtual network device.
+ *
+ * Return:
+ *      - RMNET_MAP_COMMAND_UNSUPPORTED on any error
+ *      - RMNET_MAP_COMMAND_ACK on success
+ */
+static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
+					 struct rmnet_phys_ep_config *config,
+					 int enable)
+{
+	struct rmnet_map_control_command_s *cmd;
+	struct net_device *vnd;
+	struct rmnet_logical_ep_conf_s *ep;
+	u8 mux_id;
+	u16  ip_family;
+	u16  fc_seq;
+	u32  qos_id;
+	int r;
+
+	if (unlikely(!skb || !config))
+		return RX_HANDLER_CONSUMED;
+
+	mux_id = RMNET_MAP_GET_MUX_ID(skb);
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+
+	if (mux_id >= RMNET_DATA_MAX_LOGICAL_EP) {
+		LOGD("Got packet on %s with bad mux id %d",
+		     skb->dev->name, mux_id);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_BAD_MUX);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	ep = &config->muxed_ep[mux_id];
+
+	if (!ep->refcount) {
+		LOGD("Packet on %s:%d; has no logical endpoint config",
+		     skb->dev->name, mux_id);
+
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_MUX_NO_EP);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	vnd = ep->egress_dev;
+
+	ip_family = cmd->flow_control.ip_family;
+	fc_seq = ntohs(cmd->flow_control.flow_control_seq_num);
+	qos_id = ntohl(cmd->flow_control.qos_id);
+
+	/* Ignore the ip family and pass the sequence number for both the v4
+	 * and v6 sequences. User space does not support creating dedicated
+	 * flows for the two protocols.
+	 */
+	r = rmnet_vnd_do_flow_control(vnd, qos_id, fc_seq, fc_seq, enable);
+	LOGD("dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
+	     skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);
+
+	if (r) {
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
+		return RMNET_MAP_COMMAND_UNSUPPORTED;
+	} else {
+		return RMNET_MAP_COMMAND_ACK;
+	}
+}
+
+/* rmnet_map_send_ack() - Send N/ACK message for MAP commands
+ * @skb: Socket buffer containing the MAP command message
+ * @type: N/ACK message selector
+ * @config: Physical end-point configuration of ingress device
+ *
+ * skb is modified to contain the message type selector. The message is then
+ * transmitted on skb->dev. Note that this function grabs global Tx lock on
+ * skb->dev for latency reasons.
+ *
+ * Return:
+ *      - void
+ */
+static void rmnet_map_send_ack(struct sk_buff *skb,
+			       unsigned char type,
+			       struct rmnet_phys_ep_config *config)
+{
+	struct rmnet_map_control_command_s *cmd;
+	int xmit_status;
+
+	if (unlikely(!skb))
+		return;
+
+	skb->protocol = htons(ETH_P_MAP);
+
+	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
+	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4)) {
+		if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s)
+		    + RMNET_MAP_GET_LENGTH(skb)
+		    + sizeof(struct rmnet_map_dl_checksum_trailer_s)))) {
+			rmnet_stats_dl_checksum(
+			  RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER);
+			return;
+		}
+
+		skb_trim(skb, skb->len -
+			 sizeof(struct rmnet_map_dl_checksum_trailer_s));
+	}
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	cmd->cmd_type = type & 0x03;
+
+	netif_tx_lock(skb->dev);
+	xmit_status = skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev);
+	netif_tx_unlock(skb->dev);
+
+	LOGD("MAP command ACK=%hhu sent with rc: %d", type & 0x03, xmit_status);
+}
+
+/* rmnet_map_command() - Entry point for handling MAP commands
+ * @skb: Socket buffer containing the MAP command message
+ * @config: Physical end-point configuration of ingress device
+ *
+ * Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ *
+ * Return:
+ *      - RX_HANDLER_CONSUMED. Command frames are always consumed.
+ */
+rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
+				      struct rmnet_phys_ep_config *config)
+{
+	struct rmnet_map_control_command_s *cmd;
+	unsigned char command_name;
+	unsigned char rc = 0;
+
+	if (unlikely(!skb))
+		return RX_HANDLER_CONSUMED;
+
+	cmd = RMNET_MAP_GET_CMD_START(skb);
+	command_name = cmd->command_name;
+
+	if (command_name < RMNET_MAP_COMMAND_ENUM_LENGTH)
+		rmnet_map_command_stats[command_name]++;
+
+	switch (command_name) {
+	case RMNET_MAP_COMMAND_FLOW_ENABLE:
+		rc = rmnet_map_do_flow_control(skb, config, 1);
+		break;
+
+	case RMNET_MAP_COMMAND_FLOW_DISABLE:
+		rc = rmnet_map_do_flow_control(skb, config, 0);
+		break;
+
+	default:
+		rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
+		LOGM("Unknown MAP command: %d", command_name);
+		rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
+		break;
+	}
+	if (rc == RMNET_MAP_COMMAND_ACK)
+		rmnet_map_send_ack(skb, rc, config);
+	return RX_HANDLER_CONSUMED;
+}
diff --git a/net/rmnet_data/rmnet_map_data.c b/net/rmnet_data/rmnet_map_data.c
new file mode 100644
index 0000000..d7e420b
--- /dev/null
+++ b/net/rmnet_data/rmnet_map_data.c
@@ -0,0 +1,750 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data MAP protocol
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rmnet_data.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/time.h>
+#include <linux/net_map.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/in.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <net/rmnet_config.h>
+#include "rmnet_data_config.h"
+#include "rmnet_map.h"
+#include "rmnet_data_private.h"
+#include "rmnet_data_stats.h"
+#include "rmnet_data_trace.h"
+
+RMNET_LOG_MODULE(RMNET_DATA_LOGMASK_MAPD);
+
+/* Local Definitions */
+
+long agg_time_limit __read_mostly = 1000000L;
+module_param(agg_time_limit, long, 0644);
+MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");
+
+long agg_bypass_time __read_mostly = 10000000L;
+module_param(agg_bypass_time, long, 0644);
+MODULE_PARM_DESC(agg_bypass_time, "Skip agg when apart spaced more than this");
+
+struct agg_work {
+	struct delayed_work work;
+	struct rmnet_phys_ep_config *config;
+};
+
+#define RMNET_MAP_DEAGGR_SPACING  64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+
+/* rmnet_map_add_map_header() - Adds MAP header to front of skb->data
+ * @skb:        Socket buffer ("packet") to modify
+ * @hdrlen:     Number of bytes of header data which should not be included in
+ *              MAP length field
+ * @pad:        Specify if padding the MAP packet to make it 4 byte aligned is
+ *              necessary
+ *
+ * Padding is calculated and set appropriately in MAP header. Mux ID is
+ * initialized to 0.
+ *
+ * Return:
+ *      - Pointer to MAP structure
+ *      - 0 (null) if insufficient headroom
+ *      - 0 (null) if insufficient tailroom for padding bytes
+ */
+struct rmnet_map_header_s *rmnet_map_add_map_header(struct sk_buff *skb,
+						    int hdrlen, int pad)
+{
+	u32 padding, map_datalen;
+	u8 *padbytes;
+	struct rmnet_map_header_s *map_header;
+
+	if (skb_headroom(skb) < sizeof(struct rmnet_map_header_s))
+		return 0;
+
+	map_datalen = skb->len - hdrlen;
+	map_header = (struct rmnet_map_header_s *)
+			skb_push(skb, sizeof(struct rmnet_map_header_s));
+	memset(map_header, 0, sizeof(struct rmnet_map_header_s));
+
+	if (pad == RMNET_MAP_NO_PAD_BYTES) {
+		map_header->pkt_len = htons(map_datalen);
+		return map_header;
+	}
+
+	padding = ALIGN(map_datalen, 4) - map_datalen;
+
+	if (padding == 0)
+		goto done;
+
+	if (skb_tailroom(skb) < padding)
+		return 0;
+
+	padbytes = (u8 *)skb_put(skb, padding);
+	LOGD("pad: %d", padding);
+	memset(padbytes, 0, padding);
+
+done:
+	map_header->pkt_len = htons(map_datalen + padding);
+	map_header->pad_len = padding & 0x3F;
+
+	return map_header;
+}
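+
+/* Illustrative example (not part of the MAP header definition): with
+ * RMNET_MAP_ADD_PAD_BYTES and a 61 byte payload, ALIGN(61, 4) = 64, so three
+ * zero pad bytes are appended, pkt_len is set to htons(64) and pad_len to 3.
+ * With RMNET_MAP_NO_PAD_BYTES the payload is left unpadded and pkt_len is
+ * htons(61).
+ */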
+
+/* rmnet_map_deaggregate() - Deaggregates a single packet
+ * @skb:        Source socket buffer containing multiple MAP frames
+ * @config:     Physical endpoint configuration of the ingress device
+ *
+ * A whole new buffer is allocated for each portion of an aggregated frame.
+ * Caller should keep calling deaggregate() on the source skb until 0 is
+ * returned, indicating that there are no more packets to deaggregate. Caller
+ * is responsible for freeing the original skb.
+ *
+ * Return:
+ *     - Pointer to new skb
+ *     - 0 (null) if no more aggregated packets
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+				      struct rmnet_phys_ep_config *config)
+{
+	struct sk_buff *skbn;
+	struct rmnet_map_header_s *maph;
+	u32 packet_len;
+
+	if (skb->len == 0)
+		return 0;
+
+	maph = (struct rmnet_map_header_s *)skb->data;
+	packet_len = ntohs(maph->pkt_len) + sizeof(struct rmnet_map_header_s);
+
+	if ((config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV3) ||
+	    (config->ingress_data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4))
+		packet_len += sizeof(struct rmnet_map_dl_checksum_trailer_s);
+
+	if ((((int)skb->len) - ((int)packet_len)) < 0) {
+		LOGM("%s", "Got malformed packet. Dropping");
+		return 0;
+	}
+
+	skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+	if (!skbn)
+		return 0;
+
+	skbn->dev = skb->dev;
+	skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+	skb_put(skbn, packet_len);
+	memcpy(skbn->data, skb->data, packet_len);
+	skb_pull(skb, packet_len);
+
+	/* Some hardware can send us empty frames. Catch them */
+	if (ntohs(maph->pkt_len) == 0) {
+		LOGD("Dropping empty MAP frame");
+		rmnet_kfree_skb(skbn, RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0);
+		return 0;
+	}
+
+	return skbn;
+}
+
+/* rmnet_map_flush_packet_queue() - Transmits aggregated frame on timeout
+ * @work:        struct agg_work containing delayed work and skb to flush
+ *
+ * This function is scheduled to run in a specified number of jiffies after
+ * the last frame transmitted by the network stack. When run, the buffer
+ * containing aggregated packets is finally transmitted on the underlying link.
+ *
+ */
+static void rmnet_map_flush_packet_queue(struct work_struct *work)
+{
+	struct agg_work *real_work;
+	struct rmnet_phys_ep_config *config;
+	unsigned long flags;
+	struct sk_buff *skb;
+	int rc, agg_count = 0;
+
+	skb = 0;
+	real_work = (struct agg_work *)work;
+	config = real_work->config;
+	LOGD("%s", "Entering flush thread");
+	spin_lock_irqsave(&config->agg_lock, flags);
+	if (likely(config->agg_state == RMNET_MAP_TXFER_SCHEDULED)) {
+		/* Buffer may have already been shipped out */
+		if (likely(config->agg_skb)) {
+			rmnet_stats_agg_pkts(config->agg_count);
+			if (config->agg_count > 1)
+				LOGL("Agg count: %d", config->agg_count);
+			skb = config->agg_skb;
+			agg_count = config->agg_count;
+			config->agg_skb = 0;
+			config->agg_count = 0;
+			memset(&config->agg_time, 0, sizeof(struct timespec));
+		}
+		config->agg_state = RMNET_MAP_AGG_IDLE;
+	} else {
+		/* How did we get here? */
+		LOGE("Ran queued command when state %s",
+		     "is idle. State machine likely broken");
+	}
+
+	spin_unlock_irqrestore(&config->agg_lock, flags);
+	if (skb) {
+		trace_rmnet_map_flush_packet_queue(skb, agg_count);
+		rc = dev_queue_xmit(skb);
+		rmnet_stats_queue_xmit(rc, RMNET_STATS_QUEUE_XMIT_AGG_TIMEOUT);
+	}
+	kfree(work);
+}
+
+/* rmnet_map_aggregate() - Software aggregates multiple packets.
+ * @skb:        current packet being transmitted
+ * @config:     Physical endpoint configuration of the ingress device
+ *
+ * Aggregates multiple SKBs into a single large SKB for transmission. MAP
+ * protocol is used to separate the packets in the buffer. This function
+ * consumes the argument SKB and should not be further processed by any other
+ * function.
+ */
+void rmnet_map_aggregate(struct sk_buff *skb,
+			 struct rmnet_phys_ep_config *config)
+{
+	u8 *dest_buff;
+	struct agg_work *work;
+	unsigned long flags;
+	struct sk_buff *agg_skb;
+	struct timespec diff, last;
+	int size, rc, agg_count = 0;
+
+	if (!skb || !config)
+		return;
+	size = config->egress_agg_size - skb->len;
+
+	if (size < 2000) {
+		LOGL("Invalid length %d", size);
+		return;
+	}
+
+new_packet:
+	spin_lock_irqsave(&config->agg_lock, flags);
+
+	memcpy(&last, &config->agg_last, sizeof(struct timespec));
+	getnstimeofday(&config->agg_last);
+
+	if (!config->agg_skb) {
+		/* Check to see if we should agg first. If the traffic is very
+		 * sparse, don't aggregate. We will need to tune this later
+		 */
+		diff = timespec_sub(config->agg_last, last);
+
+		if ((diff.tv_sec > 0) || (diff.tv_nsec > agg_bypass_time)) {
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			LOGL("delta t: %ld.%09lu\tcount: bypass", diff.tv_sec,
+			     diff.tv_nsec);
+			rmnet_stats_agg_pkts(1);
+			trace_rmnet_map_aggregate(skb, 0);
+			rc = dev_queue_xmit(skb);
+			rmnet_stats_queue_xmit(rc,
+					       RMNET_STATS_QUEUE_XMIT_AGG_SKIP);
+			return;
+		}
+
+		config->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+		if (!config->agg_skb) {
+			config->agg_skb = 0;
+			config->agg_count = 0;
+			memset(&config->agg_time, 0, sizeof(struct timespec));
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			rmnet_stats_agg_pkts(1);
+			trace_rmnet_map_aggregate(skb, 0);
+			rc = dev_queue_xmit(skb);
+			rmnet_stats_queue_xmit
+				(rc,
+				 RMNET_STATS_QUEUE_XMIT_AGG_CPY_EXP_FAIL);
+			return;
+		}
+		config->agg_count = 1;
+		getnstimeofday(&config->agg_time);
+		trace_rmnet_start_aggregation(skb);
+		rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_CPY_EXPAND);
+		goto schedule;
+	}
+	diff = timespec_sub(config->agg_last, config->agg_time);
+
+	if (skb->len > (config->egress_agg_size - config->agg_skb->len) ||
+	    (config->agg_count >= config->egress_agg_count) ||
+	    (diff.tv_sec > 0) || (diff.tv_nsec > agg_time_limit)) {
+		rmnet_stats_agg_pkts(config->agg_count);
+		agg_skb = config->agg_skb;
+		agg_count = config->agg_count;
+		config->agg_skb = 0;
+		config->agg_count = 0;
+		memset(&config->agg_time, 0, sizeof(struct timespec));
+		spin_unlock_irqrestore(&config->agg_lock, flags);
+		LOGL("delta t: %ld.%09lu\tcount: %d", diff.tv_sec,
+		     diff.tv_nsec, agg_count);
+		trace_rmnet_map_aggregate(skb, agg_count);
+		rc = dev_queue_xmit(agg_skb);
+		rmnet_stats_queue_xmit(rc,
+				       RMNET_STATS_QUEUE_XMIT_AGG_FILL_BUFFER);
+		goto new_packet;
+	}
+
+	dest_buff = skb_put(config->agg_skb, skb->len);
+	memcpy(dest_buff, skb->data, skb->len);
+	config->agg_count++;
+	rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_AGG_INTO_BUFF);
+
+schedule:
+	if (config->agg_state != RMNET_MAP_TXFER_SCHEDULED) {
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work) {
+			LOGE("Failed to allocate work item for packet %s",
+			     "transfer. DATA PATH LIKELY BROKEN!");
+			config->agg_state = RMNET_MAP_AGG_IDLE;
+			spin_unlock_irqrestore(&config->agg_lock, flags);
+			return;
+		}
+		INIT_DELAYED_WORK((struct delayed_work *)work,
+				  rmnet_map_flush_packet_queue);
+		work->config = config;
+		config->agg_state = RMNET_MAP_TXFER_SCHEDULED;
+		schedule_delayed_work((struct delayed_work *)work, 1);
+	}
+	spin_unlock_irqrestore(&config->agg_lock, flags);
+}
+
+/* Checksum Offload */
+
+static inline u16 *rmnet_map_get_checksum_field(unsigned char protocol,
+						const void *txporthdr)
+{
+	u16 *check = 0;
+
+	switch (protocol) {
+	case IPPROTO_TCP:
+		check = &(((struct tcphdr *)txporthdr)->check);
+		break;
+
+	case IPPROTO_UDP:
+		check = &(((struct udphdr *)txporthdr)->check);
+		break;
+
+	default:
+		check = 0;
+		break;
+	}
+
+	return check;
+}
+
+static inline u16 rmnet_map_add_checksums(u16 val1, u16 val2)
+{
+	int sum = val1 + val2;
+
+	sum = (((sum & 0xFFFF0000) >> 16) + sum) & 0x0000FFFF;
+	return (u16)(sum & 0x0000FFFF);
+}
+
+static inline u16 rmnet_map_subtract_checksums(u16 val1, u16 val2)
+{
+	return rmnet_map_add_checksums(val1, ~val2);
+}
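+
+/* Illustrative worked example for the one's-complement helpers above (not
+ * taken from the MAP specification): rmnet_map_add_checksums(0xFFFF, 0x0001)
+ * sums to 0x10000 and folds the carry back in (end-around carry), giving
+ * 0x0001. rmnet_map_subtract_checksums(0x1234, 0x0234) adds the complement
+ * 0xFDCB and folds, yielding 0x1000. The validation routines below use these
+ * helpers to remove the IP header sum from the trailer checksum and add the
+ * pseudo-header sum back in.
+ */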
+
+/* rmnet_map_validate_ipv4_packet_checksum() - Validates TCP/UDP checksum
+ *	value for IPv4 packet
+ * @map_payload:	Pointer to the beginning of the map payload
+ * @cksum_trailer:	Pointer to the checksum trailer
+ *
+ * Validates the TCP/UDP checksum for the packet using the checksum value
+ * from the checksum trailer added to the packet.
+ * The validation formula is the following:
+ * 1. Performs 1's complement over the checksum value from the trailer
+ * 2. Computes 1's complement checksum over IPv4 header and subtracts it from
+ *    the value from step 1
+ * 3. Computes 1's complement checksum over IPv4 pseudo header and adds it to
+ *    the value from step 2
+ * 4. Subtracts the checksum value from the TCP/UDP header from the value from
+ *    step 3
+ * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
+ *    header
+ *
+ * Fragmentation and tunneling are not supported.
+ *
+ * Return: 0 if validation succeeded.
+ */
+static int rmnet_map_validate_ipv4_packet_checksum
+	(unsigned char *map_payload,
+	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
+{
+	struct iphdr *ip4h;
+	u16 *checksum_field;
+	void *txporthdr;
+	u16 pseudo_checksum;
+	u16 ip_hdr_checksum;
+	u16 checksum_value;
+	u16 ip_payload_checksum;
+	u16 ip_pseudo_payload_checksum;
+	u16 checksum_value_final;
+
+	ip4h = (struct iphdr *)map_payload;
+	if ((ntohs(ip4h->frag_off) & IP_MF) ||
+	    ((ntohs(ip4h->frag_off) & IP_OFFSET) > 0))
+		return RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET;
+
+	txporthdr = map_payload + ip4h->ihl * 4;
+
+	checksum_field = rmnet_map_get_checksum_field(ip4h->protocol,
+						      txporthdr);
+
+	if (unlikely(!checksum_field))
+		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;
+
+	/* RFC 768 - Skip IPv4 UDP packets where sender checksum field is 0 */
+	if ((*checksum_field == 0) && (ip4h->protocol == IPPROTO_UDP))
+		return RMNET_MAP_CHECKSUM_SKIPPED;
+
+	checksum_value = ~ntohs(cksum_trailer->checksum_value);
+	ip_hdr_checksum = ~ip_fast_csum(ip4h, (int)ip4h->ihl);
+	ip_payload_checksum = rmnet_map_subtract_checksums(checksum_value,
+							   ip_hdr_checksum);
+
+	pseudo_checksum = ~ntohs(csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+		(u16)(ntohs(ip4h->tot_len) - ip4h->ihl * 4),
+		(u16)ip4h->protocol, 0));
+	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
+		ip_payload_checksum, pseudo_checksum);
+
+	checksum_value_final = ~rmnet_map_subtract_checksums(
+		ip_pseudo_payload_checksum, ntohs(*checksum_field));
+
+	if (unlikely(checksum_value_final == 0)) {
+		switch (ip4h->protocol) {
+		case IPPROTO_UDP:
+			/* RFC 768 */
+			LOGD("DL4 1's complement rule for UDP checksum 0");
+			checksum_value_final = ~checksum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			if (*checksum_field == 0xFFFF) {
+				LOGD(
+				"DL4 Non-RFC compliant TCP checksum found");
+				checksum_value_final = ~checksum_value_final;
+			}
+			break;
+		}
+	}
+
+	LOGD(
+	"DL4 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
+	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
+	pseudo_checksum, checksum_value_final);
+
+	if (checksum_value_final == ntohs(*checksum_field))
+		return RMNET_MAP_CHECKSUM_OK;
+	else
+		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
+}
+
+/* rmnet_map_validate_ipv6_packet_checksum() - Validates TCP/UDP checksum
+ *	value for IPv6 packet
+ * @map_payload:	Pointer to the beginning of the map payload
+ * @cksum_trailer:	Pointer to the checksum trailer
+ *
+ * Validates the TCP/UDP checksum for the packet using the checksum value
+ * from the checksum trailer added to the packet.
+ * The validation formula is the following:
+ * 1. Performs 1's complement over the checksum value from the trailer
+ * 2. Computes 1's complement checksum over IPv6 header and subtracts it from
+ *    the value from step 1
+ * 3. Computes 1's complement checksum over IPv6 pseudo header and adds it to
+ *    the value from step 2
+ * 4. Subtracts the checksum value from the TCP/UDP header from the value from
+ *    step 3
+ * 5. Compares the value from step 4 to the checksum value from the TCP/UDP
+ *    header
+ *
+ * Fragmentation, extension headers and tunneling are not supported.
+ *
+ * Return: 0 if validation succeeded.
+ */
+static int rmnet_map_validate_ipv6_packet_checksum
+	(unsigned char *map_payload,
+	 struct rmnet_map_dl_checksum_trailer_s *cksum_trailer)
+{
+	struct ipv6hdr *ip6h;
+	u16 *checksum_field;
+	void *txporthdr;
+	u16 pseudo_checksum;
+	u16 ip_hdr_checksum;
+	u16 checksum_value;
+	u16 ip_payload_checksum;
+	u16 ip_pseudo_payload_checksum;
+	u16 checksum_value_final;
+	u32 length;
+
+	ip6h = (struct ipv6hdr *)map_payload;
+
+	txporthdr = map_payload + sizeof(struct ipv6hdr);
+	checksum_field = rmnet_map_get_checksum_field(ip6h->nexthdr,
+						      txporthdr);
+
+	if (unlikely(!checksum_field))
+		return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT;
+
+	checksum_value = ~ntohs(cksum_trailer->checksum_value);
+	ip_hdr_checksum = ~ntohs(ip_compute_csum(ip6h,
+				 (int)(txporthdr - (void *)map_payload)));
+	ip_payload_checksum = rmnet_map_subtract_checksums
+				(checksum_value, ip_hdr_checksum);
+
+	length = (ip6h->nexthdr == IPPROTO_UDP) ?
+		ntohs(((struct udphdr *)txporthdr)->len) :
+		ntohs(ip6h->payload_len);
+	pseudo_checksum = ~ntohs(csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+		length, ip6h->nexthdr, 0));
+	ip_pseudo_payload_checksum = rmnet_map_add_checksums(
+		ip_payload_checksum, pseudo_checksum);
+
+	checksum_value_final = ~rmnet_map_subtract_checksums(
+		ip_pseudo_payload_checksum, ntohs(*checksum_field));
+
+	if (unlikely(checksum_value_final == 0)) {
+		switch (ip6h->nexthdr) {
+		case IPPROTO_UDP:
+			/* RFC 2460 section 8.1 */
+			LOGD("DL6 One's complement rule for UDP checksum 0");
+			checksum_value_final = ~checksum_value_final;
+			break;
+
+		case IPPROTO_TCP:
+			if (*checksum_field == 0xFFFF) {
+				LOGD(
+				"DL6 Non-RFC compliant TCP checksum found");
+				checksum_value_final = ~checksum_value_final;
+			}
+			break;
+		}
+	}
+
+	LOGD(
+	"DL6 cksum: ~HW: %04X, field: %04X, pseudo header: %04X, final: %04X",
+	~ntohs(cksum_trailer->checksum_value), ntohs(*checksum_field),
+	pseudo_checksum, checksum_value_final);
+
+	if (checksum_value_final == ntohs(*checksum_field))
+		return RMNET_MAP_CHECKSUM_OK;
+	else
+		return RMNET_MAP_CHECKSUM_VALIDATION_FAILED;
+}
+
+/* rmnet_map_checksum_downlink_packet() - Validates checksum on
+ * a downlink packet
+ * @skb:	Pointer to the packet's skb.
+ *
+ * Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the entire MAP
+ * frame: MAP header + IP payload + padding + checksum trailer.
+ * Currently, only IPv4 and IPv6 are supported along with
+ * TCP & UDP. Fragmented or tunneled packets are not supported.
+ *
+ * Return:
+ *   - RMNET_MAP_CHECKSUM_OK: Validation of checksum succeeded.
+ *   - RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER: Skb buffer given is corrupted.
+ *   - RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET: Valid flag is not set in the
+ *					      checksum trailer.
+ *   - RMNET_MAP_CHECKSUM_FRAGMENTED_PACKET: The packet is a fragment.
+ *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_TRANSPORT: The transport header is
+ *						   not TCP/UDP.
+ *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
+ *   - RMNET_MAP_CHECKSUM_VALIDATION_FAILED: In case the validation failed.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb)
+{
+	struct rmnet_map_dl_checksum_trailer_s *cksum_trailer;
+	unsigned int data_len;
+	unsigned char *map_payload;
+	unsigned char ip_version;
+
+	data_len = RMNET_MAP_GET_LENGTH(skb);
+
+	if (unlikely(skb->len < (sizeof(struct rmnet_map_header_s) + data_len +
+	    sizeof(struct rmnet_map_dl_checksum_trailer_s))))
+		return RMNET_MAP_CHECKSUM_ERR_BAD_BUFFER;
+
+	cksum_trailer = (struct rmnet_map_dl_checksum_trailer_s *)
+			(skb->data + data_len
+			+ sizeof(struct rmnet_map_header_s));
+
+	if (unlikely(!ntohs(cksum_trailer->valid)))
+		return RMNET_MAP_CHECKSUM_VALID_FLAG_NOT_SET;
+
+	map_payload = (unsigned char *)(skb->data
+		+ sizeof(struct rmnet_map_header_s));
+
+	ip_version = (*map_payload & 0xF0) >> 4;
+	if (ip_version == 0x04)
+		return rmnet_map_validate_ipv4_packet_checksum(map_payload,
+			cksum_trailer);
+	else if (ip_version == 0x06)
+		return rmnet_map_validate_ipv6_packet_checksum(map_payload,
+			cksum_trailer);
+
+	return RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
+}
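+
+/* Illustrative layout of the downlink MAP frame assumed by the function
+ * above (for orientation only):
+ *
+ *   | rmnet_map_header_s | IP packet | padding | dl_checksum_trailer_s |
+ *
+ * RMNET_MAP_GET_LENGTH(skb) covers the IP packet plus padding, so the
+ * trailer starts at skb->data + sizeof(rmnet_map_header_s) + that length.
+ */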
+
+static void rmnet_map_fill_ipv4_packet_ul_checksum_header
+	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
+	 struct sk_buff *skb)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	unsigned short *hdr = (unsigned short *)ul_header;
+
+	ul_header->checksum_start_offset = htons((unsigned short)
+		(skb_transport_header(skb) - (unsigned char *)iphdr));
+	ul_header->checksum_insert_offset = skb->csum_offset;
+	ul_header->cks_en = 1;
+	if (ip4h->protocol == IPPROTO_UDP)
+		ul_header->udp_ip4_ind = 1;
+	else
+		ul_header->udp_ip4_ind = 0;
+	/* Changing checksum_insert_offset to network order */
+	hdr++;
+	*hdr = htons(*hdr);
+	skb->ip_summed = CHECKSUM_NONE;
+}
+
+static void rmnet_map_fill_ipv6_packet_ul_checksum_header
+	(void *iphdr, struct rmnet_map_ul_checksum_header_s *ul_header,
+	 struct sk_buff *skb)
+{
+	unsigned short *hdr = (unsigned short *)ul_header;
+
+	ul_header->checksum_start_offset = htons((unsigned short)
+		(skb_transport_header(skb) - (unsigned char *)iphdr));
+	ul_header->checksum_insert_offset = skb->csum_offset;
+	ul_header->cks_en = 1;
+	ul_header->udp_ip4_ind = 0;
+	/* Changing checksum_insert_offset to network order */
+	hdr++;
+	*hdr = htons(*hdr);
+	skb->ip_summed = CHECKSUM_NONE;
+}
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(void *iphdr)
+{
+	struct iphdr *ip4h = (struct iphdr *)iphdr;
+	void *txporthdr;
+	u16 *csum;
+
+	txporthdr = iphdr + ip4h->ihl * 4;
+
+	if ((ip4h->protocol == IPPROTO_TCP) ||
+	    (ip4h->protocol == IPPROTO_UDP)) {
+		csum = (u16 *)rmnet_map_get_checksum_field(ip4h->protocol,
+								txporthdr);
+		*csum = ~(*csum);
+	}
+}
+
+static void rmnet_map_complement_ipv6_txporthdr_csum_field(void *ip6hdr)
+{
+	struct ipv6hdr *ip6h = (struct ipv6hdr *)ip6hdr;
+	void *txporthdr;
+	u16 *csum;
+
+	txporthdr = ip6hdr + sizeof(struct ipv6hdr);
+
+	if ((ip6h->nexthdr == IPPROTO_TCP) || (ip6h->nexthdr == IPPROTO_UDP)) {
+		csum = (u16 *)rmnet_map_get_checksum_field(ip6h->nexthdr,
+								txporthdr);
+		*csum = ~(*csum);
+	}
+}
+
+/* rmnet_map_checksum_uplink_packet() - Generates UL checksum meta info header
+ * @skb:	Pointer to the packet's skb.
+ * @orig_dev:	Underlying output network device, checked for checksum
+ *		offload support.
+ * @egress_data_format:	Egress data format flags of the logical device
+ *			(checked for RMNET_EGRESS_FORMAT_MAP_CKSUMV4).
+ *
+ * Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ *
+ * Return:
+ *   - RMNET_MAP_CHECKSUM_OK: Checksum meta info header was generated.
+ *   - RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION: Unrecognized IP header.
+ *   - RMNET_MAP_CHECKSUM_SW: Unsupported packet for UL checksum offload.
+ */
+int rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+				     struct net_device *orig_dev,
+				     u32 egress_data_format)
+{
+	unsigned char ip_version;
+	struct rmnet_map_ul_checksum_header_s *ul_header;
+	void *iphdr;
+	int ret;
+
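+	/* Make room for the checksum meta info header in front of the IP
+	 * header; it is filled in below or zeroed out for the software
+	 * checksum fallback.
+	 */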
+	ul_header = (struct rmnet_map_ul_checksum_header_s *)
+		skb_push(skb, sizeof(struct rmnet_map_ul_checksum_header_s));
+
+	if (unlikely(!(orig_dev->features &
+		(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))) {
+		ret = RMNET_MAP_CHECKSUM_SW;
+		goto sw_checksum;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		iphdr = (char *)ul_header +
+			sizeof(struct rmnet_map_ul_checksum_header_s);
+		ip_version = (*(char *)iphdr & 0xF0) >> 4;
+		if (ip_version == 0x04) {
+			rmnet_map_fill_ipv4_packet_ul_checksum_header
+				(iphdr, ul_header, skb);
+			if (egress_data_format &
+			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+				rmnet_map_complement_ipv4_txporthdr_csum_field(
+					iphdr);
+			ret = RMNET_MAP_CHECKSUM_OK;
+			goto done;
+		} else if (ip_version == 0x06) {
+			rmnet_map_fill_ipv6_packet_ul_checksum_header
+				(iphdr, ul_header, skb);
+			if (egress_data_format &
+			    RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
+				rmnet_map_complement_ipv6_txporthdr_csum_field(
+					iphdr);
+			ret = RMNET_MAP_CHECKSUM_OK;
+			goto done;
+		} else {
+			ret = RMNET_MAP_CHECKSUM_ERR_UNKNOWN_IP_VERSION;
+			goto sw_checksum;
+		}
+	} else {
+		ret = RMNET_MAP_CHECKSUM_SW;
+		goto sw_checksum;
+	}
+
+sw_checksum:
+	ul_header->checksum_start_offset = 0;
+	ul_header->checksum_insert_offset = 0;
+	ul_header->cks_en = 0;
+	ul_header->udp_ip4_ind = 0;
+done:
+	return ret;
+}
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 5497d022..9175073 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -319,7 +319,8 @@
 		if (!c)
 			return -EINVAL;
 
-		if (c->flags & IEEE80211_CHAN_RADAR)
+		if ((c->flags & IEEE80211_CHAN_RADAR) &&
+		    !(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD))
 			return 1;
 	}
 	return 0;
@@ -592,10 +593,15 @@
 
 	for (freq = start_freq; freq <= end_freq; freq += 20) {
 		c = ieee80211_get_channel(wiphy, freq);
-		if (!c || c->flags & prohibited_flags)
+
+		if (!c)
+			return false;
+		/* check for radar flags */
+		if ((!(wiphy->flags & WIPHY_FLAG_DFS_OFFLOAD)) &&
+		    (prohibited_flags & c->flags & IEEE80211_CHAN_RADAR) &&
+		    (c->dfs_state != NL80211_DFS_AVAILABLE))
 			return false;
 	}
-
 	return true;
 }
 
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c510810..380c0b6 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7212,6 +7212,9 @@
 	if (netif_carrier_ok(dev))
 		return -EBUSY;
 
+	if (rdev->wiphy.flags & WIPHY_FLAG_DFS_OFFLOAD)
+		return -EOPNOTSUPP;
+
 	if (wdev->cac_started)
 		return -EBUSY;
 
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5dbac37..eeb23d2 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -418,6 +418,11 @@
 	return true;
 }
 
+static bool is_cfg80211_regdom_intersected(void)
+{
+	return is_intersected_alpha2(get_cfg80211_regdom()->alpha2);
+}
+
 static const struct ieee80211_regdomain *
 reg_copy_regd(const struct ieee80211_regdomain *src_regd)
 {
@@ -1872,9 +1877,14 @@
 	 */
 	if ((lr->initiator == NL80211_REGDOM_SET_BY_CORE ||
 	     lr->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
-	     lr->initiator == NL80211_REGDOM_SET_BY_USER) &&
-	    regdom_changes(lr->alpha2))
-		return REG_REQ_IGNORE;
+	     lr->initiator == NL80211_REGDOM_SET_BY_USER)) {
+		if (lr->intersect) {
+			if (!is_cfg80211_regdom_intersected())
+				return REG_REQ_IGNORE;
+		} else if (regdom_changes(lr->alpha2)) {
+			return REG_REQ_IGNORE;
+		}
+	}
 
 	if (!regdom_changes(user_request->alpha2))
 		return REG_REQ_ALREADY_SET;
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 35ad69f..e318878 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -70,7 +70,7 @@
 MODULE_PARM_DESC(bss_entries_limit,
                  "limit to number of scan BSS entries (per wiphy, default 1000)");
 
-#define IEEE80211_SCAN_RESULT_EXPIRE	(30 * HZ)
+#define IEEE80211_SCAN_RESULT_EXPIRE	(7 * HZ)
 
 static void bss_free(struct cfg80211_internal_bss *bss)
 {
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 50616ea..2e70c6f 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -11,7 +11,7 @@
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
+-include $(if $(wildcard $(kbuild-dir)/Kbuild), $(kbuild-dir)/Kbuild, $(kbuild-dir)/Makefile)
 
 # Figure out what we need to build from the various variables
 # ==========================================================================
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0a07f90..d3d3320 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -314,6 +314,12 @@
 
 dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
+# cat
+# ---------------------------------------------------------------------------
+# Concatenate multiple files together
+quiet_cmd_cat = CAT     $@
+cmd_cat = (cat $(filter-out FORCE,$^) > $@) || (rm -f $@; false)
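+# Typically used from a rule via $(call if_changed,cat) (illustrative usage).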
+
 # Bzip2
 # ---------------------------------------------------------------------------
 
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 07650ee..6f4c3f5 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -29,7 +29,7 @@
 INSTALL_MOD_DIR ?= extra
 ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
 
-modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
+modinst_dir ?= $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 
 $(modules):
 	$(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
diff --git a/scripts/build-all.py b/scripts/build-all.py
new file mode 100755
index 0000000..0f8babf
--- /dev/null
+++ b/scripts/build-all.py
@@ -0,0 +1,428 @@
+#! /usr/bin/env python2
+
+# Copyright (c) 2009-2015, The Linux Foundation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of The Linux Foundation nor
+#       the names of its contributors may be used to endorse or promote
+#       products derived from this software without specific prior written
+#       permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NON-INFRINGEMENT ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Build the kernel for all targets using the Android build environment.
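+#
+# Typical invocation (toolchain prefixes are illustrative):
+#   CROSS_COMPILE=arm-eabi- CROSS_COMPILE64=aarch64-linux-android- \
+#       ./scripts/build-all.py -j 8 all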
+
+from collections import namedtuple
+import errno
+import glob
+from optparse import OptionParser
+import os
+import re
+import shutil
+import subprocess
+import sys
+import threading
+import Queue
+
+version = 'build-all.py, version 1.99'
+
+build_dir = '../all-kernels'
+make_command = ["vmlinux", "modules", "dtbs"]
+all_options = {}
+compile64 = os.environ.get('CROSS_COMPILE64')
+
+def error(msg):
+    sys.stderr.write("error: %s\n" % msg)
+
+def fail(msg):
+    """Fail with a user-printed message"""
+    error(msg)
+    sys.exit(1)
+
+if not os.environ.get('CROSS_COMPILE'):
+    fail("CROSS_COMPILE must be set in the environment")
+
+def check_kernel():
+    """Ensure that PWD is a kernel directory"""
+    have_defconfig = any([
+        os.path.isfile('arch/arm64/configs/msm_defconfig'),
+        os.path.isfile('arch/arm64/configs/msmskunk_defconfig')])
+
+    if not all([os.path.isfile('MAINTAINERS'), have_defconfig]):
+        fail("This doesn't seem to be an MSM kernel dir")
+
+def check_build():
+    """Ensure that the build directory is present."""
+    if not os.path.isdir(build_dir):
+        try:
+            os.makedirs(build_dir)
+        except OSError as exc:
+            if exc.errno == errno.EEXIST:
+                pass
+            else:
+                raise
+
+failed_targets = []
+
+BuildResult = namedtuple('BuildResult', ['status', 'messages'])
+
+class BuildSequence(namedtuple('BuildSequence', ['log_name', 'short_name', 'steps'])):
+
+    def set_width(self, width):
+        self.width = width
+
+    def __enter__(self):
+        self.log = open(self.log_name, 'w')
+
+    def __exit__(self, type, value, traceback):
+        self.log.close()
+
+    def run(self):
+        self.status = None
+        messages = ["Building: " + self.short_name]
+        def printer(line):
+            text = "[%-*s] %s" % (self.width, self.short_name, line)
+            messages.append(text)
+            self.log.write(text)
+            self.log.write('\n')
+        for step in self.steps:
+            st = step.run(printer)
+            if st:
+                self.status = BuildResult(self.short_name, messages)
+                break
+        if not self.status:
+            self.status = BuildResult(None, messages)
+
+class BuildTracker:
+    """Manages all of the steps necessary to perform a build.  The
+    build consists of one or more sequences of steps.  The different
+    sequences can be processed independently, while the steps within a
+    sequence must be done in order."""
+
+    def __init__(self, parallel_builds):
+        self.sequence = []
+        self.lock = threading.Lock()
+        self.parallel_builds = parallel_builds
+
+    def add_sequence(self, log_name, short_name, steps):
+        self.sequence.append(BuildSequence(log_name, short_name, steps))
+
+    def longest_name(self):
+        longest = 0
+        for seq in self.sequence:
+            longest = max(longest, len(seq.short_name))
+        return longest
+
+    def __repr__(self):
+        return "BuildTracker(%s)" % self.sequence
+
+    def run_child(self, seq):
+        seq.set_width(self.longest)
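+        # The token queue bounds how many builds run concurrently.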
+        tok = self.build_tokens.get()
+        with self.lock:
+            print "Building:", seq.short_name
+        with seq:
+            seq.run()
+            self.results.put(seq.status)
+        self.build_tokens.put(tok)
+
+    def run(self):
+        self.longest = self.longest_name()
+        self.results = Queue.Queue()
+        children = []
+        errors = []
+        self.build_tokens = Queue.Queue()
+        nthreads = self.parallel_builds
+        print "Building with", nthreads, "threads"
+        for i in range(nthreads):
+            self.build_tokens.put(True)
+        for seq in self.sequence:
+            child = threading.Thread(target=self.run_child, args=[seq])
+            children.append(child)
+            child.start()
+        for child in children:
+            stats = self.results.get()
+            if all_options.verbose:
+                with self.lock:
+                    for line in stats.messages:
+                        print line
+                    sys.stdout.flush()
+            if stats.status:
+                errors.append(stats.status)
+        for child in children:
+            child.join()
+        if errors:
+            fail("\n  ".join(["Failed targets:"] + errors))
+
+class PrintStep:
+    """A step that just prints a message"""
+    def __init__(self, message):
+        self.message = message
+
+    def run(self, outp):
+        outp(self.message)
+
+class MkdirStep:
+    """A step that makes a directory"""
+    def __init__(self, direc):
+        self.direc = direc
+
+    def run(self, outp):
+        outp("mkdir %s" % self.direc)
+        os.mkdir(self.direc)
+
+class RmtreeStep:
+    def __init__(self, direc):
+        self.direc = direc
+
+    def run(self, outp):
+        outp("rmtree %s" % self.direc)
+        shutil.rmtree(self.direc, ignore_errors=True)
+
+class CopyfileStep:
+    def __init__(self, src, dest):
+        self.src = src
+        self.dest = dest
+
+    def run(self, outp):
+        outp("cp %s %s" % (self.src, self.dest))
+        shutil.copyfile(self.src, self.dest)
+
+class ExecStep:
+    def __init__(self, cmd, **kwargs):
+        self.cmd = cmd
+        self.kwargs = kwargs
+
+    def run(self, outp):
+        outp("exec: %s" % (" ".join(self.cmd),))
+        with open('/dev/null', 'r') as devnull:
+            proc = subprocess.Popen(self.cmd, stdin=devnull,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    **self.kwargs)
+            stdout = proc.stdout
+            while True:
+                line = stdout.readline()
+                if not line:
+                    break
+                line = line.rstrip('\n')
+                outp(line)
+            result = proc.wait()
+            if result != 0:
+                return ('error', result)
+            else:
+                return None
+
+class Builder():
+
+    def __init__(self, name, defconfig):
+        self.name = name
+        self.defconfig = defconfig
+
+        self.confname = self.defconfig.split('/')[-1]
+
+        # Determine if this is a 64-bit target based on the location
+        # of the defconfig.
+        self.make_env = os.environ.copy()
+        if "/arm64/" in defconfig:
+            if compile64:
+                self.make_env['CROSS_COMPILE'] = compile64
+            else:
+                fail("Attempting to build 64-bit, without setting CROSS_COMPILE64")
+            self.make_env['ARCH'] = 'arm64'
+        else:
+            self.make_env['ARCH'] = 'arm'
+        self.make_env['KCONFIG_NOTIMESTAMP'] = 'true'
+        self.log_name = "%s/log-%s.log" % (build_dir, self.name)
+
+    def build(self):
+        steps = []
+        dest_dir = os.path.join(build_dir, self.name)
+        log_name = "%s/log-%s.log" % (build_dir, self.name)
+        steps.append(PrintStep('Building %s in %s log %s' %
+            (self.name, dest_dir, log_name)))
+        if not os.path.isdir(dest_dir):
+            steps.append(MkdirStep(dest_dir))
+        defconfig = self.defconfig
+        dotconfig = '%s/.config' % dest_dir
+        savedefconfig = '%s/defconfig' % dest_dir
+
+        staging_dir = 'install_staging'
+        modi_dir = '%s' % staging_dir
+        hdri_dir = '%s/usr' % staging_dir
+        steps.append(RmtreeStep(os.path.join(dest_dir, staging_dir)))
+
+        steps.append(ExecStep(['make', 'O=%s' % dest_dir,
+            self.confname], env=self.make_env))
+
+        if not all_options.updateconfigs:
+            # Build targets can be dependent upon the completion of
+            # previous build targets, so build them one at a time.
+            cmd_line = ['make',
+                'INSTALL_HDR_PATH=%s' % hdri_dir,
+                'INSTALL_MOD_PATH=%s' % modi_dir,
+                'O=%s' % dest_dir]
+            build_targets = []
+            for c in make_command:
+                if re.match(r'^-{1,2}\w', c):
+                    cmd_line.append(c)
+                else:
+                    build_targets.append(c)
+            for t in build_targets:
+                steps.append(ExecStep(cmd_line + [t], env=self.make_env))
+
+        # Copy the defconfig back.
+        if all_options.configs or all_options.updateconfigs:
+            steps.append(ExecStep(['make', 'O=%s' % dest_dir,
+                'savedefconfig'], env=self.make_env))
+            steps.append(CopyfileStep(savedefconfig, defconfig))
+
+        return steps
+
+def update_config(file, str):
+    print 'Updating %s with \'%s\'\n' % (file, str)
+    with open(file, 'a') as defconfig:
+        defconfig.write(str + '\n')
+
+def scan_configs():
+    """Get the full list of defconfigs appropriate for this tree."""
+    names = []
+    arch_pats = (
+        r'[fm]sm[0-9]*_defconfig',
+        r'apq*_defconfig',
+        r'qsd*_defconfig',
+        r'mpq*_defconfig',
+        )
+    arch64_pats = (
+        r'msm*_defconfig',
+        )
+    for p in arch_pats:
+        for n in glob.glob('arch/arm/configs/' + p):
+            name = os.path.basename(n)[:-10]
+            names.append(Builder(name, n))
+    if 'CROSS_COMPILE64' in os.environ:
+        for p in arch64_pats:
+            for n in glob.glob('arch/arm64/configs/' + p):
+                name = os.path.basename(n)[:-10] + "-64"
+                names.append(Builder(name, n))
+    return names
+
+def build_many(targets):
+    print "Building %d target(s)" % len(targets)
+
+    # To try and make up for the link phase being serial, try to do
+    # two full builds in parallel.  Don't do too many because lots of
+    # parallel builds tends to use up available memory rather quickly.
+    parallel = 2
+    if all_options.jobs and all_options.jobs > 1:
+        j = max(all_options.jobs / parallel, 2)
+        make_command.append("-j" + str(j))
+
+    tracker = BuildTracker(parallel)
+    for target in targets:
+        if all_options.updateconfigs:
+            update_config(target.defconfig, all_options.updateconfigs)
+        steps = target.build()
+        tracker.add_sequence(target.log_name, target.name, steps)
+    tracker.run()
+
+def main():
+    global make_command
+
+    check_kernel()
+    check_build()
+
+    configs = scan_configs()
+
+    usage = ("""
+           %prog [options] all                 -- Build all targets
+           %prog [options] target target ...   -- List specific targets
+           %prog [options] perf                -- Build all perf targets
+           %prog [options] noperf              -- Build all non-perf targets""")
+    parser = OptionParser(usage=usage, version=version)
+    parser.add_option('--configs', action='store_true',
+            dest='configs',
+            help="Copy configs back into tree")
+    parser.add_option('--list', action='store_true',
+            dest='list',
+            help='List available targets')
+    parser.add_option('-v', '--verbose', action='store_true',
+            dest='verbose',
+            help='Output to stdout in addition to log file')
+    parser.add_option('--oldconfig', action='store_true',
+            dest='oldconfig',
+            help='Only process "make oldconfig"')
+    parser.add_option('--updateconfigs',
+            dest='updateconfigs',
+            help="Update defconfigs with provided option setting, "
+                 "e.g. --updateconfigs=\'CONFIG_USE_THING=y\'")
+    parser.add_option('-j', '--jobs', type='int', dest="jobs",
+            help="Number of simultaneous jobs")
+    parser.add_option('-l', '--load-average', type='int',
+            dest='load_average',
+            help="Don't start multiple jobs unless load is below LOAD_AVERAGE")
+    parser.add_option('-k', '--keep-going', action='store_true',
+            dest='keep_going', default=False,
+            help="Keep building other targets if a target fails")
+    parser.add_option('-m', '--make-target', action='append',
+            help='Build the indicated make target (default: %s)' %
+                 ' '.join(make_command))
+
+    (options, args) = parser.parse_args()
+    global all_options
+    all_options = options
+
+    if options.list:
+        print "Available targets:"
+        for target in configs:
+            print "   %s" % target.name
+        sys.exit(0)
+
+    if options.oldconfig:
+        make_command = ["oldconfig"]
+    elif options.make_target:
+        make_command = options.make_target
+
+    if args == ['all']:
+        build_many(configs)
+    elif args == ['perf']:
+        targets = []
+        for t in configs:
+            if "perf" in t.name:
+                targets.append(t)
+        build_many(targets)
+    elif args == ['noperf']:
+        targets = []
+        for t in configs:
+            if "perf" not in t.name:
+                targets.append(t)
+        build_many(targets)
+    elif len(args) > 0:
+        all_configs = {}
+        for t in configs:
+            all_configs[t.name] = t
+        targets = []
+        for t in args:
+            if t not in all_configs:
+                parser.error("Target '%s' not one of %s" % (t, all_configs.keys()))
+            targets.append(all_configs[t])
+        build_many(targets)
+    else:
+        parser.error("Must specify a target to build, or 'all'")
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/check-config-debug-exemptions b/scripts/check-config-debug-exemptions
new file mode 100644
index 0000000..331a924
--- /dev/null
+++ b/scripts/check-config-debug-exemptions
@@ -0,0 +1,58 @@
+CP15_BARRIER_EMULATION
+DEVKMEM
+DEVMEM
+HID_A4TECH
+HID_ACRUX
+HID_BELKIN
+HID_CHERRY
+HID_CHICONY
+HID_CYPRESS
+HID_DRAGONRISE
+HID_EMS_FF
+HID_EZKEY
+HID_GREENASIA
+HID_GYRATION
+HID_HOLTEK
+HID_KENSINGTON
+HID_KEYTOUCH
+HID_KYE
+HID_LCPOWER
+HID_LOGITECH
+HID_MONTEREY
+HID_NTRIG
+HID_ORTEK
+HID_PANTHERLORD
+HID_PETALYNX
+HID_PICOLCD
+HID_PRIMAX
+HID_PRODIKEYS
+HID_ROCCAT
+HID_SAITEK
+HID_SAMSUNG
+HID_SMARTJOYPLUS
+HID_SONY
+HID_SPEEDLINK
+HID_SUNPLUS
+HID_THRUSTMASTER
+HID_TIVO
+HID_TOPSEED
+HID_TWINHAN
+HID_UCLOGIC
+HID_WACOM
+HID_WALTOP
+HID_WIIMOTE
+HID_ZEROPLUS
+HID_ZYDACRON
+JOYSTICK_XPAD_FF
+JOYSTICK_XPAD_LEDS
+KSM
+MODULES
+PSTORE
+SETEND_EMULATION
+TABLET_USB_ACECAD
+TABLET_USB_AIPTEK
+TABLET_USB_GTCO
+TABLET_USB_HANWANG
+TABLET_USB_KBTAB
+USB_CONFIGFS
+USB_OTG_WAKELOCK
diff --git a/scripts/check-config-perf-exemptions b/scripts/check-config-perf-exemptions
new file mode 100644
index 0000000..d499f5a
--- /dev/null
+++ b/scripts/check-config-perf-exemptions
@@ -0,0 +1,61 @@
+CGROUP_DEBUG
+CP15_BARRIER_EMULATION
+DEVKMEM
+DEVMEM
+HID_A4TECH
+HID_ACRUX
+HID_BELKIN
+HID_CHERRY
+HID_CHICONY
+HID_CYPRESS
+HID_DRAGONRISE
+HID_EMS_FF
+HID_EZKEY
+HID_GREENASIA
+HID_GYRATION
+HID_HOLTEK
+HID_KENSINGTON
+HID_KEYTOUCH
+HID_KYE
+HID_LCPOWER
+HID_LOGITECH
+HID_MONTEREY
+HID_NTRIG
+HID_ORTEK
+HID_PANTHERLORD
+HID_PETALYNX
+HID_PICOLCD
+HID_PRIMAX
+HID_PRODIKEYS
+HID_ROCCAT
+HID_SAITEK
+HID_SAMSUNG
+HID_SMARTJOYPLUS
+HID_SONY
+HID_SPEEDLINK
+HID_SUNPLUS
+HID_THRUSTMASTER
+HID_TIVO
+HID_TOPSEED
+HID_TWINHAN
+HID_UCLOGIC
+HID_WACOM
+HID_WALTOP
+HID_WIIMOTE
+HID_ZEROPLUS
+HID_ZYDACRON
+JOYSTICK_XPAD_FF
+JOYSTICK_XPAD_LEDS
+KSM
+MODULES
+PM_DEBUG
+PSTORE
+SETEND_EMULATION
+SUSPEND_TIME
+TABLET_USB_ACECAD
+TABLET_USB_AIPTEK
+TABLET_USB_GTCO
+TABLET_USB_HANWANG
+TABLET_USB_KBTAB
+USB_CONFIGFS
+USB_OTG_WAKELOCK
diff --git a/scripts/check-config.py b/scripts/check-config.py
new file mode 100755
index 0000000..79c2bdf
--- /dev/null
+++ b/scripts/check-config.py
@@ -0,0 +1,147 @@
+#! /usr/bin/env python
+
+# Copyright (c) 2015, The Linux Foundation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of The Linux Foundation nor
+#       the names of its contributors may be used to endorse or promote
+#       products derived from this software without specific prior written
+#       permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NON-INFRINGEMENT ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Android kernel configuration validator.
+
+The Android kernel reference trees contain some config stubs of
+configuration options that are required for Android to function
+correctly, and additional ones that are recommended.
+
+This script can help compare these base configs with the ".config"
+output of the compiler to determine if the proper configs are defined.
+"""
+
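+# Typical invocation (file names are illustrative):
+#   scripts/check-config.py -r android-base.cfg \
+#       -e scripts/check-config-debug-exemptions path/to/.config
+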
+from collections import namedtuple
+from optparse import OptionParser
+import re
+import sys
+
+version = "check-config.py, version 0.0.1"
+
+req_re = re.compile(r'''^CONFIG_(.*)=(.*)$''')
+forb_re = re.compile(r'''^# CONFIG_(.*) is not set$''')
+comment_re = re.compile(r'''^(#.*|)$''')
+
+Enabled = namedtuple('Enabled', ['name', 'value'])
+Disabled = namedtuple('Disabled', ['name'])
+
+def walk_config(name):
+    with open(name, 'r') as fd:
+        for line in fd:
+            line = line.rstrip()
+            m = req_re.match(line)
+            if m:
+                yield Enabled(m.group(1), m.group(2))
+                continue
+
+            m = forb_re.match(line)
+            if m:
+                yield Disabled(m.group(1))
+                continue
+
+            m = comment_re.match(line)
+            if m:
+                continue
+
+            print "WARNING: Unknown .config line: ", line
+
+class Checker():
+    def __init__(self):
+        self.required = {}
+        self.exempted = set()
+        self.forbidden = set()
+
+    def add_required(self, fname):
+        for ent in walk_config(fname):
+            if type(ent) is Enabled:
+                self.required[ent.name] = ent.value
+            elif type(ent) is Disabled:
+                if ent.name in self.required:
+                    del self.required[ent.name]
+                self.forbidden.add(ent.name)
+
+    def add_exempted(self, fname):
+        with open(fname, 'r') as fd:
+            for line in fd:
+                line = line.rstrip()
+                self.exempted.add(line)
+
+    def check(self, path):
+        failure = False
+
+        # Don't run this for mdm targets
+        if re.search('mdm', path):
+            print "Not applicable to mdm targets... bypassing"
+        else:
+            for ent in walk_config(path):
+                # Go to the next iteration if this config is exempt
+                if ent.name in self.exempted:
+                    continue
+
+                if type(ent) is Enabled:
+                    if ent.name in self.forbidden:
+                        print "error: Config should not be present: %s" %ent.name
+                        failure = True
+
+                    if ent.name in self.required and ent.value != self.required[ent.name]:
+                        print "error: Config has wrong value: %s %s expecting: %s" \
+                                 %(ent.name, ent.value, self.required[ent.name])
+                        failure = True
+
+                elif type(ent) is Disabled:
+                    if ent.name in self.required:
+                        print "error: Config should be present, but is disabled: %s" %ent.name
+                        failure = True
+
+        if failure:
+            sys.exit(1)
+
+def main():
+    usage = """%prog [options] path/to/.config"""
+    parser = OptionParser(usage=usage, version=version)
+    parser.add_option('-r', '--required', dest="required",
+            action="append")
+    parser.add_option('-e', '--exempted', dest="exempted",
+            action="append")
+    (options, args) = parser.parse_args()
+    if len(args) != 1:
+        parser.error("Expecting a single path argument to .config")
+    elif options.required is None or options.exempted is None:
+        parser.error("Expecting a file containing required configurations")
+
+    ch = Checker()
+    for r in options.required:
+        ch.add_required(r)
+    for e in options.exempted:
+        ch.add_exempted(e)
+
+    ch.check(args[0])
+
+if __name__ == '__main__':
+    main()
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index a8368d1..81ff8e2 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -11,6 +11,13 @@
 use Cwd 'abs_path';
 use Term::ANSIColor qw(:constants);
 
+use constant BEFORE_SHORTTEXT => 0;
+use constant IN_SHORTTEXT_BLANKLINE => 1;
+use constant IN_SHORTTEXT => 2;
+use constant AFTER_SHORTTEXT => 3;
+use constant CHECK_NEXT_SHORTTEXT => 4;
+use constant SHORTTEXT_LIMIT => 75;
+
 my $P = $0;
 my $D = dirname(abs_path($P));
 
@@ -22,6 +29,7 @@
 my $tree = 1;
 my $chk_signoff = 1;
 my $chk_patch = 1;
+my $chk_author = 1;
 my $tst_only;
 my $emacs = 0;
 my $terse = 0;
@@ -69,6 +77,7 @@
   -q, --quiet                quiet
   --no-tree                  run without a kernel tree
   --no-signoff               do not check for 'Signed-off-by' line
+  --no-author                do not check for unexpected authors
   --patch                    treat FILE as patchfile (default)
   --emacs                    emacs compile window format
   --terse                    one line per report
@@ -184,6 +193,7 @@
 	'tree!'		=> \$tree,
 	'signoff!'	=> \$chk_signoff,
 	'patch!'	=> \$chk_patch,
+	'author!'	=> \$chk_author,
 	'emacs!'	=> \$emacs,
 	'terse!'	=> \$terse,
 	'showfile!'	=> \$showfile,
@@ -2084,6 +2094,33 @@
 	return "$leading";
 }
 
+sub cleanup_continuation_headers {
+	# Collapse any header-continuation lines into a single line so they
+	# can be parsed meaningfully, as the parser only has one line
+	# of context to work with.
+	my $again;
+	do {
+		$again = 0;
+		foreach my $n (0 .. scalar(@rawlines) - 2) {
+			if ($rawlines[$n]=~/^\s*$/) {
+				# A blank line means there's no more chance
+				# of finding headers.  Shortcut to done.
+				return;
+			}
+			if ($rawlines[$n]=~/^[\x21-\x39\x3b-\x7e]+:/ &&
+			    $rawlines[$n+1]=~/^\s+/) {
+				# Continuation header.  Collapse it.
+				my $line = splice @rawlines, $n+1, 1;
+				$line=~s/^\s+/ /;
+				$rawlines[$n] .= $line;
+				# We've 'destabilized' the list, so restart.
+				$again = 1;
+				last;
+			}
+		}
+	} while ($again);
+}
+
 sub pos_last_openparen {
 	my ($line) = @_;
 
@@ -2122,6 +2159,8 @@
 	my $prevrawline="";
 	my $stashline="";
 	my $stashrawline="";
+	my $subjectline="";
+	my $sublinenr="";
 
 	my $length;
 	my $indent;
@@ -2176,9 +2215,14 @@
 	my $setup_docs = 0;
 
 	my $camelcase_file_seeded = 0;
+	my $shorttext = BEFORE_SHORTTEXT;
+	my $shorttext_exspc = 0;
+	my $commit_text_present = 0;
 
 	sanitise_line_reset();
+	cleanup_continuation_headers();
 	my $line;
+
 	foreach my $rawline (@rawlines) {
 		$linenr++;
 		$line = $rawline;
@@ -2364,13 +2408,115 @@
 			}
 			next;
 		}
-
 		$here .= "FILE: $realfile:$realline:" if ($realcnt != 0);
 
 		my $hereline = "$here\n$rawline\n";
 		my $herecurr = "$here\n$rawline\n";
 		my $hereprev = "$here\n$prevrawline\n$rawline\n";
 
+		if ($shorttext != AFTER_SHORTTEXT) {
+			if ($shorttext == IN_SHORTTEXT_BLANKLINE && $line=~/\S/) {
+				# the subject line was just processed,
+				# a blank line must be next
+				WARN("NONBLANK_AFTER_SUMMARY",
+				     "non-blank line after summary line\n" . $herecurr);
+				$shorttext = IN_SHORTTEXT;
+				# this non-blank line may or may not be commit text -
+				# a warning has been generated so assume it is commit
+				# text and move on
+				$commit_text_present = 1;
+				# fall through and treat this line as IN_SHORTTEXT
+			}
+			if ($shorttext == IN_SHORTTEXT) {
+				if ($line=~/^---/ || $line=~/^diff.*/) {
+					if ($commit_text_present == 0) {
+						WARN("NO_COMMIT_TEXT",
+						     "please add commit text explaining " .
+						     "*why* the change is needed\n" .
+						     $herecurr);
+					}
+					$shorttext = AFTER_SHORTTEXT;
+				} elsif (length($line) > (SHORTTEXT_LIMIT +
+							  $shorttext_exspc)
+					 && $line !~ /^:([0-7]{6}\s){2}
+						      ([[:xdigit:]]+\.*
+						       \s){2}\w+\s\w+/xms) {
+					WARN("LONG_COMMIT_TEXT",
+					     "commit text line over " .
+					     SHORTTEXT_LIMIT .
+					     " characters\n" . $herecurr);
+				} elsif ($line=~/^\s*change-id:/i ||
+					 $line=~/^\s*signed-off-by:/i ||
+					 $line=~/^\s*crs-fixed:/i ||
+					 $line=~/^\s*acked-by:/i) {
+					# this is a tag, there must be commit
+					# text by now
+					if ($commit_text_present == 0) {
+						WARN("NO_COMMIT_TEXT",
+						     "please add commit text explaining " .
+						     "*why* the change is needed\n" .
+						     $herecurr);
+						# prevent duplicate warnings
+						$commit_text_present = 1;
+					}
+				} elsif ($line=~/\S/) {
+					$commit_text_present = 1;
+				}
+			} elsif ($shorttext == IN_SHORTTEXT_BLANKLINE) {
+				# case of non-blank line in this state handled above
+				$shorttext = IN_SHORTTEXT;
+			} elsif ($shorttext == CHECK_NEXT_SHORTTEXT) {
+# The Subject line doesn't have to be the last header in the patch.
+# Avoid moving to the IN_SHORTTEXT state until clear of all headers.
+# Per RFC5322, continuation lines must be folded, so any left-justified
+# text which looks like a header is definitely a header.
+				if ($line!~/^[\x21-\x39\x3b-\x7e]+:/) {
+					$shorttext = IN_SHORTTEXT;
+					# Check for Subject line followed by a blank line.
+					if (length($line) != 0) {
+						WARN("NONBLANK_AFTER_SUMMARY",
+						     "non-blank line after " .
+						     "summary line\n" .
+						     $sublinenr . $here .
+						     "\n" . $subjectline .
+						     "\n" . $line . "\n");
+						# this non-blank line may or may not
+						# be commit text - a warning has been
+						# generated so assume it is commit
+						# text and move on
+						$commit_text_present = 1;
+					}
+				}
+			# The next two cases are BEFORE_SHORTTEXT.
+			} elsif ($line=~/^Subject: \[[^\]]*\] (.*)/) {
+				# This is the subject line. Go to
+				# CHECK_NEXT_SHORTTEXT to wait for the commit
+				# text to show up.
+				$shorttext = CHECK_NEXT_SHORTTEXT;
+				$subjectline = $line;
+				$sublinenr = "#$linenr & ";
+# Check for Subject line less than line limit
+				if (length($1) > SHORTTEXT_LIMIT && !($1 =~ m/Revert\ \"/)) {
+					WARN("LONG_SUMMARY_LINE",
+					     "summary line over " .
+					     SHORTTEXT_LIMIT .
+					     " characters\n" . $herecurr);
+				}
+			} elsif ($line=~/^    (.*)/) {
+				# Indented format, this must be the summary
+				# line (i.e. git show). There will be no more
+				# headers so we are now in the shorttext.
+				$shorttext = IN_SHORTTEXT_BLANKLINE;
+				$shorttext_exspc = 4;
+				if (length($1) > SHORTTEXT_LIMIT && !($1 =~ m/Revert\ \"/)) {
+					WARN("LONG_SUMMARY_LINE",
+					     "summary line over " .
+					     SHORTTEXT_LIMIT .
+					     " characters\n" . $herecurr);
+				}
+			}
+		}
+
 		$cnt_lines++ if ($realcnt != 0);
 
 # Check if the commit log has what seems like a diff which can confuse patch
@@ -2463,6 +2609,10 @@
 					     "email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
 				}
 			}
+			if ($chk_author && $line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
+				WARN("BAD_SIGN_OFF",
+				     "invalid Signed-off-by identity\n" . $line );
+			}
 
 # Check for duplicate signatures
 			my $sig_nospace = $line;
@@ -2594,6 +2744,11 @@
 			     "added, moved or deleted file(s), does MAINTAINERS need updating?\n" . $herecurr);
 		}
 
+#check the patch for invalid author credentials
+		if ($chk_author && $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
+			WARN("BAD_AUTHOR", "invalid author identity\n" . $line );
+		}
+
 # Check for wrappage within a valid hunk of the file
 		if ($realcnt != 0 && $line !~ m{^(?:\+|-| |\\ No newline|$)}) {
 			ERROR("CORRUPTED_PATCH",
@@ -2821,8 +2976,7 @@
 #
 # if LONG_LINE is ignored, the other 2 types are also ignored
 #
-
-		if ($line =~ /^\+/ && $length > $max_line_length) {
+		if ($line =~ /^\+/ && $length > $max_line_length && $realfile ne "scripts/checkpatch.pl") {
 			my $msg_type = "LONG_LINE";
 
 			# Check the allowed long line types first
@@ -4372,7 +4526,7 @@
 
 # check spacing on parentheses
 		if ($line =~ /\(\s/ && $line !~ /\(\s*(?:\\)?$/ &&
-		    $line !~ /for\s*\(\s+;/) {
+		    $line !~ /for\s*\(\s+;/ && $line !~ /^\+\s*[A-Z_][A-Z\d_]*\(\s*\d+(\,.*)?\)\,?$/) {
 			if (ERROR("SPACING",
 				  "space prohibited after that open parenthesis '('\n" . $herecurr) &&
 			    $fix) {
@@ -4743,7 +4897,7 @@
 		if ($realfile !~ m@/vmlinux.lds.h$@ &&
 		    $line =~ /^.\s*\#\s*define\s*$Ident(\()?/) {
 			my $ln = $linenr;
-			my $cnt = $realcnt;
+			my $cnt = $realcnt - 1;
 			my ($off, $dstat, $dcond, $rest);
 			my $ctx = '';
 			my $has_flow_statement = 0;
@@ -4780,6 +4934,12 @@
 			{
 			}
 
+			# Extremely long macros may fall off the end of the
+			# available context without closing.  Give a dangling
+			# backslash the benefit of the doubt and allow it
+			# to gobble any hanging open-parens.
+			$dstat =~ s/\(.+\\$/1/;
+
 			# Flatten any obvious string concatentation.
 			while ($dstat =~ s/($String)\s*$Ident/$1/ ||
 			       $dstat =~ s/$Ident\s*($String)/$1/)
@@ -4795,6 +4955,7 @@
 				MODULE_PARM_DESC|
 				DECLARE_PER_CPU|
 				DEFINE_PER_CPU|
+				CLK_[A-Z\d_]+|
 				__typeof__\(|
 				union|
 				struct|
@@ -5188,11 +5349,79 @@
 			     "Avoid line continuations in quoted strings\n" . $herecurr);
 		}
 
+# sys_open/read/write/close are not allowed in the kernel
+		if ($line =~ /\b(sys_(?:open|read|write|close))\b/) {
+			ERROR("FILE_OPS",
+			      "$1 is inappropriate in kernel code.\n" .
+			      $herecurr);
+		}
+
+# filp_open is a backdoor for sys_open
+		if ($line =~ /\b(filp_open)\b/) {
+			ERROR("FILE_OPS",
+			      "$1 is inappropriate in kernel code.\n" .
+			      $herecurr);
+		}
+
+# read[bwl] & write[bwl] use too many barriers, use the _relaxed variants
+		if ($line =~ /\b((?:read|write)[bwl])\b/) {
+			ERROR("NON_RELAXED_IO",
+			      "Use of $1 is deprecated: use $1_relaxed\n\t" .
+			      "with appropriate memory barriers instead.\n" .
+			      $herecurr);
+		}
+
+# likewise, in/out[bwl] should be __raw_read/write[bwl]...
+		if ($line =~ /\b((in|out)([bwl]))\b/) {
+			my ($all, $pref, $suf) = ($1, $2, $3);
+			$pref =~ s/in/read/;
+			$pref =~ s/out/write/;
+			ERROR("NON_RELAXED_IO",
+			      "Use of $all is deprecated: use " .
+			      "__raw_$pref$suf\n\t" .
+			      "with appropriate memory barriers instead.\n" .
+			      $herecurr);
+		}
+
+# dsb is too ARMish, and should usually be mb.
+		if ($line =~ /[^-_>*\.]\bdsb\b[^-_\.;]/) {
+			WARN("ARM_BARRIER",
+			     "Use of dsb is discouraged: prefer mb.\n" .
+			     $herecurr);
+		}
+
+# unbounded string functions are overflow risks
+		my %str_fns = (
+			"sprintf" => "snprintf",
+			"strcpy"  => "strlcpy",
+			"strncpy"  => "strlcpy",
+			"strcat"  => "strlcat",
+			"strncat"  => "strlcat",
+			"vsprintf"  => "vsnprintf",
+			"strchr" => "strnchr",
+			"strstr" => "strnstr",
+		);
+		foreach my $k (keys %str_fns) {
+			if ($line =~ /\b$k\b/) {
+				ERROR("UNBOUNDED_STRING_FNS",
+				      "Use of $k is deprecated: " .
+				      "use $str_fns{$k} instead.\n" .
+				      $herecurr);
+			}
+		}
+
 # warn about #if 0
 		if ($line =~ /^.\s*\#\s*if\s+0\b/) {
-			CHK("REDUNDANT_CODE",
-			    "if this code is redundant consider removing it\n" .
-				$herecurr);
+			WARN("IF_0",
+			     "if this code is redundant consider removing it\n"
+			     . $herecurr);
+		}
+
+# warn about #if 1
+		if ($line =~ /^.\s*\#\s*if\s+1\b/) {
+			WARN("IF_1",
+			     "if this code is required consider removing"
+			     . " #if 1\n" . $herecurr);
 		}
 
 # check for needless "if (<foo>) fn(<foo>)" uses
@@ -5382,6 +5611,12 @@
 			     "Comparing get_jiffies_64() is almost always wrong; prefer time_after64, time_before64 and friends\n" . $herecurr);
 		}
 
+# check the patch for use of mdelay
+		if ($line =~ /\bmdelay\s*\(/) {
+			WARN("MDELAY",
+			     "use of mdelay() found: msleep() is the preferred API.\n" . $herecurr );
+		}
+
 # warn about #ifdefs in C files
 #		if ($line =~ /^.\s*\#\s*if(|n)def/ && ($realfile =~ /\.c$/)) {
 #			print "#ifdef in C files should be avoided\n";
@@ -5943,6 +6178,12 @@
 			     "switch default: should use break\n" . $herectx);
 		}
 
+# check for return codes on error paths
+		if ($line =~ /\breturn\s+-\d+/) {
+			ERROR("NO_ERROR_CODE",
+			      "illegal return value, please use an error code\n" . $herecurr);
+		}
+
 # check for gcc specific __FUNCTION__
 		if ($line =~ /\b__FUNCTION__\b/) {
 			if (WARN("USE_FUNC",
diff --git a/scripts/gcc-wrapper.py b/scripts/gcc-wrapper.py
new file mode 100755
index 0000000..8a0e0af
--- /dev/null
+++ b/scripts/gcc-wrapper.py
@@ -0,0 +1,99 @@
+#! /usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above copyright
+#       notice, this list of conditions and the following disclaimer in the
+#       documentation and/or other materials provided with the distribution.
+#     * Neither the name of The Linux Foundation nor
+#       the names of its contributors may be used to endorse or promote
+#       products derived from this software without specific prior written
+#       permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NON-INFRINGEMENT ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Invoke gcc, looking for warnings, and causing a failure if there are
+# non-whitelisted warnings.
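+#
+# The wrapper runs in place of the compiler, with the real compiler command
+# as its arguments, e.g. (illustrative):
+#   scripts/gcc-wrapper.py arm-eabi-gcc -c -o foo.o foo.c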
+
+import errno
+import re
+import os
+import sys
+import subprocess
+
+# Note that gcc uses unicode, which may depend on the locale.  TODO:
+# force LANG to be set to en_US.UTF-8 to get consistent warnings.
+
+allowed_warnings = set([
+    "core.c:144",
+    "inet_connection_sock.c:430",
+    "inet_connection_sock.c:467",
+    "inet6_connection_sock.c:89",
+ ])
+
+# Capture the name of the object file, so it can be removed if a forbidden
+# warning is found.
+ofile = None
+
+warning_re = re.compile(r'''(.*/|)([^/]+\.[a-z]+:\d+):(\d+:)? warning:''')
+def interpret_warning(line):
+    """Decode the message from gcc.  The messages we care about have a filename, and a warning"""
+    line = line.rstrip('\n')
+    m = warning_re.match(line)
+    if m and m.group(2) not in allowed_warnings:
+        print "error, forbidden warning:", m.group(2)
+
+        # If there is a warning, remove any object if it exists.
+        if ofile:
+            try:
+                os.remove(ofile)
+            except OSError:
+                pass
+        sys.exit(1)
+
+def run_gcc():
+    args = sys.argv[1:]
+    # Look for -o
+    try:
+        i = args.index('-o')
+        global ofile
+        ofile = args[i+1]
+    except (ValueError, IndexError):
+        pass
+
+    compiler = sys.argv[0]
+
+    try:
+        proc = subprocess.Popen(args, stderr=subprocess.PIPE)
+        for line in proc.stderr:
+            print line,
+            interpret_warning(line)
+
+        result = proc.wait()
+    except OSError as e:
+        result = e.errno
+        if result == errno.ENOENT:
+            print args[0] + ':',e.strerror
+            print 'Is your PATH set correctly?'
+        else:
+            print ' '.join(args), str(e)
+
+    return result
+
+if __name__ == '__main__':
+    status = run_gcc()
+    sys.exit(status)
diff --git a/security/Kconfig b/security/Kconfig
index 118f454..59aea7d 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -18,6 +18,15 @@
 
 	  If you are unsure how to answer this question, answer N.
 
+config SECURITY_PERF_EVENTS_RESTRICT
+	bool "Restrict unprivileged use of performance events"
+	depends on PERF_EVENTS
+	help
+	  If you say Y here, the kernel.perf_event_paranoid sysctl
+	  will be set to 3 by default, and no unprivileged use of the
+	  perf_event_open syscall will be permitted unless it is
+	  changed.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
diff --git a/security/commoncap.c b/security/commoncap.c
index 8df676f..a8e4aac 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -31,6 +31,10 @@
 #include <linux/binfmts.h>
 #include <linux/personality.h>
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+#include <linux/android_aid.h>
+#endif
+
 /*
  * If a non-root user executes a setuid-root binary in
  * !secure(SECURE_NOROOT) mode, then we raise capabilities.
@@ -73,6 +77,13 @@
 {
 	struct user_namespace *ns = targ_ns;
 
+#ifdef CONFIG_ANDROID_PARANOID_NETWORK
+	if (cap == CAP_NET_RAW && in_egroup_p(AID_NET_RAW))
+		return 0;
+	if (cap == CAP_NET_ADMIN && in_egroup_p(AID_NET_ADMIN))
+		return 0;
+#endif
+
 	/* See if cred has the capability in the target user namespace
 	 * by examining the target user namespace and all of the target
 	 * user namespace's parents.
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 09fd610..24bd84d 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -491,6 +491,7 @@
 		!strcmp(sb->s_type->name, "sysfs") ||
 		!strcmp(sb->s_type->name, "pstore") ||
 		!strcmp(sb->s_type->name, "debugfs") ||
+		!strcmp(sb->s_type->name, "tracefs") ||
 		!strcmp(sb->s_type->name, "rootfs");
 }
 
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 3628d3a..3134537 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -374,6 +374,32 @@
 	       chain2_len_sum);
 }
 
+/*
+ * extended permissions compatibility. Make ToT Android kernels compatible
+ * with Android M releases
+ */
+#define AVTAB_OPTYPE_ALLOWED	0x1000
+#define AVTAB_OPTYPE_AUDITALLOW	0x2000
+#define AVTAB_OPTYPE_DONTAUDIT	0x4000
+#define AVTAB_OPTYPE		(AVTAB_OPTYPE_ALLOWED | \
+				AVTAB_OPTYPE_AUDITALLOW | \
+				AVTAB_OPTYPE_DONTAUDIT)
+#define AVTAB_XPERMS_OPTYPE	4
+
+#define avtab_xperms_to_optype(x) (x << AVTAB_XPERMS_OPTYPE)
+#define avtab_optype_to_xperms(x) (x >> AVTAB_XPERMS_OPTYPE)
+
+static unsigned int avtab_android_m_compat;
+
+static void avtab_android_m_compat_set(void)
+{
+	if (!avtab_android_m_compat) {
+		pr_info("SELinux:  Android master kernel running Android"
+				" M policy in compatibility mode.\n");
+		avtab_android_m_compat = 1;
+	}
+}
+
 static uint16_t spec_order[] = {
 	AVTAB_ALLOWED,
 	AVTAB_AUDITDENY,
@@ -398,6 +424,7 @@
 	struct avtab_datum datum;
 	struct avtab_extended_perms xperms;
 	__le32 buf32[ARRAY_SIZE(xperms.perms.p)];
+	unsigned int android_m_compat_optype = 0;
 	int i, rc;
 	unsigned set;
 
@@ -488,6 +515,13 @@
 	key.target_class = le16_to_cpu(buf16[items++]);
 	key.specified = le16_to_cpu(buf16[items++]);
 
+	if ((key.specified & AVTAB_OPTYPE) &&
+			(vers == POLICYDB_VERSION_XPERMS_IOCTL)) {
+		key.specified = avtab_optype_to_xperms(key.specified);
+		android_m_compat_optype = 1;
+		avtab_android_m_compat_set();
+	}
+
 	if (!policydb_type_isvalid(pol, key.source_type) ||
 	    !policydb_type_isvalid(pol, key.target_type) ||
 	    !policydb_class_isvalid(pol, key.target_class)) {
@@ -518,10 +552,22 @@
 			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
 			return rc;
 		}
-		rc = next_entry(&xperms.driver, fp, sizeof(u8));
-		if (rc) {
-			printk(KERN_ERR "SELinux: avtab: truncated entry\n");
-			return rc;
+		if (avtab_android_m_compat ||
+			    ((xperms.specified != AVTAB_XPERMS_IOCTLFUNCTION) &&
+			    (xperms.specified != AVTAB_XPERMS_IOCTLDRIVER) &&
+			    (vers == POLICYDB_VERSION_XPERMS_IOCTL))) {
+			xperms.driver = xperms.specified;
+			if (android_m_compat_optype)
+				xperms.specified = AVTAB_XPERMS_IOCTLDRIVER;
+			else
+				xperms.specified = AVTAB_XPERMS_IOCTLFUNCTION;
+			avtab_android_m_compat_set();
+		} else {
+			rc = next_entry(&xperms.driver, fp, sizeof(u8));
+			if (rc) {
+				printk(KERN_ERR "SELinux: avtab: truncated entry\n");
+				return rc;
+			}
 		}
 		rc = next_entry(buf32, fp, sizeof(u32)*ARRAY_SIZE(xperms.perms.p));
 		if (rc) {
@@ -607,15 +653,22 @@
 	buf16[0] = cpu_to_le16(cur->key.source_type);
 	buf16[1] = cpu_to_le16(cur->key.target_type);
 	buf16[2] = cpu_to_le16(cur->key.target_class);
-	buf16[3] = cpu_to_le16(cur->key.specified);
+	if (avtab_android_m_compat && (cur->key.specified & AVTAB_XPERMS) &&
+		    (cur->datum.u.xperms->specified == AVTAB_XPERMS_IOCTLDRIVER))
+		buf16[3] = cpu_to_le16(avtab_xperms_to_optype(cur->key.specified));
+	else
+		buf16[3] = cpu_to_le16(cur->key.specified);
 	rc = put_entry(buf16, sizeof(u16), 4, fp);
 	if (rc)
 		return rc;
 
 	if (cur->key.specified & AVTAB_XPERMS) {
-		rc = put_entry(&cur->datum.u.xperms->specified, sizeof(u8), 1, fp);
-		if (rc)
-			return rc;
+		if (avtab_android_m_compat == 0) {
+			rc = put_entry(&cur->datum.u.xperms->specified,
+					sizeof(u8), 1, fp);
+			if (rc)
+				return rc;
+		}
 		rc = put_entry(&cur->datum.u.xperms->driver, sizeof(u8), 1, fp);
 		if (rc)
 			return rc;